diff --git a/.fossa.yml b/.fossa.yml new file mode 100644 index 00000000000..013accd4498 --- /dev/null +++ b/.fossa.yml @@ -0,0 +1,11 @@ +version: 3 + +project: + id: vitess + name: vitess + +# Exclude the maven based scanning of our java client until we can get it working again. +targets: + exclude: + - type: maven + path: java diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 755ca395b68..d1f30ba5827 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,7 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /.github/workflows/ @deepthi @frouioui @mattlord @rohit-nayak-ps /config/mycnf/ @deepthi @shlomi-noach @mattlord /doc/ @deepthi @frouioui @GuptaManan100 -/docker/ @deepthi @derekperkins @dkhenry @mattlord @GuptaManan100 @frouioui +/docker/ @deepthi @derekperkins @mattlord @GuptaManan100 @frouioui /examples/compose @shlomi-noach @GuptaManan100 @frouioui /examples/demo @mattlord @rohit-nayak-ps /examples/local @rohit-nayak-ps @frouioui @mattlord @GuptaManan100 @@ -15,14 +15,14 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /examples/region_sharding @deepthi @mattlord /java/ @harshit-gangal /go/cache @vmg -/go/cmd @ajm188 @deepthi +/go/cmd @ajm188 @deepthi @mattlord /go/cmd/vtadmin @ajm188 @notfelineit /go/cmd/vtctldclient @ajm188 @mattlord /go/cmd/vtctldclient/command/vreplication @mattlord @rohit-nayak-ps /go/internal/flag @ajm188 @rohit-nayak-ps /go/mysql @harshit-gangal @systay @mattlord /go/pools @deepthi @harshit-gangal -/go/protoutil @ajm188 @deepthi +/go/protoutil @ajm188 @deepthi @mattlord /go/sqltypes @harshit-gangal @shlomi-noach @vmg /go/test/endtoend/onlineddl @rohit-nayak-ps @shlomi-noach /go/test/endtoend/messaging @mattlord @rohit-nayak-ps @derekperkins @@ -46,11 +46,11 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /go/vt/vtadmin @ajm188 @notfelineit @rohit-nayak-ps /go/vt/vtctl @ajm188 @deepthi @rohit-nayak-ps 
/go/vt/vtctl/vtctl.go @notfelineit @rohit-nayak-ps -/go/vt/vtctl/grpcvtctldclient @ajm188 @notfelineit -/go/vt/vtctl/grpcvtctldserver @ajm188 @notfelineit +/go/vt/vtctl/grpcvtctldclient @ajm188 @notfelineit @mattlord +/go/vt/vtctl/grpcvtctldserver @ajm188 @notfelineit @mattlord /go/vt/vtctl/reparentutil @ajm188 @GuptaManan100 @deepthi -/go/vt/vtctl/vtctldclient @ajm188 @notfelineit -/go/vt/vtctld @ajm188 @deepthi @notfelineit @rohit-nayak-ps +/go/vt/vtctl/vtctldclient @ajm188 @notfelineit @mattlord +/go/vt/vtctld @ajm188 @deepthi @notfelineit @rohit-nayak-ps @mattlord /go/vt/vterrors @harshit-gangal @systay /go/vt/vtexplain @systay @harshit-gangal /go/vt/vtgate @harshit-gangal @systay @frouioui @GuptaManan100 @@ -73,9 +73,9 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui /go/vt/wrangler @deepthi @mattlord @rohit-nayak-ps /go/vt/workflow @mattlord @rohit-nayak-ps /proto/ @deepthi @harshit-gangal -/proto/vtadmin.proto @ajm188 @notfelineit -/proto/vtctldata.proto @ajm188 @notfelineit -/proto/vtctlservice.proto @ajm188 @notfelineit +/proto/vtadmin.proto @ajm188 @notfelineit @mattlord +/proto/vtctldata.proto @ajm188 @notfelineit @mattlord +/proto/vtctlservice.proto @ajm188 @notfelineit @mattlord /test/ @GuptaManan100 @frouioui @rohit-nayak-ps @deepthi @mattlord @harshit-gangal /tools/ @frouioui @rohit-nayak-ps /web/vtadmin @ajm188 @notfelineit diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 595995681f9..7306523f000 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -18,9 +18,10 @@ ## Checklist -- [ ] "Backport to:" labels have been added if this change should be back-ported +- [ ] "Backport to:" labels have been added if this change should be back-ported to release branches +- [ ] If this change is to be back-ported to previous releases, a justification is included in the PR description - [ ] Tests were added or are not required -- [ ] Did the new or modified 
tests pass consistently locally and on the CI +- [ ] Did the new or modified tests pass consistently locally and on CI? - [ ] Documentation was added or is not required ## Deployment Notes diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml index 7c56f45728f..fd2258cbd93 100644 --- a/.github/workflows/assign_milestone.yml +++ b/.github/workflows/assign_milestone.yml @@ -18,12 +18,12 @@ jobs: steps: - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Assign Milestone run: | diff --git a/.github/workflows/auto_approve_pr.yml b/.github/workflows/auto_approve_pr.yml index 552f1ec2e68..6985f78e224 100644 --- a/.github/workflows/auto_approve_pr.yml +++ b/.github/workflows/auto_approve_pr.yml @@ -3,14 +3,20 @@ on: pull_request: types: [opened, reopened] +permissions: + contents: read + jobs: auto_approve: name: Auto Approve Pull Request runs-on: ubuntu-latest + + permissions: + pull-requests: write # only given on local PRs, forks run with `read` access + steps: - name: Checkout code - uses: actions/checkout@v3 - + uses: actions/checkout@v4 - name: Auto Approve Pull Request env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/check_label.yml b/.github/workflows/check_label.yml index c3c89273df8..ec5309c6757 100644 --- a/.github/workflows/check_label.yml +++ b/.github/workflows/check_label.yml @@ -3,10 +3,6 @@ on: pull_request: types: [opened, labeled, unlabeled, synchronize] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Check Pull Request labels') - cancel-in-progress: true - permissions: read-all jobs: @@ -68,6 +64,12 @@ jobs: echo "Expecting PR to not have the NeedsIssue label; please create a linked issue and remove the label." 
exit 1 fi + if cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'NeedsBackportReason' ; then + if cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Backport to:'; then + echo "Expecting PR to not have the NeedsBackportReason label; please add your justification to the PR description and remove the label." + exit 1 + fi + fi - name: Do Not Merge label diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml index 8f9199e7658..dd6608f766a 100644 --- a/.github/workflows/check_make_vtadmin_authz_testgen.yml +++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml @@ -27,11 +27,11 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -47,10 +47,10 @@ jobs: - '.github/workflows/check_make_vtadmin_authz_testgen.yml' - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true' with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true' diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml index 5f3302fc97c..df6ee528a0e 100644 --- a/.github/workflows/check_make_vtadmin_web_proto.yml +++ b/.github/workflows/check_make_vtadmin_web_proto.yml @@ -27,11 +27,11 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: 
frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -49,17 +49,17 @@ jobs: - '.github/workflows/check_make_vtadmin_web_proto.yml' - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Setup Node if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: # node-version should match package.json - node-version: '18.16.0' + node-version: '20.12.2' - name: Install npm dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true' diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml index 5ce650f1ea6..e9558f33a73 100644 --- a/.github/workflows/cluster_endtoend_12.yml +++ b/.github/workflows/cluster_endtoend_12.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 
'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml index fa98916736f..012afa11807 100644 --- a/.github/workflows/cluster_endtoend_13.yml +++ b/.github/workflows/cluster_endtoend_13.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check 
out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git 
a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml index 2501f26ab58..c089026a128 100644 --- a/.github/workflows/cluster_endtoend_15.yml +++ b/.github/workflows/cluster_endtoend_15.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c 
https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml index 234e672afb0..1734eca79e1 100644 --- a/.github/workflows/cluster_endtoend_18.yml +++ b/.github/workflows/cluster_endtoend_18.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index feeedcd46b8..163d6a20d28 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: 
steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml index f4cee992fb2..19e008cb115 100644 --- a/.github/workflows/cluster_endtoend_22.yml +++ 
b/.github/workflows/cluster_endtoend_22.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - 
sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml index b3b6e0d56f6..2c06218d198 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml index 6cad7922321..6887ede16ad 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml +++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 
id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,15 +94,15 @@ jobs: run: | # Setup Percona Server for MySQL 8.0 - sudo apt-get update - sudo apt-get install -y lsb-release gnupg2 curl + sudo apt-get -qq update + sudo apt-get -qq install -y lsb-release gnupg2 curl wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo percona-release setup ps80 - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -111,7 +113,7 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD - sudo apt-get install -y percona-xtrabackup-80 lz4 + sudo apt-get -qq install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' 
&& github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml deleted file mode 100644 index b895a19a8d0..00000000000 --- a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml +++ /dev/null @@ -1,175 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (backup_pitr_xtrabackup) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr_xtrabackup) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - - # This is used if we need to pin the xtrabackup version used in tests. - # If this is NOT set then the latest version available will be used. - #XTRABACKUP_VERSION: "2.4.24-1" - -jobs: - build: - name: Run endtoend tests on Cluster (backup_pitr_xtrabackup) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb" - sudo apt-get install -y gnupg2 - sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb" - sudo apt-get update - if [[ -n $XTRABACKUP_VERSION ]]; then - debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb" - wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile" - sudo apt install -y "./$debfile" - else - sudo apt-get install -y percona-xtrabackup-24 - fi - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml index f65d2625c28..3c59c3c4b68 100644 --- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml +++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: 
frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF 
innodb_buffer_pool_load_at_startup=OFF diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml index 5c3739aafd0..40e26b36445 100644 --- a/.github/workflows/cluster_endtoend_mysql80.yml +++ b/.github/workflows/cluster_endtoend_mysql80.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c 
https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml index 793e7372309..dbb77b37b45 100644 --- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml +++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + 
uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml deleted file mode 100644 index 43dc184c204..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_ghost) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - 
GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml index d2c6e23ee86..d4dfef915ad 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end 
== 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml index 38031f4441e..6d12d8bd5a0 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 
@@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git 
a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml deleted file mode 100644 index 0a205266c4f..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_scheduler) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml index d83fb7010b8..b7f0db9db46 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end 
== 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,7 +136,7 @@ jobs: set -exo pipefail - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml deleted file mode 100644 index a941c9faef0..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_vrepl) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', 
${{ github.ref }}, 'Cluster (onlineddl_vrepl) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_vrepl) mysql57 - runs-on: gh-hosted-runners-16cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml index a51cb6c33fe..dc731b09f8b 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,7 +136,7 @@ jobs: set -exo pipefail - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml deleted file mode 100644 index 77626919a89..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_vrepl_stress) mysql57 -on: 
[push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) mysql57 - runs-on: gh-hosted-runners-16cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml index 1230fcd3518..24bfbd1f8ea 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,7 +136,7 @@ jobs: set -exo pipefail - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml deleted file mode 100644 index 86ef8eec019..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make 
generate_ci_workflows" - -name: Cluster (onlineddl_vrepl_stress_suite) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_stress_suite) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) mysql57 - runs-on: gh-hosted-runners-16cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml index 34e521d648f..8922e25cd2b 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,7 +136,7 @@ jobs: set -exo pipefail - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml deleted file mode 100644 index a400ea99677..00000000000 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (onlineddl_vrepl_suite) mysql57 -on: [push, 
pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_suite) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) mysql57 - runs-on: gh-hosted-runners-16cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml index 68a25ee46ec..19d55d0f613 100644 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml +++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +95,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,7 +136,7 @@ jobs: set -exo pipefail - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml deleted file mode 100644 index ba57948d162..00000000000 --- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml +++ /dev/null @@ -1,160 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (schemadiff_vrepl) mysql57 -on: [push, pull_request] 
-concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (schemadiff_vrepl) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (schemadiff_vrepl) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' - - - name: Set up Go - if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml index 0fe0d4e18da..2d0d42ac59e 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml index 5af0e2ff852..795a67833e8 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ 
github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo 
service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml deleted file mode 100644 index e1ae8eeb69c..00000000000 --- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml +++ /dev/null @@ -1,159 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (tabletmanager_tablegc) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_tablegc) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (tabletmanager_tablegc) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml index 8b6826f257c..2ed58e21628 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml index bb59336df48..22ea9f84816 100644 --- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml +++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ 
"https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make 
unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml index ec3d101629e..8fb159404ab 100644 --- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml +++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 
467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,7 +153,7 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml index ea6219bf869..eff4dad0846 100644 --- a/.github/workflows/cluster_endtoend_vreplication_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 
'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit 
-n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,7 +153,7 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml index 5ef46750668..eacbc0ff34f 100644 --- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the 
OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,7 +153,7 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml b/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml similarity index 87% rename from .github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml rename to .github/workflows/cluster_endtoend_vreplication_copy_parallel.yml index c002a72d1e7..9995379cf7c 100644 --- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml +++ 
b/.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vreplication_partial_movetables_sequences) +name: Cluster (vreplication_copy_parallel) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_sequences)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_copy_parallel)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vreplication_partial_movetables_sequences) + name: Run endtoend tests on Cluster (vreplication_copy_parallel) runs-on: gh-hosted-runners-4cores-1 steps: @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,17 +67,17 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml' + - '.github/workflows/cluster_endtoend_vreplication_copy_parallel.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: 
actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,12 +153,12 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_sequences | tee -a output.txt | go-junit-report 
-set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_copy_parallel | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml similarity index 77% rename from .github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml rename to .github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml index 27620919d99..197a3377242 100644 --- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml +++ b/.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vstream_with_keyspaces_to_watch) +name: Cluster (vreplication_foreign_key_stress) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_with_keyspaces_to_watch)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_foreign_key_stress)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch) + name: Run endtoend tests on Cluster (vreplication_foreign_key_stress) runs-on: gh-hosted-runners-4cores-1 steps: @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: 
steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,17 +67,17 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml' + - '.github/workflows/cluster_endtoend_vreplication_foreign_key_stress.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq 
install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -133,8 +135,30 @@ jobs: set -exo pipefail + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql8026.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql8026.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_foreign_key_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml similarity index 77% rename from .github/workflows/cluster_endtoend_onlineddl_ghost.yml rename to .github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml index af61a6a5059..dd8201cd922 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml +++ b/.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (onlineddl_ghost) +name: 
Cluster (vreplication_mariadb_to_mysql) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_ghost)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_mariadb_to_mysql)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (onlineddl_ghost) + name: Run endtoend tests on Cluster (vreplication_mariadb_to_mysql) runs-on: gh-hosted-runners-4cores-1 steps: @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,18 +67,17 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_ghost.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + - '.github/workflows/cluster_endtoend_vreplication_mariadb_to_mysql.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: 
actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -93,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,8 +135,30 @@ jobs: set -exo pipefail + # Increase our open file descriptor limit as we could hit this + ulimit -n 65536 + cat <<-EOF>>./config/mycnf/mysql8026.cnf + innodb_buffer_pool_dump_at_shutdown=OFF + innodb_buffer_pool_in_core_file=OFF + innodb_buffer_pool_load_at_startup=OFF + innodb_buffer_pool_size=64M + innodb_doublewrite=OFF + innodb_flush_log_at_trx_commit=0 + innodb_flush_method=O_DIRECT + innodb_numa_interleave=ON + innodb_adaptive_hash_index=OFF + sync_binlog=0 + sync_relay_log=0 + performance_schema=OFF + slow-query-log=OFF + EOF + + cat <<-EOF>>./config/mycnf/mysql8026.cnf + binlog-transaction-compression=ON + EOF + # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard 
vreplication_mariadb_to_mysql | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml index d8961314a46..0246e0fbf3c 100644 --- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml +++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,7 +153,7 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml similarity index 88% rename from .github/workflows/cluster_endtoend_vreplication_multicell.yml rename to .github/workflows/cluster_endtoend_vreplication_multi_tenant.yml index 328c062e1d0..2523b982e4e 100644 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ b/.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml @@ -1,9 +1,9 
@@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vreplication_multicell) +name: Cluster (vreplication_multi_tenant) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_multicell)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_multi_tenant)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vreplication_multicell) + name: Run endtoend tests on Cluster (vreplication_multi_tenant) runs-on: gh-hosted-runners-4cores-1 steps: @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,17 +67,17 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_multicell.yml' + - '.github/workflows/cluster_endtoend_vreplication_multi_tenant.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,12 +153,12 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_multi_tenant | tee -a output.txt | go-junit-report -set-exit-code > report.xml 
- name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml similarity index 87% rename from .github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml rename to .github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml index 28dca240332..4125b1f9946 100644 --- a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vreplication_partial_movetables_basic) +name: Cluster (vreplication_partial_movetables_and_materialize) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_basic)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_and_materialize)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vreplication_partial_movetables_basic) + name: Run endtoend tests on Cluster (vreplication_partial_movetables_and_materialize) runs-on: gh-hosted-runners-4cores-1 steps: @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: 
actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,17 +67,17 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml' + - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_and_materialize.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make 
unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,12 +153,12 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_and_materialize | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml index 9229b34a5bf..6c280f83547 100644 --- a/.github/workflows/cluster_endtoend_vreplication_v2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in 
relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf 
innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -151,7 +153,7 @@ jobs: slow-query-log=OFF EOF - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream.yml similarity index 87% rename from .github/workflows/cluster_endtoend_vstream_failover.yml rename to .github/workflows/cluster_endtoend_vstream.yml index a620b8caad9..79a9278def2 100644 --- a/.github/workflows/cluster_endtoend_vstream_failover.yml +++ b/.github/workflows/cluster_endtoend_vstream.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (vstream_failover) +name: Cluster (vstream) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_failover)') + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream)') cancel-in-progress: true permissions: read-all @@ -15,7 +15,7 @@ env: jobs: build: - name: Run endtoend tests on Cluster (vstream_failover) + name: Run endtoend tests on Cluster (vstream) runs-on: gh-hosted-runners-4cores-1 steps: @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + 
- 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,17 +67,17 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_failover.yml' + - '.github/workflows/cluster_endtoend_vstream.yml' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -134,7 +136,7 @@ jobs: set -exo pipefail # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report 
-set-exit-code > report.xml + eatmydata -- go run test.go -docker=false -follow -shard vstream | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml deleted file mode 100644 index 5db27dad710..00000000000 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml +++ /dev/null @@ -1,148 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_stoponreshard_false) -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_false)') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vstream_stoponreshard_false) - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 - - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml index 8f2dcd3768b..7905778ce8c 100644 --- a/.github/workflows/cluster_endtoend_vtbackup.yml +++ b/.github/workflows/cluster_endtoend_vtbackup.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' 
filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml index aad84a910c6..5760c8c7251 100644 --- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml +++ 
b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo 
DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml index 19bb9efe86c..fd29ba2184b 100644 --- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml +++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 
'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml index e2824c5844d..f7e01ff1462 100644 --- a/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml +++ b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository 
}}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils 
libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml index 205de4b5e68..18bd53252da 100644 --- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml +++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c 
https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml index 98d59d60aee..8a4d037d197 100644 --- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: 
actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml index 2f4082d10d4..4e7b40f7bf9 100644 --- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml +++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> 
$GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install 
-y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml index 4a9f6e227fb..35691664026 100644 --- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml +++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ 
-92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml index 6d41d922fc4..5c8b2032c0a 100644 --- a/.github/workflows/cluster_endtoend_vtgate_queries.yml +++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 
'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml index 028e1492029..cac04f07f48 100644 --- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml +++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + 
PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make 
unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml index 5972472402e..52e0b191ab5 100644 --- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml +++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest 
MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml index 68a2bd697be..f0a61b6b1f3 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ 
-69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml index 1c5d1e675f8..e728cea0ff3 100644 --- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml +++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: 
application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y 
mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml index 26adb43fd74..9535cb586d2 100644 --- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml +++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver 
keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml index 49945a607d8..40a16d1bd9d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml index ee72650dcbd..631c4e15c4e 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ 
github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget 
eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml index 4051373d9aa..f4f8005d850 100644 --- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml +++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # 
Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml index b7cc848692f..58690ed4bc0 100644 --- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml +++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - 
uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml index b6359682993..ae742e8ba6d 100644 --- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml +++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check 
out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git 
a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml index 83fb2b2d829..ea21e597d8f 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c 
https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -135,7 +137,7 @@ jobs: # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml index 4c2f3b2637d..098277a15fa 100644 --- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml +++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 
'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml index 872576ab8b5..8b0aadbd8d6 100644 --- a/.github/workflows/cluster_endtoend_vtorc.yml +++ b/.github/workflows/cluster_endtoend_vtorc.yml @@ -36,26 +36,37 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token 
${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") draft=$(echo "$PR_DATA" | jq .draft -r) echo "is_draft=${draft}" >> $GITHUB_OUTPUT + - name: Check Memory + run: | + totalMem=$(free -g | awk 'NR==2 {print $2}') + echo "total memory $totalMem GB" + if [[ "$totalMem" -lt 15 ]]; then + echo "Less memory than required" + exit 1 + fi + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +80,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +103,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo 
DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml deleted file mode 100644 index 72baf7940b6..00000000000 --- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml +++ /dev/null @@ -1,159 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vtorc) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtorc) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - -jobs: - build: - name: Run endtoend tests on Cluster (vtorc) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vtorc_mysql57.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml index b56d4dc61a5..e2e6247d492 100644 --- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml +++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,14 +94,14 @@ jobs: run: | # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml index f24baaf31af..a6ca9447469 100644 --- a/.github/workflows/cluster_endtoend_xb_backup.yml +++ b/.github/workflows/cluster_endtoend_xb_backup.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ 
jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,15 +94,15 @@ jobs: run: | # Setup Percona Server for MySQL 8.0 - sudo apt-get update - sudo apt-get install -y lsb-release gnupg2 curl + sudo apt-get -qq update + sudo apt-get -qq install -y lsb-release gnupg2 curl wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo percona-release setup ps80 - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -111,7 +113,7 @@ jobs: # install JUnit report formatter go install 
github.com/vitessio/go-junit-report@HEAD - sudo apt-get install -y percona-xtrabackup-80 lz4 + sudo apt-get -qq install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml deleted file mode 100644 index b85628a0dbe..00000000000 --- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml +++ /dev/null @@ -1,175 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (xb_backup) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_backup) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - - # This is used if we need to pin the xtrabackup version used in tests. - # If this is NOT set then the latest version available will be used. - #XTRABACKUP_VERSION: "2.4.24-1" - -jobs: - build: - name: Run endtoend tests on Cluster (xb_backup) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_xb_backup_mysql57.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb" - sudo apt-get install -y gnupg2 - sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb" - sudo apt-get update - if [[ -n $XTRABACKUP_VERSION ]]; then - debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb" - wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile" - sudo apt install -y "./$debfile" - else - sudo apt-get install -y percona-xtrabackup-24 - fi - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml index 3fbe34b0569..4a662d5f5e9 100644 --- a/.github/workflows/cluster_endtoend_xb_recovery.yml +++ b/.github/workflows/cluster_endtoend_xb_recovery.yml @@ -36,7 +36,7 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") @@ -45,17 +45,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' 
filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -69,13 +71,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -92,15 +94,15 @@ jobs: run: | # Setup Percona Server for MySQL 8.0 - sudo apt-get update - sudo apt-get install -y lsb-release gnupg2 curl + sudo apt-get -qq update + sudo apt-get -qq install -y lsb-release gnupg2 curl wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo percona-release setup ps80 - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 sudo service mysql stop sudo service etcd stop @@ -111,7 +113,7 @@ jobs: # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD - sudo apt-get install -y percona-xtrabackup-80 lz4 + sudo apt-get -qq install -y percona-xtrabackup-80 lz4 - name: Setup launchable dependencies if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 
'main' diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml deleted file mode 100644 index aaa2b034105..00000000000 --- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml +++ /dev/null @@ -1,175 +0,0 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (xb_recovery) mysql57 -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (xb_recovery) mysql57') - cancel-in-progress: true - -permissions: read-all - -env: - LAUNCHABLE_ORGANIZATION: "vitess" - LAUNCHABLE_WORKSPACE: "vitess-app" - GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" - - # This is used if we need to pin the xtrabackup version used in tests. - # If this is NOT set then the latest version available will be used. - #XTRABACKUP_VERSION: "2.4.24-1" - -jobs: - build: - name: Run endtoend tests on Cluster (xb_recovery) mysql57 - runs-on: gh-hosted-runners-4cores-1 - - steps: - - name: Skip CI - run: | - if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - PR_DATA=$(curl \ - -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ - -H "Accept: application/vnd.github.v3+json" \ - "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") - draft=$(echo "$PR_DATA" | jq .draft -r) - echo "is_draft=${draft}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml' - - - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 - - - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - - - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - sudo apt-get update - - # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean - sudo deluser mysql - sudo rm -rf /var/lib/mysql - sudo rm -rf /etc/mysql - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 - # packages for Jammy. 
- echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata - sudo service mysql stop - sudo service etcd stop - - # install JUnit report formatter - go install github.com/vitessio/go-junit-report@HEAD - - wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb" - sudo apt-get install -y gnupg2 - sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb" - sudo apt-get update - if [[ -n $XTRABACKUP_VERSION ]]; then - debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb" - wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile" - sudo apt install -y "./$debfile" - else - sudo apt-get install -y percona-xtrabackup-24 - fi - - - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' - run: | - # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up - pip3 install --user launchable~=1.0 > /dev/null - - # verify that launchable setup is all correct. - launchable verify || true - - # Tell Launchable about the build you are producing and testing - launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . 
- - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml - - - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() - run: | - if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then - # send recorded tests to launchable - launchable record tests --build "$GITHUB_RUN_ID" go-test . || true - fi - - # print test output - cat output.txt diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml new file mode 100644 index 00000000000..b467bef83a1 --- /dev/null +++ b/.github/workflows/codecov.yml @@ -0,0 +1,119 @@ +name: Code Coverage +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Code Coverage') + cancel-in-progress: true + +permissions: read-all + +jobs: + test: + name: Code Coverage + runs-on: gh-hosted-runners-16cores-1 + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Check for changes in files relevant to code coverage + uses: dorny/paths-filter@v3.0.1 + id: changes + with: + token: '' + filters: | + changed_files: + - .github/workflows/codecov.yml + - 'go/**' + - go.mod + - go.sum + - Makefile + + - name: Set up Go + if: steps.changes.outputs.changed_files == 'true' + uses: actions/setup-go@v5 + with: + go-version: 1.22.3 + + - name: Set up python + if: steps.changes.outputs.changed_files == 'true' 
+ uses: actions/setup-python@v5 + + - name: Tune the OS + if: steps.changes.outputs.changed_files == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.changes.outputs.changed_files == 'true' + run: | + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get update + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + + # mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata + sudo service mysql stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ 
+ + go mod download + go install golang.org/x/tools/cmd/goimports@latest + + - name: Run make tools + if: steps.changes.outputs.changed_files == 'true' + run: | + make tools + + - name: Run unit tests and generate code coverage reports + if: steps.changes.outputs.changed_files == 'true' + timeout-minutes: 45 + run: | + set -exo pipefail + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + + export NOVTADMINBUILD=1 + + # Exclude endtoend tests from the coverage report. + # TODO: figure out how best to include our endtoend tests in the coverage report. + rm -rf go/test/endtoend go/*/endtoend go/vt/*/endtoend go/cmd/vttestserver + + eatmydata -- make unit_test_cover + + # Restore the files we deleted as codecov tries to fix their paths. + git reset --hard HEAD + + - name: Upload coverage reports to codecov.io + if: steps.changes.outputs.changed_files == 'true' + uses: codecov/codecov-action@v4 + with: + fail_ci_if_error: true + verbose: true + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml index 8bafc62213a..3b109b81307 100644 --- a/.github/workflows/codeql_analysis.yml +++ b/.github/workflows/codeql_analysis.yml @@ -27,11 +27,16 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: 1.22.3 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify cu stom queries, you can do so here or in a config file. 
@@ -41,11 +46,6 @@ jobs: # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality - - name: Set up Go - uses: actions/setup-go@v4 - with: - go-version: 1.21.3 - - name: Get base dependencies run: | sudo DEBIAN_FRONTEND="noninteractive" apt-get update @@ -58,8 +58,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -88,7 +88,7 @@ jobs: make build - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 - name: Slack Workflow Notification if: ${{ failure() }} diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index 52c90038680..a44a01ce71d 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -18,21 +18,21 @@ jobs: steps: - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Setup node - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: - node-version: '18.16.0' + node-version: '20.12.2' - name: Tune the OS run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies run: | diff --git a/.github/workflows/docker_build_images.yml 
b/.github/workflows/docker_build_images.yml new file mode 100644 index 00000000000..347af8f5887 --- /dev/null +++ b/.github/workflows/docker_build_images.yml @@ -0,0 +1,169 @@ +name: Docker Build Images (v20+) +on: + push: + branches: + - main + tags: + - 'v[2-9][0-9]*.*' # run only on tags greater or equal to v20.0.0 + +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Docker Build Images (v20+)') + cancel-in-progress: true + +permissions: read-all + +jobs: + build_and_push_lite: + name: Build and push vitess/lite Docker images + runs-on: gh-hosted-runners-16cores-1 + if: github.repository == 'vitessio/vitess' + + strategy: + fail-fast: true + matrix: + branch: [ latest ] + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set Dockerfile path + run: | + if [[ "${{ matrix.branch }}" == "latest" ]]; then + echo "DOCKERFILE=./docker/lite/Dockerfile" >> $GITHUB_ENV + else + echo "DOCKERFILE=./docker/lite/Dockerfile.${{ matrix.branch }}" >> $GITHUB_ENV + fi + + - name: Build and push on main + if: github.ref == 'refs/heads/main' + uses: docker/build-push-action@v5 + with: + context: . + file: ${{ env.DOCKERFILE }} + push: true + tags: vitess/lite:${{ matrix.branch }} + + ###### + # All code below only applies to new tags + ###### + - name: Get the Git tag + if: startsWith(github.ref, 'refs/tags/') + run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + + - name: Set Docker tag name + if: startsWith(github.ref, 'refs/tags/') + run: | + if [[ "${{ matrix.branch }}" == "latest" ]]; then + echo "DOCKER_TAG=vitess/lite:${TAG_NAME}" >> $GITHUB_ENV + else + echo "DOCKER_TAG=vitess/lite:${TAG_NAME}-${{ matrix.branch }}" >> $GITHUB_ENV + fi + + - name: Build and push on tags + if: startsWith(github.ref, 'refs/tags/') + uses: docker/build-push-action@v5 + with: + context: . 
+ file: ${{ env.DOCKERFILE }} + push: true + tags: ${{ env.DOCKER_TAG }} + + build_and_push_components: + name: Build and push vitess components Docker images + needs: build_and_push_lite + runs-on: gh-hosted-runners-16cores-1 + if: github.repository == 'vitessio/vitess' + + strategy: + fail-fast: true + matrix: + debian: [ bullseye, bookworm ] + component: [ vtadmin, vtorc, vtgate, vttablet, mysqlctld, mysqlctl, vtctl, vtctlclient, vtctld, logrotate, logtail, vtbackup, vtexplain ] + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Set Docker context path + run: | + echo "DOCKER_CTX=./docker/binaries/${{ matrix.component }}" >> $GITHUB_ENV + + - name: Build and push on main latest tag + if: github.ref == 'refs/heads/main' && matrix.debian == 'bookworm' + uses: docker/build-push-action@v5 + with: + context: ${{ env.DOCKER_CTX }} + push: true + tags: vitess/${{ matrix.component }}:latest + build-args: | + VT_BASE_VER=latest + DEBIAN_VER=${{ matrix.debian }}-slim + + - name: Build and push on main debian specific tag + if: github.ref == 'refs/heads/main' + uses: docker/build-push-action@v5 + with: + context: ${{ env.DOCKER_CTX }} + push: true + tags: vitess/${{ matrix.component }}:latest-${{ matrix.debian }} + build-args: | + VT_BASE_VER=latest + DEBIAN_VER=${{ matrix.debian }}-slim + + ###### + # All code below only applies to new tags + ###### + + - name: Get the Git tag + if: startsWith(github.ref, 'refs/tags/') + run: echo "TAG_NAME=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV + + # We push git-tag-based images to three tags, i.e. 
for 'v19.0.0' we push to: + # + # vitess/${{ matrix.component }}:v19.0.0 (DOCKER_TAG_DEFAULT_DEBIAN) + # vitess/${{ matrix.component }}:v19.0.0-bookworm (DOCKER_TAG) + # vitess/${{ matrix.component }}:v19.0.0-bullseye (DOCKER_TAG) + # + - name: Set Docker tag name + if: startsWith(github.ref, 'refs/tags/') + run: | + echo "DOCKER_TAG_DEFAULT_DEBIAN=vitess/${{ matrix.component }}:${TAG_NAME}" >> $GITHUB_ENV + echo "DOCKER_TAG=vitess/${{ matrix.component }}:${TAG_NAME}-${{ matrix.debian }}" >> $GITHUB_ENV + + # Build and Push component image to DOCKER_TAG, applies to both debian version + - name: Build and push on tags using Debian extension + if: startsWith(github.ref, 'refs/tags/') + uses: docker/build-push-action@v5 + with: + context: ${{ env.DOCKER_CTX }} + push: true + tags: ${{ env.DOCKER_TAG }} + build-args: | + VT_BASE_VER=${{ env.TAG_NAME }} + DEBIAN_VER=${{ matrix.debian }}-slim + + # Build and Push component image to DOCKER_TAG_DEFAULT_DEBIAN, only applies when building the default Debian version (bookworm) + # It is fine to build a second time here when "matrix.debian == 'bookworm'" as we have cached the first build already + - name: Build and push on tags without Debian extension + if: startsWith(github.ref, 'refs/tags/') && matrix.debian == 'bookworm' + uses: docker/build-push-action@v5 + with: + context: ${{ env.DOCKER_CTX }} + push: true + tags: ${{ env.DOCKER_TAG_DEFAULT_DEBIAN }} + build-args: | + VT_BASE_VER=${{ env.TAG_NAME }} + DEBIAN_VER=${{ matrix.debian }}-slim \ No newline at end of file diff --git a/.github/workflows/docker_build_base.yml b/.github/workflows/docker_build_old_base.yml similarity index 97% rename from .github/workflows/docker_build_base.yml rename to .github/workflows/docker_build_old_base.yml index 00848e2518e..e7e280963b1 100644 --- a/.github/workflows/docker_build_base.yml +++ b/.github/workflows/docker_build_old_base.yml @@ -1,13 +1,11 @@ -name: Docker Build Base +name: Docker Build Base (> $GITHUB_OUTPUT + + 
PR_DATA=$(curl \ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -55,9 +67,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -68,29 +84,44 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | export DEBIAN_FRONTEND="noninteractive" - sudo apt-get update + sudo apt-get -qq update # mysql80 - sudo apt-get install -y mysql-server mysql-client + sudo apt-get -qq install -y mysql-server mysql-client - sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || 
echo "could not remove mysqld profile" mkdir -p dist bin - curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ go mod download go install golang.org/x/tools/cmd/goimports@latest + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD - name: Run make tools if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | make tools + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . + - name: unit_race if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 45 @@ -99,5 +130,17 @@ jobs: # which musn't be more than 107 characters long. 
export VTDATAROOT="/tmp/" export NOVTADMINBUILD=1 + export VTEVALENGINETEST="0" + + eatmydata -- make unit_test_race | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi - eatmydata -- make unit_test_race + # print test output + cat output.txt \ No newline at end of file diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/unit_race_evalengine.yml similarity index 61% rename from .github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml rename to .github/workflows/unit_race_evalengine.yml index 32e7685bf8f..cdcdfd680be 100644 --- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml +++ b/.github/workflows/unit_race_evalengine.yml @@ -1,9 +1,7 @@ -# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" - -name: Cluster (vstream_stoponreshard_true) +name: unit_race_evalengine on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vstream_stoponreshard_true)') + group: format('{0}-{1}', ${{ github.ref }}, 'unit_race_evalengine') cancel-in-progress: true permissions: read-all @@ -14,10 +12,10 @@ env: GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" jobs: - build: - name: Run endtoend tests on Cluster (vstream_stoponreshard_true) - runs-on: gh-hosted-runners-4cores-1 + build: + name: Unit Test (Evalengine_Race) + runs-on: gh-hosted-runners-16cores-1 steps: - name: Skip CI run: | @@ -35,7 +33,7 @@ jobs: fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - + PR_DATA=$(curl \ -H "Authorization: token ${{ 
secrets.GITHUB_TOKEN }}" \ -H "Accept: application/vnd.github.v3+json" \ @@ -45,17 +43,17 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | - end_to_end: - - 'go/**/*.go' + unit_tests: + - 'go/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,53 +63,55 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml' + - '.github/workflows/unit_race_evalengine.yml' - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-python@v5 - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | - # Limit local port range to not use ports that overlap with server side - # ports that we listen on. sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" - # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio - echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf - sudo sysctl -p /etc/sysctl.conf - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | - - # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections - sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get -qq update + + # mysql80 + sudo apt-get -qq install -y mysql-server mysql-client + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop - sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + + mkdir -p dist bin + curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + go mod download + go install golang.org/x/tools/cmd/goimports@latest + # install JUnit report formatter go install 
github.com/vitessio/go-junit-report@HEAD + - name: Run make tools + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + run: | + make tools + - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -122,22 +122,20 @@ jobs: # Tell Launchable about the build you are producing and testing launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + - name: unit_race_evalengine + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' timeout-minutes: 45 run: | # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file # which musn't be more than 107 characters long. 
export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail + export NOVTADMINBUILD=1 + export VTEVALENGINETEST="1" - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml + eatmydata -- make unit_test_race | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then # send recorded tests to launchable @@ -145,4 +143,4 @@ jobs: fi # print test output - cat output.txt + cat output.txt \ No newline at end of file diff --git a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml b/.github/workflows/unit_test_evalengine_mysql57.yml similarity index 67% rename from .github/workflows/cluster_endtoend_backup_pitr_mysql57.yml rename to .github/workflows/unit_test_evalengine_mysql57.yml index fb9946fdb0b..49410e597ba 100644 --- a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml +++ b/.github/workflows/unit_test_evalengine_mysql57.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (backup_pitr) mysql57 +name: Unit Test (evalengine_mysql57) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr) mysql57') + group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (evalengine_mysql57)') cancel-in-progress: true permissions: read-all @@ -14,8 +14,8 @@ env: GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" jobs: - build: - name: Run endtoend tests on Cluster (backup_pitr) 
mysql57 + test: + name: Unit Test (evalengine_mysql57) runs-on: gh-hosted-runners-4cores-1 steps: @@ -45,17 +45,17 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | - end_to_end: - - 'go/**/*.go' + unit_tests: + - 'go/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,20 +65,20 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml' + - '.github/workflows/unit_test_evalengine_mysql57.yml' - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-python@v5 - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio @@ -86,43 +86,56 @@ jobs: sudo sysctl -p /etc/sysctl.conf - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | - sudo apt-get update + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get -qq update # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -qq -y autoremove + sudo apt-get -qq -y autoclean sudo deluser mysql sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + # mysql57 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 # packages for Jammy. 
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + sudo apt-get -qq update + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop - sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + mkdir -p dist bin + curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + go install golang.org/x/tools/cmd/goimports@latest + # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD + - name: Run make tools + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + run: | + make tools + - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && 
github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -133,22 +146,22 @@ jobs: # Tell Launchable about the build you are producing and testing launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 + - name: Run test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + timeout-minutes: 30 run: | + set -exo pipefail # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file # which musn't be more than 107 characters long. export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml + export NOVTADMINBUILD=1 + export VTEVALENGINETEST="1" + + eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then # send recorded tests to launchable diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/unit_test_evalengine_mysql80.yml similarity index 66% rename from .github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml rename to .github/workflows/unit_test_evalengine_mysql80.yml index 
ac93c1ac532..e400c5bcf74 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml +++ b/.github/workflows/unit_test_evalengine_mysql80.yml @@ -1,9 +1,9 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (onlineddl_revert) mysql57 +name: Unit Test (evalengine_mysql80) on: [push, pull_request] concurrency: - group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_revert) mysql57') + group: format('{0}-{1}', ${{ github.ref }}, 'Unit Test (evalengine_mysql80)') cancel-in-progress: true permissions: read-all @@ -14,8 +14,8 @@ env: GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" jobs: - build: - name: Run endtoend tests on Cluster (onlineddl_revert) mysql57 + test: + name: Unit Test (evalengine_mysql80) runs-on: gh-hosted-runners-4cores-1 steps: @@ -45,17 +45,17 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | - end_to_end: - - 'go/**/*.go' + unit_tests: + - 'go/**' - 'test.go' - 'Makefile' - 'build.env' @@ -65,21 +65,20 @@ jobs: - 'tools/**' - 'config/**' - 'bootstrap.sh' - - '.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml' - - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + - '.github/workflows/unit_test_evalengine_mysql80.yml' - name: Set up Go - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python - if: steps.skip-workflow.outputs.skip-workflow == 'false' && 
steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + uses: actions/setup-python@v5 - name: Tune the OS - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio @@ -87,43 +86,53 @@ jobs: sudo sysctl -p /etc/sysctl.conf - name: Get dependencies - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | - sudo apt-get update + export DEBIAN_FRONTEND="noninteractive" + sudo apt-get -qq update # Uninstall any previously installed MySQL first - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -qq -y autoremove + sudo apt-get -qq -y autoclean sudo deluser mysql sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb - # Bionic packages are still compatible for Jammy since there's no MySQL 
5.7 - # packages for Jammy. - echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections - echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections + # mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + sudo apt-get -qq update + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-server mysql-client - sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop - sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + mkdir -p dist bin + curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ + + go mod download + go install golang.org/x/tools/cmd/goimports@latest + # install JUnit report formatter go install github.com/vitessio/go-junit-report@HEAD + - name: Run make tools + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + run: | + make tools + - name: Setup launchable dependencies - if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 
'main' + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main' run: | # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up pip3 install --user launchable~=1.0 > /dev/null @@ -134,22 +143,22 @@ jobs: # Tell Launchable about the build you are producing and testing launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . - - name: Run cluster endtoend test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 45 + - name: Run test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' + timeout-minutes: 30 run: | + set -exo pipefail # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file # which musn't be more than 107 characters long. export VTDATAROOT="/tmp/" - source build.env - - set -exo pipefail - # run the tests however you normally do, then produce a JUnit XML file - eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml + export NOVTADMINBUILD=1 + export VTEVALENGINETEST="1" + + eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always() run: | if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then # send recorded tests to launchable diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml index 5c5b9c2a206..616bcf28e54 100644 --- 
a/.github/workflows/unit_test_mysql57.yml +++ b/.github/workflows/unit_test_mysql57.yml @@ -45,11 +45,11 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -69,13 +69,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -89,38 +89,38 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | export DEBIAN_FRONTEND="noninteractive" - sudo apt-get update + sudo apt-get -qq update # Uninstall any previously installed MySQL first sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -qq -y autoremove + sudo apt-get -qq -y autoclean sudo deluser mysql sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # mysql57 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c 
https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 # packages for Jammy. echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + sudo apt-get -qq update + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 - sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" mkdir -p dist bin - curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ go mod download @@ -156,6 +156,8 @@ jobs: export VTDATAROOT="/tmp/" export NOVTADMINBUILD=1 + export VTEVALENGINETEST="0" + eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml index 0427ef18158..970127fce37 100644 --- 
a/.github/workflows/unit_test_mysql80.yml +++ b/.github/workflows/unit_test_mysql80.yml @@ -45,11 +45,11 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -69,13 +69,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -89,35 +89,35 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | export DEBIAN_FRONTEND="noninteractive" - sudo apt-get update + sudo apt-get -qq update # Uninstall any previously installed MySQL first sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -qq -y autoremove + sudo apt-get -qq -y autoclean sudo deluser mysql sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # mysql80 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c 
https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + sudo apt-get -qq update + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-server mysql-client - sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" mkdir -p dist bin - curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ go mod download @@ -153,6 +153,8 @@ jobs: export VTDATAROOT="/tmp/" export NOVTADMINBUILD=1 + export VTEVALENGINETEST="0" + eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft diff --git a/.github/workflows/update_golang_dependencies.yml b/.github/workflows/update_golang_dependencies.yml new file mode 100644 index 00000000000..25dd835919f --- /dev/null +++ b/.github/workflows/update_golang_dependencies.yml @@ -0,0 +1,59 @@ +name: Update Golang Dependencies + +on: + schedule: + - cron: "0 0 1,15 * *" # Runs every month on the 1st and 15th days at midnight UTC + workflow_dispatch: + +permissions: read-all + 
+jobs: + update_golang_deps: + if: github.repository == 'vitessio/vitess' + permissions: + contents: write + pull-requests: write + name: Update Golang Dependencies + runs-on: ubuntu-latest + steps: + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: 1.22.3 + + - name: Check out code + uses: actions/checkout@v4 + with: + ref: main + + - name: Upgrade the Golang Dependencies + id: detect-and-update + run: | + go get -u ./... + + output=$(git status -s) + if [ -z "${output}" ]; then + exit 0 + fi + + go mod tidy + + - name: Create Pull Request + uses: peter-evans/create-pull-request@v4 + with: + token: ${{ secrets.CREATE_PR_VITESS_BOT }} + branch: "upgrade-go-deps-on-main" + commit-message: "upgrade go deps" + signoff: true + delete-branch: true + team-reviewers: Release + title: "Upgrade the Golang Dependencies" + body: | + This Pull Request updates all the Golang dependencies to their latest version using `go get -u ./...`. + + cc @vitessio/release + base: main + labels: | + go + Component: General + Type: Dependencies diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml index a30ba27a5a7..c8f517eba48 100644 --- a/.github/workflows/update_golang_version.yml +++ b/.github/workflows/update_golang_version.yml @@ -15,17 +15,17 @@ jobs: pull-requests: write strategy: matrix: - branch: [ main, release-18.0, release-17.0, release-16.0, release-15.0 ] + branch: [ main, release-19.0, release-18.0, release-17.0, release-16.0 ] name: Update Golang Version runs-on: ubuntu-latest steps: - name: Set up Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ matrix.branch }} @@ -66,10 +66,12 @@ jobs: if: steps.detect-and-update.outputs.create-pr == 'true' uses: peter-evans/create-pull-request@v4 with: + token: ${{ secrets.CREATE_PR_VITESS_BOT }} branch: 
"upgrade-go-to-${{steps.detect-and-update.outputs.go-version}}-on-${{ matrix.branch }}" commit-message: "bump go version to go${{steps.detect-and-update.outputs.go-version}}" signoff: true delete-branch: true + team-reviewers: Release title: "[${{ matrix.branch }}] Upgrade the Golang version to `go${{steps.detect-and-update.outputs.go-version}}`" body: | This Pull Request bumps the Golang version to `go${{steps.detect-and-update.outputs.go-version}}` and the bootstrap version to `${{steps.detect-and-update.outputs.bootstrap-version}}`. @@ -81,6 +83,8 @@ jobs: - [ ] Build and Push the bootstrap images to Docker Hub, the bot cannot handle that. - [ ] Update the `./.github/workflows/*.yml` files with the newer Golang version, the bot cannot handle that due to permissions. - To accomplish this, run the following: `go run ./go/tools/go-upgrade/go-upgrade.go upgrade workflows --go-to=${{steps.detect-and-update.outputs.go-version}}` + + cc @vitessio/release base: ${{ matrix.branch }} labels: | Skip CI diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index 9532995d49c..d26eaa5bf59 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - E2E +name: Backups - E2E - Upgrade Downgrade Testing on: push: pull_request: @@ -10,33 +10,10 @@ concurrency: permissions: read-all jobs: - get_previous_release: - if: always() - name: Get Previous Release - Backups - E2E - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} 
${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - upgrade_downgrade_test_e2e: timeout-minutes: 60 - if: always() && needs.get_previous_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -58,11 +35,21 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + if: steps.skip-workflow.outputs.skip-workflow == 'false' + id: output-previous-release-ref + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -83,13 +70,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -117,11 +104,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ 
steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -141,7 +128,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml index cc8e3afb42a..c09b3ac6636 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - E2E - Next Release +name: Backups - E2E - Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -10,33 +10,11 @@ concurrency: permissions: read-all jobs: - get_next_release: - if: always() - name: Get Latest Release - Backups - E2E - Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo 
$next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test_e2e: timeout-minutes: 60 - if: always() && needs.get_next_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E - Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -46,6 +24,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -53,19 +43,15 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -86,13 +72,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -120,11 +106,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -144,7 +130,7 @@ jobs: # Checkout to this build's commit - 
name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index 6789dda2067..462471422db 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - Manual +name: Backups - Manual - Upgrade Downgrade Testing on: push: pull_request: @@ -10,34 +10,12 @@ concurrency: permissions: read-all jobs: - get_previous_release: - if: always() - name: Get Previous Release - Backups - Manual - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT # This job usually execute in ± 20 minutes upgrade_downgrade_test_manual: timeout-minutes: 40 - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -60,11 +38,21 @@ jobs: # Checkout to this build's commit - name: Checkout to commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - 
name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -82,16 +70,17 @@ jobs: - 'config/**' - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_backups_manual.yml' + - 'examples/**' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -112,8 +101,8 @@ jobs: sudo rm -rf /etc/mysql # Install MySQL 8.0 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -137,11 +126,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: 
Checkout to the other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -161,7 +150,7 @@ jobs: # Checkout to this build's commit - name: Checkout to commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml index 0120571a78e..25e4abea54f 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - Manual - Next Release +name: Backups - Manual - Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -10,34 +10,12 @@ concurrency: permissions: read-all jobs: - get_next_release: - if: always() - name: Get Previous Release - Backups - Manual - Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - 
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT # This job usually execute in ± 20 minutes upgrade_downgrade_test_manual: timeout-minutes: 40 - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual - Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -47,6 +25,19 @@ jobs: exit 1 fi + # Checkout to this build's commit + - name: Checkout to commit's code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -54,20 +45,15 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - # Checkout to this build's commit - - name: Checkout to commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -85,16 +71,17 @@ jobs: - 'config/**' - 'bootstrap.sh' - '.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml' + - 'examples/**' - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -115,8 +102,8 @@ jobs: sudo rm -rf /etc/mysql # Install MySQL 8.0 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -140,11 +127,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release 
of Vitess - - name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Checkout to the other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -164,7 +151,7 @@ jobs: # Checkout to this build's commit - name: Checkout to commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index a3dc81f3723..30be98ee25a 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Queries) +name: Query Serving (Queries) - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. 
jobs: - get_previous_release: - if: always() - name: Get Previous Release - Query Serving (Queries) - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -60,11 +38,21 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -85,13 +73,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: 
actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -111,8 +99,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -159,7 +147,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -175,16 +163,6 @@ jobs: mkdir -p /tmp/vitess-build-current/ cp 
-R bin /tmp/vitess-build-current/ - # Running a test with vtgate and vttablet using version n - - name: Run query serving tests (vtgate=N, vttablet=N) - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - rm -rf /tmp/vtdataroot - mkdir -p /tmp/vtdataroot - - source build.env - eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries - # Swap the binaries in the bin. Use vtgate version n-1 and keep vttablet at version n - name: Use last release's VTGate if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -213,6 +191,12 @@ jobs: rm -f $PWD/bin/vtgate $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld cp /tmp/vitess-build-current/bin/vtgate $PWD/bin/vtgate + + cp /tmp/vitess-build-other/bin/vtctld $PWD/bin + cp /tmp/vitess-build-other/bin/vtctldclient $PWD/bin + cp /tmp/vitess-build-other/bin/vtctl $PWD/bin + cp /tmp/vitess-build-other/bin/vtctlclient $PWD/bin + cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml index 923c766e377..91b2787a86a 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Queries) Next Release +name: Query Serving (Queries) Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. 
jobs: - get_next_release: - if: always() - name: Get Latest Release - Query Serving (Queries) Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,19 +45,15 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -88,13 +74,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,8 +100,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's 
code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -162,7 +148,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -178,16 +164,6 @@ jobs: mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ - # Running a test with vtgate and vttablet using version n - - name: Run query serving tests (vtgate=N, vttablet=N) - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - rm -rf /tmp/vtdataroot - mkdir -p /tmp/vtdataroot - - source build.env - eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_queries - # Swap the binaries in the bin. 
Use vtgate version n+1 and keep vttablet at version n - name: Use next release's VTGate if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 14c8afaf87f..6b38354c7ed 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Schema) +name: Query Serving (Schema) - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. jobs: - get_previous_release: - if: always() - name: Get Previous Release - Query Serving (Schema) - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -60,11 +38,21 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + 
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -85,13 +73,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -111,8 +99,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: 
actions/checkout@v4 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -159,7 +147,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -175,16 +163,6 @@ jobs: mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ - # Running a test with vtgate and vttablet using version n - - name: Run query serving tests (vtgate=N, vttablet=N) - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - rm -rf /tmp/vtdataroot - mkdir -p /tmp/vtdataroot - - source build.env - eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema - # Swap the binaries in the bin. 
Use vtgate version n-1 and keep vttablet at version n - name: Use last release's VTGate if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml index f22ece10010..605440b2f55 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Schema) Next Release +name: Query Serving (Schema) Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. jobs: - get_next_release: - if: always() - name: Get Latest Release - Query Serving (Schema) Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo 
$next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,19 +45,15 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -88,13 +74,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,8 +100,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo 
debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -162,7 +148,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -178,16 +164,6 @@ jobs: mkdir -p /tmp/vitess-build-current/ cp -R bin /tmp/vitess-build-current/ - # Running a test with vtgate and vttablet using version n - - name: Run query serving tests (vtgate=N, vttablet=N) - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - rm -rf /tmp/vtdataroot - mkdir -p /tmp/vtdataroot - - source build.env - eatmydata -- go run test.go -skip-build -keep-data=false -docker=false -print-log -follow -tag upgrade_downgrade_query_serving_schema - # Swap the binaries in the bin. 
Use vtgate version n+1 and keep vttablet at version n - name: Use next release's VTGate if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml index 82d6f267856..f1dd0384002 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent New Vtctl +name: Reparent New Vtctl - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. jobs: - get_next_release: - if: always() - name: Get Latest Release - Reparent New Vtctl - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New Vtctl runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: 
skip-workflow run: | @@ -55,19 +45,15 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -88,13 +74,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,8 +100,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -138,11 +124,11 @@ 
jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -162,7 +148,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml index c5b6c964124..e83c6948bba 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent New VTTablet +name: Reparent New VTTablet - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. 
jobs: - get_next_release: - if: always() - name: Get Latest Release - Reparent New VTTablet - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New VTTablet runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,19 +45,15 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -88,13 +74,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -114,8 +100,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's 
code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -162,7 +148,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index c4391efdef5..6c898d4bd16 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent Old Vtctl +name: Reparent Old Vtctl - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. 
jobs: - get_previous_release: - if: always() - name: Get Previous Release - Reparent Old Vtctl - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old Vtctl runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -60,11 +38,21 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -85,13 +73,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: 
actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -111,8 +99,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -159,7 +147,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git 
a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index f3ffcaa2d17..fe6a426f97a 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent Old VTTablet +name: Reparent Old VTTablet - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. jobs: - get_previous_release: - if: always() - name: Get Previous Release - Reparent Old VTTablet - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old VTTablet runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -60,11 +38,21 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for 
changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -85,13 +73,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -111,8 +99,8 @@ jobs: sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql # Install mysql80 - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release 
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -159,7 +147,7 @@ jobs: # Checkout to this build's commit - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get dependencies for this commit if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml index 24ade4d9227..ccc920312c5 100644 --- a/.github/workflows/vtadmin_web_build.yml +++ b/.github/workflows/vtadmin_web_build.yml @@ -35,14 +35,14 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '18.16.0' + node-version: '20.12.2' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml index 055e1934fb0..e40d1174953 100644 --- a/.github/workflows/vtadmin_web_lint.yml +++ b/.github/workflows/vtadmin_web_lint.yml @@ -35,14 +35,14 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '18.16.0' + node-version: '20.12.2' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git 
a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml index 1efa474fde3..df1de751466 100644 --- a/.github/workflows/vtadmin_web_unit_tests.yml +++ b/.github/workflows/vtadmin_web_unit_tests.yml @@ -35,14 +35,14 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 if: steps.skip-workflow.outputs.skip-workflow == 'false' with: # node-version should match package.json - node-version: '18.16.0' + node-version: '20.12.2' - name: Install dependencies if: steps.skip-workflow.outputs.skip-workflow == 'false' diff --git a/.gitignore b/.gitignore index 881e89890cc..43f352d1b80 100644 --- a/.gitignore +++ b/.gitignore @@ -86,3 +86,7 @@ report # plan test output /go/vt/vtgate/planbuilder/testdata/plan_test* +/go/vt/vtgate/planbuilder/testdata/expected + +# mise files +.mise.toml diff --git a/.golangci.yml b/.golangci.yml index 9c674953a76..74c55100516 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -11,6 +11,7 @@ linters-settings: disable: # not supported when using Generics in 1.18 - nilness - unusedwrite + - loopclosure # fixed in go1.22 linters: disable-all: true @@ -26,7 +27,6 @@ linters: # Extras - gofmt - goimports - - exportloopref - bodyclose # revive is a replacement for golint, but we do not run it in CI for now. diff --git a/ADOPTERS.md b/ADOPTERS.md index a471983a06e..0fa9a3d841b 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -15,6 +15,7 @@ This is an alphabetical list of known adopters of Vitess. 
Some have already gone * [Pinterest](https://pinterest.com) * [Pixel Federation](https://pixelfederation.com) * [Quiz of Kings](https://quizofkings.com) +* [Shopify](https://www.shopify.com) * [Slack](https://slack.com) * [Square](https://square.com) * [Stitch Labs](https://stitchlabs.com) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 271010b8304..df12facb4da 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,3 +15,7 @@ to let everyone know what you're planning to work on, and to track progress and * How to make yourself familiar with Go and Vitess. * How to go through the GitHub workflow. * What to look for during code reviews. + +### Contributions Related to Spelling and Grammar + +At this time, we will not be accepting contributions that only fix spelling, naming or grammatical errors in documentation, code, comments or elsewhere, from accounts created in the last 365 days. We appreciate your interest in contributing to Vitess, and we encourage you to contribute in other ways. \ No newline at end of file diff --git a/GITHUB_SELF_HOSTED_RUNNERS.md b/GITHUB_SELF_HOSTED_RUNNERS.md deleted file mode 100644 index 47d0f223df9..00000000000 --- a/GITHUB_SELF_HOSTED_RUNNERS.md +++ /dev/null @@ -1,91 +0,0 @@ -## Setting up and using GitHub Self hosted runners - -### Adding a new self-hosted runner -Steps to follow to add a new self-hosted runner for GitHub. -You will need access to the Equinix account for Vitess's CI testing and Admin -access to Vitess. - -1. Spawn a new c3.small instance and name it on the Equinix dashboard -2. use ssh to connect to the server -3. Install docker on the server by running the following commands - 1. `curl -fsSL https://get.docker.com -o get-docker.sh` - 2. `sudo sh get-docker.sh` -4. Create a new user with a home directory for the action runner - 1. `useradd -m github-runner` -5. Add the user to the docker group so that it can use docker as well - 1. `sudo usermod -aG docker github-runner` -6. 
Switch to the newly created user - 1. `su github-runner` -7. Goto the home directory of the user and follow the steps in [Adding self hosted runners to repository](https://docs.github.com/en/actions/hosting-your-own-runners/adding-self-hosted-runners#adding-a-self-hosted-runner-to-a-repository) - 1. `mkdir github-runner- && cd github-runner-` - 2. `curl -o actions-runner-linux-x64-2.280.3.tar.gz -L https://github.com/actions/runner/releases/download/v2.280.3/actions-runner-linux-x64-2.280.3.tar.gz` - 3. `tar xzf ./actions-runner-linux-x64-2.280.3.tar.gz` - 4. `./config.sh --url https://github.com/vitessio/vitess --token --name github-runner-` - 5. With a screen execute `./run.sh` -8. Set up a cron job to remove docker volumes and images every other weekday - 1. `crontab -e` - 2. Within the file add a line `0 5 * * 1,3,5 docker system prune -f --volumes --all` -9. Vtorc, Cluster 14 and some other tests use multiple MySQL instances which are all brought up with asynchronous I/O setup in InnoDB. This sometimes leads to us hitting the Linux asynchronous I/O limit. -To fix this we increase the default limit on the self-hosted runners by - - 1. To set the aio-max-nr value, add the following line to the /etc/sysctl.conf file: - 1. `fs.aio-max-nr = 1048576` - 2. To activate the new setting, run the following command: - 1. 
`sysctl -p /etc/sysctl.conf` - -### Moving a test to a self-hosted runner -Most of the code for running the tests is generated code by `make generate_ci_workflows` which uses the file `ci_workflow_gen.go` - -To move a unit test from GitHub runners to self-hosted runners, just move the test from `unitTestDatabases` to `unitTestSelfHostedDatabases` in `ci_workflow_gen.go` and call `make generate_ci_workflows` - -To move a cluster test from GitHub runners to self-hosted runners, just move the test from `clusterList` to `clusterSelfHostedList` in `ci_workflow_gen.go` and call `make generate_ci_workflows` - -### Using a self-hosted runner to debug a flaky test -You will need access to the self-hosted runner machine to be able to connect to it via SSH. -1. From the output of the run on GitHub Actions, find the `Machine name` in the `Set up job` step -2. Find that machine on the Equinix dashboard and connect to it via ssh -3. From the output of the `Print Volume Used` step find the volume used -4. From the output of the `Build Docker Image` step find the docker image built for this workflow -5. On the machine run `docker run -d -v :/vt/vtdataroot /bin/bash -c "sleep 600000000000"` -6. On the terminal copy the docker id of the newly created container -7. Now execute `docker exec -it /bin/bash` to go into the container and use the `/vt/vtdataroot` directory to find the output of the run along with the debug files -8. Alternately, execute `docker cp :/vt/vtdataroot ./debugFiles/` to copy the files from the docker container to the servers local file system -9. You can browse the files there or go a step further and download them locally via `scp`. -10. Please remember to cleanup the folders created and remove the docker container via `docker stop `. - -## Single Self-Hosted runners -There is currently one self-hosted runner which only hosts a single runner. This allows us to run tests -that do not use docker on that runner. 
- -All that is needed to be done is to add `runs-on: single-self-hosted`, remove any code that downloads -dependencies (since they are already present on the self-hosted runner) and add a couple of lines to save -the vtdataroot output if needed. - -[9944](https://github.com/vitessio/vitess/pull/9944/) is an example PR that moves one of the tests to a single-self-hosted runner. - -**NOTE** - It is essential to ensure that all the binaries spawned while running the test be stopped even on failure. -Otherwise, they will keep on running until someone goes ahead and removes them manually. They might interfere -with the future runs as well. - -### Using a single-self-hosted runner to debug a flaky test -The logs will be stored in the `savedRuns` directory and can be copied locally via `scp`. - -A cronjob is already setup to empty the `savedRuns` directory every week so please download the runs -before they are deleted. - -## Running out of disk space in Self-hosted runners - -If the loads on the self-hosted runners increases due to multiple tests being moved to them or some other reason, -they sometimes end up running out of disk space. This causes the runner to stop working all together. - -In order to fix this issue follow the following steps - -1. `ssh` into the self-hosted runner by finding its address from the equinix dashboard. -2. Clear out the disk by running `docker system prune -f --volumes --all`. This is the same command that we run on a cron on the server. -3. Switch to the `github-runner` user - 1. `su github-runner` -4. Resume an existing `screen` - 1. `screen -r` -5. Start the runner again. - 1. `./run.sh` -6. Verify that the runner has started accepting jobs again. Detach the screen and close the `ssh` connection. - - diff --git a/MAINTAINERS.md b/MAINTAINERS.md index d75cecd956f..d94ed7652ad 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -3,9 +3,8 @@ This page lists all active maintainers and their areas of expertise. 
This can be The following is the full list, alphabetically ordered. * Andres Taylor ([systay](https://github.com/systay)) andres@planetscale.com -* Andrew Mason ([amason](https://github.com/ajm188)) andrew@planetscale.com +* Andrew Mason ([amason](https://github.com/ajm188)) amason@hey.com * Arthur Schreiber ([arthurschreiber](https://github.com/arthurschreiber)) arthurschreiber@github.com -* Dan Kozlowski ([dkhenry](https://github.com/dkhenry)) dan.kozlowski@gmail.com * Deepthi Sigireddi ([deepthi](https://github.com/deepthi)) deepthi@planetscale.com * Derek Perkins ([derekperkins](https://github.com/derekperkins)) derek@nozzle.io * Dirkjan Bussink ([dbussink](https://github.com/dbussink)) dbussink@planetscale.com @@ -16,6 +15,7 @@ The following is the full list, alphabetically ordered. * Matt Lord ([mattlord](https://github.com/mattlord)) mlord@planetscale.com * Rohit Nayak ([rohit-nayak-ps](https://github.com/rohit-nayak-ps)) rohit@planetscale.com * Shlomi Noach ([shlomi-noach](https://github.com/shlomi-noach)) shlomi@planetscale.com +* Tim Vaillancourt ([timvaillancourt](https://github.com/timvaillancourt)) tim@timvaillancourt.com * Vicent Marti ([vmg](https://github.com/vmg)) vmg@planetscale.com ## Areas of expertise @@ -68,6 +68,7 @@ We thank the following past maintainers for their contributions. * Alain Jobart ([alainjobart](https://github.com/alainjobart)) * Alkin Tezuysal ([askdba](https://github.com/askdba)) * Anthony Yeh ([enisoc](https://github.com/enisoc)) +* Dan Kozlowski ([dkhenry](https://github.com/dkhenry)) * David Weitzman ([dweitzman](https://github.com/dweitzman)) * Jon Tirsen ([tirsen](https://github.com/tirsen)) * Leo X. 
Lin ([leoxlin](https://github.com/leoxlin)) diff --git a/Makefile b/Makefile index 625a28baac0..93ba30a359f 100644 --- a/Makefile +++ b/Makefile @@ -139,7 +139,7 @@ endif install: build # binaries mkdir -p "$${PREFIX}/bin" - cp "$${VTROOTBIN}/"{mysqlctl,mysqlctld,vtorc,vtadmin,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtbackup} "$${PREFIX}/bin/" + cp "$${VTROOTBIN}/"{mysqlctl,mysqlctld,vtorc,vtadmin,vtctl,vtctld,vtctlclient,vtctldclient,vtgate,vttablet,vtbackup,vtexplain} "$${PREFIX}/bin/" # Will only work inside the docker bootstrap for now cross-install: cross-build @@ -214,10 +214,14 @@ e2e_test: build go test $(VT_GO_PARALLEL) ./go/.../endtoend/... # Run the code coverage tools, compute aggregate. -# If you want to improve in a directory, run: -# go test -coverprofile=coverage.out && go tool cover -html=coverage.out -unit_test_cover: build - go test $(VT_GO_PARALLEL) -cover ./go/... | misc/parse_cover.py +unit_test_cover: build dependency_check demo + source build.env + go test $(VT_GO_PARALLEL) -count=1 -failfast -covermode=atomic -coverpkg=vitess.io/vitess/go/... -coverprofile=coverage.out ./go/... + # Handle go tool cover failures due to not handling `//line` directives, which + # the goyacc compiler adds to the generated parser in sql.go. See: + # https://github.com/golang/go/issues/41222 + sed -i'' -e '/^vitess.io\/vitess\/go\/vt\/sqlparser\/yaccpar/d' coverage.out + go tool $(VT_GO_PARALLEL) cover -html=coverage.out unit_test_race: build dependency_check tools/unit_test_race.sh @@ -253,7 +257,7 @@ PROTO_SRCS = $(wildcard proto/*.proto) PROTO_SRC_NAMES = $(basename $(notdir $(PROTO_SRCS))) PROTO_GO_OUTS = $(foreach name, $(PROTO_SRC_NAMES), go/vt/proto/$(name)/$(name).pb.go) # This rule rebuilds all the go files from the proto definitions for gRPC. 
-proto: $(PROTO_GO_OUTS) vtadmin_web_proto_types +proto: $(PROTO_GO_OUTS) vtadmin_web_proto_types vtctldclient ifndef NOBANNER echo $$(date): Compiling proto definitions @@ -278,7 +282,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto # This rule builds the bootstrap images for all flavors. DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80 DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST) -BOOTSTRAP_VERSION=24 +BOOTSTRAP_VERSION=32 ensure_bootstrap_version: find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \; sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go @@ -314,35 +318,12 @@ define build_docker_image fi endef -docker_base: - ${call build_docker_image,docker/base/Dockerfile,vitess/base} - -DOCKER_BASE_SUFFIX = mysql80 percona57 percona80 -DOCKER_BASE_TARGETS = $(addprefix docker_base_, $(DOCKER_BASE_SUFFIX)) -$(DOCKER_BASE_TARGETS): docker_base_%: - ${call build_docker_image,docker/base/Dockerfile.$*,vitess/base:$*} - -docker_base_all: docker_base $(DOCKER_BASE_TARGETS) - -DOCKER_MYSQL_VERSIONS = 8.0.30 8.0.34 -docker_mysql: - for i in $(DOCKER_MYSQL_VERSIONS); do echo "building vitess/mysql:$$i"; ${call build_docker_image,docker/mysql/Dockerfile.$$i,vitess/mysql:$$i} || exit 1; done - -docker_mysql_push: - for i in $(DOCKER_MYSQL_VERSIONS); do echo "pushing vitess/mysql:$$i"; docker push vitess/mysql:$$i || exit 1; done - docker_lite: ${call build_docker_image,docker/lite/Dockerfile,vitess/lite} -DOCKER_LITE_SUFFIX = mysql57 ubi7.mysql57 mysql80 ubi7.mysql80 percona57 ubi7.percona57 percona80 ubi7.percona80 testing ubi8.mysql80 ubi8.arm64.mysql80 -DOCKER_LITE_TARGETS = $(addprefix docker_lite_,$(DOCKER_LITE_SUFFIX)) -$(DOCKER_LITE_TARGETS): docker_lite_%: - ${call build_docker_image,docker/lite/Dockerfile.$*,vitess/lite:$*} - docker_lite_push: - for i in $(DOCKER_LITE_SUFFIX); do echo "pushing lite image: $$i"; docker push 
vitess/lite:$$i || exit 1; done - -docker_lite_all: docker_lite $(DOCKER_LITE_TARGETS) + echo "pushing lite image: latest" + docker push vitess/lite:latest docker_local: ${call build_docker_image,docker/local/Dockerfile,vitess/local} @@ -368,20 +349,6 @@ docker_test: docker_unit_test: go run test.go -flavor $(flavor) unit -# Release a version. -# This will generate a tar.gz file into the releases folder with the current source -release: docker_base - @if [ -z "$VERSION" ]; then \ - echo "Set the env var VERSION with the release version"; exit 1;\ - fi - mkdir -p releases - docker build -f docker/Dockerfile.release -t vitess/release . - docker run -v ${PWD}/releases:/vt/releases --env VERSION=$(VERSION) vitess/release - git tag -m Version\ $(VERSION) v$(VERSION) - echo "A git tag was created, you can push it with:" - echo "git push origin v$(VERSION)" - echo "Also, don't forget the upload releases/v$(VERSION).tar.gz file to GitHub releases" - create_release: ./tools/create_release.sh diff --git a/README.md b/README.md index 6f021141aca..adc8cd93c19 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ [![Maven Central](https://maven-badges.herokuapp.com/maven-central/io.vitess/vitess-jdbc/badge.svg)](https://maven-badges.herokuapp.com/maven-central/io.vitess/vitess-jdbc) -[![codebeat badge](https://codebeat.co/badges/51c9a056-1103-4522-9a9c-dc623821ea87)](https://codebeat.co/projects/github-com-youtube-vitess) +[![Coverage Status](https://codecov.io/gh/vitessio/vitess/branch/main/graph/badge.svg)](https://app.codecov.io/gh/vitessio/vitess/tree/main) [![Go Report Card](https://goreportcard.com/badge/vitess.io/vitess)](https://goreportcard.com/report/vitess.io/vitess) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fvitessio%2Fvitess.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fvitessio%2Fvitess?ref=badge_shield) +[![FOSSA 
Status](https://app.fossa.com/api/projects/custom%2B162%2Fvitess.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fvitess?ref=badge_shield&issueType=license) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1724/badge)](https://bestpractices.coreinfrastructure.org/projects/1724) # Vitess @@ -19,11 +19,10 @@ since 2011, and has grown to encompass tens of thousands of MySQL nodes. For more about Vitess, please visit [vitess.io](https://vitess.io). -Vitess has a growing community. You can view the list of adopters -[here](https://github.com/vitessio/vitess/blob/main/ADOPTERS.md). +Vitess has a growing community. [View the list of adopters](https://github.com/vitessio/vitess/blob/main/ADOPTERS.md). ## Reporting a Problem, Issue, or Bug -To report a problem, the best way to get attention is to create a GitHub [issue](.https://github.com/vitessio/vitess/issues ) using proper severity level based on this [guide](https://github.com/vitessio/vitess/blob/main/SEVERITY.md). +To report a problem, create a [GitHub issue](https://github.com/vitessio/vitess/issues). For topics that are better discussed live, please join the [Vitess Slack](https://vitess.io/slack) workspace. You may post any questions on the #general channel or join some of the special-interest channels. @@ -40,11 +39,11 @@ See [Security](SECURITY.md) for a full outline of the security process. ### Security Audit -A third party security audit was performed by Cure53. You can see the full report [here](doc/VIT-01-report.pdf). +A third party security audit was performed by ADA Logics. [Read the full report](doc/VIT-03-report-security-audit.pdf). ## License Unless otherwise noted, the Vitess source files are distributed under the Apache Version 2.0 license found in the LICENSE file. 
-[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fvitessio%2Fvitess.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fvitessio%2Fvitess?ref=badge_large) +[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fvitess.svg?type=large&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fvitess?ref=badge_large&issueType=license) diff --git a/bootstrap.sh b/bootstrap.sh index d3a4943ad38..5d6840496d3 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -113,7 +113,7 @@ install_protoc() { esac # This is how we'd download directly from source: - "${VTROOT}/tools/wget-retry" https://github.com/protocolbuffers/protobuf/releases/download/v$version/protoc-$version-$platform-${target}.zip + "${VTROOT}/tools/wget-retry" -q https://github.com/protocolbuffers/protobuf/releases/download/v$version/protoc-$version-$platform-${target}.zip #"${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/protoc-$version-$platform-${target}.zip" unzip "protoc-$version-$platform-${target}.zip" @@ -128,9 +128,9 @@ install_zookeeper() { zk="zookeeper-$version" # This is how we'd download directly from source: # wget "https://dlcdn.apache.org/zookeeper/$zk/apache-$zk.tar.gz" - "${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/apache-${zk}.tar.gz" + "${VTROOT}/tools/wget-retry" -q "${VITESS_RESOURCES_DOWNLOAD_URL}/apache-${zk}.tar.gz" tar -xzf "$dist/apache-$zk.tar.gz" - mvn -f $dist/apache-$zk/zookeeper-contrib/zookeeper-contrib-fatjar/pom.xml clean install -P fatjar -DskipTests + mvn -q -f $dist/apache-$zk/zookeeper-contrib/zookeeper-contrib-fatjar/pom.xml clean install -P fatjar -DskipTests mkdir -p $dist/lib cp "$dist/apache-$zk/zookeeper-contrib/zookeeper-contrib-fatjar/target/$zk-fatjar.jar" "$dist/lib/$zk-fatjar.jar" rm -rf "$dist/apache-$zk" @@ -158,7 +158,7 @@ install_etcd() { file="etcd-${version}-${platform}-${target}.${ext}" # This is how we'd download directly from source: - "${VTROOT}/tools/wget-retry" 
"https://github.com/etcd-io/etcd/releases/download/$version/$file" + "${VTROOT}/tools/wget-retry" -q "https://github.com/etcd-io/etcd/releases/download/$version/$file" #"${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/${file}" if [ "$ext" = "tar.gz" ]; then tar xzf "$file" @@ -191,7 +191,7 @@ install_consul() { # This is how we'd download directly from source: # download_url=https://releases.hashicorp.com/consul # wget "${download_url}/${version}/consul_${version}_${platform}_${target}.zip" - "${VTROOT}/tools/wget-retry" "${VITESS_RESOURCES_DOWNLOAD_URL}/consul_${version}_${platform}_${target}.zip" + "${VTROOT}/tools/wget-retry" -q "${VITESS_RESOURCES_DOWNLOAD_URL}/consul_${version}_${platform}_${target}.zip" unzip "consul_${version}_${platform}_${target}.zip" ln -snf "$dist/consul" "$VTROOT/bin/consul" } @@ -217,7 +217,7 @@ install_toxiproxy() { # This is how we'd download directly from source: file="toxiproxy-server-${platform}-${target}" - "${VTROOT}/tools/wget-retry" "https://github.com/Shopify/toxiproxy/releases/download/$version/$file" + "${VTROOT}/tools/wget-retry" -q "https://github.com/Shopify/toxiproxy/releases/download/$version/$file" chmod +x "$dist/$file" ln -snf "$dist/$file" "$VTROOT/bin/toxiproxy-server" } diff --git a/build.env b/build.env index 5986ee247b0..34da6721aa7 100755 --- a/build.env +++ b/build.env @@ -17,7 +17,7 @@ source ./tools/shell_functions.inc go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions." -goversion_min 1.21.3 || echo "Go version reported: `go version`. Version 1.21.3+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." +goversion_min 1.22.3 || echo "Go version reported: `go version`. Version 1.22.3+ recommended. See https://vitess.io/contributing/build-from-source for install instructions." 
mkdir -p dist mkdir -p bin @@ -31,7 +31,7 @@ export PROTOC_VER=21.3 export ZK_VER=${ZK_VERSION:-3.8.0} export ETCD_VER=v3.5.6 export CONSUL_VER=1.11.4 -export TOXIPROXY_VER=v2.5.0 +export TOXIPROXY_VER=v2.7.0 mkdir -p "$VTDATAROOT" diff --git a/changelog/16.0/16.0.6/changelog.md b/changelog/16.0/16.0.6/changelog.md new file mode 100644 index 00000000000..959bf2bd570 --- /dev/null +++ b/changelog/16.0/16.0.6/changelog.md @@ -0,0 +1,43 @@ +# Changelog of Vitess v16.0.6 + +### Bug fixes +#### CLI + * [release-16.0] Fix anonymous paths in cobra code-gen (#14185) [#14236](https://github.com/vitessio/vitess/pull/14236) +#### Examples + * [release-16.0] examples: fix flag syntax for zkctl (#14469) [#14485](https://github.com/vitessio/vitess/pull/14485) +#### Online DDL + * [Release 16.0]: Online DDL: timeouts for all gRPC calls (#14182) [#14191](https://github.com/vitessio/vitess/pull/14191) + * [release-16.0] schemadiff: fix missing `DROP CONSTRAINT` in duplicate/redundant constraints scenario. 
(#14387) [#14389](https://github.com/vitessio/vitess/pull/14389) +#### Query Serving + * [release-16.0] Rewrite `USING` to `ON` condition for joins (#13931) [#13940](https://github.com/vitessio/vitess/pull/13940) + * [release-16.0] Make column resolution closer to MySQL (#14426) [#14428](https://github.com/vitessio/vitess/pull/14428) + * [release-16.0] vtgate/engine: Fix race condition in join logic (#14435) [#14439](https://github.com/vitessio/vitess/pull/14439) + * [release-16.0] Ensure hexval and int don't share BindVar after Normalization (#14451) [#14477](https://github.com/vitessio/vitess/pull/14477) +#### Throttler + * [release-16.0] Tablet throttler: fix race condition by removing goroutine call (#14179) [#14200](https://github.com/vitessio/vitess/pull/14200) +#### VReplication + * [release-16.0] VDiff: wait for shard streams of one table diff to complete for before starting that of the next table (#14345) [#14380](https://github.com/vitessio/vitess/pull/14380) +### CI/Build +#### General + * [release-16.0] Upgrade the Golang version to `go1.20.9` [#14194](https://github.com/vitessio/vitess/pull/14194) + * [release-16.0] Upgrade the Golang version to `go1.20.10` [#14228](https://github.com/vitessio/vitess/pull/14228) +#### Online DDL + * [release-16.0] OnlineDDL: reduce vrepl_stress workload in forks (#14302) [#14347](https://github.com/vitessio/vitess/pull/14347) +### Dependabot +#### General + * [release-16.0] Bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#14239) [#14251](https://github.com/vitessio/vitess/pull/14251) + * [release-16.0] Bump golang.org/x/net from 0.14.0 to 0.17.0 (#14260) [#14262](https://github.com/vitessio/vitess/pull/14262) + * [release-16.0] Bump google.golang.org/grpc from 1.55.0-dev to 1.59.0 (#14364) [#14496](https://github.com/vitessio/vitess/pull/14496) +#### VTAdmin + * [release-16.0] Bump postcss from 8.4.21 to 8.4.31 in /web/vtadmin (#14173) [#14256](https://github.com/vitessio/vitess/pull/14256) + * 
[release-16.0] Bump @babel/traverse from 7.21.4 to 7.23.2 in /web/vtadmin (#14304) [#14306](https://github.com/vitessio/vitess/pull/14306) +### Enhancement +#### Build/CI + * [release-16.0] Automatic approval of `vitess-bot` clean backports (#14352) [#14355](https://github.com/vitessio/vitess/pull/14355) +### Release +#### General + * Code freeze of release-16.0 [#14409](https://github.com/vitessio/vitess/pull/14409) +### Testing +#### Query Serving + * [release-16.0] vtgate: Allow additional errors in warnings test (#14461) [#14463](https://github.com/vitessio/vitess/pull/14463) + diff --git a/changelog/16.0/16.0.6/release_notes.md b/changelog/16.0/16.0.6/release_notes.md new file mode 100644 index 00000000000..881ed26b348 --- /dev/null +++ b/changelog/16.0/16.0.6/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v16.0.6 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.6/changelog.md). + +The release includes 21 merged Pull Requests. 
+ +Thanks to all our contributors: @app/github-actions, @app/vitess-bot, @harshit-gangal, @shlomi-noach + diff --git a/changelog/16.0/16.0.7/changelog.md b/changelog/16.0/16.0.7/changelog.md new file mode 100644 index 00000000000..d4565e2f9ba --- /dev/null +++ b/changelog/16.0/16.0.7/changelog.md @@ -0,0 +1,42 @@ +# Changelog of Vitess v16.0.7 + +### Bug fixes +#### Build/CI + * [release-16.0] Update create_release.sh (#14492) [#14514](https://github.com/vitessio/vitess/pull/14514) +#### Cluster management + * [release-16.0] Fix Panic in PRS due to a missing nil check (#14656) [#14674](https://github.com/vitessio/vitess/pull/14674) +#### Query Serving + * [release-16.0] expression rewriting: enable more rewrites and limit CNF rewrites (#14560) [#14574](https://github.com/vitessio/vitess/pull/14574) + * [release-16.0] fix concurrency on stream execute engine primitives (#14586) [#14590](https://github.com/vitessio/vitess/pull/14590) + * [16.0] bug fix: stop all kinds of expressions from cnf-exploding [#14595](https://github.com/vitessio/vitess/pull/14595) + * [release-16.0] tabletserver: do not consolidate streams on primary tablet when consolidator mode is `notOnPrimary` (#14332) [#14683](https://github.com/vitessio/vitess/pull/14683) +#### VReplication + * Revert "[release-16.0] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612)" [#14743](https://github.com/vitessio/vitess/pull/14743) + * [release-16.0] VReplication: Update singular workflow in traffic switcher (#14826) [#14827](https://github.com/vitessio/vitess/pull/14827) +### CI/Build +#### Build/CI + * [release-16.0] Update MySQL apt package and GPG signature (#14785) [#14790](https://github.com/vitessio/vitess/pull/14790) +#### Docker + * [release-16.0] Build and push Docker Images from GitHub Actions [#14513](https://github.com/vitessio/vitess/pull/14513) +#### General + * [release-16.0] Upgrade the Golang version to `go1.20.12` 
[#14691](https://github.com/vitessio/vitess/pull/14691) +### Dependabot +#### General + * [release-16.0] build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#14814) [#14818](https://github.com/vitessio/vitess/pull/14818) +### Enhancement +#### Build/CI + * [release-16.0] Add step to static check to ensure consistency of GHA workflows (#14724) [#14725](https://github.com/vitessio/vitess/pull/14725) +### Internal Cleanup +#### TabletManager + * [release-16.0] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612) [#14620](https://github.com/vitessio/vitess/pull/14620) +### Performance +#### Query Serving + * [release-16.0] vindexes: fix pooled collator buffer memory leak (#14621) [#14622](https://github.com/vitessio/vitess/pull/14622) +### Release +#### General + * [release-16.0] Code Freeze for `v16.0.7` [#14808](https://github.com/vitessio/vitess/pull/14808) +### Testing +#### Backup and Restore + * [release-16.0] Add a retry to remove the vttablet directory during upgrade/downgrade backup tests (#14753) [#14756](https://github.com/vitessio/vitess/pull/14756) + * [release-16.0] Backup flaky test [#14819](https://github.com/vitessio/vitess/pull/14819) + diff --git a/changelog/16.0/16.0.7/release_notes.md b/changelog/16.0/16.0.7/release_notes.md new file mode 100644 index 00000000000..4a2b5703d9d --- /dev/null +++ b/changelog/16.0/16.0.7/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v16.0.7 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.7/changelog.md). + +The release includes 18 merged Pull Requests. 
+ +Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @deepthi, @frouioui, @harshit-gangal, @maxenglander, @shlomi-noach, @systay + diff --git a/changelog/16.0/README.md b/changelog/16.0/README.md index d45a817ad48..86fd6e15961 100644 --- a/changelog/16.0/README.md +++ b/changelog/16.0/README.md @@ -1,5 +1,13 @@ ## v16.0 The dedicated team for this release can be found [here](team.md). +* **[16.0.7](16.0.7)** + * [Changelog](16.0.7/changelog.md) + * [Release Notes](16.0.7/release_notes.md) + +* **[16.0.6](16.0.6)** + * [Changelog](16.0.6/changelog.md) + * [Release Notes](16.0.6/release_notes.md) + * **[16.0.5](16.0.5)** * [Changelog](16.0.5/changelog.md) * [Release Notes](16.0.5/release_notes.md) diff --git a/changelog/17.0/17.0.0/release_notes.md b/changelog/17.0/17.0.0/release_notes.md index eff847c3c19..9900ded0e3c 100644 --- a/changelog/17.0/17.0.0/release_notes.md +++ b/changelog/17.0/17.0.0/release_notes.md @@ -7,7 +7,6 @@ - [Schema-initialization stuck on semi-sync ACKs while upgrading to v17.0.0](#schema-init-upgrade) - **[Major Changes](#major-changes)** - **[Breaking Changes](#breaking-changes)** - - [VTTablet: Initializing all replicas with super_read_only](#vttablet-initialization) - [Default Local Cell Preference for TabletPicker](#tablet-picker-cell-preference) - [Dedicated stats for VTGate Prepare operations](#dedicated-vtgate-prepare-stats) - [VTAdmin web migrated from create-react-app to vite](#migrated-vtadmin) diff --git a/changelog/17.0/17.0.0/summary.md b/changelog/17.0/17.0.0/summary.md index 0c258bb8868..92ac2897463 100644 --- a/changelog/17.0/17.0.0/summary.md +++ b/changelog/17.0/17.0.0/summary.md @@ -6,7 +6,6 @@ - [Schema-initialization stuck on semi-sync ACKs while upgrading to v17.0.0](#schema-init-upgrade) - **[Major Changes](#major-changes)** - **[Breaking Changes](#breaking-changes)** - - [VTTablet: Initializing all replicas with super_read_only](#vttablet-initialization) - [Default Local Cell 
Preference for TabletPicker](#tablet-picker-cell-preference) - [Dedicated stats for VTGate Prepare operations](#dedicated-vtgate-prepare-stats) - [VTAdmin web migrated from create-react-app to vite](#migrated-vtadmin) diff --git a/changelog/17.0/17.0.4/changelog.md b/changelog/17.0/17.0.4/changelog.md new file mode 100644 index 00000000000..3aba7b735f9 --- /dev/null +++ b/changelog/17.0/17.0.4/changelog.md @@ -0,0 +1,48 @@ +# Changelog of Vitess v17.0.4 + +### Bug fixes +#### CLI + * [release-17.0] Fix anonymous paths in cobra code-gen (#14185) [#14237](https://github.com/vitessio/vitess/pull/14237) +#### Evalengine + * [release-17.0] evalengine: Misc bugs (#14351) [#14353](https://github.com/vitessio/vitess/pull/14353) +#### Examples + * [release-17.0] examples: fix flag syntax for zkctl (#14469) [#14486](https://github.com/vitessio/vitess/pull/14486) +#### General + * [release-17.0] viper: register dynamic config with both disk and live (#14453) [#14454](https://github.com/vitessio/vitess/pull/14454) +#### Online DDL + * [Release 17.0]: Online DDL: timeouts for all gRPC calls (#14182) [#14190](https://github.com/vitessio/vitess/pull/14190) + * [release-17.0] schemadiff: fix missing `DROP CONSTRAINT` in duplicate/redundant constraints scenario. 
(#14387) [#14390](https://github.com/vitessio/vitess/pull/14390) +#### Query Serving + * [release-17.0] Make column resolution closer to MySQL (#14426) [#14429](https://github.com/vitessio/vitess/pull/14429) + * [release-17.0] vtgate/engine: Fix race condition in join logic (#14435) [#14440](https://github.com/vitessio/vitess/pull/14440) + * [release-17.0] Ensure hexval and int don't share BindVar after Normalization (#14451) [#14478](https://github.com/vitessio/vitess/pull/14478) +#### Throttler + * [release-17.0] Tablet throttler: fix race condition by removing goroutine call (#14179) [#14199](https://github.com/vitessio/vitess/pull/14199) +#### VReplication + * [release-17.0] VDiff: wait for shard streams of one table diff to complete for before starting that of the next table (#14345) [#14381](https://github.com/vitessio/vitess/pull/14381) +### CI/Build +#### General + * [release-17.0] Upgrade the Golang version to `go1.20.9` [#14196](https://github.com/vitessio/vitess/pull/14196) + * [release-17.0] Upgrade the Golang version to `go1.20.10` [#14229](https://github.com/vitessio/vitess/pull/14229) +#### Online DDL + * [release-17.0] OnlineDDL: reduce vrepl_stress workload in forks (#14302) [#14348](https://github.com/vitessio/vitess/pull/14348) +### Dependabot +#### General + * [release-17.0] Bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#14239) [#14252](https://github.com/vitessio/vitess/pull/14252) + * [release-17.0] Bump golang.org/x/net from 0.14.0 to 0.17.0 (#14260) [#14263](https://github.com/vitessio/vitess/pull/14263) + * [release-17.0] Bump google.golang.org/grpc from 1.55.0-dev to 1.59.0 (#14364) [#14497](https://github.com/vitessio/vitess/pull/14497) +#### VTAdmin + * [release-17.0] Bump postcss from 8.4.21 to 8.4.31 in /web/vtadmin (#14173) [#14257](https://github.com/vitessio/vitess/pull/14257) + * [release-17.0] Bump @babel/traverse from 7.21.4 to 7.23.2 in /web/vtadmin (#14304) 
[#14307](https://github.com/vitessio/vitess/pull/14307) +### Enhancement +#### Build/CI + * [release-17.0] Automatic approval of `vitess-bot` clean backports (#14352) [#14356](https://github.com/vitessio/vitess/pull/14356) +### Release +#### General + * Code freeze of release-17.0 [#14407](https://github.com/vitessio/vitess/pull/14407) +### Testing +#### Cluster management + * Fix Upgrade downgrade reparent tests in release-17.0 [#14507](https://github.com/vitessio/vitess/pull/14507) +#### Query Serving + * [release-17.0] vtgate: Allow more errors for the warning check (#14421) [#14422](https://github.com/vitessio/vitess/pull/14422) + diff --git a/changelog/17.0/17.0.4/release_notes.md b/changelog/17.0/17.0.4/release_notes.md new file mode 100644 index 00000000000..30d3c9274e9 --- /dev/null +++ b/changelog/17.0/17.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v17.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.4/changelog.md). + +The release includes 23 merged Pull Requests. 
+ +Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @mattlord, @shlomi-noach + diff --git a/changelog/17.0/17.0.5/changelog.md b/changelog/17.0/17.0.5/changelog.md new file mode 100644 index 00000000000..91078d04d7e --- /dev/null +++ b/changelog/17.0/17.0.5/changelog.md @@ -0,0 +1,49 @@ +# Changelog of Vitess v17.0.5 + +### Bug fixes +#### Build/CI + * [release-17.0] Update create_release.sh (#14492) [#14515](https://github.com/vitessio/vitess/pull/14515) +#### Cluster management + * [release-17.0] Fix Panic in PRS due to a missing nil check (#14656) [#14675](https://github.com/vitessio/vitess/pull/14675) + * Revert "[release-17.0] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612)" [#14744](https://github.com/vitessio/vitess/pull/14744) +#### Evalengine + * [release-17.0] Fix nullability checks in evalengine (#14556) [#14563](https://github.com/vitessio/vitess/pull/14563) +#### Query Serving + * [release-17.0] expression rewriting: enable more rewrites and limit CNF rewrites (#14560) [#14575](https://github.com/vitessio/vitess/pull/14575) + * [release-17.0] fix concurrency on stream execute engine primitives (#14586) [#14591](https://github.com/vitessio/vitess/pull/14591) + * [17.0] bug fix: stop all kinds of expressions from cnf-exploding [#14594](https://github.com/vitessio/vitess/pull/14594) + * [release-17.0] tabletserver: do not consolidate streams on primary tablet when consolidator mode is `notOnPrimary` (#14332) [#14678](https://github.com/vitessio/vitess/pull/14678) + * Fix accepting bind variables in time related function calls. 
[#14763](https://github.com/vitessio/vitess/pull/14763) +#### VReplication + * [release-17.0] VReplication: Update singular workflow in traffic switcher (#14826) [#14828](https://github.com/vitessio/vitess/pull/14828) +### CI/Build +#### Build/CI + * [release-17.0] Update MySQL apt package and GPG signature (#14785) [#14791](https://github.com/vitessio/vitess/pull/14791) +#### Docker + * [release-17.0] Build and push Docker Images from GitHub Actions [#14512](https://github.com/vitessio/vitess/pull/14512) +#### General + * [release-17.0] Upgrade the Golang version to `go1.20.11` [#14489](https://github.com/vitessio/vitess/pull/14489) + * [release-17.0] Upgrade the Golang version to `go1.20.12` [#14692](https://github.com/vitessio/vitess/pull/14692) +### Dependabot +#### General + * [release-17.0] build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#14814) [#14816](https://github.com/vitessio/vitess/pull/14816) +#### VTAdmin + * [release-17.0] Bump @adobe/css-tools from 4.3.1 to 4.3.2 in /web/vtadmin (#14654) [#14667](https://github.com/vitessio/vitess/pull/14667) +### Enhancement +#### Build/CI + * [release-17.0] Add step to static check to ensure consistency of GHA workflows (#14724) [#14726](https://github.com/vitessio/vitess/pull/14726) +### Internal Cleanup +#### TabletManager + * [release-17.0] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612) [#14619](https://github.com/vitessio/vitess/pull/14619) +#### vtctldclient + * [release-17.0] Fix typo for `--cells` flag help description in `ApplyRoutingRules` (#14721) [#14722](https://github.com/vitessio/vitess/pull/14722) +### Performance +#### Query Serving + * [release-17.0] vindexes: fix pooled collator buffer memory leak (#14621) [#14623](https://github.com/vitessio/vitess/pull/14623) +### Release +#### General + * [release-17.0] Code Freeze for `v17.0.5` [#14806](https://github.com/vitessio/vitess/pull/14806) +### Testing +#### Backup and Restore + * 
[release-17.0] Add a retry to remove the vttablet directory during upgrade/downgrade backup tests (#14753) [#14757](https://github.com/vitessio/vitess/pull/14757) + diff --git a/changelog/17.0/17.0.5/release_notes.md b/changelog/17.0/17.0.5/release_notes.md new file mode 100644 index 00000000000..5f032a06fb5 --- /dev/null +++ b/changelog/17.0/17.0.5/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v17.0.5 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.5/changelog.md). + +The release includes 22 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @deepthi, @frouioui, @harshit-gangal, @shlomi-noach, @systay + diff --git a/changelog/17.0/17.0.6/changelog.md b/changelog/17.0/17.0.6/changelog.md new file mode 100644 index 00000000000..74d8a28a65f --- /dev/null +++ b/changelog/17.0/17.0.6/changelog.md @@ -0,0 +1,49 @@ +# Changelog of Vitess v17.0.6 + +### Bug fixes +#### Evalengine + * [release-17.0] evalengine: Fix week overflow (#14859) [#14860](https://github.com/vitessio/vitess/pull/14860) + * [release-17.0] evalengine: Return evalTemporal types for current date / time (#15079) [#15083](https://github.com/vitessio/vitess/pull/15083) +#### General + * [release-17.0] Protect `ExecuteFetchAsDBA` against multi-statements, excluding a sequence of `CREATE TABLE|VIEW`. 
(#14954) [#14983](https://github.com/vitessio/vitess/pull/14983) +#### Online DDL + * [release-17.0] VReplication/OnlineDDL: reordering enum values (#15103) [#15350](https://github.com/vitessio/vitess/pull/15350) +#### Query Serving + * [release-17]: Vindexes: Pass context in consistent lookup handleDup (#14653) [#14912](https://github.com/vitessio/vitess/pull/14912) + * [release-17.0] evalengine bugfix: handle nil evals correctly when coercing values (#14906) [#14913](https://github.com/vitessio/vitess/pull/14913) + * [release-17.0] Fix Go routine leaks in streaming calls (#15293) [#15299](https://github.com/vitessio/vitess/pull/15299) + * [release-17.0] SHOW VITESS_REPLICATION_STATUS: Only use replication tracker when it's enabled (#15348) [#15360](https://github.com/vitessio/vitess/pull/15360) +#### Throttler + * [release-17.0] examples: rm heartbeat flags (#14980) [#14998](https://github.com/vitessio/vitess/pull/14998) +#### vtexplain + * [release-17.0] vtexplain: Fix setting up the column information (#15275) [#15280](https://github.com/vitessio/vitess/pull/15280) + * [release-17.0] vtexplain: Ensure memory topo is set up for throttler (#15279) [#15283](https://github.com/vitessio/vitess/pull/15283) +#### vttestserver + * [release-17.0] use proper mysql version in the `vttestserver` images (#15235) [#15237](https://github.com/vitessio/vitess/pull/15237) +### CI/Build +#### Build/CI + * [release-17.0] Fix relevant files listing for `endtoend` CI (#15104) [#15109](https://github.com/vitessio/vitess/pull/15109) + * [release-17.0] Remove concurrency group for check labels workflow (#15197) [#15207](https://github.com/vitessio/vitess/pull/15207) + * [release-17.0] Update all actions setup to latest versions (#15443) [#15444](https://github.com/vitessio/vitess/pull/15444) +### Dependabot +#### General + * [release-17.0] Bumps deps and use proper Go version in upgrade tests [#15408](https://github.com/vitessio/vitess/pull/15408) +#### Java + * [release-17.0] 
build(deps): bump io.netty:netty-handler from 4.1.93.Final to 4.1.94.Final in /java (#14863) [#14880](https://github.com/vitessio/vitess/pull/14880) +### Documentation +#### Documentation + * [release-17.0] 17.0.6 release notes: ExecuteFetchAsDBA breaking change [#15011](https://github.com/vitessio/vitess/pull/15011) +### Enhancement +#### Build/CI + * [release-17.0] Update paths filter action (#15254) [#15262](https://github.com/vitessio/vitess/pull/15262) +### Regression +#### Throttler + * [release-17.0] Enable 'heartbeat_on_demand_duration' in local/examples (#15204) [#15290](https://github.com/vitessio/vitess/pull/15290) +### Release +### Testing +#### Build/CI + * [release-17.0] Bump upgrade tests to `go1.21.7` [#15160](https://github.com/vitessio/vitess/pull/15160) + * [release-17.0] CI: Address data races on memorytopo Conn.closed (#15365) [#15369](https://github.com/vitessio/vitess/pull/15369) +#### Query Serving + * [release-17.0] Refactor Upgrade downgrade tests (#14782) [#14831](https://github.com/vitessio/vitess/pull/14831) + diff --git a/changelog/17.0/17.0.6/release_notes.md b/changelog/17.0/17.0.6/release_notes.md new file mode 100644 index 00000000000..fd8b9de4a5d --- /dev/null +++ b/changelog/17.0/17.0.6/release_notes.md @@ -0,0 +1,34 @@ +# Release of Vitess v17.0.6 +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [ExecuteFetchAsDBA rejects multi-statement SQL](#execute-fetch-as-dba-reject-multi) + +## Major Changes + +### Breaking Changes + +#### ExecuteFetchAsDBA rejects multi-statement SQL + +`vtctldclient ExecuteFetchAsDBA` (and similarly the `vtctl` and `vtctlclient` commands) now reject multi-statement SQL with error. + +For example, `vtctldclient ExecuteFetchAsDBA my-tablet "stop replica; change replication source to auto_position=1; start replica"` will return an error, without attempting to execute any of these queries. 
+ +Previously, `ExecuteFetchAsDBA` silently accepted multi-statement SQL. It would (attempt to) execute all of them, but: + +- It would only indicate error for the first statement. Errors on 2nd, 3rd, ... statements were silently ignored. +- It would not consume the result sets of the 2nd, 3rd, ... statements. It would then return the used connection to the pool in a dirty state. Any further query that happens to take that connection out of the pool could get unexpected results. +- As another side effect, multi-statement schema changes would cause schema to be reloaded with only the first change, leaving the cached schema inconsistent with the underlying database. + +`ExecuteFetchAsDBA` does allow a specific use case of multi-statement SQL, which is where all statements are in the form of `CREATE TABLE` or `CREATE VIEW`. This is to support a common pattern of schema initialization. + +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.6/changelog.md). + +The release includes 24 merged Pull Requests. + +Thanks to all our contributors: @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay + diff --git a/changelog/17.0/17.0.6/summary.md b/changelog/17.0/17.0.6/summary.md new file mode 100644 index 00000000000..947aabd3f68 --- /dev/null +++ b/changelog/17.0/17.0.6/summary.md @@ -0,0 +1,25 @@ +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [ExecuteFetchAsDBA rejects multi-statement SQL](#execute-fetch-as-dba-reject-multi) + +## Major Changes + +### Breaking Changes + +#### ExecuteFetchAsDBA rejects multi-statement SQL + +`vtctldclient ExecuteFetchAsDBA` (and similarly the `vtctl` and `vtctlclient` commands) now reject multi-statement SQL with error. 
+ +For example, `vtctldclient ExecuteFetchAsDBA my-tablet "stop replica; change replication source to auto_position=1; start replica"` will return an error, without attempting to execute any of these queries. + +Previously, `ExecuteFetchAsDBA` silently accepted multi-statement SQL. It would (attempt to) execute all of them, but: + +- It would only indicate error for the first statement. Errors on 2nd, 3rd, ... statements were silently ignored. +- It would not consume the result sets of the 2nd, 3rd, ... statements. It would then return the used connection to the pool in a dirty state. Any further query that happens to take that connection out of the pool could get unexpected results. +- As another side effect, multi-statement schema changes would cause schema to be reloaded with only the first change, leaving the cached schema inconsistent with the underlying database. + +`ExecuteFetchAsDBA` does allow a specific use case of multi-statement SQL, which is where all statements are in the form of `CREATE TABLE` or `CREATE VIEW`. This is to support a common pattern of schema initialization. 
diff --git a/changelog/17.0/17.0.7/changelog.md b/changelog/17.0/17.0.7/changelog.md new file mode 100644 index 00000000000..6741780d2e3 --- /dev/null +++ b/changelog/17.0/17.0.7/changelog.md @@ -0,0 +1,29 @@ +# Changelog of Vitess v17.0.7 + +### Bug fixes +#### Query Serving + * [release-17.0] TxThrottler: dont throttle unless lag (#14789) [#15189](https://github.com/vitessio/vitess/pull/15189) + * [release-17.0] Fix aliasing in routes that have a derived table (#15550) [#15552](https://github.com/vitessio/vitess/pull/15552) + * [release-17.0] fix: don't forget DISTINCT for derived tables (#15672) [#15676](https://github.com/vitessio/vitess/pull/15676) + * [release-17.0] Fix wrong assignment to `sql_id_opt` in the parser (#15862) [#15867](https://github.com/vitessio/vitess/pull/15867) +#### Topology + * [release-17.0] Fix ZooKeeper Topology connection locks not being cleaned up correctly (#15757) [#15762](https://github.com/vitessio/vitess/pull/15762) +#### VReplication + * [release-17.0] VReplication: Take replication lag into account in VStreamManager healthcheck result processing (#15761) [#15772](https://github.com/vitessio/vitess/pull/15772) +#### VTAdmin + * [release-17.0] [VTAdmin API] Fix schema cache flag, add documentation (#15704) [#15718](https://github.com/vitessio/vitess/pull/15718) +### CI/Build +#### Build/CI + * [release-17.0] Update to latest CodeQL (#15530) [#15532](https://github.com/vitessio/vitess/pull/15532) + * [release-17.0] Upgrade go version in upgrade tests to `go1.21.9` [#15640](https://github.com/vitessio/vitess/pull/15640) +#### General + * [release-17.0] Upgrade Golang from `v1.20.13` to `v1.21.9` [#15669](https://github.com/vitessio/vitess/pull/15669) + * [release-17.0] Upgrade the Golang version to `go1.21.10` [#15863](https://github.com/vitessio/vitess/pull/15863) +### Performance +#### VTTablet + * [release-17.0] Improve performance for `BaseShowTablesWithSizes` query. 
(#15713) [#15792](https://github.com/vitessio/vitess/pull/15792) +### Release +#### General + * [release-17.0] Bump to `v17.0.7-SNAPSHOT` after the `v17.0.6` release [#15487](https://github.com/vitessio/vitess/pull/15487) + * [release-17.0] Code Freeze for `v17.0.7` [#15878](https://github.com/vitessio/vitess/pull/15878) + diff --git a/changelog/17.0/17.0.7/release_notes.md b/changelog/17.0/17.0.7/release_notes.md new file mode 100644 index 00000000000..4d4bd64176a --- /dev/null +++ b/changelog/17.0/17.0.7/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v17.0.7 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/17.0/17.0.7/changelog.md). + +The release includes 14 merged Pull Requests. + +Thanks to all our contributors: @app/vitess-bot, @frouioui, @systay, @vitess-bot + diff --git a/changelog/17.0/README.md b/changelog/17.0/README.md index dcbba316bdd..b58ad9a55d4 100644 --- a/changelog/17.0/README.md +++ b/changelog/17.0/README.md @@ -1,4 +1,20 @@ ## v17.0 +* **[17.0.7](17.0.7)** + * [Changelog](17.0.7/changelog.md) + * [Release Notes](17.0.7/release_notes.md) + +* **[17.0.6](17.0.6)** + * [Changelog](17.0.6/changelog.md) + * [Release Notes](17.0.6/release_notes.md) + +* **[17.0.5](17.0.5)** + * [Changelog](17.0.5/changelog.md) + * [Release Notes](17.0.5/release_notes.md) + +* **[17.0.4](17.0.4)** + * [Changelog](17.0.4/changelog.md) + * [Release Notes](17.0.4/release_notes.md) + * **[17.0.3](17.0.3)** * [Changelog](17.0.3/changelog.md) * [Release Notes](17.0.3/release_notes.md) diff --git a/changelog/18.0/18.0.0/summary.md b/changelog/18.0/18.0.0/summary.md index eb2b6692201..35ad018b7bc 100644 --- a/changelog/18.0/18.0.0/summary.md +++ b/changelog/18.0/18.0.0/summary.md @@ -95,6 +95,8 @@ There are 3 foreign key modes now supported in Vitess - 3. `disallow` - In this mode Vitess explicitly disallows any DDL statements that try to create a foreign key constraint. 
This mode is equivalent to running VTGate with the flag `--foreign_key_mode=disallow`. +In addition to query support, there is a new flag to `MoveTables` called `--atomic-copy` which should be used to import data into Vitess from databases which have foreign keys defined in the schema. + #### Upgrade process After upgrading from v17 to v18, users should specify the correct foreign key mode for all their keyspaces in the VSchema using the new property. diff --git a/changelog/18.0/18.0.1/changelog.md b/changelog/18.0/18.0.1/changelog.md new file mode 100644 index 00000000000..efae252075f --- /dev/null +++ b/changelog/18.0/18.0.1/changelog.md @@ -0,0 +1,40 @@ +# Changelog of Vitess v18.0.1 + +### Bug fixes +#### Backup and Restore + * [release 18.0]: `ReadBinlogFilesTimestamps` backwards compatibility [#14526](https://github.com/vitessio/vitess/pull/14526) +#### Build/CI + * [release-18.0] Update create_release.sh (#14492) [#14516](https://github.com/vitessio/vitess/pull/14516) +#### Evalengine + * [release-18.0] Fix nullability checks in evalengine (#14556) [#14564](https://github.com/vitessio/vitess/pull/14564) +#### Examples + * [release-18.0] examples: fix flag syntax for zkctl (#14469) [#14487](https://github.com/vitessio/vitess/pull/14487) +#### Observability + * [release-18.0] Fix #14414: resilient_server metrics name/prefix logic is inverted, leading to no metrics being recorded (#14415) [#14527](https://github.com/vitessio/vitess/pull/14527) +#### Query Serving + * [release-18.0] Make column resolution closer to MySQL (#14426) [#14430](https://github.com/vitessio/vitess/pull/14430) + * [release-18.0] Bug fix: Use target tablet from health stats cache when checking replication status (#14436) [#14456](https://github.com/vitessio/vitess/pull/14456) + * [release-18.0] Ensure hexval and int don't share BindVar after Normalization (#14451) [#14479](https://github.com/vitessio/vitess/pull/14479) + * [release-18.0] planbuilder bugfix: expose columns through derived 
tables (#14501) [#14504](https://github.com/vitessio/vitess/pull/14504) + * [release-18.0] expression rewriting: enable more rewrites and limit CNF rewrites (#14560) [#14576](https://github.com/vitessio/vitess/pull/14576) +#### vtctldclient + * [release-18.0] vtctldclient: Apply tablet type filtering for keyspace+shard in GetTablets (#14467) [#14470](https://github.com/vitessio/vitess/pull/14470) +### CI/Build +#### Docker + * [release-18.0] Build and push Docker Images from GitHub Actions [#14511](https://github.com/vitessio/vitess/pull/14511) +### Dependabot +#### General + * [release-18.0] Bump google.golang.org/grpc from 1.55.0-dev to 1.59.0 (#14364) [#14498](https://github.com/vitessio/vitess/pull/14498) +### Documentation +#### Documentation + * [release-18.0] release notes: add FK import to summary (#14518) [#14519](https://github.com/vitessio/vitess/pull/14519) +### Internal Cleanup +#### Query Serving + * [release-18.0] Remove excessive VTGate logging of default planner selection (#14554) [#14561](https://github.com/vitessio/vitess/pull/14561) +### Release +#### General + * [release-18.0] Code Freeze for `v18.0.1` [#14549](https://github.com/vitessio/vitess/pull/14549) +### Testing +#### Query Serving + * [release-18.0] vtgate: Allow additional errors in warnings test (#14461) [#14465](https://github.com/vitessio/vitess/pull/14465) + diff --git a/changelog/18.0/18.0.1/release_notes.md b/changelog/18.0/18.0.1/release_notes.md new file mode 100644 index 00000000000..f6f07d6e652 --- /dev/null +++ b/changelog/18.0/18.0.1/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v18.0.1 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.1/changelog.md). + +The release includes 17 merged Pull Requests. 
+ +Thanks to all our contributors: @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach + diff --git a/changelog/18.0/18.0.2/changelog.md b/changelog/18.0/18.0.2/changelog.md new file mode 100644 index 00000000000..c7866efd06a --- /dev/null +++ b/changelog/18.0/18.0.2/changelog.md @@ -0,0 +1,56 @@ +# Changelog of Vitess v18.0.2 + +### Bug fixes +#### Cluster management + * [release-18.0] Fix Panic in PRS due to a missing nil check (#14656) [#14676](https://github.com/vitessio/vitess/pull/14676) + * Revert "[release-18.0] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612)" [#14742](https://github.com/vitessio/vitess/pull/14742) +#### Evalengine + * [release-18.0] evalengine: Fix the min / max calculation for decimals (#14614) [#14616](https://github.com/vitessio/vitess/pull/14616) +#### Query Serving + * [release-18.0] fix concurrency on stream execute engine primitives (#14586) [#14592](https://github.com/vitessio/vitess/pull/14592) + * [18.0] bug fix: stop all kinds of expressions from cnf-exploding [#14593](https://github.com/vitessio/vitess/pull/14593) + * [release-18.0] bugfix: do not rewrite an expression twice (#14641) [#14643](https://github.com/vitessio/vitess/pull/14643) + * [release-18.0] tabletserver: do not consolidate streams on primary tablet when consolidator mode is `notOnPrimary` (#14332) [#14679](https://github.com/vitessio/vitess/pull/14679) + * [release-18.0] TabletServer: Handle nil targets properly everywhere (#14734) [#14741](https://github.com/vitessio/vitess/pull/14741) +#### VReplication + * [release-18.0] VReplication TableStreamer: Only stream tables in tablestreamer (ignore views) (#14646) [#14649](https://github.com/vitessio/vitess/pull/14649) + * [release-18.0] VDiff: Fix vtctldclient limit bug (#14778) [#14780](https://github.com/vitessio/vitess/pull/14780) + * [release-18.0] Backport: VReplication SwitchWrites: Properly return errors in SwitchWrites #14800 
[#14824](https://github.com/vitessio/vitess/pull/14824) + * [release-18.0] VReplication: Update singular workflow in traffic switcher (#14826) [#14829](https://github.com/vitessio/vitess/pull/14829) +### CI/Build +#### Build/CI + * [release-18.0] Update MySQL apt package and GPG signature (#14785) [#14792](https://github.com/vitessio/vitess/pull/14792) +#### General + * [release-18.0] Upgrade the Golang version to `go1.21.5` [#14690](https://github.com/vitessio/vitess/pull/14690) +### Dependabot +#### General + * [release-18.0] build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#14814) [#14817](https://github.com/vitessio/vitess/pull/14817) +#### VTAdmin + * [release-18.0] Bump @adobe/css-tools from 4.3.1 to 4.3.2 in /web/vtadmin (#14654) [#14668](https://github.com/vitessio/vitess/pull/14668) +### Enhancement +#### Backup and Restore + * [release-18.0] increase vtctlclient backupShard command success rate (#14604) [#14639](https://github.com/vitessio/vitess/pull/14639) +#### Build/CI + * [release-18.0] Add step to static check to ensure consistency of GHA workflows (#14724) [#14727](https://github.com/vitessio/vitess/pull/14727) +#### Query Serving + * [release-18.0] planbuilder: push down ordering through filter (#14583) [#14584](https://github.com/vitessio/vitess/pull/14584) +### Internal Cleanup +#### TabletManager + * [release-18.0] Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` (#14612) [#14617](https://github.com/vitessio/vitess/pull/14617) +#### vtctldclient + * [release-18.0] Fix typo for `--cells` flag help description in `ApplyRoutingRules` (#14721) [#14723](https://github.com/vitessio/vitess/pull/14723) +### Performance +#### Query Serving + * vindexes: fix pooled collator buffer memory leak [#14621](https://github.com/vitessio/vitess/pull/14621) +### Regression +#### Query Serving + * [release-18.0] plabuilder: use OR for not in comparisons (#14607) [#14615](https://github.com/vitessio/vitess/pull/14615) 
+ * [release-18.0] fix: insert on duplicate key update missing BindVars (#14728) [#14755](https://github.com/vitessio/vitess/pull/14755) +### Release +#### General + * Back to dev mode after v18.0.1 [#14580](https://github.com/vitessio/vitess/pull/14580) + * [release-18.0] Code Freeze for `v18.0.2` [#14804](https://github.com/vitessio/vitess/pull/14804) +### Testing +#### Backup and Restore + * [release-18.0] Add a retry to remove the vttablet directory during upgrade/downgrade backup tests (#14753) [#14758](https://github.com/vitessio/vitess/pull/14758) + diff --git a/changelog/18.0/18.0.2/release_notes.md b/changelog/18.0/18.0.2/release_notes.md new file mode 100644 index 00000000000..e431e9be6c5 --- /dev/null +++ b/changelog/18.0/18.0.2/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v18.0.2 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.2/changelog.md). + +The release includes 27 merged Pull Requests. + +Thanks to all our contributors: @app/github-actions, @app/vitess-bot, @brendar, @deepthi, @harshit-gangal, @rohit-nayak-ps, @systay + diff --git a/changelog/18.0/18.0.3/changelog.md b/changelog/18.0/18.0.3/changelog.md new file mode 100644 index 00000000000..29d02f62030 --- /dev/null +++ b/changelog/18.0/18.0.3/changelog.md @@ -0,0 +1,81 @@ +# Changelog of Vitess v18.0.3 + +### Bug fixes +#### CLI + * [release-18.0] Fix some binaries to print the versions (#15306) [#15309](https://github.com/vitessio/vitess/pull/15309) +#### Evalengine + * [release-18.0] evalengine: Fix week overflow (#14859) [#14861](https://github.com/vitessio/vitess/pull/14861) + * [release-18.0] evalengine: Return evalTemporal types for current date / time (#15079) [#15084](https://github.com/vitessio/vitess/pull/15084) +#### General + * [release-18.0] Protect `ExecuteFetchAsDBA` against multi-statements, excluding a sequence of `CREATE TABLE|VIEW`. 
(#14954) [#14984](https://github.com/vitessio/vitess/pull/14984) +#### Online DDL + * [release-18.0] VReplication/OnlineDDL: reordering enum values (#15103) [#15351](https://github.com/vitessio/vitess/pull/15351) +#### Query Serving + * [release-18]: Vindexes: Pass context in consistent lookup handleDup (#14653) [#14911](https://github.com/vitessio/vitess/pull/14911) + * [release-18.0] evalengine bugfix: handle nil evals correctly when coercing values (#14906) [#14914](https://github.com/vitessio/vitess/pull/14914) + * [release-18.0] bugfix: Columns alias expanding (#14935) [#14955](https://github.com/vitessio/vitess/pull/14955) + * [release-18.0] Improve efficiency and accuracy of mysqld.GetVersionString (#15096) [#15111](https://github.com/vitessio/vitess/pull/15111) + * [release-18.0] In the same sqltypes.Type, Copy expression types to avoid weight_strings and derived tables (#15069) [#15129](https://github.com/vitessio/vitess/pull/15129) + * [release-18.0] make sure to handle unsupported collations well (#15134) [#15142](https://github.com/vitessio/vitess/pull/15142) + * [release-18.0] fix: ignore internal tables in schema tracking (#15141) [#15146](https://github.com/vitessio/vitess/pull/15146) + * [release-18.0] TxThrottler: dont throttle unless lag (#14789) [#15190](https://github.com/vitessio/vitess/pull/15190) + * [release-18.0] Avoid rewriting unsharded queries and split semantic analysis in two (#15217) [#15229](https://github.com/vitessio/vitess/pull/15229) + * [release-18.0] sqlparser: use integers instead of literals for Length/Precision (#15256) [#15268](https://github.com/vitessio/vitess/pull/15268) + * [release-18.0] Fix Go routine leaks in streaming calls (#15293) [#15300](https://github.com/vitessio/vitess/pull/15300) + * [release-18.0] Column alias expanding on ORDER BY (#15302) [#15331](https://github.com/vitessio/vitess/pull/15331) + * [release-18.0] go/vt/discovery: use protobuf getters for SrvVschema (#15343) 
[#15345](https://github.com/vitessio/vitess/pull/15345) + * [release-18.0] SHOW VITESS_REPLICATION_STATUS: Only use replication tracker when it's enabled (#15348) [#15361](https://github.com/vitessio/vitess/pull/15361) + * [release-18.0] Bugfix: GROUP BY/HAVING alias resolution (#15344) [#15381](https://github.com/vitessio/vitess/pull/15381) +#### Schema Tracker + * [release-18.0] discovery: fix crash with nil server vschema (#15086) [#15092](https://github.com/vitessio/vitess/pull/15092) +#### Throttler + * [release-18.0] examples: rm heartbeat flags (#14980) [#14999](https://github.com/vitessio/vitess/pull/14999) +#### VReplication + * [release-18.0] VReplication: Make Target Sequence Initialization More Robust (#15289) [#15307](https://github.com/vitessio/vitess/pull/15307) + * [release-18.0] VtctldClient Reshard: add e2e tests to confirm CLI options and fix discovered issues. (#15353) [#15471](https://github.com/vitessio/vitess/pull/15471) +#### VTCombo + * [release-18.0] Correctly set log_dir default in vtcombo (#15153) [#15154](https://github.com/vitessio/vitess/pull/15154) +#### vtexplain + * [release-18.0] vtexplain: Fix setting up the column information (#15275) [#15281](https://github.com/vitessio/vitess/pull/15281) + * [release-18.0] vtexplain: Ensure memory topo is set up for throttler (#15279) [#15284](https://github.com/vitessio/vitess/pull/15284) +#### vttestserver + * [release-18.0] Revert unwanted logging change to `vttestserver` (#15148) [#15149](https://github.com/vitessio/vitess/pull/15149) + * [release-18.0] use proper mysql version in the `vttestserver` images (#15235) [#15238](https://github.com/vitessio/vitess/pull/15238) +### CI/Build +#### Build/CI + * [release-18.0] Fix relevant files listing for `endtoend` CI (#15104) [#15110](https://github.com/vitessio/vitess/pull/15110) + * [release-18.0] Remove concurrency group for check labels workflow (#15197) [#15208](https://github.com/vitessio/vitess/pull/15208) + * [release-18.0] bump 
`github.com/golang/protobuf` to `v1.5.4` (#15426) [#15427](https://github.com/vitessio/vitess/pull/15427) + * [release-18.0] Update all actions setup to latest versions (#15443) [#15445](https://github.com/vitessio/vitess/pull/15445) +#### General + * [release-18.0] Upgrade the Golang version to `go1.21.8` [#15407](https://github.com/vitessio/vitess/pull/15407) +### Dependabot +#### Java + * [release-18.0] build(deps): bump io.netty:netty-handler from 4.1.93.Final to 4.1.94.Final in /java (#14863) [#14881](https://github.com/vitessio/vitess/pull/14881) +### Documentation +#### Documentation + * [release-18.0] 18.0.3 release notes: ExecuteFetchAsDBA breaking change [#15013](https://github.com/vitessio/vitess/pull/15013) + * [release-18.0] Fix docs for unmanaged tablets (#15437) [#15473](https://github.com/vitessio/vitess/pull/15473) +### Enhancement +#### Build/CI + * [release-18.0] Update paths filter action (#15254) [#15263](https://github.com/vitessio/vitess/pull/15263) +### Performance +#### Throttler + * [release-18.0] Throttler: Use tmclient pool for CheckThrottler tabletmanager RPC [#15087](https://github.com/vitessio/vitess/pull/15087) +### Regression +#### Query Serving + * [release-18.0] Subquery inside aggregration function (#14844) [#14845](https://github.com/vitessio/vitess/pull/14845) + * [release-18.0] Fix routing rule query rewrite (#15253) [#15258](https://github.com/vitessio/vitess/pull/15258) +#### Throttler + * [release-18.0] Enable 'heartbeat_on_demand_duration' in local/examples (#15204) [#15291](https://github.com/vitessio/vitess/pull/15291) +#### vttestserver + * [release-18.0] Fix logging issue when running in Docker with the syslog daemon disabled (#15176) [#15185](https://github.com/vitessio/vitess/pull/15185) +### Release +#### General + * Back to dev mode after v18.0.2 [#14839](https://github.com/vitessio/vitess/pull/14839) + * [release-18.0] Code Freeze for `v18.0.3` [#15480](https://github.com/vitessio/vitess/pull/15480) +### Testing 
+#### Build/CI + * [release-18.0] Use `go1.22.0` in upgrade tests [#15170](https://github.com/vitessio/vitess/pull/15170) + * [release-18.0] CI: Address data races on memorytopo Conn.closed (#15365) [#15370](https://github.com/vitessio/vitess/pull/15370) + diff --git a/changelog/18.0/18.0.3/release_notes.md b/changelog/18.0/18.0.3/release_notes.md new file mode 100644 index 00000000000..e72e32fa81d --- /dev/null +++ b/changelog/18.0/18.0.3/release_notes.md @@ -0,0 +1,34 @@ +# Release of Vitess v18.0.3 +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [ExecuteFetchAsDBA rejects multi-statement SQL](#execute-fetch-as-dba-reject-multi) + +## Major Changes + +### Breaking Changes + +#### ExecuteFetchAsDBA rejects multi-statement SQL + +`vtctldclient ExecuteFetchAsDBA` (and similarly the `vtctl` and `vtctlclient` commands) now reject multi-statement SQL with an error. + +For example, `vtctldclient ExecuteFetchAsDBA my-tablet "stop replica; change replication source to auto_position=1; start replica"` will return an error, without attempting to execute any of these queries. + +Previously, `ExecuteFetchAsDBA` silently accepted multi-statement SQL. It would (attempt to) execute all of them, but: + +- It would only indicate an error for the first statement. Errors on 2nd, 3rd, ... statements were silently ignored. +- It would not consume the result sets of the 2nd, 3rd, ... statements. It would then return the used connection to the pool in a dirty state. Any further query that happens to take that connection out of the pool could get unexpected results. +- As another side effect, multi-statement schema changes would cause schema to be reloaded with only the first change, leaving the cached schema inconsistent with the underlying database. + +`ExecuteFetchAsDBA` does allow a specific use case of multi-statement SQL, which is where all statements are in the form of `CREATE TABLE` or `CREATE VIEW`. 
This is to support a common pattern of schema initialization, formalized in `ApplySchema --batch-size` which uses `ExecuteFetchAsDBA` under the hood. + +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.3/changelog.md). + +The release includes 47 merged Pull Requests. + +Thanks to all our contributors: @app/github-actions, @app/vitess-bot, @frouioui, @harshit-gangal, @rohit-nayak-ps, @shlomi-noach, @systay, @wangweicugw + diff --git a/changelog/18.0/18.0.3/summary.md b/changelog/18.0/18.0.3/summary.md new file mode 100644 index 00000000000..1573ac0c0e3 --- /dev/null +++ b/changelog/18.0/18.0.3/summary.md @@ -0,0 +1,25 @@ +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Breaking Changes](#breaking-changes)** + - [ExecuteFetchAsDBA rejects multi-statement SQL](#execute-fetch-as-dba-reject-multi) + +## Major Changes + +### Breaking Changes + +#### ExecuteFetchAsDBA rejects multi-statement SQL + +`vtctldclient ExecuteFetchAsDBA` (and similarly the `vtctl` and `vtctlclient` commands) now reject multi-statement SQL with an error. + +For example, `vtctldclient ExecuteFetchAsDBA my-tablet "stop replica; change replication source to auto_position=1; start replica"` will return an error, without attempting to execute any of these queries. + +Previously, `ExecuteFetchAsDBA` silently accepted multi-statement SQL. It would (attempt to) execute all of them, but: + +- It would only indicate an error for the first statement. Errors on 2nd, 3rd, ... statements were silently ignored. +- It would not consume the result sets of the 2nd, 3rd, ... statements. It would then return the used connection to the pool in a dirty state. Any further query that happens to take that connection out of the pool could get unexpected results. 
+- As another side effect, multi-statement schema changes would cause schema to be reloaded with only the first change, leaving the cached schema inconsistent with the underlying database. + +`ExecuteFetchAsDBA` does allow a specific use case of multi-statement SQL, which is where all statements are in the form of `CREATE TABLE` or `CREATE VIEW`. This is to support a common pattern of schema initialization, formalized in `ApplySchema --batch-size` which uses `ExecuteFetchAsDBA` under the hood. diff --git a/changelog/18.0/18.0.4/changelog.md b/changelog/18.0/18.0.4/changelog.md new file mode 100644 index 00000000000..a5e376b85f3 --- /dev/null +++ b/changelog/18.0/18.0.4/changelog.md @@ -0,0 +1,22 @@ +# Changelog of Vitess v18.0.4 + +### Bug fixes +#### VReplication + * [release-18.0] VReplication: Fix workflow update changed handling (#15621) [#15628](https://github.com/vitessio/vitess/pull/15628) +### CI/Build +#### Build/CI + * [release-18.0] Update to latest CodeQL (#15530) [#15533](https://github.com/vitessio/vitess/pull/15533) + * [release-18.0] Update go.mod go version to 1.21.9 [#15646](https://github.com/vitessio/vitess/pull/15646) + * [release-18.0] Updated `golang.org/x/net` [#15650](https://github.com/vitessio/vitess/pull/15650) +#### General + * [release-18.0] Upgrade to go1.21.9 [#15639](https://github.com/vitessio/vitess/pull/15639) +### Regression +#### Query Serving + * [release-18.0] fix: remove keyspace from column during query builder (#15514) [#15516](https://github.com/vitessio/vitess/pull/15516) +### Release +#### General + * [release-18.0] Bump to `v18.0.4-SNAPSHOT` after the `v18.0.3` release [#15489](https://github.com/vitessio/vitess/pull/15489) +### Testing +#### VReplication + * [release-18.0] VReplication: Fix vtctldclient SwitchReads related bugs and move the TestBasicV2Workflows e2e test to vtctldclient (#15579) [#15583](https://github.com/vitessio/vitess/pull/15583) + diff --git a/changelog/18.0/18.0.4/release_notes.md 
b/changelog/18.0/18.0.4/release_notes.md new file mode 100644 index 00000000000..a4b18f5f2fc --- /dev/null +++ b/changelog/18.0/18.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v18.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.4/changelog.md). + +The release includes 8 merged Pull Requests. + +Thanks to all our contributors: @app/vitess-bot, @frouioui, @shlomi-noach, @systay + diff --git a/changelog/18.0/18.0.5/changelog.md b/changelog/18.0/18.0.5/changelog.md new file mode 100644 index 00000000000..7b59717ada2 --- /dev/null +++ b/changelog/18.0/18.0.5/changelog.md @@ -0,0 +1,32 @@ +# Changelog of Vitess v18.0.5 + +### Bug fixes +#### Query Serving + * [release-18.0] fix: don't forget DISTINCT for derived tables (#15672) [#15677](https://github.com/vitessio/vitess/pull/15677) + * [release-18.0] Fix panic in aggregation (#15728) [#15735](https://github.com/vitessio/vitess/pull/15735) + * [release-18.0] Fix wrong assignment to `sql_id_opt` in the parser (#15862) [#15868](https://github.com/vitessio/vitess/pull/15868) +#### Topology + * [release-18.0] Fix ZooKeeper Topology connection locks not being cleaned up correctly (#15757) [#15763](https://github.com/vitessio/vitess/pull/15763) +#### VReplication + * [release-18.0] VReplication: Take replication lag into account in VStreamManager healthcheck result processing (#15761) [#15773](https://github.com/vitessio/vitess/pull/15773) +#### VTAdmin + * [release-18.0] [VTAdmin API] Fix schema cache flag, add documentation (#15704) [#15719](https://github.com/vitessio/vitess/pull/15719) + * [VTAdmin] Remove vtctld web link, improve local example (#15607) [#15825](https://github.com/vitessio/vitess/pull/15825) +### CI/Build +#### General + * [release-18.0] Upgrade the Golang version to `go1.21.10` [#15866](https://github.com/vitessio/vitess/pull/15866) +#### VReplication + * [release-18.0] VReplication: Get workflowFlavorVtctl 
endtoend testing working properly again (#15636) [#15666](https://github.com/vitessio/vitess/pull/15666) +#### VTAdmin + * [release-18.0] Update VTAdmin build script (#15839) [#15849](https://github.com/vitessio/vitess/pull/15849) +### Performance +#### VTTablet + * [release-18.0] Improve performance for `BaseShowTablesWithSizes` query. (#15713) [#15793](https://github.com/vitessio/vitess/pull/15793) +### Regression +#### Query Serving + * [release-18.0] Direct PR. Fix regression where reference tables with a different name on sharded keyspaces were not routed correctly. [#15788](https://github.com/vitessio/vitess/pull/15788) +### Release +#### General + * [release-18.0] Bump to `v18.0.5-SNAPSHOT` after the `v18.0.4` release [#15660](https://github.com/vitessio/vitess/pull/15660) + * [release-18.0] Code Freeze for `v18.0.5` [#15876](https://github.com/vitessio/vitess/pull/15876) + diff --git a/changelog/18.0/18.0.5/release_notes.md b/changelog/18.0/18.0.5/release_notes.md new file mode 100644 index 00000000000..b2fc0af7e48 --- /dev/null +++ b/changelog/18.0/18.0.5/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v18.0.5 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.5/changelog.md). + +The release includes 14 merged Pull Requests. 
+ +Thanks to all our contributors: @app/vitess-bot, @notfelineit, @rohit-nayak-ps, @shlomi-noach, @vitess-bot + diff --git a/changelog/18.0/README.md b/changelog/18.0/README.md index 97676dc7e39..77dddf2cfbc 100644 --- a/changelog/18.0/README.md +++ b/changelog/18.0/README.md @@ -1,4 +1,24 @@ ## v18.0 +* **[18.0.5](18.0.5)** + * [Changelog](18.0.5/changelog.md) + * [Release Notes](18.0.5/release_notes.md) + +* **[18.0.4](18.0.4)** + * [Changelog](18.0.4/changelog.md) + * [Release Notes](18.0.4/release_notes.md) + +* **[18.0.3](18.0.3)** + * [Changelog](18.0.3/changelog.md) + * [Release Notes](18.0.3/release_notes.md) + +* **[18.0.2](18.0.2)** + * [Changelog](18.0.2/changelog.md) + * [Release Notes](18.0.2/release_notes.md) + +* **[18.0.1](18.0.1)** + * [Changelog](18.0.1/changelog.md) + * [Release Notes](18.0.1/release_notes.md) + * **[18.0.0](18.0.0)** * [Changelog](18.0.0/changelog.md) * [Release Notes](18.0.0/release_notes.md) diff --git a/changelog/19.0/19.0.0/changelog.md b/changelog/19.0/19.0.0/changelog.md new file mode 100644 index 00000000000..8c7a9223a18 --- /dev/null +++ b/changelog/19.0/19.0.0/changelog.md @@ -0,0 +1,571 @@ +# Changelog of Vitess v19.0.0 + +### Announcement +#### General + * summary: updated summary with 19.0 changes [#15132](https://github.com/vitessio/vitess/pull/15132) +### Bug fixes +#### Backup and Restore + * MysqlCtl: implement missing `ReadBinlogFilesTimestamps` function [#14525](https://github.com/vitessio/vitess/pull/14525) + * Replication: Have the DB flavor process waiting for a pos [#14745](https://github.com/vitessio/vitess/pull/14745) + * [release-19.0 backport] Mysqld: capture mysqlbinlog std error output (#15278) [#15379](https://github.com/vitessio/vitess/pull/15379) +#### Build/CI + * Update create_release.sh [#14492](https://github.com/vitessio/vitess/pull/14492) + * Set minimal tokens for auto_approve_pr [#14534](https://github.com/vitessio/vitess/pull/14534) + * Run Go deps upgrade every week 
[#14910](https://github.com/vitessio/vitess/pull/14910) +#### CLI + * Fix anonymous paths in cobra code-gen [#14185](https://github.com/vitessio/vitess/pull/14185) + * [release-19.0] Fix some binaries to print the versions (#15306) [#15310](https://github.com/vitessio/vitess/pull/15310) + * [release-19.0] tablet: remove max-waiters setting (#15323) [#15325](https://github.com/vitessio/vitess/pull/15325) +#### Cluster management + * Fix Panic in PRS due to a missing nil check [#14656](https://github.com/vitessio/vitess/pull/14656) + * Fix hearbeatWriter Close being stuck if waiting for a semi-sync ACK [#14823](https://github.com/vitessio/vitess/pull/14823) + * Block replication and query RPC calls until wait for dba grants has completed [#14836](https://github.com/vitessio/vitess/pull/14836) + * Fix the parser to allow multiple strings one after the other [#15076](https://github.com/vitessio/vitess/pull/15076) +#### Docker + * [Docker] Fix VTadmin build [#14363](https://github.com/vitessio/vitess/pull/14363) +#### Evalengine + * evalengine: Misc bugs [#14351](https://github.com/vitessio/vitess/pull/14351) + * datetime: obey the evalengine's environment time [#14358](https://github.com/vitessio/vitess/pull/14358) + * Fix nullability checks in evalengine [#14556](https://github.com/vitessio/vitess/pull/14556) + * evalengine: Handle zero dates correctly [#14610](https://github.com/vitessio/vitess/pull/14610) + * evalengine: Fix the min / max calculation for decimals [#14614](https://github.com/vitessio/vitess/pull/14614) + * evalengine: Fix week overflow [#14859](https://github.com/vitessio/vitess/pull/14859) + * evalengine: Return evalTemporal types for current date / time [#15079](https://github.com/vitessio/vitess/pull/15079) + * mysql/datetime: Improve TIME parsing logic [#15135](https://github.com/vitessio/vitess/pull/15135) +#### Examples + * examples: fix flag syntax for zkctl [#14469](https://github.com/vitessio/vitess/pull/14469) +#### General + * viper: 
register dynamic config with both disk and live [#14453](https://github.com/vitessio/vitess/pull/14453) + * Protect `ExecuteFetchAsDBA` against multi-statements, excluding a sequence of `CREATE TABLE|VIEW`. [#14954](https://github.com/vitessio/vitess/pull/14954) + * Use the correct parser for truncation [#14985](https://github.com/vitessio/vitess/pull/14985) + * Fix log format error in vttls.go [#15035](https://github.com/vitessio/vitess/pull/15035) +#### Observability + * Fix #14414: resilient_server metrics name/prefix logic is inverted, leading to no metrics being recorded [#14415](https://github.com/vitessio/vitess/pull/14415) +#### Online DDL + * Online DDL: timeouts for all gRPC calls [#14182](https://github.com/vitessio/vitess/pull/14182) + * OnlineDDL: fix scenarios where migration hangs instead of directly failing [#14290](https://github.com/vitessio/vitess/pull/14290) + * schemadiff: fix missing `DROP CONSTRAINT` in duplicate/redundant constraints scenario. [#14387](https://github.com/vitessio/vitess/pull/14387) + * [release-19.0] VReplication/OnlineDDL: reordering enum values (#15103) [#15352](https://github.com/vitessio/vitess/pull/15352) +#### Query Serving + * bugfix: use the proper interface for comment directives [#14267](https://github.com/vitessio/vitess/pull/14267) + * evalengine: Use the right unknown type to initialize [#14313](https://github.com/vitessio/vitess/pull/14313) + * engine: fix race when reading fields in Concatenate [#14324](https://github.com/vitessio/vitess/pull/14324) + * tabletserver: do not consolidate streams on primary tablet when consolidator mode is `notOnPrimary` [#14332](https://github.com/vitessio/vitess/pull/14332) + * Planner bugfix [#14365](https://github.com/vitessio/vitess/pull/14365) + * semantics: Fix missing union pop from scoper [#14401](https://github.com/vitessio/vitess/pull/14401) + * fix: mismatch in column count and value count [#14417](https://github.com/vitessio/vitess/pull/14417) + * Make column 
resolution closer to MySQL [#14426](https://github.com/vitessio/vitess/pull/14426) + * vtgate/engine: Fix race condition in join logic [#14435](https://github.com/vitessio/vitess/pull/14435) + * Bug fix: Use target tablet from health stats cache when checking replication status [#14436](https://github.com/vitessio/vitess/pull/14436) + * Ensure hexval and int don't share BindVar after Normalization [#14451](https://github.com/vitessio/vitess/pull/14451) + * Make connection killing resilient to MySQL hangs [#14500](https://github.com/vitessio/vitess/pull/14500) + * planbuilder bugfix: expose columns through derived tables [#14501](https://github.com/vitessio/vitess/pull/14501) + * Fix missing query serving error code [#14520](https://github.com/vitessio/vitess/pull/14520) + * Fix type coercion in cascading non-literal updates [#14524](https://github.com/vitessio/vitess/pull/14524) + * Type Cast all update expressions in verify queries [#14555](https://github.com/vitessio/vitess/pull/14555) + * expression rewriting: enable more rewrites and limit CNF rewrites [#14560](https://github.com/vitessio/vitess/pull/14560) + * bug fix: stop all kinds of expressions from cnf-exploding [#14585](https://github.com/vitessio/vitess/pull/14585) + * fix concurrency on stream execute engine primitives [#14586](https://github.com/vitessio/vitess/pull/14586) + * bugfix: do not rewrite an expression twice [#14641](https://github.com/vitessio/vitess/pull/14641) + * txserializer: change log message based on dry run [#14651](https://github.com/vitessio/vitess/pull/14651) + * Vindexes: Pass context in consistent lookup handleDup [#14653](https://github.com/vitessio/vitess/pull/14653) + * Fail correlated subquery in planning phase instead of a runtime error [#14701](https://github.com/vitessio/vitess/pull/14701) + * bugfix: use the original expression and not the alias [#14704](https://github.com/vitessio/vitess/pull/14704) + * Fix RegisterNotifier to use a copy of the tables to prevent data 
races [#14716](https://github.com/vitessio/vitess/pull/14716) + * fix: flush tables with read lock to run only with reserved connection [#14720](https://github.com/vitessio/vitess/pull/14720) + * TabletServer: Handle nil targets properly everywhere [#14734](https://github.com/vitessio/vitess/pull/14734) + * bugfix: don't panic when missing schema information [#14787](https://github.com/vitessio/vitess/pull/14787) + * schemadiff: allow char->varchar FK reference type matching [#14849](https://github.com/vitessio/vitess/pull/14849) + * sqlparser: FORCE_CUTOVER is a non-reserved keyword [#14885](https://github.com/vitessio/vitess/pull/14885) + * Improve err extraction logic [#14887](https://github.com/vitessio/vitess/pull/14887) + * Add nil check to prevent panics [#14902](https://github.com/vitessio/vitess/pull/14902) + * evalengine bugfix: handle nil evals correctly when coercing values [#14906](https://github.com/vitessio/vitess/pull/14906) + * Vttablet panic in requests Wait [#14924](https://github.com/vitessio/vitess/pull/14924) + * `schemadiff`: fix diffing of textual columns with implicit charsets [#14930](https://github.com/vitessio/vitess/pull/14930) + * bugfix: Columns alias expanding [#14935](https://github.com/vitessio/vitess/pull/14935) + * Fix panic for unknown columns in foreign key managed mode [#15025](https://github.com/vitessio/vitess/pull/15025) + * Fix subquery cloning and dependencies [#15039](https://github.com/vitessio/vitess/pull/15039) + * Fix `buffer_drain_concurrency` not doing anything [#15042](https://github.com/vitessio/vitess/pull/15042) + * Copy expression types to avoid weight_strings and derived tables [#15069](https://github.com/vitessio/vitess/pull/15069) + * Improve efficiency and accuracy of mysqld.GetVersionString [#15096](https://github.com/vitessio/vitess/pull/15096) + * mysql: Ensure we set up the initial collation correctly [#15115](https://github.com/vitessio/vitess/pull/15115) + * make sure to handle unsupported collations 
well [#15134](https://github.com/vitessio/vitess/pull/15134) + * [release-19.0] make sure to handle unsupported collations well [#15143](https://github.com/vitessio/vitess/pull/15143) + * [release-19.0] fix: ignore internal tables in schema tracking (#15141) [#15147](https://github.com/vitessio/vitess/pull/15147) + * [release-19.0] Planner Bug: Joins inside derived table (#14974) [#15177](https://github.com/vitessio/vitess/pull/15177) + * [release-19.0] TxThrottler: dont throttle unless lag (#14789) [#15196](https://github.com/vitessio/vitess/pull/15196) + * [release-19.0] bugfix: wrong field type returned for SUM (#15192) [#15206](https://github.com/vitessio/vitess/pull/15206) + * [release-19.0] Avoid rewriting unsharded queries and split semantic analysis in two (#15217) [#15230](https://github.com/vitessio/vitess/pull/15230) + * [release-19.0] Fix Delete with multi-tables related by foreign keys (#15218) [#15255](https://github.com/vitessio/vitess/pull/15255) + * [release-19.0] sqlparser: use integers instead of literals for Length/Precision (#15256) [#15269](https://github.com/vitessio/vitess/pull/15269) + * [release-19.0] Fix Go routine leaks in streaming calls (#15293) [#15301](https://github.com/vitessio/vitess/pull/15301) + * [release-19.0] planner: support union statements with ctes (#15312) [#15324](https://github.com/vitessio/vitess/pull/15324) + * [release-19.0] Column alias expanding on ORDER BY (#15302) [#15329](https://github.com/vitessio/vitess/pull/15329) + * [release-19.0] go/vt/discovery: use protobuf getters for SrvVschema (#15343) [#15346](https://github.com/vitessio/vitess/pull/15346) + * [release-19.0] SHOW VITESS_REPLICATION_STATUS: Only use replication tracker when it's enabled (#15348) [#15362](https://github.com/vitessio/vitess/pull/15362) + * [release-19.0] Bugfix: GROUP BY/HAVING alias resolution (#15344) [#15377](https://github.com/vitessio/vitess/pull/15377) +#### Schema Tracker + * discovery: fix crash with nil server vschema 
[#15086](https://github.com/vitessio/vitess/pull/15086) +#### TabletManager + * mysqlctl: Cleanup stale socket lockfile [#14553](https://github.com/vitessio/vitess/pull/14553) + * mysqlctl: Fix cleaning up the stale lock file [#14600](https://github.com/vitessio/vitess/pull/14600) + * tabletserver: Skip wait for DBA grants for external tablets [#14629](https://github.com/vitessio/vitess/pull/14629) + * mysqlctl: Error out on stale socket [#14650](https://github.com/vitessio/vitess/pull/14650) +#### Throttler + * Throttler: set timeouts on gRPC communication and on topo communication [#14165](https://github.com/vitessio/vitess/pull/14165) + * examples: rm heartbeat flags [#14980](https://github.com/vitessio/vitess/pull/14980) + * [release-19.0] Throttler: fix nil pointer dereference error (#15180) [#15181](https://github.com/vitessio/vitess/pull/15181) +#### Topology + * Ignore non-Shard keys in FindAllShardsInKeyspace List impl [#15117](https://github.com/vitessio/vitess/pull/15117) +#### VReplication + * VReplication: error on vtctldclient commands w/o tablet types [#14294](https://github.com/vitessio/vitess/pull/14294) + * VDiff: wait for shard streams of one table diff to complete for before starting that of the next table [#14345](https://github.com/vitessio/vitess/pull/14345) + * Vtctld SwitchReads: fix bug where writes were also being switched as part of switching reads when all traffic was switched using SwitchTraffic [#14360](https://github.com/vitessio/vitess/pull/14360) + * VDiff tablet selection: pick non-serving tablets in Reshard workflows [#14413](https://github.com/vitessio/vitess/pull/14413) + * VDiff: "show all" should only report vdiffs for the specified keyspace and workflow [#14442](https://github.com/vitessio/vitess/pull/14442) + * VReplication: Properly Handle FK Constraints When Deferring Secondary Keys [#14543](https://github.com/vitessio/vitess/pull/14543) + * Materializer: normalize schema via schemadiff on --atomic-copy 
[#14636](https://github.com/vitessio/vitess/pull/14636) + * VReplication TableStreamer: Only stream tables in tablestreamer (ignore views) [#14646](https://github.com/vitessio/vitess/pull/14646) + * VDiff: Fix vtctldclient limit bug [#14778](https://github.com/vitessio/vitess/pull/14778) + * VReplication: Guard against unsafe _vt.vreplication writes [#14797](https://github.com/vitessio/vitess/pull/14797) + * VReplication SwitchWrites: Properly return errors in SwitchWrites [#14800](https://github.com/vitessio/vitess/pull/14800) + * VReplication: Update singular workflow in traffic switcher [#14826](https://github.com/vitessio/vitess/pull/14826) + * Flakes: Fix flaky vtctl unit test TestMoveTables [#14886](https://github.com/vitessio/vitess/pull/14886) + * VReplication: send unique key name to `rowstreamer`, which can then use with `FORCE INDEX` [#14916](https://github.com/vitessio/vitess/pull/14916) + * VDiff: Make max diff duration upgrade/downgrade safe [#14995](https://github.com/vitessio/vitess/pull/14995) + * [release-19.0] VReplication: disable foreign_key_checks for bulk data cleanup (#15261) [#15265](https://github.com/vitessio/vitess/pull/15265) + * [release-19.0] VReplication: Make Target Sequence Initialization More Robust (#15289) [#15308](https://github.com/vitessio/vitess/pull/15308) +#### VTCombo + * [release-19.0] Correctly set log_dir default in vtcombo (#15153) [#15155](https://github.com/vitessio/vitess/pull/15155) +#### vtctl + * VReplication: Add missing info to vtctldclient workflow SHOW output [#14225](https://github.com/vitessio/vitess/pull/14225) +#### vtctldclient + * vtctldclient: Apply tablet type filtering for keyspace+shard in GetTablets [#14467](https://github.com/vitessio/vitess/pull/14467) +#### vtexplain + * [release-19.0] vtexplain: Fix setting up the column information (#15275) [#15282](https://github.com/vitessio/vitess/pull/15282) + * [release-19.0] vtexplain: Ensure memory topo is set up for throttler (#15279) 
[#15285](https://github.com/vitessio/vitess/pull/15285) +#### vttestserver + * [release-19.0] Revert unwanted logging change to `vttestserver` (#15148) [#15150](https://github.com/vitessio/vitess/pull/15150) + * [release-19.0] use proper mysql version in the `vttestserver` images (#15235) [#15239](https://github.com/vitessio/vitess/pull/15239) +### CI/Build +#### Backup and Restore + * Incremental backup: fix race condition in reading 'mysqlbinlog' output [#14330](https://github.com/vitessio/vitess/pull/14330) +#### Build/CI + * Enhance PR template + CI workflow for backport labels. [#14779](https://github.com/vitessio/vitess/pull/14779) + * Update MySQL apt package and GPG signature [#14785](https://github.com/vitessio/vitess/pull/14785) + * fix: build on delete operator [#14833](https://github.com/vitessio/vitess/pull/14833) + * CI: Adjust FOSSA API secret name [#14918](https://github.com/vitessio/vitess/pull/14918) + * CI: Tweak our code coverage profile behavior [#14967](https://github.com/vitessio/vitess/pull/14967) + * Fix relevant files listing for `endtoend` CI [#15104](https://github.com/vitessio/vitess/pull/15104) + * [release-19.0] Remove concurrency group for check labels workflow (#15197) [#15209](https://github.com/vitessio/vitess/pull/15209) + * [release-19.0] Update toolchain version in go.mod (#15245) [#15246](https://github.com/vitessio/vitess/pull/15246) +#### Docker + * Vitess MySQL Docker Image [#14158](https://github.com/vitessio/vitess/pull/14158) + * Build and push Docker `vitess/vttestserver` DockerHub from GitHub Actions [#14314](https://github.com/vitessio/vitess/pull/14314) + * Add `vtexplain` and `vtbackup` to base docker auto-build [#14318](https://github.com/vitessio/vitess/pull/14318) +#### Evalengine + * Fix codegen command with the right type [#14376](https://github.com/vitessio/vitess/pull/14376) +#### General + * [main] Upgrade the Golang version to `go1.21.2` [#14193](https://github.com/vitessio/vitess/pull/14193) + * [main] 
Upgrade the Golang version to `go1.21.3` [#14231](https://github.com/vitessio/vitess/pull/14231) + * [main] Upgrade the Golang version to `go1.21.4` [#14488](https://github.com/vitessio/vitess/pull/14488) + * [main] Upgrade the Golang version to `go1.21.5` [#14689](https://github.com/vitessio/vitess/pull/14689) + * connpool: fix racy test [#14731](https://github.com/vitessio/vitess/pull/14731) + * [release-19.0] Upgrade the Golang version to `go1.22.0` [#15169](https://github.com/vitessio/vitess/pull/15169) + * [release-19.0] Upgrade the Golang version to `go1.22.1` [#15406](https://github.com/vitessio/vitess/pull/15406) +#### Online DDL + * onlineddl_vrepl_stress: fix flakiness caused by timeouts [#14295](https://github.com/vitessio/vitess/pull/14295) + * OnlineDDL: reduce vrepl_stress workload in forks [#14302](https://github.com/vitessio/vitess/pull/14302) + * Online DDL: fix endtoend test dropping foreign key [#14522](https://github.com/vitessio/vitess/pull/14522) + * VTGate/foreign keys stress test: add tests for 'standard' replica [#14747](https://github.com/vitessio/vitess/pull/14747) +#### VTAdmin + * Update vtadmin dependencies [#14336](https://github.com/vitessio/vitess/pull/14336) + * Fix stray vtadmin package-lock.json content [#14350](https://github.com/vitessio/vitess/pull/14350) +#### VTorc + * docker: add dedicated vtorc container [#14126](https://github.com/vitessio/vitess/pull/14126) +### Dependabot +#### General + * Bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 [#14239](https://github.com/vitessio/vitess/pull/14239) + * Bump golang.org/x/net from 0.14.0 to 0.17.0 [#14260](https://github.com/vitessio/vitess/pull/14260) + * Bump google.golang.org/grpc from 1.55.0-dev to 1.59.0 [#14364](https://github.com/vitessio/vitess/pull/14364) + * build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0 [#14814](https://github.com/vitessio/vitess/pull/14814) +#### Java + * build(deps): bump com.google.guava:guava from 30.1.1-jre to 
32.0.0-jre in /java [#14759](https://github.com/vitessio/vitess/pull/14759) + * build(deps): bump io.netty:netty-handler from 4.1.93.Final to 4.1.94.Final in /java [#14863](https://github.com/vitessio/vitess/pull/14863) +#### VTAdmin + * Bump @cypress/request and cypress in /vitess-mixin/e2e [#14038](https://github.com/vitessio/vitess/pull/14038) + * Bump postcss from 8.4.21 to 8.4.31 in /web/vtadmin [#14173](https://github.com/vitessio/vitess/pull/14173) + * Bump @babel/traverse from 7.21.4 to 7.23.2 in /web/vtadmin [#14304](https://github.com/vitessio/vitess/pull/14304) + * Bump @adobe/css-tools from 4.3.1 to 4.3.2 in /web/vtadmin [#14654](https://github.com/vitessio/vitess/pull/14654) + * build(deps-dev): bump vite from 4.2.3 to 4.5.2 in /web/vtadmin [#15001](https://github.com/vitessio/vitess/pull/15001) +### Documentation +#### CLI + * Bypass cobra completion commands so they still function [#14217](https://github.com/vitessio/vitess/pull/14217) +#### Documentation + * release notes: edit summary for consistency [#14319](https://github.com/vitessio/vitess/pull/14319) + * release notes: add FK import to summary [#14518](https://github.com/vitessio/vitess/pull/14518) + * 19.0 release notes: ExecuteFetchAsDBA breaking change [#15021](https://github.com/vitessio/vitess/pull/15021) +#### General + * Add summary changes for recent PRs [#14598](https://github.com/vitessio/vitess/pull/14598) + * Add summary changes to indicate MySQL 5.7 is EOL and Vitess is dropping support for it in v19 [#14663](https://github.com/vitessio/vitess/pull/14663) +### Enhancement +#### Backup and Restore + * increase vtctlclient backupShard command success rate [#14604](https://github.com/vitessio/vitess/pull/14604) + * Backup: `--incremental-from-pos` supports backup name [#14923](https://github.com/vitessio/vitess/pull/14923) + * Incremental backup: do not error on empty backup [#15022](https://github.com/vitessio/vitess/pull/15022) +#### Build/CI + * CI: Re-enable FOSSA scan and add 
Codecov [#14333](https://github.com/vitessio/vitess/pull/14333) + * Automatic approval of `vitess-bot` clean backports [#14352](https://github.com/vitessio/vitess/pull/14352) + * Tell shellcheck to follow sourced files [#14377](https://github.com/vitessio/vitess/pull/14377) + * Add step to static check to ensure consistency of GHA workflows [#14724](https://github.com/vitessio/vitess/pull/14724) + * Add `release-19.0` to the auto go upgrade [#15157](https://github.com/vitessio/vitess/pull/15157) + * [release-19.0] Update paths filter action (#15254) [#15264](https://github.com/vitessio/vitess/pull/15264) +#### CLI + * VReplication: Add traffic state to vtctldclient workflow status output [#14280](https://github.com/vitessio/vitess/pull/14280) + * vtctldclient,grpcvtctldserver ApplySchema: return unknown params from grpcvtctldserver.ApplySchema, log them in vtctldclient.ApplySchema [#14672](https://github.com/vitessio/vitess/pull/14672) +#### Cluster management + * Add HealthCheck's `healthy` map to the VTGate UI [#14521](https://github.com/vitessio/vitess/pull/14521) + * Make vttablet wait for vt_dba user to be granted privileges [#14565](https://github.com/vitessio/vitess/pull/14565) + * Add wait for reading mycnf to prevent race [#14626](https://github.com/vitessio/vitess/pull/14626) + * Add log for error to help debug [#14632](https://github.com/vitessio/vitess/pull/14632) + * Take replication lag into account while selecting primary candidate [#14634](https://github.com/vitessio/vitess/pull/14634) + * Postpone waiting for dba grants after restore has succeeded [#14680](https://github.com/vitessio/vitess/pull/14680) + * vtctldclient: --strict rejects unknown vindex params in ApplyVSchema [#14862](https://github.com/vitessio/vitess/pull/14862) + * Respect tolerable replication lag even when the new primary has been provided in PRS [#15090](https://github.com/vitessio/vitess/pull/15090) + * [release-19.0] go/vt/wrangler: pass reparent options structs (#15251) 
[#15286](https://github.com/vitessio/vitess/pull/15286) +#### Docker + * Build and push Docker `vitess/lite` to DockerHub from GitHub Actions [#14243](https://github.com/vitessio/vitess/pull/14243) + * Build and push Docker `vitess/base` and component images to DockerHub from GitHub Actions [#14271](https://github.com/vitessio/vitess/pull/14271) + * Be more explicit in release notes regarding the deprecation of certain `vitess/lite` tags [#15040](https://github.com/vitessio/vitess/pull/15040) +#### Evalengine + * evalengine: Improve the typing situation for functions [#14533](https://github.com/vitessio/vitess/pull/14533) + * evalengine: Implement SUBSTRING [#14899](https://github.com/vitessio/vitess/pull/14899) + * evalengine: Implement FROM_DAYS [#15058](https://github.com/vitessio/vitess/pull/15058) + * evalengine: Implement TO_DAYS [#15065](https://github.com/vitessio/vitess/pull/15065) + * evalengine: Add MID alias [#15066](https://github.com/vitessio/vitess/pull/15066) + * evalEngine: Implement TIME_TO_SEC [#15094](https://github.com/vitessio/vitess/pull/15094) +#### Examples + * Tools: Remove dependencies installed by `make tools` [#14309](https://github.com/vitessio/vitess/pull/14309) + * Deprecate `mysqld` in `vitess/lite` and use `mysql:8.0.30` image for the operator [#14990](https://github.com/vitessio/vitess/pull/14990) +#### General + * build: Allow compilation on Windows [#14718](https://github.com/vitessio/vitess/pull/14718) +#### Observability + * Debug vars: Expose build version in `/debug/vars` [#14713](https://github.com/vitessio/vitess/pull/14713) + * [servenv] optional pprof endpoints [#14796](https://github.com/vitessio/vitess/pull/14796) + * vtgate: increment vtgate_warnings counter for non atomic commits [#15010](https://github.com/vitessio/vitess/pull/15010) + * query_executor: Record `WaitingForConnection` stat in all cases [#15073](https://github.com/vitessio/vitess/pull/15073) +#### Online DDL + * Online DDL: support DROP FOREIGN KEY 
statement [#14338](https://github.com/vitessio/vitess/pull/14338) + * Online DDL: revert considerations for migrations with foreign key constraints [#14368](https://github.com/vitessio/vitess/pull/14368) + * Enable Online DDL foreign key support (also in vtgate stress tests) when backing MySQL includes appropriate patch [#14370](https://github.com/vitessio/vitess/pull/14370) + * Online DDL: lint DDL strategy flags [#14373](https://github.com/vitessio/vitess/pull/14373) + * schemadiff: remove table name from auto-generated FK constraint name [#14385](https://github.com/vitessio/vitess/pull/14385) + * TableGC: speed up GC process via `RequestChecks()`. Utilized by Online DDL for artifact cleanup [#14431](https://github.com/vitessio/vitess/pull/14431) + * Support `fast_analyze_table` variable, introduced in public MySQL fork [#14494](https://github.com/vitessio/vitess/pull/14494) + * Online DDL: edit CONSTRAINT names in CREATE TABLE [#14517](https://github.com/vitessio/vitess/pull/14517) + * Online DDL: support migration cut-over backoff and forced cut-over [#14546](https://github.com/vitessio/vitess/pull/14546) + * ApplySchema: log selected flags [#14798](https://github.com/vitessio/vitess/pull/14798) + * schemadiff: using MySQL capabilities to analyze a SchemaDiff and whether changes are applicable instantly/immediately. [#14878](https://github.com/vitessio/vitess/pull/14878) + * OnlineDDL to use schemadiff version capabilities; refactor some `flavor` code. 
[#14883](https://github.com/vitessio/vitess/pull/14883) + * `schemadiff`: formalize `InstantDDLCapability` [#14900](https://github.com/vitessio/vitess/pull/14900) +#### Query Serving + * add option for warming reads to mirror primary read queries onto replicas from vtgates to warm bufferpools [#13206](https://github.com/vitessio/vitess/pull/13206) + * Add support for new lock syntax in MySQL8 [#13965](https://github.com/vitessio/vitess/pull/13965) + * gen4: Support explicit column aliases on derived tables [#14129](https://github.com/vitessio/vitess/pull/14129) + * Gracefully shutdown VTGate instances [#14219](https://github.com/vitessio/vitess/pull/14219) + * Mark non-unique lookup vindex as backfill to ignore vindex selection [#14227](https://github.com/vitessio/vitess/pull/14227) + * UNION column type coercion [#14245](https://github.com/vitessio/vitess/pull/14245) + * Add support for common table expressions [#14321](https://github.com/vitessio/vitess/pull/14321) + * Add cycle detection for foreign keys [#14339](https://github.com/vitessio/vitess/pull/14339) + * feat: support invisible columns [#14366](https://github.com/vitessio/vitess/pull/14366) + * Add support for more queries [#14369](https://github.com/vitessio/vitess/pull/14369) + * schemadiff: identify a FK sequential execution scenario, and more [#14397](https://github.com/vitessio/vitess/pull/14397) + * Add support for AVG on sharded queries [#14419](https://github.com/vitessio/vitess/pull/14419) + * Use hash joins when nested loop joins are not feasible [#14448](https://github.com/vitessio/vitess/pull/14448) + * Make `Foreign_key_checks` a Vitess Aware variable [#14484](https://github.com/vitessio/vitess/pull/14484) + * Add `SHOW VSCHEMA KEYSPACES` query [#14505](https://github.com/vitessio/vitess/pull/14505) + * Support unlimited number of ORs in `ExtractINFromOR` [#14566](https://github.com/vitessio/vitess/pull/14566) + * planbuilder: push down ordering through filter 
[#14583](https://github.com/vitessio/vitess/pull/14583) + * refactor the INSERT engine primitive [#14606](https://github.com/vitessio/vitess/pull/14606) + * Optimise hash joins [#14644](https://github.com/vitessio/vitess/pull/14644) + * schemadiff: granular foreign key reference errors [#14682](https://github.com/vitessio/vitess/pull/14682) + * schemadiff: pursue foreign key errors and proceed to build schema [#14705](https://github.com/vitessio/vitess/pull/14705) + * schemadiff: additional FK column type matching rules [#14751](https://github.com/vitessio/vitess/pull/14751) + * Fix order by and group by normalization [#14764](https://github.com/vitessio/vitess/pull/14764) + * reduce NOWAIT usage to tables with unique keys for foreign key plans [#14772](https://github.com/vitessio/vitess/pull/14772) + * vtgate: record warning for partially successful cross-shard commits [#14848](https://github.com/vitessio/vitess/pull/14848) + * Added support for group_concat and count distinct with multiple expressions [#14851](https://github.com/vitessio/vitess/pull/14851) + * Multi Table Delete Planner Support [#14855](https://github.com/vitessio/vitess/pull/14855) + * Improve sharded query routing for tuple list [#14892](https://github.com/vitessio/vitess/pull/14892) + * Make Schema Tracking case-sensitive [#14904](https://github.com/vitessio/vitess/pull/14904) + * Explain Statement plan improvement [#14928](https://github.com/vitessio/vitess/pull/14928) + * planner: support cross shard DELETE with LIMIT/ORDER BY [#14959](https://github.com/vitessio/vitess/pull/14959) + * `transaction_mode` variable to return flag default if unset [#15032](https://github.com/vitessio/vitess/pull/15032) + * evalengine: Implement LAST_DAY [#15038](https://github.com/vitessio/vitess/pull/15038) + * `schemadiff`: analyze and report foreign key loops/cycles [#15062](https://github.com/vitessio/vitess/pull/15062) + * Add support for multi table deletes with foreign keys 
[#15081](https://github.com/vitessio/vitess/pull/15081) + * Add support for delete planning with limits in presence of foreign keys [#15097](https://github.com/vitessio/vitess/pull/15097) +#### TabletManager + * Allow for passing in the MySQL shutdown timeout [#14568](https://github.com/vitessio/vitess/pull/14568) +#### Throttler + * Tablet throttler: post 18 refactoring, race condition fixes, unit & race testing, deprecation of HTTP checks [#14181](https://github.com/vitessio/vitess/pull/14181) +#### VReplication + * vreplication timeout query optimizer hints [#13840](https://github.com/vitessio/vitess/pull/13840) + * VReplication: Ensure that RowStreamer uses optimal index when possible [#13893](https://github.com/vitessio/vitess/pull/13893) + * go/vt/wrangler: add len(qr.Rows) check to no streams found log msg [#14062](https://github.com/vitessio/vitess/pull/14062) + * Migrate CreateLookupVindex and ExternalizeVindex to vtctldclient [#14086](https://github.com/vitessio/vitess/pull/14086) + * set vreplication net read and net write timeout session vars to high values [#14203](https://github.com/vitessio/vitess/pull/14203) + * allow tablet picker to exclude specified tablets from its candidate list [#14224](https://github.com/vitessio/vitess/pull/14224) + * VReplication: Add --all-cells flag to create sub-commands [#14341](https://github.com/vitessio/vitess/pull/14341) + * go/vt/wrangler: reduce VReplicationExec calls when getting copy state [#14375](https://github.com/vitessio/vitess/pull/14375) + * VStream: Skip vindex keyrange filtering when we can [#14384](https://github.com/vitessio/vitess/pull/14384) + * implement `--max-report-sample-rows` for VDiff [#14437](https://github.com/vitessio/vitess/pull/14437) + * VReplication VPlayer: support statement and transaction batching [#14502](https://github.com/vitessio/vitess/pull/14502) + * Snapshot connection: revert to explicit table locks when `FTWRL` is unavailable 
[#14578](https://github.com/vitessio/vitess/pull/14578) + * VReplication: Improve replication plan error messages [#14752](https://github.com/vitessio/vitess/pull/14752) + * VDiff: Support a max diff time for tables [#14786](https://github.com/vitessio/vitess/pull/14786) + * VDiff: Support diffing tables without a defined Primary Key [#14794](https://github.com/vitessio/vitess/pull/14794) +#### VTAdmin + * Optimize the GetWorkflows RPC [#14212](https://github.com/vitessio/vitess/pull/14212) +#### vtctldclient + * Support cluster bootstrapping in vtctldclient [#14315](https://github.com/vitessio/vitess/pull/14315) +#### vttestserver + * Make vttestserver docker image work with vtctldclient [#14665](https://github.com/vitessio/vitess/pull/14665) +### Feature Request +#### Build/CI + * Automatically update the Golang dependencies using a CRON [#14891](https://github.com/vitessio/vitess/pull/14891) +#### Cluster management + * [release-19.0] [vtctldclient] Add GetShardReplication (#15389) [#15390](https://github.com/vitessio/vitess/pull/15390) +#### Evalengine + * evalengine: implement AggregateEvalTypes [#15085](https://github.com/vitessio/vitess/pull/15085) +#### Query Serving + * Cache stream query plans in vttablet [#13264](https://github.com/vitessio/vitess/pull/13264) + * Foreign key on update action with non literal values [#14278](https://github.com/vitessio/vitess/pull/14278) + * `Replace into` statement plan with foreign keys : unsharded [#14396](https://github.com/vitessio/vitess/pull/14396) + * Enable REPLACE INTO engine and Fix Foreign key locking issue [#14532](https://github.com/vitessio/vitess/pull/14532) + * Add foreign key support for insert on duplicate key update [#14638](https://github.com/vitessio/vitess/pull/14638) + * Multi Table Delete Support: join with reference table [#14784](https://github.com/vitessio/vitess/pull/14784) + * `schemadiff`: `EnumReorderStrategy`, checking if enum or set values change ordinal 
[#15106](https://github.com/vitessio/vitess/pull/15106) +#### VReplication + * Provide subset of shards for certain VReplication Commands [#14873](https://github.com/vitessio/vitess/pull/14873) +#### VTAdmin + * vtadmin onlineddl endpoints [#15114](https://github.com/vitessio/vitess/pull/15114) + * [release-19.0] vtadmin onlineddl endpoints (#15114) [#15144](https://github.com/vitessio/vitess/pull/15144) +### Internal Cleanup +#### Backup and Restore + * vtbackup: Fix copy pasta typo in option description [#14664](https://github.com/vitessio/vitess/pull/14664) +#### Build/CI + * Typo fix and remove unsupported branch for go version upgrade matrix [#14896](https://github.com/vitessio/vitess/pull/14896) + * Reduce the frequency of the golang dependency upgrade CRON [#15008](https://github.com/vitessio/vitess/pull/15008) + * Remove codebeat badge [#15116](https://github.com/vitessio/vitess/pull/15116) +#### CLI + * Make vtctldclient mount command more standard [#14281](https://github.com/vitessio/vitess/pull/14281) + * remove deprecated flags from the codebase [#14544](https://github.com/vitessio/vitess/pull/14544) + * cleanup deprecated flag types in tabletenv [#14733](https://github.com/vitessio/vitess/pull/14733) +#### Cluster management + * Enable verbose logging for some more RPCs [#14770](https://github.com/vitessio/vitess/pull/14770) + * go/vt/topo: add error value to GetTablet logs [#14846](https://github.com/vitessio/vitess/pull/14846) +#### Docker + * Remove `MYSQL_FLAVOR` from all Docker images [#14159](https://github.com/vitessio/vitess/pull/14159) +#### Documentation + * Fix broken link in docker readme [#14222](https://github.com/vitessio/vitess/pull/14222) + * Mention roadmap planning/modification in the release process [#14254](https://github.com/vitessio/vitess/pull/14254) +#### Evalengine + * refactor: introduce evalengine type and use it [#14292](https://github.com/vitessio/vitess/pull/14292) + * sqlparser: export all Expr interfaces 
[#14371](https://github.com/vitessio/vitess/pull/14371) + * evalengine: Proper support for bit literals [#14374](https://github.com/vitessio/vitess/pull/14374) + * evalengine: fix numeric coercibility [#14473](https://github.com/vitessio/vitess/pull/14473) + * evalengine: Internal cleanup and consistency fixes [#14854](https://github.com/vitessio/vitess/pull/14854) +#### General + * chore: unnecessary use of fmt.Sprintf [#14328](https://github.com/vitessio/vitess/pull/14328) + * Miscellaneous typo fixes to comments [#14472](https://github.com/vitessio/vitess/pull/14472) + * Refactor: use NonEmpty() instead of !IsEmpty() [#14499](https://github.com/vitessio/vitess/pull/14499) + * Fix license header typo [#14630](https://github.com/vitessio/vitess/pull/14630) + * go/vt/vtgate: fix nilness issues [#14685](https://github.com/vitessio/vitess/pull/14685) + * go/vt/vttablet: fix nilness issues [#14686](https://github.com/vitessio/vitess/pull/14686) + * go/vt/vtadmin: fix nilness issues [#14687](https://github.com/vitessio/vitess/pull/14687) + * go/cache: fix nilness issues and unused code [#14688](https://github.com/vitessio/vitess/pull/14688) + * Keyspace ServedFrom: remove this deprecated attribute and related code [#14694](https://github.com/vitessio/vitess/pull/14694) + * go/vt/topo: fix nilness issues and unused variables [#14709](https://github.com/vitessio/vitess/pull/14709) + * go/vt/wrangler: fix nilness issues and unused variable [#14710](https://github.com/vitessio/vitess/pull/14710) + * go/vt/vtctl: fix nilness issues and error scopes [#14711](https://github.com/vitessio/vitess/pull/14711) + * mysql: Refactor out usage of servenv [#14732](https://github.com/vitessio/vitess/pull/14732) + * Remove servenv usage and config flags from collations [#14781](https://github.com/vitessio/vitess/pull/14781) + * Remove unused EventStreamer [#14783](https://github.com/vitessio/vitess/pull/14783) + * Cleanup of dead code 
[#14799](https://github.com/vitessio/vitess/pull/14799) + * go: resolve various nilness issues [#14803](https://github.com/vitessio/vitess/pull/14803) + * sqlparser: Refactor out servenv and inject everywhere [#14822](https://github.com/vitessio/vitess/pull/14822) + * Allow for building 32 bit libraries for subparts [#14841](https://github.com/vitessio/vitess/pull/14841) + * Improve links in README [#14867](https://github.com/vitessio/vitess/pull/14867) + * Use one canonical style for unlimited queries [#14870](https://github.com/vitessio/vitess/pull/14870) + * Fix a number of CodeQL warnings [#14882](https://github.com/vitessio/vitess/pull/14882) + * Update Go dependencies [#14888](https://github.com/vitessio/vitess/pull/14888) + * Modify the release instructions to properly clone Vitess when using the vtop examples [#14889](https://github.com/vitessio/vitess/pull/14889) + * Dead code cleanup [#14894](https://github.com/vitessio/vitess/pull/14894) + * Refactor out more usage of servenv for mysql version [#14938](https://github.com/vitessio/vitess/pull/14938) + * refac: deprecate `vitess/go/maps2` for `golang.org/x/exp/maps` [#14960](https://github.com/vitessio/vitess/pull/14960) + * vtenv: Introduce vtenv for passing in collation & parser information [#14994](https://github.com/vitessio/vitess/pull/14994) +#### Observability + * Remove some logs that are logging excessively on large clusters [#14825](https://github.com/vitessio/vitess/pull/14825) + * vstreamer: rm excessive logging [#14856](https://github.com/vitessio/vitess/pull/14856) +#### Online DDL + * New unified internal table names format, part 1: identifying and accepting new format tables [#14613](https://github.com/vitessio/vitess/pull/14613) +#### Query Serving + * Use panic instead of errors inside the operator package [#14085](https://github.com/vitessio/vitess/pull/14085) + * sqlparser: normalize IndexInfo [#14177](https://github.com/vitessio/vitess/pull/14177) + * refactor plan test cases 
[#14192](https://github.com/vitessio/vitess/pull/14192) + * Rename `BinaryIsAtVersion` to `BinaryIsAtLeastAtVersion` [#14269](https://github.com/vitessio/vitess/pull/14269) + * Refactor: foreign key in semantic analysis phase [#14273](https://github.com/vitessio/vitess/pull/14273) + * Rename Foreign Key enum values in VSchema and drop `FK_` prefix [#14274](https://github.com/vitessio/vitess/pull/14274) + * Refactor: New operator InsertionSelection to adhere to the operator model [#14286](https://github.com/vitessio/vitess/pull/14286) + * refactor: move more code from logical plans to ops [#14287](https://github.com/vitessio/vitess/pull/14287) + * evalengine: serialize to SQL [#14337](https://github.com/vitessio/vitess/pull/14337) + * vindexes: Efficient unicode hashing [#14395](https://github.com/vitessio/vitess/pull/14395) + * tx throttler: remove unused topology watchers [#14412](https://github.com/vitessio/vitess/pull/14412) + * sqlparser: Use KEY instead of INDEX for normalized form [#14416](https://github.com/vitessio/vitess/pull/14416) + * tx_throttler: delete topo watcher metric instead of deprecating [#14445](https://github.com/vitessio/vitess/pull/14445) + * Remove excessive VTGate logging of default planner selection [#14554](https://github.com/vitessio/vitess/pull/14554) + * refactor: minor cleanups in planner code [#14642](https://github.com/vitessio/vitess/pull/14642) + * planbuilder: clean up code [#14657](https://github.com/vitessio/vitess/pull/14657) + * Pass on vindex errors with wrap than overriding them [#14737](https://github.com/vitessio/vitess/pull/14737) + * refactor: remove more errors from operator planning [#14767](https://github.com/vitessio/vitess/pull/14767) + * Change variable name for better readability [#14771](https://github.com/vitessio/vitess/pull/14771) + * go/cache: use generics and remove unused API [#14850](https://github.com/vitessio/vitess/pull/14850) + * Export `convertMySQLVersionToCommentVersion` to use it in 
vitess-operator [#14988](https://github.com/vitessio/vitess/pull/14988) + * [release-19.0] schemadiff: Clean up MySQL version from diff hints (#15210) [#15213](https://github.com/vitessio/vitess/pull/15213) +#### TabletManager + * logging: log time taken for tablet initialization only once [#14597](https://github.com/vitessio/vitess/pull/14597) + * Replace use of `WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS` with `WAIT_FOR_EXECUTED_GTID_SET` [#14612](https://github.com/vitessio/vitess/pull/14612) +#### Throttler + * MaxReplicationLagModule.recalculateRate no longer fills the log [#14875](https://github.com/vitessio/vitess/pull/14875) +#### VReplication + * VReplication: VTTablet flag cleanup [#14297](https://github.com/vitessio/vitess/pull/14297) +#### vtctl + * Move all examples to vtctldclient [#14226](https://github.com/vitessio/vitess/pull/14226) +#### vtctldclient + * Fix typo for `--cells` flag help description in `ApplyRoutingRules` [#14721](https://github.com/vitessio/vitess/pull/14721) +### Performance +#### Evalengine + * Tiny Weights [#14402](https://github.com/vitessio/vitess/pull/14402) +#### General + * Replace usages of bytes.Buffer with strings.Builder [#14539](https://github.com/vitessio/vitess/pull/14539) + * [release-19.0] prevent vtctld from creating tons of S3 connections (#15296) [#15401](https://github.com/vitessio/vitess/pull/15401) +#### Query Serving + * Improved Connection Pooling [#14034](https://github.com/vitessio/vitess/pull/14034) + * schemadiff: improved heuristic for dependent migration permutation evaluation time [#14249](https://github.com/vitessio/vitess/pull/14249) + * mysql/conn: do not allocate during writes [#14482](https://github.com/vitessio/vitess/pull/14482) + * Use GetTabletsByCell in healthcheck [#14693](https://github.com/vitessio/vitess/pull/14693) + * mysql: do not allocate in parseOKPacket [#15067](https://github.com/vitessio/vitess/pull/15067) + * mysql: remove more allocations from parseOKPacket 
[#15082](https://github.com/vitessio/vitess/pull/15082) +#### Throttler + * Throttler: Use tmclient pool for CheckThrottler tabletmanager RPC [#14979](https://github.com/vitessio/vitess/pull/14979) +#### Topology + * go/vt/topo: enable concurrency for FindAllShardsInKeyspace [#14670](https://github.com/vitessio/vitess/pull/14670) + * Improve TopoServer Performance and Efficiency For Keyspace Shards [#15047](https://github.com/vitessio/vitess/pull/15047) +### Regression +#### Query Serving + * use aggregation engine over distinct engine when overlapping order by [#14359](https://github.com/vitessio/vitess/pull/14359) + * Performance Fixes for Vitess 18 [#14383](https://github.com/vitessio/vitess/pull/14383) + * tuple: serialized form [#14392](https://github.com/vitessio/vitess/pull/14392) + * planbuilder: use OR for not in comparisons [#14607](https://github.com/vitessio/vitess/pull/14607) + * add foreign key as part of set statement when reserved connection is needed [#14696](https://github.com/vitessio/vitess/pull/14696) + * fix: insert on duplicate key update missing BindVars [#14728](https://github.com/vitessio/vitess/pull/14728) + * Subquery inside aggregration function [#14844](https://github.com/vitessio/vitess/pull/14844) + * [release-19.0] Fix routing rule query rewrite (#15253) [#15259](https://github.com/vitessio/vitess/pull/15259) +#### Throttler + * [release-19.0] Enable 'heartbeat_on_demand_duration' in local/examples (#15204) [#15292](https://github.com/vitessio/vitess/pull/15292) +#### vttestserver + * [release-19.0] Fix logging issue when running in Docker with the syslog daemon disabled (#15176) [#15186](https://github.com/vitessio/vitess/pull/15186) +### Release +#### CLI + * [main] Add vtctldclient info to the 18.0 summary (#14259) [#14265](https://github.com/vitessio/vitess/pull/14265) +#### Documentation + * Add release instructions for Milestones [#14175](https://github.com/vitessio/vitess/pull/14175) + * Update v17 Release notes with a 
Breaking Change [#14215](https://github.com/vitessio/vitess/pull/14215) +#### General + * add release-18.0 to golang upgrade [#14133](https://github.com/vitessio/vitess/pull/14133) + * moving main to 19.0 snapshot [#14137](https://github.com/vitessio/vitess/pull/14137) + * update release notes on `main` after releases [#14171](https://github.com/vitessio/vitess/pull/14171) + * tooling: don't add bots to authors list [#14411](https://github.com/vitessio/vitess/pull/14411) + * move release notes of 18 to main [#14474](https://github.com/vitessio/vitess/pull/14474) + * v18.0.1 release notes to main [#14579](https://github.com/vitessio/vitess/pull/14579) + * port release notes of v18.0.2, v17.0.5 and v16.0.7 to main [#14840](https://github.com/vitessio/vitess/pull/14840) + * [release-19.0] Code Freeze for `v19.0.0-RC1` [#15137](https://github.com/vitessio/vitess/pull/15137) + * [release-19.0] Release of `v19.0.0-RC1` [#15139](https://github.com/vitessio/vitess/pull/15139) + * [release-19.0] Bump to `v19.0.0-SNAPSHOT` after the `v19.0.0-RC1` release [#15165](https://github.com/vitessio/vitess/pull/15165) + * [release-19.0] Code Freeze for `v19.0.0` [#15358](https://github.com/vitessio/vitess/pull/15358) +### Testing +#### Backup and Restore + * Add a retry to remove the vttablet directory during upgrade/downgrade backup tests [#14753](https://github.com/vitessio/vitess/pull/14753) +#### Build/CI + * Flakes: Shutdown vttablet before mysqld in backup tests [#14647](https://github.com/vitessio/vitess/pull/14647) + * Tests: Fix vdiff test breakage from concurrent merge [#14865](https://github.com/vitessio/vitess/pull/14865) + * Flakes: De-flake TestGatewayBufferingWhenPrimarySwitchesServingState [#14968](https://github.com/vitessio/vitess/pull/14968) + * Remove usage of additional test package [#15007](https://github.com/vitessio/vitess/pull/15007) + * Revert "exclude test from race" [#15014](https://github.com/vitessio/vitess/pull/15014) + * Added missing tests for the 
sqltypes package [#15056](https://github.com/vitessio/vitess/pull/15056) +#### CLI + * Fixed bug in flagutil package and added tests [#15046](https://github.com/vitessio/vitess/pull/15046) +#### General + * Reduce wait time in test helpers [#14476](https://github.com/vitessio/vitess/pull/14476) + * go/vt/tlstest: fix nilness issues [#14812](https://github.com/vitessio/vitess/pull/14812) + * Add logging for failing tests in CI [#14821](https://github.com/vitessio/vitess/pull/14821) + * bytes2: Add tests for StringUnsafe and Reset methods [#14940](https://github.com/vitessio/vitess/pull/14940) + * Tests: Add test in syslogger for `LOG_EMERG` level [#14942](https://github.com/vitessio/vitess/pull/14942) + * Add required tests for `go/acl` [#14943](https://github.com/vitessio/vitess/pull/14943) + * Add missing test for `go/bucketpool` [#14944](https://github.com/vitessio/vitess/pull/14944) + * test: adds test for acl [#14956](https://github.com/vitessio/vitess/pull/14956) + * tests: improve coverage for `go/bytes2/buffer.go` [#14958](https://github.com/vitessio/vitess/pull/14958) + * tests: add tests for `go/list` [#14962](https://github.com/vitessio/vitess/pull/14962) + * tests: add tests for `go/json2` [#14964](https://github.com/vitessio/vitess/pull/14964) + * tests: add tests for `go/protoutil/duration` [#14965](https://github.com/vitessio/vitess/pull/14965) + * tests: add tests to `go/mathutil` [#14969](https://github.com/vitessio/vitess/pull/14969) + * tests: add tests for `vitess/go/cmd/zk/internal/zkfilepath` [#14970](https://github.com/vitessio/vitess/pull/14970) + * unit test for go/sets/set.go [#14973](https://github.com/vitessio/vitess/pull/14973) + * Add required tests for `go/mysql/hex` [#14976](https://github.com/vitessio/vitess/pull/14976) + * Remove `AppendFloat` from `go/mysql/format` and add required tests [#14986](https://github.com/vitessio/vitess/pull/14986) + * tests: add tests for `go/sqlescape` 
[#14987](https://github.com/vitessio/vitess/pull/14987) + * tests: add tests for `go/slice` [#14989](https://github.com/vitessio/vitess/pull/14989) + * tests: Add tests for `go/textutil` [#14991](https://github.com/vitessio/vitess/pull/14991) + * tests: increase coverage for multiple files in `vitess/go/stats` to 100% [#14997](https://github.com/vitessio/vitess/pull/14997) + * tests: add tests for `zkfs` utilities [#15002](https://github.com/vitessio/vitess/pull/15002) + * Add missing tests for `go/event/syslogger` [#15005](https://github.com/vitessio/vitess/pull/15005) + * fix: `Unescape(Escape(str))` now returns the original string [#15009](https://github.com/vitessio/vitess/pull/15009) + * tests: add tests for `go/vt/hook` [#15015](https://github.com/vitessio/vitess/pull/15015) + * Added unit tests for the tools/codegen package [#15016](https://github.com/vitessio/vitess/pull/15016) + * Added unit tests for the tools/releases package [#15017](https://github.com/vitessio/vitess/pull/15017) + * tests: Add tests for `go/vt/external` [#15023](https://github.com/vitessio/vitess/pull/15023) + * Move test files to regular names again [#15037](https://github.com/vitessio/vitess/pull/15037) + * Add required tests for `go/unicode2` [#15051](https://github.com/vitessio/vitess/pull/15051) + * tests: add tests for `go/mathstats` [#15054](https://github.com/vitessio/vitess/pull/15054) + * Added tests for the go/vt/callinfo package [#15059](https://github.com/vitessio/vitess/pull/15059) + * Added tests for the vt/logz package [#15060](https://github.com/vitessio/vitess/pull/15060) + * Add required tests for `go/tb` [#15063](https://github.com/vitessio/vitess/pull/15063) + * [release-19.0] modernize various tests (#15184) [#15198](https://github.com/vitessio/vitess/pull/15198) +#### Query Serving + * Fix data race in `TestWarmingReads` [#14187](https://github.com/vitessio/vitess/pull/14187) + * vtgate: Allow more errors for the warning check 
[#14421](https://github.com/vitessio/vitess/pull/14421) + * vtgate: Allow additional errors in warnings test [#14461](https://github.com/vitessio/vitess/pull/14461) + * Foreign Key Fuzzer Benchmark [#14542](https://github.com/vitessio/vitess/pull/14542) + * test: enable test in downgrade testing [#14625](https://github.com/vitessio/vitess/pull/14625) + * Refactor Upgrade downgrade tests [#14782](https://github.com/vitessio/vitess/pull/14782) + * Add check to avoid runtime error and add tests for `go/mysql/fastparse` [#15000](https://github.com/vitessio/vitess/pull/15000) + * tests: add tests for `vt/vtgate/engine/opcode` [#15045](https://github.com/vitessio/vitess/pull/15045) + * tests: add tests to `go/vt/vtgate/semantics/bitset` [#15049](https://github.com/vitessio/vitess/pull/15049) + * Added test for AnalyzeStrict [#15126](https://github.com/vitessio/vitess/pull/15126) +#### Throttler + * Throttler: refactor global configuration setting as throttler member [#14853](https://github.com/vitessio/vitess/pull/14853) + * Throttler: fix race conditions in Operate() termination and in tests [#14971](https://github.com/vitessio/vitess/pull/14971) +#### Topology + * FlakyFix: `TestZk2Topo` [#14162](https://github.com/vitessio/vitess/pull/14162) +#### VReplication + * VReplication: extended e2e test for workflows with tables containing foreign key constraints [#14327](https://github.com/vitessio/vitess/pull/14327) + * TestStreamMigrateMainflow: fix panic in test [#14420](https://github.com/vitessio/vitess/pull/14420) + * Flaky TestFKExtWorkflow: fix Foreign Key stress test flakiness [#14714](https://github.com/vitessio/vitess/pull/14714) + * Some VReplication e2e Refactoring [#14735](https://github.com/vitessio/vitess/pull/14735) + * Test: Take test host/runner specs into account for VDiff diff duration test [#14868](https://github.com/vitessio/vitess/pull/14868) + * vtctldclient CLI validation: Add e2e test to check that options to the vtctldclient commands are supported 
[#14957](https://github.com/vitessio/vitess/pull/14957) + * [release-19.0] VtctldClient Reshard: add e2e tests to confirm CLI options and fix discovered issues. (#15353) [#15364](https://github.com/vitessio/vitess/pull/15364) +#### vtctl + * Reduce flakiness in TestShardReplicationPositions [#14708](https://github.com/vitessio/vitess/pull/14708) + diff --git a/changelog/19.0/19.0.0/release_notes.md b/changelog/19.0/19.0.0/release_notes.md new file mode 100644 index 00000000000..98603d4240a --- /dev/null +++ b/changelog/19.0/19.0.0/release_notes.md @@ -0,0 +1,229 @@ +# Release of Vitess v19.0.0 +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Dropping Support for MySQL 5.7](#drop-support-mysql57)** + - **[Deprecations and Deletions](#deprecations-and-deletions)** + - [VTTablet Flags](#vttablet-flags) + - [Docker Image vitess/lite](#deprecation-vitess-lite-mysqld) + - [Explain Statement Format](#explain-stmt-format) + - **[Breaking Changes](#breaking-changes)** + - [ExecuteFetchAsDBA rejects multi-statement SQL](#execute-fetch-as-dba-reject-multi) + - **[New Stats](#new-stats)** + - [Stream Consolidations](#stream-consolidations) + - [Build Version in `/debug/vars`](#build-version-in-debug-vars) + - **[Planned Reparent Shard](#planned-reparent-shard)** + - [`--tolerable-replication-lag` Sub-flag](#tolerable-repl-lag) + - **[Query Compatibility](#query-compatibility)** + - [Multi Table Delete Support](#multi-table-delete) + - [`SHOW VSCHEMA KEYSPACES` Query](#show-vschema-keyspaces) + - [`FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable](#fk-checks-vitess-aware) + - [Explain Statement](#explain-statement) + - [Partial Multi-shard Commit Warnings](#partial-multi-shard-commit-warnings) + - [New Lock Syntax](#lock-syntax) + - [Support for AVG()](#avg-support) + - [Support for non-recursive CTEs](#cte-support) + - **[Vttestserver](#vttestserver)** + - [`--vtcombo-bind-host` flag](#vtcombo-bind-host) +- **[Minor 
Changes](#minor-changes)** + - **[Apply VSchema](#apply-vschema)** + - [`--strict` sub-flag and `strict` gRPC field](#strict-flag-and-field) + +## Major Changes + +### Dropping Support for MySQL 5.7 + +Oracle has marked MySQL 5.7 end of life as of October 2023. Vitess is also dropping support for MySQL 5.7 from v19 onwards. Users are advised to upgrade to MySQL 8.0 while on v18 version of Vitess before +upgrading to v19. + +Vitess will however, continue to support importing from MySQL 5.7 into Vitess even in v19. + + +### Deprecations and Deletions + +- The `MYSQL_FLAVOR` environment variable is now removed from all Docker Images. + +#### VTTablet Flags + +- The following flags — which were deprecated in Vitess 7.0 — have been removed: +`--vreplication_healthcheck_topology_refresh`, `--vreplication_healthcheck_retry_delay`, and `--vreplication_healthcheck_timeout`. +- The `--vreplication_tablet_type` flag is now deprecated and ignored. + +#### Docker Image vitess/lite + +The `mysqld` binary is now deprecated in the `vitess/lite` Docker image and will be removed in a future release. +This means that the MySQL/Percona version specific image tags for the `vitess/lite` image are deprecated. + +Below is a full list of available tags for `v19.0.0` and their deprecation status: + +| Image | Deprecated | +|---------------------------------|------------| +| `vitess/lite:v19.0.0` | NO | +| `vitess/lite:v19.0.0-mysql57` | YES | +| `vitess/lite:v19.0.0-mysql80` | YES | +| `vitess/lite:v19.0.0-percona57` | YES | +| `vitess/lite:v19.0.0-percona80` | YES | + +If you are currently using `vitess/lite` as your `mysqld` image in your vitess-operator deployment we invite you to use an official MySQL image, such as `mysql:8.0.30`. 
+ +Below is an example of a kubernetes yaml file before and after upgrading to an official MySQL image: + +```yaml +# before: + +# the image used here includes MySQL 8.0.30 and its binaries + + mysqld: + mysql80Compatible: vitess/lite:v19.0.0-mysql80 +``` +```yaml +# after: + +# if we still want to use MySQL 8.0.30, we now have to use the +# official MySQL image with the 8.0.30 tag as shown below + + mysqld: + mysql80Compatible: mysql:8.0.30 # or even mysql:8.0.34 for instance +``` + +#### Explain Statement Format + +Explain statement format `vitess` and `vexplain` were deprecated in v16 and removed in v19 version. +Use [VExplain Statement](https://vitess.io/docs/19.0/user-guides/sql/vexplain/) for understanding Vitess plans. + +### Breaking Changes + +#### ExecuteFetchAsDBA rejects multi-statement SQL + +`vtctldclient ExecuteFetchAsDBA` (and similarly the `vtctl` and `vtctlclient` commands) now reject multi-statement SQL with error. + +For example, `vtctldclient ExecuteFetchAsDBA my-tablet "stop replica; change replication source to auto_position=1; start replica"` will return an error, without attempting to execute any of these queries. + +Previously, `ExecuteFetchAsDBA` silently accepted multi-statement SQL. It would (attempt to) execute all of them, but: + +- It would only indicate error for the first statement. Errors on 2nd, 3rd, ... statements were silently ignored. +- It would not consume the result sets of the 2nd, 3rd, ... statements. It would then return the used connection to the pool in a dirty state. Any further query that happens to take that connection out of the pool could get unexpected results. +- As another side effect, multi-statement schema changes would cause schema to be reloaded with only the first change, leaving the cached schema inconsistent with the underlying database. + +`ExecuteFetchAsDBA` does allow a specific use case of multi-statement SQL, which is where all statements are in the form of `CREATE TABLE` or `CREATE VIEW`. 
This is to support a common pattern of schema initialization, formalized in `ApplySchema --batch-size` which uses `ExecuteFetchAsDBA` under the hood. + +### New Stats + +#### Stream Consolidations + +Prior to 19.0 VTTablet reported how much time non-streaming executions spend waiting for consolidations to occur. In 19.0, VTTablet reports a similar stat for streaming executions in `/debug/vars` stat `Waits.Histograms.StreamConsolidations`. + +#### Build Version in `/debug/vars` + +The build version (e.g., `19.0.0-SNAPSHOT`) has been added to `/debug/vars`, allowing users to programmatically inspect Vitess components' build version at runtime. + +### Planned Reparent Shard + +#### `--tolerable-replication-lag` Sub-flag + +A new sub-flag `--tolerable-replication-lag` has been added to the command `PlannedReparentShard` that allows users to specify the amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. +This feature is opt-in and not specifying this sub-flag makes Vitess ignore the replication lag entirely. + +A new flag in VTOrc with the same name has been added to control the behaviour of the PlannedReparentShard calls that VTOrc issues. + +### Query Compatibility + +#### Multi Table Delete Support + +Support is added for sharded multi-table delete with target on single table using multiple table join. + +Example: `Delete t1 from t1 join t2 on t1.id = t2.id join t3 on t1.col = t3.col where t3.foo = 5 and t2.bar = 7` + +More details about how it works are available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/delete.html) + +#### `SHOW VSCHEMA KEYSPACES` Query + +A SQL query, `SHOW VSCHEMA KEYSPACES` is now supported in Vitess. This query prints the vschema information +for all the keyspaces. It is useful for seeing the foreign key mode, whether the keyspace is sharded, and if there is an +error in the VSchema for the keyspace. 
+ +An example output of the query looks like - +```sql +mysql> show vschema keyspaces; ++----------+---------+-------------+---------+ +| Keyspace | Sharded | Foreign Key | Comment | ++----------+---------+-------------+---------+ +| ks | true | managed | | +| uks | false | managed | | ++----------+---------+-------------+---------+ +2 rows in set (0.01 sec) +``` + +#### `FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable + +When VTGate receives a query to change the `FOREIGN_KEY_CHECKS` value for a session, instead of sending the value down to MySQL, VTGate now keeps track of the value and changes the queries by adding `SET_VAR(FOREIGN_KEY_CHECKS=On/Off)` style query optimizer hints wherever required. + +#### Explain Statement + +`Explain` statement can handle routed table queries now. `Explain` is unsupported when the tables involved in the query refer to more than one keyspace. Users should use [VExplain Statement](https://vitess.io/docs/19.0/user-guides/sql/vexplain/) in those cases. + +#### Partial Multi-shard Commit Warnings + +When using `multi` transaction mode (the default), it is possible for Vitess to successfully commit to one shard, but fail to commit to a subsequent shard, thus breaking the atomicity of a multi-shard transaction. 
+ +In `v19.0`, VTGate reports partial-success commits in warnings, e.g.: + +```mysql +mysql> commit; +ERROR 1317 (70100): target: customer.-80.primary: vttablet: rpc error: code = Aborted desc = transaction 1703182545849001001: ended at 2023-12-21 14:07:41.515 EST (exceeded timeout: 30s) (CallerID: userData1) +mysql> show warnings; ++---------+------+----------------------------------------------------------+ +| Level | Code | Message | ++---------+------+----------------------------------------------------------+ +| Warning | 301 | multi-db commit failed after committing to 1 shards: 80- | ++---------+------+----------------------------------------------------------+ +1 row in set, 1 warning (0.00 sec) +``` + +### Vttestserver + +#### `--vtcombo-bind-host` flag + +A new flag `--vtcombo-bind-host` has been added to vttestserver that allows the users to configure the bind host that vtcombo uses. This is especially useful when running vttestserver as a docker image and you want to run vtctld commands and look at the vtcombo `/debug/status` dashboard. + +### New lock syntax + +Vitess now supports the following LOCK syntax + +```sql +SELECT .. FOR SHARE (NOWAIT|SKIP LOCKED) +SELECT .. FOR UPDATE (NOWAIT|SKIP LOCKED) +``` + +### Support for AVG() aggregation function + +Vtgate can now evaluate `AVG` on sharded keyspaces, by using a combination of `SUM/COUNT` + +### Support for non-recursive CTEs + +Common table expressions that are not recursive can now be used. + +```sql +with userCount as ( + select id, count(*) as nr from user group by id) +select ref.col, userCount.nr +from ref join userCount on ref.user_id = userCount.id +``` + +## Minor Changes + +### Apply VSchema + +#### `--strict` sub-flag and `strict` gRPC field + +A new sub-flag `--strict` has been added to the command `ApplyVSchema` `vtctl` command that produces an error if unknown params are found in any Vindexes. An equivalent `strict` field has been added to the `ApplyVSchema` gRPC `vtctld` command. 
+ +------------ +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.0/changelog.md). + +The release includes 461 merged Pull Requests. + +Thanks to all our contributors: @ChaitanyaD48, @EshaanAgg, @FirePing32, @GuptaManan100, @Its-Maniaco, @Maniktherana, @Manni-99, @MrFabio, @VaibhavMalik4187, @ajm188, @aparajon, @app/dependabot, @app/github-actions, @app/vitess-bot, @aquarapid, @arthurschreiber, @austenLacy, @beingnoble03, @brendar, @davidpiegza, @dbussink, @deepthi, @derekperkins, @ejortegau, @frouioui, @gerayking, @glokta1, @harshit-gangal, @iheanyi, @jwangace, @lixin963, @mattlord, @mattrobenolt, @maxenglander, @mcrauwel, @mdlayher, @olyazavr, @pbibra, @pnacht, @rajivharlalka, @ravicodelabs, @rbranson, @rohit-nayak-ps, @samanthadrago, @shlomi-noach, @skullface, @systay, @testwill, @tycol7, @vmg, @wangweicugw, @williammartin, @wlx5575 + diff --git a/changelog/19.0/19.0.0/summary.md b/changelog/19.0/19.0.0/summary.md index 5d413c25cae..498457e0404 100644 --- a/changelog/19.0/19.0.0/summary.md +++ b/changelog/19.0/19.0.0/summary.md @@ -3,21 +3,218 @@ ### Table of Contents - **[Major Changes](#major-changes)** + - **[Dropping Support for MySQL 5.7](#drop-support-mysql57)** - **[Deprecations and Deletions](#deprecations-and-deletions)** - - **[Docker](#docker)** - - [New MySQL Image](#mysql-image) + - [VTTablet Flags](#vttablet-flags) + - [Docker Image vitess/lite](#deprecation-vitess-lite-mysqld) + - [Explain Statement Format](#explain-stmt-format) + - **[Breaking Changes](#breaking-changes)** + - [ExecuteFetchAsDBA rejects multi-statement SQL](#execute-fetch-as-dba-reject-multi) + - **[New Stats](#new-stats)** + - [Stream Consolidations](#stream-consolidations) + - [Build Version in `/debug/vars`](#build-version-in-debug-vars) + - **[Planned Reparent Shard](#planned-reparent-shard)** + - [`--tolerable-replication-lag` Sub-flag](#tolerable-repl-lag) + - **[Query 
Compatibility](#query-compatibility)** + - [Multi Table Delete Support](#multi-table-delete) + - [`SHOW VSCHEMA KEYSPACES` Query](#show-vschema-keyspaces) + - [`FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable](#fk-checks-vitess-aware) + - [Explain Statement](#explain-statement) + - [Partial Multi-shard Commit Warnings](#partial-multi-shard-commit-warnings) + - [New Lock Syntax](#lock-syntax) + - [Support for AVG()](#avg-support) + - [Support for non-recursive CTEs](#cte-support) + - **[Vttestserver](#vttestserver)** + - [`--vtcombo-bind-host` flag](#vtcombo-bind-host) +- **[Minor Changes](#minor-changes)** + - **[Apply VSchema](#apply-vschema)** + - [`--strict` sub-flag and `strict` gRPC field](#strict-flag-and-field) ## Major Changes +### Dropping Support for MySQL 5.7 + +Oracle has marked MySQL 5.7 end of life as of October 2023. Vitess is also dropping support for MySQL 5.7 from v19 onwards. Users are advised to upgrade to MySQL 8.0 while on v18 version of Vitess before +upgrading to v19. + +Vitess will however, continue to support importing from MySQL 5.7 into Vitess even in v19. + + ### Deprecations and Deletions - The `MYSQL_FLAVOR` environment variable is now removed from all Docker Images. -### Docker +#### VTTablet Flags + +- The following flags — which were deprecated in Vitess 7.0 — have been removed: +`--vreplication_healthcheck_topology_refresh`, `--vreplication_healthcheck_retry_delay`, and `--vreplication_healthcheck_timeout`. +- The `--vreplication_tablet_type` flag is now deprecated and ignored. + +#### Docker Image vitess/lite + +The `mysqld` binary is now deprecated in the `vitess/lite` Docker image and will be removed in a future release. +This means that the MySQL/Percona version specific image tags for the `vitess/lite` image are deprecated. 
+ +Below is a full list of available tags for `v19.0.0` and their deprecation status: + +| Image | Deprecated | +|---------------------------------|------------| +| `vitess/lite:v19.0.0` | NO | +| `vitess/lite:v19.0.0-mysql57` | YES | +| `vitess/lite:v19.0.0-mysql80` | YES | +| `vitess/lite:v19.0.0-percona57` | YES | +| `vitess/lite:v19.0.0-percona80` | YES | + +If you are currently using `vitess/lite` as your `mysqld` image in your vitess-operator deployment we invite you to use an official MySQL image, such as `mysql:8.0.30`. + +Below is an example of a kubernetes yaml file before and after upgrading to an official MySQL image: + +```yaml +# before: + +# the image used here includes MySQL 8.0.30 and its binaries + + mysqld: + mysql80Compatible: vitess/lite:v19.0.0-mysql80 +``` +```yaml +# after: + +# if we still want to use MySQL 8.0.30, we now have to use the +# official MySQL image with the 8.0.30 tag as shown below + + mysqld: + mysql80Compatible: mysql:8.0.30 # or even mysql:8.0.34 for instance +``` + +#### Explain Statement Format + +Explain statement format `vitess` and `vexplain` were deprecated in v16 and removed in v19 version. +Use [VExplain Statement](https://vitess.io/docs/19.0/user-guides/sql/vexplain/) for understanding Vitess plans. + +### Breaking Changes + +#### ExecuteFetchAsDBA rejects multi-statement SQL + +`vtctldclient ExecuteFetchAsDBA` (and similarly the `vtctl` and `vtctlclient` commands) now reject multi-statement SQL with error. + +For example, `vtctldclient ExecuteFetchAsDBA my-tablet "stop replica; change replication source to auto_position=1; start replica"` will return an error, without attempting to execute any of these queries. + +Previously, `ExecuteFetchAsDBA` silently accepted multi-statement SQL. It would (attempt to) execute all of them, but: + +- It would only indicate error for the first statement. Errors on 2nd, 3rd, ... statements were silently ignored. +- It would not consume the result sets of the 2nd, 3rd, ... 
statements. It would then return the used connection to the pool in a dirty state. Any further query that happens to take that connection out of the pool could get unexpected results. +- As another side effect, multi-statement schema changes would cause schema to be reloaded with only the first change, leaving the cached schema inconsistent with the underlying database. + +`ExecuteFetchAsDBA` does allow a specific use case of multi-statement SQL, which is where all statements are in the form of `CREATE TABLE` or `CREATE VIEW`. This is to support a common pattern of schema initialization, formalized in `ApplySchema --batch-size` which uses `ExecuteFetchAsDBA` under the hood. + +### New Stats + +#### Stream Consolidations + +Prior to 19.0 VTTablet reported how much time non-streaming executions spend waiting for consolidations to occur. In 19.0, VTTablet reports a similar stat for streaming executions in `/debug/vars` stat `Waits.Histograms.StreamConsolidations`. + +#### Build Version in `/debug/vars` + +The build version (e.g., `19.0.0-SNAPSHOT`) has been added to `/debug/vars`, allowing users to programmatically inspect Vitess components' build version at runtime. + +### Planned Reparent Shard + +#### `--tolerable-replication-lag` Sub-flag + +A new sub-flag `--tolerable-replication-lag` has been added to the command `PlannedReparentShard` that allows users to specify the amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. +This feature is opt-in and not specifying this sub-flag makes Vitess ignore the replication lag entirely. + +A new flag in VTOrc with the same name has been added to control the behaviour of the PlannedReparentShard calls that VTOrc issues. + +### Query Compatibility + +#### Multi Table Delete Support + +Support is added for sharded multi-table delete with target on single table using multiple table join. 
+ +Example: `Delete t1 from t1 join t2 on t1.id = t2.id join t3 on t1.col = t3.col where t3.foo = 5 and t2.bar = 7` + +More details about how it works are available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/delete.html) + +#### `SHOW VSCHEMA KEYSPACES` Query + +A SQL query, `SHOW VSCHEMA KEYSPACES` is now supported in Vitess. This query prints the vschema information +for all the keyspaces. It is useful for seeing the foreign key mode, whether the keyspace is sharded, and if there is an +error in the VSchema for the keyspace. + +An example output of the query looks like - +```sql +mysql> show vschema keyspaces; ++----------+---------+-------------+---------+ +| Keyspace | Sharded | Foreign Key | Comment | ++----------+---------+-------------+---------+ +| ks | true | managed | | +| uks | false | managed | | ++----------+---------+-------------+---------+ +2 rows in set (0.01 sec) +``` + +#### `FOREIGN_KEY_CHECKS` is now a Vitess Aware Variable + +When VTGate receives a query to change the `FOREIGN_KEY_CHECKS` value for a session, instead of sending the value down to MySQL, VTGate now keeps track of the value and changes the queries by adding `SET_VAR(FOREIGN_KEY_CHECKS=On/Off)` style query optimizer hints wherever required. + +#### Explain Statement + +`Explain` statement can handle routed table queries now. `Explain` is unsupported when the tables involved in the query refer to more than one keyspace. Users should use [VExplain Statement](https://vitess.io/docs/19.0/user-guides/sql/vexplain/) in those cases. + +#### Partial Multi-shard Commit Warnings + +When using `multi` transaction mode (the default), it is possible for Vitess to successfully commit to one shard, but fail to commit to a subsequent shard, thus breaking the atomicity of a multi-shard transaction. 
+ +In `v19.0`, VTGate reports partial-success commits in warnings, e.g.: + +```mysql +mysql> commit; +ERROR 1317 (70100): target: customer.-80.primary: vttablet: rpc error: code = Aborted desc = transaction 1703182545849001001: ended at 2023-12-21 14:07:41.515 EST (exceeded timeout: 30s) (CallerID: userData1) +mysql> show warnings; ++---------+------+----------------------------------------------------------+ +| Level | Code | Message | ++---------+------+----------------------------------------------------------+ +| Warning | 301 | multi-db commit failed after committing to 1 shards: 80- | ++---------+------+----------------------------------------------------------+ +1 row in set, 1 warning (0.00 sec) +``` + +### Vttestserver + +#### `--vtcombo-bind-host` flag + +A new flag `--vtcombo-bind-host` has been added to vttestserver that allows the users to configure the bind host that vtcombo uses. This is especially useful when running vttestserver as a docker image and you want to run vtctld commands and look at the vtcombo `/debug/status` dashboard. + +### New lock syntax + +Vitess now supports the following LOCK syntax + +```sql +SELECT .. FOR SHARE (NOWAIT|SKIP LOCKED) +SELECT .. FOR UPDATE (NOWAIT|SKIP LOCKED) +``` + +### Support for AVG() aggregation function + +Vtgate can now evaluate `AVG` on sharded keyspaces, by using a combination of `SUM/COUNT` + +### Support for non-recursive CTEs + +Common table expressions that are not recursive can now be used. + +```sql +with userCount as ( + select id, count(*) as nr from user group by id) +select ref.col, userCount.nr +from ref join userCount on ref.user_id = userCount.id +``` + +## Minor Changes -#### New MySQL Image +### Apply VSchema -In `v19.0` the Vitess team is shipping a new image: `vitess/mysql`. -This lightweight image is a replacement of `vitess/lite` to only run `mysqld`. 
+#### `--strict` sub-flag and `strict` gRPC field -Several tags are available to let you choose what version of MySQL you want to use: `vitess/mysql:8.0.30`, `vitess/mysql:8.0.34`. +A new sub-flag `--strict` has been added to the command `ApplyVSchema` `vtctl` command that produces an error if unknown params are found in any Vindexes. An equivalent `strict` field has been added to the `ApplyVSchema` gRPC `vtctld` command. diff --git a/changelog/19.0/19.0.1/changelog.md b/changelog/19.0/19.0.1/changelog.md new file mode 100644 index 00000000000..e709c56394a --- /dev/null +++ b/changelog/19.0/19.0.1/changelog.md @@ -0,0 +1,26 @@ +# Changelog of Vitess v19.0.1 + +### Bug fixes +#### Backup and Restore + * [release-19.0] Ensure that WithParams keeps the transport (#15421) [#15422](https://github.com/vitessio/vitess/pull/15422) +#### Query Serving + * [release-19.0] engine: fix race in concatenate (#15454) [#15461](https://github.com/vitessio/vitess/pull/15461) + * [release-19.0] Fix view tracking on sharded keyspace (#15436) [#15477](https://github.com/vitessio/vitess/pull/15477) +### CI/Build +#### Build/CI + * [release-19.0] Ensure to use latest golangci-lint (#15413) [#15414](https://github.com/vitessio/vitess/pull/15414) + * [release-19.0] bump `github.com/golang/protobuf` to `v1.5.4` (#15426) [#15428](https://github.com/vitessio/vitess/pull/15428) + * [release-19.0] Update all actions setup to latest versions (#15443) [#15446](https://github.com/vitessio/vitess/pull/15446) +#### Online DDL + * [release-19.0] `onlineddl_scheduler` test: fix flakiness in artifact cleanup test (#15396) [#15399](https://github.com/vitessio/vitess/pull/15399) +### Documentation +#### Documentation + * [release-19.0] Fix docs for unmanaged tablets (#15437) [#15474](https://github.com/vitessio/vitess/pull/15474) +### Release +#### General + * [release-19.0] Bump to `v19.0.1-SNAPSHOT` after the `v19.0.0` release [#15418](https://github.com/vitessio/vitess/pull/15418) + * [release-19.0] 
Code Freeze for `v19.0.1` [#15481](https://github.com/vitessio/vitess/pull/15481) +### Testing +#### Build/CI + * [release-19.0] CI: Address data races on memorytopo Conn.closed (#15365) [#15371](https://github.com/vitessio/vitess/pull/15371) + diff --git a/changelog/19.0/19.0.1/release_notes.md b/changelog/19.0/19.0.1/release_notes.md new file mode 100644 index 00000000000..d878c9beba2 --- /dev/null +++ b/changelog/19.0/19.0.1/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v19.0.1 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.1/changelog.md). + +The release includes 11 merged Pull Requests. + +Thanks to all our contributors: @app/vitess-bot, @systay + diff --git a/changelog/19.0/19.0.3/changelog.md b/changelog/19.0/19.0.3/changelog.md new file mode 100644 index 00000000000..f5efcd1ac7b --- /dev/null +++ b/changelog/19.0/19.0.3/changelog.md @@ -0,0 +1,37 @@ +# Changelog of Vitess v19.0.3 + +### Bug fixes +#### Backup and Restore +* [release-19.0] Configurable incremental restore files path (#15451) [#15564](https://github.com/vitessio/vitess/pull/15564) +#### Build/CI +* CI: Upgrade/Downgrade use N+1 version of vtctld when testing with N+1 of vttablet [#15631](https://github.com/vitessio/vitess/pull/15631) +#### Evalengine +* [release-19.0] evalengine: Ensure to pass down the precision (#15611) [#15612](https://github.com/vitessio/vitess/pull/15612) +* [release-19.0] evalengine: Fix additional time type handling (#15614) [#15616](https://github.com/vitessio/vitess/pull/15616) +#### Query Serving +* [release-19.0] Fix aliasing in routes that have a derived table (#15550) [#15554](https://github.com/vitessio/vitess/pull/15554) +* [release-19.0] bugfix: handling of ANDed join predicates (#15551) [#15557](https://github.com/vitessio/vitess/pull/15557) +* [release-19.0] Fail insert when primary vindex cannot be mapped to a shard (#15500) 
[#15573](https://github.com/vitessio/vitess/pull/15573) +#### Throttler +* [release-19.0] Dedicated poolDialer logic for VTOrc, throttler (#15562) [#15567](https://github.com/vitessio/vitess/pull/15567) +#### VReplication +* [release-19.0] VReplication: Fix workflow update changed handling (#15621) [#15629](https://github.com/vitessio/vitess/pull/15629) +### CI/Build +#### Build/CI +* [release-19.0] Update to latest CodeQL (#15530) [#15534](https://github.com/vitessio/vitess/pull/15534) +#### General +* [release-19.0] Upgrade go version to go1.22.2 [#15641](https://github.com/vitessio/vitess/pull/15641) +### Regression +#### Query Serving +* [release-19.0] fix: remove keyspace from column during query builder (#15514) [#15517](https://github.com/vitessio/vitess/pull/15517) +### Release +#### General +* [release-19.0] Bump to `v19.0.2-SNAPSHOT` after the `v19.0.1` release [#15491](https://github.com/vitessio/vitess/pull/15491) +* [release-19.0] Code Freeze for `v19.0.2` [#15644](https://github.com/vitessio/vitess/pull/15644) + * [release-19.0] Bump to `v19.0.3-SNAPSHOT` after the `v19.0.2` release [#15648](https://github.com/vitessio/vitess/pull/15648) +### Testing +#### VReplication +* [release-19.0] Fix vtctldclient SwitchReads related bugs and move the TestBasicV2Workflows e2e test to vtctldclient (#15579) [#15584](https://github.com/vitessio/vitess/pull/15584) +### Internal Cleanup +#### General + * Update dependencies for golang.org/x/net to latest [#15651](https://github.com/vitessio/vitess/pull/15651) diff --git a/changelog/19.0/19.0.3/release_notes.md b/changelog/19.0/19.0.3/release_notes.md new file mode 100644 index 00000000000..ea6e370c25b --- /dev/null +++ b/changelog/19.0/19.0.3/release_notes.md @@ -0,0 +1,14 @@ +# Release of Vitess v19.0.3 + +## Removal of v19.0.2 + +The maintainers team decided to remove the `v19.0.2` release as it contained a CVE fixed in `v19.0.3`. 
+ +--- + +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.3/changelog.md). + +The release includes 16 merged Pull Requests. + +Thanks to all our contributors: @GrahamCampbell, @app/vitess-bot, @frouioui, @rohit-nayak-ps, @shlomi-noach, @systay + diff --git a/changelog/19.0/19.0.4/changelog.md b/changelog/19.0/19.0.4/changelog.md new file mode 100644 index 00000000000..c91ee7adcd9 --- /dev/null +++ b/changelog/19.0/19.0.4/changelog.md @@ -0,0 +1,41 @@ +# Changelog of Vitess v19.0.4 + +### Bug fixes +#### Evalengine + * [release-19.0] projection: Return correct collation information (#15801) [#15804](https://github.com/vitessio/vitess/pull/15804) +#### General + * [release-19.0] GRPC: Address potential segfault in dedicated connection pooling (#15751) [#15753](https://github.com/vitessio/vitess/pull/15753) + * [release-19.0] Properly unescape keyspace name in FindAllShardsInKeyspace (#15765) [#15786](https://github.com/vitessio/vitess/pull/15786) +#### Query Serving + * [release-19.0] Fix TPCH test by providing the correct field information in evalengine (#15623) [#15655](https://github.com/vitessio/vitess/pull/15655) + * [release-19.0] fix: don't forget DISTINCT for derived tables (#15672) [#15678](https://github.com/vitessio/vitess/pull/15678) + * [release-19.0] Fix panic in aggregation (#15728) [#15736](https://github.com/vitessio/vitess/pull/15736) + * [release-19.0] Fix wrong assignment to `sql_id_opt` in the parser (#15862) [#15869](https://github.com/vitessio/vitess/pull/15869) +#### Topology + * [release-19.0] discovery: Fix tablets removed from healthcheck when topo server GetTablet call fails (#15633) [#15681](https://github.com/vitessio/vitess/pull/15681) + * [release-19.0] Fix ZooKeeper Topology connection locks not being cleaned up correctly (#15757) [#15764](https://github.com/vitessio/vitess/pull/15764) +#### VReplication + * [release-19.0] VReplication: Take replication lag 
into account in VStreamManager healthcheck result processing (#15761) [#15774](https://github.com/vitessio/vitess/pull/15774) +#### VTAdmin + * [release-19.0] [VTAdmin API] Fix schema cache flag, add documentation (#15704) [#15720](https://github.com/vitessio/vitess/pull/15720) +#### VTorc + * [release-19.0]: VTOrc optimize TMC usage (#15356) [#15759](https://github.com/vitessio/vitess/pull/15759) +### CI/Build +#### General + * [release-19.0] Upgrade the Golang version to `go1.22.3` [#15864](https://github.com/vitessio/vitess/pull/15864) +#### VReplication + * [release-19.0] VReplication: Get workflowFlavorVtctl endtoend testing working properly again (#15636) [#15667](https://github.com/vitessio/vitess/pull/15667) +### Internal Cleanup +#### General + * [release-19.0] changelogs: squash 19.0.2/19.0.3 into just 19.0.3 and remove 19.0.2 (#15665) [#15668](https://github.com/vitessio/vitess/pull/15668) +### Performance +#### VTTablet + * [release-19.0] Improve performance for `BaseShowTablesWithSizes` query. (#15713) [#15795](https://github.com/vitessio/vitess/pull/15795) +### Regression +#### Query Serving + * [release-19.0] Fix regression where inserts into reference tables with a different name on sharded keyspaces were not routed correctly. (#15796) [#15860](https://github.com/vitessio/vitess/pull/15860) +### Release +#### General + * [release-19.0] Bump to `v19.0.4-SNAPSHOT` after the `v19.0.3` release [#15662](https://github.com/vitessio/vitess/pull/15662) + * [release-19.0] Code Freeze for `v19.0.4` [#15874](https://github.com/vitessio/vitess/pull/15874) + diff --git a/changelog/19.0/19.0.4/release_notes.md b/changelog/19.0/19.0.4/release_notes.md new file mode 100644 index 00000000000..bf68099f583 --- /dev/null +++ b/changelog/19.0/19.0.4/release_notes.md @@ -0,0 +1,7 @@ +# Release of Vitess v19.0.4 +The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/19.0/19.0.4/changelog.md). 
+ +The release includes 19 merged Pull Requests. + +Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @systay, @vitess-bot + diff --git a/changelog/19.0/README.md b/changelog/19.0/README.md index aecff732ce1..b5c6489f101 100644 --- a/changelog/19.0/README.md +++ b/changelog/19.0/README.md @@ -1,2 +1,16 @@ ## v19.0 +* **[19.0.4](19.0.4)** + * [Changelog](19.0.4/changelog.md) + * [Release Notes](19.0.4/release_notes.md) + +* **[19.0.3](19.0.3)** + * [Changelog](19.0.3/changelog.md) + * [Release Notes](19.0.3/release_notes.md) + +* **[19.0.1](19.0.1)** + * [Changelog](19.0.1/changelog.md) + * [Release Notes](19.0.1/release_notes.md) + * **[19.0.0](19.0.0)** + * [Changelog](19.0.0/changelog.md) + * [Release Notes](19.0.0/release_notes.md) diff --git a/changelog/20.0/20.0.0/summary.md b/changelog/20.0/20.0.0/summary.md new file mode 100644 index 00000000000..aeacf8d7a2c --- /dev/null +++ b/changelog/20.0/20.0.0/summary.md @@ -0,0 +1,370 @@ +## Summary + +### Table of Contents + +- **[Major Changes](#major-changes)** + - **[Deletions](#deletions)** + - [`--vreplication_tablet_type` flag](#vreplication-tablet-type-deletion) + - [Pool Capacity Flags](#pool-flags-deletion) + - [MySQL binaries in the vitess/lite Docker images](#vitess-lite) + - [vitess/base and vitess/k8s Docker images](#base-k8s-images) + - [`gh-ost` binary and endtoend tests](#gh-ost-binary-tests-removal) + - [Legacy `EmergencyReparentShard` stats](#legacy-emergencyshardreparent-stats) + - **[Breaking changes](#breaking-changes)** + - [Metric Name Changes in VTOrc](#metric-change-vtorc) + - [ENUM and SET column handling in VTGate VStream API](#enum-set-vstream) + - [`shutdown_grace_period` Default Change](#shutdown-grace-period-default) + - [New `unmanaged` Flag and `disable_active_reparents` deprecation](#unmanaged-flag) + - [`recovery-period-block-duration` Flag deprecation](#recovery-block-deprecation) + - [`mysqlctld` `onterm-timeout` Default Change](#mysqlctld-onterm-timeout) + - 
[`MoveTables` now removes `auto_increment` clauses by default when moving tables from an unsharded keyspace to a sharded one](#move-tables-auto-increment) + - [`Durabler` interface method renaming](#durabler-interface-method-renaming) + - **[Query Compatibility](#query-compatibility)** + - [Vindex Hints](#vindex-hints) + - [Update with Limit Support](#update-limit) + - [Update with Multi Table Support](#multi-table-update) + - [Update with Multi Target Support](#update-multi-target) + - [Delete with Subquery Support](#delete-subquery) + - [Delete with Multi Target Support](#delete-multi-target) + - [User Defined Functions Support](#udf-support) + - [Insert Row Alias Support](#insert-row-alias-support) + - **[Query Timeout](#query-timeout)** + - **[Flag changes](#flag-changes)** + - [`pprof-http` default change](#pprof-http-default) + - [New `healthcheck-dial-concurrency` flag](#healthcheck-dial-concurrency-flag) + - [New minimum for `--buffer_min_time_between_failovers`](#buffer_min_time_between_failovers-flag) + - [New `track-udfs` vtgate flag](#vtgate-track-udfs-flag) +- **[Minor Changes](#minor-changes)** + - **[New Stats](#new-stats)** + - [VTTablet Query Cache Hits and Misses](#vttablet-query-cache-hits-and-misses) + - **[`SIGHUP` reload of gRPC client static auth creds](#sighup-reload-of-grpc-client-auth-creds)** + - **[VTAdmin](#vtadmin)** + - [Updated to node v20.12.2](#updated-node) + +## Major Changes + +### Deletion + +#### `--vreplication_tablet_type` flag + +The previously deprecated flag `--vreplication_tablet_type` has been deleted. + +#### Pool Capacity Flags + +The previously deprecated flags `--queryserver-config-query-pool-waiter-cap`, `--queryserver-config-stream-pool-waiter-cap` and `--queryserver-config-txpool-waiter-cap` have been deleted. + +#### MySQL binaries in the `vitess/lite` Docker images + +In `v19.0.0` we had deprecated the `mysqld` binary in the `vitess/lite` Docker image. 
+This also deprecated the MySQL/Percona version-specific image tags. + +Starting in `v20.0.0` we no longer build the MySQL/Percona version specific image tags. +Moreover, the `mysqld` binary is no longer present on the `vitess/lite` image. + +Here are the images we will no longer build and push: + +| Image | Available | +|---------------------------------|-----------| +| `vitess/lite:v20.0.0` | YES | +| `vitess/lite:v20.0.0-mysql57` | NO | +| `vitess/lite:v20.0.0-mysql80` | NO | +| `vitess/lite:v20.0.0-percona57` | NO | +| `vitess/lite:v20.0.0-percona80` | NO | + + +If you have not done it yet, you can use an official MySQL Docker image for your `mysqld` container now such as: `mysql:8.0.30`. +Below is an example of a kubernetes yaml file before and after upgrading to an official MySQL image: + +```yaml +# before: + +# you are still on v19 and are looking to upgrade to v20 +# the image used here includes MySQL 8.0.30 and its binaries + + mysqld: + mysql80Compatible: vitess/lite:v19.0.0-mysql80 +``` +```yaml +# after: + +# if we still want to use MySQL 8.0.30, we now have to use the +# official MySQL image with the 8.0.30 tag as shown below + + mysqld: + mysql80Compatible: mysql:8.0.30 # or even mysql:8.0.34 for instance +``` + +#### `vitess/base` and `vitess/k8s` Docker images + +Since we have deleted MySQL from our `vitess/lite` image, we are removing the `vitess/base` and `vitess/k8s` images. + +These images are no longer useful since we can use `vitess/lite` as the base of many other Docker images (`vitess/vtgate`, `vitess/vttablet`, ...). + +#### `gh-ost` binary and endtoend tests + +Vitess 20.0 drops support for `gh-ost` DDL strategy. + +`vttablet` binary no longer embeds a `gh-ost` binary. Users of `gh-ost` DDL strategy will need to supply a `gh-ost` binary on the `vttablet` host or pod. Vitess will look for the `gh-ost` binary in the system `PATH`; otherwise the user should supply `vttablet --gh-ost-path`. 
+ +Vitess' endtoend tests no longer use nor test `gh-ost` migrations. + +#### Legacy `EmergencyReparentShard` stats + +The following `EmergencyReparentShard` stats were deprecated in Vitess 18.0 and are removed in Vitess 20.0: +- `ers_counter` +- `ers_success_counter` +- `ers_failure_counter` + +These counters are replaced by the following stats _(introduced in Vitess 18.0)_: +- `emergency_reparent_counts` - Number of times `EmergencyReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation. +- `planned_reparent_counts` - Number of times `PlannedReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation. + +Also, the `reparent_shard_operation_timings` stat was added to provide per-operation timings of reparent operations. + +### Breaking Changes + +#### Metric Name Changes in VTOrc + +The following metric names have been changed in VTOrc. The old metrics are still available in `/debug/vars` for this release, but will be removed in later releases. The new metric names and the deprecated metric names resolve to the same metric name on prometheus, so there is no change there. 
+ +| Old Metric Name | New Metric Name | Name in Prometheus | +|:--------------------------------------------:|:----------------------------------------:|:--------------------------------------------------:| +| `analysis.change.write` | `AnalysisChangeWrite` | `vtorc_analysis_change_write` | +| `audit.write` | `AuditWrite` | `vtorc_audit_write` | +| `discoveries.attempt` | `DiscoveriesAttempt` | `vtorc_discoveries_attempt` | +| `discoveries.fail` | `DiscoveriesFail` | `vtorc_discoveries_fail` | +| `discoveries.instance_poll_seconds_exceeded` | `DiscoveriesInstancePollSecondsExceeded` | `vtorc_discoveries_instance_poll_seconds_exceeded` | +| `discoveries.queue_length` | `DiscoveriesQueueLength` | `vtorc_discoveries_queue_length` | +| `discoveries.recent_count` | `DiscoveriesRecentCount` | `vtorc_discoveries_recent_count` | +| `instance.read` | `InstanceRead` | `vtorc_instance_read` | +| `instance.read_topology` | `InstanceReadTopology` | `vtorc_instance_read_topology` | + + + +#### ENUM and SET column handling in VTGate VStream API + +The [VTGate VStream API](https://vitess.io/docs/reference/vreplication/vstream/) now returns [`ENUM`](https://dev.mysql.com/doc/refman/en/enum.html) and [`SET`](https://dev.mysql.com/doc/refman/en/set.html) column type values in [`VEvent`](https://pkg.go.dev/vitess.io/vitess/go/vt/proto/binlogdata#VEvent) messages (in the embedded [`RowChange`](https://pkg.go.dev/vitess.io/vitess/go/vt/proto/binlogdata#RowChange) messages) as their string values instead of the integer based ones — in both the copy/snapshot phase and the streaming phase. This change was done to make the `VStream` API more user-friendly, intuitive, and to align the behavior across both phases. 
Before [this change](https://github.com/vitessio/vitess/pull/15723) the values for [`ENUM`](https://dev.mysql.com/doc/refman/en/enum.html) and [`SET`](https://dev.mysql.com/doc/refman/en/set.html) columns were string values in the copy phase but integer values (which only have an internal meaning to MySQL) in the streaming phase. This inconsistency led to various [challenges and issues](https://github.com/vitessio/vitess/issues/15750) for each `VStream` client/consumer (e.g. the [`Debezium` Vitess connector](https://debezium.io/documentation/reference/stable/connectors/vitess.html) failed to properly perform a snapshot for tables containing these column types). Now the behavior is intuitive — clients need the string values as the eventual sink is often not MySQL so each consumer needed to perform the mappings themselves — and consistent. While this is a (potentially) breaking change, a new boolean field has been added to the [`FieldEvent`](https://pkg.go.dev/vitess.io/vitess/go/vt/proto/binlogdata#FieldEvent) message called `EnumSetStringValues`. When that field is `false` (in Vitess v19 and older) then the consumer will need to perform the mappings during streaming phase, but not during copy phase. When this field is `true`, then no mapping is required. This will help to ensure a smooth transition for all consumers over time. 
To demonstrate, let's look at the textual output (printing the received `VEvents` as strings) when streaming a single `enum_set_test` table from the unsharded `commerce` keyspace so that we can see what the VStream looks like before and after when we start a new VStream in copy/snapshot mode and then transition to streaming mode for the following table: + +```sql +CREATE TABLE `enum_set_test` ( + `id` int NOT NULL AUTO_INCREMENT, + `name` varchar(120) DEFAULT NULL, + `shirt_size` enum('small','medium','large','xlarge','xxlarge') DEFAULT NULL, + `hobbies` set('knitting','cooking','pickleball','biking','hiking','motorcycle','video games','reading') DEFAULT NULL, + PRIMARY KEY (`id`) +) +``` + +And with the table having this data when we start our `VStream` and begin the copy/snapshot phase: + +```sql +mysql> select * from enum_set_test; ++----+-----------+------------+-------------------------+ +| id | name | shirt_size | hobbies | ++----+-----------+------------+-------------------------+ +| 1 | Billy Bob | xlarge | cooking,reading | +| 2 | Sally Mae | medium | knitting,cooking,hiking | ++----+-----------+------------+-------------------------+ +2 rows in set (0.00 sec) +``` + +And finally we will perform the following inserts and updates to the table during the streaming phase: + +```sql +insert into enum_set_test values (3, "Matt Lord", 'medium', 'pickleball,biking,hiking,motorcycle,video games,reading'); +insert into enum_set_test values (4, "Jerry Badyellow", 'large', ''); +update enum_set_test set shirt_size = 'small', hobbies = 'knitting,cooking,hiking,reading' where id = 2; +``` + +Vitess v19 and older: + +```text +[type:BEGIN keyspace:"commerce" shard:"0" type:FIELD field_event:{table_name:"commerce.enum_set_test" fields:{name:"id" type:INT32 table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"id" column_length:11 charset:63 flags:49667 column_type:"int"} fields:{name:"name" type:VARCHAR table:"enum_set_test" 
org_table:"enum_set_test" database:"vt_commerce" org_name:"name" column_length:480 charset:255 column_type:"varchar(120)"} fields:{name:"shirt_size" type:ENUM table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"shirt_size" column_length:28 charset:255 flags:256 column_type:"enum('small','medium','large','xlarge','xxlarge')"} fields:{name:"hobbies" type:SET table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"hobbies" column_length:288 charset:255 flags:2048 column_type:"set('knitting','cooking','pickleball','biking','hiking','motorcycle','video games','reading')"} keyspace:"commerce" shard:"0"} keyspace:"commerce" shard:"0"] +[type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/ce357206-0d49-11ef-8fd1-a74564279579:1-35"}} keyspace:"commerce" shard:"0"] +[type:ROW row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:9 lengths:6 lengths:15 values:"1Billy Bobxlargecooking,reading"}} keyspace:"commerce" shard:"0"} keyspace:"commerce" shard:"0" type:ROW row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:9 lengths:6 lengths:23 values:"2Sally Maemediumknitting,cooking,hiking"}} keyspace:"commerce" shard:"0"} keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/ce357206-0d49-11ef-8fd1-a74564279579:1-35" table_p_ks:{table_name:"enum_set_test" lastpk:{fields:{name:"id" type:INT32 charset:63 flags:49667} rows:{lengths:1 values:"2"}}}}} keyspace:"commerce" shard:"0" type:COMMIT keyspace:"commerce" shard:"0"] +[type:BEGIN keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/ce357206-0d49-11ef-8fd1-a74564279579:1-35"}} keyspace:"commerce" shard:"0" type:COMMIT keyspace:"commerce" shard:"0"] +[type:COPY_COMPLETED keyspace:"commerce" shard:"0" type:COPY_COMPLETED] +[type:BEGIN timestamp:1715179728 current_time:1715179728532658000 
keyspace:"commerce" shard:"0" type:FIELD timestamp:1715179728 field_event:{table_name:"commerce.enum_set_test" fields:{name:"id" type:INT32 table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"id" column_length:11 charset:63 flags:49667 column_type:"int"} fields:{name:"name" type:VARCHAR table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"name" column_length:480 charset:255 column_type:"varchar(120)"} fields:{name:"shirt_size" type:ENUM table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"shirt_size" column_length:28 charset:255 flags:256 column_type:"enum('small','medium','large','xlarge','xxlarge')"} fields:{name:"hobbies" type:SET table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"hobbies" column_length:288 charset:255 flags:2048 column_type:"set('knitting','cooking','pickleball','biking','hiking','motorcycle','video games','reading')"} keyspace:"commerce" shard:"0"} current_time:1715179728535652000 keyspace:"commerce" shard:"0" type:ROW timestamp:1715179728 row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:9 lengths:1 lengths:3 values:"3Matt Lord2252"}} keyspace:"commerce" shard:"0" flags:1} current_time:1715179728535739000 keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/ce357206-0d49-11ef-8fd1-a74564279579:1-36"}} keyspace:"commerce" shard:"0" type:COMMIT timestamp:1715179728 current_time:1715179728535754000 keyspace:"commerce" shard:"0"] +[type:BEGIN timestamp:1715179735 current_time:1715179735538607000 keyspace:"commerce" shard:"0" type:ROW timestamp:1715179735 row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:15 lengths:1 lengths:1 values:"4Jerry Badyellow30"}} keyspace:"commerce" shard:"0" flags:1} current_time:1715179735538659000 keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" 
shard:"0" gtid:"MySQL56/ce357206-0d49-11ef-8fd1-a74564279579:1-37"}} keyspace:"commerce" shard:"0" type:COMMIT timestamp:1715179735 current_time:1715179735538672000 keyspace:"commerce" shard:"0"] +[type:BEGIN timestamp:1715179741 current_time:1715179741728690000 keyspace:"commerce" shard:"0" type:ROW timestamp:1715179741 row_event:{table_name:"commerce.enum_set_test" row_changes:{before:{lengths:1 lengths:9 lengths:1 lengths:2 values:"2Sally Mae219"} after:{lengths:1 lengths:9 lengths:1 lengths:3 values:"2Sally Mae1147"}} keyspace:"commerce" shard:"0" flags:1} current_time:1715179741728730000 keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/ce357206-0d49-11ef-8fd1-a74564279579:1-38"}} keyspace:"commerce" shard:"0" type:COMMIT timestamp:1715179741 current_time:1715179741728744000 keyspace:"commerce" shard:"0"] +``` + +Vitess v20 and newer: + +```text +[type:BEGIN keyspace:"commerce" shard:"0" type:FIELD field_event:{table_name:"commerce.enum_set_test" fields:{name:"id" type:INT32 table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"id" column_length:11 charset:63 flags:49667 column_type:"int"} fields:{name:"name" type:VARCHAR table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"name" column_length:480 charset:255 column_type:"varchar(120)"} fields:{name:"shirt_size" type:ENUM table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"shirt_size" column_length:28 charset:255 flags:256 column_type:"enum('small','medium','large','xlarge','xxlarge')"} fields:{name:"hobbies" type:SET table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"hobbies" column_length:288 charset:255 flags:2048 column_type:"set('knitting','cooking','pickleball','biking','hiking','motorcycle','video games','reading')"} keyspace:"commerce" shard:"0" enum_set_string_values:true} keyspace:"commerce" shard:"0"] +[type:VGTID 
vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/156f702a-0d47-11ef-8723-653d045ab990:1-50"}} keyspace:"commerce" shard:"0"] +[type:ROW row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:9 lengths:6 lengths:15 values:"1Billy Bobxlargecooking,reading"}} keyspace:"commerce" shard:"0"} keyspace:"commerce" shard:"0" type:ROW row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:9 lengths:6 lengths:23 values:"2Sally Maemediumknitting,cooking,hiking"}} keyspace:"commerce" shard:"0"} keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/156f702a-0d47-11ef-8723-653d045ab990:1-50" table_p_ks:{table_name:"enum_set_test" lastpk:{fields:{name:"id" type:INT32 charset:63 flags:49667} rows:{lengths:1 values:"2"}}}}} keyspace:"commerce" shard:"0" type:COMMIT keyspace:"commerce" shard:"0"] +[type:BEGIN keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/156f702a-0d47-11ef-8723-653d045ab990:1-50"}} keyspace:"commerce" shard:"0" type:COMMIT keyspace:"commerce" shard:"0"] +[type:COPY_COMPLETED keyspace:"commerce" shard:"0" type:COPY_COMPLETED] +[type:BEGIN timestamp:1715179399 current_time:1715179399817221000 keyspace:"commerce" shard:"0" type:FIELD timestamp:1715179399 field_event:{table_name:"commerce.enum_set_test" fields:{name:"id" type:INT32 table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"id" column_length:11 charset:63 flags:49667 column_type:"int"} fields:{name:"name" type:VARCHAR table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"name" column_length:480 charset:255 column_type:"varchar(120)"} fields:{name:"shirt_size" type:ENUM table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"shirt_size" column_length:28 charset:255 flags:256 column_type:"enum('small','medium','large','xlarge','xxlarge')"} 
fields:{name:"hobbies" type:SET table:"enum_set_test" org_table:"enum_set_test" database:"vt_commerce" org_name:"hobbies" column_length:288 charset:255 flags:2048 column_type:"set('knitting','cooking','pickleball','biking','hiking','motorcycle','video games','reading')"} keyspace:"commerce" shard:"0" enum_set_string_values:true} current_time:1715179399821735000 keyspace:"commerce" shard:"0" type:ROW timestamp:1715179399 row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:9 lengths:6 lengths:55 values:"3Matt Lordmediumpickleball,biking,hiking,motorcycle,video games,reading"}} keyspace:"commerce" shard:"0" flags:1} current_time:1715179399821762000 keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/156f702a-0d47-11ef-8723-653d045ab990:1-51"}} keyspace:"commerce" shard:"0" type:COMMIT timestamp:1715179399 current_time:1715179399821801000 keyspace:"commerce" shard:"0"] +[type:BEGIN timestamp:1715179399 current_time:1715179399822310000 keyspace:"commerce" shard:"0" type:ROW timestamp:1715179399 row_event:{table_name:"commerce.enum_set_test" row_changes:{after:{lengths:1 lengths:15 lengths:5 lengths:0 values:"4Jerry Badyellowlarge"}} keyspace:"commerce" shard:"0" flags:1} current_time:1715179399822355000 keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/156f702a-0d47-11ef-8723-653d045ab990:1-52"}} keyspace:"commerce" shard:"0" type:COMMIT timestamp:1715179399 current_time:1715179399822360000 keyspace:"commerce" shard:"0"] +[type:BEGIN timestamp:1715179400 current_time:1715179400512056000 keyspace:"commerce" shard:"0" type:ROW timestamp:1715179400 row_event:{table_name:"commerce.enum_set_test" row_changes:{before:{lengths:1 lengths:9 lengths:6 lengths:23 values:"2Sally Maemediumknitting,cooking,hiking"} after:{lengths:1 lengths:9 lengths:5 lengths:31 values:"2Sally Maesmallknitting,cooking,hiking,reading"}} keyspace:"commerce" 
shard:"0" flags:1} current_time:1715179400512094000 keyspace:"commerce" shard:"0" type:VGTID vgtid:{shard_gtids:{keyspace:"commerce" shard:"0" gtid:"MySQL56/156f702a-0d47-11ef-8723-653d045ab990:1-53"}} keyspace:"commerce" shard:"0" type:COMMIT timestamp:1715179400 current_time:1715179400512108000 keyspace:"commerce" shard:"0"] +``` + +An example key difference there being that `after:{lengths:1 lengths:9 lengths:1 lengths:3 values:"2Sally Mae1147"}` from Vitess v19 and older becomes `after:{lengths:1 lengths:9 lengths:5 lengths:31 values:"2Sally Maesmallknitting,cooking,hiking,reading"}` from Vitess v20 and newer. So `1` -> `small` and `147` -> `knitting,cooking,hiking,reading` for the `ENUM` and `SET` column values respectively. This also demonstrates why this mapping is necessary in consumers/clients, as `147` has no logical meaning/value for this column outside of MySQL internals. + +If you're using the [`Debezium` Vitess connector](https://debezium.io/documentation/reference/stable/connectors/vitess.html), you should upgrade your connector to 2.7 (the next release) — which should contain [the relevant necessary changes](https://issues.redhat.com/browse/DBZ-7792) — *prior to upgrading Vitess* to v20.0.1 or later. If you're using any of the PlanetScale connectors ([`AirByte`](https://github.com/planetscale/airbyte-source/), [`FiveTran`](https://github.com/planetscale/fivetran-source), or [`singer-tap`](https://github.com/planetscale/singer-tap)) then no actions are required. + +If you're using a custom `VStream` client/consumer, then you will need to build a new client with the updated v20 [binlogdata protos](https://pkg.go.dev/vitess.io/vitess/go/vt/proto/binlogdata) ([source](https://github.com/vitessio/vitess/blob/main/proto/binlogdata.proto) for which would be in `main` or the `release-20.0` branch) before needing to support Vitess v20.0.1 or later. 
Your client will then be able to handle old and new messages, with older messages always having this new field set to `false`. + +#### `shutdown_grace_period` Default Change + +The `--shutdown_grace_period` flag, which was introduced in v2 with a default of `0 seconds`, has now been changed to default to `3 seconds`. +This makes reparenting in Vitess resilient to client errors, and prevents PlannedReparentShard from timing out. + +In order to preserve the old behaviour, the users can set the flag back to `0 seconds` causing open transactions to never be shutdown, but in that case, they run the risk of PlannedReparentShard calls timing out. + +#### New `unmanaged` Flag and `disable_active_reparents` deprecation + +New flag `--unmanaged` has been introduced in this release to make it easier to flag unmanaged tablets. It also runs validations to make sure the unmanaged tablets are configured properly. `--disable_active_reparents` flag has been deprecated for `vttablet`, `vtcombo` and `vttestserver` binaries and will be removed in future releases. Specifying the `--unmanaged` flag will also block replication commands and replication repairs. + +Starting this release, all unmanaged tablets should specify this flag. + + +#### `recovery-period-block-duration` Flag deprecation + +The flag `--recovery-period-block-duration` has been deprecated in VTOrc from this release. Its value is now ignored and the flag will be removed in later releases. +VTOrc no longer blocks recoveries for a certain duration after a previous recovery has completed. Since VTOrc refreshes the required information after +acquiring a shard lock, blocking of recoveries is not required. + +#### `mysqlctld` `onterm_timeout` Default Change + +The `--onterm_timeout` flag default value has changed for `mysqlctld`. It now is by default long enough to be able to wait for the default `--shutdown-wait-time` when shutting down on a `TERM` signal. 
+ +This is necessary since otherwise MySQL would never shut down cleanly with the old defaults, since `mysqlctld` would shut down already after 10 seconds by default. + +#### `MoveTables` now removes `auto_increment` clauses by default when moving tables from an unsharded keyspace to a sharded one + +A new `--remove-sharded-auto-increment` flag has been added to the [`MoveTables` create sub-command](https://vitess.io/docs/20.0/reference/programs/vtctldclient/vtctldclient_movetables/vtctldclient_movetables_create/) and it is set to `true` by default. This flag controls whether any [MySQL `auto_increment`](https://dev.mysql.com/doc/refman/en/example-auto-increment.html) clauses should be removed from the table definitions when moving tables from an unsharded keyspace to a sharded one. This is now done by default as `auto_increment` clauses should not typically be used with sharded tables and you should instead rely on externally generated values such as a form of universally/globally unique identifiers or use [Vitess sequences](https://vitess.io/docs/reference/features/vitess-sequences/) in order to ensure that each row has a unique identifier (Primary Key value) across all shards. If for some reason you want to retain them you can set this new flag to `false` when creating the workflow. + +#### `Durabler` interface method renaming + +The methods of [the `Durabler` interface](https://github.com/vitessio/vitess/blob/main/go/vt/vtctl/reparentutil/durability.go#L70-L79) in `go/vt/vtctl/reparentutil` were renamed to be public _(capitalized)_ methods to make it easier to integrate custom Durability Policies from external packages. See [RFC for details](https://github.com/vitessio/vitess/issues/15544). + +Users of custom Durability Policies must rename private `Durabler` methods. 
+ +Changes: +- The `promotionRule` method was renamed to `PromotionRule` +- The `semiSyncAckers` method was renamed to `SemiSyncAckers` +- The `isReplicaSemiSync` method was renamed to `IsReplicaSemiSync` + +### Query Compatibility + +#### Vindex Hints + +Vitess now supports Vindex hints that provide a way for users to influence the shard routing of queries in Vitess by specifying, which vindexes should be considered or ignored by the query planner. This feature enhances the control over query execution, allowing for potentially more efficient data access patterns in sharded databases. + +Example: + ```sql + SELECT * FROM user USE VINDEX (hash_user_id, secondary_vindex) WHERE user_id = 123; + SELECT * FROM order IGNORE VINDEX (range_order_id) WHERE order_date = '2021-01-01'; + ``` + +For more information about Vindex hints and its usage, please consult the documentation. + +#### Update with Limit Support + +Support is added for sharded update with limit. + +Example: `update t1 set t1.foo = 'abc', t1.bar = 23 where t1.baz > 5 limit 1` + +More details about how it works is available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/update.html) + +#### Update with Multi Table Support + +Support is added for sharded multi-table update with column update on single target table using multiple table join. + +Example: `update t1 join t2 on t1.id = t2.id join t3 on t1.col = t3.col set t1.baz = 'abc', t1.apa = 23 where t3.foo = 5 and t2.bar = 7` + +More details about how it works is available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/update.html) + +#### Update with Multi Target Support + +Support is added for sharded multi table target update. 
+ +Example: `update t1 join t2 on t1.id = t2.id set t1.foo = 'abc', t2.bar = 23` + +More details about how it works is available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/update.html) + +#### Delete with Subquery Support + +Support is added for sharded table delete with subquery + +Example: `delete from t1 where id in (select col from t2 where foo = 32 and bar = 43)` + +#### Delete with Multi Target Support + +Support is added for sharded multi table target delete. + +Example: `delete t1, t3 from t1 join t2 on t1.id = t2.id join t3 on t1.col = t3.col` + +More details about how it works is available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/delete.html) + +#### User Defined Functions Support + +VTGate can track any user defined functions for better planning. +User Defined Functions (UDFs) should be directly loaded in the underlying MySQL. + +It should be enabled in VTGate with the `--track-udfs` flag. +This will enable the tracking of UDFs in VTGate and will be used for planning. +Without this flag, VTGate will not be aware that there might be aggregating user-defined functions in the query that need to be pushed down to MySQL. + +More details about how to load UDFs is available in [MySQL Docs](https://dev.mysql.com/doc/extending-mysql/8.0/en/adding-loadable-function.html) + +#### Insert Row Alias Support + +Support is added to have row alias in Insert statement to be used with `on duplicate key update`. + +Example: +- `insert into user(id, name, email) values (100, 'Alice', 'alice@mail.com') as new on duplicate key update name = new.name, email = new.email` +- `insert into user(id, name, email) values (100, 'Alice', 'alice@mail.com') as new(m, n, p) on duplicate key update name = n, email = p` + +More details about how it works is available in [MySQL Docs](https://dev.mysql.com/doc/refman/8.0/en/insert-on-duplicate.html) + +### Query Timeout +On a query timeout, Vitess closed the connection using the `kill connection` statement.
This leads to connection churn +which is not desirable in some cases. To avoid this, Vitess now uses the `kill query` statement to cancel the query. +This will only cancel the query and does not terminate the connection. + +### Flag Changes + +#### `pprof-http` Default Change + +The `--pprof-http` flag, which was introduced in v19 with a default of `true`, has now been changed to default to `false`. +This makes HTTP `pprof` endpoints now an *opt-in* feature, rather than opt-out. +To continue enabling these endpoints, explicitly set `--pprof-http` when starting up Vitess components. + +#### New `--healthcheck-dial-concurrency` flag + +The new `--healthcheck-dial-concurrency` flag defines the maximum number of healthcheck connections that can open concurrently. This limit is to avoid hitting Go runtime panics on deployments watching enough tablets [to hit the runtime's maximum thread limit of `10000`](https://pkg.go.dev/runtime/debug#SetMaxThreads) due to blocking network syscalls. This flag applies to `vtcombo`, `vtctld` and `vtgate` only and a value less than the runtime max thread limit _(`10000`)_ is recommended. + +#### New minimum for `--buffer_min_time_between_failovers` + +The `--buffer_min_time_between_failovers` `vttablet` flag now has a minimum value of `1s`. This is because a value of 0 can cause issues with the buffering mechanics resulting in unexpected and unnecessary query errors — in particular during `MoveTables SwitchTraffic` operations. If you are currently specifying a value of 0 for this flag then you will need to update the config value to 1s *prior to upgrading to v20 or later* as `vttablet` will report an error and terminate if you attempt to start it with a value of 0. + +#### New `--track-udfs` vtgate flag + +The new `--track-udfs` flag enables VTGate to track user defined functions for better planning. 
+ +## Minor Changes + +### New Stats + +#### VTTablet Query Cache Hits and Misses + +VTTablet exposes two new counter stats: + + * `QueryCacheHits`: Query engine query cache hits + * `QueryCacheMisses`: Query engine query cache misses + +### `SIGHUP` reload of gRPC client static auth creds + +The internal gRPC client now caches the static auth credentials and supports reloading via the `SIGHUP` signal. Previous to v20 the credentials were not cached. They were re-loaded from disk on every use. + +### VTAdmin + +#### vtadmin-web updated to node v20.12.2 (LTS) + +Building `vtadmin-web` now requires node >= v20.12.0 (LTS). Breaking changes from v18 to v20 can be found at https://nodejs.org/en/blog/release/v20.12.0 -- with no known issues that apply to VTAdmin. +Full details on the node v20.12.2 release can be found at https://nodejs.org/en/blog/release/v20.12.2. diff --git a/changelog/20.0/README.md b/changelog/20.0/README.md new file mode 100644 index 00000000000..4fb70ae78c1 --- /dev/null +++ b/changelog/20.0/README.md @@ -0,0 +1,2 @@ +## v20.0 +* **[20.0.0](20.0.0)** diff --git a/changelog/README.md b/changelog/README.md index 66ed9543e5d..3a55d986643 100644 --- a/changelog/README.md +++ b/changelog/README.md @@ -1,4 +1,5 @@ ## Releases +* [20.0](20.0) * [19.0](19.0) * [18.0](18.0) * [17.0](17.0) diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000000..b9094718fed --- /dev/null +++ b/codecov.yml @@ -0,0 +1,56 @@ +# https://docs.codecov.com/docs/ +# https://docs.codecov.com/docs/codecov-yaml + +codecov: + branch: main # Set the default branch + +parsers: + go: + partials_as_hits: true + +ignore: + # Ignore our end-to-end test code + - "go/flags/endtoend/**" + - "go/mysql/endtoend/**" + - "go/test/endtoend/**" + - "go/vt/vtctl/endtoend/**" + - "go/vt/vtctl/grpcvtctldserver/endtoend/**" + - "go/vt/vtgate/endtoend/**" + - "go/vt/vttablet/endtoend/**" + - "go/cmd/vttestserver/**" # This relies on end-to-end test packages + # Ignore generated code + 
- "go/**/cached_size.go" # Code generated by Sizegen + - "go/vt/sqlparser/ast_clone.go" # Code generated by ASTHelperGen + - "go/vt/sqlparser/ast_copy_on_rewrite.go" # Code generated by ASTHelperGen + - "go/vt/sqlparser/ast_equals.go" # Code generated by ASTHelperGen + - "go/vt/sqlparser/ast_format_fast.go" # Code generated by ASTFmtGen + - "go/vt/sqlparser/ast_rewrite.go" # Code generated by ASTHelperGen + - "go/vt/sqlparser/ast_visit.go" # Code generated by ASTHelperGen + - "go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go" # Code generated by MockGen + - "go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go" # Code generated by MockGen + - "go/vt/sqlparser/sql.go" # Code generated by goyacc + - "go/mysql/collations/charset/korean/tables.go" # Code generated by go run maketables.go + - "go/mysql/collations/charset/simplifiedchinese/tables.go" # Code generated by go run maketables.go + - "go/mysql/collations/colldata/mysqldata.go" # Code generated by makecolldata + - "go/mysql/collations/colldata/mysqlucadata.go" # Code generated by makecolldata + - "go/mysql/collations/internal/uca/fasttables.go" # Code generated by makecolldata + - "go/mysql/collations/mysqlversion.go" # Code generated by makecolldata + - "go/mysql/collations/supported.go" # Code generated by makecolldata + # Ignore proto files + - "go/vt/proto/**" + +github_checks: + annotations: false + +comment: # https://docs.codecov.com/docs/pull-request-comments + hide_project_coverage: false + +coverage: + status: # https://docs.codecov.com/docs/commit-status + patch: + default: + informational: true # Don't ever fail the codecov/patch test + project: + default: + informational: true # Don't ever fail the codecov/project test + diff --git a/config/embed.go b/config/embed.go index b2a9333e6de..6660e749aa6 100644 --- a/config/embed.go +++ b/config/embed.go @@ -16,3 +16,9 @@ var MycnfMySQL57 string //go:embed mycnf/mysql80.cnf var MycnfMySQL80 string + +//go:embed 
mycnf/mysql8026.cnf +var MycnfMySQL8026 string + +//go:embed mycnf/mysql84.cnf +var MycnfMySQL84 string diff --git a/config/init_db.sql b/config/init_db.sql index d04960633de..25ea2a42f3a 100644 --- a/config/init_db.sql +++ b/config/init_db.sql @@ -1,12 +1,5 @@ # This file is executed immediately after initializing a fresh data directory. -############################################################################### -# WARNING: This sql is *NOT* safe for production use, -# as it contains default well-known users and passwords. -# Care should be taken to change these users and passwords -# for production. -############################################################################### - ############################################################################### # Equivalent of mysql_secure_installation ############################################################################### @@ -14,17 +7,16 @@ # these commands. Note that disabling it does NOT disable read_only. # We save the current value so that we only re-enable it at the end if it was # enabled before. + SET @original_super_read_only=IF(@@global.super_read_only=1, 'ON', 'OFF'); SET GLOBAL super_read_only='OFF'; # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. SET sql_log_bin = 0; -# Remove anonymous users. -DELETE FROM mysql.user WHERE User = ''; -# Disable remote root access (only allow UNIX socket). -DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; +# Remove anonymous users & disable remote root access (only allow UNIX socket). +DROP USER IF EXISTS ''@'%', ''@'localhost', 'root'@'%'; # Remove test database. 
DROP DATABASE IF EXISTS test; @@ -78,11 +70,6 @@ GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD GRANT SELECT, UPDATE, DELETE, DROP ON performance_schema.* TO 'vt_monitoring'@'localhost'; -FLUSH PRIVILEGES; - -RESET SLAVE ALL; -RESET MASTER; - # custom sql is used to add custom scripts like creating users/passwords. We use it in our tests # {{custom_sql}} diff --git a/config/mycnf/mysql8026.cnf b/config/mycnf/mysql8026.cnf new file mode 100644 index 00000000000..c7755be488f --- /dev/null +++ b/config/mycnf/mysql8026.cnf @@ -0,0 +1,37 @@ +# This file is auto-included when MySQL 8.0.26 or later is detected. + +# MySQL 8.0 enables binlog by default with sync_binlog and TABLE info repositories +# It does not enable GTIDs or enforced GTID consistency + +gtid_mode = ON +enforce_gtid_consistency +relay_log_recovery = 1 +binlog_expire_logs_seconds = 259200 + +# disable mysqlx +mysqlx = 0 + +# 8.0 changes the default auth-plugin to caching_sha2_password +default_authentication_plugin = mysql_native_password + +# Semi-sync replication is required for automated unplanned failover +# (when the primary goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# VTTablet will enable semi-sync at the proper time when replication is set up, +# or when a primary is promoted or demoted based on the durability policy configured. +plugin-load = rpl_semi_sync_source=semisync_source.so;rpl_semi_sync_replica=semisync_replica.so + +# MySQL 8.0.26 and later will not load plugins during --initialize +# which makes these options unknown. Prefixing with --loose +# tells the server it's fine if they are not understood. +loose_rpl_semi_sync_source_timeout = 1000000000000000000 +loose_rpl_semi_sync_source_wait_no_replica = 1 + +# In order to protect against any errant GTIDs we will start the mysql instance +in super-read-only mode. +super-read-only + +# Replication parameters to ensure reparents are fast.
+replica_net_timeout = 8 + diff --git a/config/mycnf/mysql84.cnf b/config/mycnf/mysql84.cnf new file mode 100644 index 00000000000..90d7a535602 --- /dev/null +++ b/config/mycnf/mysql84.cnf @@ -0,0 +1,39 @@ +# This file is auto-included when MySQL 8.4.0 or later is detected. + +# MySQL 8.0 enables binlog by default with sync_binlog and TABLE info repositories +# It does not enable GTIDs or enforced GTID consistency + +gtid_mode = ON +enforce_gtid_consistency +relay_log_recovery = 1 +binlog_expire_logs_seconds = 259200 + +# disable mysqlx +mysqlx = 0 + +# 8.4 changes the default auth-plugin to caching_sha2_password and +# disables mysql_native_password by default. +mysql_native_password = ON +default_authentication_plugin = mysql_native_password + +# Semi-sync replication is required for automated unplanned failover +# (when the primary goes away). Here we just load the plugin so it's +# available if desired, but it's disabled at startup. +# +# VTTablet will enable semi-sync at the proper time when replication is set up, +# or when a primary is promoted or demoted based on the durability policy configured. +plugin-load = rpl_semi_sync_source=semisync_source.so;rpl_semi_sync_replica=semisync_replica.so + +# MySQL 8.0.26 and later will not load plugins during --initialize +# which makes these options unknown. Prefixing with --loose +# tells the server it's fine if they are not understood. +loose_rpl_semi_sync_source_timeout = 1000000000000000000 +loose_rpl_semi_sync_source_wait_no_replica = 1 + +# In order to protect against any errant GTIDs we will start the mysql instance +in super-read-only mode. +super-read-only + +# Replication parameters to ensure reparents are fast.
+replica_net_timeout = 8 + diff --git a/config/mycnf/test-suite.cnf b/config/mycnf/test-suite.cnf index e6d0992f6e6..28f4ac16e0d 100644 --- a/config/mycnf/test-suite.cnf +++ b/config/mycnf/test-suite.cnf @@ -1,5 +1,5 @@ # This sets some unsafe settings specifically for -# the test-suite which is currently MySQL 5.7 based +# the test-suite which is currently MySQL 8.0 based # In future it should be renamed testsuite.cnf innodb_buffer_pool_size = 32M @@ -14,13 +14,6 @@ key_buffer_size = 2M sync_binlog=0 innodb_doublewrite=0 -# These two settings are required for the testsuite to pass, -# but enabling them does not spark joy. They should be removed -# in the future. See: -# https://github.com/vitessio/vitess/issues/5396 - -sql_mode = STRICT_TRANS_TABLES - # set a short heartbeat interval in order to detect failures quickly slave_net_timeout = 4 # Disabling `super-read-only`. `test-suite` is mainly used for `vttestserver`. Since `vttestserver` uses a single MySQL for primary and replicas, diff --git a/config/tablet/default.yaml b/config/tablet/default.yaml index f996bb04737..ec9d1f94833 100644 --- a/config/tablet/default.yaml +++ b/config/tablet/default.yaml @@ -57,21 +57,18 @@ oltpReadPool: idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout maxLifetimeSeconds: 0 # queryserver-config-pool-conn-max-lifetime prefillParallelism: 0 # queryserver-config-pool-prefill-parallelism - maxWaiters: 50000 # queryserver-config-query-pool-waiter-cap olapReadPool: size: 200 # queryserver-config-stream-pool-size timeoutSeconds: 0 # queryserver-config-query-pool-timeout idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout prefillParallelism: 0 # queryserver-config-stream-pool-prefill-parallelism - maxWaiters: 0 txPool: size: 20 # queryserver-config-transaction-cap timeoutSeconds: 1 # queryserver-config-txpool-timeout idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout prefillParallelism: 0 # queryserver-config-transaction-prefill-parallelism - maxWaiters: 50000 # 
queryserver-config-txpool-waiter-cap oltp: queryTimeoutSeconds: 30 # queryserver-config-query-timeout diff --git a/doc/DockerBuild.md b/doc/DockerBuild.md index c4134556cc2..2f24d62a92c 100644 --- a/doc/DockerBuild.md +++ b/doc/DockerBuild.md @@ -24,45 +24,22 @@ Then you can run our build script for the `lite` image which extracts the Vitess or [vendor.json](https://github.com/vitessio/vitess/blob/main/vendor/vendor.json), for example to add new dependencies. If you do need it then build the bootstrap image, otherwise pull the image using one of the following - commands depending on the MySQL flavor you want: + command. ```sh - vitess$ docker pull vitess/bootstrap:mysql57 # MySQL Community Edition 5.7 - vitess$ docker pull vitess/bootstrap:mysql56 # MySQL Community Edition 5.6 - vitess$ docker pull vitess/bootstrap:percona57 # Percona Server 5.7 - vitess$ docker pull vitess/bootstrap:percona # Percona Server + vitess$ docker pull vitess/bootstrap:latest ``` **Note:** If you have already downloaded the `vitess/bootstrap:` image on your machine before then it could be old, which may cause build failures. So it would be a good idea to always execute this step. -1. Build the `vitess/base[:]` image. - It will include the compiled the Vitess binaries. - (`vitess/base` also contains the source code and tests i.e. everything needed for development work.) - - Choose one of the following commands (the command without suffix builds - the default image containing MySQL 5.7): - - ```sh - vitess$ make docker_base - vitess$ make docker_base_mysql56 - vitess$ make docker_base_percona57 - vitess$ make docker_base_percona - ``` - 1. Build the `vitess/lite[:]` image. - This will run a script that extracts from `vitess/base` only the files + This will run a script that extracts from `vitess/bootstrap` only the files needed to run Vitess. 
- Choose one of the following commands (the command without suffix builds - the default image containing MySQL 5.7): - ```sh vitess$ make docker_lite - vitess$ make docker_lite_mysql56 - vitess$ make docker_lite_percona57 - vitess$ make docker_lite_percona ``` 1. Re-tag the image under your personal repository, then upload it. diff --git a/doc/design-docs/TabletServerParamsAsYAML.md b/doc/design-docs/TabletServerParamsAsYAML.md index 49d073d1313..52d48a5e6f6 100644 --- a/doc/design-docs/TabletServerParamsAsYAML.md +++ b/doc/design-docs/TabletServerParamsAsYAML.md @@ -95,21 +95,18 @@ oltpReadPool: timeoutSeconds: 0 # queryserver-config-query-pool-timeout idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout prefillParallelism: 0 # queryserver-config-pool-prefill-parallelism - maxWaiters: 50000 # queryserver-config-query-pool-waiter-cap olapReadPool: size: 200 # queryserver-config-stream-pool-size timeoutSeconds: 0 # queryserver-config-query-pool-timeout idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout prefillParallelism: 0 # queryserver-config-stream-pool-prefill-parallelism - maxWaiters: 0 txPool: size: 20 # queryserver-config-transaction-cap timeoutSeconds: 1 # queryserver-config-txpool-timeout idleTimeoutSeconds: 1800 # queryserver-config-idle-timeout prefillParallelism: 0 # queryserver-config-transaction-prefill-parallelism - maxWaiters: 50000 # queryserver-config-txpool-waiter-cap oltp: queryTimeoutSeconds: 30 # queryserver-config-query-timeout diff --git a/doc/design-docs/TopoLocks.md b/doc/design-docs/TopoLocks.md new file mode 100644 index 00000000000..b25927b4ed8 --- /dev/null +++ b/doc/design-docs/TopoLocks.md @@ -0,0 +1,35 @@ +Locking Using Topology Servers +===================== + +This doc describes the working of shard locking that Vitess does using the topo servers. + +There are 2 variants of shard locking, `LockShard` which is a blocking call, and `TryLockShard` which tries to be a non-blocking call, but does not guarantee it. 
+ +`TryLockShard` tries to find out if the shard is available to be locked or not. If it finds that the shard is locked, it returns with an error. However, there is still a race when the shard is not locked, that can cause `TryLockShard` to still block. + +### Working of LockShard + +`getLockTimeout` gets the amount of time we have to acquire a shard lock. It is not the amount of time that we acquire the shard lock for. It is currently misadvertised. `LockShard` returns a context, but that context doesn't have a timeout on it. When the shard lock expires, the context doesn't expire, because it doesn't have a timeout. To check whether the shard is locked or not, we have `CheckShardLocked`. + +The implementations of `LockShard` and `CheckShardLocked` differ slightly for all the different topology servers. We'll look at each of them separately. + +### Etcd + +In Etcd implementation, we use `KeepAlive` API to keep renewing the context that we have for acquiring the shard lock every 10 seconds. The duration of the lease is controlled by the `--topo_etcd_lease_ttl` flag which defaults to 10 seconds. Once we acquire the shard lock, the context for acquiring the shard lock expires and that stops the KeepAlives too. + +The shard lock is released either when the unlock function is called, or if the lease ttl expires. This guards against servers crashing while holding the shard lock. + +The Check function of etcd, is unique in the sense that apart from just checking whether the shard is locked or not, it also renews the lease by running `KeepAliveOnce`. + + +### ZooKeeper + +In ZooKeeper, locks are implemented by creating ephemeral files. The ephemeral files are present until the connection is alive. So there doesn't look like a timeout on the shard lock, unless the connection/process dies. + +The Check function doesn't do anything in ZooKeeper. The implementation just returns nil. 
To implement the Check functionality, we just need to check that the connection isn't broken and the ephemeral node exists. + +### Consul + +In Consul, the timeout for the lock is controlled by the `--topo_consul_lock_session_ttl` flag. + +The Check function works properly and checks if the lock still exists. \ No newline at end of file diff --git a/doc/internal/release/how-to-release.md b/doc/internal/release/how-to-release.md index fd5caa81e03..08411f9c0ac 100644 --- a/doc/internal/release/how-to-release.md +++ b/doc/internal/release/how-to-release.md @@ -83,6 +83,7 @@ That includes: > - There are several pages we want to update: > - [The releases page](https://vitess.io/docs/releases/): we must add the new release to the list with all its information and link. The links can be broken (404 error) while we are preparing for the release, this is fine. > - [The local install page](https://vitess.io/docs/get-started/local/): we must use the proper version increment for this guide and the proper SHA. The SHA will have to be modified once the Release Pull Request and the release is tagged is merged. + > - [The Vitess Operator for Kubernetes page](https://vitess.io/docs/get-started/operator/#install-the-operator), [the Local Install via source for Mac page](https://vitess.io/docs/get-started/local-mac/#install-vitess), [the Local Install via Docker page](https://vitess.io/docs/get-started/local-docker/#check-out-the-vitessiovitess-repository), and [the Vttestserver Docker Image page](https://vitess.io/docs/get-started/vttestserver-docker-image/#check-out-the-vitessiovitess-repository): we must checkout to the proper release branch after cloning Vitess. > - If we are doing a GA or RC release follow the instructions below: > - There are two scripts in the website repository in `./tools/{ga|rc}_release.sh`, use them to update the website documentation. 
The scripts automate: > - For an RC, we need to create a new entry in the sidebar which represents the next version on `main` and mark the version we are releasing as RC. diff --git a/doc/vtadmin/clusters.yaml b/doc/vtadmin/clusters.yaml index e4ed5335cc6..d2e506cec58 100644 --- a/doc/vtadmin/clusters.yaml +++ b/doc/vtadmin/clusters.yaml @@ -63,3 +63,24 @@ defaults: # - schema-read-pool => for GetSchema, GetSchemas, and FindSchema api methods # - topo-read-pool => for generic topo methods (e.g. GetKeyspace, FindAllShardsInKeyspace) # - workflow-read-pool => for GetWorkflow/GetWorkflows api methods. + + # How long to keep values in schema cache by default (duration passed to Add takes precedence). + # A value of "0m" means values will never be cached, a positive duration "1m" means items will be cached + # for that duration, and passing nothing will default to "NoExpiration". + schema-cache-default-expiration: 1m + # How many outstanding backfill requests to permit in schema cache. + # If the queue is full, calls to backfill schemas will return false, and those requests will be discarded. + # A value of "0" means that the underlying channel will have a size of 0, + # and every send to the backfill queue will block until the queue is "empty" again. + schema-cache-backfill-queue-size: 0 + # How often expired values are removed from schema cache. + schema-cache-cleanup-interval: 5m + # How long a backfill request is considered valid. + # If the backfill goroutine encounters a request older than this, it is discarded. + schema-cache-backfill-request-ttl: 100ms + # How much time must pass before the backfill goroutine will re-backfill the same key. + # Used to prevent multiple callers from queueing up too many requests for the same key, + # when one backfill would satisfy all of them. + schema-cache-backfill-request-duplicate-interval: 1m + # How long to wait when attempting to enqueue a backfill request before giving up.
+ schema-cache-backfill-enqueue-wait-time: 50ms diff --git a/docker/Dockerfile.release b/docker/Dockerfile.release deleted file mode 100644 index 1c36b503acc..00000000000 --- a/docker/Dockerfile.release +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# We rely on the base image, as that will re-copy the local -# working tree and build it. Because of so, we trust the local -# vendor folder is up to data. -FROM vitess/base - -# Clean local files, and keep vendorer libs -RUN git clean -xdf --exclude="vendor" - -RUN mkdir /vt/releases - -CMD tar -czf /vt/releases/v$VERSION.tar.gz --exclude .git . \ No newline at end of file diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile deleted file mode 100644 index 75c51b4ad1b..00000000000 --- a/docker/base/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" - -FROM "${image}" - -# Allows some docker builds to disable CGO -ARG CGO_ENABLED=0 - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Allows docker builds to set the BUILD_GIT_BRANCH -ARG BUILD_GIT_BRANCH - -# Allows docker builds to set the BUILD_GIT_REV -ARG BUILD_GIT_REV - -# Allows docker builds to set the BUILD_TIME -ARG BUILD_TIME - -# Re-copy sources from working tree -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -USER vitess - -# Build Vitess -RUN make build diff --git a/docker/base/Dockerfile.mysql57 b/docker/base/Dockerfile.mysql57 deleted file mode 100644 index 586cf1d94da..00000000000 --- a/docker/base/Dockerfile.mysql57 +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2023 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" - -FROM "${image}" - -# Allows some docker builds to disable CGO -ARG CGO_ENABLED=0 - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Allows docker builds to set the BUILD_GIT_BRANCH -ARG BUILD_GIT_BRANCH - -# Allows docker builds to set the BUILD_GIT_REV -ARG BUILD_GIT_REV - -# Allows docker builds to set the BUILD_TIME -ARG BUILD_TIME - -# Re-copy sources from working tree -COPY --chown=vitess:vitess . 
/vt/src/vitess.io/vitess - -USER vitess - -# Build Vitess -RUN make build diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57 deleted file mode 100644 index fce0412250b..00000000000 --- a/docker/base/Dockerfile.percona57 +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2023 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-percona57" - -FROM "${image}" - -# Allows some docker builds to disable CGO -ARG CGO_ENABLED=0 - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Allows docker builds to set the BUILD_GIT_BRANCH -ARG BUILD_GIT_BRANCH - -# Allows docker builds to set the BUILD_GIT_REV -ARG BUILD_GIT_REV - -# Allows docker builds to set the BUILD_TIME -ARG BUILD_TIME - -# Re-copy sources from working tree -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -USER vitess - -# Build Vitess -RUN make build diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80 deleted file mode 100644 index a236035c511..00000000000 --- a/docker/base/Dockerfile.percona80 +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2023 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-percona80" - -FROM "${image}" - -# Allows some docker builds to disable CGO -ARG CGO_ENABLED=0 - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Allows docker builds to set the BUILD_GIT_BRANCH -ARG BUILD_GIT_BRANCH - -# Allows docker builds to set the BUILD_GIT_REV -ARG BUILD_GIT_REV - -# Allows docker builds to set the BUILD_TIME -ARG BUILD_TIME - -# Re-copy sources from working tree -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -USER vitess - -# Build Vitess -RUN make build diff --git a/docker/k8s/logrotate/Dockerfile b/docker/binaries/logrotate/Dockerfile similarity index 100% rename from docker/k8s/logrotate/Dockerfile rename to docker/binaries/logrotate/Dockerfile diff --git a/docker/k8s/logrotate/logrotate.conf b/docker/binaries/logrotate/logrotate.conf similarity index 100% rename from docker/k8s/logrotate/logrotate.conf rename to docker/binaries/logrotate/logrotate.conf diff --git a/docker/k8s/logrotate/rotate.sh b/docker/binaries/logrotate/rotate.sh similarity index 100% rename from docker/k8s/logrotate/rotate.sh rename to docker/binaries/logrotate/rotate.sh diff --git a/docker/k8s/logtail/Dockerfile b/docker/binaries/logtail/Dockerfile similarity index 100% rename from docker/k8s/logtail/Dockerfile rename to docker/binaries/logtail/Dockerfile diff --git a/docker/k8s/logtail/tail.sh b/docker/binaries/logtail/tail.sh similarity index 100% rename from docker/k8s/logtail/tail.sh rename to docker/binaries/logtail/tail.sh diff --git 
a/docker/k8s/mysqlctl/Dockerfile b/docker/binaries/mysqlctl/Dockerfile similarity index 84% rename from docker/k8s/mysqlctl/Dockerfile rename to docker/binaries/mysqlctl/Dockerfile index 6c449552354..72c80c6363b 100644 --- a/docker/k8s/mysqlctl/Dockerfile +++ b/docker/binaries/mysqlctl/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -28,13 +28,13 @@ RUN mkdir -p /vt/bin && \ mkdir -p /vt/config && mkdir -p /vtdataroot # Copy binaries -COPY --from=k8s /vt/bin/mysqlctl /vt/bin/ +COPY --from=lite /vt/bin/mysqlctl /vt/bin/ # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # copy vitess config -COPY --from=k8s /vt/config /vt/config +COPY --from=lite /vt/config /vt/config # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/mysqlctld/Dockerfile b/docker/binaries/mysqlctld/Dockerfile similarity index 86% rename from docker/k8s/mysqlctld/Dockerfile rename to docker/binaries/mysqlctld/Dockerfile index 6fdb30f012a..81909b9f092 100644 --- a/docker/k8s/mysqlctld/Dockerfile +++ b/docker/binaries/mysqlctld/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -35,13 +35,13 @@ RUN mkdir -p /vt/bin && \ mkdir -p /vt/config && mkdir -p /vtdataroot # Copy binaries -COPY --from=k8s /vt/bin/mysqlctld /vt/bin/ +COPY --from=lite /vt/bin/mysqlctld /vt/bin/ # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # copy vitess config -COPY --from=k8s /vt/config 
/vt/config +COPY --from=lite /vt/config /vt/config # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtadmin/Dockerfile b/docker/binaries/vtadmin/Dockerfile similarity index 86% rename from docker/k8s/vtadmin/Dockerfile rename to docker/binaries/vtadmin/Dockerfile index f952681d3c9..9d30ba565e0 100644 --- a/docker/k8s/vtadmin/Dockerfile +++ b/docker/binaries/vtadmin/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=bullseye-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM node:18-${DEBIAN_VER} as node @@ -23,7 +23,7 @@ FROM node:18-${DEBIAN_VER} as node RUN mkdir -p /vt/web # copy web admin files -COPY --from=k8s /vt/web/vtadmin /vt/web/vtadmin +COPY --from=lite /vt/web/vtadmin /vt/web/vtadmin # install/build/clean web dependencies RUN npm --prefix /vt/web/vtadmin ci && \ @@ -37,10 +37,10 @@ ENV VTADMIN_WEB_PORT=14201 ENV VTROOT /vt # Copy binaries -COPY --from=k8s /vt/bin/vtadmin /vt/bin/ +COPY --from=lite /vt/bin/vtadmin /vt/bin/ # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY --chown=nginx --from=node /vt/web/vtadmin/build /var/www/ COPY --chown=nginx default.conf /etc/nginx/templates/default.conf.template diff --git a/docker/k8s/vtadmin/default.conf b/docker/binaries/vtadmin/default.conf similarity index 100% rename from docker/k8s/vtadmin/default.conf rename to docker/binaries/vtadmin/default.conf diff --git a/docker/k8s/vtbackup/Dockerfile b/docker/binaries/vtbackup/Dockerfile similarity index 84% rename from docker/k8s/vtbackup/Dockerfile rename to docker/binaries/vtbackup/Dockerfile index 4a8b3be9d52..3b429928cb4 100644 --- a/docker/k8s/vtbackup/Dockerfile +++ b/docker/binaries/vtbackup/Dockerfile @@ -16,7 +16,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM 
vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -28,13 +28,13 @@ ENV VTDATAROOT /vtdataroot RUN mkdir -p /vt/bin && mkdir -p /vtdataroot # Copy binaries -COPY --from=k8s /vt/bin/vtbackup /vt/bin/ +COPY --from=lite /vt/bin/vtbackup /vt/bin/ # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # Copy vitess config -COPY --from=k8s /vt/config /vt/config +COPY --from=lite /vt/config /vt/config # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtctl/Dockerfile b/docker/binaries/vtctl/Dockerfile similarity index 86% rename from docker/k8s/vtctl/Dockerfile rename to docker/binaries/vtctl/Dockerfile index cce3b6dc63c..21ca6aaf77c 100644 --- a/docker/k8s/vtctl/Dockerfile +++ b/docker/binaries/vtctl/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -26,10 +26,10 @@ ENV VTROOT /vt RUN mkdir -p /vt/bin && mkdir -p /vtdataroot # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # Copy binaries -COPY --from=k8s /vt/bin/vtctl /vt/bin/ +COPY --from=lite /vt/bin/vtctl /vt/bin/ # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtctlclient/Dockerfile b/docker/binaries/vtctlclient/Dockerfile similarity index 92% rename from docker/k8s/vtctlclient/Dockerfile rename to docker/binaries/vtctlclient/Dockerfile index 17eab700eb4..c3d67cb8e4b 100644 --- a/docker/k8s/vtctlclient/Dockerfile +++ b/docker/binaries/vtctlclient/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG 
DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -26,7 +26,7 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* -COPY --from=k8s /vt/bin/vtctlclient /usr/bin/ +COPY --from=lite /vt/bin/vtctlclient /usr/bin/ # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtctld/Dockerfile b/docker/binaries/vtctld/Dockerfile similarity index 84% rename from docker/k8s/vtctld/Dockerfile rename to docker/binaries/vtctld/Dockerfile index 5daaf929486..a33297fe0ff 100644 --- a/docker/k8s/vtctld/Dockerfile +++ b/docker/binaries/vtctld/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -27,13 +27,13 @@ RUN mkdir -p /vt/bin && \ mkdir -p /vt/web && mkdir -p /vtdataroot # Copy binaries -COPY --from=k8s /vt/bin/vtctld /vt/bin/ +COPY --from=lite /vt/bin/vtctld /vt/bin/ # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # copy web admin files -COPY --from=k8s /vt/web /vt/web +COPY --from=lite /vt/web /vt/web # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtexplain/Dockerfile b/docker/binaries/vtexplain/Dockerfile similarity index 92% rename from docker/k8s/vtexplain/Dockerfile rename to docker/binaries/vtexplain/Dockerfile index fb2f375d41c..58e2bd040db 100644 --- a/docker/k8s/vtexplain/Dockerfile +++ b/docker/binaries/vtexplain/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/base:${VT_BASE_VER} AS base +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -26,7 +26,7 @@ ENV VTROOT /vt RUN mkdir -p /vt/bin && mkdir -p 
/vtdataroot # Copy binaries -COPY --from=base /vt/bin/vtexplain /vt/bin/ +COPY --from=lite /vt/bin/vtexplain /vt/bin/ # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtgate/Dockerfile b/docker/binaries/vtgate/Dockerfile similarity index 86% rename from docker/k8s/vtgate/Dockerfile rename to docker/binaries/vtgate/Dockerfile index 3829227e2fa..761700c1673 100644 --- a/docker/k8s/vtgate/Dockerfile +++ b/docker/binaries/vtgate/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -26,10 +26,10 @@ ENV VTROOT /vt RUN mkdir -p /vt/bin && mkdir -p /vtdataroot # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # Copy binaries -COPY --from=k8s /vt/bin/vtgate /vt/bin/ +COPY --from=lite /vt/bin/vtgate /vt/bin/ # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vtorc/Dockerfile b/docker/binaries/vtorc/Dockerfile similarity index 86% rename from docker/k8s/vtorc/Dockerfile rename to docker/binaries/vtorc/Dockerfile index b62b30ee676..b24588f8ee4 100644 --- a/docker/k8s/vtorc/Dockerfile +++ b/docker/binaries/vtorc/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -26,10 +26,10 @@ ENV VTROOT /vt RUN mkdir -p /vt/bin && mkdir -p /vtdataroot # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # Copy binaries -COPY --from=k8s /vt/bin/vtorc /vt/bin/ +COPY --from=lite /vt/bin/vtorc /vt/bin/ # add 
vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/k8s/vttablet/Dockerfile b/docker/binaries/vttablet/Dockerfile similarity index 83% rename from docker/k8s/vttablet/Dockerfile rename to docker/binaries/vttablet/Dockerfile index dd504d7860d..1c6ff92a588 100644 --- a/docker/k8s/vttablet/Dockerfile +++ b/docker/binaries/vttablet/Dockerfile @@ -15,7 +15,7 @@ ARG VT_BASE_VER=latest ARG DEBIAN_VER=stable-slim -FROM vitess/k8s:${VT_BASE_VER} AS k8s +FROM vitess/lite:${VT_BASE_VER} AS lite FROM debian:${DEBIAN_VER} @@ -27,11 +27,11 @@ ENV VTDATAROOT /vtdataroot RUN mkdir -p /vt/bin && mkdir -p /vtdataroot # Copy binaries -COPY --from=k8s /vt/bin/vttablet /vt/bin/ -COPY --from=k8s /vt/bin/vtctlclient /vt/bin/ +COPY --from=lite /vt/bin/vttablet /vt/bin/ +COPY --from=lite /vt/bin/vtctlclient /vt/bin/ # Copy certs to allow https calls -COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=lite /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt # add vitess user/group and add permissions RUN groupadd -r --gid 2000 vitess && \ diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md index e363dfc0ded..8d80bbea69b 100644 --- a/docker/bootstrap/CHANGELOG.md +++ b/docker/bootstrap/CHANGELOG.md @@ -92,4 +92,36 @@ List of changes between bootstrap image versions. 
## [24] - 2023-10-10 ### Changes -- Update build to golang 1.21.3 \ No newline at end of file +- Update build to golang 1.21.3 + +## [25] - 2023-11-08 +### Changes +- Update build to golang 1.21.4 + +## [26] - 2023-12-06 +### Changes +- Update build to golang 1.21.5 + +## [27] - 2024-01-10 +### Changes +- Update build to golang 1.21.6 + +## [28] - 2024-02-07 +### Changes +- Update build to golang 1.22.0 + +## [29] - 2024-03-05 +### Changes +- Update build to golang 1.22.1 + +## [30] - 2024-04-01 +### Changes +- Move the bootstrap phase to the common image so other Dockerfiles don't have to rely on the version based tags. + +## [31] - 2024-04-03 +### Changes +- Update build to golang 1.22.2 + +## [32] - 2024-05-07 +### Changes +- Update build to golang 1.22.3 \ No newline at end of file diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index 39b0c16566a..12dc2d98407 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -1,9 +1,10 @@ -FROM --platform=linux/amd64 golang:1.21.3-bullseye +FROM --platform=linux/amd64 golang:1.22.3-bullseye # Install Vitess build dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ # TODO(mberlin): Group these to make it easier to understand which library actually requires them. ant \ + ca-certificates \ chromium \ curl \ default-jdk \ @@ -46,5 +47,11 @@ VOLUME /vt/vtdataroot # Add compatibility to the previous layout for now RUN su vitess -c "mkdir -p /vt/src/vitess.io/vitess/bin && rm -rf /vt/bin && ln -s /vt/src/vitess.io/vitess/bin /vt/bin" +# Bootstrap Vitess +WORKDIR /vt/src/vitess.io/vitess + +USER vitess +RUN ./bootstrap.sh + # If the user doesn't specify a command, load a shell. 
CMD ["/bin/bash"] diff --git a/docker/bootstrap/Dockerfile.mysql57 b/docker/bootstrap/Dockerfile.mysql57 index 4e9b335ddac..c5be81c1cdc 100644 --- a/docker/bootstrap/Dockerfile.mysql57 +++ b/docker/bootstrap/Dockerfile.mysql57 @@ -3,9 +3,11 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM --platform=linux/amd64 "${image}" +USER root + # Install MySQL 5.7 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates && \ - for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \ + for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com A8D3785C && break; done && \ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' && \ for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \ @@ -18,8 +20,4 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server libmysqlclient-dev libdbd-mysql-perl rsync libev4 percona-xtrabackup-24 && \ rm -rf /var/lib/apt/lists/* -# Bootstrap Vitess -WORKDIR /vt/src/vitess.io/vitess - USER vitess -RUN ./bootstrap.sh diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80 index 46dec046411..b4fec6b7d11 100644 --- a/docker/bootstrap/Dockerfile.mysql80 +++ b/docker/bootstrap/Dockerfile.mysql80 @@ -3,9 +3,11 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM --platform=linux/amd64 "${image}" +USER root + # Install MySQL 8.0 RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \ - for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 
467B942D3A79BD29 && break; done && \ + for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com A8D3785C && break; done && \ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' && \ for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \ @@ -18,8 +20,4 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyser DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server libmysqlclient-dev libdbd-mysql-perl rsync libev4 libcurl4-openssl-dev percona-xtrabackup-80 && \ rm -rf /var/lib/apt/lists/* -# Bootstrap Vitess -WORKDIR /vt/src/vitess.io/vitess - USER vitess -RUN ./bootstrap.sh diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57 index febe09fd8bf..96a23da221a 100644 --- a/docker/bootstrap/Dockerfile.percona57 +++ b/docker/bootstrap/Dockerfile.percona57 @@ -3,6 +3,8 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM --platform=linux/amd64 "${image}" +USER root + # Install Percona 5.7 RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \ @@ -16,8 +18,4 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \ rm -rf /var/lib/apt/lists/* -# Bootstrap Vitess -WORKDIR /vt/src/vitess.io/vitess - USER vitess -RUN ./bootstrap.sh diff --git a/docker/bootstrap/Dockerfile.percona80 b/docker/bootstrap/Dockerfile.percona80 index f4d9f1c0458..147b988b002 100644 --- a/docker/bootstrap/Dockerfile.percona80 +++ b/docker/bootstrap/Dockerfile.percona80 @@ -3,6 +3,8 @@ ARG 
image="vitess/bootstrap:${bootstrap_version}-common" FROM --platform=linux/amd64 "${image}" +USER root + # Install Percona 8.0 RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done \ && echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \ @@ -31,8 +33,4 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c && apt-get install -y --no-install-recommends percona-xtrabackup-80 \ && rm -rf /var/lib/apt/lists/* -# Bootstrap Vitess -WORKDIR /vt/src/vitess.io/vitess - USER vitess -RUN ./bootstrap.sh diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile deleted file mode 100644 index 3ba46595a83..00000000000 --- a/docker/k8s/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -ARG VT_BASE_VER=latest -ARG DEBIAN_VER=stable-slim - -FROM vitess/base:${VT_BASE_VER} AS base - -FROM debian:${DEBIAN_VER} - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vtdataroot - -# Prepare directory structure. 
-RUN mkdir -p /vt && \ - mkdir -p /vt/bin && \ - mkdir -p /vt/config && \ - mkdir -p /vt/web && \ - mkdir -p /vtdataroot/tabletdata - -# Copy CA certs for https calls -COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt - -# Copy binaries -COPY --from=base /vt/bin/mysqlctld /vt/bin/ -COPY --from=base /vt/bin/mysqlctl /vt/bin/ -COPY --from=base /vt/bin/vtctld /vt/bin/ -COPY --from=base /vt/bin/vtctl /vt/bin/ -COPY --from=base /vt/bin/vtctlclient /vt/bin/ -COPY --from=base /vt/bin/vtgate /vt/bin/ -COPY --from=base /vt/bin/vttablet /vt/bin/ -COPY --from=base /vt/bin/vtbackup /vt/bin/ -COPY --from=base /vt/bin/vtadmin /vt/bin/ -COPY --from=base /vt/bin/vtorc /vt/bin/ - -# copy web admin files -COPY --from=base $VTROOT/web /vt/web/ - -# copy vitess config -COPY --from=base $VTROOT/config/init_db.sql /vt/config/ - -# my.cnf include files -COPY --from=base $VTROOT/config/mycnf /vt/config/mycnf - -# add vitess user and add permissions -RUN groupadd -r --gid 2000 vitess && useradd -r -g vitess --uid 1000 vitess && \ - chown -R vitess:vitess /vt; diff --git a/docker/lite/Dockerfile b/docker/lite/Dockerfile deleted file mode 120000 index e058f627eca..00000000000 --- a/docker/lite/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -Dockerfile.mysql80 \ No newline at end of file diff --git a/docker/lite/Dockerfile b/docker/lite/Dockerfile new file mode 100644 index 00000000000..49a1ec1c8f6 --- /dev/null +++ b/docker/lite/Dockerfile @@ -0,0 +1,67 @@ +# Copyright 2019 The Vitess Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# NOTE: We have to build the Vitess binaries from scratch instead of sharing +# a base image because Docker Hub dropped the feature we relied upon to +# ensure images contain the right binaries. + +# Use a temporary layer for the build stage. +ARG bootstrap_version=32 +ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" + +FROM "${image}" AS builder + +# Allows docker builds to set the BUILD_NUMBER +ARG BUILD_NUMBER + +# Re-copy sources from working tree. +COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess + +# Build and install Vitess in a temporary output directory. +USER vitess + +RUN make install PREFIX=/vt/install + +# Start over and build the final image. +FROM debian:bullseye-slim + +# Install mysqlbinlog +RUN apt-get update && apt-get -y install libssl1.1 gnupg +COPY --from=builder /usr/bin/mysqlbinlog /usr/bin/mysqlbinlog + +# Install xtrabackup +RUN apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 +RUN echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list +RUN apt-get update -y +RUN apt-get install -y percona-xtrabackup-80 + +# Set up Vitess user and directory tree. +RUN groupadd -r vitess && useradd -r -g vitess vitess +RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt + +# Set up Vitess environment (just enough to run pre-built Go binaries) +ENV VTROOT /vt/src/vitess.io/vitess +ENV VTDATAROOT /vt/vtdataroot +ENV PATH $VTROOT/bin:$PATH + +# Copy artifacts from builder layer. 
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=builder --chown=vitess:vitess /vt/install /vt +COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin +COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/config/init_db.sql /vt/config/ +COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/config/mycnf /vt/config/ + +# Create mount point for actual data (e.g. MySQL data dir) +VOLUME /vt/vtdataroot +USER vitess diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57 deleted file mode 100644 index 8c07b1a4411..00000000000 --- a/docker/lite/Dockerfile.mysql57 +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. 
-USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM debian:bullseye-slim - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql57 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80 deleted file mode 100644 index bc4ad7861c8..00000000000 --- a/docker/lite/Dockerfile.mysql80 +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. 
-ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM debian:bullseye-slim - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql80 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57 deleted file mode 100644 index 39d31542fe8..00000000000 --- a/docker/lite/Dockerfile.percona57 +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-percona57" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM debian:bullseye-slim - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh percona57 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80 deleted file mode 100644 index e20359ea300..00000000000 --- a/docker/lite/Dockerfile.percona80 +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-percona80" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM debian:bullseye-slim - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh percona80 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -# Create mount point for actual data (e.g. 
MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing deleted file mode 100644 index 118db24699b..00000000000 --- a/docker/lite/Dockerfile.testing +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install-testing PREFIX=/vt/install - -# Start over and build the final image. -FROM debian:bullseye-slim - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql57 - -# Set up Vitess user and directory tree. 
-RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57 deleted file mode 100644 index 08ae84cfeb0..00000000000 --- a/docker/lite/Dockerfile.ubi7.mysql57 +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. 
-USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM registry.access.redhat.com/ubi7/ubi:latest - -# Install keys and dependencies -RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 -RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ - && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ - && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ - && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ - && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ - && gpg --export --armor A4A9406876FCBD3C456770C88C718D3B5072E1F5 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL \ - && rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 ${GNUPGHOME}/RPM-GPG-KEY-MySQL /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/mysqlrepo.rpm https://dev.mysql.com/get/mysql80-community-release-el7-3.noarch.rpm \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/perconarepo.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm \ - && rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm -RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ - && yum 
install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace -RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ - jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \ -# Can't use alwaysprompt=no here, since we need to pick up deps -# No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs --enablerepo mysql57-community --disablerepo mysql80-community \ - mysql-community-client mysql-community-server \ -# Have to use hacks to ignore conflicts on /etc/my.cnf install - && mkdir -p /tmp/1 \ - && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql57-community --disablerepo mysql80-community percona-xtrabackup-24 percona-toolkit \ - && rpm -Uvh --replacefiles /tmp/1/*rpm \ - && rm -rf /tmp/1 \ - && yum clean all \ - && yum clean all --enablerepo mysql57-community --disablerepo mysql80-community \ - && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug - -# Set up Vitess user and directory tree. -RUN groupadd -g 1001 -r vitess && useradd -r -u 1001 -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -RUN mkdir -p /licenses -COPY LICENSE /licenses - -# Create mount point for actual data (e.g. 
MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -LABEL name="Vitess Lite image - MySQL Community Server 5.7" \ - io.k8s.display-name="Vitess Lite image - MySQL Community Server 5.7" \ - maintainer="cncf-vitess-maintainers@lists.cncf.io" \ - vendor="CNCF" \ - version="6.0.0" \ - release="1" \ - summary="Vitess base container image, containing Vitess components along with MySQL Community Server 5.7" \ - description="Vitess base container image, containing Vitess components along with MySQL Community Server 5.7" \ - io.k8s.description="Vitess base container image, containing Vitess components along with MySQL Community Server 5.7" \ - distribution-scope="public" \ - url="https://vitess.io" diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80 deleted file mode 100644 index c11ac4a6ed4..00000000000 --- a/docker/lite/Dockerfile.ubi7.mysql80 +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. 
-COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM registry.access.redhat.com/ubi7/ubi:latest - -# Install keys and dependencies -RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 -RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ - && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ - && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ - && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ - && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ - && gpg --export --armor A4A9406876FCBD3C456770C88C718D3B5072E1F5 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL \ - && rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 ${GNUPGHOME}/RPM-GPG-KEY-MySQL /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/mysqlrepo.rpm https://dev.mysql.com/get/mysql80-community-release-el7-3.noarch.rpm \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/perconarepo.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm \ - && rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm -RUN echo 
H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace -RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ - jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \ -# Can't use alwaysprompt=no here, since we need to pick up deps -# No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs --enablerepo mysql80-community --disablerepo mysql57-community \ - mysql-community-client mysql-community-server \ -# Have to use hacks to ignore conflicts on /etc/my.cnf install - && mkdir -p /tmp/1 \ - && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community --disablerepo mysql57-community percona-xtrabackup-80 percona-toolkit \ - && rpm -Uvh --replacefiles /tmp/1/*rpm \ - && rm -rf /tmp/1 \ - && yum clean all \ - && yum clean all --enablerepo mysql80-community --disablerepo mysql57-community \ - && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug - -# Set up Vitess user and directory tree. -RUN groupadd -g 1001 -r vitess && useradd -r -u 1001 -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. 
-COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -RUN mkdir -p /licenses -COPY LICENSE /licenses - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -LABEL name="Vitess Lite image - MySQL Community Server 8.0" \ - io.k8s.display-name="Vitess Lite image - MySQL Community Server 8.0" \ - maintainer="cncf-vitess-maintainers@lists.cncf.io" \ - vendor="CNCF" \ - version="6.0.0" \ - release="1" \ - summary="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - description="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - io.k8s.description="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - distribution-scope="public" \ - url="https://vitess.io" diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57 deleted file mode 100644 index ef55a6b527a..00000000000 --- a/docker/lite/Dockerfile.ubi7.percona57 +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. 
- -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-percona57" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM registry.access.redhat.com/ubi7/ubi:latest - -# Install keys and dependencies -RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ - && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \ - && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ - && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ - && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ - && rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/perconarepo.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm \ - && rpmkeys --checksig /tmp/perconarepo.rpm \ - && rpm -Uvh /tmp/perconarepo.rpm \ - && rm -f /tmp/perconarepo.rpm -RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ - && yum install -y --setopt=alwaysprompt=no 
--setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace -RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ - jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \ -# Can't use alwaysprompt=no here, since we need to pick up deps -# No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs Percona-Server-server-57 percona-xtrabackup-24 percona-toolkit \ - && yum clean all \ - && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug - -# Set up Vitess user and directory tree. -RUN groupadd -g 1001 -r vitess && useradd -r -u 1001 -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -RUN mkdir -p /licenses -COPY LICENSE /licenses - -# Create mount point for actual data (e.g. 
MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -LABEL name="Vitess Lite image - Percona Server 5.7" \ - io.k8s.display-name="Vitess Lite image - Percona Server 5.7" \ - maintainer="cncf-vitess-maintainers@lists.cncf.io" \ - vendor="CNCF" \ - version="6.0.0" \ - release="1" \ - summary="Vitess base container image, containing Vitess components along with Percona Server 5.7" \ - description="Vitess base container image, containing Vitess components along with Percona Server 5.7" \ - io.k8s.description="Vitess base container image, containing Vitess components along with Percona Server 5.7" \ - distribution-scope="public" \ - url="https://vitess.io" diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80 deleted file mode 100644 index 61092685177..00000000000 --- a/docker/lite/Dockerfile.ubi7.percona80 +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-percona80" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . 
/vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. -FROM registry.access.redhat.com/ubi7/ubi:latest - -# Install keys and dependencies -RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ - && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \ - && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ - && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ - && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ - && rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/perconarepo.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm \ - && rpmkeys --checksig /tmp/perconarepo.rpm \ - && rpm -Uvh /tmp/perconarepo.rpm \ - && rm -f /tmp/perconarepo.rpm -RUN echo H4sICH852V8CA2ZvbwC1jr0OgkAQhPt7CgrbY7W6xOQaDaEgRqKxMMTiOFYg/F2WI9G39xCttKGg2UxmJrNfokWqeryxVjUo99ja45kLj3s757IxGqiWhbVmC9CURB352rW63u8oh0mCAHdWY1uRLoDlJtcF6kpuRlnhU97LGt0CoNVgqhLINNxFcIoPPIxDHgVX/v3OsFVpjZlcM5ZoMZhMWex/ES9TMIPyM7UYKj4sqT+kwdufAToNLcP5AvRgmV7zAQAA | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c7base --enablerepo c7updates --enablerepo c7extras install libev gperftools-libs numactl-libs sysstat strace -RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates 
gnupg libaio libcurl \ - jemalloc gperftools-libs procps-ng rsync wget openssl hostname curl tzdata make \ - && percona-release setup ps80 \ -# Without this pause, the subsequent yum install fails downloads -# regularly - && sleep 5 \ -# Can't use alwaysprompt=no here, since we need to pick up deps -# No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs percona-server-server percona-xtrabackup-80 percona-toolkit \ - && yum clean all \ - && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug - -# Set up Vitess user and directory tree. -RUN groupadd -g 1001 -r vitess && useradd -r -u 1001 -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -RUN mkdir -p /licenses -COPY LICENSE /licenses - -# Create mount point for actual data (e.g. 
MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -LABEL name="Vitess Lite image - Percona Server 8.0" \ - io.k8s.display-name="Vitess Lite image - Percona Server 8.0" \ - maintainer="cncf-vitess-maintainers@lists.cncf.io" \ - vendor="CNCF" \ - version="6.0.0" \ - release="1" \ - summary="Vitess base container image, containing Vitess components along with Percona Server 8.0" \ - description="Vitess base container image, containing Vitess components along with Percona Server 8.0" \ - io.k8s.description="Vitess base container image, containing Vitess components along with Percona Server 8.0" \ - distribution-scope="public" \ - url="https://vitess.io" diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80 deleted file mode 100644 index 5c2e99c3b51..00000000000 --- a/docker/lite/Dockerfile.ubi8.arm64.mysql80 +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2022 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . 
/vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make cross-install PREFIX=/vt/install GOOS=linux GOARCH=arm64 - -# Start over and build the final image. -FROM registry.access.redhat.com/ubi8/ubi:latest - -# Install keys and dependencies -RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 -RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ - && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \ - # No xtrabackup packages for aarch64 yet, but still keeping this here - && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ - && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ - && gpg --export --armor 99DB70FAE1D7CE227FB6488205B555B38483C65D > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 \ - && gpg --export --armor 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 > ${GNUPGHOME}/RPM-GPG-KEY-EPEL-8 \ - && gpg --export --armor 3A79BD29 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 \ - && gpg --export --armor A4A9406876FCBD3C456770C88C718D3B5072E1F5 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 \ - && rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/mysqlrepo.rpm https://dev.mysql.com/get/mysql80-community-release-el8-1.noarch.rpm \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/perconarepo.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm \ - && rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && 
rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm -RUN echo H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm -RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ - procps-ng rsync wget openssl hostname curl tzdata make \ -# Can't use alwaysprompt=no here, since we need to pick up deps -# No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs --enablerepo mysql80-community \ - mysql-community-client mysql-community-server \ -# Have to use hacks to ignore conflicts on /etc/my.cnf install - && mkdir -p /tmp/1 \ -# Enable xtrabackup and percona toolkit repo using the new percona-release tool, commented out for now (no aarch64 packages) - #&& /usr/bin/percona-release enable-only pxb-80 \ - #&& /usr/bin/percona-release enable pt \ - #&& yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community 
percona-xtrabackup-80 percona-toolkit \ - #&& rpm -Uvh --replacefiles /tmp/1/*rpm \ - && rm -rf /tmp/1 \ - && yum clean all \ - && yum clean all --enablerepo mysql80-community \ - && rm -rf /etc/my.cnf /var/lib/mysql /tmp/gpg /sbin/mysqld-debug - -# Set up Vitess user and directory tree. -RUN groupadd -g 1001 -r vitess && useradd -r -u 1001 -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -RUN mkdir -p /licenses -COPY LICENSE /licenses - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -LABEL name="Vitess Lite image - MySQL Community Server 8.0" \ - io.k8s.display-name="Vitess Lite image - MySQL Community Server 8.0" \ - maintainer="cncf-vitess-maintainers@lists.cncf.io" \ - vendor="CNCF" \ - version="13.0.0" \ - release="1" \ - summary="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - description="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - io.k8s.description="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - distribution-scope="public" \ - url="https://vitess.io" diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80 deleted file mode 100644 index 094dc8fa712..00000000000 --- a/docker/lite/Dockerfile.ubi8.mysql80 +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2022 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install PREFIX=/vt/install - -# Start over and build the final image. 
-FROM registry.access.redhat.com/ubi8/ubi:latest - -# Install keys and dependencies -RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022 -RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ - && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \ - && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ - && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ - && gpg --export --armor 99DB70FAE1D7CE227FB6488205B555B38483C65D > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 \ - && gpg --export --armor 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 > ${GNUPGHOME}/RPM-GPG-KEY-EPEL-8 \ - && gpg --export --armor 3A79BD29 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 \ - && gpg --export --armor A4A9406876FCBD3C456770C88C718D3B5072E1F5 > ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 \ - && rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 ${GNUPGHOME}/RPM-GPG-KEY-CentOS-8 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.1 ${GNUPGHOME}/RPM-GPG-KEY-MySQL.2 /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release ${GNUPGHOME}/RPM-GPG-KEY-EPEL-8 \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/mysqlrepo.rpm https://dev.mysql.com/get/mysql80-community-release-el8-1.noarch.rpm \ - && curl -L --retry-delay 10 --retry 3 -o /tmp/perconarepo.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm \ - && rpmkeys --checksig /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rpm -Uvh /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm \ - && rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm -RUN echo 
H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/g/gperftools-libs-2.7-9.el8.x86_64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/j/jemalloc-5.2.1-2.el8.x86_64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/l/libunwind-1.3.1-3.el8.x86_64.rpm -RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \ - && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \ - procps-ng rsync wget openssl hostname curl tzdata make \ -# Can't use alwaysprompt=no here, since we need to pick up deps -# No way to separate key imports and accept deps separately in yum/dnf - && yum install -y --setopt=tsflags=nodocs --enablerepo mysql80-community \ - mysql-community-client mysql-community-server \ -# Have to use hacks to ignore conflicts on /etc/my.cnf install - && mkdir -p /tmp/1 \ - && /usr/bin/percona-release enable-only pxb-80 \ - && /usr/bin/percona-release enable pt \ - && yum install -y --setopt=alwaysprompt=no --downloadonly --downloaddir=/tmp/1 --enablerepo mysql80-community percona-xtrabackup-80 percona-toolkit \ - && rpm -Uvh --replacefiles /tmp/1/*rpm \ - && rm -rf /tmp/1 \ - && yum clean all \ - && yum clean all --enablerepo mysql80-community \ - && rm -rf /etc/my.cnf /var/lib/mysql 
/tmp/gpg /sbin/mysqld-debug - -# Set up Vitess user and directory tree. -RUN groupadd -g 1001 -r vitess && useradd -r -u 1001 -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt/src/vitess.io/vitess -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt -COPY --from=builder --chown=vitess:vitess /vt/src/vitess.io/vitess/web/vtadmin /vt/web/vtadmin - -RUN mkdir -p /licenses -COPY LICENSE /licenses - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -LABEL name="Vitess Lite image - MySQL Community Server 8.0" \ - io.k8s.display-name="Vitess Lite image - MySQL Community Server 8.0" \ - maintainer="cncf-vitess-maintainers@lists.cncf.io" \ - vendor="CNCF" \ - version="13.0.0" \ - release="1" \ - summary="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - description="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - io.k8s.description="Vitess base container image, containing Vitess components along with MySQL Community Server 8.0" \ - distribution-scope="public" \ - url="https://vitess.io" diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile index 6643799842d..b7d5b509562 100644 --- a/docker/local/Dockerfile +++ b/docker/local/Dockerfile @@ -1,4 +1,4 @@ -ARG bootstrap_version=24 +ARG bootstrap_version=32 ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" @@ -7,8 +7,8 @@ RUN apt-get update RUN apt-get install -y sudo curl vim jq # Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql57 +COPY docker/utils/install_dependencies.sh /vt/dist/install_dependencies.sh +RUN 
/vt/dist/install_dependencies.sh mysql80 COPY docker/local/install_local_dependencies.sh /vt/dist/install_local_dependencies.sh RUN /vt/dist/install_local_dependencies.sh diff --git a/docker/mini/Dockerfile b/docker/mini/Dockerfile index 469fbef8d9e..0bb3aac7561 100644 --- a/docker/mini/Dockerfile +++ b/docker/mini/Dockerfile @@ -12,17 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -FROM vitess/base AS base - FROM vitess/lite USER root +# Install dependencies +COPY docker/utils/install_dependencies.sh /vt/dist/install_dependencies.sh +RUN /vt/dist/install_dependencies.sh mysql80 + RUN apt-get update RUN apt-get install -y sudo curl vim python3 jq sqlite3 RUN ln -s /usr/bin/python3 /usr/bin/python @@ -40,9 +37,6 @@ COPY docker/mini/vttablet-mini-up.sh /vt/dist/scripts/vttablet-mini-up.sh RUN echo "hostname=127.0.0.1" >> /vt/dist/scripts/env.sh RUN cat /vt/dist/scripts/env.sh | egrep "^alias" >> /etc/bash.bashrc -COPY --from=base /vt/bin/vtctl /vt/bin/ -COPY --from=base /vt/bin/mysqlctl /vt/bin/ - # Set up Vitess environment (just enough to run pre-built Go binaries) ENV VTROOT /vt/src/vitess.io/vitess ENV VTDATAROOT /vt/vtdataroot diff --git a/docker/mysql/Dockerfile b/docker/mysql/Dockerfile deleted file mode 120000 index 6671907eaf2..00000000000 --- a/docker/mysql/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -Dockerfile.8.0.30 \ No newline at end of file diff --git a/docker/mysql/Dockerfile.8.0.30 b/docker/mysql/Dockerfile.8.0.30 deleted file mode 100644 index 5b5e68263fe..00000000000 --- a/docker/mysql/Dockerfile.8.0.30 +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2023 The Vitess Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM debian:bullseye-slim - -RUN mkdir -p /vt/dist - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql80 8.0.30 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -VOLUME /vt/vtdataroot -USER vitess \ No newline at end of file diff --git a/docker/mysql/Dockerfile.8.0.34 b/docker/mysql/Dockerfile.8.0.34 deleted file mode 100644 index 5bd81d9802c..00000000000 --- a/docker/mysql/Dockerfile.8.0.34 +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2023 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -FROM debian:bullseye-slim - -RUN mkdir -p /vt/dist - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql80 8.0.34 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -VOLUME /vt/vtdataroot -USER vitess \ No newline at end of file diff --git a/docker/root/Dockerfile b/docker/root/Dockerfile deleted file mode 100644 index 37fc3d24a6c..00000000000 --- a/docker/root/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2019 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This is an image that just changes the default user to root. -# It's useful for cases when the 'docker run --user' flag can't be used, -# for example when running containers through a scheduler like Kubernetes. -FROM vitess/base - -USER root diff --git a/docker/test/run.sh b/docker/test/run.sh index e41a529c51d..dd2a0cbc7d5 100755 --- a/docker/test/run.sh +++ b/docker/test/run.sh @@ -30,16 +30,16 @@ # # Examples: # a) Start an interactive shell within the Docker image. -# $ docker/test/run.sh mysql57 bash +# $ docker/test/run.sh mysql80 bash # # b) Build the code and run a test. -# $ docker/test/run.sh mysql57 "make build && ./test/keyrange_test.py -v" +# $ docker/test/run.sh mysql80 "make build && ./test/keyrange_test.py -v" # -# c) Cache the output of the command e.g. 
cache "make build" as we do for Travis CI. -# $ docker/test/run.sh --create_docker_cache vitess/bootstrap:rm_mysql57_test_cache_do_NOT_push mysql57 "make build" +# c) Cache the output of the command e.g. cache "make build" as we do for CI. +# $ docker/test/run.sh --create_docker_cache vitess/bootstrap:rm_mysql80_test_cache_do_NOT_push mysql80 "make build" # # d) Run the test using a cache image. -# $ docker/test/run.sh --use_docker_cache vitess/bootstrap:rm_mysql57_test_cache_do_NOT_push mysql57 "./test/keyrange_test.py -v" +# $ docker/test/run.sh --use_docker_cache vitess/bootstrap:rm_mysql80_test_cache_do_NOT_push mysql80 "./test/keyrange_test.py -v" # Functions. diff --git a/docker/lite/install_dependencies.sh b/docker/utils/install_dependencies.sh similarity index 99% rename from docker/lite/install_dependencies.sh rename to docker/utils/install_dependencies.sh index 0cbc47fd9cf..b686c2418bf 100755 --- a/docker/lite/install_dependencies.sh +++ b/docker/utils/install_dependencies.sh @@ -146,7 +146,7 @@ esac # Get GPG keys for extra apt repositories. # repo.mysql.com add_apt_key 8C718D3B5072E1F5 -add_apt_key 467B942D3A79BD29 +add_apt_key A8D3785C # All flavors include Percona XtraBackup (from repo.percona.com). add_apt_key 9334A25F8507EFA5 diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57 deleted file mode 100644 index 195f0cd62e4..00000000000 --- a/docker/vttestserver/Dockerfile.mysql57 +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2021 The Vitess Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: We have to build the Vitess binaries from scratch instead of sharing -# a base image because Docker Hub dropped the feature we relied upon to -# ensure images contain the right binaries. - -# Use a temporary layer for the build stage. -ARG bootstrap_version=24 -ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" - -FROM "${image}" AS builder - -# Allows docker builds to set the BUILD_NUMBER -ARG BUILD_NUMBER - -# Re-copy sources from working tree. -COPY --chown=vitess:vitess . /vt/src/vitess.io/vitess - -# Build and install Vitess in a temporary output directory. -USER vitess -RUN make install-testing PREFIX=/vt/install - -# Start over and build the final image. -FROM debian:bullseye-slim - -# Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh -RUN /vt/dist/install_dependencies.sh mysql57 - -# Set up Vitess user and directory tree. -RUN groupadd -r vitess && useradd -r -g vitess vitess -RUN mkdir -p /vt/vtdataroot && chown -R vitess:vitess /vt - -# Set up Vitess environment (just enough to run pre-built Go binaries) -ENV VTROOT /vt -ENV VTDATAROOT /vt/vtdataroot -ENV PATH $VTROOT/bin:$PATH - -# Copy artifacts from builder layer. -COPY --from=builder --chown=vitess:vitess /vt/install /vt - -# Create mount point for actual data (e.g. MySQL data dir) -VOLUME /vt/vtdataroot -USER vitess - -COPY docker/vttestserver/setup_vschema_folder.sh /vt/setup_vschema_folder.sh -COPY docker/vttestserver/run.sh /vt/run.sh - -CMD /vt/run.sh "5.7.9-vitess" diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80 index 2dcc190a957..14fd0466dcb 100644 --- a/docker/vttestserver/Dockerfile.mysql80 +++ b/docker/vttestserver/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=24 +ARG bootstrap_version=32 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -36,7 +36,7 @@ RUN make install-testing PREFIX=/vt/install FROM debian:bullseye-slim # Install dependencies -COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh +COPY docker/utils/install_dependencies.sh /vt/dist/install_dependencies.sh RUN /vt/dist/install_dependencies.sh mysql80 # Set up Vitess user and directory tree. @@ -58,4 +58,4 @@ USER vitess COPY docker/vttestserver/setup_vschema_folder.sh /vt/setup_vschema_folder.sh COPY docker/vttestserver/run.sh /vt/run.sh -CMD /vt/run.sh "8.0.21-vitess" +CMD /vt/run.sh "8.0.30-Vitess" diff --git a/docker/vttestserver/run.sh b/docker/vttestserver/run.sh index 1ff79153af5..e3a99ab38f4 100755 --- a/docker/vttestserver/run.sh +++ b/docker/vttestserver/run.sh @@ -35,6 +35,7 @@ rm -vf "$VTDATAROOT"/"$tablet_dir"/{mysql.sock,mysql.sock.lock} --keyspaces "$KEYSPACES" \ --num_shards "$NUM_SHARDS" \ --mysql_bind_host "${MYSQL_BIND_HOST:-127.0.0.1}" \ + --vtcombo-bind-host "${VTCOMBO_BIND_HOST:-127.0.0.1}" \ --mysql_server_version "${MYSQL_SERVER_VERSION:-$1}" \ --charset "${CHARSET:-utf8mb4}" \ --foreign_key_mode "${FOREIGN_KEY_MODE:-allow}" \ diff --git a/examples/backups/stop_tablets.sh b/examples/backups/stop_tablets.sh index 6a3ced6ab74..d387128309c 100755 --- a/examples/backups/stop_tablets.sh +++ b/examples/backups/stop_tablets.sh @@ -30,7 +30,25 @@ for tablet in 100 200 300; do CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh echo "Removing tablet directory zone1-$uid" vtctldclient DeleteTablets --allow-primary zone1-$uid - rm -Rf $VTDATAROOT/vt_0000000$uid + + for ((i=0; i<30; i++)); do + # Redirect stderr to a temporary file + temp_file=$(mktemp) + rm -Rf $VTDATAROOT/vt_0000000$uid 2>"$temp_file" + + if grep -q 'Directory not empty' "$temp_file"; then + echo "Directory not empty, retrying..." + elif [ ! 
-s "$temp_file" ]; then + echo "Deletion succeeded." + rm -f "$temp_file" + break + else + echo "An error occurred." + cat "$temp_file" + fi + rm -f "$temp_file" + sleep 1 + done done fi done diff --git a/examples/common/env.sh b/examples/common/env.sh index 51d0fcb6487..8b717f4df3a 100644 --- a/examples/common/env.sh +++ b/examples/common/env.sh @@ -76,9 +76,11 @@ mkdir -p "${VTDATAROOT}/tmp" # In your own environment you may prefer to use config files, # such as ~/.my.cnf -alias mysql="command mysql --no-defaults -h 127.0.0.1 -P 15306" +alias mysql="command mysql --no-defaults -h 127.0.0.1 -P 15306 --binary-as-hex=false" alias vtctldclient="command vtctldclient --server localhost:15999" -# Make sure aliases are expanded in non-interactive shell -shopt -s expand_aliases +# If using bash, make sure aliases are expanded in non-interactive shell +if [[ -n ${BASH} ]]; then + shopt -s expand_aliases +fi diff --git a/examples/common/scripts/consul-up.sh b/examples/common/scripts/consul-up.sh index 584a25f437a..fb75495b278 100755 --- a/examples/common/scripts/consul-up.sh +++ b/examples/common/scripts/consul-up.sh @@ -40,13 +40,13 @@ sleep 5 # Add the CellInfo description for the cell. # If the node already exists, it's fine, means we used existing data. -echo "add $cell CellInfo" +echo "add ${cell} CellInfo" set +e # shellcheck disable=SC2086 -vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ - --root "vitess/$cell" \ +command vtctldclient --server internal --topo-implementation consul --topo-global-server "${CONSUL_SERVER}:${consul_http_port}" AddCellInfo \ + --root "/vitess/${cell}" \ --server-address "${CONSUL_SERVER}:${consul_http_port}" \ - "$cell" + "${cell}" set -e echo "consul start done..." 
diff --git a/examples/common/scripts/etcd-up.sh b/examples/common/scripts/etcd-up.sh index ac81c1fbd28..1ed22ffce2e 100755 --- a/examples/common/scripts/etcd-up.sh +++ b/examples/common/scripts/etcd-up.sh @@ -32,13 +32,12 @@ sleep 5 # And also add the CellInfo description for the cell. # If the node already exists, it's fine, means we used existing data. -echo "add $cell CellInfo" +echo "add ${cell} CellInfo" set +e -# shellcheck disable=SC2086 -vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ - --root /vitess/$cell \ +command vtctldclient --server internal AddCellInfo \ + --root "/vitess/${cell}" \ --server-address "${ETCD_SERVER}" \ - $cell + "${cell}" set -e echo "etcd is running!" diff --git a/examples/common/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh index faa2e6a177f..356f6ac3880 100755 --- a/examples/common/scripts/vtadmin-up.sh +++ b/examples/common/scripts/vtadmin-up.sh @@ -14,6 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +function output() { + echo -e "$@" +} + script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" source "${script_dir}/../env.sh" @@ -24,9 +28,13 @@ web_dir="${script_dir}/../../../web/vtadmin" vtadmin_api_port=14200 vtadmin_web_port=14201 +case_insensitive_hostname=$(echo "$hostname" | tr '[:upper:]' '[:lower:]') + +output "\n\033[1;32mvtadmin-api expects vtadmin-web at, and set http-origin to \"http://${case_insensitive_hostname}:${vtadmin_web_port}\"\033[0m" + vtadmin \ - --addr "${hostname}:${vtadmin_api_port}" \ - --http-origin "http://${hostname}:${vtadmin_web_port}" \ + --addr "${case_insensitive_hostname}:${vtadmin_api_port}" \ + --http-origin "http://${case_insensitive_hostname}:${vtadmin_web_port}" \ --http-tablet-url-tmpl "http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \ --tracer "opentracing-jaeger" \ --grpc-tracing \ @@ -43,15 +51,18 @@ echo ${vtadmin_api_pid} > "${log_dir}/vtadmin-api.pid" echo "\ vtadmin-api is running! 
- - API: http://${hostname}:${vtadmin_api_port} + - API: http://${case_insensitive_hostname}:${vtadmin_api_port} - Logs: ${log_dir}/vtadmin-api.out - PID: ${vtadmin_api_pid} " +echo "Building vtadmin-web..." +source "${web_dir}/build.sh" + # Wait for vtadmin to successfully discover the cluster expected_cluster_result="{\"result\":{\"clusters\":[{\"id\":\"${cluster_name}\",\"name\":\"${cluster_name}\"}]},\"ok\":true}" -for _ in {0..300}; do - result=$(curl -s "http://${hostname}:${vtadmin_api_port}/api/clusters") +for _ in {0..100}; do + result=$(curl -s "http://${case_insensitive_hostname}:${vtadmin_api_port}/api/clusters") if [[ ${result} == "${expected_cluster_result}" ]]; then break fi @@ -59,7 +70,7 @@ for _ in {0..300}; do done # Check one last time -[[ $(curl -s "http://${hostname}:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster." +[[ $(curl -s "http://${case_insensitive_hostname}:${vtadmin_api_port}/api/clusters") == "${expected_cluster_result}" ]] || fail "vtadmin failed to discover the running example Vitess cluster." [[ ! -d "$web_dir/build" ]] && fail "Please make sure the VTAdmin files are built in $web_dir/build, using 'make build'" @@ -71,7 +82,7 @@ echo ${vtadmin_web_pid} > "${log_dir}/vtadmin-web.pid" echo "\ vtadmin-web is running! 
- - Browser: http://${hostname}:${vtadmin_web_port} + - Browser: http://${case_insensitive_hostname}:${vtadmin_web_port} - Logs: ${log_dir}/vtadmin-web.out - PID: ${vtadmin_web_pid} " diff --git a/examples/common/scripts/vttablet-up.sh b/examples/common/scripts/vttablet-up.sh index 56d212af218..daa40aee894 100755 --- a/examples/common/scripts/vttablet-up.sh +++ b/examples/common/scripts/vttablet-up.sh @@ -53,8 +53,6 @@ vttablet \ --grpc_port $grpc_port \ --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \ --pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \ - --heartbeat_enable \ - --heartbeat_interval=250ms \ --heartbeat_on_demand_duration=5s \ > $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 & diff --git a/examples/common/scripts/zk-up.sh b/examples/common/scripts/zk-up.sh index 3137ed724cc..2b79053d2f6 100755 --- a/examples/common/scripts/zk-up.sh +++ b/examples/common/scripts/zk-up.sh @@ -52,10 +52,10 @@ echo "Started zk servers." # If the node already exists, it's fine, means we used existing data. set +e # shellcheck disable=SC2086 -vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \ - --root /vitess/$cell \ - --server-address $ZK_SERVER \ - $cell +command vtctldclient --server internal --topo-implementation zk2 --topo-global-server "${ZK_SERVER}" AddCellInfo \ + --root "/vitess/${cell}" \ + --server-address "${ZK_SERVER}" \ + "${cell}" set -e echo "Configured zk servers." diff --git a/examples/compose/client.go b/examples/compose/client.go index 8beaef683cd..1fb4a4eb463 100644 --- a/examples/compose/client.go +++ b/examples/compose/client.go @@ -27,7 +27,7 @@ package main import ( "fmt" - "math/rand" + "math/rand/v2" "os" "time" @@ -53,13 +53,13 @@ func main() { // Insert some messages on random pages. 
fmt.Println("Inserting into primary...") - for i := 0; i < 3; i++ { + for range 3 { tx, err := db.Begin() if err != nil { fmt.Printf("begin failed: %v\n", err) os.Exit(1) } - page := rand.Intn(100) + 1 + page := rand.IntN(100) + 1 timeCreated := time.Now().UnixNano() if _, err := tx.Exec("INSERT INTO messages (page,time_created_ns,message) VALUES (?,?,?)", page, timeCreated, "V is for speed"); err != nil { diff --git a/examples/compose/config/init_db.sql b/examples/compose/config/init_db.sql index 8239d5ed5ec..b567faf1722 100644 --- a/examples/compose/config/init_db.sql +++ b/examples/compose/config/init_db.sql @@ -12,10 +12,8 @@ SET GLOBAL super_read_only='OFF'; # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. SET sql_log_bin = 0; -# Remove anonymous users. -DELETE FROM mysql.user WHERE User = ''; -# Disable remote root access (only allow UNIX socket). -DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; +# Remove anonymous users & disable remote root access (only allow UNIX socket). +DROP USER IF EXISTS ''@'%', ''@'localhost', 'root'@'%'; # Remove test database. DROP DATABASE IF EXISTS test; ############################################################################### @@ -70,9 +68,6 @@ GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER ON *.* TO 'vt_filtered'@'localhost'; -FLUSH PRIVILEGES; -RESET SLAVE ALL; -RESET MASTER; # custom sql is used to add custom scripts like creating users/passwords. 
We use it in our tests # {{custom_sql}} diff --git a/examples/compose/external_db/mysql/grant.sh b/examples/compose/external_db/mysql/grant.sh index 897c1b5dcf2..9371377d074 100755 --- a/examples/compose/external_db/mysql/grant.sh +++ b/examples/compose/external_db/mysql/grant.sh @@ -3,5 +3,5 @@ echo '**********GRANTING PRIVILEGES START*******************' echo ${mysql[@]} # PURGE BINARY LOGS BEFORE DATE(NOW()); mysql --protocol=socket -uroot -hlocalhost --socket=/var/run/mysqld/mysqld.sock -p$MYSQL_ROOT_PASSWORD -e \ -"GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD'; FLUSH PRIVILEGES;" -echo '*************GRANTING PRIVILEGES END****************' \ No newline at end of file +"GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD'" +echo '*************GRANTING PRIVILEGES END****************' diff --git a/examples/compose/fix_replication.sh b/examples/compose/fix_replication.sh index 71631efc310..6aa9648308a 100755 --- a/examples/compose/fix_replication.sh +++ b/examples/compose/fix_replication.sh @@ -26,25 +26,25 @@ cd "$(dirname "${BASH_SOURCE[0]}")" function get_replication_status() { # Get replication status - STATUS_LINE=$(mysql -u$DB_USER -p$DB_PASS -h 127.0.0.1 -e "SHOW SLAVE STATUS\G") + STATUS_LINE=$(mysql -u$DB_USER -p$DB_PASS -h 127.0.0.1 -e "SHOW REPLICA STATUS\G") LAST_ERRNO=$(grep "Last_IO_Errno:" <<< "$STATUS_LINE" | awk '{ print $2 }') - SLAVE_SQL_RUNNING=$(grep "Slave_SQL_Running:" <<< "$STATUS_LINE" | awk '{ print $2 }') - SLAVE_IO_RUNNING=$(grep "Slave_IO_Running:" <<< "$STATUS_LINE" | awk '{ print $2 }') - MASTER_HOST=$(grep "Master_Host:" <<< "$STATUS_LINE" | awk '{ print $2 }') - MASTER_PORT=$(grep "Master_Port:" <<< "$STATUS_LINE" | awk '{ print $2 }') + REPLICA_SQL_RUNNING=$(grep "Replica_SQL_Running:" <<< "$STATUS_LINE" | awk '{ print $2 }') + REPLICA_IO_RUNNING=$(grep "Replica_IO_Running:" <<< "$STATUS_LINE" | awk '{ print $2 }') + SOURCE_HOST=$(grep "Source_Host:" <<< "$STATUS_LINE" 
| awk '{ print $2 }') + SOURCE_PORT=$(grep "Source_Port:" <<< "$STATUS_LINE" | awk '{ print $2 }') - echo "Slave_SQL_Running: $SLAVE_SQL_RUNNING" - echo "Slave_IO_Running: $SLAVE_IO_RUNNING" + echo "Replica_SQL_Running: $REPLICA_SQL_RUNNING" + echo "Replica_IO_Running: $REPLICA_IO_RUNNING" echo "Last_IO_Errno: $LAST_ERRNO" } function reset_replication() { # Necessary before sql file can be imported echo "Importing MysqlDump: $KEYSPACE.sql" - mysql -u$DB_USER -p$DB_PASS -h 127.0.0.1 -e "RESET MASTER;STOP SLAVE;CHANGE MASTER TO MASTER_AUTO_POSITION = 0;source $KEYSPACE.sql;START SLAVE;" + mysql -u$DB_USER -p$DB_PASS -h 127.0.0.1 -e "RESET MASTER;STOP REPLICA;CHANGE REPLICATION SOURCE TO SOURCE_AUTO_POSITION = 0;source $KEYSPACE.sql;START REPLICA;" # Restore Master Auto Position echo "Restoring Master Auto Setting" - mysql -u$DB_USER -p$DB_PASS -h 127.0.0.1 -e "STOP SLAVE;CHANGE MASTER TO MASTER_AUTO_POSITION = 1;START SLAVE;" + mysql -u$DB_USER -p$DB_PASS -h 127.0.0.1 -e "STOP REPLICA;CHANGE REPLICATION SOURCE TO SOURCE_AUTO_POSITION = 1;START REPLICA;" } # Retrieve replication status @@ -54,7 +54,7 @@ get_replication_status [ ${1:-''} != 'status' ] || exit 0; # Check if IO_Thread is running -if [[ $SLAVE_IO_RUNNING = "No" && $LAST_ERRNO = 1236 ]]; then +if [[ $REPLICA_IO_RUNNING = "No" && $LAST_ERRNO = 1236 ]]; then echo "Primary has purged bin logs that replica requires. Sync will require restore from mysqldump" if [[ -f $KEYSPACE.sql ]] ; then @@ -64,7 +64,7 @@ if [[ $SLAVE_IO_RUNNING = "No" && $LAST_ERRNO = 1236 ]]; then else echo "Starting mysqldump. This may take a while.." 
# Modify flags to user's requirements - if mysqldump -h $MASTER_HOST -P $MASTER_PORT -u$DB_USER -p$DB_PASS --databases $KEYSPACE \ + if mysqldump -h $SOURCE_HOST -P $SOURCE_PORT -u$DB_USER -p$DB_PASS --databases $KEYSPACE \ --triggers --routines --events --hex-blob --master-data=1 --quick --order-by-primary \ --no-autocommit --skip-comments --skip-add-drop-table --skip-add-locks \ --skip-disable-keys --single-transaction --set-gtid-purged=on --verbose > $KEYSPACE.sql; then diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go index c6df1d72e48..3bcfd8315e5 100644 --- a/examples/compose/vtcompose/vtcompose.go +++ b/examples/compose/vtcompose/vtcompose.go @@ -218,24 +218,6 @@ func main() { writeFile(dockerComposeFile, "docker-compose.yml") } -func applyFilePatch(dockerYaml []byte, patchFile string) []byte { - yamlPatch, err := os.ReadFile(patchFile) - if err != nil { - log.Fatalf("reading yaml patch file %s: %s", patchFile, err) - } - - patch, err := yamlpatch.DecodePatch(yamlPatch) - if err != nil { - log.Fatalf("decoding patch failed: %s", err) - } - - bs, err := patch.Apply(dockerYaml) - if err != nil { - log.Fatalf("applying patch failed: %s", err) - } - return bs -} - func applyJsonInMemoryPatch(vSchemaFile []byte, patchString string) []byte { patch, err := jsonpatch.DecodePatch([]byte(patchString)) if err != nil { @@ -446,7 +428,7 @@ func applyKeyspaceDependentPatches( dockerComposeFile = applyShardPatches(dockerComposeFile, tabAlias, shard, keyspaceData, externalDbInfoMap, opts) } else { // Determine shard range - for i := 0; i < keyspaceData.shards; i++ { + for i := range keyspaceData.shards { if i == 0 { shard = fmt.Sprintf("-%x", interval) } else if i == (keyspaceData.shards - 1) { @@ -517,28 +499,6 @@ func applyShardPatches( return dockerComposeFile } -func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo, opts vtOptions) string { - aliases := []int{tabAlias + 1} // primary alias, e.g. 
201 - for i := 0; i < keyspaceData.replicaTablets; i++ { - aliases = append(aliases, tabAlias+2+i) // replica aliases, e.g. 202, 203, ... - } - tabletDepends := make([]string, len(aliases)) - for i, tabletId := range aliases { - tabletDepends[i] = fmt.Sprintf("vttablet%d: {condition : service_healthy}", tabletId) - } - // Wait on all shard tablets to be healthy - dependsOn := "depends_on: {" + strings.Join(tabletDepends, ", ") + "}" - - return fmt.Sprintf(` -- op: add - path: /services/init_shard_primary%[2]d - value: - image: vitess/lite:${VITESS_TAG:-latest} - command: ["sh", "-c", "/vt/bin/vtctldclient %[5]s InitShardPrimary --force %[4]s/%[3]s %[6]s-%[2]d "] - %[1]s -`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell) -} - func generateExternalPrimary( tabAlias int, shard string, @@ -548,7 +508,7 @@ func generateExternalPrimary( ) string { aliases := []int{tabAlias + 1} // primary alias, e.g. 201 - for i := 0; i < keyspaceData.replicaTablets; i++ { + for i := range keyspaceData.replicaTablets { aliases = append(aliases, tabAlias+2+i) // replica aliases, e.g. 202, 203, ... 
} @@ -611,7 +571,7 @@ func applyTabletPatches( dbInfo = val } dockerComposeFile = applyInMemoryPatch(dockerComposeFile, generateDefaultTablet(tabAlias+1, shard, "primary", keyspaceData.keyspace, dbInfo, opts)) - for i := 0; i < keyspaceData.replicaTablets; i++ { + for i := range keyspaceData.replicaTablets { dockerComposeFile = applyInMemoryPatch(dockerComposeFile, generateDefaultTablet(tabAlias+2+i, shard, "replica", keyspaceData.keyspace, dbInfo, opts)) } return dockerComposeFile diff --git a/examples/compose/vttablet-up.sh b/examples/compose/vttablet-up.sh index a131e555dfa..8d02a7a528e 100755 --- a/examples/compose/vttablet-up.sh +++ b/examples/compose/vttablet-up.sh @@ -68,7 +68,7 @@ if [ "$external" = "1" ]; then # We need a common user for the unmanaged and managed tablets else tools like orchestrator will not function correctly echo "Creating matching user for managed tablets..." echo "CREATE USER IF NOT EXISTS '$DB_USER'@'%' IDENTIFIED BY '$DB_PASS';" >> $init_db_sql_file - echo "GRANT ALL ON *.* TO '$DB_USER'@'%';FLUSH PRIVILEGES;" >> $init_db_sql_file + echo "GRANT ALL ON *.* TO '$DB_USER'@'%';" >> $init_db_sql_file fi echo "##[CUSTOM_SQL_END]##" >> $init_db_sql_file diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml index 22cdc6f686d..2fc7ebe6a92 100644 --- a/examples/operator/101_initial_cluster.yaml +++ b/examples/operator/101_initial_cluster.yaml @@ -15,7 +15,7 @@ spec: vtbackup: vitess/lite:latest vtorc: vitess/lite:latest mysqld: - mysql80Compatible: vitess/lite:latest + mysql80Compatible: mysql:8.0.30 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 @@ -145,11 +145,8 @@ stringData: # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. SET sql_log_bin = 0; - # Remove anonymous users. - DELETE FROM mysql.user WHERE User = ''; - - # Disable remote root access (only allow UNIX socket). 
- DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; + # Remove anonymous users & disable remote root access (only allow UNIX socket). + DROP USER IF EXISTS ''@'%', ''@'localhost', 'root'@'%'; # Remove test database. DROP DATABASE IF EXISTS test; @@ -215,11 +212,6 @@ stringData: SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER ON *.* TO 'vt_filtered'@'localhost'; - FLUSH PRIVILEGES; - - RESET SLAVE ALL; - RESET MASTER; - # custom sql is used to add custom scripts like creating users/passwords. We use it in our tests # {{custom_sql}} diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml index 5800a5e05df..25c9d26d892 100644 --- a/examples/operator/201_customer_tablets.yaml +++ b/examples/operator/201_customer_tablets.yaml @@ -11,7 +11,7 @@ spec: vtbackup: vitess/lite:latest vtorc: vitess/lite:latest mysqld: - mysql80Compatible: vitess/lite:latest + mysql80Compatible: mysql:8.0.30 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml index 2e15bc40d28..4caf35ed856 100644 --- a/examples/operator/302_new_shards.yaml +++ b/examples/operator/302_new_shards.yaml @@ -11,7 +11,7 @@ spec: vtbackup: vitess/lite:latest vtorc: vitess/lite:latest mysqld: - mysql80Compatible: vitess/lite:latest + mysql80Compatible: mysql:8.0.30 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml index 4bdb694d678..adc22280490 100644 --- a/examples/operator/306_down_shard_0.yaml +++ b/examples/operator/306_down_shard_0.yaml @@ -11,7 +11,7 @@ spec: vtbackup: vitess/lite:latest vtorc: vitess/lite:latest mysqld: - mysql80Compatible: vitess/lite:latest + mysql80Compatible: mysql:8.0.30 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/go.mod b/go.mod index 
19fec748de8..ee8536eb0eb 100644 --- a/go.mod +++ b/go.mod @@ -1,100 +1,97 @@ module vitess.io/vitess -go 1.21 +go 1.22.3 require ( - cloud.google.com/go/storage v1.29.0 - github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 + cloud.google.com/go/storage v1.40.0 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.15.0 - github.com/DataDog/datadog-go v4.8.3+incompatible github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect github.com/aquarapid/vaultlib v0.5.1 github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go v1.44.258 + github.com/aws/aws-sdk-go v1.52.3 github.com/buger/jsonparser v1.1.1 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/corpix/uarand v0.1.1 // indirect - github.com/dave/jennifer v1.6.0 - github.com/evanphx/json-patch v5.6.0+incompatible - github.com/fsnotify/fsnotify v1.6.0 - github.com/go-sql-driver/mysql v1.7.0 - github.com/golang/glog v1.0.0 - github.com/golang/protobuf v1.5.3 + github.com/dave/jennifer v1.7.0 + github.com/evanphx/json-patch v5.9.0+incompatible + github.com/fsnotify/fsnotify v1.7.0 + github.com/go-sql-driver/mysql v1.7.1 + github.com/golang/glog v1.2.1 + github.com/golang/protobuf v1.5.4 github.com/golang/snappy v0.0.4 - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/google/uuid v1.3.0 - github.com/gorilla/handlers v1.5.1 - github.com/gorilla/mux v1.8.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/google/uuid v1.6.0 + github.com/gorilla/handlers v1.5.2 + github.com/gorilla/mux v1.8.1 + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/consul/api v1.20.0 + github.com/hashicorp/consul/api v1.28.2 github.com/hashicorp/go-immutable-radix v1.3.1 // indirect 
github.com/hashicorp/serf v0.10.1 // indirect github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.16.5 - github.com/klauspost/pgzip v1.2.5 + github.com/klauspost/compress v1.17.8 + github.com/klauspost/pgzip v1.2.6 github.com/krishicks/yaml-patch v0.0.10 - github.com/magiconair/properties v1.8.7 - github.com/mattn/go-sqlite3 v1.14.16 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 - github.com/montanaflynn/stats v0.7.0 + github.com/montanaflynn/stats v0.7.1 github.com/olekukonko/tablewriter v0.0.5 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing/opentracing-go v1.2.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/philhofer/fwd v1.1.2 // indirect github.com/pierrec/lz4 v2.6.1+incompatible - github.com/pires/go-proxyproto v0.6.2 + github.com/pires/go-proxyproto v0.7.0 github.com/pkg/errors v0.9.1 github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a github.com/planetscale/vtprotobuf v0.5.0 - github.com/prometheus/client_golang v1.15.1 - github.com/prometheus/common v0.43.0 // indirect - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 + github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/common v0.53.0 github.com/sjmudd/stopwatch v0.1.1 github.com/soheilhy/cmux v0.1.5 - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.15.0 - github.com/stretchr/testify v1.8.2 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 github.com/tchap/go-patricia v2.3.0+incompatible - github.com/tidwall/gjson v1.12.1 - github.com/tinylib/msgp v1.1.8 // indirect + github.com/tidwall/gjson v1.17.1 + github.com/tinylib/msgp v1.1.9 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/uber/jaeger-lib 
v2.4.1+incompatible // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 github.com/z-division/go-zookeeper v1.0.0 - go.etcd.io/etcd/api/v3 v3.5.8 - go.etcd.io/etcd/client/pkg/v3 v3.5.8 - go.etcd.io/etcd/client/v3 v3.5.8 + go.etcd.io/etcd/api/v3 v3.5.13 + go.etcd.io/etcd/client/pkg/v3 v3.5.13 + go.etcd.io/etcd/client/v3 v3.5.13 go.uber.org/mock v0.2.0 - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.17.0 - golang.org/x/oauth2 v0.7.0 - golang.org/x/sys v0.13.0 - golang.org/x/term v0.13.0 - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 - google.golang.org/api v0.121.0 - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect - google.golang.org/grpc v1.55.0-dev + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 + golang.org/x/oauth2 v0.20.0 + golang.org/x/sys v0.20.0 + golang.org/x/term v0.20.0 + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.5.0 + golang.org/x/tools v0.21.0 + google.golang.org/api v0.178.0 + google.golang.org/genproto v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/grpc v1.63.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b - google.golang.org/protobuf v1.30.0 - gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 + google.golang.org/protobuf v1.34.1 + gopkg.in/DataDog/dd-trace-go.v1 v1.63.1 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/ldap.v2 v2.5.1 - gotest.tools v2.2.0+incompatible - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/yaml v1.4.0 ) require ( - github.com/Shopify/toxiproxy/v2 v2.5.0 + github.com/DataDog/datadog-go/v5 v5.5.0 + github.com/Shopify/toxiproxy/v2 v2.9.0 github.com/bndr/gotabulate v1.1.2 github.com/gammazero/deque v0.2.1 github.com/google/safehtml v0.1.0 @@ -103,91 +100,99 @@ require ( 
github.com/kr/text v0.2.0 github.com/mitchellh/mapstructure v1.5.0 github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 - github.com/spf13/afero v1.9.3 + github.com/spf13/afero v1.11.0 github.com/spf13/jwalterweatherman v1.1.0 github.com/xlab/treeprint v1.2.0 - go.uber.org/goleak v1.2.1 - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/sync v0.3.0 + go.uber.org/goleak v1.3.0 + golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 + golang.org/x/sync v0.7.0 gonum.org/v1/gonum v0.14.0 - modernc.org/sqlite v1.20.3 + modernc.org/sqlite v1.29.9 ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.19.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.13.0 // indirect - github.com/DataDog/appsec-internal-go v1.0.0 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 // indirect - github.com/DataDog/datadog-go/v5 v5.2.0 // indirect - github.com/DataDog/go-libddwaf v1.1.0 // indirect - github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect - github.com/DataDog/sketches-go v1.4.1 // indirect - github.com/Microsoft/go-winio v0.6.0 // indirect + cloud.google.com/go v0.112.2 // indirect + cloud.google.com/go/auth v0.3.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/iam v1.1.8 // indirect + github.com/DataDog/appsec-internal-go v1.5.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 // indirect + github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect + github.com/DataDog/go-sqllexer v0.0.11 // indirect + github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect + github.com/DataDog/sketches-go v1.4.4 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect 
github.com/beorn7/perks v1.0.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/cyphar/filepath-securejoin v0.2.4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/ebitengine/purego v0.7.1 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.0.1 // indirect - github.com/google/s2a-go v0.1.3 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.8.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/kballard/go-shellquote 
v0.0.0-20180428030007-95032a82bc51 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.10 // indirect - github.com/mattn/go-isatty v0.0.18 // indirect - github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-ieproxy v0.0.11 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect github.com/onsi/gomega v1.23.0 // indirect - github.com/outcaste-io/ristretto v0.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.0.7 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/outcaste-io/ristretto v0.2.3 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/procfs v0.14.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rivo/uniseg v0.4.4 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + 
github.com/subosito/gotenv v1.6.0 // indirect github.com/tidwall/match v1.1.1 // indirect - github.com/tidwall/pretty v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect + go.opentelemetry.io/otel v1.26.0 // indirect + go.opentelemetry.io/otel/metric v1.26.0 // indirect + go.opentelemetry.io/otel/trace v1.26.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.24.0 // indirect - go4.org/intern v0.0.0-20230205224052-192e9f60865c // indirect - go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect - lukechampine.com/uint128 v1.2.0 // indirect - modernc.org/cc/v3 v3.40.0 // indirect - modernc.org/ccgo/v3 v3.16.13 // indirect - modernc.org/libc v1.22.5 // indirect - modernc.org/mathutil v1.5.0 // indirect - modernc.org/memory v1.5.0 // indirect - modernc.org/opt v0.1.3 // indirect - modernc.org/strutil v1.1.3 // indirect + modernc.org/gc/v3 v3.0.0-20240304020402-f0dba7c97c2b // indirect + modernc.org/libc v1.50.5 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/strutil v1.2.0 // indirect modernc.org/token v1.1.0 // indirect 
) diff --git a/go.sum b/go.sum index f0b6cc35a35..64e69b6e84d 100644 --- a/go.sum +++ b/go.sum @@ -1,55 +1,19 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery 
v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage 
v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= +cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= +cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs= +cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= 
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= @@ -66,43 +30,39 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/appsec-internal-go v1.0.0 h1:2u5IkF4DBj3KVeQn5Vg2vjPUtt513zxEYglcqnd500U= -github.com/DataDog/appsec-internal-go v1.0.0/go.mod h1:+Y+4klVWKPOnZx6XESG7QHydOaUGEXyH2j/vSg9JiNM= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 h1:HG4dOM6Ou+zZsaKC++4kpM9VGJ/TYo9X61LPz2mmjDE= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1/go.mod h1:o+rJy3B2o+Zb+wCgLSkMlkD7EiUEA5Q63cid53fZkQY= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 h1:0OK84DbAucLUwoDYoBFve1cuhDWtoquruVVDjgucYlI= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= +github.com/DataDog/appsec-internal-go v1.5.0 h1:8kS5zSx5T49uZ8dZTdT19QVAvC/B8ByyZdhQKYQWHno= +github.com/DataDog/appsec-internal-go v1.5.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1 h1:/oxF4p/4XUGNpNw2TE7vDu/pJV3elEAZ+jES0/MWtiI= +github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.1/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI= 
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1 h1:mmkGuCHBFuDBpuwNMcqtY1x1I2fCaPH2Br4xPAAjbkM= +github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.1/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q= -github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/datadog-go/v5 v5.2.0 h1:kSptqUGSNK67DgA+By3rwtFnAh6pTBxJ7Hn8JCLZcKY= -github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q= -github.com/DataDog/go-libddwaf v1.1.0 h1:PhlI/31yxu88JEgTYqxffhd8oM4KQMfNWUVyICqIDMY= -github.com/DataDog/go-libddwaf v1.1.0/go.mod h1:DI5y8obPajk+Tvy2o+nZc2g/5Ria/Rfq5/624k7pHpE= -github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU= -github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs= -github.com/DataDog/gostackparse v0.5.0 h1:jb72P6GFHPHz2W0onsN51cS3FkaMDcjb0QzgxxA4gDk= -github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= -github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8CwZNAY= -github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= +github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU= +github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= +github.com/DataDog/go-libddwaf/v2 v2.4.2 h1:ilquGKUmN9/Ty0sIxiEyznVRxP3hKfmH15Y1SMq5gjA= +github.com/DataDog/go-libddwaf/v2 v2.4.2/go.mod h1:gsCdoijYQfj8ce/T2bEDNPZFIYnmHluAgVDpuQOWMZE= +github.com/DataDog/go-sqllexer v0.0.11 
h1:OfPBjmayreblOXreszbrOTICNZ3qWrA6Bg4sypvxpbw= +github.com/DataDog/go-sqllexer v0.0.11/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= +github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= +github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= +github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= +github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8= +github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0= github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg= github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= -github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= -github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Shopify/toxiproxy/v2 v2.9.0 h1:DIaDZG2/r/kv3Em6UxYBUVnnWl1mHlYTGFv+sTPV7VI= 
+github.com/Shopify/toxiproxy/v2 v2.9.0/go.mod h1:2uPRyxR46fsx2yUr9i8zcejzdkWfK7p6G23jV/X6YNs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aquarapid/vaultlib v0.5.1 h1:vuLWR6bZzLHybjJBSUYPgZlIp6KZ+SXeHLRRYTuk6d4= github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -111,9 +71,8 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.44.258 h1:JVk1lgpsTnb1kvUw3eGhPLcTpEBp6HeSf1fxcYDs2Ho= -github.com/aws/aws-sdk-go v1.44.258/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/aws/aws-sdk-go v1.52.3 h1:BNPJmHOXNoM/iBWJKrvaQvJOweRcp3KLpzdb65CfQwU= +github.com/aws/aws-sdk-go v1.52.3/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -126,22 +85,12 @@ github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMU github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -149,60 +98,59 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI= -github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk= +github.com/dave/jennifer v1.7.0 
h1:uRbSBH9UTS64yXbh4FrMHfgfY762RD+C7bUPKODpSJE= +github.com/dave/jennifer v1.7.0/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= +github.com/ebitengine/purego v0.7.1 h1:6/55d26lG3o9VCZX8lping+bZcmShseiqlh2bnUDiPA= +github.com/ebitengine/purego v0.7.1/go.mod h1:ah1In8AOtksoNK6yk5z1HTJeUkC1Ez4Wk2idgGslMwQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= -github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0= github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 
-github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -210,27 +158,17 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.1 
h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -240,96 +178,72 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.3 h1:FAgZmpLl/SXurPEZyCMPBIiiYeTbqfjlbdnCNTAkbGE= -github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8= github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc= -github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= 
-github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= +github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= -github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= +github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/hashicorp/consul/sdk v0.16.0 
h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= @@ 
-343,16 +257,17 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod 
h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= @@ -360,11 +275,8 @@ github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -373,20 +285,15 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= -github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -408,27 +315,24 @@ github.com/mattn/go-colorable 
v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.10 h1:P+2QihaKCLgbs/32dhFLbxXlqsy8tIG1LUXHIoPaQPo= -github.com/mattn/go-ieproxy v0.0.10/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= +github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo= +github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= -github.com/mattn/go-sqlite3 v1.14.16/go.mod 
h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= +github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2wt+SiEUov/YDyTCTDuPtIKgQIvk0= github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -441,9 +345,11 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= 
+github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE= github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= @@ -454,14 +360,10 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/onsi/gomega v1.23.0 
h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= @@ -469,79 +371,76 @@ github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/outcaste-io/ristretto v0.2.0/go.mod h1:iBZA7RCt6jaOr0z6hiBQ6t662/oZ6Gx/yauuPvIWHAI= -github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64= -github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= +github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= +github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= -github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= 
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8= -github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= +github.com/pires/go-proxyproto v0.7.0 h1:IukmRewDQFWC7kfnb66CSomk2q/seBuilHBYFwyq0Hs= +github.com/pires/go-proxyproto v0.7.0/go.mod h1:Vz/1JPY/OACxWGQNIRY2BeyDmpoaWmEP40O9LbuiFR4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA= github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= github.com/planetscale/vtprotobuf v0.5.0 h1:l8PXm6Colok5z6qQLNhAj2Jq5BfoMTIHxLER5a6nDqM= github.com/planetscale/vtprotobuf v0.5.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= -github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= 
-github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us= -github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s= +github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA= github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.4 
h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= -github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= 
-github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8= -github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk= +github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= +github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -551,25 +450,28 @@ github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/afero v1.11.0 
h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -580,21 +482,22 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs= github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo= -github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod 
h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= -github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= +github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -604,109 +507,68 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
github.com/z-division/go-zookeeper v1.0.0 h1:ULsCj0nP6+U1liDFWe+2oEF6o4amixoDcDlwEUghVUY= github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= -go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4= -go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= -go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M= -go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= -go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4= -go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.etcd.io/etcd/api/v3 v3.5.13 h1:8WXU2/NBge6AUF1K1gOexB6e07NgsN1hXK0rSTtgSp4= +go.etcd.io/etcd/api/v3 v3.5.13/go.mod h1:gBqlqkcMMZMVTMm4NDZloEVJzxQOQIls8splbqBDa0c= +go.etcd.io/etcd/client/pkg/v3 v3.5.13 h1:RVZSAnWWWiI5IrYAXjQorajncORbS0zI48LQlE2kQWg= +go.etcd.io/etcd/client/pkg/v3 v3.5.13/go.mod h1:XxHT4u1qU12E2+po+UVPrEeL94Um6zL58ppuJWXSAB8= +go.etcd.io/etcd/client/v3 v3.5.13 h1:o0fHTNJLeO0MyVbc7I3fsCf6nrOqn5d+diSarKnB2js= +go.etcd.io/etcd/client/v3 v3.5.13/go.mod h1:cqiAeY8b5DEEcpxvgWKsbLIWNM/8Wy2xJSDMtioMcoI= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0 h1:A3SayB3rNyt+1S6qpI9mHPkeHTZbD7XILEqWnYZb2l0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.51.0/go.mod h1:27iA5uvhuRNmalO+iEUdVn5ZMj2qy10Mm+XRIpRmyuU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc= +go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= +go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= +go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= +go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI= -go4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 h1:LrTREdITdNDW/JRlUuG3fhXvCK3ZcKXTCf1BbxE8sT4= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod 
h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= +golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint 
v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -716,76 +578,35 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.25.0 
h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -794,250 +615,95 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= +golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0= gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= 
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.121.0 h1:8Oopoo8Vavxx6gt+sgs8s8/X60WBAtKQq6JqnkF+xow= -google.golang.org/api v0.121.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= +google.golang.org/api v0.178.0 h1:yoW/QMI4bRVCHF+NWOTa4cL8MoWL3Jnuc7FlcFF91Ok= +google.golang.org/api v0.178.0/go.mod h1:84/k2v8DFpDRebpGcooklv/lais3MEfqpaBLA12gl2U= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20240506185236-b8a5c65736ae h1:HjgkYCl6cWQEKSHkpUp4Q8VB74swzyBwTz1wtTzahm0= +google.golang.org/genproto v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:i4np6Wrjp8EujFAUn0CM0SH+iZhY1EbrfzEIJbFkHFM= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.55.0-dev h1:b3WG8LoyS+X/C5ZbIWsJGjt8Hhqq0wUVX8+rPF/BHZo= -google.golang.org/grpc v1.55.0-dev/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ= @@ -1055,10 +721,10 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 h1:DUpHhh+MHtpYnUyGr5rpfvKUXkRg93TSEHii/LZVF6g= -gopkg.in/DataDog/dd-trace-go.v1 v1.50.1/go.mod h1:sw4gV8LIXseC5ISMbDJmm79OJDdl8I2Hhtelb6lpHuQ= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/DataDog/dd-trace-go.v1 v1.63.1 h1:POnTNQLAJHnuywfk48N+l/EiwQJ6Kdaa7nwV5dbfdUY= +gopkg.in/DataDog/dd-trace-go.v1 v1.63.1/go.mod h1:pv2V0h4+skvObjdi3pWV4k6JHsdQk+flbjdC25mmTfU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -1067,7 +733,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -1078,7 +743,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1086,49 +750,38 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ= +honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= -lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= -modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= -modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= -modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= -modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= -modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= -modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= -modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/cc/v4 v4.21.0 h1:D/gLKtcztomvWbsbvBKo3leKQv+86f+DdqEZBBXhnag= +modernc.org/cc/v4 v4.21.0/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.17.3 
h1:t2CQci84jnxKw3GGnHvjGKjiNZeZqyQx/023spkk4hU= +modernc.org/ccgo/v4 v4.17.3/go.mod h1:1FCbAtWYJoKuc+AviS+dH+vGNtYmFJqBeRWjmnDWsIg= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= +modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/gc/v3 v3.0.0-20240304020402-f0dba7c97c2b h1:BnN1t+pb1cy61zbvSUV7SeI0PwosMhlAEi/vBY4qxp8= +modernc.org/gc/v3 v3.0.0-20240304020402-f0dba7c97c2b/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.50.5 h1:ZzeUd0dIc/sUtoPTCYIrgypkuzoGzNu6kbEWj2VuEmk= +modernc.org/libc v1.50.5/go.mod h1:rhzrUx5oePTSTIzBgM0mTftwWHK8tiT9aNFUt1mldl0= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= +modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.20.3 h1:SqGJMMxjj1PHusLxdYxeQSodg7Jxn9WWkaAQjKrntZs= -modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A= -modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34= -modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.29.9 h1:9RhNMklxJs+1596GNuAX+O/6040bvOwacTxuFcRuQow= 
+modernc.org/sqlite v1.29.9/go.mod h1:ItX2a1OVGgNsFh6Dv60JQvGfJfTPHPVpV6DF59akYOA= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE= -modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/go/README.md b/go/README.md index fc6efdde602..6f9ca0421e6 100644 --- a/go/README.md +++ b/go/README.md @@ -1,3 +1,5 @@ +# README + This directory contains all the Go code for Vitess. 
Most of the packages at the top level are general-purpose and are suitable @@ -16,4 +18,3 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) ``` - diff --git a/go/acl/acl_test.go b/go/acl/acl_test.go index 680044c1461..65f6ed2b96d 100644 --- a/go/acl/acl_test.go +++ b/go/acl/acl_test.go @@ -18,8 +18,15 @@ package acl import ( "errors" + "fmt" "net/http" + "net/http/httptest" + "os" + "os/exec" "testing" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" ) type TestPolicy struct{} @@ -50,41 +57,103 @@ func TestSimplePolicy(t *testing.T) { currentPolicy = policies["test"] err := CheckAccessActor("", ADMIN) want := "not allowed" - if err == nil || err.Error() != want { - t.Errorf("got %v, want %s", err, want) - } + assert.Equalf(t, err.Error(), want, "got %v, want %s", err, want) + err = CheckAccessActor("", DEBUGGING) - if err != nil { - t.Errorf("got %v, want no error", err) - } + assert.Equalf(t, err, nil, "got %v, want no error", err) + + err = CheckAccessActor("", MONITORING) + assert.Equalf(t, err, nil, "got %v, want no error", err) err = CheckAccessHTTP(nil, ADMIN) - if err == nil || err.Error() != want { - t.Errorf("got %v, want %s", err, want) - } + assert.Equalf(t, err.Error(), want, "got %v, want %s", err, want) + err = CheckAccessHTTP(nil, DEBUGGING) - if err != nil { - t.Errorf("got %v, want no error", err) - } + assert.Equalf(t, err, nil, "got %v, want no error", err) + + err = CheckAccessHTTP(nil, MONITORING) + assert.Equalf(t, err, nil, "got %v, want no error", err) } func TestEmptyPolicy(t *testing.T) { currentPolicy = nil err := CheckAccessActor("", ADMIN) - if err != nil { - t.Errorf("got %v, want no error", err) - } + assert.Equalf(t, err, nil, "got %v, want no error", err) + err = CheckAccessActor("", DEBUGGING) - if err != nil { - t.Errorf("got %v, want no error", err) - } + assert.Equalf(t, err, nil, "got %v, want no error", err) + + err = CheckAccessActor("", MONITORING) + assert.Equalf(t, err, nil, "got %v, want 
no error", err) err = CheckAccessHTTP(nil, ADMIN) - if err != nil { - t.Errorf("got %v, want no error", err) - } + assert.Equalf(t, err, nil, "got %v, want no error", err) + err = CheckAccessHTTP(nil, DEBUGGING) - if err != nil { - t.Errorf("got %v, want no error", err) + assert.Equalf(t, err, nil, "got %v, want no error", err) + + err = CheckAccessHTTP(nil, MONITORING) + assert.Equalf(t, err, nil, "got %v, want no error", err) +} + +func TestValidSecurityPolicy(t *testing.T) { + securityPolicy = "test" + savePolicy() + + assert.Equalf(t, TestPolicy{}, currentPolicy, "got %v, expected %v", currentPolicy, TestPolicy{}) +} + +func TestInvalidSecurityPolicy(t *testing.T) { + securityPolicy = "invalidSecurityPolicy" + savePolicy() + + assert.Equalf(t, denyAllPolicy{}, currentPolicy, "got %v, expected %v", currentPolicy, denyAllPolicy{}) +} + +func TestSendError(t *testing.T) { + testW := httptest.NewRecorder() + + testErr := errors.New("Testing error message") + SendError(testW, testErr) + + // Check the status code + assert.Equalf(t, testW.Code, http.StatusForbidden, "got %v; want %v", testW.Code, http.StatusForbidden) + + // Check the writer body + want := fmt.Sprintf("Access denied: %v\n", testErr) + got := testW.Body.String() + assert.Equalf(t, got, want, "got %v; want %v", got, want) +} + +func TestRegisterFlags(t *testing.T) { + testFs := pflag.NewFlagSet("test", pflag.ExitOnError) + securityPolicy = "test" + + RegisterFlags(testFs) + + securityPolicyFlag := testFs.Lookup("security_policy") + assert.NotNil(t, securityPolicyFlag, "no security_policy flag is registered") + + // Check the default value of the flag + want := "test" + got := securityPolicyFlag.DefValue + assert.Equalf(t, got, want, "got %v; want %v", got, want) +} + +func TestAlreadyRegisteredPolicy(t *testing.T) { + if os.Getenv("TEST_ACL") == "1" { + RegisterPolicy("test", nil) + return + } + + // Run subprocess to test os.Exit which is called by log.fatalf + // os.Exit should be called if we try to 
re-register a policy + cmd := exec.Command(os.Args[0], "-test.run=TestAlreadyRegisteredPolicy") + cmd.Env = append(os.Environ(), "TEST_ACL=1") + err := cmd.Run() + if e, ok := err.(*exec.ExitError); ok && !e.Success() { + return } + + t.Errorf("process ran with err %v, want exit status 1", err) } diff --git a/go/acl/deny_all_policy_test.go b/go/acl/deny_all_policy_test.go new file mode 100644 index 00000000000..b66a344af2e --- /dev/null +++ b/go/acl/deny_all_policy_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package acl + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDenyAllPolicy(t *testing.T) { + testDenyAllPolicy := denyAllPolicy{} + + want := errDenyAll + err := testDenyAllPolicy.CheckAccessActor("", ADMIN) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testDenyAllPolicy.CheckAccessActor("", DEBUGGING) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testDenyAllPolicy.CheckAccessActor("", MONITORING) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testDenyAllPolicy.CheckAccessHTTP(nil, ADMIN) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testDenyAllPolicy.CheckAccessHTTP(nil, DEBUGGING) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testDenyAllPolicy.CheckAccessHTTP(nil, MONITORING) + assert.Equalf(t, err, want, "got %v; want %v", err, want) +} diff --git a/go/acl/read_only_policy_test.go b/go/acl/read_only_policy_test.go new file mode 100644 index 00000000000..c5e988a6734 --- /dev/null +++ b/go/acl/read_only_policy_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package acl + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestReadOnlyPolicy(t *testing.T) { + testReadOnlyPolicy := readOnlyPolicy{} + + want := errReadOnly + err := testReadOnlyPolicy.CheckAccessActor("", ADMIN) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testReadOnlyPolicy.CheckAccessActor("", DEBUGGING) + assert.Equalf(t, err, nil, "got %v; want no error", err) + + err = testReadOnlyPolicy.CheckAccessActor("", MONITORING) + assert.Equalf(t, err, nil, "got %v; want no error", err) + + err = testReadOnlyPolicy.CheckAccessHTTP(nil, ADMIN) + assert.Equalf(t, err, want, "got %v; want %v", err, want) + + err = testReadOnlyPolicy.CheckAccessHTTP(nil, DEBUGGING) + assert.Equalf(t, err, nil, "got %v; want no error", err) + + err = testReadOnlyPolicy.CheckAccessHTTP(nil, MONITORING) + assert.Equalf(t, err, nil, "got %v; want no error", err) +} diff --git a/go/bucketpool/bucketpool_test.go b/go/bucketpool/bucketpool_test.go index 7649f9b6278..dc3baaa289d 100644 --- a/go/bucketpool/bucketpool_test.go +++ b/go/bucketpool/bucketpool_test.go @@ -17,178 +17,121 @@ limitations under the License. 
package bucketpool import ( - "math/rand" + "math/rand/v2" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestPool(t *testing.T) { maxSize := 16384 pool := New(1024, maxSize) - if pool.maxSize != maxSize { - t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) - } - if len(pool.pools) != 5 { - t.Fatalf("Invalid number of pools: %d, expected %d", len(pool.pools), 5) - } + require.Equal(t, maxSize, pool.maxSize, "Invalid max pool size") + require.Len(t, pool.pools, 5, "Invalid number of pools") buf := pool.Get(64) - if len(*buf) != 64 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 1024 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 64, "unexpected buf length") + require.Equal(t, 1024, cap(*buf), "unexpected buf cap") // get from same pool, check that length is right buf = pool.Get(128) - if len(*buf) != 128 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 1024 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 128, "unexpected buf length") + require.Equal(t, 1024, cap(*buf), "unexpected buf cap") pool.Put(buf) // get boundary size buf = pool.Get(1024) - if len(*buf) != 1024 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 1024 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 1024, "unexpected buf length") + require.Equal(t, 1024, cap(*buf), "unexpected buf cap") pool.Put(buf) // get from the middle buf = pool.Get(5000) - if len(*buf) != 5000 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 8192 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 5000, "unexpected buf length") + require.Equal(t, 8192, cap(*buf), "unexpected buf cap") pool.Put(buf) // check last pool buf = pool.Get(16383) - if len(*buf) != 16383 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 
16384 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 16383, "unexpected buf length") + require.Equal(t, 16384, cap(*buf), "unexpected buf cap") pool.Put(buf) // get big buffer buf = pool.Get(16385) - if len(*buf) != 16385 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 16385 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 16385, "unexpected buf length") + require.Equal(t, 16385, cap(*buf), "unexpected buf cap") pool.Put(buf) } func TestPoolOneSize(t *testing.T) { maxSize := 1024 pool := New(1024, maxSize) - if pool.maxSize != maxSize { - t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) - } + require.Equal(t, maxSize, pool.maxSize, "Invalid max pool size") buf := pool.Get(64) - if len(*buf) != 64 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 1024 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 64, "unexpected buf length") + require.Equal(t, 1024, cap(*buf), "unexpected buf cap") pool.Put(buf) buf = pool.Get(1025) - if len(*buf) != 1025 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 1025 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 1025, "unexpected buf length") + require.Equal(t, 1025, cap(*buf), "unexpected buf cap") pool.Put(buf) } func TestPoolTwoSizeNotMultiplier(t *testing.T) { maxSize := 2000 pool := New(1024, maxSize) - if pool.maxSize != maxSize { - t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) - } + require.Equal(t, maxSize, pool.maxSize, "Invalid max pool size") buf := pool.Get(64) - if len(*buf) != 64 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 1024 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 64, "unexpected buf length") + require.Equal(t, 1024, cap(*buf), "unexpected buf cap") pool.Put(buf) buf = pool.Get(2001) - if len(*buf) != 2001 { - 
t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 2001 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 2001, "unexpected buf length") + require.Equal(t, 2001, cap(*buf), "unexpected buf cap") pool.Put(buf) } +func TestPoolMaxSizeLessThanMinSize(t *testing.T) { + assert.Panics(t, func() { New(15000, 1024) }) +} + func TestPoolWeirdMaxSize(t *testing.T) { maxSize := 15000 pool := New(1024, maxSize) - if pool.maxSize != maxSize { - t.Fatalf("Invalid max pool size: %d, expected %d", pool.maxSize, maxSize) - } + require.Equal(t, maxSize, pool.maxSize, "Invalid max pool size") buf := pool.Get(14000) - if len(*buf) != 14000 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 15000 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 14000, "unexpected buf length") + require.Equal(t, 15000, cap(*buf), "unexpected buf cap") pool.Put(buf) buf = pool.Get(16383) - if len(*buf) != 16383 { - t.Fatalf("unexpected buf length: %d", len(*buf)) - } - if cap(*buf) != 16383 { - t.Fatalf("unexpected buf cap: %d", cap(*buf)) - } + require.Len(t, *buf, 16383, "unexpected buf length") + require.Equal(t, 16383, cap(*buf), "unexpected buf cap") pool.Put(buf) } func TestFuzz(t *testing.T) { maxTestSize := 16384 - for i := 0; i < 20000; i++ { - minSize := rand.Intn(maxTestSize) + for range 20000 { + minSize := rand.IntN(maxTestSize) if minSize == 0 { minSize = 1 } - maxSize := rand.Intn(maxTestSize-minSize) + minSize + maxSize := rand.IntN(maxTestSize-minSize) + minSize p := New(minSize, maxSize) - bufSize := rand.Intn(maxTestSize) + bufSize := rand.IntN(maxTestSize) buf := p.Get(bufSize) - if len(*buf) != bufSize { - t.Fatalf("Invalid length %d, expected %d", len(*buf), bufSize) - } + require.Len(t, *buf, bufSize, "unexpected buf length") sPool := p.findPool(bufSize) if sPool == nil { - if cap(*buf) != len(*buf) { - t.Fatalf("Invalid cap %d, expected %d", cap(*buf), len(*buf)) - } + 
require.Equal(t, len(*buf), cap(*buf), "unexpected buf cap") } else { - if cap(*buf) != sPool.size { - t.Fatalf("Invalid cap %d, expected %d", cap(*buf), sPool.size) - } + require.Equal(t, sPool.size, cap(*buf), "unexpected buf cap") } p.Put(buf) } @@ -200,7 +143,7 @@ func BenchmarkPool(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { - randomSize := rand.Intn(pool.maxSize) + randomSize := rand.IntN(pool.maxSize) data := pool.Get(randomSize) pool.Put(data) } @@ -213,7 +156,7 @@ func BenchmarkPoolGet(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { - randomSize := rand.Intn(pool.maxSize) + randomSize := rand.IntN(pool.maxSize) data := pool.Get(randomSize) _ = data } diff --git a/go/bytes2/buffer.go b/go/bytes2/buffer.go index 1725274c43c..48561c5e493 100644 --- a/go/bytes2/buffer.go +++ b/go/bytes2/buffer.go @@ -65,7 +65,7 @@ func (buf *Buffer) String() string { // is _not_ allocated, so modifying this buffer after calling StringUnsafe will lead // to undefined behavior. func (buf *Buffer) StringUnsafe() string { - return *(*string)(unsafe.Pointer(&buf.bytes)) + return unsafe.String(unsafe.SliceData(buf.bytes), len(buf.bytes)) } // Reset is equivalent to bytes.Buffer.Reset. 
diff --git a/go/bytes2/buffer_test.go b/go/bytes2/buffer_test.go index 83cdb346ec9..1652f176df4 100644 --- a/go/bytes2/buffer_test.go +++ b/go/bytes2/buffer_test.go @@ -18,21 +18,39 @@ package bytes2 import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestBuffer(t *testing.T) { b := NewBuffer(nil) + + // Test Write function b.Write([]byte("ab")) + assert.Equal(t, "ab", string(b.Bytes()), "Write()") + + // Test WriteString function b.WriteString("cd") + assert.Equal(t, "abcd", string(b.Bytes()), "WriteString()") + + // Test WriteByte function b.WriteByte('e') - want := "abcde" - if got := string(b.Bytes()); got != want { - t.Errorf("b.Bytes(): %s, want %s", got, want) - } - if got := b.String(); got != want { - t.Errorf("b.String(): %s, want %s", got, want) - } - if got := b.Len(); got != 5 { - t.Errorf("b.Len(): %d, want 5", got) - } + assert.Equal(t, "abcde", string(b.Bytes()), "WriteByte()") + + // Test Bytes function + assert.Equal(t, "abcde", string(b.Bytes())) + + // Test String function + assert.Equal(t, "abcde", b.String()) + + // Test StringUnsafe function + assert.Equal(t, "abcde", b.StringUnsafe()) + + // Test Len function + assert.Equal(t, 5, b.Len()) + + // Test Reset function + b.Reset() + assert.Equal(t, "", string(b.Bytes())) + assert.Equal(t, 0, b.Len()) } diff --git a/go/cache/lru_cache.go b/go/cache/lru_cache.go index d845265b77b..beee6cc3d26 100644 --- a/go/cache/lru_cache.go +++ b/go/cache/lru_cache.go @@ -31,15 +31,13 @@ import ( // LRUCache is a typical LRU cache implementation. If the cache // reaches the capacity, the least recently used item is deleted from -// the cache. Note the capacity is not the number of items, but the -// total sum of the CachedSize() of each item. -type LRUCache struct { +// the cache. +type LRUCache[T any] struct { mu sync.Mutex // list & table contain *entry objects. 
list *list.List table map[string]*list.Element - cost func(any) int64 size int64 capacity int64 @@ -49,46 +47,44 @@ type LRUCache struct { } // Item is what is stored in the cache -type Item struct { +type Item[T any] struct { Key string - Value any + Value T } -type entry struct { +type entry[T any] struct { key string - value any - size int64 + value T timeAccessed time.Time } // NewLRUCache creates a new empty cache with the given capacity. -func NewLRUCache(capacity int64, cost func(any) int64) *LRUCache { - return &LRUCache{ +func NewLRUCache[T any](capacity int64) *LRUCache[T] { + return &LRUCache[T]{ list: list.New(), table: make(map[string]*list.Element), capacity: capacity, - cost: cost, } } // Get returns a value from the cache, and marks the entry as most // recently used. -func (lru *LRUCache) Get(key string) (v any, ok bool) { +func (lru *LRUCache[T]) Get(key string) (v T, ok bool) { lru.mu.Lock() defer lru.mu.Unlock() element := lru.table[key] if element == nil { lru.misses++ - return nil, false + return *new(T), false } lru.moveToFront(element) lru.hits++ - return element.Value.(*entry).value, true + return element.Value.(*entry[T]).value, true } // Set sets a value in the cache. -func (lru *LRUCache) Set(key string, value any) bool { +func (lru *LRUCache[T]) Set(key string, value T) bool { lru.mu.Lock() defer lru.mu.Unlock() @@ -102,7 +98,7 @@ func (lru *LRUCache) Set(key string, value any) bool { } // Delete removes an entry from the cache, and returns if the entry existed. 
-func (lru *LRUCache) delete(key string) bool { +func (lru *LRUCache[T]) delete(key string) bool { lru.mu.Lock() defer lru.mu.Unlock() @@ -113,27 +109,17 @@ func (lru *LRUCache) delete(key string) bool { lru.list.Remove(element) delete(lru.table, key) - lru.size -= element.Value.(*entry).size + lru.size-- return true } // Delete removes an entry from the cache -func (lru *LRUCache) Delete(key string) { +func (lru *LRUCache[T]) Delete(key string) { lru.delete(key) } -// Clear will clear the entire cache. -func (lru *LRUCache) Clear() { - lru.mu.Lock() - defer lru.mu.Unlock() - - lru.list.Init() - lru.table = make(map[string]*list.Element) - lru.size = 0 -} - // Len returns the size of the cache (in entries) -func (lru *LRUCache) Len() int { +func (lru *LRUCache[T]) Len() int { lru.mu.Lock() defer lru.mu.Unlock() return lru.list.Len() @@ -142,7 +128,7 @@ func (lru *LRUCache) Len() int { // SetCapacity will set the capacity of the cache. If the capacity is // smaller, and the current cache size exceed that capacity, the cache // will be shrank. -func (lru *LRUCache) SetCapacity(capacity int64) { +func (lru *LRUCache[T]) SetCapacity(capacity int64) { lru.mu.Lock() defer lru.mu.Unlock() @@ -150,105 +136,80 @@ func (lru *LRUCache) SetCapacity(capacity int64) { lru.checkCapacity() } -// Wait is a no-op in the LRU cache -func (lru *LRUCache) Wait() {} - // UsedCapacity returns the size of the cache (in bytes) -func (lru *LRUCache) UsedCapacity() int64 { +func (lru *LRUCache[T]) UsedCapacity() int64 { return lru.size } // MaxCapacity returns the cache maximum capacity. 
-func (lru *LRUCache) MaxCapacity() int64 { +func (lru *LRUCache[T]) MaxCapacity() int64 { lru.mu.Lock() defer lru.mu.Unlock() return lru.capacity } // Evictions returns the number of evictions -func (lru *LRUCache) Evictions() int64 { +func (lru *LRUCache[T]) Evictions() int64 { lru.mu.Lock() defer lru.mu.Unlock() return lru.evictions } // Hits returns number of cache hits since creation -func (lru *LRUCache) Hits() int64 { +func (lru *LRUCache[T]) Hits() int64 { lru.mu.Lock() defer lru.mu.Unlock() return lru.hits } // Misses returns number of cache misses since creation -func (lru *LRUCache) Misses() int64 { +func (lru *LRUCache[T]) Misses() int64 { lru.mu.Lock() defer lru.mu.Unlock() return lru.misses } -// ForEach yields all the values for the cache, ordered from most recently -// used to least recently used. -func (lru *LRUCache) ForEach(callback func(value any) bool) { - lru.mu.Lock() - defer lru.mu.Unlock() - - for e := lru.list.Front(); e != nil; e = e.Next() { - v := e.Value.(*entry) - if !callback(v.value) { - break - } - } -} - // Items returns all the values for the cache, ordered from most recently // used to least recently used. 
-func (lru *LRUCache) Items() []Item { +func (lru *LRUCache[T]) Items() []Item[T] { lru.mu.Lock() defer lru.mu.Unlock() - items := make([]Item, 0, lru.list.Len()) + items := make([]Item[T], 0, lru.list.Len()) for e := lru.list.Front(); e != nil; e = e.Next() { - v := e.Value.(*entry) - items = append(items, Item{Key: v.key, Value: v.value}) + v := e.Value.(*entry[T]) + items = append(items, Item[T]{Key: v.key, Value: v.value}) } return items } -func (lru *LRUCache) updateInplace(element *list.Element, value any) { - valueSize := lru.cost(value) - sizeDiff := valueSize - element.Value.(*entry).size - element.Value.(*entry).value = value - element.Value.(*entry).size = valueSize - lru.size += sizeDiff +func (lru *LRUCache[T]) updateInplace(element *list.Element, value T) { + element.Value.(*entry[T]).value = value lru.moveToFront(element) lru.checkCapacity() } -func (lru *LRUCache) moveToFront(element *list.Element) { +func (lru *LRUCache[T]) moveToFront(element *list.Element) { lru.list.MoveToFront(element) - element.Value.(*entry).timeAccessed = time.Now() + element.Value.(*entry[T]).timeAccessed = time.Now() } -func (lru *LRUCache) addNew(key string, value any) { - newEntry := &entry{key, value, lru.cost(value), time.Now()} +func (lru *LRUCache[T]) addNew(key string, value T) { + newEntry := &entry[T]{key, value, time.Now()} element := lru.list.PushFront(newEntry) lru.table[key] = element - lru.size += newEntry.size + lru.size++ lru.checkCapacity() } -func (lru *LRUCache) checkCapacity() { +func (lru *LRUCache[T]) checkCapacity() { // Partially duplicated from Delete for lru.size > lru.capacity { delElem := lru.list.Back() - delValue := delElem.Value.(*entry) + delValue := delElem.Value.(*entry[T]) lru.list.Remove(delElem) delete(lru.table, delValue.key) - lru.size -= delValue.size + lru.size-- lru.evictions++ } } - -func (lru *LRUCache) Close() { - lru.Clear() -} diff --git a/go/cache/lru_cache_test.go b/go/cache/lru_cache_test.go index 3faea669d3f..af9db72852e 
100644 --- a/go/cache/lru_cache_test.go +++ b/go/cache/lru_cache_test.go @@ -18,47 +18,33 @@ package cache import ( "testing" + + "github.com/stretchr/testify/assert" ) type CacheValue struct { size int64 } -func cacheValueSize(val any) int64 { - return val.(*CacheValue).size -} - func TestInitialState(t *testing.T) { - cache := NewLRUCache(5, cacheValueSize) + cache := NewLRUCache[*CacheValue](5) l, sz, c, e, h, m := cache.Len(), cache.UsedCapacity(), cache.MaxCapacity(), cache.Evictions(), cache.Hits(), cache.Misses() - if l != 0 { - t.Errorf("length = %v, want 0", l) - } - if sz != 0 { - t.Errorf("size = %v, want 0", sz) - } - if c != 5 { - t.Errorf("capacity = %v, want 5", c) - } - if e != 0 { - t.Errorf("evictions = %v, want 0", c) - } - if h != 0 { - t.Errorf("hits = %v, want 0", c) - } - if m != 0 { - t.Errorf("misses = %v, want 0", c) - } + assert.Zero(t, l) + assert.EqualValues(t, 0, sz) + assert.EqualValues(t, 5, c) + assert.EqualValues(t, 0, e) + assert.EqualValues(t, 0, h) + assert.EqualValues(t, 0, m) } func TestSetInsertsValue(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) + cache := NewLRUCache[*CacheValue](100) data := &CacheValue{0} key := "key" cache.Set(key, data) v, ok := cache.Get(key) - if !ok || v.(*CacheValue) != data { + if !ok || v != data { t.Errorf("Cache has incorrect value: %v != %v", data, v) } @@ -69,40 +55,24 @@ func TestSetInsertsValue(t *testing.T) { } func TestGetValueWithMultipleTypes(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) + cache := NewLRUCache[*CacheValue](100) data := &CacheValue{0} key := "key" cache.Set(key, data) v, ok := cache.Get("key") - if !ok || v.(*CacheValue) != data { + if !ok || v != data { t.Errorf("Cache has incorrect value for \"key\": %v != %v", data, v) } v, ok = cache.Get(string([]byte{'k', 'e', 'y'})) - if !ok || v.(*CacheValue) != data { + if !ok || v != data { t.Errorf("Cache has incorrect value for []byte {'k','e','y'}: %v != %v", data, v) } } -func TestSetUpdatesSize(t 
*testing.T) { - cache := NewLRUCache(100, cacheValueSize) - emptyValue := &CacheValue{0} - key := "key1" - cache.Set(key, emptyValue) - if sz := cache.UsedCapacity(); sz != 0 { - t.Errorf("cache.UsedCapacity() = %v, expected 0", sz) - } - someValue := &CacheValue{20} - key = "key2" - cache.Set(key, someValue) - if sz := cache.UsedCapacity(); sz != 20 { - t.Errorf("cache.UsedCapacity() = %v, expected 20", sz) - } -} - func TestSetWithOldKeyUpdatesValue(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) + cache := NewLRUCache[*CacheValue](100) emptyValue := &CacheValue{0} key := "key1" cache.Set(key, emptyValue) @@ -110,31 +80,13 @@ func TestSetWithOldKeyUpdatesValue(t *testing.T) { cache.Set(key, someValue) v, ok := cache.Get(key) - if !ok || v.(*CacheValue) != someValue { + if !ok || v != someValue { t.Errorf("Cache has incorrect value: %v != %v", someValue, v) } } -func TestSetWithOldKeyUpdatesSize(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) - emptyValue := &CacheValue{0} - key := "key1" - cache.Set(key, emptyValue) - - if sz := cache.UsedCapacity(); sz != 0 { - t.Errorf("cache.UsedCapacity() = %v, expected %v", sz, 0) - } - - someValue := &CacheValue{20} - cache.Set(key, someValue) - expected := int64(someValue.size) - if sz := cache.UsedCapacity(); sz != expected { - t.Errorf("cache.UsedCapacity() = %v, expected %v", sz, expected) - } -} - func TestGetNonExistent(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) + cache := NewLRUCache[*CacheValue](100) if _, ok := cache.Get("notthere"); ok { t.Error("Cache returned a notthere value after no inserts.") @@ -142,7 +94,7 @@ func TestGetNonExistent(t *testing.T) { } func TestDelete(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) + cache := NewLRUCache[*CacheValue](100) value := &CacheValue{1} key := "key" @@ -159,22 +111,9 @@ func TestDelete(t *testing.T) { } } -func TestClear(t *testing.T) { - cache := NewLRUCache(100, cacheValueSize) - value := &CacheValue{1} - key := 
"key" - - cache.Set(key, value) - cache.Clear() - - if sz := cache.UsedCapacity(); sz != 0 { - t.Errorf("cache.UsedCapacity() = %v, expected 0 after Clear()", sz) - } -} - func TestCapacityIsObeyed(t *testing.T) { size := int64(3) - cache := NewLRUCache(100, cacheValueSize) + cache := NewLRUCache[*CacheValue](100) cache.SetCapacity(size) value := &CacheValue{1} @@ -188,12 +127,8 @@ func TestCapacityIsObeyed(t *testing.T) { // Insert one more; something should be evicted to make room. cache.Set("key4", value) sz, evictions := cache.UsedCapacity(), cache.Evictions() - if sz != size { - t.Errorf("post-evict cache.UsedCapacity() = %v, expected %v", sz, size) - } - if evictions != 1 { - t.Errorf("post-evict cache.Evictions() = %v, expected 1", evictions) - } + assert.Equal(t, size, sz) + assert.EqualValues(t, 1, evictions) // Check various other stats if l := cache.Len(); int64(l) != size { @@ -215,7 +150,7 @@ func TestCapacityIsObeyed(t *testing.T) { func TestLRUIsEvicted(t *testing.T) { size := int64(3) - cache := NewLRUCache(size, cacheValueSize) + cache := NewLRUCache[*CacheValue](size) cache.Set("key1", &CacheValue{1}) cache.Set("key2", &CacheValue{1}) diff --git a/go/cache/theine/bf/bf.go b/go/cache/theine/bf/bf.go index f68e34d81e3..97b27a5c217 100644 --- a/go/cache/theine/bf/bf.go +++ b/go/cache/theine/bf/bf.go @@ -54,7 +54,7 @@ func (d *Bloomfilter) EnsureCapacity(capacity int) { func (d *Bloomfilter) Exist(h uint64) bool { h1, h2 := uint32(h), uint32(h>>32) var o uint = 1 - for i := uint32(0); i < d.K; i++ { + for i := range d.K { o &= d.Filter.get((h1 + (i * h2)) & (d.M - 1)) } return o == 1 @@ -65,7 +65,7 @@ func (d *Bloomfilter) Exist(h uint64) bool { func (d *Bloomfilter) Insert(h uint64) bool { h1, h2 := uint32(h), uint32(h>>32) var o uint = 1 - for i := uint32(0); i < d.K; i++ { + for i := range d.K { o &= d.Filter.getset((h1 + (i * h2)) & (d.M - 1)) } return o == 1 diff --git a/go/cache/theine/bf/bf_test.go b/go/cache/theine/bf/bf_test.go index 
f0e505766e7..135826195ac 100644 --- a/go/cache/theine/bf/bf_test.go +++ b/go/cache/theine/bf/bf_test.go @@ -21,4 +21,24 @@ func TestBloom(t *testing.T) { exist = bf.Exist(456) require.False(t, exist) + + bf = New(0.01) + require.Equal(t, 512, bf.Capacity) + require.Equal(t, 0.01, bf.FalsePositiveRate) + + bf.Insert(123) + exist = bf.Exist(123) + require.True(t, exist) + + bf.Insert(256) + exist = bf.Exist(256) + require.True(t, exist) + + bf.Reset() + + exist = bf.Exist(123) + require.False(t, exist) + + exist = bf.Exist(256) + require.False(t, exist) } diff --git a/go/cache/theine/list_test.go b/go/cache/theine/list_test.go index aad68f5c142..a0b607338dd 100644 --- a/go/cache/theine/list_test.go +++ b/go/cache/theine/list_test.go @@ -28,7 +28,7 @@ func TestList(t *testing.T) { l := NewList[StringKey, string](5, LIST_PROBATION) require.Equal(t, uint(5), l.capacity) require.Equal(t, LIST_PROBATION, l.listType) - for i := 0; i < 5; i++ { + for i := range 5 { evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)) require.Nil(t, evicted) } @@ -42,7 +42,7 @@ func TestList(t *testing.T) { require.Equal(t, "5/4/3/2/1", l.display()) require.Equal(t, "1/2/3/4/5", l.displayReverse()) - for i := 0; i < 5; i++ { + for i := range 5 { entry := l.PopTail() require.Equal(t, StringKey(fmt.Sprintf("%d", i+1)), entry.key) } @@ -50,7 +50,7 @@ func TestList(t *testing.T) { require.Nil(t, entry) var entries []*Entry[StringKey, string] - for i := 0; i < 5; i++ { + for i := range 5 { new := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1) evicted := l.PushFront(new) entries = append(entries, new) @@ -76,13 +76,13 @@ func TestListCountCost(t *testing.T) { l := NewList[StringKey, string](100, LIST_PROBATION) require.Equal(t, uint(100), l.capacity) require.Equal(t, LIST_PROBATION, l.listType) - for i := 0; i < 5; i++ { + for i := range 5 { evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 20)) require.Nil(t, evicted) } require.Equal(t, 100, l.len) 
require.Equal(t, 5, l.count) - for i := 0; i < 3; i++ { + for range 3 { entry := l.PopTail() require.NotNil(t, entry) } diff --git a/go/cache/theine/singleflight_test.go b/go/cache/theine/singleflight_test.go index 60b28e69b4e..bf5018a8891 100644 --- a/go/cache/theine/singleflight_test.go +++ b/go/cache/theine/singleflight_test.go @@ -32,6 +32,9 @@ import ( "sync/atomic" "testing" "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDo(t *testing.T) { @@ -39,12 +42,9 @@ func TestDo(t *testing.T) { v, err, _ := g.Do("key", func() (string, error) { return "bar", nil }) - if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want { - t.Errorf("Do = %v; want %v", got, want) - } - if err != nil { - t.Errorf("Do error = %v", err) - } + + assert.Equal(t, "bar (string)", fmt.Sprintf("%v (%T)", v, v)) + assert.NoError(t, err) } func TestDoErr(t *testing.T) { @@ -53,12 +53,9 @@ func TestDoErr(t *testing.T) { v, err, _ := g.Do("key", func() (string, error) { return "", someErr }) - if err != someErr { - t.Errorf("Do error = %v; want someErr %v", err, someErr) - } - if v != "" { - t.Errorf("unexpected non-nil value %#v", v) - } + + assert.ErrorIs(t, err, someErr, "incorrect Do error") + assert.Empty(t, v, "unexpected non-empty value") } func TestDoDupSuppress(t *testing.T) { @@ -81,20 +78,18 @@ func TestDoDupSuppress(t *testing.T) { const n = 10 wg1.Add(1) - for i := 0; i < n; i++ { + for range n { wg1.Add(1) wg2.Add(1) go func() { defer wg2.Done() wg1.Done() v, err, _ := g.Do("key", fn) - if err != nil { - t.Errorf("Do error: %v", err) + if !assert.NoError(t, err) { return } - if s := v; s != "bar" { - t.Errorf("Do = %T %v; want %q", v, v, "bar") - } + + assert.Equal(t, "bar", v) }() } wg1.Wait() @@ -102,9 +97,9 @@ func TestDoDupSuppress(t *testing.T) { // least reached the line before the Do. 
c <- "bar" wg2.Wait() - if got := atomic.LoadInt32(&calls); got <= 0 || got >= n { - t.Errorf("number of calls = %d; want over 0 and less than %d", got, n) - } + got := atomic.LoadInt32(&calls) + assert.Greater(t, got, int32(0)) + assert.Less(t, got, int32(n)) } // Test singleflight behaves correctly after Do panic. @@ -119,7 +114,7 @@ func TestPanicDo(t *testing.T) { waited := int32(n) panicCount := int32(0) done := make(chan struct{}) - for i := 0; i < n; i++ { + for range n { go func() { defer func() { if err := recover(); err != nil { @@ -137,11 +132,9 @@ func TestPanicDo(t *testing.T) { select { case <-done: - if panicCount != n { - t.Errorf("Expect %d panic, but got %d", n, panicCount) - } + assert.EqualValues(t, n, panicCount) case <-time.After(time.Second): - t.Fatalf("Do hangs") + require.Fail(t, "Do hangs") } } @@ -155,13 +148,12 @@ func TestGoexitDo(t *testing.T) { const n = 5 waited := int32(n) done := make(chan struct{}) - for i := 0; i < n; i++ { + for range n { go func() { var err error defer func() { - if err != nil { - t.Errorf("Error should be nil, but got: %v", err) - } + assert.NoError(t, err) + if atomic.AddInt32(&waited, -1) == 0 { close(done) } @@ -173,7 +165,7 @@ func TestGoexitDo(t *testing.T) { select { case <-done: case <-time.After(time.Second): - t.Fatalf("Do hangs") + require.Fail(t, "Do hangs") } } @@ -201,10 +193,9 @@ func randKeys(b *testing.B, count, length uint) []string { keys := make([]string, 0, count) key := make([]byte, length) - for i := uint(0); i < count; i++ { - if _, err := io.ReadFull(rand.Reader, key); err != nil { - b.Fatalf("Failed to generate random key %d of %d of length %d: %s", i+1, count, length, err) - } + for i := range uint(count) { + _, err := io.ReadFull(rand.Reader, key) + require.NoError(b, err, "Failed to generate random key %d of %d length %d", i+1, count, length) keys = append(keys, string(key)) } return keys diff --git a/go/cache/theine/sketch_test.go b/go/cache/theine/sketch_test.go index 
3437f0cac3c..fb53fa8e5fb 100644 --- a/go/cache/theine/sketch_test.go +++ b/go/cache/theine/sketch_test.go @@ -23,7 +23,7 @@ func TestSketch(t *testing.T) { sketch.SampleSize = 5120 failed := 0 - for i := 0; i < 500; i++ { + for i := range 500 { key := fmt.Sprintf("key:%d", i) keyh := xxhash.Sum64String(key) sketch.Add(keyh) diff --git a/go/cache/theine/store.go b/go/cache/theine/store.go index 3d86e549867..cef5a89c8b7 100644 --- a/go/cache/theine/store.go +++ b/go/cache/theine/store.go @@ -44,17 +44,17 @@ const ( ) type Shard[K cachekey, V any] struct { - hashmap map[K]*Entry[K, V] - dookeeper *bf.Bloomfilter - deque *deque.Deque[*Entry[K, V]] - group *Group[K, V] - qsize uint - qlen int - counter uint - mu sync.RWMutex + hashmap map[K]*Entry[K, V] + doorkeeper *bf.Bloomfilter + deque *deque.Deque[*Entry[K, V]] + group *Group[K, V] + qsize uint + qlen int + counter uint + mu sync.RWMutex } -func NewShard[K cachekey, V any](size uint, qsize uint, doorkeeper bool) *Shard[K, V] { +func NewShard[K cachekey, V any](qsize uint, doorkeeper bool) *Shard[K, V] { s := &Shard[K, V]{ hashmap: make(map[K]*Entry[K, V]), qsize: qsize, @@ -62,17 +62,17 @@ func NewShard[K cachekey, V any](size uint, qsize uint, doorkeeper bool) *Shard[ group: NewGroup[K, V](), } if doorkeeper { - s.dookeeper = bf.New(0.01) + s.doorkeeper = bf.New(0.01) } return s } func (s *Shard[K, V]) set(key K, entry *Entry[K, V]) { s.hashmap[key] = entry - if s.dookeeper != nil { + if s.doorkeeper != nil { ds := 20 * len(s.hashmap) - if ds > s.dookeeper.Capacity { - s.dookeeper.EnsureCapacity(ds) + if ds > s.doorkeeper.Capacity { + s.doorkeeper.EnsureCapacity(ds) } } } @@ -195,10 +195,6 @@ func NewStore[K cachekey, V cacheval](maxsize int64, doorkeeper bool) *Store[K, shardCount = 128 } dequeSize := int(maxsize) / 100 / shardCount - shardSize := int(maxsize) / shardCount - if shardSize < 50 { - shardSize = 50 - } policySize := int(maxsize) - (dequeSize * shardCount) s := &Store[K, V]{ @@ -212,8 +208,8 @@ func 
NewStore[K cachekey, V cacheval](maxsize int64, doorkeeper bool) *Store[K, writebufsize: writeBufSize, } s.shards = make([]*Shard[K, V], 0, s.shardCount) - for i := 0; i < int(s.shardCount); i++ { - s.shards = append(s.shards, NewShard[K, V](uint(shardSize), uint(dequeSize), doorkeeper)) + for range s.shardCount { + s.shards = append(s.shards, NewShard[K, V](uint(dequeSize), doorkeeper)) } go s.maintenance() @@ -329,11 +325,11 @@ func (s *Store[K, V]) setInternal(key K, value V, cost int64, epoch uint32) (*Sh return shard, exist, true } if s.doorkeeper { - if shard.counter > uint(shard.dookeeper.Capacity) { - shard.dookeeper.Reset() + if shard.counter > uint(shard.doorkeeper.Capacity) { + shard.doorkeeper.Reset() shard.counter = 0 } - hit := shard.dookeeper.Insert(h) + hit := shard.doorkeeper.Insert(h) if !hit { shard.counter += 1 shard.mu.Unlock() @@ -373,7 +369,6 @@ func (s *Store[K, V]) processDeque(shard *Shard[K, V], epoch uint32) { return } var evictedkv []dequeKV[K, V] - var expiredkv []dequeKV[K, V] // send to slru send := make([]*Entry[K, V], 0, 2) @@ -422,9 +417,6 @@ func (s *Store[K, V]) processDeque(shard *Shard[K, V], epoch uint32) { for _, kv := range evictedkv { s.OnRemoval(kv.k, kv.v, EVICTED) } - for _, kv := range expiredkv { - s.OnRemoval(kv.k, kv.v, EXPIRED) - } } } diff --git a/go/cache/theine/store_test.go b/go/cache/theine/store_test.go index 880acf30193..e6a2f9d5679 100644 --- a/go/cache/theine/store_test.go +++ b/go/cache/theine/store_test.go @@ -52,7 +52,7 @@ func TestProcessDeque(t *testing.T) { shard := store.shards[index] shard.qsize = 10 - for i := keyint(0); i < 5; i++ { + for i := range keyint(5) { entry := &Entry[keyint, cachedint]{key: i} entry.cost.Store(1) store.shards[index].deque.PushFront(entry) @@ -74,9 +74,9 @@ func TestProcessDeque(t *testing.T) { func TestDoorKeeperDynamicSize(t *testing.T) { store := NewStore[keyint, cachedint](200000, true) shard := store.shards[0] - require.True(t, shard.dookeeper.Capacity == 512) - for 
i := keyint(0); i < 5000; i++ { + require.True(t, shard.doorkeeper.Capacity == 512) + for i := range keyint(5000) { shard.set(i, &Entry[keyint, cachedint]{}) } - require.True(t, shard.dookeeper.Capacity > 100000) + require.True(t, shard.doorkeeper.Capacity > 100000) } diff --git a/go/cache/theine/tlfu_test.go b/go/cache/theine/tlfu_test.go index ac6ddaabdb6..f798f89549f 100644 --- a/go/cache/theine/tlfu_test.go +++ b/go/cache/theine/tlfu_test.go @@ -33,7 +33,7 @@ func TestTlfu(t *testing.T) { require.Equal(t, 0, tlfu.slru.protected.len) var entries []*Entry[StringKey, string] - for i := 0; i < 200; i++ { + for i := range 200 { e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1) evicted := tlfu.Set(e) entries = append(entries, e) @@ -78,7 +78,7 @@ func TestTlfu(t *testing.T) { require.Equal(t, 998, tlfu.slru.probation.len) var entries2 []*Entry[StringKey, string] - for i := 0; i < 1000; i++ { + for i := range 1000 { e := NewEntry(StringKey(fmt.Sprintf("%d*", i)), "", 1) tlfu.Set(e) entries2 = append(entries2, e) @@ -103,7 +103,7 @@ func TestEvictEntries(t *testing.T) { require.Equal(t, 0, tlfu.slru.probation.len) require.Equal(t, 0, tlfu.slru.protected.len) - for i := 0; i < 500; i++ { + for i := range 500 { tlfu.Set(NewEntry(StringKey(fmt.Sprintf("%d:1", i)), "", 1)) } require.Equal(t, 500, tlfu.slru.probation.len) diff --git a/go/cmd/internal/docgen/docgen.go b/go/cmd/internal/docgen/docgen.go index f52042e80af..eea935ed396 100644 --- a/go/cmd/internal/docgen/docgen.go +++ b/go/cmd/internal/docgen/docgen.go @@ -66,7 +66,7 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error { switch fi, err := os.Stat(dir); { case errors.Is(err, fs.ErrNotExist): if err := os.MkdirAll(dir, 0755); err != nil { - return err + return fmt.Errorf("failed to create \"%s\" directory: %w", dir, err) } case err != nil: return err @@ -194,7 +194,7 @@ func anonymizeHomedir(file string) (err error) { // We're replacing the stuff inside the square brackets in the example sed // 
below: // 's:Paths to search for config files in. (default \[.*\])$:Paths to search for config files in. (default \[\]):' - sed := exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:%s::i", wd), file) + sed := exec.Command("sed", "-i", "-e", fmt.Sprintf("s:%s::i", wd), file) if out, err := sed.CombinedOutput(); err != nil { return fmt.Errorf("%w: %s", err, out) } @@ -215,7 +215,7 @@ func getCommitID(ref string) (string, error) { gitShow := exec.Command("git", "show", "--pretty=format:%H", "--no-patch", ref) out, err := gitShow.Output() if err != nil { - return "", err + return "", fmt.Errorf("failed to get the commit id for reference \"%s\": %w", ref, err) } return string(out), nil diff --git a/go/cmd/internal/docgen/docgen_test.go b/go/cmd/internal/docgen/docgen_test.go new file mode 100644 index 00000000000..2370727cde5 --- /dev/null +++ b/go/cmd/internal/docgen/docgen_test.go @@ -0,0 +1,191 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package docgen + +import ( + "strings" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestGenerateMarkdownTree(t *testing.T) { + tests := []struct { + name string + dir string + cmd *cobra.Command + expectErr bool + }{ + { + name: "Empty dir", + dir: "", + cmd: &cobra.Command{}, + expectErr: true, + }, + { + name: "current dir", + dir: "./", + cmd: &cobra.Command{}, + expectErr: false, + }, + { + name: "Permission denied", + dir: "/root", + cmd: &cobra.Command{}, + expectErr: true, + }, + { + name: "Not a directory error", + dir: "./docgen.go", + cmd: &cobra.Command{}, + expectErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := GenerateMarkdownTree(tt.cmd, tt.dir) + if !tt.expectErr { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestRestructure(t *testing.T) { + rootCmd := &cobra.Command{ + Use: "root-command", + } + cmd := &cobra.Command{ + Use: "random", + } + rootCmd.AddCommand(cmd) + cmds := []*cobra.Command{rootCmd} + + tests := []struct { + name string + rootDir string + dir string + cmds []*cobra.Command + expectErr bool + }{ + { + name: "Empty commands", + cmds: []*cobra.Command{}, + }, + { + name: "Non-empty commands", + rootDir: "../", + dir: "./", + cmds: cmds, + expectErr: true, + }, + { + name: "No subcommands", + rootDir: "../", + dir: "./", + cmds: []*cobra.Command{{Use: "help"}, {Use: "test-cmd"}}, + expectErr: true, + }, + { + name: "No subcommands with rootDir and dir unset", + cmds: []*cobra.Command{{Use: "random"}}, + expectErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := restructure(tt.rootDir, tt.dir, tt.name, tt.cmds) + if !tt.expectErr { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestLinkHandler(t *testing.T) { + tests := []struct { + name string + fileName string + expectedStr string + }{ + { + name: "Normal 
value", + fileName: "Some_value", + expectedStr: "./some_value/", + }, + { + name: "Abnormal value", + fileName: `./.jash13_24`, + expectedStr: "../", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + str := linkHandler(tt.fileName) + require.Equal(t, tt.expectedStr, str) + }) + } +} + +func TestNewParentLinkSedCommand(t *testing.T) { + tests := []struct { + name string + parentDir string + fileName string + expectedOutput string + }{ + { + name: "Empty values", + expectedOutput: "sed -i -e s:(.//):(../):i ", + }, + { + name: "Normal value", + parentDir: "./", + fileName: "Some_value", + expectedOutput: "sed -i -e s:(././/):(../):i Some_value", + }, + { + name: "Abnormal value", + parentDir: "/root", + fileName: `./.jash13_24`, + expectedOutput: "sed -i -e s:(.//root/):(../):i ./.jash13_24", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cmd := newParentLinkSedCommand(tt.parentDir, tt.fileName) + // We only check for suffix because the sed command's actual path may differ on different machines. + require.True(t, strings.HasSuffix(cmd.String(), tt.expectedOutput)) + }) + } +} + +func TestGetCommitID(t *testing.T) { + // This function should return an error when the reference is not in the + // git tree. + _, err := getCommitID("invalid ref") + require.Error(t, err) +} diff --git a/go/cmd/mysqlctl/command/init.go b/go/cmd/mysqlctl/command/init.go index 71a9661aa80..afaf1c566df 100644 --- a/go/cmd/mysqlctl/command/init.go +++ b/go/cmd/mysqlctl/command/init.go @@ -49,13 +49,13 @@ var initArgs = struct { func commandInit(cmd *cobra.Command, args []string) error { // Generate my.cnf from scratch and use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) if err != nil { return fmt.Errorf("failed to initialize mysql config: %v", err) } defer mysqld.Close() - ctx, cancel := context.WithTimeout(context.Background(), initArgs.WaitTime) + ctx, cancel := context.WithTimeout(cmd.Context(), initArgs.WaitTime) defer cancel() if err := mysqld.Init(ctx, cnf, initArgs.InitDbSQLFile); err != nil { return fmt.Errorf("failed init mysql: %v", err) diff --git a/go/cmd/mysqlctl/command/init_config.go b/go/cmd/mysqlctl/command/init_config.go index 70e751e02cb..36687482e08 100644 --- a/go/cmd/mysqlctl/command/init_config.go +++ b/go/cmd/mysqlctl/command/init_config.go @@ -40,7 +40,7 @@ var InitConfig = &cobra.Command{ func commandInitConfig(cmd *cobra.Command, args []string) error { // Generate my.cnf from scratch and use it to find mysqld. - mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) if err != nil { return fmt.Errorf("failed to initialize mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/reinit_config.go b/go/cmd/mysqlctl/command/reinit_config.go index b06642c8203..fd7523c0411 100644 --- a/go/cmd/mysqlctl/command/reinit_config.go +++ b/go/cmd/mysqlctl/command/reinit_config.go @@ -41,7 +41,7 @@ var ReinitConfig = &cobra.Command{ func commandReinitConfig(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/root.go b/go/cmd/mysqlctl/command/root.go index 4f5626ef7e6..78b3a623666 100644 --- a/go/cmd/mysqlctl/command/root.go +++ b/go/cmd/mysqlctl/command/root.go @@ -23,21 +23,22 @@ import ( "vitess.io/vitess/go/acl" vtcmd "vitess.io/vitess/go/cmd" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" ) var ( - mysqlPort = 3306 - tabletUID = uint32(41983) - mysqlSocket string + mysqlPort = 3306 + tabletUID = uint32(41983) + mysqlSocket string + collationEnv *collations.Environment Root = &cobra.Command{ Use: "mysqlctl", Short: "mysqlctl initializes and controls mysqld with Vitess-specific configuration.", Long: "`mysqlctl` is a command-line client used for managing `mysqld` instances.\n\n" + - "It is responsible for bootstrapping tasks such as generating a configuration file for `mysqld` and initializing the instance and its data directory.\n" + "The `mysqld_safe` watchdog is utilized when present.\n" + "This helps ensure that `mysqld` is automatically restarted after failures.", @@ -74,4 +75,6 @@ func init() { Root.PersistentFlags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file.") acl.RegisterFlags(Root.PersistentFlags()) + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } diff --git a/go/cmd/mysqlctl/command/shutdown.go b/go/cmd/mysqlctl/command/shutdown.go index 41c804856eb..30e0c8c0f8e 100644 --- a/go/cmd/mysqlctl/command/shutdown.go +++ b/go/cmd/mysqlctl/command/shutdown.go @@ -30,7 +30,6 @@ var Shutdown = &cobra.Command{ Use: "shutdown", Short: "Shuts down mysqld, without removing any files.", Long: "Stop a `mysqld` instance that was previously started with `init` or 
`start`.\n\n" + - "For large `mysqld` instances, you may need to extend the `wait_time` to shutdown cleanly.", Example: `mysqlctl --tablet_uid 101 --alsologtostderr shutdown`, Args: cobra.NoArgs, @@ -45,15 +44,15 @@ var shutdownArgs = struct { func commandShutdown(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } defer mysqld.Close() - ctx, cancel := context.WithTimeout(context.Background(), shutdownArgs.WaitTime) + ctx, cancel := context.WithTimeout(cmd.Context(), shutdownArgs.WaitTime+10*time.Second) defer cancel() - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + if err := mysqld.Shutdown(ctx, cnf, true, shutdownArgs.WaitTime); err != nil { return fmt.Errorf("failed shutdown mysql: %v", err) } return nil diff --git a/go/cmd/mysqlctl/command/start.go b/go/cmd/mysqlctl/command/start.go index 397909e0966..aff8d723a8b 100644 --- a/go/cmd/mysqlctl/command/start.go +++ b/go/cmd/mysqlctl/command/start.go @@ -45,13 +45,13 @@ var startArgs = struct { func commandStart(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } defer mysqld.Close() - ctx, cancel := context.WithTimeout(context.Background(), startArgs.WaitTime) + ctx, cancel := context.WithTimeout(cmd.Context(), startArgs.WaitTime) defer cancel() if err := mysqld.Start(ctx, cnf, startArgs.MySQLdArgs...); err != nil { return fmt.Errorf("failed start mysql: %v", err) diff --git a/go/cmd/mysqlctl/command/teardown.go b/go/cmd/mysqlctl/command/teardown.go index 0d37a15cfdc..3e7e7bfd0ef 100644 --- a/go/cmd/mysqlctl/command/teardown.go +++ b/go/cmd/mysqlctl/command/teardown.go @@ -32,7 +32,6 @@ var Teardown = &cobra.Command{ Long: "{{< warning >}}\n" + "This is a destructive operation.\n" + "{{}}\n\n" + - "Shuts down a `mysqld` instance and removes its data directory.", Example: `mysqlctl --tablet_uid 101 --alsologtostderr teardown`, Args: cobra.NoArgs, @@ -48,15 +47,15 @@ var teardownArgs = struct { func commandTeardown(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } defer mysqld.Close() - ctx, cancel := context.WithTimeout(context.Background(), teardownArgs.WaitTime) + ctx, cancel := context.WithTimeout(cmd.Context(), teardownArgs.WaitTime+10*time.Second) defer cancel() - if err := mysqld.Teardown(ctx, cnf, teardownArgs.Force); err != nil { + if err := mysqld.Teardown(ctx, cnf, teardownArgs.Force, teardownArgs.WaitTime); err != nil { return fmt.Errorf("failed teardown mysql (forced? 
%v): %v", teardownArgs.Force, err) } return nil diff --git a/go/cmd/mysqlctld/cli/mysqlctld.go b/go/cmd/mysqlctld/cli/mysqlctld.go index 6ebaa5dc422..ee3fe241440 100644 --- a/go/cmd/mysqlctld/cli/mysqlctld.go +++ b/go/cmd/mysqlctld/cli/mysqlctld.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -40,13 +41,15 @@ var ( mysqld *mysqlctl.Mysqld cnf *mysqlctl.Mycnf - mysqlPort = 3306 - tabletUID = uint32(41983) - mysqlSocket string + mysqlPort = 3306 + tabletUID = uint32(41983) + mysqlSocket string + collationEnv *collations.Environment // mysqlctl init flags - waitTime = 5 * time.Minute - initDBSQLFile string + waitTime = 5 * time.Minute + shutdownWaitTime = 5 * time.Minute + initDBSQLFile string Main = &cobra.Command{ Use: "mysqlctld", @@ -64,15 +67,22 @@ var ( --mysql_port=17100 \ --socket_file=/path/to/socket_file`, Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), PreRunE: servenv.CobraPreRunE, RunE: run, } + + timeouts = &servenv.TimeoutFlags{ + LameduckPeriod: 50 * time.Millisecond, + OnTermTimeout: shutdownWaitTime + 10*time.Second, + OnCloseTimeout: 10 * time.Second, + } ) func init() { servenv.RegisterDefaultFlags() servenv.RegisterDefaultSocketFileFlags() - servenv.RegisterFlags() + servenv.RegisterFlagsWithTimeouts(timeouts) servenv.RegisterGRPCServerFlags() servenv.RegisterGRPCServerAuthFlags() servenv.RegisterServiceMapFlag() @@ -84,10 +94,13 @@ func init() { Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port") Main.Flags().Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID") Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file") - Main.Flags().DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup or shutdown") + Main.Flags().DurationVar(&waitTime, "wait_time", waitTime, "How 
long to wait for mysqld startup") Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "Path to .sql file to run after mysqld initialization") + Main.Flags().DurationVar(&shutdownWaitTime, "shutdown-wait-time", shutdownWaitTime, "How long to wait for mysqld shutdown") acl.RegisterFlags(Main.Flags()) + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } func run(cmd *cobra.Command, args []string) error { @@ -101,14 +114,14 @@ func run(cmd *cobra.Command, args []string) error { } // Start or Init mysqld as needed. - ctx, cancel := context.WithTimeout(context.Background(), waitTime) + ctx, cancel := context.WithTimeout(cmd.Context(), waitTime) mycnfFile := mysqlctl.MycnfFile(tabletUID) if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { // Generate my.cnf from scratch and use it to find mysqld. log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile) var err error - mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) if err != nil { cancel() return fmt.Errorf("failed to initialize mysql config: %w", err) @@ -124,7 +137,7 @@ func run(cmd *cobra.Command, args []string) error { log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile) var err error - mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { cancel() return fmt.Errorf("failed to find mysql config: %w", err) @@ -154,8 +167,9 @@ func run(cmd *cobra.Command, args []string) error { // Take mysqld down with us on SIGTERM before entering lame duck. 
servenv.OnTermSync(func() { log.Infof("mysqlctl received SIGTERM, shutting down mysqld first") - ctx := context.Background() - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + ctx, cancel := context.WithTimeout(cmd.Context(), shutdownWaitTime+10*time.Second) + defer cancel() + if err := mysqld.Shutdown(ctx, cnf, true, shutdownWaitTime); err != nil { log.Errorf("failed to shutdown mysqld: %v", err) } }) diff --git a/go/cmd/rulesctl/cmd/add_test.go b/go/cmd/rulesctl/cmd/add_test.go new file mode 100644 index 00000000000..54c6623dab8 --- /dev/null +++ b/go/cmd/rulesctl/cmd/add_test.go @@ -0,0 +1,133 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "io" + "os" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestAdd(t *testing.T) { + cmd := Add() + require.NotNil(t, cmd) + require.Equal(t, "add-rule", cmd.Name()) + configFile = "./testdata/rules.json" + + tests := []struct { + name string + args []string + expectedOutput string + }{ + { + name: "Action fail", + args: []string{"--dry-run=true", "--name=Rule", `--description="New rules that will be added to the file"`, "--action=fail", "--plan=Select"}, + expectedOutput: `[ + { + "Description": "Some value", + "Name": "Name", + "Action": "FAIL" + }, + { + "Description": "\"New rules that will be added to the file\"", + "Name": "Rule", + "Plans": [ + "Select" + ], + "Action": "FAIL" + } +] +`, + }, + { + name: "Action fail_retry", + args: []string{"--dry-run=true", "--name=Rule", `--description="New rules that will be added to the file"`, "--action=fail_retry", "--plan=Select"}, + expectedOutput: `[ + { + "Description": "Some value", + "Name": "Name", + "Action": "FAIL" + }, + { + "Description": "\"New rules that will be added to the file\"", + "Name": "Rule", + "Plans": [ + "Select", + "Select" + ], + "Action": "FAIL_RETRY" + } +] +`, + }, + { + name: "Action continue with query", + args: []string{"--dry-run=true", "--name=Rule", `--description="New rules that will be added to the file"`, "--action=continue", "--plan=Select", "--query=secret", "--leading-comment=None", "--trailing-comment=Yoho", "--table=Temp"}, + expectedOutput: `[ + { + "Description": "Some value", + "Name": "Name", + "Action": "FAIL" + }, + { + "Description": "\"New rules that will be added to the file\"", + "Name": "Rule", + "Query": "secret", + "LeadingComment": "None", + "TrailingComment": "Yoho", + "Plans": [ + "Select", + "Select", + "Select" + ], + "TableNames": [ + "Temp" + ] + } +] +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.args != nil { + cmd.SetArgs(tt.args) + 
err := cmd.Execute() + require.NoError(t, err) + } + + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + cmd.Run(&cobra.Command{}, []string{}) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + require.EqualValues(t, tt.expectedOutput, string(got)) + }) + } +} diff --git a/go/cmd/rulesctl/cmd/explain_test.go b/go/cmd/rulesctl/cmd/explain_test.go new file mode 100644 index 00000000000..cc515a1eb3d --- /dev/null +++ b/go/cmd/rulesctl/cmd/explain_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestExplainWithQueryPlanArguement(t *testing.T) { + explainCmd := Explain() + + require.NotNil(t, explainCmd) + require.Equal(t, "explain", explainCmd.Name()) + + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + explainCmd.Run(&cobra.Command{}, []string{"query-plans"}) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + + expected := "Query Plans!" 
+ require.Contains(t, string(got), expected) +} + +func TestExplainWithRandomArguement(t *testing.T) { + explainCmd := Explain() + + require.NotNil(t, explainCmd) + require.Equal(t, "explain", explainCmd.Name()) + + // Redirect stdout to a buffer + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + explainCmd.Run(&cobra.Command{}, []string{"random"}) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + + expected := "I don't know anything about" + require.Contains(t, string(got), expected) +} diff --git a/go/cmd/rulesctl/cmd/list_test.go b/go/cmd/rulesctl/cmd/list_test.go new file mode 100644 index 00000000000..d787481165e --- /dev/null +++ b/go/cmd/rulesctl/cmd/list_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "io" + "os" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestList(t *testing.T) { + cmd := List() + require.NotNil(t, cmd) + require.Equal(t, "list", cmd.Name()) + configFile = "./testdata/rules.json" + + tests := []struct { + name string + args []string + expectedOutput string + }{ + { + name: "No args", + expectedOutput: `[ + { + "Description": "Some value", + "Name": "Name", + "Action": "FAIL" + } +] +`, + }, + { + name: "Name only", + args: []string{"--names-only=true"}, + expectedOutput: `[ + "Name" +] +`, + }, + { + name: "Name flag set", + args: []string{"--name=Name"}, + expectedOutput: `"Name" +`, + }, + { + name: "Random name in name flag", + args: []string{"--name=Random"}, + expectedOutput: `"" +`, + }, + { + name: "Random name in name flag and names-only false", + args: []string{"--name=Random", "--names-only=false"}, + expectedOutput: `null +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.args != nil { + cmd.SetArgs(tt.args) + err := cmd.Execute() + require.NoError(t, err) + } + + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + cmd.Run(&cobra.Command{}, []string{}) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + + require.EqualValues(t, tt.expectedOutput, string(got)) + }) + } +} diff --git a/go/cmd/rulesctl/cmd/main_test.go b/go/cmd/rulesctl/cmd/main_test.go new file mode 100644 index 00000000000..cbdba6c00e6 --- /dev/null +++ b/go/cmd/rulesctl/cmd/main_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMainFunction(t *testing.T) { + rootCmd := Main() + require.NotNil(t, rootCmd) + require.Equal(t, "rulesctl", rootCmd.Name()) + + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + args := os.Args + t.Cleanup(func() { os.Args = args }) + os.Args = []string{"rulesctl", "-f=testdata/rules.json", "list"} + err := rootCmd.Execute() + require.NoError(t, err) + + err = w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + + expected := `[ + { + "Description": "Some value", + "Name": "Name", + "Action": "FAIL" + } +] +` + require.EqualValues(t, expected, string(got)) +} diff --git a/go/cmd/rulesctl/cmd/remove_test.go b/go/cmd/rulesctl/cmd/remove_test.go new file mode 100644 index 00000000000..d0ee9f9880e --- /dev/null +++ b/go/cmd/rulesctl/cmd/remove_test.go @@ -0,0 +1,114 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "io" + "os" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestRemoveOld(t *testing.T) { + removeCmd := Remove() + + require.NotNil(t, removeCmd) + require.Equal(t, "remove-rule", removeCmd.Name()) + + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + configFile = "../common/testdata/rules.json" + removeCmd.Run(&cobra.Command{}, []string{""}) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + + expected := "No rule found:" + require.Contains(t, string(got), expected) +} + +func TestRemove(t *testing.T) { + cmd := Remove() + require.NotNil(t, cmd) + require.Equal(t, "remove-rule", cmd.Name()) + configFile = "./testdata/rules.json" + defer func() { + _ = os.WriteFile(configFile, []byte(`[ + { + "Description": "Some value", + "Name": "Name" + } +] +`), 0777) + }() + + tests := []struct { + name string + args []string + expectedOutput string + }{ + { + name: "No args", + expectedOutput: "No rule found: ''", + }, + { + name: "Dry run and name both set", + args: []string{"--dry-run=true", "--name=Name"}, + expectedOutput: "[]\n", + }, + { + name: "Dry run not set name set", + args: []string{"--dry-run=false", "--name=Name"}, + expectedOutput: "No rule found: 'Name'", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + + if tt.args != nil { + cmd.SetArgs(tt.args) + err := cmd.Execute() + require.NoError(t, err) + } + cmd.Run(&cobra.Command{}, []string{}) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + + 
require.Contains(t, string(got), tt.expectedOutput) + }) + } +} diff --git a/go/cmd/rulesctl/cmd/testdata/rules.json b/go/cmd/rulesctl/cmd/testdata/rules.json new file mode 100644 index 00000000000..12f0bfa0b5a --- /dev/null +++ b/go/cmd/rulesctl/cmd/testdata/rules.json @@ -0,0 +1,6 @@ +[ + { + "Description": "Some value", + "Name": "Name" + } +] diff --git a/go/cmd/rulesctl/common/common_test.go b/go/cmd/rulesctl/common/common_test.go new file mode 100644 index 00000000000..aff7f012c20 --- /dev/null +++ b/go/cmd/rulesctl/common/common_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "io" + "os" + "path" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetRules(t *testing.T) { + rules := GetRules("testdata/rules.json") + require.NotEmpty(t, rules) +} + +type testStruct struct { + StringField string `yaml:"stringfield"` + IntField int `yaml:"intfield"` + BoolField bool `yaml:"boolfield"` + Float64Field float64 `yaml:"float64field"` +} + +var testData = testStruct{ + "tricky text to test text", + 32, + true, + 3.141, +} + +func TestMustPrintJSON(t *testing.T) { + originalStdOut := os.Stdout + defer func() { + os.Stdout = originalStdOut + }() + + // Redirect stdout to a buffer + r, w, _ := os.Pipe() + os.Stdout = w + MustPrintJSON(testData) + + err := w.Close() + require.NoError(t, err) + got, err := io.ReadAll(r) + require.NoError(t, err) + require.Equal(t, `{ + "StringField": "tricky text to test text", + "IntField": 32, + "BoolField": true, + "Float64Field": 3.141 +} +`, string(got)) +} + +func TestMustWriteJSON(t *testing.T) { + tmpFile := path.Join(t.TempDir(), "temp.json") + MustWriteJSON(testData, tmpFile) + + res, err := os.ReadFile(tmpFile) + require.NoError(t, err) + + require.EqualValues(t, `{ + "StringField": "tricky text to test text", + "IntField": 32, + "BoolField": true, + "Float64Field": 3.141 +}`, string(res)) +} diff --git a/go/cmd/rulesctl/common/testdata/rules.json b/go/cmd/rulesctl/common/testdata/rules.json new file mode 100644 index 00000000000..12f0bfa0b5a --- /dev/null +++ b/go/cmd/rulesctl/common/testdata/rules.json @@ -0,0 +1,6 @@ +[ + { + "Description": "Some value", + "Name": "Name" + } +] diff --git a/go/cmd/topo2topo/cli/plugin_consultopo.go b/go/cmd/topo2topo/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/topo2topo/cli/plugin_consultopo.go +++ b/go/cmd/topo2topo/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto 
in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/topo2topo/cli/topo2topo.go b/go/cmd/topo2topo/cli/topo2topo.go index 6e7e173872b..13539d97629 100644 --- a/go/cmd/topo2topo/cli/topo2topo.go +++ b/go/cmd/topo2topo/cli/topo2topo.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/grpccommon" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/helpers" ) @@ -51,6 +52,7 @@ var ( It can also be used to compare data between two topologies.`, Args: cobra.NoArgs, PreRunE: servenv.CobraPreRunE, + Version: servenv.AppVersion.String(), RunE: run, } ) @@ -88,18 +90,27 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("Cannot open 'to' topo %v: %w", toImplementation, err) } - ctx := context.Background() + ctx := cmd.Context() if compare { return compareTopos(ctx, fromTS, toTS) } - return copyTopos(ctx, fromTS, toTS) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot create sqlparser: %w", err) + } + + return copyTopos(ctx, fromTS, toTS, parser) } -func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) error { +func copyTopos(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlparser.Parser) error { if doKeyspaces { - if err := helpers.CopyKeyspaces(ctx, fromTS, toTS); err != nil { + if err := helpers.CopyKeyspaces(ctx, fromTS, toTS, parser); err != nil { return err } } diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go index 210e2edb918..ad93d058c00 100644 --- 
a/go/cmd/vtadmin/main.go +++ b/go/cmd/vtadmin/main.go @@ -17,13 +17,13 @@ limitations under the License. package main import ( - "context" "flag" "io" "time" "github.com/spf13/cobra" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -35,8 +35,7 @@ import ( vtadminhttp "vitess.io/vitess/go/vt/vtadmin/http" "vitess.io/vitess/go/vt/vtadmin/http/debug" "vitess.io/vitess/go/vt/vtadmin/rbac" - - _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/vt/vtenv" ) var ( @@ -97,7 +96,7 @@ func startTracing(cmd *cobra.Command) { } func run(cmd *cobra.Command, args []string) { - bootSpan, ctx := trace.NewSpan(context.Background(), "vtadmin.boot") + bootSpan, ctx := trace.NewSpan(cmd.Context(), "vtadmin.boot") defer bootSpan.Finish() configs := clusterFileConfig.Combine(defaultClusterConfig, clusterConfigs) @@ -139,7 +138,15 @@ func run(cmd *cobra.Command, args []string) { } cache.SetCacheRefreshKey(cacheRefreshKey) - s := vtadmin.NewAPI(clusters, vtadmin.Options{ + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + fatal(err) + } + s := vtadmin.NewAPI(env, clusters, vtadmin.Options{ GRPCOpts: opts, HTTPOpts: httpOpts, RBAC: rbacConfig, @@ -208,11 +215,11 @@ func main() { rootCmd.Flags().AddGoFlag(flag.Lookup("stderrthreshold")) rootCmd.Flags().AddGoFlag(flag.Lookup("log_dir")) + servenv.RegisterMySQLServerFlags(rootCmd.Flags()) + if err := rootCmd.Execute(); err != nil { log.Fatal(err) } - - log.Flush() } type noopCloser struct{} diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go index 9d9138c5756..1b61c886ae7 100644 --- a/go/cmd/vtbackup/cli/vtbackup.go +++ b/go/cmd/vtbackup/cli/vtbackup.go @@ -29,11 +29,11 @@ import ( "github.com/spf13/cobra" - "vitess.io/vitess/go/mysql/replication" - 
"vitess.io/vitess/go/acl" "vitess.io/vitess/go/cmd" "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -84,13 +84,16 @@ var ( incrementalFromPos string // mysqlctld-like flags - mysqlPort = 3306 - mysqlSocket string - mysqlTimeout = 5 * time.Minute - initDBSQLFile string - detachedMode bool - keepAliveTimeout time.Duration - disableRedoLog bool + mysqlPort = 3306 + mysqlSocket string + mysqlTimeout = 5 * time.Minute + mysqlShutdownTimeout = mysqlctl.DefaultShutdownTimeout + initDBSQLFile string + detachedMode bool + keepAliveTimeout time.Duration + disableRedoLog bool + + collationEnv *collations.Environment // Deprecated, use "Phase" instead. deprecatedDurationByPhase = stats.NewGaugesWithSingleLabel( @@ -201,24 +204,27 @@ func init() { Main.Flags().StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet") Main.Flags().StringVar(&initShard, "init_shard", initShard, "(init parameter) shard to use for this tablet") Main.Flags().IntVar(&concurrency, "concurrency", concurrency, "(init restore parameter) how many concurrent files to restore at once") - Main.Flags().StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + Main.Flags().StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position, or name of backup from which to create an incremental backup. Default: empty. If given, then this backup becomes an incremental backup from given position or given backup. 
If value is 'auto', this backup will be taken from the last successful backup position.") // mysqlctld-like flags Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port") Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket") Main.Flags().DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup") + Main.Flags().DurationVar(&mysqlShutdownTimeout, "mysql-shutdown-timeout", mysqlShutdownTimeout, "how long to wait for mysqld shutdown") Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db") Main.Flags().BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal") Main.Flags().DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.") Main.Flags().BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.") acl.RegisterFlags(Main.Flags()) + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } -func run(_ *cobra.Command, args []string) error { +func run(cc *cobra.Command, args []string) error { servenv.Init() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(cc.Context()) servenv.OnClose(func() { cancel() }) @@ -276,7 +282,7 @@ func run(_ *cobra.Command, args []string) error { return fmt.Errorf("Can't take backup: %w", err) } if doBackup { - if err := takeBackup(ctx, topoServer, backupStorage); err != nil { + if err := takeBackup(ctx, cc.Context(), topoServer, backupStorage); err != nil { return fmt.Errorf("Failed to take backup: %w", err) } } @@ -298,7 +304,7 @@ func run(_ *cobra.Command, args []string) error { return nil } -func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error { +func 
takeBackup(ctx, backgroundCtx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error { // This is an imaginary tablet alias. The value doesn't matter for anything, // except that we generate a random UID to ensure the target backup // directory is unique if multiple vtbackup instances are launched for the @@ -325,7 +331,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back }() // Start up mysqld as if we are mysqlctld provisioning a fresh tablet. - mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort) + mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort, collationEnv) if err != nil { return fmt.Errorf("failed to initialize mysql config: %v", err) } @@ -338,11 +344,11 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back deprecatedDurationByPhase.Set("InitMySQLd", int64(time.Since(initMysqldAt).Seconds())) // Shut down mysqld when we're done. defer func() { - // Be careful not to use the original context, because we don't want to - // skip shutdown just because we timed out waiting for other things. - mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + // Be careful use the background context, not the init one, because we don't want to + // skip shutdown just because we timed out waiting for init. 
+ mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(backgroundCtx, mysqlShutdownTimeout+10*time.Second) defer mysqlShutdownCancel() - if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false); err != nil { + if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false, mysqlShutdownTimeout); err != nil { log.Errorf("failed to shutdown mysqld: %v", err) } }() @@ -356,18 +362,19 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } backupParams := mysqlctl.BackupParams{ - Cnf: mycnf, - Mysqld: mysqld, - Logger: logutil.NewConsoleLogger(), - Concurrency: concurrency, - IncrementalFromPos: incrementalFromPos, - HookExtraEnv: extraEnv, - TopoServer: topoServer, - Keyspace: initKeyspace, - Shard: initShard, - TabletAlias: topoproto.TabletAliasString(tabletAlias), - Stats: backupstats.BackupStats(), - UpgradeSafe: upgradeSafe, + Cnf: mycnf, + Mysqld: mysqld, + Logger: logutil.NewConsoleLogger(), + Concurrency: concurrency, + IncrementalFromPos: incrementalFromPos, + HookExtraEnv: extraEnv, + TopoServer: topoServer, + Keyspace: initKeyspace, + Shard: initShard, + TabletAlias: topoproto.TabletAliasString(tabletAlias), + Stats: backupstats.BackupStats(), + UpgradeSafe: upgradeSafe, + MysqlShutdownTimeout: mysqlShutdownTimeout, } // In initial_backup mode, just take a backup of this empty database. if initialBackup { @@ -380,7 +387,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back return fmt.Errorf("can't reset replication: %v", err) } // We need to switch off super_read_only before we create the database. 
- resetFunc, err := mysqld.SetSuperReadOnly(false) + resetFunc, err := mysqld.SetSuperReadOnly(ctx, false) if err != nil { return fmt.Errorf("failed to disable super_read_only during backup: %v", err) } @@ -416,16 +423,17 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back log.Infof("Restoring latest backup from directory %v", backupDir) restoreAt := time.Now() params := mysqlctl.RestoreParams{ - Cnf: mycnf, - Mysqld: mysqld, - Logger: logutil.NewConsoleLogger(), - Concurrency: concurrency, - HookExtraEnv: extraEnv, - DeleteBeforeRestore: true, - DbName: dbName, - Keyspace: initKeyspace, - Shard: initShard, - Stats: backupstats.RestoreStats(), + Cnf: mycnf, + Mysqld: mysqld, + Logger: logutil.NewConsoleLogger(), + Concurrency: concurrency, + HookExtraEnv: extraEnv, + DeleteBeforeRestore: true, + DbName: dbName, + Keyspace: initKeyspace, + Shard: initShard, + Stats: backupstats.RestoreStats(), + MysqlShutdownTimeout: mysqlShutdownTimeout, } backupManifest, err := mysqlctl.Restore(ctx, params) var restorePos replication.Position @@ -520,7 +528,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } lastStatus = status - status, statusErr = mysqld.ReplicationStatusWithContext(ctx) + status, statusErr = mysqld.ReplicationStatus(ctx) if statusErr != nil { log.Warningf("Error getting replication status: %v", statusErr) continue @@ -552,12 +560,12 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back phase.Set(phaseNameCatchupReplication, int64(0)) // Stop replication and see where we are. - if err := mysqld.StopReplication(nil); err != nil { + if err := mysqld.StopReplication(ctx, nil); err != nil { return fmt.Errorf("can't stop replication: %v", err) } // Did we make any progress? 
- status, statusErr = mysqld.ReplicationStatusWithContext(ctx) + status, statusErr = mysqld.ReplicationStatus(ctx) if statusErr != nil { return fmt.Errorf("can't get replication status: %v", err) } @@ -583,7 +591,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back return fmt.Errorf("Could not prep for full shutdown: %v", err) } // Shutdown, waiting for it to finish - if err := mysqld.Shutdown(ctx, mycnf, true); err != nil { + if err := mysqld.Shutdown(ctx, mycnf, true, mysqlShutdownTimeout); err != nil { return fmt.Errorf("Something went wrong during full MySQL shutdown: %v", err) } // Start MySQL, waiting for it to come up @@ -613,11 +621,10 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back } func resetReplication(ctx context.Context, pos replication.Position, mysqld mysqlctl.MysqlDaemon) error { - cmds := []string{ - "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget replication source host:port. + if err := mysqld.StopReplication(ctx, nil); err != nil { + return vterrors.Wrap(err, "failed to stop replication") } - if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil { + if err := mysqld.ResetReplicationParameters(ctx); err != nil { return vterrors.Wrap(err, "failed to reset replication") } @@ -653,7 +660,7 @@ func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServ } // Stop replication (in case we're restarting), set replication source, and start replication. 
- if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, 0, true, true); err != nil { return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") } return nil diff --git a/go/cmd/vtbench/cli/vtbench.go b/go/cmd/vtbench/cli/vtbench.go index 69b866bb60d..e36f06cb69e 100644 --- a/go/cmd/vtbench/cli/vtbench.go +++ b/go/cmd/vtbench/cli/vtbench.go @@ -212,7 +212,7 @@ func run(cmd *cobra.Command, args []string) error { b := vtbench.NewBench(threads, count, connParams, sql) - ctx, cancel := context.WithTimeout(context.Background(), deadline) + ctx, cancel := context.WithTimeout(cmd.Context(), deadline) defer cancel() fmt.Printf("Initializing test with %s protocol / %d threads / %d iterations\n", diff --git a/go/cmd/vtclient/cli/vtclient.go b/go/cmd/vtclient/cli/vtclient.go index 949af851ab4..e8bcd9b7ff2 100644 --- a/go/cmd/vtclient/cli/vtclient.go +++ b/go/cmd/vtclient/cli/vtclient.go @@ -22,7 +22,7 @@ import ( "encoding/json" "fmt" "io" - "math/rand" + "math/rand/v2" "os" "sort" "sync" @@ -174,7 +174,7 @@ func _run(cmd *cobra.Command, args []string) (*results, error) { go func() { if useRandom { for { - seqChan <- rand.Intn(maxSeqID-minSeqID) + minSeqID + seqChan <- rand.IntN(maxSeqID-minSeqID) + minSeqID } } else { for i := minSeqID; i < maxSeqID; i++ { @@ -197,7 +197,7 @@ func _run(cmd *cobra.Command, args []string) (*results, error) { log.Infof("Sending the query...") - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(cmd.Context(), timeout) defer cancel() return execMulti(ctx, db, cmd.Flags().Arg(0)) } diff --git a/go/cmd/vtclient/cli/vtclient_test.go b/go/cmd/vtclient/cli/vtclient_test.go index a5ee571cd0b..bf0c1206167 100644 --- a/go/cmd/vtclient/cli/vtclient_test.go +++ 
b/go/cmd/vtclient/cli/vtclient_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -17,6 +17,7 @@ limitations under the License. package cli import ( + "context" "fmt" "os" "strings" @@ -129,6 +130,7 @@ func TestVtclient(t *testing.T) { err := Main.ParseFlags(args) require.NoError(t, err) + Main.SetContext(context.Background()) results, err := _run(Main, args) if q.errMsg != "" { if got, want := err.Error(), q.errMsg; !strings.Contains(got, want) { diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go index bfc0ad894fe..189441594bb 100644 --- a/go/cmd/vtcombo/cli/main.go +++ b/go/cmd/vtcombo/cli/main.go @@ -31,7 +31,9 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -40,9 +42,11 @@ import ( "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtcombo" "vitess.io/vitess/go/vt/vtctld" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -76,9 +80,14 @@ In particular, it contains: plannerName string vschemaPersistenceDir string - tpb vttestpb.VTTestTopology - ts *topo.Server - resilientServer *srvtopo.ResilientServer + tpb vttestpb.VTTestTopology + ts *topo.Server + 
resilientServer *srvtopo.ResilientServer + tabletTypesToWait []topodatapb.TabletType + + env *vtenv.Environment + + srvTopoCounts *stats.CountersWithSingleLabel ) func init() { @@ -111,19 +120,32 @@ func init() { Main.Flags().Var(vttest.TextTopoData(&tpb), "proto_topo", "vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.") Main.Flags().Var(vttest.JSONTopoData(&tpb), "json_topo", "vttest proto definition of the topology, encoded in json format. See vttest.proto for more information.") + Main.Flags().Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.") + // We're going to force the value later, so don't even bother letting the // user know about this flag. Main.Flags().MarkHidden("tablet_protocol") + + var err error + env, err = vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("unable to initialize env: %v", err) + } + srvTopoCounts = stats.NewCountersWithSingleLabel("ResilientSrvTopoServer", "Resilient srvtopo server operations", "type") } -func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err error) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) +func startMysqld(ctx context.Context, uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err error) { + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) defer cancel() mycnfFile := mysqlctl.MycnfFile(uid) if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { - mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", mysqlPort) + mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", mysqlPort, env.CollationEnv()) if err != nil { return nil, nil, fmt.Errorf("failed to 
initialize mysql config :%w", err) } @@ -131,7 +153,7 @@ func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err return nil, nil, fmt.Errorf("failed to initialize mysql :%w", err) } } else { - mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid) + mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid, env.CollationEnv()) if err != nil { return nil, nil, fmt.Errorf("failed to find mysql config: %w", err) } @@ -163,21 +185,24 @@ func run(cmd *cobra.Command, args []string) (err error) { // vtctld UI requires the cell flag cmd.Flags().Set("cell", tpb.Cells[0]) - if cmd.Flags().Lookup("log_dir") == nil { + if f := cmd.Flags().Lookup("log_dir"); f != nil && !f.Changed { cmd.Flags().Set("log_dir", "$VTDATAROOT/tmp") } + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() if externalTopoServer { // Open topo server based on the command line flags defined at topo/server.go // do not create cell info as it should be done by whoever sets up the external topo server ts = topo.Open() } else { // Create topo server. We use a 'memorytopo' implementation. - ts = memorytopo.NewServer(context.Background(), tpb.Cells...) + ts = memorytopo.NewServer(ctx, tpb.Cells...) 
} + defer ts.Close() // attempt to load any routing rules specified by tpb - if err := vtcombo.InitRoutingRules(context.Background(), ts, tpb.GetRoutingRules()); err != nil { + if err := vtcombo.InitRoutingRules(ctx, ts, tpb.GetRoutingRules()); err != nil { return fmt.Errorf("Failed to load routing rules: %w", err) } @@ -190,18 +215,20 @@ func run(cmd *cobra.Command, args []string) (err error) { ) if startMysql { - mysqld.Mysqld, cnf, err = startMysqld(1) + mysqld.Mysqld, cnf, err = startMysqld(ctx, 1) if err != nil { return err } servenv.OnClose(func() { - mysqld.Shutdown(context.TODO(), cnf, true) + shutdownCtx, shutdownCancel := context.WithTimeout(cmd.Context(), mysqlctl.DefaultShutdownTimeout+10*time.Second) + defer shutdownCancel() + mysqld.Shutdown(shutdownCtx, cnf, true, mysqlctl.DefaultShutdownTimeout) }) // We want to ensure we can write to this database - mysqld.SetReadOnly(false) + mysqld.SetReadOnly(ctx, false) } else { - dbconfigs.GlobalDBConfigs.InitWithSocket("") + dbconfigs.GlobalDBConfigs.InitWithSocket("", env.CollationEnv()) mysqld.Mysqld = mysqlctl.NewMysqld(&dbconfigs.GlobalDBConfigs) servenv.OnClose(mysqld.Close) } @@ -213,11 +240,13 @@ func run(cmd *cobra.Command, args []string) (err error) { // to be the "internal" protocol that InitTabletMap registers. 
cmd.Flags().Set("tablet_manager_protocol", "internal") cmd.Flags().Set("tablet_protocol", "internal") - uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql) + uid, err := vtcombo.InitTabletMap(env, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql, srvTopoCounts) if err != nil { // ensure we start mysql in the event we fail here if startMysql { - mysqld.Shutdown(context.TODO(), cnf, true) + startCtx, startCancel := context.WithTimeout(ctx, mysqlctl.DefaultShutdownTimeout+10*time.Second) + defer startCancel() + mysqld.Shutdown(startCtx, cnf, true, mysqlctl.DefaultShutdownTimeout) } return fmt.Errorf("initTabletMapProto failed: %w", err) @@ -236,8 +265,8 @@ func run(cmd *cobra.Command, args []string) (err error) { } } - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) - newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr) + wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, nil) + newUID, err := vtcombo.CreateKs(ctx, env, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr, srvTopoCounts) if err != nil { return err } @@ -261,10 +290,12 @@ func run(cmd *cobra.Command, args []string) (err error) { // Now that we have fully initialized the tablets, rebuild the keyspace graph. for _, ks := range tpb.Keyspaces { - err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false) + err := topotools.RebuildKeyspace(cmd.Context(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false) if err != nil { if startMysql { - mysqld.Shutdown(context.TODO(), cnf, true) + shutdownCtx, shutdownCancel := context.WithTimeout(cmd.Context(), mysqlctl.DefaultShutdownTimeout+10*time.Second) + defer shutdownCancel() + mysqld.Shutdown(shutdownCtx, cnf, true, mysqlctl.DefaultShutdownTimeout) } return fmt.Errorf("Couldn't build srv keyspace for (%v: %v). 
Got error: %w", ks, tpb.Cells, err) @@ -272,43 +303,47 @@ func run(cmd *cobra.Command, args []string) (err error) { } // vtgate configuration and init - resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer") - tabletTypesToWait := []topodatapb.TabletType{ - topodatapb.TabletType_PRIMARY, - topodatapb.TabletType_REPLICA, - topodatapb.TabletType_RDONLY, + + resilientServer = srvtopo.NewResilientServer(ctx, ts, srvTopoCounts) + + tabletTypes := make([]topodatapb.TabletType, 0, 1) + if len(tabletTypesToWait) != 0 { + for _, tt := range tabletTypesToWait { + if topoproto.IsServingType(tt) { + tabletTypes = append(tabletTypes, tt) + } + } + + if len(tabletTypes) == 0 { + log.Exitf("tablet_types_to_wait should contain at least one serving tablet type") + } + } else { + tabletTypes = append(tabletTypes, topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY) } + plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) vtgate.QueryLogHandler = "/debug/vtgate/querylog" vtgate.QueryLogzHandler = "/debug/vtgate/querylogz" vtgate.QueryzHandler = "/debug/vtgate/queryz" + // pass nil for healthcheck, it will get created - vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion) + vtg := vtgate.Init(ctx, env, nil, resilientServer, tpb.Cells[0], tabletTypes, plannerVersion) // vtctld configuration and init - err = vtctld.InitVtctld(ts) + err = vtctld.InitVtctld(env, ts) if err != nil { return err } if vschemaPersistenceDir != "" && !externalTopoServer { - startVschemaWatcher(vschemaPersistenceDir, tpb.Keyspaces, ts) + startVschemaWatcher(ctx, vschemaPersistenceDir, ts) } servenv.OnRun(func() { addStatusParts(vtg) }) - servenv.OnTerm(func() { - log.Error("Terminating") - // FIXME(alainjobart): stop vtgate - }) - servenv.OnClose(func() { - // We will still use the topo server during lameduck period - // to update our state, so closing it in 
OnClose() - ts.Close() - }) servenv.RunDefault() return nil @@ -323,17 +358,17 @@ type vtcomboMysqld struct { } // SetReplicationSource implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error { +func (mysqld *vtcomboMysqld) SetReplicationSource(ctx context.Context, host string, port int32, heartbeatInterval float64, stopReplicationBefore bool, startReplicationAfter bool) error { return nil } // StartReplication implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) StartReplication(hookExtraEnv map[string]string) error { +func (mysqld *vtcomboMysqld) StartReplication(ctx context.Context, hookExtraEnv map[string]string) error { return nil } // RestartReplication implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) RestartReplication(hookExtraEnv map[string]string) error { +func (mysqld *vtcomboMysqld) RestartReplication(ctx context.Context, hookExtraEnv map[string]string) error { return nil } @@ -343,16 +378,16 @@ func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos } // StopReplication implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) error { +func (mysqld *vtcomboMysqld) StopReplication(ctx context.Context, hookExtraEnv map[string]string) error { return nil } // SetSemiSyncEnabled implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error { +func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(ctx context.Context, source, replica bool) error { return nil } // SemiSyncExtensionLoaded implements the MysqlDaemon interface -func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) { - return true, nil +func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded(ctx context.Context) (mysql.SemiSyncType, error) { + return mysql.SemiSyncTypeSource, nil } diff 
--git a/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go index 2cf8eed8368..7f6000f5af9 100644 --- a/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctld") { - grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + grpcvtctldserver.StartServer(servenv.GRPCServer, env, ts) } }) } diff --git a/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go index 8b7f918bc58..236b83e3d28 100644 --- a/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctl") { - grpcvtctlserver.StartServer(servenv.GRPCServer, ts) + grpcvtctlserver.StartServer(servenv.GRPCServer, env, ts) } }) } diff --git a/go/cmd/vtcombo/cli/status.go b/go/cmd/vtcombo/cli/status.go index 8069fc72606..80176d4a11a 100644 --- a/go/cmd/vtcombo/cli/status.go +++ b/go/cmd/vtcombo/cli/status.go @@ -41,7 +41,10 @@ func addStatusParts(vtg *vtgate.VTGate) { servenv.AddStatusPart("Gateway Status", vtgate.StatusTemplate, func() any { return vtg.GetGatewayCacheStatus() }) - servenv.AddStatusPart("Health Check Cache", discovery.HealthCheckTemplate, func() any { + servenv.AddStatusPart("Health Check - Cache", discovery.HealthCheckCacheTemplate, func() any { return vtg.Gateway().TabletsCacheStatus() }) + servenv.AddStatusPart("Health Check - Healthy Tablets", discovery.HealthCheckHealthyTemplate, func() any { + return vtg.Gateway().TabletsHealthyStatus() + }) } diff --git a/go/cmd/vtcombo/cli/vschema_watcher.go b/go/cmd/vtcombo/cli/vschema_watcher.go index c1c9f120b96..484c7736424 100644 --- a/go/cmd/vtcombo/cli/vschema_watcher.go +++ b/go/cmd/vtcombo/cli/vschema_watcher.go @@ -27,28 +27,27 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" vschemapb 
"vitess.io/vitess/go/vt/proto/vschema" - vttestpb "vitess.io/vitess/go/vt/proto/vttest" ) -func startVschemaWatcher(vschemaPersistenceDir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { +func startVschemaWatcher(ctx context.Context, vschemaPersistenceDir string, ts *topo.Server) { // Create the directory if it doesn't exist. if err := createDirectoryIfNotExists(vschemaPersistenceDir); err != nil { log.Fatalf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err) } // If there are keyspace files, load them. - loadKeyspacesFromDir(vschemaPersistenceDir, keyspaces, ts) + loadKeyspacesFromDir(ctx, vschemaPersistenceDir, ts) // Rebuild the SrvVSchema object in case we loaded vschema from file - if err := ts.RebuildSrvVSchema(context.Background(), tpb.Cells); err != nil { + if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil { log.Fatalf("RebuildSrvVSchema failed: %v", err) } // Now watch for changes in the SrvVSchema object and persist them to disk. 
- go watchSrvVSchema(context.Background(), ts, tpb.Cells[0]) + go watchSrvVSchema(ctx, ts, tpb.Cells[0]) } -func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) { +func loadKeyspacesFromDir(ctx context.Context, dir string, ts *topo.Server) { for _, ks := range tpb.Keyspaces { ksFile := path.Join(dir, ks.Name+".json") if _, err := os.Stat(ksFile); err == nil { @@ -63,18 +62,18 @@ func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.S log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) } - _, err = vindexes.BuildKeyspace(keyspace) + _, err = vindexes.BuildKeyspace(keyspace, env.Parser()) if err != nil { log.Fatalf("Invalid keyspace definition: %v", err) } - ts.SaveVSchema(context.Background(), ks.Name, keyspace) + ts.SaveVSchema(ctx, ks.Name, keyspace) log.Infof("Loaded keyspace %v from %v\n", ks.Name, ksFile) } } } func watchSrvVSchema(ctx context.Context, ts *topo.Server, cell string) { - data, ch, err := ts.WatchSrvVSchema(context.Background(), tpb.Cells[0]) + data, ch, err := ts.WatchSrvVSchema(ctx, tpb.Cells[0]) if err != nil { log.Fatalf("WatchSrvVSchema failed: %v", err) } diff --git a/go/cmd/vtctl/plugin_cephbackupstorage.go b/go/cmd/vtctl/plugin_cephbackupstorage.go index 6cd2d5619d0..0d4710ec982 100644 --- a/go/cmd/vtctl/plugin_cephbackupstorage.go +++ b/go/cmd/vtctl/plugin_cephbackupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtctl/plugin_s3backupstorage.go b/go/cmd/vtctl/plugin_s3backupstorage.go index a5b5c671ebb..f1d1a454041 100644 --- a/go/cmd/vtctl/plugin_s3backupstorage.go +++ b/go/cmd/vtctl/plugin_s3backupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index 175e49c6831..ba84369620a 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -19,7 +19,6 @@ package main import ( "context" "fmt" - "log/syslog" "os" "os/signal" "strings" @@ -40,6 +39,7 @@ import ( "vitess.io/vitess/go/vt/vtctl" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" ) @@ -118,11 +118,7 @@ func main() { startMsg := fmt.Sprintf("USER=%v SUDO_USER=%v %v", os.Getenv("USER"), os.Getenv("SUDO_USER"), strings.Join(os.Args, " ")) - if syslogger, err := syslog.New(syslog.LOG_INFO, "vtctl "); err == nil { - syslogger.Info(startMsg) // nolint:errcheck - } else { - log.Warningf("cannot connect to syslog: %v", err) - } + logSyslog(startMsg) closer := trace.StartTracing("vtctl") defer trace.LogErrorsWhenClosing(closer) @@ -131,10 +127,18 @@ func main() { ts := topo.Open() defer ts.Close() - ctx, cancel := context.WithTimeout(context.Background(), waitTime) installSignalHandlers(cancel) + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + 
TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("cannot initialize sql parser: %v", err) + } + // (TODO:ajm188) . // // For v12, we are going to support new commands by prefixing as: @@ -159,7 +163,7 @@ func main() { // New behavior. Strip off the prefix, and set things up to run through // the vtctldclient command tree, using the localvtctldclient (in-process) // client. - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(env, ts) localvtctldclient.SetServer(vtctld) command.VtctldClientProtocol = "local" @@ -175,8 +179,7 @@ func main() { fallthrough default: log.Warningf("WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") - - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) if args[0] == "--" { vtctl.PrintDoubleDashDeprecationNotice(wr) @@ -184,7 +187,7 @@ func main() { } action = args[0] - err := vtctl.RunCommand(ctx, wr, args) + err = vtctl.RunCommand(ctx, wr, args) cancel() switch err { case vtctl.ErrUnknownCommand: diff --git a/go/vt/vtgate/planbuilder/primitive_wrapper.go b/go/cmd/vtctl/vtctl_unix.go similarity index 59% rename from go/vt/vtgate/planbuilder/primitive_wrapper.go rename to go/cmd/vtctl/vtctl_unix.go index a03c94ce850..bee0be238d7 100644 --- a/go/vt/vtgate/planbuilder/primitive_wrapper.go +++ b/go/cmd/vtctl/vtctl_unix.go @@ -1,5 +1,7 @@ +//go:build !windows + /* -Copyright 2022 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package planbuilder +package main import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) + "log/syslog" -// primitiveWrapper is used when only need a logical plan that supports plan.Primitive() and nothing else -type primitiveWrapper struct { - prim engine.Primitive -} + "vitess.io/vitess/go/vt/log" +) -func (p *primitiveWrapper) Primitive() engine.Primitive { - return p.prim +func logSyslog(msg string) { + if syslogger, err := syslog.New(syslog.LOG_INFO, "vtctl "); err == nil { + syslogger.Info(msg) // nolint:errcheck + } else { + log.Warningf("cannot connect to syslog: %v", err) + } } - -var _ logicalPlan = (*primitiveWrapper)(nil) diff --git a/go/maps2/maps.go b/go/cmd/vtctl/vtctl_windows.go similarity index 55% rename from go/maps2/maps.go rename to go/cmd/vtctl/vtctl_windows.go index 56191bea1a7..63c5cceb63b 100644 --- a/go/maps2/maps.go +++ b/go/cmd/vtctl/vtctl_windows.go @@ -1,3 +1,5 @@ +//go:build windows + /* Copyright 2023 The Vitess Authors. @@ -14,24 +16,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package maps2 +package main -// Keys returns the keys of the map m. -// The keys will be in an indeterminate order. -func Keys[M ~map[K]V, K comparable, V any](m M) []K { - r := make([]K, 0, len(m)) - for k := range m { - r = append(r, k) - } - return r -} +import ( + "vitess.io/vitess/go/vt/log" +) -// Values returns the values of the map m. -// The values will be in an indeterminate order. 
-func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r +func logSyslog(msg string) { + log.Warningf("windows does not have syslog support") } diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go index e5124133adb..8cf208a66f0 100644 --- a/go/cmd/vtctld/cli/cli.go +++ b/go/cmd/vtctld/cli/cli.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -23,10 +23,12 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctld" + "vitess.io/vitess/go/vt/vtenv" ) var ( ts *topo.Server + env *vtenv.Environment Main = &cobra.Command{ Use: "vtctld", Short: "The Vitess cluster management daemon.", @@ -59,8 +61,17 @@ func run(cmd *cobra.Command, args []string) error { ts = topo.Open() defer ts.Close() + var err error + env, err = vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return err + } // Init the vtctld core - if err := vtctld.InitVtctld(ts); err != nil { + if err := vtctld.InitVtctld(env, ts); err != nil { return err } @@ -68,7 +79,7 @@ func run(cmd *cobra.Command, args []string) error { vtctld.RegisterDebugHealthHandler(ts) // Start schema manager service. - initSchema() + initSchema(cmd.Context()) // And run the server. 
servenv.RunDefault() diff --git a/go/cmd/vtctld/cli/plugin_cephbackupstorage.go b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go index 171198f5e29..7755e1cae2d 100644 --- a/go/cmd/vtctld/cli/plugin_cephbackupstorage.go +++ b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_consultopo.go b/go/cmd/vtctld/cli/plugin_consultopo.go index 4617d753953..b8f8f2e8cdc 100644 --- a/go/cmd/vtctld/cli/plugin_consultopo.go +++ b/go/cmd/vtctld/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go index ff283d91336..b712d9c0fd0 100644 --- a/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctld") { - grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + grpcvtctldserver.StartServer(servenv.GRPCServer, env, ts) } }) } diff --git a/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go index 8b7f918bc58..236b83e3d28 100644 --- a/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctl") { - grpcvtctlserver.StartServer(servenv.GRPCServer, ts) + grpcvtctlserver.StartServer(servenv.GRPCServer, env, ts) } }) } diff --git a/go/cmd/vtctld/cli/plugin_s3backupstorage.go b/go/cmd/vtctld/cli/plugin_s3backupstorage.go index 4b3ecb33edb..e09f6060809 100644 --- a/go/cmd/vtctld/cli/plugin_s3backupstorage.go +++ b/go/cmd/vtctld/cli/plugin_s3backupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/plugin_zk2topo.go b/go/cmd/vtctld/cli/plugin_zk2topo.go index 77f86d98d52..f1e5f27ea1b 100644 --- a/go/cmd/vtctld/cli/plugin_zk2topo.go +++ b/go/cmd/vtctld/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctld/cli/schema.go b/go/cmd/vtctld/cli/schema.go index 480679a09e6..a330a23abe2 100644 --- a/go/cmd/vtctld/cli/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -47,7 +47,7 @@ func init() { Main.Flags().DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.") } -func initSchema() { +func initSchema(ctx context.Context) { // Start schema manager service if needed. 
if schemaChangeDir != "" { interval := schemaChangeCheckInterval @@ -70,12 +70,11 @@ func initSchema() { log.Errorf("failed to get controller, error: %v", err) return } - ctx := context.Background() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) _, err = schemamanager.Run( ctx, controller, - schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0), + schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0, env.Parser()), ) if err != nil { log.Errorf("Schema change failed, error: %v", err) diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go index 6f9ab7384fc..46ce01e409f 100644 --- a/go/cmd/vtctld/main.go +++ b/go/cmd/vtctld/main.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtctldclient/cli/pflag.go b/go/cmd/vtctldclient/cli/pflag.go index 04d202cd644..f985e74901e 100644 --- a/go/cmd/vtctldclient/cli/pflag.go +++ b/go/cmd/vtctldclient/cli/pflag.go @@ -19,23 +19,11 @@ package cli import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/vt/topo/topoproto" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// StringMapValue augments flagutil.StringMapValue so it can be used as a -// pflag.Value. -type StringMapValue struct { - flagutil.StringMapValue -} - -// Type is part of the pflag.Value interface. 
-func (v *StringMapValue) Type() string { - return "cli.StringMapValue" -} - // KeyspaceTypeFlag adds the pflag.Value interface to a topodatapb.KeyspaceType. type KeyspaceTypeFlag topodatapb.KeyspaceType diff --git a/go/cmd/vtctldclient/command/backups.go b/go/cmd/vtctldclient/command/backups.go index e6314ed7d6e..ea439ded70e 100644 --- a/go/cmd/vtctldclient/command/backups.go +++ b/go/cmd/vtctldclient/command/backups.go @@ -35,7 +35,7 @@ import ( var ( // Backup makes a Backup gRPC call to a vtctld. Backup = &cobra.Command{ - Use: "Backup [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ", + Use: "Backup [--concurrency ] [--allow-primary] [--incremental-from-pos=||auto] [--upgrade-safe] ", Short: "Uses the BackupStorage service on the given tablet to create and store a new backup.", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -43,7 +43,7 @@ var ( } // BackupShard makes a BackupShard gRPC call to a vtctld. BackupShard = &cobra.Command{ - Use: "BackupShard [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ", + Use: "BackupShard [--concurrency ] [--allow-primary] [--incremental-from-pos=||auto] [--upgrade-safe] ", Short: "Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.", Long: `Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup. 
@@ -80,7 +80,7 @@ If no replica-type tablet can be found, the backup can be taken on the primary i var backupOptions = struct { AllowPrimary bool - Concurrency uint64 + Concurrency int32 IncrementalFromPos string UpgradeSafe bool }{} @@ -119,7 +119,7 @@ func commandBackup(cmd *cobra.Command, args []string) error { var backupShardOptions = struct { AllowPrimary bool - Concurrency uint64 + Concurrency int32 IncrementalFromPos string UpgradeSafe bool }{} @@ -280,15 +280,15 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error { func init() { Backup.Flags().BoolVar(&backupOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.") - Backup.Flags().Uint64Var(&backupOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") - Backup.Flags().StringVar(&backupOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + Backup.Flags().Int32Var(&backupOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") + Backup.Flags().StringVar(&backupOptions.IncrementalFromPos, "incremental-from-pos", "", "Position, or name of backup from which to create an incremental backup. Default: empty. If given, then this backup becomes an incremental backup from given position or given backup. 
If value is 'auto', this backup will be taken from the last successful backup position.") Backup.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") Root.AddCommand(Backup) BackupShard.Flags().BoolVar(&backupShardOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.") - BackupShard.Flags().Uint64Var(&backupShardOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") - BackupShard.Flags().StringVar(&backupShardOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + BackupShard.Flags().Int32Var(&backupShardOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.") + BackupShard.Flags().StringVar(&backupShardOptions.IncrementalFromPos, "incremental-from-pos", "", "Position, or name of backup from which to create an incremental backup. Default: empty. If given, then this backup becomes an incremental backup from given position or given backup. 
If value is 'auto', this backup will be taken from the last successful backup position.") BackupShard.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") Root.AddCommand(BackupShard) diff --git a/go/cmd/vtctldclient/command/keyspace_routing_rules.go b/go/cmd/vtctldclient/command/keyspace_routing_rules.go new file mode 100644 index 00000000000..7d1134d3abf --- /dev/null +++ b/go/cmd/vtctldclient/command/keyspace_routing_rules.go @@ -0,0 +1,154 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package command + +import ( + "errors" + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/json2" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // ApplyKeyspaceRoutingRules makes an ApplyKeyspaceRoutingRules gRPC call to a vtctld. + ApplyKeyspaceRoutingRules = &cobra.Command{ + Use: "ApplyKeyspaceRoutingRules {--rules RULES | --rules-file RULES_FILE} [--cells=c1,c2,...] [--skip-rebuild] [--dry-run]", + Short: "Applies the provided keyspace routing rules.", + DisableFlagsInUseLine: true, + Args: cobra.NoArgs, + PreRunE: validateApplyKeyspaceRoutingRulesOptions, + RunE: commandApplyKeyspaceRoutingRules, + } + // GetKeyspaceRoutingRules makes a GetKeyspaceRoutingRules gRPC call to a vtctld. 
+ GetKeyspaceRoutingRules = &cobra.Command{ + Use: "GetKeyspaceRoutingRules", + Short: "Displays the currently active keyspace routing rules.", + DisableFlagsInUseLine: true, + Args: cobra.NoArgs, + RunE: commandGetKeyspaceRoutingRules, + } +) + +func validateApplyKeyspaceRoutingRulesOptions(cmd *cobra.Command, args []string) error { + opts := applyKeyspaceRoutingRulesOptions + if (opts.Rules != "" && opts.RulesFilePath != "") || (opts.Rules == "" && opts.RulesFilePath == "") { + return errors.New("must pass exactly one of --rules or --rules-file") + } + return nil +} + +var applyKeyspaceRoutingRulesOptions = struct { + Rules string + RulesFilePath string + Cells []string + SkipRebuild bool + DryRun bool +}{} + +func commandApplyKeyspaceRoutingRules(cmd *cobra.Command, args []string) error { + opts := applyKeyspaceRoutingRulesOptions + cli.FinishedParsing(cmd) + var rulesBytes []byte + if opts.RulesFilePath != "" { + data, err := os.ReadFile(opts.RulesFilePath) + if err != nil { + return err + } + rulesBytes = data + } else { + rulesBytes = []byte(opts.Rules) + } + + krr := &vschemapb.KeyspaceRoutingRules{} + if err := json2.Unmarshal(rulesBytes, &krr); err != nil { + return err + } + + if opts.DryRun { + // Round-trip so that when we display the result it's readable. 
+ data, err := cli.MarshalJSON(krr) + if err != nil { + return err + } + + fmt.Printf("[DRY RUN] Would have saved new KeyspaceRoutingRules object:\n%s\n", data) + + if opts.SkipRebuild { + fmt.Println("[DRY RUN] Would not have rebuilt VSchema graph, would have required operator to run RebuildVSchemaGraph for changes to take effect.") + } else { + fmt.Print("[DRY RUN] Would have rebuilt the VSchema graph") + if len(opts.Cells) == 0 { + fmt.Print(" in all cells\n") + } else { + fmt.Printf(" in the following cells: %s.\n", strings.Join(applyKeyspaceRoutingRulesOptions.Cells, ", ")) + } + } + return nil + } + + resp, err := client.ApplyKeyspaceRoutingRules(commandCtx, &vtctldatapb.ApplyKeyspaceRoutingRulesRequest{ + KeyspaceRoutingRules: krr, + SkipRebuild: opts.SkipRebuild, + RebuildCells: opts.Cells, + }) + if err != nil { + return err + } + + respJSON, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + fmt.Printf("%s\n", respJSON) + return nil +} + +func commandGetKeyspaceRoutingRules(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + resp, err := client.GetKeyspaceRoutingRules(commandCtx, &vtctldatapb.GetKeyspaceRoutingRulesRequest{}) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.KeyspaceRoutingRules) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + ApplyKeyspaceRoutingRules.Flags().StringVarP(&applyKeyspaceRoutingRulesOptions.Rules, "rules", "r", "", "Keyspace routing rules, specified as a string") + ApplyKeyspaceRoutingRules.Flags().StringVarP(&applyKeyspaceRoutingRulesOptions.RulesFilePath, "rules-file", "f", "", "Path to a file containing keyspace routing rules specified as JSON") + ApplyKeyspaceRoutingRules.Flags().StringSliceVarP(&applyKeyspaceRoutingRulesOptions.Cells, "cells", "c", nil, "Limit the VSchema graph rebuilding to the specified cells. 
Ignored if --skip-rebuild is specified.") + ApplyKeyspaceRoutingRules.Flags().BoolVar(&applyKeyspaceRoutingRulesOptions.SkipRebuild, "skip-rebuild", false, "Skip rebuilding the SrvVSchema objects.") + ApplyKeyspaceRoutingRules.Flags().BoolVarP(&applyKeyspaceRoutingRulesOptions.DryRun, "dry-run", "d", false, "Validate the specified keyspace routing rules and note actions that would be taken, but do not actually apply the rules to the topo.") + Root.AddCommand(ApplyKeyspaceRoutingRules) + Root.AddCommand(GetKeyspaceRoutingRules) +} diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go index 420c274ddd5..6330220d773 100644 --- a/go/cmd/vtctldclient/command/keyspaces.go +++ b/go/cmd/vtctldclient/command/keyspaces.go @@ -30,8 +30,6 @@ import ( "vitess.io/vitess/go/cmd/vtctldclient/cli" "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/topo" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" "vitess.io/vitess/go/vt/proto/vttime" @@ -135,8 +133,6 @@ var createKeyspaceOptions = struct { Force bool AllowEmptyVSchema bool - ServedFromsMap cli.StringMapValue - KeyspaceType cli.KeyspaceTypeFlag BaseKeyspace string SnapshotTimestamp string @@ -203,18 +199,6 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error { SidecarDbName: createKeyspaceOptions.SidecarDBName, } - for n, v := range createKeyspaceOptions.ServedFromsMap.StringMapValue { - tt, err := topo.ParseServingTabletType(n) - if err != nil { - return err - } - - req.ServedFroms = append(req.ServedFroms, &topodatapb.Keyspace_ServedFrom{ - TabletType: tt, - Keyspace: v, - }) - } - resp, err := client.CreateKeyspace(commandCtx, req) if err != nil { return err @@ -422,7 +406,6 @@ func commandValidateVersionKeyspace(cmd *cobra.Command, args []string) error { func init() { CreateKeyspace.Flags().BoolVarP(&createKeyspaceOptions.Force, "force", "f", false, "Proceeds even 
if the keyspace already exists. Does not overwrite the existing keyspace record.") CreateKeyspace.Flags().BoolVarP(&createKeyspaceOptions.AllowEmptyVSchema, "allow-empty-vschema", "e", false, "Allows a new keyspace to have no vschema.") - CreateKeyspace.Flags().Var(&createKeyspaceOptions.ServedFromsMap, "served-from", "Specifies a set of db_type:keyspace pairs used to serve traffic for the keyspace.") CreateKeyspace.Flags().Var(&createKeyspaceOptions.KeyspaceType, "type", "The type of the keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.") CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.") diff --git a/go/cmd/vtctldclient/command/legacy_shim.go b/go/cmd/vtctldclient/command/legacy_shim.go index 95c3ea2d688..d7594e1fdff 100644 --- a/go/cmd/vtctldclient/command/legacy_shim.go +++ b/go/cmd/vtctldclient/command/legacy_shim.go @@ -43,7 +43,7 @@ var ( Args: cobra.ArbitraryArgs, RunE: func(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) - return runLegacyCommand(args) + return runLegacyCommand(cmd.Context(), args) }, Long: strings.TrimSpace(` LegacyVtctlCommand uses the legacy vtctl grpc client to make an ExecuteVtctlCommand @@ -76,11 +76,11 @@ LegacyVtctlCommand -- AddCellInfo --server_address "localhost:5678" --root "/vit } ) -func runLegacyCommand(args []string) error { +func runLegacyCommand(ctx context.Context, args []string) error { // Duplicated (mostly) from go/cmd/vtctlclient/main.go. 
logger := logutil.NewConsoleLogger() - ctx, cancel := context.WithTimeout(context.Background(), actionTimeout) + ctx, cancel := context.WithTimeout(ctx, actionTimeout) defer cancel() err := vtctlclient.RunCommandAndWait(ctx, server, args, func(e *logutilpb.Event) { diff --git a/go/cmd/vtctldclient/command/onlineddl.go b/go/cmd/vtctldclient/command/onlineddl.go index dbe927de2bf..6193de9b2af 100644 --- a/go/cmd/vtctldclient/command/onlineddl.go +++ b/go/cmd/vtctldclient/command/onlineddl.go @@ -102,6 +102,14 @@ var ( Args: cobra.ExactArgs(2), RunE: commandOnlineDDLUnthrottle, } + OnlineDDLForceCutOver = &cobra.Command{ + Use: "force-cutover ", + Short: "Mark a given schema migration, or all pending migrations, for forced cut over.", + Example: "OnlineDDL force-cutover test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandOnlineDDLForceCutOver, + } OnlineDDLShow = &cobra.Command{ Use: "show", Short: "Display information about online DDL operations.", @@ -184,6 +192,30 @@ func commandOnlineDDLCleanup(cmd *cobra.Command, args []string) error { return nil } +func commandOnlineDDLForceCutOver(cmd *cobra.Command, args []string) error { + keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) + if err != nil { + return err + } + cli.FinishedParsing(cmd) + + resp, err := client.ForceCutOverSchemaMigration(commandCtx, &vtctldatapb.ForceCutOverSchemaMigrationRequest{ + Keyspace: keyspace, + Uuid: uuid, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil +} + func commandOnlineDDLComplete(cmd *cobra.Command, args []string) error { keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd) if err != nil { @@ -393,6 +425,7 @@ func init() { OnlineDDL.AddCommand(OnlineDDLRetry) OnlineDDL.AddCommand(OnlineDDLThrottle) OnlineDDL.AddCommand(OnlineDDLUnthrottle) + 
OnlineDDL.AddCommand(OnlineDDLForceCutOver) OnlineDDLShow.Flags().BoolVar(&onlineDDLShowArgs.JSON, "json", false, "Output JSON instead of human-readable table.") OnlineDDLShow.Flags().StringVar(&onlineDDLShowArgs.OrderStr, "order", "asc", "Sort the results by `id` property of the Schema migration.") diff --git a/go/cmd/vtctldclient/command/query.go b/go/cmd/vtctldclient/command/query.go index f169623936b..d00b7447bdd 100644 --- a/go/cmd/vtctldclient/command/query.go +++ b/go/cmd/vtctldclient/command/query.go @@ -46,6 +46,15 @@ var ( RunE: commandExecuteFetchAsDBA, Aliases: []string{"ExecuteFetchAsDba"}, } + // ExecuteMultiFetchAsDBA makes an ExecuteMultiFetchAsDBA gRPC call to a vtctld. + ExecuteMultiFetchAsDBA = &cobra.Command{ + Use: "ExecuteMultiFetchAsDBA [--max-rows ] [--json|-j] [--disable-binlogs] [--reload-schema] ", + Short: "Executes given multiple queries as the DBA user on the remote tablet.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandExecuteMultiFetchAsDBA, + Aliases: []string{"ExecuteMultiFetchAsDba"}, + } ) var executeFetchAsAppOptions = struct { @@ -138,6 +147,57 @@ func commandExecuteFetchAsDBA(cmd *cobra.Command, args []string) error { return nil } +var executeMultiFetchAsDBAOptions = struct { + MaxRows int64 + DisableBinlogs bool + ReloadSchema bool + JSON bool +}{ + MaxRows: 10_000, +} + +func commandExecuteMultiFetchAsDBA(cmd *cobra.Command, args []string) error { + alias, err := topoproto.ParseTabletAlias(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + sql := cmd.Flags().Arg(1) + + resp, err := client.ExecuteMultiFetchAsDBA(commandCtx, &vtctldatapb.ExecuteMultiFetchAsDBARequest{ + TabletAlias: alias, + Sql: sql, + MaxRows: executeMultiFetchAsDBAOptions.MaxRows, + DisableBinlogs: executeMultiFetchAsDBAOptions.DisableBinlogs, + ReloadSchema: executeMultiFetchAsDBAOptions.ReloadSchema, + }) + if err != nil { + return err + } + + var qrs []*sqltypes.Result + for _, result := 
range resp.Results { + qr := sqltypes.Proto3ToResult(result) + qrs = append(qrs, qr) + } + + switch executeMultiFetchAsDBAOptions.JSON { + case true: + data, err := cli.MarshalJSON(qrs) + if err != nil { + return err + } + fmt.Printf("%s\n", data) + default: + for _, qr := range qrs { + cli.WriteQueryResultTable(cmd.OutOrStdout(), qr) + } + } + return nil +} + func init() { ExecuteFetchAsApp.Flags().Int64Var(&executeFetchAsAppOptions.MaxRows, "max-rows", 10_000, "The maximum number of rows to fetch from the remote tablet.") ExecuteFetchAsApp.Flags().BoolVar(&executeFetchAsAppOptions.UsePool, "use-pool", false, "Use the tablet connection pool instead of creating a fresh connection.") @@ -149,4 +209,10 @@ func init() { ExecuteFetchAsDBA.Flags().BoolVar(&executeFetchAsDBAOptions.ReloadSchema, "reload-schema", false, "Instructs the tablet to reload its schema after executing the query.") ExecuteFetchAsDBA.Flags().BoolVarP(&executeFetchAsDBAOptions.JSON, "json", "j", false, "Output the results in JSON instead of a human-readable table.") Root.AddCommand(ExecuteFetchAsDBA) + + ExecuteMultiFetchAsDBA.Flags().Int64Var(&executeMultiFetchAsDBAOptions.MaxRows, "max-rows", 10_000, "The maximum number of rows to fetch from the remote tablet.") + ExecuteMultiFetchAsDBA.Flags().BoolVar(&executeMultiFetchAsDBAOptions.DisableBinlogs, "disable-binlogs", false, "Disables binary logging during the query.") + ExecuteMultiFetchAsDBA.Flags().BoolVar(&executeMultiFetchAsDBAOptions.ReloadSchema, "reload-schema", false, "Instructs the tablet to reload its schema after executing the query.") + ExecuteMultiFetchAsDBA.Flags().BoolVarP(&executeMultiFetchAsDBAOptions.JSON, "json", "j", false, "Output the results in JSON instead of a human-readable table.") + Root.AddCommand(ExecuteMultiFetchAsDBA) } diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go index 5c83016701a..17b87eaba4f 100644 --- a/go/cmd/vtctldclient/command/reparents.go +++ 
b/go/cmd/vtctldclient/command/reparents.go @@ -183,9 +183,10 @@ func commandInitShardPrimary(cmd *cobra.Command, args []string) error { } var plannedReparentShardOptions = struct { - NewPrimaryAliasStr string - AvoidPrimaryAliasStr string - WaitReplicasTimeout time.Duration + NewPrimaryAliasStr string + AvoidPrimaryAliasStr string + WaitReplicasTimeout time.Duration + TolerableReplicationLag time.Duration }{} func commandPlannedReparentShard(cmd *cobra.Command, args []string) error { @@ -216,11 +217,12 @@ func commandPlannedReparentShard(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) resp, err := client.PlannedReparentShard(commandCtx, &vtctldatapb.PlannedReparentShardRequest{ - Keyspace: keyspace, - Shard: shard, - NewPrimary: newPrimaryAlias, - AvoidPrimary: avoidPrimaryAlias, - WaitReplicasTimeout: protoutil.DurationToProto(plannedReparentShardOptions.WaitReplicasTimeout), + Keyspace: keyspace, + Shard: shard, + NewPrimary: newPrimaryAlias, + AvoidPrimary: avoidPrimaryAlias, + WaitReplicasTimeout: protoutil.DurationToProto(plannedReparentShardOptions.WaitReplicasTimeout), + TolerableReplicationLag: protoutil.DurationToProto(plannedReparentShardOptions.TolerableReplicationLag), }) if err != nil { return err @@ -292,6 +294,7 @@ func init() { Root.AddCommand(InitShardPrimary) PlannedReparentShard.Flags().DurationVar(&plannedReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up on replication both before and after reparenting.") + PlannedReparentShard.Flags().DurationVar(&plannedReparentShardOptions.TolerableReplicationLag, "tolerable-replication-lag", 0, "Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary.") PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary.") 
PlannedReparentShard.Flags().StringVar(&plannedReparentShardOptions.AvoidPrimaryAliasStr, "avoid-primary", "", "Alias of a tablet that should not be the primary; i.e. \"reparent to any other tablet if this one is the primary\".") Root.AddCommand(PlannedReparentShard) diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go index 1194b49ec8f..3ebe019f94d 100644 --- a/go/cmd/vtctldclient/command/root.go +++ b/go/cmd/vtctldclient/command/root.go @@ -22,14 +22,22 @@ import ( "fmt" "io" "strconv" + "strings" + "sync" "time" "github.com/spf13/cobra" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vtctl/localvtctldclient" "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vttablet/tmclient" // These imports ensure init()s within them get called and they register their commands/subcommands. "vitess.io/vitess/go/cmd/vtctldclient/cli" @@ -42,8 +50,16 @@ import ( _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/reshard" _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/vdiff" _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/workflow" + + // These imports register the topo factories to use when --server=internal. + _ "vitess.io/vitess/go/vt/topo/consultopo" + _ "vitess.io/vitess/go/vt/topo/etcd2topo" + _ "vitess.io/vitess/go/vt/topo/zk2topo" ) +// The --server value if you want to use a "local" vtctld server. +const useInternalVtctld = "internal" + var ( // VtctldClientProtocol is the protocol to use when creating the vtctldclient.VtctldClient. VtctldClientProtocol = "grpc" @@ -54,14 +70,39 @@ var ( commandCtx context.Context commandCancel func() + // Register functions to be called when the command completes. 
+ onTerm = []func(){} + + // Register our nil tmclient grpc handler only one time. + // This is primarily for tests where we execute the root + // command multiple times. + once = sync.Once{} + server string actionTimeout time.Duration compactOutput bool + env *vtenv.Environment + + topoOptions = struct { + implementation string + globalServerAddresses []string + globalRoot string + }{ // Set defaults + implementation: "etcd2", + globalServerAddresses: []string{"localhost:2379"}, + globalRoot: "/vitess/global", + } + // Root is the main entrypoint to the vtctldclient CLI. Root = &cobra.Command{ Use: "vtctldclient", Short: "Executes a cluster management command on the remote vtctld server.", + Long: fmt.Sprintf(`Executes a cluster management command on the remote vtctld server. +If there are no running vtctld servers -- for example when bootstrapping +a new Vitess cluster -- you can specify a --server value of '%s'. +When doing so, you would use the --topo* flags so that the client can +connect directly to the topo server(s).`, useInternalVtctld), // We use PersistentPreRun to set up the tracer, grpc client, and // command context for every command. PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { @@ -70,7 +111,7 @@ var ( client, err = getClientForCommand(cmd) ctx := cmd.Context() if ctx == nil { - ctx = context.Background() + ctx = cmd.Context() } commandCtx, commandCancel = context.WithTimeout(ctx, actionTimeout) if compactOutput { @@ -87,6 +128,10 @@ var ( if client != nil { err = client.Close() } + // Execute any registered onTerm functions. 
+ for _, f := range onTerm { + f() + } trace.LogErrorsWhenClosing(traceCloser) return err }, @@ -152,12 +197,46 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) return nil, errNoServer } - return vtctldclient.New(VtctldClientProtocol, server) + if server == useInternalVtctld { + ts, err := topo.OpenServer(topoOptions.implementation, strings.Join(topoOptions.globalServerAddresses, ","), topoOptions.globalRoot) + if err != nil { + return nil, fmt.Errorf("failed to connect to the topology server: %v", err) + } + onTerm = append(onTerm, ts.Close) + + // Use internal vtctld server implementation. + // Register a nil grpc handler -- we will not use tmclient at all but + // a factory still needs to be registered. + once.Do(func() { + tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient { + return nil + }) + }) + vtctld := grpcvtctldserver.NewVtctldServer(env, ts) + localvtctldclient.SetServer(vtctld) + VtctldClientProtocol = "local" + server = "" + } + + return vtctldclient.New(cmd.Context(), VtctldClientProtocol, server) } func init() { Root.PersistentFlags().StringVar(&server, "server", "", "server to use for the connection (required)") Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout to use for the command") Root.PersistentFlags().BoolVar(&compactOutput, "compact", false, "use compact format for otherwise verbose outputs") + Root.PersistentFlags().StringVar(&topoOptions.implementation, "topo-implementation", topoOptions.implementation, "the topology implementation to use") + Root.PersistentFlags().StringSliceVar(&topoOptions.globalServerAddresses, "topo-global-server-address", topoOptions.globalServerAddresses, "the address of the global topology server(s)") + Root.PersistentFlags().StringVar(&topoOptions.globalRoot, "topo-global-root", topoOptions.globalRoot, "the path of the global topology data in the global topology server") 
vreplcommon.RegisterCommands(Root) + + var err error + env, err = vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("failed to initialize vtenv: %v", err) + } } diff --git a/go/cmd/vtctldclient/command/root_test.go b/go/cmd/vtctldclient/command/root_test.go index 155fac78705..5efe844e1a1 100644 --- a/go/cmd/vtctldclient/command/root_test.go +++ b/go/cmd/vtctldclient/command/root_test.go @@ -17,13 +17,19 @@ limitations under the License. package command_test import ( + "context" + "fmt" "os" + "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/cmd/vtctldclient/command" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" @@ -52,3 +58,64 @@ func TestRoot(t *testing.T) { assert.Contains(t, err.Error(), "unknown command") }) } + +// TestRootWithInternalVtctld tests that the internal VtctldServer +// implementation -- used with --server=internal -- works for +// commands as expected. +func TestRootWithInternalVtctld(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + cell := "zone1" + ts, factory := memorytopo.NewServerAndFactory(ctx, cell) + topo.RegisterFactory("test", factory) + command.VtctldClientProtocol = "local" + baseArgs := []string{"vtctldclient", "--server", "internal", "--topo-implementation", "test"} + + args := append([]string{}, os.Args...) + protocol := command.VtctldClientProtocol + t.Cleanup(func() { + ts.Close() + os.Args = append([]string{}, args...) 
+ command.VtctldClientProtocol = protocol + }) + + testCases := []struct { + command string + args []string + expectErr string + }{ + { + command: "AddCellInfo", + args: []string{"--root", fmt.Sprintf("/vitess/%s", cell), "--server-address", "", cell}, + expectErr: "node already exists", // Cell already exists + }, + { + command: "GetTablets", + }, + { + command: "NoCommandDrJones", + expectErr: "unknown command", // Invalid command + }, + } + + for _, tc := range testCases { + t.Run(tc.command, func(t *testing.T) { + defer func() { + // Reset the OS args. + os.Args = append([]string{}, args...) + }() + + os.Args = append(baseArgs, tc.command) + os.Args = append(os.Args, tc.args...) + + err := command.Root.Execute() + if tc.expectErr != "" { + if !strings.Contains(err.Error(), tc.expectErr) { + t.Errorf(fmt.Sprintf("%s error = %v, expectErr = %v", tc.command, err, tc.expectErr)) + } + } else { + require.NoError(t, err, "unexpected error: %v", err) + } + }) + } +} diff --git a/go/cmd/vtctldclient/command/routing_rules.go b/go/cmd/vtctldclient/command/routing_rules.go index 5f16ad7ab07..0ffee0c2c24 100644 --- a/go/cmd/vtctldclient/command/routing_rules.go +++ b/go/cmd/vtctldclient/command/routing_rules.go @@ -148,7 +148,7 @@ func commandGetRoutingRules(cmd *cobra.Command, args []string) error { func init() { ApplyRoutingRules.Flags().StringVarP(&applyRoutingRulesOptions.Rules, "rules", "r", "", "Routing rules, specified as a string.") ApplyRoutingRules.Flags().StringVarP(&applyRoutingRulesOptions.RulesFilePath, "rules-file", "f", "", "Path to a file containing routing rules specified as JSON.") - ApplyRoutingRules.Flags().StringSliceVarP(&applyRoutingRulesOptions.Cells, "cells", "c", nil, "Limit the VSchema graph rebuildingg to the specified cells. Ignored if --skip-rebuild is specified.") + ApplyRoutingRules.Flags().StringSliceVarP(&applyRoutingRulesOptions.Cells, "cells", "c", nil, "Limit the VSchema graph rebuilding to the specified cells. 
Ignored if --skip-rebuild is specified.") ApplyRoutingRules.Flags().BoolVar(&applyRoutingRulesOptions.SkipRebuild, "skip-rebuild", false, "Skip rebuilding the SrvVSchema objects.") ApplyRoutingRules.Flags().BoolVarP(&applyRoutingRulesOptions.DryRun, "dry-run", "d", false, "Load the specified routing rules as a validation step, but do not actually apply the rules to the topo.") Root.AddCommand(ApplyRoutingRules) diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go index 795b1315e89..db34bd2588f 100644 --- a/go/cmd/vtctldclient/command/schema.go +++ b/go/cmd/vtctldclient/command/schema.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -123,7 +122,7 @@ func commandApplySchema(cmd *cobra.Command, args []string) error { allSQL = strings.Join(applySchemaOptions.SQL, ";") } - parts, err := sqlparser.SplitStatementToPieces(allSQL) + parts, err := env.Parser().SplitStatementToPieces(allSQL) if err != nil { return err } @@ -230,7 +229,7 @@ func commandReloadSchema(cmd *cobra.Command, args []string) error { } var reloadSchemaKeyspaceOptions = struct { - Concurrency uint32 + Concurrency int32 IncludePrimary bool }{ Concurrency: 10, @@ -255,7 +254,7 @@ func commandReloadSchemaKeyspace(cmd *cobra.Command, args []string) error { } var reloadSchemaShardOptions = struct { - Concurrency uint32 + Concurrency int32 IncludePrimary bool }{ Concurrency: 10, @@ -286,8 +285,6 @@ func commandReloadSchemaShard(cmd *cobra.Command, args []string) error { } func init() { - ApplySchema.Flags().Bool("allow-long-unavailability", false, "Deprecated and has no effect.") - ApplySchema.Flags().MarkDeprecated("--allow-long-unavailability", "") ApplySchema.Flags().StringVar(&applySchemaOptions.DDLStrategy, "ddl-strategy", string(schema.DDLStrategyDirect), "Online 
DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'.") ApplySchema.Flags().StringSliceVar(&applySchemaOptions.UUIDList, "uuid", nil, "Optional, comma-delimited, repeatable, explicit UUIDs for migration. If given, must match number of DDL changes.") ApplySchema.Flags().StringVar(&applySchemaOptions.MigrationContext, "migration-context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess.") @@ -310,11 +307,11 @@ func init() { Root.AddCommand(ReloadSchema) - ReloadSchemaKeyspace.Flags().Uint32Var(&reloadSchemaKeyspaceOptions.Concurrency, "concurrency", 10, "Number of tablets to reload in parallel. Set to zero for unbounded concurrency.") + ReloadSchemaKeyspace.Flags().Int32Var(&reloadSchemaKeyspaceOptions.Concurrency, "concurrency", 10, "Number of tablets to reload in parallel. Set to zero for unbounded concurrency.") ReloadSchemaKeyspace.Flags().BoolVar(&reloadSchemaKeyspaceOptions.IncludePrimary, "include-primary", false, "Also reload the primary tablets.") Root.AddCommand(ReloadSchemaKeyspace) - ReloadSchemaShard.Flags().Uint32Var(&reloadSchemaShardOptions.Concurrency, "concurrency", 10, "Number of tablets to reload in parallel. Set to zero for unbounded concurrency.") + ReloadSchemaShard.Flags().Int32Var(&reloadSchemaShardOptions.Concurrency, "concurrency", 10, "Number of tablets to reload in parallel. 
Set to zero for unbounded concurrency.") ReloadSchemaShard.Flags().BoolVar(&reloadSchemaShardOptions.IncludePrimary, "include-primary", false, "Also reload the primary tablet.") Root.AddCommand(ReloadSchemaShard) } diff --git a/go/cmd/vtctldclient/command/shards.go b/go/cmd/vtctldclient/command/shards.go index 231a44b3949..1a3288a30b8 100644 --- a/go/cmd/vtctldclient/command/shards.go +++ b/go/cmd/vtctldclient/command/shards.go @@ -93,6 +93,14 @@ that shard.`, Args: cobra.ExactArgs(1), RunE: commandGetShard, } + // GetShardReplication makes a GetShardReplication gRPC request to a vtctld. + GetShardReplication = &cobra.Command{ + Use: "GetShardReplication [cell1 [cell2...]]", + Short: "Returns information about the replication relationships for a shard in the given cell(s).", + DisableFlagsInUseLine: true, + Args: cobra.MinimumNArgs(1), + RunE: commandGetShardReplication, + } // RemoveShardCell makes a RemoveShardCell gRPC request to a vtctld. RemoveShardCell = &cobra.Command{ Use: "RemoveShardCell [--force|-f] [--recursive|-r] ", @@ -286,6 +294,36 @@ func commandGetShard(cmd *cobra.Command, args []string) error { return nil } +func commandGetShardReplication(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cells := cmd.Flags().Args()[1:] + + cli.FinishedParsing(cmd) + + resp, err := client.GetShardReplication(commandCtx, &vtctldatapb.GetShardReplicationRequest{ + Keyspace: keyspace, + Shard: shard, + Cells: cells, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil + +} + var removeShardCellOptions = struct { Force bool Recursive bool @@ -558,7 +596,7 @@ func commandSourceShardDelete(cmd *cobra.Command, args []string) error { return err } - uid, err := strconv.ParseUint(cmd.Flags().Arg(1), 10, 32) + uid, err := strconv.ParseInt(cmd.Flags().Arg(1), 10, 32) 
if err != nil { return fmt.Errorf("Failed to parse SourceShard uid: %w", err) // nolint } @@ -624,6 +662,7 @@ func init() { Root.AddCommand(DeleteShards) Root.AddCommand(GetShard) + Root.AddCommand(GetShardReplication) Root.AddCommand(GenerateShardRanges) RemoveShardCell.Flags().BoolVarP(&removeShardCellOptions.Force, "force", "f", false, "Proceed even if the cell's topology server cannot be reached. The assumption is that you turned down the entire cell, and just need to update the global topo data.") diff --git a/go/cmd/vtctldclient/command/vreplication/common/cancel.go b/go/cmd/vtctldclient/command/vreplication/common/cancel.go index 48abcc89584..838a95faad9 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/cancel.go +++ b/go/cmd/vtctldclient/command/vreplication/common/cancel.go @@ -30,6 +30,7 @@ import ( var CancelOptions = struct { KeepData bool KeepRoutingRules bool + Shards []string }{} func GetCancelCommand(opts *SubCommandsOpts) *cobra.Command { @@ -58,6 +59,7 @@ func commandCancel(cmd *cobra.Command, args []string) error { Workflow: BaseOptions.Workflow, KeepData: CancelOptions.KeepData, KeepRoutingRules: CancelOptions.KeepRoutingRules, + Shards: CancelOptions.Shards, } resp, err := GetClient().WorkflowDelete(GetCommandCtx(), req) if err != nil { diff --git a/go/cmd/vtctldclient/command/vreplication/common/complete.go b/go/cmd/vtctldclient/command/vreplication/common/complete.go index 6e210b188fe..19f82548af7 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/complete.go +++ b/go/cmd/vtctldclient/command/vreplication/common/complete.go @@ -16,6 +16,7 @@ var CompleteOptions = struct { KeepRoutingRules bool RenameTables bool DryRun bool + Shards []string }{} func GetCompleteCommand(opts *SubCommandsOpts) *cobra.Command { diff --git a/go/cmd/vtctldclient/command/vreplication/common/show.go b/go/cmd/vtctldclient/command/vreplication/common/show.go index 71e6675f690..8022296153b 100644 --- 
a/go/cmd/vtctldclient/command/vreplication/common/show.go +++ b/go/cmd/vtctldclient/command/vreplication/common/show.go @@ -26,8 +26,9 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -var showOptions = struct { +var ShowOptions = struct { IncludeLogs bool + Shards []string }{} func GetShowCommand(opts *SubCommandsOpts) *cobra.Command { @@ -40,7 +41,7 @@ func GetShowCommand(opts *SubCommandsOpts) *cobra.Command { Args: cobra.NoArgs, RunE: commandShow, } - cmd.Flags().BoolVar(&showOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflow.") + cmd.Flags().BoolVar(&ShowOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflow.") return cmd } @@ -50,7 +51,8 @@ func commandShow(cmd *cobra.Command, args []string) error { req := &vtctldatapb.GetWorkflowsRequest{ Keyspace: BaseOptions.TargetKeyspace, Workflow: BaseOptions.Workflow, - IncludeLogs: showOptions.IncludeLogs, + IncludeLogs: ShowOptions.IncludeLogs, + Shards: ShowOptions.Shards, } resp, err := GetClient().GetWorkflows(GetCommandCtx(), req) if err != nil { diff --git a/go/cmd/vtctldclient/command/vreplication/common/status.go b/go/cmd/vtctldclient/command/vreplication/common/status.go index ad038c42536..54a2b45ce2c 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/status.go +++ b/go/cmd/vtctldclient/command/vreplication/common/status.go @@ -26,6 +26,10 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) +var StatusOptions = struct { + Shards []string +}{} + func GetStatusCommand(opts *SubCommandsOpts) *cobra.Command { cmd := &cobra.Command{ Use: "status", @@ -49,6 +53,7 @@ func commandStatus(cmd *cobra.Command, args []string) error { req := &vtctldatapb.WorkflowStatusRequest{ Keyspace: BaseOptions.TargetKeyspace, Workflow: BaseOptions.Workflow, + Shards: StatusOptions.Shards, } resp, err := GetClient().WorkflowStatus(GetCommandCtx(), req) if err != nil { diff --git 
a/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go index 019367fe82b..4004afc0ac0 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go +++ b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go @@ -91,6 +91,7 @@ func commandSwitchTraffic(cmd *cobra.Command, args []string) error { req := &vtctldatapb.WorkflowSwitchTrafficRequest{ Keyspace: BaseOptions.TargetKeyspace, Workflow: BaseOptions.Workflow, + Cells: SwitchTrafficOptions.Cells, TabletTypes: SwitchTrafficOptions.TabletTypes, MaxReplicationLagAllowed: protoutil.DurationToProto(SwitchTrafficOptions.MaxReplicationLagAllowed), Timeout: protoutil.DurationToProto(SwitchTrafficOptions.Timeout), diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils.go b/go/cmd/vtctldclient/command/vreplication/common/utils.go index da6e3329579..a742f31a9ff 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/utils.go +++ b/go/cmd/vtctldclient/command/vreplication/common/utils.go @@ -64,6 +64,9 @@ var ( DeferSecondaryKeys bool AutoStart bool StopAfterCopy bool + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int }{} ) @@ -230,6 +233,7 @@ var SwitchTrafficOptions = struct { DryRun bool Direction workflow.TrafficSwitchDirection InitializeTargetSequences bool + Shards []string }{} func AddCommonSwitchTrafficFlags(cmd *cobra.Command, initializeTargetSequences bool) { @@ -243,3 +247,7 @@ func AddCommonSwitchTrafficFlags(cmd *cobra.Command, initializeTargetSequences b cmd.Flags().BoolVar(&SwitchTrafficOptions.InitializeTargetSequences, "initialize-target-sequences", false, "When moving tables from an unsharded keyspace to a sharded keyspace, initialize any sequences that are being used on the target when switching writes.") } } + +func AddShardSubsetFlag(cmd *cobra.Command, shardsOption *[]string) { + cmd.Flags().StringSliceVar(shardsOption, "shards", nil, "(Optional) Specifies a 
comma-separated list of shards to operate on.") +} diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go index 0dc179060d6..39de482da2c 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go +++ b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" ) @@ -144,10 +145,10 @@ func SetupLocalVtctldClient(t *testing.T, ctx context.Context, cells ...string) tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient { return nil }) - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) localvtctldclient.SetServer(vtctld) command.VtctldClientProtocol = "local" - client, err := vtctldclient.New(command.VtctldClientProtocol, "") + client, err := vtctldclient.New(ctx, command.VtctldClientProtocol, "") require.NoError(t, err, "failed to create local vtctld client which uses an internal vtctld server") common.SetClient(client) } diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/create.go b/go/cmd/vtctldclient/command/vreplication/materialize/create.go index d835b0f3426..3eccd20df2a 100644 --- a/go/cmd/vtctldclient/command/vreplication/materialize/create.go +++ b/go/cmd/vtctldclient/command/vreplication/materialize/create.go @@ -93,6 +93,7 @@ func commandCreate(cmd *cobra.Command, args []string) error { ms := &vtctldatapb.MaterializeSettings{ Workflow: common.BaseOptions.Workflow, + MaterializationIntent: vtctldatapb.MaterializationIntent_CUSTOM, TargetKeyspace: common.BaseOptions.TargetKeyspace, SourceKeyspace: createOptions.SourceKeyspace, TableSettings: createOptions.TableSettings.val, @@ -102,6 +103,15 @@ func 
commandCreate(cmd *cobra.Command, args []string) error { TabletSelectionPreference: tsp, } + createOptions.TableSettings.parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: common.CreateOptions.MySQLServerVersion, + TruncateUILen: common.CreateOptions.TruncateUILen, + TruncateErrLen: common.CreateOptions.TruncateErrLen, + }) + if err != nil { + return err + } + req := &vtctldatapb.MaterializeCreateRequest{ Settings: ms, } @@ -132,7 +142,8 @@ func commandCreate(cmd *cobra.Command, args []string) error { // tableSettings is a wrapper around a slice of TableMaterializeSettings // proto messages that implements the pflag.Value interface. type tableSettings struct { - val []*vtctldatapb.TableMaterializeSettings + val []*vtctldatapb.TableMaterializeSettings + parser *sqlparser.Parser } func (ts *tableSettings) String() string { @@ -157,7 +168,7 @@ func (ts *tableSettings) Set(v string) error { return fmt.Errorf("missing target_table or source_expression") } // Validate that the query is valid. 
- stmt, err := sqlparser.Parse(tms.SourceExpression) + stmt, err := ts.parser.Parse(tms.SourceExpression) if err != nil { return fmt.Errorf("invalid source_expression: %q", tms.SourceExpression) } @@ -167,7 +178,7 @@ func (ts *tableSettings) Set(v string) error { err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node := node.(type) { case sqlparser.TableName: - if !node.Name.IsEmpty() { + if node.Name.NotEmpty() { if seenSourceTables[node.Name.String()] { return false, fmt.Errorf("multiple source_expression queries use the same table: %q", node.Name.String()) } diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go index 58be1ec4433..5845504af3f 100644 --- a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go +++ b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go @@ -17,9 +17,12 @@ limitations under the License. package materialize import ( + "fmt" + "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/vt/topo/topoproto" ) @@ -46,6 +49,9 @@ func registerCommands(root *cobra.Command) { create.Flags().Var(&createOptions.TableSettings, "table-settings", "A JSON array defining what tables to materialize using what select statements. 
See the --help output for more details.") create.MarkFlagRequired("table-settings") create.Flags().BoolVar(&common.CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.") + create.Flags().StringVar(&common.CreateOptions.MySQLServerVersion, "mysql_server_version", fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion), "Configure the MySQL version to use for example for the parser.") + create.Flags().IntVar(&common.CreateOptions.TruncateUILen, "sql-max-length-ui", 512, "truncate queries in debug UIs to the given length (default 512)") + create.Flags().IntVar(&common.CreateOptions.TruncateErrLen, "sql-max-length-errors", 0, "truncate queries in error logs to the given length (default unlimited)") base.AddCommand(create) // Generic workflow commands. diff --git a/go/cmd/vtctldclient/command/vreplication/mount/mount.go b/go/cmd/vtctldclient/command/vreplication/mount/mount.go index 95ce3961e71..33bc69e5626 100644 --- a/go/cmd/vtctldclient/command/vreplication/mount/mount.go +++ b/go/cmd/vtctldclient/command/vreplication/mount/mount.go @@ -143,9 +143,7 @@ func commandList(cmd *cobra.Command, args []string) error { if err != nil { return err } - if err != nil { - return err - } + data, err := json.Marshal(resp) if err != nil { return err diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/create.go b/go/cmd/vtctldclient/command/vreplication/movetables/create.go index 95c50f4f97e..e7d5dbdfd82 100644 --- a/go/cmd/vtctldclient/command/vreplication/movetables/create.go +++ b/go/cmd/vtctldclient/command/vreplication/movetables/create.go @@ -39,6 +39,7 @@ var ( SourceTimeZone string NoRoutingRules bool AtomicCopy bool + WorkflowOptions vtctldatapb.WorkflowOptions }{} // create makes a MoveTablesCreate gRPC call to a vtctld. 
@@ -77,6 +78,15 @@ var ( if err := checkAtomicCopyOptions(); err != nil { return err } + + tenantId := createOptions.WorkflowOptions.GetTenantId() + if len(createOptions.WorkflowOptions.GetShards()) > 0 && tenantId == "" { + return fmt.Errorf("--shards specified, but not --tenant-id: you can only specify target shards for multi-tenant migrations") + } + if tenantId != "" && len(createOptions.SourceShards) > 0 { + return fmt.Errorf("cannot specify both --tenant-id (i.e. a multi-tenant migration) and --source-shards (i.e. a shard-by-shard migration)") + } + return nil }, RunE: commandCreate, @@ -109,6 +119,7 @@ func commandCreate(cmd *cobra.Command, args []string) error { StopAfterCopy: common.CreateOptions.StopAfterCopy, NoRoutingRules: createOptions.NoRoutingRules, AtomicCopy: createOptions.AtomicCopy, + WorkflowOptions: &createOptions.WorkflowOptions, } resp, err := common.GetClient().MoveTablesCreate(common.GetCommandCtx(), req) diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go index e2c7daed223..d729230e7a7 100644 --- a/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go +++ b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go @@ -47,24 +47,34 @@ func registerCommands(root *cobra.Command) { create.Flags().StringSliceVar(&createOptions.ExcludeTables, "exclude-tables", nil, "Source tables to exclude from copying.") create.Flags().BoolVar(&createOptions.NoRoutingRules, "no-routing-rules", false, "(Advanced) Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.") create.Flags().BoolVar(&createOptions.AtomicCopy, "atomic-copy", false, "(EXPERIMENTAL) A single copy phase is run for all tables from the source. 
Use this, for example, if your source keyspace has tables which use foreign key constraints.") + create.Flags().StringVar(&createOptions.WorkflowOptions.TenantId, "tenant-id", "", "(EXPERIMENTAL: Multi-tenant migrations only) The tenant ID to use for the MoveTables workflow into a multi-tenant keyspace.") + create.Flags().BoolVar(&createOptions.WorkflowOptions.StripShardedAutoIncrement, "remove-sharded-auto-increment", true, "If moving the table(s) to a sharded keyspace, remove any auto_increment clauses when copying the schema to the target as sharded keyspaces should rely on either user/application generated values or Vitess sequences to ensure uniqueness.") + create.Flags().StringSliceVar(&createOptions.WorkflowOptions.Shards, "shards", nil, "(EXPERIMENTAL: Multi-tenant migrations only) Specify that vreplication streams should only be created on this subset of target shards. Warning: you should first ensure that all rows on the source route to the specified subset of target shards using your VIndex of choice or you could lose data during the migration.") base.AddCommand(create) opts := &common.SubCommandsOpts{ SubCommand: "MoveTables", Workflow: "commerce2customer", } - base.AddCommand(common.GetShowCommand(opts)) - base.AddCommand(common.GetStatusCommand(opts)) + showCommand := common.GetShowCommand(opts) + common.AddShardSubsetFlag(showCommand, &common.ShowOptions.Shards) + base.AddCommand(showCommand) + + statusCommand := common.GetStatusCommand(opts) + common.AddShardSubsetFlag(statusCommand, &common.StatusOptions.Shards) + base.AddCommand(statusCommand) base.AddCommand(common.GetStartCommand(opts)) base.AddCommand(common.GetStopCommand(opts)) switchTrafficCommand := common.GetSwitchTrafficCommand(opts) common.AddCommonSwitchTrafficFlags(switchTrafficCommand, true) + common.AddShardSubsetFlag(switchTrafficCommand, &common.SwitchTrafficOptions.Shards) base.AddCommand(switchTrafficCommand) reverseTrafficCommand := common.GetReverseTrafficCommand(opts) 
common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false) + common.AddShardSubsetFlag(reverseTrafficCommand, &common.SwitchTrafficOptions.Shards) base.AddCommand(reverseTrafficCommand) complete := common.GetCompleteCommand(opts) @@ -72,11 +82,13 @@ func registerCommands(root *cobra.Command) { complete.Flags().BoolVar(&common.CompleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules in place that direct table traffic from the source keyspace to the target keyspace of the MoveTables workflow.") complete.Flags().BoolVar(&common.CompleteOptions.RenameTables, "rename-tables", false, "Keep the original source table data that was copied by the MoveTables workflow, but rename each table to '__old'.") complete.Flags().BoolVar(&common.CompleteOptions.DryRun, "dry-run", false, "Print the actions that would be taken and report any known errors that would have occurred.") + common.AddShardSubsetFlag(complete, &common.CompleteOptions.Shards) base.AddCommand(complete) cancel := common.GetCancelCommand(opts) cancel.Flags().BoolVar(&common.CancelOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the MoveTables workflow in the target keyspace.") cancel.Flags().BoolVar(&common.CancelOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the MoveTables workflow.") + common.AddShardSubsetFlag(cancel, &common.CancelOptions.Shards) base.AddCommand(cancel) } diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/create.go b/go/cmd/vtctldclient/command/vreplication/reshard/create.go index b8506ae61d0..05700dbb9fe 100644 --- a/go/cmd/vtctldclient/command/vreplication/reshard/create.go +++ b/go/cmd/vtctldclient/command/vreplication/reshard/create.go @@ -60,9 +60,8 @@ func commandReshardCreate(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) req := &vtctldatapb.ReshardCreateRequest{ - Workflow: common.BaseOptions.Workflow, - Keyspace: 
common.BaseOptions.TargetKeyspace, - + Workflow: common.BaseOptions.Workflow, + Keyspace: common.BaseOptions.TargetKeyspace, TabletTypes: common.CreateOptions.TabletTypes, TabletSelectionPreference: tsp, Cells: common.CreateOptions.Cells, @@ -70,10 +69,9 @@ func commandReshardCreate(cmd *cobra.Command, args []string) error { DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys, AutoStart: common.CreateOptions.AutoStart, StopAfterCopy: common.CreateOptions.StopAfterCopy, - - SourceShards: reshardCreateOptions.sourceShards, - TargetShards: reshardCreateOptions.targetShards, - SkipSchemaCopy: reshardCreateOptions.skipSchemaCopy, + SourceShards: reshardCreateOptions.sourceShards, + TargetShards: reshardCreateOptions.targetShards, + SkipSchemaCopy: reshardCreateOptions.skipSchemaCopy, } resp, err := common.GetClient().ReshardCreate(common.GetCommandCtx(), req) if err != nil { diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go index a98cf3ad743..8af72cedfdb 100644 --- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go @@ -57,15 +57,17 @@ var ( TargetCells []string TabletTypes []topodatapb.TabletType Tables []string - Limit uint32 // We only accept positive values but pass on an int64 + Limit int64 FilteredReplicationWaitTime time.Duration DebugQuery bool + MaxReportSampleRows int64 OnlyPKs bool UpdateTableStats bool - MaxExtraRowsToCompare uint32 // We only accept positive values but pass on an int64 + MaxExtraRowsToCompare int64 Wait bool WaitUpdateInterval time.Duration AutoRetry bool + MaxDiffDuration time.Duration }{} deleteOptions = struct { @@ -112,6 +114,16 @@ var ( createOptions.Tables[i] = strings.TrimSpace(table) } } + // Enforce non-negative values for limits and max options. 
+ if createOptions.Limit < 1 { + return fmt.Errorf("--limit must be a positive value") + } + if createOptions.MaxReportSampleRows < 0 { + return fmt.Errorf("--max-report-sample-rows must not be a negative value") + } + if createOptions.MaxExtraRowsToCompare < 0 { + return fmt.Errorf("--max-extra-rows-to-compare must not be a negative value") + } return nil } @@ -142,7 +154,7 @@ vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --targe delete = &cobra.Command{ Use: "delete", Short: "Delete VDiffs.", - Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace delete a037a9e2-5628-11ee-8c99-0242ac120002 + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer delete a037a9e2-5628-11ee-8c99-0242ac120002 vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace delete all`, DisableFlagsInUseLine: true, Aliases: []string{"Delete"}, @@ -167,7 +179,7 @@ vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --targe resume = &cobra.Command{ Use: "resume", Short: "Resume a VDiff.", - Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace resume a037a9e2-5628-11ee-8c99-0242ac120002`, + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer resume a037a9e2-5628-11ee-8c99-0242ac120002`, DisableFlagsInUseLine: true, Aliases: []string{"Resume"}, Args: cobra.ExactArgs(1), @@ -186,9 +198,9 @@ vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --targe show = &cobra.Command{ Use: "show", Short: "Show the status of a VDiff.", - Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show last -vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show a037a9e2-5628-11ee-8c99-0242ac120002 -vtctldclient --server 
localhost:15999 vdiff --workflow commerce2customer --target-keyspace show all`, + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer show last +vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer show a037a9e2-5628-11ee-8c99-0242ac120002 +vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer show all`, DisableFlagsInUseLine: true, Aliases: []string{"Show"}, Args: cobra.ExactArgs(1), @@ -212,7 +224,7 @@ vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --targe stop = &cobra.Command{ Use: "stop", Short: "Stop a running VDiff.", - Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace stop a037a9e2-5628-11ee-8c99-0242ac120002`, + Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer stop a037a9e2-5628-11ee-8c99-0242ac120002`, DisableFlagsInUseLine: true, Aliases: []string{"Stop"}, Args: cobra.ExactArgs(1), @@ -270,15 +282,17 @@ func commandCreate(cmd *cobra.Command, args []string) error { TabletTypes: createOptions.TabletTypes, TabletSelectionPreference: tsp, Tables: createOptions.Tables, - Limit: int64(createOptions.Limit), + Limit: createOptions.Limit, FilteredReplicationWaitTime: protoutil.DurationToProto(createOptions.FilteredReplicationWaitTime), DebugQuery: createOptions.DebugQuery, OnlyPKs: createOptions.OnlyPKs, UpdateTableStats: createOptions.UpdateTableStats, - MaxExtraRowsToCompare: int64(createOptions.MaxExtraRowsToCompare), + MaxExtraRowsToCompare: createOptions.MaxExtraRowsToCompare, Wait: createOptions.Wait, WaitUpdateInterval: protoutil.DurationToProto(createOptions.WaitUpdateInterval), AutoRetry: createOptions.AutoRetry, + MaxReportSampleRows: createOptions.MaxReportSampleRows, + MaxDiffDuration: protoutil.DurationToProto(createOptions.MaxDiffDuration), }) if err != nil { @@ 
-861,15 +875,17 @@ func registerCommands(root *cobra.Command) { create.Flags().Var((*topoprotopb.TabletTypeListFlag)(&createOptions.TabletTypes), "tablet-types", "Tablet types to use on the source and target.") create.Flags().BoolVar(&common.CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") create.Flags().DurationVar(&createOptions.FilteredReplicationWaitTime, "filtered-replication-wait-time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for replication to catch up when syncing tablet streams.") - create.Flags().Uint32Var(&createOptions.Limit, "limit", math.MaxUint32, "Max rows to stop comparing after.") + create.Flags().Int64Var(&createOptions.Limit, "limit", math.MaxInt64, "Max rows to stop comparing after.") create.Flags().BoolVar(&createOptions.DebugQuery, "debug-query", false, "Adds a mysql query to the report that can be used for further debugging.") + create.Flags().Int64Var(&createOptions.MaxReportSampleRows, "max-report-sample-rows", 10, "Maximum number of row differences to report (0 for all differences). NOTE: when increasing this value it is highly recommended to also specify --only-pks") create.Flags().BoolVar(&createOptions.OnlyPKs, "only-pks", false, "When reporting missing rows, only show primary keys in the report.") create.Flags().StringSliceVar(&createOptions.Tables, "tables", nil, "Only run vdiff for these tables in the workflow.") - create.Flags().Uint32Var(&createOptions.MaxExtraRowsToCompare, "max-extra-rows-to-compare", 1000, "If there are collation differences between the source and target, you can have rows that are identical but simply returned in a different order from MySQL. 
We will do a second pass to compare the rows for any actual differences in this case and this flag allows you to control the resources used for this operation.") + create.Flags().Int64Var(&createOptions.MaxExtraRowsToCompare, "max-extra-rows-to-compare", 1000, "If there are collation differences between the source and target, you can have rows that are identical but simply returned in a different order from MySQL. We will do a second pass to compare the rows for any actual differences in this case and this flag allows you to control the resources used for this operation.") create.Flags().BoolVar(&createOptions.Wait, "wait", false, "When creating or resuming a vdiff, wait for it to finish before exiting.") create.Flags().DurationVar(&createOptions.WaitUpdateInterval, "wait-update-interval", time.Duration(1*time.Minute), "When waiting on a vdiff to finish, check and display the current status this often.") create.Flags().BoolVar(&createOptions.AutoRetry, "auto-retry", true, "Should this vdiff automatically retry and continue in case of recoverable errors.") create.Flags().BoolVar(&createOptions.UpdateTableStats, "update-table-stats", false, "Update the table statistics, using ANALYZE TABLE, on each table involved in the VDiff during initialization. 
This will ensure that progress estimates are as accurate as possible -- but it does involve locks and can potentially impact query processing on the target keyspace.") + create.Flags().DurationVar(&createOptions.MaxDiffDuration, "max-diff-duration", 0, "How long should an individual table diff run before being stopped and restarted in order to lessen the impact on tablets due to holding open database snapshots for long periods of time (0 is the default and means no time limit).") base.AddCommand(create) base.AddCommand(delete) diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go index 1a2a374cf81..9c98338bf67 100644 --- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go @@ -21,7 +21,7 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "sync" "testing" @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -83,12 +84,12 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), } - env.ws = workflow.NewServer(env.topoServ, env.tmc) + env.ws = workflow.NewServer(vtenv.NewTestEnv(), env.topoServ, env.tmc) env.tmc.testEnv = env // Generate a unique dialer name. 
- dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { env.mu.Lock() defer env.mu.Unlock() if qs, ok := env.tablets[int(tablet.Alias.Uid)]; ok { diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go index fd535bb2aad..8742d22abd0 100644 --- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go @@ -269,6 +269,277 @@ func TestVDiffUnsharded(t *testing.T) { } } ]`), + }, { + id: "9", // --max-vdiff-report-rows=20 --only-pks + result: sqltypes.MakeTestResult(fields, + "completed||t1|"+UUID+"|completed|30|"+starttime+"|30|"+comptime+"|1|"+ + `{"TableName": "t1", "MatchingRows": 10, "ProcessedRows": 30, "MismatchedRows": 20, "ExtraRowsSource": 0, `+ + `"ExtraRowsTarget": 0, "MismatchedRowsSample": [`+ + `{"Source": {"Row": {"c1": "2"}}, "Target": {"Row": {"c1": "2"}}},`+ + `{"Source": {"Row": {"c1": "3"}}, "Target": {"Row": {"c1": "3"}}},`+ + `{"Source": {"Row": {"c1": "4"}}, "Target": {"Row": {"c1": "4"}}},`+ + `{"Source": {"Row": {"c1": "5"}}, "Target": {"Row": {"c1": "5"}}},`+ + `{"Source": {"Row": {"c1": "6"}}, "Target": {"Row": {"c1": "6"}}},`+ + `{"Source": {"Row": {"c1": "7"}}, "Target": {"Row": {"c1": "7"}}},`+ + `{"Source": {"Row": {"c1": "8"}}, "Target": {"Row": {"c1": "8"}}},`+ + `{"Source": {"Row": {"c1": "9"}}, "Target": {"Row": {"c1": "9"}}},`+ + `{"Source": {"Row": {"c1": "10"}}, "Target": {"Row": {"c1": "10"}}},`+ + `{"Source": {"Row": {"c1": "11"}}, "Target": {"Row": {"c1": "11"}}},`+ + `{"Source": 
{"Row": {"c1": "12"}}, "Target": {"Row": {"c1": "12"}}},`+ + `{"Source": {"Row": {"c1": "13"}}, "Target": {"Row": {"c1": "13"}}},`+ + `{"Source": {"Row": {"c1": "14"}}, "Target": {"Row": {"c1": "14"}}},`+ + `{"Source": {"Row": {"c1": "15"}}, "Target": {"Row": {"c1": "15"}}},`+ + `{"Source": {"Row": {"c1": "16"}}, "Target": {"Row": {"c1": "16"}}},`+ + `{"Source": {"Row": {"c1": "17"}}, "Target": {"Row": {"c1": "17"}}},`+ + `{"Source": {"Row": {"c1": "18"}}, "Target": {"Row": {"c1": "18"}}},`+ + `{"Source": {"Row": {"c1": "19"}}, "Target": {"Row": {"c1": "19"}}},`+ + `{"Source": {"Row": {"c1": "20"}}, "Target": {"Row": {"c1": "20"}}},`+ + `{"Source": {"Row": {"c1": "21"}}, "Target": {"Row": {"c1": "21"}}}`+ + `]}`), + report: fmt.Sprintf(badReportfmt, + env.targetKeyspace, UUID, 30, true, starttime, comptime, 30, 10, 20, 0, 0, 30, 10, 20, 0, 0, + `"MismatchedRowsSample": [ + { + "Source": { + "Row": { + "c1": "2" + } + }, + "Target": { + "Row": { + "c1": "2" + } + } + }, + { + "Source": { + "Row": { + "c1": "3" + } + }, + "Target": { + "Row": { + "c1": "3" + } + } + }, + { + "Source": { + "Row": { + "c1": "4" + } + }, + "Target": { + "Row": { + "c1": "4" + } + } + }, + { + "Source": { + "Row": { + "c1": "5" + } + }, + "Target": { + "Row": { + "c1": "5" + } + } + }, + { + "Source": { + "Row": { + "c1": "6" + } + }, + "Target": { + "Row": { + "c1": "6" + } + } + }, + { + "Source": { + "Row": { + "c1": "7" + } + }, + "Target": { + "Row": { + "c1": "7" + } + } + }, + { + "Source": { + "Row": { + "c1": "8" + } + }, + "Target": { + "Row": { + "c1": "8" + } + } + }, + { + "Source": { + "Row": { + "c1": "9" + } + }, + "Target": { + "Row": { + "c1": "9" + } + } + }, + { + "Source": { + "Row": { + "c1": "10" + } + }, + "Target": { + "Row": { + "c1": "10" + } + } + }, + { + "Source": { + "Row": { + "c1": "11" + } + }, + "Target": { + "Row": { + "c1": "11" + } + } + }, + { + "Source": { + "Row": { + "c1": "12" + } + }, + "Target": { + "Row": { + "c1": "12" + } + } + }, + { + 
"Source": { + "Row": { + "c1": "13" + } + }, + "Target": { + "Row": { + "c1": "13" + } + } + }, + { + "Source": { + "Row": { + "c1": "14" + } + }, + "Target": { + "Row": { + "c1": "14" + } + } + }, + { + "Source": { + "Row": { + "c1": "15" + } + }, + "Target": { + "Row": { + "c1": "15" + } + } + }, + { + "Source": { + "Row": { + "c1": "16" + } + }, + "Target": { + "Row": { + "c1": "16" + } + } + }, + { + "Source": { + "Row": { + "c1": "17" + } + }, + "Target": { + "Row": { + "c1": "17" + } + } + }, + { + "Source": { + "Row": { + "c1": "18" + } + }, + "Target": { + "Row": { + "c1": "18" + } + } + }, + { + "Source": { + "Row": { + "c1": "19" + } + }, + "Target": { + "Row": { + "c1": "19" + } + } + }, + { + "Source": { + "Row": { + "c1": "20" + } + }, + "Target": { + "Row": { + "c1": "20" + } + } + }, + { + "Source": { + "Row": { + "c1": "21" + } + }, + "Target": { + "Row": { + "c1": "21" + } + } + } + ]`), }, } diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/delete.go b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go index 4eae8076fec..3739979ff5f 100644 --- a/go/cmd/vtctldclient/command/vreplication/workflow/delete.go +++ b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go @@ -54,6 +54,7 @@ func commandDelete(cmd *cobra.Command, args []string) error { Workflow: baseOptions.Workflow, KeepData: deleteOptions.KeepData, KeepRoutingRules: deleteOptions.KeepRoutingRules, + Shards: baseOptions.Shards, } resp, err := common.GetClient().WorkflowDelete(common.GetCommandCtx(), req) if err != nil { diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/show.go b/go/cmd/vtctldclient/command/vreplication/workflow/show.go index ebc18ea250d..cbb1d01ba10 100644 --- a/go/cmd/vtctldclient/command/vreplication/workflow/show.go +++ b/go/cmd/vtctldclient/command/vreplication/workflow/show.go @@ -59,6 +59,7 @@ func commandShow(cmd *cobra.Command, args []string) error { Keyspace: baseOptions.Keyspace, Workflow: baseOptions.Workflow, IncludeLogs: 
workflowShowOptions.IncludeLogs, + Shards: baseOptions.Shards, } resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), req) if err != nil { diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/update.go b/go/cmd/vtctldclient/command/vreplication/workflow/update.go index 466d81e8be4..3d06ad3e64e 100644 --- a/go/cmd/vtctldclient/command/vreplication/workflow/update.go +++ b/go/cmd/vtctldclient/command/vreplication/workflow/update.go @@ -111,6 +111,7 @@ func commandUpdate(cmd *cobra.Command, args []string) error { TabletTypes: updateOptions.TabletTypes, TabletSelectionPreference: tsp, OnDdl: binlogdatapb.OnDDLAction(onddl), + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), // We don't allow changing this in the client command }, } diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go index e552b61d476..a4fbb37d4bd 100644 --- a/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go +++ b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go @@ -39,6 +39,7 @@ var ( baseOptions = struct { Keyspace string Workflow string + Shards []string }{} workflowShowOptions = struct { @@ -59,21 +60,26 @@ func registerCommands(root *cobra.Command) { delete.MarkFlagRequired("workflow") delete.Flags().BoolVar(&deleteOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the workflow in the target keyspace.") delete.Flags().BoolVar(&deleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the workflow.") + common.AddShardSubsetFlag(delete, &baseOptions.Shards) base.AddCommand(delete) + common.AddShardSubsetFlag(workflowList, &baseOptions.Shards) base.AddCommand(workflowList) show.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want the details for.") show.MarkFlagRequired("workflow") show.Flags().BoolVar(&workflowShowOptions.IncludeLogs, 
"include-logs", true, "Include recent logs for the workflow.") + common.AddShardSubsetFlag(show, &baseOptions.Shards) base.AddCommand(show) start.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to start.") start.MarkFlagRequired("workflow") + common.AddShardSubsetFlag(start, &baseOptions.Shards) base.AddCommand(start) stop.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to stop.") stop.MarkFlagRequired("workflow") + common.AddShardSubsetFlag(stop, &baseOptions.Shards) base.AddCommand(stop) update.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to update.") @@ -82,6 +88,7 @@ func registerCommands(root *cobra.Command) { update.Flags().VarP((*topoproto.TabletTypeListFlag)(&updateOptions.TabletTypes), "tablet-types", "t", "New source tablet types to replicate from (e.g. PRIMARY,REPLICA,RDONLY).") update.Flags().BoolVar(&updateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.") update.Flags().StringVar(&updateOptions.OnDDL, "on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") + common.AddShardSubsetFlag(update, &baseOptions.Shards) base.AddCommand(update) } diff --git a/go/cmd/vtctldclient/command/vschemas.go b/go/cmd/vtctldclient/command/vschemas.go index c7faf6765f4..37ad00ccb6b 100644 --- a/go/cmd/vtctldclient/command/vschemas.go +++ b/go/cmd/vtctldclient/command/vschemas.go @@ -40,7 +40,7 @@ var ( } // ApplyVSchema makes an ApplyVSchema gRPC call to a vtctld. ApplyVSchema = &cobra.Command{ - Use: "ApplyVSchema {--vschema= || --vschema-file= || --sql= || --sql-file=} [--cells=c1,c2,...] 
[--skip-rebuild] [--dry-run] ", + Use: "ApplyVSchema {--vschema= || --vschema-file= || --sql= || --sql-file=} [--cells=c1,c2,...] [--skip-rebuild] [--dry-run] [--strict] ", Short: "Applies the VTGate routing schema to the provided keyspace. Shows the result after application.", DisableFlagsInUseLine: true, Args: cobra.ExactArgs(1), @@ -56,6 +56,7 @@ var applyVSchemaOptions = struct { DryRun bool SkipRebuild bool Cells []string + Strict bool }{} func commandApplyVSchema(cmd *cobra.Command, args []string) error { @@ -75,6 +76,7 @@ func commandApplyVSchema(cmd *cobra.Command, args []string) error { SkipRebuild: applyVSchemaOptions.SkipRebuild, Cells: applyVSchemaOptions.Cells, DryRun: applyVSchemaOptions.DryRun, + Strict: applyVSchemaOptions.Strict, } var err error @@ -113,11 +115,16 @@ func commandApplyVSchema(cmd *cobra.Command, args []string) error { if err != nil { return err } - data, err := cli.MarshalJSON(res.VSchema) + vsData, err := cli.MarshalJSON(res.VSchema) if err != nil { return err } - fmt.Printf("New VSchema object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", data) + fmt.Printf("New VSchema object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", vsData) + for vdxName, ups := range res.UnknownVindexParams { + for _, param := range ups.Params { + fmt.Printf("Unknown parameter in vindex %s: %s\n", vdxName, param) + } + } return nil } @@ -151,6 +158,7 @@ func init() { ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.DryRun, "dry-run", false, "If set, do not save the altered vschema, simply echo to console.") ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.SkipRebuild, "skip-rebuild", false, "Skip rebuilding the SrvSchema objects.") ApplyVSchema.Flags().StringSliceVar(&applyVSchemaOptions.Cells, "cells", nil, "Limits the rebuild to the specified cells, after application. 
Ignored if --skip-rebuild is set.") + ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.Strict, "strict", false, "If set, treat unknown vindex params as errors.") Root.AddCommand(ApplyVSchema) Root.AddCommand(GetVSchema) diff --git a/go/cmd/vtexplain/cli/vtexplain.go b/go/cmd/vtexplain/cli/vtexplain.go index 8b0622cf8a3..824b0c31f84 100644 --- a/go/cmd/vtexplain/cli/vtexplain.go +++ b/go/cmd/vtexplain/cli/vtexplain.go @@ -22,8 +22,11 @@ import ( "os" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtexplain" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -78,12 +81,11 @@ If no keyspace name is present, VTExplain will return the following error: ` + "```\n", Example: "Explain how Vitess will execute the query `SELECT * FROM users` using the VSchema contained in `vschemas.json` and the database schema `schema.sql`:\n\n" + "```\nvtexplain --vschema-file vschema.json --schema-file schema.sql --sql \"SELECT * FROM users\"\n```\n\n" + - "Explain how the example will execute on 128 shards using Row-based replication:\n\n" + - "```\nvtexplain -- -shards 128 --vschema-file vschema.json --schema-file schema.sql --replication-mode \"ROW\" --output-mode text --sql \"INSERT INTO users (user_id, name) VALUES(1, 'john')\"\n```\n", Args: cobra.NoArgs, PreRunE: servenv.CobraPreRunE, + Version: servenv.AppVersion.String(), RunE: run, } ) @@ -137,10 +139,10 @@ func run(cmd *cobra.Command, args []string) error { defer logutil.Flush() servenv.Init() - return parseAndRun() + return parseAndRun(cmd.Context()) } -func parseAndRun() error { +func parseAndRun(ctx context.Context) error { plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr) if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_Gen4 { return fmt.Errorf("invalid value specified for planner-version of '%s' -- 
valid value is Gen4 or an empty value to use the default planner", plannerVersionStr) @@ -175,7 +177,17 @@ func parseAndRun() error { Target: dbName, } - vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts) + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return err + } + ts := memorytopo.NewServer(ctx, vtexplain.Cell) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + vte, err := vtexplain.Init(ctx, env, ts, vschema, schema, ksShardMap, opts, srvTopoCounts) if err != nil { return err } diff --git a/go/cmd/vtgate/cli/cli.go b/go/cmd/vtgate/cli/cli.go index 9182bfcf9a4..e0040e2d880 100644 --- a/go/cmd/vtgate/cli/cli.go +++ b/go/cmd/vtgate/cli/cli.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and @@ -25,11 +25,13 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -72,8 +74,14 @@ var ( PreRunE: servenv.CobraPreRunE, RunE: run, } + + srvTopoCounts *stats.CountersWithSingleLabel ) +func init() { + srvTopoCounts = stats.NewCountersWithSingleLabel("ResilientSrvTopoServer", "Resilient srvtopo server operations", "type") +} + // CheckCellFlags will check validation of cell and cells_to_watch flag // it will help to avoid strange behaviors when vtgate runs but actually does not work func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error { @@ -135,10 +143,16 @@ func run(cmd *cobra.Command, args []string) error { servenv.Init() + // Ensure we open the topo before we start the context, so that the + // defer that closes the topo runs after cancelling the context. + // This ensures that we've properly closed things like the watchers + // at that point. 
ts := topo.Open() defer ts.Close() - resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer") + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + resilientServer = srvtopo.NewResilientServer(ctx, ts, srvTopoCounts) tabletTypes := make([]topodatapb.TabletType, 0, 1) for _, tt := range tabletTypesToWait { @@ -151,15 +165,24 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("tablet_types_to_wait must contain at least one serving tablet type") } - err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch) + err := CheckCellFlags(ctx, resilientServer, cell, vtgate.CellsToWatch) if err != nil { return fmt.Errorf("cells_to_watch validation failed: %v", err) } plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("unable to initialize env: %v", err) + } + // pass nil for HealthCheck and it will be created - vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion) + vtg := vtgate.Init(ctx, env, nil, resilientServer, cell, tabletTypes, plannerVersion) servenv.OnRun(func() { // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template. 
@@ -167,7 +190,7 @@ func run(cmd *cobra.Command, args []string) error { addStatusParts(vtg) }) servenv.OnClose(func() { - _ = vtg.Gateway().Close(context.Background()) + _ = vtg.Gateway().Close(ctx) }) servenv.RunDefault() diff --git a/go/cmd/vtgate/cli/plugin_auth_clientcert.go b/go/cmd/vtgate/cli/plugin_auth_clientcert.go index 1a1334e71ba..d486669847f 100644 --- a/go/cmd/vtgate/cli/plugin_auth_clientcert.go +++ b/go/cmd/vtgate/cli/plugin_auth_clientcert.go @@ -23,6 +23,10 @@ import ( "vitess.io/vitess/go/vt/vtgate" ) +var clientcertAuthMethod string + func init() { - vtgate.RegisterPluginInitializer(func() { mysql.InitAuthServerClientCert() }) + Main.Flags().StringVar(&clientcertAuthMethod, "mysql_clientcert_auth_method", string(mysql.MysqlClearPassword), "client-side authentication method to use. Supported values: mysql_clear_password, dialog.") + + vtgate.RegisterPluginInitializer(func() { mysql.InitAuthServerClientCert(clientcertAuthMethod) }) } diff --git a/go/cmd/vtgate/cli/plugin_auth_ldap.go b/go/cmd/vtgate/cli/plugin_auth_ldap.go index 7dc5b246f72..f8312267504 100644 --- a/go/cmd/vtgate/cli/plugin_auth_ldap.go +++ b/go/cmd/vtgate/cli/plugin_auth_ldap.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -19,10 +19,21 @@ package cli // This plugin imports ldapauthserver to register the LDAP implementation of AuthServer. 
import ( + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/ldapauthserver" "vitess.io/vitess/go/vt/vtgate" ) +var ( + ldapAuthConfigFile string + ldapAuthConfigString string + ldapAuthMethod string +) + func init() { - vtgate.RegisterPluginInitializer(func() { ldapauthserver.Init() }) + Main.Flags().StringVar(&ldapAuthConfigFile, "mysql_ldap_auth_config_file", "", "JSON File from which to read LDAP server config.") + Main.Flags().StringVar(&ldapAuthConfigString, "mysql_ldap_auth_config_string", "", "JSON representation of LDAP server config.") + Main.Flags().StringVar(&ldapAuthMethod, "mysql_ldap_auth_method", string(mysql.MysqlClearPassword), "client-side authentication method to use. Supported values: mysql_clear_password, dialog.") + + vtgate.RegisterPluginInitializer(func() { ldapauthserver.Init(ldapAuthConfigFile, ldapAuthConfigString, ldapAuthMethod) }) } diff --git a/go/cmd/vtgate/cli/plugin_auth_static.go b/go/cmd/vtgate/cli/plugin_auth_static.go index 9ffd60a79f2..7ed0e7b8f61 100644 --- a/go/cmd/vtgate/cli/plugin_auth_static.go +++ b/go/cmd/vtgate/cli/plugin_auth_static.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -19,10 +19,24 @@ package cli // This plugin imports staticauthserver to register the flat-file implementation of AuthServer. 
import ( + "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/vtgate" ) +var ( + mysqlAuthServerStaticFile string + mysqlAuthServerStaticString string + mysqlAuthServerStaticReloadInterval time.Duration +) + func init() { - vtgate.RegisterPluginInitializer(func() { mysql.InitAuthServerStatic() }) + Main.Flags().StringVar(&mysqlAuthServerStaticFile, "mysql_auth_server_static_file", "", "JSON File to read the users/passwords from.") + Main.Flags().StringVar(&mysqlAuthServerStaticString, "mysql_auth_server_static_string", "", "JSON representation of the users/passwords config.") + Main.Flags().DurationVar(&mysqlAuthServerStaticReloadInterval, "mysql_auth_static_reload_interval", 0, "Ticker to reload credentials") + + vtgate.RegisterPluginInitializer(func() { + mysql.InitAuthServerStatic(mysqlAuthServerStaticFile, mysqlAuthServerStaticString, mysqlAuthServerStaticReloadInterval) + }) } diff --git a/go/cmd/vtgate/cli/plugin_auth_vault.go b/go/cmd/vtgate/cli/plugin_auth_vault.go index 2aee32e3940..a119d2d389b 100644 --- a/go/cmd/vtgate/cli/plugin_auth_vault.go +++ b/go/cmd/vtgate/cli/plugin_auth_vault.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -19,10 +19,36 @@ package cli // This plugin imports InitAuthServerVault to register the HashiCorp Vault implementation of AuthServer. 
import ( + "time" + "vitess.io/vitess/go/mysql/vault" "vitess.io/vitess/go/vt/vtgate" ) +var ( + vaultAddr string + vaultTimeout time.Duration + vaultCACert string + vaultPath string + vaultCacheTTL time.Duration + vaultTokenFile string + vaultRoleID string + vaultRoleSecretIDFile string + vaultRoleMountPoint string +) + func init() { - vtgate.RegisterPluginInitializer(func() { vault.InitAuthServerVault() }) + Main.Flags().StringVar(&vaultAddr, "mysql_auth_vault_addr", "", "URL to Vault server") + Main.Flags().DurationVar(&vaultTimeout, "mysql_auth_vault_timeout", 10*time.Second, "Timeout for vault API operations") + Main.Flags().StringVar(&vaultCACert, "mysql_auth_vault_tls_ca", "", "Path to CA PEM for validating Vault server certificate") + Main.Flags().StringVar(&vaultPath, "mysql_auth_vault_path", "", "Vault path to vtgate credentials JSON blob, e.g.: secret/data/prod/vtgatecreds") + Main.Flags().DurationVar(&vaultCacheTTL, "mysql_auth_vault_ttl", 30*time.Minute, "How long to cache vtgate credentials from the Vault server") + Main.Flags().StringVar(&vaultTokenFile, "mysql_auth_vault_tokenfile", "", "Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable") + Main.Flags().StringVar(&vaultRoleID, "mysql_auth_vault_roleid", "", "Vault AppRole id; can also be passed using VAULT_ROLEID environment variable") + Main.Flags().StringVar(&vaultRoleSecretIDFile, "mysql_auth_vault_role_secretidfile", "", "Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable") + Main.Flags().StringVar(&vaultRoleMountPoint, "mysql_auth_vault_role_mountpoint", "approle", "Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable") + + vtgate.RegisterPluginInitializer(func() { + vault.InitAuthServerVault(vaultAddr, vaultTimeout, vaultCACert, vaultPath, vaultCacheTTL, vaultTokenFile, vaultRoleID, vaultRoleSecretIDFile, vaultRoleMountPoint) + }) } diff --git 
a/go/cmd/vtgate/cli/plugin_consultopo.go b/go/cmd/vtgate/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/vtgate/cli/plugin_consultopo.go +++ b/go/cmd/vtgate/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/plugin_zk2topo.go b/go/cmd/vtgate/cli/plugin_zk2topo.go index 1870a3b2bb3..66d14988c75 100644 --- a/go/cmd/vtgate/cli/plugin_zk2topo.go +++ b/go/cmd/vtgate/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtgate/cli/status.go b/go/cmd/vtgate/cli/status.go index 2fdab073d5a..d38cac2d50e 100644 --- a/go/cmd/vtgate/cli/status.go +++ b/go/cmd/vtgate/cli/status.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and @@ -37,7 +37,10 @@ func addStatusParts(vtg *vtgate.VTGate) { servenv.AddStatusPart("Gateway Status", vtgate.StatusTemplate, func() any { return vtg.GetGatewayCacheStatus() }) - servenv.AddStatusPart("Health Check Cache", discovery.HealthCheckTemplate, func() any { + servenv.AddStatusPart("Health Check - Cache", discovery.HealthCheckCacheTemplate, func() any { return vtg.Gateway().TabletsCacheStatus() }) + servenv.AddStatusPart("Health Check - Healthy Tablets", discovery.HealthCheckHealthyTemplate, func() any { + return vtg.Gateway().TabletsHealthyStatus() + }) } diff --git a/go/cmd/vtorc/cli/cli.go b/go/cmd/vtorc/cli/cli.go index f521ae05e57..1233c1e2ac2 100644 --- a/go/cmd/vtorc/cli/cli.go +++ b/go/cmd/vtorc/cli/cli.go @@ -39,7 +39,6 @@ var ( --topo_global_root /vitess/global \ --log_dir $VTDATAROOT/tmp \ --port 15000 \ - --recovery-period-block-duration "10m" \ --instance-poll-time "1s" \ --topo-information-refresh-duration "30s" \ --alsologtostderr`, @@ -85,7 +84,7 @@ func run(cmd *cobra.Command, args []string) { // addStatusParts adds UI parts to the /debug/status page of VTOrc func addStatusParts() { servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any { - recoveries, _ := logic.ReadRecentRecoveries(false, 0) + recoveries, _ := logic.ReadRecentRecoveries(0) return recoveries }) } diff --git a/go/cmd/vtorc/cli/plugin_consultopo.go b/go/cmd/vtorc/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/vtorc/cli/plugin_consultopo.go +++ b/go/cmd/vtorc/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtorc/cli/plugin_zk2topo.go b/go/cmd/vtorc/cli/plugin_zk2topo.go index d71a7e2e196..0b2884cc258 100644 --- a/go/cmd/vtorc/cli/plugin_zk2topo.go +++ b/go/cmd/vtorc/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index 101265b16c5..c85d51e9325 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -17,9 +17,6 @@ package main import ( - _ "github.com/go-sql-driver/mysql" - _ "modernc.org/sqlite" - "vitess.io/vitess/go/cmd/vtorc/cli" "vitess.io/vitess/go/vt/log" ) diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go index 1efa35613d7..3fb1e98877f 100644 --- a/go/cmd/vttablet/cli/cli.go +++ b/go/cmd/vttablet/cli/cli.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -17,7 +17,6 @@ limitations under the License. 
package cli import ( - "bytes" "context" "fmt" "os" @@ -26,6 +25,8 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/binlog" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -35,14 +36,13 @@ import ( "vitess.io/vitess/go/vt/tableacl/simpleacl" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vttablet/onlineddl" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletmanager" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tabletserver" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/yaml2" - "vitess.io/vitess/resources" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -79,9 +79,8 @@ See "Unmanaged Tablet" for the full guide. Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows: ` + - "* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary.\n" + + "* `--unmanaged`: This flag indicates that this tablet is running in unmanaged mode. In this mode, any reparent or replica commands are not allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. 
You should use the TabletExternallyReparented command to inform vitess of the current primary.\n" + "* `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter.\n" + - "* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on.\n" + "* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag.\n", Example: ` vttablet \ @@ -100,24 +99,49 @@ vttablet \ PreRunE: servenv.CobraPreRunE, RunE: run, } + + srvTopoCounts *stats.CountersWithSingleLabel ) +func init() { + srvTopoCounts = stats.NewCountersWithSingleLabel("TabletSrvTopo", "Resilient srvtopo server operations", "type") +} + func run(cmd *cobra.Command, args []string) error { servenv.Init() + // Ensure we open the topo before we start the context, so that the + // defer that closes the topo runs after cancelling the context. + // This ensures that we've properly closed things like the watchers + // at that point. + ts := topo.Open() + defer ts.Close() + + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + tabletAlias, err := topoproto.ParseTabletAlias(tabletPath) if err != nil { return fmt.Errorf("failed to parse --tablet-path: %w", err) } + mysqlVersion := servenv.MySQLServerVersion() + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: mysqlVersion, + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot initialize vtenv: %w", err) + } + // config and mycnf initializations are intertwined. 
- config, mycnf, err := initConfig(tabletAlias) + config, mycnf, err := initConfig(tabletAlias, env.CollationEnv()) if err != nil { return err } - ts := topo.Open() - qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias) + qsc, err := createTabletServer(ctx, env, config, ts, tabletAlias, srvTopoCounts) if err != nil { ts.Close() return err @@ -126,32 +150,28 @@ func run(cmd *cobra.Command, args []string) error { mysqld := mysqlctl.NewMysqld(config.DB) servenv.OnClose(mysqld.Close) - if err := extractOnlineDDL(); err != nil { - ts.Close() - return fmt.Errorf("failed to extract online DDL binaries: %w", err) - } - // Initialize and start tm. gRPCPort := int32(0) if servenv.GRPCPort() != 0 { gRPCPort = int32(servenv.GRPCPort()) } - tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB) + tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB, env.CollationEnv()) if err != nil { return fmt.Errorf("failed to parse --tablet-path: %w", err) } tm = &tabletmanager.TabletManager{ - BatchCtx: context.Background(), + BatchCtx: ctx, + Env: env, TopoServer: ts, Cnf: mycnf, MysqlDaemon: mysqld, DBConfigs: config.DB.Clone(), QueryServiceControl: qsc, - UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), - VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), - VDiffEngine: vdiff.NewEngine(config, ts, tablet), + UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine(), env.Parser()), + VREngine: vreplication.NewEngine(env, config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), + VDiffEngine: vdiff.NewEngine(ts, tablet, env.CollationEnv(), env.Parser()), } - if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil { + if err := tm.Start(tablet, config); err != nil { ts.Close() return fmt.Errorf("failed to parse 
--tablet-path or initialize DB credentials: %w", err) } @@ -159,9 +179,6 @@ func run(cmd *cobra.Command, args []string) error { // Close the tm so that our topo entry gets pruned properly and any // background goroutines that use the topo connection are stopped. tm.Close() - - // tm uses ts. So, it should be closed after tm. - ts.Close() }) servenv.RunDefault() @@ -169,7 +186,7 @@ func run(cmd *cobra.Command, args []string) error { return nil } -func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) { +func initConfig(tabletAlias *topodatapb.TabletAlias, collationEnv *collations.Environment) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) { tabletenv.Init() // Load current config after tabletenv.Init, because it changes it. config := tabletenv.NewCurrentConfig() @@ -211,41 +228,23 @@ func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, * // If connection parameters were specified, socketFile will be empty. // Otherwise, the socketFile (read from mycnf) will be used to initialize // dbconfigs. - config.DB.InitWithSocket(socketFile) + config.DB.InitWithSocket(socketFile, collationEnv) for _, cfg := range config.ExternalConnections { - cfg.InitWithSocket("") + cfg.InitWithSocket("", collationEnv) } return config, mycnf, nil } -// extractOnlineDDL extracts the gh-ost binary from this executable. gh-ost is appended -// to vttablet executable by `make build` with a go:embed -func extractOnlineDDL() error { - if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride { - if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil { - // One possibility of failure is that gh-ost is up and running. In that case, - // let's pause and check if the running gh-ost is exact same binary as the one we wish to extract. 
- foundBytes, _ := os.ReadFile(binaryFileName) - if bytes.Equal(resources.GhostBinary, foundBytes) { - // OK, it's the same binary, there is no need to extract the file anyway - return nil - } - return err - } - } - - return nil -} - -func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) (*tabletserver.TabletServer, error) { +func createTabletServer(ctx context.Context, env *vtenv.Environment, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, srvTopoCounts *stats.CountersWithSingleLabel) (*tabletserver.TabletServer, error) { if tableACLConfig != "" { // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory tableacl.Register("simpleacl", &simpleacl.Factory{}) } else if enforceTableACLConfig { return nil, fmt.Errorf("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") } + // creates and registers the query service - qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias) + qsc := tabletserver.NewTabletServer(ctx, env, "", config, ts, tabletAlias, srvTopoCounts) servenv.OnRun(func() { qsc.Register() addStatusParts(qsc) diff --git a/go/cmd/vttablet/cli/plugin_cephbackupstorage.go b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go index 171198f5e29..7755e1cae2d 100644 --- a/go/cmd/vttablet/cli/plugin_cephbackupstorage.go +++ b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_consultopo.go b/go/cmd/vttablet/cli/plugin_consultopo.go index a128f294a42..56d178e2975 100644 --- a/go/cmd/vttablet/cli/plugin_consultopo.go +++ b/go/cmd/vttablet/cli/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_s3backupstorage.go b/go/cmd/vttablet/cli/plugin_s3backupstorage.go index 4b3ecb33edb..e09f6060809 100644 --- a/go/cmd/vttablet/cli/plugin_s3backupstorage.go +++ b/go/cmd/vttablet/cli/plugin_s3backupstorage.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_sysloglogger.go b/go/cmd/vttablet/cli/plugin_sysloglogger.go index a7260d6f8cc..90860abe826 100644 --- a/go/cmd/vttablet/cli/plugin_sysloglogger.go +++ b/go/cmd/vttablet/cli/plugin_sysloglogger.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. 
@@ -7,7 +9,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/plugin_zk2topo.go b/go/cmd/vttablet/cli/plugin_zk2topo.go index d71a7e2e196..0b2884cc258 100644 --- a/go/cmd/vttablet/cli/plugin_zk2topo.go +++ b/go/cmd/vttablet/cli/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/vttablet/cli/status.go b/go/cmd/vttablet/cli/status.go index 762a9fa646e..de3bfcbce74 100644 --- a/go/cmd/vttablet/cli/status.go +++ b/go/cmd/vttablet/cli/status.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vttestserver/cli/main.go b/go/cmd/vttestserver/cli/main.go index ea92ae7dda0..5601623b2fa 100644 --- a/go/cmd/vttestserver/cli/main.go +++ b/go/cmd/vttestserver/cli/main.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "os/signal" + "path" "strconv" "strings" "syscall" @@ -47,13 +48,14 @@ type topoFlags struct { } var ( - basePort int - config vttest.Config - doSeed bool - mycnf string - protoTopo string - seed vttest.SeedConfig - topo topoFlags + basePort int + config vttest.Config + doSeed bool + mycnf string + protoTopo string + seed vttest.SeedConfig + topo topoFlags + doCreateTCPUser bool ) func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) { @@ -104,6 +106,7 @@ func New() (cmd *cobra.Command) { Short: "vttestserver allows users to spawn a self-contained Vitess server for local testing/CI.", Args: cobra.NoArgs, PreRunE: servenv.CobraPreRunE, + Version: servenv.AppVersion.String(), RunE: run, } @@ -177,6 +180,9 @@ func New() (cmd *cobra.Command) { cmd.Flags().StringVar(&config.MySQLBindHost, "mysql_bind_host", "localhost", "which host to bind vtgate mysql listener to") + cmd.Flags().StringVar(&config.VtComboBindAddress, "vtcombo-bind-host", "localhost", + "which host to bind vtcombo servenv listener to") + cmd.Flags().StringVar(&mycnf, "extra_my_cnf", "", "extra files to add to the config, separated by ':'") @@ -216,25 +222,46 @@ func New() (cmd *cobra.Command) { cmd.Flags().StringVar(&config.ExternalTopoGlobalRoot, "external_topo_global_root", "", "the path of the global topology data in the global topology server for vtcombo process") cmd.Flags().DurationVar(&config.VtgateTabletRefreshInterval, "tablet_refresh_interval", 10*time.Second, "Interval at which vtgate refreshes tablet information from topology server.") + + cmd.Flags().BoolVar(&doCreateTCPUser, "initialize-with-vt-dba-tcp", false, "If this flag is enabled, MySQL will be initialized with an additional 
user named vt_dba_tcp, who will have access via TCP/IP connection.") + + cmd.Flags().BoolVar(&config.NoScatter, "no_scatter", false, "when set to true, the planner will fail instead of producing a plan that includes scatter queries") acl.RegisterFlags(cmd.Flags()) return cmd } -func newEnv() (env vttest.Environment, err error) { - if basePort != 0 { +func newEnv() (env *vttest.LocalTestEnv, err error) { + if basePort == 0 { + env, err = vttest.NewLocalTestEnv(0) + } else { if config.DataDir == "" { env, err = vttest.NewLocalTestEnv(basePort) - if err != nil { - return - } } else { env, err = vttest.NewLocalTestEnvWithDirectory(basePort, config.DataDir) - if err != nil { - return - } } } + if err != nil { + return + } + + if doCreateTCPUser { + // The original initFile does not have any users who can access through TCP/IP connection. + // Here we update the init file to create the user. + mysqlInitFile := env.InitDBFile + createUserCmd := ` + # Admin user for TCP/IP connection with all privileges. 
+ CREATE USER 'vt_dba_tcp'@'%'; + GRANT ALL ON *.* TO 'vt_dba_tcp'@'%'; + GRANT GRANT OPTION ON *.* TO 'vt_dba_tcp'@'%'; + ` + newInitFile := path.Join(env.Directory(), "init_db_with_vt_dba_tcp.sql") + err = vttest.WriteInitDBFile(mysqlInitFile, createUserCmd, newInitFile) + if err != nil { + return + } + env.InitDBFile = newInitFile + } if protoTopo == "" { config.Topology, err = topo.buildTopology() diff --git a/go/cmd/vttestserver/cli/main_test.go b/go/cmd/vttestserver/cli/main_test.go index 39dc8e4ea78..75597ffe687 100644 --- a/go/cmd/vttestserver/cli/main_test.go +++ b/go/cmd/vttestserver/cli/main_test.go @@ -20,7 +20,7 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "os/exec" "path" "strings" @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/tlstest" @@ -185,21 +186,81 @@ func TestForeignKeysAndDDLModes(t *testing.T) { assert.NoError(t, err) } -func TestCanGetKeyspaces(t *testing.T) { +func TestNoScatter(t *testing.T) { conf := config defer resetConfig(conf) - cluster, err := startCluster() + cluster, err := startCluster("--no_scatter") assert.NoError(t, err) defer cluster.TearDown() - assertGetKeyspaces(t, cluster) + _ = execOnCluster(cluster, "app_customer", func(conn *mysql.Conn) error { + _, err = conn.ExecuteFetch("SELECT * FROM customers", 100, false) + require.ErrorContains(t, err, "plan includes scatter, which is disallowed") + return nil + }) +} + +// TestCreateDbaTCPUser tests that the vt_dba_tcp user is created and can connect through TCP/IP connection +// when --initialize-with-vt-dba-tcp is set to true. 
+func TestCreateDbaTCPUser(t *testing.T) { + conf := config + defer resetConfig(conf) + + clusterInstance, err := startCluster("--initialize-with-vt-dba-tcp=true") + assert.NoError(t, err) + defer clusterInstance.TearDown() + + defer func() { + if t.Failed() { + cluster.PrintFiles(t, clusterInstance.Env.Directory(), "init_db_with_vt_dba_tcp.sql") + } + }() + + // Ensure that the vt_dba_tcp user was created and can connect through TCP/IP connection. + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "127.0.0.1", + Uname: "vt_dba_tcp", + Port: clusterInstance.Env.PortForProtocol("mysql", ""), + } + conn, err := mysql.Connect(ctx, &vtParams) + assert.NoError(t, err) + defer conn.Close() + + // Ensure that the existing vt_dba user remains unaffected, meaning it cannot connect through TCP/IP connection. + vtParams.Uname = "vt_dba" + _, err = mysql.Connect(ctx, &vtParams) + assert.Error(t, err) +} + +func TestCanGetKeyspaces(t *testing.T) { + conf := config + defer resetConfig(conf) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + clusterInstance, err := startCluster() + assert.NoError(t, err) + defer clusterInstance.TearDown() + + defer func() { + if t.Failed() { + cluster.PrintFiles(t, clusterInstance.Env.Directory(), "vtcombo.INFO", "error.log") + } + }() + + assertGetKeyspaces(ctx, t, clusterInstance) } func TestExternalTopoServerConsul(t *testing.T) { conf := config defer resetConfig(conf) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + // Start a single consul in the background. 
cmd, serverAddr := startConsul(t) defer func() { @@ -218,7 +279,7 @@ func TestExternalTopoServerConsul(t *testing.T) { assert.NoError(t, err) defer cluster.TearDown() - assertGetKeyspaces(t, cluster) + assertGetKeyspaces(ctx, t, cluster) } func TestMtlsAuth(t *testing.T) { @@ -386,16 +447,16 @@ func resetConfig(conf vttest.Config) { } func randomPort() int { - v := rand.Int31n(20000) + v := rand.Int32N(20000) return int(v + 10000) } -func assertGetKeyspaces(t *testing.T, cluster vttest.LocalCluster) { - client, err := vtctlclient.New(fmt.Sprintf("localhost:%v", cluster.GrpcPort())) +func assertGetKeyspaces(ctx context.Context, t *testing.T, cluster vttest.LocalCluster) { + client, err := vtctlclient.New(ctx, fmt.Sprintf("localhost:%v", cluster.GrpcPort())) assert.NoError(t, err) defer client.Close() stream, err := client.ExecuteVtctlCommand( - context.Background(), + ctx, []string{ "GetKeyspaces", "--server", diff --git a/go/cmd/vttlstest/cli/vttlstest.go b/go/cmd/vttlstest/cli/vttlstest.go index 4e0f9c2b95e..9791645cdc2 100644 --- a/go/cmd/vttlstest/cli/vttlstest.go +++ b/go/cmd/vttlstest/cli/vttlstest.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/vttlstest/vttlstest.go b/go/cmd/vttlstest/vttlstest.go index 08e994c096d..8b98687c7a8 100644 --- a/go/cmd/vttlstest/vttlstest.go +++ b/go/cmd/vttlstest/vttlstest.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/add_auth.go b/go/cmd/zk/command/add_auth.go index 566c463f4a8..117ddf1cd8a 100644 --- a/go/cmd/zk/command/add_auth.go +++ b/go/cmd/zk/command/add_auth.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/cat.go b/go/cmd/zk/command/cat.go index 1d5460f7006..6dae3c903a9 100644 --- a/go/cmd/zk/command/cat.go +++ b/go/cmd/zk/command/cat.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/chmod.go b/go/cmd/zk/command/chmod.go index 39125d618c4..63bd103fdb2 100644 --- a/go/cmd/zk/command/chmod.go +++ b/go/cmd/zk/command/chmod.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/cp.go b/go/cmd/zk/command/cp.go index e89486413ea..b45baab1b6e 100644 --- a/go/cmd/zk/command/cp.go +++ b/go/cmd/zk/command/cp.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/edit.go b/go/cmd/zk/command/edit.go index ec4b74c4b62..90348161502 100644 --- a/go/cmd/zk/command/edit.go +++ b/go/cmd/zk/command/edit.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/ls.go b/go/cmd/zk/command/ls.go index 83c1d31363b..5d28f20ae60 100644 --- a/go/cmd/zk/command/ls.go +++ b/go/cmd/zk/command/ls.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/rm.go b/go/cmd/zk/command/rm.go index 5e5b5f4c494..8b710b2fb74 100644 --- a/go/cmd/zk/command/rm.go +++ b/go/cmd/zk/command/rm.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/root.go b/go/cmd/zk/command/root.go index f3f02e7d4f2..2aabcd50e4f 100644 --- a/go/cmd/zk/command/root.go +++ b/go/cmd/zk/command/root.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/stat.go b/go/cmd/zk/command/stat.go index 713a68a3d4e..28aa6bb3465 100644 --- a/go/cmd/zk/command/stat.go +++ b/go/cmd/zk/command/stat.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/touch.go b/go/cmd/zk/command/touch.go index 76c390cf169..53e80a05214 100644 --- a/go/cmd/zk/command/touch.go +++ b/go/cmd/zk/command/touch.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/unzip.go b/go/cmd/zk/command/unzip.go index f4c800e0533..81e04bf4564 100644 --- a/go/cmd/zk/command/unzip.go +++ b/go/cmd/zk/command/unzip.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/wait.go b/go/cmd/zk/command/wait.go index 864f6e83626..8aa844d8bc6 100644 --- a/go/cmd/zk/command/wait.go +++ b/go/cmd/zk/command/wait.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/watch.go b/go/cmd/zk/command/watch.go index eb28cc29ca2..7d6de784718 100644 --- a/go/cmd/zk/command/watch.go +++ b/go/cmd/zk/command/watch.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zk/command/zip.go b/go/cmd/zk/command/zip.go index b765f5bb00e..5f06b97c508 100644 --- a/go/cmd/zk/command/zip.go +++ b/go/cmd/zk/command/zip.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/cmd/zk/internal/zkfilepath/zfilepath_test.go b/go/cmd/zk/internal/zkfilepath/zfilepath_test.go new file mode 100644 index 00000000000..2ab649fb3a4 --- /dev/null +++ b/go/cmd/zk/internal/zkfilepath/zfilepath_test.go @@ -0,0 +1,97 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package zkfilepath + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/z-division/go-zookeeper/zk" +) + +func TestClean(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"/path/to/some/dir/", "/path/to/some/dir"}, + {"/", "/"}, + {"", "."}, + {"/root", "/root"}, + {"no/slash/at/the/end", "no/slash/at/the/end"}, + } + + for _, test := range tests { + result := Clean(test.input) + assert.Equal(t, test.expected, result) + } +} + +func TestFormat(t *testing.T) { + testTime := time.Now() + stat := &zk.Stat{ + DataLength: 100, + NumChildren: 1, + Mtime: testTime.UnixMilli(), + EphemeralOwner: 1, + } + + tests := []struct { + stat *zk.Stat + zkPath string + showFullPath bool + longListing bool + expected string + }{ + // Checking the effect of showFullPath without longListing + {stat, "/path/to/node", true, false, "/path/to/node\n"}, + {stat, "/path/to/node", false, false, "node\n"}, + + // Checking the effect of showFullPath with longListing + {stat, "/path/to/node", true, true, "nrw-rw-rw- zk zk 100 " + 
testTime.Format(TimeFmt) + " /path/to/node\n"}, + {stat, "/path/to/node", false, true, "nrw-rw-rw- zk zk 100 " + testTime.Format(TimeFmt) + " node\n"}, + } + + for _, test := range tests { + result := Format(test.stat, test.zkPath, test.showFullPath, test.longListing) + assert.Equal(t, test.expected, result) + } +} + +func TestGetPermissions(t *testing.T) { + tests := []struct { + numChildren int32 + dataLength int32 + ephemeralOwner int64 + expected string + }{ + // Children, Data, Ephemeral, Expected + {0, 0, 0, "-rw-rw-rw-"}, + {1, 1, 0, "nrw-rw-rw-"}, + {1, 0, 123, "drwxrwxrwx"}, + {0, 1, 1, "erw-rw-rw-"}, + {1, 1, 0, "nrw-rw-rw-"}, + {0, 0, 1, "erw-rw-rw-"}, + {0, 0, 0, "-rw-rw-rw-"}, + } + + for _, test := range tests { + result := getPermissions(test.numChildren, test.dataLength, test.ephemeralOwner) + assert.Equal(t, test.expected, result) + } +} diff --git a/go/cmd/zk/internal/zkfilepath/zkfilepath.go b/go/cmd/zk/internal/zkfilepath/zkfilepath.go index 7febc7a9677..2e9edb1fdf8 100644 --- a/go/cmd/zk/internal/zkfilepath/zkfilepath.go +++ b/go/cmd/zk/internal/zkfilepath/zkfilepath.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -44,7 +44,7 @@ func Clean(zkPath string) string { // Format returns a path formatted to a canonical string. 
func Format(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) string { - var name, perms string + var name string if !showFullPath { name = path.Base(zkPath) @@ -53,19 +53,9 @@ func Format(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) s } if longListing { - if stat.NumChildren > 0 { - // FIXME(msolomon) do permissions check? - perms = "drwxrwxrwx" - if stat.DataLength > 0 { - // give a visual indication that this node has data as well as children - perms = "nrw-rw-rw-" - } - } else if stat.EphemeralOwner != 0 { - perms = "erw-rw-rw-" - } else { - perms = "-rw-rw-rw-" - } - // always print the Local version of the time. zookeeper's + perms := getPermissions(stat.NumChildren, stat.DataLength, stat.EphemeralOwner) + + // Always print the Local version of the time. zookeeper's // go / C library would return a local time anyway, but // might as well be sure. return fmt.Sprintf("%v %v %v % 8v % 20v %v\n", perms, "zk", "zk", stat.DataLength, zk2topo.Time(stat.Mtime).Local().Format(TimeFmt), name) @@ -73,3 +63,19 @@ func Format(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) s return fmt.Sprintf("%v\n", name) } } + +// Utility function to return the permissions for a node +func getPermissions(numChildren int32, dataLength int32, ephemeralOwner int64) string { + if numChildren > 0 { + // FIXME(msolomon) do permissions check? 
+ if dataLength > 0 { + // give a visual indication that this node has data as well as children + return "nrw-rw-rw-" + } + return "drwxrwxrwx" + } else if ephemeralOwner != 0 { + return "erw-rw-rw-" + } else { + return "-rw-rw-rw-" + } +} diff --git a/go/cmd/zk/internal/zkfs/zkfs.go b/go/cmd/zk/internal/zkfs/zkfs.go index 9bab19ec1e4..9f2fe732ec8 100644 --- a/go/cmd/zk/internal/zkfs/zkfs.go +++ b/go/cmd/zk/internal/zkfs/zkfs.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -158,6 +158,10 @@ func IsFile(path string) bool { // ParsePermMode parses the mode string as a perm mask. func ParsePermMode(mode string) (mask int32) { + if len(mode) < 2 { + panic("invalid mode") + } + for _, c := range mode[2:] { mask |= charPermMap[string(c)] } diff --git a/go/cmd/zk/internal/zkfs/zkfs_test.go b/go/cmd/zk/internal/zkfs/zkfs_test.go new file mode 100644 index 00000000000..2aed00af4c1 --- /dev/null +++ b/go/cmd/zk/internal/zkfs/zkfs_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package zkfs + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/z-division/go-zookeeper/zk" +) + +func TestIsFile(t *testing.T) { + assert.True(t, IsFile("/zk/somepath")) + assert.False(t, IsFile("/nonzk/somepath")) + assert.False(t, IsFile("nonzkpath")) +} + +func TestParsePermMode(t *testing.T) { + assert.Equal(t, int32(0), ParsePermMode("zk")) + assert.Equal(t, int32(zk.PermRead|zk.PermWrite), ParsePermMode("zkrw")) + assert.Equal(t, int32(zk.PermRead|zk.PermWrite|zk.PermAdmin), ParsePermMode("zkrwa")) + assert.PanicsWithValue(t, "invalid mode", func() { + ParsePermMode("") + }) + assert.PanicsWithValue(t, "invalid mode", func() { + ParsePermMode("z") + }) +} + +func TestFormatACL(t *testing.T) { + testCases := []struct { + name string + acl zk.ACL + expected string + }{ + { + name: "Full Permissions", + acl: zk.ACL{Perms: zk.PermAll}, + expected: "rwdca", + }, + { + name: "Read and Write Permissions", + acl: zk.ACL{Perms: zk.PermRead | zk.PermWrite}, + expected: "rw---", + }, + { + name: "No Permissions", + acl: zk.ACL{Perms: 0}, + expected: "-----", + }, + { + name: "Create and Admin Permissions", + acl: zk.ACL{Perms: zk.PermAdmin | zk.PermCreate}, + expected: "---ca", + }, + { + name: "Mixed Permissions", + acl: zk.ACL{Perms: zk.PermRead | zk.PermDelete | zk.PermAdmin}, + expected: "r-d-a", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, FormatACL(tc.acl)) + }) + } +} diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go index f03ac41c6ef..f0c39d6b0f8 100644 --- a/go/cmd/zk/zkcmd.go +++ b/go/cmd/zk/zkcmd.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied. See the License for the specific language governing permissions and diff --git a/go/cmd/zkctld/cli/zkctld.go b/go/cmd/zkctld/cli/zkctld.go index 101f1013722..5ac3520868e 100644 --- a/go/cmd/zkctld/cli/zkctld.go +++ b/go/cmd/zkctld/cli/zkctld.go @@ -41,6 +41,7 @@ var ( Use: "zkctld", Short: "zkctld is a daemon that starts or initializes ZooKeeper with Vitess-specific configuration. It will stay running as long as the underlying ZooKeeper server, and will pass along SIGTERM.", Args: cobra.NoArgs, + Version: servenv.AppVersion.String(), PersistentPreRunE: servenv.CobraPreRunE, PostRun: func(cmd *cobra.Command, args []string) { logutil.Flush() diff --git a/go/errors/errors.go b/go/errors/errors.go index d3349d320ed..22a3ba937e9 100644 --- a/go/errors/errors.go +++ b/go/errors/errors.go @@ -32,7 +32,7 @@ func Unwrap(err error) []error { return nil } -// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components, recursively +// UnwrapAll unwraps an error created by errors.Join() in Go 1.20, into its components, recursively func UnwrapAll(err error) (errs []error) { if err == nil { return nil @@ -46,7 +46,7 @@ func UnwrapAll(err error) (errs []error) { return []error{err} } -// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components, recursively, +// UnwrapFirst unwraps an error created by errors.Join() in Go 1.20, into its components, recursively, // and returns one (the first) unwrapped error func UnwrapFirst(err error) error { if err == nil { diff --git a/go/event/event_test.go b/go/event/event_test.go index 36d4d56bf5d..cdca98abd85 100644 --- a/go/event/event_test.go +++ b/go/event/event_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -20,6 +20,8 @@ import ( "reflect" "testing" "time" + + "github.com/stretchr/testify/assert" ) type testInterface1 interface { @@ -56,10 +58,7 @@ func TestStaticListener(t *testing.T) { AddListener(func(testEvent1) { triggered = true }) AddListener(func(testEvent2) { t.Errorf("wrong listener type triggered") }) Dispatch(testEvent1{}) - - if !triggered { - t.Errorf("static listener failed to trigger") - } + assert.True(t, triggered, "static listener failed to trigger") } func TestPointerListener(t *testing.T) { @@ -69,10 +68,7 @@ func TestPointerListener(t *testing.T) { AddListener(func(ev *testEvent2) { ev.triggered = true }) AddListener(func(testEvent2) { t.Errorf("non-pointer listener triggered on pointer type") }) Dispatch(testEvent) - - if !testEvent.triggered { - t.Errorf("pointer listener failed to trigger") - } + assert.True(t, testEvent.triggered, "pointer listener failed to trigger") } func TestInterfaceListener(t *testing.T) { @@ -82,10 +78,7 @@ func TestInterfaceListener(t *testing.T) { AddListener(func(testInterface1) { triggered = true }) AddListener(func(testInterface2) { t.Errorf("interface listener triggered on non-matching type") }) Dispatch(testEvent1{}) - - if !triggered { - t.Errorf("interface listener failed to trigger") - } + assert.True(t, triggered, "interface listener failed to trigger") } func TestEmptyInterfaceListener(t *testing.T) { @@ -94,10 +87,7 @@ func TestEmptyInterfaceListener(t *testing.T) { triggered := false AddListener(func(any) { triggered = true }) Dispatch("this should match any") - - if !triggered { - t.Errorf("any listener failed to trigger") - } + assert.True(t, triggered, "empty listener failed to trigger") } func TestMultipleListeners(t *testing.T) { @@ -144,7 +134,6 @@ func TestBadListenerWrongType(t *testing.T) { defer func() { err := recover() - if err == nil { t.Errorf("bad 
listener type (not a func) failed to trigger panic") } @@ -186,10 +175,8 @@ func TestDispatchPointerToValueInterfaceListener(t *testing.T) { triggered = true }) Dispatch(&testEvent1{}) + assert.True(t, triggered, "Dispatch by pointer failed to trigger interface listener") - if !triggered { - t.Errorf("Dispatch by pointer failed to trigger interface listener") - } } func TestDispatchValueToValueInterfaceListener(t *testing.T) { @@ -200,10 +187,7 @@ func TestDispatchValueToValueInterfaceListener(t *testing.T) { triggered = true }) Dispatch(testEvent1{}) - - if !triggered { - t.Errorf("Dispatch by value failed to trigger interface listener") - } + assert.True(t, triggered, "Dispatch by value failed to trigger interface listener") } func TestDispatchPointerToPointerInterfaceListener(t *testing.T) { @@ -212,10 +196,8 @@ func TestDispatchPointerToPointerInterfaceListener(t *testing.T) { triggered := false AddListener(func(testInterface2) { triggered = true }) Dispatch(&testEvent2{}) + assert.True(t, triggered, "interface listener failed to trigger for pointer") - if !triggered { - t.Errorf("interface listener failed to trigger for pointer") - } } func TestDispatchValueToPointerInterfaceListener(t *testing.T) { @@ -245,10 +227,8 @@ func TestDispatchUpdate(t *testing.T) { ev := &testUpdateEvent{} DispatchUpdate(ev, "hello") + assert.True(t, triggered, "listener failed to trigger on DispatchUpdate()") - if !triggered { - t.Errorf("listener failed to trigger on DispatchUpdate()") - } want := "hello" if got := ev.update.(string); got != want { t.Errorf("ev.update = %#v, want %#v", got, want) diff --git a/go/event/hooks_test.go b/go/event/hooks_test.go index 197dd59a062..3d8e3361e94 100644 --- a/go/event/hooks_test.go +++ b/go/event/hooks_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, 
software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/event/syslogger/fake_logger.go b/go/event/syslogger/fake_logger.go index 63c0942c069..852ca2a72a6 100644 --- a/go/event/syslogger/fake_logger.go +++ b/go/event/syslogger/fake_logger.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/event/syslogger/fake_logger_test.go b/go/event/syslogger/fake_logger_test.go new file mode 100644 index 00000000000..df4a8f8294e --- /dev/null +++ b/go/event/syslogger/fake_logger_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package syslogger + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetLogsForNoLogs(t *testing.T) { + tl := NewTestLogger() + errLoggerMsg := tl.getLog() + + want := loggerMsg{ + msg: "no logs!", + level: "ERROR", + } + + assert.Equal(t, errLoggerMsg, want) +} + +func TestGetAllLogs(t *testing.T) { + tl := NewTestLogger() + tl.recordInfof("Test info log") + tl.recordErrorf("Test error log") + tl.recordWarningf("Test warning log") + + want := []string{"INFO:Test info log", "ERROR:Test error log", "WARNING:Test warning log"} + loggerMsgs := tl.GetAllLogs() + + assert.Equal(t, loggerMsgs, want) +} diff --git a/go/event/syslogger/syslogger.go b/go/event/syslogger/syslogger.go index 1c8ff22136b..234f2b5a712 100644 --- a/go/event/syslogger/syslogger.go +++ b/go/event/syslogger/syslogger.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. @@ -52,9 +54,11 @@ import ( "fmt" "log/syslog" "os" + "testing" "vitess.io/vitess/go/event" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/servenv" ) // Syslogger is the interface that events should implement if they want to be @@ -143,10 +147,28 @@ func listener(ev Syslogger) { } func init() { + // We only want to init syslog when the app is being initialized + // Some binaries import the syslog package indirectly leading to + // the syslog.New function being called and this might fail if + // running inside Docker without the syslog daemon enabled, leading + // logging the error which will make glog think there are not --log_dir + // flag set as we have not parsed the flags yet. + // https://github.com/vitessio/vitess/issues/15120 + servenv.OnInit(func() { + initSyslog() + }) + + // We still do the init of syslog if we are testing this package. 
+ if testing.Testing() { + initSyslog() + } +} + +func initSyslog() { var err error writer, err = syslog.New(syslog.LOG_INFO|syslog.LOG_USER, os.Args[0]) if err != nil { - log.Errorf("can't connect to syslog") + log.Errorf("can't connect to syslog: %v", err.Error()) writer = nil } diff --git a/go/event/syslogger/syslogger_test.go b/go/event/syslogger/syslogger_test.go index 4847fecac2a..6c6a181d2e5 100644 --- a/go/event/syslogger/syslogger_test.go +++ b/go/event/syslogger/syslogger_test.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. @@ -7,7 +9,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -23,6 +25,8 @@ import ( "testing" "vitess.io/vitess/go/event" + + "github.com/stretchr/testify/assert" ) type TestEvent struct { @@ -68,10 +72,8 @@ func TestSyslog(t *testing.T) { ev := new(TestEvent) event.Dispatch(ev) + assert.True(t, ev.triggered) - if !ev.triggered { - t.Errorf("Syslog() was not called on event that implements Syslogger") - } } // TestBadWriter verifies we are still triggering (to normal logs) if @@ -85,50 +87,40 @@ func TestBadWriter(t *testing.T) { wantLevel := "ERROR" ev := &TestEvent{priority: syslog.LOG_ALERT, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().msg, wantMsg) { - t.Errorf("error log msg [%s], want msg [%s]", tl.getLog().msg, wantMsg) - } - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().msg, wantMsg)) + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) + ev = 
&TestEvent{priority: syslog.LOG_CRIT, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) + ev = &TestEvent{priority: syslog.LOG_ERR, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) + + ev = &TestEvent{priority: syslog.LOG_EMERG, message: wantMsg} + event.Dispatch(ev) + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) wantLevel = "WARNING" ev = &TestEvent{priority: syslog.LOG_WARNING, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) wantLevel = "INFO" ev = &TestEvent{priority: syslog.LOG_INFO, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) + ev = &TestEvent{priority: syslog.LOG_NOTICE, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) + ev = &TestEvent{priority: syslog.LOG_DEBUG, message: wantMsg} event.Dispatch(ev) - if !strings.Contains(tl.getLog().level, wantLevel) { - t.Errorf("error log level [%s], want level [%s]", tl.getLog().level, wantLevel) - } + assert.True(t, strings.Contains(tl.getLog().level, wantLevel)) + assert.True(t, ev.triggered) - if !ev.triggered { - t.Errorf("passed nil writer to 
client") - } } // TestWriteError checks that we don't panic on a write error. @@ -143,10 +135,8 @@ func TestInvalidSeverity(t *testing.T) { writer = fw event.Dispatch(&TestEvent{priority: syslog.Priority(123), message: "log me"}) + assert.NotEqual(t, "log me", fw.message) - if fw.message == "log me" { - t.Errorf("message was logged despite invalid severity") - } } func testSeverity(sev syslog.Priority, t *testing.T) { @@ -154,13 +144,9 @@ func testSeverity(sev syslog.Priority, t *testing.T) { writer = fw event.Dispatch(&TestEvent{priority: sev, message: "log me"}) + assert.Equal(t, sev, fw.priority) + assert.Equal(t, "log me", fw.message) - if fw.priority != sev { - t.Errorf("wrong priority: got %v, want %v", fw.priority, sev) - } - if fw.message != "log me" { - t.Errorf(`wrong message: got "%v", want "%v"`, fw.message, "log me") - } } func TestEmerg(t *testing.T) { diff --git a/go/exit/exit_test.go b/go/exit/exit_test.go index e51a0938534..7f08f4ea1e2 100644 --- a/go/exit/exit_test.go +++ b/go/exit/exit_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/fileutil/wildcards_test.go b/go/fileutil/wildcards_test.go index 9d332a507db..e4494dd7b2c 100644 --- a/go/fileutil/wildcards_test.go +++ b/go/fileutil/wildcards_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/flags/endtoend/flags_test.go b/go/flags/endtoend/flags_test.go index 25cca54caf9..cfc237dae5c 100644 --- a/go/flags/endtoend/flags_test.go +++ b/go/flags/endtoend/flags_test.go @@ -118,7 +118,7 @@ var ( func TestHelpOutput(t *testing.T) { wd, err := os.Getwd() require.NoError(t, err) - + t.Parallel() args := []string{"--help"} for binary, helptext := range helpOutput { t.Run(binary, func(t *testing.T) { diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt index a8f832d3345..044d12981d5 100644 --- a/go/flags/endtoend/mysqlctl.txt +++ b/go/flags/endtoend/mysqlctl.txt @@ -64,7 +64,7 @@ Flags: --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 
1887436800) @@ -81,17 +81,18 @@ Flags: --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --socket_file string Local unix socket file to listen on - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. --tablet_uid uint32 Tablet UID. (default 41983) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging Use "mysqlctl [command] --help" for more information about a command. 
diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt index 06b48347bf6..6bb1beb5bae 100644 --- a/go/flags/endtoend/mysqlctld.txt +++ b/go/flags/endtoend/mysqlctld.txt @@ -84,12 +84,14 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. (default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) -h, --help help for mysqlctld --init_db_sql_file string Path to .sql file to run after mysqld initialization --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -101,21 +103,23 @@ Flags: --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) --onclose_timeout duration wait no 
more than this for OnClose handlers before stopping (default 10s) - --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) + --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 5m10s) --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice + --shutdown-wait-time duration How long to wait for mysqld shutdown (default 5m0s) --socket_file string Local unix socket file to listen on - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. 
--tablet_uid uint32 Tablet UID (default 41983) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging - --wait_time duration How long to wait for mysqld startup or shutdown (default 5m0s) + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging + --wait_time duration How long to wait for mysqld startup (default 5m0s) diff --git a/go/flags/endtoend/topo2topo.txt b/go/flags/endtoend/topo2topo.txt index 4391a32a1a8..c003c3584f3 100644 --- a/go/flags/endtoend/topo2topo.txt +++ b/go/flags/endtoend/topo2topo.txt @@ -27,18 +27,19 @@ Flags: -h, --help help for topo2topo --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --to_implementation string topology implementation to copy data to --to_root string topology server root to copy data to --to_server string topology server 
address to copy data to --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt index 34bef9a05f9..8917df63c66 100644 --- a/go/flags/endtoend/vtaclcheck.txt +++ b/go/flags/endtoend/vtaclcheck.txt @@ -15,16 +15,17 @@ Flags: -h, --help help for vtaclcheck --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --static-auth-file string The path of the auth_server_static JSON file to check - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging 
diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index 2dd6bf3ef28..a814cd80342 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -58,6 +58,7 @@ Flags: --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup-incremental-restore-path string the directory where incremental restore files, namely binlog files, are extracted to. In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. The path should exist. When empty, the default OS temp dir is assumed. --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) --ceph_backup_storage_config string Path to JSON config file for ceph backup storage. (default "ceph_backup_config.json") @@ -137,7 +138,7 @@ Flags: --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216) --grpc_prometheus Enable gRPC monitoring with Prometheus. -h, --help help for vtbackup - --incremental_from_pos string Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position + --incremental_from_pos string Position, or name of backup from which to create an incremental backup. Default: empty. 
If given, then this backup becomes an incremental backup from given position or given backup. If value is 'auto', this backup will be taken from the last successful backup position. --init_db_name_override string (init parameter) override the name of the db used by vttablet --init_db_sql_file string path to .sql file to run after mysql_install_db --init_keyspace string (init parameter) keyspace to use for this tablet @@ -147,7 +148,7 @@ Flags: --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -174,6 +175,7 @@ Flags: --mycnf_slow_log_path string mysql slow query log path --mycnf_socket_file string mysql socket file --mycnf_tmp_dir string mysql tmp directory + --mysql-shutdown-timeout duration how long to wait for mysqld shutdown (default 5m0s) --mysql_port int mysql port (default 3306) --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --mysql_socket string path to the mysql socket @@ -181,6 +183,7 @@ Flags: --opentsdb_uri string URI of opentsdb /api/put method --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --remote_operation_timeout duration time to wait for a remote operation (default 15s) --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs. @@ -201,10 +204,10 @@ Flags: --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 --stats_drop_variables string Variables to be dropped from the list of exported variables. --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus) (default 8) --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting --tablet_manager_grpc_key string the key to use to connect @@ -230,7 +233,7 @@ Flags: --upgrade-safe Whether to use 
innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades. --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command diff --git a/go/flags/endtoend/vtbench.txt b/go/flags/endtoend/vtbench.txt index d74dc13ebc8..260451f6b03 100644 --- a/go/flags/endtoend/vtbench.txt +++ b/go/flags/endtoend/vtbench.txt @@ -64,7 +64,7 @@ Flags: --host string VTGate host(s) in the form 'host1,host2,...' --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -72,13 +72,14 @@ Flags: --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --port int VTGate port --pprof strings enable profiling + --pprof-http enable pprof http endpoints --protocol string Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet (default "mysql") --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --sql string SQL statement to execute --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --tablet_grpc_ca string the server ca to use to validate servers when connecting --tablet_grpc_cert string the cert to use to connect --tablet_grpc_crl string the server crl to use to validate server certificates when connecting @@ -89,7 +90,7 @@ Flags: --user string Username to connect using mysql (password comes from the db-credentials-file) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vtgate_grpc_ca string the server ca to use to validate servers when connecting --vtgate_grpc_cert string the cert to use to connect --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting diff --git a/go/flags/endtoend/vtclient.txt b/go/flags/endtoend/vtclient.txt index 3d17734168c..57ddf892ac8 100644 --- a/go/flags/endtoend/vtclient.txt +++ b/go/flags/endtoend/vtclient.txt @@ -28,7 +28,7 @@ Flags: --json Output JSON instead of human-readable table 
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -38,15 +38,16 @@ Flags: --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --parallel int DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing. (default 1) --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --qps int queries per second to throttle each thread at. 
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string vtgate server to connect to - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --streaming use a streaming query --target string keyspace:shard@tablet_type --timeout duration timeout for queries (default 30s) --use_random_sequence use random sequence for generating [min_sequence_id, max_sequence_id) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index 71c11c54088..fd09f940b76 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -38,6 +38,7 @@ Flags: --buffer_window duration Duration for how long a request should be buffered at most. (default 10s) --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup-incremental-restore-path string the directory where incremental restore files, namely binlog files, are extracted to. In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. The path should exist. When empty, the default OS temp dir is assumed. --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. 
(default 10m0s) --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified @@ -108,7 +109,6 @@ Flags: --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default "direct") --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY) --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s) - --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents. --emit_stats If set, emit stats to push-based monitoring and stats backends --enable-consolidator Synonym to -enable_consolidator (default true) --enable-consolidator-replicas Synonym to -enable_consolidator_replicas @@ -139,7 +139,7 @@ Flags: --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s) --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s) - --gh-ost-path string override default gh-ost binary full path + --gh-ost-path string override default gh-ost binary full path (default "gh-ost") --grpc-send-session-in-streaming If set, will send the session as last packet in streaming api to support transactions in streaming --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups. 
--grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin. @@ -163,8 +163,11 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. (default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal. --health_check_interval duration Interval between health checks (default 20s) + --healthcheck-dial-concurrency int Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000. (default 1024) --healthcheck_retry_delay duration health check retry delay (default 2ms) --healthcheck_timeout duration the health check timeout period (default 1m0s) --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks. @@ -189,7 +192,7 @@ Flags: --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) --lock_heartbeat_time duration If there is lock function used. 
This will keep the lock connection active by using this heartbeat (default 5s) --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_queries_to_file string Enable query logging to the specified file @@ -222,11 +225,13 @@ Flags: --mycnf_tmp_dir string mysql tmp directory --mysql-server-keepalive-period duration TCP period between keep-alives --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers + --mysql-shutdown-timeout duration timeout to use when MySQL is being shut down. (default 5m0s) --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections. --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static") --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP") --mysql_port int mysql port (default 3306) --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance. + --mysql_server_flush_delay duration Delay after which buffered response will be flushed to the client. (default 100ms) --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. 
(default -1) --mysql_server_query_timeout duration mysql query timeout --mysql_server_read_timeout duration connection read timeout @@ -254,10 +259,11 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --proto_topo vttest.TopoData vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information. --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them - --pt-osc-path string override default pt-online-schema-change binary full path + --pt-osc-path string override default pt-online-schema-change binary full path (default "/usr/bin/pt-online-schema-change") --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog") @@ -269,30 +275,27 @@ Flags: --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables). --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results - --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. 
This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) + --queryserver-config-idle-timeout duration query server idle timeout, vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages the number of connection objects and optimizes the pool performance. (default 30m0s) --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000) --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4) --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s) --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting - --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s) + --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, the connection will be removed from pool upon the next time it is returned to the pool.
--queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432) - --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s) - --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) - --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) + --queryserver-config-query-pool-timeout duration query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. + --queryserver-config-query-timeout duration query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) - --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. 
vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) + --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) - --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. (default 0s) - --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection + --queryserver-config-stream-pool-timeout duration query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. --queryserver-config-strict-table-acl only allow queries that pass table acl checks --queryserver-config-terse-errors prevent bind vars from escaping in client error messages --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. 
by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) - --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s) + --queryserver-config-transaction-timeout duration query server transaction timeout, a transaction will be killed if it takes longer than this value (default 30s) --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) - --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000) --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this --queryserver-enable-settings-pool Enable pooling of connections with modified system settings (default true) --queryserver-enable-views Enable views support in vttablet. @@ -317,7 +320,7 @@ Flags: --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s) - --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. 
(default 0s) + --shutdown_grace_period duration how long to wait for queries and transactions to complete during graceful shutdown. (default 3s) --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s) @@ -329,18 +332,18 @@ Flags: --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 --stats_drop_variables string Variables to be dropped from the list of exported variables. --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768) --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class - --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default "hold,purge,evac,drop") + --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included) (default "hold,purge,evac,drop") --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. 
--tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch. --tablet_health_keep_alive duration close streaming tablet health connection if there are no requests for this long (default 5m0s) --tablet_hostname string if not empty, this hostname will be assumed instead of trying to resolve it --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus) (default 8) --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting --tablet_manager_grpc_key string the key to use to connect @@ -348,8 +351,9 @@ Flags: --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s) --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true) + --tablet_types_to_wait strings Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types. --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{ "{{.GetTabletHostPort}}" }}") - --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 
'replica' aways implicitly included (default "replica") + --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica") --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") --topo_consul_lock_session_ttl string TTL for consul session. @@ -372,6 +376,7 @@ Flags: --tracing-enable-logging whether to enable logging in the tracing service --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") + --track-udfs Track UDFs in vtgate. --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog") --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit. @@ -393,17 +398,15 @@ Flags: --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx_throttler_healthcheck_cells strings A comma-separated list of cells. 
Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler. --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s) + --unmanaged Indicates an unmanaged tablet, i.e. using an external mysql-compatible database --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vreplication-parallel-insert-workers int Number of parallel insertion workers to use during copy phase. Set <= 1 to disable parallelism, or > 1 to enable concurrent insertion during copy phase. (default 1) --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s) --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000) --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. 
(default 43200) --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 3) - --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s) - --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s) - --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s) --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1) --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence --vreplication_net_read_timeout int Session value of net_read_timeout for vreplication, in seconds (default 300) @@ -411,7 +414,6 @@ Flags: --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s) --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s) --vreplication_store_compressed_gtid Store compressed gtids in the pos column of the sidecar database's vreplication table - --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY") --vschema-persistence-dir string If set, per-keyspace vschema will be persisted in this directory and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. This is useful when running vtcombo as an application development container (e.g. vttestserver) where you want to keep the same vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. 
Needless to say, this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if you require a more complete solution. This flag is ignored if --external_topo_server is set. --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users. --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864) diff --git a/go/flags/endtoend/vtctlclient.txt b/go/flags/endtoend/vtctlclient.txt index 7fa186acbd0..3c9c0a3cbb0 100644 --- a/go/flags/endtoend/vtctlclient.txt +++ b/go/flags/endtoend/vtctlclient.txt @@ -22,23 +22,26 @@ Usage of vtctlclient: --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors + --log_link string If non-empty, add symbolic links in this directory to the log files --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms. 
--logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string server to use for connection - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --tracer string tracing service to use (default "noop") --tracing-enable-logging whether to enable logging in the tracing service --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc") --vtctld_grpc_ca string the server ca to use to validate servers when connecting --vtctld_grpc_cert string the cert to use to connect diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt index a9a5cebb0f3..969e4f9774e 100644 --- a/go/flags/endtoend/vtctld.txt +++ b/go/flags/endtoend/vtctld.txt @@ -37,6 +37,7 @@ Flags: --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system. --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. 
--builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup-incremental-restore-path string the directory where incremental restore files, namely binlog files, are extracted to. In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. The path should exist. When empty, the default OS temp dir is assumed. --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified @@ -82,24 +83,29 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. (default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) + --healthcheck-dial-concurrency int Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000. (default 1024) -h, --help help for vtctld --jaeger-agent-host string host and port to send spans to. 
if empty, no tracing will be done --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --max-stack-size int configure the maximum stack size in bytes (default 67108864) + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) --opentsdb_uri string URI of opentsdb /api/put method --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --remote_operation_timeout duration time to wait for a remote operation (default 15s) @@ -126,7 +132,7 @@ Flags: --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. 
Example: label1:value1,label2:value2 --stats_drop_variables string Variables to be dropped from the list of exported variables. --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. --tablet_grpc_ca string the server ca to use to validate servers when connecting @@ -137,7 +143,7 @@ Flags: --tablet_health_keep_alive duration close streaming tablet health connection if there are no requests for this long (default 5m0s) --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus) (default 8) --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting --tablet_manager_grpc_key string the key to use to connect @@ -171,5 +177,5 @@ Flags: --tracing-sampling-type string sampling strategy to use for jaeger. 
possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vtctld_sanitize_log_messages When true, vtctld sanitizes logging. diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt index 7fddb7eebfe..393b9ada10d 100644 --- a/go/flags/endtoend/vtctldclient.txt +++ b/go/flags/endtoend/vtctldclient.txt @@ -1,4 +1,8 @@ Executes a cluster management command on the remote vtctld server. +If there are no running vtctld servers -- for example when bootstrapping +a new Vitess cluster -- you can specify a --server value of 'internal'. +When doing so, you would use the --topo* flags so that the client can +connect directly to the topo server(s). Usage: vtctldclient [flags] @@ -7,6 +11,7 @@ Usage: Available Commands: AddCellInfo Registers a local topology service in a new cell by creating the CellInfo. AddCellsAlias Defines a group of cells that can be referenced by a single name (the alias). + ApplyKeyspaceRoutingRules Applies the provided keyspace routing rules. ApplyRoutingRules Applies the VSchema routing rules. ApplySchema Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication. ApplyShardRoutingRules Applies the provided shard routing rules. @@ -26,6 +31,7 @@ Available Commands: ExecuteFetchAsApp Executes the given query as the App user on the remote tablet. ExecuteFetchAsDBA Executes the given query as the DBA user on the remote tablet. ExecuteHook Runs the specified hook on the given tablet. + ExecuteMultiFetchAsDBA Executes given multiple queries as the DBA user on the remote tablet. 
FindAllShardsInKeyspace Returns a map of shard names to shard references for a given keyspace. GenerateShardRanges Print a set of shard ranges assuming a keyspace with N shards. GetBackups Lists backups for the given shard. @@ -34,11 +40,13 @@ Available Commands: GetCellsAliases Gets all CellsAlias objects in the cluster. GetFullStatus Outputs a JSON structure that contains full status of MySQL including the replication information, semi-sync information, GTID information among others. GetKeyspace Returns information about the given keyspace from the topology. + GetKeyspaceRoutingRules Displays the currently active keyspace routing rules. GetKeyspaces Returns information about every keyspace in the topology. GetPermissions Displays the permissions for a tablet. GetRoutingRules Displays the VSchema routing rules. GetSchema Displays the full schema for a tablet, optionally restricted to the specified tables/views. GetShard Returns information about a shard in the topology. + GetShardReplication Returns information about the replication relationships for a shard in the given cell(s). GetShardRoutingRules Displays the currently active shard routing rules as a JSON document. GetSrvKeyspaceNames Outputs a JSON mapping of cell=>keyspace names served in that cell. Omit to query all cells. GetSrvKeyspaces Returns the SrvKeyspaces for the given keyspace in one or more cells. 
@@ -115,18 +123,23 @@ Flags: -h, --help help for vtctldclient --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory + --log_link string If non-empty, add symbolic links in this directory to the log files --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) + --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms. --logtostderr log to standard error instead of files --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string server to use for the connection (required) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) + --topo-global-root string the path of the global topology data in the global topology server (default "/vitess/global") + --topo-global-server-address strings the address of the global topology server(s) (default [localhost:2379]) + --topo-implementation string the topology implementation to use (default "etcd2") -v, --v Level log level for V logs --version version for vtctldclient - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag 
comma-separated list of pattern=N settings for file-filtered logging --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc") --vtctld_grpc_ca string the server ca to use to validate servers when connecting --vtctld_grpc_cert string the cert to use to connect diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt index f75559474c0..fdd289e63c7 100644 --- a/go/flags/endtoend/vtexplain.txt +++ b/go/flags/endtoend/vtexplain.txt @@ -54,7 +54,7 @@ Flags: --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace --ks-shard-map-file string File containing json blob of keyspace name -> shard name -> ShardReference object - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -64,6 +64,7 @@ Flags: --output-mode string Output in human-friendly text or json (default "text") --planner-version string Sets the default planner to use. 
Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW") --schema string The SQL table schema @@ -74,9 +75,9 @@ Flags: --sql-file string Identifies the file that contains the SQL commands to analyze --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vschema string Identifies the VTGate routing schema --vschema-file string Identifies the VTGate routing schema file diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt index 6bad7c768aa..7d0b3272cc8 100644 --- a/go/flags/endtoend/vtgate.txt +++ b/go/flags/endtoend/vtgate.txt @@ -93,7 +93,10 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. 
(default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal. + --healthcheck-dial-concurrency int Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000. (default 1024) --healthcheck_retry_delay duration health check retry delay (default 2ms) --healthcheck_timeout duration the health check timeout period (default 1m0s) -h, --help help for vtgate @@ -105,7 +108,7 @@ Flags: --legacy_replication_lag_algorithm Use the legacy algorithm when selecting vttablets for serving. (default true) --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_queries_to_file string Enable query logging to the specified file @@ -163,6 +166,7 @@ Flags: --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --query-timeout int Sets the default query timeout (in ms). 
Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS) @@ -188,7 +192,7 @@ Flags: --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) --statsd_address string Address for statsd client --statsd_sample_rate float Sample rate for statsd metrics (default 1) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch. @@ -224,11 +228,12 @@ Flags: --tracing-enable-logging whether to enable logging in the tracing service --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1) --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const") + --track-udfs Track UDFs in vtgate. 
--transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI") --truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users. --vtgate-config-terse-errors prevent bind vars from escaping in returned errors --warming-reads-concurrency int Number of concurrent warming reads allowed (default 500) diff --git a/go/flags/endtoend/vtgateclienttest.txt b/go/flags/endtoend/vtgateclienttest.txt index 4580d4d6ce7..e7d8fc5e177 100644 --- a/go/flags/endtoend/vtgateclienttest.txt +++ b/go/flags/endtoend/vtgateclienttest.txt @@ -40,11 +40,13 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. (default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. 
(default 10s) -h, --help help for vtgateclienttest --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -56,12 +58,13 @@ Flags: --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or 
'%' to allow all users. diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt index b13756e793c..187426a4afa 100644 --- a/go/flags/endtoend/vtorc.txt +++ b/go/flags/endtoend/vtorc.txt @@ -10,7 +10,6 @@ vtorc \ --topo_global_root /vitess/global \ --log_dir $VTDATAROOT/tmp \ --port 15000 \ - --recovery-period-block-duration "10m" \ --instance-poll-time "1s" \ --topo-information-refresh-duration "30s" \ --alsologtostderr @@ -50,7 +49,7 @@ Flags: --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -61,10 +60,10 @@ Flags: --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. 
--port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints --prevent-cross-cell-failover Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s) - --recovery-period-block-duration duration Duration for which a new recovery is blocked on an instance after running a recovery (default 30s) --recovery-poll-duration duration Timer duration on which VTOrc polls its database to run a recovery (default 1s) --remote_operation_timeout duration time to wait for a remote operation (default 15s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) @@ -76,16 +75,17 @@ Flags: --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2 --stats_drop_variables string Variables to be dropped from the list of exported variables. 
--stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus) (default 8) --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting --tablet_manager_grpc_key string the key to use to connect --tablet_manager_grpc_server_name string the server name to use to validate server certificate --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") + --tolerable-replication-lag duration Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS --topo-information-refresh-duration duration Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server (default 15s) --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. 
(default "serfHealth") @@ -106,5 +106,5 @@ Flags: --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --wait-replicas-timeout duration Duration for which to wait for replica's to respond when issuing RPCs (default 30s) diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index 30fe5e41172..38b30f46ffa 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -17,9 +17,8 @@ See "Unmanaged Tablet" for the full guide. Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows: -* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary. +* `--unmanaged`: This flag indicates that this tablet is running in unmanaged mode. In this mode, any reparent or replica commands are not allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. You should use the TabletExternallyReparented command to inform vitess of the current primary. * `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter. -* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. 
However, it will do this only if `--disable_active_reparents` was not turned on. * `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag. Usage: @@ -73,6 +72,7 @@ Flags: --binlog_user string PITR restore parameter: username of binlog server. --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup-incremental-restore-path string the directory where incremental restore files, namely binlog files, are extracted to. In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. The path should exist. When empty, the default OS temp dir is assumed. --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified @@ -140,7 +140,6 @@ Flags: --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) --dba_pool_size int Size of the connection pool for dba connections (default 20) --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s) - --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents. 
--emit_stats If set, emit stats to push-based monitoring and stats backends --enable-consolidator Synonym to -enable_consolidator (default true) --enable-consolidator-replicas Synonym to -enable_consolidator_replicas @@ -166,7 +165,7 @@ Flags: --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s) --gcs_backup_storage_bucket string Google Cloud Storage bucket to use for backups. --gcs_backup_storage_root string Root prefix for all backup-related object names. - --gh-ost-path string override default gh-ost binary full path + --gh-ost-path string override default gh-ost binary full path (default "gh-ost") --grpc_auth_mode string Which auth plugin implementation to use (eg: static) --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon). --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server. @@ -193,6 +192,8 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. (default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. 
(default 10s) --health_check_interval duration Interval between health checks (default 20s) --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks. --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s) @@ -213,7 +214,7 @@ Flags: --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s) --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_queries Enable query logging to syslog. @@ -242,6 +243,7 @@ Flags: --mycnf_slow_log_path string mysql slow query log path --mycnf_socket_file string mysql socket file --mycnf_tmp_dir string mysql tmp directory + --mysql-shutdown-timeout duration timeout to use when MySQL is being shut down. (default 5m0s) --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) @@ -253,7 +255,8 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int port for the server --pprof strings enable profiling - --pt-osc-path string override default pt-online-schema-change binary full path + --pprof-http enable pprof http endpoints + --pt-osc-path string override default pt-online-schema-change binary full path (default "/usr/bin/pt-online-schema-change") --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog") @@ -263,30 +266,27 @@ Flags: --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables). --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results - --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) + --queryserver-config-idle-timeout duration query server idle timeout, vttablet manages various mysql connection pools. 
This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s) --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000) --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4) --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s) --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting - --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s) + --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16) --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. 
This config controls the capacity of the lru cache. (default 33554432) - --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s) - --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000) - --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) + --queryserver-config-query-pool-timeout duration query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. + --queryserver-config-query-timeout duration query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s) --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true) - --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s) + --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. 
(default 30m0s) --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768) --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200) - --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. (default 0s) - --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection + --queryserver-config-stream-pool-timeout duration query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. --queryserver-config-strict-table-acl only allow queries that pass table acl checks --queryserver-config-terse-errors prevent bind vars from escaping in client error messages --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. 
by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20) - --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s) + --queryserver-config-transaction-timeout duration query server transaction timeout, a transaction will be killed if it takes longer than this value (default 30s) --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate) --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s) - --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000) --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this --queryserver-enable-settings-pool Enable pooling of connections with modified system settings (default true) --queryserver-enable-views Enable views support in vttablet. @@ -318,7 +318,7 @@ Flags: --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s) - --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. 
(default 0s) + --shutdown_grace_period duration how long to wait for queries and transactions to complete during graceful shutdown. (default 3s) --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s) @@ -331,12 +331,12 @@ Flags: --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s) --statsd_address string Address for statsd client --statsd_sample_rate float Sample rate for statsd metrics (default 1) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20) --table-acl-config string path to table access checker config file; send SIGHUP to reload this file --table-acl-config-reload-interval duration Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class - --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included) (default "hold,purge,evac,drop") + --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included) (default "hold,purge,evac,drop") --tablet-path string tablet alias --tablet_config string YAML file config for tablet --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. 
@@ -348,14 +348,14 @@ Flags: --tablet_hostname string if not empty, this hostname will be assumed instead of trying to resolve it --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus) (default 8) --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting --tablet_manager_grpc_key string the key to use to connect --tablet_manager_grpc_server_name string the server name to use to validate server certificate --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc") --tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc") - --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica") + --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica") --topo_consul_lock_delay duration LockDelay for consul session. (default 15s) --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth") --topo_consul_lock_session_ttl string TTL for consul session. 
@@ -398,17 +398,15 @@ Flags: --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9") --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler. --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s) + --unmanaged Indicates an unmanaged tablet, i.e. using an external mysql-compatible database --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vreplication-parallel-insert-workers int Number of parallel insertion workers to use during copy phase. Set <= 1 to disable parallelism, or > 1 to enable concurrent insertion during copy phase. (default 1) --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s) --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000) --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. 
This helps to limit the impact on the source tablet. (default 43200) --vreplication_experimental_flags int (Bitmask) of experimental features in vreplication to enable (default 3) - --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s) - --vreplication_healthcheck_timeout duration healthcheck retry delay (default 1m0s) - --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s) --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling (default 1) --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence --vreplication_net_read_timeout int Session value of net_read_timeout for vreplication, in seconds (default 300) @@ -416,7 +414,6 @@ Flags: --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s) --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s) --vreplication_store_compressed_gtid Store compressed gtids in the pos column of the sidecar database's vreplication table - --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY") --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864) --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true) --vstream_packet_size int Suggested packet size for VReplication streamer. 
This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000) diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index fb9c42d932a..8cce76afc65 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -13,6 +13,7 @@ Flags: --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2) --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0. --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152) + --builtinbackup-incremental-restore-path string the directory where incremental restore files, namely binlog files, are extracted to. In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. The path should exist. When empty, the default OS temp dir is assumed. --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s) --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s) --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified @@ -31,7 +32,6 @@ Flags: --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s) --dba_pool_size int Size of the connection pool for dba connections (default 20) --default_schema_dir string Default directory for initial schema files. If no schema is found in schema_dir, default to this location. - --disable_active_reparents if set, do not allow active reparents. 
Use this to protect a cluster using external reparents. --enable_direct_ddl Allow users to submit direct DDL statements (default true) --enable_online_ddl Allow users to submit, review and control Online DDL (default true) --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true) @@ -69,13 +69,16 @@ Flags: --grpc_server_initial_window_size int gRPC server initial window size --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s) --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs) + --grpc_server_keepalive_time duration After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive. (default 10s) + --grpc_server_keepalive_timeout duration After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s) -h, --help help for vttestserver + --initialize-with-vt-dba-tcp If this flag is enabled, MySQL will be initialized with an additional user named vt_dba_tcp, who will have access via TCP/IP connection. --initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags. 
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) --keyspaces strings Comma separated list of keyspaces (default [test_keyspace]) --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) @@ -89,6 +92,7 @@ Flags: --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions) + --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries --null_probability float The probability to initialize a field with 'NULL' if --initialize_with_random_data is true. Only applies to fields that can contain NULL values. (default 0.1) --num_shards strings Comma separated shard count (one per keyspace) (default [2]) --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) @@ -99,6 +103,7 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int Port to use for vtcombo. If this is 0, a random port will be chosen. --pprof strings enable profiling + --pprof-http enable pprof http endpoints --proto_topo string Define the fake cluster topology as a compact text format encoded vttest proto. 
See vttest.proto for more information. --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value @@ -112,13 +117,13 @@ Flags: --snapshot_file string A MySQL DB snapshot file --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited) --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid. --tablet_hostname string The hostname to use for the tablet otherwise it will be derived from OS' hostname (default "localhost") --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting --tablet_manager_grpc_cert string the cert to use to connect - --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8) + --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus) (default 8) --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100) --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting --tablet_manager_grpc_key string the key to use to connect @@ -138,8 +143,9 @@ Flags: --transaction_mode string Transaction mode MULTI (default), SINGLE or TWOPC 
(default "MULTI") --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --vschema_ddl_authorized_users string Comma separated list of users authorized to execute vschema ddl operations via vtgate + --vtcombo-bind-host string which host to bind vtcombo servenv listener to (default "localhost") --vtctl_client_protocol string Protocol to use to talk to the vtctl server. (default "grpc") --vtctld_grpc_ca string the server ca to use to validate servers when connecting --vtctld_grpc_cert string the cert to use to connect diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt index d1aea061ea5..b89528766d8 100644 --- a/go/flags/endtoend/zkctl.txt +++ b/go/flags/endtoend/zkctl.txt @@ -22,17 +22,18 @@ Flags: -h, --help help for zkctl --keep_logs duration keep logs for this long (using ctime) (zero to keep forever) --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever) - --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) + --log_backtrace_at traceLocations when logging hits line file:N, emit a stack trace --log_dir string If non-empty, write log files in this directory --log_err_stacks log stack traces for errors --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) - --stderrthreshold severity logs at or above this threshold go to stderr (default 1) + --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --v Level log level for V logs -v, --version print binary version - --vmodule moduleSpec 
comma-separated list of pattern=N settings for file-filtered logging + --vmodule vModuleFlag comma-separated list of pattern=N settings for file-filtered logging --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803") --zk.extra stringArray extra config line(s) to append verbatim to config (flag can be specified more than once) --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname diff --git a/go/flags/endtoend/zkctld.txt b/go/flags/endtoend/zkctld.txt index d808bd7ce67..20371e9e2d7 100644 --- a/go/flags/endtoend/zkctld.txt +++ b/go/flags/endtoend/zkctld.txt @@ -4,4 +4,5 @@ Usage: zkctld [flags] Flags: - -h, --help help for zkctld + -h, --help help for zkctld + -v, --version version for zkctld diff --git a/go/flagutil/deprecated_float64_seconds.go b/go/flagutil/deprecated_float64_seconds.go deleted file mode 100644 index d9afb11aaa2..00000000000 --- a/go/flagutil/deprecated_float64_seconds.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flagutil - -import ( - "strconv" - "time" - - "vitess.io/vitess/go/vt/log" -) - -type DeprecatedFloat64Seconds struct { - name string - val time.Duration -} - -var _ Value[time.Duration] = (*DeprecatedFloat64Seconds)(nil) - -func NewDeprecatedFloat64Seconds(name string, defVal time.Duration) DeprecatedFloat64Seconds { - return DeprecatedFloat64Seconds{ - name: name, - val: defVal, - } -} - -func (f *DeprecatedFloat64Seconds) String() string { return f.val.String() } -func (f *DeprecatedFloat64Seconds) Type() string { return "duration" } - -func (f *DeprecatedFloat64Seconds) Set(arg string) error { - v, err := time.ParseDuration(arg) - if err != nil { - log.Warningf("failed to parse %s as duration (err: %v); falling back to parsing to %s as seconds. this is deprecated and will be removed in a future release", f.name, err, f.val) - - n, err := strconv.ParseFloat(arg, 64) - if err != nil { - return err - } - - v = time.Duration(n * float64(time.Second)) - } - - f.val = v - return nil -} - -func (f DeprecatedFloat64Seconds) Clone() DeprecatedFloat64Seconds { - return DeprecatedFloat64Seconds{ - name: f.name, - val: f.val, - } -} - -func (f DeprecatedFloat64Seconds) Name() string { return f.name } -func (f DeprecatedFloat64Seconds) Get() time.Duration { return f.val } - -func (f *DeprecatedFloat64Seconds) UnmarshalJSON(data []byte) error { - return f.Set(string(data)) -} diff --git a/go/flagutil/enum.go b/go/flagutil/enum.go index 5bc279ee493..1a571aa63a1 100644 --- a/go/flagutil/enum.go +++ b/go/flagutil/enum.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and @@ -86,11 +86,12 @@ func newStringEnum(name string, initialValue string, choices []string, caseInsen } return &StringEnum{ - name: name, - val: initialValue, - choices: choiceMap, - choiceNames: choiceNames, - choiceMapper: choiceMapper, + name: name, + val: initialValue, + choices: choiceMap, + choiceNames: choiceNames, + choiceMapper: choiceMapper, + caseInsensitive: caseInsensitive, } } diff --git a/go/flagutil/enum_test.go b/go/flagutil/enum_test.go new file mode 100644 index 00000000000..337710678db --- /dev/null +++ b/go/flagutil/enum_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flagutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStringEnum(t *testing.T) { + tests := []struct { + name string + initialValue string + choices []string + caseInsensitive bool + secondValue string + expectedErr string + }{ + { + name: "valid set call", + initialValue: "mango", + choices: []string{"apple", "mango", "kiwi"}, + caseInsensitive: true, + secondValue: "kiwi", + }, + { + name: "invalid set call", + initialValue: "apple", + choices: []string{"apple", "mango", "kiwi"}, + caseInsensitive: false, + secondValue: "banana", + expectedErr: "invalid choice for enum (valid choices: [apple kiwi mango])", + }, + { + name: "invalid set call case insensitive", + initialValue: "apple", + choices: []string{"apple", "kiwi"}, + caseInsensitive: true, + secondValue: "banana", + expectedErr: "invalid choice for enum (valid choices: [apple kiwi] [case insensitive])", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var enum *StringEnum + if tt.caseInsensitive { + enum = NewCaseInsensitiveStringEnum(tt.name, tt.initialValue, tt.choices) + } else { + enum = NewStringEnum(tt.name, tt.initialValue, tt.choices) + } + + require.Equal(t, "string", enum.Type()) + err := enum.Set(tt.secondValue) + if tt.expectedErr == "" { + require.NoError(t, err) + require.Equal(t, tt.secondValue, enum.String()) + } else { + require.ErrorContains(t, err, tt.expectedErr) + require.Equal(t, tt.initialValue, enum.String()) + } + }) + } +} diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go index ebf4ccef485..28d0b54e4ec 100644 --- a/go/flagutil/flagutil.go +++ b/go/flagutil/flagutil.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/flagutil/flagutil_test.go b/go/flagutil/flagutil_test.go index f95c46a53f7..1ddbf693e27 100644 --- a/go/flagutil/flagutil_test.go +++ b/go/flagutil/flagutil_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -21,6 +21,8 @@ import ( "testing" "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStringList(t *testing.T) { @@ -33,16 +35,12 @@ func TestStringList(t *testing.T) { "3ala,": "3ala.", } for in, out := range wanted { - if err := p.Set(in); err != nil { - t.Errorf("v.Set(%v): %v", in, err) - continue - } - if strings.Join(p, ".") != out { - t.Errorf("want %#v, got %#v", strings.Split(out, "."), p) - } - if p.String() != in { - t.Errorf("v.String(): want %#v, got %#v", in, p.String()) - } + err := p.Set(in) + assert.NoError(t, err) + + assert.Equal(t, out, strings.Join(p, ".")) + assert.Equal(t, in, p.String()) + } } @@ -50,12 +48,10 @@ func TestStringList(t *testing.T) { func TestEmptyStringList(t *testing.T) { var p StringListValue var _ pflag.Value = &p - if err := p.Set(""); err != nil { - t.Fatalf("p.Set(\"\"): %v", err) - } - if len(p) != 0 { - t.Fatalf("len(p) != 0: got %v", len(p)) - } + + err := p.Set("") + require.NoError(t, err) + require.Len(t, p, 0) } type pair struct { @@ -82,27 +78,204 @@ func TestStringMap(t *testing.T) { }, } for _, want := range wanted { - if err := v.Set(want.in); err != want.err { - t.Errorf("v.Set(%v): %v", want.in, want.err) - 
continue - } + err := v.Set(want.in) + assert.ErrorIs(t, err, want.err) + if want.err != nil { continue } - if len(want.out) != len(v) { - t.Errorf("want %#v, got %#v", want.out, v) - continue - } - for key, value := range want.out { - if v[key] != value { - t.Errorf("want %#v, got %#v", want.out, v) - continue - } - } + assert.EqualValues(t, want.out, v) + assert.Equal(t, want.in, v.String()) + } +} - if vs := v.String(); vs != want.in { - t.Errorf("v.String(): want %#v, got %#v", want.in, vs) - } +func TestStringListValue(t *testing.T) { + strListVal := StringListValue{"temp", "val"} + require.Equal(t, []string([]string{"temp", "val"}), strListVal.Get()) + require.Equal(t, "strings", strListVal.Type()) +} + +func TestStringMapValue(t *testing.T) { + strMapVal := StringMapValue{ + "key": "val", + } + require.Equal(t, "StringMap", strMapVal.Type()) + require.Equal(t, map[string]string(map[string]string{"key": "val"}), strMapVal.Get()) +} + +func TestDualFormatStringListVar(t *testing.T) { + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + testFlagName := "test-flag_name" + var flagVal []string + testValue := []string{"testValue1", "testValue2", "testValue3"} + + DualFormatStringListVar(testFlagSet, &flagVal, testFlagName, testValue, "usage string") + assert.Equal(t, testValue, flagVal) + + want := "testValue1,testValue2,testValue3" + f := testFlagSet.Lookup("test-flag-name") + assert.NotNil(t, f) + assert.Equal(t, want, f.Value.String()) + + f = testFlagSet.Lookup("test_flag_name") + assert.NotNil(t, f) + assert.Equal(t, want, f.Value.String()) + + newVal := "newValue1,newValue2" + err := testFlagSet.Set("test-flag-name", newVal) + assert.NoError(t, err) + + assert.Equal(t, newVal, f.Value.String()) + assert.Equal(t, []string{"newValue1", "newValue2"}, flagVal) +} + +func TestDualFormatStringVar(t *testing.T) { + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + testFlagName := "test-flag_name" + var flagVal string + 
testValue := "testValue" + + DualFormatStringVar(testFlagSet, &flagVal, testFlagName, testValue, "usage string") + assert.Equal(t, testValue, flagVal) + + f := testFlagSet.Lookup("test-flag-name") + assert.NotNil(t, f) + assert.Equal(t, testValue, f.Value.String()) + + f = testFlagSet.Lookup("test_flag_name") + assert.NotNil(t, f) + assert.Equal(t, testValue, f.Value.String()) + + newVal := "newValue" + err := testFlagSet.Set("test-flag-name", newVal) + assert.NoError(t, err) + + assert.Equal(t, newVal, f.Value.String()) + assert.Equal(t, newVal, flagVal) +} + +func TestDualFormatBoolVar(t *testing.T) { + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + testFlagName := "test-flag_name" + var flagVal bool + + DualFormatBoolVar(testFlagSet, &flagVal, testFlagName, true, "usage string") + assert.True(t, flagVal) + + f := testFlagSet.Lookup("test-flag-name") + assert.NotNil(t, f) + assert.Equal(t, "true", f.Value.String()) + + f = testFlagSet.Lookup("test_flag_name") + assert.NotNil(t, f) + assert.Equal(t, "true", f.Value.String()) + + err := testFlagSet.Set("test-flag-name", "false") + assert.NoError(t, err) + + assert.Equal(t, "false", f.Value.String()) + assert.False(t, flagVal) +} + +func TestDualFormatInt64Var(t *testing.T) { + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + testFlagName := "test-flag_name" + var flagVal int64 + + DualFormatInt64Var(testFlagSet, &flagVal, testFlagName, int64(256), "usage string") + assert.Equal(t, int64(256), flagVal) + + f := testFlagSet.Lookup("test-flag-name") + assert.NotNil(t, f) + assert.Equal(t, "256", f.Value.String()) + + f = testFlagSet.Lookup("test_flag_name") + assert.NotNil(t, f) + assert.Equal(t, "256", f.Value.String()) + + newVal := "128" + err := testFlagSet.Set("test-flag-name", newVal) + assert.NoError(t, err) + + assert.Equal(t, newVal, f.Value.String()) + assert.Equal(t, int64(128), flagVal) +} + +func TestDualFormatIntVar(t *testing.T) { + testFlagSet := 
pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + testFlagName := "test-flag_name" + var flagVal int + + DualFormatIntVar(testFlagSet, &flagVal, testFlagName, 128, "usage string") + assert.Equal(t, 128, flagVal) + + f := testFlagSet.Lookup("test-flag-name") + assert.NotNil(t, f) + assert.Equal(t, "128", f.Value.String()) + + f = testFlagSet.Lookup("test_flag_name") + assert.NotNil(t, f) + assert.Equal(t, "128", f.Value.String()) + + newVal := "256" + err := testFlagSet.Set("test-flag-name", newVal) + assert.NoError(t, err) + + assert.Equal(t, newVal, f.Value.String()) + assert.Equal(t, 256, flagVal) +} + +type MockValue struct { + val *bool +} + +func (b MockValue) Set(s string) error { + if s == "true" { + *b.val = true + } else { + *b.val = false } + return nil +} + +func (b MockValue) String() string { + if *b.val { + return "true" + } + return "false" +} + +func (b MockValue) Type() string { + return "bool" +} + +func TestDualFormatVar(t *testing.T) { + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + testFlagName := "test-flag_name" + flagVal := true + value := MockValue{val: &flagVal} + + DualFormatVar(testFlagSet, value, testFlagName, "usage string") + + f := testFlagSet.Lookup("test-flag-name") + assert.NotNil(t, f) + assert.Equal(t, "true", f.Value.String()) + + f = testFlagSet.Lookup("test_flag_name") + assert.NotNil(t, f) + assert.Equal(t, "true", f.Value.String()) + + newVal := "false" + err := testFlagSet.Set("test-flag-name", newVal) + assert.NoError(t, err) + + assert.Equal(t, newVal, f.Value.String()) + assert.False(t, flagVal) } diff --git a/go/flagutil/optional_test.go b/go/flagutil/optional_test.go new file mode 100644 index 00000000000..b7a35a8786e --- /dev/null +++ b/go/flagutil/optional_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewOptionalFloat64(t *testing.T) { + fl := NewOptionalFloat64(4.187) + require.NotEmpty(t, fl) + require.Equal(t, false, fl.IsSet()) + + require.Equal(t, "4.187", fl.String()) + require.Equal(t, "float64", fl.Type()) + + err := fl.Set("invalid value") + require.ErrorContains(t, err, "parse error") + + err = fl.Set("7.77") + require.NoError(t, err) + require.Equal(t, 7.77, fl.Get()) + require.Equal(t, true, fl.IsSet()) + + err = fl.Set("1e1000") + require.ErrorContains(t, err, "value out of range") +} + +func TestNewOptionalString(t *testing.T) { + optStr := NewOptionalString("4.187") + require.NotEmpty(t, optStr) + require.Equal(t, false, optStr.IsSet()) + + require.Equal(t, "4.187", optStr.String()) + require.Equal(t, "string", optStr.Type()) + + err := optStr.Set("value") + require.NoError(t, err) + + require.Equal(t, "value", optStr.Get()) + require.Equal(t, true, optStr.IsSet()) +} diff --git a/go/flagutil/sets_test.go b/go/flagutil/sets_test.go new file mode 100644 index 00000000000..0c07f5b63b7 --- /dev/null +++ b/go/flagutil/sets_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStringSetFlag(t *testing.T) { + strSetFlag := StringSetFlag{} + set := strSetFlag.ToSet() + require.Empty(t, set) + + set = set.Insert("mango", "apple", "mango") + strSetFlag.set = set + + require.Equal(t, "StringSetFlag", strSetFlag.Type()) + require.Equal(t, "apple, mango", strSetFlag.String()) + + err := strSetFlag.Set("guvava") + require.NoError(t, err) + require.Equal(t, "apple, guvava, mango", strSetFlag.String()) + + require.NotEmpty(t, strSetFlag.ToSet()) +} + +func TestStringSetFlagWithEmptySet(t *testing.T) { + strSetFlag := StringSetFlag{} + require.Equal(t, "", strSetFlag.String()) + + err := strSetFlag.Set("tmp") + require.NoError(t, err) + require.Empty(t, strSetFlag.ToSet()) + + err = strSetFlag.Set("guvava") + require.NoError(t, err) + require.Equal(t, "guvava", strSetFlag.String()) +} diff --git a/go/hack/hack_test.go b/go/hack/hack_test.go index cf8b6423aff..9f71d82cf11 100644 --- a/go/hack/hack_test.go +++ b/go/hack/hack_test.go @@ -20,21 +20,30 @@ package hack import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestByteToString(t *testing.T) { v1 := []byte("1234") - if s := String(v1); s != "1234" { - t.Errorf("String(\"1234\"): %q, want 1234", s) - } + s := String(v1) + assert.Equal(t, "1234", s) v1 = []byte("") - if s := String(v1); s != "" { - t.Errorf("String(\"\"): %q, want empty", s) - } + s = String(v1) + assert.Equal(t, "", s) v1 = nil - if s := String(v1); s != "" { - t.Errorf("String(\"\"): %q, want empty", s) - } + s = 
String(v1) + assert.Equal(t, "", s) +} + +func TestStringToByte(t *testing.T) { + s := "1234" + b := StringBytes(s) + assert.Equal(t, []byte("1234"), b) + + s = "" + b = StringBytes(s) + assert.Nil(t, b) } diff --git a/go/hack/runtime.go b/go/hack/runtime.go index 5f6b946e33d..74bce583a84 100644 --- a/go/hack/runtime.go +++ b/go/hack/runtime.go @@ -22,21 +22,10 @@ import ( "unsafe" ) -//go:noescape -//go:linkname memhash runtime.memhash -func memhash(p unsafe.Pointer, h, s uintptr) uintptr - //go:noescape //go:linkname strhash runtime.strhash func strhash(p unsafe.Pointer, h uintptr) uintptr -// RuntimeMemhash provides access to the Go runtime's default hash function for arbitrary bytes. -// This is an optimal hash function which takes an input seed and is potentially implemented in hardware -// for most architectures. This is the same hash function that the language's `map` uses. -func RuntimeMemhash(b []byte, seed uint64) uint64 { - return uint64(memhash(unsafe.Pointer(unsafe.SliceData(b)), uintptr(seed), uintptr(len(b)))) -} - // RuntimeStrhash provides access to the Go runtime's default hash function for strings. // This is an optimal hash function which takes an input seed and is potentially implemented in hardware // for most architectures. This is the same hash function that the language's `map` uses. 
@@ -57,6 +46,3 @@ func Atof64(s string) (float64, int, error) //go:linkname Atof32 strconv.atof32 func Atof32(s string) (float32, int, error) - -//go:linkname FastRand runtime.fastrand -func FastRand() uint32 diff --git a/go/history/history.go b/go/history/history.go index 94af347ec52..f38be9239e0 100644 --- a/go/history/history.go +++ b/go/history/history.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/history/history_test.go b/go/history/history_test.go index 8b0559bd47e..34c57756315 100644 --- a/go/history/history_test.go +++ b/go/history/history_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -18,6 +18,8 @@ package history import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestHistory(t *testing.T) { @@ -33,9 +35,8 @@ func TestHistory(t *testing.T) { t.Errorf("len(records): want %v, got %v. records: %+v", want, got, q) } for i, record := range records { - if record != want[i] { - t.Errorf("record doesn't match: want %v, got %v", want[i], record) - } + assert.Equal(t, want[i], record) + } for ; i < 6; i++ { @@ -48,9 +49,8 @@ func TestHistory(t *testing.T) { t.Errorf("len(records): want %v, got %v. 
records: %+v", want, got, q) } for i, record := range records { - if record != want[i] { - t.Errorf("record doesn't match: want %v, got %v", want[i], record) - } + assert.Equal(t, want[i], record) + } } diff --git a/go/internal/flag/flag_test.go b/go/internal/flag/flag_test.go new file mode 100644 index 00000000000..1f1ff5dc5ec --- /dev/null +++ b/go/internal/flag/flag_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "os" + "testing" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestPreventGlogVFlagFromClobberingVersionFlagShorthand(t *testing.T) { + oldCommandLine := goflag.CommandLine + defer func() { + goflag.CommandLine = oldCommandLine + }() + + goflag.CommandLine = goflag.NewFlagSet(os.Args[0], goflag.ExitOnError) + + var v bool + + goflag.BoolVar(&v, "v", true, "") + + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + PreventGlogVFlagFromClobberingVersionFlagShorthand(testFlagSet) + + f := testFlagSet.Lookup("v") + assert.NotNil(t, f) + assert.Equal(t, "", f.Shorthand) + + // The function should not panic if -v flag is already defined + assert.NotPanics(t, func() { PreventGlogVFlagFromClobberingVersionFlagShorthand(testFlagSet) }) +} + +func TestParse(t *testing.T) { + oldCommandLine := goflag.CommandLine + defer func() { + goflag.CommandLine = oldCommandLine + }() + + var testFlag bool + goflag.CommandLine = 
goflag.NewFlagSet(os.Args[0], goflag.ExitOnError) + goflag.BoolVar(&testFlag, "testFlag", true, "") + + testFlagSet := pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + Parse(testFlagSet) + + f := testFlagSet.ShorthandLookup("h") + assert.NotNil(t, f) + assert.Equal(t, "false", f.DefValue) + + f = testFlagSet.Lookup("help") + assert.NotNil(t, f) + assert.Equal(t, "false", f.DefValue) + + testFlagSet = pflag.NewFlagSet("testFlagSet2", pflag.ExitOnError) + + // If shorthand "h" is already defined, shorthand for "help" should be empty + var h bool + testFlagSet.BoolVarP(&h, "testH", "h", false, "") + + Parse(testFlagSet) + f = testFlagSet.Lookup("help") + assert.NotNil(t, f) + assert.Equal(t, "", f.Shorthand) + + // Check if AddGoFlagSet was called + f = testFlagSet.Lookup("testFlag") + assert.NotNil(t, f) + assert.Equal(t, "true", f.DefValue) +} + +func TestIsFlagProvided(t *testing.T) { + oldPflagCommandLine := pflag.CommandLine + defer func() { + pflag.CommandLine = oldPflagCommandLine + }() + + pflag.CommandLine = pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + flagName := "testFlag" + isProvided := IsFlagProvided(flagName) + assert.False(t, isProvided, "flag %q should not exist", flagName) + + var testFlag bool + pflag.BoolVar(&testFlag, flagName, false, "") + + // Should return false as testFlag is not set + isProvided = IsFlagProvided(flagName) + assert.False(t, isProvided, "flag %q should not be provided", flagName) + + pflag.Parse() + _ = pflag.Set(flagName, "true") + + // Should return true as testFlag is set + isProvided = IsFlagProvided(flagName) + assert.True(t, isProvided, "flag %q should be provided", flagName) +} + +func TestFilterTestFlags(t *testing.T) { + oldOsArgs := os.Args + defer func() { + os.Args = oldOsArgs + }() + + os.Args = []string{ + "-test.run", + "TestFilter", + "otherArgs1", + "otherArgs2", + "-test.run=TestFilter", + } + + otherArgs, testFlags := filterTestFlags() + + expectedTestFlags := []string{ + "-test.run", + 
"TestFilter", + "-test.run=TestFilter", + } + expectedOtherArgs := []string{ + "otherArgs1", + "otherArgs2", + } + + assert.Equal(t, expectedOtherArgs, otherArgs) + assert.Equal(t, expectedTestFlags, testFlags) +} + +func TestParseFlagsForTest(t *testing.T) { + oldOsArgs := os.Args + oldPflagCommandLine := pflag.CommandLine + oldCommandLine := goflag.CommandLine + + defer func() { + os.Args = oldOsArgs + pflag.CommandLine = oldPflagCommandLine + goflag.CommandLine = oldCommandLine + }() + + pflag.CommandLine = pflag.NewFlagSet("testFlagSet", pflag.ExitOnError) + + os.Args = []string{ + "-test.run", + "TestFilter", + "otherArgs1", + "otherArgs2", + "-test.run=TestFilter", + } + + ParseFlagsForTest() + + expectedOsArgs := []string{ + "otherArgs1", + "otherArgs2", + } + + assert.Equal(t, expectedOsArgs, os.Args) + assert.Equal(t, true, pflag.Parsed()) +} + +func TestParsed(t *testing.T) { + oldPflagCommandLine := pflag.CommandLine + oldCommandLine := goflag.CommandLine + + defer func() { + pflag.CommandLine = oldPflagCommandLine + goflag.CommandLine = oldCommandLine + }() + + pflag.CommandLine = pflag.NewFlagSet("testPflagSet", pflag.ExitOnError) + goflag.CommandLine = goflag.NewFlagSet("testGoflagSet", goflag.ExitOnError) + + b := Parsed() + assert.False(t, b, "command-line flags should not be parsed") + + pflag.Parse() + b = Parsed() + assert.True(t, b, "command-line flags should be parsed") +} + +func TestLookup(t *testing.T) { + oldPflagCommandLine := pflag.CommandLine + oldCommandLine := goflag.CommandLine + + defer func() { + pflag.CommandLine = oldPflagCommandLine + goflag.CommandLine = oldCommandLine + }() + + pflag.CommandLine = pflag.NewFlagSet("testPflagSet", pflag.ExitOnError) + goflag.CommandLine = goflag.NewFlagSet("testGoflagSet", goflag.ExitOnError) + + var testGoFlag, testPflag, testFlag bool + + goflag.BoolVar(&testGoFlag, "testGoFlag", true, "") + goflag.BoolVar(&testFlag, "t", true, "") + pflag.BoolVar(&testPflag, "testPflag", true, "") + + 
testCases := []struct { + shorthand string + name string + }{ + { + // If single character flag is passed, the shorthand should be the same + shorthand: "t", + name: "t", + }, + { + shorthand: "", + name: "testGoFlag", + }, + { + shorthand: "", + name: "testPflag", + }, + } + + for _, tt := range testCases { + f := Lookup(tt.name) + + assert.NotNil(t, f) + assert.Equal(t, tt.shorthand, f.Shorthand) + assert.Equal(t, tt.name, f.Name) + } + + f := Lookup("non-existent-flag") + assert.Nil(t, f) +} + +func TestArgs(t *testing.T) { + oldPflagCommandLine := pflag.CommandLine + oldOsArgs := os.Args + + defer func() { + pflag.CommandLine = oldPflagCommandLine + os.Args = oldOsArgs + }() + + pflag.CommandLine = pflag.NewFlagSet("testPflagSet", pflag.ExitOnError) + + os.Args = []string{ + "arg0", + "arg1", + "arg2", + "arg3", + } + + expectedArgs := []string{ + "arg1", + "arg2", + "arg3", + } + + pflag.Parse() + // Should work equivalent to pflag.Args if there's no double dash + args := Args() + assert.Equal(t, expectedArgs, args) + + arg := Arg(2) + assert.Equal(t, "arg3", arg) + + // Should return empty string if the index is greater than len of CommandLine.args + arg = Arg(3) + assert.Equal(t, "", arg) +} + +func TestIsZeroValue(t *testing.T) { + var testFlag string + + testFlagSet := goflag.NewFlagSet("testFlagSet", goflag.ExitOnError) + testFlagSet.StringVar(&testFlag, "testflag", "default", "Description of testflag") + + f := testFlagSet.Lookup("testflag") + + result := isZeroValue(f, "") + assert.True(t, result, "empty string should represent zero value for string flag") + + result = isZeroValue(f, "anyValue") + assert.False(t, result, "non-empty string should not represent zero value for string flag") +} diff --git a/go/internal/flag/usage_test.go b/go/internal/flag/usage_test.go new file mode 100644 index 00000000000..461cd2580ea --- /dev/null +++ b/go/internal/flag/usage_test.go @@ -0,0 +1,121 @@ +/* +Copyright 2024 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "io" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSetUsage(t *testing.T) { + fs := goflag.NewFlagSet("test", goflag.ExitOnError) + fs.String("testflag", "default", "`test` flag") + + opts := UsageOptions{ + Preface: func(w io.Writer) { + _, _ = w.Write([]byte("test preface")) + }, + Epilogue: func(w io.Writer) { + _, _ = w.Write([]byte("test epilogue")) + }, + FlagFilter: func(f *goflag.Flag) bool { + return f.Value.String() == "default" + }, + } + + SetUsage(fs, opts) + + var builder strings.Builder + fs.SetOutput(&builder) + + _ = fs.Set("testflag", "not default") + fs.Usage() + + output := builder.String() + assert.NotContains(t, output, "test flag") + + // Set the value back to default + _ = fs.Set("testflag", "default") + fs.Usage() + output = builder.String() + + assert.Contains(t, output, "test preface") + assert.Contains(t, output, "--testflag test") + assert.Contains(t, output, "test epilogue") + assert.Contains(t, output, "test flag") +} + +func TestSetUsageWithNilFlagFilterAndPreface(t *testing.T) { + oldOsArgs := os.Args + defer func() { + os.Args = oldOsArgs + }() + + os.Args = []string{"testOsArg"} + fs := goflag.NewFlagSet("test", goflag.ExitOnError) + fs.String("testflag", "default", "`test` flag") + + opts := UsageOptions{ + Epilogue: func(w io.Writer) { + _, _ = w.Write([]byte("test epilogue")) + }, + } + + SetUsage(fs, opts) + + var 
builder strings.Builder + fs.SetOutput(&builder) + fs.Usage() + output := builder.String() + + assert.Contains(t, output, "Usage of testOsArg:") + assert.Contains(t, output, "--testflag test") + assert.Contains(t, output, "test epilogue") +} + +func TestSetUsageWithBoolFlag(t *testing.T) { + fs := goflag.NewFlagSet("test2", goflag.ExitOnError) + var tBool bool + fs.BoolVar(&tBool, "t", true, "`t` flag") + + opts := UsageOptions{ + Preface: func(w io.Writer) { + _, _ = w.Write([]byte("test preface")) + }, + Epilogue: func(w io.Writer) { + _, _ = w.Write([]byte("test epilogue")) + }, + FlagFilter: func(f *goflag.Flag) bool { + return f.Value.String() == "true" + }, + } + + SetUsage(fs, opts) + + var builder strings.Builder + fs.SetOutput(&builder) + fs.Usage() + output := builder.String() + + assert.Contains(t, output, "test preface") + assert.Contains(t, output, "-t\tt flag") +} diff --git a/go/ioutil/meter_test.go b/go/ioutil/meter_test.go new file mode 100644 index 00000000000..10e2b83b485 --- /dev/null +++ b/go/ioutil/meter_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ioutil + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + calledBytes int + calledDuration time.Duration +) + +func testfn(b int, d time.Duration) { + calledBytes = b + calledDuration = d +} + +func TestMeter(t *testing.T) { + tm := meter{ + fs: []func(b int, d time.Duration){testfn}, + bytes: 123, + duration: time.Second, + } + + assert.Equal(t, int64(123), tm.Bytes()) + assert.Equal(t, time.Second, tm.Duration()) + + tf := func(p []byte) (int, error) { + return 1, nil + } + + b, err := tm.measure(tf, []byte("")) + wantDuration := time.Second + calledDuration + wantBytes := int64(123) + int64(calledBytes) + + assert.NoError(t, err) + assert.Equal(t, 1, b) + assert.Equal(t, wantDuration, tm.duration) + assert.Equal(t, wantBytes, tm.bytes) +} diff --git a/go/ioutil/reader_test.go b/go/ioutil/reader_test.go new file mode 100644 index 00000000000..ea284f13225 --- /dev/null +++ b/go/ioutil/reader_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ioutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type mockReadCloser struct{} + +func (*mockReadCloser) Read([]byte) (int, error) { + return 2, nil +} + +func (*mockReadCloser) Close() error { + return nil +} + +type mockRead struct{} + +func (*mockRead) Read([]byte) (int, error) { + return 3, nil +} + +func TestMeteredReader(t *testing.T) { + mrc := NewMeteredReadCloser(&mockReadCloser{}, testfn) + n, err := mrc.Read([]byte("")) + assert.NoError(t, err) + assert.Equal(t, 2, n) + assert.Equal(t, 2, calledBytes) + assert.Equal(t, calledDuration, mrc.Duration()) + + mr := NewMeteredReader(&mockRead{}, testfn) + n, err = mr.Read([]byte("")) + assert.NoError(t, err) + assert.Equal(t, 3, n) + assert.Equal(t, 3, calledBytes) + assert.Equal(t, calledDuration, mr.Duration()) +} diff --git a/go/ioutil/writer_test.go b/go/ioutil/writer_test.go new file mode 100644 index 00000000000..b21fb34b397 --- /dev/null +++ b/go/ioutil/writer_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ioutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type mockWriteCloser struct{} + +func (*mockWriteCloser) Write([]byte) (int, error) { + return 2, nil +} + +func (*mockWriteCloser) Close() error { + return nil +} + +type mockWrite struct{} + +func (*mockWrite) Write([]byte) (int, error) { + return 3, nil +} + +func TestMeteredWriter(t *testing.T) { + mwc := NewMeteredWriteCloser(&mockWriteCloser{}, testfn) + n, err := mwc.Write([]byte("")) + assert.NoError(t, err) + assert.Equal(t, 2, n) + assert.Equal(t, 2, calledBytes) + assert.Equal(t, calledDuration, mwc.Duration()) + + mw := NewMeteredWriter(&mockWrite{}, testfn) + n, err = mw.Write([]byte("")) + assert.NoError(t, err) + assert.Equal(t, 3, n) + assert.Equal(t, 3, calledBytes) + assert.Equal(t, calledDuration, mw.Duration()) +} diff --git a/go/json2/marshal_test.go b/go/json2/marshal_test.go index 96b7f508d73..b155126fb17 100644 --- a/go/json2/marshal_test.go +++ b/go/json2/marshal_test.go @@ -19,6 +19,9 @@ package json2 import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + querypb "vitess.io/vitess/go/vt/proto/query" vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) @@ -29,11 +32,21 @@ func TestMarshalPB(t *testing.T) { Type: querypb.Type_VARCHAR, } b, err := MarshalPB(col) - if err != nil { - t.Fatal(err) - } + + require.NoErrorf(t, err, "MarshalPB(%+v) error", col) want := "{\"name\":\"c1\",\"type\":\"VARCHAR\"}" - if string(b) != want { - t.Errorf("MarshalPB(col): %q, want %q", b, want) + assert.Equalf(t, want, string(b), "MarshalPB(%+v)", col) +} + +func TestMarshalIndentPB(t *testing.T) { + col := &vschemapb.Column{ + Name: "c1", + Type: querypb.Type_VARCHAR, } + indent := " " + b, err := MarshalIndentPB(col, indent) + + require.NoErrorf(t, err, "MarshalIndentPB(%+v, %q) error", col, indent) + want := "{\n \"name\": \"c1\",\n \"type\": \"VARCHAR\"\n}" + assert.Equal(t, want, string(b), "MarshalIndentPB(%+v, %q)", 
col, indent) } diff --git a/go/json2/unmarshal_test.go b/go/json2/unmarshal_test.go index 9b6a6af1ca2..ff18a29def8 100644 --- a/go/json2/unmarshal_test.go +++ b/go/json2/unmarshal_test.go @@ -17,7 +17,14 @@ limitations under the License. package json2 import ( + "fmt" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/emptypb" ) func TestUnmarshal(t *testing.T) { @@ -37,14 +44,50 @@ func TestUnmarshal(t *testing.T) { err: "", }} for _, tcase := range tcases { - out := make(map[string]any) + out := make(map[string]interface{}) err := Unmarshal([]byte(tcase.in), &out) + got := "" if err != nil { got = err.Error() } - if got != tcase.err { - t.Errorf("Unmarshal(%v) err: %v, want %v", tcase.in, got, tcase.err) - } + assert.Equal(t, tcase.err, got, "Unmarshal(%v) err", tcase.in) + } +} + +func TestUnmarshalProto(t *testing.T) { + protoData := &emptypb.Empty{} + protoJSONData, err := protojson.Marshal(protoData) + assert.Nil(t, err, "protojson.Marshal error") + + tcase := struct { + in string + out *emptypb.Empty + }{ + in: string(protoJSONData), + out: &emptypb.Empty{}, + } + + err = Unmarshal([]byte(tcase.in), tcase.out) + + assert.Nil(t, err, "Unmarshal(%v) protobuf message", tcase.in) + assert.Equal(t, protoData, tcase.out, "Unmarshal(%v) protobuf message result", tcase.in) +} + +func TestAnnotate(t *testing.T) { + tcases := []struct { + data []byte + err error + }{ + { + data: []byte("invalid JSON"), + err: fmt.Errorf("line: 1, position 1: invalid character 'i' looking for beginning of value"), + }, + } + + for _, tcase := range tcases { + err := annotate(tcase.data, tcase.err) + + require.Equal(t, tcase.err, err, "annotate(%s, %v) error", string(tcase.data), tcase.err) } } diff --git a/go/jsonutil/json_test.go b/go/jsonutil/json_test.go index 50488dd3f3c..00d53d92021 100644 --- a/go/jsonutil/json_test.go +++ 
b/go/jsonutil/json_test.go @@ -18,6 +18,8 @@ package jsonutil import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestMarshalNoEscape(t *testing.T) { @@ -53,9 +55,7 @@ func TestMarshalNoEscape(t *testing.T) { t.Run(c.name, func(t *testing.T) { json, _ := MarshalNoEscape(c.v) sjson := string(json[:len(json)-1]) - if sjson != c.expected { - t.Errorf("expected: %v, got: %v", c.expected, sjson) - } + assert.Equal(t, c.expected, sjson) }) } } @@ -97,9 +97,7 @@ func TestMarshalIndentNoEscape(t *testing.T) { t.Run(c.name, func(t *testing.T) { json, _ := MarshalIndentNoEscape(c.v, c.prefix, c.ident) sjson := string(json[:len(json)-1]) - if sjson != c.expected { - t.Errorf("expected: %v, got: %v", c.expected, sjson) - } + assert.Equal(t, c.expected, sjson) }) } } diff --git a/go/list/list_test.go b/go/list/list_test.go new file mode 100644 index 00000000000..16d23d6cc8e --- /dev/null +++ b/go/list/list_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2024 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package list + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestInitEmptyList(t *testing.T) { + l := New[int]() + assert.Equal(t, 0, l.Len()) + assert.Nil(t, l.Front()) + assert.Nil(t, l.Back()) +} + +func TestInsertFront(t *testing.T) { + l := New[int]() + e := l.PushFront(1) + assert.Equal(t, 1, l.Len()) + assert.Equal(t, e, l.Front()) + assert.Equal(t, e, l.Back()) +} + +func TestInsertBack(t *testing.T) { + l := New[int]() + e := l.PushBack(1) + assert.Equal(t, 1, l.Len()) + assert.Equal(t, e, l.Front()) + assert.Equal(t, e, l.Back()) +} + +func TestInsertFrontEmptyList(t *testing.T) { + l := New[int]() + e := l.PushFront(1) + assert.Equal(t, 1, l.Len()) + assert.Equal(t, e, l.Front()) + assert.Equal(t, e, l.Back()) +} + +func TestInsertBackEmptyList(t *testing.T) { + l := New[int]() + e := l.PushBack(1) + assert.Equal(t, 1, l.Len()) + assert.Equal(t, e, l.Front()) + assert.Equal(t, e, l.Back()) +} + +func TestRemoveOnlyElement(t *testing.T) { + l := New[int]() + e := l.PushFront(1) + l.Remove(e) + assert.Equal(t, 0, l.Len()) + assert.Nil(t, l.Front()) + assert.Nil(t, l.Back()) +} + +func TestRemoveFromWrongList(t *testing.T) { + l1 := New[int]() + l2 := New[int]() + e := l1.PushFront(1) + assert.Panics(t, func() { l2.Remove(e) }) +} + +func TestGetFirstElement(t *testing.T) { + l := New[int]() + e := l.PushFront(1) + assert.Equal(t, e, l.Front()) +} + +func TestGetLastElement(t *testing.T) { + l := New[int]() + e := l.PushBack(1) + assert.Equal(t, e, l.Back()) +} + +func TestGetNextElement(t *testing.T) { + l := New[int]() + e := l.PushBack(1) + assert.Nil(t, e.Next()) + f := l.PushBack(2) + assert.Equal(t, f, e.Next()) +} + +func TestGetPrevElement(t *testing.T) { + l := New[int]() + e := l.PushBack(1) + assert.Nil(t, e.Prev()) + f := l.PushBack(2) + assert.Equal(t, e, f.Prev()) +} + +func TestMoveElement(t *testing.T) { + l := New[int]() + e := l.PushBack(1) + l.move(e, e) + assert.Equal(t, e, l.Front()) + f := 
l.PushBack(2) + l.move(e, f) + assert.Equal(t, f, l.Front()) + assert.Equal(t, e, l.Back()) + assert.Equal(t, e, f.next) +} + +func TestPushBackValue(t *testing.T) { + l := New[int]() + m := New[int]() + a := m.PushBack(5) + e := l.PushBack(1) + l.PushBackValue(a) + assert.Equal(t, a, l.Back()) + assert.Equal(t, a, e.next) +} + +func TestPushFrontValue(t *testing.T) { + l := New[int]() + m := New[int]() + a := m.PushBack(5) + e := l.PushBack(1) + l.PushFrontValue(a) + assert.Equal(t, a, l.Front()) + assert.Equal(t, a, e.prev) +} diff --git a/go/logstats/logger.go b/go/logstats/logger.go new file mode 100644 index 00000000000..90e208e7703 --- /dev/null +++ b/go/logstats/logger.go @@ -0,0 +1,217 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logstats + +import ( + "io" + "slices" + "strconv" + "strings" + "sync" + "time" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +type logbv struct { + Name string + BVar *querypb.BindVariable +} + +// Logger is a zero-allocation logger for logstats. +// It can output logs as JSON or as plaintext, following the commonly used +// logstats format that is shared between the tablets and the gates. 
+type Logger struct { + b []byte + bvars []logbv + n int + json bool +} + +func sortBVars(sorted []logbv, bvars map[string]*querypb.BindVariable) []logbv { + for k, bv := range bvars { + sorted = append(sorted, logbv{k, bv}) + } + slices.SortFunc(sorted, func(a, b logbv) int { + return strings.Compare(a.Name, b.Name) + }) + return sorted +} + +func (log *Logger) appendBVarsJSON(b []byte, bvars map[string]*querypb.BindVariable, full bool) []byte { + log.bvars = sortBVars(log.bvars[:0], bvars) + + b = append(b, '{') + for i, bv := range log.bvars { + if i > 0 { + b = append(b, ',', ' ') + } + b = strconv.AppendQuote(b, bv.Name) + b = append(b, `: {"type": `...) + b = strconv.AppendQuote(b, querypb.Type_name[int32(bv.BVar.Type)]) + b = append(b, `, "value": `...) + + if sqltypes.IsIntegral(bv.BVar.Type) || sqltypes.IsFloat(bv.BVar.Type) { + b = append(b, bv.BVar.Value...) + } else if bv.BVar.Type == sqltypes.Tuple { + b = append(b, '"') + b = strconv.AppendInt(b, int64(len(bv.BVar.Values)), 10) + b = append(b, ` items"`...) + } else { + if full { + b = strconv.AppendQuote(b, hack.String(bv.BVar.Value)) + } else { + b = append(b, '"') + b = strconv.AppendInt(b, int64(len(bv.BVar.Values)), 10) + b = append(b, ` bytes"`...) + } + } + b = append(b, '}') + } + return append(b, '}') +} + +func (log *Logger) Init(json bool) { + log.n = 0 + log.json = json + if log.json { + log.b = append(log.b, '{') + } +} + +func (log *Logger) Redacted() { + log.String("[REDACTED]") +} + +func (log *Logger) Key(key string) { + if log.json { + if log.n > 0 { + log.b = append(log.b, ',', ' ') + } + log.b = append(log.b, '"') + log.b = append(log.b, key...) + log.b = append(log.b, '"', ':', ' ') + } else { + if log.n > 0 { + log.b = append(log.b, '\t') + } + } + log.n++ +} + +func (log *Logger) StringUnquoted(value string) { + if log.json { + log.b = strconv.AppendQuote(log.b, value) + } else { + log.b = append(log.b, value...) 
+ } +} + +func (log *Logger) TabTerminated() { + if !log.json { + log.b = append(log.b, '\t') + } +} + +func (log *Logger) String(value string) { + log.b = strconv.AppendQuote(log.b, value) +} + +func (log *Logger) StringSingleQuoted(value string) { + if log.json { + log.b = strconv.AppendQuote(log.b, value) + } else { + log.b = append(log.b, '\'') + log.b = append(log.b, value...) + log.b = append(log.b, '\'') + } +} + +func (log *Logger) Time(t time.Time) { + const timeFormat = "2006-01-02 15:04:05.000000" + if log.json { + log.b = append(log.b, '"') + log.b = t.AppendFormat(log.b, timeFormat) + log.b = append(log.b, '"') + } else { + log.b = t.AppendFormat(log.b, timeFormat) + } +} + +func (log *Logger) Duration(t time.Duration) { + log.b = strconv.AppendFloat(log.b, t.Seconds(), 'f', 6, 64) +} + +func (log *Logger) BindVariables(bvars map[string]*querypb.BindVariable, full bool) { + // the bind variables are printed as JSON in text mode because the original + // printing syntax, which was simply `fmt.Sprintf("%v")`, is not stable or + // safe to parse + log.b = log.appendBVarsJSON(log.b, bvars, full) +} + +func (log *Logger) Int(i int64) { + log.b = strconv.AppendInt(log.b, i, 10) +} + +func (log *Logger) Uint(u uint64) { + log.b = strconv.AppendUint(log.b, u, 10) +} + +func (log *Logger) Bool(b bool) { + log.b = strconv.AppendBool(log.b, b) +} + +func (log *Logger) Strings(strs []string) { + log.b = append(log.b, '[') + for i, t := range strs { + if i > 0 { + log.b = append(log.b, ',') + } + log.b = strconv.AppendQuote(log.b, t) + } + log.b = append(log.b, ']') +} + +func (log *Logger) Flush(w io.Writer) (err error) { + if log.json { + log.b = append(log.b, '}') + } + log.b = append(log.b, '\n') + _, err = w.Write(log.b) + + clear(log.bvars) + log.bvars = log.bvars[:0] + log.b = log.b[:0] + log.n = 0 + + loggerPool.Put(log) + return err +} + +var loggerPool = sync.Pool{New: func() any { + return &Logger{} +}} + +// NewLogger returns a new Logger instance to 
perform logstats logging. +// The logger must be initialized with (*Logger).Init before usage and +// flushed with (*Logger).Flush once all the key-values have been written +// to it. +func NewLogger() *Logger { + return loggerPool.Get().(*Logger) +} diff --git a/go/logstats/logger_test.go b/go/logstats/logger_test.go new file mode 100644 index 00000000000..fccdfe1f935 --- /dev/null +++ b/go/logstats/logger_test.go @@ -0,0 +1,320 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logstats + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +func TestInit(t *testing.T) { + tl := Logger{} + + tl.Init(false) + assert.Nil(t, tl.b) + assert.Equal(t, 0, tl.n) + assert.Equal(t, false, tl.json) + + tl.Init(true) + assert.Equal(t, []byte{'{'}, tl.b) + assert.Equal(t, 0, tl.n) + assert.Equal(t, true, tl.json) +} + +func TestRedacted(t *testing.T) { + tl := Logger{} + tl.Init(false) + + tl.Redacted() + assert.Equal(t, []byte("\"[REDACTED]\""), tl.b) + + // Test for json + tl.b = []byte{} + tl.Init(true) + + tl.Redacted() + assert.Equal(t, []byte("{\"[REDACTED]\""), tl.b) +} + +func TestKey(t *testing.T) { + tl := Logger{ + b: []byte("test"), + } + tl.Init(false) + + // Expect tab not be appended at first + tl.Key("testKey") + assert.Equal(t, []byte("test"), tl.b) + + tl.Key("testKey") + assert.Equal(t, []byte("test\t"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Key("testKey") + assert.Equal(t, []byte("{\"testKey\": "), tl.b) + + tl.Key("testKey2") + assert.Equal(t, []byte("{\"testKey\": , \"testKey2\": "), tl.b) +} + +func TestStringUnquoted(t *testing.T) { + tl := Logger{} + tl.Init(true) + + tl.StringUnquoted("testValue") + assert.Equal(t, []byte("{\"testValue\""), tl.b) + + tl.b = []byte{} + tl.Init(false) + + tl.StringUnquoted("testValue") + assert.Equal(t, []byte("testValue"), tl.b) +} + +func TestTabTerminated(t *testing.T) { + tl := Logger{} + tl.Init(true) + + tl.TabTerminated() + // Should not be tab terminated in case of json + assert.Equal(t, []byte("{"), tl.b) + + tl.b = []byte("test") + tl.Init(false) + + tl.TabTerminated() + assert.Equal(t, []byte("test\t"), tl.b) +} + +func TestString(t *testing.T) { + tl := Logger{} + tl.Init(true) + + tl.String("testValue") + assert.Equal(t, []byte("{\"testValue\""), tl.b) + + tl.b = []byte{} + tl.Init(false) + + tl.String("testValue") + assert.Equal(t, 
[]byte("\"testValue\""), tl.b) +} + +func TestStringSingleQuoted(t *testing.T) { + tl := Logger{} + tl.Init(true) + + tl.StringSingleQuoted("testValue") + // Should be double quoted in case of json + assert.Equal(t, []byte("{\"testValue\""), tl.b) + + tl.b = []byte{} + tl.Init(false) + + tl.StringSingleQuoted("testValue") + assert.Equal(t, []byte("'testValue'"), tl.b) +} + +func TestTime(t *testing.T) { + tl := Logger{} + tl.Init(false) + + testTime := time.Date(2024, 9, 3, 7, 10, 12, 1233, time.UTC) + tl.Time(testTime) + assert.Equal(t, []byte("2024-09-03 07:10:12.000001"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Time(testTime) + assert.Equal(t, []byte("{\"2024-09-03 07:10:12.000001\""), tl.b) +} + +func TestDuration(t *testing.T) { + tl := Logger{} + tl.Init(false) + + tl.Duration(2 * time.Minute) + assert.Equal(t, []byte("120.000000"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Duration(6 * time.Microsecond) + assert.Equal(t, []byte("{0.000006"), tl.b) +} + +func TestInt(t *testing.T) { + tl := Logger{} + tl.Init(false) + + tl.Int(98) + assert.Equal(t, []byte("98"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Int(-1234) + assert.Equal(t, []byte("{-1234"), tl.b) +} + +func TestUint(t *testing.T) { + tl := Logger{} + tl.Init(false) + + tl.Uint(98) + assert.Equal(t, []byte("98"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Uint(1234) + assert.Equal(t, []byte("{1234"), tl.b) +} + +func TestBool(t *testing.T) { + tl := Logger{} + tl.Init(false) + + tl.Bool(true) + assert.Equal(t, []byte("true"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Bool(false) + assert.Equal(t, []byte("{false"), tl.b) +} + +func TestStrings(t *testing.T) { + tl := Logger{} + tl.Init(false) + + tl.Strings([]string{"testValue1", "testValue2"}) + assert.Equal(t, []byte("[\"testValue1\",\"testValue2\"]"), tl.b) + + tl.b = []byte{} + tl.Init(true) + + tl.Strings([]string{"testValue1"}) + assert.Equal(t, []byte("{[\"testValue1\"]"), tl.b) +} + +var calledValue []byte + 
+type mockWriter struct{} + +func (*mockWriter) Write(p []byte) (int, error) { + calledValue = p + return 1, nil +} + +func TestFlush(t *testing.T) { + tl := NewLogger() + tl.Init(true) + + tl.Key("testKey") + tl.String("testValue") + + tw := mockWriter{} + + err := tl.Flush(&tw) + assert.NoError(t, err) + assert.Equal(t, []byte("{\"testKey\": \"testValue\"}\n"), calledValue) +} + +func TestBindVariables(t *testing.T) { + tcases := []struct { + name string + bVars map[string]*querypb.BindVariable + want []byte + full bool + }{ + { + name: "int32, float64", + bVars: map[string]*querypb.BindVariable{ + "v1": sqltypes.Int32BindVariable(10), + "v2": sqltypes.Float64BindVariable(10.122), + }, + want: []byte(`{{"v1": {"type": "INT32", "value": 10}, "v2": {"type": "FLOAT64", "value": 10.122}}`), + }, + { + name: "varbinary, float64", + bVars: map[string]*querypb.BindVariable{ + "v1": { + Type: querypb.Type_VARBINARY, + Value: []byte("aa"), + }, + "v2": sqltypes.Float64BindVariable(10.122), + }, + want: []byte(`{{"v1": {"type": "VARBINARY", "value": "0 bytes"}, "v2": {"type": "FLOAT64", "value": 10.122}}`), + }, + { + name: "varbinary, varchar", + bVars: map[string]*querypb.BindVariable{ + "v1": { + Type: querypb.Type_VARBINARY, + Value: []byte("abc"), + }, + "v2": { + Type: querypb.Type_VARCHAR, + Value: []byte("aa"), + }, + }, + full: true, + want: []byte(`{{"v1": {"type": "VARBINARY", "value": "abc"}, "v2": {"type": "VARCHAR", "value": "aa"}}`), + }, + + { + name: "int64, tuple", + bVars: map[string]*querypb.BindVariable{ + "v1": { + Type: querypb.Type_INT64, + Value: []byte("12"), + }, + "v2": { + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{ + Type: querypb.Type_VARCHAR, + Value: []byte("aa"), + }, { + Type: querypb.Type_VARCHAR, + Value: []byte("bb"), + }}, + }, + }, + want: []byte(`{{"v1": {"type": "INT64", "value": 12}, "v2": {"type": "TUPLE", "value": "2 items"}}`), + }, + } + + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { + tl := 
Logger{} + tl.Init(true) + + tl.BindVariables(tc.bVars, tc.full) + assert.Equal(t, tc.want, tl.b) + }) + } +} diff --git a/go/mathstats/beta_test.go b/go/mathstats/beta_test.go index 2878493a57d..524beda7fcd 100644 --- a/go/mathstats/beta_test.go +++ b/go/mathstats/beta_test.go @@ -5,7 +5,10 @@ package mathstats import ( + "math" "testing" + + "github.com/stretchr/testify/assert" ) func TestBetaInc(t *testing.T) { @@ -26,3 +29,27 @@ func TestBetaInc(t *testing.T) { 10: 0.01928710937500, }) } + +func TestBetaincPanic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + assert.Contains(t, r, "betainc: a or b too big; failed to converge") + } else { + t.Error("Expected panic, but no panic occurred") + } + }() + + a := 1e30 + b := 1e30 + x := 0.5 + + _ = mathBetaInc(x, a, b) +} + +func TestMathBetaIncNaN(t *testing.T) { + x := -0.1 + + result := mathBetaInc(x, 2.0, 3.0) + + assert.True(t, math.IsNaN(result), "Expected NaN for x < 0, got %v", result) +} diff --git a/go/mathstats/sample_test.go b/go/mathstats/sample_test.go index fb9d6dbc6ee..7b2b5101bcf 100644 --- a/go/mathstats/sample_test.go +++ b/go/mathstats/sample_test.go @@ -4,7 +4,12 @@ package mathstats -import "testing" +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) func TestSamplePercentile(t *testing.T) { s := Sample{Xs: []float64{15, 20, 35, 40, 50}} @@ -19,3 +24,228 @@ func TestSamplePercentile(t *testing.T) { 2: 50, }) } + +func TestSamplePercentileEmpty(t *testing.T) { + s := Sample{Xs: []float64{}} + assert.True(t, math.IsNaN(s.Percentile(0.5)), "Percentile should return NaN for empty sample") +} + +func TestSampleStdDev(t *testing.T) { + values := []float64{2, 4, 4, 4, 5, 5, 7, 9} + expected := 2.138089935299395 + + sample := Sample{Xs: values} + result := sample.StdDev() + + assert.Equal(t, expected, result) +} + +func TestBounds(t *testing.T) { + tt := []struct { + xs []float64 + min float64 + max float64 + }{ + {[]float64{15, 20, 35, 40, 50}, 15, 50}, + 
{[]float64{}, math.NaN(), math.NaN()}, + {[]float64{10, 20, 5, 30, 15}, 5, 30}, + } + + for _, tc := range tt { + min, max := Bounds(tc.xs) + + if len(tc.xs) == 0 { + assert.True(t, math.IsNaN(min), "min value should be NaN") + assert.True(t, math.IsNaN(max), "max value should be NaN") + } else { + assert.Equal(t, tc.min, min, "min value mismatch") + assert.Equal(t, tc.max, max, "max value mismatch") + } + } +} + +func TestSampleBounds(t *testing.T) { + tt := []struct { + sample Sample + min float64 + max float64 + }{ + {Sample{Xs: []float64{15, 20, 35, 40, 50}, Sorted: false}, 15, 50}, + {Sample{Xs: []float64{}, Sorted: false}, math.NaN(), math.NaN()}, + {Sample{Xs: []float64{15, 20, 35, 40, 50}, Sorted: true}, 15, 50}, + } + + for _, tc := range tt { + min, max := tc.sample.Bounds() + + if len(tc.sample.Xs) == 0 { + assert.True(t, math.IsNaN(min), "min value should be NaN") + assert.True(t, math.IsNaN(max), "max value should be NaN") + } else { + assert.Equal(t, tc.min, min, "min value mismatch") + assert.Equal(t, tc.max, max, "max value mismatch") + } + } +} + +func TestVecSum(t *testing.T) { + tt := []struct { + xs []float64 + sum float64 + }{ + {[]float64{15, 20, 35, 40, 50}, 160}, + {[]float64{}, 0}, + } + + for _, tc := range tt { + sum := vecSum(tc.xs) + assert.Equal(t, tc.sum, sum, "sum value mismatch") + } +} + +func TestSampleSum(t *testing.T) { + tt := []struct { + sample Sample + sum float64 + }{ + {Sample{Xs: []float64{15, 20, 35, 40, 50}}, 160}, + {Sample{Xs: []float64{}}, 0}, + } + + for _, tc := range tt { + sum := tc.sample.Sum() + assert.Equal(t, tc.sum, sum, "sum value mismatch") + } +} + +func TestMean(t *testing.T) { + tt := []struct { + xs []float64 + expected float64 + }{ + {[]float64{1, 2, 3, 4, 5}, 3}, + {[]float64{-1, 0, 1}, 0}, + {[]float64{}, math.NaN()}, + {[]float64{10}, 10}, + {[]float64{-2, 2, -2, 2}, 0}, + } + + for _, tc := range tt { + mean := Mean(tc.xs) + + if math.IsNaN(tc.expected) { + assert.True(t, math.IsNaN(mean), 
"Expected NaN") + } else { + assert.Equal(t, tc.expected, mean, "Mean value mismatch") + } + } +} +func TestSampleCopy(t *testing.T) { + s := Sample{Xs: []float64{15, 20, 35, 40, 50}, Sorted: true} + copySample := s.Copy() + + // Modify the original sample and check if the copy remains unchanged + s.Xs[0] = 100 + + assert.NotEqual(t, s.Xs[0], copySample.Xs[0], "Original and copied samples should not share data") + assert.Equal(t, len(s.Xs), len(copySample.Xs), "Length of original and copied samples should be the same") + assert.Equal(t, s.Sorted, copySample.Sorted, "Sorting status should be the same") +} + +func TestSampleFilterOutliers(t *testing.T) { + s := Sample{Xs: []float64{15, 20, 35, 40, 50, 100, 200}} + s.FilterOutliers() + + expected := []float64{15, 20, 35, 40, 50, 100} + assert.Equal(t, expected, s.Xs, "FilterOutliers should remove outliers") +} + +func TestSampleClear(t *testing.T) { + s := Sample{Xs: []float64{15, 20, 35, 40, 50}, Sorted: true} + s.Clear() + + assert.Empty(t, s.Xs, "Clear should reset the sample to contain 0 values") + assert.False(t, s.Sorted, "Sorting status should be false after clearing") +} + +func TestIQR(t *testing.T) { + tt := []struct { + sample Sample + expected float64 + }{ + {Sample{Xs: []float64{15, 20, 35, 40, 50}}, 24.999999999999996}, + {Sample{Xs: []float64{}, Sorted: false}, math.NaN()}, + {Sample{Xs: []float64{15, 0, 0, 40, 50}}, 43.33333333333333}, + {Sample{Xs: []float64{10, 2, 1, 0, 23}}, 13.666666666666663}, + } + + for _, tc := range tt { + iqr := tc.sample.IQR() + + if math.IsNaN(tc.expected) { + assert.True(t, math.IsNaN(iqr)) + } else { + assert.Equal(t, tc.expected, iqr) + } + } +} + +func TestSampleSort(t *testing.T) { + tt := []struct { + sample Sample + expected []float64 + }{ + {Sample{Xs: []float64{15, 20, 35, 40, 50}, Sorted: false}, []float64{15, 20, 35, 40, 50}}, + {Sample{Xs: []float64{}, Sorted: false}, []float64{}}, + {Sample{Xs: []float64{15, 20, 35, 40, 50}, Sorted: true}, []float64{15, 20, 35, 
40, 50}}, + {Sample{Xs: []float64{10, 5, 30, 20, 15}, Sorted: false}, []float64{5, 10, 15, 20, 30}}, + } + + for _, tc := range tt { + sortedSample := tc.sample.Sort() + + assert.Equal(t, tc.expected, sortedSample.Xs, "Sorted values mismatch") + } +} + +func TestGeoMean(t *testing.T) { + tt := []struct { + name string + values []float64 + expected float64 + }{ + { + name: "Valid_case", + values: []float64{2, 4, 8, 16}, + expected: 5.65685424949238, + }, + { + name: "Empty_values", + values: []float64{}, + expected: math.NaN(), + }, + { + name: "Zero_value", + values: []float64{1, 0, 3}, + expected: math.NaN(), + }, + { + name: "Negative_value", + values: []float64{2, -4, 8, 16}, + expected: math.NaN(), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + sample := Sample{Xs: tc.values} + result := sample.GeoMean() + + if math.IsNaN(tc.expected) { + assert.True(t, math.IsNaN(result)) + } else { + assert.Equal(t, tc.expected, result) + } + }) + } +} diff --git a/go/mathstats/tdist_test.go b/go/mathstats/tdist_test.go index b30ba95662b..e243126e47b 100644 --- a/go/mathstats/tdist_test.go +++ b/go/mathstats/tdist_test.go @@ -4,7 +4,12 @@ package mathstats -import "testing" +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" +) func TestT(t *testing.T) { testFunc(t, "PDF(%v|v=1)", TDist{1}.PDF, map[float64]float64{ @@ -93,3 +98,17 @@ func TestT(t *testing.T) { 8: 0.99975354666971372, 9: 0.9998586600128780}) } +func TestCDFNan(t *testing.T) { + tDist := TDist{V: 1} + + result := tDist.CDF(math.NaN()) + assert.True(t, math.IsNaN(result), "CDF(NaN) = %v, expected NaN", result) +} + +func TestBounds_tdist(t *testing.T) { + tDist := TDist{V: 1} + + lower, upper := tDist.Bounds() + assert.Equal(t, -4.0, lower, "Lower bound should be -4") + assert.Equal(t, 4.0, upper, "Upper bound should be 4") +} diff --git a/go/mathstats/ttest_test.go b/go/mathstats/ttest_test.go index 0c9b78fdb9f..9c23a24ec29 100644 --- 
a/go/mathstats/ttest_test.go +++ b/go/mathstats/ttest_test.go @@ -4,7 +4,11 @@ package mathstats -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/assert" +) func TestTTest(t *testing.T) { s1 := Sample{Xs: []float64{2, 1, 3, 4}} @@ -69,3 +73,160 @@ func TestTTest(t *testing.T) { }, 4, 0, 0, 3, 0.5, 1, 0.5) } + +func TestTwoSampleTTestErrors(t *testing.T) { + tt := []struct { + name string + x1 TTestSample + x2 TTestSample + alt LocationHypothesis + err error + }{ + { + name: "One sample size is 0", + x1: &Sample{Xs: []float64{1, 2, 3}}, + x2: &Sample{Xs: []float64{}}, + alt: LocationDiffers, + err: ErrSampleSize, + }, + { + name: "Both sample sizes are 0", + x1: &Sample{Xs: []float64{}}, + x2: &Sample{Xs: []float64{}}, + alt: LocationDiffers, + err: ErrSampleSize, + }, + { + name: "One sample has zero variance", + x1: &Sample{Xs: []float64{1}}, + x2: &Sample{Xs: []float64{1}}, + alt: LocationDiffers, + err: ErrZeroVariance, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result, err := TwoSampleTTest(tc.x1, tc.x2, tc.alt) + assert.Equal(t, tc.err, err) + assert.Nil(t, result) + }) + } +} + +func TestTwoSampleWelchTTestErrors(t *testing.T) { + tt := []struct { + name string + x1 TTestSample + x2 TTestSample + alt LocationHypothesis + err error + }{ + { + name: "One sample size is 1", + x1: &Sample{Xs: []float64{1}}, + x2: &Sample{Xs: []float64{2, 3, 4}}, + alt: LocationDiffers, + err: ErrSampleSize, + }, + { + name: "Both sample sizes are 1", + x1: &Sample{Xs: []float64{1}}, + x2: &Sample{Xs: []float64{2}}, + alt: LocationDiffers, + err: ErrSampleSize, + }, + { + name: "One sample has zero variance", + x1: &Sample{Xs: []float64{1, 1, 1}}, + x2: &Sample{Xs: []float64{2, 2, 2}}, + alt: LocationDiffers, + err: ErrZeroVariance, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result, err := TwoSampleWelchTTest(tc.x1, tc.x2, tc.alt) + assert.Equal(t, tc.err, err) + assert.Nil(t, result) + }) 
+ } +} + +func TestPairedTTestErrors(t *testing.T) { + tt := []struct { + name string + x1 []float64 + x2 []float64 + μ0 float64 + alt LocationHypothesis + err error + }{ + { + name: "Samples have different lengths", + x1: []float64{1, 2, 3}, + x2: []float64{4, 5}, + μ0: 0, + alt: LocationDiffers, + err: ErrMismatchedSamples, + }, + { + name: "Samples have length <= 1", + x1: []float64{1}, + x2: []float64{2}, + μ0: 0, + alt: LocationDiffers, + err: ErrSampleSize, + }, + { + name: "Samples result in zero standard deviation", + x1: []float64{1, 1, 1}, + x2: []float64{1, 1, 1}, + μ0: 0, + alt: LocationDiffers, + err: ErrZeroVariance, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result, err := PairedTTest(tc.x1, tc.x2, tc.μ0, tc.alt) + assert.Equal(t, tc.err, err) + assert.Nil(t, result) + }) + } +} + +func TestOneSampleTTestErrors(t *testing.T) { + tt := []struct { + name string + x TTestSample + μ0 float64 + alt LocationHypothesis + err error + }{ + { + name: "Sample size is 0", + x: &Sample{Xs: []float64{}}, + μ0: 0, + alt: LocationDiffers, + err: ErrSampleSize, + }, + { + name: "Sample has zero variance", + x: &Sample{Xs: []float64{1, 1, 1}}, + μ0: 0, + alt: LocationDiffers, + err: ErrZeroVariance, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result, err := OneSampleTTest(tc.x, tc.μ0, tc.alt) + assert.Equal(t, tc.err, err) + assert.Nil(t, result) + }) + } +} diff --git a/go/mathutil/equivalence_relation_test.go b/go/mathutil/equivalence_relation_test.go index 3873562a080..754e9f792ab 100644 --- a/go/mathutil/equivalence_relation_test.go +++ b/go/mathutil/equivalence_relation_test.go @@ -174,3 +174,154 @@ func TestEquivalenceRelation(t *testing.T) { }) } } + +func TestEquivalenceRelationError(t *testing.T) { + type ttError struct { + name string + element1 string + element2 string + expectedErr string + } + + testsRelateErrorCases := []ttError{ + { + name: "UnknownElementError", + element1: "x", + 
element2: "b", + expectedErr: "unknown element x", + }, + { + name: "UnknownClassError", + element1: "a", + element2: "y", + expectedErr: "unknown element y", + }, + } + + for _, tc := range testsRelateErrorCases { + t.Run(tc.name, func(t *testing.T) { + r := NewEquivalenceRelation() + r.AddAll([]string{"a", "b", "c"}) + + _, err := r.Relate(tc.element1, tc.element2) + assert.Error(t, err) + assert.EqualError(t, err, tc.expectedErr) + }) + } +} + +func TestUnknownElementError(t *testing.T) { + err := &UnknownElementError{element: "test_element"} + + assert.EqualError(t, err, "unknown element test_element") +} + +func TestUnknownClassError(t *testing.T) { + err := &UnknownClassError{class: 42} + + assert.EqualError(t, err, "unknown class 42") +} + +func TestAdd(t *testing.T) { + r := NewEquivalenceRelation() + initialElements := []string{"a", "b", "c"} + + for _, element := range initialElements { + r.Add(element) + } + + for _, element := range initialElements { + class, err := r.ElementClass(element) + require.NoError(t, err) + assert.Contains(t, r.classElementsMap[class], element) + } + + classCounter := r.classCounter + r.Add("a") + assert.Equal(t, classCounter, r.classCounter) +} + +func TestElementClass(t *testing.T) { + r := NewEquivalenceRelation() + element := "test_element" + + _, err := r.ElementClass(element) + assert.Error(t, err) + + r.Add(element) + class, err := r.ElementClass(element) + require.NoError(t, err) + assert.Greater(t, class, -1) +} + +func TestRelated(t *testing.T) { + type tt struct { + name string + relations []string + element1 string + element2 string + expect bool + err error + } + + tests := []tt{ + { + name: "related, same class", + relations: []string{"ab"}, + element1: "a", + element2: "b", + expect: true, + err: nil, + }, + { + name: "related, different classes", + relations: []string{"ab, cd"}, + element1: "a", + element2: "c", + expect: false, + err: nil, + }, + { + name: "related, unknown element", + relations: 
[]string{"ab"}, + element1: "x", + element2: "b", + expect: false, + err: &UnknownElementError{element: "x"}, + }, + { + name: "related, unknown element 2", + relations: []string{"ab"}, + element1: "a", + element2: "y", + expect: false, + err: &UnknownElementError{element: "y"}, + }, + { + name: "related, both elements unknown", + relations: []string{"ab"}, + element1: "x", + element2: "y", + expect: false, + err: &UnknownElementError{element: "x"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + r := NewEquivalenceRelation() + r.AddAll([]string{"a", "b", "c", "d"}) + for _, relation := range tc.relations { + _, err := r.Relate(relation[0:1], relation[1:2]) + require.NoError(t, err) + } + + result, err := r.Related(tc.element1, tc.element2) + if tc.err != nil { + assert.EqualError(t, err, tc.err.Error()) + } else { + assert.Equal(t, tc.expect, result) + } + }) + } +} diff --git a/go/mysql/auth_server_clientcert.go b/go/mysql/auth_server_clientcert.go index 10a01487208..bb0a4028683 100644 --- a/go/mysql/auth_server_clientcert.go +++ b/go/mysql/auth_server_clientcert.go @@ -23,17 +23,8 @@ import ( "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" ) -var clientcertAuthMethod string - -func init() { - servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { - fs.StringVar(&clientcertAuthMethod, "mysql_clientcert_auth_method", string(MysqlClearPassword), "client-side authentication method to use. 
Supported values: mysql_clear_password, dialog.") - }) -} - // AuthServerClientCert implements AuthServer which enforces client side certificates type AuthServerClientCert struct { methods []AuthMethod @@ -41,7 +32,7 @@ type AuthServerClientCert struct { } // InitAuthServerClientCert is public so it can be called from plugin_auth_clientcert.go (go/cmd/vtgate) -func InitAuthServerClientCert() { +func InitAuthServerClientCert(clientcertAuthMethod string) { if pflag.CommandLine.Lookup("mysql_server_ssl_ca").Value.String() == "" { log.Info("Not configuring AuthServerClientCert because mysql_server_ssl_ca is empty") return @@ -50,11 +41,11 @@ func InitAuthServerClientCert() { log.Exitf("Invalid mysql_clientcert_auth_method value: only support mysql_clear_password or dialog") } - ascc := newAuthServerClientCert() + ascc := newAuthServerClientCert(clientcertAuthMethod) RegisterAuthServer("clientcert", ascc) } -func newAuthServerClientCert() *AuthServerClientCert { +func newAuthServerClientCert(clientcertAuthMethod string) *AuthServerClientCert { ascc := &AuthServerClientCert{ Method: AuthMethodDescription(clientcertAuthMethod), } diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go index 28ed19fd9c5..eff92053d94 100644 --- a/go/mysql/auth_server_clientcert_test.go +++ b/go/mysql/auth_server_clientcert_test.go @@ -33,19 +33,13 @@ import ( const clientCertUsername = "Client Cert" -func init() { - // These tests do not invoke the servenv.Parse codepaths, so this default - // does not get set by the OnParseFor hook. - clientcertAuthMethod = string(MysqlClearPassword) -} - func TestValidCert(t *testing.T) { th := &testHandler{} - authServer := newAuthServerClientCert() + authServer := newAuthServerClientCert(string(MysqlClearPassword)) // Create the listener, so we can get its host. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -111,10 +105,10 @@ func TestValidCert(t *testing.T) { func TestNoCert(t *testing.T) { th := &testHandler{} - authServer := newAuthServerClientCert() + authServer := newAuthServerClientCert(string(MysqlClearPassword)) // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go index fae886039f0..6e3a9693c69 100644 --- a/go/mysql/auth_server_static.go +++ b/go/mysql/auth_server_static.go @@ -27,34 +27,15 @@ import ( "syscall" "time" - "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" ) -var ( - mysqlAuthServerStaticFile string - mysqlAuthServerStaticString string - mysqlAuthServerStaticReloadInterval time.Duration - mysqlServerFlushDelay = 100 * time.Millisecond -) - -func init() { - servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { - fs.StringVar(&mysqlAuthServerStaticFile, "mysql_auth_server_static_file", "", "JSON File to read the users/passwords from.") - fs.StringVar(&mysqlAuthServerStaticString, "mysql_auth_server_static_string", "", "JSON representation of the users/passwords config.") - fs.DurationVar(&mysqlAuthServerStaticReloadInterval, "mysql_auth_static_reload_interval", 0, "Ticker to reload credentials") - 
fs.DurationVar(&mysqlServerFlushDelay, "mysql_server_flush_delay", mysqlServerFlushDelay, "Delay after which buffered response will be flushed to the client.") - }) -} - const ( localhostName = "localhost" ) @@ -94,7 +75,7 @@ type AuthServerStaticEntry struct { } // InitAuthServerStatic Handles initializing the AuthServerStatic if necessary. -func InitAuthServerStatic() { +func InitAuthServerStatic(mysqlAuthServerStaticFile, mysqlAuthServerStaticString string, mysqlAuthServerStaticReloadInterval time.Duration) { // Check parameters. if mysqlAuthServerStaticFile == "" && mysqlAuthServerStaticString == "" { // Not configured, nothing to do. diff --git a/go/mysql/auth_server_static_flaky_test.go b/go/mysql/auth_server_static_test.go similarity index 100% rename from go/mysql/auth_server_static_flaky_test.go rename to go/mysql/auth_server_static_test.go diff --git a/go/mysql/binlog/rbr_test.go b/go/mysql/binlog/rbr_test.go index 260af2f3821..1dfaf90a33e 100644 --- a/go/mysql/binlog/rbr_test.go +++ b/go/mysql/binlog/rbr_test.go @@ -78,7 +78,7 @@ func TestCellLengthAndData(t *testing.T) { styp: querypb.Type_UINT32, data: []byte{0x84, 0x83, 0x82, 0x81}, out: sqltypes.MakeTrusted(querypb.Type_UINT32, - []byte(fmt.Sprintf("%v", 0x81828384))), + []byte(fmt.Sprintf("%v", uint32(0x81828384)))), }, { typ: TypeLong, styp: querypb.Type_INT32, diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go index e58cb9b254c..3acf99c2408 100644 --- a/go/mysql/binlog_event.go +++ b/go/mysql/binlog_event.go @@ -19,7 +19,9 @@ package mysql import ( "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -216,6 +218,13 @@ type TableMap struct { // - If the metadata is one byte, only the lower 8 bits are used. // - If the metadata is two bytes, all 16 bits are used. 
Metadata []uint16 + + // ColumnCollationIDs contains information about the inherited + // or implied column default collation and any explicit per-column + // override for text based columns ONLY. This means that the + // array position needs to be mapped to the ordered list of + // text based columns in the table. + ColumnCollationIDs []collations.ID } // Rows contains data from a {WRITE,UPDATE,DELETE}_ROWS_EVENT. diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go index 12d8a54ff97..32401bfa401 100644 --- a/go/mysql/binlog_event_make_test.go +++ b/go/mysql/binlog_event_make_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/binlog" @@ -222,6 +223,7 @@ func TestTableMapEvent(t *testing.T) { 0, 384, // Length of the varchar field. }, + ColumnCollationIDs: []collations.ID{}, } tm.CanBeNull.Set(1, true) tm.CanBeNull.Set(2, true) @@ -258,12 +260,13 @@ func TestLargeTableMapEvent(t *testing.T) { } tm := &TableMap{ - Flags: 0x8090, - Database: "my_database", - Name: "my_table", - Types: types, - CanBeNull: NewServerBitmap(colLen), - Metadata: metadata, + Flags: 0x8090, + Database: "my_database", + Name: "my_table", + Types: types, + CanBeNull: NewServerBitmap(colLen), + Metadata: metadata, + ColumnCollationIDs: []collations.ID{}, } tm.CanBeNull.Set(1, true) tm.CanBeNull.Set(2, true) diff --git a/go/mysql/binlog_event_rbr.go b/go/mysql/binlog_event_rbr.go index 58777d4cfba..64d17c2b306 100644 --- a/go/mysql/binlog_event_rbr.go +++ b/go/mysql/binlog_event_rbr.go @@ -20,12 +20,35 @@ import ( "encoding/binary" "vitess.io/vitess/go/mysql/binlog" - "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +// These are the 
TABLE_MAP_EVENT's optional metadata field types from +// MySQL's libbinlogevents/include/rows_event.h. +// See also: https://dev.mysql.com/doc/dev/mysql-server/8.0.34/structbinary__log_1_1Table__map__event_1_1Optional__metadata__fields.html +const ( + tableMapSignedness uint8 = iota + 1 + tableMapDefaultCharset + tableMapColumnCharset + tableMapColumnName + tableMapSetStrValue + tableMapEnumStrValue + tableMapGeometryType + tableMapSimplePrimaryKey + tableMapPrimaryKeyWithPrefix + tableMapEnumAndSetDefaultCharset + tableMapEnumAndSetColumnCharset + tableMapColumnVisibility +) + +// This byte in the optional metadata indicates that we should +// read the next 2 bytes as a collation ID. +const readTwoByteCollationID = 252 + // TableMap implements BinlogEvent.TableMap(). // // Expected format (L = total length of event data): @@ -43,6 +66,7 @@ import ( // cc column-def, one byte per column // column-meta-def (var-len encoded string) // n NULL-bitmask, length: (cc + 7) / 8 +// n Optional Metadata func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { data := ev.Bytes()[f.HeaderLength:] @@ -64,7 +88,7 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { columnCount, read, ok := readLenEncInt(data, pos) if !ok { - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected column count at position %v (data=%v)", pos, data) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected column count at position %v (data=%v)", pos, data) } pos = read @@ -73,7 +97,7 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { metaLen, read, ok := readLenEncInt(data, pos) if !ok { - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected metadata length at position %v (data=%v)", pos, data) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected metadata length at position %v (data=%v)", pos, data) } pos = read @@ -88,11 +112,19 @@ func (ev binlogEvent) TableMap(f BinlogFormat) (*TableMap, error) { } } if pos != expectedEnd { 
- return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected metadata end: got %v was expecting %v (data=%v)", pos, expectedEnd, data) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected metadata end: got %v was expecting %v (data=%v)", pos, expectedEnd, data) } // A bit array that says if each column can be NULL. - result.CanBeNull, _ = newBitmap(data, pos, int(columnCount)) + result.CanBeNull, read = newBitmap(data, pos, int(columnCount)) + pos = read + + // Read any text based column collation values provided in the optional metadata. + // The binlog_row_metadata only contains this info for text based columns. + var err error + if result.ColumnCollationIDs, err = readColumnCollationIDs(data, pos, int(columnCount)); err != nil { + return nil, err + } return result, nil } @@ -118,7 +150,7 @@ func metadataLength(typ byte) int { default: // Unknown type. This is used in tests only, so panic. - panic(vterrors.Errorf(vtrpc.Code_INTERNAL, "metadataLength: unhandled data type: %v", typ)) + panic(vterrors.Errorf(vtrpcpb.Code_INTERNAL, "metadataLength: unhandled data type: %v", typ)) } } @@ -154,7 +186,7 @@ func metadataRead(data []byte, pos int, typ byte) (uint16, int, error) { default: // Unknown types, we can't go on. - return 0, 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "metadataRead: unhandled data type: %v", typ) + return 0, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "metadataRead: unhandled data type: %v", typ) } } @@ -185,8 +217,45 @@ func metadataWrite(data []byte, pos int, typ byte, value uint16) int { default: // Unknown type. This is used in tests only, so panic. - panic(vterrors.Errorf(vtrpc.Code_INTERNAL, "metadataRead: unhandled data type: %v", typ)) + panic(vterrors.Errorf(vtrpcpb.Code_INTERNAL, "metadataRead: unhandled data type: %v", typ)) + } +} + +// readColumnCollationIDs reads from the optional metadata that exists. 
+// See: https://github.com/mysql/mysql-server/blob/8.0/libbinlogevents/include/rows_event.h +// What's included depends on the server configuration: +// https://dev.mysql.com/doc/refman/en/replication-options-binary-log.html#sysvar_binlog_row_metadata +// and the table definition. +// We only care about any collation IDs in the optional metadata and +// this info is provided in all binlog_row_metadata formats. Note that +// this info is only provided for text based columns. +func readColumnCollationIDs(data []byte, pos, count int) ([]collations.ID, error) { + collationIDs := make([]collations.ID, 0, count) + for pos < len(data) { + fieldType := uint8(data[pos]) + pos++ + + fieldLen, read, ok := readLenEncInt(data, pos) + if !ok { + return nil, vterrors.New(vtrpcpb.Code_INTERNAL, "error reading optional metadata field length") + } + pos = read + + fieldVal := data[pos : pos+int(fieldLen)] + pos += int(fieldLen) + + if fieldType == tableMapDefaultCharset || fieldType == tableMapColumnCharset { // It's one or the other + for i := uint64(0); i < fieldLen; i++ { + v := uint16(fieldVal[i]) + if v == readTwoByteCollationID { // The ID is the subsequent 2 bytes + v = binary.LittleEndian.Uint16(fieldVal[i+1 : i+3]) + i += 2 + } + collationIDs = append(collationIDs, collations.ID(v)) + } + } } + return collationIDs, nil } // Rows implements BinlogEvent.TableMap(). 
@@ -235,7 +304,7 @@ func (ev binlogEvent) Rows(f BinlogFormat, tm *TableMap) (Rows, error) { columnCount, read, ok := readLenEncInt(data, pos) if !ok { - return result, vterrors.Errorf(vtrpc.Code_INTERNAL, "expected column count at position %v (data=%v)", pos, data) + return result, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "expected column count at position %v (data=%v)", pos, data) } pos = read diff --git a/go/mysql/capabilities/capability.go b/go/mysql/capabilities/capability.go new file mode 100644 index 00000000000..234707538ec --- /dev/null +++ b/go/mysql/capabilities/capability.go @@ -0,0 +1,135 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package capabilities + +import ( + "strconv" + "strings" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +var ( + ErrUnspecifiedServerVersion = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "server version unspecified") +) + +type FlavorCapability int + +const ( + NoneFlavorCapability FlavorCapability = iota // default placeholder + FastDropTableFlavorCapability // supported in MySQL 8.0.23 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-23.html + TransactionalGtidExecutedFlavorCapability // + InstantDDLFlavorCapability // ALGORITHM=INSTANT general support + InstantAddLastColumnFlavorCapability // + InstantAddDropVirtualColumnFlavorCapability // + InstantAddDropColumnFlavorCapability // Adding/dropping column in any position/ordinal. 
+ InstantChangeColumnDefaultFlavorCapability // + InstantExpandEnumCapability // + MySQLJSONFlavorCapability // JSON type supported + MySQLUpgradeInServerFlavorCapability // + DynamicRedoLogCapacityFlavorCapability // supported in MySQL 8.0.30 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-30.html + DisableRedoLogFlavorCapability // supported in MySQL 8.0.21 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-21.html + CheckConstraintsCapability // supported in MySQL 8.0.16 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-16.html + PerformanceSchemaDataLocksTableCapability // supported in MySQL 8.0.1 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-1.html + InstantDDLXtrabackupCapability // Supported in 8.0.32 and above, solving a MySQL-vs-Xtrabackup bug starting 8.0.29 + ReplicaTerminologyCapability // Supported in 8.0.26 and above, using SHOW REPLICA STATUS and all variations. +) + +type CapableOf func(capability FlavorCapability) (bool, error) + +// ServerVersionAtLeast returns true if current server is at least given value. +// Example: if input is []int{8, 0, 23}... the function returns 'true' if we're +// on MySQL 8.0.23, 8.0.24, ... +func ServerVersionAtLeast(serverVersion string, parts ...int) (bool, error) { + if serverVersion == "" { + return false, ErrUnspecifiedServerVersion + } + versionPrefix := strings.Split(serverVersion, "-")[0] + versionTokens := strings.Split(versionPrefix, ".") + for i, part := range parts { + if len(versionTokens) <= i { + return false, nil + } + tokenValue, err := strconv.Atoi(versionTokens[i]) + if err != nil { + return false, err + } + if tokenValue > part { + return true, nil + } + if tokenValue < part { + return false, nil + } + } + return true, nil +} + +// MySQLVersionHasCapability is specific to MySQL flavors (of all versions) and answers whether +// the given server version has the requested capability. 
+func MySQLVersionHasCapability(serverVersion string, capability FlavorCapability) (bool, error) { + atLeast := func(parts ...int) (bool, error) { + return ServerVersionAtLeast(serverVersion, parts...) + } + // Capabilities sorted by version. + switch capability { + case MySQLJSONFlavorCapability: + return atLeast(5, 7, 0) + case InstantDDLFlavorCapability, + InstantExpandEnumCapability, + InstantAddLastColumnFlavorCapability, + InstantAddDropVirtualColumnFlavorCapability, + InstantChangeColumnDefaultFlavorCapability: + return atLeast(8, 0, 0) + case PerformanceSchemaDataLocksTableCapability: + return atLeast(8, 0, 1) + case MySQLUpgradeInServerFlavorCapability: + return atLeast(8, 0, 16) + case CheckConstraintsCapability: + return atLeast(8, 0, 16) + case TransactionalGtidExecutedFlavorCapability: + return atLeast(8, 0, 17) + case DisableRedoLogFlavorCapability: + return atLeast(8, 0, 21) + case FastDropTableFlavorCapability: + return atLeast(8, 0, 23) + case InstantAddDropColumnFlavorCapability: + return atLeast(8, 0, 29) + case DynamicRedoLogCapacityFlavorCapability: + return atLeast(8, 0, 30) + case InstantDDLXtrabackupCapability: + return atLeast(8, 0, 32) + case ReplicaTerminologyCapability: + // In MySQL 8.0.22 the new replica syntax was introduced, but other changes + // like the log_replica_updates field was only present in 8.0.26 and newer. + // So be conservative here, and only use the new syntax on newer versions, + // so we don't have to have too many different flavors. 
+ return atLeast(8, 0, 26) + default: + return false, nil + } +} + +// MySQLVersionCapableOf returns a CapableOf function specific to MySQL flavors +func MySQLVersionCapableOf(serverVersion string) CapableOf { + if serverVersion == "" { + return nil + } + return func(capability FlavorCapability) (bool, error) { + return MySQLVersionHasCapability(serverVersion, capability) + } +} diff --git a/go/mysql/capabilities/capability_test.go b/go/mysql/capabilities/capability_test.go new file mode 100644 index 00000000000..6e96c3487f5 --- /dev/null +++ b/go/mysql/capabilities/capability_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package capabilities + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestServerVersionAtLeast(t *testing.T) { + testcases := []struct { + version string + parts []int + expect bool + expectError bool + }{ + { + version: "8.0.14", + parts: []int{8, 0, 14}, + expect: true, + }, + { + version: "8.0.14-log", + parts: []int{8, 0, 14}, + expect: true, + }, + { + version: "8.0.14", + parts: []int{8, 0, 13}, + expect: true, + }, + { + version: "8.0.14-log", + parts: []int{8, 0, 13}, + expect: true, + }, + { + version: "8.0.14", + parts: []int{8, 0, 15}, + expect: false, + }, + { + version: "8.0.14-log", + parts: []int{8, 0, 15}, + expect: false, + }, + { + version: "8.0.14", + parts: []int{7, 5, 20}, + expect: true, + }, + { + version: "8.0.14", + parts: []int{7, 5}, + expect: true, + }, + { + version: "8.0.14", + parts: []int{5, 7}, + expect: true, + }, + { + version: "8.0.14-log", + parts: []int{7, 5, 20}, + expect: true, + }, + { + version: "8.0.14", + parts: []int{8, 1, 2}, + expect: false, + }, + { + version: "8.0.14", + parts: []int{10, 1, 2}, + expect: false, + }, + { + version: "8.0", + parts: []int{8, 0, 14}, + expect: false, + }, + { + version: "8.0.x", + parts: []int{8, 0, 14}, + expectError: true, + }, + { + version: "", + parts: []int{8, 0, 14}, + expectError: true, + }, + } + for _, tc := range testcases { + result, err := ServerVersionAtLeast(tc.version, tc.parts...) 
+ if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expect, result) + } + } +} + +func TestMySQLVersionCapableOf(t *testing.T) { + testcases := []struct { + version string + capability FlavorCapability + isCapable bool + expectNil bool + }{ + { + version: "8.0.14", + capability: InstantDDLFlavorCapability, + isCapable: true, + }, + { + version: "8.0.20", + capability: TransactionalGtidExecutedFlavorCapability, + isCapable: true, + }, + { + version: "8.0.0", + capability: InstantAddLastColumnFlavorCapability, + isCapable: true, + }, + { + version: "8.0.0", + capability: InstantAddDropColumnFlavorCapability, + isCapable: false, + }, + { + version: "5.6.7", + capability: InstantDDLFlavorCapability, + isCapable: false, + }, + { + version: "5.7.29", + capability: TransactionalGtidExecutedFlavorCapability, + isCapable: false, + }, + { + version: "5.6.7", + capability: MySQLJSONFlavorCapability, + isCapable: false, + }, + { + version: "5.7.29", + capability: MySQLJSONFlavorCapability, + isCapable: true, + }, + { + version: "8.0.30", + capability: DynamicRedoLogCapacityFlavorCapability, + isCapable: true, + }, + { + version: "8.0.29", + capability: DynamicRedoLogCapacityFlavorCapability, + isCapable: false, + }, + { + version: "5.7.38", + capability: DynamicRedoLogCapacityFlavorCapability, + isCapable: false, + }, + { + version: "8.0.21", + capability: DisableRedoLogFlavorCapability, + isCapable: true, + }, + { + version: "8.0.20", + capability: DisableRedoLogFlavorCapability, + isCapable: false, + }, + { + version: "8.0.15", + capability: CheckConstraintsCapability, + isCapable: false, + }, + { + version: "8.0.15-log", + capability: CheckConstraintsCapability, + isCapable: false, + }, + { + version: "8.0.20", + capability: CheckConstraintsCapability, + isCapable: true, + }, + { + version: "8.0.20-log", + capability: CheckConstraintsCapability, + isCapable: true, + }, + { + version: "5.7.38", + capability: 
PerformanceSchemaDataLocksTableCapability, + isCapable: false, + }, + { + version: "8.0", + capability: PerformanceSchemaDataLocksTableCapability, + isCapable: false, + }, + { + version: "8.0.0", + capability: PerformanceSchemaDataLocksTableCapability, + isCapable: false, + }, + { + version: "8.0.20", + capability: PerformanceSchemaDataLocksTableCapability, + isCapable: true, + }, + { + version: "8.0.29", + capability: InstantDDLXtrabackupCapability, + isCapable: false, + }, + { + version: "8.0.32", + capability: InstantDDLXtrabackupCapability, + isCapable: true, + }, + { + // What happens if server version is unspecified + version: "", + capability: CheckConstraintsCapability, + isCapable: false, + expectNil: true, + }, + { + // Some ridiculous version. But seeing that we force the question on a MySQLVersionCapableOf + // then this far futuristic version should actually work. + version: "5914.234.17", + capability: CheckConstraintsCapability, + isCapable: true, + }, + } + for _, tc := range testcases { + name := fmt.Sprintf("%s %v", tc.version, tc.capability) + t.Run(name, func(t *testing.T) { + capableOf := MySQLVersionCapableOf(tc.version) + if tc.expectNil { + assert.Nil(t, capableOf) + return + } + isCapable, err := capableOf(tc.capability) + assert.NoError(t, err) + assert.Equal(t, tc.isCapable, isCapable) + }) + } +} diff --git a/go/mysql/client.go b/go/mysql/client.go index c4dd87d95cc..16740bf38db 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -106,7 +106,7 @@ func Connect(ctx context.Context, params *ConnParams) (*Conn, error) { } // Send the connection back, so the other side can close it. 
- c := newConn(conn) + c := newConn(conn, params.FlushDelay, params.TruncateErrLen) status <- connectResult{ c: c, } @@ -229,11 +229,6 @@ func (c *Conn) clientHandshake(params *ConnParams) error { c.Capabilities = capabilities & (CapabilityClientDeprecateEOF) } - charset, err := collations.Local().ParseConnectionCharset(params.Charset) - if err != nil { - return err - } - // Handle switch to SSL if necessary. if params.SslEnabled() { // If client asked for SSL, but server doesn't support it, @@ -270,7 +265,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { } // Send the SSLRequest packet. - if err := c.writeSSLRequest(capabilities, charset, params); err != nil { + if err := c.writeSSLRequest(capabilities, uint8(params.Charset), params); err != nil { return err } @@ -302,7 +297,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Build and send our handshake response 41. // Note this one will never have SSL flag on. - if err := c.writeHandshakeResponse41(capabilities, scrambledPassword, charset, params); err != nil { + if err := c.writeHandshakeResponse41(capabilities, scrambledPassword, uint8(params.Charset), params); err != nil { return err } diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go index c349cdcd531..da577e338b2 100644 --- a/go/mysql/client_test.go +++ b/go/mysql/client_test.go @@ -151,7 +151,7 @@ func TestTLSClientDisabled(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() @@ -223,7 +223,7 @@ func TestTLSClientPreferredDefault(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() @@ -296,7 +296,7 @@ func TestTLSClientRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() @@ -343,7 +343,7 @@ func TestTLSClientVerifyCA(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() @@ -426,7 +426,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() diff --git a/go/mysql/collations/cached_size.go b/go/mysql/collations/cached_size.go new file mode 100644 index 00000000000..630bf41230a --- /dev/null +++ b/go/mysql/collations/cached_size.go @@ -0,0 +1,111 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. 
+ +package collations + +import ( + "math" + "reflect" + "unsafe" + + hack "vitess.io/vitess/go/hack" +) + +//go:nocheckptr +func (cached *Environment) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field byName map[string]vitess.io/vitess/go/mysql/collations.ID + if cached.byName != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byName) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.byName) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for k := range cached.byName { + size += hack.RuntimeAllocSize(int64(len(k))) + } + } + // field byCharset map[string]*vitess.io/vitess/go/mysql/collations.colldefaults + if cached.byCharset != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byCharset) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) + if len(cached.byCharset) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 208)) + } + for k, v := range cached.byCharset { + size += hack.RuntimeAllocSize(int64(len(k))) + if v != nil { + size += hack.RuntimeAllocSize(int64(4)) + } + } + } + // field byCharsetName map[vitess.io/vitess/go/mysql/collations.ID]string + if cached.byCharsetName != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byCharsetName) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if 
len(cached.byCharsetName) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for _, v := range cached.byCharsetName { + size += hack.RuntimeAllocSize(int64(len(v))) + } + } + // field unsupported map[string]vitess.io/vitess/go/mysql/collations.ID + if cached.unsupported != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.unsupported) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.unsupported) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for k := range cached.unsupported { + size += hack.RuntimeAllocSize(int64(len(k))) + } + } + // field byID map[vitess.io/vitess/go/mysql/collations.ID]string + if cached.byID != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byID) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.byID) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for _, v := range cached.byID { + size += hack.RuntimeAllocSize(int64(len(v))) + } + } + return size +} diff --git a/go/mysql/collations/charset/charset_test.go b/go/mysql/collations/charset/charset_test.go new file mode 100644 index 00000000000..a961e37c967 --- /dev/null +++ b/go/mysql/collations/charset/charset_test.go @@ -0,0 +1,108 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package charset + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIsMultibyteByName(t *testing.T) { + testCases := []struct { + csname string + want bool + }{ + {"euckr", true}, + {"gb2312", true}, + {"sjis", true}, + {"cp932", true}, + {"eucjpms", true}, + {"ujis", true}, + {"utf16", false}, + {"latin1", false}, + {"binary", false}, + } + + for _, tc := range testCases { + t.Run(tc.csname, func(t *testing.T) { + assert.Equal(t, tc.want, IsMultibyteByName(tc.csname)) + }) + } +} + +func TestIsUnicode(t *testing.T) { + testCases := []struct { + cs Charset + want bool + }{ + {Charset_utf8mb3{}, true}, + {Charset_utf8mb4{}, true}, + {Charset_utf16{}, true}, + {Charset_utf16le{}, true}, + {Charset_ucs2{}, true}, + {Charset_utf32{}, true}, + {&testCharset1{}, false}, + } + + for _, tc := range testCases { + t.Run(tc.cs.Name(), func(t *testing.T) { + assert.Equal(t, tc.want, IsUnicode(tc.cs)) + }) + } +} + +func TestIsUnicodeByName(t *testing.T) { + testCases := []struct { + csname string + want bool + }{ + {"utf8", true}, + {"utf8mb3", true}, + {"utf8mb4", true}, + {"utf16", true}, + {"utf16le", true}, + {"ucs2", true}, + {"utf32", true}, + {"binary", false}, + } + + for _, tc := range testCases { + t.Run(tc.csname, func(t *testing.T) { + assert.Equal(t, tc.want, IsUnicodeByName(tc.csname)) + }) + } +} + +func TestIsBackslashSafe(t *testing.T) { + testCases := []struct { + cs Charset + want bool + }{ + {Charset_sjis{}, false}, + {Charset_cp932{}, false}, + {Charset_gb18030{}, false}, + {Charset_utf16le{}, true}, + {&testCharset1{}, true}, + 
} + + for _, tc := range testCases { + t.Run(tc.cs.Name(), func(t *testing.T) { + assert.Equal(t, tc.want, IsBackslashSafe(tc.cs)) + }) + } +} diff --git a/go/mysql/collations/charset/convert.go b/go/mysql/collations/charset/convert.go index bc51e9b8377..261ef7c9b4a 100644 --- a/go/mysql/collations/charset/convert.go +++ b/go/mysql/collations/charset/convert.go @@ -72,7 +72,7 @@ func convertSlow(dst []byte, dstCharset Charset, src []byte, srcCharset Charset) for len(src) > 0 { cp, width := srcCharset.DecodeRune(src) - if cp == utf8.RuneError && width < 3 { + if cp == utf8.RuneError { failed++ cp = '?' } diff --git a/go/mysql/collations/charset/convert_test.go b/go/mysql/collations/charset/convert_test.go new file mode 100644 index 00000000000..df44f961743 --- /dev/null +++ b/go/mysql/collations/charset/convert_test.go @@ -0,0 +1,362 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package charset + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type testCharset1 struct{} + +func (c *testCharset1) Name() string { + return "testCharset1" +} + +func (c *testCharset1) SupportsSupplementaryChars() bool { + return true +} + +func (c *testCharset1) IsSuperset(other Charset) bool { + return true +} + +func (c *testCharset1) MaxWidth() int { + return 1 +} + +func (c *testCharset1) EncodeRune([]byte, rune) int { + return 0 +} + +func (c *testCharset1) DecodeRune(bytes []byte) (rune, int) { + if len(bytes) < 1 { + return RuneError, 0 + } + return 1, 1 +} + +type testCharset2 struct{} + +func (c *testCharset2) Name() string { + return "testCharset2" +} + +func (c *testCharset2) SupportsSupplementaryChars() bool { + return true +} + +func (c *testCharset2) IsSuperset(other Charset) bool { + return false +} + +func (c *testCharset2) MaxWidth() int { + return 1 +} + +func (c *testCharset2) EncodeRune([]byte, rune) int { + return 0 +} + +func (c *testCharset2) DecodeRune(bytes []byte) (rune, int) { + if len(bytes) < 1 { + return RuneError, 0 + } + return rune(bytes[0]), 1 +} + +func (c *testCharset2) Convert(_, src []byte, from Charset) ([]byte, error) { + return src, nil +} + +func TestConvert(t *testing.T) { + testCases := []struct { + src []byte + srcCharset Charset + dst []byte + dstCharset Charset + want []byte + err string + }{ + { + src: []byte("testSrc"), + srcCharset: Charset_utf8mb3{}, + dst: []byte("testDst"), + dstCharset: Charset_utf8mb4{}, + want: []byte("testDsttestSrc"), + }, + { + src: []byte("testSrc"), + srcCharset: Charset_utf8mb3{}, + dst: nil, + dstCharset: Charset_utf8mb4{}, + want: []byte("testSrc"), + }, + { + src: []byte("testSrc"), + srcCharset: Charset_utf8mb4{}, + dst: nil, + dstCharset: Charset_utf8mb3{}, + want: []byte("testSrc"), + }, + { + src: []byte("testSrc"), + srcCharset: Charset_utf8mb4{}, + dst: []byte("testDst"), + dstCharset: Charset_utf8mb3{}, + want: []byte("testDsttestSrc"), + }, + { 
+ src: []byte("😊😂🤢"), + srcCharset: Charset_utf8mb4{}, + dst: []byte("testDst"), + dstCharset: Charset_utf8mb3{}, + want: []byte("testDst???"), + err: "Cannot convert string", + }, + { + src: []byte("testSrc"), + srcCharset: Charset_binary{}, + dst: []byte("testDst"), + dstCharset: Charset_utf8mb3{}, + want: []byte("testDsttestSrc"), + }, + { + src: []byte{00, 65, 00, 66}, + srcCharset: Charset_ucs2{}, + dst: []byte("testDst"), + dstCharset: Charset_utf8mb3{}, + want: []byte("testDstAB"), + }, + { + src: []byte{00, 65, 00, 66}, + srcCharset: Charset_ucs2{}, + dst: nil, + dstCharset: Charset_utf8mb3{}, + want: []byte("AB"), + }, + { + src: []byte("😊😂🤢"), + srcCharset: Charset_utf8mb3{}, + dst: nil, + dstCharset: &testCharset2{}, + want: []byte("😊😂🤢"), + }, + } + + for _, tc := range testCases { + res, err := Convert(tc.dst, tc.dstCharset, tc.src, tc.srcCharset) + + if tc.err != "" { + assert.ErrorContains(t, err, tc.err) + assert.Equal(t, tc.want, res) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.want, res) + } + } +} + +func TestExpand(t *testing.T) { + testCases := []struct { + dst []rune + src []byte + srcCharset Charset + want []rune + }{ + { + dst: []rune("testDst"), + src: []byte("testSrc"), + srcCharset: Charset_utf8mb3{}, + want: []rune("testSrc"), + }, + { + dst: nil, + src: []byte("testSrc"), + srcCharset: Charset_utf8mb3{}, + want: []rune("testSrc"), + }, + { + dst: nil, + src: []byte("testSrc"), + srcCharset: Charset_binary{}, + want: []rune("testSrc"), + }, + { + dst: []rune("testDst"), + src: []byte("testSrc"), + srcCharset: Charset_binary{}, + want: []rune("testDsttestSrc"), + }, + { + dst: []rune("testDst"), + src: []byte{0, 0, 0, 0x41}, + srcCharset: Charset_utf32{}, + want: []rune("testDstA"), + }, + { + dst: nil, + src: []byte{0xFF}, + srcCharset: Charset_latin1{}, + want: []rune("ÿ"), + }, + // multibyte case + { + dst: []rune("testDst"), + src: []byte("😊😂🤢"), + srcCharset: Charset_utf8mb4{}, + want: []rune("😊😂🤢"), + }, + } + + for _, 
tc := range testCases { + res := Expand(tc.dst, tc.src, tc.srcCharset) + + assert.Equal(t, tc.want, res) + } +} + +func TestCollapse(t *testing.T) { + testCases := []struct { + dst []byte + src []rune + dstCharset Charset + want []byte + }{ + { + dst: []byte("testDst"), + src: []rune("testSrc"), + dstCharset: Charset_utf8mb3{}, + want: []byte("testDsttestSrc"), + }, + { + dst: nil, + src: []rune("testSrc"), + dstCharset: Charset_utf8mb3{}, + want: []byte("testSrc"), + }, + { + dst: []byte("testDst"), + src: []rune("testSrc"), + dstCharset: Charset_utf8mb4{}, + want: []byte("testDsttestSrc"), + }, + { + dst: []byte("testDst"), + src: []rune("testSrc"), + dstCharset: Charset_binary{}, + want: []byte("testDsttestSrc"), + }, + { + dst: nil, + src: []rune("testSrc"), + dstCharset: Charset_binary{}, + want: []byte("testSrc"), + }, + { + dst: []byte("dst"), + src: []rune("src"), + dstCharset: Charset_ucs2{}, + want: []byte{100, 115, 116, 0, 115, 0, 114, 0, 99}, + }, + { + dst: nil, + src: []rune("src"), + dstCharset: Charset_ucs2{}, + want: []byte{0, 115, 0, 114, 0, 99}, + }, + // unsupported encoding case + { + dst: nil, + src: []rune{0xffff1}, + dstCharset: Charset_ucs2{}, + want: []byte{0, 63}, + }, + } + + for _, tc := range testCases { + res := Collapse(tc.dst, tc.src, tc.dstCharset) + + assert.Equal(t, tc.want, res) + } +} + +func TestConvertFromUTF8(t *testing.T) { + dst := []byte("dst") + src := []byte("😊😂🤢") + + res, err := ConvertFromUTF8(dst, Charset_utf8mb4{}, src) + assert.NoError(t, err) + assert.Equal(t, []byte("dst😊😂🤢"), res) + + res, err = ConvertFromUTF8(dst, Charset_utf8mb3{}, src) + assert.ErrorContains(t, err, "Cannot convert string") + assert.Equal(t, []byte("dst???"), res) +} + +func TestConvertFromBinary(t *testing.T) { + testCases := []struct { + dst []byte + cs Charset + in []byte + want []byte + err string + }{ + { + dst: []byte("testDst"), + cs: Charset_utf8mb4{}, + in: []byte("testString"), + want: []byte("testDsttestString"), + }, + { + cs: 
Charset_utf16le{}, + in: []byte("testForOddLen"), + want: append([]byte{0}, []byte("testForOddLen")...), + }, + { + cs: Charset_utf16{}, + in: []byte("testForEvenLen"), + want: []byte("testForEvenLen"), + }, + // multibyte case + { + dst: []byte("testDst"), + cs: Charset_utf8mb4{}, + in: []byte("😊😂🤢"), + want: []byte("testDst😊😂🤢"), + }, + // unsuppported encoding case + { + cs: Charset_utf32{}, + in: []byte{0xff}, + err: "Cannot convert string", + }, + } + + for _, tc := range testCases { + got, err := ConvertFromBinary(tc.dst, tc.cs, tc.in) + + if tc.want == nil { + assert.ErrorContains(t, err, tc.err) + assert.Nil(t, got) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + } + } +} diff --git a/go/mysql/collations/charset/eightbit/8bit.go b/go/mysql/collations/charset/eightbit/8bit.go index 5bd930c61cb..12630749d5d 100644 --- a/go/mysql/collations/charset/eightbit/8bit.go +++ b/go/mysql/collations/charset/eightbit/8bit.go @@ -81,3 +81,17 @@ func (Charset_8bit) Length(src []byte) int { func (Charset_8bit) MaxWidth() int { return 1 } + +func (Charset_8bit) Slice(src []byte, from, to int) []byte { + if from >= len(src) { + return nil + } + if to > len(src) { + to = len(src) + } + return src[from:to] +} + +func (Charset_8bit) Validate(src []byte) bool { + return true +} diff --git a/go/mysql/collations/charset/eightbit/binary.go b/go/mysql/collations/charset/eightbit/binary.go index 44824bbc342..fa36fcf66a5 100644 --- a/go/mysql/collations/charset/eightbit/binary.go +++ b/go/mysql/collations/charset/eightbit/binary.go @@ -62,3 +62,17 @@ func (Charset_binary) Length(src []byte) int { func (Charset_binary) MaxWidth() int { return 1 } + +func (Charset_binary) Slice(src []byte, from, to int) []byte { + if from >= len(src) { + return nil + } + if to > len(src) { + to = len(src) + } + return src[from:to] +} + +func (Charset_binary) Validate(src []byte) bool { + return true +} diff --git a/go/mysql/collations/charset/eightbit/latin1.go 
b/go/mysql/collations/charset/eightbit/latin1.go index 67fa07c62c2..f32b4523a18 100644 --- a/go/mysql/collations/charset/eightbit/latin1.go +++ b/go/mysql/collations/charset/eightbit/latin1.go @@ -230,3 +230,17 @@ func (Charset_latin1) Length(src []byte) int { func (Charset_latin1) MaxWidth() int { return 1 } + +func (Charset_latin1) Slice(src []byte, from, to int) []byte { + if from >= len(src) { + return nil + } + if to > len(src) { + to = len(src) + } + return src[from:to] +} + +func (Charset_latin1) Validate(src []byte) bool { + return true +} diff --git a/go/mysql/collations/charset/helpers.go b/go/mysql/collations/charset/helpers.go index 851ce4bebf9..b66a6c77b87 100644 --- a/go/mysql/collations/charset/helpers.go +++ b/go/mysql/collations/charset/helpers.go @@ -41,7 +41,7 @@ func Validate(charset Charset, input []byte) bool { } for len(input) > 0 { r, size := charset.DecodeRune(input) - if r == RuneError && size < 2 { + if r == RuneError { return false } input = input[size:] diff --git a/go/mysql/collations/charset/helpers_test.go b/go/mysql/collations/charset/helpers_test.go new file mode 100644 index 00000000000..4f8d367e880 --- /dev/null +++ b/go/mysql/collations/charset/helpers_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package charset + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSlice(t *testing.T) { + testCases := []struct { + in []byte + cs Charset + from int + to int + want []byte + }{ + { + in: []byte("testString"), + cs: Charset_binary{}, + from: 1, + to: 4, + want: []byte("est"), + }, + { + in: []byte("testString"), + cs: &testCharset1{}, + from: 2, + to: 5, + want: []byte("stS"), + }, + { + in: []byte("testString"), + cs: &testCharset1{}, + from: 2, + to: 20, + want: []byte("stString"), + }, + // Multibyte cases + { + in: []byte("😊😂🤢"), + cs: Charset_utf8mb4{}, + from: 1, + to: 3, + want: []byte("😂🤢"), + }, + { + in: []byte("😊😂🤢"), + cs: Charset_utf8mb4{}, + from: -2, + to: 4, + want: []byte("😊😂🤢"), + }, + } + + for _, tc := range testCases { + s := Slice(tc.cs, tc.in, tc.from, tc.to) + assert.Equal(t, tc.want, s) + } +} + +func TestValidate(t *testing.T) { + in := "testString" + ok := Validate(Charset_binary{}, []byte(in)) + assert.True(t, ok, "%q should be valid for binary charset", in) + + ok = Validate(&testCharset1{}, nil) + assert.True(t, ok, "Validate should return true for empty string irrespective of charset") + + ok = Validate(&testCharset1{}, []byte(in)) + assert.True(t, ok, "%q should be valid for testCharset1", in) + + ok = Validate(Charset_utf16le{}, []byte{0x41}) + assert.False(t, ok, "%v should not be valid for utf16le charset", []byte{0x41}) +} + +func TestLength(t *testing.T) { + testCases := []struct { + in []byte + cs Charset + want int + }{ + {[]byte("testString"), Charset_binary{}, 10}, + {[]byte("testString"), &testCharset1{}, 10}, + // Multibyte cases + {[]byte("😊😂🤢"), Charset_utf8mb4{}, 3}, + {[]byte("한국어 시험"), Charset_utf8mb4{}, 6}, + } + + for _, tc := range testCases { + l := Length(tc.cs, tc.in) + assert.Equal(t, tc.want, l) + } +} diff --git a/go/mysql/collations/charset/unicode/utf16.go b/go/mysql/collations/charset/unicode/utf16.go index eb055db7382..924c12be7b7 100644 --- 
a/go/mysql/collations/charset/unicode/utf16.go +++ b/go/mysql/collations/charset/unicode/utf16.go @@ -67,7 +67,7 @@ func (Charset_utf16be) EncodeRune(dst []byte, r rune) int { func (Charset_utf16be) DecodeRune(b []byte) (rune, int) { if len(b) < 2 { - return utf8.RuneError, 0 + return utf8.RuneError, len(b) } r1 := uint16(b[1]) | uint16(b[0])<<8 @@ -129,7 +129,7 @@ func (Charset_utf16le) EncodeRune(dst []byte, r rune) int { func (Charset_utf16le) DecodeRune(b []byte) (rune, int) { if len(b) < 2 { - return utf8.RuneError, 0 + return utf8.RuneError, len(b) } r1 := uint16(b[0]) | uint16(b[1])<<8 @@ -185,7 +185,7 @@ func (Charset_ucs2) EncodeRune(dst []byte, r rune) int { func (Charset_ucs2) DecodeRune(p []byte) (rune, int) { if len(p) < 2 { - return utf8.RuneError, 0 + return utf8.RuneError, len(p) } return rune(p[0])<<8 | rune(p[1]), 2 } diff --git a/go/mysql/collations/charset/unicode/utf32.go b/go/mysql/collations/charset/unicode/utf32.go index 97095bb7f98..6053d7d10f8 100644 --- a/go/mysql/collations/charset/unicode/utf32.go +++ b/go/mysql/collations/charset/unicode/utf32.go @@ -49,7 +49,7 @@ func (Charset_utf32) EncodeRune(dst []byte, r rune) int { func (Charset_utf32) DecodeRune(p []byte) (rune, int) { if len(p) < 4 { - return utf8.RuneError, 0 + return utf8.RuneError, len(p) } return (rune(p[0]) << 24) | (rune(p[1]) << 16) | (rune(p[2]) << 8) | rune(p[3]), 4 } diff --git a/go/mysql/collations/colldata/collation.go b/go/mysql/collations/colldata/collation.go index 7697c08cbed..a041006ddc7 100644 --- a/go/mysql/collations/colldata/collation.go +++ b/go/mysql/collations/colldata/collation.go @@ -17,6 +17,7 @@ limitations under the License. 
package colldata import ( + "bytes" "fmt" "math" @@ -380,3 +381,46 @@ coerceToRight: return charset.Convert(dst, rightCS, in, leftCS) }, nil, nil } + +func Index(col Collation, str, sub []byte, offset int) int { + cs := col.Charset() + if offset > 0 { + l := charset.Length(cs, str) + if offset > l { + return -1 + } + str = charset.Slice(cs, str, offset, len(str)) + } + + pos := instr(col, str, sub) + if pos < 0 { + return -1 + } + return offset + pos +} + +func instr(col Collation, str, sub []byte) int { + if len(sub) == 0 { + return 0 + } + + if len(str) == 0 { + return -1 + } + + if col.IsBinary() && col.Charset().MaxWidth() == 1 { + return bytes.Index(str, sub) + } + + var pos int + cs := col.Charset() + for len(str) > 0 { + if col.Collate(str, sub, true) == 0 { + return pos + } + _, size := cs.DecodeRune(str) + str = str[size:] + pos++ + } + return -1 +} diff --git a/go/mysql/collations/colldata/mysqlucadata.go b/go/mysql/collations/colldata/mysqlucadata.go index 0affc45d11f..9f9e2b7e238 100644 --- a/go/mysql/collations/colldata/mysqlucadata.go +++ b/go/mysql/collations/colldata/mysqlucadata.go @@ -20,7 +20,7 @@ package colldata import ( _ "embed" - unsafe "unsafe" + "unsafe" ) var weightTable_uca900_page000 = weightsUCA_embed(0, 2560) @@ -1417,5 +1417,5 @@ var weightTable_uca520 = []*[]uint16{ var weightsUCA_embed_data string func weightsUCA_embed(pos, length int) []uint16 { - return (*[0x7fff0000]uint16)(unsafe.Pointer(unsafe.StringData(weightsUCA_embed_data)))[pos : pos+length] + return (*[0x3fffffff]uint16)(unsafe.Pointer(unsafe.StringData(weightsUCA_embed_data)))[pos : pos+length] } diff --git a/go/mysql/collations/colldata/uca_contraction_test.go b/go/mysql/collations/colldata/uca_contraction_test.go index d17ff21e255..a3511a07df6 100644 --- a/go/mysql/collations/colldata/uca_contraction_test.go +++ b/go/mysql/collations/colldata/uca_contraction_test.go @@ -19,9 +19,8 @@ package colldata import ( "encoding/json" "fmt" - "math/rand" + "math/rand/v2" "os" - 
"reflect" "sort" "testing" "unicode/utf8" @@ -36,7 +35,6 @@ type CollationWithContractions struct { Collation Collation Contractions []uca.Contraction ContractFast uca.Contractor - ContractTrie uca.Contractor } func findContractedCollations(t testing.TB, unique bool) (result []CollationWithContractions) { @@ -58,7 +56,7 @@ func findContractedCollations(t testing.TB, unique bool) (result []CollationWith continue } - rf, err := os.Open(fmt.Sprintf("testdata/mysqldata/%s.json", collation.Name())) + rf, err := os.Open(fmt.Sprintf("../testdata/mysqldata/%s.json", collation.Name())) if err != nil { t.Skipf("failed to open JSON metadata (%v). did you run colldump?", err) } @@ -91,14 +89,13 @@ func findContractedCollations(t testing.TB, unique bool) (result []CollationWith Collation: collation, Contractions: meta.Contractions, ContractFast: contract, - ContractTrie: uca.NewTrieContractor(meta.Contractions), }) } return } func testMatch(t *testing.T, name string, cnt uca.Contraction, result []uint16, remainder []byte, skip int) { - assert.True(t, reflect.DeepEqual(cnt.Weights, result), "%s didn't match: expected %#v, got %#v", name, cnt.Weights, result) + assert.Equal(t, result, cnt.Weights, "%s didn't match: expected %#v, got %#v", name, cnt.Weights, result) assert.Equal(t, 0, len(remainder), "%s bad remainder: %#v", name, remainder) assert.Equal(t, len(cnt.Path), skip, "%s bad skipped length %d for %#v", name, skip, cnt.Path) @@ -112,10 +109,7 @@ func TestUCAContractions(t *testing.T) { head := cnt.Path[0] tail := cnt.Path[1] - result := cwc.ContractTrie.FindContextual(head, tail) - testMatch(t, "ContractTrie", cnt, result, nil, 2) - - result = cwc.ContractFast.FindContextual(head, tail) + result := cwc.ContractFast.FindContextual(head, tail) testMatch(t, "ContractFast", cnt, result, nil, 2) continue } @@ -123,10 +117,7 @@ func TestUCAContractions(t *testing.T) { head := cnt.Path[0] tail := string(cnt.Path[1:]) - result, remainder, skip := 
cwc.ContractTrie.Find(charset.Charset_utf8mb4{}, head, []byte(tail)) - testMatch(t, "ContractTrie", cnt, result, remainder, skip) - - result, remainder, skip = cwc.ContractFast.Find(charset.Charset_utf8mb4{}, head, []byte(tail)) + result, remainder, skip := cwc.ContractFast.Find(charset.Charset_utf8mb4{}, head, []byte(tail)) testMatch(t, "ContractFast", cnt, result, remainder, skip) } }) @@ -212,14 +203,13 @@ func (s *strgen) generate(length int, freq float64) (out []byte) { return flat[i] < flat[j] }) - gen := rand.New(rand.NewSource(0xDEADBEEF)) out = make([]byte, 0, length) for len(out) < length { - if gen.Float64() < freq { - cnt := s.contractions[rand.Intn(len(s.contractions))] + if rand.Float64() < freq { + cnt := s.contractions[rand.IntN(len(s.contractions))] out = append(out, cnt...) } else { - cp := flat[rand.Intn(len(flat))] + cp := flat[rand.IntN(len(flat))] out = append(out, string(cp)...) } } @@ -239,10 +229,6 @@ func BenchmarkUCAContractions(b *testing.B) { b.Run(fmt.Sprintf("%s-%.02f-fast", cwc.Collation.Name(), frequency), func(b *testing.B) { benchmarkFind(b, input, cwc.ContractFast) }) - - b.Run(fmt.Sprintf("%s-%.02f-trie", cwc.Collation.Name(), frequency), func(b *testing.B) { - benchmarkFind(b, input, cwc.ContractTrie) - }) } } @@ -259,9 +245,5 @@ func BenchmarkUCAContractionsJA(b *testing.B) { b.Run(fmt.Sprintf("%s-%.02f-fast", cwc.Collation.Name(), frequency), func(b *testing.B) { benchmarkFindJA(b, input, cwc.ContractFast) }) - - b.Run(fmt.Sprintf("%s-%.02f-trie", cwc.Collation.Name(), frequency), func(b *testing.B) { - benchmarkFindJA(b, input, cwc.ContractTrie) - }) } } diff --git a/go/mysql/collations/colldata/uca_tables_test.go b/go/mysql/collations/colldata/uca_tables_test.go index 40c2f3bbed3..ee982b1f25c 100644 --- a/go/mysql/collations/colldata/uca_tables_test.go +++ b/go/mysql/collations/colldata/uca_tables_test.go @@ -24,8 +24,8 @@ import ( "testing" "unsafe" + "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" - "gotest.tools/assert" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/mysql/collations/internal/uca" @@ -58,14 +58,13 @@ func verifyAllCodepoints(t *testing.T, expected map[rune][]uint16, weights uca.W for i := range vitessWeights { a, b := mysqlWeights[i], vitessWeights[i] assert.Equal(t, b, a, "weight mismatch for U+%04X (collation entity %d): mysql=%v vitess=%v", cp, i+1, a, b) - } } } } func loadExpectedWeights(t *testing.T, weights string) map[rune][]uint16 { - fullpath := fmt.Sprintf("testdata/mysqldata/%s.json", weights) + fullpath := fmt.Sprintf("../testdata/mysqldata/%s.json", weights) weightsMysqlFile, err := os.Open(fullpath) if err != nil { t.Skipf("failed to load %q (did you run 'colldump' locally?)", fullpath) diff --git a/go/mysql/collations/colldata/uca_test.go b/go/mysql/collations/colldata/uca_test.go index e00fb5fd6d1..5eb51fed67e 100644 --- a/go/mysql/collations/colldata/uca_test.go +++ b/go/mysql/collations/colldata/uca_test.go @@ -19,7 +19,7 @@ package colldata import ( "bytes" "fmt" - "math/rand" + "math/rand/v2" "slices" "sort" "strings" @@ -1002,7 +1002,7 @@ func TestUCACollationOrder(t *testing.T) { ary := slices.Clone(sorted) for i := range ary { - j := rand.Intn(i + 1) + j := rand.IntN(i + 1) ary[i], ary[j] = ary[j], ary[i] } slices.SortFunc(ary, func(a, b string) int { diff --git a/go/mysql/collations/env.go b/go/mysql/collations/env.go index 91fc2a8bd8c..ae5419a5797 100644 --- a/go/mysql/collations/env.go +++ b/go/mysql/collations/env.go @@ -248,10 +248,10 @@ func (env *Environment) CollationAlias(collation string) (string, bool) { // to a Collation ID, with the exception that it can only fit in 1 byte. // For MySQL 8.0+ environments, the default charset is `utf8mb4_0900_ai_ci`. // For older MySQL environments, the default charset is `utf8mb4_general_ci`. 
-func (env *Environment) DefaultConnectionCharset() uint8 { +func (env *Environment) DefaultConnectionCharset() ID { switch env.version { case collverMySQL8: - return uint8(CollationUtf8mb4ID) + return CollationUtf8mb4ID default: return 45 } @@ -267,7 +267,7 @@ func (env *Environment) DefaultConnectionCharset() uint8 { // handshake. // - empty, in which case the default connection charset for this MySQL version // is returned. -func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) { +func (env *Environment) ParseConnectionCharset(csname string) (ID, error) { if csname == "" { return env.DefaultConnectionCharset(), nil } @@ -282,7 +282,7 @@ func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) { if collid == 0 || collid > 255 { return 0, fmt.Errorf("unsupported connection charset: %q", csname) } - return uint8(collid), nil + return collid, nil } func (env *Environment) AllCollationIDs() []ID { @@ -301,3 +301,8 @@ func (env *Environment) LookupByCharset(name string) *colldefaults { func (env *Environment) LookupCharsetName(coll ID) string { return env.byCharsetName[coll] } + +func (env *Environment) IsSupported(coll ID) bool { + _, supported := env.byID[coll] + return supported +} diff --git a/go/mysql/collations/integration/charset_test.go b/go/mysql/collations/integration/charset_test.go index 8a4d12a0e4d..b1b747e768b 100644 --- a/go/mysql/collations/integration/charset_test.go +++ b/go/mysql/collations/integration/charset_test.go @@ -45,7 +45,7 @@ func TestLocalEncodings(t *testing.T) { defer conn.Close() for _, tc := range cases { - local := collations.Local().LookupByName(tc.collation) + local := collations.MySQL8().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) verifyTranscoding(t, colldata.Lookup(local), remote, tc.input) } diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go index dad55bcafad..c194b48c071 100644 --- 
a/go/mysql/collations/integration/coercion_test.go +++ b/go/mysql/collations/integration/coercion_test.go @@ -54,7 +54,7 @@ type testConcat struct { } func (tc *testConcat) Expression() string { - env := collations.Local() + env := collations.MySQL8() return fmt.Sprintf("CONCAT((_%s X'%x' COLLATE %q), (_%s X'%x' COLLATE %q))", colldata.Lookup(tc.left.Collation).Charset().Name(), tc.left.Text, env.LookupName(tc.left.Collation), colldata.Lookup(tc.right.Collation).Charset().Name(), tc.right.Text, env.LookupName(tc.right.Collation), @@ -63,7 +63,7 @@ func (tc *testConcat) Expression() string { func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 colldata.Coercion) { localCollation := colldata.Lookup(local.Collation) - remoteName := collations.Local().LookupName(remote.Collation) + remoteName := collations.MySQL8().LookupName(remote.Collation) assert.Equal(t, remoteName, localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remoteName) assert.Equal(t, remote.Coercibility, local.Coercibility, "bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility) @@ -85,8 +85,8 @@ func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local col rEBytes, err := remote.Expr.ToBytes() require.NoError(t, err) - assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.Local().LookupName(tc.left.Collation), - tc.right.Text, collations.Local().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(), + assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.MySQL8().LookupName(tc.left.Collation), + 
tc.right.Text, collations.MySQL8().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(), concat.Bytes(), rEBytes) } @@ -96,7 +96,7 @@ type testComparison struct { } func (tc *testComparison) Expression() string { - env := collations.Local() + env := collations.MySQL8() return fmt.Sprintf("(_%s X'%x' COLLATE %q) = (_%s X'%x' COLLATE %q)", env.LookupCharsetName(tc.left.Collation), tc.left.Text, env.LookupName(tc.left.Collation), env.LookupCharsetName(tc.right.Collation), tc.right.Text, env.LookupName(tc.right.Collation), @@ -135,7 +135,7 @@ func TestComparisonSemantics(t *testing.T) { t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31") } - for _, coll := range colldata.All(collations.Local()) { + for _, coll := range colldata.All(collations.MySQL8()) { text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString)) testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll.ID()}) } @@ -175,7 +175,7 @@ func TestComparisonSemantics(t *testing.T) { Coercibility: 0, Repertoire: collations.RepertoireASCII, } - resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.Local(), left, right, + resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.MySQL8(), left, right, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, @@ -194,7 +194,7 @@ func TestComparisonSemantics(t *testing.T) { query := fmt.Sprintf("SELECT CAST((%s) AS BINARY), COLLATION(%s), COERCIBILITY(%s)", expr, expr, expr) resultRemote, errRemote := conn.ExecuteFetch(query, 1, false) - env := collations.Local() + env := collations.MySQL8() if errRemote != nil { require.True(t, strings.Contains(errRemote.Error(), "Illegal mix of collations"), "query %s failed: %v", query, errRemote) @@ -212,7 +212,7 @@ func TestComparisonSemantics(t *testing.T) { continue } - remoteCollation := 
collations.Local().LookupByName(resultRemote.Rows[0][1].ToString()) + remoteCollation := collations.MySQL8().LookupByName(resultRemote.Rows[0][1].ToString()) remoteCI, _ := resultRemote.Rows[0][2].ToInt64() remoteTest.Test(t, &RemoteCoercionResult{ Expr: resultRemote.Rows[0][0], diff --git a/go/mysql/collations/integration/collations_test.go b/go/mysql/collations/integration/collations_test.go index c8f53eeb242..519f4560faf 100644 --- a/go/mysql/collations/integration/collations_test.go +++ b/go/mysql/collations/integration/collations_test.go @@ -38,7 +38,6 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/remote" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" ) @@ -46,9 +45,7 @@ var collationEnv *collations.Environment func init() { // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) + collationEnv = collations.NewEnvironment("8.0.30") } func getSQLQueries(t *testing.T, testfile string) []string { @@ -59,11 +56,11 @@ func getSQLQueries(t *testing.T, testfile string) []string { defer tf.Close() var chunks []string - var curchunk bytes.Buffer + var curchunk strings.Builder addchunk := func() { if curchunk.Len() > 0 { - stmts, err := sqlparser.SplitStatementToPieces(curchunk.String()) + stmts, err := sqlparser.NewTestParser().SplitStatementToPieces(curchunk.String()) if err != nil { t.Fatal(err) } @@ -219,8 +216,8 @@ func TestCollationsOnMysqld(t *testing.T) { } func TestRemoteKanaSensitivity(t *testing.T) { - var Kana1 = []byte("の東京ノ") - var Kana2 = []byte("ノ東京の") + Kana1 := []byte("の東京ノ") + Kana2 := []byte("ノ東京の") testRemoteComparison(t, nil, []testcmp{ {"utf8mb4_0900_as_cs", Kana1, Kana2}, diff --git a/go/mysql/collations/integration/helpers_test.go b/go/mysql/collations/integration/helpers_test.go index 
d436280f04b..a5d2bb0cc36 100644 --- a/go/mysql/collations/integration/helpers_test.go +++ b/go/mysql/collations/integration/helpers_test.go @@ -52,7 +52,7 @@ func testRemoteWeights(t *testing.T, golden io.Writer, cases []testweight) { for _, tc := range cases { t.Run(tc.collation, func(t *testing.T) { - local := collations.Local().LookupByName(tc.collation) + local := collations.MySQL8().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) localResult := colldata.Lookup(local).WeightString(nil, tc.input, 0) remoteResult := remote.WeightString(nil, tc.input, 0) @@ -85,7 +85,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) { for _, tc := range cases { t.Run(tc.collation, func(t *testing.T) { - local := collations.Local().LookupByName(tc.collation) + local := collations.MySQL8().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) localResult := normalizecmp(colldata.Lookup(local).Collate(tc.left, tc.right, false)) remoteResult := remote.Collate(tc.left, tc.right, false) diff --git a/go/mysql/collations/integration/main_test.go b/go/mysql/collations/integration/main_test.go index f0d8d4dfdfa..23c6f8d2716 100644 --- a/go/mysql/collations/integration/main_test.go +++ b/go/mysql/collations/integration/main_test.go @@ -47,7 +47,7 @@ func mysqlconn(t *testing.T) *mysql.Conn { if err != nil { t.Fatal(err) } - if !strings.HasPrefix(conn.ServerVersion, "8.0.") { + if !strings.HasPrefix(conn.ServerVersion, "8.") { conn.Close() t.Skipf("collation integration tests are only supported in MySQL 8.0+") } diff --git a/go/mysql/collations/integration/weight_string_test.go b/go/mysql/collations/integration/weight_string_test.go index 666856ca38b..ad4ad4270fc 100644 --- a/go/mysql/collations/integration/weight_string_test.go +++ b/go/mysql/collations/integration/weight_string_test.go @@ -60,7 +60,7 @@ func TestWeightStringsComprehensive(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations 
:= colldata.All(collations.Local()) + allCollations := colldata.All(collations.MySQL8()) sort.Slice(allCollations, func(i, j int) bool { return allCollations[i].ID() < allCollations[j].ID() }) @@ -104,7 +104,7 @@ func TestCJKWeightStrings(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := colldata.All(collations.Local()) + allCollations := colldata.All(collations.MySQL8()) testdata, _ := filepath.Glob("../internal/charset/testdata/*.txt") for _, testfile := range testdata { cs := filepath.Base(testfile) diff --git a/go/mysql/collations/integration/wildcard_test.go b/go/mysql/collations/integration/wildcard_test.go index 6475a35dd21..6a0271218dc 100644 --- a/go/mysql/collations/integration/wildcard_test.go +++ b/go/mysql/collations/integration/wildcard_test.go @@ -79,7 +79,7 @@ func TestRemoteWildcardMatches(t *testing.T) { {"Ǎḅeçd", "a%bd"}, } - for _, local := range colldata.All(collations.Local()) { + for _, local := range colldata.All(collations.MySQL8()) { t.Run(local.Name(), func(t *testing.T) { var remote = remote.NewCollation(conn, local.Name()) var err error diff --git a/go/mysql/collations/internal/uca/contractions.go b/go/mysql/collations/internal/uca/contractions.go index d894b0e206e..5866cf5bf53 100644 --- a/go/mysql/collations/internal/uca/contractions.go +++ b/go/mysql/collations/internal/uca/contractions.go @@ -17,93 +17,9 @@ limitations under the License. 
package uca import ( - "fmt" - "vitess.io/vitess/go/mysql/collations/charset" ) -type trie struct { - children map[rune]*trie - weights []uint16 -} - -func (t *trie) walkCharset(cs charset.Charset, remainder []byte, depth int) ([]uint16, []byte, int) { - if len(remainder) > 0 { - cp, width := cs.DecodeRune(remainder) - if cp == charset.RuneError && width < 3 { - return nil, nil, 0 - } - if ch := t.children[cp]; ch != nil { - return ch.walkCharset(cs, remainder[width:], depth+1) - } - } - return t.weights, remainder, depth + 1 -} - -func (t *trie) insert(path []rune, weights []uint16) { - if len(path) == 0 { - if t.weights != nil { - panic("duplicate contraction") - } - t.weights = weights - return - } - - if t.children == nil { - t.children = make(map[rune]*trie) - } - ch := t.children[path[0]] - if ch == nil { - ch = &trie{} - t.children[path[0]] = ch - } - ch.insert(path[1:], weights) -} - -type trieContractor struct { - tr trie -} - -func (ctr *trieContractor) insert(c *Contraction) { - if len(c.Path) < 2 { - panic("contraction is too short") - } - if len(c.Weights)%3 != 0 { - panic(fmt.Sprintf("weights are not well-formed: %#v has len=%d", c.Weights, len(c.Weights))) - } - if c.Contextual && len(c.Path) != 2 { - panic("contextual contractions can only span 2 codepoints") - } - ctr.tr.insert(c.Path, c.Weights) -} - -func (ctr *trieContractor) Find(cs charset.Charset, cp rune, remainder []byte) ([]uint16, []byte, int) { - if tr := ctr.tr.children[cp]; tr != nil { - return tr.walkCharset(cs, remainder, 0) - } - return nil, nil, 0 -} - -func (ctr *trieContractor) FindContextual(cp, prev rune) []uint16 { - if tr := ctr.tr.children[cp]; tr != nil { - if trc := tr.children[prev]; trc != nil { - return trc.weights - } - } - return nil -} - -func NewTrieContractor(all []Contraction) Contractor { - if len(all) == 0 { - return nil - } - ctr := &trieContractor{} - for _, c := range all { - ctr.insert(&c) - } - return ctr -} - type Contraction struct { Path []rune Weights 
[]uint16 diff --git a/go/mysql/collations/local.go b/go/mysql/collations/local.go index 3cf81b270c7..090420e07a7 100644 --- a/go/mysql/collations/local.go +++ b/go/mysql/collations/local.go @@ -19,37 +19,14 @@ limitations under the License. package collations import ( - "sync" - - "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/servenv" ) -var defaultEnv *Environment -var defaultEnvInit sync.Once - -// Local is the default collation Environment for Vitess. This depends -// on the value of the `mysql_server_version` flag passed to this Vitess process. -func Local() *Environment { - defaultEnvInit.Do(func() { - if !flag.Parsed() { - panic("collations.Local() called too early") - } - defaultEnv = NewEnvironment(servenv.MySQLServerVersion()) - }) - return defaultEnv -} - -// Default returns the default collation for this Vitess process. -// This is based on the local collation environment, which is based on the user's configured -// MySQL version for this Vitess deployment. -func Default() ID { - return ID(Local().DefaultConnectionCharset()) -} - -func DefaultCollationForType(t sqltypes.Type) ID { - return CollationForType(t, Default()) +// MySQL8 is the collation Environment for MySQL 8. This should +// only be used for testing where we know it's safe to use this +// version, and we don't need a specific other version. 
+func MySQL8() *Environment { + return fetchCacheEnvironment(collverMySQL8) } func CollationForType(t sqltypes.Type, fallback ID) ID { diff --git a/go/mysql/collations/tools/colldump/Dockerfile b/go/mysql/collations/tools/colldump/Dockerfile index 3e5acf4d9a6..f6834b438bc 100644 --- a/go/mysql/collations/tools/colldump/Dockerfile +++ b/go/mysql/collations/tools/colldump/Dockerfile @@ -8,7 +8,7 @@ RUN cd /tmp && \ curl -OL https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-${MYSQL_VERSION}.tar.gz && \ tar zxvf mysql-${MYSQL_VERSION}.tar.gz -ADD colldump.cc /tmp/mysql-${MYSQL_VERSION}/strings/colldump.cc +ADD https://gist.githubusercontent.com/vmg/11625faa79574a4d389fb3c04bdd0582/raw/b46389f1d431392cc64d920d4a30306970cff21f/colldump.cc /tmp/mysql-${MYSQL_VERSION}/strings/colldump.cc RUN echo "MYSQL_ADD_EXECUTABLE(colldump colldump.cc SKIP_INSTALL)\nTARGET_LINK_LIBRARIES(colldump strings)\n" >> /tmp/mysql-${MYSQL_VERSION}/strings/CMakeLists.txt RUN cd /tmp/mysql-${MYSQL_VERSION} && \ diff --git a/go/mysql/collations/tools/colldump/colldump.cc b/go/mysql/collations/tools/colldump/colldump.cc deleted file mode 100644 index 7668ae1dc70..00000000000 --- a/go/mysql/collations/tools/colldump/colldump.cc +++ /dev/null @@ -1,418 +0,0 @@ -/* Copyright (c) 2023, The Vitess Authors - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License, version 2.0, - as published by the Free Software Foundation. - - This program is also distributed with certain software (including - but not limited to OpenSSL) that is licensed under separate terms, - as designated in a particular file or component or in included license - documentation. The authors of MySQL hereby grant you an additional - permission to link the program and your derivative works with the - separately licensed software that they have included with MySQL. 
- - Without limiting anything contained in the foregoing, this file, - which is part of C Driver for MySQL (Connector/C), is also subject to the - Universal FOSS Exception, version 1.0, a copy of which can be found at - http://oss.oracle.com/licenses/universal-foss-exception. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License, version 2.0, for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ - -#include -#include -#include -#include -#include -#include - -#include "m_ctype.h" - -#ifdef HAVE_UNISTD_H -#include -#endif - -#include "my_sys.h" -#include "my_config.h" -#include "my_compiler.h" -#include "my_inttypes.h" -#include "my_io.h" -#include "my_loglevel.h" -#include "my_macros.h" -#include "str_uca_type.h" - -#include "rapidjson/rapidjson.h" -#include "rapidjson/filewritestream.h" -#include "rapidjson/writer.h" - -template -static void print_contractions_1(J &json, my_wc_t *path, size_t depth, bool contextual, const MY_CONTRACTION &contraction) -{ - path[depth] = contraction.ch; - - if (contraction.is_contraction_tail) - { - json.StartObject(); - - json.Key("Path"); - json.StartArray(); - for (size_t i = 0; i <= depth; i++) - { - json.Uint((unsigned int)path[i]); - } - json.EndArray(); - - json.Key("Weights"); - json.StartArray(); - for (size_t i = 0; i < MY_UCA_MAX_WEIGHT_SIZE; i++) - { - json.Uint(contraction.weight[i]); - } - json.EndArray(); - - if (contextual) - { - json.Key("Contextual"); - json.Bool(true); - } - - json.EndObject(); - } - - for (const MY_CONTRACTION &ctr : contraction.child_nodes) - { - print_contractions_1(json, path, depth + 1, false, ctr); - } - for (const MY_CONTRACTION &ctr : 
contraction.child_nodes_context) - { - print_contractions_1(json, path, depth + 1, true, ctr); - } -} - -template -static void print_contractions(J &json, std::vector *contractions) -{ - my_wc_t path[256]; - json.StartArray(); - for (const MY_CONTRACTION &ctr : *contractions) - { - print_contractions_1(json, path, 0, false, ctr); - } - json.EndArray(); -} - -template -static void print_reorder_params(J &json, struct Reorder_param *reorder) -{ - json.StartArray(); - for (int i = 0; i < reorder->wt_rec_num; i++) - { - struct Reorder_wt_rec &r = reorder->wt_rec[i]; - json.StartArray(); - json.Uint(r.old_wt_bdy.begin); - json.Uint(r.old_wt_bdy.end); - json.Uint(r.new_wt_bdy.begin); - json.Uint(r.new_wt_bdy.end); - json.EndArray(); - } - json.EndArray(); -} - -template -static void print_unipages(J &json, const MY_UNI_IDX *unicodeidx) -{ - json.StartArray(); - for (const MY_UNI_IDX *idx = unicodeidx; idx->tab != NULL; idx++) - { - json.StartObject(); - json.Key("From"); - json.Uint(idx->from); - json.Key("To"); - json.Uint(idx->to); - json.Key("Tab"); - json.StartArray(); - const size_t entries = idx->to - idx->from; - for (size_t i = 0; i <= entries; i++) - { - json.Uint(idx->tab[i]); - } - json.EndArray(); - json.EndObject(); - } - json.EndArray(); -} - -template -static void print_uca_weights_900(J &json, int codepoint, uint16 **weights) -{ - uint16 *page = weights[codepoint >> 8]; - if (page == NULL) - return; - - int offset = codepoint & 0xFF; - int cecount = page[offset]; - char key[32]; - snprintf(key, sizeof(key), "U+%04X", codepoint); - - json.Key(key); - json.StartArray(); - for (int ce = 0; ce < cecount; ce++) - { - json.Uint(page[256 + (ce * 3 + 0) * 256 + offset]); - json.Uint(page[256 + (ce * 3 + 1) * 256 + offset]); - json.Uint(page[256 + (ce * 3 + 2) * 256 + offset]); - } - json.EndArray(); -} - -template -static void print_uca_weights_legacy(J &json, int codepoint, uint16 **weights, uchar *lengths) -{ - uint16 *page = weights[codepoint >> 8]; - if (page 
== NULL) - return; - - int offset = codepoint & 0xFF; - uint16 *w = page + offset * lengths[codepoint >> 8]; - if (!w[0]) - return; - - char key[32]; - snprintf(key, sizeof(key), "U+%04X", codepoint); - - json.Key(key); - json.StartArray(); - for (; w[0]; w++) - { - json.Uint(w[0]); - } - json.EndArray(); -} - -template -static void print_array_uchar(J &json, const uchar *arr, size_t len) -{ - json.StartArray(); - for (size_t i = 0; i < len; ++i) - { - json.Uint(arr[i]); - } - json.EndArray(); -} - -template -static void print_array_uint16(J &json, const uint16 *arr, size_t len) -{ - json.StartArray(); - for (size_t i = 0; i < len; ++i) - { - json.Uint(arr[i]); - } - json.EndArray(); -} - -static CHARSET_INFO *init_collation(const char *name) -{ - MY_CHARSET_LOADER loader; - return my_collation_get_by_name(&loader, name, MYF(0)); -} - -#define MY_UCA_MAXCHAR (0x10FFFF + 1) -#define MY_UCA_CHARS_PER_PAGE 256 - -extern MY_COLLATION_HANDLER my_collation_uca_900_handler; -extern MY_COLLATION_HANDLER my_collation_any_uca_handler; -extern MY_COLLATION_HANDLER my_collation_utf16_uca_handler; -extern MY_COLLATION_HANDLER my_collation_utf32_uca_handler; -extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler; - -struct KNOWN_HANDLER -{ - const char *name; - const MY_COLLATION_HANDLER *h; -}; - -static KNOWN_HANDLER known_handlers[] = { - {"8bit_bin", &my_collation_8bit_bin_handler}, - {"8bit_simple_ci", &my_collation_8bit_simple_ci_handler}, - {"any_uca", &my_collation_any_uca_handler}, - {"uca_900", &my_collation_uca_900_handler}, - {"utf16_uca", &my_collation_utf16_uca_handler}, - {"utf32_uca", &my_collation_utf32_uca_handler}, - {"ucs2_uca", &my_collation_ucs2_uca_handler}, -}; - -static int dumpall(const char *dumppath) -{ - char pathbuf[4096]; - char jsonbuf[4096 * 4]; - - // bootstrap the `all_charsets` collation array - init_collation("utf8mb4_0900_ai_ci"); - - for (const CHARSET_INFO *charset : all_charsets) - { - if (!charset || (charset->state & 
MY_CS_AVAILABLE) == 0) - continue; - - charset = init_collation(charset->m_coll_name); - snprintf(pathbuf, sizeof(pathbuf), "%s/%s.json", dumppath, charset->m_coll_name); - - FILE *jsonfile = fopen(pathbuf, "w"); - if (jsonfile == NULL) - { - fprintf(stderr, "failed to create '%s'\n", pathbuf); - return 1; - } - - rapidjson::FileWriteStream os(jsonfile, jsonbuf, sizeof(jsonbuf)); - rapidjson::Writer, rapidjson::ASCII<>> json(os); - - json.StartObject(); - json.Key("Name"); - json.String(charset->m_coll_name); - json.Key("Charset"); - json.String(charset->csname); - json.Key("Number"); - json.Uint(charset->number); - - json.Key("Flags"); - json.StartObject(); - - json.Key("Binary"); - json.Bool((charset->state & MY_CS_BINSORT) != 0); - json.Key("ASCII"); - json.Bool((charset->state & MY_CS_PUREASCII) != 0); - json.Key("Default"); - json.Bool((charset->state & MY_CS_PRIMARY) != 0); - - json.EndObject(); - - for (const KNOWN_HANDLER &handler : known_handlers) - { - if (charset->coll == handler.h) - { - json.Key("CollationImpl"); - json.String(handler.name); - break; - } - } - - if (charset->ctype != NULL) - { - json.Key("CType"); - print_array_uchar(json, charset->ctype, 256); - } - - if (charset->to_lower != NULL) - { - json.Key("ToLower"); - print_array_uchar(json, charset->to_lower, 256); - } - - if (charset->to_upper != NULL) - { - json.Key("ToUpper"); - print_array_uchar(json, charset->to_upper, 256); - } - - if (charset->tab_to_uni != NULL) - { - json.Key("TabToUni"); - print_array_uint16(json, charset->tab_to_uni, 256); - } - - if (charset->tab_from_uni != NULL) - { - json.Key("TabFromUni"); - print_unipages(json, charset->tab_from_uni); - } - - if (charset->sort_order != NULL) - { - json.Key("SortOrder"); - print_array_uchar(json, charset->sort_order, 256); - } - - if (charset->uca != NULL) - { - MY_UCA_INFO *uca = charset->uca; - - json.Key("UCAVersion"); - - switch (uca->version) - { - case UCA_V400: - json.Uint(400); - break; - case UCA_V520: - 
json.Uint(520); - break; - case UCA_V900: - json.Uint(900); - break; - default: - json.Uint(0); - break; - } - - json.Key("Weights"); - json.StartObject(); - if (uca->version == UCA_V900) - { - for (my_wc_t cp = 0; cp < MY_UCA_MAXCHAR; cp++) - { - print_uca_weights_900(json, cp, uca->weights); - } - } - else - { - for (my_wc_t cp = 0; cp < uca->maxchar; cp++) - { - print_uca_weights_legacy(json, cp, uca->weights, uca->lengths); - } - } - json.EndObject(); - - if (uca->have_contractions) - { - json.Key("Contractions"); - print_contractions(json, uca->contraction_nodes); - } - } - - if (charset->coll_param != NULL) - { - json.Key("UppercaseFirst"); - json.Bool(charset->coll_param->case_first == CASE_FIRST_UPPER); - - if (charset->coll_param->reorder_param != NULL) - { - json.Key("Reorder"); - print_reorder_params(json, charset->coll_param->reorder_param); - } - } - - json.EndObject(); - os.Flush(); - fclose(jsonfile); - } - return 0; -} - -int main(int argc, char **argv) -{ - if (argc < 2) - { - fprintf(stderr, "usage: %s \n", argv[0]); - return 1; - } - - return dumpall(argv[1]); -} \ No newline at end of file diff --git a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go index b12d32f59d7..e1549c23bff 100644 --- a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go +++ b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go @@ -224,20 +224,6 @@ func (tg *TableGenerator) entryForCodepoint(codepoint rune) (*page, *entry) { return page, entry } -func (tg *TableGenerator) Add900(codepoint rune, rhs [][3]uint16) { - page, entry := tg.entryForCodepoint(codepoint) - page.entryCount++ - - for i, weights := range rhs { - if i >= uca.MaxCollationElementsPerCodepoint { - break - } - for _, we := range weights { - entry.weights = append(entry.weights, we) - } - } -} - func (tg *TableGenerator) Add(codepoint rune, weights []uint16) { page, entry := tg.entryForCodepoint(codepoint) page.entryCount++ 
@@ -248,22 +234,6 @@ func (tg *TableGenerator) Add(codepoint rune, weights []uint16) { entry.weights = append(entry.weights, weights...) } -func (tg *TableGenerator) AddFromAllkeys(lhs []rune, rhs [][]int, vars []int) { - if len(lhs) > 1 || lhs[0] > tg.maxChar { - // TODO: support contractions - return - } - - var weights [][3]uint16 - for _, we := range rhs { - if len(we) != 3 { - panic("non-triplet weight in allkeys.txt") - } - weights = append(weights, [3]uint16{uint16(we[0]), uint16(we[1]), uint16(we[2])}) - } - tg.Add900(lhs[0], weights) -} - func (tg *TableGenerator) writePage(g *Generator, p *page, layout uca.Layout) string { var weights []uint16 diff --git a/go/mysql/collations/tools/maketestdata/maketestdata.go b/go/mysql/collations/tools/maketestdata/maketestdata.go index edad1c840a3..7adee5d5dfd 100644 --- a/go/mysql/collations/tools/maketestdata/maketestdata.go +++ b/go/mysql/collations/tools/maketestdata/maketestdata.go @@ -167,7 +167,7 @@ func main() { fs := pflag.NewFlagSet("maketestdata", pflag.ExitOnError) flag.Parse(fs) - var defaults = collations.Local() + var defaults = collations.MySQL8() var collationsForLanguage = make(map[testutil.Lang][]collations.ID) var allcollations = colldata.All(defaults) for lang := range testutil.KnownLanguages { diff --git a/go/mysql/config/config.go b/go/mysql/config/config.go new file mode 100644 index 00000000000..cc08107f0a3 --- /dev/null +++ b/go/mysql/config/config.go @@ -0,0 +1,4 @@ +package config + +const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" +const DefaultMySQLVersion = "8.0.30" diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 9cac35ea01f..925dfbfa8e4 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -28,7 +28,6 @@ import ( "strings" "sync" "sync/atomic" - "syscall" "time" "vitess.io/vitess/go/bucketpool" @@ -44,6 +43,8 @@ import ( ) const ( + DefaultFlushDelay = 100 * time.Millisecond + // 
connBufferSize is how much we buffer for reading and // writing. It is also how much we allocate for ephemeral buffers. connBufferSize = 16 * 1024 @@ -129,6 +130,7 @@ type Conn struct { bufferedReader *bufio.Reader flushTimer *time.Timer + flushDelay time.Duration header [packetHeaderSize]byte // Keep track of how and of the buffer we allocated for an @@ -213,10 +215,9 @@ type Conn struct { // this is used to mark the connection to be closed so that the command phase for the connection can be stopped and // the connection gets closed. closing bool -} -// splitStatementFunciton is the function that is used to split the statement in case of a multi-statement query. -var splitStatementFunction = sqlparser.SplitStatementToPieces + truncateErrLen int +} // PrepareData is a buffer used for store prepare statement meta data type PrepareData struct { @@ -247,10 +248,15 @@ var readersPool = sync.Pool{New: func() any { return bufio.NewReaderSize(nil, co // newConn is an internal method to create a Conn. Used by client and server // side for common creation code. -func newConn(conn net.Conn) *Conn { +func newConn(conn net.Conn, flushDelay time.Duration, truncateErrLen int) *Conn { + if flushDelay == 0 { + flushDelay = DefaultFlushDelay + } return &Conn{ conn: conn, bufferedReader: bufio.NewReaderSize(conn, connBufferSize), + flushDelay: flushDelay, + truncateErrLen: truncateErrLen, } } @@ -271,10 +277,12 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn { } c := &Conn{ - conn: conn, - listener: listener, - PrepareData: make(map[uint32]*PrepareData), - keepAliveOn: enabledKeepAlive, + conn: conn, + listener: listener, + PrepareData: make(map[uint32]*PrepareData), + keepAliveOn: enabledKeepAlive, + flushDelay: listener.flushDelay, + truncateErrLen: listener.truncateErrLen, } if listener.connReadBufferSize > 0 { @@ -348,7 +356,7 @@ func (c *Conn) returnReader() { // startFlushTimer must be called while holding lock on bufMu. 
func (c *Conn) startFlushTimer() { if c.flushTimer == nil { - c.flushTimer = time.AfterFunc(mysqlServerFlushDelay, func() { + c.flushTimer = time.AfterFunc(c.flushDelay, func() { c.bufMu.Lock() defer c.bufMu.Unlock() @@ -358,7 +366,7 @@ func (c *Conn) startFlushTimer() { c.bufferedWriter.Flush() }) } else { - c.flushTimer.Reset(mysqlServerFlushDelay) + c.flushTimer.Reset(c.flushDelay) } } @@ -1228,7 +1236,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { var queries []string if c.Capabilities&CapabilityClientMultiStatements != 0 { var err error - queries, err = splitStatementFunction(query) + queries, err = handler.Env().Parser().SplitStatementToPieces(query) if err != nil { log.Errorf("Conn %v: Error splitting query: %v", c, err) return c.writeErrorPacketFromErrorAndLog(err) @@ -1241,14 +1249,14 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { queries = []string{query} } - // Popoulate PrepareData + // Populate PrepareData c.StatementID++ prepare := &PrepareData{ StatementID: c.StatementID, PrepareStmt: queries[0], } - statement, err := sqlparser.ParseStrictDDL(query) + statement, err := handler.Env().Parser().ParseStrictDDL(query) if err != nil { log.Errorf("Conn %v: Error parsing prepared statement: %v", c, err) if !c.writeErrorPacketFromErrorAndLog(err) { @@ -1356,7 +1364,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { var queries []string var err error if c.Capabilities&CapabilityClientMultiStatements != 0 { - queries, err = splitStatementFunction(query) + queries, err = handler.Env().Parser().SplitStatementToPieces(query) if err != nil { log.Errorf("Conn %v: Error splitting query: %v", c, err) return c.writeErrorPacketFromErrorAndLog(err) @@ -1517,35 +1525,30 @@ type PacketOK struct { sessionStateData string } -func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) { +func (c *Conn) parseOKPacket(packetOK *PacketOK, in []byte) error { data := &coder{ 
data: in, pos: 1, // We already read the type. } - packetOK := &PacketOK{} - - fail := func(format string, args ...any) (*PacketOK, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, format, args...) - } // Affected rows. affectedRows, ok := data.readLenEncInt() if !ok { - return fail("invalid OK packet affectedRows: %v", data) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet affectedRows: %v", data.data) } packetOK.affectedRows = affectedRows // Last Insert ID. lastInsertID, ok := data.readLenEncInt() if !ok { - return fail("invalid OK packet lastInsertID: %v", data) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet lastInsertID: %v", data.data) } packetOK.lastInsertID = lastInsertID // Status flags. statusFlags, ok := data.readUint16() if !ok { - return fail("invalid OK packet statusFlags: %v", data) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet statusFlags: %v", data.data) } packetOK.statusFlags = statusFlags @@ -1553,7 +1556,7 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) { // Warnings. warnings, ok := data.readUint16() if !ok { - return fail("invalid OK packet warnings: %v", data) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet warnings: %v", data.data) } packetOK.warnings = warnings @@ -1570,7 +1573,7 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) { if !ok || length == 0 { // In case we have no more data or a zero length string, there's no additional information so // we can return the packet. - return packetOK, nil + return nil } // Alright, now we need to read each sub packet from the session state change. 
@@ -1582,7 +1585,7 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) { } sessionLen, ok := data.readLenEncInt() if !ok { - return fail("invalid OK packet session state change length for type %v", sscType) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet session state change length for type %v", sscType) } if sscType != SessionTrackGtids { @@ -1595,19 +1598,19 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) { // read (and ignore for now) the GTIDS encoding specification code: 1 byte _, ok = data.readByte() if !ok { - return fail("invalid OK packet gtids type: %v", data) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet gtids type: %v", data.data) } gtids, ok := data.readLenEncString() if !ok { - return fail("invalid OK packet gtids: %v", data) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid OK packet gtids: %v", data.data) } packetOK.sessionStateData = gtids } } } - return packetOK, nil + return nil } // isErrorPacket determines whether or not the packet is an error packet. Mostly here for @@ -1699,41 +1702,3 @@ func (c *Conn) IsMarkedForClose() bool { func (c *Conn) IsShuttingDown() bool { return c.listener.shutdown.Load() } - -// ConnCheck ensures that this connection to the MySQL server hasn't been broken. -// This is a fast, non-blocking check. 
For details on its implementation, please read -// "Three Bugs in the Go MySQL Driver" (Vicent Marti, GitHub, 2020) -// https://github.blog/2020-05-20-three-bugs-in-the-go-mysql-driver/ -func (c *Conn) ConnCheck() error { - conn := c.conn - if tlsconn, ok := conn.(*tls.Conn); ok { - conn = tlsconn.NetConn() - } - if conn, ok := conn.(syscall.Conn); ok { - rc, err := conn.SyscallConn() - if err != nil { - return err - } - - var n int - var buff [1]byte - rerr := rc.Read(func(fd uintptr) bool { - n, err = syscall.Read(int(fd), buff[:]) - return true - }) - - switch { - case rerr != nil: - return rerr - case n == 0 && err == nil: - return io.EOF - case n > 0: - return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "unexpected read from conn") - case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK: - return nil - default: - return err - } - } - return nil -} diff --git a/go/mysql/conn_fake.go b/go/mysql/conn_fake.go index e61f90d33f1..7bc4fd5ff61 100644 --- a/go/mysql/conn_fake.go +++ b/go/mysql/conn_fake.go @@ -84,7 +84,7 @@ var _ net.Addr = (*mockAddress)(nil) // GetTestConn returns a conn for testing purpose only. func GetTestConn() *Conn { - return newConn(testConn{}) + return newConn(testConn{}, DefaultFlushDelay, 0) } // GetTestServerConn is only meant to be used for testing. diff --git a/go/mysql/conn_params.go b/go/mysql/conn_params.go index 061aa23f220..46e733f6021 100644 --- a/go/mysql/conn_params.go +++ b/go/mysql/conn_params.go @@ -17,35 +17,38 @@ limitations under the License. package mysql import ( + "time" + + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vttls" ) // ConnParams contains all the parameters to use to connect to mysql. 
type ConnParams struct { - Host string `json:"host"` - Port int `json:"port"` - Uname string `json:"uname"` - Pass string `json:"pass"` - DbName string `json:"dbname"` - UnixSocket string `json:"unix_socket"` - Charset string `json:"charset"` - Flags uint64 `json:"flags"` - Flavor string `json:"flavor,omitempty"` + Host string + Port int + Uname string + Pass string + DbName string + UnixSocket string + Charset collations.ID + Flags uint64 + Flavor string // The following SSL flags control the SSL behavior. // // Not setting this value implies preferred mode unless // the CapabilityClientSSL bit is set in db_flags. In the // flag is set, it ends up equivalent to verify_identity mode. - SslMode vttls.SslMode `json:"ssl_mode"` - SslCa string `json:"ssl_ca"` - SslCaPath string `json:"ssl_ca_path"` - SslCert string `json:"ssl_cert"` - SslCrl string `json:"ssl_crl"` - SslKey string `json:"ssl_key"` - TLSMinVersion string `json:"tls_min_version"` - ServerName string `json:"server_name"` - ConnectTimeoutMs uint64 `json:"connect_timeout_ms"` + SslMode vttls.SslMode + SslCa string + SslCaPath string + SslCert string + SslCrl string + SslKey string + TLSMinVersion string + ServerName string + ConnectTimeoutMs uint64 // The following is only set to force the client to connect without // using CapabilityClientDeprecateEOF @@ -57,6 +60,11 @@ type ConnParams struct { // for informative purposes. It has no programmatic value. Returning this field is // disabled by default. EnableQueryInfo bool + + // FlushDelay is the delay after which buffered response will be flushed to the client. + FlushDelay time.Duration + + TruncateErrLen int } // EnableSSL will set the right flag on the parameters. 
diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_test.go similarity index 98% rename from go/mysql/conn_flaky_test.go rename to go/mysql/conn_test.go index 9df52a47589..64b97052ead 100644 --- a/go/mysql/conn_flaky_test.go +++ b/go/mysql/conn_test.go @@ -23,7 +23,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "math/rand" + "math/rand/v2" "net" "strconv" "strings" @@ -31,18 +31,15 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sqlparser" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtenv" ) func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { @@ -77,8 +74,8 @@ func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { require.Nil(t, serverErr, "Accept failed: %v", serverErr) // Create a Conn on both sides. 
- cConn := newConn(clientConn) - sConn := newConn(serverConn) + cConn := newConn(clientConn, DefaultFlushDelay, 0) + sConn := newConn(serverConn, DefaultFlushDelay, 0) sConn.PrepareData = map[uint32]*PrepareData{} return listener, sConn, cConn @@ -249,7 +246,8 @@ func TestBasicPackets(t *testing.T) { require.NotEmpty(data) assert.EqualValues(data[0], OKPacket, "OKPacket") - packetOk, err := cConn.parseOKPacket(data) + var packetOk PacketOK + err = cConn.parseOKPacket(&packetOk, data) require.NoError(err) assert.EqualValues(12, packetOk.affectedRows) assert.EqualValues(34, packetOk.lastInsertID) @@ -275,7 +273,7 @@ func TestBasicPackets(t *testing.T) { require.NotEmpty(data) assert.EqualValues(data[0], OKPacket, "OKPacket") - packetOk, err = cConn.parseOKPacket(data) + err = cConn.parseOKPacket(&packetOk, data) require.NoError(err) assert.EqualValues(23, packetOk.affectedRows) assert.EqualValues(45, packetOk.lastInsertID) @@ -298,7 +296,7 @@ func TestBasicPackets(t *testing.T) { require.NotEmpty(data) assert.True(cConn.isEOFPacket(data), "expected EOF") - packetOk, err = cConn.parseOKPacket(data) + err = cConn.parseOKPacket(&packetOk, data) require.NoError(err) assert.EqualValues(12, packetOk.affectedRows) assert.EqualValues(34, packetOk.lastInsertID) @@ -359,7 +357,7 @@ func TestOkPackets(t *testing.T) { dataIn: ` 00000000 00 00 00 02 00 |.....|`, cc: CapabilityClientTransactions, - expectedErr: "invalid OK packet warnings: &{[0 0 0 2 0] 0}", + expectedErr: "invalid OK packet warnings: [0 0 0 2 0]", }, { dataIn: ` 00000000 FE 00 00 22 40 00 00 |.....|`, @@ -693,7 +691,8 @@ func TestOkPackets(t *testing.T) { cConn.Capabilities = testCase.cc sConn.Capabilities = testCase.cc // parse the packet - packetOk, err := cConn.parseOKPacket(data) + var packetOk PacketOK + err := cConn.parseOKPacket(&packetOk, data) if testCase.expectedErr != "" { require.Error(t, err) require.Equal(t, testCase.expectedErr, err.Error()) @@ -702,7 +701,7 @@ func TestOkPackets(t *testing.T) { 
require.NoError(t, err, "failed to parse OK packet") // write the ok packet from server - err = sConn.writeOKPacket(packetOk) + err = sConn.writeOKPacket(&packetOk) require.NoError(t, err, "failed to write OK packet") // receive the ok packet on client @@ -745,7 +744,7 @@ func TestEOFOrLengthEncodedIntFuzz(t *testing.T) { }() for i := 0; i < 100; i++ { - bytes := make([]byte, rand.Intn(16)+1) + bytes := make([]byte, rand.IntN(16)+1) _, err := crypto_rand.Read(bytes) require.NoError(t, err, "error doing rand.Read") @@ -878,14 +877,6 @@ func TestMultiStatement(t *testing.T) { func TestMultiStatementOnSplitError(t *testing.T) { listener, sConn, cConn := createSocketPair(t) - // Set the splitStatementFunction to return an error. - splitStatementFunction = func(blob string) (pieces []string, err error) { - return nil, fmt.Errorf("Error in split statements") - } - defer func() { - // Set the splitStatementFunction to the correct function back - splitStatementFunction = sqlparser.SplitStatementToPieces - }() sConn.Capabilities |= CapabilityClientMultiStatements defer func() { listener.Close() @@ -893,7 +884,7 @@ func TestMultiStatementOnSplitError(t *testing.T) { cConn.Close() }() - err := cConn.WriteComQuery("select 1;select 2") + err := cConn.WriteComQuery("broken>'query 1;parse 0: + return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "unexpected read from conn") + case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK: + return nil + default: + return err + } + } + return nil +} diff --git a/go/mysql/conn_windows.go b/go/mysql/conn_windows.go new file mode 100644 index 00000000000..695c5703cdb --- /dev/null +++ b/go/mysql/conn_windows.go @@ -0,0 +1,24 @@ +//go:build windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +// ConnCheck is not implemented for Windows. +func (c *Conn) ConnCheck() error { + return nil +} diff --git a/go/mysql/constants.go b/go/mysql/constants.go index 194ed568b39..defcf37b871 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -17,7 +17,7 @@ limitations under the License. package mysql import ( - "vitess.io/vitess/go/mysql/binlog" + "vitess.io/vitess/go/sqltypes" ) const ( @@ -274,10 +274,19 @@ const ( AuthSwitchRequestPacket = 0xfe ) +var typeInt24, _ = sqltypes.TypeToMySQL(sqltypes.Int24) +var typeTimestamp, _ = sqltypes.TypeToMySQL(sqltypes.Timestamp) +var typeYear, _ = sqltypes.TypeToMySQL(sqltypes.Year) +var typeNewDecimal, _ = sqltypes.TypeToMySQL(sqltypes.Decimal) + // IsNum returns true if a MySQL type is a numeric value. // It is the same as IS_NUM defined in mysql.h. 
func IsNum(typ uint8) bool { - return (typ <= binlog.TypeInt24 && typ != binlog.TypeTimestamp) || - typ == binlog.TypeYear || - typ == binlog.TypeNewDecimal + return (typ <= typeInt24 && typ != typeTimestamp) || + typ == typeYear || + typ == typeNewDecimal } + +const ( + readReplicationConnectionConfiguration = "SELECT * FROM performance_schema.replication_connection_configuration" +) diff --git a/go/mysql/datetime/datetime.go b/go/mysql/datetime/datetime.go index bf73ac85c27..973c79b44c3 100644 --- a/go/mysql/datetime/datetime.go +++ b/go/mysql/datetime/datetime.go @@ -44,7 +44,10 @@ type DateTime struct { Time Time } -const DefaultPrecision = 6 +const ( + DefaultPrecision = 6 + MaxHours = 838 +) func (t Time) AppendFormat(b []byte, prec uint8) []byte { if t.Neg() { @@ -245,7 +248,14 @@ func (d Date) Hash(h *vthash.Hasher) { } func (d Date) Weekday() time.Weekday { - return d.ToStdTime(time.Local).Weekday() + // Go considers 0000-01-01 day as Saturday, while + // MySQL considers it to be Sunday, now 0000-02-29 exists in + // Go but not in MySQL so it balances out after that + wd := d.ToStdTime(time.Local).Weekday() + if d.Year() == 0 && d.Month() <= 2 { + wd = (wd + 1) % 7 + } + return wd } func (d Date) Yearday() int { @@ -315,12 +325,16 @@ func (d Date) Week(mode int) int { year, week := d.SundayWeek() if year < d.Year() { return 0 + } else if year > d.Year() { + return 53 } return week case 1: year, week := d.ISOWeek() if year < d.Year() { return 0 + } else if year > d.Year() { + return 53 } return week case 2: @@ -333,12 +347,16 @@ func (d Date) Week(mode int) int { year, week := d.Sunday4DayWeek() if year < d.Year() { return 0 + } else if year > d.Year() { + return 53 } return week case 5: year, week := d.MondayWeek() if year < d.Year() { return 0 + } else if year > d.Year() { + return 53 } return week case 6: @@ -360,9 +378,12 @@ func (d Date) YearWeek(mode int) int { case 1, 3: year, week := d.ISOWeek() return year*100 + week - case 4, 5, 6, 7: - // TODO - 
return 0 + case 4, 6: + year, week := d.Sunday4DayWeek() + return year*100 + week + case 5, 7: + year, week := d.MondayWeek() + return year*100 + week default: return d.YearWeek(DefaultWeekMode) } @@ -432,12 +453,16 @@ func (t Time) AddInterval(itv *Interval, stradd bool) (Time, uint8, bool) { return dt.Time, itv.precision(stradd), ok } -func (t Time) toSeconds() int { - tsecs := t.Hour()*secondsPerHour + t.Minute()*secondsPerMinute + t.Second() +func (t Time) toDuration() time.Duration { + dur := time.Duration(t.hour)*time.Hour + time.Duration(t.minute)*time.Minute + time.Duration(t.second)*time.Second + time.Duration(t.nanosecond)*time.Nanosecond if t.Neg() { - return -tsecs + return -dur } - return tsecs + return dur +} + +func (t Time) ToSeconds() int64 { + return int64(t.ToDuration().Seconds()) } func (d Date) ToStdTime(loc *time.Location) (out time.Time) { @@ -538,9 +563,9 @@ func (dt DateTime) Compare(dt2 DateTime) int { return dt.Time.Compare(dt2.Time) } -func (dt DateTime) AddInterval(itv *Interval, stradd bool) (DateTime, uint8, bool) { +func (dt DateTime) AddInterval(itv *Interval, prec uint8, stradd bool) (DateTime, uint8, bool) { ok := dt.addInterval(itv) - return dt, itv.precision(stradd), ok + return dt, max(prec, itv.precision(stradd)), ok } func (dt DateTime) Round(p int) (r DateTime) { @@ -569,8 +594,17 @@ func (dt DateTime) Round(p int) (r DateTime) { return r } -func (dt DateTime) toSeconds() int { - return (dt.Date.Day()-1)*secondsPerDay + dt.Time.toSeconds() +func (dt DateTime) toDuration() time.Duration { + dur := dt.Time.toDuration() + if !dt.Date.IsZero() { + dur += time.Duration(dt.Date.Day()-1) * durationPerDay + } + return dur +} + +func (dt DateTime) ToSeconds() int64 { + numDays := MysqlDayNumber(dt.Date.Year(), dt.Date.Month(), dt.Date.Day()) + return int64(numDays*24*3600) + dt.Time.ToSeconds() } func (dt *DateTime) addInterval(itv *Interval) bool { @@ -580,29 +614,25 @@ func (dt *DateTime) addInterval(itv *Interval) bool { return 
false } - nsec := dt.Time.Nanosecond() + itv.nsec - sec := dt.toSeconds() + itv.toSeconds() + (nsec / int(time.Second)) - nsec = nsec % int(time.Second) - - if nsec < 0 { - nsec += int(time.Second) - sec-- - } + dur := dt.toDuration() + dur += itv.toDuration() + days := time.Duration(0) + if !dt.Date.IsZero() { + days = dur / durationPerDay + dur -= days * durationPerDay - days := sec / secondsPerDay - sec -= days * secondsPerDay - - if sec < 0 { - sec += secondsPerDay - days-- + if dur < 0 { + dur += durationPerDay + days-- + } } - dt.Time.nanosecond = uint32(nsec) - dt.Time.second = uint8(sec % secondsPerMinute) - dt.Time.minute = uint8((sec / secondsPerMinute) % secondsPerMinute) - dt.Time.hour = uint16(sec / secondsPerHour) + dt.Time.nanosecond = uint32((dur % time.Second) / time.Nanosecond) + dt.Time.second = uint8((dur % time.Minute) / time.Second) + dt.Time.minute = uint8((dur % time.Hour) / time.Minute) + dt.Time.hour = uint16(dur / time.Hour) - daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), 1) + days + daynum := MysqlDayNumber(dt.Date.Year(), dt.Date.Month(), 1) + int(days) if daynum < 0 || daynum > maxDay { return false } @@ -611,7 +641,7 @@ func (dt *DateTime) addInterval(itv *Interval) bool { return true case itv.unit.HasDayParts(): - daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), dt.Date.Day()) + daynum := MysqlDayNumber(dt.Date.Year(), dt.Date.Month(), dt.Date.Day()) daynum += itv.day dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum) return true @@ -692,6 +722,56 @@ func NewTimeFromStd(t time.Time) Time { } } +var ( + decSecondsInHour = decimal.NewFromInt(3600) + decMinutesInHour = decimal.NewFromInt(60) + decMaxHours = decimal.NewFromInt(MaxHours) +) + +func NewTimeFromSeconds(seconds decimal.Decimal) Time { + var neg bool + if seconds.Sign() < 0 { + neg = true + seconds = seconds.Abs() + } + + sec, frac := seconds.QuoRem(decimal.New(1, 0), 0) + ns := frac.Mul(decimal.New(1, 9)) + + h, sec := 
sec.QuoRem(decSecondsInHour, 0) + min, sec := sec.QuoRem(decMinutesInHour, 0) + + if h.Cmp(decMaxHours) > 0 { + h := uint16(MaxHours) + if neg { + h |= negMask + } + + return Time{ + hour: h, + minute: 59, + second: 59, + nanosecond: 0, + } + } + + hour, _ := h.Int64() + if neg { + hour |= int64(negMask) + } + + m, _ := min.Int64() + s, _ := sec.Int64() + nsec, _ := ns.Int64() + + return Time{ + hour: uint16(hour), + minute: uint8(m), + second: uint8(s), + nanosecond: uint32(nsec), + } +} + func NewDateTimeFromStd(t time.Time) DateTime { return DateTime{ Date: NewDateFromStd(t), diff --git a/go/mysql/datetime/datetime_test.go b/go/mysql/datetime/datetime_test.go new file mode 100644 index 00000000000..697f967cc3c --- /dev/null +++ b/go/mysql/datetime/datetime_test.go @@ -0,0 +1,943 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package datetime + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/decimal" + "vitess.io/vitess/go/vt/vthash" +) + +var testGoTime = time.Date(2024, 03, 12, 12, 30, 20, 987654321, time.UTC) + +func TestNewTimeFromStd(t *testing.T) { + time := NewTimeFromStd(testGoTime) + + assert.Equal(t, uint16(12), time.hour) + assert.Equal(t, uint8(30), time.minute) + assert.Equal(t, uint8(20), time.second) + assert.Equal(t, uint32(987654321), time.nanosecond) +} + +func TestNewDateFromStd(t *testing.T) { + date := NewDateFromStd(testGoTime) + + assert.Equal(t, uint16(2024), date.year) + assert.Equal(t, uint8(03), date.month) + assert.Equal(t, uint8(12), date.day) +} + +func TestNewDateTimeFromStd(t *testing.T) { + dt := NewDateTimeFromStd(testGoTime) + + assert.Equal(t, uint16(2024), dt.Date.year) + assert.Equal(t, uint8(03), dt.Date.month) + assert.Equal(t, uint8(12), dt.Date.day) + + assert.Equal(t, uint16(12), dt.Time.hour) + assert.Equal(t, uint8(30), dt.Time.minute) + assert.Equal(t, uint8(20), dt.Time.second) + assert.Equal(t, uint32(987654321), dt.Time.nanosecond) +} + +func TestAppendFormat(t *testing.T) { + time := NewTimeFromStd(testGoTime) + b := []byte("test AppendFormat: ") + + testCases := []struct { + prec uint8 + want []byte + }{ + {0, []byte("test AppendFormat: 12:30:20")}, + {1, []byte("test AppendFormat: 12:30:20.9")}, + {3, []byte("test AppendFormat: 12:30:20.987")}, + } + + for _, tc := range testCases { + nb := time.AppendFormat(b, tc.prec) + assert.Equal(t, tc.want, nb) + } + + // Neg-time case + time = Time{ + hour: 1<<15 + 12, + minute: 30, + second: 20, + } + nb := time.AppendFormat(b, 0) + assert.Equal(t, []byte("test AppendFormat: -12:30:20"), nb) +} + +func TestFormat(t *testing.T) { + time := NewTimeFromStd(testGoTime) + + testCases := []struct { + prec uint8 + want []byte + }{ + {0, []byte("12:30:20")}, + {1, []byte("12:30:20.9")}, + {3, []byte("12:30:20.987")}, + } + + for _, tc := 
range testCases { + nb := time.Format(tc.prec) + assert.Equal(t, tc.want, nb) + } + + // Neg-time case + time = Time{ + hour: 1<<15 + 12, + minute: 30, + second: 20, + } + nb := time.Format(0) + assert.Equal(t, []byte("-12:30:20"), nb) +} + +func TestFormats(t *testing.T) { + testCases := []struct { + hour uint16 + minute uint8 + second uint8 + nanosecond uint32 + wantInt64 int64 + wantFloat64 float64 + wantDecimal decimal.Decimal + }{ + { + hour: 12, + minute: 30, + second: 20, + nanosecond: 987654321, + wantInt64: 123021, + wantFloat64: 123020.987654321, + wantDecimal: decimal.NewFromFloat(123020.987654321), + }, + { + hour: 1<<15 + 12, + minute: 30, + second: 20, + nanosecond: 987654321, + wantInt64: -123021, + wantFloat64: -123020.987654321, + wantDecimal: decimal.NewFromFloat(-123020.987654321), + }, + { + hour: 1<<15 + 123, + minute: 9, + second: 9, + nanosecond: 123456789, + wantInt64: -1230909, + wantFloat64: -1230909.123456789, + wantDecimal: decimal.NewFromFloat(-1230909.123456789), + }, + } + + for _, tc := range testCases { + time := Time{ + hour: tc.hour, + minute: tc.minute, + second: tc.second, + nanosecond: tc.nanosecond, + } + + n := time.FormatInt64() + assert.Equal(t, tc.wantInt64, n) + + f := time.FormatFloat64() + assert.Equal(t, tc.wantFloat64, f) + + d := time.FormatDecimal() + assert.Equal(t, tc.wantDecimal, d) + } +} + +func TestToDateTime(t *testing.T) { + time := NewTimeFromStd(testGoTime) + + want := DateTime{ + Date: Date{ + year: 2024, + month: 3, + day: 12, + }, + Time: Time{ + hour: 12, + minute: 30, + second: 20, + nanosecond: 987654321, + }, + } + + got := time.ToDateTime(testGoTime) + + assert.Equal(t, want, got) +} + +func TestTimeIsZero(t *testing.T) { + testCases := []struct { + hour uint16 + minute uint8 + second uint8 + nanosecond uint32 + wantZero bool + }{ + {0, 0, 0, 0, true}, + {0, 0, 0, 123, false}, + {12, 12, 23, 0, false}, + } + + for _, tc := range testCases { + time := Time{ + hour: tc.hour, + minute: tc.minute, + 
second: tc.second, + nanosecond: tc.nanosecond, + } + + z := time.IsZero() + if tc.wantZero { + assert.True(t, z, "Time %v should be considered as zero time", time) + } else { + assert.False(t, z, "Time %v should not be considered as zero time", time) + } + } +} + +func TestRoundForJSON(t *testing.T) { + testCases := []struct { + hour uint16 + minute uint8 + second uint8 + nanosecond uint32 + want Time + }{ + { + hour: 12, + minute: 30, + second: 20, + nanosecond: 987654321, + want: Time{12, 30, 20, 987654321}, + }, + { + hour: 1<<15 + 123, + minute: 9, + second: 9, + nanosecond: 123456789, + want: Time{1<<15 + 27, 9, 9, 123456789}, + }, + } + + for _, tc := range testCases { + time := Time{ + hour: tc.hour, + minute: tc.minute, + second: tc.second, + nanosecond: tc.nanosecond, + } + + res := time.RoundForJSON() + assert.Equal(t, tc.want, res) + } +} + +func TestCompare(t *testing.T) { + time := NewTimeFromStd(testGoTime) + + testCases := []struct { + hour uint16 + minute uint8 + second uint8 + nanosecond uint32 + want int + }{ + {12, 30, 20, 987654321, 0}, + {1<<15 + 12, 30, 20, 987654321, 1}, + {12, 29, 20, 987654321, 1}, + {12, 31, 20, 987654321, -1}, + {13, 30, 20, 987654321, -1}, + {11, 30, 20, 987654321, 1}, + {12, 30, 19, 98765, 1}, + {12, 30, 21, 98765432, -1}, + {12, 30, 20, 123123231, 1}, + {12, 30, 20, 987654322, -1}, + {12, 30, 20, 987654322, -1}, + } + + for _, tc := range testCases { + t2 := Time{ + hour: tc.hour, + minute: tc.minute, + second: tc.second, + nanosecond: tc.nanosecond, + } + + res := time.Compare(t2) + assert.Equal(t, tc.want, res) + + // If we use `t2` to call Compare, then result should be negative + // of what we wanted when `time` was used to call Compare + res = t2.Compare(time) + assert.Equal(t, -tc.want, res) + } + + // Case when both Time are negative + time = Time{ + hour: 1<<15 + 12, + minute: 30, + second: 20, + nanosecond: 987654321, + } + t2 := Time{ + hour: 1<<15 + 13, + minute: 30, + second: 20, + nanosecond: 987654321, + 
} + res := time.Compare(t2) + assert.Equal(t, 1, res) +} + +func TestRound(t *testing.T) { + time := NewTimeFromStd(testGoTime) + + testCases := []struct { + time Time + round int + want Time + }{ + { + time: time, + round: 9, + want: time, + }, + { + time: time, + round: 5, + want: Time{ + hour: 12, + minute: 30, + second: 20, + nanosecond: 987650000, + }, + }, + { + time: time, + round: 0, + want: Time{ + hour: 12, + minute: 30, + second: 21, + nanosecond: 0, + }, + }, + { + time: Time{ + hour: 12, + minute: 30, + second: 20, + }, + round: 0, + want: Time{ + hour: 12, + minute: 30, + second: 20, + }, + }, + { + time: Time{ + hour: 12, + minute: 59, + second: 59, + nanosecond: 987654321, + }, + round: 0, + want: Time{ + hour: 13, + minute: 0, + second: 0, + nanosecond: 0, + }, + }, + } + + for _, tc := range testCases { + res := tc.time.Round(tc.round) + + assert.Equal(t, tc.want, res) + } +} + +func TestDateIsZero(t *testing.T) { + testCases := []struct { + year uint16 + month uint8 + day uint8 + wantZero bool + }{ + {0, 0, 0, true}, + {0, 0, 1, false}, + {2023, 12, 23, false}, + } + + for _, tc := range testCases { + date := Date{ + year: tc.year, + month: tc.month, + day: tc.day, + } + + z := date.IsZero() + if tc.wantZero { + assert.True(t, z, "Date %v should be considered as zero date", date) + } else { + assert.False(t, z, "Date %v should not be considered as zero date", date) + } + } +} + +func TestWeekday(t *testing.T) { + testCases := []struct { + year uint16 + month uint8 + day uint8 + want int + }{ + {0, 1, 1, 0}, + {0, 2, 28, 2}, + {0, 3, 1, 3}, + {2024, 3, 13, 3}, + } + + for _, tc := range testCases { + date := Date{ + year: tc.year, + month: tc.month, + day: tc.day, + } + + wd := date.Weekday() + assert.Equal(t, time.Weekday(tc.want), wd) + } +} + +func TestMondayWeekAndSunday4DayWeek(t *testing.T) { + testCases := []struct { + year uint16 + month uint8 + day uint8 + wantWeekDay int + wantYear int + }{ + {0, 1, 1, 52, -1}, + {0, 2, 28, 9, 0}, + {0, 
3, 1, 9, 0}, + {2024, 3, 13, 11, 2024}, + } + + for _, tc := range testCases { + date := Date{ + year: tc.year, + month: tc.month, + day: tc.day, + } + + y, wd := date.MondayWeek() + assert.Equal(t, tc.wantWeekDay, wd) + assert.Equal(t, tc.wantYear, y) + + y, wd = date.Sunday4DayWeek() + assert.Equal(t, tc.wantWeekDay, wd) + assert.Equal(t, tc.wantYear, y) + } +} + +func TestWeek(t *testing.T) { + testCases := []struct { + year uint16 + month uint8 + day uint8 + mode int + want int + }{ + {0, 1, 1, 0, 0}, + {0, 1, 1, 1, 0}, + {0, 2, 28, 2, 9}, + {0, 3, 1, 3, 9}, + {2001, 3, 14, 4, 11}, + {2000, 7, 12, 5, 28}, + {2024, 3, 13, 6, 11}, + {2024, 3, 13, 7, 11}, + } + + for _, tc := range testCases { + date := Date{ + year: tc.year, + month: tc.month, + day: tc.day, + } + + wd := date.Week(tc.mode) + assert.Equal(t, tc.want, wd) + } +} + +func TestYearWeek(t *testing.T) { + testCases := []struct { + year uint16 + month uint8 + day uint8 + mode int + want int + }{ + {0, 1, 1, 0, -48}, + {0, 1, 1, 1, -48}, + {0, 2, 28, 2, 9}, + {0, 3, 1, 3, 9}, + {2001, 3, 14, 4, 200111}, + {2000, 7, 12, 5, 200028}, + {2024, 3, 13, 6, 202411}, + {2024, 3, 13, 7, 202411}, + {2024, 3, 13, 8, 202410}, + } + + for _, tc := range testCases { + date := Date{ + year: tc.year, + month: tc.month, + day: tc.day, + } + + wd := date.YearWeek(tc.mode) + assert.Equal(t, tc.want, wd) + } +} + +func TestToDuration(t *testing.T) { + tt := NewTimeFromStd(testGoTime) + + res := tt.ToDuration() + assert.Equal(t, 45020987654321, int(res)) + + // Neg Time Case + tt.hour = 1<<15 | tt.hour + res = tt.ToDuration() + + assert.Equal(t, -45020987654321, int(res)) +} + +func TestToSeconds(t *testing.T) { + tt := NewTimeFromStd(testGoTime) + + res := tt.ToSeconds() + assert.Equal(t, 45020, int(res)) + + // Neg Time Case + tt.hour |= 1 << 15 + res = tt.ToSeconds() + + assert.Equal(t, -45020, int(res)) + + dt := NewDateTimeFromStd(testGoTime) + + res = dt.ToSeconds() + assert.Equal(t, 63877465820, int(res)) + + // Neg 
Time Case + dt.Time.hour |= 1 << 15 + res = dt.ToSeconds() + + assert.Equal(t, 63877375780, int(res)) +} + +func TestToStdTime(t *testing.T) { + testCases := []struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + }{ + {2024, 3, 15, 12, 23, 34, 45}, + {2024, 3, 15, 0, 0, 0, 0}, + {0, 0, 0, 12, 23, 34, 45}, + {0, 0, 0, 0, 0, 0, 0}, + } + + for _, tc := range testCases { + dt := DateTime{ + Date: Date{uint16(tc.year), uint8(tc.month), uint8(tc.day)}, + Time: Time{uint16(tc.hour), uint8(tc.minute), uint8(tc.second), uint32(tc.nanosecond)}, + } + + res := dt.ToStdTime(time.Now()) + + if dt.IsZero() { + assert.Equal(t, 1, res.Day()) + assert.Equal(t, time.Month(1), res.Month()) + assert.Equal(t, 1, res.Year()) + } else if dt.Date.IsZero() { + assert.Equal(t, time.Now().Day(), res.Day()) + assert.Equal(t, time.Now().Month(), res.Month()) + assert.Equal(t, time.Now().Year(), res.Year()) + } else { + assert.Equal(t, tc.day, res.Day()) + assert.Equal(t, time.Month(tc.month), res.Month()) + assert.Equal(t, tc.year, res.Year()) + } + + assert.Equal(t, tc.hour, res.Hour()) + assert.Equal(t, tc.minute, res.Minute()) + assert.Equal(t, tc.second, res.Second()) + assert.Equal(t, tc.nanosecond, res.Nanosecond()) + } +} + +func TestDateFormats(t *testing.T) { + testCases := []struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + wantDate string + wantDateInt64 int64 + }{ + {2024, 3, 15, 12, 23, 34, 45, "2024-03-15", 20240315}, + {2024, 3, 15, 0, 0, 0, 0, "2024-03-15", 20240315}, + {0, 0, 0, 12, 23, 34, 45000, "0000-00-00", 0}, + {0, 0, 0, 0, 0, 0, 0, "0000-00-00", 0}, + } + + for _, tc := range testCases { + d := Date{uint16(tc.year), uint8(tc.month), uint8(tc.day)} + + b := d.Format() + assert.Equal(t, tc.wantDate, string(b)) + + f := d.FormatInt64() + assert.Equal(t, tc.wantDateInt64, f) + } +} + +func TestDateCompare(t *testing.T) { + testCases := []struct { + d1 Date + d2 Date + want int + }{ 
+ {Date{2024, 03, 12}, Date{2023, 02, 28}, 1}, + {Date{2023, 02, 28}, Date{2024, 03, 12}, -1}, + {Date{2024, 03, 12}, Date{2024, 02, 28}, 1}, + {Date{2024, 02, 28}, Date{2024, 03, 12}, -1}, + {Date{2024, 02, 28}, Date{2024, 02, 12}, 1}, + {Date{2024, 02, 12}, Date{2024, 02, 28}, -1}, + {Date{2024, 03, 12}, Date{2024, 03, 12}, 0}, + } + + for _, tc := range testCases { + got := tc.d1.Compare(tc.d2) + assert.Equal(t, tc.want, got) + } +} + +func TestAddInterval(t *testing.T) { + testCases := []struct { + d Date + in Interval + want Date + ok bool + }{ + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + sec: (maxDay + 1) * 24 * 60 * 60, + prec: 6, + }, + unit: IntervalSecond, + }, + want: Date{2024, 03, 12}, + ok: false, + }, + { + d: Date{2023, 02, 12}, + in: Interval{ + timeparts: timeparts{ + day: 18, + sec: 12, + prec: 6, + }, + unit: IntervalSecond, + }, + want: Date{2023, 03, 02}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + sec: 3600 * 24, + prec: 6, + }, + unit: IntervalSecond, + }, + want: Date{2024, 03, 13}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + day: maxDay + 1, + prec: 6, + }, + unit: IntervalDay, + }, + want: Date{0, 0, 0}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + day: 123, + prec: 6, + }, + unit: IntervalDay, + }, + want: Date{2024, 7, 13}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + month: 12, + }, + unit: IntervalMonth, + }, + want: Date{2025, 3, 12}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + year: -3000, + }, + unit: IntervalMonth, + }, + want: Date{2024, 3, 12}, + ok: false, + }, + { + d: Date{2023, 03, 29}, + in: Interval{ + timeparts: timeparts{ + month: -1, + }, + unit: IntervalMonth, + }, + want: Date{2023, 2, 28}, + ok: true, + }, + { + d: Date{2024, 02, 29}, + in: Interval{ + timeparts: timeparts{ + 
year: -1, + }, + unit: IntervalYear, + }, + want: Date{2023, 2, 28}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + year: 12, + }, + unit: IntervalYear, + }, + want: Date{2036, 3, 12}, + ok: true, + }, + { + d: Date{2024, 03, 12}, + in: Interval{ + timeparts: timeparts{ + year: 10001, + }, + unit: IntervalYear, + }, + want: Date{2024, 3, 12}, + ok: false, + }, + } + + for _, tc := range testCases { + d, ok := tc.d.AddInterval(&tc.in) + + assert.Equal(t, tc.want, d) + assert.Equal(t, tc.ok, ok) + } +} + +func TestWeightString(t *testing.T) { + testCases := []struct { + dt DateTime + want []byte + }{ + { + dt: DateTime{ + Date: Date{2024, 3, 15}, + Time: Time{7, 23, 40, 0}, + }, + want: []byte{116, 101, 115, 116, 58, 32, 153, 178, 222, 117, 232, 0, 0, 0}, + }, + { + dt: DateTime{ + Date: Date{2024, 3, 15}, + Time: Time{1<<15 | 7, 23, 40, 0}, + }, + want: []byte{116, 101, 115, 116, 58, 32, 102, 77, 33, 138, 24, 0, 0, 0}, + }, + } + + dst := []byte("test: ") + for _, tc := range testCases { + res := tc.dt.WeightString(dst) + assert.Equal(t, tc.want, res) + } +} + +func TestDateTimeFormats(t *testing.T) { + testCases := []struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + prec int + want string + wantDateTimeInt64 int64 + wantDateTimeFloat64 float64 + }{ + {2024, 3, 15, 12, 23, 34, 45, 0, "2024-03-15 12:23:34", 20240315122334, 20240315122334}, + {2024, 3, 15, 0, 0, 0, 0, 6, "2024-03-15 00:00:00.000000", 20240315000000, 20240315000000}, + {0, 0, 0, 12, 23, 34, 45000, 9, "0000-00-00 12:23:34.000045000", 122334, 122334.000045}, + {0, 0, 0, 0, 0, 0, 0, 0, "0000-00-00 00:00:00", 0, 0}, + } + + for _, tc := range testCases { + dt := DateTime{ + Date: Date{uint16(tc.year), uint8(tc.month), uint8(tc.day)}, + Time: Time{uint16(tc.hour), uint8(tc.minute), uint8(tc.second), uint32(tc.nanosecond)}, + } + + b := dt.Format(uint8(tc.prec)) + assert.Equal(t, tc.want, string(b)) + + i := 
dt.FormatInt64() + assert.Equal(t, tc.wantDateTimeInt64, i) + + f := dt.FormatFloat64() + assert.Equal(t, tc.wantDateTimeFloat64, f) + } +} + +func TestDateTimeCompare(t *testing.T) { + testCases := []struct { + dt1 DateTime + dt2 DateTime + want int + }{ + {DateTime{Date: Date{2024, 03, 12}}, DateTime{Date: Date{2024, 02, 12}}, 1}, + {DateTime{Time: Time{12, 30, 20, 0}}, DateTime{Time: Time{12, 30, 20, 23}}, -1}, + {DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 0}}, DateTime{Time: Time{12, 30, 20, 23}}, -1}, + {DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 0}}, DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 0}}, 0}, + } + + for _, tc := range testCases { + got := tc.dt1.Compare(tc.dt2) + assert.Equal(t, tc.want, got) + } +} + +func TestDateTimeRound(t *testing.T) { + testCases := []struct { + dt DateTime + p int + want DateTime + }{ + {DateTime{Date: Date{2024, 03, 12}}, 4, DateTime{Date: Date{2024, 03, 12}}}, + {DateTime{Time: Time{12, 30, 20, 123312}}, 6, DateTime{Time: Time{12, 30, 20, 123000}}}, + {DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 123312}}, 9, DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 123312}}}, + {DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 1e9}}, 9, DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 21, 0}}}, + {DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 123}}, 0, DateTime{Date: Date{2024, 03, 12}, Time: Time{12, 30, 20, 0}}}, + } + + for _, tc := range testCases { + got := tc.dt.Round(tc.p) + assert.Equal(t, tc.want, got) + } +} + +func TestHash(t *testing.T) { + time := NewTimeFromStd(testGoTime) + h := vthash.New() + time.Hash(&h) + + want := [16]byte{ + 0xaa, 0x5c, 0xb4, 0xd3, 0x02, 0x85, 0xb3, 0xf3, + 0xb2, 0x44, 0x7d, 0x7c, 0x00, 0xda, 0x4a, 0xec, + } + assert.Equal(t, want, h.Sum128()) + + date := Date{2024, 3, 16} + h = vthash.New() + date.Hash(&h) + + want = [16]byte{ + 0xa8, 0xa0, 0x91, 0xbd, 0x3b, 0x27, 0xfc, 0x8b, + 0xf2, 0xfa, 0xe3, 0x09, 
0xba, 0x23, 0x56, 0xe5, + } + assert.Equal(t, want, h.Sum128()) + + dt := DateTime{Date: date, Time: time} + h = vthash.New() + dt.Hash(&h) + + want = [16]byte{ + 0x0f, 0xd7, 0x67, 0xa0, 0xd8, 0x6, 0x1c, 0xc, + 0xe7, 0xbd, 0x71, 0x74, 0xfa, 0x74, 0x66, 0x38, + } + assert.Equal(t, want, h.Sum128()) +} diff --git a/go/mysql/datetime/helpers.go b/go/mysql/datetime/helpers.go index 33d673782fc..68466e320e2 100644 --- a/go/mysql/datetime/helpers.go +++ b/go/mysql/datetime/helpers.go @@ -17,6 +17,7 @@ limitations under the License. package datetime import ( + "strings" "time" ) @@ -245,7 +246,7 @@ func daysIn(m time.Month, year int) int { } func isLeap(year int) bool { - return year%4 == 0 && (year%100 != 0 || year%400 == 0) + return year%4 == 0 && (year%100 != 0 || year%400 == 0) && (year != 0) } func daysInYear(year int) int { @@ -285,7 +286,14 @@ func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, l } const ( - secondsPerMinute = 60 - secondsPerHour = 60 * secondsPerMinute - secondsPerDay = 24 * secondsPerHour + durationPerDay = 24 * time.Hour ) + +// SizeAndScaleFromString +func SizeFromString(s string) int32 { + idx := strings.LastIndex(s, ".") + if idx == -1 { + return 0 + } + return int32(len(s[idx+1:])) +} diff --git a/go/mysql/datetime/helpers_test.go b/go/mysql/datetime/helpers_test.go new file mode 100644 index 00000000000..cb46500bf45 --- /dev/null +++ b/go/mysql/datetime/helpers_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSizeFromString(t *testing.T) { + testcases := []struct { + value string + sizeExpected int32 + }{ + { + value: "2020-01-01 00:00:00", + sizeExpected: 0, + }, + { + value: "2020-01-01 00:00:00.1", + sizeExpected: 1, + }, + { + value: "2020-01-01 00:00:00.12", + sizeExpected: 2, + }, + { + value: "2020-01-01 00:00:00.123", + sizeExpected: 3, + }, + { + value: "2020-01-01 00:00:00.123456", + sizeExpected: 6, + }, + { + value: "00:00:00", + sizeExpected: 0, + }, + { + value: "00:00:00.1", + sizeExpected: 1, + }, + { + value: "00:00:00.12", + sizeExpected: 2, + }, + { + value: "00:00:00.123", + sizeExpected: 3, + }, + { + value: "00:00:00.123456", + sizeExpected: 6, + }, + } + for _, testcase := range testcases { + t.Run(testcase.value, func(t *testing.T) { + siz := SizeFromString(testcase.value) + assert.EqualValues(t, testcase.sizeExpected, siz) + }) + } +} diff --git a/go/mysql/datetime/interval.go b/go/mysql/datetime/interval.go index 21395f2174d..75e1ce7bb45 100644 --- a/go/mysql/datetime/interval.go +++ b/go/mysql/datetime/interval.go @@ -258,13 +258,13 @@ func (itv *Interval) inRange() bool { if itv.day > maxDay { return false } - if itv.hour > maxDay*24 { + if itv.hour/24 > maxDay { return false } - if itv.min > maxDay*24*60 { + if itv.min/24/60 > maxDay { return false } - if itv.sec > maxDay*24*60*60 { + if itv.sec/24/60/60 > maxDay { return false } return true diff --git a/go/mysql/datetime/interval_test.go b/go/mysql/datetime/interval_test.go new file mode 100644 index 00000000000..22b4617656b --- /dev/null +++ b/go/mysql/datetime/interval_test.go @@ -0,0 +1,369 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/decimal" +) + +func TestIntervalType(t *testing.T) { + testCases := []struct { + in IntervalType + wantPartCount int + wantTimeParts bool + wantDateParts bool + wantDayParts bool + wantMonthParts bool + wantNeedsPrecision bool + }{ + {IntervalYear, 1, false, true, false, false, false}, + {IntervalMonth, 1, false, true, false, true, false}, + {IntervalDay, 1, false, true, true, false, false}, + {IntervalHour, 1, true, false, false, false, false}, + {IntervalMinute, 1, true, false, false, false, false}, + {IntervalSecond, 1, true, false, false, false, false}, + {IntervalMicrosecond, 1, true, false, false, false, true}, + {IntervalNone, 0, false, false, false, false, false}, + {IntervalQuarter, 1, false, true, false, true, false}, + {IntervalWeek, 1, false, true, true, false, false}, + {IntervalSecondMicrosecond, 2, true, false, false, false, true}, + {IntervalMinuteMicrosecond, 3, true, false, false, false, true}, + {IntervalMinuteSecond, 2, true, false, false, false, false}, + {IntervalHourMicrosecond, 4, true, false, false, false, true}, + {IntervalHourSecond, 3, true, false, false, false, false}, + {IntervalHourMinute, 2, true, false, false, false, false}, + {IntervalDayMicrosecond, 5, true, true, true, false, true}, + {IntervalDaySecond, 4, true, true, true, false, false}, + {IntervalDayMinute, 3, true, true, true, false, false}, + {IntervalDayHour, 2, true, true, true, false, false}, + {IntervalYearMonth, 2, false, true, false, true, false}, + } + + 
for _, tc := range testCases { + got := tc.in.HasTimeParts() + assert.Equal(t, tc.wantTimeParts, got) + + got = tc.in.HasDateParts() + assert.Equal(t, tc.wantDateParts, got) + + got = tc.in.HasDayParts() + assert.Equal(t, tc.wantDayParts, got) + + got = tc.in.HasMonthParts() + assert.Equal(t, tc.wantMonthParts, got) + + got = tc.in.NeedsPrecision() + assert.Equal(t, tc.wantNeedsPrecision, got) + + assert.Equal(t, tc.wantPartCount, tc.in.PartCount()) + } +} + +func TestParseInterval(t *testing.T) { + testCases := []struct { + in string + tt IntervalType + want *Interval + }{ + { + in: "123", + tt: IntervalSecond, + want: &Interval{ + timeparts: timeparts{ + sec: 123, + prec: 6, + }, + unit: IntervalSecond, + }, + }, + { + in: "1", + tt: IntervalDay, + want: &Interval{ + timeparts: timeparts{ + day: 1, + prec: 0, + }, + unit: IntervalDay, + }, + }, + { + in: "1234", + tt: IntervalMinute, + want: &Interval{ + timeparts: timeparts{ + min: 1234, + prec: 0, + }, + unit: IntervalMinute, + }, + }, + { + in: "123.98", + tt: IntervalSecond, + want: &Interval{ + timeparts: timeparts{ + sec: 123, + nsec: 980000000, + prec: 6, + }, + unit: IntervalSecond, + }, + }, + } + + for _, tc := range testCases { + res := ParseInterval(tc.in, tc.tt, false) + assert.Equal(t, tc.want, res) + } + + // Neg interval case + res := ParseInterval("123", IntervalSecond, true) + want := &Interval{ + timeparts: timeparts{ + sec: -123, + prec: 6, + }, + unit: IntervalSecond, + } + assert.Equal(t, want, res) +} + +func TestParseIntervalInt64(t *testing.T) { + testCases := []struct { + in int64 + tt IntervalType + want *Interval + }{ + { + in: 123, + tt: IntervalSecond, + want: &Interval{ + timeparts: timeparts{ + sec: 123, + prec: 0, + }, + unit: IntervalSecond, + }, + }, + { + in: 1234, + tt: IntervalMicrosecond, + want: &Interval{ + timeparts: timeparts{ + nsec: 1234000, + prec: 6, + }, + unit: IntervalMicrosecond, + }, + }, + { + in: 35454, + tt: IntervalMinute, + want: &Interval{ + timeparts: 
timeparts{ + min: 35454, + prec: 0, + }, + unit: IntervalMinute, + }, + }, + } + + for _, tc := range testCases { + res := ParseIntervalInt64(tc.in, tc.tt, false) + assert.Equal(t, tc.want, res) + } + + // Neg interval case + res := ParseIntervalInt64(123, IntervalSecond, true) + want := &Interval{ + timeparts: timeparts{ + sec: -123, + prec: 0, + }, + unit: IntervalSecond, + } + assert.Equal(t, want, res) +} + +func TestParseIntervalFloat(t *testing.T) { + testCases := []struct { + in float64 + tt IntervalType + want *Interval + }{ + { + in: 123.45, + tt: IntervalSecond, + want: &Interval{ + timeparts: timeparts{ + sec: 123, + nsec: 450000000, + prec: 6, + }, + unit: IntervalSecond, + }, + }, + { + in: 12.34, + tt: IntervalMinute, + want: &Interval{ + timeparts: timeparts{ + min: 12, + prec: 0, + }, + unit: IntervalMinute, + }, + }, + { + in: 12.67, + tt: IntervalHour, + want: &Interval{ + timeparts: timeparts{ + hour: 13, + prec: 0, + }, + unit: IntervalHour, + }, + }, + { + in: 12.67, + tt: IntervalMicrosecond, + want: &Interval{ + timeparts: timeparts{ + nsec: 13000, + prec: 6, + }, + unit: IntervalMicrosecond, + }, + }, + { + in: 123, + tt: IntervalDay, + want: &Interval{ + timeparts: timeparts{ + day: 123, + prec: 0, + }, + unit: IntervalDay, + }, + }, + } + + for _, tc := range testCases { + res := ParseIntervalFloat(tc.in, tc.tt, false) + assert.Equal(t, tc.want, res) + + res = ParseIntervalDecimal(decimal.NewFromFloat(tc.in), 6, tc.tt, false) + assert.Equal(t, tc.want, res) + } + + // Neg interval case + res := ParseIntervalFloat(123.4, IntervalSecond, true) + want := &Interval{ + timeparts: timeparts{ + sec: -123, + nsec: -400000000, + prec: 6, + }, + unit: IntervalSecond, + } + assert.Equal(t, want, res) +} + +func TestInRange(t *testing.T) { + testCases := []struct { + in Interval + wantInRange bool + }{ + { + in: Interval{ + timeparts: timeparts{ + day: 3652425, + }, + }, + wantInRange: false, + }, + { + in: Interval{ + timeparts: timeparts{ + day: 
3652424, + }, + }, + wantInRange: true, + }, + { + in: Interval{ + timeparts: timeparts{ + hour: 3652425 * 24, + }, + }, + wantInRange: false, + }, + { + in: Interval{ + timeparts: timeparts{ + hour: 3652424 * 24, + }, + }, + wantInRange: true, + }, + { + in: Interval{ + timeparts: timeparts{ + min: 3652425 * 24 * 60, + }, + }, + wantInRange: false, + }, + { + in: Interval{ + timeparts: timeparts{ + min: 3652424 * 24 * 60, + }, + }, + wantInRange: true, + }, + { + in: Interval{ + timeparts: timeparts{ + sec: 3652425 * 24 * 60 * 60, + }, + }, + wantInRange: false, + }, + { + in: Interval{ + timeparts: timeparts{ + sec: 3652424 * 24 * 60 * 60, + }, + }, + wantInRange: true, + }, + } + + for _, tc := range testCases { + got := tc.in.inRange() + + assert.Equal(t, tc.wantInRange, got) + } +} diff --git a/go/mysql/datetime/mydate.go b/go/mysql/datetime/mydate.go index 1d4a2eaf958..62cbb3f2524 100644 --- a/go/mysql/datetime/mydate.go +++ b/go/mysql/datetime/mydate.go @@ -16,7 +16,7 @@ limitations under the License. package datetime -// mysqlDayNumber converts a date into an absolute day number. +// MysqlDayNumber converts a date into an absolute day number. // This is an algorithm that has been reverse engineered from MySQL; // the tables used as a reference can be found in `testdata/year_to_daynr.json`. // It is worth noting that this absolute day number does not match the @@ -29,7 +29,7 @@ package datetime // This API should only be used when performing datetime calculations (addition // and subtraction), so that the results match MySQL's. All other date handling // operations must use our helpers based on Go's standard library. -func mysqlDayNumber(year, month, day int) int { +func MysqlDayNumber(year, month, day int) int { if year == 0 && month == 0 { return 0 } @@ -49,8 +49,8 @@ func mysqlDayNumber(year, month, day int) int { // mysqlDateFromDayNumber converts an absolute day number into a date (a year, month, day triplet). 
// This is an algorithm that has been reverse engineered from MySQL; // the tables used as a reference can be found in `testdata/daynr_to_date.json`. -// See the warning from mysqlDayNumber: the day number used as an argument to -// this function must come from mysqlDayNumber or the results won't be correct. +// See the warning from MysqlDayNumber: the day number used as an argument to +// this function must come from MysqlDayNumber or the results won't be correct. // This API should only be used when performing datetime calculations (addition // and subtraction), so that the results match MySQL's. All other date handling // operations must use our helpers based on Go's standard library. @@ -81,3 +81,11 @@ func mysqlDateFromDayNumber(daynr int) (uint16, uint8, uint8) { panic("unreachable: yday is too large?") } + +// DateFromDayNumber converts an absolute day number into a Date. +// Returns zero date if day number exceeds 3652499 or is less than 366. +func DateFromDayNumber(daynr int) Date { + var d Date + d.year, d.month, d.day = mysqlDateFromDayNumber(daynr) + return d +} diff --git a/go/mysql/datetime/mydate_test.go b/go/mysql/datetime/mydate_test.go index 29ecd2df9d2..bb5073b8ff8 100644 --- a/go/mysql/datetime/mydate_test.go +++ b/go/mysql/datetime/mydate_test.go @@ -35,7 +35,7 @@ func TestDayNumber(t *testing.T) { require.NoError(t, err) for year, daynr := range expected { - assert.Equal(t, daynr, mysqlDayNumber(year, 1, 1)) + assert.Equal(t, daynr, MysqlDayNumber(year, 1, 1)) } } @@ -54,6 +54,14 @@ func TestDayNumberFields(t *testing.T) { assert.Equal(t, tc[2], int(m)) assert.Equal(t, tc[3], int(d)) - assert.Equalf(t, tc[0], mysqlDayNumber(tc[1], tc[2], tc[3]), "date %d-%d-%d", tc[1], tc[2], tc[3]) + assert.Equalf(t, tc[0], MysqlDayNumber(tc[1], tc[2], tc[3]), "date %d-%d-%d", tc[1], tc[2], tc[3]) + + wantDate := Date{ + year: uint16(tc[1]), + month: uint8(tc[2]), + day: uint8(tc[3]), + } + got := DateFromDayNumber(tc[0]) + assert.Equal(t, wantDate, got) } } 
diff --git a/go/mysql/datetime/parse.go b/go/mysql/datetime/parse.go index e8f17191f4c..b3673cbcd42 100644 --- a/go/mysql/datetime/parse.go +++ b/go/mysql/datetime/parse.go @@ -24,42 +24,45 @@ import ( "vitess.io/vitess/go/mysql/fastparse" ) -func parsetimeHours(tp *timeparts, in string) (out string, ok bool) { +func parsetimeHours(tp *timeparts, in string) (string, TimeState) { + var ok bool if tp.hour, in, ok = getnumn(in); ok { tp.day = tp.day + tp.hour/24 tp.hour = tp.hour % 24 switch { case len(in) == 0: - return "", true + return "", TimeOK case in[0] == ':': return parsetimeMinutes(tp, in[1:]) } } - return "", false + return "", TimePartial } -func parsetimeMinutes(tp *timeparts, in string) (out string, ok bool) { +func parsetimeMinutes(tp *timeparts, in string) (string, TimeState) { + var ok bool if tp.min, in, ok = getnum(in, false); ok { switch { case tp.min > 59: - return "", false + return "", TimeInvalid case len(in) == 0: - return "", true + return "", TimeOK case in[0] == ':': return parsetimeSeconds(tp, in[1:]) } } - return "", false + return "", TimePartial } -func parsetimeSeconds(tp *timeparts, in string) (out string, ok bool) { +func parsetimeSeconds(tp *timeparts, in string) (string, TimeState) { + var ok bool if tp.sec, in, ok = getnum(in, false); ok { switch { case tp.sec > 59: - return "", false + return "", TimeInvalid case len(in) == 0: - return "", true + return "", TimeOK case len(in) > 1 && in[0] == '.': n := 1 for ; n < len(in) && isDigit(in, n); n++ { @@ -67,14 +70,18 @@ func parsetimeSeconds(tp *timeparts, in string) (out string, ok bool) { var l int tp.nsec, l, ok = parseNanoseconds(in, n) tp.prec = uint8(l) - return "", ok && len(in) == n + if ok && len(in) == n { + return "", TimeOK + } + return "", TimePartial } } - return "", false + return "", TimePartial } -func parsetimeAny(tp *timeparts, in string) (out string, ok bool) { +func parsetimeAny(tp *timeparts, in string) (out string, state TimeState) { orig := in + var ok bool 
for i := 0; i < len(in); i++ { switch r := in[i]; { case isSpace(r): @@ -91,7 +98,7 @@ func parsetimeAny(tp *timeparts, in string) (out string, ok bool) { return parsetimeNoDelimiters(tp, orig) } if tp.day > 34 { - return "", clampTimeparts(tp) + return "", clampTimeparts(tp, state) } return parsetimeHours(tp, in) case r == ':': @@ -101,8 +108,9 @@ func parsetimeAny(tp *timeparts, in string) (out string, ok bool) { return parsetimeNoDelimiters(tp, in) } -func parsetimeNoDelimiters(tp *timeparts, in string) (out string, ok bool) { +func parsetimeNoDelimiters(tp *timeparts, in string) (out string, state TimeState) { var integral int + var ok bool for ; integral < len(in); integral++ { if in[integral] == '.' || !isDigit(in, integral) { break @@ -112,12 +120,9 @@ func parsetimeNoDelimiters(tp *timeparts, in string) (out string, ok bool) { switch integral { default: // MySQL limits this to a numeric value that fits in a 32-bit unsigned integer. - i, _ := fastparse.ParseInt64(in[:integral], 10) + i, _ := fastparse.ParseUint64(in[:integral], 10) if i > math.MaxUint32 { - return "", false - } - if i < -math.MaxUint32 { - return "", false + return "", TimeInvalid } tp.hour, in, ok = getnuml(in, integral-4) @@ -132,7 +137,7 @@ func parsetimeNoDelimiters(tp *timeparts, in string) (out string, ok bool) { case 3, 4: tp.min, in, ok = getnuml(in, integral-2) if !ok || tp.min > 59 { - return "", false + return "", TimeInvalid } integral = 2 fallthrough @@ -140,10 +145,10 @@ func parsetimeNoDelimiters(tp *timeparts, in string) (out string, ok bool) { case 1, 2: tp.sec, in, ok = getnuml(in, integral) if !ok || tp.sec > 59 { - return "", false + return "", TimeInvalid } case 0: - return "", false + return "", TimeInvalid } if len(in) > 1 && in[0] == '.' 
&& isDigit(in, 1) { @@ -152,14 +157,18 @@ func parsetimeNoDelimiters(tp *timeparts, in string) (out string, ok bool) { } var l int tp.nsec, l, ok = parseNanoseconds(in, n) + if !ok { + state = TimeInvalid + } tp.prec = uint8(l) in = in[n:] } - return in, clampTimeparts(tp) && ok + state = clampTimeparts(tp, state) + return in, state } -func clampTimeparts(tp *timeparts) bool { +func clampTimeparts(tp *timeparts, state TimeState) TimeState { // Maximum time is 838:59:59, so we have to clamp // it to that value here if we otherwise successfully // parser the time. @@ -168,15 +177,31 @@ func clampTimeparts(tp *timeparts) bool { tp.hour = 22 tp.min = 59 tp.sec = 59 - return false + if state == TimeOK { + return TimePartial + } } - return true + return state } -func ParseTime(in string, prec int) (t Time, l int, ok bool) { +type TimeState uint8 + +const ( + // TimeOK indicates that the parsed value is valid and complete. + TimeOK TimeState = iota + // TimePartial indicates that the parsed value has a partially parsed value + // but it is not fully complete and valid. There could be additional stray + // data in the input, or it has an overflow. + TimePartial + // TimeInvalid indicates that the parsed value is invalid and no partial + // TIME value could be extracted from the input. 
+ TimeInvalid +) + +func ParseTime(in string, prec int) (t Time, l int, state TimeState) { in = strings.Trim(in, " \t\r\n") if len(in) == 0 { - return Time{}, 0, false + return Time{}, 0, TimeInvalid } var neg bool if in[0] == '-' { @@ -185,11 +210,15 @@ func ParseTime(in string, prec int) (t Time, l int, ok bool) { } var tp timeparts - in, ok = parsetimeAny(&tp, in) - ok = clampTimeparts(&tp) && ok + in, state = parsetimeAny(&tp, in) + if state == TimeInvalid { + return Time{}, 0, state + } + + state = clampTimeparts(&tp, state) hours := uint16(24*tp.day + tp.hour) - if !tp.isZero() && neg { + if neg { hours |= negMask } @@ -206,7 +235,13 @@ func ParseTime(in string, prec int) (t Time, l int, ok bool) { t = t.Round(prec) } - return t, prec, ok && len(in) == 0 + switch { + case state == TimeOK && len(in) == 0: + state = TimeOK + case state == TimeOK && len(in) > 0: + state = TimePartial + } + return t, prec, state } func ParseDate(s string) (Date, bool) { @@ -304,7 +339,7 @@ func ParseTimeInt64(i int64) (t Time, ok bool) { return t, false } - if i > 838 { + if i > MaxHours { return t, false } t.hour = uint16(i) diff --git a/go/mysql/datetime/parse_test.go b/go/mysql/datetime/parse_test.go index 6ed342edfb3..a219f518995 100644 --- a/go/mysql/datetime/parse_test.go +++ b/go/mysql/datetime/parse_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/decimal" ) func TestParseDate(t *testing.T) { @@ -96,32 +98,33 @@ func TestParseTime(t *testing.T) { output testTime norm string l int - err bool + state TimeState }{ {input: "00:00:00", norm: "00:00:00.000000", output: testTime{}}, - {input: "00:00:00foo", norm: "00:00:00.000000", output: testTime{}, err: true}, + {input: "-00:00:00", norm: "-00:00:00.000000", output: testTime{negative: true}}, + {input: "00:00:00foo", norm: "00:00:00.000000", output: testTime{}, state: TimePartial}, {input: "11:12:13", norm: "11:12:13.000000", output: 
testTime{11, 12, 13, 0, false}}, - {input: "11:12:13foo", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, err: true}, + {input: "11:12:13foo", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, state: TimePartial}, {input: "11:12:13.1", norm: "11:12:13.100000", output: testTime{11, 12, 13, 100000000, false}, l: 1}, - {input: "11:12:13.foo", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, err: true}, - {input: "11:12:13.1foo", norm: "11:12:13.100000", output: testTime{11, 12, 13, 100000000, false}, l: 1, err: true}, + {input: "11:12:13.foo", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, state: TimePartial}, + {input: "11:12:13.1foo", norm: "11:12:13.100000", output: testTime{11, 12, 13, 100000000, false}, l: 1, state: TimePartial}, {input: "11:12:13.123456", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6}, {input: "11:12:13.000001", norm: "11:12:13.000001", output: testTime{11, 12, 13, 1000, false}, l: 6}, {input: "11:12:13.000000", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}, l: 6}, - {input: "11:12:13.123456foo", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6, err: true}, + {input: "11:12:13.123456foo", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6, state: TimePartial}, {input: "3 11:12:13", norm: "83:12:13.000000", output: testTime{3*24 + 11, 12, 13, 0, false}}, - {input: "3 11:12:13foo", norm: "83:12:13.000000", output: testTime{3*24 + 11, 12, 13, 0, false}, err: true}, + {input: "3 11:12:13foo", norm: "83:12:13.000000", output: testTime{3*24 + 11, 12, 13, 0, false}, state: TimePartial}, {input: "3 41:12:13", norm: "113:12:13.000000", output: testTime{3*24 + 41, 12, 13, 0, false}}, - {input: "3 41:12:13foo", norm: "113:12:13.000000", output: testTime{3*24 + 41, 12, 13, 0, false}, err: true}, - {input: "34 23:12:13", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, 
err: true}, - {input: "35 11:12:13", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, + {input: "3 41:12:13foo", norm: "113:12:13.000000", output: testTime{3*24 + 41, 12, 13, 0, false}, state: TimePartial}, + {input: "34 23:12:13", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, state: TimePartial}, + {input: "35 11:12:13", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, state: TimePartial}, {input: "11:12", norm: "11:12:00.000000", output: testTime{11, 12, 0, 0, false}}, {input: "5 11:12", norm: "131:12:00.000000", output: testTime{5*24 + 11, 12, 0, 0, false}}, {input: "-2 11:12", norm: "-59:12:00.000000", output: testTime{2*24 + 11, 12, 0, 0, true}}, - {input: "--2 11:12", norm: "00:00:00.000000", err: true}, - {input: "nonsense", norm: "00:00:00.000000", err: true}, + {input: "--2 11:12", norm: "00:00:00.000000", state: TimeInvalid}, + {input: "nonsense", norm: "00:00:00.000000", state: TimeInvalid}, {input: "2 11", norm: "59:00:00.000000", output: testTime{2*24 + 11, 0, 0, 0, false}}, - {input: "2 -11", norm: "00:00:02.000000", output: testTime{0, 0, 2, 0, false}, err: true}, + {input: "2 -11", norm: "00:00:02.000000", output: testTime{0, 0, 2, 0, false}, state: TimePartial}, {input: "13", norm: "00:00:13.000000", output: testTime{0, 0, 13, 0, false}}, {input: "111213", norm: "11:12:13.000000", output: testTime{11, 12, 13, 0, false}}, {input: "111213.123456", norm: "11:12:13.123456", output: testTime{11, 12, 13, 123456000, false}, l: 6}, @@ -130,19 +133,21 @@ func TestParseTime(t *testing.T) { {input: "25:12:13", norm: "25:12:13.000000", output: testTime{25, 12, 13, 0, false}}, {input: "32:35", norm: "32:35:00.000000", output: testTime{32, 35, 0, 0, false}}, {input: "101:34:58", norm: "101:34:58.000000", output: testTime{101, 34, 58, 0, false}}, + {input: "101:64:58", norm: "00:00:00.000000", state: TimeInvalid}, + {input: "101:34:68", norm: "00:00:00.000000", state: TimeInvalid}, {input: "1", 
norm: "00:00:01.000000", output: testTime{0, 0, 1, 0, false}}, {input: "11", norm: "00:00:11.000000", output: testTime{0, 0, 11, 0, false}}, {input: "111", norm: "00:01:11.000000", output: testTime{0, 1, 11, 0, false}}, {input: "1111", norm: "00:11:11.000000", output: testTime{0, 11, 11, 0, false}}, {input: "11111", norm: "01:11:11.000000", output: testTime{1, 11, 11, 0, false}}, {input: "111111", norm: "11:11:11.000000", output: testTime{11, 11, 11, 0, false}}, - {input: "1foo", norm: "00:00:01.000000", output: testTime{0, 0, 1, 0, false}, err: true}, - {input: "11foo", norm: "00:00:11.000000", output: testTime{0, 0, 11, 0, false}, err: true}, - {input: "111foo", norm: "00:01:11.000000", output: testTime{0, 1, 11, 0, false}, err: true}, - {input: "1111foo", norm: "00:11:11.000000", output: testTime{0, 11, 11, 0, false}, err: true}, - {input: "11111foo", norm: "01:11:11.000000", output: testTime{1, 11, 11, 0, false}, err: true}, - {input: "111111foo", norm: "11:11:11.000000", output: testTime{11, 11, 11, 0, false}, err: true}, - {input: "1111111foo", norm: "111:11:11.000000", output: testTime{111, 11, 11, 0, false}, err: true}, + {input: "1foo", norm: "00:00:01.000000", output: testTime{0, 0, 1, 0, false}, state: TimePartial}, + {input: "11foo", norm: "00:00:11.000000", output: testTime{0, 0, 11, 0, false}, state: TimePartial}, + {input: "111foo", norm: "00:01:11.000000", output: testTime{0, 1, 11, 0, false}, state: TimePartial}, + {input: "1111foo", norm: "00:11:11.000000", output: testTime{0, 11, 11, 0, false}, state: TimePartial}, + {input: "11111foo", norm: "01:11:11.000000", output: testTime{1, 11, 11, 0, false}, state: TimePartial}, + {input: "111111foo", norm: "11:11:11.000000", output: testTime{11, 11, 11, 0, false}, state: TimePartial}, + {input: "1111111foo", norm: "111:11:11.000000", output: testTime{111, 11, 11, 0, false}, state: TimePartial}, {input: "-1", norm: "-00:00:01.000000", output: testTime{0, 0, 1, 0, true}}, {input: "-11", norm: 
"-00:00:11.000000", output: testTime{0, 0, 11, 0, true}}, {input: "-111", norm: "-00:01:11.000000", output: testTime{0, 1, 11, 0, true}}, @@ -172,44 +177,31 @@ func TestParseTime(t *testing.T) { {input: "11111.1", norm: "01:11:11.100000", output: testTime{1, 11, 11, 100000000, false}, l: 1}, {input: "111111.1", norm: "11:11:11.100000", output: testTime{11, 11, 11, 100000000, false}, l: 1}, {input: "1111111.1", norm: "111:11:11.100000", output: testTime{111, 11, 11, 100000000, false}, l: 1}, - {input: "20000101", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, - {input: "-20000101", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, err: true}, - {input: "999995959", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, - {input: "-999995959", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, err: true}, - {input: "4294965959", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, - {input: "-4294965959", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, err: true}, - {input: "4294975959", norm: "00:00:00.000000", err: true}, - {input: "-4294975959", norm: "00:00:00.000000", err: true}, - {input: "\t34 foo\t", norm: "00:00:34.000000", output: testTime{0, 0, 34, 0, false}, err: true}, - {input: "\t34 1foo\t", norm: "817:00:00.000000", output: testTime{817, 0, 0, 0, false}, err: true}, - {input: "\t34 23foo\t", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, - {input: "\t35 foo\t", norm: "00:00:35.000000", output: testTime{0, 0, 35, 0, false}, err: true}, - {input: "\t35 1foo\t", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, err: true}, - {input: " 255 foo", norm: "00:02:55.000000", output: testTime{0, 2, 55, 0, false}, err: true}, - {input: "255", norm: "00:02:55.000000", output: testTime{0, 2, 55, 0, false}}, + {input: "20000101", norm: "838:59:59.000000", output: testTime{838, 59, 59, 
0, false}, state: TimePartial}, + {input: "-20000101", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, state: TimePartial}, + {input: "999995959", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, state: TimePartial}, + {input: "-999995959", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, state: TimePartial}, + {input: "4294965959", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, state: TimePartial}, + {input: "-4294965959", norm: "-838:59:59.000000", output: testTime{838, 59, 59, 0, true}, state: TimePartial}, + {input: "4294975959", norm: "00:00:00.000000", state: TimeInvalid}, + {input: "-4294975959", norm: "00:00:00.000000", state: TimeInvalid}, + {input: "\t34 foo\t", norm: "00:00:34.000000", output: testTime{0, 0, 34, 0, false}, state: TimePartial}, + {input: "\t34 1foo\t", norm: "817:00:00.000000", output: testTime{817, 0, 0, 0, false}, state: TimePartial}, + {input: "\t34 23foo\t", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, state: TimePartial}, + {input: "\t35 foo\t", norm: "00:00:35.000000", output: testTime{0, 0, 35, 0, false}, state: TimePartial}, + {input: "\t35 1foo\t", norm: "838:59:59.000000", output: testTime{838, 59, 59, 0, false}, state: TimePartial}, } for _, test := range tests { t.Run(test.input, func(t *testing.T) { - got, l, ok := ParseTime(test.input, -1) - if test.err { - assert.Equal(t, test.output.hour, got.Hour()) - assert.Equal(t, test.output.minute, got.Minute()) - assert.Equal(t, test.output.second, got.Second()) - assert.Equal(t, test.output.nanosecond, got.Nanosecond()) - assert.Equal(t, test.norm, string(got.AppendFormat(nil, 6))) - assert.Equal(t, test.l, l) - assert.Falsef(t, ok, "did not fail to parse %s", test.input) - return - } - - require.True(t, ok) + got, l, state := ParseTime(test.input, -1) + assert.Equal(t, test.state, state) assert.Equal(t, test.output.hour, got.Hour()) assert.Equal(t, test.output.minute, got.Minute()) 
assert.Equal(t, test.output.second, got.Second()) assert.Equal(t, test.output.nanosecond, got.Nanosecond()) - assert.Equal(t, test.l, l) assert.Equal(t, test.norm, string(got.AppendFormat(nil, 6))) + assert.Equal(t, test.l, l) }) } } @@ -342,3 +334,255 @@ func TestParseDateTimeInt64(t *testing.T) { }) } } + +func TestParseDateTimeFloat(t *testing.T) { + type datetime struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input float64 + prec int + output datetime + outPrec int + l int + err bool + }{ + {input: 1, prec: 3, outPrec: 3, output: datetime{}, err: true}, + {input: 20221012000000.101562, prec: -2, outPrec: 6, output: datetime{2022, 10, 12, 0, 0, 0, 101562500}}, + {input: 20221012112233.125000, prec: 3, outPrec: 3, output: datetime{2022, 10, 12, 11, 22, 33, 125000000}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%f", test.input), func(t *testing.T) { + got, p, ok := ParseDateTimeFloat(test.input, test.prec) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.outPrec, p) + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + }) + } +} + +func 
TestParseDateTimeDecimal(t *testing.T) { + type datetime struct { + year int + month int + day int + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input decimal.Decimal + prec int + output datetime + outPrec int + l int32 + err bool + }{ + {input: decimal.NewFromFloat(1), l: 6, prec: 3, outPrec: 3, output: datetime{}, err: true}, + {input: decimal.NewFromFloat(20221012000000.101562), l: 6, prec: -2, outPrec: 6, output: datetime{2022, 10, 12, 0, 0, 0, 100000000}}, + {input: decimal.NewFromFloat(20221012112233.125000), l: 6, prec: 3, outPrec: 3, output: datetime{2022, 10, 12, 11, 22, 33, 125000000}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%v", test.input), func(t *testing.T) { + got, p, ok := ParseDateTimeDecimal(test.input, test.l, test.prec) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.outPrec, p) + assert.Equal(t, test.output.year, got.Date.Year()) + assert.Equal(t, test.output.month, got.Date.Month()) + assert.Equal(t, test.output.day, got.Date.Day()) + assert.Equal(t, test.output.hour, got.Time.Hour()) + assert.Equal(t, test.output.minute, got.Time.Minute()) + assert.Equal(t, test.output.second, got.Time.Second()) + assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond()) + }) + } +} + +func TestParseDateFloatAndDecimal(t *testing.T) { + type date struct { + year int + month int + day int + } + tests := []struct { + input float64 + prec int + output date + outPrec int + l int32 + err bool + }{ + {input: 
1, output: date{0, 0, 1}, err: true}, + {input: 20221012.102, output: date{2022, 10, 12}}, + {input: 20221212.52, output: date{2022, 12, 12}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%f", test.input), func(t *testing.T) { + got, ok := ParseDateFloat(test.input) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Year()) + assert.Equal(t, test.output.month, got.Month()) + assert.Equal(t, test.output.day, got.Day()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.year, got.Year()) + assert.Equal(t, test.output.month, got.Month()) + assert.Equal(t, test.output.day, got.Day()) + + got, ok = ParseDateDecimal(decimal.NewFromFloat(test.input)) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.output.year, got.Year()) + assert.Equal(t, test.output.month, got.Month()) + assert.Equal(t, test.output.day, got.Day()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.output.year, got.Year()) + assert.Equal(t, test.output.month, got.Month()) + assert.Equal(t, test.output.day, got.Day()) + }) + } +} + +func TestParseTimeFloat(t *testing.T) { + type time struct { + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input float64 + prec int + outPrec int + output time + err bool + }{ + {input: 1, prec: 1, outPrec: 1, output: time{0, 0, 1, 0}, err: false}, + {input: 201012.102, prec: -1, outPrec: 6, output: time{20, 10, 12, 102000000}}, + {input: 201212.52, prec: -1, outPrec: 6, output: time{20, 12, 12, 519999999}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%f", test.input), func(t *testing.T) { + got, p, ok := ParseTimeFloat(test.input, test.prec) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.outPrec, p) + assert.Equal(t, test.output.hour, got.Hour()) + assert.Equal(t, test.output.minute, got.Minute()) + 
assert.Equal(t, test.output.second, got.Second()) + assert.Equal(t, test.output.nanosecond, got.Nanosecond()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.outPrec, p) + assert.Equal(t, test.output.hour, got.Hour()) + assert.Equal(t, test.output.minute, got.Minute()) + assert.Equal(t, test.output.second, got.Second()) + assert.Equal(t, test.output.nanosecond, got.Nanosecond()) + }) + } +} + +func TestParseTimeDecimal(t *testing.T) { + type time struct { + hour int + minute int + second int + nanosecond int + } + tests := []struct { + input decimal.Decimal + l int32 + prec int + outPrec int + output time + err bool + }{ + {input: decimal.NewFromFloat(1), l: 6, prec: 1, outPrec: 1, output: time{0, 0, 1, 0}, err: false}, + {input: decimal.NewFromFloat(201012.102), l: 6, prec: -1, outPrec: 6, output: time{20, 10, 12, 102000000}}, + {input: decimal.NewFromFloat(201212.52), l: 6, prec: -1, outPrec: 6, output: time{20, 12, 12, 520000000}}, + {input: decimal.NewFromFloat(201212.52), l: 10, prec: -1, outPrec: 9, output: time{20, 12, 12, 520000000}}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%v", test.input), func(t *testing.T) { + got, p, ok := ParseTimeDecimal(test.input, test.l, test.prec) + if test.err { + if !got.IsZero() { + assert.Equal(t, test.outPrec, p) + assert.Equal(t, test.output.hour, got.Hour()) + assert.Equal(t, test.output.minute, got.Minute()) + assert.Equal(t, test.output.second, got.Second()) + assert.Equal(t, test.output.nanosecond, got.Nanosecond()) + } + assert.Falsef(t, ok, "did not fail to parse %s", test.input) + return + } + + require.True(t, ok) + assert.Equal(t, test.outPrec, p) + assert.Equal(t, test.output.hour, got.Hour()) + assert.Equal(t, test.output.minute, got.Minute()) + assert.Equal(t, test.output.second, got.Second()) + assert.Equal(t, test.output.nanosecond, got.Nanosecond()) + }) + } +} diff --git a/go/mysql/datetime/strftime_test.go 
b/go/mysql/datetime/strftime_test.go new file mode 100644 index 00000000000..3798fab61ee --- /dev/null +++ b/go/mysql/datetime/strftime_test.go @@ -0,0 +1,104 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datetime + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNew(t *testing.T) { + in := "%a-%Y" + res, err := New(in) + assert.NoError(t, err) + assert.Equal(t, res.Pattern(), in) + assert.Equal(t, res.compiled[0].format([]byte{}, DateTime{Date: Date{2024, 3, 15}}, 1), []byte("Fri")) + assert.Equal(t, res.compiled[1].format([]byte{}, DateTime{Date: Date{2024, 3, 15}}, 1), []byte("-")) + assert.Equal(t, res.compiled[2].format([]byte{}, DateTime{Date: Date{2024, 3, 15}}, 1), []byte("2024")) + + in = "%" + res, err = New(in) + assert.Nil(t, res) + assert.Error(t, err) + + in = "-" + res, err = New(in) + assert.NoError(t, err) + assert.Equal(t, res.Pattern(), in) + assert.Equal(t, res.compiled[0].format([]byte{}, DateTime{Date: Date{2024, 3, 15}}, 1), []byte("-")) +} + +func TestStrftimeFormat(t *testing.T) { + testCases := []struct { + in string + want_YYYY_MM_DD string + want_YYYY_M_D string + }{ + {"1999-12-31 23:59:58.999", "1999-12-31", "1999-12-31"}, + {"2000-01-02 03:04:05", "2000-01-02", "2000-1-2"}, + {"2001-01-01 01:04:05", "2001-01-01", "2001-1-1"}, + } + + for _, tc := range testCases { + t.Run(tc.in, func(t *testing.T) { + dt, _, ok := 
ParseDateTime(tc.in, -1) + require.True(t, ok) + + got := Date_YYYY_MM_DD.Format(dt, 6) + assert.Equal(t, []byte(tc.want_YYYY_MM_DD), got) + + got = Date_YYYY_M_D.Format(dt, 6) + assert.Equal(t, []byte(tc.want_YYYY_M_D), got) + + res := Date_YYYY_MM_DD.FormatString(dt, 6) + assert.Equal(t, tc.want_YYYY_MM_DD, res) + + res = Date_YYYY_M_D.FormatString(dt, 6) + assert.Equal(t, tc.want_YYYY_M_D, res) + + dst := []byte("test: ") + b := Date_YYYY_MM_DD.AppendFormat(dst, dt, 6) + want := append([]byte("test: "), []byte(tc.want_YYYY_MM_DD)...) + assert.Equal(t, want, b) + }) + } +} + +func TestFormatNumeric(t *testing.T) { + in := "%Y%h%H%s%d" + res, err := New(in) + require.NoError(t, err) + + testCases := []struct { + dt string + want int64 + }{ + {"1999-12-31 23:59:58.999", 199911235831}, + {"2000-01-02 03:04:05", 200003030502}, + {"2001-01-01 01:04:05", 200101010501}, + } + + for _, tc := range testCases { + dt, _, ok := ParseDateTime(tc.dt, -1) + require.True(t, ok) + + n := res.FormatNumeric(dt) + assert.Equal(t, tc.want, n) + } +} diff --git a/go/mysql/datetime/timeparts.go b/go/mysql/datetime/timeparts.go index a774099a93a..ccc0d0a3640 100644 --- a/go/mysql/datetime/timeparts.go +++ b/go/mysql/datetime/timeparts.go @@ -87,6 +87,6 @@ func (tp *timeparts) isZero() bool { return tp.year == 0 && tp.month == 0 && tp.day == 0 && tp.hour == 0 && tp.min == 0 && tp.sec == 0 && tp.nsec == 0 } -func (tp *timeparts) toSeconds() int { - return tp.day*secondsPerDay + tp.hour*3600 + tp.min*60 + tp.sec +func (tp *timeparts) toDuration() time.Duration { + return time.Duration(tp.day)*durationPerDay + time.Duration(tp.hour)*time.Hour + time.Duration(tp.min)*time.Minute + time.Duration(tp.sec)*time.Second + time.Duration(tp.nsec)*time.Nanosecond } diff --git a/go/mysql/decimal/decimal_test.go b/go/mysql/decimal/decimal_test.go index 09819ddcebb..03619a8f272 100644 --- a/go/mysql/decimal/decimal_test.go +++ b/go/mysql/decimal/decimal_test.go @@ -20,12 +20,14 @@ package decimal import 
( "math" - "math/rand" + "math/rand/v2" "reflect" "strconv" "strings" "testing" "testing/quick" + + "github.com/stretchr/testify/assert" ) type testEnt struct { @@ -120,11 +122,8 @@ func TestNewFromFloat(t *testing.T) { for _, x := range testTable { s := x.short d := NewFromFloat(x.float) - if d.String() != s { - t.Errorf("expected %s, got %s (float: %v) (%s, %d)", - s, d.String(), x.float, - d.value.String(), d.exp) - } + assert.Equal(t, s, d.String()) + } shouldPanicOn := []float64{ @@ -143,24 +142,20 @@ func TestNewFromFloat(t *testing.T) { func TestNewFromFloatRandom(t *testing.T) { n := 0 - rng := rand.New(rand.NewSource(0xdead1337)) for { n++ if n == 10 { break } - in := (rng.Float64() - 0.5) * math.MaxFloat64 * 2 + in := (rand.Float64() - 0.5) * math.MaxFloat64 * 2 want, err := NewFromString(strconv.FormatFloat(in, 'f', -1, 64)) if err != nil { t.Error(err) continue } got := NewFromFloat(in) - if !want.Equal(got) { - t.Errorf("in: %v, expected %s (%s, %d), got %s (%s, %d) ", - in, want.String(), want.value.String(), want.exp, - got.String(), got.value.String(), got.exp) - } + assert.True(t, want.Equal(got)) + } } @@ -180,24 +175,20 @@ func TestNewFromFloatQuick(t *testing.T) { func TestNewFromFloat32Random(t *testing.T) { n := 0 - rng := rand.New(rand.NewSource(0xdead1337)) for { n++ if n == 10 { break } - in := float32((rng.Float64() - 0.5) * math.MaxFloat32 * 2) + in := float32((rand.Float64() - 0.5) * math.MaxFloat32 * 2) want, err := NewFromString(strconv.FormatFloat(float64(in), 'f', -1, 32)) if err != nil { t.Error(err) continue } got := NewFromFloat32(in) - if !want.Equal(got) { - t.Errorf("in: %v, expected %s (%s, %d), got %s (%s, %d) ", - in, want.String(), want.value.String(), want.exp, - got.String(), got.value.String(), got.exp) - } + assert.True(t, want.Equal(got)) + } } @@ -317,14 +308,9 @@ func TestNewFromStringErrs(t *testing.T) { for s, o := range tests { out, err := NewFromString(s) + assert.Error(t, err) + assert.Equal(t, o, out.String()) 
- if err == nil { - t.Errorf("error expected when parsing %s", s) - } - - if out.String() != o { - t.Errorf("expected %s, got %s", o, out.String()) - } } } @@ -353,11 +339,8 @@ func TestNewFromStringDeepEquals(t *testing.T) { if err1 != nil || err2 != nil { t.Errorf("error parsing strings to decimals") } + assert.Equal(t, cmp.expected, reflect.DeepEqual(d1, d2)) - if reflect.DeepEqual(d1, d2) != cmp.expected { - t.Errorf("comparison result is different from expected results for %s and %s", - cmp.str1, cmp.str2) - } } } @@ -413,11 +396,8 @@ func TestNewFromInt(t *testing.T) { for input, s := range tests { d := NewFromInt(input) - if d.String() != s { - t.Errorf("expected %s, got %s (%s, %d)", - s, d.String(), - d.value.String(), d.exp) - } + assert.Equal(t, s, d.String()) + } } @@ -499,20 +479,15 @@ func TestDecimal_RoundAndStringFixed(t *testing.T) { t.Fatal(err) } got := d.Round(test.places) - if !got.Equal(expected) { - t.Errorf("Rounding %s to %d places, got %s, expected %s", - d, test.places, got, expected) - } + assert.True(t, got.Equal(expected)) // test StringFixed if test.expectedFixed == "" { test.expectedFixed = test.expected } gotStr := d.StringFixed(test.places) - if gotStr != test.expectedFixed { - t.Errorf("(%s).StringFixed(%d): got %s, expected %s", - d, test.places, gotStr, test.expectedFixed) - } + assert.Equal(t, test.expectedFixed, gotStr) + } } @@ -541,9 +516,8 @@ func TestDecimal_Add(t *testing.T) { t.FailNow() } c := a.Add(b) - if c.String() != res { - t.Errorf("expected %s, got %s", res, c.String()) - } + assert.Equal(t, res, c.String()) + } } @@ -576,9 +550,8 @@ func TestDecimal_Sub(t *testing.T) { t.FailNow() } c := a.sub(b) - if c.String() != res { - t.Errorf("expected %s, got %s", res, c.String()) - } + assert.Equal(t, res, c.String()) + } } @@ -597,18 +570,16 @@ func TestDecimal_Neg(t *testing.T) { t.FailNow() } b := a.Neg() - if b.String() != res { - t.Errorf("expected %s, got %s", res, b.String()) - } + assert.Equal(t, res, b.String()) 
+ } } func TestDecimal_NegFromEmpty(t *testing.T) { a := Decimal{} b := a.Neg() - if b.String() != "0" { - t.Errorf("expected %s, got %s", "0", b) - } + assert.Equal(t, "0", b.String()) + } func TestDecimal_Mul(t *testing.T) { @@ -635,16 +606,14 @@ func TestDecimal_Mul(t *testing.T) { t.FailNow() } c := a.mul(b) - if c.String() != res { - t.Errorf("expected %s, got %s", res, c.String()) - } + assert.Equal(t, res, c.String()) + } // positive scale c := New(1234, 5).mul(New(45, -1)) - if c.String() != "555300000" { - t.Errorf("Expected %s, got %s", "555300000", c.String()) - } + assert.Equal(t, "555300000", c.String()) + } func TestDecimal_Div(t *testing.T) { @@ -679,14 +648,11 @@ func TestDecimal_Div(t *testing.T) { } got := num.div(denom) expected, _ := NewFromString(expectedStr) - if !got.Equal(expected) { - t.Errorf("expected %v when dividing %v by %v, got %v", - expected, num, denom, got) - } + assert.True(t, got.Equal(expected)) + got2 := num.divRound(denom, int32(divisionPrecision)) - if !got2.Equal(expected) { - t.Errorf("expected %v on divRound (%v,%v), got %v", expected, num, denom, got2) - } + assert.True(t, got2.Equal(expected)) + } type Inp2 struct { @@ -717,10 +683,8 @@ func TestDecimal_Div(t *testing.T) { expected = "-" + expectedAbs } got := num.div(denom) - if got.String() != expected { - t.Errorf("expected %s when dividing %v by %v, got %v", - expected, num, denom, got) - } + assert.Equal(t, expected, got.String()) + } } } @@ -761,14 +725,8 @@ func TestDecimal_QuoRem(t *testing.T) { t.Errorf("bad QuoRem division %s , %s , %d got %v, %v expected %s , %s", inp4.d, inp4.d2, prec, q, r, inp4.q, inp4.r) } - if !d.Equal(d2.mul(q).Add(r)) { - t.Errorf("not fitting: d=%v, d2= %v, prec=%d, q=%v, r=%v", - d, d2, prec, q, r) - } - if !q.Equal(q.Truncate(prec)) { - t.Errorf("quotient wrong precision: d=%v, d2= %v, prec=%d, q=%v, r=%v", - d, d2, prec, q, r) - } + assert.True(t, d.Equal(d2.mul(q).Add(r))) + assert.True(t, q.Equal(q.Truncate(prec))) if 
r.Abs().Cmp(d2.Abs().mul(New(1, -prec))) >= 0 { t.Errorf("remainder too large: d=%v, d2= %v, prec=%d, q=%v, r=%v", d, d2, prec, q, r) @@ -823,15 +781,10 @@ func TestDecimal_QuoRem2(t *testing.T) { prec := tc.prec q, r := d.QuoRem(d2, prec) // rule 1: d = d2*q +r - if !d.Equal(d2.mul(q).Add(r)) { - t.Errorf("not fitting, d=%v, d2=%v, prec=%d, q=%v, r=%v", - d, d2, prec, q, r) - } + assert.True(t, d.Equal(d2.mul(q).Add(r))) // rule 2: q is integral multiple of 10^(-prec) - if !q.Equal(q.Truncate(prec)) { - t.Errorf("quotient wrong precision, d=%v, d2=%v, prec=%d, q=%v, r=%v", - d, d2, prec, q, r) - } + assert.True(t, q.Equal(q.Truncate(prec))) + // rule 3: abs(r)= 0 { t.Errorf("remainder too large, d=%v, d2=%v, prec=%d, q=%v, r=%v", @@ -894,9 +847,8 @@ func TestDecimal_DivRound(t *testing.T) { if x.Cmp(d2.Abs().mul(New(-1, -prec))) <= 0 { t.Errorf("wrong rounding, got: %v/%v prec=%d is about %v", d, d2, prec, q) } - if !q.Equal(result) { - t.Errorf("rounded division wrong %s / %s scale %d = %s, got %v", s.d, s.d2, prec, s.result, q) - } + assert.True(t, q.Equal(result)) + } } @@ -953,9 +905,8 @@ func TestDecimal_Mod(t *testing.T) { t.FailNow() } c := a.mod(b) - if c.String() != res { - t.Errorf("expected %s, got %s", res, c.String()) - } + assert.Equal(t, res, c.String()) + } } @@ -972,9 +923,8 @@ func TestDecimal_Overflow(t *testing.T) { func TestDecimal_Scale(t *testing.T) { a := New(1234, -3) - if a.Exponent() != -3 { - t.Errorf("error") - } + assert.EqualValues(t, -3, a.Exponent()) + } func TestDecimal_Abs1(t *testing.T) { @@ -982,9 +932,8 @@ func TestDecimal_Abs1(t *testing.T) { b := New(1234, -4) c := a.Abs() - if c.Cmp(b) != 0 { - t.Errorf("error") - } + assert.Zero(t, c.Cmp(b)) + } func TestDecimal_Abs2(t *testing.T) { @@ -992,9 +941,8 @@ func TestDecimal_Abs2(t *testing.T) { b := New(1234, -4) c := b.Abs() - if c.Cmp(a) == 0 { - t.Errorf("error") - } + assert.NotZero(t, c.Cmp(a)) + } func TestDecimal_ScalesNotEqual(t *testing.T) { @@ -1008,19 +956,60 @@ func 
TestDecimal_ScalesNotEqual(t *testing.T) { func TestDecimal_Cmp1(t *testing.T) { a := New(123, 3) b := New(-1234, 2) + assert.Equal(t, 1, a.Cmp(b)) +} - if a.Cmp(b) != 1 { - t.Errorf("Error") +func TestSizeAndScaleFromString(t *testing.T) { + testcases := []struct { + value string + sizeExpected int32 + scaleExpected int32 + }{ + { + value: "0.00003", + sizeExpected: 6, + scaleExpected: 5, + }, + { + value: "-0.00003", + sizeExpected: 6, + scaleExpected: 5, + }, + { + value: "12.00003", + sizeExpected: 7, + scaleExpected: 5, + }, + { + value: "-12.00003", + sizeExpected: 7, + scaleExpected: 5, + }, + { + value: "1000003", + sizeExpected: 7, + scaleExpected: 0, + }, + { + value: "-1000003", + sizeExpected: 7, + scaleExpected: 0, + }, + } + for _, testcase := range testcases { + t.Run(testcase.value, func(t *testing.T) { + siz, scale := SizeAndScaleFromString(testcase.value) + assert.EqualValues(t, testcase.sizeExpected, siz) + assert.EqualValues(t, testcase.scaleExpected, scale) + }) } } func TestDecimal_Cmp2(t *testing.T) { a := New(123, 3) b := New(1234, 2) + assert.Equal(t, -1, a.Cmp(b)) - if a.Cmp(b) != -1 { - t.Errorf("Error") - } } func TestDecimal_IsInteger(t *testing.T) { @@ -1045,26 +1034,20 @@ func TestDecimal_IsInteger(t *testing.T) { if err != nil { t.Fatal(err) } - if d.isInteger() != testCase.IsInteger { - t.Errorf("expect %t, got %t, for %s", testCase.IsInteger, d.isInteger(), testCase.Dec) - } + assert.Equal(t, testCase.IsInteger, d.isInteger()) + } } func TestDecimal_Sign(t *testing.T) { - if Zero.Sign() != 0 { - t.Errorf("%q should have sign 0", Zero) - } + assert.Zero(t, Zero.Sign()) one := New(1, 0) - if one.Sign() != 1 { - t.Errorf("%q should have sign 1", one) - } + assert.Equal(t, 1, one.Sign()) mone := New(-1, 0) - if mone.Sign() != -1 { - t.Errorf("%q should have sign -1", mone) - } + assert.Equal(t, -1, mone.Sign()) + } func didPanic(f func()) bool { diff --git a/go/mysql/decimal/mysql_test.go b/go/mysql/decimal/mysql_test.go index 
1668c4377db..d1b0c52169b 100644 --- a/go/mysql/decimal/mysql_test.go +++ b/go/mysql/decimal/mysql_test.go @@ -21,13 +21,12 @@ import ( "encoding/json" "math" "math/big" - "math/rand" + "math/rand/v2" "os" "path" "strconv" "strings" "testing" - "time" ) func TestDecimalAdd(t *testing.T) { @@ -366,10 +365,8 @@ func TestRoundtripStress(t *testing.T) { count = 100 } - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - for n := 0; n < count; n++ { - fb := strconv.AppendFloat(nil, rng.NormFloat64(), 'f', -1, 64) + fb := strconv.AppendFloat(nil, rand.NormFloat64(), 'f', -1, 64) d, err := NewFromMySQL(fb) if err != nil { t.Fatalf("failed to parse %q: %v", fb, err) @@ -383,10 +380,9 @@ func TestRoundtripStress(t *testing.T) { func BenchmarkFormatting(b *testing.B) { const Count = 10000 - var rng = rand.New(rand.NewSource(time.Now().UnixNano())) var parsed = make([]Decimal, 0, Count) for i := 0; i < Count; i++ { - parsed = append(parsed, NewFromFloat(rng.NormFloat64())) + parsed = append(parsed, NewFromFloat(rand.NormFloat64())) } b.Run("StringFixed(8)", func(b *testing.B) { diff --git a/go/mysql/decimal/scan.go b/go/mysql/decimal/scan.go index 761eea5cdcf..c56fc185287 100644 --- a/go/mysql/decimal/scan.go +++ b/go/mysql/decimal/scan.go @@ -23,6 +23,7 @@ import ( "math" "math/big" "math/bits" + "strings" "vitess.io/vitess/go/mysql/fastparse" ) @@ -71,6 +72,20 @@ func parseDecimal64(s []byte) (Decimal, error) { }, nil } +// SizeAndScaleFromString gets the size and scale for the decimal value without needing to parse it. 
+func SizeAndScaleFromString(s string) (int32, int32) { + switch s[0] { + case '+', '-': + s = s[1:] + } + totalLen := len(s) + idx := strings.Index(s, ".") + if idx == -1 { + return int32(totalLen), 0 + } + return int32(totalLen - 1), int32(totalLen - 1 - idx) +} + func NewFromMySQL(s []byte) (Decimal, error) { var original = s var neg bool @@ -311,17 +326,12 @@ func pow(x big.Word, n int) (p big.Word) { } func parseLargeDecimal(integral, fractional []byte) (*big.Int, error) { - const ( - b1 = big.Word(10) - bn = big.Word(1e19) - n = 19 - ) var ( di = big.Word(0) // 0 <= di < b1**i < bn i = 0 // 0 <= i < n - // 5 is the largest possible size for a MySQL decimal; anything - // that doesn't fit in 5 words won't make it to this func - z = make([]big.Word, 0, 5) + // s is the largest possible size for a MySQL decimal; anything + // that doesn't fit in s words won't make it to this func + z = make([]big.Word, 0, s) ) parseChunk := func(partial []byte) error { diff --git a/go/mysql/decimal/scan_32.go b/go/mysql/decimal/scan_32.go new file mode 100644 index 00000000000..c0417a1ffce --- /dev/null +++ b/go/mysql/decimal/scan_32.go @@ -0,0 +1,28 @@ +//go:build 386 || arm || mips || mipsle + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package decimal + +import "math/big" + +const ( + b1 = big.Word(10) + bn = big.Word(1e9) + n = 9 + s = 10 +) diff --git a/go/mysql/decimal/scan_64.go b/go/mysql/decimal/scan_64.go new file mode 100644 index 00000000000..55b3f77c2bd --- /dev/null +++ b/go/mysql/decimal/scan_64.go @@ -0,0 +1,28 @@ +//go:build !386 && !arm && !mips && !mipsle + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package decimal + +import "math/big" + +const ( + b1 = big.Word(10) + bn = big.Word(1e19) + n = 19 + s = 5 +) diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go index 65d20c11801..964a8702471 100644 --- a/go/mysql/endtoend/client_test.go +++ b/go/mysql/endtoend/client_test.go @@ -210,7 +210,11 @@ func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) { assert.EqualValues(t, 1, result.RowsAffected, "insert into returned RowsAffected") } - qr, more, err = conn.ExecuteFetchMulti("update a set name = concat(name, ' updated'); select * from a; select count(*) from a", 300, true) + // Verify that a ExecuteFetchMultiDrain leaves the connection/packet in valid state. + err = conn.ExecuteFetchMultiDrain("update a set name = concat(name, ', multi drain 1'); select * from a; select count(*) from a") + expectNoError(t, err) + // If the previous command leaves packet in invalid state, this will fail. 
+ qr, more, err = conn.ExecuteFetchMulti("update a set name = concat(name, ', fetch multi'); select * from a; select count(*) from a", 300, true) expectNoError(t, err) expectFlag(t, "ExecuteMultiFetch(multi result)", more, true) assert.EqualValues(t, 255, qr.RowsAffected) @@ -225,6 +229,13 @@ func doTestMultiResult(t *testing.T, disableClientDeprecateEOF bool) { expectFlag(t, "ReadQueryResult(2)", more, false) assert.EqualValues(t, 1, len(qr.Rows), "ReadQueryResult(1)") + // Verify that a ExecuteFetchMultiDrain is happy to operate again after all the above. + err = conn.ExecuteFetchMultiDrain("update a set name = concat(name, ', multi drain 2'); select * from a; select count(*) from a") + expectNoError(t, err) + + err = conn.ExecuteFetchMultiDrain("update b set name = concat(name, ' nonexistent table'); select * from a; select count(*) from a") + require.Error(t, err) + _, err = conn.ExecuteFetch("drop table a", 10, true) require.NoError(t, err) } diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go index 576960f2acb..3436d045071 100644 --- a/go/mysql/endtoend/query_test.go +++ b/go/mysql/endtoend/query_test.go @@ -19,7 +19,7 @@ package endtoend import ( "context" "fmt" - "math/rand" + "math/rand/v2" "strings" "testing" @@ -151,7 +151,7 @@ func TestLargeQueries(t *testing.T) { randString := func(n int) string { b := make([]byte, n) for i := range b { - b[i] = letterBytes[rand.Intn(len(letterBytes))] + b[i] = letterBytes[rand.IntN(len(letterBytes))] } return string(b) } @@ -322,6 +322,6 @@ func TestSysInfo(t *testing.T) { } func getDefaultCollationID() collations.ID { - collationHandler := collations.Local() + collationHandler := collations.MySQL8() return collationHandler.DefaultCollationForCharset(charsetName) } diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go index 0c1fa006347..d3b9a6722ea 100644 --- a/go/mysql/endtoend/replication_test.go +++ b/go/mysql/endtoend/replication_test.go @@ -29,6 
+29,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" @@ -46,15 +47,6 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor t.Fatal(err) } - // We need to know if this is MariaDB, to set the right flag. - if conn.IsMariaDB() { - // This flag is required to get GTIDs from MariaDB. - t.Log("MariaDB: sensing SET @mariadb_slave_capability=4") - if _, err := conn.ExecuteFetch("SET @mariadb_slave_capability=4", 0, false); err != nil { - t.Fatalf("failed to set @mariadb_slave_capability=4: %v", err) - } - } - // Switch server to RBR if needed. if rbr { if _, err := conn.ExecuteFetch("SET GLOBAL binlog_format='ROW'", 0, false); err != nil { @@ -63,25 +55,21 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor } // First we get the current binlog position. - result, err := conn.ExecuteFetch("SHOW MASTER STATUS", 1, true) - require.NoError(t, err, "SHOW MASTER STATUS failed: %v", err) + status, err := conn.ShowPrimaryStatus() + require.NoError(t, err, "retrieving primary status failed: %v", err) - if len(result.Fields) < 2 || result.Fields[0].Name != "File" || result.Fields[1].Name != "Position" || - len(result.Rows) != 1 { - t.Fatalf("SHOW MASTER STATUS returned unexpected result: %v", result) - } - file := result.Rows[0][0].ToString() - position, err := result.Rows[0][1].ToCastUint64() - require.NoError(t, err, "SHOW MASTER STATUS returned invalid position: %v", result.Rows[0][1]) + filePos := status.FilePosition.GTIDSet.(replication.FilePosGTID) + file := filePos.File + position := filePos.Pos // Tell the server that we understand the format of events // that will be used if binlog_checksum is enabled on the server. 
- if _, err := conn.ExecuteFetch("SET @master_binlog_checksum=@@global.binlog_checksum", 0, false); err != nil { - t.Fatalf("failed to set @master_binlog_checksum=@@global.binlog_checksum: %v", err) + if _, err := conn.ExecuteFetch("SET @source_binlog_checksum = @@global.binlog_checksum, @master_binlog_checksum=@@global.binlog_checksum", 0, false); err != nil { + t.Fatalf("failed to set @source_binlog_checksum=@@global.binlog_checksum: %v", err) } // Write ComBinlogDump packet with to start streaming events from here. - if err := conn.WriteComBinlogDump(1, file, uint32(position), 0); err != nil { + if err := conn.WriteComBinlogDump(1, file, position, 0); err != nil { t.Fatalf("WriteComBinlogDump failed: %v", err) } @@ -908,6 +896,10 @@ func TestRowReplicationTypes(t *testing.T) { t.Fatal(err) } defer dConn.Close() + // We have tests for zero dates, so we need to allow that for this session. + if _, err := dConn.ExecuteFetch("SET @@session.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')", 0, false); err != nil { + t.Fatal(err) + } // Set the connection time zone for execution of the // statements to PST. That way we're sure to test the diff --git a/go/mysql/endtoend/schema_change_test.go b/go/mysql/endtoend/schema_change_test.go deleted file mode 100644 index a9e72aaef5b..00000000000 --- a/go/mysql/endtoend/schema_change_test.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package endtoend - -import ( - "context" - "fmt" - "strings" - "testing" - - "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/vt/sqlparser" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" -) - -var ctx = context.Background() - -const ( - createUserTable = `create table vttest.product (id bigint(20) primary key, name char(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci, created bigint(20))` - dropTestTable = `drop table if exists product` -) - -func TestChangeSchemaIsNoticed(t *testing.T) { - conn, err := mysql.Connect(ctx, &connParams) - require.NoError(t, err) - defer conn.Close() - - clearQuery := sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query - insertQuery := sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query - detectQuery := sqlparser.BuildParsedQuery(mysql.DetectSchemaChange, sidecar.GetIdentifier()).Query - - tests := []struct { - name string - changeQ string - }{{ - name: "add column", - changeQ: "alter table vttest.product add column phone VARCHAR(15)", - }, { - name: "rename column", - changeQ: "alter table vttest.product change name firstname char(10)", - }, { - name: "change column type", - changeQ: "alter table vttest.product change name name char(100)", - }, { - name: "remove column", - changeQ: "alter table vttest.product drop column name", - }, { - name: "remove last column", - changeQ: "alter table vttest.product drop column created", - }, { - name: "remove table", - changeQ: "drop table product", - }, { - name: "create table", - changeQ: `create table vttest.new_table (id bigint(20) primary key)`, - }, { - name: "change character set", - changeQ: "alter table vttest.product change name name char(10) CHARACTER SET utf8mb4", - }, { - name: "change collation", - changeQ: "alter table vttest.product change name name char(10) COLLATE utf8_unicode_520_ci", - }, { - name: "drop PK", - changeQ: "alter table vttest.product drop primary key", - 
}, { - name: "change PK", - changeQ: "alter table vttest.product drop primary key, add primary key (name)", - }, { - name: "two tables changes", - changeQ: "create table vttest.new_table2 (id bigint(20) primary key);alter table vttest.product drop column name", - }} - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - // reset schemacopy - _, err := conn.ExecuteFetch(clearQuery, 1000, true) - require.NoError(t, err) - _, err = conn.ExecuteFetch(dropTestTable, 1000, true) - require.NoError(t, err) - _, err = conn.ExecuteFetch(createUserTable, 1000, true) - require.NoError(t, err) - rs, err := conn.ExecuteFetch(insertQuery, 1000, true) - require.NoError(t, err) - require.NotZero(t, rs.RowsAffected) - - // make sure no changes are detected - rs, err = conn.ExecuteFetch(detectQuery, 1000, true) - require.NoError(t, err) - require.Empty(t, rs.Rows) - - for _, q := range strings.Split(test.changeQ, ";") { - // make the schema change - _, err = conn.ExecuteFetch(q, 1000, true) - require.NoError(t, err) - } - - // make sure the change is detected - rs, err = conn.ExecuteFetch(detectQuery, 1000, true) - require.NoError(t, err) - require.NotEmpty(t, rs.Rows) - - var tables []string - for _, row := range rs.Rows { - apa := sqlparser.NewStrLiteral(row[0].ToString()) - tables = append(tables, "table_name = "+sqlparser.String(apa)) - } - tableNamePredicates := strings.Join(tables, " OR ") - del := fmt.Sprintf("%s AND %s", clearQuery, tableNamePredicates) - upd := fmt.Sprintf("%s AND %s", insertQuery, tableNamePredicates) - - _, err = conn.ExecuteFetch(del, 1000, true) - require.NoError(t, err) - _, err = conn.ExecuteFetch(upd, 1000, true) - require.NoError(t, err) - - // make sure the change is detected - rs, err = conn.ExecuteFetch(detectQuery, 1000, true) - require.NoError(t, err) - require.Empty(t, rs.Rows) - }) - } -} diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go index cb3d20ae04b..33512f23514 100644 --- 
a/go/mysql/fakesqldb/server.go +++ b/go/mysql/fakesqldb/server.go @@ -29,19 +29,20 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" - "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" ) -const appendEntry = -1 +const ( + appendEntry = -1 + useQuery = "use `fakesqldb`" +) // DB is a fake database and all its methods are thread safe. It // creates a mysql.Listener and implements the mysql.Handler @@ -129,6 +130,8 @@ type DB struct { // lastError stores the last error in returning a query result. lastErrorMu sync.Mutex lastError error + + env *vtenv.Environment } // QueryHandler is the interface used by the DB to simulate executed queries @@ -182,6 +185,7 @@ func New(t testing.TB) *DB { queryPatternUserCallback: make(map[*regexp.Regexp]func(string)), patternData: make(map[string]exprResult), lastErrorMu: sync.Mutex{}, + env: vtenv.NewTestEnv(), } db.Handler = db @@ -189,7 +193,7 @@ func New(t testing.TB) *DB { authServer := mysql.NewAuthServerNone() // Start listening. - db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0) + db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0, 0) if err != nil { t.Fatalf("NewListener failed: %v", err) } @@ -200,7 +204,7 @@ func New(t testing.TB) *DB { db.listener.Accept() }() - db.AddQuery("use `fakesqldb`", &sqltypes.Result{}) + db.AddQuery(useQuery, &sqltypes.Result{}) // Return the db. return db } @@ -291,23 +295,23 @@ func (db *DB) WaitForClose(timeout time.Duration) error { } // ConnParams returns the ConnParams to connect to the DB. 
-func (db *DB) ConnParams() dbconfigs.Connector { - return dbconfigs.New(&mysql.ConnParams{ +func (db *DB) ConnParams() *mysql.ConnParams { + return &mysql.ConnParams{ UnixSocket: db.socketFile, Uname: "user1", Pass: "password1", DbName: "fakesqldb", - }) + } } // ConnParamsWithUname returns ConnParams to connect to the DB with the Uname set to the provided value. -func (db *DB) ConnParamsWithUname(uname string) dbconfigs.Connector { - return dbconfigs.New(&mysql.ConnParams{ +func (db *DB) ConnParamsWithUname(uname string) *mysql.ConnParams { + return &mysql.ConnParams{ UnixSocket: db.socketFile, Uname: uname, Pass: "password1", DbName: "fakesqldb", - }) + } } // @@ -376,11 +380,11 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R } key := strings.ToLower(query) db.mu.Lock() - defer db.mu.Unlock() db.queryCalled[key]++ db.querylog = append(db.querylog, key) // Check if we should close the connection and provoke errno 2013. if db.shouldClose.Load() { + defer db.mu.Unlock() c.Close() // log error @@ -394,6 +398,8 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R // The driver may send this at connection time, and we don't want it to // interfere. if key == "set names utf8" || strings.HasPrefix(key, "set collation_connection = ") { + defer db.mu.Unlock() + // log error if err := callback(&sqltypes.Result{}); err != nil { log.Errorf("callback failed : %v", err) @@ -403,12 +409,14 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R // check if we should reject it. if err, ok := db.rejectedData[key]; ok { + db.mu.Unlock() return err } // Check explicit queries from AddQuery(). 
result, ok := db.data[key] if ok { + db.mu.Unlock() if f := result.BeforeFunc; f != nil { f() } @@ -419,6 +427,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R for _, pat := range db.patternData { if pat.expr.MatchString(query) { userCallback, ok := db.queryPatternUserCallback[pat.expr] + db.mu.Unlock() if ok { userCallback(query) } @@ -429,13 +438,16 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R } } + defer db.mu.Unlock() + if db.neverFail.Load() { return callback(&sqltypes.Result{}) } // Nothing matched. + parser := sqlparser.NewTestParser() err = fmt.Errorf("fakesqldb:: query: '%s' is not supported on %v", - sqlparser.TruncateForUI(query), db.name) - log.Errorf("Query not found: %s", sqlparser.TruncateForUI(query)) + parser.TruncateForUI(query), db.name) + log.Errorf("Query not found: %s", parser.TruncateForUI(query)) return err } @@ -590,6 +602,8 @@ func (db *DB) RejectQueryPattern(queryPattern, error string) { // ClearQueryPattern removes all query patterns set up func (db *DB) ClearQueryPattern() { + db.mu.Lock() + defer db.mu.Unlock() db.patternData = make(map[string]exprResult) } @@ -609,6 +623,17 @@ func (db *DB) DeleteQuery(query string) { delete(db.queryCalled, key) } +// DeleteAllQueries deletes all expected queries from the fake DB. +func (db *DB) DeleteAllQueries() { + db.mu.Lock() + defer db.mu.Unlock() + clear(db.data) + clear(db.patternData) + clear(db.queryCalled) + // Use is always expected to be present. + db.data[useQuery] = &ExpectedResult{&sqltypes.Result{}, nil} +} + // AddRejectedQuery adds a query which will be rejected at execution time. 
func (db *DB) AddRejectedQuery(query string, err error) { db.mu.Lock() @@ -839,3 +864,7 @@ func (db *DB) GetQueryPatternResult(key string) (func(string), ExpectedResult, b return nil, ExpectedResult{nil, nil}, false, nil } + +func (db *DB) Env() *vtenv.Environment { + return db.env +} diff --git a/go/mysql/fastparse/fastparse.go b/go/mysql/fastparse/fastparse.go index f9aca692abd..a669a584d72 100644 --- a/go/mysql/fastparse/fastparse.go +++ b/go/mysql/fastparse/fastparse.go @@ -26,11 +26,19 @@ import ( "vitess.io/vitess/go/hack" ) +func ParseUint64(s string, base int) (uint64, error) { + return parseUint64(s, base, false) +} + +func ParseUint64WithNeg(s string, base int) (uint64, error) { + return parseUint64(s, base, true) +} + // ParseUint64 parses uint64 from s. // // It is equivalent to strconv.ParseUint(s, base, 64) in case it succeeds, // but on error it will return the best effort value of what it has parsed so far. -func ParseUint64(s string, base int) (uint64, error) { +func parseUint64(s string, base int, allowNeg bool) (uint64, error) { if len(s) == 0 { return 0, fmt.Errorf("cannot parse uint64 from empty string") } @@ -45,6 +53,22 @@ func ParseUint64(s string, base int) (uint64, error) { i++ } + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse uint64 from %q", s) + } + // For some reason, MySQL parses things as uint64 even with + // a negative sign and then turns it into the 2s complement value. 
+ minus := s[i] == '-' + if minus { + if !allowNeg { + return 0, fmt.Errorf("cannot parse uint64 from %q", s) + } + i++ + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse uint64 from %q", s) + } + } + d := uint64(0) j := i next: @@ -75,17 +99,23 @@ next: cutoff = math.MaxUint64/uint64(base) + 1 } if d >= cutoff { + if minus { + return 0, fmt.Errorf("cannot parse uint64 from %q: %w", s, ErrOverflow) + } return math.MaxUint64, fmt.Errorf("cannot parse uint64 from %q: %w", s, ErrOverflow) } v := d*uint64(base) + uint64(b) if v < d { + if minus { + return 0, fmt.Errorf("cannot parse uint64 from %q: %w", s, ErrOverflow) + } return math.MaxUint64, fmt.Errorf("cannot parse uint64 from %q: %w", s, ErrOverflow) } d = v i++ } if i <= j { - return d, fmt.Errorf("cannot parse uint64 from %q", s) + return uValue(d, minus), fmt.Errorf("cannot parse uint64 from %q", s) } for i < uint(len(s)) { @@ -97,9 +127,9 @@ next: if i < uint(len(s)) { // Unparsed tail left. - return d, fmt.Errorf("unparsed tail left after parsing uint64 from %q: %q", s, s[i:]) + return uValue(d, minus), fmt.Errorf("unparsed tail left after parsing uint64 from %q: %q", s, s[i:]) } - return d, nil + return uValue(d, minus), nil } var ErrOverflow = errors.New("overflow") @@ -123,6 +153,9 @@ func ParseInt64(s string, base int) (int64, error) { i++ } + if i >= uint(len(s)) { + return 0, fmt.Errorf("cannot parse int64 from %q", s) + } minus := s[i] == '-' if minus { i++ @@ -160,21 +193,15 @@ next: default: cutoff = math.MaxInt64/uint64(base) + 1 } - if d >= cutoff { - if minus { - return math.MinInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) - } + if !minus && d >= cutoff { return math.MaxInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) } - v := d*uint64(base) + uint64(b) - if v < d { - if minus { - return math.MinInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) - } - return math.MaxInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, 
ErrOverflow) + if minus && d > cutoff { + return math.MinInt64, fmt.Errorf("cannot parse int64 from %q: %w", s, ErrOverflow) } - d = v + + d = d*uint64(base) + uint64(b) i++ } @@ -264,3 +291,10 @@ func isSpace(c byte) bool { return false } } + +func uValue(v uint64, neg bool) uint64 { + if neg { + return -v + } + return v +} diff --git a/go/mysql/fastparse/fastparse_test.go b/go/mysql/fastparse/fastparse_test.go index bec312b0bb5..5ee87a617d1 100644 --- a/go/mysql/fastparse/fastparse_test.go +++ b/go/mysql/fastparse/fastparse_test.go @@ -17,6 +17,8 @@ package fastparse import ( "math" + "math/big" + "strconv" "testing" "github.com/stretchr/testify/require" @@ -190,6 +192,48 @@ func TestParseInt64(t *testing.T) { expected: 42, err: `unparsed tail left after parsing int64 from "\t 42 \n": "\n"`, }, + { + input: "", + base: 10, + expected: 0, + err: `cannot parse int64 from empty string`, + }, + { + input: "256", + base: 1, + expected: 0, + err: `invalid base 1; must be in [2, 36]`, + }, + { + input: "256", + base: 37, + expected: 0, + err: `invalid base 37; must be in [2, 36]`, + }, + { + input: " -", + base: 10, + expected: 0, + err: `cannot parse int64 from " -"`, + }, + { + input: "-18446744073709551615", + base: 10, + expected: -9223372036854775808, + err: `cannot parse int64 from "-18446744073709551615": overflow`, + }, + { + input: " ", + base: 10, + expected: 0, + err: `cannot parse int64 from " "`, + }, + { + input: " :", + base: 10, + expected: 0, + err: `cannot parse int64 from " :"`, + }, } for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { @@ -205,6 +249,69 @@ func TestParseInt64(t *testing.T) { } } +func TestParseEdgeInt64(t *testing.T) { + for i := int64(math.MinInt64); i < math.MinInt64+1000; i++ { + for base := 2; base <= 36; base++ { + val, err := ParseInt64(strconv.FormatInt(i, base), base) + require.NoError(t, err, "base %d", base) + require.Equal(t, int64(i), val) + } + } + for i := int64(math.MaxInt64 - 1000); i < math.MaxInt64; 
i++ { + for base := 2; base <= 36; base++ { + val, err := ParseInt64(strconv.FormatInt(i, base), base) + require.NoError(t, err) + require.NoError(t, err, "base %d", base) + require.Equal(t, int64(i), val) + } + } +} + +func TestParseOverflowInt64(t *testing.T) { + for i := int64(1); i <= 1000; i++ { + b := big.NewInt(math.MinInt64) + b.Sub(b, big.NewInt(i)) + for base := 2; base <= 36; base++ { + val, err := ParseInt64(b.Text(base), base) + require.Error(t, err) + require.Equal(t, int64(math.MinInt64), val) + } + } + + for i := int64(1); i <= 1000; i++ { + b := big.NewInt(math.MaxInt64) + b.Add(b, big.NewInt(i)) + for base := 2; base <= 36; base++ { + val, err := ParseInt64(b.Text(base), base) + require.Error(t, err) + require.Equal(t, int64(math.MaxInt64), val) + } + } +} + +func TestParseEdgeUint64(t *testing.T) { + for i := uint64(math.MaxUint64 - 1000); i < math.MaxUint64; i++ { + for base := 2; base <= 36; base++ { + val, err := ParseUint64(strconv.FormatUint(i, base), base) + require.NoError(t, err, "base %d", base) + require.Equal(t, uint64(i), val) + } + } +} + +func TestParseOverflowUint64(t *testing.T) { + var b big.Int + for i := int64(1); i <= 1000; i++ { + b.SetUint64(math.MaxUint64) + b.Add(&b, big.NewInt(i)) + for base := 2; base <= 36; base++ { + val, err := ParseUint64(b.Text(base), base) + require.Error(t, err) + require.Equal(t, uint64(math.MaxUint64), val) + } + } +} + func TestParseUint64(t *testing.T) { testcases := []struct { input string @@ -227,6 +334,17 @@ func TestParseUint64(t *testing.T) { base: 2, expected: 1, }, + { + input: "-", + base: 10, + expected: 0, + err: `cannot parse uint64 from "-"`, + }, + { + input: "-1", + base: 10, + err: `cannot parse uint64 from "-1"`, + }, { input: "10", base: 2, @@ -326,6 +444,36 @@ func TestParseUint64(t *testing.T) { expected: 42, err: `unparsed tail left after parsing uint64 from "\t 42 \n": "\n"`, }, + { + input: "", + base: 10, + expected: 0, + err: `cannot parse uint64 from empty string`, + 
}, + { + input: "256", + base: 1, + expected: 0, + err: `invalid base 1; must be in [2, 36]`, + }, + { + input: "256", + base: 37, + expected: 0, + err: `invalid base 37; must be in [2, 36]`, + }, + { + input: " ", + base: 10, + expected: 0, + err: `cannot parse uint64 from " "`, + }, + { + input: " :", + base: 10, + expected: 0, + err: `cannot parse uint64 from " :"`, + }, } for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { @@ -341,6 +489,61 @@ func TestParseUint64(t *testing.T) { } } +func TestParseUint64WithNeg(t *testing.T) { + testcases := []struct { + input string + base int + expected uint64 + err string + }{ + { + input: "-", + base: 10, + expected: 0, + err: `cannot parse uint64 from "-"`, + }, + { + input: "-1", + base: 10, + expected: 18446744073709551615, + }, + { + input: "-9223372036854775808", + base: 10, + expected: 9223372036854775808, + }, + { + input: "-9223372036854775809", + base: 10, + expected: 9223372036854775807, + }, + { + input: "-18446744073709551616", + base: 10, + expected: 0, + err: `cannot parse uint64 from "-18446744073709551616": overflow`, + }, + { + input: "-31415926535897932384", + base: 10, + expected: 0, + err: `cannot parse uint64 from "-31415926535897932384": overflow`, + }, + } + for _, tc := range testcases { + t.Run(tc.input, func(t *testing.T) { + val, err := ParseUint64WithNeg(tc.input, tc.base) + if tc.err == "" { + require.NoError(t, err) + require.Equal(t, tc.expected, val) + } else { + require.Equal(t, tc.expected, val) + require.EqualError(t, err, tc.err) + } + }) + } +} + func TestParseFloat64(t *testing.T) { testcases := []struct { input string diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index 7cfa4d8c37e..24de3d3c9a5 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -23,9 +23,11 @@ import ( "strconv" "strings" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + 
"vitess.io/vitess/go/vt/proto/replicationdata" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -39,41 +41,22 @@ var ( ErrNoPrimaryStatus = errors.New("no master status") ) -type FlavorCapability int - -const ( - NoneFlavorCapability FlavorCapability = iota // default placeholder - FastDropTableFlavorCapability // supported in MySQL 8.0.23 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-23.html - TransactionalGtidExecutedFlavorCapability - InstantDDLFlavorCapability - InstantAddLastColumnFlavorCapability - InstantAddDropVirtualColumnFlavorCapability - InstantAddDropColumnFlavorCapability - InstantChangeColumnDefaultFlavorCapability - InstantExpandEnumCapability - MySQLJSONFlavorCapability - MySQLUpgradeInServerFlavorCapability - DynamicRedoLogCapacityFlavorCapability // supported in MySQL 8.0.30 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-30.html - DisableRedoLogFlavorCapability // supported in MySQL 8.0.21 and above: https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-21.html -) - const ( // mariaDBReplicationHackPrefix is the prefix of a version for MariaDB 10.0 // versions, to work around replication bugs. mariaDBReplicationHackPrefix = "5.5.5-" // mariaDBVersionString is present in mariaDBVersionString = "MariaDB" - // mysql57VersionPrefix is the prefix for 5.7 mysql version, such as 5.7.31-log - mysql57VersionPrefix = "5.7." - // mysql80VersionPrefix is the prefix for 8.0 mysql version, such as 8.0.19 - mysql80VersionPrefix = "8.0." + // mysql8VersionPrefix is the prefix for 8.x mysql version, such as 8.0.19, + // but also newer ones like 8.4.0. + mysql8VersionPrefix = "8." ) // flavor is the abstract interface for a flavor. // Flavors are auto-detected upon connection using the server version. // We have two major implementations (the main difference is the GTID // handling): -// 1. Oracle MySQL 5.6, 5.7, 8.0, ... +// 1. Oracle MySQL 5.7, 8.0, ... // 2. 
MariaDB 10.X type flavor interface { // primaryGTIDSet returns the current GTIDSet of a server. @@ -105,6 +88,9 @@ type flavor interface { // stopReplicationCommand returns the command to stop the replication. stopReplicationCommand() string + // resetReplicationCommand returns the command to reset the replication. + resetReplicationCommand() string + // stopIOThreadCommand returns the command to stop the replica's IO thread only. stopIOThreadCommand() string @@ -133,59 +119,46 @@ type flavor interface { // replication position at which the replica will resume. setReplicationPositionCommands(pos replication.Position) []string - // changeReplicationSourceArg returns the specific parameter to add to - // a "change primary" command. - changeReplicationSourceArg() string + // setReplicationSourceCommand returns the command to use the provided host/port + // as the new replication source (without changing any GTID position). + setReplicationSourceCommand(params *ConnParams, host string, port int32, heartbeatInterval float64, connectRetry int) string + + // resetBinaryLogsCommand returns the command to reset the binary logs. + resetBinaryLogsCommand() string // status returns the result of the appropriate status command, // with parsed replication position. status(c *Conn) (replication.ReplicationStatus, error) - // primaryStatus returns the result of 'SHOW MASTER STATUS', + // primaryStatus returns the result of 'SHOW BINARY LOG STATUS', // with parsed executed position. primaryStatus(c *Conn) (replication.PrimaryStatus, error) - // waitUntilPositionCommand returns the SQL command to issue - // to wait until the given position, until the context - // expires. The command returns -1 if it times out. It - // returns NULL if GTIDs are not enabled. - waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) + // replicationConfiguration reads the right global variables and performance schema information. 
+ replicationConfiguration(c *Conn) (*replicationdata.Configuration, error) + + replicationNetTimeout(c *Conn) (int32, error) + + // waitUntilPosition waits until the given position is reached or + // until the context expires. It returns an error if we did not + // succeed. + waitUntilPosition(ctx context.Context, c *Conn, pos replication.Position) error + // catchupToGTIDCommands returns the command to catch up to a given GTID. + catchupToGTIDCommands(params *ConnParams, pos replication.Position) []string + + // binlogReplicatedUpdates returns the field to use to check replica updates. + binlogReplicatedUpdates() string baseShowTables() string baseShowTablesWithSizes() string - supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) + supportsCapability(capability capabilities.FlavorCapability) (bool, error) } -type CapableOf func(capability FlavorCapability) (bool, error) - -// flavors maps flavor names to their implementation. +// flavorFuncs maps flavor names to their implementation. // Flavors need to register only if they support being specified in the // connection parameters. -var flavors = make(map[string]func() flavor) - -// ServerVersionAtLeast returns true if current server is at least given value. -// Example: if input is []int{8, 0, 23}... the function returns 'true' if we're on MySQL 8.0.23, 8.0.24, ... -func ServerVersionAtLeast(serverVersion string, parts ...int) (bool, error) { - versionPrefix := strings.Split(serverVersion, "-")[0] - versionTokens := strings.Split(versionPrefix, ".") - for i, part := range parts { - if len(versionTokens) <= i { - return false, nil - } - tokenValue, err := strconv.Atoi(versionTokens[i]) - if err != nil { - return false, err - } - if tokenValue > part { - return true, nil - } - if tokenValue < part { - return false, nil - } - } - return true, nil -} +var flavorFuncs = make(map[string]func() flavor) // GetFlavor fills in c.Flavor. If the params specify the flavor, // that is used. 
Otherwise, we auto-detect. @@ -199,32 +172,41 @@ func ServerVersionAtLeast(serverVersion string, parts ...int) (bool, error) { // Note on such servers, 'select version()' would return 10.0.21-MariaDB-... // as well (not matching what c.ServerVersion is, but matching after we remove // the prefix). -func GetFlavor(serverVersion string, flavorFunc func() flavor) (f flavor, capableOf CapableOf, canonicalVersion string) { +func GetFlavor(serverVersion string, flavorFunc func() flavor) (f flavor, capableOf capabilities.CapableOf, canonicalVersion string) { canonicalVersion = serverVersion switch { case flavorFunc != nil: f = flavorFunc() case strings.HasPrefix(serverVersion, mariaDBReplicationHackPrefix): canonicalVersion = serverVersion[len(mariaDBReplicationHackPrefix):] - f = mariadbFlavor101{} + f = mariadbFlavor101{mariadbFlavor{serverVersion: canonicalVersion}} case strings.Contains(serverVersion, mariaDBVersionString): mariadbVersion, err := strconv.ParseFloat(serverVersion[:4], 64) if err != nil || mariadbVersion < 10.2 { - f = mariadbFlavor101{} + f = mariadbFlavor101{mariadbFlavor{serverVersion: fmt.Sprintf("%f", mariadbVersion)}} + } else { + f = mariadbFlavor102{mariadbFlavor{serverVersion: fmt.Sprintf("%f", mariadbVersion)}} + } + case strings.HasPrefix(serverVersion, mysql8VersionPrefix): + if latest, _ := capabilities.ServerVersionAtLeast(serverVersion, 8, 2, 0); latest { + f = mysqlFlavor82{mysqlFlavor{serverVersion: serverVersion}} + } else if recent, _ := capabilities.MySQLVersionHasCapability(serverVersion, capabilities.ReplicaTerminologyCapability); recent { + f = mysqlFlavor8{mysqlFlavor{serverVersion: serverVersion}} } else { - f = mariadbFlavor102{} + f = mysqlFlavor8Legacy{mysqlFlavorLegacy{mysqlFlavor{serverVersion: serverVersion}}} } - case strings.HasPrefix(serverVersion, mysql57VersionPrefix): - f = mysqlFlavor57{} - case strings.HasPrefix(serverVersion, mysql80VersionPrefix): - f = mysqlFlavor80{} default: - f = mysqlFlavor56{} + // If 
unknown, return the most basic flavor: MySQL 57. + f = mysqlFlavor57{mysqlFlavorLegacy{mysqlFlavor{serverVersion: serverVersion}}} } - return f, - func(capability FlavorCapability) (bool, error) { - return f.supportsCapability(serverVersion, capability) - }, canonicalVersion + return f, f.supportsCapability, canonicalVersion +} + +// ServerVersionCapableOf is a convenience function that returns a CapableOf function given a server version. +// It is a shortcut for GetFlavor(serverVersion, nil). +func ServerVersionCapableOf(serverVersion string) (capableOf capabilities.CapableOf) { + _, capableOf, _ = GetFlavor(serverVersion, nil) + return capableOf } // fillFlavor fills in c.Flavor. If the params specify the flavor, @@ -240,14 +222,14 @@ func GetFlavor(serverVersion string, flavorFunc func() flavor) (f flavor, capabl // as well (not matching what c.ServerVersion is, but matching after we remove // the prefix). func (c *Conn) fillFlavor(params *ConnParams) { - flavorFunc := flavors[params.Flavor] + flavorFunc := flavorFuncs[params.Flavor] c.flavor, _, c.ServerVersion = GetFlavor(c.ServerVersion, flavorFunc) } // ServerVersionAtLeast returns 'true' if server version is equal or greater than given parts. e.g. // "8.0.14-log" is at least [8, 0, 13] and [8, 0, 14], but not [8, 0, 15] func (c *Conn) ServerVersionAtLeast(parts ...int) (bool, error) { - return ServerVersionAtLeast(c.ServerVersion, parts...) + return capabilities.ServerVersionAtLeast(c.ServerVersion, parts...) } // @@ -337,6 +319,10 @@ func (c *Conn) StopReplicationCommand() string { return c.flavor.stopReplicationCommand() } +func (c *Conn) ResetReplicationCommand() string { + return c.flavor.resetReplicationCommand() +} + // StopIOThreadCommand returns the command to stop the replica's io thread. 
func (c *Conn) StopIOThreadCommand() string { return c.flavor.stopIOThreadCommand() @@ -388,31 +374,8 @@ func (c *Conn) SetReplicationPositionCommands(pos replication.Position) []string // as the new replication source (without changing any GTID position). // It is guaranteed to be called with replication stopped. // It should not start or stop replication. -func (c *Conn) SetReplicationSourceCommand(params *ConnParams, host string, port int32, connectRetry int) string { - args := []string{ - fmt.Sprintf("MASTER_HOST = '%s'", host), - fmt.Sprintf("MASTER_PORT = %d", port), - fmt.Sprintf("MASTER_USER = '%s'", params.Uname), - fmt.Sprintf("MASTER_PASSWORD = '%s'", params.Pass), - fmt.Sprintf("MASTER_CONNECT_RETRY = %d", connectRetry), - } - if params.SslEnabled() { - args = append(args, "MASTER_SSL = 1") - } - if params.SslCa != "" { - args = append(args, fmt.Sprintf("MASTER_SSL_CA = '%s'", params.SslCa)) - } - if params.SslCaPath != "" { - args = append(args, fmt.Sprintf("MASTER_SSL_CAPATH = '%s'", params.SslCaPath)) - } - if params.SslCert != "" { - args = append(args, fmt.Sprintf("MASTER_SSL_CERT = '%s'", params.SslCert)) - } - if params.SslKey != "" { - args = append(args, fmt.Sprintf("MASTER_SSL_KEY = '%s'", params.SslKey)) - } - args = append(args, c.flavor.changeReplicationSourceArg()) - return "CHANGE MASTER TO\n " + strings.Join(args, ",\n ") +func (c *Conn) SetReplicationSourceCommand(params *ConnParams, host string, port int32, heartbeatInterval float64, connectRetry int) string { + return c.flavor.setReplicationSourceCommand(params, host, port, heartbeatInterval, connectRetry) } // resultToMap is a helper function used by ShowReplicationStatus. 
@@ -469,27 +432,44 @@ func (c *Conn) ShowReplicationStatusWithContext(ctx context.Context) (replicatio } } -// ShowPrimaryStatus executes the right SHOW MASTER STATUS command, +// ShowPrimaryStatus executes the right SHOW BINARY LOG STATUS command, // and returns a parsed executed Position, as well as file based Position. func (c *Conn) ShowPrimaryStatus() (replication.PrimaryStatus, error) { return c.flavor.primaryStatus(c) } -// WaitUntilPositionCommand returns the SQL command to issue -// to wait until the given position, until the context -// expires. The command returns -1 if it times out. It -// returns NULL if GTIDs are not enabled. -func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { - return c.flavor.waitUntilPositionCommand(ctx, pos) +// ReplicationConfiguration reads the right global variables and performance schema information. +func (c *Conn) ReplicationConfiguration() (*replicationdata.Configuration, error) { + replConfiguration, err := c.flavor.replicationConfiguration(c) + // We don't want to fail this call if it called on a primary tablet. + // There just isn't any replication configuration to return since it is a primary tablet. + if err == ErrNotReplica { + return nil, nil + } + if err != nil { + return nil, err + } + replNetTimeout, err := c.flavor.replicationNetTimeout(c) + replConfiguration.ReplicaNetTimeout = replNetTimeout + return replConfiguration, err +} + +// WaitUntilPosition waits until the given position is reached or until the +// context expires. It returns an error if we did not succeed. 
+func (c *Conn) WaitUntilPosition(ctx context.Context, pos replication.Position) error { + return c.flavor.waitUntilPosition(ctx, c, pos) +} + +func (c *Conn) CatchupToGTIDCommands(params *ConnParams, pos replication.Position) []string { + return c.flavor.catchupToGTIDCommands(params, pos) } -// WaitUntilFilePositionCommand returns the SQL command to issue -// to wait until the given position, until the context -// expires for the file position flavor. The command returns -1 if it times out. It -// returns NULL if GTIDs are not enabled. -func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos replication.Position) (string, error) { +// WaitUntilFilePosition waits until the given position is reached or until +// the context expires for the file position flavor. It returns an error if +// we did not succeed. +func (c *Conn) WaitUntilFilePosition(ctx context.Context, pos replication.Position) error { filePosFlavor := filePosFlavor{} - return filePosFlavor.waitUntilPositionCommand(ctx, pos) + return filePosFlavor.waitUntilPosition(ctx, c, pos) } // BaseShowTables returns a query that shows tables @@ -503,10 +483,10 @@ func (c *Conn) BaseShowTablesWithSizes() string { } // SupportsCapability checks if the database server supports the given capability -func (c *Conn) SupportsCapability(capability FlavorCapability) (bool, error) { - return c.flavor.supportsCapability(c.ServerVersion, capability) +func (c *Conn) SupportsCapability(capability capabilities.FlavorCapability) (bool, error) { + return c.flavor.supportsCapability(capability) } func init() { - flavors[replication.FilePosFlavorID] = newFilePosFlavor + flavorFuncs[replication.FilePosFlavorID] = newFilePosFlavor } diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index bf4076b85b1..5e766e81912 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -23,8 +23,10 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql/capabilities" 
"vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/proto/replicationdata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" ) @@ -91,6 +93,10 @@ func (flv *filePosFlavor) startReplicationCommand() string { return "unsupported" } +func (flv *filePosFlavor) resetReplicationCommand() string { + return "unsupported" +} + func (flv *filePosFlavor) restartReplicationCommands() []string { return []string{"unsupported"} } @@ -222,8 +228,13 @@ func (flv *filePosFlavor) setReplicationPositionCommands(pos replication.Positio } } -// setReplicationPositionCommands is part of the Flavor interface. -func (flv *filePosFlavor) changeReplicationSourceArg() string { +// setReplicationSourceCommand is part of the Flavor interface. +func (flv *filePosFlavor) setReplicationSourceCommand(params *ConnParams, host string, port int32, heartbeatInterval float64, connectRetry int) string { + return "unsupported" +} + +// resetBinaryLogsCommand is part of the Flavor interface. +func (flv *filePosFlavor) resetBinaryLogsCommand() string { return "unsupported" } @@ -266,22 +277,62 @@ func (flv *filePosFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, err return replication.ParseFilePosPrimaryStatus(resultMap) } -// waitUntilPositionCommand is part of the Flavor interface. -func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { +func (flv *filePosFlavor) replicationConfiguration(c *Conn) (*replicationdata.Configuration, error) { + return nil, nil +} + +func (flv *filePosFlavor) replicationNetTimeout(c *Conn) (int32, error) { + return 0, nil +} + +// waitUntilPosition is part of the Flavor interface. 
+func (flv *filePosFlavor) waitUntilPosition(ctx context.Context, c *Conn, pos replication.Position) error { filePosPos, ok := pos.GTIDSet.(replication.FilePosGTID) if !ok { - return "", fmt.Errorf("Position is not filePos compatible: %#v", pos.GTIDSet) + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "position is not filePos compatible: %#v", pos.GTIDSet) } + query := fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.File, filePosPos.Pos) if deadline, ok := ctx.Deadline(); ok { timeout := time.Until(deadline) if timeout <= 0 { - return "", fmt.Errorf("timed out waiting for position %v", pos) + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) } - return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.File, filePosPos.Pos, timeout.Seconds()), nil + query = fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.File, filePosPos.Pos, timeout.Seconds()) + } + + result, err := c.ExecuteFetch(query, 1, false) + if err != nil { + return err } - return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.File, filePosPos.Pos), nil + // For MASTER_POS_WAIT(), the return value is the number of log events + // the replica had to wait for to advance to the specified position. + // The function returns NULL if the replica SQL thread is not started, + // the replica's source information is not initialized, the arguments + // are incorrect, or an error occurs. It returns -1 if the timeout has + // been exceeded. If the replica SQL thread stops while MASTER_POS_WAIT() + // is waiting, the function returns NULL. If the replica is past the + // specified position, the function returns immediately. 
+ if len(result.Rows) != 1 || len(result.Rows[0]) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid results: %#v", result) + } + val := result.Rows[0][0] + if val.IsNull() { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "replication is not running") + } + state, err := val.ToInt64() + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid result of %#v", val) + } + switch { + case state == -1: + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) + case state >= 0: + return nil + default: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid result of %d", state) + } } func (*filePosFlavor) startReplicationUntilAfter(pos replication.Position) string { @@ -303,9 +354,17 @@ func (*filePosFlavor) baseShowTablesWithSizes() string { } // supportsCapability is part of the Flavor interface. -func (*filePosFlavor) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) { +func (*filePosFlavor) supportsCapability(capability capabilities.FlavorCapability) (bool, error) { switch capability { default: return false, nil } } + +func (*filePosFlavor) catchupToGTIDCommands(_ *ConnParams, _ replication.Position) []string { + return []string{"unsupported"} +} + +func (*filePosFlavor) binlogReplicatedUpdates() string { + return "@@global.log_slave_updates" +} diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 15718542b45..301ec2b0596 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -21,16 +21,23 @@ import ( "context" "fmt" "io" + "strconv" + "strings" "time" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/proto/replicationdata" "vitess.io/vitess/go/vt/vterrors" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // mariadbFlavor implements the Flavor interface for MariaDB. 
-type mariadbFlavor struct{} +type mariadbFlavor struct { + serverVersion string +} type mariadbFlavor101 struct { mariadbFlavor } @@ -48,7 +55,7 @@ func (mariadbFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, err } if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_binlog_pos: %#v", qr) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for gtid_binlog_pos: %#v", qr) } return replication.ParseMariadbGTIDSet(qr.Rows[0][0].ToString()) @@ -93,6 +100,10 @@ func (mariadbFlavor) stopReplicationCommand() string { return "STOP SLAVE" } +func (mariadbFlavor) resetReplicationCommand() string { + return "RESET SLAVE ALL" +} + func (mariadbFlavor) stopIOThreadCommand() string { return "STOP SLAVE IO_THREAD" } @@ -140,7 +151,8 @@ func (mariadbFlavor) resetReplicationCommands(c *Conn) []string { "RESET MASTER", "SET GLOBAL gtid_slave_pos = ''", } - if c.SemiSyncExtensionLoaded() { + semisyncType, _ := c.SemiSyncExtensionLoaded() + if semisyncType == SemiSyncTypeMaster { resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_master_enabled = false, GLOBAL rpl_semi_sync_slave_enabled = false") // semi-sync will be enabled if needed when replica is started. } return resetCommands @@ -177,9 +189,38 @@ func (mariadbFlavor) setReplicationPositionCommands(pos replication.Position) [] } } -// setReplicationPositionCommands is part of the Flavor interface. 
-func (mariadbFlavor) changeReplicationSourceArg() string { - return "MASTER_USE_GTID = current_pos" +func (mariadbFlavor) setReplicationSourceCommand(params *ConnParams, host string, port int32, heartbeatInterval float64, connectRetry int) string { + args := []string{ + fmt.Sprintf("MASTER_HOST = '%s'", host), + fmt.Sprintf("MASTER_PORT = %d", port), + fmt.Sprintf("MASTER_USER = '%s'", params.Uname), + fmt.Sprintf("MASTER_PASSWORD = '%s'", params.Pass), + fmt.Sprintf("MASTER_CONNECT_RETRY = %d", connectRetry), + } + if params.SslEnabled() { + args = append(args, "MASTER_SSL = 1") + } + if params.SslCa != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_CA = '%s'", params.SslCa)) + } + if params.SslCaPath != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_CAPATH = '%s'", params.SslCaPath)) + } + if params.SslCert != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_CERT = '%s'", params.SslCert)) + } + if params.SslKey != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_KEY = '%s'", params.SslKey)) + } + if heartbeatInterval != 0 { + args = append(args, fmt.Sprintf("MASTER_HEARTBEAT_PERIOD = %v", heartbeatInterval)) + } + args = append(args, "MASTER_USE_GTID = current_pos") + return "CHANGE MASTER TO\n " + strings.Join(args, ",\n ") +} + +func (mariadbFlavor) resetBinaryLogsCommand() string { + return "RESET MASTER" } // status is part of the Flavor interface. @@ -223,22 +264,83 @@ func (m mariadbFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) return status, err } -// waitUntilPositionCommand is part of the Flavor interface. +// replicationConfiguration is part of the Flavor interface. +func (mariadbFlavor) replicationConfiguration(c *Conn) (*replicationdata.Configuration, error) { + qr, err := c.ExecuteFetch(readReplicationConnectionConfiguration, 100, true /* wantfields */) + if err != nil { + return nil, err + } + if len(qr.Rows) == 0 { + // The query returned no data. This is not a replica. 
+ return nil, ErrNotReplica + } + + resultMap, err := resultToMap(qr) + if err != nil { + return nil, err + } + + heartbeatInterval, err := strconv.ParseFloat(resultMap["HEARTBEAT_INTERVAL"], 64) + if err != nil { + return nil, err + } + + return &replicationdata.Configuration{ + HeartbeatInterval: heartbeatInterval, + }, nil +} + +// replicationNetTimeout is part of the Flavor interface. +func (mariadbFlavor) replicationNetTimeout(c *Conn) (int32, error) { + qr, err := c.ExecuteFetch("select @@global.slave_net_timeout", 1, false) + if err != nil { + return 0, err + } + if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for slave_net_timeout: %#v", qr) + } + return qr.Rows[0][0].ToInt32() +} + +// waitUntilPosition is part of the Flavor interface. // // Note: Unlike MASTER_POS_WAIT(), MASTER_GTID_WAIT() will continue waiting even // if the sql thread stops. If that is a problem, we'll have to change this. -func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { +func (mariadbFlavor) waitUntilPosition(ctx context.Context, c *Conn, pos replication.Position) error { + // Omit the timeout to wait indefinitely. In MariaDB, a timeout of 0 means + // return immediately. + query := fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s')", pos) if deadline, ok := ctx.Deadline(); ok { timeout := time.Until(deadline) if timeout <= 0 { - return "", vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) } - return fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s', %.6f)", pos, timeout.Seconds()), nil + query = fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s', %.6f)", pos, timeout.Seconds()) } - // Omit the timeout to wait indefinitely. In MariaDB, a timeout of 0 means - // return immediately. 
- return fmt.Sprintf("SELECT MASTER_GTID_WAIT('%s')", pos), nil + result, err := c.ExecuteFetch(query, 1, false) + if err != nil { + return err + } + + // For MASTER_GTID_WAIT(), if the wait completes without a timeout 0 is + // returned and -1 if there was a timeout. + if len(result.Rows) != 1 || len(result.Rows[0]) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid results: %#v", result) + } + val := result.Rows[0][0] + state, err := val.ToInt64() + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid result of %#v", val) + } + switch state { + case 0: + return nil + case -1: + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) + default: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid result of %d", state) + } } // readBinlogEvent is part of the Flavor interface. @@ -262,9 +364,17 @@ func (mariadbFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) { } // supportsCapability is part of the Flavor interface. 
-func (mariadbFlavor) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) { +func (mariadbFlavor) supportsCapability(capability capabilities.FlavorCapability) (bool, error) { switch capability { default: return false, nil } } + +func (mariadbFlavor) catchupToGTIDCommands(_ *ConnParams, _ replication.Position) []string { + return []string{"unsupported"} +} + +func (mariadbFlavor) binlogReplicatedUpdates() string { + return "@@global.log_slave_updates" +} diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go index 250d664e4af..0e6bb500de2 100644 --- a/go/mysql/flavor_mariadb_test.go +++ b/go/mysql/flavor_mariadb_test.go @@ -39,9 +39,22 @@ func TestMariadbSetReplicationSourceCommand(t *testing.T) { MASTER_USE_GTID = current_pos` conn := &Conn{flavor: mariadbFlavor101{}} - got := conn.SetReplicationSourceCommand(params, host, port, connectRetry) + got := conn.SetReplicationSourceCommand(params, host, port, 0, connectRetry) assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) + var heartbeatInterval float64 = 5.4 + want = `CHANGE MASTER TO + MASTER_HOST = 'localhost', + MASTER_PORT = 123, + MASTER_USER = 'username', + MASTER_PASSWORD = 'password', + MASTER_CONNECT_RETRY = 1234, + MASTER_HEARTBEAT_PERIOD = 5.4, + MASTER_USE_GTID = current_pos` + + got = conn.SetReplicationSourceCommand(params, host, port, heartbeatInterval, connectRetry) + assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, heartbeatInterval, connectRetry, got, want) + } func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) { @@ -71,7 +84,7 @@ func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) { MASTER_USE_GTID = current_pos` conn := &Conn{flavor: mariadbFlavor101{}} - got := conn.SetReplicationSourceCommand(params, host, port, connectRetry) + got := 
conn.SetReplicationSourceCommand(params, host, port, 0, connectRetry) assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) } diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index bc5f31006e5..a1245257c74 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -20,29 +20,43 @@ import ( "context" "fmt" "io" + "strconv" + "strings" "time" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/proto/replicationdata" "vitess.io/vitess/go/vt/vterrors" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -// mysqlFlavor implements the Flavor interface for Mysql. -type mysqlFlavor struct{} -type mysqlFlavor56 struct { - mysqlFlavor +// mysqlFlavor implements the Flavor interface for Mysql. This is +// the most up to date / recent flavor and uses the most modern +// replication commands and semantics. +type mysqlFlavor struct { + serverVersion string } -type mysqlFlavor57 struct { + +// mysqlFlavor8 is for later MySQL 8.0 versions. It's the same as +// the modern flavor, but overrides some specific commands that +// are only available on MySQL 8.2.0 and later. This is specifically +// commands like SHOW BINARY LOG STATUS. +type mysqlFlavor8 struct { mysqlFlavor } -type mysqlFlavor80 struct { + +// mysqlFlavor82 is for MySQL 8.2.0 and later. It's the most modern +// flavor but has an explicit name so that it's clear it's explicitly +// for MySQL 8.2.0 and later. +type mysqlFlavor82 struct { mysqlFlavor } -var _ flavor = (*mysqlFlavor56)(nil) -var _ flavor = (*mysqlFlavor57)(nil) -var _ flavor = (*mysqlFlavor80)(nil) +var _ flavor = (*mysqlFlavor8)(nil) +var _ flavor = (*mysqlFlavor82)(nil) // primaryGTIDSet is part of the Flavor interface. 
func (mysqlFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { @@ -52,7 +66,7 @@ func (mysqlFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, err } if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_executed: %#v", qr) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for gtid_executed: %#v", qr) } return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) } @@ -65,7 +79,7 @@ func (mysqlFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) { return nil, err } if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_purged: %#v", qr) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for gtid_purged: %#v", qr) } return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString()) } @@ -78,7 +92,7 @@ func (mysqlFlavor) serverUUID(c *Conn) (string, error) { return "", err } if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return "", vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for server_uuid: %#v", qr) + return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for server_uuid: %#v", qr) } return qr.Rows[0][0].ToString(), nil } @@ -90,82 +104,126 @@ func (mysqlFlavor) gtidMode(c *Conn) (string, error) { return "", err } if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return "", vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_mode: %#v", qr) + return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for gtid_mode: %#v", qr) } return qr.Rows[0][0].ToString(), nil } -func (mysqlFlavor) startReplicationCommand() string { - return "START SLAVE" +func (f mysqlFlavor) startReplicationCommand() string { + return "START REPLICA" } -func (mysqlFlavor) restartReplicationCommands() []string { +func (f mysqlFlavor) restartReplicationCommands() 
[]string { return []string{ - "STOP SLAVE", - "RESET SLAVE", - "START SLAVE", + "STOP REPLICA", + "RESET REPLICA", + "START REPLICA", } } -func (mysqlFlavor) startReplicationUntilAfter(pos replication.Position) string { - return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos) +func (f mysqlFlavor) startReplicationUntilAfter(pos replication.Position) string { + return fmt.Sprintf("START REPLICA UNTIL SQL_AFTER_GTIDS = '%s'", pos) } -func (mysqlFlavor) startSQLThreadUntilAfter(pos replication.Position) string { - return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL SQL_AFTER_GTIDS = '%s'", pos) +func (f mysqlFlavor) startSQLThreadUntilAfter(pos replication.Position) string { + return fmt.Sprintf("START REPLICA SQL_THREAD UNTIL SQL_AFTER_GTIDS = '%s'", pos) } -func (mysqlFlavor) stopReplicationCommand() string { - return "STOP SLAVE" +func (f mysqlFlavor) stopReplicationCommand() string { + return "STOP REPLICA" } -func (mysqlFlavor) stopIOThreadCommand() string { - return "STOP SLAVE IO_THREAD" +func (f mysqlFlavor) resetReplicationCommand() string { + return "RESET REPLICA ALL" } -func (mysqlFlavor) stopSQLThreadCommand() string { - return "STOP SLAVE SQL_THREAD" +func (f mysqlFlavor) stopIOThreadCommand() string { + return "STOP REPLICA IO_THREAD" } -func (mysqlFlavor) startSQLThreadCommand() string { - return "START SLAVE SQL_THREAD" +func (f mysqlFlavor) stopSQLThreadCommand() string { + return "STOP REPLICA SQL_THREAD" } -// sendBinlogDumpCommand is part of the Flavor interface. 
-func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { - gtidSet, ok := startPos.GTIDSet.(replication.Mysql56GTIDSet) - if !ok { - return vterrors.Errorf(vtrpc.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet) +func (f mysqlFlavor) startSQLThreadCommand() string { + return "START REPLICA SQL_THREAD" +} + +// resetReplicationCommands is part of the Flavor interface. +func (mysqlFlavor) resetReplicationCommands(c *Conn) []string { + resetCommands := []string{ + "STOP REPLICA", + "RESET REPLICA ALL", // "ALL" makes it forget source host:port. + "RESET BINARY LOGS AND GTIDS", // This will also clear gtid_executed and gtid_purged. + } + status, err := c.SemiSyncExtensionLoaded() + if err != nil { + return resetCommands } + switch status { + case SemiSyncTypeSource: + resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_source_enabled = false, GLOBAL rpl_semi_sync_replica_enabled = false") // semi-sync will be enabled if needed when replica is started. + case SemiSyncTypeMaster: + resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_master_enabled = false, GLOBAL rpl_semi_sync_slave_enabled = false") // semi-sync will be enabled if needed when replica is started. + default: + // Nothing to do. + } + return resetCommands +} - // Build the command. - sidBlock := gtidSet.SIDBlock() - return c.WriteComBinlogDumpGTID(serverID, binlogFilename, 4, 0, sidBlock) +func (mysqlFlavor) resetBinaryLogsCommand() string { + return "RESET BINARY LOGS AND GTIDS" } // resetReplicationCommands is part of the Flavor interface. -func (mysqlFlavor) resetReplicationCommands(c *Conn) []string { +func (mysqlFlavor8) resetReplicationCommands(c *Conn) []string { resetCommands := []string{ - "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget source host:port. - "RESET MASTER", // This will also clear gtid_executed and gtid_purged. 
+ "STOP REPLICA", + "RESET REPLICA ALL", // "ALL" makes it forget source host:port. + "RESET MASTER", // This will also clear gtid_executed and gtid_purged. } - if c.SemiSyncExtensionLoaded() { + status, err := c.SemiSyncExtensionLoaded() + if err != nil { + return resetCommands + } + switch status { + case SemiSyncTypeSource: + resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_source_enabled = false, GLOBAL rpl_semi_sync_replica_enabled = false") // semi-sync will be enabled if needed when replica is started. + case SemiSyncTypeMaster: resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_master_enabled = false, GLOBAL rpl_semi_sync_slave_enabled = false") // semi-sync will be enabled if needed when replica is started. + default: + // Nothing to do. } return resetCommands } +// resetReplicationCommands is part of the Flavor interface. +func (mysqlFlavor8) resetBinaryLogsCommand() string { + return "RESET MASTER" +} + // resetReplicationParametersCommands is part of the Flavor interface. func (mysqlFlavor) resetReplicationParametersCommands(c *Conn) []string { resetCommands := []string{ - "RESET SLAVE ALL", // "ALL" makes it forget source host:port. + "RESET REPLICA ALL", // "ALL" makes it forget source host:port. } return resetCommands } +// sendBinlogDumpCommand is part of the Flavor interface. +func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error { + gtidSet, ok := startPos.GTIDSet.(replication.Mysql56GTIDSet) + if !ok { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet) + } + + // Build the command. + sidBlock := gtidSet.SIDBlock() + return c.WriteComBinlogDumpGTID(serverID, binlogFilename, 4, 0, sidBlock) +} + // setReplicationPositionCommands is part of the Flavor interface. 
-func (mysqlFlavor) setReplicationPositionCommands(pos replication.Position) []string { +func (mysqlFlavor8) setReplicationPositionCommands(pos replication.Position) []string { return []string{ "RESET MASTER", // We must clear gtid_executed before setting gtid_purged. fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", pos), @@ -173,33 +231,35 @@ func (mysqlFlavor) setReplicationPositionCommands(pos replication.Position) []st } // setReplicationPositionCommands is part of the Flavor interface. -func (mysqlFlavor) changeReplicationSourceArg() string { - return "MASTER_AUTO_POSITION = 1" +func (mysqlFlavor) setReplicationPositionCommands(pos replication.Position) []string { + return []string{ + "RESET BINARY LOGS AND GTIDS", // We must clear gtid_executed before setting gtid_purged. + fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", pos), + } } -// status is part of the Flavor interface. -func (mysqlFlavor) status(c *Conn) (replication.ReplicationStatus, error) { - qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */) +// primaryStatus is part of the Flavor interface. +func (mysqlFlavor8) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { + qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return replication.ReplicationStatus{}, err + return replication.PrimaryStatus{}, err } if len(qr.Rows) == 0 { - // The query returned no data, meaning the server - // is not configured as a replica. - return replication.ReplicationStatus{}, ErrNotReplica + // The query returned no data. We don't know how this could happen. + return replication.PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return replication.ReplicationStatus{}, err + return replication.PrimaryStatus{}, err } - return replication.ParseMysqlReplicationStatus(resultMap) + return replication.ParseMysqlPrimaryStatus(resultMap) } // primaryStatus is part of the Flavor interface. 
func (mysqlFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { - qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) + qr, err := c.ExecuteFetch("SHOW BINARY LOG STATUS", 100, true /* wantfields */) if err != nil { return replication.PrimaryStatus{}, err } @@ -216,14 +276,72 @@ func (mysqlFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { return replication.ParseMysqlPrimaryStatus(resultMap) } -// waitUntilPositionCommand is part of the Flavor interface. -func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) { +// replicationConfiguration is part of the Flavor interface. +func (mysqlFlavor) replicationConfiguration(c *Conn) (*replicationdata.Configuration, error) { + qr, err := c.ExecuteFetch(readReplicationConnectionConfiguration, 100, true /* wantfields */) + if err != nil { + return nil, err + } + if len(qr.Rows) == 0 { + // The query returned no data. This is not a replica. + return nil, ErrNotReplica + } + + resultMap, err := resultToMap(qr) + if err != nil { + return nil, err + } + + heartbeatInterval, err := strconv.ParseFloat(resultMap["HEARTBEAT_INTERVAL"], 64) + if err != nil { + return nil, err + } + + return &replicationdata.Configuration{ + HeartbeatInterval: heartbeatInterval, + }, nil +} + +// replicationNetTimeout is part of the Flavor interface. +func (mysqlFlavor) replicationNetTimeout(c *Conn) (int32, error) { + qr, err := c.ExecuteFetch("select @@global.replica_net_timeout", 1, false) + if err != nil { + return 0, err + } + if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for replica_net_timeout: %#v", qr) + } + return qr.Rows[0][0].ToInt32() +} + +// status is part of the Flavor interface. 
+func (mysqlFlavor) status(c *Conn) (replication.ReplicationStatus, error) { + qr, err := c.ExecuteFetch("SHOW REPLICA STATUS", 100, true /* wantfields */) + if err != nil { + return replication.ReplicationStatus{}, err + } + if len(qr.Rows) == 0 { + // The query returned no data, meaning the server + // is not configured as a replica. + return replication.ReplicationStatus{}, ErrNotReplica + } + + resultMap, err := resultToMap(qr) + if err != nil { + return replication.ReplicationStatus{}, err + } + + return replication.ParseMysqlReplicationStatus(resultMap, true) +} + +// waitUntilPosition is part of the Flavor interface. +func (mysqlFlavor) waitUntilPosition(ctx context.Context, c *Conn, pos replication.Position) error { // A timeout of 0 means wait indefinitely. timeoutSeconds := 0 if deadline, ok := ctx.Deadline(); ok { timeout := time.Until(deadline) if timeout <= 0 { - return "", vterrors.Errorf(vtrpc.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) } // Only whole numbers of seconds are supported. @@ -234,7 +352,30 @@ func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos replication } } - return fmt.Sprintf("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('%s', %v)", pos, timeoutSeconds), nil + query := fmt.Sprintf("SELECT WAIT_FOR_EXECUTED_GTID_SET('%s', %v)", pos, timeoutSeconds) + result, err := c.ExecuteFetch(query, 1, false) + if err != nil { + return err + } + + // For WAIT_FOR_EXECUTED_GTID_SET(), the return value is the state of the query, where + // 0 represents success, and 1 represents timeout. Any other failures generate an error. 
+ if len(result.Rows) != 1 || len(result.Rows[0]) != 1 { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid results: %#v", result) + } + val := result.Rows[0][0] + state, err := val.ToInt64() + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid result of %#v", val) + } + switch state { + case 0: + return nil + case 1: + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "timed out waiting for position %v", pos) + default: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid result of %d", state) + } } // readBinlogEvent is part of the Flavor interface. @@ -262,71 +403,17 @@ func (mysqlFlavor) baseShowTables() string { return "SELECT table_name, table_type, unix_timestamp(create_time), table_comment FROM information_schema.tables WHERE table_schema = database()" } -// TablesWithSize56 is a query to select table along with size for mysql 5.6 -const TablesWithSize56 = `SELECT table_name, - table_type, - UNIX_TIMESTAMP(create_time) AS uts_create_time, - table_comment, - SUM(data_length + index_length), - SUM(data_length + index_length) -FROM information_schema.tables -WHERE table_schema = database() -GROUP BY table_name, - table_type, - uts_create_time, - table_comment` - -// TablesWithSize57 is a query to select table along with size for mysql 5.7. -// -// It's a little weird, because the JOIN predicate only works if the table and databases do not contain weird characters. -// If the join does not return any data, we fall back to the same fields as used in the mysql 5.6 query. -// -// We join with a subquery that materializes the data from `information_schema.innodb_sys_tablespaces` -// early for performance reasons. This effectively causes only a single read of `information_schema.innodb_sys_tablespaces` -// per query. 
-const TablesWithSize57 = `SELECT t.table_name, - t.table_type, - UNIX_TIMESTAMP(t.create_time), - t.table_comment, - IFNULL(SUM(i.file_size), SUM(t.data_length + t.index_length)), - IFNULL(SUM(i.allocated_size), SUM(t.data_length + t.index_length)) -FROM information_schema.tables t -LEFT OUTER JOIN ( - SELECT space, file_size, allocated_size, name - FROM information_schema.innodb_sys_tablespaces - WHERE name LIKE CONCAT(database(), '/%') - GROUP BY space, file_size, allocated_size, name -) i ON i.name = CONCAT(t.table_schema, '/', t.table_name) or i.name LIKE CONCAT(t.table_schema, '/', t.table_name, '#p#%') -WHERE t.table_schema = database() -GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment` - // TablesWithSize80 is a query to select table along with size for mysql 8.0 // -// We join with a subquery that materializes the data from `information_schema.innodb_sys_tablespaces` -// early for performance reasons. This effectively causes only a single read of `information_schema.innodb_tablespaces` -// per query. // Note the following: -// - We use UNION ALL to deal differently with partitioned tables vs. non-partitioned tables. -// Originally, the query handled both, but that introduced "WHERE ... OR" conditions that led to poor query -// optimization. By separating to UNION ALL we remove all "OR" conditions. +// - We use a single query to fetch both partitioned and non-partitioned tables. This is because +// accessing `information_schema.innodb_tablespaces` is expensive on servers with many tablespaces, +// and every query that loads the table needs to perform full table scans on it. Doing a single +// table scan is more efficient than doing more than one. // - We utilize `INFORMATION_SCHEMA`.`TABLES`.`CREATE_OPTIONS` column to do early pruning before the JOIN. // - `TABLES`.`TABLE_NAME` has `utf8mb4_0900_ai_ci` collation. `INNODB_TABLESPACES`.`NAME` has `utf8mb3_general_ci`. 
// We normalize the collation to get better query performance (we force the casting at the time of our choosing) -// - `create_options` is NULL for views, and therefore we need an additional UNION ALL to include views const TablesWithSize80 = `SELECT t.table_name, - t.table_type, - UNIX_TIMESTAMP(t.create_time), - t.table_comment, - i.file_size, - i.allocated_size - FROM information_schema.tables t - LEFT JOIN information_schema.innodb_tablespaces i - ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8mb3_general_ci - WHERE - t.table_schema = database() AND not t.create_options <=> 'partitioned' -UNION ALL - SELECT - t.table_name, t.table_type, UNIX_TIMESTAMP(t.create_time), t.table_comment, @@ -334,70 +421,90 @@ UNION ALL SUM(i.allocated_size) FROM information_schema.tables t LEFT JOIN information_schema.innodb_tablespaces i - ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8mb3_general_ci ) + ON i.name LIKE CONCAT(t.table_schema, '/', t.table_name, IF(t.create_options <=> 'partitioned', '#p#%', '')) COLLATE utf8mb3_general_ci WHERE - t.table_schema = database() AND t.create_options <=> 'partitioned' + t.table_schema = database() GROUP BY t.table_schema, t.table_name, t.table_type, t.create_time, t.table_comment ` // baseShowTablesWithSizes is part of the Flavor interface. -func (mysqlFlavor56) baseShowTablesWithSizes() string { - return TablesWithSize56 +func (mysqlFlavor57) baseShowTablesWithSizes() string { + return TablesWithSize57 } // supportsCapability is part of the Flavor interface. -func (mysqlFlavor56) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) { - switch capability { - default: - return false, nil - } +func (f mysqlFlavor) supportsCapability(capability capabilities.FlavorCapability) (bool, error) { + return capabilities.MySQLVersionHasCapability(f.serverVersion, capability) } // baseShowTablesWithSizes is part of the Flavor interface. 
-func (mysqlFlavor57) baseShowTablesWithSizes() string { - return TablesWithSize57 +func (mysqlFlavor) baseShowTablesWithSizes() string { + return TablesWithSize80 } -// supportsCapability is part of the Flavor interface. -func (mysqlFlavor57) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) { - switch capability { - case MySQLJSONFlavorCapability: - return true, nil - default: - return false, nil +func (mysqlFlavor) setReplicationSourceCommand(params *ConnParams, host string, port int32, heartbeatInterval float64, connectRetry int) string { + args := []string{ + fmt.Sprintf("SOURCE_HOST = '%s'", host), + fmt.Sprintf("SOURCE_PORT = %d", port), + fmt.Sprintf("SOURCE_USER = '%s'", params.Uname), + fmt.Sprintf("SOURCE_PASSWORD = '%s'", params.Pass), + fmt.Sprintf("SOURCE_CONNECT_RETRY = %d", connectRetry), + } + if params.SslEnabled() { + args = append(args, "SOURCE_SSL = 1") } + if params.SslCa != "" { + args = append(args, fmt.Sprintf("SOURCE_SSL_CA = '%s'", params.SslCa)) + } + if params.SslCaPath != "" { + args = append(args, fmt.Sprintf("SOURCE_SSL_CAPATH = '%s'", params.SslCaPath)) + } + if params.SslCert != "" { + args = append(args, fmt.Sprintf("SOURCE_SSL_CERT = '%s'", params.SslCert)) + } + if params.SslKey != "" { + args = append(args, fmt.Sprintf("SOURCE_SSL_KEY = '%s'", params.SslKey)) + } + if heartbeatInterval != 0 { + args = append(args, fmt.Sprintf("SOURCE_HEARTBEAT_PERIOD = %v", heartbeatInterval)) + } + args = append(args, "SOURCE_AUTO_POSITION = 1") + return "CHANGE REPLICATION SOURCE TO\n " + strings.Join(args, ",\n ") } -// baseShowTablesWithSizes is part of the Flavor interface. 
-func (mysqlFlavor80) baseShowTablesWithSizes() string { - return TablesWithSize80 -} +func (mysqlFlavor) catchupToGTIDCommands(params *ConnParams, replPos replication.Position) []string { + cmds := []string{ + "STOP REPLICA FOR CHANNEL '' ", + "STOP REPLICA IO_THREAD FOR CHANNEL ''", + } -// supportsCapability is part of the Flavor interface. -func (mysqlFlavor80) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) { - switch capability { - case InstantDDLFlavorCapability, - InstantExpandEnumCapability, - InstantAddLastColumnFlavorCapability, - InstantAddDropVirtualColumnFlavorCapability, - InstantChangeColumnDefaultFlavorCapability: - return true, nil - case InstantAddDropColumnFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 29) - case TransactionalGtidExecutedFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 17) - case FastDropTableFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 23) - case MySQLJSONFlavorCapability: - return true, nil - case MySQLUpgradeInServerFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 16) - case DynamicRedoLogCapacityFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 30) - case DisableRedoLogFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 21) - default: - return false, nil + if params.SslCa != "" || params.SslCert != "" { + // We need to use TLS + cmd := fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='%s', SOURCE_PASSWORD='%s', SOURCE_AUTO_POSITION=1, SOURCE_SSL=1", params.Host, params.Port, params.Uname, params.Pass) + if params.SslCa != "" { + cmd += fmt.Sprintf(", SOURCE_SSL_CA='%s'", params.SslCa) + } + if params.SslCert != "" { + cmd += fmt.Sprintf(", SOURCE_SSL_CERT='%s'", params.SslCert) + } + if params.SslKey != "" { + cmd += fmt.Sprintf(", SOURCE_SSL_KEY='%s'", params.SslKey) + } + cmds = append(cmds, cmd+";") + } else { + // No TLS + cmds 
= append(cmds, fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='%s', SOURCE_PASSWORD='%s', SOURCE_AUTO_POSITION=1;", params.Host, params.Port, params.Uname, params.Pass)) } + + if replPos.IsZero() { // when the there is no afterPos, that means need to replicate completely + cmds = append(cmds, "START REPLICA") + } else { + cmds = append(cmds, fmt.Sprintf("START REPLICA UNTIL SQL_BEFORE_GTIDS = '%s'", replPos.GTIDSet.Last())) + } + return cmds +} + +func (mysqlFlavor) binlogReplicatedUpdates() string { + return "@@global.log_replica_updates" } diff --git a/go/mysql/flavor_mysql_legacy.go b/go/mysql/flavor_mysql_legacy.go new file mode 100644 index 00000000000..a5639cc944e --- /dev/null +++ b/go/mysql/flavor_mysql_legacy.go @@ -0,0 +1,288 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysql + +import ( + "fmt" + "strings" + + "vitess.io/vitess/go/mysql/replication" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// mysqlFlavorLegacy implements the Flavor interface for Mysql for +// older versions. This applies to MySQL 5.7 and early 8.0 versions from +// before the replication terminology deprecation. +type mysqlFlavorLegacy struct { + mysqlFlavor +} + +// mysqlFlavor57 is the explicit flavor for MySQL 5.7. It's basically +// the same as the legacy flavor, but it has a separate name here to +// be explicit about the version. 
+type mysqlFlavor57 struct { + mysqlFlavorLegacy +} + +// mysqlFlavor8 is the explicit flavor for MySQL 8.0. It's similarly to +// 5.7 the same as the legacy flavor, but has an explicit name to be +// clear it's used for early MySQL 8.0 versions. +type mysqlFlavor8Legacy struct { + mysqlFlavorLegacy +} + +var _ flavor = (*mysqlFlavor57)(nil) +var _ flavor = (*mysqlFlavor8Legacy)(nil) +var _ flavor = (*mysqlFlavor8)(nil) + +// TablesWithSize56 is a query to select table along with size for mysql 5.6 +const TablesWithSize56 = `SELECT table_name, + table_type, + UNIX_TIMESTAMP(create_time) AS uts_create_time, + table_comment, + SUM(data_length + index_length), + SUM(data_length + index_length) +FROM information_schema.tables +WHERE table_schema = database() +GROUP BY table_name, + table_type, + uts_create_time, + table_comment` + +// TablesWithSize57 is a query to select table along with size for mysql 5.7. +// +// It's a little weird, because the JOIN predicate only works if the table and databases do not contain weird characters. +// If the join does not return any data, we fall back to the same fields as used in the mysql 5.6 query. +// +// We join with a subquery that materializes the data from `information_schema.innodb_sys_tablespaces` +// early for performance reasons. This effectively causes only a single read of `information_schema.innodb_sys_tablespaces` +// per query. 
+const TablesWithSize57 = `SELECT t.table_name, + t.table_type, + UNIX_TIMESTAMP(t.create_time), + t.table_comment, + IFNULL(SUM(i.file_size), SUM(t.data_length + t.index_length)), + IFNULL(SUM(i.allocated_size), SUM(t.data_length + t.index_length)) +FROM information_schema.tables t +LEFT OUTER JOIN ( + SELECT space, file_size, allocated_size, name + FROM information_schema.innodb_sys_tablespaces + WHERE name LIKE CONCAT(database(), '/%') + GROUP BY space, file_size, allocated_size, name +) i ON i.name = CONCAT(t.table_schema, '/', t.table_name) or i.name LIKE CONCAT(t.table_schema, '/', t.table_name, '#p#%') +WHERE t.table_schema = database() +GROUP BY t.table_name, t.table_type, t.create_time, t.table_comment` + +func (mysqlFlavorLegacy) startReplicationCommand() string { + return "START SLAVE" +} + +func (mysqlFlavorLegacy) restartReplicationCommands() []string { + return []string{ + "STOP SLAVE", + "RESET SLAVE", + "START SLAVE", + } +} + +func (mysqlFlavorLegacy) startReplicationUntilAfter(pos replication.Position) string { + return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos) +} + +func (mysqlFlavorLegacy) startSQLThreadUntilAfter(pos replication.Position) string { + return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL SQL_AFTER_GTIDS = '%s'", pos) +} + +func (mysqlFlavorLegacy) stopReplicationCommand() string { + return "STOP SLAVE" +} + +func (mysqlFlavorLegacy) resetReplicationCommand() string { + return "RESET SLAVE ALL" +} + +func (mysqlFlavorLegacy) stopIOThreadCommand() string { + return "STOP SLAVE IO_THREAD" +} + +func (mysqlFlavorLegacy) stopSQLThreadCommand() string { + return "STOP SLAVE SQL_THREAD" +} + +func (mysqlFlavorLegacy) startSQLThreadCommand() string { + return "START SLAVE SQL_THREAD" +} + +// resetReplicationCommands is part of the Flavor interface. 
+func (mysqlFlavorLegacy) resetReplicationCommands(c *Conn) []string { + resetCommands := []string{ + "STOP SLAVE", + "RESET SLAVE ALL", // "ALL" makes it forget source host:port. + "RESET MASTER", // This will also clear gtid_executed and gtid_purged. + } + status, err := c.SemiSyncExtensionLoaded() + if err != nil { + return resetCommands + } + switch status { + case SemiSyncTypeSource: + resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_source_enabled = false, GLOBAL rpl_semi_sync_replica_enabled = false") // semi-sync will be enabled if needed when replica is started. + case SemiSyncTypeMaster: + resetCommands = append(resetCommands, "SET GLOBAL rpl_semi_sync_master_enabled = false, GLOBAL rpl_semi_sync_slave_enabled = false") // semi-sync will be enabled if needed when replica is started. + default: + // Nothing to do. + } + return resetCommands +} + +// resetReplicationParametersCommands is part of the Flavor interface. +func (mysqlFlavorLegacy) resetReplicationParametersCommands(c *Conn) []string { + resetCommands := []string{ + "RESET SLAVE ALL", // "ALL" makes it forget source host:port. + } + return resetCommands +} + +// status is part of the Flavor interface. +func (mysqlFlavorLegacy) status(c *Conn) (replication.ReplicationStatus, error) { + qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */) + if err != nil { + return replication.ReplicationStatus{}, err + } + if len(qr.Rows) == 0 { + // The query returned no data, meaning the server + // is not configured as a replica. + return replication.ReplicationStatus{}, ErrNotReplica + } + + resultMap, err := resultToMap(qr) + if err != nil { + return replication.ReplicationStatus{}, err + } + + return replication.ParseMysqlReplicationStatus(resultMap, false) +} + +// replicationNetTimeout is part of the Flavor interface. 
+func (mysqlFlavorLegacy) replicationNetTimeout(c *Conn) (int32, error) { + qr, err := c.ExecuteFetch("select @@global.slave_net_timeout", 1, false) + if err != nil { + return 0, err + } + if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected result format for slave_net_timeout: %#v", qr) + } + return qr.Rows[0][0].ToInt32() +} + +func (mysqlFlavorLegacy) catchupToGTIDCommands(params *ConnParams, replPos replication.Position) []string { + cmds := []string{ + "STOP SLAVE FOR CHANNEL '' ", + "STOP SLAVE IO_THREAD FOR CHANNEL ''", + } + + if params.SslCa != "" || params.SslCert != "" { + // We need to use TLS + cmd := fmt.Sprintf("CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='%s', MASTER_PASSWORD='%s', MASTER_AUTO_POSITION=1, MASTER_SSL=1", params.Host, params.Port, params.Uname, params.Pass) + if params.SslCa != "" { + cmd += fmt.Sprintf(", MASTER_SSL_CA='%s'", params.SslCa) + } + if params.SslCert != "" { + cmd += fmt.Sprintf(", MASTER_SSL_CERT='%s'", params.SslCert) + } + if params.SslKey != "" { + cmd += fmt.Sprintf(", MASTER_SSL_KEY='%s'", params.SslKey) + } + cmds = append(cmds, cmd+";") + } else { + // No TLS + cmds = append(cmds, fmt.Sprintf("CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='%s', MASTER_PASSWORD='%s', MASTER_AUTO_POSITION=1;", params.Host, params.Port, params.Uname, params.Pass)) + } + + if replPos.IsZero() { // when the there is no afterPos, that means need to replicate completely + cmds = append(cmds, "START SLAVE") + } else { + cmds = append(cmds, fmt.Sprintf("START SLAVE UNTIL SQL_BEFORE_GTIDS = '%s'", replPos.GTIDSet.Last())) + } + return cmds +} + +func (mysqlFlavorLegacy) setReplicationSourceCommand(params *ConnParams, host string, port int32, heartbeatInterval float64, connectRetry int) string { + args := []string{ + fmt.Sprintf("MASTER_HOST = '%s'", host), + fmt.Sprintf("MASTER_PORT = %d", port), + fmt.Sprintf("MASTER_USER = '%s'", params.Uname), + 
fmt.Sprintf("MASTER_PASSWORD = '%s'", params.Pass), + fmt.Sprintf("MASTER_CONNECT_RETRY = %d", connectRetry), + } + if params.SslEnabled() { + args = append(args, "MASTER_SSL = 1") + } + if params.SslCa != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_CA = '%s'", params.SslCa)) + } + if params.SslCaPath != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_CAPATH = '%s'", params.SslCaPath)) + } + if params.SslCert != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_CERT = '%s'", params.SslCert)) + } + if params.SslKey != "" { + args = append(args, fmt.Sprintf("MASTER_SSL_KEY = '%s'", params.SslKey)) + } + if heartbeatInterval != 0 { + args = append(args, fmt.Sprintf("MASTER_HEARTBEAT_PERIOD = %v", heartbeatInterval)) + } + args = append(args, "MASTER_AUTO_POSITION = 1") + return "CHANGE MASTER TO\n " + strings.Join(args, ",\n ") +} + +func (mysqlFlavorLegacy) resetBinaryLogsCommand() string { + return "RESET MASTER" +} + +func (mysqlFlavorLegacy) binlogReplicatedUpdates() string { + return "@@global.log_slave_updates" +} + +// setReplicationPositionCommands is part of the Flavor interface. +func (mysqlFlavorLegacy) setReplicationPositionCommands(pos replication.Position) []string { + return []string{ + "RESET MASTER", // We must clear gtid_executed before setting gtid_purged. + fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", pos), + } +} + +// primaryStatus is part of the Flavor interface. +func (mysqlFlavorLegacy) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { + qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) + if err != nil { + return replication.PrimaryStatus{}, err + } + if len(qr.Rows) == 0 { + // The query returned no data. We don't know how this could happen. 
+ return replication.PrimaryStatus{}, ErrNoPrimaryStatus + } + + resultMap, err := resultToMap(qr) + if err != nil { + return replication.PrimaryStatus{}, err + } + + return replication.ParseMysqlPrimaryStatus(resultMap) +} diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go index 0e1b749633a..a85c39e2807 100644 --- a/go/mysql/flavor_mysql_test.go +++ b/go/mysql/flavor_mysql_test.go @@ -20,9 +20,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/replication" ) -func TestMysql56SetReplicationSourceCommand(t *testing.T) { +func TestMysql8SetReplicationSourceCommand(t *testing.T) { params := &ConnParams{ Uname: "username", Pass: "password", @@ -30,21 +32,33 @@ func TestMysql56SetReplicationSourceCommand(t *testing.T) { host := "localhost" port := int32(123) connectRetry := 1234 - want := `CHANGE MASTER TO - MASTER_HOST = 'localhost', - MASTER_PORT = 123, - MASTER_USER = 'username', - MASTER_PASSWORD = 'password', - MASTER_CONNECT_RETRY = 1234, - MASTER_AUTO_POSITION = 1` - - conn := &Conn{flavor: mysqlFlavor57{}} - got := conn.SetReplicationSourceCommand(params, host, port, connectRetry) + want := `CHANGE REPLICATION SOURCE TO + SOURCE_HOST = 'localhost', + SOURCE_PORT = 123, + SOURCE_USER = 'username', + SOURCE_PASSWORD = 'password', + SOURCE_CONNECT_RETRY = 1234, + SOURCE_AUTO_POSITION = 1` + + conn := &Conn{flavor: mysqlFlavor8{}} + got := conn.SetReplicationSourceCommand(params, host, port, 0, connectRetry) assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) + var heartbeatInterval float64 = 5.4 + want = `CHANGE REPLICATION SOURCE TO + SOURCE_HOST = 'localhost', + SOURCE_PORT = 123, + SOURCE_USER = 'username', + SOURCE_PASSWORD = 'password', + SOURCE_CONNECT_RETRY = 1234, + SOURCE_HEARTBEAT_PERIOD = 5.4, + SOURCE_AUTO_POSITION = 1` + + got = conn.SetReplicationSourceCommand(params, host, port, 
heartbeatInterval, connectRetry) + assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, heartbeatInterval, connectRetry, got, want) } -func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) { +func TestMysql8SetReplicationSourceCommandSSL(t *testing.T) { params := &ConnParams{ Uname: "username", Pass: "password", @@ -57,21 +71,46 @@ func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) { host := "localhost" port := int32(123) connectRetry := 1234 - want := `CHANGE MASTER TO - MASTER_HOST = 'localhost', - MASTER_PORT = 123, - MASTER_USER = 'username', - MASTER_PASSWORD = 'password', - MASTER_CONNECT_RETRY = 1234, - MASTER_SSL = 1, - MASTER_SSL_CA = 'ssl-ca', - MASTER_SSL_CAPATH = 'ssl-ca-path', - MASTER_SSL_CERT = 'ssl-cert', - MASTER_SSL_KEY = 'ssl-key', - MASTER_AUTO_POSITION = 1` - - conn := &Conn{flavor: mysqlFlavor57{}} - got := conn.SetReplicationSourceCommand(params, host, port, connectRetry) + want := `CHANGE REPLICATION SOURCE TO + SOURCE_HOST = 'localhost', + SOURCE_PORT = 123, + SOURCE_USER = 'username', + SOURCE_PASSWORD = 'password', + SOURCE_CONNECT_RETRY = 1234, + SOURCE_SSL = 1, + SOURCE_SSL_CA = 'ssl-ca', + SOURCE_SSL_CAPATH = 'ssl-ca-path', + SOURCE_SSL_CERT = 'ssl-cert', + SOURCE_SSL_KEY = 'ssl-key', + SOURCE_AUTO_POSITION = 1` + + conn := &Conn{flavor: mysqlFlavor8{}} + got := conn.SetReplicationSourceCommand(params, host, port, 0, connectRetry) assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want) +} + +func TestMysql8SetReplicationPositionCommands(t *testing.T) { + pos := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} + conn := &Conn{flavor: mysqlFlavor8{}} + queries := conn.SetReplicationPositionCommands(pos) + assert.Equal(t, []string{"RESET MASTER", "SET GLOBAL gtid_purged = ''"}, queries) +} + +func TestMysql82SetReplicationPositionCommands(t 
*testing.T) { + pos := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} + conn := &Conn{flavor: mysqlFlavor82{}} + queries := conn.SetReplicationPositionCommands(pos) + assert.Equal(t, []string{"RESET BINARY LOGS AND GTIDS", "SET GLOBAL gtid_purged = ''"}, queries) +} + +func TestMysql8ResetReplicationParametersCommands(t *testing.T) { + conn := &Conn{flavor: mysqlFlavor8{}} + queries := conn.ResetReplicationParametersCommands() + assert.Equal(t, []string{"RESET REPLICA ALL"}, queries) +} +func TestMysql82ResetReplicationParametersCommands(t *testing.T) { + conn := &Conn{flavor: mysqlFlavor82{}} + queries := conn.ResetReplicationParametersCommands() + assert.Equal(t, []string{"RESET REPLICA ALL"}, queries) } diff --git a/go/mysql/flavor_mysqlgr.go b/go/mysql/flavor_mysqlgr.go index e96a6433f73..df3dc060742 100644 --- a/go/mysql/flavor_mysqlgr.go +++ b/go/mysql/flavor_mysqlgr.go @@ -21,6 +21,7 @@ import ( "fmt" "math" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/proto/vtrpc" @@ -74,6 +75,10 @@ func (mysqlGRFlavor) stopReplicationCommand() string { return "" } +func (mysqlGRFlavor) resetReplicationCommand() string { + return "" +} + // stopIOThreadCommand is disabled in mysqlGRFlavor func (mysqlGRFlavor) stopIOThreadCommand() string { return "" @@ -233,12 +238,17 @@ func fetchStatusForGroupReplication(c *Conn, query string, onResult func([]sqlty return onResult(qr.Rows[0]) } -// primaryStatus returns the result of 'SHOW MASTER STATUS', +// primaryStatus returns the result of 'SHOW BINARY LOG STATUS', // with parsed executed position. func (mysqlGRFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) { return mysqlFlavor{}.primaryStatus(c) } +// replicationNetTimeout is part of the Flavor interface. 
+func (mysqlGRFlavor) replicationNetTimeout(c *Conn) (int32, error) { + return mysqlFlavor8{}.replicationNetTimeout(c) +} + func (mysqlGRFlavor) baseShowTables() string { return mysqlFlavor{}.baseShowTables() } @@ -248,31 +258,10 @@ func (mysqlGRFlavor) baseShowTablesWithSizes() string { } // supportsCapability is part of the Flavor interface. -func (mysqlGRFlavor) supportsCapability(serverVersion string, capability FlavorCapability) (bool, error) { - switch capability { - case InstantDDLFlavorCapability, - InstantExpandEnumCapability, - InstantAddLastColumnFlavorCapability, - InstantAddDropVirtualColumnFlavorCapability, - InstantChangeColumnDefaultFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 0) - case InstantAddDropColumnFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 29) - case TransactionalGtidExecutedFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 17) - case FastDropTableFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 23) - case MySQLJSONFlavorCapability: - return ServerVersionAtLeast(serverVersion, 5, 7, 0) - case MySQLUpgradeInServerFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 16) - case DynamicRedoLogCapacityFlavorCapability: - return ServerVersionAtLeast(serverVersion, 8, 0, 30) - default: - return false, nil - } +func (f mysqlGRFlavor) supportsCapability(capability capabilities.FlavorCapability) (bool, error) { + return capabilities.MySQLVersionHasCapability(f.serverVersion, capability) } func init() { - flavors[GRFlavorID] = newMysqlGRFlavor + flavorFuncs[GRFlavorID] = newMysqlGRFlavor } diff --git a/go/mysql/flavor_mysqlgr_test.go b/go/mysql/flavor_mysqlgr_test.go index df7876eca1c..348aefca934 100644 --- a/go/mysql/flavor_mysqlgr_test.go +++ b/go/mysql/flavor_mysqlgr_test.go @@ -16,13 +16,15 @@ limitations under the License. 
package mysql import ( + "fmt" "testing" - "gotest.tools/assert" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -53,3 +55,130 @@ func TestMysqlGRReplicationApplierLagParse(t *testing.T) { parseReplicationApplierLag(&res, row) assert.Equal(t, uint32(100), res.ReplicationLagSeconds) } + +func TestMysqlGRSupportCapability(t *testing.T) { + testcases := []struct { + version string + capability capabilities.FlavorCapability + isCapable bool + expectError error + }{ + { + version: "8.0.14", + capability: capabilities.InstantDDLFlavorCapability, + isCapable: true, + }, + { + version: "8.0.20", + capability: capabilities.TransactionalGtidExecutedFlavorCapability, + isCapable: true, + }, + { + version: "8.0.0", + capability: capabilities.InstantAddLastColumnFlavorCapability, + isCapable: true, + }, + { + version: "8.0.0", + capability: capabilities.InstantAddDropColumnFlavorCapability, + isCapable: false, + }, + { + version: "5.6.7", + capability: capabilities.InstantDDLFlavorCapability, + isCapable: false, + }, + { + version: "5.7.29", + capability: capabilities.TransactionalGtidExecutedFlavorCapability, + isCapable: false, + }, + { + version: "5.6.7", + capability: capabilities.MySQLJSONFlavorCapability, + isCapable: false, + }, + { + version: "5.7.29", + capability: capabilities.MySQLJSONFlavorCapability, + isCapable: true, + }, + { + version: "8.0.30", + capability: capabilities.DynamicRedoLogCapacityFlavorCapability, + isCapable: true, + }, + { + version: "8.0.29", + capability: capabilities.DynamicRedoLogCapacityFlavorCapability, + isCapable: false, + }, + { + version: "5.7.38", + capability: capabilities.DynamicRedoLogCapacityFlavorCapability, + isCapable: false, + }, + { + version: "8.0.21", + capability: capabilities.DisableRedoLogFlavorCapability, + isCapable: true, + }, + { + version: "8.0.20", + 
capability: capabilities.DisableRedoLogFlavorCapability, + isCapable: false, + }, + { + version: "8.0.15", + capability: capabilities.CheckConstraintsCapability, + isCapable: false, + }, + { + version: "8.0.20", + capability: capabilities.CheckConstraintsCapability, + isCapable: true, + }, + { + version: "8.0.20-log", + capability: capabilities.CheckConstraintsCapability, + isCapable: true, + }, + { + version: "5.7.38", + capability: capabilities.PerformanceSchemaDataLocksTableCapability, + isCapable: false, + }, + { + version: "8.0.20", + capability: capabilities.PerformanceSchemaDataLocksTableCapability, + isCapable: true, + }, + { + // What happens if server version is unspecified + version: "", + capability: capabilities.CheckConstraintsCapability, + isCapable: false, + expectError: capabilities.ErrUnspecifiedServerVersion, + }, + { + // Some ridiculous version. But seeing that we force the flavor to be mysqlGR, + // then this far futuristic version should actually work. + version: "5914.234.17", + capability: capabilities.CheckConstraintsCapability, + isCapable: true, + }, + } + for _, tc := range testcases { + name := fmt.Sprintf("%s %v", tc.version, tc.capability) + t.Run(name, func(t *testing.T) { + flavor := &mysqlGRFlavor{mysqlFlavor{serverVersion: tc.version}} + isCapable, err := flavor.supportsCapability(tc.capability) + if tc.expectError != nil { + assert.ErrorContains(t, err, tc.expectError.Error()) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.isCapable, isCapable) + }) + } +} diff --git a/go/mysql/flavor_test.go b/go/mysql/flavor_test.go index 891725b5afc..172ffa67eb2 100644 --- a/go/mysql/flavor_test.go +++ b/go/mysql/flavor_test.go @@ -18,153 +18,107 @@ import ( "testing" "github.com/stretchr/testify/assert" -) -func TestServerVersionAtLeast(t *testing.T) { - testcases := []struct { - version string - parts []int - expect bool - expectError bool - }{ - { - version: "8.0.14", - parts: []int{8, 0, 14}, - expect: true, - }, - { - 
version: "8.0.14-log", - parts: []int{8, 0, 14}, - expect: true, - }, - { - version: "8.0.14", - parts: []int{8, 0, 13}, - expect: true, - }, - { - version: "8.0.14", - parts: []int{7, 5, 20}, - expect: true, - }, - { - version: "8.0.14", - parts: []int{7, 5}, - expect: true, - }, - { - version: "8.0.14-log", - parts: []int{7, 5, 20}, - expect: true, - }, - { - version: "8.0.14", - parts: []int{8, 1, 2}, - expect: false, - }, - { - version: "8.0.14", - parts: []int{10, 1, 2}, - expect: false, - }, - { - version: "8.0", - parts: []int{8, 0, 14}, - expect: false, - }, - { - version: "8.0.x", - parts: []int{8, 0, 14}, - expectError: true, - }, - } - for _, tc := range testcases { - result, err := ServerVersionAtLeast(tc.version, tc.parts...) - if tc.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tc.expect, result) - } - } -} + "vitess.io/vitess/go/mysql/capabilities" +) -func TestGetFlavor(t *testing.T) { +func TestServerVersionCapableOf(t *testing.T) { testcases := []struct { version string - capability FlavorCapability + capability capabilities.FlavorCapability isCapable bool }{ { version: "8.0.14", - capability: InstantDDLFlavorCapability, + capability: capabilities.InstantDDLFlavorCapability, isCapable: true, }, { version: "8.0.20", - capability: TransactionalGtidExecutedFlavorCapability, + capability: capabilities.TransactionalGtidExecutedFlavorCapability, isCapable: true, }, { version: "8.0.0", - capability: InstantAddLastColumnFlavorCapability, + capability: capabilities.InstantAddLastColumnFlavorCapability, isCapable: true, }, { version: "8.0.0", - capability: InstantAddDropColumnFlavorCapability, - isCapable: false, - }, - { - version: "5.6.7", - capability: InstantDDLFlavorCapability, + capability: capabilities.InstantAddDropColumnFlavorCapability, isCapable: false, }, { version: "5.7.29", - capability: TransactionalGtidExecutedFlavorCapability, - isCapable: false, - }, - { - version: "5.6.7", - capability: 
MySQLJSONFlavorCapability, + capability: capabilities.TransactionalGtidExecutedFlavorCapability, isCapable: false, }, { version: "5.7.29", - capability: MySQLJSONFlavorCapability, + capability: capabilities.MySQLJSONFlavorCapability, isCapable: true, }, { version: "8.0.30", - capability: DynamicRedoLogCapacityFlavorCapability, + capability: capabilities.DynamicRedoLogCapacityFlavorCapability, isCapable: true, }, { version: "8.0.29", - capability: DynamicRedoLogCapacityFlavorCapability, + capability: capabilities.DynamicRedoLogCapacityFlavorCapability, isCapable: false, }, { version: "5.7.38", - capability: DynamicRedoLogCapacityFlavorCapability, + capability: capabilities.DynamicRedoLogCapacityFlavorCapability, isCapable: false, }, { version: "8.0.21", - capability: DisableRedoLogFlavorCapability, + capability: capabilities.DisableRedoLogFlavorCapability, isCapable: true, }, { version: "8.0.20", - capability: DisableRedoLogFlavorCapability, + capability: capabilities.DisableRedoLogFlavorCapability, + isCapable: false, + }, + { + version: "8.0.15", + capability: capabilities.CheckConstraintsCapability, + isCapable: false, + }, + { + version: "8.0.20", + capability: capabilities.CheckConstraintsCapability, + isCapable: true, + }, + { + version: "8.0.20-log", + capability: capabilities.CheckConstraintsCapability, + isCapable: true, + }, + { + version: "5.7.38", + capability: capabilities.PerformanceSchemaDataLocksTableCapability, isCapable: false, }, + { + version: "8.0.20", + capability: capabilities.PerformanceSchemaDataLocksTableCapability, + isCapable: true, + }, + { + // Some ridiculous version + version: "5914.234.17", + capability: capabilities.CheckConstraintsCapability, + isCapable: true, + }, } for _, tc := range testcases { name := fmt.Sprintf("%s %v", tc.version, tc.capability) t.Run(name, func(t *testing.T) { - _, capableOf, _ := GetFlavor(tc.version, nil) + capableOf := ServerVersionCapableOf(tc.version) isCapable, err := capableOf(tc.capability) 
assert.NoError(t, err) assert.Equal(t, tc.isCapable, isCapable) diff --git a/go/mysql/format/float.go b/go/mysql/format/float.go index d9655281e1c..b9a147e5c14 100644 --- a/go/mysql/format/float.go +++ b/go/mysql/format/float.go @@ -25,11 +25,7 @@ const expUpperThreshold = 1000000000000000.0 const expLowerThreshold = 0.000000000000001 // FormatFloat formats a float64 as a byte string in a similar way to what MySQL does -func FormatFloat(v float64) []byte { - return AppendFloat(nil, v) -} - -func AppendFloat(buf []byte, f float64) []byte { +func FormatFloat(f float64) []byte { format := byte('f') if f >= expUpperThreshold || f <= -expUpperThreshold || (f < expLowerThreshold && f > -expLowerThreshold) { format = 'g' @@ -39,7 +35,7 @@ func AppendFloat(buf []byte, f float64) []byte { // do that, and there's no way to customize it, so we must strip the // redundant positive sign manually // e.g. 1.234E+56789 -> 1.234E56789 - fstr := strconv.AppendFloat(buf, f, format, -1, 64) + fstr := strconv.AppendFloat(nil, f, format, -1, 64) if idx := bytes.IndexByte(fstr, 'e'); idx >= 0 { if fstr[idx+1] == '+' { fstr = append(fstr[:idx+1], fstr[idx+2:]...) diff --git a/go/mysql/format/float_test.go b/go/mysql/format/float_test.go new file mode 100644 index 00000000000..218e139840b --- /dev/null +++ b/go/mysql/format/float_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package format + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFormatFloat(t *testing.T) { + testCases := []struct { + input float64 + want []byte + }{ + {123.456, []byte("123.456")}, + {-1.13456e15, []byte("-1.13456e15")}, + {2e15, []byte("2e15")}, + {2e-15, []byte("0.000000000000002")}, + {-1e-16, []byte("-1e-16")}, + {0.0, []byte("0")}, + } + + for _, tCase := range testCases { + got := FormatFloat(tCase.input) + assert.Equal(t, tCase.want, got) + } +} diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go index c2b27d6f6d4..284189c30e8 100644 --- a/go/mysql/handshake_test.go +++ b/go/mysql/handshake_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/tlstest" @@ -45,7 +46,7 @@ func TestClearTextClientAuth(t *testing.T) { defer authServer.close() // Create the listener. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -77,6 +78,7 @@ func TestClearTextClientAuth(t *testing.T) { defer conn.Close() + assert.Equal(t, collations.ID(collations.CollationUtf8mb4ID), conn.CharacterSet) // Run a 'select rows' command with results. result, err := conn.ExecuteFetch("select rows", 10000, true) require.NoError(t, err, "ExecuteFetch failed: %v", err) @@ -99,7 +101,7 @@ func TestSSLConnection(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -176,6 +178,7 @@ func testSSLConnectionBasics(t *testing.T, params *ConnParams) { defer conn.Close() assert.Equal(t, "user1", conn.User, "Invalid conn.User, got %v was expecting user1", conn.User) + assert.Equal(t, collations.ID(collations.CollationUtf8mb4ID), conn.CharacterSet) // Run a 'select rows' command with results. result, err := conn.ExecuteFetch("select rows", 10000, true) diff --git a/go/mysql/hex/hex_test.go b/go/mysql/hex/hex_test.go new file mode 100644 index 00000000000..afd94e306e3 --- /dev/null +++ b/go/mysql/hex/hex_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hex + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEncodeBytes(t *testing.T) { + testCases := []struct { + input []byte + want []byte + }{ + {[]byte{0xAB, 0xCD, 0xEF}, []byte("ABCDEF")}, + {[]byte{0x01, 0x23, 0x45}, []byte("012345")}, + } + + for _, tCase := range testCases { + got := EncodeBytes(tCase.input) + assert.Equal(t, tCase.want, got) + } +} + +func TestEncodeUint(t *testing.T) { + testCases := []struct { + input uint64 + want []byte + }{ + {0, []byte("0")}, + {123, []byte("7B")}, + {255, []byte("FF")}, + {4096, []byte("1000")}, + } + + for _, tCase := range testCases { + got := EncodeUint(tCase.input) + assert.Equal(t, tCase.want, got) + } +} + +func TestDecodeUint(t *testing.T) { + testCases := []struct { + input uint64 + want []byte + }{ + {0, []byte{0}}, + {123, []byte{0x01, 0x23}}, + {255, []byte{0x02, 0x55}}, + {4096, []byte{0x40, 0x96}}, + } + + for _, tCase := range testCases { + got := DecodeUint(tCase.input) + assert.Equal(t, tCase.want, got) + } +} + +func TestDecodedLen(t *testing.T) { + testCases := []struct { + input []byte + want int + }{ + {[]byte{0}, 1}, + {[]byte{0x01, 0x23}, 1}, + {[]byte("ABCDE"), 3}, + {[]byte("0123456789ABCDEF"), 8}, + } + + for _, tCase := range testCases { + got := DecodedLen(tCase.input) + assert.Equal(t, tCase.want, got) + } +} + +func TestDecodeBytes(t *testing.T) { + err := DecodeBytes([]byte("testDst"), []byte("1")) + assert.NoError(t, err) + + err = DecodeBytes([]byte("testDst"), []byte("12")) + assert.NoError(t, err) + + // DecodeBytes should return an error for "é" as + // hex.decode returns an error for non-ASCII characters + err = DecodeBytes([]byte("testDst"), []byte("é")) + assert.Error(t, err) +} diff --git a/go/mysql/icuregex/compiler.go b/go/mysql/icuregex/compiler.go index 971cd439fb3..6aa92e268bb 100644 --- a/go/mysql/icuregex/compiler.go +++ b/go/mysql/icuregex/compiler.go @@ -328,7 +328,7 @@ func (c *compiler) compile(pat []rune) error { // Main loop 
for the regex pattern parsing state machine. // Runs once per state transition. // Each time through optionally performs, depending on the state table, - // - an advance to the the next pattern char + // - an advance to the next pattern char // - an action to be performed. // - pushing or popping a state to/from the local state return stack. // file regexcst.txt is the source for the state table. The logic behind @@ -2698,7 +2698,7 @@ func (c *compiler) compileInterval(init opcode, loop opcode) { // Goes at end of the block being looped over, so just append to the code so far. c.appendOp(loop, topOfBlock) - if (c.intervalLow&0xff000000) != 0 || (c.intervalUpper > 0 && (c.intervalUpper&0xff000000) != 0) { + if c.intervalLow > 0x00ffffff || (c.intervalUpper > 0 && c.intervalUpper > 0x00ffffff) { c.error(NumberTooBig) } @@ -3195,7 +3195,7 @@ func (c *compiler) maxMatchLength(start, end int) int32 { } blockLen := c.maxMatchLength(loc+4, loopEndLoc-1) // Recursive call. - updatedLen := int(currentLen) + int(blockLen)*maxLoopCount + updatedLen := int64(currentLen) + int64(blockLen)*int64(maxLoopCount) if updatedLen >= math.MaxInt32 { currentLen = math.MaxInt32 break diff --git a/go/mysql/icuregex/internal/uset/unicode_set.go b/go/mysql/icuregex/internal/uset/unicode_set.go index e2f7bd8cbca..d85bab47532 100644 --- a/go/mysql/icuregex/internal/uset/unicode_set.go +++ b/go/mysql/icuregex/internal/uset/unicode_set.go @@ -58,10 +58,6 @@ func New() *UnicodeSet { return &UnicodeSet{list: buf} } -func FromRunes(list []rune) *UnicodeSet { - return &UnicodeSet{list: list} -} - func (u *UnicodeSet) ensureBufferCapacity(c int) { if cap(u.buffer) < c { u.buffer = make([]rune, c) diff --git a/go/mysql/json/helpers.go b/go/mysql/json/helpers.go index 1df38b2d769..760d59c5624 100644 --- a/go/mysql/json/helpers.go +++ b/go/mysql/json/helpers.go @@ -106,6 +106,10 @@ func NewFromSQL(v sqltypes.Value) (*Value, error) { return NewDate(v.RawStr()), nil case v.IsTime(): return 
NewTime(v.RawStr()), nil + case v.IsEnum(): + return NewString(v.RawStr()), nil + case v.IsSet(): + return NewString(v.RawStr()), nil default: return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "cannot coerce %v as a JSON type", v) } diff --git a/go/mysql/json/marshal.go b/go/mysql/json/marshal.go index 8e63cddb171..d1a0072ccbb 100644 --- a/go/mysql/json/marshal.go +++ b/go/mysql/json/marshal.go @@ -169,13 +169,12 @@ func MarshalSQLValue(buf []byte) (*sqltypes.Value, error) { if len(buf) == 0 { buf = sqltypes.NullBytes } + jsonVal, err := parser.ParseBytes(buf) if err != nil { return nil, err } + newVal := sqltypes.MakeTrusted(querypb.Type_JSON, jsonVal.MarshalSQLTo(nil)) - if err != nil { - return nil, err - } return &newVal, nil } diff --git a/go/mysql/json/parser.go b/go/mysql/json/parser.go index 35278263877..b7a87c25756 100644 --- a/go/mysql/json/parser.go +++ b/go/mysql/json/parser.go @@ -669,6 +669,14 @@ type Value struct { n NumberType } +func (v *Value) Size() int32 { + return 0 +} + +func (v *Value) Scale() int32 { + return 0 +} + func (v *Value) MarshalDate() string { if d, ok := v.Date(); ok { return d.ToStdTime(time.Local).Format("2006-01-02") @@ -941,8 +949,8 @@ func (v *Value) Time() (datetime.Time, bool) { if v.t != TypeTime { return datetime.Time{}, false } - t, _, ok := datetime.ParseTime(v.s, datetime.DefaultPrecision) - return t, ok + t, _, state := datetime.ParseTime(v.s, datetime.DefaultPrecision) + return t, state == datetime.TimeOK } // Object returns the underlying JSON object for the v. 
diff --git a/go/mysql/ldapauthserver/auth_server_ldap.go b/go/mysql/ldapauthserver/auth_server_ldap.go index d5fcea027ac..5e6010fac0e 100644 --- a/go/mysql/ldapauthserver/auth_server_ldap.go +++ b/go/mysql/ldapauthserver/auth_server_ldap.go @@ -24,32 +24,16 @@ import ( "sync" "time" - "github.com/spf13/pflag" - ldap "gopkg.in/ldap.v2" + "gopkg.in/ldap.v2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vttls" querypb "vitess.io/vitess/go/vt/proto/query" ) -var ( - ldapAuthConfigFile string - ldapAuthConfigString string - ldapAuthMethod string -) - -func init() { - servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { - fs.StringVar(&ldapAuthConfigFile, "mysql_ldap_auth_config_file", "", "JSON File from which to read LDAP server config.") - fs.StringVar(&ldapAuthConfigString, "mysql_ldap_auth_config_string", "", "JSON representation of LDAP server config.") - fs.StringVar(&ldapAuthMethod, "mysql_ldap_auth_method", string(mysql.MysqlClearPassword), "client-side authentication method to use. 
Supported values: mysql_clear_password, dialog.") - }) -} - // AuthServerLdap implements AuthServer with an LDAP backend type AuthServerLdap struct { Client @@ -63,7 +47,7 @@ type AuthServerLdap struct { } // Init is public so it can be called from plugin_auth_ldap.go (go/cmd/vtgate) -func Init() { +func Init(ldapAuthConfigFile, ldapAuthConfigString, ldapAuthMethod string) { if ldapAuthConfigFile == "" && ldapAuthConfigString == "" { log.Infof("Not configuring AuthServerLdap because mysql_ldap_auth_config_file and mysql_ldap_auth_config_string are empty") return diff --git a/go/mysql/mysql_fuzzer.go b/go/mysql/mysql_fuzzer.go index 2a3e797a797..7370ad8a479 100644 --- a/go/mysql/mysql_fuzzer.go +++ b/go/mysql/mysql_fuzzer.go @@ -31,6 +31,7 @@ import ( gofuzzheaders "github.com/AdaLogics/go-fuzz-headers" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/tlstest" @@ -76,8 +77,8 @@ func createFuzzingSocketPair() (net.Listener, *Conn, *Conn) { } // Create a Conn on both sides. 
- cConn := newConn(clientConn) - sConn := newConn(serverConn) + cConn := newConn(clientConn, DefaultFlushDelay) + sConn := newConn(serverConn, DefaultFlushDelay) return listener, sConn, cConn } @@ -196,7 +197,7 @@ func FuzzHandleNextCommand(data []byte) int { writeToPass: []bool{false}, pos: -1, queryPacket: data, - }) + }, DefaultFlushDelay) sConn.PrepareData = map[uint32]*PrepareData{} handler := &fuzztestRun{} @@ -327,7 +328,7 @@ func FuzzTLSServer(data []byte) int { Password: "password1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, 0, DefaultFlushDelay, fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion), 512, 0) if err != nil { return -1 } diff --git a/go/mysql/query.go b/go/mysql/query.go index 7cfeafd258f..22299e5cc80 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "errors" "fmt" "math" "strconv" @@ -26,6 +27,7 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" @@ -33,6 +35,17 @@ import ( // This file contains the methods related to queries. +var ( + ErrExecuteFetchMultipleResults = vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected multiple results. Use ExecuteFetchMulti instead.") +) + +const ( + // Use as `maxrows` in `ExecuteFetch` and related functions, to indicate no rows should be fetched. + // This is different than specifying `0`, because `0` means "expect zero results", while this means + // "do not attempt to read any results into memory". + FETCH_NO_ROWS = math.MinInt +) + // // Client side methods. // @@ -147,7 +160,7 @@ func (c *Conn) readColumnDefinition(field *querypb.Field, index int) error { } // Convert MySQL type to Vitess type. 
- field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) + field.Type, err = sqltypes.MySQLToType(t, int64(flags)) if err != nil { return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } @@ -243,7 +256,7 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { } // Convert MySQL type to Vitess type. - field.Type, err = sqltypes.MySQLToType(int64(t), int64(flags)) + field.Type, err = sqltypes.MySQLToType(t, int64(flags)) if err != nil { return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed for column %v: %v", t, flags, index, err) } @@ -302,10 +315,35 @@ func (c *Conn) parseRow(data []byte, fields []*querypb.Field, reader func([]byte // 2. if the server closes the connection when a command is in flight, // readComQueryResponse will fail, and we'll return CRServerLost(2013). func (c *Conn) ExecuteFetch(query string, maxrows int, wantfields bool) (result *sqltypes.Result, err error) { - result, _, err = c.ExecuteFetchMulti(query, maxrows, wantfields) + result, more, err := c.ExecuteFetchMulti(query, maxrows, wantfields) + if more { + // Multiple results are unexpected. Prioritize this "unexpected" error over whatever error we got from the first result. + err = errors.Join(ErrExecuteFetchMultipleResults, err) + } + // draining to make the connection clean. + err = c.drainMoreResults(more, err) return result, err } +// ExecuteFetchMultiDrain is for executing multiple statements in one call, but without +// caring for any results. The function returns an error if any of the statements fail. +// The function drains the query results of all statements, even if there's an error. 
+func (c *Conn) ExecuteFetchMultiDrain(query string) (err error) { + _, more, err := c.ExecuteFetchMulti(query, FETCH_NO_ROWS, false) + return c.drainMoreResults(more, err) +} + +// drainMoreResults ensures to drain all query results, even if there's an error. +// We collect all errors until we consume all results. +func (c *Conn) drainMoreResults(more bool, err error) error { + for more { + var moreErr error + _, more, _, moreErr = c.ReadQueryResult(FETCH_NO_ROWS, false) + err = errors.Join(err, moreErr) + } + return err +} + // ExecuteFetchMulti is for fetching multiple results from a multi-statement result. // It returns an additional 'more' flag. If it is set, you must fetch the additional // results using ReadQueryResult. @@ -313,7 +351,7 @@ func (c *Conn) ExecuteFetchMulti(query string, maxrows int, wantfields bool) (re defer func() { if err != nil { if sqlerr, ok := err.(*sqlerror.SQLError); ok { - sqlerr.Query = query + sqlerr.Query = sqlparser.TruncateQuery(query, c.truncateErrLen) } } }() @@ -337,7 +375,7 @@ func (c *Conn) ExecuteFetchWithWarningCount(query string, maxrows int, wantfield defer func() { if err != nil { if sqlerr, ok := err.(*sqlerror.SQLError); ok { - sqlerr.Query = query + sqlerr.Query = sqlparser.TruncateQuery(query, c.truncateErrLen) } } }() @@ -353,8 +391,9 @@ func (c *Conn) ExecuteFetchWithWarningCount(query string, maxrows int, wantfield // ReadQueryResult gets the result from the last written query. func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, bool, uint16, error) { + var packetOk PacketOK // Get the result. 
- colNumber, packetOk, err := c.readComQueryResponse() + colNumber, err := c.readComQueryResponse(&packetOk) if err != nil { return nil, false, 0, err } @@ -440,15 +479,15 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, more = (statusFlags & ServerMoreResultsExists) != 0 result.StatusFlags = statusFlags } else { - packetOk, err := c.parseOKPacket(data) - if err != nil { + var packetEof PacketOK + if err := c.parseOKPacket(&packetEof, data); err != nil { return nil, false, 0, err } - warnings = packetOk.warnings - more = (packetOk.statusFlags & ServerMoreResultsExists) != 0 - result.SessionStateChanges = packetOk.sessionStateData - result.StatusFlags = packetOk.statusFlags - result.Info = packetOk.info + warnings = packetEof.warnings + more = (packetEof.statusFlags & ServerMoreResultsExists) != 0 + result.SessionStateChanges = packetEof.sessionStateData + result.StatusFlags = packetEof.statusFlags + result.Info = packetEof.info } return result, more, warnings, nil @@ -458,6 +497,11 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, return nil, false, 0, ParseErrorPacket(data) } + if maxrows == FETCH_NO_ROWS { + c.recycleReadPacket() + continue + } + // Check we're not over the limit before we add more. 
if len(result.Rows) == maxrows { c.recycleReadPacket() @@ -496,35 +540,34 @@ func (c *Conn) drainResults() error { } } -func (c *Conn) readComQueryResponse() (int, *PacketOK, error) { +func (c *Conn) readComQueryResponse(packetOk *PacketOK) (int, error) { data, err := c.readEphemeralPacket() if err != nil { - return 0, nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) + return 0, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err) } defer c.recycleReadPacket() if len(data) == 0 { - return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") + return 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet") } switch data[0] { case OKPacket: - packetOk, err := c.parseOKPacket(data) - return 0, packetOk, err + return 0, c.parseOKPacket(packetOk, data) case ErrPacket: // Error - return 0, nil, ParseErrorPacket(data) + return 0, ParseErrorPacket(data) case 0xfb: // Local infile - return 0, nil, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "not implemented") + return 0, vterrors.Errorf(vtrpc.Code_UNIMPLEMENTED, "not implemented") } n, pos, ok := readLenEncInt(data, 0) if !ok { - return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number") + return 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number") } if pos != len(data) { - return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response") + return 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response") } - return int(n), &PacketOK{}, nil + return int(n), nil } // @@ -596,7 +639,7 @@ func (c *Conn) parseComStmtExecute(prepareData map[uint32]*PrepareData, data []b } // convert MySQL type 
to internal type. - valType, err := sqltypes.MySQLToType(int64(mysqlType), int64(flags)) + valType, err := sqltypes.MySQLToType(mysqlType, int64(flags)) if err != nil { return stmtID, 0, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "MySQLToType(%v,%v) failed: %v", mysqlType, flags, err) } @@ -930,7 +973,7 @@ func (c *Conn) writeColumnDefinition(field *querypb.Field) error { pos = writeByte(data, pos, 0x0c) pos = writeUint16(data, pos, uint16(field.Charset)) pos = writeUint32(data, pos, field.ColumnLength) - pos = writeByte(data, pos, byte(typ)) + pos = writeByte(data, pos, typ) pos = writeUint16(data, pos, uint16(flags)) pos = writeByte(data, pos, byte(field.Decimals)) pos = writeUint16(data, pos, uint16(0x0000)) diff --git a/go/mysql/query_benchmark_test.go b/go/mysql/query_benchmark_test.go index a9c44dbf50c..357bc8997ed 100644 --- a/go/mysql/query_benchmark_test.go +++ b/go/mysql/query_benchmark_test.go @@ -18,7 +18,7 @@ package mysql import ( "context" - "math/rand" + "math/rand/v2" "net" "strings" "testing" @@ -98,7 +98,7 @@ func benchmarkQuery(b *testing.B, threads int, query string, mkCfg mkListenerCfg execQuery := query if execQuery == "" { // generate random query - n := rand.Intn(maxPacketSize-len(benchmarkQueryPrefix)) + 1 + n := rand.IntN(maxPacketSize-len(benchmarkQueryPrefix)) + 1 execQuery = benchmarkQueryPrefix + strings.Repeat("x", n) } diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go index 07012f83b9f..0e1f48c1804 100644 --- a/go/mysql/query_test.go +++ b/go/mysql/query_test.go @@ -413,7 +413,7 @@ func TestQueries(t *testing.T) { { Name: "name", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{ @@ -451,15 +451,15 @@ func TestQueries(t *testing.T) { {Name: "Type_DATETIME ", Type: querypb.Type_DATETIME, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, 
{Name: "Type_YEAR ", Type: querypb.Type_YEAR, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_UNSIGNED_FLAG | querypb.MySqlFlag_NUM_FLAG)}, {Name: "Type_DECIMAL ", Type: querypb.Type_DECIMAL, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "Type_TEXT ", Type: querypb.Type_TEXT, Charset: uint32(collations.Default())}, + {Name: "Type_TEXT ", Type: querypb.Type_TEXT, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "Type_BLOB ", Type: querypb.Type_BLOB, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, - {Name: "Type_VARCHAR ", Type: querypb.Type_VARCHAR, Charset: uint32(collations.Default())}, + {Name: "Type_VARCHAR ", Type: querypb.Type_VARCHAR, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "Type_VARBINARY", Type: querypb.Type_VARBINARY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, - {Name: "Type_CHAR ", Type: querypb.Type_CHAR, Charset: uint32(collations.Default())}, + {Name: "Type_CHAR ", Type: querypb.Type_CHAR, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "Type_BINARY ", Type: querypb.Type_BINARY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, {Name: "Type_BIT ", Type: querypb.Type_BIT, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, - {Name: "Type_ENUM ", Type: querypb.Type_ENUM, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_ENUM_FLAG)}, - {Name: "Type_SET ", Type: querypb.Type_SET, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_SET_FLAG)}, + {Name: "Type_ENUM ", Type: querypb.Type_ENUM, Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), Flags: uint32(querypb.MySqlFlag_ENUM_FLAG)}, + {Name: "Type_SET ", Type: querypb.Type_SET, Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), Flags: 
uint32(querypb.MySqlFlag_SET_FLAG)}, // Skip TUPLE, not possible in Result. {Name: "Type_GEOMETRY ", Type: querypb.Type_GEOMETRY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG | querypb.MySqlFlag_BLOB_FLAG)}, {Name: "Type_JSON ", Type: querypb.Type_JSON, Charset: collations.CollationUtf8mb4ID}, @@ -537,7 +537,7 @@ func TestQueries(t *testing.T) { { Name: "name", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{ diff --git a/go/mysql/replication.go b/go/mysql/replication.go index 399698d6a2a..08baaa169c8 100644 --- a/go/mysql/replication.go +++ b/go/mysql/replication.go @@ -17,6 +17,8 @@ limitations under the License. package mysql import ( + "fmt" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -138,12 +140,66 @@ func (c *Conn) WriteBinlogEvent(ev BinlogEvent, semiSyncEnabled bool) error { return nil } +type SemiSyncType int8 + +const ( + SemiSyncTypeUnknown SemiSyncType = iota + SemiSyncTypeOff + SemiSyncTypeSource + SemiSyncTypeMaster +) + // SemiSyncExtensionLoaded checks if the semisync extension has been loaded. // It should work for both MariaDB and MySQL. 
-func (c *Conn) SemiSyncExtensionLoaded() bool { - qr, err := c.ExecuteFetch("SHOW GLOBAL VARIABLES LIKE 'rpl_semi_sync%'", 10, false) +func (c *Conn) SemiSyncExtensionLoaded() (SemiSyncType, error) { + qr, err := c.ExecuteFetch("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", 10, false) + if err != nil { + return SemiSyncTypeUnknown, err + } + for _, row := range qr.Rows { + if row[0].ToString() == "rpl_semi_sync_source_enabled" { + return SemiSyncTypeSource, nil + } + if row[0].ToString() == "rpl_semi_sync_master_enabled" { + return SemiSyncTypeMaster, nil + } + } + return SemiSyncTypeOff, nil +} + +func (c *Conn) BinlogInformation() (string, bool, bool, string, error) { + replicaField := c.flavor.binlogReplicatedUpdates() + + query := fmt.Sprintf("select @@global.binlog_format, @@global.log_bin, %s, @@global.binlog_row_image", replicaField) + qr, err := c.ExecuteFetch(query, 1, true) if err != nil { - return false + return "", false, false, "", err } - return len(qr.Rows) >= 1 + if len(qr.Rows) != 1 { + return "", false, false, "", fmt.Errorf("unable to read global variables binlog_format, log_bin, %s, binlog_row_image", replicaField) + } + res := qr.Named().Row() + binlogFormat, err := res.ToString("@@global.binlog_format") + if err != nil { + return "", false, false, "", err + } + logBin, err := res.ToInt64("@@global.log_bin") + if err != nil { + return "", false, false, "", err + } + logReplicaUpdates, err := res.ToInt64(replicaField) + if err != nil { + return "", false, false, "", err + } + binlogRowImage, err := res.ToString("@@global.binlog_row_image") + if err != nil { + return "", false, false, "", err + } + return binlogFormat, logBin == 1, logReplicaUpdates == 1, binlogRowImage, nil +} + +// ResetBinaryLogsCommand returns the command used to reset the +// binary logs on the server. 
+func (c *Conn) ResetBinaryLogsCommand() string { + return c.flavor.resetBinaryLogsCommand() } diff --git a/go/mysql/replication/mysql56_gtid.go b/go/mysql/replication/mysql56_gtid.go index 4ec861b84e5..dd23fb2092b 100644 --- a/go/mysql/replication/mysql56_gtid.go +++ b/go/mysql/replication/mysql56_gtid.go @@ -29,6 +29,10 @@ import ( // Mysql56FlavorID is the string identifier for the Mysql56 flavor. const Mysql56FlavorID = "MySQL56" +var ( + ErrExpectMysql56Flavor = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "expected MySQL GTID position but found a different or invalid format.") +) + // parseMysql56GTID is registered as a GTID parser. func parseMysql56GTID(s string) (GTID, error) { // Split into parts. @@ -128,3 +132,32 @@ func (gtid Mysql56GTID) GTIDSet() GTIDSet { func init() { gtidParsers[Mysql56FlavorID] = parseMysql56GTID } + +// DecodePositionMySQL56 converts a string into a Position value with the MySQL56 flavor. The function returns an error if the given +// string does not translate to a MySQL56 GTID set. +// The prefix "MySQL56/" is optional in the input string. 
Examples of inputs strings that produce valid result: +// - "MySQL56/16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" +// - "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-615" +func DecodePositionMySQL56(s string) (rp Position, gtidSet Mysql56GTIDSet, err error) { + if s == "" { + return rp, nil, nil + } + + flav, gtid, ok := strings.Cut(s, "/") + if !ok { + gtid = s + flav = Mysql56FlavorID + } + rp, err = ParsePosition(flav, gtid) + if err != nil { + return rp, nil, err + } + if !rp.MatchesFlavor(Mysql56FlavorID) { + return rp, nil, vterrors.Wrapf(ErrExpectMysql56Flavor, s) + } + gtidSet, ok = rp.GTIDSet.(Mysql56GTIDSet) + if !ok { + return rp, nil, vterrors.Wrapf(ErrExpectMysql56Flavor, s) + } + return rp, gtidSet, nil +} diff --git a/go/mysql/replication/mysql56_gtid_test.go b/go/mysql/replication/mysql56_gtid_test.go index 7a4bc9862a8..a8bffed72b9 100644 --- a/go/mysql/replication/mysql56_gtid_test.go +++ b/go/mysql/replication/mysql56_gtid_test.go @@ -153,3 +153,50 @@ func TestMysql56ParseGTID(t *testing.T) { require.NoError(t, err, "unexpected error: %v", err) assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want) } + +func TestDecodePositionMySQL56(t *testing.T) { + { + pos, gtidSet, err := DecodePositionMySQL56("") + assert.NoError(t, err) + assert.True(t, pos.IsZero()) + assert.Nil(t, gtidSet) + } + { + pos, gtidSet, err := DecodePositionMySQL56("MySQL56/00010203-0405-0607-0809-0A0B0C0D0E0F:1-615") + assert.NoError(t, err) + assert.False(t, pos.IsZero()) + assert.NotNil(t, gtidSet) + expectGTID := Mysql56GTIDSet{ + SID{ + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, + }: []interval{{start: 1, end: 615}}} + assert.Equal(t, expectGTID, gtidSet) + } + { + pos, gtidSet, err := DecodePositionMySQL56("00010203-0405-0607-0809-0A0B0C0D0E0F:1-615") + assert.NoError(t, err) + assert.False(t, pos.IsZero()) + assert.NotNil(t, gtidSet) + expectGTID := Mysql56GTIDSet{ + SID{ + 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 
0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, + }: []interval{{start: 1, end: 615}}} + assert.Equal(t, expectGTID, gtidSet) + } + { + _, _, err := DecodePositionMySQL56("q-22b6-11ed-b765-0a43f95f28a3:1-615") + assert.Error(t, err) + } + { + _, _, err := DecodePositionMySQL56("16b1039f-22b6-11ed-b765-0a43f95f28a3") + assert.Error(t, err) + } + { + _, _, err := DecodePositionMySQL56("FilePos/mysql-bin.000001:234") + assert.Error(t, err) + } + { + _, _, err := DecodePositionMySQL56("mysql-bin.000001:234") + assert.Error(t, err) + } +} diff --git a/go/mysql/replication/primary_status.go b/go/mysql/replication/primary_status.go index 679b152f9d4..511777a5a4a 100644 --- a/go/mysql/replication/primary_status.go +++ b/go/mysql/replication/primary_status.go @@ -24,7 +24,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -// PrimaryStatus holds replication information from SHOW MASTER STATUS. +// PrimaryStatus holds replication information from SHOW BINARY LOG STATUS. type PrimaryStatus struct { // Position represents the server's GTID based position. Position Position @@ -52,7 +52,7 @@ func ParseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) return status, nil } -// ParsePrimaryStatus parses the common fields of SHOW MASTER STATUS. +// ParsePrimaryStatus parses the common fields of SHOW BINARY LOG STATUS. func ParsePrimaryStatus(fields map[string]string) PrimaryStatus { status := PrimaryStatus{} diff --git a/go/mysql/replication/replication_position.go b/go/mysql/replication/replication_position.go index 240321f2c6f..a1a9fc2c9c1 100644 --- a/go/mysql/replication/replication_position.go +++ b/go/mysql/replication/replication_position.go @@ -214,23 +214,3 @@ func (rp *Position) MatchesFlavor(flavor string) bool { } return false } - -// Comparable returns whether the receiver is comparable to the supplied position, based on whether one -// of the two positions contains the other. 
-func (rp *Position) Comparable(other Position) bool { - return rp.GTIDSet.Contains(other.GTIDSet) || other.GTIDSet.Contains(rp.GTIDSet) -} - -// AllPositionsComparable returns true if all positions in the supplied list are comparable with one another, and false -// if any are non-comparable. -func AllPositionsComparable(positions []Position) bool { - for i := 0; i < len(positions); i++ { - for j := i + 1; j < len(positions); j++ { - if !positions[i].Comparable(positions[j]) { - return false - } - } - } - - return true -} diff --git a/go/mysql/replication/replication_status.go b/go/mysql/replication/replication_status.go index 6b3d1bf2214..9b7d674f2a9 100644 --- a/go/mysql/replication/replication_status.go +++ b/go/mysql/replication/replication_status.go @@ -25,7 +25,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -// ReplicationStatus holds replication information from SHOW SLAVE STATUS. +// ReplicationStatus holds replication information from SHOW REPLICA STATUS. type ReplicationStatus struct { // Position is the current position of the replica. For GTID replication implementations // it is the executed GTID set. 
For file replication implementation, it is same as @@ -222,9 +222,14 @@ func (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationS return diffSet, nil } -func ParseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := ParseReplicationStatus(resultMap) - uuidString := resultMap["Master_UUID"] +func ParseMysqlReplicationStatus(resultMap map[string]string, replicaTerminology bool) (ReplicationStatus, error) { + status := ParseReplicationStatus(resultMap, replicaTerminology) + + uuidField := "Source_UUID" + if !replicaTerminology { + uuidField = "Master_UUID" + } + uuidString := resultMap[uuidField] if uuidString != "" { sid, err := ParseSID(uuidString) if err != nil { @@ -251,7 +256,7 @@ func ParseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus } func ParseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := ParseReplicationStatus(resultMap) + status := ParseReplicationStatus(resultMap, false) var err error status.Position.GTIDSet, err = ParseMariadbGTIDSet(resultMap["Gtid_Slave_Pos"]) @@ -263,7 +268,7 @@ func ParseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStat } func ParseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) { - status := ParseReplicationStatus(resultMap) + status := ParseReplicationStatus(resultMap, false) status.Position = status.FilePosition status.RelayLogPosition = status.RelayLogSourceBinlogEquivalentPosition @@ -280,27 +285,53 @@ func ParseFilePosPrimaryStatus(resultMap map[string]string) (PrimaryStatus, erro } // ParseReplicationStatus parses the common (non-flavor-specific) fields of ReplicationStatus -func ParseReplicationStatus(fields map[string]string) ReplicationStatus { +func ParseReplicationStatus(fields map[string]string, replica bool) ReplicationStatus { // The field names in the map are identical to what we receive from the database // Hence the names still 
contain Master + sourceHostField := "Source_Host" + sourceUserField := "Source_User" + sslAllowedField := "Source_SSL_Allowed" + replicaIOField := "Replica_IO_Running" + replicaSQLField := "Replica_SQL_Running" + sourcePortField := "Source_Port" + sourceSecondsBehindField := "Seconds_Behind_Source" + sourceServerIDField := "Source_Server_Id" + execSourceLogPosField := "Exec_Source_Log_Pos" + relaySourceLogFileField := "Relay_Source_Log_File" + readSourceLogPosField := "Read_Source_Log_Pos" + sourceLogFileField := "Source_Log_File" + if !replica { + sourceHostField = "Master_Host" + sourceUserField = "Master_User" + sslAllowedField = "Master_SSL_Allowed" + replicaIOField = "Slave_IO_Running" + replicaSQLField = "Slave_SQL_Running" + sourcePortField = "Master_Port" + sourceSecondsBehindField = "Seconds_Behind_Master" + sourceServerIDField = "Master_Server_Id" + execSourceLogPosField = "Exec_Master_Log_Pos" + relaySourceLogFileField = "Relay_Master_Log_File" + readSourceLogPosField = "Read_Master_Log_Pos" + sourceLogFileField = "Master_Log_File" + } + status := ReplicationStatus{ - SourceHost: fields["Master_Host"], - SourceUser: fields["Master_User"], - SSLAllowed: fields["Master_SSL_Allowed"] == "Yes", + SourceHost: fields[sourceHostField], + SourceUser: fields[sourceUserField], + SSLAllowed: fields[sslAllowedField] == "Yes", AutoPosition: fields["Auto_Position"] == "1", UsingGTID: fields["Using_Gtid"] != "No" && fields["Using_Gtid"] != "", HasReplicationFilters: (fields["Replicate_Do_DB"] != "") || (fields["Replicate_Ignore_DB"] != "") || (fields["Replicate_Do_Table"] != "") || (fields["Replicate_Ignore_Table"] != "") || (fields["Replicate_Wild_Do_Table"] != "") || (fields["Replicate_Wild_Ignore_Table"] != ""), - // These fields are returned from the underlying DB and cannot be renamed - IOState: ReplicationStatusToState(fields["Slave_IO_Running"]), - LastIOError: fields["Last_IO_Error"], - SQLState: ReplicationStatusToState(fields["Slave_SQL_Running"]), - 
LastSQLError: fields["Last_SQL_Error"], + IOState: ReplicationStatusToState(fields[replicaIOField]), + LastIOError: fields["Last_IO_Error"], + SQLState: ReplicationStatusToState(fields[replicaSQLField]), + LastSQLError: fields["Last_SQL_Error"], } - parseInt, _ := strconv.ParseInt(fields["Master_Port"], 10, 32) + parseInt, _ := strconv.ParseInt(fields[sourcePortField], 10, 32) status.SourcePort = int32(parseInt) parseInt, _ = strconv.ParseInt(fields["Connect_Retry"], 10, 32) status.ConnectRetry = int32(parseInt) - parseUint, err := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 32) + parseUint, err := strconv.ParseUint(fields[sourceSecondsBehindField], 10, 32) if err != nil { // we could not parse the value into a valid uint32 -- most commonly because the value is NULL from the // database -- so let's reflect that the underlying value was unknown on our last check @@ -309,13 +340,13 @@ func ParseReplicationStatus(fields map[string]string) ReplicationStatus { status.ReplicationLagUnknown = false status.ReplicationLagSeconds = uint32(parseUint) } - parseUint, _ = strconv.ParseUint(fields["Master_Server_Id"], 10, 32) + parseUint, _ = strconv.ParseUint(fields[sourceServerIDField], 10, 32) status.SourceServerID = uint32(parseUint) parseUint, _ = strconv.ParseUint(fields["SQL_Delay"], 10, 32) status.SQLDelay = uint32(parseUint) - executedPosStr := fields["Exec_Master_Log_Pos"] - file := fields["Relay_Master_Log_File"] + executedPosStr := fields[execSourceLogPosField] + file := fields[relaySourceLogFileField] if file != "" && executedPosStr != "" { status.FilePosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, executedPosStr)) if err != nil { @@ -323,8 +354,8 @@ func ParseReplicationStatus(fields map[string]string) ReplicationStatus { } } - readPosStr := fields["Read_Master_Log_Pos"] - file = fields["Master_Log_File"] + readPosStr := fields[readSourceLogPosField] + file = fields[sourceLogFileField] if file != "" && readPosStr != "" { 
status.RelayLogSourceBinlogEquivalentPosition.GTIDSet, err = ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", file, readPosStr)) if err != nil { diff --git a/go/mysql/replication/replication_status_test.go b/go/mysql/replication/replication_status_test.go index c1f5991f253..25ff48dcd9c 100644 --- a/go/mysql/replication/replication_status_test.go +++ b/go/mysql/replication/replication_status_test.go @@ -134,13 +134,24 @@ func TestMysqlShouldGetPosition(t *testing.T) { assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) } -func TestMysqlRetrieveSourceServerId(t *testing.T) { +func TestMysqlRetrieveMasterServerId(t *testing.T) { resultMap := map[string]string{ "Master_Server_Id": "1", } want := ReplicationStatus{SourceServerID: 1} - got, err := ParseMysqlReplicationStatus(resultMap) + got, err := ParseMysqlReplicationStatus(resultMap, false) + require.NoError(t, err) + assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) +} + +func TestMysqlRetrieveSourceServerId(t *testing.T) { + resultMap := map[string]string{ + "Source_Server_Id": "1", + } + + want := ReplicationStatus{SourceServerID: 1} + got, err := ParseMysqlReplicationStatus(resultMap, true) require.NoError(t, err) assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID) } @@ -160,14 +171,14 @@ func TestMysqlRetrieveFileBasedPositions(t *testing.T) { RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: FilePosGTID{File: "master-bin.000003", Pos: 1308}}, RelayLogFilePosition: Position{GTIDSet: FilePosGTID{File: "relay-bin.000004", Pos: 1309}}, } - got, err := ParseMysqlReplicationStatus(resultMap) + got, err := ParseMysqlReplicationStatus(resultMap, false) require.NoError(t, err) 
assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet) assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet) } -func TestMysqlShouldGetRelayLogPosition(t *testing.T) { +func TestMysqlShouldGetLegacyRelayLogPosition(t *testing.T) { resultMap := map[string]string{ "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9", @@ -182,7 +193,27 @@ func TestMysqlShouldGetRelayLogPosition(t *testing.T) { Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}}, } - got, err := ParseMysqlReplicationStatus(resultMap) + got, err := ParseMysqlReplicationStatus(resultMap, false) + require.NoError(t, err) + assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) +} + +func TestMysqlShouldGetRelayLogPosition(t *testing.T) { + resultMap := map[string]string{ + "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9", + "Exec_Source_Log_Pos": "1307", + "Relay_Source_Log_File": "master-bin.000002", + "Read_Source_Log_Pos": "1308", + "Source_Log_File": "master-bin.000003", + } + + 
sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") + want := ReplicationStatus{ + Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, + RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}}, + } + got, err := ParseMysqlReplicationStatus(resultMap, true) require.NoError(t, err) assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet) } diff --git a/go/mysql/schema.go b/go/mysql/schema.go index 933ce657c3a..d0b9bfe2e79 100644 --- a/go/mysql/schema.go +++ b/go/mysql/schema.go @@ -35,33 +35,6 @@ const ( // ShowRowsRead is the query used to find the number of rows read. ShowRowsRead = "show status like 'Innodb_rows_read'" - // DetectSchemaChange query detects if there is any schema change from previous copy. - DetectSchemaChange = ` -SELECT DISTINCT table_name -FROM ( - SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM information_schema.columns - WHERE table_schema = database() - - UNION ALL - - SELECT table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key - FROM %s.schemacopy - WHERE table_schema = database() -) _inner -GROUP BY table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key -HAVING COUNT(*) = 1 -` - - // ClearSchemaCopy query clears the schemacopy table. - ClearSchemaCopy = `delete from %s.schemacopy where table_schema = database()` - - // InsertIntoSchemaCopy query copies over the schema information from information_schema.columns table. 
- InsertIntoSchemaCopy = `insert %s.schemacopy -select table_schema, table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key -from information_schema.columns -where table_schema = database()` - // GetColumnNamesQueryPatternForTable is used for mocking queries in unit tests GetColumnNamesQueryPatternForTable = `SELECT COLUMN_NAME.*TABLE_NAME.*%s.*` ) diff --git a/go/mysql/server.go b/go/mysql/server.go index ec2d7538daa..e21281710b7 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -27,10 +27,9 @@ import ( "github.com/pires/go-proxyproto" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" @@ -39,7 +38,7 @@ import ( "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -133,6 +132,8 @@ type Handler interface { WarningCount(c *Conn) uint16 ComResetConnection(c *Conn) + + Env() *vtenv.Environment } // UnimplementedHandler implemnts all of the optional callbacks so as to satisy @@ -212,6 +213,14 @@ type Listener struct { // handled further by the MySQL handler. An non-nil error will stop // processing the connection by the MySQL handler. PreHandleFunc func(context.Context, net.Conn, uint32) (net.Conn, error) + + // flushDelay is the delay after which buffered response will be flushed to the client. + flushDelay time.Duration + + // charset is the default server side character set to use for the connection + charset collations.ID + // parser to use for this listener, configured with the correct version. 
+ truncateErrLen int } // NewFromListener creates a new mysql listener from an existing net.Listener @@ -223,6 +232,7 @@ func NewFromListener( connWriteTimeout time.Duration, connBufferPooling bool, keepAlivePeriod time.Duration, + flushDelay time.Duration, ) (*Listener, error) { cfg := ListenerConfig{ Listener: l, @@ -233,6 +243,7 @@ func NewFromListener( ConnReadBufferSize: connBufferSize, ConnBufferPooling: connBufferPooling, ConnKeepAlivePeriod: keepAlivePeriod, + FlushDelay: flushDelay, } return NewListenerWithConfig(cfg) } @@ -247,6 +258,7 @@ func NewListener( proxyProtocol bool, connBufferPooling bool, keepAlivePeriod time.Duration, + flushDelay time.Duration, ) (*Listener, error) { listener, err := net.Listen(protocol, address) if err != nil { @@ -254,10 +266,10 @@ func NewListener( } if proxyProtocol { proxyListener := &proxyproto.Listener{Listener: listener} - return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod) + return NewFromListener(proxyListener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod, flushDelay) } - return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod) + return NewFromListener(listener, authServer, handler, connReadTimeout, connWriteTimeout, connBufferPooling, keepAlivePeriod, flushDelay) } // ListenerConfig should be used with NewListenerWithConfig to specify listener parameters. @@ -273,6 +285,7 @@ type ListenerConfig struct { ConnReadBufferSize int ConnBufferPooling bool ConnKeepAlivePeriod time.Duration + FlushDelay time.Duration } // NewListenerWithConfig creates new listener using provided config. 
There are @@ -293,13 +306,16 @@ func NewListenerWithConfig(cfg ListenerConfig) (*Listener, error) { authServer: cfg.AuthServer, handler: cfg.Handler, listener: l, - ServerVersion: servenv.AppVersion.MySQLVersion(), + ServerVersion: cfg.Handler.Env().MySQLVersion(), connectionID: 1, connReadTimeout: cfg.ConnReadTimeout, connWriteTimeout: cfg.ConnWriteTimeout, connReadBufferSize: cfg.ConnReadBufferSize, connBufferPooling: cfg.ConnBufferPooling, connKeepAlivePeriod: cfg.ConnKeepAlivePeriod, + flushDelay: cfg.FlushDelay, + truncateErrLen: cfg.Handler.Env().TruncateErrLen(), + charset: cfg.Handler.Env().CollationEnv().DefaultConnectionCharset(), }, nil } @@ -375,7 +391,7 @@ func (l *Listener) handle(conn net.Conn, connectionID uint32, acceptTime time.Ti defer connCount.Add(-1) // First build and send the server handshake packet. - serverAuthPluginData, err := c.writeHandshakeV10(l.ServerVersion, l.authServer, l.TLSConfig.Load() != nil) + serverAuthPluginData, err := c.writeHandshakeV10(l.ServerVersion, l.authServer, uint8(l.charset), l.TLSConfig.Load() != nil) if err != nil { if err != io.EOF { log.Errorf("Cannot send HandshakeV10 packet to %s: %v", c, err) @@ -556,7 +572,7 @@ func (l *Listener) Shutdown() { // writeHandshakeV10 writes the Initial Handshake Packet, server side. // It returns the salt data. -func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, enableTLS bool) ([]byte, error) { +func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, charset uint8, enableTLS bool) ([]byte, error) { capabilities := CapabilityClientLongPassword | CapabilityClientFoundRows | CapabilityClientLongFlag | @@ -631,7 +647,7 @@ func (c *Conn) writeHandshakeV10(serverVersion string, authServer AuthServer, en pos = writeUint16(data, pos, uint16(capabilities)) // Character set. - pos = writeByte(data, pos, collations.Local().DefaultConnectionCharset()) + pos = writeByte(data, pos, charset) // Status flag. 
pos = writeUint16(data, pos, c.StatusFlags) diff --git a/go/mysql/server_flaky_test.go b/go/mysql/server_test.go similarity index 96% rename from go/mysql/server_flaky_test.go rename to go/mysql/server_test.go index 509fccaa47a..082a176e3af 100644 --- a/go/mysql/server_flaky_test.go +++ b/go/mysql/server_test.go @@ -32,15 +32,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" - vtenv "vitess.io/vitess/go/vt/env" + venv "vitess.io/vitess/go/vt/env" "vitess.io/vitess/go/vt/tlstest" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttls" @@ -146,7 +145,7 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R { Name: "schema_name", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{ @@ -165,7 +164,7 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R { Name: "ssl_flag", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{ @@ -180,12 +179,12 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R { Name: "user", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, { Name: "user_data", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{ @@ -200,7 +199,7 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R Fields: 
[]*querypb.Field{{ Name: "result", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, }) time.Sleep(50 * time.Millisecond) @@ -216,7 +215,7 @@ func (th *testHandler) ComQuery(c *Conn, query string, callback func(*sqltypes.R { Name: "result", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{ @@ -256,6 +255,10 @@ func (th *testHandler) WarningCount(c *Conn) uint16 { return th.warnings } +func (th *testHandler) Env() *vtenv.Environment { + return vtenv.NewTestEnv() +} + func getHostPort(t *testing.T, a net.Addr) (string, int) { host := a.(*net.TCPAddr).IP.String() port := a.(*net.TCPAddr).Port @@ -277,7 +280,7 @@ func TestConnectionFromListener(t *testing.T) { listener, err := net.Listen("tcp", "127.0.0.1:") require.NoError(t, err, "net.Listener failed") - l, err := NewFromListener(listener, authServer, th, 0, 0, false, 0) + l, err := NewFromListener(listener, authServer, th, 0, 0, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -306,7 +309,7 @@ func TestConnectionWithoutSourceHost(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -339,7 +342,7 @@ func TestConnectionWithSourceHost(t *testing.T) { } defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -372,7 +375,7 @@ func TestConnectionUseMysqlNativePasswordWithSourceHost(t *testing.T) { } 
defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -410,7 +413,7 @@ func TestConnectionUnixSocket(t *testing.T) { os.Remove(unixSocket.Name()) - l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false, 0) + l, err := NewListener("unix", unixSocket.Name(), authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -436,7 +439,7 @@ func TestClientFoundRows(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -471,11 +474,6 @@ func TestClientFoundRows(t *testing.T) { func TestConnCounts(t *testing.T) { th := &testHandler{} - initialNumUsers := len(connCountPerUser.Counts()) - - // FIXME: we should be able to ResetAll counters instead of computing a delta, but it doesn't work for some reason - // connCountPerUser.ResetAll() - user := "anotherNotYetConnectedUser1" passwd := "password1" @@ -485,7 +483,7 @@ func TestConnCounts(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed") defer l.Close() go l.Accept() @@ -503,29 +501,26 @@ func TestConnCounts(t *testing.T) { c, err := Connect(context.Background(), params) require.NoError(t, err, "Connect failed") - connCounts := connCountPerUser.Counts() - assert.Equal(t, 1, len(connCounts)-initialNumUsers) checkCountsForUser(t, user, 1) 
// Test with a second new connection. c2, err := Connect(context.Background(), params) require.NoError(t, err) - connCounts = connCountPerUser.Counts() - // There is still only one new user. - assert.Equal(t, 1, len(connCounts)-initialNumUsers) checkCountsForUser(t, user, 2) - // Test after closing connections. time.Sleep lets it work, but seems flakey. + // Test after closing connections. c.Close() - // time.Sleep(10 * time.Millisecond) - // checkCountsForUser(t, user, 1) + assert.EventuallyWithT(t, func(t *assert.CollectT) { + checkCountsForUser(t, user, 1) + }, 1*time.Second, 10*time.Millisecond) c2.Close() - // time.Sleep(10 * time.Millisecond) - // checkCountsForUser(t, user, 0) + assert.EventuallyWithT(t, func(t *assert.CollectT) { + checkCountsForUser(t, user, 0) + }, 1*time.Second, 10*time.Millisecond) } -func checkCountsForUser(t *testing.T, user string, expected int64) { +func checkCountsForUser(t assert.TestingT, user string, expected int64) { connCounts := connCountPerUser.Counts() userCount, ok := connCounts[user] @@ -542,7 +537,7 @@ func TestServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -642,7 +637,7 @@ func TestServerStats(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) l.SlowConnectWarnThreshold.Store(time.Nanosecond.Nanoseconds()) defer l.Close() @@ -716,7 +711,7 @@ func TestClearTextServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) 
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -789,7 +784,7 @@ func TestDialogServer(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) l.AllowClearTextWithoutTLS.Store(true) defer l.Close() @@ -832,7 +827,7 @@ func TestTLSServer(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() @@ -930,7 +925,7 @@ func TestTLSRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() @@ -1019,7 +1014,7 @@ func TestCachingSha2PasswordAuthWithTLS(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1113,7 +1108,7 @@ func TestCachingSha2PasswordAuthWithMoreData(t *testing.T) { defer authServer.close() // Create the listener, so we can get its host. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1182,7 +1177,7 @@ func TestCachingSha2PasswordAuthWithoutTLS(t *testing.T) { defer authServer.close() // Create the listener. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -1224,7 +1219,7 @@ func TestErrorCodes(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1318,7 +1313,7 @@ func runMysql(t *testing.T, params *ConnParams, command string) (string, bool) { } func runMysqlWithErr(t *testing.T, params *ConnParams, command string) (string, error) { - dir, err := vtenv.VtMysqlRoot() + dir, err := venv.VtMysqlRoot() require.NoError(t, err) name, err := binaryPath(dir, "mysql") require.NoError(t, err) @@ -1402,7 +1397,7 @@ func TestListenerShutdown(t *testing.T) { UserData: "userData1", }} defer authServer.close() - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1470,12 +1465,10 @@ func TestParseConnAttrs(t *testing.T) { } func TestServerFlush(t *testing.T) { - defer func(saved time.Duration) { mysqlServerFlushDelay = saved }(mysqlServerFlushDelay) - mysqlServerFlushDelay = 10 * time.Millisecond - + mysqlServerFlushDelay := 10 * 
time.Millisecond th := &testHandler{} - l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0, mysqlServerFlushDelay) require.NoError(t, err) defer l.Close() go l.Accept() @@ -1502,7 +1495,7 @@ func TestServerFlush(t *testing.T) { want1 := []*querypb.Field{{ Name: "result", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }} assert.Equal(t, want1, flds) @@ -1521,7 +1514,7 @@ func TestServerFlush(t *testing.T) { func TestTcpKeepAlive(t *testing.T) { th := &testHandler{} - l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0) + l, err := NewListener("tcp", "127.0.0.1:", NewAuthServerNone(), th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer l.Close() go l.Accept() diff --git a/go/mysql/sqlerror/constants.go b/go/mysql/sqlerror/constants.go index 0074e904e4a..a247ca15aa4 100644 --- a/go/mysql/sqlerror/constants.go +++ b/go/mysql/sqlerror/constants.go @@ -34,7 +34,8 @@ func (e ErrorCode) ToString() string { // See above reference for more information on each code. 
const ( // Vitess specific errors, (100-999) - ERNotReplica = ErrorCode(100) + ERNotReplica = ErrorCode(100) + ERNonAtomicCommit = ErrorCode(301) // unknown ERUnknownError = ErrorCode(1105) @@ -234,6 +235,7 @@ const ( ERUnknownTimeZone = ErrorCode(1298) ERInvalidCharacterString = ErrorCode(1300) ERQueryInterrupted = ErrorCode(1317) + ERViewWrongList = ErrorCode(1353) ERTruncatedWrongValueForField = ErrorCode(1366) ERIllegalValueForType = ErrorCode(1367) ERDataTooLong = ErrorCode(1406) @@ -250,6 +252,7 @@ const ( ERJSONValueTooBig = ErrorCode(3150) ERJSONDocumentTooDeep = ErrorCode(3157) + ERLockNowait = ErrorCode(3572) ERRegexpStringNotTerminated = ErrorCode(3684) ERRegexpBufferOverflow = ErrorCode(3684) ERRegexpIllegalArgument = ErrorCode(3685) diff --git a/go/mysql/sqlerror/sql_error.go b/go/mysql/sqlerror/sql_error.go index 9b1f65c82e3..935fd77a12f 100644 --- a/go/mysql/sqlerror/sql_error.go +++ b/go/mysql/sqlerror/sql_error.go @@ -17,13 +17,11 @@ limitations under the License. package sqlerror import ( - "bytes" "fmt" "regexp" "strconv" "strings" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -53,17 +51,17 @@ func NewSQLError(number ErrorCode, sqlState string, format string, args ...any) // Error implements the error interface func (se *SQLError) Error() string { - buf := &bytes.Buffer{} + var buf strings.Builder buf.WriteString(se.Message) // Add MySQL errno and SQLSTATE in a format that we can later parse. // There's no avoiding string parsing because all errors // are converted to strings anyway at RPC boundaries. // See NewSQLErrorFromError. 
- fmt.Fprintf(buf, " (errno %v) (sqlstate %v)", se.Num, se.State) + fmt.Fprintf(&buf, " (errno %v) (sqlstate %v)", se.Num, se.State) if se.Query != "" { - fmt.Fprintf(buf, " during query: %s", sqlparser.TruncateForLog(se.Query)) + fmt.Fprintf(&buf, " during query: %s", se.Query) } return buf.String() @@ -79,7 +77,7 @@ func (se *SQLError) SQLState() string { return se.State } -var errExtract = regexp.MustCompile(`.*\(errno ([0-9]*)\) \(sqlstate ([0-9a-zA-Z]{5})\).*`) +var errExtract = regexp.MustCompile(`\(errno ([0-9]*)\) \(sqlstate ([0-9a-zA-Z]{5})\)`) // NewSQLErrorFromError returns a *SQLError from the provided error. // If it's not the right type, it still tries to get it from a regexp. @@ -218,7 +216,9 @@ var stateToMysqlCode = map[vterrors.State]mysqlCode{ vterrors.OperandColumns: {num: EROperandColumns, state: SSWrongNumberOfColumns}, vterrors.WrongValueCountOnRow: {num: ERWrongValueCountOnRow, state: SSWrongValueCountOnRow}, vterrors.WrongArguments: {num: ERWrongArguments, state: SSUnknownSQLState}, + vterrors.ViewWrongList: {num: ERViewWrongList, state: SSUnknownSQLState}, vterrors.UnknownStmtHandler: {num: ERUnknownStmtHandler, state: SSUnknownSQLState}, + vterrors.KeyDoesNotExist: {num: ERKeyDoesNotExist, state: SSClientError}, vterrors.UnknownTimeZone: {num: ERUnknownTimeZone, state: SSUnknownSQLState}, vterrors.RegexpStringNotTerminated: {num: ERRegexpStringNotTerminated, state: SSUnknownSQLState}, vterrors.RegexpBufferOverflow: {num: ERRegexpBufferOverflow, state: SSUnknownSQLState}, @@ -243,6 +243,8 @@ var stateToMysqlCode = map[vterrors.State]mysqlCode{ vterrors.CharacterSetMismatch: {num: ERCharacterSetMismatch, state: SSUnknownSQLState}, vterrors.WrongParametersToNativeFct: {num: ERWrongParametersToNativeFct, state: SSUnknownSQLState}, vterrors.KillDeniedError: {num: ERKillDenied, state: SSUnknownSQLState}, + vterrors.BadNullError: {num: ERBadNullError, state: SSConstraintViolation}, + vterrors.InvalidGroupFuncUse: {num: ERInvalidGroupFuncUse, 
state: SSUnknownSQLState}, } func getStateToMySQLState(state vterrors.State) mysqlCode { diff --git a/go/mysql/sqlerror/sql_error_test.go b/go/mysql/sqlerror/sql_error_test.go index 3c7f3114b68..b38cec26388 100644 --- a/go/mysql/sqlerror/sql_error_test.go +++ b/go/mysql/sqlerror/sql_error_test.go @@ -20,10 +20,11 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - - "github.com/stretchr/testify/assert" ) func TestDemuxResourceExhaustedErrors(t *testing.T) { @@ -173,11 +174,17 @@ func TestNewSQLErrorFromError(t *testing.T) { num: EROutOfResources, ss: SSUnknownSQLState, }, + { + err: vterrors.Errorf(vtrpc.Code_RESOURCE_EXHAUSTED, "vttablet: rpc error: code = AlreadyExists desc = Duplicate entry '1' for key 'PRIMARY' (errno 1062) (sqlstate 23000) (CallerID: userData1): Sql: \"insert into test(id, `name`) values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */)\", BindVars: {vtg1: \"type:INT64 value:\\\"1\\\"\"vtg2: \"type:VARCHAR value:\\\"(errno 1366) (sqlstate 10000)\\\"\"}"), + num: ERDupEntry, + ss: SSConstraintViolation, + }, } for _, tc := range tCases { t.Run(tc.err.Error(), func(t *testing.T) { - err := NewSQLErrorFromError(tc.err).(*SQLError) + var err *SQLError + require.ErrorAs(t, NewSQLErrorFromError(tc.err), &err) assert.Equal(t, tc.num, err.Number()) assert.Equal(t, tc.ss, err.SQLState()) }) diff --git a/go/mysql/streaming_query.go b/go/mysql/streaming_query.go index 257c56e076f..3d0d9ef49e8 100644 --- a/go/mysql/streaming_query.go +++ b/go/mysql/streaming_query.go @@ -19,6 +19,7 @@ package mysql import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -32,7 +33,7 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { defer func() { if err != nil { if sqlerr, ok := err.(*sqlerror.SQLError); ok { - sqlerr.Query = query + 
sqlerr.Query = sqlparser.TruncateQuery(query, c.truncateErrLen) } } }() @@ -48,7 +49,8 @@ func (c *Conn) ExecuteStreamFetch(query string) (err error) { } // Get the result. - colNumber, _, err := c.readComQueryResponse() + var packetOk PacketOK + colNumber, err := c.readComQueryResponse(&packetOk) if err != nil { return err } diff --git a/go/mysql/vault/auth_server_vault.go b/go/mysql/vault/auth_server_vault.go index ccdef9f1d53..d2bc2548817 100644 --- a/go/mysql/vault/auth_server_vault.go +++ b/go/mysql/vault/auth_server_vault.go @@ -28,41 +28,12 @@ import ( "time" vaultapi "github.com/aquarapid/vaultlib" - "github.com/spf13/pflag" - - "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" ) -var ( - vaultAddr string - vaultTimeout time.Duration - vaultCACert string - vaultPath string - vaultCacheTTL time.Duration - vaultTokenFile string - vaultRoleID string - vaultRoleSecretIDFile string - vaultRoleMountPoint string -) - -func init() { - servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { - fs.StringVar(&vaultAddr, "mysql_auth_vault_addr", "", "URL to Vault server") - fs.DurationVar(&vaultTimeout, "mysql_auth_vault_timeout", 10*time.Second, "Timeout for vault API operations") - fs.StringVar(&vaultCACert, "mysql_auth_vault_tls_ca", "", "Path to CA PEM for validating Vault server certificate") - fs.StringVar(&vaultPath, "mysql_auth_vault_path", "", "Vault path to vtgate credentials JSON blob, e.g.: secret/data/prod/vtgatecreds") - fs.DurationVar(&vaultCacheTTL, "mysql_auth_vault_ttl", 30*time.Minute, "How long to cache vtgate credentials from the Vault server") - fs.StringVar(&vaultTokenFile, "mysql_auth_vault_tokenfile", "", "Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable") - fs.StringVar(&vaultRoleID, "mysql_auth_vault_roleid", "", "Vault AppRole id; can also be passed using VAULT_ROLEID 
environment variable") - fs.StringVar(&vaultRoleSecretIDFile, "mysql_auth_vault_role_secretidfile", "", "Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable") - fs.StringVar(&vaultRoleMountPoint, "mysql_auth_vault_role_mountpoint", "approle", "Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable") - }) -} - // AuthServerVault implements AuthServer with a config loaded from Vault. type AuthServerVault struct { methods []mysql.AuthMethod @@ -80,7 +51,7 @@ type AuthServerVault struct { } // InitAuthServerVault - entrypoint for initialization of Vault AuthServer implementation -func InitAuthServerVault() { +func InitAuthServerVault(vaultAddr string, vaultTimeout time.Duration, vaultCACert, vaultPath string, vaultCacheTTL time.Duration, vaultTokenFile, vaultRoleID, vaultRoleSecretIDFile, vaultRoleMountPoint string) { // Check critical parameters. if vaultAddr == "" { log.Infof("Not configuring AuthServerVault, as --mysql_auth_vault_addr is empty.") diff --git a/go/netutil/conn_test.go b/go/netutil/conn_test.go index 78776035856..b27f81a6311 100644 --- a/go/netutil/conn_test.go +++ b/go/netutil/conn_test.go @@ -15,18 +15,17 @@ package netutil import ( "net" - "strings" "sync" "testing" "time" + + "github.com/stretchr/testify/assert" ) func createSocketPair(t *testing.T) (net.Listener, net.Conn, net.Conn) { // Create a listener. listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Listen failed: %v", err) - } + assert.NoError(t, err) addr := listener.Addr().String() // Dial a client, Accept a server. 
@@ -38,9 +37,7 @@ func createSocketPair(t *testing.T) (net.Listener, net.Conn, net.Conn) { defer wg.Done() var err error clientConn, err = net.Dial("tcp", addr) - if err != nil { - t.Errorf("Dial failed: %v", err) - } + assert.NoError(t, err) }() var serverConn net.Conn @@ -49,9 +46,7 @@ func createSocketPair(t *testing.T) (net.Listener, net.Conn, net.Conn) { defer wg.Done() var err error serverConn, err = listener.Accept() - if err != nil { - t.Errorf("Accept failed: %v", err) - } + assert.NoError(t, err) }() wg.Wait() @@ -77,13 +72,7 @@ func TestReadTimeout(t *testing.T) { select { case err := <-c: - if err == nil { - t.Fatalf("Expected error, got nil") - } - - if !strings.HasSuffix(err.Error(), "i/o timeout") { - t.Errorf("Expected error timeout, got %s", err) - } + assert.ErrorContains(t, err, "i/o timeout", "Expected error timeout") case <-time.After(10 * time.Second): t.Errorf("Timeout did not happen") } @@ -113,13 +102,7 @@ func TestWriteTimeout(t *testing.T) { select { case err := <-c: - if err == nil { - t.Fatalf("Expected error, got nil") - } - - if !strings.HasSuffix(err.Error(), "i/o timeout") { - t.Errorf("Expected error timeout, got %s", err) - } + assert.ErrorContains(t, err, "i/o timeout", "Expected error timeout") case <-time.After(10 * time.Second): t.Errorf("Timeout did not happen") } @@ -167,3 +150,42 @@ func TestNoTimeouts(t *testing.T) { // NOOP } } + +func TestSetDeadline(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + cConnWithTimeout := NewConnWithTimeouts(cConn, 0, 24*time.Hour) + + assert.Panics(t, func() { _ = cConnWithTimeout.SetDeadline(time.Now()) }) +} + +func TestSetReadDeadline(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + cConnWithTimeout := NewConnWithTimeouts(cConn, 0, 24*time.Hour) + + assert.Panics(t, func() { _ = 
cConnWithTimeout.SetReadDeadline(time.Now()) }) +} + +func TestSetWriteDeadline(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + cConnWithTimeout := NewConnWithTimeouts(cConn, 0, 24*time.Hour) + + assert.Panics(t, func() { _ = cConnWithTimeout.SetWriteDeadline(time.Now()) }) +} diff --git a/go/netutil/netutil.go b/go/netutil/netutil.go index fbac6e88424..e440c2148fc 100644 --- a/go/netutil/netutil.go +++ b/go/netutil/netutil.go @@ -20,71 +20,13 @@ package netutil import ( "bytes" "fmt" - "math/rand" "net" "os" "sort" "strconv" "strings" - "time" ) -// byPriorityWeight sorts records by ascending priority and weight. -type byPriorityWeight []*net.SRV - -func (addrs byPriorityWeight) Len() int { return len(addrs) } - -func (addrs byPriorityWeight) Swap(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] } - -func (addrs byPriorityWeight) Less(i, j int) bool { - return addrs[i].Priority < addrs[j].Priority || - (addrs[i].Priority == addrs[j].Priority && addrs[i].Weight < addrs[j].Weight) -} - -// shuffleByWeight shuffles SRV records by weight using the algorithm -// described in RFC 2782. -// NOTE(msolo) This is disabled when the weights are zero. 
-func (addrs byPriorityWeight) shuffleByWeight(rand *rand.Rand) { - sum := 0 - for _, addr := range addrs { - sum += int(addr.Weight) - } - for sum > 0 && len(addrs) > 1 { - s := 0 - n := rand.Intn(sum) - for i := range addrs { - s += int(addrs[i].Weight) - if s > n { - if i > 0 { - t := addrs[i] - copy(addrs[1:i+1], addrs[0:i]) - addrs[0] = t - } - break - } - } - sum -= int(addrs[0].Weight) - addrs = addrs[1:] - } -} - -func (addrs byPriorityWeight) sortRfc2782(rand *rand.Rand) { - sort.Sort(addrs) - i := 0 - for j := 1; j < len(addrs); j++ { - if addrs[i].Priority != addrs[j].Priority { - addrs[i:j].shuffleByWeight(rand) - i = j - } - } - addrs[i:].shuffleByWeight(rand) -} - -// SortRfc2782 reorders SRV records as specified in RFC 2782. -func SortRfc2782(srvs []*net.SRV) { - byPriorityWeight(srvs).sortRfc2782(rand.New(rand.NewSource(time.Now().UTC().UnixNano()))) -} - // SplitHostPort is an alternative to net.SplitHostPort that also parses the // integer port. In addition, it is more tolerant of improperly escaped IPv6 // addresses, such as "::1:456", which should actually be "[::1]:456". 
@@ -164,29 +106,6 @@ func FullyQualifiedHostnameOrPanic() string { return hostname } -// ResolveIPv4Addrs resolves the address:port part into IP address:port pairs -func ResolveIPv4Addrs(addr string) ([]string, error) { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - ipAddrs, err := net.LookupIP(host) - if err != nil { - return nil, err - } - result := make([]string, 0, len(ipAddrs)) - for _, ipAddr := range ipAddrs { - ipv4 := ipAddr.To4() - if ipv4 != nil { - result = append(result, net.JoinHostPort(ipv4.String(), port)) - } - } - if len(result) == 0 { - return nil, fmt.Errorf("no IPv4addr for name %v", host) - } - return result, nil -} - func dnsLookup(host string) ([]net.IP, error) { addrs, err := net.LookupHost(host) if err != nil { diff --git a/go/netutil/netutil_test.go b/go/netutil/netutil_test.go index b8cfc563acb..e5df2065033 100644 --- a/go/netutil/netutil_test.go +++ b/go/netutil/netutil_test.go @@ -17,69 +17,11 @@ limitations under the License. 
package netutil import ( - "fmt" - "math/rand" "net" - "reflect" "testing" -) - -func checkDistribution(t *testing.T, rand *rand.Rand, data []*net.SRV, margin float64) { - sum := 0 - for _, srv := range data { - sum += int(srv.Weight) - } - results := make(map[string]int) - - count := 1000 - for j := 0; j < count; j++ { - d := make([]*net.SRV, len(data)) - copy(d, data) - byPriorityWeight(d).shuffleByWeight(rand) - key := d[0].Target - results[key] = results[key] + 1 - } - - actual := results[data[0].Target] - expected := float64(count) * float64(data[0].Weight) / float64(sum) - diff := float64(actual) - expected - t.Logf("actual: %v diff: %v e: %v m: %v", actual, diff, expected, margin) - if diff < 0 { - diff = -diff - } - if diff > (expected * margin) { - t.Errorf("missed target weight: expected %v, %v", expected, actual) - } -} - -func testUniformity(t *testing.T, size int, margin float64) { - data := make([]*net.SRV, size) - for i := 0; i < size; i++ { - data[i] = &net.SRV{Target: fmt.Sprintf("%c", 'a'+i), Weight: 1} - } - checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) -} - -func TestUniformity(t *testing.T) { - testUniformity(t, 2, 0.05) - testUniformity(t, 3, 0.10) - testUniformity(t, 10, 0.20) - testWeighting(t, 0.05) -} - -func testWeighting(t *testing.T, margin float64) { - data := []*net.SRV{ - {Target: "a", Weight: 60}, - {Target: "b", Weight: 30}, - {Target: "c", Weight: 10}, - } - checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) -} - -func TestWeighting(t *testing.T) { - testWeighting(t, 0.05) -} + "github.com/stretchr/testify/assert" +) func TestSplitHostPort(t *testing.T) { type addr struct { @@ -94,12 +36,9 @@ func TestSplitHostPort(t *testing.T) { } for input, want := range table { gotHost, gotPort, err := SplitHostPort(input) - if err != nil { - t.Errorf("SplitHostPort error: %v", err) - } - if gotHost != want.host || gotPort != want.port { - t.Errorf("SplitHostPort(%#v) = (%v, %v), want (%v, %v)", input, gotHost, 
gotPort, want.host, want.port) - } + assert.NoError(t, err) + assert.Equal(t, want.host, gotHost) + assert.Equal(t, want.port, gotPort) } } @@ -111,9 +50,7 @@ func TestSplitHostPortFail(t *testing.T) { } for _, input := range inputs { _, _, err := SplitHostPort(input) - if err == nil { - t.Errorf("expected error from SplitHostPort(%q), but got none", input) - } + assert.Error(t, err) } } @@ -127,46 +64,7 @@ func TestJoinHostPort(t *testing.T) { "[::1]:321": {host: "::1", port: 321}, } for want, input := range table { - if got := JoinHostPort(input.host, input.port); got != want { - t.Errorf("SplitHostPort(%v, %v) = %#v, want %#v", input.host, input.port, got, want) - } - } -} - -func TestResolveIPv4Addrs(t *testing.T) { - cases := []struct { - address string - expected []string - expectedError bool - }{ - { - address: "localhost:3306", - expected: []string{"127.0.0.1:3306"}, - }, - { - address: "127.0.0.256:3306", - expectedError: true, - }, - { - address: "localhost", - expectedError: true, - }, - { - address: "InvalidHost:3306", - expectedError: true, - }, - } - - for _, c := range cases { - t.Run(c.address, func(t *testing.T) { - got, err := ResolveIPv4Addrs(c.address) - if (err != nil) != c.expectedError { - t.Errorf("expected error but got: %v", err) - } - if !reflect.DeepEqual(got, c.expected) { - t.Errorf("expected: %v, got: %v", c.expected, got) - } - }) + assert.Equal(t, want, JoinHostPort(input.host, input.port)) } } @@ -181,8 +79,34 @@ func TestNormalizeIP(t *testing.T) { "127.": "127.", } for input, want := range table { - if got := NormalizeIP(input); got != want { - t.Errorf("NormalizeIP(%#v) = %#v, want %#v", input, got, want) - } + assert.Equal(t, want, NormalizeIP(input)) } } + +func TestDNSTracker(t *testing.T) { + refresh := DNSTracker("localhost") + _, err := refresh() + assert.NoError(t, err) + + refresh = DNSTracker("") + val, err := refresh() + assert.NoError(t, err) + assert.False(t, val, "DNS name resolution should not have changed") +} + 
+func TestAddrEqual(t *testing.T) { + addr1 := net.ParseIP("1.2.3.4") + addr2 := net.ParseIP("127.0.0.1") + + addrSet1 := []net.IP{addr1, addr2} + addrSet2 := []net.IP{addr1} + addrSet3 := []net.IP{addr2} + ok := addrEqual(addrSet1, addrSet2) + assert.False(t, ok, "addresses %q and %q should not be equal", addrSet1, addrSet2) + + ok = addrEqual(addrSet3, addrSet2) + assert.False(t, ok, "addresses %q and %q should not be equal", addrSet3, addrSet2) + + ok = addrEqual(addrSet1, addrSet1) + assert.True(t, ok, "addresses %q and %q should be equal", addrSet1, addrSet1) +} diff --git a/go/pools/numbered.go b/go/pools/numbered.go index 6e1699a5dd8..304c723c3d2 100644 --- a/go/pools/numbered.go +++ b/go/pools/numbered.go @@ -30,7 +30,7 @@ type Numbered struct { mu sync.Mutex empty *sync.Cond // Broadcast when pool becomes empty resources map[int64]*numberedWrapper - recentlyUnregistered *cache.LRUCache + recentlyUnregistered *cache.LRUCache[*unregistered] } type numberedWrapper struct { @@ -47,10 +47,8 @@ type unregistered struct { // NewNumbered creates a new numbered func NewNumbered() *Numbered { n := &Numbered{ - resources: make(map[int64]*numberedWrapper), - recentlyUnregistered: cache.NewLRUCache(1000, func(_ any) int64 { - return 1 - }), + resources: make(map[int64]*numberedWrapper), + recentlyUnregistered: cache.NewLRUCache[*unregistered](1000), } n.empty = sync.NewCond(&n.mu) return n @@ -107,11 +105,10 @@ func (nu *Numbered) Get(id int64, purpose string) (val any, err error) { defer nu.mu.Unlock() nw, ok := nu.resources[id] if !ok { - if val, ok := nu.recentlyUnregistered.Get(fmt.Sprintf("%v", id)); ok { - unreg := val.(*unregistered) + if unreg, ok := nu.recentlyUnregistered.Get(fmt.Sprintf("%v", id)); ok { return nil, fmt.Errorf("ended at %v (%v)", unreg.timeUnregistered.Format("2006-01-02 15:04:05.000 MST"), unreg.reason) } - return nil, fmt.Errorf("not found") + return nil, fmt.Errorf("not found (potential transaction timeout)") } if nw.inUse { return nil, 
fmt.Errorf("in use: %s", nw.purpose) diff --git a/go/pools/numbered_test.go b/go/pools/numbered_test.go index 826af8253b8..12bffdb8ad7 100644 --- a/go/pools/numbered_test.go +++ b/go/pools/numbered_test.go @@ -17,7 +17,7 @@ limitations under the License. package pools import ( - "math/rand" + "math/rand/v2" "strings" "testing" @@ -45,7 +45,7 @@ func TestNumberedGeneral(t *testing.T) { p.Put(id) _, err = p.Get(1, "test2") - assert.Contains(t, "not found", err.Error()) + assert.ErrorContains(t, err, "not found (potential transaction timeout)") p.Unregister(1, "test") // Should not fail p.Unregister(0, "test") // p is now empty @@ -99,7 +99,7 @@ func BenchmarkRegisterUnregisterParallel(b *testing.B) { b.SetParallelism(200) b.RunParallel(func(pb *testing.PB) { for pb.Next() { - id := rand.Int63() + id := rand.Int64() p.Register(id, val) p.Unregister(id, "some reason") } diff --git a/go/pools/resource_pool.go b/go/pools/resource_pool.go index 939b73fa66c..2a20cf67acd 100644 --- a/go/pools/resource_pool.go +++ b/go/pools/resource_pool.go @@ -22,7 +22,7 @@ import ( "context" "errors" "fmt" - "math/rand" + "math/rand/v2" "sync" "sync/atomic" "time" @@ -419,7 +419,7 @@ func (rp *ResourcePool) extendedMaxLifetime() time.Duration { if maxLifetime == 0 { return 0 } - return time.Duration(maxLifetime + rand.Int63n(maxLifetime)) + return time.Duration(maxLifetime + rand.Int64N(maxLifetime)) } // MaxLifetimeClosed returns the count of resources closed due to refresh timeout. 
diff --git a/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go b/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go index df8c44e1530..33547349860 100644 --- a/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go +++ b/go/pools/smartconnpool/benchmarking/legacy/resource_pool.go @@ -20,7 +20,7 @@ import ( "context" "errors" "fmt" - "math/rand" + "math/rand/v2" "sync" "sync/atomic" "time" @@ -576,7 +576,7 @@ func (rp *ResourcePool) extendedMaxLifetime() time.Duration { if maxLifetime == 0 { return 0 } - return time.Duration(maxLifetime + rand.Int63n(maxLifetime)) + return time.Duration(maxLifetime + rand.Int64N(maxLifetime)) } // MaxLifetimeClosed returns the count of resources closed due to refresh timeout. diff --git a/go/pools/smartconnpool/benchmarking/load_test.go b/go/pools/smartconnpool/benchmarking/load_test.go index 537daf2c357..e97537bca0c 100644 --- a/go/pools/smartconnpool/benchmarking/load_test.go +++ b/go/pools/smartconnpool/benchmarking/load_test.go @@ -21,7 +21,7 @@ import ( "encoding/json" "fmt" "math" - "math/rand" + "math/rand/v2" "os" "sort" "sync" diff --git a/go/pools/smartconnpool/pool.go b/go/pools/smartconnpool/pool.go index 47d80aa3fc8..8640910fa17 100644 --- a/go/pools/smartconnpool/pool.go +++ b/go/pools/smartconnpool/pool.go @@ -18,12 +18,12 @@ package smartconnpool import ( "context" + "math/rand/v2" "slices" "sync" "sync/atomic" "time" - "vitess.io/vitess/go/hack" "vitess.io/vitess/go/vt/log" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" @@ -32,10 +32,16 @@ import ( var ( // ErrTimeout is returned if a connection get times out. 
- ErrTimeout = vterrors.New(vtrpcpb.Code_RESOURCE_EXHAUSTED, "resource pool timed out") + ErrTimeout = vterrors.New(vtrpcpb.Code_RESOURCE_EXHAUSTED, "connection pool timed out") // ErrCtxTimeout is returned if a ctx is already expired by the time the connection pool is used - ErrCtxTimeout = vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "resource pool context already expired") + ErrCtxTimeout = vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, "connection pool context already expired") + + // ErrConnPoolClosed is returned when trying to get a connection from a closed conn pool + ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_INTERNAL, "connection pool is closed") + + // PoolCloseTimeout is how long to wait for all connections to be returned to the pool during close + PoolCloseTimeout = 10 * time.Second ) type Metrics struct { @@ -119,8 +125,9 @@ type ConnPool[C Connection] struct { capacity atomic.Int64 // workers is a waitgroup for all the currently running worker goroutines - workers sync.WaitGroup - close chan struct{} + workers sync.WaitGroup + close chan struct{} + capacityMu sync.Mutex config struct { // connect is the callback to create a new connection for the pool @@ -142,6 +149,7 @@ type ConnPool[C Connection] struct { } Metrics Metrics + Name string } // NewPool creates a new connection pool with the given Config. @@ -236,29 +244,60 @@ func (pool *ConnPool[C]) Open(connect Connector[C], refresh RefreshCheck) *ConnP // Close shuts down the pool. No connections will be returned from ConnPool.Get after calling this, // but calling ConnPool.Put is still allowed. This function will not return until all of the pool's -// connections have been returned. 
+// connections have been returned or the default PoolCloseTimeout has elapsed func (pool *ConnPool[C]) Close() { - if pool.close == nil { + ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout) + defer cancel() + + if err := pool.CloseWithContext(ctx); err != nil { + log.Errorf("failed to close pool %q: %v", pool.Name, err) + } +} + +// CloseWithContext behaves like Close but allows passing in a Context to time out the +// pool closing operation +func (pool *ConnPool[C]) CloseWithContext(ctx context.Context) error { + pool.capacityMu.Lock() + defer pool.capacityMu.Unlock() + + if pool.close == nil || pool.capacity.Load() == 0 { // already closed - return + return nil } - pool.SetCapacity(0) + // close all the connections in the pool; if we time out while waiting for + // users to return our connections, we still want to finish the shutdown + // for the pool + err := pool.setCapacity(ctx, 0) close(pool.close) pool.workers.Wait() pool.close = nil + return err } func (pool *ConnPool[C]) reopen() { + pool.capacityMu.Lock() + defer pool.capacityMu.Unlock() + capacity := pool.capacity.Load() if capacity == 0 { return } - pool.Close() - pool.open() - pool.SetCapacity(capacity) + ctx, cancel := context.WithTimeout(context.Background(), PoolCloseTimeout) + defer cancel() + + // to re-open the connection pool, first set the capacity to 0 so we close + // all the existing connections, as they're now connected to a stale MySQL + // instance. 
+ if err := pool.setCapacity(ctx, 0); err != nil { + log.Errorf("failed to reopen pool %q: %v", pool.Name, err) + } + + // the second call to setCapacity cannot fail because it's only increasing the number + // of connections and doesn't need to shut down any + _ = pool.setCapacity(ctx, capacity) } // IsOpen returns whether the pool is open @@ -322,7 +361,7 @@ func (pool *ConnPool[C]) Get(ctx context.Context, setting *Setting) (*Pooled[C], return nil, ErrCtxTimeout } if pool.capacity.Load() == 0 { - return nil, ErrTimeout + return nil, ErrConnPoolClosed } if setting == nil { return pool.get(ctx) @@ -376,8 +415,7 @@ func (pool *ConnPool[D]) extendedMaxLifetime() time.Duration { if maxLifetime == 0 { return 0 } - extended := hack.FastRand() % uint32(maxLifetime) - return time.Duration(maxLifetime) + time.Duration(extended) + return time.Duration(maxLifetime) + time.Duration(rand.Uint32N(uint32(maxLifetime))) } func (pool *ConnPool[C]) connReopen(ctx context.Context, dbconn *Pooled[C], now time.Time) error { @@ -575,39 +613,55 @@ func (pool *ConnPool[C]) getWithSetting(ctx context.Context, setting *Setting) ( // If the capacity is smaller than the number of connections that there are // currently open, we'll close enough connections before returning, even if // that means waiting for clients to return connections to the pool. -func (pool *ConnPool[C]) SetCapacity(newcap int64) { +// If the given context times out before we've managed to close enough connections +// an error will be returned. 
+func (pool *ConnPool[C]) SetCapacity(ctx context.Context, newcap int64) error { + pool.capacityMu.Lock() + defer pool.capacityMu.Unlock() + return pool.setCapacity(ctx, newcap) +} + +// setCapacity is the internal implementation for SetCapacity; it must be called +// with pool.capacityMu being held +func (pool *ConnPool[C]) setCapacity(ctx context.Context, newcap int64) error { if newcap < 0 { panic("negative capacity") } oldcap := pool.capacity.Swap(newcap) if oldcap == newcap { - return + return nil } - backoff := 1 * time.Millisecond + const delay = 10 * time.Millisecond // close connections until we're under capacity for pool.active.Load() > newcap { + if err := ctx.Err(); err != nil { + return vterrors.Errorf(vtrpcpb.Code_ABORTED, + "timed out while waiting for connections to be returned to the pool (capacity=%d, active=%d, borrowed=%d)", + pool.capacity.Load(), pool.active.Load(), pool.borrowed.Load()) + } + // if we're closing down the pool, make sure there's no clients waiting + // for connections because they won't be returned in the future + if newcap == 0 { + pool.wait.expire(true) + } + // try closing from connections which are currently idle in the stacks conn := pool.getFromSettingsStack(nil) if conn == nil { conn, _ = pool.clean.Pop() } if conn == nil { - time.Sleep(backoff) - backoff += 1 * time.Millisecond + time.Sleep(delay) continue } conn.Close() pool.closedConn() } - // if we're closing down the pool, wake up any blocked waiters because no connections - // are going to be returned in the future - if newcap == 0 { - pool.wait.expire(true) - } + return nil } func (pool *ConnPool[C]) closeIdleResources(now time.Time) { @@ -663,6 +717,8 @@ func (pool *ConnPool[C]) RegisterStats(stats *servenv.Exporter, name string) { return } + pool.Name = name + stats.NewGaugeFunc(name+"Capacity", "Tablet server conn pool capacity", func() int64 { return pool.Capacity() }) diff --git a/go/pools/smartconnpool/pool_test.go b/go/pools/smartconnpool/pool_test.go 
index c9c2235d90f..701327005ad 100644 --- a/go/pools/smartconnpool/pool_test.go +++ b/go/pools/smartconnpool/pool_test.go @@ -149,7 +149,7 @@ func TestOpen(t *testing.T) { } // Test that Get waits - ch := make(chan bool) + done := make(chan struct{}) go func() { for i := 0; i < 5; i++ { if i%2 == 0 { @@ -163,14 +163,16 @@ func TestOpen(t *testing.T) { for i := 0; i < 5; i++ { p.put(resources[i]) } - ch <- true + close(done) }() for i := 0; i < 5; i++ { - // Sleep to ensure the goroutine waits - time.Sleep(10 * time.Millisecond) + // block until we have a client wait for a connection, then offer it + for p.wait.waiting() == 0 { + time.Sleep(time.Millisecond) + } p.put(resources[i]) } - <-ch + <-done assert.EqualValues(t, 5, p.Metrics.WaitCount()) assert.Equal(t, 5, len(state.waits)) // verify start times are monotonic increasing @@ -206,13 +208,15 @@ func TestOpen(t *testing.T) { assert.EqualValues(t, 6, state.lastID.Load()) // SetCapacity - p.SetCapacity(3) + err = p.SetCapacity(ctx, 3) + require.NoError(t, err) assert.EqualValues(t, 3, state.open.Load()) assert.EqualValues(t, 6, state.lastID.Load()) assert.EqualValues(t, 3, p.Capacity()) assert.EqualValues(t, 3, p.Available()) - p.SetCapacity(6) + err = p.SetCapacity(ctx, 6) + require.NoError(t, err) assert.EqualValues(t, 6, p.Capacity()) assert.EqualValues(t, 6, p.Available()) @@ -263,7 +267,9 @@ func TestShrinking(t *testing.T) { } done := make(chan bool) go func() { - p.SetCapacity(3) + err := p.SetCapacity(ctx, 3) + require.NoError(t, err) + done <- true }() expected := map[string]any{ @@ -333,7 +339,8 @@ func TestShrinking(t *testing.T) { // This will also wait go func() { - p.SetCapacity(2) + err := p.SetCapacity(ctx, 2) + require.NoError(t, err) done <- true }() time.Sleep(10 * time.Millisecond) @@ -351,7 +358,8 @@ func TestShrinking(t *testing.T) { assert.EqualValues(t, 2, state.open.Load()) // Test race condition of SetCapacity with itself - p.SetCapacity(3) + err = p.SetCapacity(ctx, 3) + 
require.NoError(t, err) for i := 0; i < 3; i++ { var r *Pooled[*TestConn] var err error @@ -373,9 +381,15 @@ func TestShrinking(t *testing.T) { time.Sleep(10 * time.Millisecond) // This will wait till we Put - go p.SetCapacity(2) + go func() { + err := p.SetCapacity(ctx, 2) + require.NoError(t, err) + }() time.Sleep(10 * time.Millisecond) - go p.SetCapacity(4) + go func() { + err := p.SetCapacity(ctx, 4) + require.NoError(t, err) + }() time.Sleep(10 * time.Millisecond) // This should not hang @@ -385,7 +399,7 @@ func TestShrinking(t *testing.T) { <-done assert.Panics(t, func() { - p.SetCapacity(-1) + _ = p.SetCapacity(ctx, -1) }) assert.EqualValues(t, 4, p.Capacity()) @@ -528,6 +542,46 @@ func TestReopen(t *testing.T) { assert.EqualValues(t, 0, state.open.Load()) } +func TestUserClosing(t *testing.T) { + var state TestState + + ctx := context.Background() + p := NewPool(&Config[*TestConn]{ + Capacity: 5, + IdleTimeout: time.Second, + LogWait: state.LogWait, + }).Open(newConnector(&state), nil) + + var resources [5]*Pooled[*TestConn] + for i := 0; i < 5; i++ { + var err error + resources[i], err = p.Get(ctx, nil) + require.NoError(t, err) + } + + for _, r := range resources[:4] { + r.Recycle() + } + + ch := make(chan error) + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + err := p.CloseWithContext(ctx) + ch <- err + close(ch) + }() + + select { + case <-time.After(5 * time.Second): + t.Fatalf("Pool did not shutdown after 5s") + case err := <-ch: + require.Error(t, err) + t.Logf("Shutdown error: %v", err) + } +} + func TestIdleTimeout(t *testing.T) { testTimeout := func(t *testing.T, setting *Setting) { var state TestState @@ -816,7 +870,7 @@ func TestTimeout(t *testing.T) { newctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) _, err = p.Get(newctx, setting) cancel() - assert.EqualError(t, err, "resource pool timed out") + assert.EqualError(t, err, "connection pool timed out") } @@ -840,7 +894,7 
@@ func TestExpired(t *testing.T) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1*time.Second)) _, err := p.Get(ctx, setting) cancel() - require.EqualError(t, err, "resource pool context already expired") + require.EqualError(t, err, "connection pool context already expired") } } diff --git a/go/protoutil/binlogsource.go b/go/protoutil/binlogsource.go new file mode 100644 index 00000000000..385f472c202 --- /dev/null +++ b/go/protoutil/binlogsource.go @@ -0,0 +1,60 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protoutil + +import ( + "slices" + "sort" + "strings" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +// SortBinlogSourceTables sorts the table related contents of the +// BinlogSource struct lexicographically by table name in order to +// produce consistent results. +func SortBinlogSourceTables(bls *binlogdatapb.BinlogSource) { + if bls == nil { + return + } + + // Sort the tables by name to ensure a consistent order. + slices.Sort(bls.Tables) + + if bls.Filter == nil || len(bls.Filter.Rules) == 0 { + return + } + sort.Slice(bls.Filter.Rules, func(i, j int) bool { + // Exclude filters should logically be processed first. 
+ if bls.Filter.Rules[i].Filter == "exclude" && bls.Filter.Rules[j].Filter != "exclude" { + return true + } + if bls.Filter.Rules[j].Filter == "exclude" && bls.Filter.Rules[i].Filter != "exclude" { + return false + } + + // Remove preceding slash from the match string. + // That is used when the filter is a regular expression. + fi, _ := strings.CutPrefix(bls.Filter.Rules[i].Match, "/") + fj, _ := strings.CutPrefix(bls.Filter.Rules[j].Match, "/") + if fi != fj { + return fi < fj + } + + return bls.Filter.Rules[i].Filter < bls.Filter.Rules[j].Filter + }) +} diff --git a/go/protoutil/binlogsource_test.go b/go/protoutil/binlogsource_test.go new file mode 100644 index 00000000000..fe5564535bd --- /dev/null +++ b/go/protoutil/binlogsource_test.go @@ -0,0 +1,209 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package protoutil + +import ( + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +func TestSortBinlogSourceTables(t *testing.T) { + tests := []struct { + name string + inSource *binlogdatapb.BinlogSource + outSource *binlogdatapb.BinlogSource + }{ + { + name: "Basic", + inSource: &binlogdatapb.BinlogSource{ + Tables: []string{"wuts1", "atable", "1table", "ztable2", "table3"}, + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: "ztable2", + }, + { + Match: "table3", + }, + { + Match: "/wuts", + }, + { + Match: "1table", + Filter: "a", + }, + { + Match: "1table", + }, + { + Match: "atable", + }, + }, + }, + }, + outSource: &binlogdatapb.BinlogSource{ + Tables: []string{"1table", "atable", "table3", "wuts1", "ztable2"}, + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: "1table", + }, + { + Match: "1table", + Filter: "a", + }, + { + Match: "atable", + }, + { + Match: "table3", + }, + { + Match: "/wuts", + }, + { + Match: "ztable2", + }, + }, + }, + }, + }, + { + name: "With excludes", + inSource: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: "./*", + }, + { + Match: "no4", + Filter: "exclude", + }, + { + Match: "no2", + Filter: "exclude", + }, + { + Match: "ztable2", + }, + { + Match: "atable2", + }, + }, + }, + }, + outSource: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: "no2", + Filter: "exclude", + }, + { + Match: "no4", + Filter: "exclude", + }, + { + Match: "./*", + }, + { + Match: "atable2", + }, + { + Match: "ztable2", + }, + }, + }, + }, + }, + { + name: "With excludes", + inSource: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: "no4", + Filter: "exclude", + }, + { + Match: "no2", + Filter: "exclude", + }, + { + Match: "./*", + }, + 
}, + }, + }, + outSource: &binlogdatapb.BinlogSource{ + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: "no2", + Filter: "exclude", + }, + { + Match: "no4", + Filter: "exclude", + }, + { + Match: "./*", + }, + }, + }, + }, + }, + { + name: "Nil", + inSource: nil, + outSource: nil, + }, + { + name: "No filter", + inSource: &binlogdatapb.BinlogSource{ + Tables: []string{"wuts1", "atable", "1table", "ztable2", "table3"}, + Filter: nil, + }, + outSource: &binlogdatapb.BinlogSource{ + Tables: []string{"1table", "atable", "table3", "wuts1", "ztable2"}, + Filter: nil, + }, + }, + { + name: "No filter rules", + inSource: &binlogdatapb.BinlogSource{ + Tables: []string{"wuts1", "atable", "1table", "ztable2", "table3"}, + Filter: &binlogdatapb.Filter{}, + }, + outSource: &binlogdatapb.BinlogSource{ + Tables: []string{"1table", "atable", "table3", "wuts1", "ztable2"}, + Filter: &binlogdatapb.Filter{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + SortBinlogSourceTables(tt.inSource) + require.True(t, proto.Equal(tt.inSource, tt.outSource), "got: %s, want: %s", tt.inSource.String(), tt.outSource.String()) + }) + } +} diff --git a/go/protoutil/duration_test.go b/go/protoutil/duration_test.go index 20f01482563..9f72a439910 100644 --- a/go/protoutil/duration_test.go +++ b/go/protoutil/duration_test.go @@ -26,7 +26,6 @@ import ( ) func TestDurationFromProto(t *testing.T) { - t.Parallel() tests := []struct { name string @@ -59,13 +58,30 @@ func TestDurationFromProto(t *testing.T) { isOk: true, shouldErr: true, }, + { + name: "nanoseconds", + in: &vttime.Duration{ + Seconds: 1, + Nanos: 500000000, + }, + expected: time.Second + 500*time.Millisecond, + isOk: true, + shouldErr: false, + }, + { + name: "out of range nanoseconds", + in: &vttime.Duration{ + Seconds: -1, + Nanos: 500000000, + }, + expected: 0, + isOk: true, + shouldErr: true, + }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - 
t.Parallel() actual, ok, err := DurationFromProto(tt.in) if tt.shouldErr { @@ -80,3 +96,36 @@ func TestDurationFromProto(t *testing.T) { }) } } + +func TestDurationToProto(t *testing.T) { + + tests := []struct { + name string + in time.Duration + expected *vttime.Duration + }{ + { + name: "success", + in: time.Second * 1000, + expected: &vttime.Duration{Seconds: 1000}, + }, + { + name: "zero duration", + in: 0, + expected: &vttime.Duration{}, + }, + { + name: "nanoseconds", + in: time.Second + 500*time.Millisecond, + expected: &vttime.Duration{Seconds: 1, Nanos: 500000000}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + actual := DurationToProto(tt.in) + assert.Equal(t, tt.expected, actual) + }) + } +} diff --git a/go/ptr/ptr.go b/go/ptr/ptr.go new file mode 100644 index 00000000000..8fd7f6c0bf9 --- /dev/null +++ b/go/ptr/ptr.go @@ -0,0 +1,31 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ptr + +// Of returns a pointer to the given value +func Of[T any](x T) *T { + return &x +} + +// Unwrap dereferences the given pointer if it's not nil. 
+// Otherwise, it returns default_ +func Unwrap[T any](x *T, default_ T) T { + if x != nil { + return *x + } + return default_ +} diff --git a/go/ratelimiter/ratelimiter.go b/go/ratelimiter/ratelimiter.go deleted file mode 100644 index ddadb8659da..00000000000 --- a/go/ratelimiter/ratelimiter.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package ratelimiter implements rate limiting functionality. -package ratelimiter - -import ( - "sync" - "time" -) - -// RateLimiter was inspired by https://github.com/golang/go/wiki/RateLimiting. -// However, the go example is not good for setting high qps limits because -// it will cause the ticker to fire too often. Also, the ticker will continue -// to fire when the system is idle. This new Ratelimiter achieves the same thing, -// but by using just counters with no tickers or channels. -type RateLimiter struct { - maxCount int - interval time.Duration - - mu sync.Mutex - curCount int - lastTime time.Time -} - -// NewRateLimiter creates a new RateLimiter. maxCount is the max burst allowed -// while interval specifies the duration for a burst. The effective rate limit is -// equal to maxCount/interval. For example, if you want to a max QPS of 5000, -// and want to limit bursts to no more than 500, you'd specify a maxCount of 500 -// and an interval of 100*time.Millilsecond. 
-func NewRateLimiter(maxCount int, interval time.Duration) *RateLimiter { - return &RateLimiter{ - maxCount: maxCount, - interval: interval, - } -} - -// Allow returns true if a request is within the rate limit norms. -// Otherwise, it returns false. -func (rl *RateLimiter) Allow() bool { - rl.mu.Lock() - defer rl.mu.Unlock() - if time.Since(rl.lastTime) < rl.interval { - if rl.curCount > 0 { - rl.curCount-- - return true - } - return false - } - rl.curCount = rl.maxCount - 1 - rl.lastTime = time.Now() - return true -} diff --git a/go/ratelimiter/ratelimiter_test.go b/go/ratelimiter/ratelimiter_test.go deleted file mode 100644 index 768584b20f7..00000000000 --- a/go/ratelimiter/ratelimiter_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ratelimiter - -import ( - "testing" - "time" -) - -func TestLimiter1(t *testing.T) { - rl := NewRateLimiter(1, 10*time.Millisecond) - result := rl.Allow() - if !result { - t.Error("Allow: false, want true") - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } - - time.Sleep(11 * time.Millisecond) - result = rl.Allow() - if !result { - t.Error("Allow: false, want true") - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } -} - -func TestLimiter2(t *testing.T) { - rl := NewRateLimiter(2, 10*time.Millisecond) - var result bool - for i := 0; i < 2; i++ { - result = rl.Allow() - if !result { - t.Errorf("Allow(%d): false, want true", i) - } - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } - - time.Sleep(11 * time.Millisecond) - for i := 0; i < 2; i++ { - result = rl.Allow() - if !result { - t.Errorf("Allow(%d): false, want true", i) - } - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } -} diff --git a/go/sets/set_test.go b/go/sets/set_test.go new file mode 100644 index 00000000000..db453727d6c --- /dev/null +++ b/go/sets/set_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sets + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsert(t *testing.T) { + testSet := New[int](1, 2, 3) + testSet.Insert(4, 5, 6) + compareSet := New[int](1, 2, 3, 4, 5, 6) + assert.Equal(t, testSet, compareSet) +} + +func TestDelete(t *testing.T) { + testSet := New[int](1, 2, 3, 4, 5, 6) + testSet.Delete(1, 5) + compareSet := New[int](2, 3, 4, 6) + assert.Equal(t, testSet, compareSet) + testSet.Delete(2, 3, 4, 6) + assert.Empty(t, testSet) +} + +func TestHas(t *testing.T) { + testSet := New[int](1, 2, 3) + assert.True(t, testSet.Has(3)) + assert.False(t, testSet.Has(-1)) +} + +func TestHasAny(t *testing.T) { + testSet := New[int](1, 2, 3) + assert.True(t, testSet.HasAny(1, 10, 11)) + assert.False(t, testSet.HasAny(-1, 10, 11)) +} + +func TestDifference(t *testing.T) { + testSet := New[int](1, 2, 3) + compareSet := New[int](-1, -2, 1, 2, 3) + diffSet := New[int](-1, -2) + assert.Equal(t, diffSet, compareSet.Difference(testSet)) +} + +func TestIntersection(t *testing.T) { + setA := New[int](1, 2, 3) + setB := New[int](1, 2, 8, 9, 10) + expectedSet := New[int](1, 2) + assert.Equal(t, expectedSet, setA.Intersection(setB)) +} + +func TestEqual(t *testing.T) { + testSet := New[int](1, 2, 3, 4, 5, 6) + compareSet := New[int](1, 2, 3, 4, 5, 6) + assert.True(t, testSet.Equal(compareSet)) + compareSet.Insert(-1, -2) + assert.False(t, testSet.Equal(compareSet)) +} + +func TestLen(t *testing.T) { + testSet := New[int](1, 2, 3) + assert.Equal(t, testSet.Len(), 3) +} + +func TestList(t *testing.T) { + testSet := New[string]("a string", "testing", "Capital", "34") + list := List(testSet) + require.EqualValues(t, []string{"34", "Capital", "a string", "testing"}, list) +} diff --git a/go/slice/slice_test.go b/go/slice/slice_test.go new file mode 100644 index 00000000000..d079ac51b9b --- /dev/null +++ b/go/slice/slice_test.go @@ -0,0 +1,198 @@ +/* +Copyright 2024 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slice + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAll(t *testing.T) { + testCases := []struct { + name string + input []int + fn func(int) bool + expected bool + }{ + { + name: "EmptySlice", + input: []int{}, + fn: func(i int) bool { return i > 0 }, + expected: true, + }, + { + name: "AllElementsTrue", + input: []int{1, 2, 3}, + fn: func(i int) bool { return i > 0 }, + expected: true, + }, + { + name: "SomeElementsFalse", + input: []int{1, 2, 0}, + fn: func(i int) bool { return i > 0 }, + expected: false, + }, + { + name: "SingleElementFalse", + input: []int{0}, + fn: func(i int) bool { return i > 0 }, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := All(tc.input, tc.fn) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestAny(t *testing.T) { + testCases := []struct { + name string + input []int + fn func(int) bool + expected bool + }{ + { + name: "ReturnsTrue", + input: []int{1, 2, 3, 4, 5}, + fn: func(n int) bool { return n == 3 }, + expected: true, + }, + { + name: "EmptySliceInput", + input: []int{}, + fn: func(n int) bool { return n > 0 }, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := Any(tc.input, tc.fn) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestMap(t *testing.T) { + testCases := []struct { + name string + input 
[]int + fn func(int) int + expected []int + }{ + { + name: "AppliesFunction", + input: []int{1, 2, 3, 4, 5}, + fn: func(n int) int { return n * 2 }, + expected: []int{2, 4, 6, 8, 10}, + }, + { + name: "EmptySliceInput", + input: nil, + fn: func(n int) int { return n }, + expected: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := Map(tc.input, tc.fn) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestFilter(t *testing.T) { + testCases := []struct { + name string + input []int + fn func(int) bool + expected []int + }{ + { + name: "Filtering", + input: []int{1, 2, 3, 4, 5}, + fn: func(i int) bool { return i%2 == 0 }, + expected: []int{2, 4}, + }, + { + name: "AllElementsFilteredOut", + input: []int{1, 3, 5}, + fn: func(i int) bool { return i%2 == 0 }, + expected: []int{}, + }, + { + name: "EmptySliceInput", + input: nil, + fn: func(n int) bool { return n > 0 }, + expected: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := Filter(tc.input, tc.fn) + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestMapWithError(t *testing.T) { + testCases := []struct { + name string + input []int + fn func(int) (int, error) + expected []int + expectedErr string + }{ + { + name: "SuccessfulMapping", + input: []int{1, 2, 3}, + fn: func(i int) (int, error) { return i * 2, nil }, + expected: []int{2, 4, 6}, + }, + { + name: "ErrorReturned", + input: []int{1, 2, 3}, + fn: func(i int) (int, error) { return 0, errors.New("error") }, + expectedErr: "error", + }, + { + name: "EmptySliceInput", + input: nil, + fn: func(n int) (int, error) { return n, nil }, + expected: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result, err := MapWithError(tc.input, tc.fn) + if tc.expectedErr != "" { + assert.EqualError(t, err, tc.expectedErr) + } else { + assert.Equal(t, tc.expected, result) + } + }) + } +} diff --git a/go/sqlescape/ids.go 
b/go/sqlescape/ids.go index 3983db13362..a70d48c1cd2 100644 --- a/go/sqlescape/ids.go +++ b/go/sqlescape/ids.go @@ -14,6 +14,7 @@ limitations under the License. package sqlescape import ( + "fmt" "strings" ) @@ -52,11 +53,65 @@ func EscapeIDs(identifiers []string) []string { return result } -// UnescapeID reverses any backticking in the input string. -func UnescapeID(in string) string { +// UnescapeID reverses any backticking in the input string by EscapeID. +func UnescapeID(in string) (string, error) { l := len(in) - if l >= 2 && in[0] == '`' && in[l-1] == '`' { - return in[1 : l-1] + + if l == 0 || in == "``" { + return "", fmt.Errorf("UnescapeID err: invalid input identifier '%s'", in) + + } + + if l == 1 { + if in[0] == '`' { + return "", fmt.Errorf("UnescapeID err: invalid input identifier '`'") + } + return in, nil + } + + first, last := in[0], in[l-1] + + if first == '`' && last != '`' { + return "", fmt.Errorf("UnescapeID err: unexpected single backtick at position %d in '%s'", 0, in) + } + if first != '`' && last == '`' { + return "", fmt.Errorf("UnescapeID err: unexpected single backtick at position %d in '%s'", l, in) + } + if first != '`' && last != '`' { + if idx := strings.IndexByte(in, '`'); idx != -1 { + return "", fmt.Errorf("UnescapeID err: no outer backticks found in the identifier '%s'", in) + } + return in, nil + } + + in = in[1 : l-1] + + if idx := strings.IndexByte(in, '`'); idx == -1 { + return in, nil + } + + var buf strings.Builder + buf.Grow(len(in)) + + for i := 0; i < len(in); i++ { + buf.WriteByte(in[i]) + + if i < len(in)-1 && in[i] == '`' { + if in[i+1] == '`' { + i++ // halves the number of backticks + } else { + return "", fmt.Errorf("UnescapeID err: unexpected single backtick at position %d in '%s'", i, in) + } + } + } + + return buf.String(), nil +} + +func EnsureEscaped(in string) (string, error) { + out, err := UnescapeID(in) + if err != nil { + return "", err } - return in + return EscapeID(out), nil } diff --git 
a/go/sqlescape/ids_test.go b/go/sqlescape/ids_test.go index a2d2e69be6f..67ae233d7d6 100644 --- a/go/sqlescape/ids_test.go +++ b/go/sqlescape/ids_test.go @@ -14,7 +14,11 @@ limitations under the License. package sqlescape import ( + "fmt" "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestEscapeID(t *testing.T) { @@ -26,12 +30,185 @@ func TestEscapeID(t *testing.T) { }, { in: "a`a", out: "`a``a`", + }, { + in: "a a", + out: "`a a`", + }, { + in: "`fo`o`", + out: "```fo``o```", + }, { + in: "", + out: "``", }} for _, tc := range testcases { - out := EscapeID(tc.in) - if out != tc.out { - t.Errorf("EscapeID(%s): %s, want %s", tc.in, out, tc.out) - } + t.Run(tc.in, func(t *testing.T) { + out := EscapeID(tc.in) + assert.Equal(t, out, tc.out) + }) + } +} + +func TestUnescapeID(t *testing.T) { + testcases := []struct { + in, out string + err bool + }{ + { + in: "``", + out: "", + err: true, + }, + { + in: "a", + out: "a", + err: false, + }, + { + in: "`aa`", + out: "aa", + err: false, + }, + { + in: "`a``a`", + out: "a`a", + err: false, + }, + { + in: "`foo", + out: "", + err: true, + }, + { + in: "foo`", + out: "", + err: true, + }, + { + in: "`fo`o", + out: "", + err: true, + }, + { + in: "`fo`o`", + out: "", + err: true, + }, + { + in: "``fo``o``", + out: "", + err: true, + }, + { + in: "```fo``o```", + out: "`fo`o`", + err: false, + }, + { + in: "```fo`o```", + out: "", + err: true, + }, + { + in: "foo", + out: "foo", + err: false, + }, + { + in: "f`oo", + out: "", + err: true, + }, + { + in: "", + out: "", + err: true, + }, + { + in: "`", + out: "", + err: true, + }, + } + for _, tc := range testcases { + t.Run(tc.in, func(t *testing.T) { + out, err := UnescapeID(tc.in) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.out, out, "output mismatch") + } + }) + } +} + +func TestEnsureEscaped(t *testing.T) { + tt := []struct { + in string + out string + err bool + }{ + { + 
in: "", + out: "", + err: true, + }, + { + in: "foo", + out: "`foo`", + err: false, + }, + { + in: "`foo`", + out: "`foo`", + err: false, + }, + { + in: "```fo``o```", + out: "```fo``o```", + err: false, + }, + { + in: "`fo``o`", + out: "`fo``o`", + err: false, + }, + { + in: "f`oo", + out: "", + err: true, + }, + { + in: "`fo`o", + out: "", + err: true, + }, + { + in: "`foo", + out: "", + err: true, + }, + { + in: "foo`", + out: "", + err: true, + }, + { + in: "`fo`o`", + out: "", + err: true, + }, + } + for _, tc := range tt { + t.Run(tc.in, func(t *testing.T) { + out, err := EnsureEscaped(tc.in) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.out, out, "output mismatch") + } + }) } } @@ -41,6 +218,7 @@ func BenchmarkEscapeID(b *testing.B) { testcases := []string{ "aa", "a`a", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", } + for _, tc := range testcases { name := tc if len(name) > 10 { @@ -53,3 +231,30 @@ func BenchmarkEscapeID(b *testing.B) { }) } } + +func TestEscapeIDs(t *testing.T) { + testCases := []struct { + input []string + expected []string + }{ + { + input: []string{"abc", "def", "ghi"}, + expected: []string{"`abc`", "`def`", "`ghi`"}, + }, + { + input: []string{"abc", "a`a", "`ghi`"}, + expected: []string{"`abc`", "`a``a`", "```ghi```"}, + }, + { + input: []string{}, + expected: []string{}, + }, + } + + for _, tt := range testCases { + t.Run(fmt.Sprintf("%v", tt.input), func(t *testing.T) { + out := EscapeIDs(tt.input) + assert.Equal(t, tt.expected, out) + }) + } +} diff --git a/go/sqltypes/bind_variables.go b/go/sqltypes/bind_variables.go index 18beda37702..9b8969bc814 100644 --- a/go/sqltypes/bind_variables.go +++ b/go/sqltypes/bind_variables.go @@ -17,10 +17,10 @@ limitations under the License. 
package sqltypes import ( - "bytes" "errors" "fmt" "strconv" + "strings" "google.golang.org/protobuf/proto" @@ -418,7 +418,7 @@ func FormatBindVariables(bindVariables map[string]*querypb.BindVariable, full, a } if asJSON { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("{") first := true for k, v := range out { diff --git a/go/sqltypes/bind_variables_test.go b/go/sqltypes/bind_variables_test.go index 77b3381f751..99a9e2f2ef3 100644 --- a/go/sqltypes/bind_variables_test.go +++ b/go/sqltypes/bind_variables_test.go @@ -18,7 +18,6 @@ package sqltypes import ( "fmt" - "strings" "testing" "github.com/stretchr/testify/assert" @@ -92,15 +91,10 @@ func TestBuildBindVariables(t *testing.T) { }} for _, tcase := range tcases { bindVars, err := BuildBindVariables(tcase.in) - if err != nil { - if err.Error() != tcase.err { - t.Errorf("MapToBindVars(%v) error: %v, want %s", tcase.in, err, tcase.err) - } - continue - } - if tcase.err != "" { - t.Errorf("MapToBindVars(%v) error: nil, want %s", tcase.in, tcase.err) - continue + if tcase.err == "" { + assert.NoError(t, err) + } else { + assert.ErrorContains(t, err, tcase.err) } if !BindVariablesEqual(bindVars, tcase.out) { t.Errorf("MapToBindVars(%v): %v, want %s", tcase.in, bindVars, tcase.out) @@ -371,14 +365,10 @@ func TestValidateBindVarables(t *testing.T) { for _, tcase := range tcases { err := ValidateBindVariables(tcase.in) if tcase.err != "" { - if err == nil || err.Error() != tcase.err { - t.Errorf("ValidateBindVars(%v): %v, want %s", tcase.in, err, tcase.err) - } + assert.ErrorContains(t, err, tcase.err) continue } - if err != nil { - t.Errorf("ValidateBindVars(%v): %v, want nil", tcase.in, err) - } + assert.NoError(t, err) } } @@ -582,22 +572,16 @@ func TestValidateBindVariable(t *testing.T) { for _, tcase := range testcases { err := ValidateBindVariable(tcase.in) if tcase.err != "" { - if err == nil || !strings.Contains(err.Error(), tcase.err) { - t.Errorf("ValidateBindVar(%v) error: %v, must contain 
%v", tcase.in, err, tcase.err) - } + assert.ErrorContains(t, err, tcase.err) continue } - if err != nil { - t.Errorf("ValidateBindVar(%v) error: %v", tcase.in, err) - } + assert.NoError(t, err) } // Special case: nil bind var. err := ValidateBindVariable(nil) want := "bind variable is nil" - if err == nil || err.Error() != want { - t.Errorf("ValidateBindVar(nil) error: %v, want %s", err, want) - } + assert.ErrorContains(t, err, want) } func TestBindVariableToValue(t *testing.T) { @@ -633,19 +617,13 @@ func TestBindVariablesEqual(t *testing.T) { Value: []byte("1"), }, } - if !BindVariablesEqual(bv1, bv2) { - t.Errorf("%v != %v, want equal", bv1, bv2) - } - if !BindVariablesEqual(bv1, bv3) { - t.Errorf("%v = %v, want not equal", bv1, bv3) - } + assert.True(t, BindVariablesEqual(bv1, bv2)) + assert.True(t, BindVariablesEqual(bv1, bv3)) } func TestBindVariablesFormat(t *testing.T) { tupleBindVar, err := BuildBindVariable([]int64{1, 2}) - if err != nil { - t.Fatalf("failed to create a tuple bind var: %v", err) - } + require.NoError(t, err, "failed to create a tuple bind var") bindVariables := map[string]*querypb.BindVariable{ "key_1": StringBindVariable("val_1"), @@ -655,68 +633,38 @@ func TestBindVariablesFormat(t *testing.T) { } formattedStr := FormatBindVariables(bindVariables, true /* full */, false /* asJSON */) - if !strings.Contains(formattedStr, "key_1") || - !strings.Contains(formattedStr, "val_1") { - t.Fatalf("bind variable 'key_1': 'val_1' is not formatted") - } - if !strings.Contains(formattedStr, "key_2") || - !strings.Contains(formattedStr, "789") { - t.Fatalf("bind variable 'key_2': '789' is not formatted") - } - if !strings.Contains(formattedStr, "key_3") || !strings.Contains(formattedStr, "val_3") { - t.Fatalf("bind variable 'key_3': 'val_3' is not formatted") - } - if !strings.Contains(formattedStr, "key_4:type:TUPLE") { - t.Fatalf("bind variable 'key_4': (1, 2) is not formatted") - } + assert.Contains(t, formattedStr, "key_1") + assert.Contains(t, 
formattedStr, "val_1") - formattedStr = FormatBindVariables(bindVariables, false /* full */, false /* asJSON */) - if !strings.Contains(formattedStr, "key_1") { - t.Fatalf("bind variable 'key_1' is not formatted") - } - if !strings.Contains(formattedStr, "key_2") || - !strings.Contains(formattedStr, "789") { - t.Fatalf("bind variable 'key_2': '789' is not formatted") - } - if !strings.Contains(formattedStr, "key_3") || !strings.Contains(formattedStr, "5 bytes") { - t.Fatalf("bind variable 'key_3' is not formatted") - } - if !strings.Contains(formattedStr, "key_4") || !strings.Contains(formattedStr, "2 items") { - t.Fatalf("bind variable 'key_4' is not formatted") - } + assert.Contains(t, formattedStr, "key_2") + assert.Contains(t, formattedStr, "789") - formattedStr = FormatBindVariables(bindVariables, true /* full */, true /* asJSON */) - t.Logf("%q", formattedStr) - if !strings.Contains(formattedStr, "\"key_1\": {\"type\": \"VARCHAR\", \"value\": \"val_1\"}") { - t.Fatalf("bind variable 'key_1' is not formatted") - } + assert.Contains(t, formattedStr, "key_3") + assert.Contains(t, formattedStr, "val_3") - if !strings.Contains(formattedStr, "\"key_2\": {\"type\": \"INT64\", \"value\": 789}") { - t.Fatalf("bind variable 'key_2' is not formatted") - } + assert.Contains(t, formattedStr, "key_4:type:TUPLE") - if !strings.Contains(formattedStr, "\"key_3\": {\"type\": \"VARBINARY\", \"value\": \"val_3\"}") { - t.Fatalf("bind variable 'key_3' is not formatted") - } + formattedStr = FormatBindVariables(bindVariables, false /* full */, false /* asJSON */) + assert.Contains(t, formattedStr, "key_1") - if !strings.Contains(formattedStr, "\"key_4\": {\"type\": \"TUPLE\", \"value\": \"\"}") { - t.Fatalf("bind variable 'key_4' is not formatted") - } + assert.Contains(t, formattedStr, "key_2") + assert.Contains(t, formattedStr, "789") - formattedStr = FormatBindVariables(bindVariables, false /* full */, true /* asJSON */) - if !strings.Contains(formattedStr, "\"key_1\": 
{\"type\": \"VARCHAR\", \"value\": \"5 bytes\"}") { - t.Fatalf("bind variable 'key_1' is not formatted") - } + assert.Contains(t, formattedStr, "key_3") + assert.Contains(t, formattedStr, "5 bytes") - if !strings.Contains(formattedStr, "\"key_2\": {\"type\": \"INT64\", \"value\": 789}") { - t.Fatalf("bind variable 'key_2' is not formatted") - } + assert.Contains(t, formattedStr, "key_4") + assert.Contains(t, formattedStr, "2 items") - if !strings.Contains(formattedStr, "\"key_3\": {\"type\": \"VARCHAR\", \"value\": \"5 bytes\"}") { - t.Fatalf("bind variable 'key_3' is not formatted") - } + formattedStr = FormatBindVariables(bindVariables, true /* full */, true /* asJSON */) + assert.Contains(t, formattedStr, "\"key_1\": {\"type\": \"VARCHAR\", \"value\": \"val_1\"}") + assert.Contains(t, formattedStr, "\"key_2\": {\"type\": \"INT64\", \"value\": 789}") + assert.Contains(t, formattedStr, "\"key_3\": {\"type\": \"VARBINARY\", \"value\": \"val_3\"}") + assert.Contains(t, formattedStr, "\"key_4\": {\"type\": \"TUPLE\", \"value\": \"\"}") - if !strings.Contains(formattedStr, "\"key_4\": {\"type\": \"VARCHAR\", \"value\": \"2 items\"}") { - t.Fatalf("bind variable 'key_4' is not formatted") - } + formattedStr = FormatBindVariables(bindVariables, false /* full */, true /* asJSON */) + assert.Contains(t, formattedStr, "\"key_1\": {\"type\": \"VARCHAR\", \"value\": \"5 bytes\"}") + assert.Contains(t, formattedStr, "\"key_2\": {\"type\": \"INT64\", \"value\": 789}") + assert.Contains(t, formattedStr, "\"key_3\": {\"type\": \"VARCHAR\", \"value\": \"5 bytes\"}") + assert.Contains(t, formattedStr, "\"key_4\": {\"type\": \"VARCHAR\", \"value\": \"2 items\"}") } diff --git a/go/sqltypes/named_result_test.go b/go/sqltypes/named_result_test.go index 8c9c32554da..0d7651ba2ac 100644 --- a/go/sqltypes/named_result_test.go +++ b/go/sqltypes/named_result_test.go @@ -20,12 +20,15 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
querypb "vitess.io/vitess/go/vt/proto/query" ) func TestToNamedResult(t *testing.T) { + require.Nil(t, ToNamedResult(nil)) + in := &Result{ Fields: []*querypb.Field{{ Name: "id", @@ -49,6 +52,7 @@ func TestToNamedResult(t *testing.T) { for i := range in.Rows { require.Equal(t, in.Rows[i][0], named.Rows[i]["id"]) require.Equal(t, int64(i), named.Rows[i].AsInt64("id", 0)) + require.Equal(t, int32(i), named.Rows[i].AsInt32("id", 0)) require.Equal(t, in.Rows[i][1], named.Rows[i]["status"]) require.Equal(t, fmt.Sprintf("s%d", i), named.Rows[i].AsString("status", "notfound")) @@ -57,3 +61,162 @@ func TestToNamedResult(t *testing.T) { require.Equal(t, uint64(i), named.Rows[i].AsUint64("uid", 0)) } } + +func TestToNumericTypes(t *testing.T) { + row := RowNamedValues{ + "test": Value{ + val: []byte("0x1234"), + }, + } + tests := []struct { + name string + fieldName string + expectedErr string + }{ + { + name: "random fieldName", + fieldName: "random", + expectedErr: "No such field in RowNamedValues", + }, + { + name: "right fieldName", + fieldName: "test", + expectedErr: "Cannot convert value to desired type", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := row.ToInt(tt.fieldName) + if tt.expectedErr != "" { + require.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + + _, err = row.ToInt32(tt.fieldName) + if tt.expectedErr != "" { + require.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + + _, err = row.ToInt64(tt.fieldName) + if tt.expectedErr != "" { + require.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + + _, err = row.ToUint64(tt.fieldName) + if tt.expectedErr != "" { + require.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + + _, err = row.ToFloat64(tt.fieldName) + if tt.expectedErr != "" { + require.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + + _, err = 
row.ToBool(tt.fieldName) + if tt.expectedErr != "" { + require.ErrorContains(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestToBytes(t *testing.T) { + row := RowNamedValues{ + "test": Value{ + val: []byte("0x1234"), + }, + } + + _, err := row.ToBytes("random") + require.ErrorContains(t, err, "No such field in RowNamedValues") + + val, err := row.ToBytes("test") + require.NoError(t, err) + require.Equal(t, []byte{0x30, 0x78, 0x31, 0x32, 0x33, 0x34}, val) +} + +func TestRow(t *testing.T) { + row := RowNamedValues{} + tests := []struct { + name string + res *NamedResult + expectedRow RowNamedValues + }{ + { + name: "empty results", + res: &NamedResult{}, + expectedRow: nil, + }, + { + name: "non-empty results", + res: &NamedResult{ + Rows: []RowNamedValues{row}, + }, + expectedRow: row, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expectedRow, tt.res.Row()) + }) + } +} + +func TestAsBool(t *testing.T) { + row := RowNamedValues{ + "testFalse": MakeTrusted(Int64, []byte("0")), + "testTrue": MakeTrusted(Int64, []byte("1")), + } + + r := row.AsBool("testFalse", true) + assert.False(t, r) + + r = row.AsBool("testTrue", false) + assert.True(t, r) + + r = row.AsBool("invalidField", true) + assert.True(t, r) +} + +func TestAsBytes(t *testing.T) { + row := RowNamedValues{ + "testField": MakeTrusted(Int64, []byte("1002")), + } + + r := row.AsBytes("testField", []byte("default")) + assert.Equal(t, []byte("1002"), r) + + r = row.AsBytes("invalidField", []byte("default")) + assert.Equal(t, []byte("default"), r) + +} + +func TestAsFloat64(t *testing.T) { + row := RowNamedValues{ + "testField": MakeTrusted(Int64, []byte("1002")), + "testField2": MakeTrusted(Float64, []byte("10.02")), + } + + r := row.AsFloat64("testField", 23.12) + assert.Equal(t, float64(1002), r) + + r = row.AsFloat64("testField2", 23.12) + assert.Equal(t, 10.02, r) + + r = row.AsFloat64("invalidField", 23.12) + 
assert.Equal(t, 23.12, r) + +} diff --git a/go/sqltypes/parse_rows_test.go b/go/sqltypes/parse_rows_test.go index a32f2fd35b0..45c55da019b 100644 --- a/go/sqltypes/parse_rows_test.go +++ b/go/sqltypes/parse_rows_test.go @@ -168,20 +168,123 @@ func TestRowParsing(t *testing.T) { } func TestRowsEquals(t *testing.T) { - var cases = []struct { + tests := []struct { + name string left, right string + expectedErr string }{ - {"[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]"}, + { + name: "Both equal", + left: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", + right: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", + }, + { + name: "length mismatch", + left: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", + right: "[[INT64(2)] [INT64(2)] [INT64(1)]]", + expectedErr: "results differ: expected 4 rows in result, got 3\n\twant: [[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]\n\tgot: [[INT64(2)] [INT64(2)] [INT64(1)]]", + }, + { + name: "elements mismatch", + left: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", + right: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(4)]]", + expectedErr: "results differ: row [INT64(1)] is missing from result\n\twant: [[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]\n\tgot: [[INT64(1)] [INT64(2)] [INT64(2)] [INT64(4)]]", + }, } - for _, tc := range cases { - left, err := ParseRows(tc.left) - require.NoError(t, err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + left, err := ParseRows(tt.left) + require.NoError(t, err) - right, err := ParseRows(tc.right) - require.NoError(t, err) + right, err := ParseRows(tt.right) + require.NoError(t, err) - err = RowsEquals(left, right) - require.NoError(t, err) + err = RowsEquals(left, right) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} + +func TestRowsEqualStr(t *testing.T) { + tests := []struct { + name string + want string + got []Row + expectedErr string + }{ + 
{ + name: "Unknown type", + want: "[[RANDOM(1)]]", + got: []Row{ + { + NewInt64(1), + }, + }, + expectedErr: "malformed row assertion: unknown SQL type \"RANDOM\" at :1:3", + }, + { + name: "Invalid row", + want: "[[INT64(1]]", + got: []Row{ + { + NewInt64(1), + }, + }, + expectedErr: "malformed row assertion: unexpected token ']' at :1:10", + }, + { + name: "Both equal", + want: "[[INT64(1)]]", + got: []Row{ + { + NewInt64(1), + }, + }, + }, + { + name: "length mismatch", + want: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", + got: []Row{ + { + NewInt64(1), + }, + }, + expectedErr: "results differ: expected 4 rows in result, got 1\n\twant: [[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]\n\tgot: [[INT64(1)]]", + }, + { + name: "elements mismatch", + want: "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]", + got: []Row{ + { + NewInt64(1), + }, + { + NewInt64(1), + }, + { + NewInt64(1), + }, + { + NewInt64(1), + }, + }, + expectedErr: "results differ: row [INT64(2)] is missing from result\n\twant: [[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]\n\tgot: [[INT64(1)] [INT64(1)] [INT64(1)] [INT64(1)]]", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := RowsEqualsStr(tt.want, tt.got) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) } } diff --git a/go/sqltypes/query_response_test.go b/go/sqltypes/query_response_test.go new file mode 100644 index 00000000000..30b6fe62e14 --- /dev/null +++ b/go/sqltypes/query_response_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestQueryResponsesEqual(t *testing.T) { + tests := []struct { + name string + r1 []QueryResponse + r2 []QueryResponse + isEqual bool + }{ + { + name: "1 response in each", + r1: []QueryResponse{ + { + QueryResult: &Result{}, + QueryError: nil, + }, + }, + r2: []QueryResponse{ + { + QueryResult: &Result{}, + QueryError: nil, + }, + }, + isEqual: true, + }, + { + name: "different lengths", + r1: []QueryResponse{ + { + QueryResult: &Result{}, + QueryError: nil, + }, + }, + r2: []QueryResponse{}, + isEqual: false, + }, + { + name: "different query errors", + r1: []QueryResponse{ + { + QueryResult: &Result{}, + QueryError: fmt.Errorf("some error"), + }, + }, + r2: []QueryResponse{ + { + QueryResult: &Result{ + Info: "Test", + }, + QueryError: nil, + }, + }, + isEqual: false, + }, + { + name: "different query results", + r1: []QueryResponse{ + { + QueryResult: &Result{ + RowsAffected: 7, + }, + QueryError: nil, + }, + }, + r2: []QueryResponse{ + { + QueryResult: &Result{ + RowsAffected: 10, + }, + QueryError: nil, + }, + }, + isEqual: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.isEqual, QueryResponsesEqual(tt.r1, tt.r2)) + }) + } +} diff --git a/go/sqltypes/result_test.go b/go/sqltypes/result_test.go index 90d2eb9af65..d8075ec0633 100644 --- a/go/sqltypes/result_test.go +++ b/go/sqltypes/result_test.go @@ -19,6 +19,8 @@ package sqltypes import ( "testing" + "github.com/stretchr/testify/assert" + 
"vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" @@ -345,3 +347,165 @@ func TestAppendResult(t *testing.T) { t.Errorf("Got:\n%#v, want:\n%#v", result, want) } } + +func TestReplaceKeyspace(t *testing.T) { + result := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }, { + Type: VarBinary, + }}, + } + + result.ReplaceKeyspace("keyspace-name") + assert.Equal(t, "keyspace-name", result.Fields[0].Database) + assert.Equal(t, "keyspace-name", result.Fields[1].Database) + // Expect empty database identifiers to remain empty + assert.Equal(t, "", result.Fields[2].Database) +} + +func TestShallowCopy(t *testing.T) { + result := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }}, + Rows: [][]Value{ + { + MakeTrusted(querypb.Type_INT32, []byte("10")), + MakeTrusted(querypb.Type_VARCHAR, []byte("name")), + }, + }, + } + + res := result.ShallowCopy() + assert.Equal(t, result, res) +} + +func TestMetadata(t *testing.T) { + result := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }}, + Rows: [][]Value{ + { + MakeTrusted(querypb.Type_INT32, []byte("10")), + MakeTrusted(querypb.Type_VARCHAR, []byte("name")), + }, + }, + } + + res := result.Metadata() + assert.Nil(t, res.Rows) + assert.Equal(t, result.Fields, res.Fields) +} + +func TestResultsEqualUnordered(t *testing.T) { + result1 := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }}, + Rows: [][]Value{ + { + MakeTrusted(querypb.Type_INT32, []byte("24")), + MakeTrusted(querypb.Type_VARCHAR, []byte("test-name1")), + }, + }, + RowsAffected: 2, + } + + result2 := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }}, + Rows: [][]Value{ + { 
+ MakeTrusted(querypb.Type_INT32, []byte("10")), + MakeTrusted(querypb.Type_VARCHAR, []byte("test-name2")), + }, + }, + RowsAffected: 2, + } + + result3 := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }}, + Rows: [][]Value{ + { + MakeTrusted(querypb.Type_INT32, []byte("10")), + MakeTrusted(querypb.Type_VARCHAR, []byte("test-name2")), + }, + { + MakeTrusted(querypb.Type_INT32, []byte("24")), + MakeTrusted(querypb.Type_VARCHAR, []byte("test-name1")), + }, + }, + RowsAffected: 3, + } + + eq := ResultsEqualUnordered([]Result{*result1, *result2}, []Result{*result2, *result1}) + assert.True(t, eq) + + eq = ResultsEqualUnordered([]Result{*result1}, []Result{*result2, *result1}) + assert.False(t, eq) + + eq = ResultsEqualUnordered([]Result{*result1}, []Result{*result2}) + assert.False(t, eq) + + eq = ResultsEqualUnordered([]Result{*result1, *result3}, []Result{*result2, *result1}) + assert.False(t, eq) +} + +func TestStatusFlags(t *testing.T) { + result := &Result{ + Fields: []*querypb.Field{{ + Type: Int64, + Database: "vttest", + }, { + Type: VarChar, + Database: "vttest", + }}, + StatusFlags: ServerMoreResultsExists, + } + + assert.True(t, result.IsMoreResultsExists()) + assert.False(t, result.IsInTransaction()) + + result.StatusFlags = ServerStatusInTrans + + assert.False(t, result.IsMoreResultsExists()) + assert.True(t, result.IsInTransaction()) +} + +func TestIncludeFieldsOrDefault(t *testing.T) { + // Should return default if nil is passed + r := IncludeFieldsOrDefault(nil) + assert.Equal(t, querypb.ExecuteOptions_TYPE_AND_NAME, r) + + r = IncludeFieldsOrDefault(&querypb.ExecuteOptions{IncludedFields: querypb.ExecuteOptions_TYPE_ONLY}) + assert.Equal(t, querypb.ExecuteOptions_TYPE_ONLY, r) +} diff --git a/go/sqltypes/testing.go b/go/sqltypes/testing.go index 3894635eae0..f67cd1c6deb 100644 --- a/go/sqltypes/testing.go +++ b/go/sqltypes/testing.go @@ -17,12 +17,11 @@ limitations under the 
License. package sqltypes import ( - "bytes" crand "crypto/rand" "encoding/base64" "encoding/hex" "fmt" - "math/rand" + "math/rand/v2" "strconv" "strings" "time" @@ -77,7 +76,7 @@ func MakeTestResult(fields []*querypb.Field, rows ...string) *Result { for i, row := range rows { result.Rows[i] = make([]Value, len(fields)) for j, col := range split(row) { - if col == "null" { + if strings.ToLower(col) == "null" { result.Rows[i][j] = NULL continue } @@ -155,13 +154,13 @@ func TestTuple(vals ...Value) Value { // PrintResults prints []*Results into a string. // This function should only be used for testing. func PrintResults(results []*Result) string { - b := new(bytes.Buffer) + var b strings.Builder for i, r := range results { if i == 0 { - fmt.Fprintf(b, "%v", r) + fmt.Fprintf(&b, "%v", r) continue } - fmt.Fprintf(b, ", %v", r) + fmt.Fprintf(&b, ", %v", r) } return b.String() } @@ -182,7 +181,7 @@ func TestRandomValues() (Value, Value) { } func randomNumericType(i int) Value { - r := rand.Intn(len(numericTypes)) + r := rand.IntN(len(numericTypes)) return numericTypes[r](i) } @@ -202,7 +201,7 @@ var numericTypes = []func(int) Value{ type RandomGenerator func() Value func randomBytes() []byte { - b := make([]byte, rand.Intn(128)) + b := make([]byte, rand.IntN(128)) _, _ = crand.Read(b) return b } @@ -212,13 +211,13 @@ var RandomGenerators = map[Type]RandomGenerator{ return NULL }, Int8: func() Value { - return NewInt8(int8(rand.Intn(255))) + return NewInt8(int8(rand.IntN(255))) }, Int32: func() Value { - return NewInt32(rand.Int31()) + return NewInt32(rand.Int32()) }, Int64: func() Value { - return NewInt64(rand.Int63()) + return NewInt64(rand.Int64()) }, Uint32: func() Value { return NewUint32(rand.Uint32()) @@ -230,7 +229,7 @@ var RandomGenerators = map[Type]RandomGenerator{ return NewFloat64(rand.ExpFloat64()) }, Decimal: func() Value { - dec := fmt.Sprintf("%d.%d", rand.Intn(9999999999), rand.Intn(9999999999)) + dec := fmt.Sprintf("%d.%d", rand.IntN(999999999), 
rand.IntN(999999999)) if rand.Int()&0x1 == 1 { dec = "-" + dec } @@ -256,11 +255,11 @@ var RandomGenerators = map[Type]RandomGenerator{ }, TypeJSON: func() Value { var j string - switch rand.Intn(6) { + switch rand.IntN(6) { case 0: j = "null" case 1: - i := rand.Int63() + i := rand.Int64() if rand.Int()&0x1 == 1 { i = -i } @@ -280,6 +279,12 @@ var RandomGenerators = map[Type]RandomGenerator{ } return v }, + Enum: func() Value { + return MakeTrusted(Enum, randEnum()) + }, + Set: func() Value { + return MakeTrusted(Set, randSet()) + }, } func randTime() time.Time { @@ -287,6 +292,36 @@ func randTime() time.Time { max := time.Date(2070, 1, 0, 0, 0, 0, 0, time.UTC).Unix() delta := max - min - sec := rand.Int63n(delta) + min + sec := rand.Int64N(delta) + min return time.Unix(sec, 0) } + +func randEnum() []byte { + enums := []string{ + "xxsmall", + "xsmall", + "small", + "medium", + "large", + "xlarge", + "xxlarge", + } + return []byte(enums[rand.IntN(len(enums))]) +} + +func randSet() []byte { + set := []string{ + "a", + "b", + "c", + "d", + "e", + "f", + "g", + } + rand.Shuffle(len(set), func(i, j int) { + set[i], set[j] = set[j], set[i] + }) + set = set[:rand.IntN(len(set))] + return []byte(strings.Join(set, ",")) +} diff --git a/go/sqltypes/type.go b/go/sqltypes/type.go index 9157db685e9..4090dd0107a 100644 --- a/go/sqltypes/type.go +++ b/go/sqltypes/type.go @@ -89,6 +89,10 @@ func IsText(t querypb.Type) bool { return int(t)&flagIsText == flagIsText } +func IsTextOrBinary(t querypb.Type) bool { + return int(t)&flagIsText == flagIsText || int(t)&flagIsBinary == flagIsBinary +} + // IsBinary returns true if querypb.Type is a binary. // If you have a Value object, use its member function. 
func IsBinary(t querypb.Type) bool { @@ -115,6 +119,16 @@ func IsNull(t querypb.Type) bool { return t == Null } +// IsEnum returns true if the type is Enum type +func IsEnum(t querypb.Type) bool { + return t == Enum +} + +// IsSet returns true if the type is Set type +func IsSet(t querypb.Type) bool { + return t == Set +} + // Vitess data types. These are idiomatically named synonyms for the querypb.Type values. // Although these constants are interchangeable, they should be treated as different from querypb.Type. // Use the synonyms only to refer to the type in Value. For proto variables, use the querypb.Type constants instead. @@ -185,7 +199,7 @@ const ( // If you add to this map, make sure you add a test case // in tabletserver/endtoend. -var mysqlToType = map[int64]querypb.Type{ +var mysqlToType = map[byte]querypb.Type{ 0: Decimal, 1: Int8, 2: Int16, @@ -271,7 +285,7 @@ func modifyType(typ querypb.Type, flags int64) querypb.Type { } // MySQLToType computes the vitess type from mysql type and flags. -func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) { +func MySQLToType(mysqlType byte, flags int64) (typ querypb.Type, err error) { result, ok := mysqlToType[mysqlType] if !ok { return 0, fmt.Errorf("unsupported type: %d", mysqlType) @@ -299,7 +313,7 @@ func AreTypesEquivalent(mysqlTypeFromBinlog, mysqlTypeFromSchema querypb.Type) b // typeToMySQL is the reverse of mysqlToType. var typeToMySQL = map[querypb.Type]struct { - typ int64 + typ byte flags int64 }{ Int8: {typ: 1}, @@ -338,7 +352,7 @@ var typeToMySQL = map[querypb.Type]struct { } // TypeToMySQL returns the equivalent mysql type and flag for a vitess type. 
-func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) { +func TypeToMySQL(typ querypb.Type) (mysqlType byte, flags int64) { val := typeToMySQL[typ] return val.typ, val.flags } diff --git a/go/sqltypes/type_test.go b/go/sqltypes/type_test.go index f223c5811e3..8493dc23e05 100644 --- a/go/sqltypes/type_test.go +++ b/go/sqltypes/type_test.go @@ -20,6 +20,8 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -285,7 +287,7 @@ func TestTypeToMySQL(t *testing.T) { func TestMySQLToType(t *testing.T) { testcases := []struct { - intype int64 + intype byte inflags int64 outtype querypb.Type }{{ @@ -512,3 +514,88 @@ func TestPrintTypeChecks(t *testing.T) { t.Logf("%s(): %s", f.name, strings.Join(match, ", ")) } } + +func TestIsTextOrBinary(t *testing.T) { + tests := []struct { + name string + ty querypb.Type + isTextorBinary bool + }{ + { + name: "null type", + ty: querypb.Type_NULL_TYPE, + isTextorBinary: false, + }, + { + name: "blob type", + ty: querypb.Type_BLOB, + isTextorBinary: true, + }, + { + name: "text type", + ty: querypb.Type_TEXT, + isTextorBinary: true, + }, + { + name: "binary type", + ty: querypb.Type_BINARY, + isTextorBinary: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.isTextorBinary, IsTextOrBinary(tt.ty)) + }) + } +} + +func TestIsDateOrTime(t *testing.T) { + tests := []struct { + name string + ty querypb.Type + isDateOrTime bool + }{ + { + name: "null type", + ty: querypb.Type_NULL_TYPE, + isDateOrTime: false, + }, + { + name: "blob type", + ty: querypb.Type_BLOB, + isDateOrTime: false, + }, + { + name: "timestamp type", + ty: querypb.Type_TIMESTAMP, + isDateOrTime: true, + }, + { + name: "date type", + ty: querypb.Type_DATE, + isDateOrTime: true, + }, + { + name: "time type", + ty: querypb.Type_TIME, + isDateOrTime: true, + }, + { + name: "date time type", + ty: querypb.Type_DATETIME, + isDateOrTime: true, + }, + { + 
name: "year type", + ty: querypb.Type_YEAR, + isDateOrTime: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.isDateOrTime, IsDateOrTime(tt.ty)) + }) + } +} diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index 45415814700..bb4e26d15e3 100644 --- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -51,6 +51,8 @@ var ( // ErrIncompatibleTypeCast indicates a casting problem ErrIncompatibleTypeCast = errors.New("Cannot convert value to desired type") + + ErrInvalidEncodedString = errors.New("invalid SQL encoded string") ) const ( @@ -566,6 +568,16 @@ func (v Value) IsDecimal() bool { return IsDecimal(v.Type()) } +// IsEnum returns true if Value is enum. +func (v Value) IsEnum() bool { + return v.Type() == querypb.Type_ENUM +} + +// IsSet returns true if Value is set. +func (v Value) IsSet() bool { + return v.Type() == querypb.Type_SET +} + // IsComparable returns true if the Value is null safe comparable without collation information. 
func (v *Value) IsComparable() bool { if v.Type() == Null || IsNumber(v.Type()) || IsBinary(v.Type()) { @@ -733,7 +745,13 @@ func (v Value) TinyWeightCmp(other Value) int { if v.flags&other.flags&flagTinyWeight == 0 { return 0 } - return int(int64(v.tinyweight) - int64(other.tinyweight)) + if v.tinyweight == other.tinyweight { + return 0 + } + if v.tinyweight < other.tinyweight { + return -1 + } + return 1 } func (v Value) TinyWeight() uint32 { @@ -855,6 +873,56 @@ var encodeRef = map[byte]byte{ '\\': '\\', } +// BufDecodeStringSQL decodes the string into a strings.Builder +func BufDecodeStringSQL(buf *strings.Builder, val string) error { + if len(val) < 2 || val[0] != '\'' || val[len(val)-1] != '\'' { + return fmt.Errorf("%s: %w", val, ErrInvalidEncodedString) + } + in := hack.StringBytes(val[1 : len(val)-1]) + idx := 0 + for { + if idx >= len(in) { + return nil + } + ch := in[idx] + if ch == '\'' { + idx++ + if idx >= len(in) { + return fmt.Errorf("%s: %w", val, ErrInvalidEncodedString) + } + if in[idx] != '\'' { + return fmt.Errorf("%s: %w", val, ErrInvalidEncodedString) + } + buf.WriteByte(ch) + idx++ + continue + } + if ch == '\\' { + idx++ + if idx >= len(in) { + return fmt.Errorf("%s: %w", val, ErrInvalidEncodedString) + } + decoded := SQLDecodeMap[in[idx]] + if decoded == DontEscape { + return fmt.Errorf("%s: %w", val, ErrInvalidEncodedString) + } + buf.WriteByte(decoded) + idx++ + continue + } + + buf.WriteByte(ch) + idx++ + } +} + +// DecodeStringSQL encodes the string as a SQL string. +func DecodeStringSQL(val string) (string, error) { + var buf strings.Builder + err := BufDecodeStringSQL(&buf, val) + return buf.String(), err +} + func init() { for i := range SQLEncodeMap { SQLEncodeMap[i] = DontEscape diff --git a/go/sqltypes/value_test.go b/go/sqltypes/value_test.go index 86c751f3d0d..36a0f5a5090 100644 --- a/go/sqltypes/value_test.go +++ b/go/sqltypes/value_test.go @@ -17,8 +17,7 @@ limitations under the License. 
package sqltypes import ( - "bytes" - "reflect" + "math" "strings" "testing" @@ -26,6 +25,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/bytes2" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -190,18 +190,12 @@ func TestNewValue(t *testing.T) { for _, tcase := range testcases { v, err := NewValue(tcase.inType, []byte(tcase.inVal)) if tcase.outErr != "" { - if err == nil || !strings.Contains(err.Error(), tcase.outErr) { - t.Errorf("ValueFromBytes(%v, %v) error: %v, must contain %v", tcase.inType, tcase.inVal, err, tcase.outErr) - } - continue - } - if err != nil { - t.Errorf("ValueFromBytes(%v, %v) error: %v", tcase.inType, tcase.inVal, err) + assert.ErrorContains(t, err, tcase.outErr) continue } - if !reflect.DeepEqual(v, tcase.outVal) { - t.Errorf("ValueFromBytes(%v, %v) = %v, want %v", tcase.inType, tcase.inVal, v, tcase.outVal) - } + + assert.NoError(t, err) + assert.Equal(t, tcase.outVal, v) } } @@ -210,27 +204,24 @@ func TestNewValue(t *testing.T) { func TestNew(t *testing.T) { got := NewInt32(1) want := MakeTrusted(Int32, []byte("1")) - if !reflect.DeepEqual(got, want) { - t.Errorf("NewInt32(aa): %v, want %v", got, want) - } + assert.Equal(t, want, got) got = NewVarBinary("aa") want = MakeTrusted(VarBinary, []byte("aa")) - if !reflect.DeepEqual(got, want) { - t.Errorf("NewVarBinary(aa): %v, want %v", got, want) - } + assert.Equal(t, want, got) + + got, err := NewJSON("invalid-json") + assert.Empty(t, got) + assert.ErrorContains(t, err, "invalid JSON value") } func TestMakeTrusted(t *testing.T) { v := MakeTrusted(Null, []byte("abcd")) - if !reflect.DeepEqual(v, NULL) { - t.Errorf("MakeTrusted(Null...) 
= %v, want null", v) - } + assert.Equal(t, NULL, v) + v = MakeTrusted(Int64, []byte("1")) want := TestValue(Int64, "1") - if !reflect.DeepEqual(v, want) { - t.Errorf("MakeTrusted(Int64, \"1\") = %v, want %v", v, want) - } + assert.Equal(t, want, v) } func TestIntegralValue(t *testing.T) { @@ -254,18 +245,12 @@ func TestIntegralValue(t *testing.T) { for _, tcase := range testcases { v, err := NewIntegral(tcase.in) if tcase.outErr != "" { - if err == nil || !strings.Contains(err.Error(), tcase.outErr) { - t.Errorf("BuildIntegral(%v) error: %v, must contain %v", tcase.in, err, tcase.outErr) - } - continue - } - if err != nil { - t.Errorf("BuildIntegral(%v) error: %v", tcase.in, err) + assert.ErrorContains(t, err, tcase.outErr) continue } - if !reflect.DeepEqual(v, tcase.outVal) { - t.Errorf("BuildIntegral(%v) = %v, want %v", tcase.in, v, tcase.outVal) - } + + assert.NoError(t, err) + assert.Equal(t, tcase.outVal, v) } } @@ -294,118 +279,66 @@ func TestInterfaceValue(t *testing.T) { }} for _, tcase := range testcases { v, err := InterfaceToValue(tcase.in) - if err != nil { - t.Errorf("BuildValue(%#v) error: %v", tcase.in, err) - continue - } - if !reflect.DeepEqual(v, tcase.out) { - t.Errorf("BuildValue(%#v) = %v, want %v", tcase.in, v, tcase.out) - } + + assert.NoError(t, err) + assert.Equal(t, tcase.out, v) } _, err := InterfaceToValue(make(chan bool)) want := "unexpected" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("BuildValue(chan): %v, want %v", err, want) - } + assert.ErrorContains(t, err, want) } func TestAccessors(t *testing.T) { v := TestValue(Int64, "1") - if v.Type() != Int64 { - t.Errorf("v.Type=%v, want Int64", v.Type()) - } - if !bytes.Equal(v.Raw(), []byte("1")) { - t.Errorf("v.Raw=%s, want 1", v.Raw()) - } - if v.Len() != 1 { - t.Errorf("v.Len=%d, want 1", v.Len()) - } - if v.ToString() != "1" { - t.Errorf("v.String=%s, want 1", v.ToString()) - } - if v.IsNull() { - t.Error("v.IsNull: true, want false") - } - if !v.IsIntegral() 
{ - t.Error("v.IsIntegral: false, want true") - } - if !v.IsSigned() { - t.Error("v.IsSigned: false, want true") - } - if v.IsUnsigned() { - t.Error("v.IsUnsigned: true, want false") - } - if v.IsFloat() { - t.Error("v.IsFloat: true, want false") - } - if v.IsQuoted() { - t.Error("v.IsQuoted: true, want false") - } - if v.IsText() { - t.Error("v.IsText: true, want false") - } - if v.IsBinary() { - t.Error("v.IsBinary: true, want false") - } + assert.Equal(t, Int64, v.Type()) + assert.Equal(t, []byte("1"), v.Raw()) + assert.Equal(t, 1, v.Len()) + assert.Equal(t, "1", v.ToString()) + assert.False(t, v.IsNull()) + assert.True(t, v.IsIntegral()) + assert.True(t, v.IsSigned()) + assert.False(t, v.IsUnsigned()) + assert.False(t, v.IsFloat()) + assert.False(t, v.IsQuoted()) + assert.False(t, v.IsText()) + assert.False(t, v.IsBinary()) + { i, err := v.ToInt64() - if err != nil { - t.Errorf("v.ToInt64: got error: %+v, want no error", err) - } - if i != 1 { - t.Errorf("v.ToInt64=%+v, want 1", i) - } + assert.NoError(t, err) + assert.Equal(t, int64(1), i) } { i, err := v.ToUint64() - if err != nil { - t.Errorf("v.ToUint64: got error: %+v, want no error", err) - } - if i != 1 { - t.Errorf("v.ToUint64=%+v, want 1", i) - } + assert.NoError(t, err) + assert.Equal(t, uint64(1), i) } { b, err := v.ToBool() - if err != nil { - t.Errorf("v.ToBool: got error: %+v, want no error", err) - } - if !b { - t.Errorf("v.ToBool=%+v, want true", b) - } + assert.NoError(t, err) + assert.True(t, b) } } func TestAccessorsNegative(t *testing.T) { v := TestValue(Int64, "-1") - if v.ToString() != "-1" { - t.Errorf("v.String=%s, want -1", v.ToString()) - } - if v.IsNull() { - t.Error("v.IsNull: true, want false") - } - if !v.IsIntegral() { - t.Error("v.IsIntegral: false, want true") - } + assert.Equal(t, "-1", v.ToString()) + assert.False(t, v.IsNull()) + assert.True(t, v.IsIntegral()) + { i, err := v.ToInt64() - if err != nil { - t.Errorf("v.ToInt64: got error: %+v, want no error", err) - } - if i != 
-1 { - t.Errorf("v.ToInt64=%+v, want -1", i) - } + assert.NoError(t, err) + assert.Equal(t, int64(-1), i) } { - if _, err := v.ToUint64(); err == nil { - t.Error("v.ToUint64: got no error, want error") - } + _, err := v.ToUint64() + assert.Error(t, err) } { - if _, err := v.ToBool(); err == nil { - t.Error("v.ToUint64: got no error, want error") - } + _, err := v.ToBool() + assert.Error(t, err) } } @@ -417,23 +350,15 @@ func TestToBytesAndString(t *testing.T) { } { vBytes, err := v.ToBytes() require.NoError(t, err) - if b := vBytes; !bytes.Equal(b, v.Raw()) { - t.Errorf("%v.ToBytes: %s, want %s", v, b, v.Raw()) - } - if s := v.ToString(); s != string(v.Raw()) { - t.Errorf("%v.ToString: %s, want %s", v, s, v.Raw()) - } + assert.Equal(t, v.Raw(), vBytes) + assert.Equal(t, string(v.Raw()), v.ToString()) } tv := TestValue(Expression, "aa") tvBytes, err := tv.ToBytes() require.EqualError(t, err, "expression cannot be converted to bytes") - if b := tvBytes; b != nil { - t.Errorf("%v.ToBytes: %s, want nil", tv, b) - } - if s := tv.ToString(); s != "" { - t.Errorf("%v.ToString: %s, want \"\"", tv, s) - } + assert.Nil(t, tvBytes) + assert.Empty(t, tv.ToString()) } func TestEncode(t *testing.T) { @@ -463,27 +388,20 @@ func TestEncode(t *testing.T) { outASCII: "'YQ=='", }} for _, tcase := range testcases { - buf := &bytes.Buffer{} - tcase.in.EncodeSQL(buf) - if tcase.outSQL != buf.String() { - t.Errorf("%v.EncodeSQL = %q, want %q", tcase.in, buf.String(), tcase.outSQL) - } - buf = &bytes.Buffer{} - tcase.in.EncodeASCII(buf) - if tcase.outASCII != buf.String() { - t.Errorf("%v.EncodeASCII = %q, want %q", tcase.in, buf.String(), tcase.outASCII) - } + var buf strings.Builder + tcase.in.EncodeSQL(&buf) + assert.Equal(t, tcase.outSQL, buf.String()) + + buf.Reset() + tcase.in.EncodeASCII(&buf) + assert.Equal(t, tcase.outASCII, buf.String()) } } // TestEncodeMap ensures DontEscape is not escaped func TestEncodeMap(t *testing.T) { - if SQLEncodeMap[DontEscape] != DontEscape { - 
t.Errorf("SQLEncodeMap[DontEscape] = %v, want %v", SQLEncodeMap[DontEscape], DontEscape) - } - if SQLDecodeMap[DontEscape] != DontEscape { - t.Errorf("SQLDecodeMap[DontEscape] = %v, want %v", SQLEncodeMap[DontEscape], DontEscape) - } + assert.Equal(t, DontEscape, SQLEncodeMap[DontEscape]) + assert.Equal(t, DontEscape, SQLDecodeMap[DontEscape]) } func TestHexAndBitToBytes(t *testing.T) { @@ -512,3 +430,280 @@ func TestHexAndBitToBytes(t *testing.T) { }) } } + +func TestEncodeStringSQL(t *testing.T) { + testcases := []struct { + in string + out string + }{ + { + in: "", + out: "''", + }, + { + in: "\x00'\"\b\n\r\t\x1A\\", + out: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", + }, + } + for _, tcase := range testcases { + out := EncodeStringSQL(tcase.in) + assert.Equal(t, tcase.out, out) + } +} + +func TestDecodeStringSQL(t *testing.T) { + testcases := []struct { + in string + out string + err string + }{ + { + in: "", + err: ": invalid SQL encoded string", + }, { + in: "''", + err: "", + }, + { + in: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", + out: "\x00'\"\b\n\r\t\x1A\\", + }, + { + in: "'light ''green\\r\\n, \\nfoo'", + out: "light 'green\r\n, \nfoo", + }, + { + in: "'foo \\\\ % _bar'", + out: "foo \\ % _bar", + }, + } + for _, tcase := range testcases { + out, err := DecodeStringSQL(tcase.in) + if tcase.err != "" { + assert.EqualError(t, err, tcase.err) + } else { + require.NoError(t, err) + assert.Equal(t, tcase.out, out) + } + } +} + +func TestTinyWeightCmp(t *testing.T) { + val1 := TestValue(Int64, "12") + val2 := TestValue(VarChar, "aa") + + val1.SetTinyWeight(10) + + // Test TinyWeight + assert.Equal(t, uint32(10), val1.TinyWeight()) + + cmp := val1.TinyWeightCmp(val2) + assert.Equal(t, 0, cmp) + + val2.SetTinyWeight(10) + cmp = val1.TinyWeightCmp(val2) + assert.Equal(t, 0, cmp) + + val2.SetTinyWeight(20) + cmp = val1.TinyWeightCmp(val2) + assert.Equal(t, -1, cmp) + + val2.SetTinyWeight(5) + cmp = val1.TinyWeightCmp(val2) + assert.Equal(t, 1, cmp) +} + +func TestToCastInt64(t 
*testing.T) { + tcases := []struct { + in Value + want int64 + err string + }{ + {TestValue(Int64, "213"), 213, ""}, + {TestValue(Int64, "-213"), -213, ""}, + {TestValue(VarChar, "9223372036854775808a"), math.MaxInt64, `cannot parse int64 from "9223372036854775808a": overflow`}, + {TestValue(Time, "12:23:59"), 12, `unparsed tail left after parsing int64 from "12:23:59": ":23:59"`}, + } + + for _, tcase := range tcases { + t.Run(tcase.in.String(), func(t *testing.T) { + got, err := tcase.in.ToCastInt64() + assert.Equal(t, tcase.want, got) + + if tcase.err != "" { + assert.ErrorContains(t, err, tcase.err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestToCastUint64(t *testing.T) { + tcases := []struct { + in Value + want uint64 + err string + }{ + {TestValue(Int64, "213"), 213, ""}, + {TestValue(Int64, "-213"), 0, `cannot parse uint64 from "-213"`}, + {TestValue(VarChar, "9223372036854775808a"), 9223372036854775808, `unparsed tail left after parsing uint64 from "9223372036854775808a": "a"`}, + {TestValue(Time, "12:23:59"), 12, `unparsed tail left after parsing uint64 from "12:23:59": ":23:59"`}, + } + + for _, tcase := range tcases { + t.Run(tcase.in.String(), func(t *testing.T) { + got, err := tcase.in.ToCastUint64() + assert.Equal(t, tcase.want, got) + + if tcase.err != "" { + assert.ErrorContains(t, err, tcase.err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestToUint16(t *testing.T) { + tcases := []struct { + in Value + want uint16 + err string + }{ + {TestValue(Int64, "213"), 213, ""}, + {TestValue(Int64, "-213"), 0, `parsing "-213": invalid syntax`}, + {TestValue(VarChar, "9223372036854775808a"), 0, ErrIncompatibleTypeCast.Error()}, + {TestValue(Time, "12:23:59"), 0, ErrIncompatibleTypeCast.Error()}, + } + + for _, tcase := range tcases { + t.Run(tcase.in.String(), func(t *testing.T) { + got, err := tcase.in.ToUint16() + assert.Equal(t, tcase.want, got) + + if tcase.err != "" { + assert.ErrorContains(t, err, tcase.err) + } else 
{ + assert.NoError(t, err) + } + }) + } +} + +func TestToUint32(t *testing.T) { + tcases := []struct { + in Value + want uint32 + err string + }{ + {TestValue(Int64, "213"), 213, ""}, + {TestValue(Int64, "-213"), 0, `parsing "-213": invalid syntax`}, + {TestValue(VarChar, "9223372036854775808a"), 0, ErrIncompatibleTypeCast.Error()}, + {TestValue(Time, "12:23:59"), 0, ErrIncompatibleTypeCast.Error()}, + } + + for _, tcase := range tcases { + t.Run(tcase.in.String(), func(t *testing.T) { + got, err := tcase.in.ToUint32() + assert.Equal(t, tcase.want, got) + + if tcase.err != "" { + assert.ErrorContains(t, err, tcase.err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestEncodeSQLStringBuilder(t *testing.T) { + testcases := []struct { + in Value + outSQL string + }{{ + in: NULL, + outSQL: "null", + }, { + in: TestValue(Int64, "1"), + outSQL: "1", + }, { + in: TestValue(VarChar, "foo"), + outSQL: "'foo'", + }, { + in: TestValue(VarChar, "\x00'\"\b\n\r\t\x1A\\"), + outSQL: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", + }, { + in: TestValue(Bit, "a"), + outSQL: "b'01100001'", + }, { + in: TestTuple(TestValue(Int64, "1"), TestValue(VarChar, "foo")), + outSQL: "(1, 'foo')", + }} + for _, tcase := range testcases { + var buf strings.Builder + + tcase.in.EncodeSQLStringBuilder(&buf) + assert.Equal(t, tcase.outSQL, buf.String()) + } +} + +func TestEncodeSQLBytes2(t *testing.T) { + testcases := []struct { + in Value + outSQL string + }{{ + in: NULL, + outSQL: "null", + }, { + in: TestValue(Int64, "1"), + outSQL: "1", + }, { + in: TestValue(VarChar, "foo"), + outSQL: "'foo'", + }, { + in: TestValue(VarChar, "\x00'\"\b\n\r\t\x1A\\"), + outSQL: "'\\0\\'\\\"\\b\\n\\r\\t\\Z\\\\'", + }, { + in: TestValue(Bit, "a"), + outSQL: "b'01100001'", + }, { + in: TestTuple(TestValue(Int64, "1"), TestValue(VarChar, "foo")), + outSQL: "\x89\x02\x011\x950\x03foo", + }} + for _, tcase := range testcases { + buf := bytes2.NewBuffer([]byte{}) + + tcase.in.EncodeSQLBytes2(buf) + assert.Equal(t, 
tcase.outSQL, buf.String()) + } +} + +func TestIsComparable(t *testing.T) { + testcases := []struct { + in Value + isCmp bool + }{{ + in: NULL, + isCmp: true, + }, { + in: TestValue(Int64, "1"), + isCmp: true, + }, { + in: TestValue(VarChar, "foo"), + }, { + in: TestValue(VarChar, "\x00'\"\b\n\r\t\x1A\\"), + }, { + in: TestValue(Bit, "a"), + isCmp: true, + }, { + in: TestValue(Time, "12:21:11"), + isCmp: true, + }, { + in: TestTuple(TestValue(Int64, "1"), TestValue(VarChar, "foo")), + }} + for _, tcase := range testcases { + isCmp := tcase.in.IsComparable() + assert.Equal(t, tcase.isCmp, isCmp) + } +} diff --git a/go/stats/counter.go b/go/stats/counter.go index 4428dfe1136..d38929d64b6 100644 --- a/go/stats/counter.go +++ b/go/stats/counter.go @@ -17,6 +17,8 @@ limitations under the License. package stats import ( + "expvar" + "fmt" "math" "strconv" "sync/atomic" @@ -45,6 +47,22 @@ func NewCounter(name string, help string) *Counter { return v } +// NewCounterWithDeprecatedName returns a new Counter that also has a deprecated name that can be removed in a future release. +// It is important to ensure that we only call this function with values for name and deprecatedName such that they match to the same +// metric name in snake case. +func NewCounterWithDeprecatedName(name string, deprecatedName string, help string) *Counter { + // Ensure that the snake case for the deprecated name and the new name are the same. + if deprecatedName == "" || GetSnakeName(name) != GetSnakeName(deprecatedName) { + panic(fmt.Sprintf("New name for deprecated metric doesn't have the same snake case - %v", deprecatedName)) + } + v := &Counter{help: help} + // We want to publish the deprecated name for backward compatibility. + // At the same time we want the new metric to be visible on the `/debug/vars` page, so we publish the new name in expvar. + publish(deprecatedName, v) + expvar.Publish(name, v) + return v +} + // Add adds the provided value to the Counter. 
func (v *Counter) Add(delta int64) { if delta < 0 { @@ -136,6 +154,22 @@ func NewGauge(name string, help string) *Gauge { return v } +// NewGaugeWithDeprecatedName creates a new Gauge and publishes it if name is set that also has a deprecated name that can be removed in a future release. +// It is important to ensure that we only call this function with values for name and deprecatedName such that they match to the same metric name in snake case. +func NewGaugeWithDeprecatedName(name string, deprecatedName string, help string) *Gauge { + // Ensure that the snake case for the deprecated name and the new name are the same. + if deprecatedName == "" || GetSnakeName(name) != GetSnakeName(deprecatedName) { + panic(fmt.Sprintf("New name for deprecated metric doesn't have the same snake case - %v", deprecatedName)) + } + v := &Gauge{Counter: Counter{help: help}} + + // We want to publish the deprecated name for backward compatibility. + // At the same time we want the new metric to be visible on the `/debug/vars` page, so we publish the new name in expvar. + publish(deprecatedName, v) + expvar.Publish(name, v) + return v +} + // Set overwrites the current value. func (v *Gauge) Set(value int64) { v.Counter.i.Store(value) diff --git a/go/stats/counter_map.go b/go/stats/counter_map.go index 5ee7d19181e..a9af4495c60 100644 --- a/go/stats/counter_map.go +++ b/go/stats/counter_map.go @@ -25,7 +25,7 @@ var ( countersMu sync.RWMutex ) -// GetOrNewCounter returns a Counter with given name; the functiona either creates the counter +// GetOrNewCounter returns a Counter with given name; the function either creates the counter // if it does not exist, or returns a pre-existing one. The function is thread safe. 
func GetOrNewCounter(name string, help string) *Counter { // first, attempt read lock only diff --git a/go/stats/counter_test.go b/go/stats/counter_test.go index f290dc733d7..6a7b496dfab 100644 --- a/go/stats/counter_test.go +++ b/go/stats/counter_test.go @@ -18,9 +18,12 @@ package stats import ( "expvar" + "fmt" + "sync" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCounter(t *testing.T) { @@ -91,3 +94,95 @@ func TestGaugeFloat64(t *testing.T) { v.Reset() assert.Equal(t, float64(0), v.Get()) } + +func TestNewCounterWithDeprecatedName(t *testing.T) { + clearStats() + Register(func(name string, v expvar.Var) {}) + + testcases := []struct { + name string + deprecatedName string + shouldPanic bool + }{ + { + name: "new_name", + deprecatedName: "deprecatedName", + shouldPanic: true, + }, + { + name: "metricName_test", + deprecatedName: "metric.name-test", + shouldPanic: false, + }, + { + name: "MetricNameTesting", + deprecatedName: "metric.name.testing", + shouldPanic: false, + }, + } + + for _, testcase := range testcases { + t.Run(fmt.Sprintf("%v-%v", testcase.name, testcase.deprecatedName), func(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(1) + panicReceived := false + go func() { + defer func() { + if x := recover(); x != nil { + panicReceived = true + } + wg.Done() + }() + NewCounterWithDeprecatedName(testcase.name, testcase.deprecatedName, "help") + }() + wg.Wait() + require.EqualValues(t, testcase.shouldPanic, panicReceived) + }) + } +} + +func TestNewGaugeWithDeprecatedName(t *testing.T) { + clearStats() + Register(func(name string, v expvar.Var) {}) + + testcases := []struct { + name string + deprecatedName string + shouldPanic bool + }{ + { + name: "gauge_new_name", + deprecatedName: "gauge_deprecatedName", + shouldPanic: true, + }, + { + name: "gauge-metricName_test", + deprecatedName: "gauge_metric.name-test", + shouldPanic: false, + }, + { + name: "GaugeMetricNameTesting", + deprecatedName: 
"gauge.metric.name.testing", + shouldPanic: false, + }, + } + + for _, testcase := range testcases { + t.Run(fmt.Sprintf("%v-%v", testcase.name, testcase.deprecatedName), func(t *testing.T) { + wg := sync.WaitGroup{} + wg.Add(1) + panicReceived := false + go func() { + defer func() { + if x := recover(); x != nil { + panicReceived = true + } + wg.Done() + }() + NewGaugeWithDeprecatedName(testcase.name, testcase.deprecatedName, "help") + }() + wg.Wait() + require.EqualValues(t, testcase.shouldPanic, panicReceived) + }) + } +} diff --git a/go/stats/counters.go b/go/stats/counters.go index e79da39c48b..bcf7fc3a8b6 100644 --- a/go/stats/counters.go +++ b/go/stats/counters.go @@ -62,7 +62,7 @@ func (c *counters) set(name string, value int64) { func (c *counters) reset() { c.mu.Lock() defer c.mu.Unlock() - c.counts = make(map[string]int64) + clear(c.counts) } // ZeroAll zeroes out all values @@ -70,7 +70,9 @@ func (c *counters) ZeroAll() { c.mu.Lock() defer c.mu.Unlock() - clear(c.counts) + for k := range c.counts { + c.counts[k] = 0 + } } // Counts returns a copy of the Counters' map. diff --git a/go/stats/counters_test.go b/go/stats/counters_test.go index 22d6e769d3d..72eb11e1a10 100644 --- a/go/stats/counters_test.go +++ b/go/stats/counters_test.go @@ -18,7 +18,7 @@ package stats import ( "expvar" - "math/rand" + "math/rand/v2" "reflect" "sort" "strings" @@ -189,13 +189,11 @@ func BenchmarkCountersTailLatency(b *testing.B) { b.ResetTimer() b.SetParallelism(100) // The actual number of goroutines is 100*GOMAXPROCS b.RunParallel(func(pb *testing.PB) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - var start time.Time for pb.Next() { // sleep between 0~200ms to simulate 10 QPS per goroutine. 
- time.Sleep(time.Duration(r.Int63n(200)) * time.Millisecond) + time.Sleep(time.Duration(rand.Int64N(200)) * time.Millisecond) start = time.Now() benchCounter.Add("c1", 1) c <- time.Since(start) diff --git a/go/stats/histogram.go b/go/stats/histogram.go index 833c09b86bb..4a51098d606 100644 --- a/go/stats/histogram.go +++ b/go/stats/histogram.go @@ -74,6 +74,11 @@ func NewGenericHistogram(name, help string, cutoffs []int64, labels []string, co return h } +// Adds a hook that will be called every time a new value is added to the histogram +func (h *Histogram) AddHook(hook func(int64)) { + h.hook = hook +} + // Add adds a new measurement to the Histogram. func (h *Histogram) Add(value int64) { for i := range h.labels { diff --git a/go/stats/histogram_test.go b/go/stats/histogram_test.go index 1c7b05d8e9a..caa2a6ba722 100644 --- a/go/stats/histogram_test.go +++ b/go/stats/histogram_test.go @@ -19,6 +19,8 @@ package stats import ( "expvar" "testing" + + "github.com/stretchr/testify/assert" ) func TestHistogram(t *testing.T) { @@ -27,30 +29,28 @@ func TestHistogram(t *testing.T) { for i := 0; i < 10; i++ { h.Add(int64(i)) } - want := `{"1": 2, "5": 4, "inf": 4, "Count": 10, "Total": 45}` - if h.String() != want { - t.Errorf("got %v, want %v", h.String(), want) - } + + assert.Equal(t, h.String(), `{"1": 2, "5": 4, "inf": 4, "Count": 10, "Total": 45}`) + counts := h.Counts() counts["Count"] = h.Count() counts["Total"] = h.Total() - for k, want := range map[string]int64{ + for key, want := range map[string]int64{ "1": 2, "5": 4, "inf": 4, "Count": 10, "Total": 45, } { - if got := counts[k]; got != want { - t.Errorf("histogram counts [%v]: got %d, want %d", k, got, want) - } - } - if got, want := h.CountLabel(), "Count"; got != want { - t.Errorf("got %v, want %v", got, want) - } - if got, want := h.TotalLabel(), "Total"; got != want { - t.Errorf("got %v, want %v", got, want) + assert.Equal(t, counts[key], want) } + + assert.Equal(t, h.CountLabel(), "Count") + 
assert.Equal(t, h.TotalLabel(), "Total") + assert.Equal(t, h.Labels(), []string{"1", "5", "inf"}) + assert.Equal(t, h.Cutoffs(), []int64{1, 5}) + assert.Equal(t, h.Buckets(), []int64{2, 4, 4}) + assert.Equal(t, h.Help(), "help") } func TestGenericHistogram(t *testing.T) { @@ -63,27 +63,69 @@ func TestGenericHistogram(t *testing.T) { "count", "total", ) - want := `{"one": 0, "five": 0, "max": 0, "count": 0, "total": 0}` - if got := h.String(); got != want { - t.Errorf("got %v, want %v", got, want) - } + assert.Equal(t, h.String(), `{"one": 0, "five": 0, "max": 0, "count": 0, "total": 0}`) +} + +func TestInvalidGenericHistogram(t *testing.T) { + // Use a deferred function to capture the panic that the code should throw + defer func() { + r := recover() + assert.NotNil(t, r) + assert.Equal(t, r, "mismatched cutoff and label lengths") + }() + + clearStats() + NewGenericHistogram( + "histgen", + "help", + []int64{1, 5}, + []string{"one", "five"}, + "count", + "total", + ) } func TestHistogramHook(t *testing.T) { - var gotname string - var gotv *Histogram + // Check the results of Register hook function + var gotName string + var gotV *Histogram clearStats() Register(func(name string, v expvar.Var) { - gotname = name - gotv = v.(*Histogram) + gotName = name + gotV = v.(*Histogram) }) - name := "hist2" - v := NewHistogram(name, "help", []int64{1}) - if gotname != name { - t.Errorf("got %v; want %v", gotname, name) - } - if gotv != v { - t.Errorf("got %#v, want %#v", gotv, v) - } + v := NewHistogram("hist2", "help", []int64{1}) + + assert.Equal(t, gotName, "hist2") + assert.Equal(t, gotV, v) + + // Check the results of AddHook function + hookCalled := false + var addedValue int64 + + v.AddHook(func(value int64) { + hookCalled = true + addedValue = value + }) + + v.Add(42) + assert.Equal(t, hookCalled, true) + assert.Equal(t, addedValue, int64(42)) + + // Check the results of RegisterHistogramHook function + hookCalled = false + addedValue = 0 + gotName = "" + + 
RegisterHistogramHook(func(name string, value int64) { + hookCalled = true + gotName = name + addedValue = value + }) + + v.Add(10) + assert.Equal(t, gotName, "hist2") + assert.Equal(t, hookCalled, true) + assert.Equal(t, addedValue, int64(10)) } diff --git a/go/stats/hooks_test.go b/go/stats/hooks_test.go new file mode 100644 index 00000000000..72b6d1071c7 --- /dev/null +++ b/go/stats/hooks_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package stats + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStatsdHook(t *testing.T) { + t.Run("RegisterTimerHook", func(t *testing.T) { + defaultStatsdHook = statsdHook{} + + // Create a dummy timerHook function + dummyTimerHook := func(name, tags string, value int64, timings *Timings) { + assert.Equal(t, "dummyName", name) + assert.Equal(t, "dummyTags", tags) + assert.Equal(t, int64(42), value) + } + + // Register the dummy timerHook and then call the same + RegisterTimerHook(dummyTimerHook) + + assert.NotNil(t, defaultStatsdHook.timerHook) + assert.Nil(t, defaultStatsdHook.histogramHook) + defaultStatsdHook.timerHook("dummyName", "dummyTags", 42, nil) + }) + + t.Run("RegisterHistogramHook", func(t *testing.T) { + defaultStatsdHook = statsdHook{} + + // Create a dummy histogramHook function + dummyHistogramHook := func(name string, value int64) { + assert.Equal(t, "dummyName", name) + assert.Equal(t, int64(42), value) + } + + 
RegisterHistogramHook(dummyHistogramHook) + + assert.NotNil(t, defaultStatsdHook.histogramHook) + assert.Nil(t, defaultStatsdHook.timerHook) + defaultStatsdHook.histogramHook("dummyName", 42) + }) +} diff --git a/go/stats/opentsdb/backend_test.go b/go/stats/opentsdb/backend_test.go new file mode 100644 index 00000000000..c70b9ecb88b --- /dev/null +++ b/go/stats/opentsdb/backend_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package opentsdb + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/stats" +) + +type mockWriter struct { + data []*DataPoint +} + +func (mw *mockWriter) Write(data []*DataPoint) error { + mw.data = data + return nil +} + +func TestPushAll(t *testing.T) { + mw := &mockWriter{} + b := &backend{ + prefix: "testPrefix", + commonTags: map[string]string{"tag1": "value1"}, + writer: mw, + } + + err := b.PushAll() + assert.NoError(t, err) + before := len(mw.data) + + stats.NewGaugeFloat64("test_push_all1", "help") + stats.NewGaugeFloat64("test_push_all2", "help") + + err = b.PushAll() + assert.NoError(t, err) + after := len(mw.data) + + assert.Equalf(t, after-before, 2, "length of writer.data should have been increased by 2") +} + +func TestPushOne(t *testing.T) { + mw := &mockWriter{} + b := &backend{ + prefix: "testPrefix", + commonTags: map[string]string{"tag1": "value1"}, + writer: mw, + } + + s := stats.NewGaugeFloat64("test_push_one", "help") + err := 
b.PushOne("test_push_one", s) + assert.NoError(t, err) + + assert.Len(t, mw.data, 1) + assert.Equal(t, "testprefix.test_push_one", mw.data[0].Metric) +} diff --git a/go/stats/opentsdb/collector.go b/go/stats/opentsdb/collector.go index 9b870815067..d2c40432e7f 100644 --- a/go/stats/opentsdb/collector.go +++ b/go/stats/opentsdb/collector.go @@ -17,7 +17,6 @@ limitations under the License. package opentsdb import ( - "bytes" "encoding/json" "expvar" "strings" @@ -65,7 +64,7 @@ func (dc *collector) addFloat(metric string, val float64, tags map[string]string // Also make everything lowercase, since opentsdb is case sensitive and lowercase // simplifies the convention. sanitize := func(text string) string { - var b bytes.Buffer + var b strings.Builder for _, r := range text { if unicode.IsDigit(r) || unicode.IsLetter(r) || r == '-' || r == '_' || r == '/' || r == '.' { b.WriteRune(r) diff --git a/go/stats/opentsdb/datapoint_reader_test.go b/go/stats/opentsdb/datapoint_reader_test.go new file mode 100644 index 00000000000..43f99541fa3 --- /dev/null +++ b/go/stats/opentsdb/datapoint_reader_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opentsdb + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRead(t *testing.T) { + invalidInputs := []string{ + "testMetric 0.100000 1.100000 key1=val1 key2=val2", + "InvalidMarshalText\n", + } + + for _, in := range invalidInputs { + mockReader := bytes.NewBufferString(in) + dpr := NewDataPointReader(mockReader) + dp, err := dpr.Read() + + assert.Error(t, err) + assert.Nil(t, dp) + } + + mockReader := bytes.NewBufferString("testMetric 0.100000 1.100000 key1=val1 key2=val2\n") + dpr := NewDataPointReader(mockReader) + dp, err := dpr.Read() + + assert.NoError(t, err) + + expectedDataPoint := DataPoint{ + Metric: "testMetric", + Timestamp: 0.1, + Value: 1.1, + Tags: map[string]string{ + "key1": "val1", + "key2": "val2", + }, + } + assert.Equal(t, expectedDataPoint, *dp) +} diff --git a/go/stats/opentsdb/datapoint_test.go b/go/stats/opentsdb/datapoint_test.go new file mode 100644 index 00000000000..4864c94745d --- /dev/null +++ b/go/stats/opentsdb/datapoint_test.go @@ -0,0 +1,68 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opentsdb + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMarshalText(t *testing.T) { + dp := DataPoint{ + Metric: "testMetric", + Timestamp: 0.1, + Value: 1.1, + Tags: map[string]string{ + "key1": "val1", + }, + } + + str, err := dp.MarshalText() + assert.NoError(t, err) + assert.Equal(t, "testMetric 0.100000 1.100000 key1=val1\n", str) +} + +func TestUnmarshalTextToData(t *testing.T) { + dp := DataPoint{} + + invalidMarshalTestCases := []string{ + "InvalidMarshalText", + "testMetric invalidFloat invalidFloat", + "testMetric 0.100000 invalidFloat", + "testMetric 0.100000 1.100000 invalidKey:ValuePair", + } + + for _, text := range invalidMarshalTestCases { + err := unmarshalTextToData(&dp, []byte(text)) + assert.Error(t, err) + } + + err := unmarshalTextToData(&dp, []byte("testMetric 0.100000 1.100000 key1=val1 key2=val2")) + assert.NoError(t, err) + + expectedDataPoint := DataPoint{ + Metric: "testMetric", + Timestamp: 0.1, + Value: 1.1, + Tags: map[string]string{ + "key1": "val1", + "key2": "val2", + }, + } + assert.Equal(t, expectedDataPoint, dp) +} diff --git a/go/stats/opentsdb/file_writer_test.go b/go/stats/opentsdb/file_writer_test.go new file mode 100644 index 00000000000..8b7b52fb637 --- /dev/null +++ b/go/stats/opentsdb/file_writer_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opentsdb + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFileWriter(t *testing.T) { + tempFile, err := os.CreateTemp("", "tempfile") + assert.NoError(t, err) + defer os.Remove(tempFile.Name()) + + w, err := newFileWriter(tempFile.Name()) + assert.NoError(t, err) + + dp := []*DataPoint{ + { + Metric: "testMetric", + Timestamp: 1.0, + Value: 2.0, + Tags: map[string]string{ + "key1": "value1", + }, + }, + } + + err = w.Write(dp) + assert.NoError(t, err) + + err = tempFile.Close() + assert.NoError(t, err) + + content, err := os.ReadFile(tempFile.Name()) + assert.NoError(t, err) + assert.Equal(t, "testMetric 1.000000 2.000000 key1=value1\n", string(content)) +} diff --git a/go/stats/opentsdb/flags_test.go b/go/stats/opentsdb/flags_test.go new file mode 100644 index 00000000000..ca9d63e37d9 --- /dev/null +++ b/go/stats/opentsdb/flags_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opentsdb + +import ( + "testing" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestRegisterFlags(t *testing.T) { + oldOpenTSDBURI := openTSDBURI + defer func() { + openTSDBURI = oldOpenTSDBURI + }() + + fs := pflag.NewFlagSet("test", pflag.ExitOnError) + + registerFlags(fs) + + err := fs.Set("opentsdb_uri", "testURI") + assert.NoError(t, err) + assert.Equal(t, "testURI", openTSDBURI) +} diff --git a/go/stats/opentsdb/http_writer_test.go b/go/stats/opentsdb/http_writer_test.go new file mode 100644 index 00000000000..faba7b000d6 --- /dev/null +++ b/go/stats/opentsdb/http_writer_test.go @@ -0,0 +1,60 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opentsdb + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWrite(t *testing.T) { + sampleData := []*DataPoint{ + { + Metric: "testMetric", + Timestamp: 1.0, + Value: 2.0, + Tags: map[string]string{ + "tag1": "value1", + }, + }, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "application/json", r.Header.Get("Content-Type")) + + var receivedData []*DataPoint + err := json.NewDecoder(r.Body).Decode(&receivedData) + + assert.NoError(t, err) + assert.Len(t, receivedData, 1) + assert.Equal(t, sampleData[0], receivedData[0]) + + w.WriteHeader(http.StatusOK) + })) + + defer server.Close() + + client := &http.Client{} + hw := newHTTPWriter(client, server.URL) + + err := hw.Write(sampleData) + assert.NoError(t, err) +} diff --git a/go/stats/opentsdb/opentsdb_test.go b/go/stats/opentsdb/opentsdb_test.go index 940ee845ada..78db2616841 100644 --- a/go/stats/opentsdb/opentsdb_test.go +++ b/go/stats/opentsdb/opentsdb_test.go @@ -19,14 +19,36 @@ package opentsdb import ( "encoding/json" "expvar" - "reflect" "sort" "testing" "time" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/stats" ) +func TestFloatFunc(t *testing.T) { + name := "float_func_name" + f := stats.FloatFunc(func() float64 { + return 1.2 + }) + + stats.Publish(name, f) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.float_func_name", + "timestamp": 1234, + "value": 1.2, + "tags": { + "host": "localhost" + } + } + ]`) +} + func TestOpenTsdbCounter(t *testing.T) { name := "counter_name" c := stats.NewCounter(name, "counter description") @@ -83,6 +105,405 @@ func TestGaugesWithMultiLabels(t *testing.T) { ]`) } +func TestGaugesFuncWithMultiLabels(t *testing.T) { + name := "gauges_func_with_multi_labels_name" + stats.NewGaugesFuncWithMultiLabels(name, "help", []string{"flavor", "texture"}, func() map[string]int64 { + m := 
make(map[string]int64) + m["foo.bar"] = 1 + m["bar.baz"] = 2 + return m + }) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.gauges_func_with_multi_labels_name", + "timestamp": 1234, + "value": 2, + "tags": { + "flavor": "bar", + "host": "localhost", + "texture": "baz" + } + }, + { + "metric": "vtgate.gauges_func_with_multi_labels_name", + "timestamp": 1234, + "value": 1, + "tags": { + "flavor": "foo", + "host": "localhost", + "texture": "bar" + } + } + ]`) +} + +func TestGaugesWithSingleLabel(t *testing.T) { + name := "gauges_with_single_label_name" + s := stats.NewGaugesWithSingleLabel(name, "help", "label1") + s.Add("bar", 1) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.gauges_with_single_label_name", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost", + "label1": "bar" + } + } + ]`) +} + +func TestCountersWithSingleLabel(t *testing.T) { + name := "counter_with_single_label_name" + s := stats.NewCountersWithSingleLabel(name, "help", "label", "tag1", "tag2") + s.Add("tag1", 2) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.counter_with_single_label_name", + "timestamp": 1234, + "value": 2, + "tags": { + "host": "localhost", + "label": "tag1" + } + }, + { + "metric": "vtgate.counter_with_single_label_name", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label": "tag2" + } + } + ]`) +} + +func TestCountersWithMultiLabels(t *testing.T) { + name := "counter_with_multiple_label_name" + s := stats.NewCountersWithMultiLabels(name, "help", []string{"label1", "label2"}) + s.Add([]string{"foo", "bar"}, 1) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.counter_with_multiple_label_name", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + } + ]`) +} + +func TestCountersFuncWithMultiLabels(t *testing.T) { + name := "counter_func_with_multiple_labels_name" + stats.NewCountersFuncWithMultiLabels(name, "help", []string{"label1", "label2"}, 
func() map[string]int64 { + m := make(map[string]int64) + m["foo.bar"] = 1 + m["bar.baz"] = 2 + return m + }) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.counter_func_with_multiple_labels_name", + "timestamp": 1234, + "value": 2, + "tags": { + "host": "localhost", + "label1": "bar", + "label2": "baz" + } + }, + { + "metric": "vtgate.counter_func_with_multiple_labels_name", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + } + ]`) +} + +func TestGaugeFloat64(t *testing.T) { + name := "gauge_float64_name" + s := stats.NewGaugeFloat64(name, "help") + s.Set(3.14) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.gauge_float64_name", + "timestamp": 1234, + "value": 3.14, + "tags": { + "host": "localhost" + } + } + ]`) +} + +func TestGaugeFunc(t *testing.T) { + name := "gauge_func_name" + stats.NewGaugeFunc(name, "help", func() int64 { + return 2 + }) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.gauge_func_name", + "timestamp": 1234, + "value": 2, + "tags": { + "host": "localhost" + } + } + ]`) +} + +func TestCounterDuration(t *testing.T) { + name := "counter_duration_name" + s := stats.NewCounterDuration(name, "help") + s.Add(1 * time.Millisecond) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.counter_duration_name", + "timestamp": 1234, + "value": 1000000, + "tags": { + "host": "localhost" + } + } + ]`) +} + +func TestCounterDurationFunc(t *testing.T) { + name := "counter_duration_func_name" + stats.NewCounterDurationFunc(name, "help", func() time.Duration { + return 1 * time.Millisecond + }) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.counter_duration_func_name", + "timestamp": 1234, + "value": 1000000, + "tags": { + "host": "localhost" + } + } + ]`) +} + +func TestMultiTimings(t *testing.T) { + name := "multi_timings_name" + s := stats.NewMultiTimings(name, "help", []string{"label1", "label2"}) + s.Add([]string{"foo", "bar"}, 1) + + checkOutput(t, name, ` + [ + 
{ + "metric": "vtgate.multi_timings_name.1000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.10000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.100000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.1000000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.10000000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.500000", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.5000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.50000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.500000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.5000000000", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.count", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.inf", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost", + 
"label1": "foo", + "label2": "bar" + } + }, + { + "metric": "vtgate.multi_timings_name.time", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost", + "label1": "foo", + "label2": "bar" + } + } + ]`) +} + +func TestHistogram(t *testing.T) { + name := "histogram_name" + s := stats.NewHistogram(name, "help", []int64{1, 2}) + s.Add(2) + + checkOutput(t, name, ` + [ + { + "metric": "vtgate.histogram_name.1", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost" + } + }, + { + "metric": "vtgate.histogram_name.2", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost" + } + }, + { + "metric": "vtgate.histogram_name.count", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "localhost" + } + }, + { + "metric": "vtgate.histogram_name.inf", + "timestamp": 1234, + "value": 0, + "tags": { + "host": "localhost" + } + }, + { + "metric": "vtgate.histogram_name.total", + "timestamp": 1234, + "value": 2, + "tags": { + "host": "localhost" + } + } + ]`) +} + type myVar bool func (mv *myVar) String() string { @@ -351,6 +772,49 @@ func TestOpenTsdbTimings(t *testing.T) { ]`) } +func TestCounterForEmptyCollectorPrefix(t *testing.T) { + name := "counter_for_empty_collector_prefix_name" + c := stats.NewCounter(name, "counter description") + c.Add(1) + + expectedOutput := ` + [ + { + "metric": "counter_for_empty_collector_prefix_name", + "timestamp": 1234, + "value": 1, + "tags": { + "host": "test_localhost" + } + } + ]` + + dc := &collector{ + commonTags: map[string]string{"host": "test localhost"}, + prefix: "", + timestamp: int64(1234), + } + expvar.Do(func(kv expvar.KeyValue) { + if kv.Key == name { + dc.addExpVar(kv) + sort.Sort(byMetric(dc.data)) + + gotBytes, err := json.MarshalIndent(dc.data, "", " ") + assert.NoErrorf(t, err, "failed to marshal json") + + var got any + err = json.Unmarshal(gotBytes, &got) + assert.NoErrorf(t, err, "failed to unmarshal json") + + var want any + err = json.Unmarshal([]byte(expectedOutput), &want) + 
assert.NoErrorf(t, err, "failed to unmarshal json") + + assert.Equal(t, want, got) + } + }) +} + func checkOutput(t *testing.T, statName string, wantJSON string) { b := &backend{ prefix: "vtgate", @@ -372,30 +836,18 @@ func checkOutput(t *testing.T, statName string, wantJSON string) { sort.Sort(byMetric(dc.data)) gotBytes, err := json.MarshalIndent(dc.data, "", " ") - if err != nil { - t.Errorf("Failed to marshal json: %v", err) - return - } + assert.NoErrorf(t, err, "failed to marshal json") + var got any err = json.Unmarshal(gotBytes, &got) - if err != nil { - t.Errorf("Failed to marshal json: %v", err) - return - } + assert.NoErrorf(t, err, "failed to unmarshal json") var want any err = json.Unmarshal([]byte(wantJSON), &want) - if err != nil { - t.Errorf("Failed to marshal json: %v", err) - return - } + assert.NoErrorf(t, err, "failed to unmarshal json") - if !reflect.DeepEqual(got, want) { - t.Errorf("addExpVar(%#v) = %s, want %s", kv, string(gotBytes), wantJSON) - } + assert.Equal(t, want, got) } }) - if !found { - t.Errorf("Stat %s not found?...", statName) - } + assert.True(t, found, "stat %s not found", statName) } diff --git a/go/stats/ring_test.go b/go/stats/ring_test.go new file mode 100644 index 00000000000..d6f8ddb73af --- /dev/null +++ b/go/stats/ring_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package stats + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRingInt64(t *testing.T) { + t.Run("Add Values", func(t *testing.T) { + ri := NewRingInt64(3) + ri.Add(1) + ri.Add(2) + ri.Add(3) + + assert.Equal(t, []int64{1, 2, 3}, ri.Values()) + + ri.Add(4) + ri.Add(5) + assert.Equal(t, []int64{3, 4, 5}, ri.Values()) + }) + + t.Run("Empty Ring", func(t *testing.T) { + ri := NewRingInt64(3) + assert.Empty(t, ri.Values()) + }) +} diff --git a/go/stats/snake_case_converter_test.go b/go/stats/snake_case_converter_test.go index 2552ade8df3..c8a3892020e 100644 --- a/go/stats/snake_case_converter_test.go +++ b/go/stats/snake_case_converter_test.go @@ -36,7 +36,7 @@ func TestToSnakeCase(t *testing.T) { } for _, tt := range snakeCaseTest { - if got, want := toSnakeCase(tt.input), tt.output; got != want { + if got, want := GetSnakeName(tt.input), tt.output; got != want { t.Errorf("want '%s', got '%s'", want, got) } } diff --git a/go/stats/statsd/statsd.go b/go/stats/statsd/statsd.go index f791d7b742d..099b8eea0f6 100644 --- a/go/stats/statsd/statsd.go +++ b/go/stats/statsd/statsd.go @@ -8,7 +8,7 @@ import ( "strings" "sync" - "github.com/DataDog/datadog-go/statsd" + "github.com/DataDog/datadog-go/v5/statsd" "github.com/spf13/pflag" "vitess.io/vitess/go/stats" @@ -81,15 +81,18 @@ func InitWithoutServenv(namespace string) { log.Info("statsdAddress is empty") return } - statsdC, err := statsd.NewBuffered(statsdAddress, 100) + opts := []statsd.Option{ + statsd.WithMaxMessagesPerPayload(100), + statsd.WithNamespace(namespace), + } + if tags := stats.ParseCommonTags(stats.CommonTags); len(tags) > 0 { + opts = append(opts, statsd.WithTags(makeCommonTags(tags))) + } + statsdC, err := statsd.New(statsdAddress, opts...) if err != nil { log.Errorf("Failed to create statsd client %v", err) return } - statsdC.Namespace = namespace + "." 
- if tags := stats.ParseCommonTags(stats.CommonTags); len(tags) > 0 { - statsdC.Tags = makeCommonTags(tags) - } sb.namespace = namespace sb.statsdClient = statsdC sb.sampleRate = statsdSampleRate diff --git a/go/stats/statsd/statsd_test.go b/go/stats/statsd/statsd_test.go index c615da3cdfd..feab8ae6759 100644 --- a/go/stats/statsd/statsd_test.go +++ b/go/stats/statsd/statsd_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "github.com/DataDog/datadog-go/statsd" + "github.com/DataDog/datadog-go/v5/statsd" "github.com/stretchr/testify/assert" "vitess.io/vitess/go/stats" @@ -19,8 +19,7 @@ func getBackend(t *testing.T) (StatsBackend, *net.UDPConn) { udpAddr, _ := net.ResolveUDPAddr("udp", addr) server, _ := net.ListenUDP("udp", udpAddr) bufferLength := 9 - client, _ := statsd.NewBuffered(addr, bufferLength) - client.Namespace = "test." + client, _ := statsd.New(addr, statsd.WithMaxMessagesPerPayload(bufferLength), statsd.WithNamespace("test")) var sb StatsBackend sb.namespace = "foo" sb.sampleRate = 1 diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go index 26248fcd1b1..6d9f81f98d9 100644 --- a/go/streamlog/streamlog.go +++ b/go/streamlog/streamlog.go @@ -23,10 +23,8 @@ import ( "net/http" "net/url" "os" - "os/signal" "strings" "sync" - "syscall" "github.com/spf13/pflag" @@ -63,18 +61,10 @@ func SetRedactDebugUIQueries(newRedactDebugUIQueries bool) { redactDebugUIQueries = newRedactDebugUIQueries } -func GetQueryLogFilterTag() string { - return queryLogFilterTag -} - func SetQueryLogFilterTag(newQueryLogFilterTag string) { queryLogFilterTag = newQueryLogFilterTag } -func GetQueryLogRowThreshold() uint64 { - return queryLogRowThreshold -} - func SetQueryLogRowThreshold(newQueryLogRowThreshold uint64) { queryLogRowThreshold = newQueryLogRowThreshold } @@ -215,7 +205,7 @@ func (logger *StreamLogger[T]) ServeLogs(url string, logf LogFormatter) { // it. 
func (logger *StreamLogger[T]) LogToFile(path string, logf LogFormatter) (chan T, error) { rotateChan := make(chan os.Signal, 1) - signal.Notify(rotateChan, syscall.SIGUSR2) + setupRotate(rotateChan) logChan := logger.Subscribe("FileLog") formatParams := map[string][]string{"full": {}} diff --git a/go/streamlog/streamlog_flaky_test.go b/go/streamlog/streamlog_test.go similarity index 66% rename from go/streamlog/streamlog_flaky_test.go rename to go/streamlog/streamlog_test.go index 9c0b0366a1d..538cae99b54 100644 --- a/go/streamlog/streamlog_flaky_test.go +++ b/go/streamlog/streamlog_test.go @@ -18,6 +18,7 @@ package streamlog import ( "bufio" + "bytes" "fmt" "io" "net" @@ -29,6 +30,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/servenv" ) @@ -213,7 +216,7 @@ func TestFile(t *testing.T) { logger.Send(&logMessage{"test 2"}) // Allow time for propagation - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) want := "test 1\ntest 2\n" contents, _ := os.ReadFile(logPath) @@ -227,7 +230,7 @@ func TestFile(t *testing.T) { os.Rename(logPath, rotatedPath) logger.Send(&logMessage{"test 3"}) - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) want = "test 1\ntest 2\ntest 3\n" contents, _ = os.ReadFile(rotatedPath) @@ -241,10 +244,10 @@ func TestFile(t *testing.T) { if err := syscall.Kill(syscall.Getpid(), syscall.SIGUSR2); err != nil { t.Logf("failed to send streamlog rotate signal: %v", err) } - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) logger.Send(&logMessage{"test 4"}) - time.Sleep(10 * time.Millisecond) + time.Sleep(100 * time.Millisecond) want = "test 1\ntest 2\ntest 3\n" contents, _ = os.ReadFile(rotatedPath) @@ -260,3 +263,122 @@ func TestFile(t *testing.T) { t.Errorf("streamlog file: want %q got %q", want, got) } } + +func TestShouldEmitLog(t *testing.T) { + origQueryLogFilterTag := queryLogFilterTag + origQueryLogRowThreshold := queryLogRowThreshold 
+ defer func() { + SetQueryLogFilterTag(origQueryLogFilterTag) + SetQueryLogRowThreshold(origQueryLogRowThreshold) + }() + + tests := []struct { + sql string + qLogFilterTag string + qLogRowThreshold uint64 + rowsAffected uint64 + rowsReturned uint64 + ok bool + }{ + { + sql: "queryLogThreshold smaller than affected and returned", + qLogFilterTag: "", + qLogRowThreshold: 2, + rowsAffected: 7, + rowsReturned: 7, + ok: true, + }, + { + sql: "queryLogThreshold greater than affected and returned", + qLogFilterTag: "", + qLogRowThreshold: 27, + rowsAffected: 7, + rowsReturned: 17, + ok: false, + }, + { + sql: "this doesn't contains queryFilterTag: TAG", + qLogFilterTag: "special tag", + qLogRowThreshold: 10, + rowsAffected: 7, + rowsReturned: 17, + ok: false, + }, + { + sql: "this contains queryFilterTag: TAG", + qLogFilterTag: "TAG", + qLogRowThreshold: 0, + rowsAffected: 7, + rowsReturned: 17, + ok: true, + }, + } + + for _, tt := range tests { + t.Run(tt.sql, func(t *testing.T) { + SetQueryLogFilterTag(tt.qLogFilterTag) + SetQueryLogRowThreshold(tt.qLogRowThreshold) + + require.Equal(t, tt.ok, ShouldEmitLog(tt.sql, tt.rowsAffected, tt.rowsReturned)) + }) + } +} + +func TestGetFormatter(t *testing.T) { + tests := []struct { + name string + logger *StreamLogger[string] + params url.Values + val any + expectedErr string + expectedOutput string + }{ + { + name: "unexpected value error", + logger: &StreamLogger[string]{ + name: "test-logger", + }, + params: url.Values{ + "keys": []string{"key1", "key2"}, + }, + val: "temp val", + expectedOutput: "Error: unexpected value of type string in test-logger!", + expectedErr: "", + }, + { + name: "mock formatter", + logger: &StreamLogger[string]{ + name: "test-logger", + }, + params: url.Values{ + "keys": []string{"key1", "key2"}, + }, + val: &mockFormatter{err: fmt.Errorf("formatter error")}, + expectedErr: "formatter error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buffer bytes.Buffer + 
logFormatterFunc := GetFormatter[string](tt.logger) + err := logFormatterFunc(&buffer, tt.params, tt.val) + if tt.expectedErr == "" { + require.NoError(t, err) + require.Equal(t, tt.expectedOutput, buffer.String()) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} + +type mockFormatter struct { + called bool + err error +} + +func (mf *mockFormatter) Logf(w io.Writer, params url.Values) error { + mf.called = true + return mf.err +} diff --git a/go/streamlog/streamlog_unix.go b/go/streamlog/streamlog_unix.go new file mode 100644 index 00000000000..0cfa4b9e6bc --- /dev/null +++ b/go/streamlog/streamlog_unix.go @@ -0,0 +1,29 @@ +//go:build !windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package streamlog + +import ( + "os" + "os/signal" + "syscall" +) + +func setupRotate(ch chan os.Signal) { + signal.Notify(ch, syscall.SIGUSR2) +} diff --git a/go/vt/vtgate/planbuilder/filter.go b/go/streamlog/streamlog_windows.go similarity index 57% rename from go/vt/vtgate/planbuilder/filter.go rename to go/streamlog/streamlog_windows.go index c3686380446..ef69058b97c 100644 --- a/go/vt/vtgate/planbuilder/filter.go +++ b/go/streamlog/streamlog_windows.go @@ -1,5 +1,7 @@ +//go:build windows + /* -Copyright 2021 The Vitess Authors. +Copyright 2023 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,24 +16,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package planbuilder +package streamlog import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) + "os" -type ( - // filter is the logicalPlan for engine.Filter. - filter struct { - logicalPlanCommon - efilter *engine.Filter - } + "vitess.io/vitess/go/vt/log" ) -var _ logicalPlan = (*filter)(nil) - -// Primitive implements the logicalPlan interface -func (l *filter) Primitive() engine.Primitive { - l.efilter.Input = l.input.Primitive() - return l.efilter +func setupRotate(ch chan os.Signal) { + log.Warningf("signal based log rotation is not supported on Windows") } diff --git a/go/sync2/consolidator.go b/go/sync2/consolidator.go index 604d7fff35b..401daaef1f1 100644 --- a/go/sync2/consolidator.go +++ b/go/sync2/consolidator.go @@ -127,21 +127,19 @@ func (rs *pendingResult) Wait() { // It is also used by the txserializer package to count how often transactions // have been queued and had to wait because they targeted the same row (range). type ConsolidatorCache struct { - *cache.LRUCache + *cache.LRUCache[*ccount] } // NewConsolidatorCache creates a new cache with the given capacity. func NewConsolidatorCache(capacity int64) *ConsolidatorCache { - return &ConsolidatorCache{cache.NewLRUCache(capacity, func(_ any) int64 { - return 1 - })} + return &ConsolidatorCache{cache.NewLRUCache[*ccount](capacity)} } // Record increments the count for "query" by 1. // If it's not in the cache yet, it will be added. 
func (cc *ConsolidatorCache) Record(query string) { - if v, ok := cc.Get(query); ok { - v.(*ccount).add(1) + if c, ok := cc.Get(query); ok { + c.add(1) } else { c := ccount(1) cc.Set(query, &c) @@ -159,7 +157,7 @@ func (cc *ConsolidatorCache) Items() []ConsolidatorCacheItem { items := cc.LRUCache.Items() ret := make([]ConsolidatorCacheItem, len(items)) for i, v := range items { - ret[i] = ConsolidatorCacheItem{Query: v.Key, Count: v.Value.(*ccount).get()} + ret[i] = ConsolidatorCacheItem{Query: v.Key, Count: v.Value.get()} } return ret } diff --git a/go/syscallutil/kill_unix.go b/go/syscallutil/kill_unix.go new file mode 100644 index 00000000000..d0b1776ae3c --- /dev/null +++ b/go/syscallutil/kill_unix.go @@ -0,0 +1,27 @@ +//go:build !windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syscallutil + +import ( + "syscall" +) + +func Kill(pid int, signum syscall.Signal) (err error) { + return syscall.Kill(pid, signum) +} diff --git a/go/syscallutil/kill_windows.go b/go/syscallutil/kill_windows.go new file mode 100644 index 00000000000..091fcdf759d --- /dev/null +++ b/go/syscallutil/kill_windows.go @@ -0,0 +1,28 @@ +//go:build windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package syscallutil + +import ( + "errors" + "syscall" +) + +func Kill(pid int, signum syscall.Signal) (err error) { + return errors.New("kill is not supported on windows") +} diff --git a/go/tb/error_test.go b/go/tb/error_test.go new file mode 100644 index 00000000000..db257bc85b6 --- /dev/null +++ b/go/tb/error_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tb + +import ( + "bytes" + "fmt" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStackTrace(t *testing.T) { + testErr := "test err" + testStackTrace := "test stack trace" + testStackError := stackError{ + err: fmt.Errorf("%s", testErr), + stackTrace: testStackTrace, + } + + expectedErr := fmt.Sprintf("%s\n%s", testErr, testStackTrace) + assert.Equal(t, expectedErr, testStackError.Error()) + assert.Equal(t, testStackTrace, testStackError.StackTrace()) +} + +func TestStack(t *testing.T) { + // skip is set to 2 to check if the 3rd function in + // the go routine stack is called from this file + // 1st func is expected to be stack and 2nd to be Stack + b := Stack(2) + l := bytes.Split(b, []byte(":")) + + _, file, _, _ := runtime.Caller(0) + assert.Equal(t, string(l[0]), file) +} + +func TestFunction(t *testing.T) { + pc, _, _, _ := runtime.Caller(0) + name := function(pc) + + assert.Equal(t, "io/vitess/go/tb.TestFunction", string(name)) + + // invalid program counter + name = function(0) + assert.Equal(t, name, dunno) +} + +func TestErrorf(t *testing.T) { + s1 := stackError{ + err: fmt.Errorf("err1"), + stackTrace: "stackTrace1", + } + s2 := stackError{ + err: fmt.Errorf("err2"), + stackTrace: "stackTrace2", + } + err := Errorf("test msg %v %v", s1, s2) + + expectedMsg := fmt.Sprintf("test msg %v %v", s1, s2) + expectedErr := fmt.Sprintf("%v\n%v", expectedMsg, "stackTrace1") + assert.Equal(t, err.Error(), expectedErr) + + err = Errorf("test msg") + s := string(Stack(4)) + expectedErr = fmt.Sprintf("%v\n%v", "test msg", s) + assert.Equal(t, err.Error(), expectedErr) +} + +func TestSource(t *testing.T) { + lines := [][]byte{ + []byte("\ttest line 1\t"), + []byte("\ttest line 2\t"), + []byte("\ttest line 3\t"), + } + + assert.Equal(t, []byte("test line 1"), source(lines, 0)) + assert.Equal(t, []byte("test line 2"), source(lines, 1)) + assert.Equal(t, dunno, source(lines, -1)) + assert.Equal(t, dunno, source(lines, 3)) +} diff 
--git a/go/test/endtoend/backup/vtbackup/backup_only_test.go b/go/test/endtoend/backup/vtbackup/backup_only_test.go index 5e80d5d3cc3..7dada7a77d2 100644 --- a/go/test/endtoend/backup/vtbackup/backup_only_test.go +++ b/go/test/endtoend/backup/vtbackup/backup_only_test.go @@ -83,12 +83,12 @@ func TestTabletInitialBackup(t *testing.T) { restore(t, primary, "replica", "NOT_SERVING") // Vitess expects that the user has set the database into ReadWrite mode before calling // TabletExternallyReparented - err = localCluster.VtctlclientProcess.ExecuteCommand( - "SetReadWrite", primary.Alias) - require.Nil(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand( + err = localCluster.VtctldClientProcess.ExecuteCommand( + "SetWritable", primary.Alias, "true") + require.NoError(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand( "TabletExternallyReparented", primary.Alias) - require.Nil(t, err) + require.NoError(t, err) restore(t, replica1, "replica", "SERVING") // Run the entire backup test @@ -134,14 +134,14 @@ func firstBackupTest(t *testing.T, tabletType string) { // Store initial backup counts backups, err := listBackups(shardKsName) - require.Nil(t, err) + require.NoError(t, err) // insert data on primary, wait for replica to get it _, err = primary.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // Add a single row with value 'test1' to the primary tablet _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // Check that the specified tablet has the expected number of rows cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 1) @@ -158,7 +158,7 @@ func firstBackupTest(t *testing.T, tabletType string) { // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + 
require.NoError(t, err) cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2) // even though we change the value of compression it won't affect @@ -167,8 +167,8 @@ func firstBackupTest(t *testing.T, tabletType string) { mysqlctl.CompressionEngineName = "lz4" defer func() { mysqlctl.CompressionEngineName = "pgzip" }() // now bring up the other replica, letting it restore from backup. - err = localCluster.VtctlclientProcess.InitTablet(replica2, cell, keyspaceName, hostname, shardName) - require.Nil(t, err) + err = localCluster.InitTablet(replica2, keyspaceName, shardName) + require.NoError(t, err) restore(t, replica2, "replica", "SERVING") // Replica2 takes time to serve. Sleeping for 5 sec. time.Sleep(5 * time.Second) @@ -181,7 +181,7 @@ func firstBackupTest(t *testing.T, tabletType string) { func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedoLog bool) *opentsdb.DataPointReader { mysqlSocket, err := os.CreateTemp("", "vtbackup_test_mysql.sock") - require.Nil(t, err) + require.NoError(t, err) defer os.Remove(mysqlSocket.Name()) // Prepare opentsdb stats file path. @@ -214,7 +214,7 @@ func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedo log.Infof("starting backup tablet %s", time.Now()) err = localCluster.StartVtbackup(newInitDBFile, initialBackup, keyspaceName, shardName, cell, extraArgs...) 
- require.Nil(t, err) + require.NoError(t, err) f, err := os.OpenFile(statsPath, os.O_RDONLY, 0) require.NoError(t, err) @@ -223,7 +223,7 @@ func vtBackup(t *testing.T, initialBackup bool, restartBeforeBackup, disableRedo func verifyBackupCount(t *testing.T, shardKsName string, expected int) []string { backups, err := listBackups(shardKsName) - require.Nil(t, err) + require.NoError(t, err) assert.Equalf(t, expected, len(backups), "invalid number of backups") return backups } @@ -251,7 +251,7 @@ func listBackups(shardKsName string) ([]string, error) { func removeBackups(t *testing.T) { // Remove all the backups from the shard backups, err := listBackups(shardKsName) - require.Nil(t, err) + require.NoError(t, err) for _, backup := range backups { _, err := localCluster.VtctlProcess.ExecuteCommandWithOutput( "--backup_storage_implementation", "file", @@ -259,26 +259,26 @@ func removeBackups(t *testing.T) { path.Join(os.Getenv("VTDATAROOT"), "tmp", "backupstorage"), "RemoveBackup", shardKsName, backup, ) - require.Nil(t, err) + require.NoError(t, err) } } func initTablets(t *testing.T, startTablet bool, initShardPrimary bool) { // Initialize tablets for _, tablet := range []cluster.Vttablet{*primary, *replica1} { - err := localCluster.VtctlclientProcess.InitTablet(&tablet, cell, keyspaceName, hostname, shardName) - require.Nil(t, err) + err := localCluster.InitTablet(&tablet, keyspaceName, shardName) + require.NoError(t, err) if startTablet { err = tablet.VttabletProcess.Setup() - require.Nil(t, err) + require.NoError(t, err) } } if initShardPrimary { // choose primary and start replication - err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + require.NoError(t, err) } } @@ -293,19 +293,20 @@ func restore(t *testing.T, tablet *cluster.Vttablet, tabletType string, waitForS 
tablet.VttabletProcess.ServingStatus = waitForState tablet.VttabletProcess.SupportsBackup = true err := tablet.VttabletProcess.Setup() - require.Nil(t, err) + require.NoError(t, err) } func resetTabletDirectory(t *testing.T, tablet cluster.Vttablet, initMysql bool) { extraArgs := []string{"--db-credentials-file", dbCredentialFile} tablet.MysqlctlProcess.ExtraArgs = extraArgs - // Shutdown Mysql - err := tablet.MysqlctlProcess.Stop() - require.Nil(t, err) // Teardown Tablet - err = tablet.VttabletProcess.TearDown() - require.Nil(t, err) + err := tablet.VttabletProcess.TearDown() + require.NoError(t, err) + + // Shutdown Mysql + err = tablet.MysqlctlProcess.Stop() + require.NoError(t, err) // Clear out the previous data tablet.MysqlctlProcess.CleanupFiles(tablet.TabletUID) @@ -314,7 +315,7 @@ func resetTabletDirectory(t *testing.T, tablet cluster.Vttablet, initMysql bool) // Init the Mysql tablet.MysqlctlProcess.InitDBFile = newInitDBFile err = tablet.MysqlctlProcess.Start() - require.Nil(t, err) + require.NoError(t, err) } } @@ -322,30 +323,40 @@ func tearDown(t *testing.T, initMysql bool) { // reset replication for _, db := range []string{"_vt", "vt_insert_test"} { _, err := primary.VttabletProcess.QueryTablet(fmt.Sprintf("drop database if exists %s", db), keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) } caughtUp := waitForReplicationToCatchup([]cluster.Vttablet{*replica1, *replica2}) require.True(t, caughtUp, "Timed out waiting for all replicas to catch up") - promoteCommands := "STOP SLAVE; RESET SLAVE ALL; RESET MASTER;" - disableSemiSyncCommands := "SET GLOBAL rpl_semi_sync_master_enabled = false; SET GLOBAL rpl_semi_sync_slave_enabled = false" + + promoteCommands := []string{"STOP REPLICA", "RESET REPLICA ALL"} + + disableSemiSyncCommandsSource := []string{"SET GLOBAL rpl_semi_sync_source_enabled = false", " SET GLOBAL rpl_semi_sync_replica_enabled = false"} + disableSemiSyncCommandsMaster := []string{"SET GLOBAL 
rpl_semi_sync_master_enabled = false", " SET GLOBAL rpl_semi_sync_slave_enabled = false"} + for _, tablet := range []cluster.Vttablet{*primary, *replica1, *replica2} { - _, err := tablet.VttabletProcess.QueryTablet(promoteCommands, keyspaceName, true) - require.Nil(t, err) - _, err = tablet.VttabletProcess.QueryTablet(disableSemiSyncCommands, keyspaceName, true) - require.Nil(t, err) + resetCmd, err := tablet.VttabletProcess.ResetBinaryLogsCommand() + require.NoError(t, err) + cmds := append(promoteCommands, resetCmd) + err = tablet.VttabletProcess.QueryTabletMultiple(cmds, keyspaceName, true) + require.NoError(t, err) + semisyncType, err := tablet.VttabletProcess.SemiSyncExtensionLoaded() + require.NoError(t, err) + + switch semisyncType { + case mysql.SemiSyncTypeSource: + err = tablet.VttabletProcess.QueryTabletMultiple(disableSemiSyncCommandsSource, keyspaceName, true) + require.NoError(t, err) + case mysql.SemiSyncTypeMaster: + err = tablet.VttabletProcess.QueryTabletMultiple(disableSemiSyncCommandsMaster, keyspaceName, true) + require.NoError(t, err) + } } - // TODO: Ideally we should not be resetting the mysql. - // So in below code we will have to uncomment the commented code and remove resetTabletDirectory for _, tablet := range []cluster.Vttablet{*primary, *replica1, *replica2} { - //Tear down Tablet - //err := tablet.VttabletProcess.TearDown() - //require.Nil(t, err) - resetTabletDirectory(t, tablet, initMysql) // DeleteTablet on a primary will cause tablet to shutdown, so should only call it after tablet is already shut down - err := localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) + require.NoError(t, err) } } @@ -364,7 +375,7 @@ func verifyDisableEnableRedoLogs(ctx context.Context, t *testing.T, mysqlSocket // Check if server supports disable/enable redo log. 
qr, err := conn.ExecuteFetch("SELECT 1 FROM performance_schema.global_status WHERE variable_name = 'innodb_redo_log_enabled'", 1, false) - require.Nil(t, err) + require.NoError(t, err) // If not, there's nothing to test. if len(qr.Rows) == 0 { return @@ -373,7 +384,7 @@ func verifyDisableEnableRedoLogs(ctx context.Context, t *testing.T, mysqlSocket // MY-013600 // https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_ib_wrn_redo_disabled qr, err = conn.ExecuteFetch("SELECT 1 FROM performance_schema.error_log WHERE error_code = 'MY-013600'", 1, false) - require.Nil(t, err) + require.NoError(t, err) if len(qr.Rows) != 1 { // Keep trying, possible we haven't disabled yet. continue @@ -382,7 +393,7 @@ func verifyDisableEnableRedoLogs(ctx context.Context, t *testing.T, mysqlSocket // MY-013601 // https://dev.mysql.com/doc/mysql-errors/8.0/en/server-error-reference.html#error_er_ib_wrn_redo_enabled qr, err = conn.ExecuteFetch("SELECT 1 FROM performance_schema.error_log WHERE error_code = 'MY-013601'", 1, false) - require.Nil(t, err) + require.NoError(t, err) if len(qr.Rows) != 1 { // Keep trying, possible we haven't disabled yet. 
continue diff --git a/go/test/endtoend/backup/vtbackup/main_test.go b/go/test/endtoend/backup/vtbackup/main_test.go index 36bfae123d8..367956c9827 100644 --- a/go/test/endtoend/backup/vtbackup/main_test.go +++ b/go/test/endtoend/backup/vtbackup/main_test.go @@ -43,8 +43,6 @@ var ( shardKsName = fmt.Sprintf("%s/%s", keyspaceName, shardName) dbCredentialFile string commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", "--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index 1ca56db68c2..9227ce39516 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -21,6 +21,7 @@ import ( "context" "encoding/json" "fmt" + "math/rand" "os" "os/exec" "path" @@ -33,9 +34,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" @@ -74,8 +75,6 @@ var ( dbCredentialFile string shardName = "0" commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", "--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", @@ -230,13 +229,13 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp replica2 = shard.Vttablets[2] replica3 = shard.Vttablets[3] - if err := localCluster.VtctlclientProcess.InitTablet(primary, cell, keyspaceName, hostname, shard.Name); err != nil { + if err := localCluster.InitTablet(primary, keyspaceName, shard.Name); err != nil { return 1, 
err } - if err := localCluster.VtctlclientProcess.InitTablet(replica1, cell, keyspaceName, hostname, shard.Name); err != nil { + if err := localCluster.InitTablet(replica1, keyspaceName, shard.Name); err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.InitTablet(replica2, cell, keyspaceName, hostname, shard.Name); err != nil { + if err := localCluster.InitTablet(replica2, keyspaceName, shard.Name); err != nil { return 1, err } vtctldClientProcess := cluster.VtctldClientProcessInstance("localhost", localCluster.VtctldProcess.GrpcPort, localCluster.TmpDirectory) @@ -259,7 +258,7 @@ func LaunchCluster(setupType int, streamMode string, stripes int, cDetails *Comp return replica3, nil } - if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { return 1, err } @@ -451,14 +450,14 @@ func primaryBackup(t *testing.T) { }() verifyInitialReplication(t) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("Backup", primary.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("Backup", primary.Alias) require.Error(t, err) assert.Contains(t, output, "type PRIMARY cannot take backup. 
if you really need to do this, rerun the backup command with --allow_primary") localCluster.VerifyBackupCount(t, shardKsName, 0) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias) + require.NoError(t, err) // We'll restore this on the primary later to test restores using a backup timestamp firstBackupTimestamp := time.Now().UTC().Format(mysqlctl.BackupTimestampFormat) @@ -467,18 +466,18 @@ func primaryBackup(t *testing.T) { assert.Contains(t, backups[0], primary.Alias) _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) restoreWaitForBackup(t, "replica", nil, true) err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout) - require.Nil(t, err) + require.NoError(t, err) // Verify that we have all the new data -- we should have 2 records now... 
// And only 1 record after we restore using the first backup timestamp cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", "--", "--allow_primary=true", primary.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", "--allow-primary", primary.Alias) + require.NoError(t, err) backups = localCluster.VerifyBackupCount(t, shardKsName, 2) assert.Contains(t, backups[1], primary.Alias) @@ -487,36 +486,35 @@ func primaryBackup(t *testing.T) { // Perform PRS to demote the primary tablet (primary) so that we can do a restore there and verify we don't have the // data from after the older/first backup - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) + require.NoError(t, err) // Delete the current primary tablet (replica2) so that the original primary tablet (primary) can be restored from the // older/first backup w/o it replicating the subsequent insert done after the first backup was taken - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary=true", replica2.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", replica2.Alias) + require.NoError(t, err) err = replica2.VttabletProcess.TearDown() - require.Nil(t, err) + require.NoError(t, err) // Restore the older/first backup -- using the timestamp we saved -- on the original primary tablet (primary) - err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", "--", "--backup_timestamp", firstBackupTimestamp, primary.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", "--backup-timestamp", 
firstBackupTimestamp, primary.Alias) + require.NoError(t, err) verifyTabletRestoreStats(t, primary.VttabletProcess.GetVars()) // Re-init the shard -- making the original primary tablet (primary) primary again -- for subsequent tests - err = localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + require.NoError(t, err) // Verify that we don't have the record created after the older/first backup cluster.VerifyRowsInTablet(t, primary, keyspaceName, 1) verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups) - require.Nil(t, err) + require.NoError(t, err) _, err = primary.VttabletProcess.QueryTablet("DROP TABLE vt_insert_test", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) } // Test a primary and replica from the same backup. @@ -528,32 +526,31 @@ func primaryReplicaSameBackup(t *testing.T) { verifyInitialReplication(t) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // now bring up the other replica, letting it restore from backup. 
restoreWaitForBackup(t, "replica", nil, true) err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout) - require.Nil(t, err) + require.NoError(t, err) // check the new replica has the data cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) // Promote replica2 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) + require.NoError(t, err) // insert more data on replica2 (current primary) _, err = replica2.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // Force replica1 to restore from backup. verifyRestoreTablet(t, replica1, "SERVING") @@ -566,19 +563,19 @@ func primaryReplicaSameBackup(t *testing.T) { // It is written into the MANIFEST and read back from the MANIFEST. // // Take another backup on the replica. - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) // Insert more data on replica2 (current primary). _, err = replica2.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test4')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // Force replica1 to restore from backup. 
verifyRestoreTablet(t, replica1, "SERVING") cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 4) err = replica2.VttabletProcess.TearDown() - require.Nil(t, err) + require.NoError(t, err) restartPrimaryAndReplica(t) } @@ -596,14 +593,14 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { time.Sleep(5 * time.Second) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // now bring up the other replica, with change in compression engine // this is to verify that restore will read engine name from manifest instead of reading the new values @@ -615,20 +612,19 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { } restoreWaitForBackup(t, "replica", cDetails, false) err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, timeout) - require.Nil(t, err) + require.NoError(t, err) // check the new replica has the data cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) // Promote replica2 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica2.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica2.Alias, shardKsName) + require.NoError(t, err) // insert more data on replica2 (current primary) _, err = replica2.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // Force replica1 to 
restore from backup. verifyRestoreTablet(t, replica1, "SERVING") @@ -637,21 +633,20 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3) // Promote replica1 to primary - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) + require.NoError(t, err) // Insert more data on replica1 (current primary). _, err = replica1.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test4')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // wait for replica2 to catch up. cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 4) // Now take replica2 backup with gzip (new compressor) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica2.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica2.Alias) + require.NoError(t, err) verifyTabletBackupStats(t, replica2.VttabletProcess.GetVars()) @@ -659,7 +654,7 @@ func primaryReplicaSameBackupModifiedCompressionEngine(t *testing.T) { verifyRestoreTablet(t, replica2, "SERVING") cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 4) err = replica2.VttabletProcess.TearDown() - require.Nil(t, err) + require.NoError(t, err) restartPrimaryAndReplica(t) } @@ -690,24 +685,23 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { time.Sleep(5 * time.Second) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) // insert more data on the primary _, err = 
primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // reparent to replica1 - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) + require.NoError(t, err) // insert more data to new primary _, err = replica1.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // force the old primary to restore at the latest backup. method(t, primary) @@ -723,13 +717,13 @@ func testRestoreOldPrimary(t *testing.T, method restoreMethod) { func restoreUsingRestart(t *testing.T, tablet *cluster.Vttablet) { err := tablet.VttabletProcess.TearDown() - require.Nil(t, err) + require.NoError(t, err) verifyRestoreTablet(t, tablet, "SERVING") } func restoreInPlace(t *testing.T, tablet *cluster.Vttablet) { - err := localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", tablet.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", tablet.Alias) + require.NoError(t, err) } func restartPrimaryAndReplica(t *testing.T) { @@ -753,13 +747,13 @@ func restartPrimaryAndReplica(t *testing.T) { proc.Wait() } for _, tablet := range []*cluster.Vttablet{primary, replica1} { - err := localCluster.VtctlclientProcess.InitTablet(tablet, cell, keyspaceName, hostname, shardName) - require.Nil(t, err) + err := localCluster.InitTablet(tablet, keyspaceName, shardName) + require.NoError(t, err) err = tablet.VttabletProcess.Setup() - require.Nil(t, err) + require.NoError(t, err) } - err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) - require.Nil(t, err) + 
err := localCluster.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primary.TabletUID) + require.NoError(t, err) } func stopAllTablets() { @@ -768,12 +762,12 @@ func stopAllTablets() { tablet.VttabletProcess.TearDown() if tablet.MysqlctldProcess.TabletUID > 0 { tablet.MysqlctldProcess.Stop() - localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) continue } proc, _ := tablet.MysqlctlProcess.StopProcess() mysqlProcs = append(mysqlProcs, proc) - localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", tablet.Alias) + localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", tablet.Alias) } for _, proc := range mysqlProcs { proc.Wait() @@ -798,33 +792,32 @@ func terminatedRestore(t *testing.T) { checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) // backup the replica - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) checkTabletType(t, replica1.Alias, topodata.TabletType_REPLICA) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) // insert more data on the primary _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) // reparent to replica1 - err = localCluster.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", - "--keyspace_shard", shardKsName, - "--new_primary", replica1.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", + "--new-primary", replica1.Alias, shardKsName) + require.NoError(t, err) // insert more data to new primary _, err = replica1.VttabletProcess.QueryTablet("insert 
into vt_insert_test (msg) values ('test3')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) terminateRestore(t) // If restore fails then the tablet type goes back to original type. checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) - err = localCluster.VtctlclientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("RestoreFromBackup", primary.Alias) + require.NoError(t, err) checkTabletType(t, primary.Alias, topodata.TabletType_REPLICA) _, err = os.Stat(path.Join(primary.VttabletProcess.Directory, "restore_in_progress")) @@ -842,7 +835,7 @@ func checkTabletType(t *testing.T, alias string, tabletType topodata.TabletType) // for loop for 15 seconds to check if tablet type is correct for i := 0; i < 15; i++ { output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetTablet", alias) - require.Nil(t, err) + require.NoError(t, err) var tabletPB topodata.Tablet err = json2.Unmarshal([]byte(output), &tabletPB) require.NoError(t, err) @@ -865,8 +858,8 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) // now backup - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) }() // Perform a graceful reparent operation @@ -876,11 +869,12 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { checkTabletType(t, primary.Alias, topodata.TabletType_PRIMARY) // now reparent - _, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName), - "--new_primary", replica1.Alias) - require.Nil(t, err) + _, err := 
localCluster.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + "--new-primary", replica1.Alias, + fmt.Sprintf("%s/%s", keyspaceName, shardName), + ) + require.NoError(t, err) // check that we reparented checkTabletType(t, replica1.Alias, topodata.TabletType_PRIMARY) @@ -908,48 +902,48 @@ func doNotDemoteNewlyPromotedPrimaryIfReparentingDuringBackup(t *testing.T) { func vtctlBackup(t *testing.T, tabletType string) { // StopReplication on replica1. We verify that the replication works fine later in // verifyInitialReplication. So this will also check that VTOrc is running. - err := localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", replica1.Alias) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("StopReplication", replica1.Alias) + require.NoError(t, err) verifyInitialReplication(t) restoreWaitForBackup(t, tabletType, nil, true) - err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) + require.NoError(t, err) backups := localCluster.VerifyBackupCount(t, shardKsName, 1) verifyTabletBackupStats(t, replica1.VttabletProcess.GetVars()) _, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) err = replica2.VttabletProcess.WaitForTabletStatusesForTimeout([]string{"SERVING"}, 25*time.Second) - require.Nil(t, err) + require.NoError(t, err) cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 2) verifyAfterRemovingBackupNoBackupShouldBePresent(t, backups) err = replica2.VttabletProcess.TearDown() - require.Nil(t, err) + require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteTablet", replica2.Alias) - require.Nil(t, err) + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteTablets", replica2.Alias) + require.NoError(t, err) _, err = 
primary.VttabletProcess.QueryTablet("DROP TABLE vt_insert_test", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) } func InitTestTable(t *testing.T) { _, err := primary.VttabletProcess.QueryTablet("DROP TABLE IF EXISTS vt_insert_test", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) _, err = primary.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) } // This will create schema in primary, insert some data to primary and verify the same data in replica func verifyInitialReplication(t *testing.T) { InitTestTable(t) _, err := primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true) - require.Nil(t, err) + require.NoError(t, err) cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 1) } @@ -974,12 +968,12 @@ func restoreWaitForBackup(t *testing.T, tabletType string, cDetails *Compression replica2.VttabletProcess.ExtraArgs = replicaTabletArgs replica2.VttabletProcess.ServingStatus = "" err := replica2.VttabletProcess.Setup() - require.Nil(t, err) + require.NoError(t, err) } func RemoveBackup(t *testing.T, backupName string) { - err := localCluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName) - require.Nil(t, err) + err := localCluster.VtctldClientProcess.ExecuteCommand("RemoveBackup", shardKsName, backupName) + require.NoError(t, err) } func verifyAfterRemovingBackupNoBackupShouldBePresent(t *testing.T, backups []string) { @@ -996,17 +990,17 @@ func verifyRestoreTablet(t *testing.T, tablet *cluster.Vttablet, status string) tablet.ValidateTabletRestart(t) tablet.VttabletProcess.ServingStatus = "" err := tablet.VttabletProcess.Setup() - require.Nil(t, err) + require.NoError(t, err) if status != "" { err = tablet.VttabletProcess.WaitForTabletStatusesForTimeout([]string{status}, 25*time.Second) - require.Nil(t, err) + require.NoError(t, err) } // We restart replication here because semi-sync 
will not be set correctly on tablet startup since // we deprecated enable_semi_sync. StartReplication RPC fixes the semi-sync settings by consulting the // durability policies set. - err = localCluster.VtctlclientProcess.ExecuteCommand("StopReplication", tablet.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("StopReplication", tablet.Alias) require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("StartReplication", tablet.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("StartReplication", tablet.Alias) require.NoError(t, err) if tablet.Type == "replica" { @@ -1017,12 +1011,24 @@ func verifyRestoreTablet(t *testing.T, tablet *cluster.Vttablet, status string) } func verifySemiSyncStatus(t *testing.T, vttablet *cluster.Vttablet, expectedStatus string) { - status, err := vttablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", keyspaceName) - require.Nil(t, err) - assert.Equal(t, status, expectedStatus) - status, err = vttablet.VttabletProcess.GetDBStatus("rpl_semi_sync_slave_status", keyspaceName) - require.Nil(t, err) - assert.Equal(t, status, expectedStatus) + semisyncType, err := vttablet.VttabletProcess.SemiSyncExtensionLoaded() + require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + status, err := vttablet.VttabletProcess.GetDBVar("rpl_semi_sync_replica_enabled", keyspaceName) + require.NoError(t, err) + assert.Equal(t, status, expectedStatus) + status, err = vttablet.VttabletProcess.GetDBStatus("rpl_semi_sync_replica_status", keyspaceName) + require.NoError(t, err) + assert.Equal(t, status, expectedStatus) + case mysql.SemiSyncTypeMaster: + status, err := vttablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", keyspaceName) + require.NoError(t, err) + assert.Equal(t, status, expectedStatus) + status, err = vttablet.VttabletProcess.GetDBStatus("rpl_semi_sync_slave_status", keyspaceName) + require.NoError(t, err) + assert.Equal(t, status, expectedStatus) + } } func 
terminateBackup(t *testing.T, alias string) { @@ -1035,15 +1041,15 @@ func terminateBackup(t *testing.T, alias string) { }() } - args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "Backup", "--", alias) + args := append([]string{"--server", localCluster.VtctldClientProcess.Server, "--alsologtostderr"}, "Backup", alias) tmpProcess := exec.Command( - "vtctlclient", + "vtctldclient", args..., ) - reader, _ := tmpProcess.StderrPipe() + reader, _ := tmpProcess.StdoutPipe() err := tmpProcess.Start() - require.Nil(t, err) + require.NoError(t, err) found := false scanner := bufio.NewScanner(reader) @@ -1069,15 +1075,15 @@ func terminateRestore(t *testing.T) { }() } - args := append([]string{"--server", localCluster.VtctlclientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", "--", primary.Alias) + args := append([]string{"--server", localCluster.VtctldClientProcess.Server, "--alsologtostderr"}, "RestoreFromBackup", primary.Alias) tmpProcess := exec.Command( - "vtctlclient", + "vtctldclient", args..., ) - reader, _ := tmpProcess.StderrPipe() + reader, _ := tmpProcess.StdoutPipe() err := tmpProcess.Start() - require.Nil(t, err) + require.NoError(t, err) found := false scanner := bufio.NewScanner(reader) @@ -1101,7 +1107,7 @@ func vtctlBackupReplicaNoDestroyNoWrites(t *testing.T, replicaIndex int) (backup numBackups := len(waitForNumBackups(t, -1)) err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica.Alias) - require.Nil(t, err) + require.NoError(t, err) backups = waitForNumBackups(t, numBackups+1) require.NotEmpty(t, backups) @@ -1257,13 +1263,10 @@ func waitForNumBackups(t *testing.T, expectNumBackups int) []string { } } -func testReplicaIncrementalBackup(t *testing.T, replica *cluster.Vttablet, incrementalFromPos replication.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { +func testReplicaIncrementalBackup(t *testing.T, replica *cluster.Vttablet, 
incrementalFromPos string, expectEmpty bool, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { numBackups := len(waitForNumBackups(t, -1)) - incrementalFromPosArg := "auto" - if !incrementalFromPos.IsZero() { - incrementalFromPosArg = replication.EncodePosition(incrementalFromPos) - } - output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("Backup", "--incremental-from-pos", incrementalFromPosArg, replica.Alias) + + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("Backup", "--incremental-from-pos", incrementalFromPos, replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) @@ -1271,24 +1274,30 @@ func testReplicaIncrementalBackup(t *testing.T, replica *cluster.Vttablet, incre } require.NoErrorf(t, err, "output: %v", output) + if expectEmpty { + require.Contains(t, output, mysqlctl.EmptyBackupMessage) + return nil, "" + } + backups := waitForNumBackups(t, numBackups+1) require.NotEmptyf(t, backups, "output: %v", output) verifyTabletBackupStats(t, replica.VttabletProcess.GetVars()) backupName = backups[len(backups)-1] + backupLocation := localCluster.CurrentVTDATAROOT + "/backups/" + shardKsName + "/" + backupName return readManifestFile(t, backupLocation), backupName } -func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFromPos replication.Position, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { +func TestReplicaIncrementalBackup(t *testing.T, replicaIndex int, incrementalFromPos string, expectEmpty bool, expectError string) (manifest *mysqlctl.BackupManifest, backupName string) { replica := getReplica(t, replicaIndex) - return testReplicaIncrementalBackup(t, replica, incrementalFromPos, expectError) + return testReplicaIncrementalBackup(t, replica, incrementalFromPos, expectEmpty, expectError) } func TestReplicaFullRestore(t *testing.T, replicaIndex int, expectError 
string) { replica := getReplica(t, replicaIndex) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) @@ -1303,7 +1312,14 @@ func TestReplicaRestoreToPos(t *testing.T, replicaIndex int, restoreToPos replic require.False(t, restoreToPos.IsZero()) restoreToPosArg := replication.EncodePosition(restoreToPos) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--", "--restore_to_pos", restoreToPosArg, replica.Alias) + assert.Contains(t, restoreToPosArg, "MySQL56/") + if rand.Intn(2) == 0 { + // Verify that restore works whether or not the MySQL56/ prefix is present. + restoreToPosArg = strings.Replace(restoreToPosArg, "MySQL56/", "", 1) + assert.NotContains(t, restoreToPosArg, "MySQL56/") + } + + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("RestoreFromBackup", "--restore-to-pos", restoreToPosArg, replica.Alias) if expectError != "" { require.Errorf(t, err, "expected: %v", expectError) require.Contains(t, output, expectError) diff --git a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go index 8b9014e7f8c..7f611d81ad6 100644 --- a/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go +++ b/go/test/endtoend/backup/vtctlbackup/pitr_test_framework.go @@ -19,7 +19,8 @@ package vtctlbackup import ( "context" "fmt" - "math/rand" + "math/rand/v2" + "strings" "testing" "time" @@ -48,6 +49,14 @@ const ( operationFlushAndPurge ) +type incrementalFromPosType int + +const ( + incrementalFromPosPosition incrementalFromPosType = iota + incrementalFromPosAuto + incrementalFromPosBackupName +) + type PITRTestCase struct { Name string SetupType int @@ -106,6 
+115,7 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) } var fullBackupPos replication.Position + var lastBackupName string t.Run("full backup", func(t *testing.T) { InsertRowOnPrimary(t, "before-full-backup") waitForReplica(t, 0) @@ -118,6 +128,8 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) pos := replication.EncodePosition(fullBackupPos) backupPositions = append(backupPositions, pos) rowsPerPosition[pos] = len(msgs) + + lastBackupName = manifest.BackupName }) lastBackupPos := fullBackupPos @@ -127,50 +139,63 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) name string writeBeforeBackup bool fromFullPosition bool - autoPosition bool + expectEmpty bool + incrementalFrom incrementalFromPosType expectError string }{ { - name: "first incremental backup", + name: "first incremental backup", + incrementalFrom: incrementalFromPosPosition, + }, + { + name: "empty1", + incrementalFrom: incrementalFromPosPosition, + expectEmpty: true, }, { - name: "fail1", - expectError: "no binary logs to backup", + name: "empty2", + incrementalFrom: incrementalFromPosAuto, + expectEmpty: true, }, { - name: "fail2", - expectError: "no binary logs to backup", + name: "empty3", + incrementalFrom: incrementalFromPosPosition, + expectEmpty: true, }, { name: "make writes, succeed", writeBeforeBackup: true, + incrementalFrom: incrementalFromPosPosition, }, { - name: "fail, no binary logs to backup", - expectError: "no binary logs to backup", + name: "empty again", + incrementalFrom: incrementalFromPosPosition, + expectEmpty: true, }, { name: "make writes again, succeed", writeBeforeBackup: true, + incrementalFrom: incrementalFromPosBackupName, }, { name: "auto position, succeed", writeBeforeBackup: true, - autoPosition: true, + incrementalFrom: incrementalFromPosAuto, }, { - name: "fail auto position, no binary logs to backup", - autoPosition: true, - expectError: "no binary logs to 
backup", + name: "empty again, based on auto position", + incrementalFrom: incrementalFromPosAuto, + expectEmpty: true, }, { name: "auto position, make writes again, succeed", writeBeforeBackup: true, - autoPosition: true, + incrementalFrom: incrementalFromPosAuto, }, { name: "from full backup position", fromFullPosition: true, + incrementalFrom: incrementalFromPosPosition, }, } var fromFullPositionBackups []string @@ -185,27 +210,46 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) // Also, we give the replica a chance to catch up. time.Sleep(postWriteSleepDuration) // randomly flush binary logs 0, 1 or 2 times - FlushBinaryLogsOnReplica(t, 0, rand.Intn(3)) + FlushBinaryLogsOnReplica(t, 0, rand.IntN(3)) waitForReplica(t, 0) recordRowsPerPosition(t) // configure --incremental-from-pos to either: // - auto // - explicit last backup pos // - back in history to the original full backup - var incrementalFromPos replication.Position - if !tc.autoPosition { - incrementalFromPos = lastBackupPos + var incrementalFromPos string + switch tc.incrementalFrom { + case incrementalFromPosAuto: + incrementalFromPos = mysqlctl.AutoIncrementalFromPos + case incrementalFromPosBackupName: + incrementalFromPos = lastBackupName + case incrementalFromPosPosition: + incrementalFromPos = replication.EncodePosition(lastBackupPos) if tc.fromFullPosition { - incrementalFromPos = fullBackupPos + incrementalFromPos = replication.EncodePosition(fullBackupPos) } + assert.Contains(t, incrementalFromPos, "MySQL56/") + } + incrementalFromPosArg := incrementalFromPos + if tc.incrementalFrom == incrementalFromPosPosition && tc.fromFullPosition { + // Verify that backup works whether or not the MySQL56/ prefix is present. + // We arbitrarily decide to strip the prefix when "tc.fromFullPosition" is true, and keep it when false. 
+ incrementalFromPosArg = strings.Replace(incrementalFromPosArg, "MySQL56/", "", 1) + assert.NotContains(t, incrementalFromPosArg, "MySQL56/") } // always use same 1st replica - manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectError) + manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPosArg, tc.expectEmpty, tc.expectError) if tc.expectError != "" { return } + if tc.expectEmpty { + assert.Nil(t, manifest) + return + } + require.NotNil(t, manifest) defer func() { lastBackupPos = manifest.Position + lastBackupName = manifest.BackupName }() if tc.fromFullPosition { fromFullPositionBackups = append(fromFullPositionBackups, backupName) @@ -219,8 +263,10 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) expectFromPosition := lastBackupPos.GTIDSet - if !incrementalFromPos.IsZero() { - expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + if tc.incrementalFrom == incrementalFromPosPosition { + pos, err := replication.DecodePosition(incrementalFromPos) + assert.NoError(t, err) + expectFromPosition = pos.GTIDSet.Union(gtidPurgedPos.GTIDSet) } require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) }) @@ -234,6 +280,7 @@ func ExecTestIncrementalBackupAndRestoreToPos(t *testing.T, tcase *PITRTestCase) t.Run(testName, func(t *testing.T) { restoreToPos, err := replication.DecodePosition(pos) require.NoError(t, err) + require.False(t, restoreToPos.IsZero()) TestReplicaRestoreToPos(t, 0, restoreToPos, "") msgs := ReadRowsFromReplica(t, 0) count, ok := rowsPerPosition[pos] @@ -304,6 +351,7 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes testedBackups := 
[]testedBackupTimestampInfo{} var fullBackupPos replication.Position + var lastBackupName string t.Run("full backup", func(t *testing.T) { insertRowOnPrimary(t, "before-full-backup") waitForReplica(t, 0) @@ -314,6 +362,8 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes // rows := ReadRowsFromReplica(t, 0) testedBackups = append(testedBackups, testedBackupTimestampInfo{len(rows), time.Now()}) + + lastBackupName = manifest.BackupName }) lastBackupPos := fullBackupPos @@ -323,50 +373,63 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes name string writeBeforeBackup bool fromFullPosition bool - autoPosition bool + expectEmpty bool + incrementalFrom incrementalFromPosType expectError string }{ { - name: "first incremental backup", + name: "first incremental backup", + incrementalFrom: incrementalFromPosPosition, }, { - name: "fail1", - expectError: "no binary logs to backup", + name: "empty1", + incrementalFrom: incrementalFromPosPosition, + expectEmpty: true, }, { - name: "fail2", - expectError: "no binary logs to backup", + name: "empty2", + incrementalFrom: incrementalFromPosAuto, + expectEmpty: true, + }, + { + name: "empty3", + incrementalFrom: incrementalFromPosPosition, + expectEmpty: true, }, { name: "make writes, succeed", writeBeforeBackup: true, + incrementalFrom: incrementalFromPosPosition, }, { - name: "fail, no binary logs to backup", - expectError: "no binary logs to backup", + name: "empty again", + incrementalFrom: incrementalFromPosPosition, + expectEmpty: true, }, { name: "make writes again, succeed", writeBeforeBackup: true, + incrementalFrom: incrementalFromPosBackupName, }, { name: "auto position, succeed", writeBeforeBackup: true, - autoPosition: true, + incrementalFrom: incrementalFromPosAuto, }, { - name: "fail auto position, no binary logs to backup", - autoPosition: true, - expectError: "no binary logs to backup", + name: "empty again, based on auto position", + 
incrementalFrom: incrementalFromPosAuto, + expectEmpty: true, }, { name: "auto position, make writes again, succeed", writeBeforeBackup: true, - autoPosition: true, + incrementalFrom: incrementalFromPosAuto, }, { name: "from full backup position", fromFullPosition: true, + incrementalFrom: incrementalFromPosPosition, }, } var fromFullPositionBackups []string @@ -386,18 +449,28 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes // - auto // - explicit last backup pos // - back in history to the original full backup - var incrementalFromPos replication.Position - if !tc.autoPosition { - incrementalFromPos = lastBackupPos + var incrementalFromPos string + switch tc.incrementalFrom { + case incrementalFromPosAuto: + incrementalFromPos = mysqlctl.AutoIncrementalFromPos + case incrementalFromPosBackupName: + incrementalFromPos = lastBackupName + case incrementalFromPosPosition: + incrementalFromPos = replication.EncodePosition(lastBackupPos) if tc.fromFullPosition { - incrementalFromPos = fullBackupPos + incrementalFromPos = replication.EncodePosition(fullBackupPos) } } - manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectError) + manifest, backupName := TestReplicaIncrementalBackup(t, 0, incrementalFromPos, tc.expectEmpty, tc.expectError) if tc.expectError != "" { return } - // We wish to mark the current post-backup timestamp. We will later on retore to this point in time. + if tc.expectEmpty { + assert.Nil(t, manifest) + return + } + require.NotNil(t, manifest) + // We wish to mark the current post-backup timestamp. We will later on restore to this point in time. // However, the restore is up to and _exclusive_ of the timestamp. So for test's sake, we sleep // an extra few milliseconds just to ensure the timestamp we read is strictly after the backup time. // This is basicaly to avoid weird flakiness in CI. 
@@ -405,6 +478,7 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes testedBackups = append(testedBackups, testedBackupTimestampInfo{len(rowsBeforeBackup), time.Now()}) defer func() { lastBackupPos = manifest.Position + lastBackupName = manifest.BackupName }() if tc.fromFullPosition { fromFullPositionBackups = append(fromFullPositionBackups, backupName) @@ -434,8 +508,10 @@ func ExecTestIncrementalBackupAndRestoreToTimestamp(t *testing.T, tcase *PITRTes fromPositionIncludingPurged := manifest.FromPosition.GTIDSet.Union(gtidPurgedPos.GTIDSet) expectFromPosition := lastBackupPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) - if !incrementalFromPos.IsZero() { - expectFromPosition = incrementalFromPos.GTIDSet.Union(gtidPurgedPos.GTIDSet) + if tc.incrementalFrom == incrementalFromPosPosition { + pos, err := replication.DecodePosition(incrementalFromPos) + assert.NoError(t, err) + expectFromPosition = pos.GTIDSet.Union(gtidPurgedPos.GTIDSet) } require.Equalf(t, expectFromPosition, fromPositionIncludingPurged, "expected: %v, found: %v, gtid_purged: %v, manifest.Position: %v", expectFromPosition, fromPositionIncludingPurged, gtidPurgedPos, manifest.Position) }) @@ -663,11 +739,11 @@ func ExecTestIncrementalBackupOnTwoTablets(t *testing.T, tcase *PITRTestCase) { lastBackupPos = fullBackupPos case operationIncrementalBackup: - var incrementalFromPos replication.Position // keep zero, we will use "auto" - manifest, _ := TestReplicaIncrementalBackup(t, tc.replicaIndex, incrementalFromPos, tc.expectError) + manifest, _ := TestReplicaIncrementalBackup(t, tc.replicaIndex, "auto", false /* expectEmpty */, tc.expectError) if tc.expectError != "" { return } + require.NotNil(t, manifest) defer func() { lastBackupPos = manifest.Position }() diff --git a/go/test/endtoend/cellalias/cell_alias_test.go b/go/test/endtoend/cellalias/cell_alias_test.go index 9c2a29d2eb1..07e8d687f4e 100644 --- a/go/test/endtoend/cellalias/cell_alias_test.go +++ 
b/go/test/endtoend/cellalias/cell_alias_test.go @@ -53,8 +53,6 @@ var ( ) Engine=InnoDB ` commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", "--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", @@ -188,14 +186,14 @@ func TestMain(m *testing.M) { return 1, err } } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { return 1, err } // run a health check on source replica so it responds to discovery // (for binlog players) and on the source rdonlys (for workers) for _, tablet := range []string{shard1Replica.Alias, shard1Rdonly.Alias} { - if err := localCluster.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { + if err := localCluster.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { return 1, err } } @@ -206,7 +204,7 @@ func TestMain(m *testing.M) { } } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { return 1, err } @@ -214,14 +212,14 @@ func TestMain(m *testing.M) { return 1, err } - if err := localCluster.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { + if err := localCluster.VtctldClientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { + if err := localCluster.VtctldClientProcess.ApplyVSchema(keyspaceName, 
fmt.Sprintf(vSchema, tableName)); err != nil { return 1, err } - _ = localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + _ = localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) return m.Run(), nil }() @@ -239,7 +237,7 @@ func TestAlias(t *testing.T) { insertInitialValues(t) defer deleteInitialValues(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) require.NoError(t, err) shard1 := localCluster.Keyspaces[0].Shards[0] shard2 := localCluster.Keyspaces[0].Shards[1] @@ -253,11 +251,11 @@ func TestAlias(t *testing.T) { cluster.CheckSrvKeyspace(t, cell2, keyspaceName, expectedPartitions, *localCluster) // Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - err = localCluster.VtctlclientProcess.ExecuteCommand("AddCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("AddCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) - err = localCluster.VtctlclientProcess.ExecuteCommand("UpdateCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("UpdateCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) @@ -279,7 +277,7 @@ func TestAlias(t *testing.T) { testQueriesOnTabletType(t, "rdonly", vtgateInstance.GrpcPort, false) // now, delete the alias, so that if we run above assertions again, it will fail for replica,rdonly target type - err = localCluster.VtctlclientProcess.ExecuteCommand("DeleteCellsAlias", + err = localCluster.VtctldClientProcess.ExecuteCommand("DeleteCellsAlias", "region_east_coast") require.NoError(t, err) @@ -303,7 +301,7 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { insertInitialValues(t) defer deleteInitialValues(t) - err := localCluster.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", 
keyspaceName) + err := localCluster.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) require.NoError(t, err) shard1 := localCluster.Keyspaces[0].Shards[0] shard2 := localCluster.Keyspaces[0].Shards[1] @@ -330,7 +328,7 @@ func TestAddAliasWhileVtgateUp(t *testing.T) { testQueriesOnTabletType(t, "rdonly", vtgateInstance.GrpcPort, true) // Adds alias so vtgate can route to replica/rdonly tablets that are not in the same cell, but same alias - err = localCluster.VtctlclientProcess.ExecuteCommand("AddCellsAlias", "--", + err = localCluster.VtctldClientProcess.ExecuteCommand("AddCellsAlias", "--cells", allCells, "region_east_coast") require.NoError(t, err) diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index 28a8807cf08..0fc5edef1bb 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -22,7 +22,7 @@ import ( "flag" "fmt" "io" - "math/rand" + "math/rand/v2" "net" "os" "os/exec" @@ -38,9 +38,9 @@ import ( "time" "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/syscallutil" "vitess.io/vitess/go/test/endtoend/filelock" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" @@ -318,6 +318,44 @@ func (cluster *LocalProcessCluster) StartKeyspace(keyspace Keyspace, shardNames return nil } +// InitTablet initializes a tablet record in the topo server. It does not start the tablet process. 
+func (cluster *LocalProcessCluster) InitTablet(tablet *Vttablet, keyspace string, shard string) error { + tabletpb := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: tablet.Cell, + Uid: uint32(tablet.TabletUID), + }, + Hostname: cluster.Hostname, + Type: topodatapb.TabletType_REPLICA, + PortMap: map[string]int32{ + "vt": int32(tablet.HTTPPort), + }, + Keyspace: keyspace, + Shard: shard, + } + + switch tablet.Type { + case "rdonly": + tabletpb.Type = topodatapb.TabletType_RDONLY + case "primary": + tabletpb.Type = topodatapb.TabletType_PRIMARY + } + + if tablet.MySQLPort > 0 { + tabletpb.PortMap["mysql"] = int32(tablet.MySQLPort) + } + + if tablet.GrpcPort > 0 { + tabletpb.PortMap["grpc"] = int32(tablet.GrpcPort) + } + + allowPrimaryOverride := false + createShardAndKeyspace := true + allowUpdate := true + + return cluster.TopoProcess.Server.InitTablet(context.Background(), tabletpb, allowPrimaryOverride, createShardAndKeyspace, allowUpdate) +} + // StartKeyspace starts required number of shard and the corresponding tablets // keyspace : struct containing keyspace name, Sqlschema to apply, VSchema to apply // shardName : list of shard names @@ -420,7 +458,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames } // Make first tablet as primary - if err = cluster.VtctlclientProcess.InitializeShard(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { + if err = cluster.VtctldClientProcess.InitializeShard(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { log.Errorf("error running InitializeShard on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) return } @@ -440,7 +478,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply Schema SQL if keyspace.SchemaSQL != "" { - if err = cluster.VtctlclientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { + if err = 
cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) return } @@ -448,7 +486,7 @@ func (cluster *LocalProcessCluster) startKeyspace(keyspace Keyspace, shardNames // Apply VSchema if keyspace.VSchema != "" { - if err = cluster.VtctlclientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { + if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) return } @@ -580,7 +618,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard } // Make first tablet as primary - if err = cluster.VtctlclientProcess.InitShardPrimary(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { + if err = cluster.VtctldClientProcess.InitShardPrimary(keyspace.Name, shardName, cluster.Cell, shard.Vttablets[0].TabletUID); err != nil { log.Errorf("error running ISM on keyspace %v, shard %v: %v", keyspace.Name, shardName, err) return } @@ -600,7 +638,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply Schema SQL if keyspace.SchemaSQL != "" { - if err = cluster.VtctlclientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { + if err = cluster.VtctldClientProcess.ApplySchema(keyspace.Name, keyspace.SchemaSQL); err != nil { log.Errorf("error applying schema: %v, %v", keyspace.SchemaSQL, err) return } @@ -608,7 +646,7 @@ func (cluster *LocalProcessCluster) StartKeyspaceLegacy(keyspace Keyspace, shard // Apply VSchema if keyspace.VSchema != "" { - if err = cluster.VtctlclientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { + if err = cluster.VtctldClientProcess.ApplyVSchema(keyspace.Name, keyspace.VSchema); err != nil { log.Errorf("error applying vschema: %v, %v", keyspace.VSchema, err) return } @@ -738,7 +776,6 @@ func NewBareCluster(cell 
string, hostname string) *LocalProcessCluster { _ = os.Setenv("VTDATAROOT", cluster.CurrentVTDATAROOT) log.Infof("Created cluster on %s. ReusingVTDATAROOT=%v", cluster.CurrentVTDATAROOT, cluster.ReusingVTDATAROOT) - rand.Seed(time.Now().UTC().UnixNano()) return cluster } @@ -764,19 +801,18 @@ func (cluster *LocalProcessCluster) populateVersionInfo() error { return err } +var versionRegex = regexp.MustCompile(`Version: ([0-9]+)\.([0-9]+)\.([0-9]+)`) + func GetMajorVersion(binaryName string) (int, error) { version, err := exec.Command(binaryName, "--version").Output() if err != nil { return 0, err } - versionRegex := regexp.MustCompile(`Version: ([0-9]+)\.([0-9]+)\.([0-9]+)`) v := versionRegex.FindStringSubmatch(string(version)) if len(v) != 4 { return 0, fmt.Errorf("could not parse server version from: %s", version) } - if err != nil { - return 0, fmt.Errorf("could not parse server version from: %s", version) - } + return strconv.Atoi(v[1]) } @@ -856,12 +892,12 @@ func (cluster *LocalProcessCluster) ExecOnTablet(ctx context.Context, vttablet * return nil, err } - tablet, err := cluster.VtctlclientGetTablet(vttablet) + tablet, err := cluster.VtctldClientProcess.GetTablet(vttablet.Alias) if err != nil { return nil, err } - conn, err := tabletconn.GetDialer()(tablet, grpcclient.FailFast(false)) + conn, err := tabletconn.GetDialer()(ctx, tablet, grpcclient.FailFast(false)) if err != nil { return nil, err } @@ -899,12 +935,12 @@ func (cluster *LocalProcessCluster) ExecOnVTGate(ctx context.Context, addr strin // returns the responses. It returns an error if the stream ends with fewer than // `count` responses. 
func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vttablet *Vttablet, count int) (responses []*querypb.StreamHealthResponse, err error) { - tablet, err := cluster.VtctlclientGetTablet(vttablet) + tablet, err := cluster.VtctldClientProcess.GetTablet(vttablet.Alias) if err != nil { return nil, err } - conn, err := tabletconn.GetDialer()(tablet, grpcclient.FailFast(false)) + conn, err := tabletconn.GetDialer()(ctx, tablet, grpcclient.FailFast(false)) if err != nil { return nil, err } @@ -934,12 +970,12 @@ func (cluster *LocalProcessCluster) StreamTabletHealth(ctx context.Context, vtta // StreamTabletHealthUntil invokes a HealthStream on a local cluster Vttablet and // returns the responses. It waits until a certain condition is met. The amount of time to wait is an input that it takes. func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context, vttablet *Vttablet, timeout time.Duration, condition func(shr *querypb.StreamHealthResponse) bool) error { - tablet, err := cluster.VtctlclientGetTablet(vttablet) + tablet, err := cluster.VtctldClientProcess.GetTablet(vttablet.Alias) if err != nil { return err } - conn, err := tabletconn.GetDialer()(tablet, grpcclient.FailFast(false)) + conn, err := tabletconn.GetDialer()(ctx, tablet, grpcclient.FailFast(false)) if err != nil { return err } @@ -971,25 +1007,6 @@ func (cluster *LocalProcessCluster) StreamTabletHealthUntil(ctx context.Context, return err } -func (cluster *LocalProcessCluster) VtctlclientGetTablet(tablet *Vttablet) (*topodatapb.Tablet, error) { - result, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", "--", tablet.Alias) - if err != nil { - return nil, err - } - - var ti topodatapb.Tablet - if err := json2.Unmarshal([]byte(result), &ti); err != nil { - return nil, err - } - - return &ti, nil -} - -func (cluster *LocalProcessCluster) VtctlclientChangeTabletType(tablet *Vttablet, tabletType topodatapb.TabletType) error { - _, err := 
cluster.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", "--", tablet.Alias, tabletType.String()) - return err -} - // Teardown brings down the cluster by invoking teardown for individual processes func (cluster *LocalProcessCluster) Teardown() { PanicHandler(nil) @@ -1091,7 +1108,7 @@ func (cluster *LocalProcessCluster) waitForMySQLProcessToExit(mysqlctlProcessLis log.Errorf("Error in conversion to integer: %v", err) return } - err = syscall.Kill(pid, syscall.SIGKILL) + err = syscallutil.Kill(pid, syscall.SIGKILL) if err != nil { log.Errorf("Error in killing process: %v", err) } @@ -1205,7 +1222,7 @@ func (cluster *LocalProcessCluster) GetAndReserveTabletUID() int { } func getRandomNumber(maxNumber int32, baseNumber int) int { - return int(rand.Int31n(maxNumber)) + baseNumber + return int(rand.Int32N(maxNumber)) + baseNumber } func getVtStartPort() int { diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index 3d442bbb576..061e632dde7 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -29,13 +29,13 @@ import ( "google.golang.org/grpc" "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/vtgate/vtgateconn" @@ -137,7 +137,7 @@ func PanicHandler(t testing.TB) { // ListBackups Lists back preset in shard func (cluster LocalProcessCluster) ListBackups(shardKsName string) ([]string, error) { - output, err := cluster.VtctlclientProcess.ExecuteCommandWithOutput("ListBackups", shardKsName) + output, err := cluster.VtctldClientProcess.ExecuteCommandWithOutput("GetBackups", shardKsName) if err != nil { return nil, err } @@ -164,7 +164,7 @@ func (cluster LocalProcessCluster) RemoveAllBackups(t *testing.T, shardKsName st 
backups, err := cluster.ListBackups(shardKsName) require.Nil(t, err) for _, backup := range backups { - cluster.VtctlclientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup) + cluster.VtctldClientProcess.ExecuteCommand("RemoveBackup", shardKsName, backup) } } @@ -351,7 +351,6 @@ func GetPasswordUpdateSQL(localCluster *LocalProcessCluster) string { SET PASSWORD FOR 'vt_repl'@'%' = 'VtReplPass'; SET PASSWORD FOR 'vt_filtered'@'localhost' = 'VtFilteredPass'; SET PASSWORD FOR 'vt_appdebug'@'localhost' = 'VtDebugPass'; - FLUSH PRIVILEGES; ` return pwdChangeCmd } @@ -359,7 +358,11 @@ func GetPasswordUpdateSQL(localCluster *LocalProcessCluster) string { // CheckSrvKeyspace confirms that the cell and keyspace contain the expected // shard mappings. func CheckSrvKeyspace(t *testing.T, cell string, ksname string, expectedPartition map[topodatapb.TabletType][]string, ci LocalProcessCluster) { - srvKeyspace := GetSrvKeyspace(t, cell, ksname, ci) + srvKeyspaces, err := ci.VtctldClientProcess.GetSrvKeyspaces(ksname, cell) + require.NoError(t, err) + + srvKeyspace := srvKeyspaces[cell] + require.NotNil(t, srvKeyspace, "srvKeyspace is nil for %s", cell) currentPartition := map[topodatapb.TabletType][]string{} @@ -373,17 +376,6 @@ func CheckSrvKeyspace(t *testing.T, cell string, ksname string, expectedPartitio assert.True(t, reflect.DeepEqual(currentPartition, expectedPartition)) } -// GetSrvKeyspace returns the SrvKeyspace structure for the cell and keyspace. -func GetSrvKeyspace(t *testing.T, cell string, ksname string, ci LocalProcessCluster) *topodatapb.SrvKeyspace { - output, err := ci.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, ksname) - require.Nil(t, err) - var srvKeyspace topodatapb.SrvKeyspace - - err = json2.Unmarshal([]byte(output), &srvKeyspace) - require.Nil(t, err) - return &srvKeyspace -} - // ExecuteOnTablet executes a query on the specified vttablet. // It should always be called with a primary tablet for a keyspace/shard. 
func ExecuteOnTablet(t *testing.T, query string, vttablet Vttablet, ks string, expectFail bool) { @@ -498,3 +490,47 @@ func DialVTGate(ctx context.Context, name, addr, username, password string) (*vt vtgateconn.RegisterDialer(dialerName, dialerFunc) return vtgateconn.DialProtocol(ctx, dialerName, addr) } + +// PrintFiles prints the files that are asked for. If no file is specified, all the files are printed. +func PrintFiles(t *testing.T, dir string, files ...string) { + var directories []string + directories = append(directories, dir) + + // Go over the remaining directories to check + for len(directories) > 0 { + // Get one of the directories, and read its contents. + dir = directories[0] + directories = directories[1:] + entries, err := os.ReadDir(dir) + if err != nil { + log.Errorf("Couldn't read directory - %v", dir) + continue + } + for _, entry := range entries { + name := path.Join(dir, entry.Name()) + // For a directory, we add it to our list of directories to check. + if entry.IsDir() { + directories = append(directories, name) + continue + } + // Check if this file should be printed or not. + if len(files) != 0 { + fileFound := false + for _, file := range files { + if strings.EqualFold(entry.Name(), file) { + fileFound = true + break + } + } + if !fileFound { + continue + } + } + // Read and print the file. 
+ res, err := os.ReadFile(name) + require.NoError(t, err) + log.Errorf("READING FILE - %v", name) + log.Errorf("%v", string(res)) + } + } +} diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go index 06808627254..cfc4fc28088 100644 --- a/go/test/endtoend/cluster/mysqlctl_process.go +++ b/go/test/endtoend/cluster/mysqlctl_process.go @@ -30,6 +30,7 @@ import ( "github.com/google/safehtml/template" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/syscallutil" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/tlstest" @@ -215,11 +216,11 @@ func (mysqlctl *MysqlctlProcess) Stop() (err error) { // If we found a valid associated mysqld_safe process then let's kill // it first. if err == nil && mysqldSafePID > 0 { - if err = syscall.Kill(mysqldSafePID, syscall.SIGKILL); err != nil { + if err = syscallutil.Kill(mysqldSafePID, syscall.SIGKILL); err != nil { return err } } - return syscall.Kill(pid, syscall.SIGKILL) + return syscallutil.Kill(pid, syscall.SIGKILL) } // StopProcess executes mysqlctl command to stop mysql instance and returns process reference @@ -252,16 +253,6 @@ func (mysqlctl *MysqlctlProcess) CleanupFiles(tabletUID int) { os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", tabletUID))) } -// Connect returns a new connection to the underlying MySQL server -func (mysqlctl *MysqlctlProcess) Connect(ctx context.Context, username string) (*mysql.Conn, error) { - params := mysql.ConnParams{ - Uname: username, - UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", mysqlctl.TabletUID), "/mysql.sock"), - } - - return mysql.Connect(ctx, ¶ms) -} - // MysqlCtlProcessInstanceOptionalInit returns a Mysqlctl handle for mysqlctl process // configured with the given Config. 
func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirectory string, initMySQL bool) (*MysqlctlProcess, error) { diff --git a/go/test/endtoend/cluster/mysqlctld_process.go b/go/test/endtoend/cluster/mysqlctld_process.go index 60d30bc6cc0..08409c1246d 100644 --- a/go/test/endtoend/cluster/mysqlctld_process.go +++ b/go/test/endtoend/cluster/mysqlctld_process.go @@ -92,7 +92,15 @@ func (mysqlctld *MysqlctldProcess) Start() error { "--init_db_sql_file", mysqlctld.InitDBFile) } - errFile, _ := os.Create(path.Join(mysqlctld.LogDirectory, "mysqlctld-stderr.txt")) + err := os.MkdirAll(mysqlctld.LogDirectory, 0755) + if err != nil { + log.Errorf("Failed to create directory for mysqlctld logs: %v", err) + return err + } + errFile, err := os.Create(path.Join(mysqlctld.LogDirectory, "mysqlctld-stderr.txt")) + if err != nil { + log.Errorf("Failed to create directory for mysqlctld stderr: %v", err) + } tempProcess.Stderr = errFile tempProcess.Env = append(tempProcess.Env, os.Environ()...) @@ -103,7 +111,7 @@ func (mysqlctld *MysqlctldProcess) Start() error { log.Infof("%v", strings.Join(tempProcess.Args, " ")) - err := tempProcess.Start() + err = tempProcess.Start() if err != nil { return err } diff --git a/go/test/endtoend/cluster/topo_process.go b/go/test/endtoend/cluster/topo_process.go index 45a2e6586fa..d5d5c8482a0 100644 --- a/go/test/endtoend/cluster/topo_process.go +++ b/go/test/endtoend/cluster/topo_process.go @@ -33,6 +33,11 @@ import ( "vitess.io/vitess/go/vt/log" vtopo "vitess.io/vitess/go/vt/topo" + + // Register topo server implementations + _ "vitess.io/vitess/go/vt/topo/consultopo" + _ "vitess.io/vitess/go/vt/topo/etcd2topo" + _ "vitess.io/vitess/go/vt/topo/zk2topo" ) // TopoProcess is a generic handle for a running Topo service . 
@@ -51,6 +56,7 @@ type TopoProcess struct { PeerURL string ZKPorts string Client interface{} + Server *vtopo.Server proc *exec.Cmd exit chan error @@ -60,15 +66,22 @@ type TopoProcess struct { func (topo *TopoProcess) Setup(topoFlavor string, cluster *LocalProcessCluster) (err error) { switch topoFlavor { case "zk2": - return topo.SetupZookeeper(cluster) + err = topo.SetupZookeeper(cluster) case "consul": - return topo.SetupConsul(cluster) + err = topo.SetupConsul(cluster) default: // Override any inherited ETCDCTL_API env value to // ensure that we use the v3 API and storage. os.Setenv("ETCDCTL_API", "3") - return topo.SetupEtcd() + err = topo.SetupEtcd() + } + + if err != nil { + return } + + topo.Server, err = vtopo.OpenServer(topoFlavor, net.JoinHostPort(topo.Host, fmt.Sprintf("%d", topo.Port)), TopoGlobalRoot(topoFlavor)) + return } // SetupEtcd spawns a new etcd service and initializes it with the defaults. @@ -145,10 +158,10 @@ func (topo *TopoProcess) SetupEtcd() (err error) { // SetupZookeeper spawns a new zookeeper topo service and initializes it with the defaults. // The service is kept running in the background until TearDown() is called. 
-func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error) { +func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) error { host, err := os.Hostname() if err != nil { - return + return err } topo.ZKPorts = fmt.Sprintf("%d:%d:%d", cluster.GetAndReservePort(), cluster.GetAndReservePort(), topo.Port) @@ -160,16 +173,21 @@ func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error "init", ) - errFile, _ := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt")) + err = os.MkdirAll(topo.LogDirectory, 0755) + if err != nil { + log.Errorf("Failed to create log directory for zookeeper: %v", err) + return err + } + errFile, err := os.Create(path.Join(topo.LogDirectory, "topo-stderr.txt")) + if err != nil { + log.Errorf("Failed to create file for zookeeper stderr: %v", err) + return err + } topo.proc.Stderr = errFile topo.proc.Env = append(topo.proc.Env, os.Environ()...) log.Infof("Starting zookeeper with args %v", strings.Join(topo.proc.Args, " ")) - err = topo.proc.Run() - if err != nil { - return - } - return + return topo.proc.Run() } // ConsulConfigs are the configurations that are added the config files which are used by consul @@ -193,13 +211,25 @@ type PortsInfo struct { func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { topo.VerifyURL = fmt.Sprintf("http://%s:%d/v1/kv/?keys", topo.Host, topo.Port) - _ = os.MkdirAll(topo.LogDirectory, os.ModePerm) - _ = os.MkdirAll(topo.DataDirectory, os.ModePerm) + err = os.MkdirAll(topo.LogDirectory, os.ModePerm) + if err != nil { + log.Errorf("Failed to create directory for consul logs: %v", err) + return + } + err = os.MkdirAll(topo.DataDirectory, os.ModePerm) + if err != nil { + log.Errorf("Failed to create directory for consul data: %v", err) + return + } configFile := path.Join(os.Getenv("VTDATAROOT"), "consul.json") logFile := path.Join(topo.LogDirectory, "/consul.log") - _, _ = os.Create(logFile) + _, err = os.Create(logFile) + if 
err != nil { + log.Errorf("Failed to create file for consul logs: %v", err) + return + } var config []byte configs := ConsulConfigs{ @@ -233,7 +263,11 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { "-config-file", configFile, ) - errFile, _ := os.Create(path.Join(topo.DataDirectory, "topo-stderr.txt")) + errFile, err := os.Create(path.Join(topo.LogDirectory, "topo-stderr.txt")) + if err != nil { + log.Errorf("Failed to create file for consul stderr: %v", err) + return + } topo.proc.Stderr = errFile topo.proc.Env = append(topo.proc.Env, os.Environ()...) @@ -268,6 +302,11 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { // TearDown shutdowns the running topo service. func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoot string, keepdata bool, topoFlavor string) error { + if topo.Server != nil { + topo.Server.Close() + topo.Server = nil + } + if topo.Client != nil { switch cli := topo.Client.(type) { case *clientv3.Client: @@ -416,3 +455,13 @@ func TopoProcessInstance(port int, peerPort int, hostname string, flavor string, topo.PeerURL = fmt.Sprintf("http://%s:%d", hostname, peerPort) return topo } + +// TopoGlobalRoot returns the global root for the given topo flavor. +func TopoGlobalRoot(flavor string) string { + switch flavor { + case "consul": + return "global" + default: + return "/vitess/global" + } +} diff --git a/go/test/endtoend/cluster/vtctl_process.go b/go/test/endtoend/cluster/vtctl_process.go index 9b3d1a5f4e1..b9d8a5b46ce 100644 --- a/go/test/endtoend/cluster/vtctl_process.go +++ b/go/test/endtoend/cluster/vtctl_process.go @@ -118,7 +118,6 @@ func VtctlProcessInstance(topoPort int, hostname string) *VtctlProcess { // Default values for etcd2 topo server. topoImplementation := "etcd2" - topoGlobalRoot := "/vitess/global" topoRootPath := "/" // Checking and resetting the parameters for required topo server. 
@@ -127,7 +126,6 @@ func VtctlProcessInstance(topoPort int, hostname string) *VtctlProcess { topoImplementation = "zk2" case "consul": topoImplementation = "consul" - topoGlobalRoot = "global" // For consul we do not need "/" in the path topoRootPath = "" } @@ -142,7 +140,7 @@ func VtctlProcessInstance(topoPort int, hostname string) *VtctlProcess { Binary: "vtctl", TopoImplementation: topoImplementation, TopoGlobalAddress: fmt.Sprintf("%s:%d", hostname, topoPort), - TopoGlobalRoot: topoGlobalRoot, + TopoGlobalRoot: TopoGlobalRoot(*topoFlavor), TopoServerAddress: fmt.Sprintf("%s:%d", hostname, topoPort), TopoRootPath: topoRootPath, VtctlMajorVersion: version, diff --git a/go/test/endtoend/cluster/vtctld_process.go b/go/test/endtoend/cluster/vtctld_process.go index d0b2e5ab93e..d87427af9b9 100644 --- a/go/test/endtoend/cluster/vtctld_process.go +++ b/go/test/endtoend/cluster/vtctld_process.go @@ -79,7 +79,16 @@ func (vtctld *VtctldProcess) Setup(cell string, extraArgs ...string) (err error) } vtctld.proc.Args = append(vtctld.proc.Args, extraArgs...) 
- errFile, _ := os.Create(path.Join(vtctld.LogDir, "vtctld-stderr.txt")) + err = os.MkdirAll(vtctld.LogDir, 0755) + if err != nil { + log.Errorf("cannot create log directory for vtctld: %v", err) + return err + } + errFile, err := os.Create(path.Join(vtctld.LogDir, "vtctld-stderr.txt")) + if err != nil { + log.Errorf("cannot create error log file for vtctld: %v", err) + return err + } vtctld.proc.Stderr = errFile vtctld.ErrorLog = errFile.Name() diff --git a/go/test/endtoend/cluster/vtctldclient_process.go b/go/test/endtoend/cluster/vtctldclient_process.go index 52e0f985680..4ed5acde518 100644 --- a/go/test/endtoend/cluster/vtctldclient_process.go +++ b/go/test/endtoend/cluster/vtctldclient_process.go @@ -22,7 +22,14 @@ import ( "strings" "time" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) // VtctldClientProcess is a generic handle for a running vtctldclient command . @@ -64,7 +71,8 @@ func (vtctldclient *VtctldClientProcess) ExecuteCommandWithOutput(args ...string vtctldclient.Binary, filterDoubleDashArgs(pArgs, vtctldclient.VtctldClientMajorVersion)..., ) - log.Infof("Executing vtctldclient with command: %v (attempt %d of %d)", strings.Join(tmpProcess.Args, " "), i, retries) + msg := binlogplayer.LimitString(strings.Join(tmpProcess.Args, " "), 256) // limit log line length + log.Infof("Executing vtctldclient with command: %v (attempt %d of %d)", msg, i, retries) resultByte, err = tmpProcess.CombinedOutput() resultStr = string(resultByte) if err == nil || !shouldRetry(resultStr) { @@ -93,6 +101,95 @@ func VtctldClientProcessInstance(hostname string, grpcPort int, tmpDirectory str return vtctldclient } +// ApplyRoutingRules applies the given routing rules. 
+func (vtctldclient *VtctldClientProcess) ApplyRoutingRules(json string) error { + return vtctldclient.ExecuteCommand("ApplyRoutingRules", "--rules", json) +} + +type ApplySchemaParams struct { + DDLStrategy string + MigrationContext string + UUIDs string + CallerID string + BatchSize int +} + +// ApplySchemaWithOutput applies SQL schema to the keyspace +func (vtctldclient *VtctldClientProcess) ApplySchemaWithOutput(keyspace string, sql string, params ApplySchemaParams) (result string, err error) { + args := []string{ + "ApplySchema", + "--sql", sql, + } + if params.MigrationContext != "" { + args = append(args, "--migration-context", params.MigrationContext) + } + if params.DDLStrategy != "" { + args = append(args, "--ddl-strategy", params.DDLStrategy) + } + if params.UUIDs != "" { + args = append(args, "--uuid", params.UUIDs) + } + if params.BatchSize > 0 { + args = append(args, "--batch-size", fmt.Sprintf("%d", params.BatchSize)) + } + if params.CallerID != "" { + args = append(args, "--caller-id", params.CallerID) + } + args = append(args, keyspace) + return vtctldclient.ExecuteCommandWithOutput(args...) +} + +// ApplySchema applies SQL schema to the keyspace +func (vtctldclient *VtctldClientProcess) ApplySchema(keyspace string, sql string) error { + message, err := vtctldclient.ApplySchemaWithOutput(keyspace, sql, ApplySchemaParams{DDLStrategy: "direct -allow-zero-in-date"}) + + return vterrors.Wrap(err, message) +} + +// ApplyVSchema applies vitess schema (JSON format) to the keyspace +func (vtctldclient *VtctldClientProcess) ApplyVSchema(keyspace string, json string) (err error) { + return vtctldclient.ExecuteCommand( + "ApplyVSchema", + "--vschema", json, + keyspace, + ) +} + +// ChangeTabletType changes the type of the given tablet. 
+func (vtctldclient *VtctldClientProcess) ChangeTabletType(tablet *Vttablet, tabletType topodatapb.TabletType) error { + return vtctldclient.ExecuteCommand( + "ChangeTabletType", + tablet.Alias, + tabletType.String(), + ) +} + +// GetShardReplication returns a mapping of cell to shard replication for the given keyspace and shard. +func (vtctldclient *VtctldClientProcess) GetShardReplication(keyspace string, shard string, cells ...string) (map[string]*topodatapb.ShardReplication, error) { + args := append([]string{"GetShardReplication", keyspace + "/" + shard}, cells...) + out, err := vtctldclient.ExecuteCommandWithOutput(args...) + if err != nil { + return nil, err + } + + var resp vtctldatapb.GetShardReplicationResponse + err = json2.Unmarshal([]byte(out), &resp) + return resp.ShardReplicationByCell, err +} + +// GetSrvKeyspaces returns a mapping of cell to srv keyspace for the given keyspace. +func (vtctldclient *VtctldClientProcess) GetSrvKeyspaces(keyspace string, cells ...string) (ksMap map[string]*topodatapb.SrvKeyspace, err error) { + args := append([]string{"GetSrvKeyspaces", keyspace}, cells...) + out, err := vtctldclient.ExecuteCommandWithOutput(args...) + if err != nil { + return nil, err + } + + ksMap = map[string]*topodatapb.SrvKeyspace{} + err = json2.Unmarshal([]byte(out), &ksMap) + return ksMap, err +} + // PlannedReparentShard executes vtctlclient command to make specified tablet the primary for the shard. func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, Shard string, alias string) (err error) { output, err := vtctldclient.ExecuteCommandWithOutput( @@ -105,6 +202,32 @@ func (vtctldclient *VtctldClientProcess) PlannedReparentShard(Keyspace string, S return err } +// InitializeShard executes vtctldclient command to make specified tablet the primary for the shard. 
+func (vtctldclient *VtctldClientProcess) InitializeShard(keyspace string, shard string, cell string, uid int) error { + output, err := vtctldclient.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace, shard), + "--wait-replicas-timeout", "31s", + "--new-primary", fmt.Sprintf("%s-%d", cell, uid)) + if err != nil { + log.Errorf("error in PlannedReparentShard output %s, err %s", output, err.Error()) + } + return err +} + +// InitShardPrimary executes vtctldclient command to make specified tablet the primary for the shard. +func (vtctldclient *VtctldClientProcess) InitShardPrimary(keyspace string, shard string, cell string, uid int) error { + output, err := vtctldclient.ExecuteCommandWithOutput( + "InitShardPrimary", + "--force", "--wait-replicas-timeout", "31s", + fmt.Sprintf("%s/%s", keyspace, shard), + fmt.Sprintf("%s-%d", cell, uid)) + if err != nil { + log.Errorf("error in InitShardPrimary output %s, err %s", output, err.Error()) + } + return err +} + // CreateKeyspace executes the vtctl command to create a keyspace func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sidecarDBName string) (err error) { var output string @@ -121,6 +244,51 @@ func (vtctldclient *VtctldClientProcess) CreateKeyspace(keyspaceName string, sid return err } +// GetKeyspace executes the vtctldclient command to get a shard, and parses the response. +func (vtctldclient *VtctldClientProcess) GetKeyspace(keyspace string) (*vtctldatapb.Keyspace, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetKeyspace", keyspace) + if err != nil { + return nil, err + } + + var ks vtctldatapb.Keyspace + err = json2.Unmarshal([]byte(data), &ks) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse keyspace output: %s", data) + } + return &ks, nil +} + +// GetShard executes the vtctldclient command to get a shard, and parses the response. 
+func (vtctldclient *VtctldClientProcess) GetShard(keyspace string, shard string) (*vtctldatapb.Shard, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace, shard)) + if err != nil { + return nil, err + } + + var si vtctldatapb.Shard + err = json2.Unmarshal([]byte(data), &si) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse shard output: %s", data) + } + return &si, nil +} + +// GetTablet executes vtctldclient command to get a tablet, and parses the response. +func (vtctldclient *VtctldClientProcess) GetTablet(alias string) (*topodatapb.Tablet, error) { + data, err := vtctldclient.ExecuteCommandWithOutput("GetTablet", alias) + if err != nil { + return nil, err + } + + var tablet topodatapb.Tablet + err = json2.Unmarshal([]byte(data), &tablet) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to parse tablet output: %s", data) + } + return &tablet, nil +} + // OnlineDDLShowRecent responds with recent schema migration list func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (result string, err error) { return vtctldclient.ExecuteCommandWithOutput( @@ -130,3 +298,14 @@ func (vtctldclient *VtctldClientProcess) OnlineDDLShowRecent(Keyspace string) (r "recent", ) } + +// OnlineDDLShow responds with recent schema migration list +func (vtctldclient *VtctldClientProcess) OnlineDDLShow(keyspace, workflow string) (result string, err error) { + return vtctldclient.ExecuteCommandWithOutput( + "OnlineDDL", + "show", + "--json", + keyspace, + workflow, + ) +} diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index ab82a32f651..d1877fb89bb 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -130,7 +130,11 @@ func (vtgate *VtgateProcess) Setup() (err error) { vtgate.proc.Args = append(vtgate.proc.Args, vtgate.ExtraArgs...) 
- errFile, _ := os.Create(path.Join(vtgate.LogDir, "vtgate-stderr.txt")) + errFile, err := os.Create(path.Join(vtgate.LogDir, "vtgate-stderr.txt")) + if err != nil { + log.Errorf("cannot create error log file for vtgate: %v", err) + return err + } vtgate.proc.Stderr = errFile vtgate.proc.Env = append(vtgate.proc.Env, os.Environ()...) diff --git a/go/test/endtoend/cluster/vtorc_process.go b/go/test/endtoend/cluster/vtorc_process.go index f80690d8d60..cac5921d01d 100644 --- a/go/test/endtoend/cluster/vtorc_process.go +++ b/go/test/endtoend/cluster/vtorc_process.go @@ -86,7 +86,16 @@ func (orc *VTOrcProcess) Setup() (err error) { // create the configuration file timeNow := time.Now().UnixNano() - configFile, _ := os.Create(path.Join(orc.LogDir, fmt.Sprintf("orc-config-%d.json", timeNow))) + err = os.MkdirAll(orc.LogDir, 0755) + if err != nil { + log.Errorf("cannot create log directory for vtorc: %v", err) + return err + } + configFile, err := os.Create(path.Join(orc.LogDir, fmt.Sprintf("orc-config-%d.json", timeNow))) + if err != nil { + log.Errorf("cannot create config file for vtorc: %v", err) + return err + } orc.ConfigPath = configFile.Name() // Add the default configurations and print them out @@ -135,7 +144,11 @@ func (orc *VTOrcProcess) Setup() (err error) { if orc.LogFileName == "" { orc.LogFileName = fmt.Sprintf("orc-stderr-%d.txt", timeNow) } - errFile, _ := os.Create(path.Join(orc.LogDir, orc.LogFileName)) + errFile, err := os.Create(path.Join(orc.LogDir, orc.LogFileName)) + if err != nil { + log.Errorf("cannot create error log file for vtorc: %v", err) + return err + } orc.proc.Stderr = errFile orc.proc.Env = append(orc.proc.Env, os.Environ()...) 
@@ -201,6 +214,22 @@ func (orc *VTOrcProcess) GetVars() map[string]any { return nil } +// GetMetrics gets the metrics exported on the /metrics page of VTOrc +func (orc *VTOrcProcess) GetMetrics() string { + varsURL := fmt.Sprintf("http://localhost:%d/metrics", orc.Port) + resp, err := http.Get(varsURL) + if err != nil { + return "" + } + defer resp.Body.Close() + + if resp.StatusCode == 200 { + respByte, _ := io.ReadAll(resp.Body) + return string(respByte) + } + return "" +} + // MakeAPICall makes an API call on the given endpoint of VTOrc func (orc *VTOrcProcess) MakeAPICall(endpoint string) (status int, response string, err error) { url := fmt.Sprintf("http://localhost:%d/%s", orc.Port, endpoint) diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index 517f4bf3874..45db1dc4bd2 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -76,10 +76,10 @@ type VttabletProcess struct { ServingStatus string DbPassword string DbPort int - VreplicationTabletType string DbFlavor string Charset string ConsolidationsURL string + IsPrimary bool // Extra Args to be set before starting the vttablet process ExtraArgs []string @@ -109,7 +109,6 @@ func (vttablet *VttabletProcess) Setup() (err error) { "--backup_storage_implementation", vttablet.BackupStorageImplementation, "--file_backup_storage_root", vttablet.FileBackupStorageRoot, "--service_map", vttablet.ServiceMap, - "--vreplication_tablet_type", vttablet.VreplicationTabletType, "--db_charset", vttablet.Charset, ) if v, err := GetMajorVersion("vttablet"); err != nil { @@ -450,11 +449,7 @@ func (vttablet *VttabletProcess) CreateDB(keyspace string) error { // QueryTablet lets you execute a query in this tablet and get the result func (vttablet *VttabletProcess) QueryTablet(query string, keyspace string, useDb bool) (*sqltypes.Result, error) { - if !useDb { - keyspace = "" - } - dbParams := NewConnParams(vttablet.DbPort, 
vttablet.DbPassword, path.Join(vttablet.Directory, "mysql.sock"), keyspace) - conn, err := vttablet.conn(&dbParams) + conn, err := vttablet.TabletConn(keyspace, useDb) if err != nil { return nil, err } @@ -462,6 +457,54 @@ func (vttablet *VttabletProcess) QueryTablet(query string, keyspace string, useD return executeQuery(conn, query) } +// SemiSyncExtensionLoaded returns what type of semi-sync extension is loaded +func (vttablet *VttabletProcess) SemiSyncExtensionLoaded() (mysql.SemiSyncType, error) { + conn, err := vttablet.TabletConn("", false) + if err != nil { + return mysql.SemiSyncTypeUnknown, err + } + defer conn.Close() + return conn.SemiSyncExtensionLoaded() +} + +// ResetBinaryLogsCommand returns the commands to reset binary logs +func (vttablet *VttabletProcess) ResetBinaryLogsCommand() (string, error) { + conn, err := vttablet.TabletConn("", false) + if err != nil { + return "", err + } + defer conn.Close() + return conn.ResetBinaryLogsCommand(), nil +} + +// QueryTabletMultiple lets you execute multiple queries -- without any +// results -- against the tablet. 
+func (vttablet *VttabletProcess) QueryTabletMultiple(queries []string, keyspace string, useDb bool) error { + conn, err := vttablet.TabletConn(keyspace, useDb) + if err != nil { + return err + } + defer conn.Close() + + for _, query := range queries { + log.Infof("Executing query %s (on %s)", query, vttablet.Name) + _, err := executeQuery(conn, query) + if err != nil { + return err + } + } + return nil +} + +// TabletConn opens a MySQL connection on this tablet +func (vttablet *VttabletProcess) TabletConn(keyspace string, useDb bool) (*mysql.Conn, error) { + if !useDb { + keyspace = "" + } + dbParams := NewConnParams(vttablet.DbPort, vttablet.DbPassword, path.Join(vttablet.Directory, "mysql.sock"), keyspace) + return vttablet.conn(&dbParams) +} + func (vttablet *VttabletProcess) defaultConn(dbname string) (*mysql.Conn, error) { dbParams := mysql.ConnParams{ Uname: "vt_dba", @@ -489,6 +532,16 @@ func (vttablet *VttabletProcess) QueryTabletWithDB(query string, dbname string) return executeQuery(conn, query) } +// MultiQueryTabletWithDB lets you execute multiple queries on a specific DB in this tablet. +func (vttablet *VttabletProcess) MultiQueryTabletWithDB(query string, dbname string) error { + conn, err := vttablet.defaultConn(dbname) + if err != nil { + return err + } + defer conn.Close() + return executeMultiQuery(conn, query) +} + // executeQuery will retry the query up to 10 times with a small sleep in between each try. // This allows the tests to be more robust in the face of transient failures. func executeQuery(dbConn *mysql.Conn, query string) (*sqltypes.Result, error) { @@ -513,6 +566,26 @@ func executeQuery(dbConn *mysql.Conn, query string) (*sqltypes.Result, error) { return result, err } +// executeMultiQuery will retry the given multi query up to 10 times with a small sleep in between each try. +// This allows the tests to be more robust in the face of transient failures. 
+func executeMultiQuery(dbConn *mysql.Conn, query string) (err error) { + retries := 10 + retryDelay := 1 * time.Second + for i := 0; i < retries; i++ { + if i > 0 { + // We only audit from 2nd attempt and onwards, otherwise this is just too verbose. + log.Infof("Executing query %s (attempt %d of %d)", query, (i + 1), retries) + } + err = dbConn.ExecuteFetchMultiDrain(query) + if err == nil { + break + } + time.Sleep(retryDelay) + } + + return err +} + // GetDBVar returns first matching database variable's value func (vttablet *VttabletProcess) GetDBVar(varName string, ksName string) (string, error) { return vttablet.getDBSystemValues("variables", varName, ksName) @@ -534,11 +607,6 @@ func (vttablet *VttabletProcess) getDBSystemValues(placeholder string, value str return "", nil } -// ToggleProfiling enables or disables the configured CPU profiler on this vttablet -func (vttablet *VttabletProcess) ToggleProfiling() error { - return vttablet.proc.Process.Signal(syscall.SIGUSR1) -} - // WaitForVReplicationToCatchup waits for "workflow" to finish copying func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, workflow, database string, sidecarDBName string, duration time.Duration) { if sidecarDBName == "" { @@ -642,6 +710,7 @@ func VttabletProcessInstance(port, grpcPort, tabletUID int, cell, shard, keyspac Binary: "vttablet", FileToLogQueries: path.Join(tmpDirectory, fmt.Sprintf("/vt_%010d_querylog.txt", tabletUID)), Directory: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", tabletUID)), + Cell: cell, TabletPath: fmt.Sprintf("%s-%010d", cell, tabletUID), ServiceMap: "grpc-queryservice,grpc-tabletmanager,grpc-updatestream,grpc-throttler", LogDir: tmpDirectory, @@ -659,7 +728,6 @@ func VttabletProcessInstance(port, grpcPort, tabletUID int, cell, shard, keyspac ServingStatus: "NOT_SERVING", BackupStorageImplementation: "file", FileBackupStorageRoot: path.Join(os.Getenv("VTDATAROOT"), "/backups"), - VreplicationTabletType: "replica", 
TabletUID: tabletUID, Charset: charset, } diff --git a/go/test/endtoend/cluster/vttablet_process_unix.go b/go/test/endtoend/cluster/vttablet_process_unix.go new file mode 100644 index 00000000000..3f5c76e9988 --- /dev/null +++ b/go/test/endtoend/cluster/vttablet_process_unix.go @@ -0,0 +1,26 @@ +//go:build !windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import "syscall" + +// ToggleProfiling enables or disables the configured CPU profiler on this vttablet +func (vttablet *VttabletProcess) ToggleProfiling() error { + return vttablet.proc.Process.Signal(syscall.SIGUSR1) +} diff --git a/go/test/endtoend/cluster/vttablet_process_windows.go b/go/test/endtoend/cluster/vttablet_process_windows.go new file mode 100644 index 00000000000..6c233746e8a --- /dev/null +++ b/go/test/endtoend/cluster/vttablet_process_windows.go @@ -0,0 +1,28 @@ +//go:build windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "errors" +) + +// ToggleProfiling enables or disables the configured CPU profiler on this vttablet. +func (vttablet *VttabletProcess) ToggleProfiling() error { + return errors.New("not implemented") +} diff --git a/go/test/endtoend/clustertest/vtctld_test.go b/go/test/endtoend/clustertest/vtctld_test.go index 45643d869b1..c61f7820bb7 100644 --- a/go/test/endtoend/clustertest/vtctld_test.go +++ b/go/test/endtoend/clustertest/vtctld_test.go @@ -54,7 +54,7 @@ func TestVtctldProcess(t *testing.T) { url = fmt.Sprintf("http://%s:%d/api/topodata/", clusterInstance.Hostname, clusterInstance.VtctldHTTPPort) testTopoDataAPI(t, url) - testListAllTablets(t) + testGetTablets(t) testTabletStatus(t) testExecuteAsDba(t) testExecuteAsApp(t) @@ -82,9 +82,9 @@ func testTopoDataAPI(t *testing.T, url string) { assert.Contains(t, childrenGot, clusterInstance.Cell) } -func testListAllTablets(t *testing.T) { +func testGetTablets(t *testing.T) { // first w/o any filters, aside from cell - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ListAllTablets", clusterInstance.Cell) + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetTablets", "--cell", clusterInstance.Cell) require.NoError(t, err) tablets := getAllTablets() @@ -102,10 +102,12 @@ func testListAllTablets(t *testing.T) { // now filtering with the first keyspace and tablet type of primary, in // addition to the cell - result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "ListAllTablets", "--", "--keyspace", clusterInstance.Keyspaces[0].Name, - "--tablet_type", "primary", - clusterInstance.Cell) + result, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "GetTablets", + "--keyspace", clusterInstance.Keyspaces[0].Name, + "--tablet-type", "primary", + "--cell", clusterInstance.Cell, + ) require.NoError(t, err) // We should only return a single primary tablet per shard in the first keyspace @@ -128,13 
+130,55 @@ func testTabletStatus(t *testing.T) { } func testExecuteAsDba(t *testing.T) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDba", clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].Alias, `SELECT 1 AS a`) - require.NoError(t, err) - assert.Equal(t, result, oneTableOutput) + tcases := []struct { + query string + result string + expectErr bool + }{ + { + query: "", + expectErr: true, + }, + { + query: "SELECT 1 AS a", + result: oneTableOutput, + }, + { + query: "SELECT 1 AS a; SELECT 1 AS a", + expectErr: true, + }, + { + query: "create table t(id int)", + result: "", + }, + { + query: "create table if not exists t(id int)", + result: "", + }, + { + query: "create table if not exists t(id int); create table if not exists t(id int);", + result: "", + }, + { + query: "create table if not exists t(id int); create table if not exists t(id int); SELECT 1 AS a", + expectErr: true, + }, + } + for _, tcase := range tcases { + t.Run(tcase.query, func(t *testing.T) { + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDBA", clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].Alias, tcase.query) + if tcase.expectErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tcase.result, result) + } + }) + } } func testExecuteAsApp(t *testing.T) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ExecuteFetchAsApp", clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].Alias, `SELECT 1 AS a`) + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsApp", clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].Alias, `SELECT 1 AS a`) require.NoError(t, err) assert.Equal(t, result, oneTableOutput) } diff --git a/go/test/endtoend/clustertest/vttablet_test.go b/go/test/endtoend/clustertest/vttablet_test.go index 369deb18cfd..5e7d5e27182 100644 --- a/go/test/endtoend/clustertest/vttablet_test.go +++ 
b/go/test/endtoend/clustertest/vttablet_test.go @@ -51,6 +51,6 @@ func TestDeleteTablet(t *testing.T) { defer cluster.PanicHandler(t) primary := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet() require.NotNil(t, primary) - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteTablet", "--", "--allow_primary", primary.Alias) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteTablets", "--allow-primary", primary.Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/docker/vttestserver.go b/go/test/endtoend/docker/vttestserver.go index 7f24134a28f..4f86c7616a1 100644 --- a/go/test/endtoend/docker/vttestserver.go +++ b/go/test/endtoend/docker/vttestserver.go @@ -39,7 +39,7 @@ type vttestserver struct { keyspaces []string numShards []int mysqlMaxConnecetions int - port int + basePort int } func newVttestserver(dockerImage string, keyspaces []string, numShards []int, mysqlMaxConnections, port int) *vttestserver { @@ -48,7 +48,7 @@ func newVttestserver(dockerImage string, keyspaces []string, numShards []int, my keyspaces: keyspaces, numShards: numShards, mysqlMaxConnecetions: mysqlMaxConnections, - port: port, + basePort: port, } } @@ -64,13 +64,16 @@ func (v *vttestserver) teardown() { func (v *vttestserver) startDockerImage() error { cmd := exec.Command("docker", "run") cmd.Args = append(cmd.Args, "--name=vttestserver-end2end-test") - cmd.Args = append(cmd.Args, "-p", fmt.Sprintf("%d:33577", v.port)) - cmd.Args = append(cmd.Args, "-e", "PORT=33574") + cmd.Args = append(cmd.Args, "-p", fmt.Sprintf("%d:%d", v.basePort, v.basePort)) + cmd.Args = append(cmd.Args, "-p", fmt.Sprintf("%d:%d", v.basePort+1, v.basePort+1)) + cmd.Args = append(cmd.Args, "-p", fmt.Sprintf("%d:%d", v.basePort+3, v.basePort+3)) + cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("PORT=%d", v.basePort)) cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("KEYSPACES=%s", strings.Join(v.keyspaces, ","))) cmd.Args = append(cmd.Args, "-e", 
fmt.Sprintf("NUM_SHARDS=%s", strings.Join(convertToStringSlice(v.numShards), ","))) cmd.Args = append(cmd.Args, "-e", "MYSQL_BIND_HOST=0.0.0.0") + cmd.Args = append(cmd.Args, "-e", "VTCOMBO_BIND_HOST=0.0.0.0") cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("MYSQL_MAX_CONNECTIONS=%d", v.mysqlMaxConnecetions)) - cmd.Args = append(cmd.Args, "--health-cmd", "mysqladmin ping -h127.0.0.1 -P33577") + cmd.Args = append(cmd.Args, "--health-cmd", fmt.Sprintf("mysqladmin ping -h127.0.0.1 -P%d", v.basePort+3)) cmd.Args = append(cmd.Args, "--health-interval=5s") cmd.Args = append(cmd.Args, "--health-timeout=2s") cmd.Args = append(cmd.Args, "--health-retries=5") diff --git a/go/test/endtoend/docker/vttestserver_test.go b/go/test/endtoend/docker/vttestserver_test.go index c89f6299f30..e34be52accf 100644 --- a/go/test/endtoend/docker/vttestserver_test.go +++ b/go/test/endtoend/docker/vttestserver_test.go @@ -22,6 +22,7 @@ import ( "os" "testing" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/mysql" @@ -44,7 +45,7 @@ func TestUnsharded(t *testing.T) { dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} for _, image := range dockerImages { t.Run(image, func(t *testing.T) { - vtest := newVttestserver(image, []string{"unsharded_ks"}, []int{1}, 1000, 33577) + vtest := newVttestserver(image, []string{"unsharded_ks"}, []int{1}, 1000, 33574) err := vtest.startDockerImage() require.NoError(t, err) defer vtest.teardown() @@ -56,7 +57,7 @@ func TestUnsharded(t *testing.T) { ctx := context.Background() vttestParams := mysql.ConnParams{ Host: "localhost", - Port: vtest.port, + Port: vtest.basePort + 3, } conn, err := mysql.Connect(ctx, &vttestParams) require.NoError(t, err) @@ -73,7 +74,7 @@ func TestSharded(t *testing.T) { dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} for _, image := range dockerImages { t.Run(image, func(t *testing.T) { - vtest := newVttestserver(image, 
[]string{"ks"}, []int{2}, 1000, 33577) + vtest := newVttestserver(image, []string{"ks"}, []int{2}, 1000, 33574) err := vtest.startDockerImage() require.NoError(t, err) defer vtest.teardown() @@ -85,7 +86,7 @@ func TestSharded(t *testing.T) { ctx := context.Background() vttestParams := mysql.ConnParams{ Host: "localhost", - Port: vtest.port, + Port: vtest.basePort + 3, } conn, err := mysql.Connect(ctx, &vttestParams) require.NoError(t, err) @@ -103,7 +104,7 @@ func TestMysqlMaxCons(t *testing.T) { dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} for _, image := range dockerImages { t.Run(image, func(t *testing.T) { - vtest := newVttestserver(image, []string{"ks"}, []int{2}, 100000, 33577) + vtest := newVttestserver(image, []string{"ks"}, []int{2}, 100000, 33574) err := vtest.startDockerImage() require.NoError(t, err) defer vtest.teardown() @@ -115,7 +116,7 @@ func TestMysqlMaxCons(t *testing.T) { ctx := context.Background() vttestParams := mysql.ConnParams{ Host: "localhost", - Port: vtest.port, + Port: vtest.basePort + 3, } conn, err := mysql.Connect(ctx, &vttestParams) require.NoError(t, err) @@ -125,6 +126,29 @@ func TestMysqlMaxCons(t *testing.T) { } } +// TestVtctldCommands tests that vtctld commands can be run with the docker image. 
+func TestVtctldCommands(t *testing.T) { + dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} + for _, image := range dockerImages { + t.Run(image, func(t *testing.T) { + vtest := newVttestserver(image, []string{"long_ks_name"}, []int{2}, 100, 33574) + err := vtest.startDockerImage() + require.NoError(t, err) + defer vtest.teardown() + + // wait for the docker to be setup + err = vtest.waitUntilDockerHealthy(10) + require.NoError(t, err) + + vtctldClient := cluster.VtctldClientProcessInstance("localhost", vtest.basePort+1, os.TempDir()) + res, err := vtctldClient.ExecuteCommandWithOutput("GetKeyspaces") + require.NoError(t, err) + // We verify that the command succeeds, and the keyspace name is present in the output. + require.Contains(t, res, "long_ks_name") + }) + } +} + func TestLargeNumberOfKeyspaces(t *testing.T) { dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} for _, image := range dockerImages { @@ -136,7 +160,7 @@ func TestLargeNumberOfKeyspaces(t *testing.T) { numShards = append(numShards, 1) } - vtest := newVttestserver(image, keyspaces, numShards, 100000, 33577) + vtest := newVttestserver(image, keyspaces, numShards, 100000, 33574) err := vtest.startDockerImage() require.NoError(t, err) defer vtest.teardown() @@ -148,7 +172,7 @@ func TestLargeNumberOfKeyspaces(t *testing.T) { ctx := context.Background() vttestParams := mysql.ConnParams{ Host: "localhost", - Port: vtest.port, + Port: vtest.basePort + 3, } conn, err := mysql.Connect(ctx, &vttestParams) require.NoError(t, err) diff --git a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go index 725659a5ee1..4c759ff577a 100644 --- a/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go +++ b/go/test/endtoend/encryption/encryptedreplication/encrypted_replication_test.go @@ -74,7 +74,7 @@ func testReplicationBase(t 
*testing.T, isClientCertPassed bool) { } // Reparent using SSL (this will also check replication works) - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspace, shardName, clusterInstance.Cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspace, shardName, clusterInstance.Cell, primaryTablet.TabletUID) if isClientCertPassed { require.NoError(t, err) } else { diff --git a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go index b076006ec2c..9147b7b9080 100644 --- a/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go +++ b/go/test/endtoend/encryption/encryptedtransport/encrypted_transport_test.go @@ -177,7 +177,7 @@ func TestSecureTransport(t *testing.T) { setCreds(t, "vtgate-client-1", "vtgate-server") ctx := context.Background() request := getRequest("select * from vt_insert_test") - vc, err := getVitessClient(grpcAddress) + vc, err := getVitessClient(ctx, grpcAddress) require.NoError(t, err) qr, err := vc.Execute(ctx, request) @@ -188,7 +188,7 @@ func TestSecureTransport(t *testing.T) { // 'vtgate client 2' is not authorized to access vt_insert_test setCreds(t, "vtgate-client-2", "vtgate-server") request = getRequest("select * from vt_insert_test") - vc, err = getVitessClient(grpcAddress) + vc, err = getVitessClient(ctx, grpcAddress) require.NoError(t, err) qr, err = vc.Execute(ctx, request) require.NoError(t, err) @@ -217,7 +217,7 @@ func useEffectiveCallerID(ctx context.Context, t *testing.T) { setSSLInfoEmpty() // get vitess client - vc, err := getVitessClient(grpcAddress) + vc, err := getVitessClient(ctx, grpcAddress) require.NoError(t, err) // test with empty effective caller Id @@ -266,7 +266,7 @@ func useEffectiveGroups(ctx context.Context, t *testing.T) { setSSLInfoEmpty() // get vitess client - vc, err := getVitessClient(grpcAddress) + vc, err := getVitessClient(ctx, grpcAddress) 
require.NoError(t, err) // test with empty effective caller Id @@ -452,12 +452,12 @@ func tabletConnExtraArgs(name string) []string { return args } -func getVitessClient(addr string) (vtgateservicepb.VitessClient, error) { +func getVitessClient(ctx context.Context, addr string) (vtgateservicepb.VitessClient, error) { opt, err := grpcclient.SecureDialOption(grpcCert, grpcKey, grpcCa, "", grpcName) if err != nil { return nil, err } - cc, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) + cc, err := grpcclient.DialContext(ctx, addr, grpcclient.FailFast(false), opt) if err != nil { return nil, err } diff --git a/go/test/endtoend/filelock/filelock.go b/go/test/endtoend/filelock/filelock.go index 05f27c321a8..d37331892d1 100644 --- a/go/test/endtoend/filelock/filelock.go +++ b/go/test/endtoend/filelock/filelock.go @@ -10,7 +10,6 @@ package filelock import ( "errors" "io/fs" - "os" ) // A File provides the minimal set of methods required to lock an open file. @@ -78,22 +77,7 @@ func (lt lockType) String() string { // IsNotSupported returns a boolean indicating whether the error is known to // report that a function is not supported (possibly for a specific input). -// It is satisfied by ErrNotSupported as well as some syscall errors. +// It is satisfied by errors.ErrUnsupported as well as some syscall errors. func IsNotSupported(err error) bool { - return isNotSupported(underlyingError(err)) -} - -var ErrNotSupported = errors.New("operation not supported") - -// underlyingError returns the underlying error for known os error types. 
-func underlyingError(err error) error { - switch err := err.(type) { - case *fs.PathError: - return err.Err - case *os.LinkError: - return err.Err - case *os.SyscallError: - return err.Err - } - return err + return errors.Is(err, errors.ErrUnsupported) } diff --git a/go/test/endtoend/filelock/filelock_unix.go b/go/test/endtoend/filelock/filelock_unix.go index 23064dae0be..6f73b1bfeea 100644 --- a/go/test/endtoend/filelock/filelock_unix.go +++ b/go/test/endtoend/filelock/filelock_unix.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd + package filelock import ( @@ -36,7 +38,3 @@ func lock(f File, lt lockType) (err error) { func unlock(f File) error { return lock(f, syscall.LOCK_UN) } - -func isNotSupported(err error) bool { - return err == syscall.ENOSYS || err == syscall.ENOTSUP || err == syscall.EOPNOTSUPP || err == ErrNotSupported -} diff --git a/go/test/endtoend/filelock/filelock_windows.go b/go/test/endtoend/filelock/filelock_windows.go new file mode 100644 index 00000000000..34df039b96b --- /dev/null +++ b/go/test/endtoend/filelock/filelock_windows.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows + +package filelock + +import ( + "errors" +) + +type lockType uint32 + +const ( + readLock lockType = 0 + writeLock lockType = 1 +) + +func lock(f File, lt lockType) error { + return errors.New("filelock: not implemented on windows") +} + +func unlock(f File) error { + return errors.New("filelock: not implemented on windows") +} diff --git a/go/test/endtoend/keyspace/keyspace_test.go b/go/test/endtoend/keyspace/keyspace_test.go index 338ad5c8cd2..2a665c66214 100644 --- a/go/test/endtoend/keyspace/keyspace_test.go +++ b/go/test/endtoend/keyspace/keyspace_test.go @@ -18,19 +18,20 @@ package sequence import ( "encoding/binary" - "encoding/json" "flag" "os" - "strings" "testing" - "vitess.io/vitess/go/vt/key" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/key" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( @@ -40,7 +41,7 @@ var ( cell = "zone1" cell2 = "zone2" hostname = "localhost" - servedTypes = map[topodata.TabletType]bool{topodata.TabletType_PRIMARY: true, topodata.TabletType_REPLICA: true, topodata.TabletType_RDONLY: true} + servedTypes = map[topodatapb.TabletType]bool{topodatapb.TabletType_PRIMARY: true, topodatapb.TabletType_REPLICA: true, topodatapb.TabletType_RDONLY: true} sqlSchema = `create table vt_insert_test ( id bigint auto_increment, msg varchar(64), @@ -109,7 +110,7 @@ func TestMain(m *testing.M) { if err := clusterForKSTest.StartKeyspace(*keyspaceSharded, []string{"-80", "80-"}, 1, false); err != nil { return 1 } - if err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceShardedName); err != nil { + if err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceShardedName); err != 
nil { return 1 } @@ -121,7 +122,7 @@ func TestMain(m *testing.M) { if err := clusterForKSTest.StartKeyspace(*keyspaceUnsharded, []string{keyspaceUnshardedName}, 1, false); err != nil { return 1 } - if err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceUnshardedName); err != nil { + if err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceUnshardedName); err != nil { return 1 } @@ -151,29 +152,31 @@ func TestDurabilityPolicyField(t *testing.T) { out, err = vtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) - out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", "--durability-policy=semi_sync", "ks_durability") + out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--durability-policy=semi_sync", "ks_durability") require.NoError(t, err, out) checkDurabilityPolicy(t, "semi_sync") - out, err = clusterForKSTest.VtctlProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") + out, err = clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("DeleteKeyspace", "ks_durability") require.NoError(t, err, out) } func checkDurabilityPolicy(t *testing.T, durabilityPolicy string) { - var keyspace topodata.Keyspace - out, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", "ks_durability") - require.NoError(t, err, out) - err = json.Unmarshal([]byte(out), &keyspace) + ks, err := clusterForKSTest.VtctldClientProcess.GetKeyspace("ks_durability") require.NoError(t, err) - require.Equal(t, keyspace.DurabilityPolicy, durabilityPolicy) + require.Equal(t, ks.Keyspace.DurabilityPolicy, durabilityPolicy) } func TestGetSrvKeyspaceNames(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) + data, err := 
clusterForKSTest.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvKeyspaceNames", cell) require.Nil(t, err) - assert.Contains(t, strings.Split(output, "\n"), keyspaceUnshardedName) - assert.Contains(t, strings.Split(output, "\n"), keyspaceShardedName) + + var namesByCell = map[string]*vtctldatapb.GetSrvKeyspaceNamesResponse_NameList{} + err = json2.Unmarshal([]byte(data), &namesByCell) + require.NoError(t, err) + + assert.Contains(t, namesByCell[cell].Names, keyspaceUnshardedName) + assert.Contains(t, namesByCell[cell].Names, keyspaceShardedName) } func TestGetSrvKeyspacePartitions(t *testing.T) { @@ -207,69 +210,73 @@ func TestGetSrvKeyspacePartitions(t *testing.T) { func TestShardNames(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, keyspaceShardedName) - require.Nil(t, err) - var srvKeyspace topodata.SrvKeyspace - - err = json.Unmarshal([]byte(output), &srvKeyspace) - require.Nil(t, err) + output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(keyspaceShardedName, cell) + require.NoError(t, err) + require.NotNil(t, output[cell], "no srvkeyspace for cell %s", cell) } func TestGetKeyspace(t *testing.T) { defer cluster.PanicHandler(t) - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetKeyspace", keyspaceUnshardedName) - require.Nil(t, err) - - var keyspace topodata.Keyspace - - err = json.Unmarshal([]byte(output), &keyspace) + _, err := clusterForKSTest.VtctldClientProcess.GetKeyspace(keyspaceUnshardedName) require.Nil(t, err) } func TestDeleteKeyspace(t *testing.T) { defer cluster.PanicHandler(t) - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--keyspace=test_delete_keyspace", "--shard=0", 
"zone1-0000000100", "primary") + _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") + _ = clusterForKSTest.InitTablet(&cluster.Vttablet{ + Type: "primary", + TabletUID: 100, + Cell: "zone1", + }, "test_delete_keyspace", "0") // Can't delete keyspace if there are shards present. - err := clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") + err := clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") require.Error(t, err) // Can't delete shard if there are tablets present. - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteShard", "--", "--even_if_serving", "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteShards", "--even-if-serving", "test_delete_keyspace/0") require.Error(t, err) // Use recursive DeleteShard to remove tablets. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteShard", "--", "--even_if_serving", "--recursive", "test_delete_keyspace/0") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteShards", "--even-if-serving", "--recursive", "test_delete_keyspace/0") // Now non-recursive DeleteKeyspace should work. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "test_delete_keyspace") // Start over and this time use recursive DeleteKeyspace to do everything. 
- _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateKeyspace", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("InitTablet", "--", "--port=1234", "--bind-address=127.0.0.1", "--keyspace=test_delete_keyspace", "--shard=0", "zone1-0000000100", "primary") + _ = clusterForKSTest.VtctldClientProcess.CreateKeyspace("test_delete_keyspace", sidecar.DefaultName) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("CreateShard", "test_delete_keyspace/0") + _ = clusterForKSTest.InitTablet(&cluster.Vttablet{ + Type: "primary", + TabletUID: 100, + Cell: "zone1", + HTTPPort: 1234, + }, "test_delete_keyspace", "0") // Create the serving/replication entries and check that they exist, // so we can later check they're deleted. - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", cell, "test_delete_keyspace/0") - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "test_delete_keyspace") + _, _ = clusterForKSTest.VtctldClientProcess.GetShardReplication("test_delete_keyspace", "0", cell) + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") // Recursive DeleteKeyspace - _ = clusterForKSTest.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "--", "--recursive", "test_delete_keyspace") + _ = clusterForKSTest.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "--recursive", "test_delete_keyspace") // Check that everything is gone. 
- err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetKeyspace", "test_delete_keyspace") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShard", "test_delete_keyspace/0") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetShard", "test_delete_keyspace/0") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetTablet", "zone1-0000000100") require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetShardReplication", cell, "test_delete_keyspace/0") + _, err = clusterForKSTest.VtctldClientProcess.GetShardReplication("test_delete_keyspace", "0", cell) require.Error(t, err) - err = clusterForKSTest.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") + err = clusterForKSTest.VtctldClientProcess.ExecuteCommand("GetSrvKeyspace", cell, "test_delete_keyspace") require.Error(t, err) + ksMap, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces("test_delete_keyspace", cell) + require.NoError(t, err) + require.Empty(t, ksMap[cell]) } // TODO: Fix this test, not running in CI @@ -388,7 +395,7 @@ func TestKeyspaceToShardName(t *testing.T) { // for each served type PRIMARY REPLICA RDONLY, the shard ref count should match for _, partition := range srvKeyspace.Partitions { - if partition.ServedType == topodata.TabletType_PRIMARY { + if partition.ServedType == topodatapb.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { shardKIDs := shardKIdMap[shardRef.Name] for _, kid := range shardKIDs { @@ -403,7 +410,7 @@ func TestKeyspaceToShardName(t *testing.T) { srvKeyspace = getSrvKeyspace(t, cell, keyspaceUnshardedName) for _, partition := range srvKeyspace.Partitions { - if partition.ServedType == topodata.TabletType_PRIMARY { + 
if partition.ServedType == topodatapb.TabletType_PRIMARY { for _, shardRef := range partition.ShardReferences { assert.Equal(t, shardRef.Name, keyspaceUnshardedName) } @@ -418,12 +425,12 @@ func packKeyspaceID(keyspaceID uint64) []byte { return (keybytes[:]) } -func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodata.SrvKeyspace { - output, err := clusterForKSTest.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvKeyspace", cell, ksname) - require.Nil(t, err) - var srvKeyspace topodata.SrvKeyspace +func getSrvKeyspace(t *testing.T, cell string, ksname string) *topodatapb.SrvKeyspace { + output, err := clusterForKSTest.VtctldClientProcess.GetSrvKeyspaces(ksname, cell) + require.NoError(t, err) - err = json.Unmarshal([]byte(output), &srvKeyspace) - require.Nil(t, err) - return &srvKeyspace + srvKeyspace := output[cell] + require.NotNil(t, srvKeyspace, "no srvkeyspace for cell %s", cell) + + return srvKeyspace } diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index 3082f295055..7e1190c16bb 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -393,12 +393,12 @@ func TestReparenting(t *testing.T) { // do planned reparenting, make one replica as primary // and validate client connection count in correspond tablets - clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", userKeyspace+"/-80", - "--new_primary", shard0Replica.Alias) + clusterInstance.VtctldClientProcess.ExecuteCommand( + "PlannedReparentShard", + userKeyspace+"/-80", + "--new-primary", shard0Replica.Alias) // validate topology - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err) // Verify connection has migrated. 
@@ -417,12 +417,12 @@ func TestReparenting(t *testing.T) { stream.Next() // make old primary again as new primary - clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - "--keyspace_shard", userKeyspace+"/-80", - "--new_primary", shard0Primary.Alias) + clusterInstance.VtctldClientProcess.ExecuteCommand( + "PlannedReparentShard", + userKeyspace+"/-80", + "--new-primary", shard0Primary.Alias) // validate topology - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err) time.Sleep(10 * time.Second) assertClientCount(t, 1, shard0Primary) diff --git a/go/test/endtoend/migration/migration_test.go b/go/test/endtoend/migration/migration_test.go index f0b91e2d6df..eca112e388d 100644 --- a/go/test/endtoend/migration/migration_test.go +++ b/go/test/endtoend/migration/migration_test.go @@ -145,7 +145,7 @@ func TestMigration(t *testing.T) { vt.ExtraArgs = append(vt.ExtraArgs, "--tablet_config", yamlFile) } createKeyspace(t, commerce, []string{"0"}, tabletConfig) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "commerce") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "commerce") require.NoError(t, err) err = clusterInstance.StartVtgate() @@ -221,7 +221,7 @@ func migrate(t *testing.T, fromdb, toks string, tables []string) { "('%s', '%s', %s, '', 9999, 9999, 'primary', 0, 0, 'Running')", tables[0], "vt_"+toks, sqlEscaped.String()) fmt.Printf("VReplicationExec: %s\n", query) vttablet := keyspaces[toks].Shards[0].Vttablets[0].VttabletProcess - err := clusterInstance.VtctlclientProcess.ExecuteCommand("VReplicationExec", vttablet.TabletPath, query) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("VReplicationExec", vttablet.TabletPath, query) require.NoError(t, err) } diff --git a/go/test/endtoend/mysqlctl/mysqlctl_test.go 
b/go/test/endtoend/mysqlctl/mysqlctl_test.go index 3b28c5bcf30..6c3d65226e3 100644 --- a/go/test/endtoend/mysqlctl/mysqlctl_test.go +++ b/go/test/endtoend/mysqlctl/mysqlctl_test.go @@ -141,21 +141,21 @@ func initCluster(shardNames []string, totalTabletsRequired int) { func TestRestart(t *testing.T) { defer cluster.PanicHandler(t) err := primaryTablet.MysqlctlProcess.Stop() - require.Nil(t, err) + require.NoError(t, err) primaryTablet.MysqlctlProcess.CleanupFiles(primaryTablet.TabletUID) err = primaryTablet.MysqlctlProcess.Start() - require.Nil(t, err) + require.NoError(t, err) } func TestAutoDetect(t *testing.T) { defer cluster.PanicHandler(t) err := clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.Setup() - require.Nil(t, err, "error should be nil") + require.NoError(t, err) err = clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].VttabletProcess.Setup() - require.Nil(t, err, "error should be nil") + require.NoError(t, err) // Reparent tablets, which requires flavor detection - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) - require.Nil(t, err, "error should be nil") + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) + require.NoError(t, err) } diff --git a/go/test/endtoend/mysqlctld/mysqlctld_test.go b/go/test/endtoend/mysqlctld/mysqlctld_test.go index 908a870d6f0..328bc563377 100644 --- a/go/test/endtoend/mysqlctld/mysqlctld_test.go +++ b/go/test/endtoend/mysqlctld/mysqlctld_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/vt/mysqlctl/mysqlctlclient" + "vitess.io/vitess/go/vt/proto/mysqlctl" "vitess.io/vitess/go/test/endtoend/cluster" ) @@ -158,14 +159,21 @@ func TestAutoDetect(t *testing.T) { require.Nil(t, err, "error should be nil") // Reparent tablets, which requires flavor detection - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, 
shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.Nil(t, err, "error should be nil") } func TestVersionString(t *testing.T) { - client, err := mysqlctlclient.New("unix", primaryTablet.MysqlctldProcess.SocketFile) + client, err := mysqlctlclient.New(context.Background(), "unix", primaryTablet.MysqlctldProcess.SocketFile) require.NoError(t, err) version, err := client.VersionString(context.Background()) require.NoError(t, err) require.NotEmpty(t, version) } + +func TestReadBinlogFilesTimestamps(t *testing.T) { + client, err := mysqlctlclient.New(context.Background(), "unix", primaryTablet.MysqlctldProcess.SocketFile) + require.NoError(t, err) + _, err = client.ReadBinlogFilesTimestamps(context.Background(), &mysqlctl.ReadBinlogFilesTimestampsRequest{}) + require.ErrorContains(t, err, "empty binlog list in ReadBinlogFilesTimestampsRequest") +} diff --git a/go/test/endtoend/mysqlserver/main_test.go b/go/test/endtoend/mysqlserver/main_test.go index 42b4e6ea235..18b169e33d7 100644 --- a/go/test/endtoend/mysqlserver/main_test.go +++ b/go/test/endtoend/mysqlserver/main_test.go @@ -51,7 +51,7 @@ var ( PARTITION BY HASH( TO_DAYS(created) ) PARTITIONS 10; ` - createProcSQL = `use vt_test_keyspace; + createProcSQL = ` CREATE PROCEDURE testing() BEGIN delete from vt_insert_test; @@ -144,7 +144,7 @@ func TestMain(m *testing.M) { } primaryTabletProcess := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet().VttabletProcess - if _, err := primaryTabletProcess.QueryTablet(createProcSQL, keyspaceName, false); err != nil { + if _, err := primaryTabletProcess.QueryTablet(createProcSQL, keyspaceName, true); err != nil { return 1, err } diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go index caed342688d..6b691582c66 100644 --- a/go/test/endtoend/mysqlserver/mysql_server_test.go +++ 
b/go/test/endtoend/mysqlserver/mysql_server_test.go @@ -116,7 +116,7 @@ func TestTimeout(t *testing.T) { require.Nilf(t, err, "unable to connect mysql: %v", err) defer conn.Close() - _, err = conn.ExecuteFetch("SELECT SLEEP(5);", 1, false) + _, err = conn.ExecuteFetch("SELECT SLEEP(5)", 1, false) require.NotNilf(t, err, "quiry timeout error expected") mysqlErr, ok := err.(*sqlerror.SQLError) require.Truef(t, ok, "invalid error type") @@ -132,7 +132,7 @@ func TestInvalidField(t *testing.T) { require.Nilf(t, err, "unable to connect mysql: %v", err) defer conn.Close() - _, err = conn.ExecuteFetch("SELECT invalid_field from vt_insert_test;", 1, false) + _, err = conn.ExecuteFetch("SELECT invalid_field from vt_insert_test", 1, false) require.NotNil(t, err, "invalid field error expected") mysqlErr, ok := err.(*sqlerror.SQLError) require.Truef(t, ok, "invalid error type") @@ -153,7 +153,7 @@ func TestWarnings(t *testing.T) { require.NoError(t, err) assert.Empty(t, qr.Rows, "number of rows") - qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) + qr, err = conn.ExecuteFetch("SHOW WARNINGS", 1, false) require.NoError(t, err, "SHOW WARNINGS") assert.EqualValues(t, 1, len(qr.Rows), "number of rows") assert.Contains(t, qr.Rows[0][0].String(), "VARCHAR(\"Warning\")", qr.Rows) @@ -164,7 +164,7 @@ func TestWarnings(t *testing.T) { _, err = conn.ExecuteFetch("SELECT 1 from vt_insert_test limit 1", 1, false) require.NoError(t, err) - qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) + qr, err = conn.ExecuteFetch("SHOW WARNINGS", 1, false) require.NoError(t, err) assert.Empty(t, qr.Rows) @@ -175,7 +175,7 @@ func TestWarnings(t *testing.T) { _, err = conn.ExecuteFetch("SELECT 1 from vt_insert_test limit 1", 1, false) require.NoError(t, err) - qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) + qr, err = conn.ExecuteFetch("SHOW WARNINGS", 1, false) require.NoError(t, err) assert.Empty(t, qr.Rows) } diff --git a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go 
b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go deleted file mode 100644 index 3dc635c8870..00000000000 --- a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go +++ /dev/null @@ -1,451 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ghost - -import ( - "flag" - "fmt" - "os" - "path" - "strings" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/schema" - - "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/test/endtoend/onlineddl" - "vitess.io/vitess/go/test/endtoend/throttler" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - clusterInstance *cluster.LocalProcessCluster - shards []cluster.Shard - vtParams mysql.ConnParams - hostname = "localhost" - keyspaceName = "ks" - cell = "zone1" - schemaChangeDirectory = "" - totalTableCount = 4 - - normalMigrationWait = 20 * time.Second - - createTable = ` - CREATE TABLE %s ( - id bigint(20) NOT NULL, - msg varchar(64), - PRIMARY KEY (id) - ) ENGINE=InnoDB;` - insertStatements = []string{ - `insert into %s (id, msg) values (3, 'three')`, - `insert into %s (id, msg) values (5, 'five')`, - `insert into %s (id, msg) values (7, 'seven')`, - `insert into %s (id, msg) values (11, 'eleven')`, - `insert into %s (id, msg) values (13, 'thirteen')`, - } - // To verify non online-DDL behavior - alterTableNormalStatement = ` - ALTER TABLE %s - ADD COLUMN non_online int UNSIGNED NOT 
NULL` - // A trivial statement which must succeed and does not change the schema - alterTableTrivialStatement = ` - ALTER TABLE %s - ENGINE=InnoDB` - // The following statement is valid - alterTableSuccessfulStatement = ` - ALTER TABLE %s - MODIFY id bigint UNSIGNED NOT NULL, - ADD COLUMN ghost_col int NOT NULL, - ADD INDEX idx_msg(msg)` - // The following statement will fail because gh-ost requires some shared unique key - alterTableFailedStatement = ` - ALTER TABLE %s - DROP PRIMARY KEY, - DROP COLUMN ghost_col` - // We will run this query with "gh-ost --max-load=Threads_running=1" - alterTableThrottlingStatement = ` - ALTER TABLE %s - DROP COLUMN ghost_col` - onlineDDLCreateTableStatement = ` - CREATE TABLE %s ( - id bigint NOT NULL, - online_ddl_create_col INT NOT NULL, - PRIMARY KEY (id) - ) ENGINE=InnoDB;` - noPKCreateTableStatement = ` - CREATE TABLE %s ( - online_ddl_create_col INT NOT NULL - ) ENGINE=InnoDB;` - onlineDDLDropTableStatement = ` - DROP TABLE %s` - onlineDDLDropTableIfExistsStatement = ` - DROP TABLE IF EXISTS %s` - - vSchema = ` - { - "sharded": true, - "vindexes": { - "hash_index": { - "type": "hash" - } - }, - "tables": { - "vt_onlineddl_test_00": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - }, - "vt_onlineddl_test_01": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - }, - "vt_onlineddl_test_02": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - }, - "vt_onlineddl_test_03": { - "column_vindexes": [ - { - "column": "id", - "name": "hash_index" - } - ] - } - } - } - ` -) - -func TestMain(m *testing.M) { - defer cluster.PanicHandler(nil) - flag.Parse() - - exitcode, err := func() (int, error) { - clusterInstance = cluster.NewCluster(cell, hostname) - schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) - defer os.RemoveAll(schemaChangeDirectory) - defer clusterInstance.Teardown() - 
- if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { - _ = os.Mkdir(schemaChangeDirectory, 0700) - } - - clusterInstance.VtctldExtraArgs = []string{ - "--schema_change_dir", schemaChangeDirectory, - "--schema_change_controller", "local", - "--schema_change_check_interval", "1s", - } - - clusterInstance.VtTabletExtraArgs = []string{ - "--heartbeat_interval", "250ms", - "--heartbeat_on_demand_duration", "5s", - "--migration_check_interval", "5s", - "--gh-ost-path", os.Getenv("VITESS_ENDTOEND_GH_OST_PATH"), // leave env variable empty/unset to get the default behavior. Override in Mac. - } - clusterInstance.VtGateExtraArgs = []string{ - "--ddl_strategy", "gh-ost", - } - - if err := clusterInstance.StartTopo(); err != nil { - return 1, err - } - - keyspace := &cluster.Keyspace{ - Name: keyspaceName, - VSchema: vSchema, - } - - if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { - return 1, err - } - - vtgateInstance := clusterInstance.NewVtgateInstance() - // Start vtgate - if err := vtgateInstance.Setup(); err != nil { - return 1, err - } - // ensure it is torn down during cluster TearDown - clusterInstance.VtgateProcess = *vtgateInstance - vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, - } - - return m.Run(), nil - }() - if err != nil { - fmt.Printf("%v\n", err) - os.Exit(1) - } else { - os.Exit(exitcode) - } - -} - -func TestSchemaChange(t *testing.T) { - defer cluster.PanicHandler(t) - shards = clusterInstance.Keyspaces[0].Shards - assert.Equal(t, 2, len(shards)) - - throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) - - testWithInitialSchema(t) - t.Run("create non_online", func(t *testing.T) { - _ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyDirect), "vtctl", "non_online", "") - }) - t.Run("successful online alter, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, 
alterTableSuccessfulStatement, "gh-ost", "vtgate", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - - var totalRowsCopied uint64 - // count sum of rows copied in all shards, that should be the total number of rows inserted to the table - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - rowsCopied := row.AsUint64("rows_copied", 0) - totalRowsCopied += rowsCopied - } - require.Equal(t, uint64(len(insertStatements)), totalRowsCopied) - - // See that we're able to read logs after successful migration: - expectedMessage := "starting gh-ost" - logs := onlineddl.ReadMigrationLogs(t, &vtParams, uuid) - assert.Equal(t, len(shards), len(logs)) - for i := range logs { - require.Contains(t, logs[i], expectedMessage) - } - - }) - t.Run("successful online alter, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtctl", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("successful online alter, postponed, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost -postpone-completion", "vtgate", "ghost_col", "") - // Should be still running! 
- onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) - // Issue a complete and wait for successful completion - onlineddl.CheckCompleteMigration(t, &vtParams, shards, uuid, true) - // This part may take a while, because we depend on vreplicatoin polling - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("throttled migration", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled) - }) - t.Run("failed migration", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableFailedStatement, "gh-ost", "vtgate", "ghost_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) - // migration will fail again - }) - t.Run("cancel all migrations: nothing to cancel", func(t *testing.T) { - // no migrations pending at this time - time.Sleep(10 * time.Second) - 
onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) - }) - t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { - // spawn n migrations; cancel them via cancel-all - var wg sync.WaitGroup - count := 4 - for i := 0; i < count; i++ { - wg.Add(1) - go func() { - defer wg.Done() - _ = testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost --max-load=Threads_running=1", "vtgate", "ghost_col", "") - }() - } - wg.Wait() - onlineddl.CheckCancelAllMigrations(t, &vtParams, len(shards)*count) - }) - t.Run("Online DROP, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtctl", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Online CREATE, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "gh-ost", "vtctl", "online_ddl_create_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Online DROP TABLE IF EXISTS, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - // this table existed - checkTables(t, schema.OnlineDDLToGCUUID(uuid), 1) - }) - t.Run("Online DROP TABLE IF EXISTS for nonexistent table, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "gh-ost", "vtgate", "", "") - 
onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - // this table did not exist - checkTables(t, schema.OnlineDDLToGCUUID(uuid), 0) - }) - t.Run("Online DROP TABLE for nonexistent table, expect error, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) - }) - t.Run("Online CREATE no PK table, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, noPKCreateTableStatement, "gh-ost", "vtgate", "online_ddl_create_col", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Fail ALTER for no PK table, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtgate", "", "") - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) - - expectedMessage := "No PRIMARY nor UNIQUE key found" - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - message := row["message"].ToString() - // the following message is generated by gh-ost. 
We test that it is captured in our 'message' column: - require.Contains(t, message, expectedMessage) - } - - // See that we're able to read logs after failed migration: - logs := onlineddl.ReadMigrationLogs(t, &vtParams, uuid) - assert.Equal(t, len(shards), len(logs)) - for i := range logs { - require.Contains(t, logs[i], expectedMessage) - } - }) -} - -func testWithInitialSchema(t *testing.T) { - // Create 4 tables and populate them - var sqlQuery = "" //nolint - for i := 0; i < totalTableCount; i++ { - tableName := fmt.Sprintf("vt_onlineddl_test_%02d", i) - sqlQuery = fmt.Sprintf(createTable, tableName) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) - require.Nil(t, err) - - for _, insert := range insertStatements { - insertQuery := fmt.Sprintf(insert, tableName) - r := onlineddl.VtgateExecQuery(t, &vtParams, insertQuery, "") - require.NotNil(t, r) - } - } - - // Check if 4 tables are created - checkTables(t, "", totalTableCount) -} - -// testOnlineDDLStatement runs an online DDL, ALTER statement -func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, callerID string) (uuid string) { - tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) - sqlQuery := fmt.Sprintf(alterStatement, tableName) - if executeStrategy == "vtgate" { - row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, sqlQuery, "").Named().Row() - if row != nil { - uuid = row.AsString("uuid", "") - } - } else { - var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, CallerID: callerID}) - assert.NoError(t, err) - } - uuid = strings.TrimSpace(uuid) - fmt.Println("# Generated UUID (for debug purposes):") - fmt.Printf("<%s>\n", uuid) - - strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) - assert.NoError(t, err) - - if !strategySetting.Strategy.IsDirect() { - status := 
onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - } - - if expectHint != "" { - checkMigratedTable(t, tableName, expectHint) - } - return uuid -} - -// checkTables checks the number of tables in the first two shards. -func checkTables(t *testing.T, showTableName string, expectCount int) { - for i := range clusterInstance.Keyspaces[0].Shards { - checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) - } -} - -// checkTablesCount checks the number of tables in the given tablet -func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { - query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) - queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - require.Nil(t, err) - assert.Equal(t, expectCount, len(queryResult.Rows)) -} - -// checkMigratedTables checks the CREATE STATEMENT of a table after migration -func checkMigratedTable(t *testing.T, tableName, expectColumn string) { - for i := range clusterInstance.Keyspaces[0].Shards { - createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) - assert.Contains(t, createStatement, expectColumn) - } -} - -// getCreateTableStatement returns the CREATE TABLE statement for a given table -func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { - queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) - require.Nil(t, err) - - assert.Equal(t, len(queryResult.Rows), 1) - assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement - statement = queryResult.Rows[0][1].ToString() - return statement -} diff --git 
a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index 41cd5b5a1be..0efed92f440 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -20,7 +20,7 @@ import ( "context" "flag" "fmt" - "math/rand" + "math/rand/v2" "os" "path" "strings" @@ -30,6 +30,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" @@ -157,14 +158,6 @@ func TestMain(m *testing.M) { "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", "--watch_replication_stream", - // The next flags are deprecated, and we incldue them to verify that they are nonetheless still allowed. - // The values are irrelevant. Just the fact that the flags are allowed in what's important. - // These should be included in v18, and removed in v19. - "--throttle_threshold", "1m", - "--throttle_metrics_query", "select 1 from dual", - "--throttle_metrics_threshold", "1.5", - "--throttle_check_as_check_self=false", - "--throttler-config-via-topo=true", } clusterInstance.VtGateExtraArgs = []string{ "--ddl_strategy", "online", @@ -223,8 +216,10 @@ func testRevertible(t *testing.T) { fkOnlineDDLPossible := false t.Run("check 'rename_table_preserve_foreign_key' variable", func(t *testing.T) { // Online DDL is not possible on vanilla MySQL 8.0 for reasons described in https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/. - // However, Online DDL is made possible in via these changes: https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced - // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps1. 
+ // However, Online DDL is made possible in via these changes: + // - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced + // - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 + // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. // Said changes introduce a new global/session boolean variable named 'rename_table_preserve_foreign_key'. It defaults 'false'/0 for backwards compatibility. // When enabled, a `RENAME TABLE` to a FK parent "pins" the children's foreign keys to the table name rather than the table pointer. Which means after the RENAME, // the children will point to the newly instated table rather than the original, renamed table. @@ -362,8 +357,8 @@ func testRevertible(t *testing.T) { }, { name: "expanded: enum", - fromSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a', 'b'), e3 enum('a', 'b'), e4 enum('a', 'b'), e5 enum('a', 'b'), e6 enum('a', 'b'), e7 enum('a', 'b'), e8 enum('a', 'b')`, - toSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a'), e3 enum('a', 'b', 'c'), e4 enum('a', 'x'), e5 enum('a', 'x', 'b'), e6 enum('b'), e7 varchar(1), e8 tinyint`, + fromSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a', 'b'), e3 enum('a', 'b'), e4 enum('a', 'b'), e5 enum('a', 'b'), e6 enum('a', 'b'), e7 enum('a', 'b'), e8 enum('a', 'b')`, + toSchema: `id int primary key, e1 enum('a', 'b'), e2 enum('a'), e3 enum('a', 'b', 'c'), e4 enum('a', 'x'), e5 enum('a', 'x', 'b'), e6 enum('b'), e7 varchar(1), e8 tinyint`, expandedColumnNames: `e3,e4,e5,e6,e7,e8`, }, { @@ -428,7 +423,20 @@ func testRevertible(t *testing.T) { droppedNoDefaultColumnNames := row.AsString("dropped_no_default_column_names", "") expandedColumnNames := row.AsString("expanded_column_names", "") - assert.Equal(t, testcase.removedForeignKeyNames, removeBackticks(removedForeignKeyNames)) + // Online DDL renames constraint names, and keeps the original name as a 
prefix. + // The name of e.g. "some_fk_2_" might turn into "some_fk_2_518ubnm034rel35l1m0u1dc7m" + expectRemovedForeignKeyNames := strings.Split(testcase.removedForeignKeyNames, ",") + actualRemovedForeignKeyNames := strings.Split(removeBackticks(removedForeignKeyNames), ",") + assert.Equal(t, len(expectRemovedForeignKeyNames), len(actualRemovedForeignKeyNames)) + for _, actualRemovedForeignKeyName := range actualRemovedForeignKeyNames { + found := false + for _, expectRemovedForeignKeyName := range expectRemovedForeignKeyNames { + if strings.HasPrefix(actualRemovedForeignKeyName, expectRemovedForeignKeyName) { + found = true + } + } + assert.Truef(t, found, "unexpected FK name", "%s", actualRemovedForeignKeyName) + } assert.Equal(t, testcase.removedUniqueKeyNames, removeBackticks(removedUniqueKeyNames)) assert.Equal(t, testcase.droppedNoDefaultColumnNames, removeBackticks(droppedNoDefaultColumnNames)) assert.Equal(t, testcase.expandedColumnNames, removeBackticks(expandedColumnNames)) @@ -466,7 +474,8 @@ func testRevertible(t *testing.T) { droppedNoDefaultColumnNames := row.AsString("dropped_no_default_column_names", "") expandedColumnNames := row.AsString("expanded_column_names", "") - assert.Equal(t, "some_fk_2", removeBackticks(removedForeignKeyNames)) + // Online DDL renames constraint names, and keeps the original name as a prefix. The name will be e.g. 
some_fk_2_518ubnm034rel35l1m0u1dc7m + assert.Contains(t, removeBackticks(removedForeignKeyNames), "some_fk_2") assert.Equal(t, "", removeBackticks(removedUniqueKeyNames)) assert.Equal(t, "", removeBackticks(droppedNoDefaultColumnNames)) assert.Equal(t, "", removeBackticks(expandedColumnNames)) @@ -551,7 +560,7 @@ func testRevert(t *testing.T) { mysqlVersion = onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()) require.NotEmpty(t, mysqlVersion) - _, capableOf, _ := mysql.GetFlavor(mysqlVersion, nil) + capableOf := mysql.ServerVersionCapableOf(mysqlVersion) var uuids []string ddlStrategy := "online" @@ -1033,7 +1042,7 @@ func testRevert(t *testing.T) { require.NotNil(t, row) specialPlan := row.AsString("special_plan", "") artifacts := row.AsString("artifacts", "") - instantDDLCapable, err := capableOf(mysql.InstantDDLFlavorCapability) + instantDDLCapable, err := capableOf(capabilities.InstantDDLFlavorCapability) assert.NoError(t, err) if instantDDLCapable { // instant DDL expected to apply in 8.0 @@ -1050,7 +1059,7 @@ func testRevert(t *testing.T) { t.Run("INSTANT DDL: fail revert", func(t *testing.T) { uuid := testRevertMigration(t, uuids[len(uuids)-1], ddlStrategy) uuids = append(uuids, uuid) - instantDDLCapable, err := capableOf(mysql.InstantDDLFlavorCapability) + instantDDLCapable, err := capableOf(capabilities.InstantDDLFlavorCapability) assert.NoError(t, err) if instantDDLCapable { // instant DDL expected to apply in 8.0, therefore revert is impossible @@ -1133,7 +1142,7 @@ func testRevert(t *testing.T) { checkPartitionedTableCountRows(t, 6) }) t.Run("partitions: drop first partition", func(t *testing.T) { - uuid := testOnlineDDLStatementForTable(t, "alter table part_test drop partition `p1`", ddlStrategy+" --fast-range-rotation", "vtgate", "") + uuid := testOnlineDDLStatementForTable(t, "alter table part_test drop partition `p1`", ddlStrategy, "vtgate", "") uuids = append(uuids, uuid) onlineddl.CheckMigrationStatus(t, 
&vtParams, shards, uuid, schema.OnlineDDLStatusComplete) checkTable(t, partitionedTableName, true) @@ -1148,7 +1157,7 @@ func testRevert(t *testing.T) { checkPartitionedTableCountRows(t, 5) }) t.Run("partitions: add new partition", func(t *testing.T) { - uuid := testOnlineDDLStatementForTable(t, "alter table part_test add partition (PARTITION p7 VALUES LESS THAN (70))", ddlStrategy+" --fast-range-rotation", "vtgate", "") + uuid := testOnlineDDLStatementForTable(t, "alter table part_test add partition (PARTITION p7 VALUES LESS THAN (70))", ddlStrategy, "vtgate", "") uuids = append(uuids, uuid) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) checkTable(t, partitionedTableName, true) @@ -1185,7 +1194,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) @@ -1273,7 +1282,7 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s } func generateInsert(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(insertRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -1297,7 +1306,7 @@ func generateInsert(t *testing.T, conn *mysql.Conn) error { } func generateUpdate(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(updateRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -1321,7 +1330,7 @@ func generateUpdate(t *testing.T, conn *mysql.Conn) error { } func generateDelete(t 
*testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(deleteRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -1360,7 +1369,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { log.Infof("Terminating single connection") return } - switch rand.Int31n(3) { + switch rand.Int32N(3) { case 0: err = generateInsert(t, conn) case 1: diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index fbe6377c1fe..4362069af66 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -22,7 +22,7 @@ import ( "flag" "fmt" "io" - "math/rand" + "math/rand/v2" "os" "path" "strings" @@ -31,6 +31,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" @@ -81,7 +82,7 @@ var ( keyspaceName = "ks" cell = "zone1" schemaChangeDirectory = "" - overrideVtctlParams *cluster.VtctlClientParams + overrideVtctlParams *cluster.ApplySchemaParams ) type WriteMetrics struct { @@ -127,7 +128,8 @@ deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, func parseTableName(t *testing.T, sql string) (tableName string) { // ddlStatement could possibly be composed of multiple DDL statements - tokenizer := sqlparser.NewStringTokenizer(sql) + parser := sqlparser.NewTestParser() + tokenizer := parser.NewStringTokenizer(sql) for { stmt, err := sqlparser.ParseNextStrictDDL(tokenizer) if err != nil && errors.Is(err, io.EOF) { @@ -200,6 +202,29 @@ func waitForReadyToComplete(t *testing.T, uuid string, expected bool) { } } +func waitForMessage(t *testing.T, uuid string, messageSubstring string) { + ctx, cancel := context.WithTimeout(context.Background(), normalWaitTime) 
+ defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + message := row.AsString("message", "") + if strings.Contains(message, messageSubstring) { + return + } + } + select { + case <-ticker.C: + case <-ctx.Done(): + } + require.NoError(t, ctx.Err()) + } +} + func TestMain(m *testing.M) { defer cluster.PanicHandler(nil) flag.Parse() @@ -223,6 +248,7 @@ func TestMain(m *testing.M) { clusterInstance.VtTabletExtraArgs = []string{ "--heartbeat_interval", "250ms", "--heartbeat_on_demand_duration", "5s", + "--migration_check_interval", "5s", "--watch_replication_stream", } clusterInstance.VtGateExtraArgs = []string{} @@ -310,7 +336,7 @@ func testScheduler(t *testing.T) { mysqlVersion := onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()) require.NotEmpty(t, mysqlVersion) - _, capableOf, _ := mysql.GetFlavor(mysqlVersion, nil) + capableOf := mysql.ServerVersionCapableOf(mysqlVersion) var ( t1uuid string @@ -366,6 +392,9 @@ func testScheduler(t *testing.T) { alterNonexistent = ` ALTER TABLE nonexistent FORCE ` + populateT1Statement = ` + insert into t1_test values (1, 'new_row') + ` ) testReadTimestamp := func(t *testing.T, uuid string, timestampColumn string) (timestamp string) { @@ -490,6 +519,117 @@ func testScheduler(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) }) }) + + t.Run("Postpone completion ALTER", func(t *testing.T) { + t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion", "vtgate", "", "", true)) // skip wait + + t.Run("wait for t1 running", func(t *testing.T) { + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + }) + 
t.Run("check postpone_completion", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + postponeCompletion := row.AsInt64("postpone_completion", 0) + assert.Equal(t, int64(1), postponeCompletion) + } + }) + t.Run("complete", func(t *testing.T) { + onlineddl.CheckCompleteMigration(t, &vtParams, shards, t1uuid, true) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) + }) + t.Run("check no postpone_completion", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + postponeCompletion := row.AsInt64("postpone_completion", 0) + assert.Equal(t, int64(0), postponeCompletion) + } + }) + }) + + forceCutoverCapable, err := capableOf(capabilities.PerformanceSchemaDataLocksTableCapability) // 8.0 + require.NoError(t, err) + if forceCutoverCapable { + t.Run("force_cutover", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), extendedWaitTime*2) + defer cancel() + + t.Run("populate t1_test", func(t *testing.T) { + onlineddl.VtgateExecQuery(t, &vtParams, populateT1Statement, "") + }) + t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion", "vtgate", "", "", true)) // skip wait + + t.Run("wait for t1 running", func(t *testing.T) { + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + }) + t.Run("wait for t1 ready to complete", func(t *testing.T) { + // Waiting for 'running', above, is not enough. 
We want to let vreplication a chance to start running, or else + // we attempt the cut-over too early. Specifically in this test, we're going to lock rows FOR UPDATE, which, + // if vreplication does not get the chance to start, will prevent it from doing anything at all. + // ready_to_complete is a great signal for us that vreplication is healthy and up to date. + waitForReadyToComplete(t, t1uuid, true) + }) + + commitTransactionChan := make(chan any) + transactionErrorChan := make(chan error) + t.Run("locking table rows", func(t *testing.T) { + go runInTransaction(t, ctx, shards[0].Vttablets[0], "select * from t1_test for update", commitTransactionChan, transactionErrorChan) + }) + t.Run("check no force_cutover", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + forceCutOver := row.AsInt64("force_cutover", 0) + assert.Equal(t, int64(0), forceCutOver) // disabled + } + }) + t.Run("attempt to complete", func(t *testing.T) { + onlineddl.CheckCompleteMigration(t, &vtParams, shards, t1uuid, true) + }) + t.Run("cut-over fail due to timeout", func(t *testing.T) { + waitForMessage(t, t1uuid, "(errno 3024) (sqlstate HY000): Query execution was interrupted, maximum statement execution time exceeded") + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusRunning) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusRunning) + }) + t.Run("force_cutover", func(t *testing.T) { + onlineddl.CheckForceMigrationCutOver(t, &vtParams, shards, t1uuid, true) + }) + t.Run("check force_cutover", func(t *testing.T) { + rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + forceCutOver := 
row.AsInt64("force_cutover", 0) + assert.Equal(t, int64(1), forceCutOver) // enabled + } + }) + t.Run("expect completion", func(t *testing.T) { + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) + }) + t.Run("expect transaction failure", func(t *testing.T) { + select { + case commitTransactionChan <- true: // good + case <-ctx.Done(): + assert.Fail(t, ctx.Err().Error()) + } + // Transaction will now attempt to commit. But we expect our "force_cutover" to have terminated + // the transaction's connection. + select { + case err := <-transactionErrorChan: + assert.ErrorContains(t, err, "broken pipe") + case <-ctx.Done(): + assert.Fail(t, ctx.Err().Error()) + } + }) + }) + } t.Run("ALTER both tables non-concurrent", func(t *testing.T) { t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait t2uuid = testOnlineDDLStatement(t, createParams(trivialAlterT2Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait @@ -818,7 +958,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration", func(t *testing.T) { uuid := "00000000_1111_2222_3333_444444444444" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: uuid, MigrationContext: "idempotent:1111-2222-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. 
We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -854,7 +994,7 @@ func testScheduler(t *testing.T) { t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) { uuid := "00000000_1111_3333_3333_444444444444" ddlStrategy := ddlStrategy + " --singleton-context" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} + overrideVtctlParams = &cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: uuid, MigrationContext: "idempotent:1111-3333-3333"} defer func() { overrideVtctlParams = nil }() // create a migration and cancel it. We don't let it complete. We want it in "failed" state t.Run("start and fail migration", func(t *testing.T) { @@ -894,6 +1034,9 @@ func testScheduler(t *testing.T) { t1uuid = testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion --retain-artifacts=1s", "vtctl", "", "", true)) // skip wait onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) }) + t.Run("wait for ready_to_complete", func(t *testing.T) { + waitForReadyToComplete(t, t1uuid, true) + }) var artifacts []string t.Run("validate artifact exists", func(t *testing.T) { rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) @@ -941,8 +1084,27 @@ func testScheduler(t *testing.T) { }) }) + checkConstraintCapable, err := capableOf(capabilities.CheckConstraintsCapability) // 8.0.16 and above + require.NoError(t, err) + if checkConstraintCapable { + // Constraints + t.Run("CREATE TABLE with CHECK constraint", func(t *testing.T) { + query := `create table with_constraint (id int primary key, check ((id >= 0)))` + uuid := testOnlineDDLStatement(t, createParams(query, ddlStrategy, "vtgate", "chk_", "", false)) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + t.Run("ensure constraint name is 
rewritten", func(t *testing.T) { + // Since we did not provide a name for the CHECK constraint, MySQL will + // name it `with_constraint_chk_1`. But we expect Online DDL to explicitly + // modify the constraint name, specifically to get rid of the prefix, + // so that we don't get into https://bugs.mysql.com/bug.php?id=107772 situation. + createStatement := getCreateTableStatement(t, shards[0].Vttablets[0], "with_constraint") + assert.NotContains(t, createStatement, "with_constraint_chk") + }) + }) + } + // INSTANT DDL - instantDDLCapable, err := capableOf(mysql.InstantAddLastColumnFlavorCapability) + instantDDLCapable, err := capableOf(capabilities.InstantAddLastColumnFlavorCapability) require.NoError(t, err) if instantDDLCapable { t.Run("INSTANT DDL: postpone-completion", func(t *testing.T) { @@ -1149,7 +1311,6 @@ func testSingleton(t *testing.T) { key updates_idx(updates) ) ENGINE=InnoDB ` - // We will run this query with "gh-ost --max-load=Threads_running=1" alterTableThrottlingStatement = ` ALTER TABLE stress_test DROP COLUMN created_timestamp ` @@ -1205,38 +1366,38 @@ DROP TABLE IF EXISTS stress_test checkTable(t, tableName, true) }) - var throttledUUID string - t.Run("throttled migration", func(t *testing.T) { - throttledUUID = testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "", false)) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning) + var openEndedUUID string + t.Run("open ended migration", func(t *testing.T) { + openEndedUUID = testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "vitess --singleton --postpone-completion", "vtgate", "", "hint_col", "", false)) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, openEndedUUID, schema.OnlineDDLStatusRunning) }) t.Run("failed singleton migration, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, 
createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtgate", "", "hint_col", "rejected", true)) + uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "vitess --singleton --postpone-completion", "vtgate", "", "hint_col", "rejected", true)) assert.Empty(t, uuid) }) t.Run("failed singleton migration, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "gh-ost --singleton --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", true)) + uuid := testOnlineDDLStatement(t, createParams(alterTableThrottlingStatement, "vitess --singleton --postpone-completion", "vtctl", "", "hint_col", "rejected", true)) assert.Empty(t, uuid) }) t.Run("failed revert migration", func(t *testing.T) { - uuid := testRevertMigration(t, createRevertParams(throttledUUID, onlineSingletonDDLStrategy, "vtgate", "", "rejected", true)) + uuid := testRevertMigration(t, createRevertParams(openEndedUUID, onlineSingletonDDLStrategy, "vtgate", "", "rejected", true)) assert.Empty(t, uuid) }) t.Run("terminate throttled migration", func(t *testing.T) { - onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning) - onlineddl.CheckCancelMigration(t, &vtParams, shards, throttledUUID, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, throttledUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, openEndedUUID, schema.OnlineDDLStatusRunning) + onlineddl.CheckCancelMigration(t, &vtParams, shards, openEndedUUID, true) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, openEndedUUID, 20*time.Second, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, 
schema.OnlineDDLStatusCancelled) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, openEndedUUID, schema.OnlineDDLStatusCancelled) }) - t.Run("successful gh-ost alter, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "gh-ost --singleton", "vtctl", "", "hint_col", "", false)) + t.Run("successful alter, vtctl", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "vitess --singleton", "vtctl", "", "hint_col", "", false)) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) - t.Run("successful gh-ost alter, vtgate", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "gh-ost --singleton", "vtgate", "", "hint_col", "", false)) + t.Run("successful alter, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, createParams(alterTableTrivialStatement, "vitess --singleton", "vtgate", "", "hint_col", "", false)) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) @@ -1260,8 +1421,8 @@ DROP TABLE IF EXISTS stress_test var throttledUUIDs []string // singleton-context - t.Run("throttled migrations, singleton-context", func(t *testing.T) { - uuidList := testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "", false)) + t.Run("postponed migrations, singleton-context", func(t *testing.T) { + uuidList := testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "vitess --singleton-context --postpone-completion", "vtctl", "", "hint_col", "", false)) throttledUUIDs = 
strings.Split(uuidList, "\n") assert.Equal(t, 3, len(throttledUUIDs)) for _, uuid := range throttledUUIDs { @@ -1269,7 +1430,7 @@ DROP TABLE IF EXISTS stress_test } }) t.Run("failed migrations, singleton-context", func(t *testing.T) { - _ = testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "gh-ost --singleton-context --max-load=Threads_running=1", "vtctl", "", "hint_col", "rejected", false)) + _ = testOnlineDDLStatement(t, createParams(multiAlterTableThrottlingStatement, "vitess --singleton-context --postpone-completion", "vtctl", "", "hint_col", "rejected", false)) }) t.Run("terminate throttled migrations", func(t *testing.T) { for _, uuid := range throttledUUIDs { @@ -1293,7 +1454,7 @@ DROP TABLE IF EXISTS stress_test } }) - //DROP + // DROP t.Run("online DROP TABLE", func(t *testing.T) { uuid := testOnlineDDLStatement(t, createParams(dropStatement, onlineSingletonDDLStrategy, "vtgate", "", "", "", false)) @@ -1462,7 +1623,7 @@ func testDeclarative(t *testing.T) { var uuids []string generateInsert := func(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(insertRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -1486,7 +1647,7 @@ func testDeclarative(t *testing.T) { } generateUpdate := func(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(updateRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -1510,7 +1671,7 @@ func testDeclarative(t *testing.T) { } generateDelete := func(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(deleteRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -2061,6 +2222,7 @@ func testForeignKeys(t *testing.T) { sql string allowForeignKeys bool expectHint string + expectCountUUIDs int 
onlyIfFKOnlineDDLPossible bool } var testCases = []testCase{ @@ -2128,18 +2290,30 @@ func testForeignKeys(t *testing.T) { }, { name: "drop foreign key from a child", - sql: "alter table child_table DROP FOREIGN KEY child_parent_fk", + sql: "alter table child_table DROP FOREIGN KEY ", // See "getting child_table constraint name" test step below. allowForeignKeys: true, expectHint: "child_hint", onlyIfFKOnlineDDLPossible: true, }, + { + name: "add two tables with cyclic fk relationship", + sql: ` + create table t11 (id int primary key, i int, constraint f11 foreign key (i) references t12 (id)); + create table t12 (id int primary key, i int, constraint f12 foreign key (i) references t11 (id)); + `, + allowForeignKeys: true, + expectCountUUIDs: 2, + expectHint: "t11", + }, } fkOnlineDDLPossible := false t.Run("check 'rename_table_preserve_foreign_key' variable", func(t *testing.T) { // Online DDL is not possible on vanilla MySQL 8.0 for reasons described in https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/. - // However, Online DDL is made possible in via these changes: https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced - // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps1. + // However, Online DDL is made possible in via these changes: + // - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced + // - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 + // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. // Said changes introduce a new global/session boolean variable named 'rename_table_preserve_foreign_key'. It defaults 'false'/0 for backwards compatibility. // When enabled, a `RENAME TABLE` to a FK parent "pins" the children's foreign keys to the table name rather than the table pointer. 
Which means after the RENAME, // the children will point to the newly instated table rather than the original, renamed table. @@ -2173,6 +2347,9 @@ func testForeignKeys(t *testing.T) { return testOnlineDDLStatement(t, createParams(sql, ddlStrategy, "vtctl", expectHint, errorHint, false)) } for _, testcase := range testCases { + if testcase.expectCountUUIDs == 0 { + testcase.expectCountUUIDs = 1 + } t.Run(testcase.name, func(t *testing.T) { if testcase.onlyIfFKOnlineDDLPossible && !fkOnlineDDLPossible { t.Skipf("skipped because backing database does not support 'rename_table_preserve_foreign_key'") @@ -2193,10 +2370,26 @@ func testForeignKeys(t *testing.T) { }) } }) + t.Run("getting child_table constraint name", func(t *testing.T) { + // Due to how OnlineDDL works, the name of the foreign key constraint will not be the one we used in the CREATE TABLE statement. + // There's a specific test where we drop said constraint. So speficially for that test (or any similar future tests), we need to dynamically + // evaluate the constraint name. 
+ rs := onlineddl.VtgateExecQuery(t, &vtParams, "select CONSTRAINT_NAME from information_schema.REFERENTIAL_CONSTRAINTS where TABLE_NAME='child_table'", "") + assert.Equal(t, 1, len(rs.Rows)) + row := rs.Named().Row() + assert.NotNil(t, row) + childTableConstraintName := row.AsString("CONSTRAINT_NAME", "") + assert.NotEmpty(t, childTableConstraintName) + testcase.sql = strings.ReplaceAll(testcase.sql, "", childTableConstraintName) + }) + var uuid string t.Run("run migration", func(t *testing.T) { if testcase.allowForeignKeys { - uuid = testStatement(t, testcase.sql, ddlStrategyAllowFK, testcase.expectHint, false) + output := testStatement(t, testcase.sql, ddlStrategyAllowFK, testcase.expectHint, false) + uuids := strings.Split(output, "\n") + assert.Equal(t, testcase.expectCountUUIDs, len(uuids)) + uuid = uuids[0] // in case of multiple statements, we only check the first onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) } else { uuid = testStatement(t, testcase.sql, ddlStrategy, "", true) @@ -2216,7 +2409,7 @@ func testForeignKeys(t *testing.T) { artifacts = textutil.SplitDelimitedList(row.AsString("artifacts", "")) } - artifacts = append(artifacts, "child_table", "child_nofk_table", "parent_table") + artifacts = append(artifacts, "child_table", "child_nofk_table", "parent_table", "t11", "t12") // brute force drop all tables. In MySQL 8.0 you can do a single `DROP TABLE ... ` // which auto-resovled order. But in 5.7 you can't. 
droppedTables := map[string]bool{} @@ -2226,7 +2419,7 @@ func testForeignKeys(t *testing.T) { continue } statement := fmt.Sprintf("DROP TABLE IF EXISTS %s", artifact) - _, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.VtctlClientParams{DDLStrategy: "direct"}) + _, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, statement, cluster.ApplySchemaParams{DDLStrategy: "direct --unsafe-allow-foreign-keys"}) if err == nil { droppedTables[artifact] = true } @@ -2258,11 +2451,11 @@ func testOnlineDDLStatement(t *testing.T, params *testOnlineDDLStatementParams) } } } else { - vtctlParams := &cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext} + vtctlParams := &cluster.ApplySchemaParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext} if overrideVtctlParams != nil { vtctlParams = overrideVtctlParams } - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams) + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, params.ddlStatement, *vtctlParams) switch params.expectError { case anyErrorIndicator: if err != nil { @@ -2307,7 +2500,7 @@ func testRevertMigration(t *testing.T, params *testRevertMigrationParams) (uuid } } } else { - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext}) + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.ApplySchemaParams{DDLStrategy: params.ddlStrategy, MigrationContext: params.migrationContext}) if params.expectError == "" { assert.NoError(t, err) uuid = output @@ -2328,7 +2521,7 @@ func testRevertMigration(t *testing.T, params *testRevertMigrationParams) (uuid return uuid } -// checkTable 
checks the number of tables in the first two shards. +// checkTable checks the number of tables in all shards func checkTable(t *testing.T, showTableName string, expectExists bool) bool { expectCount := 0 if expectExists { @@ -2347,7 +2540,7 @@ func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName stri query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) require.Nil(t, err) - return assert.Equal(t, expectCount, len(queryResult.Rows)) + return assert.Equalf(t, expectCount, len(queryResult.Rows), "checkTablesCount cannot find table like '%%%s%%'", showTableName) } // checkMigratedTables checks the CREATE STATEMENT of a table after migration @@ -2368,3 +2561,31 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s statement = queryResult.Rows[0][1].ToString() return statement } + +func runInTransaction(t *testing.T, ctx context.Context, tablet *cluster.Vttablet, query string, commitTransactionChan chan any, transactionErrorChan chan error) error { + conn, err := tablet.VttabletProcess.TabletConn(keyspaceName, true) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("begin", 0, false) + require.NoError(t, err) + + _, err = conn.ExecuteFetch(query, 10000, false) + require.NoError(t, err) + + if commitTransactionChan != nil { + // Wait for instruction to commit + select { + case <-commitTransactionChan: + // good + case <-ctx.Done(): + assert.Fail(t, ctx.Err().Error()) + } + } + + _, err = conn.ExecuteFetch("commit", 0, false) + if transactionErrorChan != nil { + transactionErrorChan <- err + } + return err +} diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 49e72eda290..e5df3051612 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ 
-458,7 +458,7 @@ func TestSchemaChange(t *testing.T) { time.Sleep(10 * time.Second) onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) // Validate that invoking CANCEL ALL via vtctl works - onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) + onlineddl.CheckCancelAllMigrationsViaVtctld(t, &clusterInstance.VtctldClientProcess, keyspaceName) }) t.Run("cancel all migrations: some migrations to cancel", func(t *testing.T) { // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` @@ -497,7 +497,7 @@ func TestSchemaChange(t *testing.T) { } wg.Wait() // cancelling via vtctl does not return values. We CANCEL ALL via vtctl, then validate via VTGate that nothing remains to be cancelled. - onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) + onlineddl.CheckCancelAllMigrationsViaVtctld(t, &clusterInstance.VtctldClientProcess, keyspaceName) onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) }) @@ -555,7 +555,7 @@ func TestSchemaChange(t *testing.T) { }) t.Run("PRS shard -80", func(t *testing.T) { // migration has started and is throttled. We now run PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", keyspaceName+"/-80", "--new-primary", reparentTablet.Alias) require.NoError(t, err, "failed PRS: %v", err) rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") onlineddl.PrintQueryResult(os.Stdout, rs) @@ -650,7 +650,7 @@ func TestSchemaChange(t *testing.T) { }) t.Run("PRS shard -80", func(t *testing.T) { // migration has started and completion is postponed. 
We now PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", keyspaceName+"/-80", "--new-primary", reparentTablet.Alias) require.NoError(t, err, "failed PRS: %v", err) rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") onlineddl.PrintQueryResult(os.Stdout, rs) @@ -905,7 +905,7 @@ func testWithInitialSchema(t *testing.T) { var sqlQuery = "" //nolint for i := 0; i < totalTableCount; i++ { sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_onlineddl_test_%02d", i)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) } @@ -923,8 +923,8 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str uuid = row.AsString("uuid", "") } } else { - params := cluster.VtctlClientParams{DDLStrategy: ddlStrategy, UUIDList: providedUUIDList, MigrationContext: providedMigrationContext} - output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) + params := cluster.ApplySchemaParams{DDLStrategy: ddlStrategy, UUIDs: providedUUIDList, MigrationContext: providedMigrationContext} + output, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, params) if expectError == "" { assert.NoError(t, err) uuid = output diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 7f560a24f9e..770f7f3ee93 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -20,7 +20,7 @@ import ( "context" "flag" "fmt" - "math/rand" + 
"math/rand/v2" "os" "path" "runtime" @@ -29,16 +29,16 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/schema" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" "vitess.io/vitess/go/test/endtoend/throttler" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/vttablet" ) type WriteMetrics struct { @@ -184,6 +184,9 @@ func TestMain(m *testing.M) { "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", "--watch_replication_stream", + // Test VPlayer batching mode. + fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching), } clusterInstance.VtGateExtraArgs = []string{ "--ddl_strategy", "online", @@ -326,11 +329,11 @@ func TestSchemaChange(t *testing.T) { func testWithInitialSchema(t *testing.T) { for _, statement := range cleanupStatements { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) require.Nil(t, err) } // Create the stress table - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) // Check if table is created @@ -346,7 +349,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, 
err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) @@ -425,7 +428,7 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s } func generateInsert(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(insertRowStatement, id, nextOpOrder()) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -449,7 +452,7 @@ func generateInsert(t *testing.T, conn *mysql.Conn) error { } func generateUpdate(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(updateRowStatement, nextOpOrder(), id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -473,7 +476,7 @@ func generateUpdate(t *testing.T, conn *mysql.Conn) error { } func generateDelete(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(deleteRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -511,7 +514,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.D defer ticker.Stop() for { - switch rand.Int31n(3) { + switch rand.Int32N(3) { case 0: err = generateInsert(t, conn) case 1: diff --git a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go index bac59241cf2..a3fa676d40b 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress_suite/onlineddl_vrepl_stress_suite_test.go @@ -31,7 +31,7 @@ import ( "context" "flag" "fmt" - "math/rand" + "math/rand/v2" "os" "path" "strings" @@ -40,18 +40,18 @@ import ( "testing" "time" + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/timer" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" "vitess.io/vitess/go/test/endtoend/throttler" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/timer" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/vttablet" ) type testcase struct { @@ -436,6 +436,9 @@ func TestMain(m *testing.M) { "--migration_check_interval", "5s", "--vstream_packet_size", "4096", // Keep this value small and below 10k to ensure multilple vstream iterations "--watch_replication_stream", + // Test VPlayer batching mode. + fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching), } clusterInstance.VtGateExtraArgs = []string{ "--ddl_strategy", "online", @@ -512,7 +515,7 @@ func TestSchemaChange(t *testing.T) { t.Run("migrate", func(t *testing.T) { require.NotEmpty(t, testcase.alterStatement) - hintText := fmt.Sprintf("hint-after-alter-%d", rand.Int31n(int32(maxTableRows))) + hintText := fmt.Sprintf("hint-after-alter-%d", rand.Int32N(int32(maxTableRows))) hintStatement := fmt.Sprintf(alterHintStatement, hintText) fullStatement := fmt.Sprintf("%s, %s", hintStatement, testcase.alterStatement) @@ -550,10 +553,10 @@ func TestSchemaChange(t *testing.T) { func testWithInitialSchema(t *testing.T) { // Create the stress table for _, statement := range cleanupStatements { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) require.Nil(t, err) } - err := 
clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) // Check if table is created @@ -569,7 +572,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) @@ -641,10 +644,10 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s } func generateInsert(t *testing.T, conn *mysql.Conn, autoIncInsert bool) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(insertRowStatement, id, -id, id, id, nextOpOrder()) if autoIncInsert { - id = rand.Int31() + id = rand.Int32() query = fmt.Sprintf(insertRowAutoIncStatement, -id, id, id, nextOpOrder()) } qr, err := conn.ExecuteFetch(query, 1000, true) @@ -655,7 +658,7 @@ func generateInsert(t *testing.T, conn *mysql.Conn, autoIncInsert bool) error { } func generateUpdate(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(updateRowStatement, nextOpOrder(), id) qr, err := conn.ExecuteFetch(query, 1000, true) if err == nil && qr != nil { @@ -665,7 +668,7 @@ func generateUpdate(t *testing.T, conn *mysql.Conn) error { } func generateDelete(t *testing.T, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(deleteRowStatement, id) qr, err := conn.ExecuteFetch(query, 1000, true) if err == nil && qr != nil { @@ -694,7 +697,7 @@ func 
runSingleConnection(ctx context.Context, t *testing.T, autoIncInsert bool, log.Infof("Terminating single connection") return } - switch rand.Int31n(3) { + switch rand.Int32N(3) { case 0: err = generateInsert(t, conn, autoIncInsert) case 1: diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go index c8b87215036..6122a71aa44 100644 --- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go @@ -28,6 +28,7 @@ import ( "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" @@ -58,8 +59,7 @@ var ( ) const ( - testDataPath = "testdata" - defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" + testDataPath = "testdata" ) func TestMain(m *testing.M) { @@ -134,6 +134,27 @@ func TestSchemaChange(t *testing.T) { throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance, time.Second) + fkOnlineDDLPossible := false + t.Run("check 'rename_table_preserve_foreign_key' variable", func(t *testing.T) { + // Online DDL is not possible on vanilla MySQL 8.0 for reasons described in https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/. + // However, Online DDL is made possible via these changes: + // - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced + // - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 + // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. + // Said changes introduce a new global/session boolean variable named 'rename_table_preserve_foreign_key'. It defaults to 'false'/0 for backwards compatibility.
+ // When enabled, a `RENAME TABLE` to a FK parent "pins" the children's foreign keys to the table name rather than the table pointer. Which means after the RENAME, + // the children will point to the newly instated table rather than the original, renamed table. + // (Note: this applies to a particular type of RENAME where we swap tables, see the above blog post). + // For FK children, the MySQL changes simply ignore any Vitess-internal table. + // + // In this stress test, we enable Online DDL if the variable 'rename_table_preserve_foreign_key' is present. The Online DDL mechanism will in turn + // query for this variable, and manipulate it, when starting the migration and when cutting over. + rs, err := shards[0].Vttablets[0].VttabletProcess.QueryTablet("show global variables like 'rename_table_preserve_foreign_key'", keyspaceName, false) + require.NoError(t, err) + fkOnlineDDLPossible = len(rs.Rows) > 0 + t.Logf("MySQL support for 'rename_table_preserve_foreign_key': %v", fkOnlineDDLPossible) + }) + files, err := os.ReadDir(testDataPath) require.NoError(t, err) for _, f := range files { @@ -142,7 +163,7 @@ func TestSchemaChange(t *testing.T) { } // this is a test! t.Run(f.Name(), func(t *testing.T) { - testSingle(t, f.Name()) + testSingle(t, f.Name(), fkOnlineDDLPossible) }) } } @@ -161,7 +182,14 @@ func readTestFile(t *testing.T, testName string, fileName string) (content strin // testSingle is the main testing function for a single test in the suite. // It prepares the grounds, creates the test data, runs a migration, expects results/error, cleans up. 
-func testSingle(t *testing.T, testName string) { +func testSingle(t *testing.T, testName string, fkOnlineDDLPossible bool) { + if _, exists := readTestFile(t, testName, "require_rename_table_preserve_foreign_key"); exists { + if !fkOnlineDDLPossible { + t.Skipf("Skipping test due to require_rename_table_preserve_foreign_key") + return + } + } + if ignoreVersions, exists := readTestFile(t, testName, "ignore_versions"); exists { // ignoreVersions is a regexp re, err := regexp.Compile(ignoreVersions) @@ -178,7 +206,7 @@ func testSingle(t *testing.T, testName string) { } } - sqlMode := defaultSQLMode + sqlMode := config.DefaultSQLMode if overrideSQLMode, exists := readTestFile(t, testName, "sql_mode"); exists { sqlMode = overrideSQLMode } diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-rename/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-rename/alter new file mode 100644 index 00000000000..b3e025d40b5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-rename/alter @@ -0,0 +1 @@ +modify e enum('red', 'green', 'cyan') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-rename/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-rename/create.sql new file mode 100644 index 00000000000..cb4e6db0fcd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-rename/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 
'red'); + insert into onlineddl_test values (null, 13, 'green'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-reorder/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-reorder/alter new file mode 100644 index 00000000000..6e011c14192 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-reorder/alter @@ -0,0 +1 @@ +change e e enum('blue', 'green', 'red') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-reorder/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-reorder/create.sql new file mode 100644 index 00000000000..84ebd4094c1 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-reorder/create.sql @@ -0,0 +1,26 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 211, 'red'); + insert into onlineddl_test values (null, 213, 'green'); + insert into onlineddl_test values (null, 217, 'blue'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-truncate/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-truncate/alter new file mode 100644 index 00000000000..1d11c9cf5cb --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-truncate/alter @@ -0,0 +1 @@ +modify e enum('red', 'green') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-truncate/create.sql 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-truncate/create.sql new file mode 100644 index 00000000000..cb4e6db0fcd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-truncate/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'green'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-whitespace/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-whitespace/alter new file mode 100644 index 00000000000..39c00aa3903 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-whitespace/alter @@ -0,0 +1 @@ +change e e enum('red', 'light green', 'blue', 'orange', 'yellow') collate 'utf8_bin' null default null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-whitespace/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-whitespace/create.sql new file mode 100644 index 00000000000..741b06e9040 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-whitespace/create.sql @@ -0,0 +1,27 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'light green', 'blue', 'orange') null default null collate 'utf8_bin', + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + 
starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'light green'); + insert into onlineddl_test values (null, 17, 'blue'); + set @last_insert_id := last_insert_id(); + update onlineddl_test set e='orange' where id = @last_insert_id; + insert into onlineddl_test values (null, 23, null); + set @last_insert_id := last_insert_id(); + update onlineddl_test set i=i+1, e=null where id = @last_insert_id; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/alter new file mode 100644 index 00000000000..b3e025d40b5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/alter @@ -0,0 +1 @@ +modify e enum('red', 'green', 'cyan') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/create.sql new file mode 100644 index 00000000000..27980b89072 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); +end ;; diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/expect_failure new file mode 100644 index 00000000000..dfa68652e4d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-changelog/expect_failure @@ -0,0 +1 @@ +Data truncated for column diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/alter new file mode 100644 index 00000000000..b3e025d40b5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/alter @@ -0,0 +1 @@ +modify e enum('red', 'green', 'cyan') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/create.sql new file mode 100644 index 00000000000..248ab90f5fc --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/create.sql @@ -0,0 +1,11 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/expect_failure new file mode 100644 index 00000000000..dfa68652e4d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-rename-copy/expect_failure @@ -0,0 +1 @@ +Data truncated for column diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/alter 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/alter new file mode 100644 index 00000000000..1d11c9cf5cb --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/alter @@ -0,0 +1 @@ +modify e enum('red', 'green') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/create.sql new file mode 100644 index 00000000000..27980b89072 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/expect_failure new file mode 100644 index 00000000000..dfa68652e4d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-changelog/expect_failure @@ -0,0 +1 @@ +Data truncated for column diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/alter new file mode 100644 index 00000000000..1d11c9cf5cb --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/alter @@ -0,0 
+1 @@ +modify e enum('red', 'green') not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/create.sql new file mode 100644 index 00000000000..248ab90f5fc --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/create.sql @@ -0,0 +1,11 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue') not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/expect_failure new file mode 100644 index 00000000000..dfa68652e4d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-enum-truncate-copy/expect_failure @@ -0,0 +1 @@ +Data truncated for column diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/alter new file mode 100644 index 00000000000..0660453e839 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/alter @@ -0,0 +1 @@ +modify parent_id int not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/create.sql new file mode 100644 index 00000000000..22113932f4f --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/create.sql @@ -0,0 +1,15 @@ +set session foreign_key_checks=0; +drop table if exists onlineddl_test_child; +drop table if exists onlineddl_test; +drop 
table if exists onlineddl_test_parent; +set session foreign_key_checks=1; +create table onlineddl_test_parent ( + id int auto_increment, + primary key(id) +); +create table onlineddl_test ( + id int auto_increment, + parent_id int null, + primary key(id), + constraint test_fk foreign key (parent_id) references onlineddl_test_parent (id) on delete no action +); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/ddl_strategy b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/ddl_strategy new file mode 100644 index 00000000000..f48a3989618 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/ddl_strategy @@ -0,0 +1 @@ +--unsafe-allow-foreign-keys \ No newline at end of file diff --git a/go/vt/vttablet/tabletserver/report.xml b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/require_rename_table_preserve_foreign_key similarity index 100% rename from go/vt/vttablet/tabletserver/report.xml rename to go/test/endtoend/onlineddl/vrepl_suite/testdata/fk-child-modify-not-null/require_rename_table_preserve_foreign_key diff --git a/go/test/endtoend/onlineddl/vtctlutil.go b/go/test/endtoend/onlineddl/vtctlutil.go index 19a6ff79604..52a832f0e1f 100644 --- a/go/test/endtoend/onlineddl/vtctlutil.go +++ b/go/test/endtoend/onlineddl/vtctlutil.go @@ -25,9 +25,9 @@ import ( ) // CheckCancelAllMigrations cancels all pending migrations. There is no validation for affected migrations. 
-func CheckCancelAllMigrationsViaVtctl(t *testing.T, vtctlclient *cluster.VtctlClientProcess, keyspace string) { +func CheckCancelAllMigrationsViaVtctld(t *testing.T, vtctldclient *cluster.VtctldClientProcess, keyspace string) { cancelQuery := "alter vitess_migration cancel all" - _, err := vtctlclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.VtctlClientParams{}) + _, err := vtctldclient.ApplySchemaWithOutput(keyspace, cancelQuery, cluster.ApplySchemaParams{}) assert.NoError(t, err) } diff --git a/go/test/endtoend/onlineddl/vtgate_util.go b/go/test/endtoend/onlineddl/vtgate_util.go index 693523cec48..f2272fcd73e 100644 --- a/go/test/endtoend/onlineddl/vtgate_util.go +++ b/go/test/endtoend/onlineddl/vtgate_util.go @@ -19,7 +19,6 @@ package onlineddl import ( "context" "fmt" - "math" "os" "testing" "time" @@ -57,7 +56,7 @@ func VtgateExecQuery(t *testing.T, vtParams *mysql.ConnParams, query string, exp require.Nil(t, err) defer conn.Close() - qr, err := conn.ExecuteFetch(query, math.MaxInt64, true) + qr, err := conn.ExecuteFetch(query, -1, true) if expectError == "" { require.NoError(t, err) } else { @@ -206,6 +205,21 @@ func CheckLaunchAllMigrations(t *testing.T, vtParams *mysql.ConnParams, expectCo } } +// CheckForceMigrationCutOver marks a migration for forced cut-over, and expects success by counting affected rows. 
+func CheckForceMigrationCutOver(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectPossible bool) { + query, err := sqlparser.ParseAndBind("alter vitess_migration %a force_cutover", + sqltypes.StringBindVariable(uuid), + ) + require.NoError(t, err) + r := VtgateExecQuery(t, vtParams, query, "") + + if expectPossible { + assert.Equal(t, len(shards), int(r.RowsAffected)) + } else { + assert.Equal(t, int(0), int(r.RowsAffected)) + } +} + // CheckMigrationStatus verifies that the migration indicated by given UUID has the given expected status func CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectStatuses ...schema.OnlineDDLStatus) bool { query, err := sqlparser.ParseAndBind("show vitess_migrations like %a", diff --git a/go/test/endtoend/recovery/pitr/binlog_server.go b/go/test/endtoend/recovery/pitr/binlog_server.go index 764af2b57cf..3b78b0d4ad7 100644 --- a/go/test/endtoend/recovery/pitr/binlog_server.go +++ b/go/test/endtoend/recovery/pitr/binlog_server.go @@ -93,14 +93,18 @@ func (bs *binLogServer) start(source mysqlSource) error { bs.proc.Args = append(bs.proc.Args, fmt.Sprintf("-ripple_master_password=%s", source.password)) } - errFile, _ := os.Create(path.Join(bs.dataDirectory, "log.txt")) + errFile, err := os.Create(path.Join(bs.dataDirectory, "log.txt")) + if err != nil { + log.Errorf("cannot create error log file for binlog server: %v", err) + return err + } bs.proc.Stderr = errFile bs.proc.Env = append(bs.proc.Env, os.Environ()...) 
log.Infof("Running binlog server with command: %v", strings.Join(bs.proc.Args, " ")) - err := bs.proc.Start() + err = bs.proc.Start() if err != nil { return err } diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index d04b5600362..03fcf76b07c 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -22,18 +22,21 @@ import ( "os" "os/exec" "path" + "strings" "testing" "time" - "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( @@ -89,8 +92,6 @@ var ( } }` commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", "--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", @@ -143,7 +144,7 @@ func TestPITRRecovery(t *testing.T) { cluster.VerifyRowsInTabletForTable(t, replica1, keyspaceName, 2, "product") // backup the replica - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) require.NoError(t, err) // check that the backup shows up in the listing @@ -183,10 +184,10 @@ func TestPITRRecovery(t *testing.T) { cluster.VerifyRowsInTabletForTable(t, shard1Replica1, keyspaceName, 4, "product") // take the backup (to simulate the regular backup) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard0Replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", shard0Replica1.Alias) require.NoError(t, err) // take the backup (to simulate the regular 
backup) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", shard1Replica1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", shard1Replica1.Alias) require.NoError(t, err) backups, err := clusterInstance.ListBackups(keyspaceName + "/-80") @@ -297,44 +298,44 @@ func TestPITRRecovery(t *testing.T) { } func performResharding(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema) + err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards=0", "--target_shards=-80,80-", "Create", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "create", "--source-shards=0", "--target-shards=-80,80-", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) waitTimeout := 30 * time.Second shard0Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecar.DefaultName, waitTimeout) shard1Primary.VttabletProcess.WaitForVReplicationToCatchup(t, "ks.reshardWorkflow", dbName, sidecar.DefaultName, waitTimeout) - waitForNoWorkflowLag(t, clusterInstance, "ks.reshardWorkflow") + waitForNoWorkflowLag(t, clusterInstance, "ks", "reshardWorkflow") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=rdonly", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=replica", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=replica", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") 
require.NoError(t, err) // then serve primary from the split shards - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", "ks.reshardWorkflow") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--tablet-types=primary", "--target-keyspace", "ks", "--workflow", "reshardWorkflow") require.NoError(t, err) // remove the original tablets in the original shard removeTablets(t, []*cluster.Vttablet{primary, replica1, replica2}) for _, tablet := range []*cluster.Vttablet{replica1, replica2} { - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", "--allow_primary", primary.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", "--allow-primary", primary.Alias) require.NoError(t, err) // rebuild the serving graph, all mentions of the old shards should be gone - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", "ks") require.NoError(t, err) // delete the original shard - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteShard", "ks/0") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteShards", "ks/0") require.NoError(t, err) // Restart vtgate process @@ -462,13 +463,13 @@ func initializeCluster(t *testing.T) { } } - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID) + err = 
clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard0.Name, cell, shard0Primary.TabletUID) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) require.NoError(t, err) err = clusterInstance.StartVTOrc(keyspaceName) @@ -499,9 +500,9 @@ func insertRow(t *testing.T, id int, productName string, isSlow bool) { } func createRestoreKeyspace(t *testing.T, timeToRecover, restoreKeyspaceName string) { - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CreateKeyspace", "--", - "--keyspace_type=SNAPSHOT", "--base_keyspace="+keyspaceName, - "--snapshot_time", timeToRecover, restoreKeyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("CreateKeyspace", + "--type=SNAPSHOT", "--base-keyspace="+keyspaceName, + "--snapshot-timestamp", timeToRecover, restoreKeyspaceName) log.Info(output) require.NoError(t, err) } @@ -558,9 +559,6 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer * "--binlog_user", binlogServer.username, "--binlog_password", binlogServer.password, "--pitr_gtid_lookup_timeout", lookupTimeout, - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", - "--vreplication_tablet_type", "replica", "--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", @@ -578,22 +576,26 @@ func launchRecoveryTablet(t *testing.T, tablet *cluster.Vttablet, binlogServer * // waitForNoWorkflowLag waits for the VReplication workflow's MaxVReplicationTransactionLag // value to be 0. 
-func waitForNoWorkflowLag(t *testing.T, vc *cluster.LocalProcessCluster, ksWorkflow string) { - lag := int64(0) +func waitForNoWorkflowLag(t *testing.T, vc *cluster.LocalProcessCluster, ks string, workflow string) { + var lag int64 timer := time.NewTimer(defaultTimeout) defer timer.Stop() for { - output, err := vc.VtctlclientProcess.ExecuteCommandWithOutput("Workflow", "--", ksWorkflow, "show") + output, err := vc.VtctldClientProcess.ExecuteCommandWithOutput("Workflow", "--keyspace", ks, "show", "--workflow", workflow) require.NoError(t, err) - lag, err = jsonparser.GetInt([]byte(output), "MaxVReplicationTransactionLag") + + var resp vtctldatapb.GetWorkflowsResponse + err = json2.Unmarshal([]byte(output), &resp) require.NoError(t, err) + require.GreaterOrEqual(t, len(resp.Workflows), 1, "response should have at least one workflow") + lag = resp.Workflows[0].MaxVReplicationTransactionLag if lag == 0 { return } select { case <-timer.C: require.FailNow(t, fmt.Sprintf("workflow %q did not eliminate VReplication lag before the timeout of %s; last seen MaxVReplicationTransactionLag: %d", - ksWorkflow, defaultTimeout, lag)) + strings.Join([]string{ks, workflow}, "."), defaultTimeout, lag)) default: time.Sleep(defaultTick) } diff --git a/go/test/endtoend/recovery/unshardedrecovery/recovery.go b/go/test/endtoend/recovery/unshardedrecovery/recovery.go index f4db74bbf4e..1ebb7c2647f 100644 --- a/go/test/endtoend/recovery/unshardedrecovery/recovery.go +++ b/go/test/endtoend/recovery/unshardedrecovery/recovery.go @@ -51,8 +51,6 @@ var ( dbCredentialFile string shardName = "0" commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", "--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", @@ -166,7 +164,7 @@ func TestMainImpl(m *testing.M) { if err != nil { return 1, err } - if err := localCluster.VtctlclientProcess.InitializeShard(keyspaceName, shard.Name, cell, 
primary.TabletUID); err != nil { + if err := localCluster.VtctldClientProcess.InitializeShard(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil { return 1, err } if err := localCluster.StartVTOrc(keyspaceName); err != nil { @@ -208,17 +206,17 @@ func TestRecoveryImpl(t *testing.T) { verifyInitialReplication(t) // take first backup of value = test1 - err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err := localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) assert.NoError(t, err) backups := listBackups(t) require.Equal(t, len(backups), 1) assert.Contains(t, backups[0], replica1.Alias) - err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema) + err = localCluster.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema) assert.NoError(t, err) - output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName) + output, err := localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName) assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") @@ -226,12 +224,12 @@ func TestRecoveryImpl(t *testing.T) { restoreTime := time.Now().UTC() recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg, restoreTime) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell) + output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell) assert.NoError(t, err) assert.Contains(t, output, keyspaceName) assert.Contains(t, output, recoveryKS1) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1) + output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1) assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") @@ -279,13 +277,13 @@ func TestRecoveryImpl(t *testing.T) { } // take second backup of value = msgx1 - err = 
localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) + err = localCluster.VtctldClientProcess.ExecuteCommand("Backup", replica1.Alias) assert.NoError(t, err) // restore to first backup recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg, restoreTime) - output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2) + output, err = localCluster.VtctldClientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2) assert.NoError(t, err) assert.Contains(t, output, "vt_insert_test") diff --git a/go/test/endtoend/reparent/emergencyreparent/ers_test.go b/go/test/endtoend/reparent/emergencyreparent/ers_test.go index 8f6638ecb7e..584bccfdfb7 100644 --- a/go/test/endtoend/reparent/emergencyreparent/ers_test.go +++ b/go/test/endtoend/reparent/emergencyreparent/ers_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" @@ -233,10 +234,10 @@ func TestERSPromoteRdonly(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets var err error - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[2].Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[2].Alias, "rdonly") require.NoError(t, err) utils.ConfirmReplication(t, tablets[0], tablets[1:]) @@ -248,7 +249,7 @@ func TestERSPromoteRdonly(t *testing.T) { out, err := utils.ErsIgnoreTablet(clusterInstance, nil, "30s", "30s", []*cluster.Vttablet{tablets[3]}, false) require.NotNil(t, err, out) - out, err = 
clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", utils.KeyspaceShard) + out, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetShard", utils.KeyspaceShard) require.NoError(t, err) require.Contains(t, out, `"uid": 101`, "the primary should still be 101 in the shard info") } @@ -288,20 +289,27 @@ func TestPullFromRdonly(t *testing.T) { // make tablets[1] a rdonly tablet. // rename tablet so that the test is not confusing rdonly := tablets[1] - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly.Alias, "rdonly") require.NoError(t, err) // confirm that all the tablets can replicate successfully right now utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{rdonly, tablets[2], tablets[3]}) // stop replication on the other two tablets - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", tablets[2].Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", tablets[3].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", tablets[3].Alias) require.NoError(t, err) // stop semi-sync on the primary so that any transaction now added does not require an ack - utils.RunSQL(ctx, t, "SET GLOBAL rpl_semi_sync_master_enabled = false", tablets[0]) + semisyncType, err := utils.SemiSyncExtensionLoaded(ctx, tablets[0]) + require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + utils.RunSQL(ctx, t, "SET GLOBAL rpl_semi_sync_source_enabled = false", tablets[0]) + case mysql.SemiSyncTypeMaster: + utils.RunSQL(ctx, t, "SET GLOBAL rpl_semi_sync_master_enabled = false", tablets[0]) + } // confirm that rdonly is able to replicate from our primary // This will also introduce a new transaction 
into the rdonly tablet which the other 2 replicas don't have @@ -311,9 +319,9 @@ func TestPullFromRdonly(t *testing.T) { utils.StopTablet(t, tablets[0], true) // start the replication back on the two tablets - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[3].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[3].Alias) require.NoError(t, err) // check that tablets[2] and tablets[3] still only has 1 value @@ -349,9 +357,12 @@ func TestNoReplicationStatusAndIOThreadStopped(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `STOP SLAVE; RESET SLAVE ALL`) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `STOP REPLICA`) + require.NoError(t, err) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `RESET REPLICA ALL`) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[3].Alias, `STOP SLAVE IO_THREAD;`) + // + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[3].Alias, `STOP REPLICA IO_THREAD;`) require.NoError(t, err) // Run an additional command in the current primary which will only be acked by tablets[2] and be in its relay log. 
insertedVal := utils.ConfirmReplication(t, tablets[0], nil) @@ -447,7 +458,7 @@ func TestRecoverWithMultipleFailures(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -475,7 +486,7 @@ func TestERSFailFast(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -514,9 +525,9 @@ func TestReplicationStopped(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `STOP SLAVE SQL_THREAD;`) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `STOP REPLICA SQL_THREAD;`) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[2].Alias, `STOP SLAVE;`) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[2].Alias, `STOP REPLICA;`) require.NoError(t, err) // Run an additional command in the current primary which will only be acked by tablets[3] and be in its relay log. 
insertedVal := utils.ConfirmReplication(t, tablets[0], nil) @@ -525,7 +536,7 @@ func TestReplicationStopped(t *testing.T) { require.Error(t, err, "ERS should fail with 2 replicas having replication stopped") // Start replication back on tablet[1] - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", tablets[1].Alias, `START SLAVE;`) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", tablets[1].Alias, `START REPLICA;`) require.NoError(t, err) // Failover to tablets[3] again. This time it should succeed out, err := utils.Ers(clusterInstance, tablets[3], "60s", "30s") diff --git a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go index d5f37dc8604..ad798d61792 100644 --- a/go/test/endtoend/reparent/newfeaturetest/reparent_test.go +++ b/go/test/endtoend/reparent/newfeaturetest/reparent_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" ) @@ -40,7 +41,7 @@ func TestRecoverWithMultipleVttabletFailures(t *testing.T) { utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1] a rdonly tablet. 
- err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "rdonly") require.NoError(t, err) // Confirm that replication is still working as intended @@ -131,18 +132,48 @@ func TestChangeTypeWithoutSemiSync(t *testing.T) { utils.RunSQL(ctx, t, "set global super_read_only = 0", tablet) } - utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_slave;", tablet) - utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_master;", tablet) + semisyncType, err := utils.SemiSyncExtensionLoaded(ctx, tablet) + require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_replica", tablet) + utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_source", tablet) + case mysql.SemiSyncTypeMaster: + utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_slave", tablet) + utils.RunSQL(ctx, t, "UNINSTALL PLUGIN rpl_semi_sync_master", tablet) + default: + require.Fail(t, "Unknown semi sync type") + } } utils.ValidateTopology(t, clusterInstance, true) utils.CheckPrimaryTablet(t, clusterInstance, primary) // Change replica's type to rdonly - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") require.NoError(t, err) // Change tablets type from rdonly back to replica - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "replica") require.NoError(t, err) } + +// TestERSWithWriteInPromoteReplica tests that ERS doesn't fail even if there is a +// write that happens when PromoteReplica is called. 
+func TestERSWithWriteInPromoteReplica(t *testing.T) { + defer cluster.PanicHandler(t) + clusterInstance := utils.SetupReparentCluster(t, "semi_sync") + defer utils.TeardownCluster(clusterInstance) + tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets + utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) + + // Drop a table so that when sidecardb changes are checked, we run a DML query. + utils.RunSQLs(context.Background(), t, []string{ + "set sql_log_bin=0", + `SET @@global.super_read_only=0`, + `DROP TABLE _vt.heartbeat`, + "set sql_log_bin=1", + }, tablets[3]) + _, err := utils.Ers(clusterInstance, tablets[3], "60s", "30s") + require.NoError(t, err, "ERS should not fail even if there is a sidecardb change") +} diff --git a/go/test/endtoend/reparent/plannedreparent/reparent_test.go b/go/test/endtoend/reparent/plannedreparent/reparent_test.go index f7afea1431b..ae9bd6bbc9b 100644 --- a/go/test/endtoend/reparent/plannedreparent/reparent_test.go +++ b/go/test/endtoend/reparent/plannedreparent/reparent_test.go @@ -26,11 +26,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql/replication" - "google.golang.org/protobuf/encoding/protojson" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/reparent/utils" "vitess.io/vitess/go/vt/log" @@ -44,7 +42,7 @@ func TestPrimaryToSpareStateChangeImpossible(t *testing.T) { tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets // We cannot change a primary to spare - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ChangeTabletType", tablets[0].Alias, "spare") require.Error(t, err, out) require.Contains(t, out, "type change PRIMARY -> SPARE is not an allowed transition for ChangeTabletType") } @@ 
-92,19 +90,19 @@ func TestPRSWithDrainedLaggingTablet(t *testing.T) { defer utils.TeardownCluster(clusterInstance) tablets := clusterInstance.Keyspaces[0].Shards[0].Vttablets - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", tablets[1].Alias, "drained") require.NoError(t, err) utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[2], tablets[3]}) // make tablets[1 lag from the other tablets by setting the delay to a large number - utils.RunSQL(context.Background(), t, `stop slave;CHANGE MASTER TO MASTER_DELAY = 1999;start slave;`, tablets[1]) + utils.RunSQLs(context.Background(), t, []string{`stop replica`, `CHANGE REPLICATION SOURCE TO SOURCE_DELAY = 1999`, `start replica;`}, tablets[1]) // insert another row in tablets[1 utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[2], tablets[3]}) // assert that there is indeed only 1 row in tablets[1 - res := utils.RunSQL(context.Background(), t, `select msg from vt_insert_test;`, tablets[1]) + res := utils.RunSQL(context.Background(), t, `select msg from vt_insert_test`, tablets[1]) assert.Equal(t, 1, len(res.Rows)) // Perform a graceful reparent operation @@ -217,8 +215,8 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus if !downPrimary { // commands to stop the current primary - demoteCommands := "SET GLOBAL read_only = ON; FLUSH TABLES WITH READ LOCK; UNLOCK TABLES" - utils.RunSQL(ctx, t, demoteCommands, tablets[0]) + demoteCommands := []string{"SET GLOBAL read_only = ON", "FLUSH TABLES WITH READ LOCK", "UNLOCK TABLES"} + utils.RunSQLs(ctx, t, demoteCommands, tablets[0]) //Get the position of the old primary and wait for the new one to catch up. 
err := utils.WaitForReplicationPosition(t, tablets[0], tablets[1]) @@ -226,38 +224,49 @@ func reparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessClus } // commands to convert a replica to be writable - promoteReplicaCommands := "STOP SLAVE; RESET SLAVE ALL; SET GLOBAL read_only = OFF;" - utils.RunSQL(ctx, t, promoteReplicaCommands, tablets[1]) + promoteReplicaCommands := []string{"STOP REPLICA", "RESET REPLICA ALL", "SET GLOBAL read_only = OFF"} + utils.RunSQLs(ctx, t, promoteReplicaCommands, tablets[1]) // Get primary position _, gtID := cluster.GetPrimaryPosition(t, *tablets[1], utils.Hostname) // tablets[0] will now be a replica of tablets[1 - changeReplicationSourceCommands := fmt.Sprintf("RESET MASTER; RESET SLAVE; SET GLOBAL gtid_purged = '%s';"+ - "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1;"+ - "START SLAVE;", gtID, utils.Hostname, tablets[1].MySQLPort) - utils.RunSQL(ctx, t, changeReplicationSourceCommands, tablets[0]) + resetCmd, err := tablets[0].VttabletProcess.ResetBinaryLogsCommand() + require.NoError(t, err) + changeReplicationSourceCommands := []string{ + resetCmd, + "RESET REPLICA", + fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", gtID), + fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='vt_repl', SOURCE_AUTO_POSITION = 1", utils.Hostname, tablets[1].MySQLPort), + } + utils.RunSQLs(ctx, t, changeReplicationSourceCommands, tablets[0]) // Capture time when we made tablets[1 writable baseTime := time.Now().UnixNano() / 1000000000 // tablets[2 will be a replica of tablets[1 - changeReplicationSourceCommands = fmt.Sprintf("STOP SLAVE; RESET MASTER; SET GLOBAL gtid_purged = '%s';"+ - "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1;"+ - "START SLAVE;", gtID, utils.Hostname, tablets[1].MySQLPort) - utils.RunSQL(ctx, t, changeReplicationSourceCommands, tablets[2]) + resetCmd, err = 
tablets[2].VttabletProcess.ResetBinaryLogsCommand() + require.NoError(t, err) + changeReplicationSourceCommands = []string{ + "STOP REPLICA", + resetCmd, + fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", gtID), + fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='vt_repl', SOURCE_AUTO_POSITION = 1", utils.Hostname, tablets[1].MySQLPort), + "START REPLICA", + } + utils.RunSQLs(ctx, t, changeReplicationSourceCommands, tablets[2]) // To test the downPrimary, we kill the old primary first and delete its tablet record if downPrimary { err := tablets[0].VttabletProcess.TearDownWithTimeout(30 * time.Second) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", "--", - "--allow_primary", tablets[0].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", + "--allow-primary", tablets[0].Alias) require.NoError(t, err) } // update topology with the new server - err := clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablets[1].Alias) require.NoError(t, err) @@ -295,7 +304,7 @@ func TestReparentWithDownReplica(t *testing.T) { // insert data into the new primary, check the connected replica work insertVal = utils.ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0], tablets[3]}) } else { - assert.Contains(t, out, fmt.Sprintf("TabletManager.PrimaryStatus on %s error", tablets[2].Alias)) + assert.Contains(t, out, fmt.Sprintf("TabletManager.PrimaryStatus on %s", tablets[2].Alias)) // insert data into the old primary, check the connected replica works. The primary tablet shouldn't have changed. 
insertVal = utils.ConfirmReplication(t, tablets[0], []*cluster.Vttablet{tablets[1], tablets[3]}) } @@ -311,7 +320,7 @@ func TestReparentWithDownReplica(t *testing.T) { // We have to StartReplication on tablets[2] since the MySQL instance is restarted and does not have replication running // We earlier used to rely on replicationManager to fix this but we have disabled it in our testing environment for latest versions of vttablet and vtctl. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", tablets[2].Alias) require.NoError(t, err) // wait until it gets the data @@ -331,9 +340,9 @@ func TestChangeTypeSemiSync(t *testing.T) { primary, replica, rdonly1, rdonly2 := tablets[0], tablets[1], tablets[2], tablets[3] // Updated rdonly tablet and set tablet type to rdonly - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "rdonly") require.NoError(t, err) utils.ValidateTopology(t, clusterInstance, true) @@ -342,45 +351,45 @@ func TestChangeTypeSemiSync(t *testing.T) { // Stop replication on rdonly1, to make sure when we make it replica it doesn't start again. // Note we do a similar test for replica -> rdonly below. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly1.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly1.Alias) require.NoError(t, err) // Check semi-sync on replicas. // The flag is only an indication of the value to use next time // we turn replication on, so also check the status. 
// rdonly1 is not replicating, so its status is off. - utils.CheckDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "ON") - utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF") - utils.CheckDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "OFF") - utils.CheckDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "ON") - utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") - utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "OFF") + utils.CheckSemisyncEnabled(ctx, t, replica, true) + utils.CheckSemisyncEnabled(ctx, t, rdonly1, false) + utils.CheckSemisyncEnabled(ctx, t, rdonly2, false) + utils.CheckSemisyncStatus(ctx, t, replica, true) + utils.CheckSemisyncStatus(ctx, t, rdonly1, false) + utils.CheckSemisyncStatus(ctx, t, rdonly2, false) // Change replica to rdonly while replicating, should turn off semi-sync, and restart replication. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replica.Alias, "rdonly") require.NoError(t, err) - utils.CheckDBvar(ctx, t, replica, "rpl_semi_sync_slave_enabled", "OFF") - utils.CheckDBstatus(ctx, t, replica, "Rpl_semi_sync_slave_status", "OFF") + utils.CheckSemisyncEnabled(ctx, t, replica, false) + utils.CheckSemisyncStatus(ctx, t, replica, false) // Change rdonly1 to replica, should turn on semi-sync, and not start replication. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "replica") require.NoError(t, err) - utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "ON") - utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") + utils.CheckSemisyncEnabled(ctx, t, rdonly1, true) + utils.CheckSemisyncStatus(ctx, t, rdonly1, false) utils.CheckReplicaStatus(ctx, t, rdonly1) // Now change from replica back to rdonly, make sure replication is still not enabled. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly1.Alias, "rdonly") require.NoError(t, err) - utils.CheckDBvar(ctx, t, rdonly1, "rpl_semi_sync_slave_enabled", "OFF") - utils.CheckDBstatus(ctx, t, rdonly1, "Rpl_semi_sync_slave_status", "OFF") + utils.CheckSemisyncEnabled(ctx, t, rdonly1, false) + utils.CheckSemisyncStatus(ctx, t, rdonly1, false) utils.CheckReplicaStatus(ctx, t, rdonly1) // Change rdonly2 to replica, should turn on semi-sync, and restart replication. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonly2.Alias, "replica") require.NoError(t, err) - utils.CheckDBvar(ctx, t, rdonly2, "rpl_semi_sync_slave_enabled", "ON") - utils.CheckDBstatus(ctx, t, rdonly2, "Rpl_semi_sync_slave_status", "ON") + utils.CheckSemisyncEnabled(ctx, t, rdonly2, true) + utils.CheckSemisyncStatus(ctx, t, rdonly2, true) } // TestCrossCellDurability tests 2 things - diff --git a/go/test/endtoend/reparent/prscomplex/main_test.go b/go/test/endtoend/reparent/prscomplex/main_test.go index 88276012781..88e3d6c09fa 100644 --- a/go/test/endtoend/reparent/prscomplex/main_test.go +++ b/go/test/endtoend/reparent/prscomplex/main_test.go @@ -63,12 +63,12 @@ func TestMain(m *testing.M) { SchemaSQL: schemaSQL, } clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, - "--queryserver-config-query-timeout=9000", + "--queryserver-config-query-timeout=9000s", "--queryserver-config-pool-size=3", "--queryserver-config-stream-pool-size=3", "--queryserver-config-transaction-cap=2", - "--queryserver-config-transaction-timeout=20", - "--shutdown_grace_period=3", + "--queryserver-config-transaction-timeout=20s", + "--shutdown_grace_period=3s", "--queryserver-config-schema-change-signal=false") err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false) if err != nil { diff --git a/go/test/endtoend/reparent/prssettingspool/main_test.go b/go/test/endtoend/reparent/prssettingspool/main_test.go index a9f4312caea..872f1867c77 100644 --- a/go/test/endtoend/reparent/prssettingspool/main_test.go +++ b/go/test/endtoend/reparent/prssettingspool/main_test.go @@ -104,13 +104,13 @@ func TestSettingsPoolWithTXAndPRS(t *testing.T) { // prs should happen without any error. 
text, err := rutils.Prs(t, clusterInstance, tablets[1]) require.NoError(t, err, text) - rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute) + rutils.WaitForTabletToBeServing(ctx, t, clusterInstance, tablets[0], 1*time.Minute) defer func() { // reset state text, err = rutils.Prs(t, clusterInstance, tablets[0]) require.NoError(t, err, text) - rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute) + rutils.WaitForTabletToBeServing(ctx, t, clusterInstance, tablets[1], 1*time.Minute) }() // no error should occur and it should go to the right tablet. @@ -134,12 +134,12 @@ func TestSettingsPoolWithoutTXAndPRS(t *testing.T) { // prs should happen without any error. text, err := rutils.Prs(t, clusterInstance, tablets[1]) require.NoError(t, err, text) - rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[0], 1*time.Minute) + rutils.WaitForTabletToBeServing(ctx, t, clusterInstance, tablets[0], 1*time.Minute) defer func() { // reset state text, err = rutils.Prs(t, clusterInstance, tablets[0]) require.NoError(t, err, text) - rutils.WaitForTabletToBeServing(t, clusterInstance, tablets[1], 1*time.Minute) + rutils.WaitForTabletToBeServing(ctx, t, clusterInstance, tablets[1], 1*time.Minute) }() // no error should occur and it should go to the right tablet. 
diff --git a/go/test/endtoend/reparent/utils/utils.go b/go/test/endtoend/reparent/utils/utils.go index 4ef13819e85..5038352d721 100644 --- a/go/test/endtoend/reparent/utils/utils.go +++ b/go/test/endtoend/reparent/utils/utils.go @@ -34,7 +34,6 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/tabletconn" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" @@ -185,7 +184,7 @@ func setupShard(ctx context.Context, t *testing.T, clusterInstance *cluster.Loca } // Initialize shard - err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) require.NoError(t, err) ValidateTopology(t, clusterInstance, true) @@ -258,6 +257,21 @@ func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams { return connParams } +// RunSQLs is used to run SQL commands directly on the MySQL instance of a vttablet. All commands are +// run in a single connection. 
+func RunSQLs(ctx context.Context, t *testing.T, sqls []string, tablet *cluster.Vttablet) (results []*sqltypes.Result) { + tabletParams := getMysqlConnParam(tablet) + conn, err := mysql.Connect(ctx, &tabletParams) + require.Nil(t, err) + defer conn.Close() + + for _, sql := range sqls { + result := execute(t, conn, sql) + results = append(results, result) + } + return results +} + // RunSQL is used to run a SQL command directly on the MySQL instance of a vttablet func RunSQL(ctx context.Context, t *testing.T, sql string, tablet *cluster.Vttablet) *sqltypes.Result { tabletParams := getMysqlConnParam(tablet) @@ -291,21 +305,21 @@ func PrsAvoid(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *c // PrsWithTimeout runs PRS func PrsWithTimeout(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, avoid bool, actionTimeout, waitTimeout string) (string, error) { args := []string{ - "PlannedReparentShard", "--", - "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} + "PlannedReparentShard", + fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} if actionTimeout != "" { args = append(args, "--action_timeout", actionTimeout) } if waitTimeout != "" { - args = append(args, "--wait_replicas_timeout", waitTimeout) + args = append(args, "--wait-replicas-timeout", waitTimeout) } if avoid { - args = append(args, "--avoid_tablet") + args = append(args, "--avoid-primary") } else { - args = append(args, "--new_primary") + args = append(args, "--new-primary") } args = append(args, tab.Alias) - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) return out, err } @@ -320,15 +334,15 @@ func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster. 
if timeout != "" { args = append(args, "--action_timeout", timeout) } - args = append(args, "EmergencyReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) + args = append(args, "EmergencyReparentShard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) if tab != nil { - args = append(args, "--new_primary", tab.Alias) + args = append(args, "--new-primary", tab.Alias) } if waitReplicasTimeout != "" { - args = append(args, "--wait_replicas_timeout", waitReplicasTimeout) + args = append(args, "--wait-replicas-timeout", waitReplicasTimeout) } if preventCrossCellPromotion { - args = append(args, "--prevent_cross_cell_promotion=true") + args = append(args, "--prevent-cross-cell-promotion") } if len(tabletsToIgnore) != 0 { tabsString := "" @@ -339,9 +353,9 @@ func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster. tabsString = tabsString + "," + vttablet.Alias } } - args = append(args, "--ignore_replicas", tabsString) + args = append(args, "--ignore-replicas", tabsString) } - return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) + return clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) } // ErsWithVtctl runs ERS via vtctl binary @@ -359,10 +373,10 @@ func ValidateTopology(t *testing.T, clusterInstance *cluster.LocalProcessCluster args := []string{"Validate"} if pingTablets { - args = append(args, "--", "--ping-tablets=true") + args = append(args, "--ping-tablets") } - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) - require.Empty(t, out) + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(args...) 
+ require.Contains(t, out, "no issues found") require.NoError(t, err) } @@ -383,17 +397,14 @@ func ConfirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*clu // ConfirmOldPrimaryIsHangingAround confirms that the old primary is hanging around func ConfirmOldPrimaryIsHangingAround(t *testing.T, clusterInstance *cluster.LocalProcessCluster) { - out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") + out, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") require.Error(t, err) require.Contains(t, out, "already has primary") } // CheckPrimaryTablet makes sure the tablet type is primary, and its health check agrees. func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletInfo.GetType()) @@ -409,10 +420,7 @@ func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessClust // isHealthyPrimaryTablet will return if tablet is primary AND healthy. 
func isHealthyPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) bool { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.Nil(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.Nil(t, err) if tabletInfo.GetType() != topodatapb.TabletType_PRIMARY { return false @@ -468,9 +476,20 @@ func CheckInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vtta } func CheckSemiSyncSetupCorrectly(t *testing.T, tablet *cluster.Vttablet, semiSyncVal string) { - dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", "") + semisyncType, err := tablet.VttabletProcess.SemiSyncExtensionLoaded() require.NoError(t, err) - require.Equal(t, semiSyncVal, dbVar) + switch semisyncType { + case mysql.SemiSyncTypeSource: + dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_replica_enabled", "") + require.NoError(t, err) + require.Equal(t, semiSyncVal, dbVar) + case mysql.SemiSyncTypeMaster: + dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", "") + require.NoError(t, err) + require.Equal(t, semiSyncVal, dbVar) + default: + require.Fail(t, "Unknown semi sync type") + } } // CheckCountOfInsertedValues checks that the number of inserted values matches the given count on the given tablet @@ -502,7 +521,7 @@ func RestartTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, t tab.MysqlctlProcess.InitMysql = false err := tab.MysqlctlProcess.Start() require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, KeyspaceName, Hostname, ShardName) + err = clusterInstance.InitTablet(tab, KeyspaceName, ShardName) require.NoError(t, err) } @@ -511,7 +530,7 @@ func ResurrectTablet(ctx context.Context, t *testing.T, clusterInstance *cluster 
tab.MysqlctlProcess.InitMysql = false err := tab.MysqlctlProcess.Start() require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, KeyspaceName, Hostname, ShardName) + err = clusterInstance.InitTablet(tab, KeyspaceName, ShardName) require.NoError(t, err) // As there is already a primary the new replica will come directly in SERVING state @@ -526,9 +545,9 @@ func ResurrectTablet(ctx context.Context, t *testing.T, clusterInstance *cluster // DeleteTablet is used to delete the given tablet func DeleteTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand( - "DeleteTablet", "--", - "--allow_primary", + err := clusterInstance.VtctldClientProcess.ExecuteCommand( + "DeleteTablets", + "--allow-primary", tab.Alias) require.NoError(t, err) } @@ -553,7 +572,7 @@ func GetNewPrimary(t *testing.T, clusterInstance *cluster.LocalProcessCluster) * // GetShardReplicationPositions gets the shards replication positions. // This should not generally be called directly, instead use the WaitForReplicationToCatchup method. 
func GetShardReplicationPositions(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keyspaceName, shardName string, doPrint bool) []string { - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( "ShardReplicationPositions", fmt.Sprintf("%s/%s", keyspaceName, shardName)) require.NoError(t, err) strArray := strings.Split(output, "\n") @@ -591,7 +610,7 @@ func WaitForReplicationToStart(t *testing.T, clusterInstance *cluster.LocalProce // CheckReplicaStatus checks the replication status and asserts that the replication is stopped func CheckReplicaStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet) { - qr := RunSQL(ctx, t, "show slave status", tablet) + qr := RunSQL(ctx, t, "show replica status", tablet) IOThreadRunning := fmt.Sprintf("%v", qr.Rows[0][10]) SQLThreadRunning := fmt.Sprintf("%v", qr.Rows[0][10]) assert.Equal(t, IOThreadRunning, "VARCHAR(\"No\")") @@ -600,12 +619,23 @@ func CheckReplicaStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttab // CheckReparentFromOutside checks that cluster was reparented from outside func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, downPrimary bool, baseTime int64) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, KeyspaceShard) - require.Nil(t, err, "error should be Nil") - if !downPrimary { - assertNodeCount(t, result, int(3)) + if clusterInstance.VtctlMajorVersion > 19 { // TODO: (ajm188) remove else clause after next release + result, err := clusterInstance.VtctldClientProcess.GetShardReplication(KeyspaceName, ShardName, cell1) + require.Nil(t, err, "error should be Nil") + require.NotNil(t, result[cell1], "result should not be nil") + if !downPrimary { + assert.Len(t, result[cell1].Nodes, 3) + } else { + assert.Len(t, result[cell1].Nodes, 2) + } } else { - 
assertNodeCount(t, result, int(2)) + result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, KeyspaceShard) + require.Nil(t, err, "error should be Nil") + if !downPrimary { + assertNodeCount(t, result, int(3)) + } else { + assertNodeCount(t, result, int(2)) + } } // make sure the primary status page says it's the primary @@ -614,7 +644,7 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces // make sure the primary health stream says it's the primary too // (health check is disabled on these servers, force it first) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) require.NoError(t, err) shrs, err := clusterInstance.StreamTabletHealth(context.Background(), tablet, 1) @@ -625,6 +655,16 @@ func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProces assert.True(t, streamHealthResponse.PrimaryTermStartTimestamp >= baseTime) } +func assertNodeCount(t *testing.T, result string, want int) { + resultMap := make(map[string]any) + err := json.Unmarshal([]byte(result), &resultMap) + require.NoError(t, err) + + nodes := reflect.ValueOf(resultMap["nodes"]) + got := nodes.Len() + assert.Equal(t, want, got) +} + // WaitForReplicationPosition waits for tablet B to catch up to the replication position of tablet A. 
func WaitForReplicationPosition(t *testing.T, tabletA *cluster.Vttablet, tabletB *cluster.Vttablet) error { posA, _ := cluster.GetPrimaryPosition(t, *tabletA, Hostname) @@ -650,40 +690,70 @@ func positionAtLeast(t *testing.T, tablet *cluster.Vttablet, a string, b string) return isAtleast } -func assertNodeCount(t *testing.T, result string, want int) { - resultMap := make(map[string]any) - err := json.Unmarshal([]byte(result), &resultMap) +func CheckSemisyncEnabled(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, enabled bool) { + tabletParams := getMysqlConnParam(tablet) + conn, err := mysql.Connect(ctx, &tabletParams) require.NoError(t, err) + defer conn.Close() - nodes := reflect.ValueOf(resultMap["nodes"]) - got := nodes.Len() - assert.Equal(t, want, got) + status := "OFF" + if enabled { + status = "ON" + } + + semisyncType, err := SemiSyncExtensionLoaded(ctx, tablet) + require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + qr := execute(t, conn, "show variables like 'rpl_semi_sync_replica_enabled'") + got := fmt.Sprintf("%v", qr.Rows) + want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", "rpl_semi_sync_replica_enabled", status) + assert.Equal(t, want, got) + case mysql.SemiSyncTypeMaster: + qr := execute(t, conn, "show variables like 'rpl_semi_sync_slave_enabled'") + got := fmt.Sprintf("%v", qr.Rows) + want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", "rpl_semi_sync_slave_enabled", status) + assert.Equal(t, want, got) + } } -// CheckDBvar checks the db var -func CheckDBvar(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) { +func CheckSemisyncStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, enabled bool) { tabletParams := getMysqlConnParam(tablet) conn, err := mysql.Connect(ctx, &tabletParams) require.NoError(t, err) defer conn.Close() - qr := execute(t, conn, fmt.Sprintf("show variables like '%s'", variable)) - got := fmt.Sprintf("%v", qr.Rows) 
- want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status) - assert.Equal(t, want, got) -} + status := "OFF" + if enabled { + status = "ON" + } -// CheckDBstatus checks the db status -func CheckDBstatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) { + semisyncType, err := SemiSyncExtensionLoaded(ctx, tablet) + require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + qr := execute(t, conn, "show status like 'Rpl_semi_sync_replica_status'") + got := fmt.Sprintf("%v", qr.Rows) + want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", "Rpl_semi_sync_replica_status", status) + assert.Equal(t, want, got) + case mysql.SemiSyncTypeMaster: + qr := execute(t, conn, "show status like 'Rpl_semi_sync_slave_status'") + got := fmt.Sprintf("%v", qr.Rows) + want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", "Rpl_semi_sync_slave_status", status) + assert.Equal(t, want, got) + default: + assert.Fail(t, "unknown semi-sync type") + } +} + +func SemiSyncExtensionLoaded(ctx context.Context, tablet *cluster.Vttablet) (mysql.SemiSyncType, error) { tabletParams := getMysqlConnParam(tablet) conn, err := mysql.Connect(ctx, &tabletParams) - require.NoError(t, err) + if err != nil { + return mysql.SemiSyncTypeUnknown, err + } defer conn.Close() - - qr := execute(t, conn, fmt.Sprintf("show status like '%s'", variable)) - got := fmt.Sprintf("%v", qr.Rows) - want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status) - assert.Equal(t, want, got) + return conn.SemiSyncExtensionLoaded() } // SetReplicationSourceFailed returns true if the given output from PRS had failed because the given tablet was @@ -695,7 +765,7 @@ func SetReplicationSourceFailed(tablet *cluster.Vttablet, prsOut string) bool { // CheckReplicationStatus checks that the replication for sql and io threads is setup as expected func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, 
sqlThreadRunning bool, ioThreadRunning bool) { - res := RunSQL(ctx, t, "show slave status;", tablet) + res := RunSQL(ctx, t, "show replica status", tablet) if ioThreadRunning { require.Equal(t, "Yes", res.Rows[0][10].ToString()) } else { @@ -709,11 +779,11 @@ func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.V } } -func WaitForTabletToBeServing(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, timeout time.Duration) { - vTablet, err := clusterInstance.VtctlclientGetTablet(tablet) +func WaitForTabletToBeServing(ctx context.Context, t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, timeout time.Duration) { + vTablet, err := clusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) - tConn, err := tabletconn.GetDialer()(vTablet, false) + tConn, err := tabletconn.GetDialer()(ctx, vTablet, false) require.NoError(t, err) newCtx, cancel := context.WithTimeout(context.Background(), timeout) diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index 2dc79840018..79cb4a0174e 100644 --- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -53,8 +53,8 @@ var ( ) const ( - testDataPath = "../../onlineddl/vrepl_suite/testdata" - defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" + testDataPath = "../../onlineddl/vrepl_suite/testdata" + sqlModeAllowsZeroDate = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" ) type testTableSchema struct { @@ -202,7 +202,7 @@ func testSingle(t *testing.T, testName string) { return } - sqlModeQuery := fmt.Sprintf("set @@global.sql_mode='%s'", defaultSQLMode) + sqlModeQuery := fmt.Sprintf("set @@global.sql_mode='%s'", sqlModeAllowsZeroDate) _ = 
mysqlExec(t, sqlModeQuery, "") _ = mysqlExec(t, "set @@global.event_scheduler=0", "") @@ -281,8 +281,8 @@ func testSingle(t *testing.T, testName string) { // hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementIgnore} // // count := 20 // // for i := 0; i < count; i++ { -// // fromTestTableSchema := fromTestTableSchemas[rand.Intn(len(fromTestTableSchemas))] -// // toTestTableSchema := toTestTableSchemas[rand.Intn(len(toTestTableSchemas))] +// // fromTestTableSchema := fromTestTableSchemas[rand.IntN(len(fromTestTableSchemas))] +// // toTestTableSchema := toTestTableSchemas[rand.IntN(len(toTestTableSchemas))] // // testName := fmt.Sprintf("%s/%s", fromTestTableSchema.testName, toTestTableSchema.testName) // // t.Run(testName, func(t *testing.T) { // // validateDiff(t, fromTestTableSchema.tableSchema, toTestTableSchema.tableSchema, hints) @@ -345,18 +345,19 @@ func ignoreAutoIncrement(t *testing.T, createTable string) string { func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, allowSchemadiffNormalization bool, hints *schemadiff.DiffHints) { // turn the "from" and "to" create statement strings (which we just read via SHOW CREATE TABLE into sqlparser.CreateTable statement) - fromStmt, err := sqlparser.ParseStrictDDL(fromCreateTable) + env := schemadiff.NewTestEnv() + fromStmt, err := env.Parser().ParseStrictDDL(fromCreateTable) require.NoError(t, err) fromCreateTableStatement, ok := fromStmt.(*sqlparser.CreateTable) require.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(toCreateTable) + toStmt, err := env.Parser().ParseStrictDDL(toCreateTable) require.NoError(t, err) toCreateTableStatement, ok := toStmt.(*sqlparser.CreateTable) require.True(t, ok) // The actual diff logic here! 
- diff, err := schemadiff.DiffTables(fromCreateTableStatement, toCreateTableStatement, hints) + diff, err := schemadiff.DiffTables(env, fromCreateTableStatement, toCreateTableStatement, hints) assert.NoError(t, err) // The diff can be empty or there can be an actual ALTER TABLE statement @@ -385,7 +386,6 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al // the table generated by the test's own ALTER statement? // But wait, there's caveats. - if toCreateTable != resultCreateTable { // schemadiff's ALTER statement can normalize away CHARACTER SET and COLLATION definitions: // when altering a column's CHARTSET&COLLATION into the table's values, schemadiff just strips the @@ -394,20 +394,20 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al // structure is identical. And so we accept that there can be a normalization issue. if allowSchemadiffNormalization { { - stmt, err := sqlparser.ParseStrictDDL(toCreateTable) + stmt, err := env.Parser().ParseStrictDDL(toCreateTable) require.NoError(t, err) createTableStatement, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - c, err := schemadiff.NewCreateTableEntity(createTableStatement) + c, err := schemadiff.NewCreateTableEntity(env, createTableStatement) require.NoError(t, err) toCreateTable = c.Create().CanonicalStatementString() } { - stmt, err := sqlparser.ParseStrictDDL(resultCreateTable) + stmt, err := env.Parser().ParseStrictDDL(resultCreateTable) require.NoError(t, err) createTableStatement, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - c, err := schemadiff.NewCreateTableEntity(createTableStatement) + c, err := schemadiff.NewCreateTableEntity(env, createTableStatement) require.NoError(t, err) resultCreateTable = c.Create().CanonicalStatementString() } @@ -418,12 +418,12 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al assert.Equal(t, toCreateTable, resultCreateTable, "mismatched table structure. 
ALTER query was: %s", diffedAlterQuery) // Also, let's see that our diff agrees there's no change: - resultStmt, err := sqlparser.ParseStrictDDL(resultCreateTable) + resultStmt, err := env.Parser().ParseStrictDDL(resultCreateTable) require.NoError(t, err) resultCreateTableStatement, ok := resultStmt.(*sqlparser.CreateTable) require.True(t, ok) - resultDiff, err := schemadiff.DiffTables(toCreateTableStatement, resultCreateTableStatement, hints) + resultDiff, err := schemadiff.DiffTables(env, toCreateTableStatement, resultCreateTableStatement, hints) assert.NoError(t, err) assert.Nil(t, resultDiff) } diff --git a/go/test/endtoend/sharded/sharded_keyspace_test.go b/go/test/endtoend/sharded/sharded_keyspace_test.go index 857dc455206..f311404ad7e 100644 --- a/go/test/endtoend/sharded/sharded_keyspace_test.go +++ b/go/test/endtoend/sharded/sharded_keyspace_test.go @@ -108,9 +108,9 @@ func TestShardedKeyspace(t *testing.T) { shard1Primary := shard1.Vttablets[0] shard2Primary := shard2.Vttablets[0] - err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, cell, shard1Primary.TabletUID) require.Nil(t, err) - err = clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, cell, shard2Primary.TabletUID) require.Nil(t, err) err = clusterInstance.StartVTOrc(keyspaceName) @@ -125,7 +125,7 @@ func TestShardedKeyspace(t *testing.T) { _, err = shard2Primary.VttabletProcess.QueryTablet(sqlSchemaReverse, keyspaceName, true) require.Nil(t, err) - if err = clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema); err != nil { + if err = clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, vSchema); err != nil { log.Error(err.Error()) return } @@ -136,13 +136,13 @@ func 
TestShardedKeyspace(t *testing.T) { shard2Primary.Alias, shard2.Vttablets[1].Alias) - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", shard1Primary.Alias) - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", shard2Primary.Alias) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", shard1Primary.Alias, "true") + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", shard2Primary.Alias, "true") _, _ = shard1Primary.VttabletProcess.QueryTablet("insert into vt_select_test (id, msg) values (1, 'test 1')", keyspaceName, true) _, _ = shard2Primary.VttabletProcess.QueryTablet("insert into vt_select_test (id, msg) values (10, 'test 10')", keyspaceName, true) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "--", "--ping-tablets") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate", "--ping-tablets") require.Nil(t, err) rows, err := shard1Primary.VttabletProcess.QueryTablet("select id, msg from vt_select_test order by id", keyspaceName, true) @@ -164,9 +164,9 @@ func TestShardedKeyspace(t *testing.T) { assert.Contains(t, output, shard1Primary.Alias+": CREATE TABLE") assert.Contains(t, output, shard2Primary.Alias+": CREATE TABLE") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateVersionShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateVersionShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("GetPermissions", shard1.Vttablets[1].Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("GetPermissions", shard1.Vttablets[1].Alias) require.Nil(t, err) err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidatePermissionsShard", fmt.Sprintf("%s/%s", keyspaceName, shard1.Name)) require.Nil(t, err) @@ -184,7 +184,7 @@ func TestShardedKeyspace(t *testing.T) { func reloadSchemas(t 
*testing.T, aliases ...string) { for _, alias := range aliases { - if err := clusterInstance.VtctlclientProcess.ExecuteCommand("ReloadSchema", alias); err != nil { + if err := clusterInstance.VtctldClientProcess.ExecuteCommand("ReloadSchema", alias); err != nil { assert.Fail(t, "Unable to reload schema") } diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go index 8a3dd4f9b73..ca4fe5f6094 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test_helpers.go @@ -33,7 +33,7 @@ import ( "encoding/json" "fmt" "io" - "math/rand" + "math/rand/v2" "net/http" "sync" "testing" @@ -71,7 +71,7 @@ const ( type threadParams struct { quit bool rpcs int // Number of queries successfully executed. - errors int // Number of failed queries. + errors []error // Errors returned by the queries. waitForNotification chan bool // Channel used to notify the main thread that this thread executed notifyLock sync.Mutex // notifyLock guards the two fields notifyAfterNSuccessfulRpcs/rpcsSoFar. notifyAfterNSuccessfulRpcs int // If 0, notifications are disabled @@ -96,14 +96,14 @@ func (c *threadParams) threadRun(wg *sync.WaitGroup, vtParams *mysql.ConnParams) if c.reservedConn { _, err = conn.ExecuteFetch("set default_week_format = 1", 1000, true) if err != nil { - c.errors++ + c.errors = append(c.errors, err) log.Errorf("error setting default_week_format: %v", err) } } for !c.quit { err = c.executeFunction(c, conn) if err != nil { - c.errors++ + c.errors = append(c.errors, err) log.Errorf("error executing function %s: %v", c.typ, err) } c.rpcs++ @@ -174,7 +174,7 @@ func updateExecute(c *threadParams, conn *mysql.Conn) error { // Sleep between [0, 1] seconds to prolong the time the transaction is in // flight. This is more realistic because applications are going to keep // their transactions open for longer as well. 
- dur := time.Duration(rand.Int31n(1000)) * time.Millisecond + dur := time.Duration(rand.Int32N(1000)) * time.Millisecond if c.slowQueries { dur = dur + 1*time.Second } @@ -229,7 +229,7 @@ func (bt *BufferingTest) createCluster() (*cluster.LocalProcessCluster, int) { } clusterInstance.VtTabletExtraArgs = []string{ "--health_check_interval", "1s", - "--queryserver-config-transaction-timeout", "20", + "--queryserver-config-transaction-timeout", "20s", } if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { return nil, 1 @@ -241,8 +241,8 @@ func (bt *BufferingTest) createCluster() (*cluster.LocalProcessCluster, int) { "--buffer_window", "10m", "--buffer_max_failover_duration", "10m", "--buffer_min_time_between_failovers", "20m", - "--buffer_implementation", "keyspace_events", "--tablet_refresh_interval", "1s", + "--buffer_drain_concurrency", "4", } clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, bt.VtGateExtraArgs...) @@ -343,8 +343,8 @@ func (bt *BufferingTest) Test(t *testing.T) { updateThreadInstance.stop() // Both threads must not see any error - assert.Zero(t, readThreadInstance.errors, "found errors in read queries") - assert.Zero(t, updateThreadInstance.errors, "found errors in tx queries") + assert.Empty(t, readThreadInstance.errors, "found errors in read queries") + assert.Empty(t, updateThreadInstance.errors, "found errors in tx queries") //At least one thread should have been buffered. //This may fail if a failover is too fast. Add retries then. 
diff --git a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go index d3828eb8166..c8aaf0ae7fc 100644 --- a/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reparent/failover_buffer_test.go @@ -29,9 +29,9 @@ import ( "vitess.io/vitess/go/vt/log" ) -const ( - demoteQuery = "SET GLOBAL read_only = ON;FLUSH TABLES WITH READ LOCK;UNLOCK TABLES;" - promoteQuery = "STOP SLAVE;RESET SLAVE ALL;SET GLOBAL read_only = OFF;" +var ( + demoteQueries = []string{"SET GLOBAL read_only = ON", "FLUSH TABLES WITH READ LOCK", "UNLOCK TABLES"} + promoteQueries = []string{"STOP REPLICA", "RESET REPLICA ALL", "SET GLOBAL read_only = OFF"} hostname = "localhost" ) @@ -48,7 +48,8 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro replica := clusterInstance.Keyspaces[0].Shards[0].Vttablets[1] oldPrimary := primary newPrimary := replica - primary.VttabletProcess.QueryTablet(demoteQuery, keyspaceUnshardedName, true) + err := primary.VttabletProcess.QueryTabletMultiple(demoteQueries, keyspaceUnshardedName, true) + require.NoError(t, err) // Wait for replica to catch up to primary. cluster.WaitForReplicationPos(t, primary, replica, false, time.Minute) @@ -62,7 +63,8 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro } // Promote replica to new primary. - replica.VttabletProcess.QueryTablet(promoteQuery, keyspaceUnshardedName, true) + err = replica.VttabletProcess.QueryTabletMultiple(promoteQueries, keyspaceUnshardedName, true) + require.NoError(t, err) // Configure old primary to replicate from new primary. @@ -70,11 +72,20 @@ func failoverExternalReparenting(t *testing.T, clusterInstance *cluster.LocalPro // Use 'localhost' as hostname because Travis CI worker hostnames // are too long for MySQL replication. 
- changeSourceCommands := fmt.Sprintf("RESET SLAVE;SET GLOBAL gtid_slave_pos = '%s';CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d ,MASTER_USER='vt_repl', MASTER_USE_GTID = slave_pos;START SLAVE;", gtID, "localhost", newPrimary.MySQLPort) - oldPrimary.VttabletProcess.QueryTablet(changeSourceCommands, keyspaceUnshardedName, true) + resetCmd, err := oldPrimary.VttabletProcess.ResetBinaryLogsCommand() + require.NoError(t, err) + changeSourceCommands := []string{ + "STOP REPLICA", + resetCmd, + fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", gtID), + fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='vt_repl', SOURCE_AUTO_POSITION = 1", "localhost", newPrimary.MySQLPort), + "START REPLICA", + } + err = oldPrimary.VttabletProcess.QueryTabletMultiple(changeSourceCommands, keyspaceUnshardedName, true) + require.NoError(t, err) // Notify the new vttablet primary about the reparent. - err := clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", newPrimary.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", newPrimary.Alias) require.NoError(t, err) } @@ -83,9 +94,9 @@ func failoverPlannedReparenting(t *testing.T, clusterInstance *cluster.LocalProc reads.ExpectQueries(10) writes.ExpectQueries(10) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", + err := clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceUnshardedName, "0"), - "--new_primary", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) + "--new-primary", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) require.NoError(t, err) } diff --git a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go index ae922108012..5e439cc9fff 100644 --- 
a/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/reshard/sharded_buffer_test.go @@ -21,16 +21,15 @@ import ( "testing" "time" - "github.com/buger/jsonparser" - - "vitess.io/vitess/go/vt/log" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/tabletgateway/buffer" + "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/test/endtoend/cluster" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) const ( @@ -43,11 +42,16 @@ func waitForLowLag(t *testing.T, clusterInstance *cluster.LocalProcessCluster, k waitDuration := 500 * time.Millisecond duration := maxWait for duration > 0 { - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", keyspace, workflow), "Show") + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Workflow", "--keyspace", keyspace, "show", "--workflow", workflow) require.NoError(t, err) - lagSeconds, err = jsonparser.GetInt([]byte(output), "MaxVReplicationTransactionLag") + var resp vtctldatapb.GetWorkflowsResponse + err = json2.Unmarshal([]byte(output), &resp) require.NoError(t, err) + require.GreaterOrEqual(t, len(resp.Workflows), 1, "response should have at least one workflow") + lagSeconds := resp.Workflows[0].MaxVReplicationTransactionLag + + require.NoError(t, err, output) if lagSeconds <= acceptableLagSeconds { log.Infof("waitForLowLag acceptable for workflow %s, keyspace %s, current lag is %d", workflow, keyspace, lagSeconds) break @@ -68,9 +72,8 @@ func reshard02(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keysp err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false) require.NoError(t, err) workflowName := "buf2buf" - workflow := fmt.Sprintf("%s.%s", keyspaceName, "buf2buf") - err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--source_shards", "0", "--target_shards", "-80,80-", "Create", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "Create", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--source-shards", "0", "--target-shards", "-80,80-") require.NoError(t, err) // Execute the resharding operation @@ -78,13 +81,13 @@ func reshard02(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keysp writes.ExpectQueries(25) waitForLowLag(t, clusterInstance, keyspaceName, workflowName) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=rdonly,replica", "SwitchTraffic", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--tablet-types=rdonly,replica") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "--", "--tablet_types=primary", "SwitchTraffic", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "SwitchTraffic", "--target-keyspace", keyspaceName, "--workflow", workflowName, "--tablet-types=primary") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "Complete", workflow) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Reshard", "--target-keyspace", keyspaceName, "--workflow", workflowName, "Complete") require.NoError(t, err) } diff --git a/go/test/endtoend/tabletgateway/main_test.go b/go/test/endtoend/tabletgateway/main_test.go index da4fe711f64..354be6969d3 100644 --- a/go/test/endtoend/tabletgateway/main_test.go +++ b/go/test/endtoend/tabletgateway/main_test.go @@ -18,6 +18,7 @@ package healthcheck import ( "flag" + "fmt" "os" "testing" @@ -26,11 +27,12 @@ import ( ) var ( - clusterInstance *cluster.LocalProcessCluster - vtParams mysql.ConnParams - keyspaceName = "commerce" - cell = "zone1" - 
sqlSchema = `create table product( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + keyspaceName = "commerce" + vtgateGrpcAddress string + cell = "zone1" + sqlSchema = `create table product( sku varbinary(128), description varbinary(128), price bigint, @@ -64,7 +66,7 @@ func TestMain(m *testing.M) { exitCode := func() int { clusterInstance = cluster.NewCluster(cell, "localhost") - clusterInstance.VtTabletExtraArgs = []string{"--health_check_interval", "1s"} + clusterInstance.VtTabletExtraArgs = []string{"--health_check_interval", "1s", "--shutdown_grace_period", "3s"} defer clusterInstance.Teardown() // Start topo server @@ -96,6 +98,7 @@ func TestMain(m *testing.M) { Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, } + vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) return m.Run() }() os.Exit(exitCode) diff --git a/go/test/endtoend/tabletgateway/vtgate_test.go b/go/test/endtoend/tabletgateway/vtgate_test.go index be227927981..1f4f8758e16 100644 --- a/go/test/endtoend/tabletgateway/vtgate_test.go +++ b/go/test/endtoend/tabletgateway/vtgate_test.go @@ -28,15 +28,15 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/endtoend/utils" - "vitess.io/vitess/go/vt/proto/topodata" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + vtorcutils "vitess.io/vitess/go/test/endtoend/vtorc/utils" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/topodata" ) func TestVtgateHealthCheck(t *testing.T) { @@ -59,7 +59,7 @@ func TestVtgateReplicationStatusCheck(t *testing.T) { time.Sleep(2 * time.Second) verifyVtgateVariables(t, clusterInstance.VtgateProcess.VerifyURL) ctx := context.Background() - conn, err := mysql.Connect(ctx, &vtParams) + conn, err := mysql.Connect(ctx, &vtParams) // VTGate require.NoError(t, 
err) defer conn.Close() @@ -68,6 +68,39 @@ func TestVtgateReplicationStatusCheck(t *testing.T) { expectNumRows := 2 numRows := len(qr.Rows) assert.Equal(t, expectNumRows, numRows, fmt.Sprintf("wrong number of results from show vitess_replication_status. Expected %d, got %d", expectNumRows, numRows)) + + // Disable VTOrc(s) recoveries so that it doesn't immediately repair/restart replication. + for _, vtorcProcess := range clusterInstance.VTOrcProcesses { + vtorcutils.DisableGlobalRecoveries(t, vtorcProcess) + } + // Re-enable recoveries afterward as the cluster is re-used. + defer func() { + for _, vtorcProcess := range clusterInstance.VTOrcProcesses { + vtorcutils.EnableGlobalRecoveries(t, vtorcProcess) + } + }() + // Stop replication on the non-PRIMARY tablets. + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDBA", clusterInstance.Keyspaces[0].Shards[0].Replica().Alias, "stop replica") + require.NoError(t, err) + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteMultiFetchAsDBA", clusterInstance.Keyspaces[0].Shards[0].Rdonly().Alias, "stop replica") + require.NoError(t, err) + // Restart replication afterward as the cluster is re-used. 
+ defer func() { + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDBA", clusterInstance.Keyspaces[0].Shards[0].Replica().Alias, "start replica") + require.NoError(t, err) + // Testing ExecuteMultiFetchAsDBA by running multiple commands in a single call: + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteMultiFetchAsDBA", clusterInstance.Keyspaces[0].Shards[0].Rdonly().Alias, "start replica sql_thread; start replica io_thread;") + require.NoError(t, err) + }() + time.Sleep(2 * time.Second) // Build up some replication lag + res, err := conn.ExecuteFetch("show vitess_replication_status", 2, false) + require.NoError(t, err) + expectNumRows = 2 + numRows = len(qr.Rows) + assert.Equal(t, expectNumRows, numRows, fmt.Sprintf("wrong number of results from show vitess_replication_status, expected %d, got %d", expectNumRows, numRows)) + rawLag := res.Named().Rows[0]["ReplicationLag"] // Let's just look at the first row + lagInt, _ := rawLag.ToInt64() // Don't check the error as the value could be "NULL" + assert.True(t, rawLag.IsNull() || lagInt > 0, "replication lag should be NULL or greater than 0 but was: %s", rawLag.ToString()) } func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) { @@ -88,8 +121,13 @@ func TestVtgateReplicationStatusCheckWithTabletTypeChange(t *testing.T) { // change the RDONLY tablet to SPARE rdOnlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientChangeTabletType(rdOnlyTablet, topodata.TabletType_SPARE) + err = clusterInstance.VtctldClientProcess.ChangeTabletType(rdOnlyTablet, topodata.TabletType_SPARE) require.NoError(t, err) + // Change it back to RDONLY afterward as the cluster is re-used. 
+ defer func() { + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdOnlyTablet.Alias, "rdonly") + require.NoError(t, err) + }() // Only returns rows for REPLICA and RDONLY tablets -- so should be 1 of them since we updated 1 to spare qr = utils.Exec(t, conn, "show vitess_replication_status like '%'") @@ -247,6 +285,44 @@ func TestReplicaTransactions(t *testing.T) { assert.Equal(t, `[[INT64(1) VARCHAR("email1")] [INT64(2) VARCHAR("email2")]]`, fmt.Sprintf("%v", qr4.Rows), "we are not able to reconnect after restart") } +// TestStreamingRPCStuck tests that StreamExecute calls don't get stuck on the vttablets if a client stops reading from a stream. +func TestStreamingRPCStuck(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer vtConn.Close() + + // We want the table to have enough rows such that a streaming call returns multiple packets. + // Therefore, we insert one row and keep doubling it. + utils.Exec(t, vtConn, "insert into customer(email) values('testemail')") + for i := 0; i < 15; i++ { + // Double the number of rows in customer table. + utils.Exec(t, vtConn, "insert into customer (email) select email from customer") + } + + // Connect to vtgate and run a streaming query. + vtgateConn, err := cluster.DialVTGate(ctx, t.Name(), vtgateGrpcAddress, "test_user", "") + require.NoError(t, err) + stream, err := vtgateConn.Session("", &querypb.ExecuteOptions{}).StreamExecute(ctx, "select * from customer", map[string]*querypb.BindVariable{}) + require.NoError(t, err) + + // We read packets until we see the first set of results. This ensures that the stream is working. + for { + res, err := stream.Recv() + require.NoError(t, err) + if res != nil && len(res.Rows) > 0 { + // breaking here stops reading from the stream. + break + } + } + + // We simulate a misbehaving client that doesn't read from the stream anymore. 
+ // This however shouldn't block PlannedReparentShard calls. + err = clusterInstance.VtctldClientProcess.PlannedReparentShard(keyspaceName, "0", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) + require.NoError(t, err) +} + func getMapFromJSON(JSON map[string]any, key string) map[string]any { result := make(map[string]any) object := reflect.ValueOf(JSON[key]) diff --git a/go/test/endtoend/tabletmanager/commands_test.go b/go/test/endtoend/tabletmanager/commands_test.go index 1a2d2424cb4..970e89c7037 100644 --- a/go/test/endtoend/tabletmanager/commands_test.go +++ b/go/test/endtoend/tabletmanager/commands_test.go @@ -22,17 +22,17 @@ import ( "fmt" "reflect" "testing" - "time" - - "vitess.io/vitess/go/test/endtoend/utils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" - "github.com/stretchr/testify/assert" - + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" + + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( @@ -60,54 +60,63 @@ func TestTabletCommands(t *testing.T) { utils.Exec(t, conn, "insert into t1(id, value) values(1,'a'), (2,'b')") checkDataOnReplica(t, replicaConn, `[[VARCHAR("a")] [VARCHAR("b")]]`) - // make sure direct dba queries work - sql := "select * from t1" - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDba", "--", "--json", primaryTablet.Alias, sql) - require.Nil(t, err) - assertExecuteFetch(t, result) + t.Run("ExecuteFetchAsDBA", func(t *testing.T) { + // make sure direct dba queries work + sql := "select * from t1" + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteFetchAsDBA", "--json", primaryTablet.Alias, sql) + require.Nil(t, err) + assertExecuteFetch(t, result) + }) + t.Run("ExecuteMultiFetchAsDBA", func(t *testing.T) { + // make sure direct dba queries work + sql := "select * from t1; select 
* from t1 limit 100" + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ExecuteMultiFetchAsDBA", "--json", primaryTablet.Alias, sql) + require.Nil(t, err) + assertExecuteMultiFetch(t, result) + }) // check Ping / RefreshState / RefreshStateByShard - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", primaryTablet.Alias) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshState", primaryTablet.Alias) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshStateByShard", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshStateByShard", keyspaceShard) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshStateByShard", "--", "--cells="+cell, keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RefreshStateByShard", "--cells", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") // Check basic actions. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadOnly", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "false") require.Nil(t, err, "error should be Nil") qr := utils.Exec(t, conn, "show variables like 'read_only'") got := fmt.Sprintf("%v", qr.Rows) want := "[[VARCHAR(\"read_only\") VARCHAR(\"ON\")]]" assert.Equal(t, want, got) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "true") require.Nil(t, err, "error should be Nil") qr = utils.Exec(t, conn, "show variables like 'read_only'") got = fmt.Sprintf("%v", qr.Rows) want = "[[VARCHAR(\"read_only\") VARCHAR(\"OFF\")]]" assert.Equal(t, want, got) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate", "--", "--ping-tablets=true") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate", "--ping-tablets") require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateKeyspace", keyspaceName) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateKeyspace", keyspaceName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateKeyspace", "--", "--ping-tablets=true", keyspaceName) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateKeyspace", "--ping-tablets", keyspaceName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateShard", "--", "--ping-tablets=false", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateShard", "--ping-tablets", keyspaceShard) require.Nil(t, err, 
"error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ValidateShard", "--", "--ping-tablets=true", keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ValidateShard", "--ping-tablets", keyspaceShard) require.Nil(t, err, "error should be Nil") } @@ -139,62 +148,60 @@ func assertExecuteFetch(t *testing.T, qr string) { want = int(2) assert.Equal(t, want, got) } - -// ActionAndTimeout test -func TestActionAndTimeout(t *testing.T) { - - defer cluster.PanicHandler(t) - err := clusterInstance.VtctlclientProcess.ExecuteCommand("Sleep", primaryTablet.Alias, "5s") +func assertExecuteMultiFetch(t *testing.T, qr string) { + resultMap := make([]map[string]any, 0) + err := json.Unmarshal([]byte(qr), &resultMap) require.Nil(t, err) - time.Sleep(1 * time.Second) + require.NotEmpty(t, resultMap) - // try a frontend RefreshState that should timeout as the tablet is busy running the other one - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RefreshState", "--", primaryTablet.Alias, "--wait-time", "2s") - assert.Error(t, err, "timeout as tablet is in Sleep") + rows := reflect.ValueOf(resultMap[0]["rows"]) + got := rows.Len() + want := int(2) + assert.Equal(t, want, got) + + fields := reflect.ValueOf(resultMap[0]["fields"]) + got = fields.Len() + want = int(2) + assert.Equal(t, want, got) } func TestHook(t *testing.T) { // test a regular program works defer cluster.PanicHandler(t) runHookAndAssert(t, []string{ - "ExecuteHook", "--", primaryTablet.Alias, "test.sh", "--flag1", "--param1=hello"}, "0", false, "") + "ExecuteHook", primaryTablet.Alias, "test.sh", "--", "--flag1", "--param1=hello"}, 0, false, "") // test stderr output runHookAndAssert(t, []string{ - "ExecuteHook", "--", primaryTablet.Alias, "test.sh", "--to-stderr"}, "0", false, "ERR: --to-stderr\n") + "ExecuteHook", primaryTablet.Alias, "test.sh", "--", "--to-stderr"}, 0, false, "ERR: --to-stderr\n") // test commands that fail runHookAndAssert(t, []string{ - 
"ExecuteHook", "--", primaryTablet.Alias, "test.sh", "--exit-error"}, "1", false, "ERROR: exit status 1\n") + "ExecuteHook", primaryTablet.Alias, "test.sh", "--", "--exit-error"}, 1, false, "ERROR: exit status 1\n") // test hook that is not present runHookAndAssert(t, []string{ - "ExecuteHook", "--", primaryTablet.Alias, "not_here.sh", "--exit-error"}, "-1", false, "missing hook") + "ExecuteHook", primaryTablet.Alias, "not_here.sh", "--", "--exit-error"}, -1, false, "missing hook") // test hook with invalid name runHookAndAssert(t, []string{ - "ExecuteHook", "--", primaryTablet.Alias, "/bin/ls"}, "-1", true, "hook name cannot have") + "ExecuteHook", primaryTablet.Alias, "/bin/ls"}, -1, true, "hook name cannot have") } -func runHookAndAssert(t *testing.T, params []string, expectedStatus string, expectedError bool, expectedStderr string) { - - hr, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(params...) +func runHookAndAssert(t *testing.T, params []string, expectedStatus int64, expectedError bool, expectedStderr string) { + hr, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput(params...) 
if expectedError { assert.Error(t, err, "Expected error") } else { require.Nil(t, err) - resultMap := make(map[string]any) - err = json.Unmarshal([]byte(hr), &resultMap) + var resp vtctldatapb.ExecuteHookResponse + err = json2.Unmarshal([]byte(hr), &resp) require.Nil(t, err) - exitStatus := reflect.ValueOf(resultMap["ExitStatus"]).Float() - status := fmt.Sprintf("%.0f", exitStatus) - assert.Equal(t, expectedStatus, status) - - stderr := reflect.ValueOf(resultMap["Stderr"]).String() - assert.Contains(t, stderr, expectedStderr) + assert.Equal(t, expectedStatus, resp.HookResult.ExitStatus) + assert.Contains(t, resp.HookResult.Stderr, expectedStderr) } } @@ -202,29 +209,32 @@ func runHookAndAssert(t *testing.T, params []string, expectedStatus string, expe func TestShardReplicationFix(t *testing.T) { // make sure the replica is in the replication graph, 2 nodes: 1 primary, 1 replica defer cluster.PanicHandler(t) - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) + result, err := clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell) require.Nil(t, err, "error should be Nil") - assertNodeCount(t, result, int(3)) + require.NotNil(t, result[cell], "result should not be Nil") + assert.Len(t, result[cell].Nodes, 3) // Manually add a bogus entry to the replication graph, and check it is removed by ShardReplicationFix - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ShardReplicationAdd", keyspaceShard, fmt.Sprintf("%s-9000", cell)) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ShardReplicationAdd", keyspaceShard, fmt.Sprintf("%s-9000", cell)) require.Nil(t, err, "error should be Nil") - result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) + result, err = clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell) require.Nil(t, err, "error should be Nil") - 
assertNodeCount(t, result, int(4)) + require.NotNil(t, result[cell], "result should not be Nil") + assert.Len(t, result[cell].Nodes, 4) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ShardReplicationFix", cell, keyspaceShard) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ShardReplicationFix", cell, keyspaceShard) require.Nil(t, err, "error should be Nil") - result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell, keyspaceShard) + result, err = clusterInstance.VtctldClientProcess.GetShardReplication(keyspaceName, shardName, cell) require.Nil(t, err, "error should be Nil") - assertNodeCount(t, result, int(3)) + require.NotNil(t, result[cell], "result should not be Nil") + assert.Len(t, result[cell].Nodes, 3) } func TestGetSchema(t *testing.T) { defer cluster.PanicHandler(t) - res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", "--", + res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", "--include-views", "--tables", "t1,v1", fmt.Sprintf("%s-%d", clusterInstance.Cell, primaryTablet.TabletUID)) require.Nil(t, err) @@ -234,13 +244,3 @@ func TestGetSchema(t *testing.T) { v1Create := gjson.Get(res, "table_definitions.#(name==\"v1\").schema") assert.Equal(t, getSchemaV1Results, v1Create.String()) } - -func assertNodeCount(t *testing.T, result string, want int) { - resultMap := make(map[string]any) - err := json.Unmarshal([]byte(result), &resultMap) - require.Nil(t, err) - - nodes := reflect.ValueOf(resultMap["nodes"]) - got := nodes.Len() - assert.Equal(t, want, got) -} diff --git a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go index aa09a99e0fe..0c6e056af36 100644 --- a/go/test/endtoend/tabletmanager/custom_rule_topo_test.go +++ b/go/test/endtoend/tabletmanager/custom_rule_topo_test.go @@ -74,7 +74,7 @@ func TestTopoCustomRule(t *testing.T) { err = 
clusterInstance.StartVttablet(rTablet, false, "SERVING", false, cell, keyspaceName, hostname, shardName) require.Nil(t, err, "error should be Nil") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Validate") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Validate") require.Nil(t, err, "error should be Nil") // And wait until the query is working. diff --git a/go/test/endtoend/tabletmanager/primary/tablet_test.go b/go/test/endtoend/tabletmanager/primary/tablet_test.go index f6255b1f71a..297e5540fac 100644 --- a/go/test/endtoend/tabletmanager/primary/tablet_test.go +++ b/go/test/endtoend/tabletmanager/primary/tablet_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/test/endtoend/cluster" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -121,16 +120,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) { // Test that using InitShardPrimary can go back and forth between 2 hosts. // Make replica tablet as primary - err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) + err := clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) require.NoError(t, err) // Run health check on both, make sure they are both healthy. // Also make sure the types are correct. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) require.NoError(t, err) checkHealth(t, primaryTablet.HTTPPort, false) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) require.NoError(t, err) checkHealth(t, replicaTablet.HTTPPort, false) @@ -138,16 +137,16 @@ func TestRepeatedInitShardPrimary(t *testing.T) { checkTabletType(t, replicaTablet.Alias, "PRIMARY") // Come back to the original tablet. - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.NoError(t, err) // Run health check on both, make sure they are both healthy. // Also make sure the types are correct. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", primaryTablet.Alias) require.NoError(t, err) checkHealth(t, primaryTablet.HTTPPort, false) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", replicaTablet.Alias) require.NoError(t, err) checkHealth(t, replicaTablet.HTTPPort, false) @@ -162,7 +161,7 @@ func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { // See StreamHealthResponse.primary_term_start_timestamp for details. 
// Make replica as primary - err := clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) + err := clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, replicaTablet.TabletUID) require.NoError(t, err) err = replicaTablet.VttabletProcess.WaitForTabletStatus("SERVING") @@ -212,7 +211,7 @@ func TestPrimaryRestartSetsPTSTimestamp(t *testing.T) { streamHealthRes2.GetPrimaryTermStartTimestamp())) // Reset primary - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shardName, cell, primaryTablet.TabletUID) require.NoError(t, err) err = primaryTablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) @@ -232,11 +231,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) { } func checkTabletType(t *testing.T, tabletAlias string, typeWant string) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias) - require.NoError(t, err) - - var tablet topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tablet) + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(tabletAlias) require.NoError(t, err) actualType := tablet.GetType() diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index c6f7253c791..685c361cef7 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -20,12 +20,15 @@ import ( "flag" "fmt" "os" + "strings" "testing" "time" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/gc" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" @@ -49,9 +52,9 
@@ var ( ) Engine=InnoDB; ` sqlCreateView = ` - create or replace view v1 as select * from t1; + create or replace view v1 as select * from t1 ` - sqlSchema = sqlCreateTable + sqlCreateView + sqlSchema = []string{sqlCreateTable, sqlCreateView} vSchema = ` { @@ -107,7 +110,7 @@ func TestMain(m *testing.M) { // Start keyspace keyspace := &cluster.Keyspace{ Name: keyspaceName, - SchemaSQL: sqlSchema, + SchemaSQL: strings.Join(sqlSchema, ";"), VSchema: vSchema, } @@ -128,19 +131,25 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func checkTableRows(t *testing.T, tableName string, expect int64) { +func getTableRows(t *testing.T, tableName string) int64 { require.NotEmpty(t, tableName) query := `select count(*) as c from %a` parsed := sqlparser.BuildParsedQuery(query, tableName) rs, err := primaryTablet.VttabletProcess.QueryTablet(parsed.Query, keyspaceName, true) require.NoError(t, err) count := rs.Named().Row().AsInt64("c", 0) + return count +} + +func checkTableRows(t *testing.T, tableName string, expect int64) { + count := getTableRows(t, tableName) assert.Equal(t, expect, count) } func populateTable(t *testing.T) { - _, err := primaryTablet.VttabletProcess.QueryTablet(sqlSchema, keyspaceName, true) + err := primaryTablet.VttabletProcess.QueryTabletMultiple(sqlSchema, keyspaceName, true) require.NoError(t, err) + _, err = primaryTablet.VttabletProcess.QueryTablet("delete from t1", keyspaceName, true) require.NoError(t, err) _, err = primaryTablet.VttabletProcess.QueryTablet("insert into t1 (id, value) values (null, md5(rand()))", keyspaceName, true) @@ -158,10 +167,17 @@ func populateTable(t *testing.T) { } // tableExists sees that a given table exists in MySQL -func tableExists(tableExpr string) (exists bool, tableName string, err error) { - query := `select table_name as table_name from information_schema.tables where table_schema=database() and table_name like '%a'` - parsed := sqlparser.BuildParsedQuery(query, tableExpr) - rs, err := 
primaryTablet.VttabletProcess.QueryTablet(parsed.Query, keyspaceName, true) +func tableExists(exprs ...string) (exists bool, tableName string, err error) { + if len(exprs) == 0 { + return false, "", fmt.Errorf("empty table list") + } + var clauses []string + for _, expr := range exprs { + clauses = append(clauses, fmt.Sprintf("table_name like '%s'", expr)) + } + clause := strings.Join(clauses, " or ") + query := fmt.Sprintf(`select table_name as table_name from information_schema.tables where table_schema=database() and (%s)`, clause) + rs, err := primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) if err != nil { return false, "", err } @@ -176,19 +192,18 @@ func validateTableDoesNotExist(t *testing.T, tableExpr string) { defer cancel() ticker := time.NewTicker(time.Second) - var foundTableName string - var exists bool - var err error + defer ticker.Stop() + for { + exists, foundTableName, err := tableExists(tableExpr) + require.NoError(t, err) + if !exists { + return + } select { case <-ticker.C: - exists, foundTableName, err = tableExists(tableExpr) - require.NoError(t, err) - if !exists { - return - } case <-ctx.Done(): - assert.NoError(t, ctx.Err(), "validateTableDoesNotExist timed out, table %v still exists (%v)", tableExpr, foundTableName) + assert.Failf(t, "validateTableDoesNotExist timed out, table %v still exists (%v)", tableExpr, foundTableName) return } } @@ -199,59 +214,84 @@ func validateTableExists(t *testing.T, tableExpr string) { defer cancel() ticker := time.NewTicker(time.Second) - var exists bool - var err error + defer ticker.Stop() + for { + exists, _, err := tableExists(tableExpr) + require.NoError(t, err) + if exists { + return + } select { case <-ticker.C: - exists, _, err = tableExists(tableExpr) - require.NoError(t, err) - if exists { - return - } case <-ctx.Done(): - assert.NoError(t, ctx.Err(), "validateTableExists timed out, table %v still does not exist", tableExpr) + assert.Failf(t, "validateTableExists timed out, 
table %v still does not exist", tableExpr) return } } } func validateAnyState(t *testing.T, expectNumRows int64, states ...schema.TableGCState) { - for _, state := range states { - expectTableToExist := true - searchExpr := "" - switch state { - case schema.HoldTableGCState: - searchExpr = `\_vt\_HOLD\_%` - case schema.PurgeTableGCState: - searchExpr = `\_vt\_PURGE\_%` - case schema.EvacTableGCState: - searchExpr = `\_vt\_EVAC\_%` - case schema.DropTableGCState: - searchExpr = `\_vt\_DROP\_%` - case schema.TableDroppedGCState: - searchExpr = `\_vt\_%` - expectTableToExist = false - default: - t.Log("Unknown state") - t.Fail() - } - exists, tableName, err := tableExists(searchExpr) - require.NoError(t, err) - - if exists { - if expectNumRows >= 0 { - checkTableRows(t, tableName, expectNumRows) + t.Run(fmt.Sprintf("validateAnyState: expectNumRows=%v, states=%v", expectNumRows, states), func(t *testing.T) { + timeout := gc.NextChecksIntervals[len(gc.NextChecksIntervals)-1] + 5*time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + for { + // Attempt validation: + for _, state := range states { + expectTableToExist := true + searchExpr := "" + searchExpr2 := "" + switch state { + case schema.HoldTableGCState: + searchExpr = `\_vt\_HOLD\_%` + searchExpr2 = `\_vt\_hld\_%` + case schema.PurgeTableGCState: + searchExpr = `\_vt\_PURGE\_%` + searchExpr2 = `\_vt\_prg\_%` + case schema.EvacTableGCState: + searchExpr = `\_vt\_EVAC\_%` + searchExpr2 = `\_vt\_evc\_%` + case schema.DropTableGCState: + searchExpr = `\_vt\_DROP\_%` + searchExpr2 = `\_vt\_drp\_%` + case schema.TableDroppedGCState: + searchExpr = `\_vt\_%` + searchExpr2 = `\_vt\_%` + expectTableToExist = false + default: + require.Failf(t, "unknown state", "%v", state) + } + exists, tableName, err := tableExists(searchExpr, searchExpr2) + require.NoError(t, err) + + var foundRows int64 + if exists { + 
foundRows = getTableRows(t, tableName) + // Now that the table is validated, we can drop it (test cleanup) + dropTable(t, tableName) + } + t.Logf("=== exists: %v, tableName: %v, rows: %v", exists, tableName, foundRows) + if exists == expectTableToExist { + // expectNumRows < 0 means "don't care" + if expectNumRows < 0 || (expectNumRows == foundRows) { + // All conditions are met + return + } + } + } + select { + case <-ticker.C: + case <-ctx.Done(): + assert.Failf(t, "timeout in validateAnyState", " waiting for any of these states: %v, expecting rows: %v", states, expectNumRows) + return } - // Now that the table is validated, we can drop it - dropTable(t, tableName) - } - if exists == expectTableToExist { - // condition met - return } - } - assert.Failf(t, "could not match any of the states", "states=%v", states) + }) } // dropTable drops a table @@ -266,10 +306,10 @@ func TestCapability(t *testing.T) { mysqlVersion := onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()) require.NotEmpty(t, mysqlVersion) - _, capableOf, _ := mysql.GetFlavor(mysqlVersion, nil) + capableOf := mysql.ServerVersionCapableOf(mysqlVersion) require.NotNil(t, capableOf) var err error - fastDropTable, err = capableOf(mysql.FastDropTableFlavorCapability) + fastDropTable, err = capableOf(capabilities.FastDropTableFlavorCapability) require.NoError(t, err) } @@ -279,108 +319,137 @@ func TestPopulateTable(t *testing.T) { validateTableDoesNotExist(t, "no_such_table") } +func generateRenameStatement(newFormat bool, fromTableName string, state schema.TableGCState, tm time.Time) (statement string, toTableName string, err error) { + if newFormat { + return schema.GenerateRenameStatement(fromTableName, state, tm) + } + return schema.GenerateRenameStatementOldFormat(fromTableName, state, tm) +} + func TestHold(t *testing.T) { - populateTable(t) - query, tableName, err := schema.GenerateRenameStatement("t1", schema.HoldTableGCState, 
time.Now().UTC().Add(tableTransitionExpiration)) - assert.NoError(t, err) + for _, newNameFormat := range []bool{false, true} { + t.Run(fmt.Sprintf("new format=%t", newNameFormat), func(t *testing.T) { + populateTable(t) + query, tableName, err := generateRenameStatement(newNameFormat, "t1", schema.HoldTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) + assert.NoError(t, err) - _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - assert.NoError(t, err) + _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + assert.NoError(t, err) - validateTableDoesNotExist(t, "t1") - validateTableExists(t, tableName) + validateTableDoesNotExist(t, "t1") + validateTableExists(t, tableName) - time.Sleep(tableTransitionExpiration / 2) - { - // Table was created with +10s timestamp, so it should still exist - validateTableExists(t, tableName) + time.Sleep(tableTransitionExpiration / 2) + { + // Table was created with +10s timestamp, so it should still exist + validateTableExists(t, tableName) - checkTableRows(t, tableName, 1024) - } + checkTableRows(t, tableName, 1024) + } - time.Sleep(tableTransitionExpiration) - // We're now both beyond table's timestamp as well as a tableGC interval - validateTableDoesNotExist(t, tableName) - if fastDropTable { - validateAnyState(t, -1, schema.DropTableGCState, schema.TableDroppedGCState) - } else { - validateAnyState(t, -1, schema.PurgeTableGCState, schema.EvacTableGCState, schema.DropTableGCState, schema.TableDroppedGCState) + time.Sleep(tableTransitionExpiration) + // We're now both beyond table's timestamp as well as a tableGC interval + validateTableDoesNotExist(t, tableName) + if fastDropTable { + validateAnyState(t, -1, schema.DropTableGCState, schema.TableDroppedGCState) + } else { + validateAnyState(t, -1, schema.PurgeTableGCState, schema.EvacTableGCState, schema.DropTableGCState, schema.TableDroppedGCState) + } + }) } } func TestEvac(t *testing.T) { - populateTable(t) - 
query, tableName, err := schema.GenerateRenameStatement("t1", schema.EvacTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) - assert.NoError(t, err) - - _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - assert.NoError(t, err) - - validateTableDoesNotExist(t, "t1") - - time.Sleep(tableTransitionExpiration / 2) - { - // Table was created with +10s timestamp, so it should still exist - if fastDropTable { - // EVAC state is skipped in mysql 8.0.23 and beyond - validateTableDoesNotExist(t, tableName) - } else { - validateTableExists(t, tableName) - checkTableRows(t, tableName, 1024) - } + for _, newNameFormat := range []bool{false, true} { + t.Run(fmt.Sprintf("new format=%t", newNameFormat), func(t *testing.T) { + var tableName string + t.Run("setting up EVAC table", func(t *testing.T) { + populateTable(t) + var query string + var err error + query, tableName, err = generateRenameStatement(newNameFormat, "t1", schema.EvacTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) + assert.NoError(t, err) + + _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + assert.NoError(t, err) + + validateTableDoesNotExist(t, "t1") + }) + + t.Run("validating before expiration", func(t *testing.T) { + time.Sleep(tableTransitionExpiration / 2) + // Table was created with +10s timestamp, so it should still exist + if fastDropTable { + // EVAC state is skipped in mysql 8.0.23 and beyond + validateTableDoesNotExist(t, tableName) + } else { + validateTableExists(t, tableName) + checkTableRows(t, tableName, 1024) + } + }) + + t.Run("validating rows evacuated", func(t *testing.T) { + // We're now both beyond table's timestamp as well as a tableGC interval + validateTableDoesNotExist(t, tableName) + // Table should be renamed as _vt_DROP_... and then dropped! 
+ validateAnyState(t, 0, schema.DropTableGCState, schema.TableDroppedGCState) + }) + }) } - - time.Sleep(tableTransitionExpiration) - // We're now both beyond table's timestamp as well as a tableGC interval - validateTableDoesNotExist(t, tableName) - // Table should be renamed as _vt_DROP_... and then dropped! - validateAnyState(t, 0, schema.DropTableGCState, schema.TableDroppedGCState) } func TestDrop(t *testing.T) { - populateTable(t) - query, tableName, err := schema.GenerateRenameStatement("t1", schema.DropTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) - assert.NoError(t, err) + for _, newNameFormat := range []bool{false, true} { + t.Run(fmt.Sprintf("new format=%t", newNameFormat), func(t *testing.T) { + populateTable(t) + query, tableName, err := generateRenameStatement(newNameFormat, "t1", schema.DropTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) + assert.NoError(t, err) - _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - assert.NoError(t, err) + _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + assert.NoError(t, err) - validateTableDoesNotExist(t, "t1") + validateTableDoesNotExist(t, "t1") - time.Sleep(tableTransitionExpiration) - time.Sleep(2 * gcCheckInterval) - // We're now both beyond table's timestamp as well as a tableGC interval - validateTableDoesNotExist(t, tableName) + time.Sleep(tableTransitionExpiration) + time.Sleep(2 * gcCheckInterval) + // We're now both beyond table's timestamp as well as a tableGC interval + validateTableDoesNotExist(t, tableName) + }) + } } func TestPurge(t *testing.T) { - populateTable(t) - query, tableName, err := schema.GenerateRenameStatement("t1", schema.PurgeTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) - require.NoError(t, err) + for _, newNameFormat := range []bool{false, true} { + t.Run(fmt.Sprintf("new format=%t", newNameFormat), func(t *testing.T) { + populateTable(t) + query, tableName, err := 
generateRenameStatement(newNameFormat, "t1", schema.PurgeTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) + require.NoError(t, err) - _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - require.NoError(t, err) + _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) - validateTableDoesNotExist(t, "t1") - if !fastDropTable { - validateTableExists(t, tableName) - checkTableRows(t, tableName, 1024) - } - if !fastDropTable { - time.Sleep(5 * gcPurgeCheckInterval) // wait for table to be purged - } - validateTableDoesNotExist(t, tableName) // whether purged or not, table should at some point transition to next state - if fastDropTable { - // if MySQL supports fast DROP TABLE, TableGC completely skips the PURGE state. Rows are not purged. - validateAnyState(t, 1024, schema.DropTableGCState, schema.TableDroppedGCState) - } else { - validateAnyState(t, 0, schema.EvacTableGCState, schema.DropTableGCState, schema.TableDroppedGCState) + validateTableDoesNotExist(t, "t1") + if !fastDropTable { + validateTableExists(t, tableName) + checkTableRows(t, tableName, 1024) + } + if !fastDropTable { + time.Sleep(5 * gcPurgeCheckInterval) // wait for table to be purged + } + validateTableDoesNotExist(t, tableName) // whether purged or not, table should at some point transition to next state + if fastDropTable { + // if MySQL supports fast DROP TABLE, TableGC completely skips the PURGE state. Rows are not purged. 
+ validateAnyState(t, 1024, schema.DropTableGCState, schema.TableDroppedGCState) + } else { + validateAnyState(t, 0, schema.EvacTableGCState, schema.DropTableGCState, schema.TableDroppedGCState) + } + }) } } func TestPurgeView(t *testing.T) { populateTable(t) - query, tableName, err := schema.GenerateRenameStatement("v1", schema.PurgeTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) + query, tableName, err := generateRenameStatement(true, "v1", schema.PurgeTableGCState, time.Now().UTC().Add(tableTransitionExpiration)) require.NoError(t, err) _, err = primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) diff --git a/go/test/endtoend/tabletmanager/tablet_health_test.go b/go/test/endtoend/tabletmanager/tablet_health_test.go index 7dc4bcd97d2..bf3747fde29 100644 --- a/go/test/endtoend/tabletmanager/tablet_health_test.go +++ b/go/test/endtoend/tabletmanager/tablet_health_test.go @@ -30,7 +30,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" @@ -85,7 +84,7 @@ func TestTabletReshuffle(t *testing.T) { require.NoError(t, err) assertExcludeFields(t, string(result)) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Backup", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("Backup", rTablet.Alias) assert.Error(t, err, "cannot perform backup without my.cnf") killTablets(rTablet) @@ -114,18 +113,18 @@ func TestHealthCheck(t *testing.T) { require.NoError(t, err) defer conn.Close() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) checkHealth(t, rTablet.HTTPPort, false) // Make sure the primary is still primary checkTabletType(t, primaryTablet.Alias, "PRIMARY") - utils.Exec(t, conn, 
"stop slave") + utils.Exec(t, conn, "stop replica") // stop replication, make sure we don't go unhealthy. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) // make sure the health stream is updated @@ -136,9 +135,9 @@ func TestHealthCheck(t *testing.T) { } // then restart replication, make sure we stay healthy - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rTablet.Alias) require.NoError(t, err) checkHealth(t, rTablet.HTTPPort, false) @@ -173,16 +172,16 @@ func TestHealthCheck(t *testing.T) { // On a MySQL restart, it comes up as a read-only tablet (check default.cnf file). 
// We have to explicitly set it to read-write otherwise heartbeat writer is unable // to write the heartbeats - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", primaryTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", primaryTablet.Alias, "true") require.NoError(t, err) // explicitly start replication on all of the replicas to avoid any test flakiness as they were all // replicating from the primary instance - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) require.NoError(t, err) time.Sleep(tabletHealthcheckRefreshInterval) @@ -348,11 +347,7 @@ func checkHealth(t *testing.T, port int, shouldError bool) { } func checkTabletType(t *testing.T, tabletAlias string, typeWant string) { - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tabletAlias) - require.NoError(t, err) - - var tablet topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tablet) + tablet, err := clusterInstance.VtctldClientProcess.GetTablet(tabletAlias) require.NoError(t, err) actualType := tablet.GetType() @@ -398,16 +393,16 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { // Change from rdonly to drained and stop replication. The tablet will stay // healthy, and the query service is still running. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") require.NoError(t, err) // Trying to drain the same tablet again, should error - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "drained") assert.Error(t, err, "already drained") - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonlyTablet.Alias) require.NoError(t, err) // Trigger healthcheck explicitly to avoid waiting for the next interval. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) require.NoError(t, err) checkTabletType(t, rdonlyTablet.Alias, "DRAINED") @@ -417,11 +412,11 @@ func TestHealthCheckDrainedStateDoesNotShutdownQueryService(t *testing.T) { require.NoError(t, err) // Restart replication. Tablet will become healthy again. 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "rdonly") require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", rdonlyTablet.Alias) require.NoError(t, err) - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", rdonlyTablet.Alias) require.NoError(t, err) checkHealth(t, rdonlyTablet.HTTPPort, false) } @@ -434,7 +429,7 @@ func killTablets(tablets ...*cluster.Vttablet) { defer wg.Done() _ = tablet.VttabletProcess.TearDown() _ = tablet.MysqlctlProcess.Stop() - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + _ = clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) }(tablet) } wg.Wait() diff --git a/go/test/endtoend/tabletmanager/tablet_test.go b/go/test/endtoend/tabletmanager/tablet_test.go index 4fe5a70d125..398610d82de 100644 --- a/go/test/endtoend/tabletmanager/tablet_test.go +++ b/go/test/endtoend/tabletmanager/tablet_test.go @@ -47,7 +47,7 @@ func TestEnsureDB(t *testing.T) { require.NoError(t, err) // Make it the primary. - err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) require.EqualError(t, err, "exit status 1") // It is still NOT_SERVING because the db is read-only. @@ -56,8 +56,8 @@ func TestEnsureDB(t *testing.T) { assert.Contains(t, status, "read-only") // Switch to read-write and verify that we go serving. 
- // Note: for TabletExternallyReparented, we expect SetReadWrite to be called by the user - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + // Note: for TabletExternallyReparented, we expect SetWritable to be called by the user + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", tablet.Alias, "true") require.NoError(t, err) err = tablet.VttabletProcess.WaitForTabletStatus("SERVING") require.NoError(t, err) @@ -82,11 +82,11 @@ func TestResetReplicationParameters(t *testing.T) { require.NoError(t, err) // Set a replication source on the tablet and start replication - _, err = tablet.VttabletProcess.QueryTablet("stop slave;change master to master_host = 'localhost', master_port = 123;start slave;", keyspaceName, false) + err = tablet.VttabletProcess.QueryTabletMultiple([]string{"stop replica", "change replication source to source_host = 'localhost', source_port = 123", "start replica"}, keyspaceName, false) require.NoError(t, err) // Check the replica status. 
- res, err := tablet.VttabletProcess.QueryTablet("show slave status", keyspaceName, false) + res, err := tablet.VttabletProcess.QueryTablet("show replica status", keyspaceName, false) require.NoError(t, err) // This is expected to return 1 row result require.Len(t, res.Rows, 1) @@ -96,7 +96,7 @@ func TestResetReplicationParameters(t *testing.T) { require.NoError(t, err) // Recheck the replica status and this time is should be empty - res, err = tablet.VttabletProcess.QueryTablet("show slave status", keyspaceName, false) + res, err = tablet.VttabletProcess.QueryTablet("show replica status", keyspaceName, false) require.NoError(t, err) require.Len(t, res.Rows, 0) } diff --git a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go index 7c0f05bdcc2..df63d5a84a1 100644 --- a/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go +++ b/go/test/endtoend/tabletmanager/throttler_topo/throttler_test.go @@ -89,6 +89,7 @@ var ( throttledAppsAPIPath = "throttler/throttled-apps" checkAPIPath = "throttler/check" checkSelfAPIPath = "throttler/check-self" + statusAPIPath = "throttler/status" getResponseBody = func(resp *http.Response) string { body, _ := io.ReadAll(resp.Body) return string(body) @@ -180,6 +181,16 @@ func throttleCheckSelf(tablet *cluster.Vttablet) (*http.Response, error) { return httpClient.Get(fmt.Sprintf("http://localhost:%d/%s?app=%s", tablet.HTTPPort, checkSelfAPIPath, testAppName)) } +func throttleStatus(t *testing.T, tablet *cluster.Vttablet) string { + resp, err := httpClient.Get(fmt.Sprintf("http://localhost:%d/%s", tablet.HTTPPort, statusAPIPath)) + require.NoError(t, err) + defer resp.Body.Close() + + b, err := io.ReadAll(resp.Body) + require.NoError(t, err) + return string(b) +} + func warmUpHeartbeat(t *testing.T) (respStatus int) { // because we run with -heartbeat_on_demand_duration=5s, the heartbeat is "cold" right now. // Let's warm it up. 
@@ -314,17 +325,32 @@ func TestInitialThrottler(t *testing.T) { }) t.Run("validating OK response from throttler with low threshold, heartbeats running", func(t *testing.T) { time.Sleep(1 * time.Second) + cluster.ValidateReplicationIsHealthy(t, replicaTablet) resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) + if !assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) { + rs, err := replicaTablet.VttabletProcess.QueryTablet("show replica status", keyspaceName, false) + assert.NoError(t, err) + t.Logf("Seconds_Behind_Source: %s", rs.Named().Row()["Seconds_Behind_Source"].ToString()) + t.Logf("throttler primary status: %+v", throttleStatus(t, primaryTablet)) + t.Logf("throttler replica status: %+v", throttleStatus(t, replicaTablet)) + } }) + t.Run("validating OK response from throttler with low threshold, heartbeats running still", func(t *testing.T) { time.Sleep(1 * time.Second) + cluster.ValidateReplicationIsHealthy(t, replicaTablet) resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() - assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) + if !assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) { + rs, err := replicaTablet.VttabletProcess.QueryTablet("show replica status", keyspaceName, false) + assert.NoError(t, err) + t.Logf("Seconds_Behind_Source: %s", rs.Named().Row()["Seconds_Behind_Source"].ToString()) + t.Logf("throttler primary status: %+v", throttleStatus(t, primaryTablet)) + t.Logf("throttler replica status: %+v", throttleStatus(t, replicaTablet)) + } }) t.Run("validating pushback response from throttler on low threshold once heartbeats go stale", func(t *testing.T) { 
time.Sleep(2 * onDemandHeartbeatDuration) // just... really wait long enough, make sure on-demand stops @@ -370,12 +396,18 @@ func TestLag(t *testing.T) { defer clusterInstance.EnableVTOrcRecoveries(t) t.Run("stopping replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("accumulating lag, expecting throttler push back", func(t *testing.T) { time.Sleep(2 * throttler.DefaultThreshold) + }) + t.Run("requesting heartbeats while replication stopped", func(t *testing.T) { + // By now on-demand heartbeats have stopped. + _ = warmUpHeartbeat(t) + }) + t.Run("expecting throttler push back", func(t *testing.T) { resp, err := throttleCheck(primaryTablet, false) require.NoError(t, err) defer resp.Body.Close() @@ -386,7 +418,10 @@ func TestLag(t *testing.T) { require.NoError(t, err) defer resp.Body.Close() // self (on primary) is unaffected by replication lag - assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) + if !assert.Equalf(t, http.StatusOK, resp.StatusCode, "Unexpected response from throttler: %s", getResponseBody(resp)) { + t.Logf("throttler primary status: %+v", throttleStatus(t, primaryTablet)) + t.Logf("throttler replica status: %+v", throttleStatus(t, replicaTablet)) + } }) t.Run("replica self-check should show error", func(t *testing.T) { resp, err := throttleCheckSelf(replicaTablet) @@ -415,7 +450,7 @@ func TestLag(t *testing.T) { }) t.Run("starting replication", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replicaTablet.Alias) assert.NoError(t, err) }) t.Run("expecting replication to catch up and throttler check to return OK", func(t 
*testing.T) { @@ -439,7 +474,7 @@ func TestLag(t *testing.T) { func TestNoReplicas(t *testing.T) { defer cluster.PanicHandler(t) t.Run("changing replica to RDONLY", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "RDONLY") assert.NoError(t, err) // This makes no REPLICA servers available. We expect something like: @@ -447,7 +482,7 @@ func TestNoReplicas(t *testing.T) { waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) }) t.Run("restoring to REPLICA", func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") + err := clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "REPLICA") assert.NoError(t, err) waitForThrottleCheckStatus(t, primaryTablet, http.StatusOK) diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go index 40cfdb53118..602f8622a3b 100644 --- a/go/test/endtoend/throttler/util.go +++ b/go/test/endtoend/throttler/util.go @@ -34,9 +34,10 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) type Config struct { diff --git a/go/test/endtoend/topoconncache/main_test.go b/go/test/endtoend/topoconncache/main_test.go index 7cfea8839b0..26eb3918a0b 100644 --- a/go/test/endtoend/topoconncache/main_test.go +++ b/go/test/endtoend/topoconncache/main_test.go @@ -48,8 +48,6 @@ var ( ) Engine=InnoDB ` commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", 
"--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", @@ -195,14 +193,14 @@ func TestMain(m *testing.M) { return 1, err } } - if err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { + if err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard1.Name, shard1Primary.Cell, shard1Primary.TabletUID); err != nil { return 1, err } // run a health check on source replica so it responds to discovery // (for binlog players) and on the source rdonlys (for workers) for _, tablet := range []string{shard1Replica.Alias, shard1Rdonly.Alias} { - if err := clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { + if err := clusterInstance.VtctldClientProcess.ExecuteCommand("RunHealthCheck", tablet); err != nil { return 1, err } } @@ -213,7 +211,7 @@ func TestMain(m *testing.M) { } } - if err := clusterInstance.VtctlclientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { + if err := clusterInstance.VtctldClientProcess.InitializeShard(keyspaceName, shard2.Name, shard2Primary.Cell, shard2Primary.TabletUID); err != nil { return 1, err } @@ -221,14 +219,14 @@ func TestMain(m *testing.M) { return 1, err } - if err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { + if err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, fmt.Sprintf(sqlSchema, tableName)); err != nil { return 1, err } - if err := clusterInstance.VtctlclientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { + if err := clusterInstance.VtctldClientProcess.ApplyVSchema(keyspaceName, fmt.Sprintf(vSchema, tableName)); err != nil { return 1, err } - _ = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) + _ = 
clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildKeyspaceGraph", keyspaceName) return m.Run(), nil }() diff --git a/go/test/endtoend/topoconncache/topo_conn_cache_test.go b/go/test/endtoend/topoconncache/topo_conn_cache_test.go index 504ca218047..082ecc5717f 100644 --- a/go/test/endtoend/topoconncache/topo_conn_cache_test.go +++ b/go/test/endtoend/topoconncache/topo_conn_cache_test.go @@ -51,7 +51,7 @@ func TestVtctldListAllTablets(t *testing.T) { func testListAllTablets(t *testing.T) { // first w/o any filters, aside from cell - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ListAllTablets") + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetTablets") require.NoError(t, err) tablets := getAllTablets() @@ -74,7 +74,7 @@ func deleteCell(t *testing.T) { deleteTablet(t, shard2Rdonly) // Delete cell2 info from topo - res, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("DeleteCellInfo", "--", "--force", cell2) + res, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("DeleteCellInfo", "--force", cell2) t.Log(res) require.NoError(t, err) @@ -84,7 +84,7 @@ func deleteCell(t *testing.T) { clusterInstance.Keyspaces[0].Shards = []cluster.Shard{shard1, shard2} // Now list all tablets - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ListAllTablets") + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetTablets") require.NoError(t, err) tablets := getAllTablets() @@ -111,7 +111,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) } @@ -184,7 +184,7 @@ func addCellback(t *testing.T) { shard2.Vttablets = append(shard2.Vttablets, shard2Replica) shard2.Vttablets = append(shard2.Vttablets, 
shard1Rdonly) - result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ListAllTablets") + result, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetTablets") require.NoError(t, err) tablets := getAllTablets() diff --git a/go/test/endtoend/utils/cmp.go b/go/test/endtoend/utils/cmp.go index 38726d6c3aa..32c90a27a5b 100644 --- a/go/test/endtoend/utils/cmp.go +++ b/go/test/endtoend/utils/cmp.go @@ -29,12 +29,17 @@ import ( "vitess.io/vitess/go/sqltypes" ) +type TestingT interface { + require.TestingT + Helper() +} + type MySQLCompare struct { - t *testing.T + t TestingT MySQLConn, VtConn *mysql.Conn } -func NewMySQLCompare(t *testing.T, vtParams, mysqlParams mysql.ConnParams) (MySQLCompare, error) { +func NewMySQLCompare(t TestingT, vtParams, mysqlParams mysql.ConnParams) (MySQLCompare, error) { ctx := context.Background() vtConn, err := mysql.Connect(ctx, &vtParams) if err != nil { @@ -53,6 +58,10 @@ func NewMySQLCompare(t *testing.T, vtParams, mysqlParams mysql.ConnParams) (MySQ }, nil } +func (mcmp *MySQLCompare) AsT() *testing.T { + return mcmp.t.(*testing.T) +} + func (mcmp *MySQLCompare) Close() { mcmp.VtConn.Close() mcmp.MySQLConn.Close() @@ -70,6 +79,12 @@ func (mcmp *MySQLCompare) AssertMatches(query, expected string) { } } +// SkipIfBinaryIsBelowVersion should be used instead of using utils.SkipIfBinaryIsBelowVersion(t, +// This is because we might be inside a Run block that has a different `t` variable +func (mcmp *MySQLCompare) SkipIfBinaryIsBelowVersion(majorVersion int, binary string) { + SkipIfBinaryIsBelowVersion(mcmp.t.(*testing.T), majorVersion, binary) +} + // AssertMatchesAny ensures the given query produces any one of the expected results. func (mcmp *MySQLCompare) AssertMatchesAny(query string, expected ...string) { mcmp.t.Helper() @@ -121,7 +136,7 @@ func (mcmp *MySQLCompare) AssertMatchesAnyNoCompare(query string, expected ...st // Both clients need to return an error. 
The error of Vitess must be matching the given expectation. func (mcmp *MySQLCompare) AssertContainsError(query, expected string) { mcmp.t.Helper() - _, err := mcmp.ExecAllowAndCompareError(query) + _, err := mcmp.ExecAllowAndCompareError(query, CompareOptions{}) require.Error(mcmp.t, err) assert.Contains(mcmp.t, err.Error(), expected, "actual error: %s", err.Error()) } @@ -196,7 +211,7 @@ func (mcmp *MySQLCompare) Exec(query string) *sqltypes.Result { mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) require.NoError(mcmp.t, err, "[MySQL Error] for query: "+query) - compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) + compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, CompareOptions{}) return vtQr } @@ -223,7 +238,7 @@ func (mcmp *MySQLCompare) ExecWithColumnCompare(query string) *sqltypes.Result { mysqlQr, err := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) require.NoError(mcmp.t, err, "[MySQL Error] for query: "+query) - compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, true) + compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, CompareOptions{CompareColumnNames: true}) return vtQr } @@ -235,7 +250,7 @@ func (mcmp *MySQLCompare) ExecWithColumnCompare(query string) *sqltypes.Result { // The result set and error produced by Vitess are returned to the caller. 
// If the Vitess and MySQL error are both nil, but the results do not match, // the mismatched results are instead returned as an error, as well as the Vitess result set -func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string) (*sqltypes.Result, error) { +func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string, opts CompareOptions) (*sqltypes.Result, error) { mcmp.t.Helper() vtQr, vtErr := mcmp.VtConn.ExecuteFetch(query, 1000, true) mysqlQr, mysqlErr := mcmp.MySQLConn.ExecuteFetch(query, 1000, true) @@ -244,7 +259,7 @@ func (mcmp *MySQLCompare) ExecAllowAndCompareError(query string) (*sqltypes.Resu // Since we allow errors, we don't want to compare results if one of the client failed. // Vitess and MySQL should always be agreeing whether the query returns an error or not. if vtErr == nil && mysqlErr == nil { - vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) + vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, opts) } return vtQr, vtErr } @@ -257,8 +272,8 @@ func (mcmp *MySQLCompare) ExecAndIgnore(query string) (*sqltypes.Result, error) return mcmp.VtConn.ExecuteFetch(query, 1000, true) } -func (mcmp *MySQLCompare) Run(query string, f func(mcmp *MySQLCompare)) { - mcmp.t.Run(query, func(t *testing.T) { +func (mcmp *MySQLCompare) Run(name string, f func(mcmp *MySQLCompare)) { + mcmp.AsT().Run(name, func(t *testing.T) { inner := &MySQLCompare{ t: t, MySQLConn: mcmp.MySQLConn, @@ -282,7 +297,7 @@ func (mcmp *MySQLCompare) ExecAllowError(query string) (*sqltypes.Result, error) // Since we allow errors, we don't want to compare results if one of the client failed. // Vitess and MySQL should always be agreeing whether the query returns an error or not. 
if mysqlErr == nil { - vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, false) + vtErr = compareVitessAndMySQLResults(mcmp.t, query, mcmp.VtConn, vtQr, mysqlQr, CompareOptions{}) } return vtQr, vtErr } diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go index de8ce40f992..53b50195036 100644 --- a/go/test/endtoend/utils/mysql.go +++ b/go/test/endtoend/utils/mysql.go @@ -22,26 +22,41 @@ import ( "fmt" "os" "path" - "testing" + "regexp" + "strconv" + "time" "github.com/stretchr/testify/assert" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" ) +const mysqlShutdownTimeout = 1 * time.Minute + // NewMySQL creates a new MySQL server using the local mysqld binary. The name of the database // will be set to `dbName`. SQL queries that need to be executed on the new MySQL instance // can be passed through the `schemaSQL` argument. // The mysql.ConnParams to connect to the new database is returned, along with a function to // teardown the database. func NewMySQL(cluster *cluster.LocalProcessCluster, dbName string, schemaSQL ...string) (mysql.ConnParams, func(), error) { - mysqlParam, _, closer, error := NewMySQLWithMysqld(cluster.GetAndReservePort(), cluster.Hostname, dbName, schemaSQL...) + // Even though we receive schemaSQL as a variadic argument, we ensure to further split it into singular statements. + parser := sqlparser.NewTestParser() + var sqls []string + for _, sql := range schemaSQL { + split, err := parser.SplitStatementToPieces(sql) + if err != nil { + return mysql.ConnParams{}, nil, err + } + sqls = append(sqls, split...) 
+ } + mysqlParam, _, _, closer, error := NewMySQLWithMysqld(cluster.GetAndReservePort(), cluster.Hostname, dbName, sqls...) return mysqlParam, closer, error } @@ -58,28 +73,28 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) ( var cfg dbconfigs.DBConfigs // ensure the DBA username is 'root' instead of the system's default username so that mysqladmin can shutdown cfg.Dba.User = "root" - cfg.InitWithSocket(mycnf.SocketFile) + cfg.InitWithSocket(mycnf.SocketFile, collations.MySQL8()) return mysqlctl.NewMysqld(&cfg), mycnf, nil } -func NewMySQLWithMysqld(port int, hostname, dbName string, schemaSQL ...string) (mysql.ConnParams, *mysqlctl.Mysqld, func(), error) { +func NewMySQLWithMysqld(port int, hostname, dbName string, schemaSQL ...string) (mysql.ConnParams, *mysqlctl.Mysqld, *mysqlctl.Mycnf, func(), error) { mysqlDir, err := createMySQLDir() if err != nil { - return mysql.ConnParams{}, nil, nil, err + return mysql.ConnParams{}, nil, nil, nil, err } initMySQLFile, err := createInitSQLFile(mysqlDir, dbName) if err != nil { - return mysql.ConnParams{}, nil, nil, err + return mysql.ConnParams{}, nil, nil, nil, err } mysqlPort := port mysqld, mycnf, err := CreateMysqldAndMycnf(0, "", mysqlPort) if err != nil { - return mysql.ConnParams{}, nil, nil, err + return mysql.ConnParams{}, nil, nil, nil, err } err = initMysqld(mysqld, mycnf, initMySQLFile) if err != nil { - return mysql.ConnParams{}, nil, nil, err + return mysql.ConnParams{}, nil, nil, nil, err } params := mysql.ConnParams{ @@ -91,12 +106,12 @@ func NewMySQLWithMysqld(port int, hostname, dbName string, schemaSQL ...string) for _, sql := range schemaSQL { err = prepareMySQLWithSchema(params, sql) if err != nil { - return mysql.ConnParams{}, nil, nil, err + return mysql.ConnParams{}, nil, nil, nil, err } } - return params, mysqld, func() { + return params, mysqld, mycnf, func() { ctx := context.Background() - _ = mysqld.Teardown(ctx, mycnf, true) + _ = mysqld.Teardown(ctx, mycnf, 
true, mysqlShutdownTimeout) }, nil } @@ -155,44 +170,52 @@ func prepareMySQLWithSchema(params mysql.ConnParams, sql string) error { return nil } -func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn, vtQr, mysqlQr *sqltypes.Result, compareColumns bool) error { +type CompareOptions struct { + CompareColumnNames bool + IgnoreRowsAffected bool +} + +func compareVitessAndMySQLResults(t TestingT, query string, vtConn *mysql.Conn, vtQr, mysqlQr *sqltypes.Result, opts CompareOptions) error { + t.Helper() + if vtQr == nil && mysqlQr == nil { return nil } if vtQr == nil { - t.Error("Vitess result is 'nil' while MySQL's is not.") + t.Errorf("Vitess result is 'nil' while MySQL's is not.") return errors.New("Vitess result is 'nil' while MySQL's is not.\n") } if mysqlQr == nil { - t.Error("MySQL result is 'nil' while Vitess' is not.") + t.Errorf("MySQL result is 'nil' while Vitess' is not.") return errors.New("MySQL result is 'nil' while Vitess' is not.\n") } - var errStr string - if compareColumns { - vtColCount := len(vtQr.Fields) - myColCount := len(mysqlQr.Fields) - if vtColCount > 0 && myColCount > 0 { - if vtColCount != myColCount { - t.Errorf("column count does not match: %d vs %d", vtColCount, myColCount) - errStr += fmt.Sprintf("column count does not match: %d vs %d\n", vtColCount, myColCount) - } - - var vtCols []string - var myCols []string - for i, vtField := range vtQr.Fields { - vtCols = append(vtCols, vtField.Name) - myCols = append(myCols, mysqlQr.Fields[i].Name) - } - if !assert.Equal(t, myCols, vtCols, "column names do not match - the expected values are what mysql produced") { - errStr += "column names do not match - the expected values are what mysql produced\n" - errStr += fmt.Sprintf("Not equal: \nexpected: %v\nactual: %v\n", myCols, vtCols) - } + vtColCount := len(vtQr.Fields) + myColCount := len(mysqlQr.Fields) + + if vtColCount != myColCount { + t.Errorf("column count does not match: %d vs %d", vtColCount, myColCount) + } + + 
if vtColCount > 0 { + var vtCols []string + var myCols []string + for i, vtField := range vtQr.Fields { + myField := mysqlQr.Fields[i] + checkFields(t, myField.Name, vtField, myField) + + vtCols = append(vtCols, vtField.Name) + myCols = append(myCols, myField.Name) + } + + if opts.CompareColumnNames && !assert.Equal(t, myCols, vtCols, "column names do not match - the expected values are what mysql produced") { + t.Errorf("column names do not match - the expected values are what mysql produced\nNot equal: \nexpected: %v\nactual: %v\n", myCols, vtCols) } } - stmt, err := sqlparser.Parse(query) + + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { - t.Error(err) + t.Errorf(err.Error()) return err } orderBy := false @@ -200,11 +223,16 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn orderBy = selStmt.GetOrderBy() != nil } + if opts.IgnoreRowsAffected { + vtQr.RowsAffected = 0 + mysqlQr.RowsAffected = 0 + } + if (orderBy && sqltypes.ResultsEqual([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr})) || sqltypes.ResultsEqualUnordered([]sqltypes.Result{*vtQr}, []sqltypes.Result{*mysqlQr}) { return nil } - errStr += "Query (" + query + ") results mismatched.\nVitess Results:\n" + errStr := "Query (" + query + ") results mismatched.\nVitess Results:\n" for _, row := range vtQr.Rows { errStr += fmt.Sprintf("%s\n", row) } @@ -220,14 +248,59 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn errStr += fmt.Sprintf("query plan: \n%s\n", qr.Rows[0][0].ToString()) } } - t.Error(errStr) + t.Errorf(errStr) return errors.New(errStr) } -func compareVitessAndMySQLErrors(t *testing.T, vtErr, mysqlErr error) { +// Parse the string representation of a type (i.e. "INT64") into a three elements slice. 
+// First element of the slice will contain the full expression, second element contains the +// type "INT" and the third element contains the size if there is any "64" or empty if we use +// "TIMESTAMP" for instance. +var checkFieldsRegExpr = regexp.MustCompile(`([a-zA-Z]*)(\d*)`) + +func checkFields(t TestingT, columnName string, vtField, myField *querypb.Field) { + t.Helper() + + fail := func() { + t.Errorf("for column %s field types do not match\nNot equal: \nMySQL: %v\nVitess: %v\n", columnName, myField.Type.String(), vtField.Type.String()) + } + + if vtField.Type != myField.Type { + vtMatches := checkFieldsRegExpr.FindStringSubmatch(vtField.Type.String()) + myMatches := checkFieldsRegExpr.FindStringSubmatch(myField.Type.String()) + + // Here we want to fail if we have totally different types for instance: "INT64" vs "TIMESTAMP" + // We do this by checking the length of the regexp slices and checking the second item of the slices (the real type i.e. "INT") + if len(vtMatches) != 3 || len(vtMatches) != len(myMatches) || vtMatches[1] != myMatches[1] { + fail() + return + } + vtVal, vtErr := strconv.Atoi(vtMatches[2]) + myVal, myErr := strconv.Atoi(myMatches[2]) + if vtErr != nil || myErr != nil { + fail() + return + } + + // Types the same now, however, if the size of the type is smaller on Vitess compared to MySQL + // we need to fail. We can allow superset but not the opposite. 
+ if vtVal < myVal { + fail() + return + } + } + + // starting in Vitess 20, decimal types are properly sized in their field information + if BinaryIsAtLeastAtVersion(20, "vtgate") && vtField.Type == sqltypes.Decimal { + if vtField.Decimals != myField.Decimals { + t.Errorf("for column %s field decimals count do not match\nNot equal: \nMySQL: %v\nVitess: %v\n", columnName, myField.Decimals, vtField.Decimals) + } + } +} + +func compareVitessAndMySQLErrors(t TestingT, vtErr, mysqlErr error) { if vtErr != nil && mysqlErr != nil || vtErr == nil && mysqlErr == nil { return } - out := fmt.Sprintf("Vitess and MySQL are not erroring the same way.\nVitess error: %v\nMySQL error: %v", vtErr, mysqlErr) - t.Error(out) + t.Errorf("Vitess and MySQL are not erroring the same way.\nVitess error: %v\nMySQL error: %v", vtErr, mysqlErr) } diff --git a/go/test/endtoend/utils/mysql_test.go b/go/test/endtoend/utils/mysql_test.go index de9db23dab1..d2b10f1d0a8 100644 --- a/go/test/endtoend/utils/mysql_test.go +++ b/go/test/endtoend/utils/mysql_test.go @@ -19,21 +19,26 @@ package utils import ( "context" "fmt" + "math" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/mysqlctl" + querypb "vitess.io/vitess/go/vt/proto/query" ) var ( clusterInstance *cluster.LocalProcessCluster mysqlParams mysql.ConnParams mysqld *mysqlctl.Mysqld + mycnf *mysqlctl.Mycnf keyspaceName = "ks" cell = "test" schemaSQL = `create table t1( @@ -53,7 +58,7 @@ func TestMain(m *testing.M) { var closer func() var err error - mysqlParams, mysqld, closer, err = NewMySQLWithMysqld(clusterInstance.GetAndReservePort(), clusterInstance.Hostname, keyspaceName, schemaSQL) + mysqlParams, mysqld, mycnf, closer, err = NewMySQLWithMysqld(clusterInstance.GetAndReservePort(), clusterInstance.Hostname, keyspaceName, schemaSQL) if err != nil { 
fmt.Println(err) return 1 @@ -64,6 +69,47 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } +func TestCheckFields(t *testing.T) { + createField := func(typ querypb.Type) *querypb.Field { + return &querypb.Field{ + Type: typ, + } + } + + cases := []struct { + fail bool + vtField querypb.Type + myField querypb.Type + }{ + { + vtField: querypb.Type_INT32, + myField: querypb.Type_INT32, + }, + { + vtField: querypb.Type_INT64, + myField: querypb.Type_INT32, + }, + { + fail: true, + vtField: querypb.Type_FLOAT32, + myField: querypb.Type_INT32, + }, + { + fail: true, + vtField: querypb.Type_TIMESTAMP, + myField: querypb.Type_TUPLE, + }, + } + + for _, c := range cases { + t.Run(fmt.Sprintf("%s_%s", c.vtField.String(), c.myField.String()), func(t *testing.T) { + tt := &testing.T{} + checkFields(tt, "col", createField(c.vtField), createField(c.myField)) + require.Equal(t, c.fail, tt.Failed()) + }) + } +} + func TestCreateMySQL(t *testing.T) { ctx := context.Background() conn, err := mysql.Connect(ctx, &mysqlParams) @@ -76,50 +122,424 @@ func TestCreateMySQL(t *testing.T) { func TestSetSuperReadOnlyMySQL(t *testing.T) { require.NotNil(t, mysqld) - isSuperReadOnly, _ := mysqld.IsSuperReadOnly() + isSuperReadOnly, _ := mysqld.IsSuperReadOnly(context.Background()) assert.False(t, isSuperReadOnly, "super_read_only should be set to False") - retFunc1, err := mysqld.SetSuperReadOnly(true) + retFunc1, err := mysqld.SetSuperReadOnly(context.Background(), true) assert.NotNil(t, retFunc1, "SetSuperReadOnly is supposed to return a defer function") assert.NoError(t, err, "SetSuperReadOnly should not have failed") - isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly(context.Background()) assert.True(t, isSuperReadOnly, "super_read_only should be set to True") // if value is already true then retFunc2 will be nil - retFunc2, err := mysqld.SetSuperReadOnly(true) + retFunc2, err := mysqld.SetSuperReadOnly(context.Background(), true) assert.Nil(t, 
retFunc2, "SetSuperReadOnly is supposed to return a nil function") assert.NoError(t, err, "SetSuperReadOnly should not have failed") retFunc1() - isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly(context.Background()) assert.False(t, isSuperReadOnly, "super_read_only should be set to False") - isReadOnly, _ := mysqld.IsReadOnly() + isReadOnly, _ := mysqld.IsReadOnly(context.Background()) assert.True(t, isReadOnly, "read_only should be set to True") - isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly(context.Background()) assert.False(t, isSuperReadOnly, "super_read_only should be set to False") - retFunc1, err = mysqld.SetSuperReadOnly(false) + retFunc1, err = mysqld.SetSuperReadOnly(context.Background(), false) assert.Nil(t, retFunc1, "SetSuperReadOnly is supposed to return a nil function") assert.NoError(t, err, "SetSuperReadOnly should not have failed") - _, err = mysqld.SetSuperReadOnly(true) + _, err = mysqld.SetSuperReadOnly(context.Background(), true) assert.NoError(t, err) - isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly(context.Background()) assert.True(t, isSuperReadOnly, "super_read_only should be set to True") - retFunc1, err = mysqld.SetSuperReadOnly(false) + retFunc1, err = mysqld.SetSuperReadOnly(context.Background(), false) assert.NotNil(t, retFunc1, "SetSuperReadOnly is supposed to return a defer function") assert.NoError(t, err, "SetSuperReadOnly should not have failed") - isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly(context.Background()) assert.False(t, isSuperReadOnly, "super_read_only should be set to False") // if value is already false then retFunc2 will be nil - retFunc2, err = mysqld.SetSuperReadOnly(false) + retFunc2, err = mysqld.SetSuperReadOnly(context.Background(), false) assert.Nil(t, retFunc2, "SetSuperReadOnly is supposed to return a nil function") 
assert.NoError(t, err, "SetSuperReadOnly should not have failed") retFunc1() - isSuperReadOnly, _ = mysqld.IsSuperReadOnly() + isSuperReadOnly, _ = mysqld.IsSuperReadOnly(context.Background()) assert.True(t, isSuperReadOnly, "super_read_only should be set to True") - isReadOnly, _ = mysqld.IsReadOnly() + isReadOnly, _ = mysqld.IsReadOnly(context.Background()) assert.True(t, isReadOnly, "read_only should be set to True") } + +func TestGetMysqlPort(t *testing.T) { + require.NotNil(t, mysqld) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + port, err := mysqld.GetMysqlPort(ctx) + + // Expected port should be one less than the port returned by GetAndReservePort + // As we are calling this second time to get port + want := clusterInstance.GetAndReservePort() - 1 + assert.Equal(t, want, int(port)) + assert.NoError(t, err) +} + +func TestGetServerID(t *testing.T) { + require.NotNil(t, mysqld) + + sid, err := mysqld.GetServerID(context.Background()) + assert.NoError(t, err) + assert.Equal(t, mycnf.ServerID, sid) + + suuid, err := mysqld.GetServerUUID(context.Background()) + assert.NoError(t, err) + assert.NotEmpty(t, suuid) +} + +func TestReplicationStatus(t *testing.T) { + require.NotNil(t, mysqld) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // Initially we should expect an error for no replication status + _, err := mysqld.ReplicationStatus(context.Background()) + assert.ErrorContains(t, err, "no replication status") + + conn, err := mysql.Connect(ctx, &mysqlParams) + require.NoError(t, err) + + port, err := mysqld.GetMysqlPort(ctx) + require.NoError(t, err) + host := "localhost" + + q := conn.SetReplicationSourceCommand(&mysqlParams, host, port, 0, int(port)) + res := Exec(t, conn, q) + require.NotNil(t, res) + + r, err := mysqld.ReplicationStatus(ctx) + assert.NoError(t, err) + assert.Equal(t, port, r.SourcePort) + assert.Equal(t, host, r.SourceHost) +} + +func 
TestPrimaryStatus(t *testing.T) { + require.NotNil(t, mysqld) + + res, err := mysqld.PrimaryStatus(context.Background()) + assert.NoError(t, err) + + r, err := mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + + assert.True(t, res.Position.Equal(r.Position), "primary replication status should be same as replication status here") +} + +func TestReplicationConfiguration(t *testing.T) { + require.NotNil(t, mysqld) + + replConfig, err := mysqld.ReplicationConfiguration(context.Background()) + assert.NoError(t, err) + + require.NotNil(t, replConfig) + // For a properly configured mysql, the heartbeat interval is half of the replication net timeout. + require.EqualValues(t, math.Round(replConfig.HeartbeatInterval*2), replConfig.ReplicaNetTimeout) +} + +func TestGTID(t *testing.T) { + require.NotNil(t, mysqld) + + res, err := mysqld.GetGTIDPurged(context.Background()) + assert.Empty(t, res.String()) + assert.NoError(t, err) + + primaryPosition, err := mysqld.PrimaryPosition(context.Background()) + assert.NotNil(t, primaryPosition) + assert.NoError(t, err) + + // Now we set gtid_purged for testing + conn, err := mysql.Connect(context.Background(), &mysqlParams) + require.NoError(t, err) + + gtid := "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8" + r := Exec(t, conn, fmt.Sprintf("SET GLOBAL gtid_purged='%s'", gtid)) + require.NotNil(t, r) + + res, err = mysqld.GetGTIDPurged(context.Background()) + assert.NoError(t, err) + assert.Equal(t, gtid, res.String()) + + primaryPosition, err = mysqld.PrimaryPosition(context.Background()) + assert.NoError(t, err) + assert.Contains(t, primaryPosition.String(), gtid) +} + +func TestSetReplicationPosition(t *testing.T) { + require.NotNil(t, mysqld) + + pos := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} + sid := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + pos.GTIDSet = pos.GTIDSet.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 1}) + + err := 
mysqld.SetReplicationPosition(context.Background(), pos) + assert.NoError(t, err) + + want := "00010203-0405-0607-0809-0a0b0c0d0e0f:1" + res, err := mysqld.GetGTIDPurged(context.Background()) + assert.NoError(t, err) + assert.Contains(t, res.String(), want) +} + +func TestSetAndResetReplication(t *testing.T) { + require.NotNil(t, mysqld) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + port, err := mysqld.GetMysqlPort(ctx) + require.NoError(t, err) + host := "localhost" + + var heartbeatInterval float64 = 5.4 + err = mysqld.SetReplicationSource(context.Background(), host, port, heartbeatInterval, true, true) + assert.NoError(t, err) + + r, err := mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.Equal(t, port, r.SourcePort) + assert.Equal(t, host, r.SourceHost) + + replConfig, err := mysqld.ReplicationConfiguration(context.Background()) + require.NoError(t, err) + assert.EqualValues(t, heartbeatInterval, replConfig.HeartbeatInterval) + + err = mysqld.ResetReplication(context.Background()) + assert.NoError(t, err) + + r, err = mysqld.ReplicationStatus(context.Background()) + assert.ErrorContains(t, err, "no replication status") + assert.Equal(t, "", r.SourceHost) + assert.Equal(t, int32(0), r.SourcePort) + + err = mysqld.SetReplicationSource(context.Background(), host, port, 0, true, true) + assert.NoError(t, err) + + r, err = mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.Equal(t, port, r.SourcePort) + assert.Equal(t, host, r.SourceHost) + + err = mysqld.ResetReplication(context.Background()) + assert.NoError(t, err) + + r, err = mysqld.ReplicationStatus(context.Background()) + assert.ErrorContains(t, err, "no replication status") + assert.Equal(t, "", r.SourceHost) + assert.Equal(t, int32(0), r.SourcePort) +} + +func TestGetBinlogInformation(t *testing.T) { + require.NotNil(t, mysqld) + + // Default values + binlogFormat, logBin, logReplicaUpdates, 
binlogRowImage, err := mysqld.GetBinlogInformation(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "ROW", binlogFormat) + assert.True(t, logBin) + assert.True(t, logReplicaUpdates) + assert.Equal(t, "FULL", binlogRowImage) + + conn, err := mysql.Connect(context.Background(), &mysqlParams) + require.NoError(t, err) + + res := Exec(t, conn, "SET GLOBAL binlog_format = 'STATEMENT'") + require.NotNil(t, res) + + res = Exec(t, conn, "SET GLOBAL binlog_row_image = 'MINIMAL'") + require.NotNil(t, res) + + binlogFormat, logBin, logReplicaUpdates, binlogRowImage, err = mysqld.GetBinlogInformation(context.Background()) + assert.NoError(t, err) + assert.Equal(t, "STATEMENT", binlogFormat) + assert.True(t, logBin) + assert.True(t, logReplicaUpdates) + assert.Equal(t, "MINIMAL", binlogRowImage) + + // Set to default + res = Exec(t, conn, "SET GLOBAL binlog_format = 'ROW'") + require.NotNil(t, res) + + res = Exec(t, conn, "SET GLOBAL binlog_row_image = 'FULL'") + require.NotNil(t, res) +} + +func TestGetGTIDMode(t *testing.T) { + require.NotNil(t, mysqld) + + // Default value + ctx := context.Background() + res, err := mysqld.GetGTIDMode(ctx) + assert.NoError(t, err) + assert.Equal(t, "ON", res) + + conn, err := mysql.Connect(context.Background(), &mysqlParams) + require.NoError(t, err) + + // Change value for the purpose of testing + r := Exec(t, conn, "SET GLOBAL gtid_mode = 'ON_PERMISSIVE'") + require.NotNil(t, r) + + res, err = mysqld.GetGTIDMode(ctx) + assert.NoError(t, err) + assert.Equal(t, "ON_PERMISSIVE", res) + + // Back to default + r = Exec(t, conn, "SET GLOBAL gtid_mode = 'ON'") + require.NotNil(t, r) +} + +func TestBinaryLogs(t *testing.T) { + require.NotNil(t, mysqld) + + res, err := mysqld.GetBinaryLogs(context.Background()) + assert.NoError(t, err) + oldNumLogs := len(res) + + err = mysqld.FlushBinaryLogs(context.Background()) + assert.NoError(t, err) + + res, err = mysqld.GetBinaryLogs(context.Background()) + assert.NoError(t, err) + newNumLogs 
:= len(res) + assert.Equal(t, 1, newNumLogs-oldNumLogs, "binary logs should have been flushed once") +} + +func TestGetPreviousGTIDs(t *testing.T) { + require.NotNil(t, mysqld) + + res, err := mysqld.GetBinaryLogs(context.Background()) + require.NoError(t, err) + require.NotEmpty(t, res) + + ctx := context.Background() + r, err := mysqld.GetPreviousGTIDs(ctx, res[0]) + assert.NoError(t, err) + assert.Empty(t, r) + + _, err = mysqld.GetPreviousGTIDs(ctx, "invalid_binlog_file") + assert.ErrorContains(t, err, "Could not find target log") +} + +func TestSemiSyncEnabled(t *testing.T) { + require.NotNil(t, mysqld) + + err := mysqld.SetSemiSyncEnabled(context.Background(), true, false) + assert.NoError(t, err) + + p, r := mysqld.SemiSyncEnabled(context.Background()) + assert.True(t, p) + assert.False(t, r) + + err = mysqld.SetSemiSyncEnabled(context.Background(), false, true) + assert.NoError(t, err) + + p, r = mysqld.SemiSyncEnabled(context.Background()) + assert.False(t, p) + assert.True(t, r) +} + +func TestWaitForReplicationStart(t *testing.T) { + require.NotNil(t, mysqld) + + err := mysqlctl.WaitForReplicationStart(context.Background(), mysqld, 1) + assert.ErrorContains(t, err, "no replication status") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + port, err := mysqld.GetMysqlPort(ctx) + require.NoError(t, err) + host := "localhost" + + err = mysqld.SetReplicationSource(context.Background(), host, port, 0, true, true) + assert.NoError(t, err) + + err = mysqlctl.WaitForReplicationStart(context.Background(), mysqld, 1) + assert.NoError(t, err) + + err = mysqld.ResetReplication(context.Background()) + require.NoError(t, err) +} + +func TestStartReplication(t *testing.T) { + require.NotNil(t, mysqld) + + err := mysqld.StartReplication(context.Background(), map[string]string{}) + assert.ErrorContains(t, err, "The server is not configured as replica") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 
+ defer cancel() + port, err := mysqld.GetMysqlPort(ctx) + require.NoError(t, err) + host := "localhost" + + // Set startReplicationAfter to false as we want to test StartReplication here + err = mysqld.SetReplicationSource(context.Background(), host, port, 0, true, false) + assert.NoError(t, err) + + err = mysqld.StartReplication(context.Background(), map[string]string{}) + assert.NoError(t, err) + + err = mysqld.ResetReplication(context.Background()) + require.NoError(t, err) +} + +func TestStopReplication(t *testing.T) { + require.NotNil(t, mysqld) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + port, err := mysqld.GetMysqlPort(ctx) + require.NoError(t, err) + host := "localhost" + + err = mysqld.SetReplicationSource(context.Background(), host, port, 0, true, true) + assert.NoError(t, err) + + r, err := mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.Equal(t, host, r.SourceHost) + assert.Equal(t, port, r.SourcePort) + assert.Equal(t, replication.ReplicationStateRunning, r.SQLState) + + err = mysqld.StopReplication(context.Background(), map[string]string{}) + assert.NoError(t, err) + + r, err = mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.Equal(t, replication.ReplicationStateStopped, r.SQLState) +} + +func TestStopSQLThread(t *testing.T) { + require.NotNil(t, mysqld) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + port, err := mysqld.GetMysqlPort(ctx) + require.NoError(t, err) + host := "localhost" + + err = mysqld.SetReplicationSource(context.Background(), host, port, 0, true, true) + assert.NoError(t, err) + + r, err := mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.Equal(t, host, r.SourceHost) + assert.Equal(t, port, r.SourcePort) + assert.Equal(t, replication.ReplicationStateRunning, r.SQLState) + + err = mysqld.StopSQLThread(context.Background()) + 
assert.NoError(t, err) + + r, err = mysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.Equal(t, replication.ReplicationStateStopped, r.SQLState) +} diff --git a/go/test/endtoend/utils/utils.go b/go/test/endtoend/utils/utils.go index fa270ba30a0..345f1e787b4 100644 --- a/go/test/endtoend/utils/utils.go +++ b/go/test/endtoend/utils/utils.go @@ -88,7 +88,13 @@ func AssertMatchesAny(t testing.TB, conn *mysql.Conn, query string, expected ... return } } - t.Errorf("Query: %s (-want +got):\n%v\nGot:%s", query, expected, got) + + var err strings.Builder + _, _ = fmt.Fprintf(&err, "Query did not match:\n%s\n", query) + for i, e := range expected { + _, _ = fmt.Fprintf(&err, "Expected query %d does not match.\nwant: %v\ngot: %v\n\n", i, e, got) + } + t.Error(err.String()) } // AssertMatchesCompareMySQL executes the given query on both Vitess and MySQL and make sure @@ -154,6 +160,15 @@ func Exec(t testing.TB, conn *mysql.Conn, query string) *sqltypes.Result { return qr } +// ExecMulti executes the given (potential multi) queries using the given connection. +// The test fails if any of the queries produces an error +func ExecMulti(t testing.TB, conn *mysql.Conn, query string) error { + t.Helper() + err := conn.ExecuteFetchMultiDrain(query) + require.NoError(t, err, "for query: "+query) + return err +} + // ExecCompareMySQL executes the given query against both Vitess and MySQL and compares // the two result set. If there is a mismatch, the difference will be printed and the // test will fail. 
If the query produces an error in either Vitess or MySQL, the test @@ -166,13 +181,13 @@ func ExecCompareMySQL(t *testing.T, vtConn, mysqlConn *mysql.Conn, query string) mysqlQr, err := mysqlConn.ExecuteFetch(query, 1000, true) require.NoError(t, err, "[MySQL Error] for query: "+query) - compareVitessAndMySQLResults(t, query, vtConn, vtQr, mysqlQr, false) + compareVitessAndMySQLResults(t, query, vtConn, vtQr, mysqlQr, CompareOptions{}) return vtQr } // ExecAllowError executes the given query without failing the test if it produces // an error. The error is returned to the client, along with the result set. -func ExecAllowError(t testing.TB, conn *mysql.Conn, query string) (*sqltypes.Result, error) { +func ExecAllowError(t TestingT, conn *mysql.Conn, query string) (*sqltypes.Result, error) { t.Helper() return conn.ExecuteFetch(query, 1000, true) } @@ -229,23 +244,24 @@ func AssertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected st } // WaitForAuthoritative waits for a table to become authoritative -func WaitForAuthoritative(t *testing.T, ks, tbl string, readVSchema func() (*interface{}, error)) error { +func WaitForAuthoritative(t TestingT, ks, tbl string, readVSchema func() (*interface{}, error)) error { timeout := time.After(60 * time.Second) for { select { case <-timeout: - return fmt.Errorf("schema tracking didn't mark table t2 as authoritative until timeout") + return fmt.Errorf("schema tracking didn't mark table %v.%v as authoritative until timeout", ks, tbl) default: - time.Sleep(1 * time.Second) res, err := readVSchema() require.NoError(t, err, res) t2Map := getTableT2Map(res, ks, tbl) authoritative, fieldPresent := t2Map["column_list_authoritative"] if !fieldPresent { + time.Sleep(100 * time.Millisecond) continue } authoritativeBool, isBool := authoritative.(bool) if !isBool || !authoritativeBool { + time.Sleep(100 * time.Millisecond) continue } return nil @@ -255,68 +271,95 @@ func WaitForAuthoritative(t *testing.T, ks, tbl string, 
readVSchema func() (*int // WaitForKsError waits for the ks error field to be populated and returns it. func WaitForKsError(t *testing.T, vtgateProcess cluster.VtgateProcess, ks string) string { + var errString string + WaitForVschemaCondition(t, vtgateProcess, ks, func(t *testing.T, keyspace map[string]interface{}) bool { + ksErr, fieldPresent := keyspace["error"] + if !fieldPresent { + return false + } + var ok bool + errString, ok = ksErr.(string) + return ok + }, "Waiting for error") + return errString +} + +// WaitForVschemaCondition waits for the condition to be true +func WaitForVschemaCondition( + t *testing.T, + vtgateProcess cluster.VtgateProcess, + ks string, + conditionMet func(t *testing.T, keyspace map[string]interface{}) bool, + message string, +) { timeout := time.After(60 * time.Second) for { select { case <-timeout: - t.Fatalf("schema tracking did not find error in '%s'", ks) - return "" + t.Fatalf("schema tracking did not met the condition within the time for keyspace: %s\n%s", ks, message) default: - time.Sleep(1 * time.Second) res, err := vtgateProcess.ReadVSchema() require.NoError(t, err, res) kss := convertToMap(*res)["keyspaces"] ksMap := convertToMap(convertToMap(kss)[ks]) - ksErr, fieldPresent := ksMap["error"] - if !fieldPresent { - break - } - errString, isErr := ksErr.(string) - if !isErr { - break + if conditionMet(t, ksMap) { + return } - return errString + time.Sleep(100 * time.Millisecond) } } } +// WaitForTableDeletions waits for a table to be deleted +func WaitForTableDeletions(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl string) { + WaitForVschemaCondition(t, vtgateProcess, ks, func(t *testing.T, keyspace map[string]interface{}) bool { + tablesMap := keyspace["tables"] + _, isPresent := convertToMap(tablesMap)[tbl] + return !isPresent + }, "Waiting for table to be deleted") +} + // WaitForColumn waits for a table's column to be present -func WaitForColumn(t *testing.T, vtgateProcess cluster.VtgateProcess, ks, tbl, 
col string) error { +func WaitForColumn(t TestingT, vtgateProcess cluster.VtgateProcess, ks, tbl, col string) error { timeout := time.After(60 * time.Second) for { select { case <-timeout: return fmt.Errorf("schema tracking did not find column '%s' in table '%s'", col, tbl) default: - time.Sleep(1 * time.Second) res, err := vtgateProcess.ReadVSchema() require.NoError(t, err, res) t2Map := getTableT2Map(res, ks, tbl) authoritative, fieldPresent := t2Map["column_list_authoritative"] if !fieldPresent { - break + time.Sleep(100 * time.Millisecond) + continue } authoritativeBool, isBool := authoritative.(bool) if !isBool || !authoritativeBool { - break + time.Sleep(100 * time.Millisecond) + continue } colMap, exists := t2Map["columns"] if !exists { - break + time.Sleep(100 * time.Millisecond) + continue } colList, isSlice := colMap.([]interface{}) if !isSlice { - break + time.Sleep(100 * time.Millisecond) + continue } for _, c := range colList { colDef, isMap := c.(map[string]interface{}) if !isMap { break } - if colName, exists := colDef["name"]; exists && colName == col { + if colName, exists := colDef["name"]; exists && strings.EqualFold(colName.(string), col) { return nil } } + time.Sleep(100 * time.Millisecond) } } } @@ -330,7 +373,10 @@ func getTableT2Map(res *interface{}, ks, tbl string) map[string]interface{} { } func convertToMap(input interface{}) map[string]interface{} { - output := input.(map[string]interface{}) + output, ok := input.(map[string]interface{}) + if !ok { + return make(map[string]interface{}) + } return output } diff --git a/go/test/endtoend/vault/vault_test.go b/go/test/endtoend/vault/vault_test.go index 9bc5b9cb977..f8e19c07a0c 100644 --- a/go/test/endtoend/vault/vault_test.go +++ b/go/test/endtoend/vault/vault_test.go @@ -56,15 +56,13 @@ var ( vtgateUser = "vtgate_user" vtgatePassword = "password123" commonTabletArg = []string{ - "--vreplication_healthcheck_topology_refresh", "1s", - "--vreplication_healthcheck_retry_delay", "1s", 
"--vreplication_retry_delay", "1s", "--degraded_threshold", "5s", "--lock_tables_timeout", "5s", "--watch_replication_stream", // Frequently reload schema, generating some tablet traffic, // so we can speed up token refresh - "--queryserver-config-schema-reload-time", "5", + "--queryserver-config-schema-reload-time", "5s", "--serving_state_grace_period", "1s"} vaultTabletArg = []string{ "--db-credentials-server", "vault", @@ -285,7 +283,7 @@ func initializeClusterLate(t *testing.T) { tablet.MysqlctlProcess.ExtraArgs = append(tablet.MysqlctlProcess.ExtraArgs, mysqlctlArg...) } - err = clusterInstance.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) + err = clusterInstance.VtctldClientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID) require.NoError(t, err) err = clusterInstance.StartVTOrc(keyspaceName) diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index 87f7f9e8675..181b5dfc9ad 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -148,7 +148,7 @@ func TestDeploySchema(t *testing.T) { { sqlQuery := fmt.Sprintf(createTable, tableName) - result, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ""}) + result, err := clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.ApplySchemaParams{DDLStrategy: ""}) require.Nil(t, err, result) } for i := range clusterInstance.Keyspaces[0].Shards { diff --git a/go/test/endtoend/vreplication/cluster_test.go b/go/test/endtoend/vreplication/cluster_test.go index af93ac40726..ddd323f7d3f 100644 --- a/go/test/endtoend/vreplication/cluster_test.go +++ b/go/test/endtoend/vreplication/cluster_test.go @@ -19,7 +19,7 @@ package vreplication import ( "fmt" "io" - "math/rand" + "math/rand/v2" "net/http" "os" "os/exec" @@ -30,6 +30,8 @@ import 
( "testing" "time" + "vitess.io/vitess/go/vt/vttablet" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/mysqlctl" @@ -54,8 +56,9 @@ var ( sidecarDBIdentifier = sqlparser.String(sqlparser.NewIdentifierCS(sidecarDBName)) mainClusterConfig *ClusterConfig externalClusterConfig *ClusterConfig - extraVTGateArgs = []string{"--tablet_refresh_interval", "10ms", "--enable_buffer", "--buffer_window", loadTestBufferingWindowDurationStr, - "--buffer_size", "100000", "--buffer_min_time_between_failovers", "0s", "--buffer_max_failover_duration", loadTestBufferingWindowDurationStr} + extraVTGateArgs = []string{"--tablet_refresh_interval", "10ms", "--enable_buffer", "--buffer_window", loadTestBufferingWindowDuration.String(), + "--buffer_size", "250000", "--buffer_min_time_between_failovers", "1s", "--buffer_max_failover_duration", loadTestBufferingWindowDuration.String(), + "--buffer_drain_concurrency", "10"} extraVtctldArgs = []string{"--remote_operation_timeout", "600s", "--topo_etcd_lease_ttl", "120"} // This variable can be used within specific tests to alter vttablet behavior extraVTTabletArgs = []string{} @@ -87,11 +90,33 @@ type ClusterConfig struct { vreplicationCompressGTID bool } +// enableGTIDCompression enables GTID compression for the cluster and returns a function +// that can be used to disable it in a defer. +func (cc *ClusterConfig) enableGTIDCompression() func() { + cc.vreplicationCompressGTID = true + return func() { + cc.vreplicationCompressGTID = false + } +} + +// setAllVTTabletExperimentalFlags sets all the experimental flags for vttablet and returns a function +// that can be used to reset them in a defer. 
+func setAllVTTabletExperimentalFlags() func() { + experimentalArgs := fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching) + oldArgs := extraVTTabletArgs + extraVTTabletArgs = append(extraVTTabletArgs, experimentalArgs) + return func() { + extraVTTabletArgs = oldArgs + } +} + // VitessCluster represents all components within the test cluster type VitessCluster struct { t *testing.T ClusterConfig *ClusterConfig Name string + CellNames []string Cells map[string]*Cell Topo *cluster.TopoProcess Vtctld *cluster.VtctldProcess @@ -132,7 +157,7 @@ type Tablet struct { } func setTempVtDataRoot() string { - dirSuffix := 100000 + rand.Intn(999999-100000) // 6 digits + dirSuffix := 100000 + rand.IntN(999999-100000) // 6 digits if debugMode { vtdataroot = originalVtdataroot } else { @@ -332,9 +357,28 @@ func init() { externalClusterConfig = getClusterConfig(1, mainVtDataRoot+"/ext") } +type clusterOptions struct { + cells []string + clusterConfig *ClusterConfig +} + +func getClusterOptions(opts *clusterOptions) *clusterOptions { + if opts == nil { + opts = &clusterOptions{} + } + if opts.cells == nil { + opts.cells = []string{"zone1"} + } + if opts.clusterConfig == nil { + opts.clusterConfig = mainClusterConfig + } + return opts +} + // NewVitessCluster starts a basic cluster with vtgate, vtctld and the topo -func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConfig *ClusterConfig) *VitessCluster { - vc := &VitessCluster{t: t, Name: name, Cells: make(map[string]*Cell), ClusterConfig: clusterConfig} +func NewVitessCluster(t *testing.T, opts *clusterOptions) *VitessCluster { + opts = getClusterOptions(opts) + vc := &VitessCluster{t: t, Name: t.Name(), CellNames: opts.cells, Cells: make(map[string]*Cell), ClusterConfig: opts.clusterConfig} require.NotNil(t, vc) vc.CleanupDataroot(t, 
true) @@ -346,32 +390,46 @@ func NewVitessCluster(t *testing.T, name string, cellNames []string, clusterConf err := topo.ManageTopoDir("mkdir", "/vitess/global") require.NoError(t, err) vc.Topo = topo - for _, cellName := range cellNames { + for _, cellName := range opts.cells { err := topo.ManageTopoDir("mkdir", "/vitess/"+cellName) require.NoError(t, err) } - vtctld := cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, + vc.setupVtctld() + vc.setupVtctl() + vc.setupVtctlClient() + vc.setupVtctldClient() + + return vc +} + +func (vc *VitessCluster) setupVtctld() { + vc.Vtctld = cluster.VtctldProcessInstance(vc.ClusterConfig.vtctldPort, vc.ClusterConfig.vtctldGrpcPort, vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname, vc.ClusterConfig.tmpDir) - vc.Vtctld = vtctld - require.NotNil(t, vc.Vtctld) + require.NotNil(vc.t, vc.Vtctld) // use first cell as `-cell` - vc.Vtctld.Setup(cellNames[0], extraVtctldArgs...) + vc.Vtctld.Setup(vc.CellNames[0], extraVtctldArgs...) 
+} +func (vc *VitessCluster) setupVtctl() { vc.Vtctl = cluster.VtctlProcessInstance(vc.ClusterConfig.topoPort, vc.ClusterConfig.hostname) - require.NotNil(t, vc.Vtctl) - for _, cellName := range cellNames { + require.NotNil(vc.t, vc.Vtctl) + for _, cellName := range vc.CellNames { vc.Vtctl.AddCellInfo(cellName) - cell, err := vc.AddCell(t, cellName) - require.NoError(t, err) - require.NotNil(t, cell) + cell, err := vc.AddCell(vc.t, cellName) + require.NoError(vc.t, err) + require.NotNil(vc.t, cell) } +} +func (vc *VitessCluster) setupVtctlClient() { vc.VtctlClient = cluster.VtctlClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) - require.NotNil(t, vc.VtctlClient) + require.NotNil(vc.t, vc.VtctlClient) +} + +func (vc *VitessCluster) setupVtctldClient() { vc.VtctldClient = cluster.VtctldClientProcessInstance(vc.ClusterConfig.hostname, vc.Vtctld.GrpcPort, vc.ClusterConfig.tmpDir) - require.NotNil(t, vc.VtctldClient) - return vc + require.NotNil(vc.t, vc.VtctldClient) } // CleanupDataroot deletes the vtdataroot directory. Since we run multiple tests sequentially in a single CI test shard, @@ -385,8 +443,19 @@ func (vc *VitessCluster) CleanupDataroot(t *testing.T, recreate bool) { return } dir := vc.ClusterConfig.vtdataroot - log.Infof("Deleting vtdataroot %s", dir) - err := os.RemoveAll(dir) + // The directory cleanup sometimes fails with a "directory not empty" error as + // everything in the test is shutting down and cleaning up. So we retry a few + // times to deal with that non-problematic and ephemeral issue. 
+ var err error + retries := 3 + for i := 1; i <= retries; i++ { + if err = os.RemoveAll(dir); err == nil { + log.Infof("Deleted vtdataroot %q", dir) + break + } + log.Errorf("Failed to delete vtdataroot (attempt %d of %d) %q: %v", i, retries, dir, err) + time.Sleep(1 * time.Second) + } require.NoError(t, err) if recreate { err = os.Mkdir(dir, 0700) @@ -419,8 +488,14 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, cell.Keyspaces[ksName] = keyspace cellsToWatch = cellsToWatch + cell.Name } - require.NoError(t, vc.AddShards(t, cells, keyspace, shards, numReplicas, numRdonly, tabletIDBase, opts)) + for _, cell := range cells { + if len(cell.Vtgates) == 0 { + log.Infof("Starting vtgate") + vc.StartVtgate(t, cell, cellsToWatch) + } + } + require.NoError(t, vc.AddShards(t, cells, keyspace, shards, numReplicas, numRdonly, tabletIDBase, opts)) if schema != "" { if err := vc.VtctlClient.ApplySchema(ksName, schema); err != nil { t.Fatalf(err.Error()) @@ -433,12 +508,6 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, } } keyspace.VSchema = vschema - for _, cell := range cells { - if len(cell.Vtgates) == 0 { - log.Infof("Starting vtgate") - vc.StartVtgate(t, cell, cellsToWatch) - } - } err = vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", ksName) require.NoError(t, err) @@ -450,10 +519,9 @@ func (vc *VitessCluster) AddTablet(t testing.TB, cell *Cell, keyspace *Keyspace, tablet := &Tablet{} options := []string{ - "--queryserver-config-schema-reload-time", "5", "--heartbeat_on_demand_duration", "5s", "--heartbeat_interval", "250ms", - } // FIXME: for multi-cell initial schema doesn't seem to load without "--queryserver-config-schema-reload-time" + } options = append(options, extraVTTabletArgs...) 
if mainClusterConfig.vreplicationCompressGTID { @@ -514,11 +582,11 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } } - arrNames := strings.Split(names, ",") - log.Infof("Addshards got %d shards with %+v", len(arrNames), arrNames) - isSharded := len(arrNames) > 1 + shardNames := strings.Split(names, ",") + log.Infof("Addshards got %d shards with %+v", len(shardNames), shardNames) + isSharded := len(shardNames) > 1 primaryTabletUID := 0 - for ind, shardName := range arrNames { + for ind, shardName := range shardNames { tabletID := tabletIDBase + ind*100 tabletIndex := 0 shard := &Shard{Name: shardName, IsSharded: isSharded, Tablets: make(map[string]*Tablet, 1)} @@ -541,10 +609,10 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa require.NoError(t, err) require.NotNil(t, primary) tabletIndex++ - primary.Vttablet.VreplicationTabletType = "PRIMARY" tablets = append(tablets, primary) dbProcesses = append(dbProcesses, proc) primaryTabletUID = primary.Vttablet.TabletUID + primary.Vttablet.IsPrimary = true } for i := 0; i < numReplicas; i++ { @@ -616,6 +684,12 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa if err := tablet.Vttablet.Setup(); err != nil { t.Fatalf(err.Error()) } + // Set time_zone to UTC for all tablets. Without this it fails locally on some MacOS setups. 
+ query := "SET GLOBAL time_zone = '+00:00';" + qr, err := tablet.Vttablet.QueryTablet(query, tablet.Vttablet.Keyspace, false) + if err != nil { + t.Fatalf("failed to set time_zone: %v, output: %v", err, qr) + } } } require.NotEqual(t, 0, primaryTabletUID, "Should have created a primary tablet") @@ -624,12 +698,45 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa log.Infof("Finished creating shard %s", shard.Name) } + for _, shard := range shardNames { + require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, keyspace.Name, shard)) + } + + waitTimeout := 30 * time.Second + vtgate := cells[0].Vtgates[0] + for _, shardName := range shardNames { + shard := keyspace.Shards[shardName] + numReplicas, numRDOnly := 0, 0 + for _, tablet := range shard.Tablets { + switch strings.ToLower(tablet.Vttablet.TabletType) { + case "replica": + numReplicas++ + case "rdonly": + numRDOnly++ + } + } + numReplicas-- // account for primary, which also has replica type + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspace.Name, shard.Name), 1, waitTimeout); err != nil { + return err + } + if numReplicas > 0 { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace.Name, shard.Name), numReplicas, waitTimeout); err != nil { + return err + } + } + if numRDOnly > 0 { + if err := vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", keyspace.Name, shard.Name), numRDOnly, waitTimeout); err != nil { + return err + } + } + } err := vc.VtctlClient.ExecuteCommand("RebuildKeyspaceGraph", keyspace.Name) require.NoError(t, err) log.Infof("Waiting for throttler config to be applied on all shards") - for _, shard := range keyspace.Shards { + for _, shardName := range shardNames { + shard := keyspace.Shards[shardName] for _, tablet := range shard.Tablets { clusterTablet := &cluster.Vttablet{ Alias: tablet.Name, @@ -640,7 +747,6 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, 
keyspace *Keyspa } } log.Infof("Throttler config applied on all shards") - return nil } @@ -751,7 +857,7 @@ func (vc *VitessCluster) teardown() { } // TearDown brings down a cluster, deleting processes, removing topo keys -func (vc *VitessCluster) TearDown(t *testing.T) { +func (vc *VitessCluster) TearDown() { if debugMode { return } @@ -768,7 +874,7 @@ func (vc *VitessCluster) TearDown(t *testing.T) { } // some processes seem to hang around for a bit time.Sleep(5 * time.Second) - vc.CleanupDataroot(t, false) + vc.CleanupDataroot(vc.t, false) } func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess { @@ -776,7 +882,7 @@ func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName tablets := make(map[string]*cluster.VttabletProcess) for _, shard := range keyspace.Shards { for _, tablet := range shard.Tablets { - if tablet.Vttablet.GetTabletStatus() == "SERVING" && strings.EqualFold(tablet.Vttablet.VreplicationTabletType, tabletType) { + if tablet.Vttablet.GetTabletStatus() == "SERVING" { log.Infof("Serving status of tablet %s is %s, %s", tablet.Name, tablet.Vttablet.ServingStatus, tablet.Vttablet.GetTabletStatus()) tablets[tablet.Name] = tablet.Vttablet } @@ -796,13 +902,13 @@ func (vc *VitessCluster) getPrimaryTablet(t *testing.T, ksName, shardName string continue } for _, tablet := range shard.Tablets { - if tablet.Vttablet.GetTabletStatus() == "SERVING" && strings.EqualFold(tablet.Vttablet.VreplicationTabletType, "primary") { + if tablet.Vttablet.IsPrimary { return tablet.Vttablet } } } } - require.FailNow(t, "no primary found for %s:%s", ksName, shardName) + require.FailNow(t, "no primary found", "keyspace %s, shard %s", ksName, shardName) return nil } @@ -810,6 +916,13 @@ func (vc *VitessCluster) GetVTGateConn(t *testing.T) *mysql.Conn { return getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) } +func getVTGateConn() 
(*mysql.Conn, func()) { + vtgateConn := vc.GetVTGateConn(vc.t) + return vtgateConn, func() { + vtgateConn.Close() + } +} + func (vc *VitessCluster) startQuery(t *testing.T, query string) (func(t *testing.T), func(t *testing.T)) { conn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) _, err := conn.ExecuteFetch("begin", 1000, false) diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 0e430548a13..a37ebe77b94 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -16,13 +16,20 @@ limitations under the License. package vreplication +import ( + "fmt" + "strings" +) + // The product, customer, Lead, Lead-1 tables are used to exercise and test most Workflow variants. // We violate the NO_ZERO_DATES and NO_ZERO_IN_DATE sql_modes that are enabled by default in // MySQL 5.7+ and MariaDB 10.2+ to ensure that vreplication still works everywhere and the // permissive sql_mode now used in vreplication causes no unwanted side effects. -// The customer table also tests two important things: +// The customer table also tests several important things: // 1. Composite or multi-column primary keys // 2. PKs that contain an ENUM column +// 3. That we properly handle tables with auto_increment columns (which are stripped by default when +// moving the table to a sharded keyspace with vtctldclient and left in place when using vtctlclient) // // The Lead and Lead-1 tables also allows us to test several things: // 1. Mixed case identifiers @@ -38,9 +45,10 @@ package vreplication // default collation as it has to work across versions and the 8.0 default does not exist in 5.7. var ( // All standard user tables should have a primary key and at least one secondary key. 
- initialProductSchema = ` + customerTypes = []string{"'individual'", "'soho'", "'enterprise'"} + initialProductSchema = fmt.Sprintf(` create table product(pid int, description varbinary(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid), key(date1,date2)) CHARSET=utf8mb4; -create table customer(cid int, name varchar(128) collate utf8mb4_bin, meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'), +create table customer(cid int auto_increment, name varchar(128) collate utf8mb4_bin, meta json default null, typ enum(%s), sport set('football','cricket','baseball'), ts timestamp not null default current_timestamp, bits bit(2) default b'11', date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', dec80 decimal(8,0), blb blob, primary key(cid,typ), key(name)) CHARSET=utf8mb4; create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; @@ -49,18 +57,19 @@ create table orders(oid int, cid int, pid int, mname varchar(128), price int, qt create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; create table customer2(cid int, name varchar(128), typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid), key(ts)) CHARSET=utf8; create table customer_seq2(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; -create table ` + "`Lead`(`Lead-id`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (` + "`Lead-id`" + `), key (date1)); -create table ` + "`Lead-1`(`Lead`" + ` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime 
not null default '2021-00-01 00:00:00', primary key (` + "`Lead`" + `), key (date2)); +create table `+"`Lead`(`Lead-id`"+` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (`+"`Lead-id`"+`), key (date1)); +create table `+"`Lead-1`(`Lead`"+` binary(16), name varbinary(16), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key (`+"`Lead`"+`), key (date2)); create table _vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431(id int, val varbinary(128), primary key(id), key(val)); create table db_order_test (c_uuid varchar(64) not null default '', created_at datetime not null, dstuff varchar(128), dtstuff text, dbstuff blob, cstuff char(32), primary key (c_uuid,created_at), key (dstuff)) CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; create table vdiff_order (order_id varchar(50) collate utf8mb4_unicode_ci not null, primary key (order_id), key (order_id)) charset=utf8mb4 COLLATE=utf8mb4_unicode_ci; create table datze (id int, dt1 datetime not null default current_timestamp, dt2 datetime not null, ts1 timestamp default current_timestamp, primary key (id), key (dt1)); create table json_tbl (id int, j1 json, j2 json, j3 json not null, primary key(id)); create table geom_tbl (id int, g geometry, p point, ls linestring, pg polygon, mp multipoint, mls multilinestring, mpg multipolygon, gc geometrycollection, primary key(id)); -create table ` + "`blüb_tbl`" + ` (id int, val1 varchar(20), ` + "`blöb1`" + ` blob, val2 varbinary(20), ` + "`bl@b2`" + ` longblob, txt1 text, blb3 tinyblob, txt2 longtext, blb4 mediumblob, primary key(id)); +create table `+"`blüb_tbl`"+` (id int, val1 varchar(20), `+"`blöb1`"+` blob, val2 varbinary(20), `+"`bl@b2`"+` longblob, txt1 text, blb3 tinyblob, txt2 longtext, blb4 mediumblob, primary key(id)); create table reftable (id int, val1 varchar(20), primary key(id), key(val1)); 
create table loadtest (id int, name varchar(256), primary key(id), key(name)); -` +create table nopk (name varchar(128), age int unsigned); +`, strings.Join(customerTypes, ",")) // These should always be ignored in vreplication internalSchema = ` create table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho(id int, val varbinary(128), primary key(id)); @@ -94,6 +103,7 @@ create table loadtest (id int, name varchar(256), primary key(id), key(name)); "db_order_test": {}, "vdiff_order": {}, "datze": {}, + "nopk": {}, "reftable": { "type": "reference" } @@ -140,6 +150,22 @@ create table loadtest (id int, name varchar(256), primary key(id), key(name)); "sequence": "customer_seq" } }, + "customer_name": { + "column_vindexes": [ + { + "column": "cid", + "name": "xxhash" + } + ] + }, + "enterprise_customer": { + "column_vindexes": [ + { + "column": "cid", + "name": "xxhash" + } + ] + }, "customer2": { "column_vindexes": [ { @@ -216,6 +242,14 @@ create table loadtest (id int, name varchar(256), primary key(id), key(name)); } ] }, + "nopk": { + "column_vindexes": [ + { + "columns": ["name"], + "name": "unicode_loose_md5" + } + ] + }, "reftable": { "type": "reference" } @@ -389,6 +423,32 @@ create table loadtest (id int, name varchar(256), primary key(id), key(name)); "create_ddl": "create table cproduct(pid bigint, description varchar(128), date1 datetime not null default '0000-00-00 00:00:00', date2 datetime not null default '2021-00-01 00:00:00', primary key(pid)) CHARSET=utf8mb4" }] } +` + + materializeCustomerNameSpec = ` +{ + "workflow": "customer_name", + "source_keyspace": "customer", + "target_keyspace": "customer", + "table_settings": [{ + "target_table": "customer_name", + "source_expression": "select cid, name from customer", + "create_ddl": "create table if not exists customer_name (cid bigint not null, name varchar(128), primary key(cid), key(name))" + }] +} +` + + materializeCustomerTypeSpec = ` +{ + "workflow": "enterprise_customer", + 
"source_keyspace": "customer", + "target_keyspace": "customer", + "table_settings": [{ + "target_table": "enterprise_customer", + "source_expression": "select cid, name, typ from customer where typ = 'enterprise'", + "create_ddl": "create table if not exists enterprise_customer (cid bigint not null, name varchar(128), typ varchar(64), primary key(cid), key(typ))" + }] +} ` merchantOrdersVSchema = ` diff --git a/go/test/endtoend/vreplication/fk_config_test.go b/go/test/endtoend/vreplication/fk_config_test.go index 5b02aeb62bb..db446b78b5a 100644 --- a/go/test/endtoend/vreplication/fk_config_test.go +++ b/go/test/endtoend/vreplication/fk_config_test.go @@ -16,20 +16,39 @@ limitations under the License. package vreplication +// The tables in the schema are selected so that we have one parent/child table with names in reverse lexical order +// (child before parent), t1,t2 are in lexical order, and t11,t12 have valid circular foreign key constraints. var ( initialFKSchema = ` create table parent(id int, name varchar(128), primary key(id)) engine=innodb; create table child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade) engine=innodb; +create view vparent as select * from parent; +create table t1(id int, name varchar(128), primary key(id)) engine=innodb; +create table t2(id int, t1id int, name varchar(128), primary key(id), foreign key(t1id) references t1(id) on delete cascade) engine=innodb; +create table t11 (id int primary key, i int); +create table t12 (id int primary key, i int); +alter table t11 add constraint f11 foreign key (i) references t12 (id); +alter table t12 add constraint f12 foreign key (i) references t11 (id); ` initialFKData = ` insert into parent values(1, 'parent1'), (2, 'parent2'); -insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32');` +insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32'); +insert into t1 values(1, 
't11'), (2, 't12'); +insert into t2 values(1, 1, 't21'), (2, 1, 't22'), (3, 2, 't23'); +insert into t11 values(1, null); +insert into t12 values(1, 1); +update t11 set i = 1 where id = 1; +` initialFKSourceVSchema = ` { "tables": { "parent": {}, - "child": {} + "child": {}, + "t1": {}, + "t2": {}, + "t11": {}, + "t12": {} } } ` @@ -58,6 +77,38 @@ insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32'); "name": "reverse_bits" } ] + }, + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "t2": { + "column_vindexes": [ + { + "column": "t1id", + "name": "reverse_bits" + } + ] + }, + "t11": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "t12": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] } } } diff --git a/go/test/endtoend/vreplication/fk_ext_load_generator_test.go b/go/test/endtoend/vreplication/fk_ext_load_generator_test.go new file mode 100644 index 00000000000..dc058c0574b --- /dev/null +++ b/go/test/endtoend/vreplication/fk_ext_load_generator_test.go @@ -0,0 +1,503 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "context" + "fmt" + "math/rand/v2" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" +) + +const ( + // Only used when debugging tests. + queryLog = "queries.txt" + + LoadGeneratorStateLoading = "loading" + LoadGeneratorStateRunning = "running" + LoadGeneratorStateStopped = "stopped" + + dataLoadTimeout = 1 * time.Minute + tickInterval = 1 * time.Second + queryTimeout = 1 * time.Minute + + getRandomIdQuery = "SELECT id FROM %s.parent ORDER BY RAND() LIMIT 1" + insertQuery = "INSERT INTO %s.parent (id, name) VALUES (%d, 'name-%d')" + updateQuery = "UPDATE %s.parent SET name = 'rename-%d' WHERE id = %d" + deleteQuery = "DELETE FROM %s.parent WHERE id = %d" + insertChildQuery = "INSERT INTO %s.child (id, parent_id) VALUES (%d, %d)" + insertChildQueryOverrideConstraints = "INSERT /*+ SET_VAR(foreign_key_checks=0) */ INTO %s.child (id, parent_id) VALUES (%d, %d)" +) + +// ILoadGenerator is an interface for load generators that we will use to simulate different types of loads. +type ILoadGenerator interface { + Init(ctx context.Context, vc *VitessCluster) // name & description only for logging. + Teardown() + + // "direct", use direct db connection to primary, only for unsharded keyspace. + // or "vtgate" to use vtgate routing. + // Stop() before calling SetDBStrategy(). + SetDBStrategy(direct, keyspace string) + SetOverrideConstraints(allow bool) // true if load generator can insert rows without FK constraints. + + Keyspace() string + DBStrategy() string // direct or vtgate + State() string // state of load generator (stopped, running) + OverrideConstraints() bool // true if load generator can insert rows without FK constraints. + + Load() error // initial load of data. + Start() error // start populating additional data. 
+ Stop() error // stop populating additional data. + + // Implementation will decide which table to wait for extra rows on. + WaitForAdditionalRows(count int) error + // table == "", implementation will decide which table to get rows from, same table as in WaitForAdditionalRows(). + GetRowCount(table string) (int, error) +} + +var lg ILoadGenerator + +var _ ILoadGenerator = (*SimpleLoadGenerator)(nil) + +type LoadGenerator struct { + ctx context.Context + vc *VitessCluster + state string + dbStrategy string + overrideConstraints bool + keyspace string + tables []string +} + +// SimpleLoadGenerator, which has a single parent table and a single child table for which different types +// of DMLs are run. +type SimpleLoadGenerator struct { + LoadGenerator + currentParentId int + currentChildId int + ch chan bool + runCtx context.Context + runCtxCancel context.CancelFunc +} + +func (lg *SimpleLoadGenerator) SetOverrideConstraints(allow bool) { + lg.overrideConstraints = allow +} + +func (lg *SimpleLoadGenerator) OverrideConstraints() bool { + return lg.overrideConstraints +} + +func (lg *SimpleLoadGenerator) GetRowCount(table string) (int, error) { + vtgateConn, err := lg.getVtgateConn(context.Background()) + if err != nil { + return 0, err + } + defer vtgateConn.Close() + return lg.getNumRows(vtgateConn, table), nil +} + +func (lg *SimpleLoadGenerator) getVtgateConn(ctx context.Context) (*mysql.Conn, error) { + vtParams := mysql.ConnParams{ + Host: lg.vc.ClusterConfig.hostname, + Port: lg.vc.ClusterConfig.vtgateMySQLPort, + Uname: "vt_dba", + } + conn, err := mysql.Connect(ctx, &vtParams) + return conn, err +} + +func (lg *SimpleLoadGenerator) getNumRows(vtgateConn *mysql.Conn, table string) int { + t := lg.vc.t + return getRowCount(t, vtgateConn, table) +} + +func (lg *SimpleLoadGenerator) WaitForAdditionalRows(count int) error { + t := lg.vc.t + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + 
numRowsStart := lg.getNumRows(vtgateConn, "parent") + shortCtx, cancel := context.WithTimeout(context.Background(), dataLoadTimeout) + defer cancel() + for { + select { + case <-shortCtx.Done(): + t.Fatalf("Timed out waiting for additional rows in %q table", "parent") + default: + numRows := lg.getNumRows(vtgateConn, "parent") + if numRows >= numRowsStart+count { + return nil + } + time.Sleep(tickInterval) + } + } +} + +func (lg *SimpleLoadGenerator) exec(query string) (*sqltypes.Result, error) { + switch lg.dbStrategy { + case "direct": + // direct is expected to be used only for unsharded keyspaces to simulate an unmanaged keyspace + // that proxies to an external database. + primary := lg.vc.getPrimaryTablet(lg.vc.t, lg.keyspace, "0") + qr, err := primary.QueryTablet(query, lg.keyspace, true) + require.NoError(lg.vc.t, err) + return qr, err + case "vtgate": + return lg.execQueryWithRetry(query) + default: + err := fmt.Errorf("invalid dbStrategy: %v", lg.dbStrategy) + return nil, err + } +} + +// When a workflow switches traffic it is possible to get transient errors from vtgate while executing queries +// due to cluster-level changes. isQueryRetryable() checks for such errors so that tests can wait for such changes +// to complete before proceeding. 
+func isQueryRetryable(err error) bool { + retryableErrorStrings := []string{ + "retry", + "resharded", + "VT13001", + "Lock wait timeout exceeded", + "errno 2003", + } + for _, e := range retryableErrorStrings { + if strings.Contains(err.Error(), e) { + return true + } + } + return false +} + +func (lg *SimpleLoadGenerator) execQueryWithRetry(query string) (*sqltypes.Result, error) { + ctx, cancel := context.WithTimeout(context.Background(), queryTimeout) + defer cancel() + errCh := make(chan error) + qrCh := make(chan *sqltypes.Result) + var vtgateConn *mysql.Conn + go func() { + var qr *sqltypes.Result + var err error + retry := false + for { + select { + case <-ctx.Done(): + errCh <- fmt.Errorf("query %q did not succeed before the timeout of %s", query, queryTimeout) + return + default: + } + if lg.runCtx != nil && lg.runCtx.Err() != nil { + log.Infof("Load generator run context done, query never completed: %q", query) + errCh <- fmt.Errorf("load generator stopped") + return + } + if retry { + time.Sleep(tickInterval) + } + // We need to parse the error as well as the output of vdiff to determine if the error is retryable, since + // sometimes it is observed that we get the error output as part of vdiff output. 
+ vtgateConn, err = lg.getVtgateConn(ctx) + if err != nil { + if !isQueryRetryable(err) { + errCh <- err + return + } + time.Sleep(tickInterval) + continue + } + qr, err = vtgateConn.ExecuteFetch(query, 1000, false) + vtgateConn.Close() + if err == nil { + qrCh <- qr + return + } + if !isQueryRetryable(err) { + errCh <- err + return + } + retry = true + } + }() + select { + case qr := <-qrCh: + return qr, nil + case err := <-errCh: + log.Infof("query %q failed with error %v", query, err) + return nil, err + } +} + +func (lg *SimpleLoadGenerator) Load() error { + lg.state = LoadGeneratorStateLoading + defer func() { lg.state = LoadGeneratorStateStopped }() + log.Infof("Inserting initial FK data") + var queries = []string{ + "insert into parent values(1, 'parent1'), (2, 'parent2');", + "insert into child values(1, 1, 'child11'), (2, 1, 'child21'), (3, 2, 'child32');", + } + for _, query := range queries { + _, err := lg.exec(query) + require.NoError(lg.vc.t, err) + } + log.Infof("Done inserting initial FK data") + return nil +} + +func (lg *SimpleLoadGenerator) Start() error { + if lg.state == LoadGeneratorStateRunning { + log.Infof("Load generator already running") + return nil + } + lg.state = LoadGeneratorStateRunning + go func() { + defer func() { + lg.state = LoadGeneratorStateStopped + log.Infof("Load generator stopped") + }() + lg.runCtx, lg.runCtxCancel = context.WithCancel(lg.ctx) + defer func() { + lg.runCtx = nil + lg.runCtxCancel = nil + }() + t := lg.vc.t + var err error + log.Infof("Load generator starting") + for i := 0; ; i++ { + if i%1000 == 0 { + // Log occasionally to show that the test is still running. 
+ log.Infof("Load simulation iteration %d", i) + } + select { + case <-lg.ctx.Done(): + log.Infof("Load generator context done") + lg.ch <- true + return + case <-lg.runCtx.Done(): + log.Infof("Load generator run context done") + lg.ch <- true + return + default: + } + op := rand.IntN(100) + switch { + case op < 50: // 50% chance to insert + lg.insert() + case op < 80: // 30% chance to update + lg.update() + default: // 20% chance to delete + lg.delete() + } + require.NoError(t, err) + time.Sleep(1 * time.Millisecond) + } + }() + return nil +} + +func (lg *SimpleLoadGenerator) Stop() error { + if lg.state == LoadGeneratorStateStopped { + log.Infof("Load generator already stopped") + return nil + } + if lg.runCtx != nil && lg.runCtxCancel != nil { + log.Infof("Canceling load generator") + lg.runCtxCancel() + } + // Wait for ch to be closed or we hit a timeout. + timeout := vdiffTimeout + select { + case <-lg.ch: + log.Infof("Load generator stopped") + lg.state = LoadGeneratorStateStopped + return nil + case <-time.After(timeout): + log.Infof("Timed out waiting for load generator to stop") + return fmt.Errorf("timed out waiting for load generator to stop") + } +} + +func (lg *SimpleLoadGenerator) Init(ctx context.Context, vc *VitessCluster) { + lg.ctx = ctx + lg.vc = vc + lg.state = LoadGeneratorStateStopped + lg.currentParentId = 100 + lg.currentChildId = 100 + lg.ch = make(chan bool) + lg.tables = []string{"parent", "child"} +} + +func (lg *SimpleLoadGenerator) Teardown() { + // noop +} + +func (lg *SimpleLoadGenerator) SetDBStrategy(strategy, keyspace string) { + lg.dbStrategy = strategy + lg.keyspace = keyspace +} + +func (lg *SimpleLoadGenerator) Keyspace() string { + return lg.keyspace +} + +func (lg *SimpleLoadGenerator) DBStrategy() string { + return lg.dbStrategy +} + +func (lg *SimpleLoadGenerator) State() string { + return lg.state +} + +func isQueryCancelled(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), 
"load generator stopped") +} + +func (lg *SimpleLoadGenerator) insert() { + t := lg.vc.t + currentParentId++ + query := fmt.Sprintf(insertQuery, lg.keyspace, currentParentId, currentParentId) + qr, err := lg.exec(query) + if isQueryCancelled(err) { + return + } + require.NoError(t, err) + require.NotNil(t, qr) + // Insert one or more children, some with valid foreign keys, some without. + for i := 0; i < rand.IntN(4)+1; i++ { + currentChildId++ + if i == 3 && lg.overrideConstraints { + query = fmt.Sprintf(insertChildQueryOverrideConstraints, lg.keyspace, currentChildId, currentParentId+1000000) + lg.exec(query) + } else { + query = fmt.Sprintf(insertChildQuery, lg.keyspace, currentChildId, currentParentId) + lg.exec(query) + } + } +} + +func (lg *SimpleLoadGenerator) getRandomId() int64 { + t := lg.vc.t + qr, err := lg.exec(fmt.Sprintf(getRandomIdQuery, lg.keyspace)) + if isQueryCancelled(err) { + return 0 + } + require.NoError(t, err) + require.NotNil(t, qr) + if len(qr.Rows) == 0 { + return 0 + } + id, err := qr.Rows[0][0].ToInt64() + require.NoError(t, err) + return id +} + +func (lg *SimpleLoadGenerator) update() { + id := lg.getRandomId() + if id == 0 { + return + } + updateQuery := fmt.Sprintf(updateQuery, lg.keyspace, id, id) + _, err := lg.exec(updateQuery) + if isQueryCancelled(err) { + return + } + require.NoError(lg.vc.t, err) +} + +func (lg *SimpleLoadGenerator) delete() { + id := lg.getRandomId() + if id == 0 { + return + } + deleteQuery := fmt.Sprintf(deleteQuery, lg.keyspace, id) + _, err := lg.exec(deleteQuery) + if isQueryCancelled(err) { + return + } + require.NoError(lg.vc.t, err) +} + +// FIXME: following three functions are copied over from vtgate test utility functions in +// `go/test/endtoend/utils/utils.go`. +// We will need to refactor and then reuse the same functionality from vtgate tests, in the near future. 
+ +func convertToMap(input interface{}) map[string]interface{} { + output := input.(map[string]interface{}) + return output +} + +func getTableT2Map(res *interface{}, ks, tbl string) map[string]interface{} { + step1 := convertToMap(*res)["keyspaces"] + step2 := convertToMap(step1)[ks] + step3 := convertToMap(step2)["tables"] + tblMap := convertToMap(step3)[tbl] + return convertToMap(tblMap) +} + +// waitForColumn waits for a table's column to be present in the vschema because vtgate's foreign key managed mode +// expects the column to be present in the vschema before it can be used in a foreign key constraint. +func waitForColumn(t *testing.T, vtgateProcess *cluster.VtgateProcess, ks, tbl, col string) error { + timeout := time.After(defaultTimeout) + for { + select { + case <-timeout: + return fmt.Errorf("schema tracking did not find column '%s' in table '%s'", col, tbl) + default: + time.Sleep(defaultTick) + res, err := vtgateProcess.ReadVSchema() + require.NoError(t, err, res) + t2Map := getTableT2Map(res, ks, tbl) + authoritative, fieldPresent := t2Map["column_list_authoritative"] + if !fieldPresent { + break + } + authoritativeBool, isBool := authoritative.(bool) + if !isBool || !authoritativeBool { + break + } + colMap, exists := t2Map["columns"] + if !exists { + break + } + colList, isSlice := colMap.([]interface{}) + if !isSlice { + break + } + for _, c := range colList { + colDef, isMap := c.(map[string]interface{}) + if !isMap { + break + } + if colName, exists := colDef["name"]; exists && colName == col { + log.Infof("Found column '%s' in table '%s' for keyspace '%s'", col, tbl, ks) + return nil + } + } + } + } +} diff --git a/go/test/endtoend/vreplication/fk_ext_test.go b/go/test/endtoend/vreplication/fk_ext_test.go new file mode 100644 index 00000000000..4e493da5baf --- /dev/null +++ b/go/test/endtoend/vreplication/fk_ext_test.go @@ -0,0 +1,439 @@ +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + _ "embed" + "fmt" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +var ( + //go:embed schema/fkext/source_schema.sql + FKExtSourceSchema string + //go:embed schema/fkext/source_vschema.json + FKExtSourceVSchema string + //go:embed schema/fkext/target1_vschema.json + FKExtTarget1VSchema string + //go:embed schema/fkext/target2_vschema.json + FKExtTarget2VSchema string + //go:embed schema/fkext/materialize_schema.sql + FKExtMaterializeSchema string +) + +type fkextConfigType struct { + *ClusterConfig + sourceKeyspaceName string + target1KeyspaceName string + target2KeyspaceName string + cell string +} + +var fkextConfig *fkextConfigType + +func initFKExtConfig(t *testing.T) { + fkextConfig = &fkextConfigType{ + ClusterConfig: mainClusterConfig, + sourceKeyspaceName: "source", + target1KeyspaceName: "target1", + target2KeyspaceName: "target2", + cell: "zone1", + } +} + +/* +TestFKExt is an end-to-end test for validating the foreign key implementation with respect to, both vreplication +flows and vtgate processing of DMLs for tables with foreign key constraints. It currently: +* Sets up a source keyspace, to simulate the external database, with a parent and child table with a foreign key constraint. 
+* Creates a target keyspace with two shards, the Vitess keyspace, into which the source data is imported. +* Imports the data using MoveTables. This uses the atomic copy flow +to test that we can import data with foreign key constraints and that data is kept consistent even after the copy phase +since the tables continue to have the FK Constraints. +* Creates a new keyspace with two shards, the Vitess keyspace, into which the data is migrated using MoveTables. +* Materializes the parent and child tables into a different keyspace. +* Reshards the keyspace from 2 shards to 3 shards. +* Reshards the keyspace from 3 shards to 1 shard. + +We drop constraints from the tables from some replicas to simulate a replica that is not doing cascades in +innodb, to confirm that vtgate's fkmanaged mode is working properly. +*/ + +func TestFKExt(t *testing.T) { + setSidecarDBName("_vt") + + // Ensure that there are multiple copy phase cycles per table. + extraVTTabletArgs = append(extraVTTabletArgs, "--vstream_packet_size=256", "--queryserver-config-schema-change-signal") + extraVTGateArgs = append(extraVTGateArgs, "--schema_change_signal=true", "--planner-version", "Gen4") + defer func() { extraVTTabletArgs = nil }() + initFKExtConfig(t) + + cellName := fkextConfig.cell + cells := []string{cellName} + vc = NewVitessCluster(t, &clusterOptions{ + cells: cells, + clusterConfig: fkextConfig.ClusterConfig, + }) + defaultCell := vc.Cells[vc.CellNames[0]] + cell := vc.Cells[cellName] + + defer vc.TearDown() + + sourceKeyspace := fkextConfig.sourceKeyspaceName + vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, "0", FKExtSourceVSchema, FKExtSourceSchema, 0, 0, 100, nil) + + verifyClusterHealth(t, vc) + + lg = &SimpleLoadGenerator{} + lg.Init(context.Background(), vc) + lg.SetDBStrategy("vtgate", fkextConfig.sourceKeyspaceName) + if lg.Load() != nil { + t.Fatal("Load failed") + } + if lg.Start() != nil { + t.Fatal("Start failed") + } + t.Run("Import from external db", func(t *testing.T) { 
+ // Import data into vitess from sourceKeyspace to target1Keyspace, both unsharded. + importIntoVitess(t) + }) + + t.Run("MoveTables from unsharded to sharded keyspace", func(t *testing.T) { + // Migrate data from target1Keyspace to the sharded target2Keyspace. Drops constraints from + // replica to simulate a replica that is not doing cascades in innodb to test vtgate's fkmanaged mode. + // The replica with dropped constraints is used as source for the next workflow called in materializeTables(). + moveKeyspace(t) + }) + + t.Run("Materialize parent and copy tables without constraints", func(t *testing.T) { + // Materialize the tables from target2Keyspace to target1Keyspace. Stream only from replicas, one + // shard with constraints dropped. + materializeTables(t) + }) + lg.SetDBStrategy("vtgate", fkextConfig.target2KeyspaceName) + if lg.Start() != nil { + t.Fatal("Start failed") + } + threeShards := "-40,40-c0,c0-" + keyspaceName := fkextConfig.target2KeyspaceName + ks := vc.Cells[fkextConfig.cell].Keyspaces[keyspaceName] + numReplicas := 1 + + t.Run("Reshard keyspace from 2 shards to 3 shards", func(t *testing.T) { + tabletID := 500 + require.NoError(t, vc.AddShards(t, []*Cell{defaultCell}, ks, threeShards, numReplicas, 0, tabletID, nil)) + tablets := make(map[string]*cluster.VttabletProcess) + for i, shard := range strings.Split(threeShards, ",") { + tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID+i*100)].Vttablet + } + sqls := strings.Split(FKExtSourceSchema, "\n") + for _, sql := range sqls { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplySchema", "--", + "--ddl_strategy=direct", "--sql", sql, keyspaceName) + require.NoErrorf(t, err, output) + } + doReshard(t, fkextConfig.target2KeyspaceName, "reshard2to3", "-80,80-", threeShards, tablets) + }) + t.Run("Reshard keyspace from 3 shards to 1 shard", func(t *testing.T) { + tabletID := 800 + shard := "0" + require.NoError(t, 
vc.AddShards(t, []*Cell{defaultCell}, ks, shard, numReplicas, 0, tabletID, nil)) + tablets := make(map[string]*cluster.VttabletProcess) + tablets[shard] = vc.Cells[cellName].Keyspaces[keyspaceName].Shards[shard].Tablets[fmt.Sprintf("%s-%d", cellName, tabletID)].Vttablet + sqls := strings.Split(FKExtSourceSchema, "\n") + for _, sql := range sqls { + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ApplySchema", "--", + "--ddl_strategy=direct", "--sql", sql, keyspaceName) + require.NoErrorf(t, err, output) + } + doReshard(t, fkextConfig.target2KeyspaceName, "reshard3to1", threeShards, "0", tablets) + }) + lg.Stop() + waitForLowLag(t, fkextConfig.target1KeyspaceName, "mat") + t.Run("Validate materialize counts at end of test", func(t *testing.T) { + validateMaterializeRowCounts(t) + }) + +} + +// checkRowCounts checks that the parent and child tables in the source and target shards have the same number of rows. +func checkRowCounts(t *testing.T, keyspace string, sourceShards, targetShards []string) bool { + sourceTabs := make(map[string]*cluster.VttabletProcess) + targetTabs := make(map[string]*cluster.VttabletProcess) + for _, shard := range sourceShards { + sourceTabs[shard] = vc.getPrimaryTablet(t, keyspace, shard) + } + for _, shard := range targetShards { + targetTabs[shard] = vc.getPrimaryTablet(t, keyspace, shard) + } + + getCount := func(tab *cluster.VttabletProcess, table string) (int64, error) { + qr, err := tab.QueryTablet(fmt.Sprintf("select count(*) from %s", table), keyspace, true) + if err != nil { + return 0, err + } + return qr.Rows[0][0].ToInt64() + } + + var sourceParentCount, sourceChildCount int64 + var targetParentCount, targetChildCount int64 + for _, tab := range sourceTabs { + count, _ := getCount(tab, "parent") + sourceParentCount += count + count, _ = getCount(tab, "child") + sourceChildCount += count + } + for _, tab := range targetTabs { + count, _ := getCount(tab, "parent") + targetParentCount += count + count, _ = getCount(tab, 
"child") + targetChildCount += count + } + log.Infof("Source parent count: %d, child count: %d, target parent count: %d, child count: %d.", + sourceParentCount, sourceChildCount, targetParentCount, targetChildCount) + if sourceParentCount != targetParentCount || sourceChildCount != targetChildCount { + log.Infof("Row counts do not match for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards) + return false + } + return true +} + +// compareRowCounts compares the row counts for the parent and child tables in the source and target shards. In addition to vdiffs, +// it is another check to ensure that both tables have the same number of rows in the source and target shards after load generation +// has stopped. +func compareRowCounts(t *testing.T, keyspace string, sourceShards, targetShards []string) error { + log.Infof("Comparing row counts for keyspace %s, source shards: %v, target shards: %v", keyspace, sourceShards, targetShards) + lg.Stop() + defer lg.Start() + if err := waitForCondition("load generator to stop", func() bool { return lg.State() == LoadGeneratorStateStopped }, 10*time.Second); err != nil { + return err + } + if err := waitForCondition("matching row counts", func() bool { return checkRowCounts(t, keyspace, sourceShards, targetShards) }, 30*time.Second); err != nil { + return err + } + + return nil +} + +func doReshard(t *testing.T, keyspace, workflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { + rs := newReshard(vc, &reshardWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: keyspace, + }, + sourceShards: sourceShards, + targetShards: targetShards, + skipSchemaCopy: true, + }, workflowFlavorVtctl) + rs.Create() + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + for _, targetTab := range targetTabs { + catchup(t, targetTab, workflowName, 
"Reshard") + } + vdiff(t, keyspace, workflowName, fkextConfig.cell, false, true, nil) + rs.SwitchReadsAndWrites() + //if lg.WaitForAdditionalRows(100) != nil { + // t.Fatal("WaitForAdditionalRows failed") + //} + waitForLowLag(t, keyspace, workflowName+"_reverse") + if compareRowCounts(t, keyspace, strings.Split(sourceShards, ","), strings.Split(targetShards, ",")) != nil { + t.Fatal("Row counts do not match") + } + vdiff(t, keyspace, workflowName+"_reverse", fkextConfig.cell, true, false, nil) + + rs.ReverseReadsAndWrites() + //if lg.WaitForAdditionalRows(100) != nil { + // t.Fatal("WaitForAdditionalRows failed") + //} + waitForLowLag(t, keyspace, workflowName) + if compareRowCounts(t, keyspace, strings.Split(targetShards, ","), strings.Split(sourceShards, ",")) != nil { + t.Fatal("Row counts do not match") + } + vdiff(t, keyspace, workflowName, fkextConfig.cell, false, true, nil) + lg.Stop() + + rs.SwitchReadsAndWrites() + rs.Complete() +} + +func areRowCountsEqual(t *testing.T) bool { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + parentRowCount := getRowCount(t, vtgateConn, "target2.parent") + childRowCount := getRowCount(t, vtgateConn, "target2.child") + parentCopyRowCount := getRowCount(t, vtgateConn, "target1.parent_copy") + childCopyRowCount := getRowCount(t, vtgateConn, "target1.child_copy") + log.Infof("Post-materialize row counts are parent: %d, child: %d, parent_copy: %d, child_copy: %d", + parentRowCount, childRowCount, parentCopyRowCount, childCopyRowCount) + if parentRowCount != parentCopyRowCount || childRowCount != childCopyRowCount { + return false + } + return true +} + +// validateMaterializeRowCounts expects the Load generator to be stopped before calling it. 
+func validateMaterializeRowCounts(t *testing.T) { + if lg.State() != LoadGeneratorStateStopped { + t.Fatal("Load generator was unexpectedly still running when validateMaterializeRowCounts was called -- this will produce unreliable results.") + } + areRowCountsEqual2 := func() bool { + return areRowCountsEqual(t) + } + require.NoError(t, waitForCondition("row counts to be equal", areRowCountsEqual2, defaultTimeout)) +} + +const fkExtMaterializeSpec = ` +{"workflow": "%s", "source_keyspace": "%s", "target_keyspace": "%s", +"table_settings": [ {"target_table": "parent_copy", "source_expression": "select * from parent" },{"target_table": "child_copy", "source_expression": "select * from child" }], +"tablet_types": "replica"}` + +func materializeTables(t *testing.T) { + wfName := "mat" + err := vc.VtctlClient.ExecuteCommand("ApplySchema", "--", "--ddl_strategy=direct", + "--sql", FKExtMaterializeSchema, fkextConfig.target1KeyspaceName) + require.NoError(t, err, fmt.Sprintf("ApplySchema Error: %s", err)) + materializeSpec := fmt.Sprintf(fkExtMaterializeSpec, "mat", fkextConfig.target2KeyspaceName, fkextConfig.target1KeyspaceName) + err = vc.VtctlClient.ExecuteCommand("Materialize", materializeSpec) + require.NoError(t, err, "Materialize") + tab := vc.getPrimaryTablet(t, fkextConfig.target1KeyspaceName, "0") + catchup(t, tab, wfName, "Materialize") + validateMaterializeRowCounts(t) +} + +func moveKeyspace(t *testing.T) { + targetTabs := newKeyspace(t, fkextConfig.target2KeyspaceName, "-80,80-", FKExtTarget2VSchema, FKExtSourceSchema, 300, 1) + shard := "-80" + tabletId := fmt.Sprintf("%s-%d", fkextConfig.cell, 301) + replicaTab := vc.Cells[fkextConfig.cell].Keyspaces[fkextConfig.target2KeyspaceName].Shards[shard].Tablets[tabletId].Vttablet + dropReplicaConstraints(t, fkextConfig.target2KeyspaceName, replicaTab) + doMoveTables(t, fkextConfig.target1KeyspaceName, fkextConfig.target2KeyspaceName, "move", "replica", targetTabs, false) +} + +func newKeyspace(t *testing.T, 
keyspaceName, shards, vschema, schema string, tabletId, numReplicas int) map[string]*cluster.VttabletProcess { + tablets := make(map[string]*cluster.VttabletProcess) + cell := vc.Cells[fkextConfig.cell] + vtgate := cell.Vtgates[0] + vc.AddKeyspace(t, []*Cell{cell}, keyspaceName, shards, vschema, schema, numReplicas, 0, tabletId, nil) + err := vc.VtctldClient.ExecuteCommand("RebuildVSchemaGraph") + require.NoError(t, err) + require.NoError(t, waitForColumn(t, vtgate, keyspaceName, "parent", "id")) + require.NoError(t, waitForColumn(t, vtgate, keyspaceName, "child", "parent_id")) + return tablets +} + +func doMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, tabletTypes string, targetTabs map[string]*cluster.VttabletProcess, atomicCopy bool) { + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + tabletTypes: tabletTypes, + }, + sourceKeyspace: sourceKeyspace, + atomicCopy: atomicCopy, + }, workflowFlavorRandom) + mt.Create() + + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + + for _, targetTab := range targetTabs { + catchup(t, targetTab, workflowName, "MoveTables") + } + vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, false, true, nil) + lg.Stop() + lg.SetDBStrategy("vtgate", targetKeyspace) + if lg.Start() != nil { + t.Fatal("Start failed") + } + + mt.SwitchReadsAndWrites() + + if lg.WaitForAdditionalRows(100) != nil { + t.Fatal("WaitForAdditionalRows failed") + } + + waitForLowLag(t, sourceKeyspace, workflowName+"_reverse") + vdiff(t, sourceKeyspace, workflowName+"_reverse", fkextConfig.cell, false, true, nil) + if lg.WaitForAdditionalRows(100) != nil { + t.Fatal("WaitForAdditionalRows failed") + } + + mt.ReverseReadsAndWrites() + if lg.WaitForAdditionalRows(100) != nil { + t.Fatal("WaitForAdditionalRows failed") + } + waitForLowLag(t, 
targetKeyspace, workflowName) + time.Sleep(5 * time.Second) + vdiff(t, targetKeyspace, workflowName, fkextConfig.cell, false, true, nil) + lg.Stop() + mt.SwitchReadsAndWrites() + mt.Complete() + if err := vc.VtctldClient.ExecuteCommand("ApplyRoutingRules", "--rules={}"); err != nil { + t.Fatal(err) + } +} + +func importIntoVitess(t *testing.T) { + targetTabs := newKeyspace(t, fkextConfig.target1KeyspaceName, "0", FKExtTarget1VSchema, FKExtSourceSchema, 200, 1) + doMoveTables(t, fkextConfig.sourceKeyspaceName, fkextConfig.target1KeyspaceName, "import", "primary", targetTabs, true) +} + +const getConstraintsQuery = ` +SELECT CONSTRAINT_NAME, TABLE_NAME +FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE +WHERE TABLE_SCHEMA = '%s' AND REFERENCED_TABLE_NAME IS NOT NULL; +` + +// dropReplicaConstraints drops all foreign key constraints on replica tables for a given keyspace/shard. +// We do this so that we can replay binlogs from a replica which is not doing cascades but just replaying +// the binlogs created by the primary. This will confirm that vtgate is doing the cascades correctly. +func dropReplicaConstraints(t *testing.T, keyspaceName string, tablet *cluster.VttabletProcess) { + var dropConstraints []string + require.Equal(t, "replica", strings.ToLower(tablet.TabletType)) + dbName := "vt_" + keyspaceName + qr, err := tablet.QueryTablet(fmt.Sprintf(getConstraintsQuery, dbName), keyspaceName, true) + if err != nil { + t.Fatal(err) + } + for _, row := range qr.Rows { + constraintName := row[0].ToString() + tableName := row[1].ToString() + dropConstraints = append(dropConstraints, fmt.Sprintf("ALTER TABLE `%s`.`%s` DROP FOREIGN KEY `%s`", + dbName, tableName, constraintName)) + } + prefixQueries := []string{ + "set sql_log_bin=0", + "SET @@global.super_read_only=0", + } + suffixQueries := []string{ + "SET @@global.super_read_only=1", + "set sql_log_bin=1", + } + queries := append(prefixQueries, dropConstraints...) + queries = append(queries, suffixQueries...) 
+ require.NoError(t, tablet.QueryTabletMultiple(queries, keyspaceName, true)) +} diff --git a/go/test/endtoend/vreplication/fk_test.go b/go/test/endtoend/vreplication/fk_test.go index 31886864f11..09692930c5c 100644 --- a/go/test/endtoend/vreplication/fk_test.go +++ b/go/test/endtoend/vreplication/fk_test.go @@ -19,7 +19,7 @@ package vreplication import ( "context" "fmt" - "math/rand" + "math/rand/v2" "strconv" "testing" "time" @@ -28,49 +28,43 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vttablet" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) +const testWorkflowFlavor = workflowFlavorRandom + // TestFKWorkflow runs a MoveTables workflow with atomic copy for a db with foreign key constraints. // It inserts initial data, then simulates load. We insert both child rows with foreign keys and those without, // i.e. with foreign_key_checks=0. func TestFKWorkflow(t *testing.T) { - // ensure that there are multiple copy phase cycles per table - extraVTTabletArgs = []string{"--vstream_packet_size=256"} + extraVTTabletArgs = []string{ + // Ensure that there are multiple copy phase cycles per table. + "--vstream_packet_size=256", + // Test VPlayer batching mode. 
+ fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching), + } defer func() { extraVTTabletArgs = nil }() - cellName := "zone" - cells := []string{cellName} - vc = NewVitessCluster(t, "TestFKWorkflow", cells, mainClusterConfig) + cellName := "zone1" + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc) - allCellNames = cellName - defaultCellName := cellName - defaultCell = vc.Cells[defaultCellName] sourceKeyspace := "fksource" shardName := "0" + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - defer vc.TearDown(t) + defer vc.TearDown() cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialFKSourceVSchema, initialFKSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) + insertInitialFKData(t) var ls *fkLoadSimulator - - insertInitialFKData(t) withLoad := true // Set it to false to skip load simulation, while debugging var cancel context.CancelFunc var ctx context.Context @@ -86,20 +80,25 @@ func TestFKWorkflow(t *testing.T) { }() go ls.simulateLoad() } + targetKeyspace := "fktarget" targetTabletId := 200 - vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, initialFKSchema, 0, 0, targetTabletId, sourceKsOpts) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) + vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialFKTargetVSchema, "", 
0, 0, targetTabletId, sourceKsOpts) + + testFKCancel(t, vc) workflowName := "fk" ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) - mt := newMoveTables(vc, &moveTables{ - workflowName: workflowName, - targetKeyspace: targetKeyspace, + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, sourceKeyspace: sourceKeyspace, atomicCopy: true, - }, moveTablesFlavorRandom) + }, testWorkflowFlavor) mt.Create() waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) @@ -108,7 +107,9 @@ func TestFKWorkflow(t *testing.T) { require.NotNil(t, targetTab) catchup(t, targetTab, workflowName, "MoveTables") vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) - ls.waitForAdditionalRows(200) + if withLoad { + ls.waitForAdditionalRows(200) + } vdiff(t, targetKeyspace, workflowName, cellName, true, false, nil) if withLoad { cancel() @@ -123,24 +124,43 @@ func TestFKWorkflow(t *testing.T) { ls = newFKLoadSimulator(t, ctx) defer cancel() go ls.simulateLoad() - } - ls.waitForAdditionalRows(200) - if withLoad { + ls.waitForAdditionalRows(200) cancel() <-ch } + mt.Complete() + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + + t11Count := getRowCount(t, vtgateConn, "t11") + t12Count := getRowCount(t, vtgateConn, "t12") + require.Greater(t, t11Count, 1) + require.Greater(t, t12Count, 1) + require.Equal(t, t11Count, t12Count) } func insertInitialFKData(t *testing.T) { t.Run("insertInitialFKData", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sourceKeyspace := "fksource" shard := "0" db := fmt.Sprintf("%s:%s", sourceKeyspace, shard) log.Infof("Inserting initial FK data") execMultipleQueries(t, vtgateConn, db, initialFKData) log.Infof("Done inserting initial FK data") - waitForRowCount(t, vtgateConn, db, "parent", 2) - waitForRowCount(t, vtgateConn, db, "child", 3) + + type 
tableCounts struct { + name string + count int + } + for _, table := range []tableCounts{ + {"parent", 2}, {"child", 3}, + {"t1", 2}, {"t2", 3}, + {"t11", 1}, {"t12", 1}, + } { + waitForRowCount(t, vtgateConn, db, table.name, table.count) + } }) } @@ -166,6 +186,7 @@ func newFKLoadSimulator(t *testing.T, ctx context.Context) *fkLoadSimulator { } } +var indexCounter int = 100 // used to insert into t11 and t12 func (ls *fkLoadSimulator) simulateLoad() { t := ls.t var err error @@ -180,7 +201,7 @@ func (ls *fkLoadSimulator) simulateLoad() { default: } // Decide operation based on random number - op := rand.Intn(100) + op := rand.IntN(100) switch { case op < 50: // 50% chance to insert ls.insert() @@ -189,8 +210,13 @@ func (ls *fkLoadSimulator) simulateLoad() { default: // 20% chance to delete ls.delete() } + for _, table := range []string{"t11", "t12"} { + query := fmt.Sprintf("insert /*+ SET_VAR(foreign_key_checks=0) */ into fksource.%s values(%d, %d)", table, indexCounter, indexCounter) + ls.exec(query) + indexCounter++ + } require.NoError(t, err) - time.Sleep(1 * time.Millisecond) + time.Sleep(10 * time.Millisecond) } } @@ -231,7 +257,7 @@ func (ls *fkLoadSimulator) insert() { qr := ls.exec(insertQuery) require.NotNil(t, qr) // insert one or more children, some with valid foreign keys, some without. 
- for i := 0; i < rand.Intn(4)+1; i++ { + for i := 0; i < rand.IntN(4)+1; i++ { currentChildId++ if i == 3 { insertQuery = fmt.Sprintf("INSERT /*+ SET_VAR(foreign_key_checks=0) */ INTO child (id, parent_id) VALUES (%d, %d)", currentChildId, currentParentId+1000000) @@ -257,7 +283,7 @@ func (ls *fkLoadSimulator) getRandomId() int64 { } func (ls *fkLoadSimulator) update() { - updateQuery := fmt.Sprintf("UPDATE parent SET name = 'parent%d' WHERE id = %d", rand.Intn(1000)+1, ls.getRandomId()) + updateQuery := fmt.Sprintf("UPDATE parent SET name = 'parent%d' WHERE id = %d", rand.IntN(1000)+1, ls.getRandomId()) ls.exec(updateQuery) } @@ -268,7 +294,31 @@ func (ls *fkLoadSimulator) delete() { func (ls *fkLoadSimulator) exec(query string) *sqltypes.Result { t := ls.t + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "fksource", query) require.NotNil(t, qr) return qr } + +// testFKCancel confirms that a MoveTables workflow which includes tables with foreign key +// constraints, where the parent table is lexicographically sorted before the child table and +// thus may be dropped first, can be successfully cancelled. 
+func testFKCancel(t *testing.T, vc *VitessCluster) { + targetKeyspace := "fktarget" + sourceKeyspace := "fksource" + workflowName := "wf2" + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + atomicCopy: true, + }, testWorkflowFlavor) + mt.Create() + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + mt.Cancel() +} diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index d2154f13f1f..4764b213ad6 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -18,16 +18,18 @@ package vreplication import ( "context" - "crypto/rand" "encoding/hex" "encoding/json" "fmt" "io" + "math/rand" "net/http" + "os" "os/exec" "regexp" "sort" "strings" + "sync" "sync/atomic" "testing" "time" @@ -37,6 +39,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tidwall/gjson" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" @@ -45,9 +48,11 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) const ( @@ -56,6 +61,11 @@ const ( workflowStateTimeout = 90 * time.Second ) +func setSidecarDBName(dbName string) { + sidecarDBName = dbName + sidecarDBIdentifier = sqlparser.String(sqlparser.NewIdentifierCS(sidecarDBName)) +} + func execMultipleQueries(t *testing.T, conn *mysql.Conn, database string, lines string) { queries := strings.Split(lines, "\n") for _, query := range queries { 
@@ -65,13 +75,39 @@ func execMultipleQueries(t *testing.T, conn *mysql.Conn, database string, lines execVtgateQuery(t, conn, database, string(query)) } } + +func execQueryWithRetry(t *testing.T, conn *mysql.Conn, query string, timeout time.Duration) *sqltypes.Result { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ticker := time.NewTicker(defaultTick) + defer ticker.Stop() + + var qr *sqltypes.Result + var err error + for { + qr, err = conn.ExecuteFetch(query, 1000, false) + if err == nil { + return qr + } + select { + case <-ctx.Done(): + require.FailNow(t, fmt.Sprintf("query %q did not succeed before the timeout of %s; last seen result: %v", + query, timeout, qr.Rows)) + case <-ticker.C: + log.Infof("query %q failed with error %v, retrying in %ds", query, err, defaultTick) + } + } +} + func execQuery(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { qr, err := conn.ExecuteFetch(query, 1000, false) + if err != nil { + log.Errorf("Error executing query: %s: %v", query, err) + } require.NoError(t, err) return qr } - -func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { +func getConnectionNoError(t *testing.T, hostname string, port int) *mysql.Conn { vtParams := mysql.ConnParams{ Host: hostname, Port: port, @@ -79,7 +115,22 @@ func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { } ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) - require.NoError(t, err) + if err != nil { + return nil + } + return conn +} + +func getConnection(t *testing.T, hostname string, port int) *mysql.Conn { + vtParams := mysql.ConnParams{ + Host: hostname, + Port: port, + Uname: "vt_dba", + ConnectTimeoutMs: 1000, + } + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoErrorf(t, err, "error connecting to vtgate on %s:%d", hostname, port) return conn } @@ -96,6 +147,19 @@ func execVtgateQuery(t *testing.T, conn *mysql.Conn, database string, 
query stri return qr } +func execVtgateQueryWithRetry(t *testing.T, conn *mysql.Conn, database string, query string, timeout time.Duration) *sqltypes.Result { + if strings.TrimSpace(query) == "" { + return nil + } + if database != "" { + execQuery(t, conn, "use `"+database+"`;") + } + execQuery(t, conn, "begin") + qr := execQueryWithRetry(t, conn, query, timeout) + execQuery(t, conn, "commit") + return qr +} + func checkHealth(t *testing.T, url string) bool { resp, err := http.Get(url) require.NoError(t, err) @@ -221,6 +285,7 @@ func waitForRowCountInTablet(t *testing.T, vttablet *cluster.VttabletProcess, da require.NoError(t, err) require.NotNil(t, qr) if wantRes == fmt.Sprintf("%v", qr.Rows) { + log.Infof("waitForRowCountInTablet: found %d rows in table %s on tablet %s", want, table, vttablet.Name) return } select { @@ -286,6 +351,13 @@ func assertQueryDoesNotExecutesOnTablet(t *testing.T, conn *mysql.Conn, tablet * assert.Equalf(t, count0, count1, "query %q executed in target;\ntried to match %q\nbefore:\n%s\n\nafter:\n%s\n\n", query, matchQuery, body0, body1) } +func waitForWorkflowToBeCreated(t *testing.T, vc *VitessCluster, ksWorkflow string) { + require.NoError(t, waitForCondition("workflow to be created", func() bool { + _, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show") + return err == nil + }, defaultTimeout)) +} + // waitForWorkflowState waits for all of the given workflow's // streams to reach the provided state. 
You can pass optional // key value pairs of the form "key==value" to also wait for @@ -297,7 +369,7 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa log.Infof("Waiting for workflow %q to fully reach %q state", ksWorkflow, wantState) for { output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show") - require.NoError(t, err) + require.NoError(t, err, output) done = true state := "" result := gjson.Get(output, "ShardStatuses") @@ -318,6 +390,9 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa } } } + if wantState == binlogdatapb.VReplicationWorkflowState_Running.String() && attributeValue.Get("Pos").String() == "" { + done = false + } } else { done = false } @@ -351,7 +426,7 @@ func waitForWorkflowState(t *testing.T, vc *VitessCluster, ksWorkflow string, wa // as a CSV have secondary keys. This is useful when testing the // --defer-secondary-keys flag to confirm that the secondary keys // were re-added by the time the workflow hits the running phase. -// For a Reshard workflow, where no tables are specififed, pass +// For a Reshard workflow, where no tables are specified, pass // an empty string for the tables and all tables in the target // keyspace will be checked. func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletProcess, ksName string, tables string) { @@ -371,6 +446,12 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro } } for _, tablet := range tablets { + // Be sure that the schema is up to date. 
+ err := vc.VtctldClient.ExecuteCommand("ReloadSchema", topoproto.TabletAliasString(&topodatapb.TabletAlias{ + Cell: tablet.Cell, + Uid: uint32(tablet.TabletUID), + })) + require.NoError(t, err) for _, table := range tableArr { if schema.IsInternalOperationTableName(table) { continue @@ -382,7 +463,7 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro require.NotNil(t, res) row := res.Named().Row() tableSchema := row["Create Table"].ToString() - parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema) + parsedDDL, err := sqlparser.NewTestParser().ParseStrictDDL(tableSchema) require.NoError(t, err) createTable, ok := parsedDDL.(*sqlparser.CreateTable) require.True(t, ok) @@ -452,7 +533,6 @@ func validateDryRunResults(t *testing.T, output string, want []string) { w = strings.TrimSpace(w[1:]) result := strings.HasPrefix(g, w) match = result - //t.Logf("Partial match |%v|%v|%v\n", w, g, match) } else { match = g == w } @@ -509,11 +589,27 @@ func isTableInDenyList(t *testing.T, vc *VitessCluster, ksShard string, table st return found, nil } -func expectNumberOfStreams(t *testing.T, vtgateConn *mysql.Conn, name string, workflow string, database string, want int) { - query := sqlparser.BuildParsedQuery("select count(*) from %s.vreplication where workflow='%s'", sidecarDBIdentifier, workflow).Query +// expectNumberOfStreams waits for the given number of streams to be present and +// by default RUNNING. If you want to wait for different states, then you can +// pass in the state(s) you want to wait for. 
+func expectNumberOfStreams(t *testing.T, vtgateConn *mysql.Conn, name string, workflow string, database string, want int, states ...string) { + var query string + if len(states) == 0 { + states = append(states, binlogdatapb.VReplicationWorkflowState_Running.String()) + } + query = sqlparser.BuildParsedQuery("select count(*) from %s.vreplication where workflow='%s' and state in ('%s')", + sidecarDBIdentifier, workflow, strings.Join(states, "','")).Query waitForQueryResult(t, vtgateConn, database, query, fmt.Sprintf(`[[INT64(%d)]]`, want)) } +// confirmAllStreamsRunning confirms that all of the migrated streams are running +// after a Reshard. +func confirmAllStreamsRunning(t *testing.T, vtgateConn *mysql.Conn, database string) { + query := sqlparser.BuildParsedQuery("select count(*) from %s.vreplication where state != '%s'", + sidecarDBIdentifier, binlogdatapb.VReplicationWorkflowState_Running.String()).Query + waitForQueryResult(t, vtgateConn, database, query, `[[INT64(0)]]`) +} + func printShardPositions(vc *VitessCluster, ksShards []string) { for _, ksShard := range ksShards { output, err := vc.VtctlClient.ExecuteCommandWithOutput("ShardReplicationPositions", ksShard) @@ -703,95 +799,119 @@ func isBinlogRowImageNoBlob(t *testing.T, tablet *cluster.VttabletProcess) bool return mode == "noblob" } +func getRowCount(t *testing.T, vtgateConn *mysql.Conn, table string) int { + query := fmt.Sprintf("select count(*) from %s", table) + qr := execVtgateQuery(t, vtgateConn, "", query) + numRows, _ := qr.Rows[0][0].ToInt() + return numRows +} + const ( - loadTestBufferingWindowDurationStr = "30s" - loadTestPostBufferingInsertWindow = 60 * time.Second // should be greater than loadTestBufferingWindowDurationStr - loadTestWaitForCancel = 30 * time.Second - loadTestWaitBetweenQueries = 2 * time.Millisecond + loadTestBufferingWindowDuration = 10 * time.Second + loadTestAvgWaitBetweenQueries = 500 * time.Microsecond + loadTestDefaultConnections = 100 ) type loadGenerator struct 
{ - t *testing.T - vc *VitessCluster - ctx context.Context - cancel context.CancelFunc + t *testing.T + vc *VitessCluster + ctx context.Context + cancel context.CancelFunc + connections int + wg sync.WaitGroup } func newLoadGenerator(t *testing.T, vc *VitessCluster) *loadGenerator { return &loadGenerator{ - t: t, - vc: vc, + t: t, + vc: vc, + connections: loadTestDefaultConnections, } } func (lg *loadGenerator) stop() { - time.Sleep(loadTestPostBufferingInsertWindow) // wait for buffering to stop and additional records to be inserted by startLoad after traffic is switched + // Wait for buffering to stop and additional records to be inserted by start + // after traffic is switched. + time.Sleep(loadTestBufferingWindowDuration * 2) log.Infof("Canceling load") lg.cancel() - time.Sleep(loadTestWaitForCancel) // wait for cancel to take effect - log.Flush() - + lg.wg.Wait() } func (lg *loadGenerator) start() { t := lg.t lg.ctx, lg.cancel = context.WithCancel(context.Background()) + var connectionCount atomic.Int64 var id int64 - log.Infof("startLoad: starting") + log.Infof("loadGenerator: starting") queryTemplate := "insert into loadtest(id, name) values (%d, 'name-%d')" var totalQueries, successfulQueries int64 var deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors int64 + lg.wg.Add(1) defer func() { - - log.Infof("startLoad: totalQueries: %d, successfulQueries: %d, deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", + defer lg.wg.Done() + log.Infof("loadGenerator: totalQueries: %d, successfulQueries: %d, deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", totalQueries, successfulQueries, deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) }() - logOnce := true for { select { case <-lg.ctx.Done(): - log.Infof("startLoad: context cancelled") - log.Infof("startLoad: deniedErrors: %d, ambiguousErrors: %d, 
reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", + log.Infof("loadGenerator: context cancelled") + log.Infof("loadGenerator: deniedErrors: %d, ambiguousErrors: %d, reshardedErrors: %d, tableNotFoundErrors: %d, otherErrors: %d", deniedErrors, ambiguousErrors, reshardedErrors, tableNotFoundErrors, otherErrors) require.Equal(t, int64(0), deniedErrors) require.Equal(t, int64(0), otherErrors) + require.Equal(t, int64(0), reshardedErrors) require.Equal(t, totalQueries, successfulQueries) return default: - go func() { - conn := vc.GetVTGateConn(t) - defer conn.Close() - atomic.AddInt64(&id, 1) - query := fmt.Sprintf(queryTemplate, id, id) - _, err := conn.ExecuteFetch(query, 1, false) - atomic.AddInt64(&totalQueries, 1) - if err != nil { - sqlErr := err.(*sqlerror.SQLError) - if strings.Contains(strings.ToLower(err.Error()), "denied tables") { - log.Infof("startLoad: denied tables error executing query: %d:%v", sqlErr.Number(), err) - atomic.AddInt64(&deniedErrors, 1) - } else if strings.Contains(strings.ToLower(err.Error()), "ambiguous") { - // this can happen when a second keyspace is setup with the same tables, but there are no routing rules - // set yet by MoveTables. So we ignore these errors. 
- atomic.AddInt64(&ambiguousErrors, 1) - } else if strings.Contains(strings.ToLower(err.Error()), "current keyspace is being resharded") { - atomic.AddInt64(&reshardedErrors, 1) - } else if strings.Contains(strings.ToLower(err.Error()), "not found") { - atomic.AddInt64(&tableNotFoundErrors, 1) - } else { - if logOnce { - log.Infof("startLoad: error executing query: %d:%v", sqlErr.Number(), err) - logOnce = false + if int(connectionCount.Load()) < lg.connections { + connectionCount.Add(1) + lg.wg.Add(1) + go func() { + defer lg.wg.Done() + defer connectionCount.Add(-1) + conn := vc.GetVTGateConn(t) + defer conn.Close() + for { + select { + case <-lg.ctx.Done(): + return + default: } - atomic.AddInt64(&otherErrors, 1) + newID := atomic.AddInt64(&id, 1) + query := fmt.Sprintf(queryTemplate, newID, newID) + _, err := conn.ExecuteFetch(query, 1, false) + atomic.AddInt64(&totalQueries, 1) + if err != nil { + sqlErr := err.(*sqlerror.SQLError) + if strings.Contains(strings.ToLower(err.Error()), "denied tables") { + if debugMode { + t.Logf("loadGenerator: denied tables error executing query: %d:%v", sqlErr.Number(), err) + } + atomic.AddInt64(&deniedErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "ambiguous") { + // This can happen when a second keyspace is setup with the same tables, but + // there are no routing rules set yet by MoveTables. So we ignore these errors. 
+ atomic.AddInt64(&ambiguousErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "current keyspace is being resharded") { + atomic.AddInt64(&reshardedErrors, 1) + } else if strings.Contains(strings.ToLower(err.Error()), "not found") { + atomic.AddInt64(&tableNotFoundErrors, 1) + } else { + if debugMode { + t.Logf("loadGenerator: error executing query: %d:%v", sqlErr.Number(), err) + } + atomic.AddInt64(&otherErrors, 1) + } + } else { + atomic.AddInt64(&successfulQueries, 1) + } + time.Sleep(time.Duration(int64(float64(loadTestAvgWaitBetweenQueries.Microseconds()) * rand.Float64()))) } - time.Sleep(loadTestWaitBetweenQueries) - } else { - atomic.AddInt64(&successfulQueries, 1) - } - }() - time.Sleep(loadTestWaitBetweenQueries) + }() + } } } } @@ -820,3 +940,93 @@ func (lg *loadGenerator) waitForCount(want int64) { } } } + +// appendToQueryLog is useful when debugging tests. +func appendToQueryLog(msg string) { + file, err := os.OpenFile(queryLog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Errorf("Error opening query log file: %v", err) + return + } + defer file.Close() + if _, err := file.WriteString(msg + "\n"); err != nil { + log.Errorf("Error writing to query log file: %v", err) + } +} + +func waitForCondition(name string, condition func() bool, timeout time.Duration) error { + if condition() { + return nil + } + + ticker := time.NewTicker(tickInterval) + defer ticker.Stop() + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + for { + select { + case <-ticker.C: + if condition() { + return nil + } + case <-ctx.Done(): + return fmt.Errorf("%s: waiting for %s", ctx.Err(), name) + } + } +} + +func getCellNames(cells []*Cell) string { + var cellNames []string + if cells == nil { + cells = []*Cell{} + for _, cell := range vc.Cells { + cells = append(cells, cell) + } + } + for _, cell := range cells { + cellNames = append(cellNames, cell.Name) + } + return strings.Join(cellNames, ",") +} + +// 
VExplainPlan is the struct that represents the json output of a vexplain query. +type VExplainPlan struct { + OperatorType string + Variant string + Keyspace VExplainKeyspace + FieldQuery string + Query string + Table string +} + +type VExplainKeyspace struct { + Name string + Sharded bool +} + +// vexplain runs vexplain on the given query and returns the plan. Useful for validating routing rules. +func vexplain(t *testing.T, database, query string) *VExplainPlan { + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() + + qr := execVtgateQuery(t, vtgateConn, database, fmt.Sprintf("vexplain %s", query)) + require.NotNil(t, qr) + require.Equal(t, 1, len(qr.Rows)) + json := qr.Rows[0][0].ToString() + + var plan VExplainPlan + require.NoError(t, json2.Unmarshal([]byte(json), &plan)) + return &plan +} + +// confirmKeyspacesRoutedTo confirms that the given keyspaces are routed as expected for the given tablet types, using vexplain. +func confirmKeyspacesRoutedTo(t *testing.T, keyspace string, routedKeyspace, table string, tabletTypes []string) { + if len(tabletTypes) == 0 { + tabletTypes = []string{"primary", "replica", "rdonly"} + } + for _, tt := range tabletTypes { + database := fmt.Sprintf("%s@%s", keyspace, tt) + plan := vexplain(t, database, fmt.Sprintf("select * from %s.%s", keyspace, table)) + require.Equalf(t, routedKeyspace, plan.Keyspace.Name, "for database %s, keyspace %v, tabletType %s", database, keyspace, tt) + } +} diff --git a/go/test/endtoend/vreplication/initial_data_test.go b/go/test/endtoend/vreplication/initial_data_test.go index bf93a040942..ea34ef7fddf 100644 --- a/go/test/endtoend/vreplication/initial_data_test.go +++ b/go/test/endtoend/vreplication/initial_data_test.go @@ -18,7 +18,7 @@ package vreplication import ( "fmt" - "math/rand" + "math/rand/v2" "os" "testing" @@ -27,6 +27,8 @@ import ( func insertInitialData(t *testing.T) { t.Run("insertInitialData", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer 
closeConn() log.Infof("Inserting initial data") lines, _ := os.ReadFile("unsharded_init_data.sql") execMultipleQueries(t, vtgateConn, "product:0", string(lines)) @@ -48,6 +50,8 @@ const NumJSONRows = 100 func insertJSONValues(t *testing.T) { // insert null value combinations + vtgateConn, closeConn := getVTGateConn() + defer closeConn() execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j3) values(1, \"{}\")") execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j1, j3) values(2, \"{}\", \"{}\")") execVtgateQuery(t, vtgateConn, "product:0", "insert into json_tbl(id, j2, j3) values(3, \"{}\", \"{}\")") @@ -61,8 +65,8 @@ func insertJSONValues(t *testing.T) { numJsonValues := len(jsonValues) for id <= NumJSONRows { id++ - j1 := rand.Intn(numJsonValues) - j2 := rand.Intn(numJsonValues) + j1 := rand.IntN(numJsonValues) + j2 := rand.IntN(numJsonValues) query := fmt.Sprintf(q, id, jsonValues[j1], jsonValues[j2]) execVtgateQuery(t, vtgateConn, "product:0", query) } @@ -76,6 +80,8 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { // the number of customer records we are going to // create. The value we get back is the max value // that we reserved. + vtgateConn, closeConn := getVTGateConn() + defer closeConn() maxID := waitForSequenceValue(t, vtgateConn, "product", "customer_seq", numCustomers) // So we need to calculate the first value we reserved // from the max. 
@@ -95,16 +101,22 @@ func insertMoreCustomers(t *testing.T, numCustomers int) { } func insertMoreProducts(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(3, 'cpu'),(4, 'camera'),(5, 'mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } func insertMoreProductsForSourceThrottler(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(103, 'new-cpu'),(104, 'new-camera'),(105, 'new-mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } func insertMoreProductsForTargetThrottler(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() sql := "insert into product(pid, description) values(203, 'new-cpu'),(204, 'new-camera'),(205, 'new-mouse');" execVtgateQuery(t, vtgateConn, "product", sql) } @@ -122,6 +134,8 @@ var blobTableQueries = []string{ } func insertIntoBlobTable(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for _, query := range blobTableQueries { execVtgateQuery(t, vtgateConn, "product:0", query) } diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go index 63205a56c0a..486692a58ba 100644 --- a/go/test/endtoend/vreplication/materialize_test.go +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -20,8 +20,6 @@ import ( "testing" "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/endtoend/cluster" ) const smSchema = ` @@ -63,31 +61,21 @@ const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2 // testShardedMaterialize tests a materialize workflow for a sharded cluster (single shard) using comparison filters func testShardedMaterialize(t *testing.T, useVtctldClient bool) { - defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestShardedMaterialize", allCells, mainClusterConfig) 
+ var err error + vc = NewVitessCluster(t, nil) ks1 := "ks1" ks2 := "ks2" - shard := "0" require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) - - defaultCell = vc.Cells[defaultCellName] + defer vc.TearDown() + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, ks1, shard) - require.NoError(t, err) vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "0", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200, nil) - err = cluster.WaitForHealthyShard(vc.VtctldClient, ks2, shard) - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) _, err = vtgateConn.ExecuteFetch(initDataQuery, 0, false) @@ -182,10 +170,8 @@ RETURN id * length(val); ` func testMaterialize(t *testing.T, useVtctldClient bool) { - defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMaterialize", allCells, mainClusterConfig) + var err error + vc = NewVitessCluster(t, nil) sourceKs := "source" targetKs := "target" shard := "0" @@ -193,20 +179,14 @@ func testMaterialize(t *testing.T, useVtctldClient bool) { defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", smMaterializeVSchemaSource, smMaterializeSchemaSource, 
defaultReplicas, defaultRdonly, 300, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", smMaterializeVSchemaTarget, smMaterializeSchemaTarget, defaultReplicas, defaultRdonly, 400, nil) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index 75ab6a3151b..1f365c47600 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -25,8 +25,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/test/endtoend/cluster" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -48,43 +46,36 @@ func insertInitialDataIntoExternalCluster(t *testing.T, conn *mysql.Conn) { // hence the VTDATAROOT env variable gets overwritten. 
// Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctlMigrate(t *testing.T) { - defaultCellName := "zone1" - cells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMigrate", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err, "product shard did not become healthy") - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate, "failed to get vtgate") - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) + t.Run("VStreamFrom", func(t *testing.T) { + testVStreamFrom(t, vtgate, "product", 2) + }) // create external cluster extCell := "extcell1" - extCells := []string{extCell} - extVc := NewVitessCluster(t, "TestMigrateExternal", extCells, externalClusterConfig) - require.NotNil(t, extVc) - defer extVc.TearDown(t) + extVc := NewVitessCluster(t, &clusterOptions{cells: []string{"extcell1"}, clusterConfig: externalClusterConfig}) + defer extVc.TearDown() extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", initialExternalVSchema, initialExternalSchema, 0, 0, 1000, nil) extVtgate := extCell2.Vtgates[0] require.NotNil(t, extVtgate) - err = 
cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") - require.NoError(t, err) verifyClusterHealth(t, extVc) extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) @@ -137,7 +128,7 @@ func TestVtctlMigrate(t *testing.T) { "--source=ext1.rating", "create", ksWorkflow); err != nil { t.Fatalf("Migrate command failed with %+v : %s\n", err, output) } - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) waitForRowCount(t, vtgateConn, "product:0", "rating", 0) waitForRowCount(t, vtgateConn, "product:0", "review", 0) if output, err = vc.VtctlClient.ExecuteCommandWithOutput("Migrate", "cancel", ksWorkflow); err != nil { @@ -175,26 +166,18 @@ func TestVtctlMigrate(t *testing.T) { // hence the VTDATAROOT env variable gets overwritten. // Each time we need to create vt processes in the "other" cluster we need to set the appropriate VTDATAROOT func TestVtctldMigrate(t *testing.T) { - defaultCellName := "zone1" - cells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestMigrateVtctld", cells, mainClusterConfig) + vc = NewVitessCluster(t, nil) - require.NotNil(t, vc, "failed to create VitessCluster") defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) require.NoError(t, err, "failed to create product keyspace") - err = cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err, "product shard did not become healthy") - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate, "failed to get vtgate") - 
vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) @@ -202,9 +185,11 @@ func TestVtctldMigrate(t *testing.T) { // create external cluster extCell := "extcell1" extCells := []string{extCell} - extVc := NewVitessCluster(t, t.Name(), extCells, externalClusterConfig) - require.NotNil(t, extVc) - defer extVc.TearDown(t) + extVc := NewVitessCluster(t, &clusterOptions{ + cells: extCells, + clusterConfig: externalClusterConfig, + }) + defer extVc.TearDown() extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", @@ -212,8 +197,6 @@ func TestVtctldMigrate(t *testing.T) { extVtgate := extCell2.Vtgates[0] require.NotNil(t, extVtgate) - err = cluster.WaitForHealthyShard(extVc.VtctldClient, "rating", "0") - require.NoError(t, err) verifyClusterHealth(t, extVc) extVtgateConn := getConnection(t, extVc.ClusterConfig.hostname, extVc.ClusterConfig.vtgateMySQLPort) insertInitialDataIntoExternalCluster(t, extVtgateConn) @@ -284,7 +267,7 @@ func TestVtctldMigrate(t *testing.T) { "--mount-name", "ext1", "--all-tables", "--auto-start=false", "--cells=extcell1") require.NoError(t, err, "Migrate command failed with %s", output) - expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1) + expectNumberOfStreams(t, vtgateConn, "migrate", "e1", "product:0", 1, binlogdatapb.VReplicationWorkflowState_Stopped.String()) waitForRowCount(t, vtgateConn, "product:0", "rating", 0) waitForRowCount(t, vtgateConn, "product:0", "review", 0) output, err = vc.VtctldClient.ExecuteCommandWithOutput("Migrate", diff --git a/go/test/endtoend/vreplication/movetables_buffering_test.go b/go/test/endtoend/vreplication/movetables_buffering_test.go index 4e4b7cada97..f456c32bfd5 100644 --- a/go/test/endtoend/vreplication/movetables_buffering_test.go +++ 
b/go/test/endtoend/vreplication/movetables_buffering_test.go @@ -2,26 +2,25 @@ package vreplication import ( "testing" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "time" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/wrangler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestMoveTablesBuffering(t *testing.T) { defaultRdonly = 1 vc = setupMinimalCluster(t) - defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() - currentWorkflowType = wrangler.MoveTablesWorkflow + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables setupMinimalCustomerKeyspace(t) tables := "loadtest" err := tstWorkflowExec(t, defaultCellName, workflowName, sourceKs, targetKs, - tables, workflowActionCreate, "", "", "", false) + tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) @@ -35,11 +34,14 @@ func TestMoveTablesBuffering(t *testing.T) { catchup(t, targetTab2, workflowName, "MoveTables") vdiffSideBySide(t, ksWorkflow, "") waitForLowLag(t, "customer", workflowName) - tstWorkflowSwitchReads(t, "", "") - tstWorkflowSwitchWrites(t) + for i := 0; i < 10; i++ { + tstWorkflowSwitchReadsAndWrites(t) + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) + tstWorkflowReverseReadsAndWrites(t) + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) + } log.Infof("SwitchWrites done") lg.stop() log.Infof("TestMoveTablesBuffering: done") - log.Flush() } diff --git a/go/test/endtoend/vreplication/multi_tenant_test.go b/go/test/endtoend/vreplication/multi_tenant_test.go new file mode 100644 index 00000000000..eda245ee597 --- /dev/null +++ b/go/test/endtoend/vreplication/multi_tenant_test.go @@ -0,0 +1,613 @@ +/* +Copyright 2024 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Consists of two tests. Both tests are for multi-tenant migration scenarios. + +1. TestMultiTenantSimple: migrates a single tenant to a target keyspace. + +2. TestMultiTenantComplex: migrates multiple tenants to a single target keyspace, with concurrent migrations. + +The tests use the MoveTables workflow to migrate the tenants. They are designed to simulate a real-world multi-tenant +migration scenario, where each tenant is in a separate database. +*/ + +package vreplication + +import ( + "encoding/json" + "fmt" + "math/rand/v2" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/vtctldata" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +type tenantMigrationStatus int + +const ( + tenantMigrationStatusNotMigrated tenantMigrationStatus = iota + tenantMigrationStatusMigrating + tenantMigrationStatusMigrated + + sourceKeyspaceTemplate = "s%d" + targetKeyspaceName = "mt" + + numTenants = 10 + numInitialRowsPerTenant = 10 + numAdditionalRowsPerTenant = 10 + baseInitialTabletId = 1000 + tabletIdStep = 100 + maxRandomDelaySeconds = 5 + waitTimeout = 10 * time.Minute +) + +var ( + // channels to coordinate the migration workflow + chNotSetup, chNotCreated, chInProgress, chSwitched, chCompleted chan int64 + // 
counters to keep track of the number of tenants in each state + numSetup, numInProgress, numSwitched, numCompleted atomic.Int64 + + emptyKeyspaceRoutingRules = &vschemapb.KeyspaceRoutingRules{} +) + +// multiTenantMigration manages the migration of multiple tenants to a single target keyspace. +// A singleton object of this type is created for the test case. +type multiTenantMigration struct { + t *testing.T + mu sync.Mutex + tenantMigrationStatus map[int64]tenantMigrationStatus // current migration status for each tenant + activeMoveTables map[int64]*VtctldMoveTables // the internal MoveTables object for each tenant + + targetKeyspace string + tables string + tenantIdColumnName string // the name of the column in each table that holds the tenant ID + + lastIDs map[int64]int64 // the last primary key inserted for each tenant +} + +const ( + mtSchema = "create table t1(id int, tenant_id int, primary key(id, tenant_id)) Engine=InnoDB" + mtVSchema = ` +{ + "multi_tenant_spec": { + "tenant_id_column_name": "tenant_id", + "tenant_id_column_type": "INT64" + }, + "tables": { + "t1": {} + } +} +` + mtShardedVSchema = ` +{ + "sharded": true, + "multi_tenant_spec": { + "tenant_id_column_name": "tenant_id", + "tenant_id_column_type": "INT64" + }, + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "tenant_id", + "name": "reverse_bits" + } + ] + } + } +} +` + stSchema = mtSchema + stVSchema = ` +{ + "tables": { + "t1": {} + } +} +` +) + +// TestMultiTenantSimple tests a single tenant migration. The aim here is to test all the steps of the migration process +// including keyspace routing rules, addition of tenant filters to the forward and reverse vreplication streams, and +// verifying that the data is migrated correctly. 
+func TestMultiTenantSimple(t *testing.T) { + setSidecarDBName("_vt") + // Don't create RDONLY tablets to reduce number of tablets created to reduce resource requirements for the test. + origDefaultRdonly := defaultRdonly + defer func() { + defaultRdonly = origDefaultRdonly + }() + defaultRdonly = 0 + vc = setupMinimalCluster(t) + defer vc.TearDown() + + targetKeyspace := "mt" + _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, targetKeyspace, "0", mtVSchema, mtSchema, 1, 0, 200, nil) + require.NoError(t, err) + + tenantId := int64(1) + sourceKeyspace := getSourceKeyspace(tenantId) + _, err = vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, sourceKeyspace, "0", stVSchema, stSchema, 1, 0, getInitialTabletIdForTenant(tenantId), nil) + require.NoError(t, err) + + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + numRows := 10 + lastIndex := int64(0) + insertRows := func(lastIndex int64, keyspace string) int64 { + for i := 1; i <= numRows; i++ { + execQueryWithRetry(t, vtgateConn, + fmt.Sprintf("insert into %s.t1(id, tenant_id) values(%d, %d)", keyspace, int64(i)+lastIndex, tenantId), queryTimeout) + } + return int64(numRows) + lastIndex + } + lastIndex = insertRows(lastIndex, sourceKeyspace) + + mt := newVtctldMoveTables(&moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: fmt.Sprintf("wf%d", tenantId), + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + createFlags: []string{ + "--tenant-id", strconv.FormatInt(tenantId, 10), + }, + }) + + // Expected keyspace routing rules on creation of the workflow. 
+ initialRules := &vschemapb.KeyspaceRoutingRules{ + Rules: []*vschemapb.KeyspaceRoutingRule{ + {FromKeyspace: "s1", ToKeyspace: "s1"}, + {FromKeyspace: "s1@rdonly", ToKeyspace: "s1"}, + {FromKeyspace: "s1@replica", ToKeyspace: "s1"}, + }, + } + + require.Zero(t, len(getKeyspaceRoutingRules(t, vc).Rules)) + + mt.Create() + confirmKeyspacesRoutedTo(t, sourceKeyspace, "s1", "t1", nil) + validateKeyspaceRoutingRules(t, vc, initialRules) + + lastIndex = insertRows(lastIndex, sourceKeyspace) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, mt.workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + + mt.SwitchReads() + confirmOnlyReadsSwitched(t) + + mt.SwitchWrites() + confirmBothReadsAndWritesSwitched(t) + + // Note: here we have already switched, and we can insert into the target keyspace, and it should get reverse + // replicated to the source keyspace. The source keyspace is routed to the target keyspace at this point. + lastIndex = insertRows(lastIndex, sourceKeyspace) + sourceTablet := vc.getPrimaryTablet(t, sourceKeyspace, "0") + require.NotNil(t, sourceTablet) + // Wait for the rows to be reverse replicated to the source keyspace. + waitForRowCountInTablet(t, sourceTablet, sourceKeyspace, "t1", int(lastIndex)) + + mt.Complete() + require.Zero(t, len(getKeyspaceRoutingRules(t, vc).Rules)) + // Targeting to target keyspace should start working now. Upto this point we had to target the source keyspace. 
+ lastIndex = insertRows(lastIndex, targetKeyspace) + + actualRowsInserted := getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", targetKeyspace, "t1")) + log.Infof("Migration completed, total rows in target: %d", actualRowsInserted) + require.Equal(t, lastIndex, int64(actualRowsInserted)) + + t.Run("Test ApplyKeyspaceRoutingRules", func(t *testing.T) { + // First set of rules + applyKeyspaceRoutingRules(t, initialRules) + + updatedRules := &vschemapb.KeyspaceRoutingRules{ + Rules: []*vschemapb.KeyspaceRoutingRule{ + {FromKeyspace: "s1", ToKeyspace: "mt"}, + {FromKeyspace: "s1@rdonly", ToKeyspace: "mt"}, + {FromKeyspace: "s1@replica", ToKeyspace: "mt"}, + }, + } + // Update the rules + applyKeyspaceRoutingRules(t, updatedRules) + // Update with the same rules + applyKeyspaceRoutingRules(t, updatedRules) + // Remove the rules + applyKeyspaceRoutingRules(t, emptyKeyspaceRoutingRules) + // Test setting empty rules again + applyKeyspaceRoutingRules(t, emptyKeyspaceRoutingRules) + }) +} + +func applyKeyspaceRoutingRules(t *testing.T, newRules *vschemapb.KeyspaceRoutingRules) { + var rulesJSON []byte + var err error + require.NotNil(t, newRules) + rulesJSON, err = json.Marshal(newRules) + require.NoError(t, err) + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ApplyKeyspaceRoutingRules", "--rules", string(rulesJSON)) + require.NoError(t, err) + + response := &vtctldata.ApplyKeyspaceRoutingRulesResponse{} + err = json.Unmarshal([]byte(output), response) + require.NoError(t, err) + require.ElementsMatch(t, newRules.Rules, response.GetKeyspaceRoutingRules().Rules) +} + +func confirmOnlyReadsSwitched(t *testing.T) { + confirmKeyspacesRoutedTo(t, "s1", "mt", "t1", []string{"rdonly", "replica"}) + confirmKeyspacesRoutedTo(t, "s1", "s1", "t1", []string{"primary"}) + rules := &vschemapb.KeyspaceRoutingRules{ + Rules: []*vschemapb.KeyspaceRoutingRule{ + {FromKeyspace: "s1", ToKeyspace: "s1"}, + {FromKeyspace: "s1@rdonly", ToKeyspace: "mt"}, + {FromKeyspace: "s1@replica", 
ToKeyspace: "mt"}, + }, + } + validateKeyspaceRoutingRules(t, vc, rules) +} + +func confirmOnlyWritesSwitched(t *testing.T) { + confirmKeyspacesRoutedTo(t, "s1", "s1", "t1", []string{"rdonly", "replica"}) + confirmKeyspacesRoutedTo(t, "s1", "mt", "t1", []string{"primary"}) + rules := &vschemapb.KeyspaceRoutingRules{ + Rules: []*vschemapb.KeyspaceRoutingRule{ + {FromKeyspace: "s1", ToKeyspace: "mt"}, + {FromKeyspace: "s1@rdonly", ToKeyspace: "s1"}, + {FromKeyspace: "s1@replica", ToKeyspace: "s1"}, + }, + } + validateKeyspaceRoutingRules(t, vc, rules) +} + +// TestMultiTenantSharded tests a single tenant migration to a sharded target. The aim is to test +// the specification of the target shards in all the MoveTables subcommands, including creating only one stream +// for a tenant on the shard to which this tenant id will be routed, using the specified Vindex. +func TestMultiTenantSharded(t *testing.T) { + setSidecarDBName("_vt") + // Don't create RDONLY tablets to reduce number of tablets created to reduce resource requirements for the test. 
+ origDefaultRdonly := defaultRdonly + defer func() { + defaultRdonly = origDefaultRdonly + }() + defaultRdonly = 0 + vc = setupMinimalCluster(t) + defer vc.TearDown() + + targetKeyspace := "mt" + _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, targetKeyspace, "-40,40-80,80-a0,a0-", mtShardedVSchema, mtSchema, 1, 0, 200, nil) + require.NoError(t, err) + + tenantId := int64(1) + tenantShard := "80-a0" // matches the vindex + sourceKeyspace := getSourceKeyspace(tenantId) + _, err = vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, sourceKeyspace, "0", stVSchema, stSchema, 1, 0, getInitialTabletIdForTenant(tenantId), nil) + require.NoError(t, err) + + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + numRows := 10 + lastIndex := int64(0) + insertRows := func(lastIndex int64, keyspace string) int64 { + for i := 1; i <= numRows; i++ { + execQueryWithRetry(t, vtgateConn, + fmt.Sprintf("insert into %s.t1(id, tenant_id) values(%d, %d)", keyspace, int64(i)+lastIndex, tenantId), queryTimeout) + } + return int64(numRows) + lastIndex + } + lastIndex = insertRows(lastIndex, sourceKeyspace) + + mt := newVtctldMoveTables(&moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: fmt.Sprintf("wf%d", tenantId), + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + createFlags: []string{ + "--tenant-id", strconv.FormatInt(tenantId, 10), + "--shards", tenantShard, // create the workflow for tenantid 1 in shard 80-a0: matches the vindex + }, + switchFlags: []string{ + "--shards", tenantShard, + }, + completeFlags: []string{ + "--shards", tenantShard, + }, + showFlags: []string{ + "--shards", tenantShard, + }, + }) + + mt.Create() + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, mt.workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + mt.Show() + var workflowState vtctldata.GetWorkflowsResponse + err = protojson.Unmarshal([]byte(mt.lastOutput), &workflowState) + require.NoError(t, err) + 
require.Equal(t, 1, len(workflowState.Workflows)) + wf := workflowState.Workflows[0] + // Verifies that only one stream is created for the tenant on the shard to which this tenant id will be routed. + require.Equal(t, 1, len(wf.ShardStreams)) + + // Note: we cannot insert into the target keyspace since that is never routed to the source keyspace. + lastIndex = insertRows(lastIndex, sourceKeyspace) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, mt.workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + mt.SwitchReadsAndWrites() + // Note: here we have already switched, and we can insert into the target keyspace, and it should get reverse + // replicated to the source keyspace. The source keyspace is routed to the target keyspace at this point. + lastIndex = insertRows(lastIndex, sourceKeyspace) + mt.Complete() + require.Zero(t, len(getKeyspaceRoutingRules(t, vc).Rules)) + actualRowsInserted := getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", targetKeyspace, "t1")) + require.Equal(t, lastIndex, int64(actualRowsInserted)) + require.Equal(t, lastIndex, int64(getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", targetKeyspace, "t1")))) + log.Infof("Migration completed, total rows in target: %d", actualRowsInserted) +} + +func confirmBothReadsAndWritesSwitched(t *testing.T) { + confirmKeyspacesRoutedTo(t, "s1", "mt", "t1", []string{"rdonly", "replica"}) + confirmKeyspacesRoutedTo(t, "s1", "mt", "t1", []string{"primary"}) + rules := &vschemapb.KeyspaceRoutingRules{ + Rules: []*vschemapb.KeyspaceRoutingRule{ + {FromKeyspace: "s1", ToKeyspace: "mt"}, + {FromKeyspace: "s1@rdonly", ToKeyspace: "mt"}, + {FromKeyspace: "s1@replica", ToKeyspace: "mt"}, + }, + } + validateKeyspaceRoutingRules(t, vc, rules) +} + +func validateKeyspaceRoutingRules(t *testing.T, vc *VitessCluster, expectedRules *vschemapb.KeyspaceRoutingRules) { + currentRules := getKeyspaceRoutingRules(t, vc) + require.ElementsMatch(t, expectedRules.Rules, currentRules.Rules) +} + 
+func getSourceKeyspace(tenantId int64) string { + return fmt.Sprintf(sourceKeyspaceTemplate, tenantId) +} + +func (mtm *multiTenantMigration) insertSomeData(t *testing.T, tenantId int64, keyspace string, numRows int64) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + idx := mtm.getLastID(tenantId) + for i := idx + 1; i <= idx+numRows; i++ { + execQueryWithRetry(t, vtgateConn, + fmt.Sprintf("insert into %s.t1(id, tenant_id) values(%d, %d)", keyspace, i, tenantId), queryTimeout) + } + mtm.setLastID(tenantId, idx+numRows) +} + +func getKeyspaceRoutingRules(t *testing.T, vc *VitessCluster) *vschemapb.KeyspaceRoutingRules { + output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetKeyspaceRoutingRules") + require.NoError(t, err) + rules := &vschemapb.KeyspaceRoutingRules{} + err = json.Unmarshal([]byte(output), rules) + require.NoError(t, err) + return rules +} + +// TestMultiTenant tests a multi-tenant migration scenario where each tenant is in a separate database. +// It uses MoveTables to migrate all tenants to the same target keyspace. The test creates a separate source keyspace +// for each tenant. It then steps through the migration process for each tenant, and verifies that the data is migrated +// correctly. The migration steps are done concurrently and randomly to simulate an actual multi-tenant migration. +func TestMultiTenantComplex(t *testing.T) { + setSidecarDBName("_vt") + // Don't create RDONLY tablets to reduce number of tablets created to reduce resource requirements for the test. + origDefaultRdonly := defaultRdonly + defer func() { + defaultRdonly = origDefaultRdonly + }() + defaultRdonly = 0 + vc = setupMinimalCluster(t) + defer vc.TearDown() + + mtm := newMultiTenantMigration(t) + numTenantsMigrated := 0 + mtm.run() // Start the migration process for all tenants. 
+ timer := time.NewTimer(waitTimeout) + for numTenantsMigrated < numTenants { + select { + case tenantId := <-chCompleted: + mtm.setTenantMigrationStatus(tenantId, tenantMigrationStatusMigrated) + numTenantsMigrated++ + timer.Reset(waitTimeout) + case <-timer.C: + require.FailNow(t, "Timed out waiting for all tenants to complete") + } + } + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + t.Run("Verify all rows have been migrated", func(t *testing.T) { + numAdditionalInsertSets := 2 /* during the SwitchTraffic stop */ + 1 /* after Complete */ + totalRowsInsertedPerTenant := numInitialRowsPerTenant + numAdditionalRowsPerTenant*numAdditionalInsertSets + totalRowsInserted := totalRowsInsertedPerTenant * numTenants + totalActualRowsInserted := getRowCount(t, vtgateConn, fmt.Sprintf("%s.%s", mtm.targetKeyspace, "t1")) + require.Equal(t, totalRowsInserted, totalActualRowsInserted) + log.Infof("Migration completed, total rows inserted in target: %d", totalActualRowsInserted) + }) +} + +func newMultiTenantMigration(t *testing.T) *multiTenantMigration { + _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, targetKeyspaceName, "0", mtVSchema, mtSchema, 1, 0, 200, nil) + require.NoError(t, err) + mtm := &multiTenantMigration{ + t: t, + tenantMigrationStatus: make(map[int64]tenantMigrationStatus), + activeMoveTables: make(map[int64]*VtctldMoveTables), + targetKeyspace: targetKeyspaceName, + tables: "t1", + tenantIdColumnName: "tenant_id", + lastIDs: make(map[int64]int64), + } + for i := 1; i <= numTenants; i++ { + mtm.setTenantMigrationStatus(int64(i), tenantMigrationStatusNotMigrated) + } + channelSize := numTenants + 1 // +1 to make sure the channels never block + for _, ch := range []*chan int64{&chNotSetup, &chNotCreated, &chInProgress, &chSwitched, &chCompleted} { + *ch = make(chan int64, channelSize) + } + return mtm +} + +func (mtm *multiTenantMigration) getTenantMigrationStatus(tenantId int64) tenantMigrationStatus { + mtm.mu.Lock() + defer 
mtm.mu.Unlock() + return mtm.tenantMigrationStatus[tenantId] +} + +func (mtm *multiTenantMigration) setTenantMigrationStatus(tenantId int64, status tenantMigrationStatus) { + mtm.mu.Lock() + defer mtm.mu.Unlock() + mtm.tenantMigrationStatus[tenantId] = status +} + +func (mtm *multiTenantMigration) getActiveMoveTables(tenantId int64) *VtctldMoveTables { + mtm.mu.Lock() + defer mtm.mu.Unlock() + return mtm.activeMoveTables[tenantId] +} + +func (mtm *multiTenantMigration) setActiveMoveTables(tenantId int64, mt *VtctldMoveTables) { + mtm.mu.Lock() + defer mtm.mu.Unlock() + mtm.activeMoveTables[tenantId] = mt +} + +func (mtm *multiTenantMigration) setLastID(tenantId, lastID int64) { + mtm.mu.Lock() + defer mtm.mu.Unlock() + mtm.lastIDs[tenantId] = lastID +} + +func (mtm *multiTenantMigration) getLastID(tenantId int64) int64 { + mtm.mu.Lock() + defer mtm.mu.Unlock() + return mtm.lastIDs[tenantId] +} + +func (mtm *multiTenantMigration) initTenantData(t *testing.T, tenantId int64) { + mtm.insertSomeData(t, tenantId, getSourceKeyspace(tenantId), numInitialRowsPerTenant) +} + +func getInitialTabletIdForTenant(tenantId int64) int { + return int(baseInitialTabletId + tenantId*tabletIdStep) +} + +func (mtm *multiTenantMigration) setup(tenantId int64) { + log.Infof("Creating MoveTables for tenant %d", tenantId) + mtm.setLastID(tenantId, 0) + sourceKeyspace := getSourceKeyspace(tenantId) + _, err := vc.AddKeyspace(mtm.t, []*Cell{vc.Cells["zone1"]}, sourceKeyspace, "0", stVSchema, stSchema, + 1, 0, getInitialTabletIdForTenant(tenantId), nil) + require.NoError(mtm.t, err) + mtm.initTenantData(mtm.t, tenantId) +} + +func (mtm *multiTenantMigration) start(tenantId int64) { + sourceKeyspace := getSourceKeyspace(tenantId) + mtm.setTenantMigrationStatus(tenantId, tenantMigrationStatusMigrating) + mt := newVtctldMoveTables(&moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: fmt.Sprintf("wf%d", tenantId), + targetKeyspace: mtm.targetKeyspace, + }, + 
sourceKeyspace: sourceKeyspace, + tables: mtm.tables, + createFlags: []string{ + "--tenant-id", strconv.FormatInt(tenantId, 10), + }, + }) + mtm.setActiveMoveTables(tenantId, mt) + mt.Create() +} + +func (mtm *multiTenantMigration) switchTraffic(tenantId int64) { + t := mtm.t + sourceKeyspaceName := getSourceKeyspace(tenantId) + mt := mtm.getActiveMoveTables(tenantId) + ksWorkflow := fmt.Sprintf("%s.%s", mtm.targetKeyspace, mt.workflowName) + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + mtm.insertSomeData(t, tenantId, sourceKeyspaceName, numAdditionalRowsPerTenant) + mt.SwitchReadsAndWrites() + mtm.insertSomeData(t, tenantId, sourceKeyspaceName, numAdditionalRowsPerTenant) +} + +func (mtm *multiTenantMigration) complete(tenantId int64) { + mt := mtm.getActiveMoveTables(tenantId) + mt.Complete() + mtm.insertSomeData(mtm.t, tenantId, mtm.targetKeyspace, numAdditionalRowsPerTenant) + vtgateConn := vc.GetVTGateConn(mtm.t) + defer vtgateConn.Close() + waitForQueryResult(mtm.t, vtgateConn, "", + fmt.Sprintf("select count(*) from %s.t1 where tenant_id=%d", mt.targetKeyspace, tenantId), + fmt.Sprintf("[[INT64(%d)]]", mtm.getLastID(tenantId))) +} + +func randomWait() { + time.Sleep(time.Duration(rand.IntN(maxRandomDelaySeconds)) * time.Second) +} + +func (mtm *multiTenantMigration) doThis(name string, chIn, chOut chan int64, counter *atomic.Int64, f func(int64)) { + timer := time.NewTimer(waitTimeout) + for counter.Load() < numTenants { + select { + case tenantId := <-chIn: + f(tenantId) + counter.Add(1) + chOut <- tenantId + timer.Reset(waitTimeout) + case <-timer.C: + require.FailNowf(mtm.t, "Timed out: %s", name) + } + randomWait() + } +} + +// run starts the migration process for all tenants. 
It starts concurrent +func (mtm *multiTenantMigration) run() { + go mtm.doThis("Setup tenant keyspace/schemas", chNotSetup, chNotCreated, &numSetup, mtm.setup) + for i := int64(1); i <= numTenants; i++ { + chNotSetup <- i + } + // Wait for all tenants to be created before starting the workflows: 10 seconds per tenant to account for CI overhead. + perTenantLoadTimeout := 1 * time.Minute + require.NoError(mtm.t, waitForCondition("All tenants created", + func() bool { + return numSetup.Load() == numTenants + }, perTenantLoadTimeout*numTenants)) + + go mtm.doThis("Start Migrations", chNotCreated, chInProgress, &numInProgress, mtm.start) + go mtm.doThis("Switch Traffic", chInProgress, chSwitched, &numSwitched, mtm.switchTraffic) + go mtm.doThis("Mark Migrations Complete", chSwitched, chCompleted, &numCompleted, mtm.complete) +} diff --git a/go/test/endtoend/vreplication/partial_movetables_seq_test.go b/go/test/endtoend/vreplication/partial_movetables_seq_test.go index 6a1ed92cb9c..eec304e0a4d 100644 --- a/go/test/endtoend/vreplication/partial_movetables_seq_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_seq_test.go @@ -20,7 +20,6 @@ import ( "fmt" "strings" "testing" - "time" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" @@ -28,7 +27,6 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/wrangler" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -75,7 +73,7 @@ type vrepTestCase struct { vtgate *cluster.VtgateProcess } -func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCase { +func initPartialMoveTablesComplexTestCase(t *testing.T) *vrepTestCase { const ( seqVSchema = `{ "sharded": false, @@ -122,7 +120,7 @@ func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCa ) tc := &vrepTestCase{ t: t, - testName: name, + testName: t.Name(), keyspaces: make(map[string]*keyspace), defaultCellName: 
"zone1", workflows: make(map[string]*workflow), @@ -169,18 +167,15 @@ func initPartialMoveTablesComplexTestCase(t *testing.T, name string) *vrepTestCa func (tc *vrepTestCase) teardown() { tc.vtgateConn.Close() - vc.TearDown(tc.t) + vc.TearDown() } func (tc *vrepTestCase) setupCluster() { - cells := []string{"zone1"} - - tc.vc = NewVitessCluster(tc.t, tc.testName, cells, mainClusterConfig) + tc.vc = NewVitessCluster(tc.t, nil) vc = tc.vc // for backward compatibility since vc is used globally in this package require.NotNil(tc.t, tc.vc) tc.setupKeyspaces([]string{"commerce", "seqSrc"}) tc.vtgateConn = getConnection(tc.t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) - vtgateConn = tc.vtgateConn // for backward compatibility since vtgateConn is used globally in this package } func (tc *vrepTestCase) initData() { @@ -211,10 +206,6 @@ func (tc *vrepTestCase) setupKeyspace(ks *keyspace) { tc.vtgate = defaultCell.Vtgates[0] } - for _, shard := range ks.shards { - require.NoError(t, cluster.WaitForHealthyShard(tc.vc.VtctldClient, ks.name, shard)) - require.NoError(t, tc.vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", ks.name, shard), 1, 30*time.Second)) - } } func (tc *vrepTestCase) newWorkflow(typ, workflowName, fromKeyspace, toKeyspace string, options *workflowOptions) *workflow { @@ -236,19 +227,19 @@ func (wf *workflow) create() { cell := wf.tc.defaultCellName switch typ { case "movetables": - currentWorkflowType = wrangler.MoveTablesWorkflow + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables sourceShards := strings.Join(wf.options.sourceShards, ",") err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, - strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, "", false) + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, "", defaultWorkflowExecOptions) case "reshard": - currentWorkflowType = wrangler.ReshardWorkflow + currentWorkflowType = 
binlogdatapb.VReplicationWorkflowType_Reshard sourceShards := strings.Join(wf.options.sourceShards, ",") targetShards := strings.Join(wf.options.targetShards, ",") if targetShards == "" { targetShards = sourceShards } err = tstWorkflowExec(t, cell, wf.name, wf.fromKeyspace, wf.toKeyspace, - strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, targetShards, false) + strings.Join(wf.options.tables, ","), workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) default: panic(fmt.Sprintf("unknown workflow type: %s", wf.typ)) } @@ -266,15 +257,15 @@ func (wf *workflow) create() { } func (wf *workflow) switchTraffic() { - require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionSwitchTraffic, "", "", "", false)) + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions)) } func (wf *workflow) reverseTraffic() { - require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionReverseTraffic, "", "", "", false)) + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionReverseTraffic, "", "", "", defaultWorkflowExecOptions)) } func (wf *workflow) complete() { - require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionComplete, "", "", "", false)) + require.NoError(wf.tc.t, tstWorkflowExec(wf.tc.t, wf.tc.defaultCellName, wf.name, wf.fromKeyspace, wf.toKeyspace, "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions)) } // TestPartialMoveTablesWithSequences enhances TestPartialMoveTables by adding an unsharded keyspace which has a @@ -291,7 +282,7 @@ func TestPartialMoveTablesWithSequences(t 
*testing.T) { extraVTGateArgs = origExtraVTGateArgs }() - tc := initPartialMoveTablesComplexTestCase(t, "TestPartialMoveTablesComplex") + tc := initPartialMoveTablesComplexTestCase(t) defer tc.teardown() var err error @@ -336,6 +327,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { shard := "80-" var wf80Dash, wfDash80 *workflow currentCustomerCount = getCustomerCount(t, "before customer2.80-") + vtgateConn, closeConn := getVTGateConn() t.Run("Start MoveTables on customer2.80-", func(t *testing.T) { // Now setup the customer2 keyspace so we can do a partial move tables for one of the two shards: 80-. defaultRdonly = 0 @@ -353,16 +345,17 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { }) currentCustomerCount = getCustomerCount(t, "after customer2.80-/2") - log.Flush() // This query uses an ID that should always get routed to shard 80- - shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + shard80DashRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" // This query uses an ID that should always get routed to shard -80 - shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + shardDash80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // Reset any existing vtgate connection state. - vtgateConn.Close() - vtgateConn = getConnection(t, tc.vc.ClusterConfig.hostname, tc.vc.ClusterConfig.vtgateMySQLPort) + closeConn() + + vtgateConn, closeConn = getVTGateConn() + defer closeConn() t.Run("Confirm routing rules", func(t *testing.T) { // Global routing rules should be in place with everything going to the source keyspace (customer). 
@@ -378,14 +371,14 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { log.Infof("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") @@ -395,7 +388,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { // Switch all traffic for the shard wf80Dash.switchTraffic() - expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", + expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\n\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. 
Writes partially switched, for shards: %s\n\n", targetKs, wfName, shard, shard) require.Equal(t, expectedSwitchOutput, lastOutput) currentCustomerCount = getCustomerCount(t, "") @@ -419,22 +412,22 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { t.Run("Validate shard and tablet type routing", func(t *testing.T) { // No shard targeting - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") @@ -455,7 +448,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { wfDash80.create() wfDash80.switchTraffic() - expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. 
Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n", + expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\n\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. All Writes Switched\n\n", targetKs, wfName) require.Equal(t, expectedSwitchOutput, lastOutput) @@ -505,7 +498,7 @@ func TestPartialMoveTablesWithSequences(t *testing.T) { // We switched traffic, so it's the reverse workflow we want to cancel. reverseWf := wf + "_reverse" reverseKs := sourceKs // customer - err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) + err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) output, err := tc.vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") @@ -537,6 +530,8 @@ var newCustomerCount = int64(201) var lastCustomerId int64 func getCustomerCount(t *testing.T, msg string) int64 { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "", "select count(*) from customer") require.NotNil(t, qr) count, err := qr.Rows[0][0].ToInt64() @@ -545,6 +540,8 @@ func getCustomerCount(t *testing.T, msg string) int64 { } func confirmLastCustomerIdHasIncreased(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() qr := execVtgateQuery(t, vtgateConn, "", "select cid from customer order by cid desc limit 1") require.NotNil(t, qr) currentCustomerId, err := qr.Rows[0][0].ToInt64() @@ -554,6 +551,8 @@ func confirmLastCustomerIdHasIncreased(t *testing.T) { } func insertCustomers(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for i := int64(1); i < newCustomerCount+1; i++ { execVtgateQuery(t, vtgateConn, "customer@primary", fmt.Sprintf("insert 
into customer(name) values ('name-%d')", currentCustomerCount+i)) } diff --git a/go/test/endtoend/vreplication/partial_movetables_test.go b/go/test/endtoend/vreplication/partial_movetables_test.go index 5583232fbdc..4236bff95a3 100644 --- a/go/test/endtoend/vreplication/partial_movetables_test.go +++ b/go/test/endtoend/vreplication/partial_movetables_test.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" "testing" + "time" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -27,7 +28,6 @@ import ( "github.com/tidwall/gjson" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/wrangler" ) // testCancel() starts and cancels a partial MoveTables for one of the shards which will be actually moved later on. @@ -44,13 +44,16 @@ func testCancel(t *testing.T) { table := "customer2" shard := "80-" // start the partial movetables for 80- - mt := newMoveTables(vc, &moveTables{ - workflowName: workflowName, - targetKeyspace: targetKeyspace, + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, sourceKeyspace: sourceKeyspace, tables: table, sourceShards: shard, - }, moveTablesFlavorRandom) + }, workflowFlavorRandom) mt.Create() checkDenyList := func(keyspace string, expected bool) { @@ -65,10 +68,12 @@ func testCancel(t *testing.T) { mt.SwitchReadsAndWrites() checkDenyList(targetKeyspace, false) checkDenyList(sourceKeyspace, true) + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.ReverseReadsAndWrites() checkDenyList(targetKeyspace, true) checkDenyList(sourceKeyspace, false) + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) mt.Cancel() checkDenyList(targetKeyspace, false) @@ -76,14 +81,13 @@ func testCancel(t *testing.T) { } -// TestPartialMoveTablesBasic tests partial move tables by moving each -// customer shard -- -80,80- -- once a a time to customer2. 
-func TestPartialMoveTablesBasic(t *testing.T) { +func testPartialMoveTablesBasic(t *testing.T, flavor workflowFlavor) { + setSidecarDBName("_vt") origDefaultRdonly := defaultRdonly defer func() { defaultRdonly = origDefaultRdonly }() - defaultRdonly = 1 + defaultRdonly = 0 origExtraVTGateArgs := extraVTGateArgs // We need to enable shard routing for partial movetables routing. // And we need to disable schema change tracking in vtgate as we want @@ -100,15 +104,30 @@ func TestPartialMoveTablesBasic(t *testing.T) { extraVTGateArgs = origExtraVTGateArgs }() vc = setupMinimalCluster(t) - defer vtgateConn.Close() - defer vc.TearDown(t) - setupMinimalCustomerKeyspace(t) + defer vc.TearDown() + sourceKeyspace := "product" + targetKeyspace := "customer" + workflowName := "wf1" + targetTabs := setupMinimalCustomerKeyspace(t) + targetTab80Dash := targetTabs["80-"] + targetTabDash80 := targetTabs["-80"] + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + tables: "customer,loadtest,customer2", + }, flavor) + mt.Create() - // Move customer table from unsharded product keyspace to - // sharded customer keyspace. - createMoveTablesWorkflow(t, "customer,loadtest,customer2") - tstWorkflowSwitchReadsAndWrites(t) - tstWorkflowComplete(t) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + catchup(t, targetTab80Dash, workflowName, "MoveTables") + vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) + mt.SwitchReadsAndWrites() + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) + mt.Complete() emptyGlobalRoutingRules := "{}\n" @@ -129,20 +148,35 @@ func TestPartialMoveTablesBasic(t *testing.T) { // move tables for one of the two shards: 80-. 
defaultRdonly = 0 setupCustomer2Keyspace(t) - testCancel(t) - currentWorkflowType = wrangler.MoveTablesWorkflow - wfName := "partial80Dash" - sourceKs := "customer" - targetKs := "customer2" + // We specify the --shards flag for one of the workflows to confirm that both the MoveTables and Workflow commands + // work the same with or without the flag. + workflowExecOptsPartialDash80 := &workflowExecOptions{ + deferSecondaryKeys: true, + shardSubset: "-80", + } + workflowExecOptsPartial80Dash := &workflowExecOptions{ + deferSecondaryKeys: true, + } + var err error + workflowName = "partial80Dash" + sourceKeyspace = "customer" + targetKeyspace = "customer2" shard := "80-" - ksWf := fmt.Sprintf("%s.%s", targetKs, wfName) + tables := "customer,loadtest" + mt80Dash := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + tables: tables, + sourceShards: shard, + }, flavor) + mt80Dash.Create() - // start the partial movetables for 80- - err := tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer,loadtest", workflowActionCreate, "", shard, "", false) - require.NoError(t, err) var lg *loadGenerator if runWithLoad { // start load after routing rules are set, otherwise we end up with ambiguous tables lg = newLoadGenerator(t, vc) @@ -151,11 +185,12 @@ func TestPartialMoveTablesBasic(t *testing.T) { }() lg.waitForCount(1000) } + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + catchup(t, targetTab80Dash, workflowName, "MoveTables") + vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) - targetTab1 = vc.getPrimaryTablet(t, targetKs, shard) - catchup(t, targetTab1, wfName, "Partial MoveTables Customer to Customer2") - vdiffSideBySide(t, ksWf, "") - + vtgateConn, closeConn := getVTGateConn() + defer closeConn() 
waitForRowCount(t, vtgateConn, "customer", "customer", 3) // customer: all shards waitForRowCount(t, vtgateConn, "customer2", "customer", 3) // customer2: all shards waitForRowCount(t, vtgateConn, "customer2:80-", "customer", 2) // customer2: 80- @@ -179,9 +214,9 @@ func TestPartialMoveTablesBasic(t *testing.T) { } // This query uses an ID that should always get routed to shard 80- - shard80MinusRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" + shard80DashRoutedQuery := "select name from customer where cid = 1 and noexistcol = 'foo'" // This query uses an ID that should always get routed to shard -80 - shardMinus80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" + shardDash80RoutedQuery := "select name from customer where cid = 2 and noexistcol = 'foo'" // reset any existing vtgate connection state vtgateConn.Close() @@ -202,22 +237,20 @@ func TestPartialMoveTablesBasic(t *testing.T) { log.Infof("Testing reverse route (target->source) for shard being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.80-.primary", "Query was routed to the target before any SwitchTraffic") log.Infof("Testing reverse route (target->source) for shard NOT being switched") _, err = vtgateConn.ExecuteFetch("use `customer2:-80`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before any SwitchTraffic") // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", 
workflowActionSwitchTraffic, "", "", "", false)) - expectedSwitchOutput := fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads Not Switched. Writes Not Switched\nCurrent State: Reads partially switched, for shards: %s. Writes partially switched, for shards: %s\n\n", - targetKs, wfName, shard, shard) - require.Equal(t, expectedSwitchOutput, lastOutput) + mt80Dash.SwitchReadsAndWrites() + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) // Confirm global routing rules -- everything should still be routed // to the source side, customer, globally. @@ -233,69 +266,77 @@ func TestPartialMoveTablesBasic(t *testing.T) { defer vtgateConn.Close() // No shard targeting - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.primary", "Query was routed to the target before partial SwitchTraffic") // Shard targeting _, err = vtgateConn.ExecuteFetch("use `customer2:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", "Query was routed to the source after partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer:80-`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.primary", 
"Query was routed to the source after partial SwitchTraffic") // Tablet type targeting _, err = vtgateConn.ExecuteFetch("use `customer2@replica`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") _, err = vtgateConn.ExecuteFetch("use `customer@replica`", 0, false) require.NoError(t, err) - _, err = vtgateConn.ExecuteFetch(shard80MinusRoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shard80DashRoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer2.80-.replica", "Query was routed to the source after partial SwitchTraffic") - _, err = vtgateConn.ExecuteFetch(shardMinus80RoutedQuery, 0, false) + _, err = vtgateConn.ExecuteFetch(shardDash80RoutedQuery, 0, false) require.Error(t, err) require.Contains(t, err.Error(), "target: customer.-80.replica", "Query was routed to the target before partial SwitchTraffic") + workflowExec := tstWorkflowExec + if flavor == workflowFlavorVtctl { + workflowExec = tstWorkflowExecVtctl + } + // We cannot Complete a partial move tables at the moment because // it will find that all traffic has (obviously) not been switched. 
- err = tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionComplete, "", "", "", false) + err = workflowExec(t, "", workflowName, "", targetKs, "", workflowActionComplete, "", "", "", workflowExecOptsPartial80Dash) require.Error(t, err) // Confirm global routing rules: -80 should still be be routed to customer // while 80- should be routed to customer2. require.Equal(t, halfCutoverShardRoutingRules, getShardRoutingRules(t)) - // Now move the other shard: -80 - wfName = "partialDash80" shard = "-80" - ksWf = fmt.Sprintf("%s.%s", targetKs, wfName) - // Start the partial movetables for -80, 80- has already been switched - err = tstWorkflowExec(t, defaultCellName, wfName, sourceKs, targetKs, - "customer,loadtest", workflowActionCreate, "", shard, "", false) - require.NoError(t, err) - targetTab2 := vc.getPrimaryTablet(t, targetKs, shard) - catchup(t, targetTab2, wfName, "Partial MoveTables Customer to Customer2: -80") - vdiffSideBySide(t, ksWf, "") + workflowName = "partialDash80" + mtDash80 := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + tables: tables, + sourceShards: shard, + }, flavor) + mtDash80.Create() - // Switch all traffic for the shard - require.NoError(t, tstWorkflowExec(t, "", wfName, "", targetKs, "", workflowActionSwitchTraffic, "", "", "", false)) - expectedSwitchOutput = fmt.Sprintf("SwitchTraffic was successful for workflow %s.%s\nStart State: Reads partially switched, for shards: 80-. Writes partially switched, for shards: 80-\nCurrent State: All Reads Switched. 
All Writes Switched\n\n", - targetKs, wfName) - require.Equal(t, expectedSwitchOutput, lastOutput) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + + catchup(t, targetTabDash80, workflowName, "MoveTables") + vdiff(t, targetKeyspace, workflowName, defaultCellName, false, true, nil) + mtDash80.SwitchReadsAndWrites() + time.Sleep(loadTestBufferingWindowDuration + 1*time.Second) // Confirm global routing rules: everything should still be routed // to the source side, customer, globally. @@ -304,27 +345,33 @@ func TestPartialMoveTablesBasic(t *testing.T) { // Confirm shard routing rules: all shards should be routed to the // target side (customer2). require.Equal(t, postCutoverShardRoutingRules, getShardRoutingRules(t)) - lg.stop() // Cancel both reverse workflows (as we've done the cutover), which should // clean up both the global routing rules and the shard routing rules. for _, wf := range []string{"partialDash80", "partial80Dash"} { // We switched traffic, so it's the reverse workflow we want to cancel. 
+ var opts *workflowExecOptions + switch wf { + case "partialDash80": + opts = workflowExecOptsPartialDash80 + case "partial80Dash": + opts = workflowExecOptsPartial80Dash + } reverseWf := wf + "_reverse" - reverseKs := sourceKs // customer - err = tstWorkflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", false) + reverseKs := sourceKeyspace + err = workflowExec(t, "", reverseWf, "", reverseKs, "", workflowActionCancel, "", "", "", opts) require.NoError(t, err) - output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") + output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", "--shards", opts.shardSubset, fmt.Sprintf("%s.%s", reverseKs, reverseWf), "show") require.Error(t, err) require.Contains(t, output, "no streams found") // Delete the original workflow originalKsWf := fmt.Sprintf("%s.%s", targetKs, wf) - _, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "delete") + _, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", "--shards", opts.shardSubset, originalKsWf, "delete") require.NoError(t, err) - output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", originalKsWf, "show") + output, err = vc.VtctlClient.ExecuteCommandWithOutput("Workflow", "--", "--shards", opts.shardSubset, originalKsWf, "show") require.Error(t, err) require.Contains(t, output, "no streams found") } @@ -336,5 +383,16 @@ func TestPartialMoveTablesBasic(t *testing.T) { // Confirm that the shard routing rules are now gone. require.Equal(t, emptyShardRoutingRules, getShardRoutingRules(t)) +} +// TestPartialMoveTablesBasic tests partial move tables by moving each +// customer shard -- -80,80- -- once a a time to customer2. +// We test with both the vtctlclient and vtctldclient flavors. 
+func TestPartialMoveTablesBasic(t *testing.T) { + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables + for _, flavor := range workflowFlavors { + t.Run(workflowFlavorNames[flavor], func(t *testing.T) { + testPartialMoveTablesBasic(t, flavor) + }) + } } diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go index 9e0ae797e72..43157c8923d 100644 --- a/go/test/endtoend/vreplication/performance_test.go +++ b/go/test/endtoend/vreplication/performance_test.go @@ -23,8 +23,6 @@ import ( "time" "vitess.io/vitess/go/test/endtoend/cluster" - - "github.com/stretchr/testify/require" ) func TestReplicationStress(t *testing.T) { @@ -45,28 +43,16 @@ create table largebin(pid int, maindata varbinary(4096), primary key(pid)); create table customer(cid int, name varbinary(128), meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)) CHARSET=utf8mb4; ` - const defaultCellName = "zone1" - const sourceKs = "stress_src" const targetKs = "stress_tgt" - allCells := []string{defaultCellName} - allCellNames = defaultCellName - - vc = NewVitessCluster(t, "TestReplicationStress", allCells, mainClusterConfig) - require.NotNil(t, vc) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() - defer vc.TearDown(t) - - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, 
vc) diff --git a/go/test/endtoend/vreplication/reference_test.go b/go/test/endtoend/vreplication/reference_test.go new file mode 100644 index 00000000000..8ff77de8708 --- /dev/null +++ b/go/test/endtoend/vreplication/reference_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + uksSchema = ` +create table product (id int, mfg_id int, cat_id int, name varchar(128), primary key(id)); +create table cat (id int, name varchar(128), primary key(id)); +create table mfg (id int, name varchar(128), primary key(id)); +` + sksSchema = ` +create table product (id int, mfg_id int, cat_id int, name varchar(128), primary key(id)); +create table cat (id int, name varchar(128), primary key(id)); +create table mfg2 (id int, name varchar(128), primary key(id)); +` + uksVSchema = ` +{ + "sharded": false, + "tables": { + "product": {}, + "cat": {}, + "mfg": {} + } +}` + + sksVSchema = ` +{ + "sharded": true, + "tables": { + "product": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + }, + "cat": { + "type": "reference", + "source": "uks.cat" + }, + "mfg2": { + "type": "reference", + "source": "uks.mfg" + } + }, + "vindexes": { + "hash": { + "type": "hash" + } + } +}` + materializeCatSpec = ` +{ + "workflow": "wfCat", + "source_keyspace": "uks", + "target_keyspace": "sks", + "table_settings": [ {"target_table": "cat", 
"source_expression": "select id, name from cat" }] +}` + materializeMfgSpec = ` +{ + "workflow": "wfMfg", + "source_keyspace": "uks", + "target_keyspace": "sks", + "table_settings": [ {"target_table": "mfg2", "source_expression": "select id, name from mfg" }] +}` + initializeTables = ` +use uks; +insert into product values (1, 1, 1, 'p1'); +insert into product values (2, 2, 2, 'p2'); +insert into product values (3, 3, 3, 'p3'); +insert into cat values (1, 'c1'); +insert into cat values (2, 'c2'); +insert into cat values (3, 'c3'); +insert into mfg values (1, 'm1'); +insert into mfg values (2, 'm2'); +insert into mfg values (3, 'm3'); +insert into mfg values (4, 'm4'); +` +) + +func TestReferenceTableMaterializationAndRouting(t *testing.T) { + var err error + defaultCellName := "zone1" + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets + defer func() { defaultReplicas = 1 }() + uks := "uks" + sks := "sks" + + defaultCell := vc.Cells[defaultCellName] + vc.AddKeyspace(t, []*Cell{defaultCell}, uks, "0", uksVSchema, uksSchema, defaultReplicas, defaultRdonly, 100, nil) + vc.AddKeyspace(t, []*Cell{defaultCell}, sks, "-80,80-", sksVSchema, sksSchema, defaultReplicas, defaultRdonly, 200, nil) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + + verifyClusterHealth(t, vc) + _, _, err = vtgateConn.ExecuteFetchMulti(initializeTables, 0, false) + require.NoError(t, err) + vtgateConn.Close() + + materialize(t, materializeCatSpec, false) + materialize(t, materializeMfgSpec, false) + + tabDash80 := vc.getPrimaryTablet(t, sks, "-80") + tab80Dash := vc.getPrimaryTablet(t, sks, "80-") + catchup(t, tabDash80, "wfCat", "Materialize Category") + catchup(t, tab80Dash, "wfCat", "Materialize Category") + catchup(t, tabDash80, "wfMfg", "Materialize Manufacturer") + catchup(t, tab80Dash, "wfMfg", "Materialize Manufacturer") + + vtgateConn 
= getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + waitForRowCount(t, vtgateConn, sks, "cat", 3) + waitForRowCount(t, vtgateConn, sks, "mfg2", 4) + + execRefQuery(t, "insert into mfg values (5, 'm5')") + execRefQuery(t, "insert into mfg2 values (6, 'm6')") + execRefQuery(t, "insert into uks.mfg values (7, 'm7')") + execRefQuery(t, "insert into sks.mfg2 values (8, 'm8')") + waitForRowCount(t, vtgateConn, uks, "mfg", 8) + + execRefQuery(t, "update mfg set name = concat(name, '-updated') where id = 1") + execRefQuery(t, "update mfg2 set name = concat(name, '-updated') where id = 2") + execRefQuery(t, "update uks.mfg set name = concat(name, '-updated') where id = 3") + execRefQuery(t, "update sks.mfg2 set name = concat(name, '-updated') where id = 4") + + waitForRowCount(t, vtgateConn, uks, "mfg", 8) + qr := execVtgateQuery(t, vtgateConn, "uks", "select count(*) from uks.mfg where name like '%updated%'") + require.NotNil(t, qr) + require.Equal(t, "4", qr.Rows[0][0].ToString()) + + execRefQuery(t, "delete from mfg where id = 5") + execRefQuery(t, "delete from mfg2 where id = 6") + execRefQuery(t, "delete from uks.mfg where id = 7") + execRefQuery(t, "delete from sks.mfg2 where id = 8") + waitForRowCount(t, vtgateConn, uks, "mfg", 4) + +} + +func execRefQuery(t *testing.T, query string) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + _, err := vtgateConn.ExecuteFetch(query, 0, false) + require.NoError(t, err) +} diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index 338310fdf14..82c859acb40 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -17,21 +17,28 @@ limitations under the License. 
package vreplication import ( + "context" + "encoding/json" "fmt" + "math/rand/v2" "net" "strconv" "strings" + "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/throttler" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/wrangler" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) const ( @@ -40,9 +47,7 @@ const ( targetKs = "customer" ksWorkflow = targetKs + "." + workflowName reverseKsWorkflow = sourceKs + "." + workflowName + "_reverse" - tablesToMove = "customer" defaultCellName = "zone1" - readQuery = "select cid from customer" ) const ( @@ -58,12 +63,22 @@ var ( sourceTab, sourceReplicaTab, sourceRdonlyTab *cluster.VttabletProcess lastOutput string - currentWorkflowType wrangler.VReplicationWorkflowType + currentWorkflowType binlogdatapb.VReplicationWorkflowType ) +type workflowExecOptions struct { + deferSecondaryKeys bool + atomicCopy bool + shardSubset string +} + +var defaultWorkflowExecOptions = &workflowExecOptions{ + deferSecondaryKeys: true, +} + func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) error { err := tstWorkflowExec(t, defaultCellName, workflowName, targetKs, targetKs, - "", workflowActionCreate, "", sourceShards, targetShards, false) + "", workflowActionCreate, "", sourceShards, targetShards, defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, "") @@ -75,10 +90,10 @@ func createReshardWorkflow(t *testing.T, sourceShards, targetShards string) erro func createMoveTablesWorkflow(t *testing.T, tables string) { if tables == "" { - tables = tablesToMove + tables = "customer" } err := tstWorkflowExec(t, 
defaultCellName, workflowName, sourceKs, targetKs, - tables, workflowActionCreate, "", "", "", false) + tables, workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) confirmTablesHaveSecondaryKeys(t, []*cluster.VttabletProcess{targetTab1}, targetKs, tables) @@ -88,12 +103,89 @@ func createMoveTablesWorkflow(t *testing.T, tables string) { } func tstWorkflowAction(t *testing.T, action, tabletTypes, cells string) error { - return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, tablesToMove, action, tabletTypes, "", "", false) + return tstWorkflowExec(t, cells, workflowName, sourceKs, targetKs, "customer", action, tabletTypes, "", "", defaultWorkflowExecOptions) } -func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, sourceShards, targetShards string, atomicCopy bool) error { +// tstWorkflowExec executes a MoveTables or Reshard workflow command using +// vtctldclient. If you need to use the legacy vtctlclient, use +// tstWorkflowExecVtctl instead. 
+func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, + sourceShards, targetShards string, options *workflowExecOptions) error { + var args []string - if currentWorkflowType == wrangler.MoveTablesWorkflow { + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { + args = append(args, "MoveTables") + } else { + args = append(args, "Reshard") + } + + args = append(args, "--workflow", workflow, "--target-keyspace", targetKs, action) + + switch action { + case workflowActionCreate: + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { + args = append(args, "--source-keyspace", sourceKs) + if tables != "" { + args = append(args, "--tables", tables) + } else { + args = append(args, "--all-tables") + } + if sourceShards != "" { + args = append(args, "--source-shards", sourceShards) + } + } else { + args = append(args, "--source-shards", sourceShards, "--target-shards", targetShards) + } + // Test new experimental --defer-secondary-keys flag + switch currentWorkflowType { + case binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowType_Migrate, binlogdatapb.VReplicationWorkflowType_Reshard: + if !options.atomicCopy && options.deferSecondaryKeys { + args = append(args, "--defer-secondary-keys") + } + } + default: + if options.shardSubset != "" { + args = append(args, "--shards", options.shardSubset) + } + } + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && action == workflowActionSwitchTraffic { + args = append(args, "--initialize-target-sequences") + } + if action == workflowActionSwitchTraffic || action == workflowActionReverseTraffic { + if BypassLagCheck { + args = append(args, "--max-replication-lag-allowed=2542087h") + } + args = append(args, "--timeout=90s") + } + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && action == workflowActionCreate && options.atomicCopy { + args = 
append(args, "--atomic-copy") + } + if (action == workflowActionCreate || action == workflowActionSwitchTraffic || action == workflowActionReverseTraffic) && cells != "" { + args = append(args, "--cells", cells) + } + if action != workflowActionComplete && tabletTypes != "" { + args = append(args, "--tablet-types", tabletTypes) + } + args = append(args, "--action_timeout=10m") // At this point something is up so fail the test + if debugMode { + t.Logf("Executing workflow command: vtctldclient %v", strings.Join(args, " ")) + } + output, err := vc.VtctldClient.ExecuteCommandWithOutput(args...) + lastOutput = output + if err != nil { + return fmt.Errorf("%s: %s", err, output) + } + return nil +} + +// tstWorkflowExecVtctl executes a MoveTables or Reshard workflow command using +// vtctlclient. It should operate exactly the same way as tstWorkflowExec, but +// using the legacy client. +func tstWorkflowExecVtctl(t *testing.T, cells, workflow, sourceKs, targetKs, tables, action, tabletTypes, + sourceShards, targetShards string, options *workflowExecOptions) error { + + var args []string + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { args = append(args, "MoveTables") } else { args = append(args, "Reshard") @@ -104,12 +196,12 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, if BypassLagCheck { args = append(args, "--max_replication_lag_allowed=2542087h") } - if atomicCopy { + if options.atomicCopy { args = append(args, "--atomic-copy") } switch action { case workflowActionCreate: - if currentWorkflowType == wrangler.MoveTablesWorkflow { + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_MoveTables { args = append(args, "--source", sourceKs) if tables != "" { args = append(args, "--tables", tables) @@ -124,12 +216,16 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, } // Test new experimental --defer-secondary-keys flag switch currentWorkflowType { - case 
wrangler.MoveTablesWorkflow, wrangler.MigrateWorkflow, wrangler.ReshardWorkflow: - if !atomicCopy { + case binlogdatapb.VReplicationWorkflowType_MoveTables, binlogdatapb.VReplicationWorkflowType_Migrate, binlogdatapb.VReplicationWorkflowType_Reshard: + if !options.atomicCopy && options.deferSecondaryKeys { args = append(args, "--defer-secondary-keys") } args = append(args, "--initialize-target-sequences") // Only used for MoveTables } + default: + if options.shardSubset != "" { + args = append(args, "--shards", options.shardSubset) + } } if cells != "" { args = append(args, "--cells", cells) @@ -192,19 +288,42 @@ func tstWorkflowComplete(t *testing.T) error { // to primary,replica,rdonly (the only applicable types in these tests). func testWorkflowUpdate(t *testing.T) { tabletTypes := "primary,replica,rdonly" - // Test vtctlclient first + // Test vtctlclient first. _, err := vc.VtctlClient.ExecuteCommandWithOutput("workflow", "--", "--tablet-types", tabletTypes, "noexist.noexist", "update") require.Error(t, err, err) resp, err := vc.VtctlClient.ExecuteCommandWithOutput("workflow", "--", "--tablet-types", tabletTypes, ksWorkflow, "update") require.NoError(t, err) require.NotEmpty(t, resp) - // Test vtctldclient last + // Test vtctldclient last. _, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", "noexist", "update", "--workflow", "noexist", "--tablet-types", tabletTypes) require.Error(t, err) + // Change the tablet-types to rdonly. + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", "rdonly") + require.NoError(t, err, err) + // Confirm that we changed the workflow. + var ures vtctldatapb.WorkflowUpdateResponse + require.NoError(t, err) + err = protojson.Unmarshal([]byte(resp), &ures) + require.NoError(t, err) + require.Greater(t, len(ures.Details), 0) + require.True(t, ures.Details[0].Changed) + // Change tablet-types back to primary,replica,rdonly. 
resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) require.NoError(t, err, err) - require.NotEmpty(t, resp) + // Confirm that we changed the workflow. + err = protojson.Unmarshal([]byte(resp), &ures) + require.NoError(t, err) + require.Greater(t, len(ures.Details), 0) + require.True(t, ures.Details[0].Changed) + // Execute a no-op as tablet-types is already primary,replica,rdonly. + resp, err = vc.VtctldClient.ExecuteCommandWithOutput("workflow", "--keyspace", targetKs, "update", "--workflow", workflowName, "--tablet-types", tabletTypes) + require.NoError(t, err, err) + // Confirm that we didn't change the workflow. + err = protojson.Unmarshal([]byte(resp), &ures) + require.NoError(t, err) + require.Greater(t, len(ures.Details), 0) + require.False(t, ures.Details[0].Changed) } func tstWorkflowCancel(t *testing.T) error { @@ -215,10 +334,13 @@ func validateReadsRoute(t *testing.T, tabletTypes string, tablet *cluster.Vttabl if tabletTypes == "" { tabletTypes = "replica,rdonly" } + vtgateConn, closeConn := getVTGateConn() + defer closeConn() for _, tt := range []string{"replica", "rdonly"} { destination := fmt.Sprintf("%s:%s@%s", tablet.Keyspace, tablet.Shard, tt) if strings.Contains(tabletTypes, tt) { - assertQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, readQuery) + readQuery := "select cid from customer limit 10" + assertQueryExecutesOnTablet(t, vtgateConn, tablet, destination, readQuery, "select cid from customer limit :vtg1") } } } @@ -232,25 +354,29 @@ func validateReadsRouteToTarget(t *testing.T, tabletTypes string) { } func validateWritesRouteToSource(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer2', 200)" matchInsertQuery := "insert into customer(`name`, cid) values" assertQueryExecutesOnTablet(t, vtgateConn, sourceTab, "customer", 
insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100") + execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid = 200") } func validateWritesRouteToTarget(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() insertQuery := "insert into customer(name, cid) values('tempCustomer3', 101)" matchInsertQuery := "insert into customer(`name`, cid) values" assertQueryExecutesOnTablet(t, vtgateConn, targetTab2, "customer", insertQuery, matchInsertQuery) insertQuery = "insert into customer(name, cid) values('tempCustomer3', 102)" assertQueryExecutesOnTablet(t, vtgateConn, targetTab1, "customer", insertQuery, matchInsertQuery) - execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid > 100") + execVtgateQuery(t, vtgateConn, "customer", "delete from customer where cid in (101, 102)") } func revert(t *testing.T, workflowType string) { switchWrites(t, workflowType, ksWorkflow, true) validateWritesRouteToSource(t) - switchReadsNew(t, workflowType, allCellNames, ksWorkflow, true) + switchReadsNew(t, workflowType, getCellNames(nil), ksWorkflow, true) validateReadsRouteToSource(t, "replica") // cancel the workflow to cleanup @@ -263,8 +389,8 @@ func checkStates(t *testing.T, startState, endState string) { require.Contains(t, lastOutput, fmt.Sprintf("Current State: %s", endState)) } -func getCurrentState(t *testing.T) string { - if err := tstWorkflowAction(t, "GetState", "", ""); err != nil { +func getCurrentStatus(t *testing.T) string { + if err := tstWorkflowAction(t, "status", "", ""); err != nil { return err.Error() } return strings.TrimSpace(strings.Trim(lastOutput, "\n")) @@ -284,8 +410,7 @@ func TestBasicV2Workflows(t *testing.T) { }() vc = setupCluster(t) - defer vtgateConn.Close() - defer vc.TearDown(t) + defer vc.TearDown() // Internal tables like the lifecycle ones for OnlineDDL should be ignored ddlSQL := "ALTER TABLE customer MODIFY cid bigint 
UNSIGNED" @@ -293,7 +418,6 @@ func TestBasicV2Workflows(t *testing.T) { testMoveTablesV2Workflow(t) testReshardV2Workflow(t) - log.Flush() } func getVtctldGRPCURL() string { @@ -315,29 +439,31 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // at this point the unsharded product and sharded customer keyspaces are created by previous tests // use MoveTables to move customer2 from product to customer using - currentWorkflowType = wrangler.MoveTablesWorkflow + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables err := tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "customer2", workflowActionCreate, "", "", "", false) + "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, "customer.wf2", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "customer", "wf2") err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionSwitchTraffic, "", "", "", false) + "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf2", sourceKs, targetKs, - "", workflowActionComplete, "", "", "", false) + "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) + vtgateConn, closeConn := getVTGateConn() + defer closeConn() // sanity check - output, err := vc.VtctlClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err := vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") require.NoError(t, err) assert.NotContains(t, output, "customer2\"", "customer2 still found in keyspace product") waitForRowCount(t, vtgateConn, "customer", "customer2", 3) // check that customer2 has the sequence tag - output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetVSchema", "customer") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "customer") require.NoError(t, err) 
assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 sequence missing in keyspace customer") @@ -352,25 +478,25 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { // use MoveTables to move customer2 back to product. Note that now the table has an associated sequence err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "customer2", workflowActionCreate, "", "", "", false) + "customer2", workflowActionCreate, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) waitForWorkflowState(t, vc, "product.wf3", binlogdatapb.VReplicationWorkflowState_Running.String()) waitForLowLag(t, "product", "wf3") err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionSwitchTraffic, "", "", "", false) + "", workflowActionSwitchTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) err = tstWorkflowExec(t, defaultCellName, "wf3", targetKs, sourceKs, - "", workflowActionComplete, "", "", "", false) + "", workflowActionComplete, "", "", "", defaultWorkflowExecOptions) require.NoError(t, err) // sanity check - output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") require.NoError(t, err) assert.Contains(t, output, "customer2\"", "customer2 not found in keyspace product ") // check that customer2 still has the sequence tag - output, err = vc.VtctlClient.ExecuteCommandWithOutput("GetVSchema", "product") + output, err = vc.VtctldClient.ExecuteCommandWithOutput("GetVSchema", "product") require.NoError(t, err) assert.Contains(t, output, "\"sequence\": \"customer_seq2\"", "customer2 still found in keyspace product") @@ -379,8 +505,10 @@ func testVSchemaForSequenceAfterMoveTables(t *testing.T) { execVtgateQuery(t, vtgateConn, "product", "insert into customer2(name) values('a')") } waitForRowCount(t, vtgateConn, "product", "customer2", 3+num+num) - want = fmt.Sprintf("[[INT32(%d)]]", 
100+num+num-1) - waitForQueryResult(t, vtgateConn, "product", "select max(cid) from customer2", want) + res := execVtgateQuery(t, vtgateConn, "product", "select max(cid) from customer2") + cid, err := res.Rows[0][0].ToInt() + require.NoError(t, err) + require.GreaterOrEqual(t, cid, 100+num+num-1) } // testReplicatingWithPKEnumCols ensures that we properly apply binlog events @@ -394,6 +522,8 @@ func testReplicatingWithPKEnumCols(t *testing.T) { // when we re-insert the same row values and ultimately VDiff shows the table as // being identical in both keyspaces. + vtgateConn, closeConn := getVTGateConn() + defer closeConn() // typ is an enum, with soho having a stored and binlogged value of 2 deleteQuery := "delete from customer where cid = 2 and typ = 'soho'" insertQuery := "insert into customer(cid, name, typ, sport, meta) values(2, 'Paül','soho','cricket',convert(x'7b7d' using utf8mb4))" @@ -406,7 +536,36 @@ func testReplicatingWithPKEnumCols(t *testing.T) { } func testReshardV2Workflow(t *testing.T) { - currentWorkflowType = wrangler.ReshardWorkflow + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_Reshard + + // Generate customer records in the background for the rest of the test + // in order to confirm that no writes are lost in either the customer + // table or the customer_name and enterprise_customer materializations + // against it during the Reshard and all of the traffic switches. + dataGenCtx, dataGenCancel := context.WithCancel(context.Background()) + defer dataGenCancel() + dataGenConn, dataGenCloseConn := getVTGateConn() + defer dataGenCloseConn() + dataGenWg := sync.WaitGroup{} + dataGenWg.Add(1) + go func() { + defer dataGenWg.Done() + id := 1000 + for { + select { + case <-dataGenCtx.Done(): + return + default: + // Use a random customer type for each record. 
+ _ = execVtgateQuery(t, dataGenConn, "customer", fmt.Sprintf("insert into customer (cid, name, typ) values (%d, 'tempCustomer%d', %s)", + id, id, customerTypes[rand.IntN(len(customerTypes))])) + } + time.Sleep(1 * time.Millisecond) + id++ + } + }() // create internal tables on the original customer shards that should be // ignored and not show up on the new shards @@ -415,9 +574,6 @@ func testReshardV2Workflow(t *testing.T) { createAdditionalCustomerShards(t, "-40,40-80,80-c0,c0-") createReshardWorkflow(t, "-80,80-", "-40,40-80,80-c0,c0-") - if !strings.Contains(lastOutput, "Workflow started successfully") { - t.Fail() - } validateReadsRouteToSource(t, "replica") validateWritesRouteToSource(t) @@ -430,19 +586,73 @@ func testReshardV2Workflow(t *testing.T) { testWorkflowUpdate(t) testRestOfWorkflow(t) + + // Confirm that we lost no customer related writes during the Reshard. + dataGenCancel() + dataGenWg.Wait() + cres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from customer") + require.Len(t, cres.Rows, 1) + waitForNoWorkflowLag(t, vc, "customer", "customer_name") + cnres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from customer_name") + require.Len(t, cnres.Rows, 1) + require.EqualValues(t, cres.Rows, cnres.Rows) + if debugMode { + // We expect the row count to differ in enterprise_customer because it is + // using a `where typ='enterprise'` filter. So the count is only for debug + // info. + ecres := execVtgateQuery(t, dataGenConn, "customer", "select count(*) from enterprise_customer") + t.Logf("Done inserting customer data. Record counts in customer: %s, customer_name: %s, enterprise_customer: %s", + cres.Rows[0][0].ToString(), cnres.Rows[0][0].ToString(), ecres.Rows[0][0].ToString()) + } + // We also do a vdiff on the materialize workflows for good measure. 
+ doVtctldclientVDiff(t, "customer", "customer_name", "", nil) + doVtctldclientVDiff(t, "customer", "enterprise_customer", "", nil) } func testMoveTablesV2Workflow(t *testing.T) { - currentWorkflowType = wrangler.MoveTablesWorkflow + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - // test basic forward and reverse flows + materializeShow := func() { + if !debugMode { + return + } + output, err := vc.VtctldClient.ExecuteCommandWithOutput("materialize", "--target-keyspace=customer", "show", "--workflow=customer_name", "--compact", "--include-logs=false") + require.NoError(t, err) + t.Logf("Materialize show output: %s", output) + } + + // Test basic forward and reverse flows. setupCustomerKeyspace(t) + + listOutputContainsWorkflow := func(output string, workflow string) bool { + workflows := []string{} + err := json.Unmarshal([]byte(output), &workflows) + require.NoError(t, err) + for _, w := range workflows { + if w == workflow { + return true + } + } + return false + } + listOutputIsEmpty := func(output string) bool { + workflows := []string{} + err := json.Unmarshal([]byte(output), &workflows) + require.NoError(t, err) + return len(workflows) == 0 + } + listAllArgs := []string{"workflow", "--keyspace", "customer", "list"} + + output, err := vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) 
+ require.NoError(t, err) + require.True(t, listOutputIsEmpty(output)) + // The purge table should get skipped/ignored // If it's not then we'll get an error as the table doesn't exist in the vschema createMoveTablesWorkflow(t, "customer,loadtest,vdiff_order,reftable,_vt_PURGE_4f9194b43b2011eb8a0104ed332e05c2_20221210194431") - if !strings.Contains(lastOutput, "Workflow started successfully") { - t.Fail() - } + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) validateReadsRouteToSource(t, "replica") validateWritesRouteToSource(t) @@ -456,27 +666,42 @@ func testMoveTablesV2Workflow(t *testing.T) { testWorkflowUpdate(t) testRestOfWorkflow(t) + // Create our primary intra-keyspace materialization. + materialize(t, materializeCustomerNameSpec, false) + // Create a second one to confirm that multiple ones get migrated correctly. + materialize(t, materializeCustomerTypeSpec, false) + materializeShow() - listAllArgs := []string{"workflow", "customer", "listall"} - output, _ := vc.VtctlClient.ExecuteCommandWithOutput(listAllArgs...) - require.Contains(t, output, "No workflows found in keyspace customer") + output, err = vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) + require.NoError(t, err) + require.True(t, listOutputContainsWorkflow(output, "customer_name") && listOutputContainsWorkflow(output, "enterprise_customer") && !listOutputContainsWorkflow(output, "wf1")) testVSchemaForSequenceAfterMoveTables(t) + // Confirm that the auto_increment clause on customer.cid was removed. 
+ cs, err := vtgateConn.ExecuteFetch("show create table customer", 1, false) + require.NoError(t, err) + require.Len(t, cs.Rows, 1) + require.Len(t, cs.Rows[0], 2) // Table and "Create Table" + csddl := strings.ToLower(cs.Rows[0][1].ToString()) + require.NotContains(t, csddl, "auto_increment", "customer table still has auto_increment clause: %s", csddl) + createMoveTablesWorkflow(t, "Lead,Lead-1") - output, _ = vc.VtctlClient.ExecuteCommandWithOutput(listAllArgs...) - require.Contains(t, output, "Following workflow(s) found in keyspace customer: wf1") + output, err = vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) + require.NoError(t, err) + require.True(t, listOutputContainsWorkflow(output, "wf1") && listOutputContainsWorkflow(output, "customer_name") && listOutputContainsWorkflow(output, "enterprise_customer")) - err := tstWorkflowCancel(t) + err = tstWorkflowCancel(t) require.NoError(t, err) - output, _ = vc.VtctlClient.ExecuteCommandWithOutput(listAllArgs...) - require.Contains(t, output, "No workflows found in keyspace customer") + output, err = vc.VtctldClient.ExecuteCommandWithOutput(listAllArgs...) + require.NoError(t, err) + require.True(t, listOutputContainsWorkflow(output, "customer_name") && listOutputContainsWorkflow(output, "enterprise_customer") && !listOutputContainsWorkflow(output, "wf1")) } func testPartialSwitches(t *testing.T) { // nothing switched - require.Equal(t, getCurrentState(t), wrangler.WorkflowStateNotSwitched) + require.Contains(t, getCurrentStatus(t), wrangler.WorkflowStateNotSwitched) tstWorkflowSwitchReads(t, "replica,rdonly", "zone1") nextState := "Reads partially switched. Replica switched in cells: zone1. Rdonly switched in cells: zone1. 
Writes Not Switched" checkStates(t, wrangler.WorkflowStateNotSwitched, nextState) @@ -498,7 +723,7 @@ func testPartialSwitches(t *testing.T) { checkStates(t, nextState, nextState) // idempotency keyspace := "product" - if currentWorkflowType == wrangler.ReshardWorkflow { + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { keyspace = "customer" } waitForLowLag(t, keyspace, "wf1_reverse") @@ -519,6 +744,11 @@ func testPartialSwitches(t *testing.T) { } func testRestOfWorkflow(t *testing.T) { + // Relax the throttler so that it does not cause switches to fail because it can block + // the catchup for the intra-keyspace materialization. + res, err := throttler.UpdateThrottlerTopoConfigRaw(vc.VtctldClient, "customer", true, false, throttlerConfig.Threshold*5, throttlerConfig.Query, nil) + require.NoError(t, err, res) + testPartialSwitches(t) // test basic forward and reverse flows @@ -535,7 +765,7 @@ func testRestOfWorkflow(t *testing.T) { // this function is called for both MoveTables and Reshard, so the reverse workflows exist in different keyspaces keyspace := "product" - if currentWorkflowType == wrangler.ReshardWorkflow { + if currentWorkflowType == binlogdatapb.VReplicationWorkflowType_Reshard { keyspace = "customer" } waitForLowLag(t, keyspace, "wf1_reverse") @@ -580,12 +810,14 @@ func testRestOfWorkflow(t *testing.T) { validateWritesRouteToSource(t) // trying to complete an unswitched workflow should error - err := tstWorkflowComplete(t) + err = tstWorkflowComplete(t) require.Error(t, err) require.Contains(t, err.Error(), wrangler.ErrWorkflowNotFullySwitched) // fully switch and complete waitForLowLag(t, "customer", "wf1") + waitForLowLag(t, "customer", "customer_name") + waitForLowLag(t, "customer", "enterprise_customer") tstWorkflowSwitchReadsAndWrites(t) validateReadsRoute(t, "rdonly", targetRdonlyTab1) validateReadsRouteToTarget(t, "replica") @@ -596,30 +828,17 @@ func testRestOfWorkflow(t *testing.T) { } func setupCluster(t 
*testing.T) *VitessCluster { - cells := []string{"zone1", "zone2"} - - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - defaultCellName := "zone1" - allCellNames = defaultCellName - defaultCell = vc.Cells[defaultCellName] + vc = NewVitessCluster(t, &clusterOptions{cells: []string{"zone1", "zone2"}}) zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] vc.AddKeyspace(t, []*Cell{zone1, zone2}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = zone1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "product", "0"), 1, 30*time.Second)) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer getVTGateConn() verifyClusterHealth(t, vc) insertInitialData(t) - + defaultCell := vc.Cells[vc.CellNames[0]] sourceTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet sourceReplicaTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-101"].Vttablet sourceRdonlyTab = vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-102"].Vttablet @@ -632,12 +851,7 @@ func setupCustomerKeyspace(t *testing.T) { customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, nil); err != nil { t.Fatal(err) } - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "-80"), 2, 30*time.Second)) - require.NoError(t, 
vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "customer", "80-"), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "-80"), 1, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", "customer", "80-"), 1, 30*time.Second)) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet @@ -652,33 +866,17 @@ func setupCustomer2Keyspace(t *testing.T) { customerVSchema, customerSchema, 0, 0, 1200, nil); err != nil { t.Fatal(err) } - for _, c2shard := range c2shards { - err := cluster.WaitForHealthyShard(vc.VtctldClient, c2keyspace, c2shard) - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", c2keyspace, c2shard), 1, 30*time.Second)) - } } func setupMinimalCluster(t *testing.T) *VitessCluster { - cells := []string{"zone1"} + vc = NewVitessCluster(t, nil) - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - defaultCellName := "zone1" - allCellNames = defaultCellName - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] zone1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{zone1}, "product", "0", initialProductVSchema, initialProductSchema, 0, 0, 100, nil) - vtgate = zone1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 1, 30*time.Second)) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) verifyClusterHealth(t, vc) insertInitialData(t) @@ -687,23 +885,24 @@ func setupMinimalCluster(t *testing.T) 
*VitessCluster { return vc } -func setupMinimalCustomerKeyspace(t *testing.T) { +func setupMinimalCustomerKeyspace(t *testing.T) map[string]*cluster.VttabletProcess { + tablets := make(map[string]*cluster.VttabletProcess) if _, err := vc.AddKeyspace(t, []*Cell{vc.Cells["zone1"]}, "customer", "-80,80-", customerVSchema, customerSchema, 0, 0, 200, nil); err != nil { t.Fatal(err) } - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "-80")) - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, "customer", "80-")) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "-80"), 1, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "customer", "80-"), 1, 30*time.Second)) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] targetTab1 = custKs.Shards["-80"].Tablets["zone1-200"].Vttablet targetTab2 = custKs.Shards["80-"].Tablets["zone1-300"].Vttablet + tablets["-80"] = targetTab1 + tablets["80-"] = targetTab2 + return tablets } func TestSwitchReadsWritesInAnyOrder(t *testing.T) { vc = setupCluster(t) - defer vc.TearDown(t) + defer vc.TearDown() moveCustomerTableSwitchFlows(t, []*Cell{vc.Cells["zone1"]}, "zone1") } @@ -712,8 +911,11 @@ func switchReadsNew(t *testing.T, workflowType, cells, ksWorkflow string, revers if reverse { command = "ReverseTraffic" } - output, err := vc.VtctlClient.ExecuteCommandWithOutput(workflowType, "--", "--cells="+cells, - "--tablet_types=rdonly,replica", command, ksWorkflow) + parts := strings.Split(ksWorkflow, ".") + require.Len(t, parts, 2) + ks, wf := parts[0], parts[1] + output, err := vc.VtctldClient.ExecuteCommandWithOutput(workflowType, "--workflow", wf, "--target-keyspace", ks, command, + "--cells", cells, "--tablet-types=rdonly,replica") require.NoError(t, err, fmt.Sprintf("SwitchReads Error: %s: %s", err, output)) if output != "" { fmt.Printf("SwitchReads 
output: %s\n", output) @@ -735,7 +937,7 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias catchup(t, targetTab2, workflow, workflowType) vdiffSideBySide(t, ksWorkflow, "") } - + allCellNames := getCellNames(cells) var switchReadsFollowedBySwitchWrites = func() { moveTablesAndWait() @@ -815,16 +1017,9 @@ func moveCustomerTableSwitchFlows(t *testing.T, cells []*Cell, sourceCellOrAlias func createAdditionalCustomerShards(t *testing.T, shards string) { ksName := "customer" + defaultCell := vc.Cells[vc.CellNames[0]] keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, []*Cell{defaultCell, vc.Cells["zone2"]}, keyspace, shards, defaultReplicas, defaultRdonly, 400, targetKsOpts)) - arrTargetShardNames := strings.Split(shards, ",") - - for _, shardName := range arrTargetShardNames { - err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName) - require.NoError(t, err) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", ksName, shardName), 2, 30*time.Second)) - require.NoError(t, vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.rdonly", ksName, shardName), 1, 30*time.Second)) - } custKs := vc.Cells[defaultCell.Name].Keyspaces[ksName] targetTab2 = custKs.Shards["80-c0"].Tablets["zone1-600"].Vttablet targetTab1 = custKs.Shards["40-80"].Tablets["zone1-500"].Vttablet @@ -836,7 +1031,7 @@ func createAdditionalCustomerShards(t *testing.T, shards string) { } func tstApplySchemaOnlineDDL(t *testing.T, sql string, keyspace string) { - err := vc.VtctlClient.ExecuteCommand("ApplySchema", "--", "--ddl_strategy=online", + err := vc.VtctldClient.ExecuteCommand("ApplySchema", "--ddl-strategy=online", "--sql", sql, keyspace) require.NoError(t, err, fmt.Sprintf("ApplySchema Error: %s", err)) } diff --git a/go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql b/go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql new file mode 100644 index 
00000000000..6af8ca99b94 --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/materialize_schema.sql @@ -0,0 +1,2 @@ +create table parent_copy(id int, name varchar(128), primary key(id)) engine=innodb; +create table child_copy(id int, parent_id int, name varchar(128), primary key(id)) engine=innodb; \ No newline at end of file diff --git a/go/test/endtoend/vreplication/schema/fkext/source_schema.sql b/go/test/endtoend/vreplication/schema/fkext/source_schema.sql new file mode 100644 index 00000000000..01b788338b6 --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/source_schema.sql @@ -0,0 +1,2 @@ +create table if not exists parent(id int, name varchar(128), primary key(id)) engine=innodb; +create table if not exists child(id int, parent_id int, name varchar(128), primary key(id), foreign key(parent_id) references parent(id) on delete cascade) engine=innodb; \ No newline at end of file diff --git a/go/test/endtoend/vreplication/schema/fkext/source_vschema.json b/go/test/endtoend/vreplication/schema/fkext/source_vschema.json new file mode 100644 index 00000000000..01cde0d643d --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/source_vschema.json @@ -0,0 +1,6 @@ +{ + "tables": { + "parent": {}, + "child": {} + } +} diff --git a/go/test/endtoend/vreplication/schema/fkext/target1_vschema.json b/go/test/endtoend/vreplication/schema/fkext/target1_vschema.json new file mode 100644 index 00000000000..dc89232fbbb --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/target1_vschema.json @@ -0,0 +1,28 @@ +{ + "sharded": false, + "foreignKeyMode": "managed", + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "parent": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + } + + } +} diff --git a/go/test/endtoend/vreplication/schema/fkext/target2_vschema.json 
b/go/test/endtoend/vreplication/schema/fkext/target2_vschema.json new file mode 100644 index 00000000000..06e851a9007 --- /dev/null +++ b/go/test/endtoend/vreplication/schema/fkext/target2_vschema.json @@ -0,0 +1,43 @@ +{ + "sharded": true, + "foreignKeyMode": "managed", + "vindexes": { + "reverse_bits": { + "type": "reverse_bits" + } + }, + "tables": { + "parent": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + }, + "parent_copy": { + "column_vindexes": [ + { + "column": "id", + "name": "reverse_bits" + } + ] + }, + "child_copy": { + "column_vindexes": [ + { + "column": "parent_id", + "name": "reverse_bits" + } + ] + } + } +} diff --git a/go/test/endtoend/vreplication/sidecardb_test.go b/go/test/endtoend/vreplication/sidecardb_test.go index ef05e051be2..391f7d60246 100644 --- a/go/test/endtoend/vreplication/sidecardb_test.go +++ b/go/test/endtoend/vreplication/sidecardb_test.go @@ -37,9 +37,9 @@ var numSidecarDBTables int var ddls1, ddls2 []string func init() { - sidecarDBTables = []string{"copy_state", "dt_participant", "dt_state", "heartbeat", "post_copy_action", "redo_state", - "redo_statement", "reparent_journal", "resharding_journal", "schema_migrations", "schema_version", "schemacopy", "tables", - "vdiff", "vdiff_log", "vdiff_table", "views", "vreplication", "vreplication_log"} + sidecarDBTables = []string{"copy_state", "dt_participant", "dt_state", "heartbeat", "post_copy_action", + "redo_state", "redo_statement", "reparent_journal", "resharding_journal", "schema_migrations", "schema_version", + "tables", "udfs", "vdiff", "vdiff_log", "vdiff_table", "views", "vreplication", "vreplication_log"} numSidecarDBTables = len(sidecarDBTables) ddls1 = []string{ "drop table _vt.vreplication_log", @@ -58,15 +58,8 @@ func prs(t *testing.T, keyspace, shard string) { // TestSidecarDB launches a Vitess cluster and ensures that the 
expected sidecar tables are created. We also drop/alter // tables and ensure the next tablet init will recreate the sidecar database to the desired schema. func TestSidecarDB(t *testing.T) { - cells := []string{"zone1"} - - vc = NewVitessCluster(t, "TestSidecarDB", cells, mainClusterConfig) - require.NotNil(t, vc) - allCellNames = "zone1" - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] - - defer vc.TearDown(t) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() keyspace := "product" shard := "0" @@ -74,7 +67,7 @@ func TestSidecarDB(t *testing.T) { cell1 := vc.Cells[defaultCellName] tablet100 := fmt.Sprintf("%s-100", defaultCellName) tablet101 := fmt.Sprintf("%s-101", defaultCellName) - vc.AddKeyspace(t, []*Cell{cell1}, keyspace, shard, initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) + vc.AddKeyspace(t, []*Cell{cell1}, keyspace, "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) shard0 := vc.Cells[defaultCellName].Keyspaces[keyspace].Shards[shard] tablet100Port := shard0.Tablets[tablet100].Vttablet.Port tablet101Port := shard0.Tablets[tablet101].Vttablet.Port diff --git a/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql b/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql index 03df754ea21..fc78f6b414a 100644 --- a/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql +++ b/go/test/endtoend/vreplication/testdata/config/init_testserver_db.sql @@ -24,11 +24,8 @@ SET GLOBAL read_only='OFF'; # Changes during the init db should not make it to the binlog. # They could potentially create errant transactions on replicas. SET sql_log_bin = 0; -# Remove anonymous users. -DELETE FROM mysql.user WHERE User = ''; - -# Disable remote root access (only allow UNIX socket). -DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; +# Remove anonymous users & disable remote root access (only allow UNIX socket). 
+DROP USER IF EXISTS ''@'%', ''@'localhost', 'root'@'%'; # Remove test database. DROP DATABASE IF EXISTS test; @@ -82,10 +79,5 @@ GRANT SELECT, PROCESS, SUPER, REPLICATION CLIENT, RELOAD GRANT SELECT, UPDATE, DELETE, DROP ON performance_schema.* TO 'vt_monitoring'@'localhost'; -FLUSH PRIVILEGES; - -RESET SLAVE ALL; -RESET MASTER; - # custom sql is used to add custom scripts like creating users/passwords. We use it in our tests # {{custom_sql}} diff --git a/go/test/endtoend/vreplication/time_zone_test.go b/go/test/endtoend/vreplication/time_zone_test.go index 2d0d1eeaf0b..2c0a9a4f5a5 100644 --- a/go/test/endtoend/vreplication/time_zone_test.go +++ b/go/test/endtoend/vreplication/time_zone_test.go @@ -32,31 +32,21 @@ import ( // TestMoveTablesTZ tests the conversion of datetime based on the source timezone passed to the MoveTables workflow func TestMoveTablesTZ(t *testing.T) { - allCellNames = "zone1" - defaultCellName := "zone1" workflow := "tz" sourceKs := "product" targetKs := "customer" - shard := "0" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) ksReverseWorkflow := fmt.Sprintf("%s.%s_reverse", sourceKs, workflow) - vc = NewVitessCluster(t, "TestCellAliasVreplicationWorkflow", []string{"zone1"}, mainClusterConfig) - require.NotNil(t, vc) - defaultCell = vc.Cells[defaultCellName] + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[vc.CellNames[0]] cells := []*Cell{defaultCell} - defer vc.TearDown(t) - cell1 := vc.Cells["zone1"] vc.AddKeyspace(t, []*Cell{cell1}, sourceKs, "0", initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, 
vc) @@ -66,7 +56,7 @@ func TestMoveTablesTZ(t *testing.T) { // it seems to take some time for the mysql server to load time zone info after the tables in mysql db have been populated loadTimeZoneInfo := func(tab *cluster.VttabletProcess, sql, timezone string) { - _, err := tab.QueryTabletWithDB(timeZoneSQL, "mysql") + err := tab.MultiQueryTabletWithDB(timeZoneSQL, "mysql") require.NoError(t, err) timer := time.NewTimer(1 * time.Minute) for { @@ -90,10 +80,6 @@ func TestMoveTablesTZ(t *testing.T) { if _, err := vc.AddKeyspace(t, cells, targetKs, "0", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { t.Fatal(err) } - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) - - defaultCell := vc.Cells["zone1"] custKs := vc.Cells[defaultCell.Name].Keyspaces[targetKs] customerTab := custKs.Shards["0"].Tablets["zone1-200"].Vttablet diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index 72b09e8fede..08f5bb8926d 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -18,15 +18,27 @@ package vreplication import ( "fmt" + "math" + "runtime" + "strconv" "strings" "testing" "time" + "github.com/google/uuid" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) type testCase struct { @@ -40,10 +52,13 @@ type testCase struct { retryInsert string resume bool // test resume functionality with this workflow // If testing resume, what new rows should be diff'd. 
These rows must have a PK > all initial rows and retry rows. - resumeInsert string - stop bool // test stop functionality with this workflow - testCLIErrors bool // test CLI errors against this workflow (only needs to be done once) - testCLICreateWait bool // test CLI create and wait until done against this workflow (only needs to be done once) + resumeInsert string + stop bool // test stop functionality with this workflow + testCLIErrors bool // test CLI errors against this workflow (only needs to be done once) + testCLICreateWait bool // test CLI create and wait until done against this workflow (only needs to be done once) + testCLIFlagHandling bool // test vtctldclient flag handling from end-to-end + extraVDiffFlags map[string]string + vdiffCount int64 // Keep track of the number of vdiffs created to test the stats } const ( @@ -55,21 +70,25 @@ const ( var testCases = []*testCase{ { - name: "MoveTables/unsharded to two shards", - workflow: "p1c2", - typ: "MoveTables", - sourceKs: "product", - targetKs: "customer", - sourceShards: "0", - targetShards: "-80,80-", - tabletBaseID: 200, - tables: "customer,Lead,Lead-1", - autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(1991234, 'Testy McTester', 'soho')`, - resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(1992234, 'Testy McTester (redux)', 'enterprise')`, - testCLIErrors: true, // test for errors in the simplest workflow - testCLICreateWait: true, // test wait on create feature against simplest workflow + name: "MoveTables/unsharded to two shards", + workflow: "p1c2", + typ: "MoveTables", + sourceKs: "product", + targetKs: "customer", + sourceShards: "0", + targetShards: "-80,80-", + tabletBaseID: 200, + tables: "customer,Lead,Lead-1,nopk", + autoRetryError: true, + retryInsert: `insert into customer(cid, name, typ) values(2005149100, 'Testy McTester', 'soho')`, + resume: true, + resumeInsert: `insert into customer(cid, name, typ) values(2005149200, 'Testy 
McTester (redux)', 'enterprise')`, + testCLIErrors: true, // test for errors in the simplest workflow + testCLICreateWait: true, // test wait on create feature against simplest workflow + testCLIFlagHandling: true, // test flag handling end-to-end against simplest workflow + extraVDiffFlags: map[string]string{ + "--max-diff-duration": "2s", + }, }, { name: "Reshard Merge/split 2 to 3", @@ -81,9 +100,9 @@ var testCases = []*testCase{ targetShards: "-40,40-a0,a0-", tabletBaseID: 400, autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(1993234, 'Testy McTester Jr', 'enterprise'), (1993235, 'Testy McTester II', 'enterprise')`, + retryInsert: `insert into customer(cid, name, typ) values(2005149300, 'Testy McTester Jr', 'enterprise'), (2005149350, 'Testy McTester II', 'enterprise')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(1994234, 'Testy McTester III', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(2005149400, 'Testy McTester III', 'enterprise')`, stop: true, }, { @@ -96,43 +115,48 @@ var testCases = []*testCase{ targetShards: "0", tabletBaseID: 700, autoRetryError: true, - retryInsert: `insert into customer(cid, name, typ) values(1995234, 'Testy McTester IV', 'enterprise')`, + retryInsert: `insert into customer(cid, name, typ) values(2005149500, 'Testy McTester IV', 'enterprise')`, resume: true, - resumeInsert: `insert into customer(cid, name, typ) values(1996234, 'Testy McTester V', 'enterprise'), (1996235, 'Testy McTester VI', 'enterprise')`, + resumeInsert: `insert into customer(cid, name, typ) values(2005149600, 'Testy McTester V', 'enterprise'), (2005149650, 'Testy McTester VI', 'enterprise')`, stop: true, }, } +func checkVDiffCountStat(t *testing.T, tablet *cluster.VttabletProcess, expectedCount int64) { + countStr, err := getDebugVar(t, tablet.Port, []string{"VDiffCount"}) + require.NoError(t, err, "failed to get VDiffCount stat from %s-%d tablet: %v", tablet.Cell, 
tablet.TabletUID, err) + count, err := strconv.Atoi(countStr) + require.NoError(t, err, "failed to convert VDiffCount stat string to int: %v", err) + require.Equal(t, expectedCount, int64(count), "expected VDiffCount stat to be %d but got %d", expectedCount, count) +} + func TestVDiff2(t *testing.T) { - allCellNames = "zone5,zone1,zone2,zone3,zone4" + cellNames := "zone5,zone1,zone2,zone3,zone4" sourceKs := "product" sourceShards := []string{"0"} targetKs := "customer" targetShards := []string{"-80", "80-"} - // This forces us to use multiple vstream packets even with small test tables - extraVTTabletArgs = []string{"--vstream_packet_size=1"} + extraVTTabletArgs = []string{ + // This forces us to use multiple vstream packets even with small test tables. + "--vstream_packet_size=1", + // Test VPlayer batching mode. + fmt.Sprintf("--vreplication_experimental_flags=%d", + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching), + } + + vc = NewVitessCluster(t, &clusterOptions{cells: strings.Split(cellNames, ",")}) + defer vc.TearDown() - vc = NewVitessCluster(t, "TestVDiff2", strings.Split(allCellNames, ","), mainClusterConfig) - require.NotNil(t, vc) zone1 := vc.Cells["zone1"] zone2 := vc.Cells["zone2"] zone3 := vc.Cells["zone3"] - defaultCell = zone1 - - defer vc.TearDown(t) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. 
_, err := vc.AddKeyspace(t, []*Cell{zone2, zone1, zone3}, sourceKs, strings.Join(sourceShards, ","), initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) require.NoError(t, err) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - for _, shard := range sourceShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard)) - } - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() verifyClusterHealth(t, vc) @@ -142,15 +166,16 @@ func TestVDiff2(t *testing.T) { query := `insert into customer(cid, name, typ, sport) values(1001, null, 'soho','')` execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) - generateMoreCustomers(t, sourceKs, 100) + generateMoreCustomers(t, sourceKs, 1000) + + // Create rows in the nopk table using the customer names and random ages between 20 and 100. + query = "insert into nopk(name, age) select name, floor(rand()*80)+20 from customer" + execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. tks, err := vc.AddKeyspace(t, []*Cell{zone3, zone1, zone2}, targetKs, strings.Join(targetShards, ","), customerVSchema, customerSchema, 0, 0, 200, targetKsOpts) require.NoError(t, err) - for _, shard := range targetShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard)) - } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { @@ -158,23 +183,41 @@ func TestVDiff2(t *testing.T) { testWorkflow(t, vc, tc, tks, []*Cell{zone3, zone2, zone1}) }) } + + statsTablet := vc.getPrimaryTablet(t, targetKs, targetShards[0]) + + // We diffed X rows so confirm that the global total is > 0. 
+ countStr, err := getDebugVar(t, statsTablet.Port, []string{"VDiffRowsComparedTotal"}) + require.NoError(t, err, "failed to get VDiffRowsComparedTotal stat from %s-%d tablet: %v", statsTablet.Cell, statsTablet.TabletUID, err) + count, err := strconv.Atoi(countStr) + require.NoError(t, err, "failed to convert VDiffRowsComparedTotal stat string to int: %v", err) + require.Greater(t, count, 0, "expected VDiffRowsComparedTotal stat to be greater than 0 but got %d", count) + + // The VDiffs should all be cleaned up so the VDiffRowsCompared value, which + // is produced from controller info, should be empty. + vdrc, err := getDebugVar(t, statsTablet.Port, []string{"VDiffRowsCompared"}) + require.NoError(t, err, "failed to get VDiffRowsCompared stat from %s-%d tablet: %v", statsTablet.Cell, statsTablet.TabletUID, err) + require.Equal(t, "{}", vdrc, "expected VDiffRowsCompared stat to be empty but got %s", vdrc) } func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, cells []*Cell) { + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() arrTargetShards := strings.Split(tc.targetShards, ",") if tc.typ == "Reshard" { require.NoError(t, vc.AddShards(t, cells, tks, tc.targetShards, 0, 0, tc.tabletBaseID, targetKsOpts)) - for _, shard := range arrTargetShards { - require.NoError(t, cluster.WaitForHealthyShard(vc.VtctldClient, tc.targetKs, shard)) - } + } ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) + statsShard := arrTargetShards[0] + statsTablet := vc.getPrimaryTablet(t, tc.targetKs, statsShard) var args []string args = append(args, tc.typ, "--") args = append(args, "--source", tc.sourceKs) if tc.typ == "Reshard" { args = append(args, "--source_shards", tc.sourceShards, "--target_shards", tc.targetShards) } + allCellNames := getCellNames(nil) args = append(args, "--cells", allCellNames) args = append(args, "--tables", tc.tables) args = append(args, "Create") @@ -182,12 +225,75 @@ func testWorkflow(t *testing.T, vc 
*VitessCluster, tc *testCase, tks *Keyspace, err := vc.VtctlClient.ExecuteCommand(args...) require.NoError(t, err) - for _, shard := range arrTargetShards { - tab := vc.getPrimaryTablet(t, tc.targetKs, shard) - catchup(t, tab, tc.workflow, tc.typ) + waitForShardsToCatchup := func() { + for _, shard := range arrTargetShards { + tab := vc.getPrimaryTablet(t, tc.targetKs, shard) + catchup(t, tab, tc.workflow, tc.typ) + } + } + + // Wait for the workflow to finish the copy phase and initially catch up. + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForShardsToCatchup() + + if diffDuration, ok := tc.extraVDiffFlags["--max-diff-duration"]; ok { + if !strings.Contains(tc.tables, "customer") { + require.Fail(t, "customer table must be included in the table list to test --max-diff-duration") + } + // Generate enough customer table data so that the table diff gets restarted. + dur, err := time.ParseDuration(diffDuration) + require.NoError(t, err, "could not parse --max-diff-duration %q: %v", diffDuration, err) + seconds := int64(dur.Seconds()) + chunkSize := int64(100000) + // Take the test host/runner vCPU count into account when generating rows. + perVCpuCount := int64(100000) + // Cap it at 1M rows per second so that we will create between 100,000 and 1,000,000 + // rows for each second in the diff duration, depending on the test host vCPU count. + perSecondCount := int64(math.Min(float64(perVCpuCount*int64(runtime.NumCPU())), 1000000)) + totalRowsToCreate := seconds * perSecondCount + log.Infof("Test host has %d vCPUs. Generating %d rows in the customer table to test --max-diff-duration", runtime.NumCPU(), totalRowsToCreate) + for i := int64(0); i < totalRowsToCreate; i += chunkSize { + generateMoreCustomers(t, sourceKs, chunkSize) + } + + // Wait for the workflow to catch up after all the inserts. + waitForShardsToCatchup() + + // This flag is only implemented in vtctldclient. 
+ doVtctldclientVDiff(t, tc.targetKs, tc.workflow, allCellNames, nil, "--max-diff-duration", diffDuration) + + // Confirm that the customer table diff was restarted but not others. + tablet := vc.getPrimaryTablet(t, tc.targetKs, arrTargetShards[0]) + stat, err := getDebugVar(t, tablet.Port, []string{"VDiffRestartedTableDiffsCount"}) + require.NoError(t, err, "failed to get VDiffRestartedTableDiffsCount stat: %v", err) + customerRestarts := gjson.Parse(stat).Get("customer").Int() + require.Greater(t, customerRestarts, int64(0), "expected VDiffRestartedTableDiffsCount stat to be greater than 0 for the customer table, got %d", customerRestarts) + leadRestarts := gjson.Parse(stat).Get("lead").Int() + require.Equal(t, int64(0), leadRestarts, "expected VDiffRestartedTableDiffsCount stat to be 0 for the Lead table, got %d", leadRestarts) + + // Cleanup the created customer records so as not to slow down the rest of the test. + delstmt := fmt.Sprintf("delete from %s.customer order by cid desc limit %d", sourceKs, chunkSize) + for i := int64(0); i < totalRowsToCreate; i += chunkSize { + _, err := vtgateConn.ExecuteFetch(delstmt, int(chunkSize), false) + require.NoError(t, err, "failed to cleanup added customer records: %v", err) + } + // Wait for the workflow to catch up again on the deletes. + waitForShardsToCatchup() + tc.vdiffCount++ // We only did vtctldclient vdiff create + } else { + vdiff(t, tc.targetKs, tc.workflow, allCellNames, true, true, nil) + tc.vdiffCount += 2 // We did vtctlclient AND vtctldclient vdiff create } + checkVDiffCountStat(t, statsTablet, tc.vdiffCount) - vdiff(t, tc.targetKs, tc.workflow, allCellNames, true, true, nil) + // Confirm that the VDiffRowsCompared stat -- which is a running count of the rows + // compared by vdiff per table at the controller level -- works as expected. 
+ vdrc, err := getDebugVar(t, statsTablet.Port, []string{"VDiffRowsCompared"}) + require.NoError(t, err, "failed to get VDiffRowsCompared stat from %s-%d tablet: %v", statsTablet.Cell, statsTablet.TabletUID, err) + uuid, jsout := performVDiff2Action(t, false, ksWorkflow, allCellNames, "show", "last", false, "--verbose") + expect := gjson.Get(jsout, fmt.Sprintf("Reports.customer.%s", statsShard)).Int() + got := gjson.Get(vdrc, fmt.Sprintf("%s.%s.%s", tc.workflow, uuid, "customer")).Int() + require.Equal(t, expect, got, "expected VDiffRowsCompared stat to be %d, but got %d", expect, got) if tc.autoRetryError { testAutoRetryError(t, tc, allCellNames) @@ -197,31 +303,47 @@ func testWorkflow(t *testing.T, vc *VitessCluster, tc *testCase, tks *Keyspace, testResume(t, tc, allCellNames) } - // These are done here so that we have a valid workflow to test the commands against + checkVDiffCountStat(t, statsTablet, tc.vdiffCount) + + // These are done here so that we have a valid workflow to test the commands against. if tc.stop { testStop(t, ksWorkflow, allCellNames) + tc.vdiffCount++ // We did either vtctlclient OR vtctldclient vdiff create } if tc.testCLICreateWait { testCLICreateWait(t, ksWorkflow, allCellNames) + tc.vdiffCount++ // We did either vtctlclient OR vtctldclient vdiff create } if tc.testCLIErrors { testCLIErrors(t, ksWorkflow, allCellNames) } + if tc.testCLIFlagHandling { + testCLIFlagHandling(t, tc.targetKs, tc.workflow, cells[0]) + tc.vdiffCount++ // We did either vtctlclient OR vtctldclient vdiff create + } + + checkVDiffCountStat(t, statsTablet, tc.vdiffCount) testDelete(t, ksWorkflow, allCellNames) + tc.vdiffCount = 0 // All vdiffs are deleted, so reset the count and check + checkVDiffCountStat(t, statsTablet, tc.vdiffCount) - // create another VDiff record to confirm it gets deleted when the workflow is completed + // Create another VDiff record to confirm it gets deleted when the workflow is completed. 
ts := time.Now() - uuid, _ := performVDiff2Action(t, false, ksWorkflow, allCellNames, "create", "", false) + uuid, _ = performVDiff2Action(t, false, ksWorkflow, allCellNames, "create", "", false) waitForVDiff2ToComplete(t, false, ksWorkflow, allCellNames, uuid, ts) + tc.vdiffCount++ + checkVDiffCountStat(t, statsTablet, tc.vdiffCount) err = vc.VtctlClient.ExecuteCommand(tc.typ, "--", "SwitchTraffic", ksWorkflow) require.NoError(t, err) err = vc.VtctlClient.ExecuteCommand(tc.typ, "--", "Complete", ksWorkflow) require.NoError(t, err) - // confirm the VDiff data is deleted for the workflow + // Confirm the VDiff data is deleted for the workflow. testNoOrphanedData(t, tc.targetKs, tc.workflow, arrTargetShards) + tc.vdiffCount = 0 // All vdiffs are deleted, so reset the count and check + checkVDiffCountStat(t, statsTablet, tc.vdiffCount) } func testCLIErrors(t *testing.T, ksWorkflow, cells string) { @@ -242,6 +364,70 @@ func testCLIErrors(t *testing.T, ksWorkflow, cells string) { }) } +// testCLIFlagHandling tests that the vtctldclient CLI flags are handled correctly +// from vtctldclient->vtctld->vttablet->mysqld. 
+func testCLIFlagHandling(t *testing.T, targetKs, workflowName string, cell *Cell) { + expectedOptions := &tabletmanagerdatapb.VDiffOptions{ + CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{ + MaxRows: 999, + MaxExtraRowsToCompare: 777, + AutoRetry: true, + UpdateTableStats: true, + TimeoutSeconds: 60, + MaxDiffSeconds: 333, + }, + PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{ + SourceCell: "zone1,zone2,zone3,zonefoosource", + TargetCell: "zone1,zone2,zone3,zonefootarget", + TabletTypes: "replica,primary,rdonly", + }, + ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ + MaxSampleRows: 888, + OnlyPks: true, + }, + } + + t.Run("Client flag handling", func(t *testing.T) { + res, err := vc.VtctldClient.ExecuteCommandWithOutput("vdiff", "--target-keyspace", targetKs, "--workflow", workflowName, + "create", + "--limit", fmt.Sprintf("%d", expectedOptions.CoreOptions.MaxRows), + "--max-report-sample-rows", fmt.Sprintf("%d", expectedOptions.ReportOptions.MaxSampleRows), + "--max-extra-rows-to-compare", fmt.Sprintf("%d", expectedOptions.CoreOptions.MaxExtraRowsToCompare), + "--filtered-replication-wait-time", fmt.Sprintf("%v", time.Duration(expectedOptions.CoreOptions.TimeoutSeconds)*time.Second), + "--max-diff-duration", fmt.Sprintf("%v", time.Duration(expectedOptions.CoreOptions.MaxDiffSeconds)*time.Second), + "--source-cells", expectedOptions.PickerOptions.SourceCell, + "--target-cells", expectedOptions.PickerOptions.TargetCell, + "--tablet-types", expectedOptions.PickerOptions.TabletTypes, + fmt.Sprintf("--update-table-stats=%t", expectedOptions.CoreOptions.UpdateTableStats), + fmt.Sprintf("--auto-retry=%t", expectedOptions.CoreOptions.AutoRetry), + fmt.Sprintf("--only-pks=%t", expectedOptions.ReportOptions.OnlyPks), + "--tablet-types-in-preference-order=false", // So tablet_types should not start with "in_order:", which is the default + "--format=json") // So we can easily grab the UUID + require.NoError(t, err, "vdiff command failed: %s", res) + 
jsonRes := gjson.Parse(res) + vduuid, err := uuid.Parse(jsonRes.Get("UUID").String()) + require.NoError(t, err, "invalid UUID: %s", jsonRes.Get("UUID").String()) + + // Confirm that the options were passed through and saved correctly. + query := sqlparser.BuildParsedQuery("select options from %s.vdiff where vdiff_uuid = %s", + sidecarDBIdentifier, encodeString(vduuid.String())).Query + tablets := vc.getVttabletsInKeyspace(t, cell, targetKs, "PRIMARY") + require.Greater(t, len(tablets), 0, "no primary tablets found in keyspace %s", targetKs) + tablet := maps.Values(tablets)[0] + qres, err := tablet.QueryTablet(query, targetKs, false) + require.NoError(t, err, "query %q failed: %v", query, err) + require.NotNil(t, qres, "query %q returned nil result", query) // Should never happen + require.Equal(t, 1, len(qres.Rows), "query %q returned %d rows, expected 1", query, len(qres.Rows)) + require.Equal(t, 1, len(qres.Rows[0]), "query %q returned %d columns, expected 1", query, len(qres.Rows[0])) + storedOptions := &tabletmanagerdatapb.VDiffOptions{} + bytes, err := qres.Rows[0][0].ToBytes() + require.NoError(t, err, "failed to convert result %+v to bytes: %v", qres.Rows[0], err) + err = protojson.Unmarshal(bytes, storedOptions) + require.NoError(t, err, "failed to unmarshal result %s to a %T: %v", string(bytes), storedOptions, err) + require.True(t, proto.Equal(expectedOptions, storedOptions), "stored options %v != expected options %v", storedOptions, expectedOptions) + }) +} + func testDelete(t *testing.T, ksWorkflow, cells string) { t.Run("Delete", func(t *testing.T) { // Let's be sure that we have at least 3 unique VDiffs. 
@@ -302,15 +488,17 @@ func testNoOrphanedData(t *testing.T, keyspace, workflow string, shards []string func testResume(t *testing.T, tc *testCase, cells string) { t.Run("Resume", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) - // confirm the last VDiff is in the expected completed state + // Confirm the last VDiff is in the expected completed state. uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) jsonOutput := getVDiffInfo(output) require.Equal(t, "completed", jsonOutput.State) - // save the number of rows compared in previous runs + // Save the number of rows compared in previous runs. rowsCompared := jsonOutput.RowsCompared - ogTime := time.Now() // the completed_at should be later than this after resuming + ogTime := time.Now() // The completed_at should be later than this after resuming expectedNewRows := int64(0) if tc.resumeInsert != "" { @@ -323,6 +511,7 @@ func testResume(t *testing.T, tc *testCase, cells string) { // expected number of rows in total (original run and resume) _, _ = performVDiff2Action(t, false, ksWorkflow, cells, "resume", uuid, false) info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) + require.NotNil(t, info) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) }) @@ -344,18 +533,20 @@ func testStop(t *testing.T, ksWorkflow, cells string) { func testAutoRetryError(t *testing.T, tc *testCase, cells string) { t.Run("Auto retry on error", func(t *testing.T) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() ksWorkflow := fmt.Sprintf("%s.%s", tc.targetKs, tc.workflow) - // confirm the last VDiff is in the expected completed state + // Confirm the last VDiff is in the expected completed state. 
uuid, output := performVDiff2Action(t, false, ksWorkflow, cells, "show", "last", false) jsonOutput := getVDiffInfo(output) require.Equal(t, "completed", jsonOutput.State) - // save the number of rows compared in the first run + // Save the number of rows compared in the first run. rowsCompared := jsonOutput.RowsCompared - ogTime := time.Now() // the completed_at should be later than this upon retry + ogTime := time.Now() // The completed_at should be later than this upon retry - // create new data since original VDiff run -- if requested -- to confirm that the rows - // compared is cumulative + // Create new data since original VDiff run -- if requested -- to confirm that the rows + // compared is cumulative. expectedNewRows := int64(0) if tc.retryInsert != "" { res := execVtgateQuery(t, vtgateConn, tc.sourceKs, tc.retryInsert) @@ -363,18 +554,19 @@ func testAutoRetryError(t *testing.T, tc *testCase, cells string) { } expectedRows := rowsCompared + expectedNewRows - // update the VDiff to simulate an ephemeral error having occurred + // Update the VDiff to simulate an ephemeral error having occurred. for _, shard := range strings.Split(tc.targetShards, ",") { tab := vc.getPrimaryTablet(t, tc.targetKs, shard) res, err := tab.QueryTabletWithDB(sqlparser.BuildParsedQuery(sqlSimulateError, sidecarDBIdentifier, sidecarDBIdentifier, encodeString(uuid)).Query, "vt_"+tc.targetKs) require.NoError(t, err) - // should have updated the vdiff record and at least one vdiff_table record + // Should have updated the vdiff record and at least one vdiff_table record. require.GreaterOrEqual(t, int(res.RowsAffected), 2) } - // confirm that the VDiff was retried, able to complete, and we compared the expected - // number of rows in total (original run and retry) + // Confirm that the VDiff was retried, able to complete, and we compared the expected + // number of rows in total (original run and retry). 
info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, ogTime) + require.NotNil(t, info) require.False(t, info.HasMismatch) require.Equal(t, expectedRows, info.RowsCompared) }) diff --git a/go/test/endtoend/vreplication/vdiff_helper_test.go b/go/test/endtoend/vreplication/vdiff_helper_test.go index 38ae9273a42..53e19e56731 100644 --- a/go/test/endtoend/vreplication/vdiff_helper_test.go +++ b/go/test/endtoend/vreplication/vdiff_helper_test.go @@ -17,6 +17,7 @@ limitations under the License. package vreplication import ( + "context" "fmt" "strings" "testing" @@ -27,11 +28,15 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" ) const ( - vdiffTimeout = time.Second * 90 // we can leverage auto retry on error with this longer-than-usual timeout + vdiffTimeout = 120 * time.Second // We can leverage auto retry on error with this longer-than-usual timeout + vdiffRetryTimeout = 30 * time.Second + vdiffStatusCheckInterval = 5 * time.Second + vdiffRetryInterval = 5 * time.Second ) var ( @@ -66,6 +71,7 @@ func doVtctlclientVDiff(t *testing.T, keyspace, workflow, cells string, want *ex // update-table-stats is needed in order to test progress reports. 
uuid, _ := performVDiff2Action(t, true, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") info := waitForVDiff2ToComplete(t, true, ksWorkflow, cells, uuid, time.Time{}) + require.NotNil(t, info) require.Equal(t, workflow, info.Workflow) require.Equal(t, keyspace, info.Keyspace) if want != nil { @@ -75,6 +81,7 @@ func doVtctlclientVDiff(t *testing.T, keyspace, workflow, cells string, want *ex } else { require.Equal(t, "completed", info.State, "vdiff results: %+v", info) require.False(t, info.HasMismatch, "vdiff results: %+v", info) + require.NotZero(t, info.RowsCompared) } if strings.Contains(t.Name(), "AcrossDBVersions") { log.Errorf("VDiff resume cannot be guaranteed between major MySQL versions due to implied collation differences, skipping resume test...") @@ -85,14 +92,16 @@ func doVtctlclientVDiff(t *testing.T, keyspace, workflow, cells string, want *ex func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cells, uuid string, completedAtMin time.Time) *vdiffInfo { var info *vdiffInfo + var jsonStr string first := true previousProgress := vdiff2.ProgressReport{} ch := make(chan bool) go func() { for { - time.Sleep(1 * time.Second) - _, jsonStr := performVDiff2Action(t, useVtctlclient, ksWorkflow, cells, "show", uuid, false) + time.Sleep(vdiffStatusCheckInterval) + _, jsonStr = performVDiff2Action(t, useVtctlclient, ksWorkflow, cells, "show", uuid, false) info = getVDiffInfo(jsonStr) + require.NotNil(t, info) if info.State == "completed" { if !completedAtMin.IsZero() { ca := info.CompletedAt @@ -103,7 +112,7 @@ func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cell } ch <- true return - } else if info.State == "started" { // test the progress report + } else if info.State == "started" { // Test the progress report // The ETA should always be in the future -- when we're able to estimate // it -- and the progress percentage should only increase. 
// The timestamp format allows us to compare them lexicographically. @@ -136,30 +145,38 @@ func waitForVDiff2ToComplete(t *testing.T, useVtctlclient bool, ksWorkflow, cell case <-ch: return info case <-time.After(vdiffTimeout): + log.Errorf("VDiff never completed for UUID %s. Latest output: %s", uuid, jsonStr) require.FailNow(t, fmt.Sprintf("VDiff never completed for UUID %s", uuid)) return nil } } type expectedVDiff2Result struct { - state string - shards []string - hasMismatch bool + state string + shards []string + hasMismatch bool + minimumRowsCompared int64 } -func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result) { +func doVtctldclientVDiff(t *testing.T, keyspace, workflow, cells string, want *expectedVDiff2Result, extraFlags ...string) { ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflow) t.Run(fmt.Sprintf("vtctldclient vdiff %s", ksWorkflow), func(t *testing.T) { // update-table-stats is needed in order to test progress reports. - uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, "--auto-retry", "--update-table-stats") + flags := []string{"--auto-retry", "--update-table-stats"} + if len(extraFlags) > 0 { + flags = append(flags, extraFlags...) + } + uuid, _ := performVDiff2Action(t, false, ksWorkflow, cells, "create", "", false, flags...) 
info := waitForVDiff2ToComplete(t, false, ksWorkflow, cells, uuid, time.Time{}) - + require.NotNil(t, info) require.Equal(t, workflow, info.Workflow) require.Equal(t, keyspace, info.Keyspace) if want != nil { require.Equal(t, want.state, info.State) require.Equal(t, strings.Join(want.shards, ","), info.Shards) require.Equal(t, want.hasMismatch, info.HasMismatch) + require.GreaterOrEqual(t, info.RowsCompared, want.minimumRowsCompared, + "not enough rows compared: want at least %d, got %d", want.minimumRowsCompared, info.RowsCompared) } else { require.Equal(t, "completed", info.State, "vdiff results: %+v", info) require.False(t, info.HasMismatch, "vdiff results: %+v", info) @@ -175,7 +192,7 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a var err error targetKeyspace, workflowName, ok := strings.Cut(ksWorkflow, ".") require.True(t, ok, "invalid keyspace.workflow value: %s", ksWorkflow) - + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) if useVtctlclient { // This will always result in us using a PRIMARY tablet, which is all // we start in many e2e tests, but it avoids the tablet picker logic @@ -186,7 +203,7 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a args = append(args, extraFlags...) } args = append(args, ksWorkflow, action, actionArg) - output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) + output, err = execVDiffWithRetry(t, expectError, false, args) log.Infof("vdiff output: %+v (err: %+v)", output, err) if !expectError { require.Nil(t, err) @@ -211,7 +228,8 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a if actionArg != "" { args = append(args, actionArg) } - output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) 
+ + output, err = execVDiffWithRetry(t, expectError, true, args) log.Infof("vdiff output: %+v (err: %+v)", output, err) if !expectError { require.NoError(t, err) @@ -226,6 +244,79 @@ func performVDiff2Action(t *testing.T, useVtctlclient bool, ksWorkflow, cells, a return uuid, output } +// During SwitchTraffic, due to changes in the cluster, vdiff can return transient errors. isVDiffRetryable() is used to +// ignore such errors and retry vdiff expecting the condition to be resolved. +func isVDiffRetryable(str string) bool { + for _, s := range []string{"Error while dialing", "failed to connect"} { + if strings.Contains(str, s) { + return true + } + } + return false +} + +type vdiffResult struct { + output string + err error +} + +// execVDiffWithRetry will ignore transient errors that can occur during workflow state changes. +func execVDiffWithRetry(t *testing.T, expectError bool, useVtctldClient bool, args []string) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), vdiffRetryTimeout) + defer cancel() + vdiffResultCh := make(chan vdiffResult) + go func() { + var output string + var err error + retry := false + for { + select { + case <-ctx.Done(): + return + default: + } + if retry { + time.Sleep(vdiffRetryInterval) + } + retry = false + if useVtctldClient { + output, err = vc.VtctldClient.ExecuteCommandWithOutput(args...) + } else { + output, err = vc.VtctlClient.ExecuteCommandWithOutput(args...) 
+ } + if err != nil { + if expectError { + result := vdiffResult{output: output, err: err} + vdiffResultCh <- result + return + } + log.Infof("vdiff error: %s", err) + if isVDiffRetryable(err.Error()) { + retry = true + } else { + result := vdiffResult{output: output, err: err} + vdiffResultCh <- result + return + } + } + if isVDiffRetryable(output) { + retry = true + } + if !retry { + result := vdiffResult{output: output, err: nil} + vdiffResultCh <- result + return + } + } + }() + select { + case <-ctx.Done(): + return "", fmt.Errorf("timed out waiting for vdiff to complete") + case result := <-vdiffResultCh: + return result.output, result.err + } +} + type vdiffInfo struct { Workflow, Keyspace string State, Shards string @@ -260,6 +351,8 @@ func encodeString(in string) string { // generateMoreCustomers creates additional test data for better tests // when needed. func generateMoreCustomers(t *testing.T, keyspace string, numCustomers int64) { + vtgateConn, closeConn := getVTGateConn() + defer closeConn() log.Infof("Generating more test data with an additional %d customers", numCustomers) res := execVtgateQuery(t, vtgateConn, keyspace, "select max(cid) from customer") startingID, _ := res.Rows[0][0].ToInt64() diff --git a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go index 0f6a9f668d0..a4c25941801 100644 --- a/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go +++ b/go/test/endtoend/vreplication/vdiff_multiple_movetables_test.go @@ -27,43 +27,26 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) func TestMultipleConcurrentVDiffs(t *testing.T) { - cellName := "zone" - cells := []string{cellName} - vc = NewVitessCluster(t, t.Name(), cells, mainClusterConfig) - - require.NotNil(t, vc) - allCellNames = cellName - defaultCellName := cellName - 
defaultCell = vc.Cells[defaultCellName] + cellName := "zone1" + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + sourceKeyspace := "product" shardName := "0" - defer vc.TearDown(t) - cell := vc.Cells[cellName] vc.AddKeyspace(t, []*Cell{cell}, sourceKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, 100, sourceKsOpts) - vtgate = cell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKeyspace, shardName) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", sourceKeyspace, shardName), 1, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) - insertInitialData(t) targetTabletId := 200 targetKeyspace := "customer" vc.AddKeyspace(t, []*Cell{cell}, targetKeyspace, shardName, initialProductVSchema, initialProductSchema, 0, 0, targetTabletId, sourceKsOpts) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", targetKeyspace, shardName), 1, 30*time.Second) index := 1000 var loadCtx context.Context @@ -93,12 +76,16 @@ func TestMultipleConcurrentVDiffs(t *testing.T) { time.Sleep(15 * time.Second) // wait for some rows to be inserted. 
createWorkflow := func(workflowName, tables string) { - mt := newMoveTables(vc, &moveTables{ - workflowName: workflowName, - targetKeyspace: targetKeyspace, + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + tabletTypes: "primary", + }, sourceKeyspace: sourceKeyspace, tables: tables, - }, moveTablesFlavorVtctld) + }, workflowFlavorVtctld) mt.Create() waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", targetKeyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) catchup(t, targetTab, workflowName, "MoveTables") diff --git a/go/test/endtoend/vreplication/vdiff_online_ddl_test.go b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go new file mode 100644 index 00000000000..92977111294 --- /dev/null +++ b/go/test/endtoend/vreplication/vdiff_online_ddl_test.go @@ -0,0 +1,163 @@ +package vreplication + +import ( + "context" + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/encoding/protojson" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/vtctldata" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +// TestOnlineDDLVDiff is to run a vdiff on a table that is part of an OnlineDDL workflow. 
+func TestOnlineDDLVDiff(t *testing.T) { + setSidecarDBName("_vt") + originalRdonly := defaultRdonly + originalReplicas := defaultReplicas + defaultRdonly = 0 + defaultReplicas = 0 + defer func() { + defaultRdonly = originalRdonly + defaultReplicas = originalReplicas + }() + + vc = setupMinimalCluster(t) + defer vc.TearDown() + keyspace := "product" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + createQuery := "create table temp (id int, name varchar(100), blb blob, primary key (id))" + dropQuery := "drop table temp" + alterQuery := "alter table temp add column extra1 int not null default 0" + insertTemplate := "insert into temp (id, name, blb) values (%d, 'name%d', 'blb%d')" + updateTemplate := "update temp set name = 'name_%d' where id = %d" + execOnlineDDL(t, "direct", keyspace, createQuery) + defer execOnlineDDL(t, "direct", keyspace, dropQuery) + + var output string + + t.Run("OnlineDDL VDiff", func(t *testing.T) { + var done = make(chan bool) + go populate(ctx, t, done, insertTemplate, updateTemplate) + + waitForAdditionalRows(t, keyspace, "temp", 100) + output = execOnlineDDL(t, "vitess --postpone-completion", keyspace, alterQuery) + uuid := strings.TrimSpace(output) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, uuid), binlogdatapb.VReplicationWorkflowState_Running.String()) + waitForAdditionalRows(t, keyspace, "temp", 200) + + require.NoError(t, waitForCondition("online ddl migration to be ready to complete", func() bool { + response := onlineDDLShow(t, keyspace, uuid) + if len(response.Migrations) > 0 && + response.Migrations[0].ReadyToComplete == true { + return true + } + return false + }, defaultTimeout)) + + want := &expectedVDiff2Result{ + state: "completed", + minimumRowsCompared: 200, + hasMismatch: false, + shards: []string{"0"}, + } + doVtctldclientVDiff(t, keyspace, uuid, "zone1", want) + + cancel() + <-done + }) +} + +func onlineDDLShow(t *testing.T, keyspace, uuid string) 
*vtctldata.GetSchemaMigrationsResponse { + var response vtctldata.GetSchemaMigrationsResponse + output, err := vc.VtctldClient.OnlineDDLShow(keyspace, uuid) + require.NoError(t, err, output) + err = protojson.Unmarshal([]byte(output), &response) + require.NoErrorf(t, err, "error unmarshalling OnlineDDL showresponse") + return &response +} + +func execOnlineDDL(t *testing.T, strategy, keyspace, query string) string { + output, err := vc.VtctldClient.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", strategy, "--sql", query, keyspace) + require.NoError(t, err, output) + uuid := strings.TrimSpace(output) + if strategy != "direct" { + err = waitForCondition("online ddl to start", func() bool { + response := onlineDDLShow(t, keyspace, uuid) + if len(response.Migrations) > 0 && + (response.Migrations[0].Status == vtctldata.SchemaMigration_RUNNING || + response.Migrations[0].Status == vtctldata.SchemaMigration_COMPLETE) { + return true + } + return false + }, defaultTimeout) + require.NoError(t, err) + // The online ddl migration is set to SchemaMigration_RUNNING before it creates the + // _vt.vreplication records. Hence wait for the vreplication workflow to be created as well. 
+ waitForWorkflowToBeCreated(t, vc, fmt.Sprintf("%s.%s", keyspace, uuid)) + } + return uuid +} + +func waitForAdditionalRows(t *testing.T, keyspace, table string, count int) { + vtgateConn, cancel := getVTGateConn() + defer cancel() + + numRowsStart := getNumRows(t, vtgateConn, keyspace, table) + numRows := 0 + shortCtx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + for { + switch { + case shortCtx.Err() != nil: + require.FailNowf(t, "Timed out waiting for additional rows", "wanted %d rows, got %d rows", count, numRows) + default: + numRows = getNumRows(t, vtgateConn, keyspace, table) + if numRows >= numRowsStart+count { + return + } + time.Sleep(defaultTick) + } + } +} + +func getNumRows(t *testing.T, vtgateConn *mysql.Conn, keyspace, table string) int { + qr := execVtgateQuery(t, vtgateConn, keyspace, fmt.Sprintf("SELECT COUNT(*) FROM %s", table)) + require.NotNil(t, qr) + numRows, err := strconv.Atoi(qr.Rows[0][0].ToString()) + require.NoError(t, err) + return numRows +} + +func populate(ctx context.Context, t *testing.T, done chan bool, insertTemplate, updateTemplate string) { + defer close(done) + vtgateConn, closeConn := getVTGateConn() + defer closeConn() + id := 1 + for { + select { + case <-ctx.Done(): + log.Infof("load cancelled") + return + default: + query := fmt.Sprintf(insertTemplate, id, id, id) + _, err := vtgateConn.ExecuteFetch(query, 1, false) + require.NoErrorf(t, err, "error in insert") + query = fmt.Sprintf(updateTemplate, id, id) + _, err = vtgateConn.ExecuteFetch(query, 1, false) + require.NoErrorf(t, err, "error in update") + id++ + time.Sleep(10 * time.Millisecond) + } + } +} diff --git a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 62d174df067..c06489006f8 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -22,42 +22,38 @@ import ( "io" "net/http" "runtime" + 
"strconv" "strings" "sync" "testing" "time" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/vtgateconn" - + "github.com/buger/jsonparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" - "github.com/buger/jsonparser" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" - throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" ) var ( vc *VitessCluster - vtgate *cluster.VtgateProcess - defaultCell *Cell - vtgateConn *mysql.Conn defaultRdonly int defaultReplicas int - allCellNames string sourceKsOpts = make(map[string]string) targetKsOpts = make(map[string]string) httpClient = throttlebase.SetupHTTPClient(time.Second) @@ -122,15 +118,16 @@ func throttlerCheckSelf(tablet *cluster.VttabletProcess, throttlerApp throttlera // NOTE: this is a manual test. It is not executed in the // CI. 
func TestVReplicationDDLHandling(t *testing.T) { + var err error workflow := "onddl_test" ksWorkflow := fmt.Sprintf("%s.%s", targetKs, workflow) table := "orders" newColumn := "ddltest" cell := "zone1" shard := "0" - vc = NewVitessCluster(t, t.Name(), []string{cell}, mainClusterConfig) - defer vc.TearDown(t) - defaultCell = vc.Cells[cell] + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[cell] if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, shard, initialProductVSchema, initialProductSchema, 0, 0, 100, nil); err != nil { t.Fatal(err) @@ -138,15 +135,12 @@ func TestVReplicationDDLHandling(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) + verifyClusterHealth(t, vc) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) targetTab := vc.getPrimaryTablet(t, targetKs, shard) @@ -163,8 +157,25 @@ func TestVReplicationDDLHandling(t *testing.T) { checkColQueryTarget := fmt.Sprintf("select count(column_name) from information_schema.columns where table_schema='vt_%s' and table_name='%s' and column_name='%s'", targetKs, table, newColumn) + // expectedAction is the specific action, e.g. ignore, that should have a count of 1. All other + // actions should have a count of 0. id is the stream ID to check. 
+ checkOnDDLStats := func(expectedAction binlogdatapb.OnDDLAction, id int) { + jsVal, err := getDebugVar(t, targetTab.Port, []string{"VReplicationDDLActions"}) + require.NoError(t, err) + require.NotEqual(t, "{}", jsVal) + // The JSON values look like this: {"onddl_test.3.IGNORE": 1} + for _, action := range binlogdatapb.OnDDLAction_name { + count := gjson.Get(jsVal, fmt.Sprintf(`%s\.%d\.%s`, workflow, id, action)).Int() + expectedCount := int64(0) + if action == expectedAction.String() { + expectedCount = 1 + } + require.Equal(t, expectedCount, count, "expected %s stat counter of %d but got %d, full value: %s", action, expectedCount, count, jsVal) + } + } + // Test IGNORE behavior - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl=IGNORE") + moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_IGNORE.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Add new col on source @@ -176,8 +187,10 @@ func TestVReplicationDDLHandling(t *testing.T) { waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") // Confirm new col does exist on source waitForQueryResult(t, vtgateConn, sourceKs, checkColQuerySource, "[[INT64(1)]]") - // Also test Cancel --keep_routing_rules - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table, "--keep_routing_rules") + // Confirm that we updated the stats on the target tablet as expected. 
+ checkOnDDLStats(binlogdatapb.OnDDLAction_IGNORE, 1) + // Also test Cancel --keep-routing-rules + moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table, "--keep-routing-rules") // Confirm that the routing rules were NOT cleared rr, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules") require.NoError(t, err) @@ -194,7 +207,7 @@ func TestVReplicationDDLHandling(t *testing.T) { require.NoError(t, err, "error executing %q: %v", dropColDDL, err) // Test STOP behavior (new col now exists nowhere) - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl=STOP") + moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_STOP.String()) // Wait until we get through the copy phase... catchup(t, targetTab, workflow, "MoveTables") // Add new col on the source @@ -204,10 +217,12 @@ func TestVReplicationDDLHandling(t *testing.T) { waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String(), fmt.Sprintf("Message==Stopped at DDL %s", addColDDL)) // Confirm that the target does not have new col waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") + // Confirm that we updated the stats on the target tablet as expected. + checkOnDDLStats(binlogdatapb.OnDDLAction_STOP, 2) moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) // Test EXEC behavior (new col now exists on source) - moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl=EXEC") + moveTablesAction(t, "Create", defaultCellName, workflow, sourceKs, targetKs, table, "--on-ddl", binlogdatapb.OnDDLAction_EXEC.String()) // Wait until we get through the copy phase... 
catchup(t, targetTab, workflow, "MoveTables") // Confirm target has new col from copy phase @@ -219,7 +234,8 @@ func TestVReplicationDDLHandling(t *testing.T) { waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) // Confirm new col was dropped on target waitForQueryResult(t, vtgateConn, targetKs, checkColQueryTarget, "[[INT64(0)]]") - moveTablesAction(t, "Cancel", defaultCellName, workflow, sourceKs, targetKs, table) + // Confirm that we updated the stats on the target tablet as expected. + checkOnDDLStats(binlogdatapb.OnDDLAction_EXEC, 3) } // TestVreplicationCopyThrottling tests the logic that is used @@ -234,15 +250,15 @@ func TestVreplicationCopyThrottling(t *testing.T) { cell := "zone1" table := "customer" shard := "0" - vc = NewVitessCluster(t, "TestVreplicationCopyThrottling", []string{cell}, mainClusterConfig) - defer vc.TearDown(t) - defaultCell = vc.Cells[cell] + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[cell] // To test vstreamer source throttling for the MoveTables operation maxSourceTrxHistory := int64(5) extraVTTabletArgs = []string{ // We rely on holding open transactions to generate innodb history so extend the timeout // to avoid flakiness when the CI is very slow. 
- fmt.Sprintf("--queryserver-config-transaction-timeout=%d", int64(defaultTimeout.Seconds())*3), + fmt.Sprintf("--queryserver-config-transaction-timeout=%s", (defaultTimeout * 3).String()), fmt.Sprintf("--vreplication_copy_phase_max_innodb_history_list_length=%d", maxSourceTrxHistory), parallelInsertWorkers, } @@ -253,12 +269,8 @@ func TestVreplicationCopyThrottling(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] + vtgate := defaultCell.Vtgates[0] require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) // Confirm that the initial copy table phase does not proceed until the source tablet(s) // have an InnoDB History List length that is less than specified in the tablet's config. @@ -280,6 +292,7 @@ func TestVreplicationCopyThrottling(t *testing.T) { } func TestBasicVreplicationWorkflow(t *testing.T) { + defer setAllVTTabletExperimentalFlags() sourceKsOpts["DBTypeVersion"] = "mysql-8.0" targetKsOpts["DBTypeVersion"] = "mysql-8.0" testBasicVreplicationWorkflow(t, "noblob") @@ -300,12 +313,10 @@ func testBasicVreplicationWorkflow(t *testing.T, binlogRowImage string) { // If limited == true, we only run a limited set of workflows. 
func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string) { + var err error defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", allCells, mainClusterConfig) - - require.NotNil(t, vc) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() // Keep the cluster processes minimal to deal with CI resource constraints defaultReplicas = 0 defaultRdonly = 0 @@ -315,16 +326,11 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) } - defer vc.TearDown(t) - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() verifyClusterHealth(t, vc) insertInitialData(t) @@ -354,10 +360,13 @@ func testVreplicationWorkflows(t *testing.T, limited bool, binlogRowImage string insertMoreCustomers(t, 16) reshardCustomer2to4Split(t, nil, "") + confirmAllStreamsRunning(t, vtgateConn, "customer:-40") expectNumberOfStreams(t, vtgateConn, "Customer2to4", "sales", "product:0", 4) reshardCustomer3to2SplitMerge(t) + confirmAllStreamsRunning(t, vtgateConn, "customer:-60") expectNumberOfStreams(t, vtgateConn, "Customer3to2", "sales", "product:0", 3) reshardCustomer3to1Merge(t) + confirmAllStreamsRunning(t, vtgateConn, "customer:0") expectNumberOfStreams(t, vtgateConn, 
"Customer3to1", "sales", "product:0", 1) t.Run("Verify CopyState Is Optimized Afterwards", func(t *testing.T) { @@ -423,49 +432,14 @@ func TestMoveTablesMariaDBToMySQL(t *testing.T) { testVreplicationWorkflows(t, true /* only do MoveTables */, "") } -func TestMultiCellVreplicationWorkflow(t *testing.T) { - cells := []string{"zone1", "zone2"} - allCellNames = strings.Join(cells, ",") - - vc = NewVitessCluster(t, "TestMultiCellVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] - keyspace := "product" - shard := "0" - - defer vc.TearDown(t) - - cell1 := vc.Cells["zone1"] - cell2 := vc.Cells["zone2"] - vc.AddKeyspace(t, []*Cell{cell1, cell2}, keyspace, shard, initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, sourceKsOpts) - - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() - verifyClusterHealth(t, vc) - insertInitialData(t) - shardCustomer(t, true, []*Cell{cell1, cell2}, cell2.Name, true) - isTableInDenyList(t, vc, "product:0", "customer") - // we tag along this test so as not to create the overhead of creating another cluster - testVStreamCellFlag(t) -} - func TestVStreamFlushBinlog(t *testing.T) { defaultCellName := "zone1" - allCells := []string{defaultCellName} - allCellNames = defaultCellName workflow := "test_vstream_p2c" shard := "0" - vc = NewVitessCluster(t, "TestVStreamBinlogFlush", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) - defer vc.TearDown(t) - defaultCell = vc.Cells[defaultCellName] + defer vc.TearDown() + defaultCell := vc.Cells[defaultCellName] // Keep the cluster 
processes minimal (no rdonly and no replica tablets) // to deal with CI resource constraints. @@ -477,16 +451,8 @@ func TestVStreamFlushBinlog(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, shard, "", "", 0, 0, 200, nil); err != nil { t.Fatal(err) } - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, sourceKs, shard) - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, shard) - require.NoError(t, err) verifyClusterHealth(t, vc) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() sourceTab = vc.getPrimaryTablet(t, sourceKs, shard) insertInitialData(t) @@ -502,7 +468,9 @@ func TestVStreamFlushBinlog(t *testing.T) { // Generate a lot of binlog event bytes targetBinlogSize := vstreamer.GetBinlogRotationThreshold() + 1024 - vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() + queryF := "insert into db_order_test (c_uuid, dbstuff, created_at) values ('%d', '%s', now())" for i := 100; i < 10000; i++ { randStr, err := randHex(6500) @@ -577,7 +545,7 @@ func testVStreamCellFlag(t *testing.T) { flags.CellPreference = "onlyspecified" } - ctx2, cancel := context.WithTimeout(ctx, 30*time.Second) + ctx2, cancel := context.WithTimeout(ctx, 10*time.Second) reader, err := conn.VStream(ctx2, topodatapb.TabletType_REPLICA, vgtid, filter, flags) require.NoError(t, err) @@ -621,21 +589,16 @@ func testVStreamCellFlag(t *testing.T) { // We also reuse the setup of this test to validate that the "vstream * from" vtgate query functionality is functional func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - mainClusterConfig.vreplicationCompressGTID = true - defer func() { - mainClusterConfig.vreplicationCompressGTID = false - }() - vc = 
NewVitessCluster(t, "TestCellAliasVreplicationWorkflow", cells, mainClusterConfig) - require.NotNil(t, vc) - allCellNames = "zone1,zone2" - defaultCellName := "zone1" - defaultCell = vc.Cells[defaultCellName] + defer mainClusterConfig.enableGTIDCompression() + defer setAllVTTabletExperimentalFlags() + vc = NewVitessCluster(t, &clusterOptions{cells: cells}) + defer vc.TearDown() + keyspace := "product" shard := "0" require.NoError(t, utils.SetBinlogRowImageMode("noblob", vc.ClusterConfig.tmpDir)) defer utils.SetBinlogRowImageMode("", vc.ClusterConfig.tmpDir) - defer vc.TearDown(t) cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] @@ -645,26 +608,21 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { result, err := vc.VtctlClient.ExecuteCommandWithOutput("AddCellsAlias", "--", "--cells", "zone2", "alias") require.NoError(t, err, "command failed with output: %v", result) - vtgate = cell1.Vtgates[0] - require.NotNil(t, vtgate) - err = cluster.WaitForHealthyShard(vc.VtctldClient, keyspace, shard) - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspace, shard), 2, 30*time.Second) - - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) - insertInitialData(t) + vtgate := cell1.Vtgates[0] t.Run("VStreamFrom", func(t *testing.T) { - testVStreamFrom(t, keyspace, 2) + testVStreamFrom(t, vtgate, keyspace, 2) }) shardCustomer(t, true, []*Cell{cell1, cell2}, "alias", false) + isTableInDenyList(t, vc, "product:0", "customer") + // we tag along this test so as not to create the overhead of creating another cluster + testVStreamCellFlag(t) } // testVStreamFrom confirms that the "vstream * from" endpoint is serving data -func testVStreamFrom(t *testing.T, table string, expectedRowCount int) { +func testVStreamFrom(t *testing.T, vtgate *cluster.VtgateProcess, table string, expectedRowCount int) { ctx := context.Background() vtParams := 
mysql.ConnParams{ Host: "localhost", @@ -736,11 +694,6 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl if _, err := vc.AddKeyspace(t, cells, "customer", "-80,80-", customerVSchema, customerSchema, defaultReplicas, defaultRdonly, 200, targetKsOpts); err != nil { t.Fatal(err) } - err := cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "-80") - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, targetKs, "80-") - require.NoError(t, err) - // Assume we are operating on first cell defaultCell := cells[0] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] @@ -760,7 +713,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl // The wait in the next code block which checks that customer.dec80 is updated, also confirms that the // blob-related dmls we execute here are vreplicated. insertIntoBlobTable(t) - + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() // Confirm that the 0 scale decimal field, dec80, is replicated correctly dec80Replicated := false execVtgateQuery(t, vtgateConn, sourceKs, "update customer set dec80 = 0") @@ -777,6 +731,12 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl } require.Equal(t, true, dec80Replicated) + // Insert multiple rows in the loadtest table and immediately delete them to confirm that bulk delete + // works the same way with the vplayer optimization enabled and disabled. Currently this optimization + // is disabled by default, but enabled in TestCellAliasVreplicationWorkflow. 
+ execVtgateQuery(t, vtgateConn, sourceKs, "insert into loadtest(id, name) values(10001, 'tempCustomer'), (10002, 'tempCustomer2'), (10003, 'tempCustomer3'), (10004, 'tempCustomer4')") + execVtgateQuery(t, vtgateConn, sourceKs, "delete from loadtest where id > 10000") + // Confirm that all partial query metrics get updated when we are testing the noblob mode. t.Run("validate partial query counts", func(t *testing.T) { if !isBinlogRowImageNoBlob(t, productTab) { @@ -823,8 +783,9 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl } } vdiffSideBySide(t, ksWorkflow, "") - switchReadsDryRun(t, workflowType, allCellNames, ksWorkflow, dryRunResultsReadCustomerShard) - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + cellNames := getCellNames(cells) + switchReadsDryRun(t, workflowType, cellNames, ksWorkflow, dryRunResultsReadCustomerShard) + switchReads(t, workflowType, cellNames, ksWorkflow, false) assertQueryExecutesOnTablet(t, vtgateConn, productTab, "customer", query, query) var commit func(t *testing.T) @@ -868,7 +829,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl execVtgateQuery(t, vtgateConn, "customer", "update customer set meta = convert(x'7b7d' using utf8mb4) where cid = 1") if testReverse { // Reverse Replicate - switchReads(t, workflowType, allCellNames, ksWorkflow, true) + switchReads(t, workflowType, cellNames, ksWorkflow, true) printShardPositions(vc, ksShards) switchWrites(t, workflowType, ksWorkflow, true) @@ -888,7 +849,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl waitForNoWorkflowLag(t, vc, targetKs, workflow) // Go forward again - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + switchReads(t, workflowType, cellNames, ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) var exists bool @@ -896,7 +857,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl 
require.NoError(t, err, "Error getting denylist for customer:0") require.True(t, exists) - moveTablesAction(t, "Complete", allCellNames, workflow, sourceKs, targetKs, tables) + moveTablesAction(t, "Complete", cellNames, workflow, sourceKs, targetKs, tables) exists, err = isTableInDenyList(t, vc, "product:0", "customer") require.NoError(t, err, "Error getting denylist for customer:0") @@ -941,6 +902,8 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl func validateRollupReplicates(t *testing.T) { t.Run("validateRollupReplicates", func(t *testing.T) { insertMoreProducts(t) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() waitForRowCount(t, vtgateConn, "product", "rollup", 1) waitForQueryResult(t, vtgateConn, "product:0", "select rollupname, kount from rollup", `[[VARCHAR("total") INT32(5)]]`) @@ -949,6 +912,8 @@ func validateRollupReplicates(t *testing.T) { func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias string) { t.Run("reshardCustomer2to4Split", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() ksName := "customer" counts := map[string]int{"zone1-600": 4, "zone1-700": 5, "zone1-800": 6, "zone1-900": 5} reshard(t, ksName, "customer", "c2c4", "-80,80-", "-40,40-80,80-c0,c0-", @@ -962,6 +927,8 @@ func reshardCustomer2to4Split(t *testing.T, cells []*Cell, sourceCellOrAlias str func reshardMerchant2to3SplitMerge(t *testing.T) { t.Run("reshardMerchant2to3SplitMerge", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() ksName := merchantKeyspace counts := map[string]int{"zone1-1600": 0, "zone1-1700": 2, "zone1-1800": 0} reshard(t, ksName, "merchant", "m2m3", "-80,80-", "-40,40-c0,c0-", @@ -1009,6 +976,8 @@ func reshardMerchant2to3SplitMerge(t *testing.T) { func 
reshardMerchant3to1Merge(t *testing.T) { t.Run("reshardMerchant3to1Merge", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() ksName := merchantKeyspace counts := map[string]int{"zone1-2000": 3} reshard(t, ksName, "merchant", "m3m1", "-40,40-c0,c0-", "0", @@ -1042,21 +1011,18 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou tabletIDBase int, counts map[string]int, dryRunResultSwitchReads, dryRunResultSwitchWrites []string, cells []*Cell, sourceCellOrAlias string, autoIncrementStep int) { t.Run("reshard", func(t *testing.T) { + defaultCell := vc.Cells[vc.CellNames[0]] if cells == nil { cells = []*Cell{defaultCell} } if sourceCellOrAlias == "" { sourceCellOrAlias = defaultCell.Name } + callNames := getCellNames(cells) ksWorkflow := ksName + "." + workflow keyspace := vc.Cells[defaultCell.Name].Keyspaces[ksName] require.NoError(t, vc.AddShards(t, cells, keyspace, targetShards, defaultReplicas, defaultRdonly, tabletIDBase, targetKsOpts)) - arrTargetShardNames := strings.Split(targetShards, ",") - for _, shardName := range arrTargetShardNames { - err := cluster.WaitForHealthyShard(vc.VtctldClient, ksName, shardName) - require.NoError(t, err) - } tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "primary") // Test multi-primary setups, like a Galera cluster, which have auto increment steps > 1. 
@@ -1080,13 +1046,13 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou restartWorkflow(t, ksWorkflow) vdiffSideBySide(t, ksWorkflow, "") if dryRunResultSwitchReads != nil { - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica", "--dry-run") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "rdonly,replica", "--dry-run") } - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "rdonly,replica") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "rdonly,replica") if dryRunResultSwitchWrites != nil { - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary", "--dry-run") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "primary", "--dry-run") } - reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", allCellNames, "primary") + reshardAction(t, "SwitchTraffic", workflow, ksName, "", "", callNames, "primary") reshardAction(t, "Complete", workflow, ksName, "", "", "", "") for tabletName, count := range counts { if tablets[tabletName] == nil { @@ -1099,6 +1065,9 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou func shardOrders(t *testing.T) { t.Run("shardOrders", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + defaultCell := vc.Cells[vc.CellNames[0]] workflow := "o2c" cell := defaultCell.Name sourceKs := "product" @@ -1115,7 +1084,7 @@ func shardOrders(t *testing.T) { catchup(t, customerTab1, workflow, workflowType) catchup(t, customerTab2, workflow, workflowType) vdiffSideBySide(t, ksWorkflow, "") - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) moveTablesAction(t, "Complete", cell, workflow, sourceKs, 
targetKs, tables) waitForRowCountInTablet(t, customerTab1, "customer", "orders", 1) @@ -1141,7 +1110,10 @@ func checkThatVDiffFails(t *testing.T, keyspace, workflow string) { func shardMerchant(t *testing.T) { t.Run("shardMerchant", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "p2m" + defaultCell := vc.Cells[vc.CellNames[0]] cell := defaultCell.Name sourceKs := "product" targetKs := merchantKeyspace @@ -1150,10 +1122,6 @@ func shardMerchant(t *testing.T) { if _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, merchantKeyspace, "-80,80-", merchantVSchema, "", defaultReplicas, defaultRdonly, 400, targetKsOpts); err != nil { t.Fatal(err) } - err := cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "-80") - require.NoError(t, err) - err = cluster.WaitForHealthyShard(vc.VtctldClient, merchantKeyspace, "80-") - require.NoError(t, err) moveTablesAction(t, "Create", cell, workflow, sourceKs, targetKs, tables) merchantKs := vc.Cells[defaultCell.Name].Keyspaces[merchantKeyspace] merchantTab1 := merchantKs.Shards["-80"].Tablets["zone1-400"].Vttablet @@ -1163,7 +1131,7 @@ func shardMerchant(t *testing.T) { catchup(t, merchantTab2, workflow, workflowType) vdiffSideBySide(t, fmt.Sprintf("%s.%s", merchantKeyspace, workflow), "") - switchReads(t, workflowType, allCellNames, ksWorkflow, false) + switchReads(t, workflowType, strings.Join(vc.CellNames, ","), ksWorkflow, false) switchWrites(t, workflowType, ksWorkflow, false) printRoutingRules(t, vc, "After merchant movetables") @@ -1213,9 +1181,10 @@ func materialize(t *testing.T, spec string, useVtctldClient bool) { func materializeProduct(t *testing.T, useVtctldClient bool) { t.Run("materializeProduct", func(t *testing.T) { - // materializing from "product" keyspace to "customer" keyspace + // Materializing from "product" keyspace to "customer" keyspace. 
workflow := "cproduct" keyspace := "customer" + defaultCell := vc.Cells[vc.CellNames[0]] applyVSchema(t, materializeProductVSchema, keyspace) materialize(t, materializeProductSpec, useVtctldClient) customerTablets := vc.getVttabletsInKeyspace(t, defaultCell, keyspace, "primary") @@ -1226,7 +1195,7 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { productTablets := vc.getVttabletsInKeyspace(t, defaultCell, "product", "primary") t.Run("throttle-app-product", func(t *testing.T) { - // Now, throttle the streamer on source tablets, insert some rows + // Now, throttle the source side component (vstreamer), and insert some rows. for _, tab := range productTablets { body, err := throttleApp(tab, sourceThrottlerAppName) assert.NoError(t, err) @@ -1237,19 +1206,33 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { waitForTabletThrottlingStatus(t, tab, targetThrottlerAppName, throttlerStatusNotThrottled) } insertMoreProductsForSourceThrottler(t) - // To be fair to the test, we give the target time to apply the new changes. We expect it to NOT get them in the first place, - // we expect the additional rows to **not appear** in the materialized view + // To be fair to the test, we give the target time to apply the new changes. We + // expect it to NOT get them in the first place, we expect the additional rows + // to **not appear** in the materialized view. for _, tab := range customerTablets { waitForRowCountInTablet(t, tab, keyspace, workflow, 5) + // Confirm that we updated the stats on the target tablets as expected. + jsVal, err := getDebugVar(t, tab.Port, []string{"VReplicationThrottledCounts"}) + require.NoError(t, err) + require.NotEqual(t, "{}", jsVal) + // The JSON value looks like this: {"cproduct.4.tablet.vstreamer": 2} + vstreamerThrottledCount := gjson.Get(jsVal, fmt.Sprintf(`%s\.*\.tablet\.vstreamer`, workflow)).Int() + require.Greater(t, vstreamerThrottledCount, int64(0)) + // We only need to do this stat check once. 
+ val, err := getDebugVar(t, tab.Port, []string{"VReplicationThrottledCountTotal"}) + require.NoError(t, err) + throttledCount, err := strconv.ParseInt(val, 10, 64) + require.NoError(t, err) + require.GreaterOrEqual(t, throttledCount, vstreamerThrottledCount) } }) t.Run("unthrottle-app-product", func(t *testing.T) { - // unthrottle on source tablets, and expect the rows to show up + // Unthrottle the vstreamer component, and expect the rows to show up. for _, tab := range productTablets { body, err := unthrottleApp(tab, sourceThrottlerAppName) assert.NoError(t, err) assert.Contains(t, body, sourceThrottlerAppName) - // give time for unthrottling to take effect and for target to fetch data + // Give time for unthrottling to take effect and for targets to fetch data. waitForTabletThrottlingStatus(t, tab, sourceThrottlerAppName, throttlerStatusNotThrottled) } for _, tab := range customerTablets { @@ -1258,8 +1241,8 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { }) t.Run("throttle-app-customer", func(t *testing.T) { - // Now, throttle vreplication (vcopier/vapplier) on target tablets, and - // insert some more rows. + // Now, throttle vreplication on the target side (vplayer), and insert some + // more rows. for _, tab := range customerTablets { body, err := throttleApp(tab, targetThrottlerAppName) assert.NoError(t, err) @@ -1274,6 +1257,13 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { // rows to **not appear** in the materialized view. for _, tab := range customerTablets { waitForRowCountInTablet(t, tab, keyspace, workflow, 8) + // Confirm that we updated the stats on the target tablets as expected. 
+ jsVal, err := getDebugVar(t, tab.Port, []string{"VReplicationThrottledCounts"}) + require.NoError(t, err) + require.NotEqual(t, "{}", jsVal) + // The JSON value now looks like this: {"cproduct.4.tablet.vstreamer": 2, "cproduct.4.tablet.vplayer": 4} + vplayerThrottledCount := gjson.Get(jsVal, fmt.Sprintf(`%s\.*\.tablet\.vplayer`, workflow)).Int() + require.Greater(t, vplayerThrottledCount, int64(0)) } }) t.Run("unthrottle-app-customer", func(t *testing.T) { @@ -1294,9 +1284,12 @@ func materializeProduct(t *testing.T, useVtctldClient bool) { func materializeRollup(t *testing.T, useVtctldClient bool) { t.Run("materializeRollup", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() keyspace := "product" workflow := "rollup" applyVSchema(t, materializeSalesVSchema, keyspace) + defaultCell := vc.Cells[vc.CellNames[0]] productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet materialize(t, materializeRollupSpec, useVtctldClient) catchup(t, productTab, workflow, "Materialize") @@ -1308,9 +1301,12 @@ func materializeRollup(t *testing.T, useVtctldClient bool) { func materializeSales(t *testing.T, useVtctldClient bool) { t.Run("materializeSales", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() keyspace := "product" applyVSchema(t, materializeSalesVSchema, keyspace) materialize(t, materializeSalesSpec, useVtctldClient) + defaultCell := vc.Cells[vc.CellNames[0]] productTab := vc.Cells[defaultCell.Name].Keyspaces["product"].Shards["0"].Tablets["zone1-100"].Vttablet catchup(t, productTab, "sales", "Materialize") waitForRowCount(t, vtgateConn, "product", "sales", 2) @@ -1321,8 +1317,11 @@ func materializeSales(t *testing.T, useVtctldClient bool) { func materializeMerchantSales(t *testing.T, useVtctldClient bool) { t.Run("materializeMerchantSales", 
func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "msales" materialize(t, materializeMerchantSalesSpec, useVtctldClient) + defaultCell := vc.Cells[vc.CellNames[0]] merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { catchup(t, tab, workflow, "Materialize") @@ -1335,10 +1334,13 @@ func materializeMerchantSales(t *testing.T, useVtctldClient bool) { func materializeMerchantOrders(t *testing.T, useVtctldClient bool) { t.Run("materializeMerchantOrders", func(t *testing.T) { + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() workflow := "morders" keyspace := merchantKeyspace applyVSchema(t, merchantOrdersVSchema, keyspace) materialize(t, materializeMerchantOrdersSpec, useVtctldClient) + defaultCell := vc.Cells[vc.CellNames[0]] merchantTablets := vc.getVttabletsInKeyspace(t, defaultCell, merchantKeyspace, "primary") for _, tab := range merchantTablets { catchup(t, tab, workflow, "Materialize") @@ -1476,7 +1478,7 @@ func reshardAction(t *testing.T, action, workflow, keyspaceName, sourceShards, t action, workflow, output) } if err != nil { - t.Fatalf("Reshard %s command failed with %+v\n", action, err) + t.Fatalf("Reshard %s command failed with %+v\nOutput: %s", action, err, output) } } @@ -1599,6 +1601,7 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { log.Infof("------------------- START Extra debug info %s Switch writes %s", msg, ksWorkflow) ksShards := []string{"product/0", "customer/-80", "customer/80-"} printShardPositions(vc, ksShards) + defaultCell := vc.Cells[vc.CellNames[0]] custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] customerTab1 := custKs.Shards["-80"].Tablets["zone1-200"].Vttablet customerTab2 := custKs.Shards["80-"].Tablets["zone1-300"].Vttablet diff --git 
a/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go new file mode 100644 index 00000000000..bca51512a3c --- /dev/null +++ b/go/test/endtoend/vreplication/vreplication_vtctldclient_cli_test.go @@ -0,0 +1,440 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "encoding/json" + "fmt" + "slices" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/encoding/protojson" + + "vitess.io/vitess/go/test/endtoend/cluster" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// TestVtctldclientCLI tests the vreplication vtctldclient CLI commands, primarily to check that non-standard flags +// are being handled correctly. The other end-to-end tests are expected to test the various common workflows. 
+func TestVtctldclientCLI(t *testing.T) { + setSidecarDBName("_vt") + var err error + origDefaultRdonly := defaultRdonly + defer func() { + defaultRdonly = origDefaultRdonly + }() + defaultRdonly = 0 + vc = setupMinimalCluster(t) + + err = vc.Vtctl.AddCellInfo("zone2") + require.NoError(t, err) + zone2, err := vc.AddCell(t, "zone2") + require.NoError(t, err) + require.NotNil(t, zone2) + defer vc.TearDown() + + sourceKeyspaceName := "product" + targetKeyspaceName := "customer" + var mt iMoveTables + workflowName := "wf1" + targetTabs := setupMinimalCustomerKeyspace(t) + + t.Run("WorkflowList", func(t *testing.T) { + testWorkflowList(t, sourceKeyspaceName, targetKeyspaceName) + }) + t.Run("MoveTablesCreateFlags1", func(t *testing.T) { + testMoveTablesFlags1(t, &mt, sourceKeyspaceName, targetKeyspaceName, workflowName, targetTabs) + }) + t.Run("MoveTablesCreateFlags2", func(t *testing.T) { + testMoveTablesFlags2(t, &mt, sourceKeyspaceName, targetKeyspaceName, workflowName, targetTabs) + }) + t.Run("MoveTablesCompleteFlags3", func(t *testing.T) { + testMoveTablesFlags3(t, sourceKeyspaceName, targetKeyspaceName, targetTabs) + }) + t.Run("Reshard", func(t *testing.T) { + cell := vc.Cells["zone1"] + targetKeyspace := cell.Keyspaces[targetKeyspaceName] + sourceShard := "-80" + newShards := "-40,40-80" + require.NoError(t, vc.AddShards(t, []*Cell{cell}, targetKeyspace, newShards, 1, 0, 400, nil)) + reshardWorkflowName := "reshard" + tablets := map[string]*cluster.VttabletProcess{ + "-40": targetKeyspace.Shards["-40"].Tablets["zone1-400"].Vttablet, + "40-80": targetKeyspace.Shards["40-80"].Tablets["zone1-500"].Vttablet, + } + splitShard(t, targetKeyspaceName, reshardWorkflowName, sourceShard, newShards, tablets) + }) +} + +// Tests several create flags and some complete flags and validates that some of them are set correctly for the workflow. 
+func testMoveTablesFlags1(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, workflowName string, targetTabs map[string]*cluster.VttabletProcess) { + tables := "customer,customer2" + createFlags := []string{"--auto-start=false", "--defer-secondary-keys=false", "--stop-after-copy", + "--no-routing-rules", "--on-ddl", "STOP", "--exclude-tables", "customer2", + "--tablet-types", "primary,rdonly", "--tablet-types-in-preference-order=true", + "--all-cells", + } + completeFlags := []string{"--keep-routing-rules", "--keep-data"} + switchFlags := []string{} + // Test one set of MoveTable flags. + *mt = createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, tables, createFlags, completeFlags, switchFlags) + (*mt).Show() + moveTablesResponse := getMoveTablesShowResponse(mt) + workflowResponse := getWorkflow(targetKeyspace, workflowName) + + // also validates that MoveTables Show and Workflow Show return the same output. + require.EqualValues(t, moveTablesResponse.CloneVT(), workflowResponse) + + // Validate that the flags are set correctly in the database. + validateMoveTablesWorkflow(t, workflowResponse.Workflows) + // Since we used --no-routing-rules, there should be no routing rules. + confirmNoRoutingRules(t) +} + +func getMoveTablesShowResponse(mt *iMoveTables) *vtctldatapb.GetWorkflowsResponse { + moveTablesOutput := (*mt).GetLastOutput() + var moveTablesResponse vtctldatapb.GetWorkflowsResponse + err := protojson.Unmarshal([]byte(moveTablesOutput), &moveTablesResponse) + require.NoError(vc.t, err) + moveTablesResponse.Workflows[0].MaxVReplicationTransactionLag = 0 + moveTablesResponse.Workflows[0].MaxVReplicationLag = 0 + return moveTablesResponse.CloneVT() +} + +// Validates some of the flags created from the previous test. 
+func testMoveTablesFlags2(t *testing.T, mt *iMoveTables, sourceKeyspace, targetKeyspace, workflowName string, targetTabs map[string]*cluster.VttabletProcess) { + ksWorkflow := fmt.Sprintf("%s.%s", targetKeyspace, workflowName) + (*mt).Start() // Need to start because we set auto-start to false. + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + confirmNoRoutingRules(t) + for _, tab := range targetTabs { + alias := fmt.Sprintf("zone1-%d", tab.TabletUID) + query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = 'wf1'" + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", alias, query) + require.NoError(t, err, output) + } + confirmNoRoutingRules(t) + (*mt).Start() // Need to start because we set stop-after-copy to true. + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + (*mt).Stop() // Test stopping workflow. + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + (*mt).Start() + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + for _, tab := range targetTabs { + catchup(t, tab, workflowName, "MoveTables") + } + (*mt).SwitchReadsAndWrites() + (*mt).Complete() + confirmRoutingRulesExist(t) + // Confirm that --keep-data was honored. 
+ require.True(t, checkTablesExist(t, "zone1-100", []string{"customer", "customer2"})) +} + +// Tests SwitchTraffic and Complete flags +func testMoveTablesFlags3(t *testing.T, sourceKeyspace, targetKeyspace string, targetTabs map[string]*cluster.VttabletProcess) { + for _, tab := range targetTabs { + alias := fmt.Sprintf("zone1-%d", tab.TabletUID) + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", alias, "drop table customer") + require.NoError(t, err, output) + } + createFlags := []string{} + completeFlags := []string{"--rename-tables"} + tables := "customer2" + switchFlags := []string{"--enable-reverse-replication=false"} + mt := createMoveTables(t, sourceKeyspace, targetKeyspace, workflowName, tables, createFlags, completeFlags, switchFlags) + mt.Start() // Need to start because we set stop-after-copy to true. + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + mt.Stop() // Test stopping workflow. + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + mt.Start() + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String()) + for _, tab := range targetTabs { + catchup(t, tab, workflowName, "MoveTables") + } + mt.SwitchReadsAndWrites() + mt.Complete() + // Confirm that the source tables were renamed. + require.True(t, checkTablesExist(t, "zone1-100", []string{"_customer2_old"})) + require.False(t, checkTablesExist(t, "zone1-100", []string{"customer2"})) +} + +// Create two workflows in order to confirm that listing all workflows works. 
+func testWorkflowList(t *testing.T, sourceKeyspace, targetKeyspace string) { + createFlags := []string{"--auto-start=false", "--tablet-types", + "primary,rdonly", "--tablet-types-in-preference-order=true", "--all-cells", + } + wfNames := []string{"list1", "list2"} + tables := []string{"customer", "customer2"} + for i := range wfNames { + mt := createMoveTables(t, sourceKeyspace, targetKeyspace, wfNames[i], tables[i], createFlags, nil, nil) + defer mt.Cancel() + } + slices.Sort(wfNames) + + workflowNames := workflowList(targetKeyspace) + slices.Sort(workflowNames) + require.EqualValues(t, wfNames, workflowNames) + + workflows := getWorkflows(targetKeyspace) + workflowNames = make([]string, len(workflows.Workflows)) + for i := range workflows.Workflows { + workflowNames[i] = workflows.Workflows[i].Name + } + slices.Sort(workflowNames) + require.EqualValues(t, wfNames, workflowNames) +} + +func createMoveTables(t *testing.T, sourceKeyspace, targetKeyspace, workflowName, tables string, + createFlags, completeFlags, switchFlags []string) iMoveTables { + mt := newMoveTables(vc, &moveTablesWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: targetKeyspace, + }, + sourceKeyspace: sourceKeyspace, + tables: tables, + createFlags: createFlags, + completeFlags: completeFlags, + switchFlags: switchFlags, + }, workflowFlavorVtctld) + mt.Create() + return mt +} + +// reshard helpers + +func splitShard(t *testing.T, keyspace, workflowName, sourceShards, targetShards string, targetTabs map[string]*cluster.VttabletProcess) { + createFlags := []string{"--auto-start=false", "--defer-secondary-keys=false", "--stop-after-copy", + "--on-ddl", "STOP", "--tablet-types", "primary,rdonly", "--tablet-types-in-preference-order=true", + "--all-cells", "--format=json", + } + rs := newReshard(vc, &reshardWorkflow{ + workflowInfo: &workflowInfo{ + vc: vc, + workflowName: workflowName, + targetKeyspace: keyspace, + }, + sourceShards: sourceShards, + 
targetShards: targetShards, + createFlags: createFlags, + }, workflowFlavorVtctld) + + ksWorkflow := fmt.Sprintf("%s.%s", keyspace, workflowName) + rs.Create() + validateReshardResponse(rs) + workflowResponse := getWorkflow(keyspace, workflowName) + reshardShowResponse := getReshardShowResponse(&rs) + require.EqualValues(t, reshardShowResponse, workflowResponse) + validateReshardWorkflow(t, workflowResponse.Workflows) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Stopped.String()) + rs.Start() + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + for _, tab := range targetTabs { + alias := fmt.Sprintf("zone1-%d", tab.TabletUID) + query := "update _vt.vreplication set source := replace(source, 'stop_after_copy:true', 'stop_after_copy:false') where db_name = 'vt_customer' and workflow = '" + workflowName + "'" + output, err := vc.VtctlClient.ExecuteCommandWithOutput("ExecuteFetchAsDba", alias, query) + require.NoError(t, err, output) + } + rs.Start() + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + rs.Stop() + waitForWorkflowState(t, vc, ksWorkflow, binlogdatapb.VReplicationWorkflowState_Stopped.String()) + rs.Start() + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", keyspace, workflowName), binlogdatapb.VReplicationWorkflowState_Running.String()) + for _, targetTab := range targetTabs { + catchup(t, targetTab, workflowName, "Reshard") + } + vdiff(t, keyspace, workflowName, "zone1", false, true, nil) + + rs.SwitchReadsAndWrites() + waitForLowLag(t, keyspace, workflowName+"_reverse") + vdiff(t, keyspace, workflowName+"_reverse", "zone1", true, false, nil) + + rs.ReverseReadsAndWrites() + waitForLowLag(t, keyspace, workflowName) + vdiff(t, keyspace, workflowName, "zone1", false, true, nil) + rs.SwitchReadsAndWrites() + rs.Complete() +} + +func getReshardShowResponse(rs 
*iReshard) *vtctldatapb.GetWorkflowsResponse { + (*rs).Show() + reshardOutput := (*rs).GetLastOutput() + var reshardResponse vtctldatapb.GetWorkflowsResponse + err := protojson.Unmarshal([]byte(reshardOutput), &reshardResponse) + require.NoError(vc.t, err) + reshardResponse.Workflows[0].MaxVReplicationTransactionLag = 0 + reshardResponse.Workflows[0].MaxVReplicationLag = 0 + return reshardResponse.CloneVT() +} + +func validateReshardResponse(rs iReshard) { + resp := getReshardResponse(rs) + require.NotNil(vc.t, resp) + require.NotNil(vc.t, resp.ShardStreams) + require.Equal(vc.t, len(resp.ShardStreams), 2) + keyspace := "customer" + for _, shard := range []string{"-40", "40-80"} { + streams := resp.ShardStreams[fmt.Sprintf("%s/%s", keyspace, shard)] + require.Equal(vc.t, 1, len(streams.Streams)) + require.Equal(vc.t, binlogdatapb.VReplicationWorkflowState_Stopped.String(), streams.Streams[0].Status) + } +} + +func validateReshardWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) { + require.Equal(t, 1, len(workflows)) + wf := workflows[0] + require.Equal(t, "reshard", wf.Name) + require.Equal(t, binlogdatapb.VReplicationWorkflowType_Reshard.String(), wf.WorkflowType) + require.Equal(t, "None", wf.WorkflowSubType) + require.Equal(t, "customer", wf.Target.Keyspace) + require.Equal(t, 2, len(wf.Target.Shards)) + require.Equal(t, "customer", wf.Source.Keyspace) + require.Equal(t, 1, len(wf.Source.Shards)) + require.False(t, wf.DeferSecondaryKeys) + + require.GreaterOrEqual(t, len(wf.ShardStreams), int(1)) + oneStream := maps.Values(wf.ShardStreams)[0] + require.NotNil(t, oneStream) + + stream := oneStream.Streams[0] + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Stopped.String(), stream.State) + require.Equal(t, stream.TabletSelectionPreference, tabletmanagerdatapb.TabletSelectionPreference_INORDER) + require.True(t, slices.Equal([]topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_RDONLY}, stream.TabletTypes)) + 
require.True(t, slices.Equal([]string{"zone1", "zone2"}, stream.Cells)) + + bls := stream.BinlogSource + require.Equal(t, binlogdatapb.OnDDLAction_STOP, bls.OnDdl) + require.True(t, bls.StopAfterCopy) + +} + +func getReshardResponse(rs iReshard) *vtctldatapb.WorkflowStatusResponse { + reshardOutput := rs.GetLastOutput() + var reshardResponse vtctldatapb.WorkflowStatusResponse + err := protojson.Unmarshal([]byte(reshardOutput), &reshardResponse) + require.NoError(vc.t, err) + return reshardResponse.CloneVT() +} + +// helper functions + +func getWorkflow(targetKeyspace, workflow string) *vtctldatapb.GetWorkflowsResponse { + workflowOutput, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKeyspace, "show", "--workflow", workflow) + require.NoError(vc.t, err) + var workflowResponse vtctldatapb.GetWorkflowsResponse + err = protojson.Unmarshal([]byte(workflowOutput), &workflowResponse) + require.NoError(vc.t, err) + workflowResponse.Workflows[0].MaxVReplicationTransactionLag = 0 + workflowResponse.Workflows[0].MaxVReplicationLag = 0 + return workflowResponse.CloneVT() +} + +func getWorkflows(targetKeyspace string) *vtctldatapb.GetWorkflowsResponse { + getWorkflowsOutput, err := vc.VtctldClient.ExecuteCommandWithOutput("GetWorkflows", targetKeyspace, "--show-all", "--compact", "--include-logs=false") + require.NoError(vc.t, err) + var getWorkflowsResponse vtctldatapb.GetWorkflowsResponse + err = protojson.Unmarshal([]byte(getWorkflowsOutput), &getWorkflowsResponse) + require.NoError(vc.t, err) + return getWorkflowsResponse.CloneVT() +} + +func workflowList(targetKeyspace string) []string { + workflowListOutput, err := vc.VtctldClient.ExecuteCommandWithOutput("Workflow", "--keyspace", targetKeyspace, "list") + require.NoError(vc.t, err) + var workflowList []string + err = json.Unmarshal([]byte(workflowListOutput), &workflowList) + require.NoError(vc.t, err) + return workflowList +} + +func checkTablesExist(t *testing.T, tabletAlias string, 
tables []string) bool { + tablesResponse, err := vc.VtctldClient.ExecuteCommandWithOutput("GetSchema", tabletAlias, "--tables", strings.Join(tables, ","), "--table-names-only") + require.NoError(t, err) + tablesFound := strings.Split(tablesResponse, "\n") + for _, table := range tables { + found := false + for _, tableFound := range tablesFound { + if tableFound == table { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +func getRoutingRules(t *testing.T) *vschemapb.RoutingRules { + routingRules, err := vc.VtctldClient.ExecuteCommandWithOutput("GetRoutingRules") + require.NoError(t, err) + var routingRulesResponse vschemapb.RoutingRules + err = protojson.Unmarshal([]byte(routingRules), &routingRulesResponse) + require.NoError(t, err) + return &routingRulesResponse +} + +func confirmNoRoutingRules(t *testing.T) { + routingRulesResponse := getRoutingRules(t) + require.Zero(t, len(routingRulesResponse.Rules)) +} + +func confirmRoutingRulesExist(t *testing.T) { + routingRulesResponse := getRoutingRules(t) + require.NotZero(t, len(routingRulesResponse.Rules)) +} + +// We only want to validate non-standard attributes that are set by the CLI. The other end-to-end tests validate the rest. +// We also check some of the standard attributes to make sure they are set correctly. 
+func validateMoveTablesWorkflow(t *testing.T, workflows []*vtctldatapb.Workflow) { + require.Equal(t, 1, len(workflows)) + wf := workflows[0] + require.Equal(t, "wf1", wf.Name) + require.Equal(t, binlogdatapb.VReplicationWorkflowType_MoveTables.String(), wf.WorkflowType) + require.Equal(t, "None", wf.WorkflowSubType) + require.Equal(t, "customer", wf.Target.Keyspace) + require.Equal(t, 2, len(wf.Target.Shards)) + require.Equal(t, "product", wf.Source.Keyspace) + require.Equal(t, 1, len(wf.Source.Shards)) + require.False(t, wf.DeferSecondaryKeys) + + require.GreaterOrEqual(t, len(wf.ShardStreams), int(1)) + oneStream := maps.Values(wf.ShardStreams)[0] + require.NotNil(t, oneStream) + + stream := oneStream.Streams[0] + require.Equal(t, binlogdatapb.VReplicationWorkflowState_Stopped.String(), stream.State) + require.Equal(t, stream.TabletSelectionPreference, tabletmanagerdatapb.TabletSelectionPreference_INORDER) + require.True(t, slices.Equal([]topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_RDONLY}, stream.TabletTypes)) + require.True(t, slices.Equal([]string{"zone1", "zone2"}, stream.Cells)) + + bls := stream.BinlogSource + require.Equalf(t, 1, len(bls.Filter.Rules), "Rules are %+v", bls.Filter.Rules) // only customer, customer2 should be excluded + require.Equal(t, binlogdatapb.OnDDLAction_STOP, bls.OnDdl) + require.True(t, bls.StopAfterCopy) +} diff --git a/go/test/endtoend/vreplication/vschema_load_test.go b/go/test/endtoend/vreplication/vschema_load_test.go index a5cac4c68f8..6ca8dcfe472 100644 --- a/go/test/endtoend/vreplication/vschema_load_test.go +++ b/go/test/endtoend/vreplication/vschema_load_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -40,23 +39,13 @@ func TestVSchemaChangesUnderLoad(t *testing.T) { extendedTimeout := 
defaultTimeout * 4 - defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestVSchemaChanges", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) + defer vc.TearDown() - require.NotNil(t, vc) - - defer vc.TearDown(t) - - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, 1, 0, 100, sourceKsOpts) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "product", "0") - require.NoError(t, err) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", "product", "0"), 1, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + + vtgateConn := vc.GetVTGateConn(t) defer vtgateConn.Close() // ch is used to signal that there is significant data inserted into the tables and when a lot of vschema changes have been applied diff --git a/go/test/endtoend/vreplication/vstream_test.go b/go/test/endtoend/vreplication/vstream_test.go index 5c5e6a80130..e13c3e24e80 100644 --- a/go/test/endtoend/vreplication/vstream_test.go +++ b/go/test/endtoend/vreplication/vstream_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -43,26 +42,21 @@ import ( // - We ensure that this works through active reparents and doesn't miss any events // - We stream only from the primary and while streaming we reparent to a replica and then back to the original primary func testVStreamWithFailover(t *testing.T, failover bool) { - defaultCellName := "zone1" - cells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestVStreamWithFailover", cells, mainClusterConfig) 
+ vc = NewVitessCluster(t, nil) + defer vc.TearDown() require.NotNil(t, vc) defaultReplicas = 2 defaultRdonly = 0 - defer vc.TearDown(t) - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "product", "0"), 3, 30*time.Second) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() - verifyClusterHealth(t, vc) insertInitialData(t) + vtgate := defaultCell.Vtgates[0] + t.Run("VStreamFrom", func(t *testing.T) { + testVStreamFrom(t, vtgate, "product", 2) + }) ctx := context.Background() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) if err != nil { @@ -90,6 +84,9 @@ func testVStreamWithFailover(t *testing.T, failover bool) { stopInserting := false id := 0 + vtgateConn := vc.GetVTGateConn(t) + defer vtgateConn.Close() + // first goroutine that keeps inserting rows into table being streamed until some time elapses after second PRS go func() { for { @@ -217,44 +214,41 @@ const vschemaSharded = ` ` func insertRow(keyspace, table string, id int) { - vtgateConn.ExecuteFetch(fmt.Sprintf("use %s;", keyspace), 1000, false) + vtgateConn := getConnectionNoError(vc.t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + // Due to race conditions this call is sometimes made after vtgates have shutdown. In that case just return. 
+ if vtgateConn == nil { + return + } + vtgateConn.ExecuteFetch(fmt.Sprintf("use %s", keyspace), 1000, false) vtgateConn.ExecuteFetch("begin", 1000, false) _, err := vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s (name) values ('%s%d')", table, table, id), 1000, false) if err != nil { - log.Infof("error inserting row %d: %v", id, err) + log.Errorf("error inserting row %d: %v", id, err) } vtgateConn.ExecuteFetch("commit", 1000, false) } type numEvents struct { numRowEvents, numJournalEvents int64 - numLessThan80Events, numGreaterThan80Events int64 - numLessThan40Events, numGreaterThan40Events int64 + numDash80Events, num80DashEvents int64 + numDash40Events, num40DashEvents int64 numShard0BeforeReshardEvents, numShard0AfterReshardEvents int64 } // tests the StopOnReshard flag func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID int) *numEvents { defaultCellName := "zone1" - allCells := []string{"zone1"} - allCellNames = "zone1" - vc = NewVitessCluster(t, "TestVStreamStopOnReshard", allCells, mainClusterConfig) + vc = NewVitessCluster(t, nil) require.NotNil(t, vc) defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] + defaultCell := vc.Cells[vc.CellNames[0]] vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - err := cluster.WaitForHealthyShard(vc.VtctldClient, "unsharded", "0") - require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) - defer vtgateConn.Close() verifyClusterHealth(t, vc) // some initial data @@ -325,13 +319,13 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID shard := ev.RowEvent.Shard switch 
shard { case "-80": - ne.numLessThan80Events++ + ne.numDash80Events++ case "80-": - ne.numGreaterThan80Events++ + ne.num80DashEvents++ case "-40": - ne.numLessThan40Events++ + ne.numDash40Events++ case "40-": - ne.numGreaterThan40Events++ + ne.num40DashEvents++ } ne.numRowEvents++ case binlogdatapb.VEventType_JOURNAL: @@ -385,29 +379,23 @@ func testVStreamStopOnReshardFlag(t *testing.T, stopOnReshard bool, baseTabletID // Validate that we can continue streaming from multiple keyspaces after first copying some tables and then resharding one of the keyspaces // Ensure that there are no missing row events during the resharding process. func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEvents { - defaultCellName := "zone1" - allCellNames = defaultCellName - allCells := []string{allCellNames} - vc = NewVitessCluster(t, "VStreamCopyMultiKeyspaceReshard", allCells, mainClusterConfig) - - require.NotNil(t, vc) + vc = NewVitessCluster(t, nil) ogdr := defaultReplicas defaultReplicas = 0 // because of CI resource constraints we can only run this test with primary tablets defer func(dr int) { defaultReplicas = dr }(ogdr) - defer vc.TearDown(t) + defer vc.TearDown() - defaultCell = vc.Cells[defaultCellName] - vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) - vtgate = defaultCell.Vtgates[0] - require.NotNil(t, vtgate) - vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", "unsharded", "0"), 1, 30*time.Second) + defaultCell := vc.Cells[vc.CellNames[0]] + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "unsharded", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID+100, nil) + require.NoError(t, err) - vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) defer vtgateConn.Close() 
verifyClusterHealth(t, vc) - vc.AddKeyspace(t, []*Cell{defaultCell}, "sharded", "-80,80-", vschemaSharded, schemaSharded, defaultReplicas, defaultRdonly, baseTabletID+200, nil) + _, err = vc.AddKeyspace(t, []*Cell{defaultCell}, "sharded", "-80,80-", vschemaSharded, schemaSharded, defaultReplicas, defaultRdonly, baseTabletID+200, nil) + require.NoError(t, err) ctx := context.Background() vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) @@ -468,13 +456,13 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven ne.numShard0BeforeReshardEvents++ } case "-80": - ne.numLessThan80Events++ + ne.numDash80Events++ case "80-": - ne.numGreaterThan80Events++ + ne.num80DashEvents++ case "-40": - ne.numLessThan40Events++ + ne.numDash40Events++ case "40-": - ne.numGreaterThan40Events++ + ne.num40DashEvents++ } ne.numRowEvents++ case binlogdatapb.VEventType_JOURNAL: @@ -522,10 +510,200 @@ func testVStreamCopyMultiKeyspaceReshard(t *testing.T, baseTabletID int) numEven customerResult := execVtgateQuery(t, vtgateConn, "sharded", "select count(*) from customer") insertedCustomerRows, err := customerResult.Rows[0][0].ToCastInt64() require.NoError(t, err) - require.Equal(t, insertedCustomerRows, ne.numLessThan80Events+ne.numGreaterThan80Events+ne.numLessThan40Events+ne.numGreaterThan40Events) + require.Equal(t, insertedCustomerRows, ne.numDash80Events+ne.num80DashEvents+ne.numDash40Events+ne.num40DashEvents) return ne } +// Validate that we can resume a VStream when the keyspace has been resharded +// while not streaming. Ensure that there we successfully transition from the +// old shards -- which are in the VGTID from the previous stream -- and that +// we miss no row events during the process. 
+func TestMultiVStreamsKeyspaceReshard(t *testing.T) { + ctx := context.Background() + ks := "testks" + wf := "multiVStreamsKeyspaceReshard" + baseTabletID := 100 + tabletType := topodatapb.TabletType_PRIMARY.String() + oldShards := "-80,80-" + newShards := "-40,40-80,80-c0,c0-" + oldShardRowEvents, newShardRowEvents := 0, 0 + vc = NewVitessCluster(t, nil) + defer vc.TearDown() + defaultCell := vc.Cells[vc.CellNames[0]] + ogdr := defaultReplicas + defaultReplicas = 0 // Because of CI resource constraints we can only run this test with primary tablets + defer func(dr int) { defaultReplicas = dr }(ogdr) + + // For our sequences etc. + _, err := vc.AddKeyspace(t, []*Cell{defaultCell}, "global", "0", vschemaUnsharded, schemaUnsharded, defaultReplicas, defaultRdonly, baseTabletID, nil) + require.NoError(t, err) + + // Setup the keyspace with our old/original shards. + keyspace, err := vc.AddKeyspace(t, []*Cell{defaultCell}, ks, oldShards, vschemaSharded, schemaSharded, defaultReplicas, defaultRdonly, baseTabletID+1000, nil) + require.NoError(t, err) + + // Add the new shards. + err = vc.AddShards(t, []*Cell{defaultCell}, keyspace, newShards, defaultReplicas, defaultRdonly, baseTabletID+2000, targetKsOpts) + require.NoError(t, err) + + vtgateConn := getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + + vstreamConn, err := vtgateconn.Dial(ctx, fmt.Sprintf("%s:%d", vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateGrpcPort)) + require.NoError(t, err) + defer vstreamConn.Close() + + // Ensure that we're starting with a clean slate. + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("delete from %s.customer", ks), 1000, false) + require.NoError(t, err) + + // Coordinate go-routines. 
+ streamCtx, streamCancel := context.WithTimeout(ctx, 1*time.Minute) + defer streamCancel() + done := make(chan struct{}) + + // First goroutine that keeps inserting rows into the table being streamed until the + // stream context is cancelled. + go func() { + id := 1 + for { + select { + case <-streamCtx.Done(): + // Give the VStream a little catch-up time before telling it to stop + // via the done channel. + time.Sleep(10 * time.Second) + close(done) + return + default: + insertRow(ks, "customer", id) + time.Sleep(250 * time.Millisecond) + id++ + } + } + }() + + // Create the Reshard workflow and wait for it to finish the copy phase. + reshardAction(t, "Create", wf, ks, oldShards, newShards, defaultCellName, tabletType) + waitForWorkflowState(t, vc, fmt.Sprintf("%s.%s", ks, wf), binlogdatapb.VReplicationWorkflowState_Running.String()) + + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "/.*", // Match all keyspaces just to be more realistic. + }}} + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + // Only stream the customer table and its sequence backing table. + Match: "/customer.*", + }}, + } + flags := &vtgatepb.VStreamFlags{} + + // Stream events but stop once we have a VGTID with positions for the old/original shards. + var newVGTID *binlogdatapb.VGtid + func() { + var reader vtgateconn.VStreamReader + reader, err = vstreamConn.VStream(ctx, topodatapb.TabletType_PRIMARY, vgtid, filter, flags) + require.NoError(t, err) + for { + evs, err := reader.Recv() + + switch err { + case nil: + for _, ev := range evs { + switch ev.Type { + case binlogdatapb.VEventType_ROW: + shard := ev.GetRowEvent().GetShard() + switch shard { + case "-80", "80-": + oldShardRowEvents++ + case "0": + // We expect some for the sequence backing table, but don't care. 
+ default: + require.FailNow(t, fmt.Sprintf("received event for unexpected shard: %s", shard)) + } + case binlogdatapb.VEventType_VGTID: + newVGTID = ev.GetVgtid() + if len(newVGTID.GetShardGtids()) == 3 { + // We want a VGTID with a position for the global shard and the old shards. + canStop := true + for _, sg := range newVGTID.GetShardGtids() { + if sg.GetGtid() == "" { + canStop = false + } + } + if canStop { + return + } + } + } + } + default: + require.FailNow(t, fmt.Sprintf("VStream returned unexpected error: %v", err)) + } + select { + case <-streamCtx.Done(): + return + default: + } + } + }() + + // Confirm that we have shard GTIDs for the global shard and the old/original shards. + require.Len(t, newVGTID.GetShardGtids(), 3) + + // Switch the traffic to the new shards. + reshardAction(t, "SwitchTraffic", wf, ks, oldShards, newShards, defaultCellName, tabletType) + + // Now start a new VStream from our previous VGTID which only has the old/original shards. + func() { + var reader vtgateconn.VStreamReader + reader, err = vstreamConn.VStream(ctx, topodatapb.TabletType_PRIMARY, newVGTID, filter, flags) + require.NoError(t, err) + for { + evs, err := reader.Recv() + + switch err { + case nil: + for _, ev := range evs { + switch ev.Type { + case binlogdatapb.VEventType_ROW: + shard := ev.RowEvent.Shard + switch shard { + case "-80", "80-": + oldShardRowEvents++ + case "-40", "40-80", "80-c0", "c0-": + newShardRowEvents++ + case "0": + // Again, we expect some for the sequence backing table, but don't care. + default: + require.FailNow(t, fmt.Sprintf("received event for unexpected shard: %s", shard)) + } + } + } + default: + require.FailNow(t, fmt.Sprintf("VStream returned unexpected error: %v", err)) + } + select { + case <-done: + return + default: + } + } + }() + + // We should have a mix of events across the old and new shards. 
+ require.NotZero(t, oldShardRowEvents) + require.NotZero(t, newShardRowEvents) + + // The number of row events streamed by the VStream API should match the number of rows inserted. + customerResult := execVtgateQuery(t, vtgateConn, ks, "select count(*) from customer") + customerCount, err := customerResult.Rows[0][0].ToInt64() + require.NoError(t, err) + require.Equal(t, customerCount, int64(oldShardRowEvents+newShardRowEvents)) +} + func TestVStreamFailover(t *testing.T) { testVStreamWithFailover(t, true) } @@ -534,20 +712,20 @@ func TestVStreamStopOnReshardTrue(t *testing.T) { ne := testVStreamStopOnReshardFlag(t, true, 1000) require.Greater(t, ne.numJournalEvents, int64(0)) require.NotZero(t, ne.numRowEvents) - require.NotZero(t, ne.numLessThan80Events) - require.NotZero(t, ne.numGreaterThan80Events) - require.Zero(t, ne.numLessThan40Events) - require.Zero(t, ne.numGreaterThan40Events) + require.NotZero(t, ne.numDash80Events) + require.NotZero(t, ne.num80DashEvents) + require.Zero(t, ne.numDash40Events) + require.Zero(t, ne.num40DashEvents) } func TestVStreamStopOnReshardFalse(t *testing.T) { ne := testVStreamStopOnReshardFlag(t, false, 2000) require.Equal(t, int64(0), ne.numJournalEvents) require.NotZero(t, ne.numRowEvents) - require.NotZero(t, ne.numLessThan80Events) - require.NotZero(t, ne.numGreaterThan80Events) - require.NotZero(t, ne.numLessThan40Events) - require.NotZero(t, ne.numGreaterThan40Events) + require.NotZero(t, ne.numDash80Events) + require.NotZero(t, ne.num80DashEvents) + require.NotZero(t, ne.numDash40Events) + require.NotZero(t, ne.num40DashEvents) } func TestVStreamWithKeyspacesToWatch(t *testing.T) { @@ -564,8 +742,8 @@ func TestVStreamCopyMultiKeyspaceReshard(t *testing.T) { require.NotZero(t, ne.numRowEvents) require.NotZero(t, ne.numShard0BeforeReshardEvents) require.NotZero(t, ne.numShard0AfterReshardEvents) - require.NotZero(t, ne.numLessThan80Events) - require.NotZero(t, ne.numGreaterThan80Events) - require.NotZero(t, 
ne.numLessThan40Events) - require.NotZero(t, ne.numGreaterThan40Events) + require.NotZero(t, ne.numDash80Events) + require.NotZero(t, ne.num80DashEvents) + require.NotZero(t, ne.numDash40Events) + require.NotZero(t, ne.num40DashEvents) } diff --git a/go/test/endtoend/vreplication/wrappers_test.go b/go/test/endtoend/vreplication/wrappers_test.go index 6bd0bbb19d8..e1028fafa9f 100644 --- a/go/test/endtoend/vreplication/wrappers_test.go +++ b/go/test/endtoend/vreplication/wrappers_test.go @@ -17,59 +17,86 @@ limitations under the License. package vreplication import ( - "math/rand" + "math/rand/v2" "strconv" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) -type moveTablesFlavor int +type iWorkflow interface { + Create() + Show() + SwitchReads() + SwitchWrites() + SwitchReadsAndWrites() + ReverseReadsAndWrites() + Cancel() + Complete() + Flavor() string + GetLastOutput() string + Start() + Stop() +} + +type workflowFlavor int const ( - moveTablesFlavorRandom moveTablesFlavor = iota - moveTablesFlavorVtctl - moveTablesFlavorVtctld + workflowFlavorRandom workflowFlavor = iota + workflowFlavorVtctl + workflowFlavorVtctld ) -var moveTablesFlavors = []moveTablesFlavor{ - moveTablesFlavorVtctl, - moveTablesFlavorVtctld, +var workflowFlavors = []workflowFlavor{ + workflowFlavorVtctl, + workflowFlavorVtctld, +} + +var workflowFlavorNames = map[workflowFlavor]string{ + workflowFlavorVtctl: "vtctl", + workflowFlavorVtctld: "vtctld", } -type moveTables struct { +type workflowInfo struct { vc *VitessCluster workflowName string targetKeyspace string + tabletTypes string +} + +// MoveTables wrappers + +type moveTablesWorkflow struct { + *workflowInfo sourceKeyspace string tables string atomicCopy bool sourceShards string + + // currently only used by vtctld + lastOutput string + createFlags []string + completeFlags []string + switchFlags []string + showFlags []string } type iMoveTables interface { - 
Create() - Show() - SwitchReads() - SwitchWrites() - SwitchReadsAndWrites() - ReverseReadsAndWrites() - Cancel() - Complete() - Flavor() string + iWorkflow } -func newMoveTables(vc *VitessCluster, mt *moveTables, flavor moveTablesFlavor) iMoveTables { +func newMoveTables(vc *VitessCluster, mt *moveTablesWorkflow, flavor workflowFlavor) iMoveTables { mt.vc = vc var mt2 iMoveTables - if flavor == moveTablesFlavorRandom { - flavor = moveTablesFlavors[rand.Intn(len(moveTablesFlavors))] + if flavor == workflowFlavorRandom { + flavor = workflowFlavors[rand.IntN(len(workflowFlavors))] } switch flavor { - case moveTablesFlavorVtctl: + case workflowFlavorVtctl: mt2 = newVtctlMoveTables(mt) - case moveTablesFlavorVtctld: + case workflowFlavorVtctld: mt2 = newVtctldMoveTables(mt) default: panic("unreachable") @@ -79,69 +106,85 @@ func newMoveTables(vc *VitessCluster, mt *moveTables, flavor moveTablesFlavor) i } type VtctlMoveTables struct { - *moveTables + *moveTablesWorkflow } func (vmt *VtctlMoveTables) Flavor() string { return "vtctl" } -func newVtctlMoveTables(mt *moveTables) *VtctlMoveTables { +func newVtctlMoveTables(mt *moveTablesWorkflow) *VtctlMoveTables { return &VtctlMoveTables{mt} } func (vmt *VtctlMoveTables) Create() { - log.Infof("vmt is %+v", vmt.vc, vmt.tables) - err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionCreate, "", vmt.sourceShards, "", vmt.atomicCopy) - require.NoError(vmt.vc.t, err) + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_MoveTables + vmt.exec(workflowActionCreate) } func (vmt *VtctlMoveTables) SwitchReadsAndWrites() { - err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionSwitchTraffic, "", "", "", vmt.atomicCopy) + err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionSwitchTraffic, "", "", "", 
defaultWorkflowExecOptions) require.NoError(vmt.vc.t, err) } func (vmt *VtctlMoveTables) ReverseReadsAndWrites() { - err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionReverseTraffic, "", "", "", vmt.atomicCopy) + err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, workflowActionReverseTraffic, "", "", "", defaultWorkflowExecOptions) require.NoError(vmt.vc.t, err) } func (vmt *VtctlMoveTables) Show() { - //TODO implement me + // TODO implement me panic("implement me") } +func (vmt *VtctlMoveTables) exec(action string) { + options := &workflowExecOptions{ + deferSecondaryKeys: false, + atomicCopy: vmt.atomicCopy, + } + err := tstWorkflowExecVtctl(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, + vmt.tables, action, vmt.tabletTypes, vmt.sourceShards, "", options) + require.NoError(vmt.vc.t, err) +} func (vmt *VtctlMoveTables) SwitchReads() { - //TODO implement me + // TODO implement me panic("implement me") } func (vmt *VtctlMoveTables) SwitchWrites() { - //TODO implement me + // TODO implement me panic("implement me") } func (vmt *VtctlMoveTables) Cancel() { - err := tstWorkflowExec(vmt.vc.t, "", vmt.workflowName, vmt.sourceKeyspace, vmt.targetKeyspace, - vmt.tables, workflowActionCancel, "", "", "", vmt.atomicCopy) - require.NoError(vmt.vc.t, err) + vmt.exec(workflowActionCancel) } func (vmt *VtctlMoveTables) Complete() { - //TODO implement me + vmt.exec(workflowActionComplete) +} + +func (vmt *VtctlMoveTables) GetLastOutput() string { + return vmt.lastOutput +} + +func (vmt *VtctlMoveTables) Start() { + panic("implement me") +} + +func (vmt *VtctlMoveTables) Stop() { panic("implement me") } var _ iMoveTables = (*VtctldMoveTables)(nil) type VtctldMoveTables struct { - *moveTables + *moveTablesWorkflow } -func newVtctldMoveTables(mt *moveTables) *VtctldMoveTables { +func newVtctldMoveTables(mt 
*moveTablesWorkflow) *VtctldMoveTables { return &VtctldMoveTables{mt} } @@ -152,8 +195,9 @@ func (v VtctldMoveTables) Flavor() string { func (v VtctldMoveTables) exec(args ...string) { args2 := []string{"MoveTables", "--workflow=" + v.workflowName, "--target-keyspace=" + v.targetKeyspace} args2 = append(args2, args...) - if err := vc.VtctldClient.ExecuteCommand(args2...); err != nil { - v.vc.t.Fatalf("failed to create MoveTables workflow: %v", err) + var err error + if v.lastOutput, err = vc.VtctldClient.ExecuteCommandWithOutput(args2...); err != nil { + require.FailNowf(v.vc.t, "failed MoveTables action", "%v: %s", err, v.lastOutput) } } @@ -170,11 +214,14 @@ func (v VtctldMoveTables) Create() { if v.sourceShards != "" { args = append(args, "--source-shards="+v.sourceShards) } + args = append(args, v.createFlags...) v.exec(args...) } func (v VtctldMoveTables) SwitchReadsAndWrites() { - v.exec("SwitchTraffic") + args := []string{"SwitchTraffic"} + args = append(args, v.switchFlags...) + v.exec(args...) } func (v VtctldMoveTables) ReverseReadsAndWrites() { @@ -182,18 +229,21 @@ func (v VtctldMoveTables) ReverseReadsAndWrites() { } func (v VtctldMoveTables) Show() { - //TODO implement me - panic("implement me") + args := []string{"Show"} + args = append(args, v.showFlags...) + v.exec(args...) } func (v VtctldMoveTables) SwitchReads() { - //TODO implement me - panic("implement me") + args := []string{"SwitchTraffic", "--tablet-types=rdonly,replica"} + args = append(args, v.switchFlags...) + v.exec(args...) } func (v VtctldMoveTables) SwitchWrites() { - //TODO implement me - panic("implement me") + args := []string{"SwitchTraffic", "--tablet-types=primary"} + args = append(args, v.switchFlags...) + v.exec(args...) } func (v VtctldMoveTables) Cancel() { @@ -201,6 +251,210 @@ func (v VtctldMoveTables) Cancel() { } func (v VtctldMoveTables) Complete() { - //TODO implement me + args := []string{"Complete"} + args = append(args, v.completeFlags...) + v.exec(args...) 
+} + +func (v VtctldMoveTables) GetLastOutput() string { + return v.lastOutput +} + +func (v VtctldMoveTables) Start() { + v.exec("Start") +} + +func (v VtctldMoveTables) Stop() { + v.exec("Stop") +} + +// Reshard wrappers + +type reshardWorkflow struct { + *workflowInfo + sourceShards string + targetShards string + skipSchemaCopy bool + + // currently only used by vtctld + lastOutput string + createFlags []string + completeFlags []string + cancelFlags []string + switchFlags []string +} + +type iReshard interface { + iWorkflow +} + +func newReshard(vc *VitessCluster, rs *reshardWorkflow, flavor workflowFlavor) iReshard { + rs.vc = vc + var rs2 iReshard + if flavor == workflowFlavorRandom { + flavor = workflowFlavors[rand.IntN(len(workflowFlavors))] + } + switch flavor { + case workflowFlavorVtctl: + rs2 = newVtctlReshard(rs) + case workflowFlavorVtctld: + rs2 = newVtctldReshard(rs) + default: + panic("unreachable") + } + log.Infof("Using reshard flavor: %s", rs2.Flavor()) + return rs2 +} + +type VtctlReshard struct { + *reshardWorkflow +} + +func (vrs *VtctlReshard) Flavor() string { + return "vtctl" +} + +func newVtctlReshard(rs *reshardWorkflow) *VtctlReshard { + return &VtctlReshard{rs} +} + +func (vrs *VtctlReshard) Create() { + currentWorkflowType = binlogdatapb.VReplicationWorkflowType_Reshard + vrs.exec(workflowActionCreate) +} + +func (vrs *VtctlReshard) SwitchReadsAndWrites() { + vrs.exec(workflowActionSwitchTraffic) +} + +func (vrs *VtctlReshard) ReverseReadsAndWrites() { + vrs.exec(workflowActionReverseTraffic) +} + +func (vrs *VtctlReshard) Show() { + // TODO implement me panic("implement me") } + +func (vrs *VtctlReshard) exec(action string) { + options := &workflowExecOptions{} + err := tstWorkflowExecVtctl(vrs.vc.t, "", vrs.workflowName, "", vrs.targetKeyspace, + "", action, vrs.tabletTypes, vrs.sourceShards, vrs.targetShards, options) + require.NoError(vrs.vc.t, err) +} + +func (vrs *VtctlReshard) SwitchReads() { + // TODO implement me + 
panic("implement me") +} + +func (vrs *VtctlReshard) SwitchWrites() { + // TODO implement me + panic("implement me") +} + +func (vrs *VtctlReshard) Cancel() { + vrs.exec(workflowActionCancel) +} + +func (vrs *VtctlReshard) Complete() { + vrs.exec(workflowActionComplete) +} + +func (vrs *VtctlReshard) GetLastOutput() string { + return vrs.lastOutput +} + +func (vrs *VtctlReshard) Start() { + panic("implement me") +} + +func (vrs *VtctlReshard) Stop() { + panic("implement me") +} + +var _ iReshard = (*VtctldReshard)(nil) + +type VtctldReshard struct { + *reshardWorkflow +} + +func newVtctldReshard(rs *reshardWorkflow) *VtctldReshard { + return &VtctldReshard{rs} +} + +func (v VtctldReshard) Flavor() string { + return "vtctld" +} + +func (v VtctldReshard) exec(args ...string) { + args2 := []string{"Reshard", "--workflow=" + v.workflowName, "--target-keyspace=" + v.targetKeyspace} + args2 = append(args2, args...) + var err error + if v.lastOutput, err = vc.VtctldClient.ExecuteCommandWithOutput(args2...); err != nil { + v.vc.t.Fatalf("failed to create Reshard workflow: %v: %s", err, v.lastOutput) + } +} + +func (v VtctldReshard) Create() { + args := []string{"Create"} + if v.sourceShards != "" { + args = append(args, "--source-shards="+v.sourceShards) + } + if v.targetShards != "" { + args = append(args, "--target-shards="+v.targetShards) + } + if v.skipSchemaCopy { + args = append(args, "--skip-schema-copy="+strconv.FormatBool(v.skipSchemaCopy)) + } + args = append(args, v.createFlags...) + v.exec(args...) +} + +func (v VtctldReshard) SwitchReadsAndWrites() { + args := []string{"SwitchTraffic"} + args = append(args, v.switchFlags...) + v.exec(args...) 
+} + +func (v VtctldReshard) ReverseReadsAndWrites() { + v.exec("ReverseTraffic") +} + +func (v VtctldReshard) Show() { + v.exec("Show") +} + +func (v VtctldReshard) SwitchReads() { + // TODO implement me + panic("implement me") +} + +func (v VtctldReshard) SwitchWrites() { + // TODO implement me + panic("implement me") +} + +func (v VtctldReshard) Cancel() { + args := []string{"Cancel"} + args = append(args, v.cancelFlags...) + v.exec(args...) +} + +func (v VtctldReshard) Complete() { + args := []string{"Complete"} + args = append(args, v.completeFlags...) + v.exec(args...) +} + +func (v VtctldReshard) GetLastOutput() string { + return v.lastOutput +} + +func (vrs *VtctldReshard) Start() { + vrs.exec("Start") +} + +func (vrs *VtctldReshard) Stop() { + vrs.exec("Stop") +} diff --git a/go/test/endtoend/vtcombo/recreate/recreate_test.go b/go/test/endtoend/vtcombo/recreate/recreate_test.go index a454adbd7e1..e66edb7688a 100644 --- a/go/test/endtoend/vtcombo/recreate/recreate_test.go +++ b/go/test/endtoend/vtcombo/recreate/recreate_test.go @@ -60,8 +60,7 @@ func TestMain(m *testing.M) { ReplicaCount: 2, }, { - Name: redirected, - ServedFrom: ks1, + Name: redirected, }, } diff --git a/go/test/endtoend/vtcombo/vttest_sample_test.go b/go/test/endtoend/vtcombo/vttest_sample_test.go index 91db0f8a2c0..daeb5e8deb9 100644 --- a/go/test/endtoend/vtcombo/vttest_sample_test.go +++ b/go/test/endtoend/vtcombo/vttest_sample_test.go @@ -48,7 +48,6 @@ var ( vtctldAddr string mysqlAddress string ks1 = "test_keyspace" - redirected = "redirected" jsonTopo = ` { "keyspaces": [ @@ -58,10 +57,6 @@ var ( "rdonlyCount": 1, "replicaCount": 2 }, - { - "name": "redirected", - "servedFrom": "test_keyspace" - }, { "name": "routed", "shards": [{"name": "0"}] @@ -174,15 +169,6 @@ func assertInsertedRowsExist(ctx context.Context, t *testing.T, conn *vtgateconn require.NoError(t, err) assert.Equal(t, rowCount, len(res.Rows)) - - cur = conn.Session(redirected+":-80@replica", nil) - bindVariables = 
map[string]*querypb.BindVariable{ - "id_start": {Type: querypb.Type_UINT64, Value: []byte(strconv.FormatInt(int64(idStart), 10))}, - } - res, err = cur.Execute(ctx, "select * from test_table where id = :id_start", bindVariables) - require.NoError(t, err) - require.Equal(t, 1, len(res.Rows)) - assert.Equal(t, "VARCHAR(\"test1000\")", res.Rows[0][1].String()) } func assertRouting(ctx context.Context, t *testing.T, db *sql.DB) { diff --git a/go/test/endtoend/vtgate/createdb_plugin/main_test.go b/go/test/endtoend/vtgate/createdb_plugin/main_test.go index e712fee7b36..5bfec3890b5 100644 --- a/go/test/endtoend/vtgate/createdb_plugin/main_test.go +++ b/go/test/endtoend/vtgate/createdb_plugin/main_test.go @@ -164,8 +164,8 @@ func shutdown(t *testing.T, ksName string) { } require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteKeyspace", "--", "--recursive", ksName)) + clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteKeyspace", "--recursive", ksName)) require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph")) + clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph")) } diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go index 24cade5b550..71f4a2353f7 100644 --- a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go +++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -153,7 +153,7 @@ func TestScatterErrsAsWarns(t *testing.T) { assertContainsOneOf(t, mode.conn, showQ, expectedWarnings...) 
// invalid_field should throw error and not warning - _, err = mode.conn.ExecuteFetch("SELECT /*vt+ PLANNER=Gen4 SCATTER_ERRORS_AS_WARNINGS */ invalid_field from t1;", 1, false) + _, err = mode.conn.ExecuteFetch("SELECT /*vt+ PLANNER=Gen4 SCATTER_ERRORS_AS_WARNINGS */ invalid_field from t1", 1, false) require.Error(t, err) serr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) require.Equal(t, sqlerror.ERBadFieldError, serr.Number(), serr.Error()) diff --git a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go index 134b9cfa180..490c54f2299 100644 --- a/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go +++ b/go/test/endtoend/vtgate/foreignkey/fk_fuzz_test.go @@ -19,7 +19,7 @@ package foreignkey import ( "database/sql" "fmt" - "math/rand" + "math/rand/v2" "sync" "sync/atomic" "testing" @@ -28,17 +28,17 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" ) type QueryFormat string const ( SQLQueries QueryFormat = "SQLQueries" + OlapSQLQueries QueryFormat = "OlapSQLQueries" PreparedStatmentQueries QueryFormat = "PreparedStatmentQueries" PreparedStatementPacket QueryFormat = "PreparedStatementPacket" ) @@ -53,6 +53,8 @@ type fuzzer struct { updateShare int concurrency int queryFormat QueryFormat + noFkSetVar bool + fkState *bool // shouldStop is an internal state variable, that tells the fuzzer // whether it should stop or not. @@ -72,7 +74,7 @@ type debugInfo struct { } // newFuzzer creates a new fuzzer struct. 
-func newFuzzer(concurrency int, maxValForId int, maxValForCol int, insertShare int, deleteShare int, updateShare int, queryFormat QueryFormat) *fuzzer { +func newFuzzer(concurrency int, maxValForId int, maxValForCol int, insertShare int, deleteShare int, updateShare int, queryFormat QueryFormat, fkState *bool) *fuzzer { fz := &fuzzer{ concurrency: concurrency, maxValForId: maxValForId, @@ -81,6 +83,8 @@ func newFuzzer(concurrency int, maxValForId int, maxValForCol int, insertShare i deleteShare: deleteShare, updateShare: updateShare, queryFormat: queryFormat, + fkState: fkState, + noFkSetVar: false, wg: sync.WaitGroup{}, } // Initially the fuzzer thread is stopped. @@ -92,20 +96,20 @@ func newFuzzer(concurrency int, maxValForId int, maxValForCol int, insertShare i // The returned set is a list of strings, because for prepared statements, we have to run // set queries first and then the final query eventually. func (fz *fuzzer) generateQuery() []string { - val := rand.Intn(fz.insertShare + fz.updateShare + fz.deleteShare) + val := rand.IntN(fz.insertShare + fz.updateShare + fz.deleteShare) if val < fz.insertShare { switch fz.queryFormat { - case SQLQueries: - return []string{fz.generateInsertDMLQuery()} + case OlapSQLQueries, SQLQueries: + return []string{fz.generateInsertDMLQuery(getInsertType())} case PreparedStatmentQueries: - return fz.getPreparedInsertQueries() + return fz.getPreparedInsertQueries(getInsertType()) default: panic("Unknown query type") } } if val < fz.insertShare+fz.updateShare { switch fz.queryFormat { - case SQLQueries: + case OlapSQLQueries, SQLQueries: return []string{fz.generateUpdateDMLQuery()} case PreparedStatmentQueries: return fz.getPreparedUpdateQueries() @@ -114,7 +118,7 @@ func (fz *fuzzer) generateQuery() []string { } } switch fz.queryFormat { - case SQLQueries: + case OlapSQLQueries, SQLQueries: return []string{fz.generateDeleteDMLQuery()} case PreparedStatmentQueries: return fz.getPreparedDeleteQueries() @@ -123,75 +127,150 @@ 
func (fz *fuzzer) generateQuery() []string { } } +func getInsertType() string { + return []string{"insert", "replace"}[rand.IntN(2)] +} + // generateInsertDMLQuery generates an INSERT query from the parameters for the fuzzer. -func (fz *fuzzer) generateInsertDMLQuery() string { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) +func (fz *fuzzer) generateInsertDMLQuery(insertType string) string { + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) tableName := fkTables[tableId] + setVarFkChecksVal := fz.getSetVarFkChecksVal() if tableName == "fk_t20" { - colValue := rand.Intn(1 + fz.maxValForCol) - col2Value := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("insert into %v (id, col, col2) values (%v, %v, %v)", tableName, idValue, convertColValueToString(colValue), convertColValueToString(col2Value)) + colValue := rand.IntN(1 + fz.maxValForCol) + col2Value := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("%s %vinto %v (id, col, col2) values (%v, %v, %v)", insertType, setVarFkChecksVal, tableName, idValue, convertIntValueToString(colValue), convertIntValueToString(col2Value)) } else if isMultiColFkTable(tableName) { - colaValue := rand.Intn(1 + fz.maxValForCol) - colbValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("insert into %v (id, cola, colb) values (%v, %v, %v)", tableName, idValue, convertColValueToString(colaValue), convertColValueToString(colbValue)) + colaValue := rand.IntN(1 + fz.maxValForCol) + colbValue := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("%s %vinto %v (id, cola, colb) values (%v, %v, %v)", insertType, setVarFkChecksVal, tableName, idValue, convertIntValueToString(colaValue), convertIntValueToString(colbValue)) } else { - colValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("insert into %v (id, col) values (%v, %v)", tableName, idValue, convertColValueToString(colValue)) + colValue := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("%s 
%vinto %v (id, col) values (%v, %v)", insertType, setVarFkChecksVal, tableName, idValue, convertIntValueToString(colValue)) } } -// convertColValueToString converts the given value to a string -func convertColValueToString(value int) string { - if value == 0 { - return "NULL" +// generateUpdateDMLQuery generates a UPDATE query from the parameters for the fuzzer. +func (fz *fuzzer) generateUpdateDMLQuery() string { + multiTableUpdate := rand.IntN(2) + 1 + if multiTableUpdate == 1 { + return fz.generateSingleUpdateDMLQuery() } - return fmt.Sprintf("%d", value) + return fz.generateMultiUpdateDMLQuery() } -// generateUpdateDMLQuery generates an UPDATE query from the parameters for the fuzzer. -func (fz *fuzzer) generateUpdateDMLQuery() string { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) +// generateSingleUpdateDMLQuery generates an UPDATE query from the parameters for the fuzzer. +func (fz *fuzzer) generateSingleUpdateDMLQuery() string { + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) tableName := fkTables[tableId] + setVarFkChecksVal := fz.getSetVarFkChecksVal() + updWithLimit := rand.IntN(2) + limitCount := rand.IntN(3) if tableName == "fk_t20" { - colValue := rand.Intn(1 + fz.maxValForCol) - col2Value := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("update %v set col = %v, col2 = %v where id = %v", tableName, convertColValueToString(colValue), convertColValueToString(col2Value), idValue) + colValue := convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + col2Value := convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + if updWithLimit == 0 { + return fmt.Sprintf("update %v%v set col = %v, col2 = %v where id = %v", setVarFkChecksVal, tableName, colValue, col2Value, idValue) + } + return fmt.Sprintf("update %v%v set col = %v, col2 = %v order by id limit %v", setVarFkChecksVal, tableName, colValue, col2Value, limitCount) } else if isMultiColFkTable(tableName) { - colaValue := 
rand.Intn(1 + fz.maxValForCol) - colbValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("update %v set cola = %v, colb = %v where id = %v", tableName, convertColValueToString(colaValue), convertColValueToString(colbValue), idValue) + if rand.IntN(2) == 0 { + colaValue := convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + colbValue := convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + if fz.concurrency > 1 { + colaValue = fz.generateExpression(rand.IntN(4)+1, "cola", "colb", "id") + colbValue = fz.generateExpression(rand.IntN(4)+1, "cola", "colb", "id") + } + if updWithLimit == 0 { + return fmt.Sprintf("update %v%v set cola = %v, colb = %v where id = %v", setVarFkChecksVal, tableName, colaValue, colbValue, idValue) + } + return fmt.Sprintf("update %v%v set cola = %v, colb = %v order by id limit %v", setVarFkChecksVal, tableName, colaValue, colbValue, limitCount) + } else { + colValue := fz.generateExpression(rand.IntN(4)+1, "cola", "colb", "id") + colToUpdate := []string{"cola", "colb"}[rand.IntN(2)] + if updWithLimit == 0 { + return fmt.Sprintf("update %v set %v = %v where id = %v", tableName, colToUpdate, colValue, idValue) + } + return fmt.Sprintf("update %v set %v = %v order by id limit %v", tableName, colToUpdate, colValue, limitCount) + } } else { - colValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("update %v set col = %v where id = %v", tableName, convertColValueToString(colValue), idValue) + colValue := fz.generateExpression(rand.IntN(4)+1, "col", "id") + if updWithLimit == 0 { + return fmt.Sprintf("update %v%v set col = %v where id = %v", setVarFkChecksVal, tableName, colValue, idValue) + } + return fmt.Sprintf("update %v%v set col = %v order by id limit %v", setVarFkChecksVal, tableName, colValue, limitCount) + } +} + +// generateMultiUpdateDMLQuery generates a UPDATE query using 2 tables from the parameters for the fuzzer. 
+func (fz *fuzzer) generateMultiUpdateDMLQuery() string { + tableId := rand.IntN(len(fkTables)) + tableId2 := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) + colValue := convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + col2Value := convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + setVarFkChecksVal := fz.getSetVarFkChecksVal() + setExprs := fmt.Sprintf("%v.col = %v", fkTables[tableId], colValue) + if rand.IntN(2)%2 == 0 { + setExprs += ", " + fmt.Sprintf("%v.col = %v", fkTables[tableId2], col2Value) + } + query := fmt.Sprintf("update %v%v join %v using (id) set %s where %v.id = %v", setVarFkChecksVal, fkTables[tableId], fkTables[tableId2], setExprs, fkTables[tableId], idValue) + return query +} + +// generateDeleteDMLQuery generates a DELETE query using 1 table from the parameters for the fuzzer. +func (fz *fuzzer) generateSingleDeleteDMLQuery() string { + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) + setVarFkChecksVal := fz.getSetVarFkChecksVal() + delWithLimit := rand.IntN(2) + if delWithLimit == 0 { + return fmt.Sprintf("delete %vfrom %v where id = %v", setVarFkChecksVal, fkTables[tableId], idValue) } + limitCount := rand.IntN(3) + return fmt.Sprintf("delete %vfrom %v order by id limit %v", setVarFkChecksVal, fkTables[tableId], limitCount) +} + +// generateMultiDeleteDMLQuery generates a DELETE query using 2 tables from the parameters for the fuzzer. 
+func (fz *fuzzer) generateMultiDeleteDMLQuery() string { + tableId := rand.IntN(len(fkTables)) + tableId2 := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) + setVarFkChecksVal := fz.getSetVarFkChecksVal() + target := fkTables[tableId] + if rand.IntN(2)%2 == 0 { + target += ", " + fkTables[tableId2] + } + query := fmt.Sprintf("delete %v%v from %v join %v using (id) where %v.id = %v", setVarFkChecksVal, target, fkTables[tableId], fkTables[tableId2], fkTables[tableId], idValue) + return query } // generateDeleteDMLQuery generates a DELETE query from the parameters for the fuzzer. func (fz *fuzzer) generateDeleteDMLQuery() string { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) - query := fmt.Sprintf("delete from %v where id = %v", fkTables[tableId], idValue) - return query + multiTableDelete := rand.IntN(2) + 1 + if multiTableDelete == 1 { + return fz.generateSingleDeleteDMLQuery() + } + return fz.generateMultiDeleteDMLQuery() } // start starts running the fuzzer. -func (fz *fuzzer) start(t *testing.T, sharded bool) { +func (fz *fuzzer) start(t *testing.T, keyspace string) { // We mark the fuzzer thread to be running now. fz.shouldStop.Store(false) fz.wg.Add(fz.concurrency) for i := 0; i < fz.concurrency; i++ { fuzzerThreadId := i go func() { - fz.runFuzzerThread(t, sharded, fuzzerThreadId) + fz.runFuzzerThread(t, keyspace, fuzzerThreadId) }() } } // runFuzzerThread is used to run a thread of the fuzzer. -func (fz *fuzzer) runFuzzerThread(t *testing.T, sharded bool, fuzzerThreadId int) { +func (fz *fuzzer) runFuzzerThread(t *testing.T, keyspace string, fuzzerThreadId int) { // Whenever we finish running this thread, we should mark the thread has stopped. defer func() { fz.wg.Done() @@ -199,6 +278,9 @@ func (fz *fuzzer) runFuzzerThread(t *testing.T, sharded bool, fuzzerThreadId int // Create a MySQL Compare that connects to both Vitess and MySQL and runs the queries against both. 
mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) require.NoError(t, err) + if fz.fkState != nil { + mcmp.Exec(fmt.Sprintf("SET FOREIGN_KEY_CHECKS=%v", sqlparser.FkChecksStateString(fz.fkState))) + } var vitessDb, mysqlDb *sql.DB if fz.queryFormat == PreparedStatementPacket { // Open another connection to Vitess using the go-sql-driver so that we can send prepared statements as COM_STMT_PREPARE packets. @@ -211,16 +293,12 @@ func (fz *fuzzer) runFuzzerThread(t *testing.T, sharded bool, fuzzerThreadId int defer mysqlDb.Close() } // Set the correct keyspace to use from VtGates. - if sharded { - _ = utils.Exec(t, mcmp.VtConn, "use `ks`") - if vitessDb != nil { - _, _ = vitessDb.Exec("use `ks`") - } - } else { - _ = utils.Exec(t, mcmp.VtConn, "use `uks`") - if vitessDb != nil { - _, _ = vitessDb.Exec("use `uks`") - } + _ = utils.Exec(t, mcmp.VtConn, fmt.Sprintf("use `%v`", keyspace)) + if vitessDb != nil { + _, _ = vitessDb.Exec(fmt.Sprintf("use `%v`", keyspace)) + } + if fz.queryFormat == OlapSQLQueries { + _ = utils.Exec(t, mcmp.VtConn, "set workload = olap") } for { // If fuzzer thread is marked to be stopped, then we should exit this go routine. @@ -228,7 +306,7 @@ func (fz *fuzzer) runFuzzerThread(t *testing.T, sharded bool, fuzzerThreadId int return } switch fz.queryFormat { - case SQLQueries, PreparedStatmentQueries: + case OlapSQLQueries, SQLQueries, PreparedStatmentQueries: if fz.generateAndExecuteStatementQuery(t, mcmp) { return } @@ -255,7 +333,7 @@ func (fz *fuzzer) generateAndExecuteStatementQuery(t *testing.T, mcmp utils.MySQ for _, query := range queries { // When the concurrency is 1, then we run the query both on MySQL and Vitess. if fz.concurrency == 1 { - _, _ = mcmp.ExecAllowAndCompareError(query) + _, _ = mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{IgnoreRowsAffected: true}) // If t is marked failed, we have encountered our first failure. // Let's collect the required information and finish execution. 
if t.Failed() { @@ -318,8 +396,8 @@ func (fz *fuzzer) stop() { // getPreparedDeleteQueries gets the list of queries to run for executing an DELETE using prepared statements. func (fz *fuzzer) getPreparedDeleteQueries() []string { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) return []string{ fmt.Sprintf("prepare stmt_del from 'delete from %v where id = ?'", fkTables[tableId]), fmt.Sprintf("SET @id = %v", idValue), @@ -328,36 +406,36 @@ func (fz *fuzzer) getPreparedDeleteQueries() []string { } // getPreparedInsertQueries gets the list of queries to run for executing an INSERT using prepared statements. -func (fz *fuzzer) getPreparedInsertQueries() []string { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) +func (fz *fuzzer) getPreparedInsertQueries(insertType string) []string { + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) tableName := fkTables[tableId] if tableName == "fk_t20" { - colValue := rand.Intn(1 + fz.maxValForCol) - col2Value := rand.Intn(1 + fz.maxValForCol) + colValue := rand.IntN(1 + fz.maxValForCol) + col2Value := rand.IntN(1 + fz.maxValForCol) return []string{ - "prepare stmt_insert from 'insert into fk_t20 (id, col, col2) values (?, ?, ?)'", + fmt.Sprintf("prepare stmt_insert from '%s into fk_t20 (id, col, col2) values (?, ?, ?)'", insertType), fmt.Sprintf("SET @id = %v", idValue), - fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), - fmt.Sprintf("SET @col2 = %v", convertColValueToString(col2Value)), + fmt.Sprintf("SET @col = %v", convertIntValueToString(colValue)), + fmt.Sprintf("SET @col2 = %v", convertIntValueToString(col2Value)), "execute stmt_insert using @id, @col, @col2", } } else if isMultiColFkTable(tableName) { - colaValue := rand.Intn(1 + fz.maxValForCol) - colbValue := rand.Intn(1 + fz.maxValForCol) + colaValue := rand.IntN(1 + 
fz.maxValForCol) + colbValue := rand.IntN(1 + fz.maxValForCol) return []string{ - fmt.Sprintf("prepare stmt_insert from 'insert into %v (id, cola, colb) values (?, ?, ?)'", tableName), + fmt.Sprintf("prepare stmt_insert from '%s into %v (id, cola, colb) values (?, ?, ?)'", insertType, tableName), fmt.Sprintf("SET @id = %v", idValue), - fmt.Sprintf("SET @cola = %v", convertColValueToString(colaValue)), - fmt.Sprintf("SET @colb = %v", convertColValueToString(colbValue)), + fmt.Sprintf("SET @cola = %v", convertIntValueToString(colaValue)), + fmt.Sprintf("SET @colb = %v", convertIntValueToString(colbValue)), "execute stmt_insert using @id, @cola, @colb", } } else { - colValue := rand.Intn(1 + fz.maxValForCol) + colValue := rand.IntN(1 + fz.maxValForCol) return []string{ - fmt.Sprintf("prepare stmt_insert from 'insert into %v (id, col) values (?, ?)'", tableName), + fmt.Sprintf("prepare stmt_insert from '%s into %v (id, col) values (?, ?)'", insertType, tableName), fmt.Sprintf("SET @id = %v", idValue), - fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), + fmt.Sprintf("SET @col = %v", convertIntValueToString(colValue)), "execute stmt_insert using @id, @col", } } @@ -365,35 +443,35 @@ func (fz *fuzzer) getPreparedInsertQueries() []string { // getPreparedUpdateQueries gets the list of queries to run for executing an UPDATE using prepared statements. func (fz *fuzzer) getPreparedUpdateQueries() []string { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) tableName := fkTables[tableId] if tableName == "fk_t20" { - colValue := rand.Intn(1 + fz.maxValForCol) - col2Value := rand.Intn(1 + fz.maxValForCol) + colValue := rand.IntN(1 + fz.maxValForCol) + col2Value := rand.IntN(1 + fz.maxValForCol) return []string{ "prepare stmt_update from 'update fk_t20 set col = ?, col2 = ? 
where id = ?'", fmt.Sprintf("SET @id = %v", idValue), - fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), - fmt.Sprintf("SET @col2 = %v", convertColValueToString(col2Value)), + fmt.Sprintf("SET @col = %v", convertIntValueToString(colValue)), + fmt.Sprintf("SET @col2 = %v", convertIntValueToString(col2Value)), "execute stmt_update using @col, @col2, @id", } } else if isMultiColFkTable(tableName) { - colaValue := rand.Intn(1 + fz.maxValForCol) - colbValue := rand.Intn(1 + fz.maxValForCol) + colaValue := rand.IntN(1 + fz.maxValForCol) + colbValue := rand.IntN(1 + fz.maxValForCol) return []string{ fmt.Sprintf("prepare stmt_update from 'update %v set cola = ?, colb = ? where id = ?'", tableName), fmt.Sprintf("SET @id = %v", idValue), - fmt.Sprintf("SET @cola = %v", convertColValueToString(colaValue)), - fmt.Sprintf("SET @colb = %v", convertColValueToString(colbValue)), + fmt.Sprintf("SET @cola = %v", convertIntValueToString(colaValue)), + fmt.Sprintf("SET @colb = %v", convertIntValueToString(colbValue)), "execute stmt_update using @cola, @colb, @id", } } else { - colValue := rand.Intn(1 + fz.maxValForCol) + colValue := rand.IntN(1 + fz.maxValForCol) return []string{ fmt.Sprintf("prepare stmt_update from 'update %v set col = ? where id = ?'", tableName), fmt.Sprintf("SET @id = %v", idValue), - fmt.Sprintf("SET @col = %v", convertColValueToString(colValue)), + fmt.Sprintf("SET @col = %v", convertIntValueToString(colValue)), "execute stmt_update using @col, @id", } } @@ -401,9 +479,9 @@ func (fz *fuzzer) getPreparedUpdateQueries() []string { // generateParameterizedQuery generates a parameterized query for the query format PreparedStatementPacket. 
func (fz *fuzzer) generateParameterizedQuery() (query string, params []any) { - val := rand.Intn(fz.insertShare + fz.updateShare + fz.deleteShare) + val := rand.IntN(fz.insertShare + fz.updateShare + fz.deleteShare) if val < fz.insertShare { - return fz.generateParameterizedInsertQuery() + return fz.generateParameterizedInsertQuery(getInsertType()) } if val < fz.insertShare+fz.updateShare { return fz.generateParameterizedUpdateQuery() @@ -412,50 +490,65 @@ func (fz *fuzzer) generateParameterizedQuery() (query string, params []any) { } // generateParameterizedInsertQuery generates a parameterized INSERT query for the query format PreparedStatementPacket. -func (fz *fuzzer) generateParameterizedInsertQuery() (query string, params []any) { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) +func (fz *fuzzer) generateParameterizedInsertQuery(insertType string) (query string, params []any) { + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) tableName := fkTables[tableId] if tableName == "fk_t20" { - colValue := rand.Intn(1 + fz.maxValForCol) - col2Value := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("insert into %v (id, col, col2) values (?, ?, ?)", tableName), []any{idValue, convertColValueToString(colValue), convertColValueToString(col2Value)} + colValue := rand.IntN(1 + fz.maxValForCol) + col2Value := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("%s into %v (id, col, col2) values (?, ?, ?)", insertType, tableName), []any{idValue, convertIntValueToString(colValue), convertIntValueToString(col2Value)} } else if isMultiColFkTable(tableName) { - colaValue := rand.Intn(1 + fz.maxValForCol) - colbValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("insert into %v (id, cola, colb) values (?, ?, ?)", tableName), []any{idValue, convertColValueToString(colaValue), convertColValueToString(colbValue)} + colaValue := rand.IntN(1 + fz.maxValForCol) + colbValue := rand.IntN(1 + fz.maxValForCol) + 
return fmt.Sprintf("%s into %v (id, cola, colb) values (?, ?, ?)", insertType, tableName), []any{idValue, convertIntValueToString(colaValue), convertIntValueToString(colbValue)} } else { - colValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("insert into %v (id, col) values (?, ?)", tableName), []any{idValue, convertColValueToString(colValue)} + colValue := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("%s into %v (id, col) values (?, ?)", insertType, tableName), []any{idValue, convertIntValueToString(colValue)} } } // generateParameterizedUpdateQuery generates a parameterized UPDATE query for the query format PreparedStatementPacket. func (fz *fuzzer) generateParameterizedUpdateQuery() (query string, params []any) { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) tableName := fkTables[tableId] if tableName == "fk_t20" { - colValue := rand.Intn(1 + fz.maxValForCol) - col2Value := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("update %v set col = ?, col2 = ? where id = ?", tableName), []any{convertColValueToString(colValue), convertColValueToString(col2Value), idValue} + colValue := rand.IntN(1 + fz.maxValForCol) + col2Value := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set col = ?, col2 = ? where id = ?", tableName), []any{convertIntValueToString(colValue), convertIntValueToString(col2Value), idValue} } else if isMultiColFkTable(tableName) { - colaValue := rand.Intn(1 + fz.maxValForCol) - colbValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("update %v set cola = ?, colb = ? where id = ?", tableName), []any{convertColValueToString(colaValue), convertColValueToString(colbValue), idValue} + colaValue := rand.IntN(1 + fz.maxValForCol) + colbValue := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set cola = ?, colb = ? 
where id = ?", tableName), []any{convertIntValueToString(colaValue), convertIntValueToString(colbValue), idValue} } else { - colValue := rand.Intn(1 + fz.maxValForCol) - return fmt.Sprintf("update %v set col = ? where id = ?", tableName), []any{convertColValueToString(colValue), idValue} + colValue := rand.IntN(1 + fz.maxValForCol) + return fmt.Sprintf("update %v set col = ? where id = ?", tableName), []any{convertIntValueToString(colValue), idValue} } } // generateParameterizedDeleteQuery generates a parameterized DELETE query for the query format PreparedStatementPacket. func (fz *fuzzer) generateParameterizedDeleteQuery() (query string, params []any) { - tableId := rand.Intn(len(fkTables)) - idValue := 1 + rand.Intn(fz.maxValForId) + tableId := rand.IntN(len(fkTables)) + idValue := 1 + rand.IntN(fz.maxValForId) return fmt.Sprintf("delete from %v where id = ?", fkTables[tableId]), []any{idValue} } +// getSetVarFkChecksVal generates an optimizer hint to randomly set the foreign key checks to on or off or leave them unaltered. +func (fz *fuzzer) getSetVarFkChecksVal() string { + if fz.concurrency != 1 || fz.noFkSetVar { + return "" + } + val := rand.IntN(3) + if val == 0 { + return "" + } + if val == 1 { + return "/*+ SET_VAR(foreign_key_checks=On) */ " + } + return "/*+ SET_VAR(foreign_key_checks=Off) */ " +} + // TestFkFuzzTest is a fuzzer test that works by querying the database concurrently. // We have a pre-written set of query templates that we will use, but the data in the queries will // be randomly generated. 
The intent is that we hammer the database as a real-world application would @@ -561,8 +654,7 @@ func TestFkFuzzTest(t *testing.T) { insertShare: 100, deleteShare: 0, updateShare: 0, - }, - { + }, { name: "Single Thread - Balanced Inserts and Deletes", concurrency: 1, timeForTesting: 5 * time.Second, @@ -571,8 +663,7 @@ func TestFkFuzzTest(t *testing.T) { insertShare: 50, deleteShare: 50, updateShare: 0, - }, - { + }, { name: "Single Thread - Balanced Inserts and Updates", concurrency: 1, timeForTesting: 5 * time.Second, @@ -593,6 +684,15 @@ func TestFkFuzzTest(t *testing.T) { updateShare: 50, }, { + name: "Multi Thread - Only Inserts", + concurrency: 30, + timeForTesting: 5 * time.Second, + maxValForCol: 5, + maxValForId: 30, + insertShare: 100, + deleteShare: 0, + updateShare: 0, + }, { name: "Multi Thread - Balanced Inserts, Updates and Deletes", concurrency: 30, timeForTesting: 5 * time.Second, @@ -604,131 +704,95 @@ func TestFkFuzzTest(t *testing.T) { }, } - for _, tt := range testcases { - for _, testSharded := range []bool{false, true} { - for _, queryFormat := range []QueryFormat{SQLQueries, PreparedStatmentQueries, PreparedStatementPacket} { - t.Run(getTestName(tt.name, testSharded)+fmt.Sprintf(" QueryFormat - %v", queryFormat), func(t *testing.T) { - mcmp, closer := start(t) - defer closer() - // Set the correct keyspace to use from VtGates. 
- if testSharded { - t.Skip("Skip test since we don't have sharded foreign key support yet") - _ = utils.Exec(t, mcmp.VtConn, "use `ks`") - } else { - _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + valTrue := true + valFalse := false + for _, fkState := range []*bool{nil, &valTrue, &valFalse} { + for _, tt := range testcases { + for _, keyspace := range []string{unshardedKs, shardedKs} { + for _, queryFormat := range []QueryFormat{OlapSQLQueries, SQLQueries, PreparedStatmentQueries, PreparedStatementPacket} { + if fkState != nil && (queryFormat != SQLQueries || tt.concurrency != 1) { + continue } - // Ensure that the Vitess database is originally empty - ensureDatabaseState(t, mcmp.VtConn, true) - ensureDatabaseState(t, mcmp.MySQLConn, true) - - // Create the fuzzer. - fz := newFuzzer(tt.concurrency, tt.maxValForId, tt.maxValForCol, tt.insertShare, tt.deleteShare, tt.updateShare, queryFormat) - - // Start the fuzzer. - fz.start(t, testSharded) - - // Wait for the timeForTesting so that the threads continue to run. - time.Sleep(tt.timeForTesting) - - fz.stop() - - // We encountered an error while running the fuzzer. Let's print out the information! - if fz.firstFailureInfo != nil { - log.Errorf("Failing query - %v", fz.firstFailureInfo.queryToFail) - for idx, table := range fkTables { - log.Errorf("MySQL data for %v -\n%v", table, fz.firstFailureInfo.mysqlState[idx].Rows) - log.Errorf("Vitess data for %v -\n%v", table, fz.firstFailureInfo.vitessState[idx].Rows) + t.Run(getTestName(tt.name, keyspace)+fmt.Sprintf(" FkState - %v QueryFormat - %v", sqlparser.FkChecksStateString(fkState), queryFormat), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + if keyspace == shardedKs { + t.Skip("Skip test since we don't have sharded foreign key support yet") + } + // Set the correct keyspace to use from VtGates. 
+ _ = utils.Exec(t, mcmp.VtConn, fmt.Sprintf("use `%v`", keyspace)) + + // Ensure that the Vitess database is originally empty + ensureDatabaseState(t, mcmp.VtConn, true) + ensureDatabaseState(t, mcmp.MySQLConn, true) + + // Create the fuzzer. + fz := newFuzzer(tt.concurrency, tt.maxValForId, tt.maxValForCol, tt.insertShare, tt.deleteShare, tt.updateShare, queryFormat, fkState) + + // Start the fuzzer. + fz.start(t, keyspace) + + // Wait for the timeForTesting so that the threads continue to run. + totalTime := time.After(tt.timeForTesting) + done := false + for !done { + select { + case <-totalTime: + done = true + case <-time.After(10 * time.Millisecond): + validateReplication(t) + } } - } - // ensure Vitess database has some data. This ensures not all the commands failed. - ensureDatabaseState(t, mcmp.VtConn, false) - // Verify the consistency of the data. - verifyDataIsCorrect(t, mcmp, tt.concurrency) - }) - } - } - } -} + fz.stop() -// ensureDatabaseState ensures that the database is either empty or not. -func ensureDatabaseState(t *testing.T, vtconn *mysql.Conn, empty bool) { - results := collectFkTablesState(vtconn) - isEmpty := true - for _, res := range results { - if len(res.Rows) > 0 { - isEmpty = false - } - } - require.Equal(t, isEmpty, empty) -} + // We encountered an error while running the fuzzer. Let's print out the information! + if fz.firstFailureInfo != nil { + log.Errorf("Failing query - %v", fz.firstFailureInfo.queryToFail) + for idx, table := range fkTables { + log.Errorf("MySQL data for %v -\n%v", table, fz.firstFailureInfo.mysqlState[idx].Rows) + log.Errorf("Vitess data for %v -\n%v", table, fz.firstFailureInfo.vitessState[idx].Rows) + } + } -// verifyDataIsCorrect verifies that the data in MySQL database matches the data in the Vitess database. 
-func verifyDataIsCorrect(t *testing.T, mcmp utils.MySQLCompare, concurrency int) { - // For single concurrent thread, we run all the queries on both MySQL and Vitess, so we can verify correctness - // by just checking if the data in MySQL and Vitess match. - if concurrency == 1 { - for _, table := range fkTables { - query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) - mcmp.Exec(query) - } - } else { - // For higher concurrency, we don't have MySQL data to verify everything is fine, - // so we'll have to do something different. - // We run LEFT JOIN queries on all the parent and child tables linked by foreign keys - // to make sure that nothing is broken in the database. - for _, reference := range fkReferences { - query := fmt.Sprintf("select %v.id from %v left join %v on (%v.col = %v.col) where %v.col is null and %v.col is not null", reference.childTable, reference.childTable, reference.parentTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable) - if isMultiColFkTable(reference.childTable) { - query = fmt.Sprintf("select %v.id from %v left join %v on (%v.cola = %v.cola and %v.colb = %v.colb) where %v.cola is null and %v.cola is not null and %v.colb is not null", reference.childTable, reference.childTable, reference.parentTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable, reference.childTable) - } - res, err := mcmp.VtConn.ExecuteFetch(query, 1000, false) - require.NoError(t, err) - require.Zerof(t, len(res.Rows), "Query %v gave non-empty results", query) - } - } - // We also verify that the results in Primary and Replica table match as is. 
- for _, keyspace := range clusterInstance.Keyspaces { - for _, shard := range keyspace.Shards { - var primaryTab, replicaTab *cluster.Vttablet - for _, vttablet := range shard.Vttablets { - if vttablet.Type == "primary" { - primaryTab = vttablet - } else { - replicaTab = vttablet + // ensure Vitess database has some data. This ensures not all the commands failed. + ensureDatabaseState(t, mcmp.VtConn, false) + // Verify the consistency of the data. + verifyDataIsCorrect(t, mcmp, tt.concurrency) + }) } } - require.NotNil(t, primaryTab) - require.NotNil(t, replicaTab) - checkReplicationHealthy(t, replicaTab) - cluster.WaitForReplicationPos(t, primaryTab, replicaTab, true, 60.0) - primaryConn, err := utils.GetMySQLConn(primaryTab, fmt.Sprintf("vt_%v", keyspace.Name)) - require.NoError(t, err) - replicaConn, err := utils.GetMySQLConn(replicaTab, fmt.Sprintf("vt_%v", keyspace.Name)) - require.NoError(t, err) - primaryRes := collectFkTablesState(primaryConn) - replicaRes := collectFkTablesState(replicaConn) - verifyDataMatches(t, primaryRes, replicaRes) } } } -// verifyDataMatches verifies that the two list of results are the same. -func verifyDataMatches(t *testing.T, resOne []*sqltypes.Result, resTwo []*sqltypes.Result) { - require.EqualValues(t, len(resTwo), len(resOne), "Res 1 - %v, Res 2 - %v", resOne, resTwo) - for idx, resultOne := range resOne { - resultTwo := resTwo[idx] - require.True(t, resultOne.Equal(resultTwo), "Data for %v doesn't match\nRows 1\n%v\nRows 2\n%v", fkTables[idx], resultOne.Rows, resultTwo.Rows) - } -} - -// collectFkTablesState collects the data stored in the foreign key tables for the given connection. 
-func collectFkTablesState(conn *mysql.Conn) []*sqltypes.Result { - var tablesData []*sqltypes.Result - for _, table := range fkTables { - query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) - res, _ := conn.ExecuteFetch(query, 10000, true) - tablesData = append(tablesData, res) +// BenchmarkFkFuzz benchmarks the performance of Vitess unmanaged, Vitess managed and vanilla MySQL performance on a given set of queries generated by the fuzzer. +func BenchmarkFkFuzz(b *testing.B) { + maxValForCol := 10 + maxValForId := 10 + insertShare := 50 + deleteShare := 50 + updateShare := 50 + numQueries := 1000 + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(b) + for i := 0; i < b.N; i++ { + queries, mysqlConn, vtConn, vtUnmanagedConn := setupBenchmark(b, maxValForId, maxValForCol, insertShare, deleteShare, updateShare, numQueries) + + // Now we run the benchmarks! + b.Run("MySQL", func(b *testing.B) { + startBenchmark(b) + runQueries(b, mysqlConn, queries) + }) + + b.Run("Vitess Managed Foreign Keys", func(b *testing.B) { + startBenchmark(b) + runQueries(b, vtConn, queries) + }) + + b.Run("Vitess Unmanaged Foreign Keys", func(b *testing.B) { + startBenchmark(b) + runQueries(b, vtUnmanagedConn, queries) + }) } - return tablesData } diff --git a/go/test/endtoend/vtgate/foreignkey/fk_test.go b/go/test/endtoend/vtgate/foreignkey/fk_test.go index 46bbc2ed433..1972d0a6259 100644 --- a/go/test/endtoend/vtgate/foreignkey/fk_test.go +++ b/go/test/endtoend/vtgate/foreignkey/fk_test.go @@ -18,7 +18,9 @@ package foreignkey import ( "context" + "fmt" "io" + "strings" "testing" "time" @@ -27,6 +29,7 @@ import ( "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" + "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/vtgate/vtgateconn" @@ -167,6 +170,14 @@ func TestUpdateWithFK(t *testing.T) { // Verify the result 
in u_t2 and u_t3 as well. utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) NULL]]`) utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(13)]]`) + + // Update with a subquery inside, such that the update is on a foreign key related column. + qr = utils.Exec(t, conn, `update u_t2 set col2 = (select col1 from u_t1 where id = 100) where id = 342`) + assert.EqualValues(t, 1, qr.RowsAffected) + // Verify the result in u_t1, u_t2 and u_t3. + utils.AssertMatches(t, conn, `select * from u_t1 order by id`, `[[INT64(1) INT64(13)] [INT64(10) INT64(12)] [INT64(100) INT64(13)] [INT64(1000) INT64(1234)]]`) + utils.AssertMatches(t, conn, `select * from u_t2 order by id`, `[[INT64(19) INT64(1234)] [INT64(342) INT64(13)]]`) + utils.AssertMatches(t, conn, `select * from u_t3 order by id`, `[[INT64(1) INT64(12)] [INT64(32) INT64(13)]]`) } // TestVstreamForFKBinLog tests that dml queries with fks are written with child row first approach in the binary logs. 
@@ -368,6 +379,8 @@ func TestFkScenarios(t *testing.T) { name string dataQueries []string dmlQuery string + dmlShouldErr bool + skipShardScoped bool assertionQueries []string }{ { @@ -375,7 +388,8 @@ func TestFkScenarios(t *testing.T) { dataQueries: []string{ "insert into fk_t1(id, col) values (1, 5)", }, - dmlQuery: "insert into t2(id, col) values (1, 7)", + dmlQuery: "insert into t2(id, col) values (1, 7)", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -396,7 +410,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t1(id, col) values (1, 7)", "insert into fk_t2(id, col) values (1, 7)", }, - dmlQuery: "update fk_t1 set col = 5 where id = 1", + dmlShouldErr: true, + dmlQuery: "update fk_t1 set col = 5 where id = 1", assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -407,7 +422,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t1(id, col) values (1, 7), (2, 9)", "insert into fk_t2(id, col) values (1, 7)", }, - dmlQuery: "update fk_t1 set col = 5 where id = 2", + dmlQuery: "update fk_t1 set col = 5 where id = 2", + skipShardScoped: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -418,7 +434,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t1(id, col) values (1, 7)", "insert into fk_t2(id, col) values (1, 7)", }, - dmlQuery: "delete from fk_t1 where id = 1", + dmlQuery: "delete from fk_t1 where id = 1", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -439,10 +456,11 @@ func TestFkScenarios(t *testing.T) { dataQueries: []string{ "insert into fk_t1(id, col) values (1, 7), (2, 9)", "insert into fk_t2(id, col) values (1, 7), (2, 9)", - "insert into fk_t3(id, col) values (1, 7), (2, 9)", + "insert into fk_t3(id, col) values (1, 7)", "insert into fk_t6(id, col) values (1, 7)", }, - dmlQuery: 
"update fk_t3 set col = 9 where id = 1", + dmlQuery: "update fk_t3 set col = 9 where id = 1", + skipShardScoped: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -458,7 +476,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t4(id, col) values (1, 7)", "insert into fk_t5(id, col) values (1, 7)", }, - dmlQuery: "update fk_t3 set col = 9 where id = 1", + dmlQuery: "update fk_t3 set col = 9 where id = 1", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -475,7 +494,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t4(id, col) values (1, 7), (2, 9)", "insert into fk_t6(id, col) values (1, 7), (2, 9)", }, - dmlQuery: "update fk_t2 set col = 9 where id = 1", + dmlQuery: "update fk_t2 set col = 9 where id = 1", + skipShardScoped: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -491,7 +511,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t3(id, col) values (1, 7), (2, 9)", "insert into fk_t6(id, col) values (1, 7)", }, - dmlQuery: "delete from fk_t3 where id = 1", + dmlQuery: "delete from fk_t3 where id = 1", + skipShardScoped: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -507,7 +528,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t4(id, col) values (1, 7)", "insert into fk_t5(id, col) values (1, 7)", }, - dmlQuery: "delete from fk_t3 where id = 1", + dmlQuery: "delete from fk_t3 where id = 1", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -524,7 +546,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t4(id, col) values (1, 7), (2, 9)", "insert into fk_t6(id, col) values (1, 7), (2, 9)", }, - dmlQuery: "delete from fk_t2 where id = 1", + dmlQuery: "delete from fk_t2 where id = 1", + skipShardScoped: true, 
assertionQueries: []string{ "select * from fk_t1 order by id", "select * from fk_t2 order by id", @@ -538,7 +561,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t10(id, col) values (1, 7), (2, 9)", "insert into fk_t11(id, col) values (1, 7)", }, - dmlQuery: "update fk_t10 set col = 5 where id = 1", + dmlQuery: "update fk_t10 set col = 5 where id = 1", + skipShardScoped: true, assertionQueries: []string{ "select * from fk_t10 order by id", "select * from fk_t11 order by id", @@ -550,7 +574,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t11(id, col) values (1, 7)", "insert into fk_t13(id, col) values (1, 7)", }, - dmlQuery: "update fk_t10 set col = 5 where id = 1", + dmlQuery: "update fk_t10 set col = 5 where id = 1", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t10 order by id", "select * from fk_t11 order by id", @@ -563,7 +588,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t11(id, col) values (1, 7)", "insert into fk_t12(id, col) values (1, 7)", }, - dmlQuery: "update fk_t10 set col = 5 where id = 1", + dmlQuery: "update fk_t10 set col = 5 where id = 1", + skipShardScoped: true, assertionQueries: []string{ "select * from fk_t10 order by id", "select * from fk_t11 order by id", @@ -587,7 +613,8 @@ func TestFkScenarios(t *testing.T) { "insert into fk_t11(id, col) values (1, 7)", "insert into fk_t13(id, col) values (1, 7)", }, - dmlQuery: "delete from fk_t10 where id = 1", + dmlQuery: "delete from fk_t10 where id = 1", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t10 order by id", "select * from fk_t11 order by id", @@ -609,54 +636,58 @@ func TestFkScenarios(t *testing.T) { }, { name: "Delete success with set null to an update cascade foreign key", dataQueries: []string{ - "insert into fk_t15(id, col) values (1, 7), (2, 9)", - "insert into fk_t16(id, col) values (1, 7), (2, 9)", - "insert into fk_t17(id, col) values (1, 7)", - "insert into fk_t18(id, col) values (1, 7)", + "insert into 
fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t18(id, cola, colb) values (1, 7, 1)", }, - dmlQuery: "delete from fk_t16 where id = 1", + dmlQuery: "delete from fk_multicol_t16 where id = 1", + skipShardScoped: true, assertionQueries: []string{ - "select * from fk_t15 order by id", - "select * from fk_t16 order by id", - "select * from fk_t17 order by id", - "select * from fk_t18 order by id", + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t18 order by id", }, }, { name: "Delete success with cascade to delete with set null to an update set null foreign key", dataQueries: []string{ - "insert into fk_t15(id, col) values (1, 7), (2, 9)", - "insert into fk_t16(id, col) values (1, 7), (2, 9)", - "insert into fk_t17(id, col) values (1, 7)", - "insert into fk_t19(id, col) values (1, 7)", + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t18(id, cola, colb) values (1, 7, 1)", }, - dmlQuery: "delete from fk_t15 where id = 1", + dmlQuery: "delete from fk_multicol_t15 where id = 1", + skipShardScoped: true, assertionQueries: []string{ - "select * from fk_t15 order by id", - "select * from fk_t16 order by id", - "select * from fk_t17 order by id", - "select * from fk_t19 order by id", + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t18 order by id", }, }, { name: "Update success with cascade to an update set null to an update cascade foreign key", dataQueries: 
[]string{ - "insert into fk_t15(id, col) values (1, 7), (2, 9)", - "insert into fk_t16(id, col) values (1, 7), (2, 9)", - "insert into fk_t17(id, col) values (1, 7)", - "insert into fk_t18(id, col) values (1, 7)", + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t18(id, cola, colb) values (1, 7, 1)", }, - dmlQuery: "update fk_t15 set col = 3 where id = 1", + dmlQuery: "update fk_multicol_t15 set cola = 3 where id = 1", + skipShardScoped: true, assertionQueries: []string{ - "select * from fk_t15 order by id", - "select * from fk_t16 order by id", - "select * from fk_t17 order by id", - "select * from fk_t18 order by id", + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t18 order by id", }, }, { name: "Insert success for self-referenced foreign key", dataQueries: []string{ "insert into fk_t20(id, col, col2) values (1, 7, NULL)", }, - dmlQuery: "insert into fk_t20(id, col, col2) values (2, 9, 7), (3, 10, 9)", + skipShardScoped: true, + dmlQuery: "insert into fk_t20(id, col, col2) values (2, 9, 7), (3, 10, 9)", assertionQueries: []string{ "select * from fk_t20 order by id", }, @@ -665,25 +696,205 @@ func TestFkScenarios(t *testing.T) { dataQueries: []string{ "insert into fk_t20(id, col, col2) values (5, 7, NULL)", }, - dmlQuery: "insert into fk_t20(id, col, col2) values (6, 9, 6)", + skipShardScoped: true, + dmlQuery: "insert into fk_t20(id, col, col2) values (6, 9, 6)", + dmlShouldErr: true, assertionQueries: []string{ "select * from fk_t20 order by id", }, + }, { + name: "Multi Table Delete success", + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values 
(1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + skipShardScoped: true, + dmlQuery: "delete fk_multicol_t15 from fk_multicol_t15 join fk_multicol_t17 where fk_multicol_t15.id = fk_multicol_t17.id", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Multi Target Delete success", + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 11, 1), (4, 13, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 11, 1), (4, 13, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 11, 1)", + "insert into fk_multicol_t18(id, cola, colb) values (1, 7, 1), (3, 11, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + }, + skipShardScoped: true, + dmlQuery: "delete fk_multicol_t15, fk_multicol_t17 from fk_multicol_t15 join fk_multicol_t17 where fk_multicol_t15.id = fk_multicol_t17.id", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Delete with limit success", + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + skipShardScoped: true, + dmlQuery: "delete from fk_multicol_t15 order by id limit 1", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from 
fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Delete with limit 0 success", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "delete from fk_multicol_t15 order by id limit 0", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Update with limit success", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 set cola = '2' order by id limit 1", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Update with limit 0 success", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 set cola = '8' order by id limit 0", + assertionQueries: []string{ + "select * from 
fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Update with non-literal update and limit success", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 set cola = id + 3 order by id limit 1", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Update with non-literal update order by and limit - multiple update", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 set cola = id + 8 order by id asc limit 2", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Update with non-literal update order by and limit - single update", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t17(id, cola, colb) 
values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 set cola = id + 8 where id < 3 order by id desc limit 2", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Multi Table Update with non-literal update", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 m1 join fk_multicol_t17 on m1.id = fk_multicol_t17.id set m1.cola = m1.id + 8 where m1.id < 3", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by id", + }, + }, { + name: "Multi Target Update with non-literal update", + skipShardScoped: true, + dataQueries: []string{ + "insert into fk_multicol_t15(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t16(id, cola, colb) values (1, 7, 1), (2, 9, 1), (3, 12, 1)", + "insert into fk_multicol_t17(id, cola, colb) values (1, 7, 1), (2, 9, 1)", + "insert into fk_multicol_t19(id, cola, colb) values (1, 7, 1)", + }, + dmlQuery: "update fk_multicol_t15 m1 join fk_multicol_t17 on m1.id = fk_multicol_t17.id set m1.cola = m1.id + 8, fk_multicol_t17.colb = 32 where m1.id < 3", + assertionQueries: []string{ + "select * from fk_multicol_t15 order by id", + "select * from fk_multicol_t16 order by id", + "select * from fk_multicol_t17 order by id", + "select * from fk_multicol_t19 order by 
id", + }, }, } for _, tt := range testcases { - for _, testSharded := range []bool{false, true} { - t.Run(getTestName(tt.name, testSharded), func(t *testing.T) { + for _, keyspace := range []string{unshardedKs, shardedKs, shardScopedKs} { + t.Run(getTestName(tt.name, keyspace), func(t *testing.T) { mcmp, closer := start(t) defer closer() - // Set the correct keyspace to use from VtGates. - if testSharded { + if keyspace == shardedKs { t.Skip("Skip test since we don't have sharded foreign key support yet") - _ = utils.Exec(t, mcmp.VtConn, "use `ks`") - } else { - _ = utils.Exec(t, mcmp.VtConn, "use `uks`") } + if keyspace == shardScopedKs && tt.skipShardScoped { + t.Skip("Skip test since we don't support updates in primary vindex columns") + } + // Set the correct keyspace to use from VtGates. + _ = utils.Exec(t, mcmp.VtConn, fmt.Sprintf("use `%v`", keyspace)) // Insert all the data required for running the test. for _, query := range tt.dataQueries { @@ -691,7 +902,12 @@ func TestFkScenarios(t *testing.T) { } // Run the DML query that needs to be tested and verify output with MySQL. - _, _ = mcmp.ExecAllowAndCompareError(tt.dmlQuery) + _, err := mcmp.ExecAllowAndCompareError(tt.dmlQuery, utils.CompareOptions{}) + if tt.dmlShouldErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } // Run the assertion queries and verify we get the expected outputs. for _, query := range tt.assertionQueries { @@ -701,17 +917,15 @@ func TestFkScenarios(t *testing.T) { } } - for _, testSharded := range []bool{false, true} { - t.Run(getTestName("Transactions with intermediate failure", testSharded), func(t *testing.T) { + for _, keyspace := range []string{unshardedKs, shardedKs} { + t.Run(getTestName("Transactions with intermediate failure", keyspace), func(t *testing.T) { mcmp, closer := start(t) defer closer() - // Set the correct keyspace to use from VtGates. 
- if testSharded { + if keyspace == shardedKs { t.Skip("Skip test since we don't have sharded foreign key support yet") - _ = utils.Exec(t, mcmp.VtConn, "use `ks`") - } else { - _ = utils.Exec(t, mcmp.VtConn, "use `uks`") } + // Set the correct keyspace to use from VtGates. + _ = utils.Exec(t, mcmp.VtConn, fmt.Sprintf("use `%v`", keyspace)) // Insert some rows mcmp.Exec("INSERT INTO fk_t10(id, col) VALUES (1, 7), (2, 9), (3, 5)") @@ -734,7 +948,7 @@ func TestFkScenarios(t *testing.T) { mcmp.Exec("SELECT * FROM fk_t13 ORDER BY id") // Update that fails - _, err := mcmp.ExecAllowAndCompareError("UPDATE fk_t10 SET col = 15 WHERE id = 1") + _, err := mcmp.ExecAllowAndCompareError("UPDATE fk_t10 SET col = 15 WHERE id = 1", utils.CompareOptions{}) require.Error(t, err) // Verify the results @@ -775,6 +989,258 @@ func TestFkScenarios(t *testing.T) { } } +// TestFkQueries is for testing a specific set of queries one after the other. +func TestFkQueries(t *testing.T) { + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(t) + // Remove all the foreign key constraints for all the replicas. + // We can then verify that the replica, and the primary have the same data, to ensure + // that none of the queries ever lead to cascades/updates on MySQL level. 
+ for _, ks := range []string{shardedKs, unshardedKs} { + replicas := getReplicaTablets(ks) + for _, replica := range replicas { + removeAllForeignKeyConstraints(t, replica, ks) + } + } + + testcases := []struct { + name string + queries []string + opts utils.CompareOptions + }{ + { + name: "Non-literal update", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "update fk_t10 set col = id + 3", + }, + }, { + name: "Non-literal update with order by", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "update fk_t10 set col = id + 3 order by id desc", + }, + }, { + name: "Non-literal update with order by that require parent and child foreign keys verification - success", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t12 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t13 (id, col) values (1,1),(2,2)", + "update fk_t11 set col = id + 3 where id >= 3", + }, + }, { + name: "Non-literal update with order by that require parent and child foreign keys verification - parent fails", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t12 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "update fk_t11 set col = id + 3", + }, + }, { + name: "Non-literal update with order by that require parent and child foreign keys verification - child fails", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t12 (id, col) 
values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t13 (id, col) values (1,1),(2,2)", + "update fk_t11 set col = id + 3", + }, + }, { + name: "Single column update in a multi-col table - success", + queries: []string{ + "insert into fk_multicol_t1 (id, cola, colb) values (1, 1, 1), (2, 2, 2)", + "insert into fk_multicol_t2 (id, cola, colb) values (1, 1, 1)", + "update fk_multicol_t1 set colb = 4 + (colb) where id = 2", + }, + }, { + name: "Single column update in a multi-col table - restrict failure", + queries: []string{ + "insert into fk_multicol_t1 (id, cola, colb) values (1, 1, 1), (2, 2, 2)", + "insert into fk_multicol_t2 (id, cola, colb) values (1, 1, 1)", + "update fk_multicol_t1 set colb = 4 + (colb) where id = 1", + }, + }, { + name: "Single column update in multi-col table - cascade and set null", + queries: []string{ + "insert into fk_multicol_t15 (id, cola, colb) values (1, 1, 1), (2, 2, 2)", + "insert into fk_multicol_t16 (id, cola, colb) values (1, 1, 1), (2, 2, 2)", + "insert into fk_multicol_t17 (id, cola, colb) values (1, 1, 1), (2, 2, 2)", + "update fk_multicol_t15 set colb = 4 + (colb) where id = 1", + }, + }, { + name: "Non literal update that evaluates to NULL - restricted", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t13 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "update fk_t10 set col = id + null where id = 1", + }, + }, { + name: "Non literal update that evaluates to NULL - success", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "insert into fk_t12 (id, col) values (1,1),(2,2),(3,3),(4,4),(5,5)", + "update fk_t10 set col = id + null where id = 1", + }, + }, { + name: "Multi column foreign key update with one literal and one non-literal update", + queries: []string{ + "insert 
into fk_multicol_t15 (id, cola, colb) values (1,1,1),(2,2,2)", + "insert into fk_multicol_t16 (id, cola, colb) values (1,1,1),(2,2,2)", + "update fk_multicol_t15 set cola = 3, colb = (id * 2) - 2", + }, + }, { + name: "Update that sets to 0 and -0 values", + queries: []string{ + "insert into fk_t15 (id, col) values (1,'-0'), (2, '0'), (3, '5'), (4, '-5')", + "insert into fk_t16 (id, col) values (1,'-0'), (2, '0'), (3, '5'), (4, '-5')", + "update fk_t15 set col = col * (col - (col))", + }, + }, + { + name: "Update a child table which doesn't cause an update, but parent doesn't have that value", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2)", + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t11 (id, col) values (1,1),(2,2),(5,5)", + "update fk_t11 set col = id where id in (1, 5)", + }, + }, + { + name: "Update a child table from a null to a value that parent doesn't have", + queries: []string{ + "insert into fk_t10 (id, col) values (1,1),(2,2)", + "insert into fk_t11 (id, col) values (1,1),(2,2),(5,NULL)", + "update fk_t11 set col = id where id in (1, 5)", + }, + }, + { + name: "Update on child to 0 when parent has -0", + queries: []string{ + "insert into fk_t15 (id, col) values (2, '-0')", + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t16 (id, col) values (3, '5'), (4, '-5')", + "update fk_t16 set col = col * (col - (col)) where id = 3", + "update fk_t16 set col = col * (col - (col)) where id = 4", + }, + }, + { + name: "Multi table delete that uses two tables related by foreign keys", + queries: []string{ + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t10 (id, col) values (1, '5'), (2, NULL), (3, NULL), (4, '4'), (6, '1'), (7, '2')", + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t11 (id, col) values (4, '1'), (5, '3'), (7, '22'), (8, '5'), (9, NULL), (10, '3')", + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t12 (id, col) values (2, NULL), (3, NULL), (4, '1'), (6, '6'), (8, NULL), (10, '1')", + 
"insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t13 (id, col) values (2, '1'), (5, '5'), (7, '5')", + "delete fk_t11 from fk_t11 join fk_t12 using (id) where fk_t11.id = 4", + }, + }, + { + name: "Multi table delete where MySQL and Vitess report different rows affected", + queries: []string{ + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t11 (id, col) values (4, '1'), (5, '3'), (7, '22'), (8, '5'), (9, NULL), (10, '3')", + "insert /*+ SET_VAR(foreign_key_checks=0) */ into fk_t12 (id, col) values (4, '1'), (5, '3'), (7, '22'), (8, '5'), (9, NULL), (10, '3')", + "delete fk_t11, fk_t12 from fk_t11 join fk_t12 using (id) where fk_t11.id = 5", + }, + opts: utils.CompareOptions{ + IgnoreRowsAffected: true, + }, + }, + } + + for _, tt := range testcases { + for _, keyspace := range []string{unshardedKs, shardedKs} { + t.Run(getTestName(tt.name, keyspace), func(t *testing.T) { + mcmp, closer := start(t) + defer closer() + if keyspace == shardedKs { + t.Skip("Skip test since we don't have sharded foreign key support yet") + } + // Set the correct keyspace to use from VtGates. + _ = utils.Exec(t, mcmp.VtConn, fmt.Sprintf("use `%v`", keyspace)) + + // Ensure that the Vitess database is originally empty + ensureDatabaseState(t, mcmp.VtConn, true) + ensureDatabaseState(t, mcmp.MySQLConn, true) + + for _, query := range tt.queries { + _, _ = mcmp.ExecAllowAndCompareError(query, tt.opts) + if t.Failed() { + break + } + } + + // ensure Vitess database has some data. This ensures not all the commands failed. + ensureDatabaseState(t, mcmp.VtConn, false) + // Verify the consistency of the data. 
+ verifyDataIsCorrect(t, mcmp, 1) + }) + } + } +} + +// TestShowVschemaKeyspaces verifies the show vschema keyspaces query output for the keyspaces where the foreign keys are +func TestShowVschemaKeyspaces(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + res := utils.Exec(t, conn, "SHOW VSCHEMA KEYSPACES") + resStr := fmt.Sprintf("%v", res.Rows) + require.Contains(t, resStr, `[VARCHAR("uks") VARCHAR("false") VARCHAR("managed") VARCHAR("")]`) + require.Contains(t, resStr, `[VARCHAR("ks") VARCHAR("true") VARCHAR("managed") VARCHAR("")]`) +} + +// TestFkOneCase is for testing a specific set of queries. On the CI this test won't run since we'll keep the queries empty. +func TestFkOneCase(t *testing.T) { + queries := []string{} + if len(queries) == 0 { + t.Skip("No queries to test") + } + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(t) + // Remove all the foreign key constraints for all the replicas. + // We can then verify that the replica, and the primary have the same data, to ensure + // that none of the queries ever lead to cascades/updates on MySQL level. 
+ for _, ks := range []string{shardedKs, unshardedKs} { + replicas := getReplicaTablets(ks) + for _, replica := range replicas { + removeAllForeignKeyConstraints(t, replica, ks) + } + } + + mcmp, closer := start(t) + defer closer() + _ = utils.Exec(t, mcmp.VtConn, "use `uks`") + + // Ensure that the Vitess database is originally empty + ensureDatabaseState(t, mcmp.VtConn, true) + ensureDatabaseState(t, mcmp.MySQLConn, true) + + for _, query := range queries { + if strings.HasPrefix(query, "vexplain") { + res := utils.Exec(t, mcmp.VtConn, query) + log.Errorf("Query %v, Result - %v", query, res.Rows) + continue + } + _, _ = mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) + if t.Failed() { + log.Errorf("Query failed - %v", query) + break + } + } + vitessData := collectFkTablesState(mcmp.VtConn) + for idx, table := range fkTables { + log.Errorf("Vitess data for %v -\n%v", table, vitessData[idx].Rows) + } + + // ensure Vitess database has some data. This ensures not all the commands failed. + ensureDatabaseState(t, mcmp.VtConn, false) + // Verify the consistency of the data. + verifyDataIsCorrect(t, mcmp, 1) +} + func TestCyclicFks(t *testing.T) { mcmp, closer := start(t) defer closer() @@ -782,18 +1248,259 @@ func TestCyclicFks(t *testing.T) { // Create a cyclic foreign key constraint. utils.Exec(t, mcmp.VtConn, "alter table fk_t10 add constraint test_cyclic_fks foreign key (col) references fk_t12 (col) on delete cascade on update cascade") - defer func() { - utils.Exec(t, mcmp.VtConn, "alter table fk_t10 drop foreign key test_cyclic_fks") - }() // Wait for schema-tracking to be complete. - ksErr := utils.WaitForKsError(t, clusterInstance.VtgateProcess, unshardedKs) - // Make sure Vschema has the error for cyclic foreign keys. 
- assert.Contains(t, ksErr, "VT09019: uks has cyclic foreign keys") + errString := utils.WaitForKsError(t, clusterInstance.VtgateProcess, unshardedKs) + assert.Contains(t, errString, "VT09019: keyspace 'uks' has cyclic foreign keys") // Ensure that the Vitess database is originally empty ensureDatabaseState(t, mcmp.VtConn, true) _, err := utils.ExecAllowError(t, mcmp.VtConn, "insert into fk_t10(id, col) values (1, 1)") - require.ErrorContains(t, err, "VT09019: uks has cyclic foreign keys") + require.ErrorContains(t, err, "VT09019: keyspace 'uks' has cyclic foreign keys") + + // Drop the cyclic foreign key constraint. + utils.Exec(t, mcmp.VtConn, "alter table fk_t10 drop foreign key test_cyclic_fks") + + // Let's clean out the cycle so that the other tests don't fail + utils.WaitForVschemaCondition(t, clusterInstance.VtgateProcess, unshardedKs, func(t *testing.T, keyspace map[string]any) bool { + _, fieldPresent := keyspace["error"] + return !fieldPresent + }, "wait for error to disappear") +} + +func TestReplace(t *testing.T) { + t.Skip("replace engine marked for failure, hence skipping this.") + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(t) + // Remove all the foreign key constraints for all the replicas. + // We can then verify that the replica, and the primary have the same data, to ensure + // that none of the queries ever lead to cascades/updates on MySQL level. 
+ for _, ks := range []string{shardedKs, unshardedKs} { + replicas := getReplicaTablets(ks) + for _, replica := range replicas { + removeAllForeignKeyConstraints(t, replica, ks) + } + } + + mcmp1, _ := start(t) + // defer closer1() + _ = utils.Exec(t, mcmp1.VtConn, "use `uks`") + + mcmp2, _ := start(t) + // defer closer2() + _ = utils.Exec(t, mcmp2.VtConn, "use `uks`") + + _ = utils.Exec(t, mcmp1.VtConn, "insert into fk_t2 values(1,5), (2,5)") + + done := false + go func() { + number := 1 + for !done { + query := fmt.Sprintf("replace /* g1q1 - %d */ into fk_t2 values(5,5)", number) + _, _ = utils.ExecAllowError(t, mcmp1.VtConn, query) + number++ + } + }() + + go func() { + number := 1 + for !done { + query := fmt.Sprintf("replace /* q1 - %d */ into fk_t3 values(3,5)", number) + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, query) + + query = fmt.Sprintf("replace /* q2 - %d */ into fk_t3 values(4,5)", number) + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, query) + number++ + } + }() + + totalTime := time.After(1 * time.Minute) + for !done { + select { + case <-totalTime: + done = true + case <-time.After(10 * time.Millisecond): + validateReplication(t) + } + } +} + +func TestReplaceExplicit(t *testing.T) { + t.Skip("explicit delete-insert in transaction fails, hence skipping") + // Wait for schema-tracking to be complete. + waitForSchemaTrackingForFkTables(t) + // Remove all the foreign key constraints for all the replicas. + // We can then verify that the replica, and the primary have the same data, to ensure + // that none of the queries ever lead to cascades/updates on MySQL level. 
+ for _, ks := range []string{shardedKs, unshardedKs} { + replicas := getReplicaTablets(ks) + for _, replica := range replicas { + removeAllForeignKeyConstraints(t, replica, ks) + } + } + + mcmp1, _ := start(t) + // defer closer1() + _ = utils.Exec(t, mcmp1.VtConn, "use `uks`") + + mcmp2, _ := start(t) + // defer closer2() + _ = utils.Exec(t, mcmp2.VtConn, "use `uks`") + + _ = utils.Exec(t, mcmp1.VtConn, "insert into fk_t2 values(1,5), (2,5)") + + done := false + go func() { + number := 0 + for !done { + number++ + _, _ = utils.ExecAllowError(t, mcmp1.VtConn, "begin") + query := fmt.Sprintf("delete /* g1q1 - %d */ from fk_t2 where id = 5", number) + _, err := utils.ExecAllowError(t, mcmp1.VtConn, query) + if err != nil { + _, _ = utils.ExecAllowError(t, mcmp1.VtConn, "rollback") + continue + } + query = fmt.Sprintf("insert /* g1q1 - %d */ into fk_t2 values(5,5)", number) + _, err = utils.ExecAllowError(t, mcmp1.VtConn, query) + if err != nil { + _, _ = utils.ExecAllowError(t, mcmp1.VtConn, "rollback") + continue + } + _, _ = utils.ExecAllowError(t, mcmp1.VtConn, "commit") + } + }() + + go func() { + number := 0 + for !done { + number++ + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "begin") + query := fmt.Sprintf("delete /* g1q1 - %d */ from fk_t3 where id = 3 or col = 5", number) + _, err := utils.ExecAllowError(t, mcmp2.VtConn, query) + if err != nil { + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "rollback") + } else { + query = fmt.Sprintf("insert /* g1q1 - %d */ into fk_t3 values(3,5)", number) + _, err = utils.ExecAllowError(t, mcmp2.VtConn, query) + if err != nil { + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "rollback") + } else { + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "commit") + } + } + + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "begin") + query = fmt.Sprintf("delete /* g1q1 - %d */ from fk_t3 where id = 4 or col = 5", number) + _, err = utils.ExecAllowError(t, mcmp2.VtConn, query) + if err != nil { + _, _ = utils.ExecAllowError(t, 
mcmp2.VtConn, "rollback") + continue + } + query = fmt.Sprintf("insert /* g1q1 - %d */ into fk_t3 values(4,5)", number) + _, err = utils.ExecAllowError(t, mcmp2.VtConn, query) + if err != nil { + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "rollback") + continue + } + _, _ = utils.ExecAllowError(t, mcmp2.VtConn, "commit") + } + }() + + totalTime := time.After(1 * time.Minute) + for !done { + select { + case <-totalTime: + done = true + case <-time.After(10 * time.Millisecond): + validateReplication(t) + } + } +} + +// TestReplaceWithFK tests that replace into work as expected when foreign key management is enabled in Vitess. +func TestReplaceWithFK(t *testing.T) { + mcmp, closer := start(t) + conn := mcmp.VtConn + defer closer() + + // replace some data. + _, err := utils.ExecAllowError(t, conn, `replace into t1(id, col) values (1, 1)`) + require.ErrorContains(t, err, "VT12001: unsupported: REPLACE INTO with sharded keyspace (errno 1235) (sqlstate 42000)") + + _ = utils.Exec(t, conn, `use uks`) + + _ = utils.Exec(t, conn, `replace into u_t1(id, col1) values (1, 1), (2, 1)`) + // u_t1: (1,1) (2,1) + + _ = utils.Exec(t, conn, `replace into u_t2(id, col2) values (1, 1), (2, 1)`) + // u_t1: (1,1) (2,1) + // u_t2: (1,1) (2,1) + + _ = utils.Exec(t, conn, `replace into u_t1(id, col1) values (2, 2)`) + // u_t1: (1,1) (2,2) + // u_t2: (1,null) (2,null) + + utils.AssertMatches(t, conn, `select * from u_t1`, `[[INT64(1) INT64(1)] [INT64(2) INT64(2)]]`) + utils.AssertMatches(t, conn, `select * from u_t2`, `[[INT64(1) NULL] [INT64(2) NULL]]`) +} + +// TestInsertWithFKOnDup tests that insertion with on duplicate key update works as expected. +func TestInsertWithFKOnDup(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + utils.Exec(t, mcmp.VtConn, "use `uks`") + + // insert some data. 
+ mcmp.Exec(`insert into u_t1(id, col1) values (100, 1), (200, 2), (300, 3), (400, 4)`) + mcmp.Exec(`insert into u_t2(id, col2) values (1000, 1), (2000, 2), (3000, 3), (4000, 4)`) + + // updating child to an existing value in parent. + mcmp.Exec(`insert into u_t2(id, col2) values (4000, 50) on duplicate key update col2 = 1`) + mcmp.AssertMatches(`select * from u_t2 order by id`, `[[INT64(1000) INT64(1)] [INT64(2000) INT64(2)] [INT64(3000) INT64(3)] [INT64(4000) INT64(1)]]`) + + // updating parent, value not referred in child. + mcmp.Exec(`insert into u_t1(id, col1) values (400, 50) on duplicate key update col1 = values(col1)`) + mcmp.AssertMatches(`select * from u_t1 order by id`, `[[INT64(100) INT64(1)] [INT64(200) INT64(2)] [INT64(300) INT64(3)] [INT64(400) INT64(50)]]`) + mcmp.AssertMatches(`select * from u_t2 order by id`, `[[INT64(1000) INT64(1)] [INT64(2000) INT64(2)] [INT64(3000) INT64(3)] [INT64(4000) INT64(1)]]`) + + // updating parent, child updated to null. + mcmp.Exec(`insert into u_t1(id, col1) values (100, 75) on duplicate key update col1 = values(col1)`) + mcmp.AssertMatches(`select * from u_t1 order by id`, `[[INT64(100) INT64(75)] [INT64(200) INT64(2)] [INT64(300) INT64(3)] [INT64(400) INT64(50)]]`) + mcmp.AssertMatches(`select * from u_t2 order by id`, `[[INT64(1000) NULL] [INT64(2000) INT64(2)] [INT64(3000) INT64(3)] [INT64(4000) NULL]]`) + + // inserting multiple rows in parent, some child rows updated to null. + mcmp.Exec(`insert into u_t1(id, col1) values (100, 42),(600, 2),(300, 24),(200, 2) on duplicate key update col1 = values(col1)`) + mcmp.AssertMatches(`select * from u_t1 order by id`, `[[INT64(100) INT64(42)] [INT64(200) INT64(2)] [INT64(300) INT64(24)] [INT64(400) INT64(50)] [INT64(600) INT64(2)]]`) + mcmp.AssertMatches(`select * from u_t2 order by id`, `[[INT64(1000) NULL] [INT64(2000) INT64(2)] [INT64(3000) NULL] [INT64(4000) NULL]]`) +} + +// TestDDLFk tests that table is created with fk constraint when foreign_key_checks is off. 
+func TestDDLFk(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + utils.Exec(t, mcmp.VtConn, `use uks`) + + createTableDDLTemp1 := ` +create table temp1(id bigint auto_increment primary key, col varchar(20) not null, +foreign key (col) references temp2(col)) +` + mcmp.Exec(`set foreign_key_checks = off`) + // should be able to create `temp1` table without a `temp2` + mcmp.Exec(createTableDDLTemp1) + + createTableDDLTemp2 := ` +create table temp2(id bigint auto_increment primary key, col varchar(20) not null, key (col)) +` + // now create `temp2` + mcmp.Exec(createTableDDLTemp2) + + // inserting some data with fk constraints on. + mcmp.Exec(`set foreign_key_checks = on`) + mcmp.Exec(`insert into temp2(col) values('a'), ('b'), ('c') `) + mcmp.Exec(`insert into temp1(col) values('a') `) + mcmp.ExecAllowAndCompareError(`insert into temp1(col) values('d') `, utils.CompareOptions{}) } diff --git a/go/test/endtoend/vtgate/foreignkey/main_test.go b/go/test/endtoend/vtgate/foreignkey/main_test.go index dae78ae93a1..b4d610785b5 100644 --- a/go/test/endtoend/vtgate/foreignkey/main_test.go +++ b/go/test/endtoend/vtgate/foreignkey/main_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package foreignkey import ( + "context" _ "embed" "flag" "fmt" @@ -31,25 +32,31 @@ import ( ) var ( - clusterInstance *cluster.LocalProcessCluster - vtParams mysql.ConnParams - mysqlParams mysql.ConnParams - vtgateGrpcAddress string - shardedKs = "ks" - unshardedKs = "uks" - Cell = "test" - //go:embed sharded_schema.sql - shardedSchemaSQL string - - //go:embed unsharded_schema.sql - unshardedSchemaSQL string + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + vtgateGrpcAddress string + shardedKs = "ks" + shardScopedKs = "sks" + unshardedKs = "uks" + unshardedUnmanagedKs = "unmanaged_uks" + Cell = "test" + + //go:embed schema.sql + schemaSQL string //go:embed sharded_vschema.json shardedVSchema string + //go:embed shard_scoped_vschema.json + shardScopedVSchema string + //go:embed unsharded_vschema.json unshardedVSchema string + //go:embed unsharded_unmanaged_vschema.json + unshardedUnmanagedVSchema string + fkTables = []string{"fk_t1", "fk_t2", "fk_t3", "fk_t4", "fk_t5", "fk_t6", "fk_t7", "fk_t10", "fk_t11", "fk_t12", "fk_t13", "fk_t15", "fk_t16", "fk_t17", "fk_t18", "fk_t19", "fk_t20", "fk_multicol_t1", "fk_multicol_t2", "fk_multicol_t3", "fk_multicol_t4", "fk_multicol_t5", "fk_multicol_t6", "fk_multicol_t7", @@ -107,7 +114,7 @@ func TestMain(m *testing.M) { // Start keyspace sKs := &cluster.Keyspace{ Name: shardedKs, - SchemaSQL: shardedSchemaSQL, + SchemaSQL: schemaSQL, VSchema: shardedVSchema, } @@ -116,9 +123,21 @@ func TestMain(m *testing.M) { return 1 } + // Start shard-scoped keyspace + ssKs := &cluster.Keyspace{ + Name: shardScopedKs, + SchemaSQL: schemaSQL, + VSchema: shardScopedVSchema, + } + + err = clusterInstance.StartKeyspace(*ssKs, []string{"-80", "80-"}, 1, false) + if err != nil { + return 1 + } + uKs := &cluster.Keyspace{ Name: unshardedKs, - SchemaSQL: unshardedSchemaSQL, + SchemaSQL: schemaSQL, VSchema: unshardedVSchema, } err = clusterInstance.StartUnshardedKeyspace(*uKs, 1, false) @@ 
-126,7 +145,17 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + unmanagedKs := &cluster.Keyspace{ + Name: unshardedUnmanagedKs, + SchemaSQL: schemaSQL, + VSchema: unshardedUnmanagedVSchema, + } + err = clusterInstance.StartUnshardedKeyspace(*unmanagedKs, 1, false) + if err != nil { + return 1 + } + + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } @@ -142,7 +171,7 @@ func TestMain(m *testing.M) { } vtgateGrpcAddress = fmt.Sprintf("%s:%d", clusterInstance.Hostname, clusterInstance.VtgateGrpcPort) - connParams, closer, err := utils.NewMySQL(clusterInstance, shardedKs, shardedSchemaSQL) + connParams, closer, err := utils.NewMySQL(clusterInstance, shardedKs, schemaSQL) if err != nil { fmt.Println(err) return 1 @@ -159,22 +188,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { require.NoError(t, err) deleteAll := func() { - _ = utils.Exec(t, mcmp.VtConn, "use `ks/-80`") - tables := []string{"t4", "t3", "t2", "t1", "multicol_tbl2", "multicol_tbl1"} - tables = append(tables, fkTables...) - for _, table := range tables { - _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) - } - _ = utils.Exec(t, mcmp.VtConn, "use `ks/80-`") - for _, table := range tables { - _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) - } - _ = utils.Exec(t, mcmp.VtConn, "use `uks`") - tables = []string{"u_t1", "u_t2", "u_t3"} - tables = append(tables, fkTables...) 
- for _, table := range tables { - _, _ = mcmp.ExecAndIgnore("delete /*+ SET_VAR(foreign_key_checks=OFF) */ from " + table) - } + clearOutAllData(t, mcmp.VtConn, mcmp.MySQLConn) _ = utils.Exec(t, mcmp.VtConn, "use `ks`") } @@ -186,3 +200,37 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { cluster.PanicHandler(t) } } + +func startBenchmark(b *testing.B) { + ctx := context.Background() + vtConn, err := mysql.Connect(ctx, &vtParams) + require.NoError(b, err) + mysqlConn, err := mysql.Connect(ctx, &mysqlParams) + require.NoError(b, err) + + clearOutAllData(b, vtConn, mysqlConn) +} + +func clearOutAllData(t testing.TB, vtConn *mysql.Conn, mysqlConn *mysql.Conn) { + tables := []string{"t4", "t3", "t2", "t1", "multicol_tbl2", "multicol_tbl1"} + tables = append(tables, fkTables...) + keyspaces := []string{`ks/-80`, `ks/80-`, `sks/-80`, `sks/80-`} + for _, keyspace := range keyspaces { + _ = utils.Exec(t, vtConn, fmt.Sprintf("use `%v`", keyspace)) + for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + _, _ = utils.ExecAllowError(t, mysqlConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + } + } + + tables = []string{"u_t1", "u_t2", "u_t3"} + tables = append(tables, fkTables...) 
+ keyspaces = []string{`uks`, `unmanaged_uks`} + for _, keyspace := range keyspaces { + _ = utils.Exec(t, vtConn, fmt.Sprintf("use `%v`", keyspace)) + for _, table := range tables { + _, _ = utils.ExecAllowError(t, vtConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + _, _ = utils.ExecAllowError(t, mysqlConn, "delete /*+ SET_VAR(foreign_key_checks=OFF) */ from "+table) + } + } +} diff --git a/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql b/go/test/endtoend/vtgate/foreignkey/schema.sql similarity index 95% rename from go/test/endtoend/vtgate/foreignkey/sharded_schema.sql rename to go/test/endtoend/vtgate/foreignkey/schema.sql index c1f511350f2..fd8bec5dc4a 100644 --- a/go/test/endtoend/vtgate/foreignkey/sharded_schema.sql +++ b/go/test/endtoend/vtgate/foreignkey/schema.sql @@ -73,6 +73,31 @@ create table t6 foreign key (sk, col1) references t5 (sk, col1) on delete restrict on update restrict ) Engine = InnoDB; +create table u_t1 +( + id bigint, + col1 bigint, + index(col1), + primary key (id) +) Engine = InnoDB; + +create table u_t2 +( + id bigint, + col2 bigint, + primary key (id), + foreign key (col2) references u_t1 (col1) on delete set null on update set null +) Engine = InnoDB; + +create table u_t3 +( + id bigint, + col3 bigint, + primary key (id), + foreign key (col3) references u_t1 (col1) on delete cascade on update cascade +) Engine = InnoDB; + + /* * fk_t1 * │ @@ -122,7 +147,7 @@ create table fk_t3 id bigint, col varchar(10), primary key (id), - index(col), + unique index(col), foreign key (col) references fk_t2(col) on delete set null on update set null ) Engine = InnoDB; @@ -184,7 +209,7 @@ create table fk_t10 id bigint, col varchar(10), primary key (id), - index(col) + unique index(col) ) Engine = InnoDB; create table fk_t11 @@ -243,7 +268,7 @@ create table fk_t15 id bigint, col varchar(10), primary key (id), - index(col) + unique index(col) ) Engine = InnoDB; create table fk_t16 @@ -251,7 +276,7 @@ create table fk_t16 id 
bigint, col varchar(10), primary key (id), - index(col), + unique index(col), foreign key (col) references fk_t15(col) on delete cascade on update cascade ) Engine = InnoDB; @@ -296,6 +321,7 @@ create table fk_t20 foreign key (col2) references fk_t20(col) on delete restrict on update restrict ) Engine = InnoDB; + /* * fk_multicol_t1 * │ @@ -328,16 +354,17 @@ create table fk_multicol_t1 colb varchar(10), cola varchar(10), primary key (id), - index(cola, colb) + index(cola, colb), + unique index(colb) ) Engine = InnoDB; create table fk_multicol_t2 ( id bigint, - colb varchar(10), + colb varchar(10) default 'xyz', cola varchar(10), primary key (id), - index(cola, colb), + unique index(cola, colb), foreign key (cola, colb) references fk_multicol_t1(cola, colb) on delete restrict on update restrict ) Engine = InnoDB; @@ -355,9 +382,10 @@ create table fk_multicol_t4 ( id bigint, colb varchar(10), - cola varchar(10), + cola varchar(10) default 'abcd', primary key (id), index(cola, colb), + unique index(cola), foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null ) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/foreignkey/shard_scoped_vschema.json b/go/test/endtoend/vtgate/foreignkey/shard_scoped_vschema.json new file mode 100644 index 00000000000..ed25d3becb0 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/shard_scoped_vschema.json @@ -0,0 +1,411 @@ +{ + "sharded": true, + "foreignKeyMode": "managed", + "vindexes": { + "xxhash": { + "type": "xxhash" + }, + "multicol_vdx": { + "type": "multicol", + "params": { + "column_count": "3", + "column_bytes": "1,3,4", + "column_vindex": "hash,binary,unicode_loose_xxhash" + } + }, + "multicol_fk_vdx": { + "type": "multicol", + "params": { + "column_count": "2", + "column_bytes": "4,4", + "column_vindex": "hash,binary" + } + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t2": { + "column_vindexes": [ + { + 
"column": "id", + "name": "xxhash" + } + ] + }, + "t3": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t4": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + }, + "t5": { + "column_vindexes": [ + { + "column": "sk", + "name": "xxhash" + } + ] + }, + "t6": { + "column_vindexes": [ + { + "column": "sk", + "name": "xxhash" + } + ] + }, + "multicol_tbl1": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicol_vdx" + } + ] + }, + "multicol_tbl2": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb", + "colc" + ], + "name": "multicol_vdx" + } + ] + }, + "fk_t1": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t2": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t3": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t4": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t5": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t6": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t7": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t10": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t11": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t12": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t13": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t15": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t16": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t17": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t18": { + "column_vindexes": [ + { + "column": 
"col", + "name": "xxhash" + } + ] + }, + "fk_t19": { + "column_vindexes": [ + { + "column": "col", + "name": "xxhash" + } + ] + }, + "fk_t20": { + "column_vindexes": [ + { + "column": "col2", + "name": "xxhash" + } + ] + }, + "fk_multicol_t1": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t2": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t3": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t4": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t5": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t6": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t7": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t10": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t11": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t12": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t13": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t15": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t16": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t17": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + 
"name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t18": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + }, + "fk_multicol_t19": { + "column_vindexes": [ + { + "columns": [ + "cola", + "colb" + ], + "name": "multicol_fk_vdx" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index e9f0602d235..b9240f46605 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -20,7 +20,7 @@ import ( "context" "flag" "fmt" - "math/rand" + "math/rand/v2" "os" "path" "runtime" @@ -138,7 +138,8 @@ var ( clusterInstance *cluster.LocalProcessCluster shards []cluster.Shard primary *cluster.Vttablet - replica *cluster.Vttablet + replicaNoFK *cluster.Vttablet + replicaFK *cluster.Vttablet vtParams mysql.ConnParams onlineDDLStrategy = "vitess --unsafe-allow-foreign-keys --cut-over-threshold=15s" @@ -333,7 +334,6 @@ func TestMain(m *testing.M) { "--heartbeat_on_demand_duration", "5s", "--migration_check_interval", "5s", "--watch_replication_stream", - "--vreplication_tablet_type", "primary", } clusterInstance.VtGateExtraArgs = []string{} @@ -351,7 +351,7 @@ func TestMain(m *testing.M) { } // We will use a replica to confirm that vtgate's cascading works correctly. 
- if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 1, false); err != nil { + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 2, false); err != nil { return 1, err } @@ -392,36 +392,42 @@ func tabletTestName(t *testing.T, tablet *cluster.Vttablet) string { switch tablet { case primary: return "primary" - case replica: - return "replica" + case replicaNoFK: + return "replicaNoFK" + case replicaFK: + return "replicaFK" default: assert.FailNowf(t, "unknown tablet", "%v, type=%v", tablet.Alias, tablet.Type) } return "" } +func validateReplicationIsHealthy(t *testing.T, tablet *cluster.Vttablet) (result bool) { + t.Run(tabletTestName(t, tablet), func(t *testing.T) { + result = cluster.ValidateReplicationIsHealthy(t, tablet) + }) + return result +} + func getTabletPosition(t *testing.T, tablet *cluster.Vttablet) replication.Position { rs := queryTablet(t, tablet, "select @@gtid_executed as gtid_executed", "") row := rs.Named().Row() require.NotNil(t, row) gtidExecuted := row.AsString("gtid_executed", "") require.NotEmpty(t, gtidExecuted) - pos, err := replication.DecodePositionDefaultFlavor(gtidExecuted, replication.Mysql56FlavorID) + pos, _, err := replication.DecodePositionMySQL56(gtidExecuted) assert.NoError(t, err) return pos } -func waitForReplicaCatchup(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - primaryPos := getTabletPosition(t, primary) +func waitForReplicaCatchup(t *testing.T, ctx context.Context, replica *cluster.Vttablet, pos replication.Position) { for { replicaPos := getTabletPosition(t, replica) - if replicaPos.GTIDSet.Contains(primaryPos.GTIDSet) { + if replicaPos.GTIDSet.Contains(pos.GTIDSet) { // success return } - if !cluster.ValidateReplicationIsHealthy(t, replica) { + if !validateReplicationIsHealthy(t, replica) { assert.FailNow(t, "replication is broken; not waiting for catchup") return } @@ -435,21 +441,41 @@ func waitForReplicaCatchup(t *testing.T) { } 
} +func waitForReplicationCatchup(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + primaryPos := getTabletPosition(t, primary) + var wg sync.WaitGroup + for _, replica := range []*cluster.Vttablet{replicaNoFK, replicaFK} { + replica := replica + wg.Add(1) + go func() { + waitForReplicaCatchup(t, ctx, replica, primaryPos) + wg.Done() + }() + } + wg.Wait() +} + func validateMetrics(t *testing.T, tcase *testCase) { for _, workloadTable := range tableNames { t.Run(workloadTable, func(t *testing.T) { t.Run("fk errors", func(t *testing.T) { testSelectTableFKErrors(t, workloadTable, tcase) }) - var primaryRows, replicaRows int64 + var primaryRows, replicaNoFKRows, replicaFKRows int64 t.Run(tabletTestName(t, primary), func(t *testing.T) { primaryRows = testSelectTableMetrics(t, primary, workloadTable, tcase) }) - t.Run(tabletTestName(t, replica), func(t *testing.T) { - replicaRows = testSelectTableMetrics(t, replica, workloadTable, tcase) + t.Run(tabletTestName(t, replicaNoFK), func(t *testing.T) { + replicaNoFKRows = testSelectTableMetrics(t, replicaNoFK, workloadTable, tcase) + }) + t.Run(tabletTestName(t, replicaFK), func(t *testing.T) { + replicaFKRows = testSelectTableMetrics(t, replicaFK, workloadTable, tcase) }) - t.Run("compare primary and replica", func(t *testing.T) { - assert.Equal(t, primaryRows, replicaRows) + t.Run("compare primary and replicas", func(t *testing.T) { + assert.Equal(t, primaryRows, replicaNoFKRows) + assert.Equal(t, primaryRows, replicaFKRows) }) }) } @@ -458,12 +484,16 @@ func validateMetrics(t *testing.T, tcase *testCase) { func TestInitialSetup(t *testing.T) { shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) - require.Equal(t, 2, len(shards[0].Vttablets)) + require.Equal(t, 3, len(shards[0].Vttablets)) // primary, no-fk replica, fk replica primary = shards[0].Vttablets[0] require.NotNil(t, primary) - replica = shards[0].Vttablets[1] - require.NotNil(t, 
replica) - require.NotEqual(t, primary.Alias, replica.Alias) + replicaNoFK = shards[0].Vttablets[1] + require.NotNil(t, replicaNoFK) + require.NotEqual(t, primary.Alias, replicaNoFK.Alias) + replicaFK = shards[0].Vttablets[2] + require.NotNil(t, replicaFK) + require.NotEqual(t, primary.Alias, replicaFK.Alias) + require.NotEqual(t, replicaNoFK.Alias, replicaFK.Alias) reverseTableNames = slices.Clone(tableNames) slices.Reverse(reverseTableNames) @@ -498,7 +528,8 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { workloadName = "workload" } testName := fmt.Sprintf("%s/del=%s/upd=%s", workloadName, referenceActionMap[tcase.onDeleteAction], referenceActionMap[tcase.onUpdateAction]) - if tcase.onlineDDLTable != "" { + testOnlineDDL := (tcase.onlineDDLTable != "") + if testOnlineDDL { testName = fmt.Sprintf("%s/ddl=%s", testName, tcase.onlineDDLTable) } if tcase.notes != "" { @@ -525,7 +556,7 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { baseSleepInterval := 15 * time.Millisecond singleConnectionSleepIntervalNanoseconds := float64(baseSleepInterval.Nanoseconds()) * sleepModifier sleepInterval := time.Duration(int64(singleConnectionSleepIntervalNanoseconds)) - if tcase.onlineDDLTable != "" { + if testOnlineDDL { sleepInterval = sleepInterval * 2 maxConcurrency = max(1, maxConcurrency/2) } @@ -544,7 +575,7 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { }() } - if tcase.onlineDDLTable != "" { + if testOnlineDDL { t.Run("migrating", func(t *testing.T) { // This only works on patched MySQL hint := tcase.createTableHint @@ -566,7 +597,7 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { artifacts := textutil.SplitDelimitedList(row.AsString("artifacts", "")) for _, artifact := range artifacts { t.Run(artifact, func(t *testing.T) { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, "drop table if exists "+artifact) require.NoError(t, 
err) }) } @@ -576,18 +607,21 @@ func ExecuteFKTest(t *testing.T, tcase *testCase) { wg.Wait() }) } - t.Run("wait for replica", func(t *testing.T) { - waitForReplicaCatchup(t) + t.Run("wait for replicas", func(t *testing.T) { + waitForReplicationCatchup(t) }) + validateTableDefinitions(t, testOnlineDDL) t.Run("validate metrics", func(t *testing.T) { validateMetrics(t, tcase) }) t.Run("validate replication health", func(t *testing.T) { - cluster.ValidateReplicationIsHealthy(t, replica) + validateReplicationIsHealthy(t, replicaNoFK) + validateReplicationIsHealthy(t, replicaFK) }) t.Run("validate fk", func(t *testing.T) { testFKIntegrity(t, primary, tcase) - testFKIntegrity(t, replica, tcase) + testFKIntegrity(t, replicaNoFK, tcase) + testFKIntegrity(t, replicaFK, tcase) }) }) } @@ -596,20 +630,26 @@ func TestStressFK(t *testing.T) { defer cluster.PanicHandler(t) t.Run("validate replication health", func(t *testing.T) { - cluster.ValidateReplicationIsHealthy(t, replica) + validateReplicationIsHealthy(t, replicaNoFK) + validateReplicationIsHealthy(t, replicaFK) }) runOnlineDDL := false t.Run("check 'rename_table_preserve_foreign_key' variable", func(t *testing.T) { // Online DDL is not possible on vanilla MySQL 8.0 for reasons described in https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/. - // However, Online DDL is made possible in via these changes: https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced - // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps1. - // Said changes introduce a new global/session boolean variable named 'rename_table_preserve_foreign_key'. It defaults 'false'/0 for backwards compatibility. - // When enabled, a `RENAME TABLE` to a FK parent "pins" the children's foreign keys to the table name rather than the table pointer. 
Which means after the RENAME, + // However, Online DDL is made possible via these changes: + // - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced + // - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 + // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. + // Said changes introduce a new behavior for `RENAME TABLE`. When at least two tables are being renamed in the statement, + // and when at least one table uses internal vitess naming, then a `RENAME TABLE` to a FK parent "pins" the children's + // foreign keys to the table name rather than the table pointer. Which means after the RENAME, + // the children will point to the newly instated table rather than the original, renamed table. - // (Note: this applies to a particular type of RENAME where we swap tables, see the above blog post). // For FK children, the MySQL changes simply ignore any Vitess-internal table. // + // The variable 'rename_table_preserve_foreign_key' serves as an indicator to the functionality's availability, + // and at this time changing its value does not change any behavior. + // // In this stress test, we enable Online DDL if the variable 'rename_table_preserve_foreign_key' is present. The Online DDL mechanism will in turn // query for this variable, and manipulate it, when starting the migration and when cutting over. 
rs, err := primary.VttabletProcess.QueryTablet("show global variables like 'rename_table_preserve_foreign_key'", keyspaceName, false) @@ -691,6 +731,47 @@ func TestStressFK(t *testing.T) { } } +func validateTableDefinitions(t *testing.T, afterOnlineDDL bool) { + t.Run("validate definitions", func(t *testing.T) { + for _, tableName := range []string{childTableName, child2TableName, grandchildTableName} { + t.Run(tableName, func(t *testing.T) { + childFKFollowedParentRenameMsg := "found traces of internal vitess table name, suggesting Online DDL on parent table caused this child table to follow the renamed parent. 'rename_table_preserve_foreign_key' should have prevented this" + var primaryStmt string + t.Run(tabletTestName(t, primary), func(t *testing.T) { + primaryStmt = getCreateTableStatement(t, primary, tableName) + assert.NotEmpty(t, primaryStmt) + assert.Contains(t, primaryStmt, "CONSTRAINT") + assert.NotContainsf(t, primaryStmt, "_vrepl", childFKFollowedParentRenameMsg) + assert.NotContainsf(t, primaryStmt, "_vrp_", childFKFollowedParentRenameMsg) + }) + t.Run(tabletTestName(t, replicaFK), func(t *testing.T) { + stmt := getCreateTableStatement(t, replicaFK, tableName) + assert.Contains(t, stmt, "CONSTRAINT") + assert.Equal(t, primaryStmt, stmt) + assert.NotContainsf(t, stmt, "_vrepl", childFKFollowedParentRenameMsg) + assert.NotContainsf(t, stmt, "_vrp_", childFKFollowedParentRenameMsg) + }) + t.Run(tabletTestName(t, replicaNoFK), func(t *testing.T) { + stmt := getCreateTableStatement(t, replicaNoFK, tableName) + // replicaNoFK does not have foreign keys, for the purpose of testing VTGate's cascading + // of foreign key rules. + // However, if we run Online DDL, the table will be swapped at the end of the migration. + // We're not sure here exactly which table has been migrated. Was it this table's parent? + // Or this table itself? Or an unrelated table? 
In case of Online DDL we don't want to + // validate this replica's schema, because it could be any one of several outcomes. And + // we don't even care what this replica's schema looks like after the migration. The + // schema was inconsistent with the Primary to begin with. We've already tested replicaFK + // for correctness of the schema. if !afterOnlineDDL { + assert.NotContains(t, stmt, "CONSTRAINT") + assert.NotEqual(t, primaryStmt, stmt) + } + }) + }) + } + }) +} + // createInitialSchema creates the tables from scratch, and drops the foreign key constraints on the replica. func createInitialSchema(t *testing.T, tcase *testCase) { ctx := context.Background() @@ -700,10 +781,15 @@ func createInitialSchema(t *testing.T, tcase *testCase) { t.Run("dropping tables", func(t *testing.T) { for _, tableName := range reverseTableNames { - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, "drop table if exists "+tableName) require.NoError(t, err) } }) + t.Run("waiting for vschema deletions to apply", func(t *testing.T) { + for _, tableName := range tableNames { + utils.WaitForTableDeletions(t, clusterInstance.VtgateProcess, keyspaceName, tableName) + } + }) t.Run("creating tables", func(t *testing.T) { // Create the stress tables var b strings.Builder @@ -720,7 +806,7 @@ func createInitialSchema(t *testing.T, tcase *testCase) { } b.WriteString(";") } - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, b.String()) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, b.String()) require.NoError(t, err) }) if tcase.preStatement != "" { @@ -729,8 +815,8 @@ func createInitialSchema(t *testing.T, tcase *testCase) { require.Nil(t, err) }) } - t.Run("wait for replica", func(t *testing.T) { - waitForReplicaCatchup(t) + t.Run("wait for replication", func(t *testing.T) { + waitForReplicationCatchup(t) }) t.Run("validating 
tables: vttablet", func(t *testing.T) { // Check if table is created. Checked on tablets. @@ -757,25 +843,12 @@ func createInitialSchema(t *testing.T, tcase *testCase) { t.Run("dropping foreign keys on replica", func(t *testing.T) { for _, statement := range dropConstraintsStatements { - _ = queryTablet(t, replica, "set global super_read_only=0", "") - _ = queryTablet(t, replica, statement, "") - _ = queryTablet(t, replica, "set global super_read_only=1", "") - } - }) - t.Run("validate definitions", func(t *testing.T) { - for _, tableName := range []string{childTableName, child2TableName, grandchildTableName} { - t.Run(tableName, func(t *testing.T) { - t.Run(tabletTestName(t, primary), func(t *testing.T) { - stmt := getCreateTableStatement(t, primary, tableName) - assert.Contains(t, stmt, "CONSTRAINT") - }) - t.Run(tabletTestName(t, replica), func(t *testing.T) { - stmt := getCreateTableStatement(t, replica, tableName) - assert.NotContains(t, stmt, "CONSTRAINT") - }) - }) + _ = queryTablet(t, replicaNoFK, "set global super_read_only=0", "") + _ = queryTablet(t, replicaNoFK, statement, "") + _ = queryTablet(t, replicaNoFK, "set global super_read_only=1", "") } }) + validateTableDefinitions(t, false) } // testOnlineDDLStatement runs an online DDL, ALTER statement @@ -787,7 +860,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) + uuid, err = clusterInstance.VtctldClientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.ApplySchemaParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) @@ -804,7 +877,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } if expectHint != "" { - stmt, err := sqlparser.Parse(alterStatement) + stmt, err := 
sqlparser.NewTestParser().Parse(alterStatement) require.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) require.True(t, ok) @@ -915,6 +988,8 @@ func isFKError(err error) bool { return false case sqlerror.ERLockDeadlock: return false // bummer, but deadlocks can happen, it's a legit error. + case sqlerror.ERLockNowait: + return false // For some queries we use NOWAIT. Bummer, but this can happen, it's a legit error. case sqlerror.ERNoReferencedRow, sqlerror.ERRowIsReferenced, sqlerror.ERRowIsReferenced2, @@ -930,8 +1005,8 @@ func isFKError(err error) bool { } func generateInsert(t *testing.T, tableName string, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) - parentId := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) + parentId := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(insertRowStatement, tableName, id, parentId) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -961,11 +1036,11 @@ func generateInsert(t *testing.T, tableName string, conn *mysql.Conn) error { func generateUpdate(t *testing.T, tableName string, conn *mysql.Conn) error { // Most of the UPDATEs we run are "normal" updates, but the minority will actually change the // `id` column itself, which is the FOREIGN KEY parent column for some of the tables. 
- id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(updateRowStatement, tableName, id) if tableName == parentTableName || tableName == childTableName { - if rand.Intn(4) == 0 { - updatedId := rand.Int31n(int32(maxTableRows)) + if rand.IntN(4) == 0 { + updatedId := rand.Int32N(int32(maxTableRows)) query = fmt.Sprintf(updateRowIdStatement, tableName, updatedId, id) } } @@ -995,7 +1070,7 @@ func generateUpdate(t *testing.T, tableName string, conn *mysql.Conn) error { } func generateDelete(t *testing.T, tableName string, conn *mysql.Conn) error { - id := rand.Int31n(int32(maxTableRows)) + id := rand.Int32N(int32(maxTableRows)) query := fmt.Sprintf(deleteRowStatement, tableName, id) qr, err := conn.ExecuteFetch(query, 1000, true) @@ -1034,7 +1109,7 @@ func runSingleConnection(ctx context.Context, t *testing.T, tableName string, sl require.Nil(t, err) for { - switch rand.Int31n(3) { + switch rand.Int32N(3) { case 0: _ = generateInsert(t, tableName, conn) case 1: diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql b/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql deleted file mode 100644 index 3b4496d47fb..00000000000 --- a/go/test/endtoend/vtgate/foreignkey/unsharded_schema.sql +++ /dev/null @@ -1,472 +0,0 @@ -create table u_t1 -( - id bigint, - col1 bigint, - index(col1), - primary key (id) -) Engine = InnoDB; - -create table u_t2 -( - id bigint, - col2 bigint, - primary key (id), - foreign key (col2) references u_t1 (col1) on delete set null on update set null -) Engine = InnoDB; - -create table u_t3 -( - id bigint, - col3 bigint, - primary key (id), - foreign key (col3) references u_t1 (col1) on delete cascade on update cascade -) Engine = InnoDB; - - -/* - * fk_t1 - * │ - * │ On Delete Restrict - * │ On Update Restrict - * ▼ - * ┌────────────────fk_t2────────────────┐ - * │ │ - * │On Delete Set Null │ On Delete Set Null - * │On Update Set Null │ On Update Set Null - * ▼ ▼ - * fk_t7 
fk_t3───────────────────┐ - * │ │ - * │ │ On Delete Set Null - * On Delete Set Null │ │ On Update Set Null - * On Update Set Null │ │ - * ▼ ▼ - * fk_t4 fk_t6 - * │ - * │ - * On Delete Restrict │ - * On Update Restrict │ - * │ - * ▼ - * fk_t5 - */ - -create table fk_t1 -( - id bigint, - col varchar(10), - primary key (id), - index(col) -) Engine = InnoDB; - -create table fk_t2 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t1(col) on delete restrict on update restrict -) Engine = InnoDB; - -create table fk_t3 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t2(col) on delete set null on update set null -) Engine = InnoDB; - -create table fk_t4 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t3(col) on delete set null on update set null -) Engine = InnoDB; - -create table fk_t5 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t4(col) on delete restrict on update restrict -) Engine = InnoDB; - -create table fk_t6 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t3(col) on delete set null on update set null -) Engine = InnoDB; - -create table fk_t7 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t2(col) on delete set null on update set null -) Engine = InnoDB; - -/* - * fk_t10 - * │ - * On Delete Cascade │ - * On Update Cascade │ - * │ - * ▼ - * fk_t11──────────────────┐ - * │ │ - * │ │ On Delete Restrict - * On Delete Cascade │ │ On Update Restrict - * On Update Cascade │ │ - * │ │ - * ▼ ▼ - * fk_t12 fk_t13 - */ - -create table fk_t10 -( - id bigint, - col varchar(10), - primary key (id), - index(col) -) Engine = InnoDB; - -create table fk_t11 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references 
fk_t10(col) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_t12 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t11(col) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_t13 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t11(col) on delete restrict on update restrict -) Engine = InnoDB; - -/* - * fk_t15 - * │ - * │ - * On Delete Cascade │ - * On Update Cascade │ - * │ - * ▼ - * fk_t16 - * │ - * On Delete Set Null │ - * On Update Set Null │ - * │ - * ▼ - * fk_t17──────────────────┐ - * │ │ - * │ │ On Delete Set Null - * On Delete Cascade │ │ On Update Set Null - * On Update Cascade │ │ - * │ │ - * ▼ ▼ - * fk_t18 fk_t19 - */ - -create table fk_t15 -( - id bigint, - col varchar(10), - primary key (id), - index(col) -) Engine = InnoDB; - -create table fk_t16 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t15(col) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_t17 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t16(col) on delete set null on update set null -) Engine = InnoDB; - -create table fk_t18 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t17(col) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_t19 -( - id bigint, - col varchar(10), - primary key (id), - index(col), - foreign key (col) references fk_t17(col) on delete set null on update set null -) Engine = InnoDB; - -/* - Self referenced foreign key from col2 to col in fk_t20 -*/ - -create table fk_t20 -( - id bigint, - col varchar(10), - col2 varchar(10), - primary key (id), - index(col), - foreign key (col2) references fk_t20(col) on delete restrict on update restrict -) Engine = InnoDB; - - -/* - * fk_multicol_t1 - * │ - * │ On 
Delete Restrict - * │ On Update Restrict - * ▼ - * ┌────────fk_multicol_t2───────────────┐ - * │ │ - * │On Delete Set Null │ On Delete Set Null - * │On Update Set Null │ On Update Set Null - * ▼ ▼ - * fk_multicol_t7 fk_multicol_t3───────────────────┐ - * │ │ - * │ │ On Delete Set Null - * On Delete Set Null │ │ On Update Set Null - * On Update Set Null │ │ - * ▼ ▼ - * fk_multicol_t4 fk_multicol_t6 - * │ - * │ - * On Delete Restrict │ - * On Update Restrict │ - * │ - * ▼ - * fk_multicol_t5 - */ -create table fk_multicol_t1 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb) -) Engine = InnoDB; - -create table fk_multicol_t2 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t1(cola, colb) on delete restrict on update restrict -) Engine = InnoDB; - -create table fk_multicol_t3 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t2(cola, colb) on delete set null on update set null -) Engine = InnoDB; - -create table fk_multicol_t4 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null -) Engine = InnoDB; - -create table fk_multicol_t5 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t4(cola, colb) on delete restrict on update restrict -) Engine = InnoDB; - -create table fk_multicol_t6 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t3(cola, colb) on delete set null on update set null -) Engine = InnoDB; - -create table fk_multicol_t7 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key 
(id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t2(cola, colb) on delete set null on update set null -) Engine = InnoDB; - -/* - * fk_multicol_t10 - * │ - * On Delete Cascade │ - * On Update Cascade │ - * │ - * ▼ - * fk_multicol_t11──────────────────┐ - * │ │ - * │ │ On Delete Restrict - * On Delete Cascade │ │ On Update Restrict - * On Update Cascade │ │ - * │ │ - * ▼ ▼ - * fk_multicol_t12 fk_multicol_t13 - */ - -create table fk_multicol_t10 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb) -) Engine = InnoDB; - -create table fk_multicol_t11 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t10(cola, colb) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_multicol_t12 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t11(cola, colb) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_multicol_t13 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t11(cola, colb) on delete restrict on update restrict -) Engine = InnoDB; - -/* - * fk_multicol_t15 - * │ - * │ - * On Delete Cascade │ - * On Update Cascade │ - * │ - * ▼ - * fk_multicol_t16 - * │ - * On Delete Set Null │ - * On Update Set Null │ - * │ - * ▼ - * fk_multicol_t17──────────────────┐ - * │ │ - * │ │ On Delete Set Null - * On Delete Cascade │ │ On Update Set Null - * On Update Cascade │ │ - * │ │ - * ▼ ▼ - * fk_multicol_t18 fk_multicol_t19 - */ - -create table fk_multicol_t15 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb) -) Engine = InnoDB; - -create table fk_multicol_t16 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - 
index(cola, colb), - foreign key (cola, colb) references fk_multicol_t15(cola, colb) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_multicol_t17 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t16(cola, colb) on delete set null on update set null -) Engine = InnoDB; - -create table fk_multicol_t18 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t17(cola, colb) on delete cascade on update cascade -) Engine = InnoDB; - -create table fk_multicol_t19 -( - id bigint, - colb varchar(10), - cola varchar(10), - primary key (id), - index(cola, colb), - foreign key (cola, colb) references fk_multicol_t17(cola, colb) on delete set null on update set null -) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json b/go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json new file mode 100644 index 00000000000..2698e23dac5 --- /dev/null +++ b/go/test/endtoend/vtgate/foreignkey/unsharded_unmanaged_vschema.json @@ -0,0 +1,41 @@ +{ + "sharded": false, + "foreignKeyMode": "unmanaged", + "tables": { + "u_t1": {}, + "u_t2": {}, + "fk_t1": {}, + "fk_t2": {}, + "fk_t3": {}, + "fk_t4": {}, + "fk_t5": {}, + "fk_t6": {}, + "fk_t7": {}, + "fk_t10": {}, + "fk_t11": {}, + "fk_t12": {}, + "fk_t13": {}, + "fk_t15": {}, + "fk_t16": {}, + "fk_t17": {}, + "fk_t18": {}, + "fk_t19": {}, + "fk_t20": {}, + "fk_multicol_t1": {}, + "fk_multicol_t2": {}, + "fk_multicol_t3": {}, + "fk_multicol_t4": {}, + "fk_multicol_t5": {}, + "fk_multicol_t6": {}, + "fk_multicol_t7": {}, + "fk_multicol_t10": {}, + "fk_multicol_t11": {}, + "fk_multicol_t12": {}, + "fk_multicol_t13": {}, + "fk_multicol_t15": {}, + "fk_multicol_t16": {}, + "fk_multicol_t17": {}, + "fk_multicol_t18": {}, + "fk_multicol_t19": {} + } +} \ No newline at end of file 
diff --git a/go/test/endtoend/vtgate/foreignkey/utils_test.go b/go/test/endtoend/vtgate/foreignkey/utils_test.go index 5e0b4a8a3cc..7866db03389 100644 --- a/go/test/endtoend/vtgate/foreignkey/utils_test.go +++ b/go/test/endtoend/vtgate/foreignkey/utils_test.go @@ -17,23 +17,28 @@ limitations under the License. package foreignkey import ( + "context" "database/sql" "fmt" + "math/rand/v2" + "slices" "strings" "testing" + "time" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) -// getTestName prepends whether the test is for a sharded keyspace or not to the test name. -func getTestName(testName string, testSharded bool) string { - if testSharded { - return "Sharded - " + testName - } - return "Unsharded - " + testName +var supportedOpps = []string{"*", "+", "-"} + +// getTestName prepends the test with keyspace name. +func getTestName(testName string, keyspace string) string { + return keyspace + " - " + testName } // isMultiColFkTable tells if the table is a multicol table or not. @@ -41,9 +46,35 @@ func isMultiColFkTable(tableName string) bool { return strings.Contains(tableName, "multicol") } +func (fz *fuzzer) generateExpression(length int, cols ...string) string { + expr := fz.getColOrInt(cols...) + if length == 1 { + return expr + } + rhsExpr := fz.generateExpression(length-1, cols...) + op := supportedOpps[rand.IntN(len(supportedOpps))] + return fmt.Sprintf("%v %s (%v)", expr, op, rhsExpr) +} + +// getColOrInt gets a column or an integer/NULL literal with equal probability. 
+func (fz *fuzzer) getColOrInt(cols ...string) string { + if len(cols) == 0 || rand.IntN(2) == 0 { + return convertIntValueToString(rand.IntN(1 + fz.maxValForCol)) + } + return cols[rand.IntN(len(cols))] +} + +// convertIntValueToString converts the given value to a string +func convertIntValueToString(value int) string { + if value == 0 { + return "NULL" + } + return fmt.Sprintf("%d", value) +} + // waitForSchemaTrackingForFkTables waits for schema tracking to have run and seen the tables used // for foreign key tests. -func waitForSchemaTrackingForFkTables(t *testing.T) { +func waitForSchemaTrackingForFkTables(t testing.TB) { err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t1", "col") require.NoError(t, err) err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, shardedKs, "fk_t18", "col") @@ -56,6 +87,8 @@ func waitForSchemaTrackingForFkTables(t *testing.T) { require.NoError(t, err) err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedKs, "fk_t11", "col") require.NoError(t, err) + err = utils.WaitForColumn(t, clusterInstance.VtgateProcess, unshardedUnmanagedKs, "fk_t11", "col") + require.NoError(t, err) } // getReplicaTablets gets all the replica tablets. @@ -142,3 +175,152 @@ func compareVitessAndMySQLErrors(t *testing.T, vtErr, mysqlErr error) { out := fmt.Sprintf("Vitess and MySQL are not erroring the same way.\nVitess error: %v\nMySQL error: %v", vtErr, mysqlErr) t.Error(out) } + +// ensureDatabaseState ensures that the database is either empty or not. +func ensureDatabaseState(t *testing.T, vtconn *mysql.Conn, empty bool) { + results := collectFkTablesState(vtconn) + isEmpty := true + for _, res := range results { + if len(res.Rows) > 0 { + isEmpty = false + } + } + require.Equal(t, isEmpty, empty) +} + +// verifyDataIsCorrect verifies that the data in MySQL database matches the data in the Vitess database. 
+func verifyDataIsCorrect(t *testing.T, mcmp utils.MySQLCompare, concurrency int) { + // For single concurrent thread, we run all the queries on both MySQL and Vitess, so we can verify correctness + // by just checking if the data in MySQL and Vitess match. + if concurrency == 1 { + for _, table := range fkTables { + query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) + mcmp.Exec(query) + } + } else { + // For higher concurrency, we don't have MySQL data to verify everything is fine, + // so we'll have to do something different. + // We run LEFT JOIN queries on all the parent and child tables linked by foreign keys + // to make sure that nothing is broken in the database. + for _, reference := range fkReferences { + query := fmt.Sprintf("select %v.id from %v left join %v on (%v.col = %v.col) where %v.col is null and %v.col is not null", reference.childTable, reference.childTable, reference.parentTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable) + if isMultiColFkTable(reference.childTable) { + query = fmt.Sprintf("select %v.id from %v left join %v on (%v.cola = %v.cola and %v.colb = %v.colb) where %v.cola is null and %v.cola is not null and %v.colb is not null", reference.childTable, reference.childTable, reference.parentTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable, reference.parentTable, reference.childTable, reference.childTable) + } + res, err := mcmp.VtConn.ExecuteFetch(query, 1000, false) + require.NoError(t, err) + require.Zerof(t, len(res.Rows), "Query %v gave non-empty results", query) + } + } + // We also verify that the results in Primary and Replica table match as is. 
+ for _, keyspace := range clusterInstance.Keyspaces { + for _, shard := range keyspace.Shards { + var primaryTab, replicaTab *cluster.Vttablet + for _, vttablet := range shard.Vttablets { + if vttablet.Type == "primary" { + primaryTab = vttablet + } else { + replicaTab = vttablet + } + } + require.NotNil(t, primaryTab) + require.NotNil(t, replicaTab) + checkReplicationHealthy(t, replicaTab) + cluster.WaitForReplicationPos(t, primaryTab, replicaTab, true, 1*time.Minute) + primaryConn, err := utils.GetMySQLConn(primaryTab, fmt.Sprintf("vt_%v", keyspace.Name)) + require.NoError(t, err) + replicaConn, err := utils.GetMySQLConn(replicaTab, fmt.Sprintf("vt_%v", keyspace.Name)) + require.NoError(t, err) + primaryRes := collectFkTablesState(primaryConn) + replicaRes := collectFkTablesState(replicaConn) + verifyDataMatches(t, primaryRes, replicaRes) + } + } +} + +// verifyDataMatches verifies that the two list of results are the same. +func verifyDataMatches(t testing.TB, resOne []*sqltypes.Result, resTwo []*sqltypes.Result) { + require.EqualValues(t, len(resTwo), len(resOne), "Res 1 - %v, Res 2 - %v", resOne, resTwo) + for idx, resultOne := range resOne { + resultTwo := resTwo[idx] + require.True(t, resultOne.Equal(resultTwo), "Data for %v doesn't match\nRows 1\n%v\nRows 2\n%v", fkTables[idx], resultOne.Rows, resultTwo.Rows) + } +} + +// collectFkTablesState collects the data stored in the foreign key tables for the given connection. 
+func collectFkTablesState(conn *mysql.Conn) []*sqltypes.Result { + var tablesData []*sqltypes.Result + for _, table := range fkTables { + query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) + res, _ := conn.ExecuteFetch(query, 10000, true) + tablesData = append(tablesData, res) + } + return tablesData +} + +func validateReplication(t *testing.T) { + for _, keyspace := range clusterInstance.Keyspaces { + for _, shard := range keyspace.Shards { + for _, vttablet := range shard.Vttablets { + if vttablet.Type != "primary" { + checkReplicationHealthy(t, vttablet) + } + } + } + } +} + +// compareResultRows compares the rows of the two results provided. +func compareResultRows(resOne *sqltypes.Result, resTwo *sqltypes.Result) bool { + return slices.EqualFunc(resOne.Rows, resTwo.Rows, func(a, b sqltypes.Row) bool { + return sqltypes.RowEqual(a, b) + }) +} + +// setupBenchmark sets up the benchmark by creating the set of queries that we want to run. It also ensures that the 3 modes (MySQL, Vitess Managed, Vitess Unmanaged) we verify all return the same results after the queries have been executed. +func setupBenchmark(b *testing.B, maxValForId int, maxValForCol int, insertShare int, deleteShare int, updateShare int, numQueries int) ([]string, *mysql.Conn, *mysql.Conn, *mysql.Conn) { + // Clear out all the data to ensure we start with a clean slate. + startBenchmark(b) + // Create a fuzzer to generate and store a certain set of queries. 
+ fz := newFuzzer(1, maxValForId, maxValForCol, insertShare, deleteShare, updateShare, SQLQueries, nil) + fz.noFkSetVar = true + var queries []string + for j := 0; j < numQueries; j++ { + genQueries := fz.generateQuery() + require.Len(b, genQueries, 1) + queries = append(queries, genQueries[0]) + } + + // Connect to MySQL and run all the queries + mysqlConn, err := mysql.Connect(context.Background(), &mysqlParams) + require.NoError(b, err) + // Connect to Vitess managed foreign keys keyspace + vtConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(b, err) + utils.Exec(b, vtConn, fmt.Sprintf("use `%v`", unshardedKs)) + // Connect to Vitess unmanaged foreign keys keyspace + vtUnmanagedConn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(b, err) + utils.Exec(b, vtUnmanagedConn, fmt.Sprintf("use `%v`", unshardedUnmanagedKs)) + + // First we make sure that running all the queries in both the Vitess modes and MySQL gives the same data. + // So we run all the queries and then check that the data in all of them matches. 
+ runQueries(b, mysqlConn, queries) + runQueries(b, vtConn, queries) + runQueries(b, vtUnmanagedConn, queries) + for _, table := range fkTables { + query := fmt.Sprintf("SELECT * FROM %v ORDER BY id", table) + resVitessManaged, _ := vtConn.ExecuteFetch(query, 10000, true) + resMySQL, _ := mysqlConn.ExecuteFetch(query, 10000, true) + resVitessUnmanaged, _ := vtUnmanagedConn.ExecuteFetch(query, 10000, true) + require.True(b, compareResultRows(resVitessManaged, resMySQL), "Results for %v don't match\nVitess Managed\n%v\nMySQL\n%v", table, resVitessManaged, resMySQL) + require.True(b, compareResultRows(resVitessUnmanaged, resMySQL), "Results for %v don't match\nVitess Unmanaged\n%v\nMySQL\n%v", table, resVitessUnmanaged, resMySQL) + } + return queries, mysqlConn, vtConn, vtUnmanagedConn +} + +func runQueries(t testing.TB, conn *mysql.Conn, queries []string) { + for _, query := range queries { + _, _ = utils.ExecAllowError(t, conn, query) + } +} diff --git a/go/test/endtoend/vtgate/gen4/gen4_test.go b/go/test/endtoend/vtgate/gen4/gen4_test.go index 8764328495c..f284f85e883 100644 --- a/go/test/endtoend/vtgate/gen4/gen4_test.go +++ b/go/test/endtoend/vtgate/gen4/gen4_test.go @@ -187,26 +187,6 @@ func TestSubQueriesOnOuterJoinOnCondition(t *testing.T) { } } -func TestPlannerWarning(t *testing.T) { - mcmp, closer := start(t) - defer closer() - - // straight_join query - _ = utils.Exec(t, mcmp.VtConn, `select 1 from t1 straight_join t2 on t1.id = t2.id`) - utils.AssertMatches(t, mcmp.VtConn, `show warnings`, `[[VARCHAR("Warning") UINT16(1235) VARCHAR("straight join is converted to normal join")]]`) - - // execute same query again. - _ = utils.Exec(t, mcmp.VtConn, `select 1 from t1 straight_join t2 on t1.id = t2.id`) - utils.AssertMatches(t, mcmp.VtConn, `show warnings`, `[[VARCHAR("Warning") UINT16(1235) VARCHAR("straight join is converted to normal join")]]`) - - // random query to reset the warning. 
- _ = utils.Exec(t, mcmp.VtConn, `select 1 from t1`) - - // execute same query again. - _ = utils.Exec(t, mcmp.VtConn, `select 1 from t1 straight_join t2 on t1.id = t2.id`) - utils.AssertMatches(t, mcmp.VtConn, `show warnings`, `[[VARCHAR("Warning") UINT16(1235) VARCHAR("straight join is converted to normal join")]]`) -} - func TestHashJoin(t *testing.T) { mcmp, closer := start(t) defer closer() diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go index 378b2d2969e..4c94e8e2ec8 100644 --- a/go/test/endtoend/vtgate/gen4/main_test.go +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -102,12 +102,12 @@ func TestMain(m *testing.M) { } // apply routing rules - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/godriver/main_test.go b/go/test/endtoend/vtgate/godriver/main_test.go index 492a68662fc..587c189d2ea 100644 --- a/go/test/endtoend/vtgate/godriver/main_test.go +++ b/go/test/endtoend/vtgate/godriver/main_test.go @@ -105,7 +105,7 @@ func TestMain(m *testing.M) { VSchema: VSchema, } clusterInstance.VtTabletExtraArgs = []string{ - "--queryserver-config-transaction-timeout", "3", + "--queryserver-config-transaction-timeout", "3s", } if err := clusterInstance.StartKeyspace(*Keyspace, []string{"-80", "80-"}, 1, false); err != nil { log.Fatal(err.Error()) diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index 12abcf4dd01..b276508f269 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -79,12 +79,12 @@ func TestMain(m *testing.M) { return 1 } - err = 
clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - _, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("RebuildVSchemaGraph") + _, err = clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index 83c41fd7183..128d930718c 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -17,16 +17,31 @@ limitations under the License. package vtgate import ( + "context" "fmt" + "sync/atomic" "testing" - - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/test/endtoend/utils" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) +func TestInsertOnDuplicateKey(t *testing.T) { + conn, closer := start(t) + defer closer() + + utils.Exec(t, conn, "insert into t11(id, sharding_key, col1, col2, col3) values(1, 2, 'a', 1, 2)") + utils.Exec(t, conn, "insert into t11(id, sharding_key, col1, col2, col3) values(1, 2, 'a', 1, 2) on duplicate key update id=10;") + utils.AssertMatches(t, conn, "select id, sharding_key from t11 where id=10", "[[INT64(10) INT64(2)]]") + +} + func TestInsertNeg(t *testing.T) { conn, closer := start(t) defer closer() @@ -306,19 +321,6 @@ func TestCreateIndex(t *testing.T) { utils.Exec(t, conn, `create index i2 on ks.t1000 (id1)`) } -func TestCreateView(t *testing.T) { - // The test wont work since we cant change the vschema without reloading the vtgate. 
- t.Skip() - conn, closer := start(t) - defer closer() - // Test that create view works and the output is as expected - utils.Exec(t, conn, `create view v1 as select * from t1`) - utils.Exec(t, conn, `insert into t1(id1, id2) values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)`) - // This wont work, since ALTER VSCHEMA ADD TABLE is only supported for unsharded keyspaces - utils.Exec(t, conn, "alter vschema add table v1") - utils.AssertMatches(t, conn, "select * from v1", `[[INT64(1) INT64(1)] [INT64(2) INT64(2)] [INT64(3) INT64(3)] [INT64(4) INT64(4)] [INT64(5) INT64(5)]]`) -} - func TestVersions(t *testing.T) { conn, closer := start(t) defer closer() @@ -336,6 +338,52 @@ func TestFlush(t *testing.T) { utils.Exec(t, conn, "flush local tables t1, t2") } +// TestFlushLock tests that ftwrl and unlock tables should unblock other session connections to execute the query. +func TestFlushLock(t *testing.T) { + conn, closer := start(t) + defer closer() + + // replica: fail it + utils.Exec(t, conn, "use @replica") + _, err := utils.ExecAllowError(t, conn, "flush tables ks.t1, ks.t2 with read lock") + require.ErrorContains(t, err, "VT09012: FLUSH statement with REPLICA tablet not allowed") + + // primary: should work + utils.Exec(t, conn, "use @primary") + utils.Exec(t, conn, "flush tables ks.t1, ks.t2 with read lock") + + var cnt atomic.Int32 + go func() { + ctx := context.Background() + conn2, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn2.Close() + + cnt.Add(1) + utils.Exec(t, conn2, "select * from ks.t1 for update") + cnt.Add(1) + }() + for cnt.Load() == 0 { + } + // added sleep to let the query execute inside the go routine, which should be blocked. + time.Sleep(1 * time.Second) + require.EqualValues(t, 1, cnt.Load()) + + // unlock it + utils.Exec(t, conn, "unlock tables") + + // now wait for go routine to complete. 
+ timeout := time.After(3 * time.Second) + for cnt.Load() != 2 { + select { + case <-timeout: + t.Fatalf("test timeout waiting for select query to complete") + default: + + } + } +} + func TestShowVariables(t *testing.T) { conn, closer := start(t) defer closer() @@ -730,8 +778,15 @@ func TestJoinWithMergedRouteWithPredicate(t *testing.T) { } func TestRowCountExceed(t *testing.T) { - conn, closer := start(t) - defer closer() + conn, _ := start(t) + defer func() { + cluster.PanicHandler(t) + // needs special delete logic as it exceeds row count. + for i := 50; i <= 300; i += 50 { + utils.Exec(t, conn, fmt.Sprintf("delete from t1 where id1 < %d", i)) + } + conn.Close() + }() for i := 0; i < 250; i++ { utils.Exec(t, conn, fmt.Sprintf("insert into t1 (id1, id2) values (%d, %d)", i, i+1)) @@ -739,3 +794,41 @@ func TestRowCountExceed(t *testing.T) { utils.AssertContainsError(t, conn, "select id1 from t1 where id1 < 1000", `Row count exceeded 100`) } + +func TestLookupErrorMetric(t *testing.T) { + conn, closer := start(t) + defer closer() + + oldErrCount := getVtgateApiErrorCounts(t) + + utils.Exec(t, conn, `insert into t1 values (1,1)`) + _, err := utils.ExecAllowError(t, conn, `insert into t1 values (2,1)`) + require.ErrorContains(t, err, `(errno 1062) (sqlstate 23000)`) + + newErrCount := getVtgateApiErrorCounts(t) + require.EqualValues(t, oldErrCount+1, newErrCount) +} + +func getVtgateApiErrorCounts(t *testing.T) float64 { + apiErr := getVar(t, "VtgateApiErrorCounts") + if apiErr == nil { + return 0 + } + mapErrors := apiErr.(map[string]interface{}) + val, exists := mapErrors["Execute.ks.primary.ALREADY_EXISTS"] + if exists { + return val.(float64) + } + return 0 +} + +func getVar(t *testing.T, key string) interface{} { + vars, err := clusterInstance.VtgateProcess.GetVars() + require.NoError(t, err) + + val, exists := vars[key] + if !exists { + return nil + } + return val +} diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go 
b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go index ed917efda4c..531e1077bf6 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go @@ -40,7 +40,21 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { deleteAll := func() { _, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp") - tables := []string{"t9", "aggr_test", "t3", "t7_xxhash", "aggr_test_dates", "t7_xxhash_idx", "t1", "t2", "t10"} + tables := []string{ + "t3", + "t3_id7_idx", + "t9", + "aggr_test", + "aggr_test_dates", + "t7_xxhash", + "t7_xxhash_idx", + "t1", + "t2", + "t10", + "emp", + "dept", + "bet_logs", + } for _, table := range tables { _, _ = mcmp.ExecAndIgnore("delete from " + table) } @@ -73,20 +87,33 @@ func TestAggregateTypes(t *testing.T) { mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by a", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by 2, a", `[[VARCHAR("b") INT64(1)] [VARCHAR("d") INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("c") INT64(2)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select sum(val1) from aggr_test", `[[FLOAT64(0)]]`) + mcmp.Run("Average for sharded keyspaces", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`) + }) + mcmp.Run("Average with group by without selecting the grouped columns", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(20, "vtgate") + mcmp.AssertMatches("select avg(val2) from aggr_test group by val1 order by val1", `[[DECIMAL(1.0000)] [DECIMAL(1.0000)] [DECIMAL(3.5000)] [NULL] [DECIMAL(1.0000)]]`) + }) } func TestGroupBy(t *testing.T) { mcmp, closer := start(t) defer closer() mcmp.Exec("insert into t3(id5, id6, id7) 
values(1,1,2), (2,2,4), (3,2,4), (4,1,2), (5,1,2), (6,3,6)") - // test ordering and group by int column - mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) - mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) - - // Test the same queries in streaming mode - utils.Exec(t, mcmp.VtConn, "set workload = olap") - mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) - mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) + + // run queries in both workloads + workloads := []string{"oltp", "olap"} + for _, workload := range workloads { + utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) + // test ordering and group by int column + mcmp.AssertMatches("select id6, id7, count(*) k from t3 group by id6, id7 order by k", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`) + mcmp.AssertMatches("select id6+id7, count(*) k from t3 group by id6+id7 order by k", `[[INT64(9) INT64(1)] [INT64(6) INT64(2)] [INT64(3) INT64(3)]]`) + if utils.BinaryIsAtLeastAtVersion(20, "vtgate") && + utils.BinaryIsAtLeastAtVersion(20, "vttablet") { + mcmp.Exec("select id6, id7, count(*) k from t3 group by id6, id7 with rollup") + } + } } func TestEqualFilterOnScatter(t *testing.T) { @@ -97,7 +124,7 @@ func TestEqualFilterOnScatter(t *testing.T) { workloads := []string{"oltp", "olap"} for _, workload := range workloads { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) 
mcmp.AssertMatches("select count(*) as a from aggr_test having 1 = 1", `[[INT64(5)]]`) @@ -172,6 +199,16 @@ func TestAggrOnJoin(t *testing.T) { mcmp.AssertMatches("select a.val1 from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", `[[VARCHAR("a")]]`) + + mcmp.Run("Average in join for sharded", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches(`select avg(a1.val2), avg(a2.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, + "[[DECIMAL(1.5000) DECIMAL(1.0000)]]") + + mcmp.AssertMatches(`select a1.val1, avg(a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, + `[[VARCHAR("a") DECIMAL(1.0000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.0000)]]`) + }) + } func TestNotEqualFilterOnScatter(t *testing.T) { @@ -182,7 +219,7 @@ func TestNotEqualFilterOnScatter(t *testing.T) { workloads := []string{"oltp", "olap"} for _, workload := range workloads { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) mcmp.AssertMatches("select count(*) as a from aggr_test having a != 5", `[]`) @@ -206,7 +243,7 @@ func TestLessFilterOnScatter(t *testing.T) { workloads := []string{"oltp", "olap"} for _, workload := range workloads { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) mcmp.AssertMatches("select count(*) as a from aggr_test having a < 10", `[[INT64(5)]]`) mcmp.AssertMatches("select count(*) as a from aggr_test having 1 < a", `[[INT64(5)]]`) @@ -229,7 +266,7 @@ func TestLessEqualFilterOnScatter(t *testing.T) { workloads := []string{"oltp", "olap"} for _, workload := range workloads { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp 
*utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) mcmp.AssertMatches("select count(*) as a from aggr_test having a <= 10", `[[INT64(5)]]`) @@ -253,7 +290,7 @@ func TestGreaterFilterOnScatter(t *testing.T) { workloads := []string{"oltp", "olap"} for _, workload := range workloads { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) mcmp.AssertMatches("select count(*) as a from aggr_test having a > 1", `[[INT64(5)]]`) @@ -277,7 +314,7 @@ func TestGreaterEqualFilterOnScatter(t *testing.T) { workloads := []string{"oltp", "olap"} for _, workload := range workloads { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) mcmp.AssertMatches("select count(*) as a from aggr_test having a >= 1", `[[INT64(5)]]`) @@ -312,24 +349,34 @@ func TestAggOnTopOfLimit(t *testing.T) { mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',6), (2,'a',1), (3,'b',1), (4,'c',3), (5,'c',4), (6,'b',null), (7,null,2), (8,null,null)") for _, workload := range []string{"oltp", "olap"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = '%s'", workload)) - mcmp.AssertMatches(" select count(*) from (select id, val1 from aggr_test where val2 < 4 limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select count(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") - mcmp.AssertMatches(" select count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") - mcmp.AssertMatches(" select count(val2) from (select id, 
val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") - mcmp.AssertMatches(" select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) - mcmp.AssertMatchesNoOrder(" select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) + mcmp.AssertMatches("select count(*) from (select id, val1 from aggr_test where val2 < 4 limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches("select count(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches("select count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") + mcmp.AssertMatches("select count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") + mcmp.AssertMatches("select count(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") + mcmp.AssertMatches("select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) + mcmp.AssertMatchesNoOrder("select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches("select avg(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[NULL]]") + mcmp.AssertMatchesNoOrder("select val1, avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL DECIMAL(2.0000)] [VARCHAR("a") DECIMAL(3.5000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") 
DECIMAL(3.5000)]]`) + }) // mysql returns FLOAT64(0), vitess returns DECIMAL(0) - mcmp.AssertMatchesNoCompare(" select count(*), sum(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0)]]", "[[INT64(2) FLOAT64(0)]]") - mcmp.AssertMatches(" select count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7)]]") - mcmp.AssertMatches(" select count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") - mcmp.AssertMatches(" select count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") - mcmp.AssertMatches(" select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") - mcmp.AssertMatches(" select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) - mcmp.AssertMatchesNoOrder(" select val1, count(val2), sum(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1) DECIMAL(2)] [VARCHAR("a") INT64(2) DECIMAL(7)] [VARCHAR("b") INT64(1) DECIMAL(1)] [VARCHAR("c") INT64(2) DECIMAL(7)]]`) + mcmp.AssertMatches("select count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7)]]") + mcmp.AssertMatches("select count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") + mcmp.AssertMatches("select count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") + mcmp.AssertMatches("select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") + 
mcmp.AssertMatches("select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches("select count(*), sum(val1), avg(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0) FLOAT64(0)]]") + mcmp.AssertMatches("select count(val1), sum(id), avg(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7) DECIMAL(3.5000)]]") + mcmp.AssertMatchesNoOrder("select val1, count(val2), sum(val2), avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", + `[[NULL INT64(1) DECIMAL(2) DECIMAL(2.0000)] [VARCHAR("a") INT64(2) DECIMAL(7) DECIMAL(3.5000)] [VARCHAR("b") INT64(1) DECIMAL(1) DECIMAL(1.0000)] [VARCHAR("c") INT64(2) DECIMAL(7) DECIMAL(3.5000)]]`) + }) }) } } @@ -339,24 +386,34 @@ func TestEmptyTableAggr(t *testing.T) { defer closer() for _, workload := range []string{"oltp", "olap"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, 
"vtgate") + mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]") + }) }) } mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'b1','bar',200)") for _, workload := range []string{"oltp", "olap"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + }) }) } @@ -399,6 +456,37 @@ func TestAggregateLeftJoin(t *testing.T) { mcmp.AssertMatches("SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`) mcmp.AssertMatches("SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`) 
mcmp.AssertMatches("SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'", `[[INT64(1)]]`) + + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches("SELECT avg(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(0.5000)]]`) + mcmp.AssertMatches("SELECT avg(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1.0000)]]`) + aggregations := []string{ + "count(t1.shardkey)", + "count(t2.shardkey)", + "sum(t1.shardkey)", + "sum(t2.shardkey)", + "avg(t1.shardkey)", + "avg(t2.shardkey)", + "count(*)", + } + + grouping := []string{ + "t1.t1_id", + "t1.shardKey", + "t1.value", + "t2.id", + "t2.shardKey", + } + + // quickly construct a big number of left join aggregation queries that have to be executed using the hash join + for _, agg := range aggregations { + for _, gb := range grouping { + query := fmt.Sprintf("SELECT %s FROM t1 LEFT JOIN (select id, shardkey from t2 limit 100) as t2 ON t1.t1_id = t2.id group by %s", agg, gb) + mcmp.Exec(query) + } + } + }) } // TestScalarAggregate tests validates that only count is returned and no additional field is returned.gst @@ -426,6 +514,10 @@ func TestScalarAggregate(t *testing.T) { mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") mcmp.AssertMatches("select count(distinct val1) from aggr_test", `[[INT64(3)]]`) + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`) + }) } func TestAggregationRandomOnAnAggregatedValue(t *testing.T) { @@ -482,6 +574,31 @@ func TestComplexAggregation(t *testing.T) { mcmp.Exec(`SELECT shardkey + MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) mcmp.Exec(`SELECT name+COUNT(t1_id)+1 FROM t1 GROUP BY name`) mcmp.Exec(`SELECT 
COUNT(*)+shardkey+MIN(t1_id)+1+MAX(t1_id)*SUM(t1_id)+1+name FROM t1 GROUP BY shardkey, name`) + mcmp.Run("Average in sharded query", func(mcmp *utils.MySQLCompare) { + mcmp.SkipIfBinaryIsBelowVersion(19, "vtgate") + mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey)+AVG(t1_id) FROM t1`) + }) +} + +func TestJoinAggregation(t *testing.T) { + // This is new functionality in Vitess 20 + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(t1_id, `name`, `value`, shardkey) values(1,'a1','foo',100), (2,'b1','foo',200), (3,'c1','foo',300), (4,'a1','foo',100), (5,'d1','toto',200), (6,'c1','tata',893), (7,'a1','titi',2380), (8,'b1','tete',12833), (9,'e1','yoyo',783493)") + + mcmp.Exec(`insert into bet_logs(id, merchant_game_id, bet_amount, game_id) values + (1, 1, 22.5, 40), (2, 1, 15.3, 40), + (3, 2, 22.5, 40), (4, 2, 15.3, 40), + (5, 3, 22.5, 40), (6, 3, 15.3, 40), + (7, 3, 22.5, 40), (8, 4, 15.3, 40) +`) + + mcmp.Exec("set @@sql_mode = ' '") + mcmp.Exec(`SELECT t1.name, SUM(b.bet_amount) AS bet_amount FROM bet_logs as b LEFT JOIN t1 ON b.merchant_game_id = t1.t1_id GROUP BY b.merchant_game_id`) + mcmp.Exec(`SELECT t1.name, CAST(SUM(b.bet_amount) AS DECIMAL(20,6)) AS bet_amount FROM bet_logs as b LEFT JOIN t1 ON b.merchant_game_id = t1.t1_id GROUP BY b.merchant_game_id`) } // TestGroupConcatAggregation tests the group_concat function with vitess doing the aggregation. 
@@ -501,6 +618,12 @@ func TestGroupConcatAggregation(t *testing.T) { compareRow(t, mQr, vtQr, nil, []int{0}) mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(value), t1.name FROM t1, t2 group by t1.name`) compareRow(t, mQr, vtQr, []int{1}, []int{0}) + if versionMet := utils.BinaryIsAtLeastAtVersion(19, "vtgate"); !versionMet { + // skipping + return + } + mQr, vtQr = mcmp.ExecNoCompare(`SELECT group_concat(name, value) FROM t1`) + compareRow(t, mQr, vtQr, nil, []int{0}) } func compareRow(t *testing.T, mRes *sqltypes.Result, vtRes *sqltypes.Result, grpCols []int, fCols []int) { @@ -540,6 +663,7 @@ func TestDistinctAggregation(t *testing.T) { tcases := []struct { query string expectedErr string + minVersion int }{{ query: `SELECT COUNT(DISTINCT value), SUM(DISTINCT shardkey) FROM t1`, expectedErr: "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct shardkey) (errno 1235) (sqlstate 42000)", @@ -553,10 +677,15 @@ func TestDistinctAggregation(t *testing.T) { }, { query: `SELECT a.value, SUM(DISTINCT b.t1_id), min(DISTINCT a.t1_id) FROM t1 a, t1 b group by a.value`, }, { - query: `SELECT distinct count(*) from t1, (select distinct count(*) from t1) as t2`, + minVersion: 19, + query: `SELECT count(distinct name, shardkey) from t1`, }} for _, tc := range tcases { + if versionMet := utils.BinaryIsAtLeastAtVersion(tc.minVersion, "vtgate"); !versionMet { + // skipping + continue + } mcmp.Run(tc.query, func(mcmp *utils.MySQLCompare) { _, err := mcmp.ExecAllowError(tc.query) if tc.expectedErr == "" { @@ -567,3 +696,84 @@ func TestDistinctAggregation(t *testing.T) { }) } } + +func TestHavingQueries(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + inserts := []string{ + `INSERT INTO emp (empno, ename, job, mgr, hiredate, sal, comm, deptno) VALUES + (1, 'John', 'Manager', NULL, '2022-01-01', 5000, 500, 1), + (2, 'Doe', 'Analyst', 1, '2023-01-01', 4500, NULL, 1), + (3, 'Jane', 'Clerk', 1, '2023-02-01', 3000, 200, 2), + (4, 
'Mary', 'Analyst', 2, '2022-03-01', 4700, NULL, 1), + (5, 'Smith', 'Salesman', 3, '2023-01-15', 3200, 300, 3)`, + "INSERT INTO dept (deptno, dname, loc) VALUES (1, 'IT', 'New York'), (2, 'HR', 'London'), (3, 'Sales', 'San Francisco')", + "INSERT INTO t1 (t1_id, name, value, shardKey) VALUES (1, 'Name1', 'Value1', 100), (2, 'Name2', 'Value2', 100), (3, 'Name1', 'Value3', 200)", + "INSERT INTO aggr_test_dates (id, val1, val2) VALUES (1, '2023-01-01', '2023-01-02'), (2, '2023-02-01', '2023-02-02'), (3, '2023-03-01', '2023-03-02')", + "INSERT INTO t10 (k, a, b) VALUES (1, 10, 20), (2, 30, 40), (3, 50, 60)", + "INSERT INTO t3 (id5, id6, id7) VALUES (1, 10, 100), (2, 20, 200), (3, 30, 300)", + "INSERT INTO t9 (id1, id2, id3) VALUES (1, 'A1', 'B1'), (2, 'A2', 'B2'), (3, 'A1', 'B3')", + "INSERT INTO aggr_test (id, val1, val2) VALUES (1, 'Test1', 100), (2, 'Test2', 200), (3, 'Test1', 300), (4, 'Test3', 400)", + "INSERT INTO t2 (id, shardKey) VALUES (1, 100), (2, 200), (3, 300)", + `INSERT INTO bet_logs (id, merchant_game_id, bet_amount, game_id) VALUES + (1, 1, 100.0, 10), + (2, 1, 200.0, 11), + (3, 2, 300.0, 10), + (4, 3, 400.0, 12)`, + } + + for _, insert := range inserts { + mcmp.Exec(insert) + } + + queries := []string{ + // The following queries are not allowed by MySQL but Vitess allows them + // SELECT ename FROM emp GROUP BY ename HAVING sal > 5000 + // SELECT val1, COUNT(val2) FROM aggr_test_dates GROUP BY val1 HAVING val2 > 5 + // SELECT k, a FROM t10 GROUP BY k HAVING b > 2 + // SELECT loc FROM dept GROUP BY loc HAVING COUNT(deptno) AND dname = 'Sales' + // SELECT AVG(val2) AS average_val2 FROM aggr_test HAVING val1 = 'Test' + + // these first queries are all failing in different ways. 
let's check that Vitess also fails + + "SELECT deptno, AVG(sal) AS average_salary HAVING average_salary > 5000 FROM emp", + "SELECT job, COUNT(empno) AS num_employees FROM emp HAVING num_employees > 2", + "SELECT dname, SUM(sal) FROM dept JOIN emp ON dept.deptno = emp.deptno HAVING AVG(sal) > 6000", + "SELECT COUNT(*) AS count FROM emp WHERE count > 5", + "SELECT `name`, AVG(`value`) FROM t1 GROUP BY `name` HAVING `name`", + "SELECT empno, MAX(sal) FROM emp HAVING COUNT(*) > 3", + "SELECT id, SUM(bet_amount) AS total_bets FROM bet_logs HAVING total_bets > 1000", + "SELECT merchant_game_id FROM bet_logs GROUP BY merchant_game_id HAVING SUM(bet_amount)", + "SELECT shardKey, COUNT(id) FROM t2 HAVING shardKey > 100", + "SELECT deptno FROM emp GROUP BY deptno HAVING MAX(hiredate) > '2020-01-01'", + + // These queries should not fail + "SELECT deptno, COUNT(*) AS num_employees FROM emp GROUP BY deptno HAVING num_employees > 5", + "SELECT ename, SUM(sal) FROM emp GROUP BY ename HAVING SUM(sal) > 10000", + "SELECT dname, AVG(sal) AS average_salary FROM emp JOIN dept ON emp.deptno = dept.deptno GROUP BY dname HAVING average_salary > 5000", + "SELECT dname, MAX(sal) AS max_salary FROM emp JOIN dept ON emp.deptno = dept.deptno GROUP BY dname HAVING max_salary < 10000", + "SELECT YEAR(hiredate) AS year, COUNT(*) FROM emp GROUP BY year HAVING COUNT(*) > 2", + "SELECT mgr, COUNT(empno) AS managed_employees FROM emp WHERE mgr IS NOT NULL GROUP BY mgr HAVING managed_employees >= 3", + "SELECT deptno, SUM(comm) AS total_comm FROM emp GROUP BY deptno HAVING total_comm > AVG(total_comm)", + "SELECT id2, COUNT(*) AS count FROM t9 GROUP BY id2 HAVING count > 1", + "SELECT val1, COUNT(*) FROM aggr_test GROUP BY val1 HAVING COUNT(*) > 1", + "SELECT DATE(val1) AS date, SUM(val2) FROM aggr_test_dates GROUP BY date HAVING SUM(val2) > 100", + "SELECT shardKey, AVG(`value`) FROM t1 WHERE `value` IS NOT NULL GROUP BY shardKey HAVING AVG(`value`) > 10", + "SELECT job, COUNT(*) AS job_count FROM 
emp GROUP BY job HAVING job_count > 3", + "SELECT b, AVG(a) AS avg_a FROM t10 GROUP BY b HAVING AVG(a) > 5", + "SELECT merchant_game_id, SUM(bet_amount) AS total_bets FROM bet_logs GROUP BY merchant_game_id HAVING total_bets > 1000", + "SELECT loc, COUNT(deptno) AS num_depts FROM dept GROUP BY loc HAVING num_depts > 1", + "SELECT `name`, COUNT(*) AS name_count FROM t1 GROUP BY `name` HAVING name_count > 2", + "SELECT COUNT(*) AS num_jobs FROM emp GROUP BY empno HAVING num_jobs > 1", + "SELECT id, COUNT(*) AS count FROM t2 GROUP BY id HAVING count > 1", + "SELECT val2, SUM(id) FROM aggr_test GROUP BY val2 HAVING SUM(id) > 10", + "SELECT game_id, COUNT(*) AS num_logs FROM bet_logs GROUP BY game_id HAVING num_logs > 5", + } + + for _, query := range queries { + mcmp.Run(query, func(mcmp *utils.MySQLCompare) { + mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) + }) + } +} diff --git a/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go b/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go index 0a06190923c..3ec27dae6a6 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/distinct_test.go @@ -46,9 +46,9 @@ func TestDistinctIt(t *testing.T) { mcmp.AssertMatchesNoOrder("select distinct id from aggr_test", `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(5)] [INT64(4)] [INT64(6)] [INT64(7)] [INT64(8)]]`) if utils.BinaryIsAtLeastAtVersion(17, "vtgate") { - mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ distinct val1 from aggr_test order by val1 desc", `[[VARCHAR("e")] [VARCHAR("d")] [VARCHAR("c")] [VARCHAR("b")] [VARCHAR("a")]]`) - mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=Gen4 */ distinct val1, count(*) from aggr_test group by val1", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) - mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=Gen4 */ distinct val1+val2 from aggr_test", `[[NULL] [FLOAT64(1)] 
[FLOAT64(3)] [FLOAT64(4)]]`) - mcmp.AssertMatchesNoOrder("select /*vt+ PLANNER=Gen4 */ distinct count(*) from aggr_test group by val1", `[[INT64(2)] [INT64(1)]]`) + mcmp.AssertMatches("select distinct val1 from aggr_test order by val1 desc", `[[VARCHAR("e")] [VARCHAR("d")] [VARCHAR("c")] [VARCHAR("b")] [VARCHAR("a")]]`) + mcmp.AssertMatchesNoOrder("select distinct val1, count(*) from aggr_test group by val1", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) + mcmp.AssertMatchesNoOrder("select distinct val1+val2 from aggr_test", `[[NULL] [FLOAT64(1)] [FLOAT64(3)] [FLOAT64(4)]]`) + mcmp.AssertMatchesNoOrder("select distinct count(*) from aggr_test group by val1", `[[INT64(2)] [INT64(1)]]`) } } diff --git a/go/test/endtoend/vtgate/queries/aggregation/schema.sql b/go/test/endtoend/vtgate/queries/aggregation/schema.sql index e1489b4bd21..49956b98302 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/schema.sql +++ b/go/test/endtoend/vtgate/queries/aggregation/schema.sql @@ -96,4 +96,12 @@ CREATE TABLE dept ( loc VARCHAR(13), PRIMARY KEY (deptno) ) Engine = InnoDB - COLLATE = utf8mb4_general_ci; \ No newline at end of file + COLLATE = utf8mb4_general_ci; + +CREATE TABLE bet_logs ( + id bigint unsigned NOT NULL, + merchant_game_id bigint unsigned NOT NULL, + bet_amount DECIMAL(20, 8), + game_id bigint, + PRIMARY KEY (id) +) ENGINE InnoDB; diff --git a/go/test/endtoend/vtgate/queries/aggregation/vschema.json b/go/test/endtoend/vtgate/queries/aggregation/vschema.json index 050202aed81..6c3cddf4436 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/vschema.json +++ b/go/test/endtoend/vtgate/queries/aggregation/vschema.json @@ -147,6 +147,14 @@ "name": "hash" } ] + }, + "bet_logs": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] } } } \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go 
b/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go index 3fd7edd14de..9a064c1769a 100644 --- a/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go +++ b/go/test/endtoend/vtgate/queries/benchmark/benchmark_test.go @@ -18,7 +18,7 @@ package dml import ( "fmt" - "math/rand" + "math/rand/v2" "strconv" "strings" "testing" @@ -48,11 +48,38 @@ func (tq *testQuery) getInsertQuery(rows int) string { return fmt.Sprintf("insert into %s(%s) values %s", tq.tableName, strings.Join(tq.cols, ","), strings.Join(allRows, ",")) } +func (tq *testQuery) getUpdateQuery(rows int) string { + var allRows []string + var row []string + for i, isInt := range tq.intTyp { + if isInt { + row = append(row, strconv.Itoa(i)) + continue + } + row = append(row, tq.cols[i]+" = '"+getRandomString(50)+"'") + } + allRows = append(allRows, strings.Join(row, ",")) + + var ids []string + for i := 0; i <= rows; i++ { + ids = append(ids, strconv.Itoa(i)) + } + return fmt.Sprintf("update %s set %s where id in (%s)", tq.tableName, strings.Join(allRows, ","), strings.Join(ids, ",")) +} + +func (tq *testQuery) getDeleteQuery(rows int) string { + var ids []string + for i := 0; i <= rows; i++ { + ids = append(ids, strconv.Itoa(i)) + } + return fmt.Sprintf("delete from %s where id in (%s)", tq.tableName, strings.Join(ids, ",")) +} + func getRandomString(size int) string { var str strings.Builder for i := 0; i < size; i++ { - str.WriteByte(byte(rand.Intn(27) + 97)) + str.WriteByte(byte(rand.IntN(27) + 97)) } return str.String() } @@ -78,3 +105,44 @@ func BenchmarkShardedTblNoLookup(b *testing.B) { }) } } + +func BenchmarkShardedTblUpdateIn(b *testing.B) { + conn, closer := start(b) + defer closer() + + cols := []string{"c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10", "c11", "c12"} + intType := make([]bool, len(cols)) + tq := &testQuery{ + tableName: "tbl_no_lkp_vdx", + cols: cols, + intTyp: intType, + } + insStmt := tq.getInsertQuery(10000) + _ = utils.Exec(b, conn, insStmt) + for _, 
rows := range []int{1, 10, 100, 500, 1000, 5000, 10000} { + updStmt := tq.getUpdateQuery(rows) + b.Run(fmt.Sprintf("16-shards-%d-rows", rows), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = utils.Exec(b, conn, updStmt) + } + }) + } +} + +func BenchmarkShardedTblDeleteIn(b *testing.B) { + conn, closer := start(b) + defer closer() + tq := &testQuery{ + tableName: "tbl_no_lkp_vdx", + } + for _, rows := range []int{1, 10, 100, 500, 1000, 5000, 10000} { + insStmt := tq.getInsertQuery(rows) + _ = utils.Exec(b, conn, insStmt) + delStmt := tq.getDeleteQuery(rows) + b.Run(fmt.Sprintf("16-shards-%d-rows", rows), func(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = utils.Exec(b, conn, delStmt) + } + }) + } +} diff --git a/go/test/endtoend/vtgate/queries/derived/cte_test.go b/go/test/endtoend/vtgate/queries/derived/cte_test.go index 677a5dba653..54d97261ae6 100644 --- a/go/test/endtoend/vtgate/queries/derived/cte_test.go +++ b/go/test/endtoend/vtgate/queries/derived/cte_test.go @@ -18,9 +18,12 @@ package misc import ( "testing" + + "vitess.io/vitess/go/test/endtoend/utils" ) func TestCTEWithOrderByLimit(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -28,6 +31,7 @@ func TestCTEWithOrderByLimit(t *testing.T) { } func TestCTEAggregationOnRHS(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -36,6 +40,7 @@ func TestCTEAggregationOnRHS(t *testing.T) { } func TestCTERemoveInnerOrderBy(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -43,6 +48,7 @@ func TestCTERemoveInnerOrderBy(t *testing.T) { } func TestCTEWithHaving(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -53,9 +59,34 @@ func TestCTEWithHaving(t *testing.T) { } func TestCTEColumns(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := 
start(t) defer closer() mcmp.AssertMatches(`with t(id) as (SELECT id FROM user) SELECT t.id FROM t ORDER BY t.id DESC`, `[[INT64(5)] [INT64(4)] [INT64(3)] [INT64(2)] [INT64(1)]]`) } + +func TestCTEAggregationsInUnion(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.AssertMatches(`WITH toto AS (SELECT COUNT(*) as num + FROM (SELECT user.id + FROM user + WHERE user.name = 'toto' + LIMIT 1000) t LIMIT 1 ), + tata AS (SELECT COUNT(*) as num + FROM (SELECT user.id + FROM user + WHERE user.name = 'tata' + LIMIT 1000) t LIMIT 1), + total AS (SELECT LEAST(1000, SUM(num)) AS num + FROM (SELECT num + FROM toto + UNION ALL SELECT num + FROM tata) t LIMIT 1) +SELECT 'total' AS tab, num +FROM total`, `[[VARCHAR("total") DECIMAL(2)]]`) +} diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index c3360ee4135..c41161d9bcf 100644 --- a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -52,7 +52,7 @@ func TestDerivedTableWithOrderByLimit(t *testing.T) { mcmp, closer := start(t) defer closer() - mcmp.Exec("select /*vt+ PLANNER=Gen4 */ music.id from music join (select id,name from user order by id limit 2) as d on music.user_id = d.id") + mcmp.Exec("select music.id from music join (select id,name from user order by id limit 2) as d on music.user_id = d.id") } func TestDerivedAggregationOnRHS(t *testing.T) { @@ -60,14 +60,14 @@ func TestDerivedAggregationOnRHS(t *testing.T) { defer closer() mcmp.Exec("set sql_mode = ''") - mcmp.Exec("select /*vt+ PLANNER=Gen4 */ d.a from music join (select id, count(*) as a from user) as d on music.user_id = d.id group by 1") + mcmp.Exec("select d.a from music join (select id, count(*) as a from user) as d on music.user_id = d.id group by 1") } func TestDerivedRemoveInnerOrderBy(t *testing.T) { mcmp, closer := start(t) defer closer() - 
mcmp.Exec("select /*vt+ PLANNER=Gen4 */ count(*) from (select user.id as oui, music.id as non from user join music on user.id = music.user_id order by user.name) as toto") + mcmp.Exec("select count(*) from (select user.id as oui, music.id as non from user join music on user.id = music.user_id order by user.name) as toto") } func TestDerivedTableWithHaving(t *testing.T) { @@ -76,7 +76,7 @@ func TestDerivedTableWithHaving(t *testing.T) { mcmp.Exec("set sql_mode = ''") // For the given query, we can get any id back, because we aren't grouping by it. - mcmp.AssertMatchesAnyNoCompare("select /*vt+ PLANNER=Gen4 */ * from (select id from user having count(*) >= 1) s", + mcmp.AssertMatchesAnyNoCompare("select * from (select id from user having count(*) >= 1) s", "[[INT64(1)]]", "[[INT64(2)]]", "[[INT64(3)]]", "[[INT64(4)]]", "[[INT64(5)]]") } @@ -84,6 +84,44 @@ func TestDerivedTableColumns(t *testing.T) { mcmp, closer := start(t) defer closer() - mcmp.AssertMatches(`SELECT /*vt+ PLANNER=gen4 */ t.id FROM (SELECT id FROM user) AS t(id) ORDER BY t.id DESC`, + mcmp.AssertMatches(`SELECT t.id FROM (SELECT id FROM user) AS t(id) ORDER BY t.id DESC`, `[[INT64(5)] [INT64(4)] [INT64(3)] [INT64(2)] [INT64(1)]]`) } + +// TestDerivedTablesWithLimit tests queries where we have to limit the right hand side of the join. 
+// We do this by not using the apply join we usually use, and instead use the hash join engine primitive +// These tests exercise these situations +func TestDerivedTablesWithLimit(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + // We need full type info before planning this, so we wait for the schema tracker + require.NoError(t, + utils.WaitForAuthoritative(t, keyspaceName, "user", clusterInstance.VtgateProcess.ReadVSchema)) + + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into user(id, name) values(6,'pikachu')") + + mcmp.AssertMatchesNoOrder( + `SELECT u.id, m.id FROM + (SELECT id, name FROM user LIMIT 10) AS u JOIN + (SELECT id, user_id FROM music LIMIT 10) as m on u.id = m.user_id`, + `[[INT64(1) INT64(1)] [INT64(5) INT64(2)] [INT64(1) INT64(3)] [INT64(2) INT64(4)] [INT64(3) INT64(5)] [INT64(5) INT64(7)] [INT64(4) INT64(6)]]`) + + mcmp.AssertMatchesNoOrder( + `SELECT u.id, m.id FROM user AS u LEFT JOIN + (SELECT id, user_id FROM music LIMIT 10) as m on u.id = m.user_id`, + `[[INT64(1) INT64(1)] [INT64(5) INT64(2)] [INT64(1) INT64(3)] [INT64(2) INT64(4)] [INT64(3) INT64(5)] [INT64(5) INT64(7)] [INT64(4) INT64(6)] [INT64(6) NULL]]`) +} + +// TestDerivedTableColumnAliasWithJoin tests the derived table having alias column and using it in the join condition +func TestDerivedTableColumnAliasWithJoin(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.Exec(`SELECT user.id FROM user join (SELECT id as uid FROM user) t on t.uid = user.id`) + mcmp.Exec(`SELECT user.id FROM user left join (SELECT id as uid FROM user) t on t.uid = user.id`) + mcmp.Exec(`SELECT user.id FROM user join (SELECT id FROM user) t(uid) on t.uid = user.id`) + mcmp.Exec(`SELECT user.id FROM user left join (SELECT id FROM user) t(uid) on t.uid = user.id`) +} diff --git a/go/test/endtoend/vtgate/queries/derived/schema.sql b/go/test/endtoend/vtgate/queries/derived/schema.sql index 
cf608028ed5..3cb8619d93b 100644 --- a/go/test/endtoend/vtgate/queries/derived/schema.sql +++ b/go/test/endtoend/vtgate/queries/derived/schema.sql @@ -1,13 +1,13 @@ create table user ( - id bigint, + id bigint, name varchar(255), primary key (id) ) Engine = InnoDB; create table music ( - id bigint, + id bigint, user_id bigint, primary key (id) ) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/queries/dml/dml_test.go b/go/test/endtoend/vtgate/queries/dml/dml_test.go index 52a64acaa56..4383f59e6c4 100644 --- a/go/test/endtoend/vtgate/queries/dml/dml_test.go +++ b/go/test/endtoend/vtgate/queries/dml/dml_test.go @@ -19,7 +19,12 @@ package dml import ( "testing" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestMultiEqual(t *testing.T) { @@ -39,3 +44,400 @@ func TestMultiEqual(t *testing.T) { qr = mcmp.Exec("delete from user_tbl where (id, region_id) in ((1,1), (2,4))") assert.EqualValues(t, 1, qr.RowsAffected) } + +// TestMultiTableDelete executed multi-table delete queries +func TestMultiTableDelete(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + mcmp.Exec("insert into oevent_tbl(oid, ename) values (1,'a'), (2,'b'), (3,'a'), (4,'c')") + + // multi table delete + qr := mcmp.Exec(`delete o from order_tbl o join oevent_tbl ev where o.oid = ev.oid and ev.ename = 'a'`) + assert.EqualValues(t, 2, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(3) VARCHAR("a")] [INT64(4) 
VARCHAR("c")]]`) + + qr = mcmp.Exec(`delete o from order_tbl o join oevent_tbl ev where o.cust_no = ev.oid`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("c")]]`) +} + +// TestDeleteWithLimit executed delete queries with limit +func TestDeleteWithLimit(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into s_tbl(id, num) values (1,10), (2,10), (3,10), (4,20), (5,5), (6,15), (7,17), (8,80)") + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + + // delete with limit + qr := mcmp.Exec(`delete from s_tbl order by num, id limit 3`) + require.EqualValues(t, 3, qr.RowsAffected) + + qr = mcmp.Exec(`delete from order_tbl where region_id = 1 limit 1`) + require.EqualValues(t, 1, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select id, num from s_tbl order by id`, + `[[INT64(3) INT64(10)] [INT64(4) INT64(20)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`) + // 2 rows matches but limit is 1, so any one of the row can remain in table. 
+ mcmp.AssertMatchesAnyNoCompare(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + + // delete with limit + qr = mcmp.Exec(`delete from s_tbl where num < 20 limit 2`) + require.EqualValues(t, 2, qr.RowsAffected) + + qr = mcmp.Exec(`delete from order_tbl limit 5`) + require.EqualValues(t, 3, qr.RowsAffected) + + // check rows + // 3 rows matches `num < 20` but limit is 2 so any one of them can remain in the table. + mcmp.AssertMatchesAnyNoCompare(`select id, num from s_tbl order by id`, + `[[INT64(4) INT64(20)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`, + `[[INT64(3) INT64(10)] [INT64(4) INT64(20)] [INT64(8) INT64(80)]]`, + `[[INT64(4) INT64(20)] [INT64(6) INT64(15)] [INT64(8) INT64(80)]]`) + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[]`) + + // remove all rows + mcmp.Exec(`delete from s_tbl`) + mcmp.Exec(`delete from order_tbl limit 5`) + + // try with limit again on empty table. 
+ qr = mcmp.Exec(`delete from s_tbl where num < 20 limit 2`) + require.EqualValues(t, 0, qr.RowsAffected) + + qr = mcmp.Exec(`delete from order_tbl limit 5`) + require.EqualValues(t, 0, qr.RowsAffected) + +} + +// TestUpdateWithLimit executed update queries with limit +func TestUpdateWithLimit(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into s_tbl(id, num) values (1,10), (2,10), (3,10), (4,20), (5,5), (6,15), (7,17), (8,80)") + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + + // update with limit + qr := mcmp.Exec(`update s_tbl set num = 12 order by num, id limit 3`) + require.EqualValues(t, 3, qr.RowsAffected) + + qr = mcmp.Exec(`update order_tbl set cust_no = 12 where region_id = 1 limit 1`) + require.EqualValues(t, 1, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select id, num from s_tbl order by id`, + `[[INT64(1) INT64(12)] [INT64(2) INT64(12)] [INT64(3) INT64(10)] [INT64(4) INT64(20)] [INT64(5) INT64(12)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`) + // 2 rows matches but limit is 1, so any one of the row can be modified in the table. + mcmp.AssertMatchesAnyNoCompare(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(12)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(12)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + + // update with limit + qr = mcmp.Exec(`update s_tbl set num = 32 where num > 17 limit 1`) + require.EqualValues(t, 1, qr.RowsAffected) + + qr = mcmp.Exec(`update order_tbl set cust_no = cust_no + 10 limit 5`) + require.EqualValues(t, 4, qr.RowsAffected) + + // check rows + // 2 rows matches `num > 17` but limit is 1 so any one of them will be updated. 
+ mcmp.AssertMatchesAnyNoCompare(`select id, num from s_tbl order by id`, + `[[INT64(1) INT64(12)] [INT64(2) INT64(12)] [INT64(3) INT64(10)] [INT64(4) INT64(32)] [INT64(5) INT64(12)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`, + `[[INT64(1) INT64(12)] [INT64(2) INT64(12)] [INT64(3) INT64(10)] [INT64(4) INT64(20)] [INT64(5) INT64(12)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(32)]]`) + mcmp.AssertMatchesAnyNoCompare(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(22)] [INT64(1) INT64(2) INT64(12)] [INT64(2) INT64(3) INT64(15)] [INT64(2) INT64(4) INT64(65)]]`, + `[[INT64(1) INT64(1) INT64(14)] [INT64(1) INT64(2) INT64(22)] [INT64(2) INT64(3) INT64(15)] [INT64(2) INT64(4) INT64(65)]]`) + + // trying with zero limit. + qr = mcmp.Exec(`update s_tbl set num = 44 limit 0`) + require.EqualValues(t, 0, qr.RowsAffected) + + qr = mcmp.Exec(`update order_tbl set oid = 44 limit 0`) + require.EqualValues(t, 0, qr.RowsAffected) + + // trying with limit with no-matching row. 
+ qr = mcmp.Exec(`update s_tbl set num = 44 where id > 100 limit 2`) + require.EqualValues(t, 0, qr.RowsAffected) + + qr = mcmp.Exec(`update order_tbl set oid = 44 where region_id > 100 limit 2`) + require.EqualValues(t, 0, qr.RowsAffected) + +} + +// TestMultiTableUpdate executed multi-table update queries +func TestMultiTableUpdate(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + mcmp.Exec("insert into oevent_tbl(oid, ename) values (1,'a'), (2,'b'), (3,'a'), (4,'c')") + + // multi table update + qr := mcmp.Exec(`update order_tbl o join oevent_tbl ev on o.oid = ev.oid set ev.ename = 'a' where ev.oid > 3`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("a")]]`) + + qr = mcmp.Exec(`update order_tbl o, oevent_tbl ev set ev.ename = 'xyz' where o.cust_no = ev.oid`) + assert.EqualValues(t, 2, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("xyz")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("xyz")]]`) +} + +// TestDeleteWithSubquery executed delete queries with subqueries +func TestDeleteWithSubquery(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + 
defer closer() + + // initial rows + mcmp.Exec("insert into s_tbl(id, num) values (1,10), (2,10), (3,10), (4,20), (5,5), (6,15), (7,17), (8,80)") + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + + // delete with subquery on s_tbl + qr := mcmp.Exec(`delete from s_tbl where id in (select oid from order_tbl)`) + require.EqualValues(t, 4, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select id, num from s_tbl order by id`, + `[[INT64(5) INT64(5)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`) + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + + // delete with subquery on order_tbl + qr = mcmp.Exec(`delete from order_tbl where cust_no > (select num from s_tbl where id = 7)`) + require.EqualValues(t, 1, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select id, num from s_tbl order by id`, + `[[INT64(5) INT64(5)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`) + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)]]`) + + // delete with subquery from same table (fails on mysql) - subquery get's merged so fails for vitess + _, err := mcmp.ExecAllowAndCompareError(`delete from s_tbl where id in (select id from s_tbl)`, utils.CompareOptions{}) + require.ErrorContains(t, err, "You can't specify target table 's_tbl' for update in FROM clause (errno 1093) (sqlstate HY000)") + + // delete with subquery from same table (fails on mysql) - subquery not merged so passes for vitess + qr = utils.Exec(t, mcmp.VtConn, `delete from order_tbl where region_id in (select cust_no from order_tbl)`) + require.EqualValues(t, 1, qr.RowsAffected) + + // check rows + utils.AssertMatches(t, mcmp.VtConn, `select 
id, num from s_tbl order by id`, + `[[INT64(5) INT64(5)] [INT64(6) INT64(15)] [INT64(7) INT64(17)] [INT64(8) INT64(80)]]`) + utils.AssertMatches(t, mcmp.VtConn, `select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)]]`) +} + +// TestMultiTargetDelete executed multi-target delete queries +func TestMultiTargetDelete(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + mcmp.Exec("insert into oevent_tbl(oid, ename) values (1,'a'), (2,'b'), (3,'a'), (2,'c')") + + // multi table delete + qr := mcmp.Exec(`delete o, ev from order_tbl o join oevent_tbl ev where o.oid = ev.oid and ev.ename = 'a'`) + assert.EqualValues(t, 4, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(2) VARCHAR("b")] [INT64(2) VARCHAR("c")]]`) + + qr = mcmp.Exec(`delete o, ev from order_tbl o join oevent_tbl ev where o.cust_no = ev.oid`) + assert.EqualValues(t, 3, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[]`) +} + +// TestMultiTargetDeleteMore executed multi-target delete queries with additional cases +func TestMultiTargetDeleteMore(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // multi table delete on empty table. 
+ qr := mcmp.Exec(`delete o, ev from order_tbl o join oevent_tbl ev on o.oid = ev.oid`) + assert.EqualValues(t, 0, qr.RowsAffected) + + // initial rows + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + mcmp.Exec("insert into oevent_tbl(oid, ename) values (1,'a'), (2,'b'), (3,'a'), (2,'c')") + + // multi table delete on non-existent data. + qr = mcmp.Exec(`delete o, ev from order_tbl o join oevent_tbl ev on o.oid = ev.oid where ev.oid = 10`) + assert.EqualValues(t, 0, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(2) VARCHAR("c")] [INT64(3) VARCHAR("a")]]`) + + // multi table delete with rollback + mcmp.Exec(`begin`) + qr = mcmp.Exec(`delete o, ev from order_tbl o join oevent_tbl ev on o.oid = ev.oid where o.cust_no != 4`) + assert.EqualValues(t, 5, qr.RowsAffected) + mcmp.Exec(`rollback`) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(2) VARCHAR("c")] [INT64(3) VARCHAR("a")]]`) +} + +// TestMultiTargetUpdate executed multi-target update queries +func TestMultiTargetUpdate(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + mcmp.Exec("insert into oevent_tbl(oid, ename) values (1,'a'), (2,'b'), (3,'a'), 
(4,'c')") + + // multi target update + qr := mcmp.Exec(`update order_tbl o join oevent_tbl ev on o.oid = ev.oid set ev.ename = 'a', o.cust_no = 1 where ev.oid > 3`) + assert.EqualValues(t, 2, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(1)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("a")]]`) + + qr = mcmp.Exec(`update order_tbl o, oevent_tbl ev set ev.ename = 'xyz', o.oid = 40 where o.cust_no = ev.oid and ev.ename = 'b'`) + assert.EqualValues(t, 2, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid, region_id`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(1)] [INT64(1) INT64(40) INT64(2)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("xyz")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("a")]]`) +} + +// TestMultiTargetNonLiteralUpdate executed multi-target update queries with non-literal values. 
+func TestMultiTargetNonLiteralUpdate(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,4), (1,2,2), (2,3,5), (2,4,55)") + mcmp.Exec("insert into oevent_tbl(oid, ename) values (1,'a'), (2,'b'), (3,'a'), (4,'c')") + + // multi target update + qr := mcmp.Exec(`update order_tbl o join oevent_tbl ev on o.oid = ev.oid set ev.ename = o.cust_no where ev.oid > 3`) + assert.EqualValues(t, 1, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(1) INT64(2) INT64(2)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("b")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("55")]]`) + + qr = mcmp.Exec(`update order_tbl o, oevent_tbl ev set ev.ename = 'xyz', o.oid = ev.oid + 40 where o.cust_no = ev.oid and ev.ename = 'b'`) + assert.EqualValues(t, 2, qr.RowsAffected) + + // check rows + mcmp.AssertMatches(`select region_id, oid, cust_no from order_tbl order by oid, region_id`, + `[[INT64(1) INT64(1) INT64(4)] [INT64(2) INT64(3) INT64(5)] [INT64(2) INT64(4) INT64(55)] [INT64(1) INT64(42) INT64(2)]]`) + mcmp.AssertMatches(`select oid, ename from oevent_tbl order by oid`, + `[[INT64(1) VARCHAR("a")] [INT64(2) VARCHAR("xyz")] [INT64(3) VARCHAR("a")] [INT64(4) VARCHAR("55")]]`) +} + +// TestDMLInUnique for update/delete statement using an IN clause with the Vindexes, +// the query is correctly split according to the corresponding values in the IN list. 
+func TestDMLInUnique(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // initial rows + mcmp.Exec("insert into user_tbl(id, region_id, `name`) values (1,1,'a'),(2,2,'a'),(3,3,'a'),(4,4,'a'),(5,5,'a'),(6,6,'a')") + + qr := mcmp.Exec("update user_tbl set `name` = 'b' where region_id in (1,2,3,4,5,6)") + assert.EqualValues(t, 6, qr.RowsAffected) + qr = mcmp.Exec("delete from user_tbl where region_id in (1,2,3,4,5,6)") + assert.EqualValues(t, 6, qr.RowsAffected) + mcmp.Exec("insert into user_tbl(id, region_id, `name`) values (1,1,'a'),(2,2,'a'),(3,3,'a'),(4,4,'a'),(5,5,'a'),(6,6,'a')") + + assertVExplainEquals := func(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := utils.Exec(t, conn, query) + // strip the first column from each row as it is not deterministic in a VExplain query + for i := range qr.Rows { + qr.Rows[i] = qr.Rows[i][1:] + } + if err := sqltypes.RowsEqualsStr(expected, qr.Rows); err != nil { + t.Error(err) + } + } + expected := `[ + [VARCHAR("sks") VARCHAR("-80") VARCHAR("begin")] + [VARCHAR("sks") VARCHAR("-80") VARCHAR("update user_tbl set ` + "`name`" + ` = 'b' where region_id in (1, 2, 3, 5)")] + [VARCHAR("sks") VARCHAR("80-") VARCHAR("begin")] + [VARCHAR("sks") VARCHAR("80-") VARCHAR("update user_tbl set ` + "`name`" + ` = 'b' where region_id in (4, 6)")] + ]` + assertVExplainEquals(t, mcmp.VtConn, "vexplain /*vt+ EXECUTE_DML_QUERIES */ queries update user_tbl set `name` = 'b' where region_id in (1,2,3,4,5,6)", expected) + + expected = `[ + [VARCHAR("sks") VARCHAR("-80") VARCHAR("begin")] + [VARCHAR("sks") VARCHAR("-80") VARCHAR("delete from user_tbl where region_id in (1, 2, 3, 5)")] + [VARCHAR("sks") VARCHAR("80-") VARCHAR("begin")] + [VARCHAR("sks") VARCHAR("80-") VARCHAR("delete from user_tbl where region_id in (4, 6)")] + ]` + assertVExplainEquals(t, mcmp.VtConn, "vexplain /*vt+ EXECUTE_DML_QUERIES */ queries delete from user_tbl where region_id 
in (1,2,3,4,5,6)", expected) +} diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go index 867b3b46fc8..026f53fe961 100644 --- a/go/test/endtoend/vtgate/queries/dml/insert_test.go +++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go @@ -21,7 +21,9 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -36,7 +38,7 @@ func TestSimpleInsertSelect(t *testing.T) { mcmp.Exec("insert into u_tbl(id, num) values (1,2),(3,4)") for i, mode := range []string{"oltp", "olap"} { - t.Run(mode, func(t *testing.T) { + mcmp.Run(mode, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode)) qr := mcmp.Exec(fmt.Sprintf("insert into s_tbl(id, num) select id*%d, num*%d from s_tbl where id < 10", 10+i, 20+i)) @@ -52,6 +54,27 @@ func TestSimpleInsertSelect(t *testing.T) { utils.AssertMatches(t, mcmp.VtConn, `select num from num_vdx_tbl order by num`, `[[INT64(2)] [INT64(4)] [INT64(40)] [INT64(42)] [INT64(80)] [INT64(84)]]`) } +// TestInsertOnDup test the insert on duplicate key update feature with argument and list argument. 
+func TestInsertOnDup(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into order_tbl(oid, region_id, cust_no) values (1,2,3),(3,4,5)") + + for _, mode := range []string{"oltp", "olap"} { + mcmp.Run(mode, func(mcmp *utils.MySQLCompare) { + utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode)) + + mcmp.Exec(`insert into order_tbl(oid, region_id, cust_no) values (2,2,3),(4,4,5) on duplicate key update cust_no = if(values(cust_no) in (1, 2, 3), region_id, values(cust_no))`) + mcmp.Exec(`select oid, region_id, cust_no from order_tbl order by oid, region_id`) + mcmp.Exec(`insert into order_tbl(oid, region_id, cust_no) values (7,2,2) on duplicate key update cust_no = 10 + values(cust_no)`) + mcmp.Exec(`select oid, region_id, cust_no from order_tbl order by oid, region_id`) + }) + } +} + func TestFailureInsertSelect(t *testing.T) { if clusterInstance.HasPartialKeyspaces { t.Skip("don't run on partial keyspaces") @@ -63,16 +86,25 @@ func TestFailureInsertSelect(t *testing.T) { mcmp.Exec("insert into u_tbl(id, num) values (1,2),(3,4)") for _, mode := range []string{"oltp", "olap"} { - t.Run(mode, func(t *testing.T) { + mcmp.Run(mode, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode)) // primary key same mcmp.AssertContainsError("insert into s_tbl(id, num) select id, num*20 from s_tbl where id = 1", `AlreadyExists desc = Duplicate entry '1' for key`) - // lookup key same (does not fail on MySQL as there is no lookup, and we have not put unique contrains on num column) - utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `lookup.Create: Code: ALREADY_EXISTS`) - // mismatch column count - mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count at row 1`) - mcmp.AssertContainsError("insert into s_tbl(id, num) 
select 100", `column count does not match value count at row 1`) + // lookup key same (does not fail on MySQL as there is no lookup, and we have not put unique constraint on num column) + vtgateVersion, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if vtgateVersion >= 19 { + utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`) + // mismatch column count + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`) + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`) + } else { + utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `lookup.Create: Code: ALREADY_EXISTS`) + // mismatch column count + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count at row 1`) + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count at row 1`) + } }) } } @@ -116,7 +148,7 @@ func TestAutoIncInsertSelect(t *testing.T) { }} for _, tcase := range tcases { - t.Run(tcase.query, func(t *testing.T) { + mcmp.Run(tcase.query, func(mcmp *utils.MySQLCompare) { qr := utils.Exec(t, mcmp.VtConn, tcase.query) assert.EqualValues(t, tcase.expRowsAffected, qr.RowsAffected) assert.EqualValues(t, tcase.expInsertID, qr.InsertID) @@ -167,7 +199,7 @@ func TestAutoIncInsertSelectOlapMode(t *testing.T) { }} for _, tcase := range tcases { - t.Run(tcase.query, func(t *testing.T) { + mcmp.Run(tcase.query, func(mcmp *utils.MySQLCompare) { qr := utils.Exec(t, mcmp.VtConn, tcase.query) assert.EqualValues(t, tcase.expRowsAffected, qr.RowsAffected) assert.EqualValues(t, tcase.expInsertID, qr.InsertID) @@ -298,7 +330,7 @@ func TestIgnoreInsertSelect(t *testing.T) { mcmp.Exec("insert into 
order_tbl(region_id, oid, cust_no) values (1,1,100),(1,2,200),(1,3,300)") // inserting same rows, throws error. - mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `lookup.Create: Code: ALREADY_EXISTS`) + mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `(errno 1062) (sqlstate 23000)`) // inserting same rows with ignore qr := mcmp.Exec("insert ignore into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl") assert.EqualValues(t, 0, qr.RowsAffected) @@ -336,7 +368,7 @@ func TestIgnoreInsertSelectOlapMode(t *testing.T) { mcmp.Exec("insert into order_tbl(region_id, oid, cust_no) values (1,1,100),(1,2,200),(1,3,300)") // inserting same rows, throws error. - mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `lookup.Create: Code: ALREADY_EXISTS`) + mcmp.AssertContainsError("insert into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl", `(errno 1062) (sqlstate 23000)`) // inserting same rows with ignore qr := mcmp.Exec("insert ignore into order_tbl(region_id, oid, cust_no) select region_id, oid, cust_no from order_tbl") assert.EqualValues(t, 0, qr.RowsAffected) @@ -375,7 +407,7 @@ func TestInsertSelectUnshardedUsingSharded(t *testing.T) { mcmp.Exec("insert into s_tbl(id, num) values (1,2),(3,4)") for _, mode := range []string{"oltp", "olap"} { - t.Run(mode, func(t *testing.T) { + mcmp.Run(mode, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", mode)) qr := mcmp.Exec("insert into u_tbl(id, num) select id, num from s_tbl where s_tbl.id in (1,3)") assert.EqualValues(t, 2, qr.RowsAffected) @@ -442,7 +474,7 @@ func TestMixedCases(t *testing.T) { }} for _, tc := range tcases { - t.Run(tc.insQuery, func(t *testing.T) { + mcmp.Run(tc.insQuery, func(mcmp 
*utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, tc.insQuery) utils.AssertMatches(t, mcmp.VtConn, tc.selQuery, tc.exp) }) @@ -451,3 +483,27 @@ func TestMixedCases(t *testing.T) { // final check count on the lookup vindex table. utils.AssertMatches(t, mcmp.VtConn, "select count(*) from lkp_mixed_idx", "[[INT64(12)]]") } + +// TestInsertAlias test the alias feature in insert statement. +func TestInsertAlias(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + utils.SkipIfBinaryIsBelowVersion(t, 20, "vttablet") + + mcmp, closer := start(t) + defer closer() + + // initial record + mcmp.Exec("insert into user_tbl(id, region_id, name) values (1, 1,'foo'),(2, 2,'bar'),(3, 3,'baz'),(4, 4,'buzz')") + + qr := mcmp.Exec("insert into user_tbl(id, region_id, name) values (2, 2, 'foo') as new on duplicate key update name = new.name") + assert.EqualValues(t, 2, qr.RowsAffected) + + // this validates the record. + mcmp.Exec("select id, region_id, name from user_tbl order by id") + + qr = mcmp.Exec("insert into user_tbl(id, region_id, name) values (3, 3, 'foo') as new(m, n, p) on duplicate key update name = p") + assert.EqualValues(t, 2, qr.RowsAffected) + + // this validates the record. 
+ mcmp.Exec("select id, region_id, name from user_tbl order by id") +} diff --git a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql index 3310724d420..8ddf9250e45 100644 --- a/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql +++ b/go/test/endtoend/vtgate/queries/dml/sharded_schema.sql @@ -25,7 +25,8 @@ create table order_tbl oid bigint, region_id bigint, cust_no bigint unique key, - primary key (oid, region_id) + primary key (oid, region_id), + unique key (oid) ) Engine = InnoDB; create table oid_vdx_tbl diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index e33daf061bc..ec55711a31f 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -21,14 +21,12 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -203,6 +201,10 @@ func TestMultipleSchemaPredicates(t *testing.T) { _, err := mcmp.VtConn.ExecuteFetch(query, 1000, true) require.Error(t, err) require.Contains(t, err.Error(), "specifying two different database in the query is not supported") + + if utils.BinaryIsAtLeastAtVersion(20, "vtgate") { + _, _ = mcmp.ExecNoCompare("select * from information_schema.columns where table_schema = '' limit 1") + } } func TestInfrSchemaAndUnionAll(t *testing.T) { @@ -222,7 +224,28 @@ func TestInfrSchemaAndUnionAll(t *testing.T) { } } +func TestInfoschemaTypes(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + + require.NoError(t, + utils.WaitForAuthoritative(t, "ks", "t1", 
clusterInstance.VtgateProcess.ReadVSchema)) + + mcmp, closer := start(t) + defer closer() + + mcmp.Exec(` + SELECT ORDINAL_POSITION + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 't1' + UNION + SELECT ORDINAL_POSITION + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 't2'; + `) +} + func TestTypeORMQuery(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") // This test checks that we can run queries similar to the ones that the TypeORM framework uses require.NoError(t, @@ -231,7 +254,7 @@ func TestTypeORMQuery(t *testing.T) { mcmp, closer := start(t) defer closer() - query := `SELECT kcu.TABLE_NAME, kcu.COLUMN_NAME, cols.DATA_TYPE + utils.AssertMatchesAny(t, mcmp.VtConn, `SELECT kcu.TABLE_NAME, kcu.COLUMN_NAME, cols.DATA_TYPE FROM (SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE kcu WHERE kcu.TABLE_SCHEMA = 'ks' @@ -251,9 +274,51 @@ FROM (SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME WHERE cols.TABLE_SCHEMA = 'ks' AND cols.TABLE_NAME = 't7_xxhash') cols ON kcu.TABLE_SCHEMA = cols.TABLE_SCHEMA AND kcu.TABLE_NAME = cols.TABLE_NAME AND - kcu.COLUMN_NAME = cols.COLUMN_NAME` - utils.AssertMatchesAny(t, mcmp.VtConn, query, + kcu.COLUMN_NAME = cols.COLUMN_NAME`, `[[VARBINARY("t1") VARCHAR("id1") BLOB("bigint")] [VARBINARY("t7_xxhash") VARCHAR("uid") BLOB("varchar")]]`, `[[VARCHAR("t1") VARCHAR("id1") BLOB("bigint")] [VARCHAR("t7_xxhash") VARCHAR("uid") BLOB("varchar")]]`, ) + + utils.AssertMatchesAny(t, mcmp.VtConn, ` +SELECT * +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 't1' +UNION +SELECT * +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_SCHEMA = 'ks' AND TABLE_NAME = 't2'; +`, + `[[VARBINARY("def") VARBINARY("vt_ks") VARBINARY("t1") VARCHAR("id1") UINT32(1) NULL VARCHAR("NO") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("PRI") VARCHAR("") VARCHAR("select,insert,update,references") 
BLOB("") BLOB("") NULL] [VARBINARY("def") VARBINARY("vt_ks") VARBINARY("t1") VARCHAR("id2") UINT32(2) NULL VARCHAR("YES") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL] [VARBINARY("def") VARBINARY("vt_ks") VARBINARY("t2") VARCHAR("id") UINT32(1) NULL VARCHAR("NO") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("PRI") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL] [VARBINARY("def") VARBINARY("vt_ks") VARBINARY("t2") VARCHAR("value") UINT32(2) NULL VARCHAR("YES") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL]]`, + `[[VARCHAR("def") VARCHAR("vt_ks") VARCHAR("t1") VARCHAR("id1") UINT32(1) NULL VARCHAR("NO") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("PRI") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL] [VARCHAR("def") VARCHAR("vt_ks") VARCHAR("t1") VARCHAR("id2") UINT32(2) NULL VARCHAR("YES") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL] [VARCHAR("def") VARCHAR("vt_ks") VARCHAR("t2") VARCHAR("id") UINT32(1) NULL VARCHAR("NO") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("PRI") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL] [VARCHAR("def") VARCHAR("vt_ks") VARCHAR("t2") VARCHAR("value") UINT32(2) NULL VARCHAR("YES") BLOB("bigint") NULL NULL UINT64(19) UINT64(0) NULL NULL NULL BLOB("bigint") VARBINARY("") VARCHAR("") VARCHAR("select,insert,update,references") BLOB("") BLOB("") NULL]]`, + ) +} + +func TestJoinWithSingleShardQueryOnRHS(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, 
"vtgate") + // This test checks that we can run queries like this, where the RHS is a single shard query + mcmp, closer := start(t) + defer closer() + + query := `SELECT + c.column_name as column_name, + c.data_type as data_type, + c.table_name as table_name, + c.table_schema as table_schema +FROM + information_schema.columns c + JOIN ( + SELECT + table_name + FROM + information_schema.tables + WHERE + table_schema != 'information_schema' + LIMIT + 1 + ) AS tables ON tables.table_name = c.table_name +ORDER BY + c.table_name` + + res := utils.Exec(t, mcmp.VtConn, query) + require.NotEmpty(t, res.Rows) } diff --git a/go/test/endtoend/vtgate/queries/informationschema/main_test.go b/go/test/endtoend/vtgate/queries/informationschema/main_test.go index 06c5b188d18..3696617281e 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/main_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/main_test.go @@ -78,12 +78,12 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ApplyRoutingRules(routingRules) + err = clusterInstance.VtctldClientProcess.ApplyRoutingRules(routingRules) if err != nil { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } diff --git a/go/test/endtoend/vtgate/queries/informationschema/schema.sql b/go/test/endtoend/vtgate/queries/informationschema/schema.sql index 1fc9949406b..ad324cffd1a 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/schema.sql +++ b/go/test/endtoend/vtgate/queries/informationschema/schema.sql @@ -5,6 +5,13 @@ create table t1 primary key (id1) ) Engine = InnoDB; +create table t2 +( + id bigint, + value bigint, + primary key (id) +) Engine = InnoDB; + create table t1_id2_idx ( id2 bigint, diff --git a/go/test/endtoend/vtgate/queries/informationschema/vschema.json 
b/go/test/endtoend/vtgate/queries/informationschema/vschema.json index eec57e9970d..4fa5af75f49 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/vschema.json +++ b/go/test/endtoend/vtgate/queries/informationschema/vschema.json @@ -4,7 +4,7 @@ "hash": { "type": "hash" }, - "unicode_loose_xxhash" : { + "unicode_loose_xxhash": { "type": "unicode_loose_xxhash" }, "t1_id2_idx": { @@ -40,6 +40,14 @@ } ] }, + "t2": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + }, "t3_id7_idx": { "column_vindexes": [ { diff --git a/go/test/endtoend/vtgate/queries/kill/main_test.go b/go/test/endtoend/vtgate/queries/kill/main_test.go index 836603c91ee..99608030246 100644 --- a/go/test/endtoend/vtgate/queries/kill/main_test.go +++ b/go/test/endtoend/vtgate/queries/kill/main_test.go @@ -21,7 +21,7 @@ import ( _ "embed" "flag" "fmt" - "math/rand" + "math/rand/v2" "os" "strconv" "strings" @@ -134,7 +134,8 @@ func dropData(t *testing.T) { defer conn.Close() utils.Exec(t, conn, "drop table if exists test") - utils.Exec(t, conn, schema) + utils.Exec(t, conn, "drop table if exists test_idx") + utils.ExecMulti(t, conn, schema) } func getRandomString(size int) string { diff --git a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go index c385941502a..a587f124762 100644 --- a/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go +++ b/go/test/endtoend/vtgate/queries/lookup_queries/main_test.go @@ -74,7 +74,7 @@ func TestMain(m *testing.M) { return 1 } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("RebuildVSchemaGraph") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("RebuildVSchemaGraph") if err != nil { return 1 } @@ -134,7 +134,7 @@ func TestLookupQueries(t *testing.T) { (3, 'monkey', 'monkey')`) for _, workload := range []string{"olap", "oltp"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, 
mcmp.VtConn, "set workload = "+workload) mcmp.AssertMatches("select id from user where lookup = 'apa'", "[[INT64(1)] [INT64(2)]]") diff --git a/go/test/endtoend/vtgate/queries/misc/main_test.go b/go/test/endtoend/vtgate/queries/misc/main_test.go index a3858284884..f20072031a8 100644 --- a/go/test/endtoend/vtgate/queries/misc/main_test.go +++ b/go/test/endtoend/vtgate/queries/misc/main_test.go @@ -73,6 +73,9 @@ func TestMain(m *testing.M) { return 1 } + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "--enable-views") + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-views") + // Start keyspace keyspace := &cluster.Keyspace{ Name: keyspaceName, diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 0fdee1b88a1..857339605f8 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" "testing" + "time" _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/assert" @@ -36,7 +37,7 @@ func start(t *testing.T) (utils.MySQLCompare, func()) { require.NoError(t, err) deleteAll := func() { - tables := []string{"t1", "uks.unsharded"} + tables := []string{"t1", "tbl", "unq_idx", "nonunq_idx", "tbl_enum_set", "uks.unsharded"} for _, table := range tables { _, _ = mcmp.ExecAndIgnore("delete from " + table) } @@ -59,8 +60,25 @@ func TestBitVals(t *testing.T) { mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010'`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`) mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010' from t1`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`) - mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) - mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + 
B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) + vtgateVersion, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if vtgateVersion >= 19 { + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) + } else { + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`) + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`) + } +} + +// TestTimeFunctionWithPrecision tests that inserting data with NOW(1) works as intended. +func TestTimeFunctionWithPrecision(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (1, NOW(1))") + mcmp.Exec("insert into t1(id1, id2) values (2, NOW(2))") + mcmp.Exec("insert into t1(id1, id2) values (3, NOW())") } func TestHexVals(t *testing.T) { @@ -88,11 +106,11 @@ func TestInvalidDateTimeTimestampVals(t *testing.T) { mcmp, closer := start(t) defer closer() - _, err := mcmp.ExecAllowAndCompareError(`select date'2022'`) + _, err := mcmp.ExecAllowAndCompareError(`select date'2022'`, utils.CompareOptions{}) require.Error(t, err) - _, err = mcmp.ExecAllowAndCompareError(`select time'12:34:56:78'`) + _, err = mcmp.ExecAllowAndCompareError(`select time'12:34:56:78'`, utils.CompareOptions{}) require.Error(t, err) - _, err = mcmp.ExecAllowAndCompareError(`select timestamp'2022'`) + _, err = mcmp.ExecAllowAndCompareError(`select timestamp'2022'`, utils.CompareOptions{}) require.Error(t, err) } @@ -120,6 +138,41 @@ func TestCast(t 
*testing.T) { mcmp.AssertMatches("select cast('3.2' as unsigned)", `[[UINT64(3)]]`) } +// TestVindexHints tests that vindex hints work as intended. +func TestVindexHints(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (1,0,10), (2,10,10), (3,4,20), (4,30,20), (5,40,10)") + mcmp.AssertMatches("select id, unq_col, nonunq_col from tbl where unq_col = 10 and id = 2 and nonunq_col in (10, 20)", "[[INT64(2) INT64(10) INT64(10)]]") + + // Verify that without any vindex hints, the query plan uses a hash vindex. + res, err := mcmp.VtConn.ExecuteFetch("vexplain plan select id, unq_col, nonunq_col from tbl where unq_col = 10 and id = 2 and nonunq_col in (10, 20)", 100, false) + require.NoError(t, err) + require.Contains(t, fmt.Sprintf("%v", res.Rows), "hash") + + // Now we make the query explicitly use the unique lookup vindex. + // We make sure the query still works. + res, err = mcmp.VtConn.ExecuteFetch("select id, unq_col, nonunq_col from tbl USE VINDEX (unq_vdx) where unq_col = 10 and id = 2 and nonunq_col in (10, 20)", 100, false) + require.NoError(t, err) + require.EqualValues(t, fmt.Sprintf("%v", res.Rows), "[[INT64(2) INT64(10) INT64(10)]]") + // Verify that we are using the unq_vdx, that we requested explicitly. + res, err = mcmp.VtConn.ExecuteFetch("vexplain plan select id, unq_col, nonunq_col from tbl USE VINDEX (unq_vdx) where unq_col = 10 and id = 2 and nonunq_col in (10, 20)", 100, false) + require.NoError(t, err) + require.Contains(t, fmt.Sprintf("%v", res.Rows), "unq_vdx") + + // Now we make the query explicitly refuse two of the three vindexes. + // We make sure the query still works. 
+ res, err = mcmp.VtConn.ExecuteFetch("select id, unq_col, nonunq_col from tbl IGNORE VINDEX (hash, unq_vdx) where unq_col = 10 and id = 2 and nonunq_col in (10, 20)", 100, false) + require.NoError(t, err) + require.EqualValues(t, fmt.Sprintf("%v", res.Rows), "[[INT64(2) INT64(10) INT64(10)]]") + // Verify that we are using the nonunq_vdx, which is the only one left to be used. + res, err = mcmp.VtConn.ExecuteFetch("vexplain plan select id, unq_col, nonunq_col from tbl IGNORE VINDEX (hash, unq_vdx) where unq_col = 10 and id = 2 and nonunq_col in (10, 20)", 100, false) + require.NoError(t, err) + require.Contains(t, fmt.Sprintf("%v", res.Rows), "nonunq_vdx") +} + func TestOuterJoinWithPredicate(t *testing.T) { mcmp, closer := start(t) defer closer() @@ -204,12 +257,12 @@ func TestPrepareStatements(t *testing.T) { mcmp.AssertMatchesNoOrder(`execute prep_in_pk using @id1, @id2`, `[[INT64(0) INT64(0)] [INT64(1) INT64(0)]]`) // Fail by providing wrong number of arguments - _, err := mcmp.ExecAllowAndCompareError(`execute prep_in_pk using @id1, @id1, @id`) + _, err := mcmp.ExecAllowAndCompareError(`execute prep_in_pk using @id1, @id1, @id`, utils.CompareOptions{}) incorrectCount := "VT03025: Incorrect arguments to EXECUTE" assert.ErrorContains(t, err, incorrectCount) - _, err = mcmp.ExecAllowAndCompareError(`execute prep_in_pk using @id1`) + _, err = mcmp.ExecAllowAndCompareError(`execute prep_in_pk using @id1`, utils.CompareOptions{}) assert.ErrorContains(t, err, incorrectCount) - _, err = mcmp.ExecAllowAndCompareError(`execute prep_in_pk`) + _, err = mcmp.ExecAllowAndCompareError(`execute prep_in_pk`, utils.CompareOptions{}) assert.ErrorContains(t, err, incorrectCount) mcmp.Exec(`prepare prep_art from 'select 1+?, 10/?'`) @@ -229,10 +282,10 @@ func TestPrepareStatements(t *testing.T) { mcmp.Exec(`select 1+9999999999999999999999999999, 10/9999999999999999999999999999 from t1 limit 1`) mcmp.Exec("deallocate prepare prep_art") - _, err = 
mcmp.ExecAllowAndCompareError(`execute prep_art using @id1, @id1`) + _, err = mcmp.ExecAllowAndCompareError(`execute prep_art using @id1, @id1`, utils.CompareOptions{}) assert.ErrorContains(t, err, "VT09011: Unknown prepared statement handler (prep_art) given to EXECUTE") - _, err = mcmp.ExecAllowAndCompareError("deallocate prepare prep_art") + _, err = mcmp.ExecAllowAndCompareError("deallocate prepare prep_art", utils.CompareOptions{}) assert.ErrorContains(t, err, "VT09011: Unknown prepared statement handler (prep_art) given to DEALLOCATE PREPARE") } @@ -259,7 +312,7 @@ func TestAnalyze(t *testing.T) { defer closer() for _, workload := range []string{"olap", "oltp"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) utils.Exec(t, mcmp.VtConn, "analyze table t1") utils.Exec(t, mcmp.VtConn, "analyze table uks.unsharded") @@ -267,3 +320,157 @@ func TestAnalyze(t *testing.T) { }) } } + +// TestTransactionModeVar executes SELECT on `transaction_mode` variable +func TestTransactionModeVar(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + + mcmp, closer := start(t) + defer closer() + + tcases := []struct { + setStmt string + expRes string + }{{ + expRes: `[[VARCHAR("MULTI")]]`, + }, { + setStmt: `set transaction_mode = single`, + expRes: `[[VARCHAR("SINGLE")]]`, + }, { + setStmt: `set transaction_mode = multi`, + expRes: `[[VARCHAR("MULTI")]]`, + }, { + setStmt: `set transaction_mode = twopc`, + expRes: `[[VARCHAR("TWOPC")]]`, + }} + + for _, tcase := range tcases { + mcmp.Run(tcase.setStmt, func(mcmp *utils.MySQLCompare) { + if tcase.setStmt != "" { + utils.Exec(t, mcmp.VtConn, tcase.setStmt) + } + utils.AssertMatches(t, mcmp.VtConn, "select @@transaction_mode", tcase.expRes) + }) + } +} + +// TestAliasesInOuterJoinQueries tests that aliases work in queries that have outer join clauses. 
+func TestAliasesInOuterJoinQueries(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // Insert data into the 2 tables + mcmp.Exec("insert into t1(id1, id2) values (1,2), (42,5), (5, 42)") + mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (1,2,3), (2,5,3), (3, 42, 2)") + + // Check that the select query works as intended and verifying the column names as well. + mcmp.ExecWithColumnCompare("select t1.id1 as t0, t1.id1 as t1, tbl.unq_col as col from t1 left outer join tbl on t1.id2 = tbl.nonunq_col") + mcmp.ExecWithColumnCompare("select t1.id1 as t0, t1.id1 as t1, tbl.unq_col as col from t1 left outer join tbl on t1.id2 = tbl.nonunq_col order by t1.id2 limit 2") + mcmp.ExecWithColumnCompare("select t1.id1 as t0, t1.id1 as t1, tbl.unq_col as col from t1 left outer join tbl on t1.id2 = tbl.nonunq_col order by t1.id2 limit 2 offset 2") + mcmp.ExecWithColumnCompare("select t1.id1 as t0, t1.id1 as t1, count(*) as leCount from t1 left outer join tbl on t1.id2 = tbl.nonunq_col group by 1, t1") + mcmp.ExecWithColumnCompare("select t.id1, t.id2, derived.unq_col from t1 t join (select id, unq_col, nonunq_col from tbl) as derived on t.id2 = derived.nonunq_col") +} + +func TestAlterTableWithView(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + // Test that create/alter view works and the output is as expected + mcmp.Exec(`use ks_misc`) + mcmp.Exec(`create view v1 as select * from t1`) + var viewDef string + utils.WaitForVschemaCondition(t, clusterInstance.VtgateProcess, keyspaceName, func(t *testing.T, ksMap map[string]any) bool { + views, ok := ksMap["views"] + if !ok { + return false + } + viewsMap := views.(map[string]any) + view, ok := viewsMap["v1"] + if ok { + viewDef = view.(string) + } + return ok + }, "Waiting for view creation") + mcmp.Exec(`insert into t1(id1, id2) values (1, 1)`) + mcmp.AssertMatches("select * from v1", 
`[[INT64(1) INT64(1)]]`) + + // alter table add column + mcmp.Exec(`alter table t1 add column test bigint`) + time.Sleep(10 * time.Second) + mcmp.Exec(`alter view v1 as select * from t1`) + + waitForChange := func(t *testing.T, ksMap map[string]any) bool { + // wait for the view definition to change + views := ksMap["views"] + viewsMap := views.(map[string]any) + newView := viewsMap["v1"] + if newView.(string) == viewDef { + return false + } + viewDef = newView.(string) + return true + } + utils.WaitForVschemaCondition(t, clusterInstance.VtgateProcess, keyspaceName, waitForChange, "Waiting for alter view") + + mcmp.AssertMatches("select * from v1", `[[INT64(1) INT64(1) NULL]]`) + + // alter table remove column + mcmp.Exec(`alter table t1 drop column test`) + mcmp.Exec(`alter view v1 as select * from t1`) + + utils.WaitForVschemaCondition(t, clusterInstance.VtgateProcess, keyspaceName, waitForChange, "Waiting for alter view") + + mcmp.AssertMatches("select * from v1", `[[INT64(1) INT64(1)]]`) +} + +// TestStraightJoin tests that Vitess respects the ordering of join in a STRAIGHT JOIN query. +func TestStraightJoin(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into tbl(id, unq_col, nonunq_col) values (1,0,10), (2,10,10), (3,4,20), (4,30,20), (5,40,10)") + mcmp.Exec(`insert into t1(id1, id2) values (10, 11), (20, 13)`) + + mcmp.AssertMatchesNoOrder("select tbl.unq_col, tbl.nonunq_col, t1.id2 from t1 join tbl where t1.id1 = tbl.nonunq_col", + `[[INT64(0) INT64(10) INT64(11)] [INT64(10) INT64(10) INT64(11)] [INT64(4) INT64(20) INT64(13)] [INT64(40) INT64(10) INT64(11)] [INT64(30) INT64(20) INT64(13)]]`, + ) + // Verify that in a normal join query, vitess joins tbl with t1. 
+ res, err := mcmp.VtConn.ExecuteFetch("vexplain plan select tbl.unq_col, tbl.nonunq_col, t1.id2 from t1 join tbl where t1.id1 = tbl.nonunq_col", 100, false) + require.NoError(t, err) + require.Contains(t, fmt.Sprintf("%v", res.Rows), "tbl_t1") + + // Test the same query with a straight join + mcmp.AssertMatchesNoOrder("select tbl.unq_col, tbl.nonunq_col, t1.id2 from t1 straight_join tbl where t1.id1 = tbl.nonunq_col", + `[[INT64(0) INT64(10) INT64(11)] [INT64(10) INT64(10) INT64(11)] [INT64(4) INT64(20) INT64(13)] [INT64(40) INT64(10) INT64(11)] [INT64(30) INT64(20) INT64(13)]]`, + ) + // Verify that in a straight join query, vitess joins t1 with tbl. + res, err = mcmp.VtConn.ExecuteFetch("vexplain plan select tbl.unq_col, tbl.nonunq_col, t1.id2 from t1 straight_join tbl where t1.id1 = tbl.nonunq_col", 100, false) + require.NoError(t, err) + require.Contains(t, fmt.Sprintf("%v", res.Rows), "t1_tbl") +} + +func TestColumnAliases(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (0,0), (1,1)") + mcmp.ExecWithColumnCompare(`select a as k from (select count(*) as a from t1) t`) +} + +func TestEnumSetVals(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + require.NoError(t, utils.WaitForAuthoritative(t, keyspaceName, "tbl_enum_set", clusterInstance.VtgateProcess.ReadVSchema)) + + mcmp.Exec("insert into tbl_enum_set(id, enum_col, set_col) values (1, 'medium', 'a,b,e'), (2, 'small', 'e,f,g'), (3, 'large', 'c'), (4, 'xsmall', 'a,b'), (5, 'medium', 'a,d')") + + mcmp.AssertMatches("select id, enum_col, cast(enum_col as signed) from tbl_enum_set order by enum_col, id", `[[INT64(4) ENUM("xsmall") INT64(1)] [INT64(2) ENUM("small") INT64(2)] [INT64(1) ENUM("medium") INT64(3)] [INT64(5) ENUM("medium") INT64(3)] [INT64(3) ENUM("large") INT64(4)]]`) + mcmp.AssertMatches("select id, set_col, cast(set_col as 
unsigned) from tbl_enum_set order by set_col, id", `[[INT64(4) SET("a,b") UINT64(3)] [INT64(3) SET("c") UINT64(4)] [INT64(5) SET("a,d") UINT64(9)] [INT64(1) SET("a,b,e") UINT64(19)] [INT64(2) SET("e,f,g") UINT64(112)]]`) +} diff --git a/go/test/endtoend/vtgate/queries/misc/schema.sql b/go/test/endtoend/vtgate/queries/misc/schema.sql index ceac0c07e6d..e0c0d1a36a7 100644 --- a/go/test/endtoend/vtgate/queries/misc/schema.sql +++ b/go/test/endtoend/vtgate/queries/misc/schema.sql @@ -1,5 +1,38 @@ -create table if not exists t1( - id1 bigint, - id2 bigint, - primary key(id1) -) Engine=InnoDB; \ No newline at end of file +create table t1 +( + id1 bigint, + id2 bigint, + primary key (id1) +) Engine=InnoDB; + +create table unq_idx +( + unq_col bigint, + keyspace_id varbinary(20), + primary key (unq_col) +) Engine = InnoDB; + +create table nonunq_idx +( + nonunq_col bigint, + id bigint, + keyspace_id varbinary(20), + primary key (nonunq_col, id) +) Engine = InnoDB; + +create table tbl +( + id bigint, + unq_col bigint, + nonunq_col bigint, + primary key (id), + unique (unq_col) +) Engine = InnoDB; + +create table tbl_enum_set +( + id bigint, + enum_col enum('xsmall', 'small', 'medium', 'large', 'xlarge'), + set_col set('a', 'b', 'c', 'd', 'e', 'f', 'g'), + primary key (id) +) Engine = InnoDB; diff --git a/go/test/endtoend/vtgate/queries/misc/vschema.json b/go/test/endtoend/vtgate/queries/misc/vschema.json index 60aa2bc9c07..d3d7c3b7935 100644 --- a/go/test/endtoend/vtgate/queries/misc/vschema.json +++ b/go/test/endtoend/vtgate/queries/misc/vschema.json @@ -3,6 +3,26 @@ "vindexes": { "hash": { "type": "hash" + }, + "unq_vdx": { + "type": "consistent_lookup_unique", + "params": { + "table": "unq_idx", + "from": "unq_col", + "to": "keyspace_id", + "ignore_nulls": "true" + }, + "owner": "tbl" + }, + "nonunq_vdx": { + "type": "consistent_lookup", + "params": { + "table": "nonunq_idx", + "from": "nonunq_col,id", + "to": "keyspace_id", + "ignore_nulls": "true" + }, + "owner": "tbl" 
} }, "tables": { @@ -13,6 +33,49 @@ "name": "hash" } ] + }, + "tbl": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + }, + { + "column": "unq_col", + "name": "unq_vdx" + }, + { + "columns": [ + "nonunq_col", + "id" + ], + "name": "nonunq_vdx" + } + ] + }, + "tbl_enum_set": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + }, + "unq_idx": { + "column_vindexes": [ + { + "column": "unq_col", + "name": "hash" + } + ] + }, + "nonunq_idx": { + "column_vindexes": [ + { + "column": "nonunq_col", + "name": "hash" + } + ] } } } \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go index b6495443a8e..51d9f9f24bf 100644 --- a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/mysql" @@ -39,7 +40,12 @@ func TestNormalizeAllFields(t *testing.T) { defer conn.Close() insertQuery := `insert into t1 values (1, "chars", "variable chars", x'73757265', 0x676F, 0.33, 9.99, 1, "1976-06-08", "small", "b", "{\"key\":\"value\"}", point(1,5), b'011', 0b0101)` - normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)` + normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL(3,2) */, :vtg7 /* DECIMAL(3,2) */, :vtg8 /* INT64 */, 
:vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)` + vtgateVersion, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if vtgateVersion < 20 { + normalizedInsertQuery = `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)` + } selectQuery := "select * from t1" utils.Exec(t, conn, insertQuery) qr := utils.Exec(t, conn, selectQuery) diff --git a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go index dd48a09fec7..e8d8d4bfef1 100644 --- a/go/test/endtoend/vtgate/queries/orderby/orderby_test.go +++ b/go/test/endtoend/vtgate/queries/orderby/orderby_test.go @@ -68,7 +68,7 @@ func TestOrderBy(t *testing.T) { mcmp.AssertMatches("select id1, id2 from t4 order by id1 desc", `[[INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(5) VARCHAR("test")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) // test ordering of complex column if utils.BinaryIsAtLeastAtVersion(17, "vtgate") { - mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ id1, id2 from t4 order by reverse(id2) desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(2) VARCHAR("Abc")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(1) VARCHAR("a")]]`) + mcmp.AssertMatches("select id1, id2 from t4 order by reverse(id2) desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(2) VARCHAR("Abc")] [INT64(4) 
VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(1) VARCHAR("a")]]`) } defer func() { @@ -80,6 +80,77 @@ func TestOrderBy(t *testing.T) { mcmp.AssertMatches("select id1, id2 from t4 order by id2 desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) mcmp.AssertMatches("select id1, id2 from t4 order by id1 desc", `[[INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(5) VARCHAR("test")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(2) VARCHAR("Abc")] [INT64(1) VARCHAR("a")]]`) if utils.BinaryIsAtLeastAtVersion(17, "vtgate") { - mcmp.AssertMatches("select /*vt+ PLANNER=Gen4 */ id1, id2 from t4 order by reverse(id2) desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(2) VARCHAR("Abc")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(1) VARCHAR("a")]]`) + mcmp.AssertMatches("select id1, id2 from t4 order by reverse(id2) desc", `[[INT64(5) VARCHAR("test")] [INT64(8) VARCHAR("F")] [INT64(7) VARCHAR("e")] [INT64(6) VARCHAR("d")] [INT64(2) VARCHAR("Abc")] [INT64(4) VARCHAR("c")] [INT64(3) VARCHAR("b")] [INT64(1) VARCHAR("a")]]`) + } +} + +func TestOrderByComplex(t *testing.T) { + // tests written to try to trick the ORDER BY engine and planner + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into user(id, col, email) values(1,1,'a'), (2,2,'Abc'), (3,3,'b'), (4,4,'c'), (5,2,'test'), (6,1,'test'), (7,2,'a'), (8,3,'b'), (9,4,'c3'), (10,2,'d')") + + queries := []string{ + "select email, max(col) from user group by email order by col", + "select email, max(col) from user group by email order by col + 1", + "select email, max(col) from user group by email order by max(col)", + "select email, max(col) from user group by email order by max(col) + 1", + "select 
email, max(col) from user group by email order by min(col)", + "select email, max(col) as col from user group by email order by col", + "select email, max(col) as col from user group by email order by max(col)", + "select email, max(col) as col from user group by email order by col + 1", + "select email, max(col) as col from user group by email order by email + col", + "select email, max(col) as col from user group by email order by email + max(col)", + "select email, max(col) as col from user group by email order by email, col", + "select email, max(col) as xyz from user group by email order by email, xyz", + "select email, max(col) as xyz from user group by email order by email, max(xyz)", + "select email, max(col) as xyz from user group by email order by email, abs(xyz)", + "select email, max(col) as xyz from user group by email order by email, max(col)", + "select email, max(col) as xyz from user group by email order by email, abs(col)", + "select email, max(col) as xyz from user group by email order by xyz + email", + "select email, max(col) as xyz from user group by email order by abs(xyz) + email", + "select email, max(col) as xyz from user group by email order by abs(xyz)", + "select email, max(col) as xyz from user group by email order by abs(col)", + "select email, max(col) as max_col from user group by email order by max_col desc, length(email)", + "select email, max(col) as max_col, min(col) as min_col from user group by email order by max_col - min_col", + "select email, max(col) as col1, count(*) as col2 from user group by email order by col2 * col1", + "select email, sum(col) as sum_col from user group by email having sum_col > 10 order by sum_col / count(email)", + "select email, max(col) as max_col, char_length(email) as len_email from user group by email order by len_email, max_col desc", + "select email, max(col) as col_alias from user group by email order by case when col_alias > 100 then 0 else 1 end, col_alias", + "select email, count(*) as 
cnt, max(col) as max_col from user group by email order by cnt desc, max_col + cnt", + "select email, max(col) as max_col from user group by email order by if(max_col > 50, max_col, -max_col) desc", + "select email, max(col) as col, sum(col) as sum_col from user group by email order by col * sum_col desc", + "select email, max(col) as col, (select min(col) from user as u2 where u2.email = user.email) as min_col from user group by email order by col - min_col", + "select email, max(col) as max_col, (max(col) % 10) as mod_col from user group by email order by mod_col, max_col", + "select email, max(col) as 'value', count(email) as 'number' from user group by email order by 'number', 'value'", + "select email, max(col) as col, concat('email: ', email, ' col: ', max(col)) as complex_alias from user group by email order by complex_alias desc", + "select email, max(col) as max_col from user group by email union select email, min(col) as min_col from user group by email order by email", + "select email, max(col) as col from user where col > 50 group by email order by col desc", + "select email, max(col) as col from user group by email order by length(email), col", + "select email, max(col) as max_col, substring(email, 1, 3) as sub_email from user group by email order by sub_email, max_col desc", + "select email, max(col) as max_col from user group by email order by reverse(email), max_col", + "select email, max(col) as max_col from user group by email having max_col > avg(max_col) order by max_col desc", + "select email, count(*) as count, max(col) as max_col from user group by email order by count * max_col desc", + "select email, max(col) as col_alias from user group by email order by col_alias limit 10", + "select email, max(col) as col from user group by email order by col desc, email", + "select concat(email, ' ', max(col)) as combined from user group by email order by combined desc", + "select email, max(col) as max_col from user group by email order by 
ascii(email), max_col", + "select email, char_length(email) as email_length, max(col) as max_col from user group by email order by email_length desc, max_col", + "select email, max(col) as col from user group by email having col between 10 and 100 order by col", + "select email, max(col) as max_col, min(col) as min_col from user group by email order by max_col + min_col desc", + "select email, max(col) as 'max', count(*) as 'count' from user group by email order by 'max' desc, 'count'", + "select email, max(col) as max_col from (select email, col from user where col > 20) as filtered group by email order by max_col", + "select a.email, a.max_col from (select email, max(col) as max_col from user group by email) as a order by a.max_col desc", + "select email, max(col) as max_col from user where email like 'a%' group by email order by max_col, email", + `select email, max(col) as max_col from user group by email union select email, avg(col) as avg_col from user group by email order by email desc`, + } + + for _, query := range queries { + mcmp.Run(query, func(mcmp *utils.MySQLCompare) { + _, _ = mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) + }) } } diff --git a/go/test/endtoend/vtgate/queries/orderby/schema.sql b/go/test/endtoend/vtgate/queries/orderby/schema.sql index 8f0131db357..efaedc14754 100644 --- a/go/test/endtoend/vtgate/queries/orderby/schema.sql +++ b/go/test/endtoend/vtgate/queries/orderby/schema.sql @@ -27,3 +27,12 @@ create table t4_id2_idx ) Engine = InnoDB DEFAULT charset = utf8mb4 COLLATE = utf8mb4_general_ci; + +create table user +( + id bigint primary key, + col bigint, + email varchar(20) +) Engine = InnoDB + DEFAULT charset = utf8mb4 + COLLATE = utf8mb4_general_ci; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/orderby/vschema.json b/go/test/endtoend/vtgate/queries/orderby/vschema.json index 14418850a35..771676de4b9 100644 --- a/go/test/endtoend/vtgate/queries/orderby/vschema.json +++ 
b/go/test/endtoend/vtgate/queries/orderby/vschema.json @@ -66,6 +66,14 @@ "name": "unicode_loose_md5" } ] + }, + "user": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] } } } \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/random/query_gen.go b/go/test/endtoend/vtgate/queries/random/query_gen.go index 3f8fccb05bb..b078f1cab8b 100644 --- a/go/test/endtoend/vtgate/queries/random/query_gen.go +++ b/go/test/endtoend/vtgate/queries/random/query_gen.go @@ -18,7 +18,7 @@ package random import ( "fmt" - "math/rand" + "math/rand/v2" "slices" "vitess.io/vitess/go/slice" @@ -36,7 +36,6 @@ const testFailingQueries = false type ( // selectGenerator generates select statements selectGenerator struct { - r *rand.Rand genConfig sqlparser.ExprGeneratorConfig maxTables int maxAggrs int @@ -74,19 +73,18 @@ var _ sqlparser.ExprGenerator = (*column)(nil) var _ sqlparser.QueryGenerator = (*selectGenerator)(nil) var _ sqlparser.QueryGenerator = (*queryGenerator)(nil) -func newQueryGenerator(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *queryGenerator { +func newQueryGenerator(genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *queryGenerator { return &queryGenerator{ - selGen: newSelectGenerator(r, genConfig, maxTables, maxAggrs, maxGBs, schemaTables), + selGen: newSelectGenerator(genConfig, maxTables, maxAggrs, maxGBs, schemaTables), } } -func newSelectGenerator(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *selectGenerator { +func newSelectGenerator(genConfig sqlparser.ExprGeneratorConfig, maxTables, maxAggrs, maxGBs int, schemaTables []tableT) *selectGenerator { if maxTables <= 0 { log.Fatalf("maxTables must be at least 1, currently %d\n", maxTables) } return &selectGenerator{ - r: r, genConfig: genConfig, maxTables: maxTables, maxAggrs: maxAggrs, @@ -140,7 
+138,7 @@ func (t *tableT) clone() *tableT { } } -func (c *column) Generate(_ *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { +func (c *column) Generate(genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { if c.typ == genConfig.Type || genConfig.Type == "" { return c.getASTExpr() } @@ -148,11 +146,11 @@ func (c *column) Generate(_ *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) return nil } -func (t *tableT) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { +func (t *tableT) Generate(genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { colsCopy := slices.Clone(t.cols) for len(colsCopy) > 0 { - idx := r.Intn(len(colsCopy)) + idx := rand.IntN(len(colsCopy)) randCol := colsCopy[idx] if randCol.typ == genConfig.Type || genConfig.Type == "" { return randCol.getASTExpr() @@ -168,26 +166,26 @@ func (t *tableT) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) // Generate generates a subquery based on sg // TODO: currently unused; generate random expressions with union -func (sg *selectGenerator) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { +func (sg *selectGenerator) Generate(genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { var schemaTablesCopy []tableT for _, tbl := range sg.schemaTables { schemaTablesCopy = append(schemaTablesCopy, *tbl.clone()) } - newSG := newQueryGenerator(r, genConfig, sg.maxTables, sg.maxAggrs, sg.maxGBs, schemaTablesCopy) + newSG := newQueryGenerator(genConfig, sg.maxTables, sg.maxAggrs, sg.maxGBs, schemaTablesCopy) newSG.randomQuery() return &sqlparser.Subquery{Select: newSG.selGen.sel} } // Generate generates a subquery based on qg -func (qg *queryGenerator) Generate(r *rand.Rand, genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { +func (qg *queryGenerator) Generate(genConfig sqlparser.ExprGeneratorConfig) sqlparser.Expr { var schemaTablesCopy []tableT for _, tbl := range qg.selGen.schemaTables { schemaTablesCopy 
= append(schemaTablesCopy, *tbl.clone()) } - newQG := newQueryGenerator(r, genConfig, qg.selGen.maxTables, qg.selGen.maxAggrs, qg.selGen.maxGBs, schemaTablesCopy) + newQG := newQueryGenerator(genConfig, qg.selGen.maxTables, qg.selGen.maxAggrs, qg.selGen.maxGBs, schemaTablesCopy) newQG.randomQuery() return &sqlparser.Subquery{Select: newQG.stmt} @@ -197,7 +195,7 @@ func (sg *selectGenerator) IsQueryGenerator() {} func (qg *queryGenerator) IsQueryGenerator() {} func (qg *queryGenerator) randomQuery() { - if qg.selGen.r.Intn(10) < 1 && testFailingQueries { + if rand.IntN(10) < 1 && testFailingQueries { qg.createUnion() } else { qg.selGen.randomSelect() @@ -209,12 +207,12 @@ func (qg *queryGenerator) randomQuery() { func (qg *queryGenerator) createUnion() { union := &sqlparser.Union{} - if qg.selGen.r.Intn(2) < 1 { + if rand.IntN(2) < 1 { union.Distinct = true } // specify between 1-4 columns - qg.selGen.genConfig.NumCols = qg.selGen.r.Intn(4) + 1 + qg.selGen.genConfig.NumCols = rand.IntN(4) + 1 qg.randomQuery() union.Left = qg.stmt @@ -232,7 +230,7 @@ func (sg *selectGenerator) randomSelect() { sg.sel.SetComments(sqlparser.Comments{"/*vt+ PLANNER=Gen4 */"}) // select distinct (fails with group by bigint) - isDistinct := sg.r.Intn(2) < 1 + isDistinct := rand.IntN(2) < 1 if isDistinct { sg.sel.MakeDistinct() } @@ -242,7 +240,7 @@ func (sg *selectGenerator) randomSelect() { // canAggregate determines if the query will have // aggregate columns, group by, and having - canAggregate := sg.r.Intn(4) < 3 + canAggregate := rand.IntN(4) < 3 var ( grouping, aggregates []column @@ -258,7 +256,7 @@ func (sg *selectGenerator) randomSelect() { } // having - isHaving := sg.r.Intn(2) < 1 + isHaving := rand.IntN(2) < 1 // TODO: having creates a lot of results mismatched if isHaving && testFailingQueries { sg.createHavingPredicates(grouping) @@ -280,7 +278,7 @@ func (sg *selectGenerator) randomSelect() { // add random expression to select // TODO: random expressions cause a lot of 
failures - isRandomExpr := sg.r.Intn(2) < 1 && testFailingQueries + isRandomExpr := rand.IntN(2) < 1 && testFailingQueries // TODO: selecting a random expression potentially with columns creates // TODO: only_full_group_by related errors in Vitess @@ -288,7 +286,7 @@ func (sg *selectGenerator) randomSelect() { if canAggregate && testFailingQueries { exprGenerators = slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) // add scalar subqueries - if sg.r.Intn(10) < 1 { + if rand.IntN(10) < 1 { exprGenerators = append(exprGenerators, sg) } } @@ -325,14 +323,14 @@ func (sg *selectGenerator) randomSelect() { // can add both aggregate and grouping columns to order by // TODO: order fails with distinct and outer joins - isOrdered := sg.r.Intn(2) < 1 && (!isDistinct || testFailingQueries) && (!isJoin || testFailingQueries) + isOrdered := rand.IntN(2) < 1 && (!isDistinct || testFailingQueries) && (!isJoin || testFailingQueries) if isOrdered || (!canAggregate && sg.genConfig.SingleRow) /* TODO: might be redundant */ { sg.createOrderBy() } // only add a limit if there is an ordering // TODO: limit fails a lot - isLimit := sg.r.Intn(2) < 1 && len(sg.sel.OrderBy) > 0 && testFailingQueries + isLimit := rand.IntN(2) < 1 && len(sg.sel.OrderBy) > 0 && testFailingQueries if isLimit || (!canAggregate && sg.genConfig.SingleRow) /* TODO: might be redundant */ { sg.createLimit() } @@ -345,7 +343,7 @@ func (sg *selectGenerator) randomSelect() { sg.schemaTables = append(sg.schemaTables, newTable) // derived tables (partially unsupported) - if sg.r.Intn(10) < 1 { + if rand.IntN(10) < 1 { sg.randomSelect() } } @@ -353,24 +351,24 @@ func (sg *selectGenerator) randomSelect() { func (sg *selectGenerator) createTablesAndJoin() ([]tableT, bool) { var tables []tableT // add at least one of original emp/dept tables - tables = append(tables, sg.schemaTables[sg.r.Intn(2)]) + tables = append(tables, sg.schemaTables[rand.IntN(2)]) tables[0].setAlias("tbl0") sg.sel.From = 
append(sg.sel.From, newAliasedTable(tables[0], "tbl0")) - numTables := sg.r.Intn(sg.maxTables) + numTables := rand.IntN(sg.maxTables) for i := 0; i < numTables; i++ { - tables = append(tables, randomEl(sg.r, sg.schemaTables)) + tables = append(tables, randomEl(sg.schemaTables)) alias := fmt.Sprintf("tbl%d", i+1) sg.sel.From = append(sg.sel.From, newAliasedTable(tables[i+1], alias)) tables[i+1].setAlias(alias) } // TODO: outer joins produce results mismatched - isJoin := sg.r.Intn(2) < 1 && testFailingQueries + isJoin := rand.IntN(2) < 1 && testFailingQueries if isJoin { // TODO: do nested joins - newTable := randomEl(sg.r, sg.schemaTables) + newTable := randomEl(sg.schemaTables) alias := fmt.Sprintf("tbl%d", numTables+1) newTable.setAlias(alias) tables = append(tables, newTable) @@ -392,7 +390,7 @@ func (sg *selectGenerator) createJoin(tables []tableT) { joinPredicate := sqlparser.AndExpressions(sg.createJoinPredicates(tables)...) joinCondition := sqlparser.NewJoinCondition(joinPredicate, nil) newTable := newAliasedTable(tables[n], fmt.Sprintf("tbl%d", n)) - sg.sel.From[n-1] = sqlparser.NewJoinTableExpr(sg.sel.From[n-1], getRandomJoinType(sg.r), newTable, joinCondition) + sg.sel.From[n-1] = sqlparser.NewJoinTableExpr(sg.sel.From[n-1], getRandomJoinType(), newTable, joinCondition) } // returns 1-3 random expressions based on the last two elements of tables @@ -405,7 +403,7 @@ func (sg *selectGenerator) createJoinPredicates(tables []tableT) sqlparser.Exprs exprGenerators := []sqlparser.ExprGenerator{&tables[len(tables)-2], &tables[len(tables)-1]} // add scalar subqueries // TODO: subqueries fail - if sg.r.Intn(10) < 1 && testFailingQueries { + if rand.IntN(10) < 1 && testFailingQueries { exprGenerators = append(exprGenerators, sg) } @@ -417,18 +415,18 @@ func (sg *selectGenerator) createGroupBy(tables []tableT) (grouping []column) { if sg.maxGBs <= 0 { return } - numGBs := sg.r.Intn(sg.maxGBs + 1) + numGBs := rand.IntN(sg.maxGBs + 1) for i := 0; i < numGBs; i++ { - 
tblIdx := sg.r.Intn(len(tables)) - col := randomEl(sg.r, tables[tblIdx].cols) + tblIdx := rand.IntN(len(tables)) + col := randomEl(tables[tblIdx].cols) // TODO: grouping by a date column sometimes errors if col.typ == "date" && !testFailingQueries { continue } - sg.sel.GroupBy = append(sg.sel.GroupBy, col.getASTExpr()) + sg.sel.AddGroupBy(col.getASTExpr()) // add to select - if sg.r.Intn(2) < 1 { + if rand.IntN(2) < 1 { sg.sel.SelectExprs = append(sg.sel.SelectExprs, newAliasedColumn(col, "")) grouping = append(grouping, col) } @@ -444,7 +442,7 @@ func (sg *selectGenerator) aliasGroupingColumns(grouping []column) []column { } for i := range grouping { - if sg.r.Intn(2) < 1 { + if rand.IntN(2) < 1 { if aliasedExpr, ok := sg.sel.SelectExprs[i].(*sqlparser.AliasedExpr); ok { alias := fmt.Sprintf("cgroup%d", i) aliasedExpr.SetAlias(alias) @@ -461,7 +459,7 @@ func (sg *selectGenerator) createAggregations(tables []tableT) (aggregates []col exprGenerators := slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) // add scalar subqueries // TODO: subqueries fail - if sg.r.Intn(10) < 1 && testFailingQueries { + if rand.IntN(10) < 1 && testFailingQueries { exprGenerators = append(exprGenerators, sg) } @@ -480,19 +478,21 @@ func (sg *selectGenerator) createAggregations(tables []tableT) (aggregates []col // orders on all grouping expressions and on random SelectExprs func (sg *selectGenerator) createOrderBy() { // always order on grouping expressions - for _, expr := range sg.sel.GroupBy { - sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(expr, getRandomOrderDirection(sg.r))) + if sg.sel.GroupBy != nil { + for _, expr := range sg.sel.GroupBy.Exprs { + sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(expr, getRandomOrderDirection())) + } } // randomly order on SelectExprs for _, selExpr := range sg.sel.SelectExprs { - if aliasedExpr, ok := selExpr.(*sqlparser.AliasedExpr); ok && sg.r.Intn(2) < 1 { + if aliasedExpr, ok := 
selExpr.(*sqlparser.AliasedExpr); ok && rand.IntN(2) < 1 { literal, ok := aliasedExpr.Expr.(*sqlparser.Literal) isIntLiteral := ok && literal.Type == sqlparser.IntVal if isIntLiteral { continue } - sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(aliasedExpr.Expr, getRandomOrderDirection(sg.r))) + sg.sel.OrderBy = append(sg.sel.OrderBy, sqlparser.NewOrder(aliasedExpr.Expr, getRandomOrderDirection())) } } } @@ -502,7 +502,7 @@ func (sg *selectGenerator) createWherePredicates(tables []tableT) { exprGenerators := slice.Map(tables, func(t tableT) sqlparser.ExprGenerator { return &t }) // add scalar subqueries // TODO: subqueries fail - if sg.r.Intn(10) < 1 && testFailingQueries { + if rand.IntN(10) < 1 && testFailingQueries { exprGenerators = append(exprGenerators, sg) } @@ -515,7 +515,7 @@ func (sg *selectGenerator) createHavingPredicates(grouping []column) { exprGenerators := slice.Map(grouping, func(c column) sqlparser.ExprGenerator { return &c }) // add scalar subqueries // TODO: subqueries fail - if sg.r.Intn(10) < 1 && testFailingQueries { + if rand.IntN(10) < 1 && testFailingQueries { exprGenerators = append(exprGenerators, sg) } @@ -533,7 +533,7 @@ func (sg *selectGenerator) createRandomExprs(minExprs, maxExprs int, generators } else if maxExprs <= 0 { return } - numPredicates := sg.r.Intn(maxExprs-minExprs+1) + minExprs + numPredicates := rand.IntN(maxExprs-minExprs+1) + minExprs for i := 0; i < numPredicates; i++ { predicates = append(predicates, sg.getRandomExpr(generators...)) } @@ -545,9 +545,9 @@ func (sg *selectGenerator) createRandomExprs(minExprs, maxExprs int, generators func (sg *selectGenerator) getRandomExpr(generators ...sqlparser.ExprGenerator) sqlparser.Expr { var g *sqlparser.Generator if generators == nil { - g = sqlparser.NewGenerator(sg.r, 2) + g = sqlparser.NewGenerator(2) } else { - g = sqlparser.NewGenerator(sg.r, 2, generators...) + g = sqlparser.NewGenerator(2, generators...) 
} return g.Expression(sg.genConfig.SingleRowConfig().SetNumCols(1)) @@ -560,9 +560,9 @@ func (sg *selectGenerator) createLimit() { return } - limitNum := sg.r.Intn(10) - if sg.r.Intn(2) < 1 { - offset := sg.r.Intn(10) + limitNum := rand.IntN(10) + if rand.IntN(2) < 1 { + offset := rand.IntN(10) sg.sel.Limit = sqlparser.NewLimit(offset, limitNum) } else { sg.sel.Limit = sqlparser.NewLimitWithoutOffset(limitNum) @@ -572,7 +572,7 @@ func (sg *selectGenerator) createLimit() { // randomlyAlias randomly aliases expr with alias alias, adds it to sel.SelectExprs, and returns the column created func (sg *selectGenerator) randomlyAlias(expr sqlparser.Expr, alias string) column { var col column - if sg.r.Intn(2) < 1 { + if rand.IntN(2) < 1 { alias = "" col.name = sqlparser.String(expr) } else { @@ -588,7 +588,7 @@ func (sg *selectGenerator) matchNumCols(tables []tableT, newTable tableT, canAgg // remove SelectExprs and newTable.cols randomly until there are sg.genConfig.NumCols amount for len(sg.sel.SelectExprs) > sg.genConfig.NumCols && sg.genConfig.NumCols > 0 { // select a random index and remove it from SelectExprs and newTable - idx := sg.r.Intn(len(sg.sel.SelectExprs)) + idx := rand.IntN(len(sg.sel.SelectExprs)) sg.sel.SelectExprs[idx] = sg.sel.SelectExprs[len(sg.sel.SelectExprs)-1] sg.sel.SelectExprs = sg.sel.SelectExprs[:len(sg.sel.SelectExprs)-1] @@ -616,18 +616,18 @@ func (sg *selectGenerator) matchNumCols(tables []tableT, newTable tableT, canAgg return newTable } -func getRandomOrderDirection(r *rand.Rand) sqlparser.OrderDirection { +func getRandomOrderDirection() sqlparser.OrderDirection { // asc, desc - return randomEl(r, []sqlparser.OrderDirection{0, 1}) + return randomEl([]sqlparser.OrderDirection{0, 1}) } -func getRandomJoinType(r *rand.Rand) sqlparser.JoinType { +func getRandomJoinType() sqlparser.JoinType { // normal, straight, left, right, natural, natural left, natural right - return randomEl(r, []sqlparser.JoinType{0, 1, 2, 3, 4, 5, 6}) + return 
randomEl([]sqlparser.JoinType{0, 1, 2, 3, 4, 5, 6}) } -func randomEl[K any](r *rand.Rand, in []K) K { - return in[r.Intn(len(in))] +func randomEl[K any](in []K) K { + return in[rand.IntN(len(in))] } func newAliasedTable(tbl tableT, alias string) *sqlparser.AliasedTableExpr { diff --git a/go/test/endtoend/vtgate/queries/random/query_gen_test.go b/go/test/endtoend/vtgate/queries/random/query_gen_test.go deleted file mode 100644 index fe8aa6f6492..00000000000 --- a/go/test/endtoend/vtgate/queries/random/query_gen_test.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package random - -import ( - "fmt" - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/sqlparser" -) - -// TestSeed makes sure that the seed is deterministic -func TestSeed(t *testing.T) { - // specify the schema (that is defined in schema.sql) - schemaTables := []tableT{ - {tableExpr: sqlparser.NewTableName("emp")}, - {tableExpr: sqlparser.NewTableName("dept")}, - } - schemaTables[0].addColumns([]column{ - {name: "empno", typ: "bigint"}, - {name: "ename", typ: "varchar"}, - {name: "job", typ: "varchar"}, - {name: "mgr", typ: "bigint"}, - {name: "hiredate", typ: "date"}, - {name: "sal", typ: "bigint"}, - {name: "comm", typ: "bigint"}, - {name: "deptno", typ: "bigint"}, - }...) 
- schemaTables[1].addColumns([]column{ - {name: "deptno", typ: "bigint"}, - {name: "dname", typ: "varchar"}, - {name: "loc", typ: "varchar"}, - }...) - - seed := int64(1689757943775102000) - genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CannotAggregate, "", 0, false) - qg := newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) - qg.randomQuery() - query1 := sqlparser.String(qg.stmt) - qg = newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) - qg.randomQuery() - query2 := sqlparser.String(qg.stmt) - fmt.Println(query1) - require.Equal(t, query1, query2) -} diff --git a/go/test/endtoend/vtgate/queries/random/random_expr_test.go b/go/test/endtoend/vtgate/queries/random/random_expr_test.go index 450169a8d9f..8de1896d0e3 100644 --- a/go/test/endtoend/vtgate/queries/random/random_expr_test.go +++ b/go/test/endtoend/vtgate/queries/random/random_expr_test.go @@ -17,9 +17,7 @@ limitations under the License. package random import ( - "math/rand" "testing" - "time" "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" @@ -49,11 +47,8 @@ func TestRandomExprWithTables(t *testing.T) { }...) for i := 0; i < 100; i++ { - - seed := time.Now().UnixNano() - r := rand.New(rand.NewSource(seed)) genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CanAggregate, "", 0, false) - g := sqlparser.NewGenerator(r, 3, slice.Map(schemaTables, func(t tableT) sqlparser.ExprGenerator { return &t })...) + g := sqlparser.NewGenerator(3, slice.Map(schemaTables, func(t tableT) sqlparser.ExprGenerator { return &t })...) 
g.Expression(genConfig) } } diff --git a/go/test/endtoend/vtgate/queries/random/random_test.go b/go/test/endtoend/vtgate/queries/random/random_test.go index aea43c2f929..2d210ee7f99 100644 --- a/go/test/endtoend/vtgate/queries/random/random_test.go +++ b/go/test/endtoend/vtgate/queries/random/random_test.go @@ -18,7 +18,6 @@ package random import ( "fmt" - "math/rand" "strings" "testing" "time" @@ -72,7 +71,7 @@ func helperTest(t *testing.T, query string) { mcmp, closer := start(t) defer closer() - result, err := mcmp.ExecAllowAndCompareError(query) + result, err := mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) fmt.Println(result) fmt.Println(err) }) @@ -258,16 +257,14 @@ func TestRandom(t *testing.T) { var queryCount, queryFailCount int // continue testing after an error if and only if testFailingQueries is true for time.Now().Before(endBy) && (!t.Failed() || !testFailingQueries) { - seed := time.Now().UnixNano() genConfig := sqlparser.NewExprGeneratorConfig(sqlparser.CannotAggregate, "", 0, false) - qg := newQueryGenerator(rand.New(rand.NewSource(seed)), genConfig, 2, 2, 2, schemaTables) + qg := newQueryGenerator(genConfig, 2, 2, 2, schemaTables) qg.randomQuery() query := sqlparser.String(qg.stmt) - _, vtErr := mcmp.ExecAllowAndCompareError(query) + _, vtErr := mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) // this assumes all queries are valid mysql queries if vtErr != nil { - fmt.Printf("seed: %d\n", seed) fmt.Println(query) fmt.Println(vtErr) diff --git a/go/test/endtoend/vtgate/queries/random/simplifier_test.go b/go/test/endtoend/vtgate/queries/random/simplifier_test.go index 478ee355d34..c93c0e679c1 100644 --- a/go/test/endtoend/vtgate/queries/random/simplifier_test.go +++ b/go/test/endtoend/vtgate/queries/random/simplifier_test.go @@ -22,6 +22,7 @@ import ( "testing" "vitess.io/vitess/go/test/vschemawrapper" + "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/require" @@ -36,22 +37,22 @@ func 
TestSimplifyResultsMismatchedQuery(t *testing.T) { t.Skip("Skip CI") var queries []string - queries = append(queries, "select /*vt+ PLANNER=Gen4 */ (68 - -16) / case false when -45 then 3 when 28 then -43 else -62 end as crandom0 from dept as tbl0, (select /*vt+ PLANNER=Gen4 */ distinct not not false and count(*) from emp as tbl0, emp as tbl1 where tbl1.ename) as tbl1 limit 1", - "select /*vt+ PLANNER=Gen4 */ distinct case true when 'burro' then 'trout' else 'elf' end < case count(distinct true) when 'bobcat' then 'turkey' else 'penguin' end from dept as tbl0, emp as tbl1 where 'spider'", - "select /*vt+ PLANNER=Gen4 */ distinct sum(distinct tbl1.deptno) from dept as tbl0, emp as tbl1 where tbl0.deptno and tbl1.comm in (12, tbl0.deptno, case false when 67 then -17 when -78 then -35 end, -76 >> -68)", - "select /*vt+ PLANNER=Gen4 */ count(*) + 1 from emp as tbl0 order by count(*) desc", - "select /*vt+ PLANNER=Gen4 */ count(2 >> tbl2.mgr), sum(distinct tbl2.empno <=> 15) from emp as tbl0 left join emp as tbl2 on -32", - "select /*vt+ PLANNER=Gen4 */ sum(case false when true then tbl1.deptno else -154 / 132 end) as caggr1 from emp as tbl0, dept as tbl1", - "select /*vt+ PLANNER=Gen4 */ tbl1.dname as cgroup0, tbl1.dname as cgroup1 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc", - "select /*vt+ PLANNER=Gen4 */ tbl0.ename as cgroup1 from emp as tbl0 group by tbl0.job, tbl0.ename having sum(tbl0.mgr) = sum(tbl0.mgr) order by tbl0.job desc, tbl0.ename asc limit 8", - "select /*vt+ PLANNER=Gen4 */ distinct count(*) as caggr1 from dept as tbl0, emp as tbl1 group by tbl1.sal having max(tbl1.comm) != true", - "select /*vt+ PLANNER=Gen4 */ distinct sum(tbl1.loc) as caggr0 from dept as tbl0, dept as tbl1 group by tbl1.deptno having max(tbl1.dname) <= 1", - "select /*vt+ PLANNER=Gen4 */ min(tbl0.deptno) as caggr0 from dept as tbl0, emp as tbl1 where case when false then tbl0.dname end group by tbl1.comm", - "select /*vt+ PLANNER=Gen4 
*/ count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 1 = 0", - "select /*vt+ PLANNER=Gen4 */ count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 'octopus'", - "select /*vt+ PLANNER=Gen4 */ distinct 'octopus' as crandom0 from dept as tbl0, emp as tbl1 where tbl0.deptno = tbl1.empno having count(*) = count(*)", - "select /*vt+ PLANNER=Gen4 */ max(tbl0.deptno) from dept as tbl0 right join emp as tbl1 on tbl0.deptno = tbl1.empno and tbl0.deptno = tbl1.deptno group by tbl0.deptno", - "select /*vt+ PLANNER=Gen4 */ count(tbl1.comm) from emp as tbl1 right join emp as tbl2 on tbl1.mgr = tbl2.sal") + queries = append(queries, "select (68 - -16) / case false when -45 then 3 when 28 then -43 else -62 end as crandom0 from dept as tbl0, (select distinct not not false and count(*) from emp as tbl0, emp as tbl1 where tbl1.ename) as tbl1 limit 1", + "select distinct case true when 'burro' then 'trout' else 'elf' end < case count(distinct true) when 'bobcat' then 'turkey' else 'penguin' end from dept as tbl0, emp as tbl1 where 'spider'", + "select distinct sum(distinct tbl1.deptno) from dept as tbl0, emp as tbl1 where tbl0.deptno and tbl1.comm in (12, tbl0.deptno, case false when 67 then -17 when -78 then -35 end, -76 >> -68)", + "select count(*) + 1 from emp as tbl0 order by count(*) desc", + "select count(2 >> tbl2.mgr), sum(distinct tbl2.empno <=> 15) from emp as tbl0 left join emp as tbl2 on -32", + "select sum(case false when true then tbl1.deptno else -154 / 132 end) as caggr1 from emp as tbl0, dept as tbl1", + "select tbl1.dname as cgroup0, tbl1.dname as cgroup1 from dept as tbl0, dept as tbl1 group by tbl1.dname, tbl1.deptno order by tbl1.deptno desc", + "select tbl0.ename as cgroup1 from emp as tbl0 group by tbl0.job, tbl0.ename having sum(tbl0.mgr) = sum(tbl0.mgr) order by tbl0.job desc, tbl0.ename asc limit 8", + "select distinct count(*) as caggr1 from dept as tbl0, emp as tbl1 group by tbl1.sal having max(tbl1.comm) != true", + 
"select distinct sum(tbl1.loc) as caggr0 from dept as tbl0, dept as tbl1 group by tbl1.deptno having max(tbl1.dname) <= 1", + "select min(tbl0.deptno) as caggr0 from dept as tbl0, emp as tbl1 where case when false then tbl0.dname end group by tbl1.comm", + "select count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 1 = 0", + "select count(*) as caggr0, 1 as crandom0 from dept as tbl0, emp as tbl1 where 'octopus'", + "select distinct 'octopus' as crandom0 from dept as tbl0, emp as tbl1 where tbl0.deptno = tbl1.empno having count(*) = count(*)", + "select max(tbl0.deptno) from dept as tbl0 right join emp as tbl1 on tbl0.deptno = tbl1.empno and tbl0.deptno = tbl1.deptno group by tbl0.deptno", + "select count(tbl1.comm) from emp as tbl1 right join emp as tbl2 on tbl1.mgr = tbl2.sal") for _, query := range queries { var simplified string @@ -63,7 +64,7 @@ func TestSimplifyResultsMismatchedQuery(t *testing.T) { mcmp, closer := start(t) defer closer() - mcmp.ExecAllowAndCompareError(simplified) + mcmp.ExecAllowAndCompareError(simplified, utils.CompareOptions{}) }) fmt.Printf("final simplified query: %s\n", simplified) @@ -76,7 +77,7 @@ func simplifyResultsMismatchedQuery(t *testing.T, query string) string { mcmp, closer := start(t) defer closer() - _, err := mcmp.ExecAllowAndCompareError(query) + _, err := mcmp.ExecAllowAndCompareError(query, utils.CompareOptions{}) if err == nil { t.Fatalf("query (%s) does not error", query) } else if !strings.Contains(err.Error(), "mismatched") { @@ -88,13 +89,14 @@ func simplifyResultsMismatchedQuery(t *testing.T, query string) string { formal, err := vindexes.LoadFormal("svschema.json") require.NoError(t, err) - vSchema := vindexes.BuildVSchema(formal) + vSchema := vindexes.BuildVSchema(formal, sqlparser.NewTestParser()) vSchemaWrapper := &vschemawrapper.VSchemaWrapper{ V: vSchema, Version: planbuilder.Gen4, + Env: vtenv.NewTestEnv(), } - stmt, err := sqlparser.Parse(query) + stmt, err := 
sqlparser.NewTestParser().Parse(query) require.NoError(t, err) simplified := simplifier.SimplifyStatement( @@ -103,7 +105,7 @@ func simplifyResultsMismatchedQuery(t *testing.T, query string) string { vSchemaWrapper, func(statement sqlparser.SelectStatement) bool { q := sqlparser.String(statement) - _, newErr := mcmp.ExecAllowAndCompareError(q) + _, newErr := mcmp.ExecAllowAndCompareError(q, utils.CompareOptions{}) if newErr == nil { return false } else { diff --git a/go/test/endtoend/vtgate/queries/reference/reference_test.go b/go/test/endtoend/vtgate/queries/reference/reference_test.go index 75efc840880..0e3096e6064 100644 --- a/go/test/endtoend/vtgate/queries/reference/reference_test.go +++ b/go/test/endtoend/vtgate/queries/reference/reference_test.go @@ -83,6 +83,25 @@ func TestReferenceRouting(t *testing.T) { `[[INT64(0)]]`, ) + t.Run("Complex reference query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + // Verify a complex query using reference tables with a left join having a derived table with an order by clause works as intended. + utils.AssertMatches( + t, + conn, + `SELECT t.id FROM ( + SELECT zd.id, zd.zip_id + FROM `+shardedKeyspaceName+`.zip_detail AS zd + WHERE zd.id IN (2) + ORDER BY zd.discontinued_at + LIMIT 1 + ) AS t + LEFT JOIN `+shardedKeyspaceName+`.zip_detail AS t0 ON t.zip_id = t0.zip_id + ORDER BY t.id`, + `[[INT64(2)]]`, + ) + }) + // UPDATE should route an unqualified zip_detail to unsharded keyspace. 
utils.Exec(t, conn, "UPDATE zip_detail SET discontinued_at = NULL WHERE id = 2") @@ -114,7 +133,7 @@ func TestReferenceRouting(t *testing.T) { utils.AssertMatches( t, conn, - `SELECT /*vt+ PLANNER=gen4 */ COUNT(zd.id) + `SELECT COUNT(zd.id) FROM delivery_failure df JOIN zip_detail zd ON zd.id = df.zip_detail_id WHERE zd.id = 3`, `[[INT64(0)]]`, diff --git a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go index 01cc7b2ee54..59dc42de060 100644 --- a/go/test/endtoend/vtgate/queries/subquery/subquery_test.go +++ b/go/test/endtoend/vtgate/queries/subquery/subquery_test.go @@ -19,12 +19,11 @@ package subquery import ( "testing" - "vitess.io/vitess/go/test/endtoend/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" ) func start(t *testing.T) (utils.MySQLCompare, func()) { @@ -58,6 +57,26 @@ func TestSubqueriesHasValues(t *testing.T) { mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`) } +func TestNotINQueries(t *testing.T) { + // Tests NOT IN where the RHS contains all rows, some rows and no rows + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (0,1),(1,2),(2,3),(3,4),(4,5),(5,6)") + // no matching rows + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`) + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id2 FROM t1 WHERE id2 > 10) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)] [INT64(6)]]`) + + // some matching rows + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1 WHERE id1 > 3) ORDER BY id2`, `[[INT64(1)] [INT64(2)] 
[INT64(3)] [INT64(4)]]`) + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id2 FROM t1 WHERE id2 > 3) ORDER BY id2`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + + // all rows matching + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id1 FROM t1) ORDER BY id2`, `[]`) + mcmp.AssertMatches(`SELECT id2 FROM t1 WHERE id1 NOT IN (SELECT id2 FROM t1) ORDER BY id2`, `[[INT64(1)]]`) + +} + // Test only supported in >= v16.0.0 func TestSubqueriesExists(t *testing.T) { utils.SkipIfBinaryIsBelowVersion(t, 16, "vtgate") @@ -97,7 +116,6 @@ func TestSubqueryInINClause(t *testing.T) { } func TestSubqueryInUpdate(t *testing.T) { - utils.SkipIfBinaryIsBelowVersion(t, 14, "vtgate") mcmp, closer := start(t) defer closer() @@ -106,13 +124,12 @@ func TestSubqueryInUpdate(t *testing.T) { utils.Exec(t, conn, `insert into t1(id1, id2) values (1, 10), (2, 20), (3, 30), (4, 40), (5, 50)`) utils.Exec(t, conn, `insert into t2(id3, id4) values (1, 3), (2, 4)`) utils.AssertMatches(t, conn, `SELECT id2, keyspace_id FROM t1_id2_idx WHERE id2 IN (2,10)`, `[[INT64(10) VARBINARY("\x16k@\xb4J\xbaK\xd6")]]`) - utils.Exec(t, conn, `update /*vt+ PLANNER=gen4 */ t1 set id2 = (select count(*) from t2) where id1 = 1`) + utils.Exec(t, conn, `update t1 set id2 = (select count(*) from t2) where id1 = 1`) utils.AssertMatches(t, conn, `SELECT id2 FROM t1 WHERE id1 = 1`, `[[INT64(2)]]`) utils.AssertMatches(t, conn, `SELECT id2, keyspace_id FROM t1_id2_idx WHERE id2 IN (2,10)`, `[[INT64(2) VARBINARY("\x16k@\xb4J\xbaK\xd6")]]`) } func TestSubqueryInReference(t *testing.T) { - utils.SkipIfBinaryIsBelowVersion(t, 14, "vtgate") mcmp, closer := start(t) defer closer() @@ -141,3 +158,33 @@ func TestSubqueryInReference(t *testing.T) { mcmp.AssertMatches(`select (select id1 from t1 where id2 = 30)`, `[[INT64(3)]]`) mcmp.AssertMatches(`select (select id1 from t1 where id2 = 9)`, `[[NULL]]`) } + +// TestSubqueryInAggregation validates that subquery work inside aggregation functions. 
+func TestSubqueryInAggregation(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values(0,0),(1,1)") + mcmp.Exec("insert into t2(id3, id4) values(1,2),(5,7)") + mcmp.Exec(`SELECT max((select min(id2) from t1)) FROM t2`) + mcmp.Exec(`SELECT max((select group_concat(id1, id2) from t1 where id1 = 1)) FROM t1 where id1 = 1`) + mcmp.Exec(`SELECT max((select min(id2) from t1 where id2 = 1)) FROM dual`) + mcmp.Exec(`SELECT max((select min(id2) from t1)) FROM t2 where id4 = 7`) + + // This fails as the planner adds `weight_string` method which make the query fail on MySQL. + // mcmp.Exec(`SELECT max((select min(id2) from t1 where t1.id1 = t.id1)) FROM t1 t`) +} + +// TestSubqueryInDerivedTable tests that subqueries and derived tables +// are handled correctly when there are joins inside the derived table +func TestSubqueryInDerivedTable(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("INSERT INTO t1 (id1, id2) VALUES (1, 100), (2, 200), (3, 300), (4, 400), (5, 500);") + mcmp.Exec("INSERT INTO t2 (id3, id4) VALUES (10, 1), (20, 2), (30, 3), (40, 4), (50, 99)") + mcmp.Exec(`select t.a from (select t1.id2, t2.id3, (select id2 from t1 order by id2 limit 1) as a from t1 join t2 on t1.id1 = t2.id4) t`) + mcmp.Exec(`SELECT COUNT(*) FROM (SELECT DISTINCT t1.id1 FROM t1 JOIN t2 ON t1.id1 = t2.id4) dt`) +} diff --git a/go/test/endtoend/vtgate/queries/timeout/main_test.go b/go/test/endtoend/vtgate/queries/timeout/main_test.go index d71dc55ef46..c265e824e88 100644 --- a/go/test/endtoend/vtgate/queries/timeout/main_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/main_test.go @@ -63,8 +63,8 @@ func TestMain(m *testing.M) { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-config-max-result-size", "1000000", - "--queryserver-config-query-timeout", "200", - 
"--queryserver-config-query-pool-timeout", "200") + "--queryserver-config-query-timeout", "200s", + "--queryserver-config-query-pool-timeout", "200s") // Start Unsharded keyspace ukeyspace := &cluster.Keyspace{ Name: uks, diff --git a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go index 9c81a6c5822..a8202cd5593 100644 --- a/go/test/endtoend/vtgate/queries/timeout/timeout_test.go +++ b/go/test/endtoend/vtgate/queries/timeout/timeout_test.go @@ -19,7 +19,6 @@ package misc import ( "testing" - _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -98,3 +97,33 @@ func TestQueryTimeoutWithTables(t *testing.T) { assert.Contains(t, err.Error(), "context deadline exceeded") assert.Contains(t, err.Error(), "(errno 1317) (sqlstate 70100)") } + +// TestQueryTimeoutWithShardTargeting tests the query timeout with shard targeting. +func TestQueryTimeoutWithShardTargeting(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + + mcmp, closer := start(t) + defer closer() + + // shard targeting to -80 shard. 
+ utils.Exec(t, mcmp.VtConn, "use `ks_misc/-80`") + + // insert some data + utils.Exec(t, mcmp.VtConn, "insert into t1(id1, id2) values (1,2),(3,4),(4,5),(5,6)") + + // insert + _, err := utils.ExecAllowError(t, mcmp.VtConn, "insert /*vt+ QUERY_TIMEOUT_MS=1 */ into t1(id1, id2) values (6,sleep(5))") + assert.ErrorContains(t, err, "context deadline exceeded (errno 1317) (sqlstate 70100)") + + // update + _, err = utils.ExecAllowError(t, mcmp.VtConn, "update /*vt+ QUERY_TIMEOUT_MS=1 */ t1 set id2 = sleep(5)") + assert.ErrorContains(t, err, "context deadline exceeded (errno 1317) (sqlstate 70100)") + + // delete + _, err = utils.ExecAllowError(t, mcmp.VtConn, "delete /*vt+ QUERY_TIMEOUT_MS=1 */ from t1 where id2 = sleep(5)") + assert.ErrorContains(t, err, "context deadline exceeded (errno 1317) (sqlstate 70100)") + + // select + _, err = utils.ExecAllowError(t, mcmp.VtConn, "select /*vt+ QUERY_TIMEOUT_MS=1 */ 1 from t1 where id2 = 5 and sleep(100)") + assert.ErrorContains(t, err, "context deadline exceeded (errno 1317) (sqlstate 70100)") +} diff --git a/go/test/endtoend/vtgate/queries/tpch/main_test.go b/go/test/endtoend/vtgate/queries/tpch/main_test.go new file mode 100644 index 00000000000..103adb336ab --- /dev/null +++ b/go/test/endtoend/vtgate/queries/tpch/main_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package union + +import ( + _ "embed" + "flag" + "fmt" + "os" + "testing" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + mysqlParams mysql.ConnParams + keyspaceName = "ks" + cell = "zone-1" + + //go:embed schema.sql + schemaSQL string + + //go:embed vschema.json + vschema string +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: schemaSQL, + VSchema: vschema, + } + err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 0, false) + if err != nil { + return 1 + } + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = clusterInstance.GetVTParams(keyspaceName) + + // create mysql instance and connection parameters + conn, closer, err := utils.NewMySQL(clusterInstance, keyspaceName, schemaSQL) + if err != nil { + fmt.Println(err) + return 1 + } + defer closer() + mysqlParams = conn + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/test/endtoend/vtgate/queries/tpch/schema.sql b/go/test/endtoend/vtgate/queries/tpch/schema.sql new file mode 100644 index 00000000000..44af337938f --- /dev/null +++ b/go/test/endtoend/vtgate/queries/tpch/schema.sql @@ -0,0 +1,291 @@ +CREATE TABLE IF NOT EXISTS nation +( + N_NATIONKEY + INTEGER + NOT + NULL, + N_NAME + CHAR +( + 25 +) NOT NULL, + N_REGIONKEY INTEGER NOT NULL, + N_COMMENT VARCHAR +( + 152 +), + PRIMARY KEY +( + N_NATIONKEY +)); + +CREATE TABLE IF NOT EXISTS region +( + R_REGIONKEY + INTEGER + NOT + NULL, + R_NAME + CHAR +( + 25 +) NOT NULL, + 
R_COMMENT VARCHAR +( + 152 +), + PRIMARY KEY +( + R_REGIONKEY +)); + +CREATE TABLE IF NOT EXISTS part +( + P_PARTKEY + INTEGER + NOT + NULL, + P_NAME + VARCHAR +( + 55 +) NOT NULL, + P_MFGR CHAR +( + 25 +) NOT NULL, + P_BRAND CHAR +( + 10 +) NOT NULL, + P_TYPE VARCHAR +( + 25 +) NOT NULL, + P_SIZE INTEGER NOT NULL, + P_CONTAINER CHAR +( + 10 +) NOT NULL, + P_RETAILPRICE DECIMAL +( + 15, + 2 +) NOT NULL, + P_COMMENT VARCHAR +( + 23 +) NOT NULL, + PRIMARY KEY +( + P_PARTKEY +)); + +CREATE TABLE IF NOT EXISTS supplier +( + S_SUPPKEY + INTEGER + NOT + NULL, + S_NAME + CHAR +( + 25 +) NOT NULL, + S_ADDRESS VARCHAR +( + 40 +) NOT NULL, + S_NATIONKEY INTEGER NOT NULL, + S_PHONE CHAR +( + 15 +) NOT NULL, + S_ACCTBAL DECIMAL +( + 15, + 2 +) NOT NULL, + S_COMMENT VARCHAR +( + 101 +) NOT NULL, + PRIMARY KEY +( + S_SUPPKEY +)); + +CREATE TABLE IF NOT EXISTS partsupp +( + PS_PARTKEY + INTEGER + NOT + NULL, + PS_SUPPKEY + INTEGER + NOT + NULL, + PS_AVAILQTY + INTEGER + NOT + NULL, + PS_SUPPLYCOST + DECIMAL +( + 15, + 2 +) NOT NULL, + PS_COMMENT VARCHAR +( + 199 +) NOT NULL, + PRIMARY KEY +( + PS_PARTKEY, + PS_SUPPKEY +)); + +CREATE TABLE IF NOT EXISTS customer +( + C_CUSTKEY + INTEGER + NOT + NULL, + C_NAME + VARCHAR +( + 25 +) NOT NULL, + C_ADDRESS VARCHAR +( + 40 +) NOT NULL, + C_NATIONKEY INTEGER NOT NULL, + C_PHONE CHAR +( + 15 +) NOT NULL, + C_ACCTBAL DECIMAL +( + 15, + 2 +) NOT NULL, + C_MKTSEGMENT CHAR +( + 10 +) NOT NULL, + C_COMMENT VARCHAR +( + 117 +) NOT NULL, + PRIMARY KEY +( + C_CUSTKEY +)); + +CREATE TABLE IF NOT EXISTS orders +( + O_ORDERKEY + INTEGER + NOT + NULL, + O_CUSTKEY + INTEGER + NOT + NULL, + O_ORDERSTATUS + CHAR +( + 1 +) NOT NULL, + O_TOTALPRICE DECIMAL +( + 15, + 2 +) NOT NULL, + O_ORDERDATE DATE NOT NULL, + O_ORDERPRIORITY CHAR +( + 15 +) NOT NULL, + O_CLERK CHAR +( + 15 +) NOT NULL, + O_SHIPPRIORITY INTEGER NOT NULL, + O_COMMENT VARCHAR +( + 79 +) NOT NULL, + PRIMARY KEY +( + O_ORDERKEY +)); + +CREATE TABLE IF NOT EXISTS lineitem +( + L_ORDERKEY + 
INTEGER + NOT + NULL, + L_PARTKEY + INTEGER + NOT + NULL, + L_SUPPKEY + INTEGER + NOT + NULL, + L_LINENUMBER + INTEGER + NOT + NULL, + L_QUANTITY + DECIMAL +( + 15, + 2 +) NOT NULL, + L_EXTENDEDPRICE DECIMAL +( + 15, + 2 +) NOT NULL, + L_DISCOUNT DECIMAL +( + 15, + 2 +) NOT NULL, + L_TAX DECIMAL +( + 15, + 2 +) NOT NULL, + L_RETURNFLAG CHAR +( + 1 +) NOT NULL, + L_LINESTATUS CHAR +( + 1 +) NOT NULL, + L_SHIPDATE DATE NOT NULL, + L_COMMITDATE DATE NOT NULL, + L_RECEIPTDATE DATE NOT NULL, + L_SHIPINSTRUCT CHAR +( + 25 +) NOT NULL, + L_SHIPMODE CHAR +( + 10 +) NOT NULL, + L_COMMENT VARCHAR +( + 44 +) NOT NULL, + PRIMARY KEY +( + L_ORDERKEY, + L_LINENUMBER +)); diff --git a/go/test/endtoend/vtgate/queries/tpch/tpch_test.go b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go new file mode 100644 index 00000000000..bd35fe3f67c --- /dev/null +++ b/go/test/endtoend/vtgate/queries/tpch/tpch_test.go @@ -0,0 +1,237 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package union + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/utils" +) + +func start(t *testing.T) (utils.MySQLCompare, func()) { + mcmp, err := utils.NewMySQLCompare(t, vtParams, mysqlParams) + require.NoError(t, err) + + deleteAll := func() { + _, _ = utils.ExecAllowError(t, mcmp.VtConn, "set workload = oltp") + + tables := []string{"nation", "region", "part", "supplier", "partsupp", "customer", "orders", "lineitem"} + for _, table := range tables { + _, _ = mcmp.ExecAndIgnore("delete from " + table) + } + } + + deleteAll() + + return mcmp, func() { + deleteAll() + mcmp.Close() + cluster.PanicHandler(t) + } +} + +func TestTPCHQueries(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 20, "vtgate") + mcmp, closer := start(t) + defer closer() + err := utils.WaitForColumn(t, clusterInstance.VtgateProcess, keyspaceName, "region", `R_COMMENT`) + require.NoError(t, err) + + insertQueries := []string{ + `INSERT INTO region (R_REGIONKEY, R_NAME, R_COMMENT) VALUES + (1, 'ASIA', 'Eastern Asia'), + (2, 'MIDDLE EAST', 'Rich cultural heritage');`, + `INSERT INTO nation (N_NATIONKEY, N_NAME, N_REGIONKEY, N_COMMENT) VALUES + (1, 'China', 1, 'Large population'), + (2, 'India', 1, 'Large variety of cultures'), + (3, 'Nation A', 2, 'Historic sites'), + (4, 'Nation B', 2, 'Beautiful landscapes');`, + `INSERT INTO supplier (S_SUPPKEY, S_NAME, S_ADDRESS, S_NATIONKEY, S_PHONE, S_ACCTBAL, S_COMMENT) VALUES + (1, 'Supplier A', '123 Square', 1, '86-123-4567', 5000.00, 'High quality steel'), + (2, 'Supplier B', '456 Ganges St', 2, '91-789-4561', 5500.00, 'Efficient production'), + (3, 'Supplier 1', 'Supplier Address 1', 3, '91-789-4562', 3000.00, 'Supplier Comment 1'), + (4, 'Supplier 2', 'Supplier Address 2', 2, '91-789-4563', 4000.00, 'Supplier Comment 2');`, + `INSERT INTO part (P_PARTKEY, P_NAME, P_MFGR, P_BRAND, P_TYPE, P_SIZE, P_CONTAINER, P_RETAILPRICE, P_COMMENT) 
VALUES + (100, 'Part 100', 'MFGR A', 'Brand X', 'BOLT STEEL', 30, 'SM BOX', 45.00, 'High strength'), + (101, 'Part 101', 'MFGR B', 'Brand Y', 'NUT STEEL', 30, 'LG BOX', 30.00, 'Rust resistant');`, + `INSERT INTO partsupp (PS_PARTKEY, PS_SUPPKEY, PS_AVAILQTY, PS_SUPPLYCOST, PS_COMMENT) VALUES + (100, 1, 500, 10.00, 'Deliveries on time'), + (101, 2, 300, 9.00, 'Back orders possible'), + (100, 2, 600, 8.50, 'Bulk discounts available');`, + `INSERT INTO customer (C_CUSTKEY, C_NAME, C_ADDRESS, C_NATIONKEY, C_PHONE, C_ACCTBAL, C_MKTSEGMENT, C_COMMENT) VALUES + (1, 'Customer A', '1234 Drive Lane', 1, '123-456-7890', 1000.00, 'AUTOMOBILE', 'Frequent orders'), + (2, 'Customer B', '5678 Park Ave', 2, '234-567-8901', 2000.00, 'AUTOMOBILE', 'Large orders'), + (3, 'Customer 1', 'Address 1', 1, 'Phone 1', 1000.00, 'Segment 1', 'Comment 1'), + (4, 'Customer 2', 'Address 2', 2, 'Phone 2', 2000.00, 'Segment 2', 'Comment 2');`, + `INSERT INTO orders (O_ORDERKEY, O_CUSTKEY, O_ORDERSTATUS, O_TOTALPRICE, O_ORDERDATE, O_ORDERPRIORITY, O_CLERK, O_SHIPPRIORITY, O_COMMENT) VALUES + (100, 1, 'O', 15000.00, '1995-03-10', '1-URGENT', 'Clerk#0001', 1, 'N/A'), + (101, 2, 'O', 25000.00, '1995-03-05', '2-HIGH', 'Clerk#0002', 2, 'N/A'), + (1, 3, 'O', 10000.00, '1994-01-10', 'Priority 1', 'Clerk 1', 1, 'Order Comment 1'), + (2, 4, 'O', 20000.00, '1994-06-15', 'Priority 2', 'Clerk 2', 1, 'Order Comment 2');`, + `INSERT INTO lineitem (L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER, L_QUANTITY, L_EXTENDEDPRICE, L_DISCOUNT, L_TAX, L_RETURNFLAG, L_LINESTATUS, L_SHIPDATE, L_COMMITDATE, L_RECEIPTDATE, L_SHIPINSTRUCT, L_SHIPMODE, L_COMMENT) VALUES + (100, 200, 300, 1, 10, 5000.00, 0.05, 0.10, 'N', 'O', '1995-03-15', '1995-03-14', '1995-03-16', 'DELIVER IN PERSON', 'TRUCK', 'Urgent delivery'), + (100, 201, 301, 2, 20, 10000.00, 0.10, 0.10, 'R', 'F', '1995-03-17', '1995-03-15', '1995-03-18', 'NONE', 'MAIL', 'Handle with care'), + (101, 202, 302, 1, 30, 15000.00, 0.00, 0.10, 'A', 'F', '1995-03-20', 
'1995-03-18', '1995-03-21', 'TAKE BACK RETURN', 'SHIP', 'Standard delivery'), + (101, 203, 303, 2, 40, 10000.00, 0.20, 0.10, 'N', 'O', '1995-03-22', '1995-03-20', '1995-03-23', 'DELIVER IN PERSON', 'RAIL', 'Expedite'), + (1, 101, 1, 1, 5, 5000.00, 0.1, 0.05, 'N', 'O', '1994-01-12', '1994-01-11', '1994-01-13', 'Deliver in person','TRUCK', 'Lineitem Comment 1'), + (2, 102, 2, 1, 3, 15000.00, 0.2, 0.05, 'R', 'F', '1994-06-17', '1994-06-15', '1994-06-18', 'Leave at front door','AIR', 'Lineitem Comment 2'), + (11, 100, 2, 1, 30, 10000.00, 0.05, 0.07, 'A', 'F', '1998-07-21', '1998-07-22', '1998-07-23', 'DELIVER IN PERSON', 'TRUCK', 'N/A'), + (12, 101, 3, 1, 50, 15000.00, 0.10, 0.08, 'N', 'O', '1998-08-10', '1998-08-11', '1998-08-12', 'NONE', 'AIR', 'N/A'), + (13, 102, 4, 1, 70, 21000.00, 0.02, 0.04, 'R', 'F', '1998-06-30', '1998-07-01', '1998-07-02', 'TAKE BACK RETURN', 'MAIL', 'N/A'), + (14, 103, 5, 1, 90, 30000.00, 0.15, 0.10, 'A', 'O', '1998-05-15', '1998-05-16', '1998-05-17', 'DELIVER IN PERSON', 'RAIL', 'N/A'), + (15, 104, 2, 1, 45, 45000.00, 0.20, 0.15, 'N', 'F', '1998-07-15', '1998-07-16', '1998-07-17', 'NONE', 'SHIP', 'N/A');`, + } + + for _, query := range insertQueries { + mcmp.Exec(query) + } + + testcases := []struct { + name string + query string + }{ + { + name: "Q1", + query: `select + l_returnflag, + l_linestatus, + sum(l_quantity) as sum_qty, + sum(l_extendedprice) as sum_base_price, + sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, + avg(l_quantity) as avg_qty, + avg(l_extendedprice) as avg_price, + avg(l_discount) as avg_disc, + count(*) as count_order +from + lineitem +where + l_shipdate <= date_sub('1998-12-01', interval 108 day) +group by + l_returnflag, + l_linestatus +order by + l_returnflag, + l_linestatus;`, + }, + { + name: "Q11", + query: `select + ps_partkey, + sum(ps_supplycost * ps_availqty) as value +from + partsupp, + supplier, + nation +where + ps_suppkey = 
s_suppkey + and s_nationkey = n_nationkey + and n_name = 'MOZAMBIQUE' +group by + ps_partkey having + sum(ps_supplycost * ps_availqty) > ( + select + sum(ps_supplycost * ps_availqty) * 0.0001000000 + from + partsupp, + supplier, + nation + where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'MOZAMBIQUE' + ) +order by + value desc;`, + }, + { + name: "Q14 without decimal literal", + query: `select sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - l_discount) + else 0 + end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from lineitem, + part +where l_partkey = p_partkey + and l_shipdate >= '1996-12-01' + and l_shipdate < date_add('1996-12-01', interval '1' month);`, + }, + { + name: "Q14 without case", + query: `select 100.00 * sum(l_extendedprice * (1 - l_discount)) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from lineitem, + part +where l_partkey = p_partkey + and l_shipdate >= '1996-12-01' + and l_shipdate < date_add('1996-12-01', interval '1' month);`, + }, + { + name: "Q14", + query: `select 100.00 * sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - l_discount) + else 0 + end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue +from lineitem, + part +where l_partkey = p_partkey + and l_shipdate >= '1996-12-01' + and l_shipdate < date_add('1996-12-01', interval '1' month);`, + }, + { + name: "Q8", + query: `select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share +from (select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation + from part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + where p_partkey = l_partkey + and s_suppkey = l_suppkey + and l_orderkey = o_orderkey + and o_custkey = c_custkey + and c_nationkey = n1.n_nationkey + and n1.n_regionkey = r_regionkey + and r_name = 'AMERICA' + and s_nationkey = n2.n_nationkey + and 
o_orderdate between date '1995-01-01' and date ('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations +group by o_year +order by o_year`, + }, + { + name: "simple derived table", + query: `select * +from (select l.l_extendedprice * o.o_totalprice + from lineitem l + join orders o) as dt`, + }, + } + + for _, testcase := range testcases { + mcmp.Run(testcase.name, func(mcmp *utils.MySQLCompare) { + mcmp.Exec(testcase.query) + }) + } +} diff --git a/go/test/endtoend/vtgate/queries/tpch/vschema.json b/go/test/endtoend/vtgate/queries/tpch/vschema.json new file mode 100644 index 00000000000..8cdf236e4e1 --- /dev/null +++ b/go/test/endtoend/vtgate/queries/tpch/vschema.json @@ -0,0 +1,121 @@ +{ + "sharded": true, + "foreignKeyMode": "unspecified", + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "basic": { + "name": "basic", + "column_vindexes": [ + { + "columns": [ + "a" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "customer": { + "name": "customer", + "column_vindexes": [ + { + "columns": [ + "C_CUSTKEY" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "lineitem": { + "name": "lineitem", + "column_vindexes": [ + { + "columns": [ + "L_ORDERKEY", + "L_LINENUMBER" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "nation": { + "name": "nation", + "column_vindexes": [ + { + "columns": [ + "N_NATIONKEY" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "orders": { + "name": "orders", + "column_vindexes": [ + { + "columns": [ + "O_ORDERKEY" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "part": { + "name": "part", + "column_vindexes": [ + { + "columns": [ + "P_PARTKEY" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "partsupp": { + "name": "partsupp", + "column_vindexes": [ + { + "columns": [ + "PS_PARTKEY", + "PS_SUPPKEY" + ], + "type": "hash", + "name": "hash" + } + ] + }, + "region": { + "name": "region", + "column_vindexes": [ + { + "columns": [ + "R_REGIONKEY" + ], + "type": "hash", + "name": 
"hash" + } + ] + }, + "supplier": { + "name": "supplier", + "column_vindexes": [ + { + "columns": [ + "S_SUPPKEY" + ], + "type": "hash", + "name": "hash" + } + ] + } + } +} \ No newline at end of file diff --git a/go/test/endtoend/vtgate/queries/union/union_test.go b/go/test/endtoend/vtgate/queries/union/union_test.go index d382d039f02..03f98950f44 100644 --- a/go/test/endtoend/vtgate/queries/union/union_test.go +++ b/go/test/endtoend/vtgate/queries/union/union_test.go @@ -20,7 +20,6 @@ import ( "testing" "vitess.io/vitess/go/test/endtoend/cluster" - "vitess.io/vitess/go/test/endtoend/utils" "github.com/stretchr/testify/assert" @@ -57,7 +56,7 @@ func TestUnionDistinct(t *testing.T) { mcmp.Exec("insert into t2(id3, id4) values (2, 3), (3, 4), (4,4), (5,5)") for _, workload := range []string{"oltp", "olap"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, "set workload = "+workload) mcmp.AssertMatches("select 1 union select null", "[[INT64(1)] [NULL]]") mcmp.AssertMatches("select null union select null", "[[NULL]]") @@ -69,10 +68,18 @@ func TestUnionDistinct(t *testing.T) { mcmp.AssertMatchesNoOrder("select id1 from t1 where id1 = 1 union select 452 union select id1 from t1 where id1 = 4", "[[INT64(1)] [INT64(452)] [INT64(4)]]") mcmp.AssertMatchesNoOrder("select id1, id2 from t1 union select 827, 452 union select id3,id4 from t2", "[[INT64(4) INT64(4)] [INT64(1) INT64(1)] [INT64(2) INT64(2)] [INT64(3) INT64(3)] [INT64(827) INT64(452)] [INT64(2) INT64(3)] [INT64(3) INT64(4)] [INT64(5) INT64(5)]]") - t.Run("skipped for now", func(t *testing.T) { - t.Skip() - mcmp.AssertMatches("select 1 from dual where 1 IN (select 1 as col union select 2)", "[[INT64(1)]]") - }) + mcmp.AssertMatches("select 1 from dual where 1 IN (select 1 as col union select 2)", "[[INT64(1)]]") + if utils.BinaryIsAtLeastAtVersion(19, "vtgate") { + mcmp.AssertMatches(`SELECT 1 from t1 UNION SELECT 2 from t1`, `[[INT64(1)] 
[INT64(2)]]`) + mcmp.AssertMatches(`SELECT 5 from t1 UNION SELECT 6 from t1`, `[[INT64(5)] [INT64(6)]]`) + mcmp.AssertMatchesNoOrder(`SELECT id1 from t1 UNION SELECT id2 from t1`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + mcmp.AssertMatchesNoOrder(`SELECT 1 from t1 UNION SELECT id2 from t1`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + mcmp.AssertMatchesNoOrder(`SELECT 5 from t1 UNION SELECT id2 from t1`, `[[INT64(5)] [INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + mcmp.AssertMatchesNoOrder(`SELECT id1 from t1 UNION SELECT 2 from t1`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)]]`) + mcmp.AssertMatchesNoOrder(`SELECT id1 from t1 UNION SELECT 5 from t1`, `[[INT64(1)] [INT64(2)] [INT64(3)] [INT64(4)] [INT64(5)]]`) + mcmp.Exec(`select curdate() from t1 union select 3 from t1`) + mcmp.Exec(`select curdate() from t1 union select id1 from t1`) + } }) } @@ -86,7 +93,7 @@ func TestUnionAll(t *testing.T) { mcmp.Exec("insert into t2(id3, id4) values(3, 3), (4, 4)") for _, workload := range []string{"oltp", "olap"} { - t.Run(workload, func(t *testing.T) { + mcmp.Run(workload, func(mcmp *utils.MySQLCompare) { utils.Exec(t, mcmp.VtConn, "set workload = "+workload) // union all between two selectuniqueequal mcmp.AssertMatches("select id1 from t1 where id1 = 1 union all select id1 from t1 where id1 = 4", "[[INT64(1)]]") @@ -103,9 +110,15 @@ func TestUnionAll(t *testing.T) { mcmp.AssertMatchesNoOrder("select tbl2.id1 FROM ((select id1 from t1 order by id1 limit 5) union all (select id1 from t1 order by id1 desc limit 5)) as tbl1 INNER JOIN t1 as tbl2 ON tbl1.id1 = tbl2.id1", "[[INT64(1)] [INT64(2)] [INT64(2)] [INT64(1)]]") - // union all between two select unique in tables - mcmp.AssertMatchesNoOrder("select id1 from t1 where id1 in (1, 2, 3, 4, 5, 6, 7, 8) union all select id1 from t1 where id1 in (1, 2, 3, 4, 5, 6, 7, 8)", - "[[INT64(1)] [INT64(2)] [INT64(1)] [INT64(2)]]") + // this test is quite good at uncovering races in the Concatenate engine primitive. 
make it run many times + // see: https://github.com/vitessio/vitess/issues/15434 + if utils.BinaryIsAtLeastAtVersion(20, "vtgate") { + for i := 0; i < 100; i++ { + // union all between two select unique in tables + mcmp.AssertMatchesNoOrder("select id1 from t1 where id1 in (1, 2, 3, 4, 5, 6, 7, 8) union all select id1 from t1 where id1 in (1, 2, 3, 4, 5, 6, 7, 8)", + "[[INT64(1)] [INT64(2)] [INT64(1)] [INT64(2)]]") + } + } // 4 tables union all mcmp.AssertMatchesNoOrder("select id1, id2 from t1 where id1 = 1 union all select id3,id4 from t2 where id3 = 3 union all select id1, id2 from t1 where id1 = 2 union all select id3,id4 from t2 where id3 = 4", diff --git a/go/test/endtoend/vtgate/readafterwrite/raw_test.go b/go/test/endtoend/vtgate/readafterwrite/raw_test.go index 56f9b3a44cb..0549a9b06b0 100644 --- a/go/test/endtoend/vtgate/readafterwrite/raw_test.go +++ b/go/test/endtoend/vtgate/readafterwrite/raw_test.go @@ -119,7 +119,7 @@ func TestMain(m *testing.M) { VSchema: vSchema, } clusterInstance.VtTabletExtraArgs = []string{ - "--queryserver-config-transaction-timeout", "5", + "--queryserver-config-transaction-timeout", "5s", } if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { return 1 diff --git a/go/test/endtoend/vtgate/reservedconn/main_test.go b/go/test/endtoend/vtgate/reservedconn/main_test.go index cc76e7a3b46..528182a82e2 100644 --- a/go/test/endtoend/vtgate/reservedconn/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/main_test.go @@ -133,7 +133,7 @@ func runAllTests(m *testing.M) int { SchemaSQL: sqlSchema, VSchema: vSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5s"} if enableSettingsPool { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool") } diff --git 
a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go index 11325a0f2f8..491ce6bc6ab 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect1/main_test.go @@ -133,7 +133,7 @@ func TestServingChange(t *testing.T) { // changing rdonly tablet to spare (non serving). rdonlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") require.NoError(t, err) rdonlyTablet.Type = "replica" @@ -143,12 +143,12 @@ func TestServingChange(t *testing.T) { // changing replica tablet to rdonly to make rdonly available for serving. replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") require.NoError(t, err) replicaTablet.Type = "rdonly" // to see/make the new rdonly available - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", replicaTablet.Alias) require.NoError(t, err) // this should pass now as there is rdonly present @@ -174,7 +174,7 @@ func TestServingChangeStreaming(t *testing.T) { // changing rdonly tablet to spare (non serving). 
rdonlyTablet := clusterInstance.Keyspaces[0].Shards[0].Rdonly() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", rdonlyTablet.Alias, "replica") require.NoError(t, err) rdonlyTablet.Type = "replica" @@ -192,12 +192,12 @@ func TestServingChangeStreaming(t *testing.T) { // changing replica tablet to rdonly to make rdonly available for serving. replicaTablet := clusterInstance.Keyspaces[0].Shards[0].Replica() - err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ChangeTabletType", replicaTablet.Alias, "rdonly") require.NoError(t, err) replicaTablet.Type = "rdonly" // to see/make the new rdonly available - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Ping", replicaTablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PingTablet", replicaTablet.Alias) require.NoError(t, err) // this should pass now as there is rdonly present diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go index f97d96ef89a..a448574c282 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect2/main_test.go @@ -96,7 +96,7 @@ func runAllTests(m *testing.M) int { SchemaSQL: sqlSchema, VSchema: vSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "5s"} if enableSettingsPool { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool") } @@ -129,7 +129,7 @@ func TestTabletChange(t *testing.T) { utils.Exec(t, conn, "select * from test") // Change Primary - err = 
clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) // this should pass as there is a new primary tablet and is serving. @@ -150,7 +150,7 @@ func TestTabletChangeStreaming(t *testing.T) { utils.Exec(t, conn, "select * from test") // Change Primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("PlannedReparentShard", fmt.Sprintf("%s/%s", keyspaceName, "-80")) require.NoError(t, err) // this should pass as there is a new primary tablet and is serving. diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go index 25af85acc00..677c24666b2 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect3/main_test.go @@ -102,7 +102,7 @@ func TestMysqlDownServingChange(t *testing.T) { require.NoError(t, primaryTablet.MysqlctlProcess.Stop()) require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("EmergencyReparentShard", "--", "--keyspace_shard", "ks/0")) + clusterInstance.VtctldClientProcess.ExecuteCommand("EmergencyReparentShard", "ks/0")) // This should work without any error. 
_ = utils.Exec(t, conn, "select /*vt+ PLANNER=gen4 */ * from test") diff --git a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go index 28367cd597a..1dc53a89506 100644 --- a/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go +++ b/go/test/endtoend/vtgate/reservedconn/reconnect4/main_test.go @@ -104,7 +104,7 @@ func TestVttabletDownServingChange(t *testing.T) { // kill vttablet process _ = primaryTablet.VttabletProcess.TearDown() require.NoError(t, - clusterInstance.VtctlclientProcess.ExecuteCommand("EmergencyReparentShard", "--", "--keyspace_shard", "ks/0")) + clusterInstance.VtctldClientProcess.ExecuteCommand("EmergencyReparentShard", "ks/0")) // This should work without any error. _ = utils.Exec(t, conn, "select /*vt+ PLANNER=gen4 */ * from test") diff --git a/go/test/endtoend/vtgate/schema.sql b/go/test/endtoend/vtgate/schema.sql index a883a26519f..4c9ed46fe9a 100644 --- a/go/test/endtoend/vtgate/schema.sql +++ b/go/test/endtoend/vtgate/schema.sql @@ -155,3 +155,13 @@ create table t10_id_to_keyspace_id_idx keyspace_id varbinary(10), primary key (id) ) Engine = InnoDB; + +create table t11 +( + id bigint, + sharding_key bigint, + col1 varchar(50), + col2 int, + col3 int, + primary key (id) +) Engine = InnoDB; \ No newline at end of file diff --git a/go/test/endtoend/vtgate/schema/schema_test.go b/go/test/endtoend/vtgate/schema/schema_test.go index 04d91d8d978..6b2e8ef7e61 100644 --- a/go/test/endtoend/vtgate/schema/schema_test.go +++ b/go/test/endtoend/vtgate/schema/schema_test.go @@ -108,6 +108,7 @@ func TestSchemaChange(t *testing.T) { testWithDropCreateSchema(t) testDropNonExistentTables(t) testApplySchemaBatch(t) + testUnsafeAllowForeignKeys(t) testCreateInvalidView(t) testCopySchemaShards(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, 2) testCopySchemaShards(t, fmt.Sprintf("%s/0", keyspaceName), 3) @@ -120,7 +121,7 @@ func 
testWithInitialSchema(t *testing.T) { var sqlQuery = "" // nolint for i := 0; i < totalTableCount; i++ { sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", i)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) } @@ -135,7 +136,7 @@ func testWithInitialSchema(t *testing.T) { // testWithAlterSchema if we alter schema and then apply, the resultant schema should match across shards func testWithAlterSchema(t *testing.T) { sqlQuery := fmt.Sprintf(alterTable, fmt.Sprintf("vt_select_test_%02d", 3), "msg") - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) matchSchema(t, clusterInstance.Keyspaces[0].Shards[0].Vttablets[0].VttabletProcess.TabletPath, clusterInstance.Keyspaces[0].Shards[1].Vttablets[0].VttabletProcess.TabletPath) } @@ -143,7 +144,7 @@ func testWithAlterSchema(t *testing.T) { // testWithAlterDatabase tests that ALTER DATABASE is accepted by the validator. 
func testWithAlterDatabase(t *testing.T) { sql := "create database alter_database_test; alter database alter_database_test default character set = utf8mb4; drop database alter_database_test" - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sql) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, sql) assert.NoError(t, err) } @@ -157,7 +158,7 @@ func testWithAlterDatabase(t *testing.T) { // See: https://github.com/vitessio/vitess/issues/1731#issuecomment-222914389 func testWithDropCreateSchema(t *testing.T) { dropCreateTable := fmt.Sprintf("DROP TABLE vt_select_test_%02d ;", 2) + fmt.Sprintf(createTable, fmt.Sprintf("vt_select_test_%02d", 2)) - err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropCreateTable) + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, dropCreateTable) require.NoError(t, err) checkTables(t, totalTableCount) } @@ -186,10 +187,10 @@ func testWithAutoSchemaFromChangeDir(t *testing.T) { // matchSchema schema for supplied tablets should match func matchSchema(t *testing.T, firstTablet string, secondTablet string) { - firstShardSchema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", firstTablet) + firstShardSchema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", firstTablet) require.Nil(t, err) - secondShardSchema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", secondTablet) + secondShardSchema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", secondTablet) require.Nil(t, err) assert.Equal(t, firstShardSchema, secondShardSchema) @@ -203,12 +204,12 @@ func matchSchema(t *testing.T, firstTablet string, secondTablet string) { // is the MySQL behavior the user expects. 
func testDropNonExistentTables(t *testing.T) { dropNonExistentTable := "DROP TABLE nonexistent_table;" - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", dropNonExistentTable, keyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", dropNonExistentTable, keyspaceName) require.Error(t, err) assert.True(t, strings.Contains(output, "Unknown table")) dropIfExists := "DROP TABLE IF EXISTS nonexistent_table;" - err = clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, dropIfExists) + err = clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, dropIfExists) require.Nil(t, err) checkTables(t, totalTableCount) @@ -219,7 +220,7 @@ func testDropNonExistentTables(t *testing.T) { func testCreateInvalidView(t *testing.T) { for _, ddlStrategy := range []string{"direct", "direct -allow-zero-in-date"} { createInvalidView := "CREATE OR REPLACE VIEW invalid_view AS SELECT * FROM nonexistent_table;" - output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", ddlStrategy, "--sql", createInvalidView, keyspaceName) + output, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", ddlStrategy, "--sql", createInvalidView, keyspaceName) require.Error(t, err) assert.Contains(t, output, "doesn't exist (errno 1146)") } @@ -228,25 +229,47 @@ func testCreateInvalidView(t *testing.T) { func testApplySchemaBatch(t *testing.T) { { sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, "--batch_size", "2", keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", 
"--sql", sqls, "--batch-size", "2", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount+5) } { sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", sqls, keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount) } { sqls := "create table batch1(id int primary key);create table batch2(id int primary key);create table batch3(id int primary key);create table batch4(id int primary key);create table batch5(id int primary key);" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--ddl_strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch_size", "2", keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", "direct --allow-zero-in-date", "--sql", sqls, "--batch-size", "2", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount+5) } { sqls := "drop table batch1; drop table batch2; drop table batch3; drop table batch4; drop table batch5" - _, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("ApplySchema", "--", "--sql", sqls, keyspaceName) + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", sqls, keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount) + } +} + +func testUnsafeAllowForeignKeys(t *testing.T) { + sqls := ` + create table t11 (id int primary key, i int, constraint f1101 foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1201 foreign key (i) references t11 (id) on delete set null); + ` + { + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", 
"direct --allow-zero-in-date", "--sql", sqls, keyspaceName) + assert.Error(t, err) + checkTables(t, totalTableCount) + } + { + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--ddl-strategy", "direct --unsafe-allow-foreign-keys --allow-zero-in-date", "--sql", sqls, keyspaceName) + require.NoError(t, err) + checkTables(t, totalTableCount+2) + } + { + _, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("ApplySchema", "--sql", "drop table t11, t12", keyspaceName) require.NoError(t, err) checkTables(t, totalTableCount) } @@ -291,7 +314,7 @@ func testCopySchemaShardWithDifferentDB(t *testing.T, shard int) { source := fmt.Sprintf("%s/0", keyspaceName) tabletAlias := clusterInstance.Keyspaces[0].Shards[shard].Vttablets[0].VttabletProcess.TabletPath - schema, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetSchema", tabletAlias) + schema, err := clusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("GetSchema", tabletAlias) require.Nil(t, err) resultMap := make(map[string]any) @@ -305,7 +328,7 @@ func testCopySchemaShardWithDifferentDB(t *testing.T, shard int) { // (The different charset won't be corrected on the destination shard // because we use "CREATE DATABASE IF NOT EXISTS" and this doesn't fail if // there are differences in the options e.g. the character set.) 
- err = clusterInstance.VtctlclientProcess.ExecuteCommand("ExecuteFetchAsDba", "--", "--json", tabletAlias, "ALTER DATABASE vt_ks CHARACTER SET latin1") + err = clusterInstance.VtctldClientProcess.ExecuteCommand("ExecuteFetchAsDBA", "--json", tabletAlias, "ALTER DATABASE vt_ks CHARACTER SET latin1") require.Nil(t, err) output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("CopySchemaShard", source, fmt.Sprintf("%s/%d", keyspaceName, shard)) diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go index b89b0916e37..3bb4f6dfd9f 100644 --- a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -129,6 +129,8 @@ func TestVSchemaTrackerInit(t *testing.T) { 100*time.Millisecond, 60*time.Second, "initial table list not complete") + + utils.AssertMatches(t, conn, "SHOW VSCHEMA KEYSPACES", `[[VARCHAR("ks") VARCHAR("false") VARCHAR("unmanaged") VARCHAR("")]]`) } // TestVSchemaTrackerKeyspaceReInit tests that the vschema tracker diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go index 1c9f4b0b6e2..8f8050bebe1 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -192,6 +192,12 @@ func TestInitAndUpdate(t *testing.T) { 30*time.Second, "initial table list not complete") + if vtgateVersion >= 19 { + utils.AssertMatches(t, conn, + "SHOW VSCHEMA KEYSPACES", + `[[VARCHAR("ks") VARCHAR("true") VARCHAR("unmanaged") VARCHAR("")]]`) + } + // Init _ = utils.Exec(t, conn, "create table test_sc (id bigint primary key)") expected = `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]` diff --git 
a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go index 3ff0b61b482..09bd97eb9fe 100644 --- a/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/sharded_prs/st_sharded_test.go @@ -181,7 +181,7 @@ func TestMain(m *testing.M) { // This is supposed to change the primary tablet in the shards, meaning that a different tablet // will be responsible for sending schema tracking updates. for _, shard := range clusterInstance.Keyspaces[0].Shards { - err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shard.Name, Cell, shard.Vttablets[1].TabletUID) + err := clusterInstance.VtctldClientProcess.InitializeShard(KeyspaceName, shard.Name, Cell, shard.Vttablets[1].TabletUID) if err != nil { fmt.Println(err) return 1 @@ -209,7 +209,6 @@ func TestMain(m *testing.M) { func TestAddColumn(t *testing.T) { defer cluster.PanicHandler(t) - utils.SkipIfBinaryIsBelowVersion(t, 14, "vtgate") ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.NoError(t, err) diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go index 1a37dfb5cf7..257dd7238f3 100644 --- a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -120,12 +120,7 @@ func TestNewUnshardedTable(t *testing.T) { require.NoError(t, err) defer conn.Close() - vtgateVersion, err := cluster.GetMajorVersion("vtgate") - require.NoError(t, err) - expected := `[[VARCHAR("dual")] [VARCHAR("main")]]` - if vtgateVersion >= 17 { - expected = `[[VARCHAR("main")]]` - } + expected := `[[VARCHAR("main")]]` // ensuring our initial table "main" is in the schema utils.AssertMatchesWithTimeout(t, conn, @@ -138,10 +133,7 @@ func TestNewUnshardedTable(t *testing.T) { // 
create a new table which is not part of the VSchema utils.Exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`) - expected = `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("new_table_tracked")]]` - if vtgateVersion >= 17 { - expected = `[[VARCHAR("main")] [VARCHAR("new_table_tracked")]]` - } + expected = `[[VARCHAR("main")] [VARCHAR("new_table_tracked")]]` // waiting for the vttablet's schema_reload interval to kick in utils.AssertMatchesWithTimeout(t, conn, @@ -176,10 +168,7 @@ func TestNewUnshardedTable(t *testing.T) { utils.Exec(t, conn, `drop table new_table_tracked`) // waiting for the vttablet's schema_reload interval to kick in - expected = `[[VARCHAR("dual")] [VARCHAR("main")]]` - if vtgateVersion >= 17 { - expected = `[[VARCHAR("main")]]` - } + expected = `[[VARCHAR("main")]]` utils.AssertMatchesWithTimeout(t, conn, "SHOW VSCHEMA TABLES", expected, @@ -187,3 +176,55 @@ func TestNewUnshardedTable(t *testing.T) { 30*time.Second, "new_table_tracked not in vschema tables") } + +// TestCaseSensitiveSchemaTracking tests that schema tracking is case-sensitive. +// This test only works on Linux (and not on Windows and Mac) since it has a case-sensitive file system, so it allows +// creating two tables having the same name differing only in casing, but other operating systems don't. +// More information at https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html#:~:text=Table%20names%20are%20stored%20in,lowercase%20on%20storage%20and%20lookup. 
+func TestCaseSensitiveSchemaTracking(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vttablet") + defer cluster.PanicHandler(t) + + // create a sql connection + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // ensuring our initial table "main" is in the schema + utils.AssertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("main")]]`, + 100*time.Millisecond, + 30*time.Second, + "initial tables not found in vschema") + + // Now we create two tables with the same name differing only in casing t1 and T1. + // For both of them we'll have different schema's and verify that we can read the data after schema tracking kicks in. + utils.Exec(t, conn, `create table t1(id bigint, primary key(id)) Engine=InnoDB`) + utils.Exec(t, conn, `create table T1(col bigint, col2 bigint, primary key(col)) Engine=InnoDB`) + + // Wait for schema tracking to be caught up + utils.AssertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("T1")] [VARCHAR("main")] [VARCHAR("t1")]]`, + 100*time.Millisecond, + 30*time.Second, + "schema tracking didn't track both the tables") + + // Run DMLs + utils.Exec(t, conn, `insert into t1(id) values(0),(1)`) + utils.Exec(t, conn, `insert into T1(col, col2) values(0,0),(1,1)`) + + // Verify the tables are queryable + utils.AssertMatchesWithTimeout(t, conn, + `select * from t1`, `[[INT64(0)] [INT64(1)]]`, + 100*time.Millisecond, + 30*time.Second, + "could not query expected rows in t1 through vtgate") + utils.AssertMatchesWithTimeout(t, conn, + `select * from T1`, `[[INT64(0) INT64(0)] [INT64(1) INT64(1)]]`, + 100*time.Millisecond, + 30*time.Second, + "could not query expected rows in T1 through vtgate") +} diff --git a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go index dbc46bdda77..d6357ce8f2a 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go 
+++ b/go/test/endtoend/vtgate/tablet_healthcheck/reparent_test.go @@ -145,11 +145,11 @@ func TestHealthCheckExternallyReparentNewTablet(t *testing.T) { tablet := addTablet(t, reparentTabletUID, reparentTabletType) // promote the new tablet to the primary - err = clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("TabletExternallyReparented", tablet.Alias) require.NoError(t, err) // update the new primary tablet to be read-write - err = clusterInstance.VtctlclientProcess.ExecuteCommand("SetReadWrite", tablet.Alias) + err = clusterInstance.VtctldClientProcess.ExecuteCommand("SetWritable", tablet.Alias, "true") require.NoError(t, err) // wait for the vtgate to finish updating the new primary tablet @@ -236,7 +236,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.NoError(t, err) t.Logf("Deleted tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go index 9386c307a12..50529d9fdf9 100644 --- a/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go +++ b/go/test/endtoend/vtgate/tablet_healthcheck_cache/correctness_test.go @@ -234,7 +234,7 @@ func deleteTablet(t *testing.T, tablet *cluster.Vttablet) { }(tablet) wg.Wait() - err := clusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", tablet.Alias) + err := clusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", tablet.Alias) require.Nil(t, err) t.Logf("Deleted tablet: %s", tablet.Alias) diff --git a/go/test/endtoend/vtgate/transaction/restart/main_test.go b/go/test/endtoend/vtgate/transaction/restart/main_test.go index 3c7ac710e9d..01185b5fa59 
100644 --- a/go/test/endtoend/vtgate/transaction/restart/main_test.go +++ b/go/test/endtoend/vtgate/transaction/restart/main_test.go @@ -23,7 +23,6 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -60,6 +59,9 @@ func TestMain(m *testing.M) { Name: keyspaceName, SchemaSQL: schemaSQL, } + clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, + "--shutdown_grace_period=0s", + ) err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false) if err != nil { return 1 @@ -110,5 +112,4 @@ func TestStreamTxRestart(t *testing.T) { // query should return connection error _, err = utils.ExecAllowError(t, conn, "select connection_id()") require.Error(t, err) - assert.Contains(t, err.Error(), "broken pipe (errno 2006) (sqlstate HY000)") } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index f772fabecc1..461a3c73b35 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -97,53 +97,53 @@ CREATE TABLE allDefaults ( } ` - createProcSQL = `use vt_customer; + createProcSQL = []string{` CREATE PROCEDURE sp_insert() BEGIN insert into allDefaults () values (); END; - +`, ` CREATE PROCEDURE sp_delete() BEGIN delete from allDefaults; END; - +`, ` CREATE PROCEDURE sp_multi_dml() BEGIN insert into allDefaults () values (); delete from allDefaults; END; - +`, ` CREATE PROCEDURE sp_variable() BEGIN insert into allDefaults () values (); SELECT min(id) INTO @myvar FROM allDefaults; DELETE FROM allDefaults WHERE id = @myvar; END; - +`, ` CREATE PROCEDURE sp_select() BEGIN SELECT * FROM allDefaults; END; - +`, ` CREATE PROCEDURE sp_all() BEGIN insert into allDefaults () values (); select * from allDefaults; delete from allDefaults; END; - +`, ` CREATE PROCEDURE in_parameter(IN val int) BEGIN insert into allDefaults(id) values(val); END; - +`, ` CREATE PROCEDURE 
out_parameter(OUT val int) BEGIN insert into allDefaults(id) values (128); select 128 into val from dual; END; -` +`} ) var enableSettingsPool bool @@ -179,7 +179,7 @@ func runAllTests(m *testing.M) int { SchemaSQL: SchemaSQL, VSchema: VSchema, } - clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "3", "--queryserver-config-max-result-size", "30"} + clusterInstance.VtTabletExtraArgs = []string{"--queryserver-config-transaction-timeout", "3s", "--queryserver-config-max-result-size", "30"} if enableSettingsPool { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--queryserver-enable-settings-pool") } @@ -196,7 +196,7 @@ func runAllTests(m *testing.M) int { } primaryTablet := clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet().VttabletProcess - if _, err := primaryTablet.QueryTablet(createProcSQL, KeyspaceName, false); err != nil { + if err := primaryTablet.QueryTabletMultiple(createProcSQL, KeyspaceName, true); err != nil { log.Fatal(err.Error()) return 1 } @@ -332,13 +332,11 @@ func TestCallProcedure(t *testing.T) { utils.AssertMatches(t, conn, "show warnings", `[[VARCHAR("Warning") UINT16(1235) VARCHAR("'CALL' not supported in sharded mode")]]`) - _, err = conn.ExecuteFetch(`CALL sp_select()`, 1000, true) - require.Error(t, err) - require.Contains(t, err.Error(), "Multi-Resultset not supported in stored procedure") + err = conn.ExecuteFetchMultiDrain(`CALL sp_select()`) + require.ErrorContains(t, err, "Multi-Resultset not supported in stored procedure") - _, err = conn.ExecuteFetch(`CALL sp_all()`, 1000, true) - require.Error(t, err) - require.Contains(t, err.Error(), "Multi-Resultset not supported in stored procedure") + err = conn.ExecuteFetchMultiDrain(`CALL sp_all()`) + require.ErrorContains(t, err, "Multi-Resultset not supported in stored procedure") qr = utils.Exec(t, conn, `CALL sp_delete()`) require.GreaterOrEqual(t, 1, int(qr.RowsAffected)) diff --git 
a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go index 83e20d9aa31..3251668e155 100644 --- a/go/test/endtoend/vtgate/vindex_bindvars/main_test.go +++ b/go/test/endtoend/vtgate/vindex_bindvars/main_test.go @@ -25,8 +25,8 @@ import ( "vitess.io/vitess/go/test/endtoend/utils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gotest.tools/assert" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" diff --git a/go/test/endtoend/vtgate/vschema.json b/go/test/endtoend/vtgate/vschema.json index 8d16beec2a6..07b6e76550f 100644 --- a/go/test/endtoend/vtgate/vschema.json +++ b/go/test/endtoend/vtgate/vschema.json @@ -1,11 +1,10 @@ - { "sharded": true, "vindexes": { - "unicode_loose_xxhash" : { + "unicode_loose_xxhash": { "type": "unicode_loose_xxhash" }, - "unicode_loose_md5" : { + "unicode_loose_md5": { "type": "unicode_loose_md5" }, "hash": { @@ -159,7 +158,10 @@ "name": "hash" }, { - "columns": ["id2", "id1"], + "columns": [ + "id2", + "id1" + ], "name": "t4_id2_vdx" } ] @@ -179,7 +181,10 @@ "name": "hash" }, { - "columns": ["id2", "id1"], + "columns": [ + "id2", + "id1" + ], "name": "t6_id2_vdx" } ] @@ -301,6 +306,14 @@ "name": "hash" } ] + }, + "t11": { + "column_vindexes": [ + { + "column": "sharding_key", + "name": "hash" + } + ] } } } diff --git a/go/test/endtoend/vtorc/api/api_test.go b/go/test/endtoend/vtorc/api/api_test.go index 7dd5c50eefa..8fa24a39ac7 100644 --- a/go/test/endtoend/vtorc/api/api_test.go +++ b/go/test/endtoend/vtorc/api/api_test.go @@ -37,21 +37,17 @@ func TestAPIEndpoints(t *testing.T) { utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 2, 1, nil, cluster.VTOrcConfiguration{ PreventCrossDataCenterPrimaryFailover: true, RecoveryPeriodBlockSeconds: 5, - // The default topo refresh time is 3 seconds. 
We are intentionally making it slower for the test, so that we have time to verify - // the /debug/health output before and after the first refresh runs. - TopologyRefreshSeconds: 10, }, 1, "") keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] shard0 := &keyspace.Shards[0] vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0] // Call API with retry to ensure VTOrc is up status, resp := utils.MakeAPICallRetry(t, vtorc, "/debug/health", func(code int, response string) bool { - return code == 0 + return code != 200 }) - // When VTOrc is up and hasn't run the topo-refresh, is should be healthy but HasDiscovered should be false. - assert.Equal(t, 500, status) + // Verify when VTOrc is healthy, it has also run the first discovery. + assert.Equal(t, 200, status) assert.Contains(t, resp, `"Healthy": true,`) - assert.Contains(t, resp, `"DiscoveredOnce": false`) // find primary from topo primary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) @@ -80,7 +76,6 @@ func TestAPIEndpoints(t *testing.T) { require.NoError(t, err) assert.Equal(t, 200, status) assert.Contains(t, resp, `"Healthy": true,`) - assert.Contains(t, resp, `"DiscoveredOnce": true`) }) t.Run("Liveness API", func(t *testing.T) { @@ -96,6 +91,59 @@ func TestAPIEndpoints(t *testing.T) { return response != "null" }) + t.Run("Database State", func(t *testing.T) { + // Get database state + status, resp, err := utils.MakeAPICall(t, vtorc, "/api/database-state") + require.NoError(t, err) + assert.Equal(t, 200, status) + assert.Contains(t, resp, `"alias": "zone1-0000000101"`) + assert.Contains(t, resp, `{ + "TableName": "vitess_keyspace", + "Rows": [ + { + "durability_policy": "none", + "keyspace": "ks", + "keyspace_type": "0" + } + ] + },`) + }) + + t.Run("Check Vars and Metrics", func(t *testing.T) { + // These are vars that will be deprecated in v21. 
+ utils.CheckVarExists(t, vtorc, "analysis.change.write") + utils.CheckVarExists(t, vtorc, "audit.write") + utils.CheckVarExists(t, vtorc, "discoveries.attempt") + utils.CheckVarExists(t, vtorc, "discoveries.fail") + utils.CheckVarExists(t, vtorc, "discoveries.instance_poll_seconds_exceeded") + utils.CheckVarExists(t, vtorc, "discoveries.queue_length") + utils.CheckVarExists(t, vtorc, "discoveries.recent_count") + utils.CheckVarExists(t, vtorc, "instance.read") + utils.CheckVarExists(t, vtorc, "instance.read_topology") + + // Newly added vars. + utils.CheckVarExists(t, vtorc, "AnalysisChangeWrite") + utils.CheckVarExists(t, vtorc, "AuditWrite") + utils.CheckVarExists(t, vtorc, "DiscoveriesAttempt") + utils.CheckVarExists(t, vtorc, "DiscoveriesFail") + utils.CheckVarExists(t, vtorc, "DiscoveriesInstancePollSecondsExceeded") + utils.CheckVarExists(t, vtorc, "DiscoveriesQueueLength") + utils.CheckVarExists(t, vtorc, "DiscoveriesRecentCount") + utils.CheckVarExists(t, vtorc, "InstanceRead") + utils.CheckVarExists(t, vtorc, "InstanceReadTopology") + + // Metrics registered in prometheus + utils.CheckMetricExists(t, vtorc, "vtorc_analysis_change_write") + utils.CheckMetricExists(t, vtorc, "vtorc_audit_write") + utils.CheckMetricExists(t, vtorc, "vtorc_discoveries_attempt") + utils.CheckMetricExists(t, vtorc, "vtorc_discoveries_fail") + utils.CheckMetricExists(t, vtorc, "vtorc_discoveries_instance_poll_seconds_exceeded") + utils.CheckMetricExists(t, vtorc, "vtorc_discoveries_queue_length") + utils.CheckMetricExists(t, vtorc, "vtorc_discoveries_recent_count") + utils.CheckMetricExists(t, vtorc, "vtorc_instance_read") + utils.CheckMetricExists(t, vtorc, "vtorc_instance_read_topology") + }) + t.Run("Disable Recoveries API", func(t *testing.T) { // Disable recoveries of VTOrc status, resp, err := utils.MakeAPICall(t, vtorc, "/api/disable-global-recoveries") @@ -106,7 +154,7 @@ func TestAPIEndpoints(t *testing.T) { t.Run("Replication Analysis API", func(t *testing.T) { // use 
vtctlclient to stop replication - _, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // We know VTOrc won't fix this since we disabled global recoveries! diff --git a/go/test/endtoend/vtorc/general/vtorc_test.go b/go/test/endtoend/vtorc/general/vtorc_test.go index 244cd364e7c..88cd7b65d63 100644 --- a/go/test/endtoend/vtorc/general/vtorc_test.go +++ b/go/test/endtoend/vtorc/general/vtorc_test.go @@ -25,6 +25,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/vtorc/utils" "vitess.io/vitess/go/vt/log" @@ -163,7 +164,7 @@ func TestVTOrcRepairs(t *testing.T) { t.Run("StopReplication", func(t *testing.T) { // use vtctlclient to stop replication - _, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) + _, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("StopReplication", replica.Alias) require.NoError(t, err) // check replication is setup correctly @@ -171,7 +172,7 @@ func TestVTOrcRepairs(t *testing.T) { utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.FixReplicaRecoveryName, 2) // Stop just the IO thread on the replica - _, err = utils.RunSQL(t, "STOP SLAVE IO_THREAD", replica, "") + _, err = utils.RunSQL(t, "STOP REPLICA IO_THREAD", replica, "") require.NoError(t, err) // check replication is setup correctly @@ -179,7 +180,7 @@ func TestVTOrcRepairs(t *testing.T) { utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.FixReplicaRecoveryName, 3) // Stop just the SQL thread on the replica - _, err = utils.RunSQL(t, "STOP SLAVE SQL_THREAD", replica, "") + _, err = utils.RunSQL(t, "STOP REPLICA SQL_THREAD", 
replica, "") require.NoError(t, err) // check replication is setup correctly @@ -189,9 +190,13 @@ func TestVTOrcRepairs(t *testing.T) { t.Run("ReplicationFromOtherReplica", func(t *testing.T) { // point replica at otherReplica - changeReplicationSourceCommand := fmt.Sprintf("STOP SLAVE; RESET SLAVE ALL;"+ - "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1; START SLAVE", utils.Hostname, otherReplica.MySQLPort) - _, err := utils.RunSQL(t, changeReplicationSourceCommand, replica, "") + changeReplicationSourceCommands := []string{ + "STOP REPLICA", + "RESET REPLICA ALL", + fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='vt_repl', SOURCE_AUTO_POSITION = 1", utils.Hostname, otherReplica.MySQLPort), + "START REPLICA", + } + err := utils.RunSQLs(t, changeReplicationSourceCommands, replica, "") require.NoError(t, err) // wait until the source port is set back correctly by vtorc @@ -202,12 +207,27 @@ func TestVTOrcRepairs(t *testing.T) { utils.VerifyWritesSucceed(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, otherReplica}, 15*time.Second) }) + t.Run("Replication Misconfiguration", func(t *testing.T) { + _, err := utils.RunSQL(t, `SET @@global.replica_net_timeout=33`, replica, "") + require.NoError(t, err) + + // wait until heart beat interval has been fixed by vtorc. 
+ utils.CheckHeartbeatInterval(t, replica, 16.5, 15*time.Second) + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.FixReplicaRecoveryName, 6) + + // check that writes succeed + utils.VerifyWritesSucceed(t, clusterInfo, curPrimary, []*cluster.Vttablet{replica, otherReplica}, 15*time.Second) + }) + t.Run("CircularReplication", func(t *testing.T) { // change the replication source on the primary - changeReplicationSourceCommands := fmt.Sprintf("STOP SLAVE; RESET SLAVE ALL;"+ - "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1;"+ - "START SLAVE;", replica.VttabletProcess.TabletHostname, replica.MySQLPort) - _, err := utils.RunSQL(t, changeReplicationSourceCommands, curPrimary, "") + changeReplicationSourceCommands := []string{ + "STOP REPLICA", + "RESET REPLICA ALL", + fmt.Sprintf("CHANGE REPLICATION SOURCE TO SOURCE_HOST='%s', SOURCE_PORT=%d, SOURCE_USER='vt_repl', SOURCE_AUTO_POSITION = 1", replica.VttabletProcess.TabletHostname, replica.MySQLPort), + "START REPLICA", + } + err := utils.RunSQLs(t, changeReplicationSourceCommands, curPrimary, "") require.NoError(t, err) // wait for curPrimary to reach stable state @@ -293,7 +313,7 @@ func TestRepairAfterTER(t *testing.T) { } // TER to other tablet - _, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("TabletExternallyReparented", newPrimary.Alias) + _, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("TabletExternallyReparented", newPrimary.Alias) require.NoError(t, err) utils.CheckReplication(t, clusterInfo, newPrimary, []*cluster.Vttablet{curPrimary}, 15*time.Second) @@ -342,12 +362,44 @@ func TestSemiSync(t *testing.T) { // check that the replication is setup correctly utils.CheckReplication(t, newCluster, primary, []*cluster.Vttablet{rdonly, replica1, replica2}, 10*time.Second) - _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_slave_enabled = 0", replica1, "") + semisyncType, err := 
utils.SemiSyncExtensionLoaded(t, replica1) require.NoError(t, err) - _, err = utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_slave_enabled = 1", rdonly, "") + switch semisyncType { + case mysql.SemiSyncTypeSource: + _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_replica_enabled = 0", replica1, "") + require.NoError(t, err) + case mysql.SemiSyncTypeMaster: + _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_slave_enabled = 0", replica1, "") + require.NoError(t, err) + default: + require.Fail(t, "unexpected semi-sync type %v", semisyncType) + } + + semisyncType, err = utils.SemiSyncExtensionLoaded(t, rdonly) require.NoError(t, err) - _, err = utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_master_enabled = 0", primary, "") + switch semisyncType { + case mysql.SemiSyncTypeSource: + _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_replica_enabled = 0", rdonly, "") + require.NoError(t, err) + case mysql.SemiSyncTypeMaster: + _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_slave_enabled = 0", rdonly, "") + require.NoError(t, err) + default: + require.Fail(t, "unexpected semi-sync type %v", semisyncType) + } + + semisyncType, err = utils.SemiSyncExtensionLoaded(t, primary) require.NoError(t, err) + switch semisyncType { + case mysql.SemiSyncTypeSource: + _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_source_enabled = 0", primary, "") + require.NoError(t, err) + case mysql.SemiSyncTypeMaster: + _, err := utils.RunSQL(t, "SET GLOBAL rpl_semi_sync_master_enabled = 0", primary, "") + require.NoError(t, err) + default: + require.Fail(t, "unexpected semi-sync type %v", semisyncType) + } timeout := time.After(20 * time.Second) for { @@ -397,11 +449,11 @@ func TestVTOrcWithPrs(t *testing.T) { // check that the replication is setup correctly before we failover utils.CheckReplication(t, clusterInfo, curPrimary, shard0.Vttablets, 10*time.Second) - output, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( - "PlannedReparentShard", "--", - 
"--keyspace_shard", fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), - "--wait_replicas_timeout", "31s", - "--new_primary", replica.Alias) + output, err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput( + "PlannedReparentShard", + fmt.Sprintf("%s/%s", keyspace.Name, shard0.Name), + "--wait-replicas-timeout", "31s", + "--new-primary", replica.Alias) require.NoError(t, err, "error in PlannedReparentShard output - %s", output) time.Sleep(40 * time.Second) @@ -488,3 +540,76 @@ func TestDurabilityPolicySetLater(t *testing.T) { assert.NotNil(t, primary, "should have elected a primary") utils.CheckReplication(t, newCluster, primary, shard0.Vttablets, 10*time.Second) } + +func TestFullStatusConnectionPooling(t *testing.T) { + defer utils.PrintVTOrcLogsOnFailure(t, clusterInfo.ClusterInstance) + defer cluster.PanicHandler(t) + utils.SetupVttabletsAndVTOrcs(t, clusterInfo, 4, 0, []string{ + "--tablet_manager_grpc_concurrency=1", + }, cluster.VTOrcConfiguration{ + PreventCrossDataCenterPrimaryFailover: true, + }, 1, "") + keyspace := &clusterInfo.ClusterInstance.Keyspaces[0] + shard0 := &keyspace.Shards[0] + vtorc := clusterInfo.ClusterInstance.VTOrcProcesses[0] + + // find primary from topo + curPrimary := utils.ShardPrimaryTablet(t, clusterInfo, keyspace, shard0) + assert.NotNil(t, curPrimary, "should have elected a primary") + vtOrcProcess := clusterInfo.ClusterInstance.VTOrcProcesses[0] + utils.WaitForSuccessfulRecoveryCount(t, vtOrcProcess, logic.ElectNewPrimaryRecoveryName, 1) + utils.WaitForSuccessfulPRSCount(t, vtOrcProcess, keyspace.Name, shard0.Name, 1) + + // Kill the current primary. 
+ _ = curPrimary.VttabletProcess.Kill() + + // Wait until VTOrc notices some problems + status, resp := utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { + return response == "null" + }) + assert.Equal(t, 200, status) + assert.Contains(t, resp, "UnreachablePrimary") + + time.Sleep(1 * time.Minute) + + // Change the primaries ports and restart it. + curPrimary.VttabletProcess.Port = clusterInfo.ClusterInstance.GetAndReservePort() + curPrimary.VttabletProcess.GrpcPort = clusterInfo.ClusterInstance.GetAndReservePort() + err := curPrimary.VttabletProcess.Setup() + require.NoError(t, err) + + // See that VTOrc eventually reports no errors. + // Wait until there are no problems and the api endpoint returns null + status, resp = utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { + return response != "null" + }) + assert.Equal(t, 200, status) + assert.Equal(t, "null", resp) + + // REPEATED + // Kill the current primary. + _ = curPrimary.VttabletProcess.Kill() + + // Wait until VTOrc notices some problems + status, resp = utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { + return response == "null" + }) + assert.Equal(t, 200, status) + assert.Contains(t, resp, "UnreachablePrimary") + + time.Sleep(1 * time.Minute) + + // Change the primaries ports back to original and restart it. + curPrimary.VttabletProcess.Port = curPrimary.HTTPPort + curPrimary.VttabletProcess.GrpcPort = curPrimary.GrpcPort + err = curPrimary.VttabletProcess.Setup() + require.NoError(t, err) + + // See that VTOrc eventually reports no errors. 
+ // Wait until there are no problems and the api endpoint returns null + status, resp = utils.MakeAPICallRetry(t, vtorc, "/api/replication-analysis", func(_ int, response string) bool { + return response != "null" + }) + assert.Equal(t, 200, status) + assert.Equal(t, "null", resp) +} diff --git a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go index 180f367d7fb..d91dadddcb4 100644 --- a/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go +++ b/go/test/endtoend/vtorc/primaryfailure/primary_failure_test.go @@ -113,7 +113,7 @@ func TestDownPrimaryBeforeVTOrc(t *testing.T) { curPrimary := shard0.Vttablets[0] // Promote the first tablet as the primary - err := clusterInfo.ClusterInstance.VtctlclientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) + err := clusterInfo.ClusterInstance.VtctldClientProcess.InitializeShard(keyspace.Name, shard0.Name, clusterInfo.ClusterInstance.Cell, curPrimary.TabletUID) require.NoError(t, err) // find the replica and rdonly tablets @@ -438,14 +438,13 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { // check that replication is setup correctly utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{rdonly, aheadRdonly, replica}, 15*time.Second) - // revoke super privileges from vtorc on replica and rdonly so that it is unable to repair the replication - utils.ChangePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, replica, "orc_client_user") - utils.ChangePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, rdonly, "orc_client_user") + // disable recoveries on vtorc so that it is unable to repair the replication + utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the replica and rdonly. 
- err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replica.Alias) require.NoError(t, err) - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", rdonly.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", rdonly.Alias) require.NoError(t, err) // check that aheadRdonly is able to replicate. We also want to add some queries to aheadRdonly which will not be there in replica and rdonly @@ -467,9 +466,8 @@ func TestLostRdonlyOnPrimaryFailure(t *testing.T) { utils.PermanentlyRemoveVttablet(clusterInfo, curPrimary) }() - // grant super privileges back to vtorc on replica and rdonly so that it can repair - utils.ChangePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, replica, "orc_client_user") - utils.ChangePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, rdonly, "orc_client_user") + // enable recoveries back on vtorc so that it can repair + utils.EnableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // vtorc must promote the lagging replica and not the rdonly, since it has a MustNotPromoteRule promotion rule utils.CheckPrimaryTablet(t, clusterInfo, replica, true) @@ -667,11 +665,11 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { // newly started tablet does not replicate from anyone yet, we will allow vtorc to fix this too utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{crossCellReplica, replica, rdonly}, 25*time.Second) - // revoke super privileges from vtorc on crossCellReplica so that it is unable to repair the replication - utils.ChangePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, crossCellReplica, "orc_client_user") + // disable recoveries for vtorc so that it is unable to repair the replication. 
+ utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the crossCellReplica. - err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", crossCellReplica.Alias) require.NoError(t, err) // check that rdonly and replica are able to replicate. We also want to add some queries to replica which will not be there in crossCellReplica @@ -681,11 +679,11 @@ func TestDownPrimaryPromotionRuleWithLag(t *testing.T) { utils.ResetPrimaryLogs(t, curPrimary) // start replication back on the crossCellReplica. - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", crossCellReplica.Alias) require.NoError(t, err) - // grant super privileges back to vtorc on crossCellReplica so that it can repair - utils.ChangePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, crossCellReplica, "orc_client_user") + // enable recoveries back on vtorc so that it can repair + utils.EnableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // assert that the crossCellReplica is indeed lagging and does not have the new insertion by checking the count of rows in the table out, err := utils.RunSQL(t, "SELECT * FROM vt_insert_test", crossCellReplica, "vt_ks") @@ -748,11 +746,11 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { // newly started tablet does not replicate from anyone yet, we will allow vtorc to fix this too utils.CheckReplication(t, clusterInfo, curPrimary, []*cluster.Vttablet{crossCellReplica, replica, rdonly}, 25*time.Second) - // revoke super privileges from vtorc on replica so that it is unable to repair the replication - utils.ChangePrivileges(t, `REVOKE SUPER ON *.* FROM 'orc_client_user'@'%'`, replica, 
"orc_client_user") + // disable recoveries from vtorc so that it is unable to repair the replication + utils.DisableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // stop replication on the replica. - err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StopReplication", replica.Alias) + err := clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StopReplication", replica.Alias) require.NoError(t, err) // check that rdonly and crossCellReplica are able to replicate. We also want to add some queries to crossCenterReplica which will not be there in replica @@ -762,11 +760,11 @@ func TestDownPrimaryPromotionRuleWithLagCrossCenter(t *testing.T) { utils.ResetPrimaryLogs(t, curPrimary) // start replication back on the replica. - err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("StartReplication", replica.Alias) + err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("StartReplication", replica.Alias) require.NoError(t, err) - // grant super privileges back to vtorc on replica so that it can repair - utils.ChangePrivileges(t, `GRANT SUPER ON *.* TO 'orc_client_user'@'%'`, replica, "orc_client_user") + // enable recoveries back on vtorc so that it can repair + utils.EnableGlobalRecoveries(t, clusterInfo.ClusterInstance.VTOrcProcesses[0]) // assert that the replica is indeed lagging and does not have the new insertion by checking the count of rows in the table out, err := utils.RunSQL(t, "SELECT * FROM vt_insert_test", replica, "vt_ks") diff --git a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go index e3b55d64c6b..98bf16ec596 100644 --- a/go/test/endtoend/vtorc/readtopologyinstance/main_test.go +++ b/go/test/endtoend/vtorc/readtopologyinstance/main_test.go @@ -30,10 +30,8 @@ import ( "vitess.io/vitess/go/vt/vtorc/logic" "vitess.io/vitess/go/vt/vtorc/server" - _ "github.com/go-sql-driver/mysql" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - _ "modernc.org/sqlite" ) func TestReadTopologyInstanceBufferable(t *testing.T) { @@ -104,17 +102,19 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.False(t, primaryInstance.HasReplicationCredentials) assert.Equal(t, primaryInstance.ReplicationIOThreadState, inst.ReplicationThreadStateNoThread) assert.Equal(t, primaryInstance.ReplicationSQLThreadState, inst.ReplicationThreadStateNoThread) + assert.EqualValues(t, 0, primaryInstance.HeartbeatInterval) + assert.EqualValues(t, 0, primaryInstance.ReplicaNetTimeout) // Insert an errant GTID in the replica. // The way to do this is to disable global recoveries, stop replication and inject an errant GTID. // After this we restart the replication and enable the recoveries again. err = logic.DisableRecovery() require.NoError(t, err) - err = utils.RunSQLs(t, []string{`STOP SLAVE;`, + err = utils.RunSQLs(t, []string{`STOP REPLICA;`, `SET GTID_NEXT="12345678-1234-1234-1234-123456789012:1";`, `BEGIN;`, `COMMIT;`, `SET GTID_NEXT="AUTOMATIC";`, - `START SLAVE;`, + `START REPLICA;`, }, replica, "") require.NoError(t, err) err = logic.EnableRecovery() @@ -171,4 +171,6 @@ func TestReadTopologyInstanceBufferable(t *testing.T) { assert.False(t, replicaInstance.HasReplicationFilters) assert.LessOrEqual(t, int(replicaInstance.SecondsBehindPrimary.Int64), 1) assert.False(t, replicaInstance.AllowTLS) + assert.EqualValues(t, 4.0, replicaInstance.HeartbeatInterval) + assert.EqualValues(t, 8, replicaInstance.ReplicaNetTimeout) } diff --git a/go/test/endtoend/vtorc/utils/utils.go b/go/test/endtoend/vtorc/utils/utils.go index 07b5b016fcc..7df3898d9f3 100644 --- a/go/test/endtoend/vtorc/utils/utils.go +++ b/go/test/endtoend/vtorc/utils/utils.go @@ -33,7 +33,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" 
"vitess.io/vitess/go/test/endtoend/cluster" @@ -206,7 +205,7 @@ func shutdownVttablets(clusterInfo *VTOrcClusterInfo) error { // Remove the tablet record for this tablet } // Ignoring error here because some tests delete tablets themselves. - _ = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommand("DeleteTablet", vttablet.Alias) + _ = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommand("DeleteTablets", vttablet.Alias) } clusterInfo.ClusterInstance.Keyspaces[0].Shards[0].Vttablets = nil return nil @@ -328,10 +327,12 @@ func cleanAndStartVttablet(t *testing.T, clusterInfo *VTOrcClusterInfo, vttablet _, err = RunSQL(t, "DROP DATABASE IF EXISTS _vt", vttablet, "") require.NoError(t, err) // stop the replication - _, err = RunSQL(t, "STOP SLAVE", vttablet, "") + _, err = RunSQL(t, "STOP REPLICA", vttablet, "") require.NoError(t, err) // reset the binlog - _, err = RunSQL(t, "RESET MASTER", vttablet, "") + resetCmd, err := vttablet.VttabletProcess.ResetBinaryLogsCommand() + require.NoError(t, err) + _, err = RunSQL(t, resetCmd, vttablet, "") require.NoError(t, err) // set read-only to true _, err = RunSQL(t, "SET GLOBAL read_only = ON", vttablet, "") @@ -352,19 +353,16 @@ func ShardPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, keyspace *c if now.Sub(start) > time.Second*60 { assert.FailNow(t, "failed to elect primary before timeout") } - result, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShard", fmt.Sprintf("%s/%s", keyspace.Name, shard.Name)) - assert.Nil(t, err) + si, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetShard(keyspace.Name, shard.Name) + require.NoError(t, err) - var shardInfo topodatapb.Shard - err = json2.Unmarshal([]byte(result), &shardInfo) - assert.Nil(t, err) - if shardInfo.PrimaryAlias == nil { + if si.Shard.PrimaryAlias == nil { log.Warningf("Shard %v/%v has no primary yet, sleep for 1 second\n", keyspace.Name, shard.Name) time.Sleep(time.Second) continue } for _, 
tablet := range shard.Vttablets { - if tablet.Alias == topoproto.TabletAliasString(shardInfo.PrimaryAlias) { + if tablet.Alias == topoproto.TabletAliasString(si.Shard.PrimaryAlias) { return tablet } } @@ -381,12 +379,8 @@ func CheckPrimaryTablet(t *testing.T, clusterInfo *VTOrcClusterInfo, tablet *clu //log.Exitf("error") assert.FailNow(t, "failed to elect primary before timeout") } - result, err := clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) - require.NoError(t, err) - var tabletInfo topodatapb.Tablet - err = json2.Unmarshal([]byte(result), &tabletInfo) + tabletInfo, err := clusterInfo.ClusterInstance.VtctldClientProcess.GetTablet(tablet.Alias) require.NoError(t, err) - if topodatapb.TabletType_PRIMARY != tabletInfo.GetType() { log.Warningf("Tablet %v is not primary yet, sleep for 1 second\n", tablet.Alias) time.Sleep(time.Second) @@ -510,7 +504,7 @@ func WaitForReplicationToStop(t *testing.T, vttablet *cluster.Vttablet) error { case <-timeout: return fmt.Errorf("timedout: waiting for primary to stop replication") default: - res, err := RunSQL(t, "SHOW SLAVE STATUS", vttablet, "") + res, err := RunSQL(t, "SHOW REPLICA STATUS", vttablet, "") if err != nil { return err } @@ -535,9 +529,9 @@ func validateTopology(t *testing.T, clusterInfo *VTOrcClusterInfo, pingTablets b var err error var output string if pingTablets { - output, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate", "--", "--ping-tablets=true") + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate", "--ping-tablets") } else { - output, err = clusterInfo.ClusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") + output, err = clusterInfo.ClusterInstance.VtctldClientProcess.ExecuteCommandWithOutput("Validate") } if err != nil { log.Warningf("Validate failed, retrying, output - %s", output) @@ -683,21 +677,6 @@ func PermanentlyRemoveVttablet(clusterInfo 
*VTOrcClusterInfo, tablet *cluster.Vt } } -// ChangePrivileges is used to change the privileges of the given user. These commands are executed such that they are not replicated -func ChangePrivileges(t *testing.T, sql string, tablet *cluster.Vttablet, user string) { - _, err := RunSQL(t, "SET sql_log_bin = OFF;"+sql+";SET sql_log_bin = ON;", tablet, "") - require.NoError(t, err) - - res, err := RunSQL(t, fmt.Sprintf("SELECT id FROM INFORMATION_SCHEMA.PROCESSLIST WHERE user = '%s'", user), tablet, "") - require.NoError(t, err) - for _, row := range res.Rows { - id, err := row[0].ToInt64() - require.NoError(t, err) - _, err = RunSQL(t, fmt.Sprintf("kill %d", id), tablet, "") - require.NoError(t, err) - } -} - // ResetPrimaryLogs is used reset the binary logs func ResetPrimaryLogs(t *testing.T, curPrimary *cluster.Vttablet) { _, err := RunSQL(t, "FLUSH BINARY LOGS", curPrimary, "") @@ -715,14 +694,15 @@ func ResetPrimaryLogs(t *testing.T, curPrimary *cluster.Vttablet) { // CheckSourcePort is used to check that the replica has the given source port set in its MySQL instance func CheckSourcePort(t *testing.T, replica *cluster.Vttablet, source *cluster.Vttablet, timeToWait time.Duration) { - timeout := time.After(timeToWait) + ctx, cancel := context.WithTimeout(context.Background(), timeToWait) + defer cancel() for { select { - case <-timeout: + case <-ctx.Done(): t.Fatal("timedout waiting for correct primary to be setup") return default: - res, err := RunSQL(t, "SHOW SLAVE STATUS", replica, "") + res, err := RunSQL(t, "SHOW REPLICA STATUS", replica, "") require.NoError(t, err) if len(res.Rows) != 1 { @@ -731,7 +711,7 @@ func CheckSourcePort(t *testing.T, replica *cluster.Vttablet, source *cluster.Vt } for idx, field := range res.Fields { - if strings.EqualFold(field.Name, "MASTER_PORT") || strings.EqualFold(field.Name, "SOURCE_PORT") { + if strings.EqualFold(field.Name, "SOURCE_PORT") { port, err := res.Rows[0][idx].ToInt64() require.NoError(t, err) if port == 
int64(source.MySQLPort) { @@ -745,6 +725,41 @@ func CheckSourcePort(t *testing.T, replica *cluster.Vttablet, source *cluster.Vt } } +// CheckHeartbeatInterval is used to check that the replica has the given heartbeat interval set in its MySQL instance +func CheckHeartbeatInterval(t *testing.T, replica *cluster.Vttablet, heartbeatInterval float64, timeToWait time.Duration) { + ctx, cancel := context.WithTimeout(context.Background(), timeToWait) + defer cancel() + for { + select { + case <-ctx.Done(): + t.Fatal("timed out waiting for correct heartbeat interval to be setup") + return + default: + res, err := RunSQL(t, "select * from performance_schema.replication_connection_configuration", replica, "") + require.NoError(t, err) + + if len(res.Rows) != 1 { + log.Warningf("no replication configuration yet, will retry") + break + } + + for idx, field := range res.Fields { + if strings.EqualFold(field.Name, "HEARTBEAT_INTERVAL") { + readVal, err := res.Rows[0][idx].ToFloat64() + require.NoError(t, err) + if readVal == heartbeatInterval { + return + } else { + log.Warningf("heartbeat interval set to - %v", readVal) + } + } + } + log.Warningf("heartbeat interval not set correctly yet, will retry") + } + time.Sleep(300 * time.Millisecond) + } +} + // MakeAPICall is used make an API call given the url. 
It returns the status and the body of the response received func MakeAPICall(t *testing.T, vtorc *cluster.VTOrcProcess, url string) (status int, response string, err error) { t.Helper() @@ -756,7 +771,7 @@ func MakeAPICall(t *testing.T, vtorc *cluster.VTOrcProcess, url string) (status // The function provided takes in the status and response and returns if we should continue to retry or not func MakeAPICallRetry(t *testing.T, vtorc *cluster.VTOrcProcess, url string, retry func(int, string) bool) (status int, response string) { t.Helper() - timeout := time.After(10 * time.Second) + timeout := time.After(30 * time.Second) for { select { case <-timeout: @@ -919,16 +934,40 @@ func AddSemiSyncKeyspace(t *testing.T, clusterInfo *VTOrcClusterInfo) { // IsSemiSyncSetupCorrectly checks that the semi-sync is setup correctly on the given vttablet func IsSemiSyncSetupCorrectly(t *testing.T, tablet *cluster.Vttablet, semiSyncVal string) bool { - dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", "") + semisyncType, err := tablet.VttabletProcess.SemiSyncExtensionLoaded() require.NoError(t, err) - return semiSyncVal == dbVar + switch semisyncType { + case mysql.SemiSyncTypeSource: + dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_replica_enabled", "") + require.NoError(t, err) + return semiSyncVal == dbVar + case mysql.SemiSyncTypeMaster: + dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", "") + require.NoError(t, err) + return semiSyncVal == dbVar + default: + assert.Fail(t, "semisync extension not loaded") + return false + } } // IsPrimarySemiSyncSetupCorrectly checks that the priamry side semi-sync is setup correctly on the given vttablet func IsPrimarySemiSyncSetupCorrectly(t *testing.T, tablet *cluster.Vttablet, semiSyncVal string) bool { - dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_master_enabled", "") + semisyncType, err := tablet.VttabletProcess.SemiSyncExtensionLoaded() require.NoError(t, 
err) - return semiSyncVal == dbVar + switch semisyncType { + case mysql.SemiSyncTypeSource: + dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_source_enabled", "") + require.NoError(t, err) + return semiSyncVal == dbVar + case mysql.SemiSyncTypeMaster: + dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_master_enabled", "") + require.NoError(t, err) + return semiSyncVal == dbVar + default: + assert.Fail(t, "semisync extension not loaded") + return false + } } // WaitForReadOnlyValue waits for the read_only global variable to reach the provided value @@ -1013,6 +1052,21 @@ func WaitForSuccessfulERSCount(t *testing.T, vtorcInstance *cluster.VTOrcProcess assert.EqualValues(t, countExpected, successCount) } +// CheckVarExists checks whether the given metric exists or not in /debug/vars. +func CheckVarExists(t *testing.T, vtorcInstance *cluster.VTOrcProcess, metricName string) { + t.Helper() + vars := vtorcInstance.GetVars() + _, exists := vars[metricName] + assert.True(t, exists) +} + +// CheckMetricExists checks whether the given metric exists or not in /metrics. +func CheckMetricExists(t *testing.T, vtorcInstance *cluster.VTOrcProcess, metricName string) { + t.Helper() + metrics := vtorcInstance.GetMetrics() + assert.Contains(t, metrics, metricName) +} + // getIntFromValue is a helper function to get an integer from the given value. // If it is convertible to a float, then we round the number to the nearest integer. // If the value is not numeric at all, we return 0. @@ -1121,3 +1175,32 @@ func PrintVTOrcLogsOnFailure(t *testing.T, clusterInstance *cluster.LocalProcess log.Errorf("%s", string(content)) } } + +// EnableGlobalRecoveries enables global recoveries for the given VTOrc. 
+func EnableGlobalRecoveries(t *testing.T, vtorc *cluster.VTOrcProcess) { + status, resp, err := MakeAPICall(t, vtorc, "/api/enable-global-recoveries") + require.NoError(t, err) + assert.Equal(t, 200, status) + assert.Equal(t, "Global recoveries enabled\n", resp) +} + +// DisableGlobalRecoveries disables global recoveries for the given VTOrc. +func DisableGlobalRecoveries(t *testing.T, vtorc *cluster.VTOrcProcess) { + status, resp, err := MakeAPICall(t, vtorc, "/api/disable-global-recoveries") + require.NoError(t, err) + assert.Equal(t, 200, status) + assert.Equal(t, "Global recoveries disabled\n", resp) +} + +// SemiSyncExtensionLoaded is used to check which semisync extension is loaded. +func SemiSyncExtensionLoaded(t *testing.T, tablet *cluster.Vttablet) (mysql.SemiSyncType, error) { + // Get Connection + tabletParams := getMysqlConnParam(tablet, "") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + conn, err := mysql.Connect(ctx, &tabletParams) + require.Nil(t, err) + defer conn.Close() + + return conn.SemiSyncExtensionLoaded() +} diff --git a/go/test/fuzzing/ast_fuzzer.go b/go/test/fuzzing/ast_fuzzer.go index 118f044ea66..5951a0da9eb 100644 --- a/go/test/fuzzing/ast_fuzzer.go +++ b/go/test/fuzzing/ast_fuzzer.go @@ -36,11 +36,11 @@ func FuzzEqualsSQLNode(data []byte) int { if err != nil { return 0 } - inA, err := sqlparser.Parse(query1) + inA, err := sqlparser.NewTestParser().Parse(query1) if err != nil { return 0 } - inB, err := sqlparser.Parse(query2) + inB, err := sqlparser.NewTestParser().Parse(query2) if err != nil { return 0 } diff --git a/go/test/fuzzing/parser_fuzzer.go b/go/test/fuzzing/parser_fuzzer.go index 67b8a30ef00..04a37e6dbcb 100644 --- a/go/test/fuzzing/parser_fuzzer.go +++ b/go/test/fuzzing/parser_fuzzer.go @@ -42,7 +42,7 @@ func FuzzNormalizer(data []byte) int { } func FuzzParser(data []byte) int { - _, err := sqlparser.Parse(string(data)) + _, err := sqlparser.NewTestParser().Parse(string(data)) if 
err != nil { return 0 } @@ -55,7 +55,7 @@ func FuzzNodeFormat(data []byte) int { if err != nil { return 0 } - node, err := sqlparser.Parse(query) + node, err := sqlparser.NewTestParser().Parse(query) if err != nil { return 0 } @@ -69,6 +69,6 @@ func FuzzNodeFormat(data []byte) int { } func FuzzSplitStatementToPieces(data []byte) int { - _, _ = sqlparser.SplitStatementToPieces(string(data)) + _, _ = sqlparser.NewTestParser().SplitStatementToPieces(string(data)) return 1 } diff --git a/go/test/fuzzing/tabletserver_schema_fuzzer.go b/go/test/fuzzing/tabletserver_schema_fuzzer.go index 67bb36e52ed..5ee680952cc 100644 --- a/go/test/fuzzing/tabletserver_schema_fuzzer.go +++ b/go/test/fuzzing/tabletserver_schema_fuzzer.go @@ -17,9 +17,13 @@ import ( "context" "sync" "testing" + "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -57,14 +61,14 @@ func FuzzLoadTable(data []byte) int { func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Table, error) { ctx := context.Background() - appParams := db.ConnParams() - dbaParams := db.ConnParams() + appParams := dbconfigs.New(db.ConnParams()) + dbaParams := dbconfigs.New(db.ConnParams()) cfg := tabletenv.ConnPoolConfig{ - Size: 2, + Size: 2, + IdleTimeout: 10 * time.Second, } - _ = cfg.IdleTimeoutSeconds.Set("10s") - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) + connPool := connpool.NewPool(tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "SchemaTest"), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { @@ -72,5 +76,5 @@ func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Tabl } defer conn.Recycle() - 
return schema.LoadTable(conn, "fakesqldb", tableName, "BASE_TABLE", comment) + return schema.LoadTable(conn, "fakesqldb", tableName, "BASE_TABLE", comment, collations.MySQL8()) } diff --git a/go/test/fuzzing/vt_schema_fuzzer.go b/go/test/fuzzing/vt_schema_fuzzer.go index 2092eac866a..79a30d3394a 100644 --- a/go/test/fuzzing/vt_schema_fuzzer.go +++ b/go/test/fuzzing/vt_schema_fuzzer.go @@ -26,7 +26,7 @@ import ( // FuzzOnlineDDLFromCommentedStatement implements a fuzzer // that targets schema.OnlineDDLFromCommentedStatement func FuzzOnlineDDLFromCommentedStatement(data []byte) int { - stmt, err := sqlparser.Parse(string(data)) + stmt, err := sqlparser.NewTestParser().Parse(string(data)) if err != nil { return 0 } @@ -75,7 +75,7 @@ func FuzzNewOnlineDDLs(data []byte) int { return 0 } - onlineDDLs, err := schema.NewOnlineDDLs(keyspace, sql, ddlStmt, ddlStrategySetting, requestContext) + onlineDDLs, err := schema.NewOnlineDDLs(sql, ddlStmt, ddlStrategySetting, requestContext, keyspace) if err != nil { return 0 } diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go index 82fdaa572de..cfd19d1ee46 100644 --- a/go/test/fuzzing/vtctl_fuzzer.go +++ b/go/test/fuzzing/vtctl_fuzzer.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" "vitess.io/vitess/go/vt/wrangler" @@ -180,7 +181,7 @@ func Fuzz(data []byte) int { // Add params to the command commandSlice = append(commandSlice, args...) 
- _ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), commandSlice) + _ = vtctl.RunCommand(ctx, wrangler.New(vtenv.NewTestEnv(), logger, topo, tmc), commandSlice) command++ } diff --git a/go/test/stress/stress.go b/go/test/stress/stress.go index 1346e3afa55..2f0ecdc751f 100644 --- a/go/test/stress/stress.go +++ b/go/test/stress/stress.go @@ -18,7 +18,7 @@ package stress import ( "fmt" - "math/rand" + "math/rand/v2" "sync" "sync/atomic" "testing" diff --git a/go/test/vschemawrapper/vschema_wrapper.go b/go/test/vschemawrapper/vschema_wrapper.go index 78cf0f8d41c..4d1c424dda8 100644 --- a/go/test/vschemawrapper/vschema_wrapper.go +++ b/go/test/vschemawrapper/vschema_wrapper.go @@ -30,6 +30,7 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -40,14 +41,16 @@ import ( var _ plancontext.VSchema = (*VSchemaWrapper)(nil) type VSchemaWrapper struct { - V *vindexes.VSchema - Keyspace *vindexes.Keyspace - TabletType_ topodatapb.TabletType - Dest key.Destination - SysVarEnabled bool - Version plancontext.PlannerVersion - EnableViews bool - TestBuilder func(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) + V *vindexes.VSchema + Keyspace *vindexes.Keyspace + TabletType_ topodatapb.TabletType + Dest key.Destination + SysVarEnabled bool + ForeignKeyChecksState *bool + Version plancontext.PlannerVersion + EnableViews bool + TestBuilder func(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) + Env *vtenv.Environment } func (vw *VSchemaWrapper) GetPrepareData(stmtName string) *vtgatepb.PrepareData { @@ -81,7 +84,7 @@ func (vw *VSchemaWrapper) PlanPrepareStatement(ctx context.Context, query string if err != nil { return nil, nil, err } - stmt, _, err := 
sqlparser.Parse2(query) + stmt, _, err := vw.Env.Parser().Parse2(query) if err != nil { return nil, nil, err } @@ -122,7 +125,11 @@ func (vw *VSchemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema { } func (vw *VSchemaWrapper) ConnCollation() collations.ID { - return collations.CollationUtf8mb3ID + return vw.Env.CollationEnv().DefaultConnectionCharset() +} + +func (vw *VSchemaWrapper) Environment() *vtenv.Environment { + return vw.Env } func (vw *VSchemaWrapper) PlannerWarning(_ string) { @@ -140,6 +147,14 @@ func (vw *VSchemaWrapper) KeyspaceError(keyspace string) error { return nil } +func (vw *VSchemaWrapper) GetAggregateUDFs() (udfs []string) { + return vw.V.GetAggregateUDFs() +} + +func (vw *VSchemaWrapper) GetForeignKeyChecksState() *bool { + return vw.ForeignKeyChecksState +} + func (vw *VSchemaWrapper) AllKeyspace() ([]*vindexes.Keyspace, error) { if vw.Keyspace == nil { return nil, vterrors.VT13001("keyspace not available") diff --git a/go/textutil/strings.go b/go/textutil/strings.go index ac35541f52f..616366f0083 100644 --- a/go/textutil/strings.go +++ b/go/textutil/strings.go @@ -17,6 +17,7 @@ limitations under the License. package textutil import ( + "fmt" "net/url" "regexp" "strings" @@ -28,6 +29,13 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +type TruncationLocation int + +const ( + TruncationLocationMiddle TruncationLocation = iota + TruncationLocationEnd +) + var ( delimitedListRegexp = regexp.MustCompile(`[ ,;]+`) SimulatedNullString = sqltypes.NULL.String() @@ -133,3 +141,26 @@ func Title(s string) string { }, s) } + +// TruncateText truncates the provided text, if needed, to the specified maximum +// length using the provided truncation indicator in place of the truncated text +// in the specified location of the original string. 
+func TruncateText(text string, limit int, location TruncationLocation, indicator string) (string, error) { + ol := len(text) + if ol <= limit { + return text, nil + } + if len(indicator)+2 >= limit { + return "", fmt.Errorf("the truncation indicator is too long for the provided text") + } + switch location { + case TruncationLocationMiddle: + prefix := (limit / 2) - len(indicator) + suffix := (ol - (prefix + len(indicator))) + 1 + return fmt.Sprintf("%s%s%s", text[:prefix], indicator, text[suffix:]), nil + case TruncationLocationEnd: + return text[:limit-(len(indicator)+1)] + indicator, nil + default: + return "", fmt.Errorf("invalid truncation location: %d", location) + } +} diff --git a/go/textutil/strings_test.go b/go/textutil/strings_test.go index 9ea175909fd..2ba9851b71c 100644 --- a/go/textutil/strings_test.go +++ b/go/textutil/strings_test.go @@ -17,9 +17,14 @@ limitations under the License. package textutil import ( + "strings" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) func TestSplitDelimitedList(t *testing.T) { @@ -65,6 +70,12 @@ func TestSplitUnescape(t *testing.T) { assert.NoError(t, err) assert.Equal(t, expected, elems) } + { + s := "invalid%2" + elems, err := SplitUnescape(s, ",") + assert.Error(t, err) + assert.Equal(t, []string{}, elems) + } } func TestSingleWordCamel(t *testing.T) { @@ -108,3 +119,169 @@ func TestSingleWordCamel(t *testing.T) { }) } } + +func TestValueIsSimulatedNull(t *testing.T) { + tt := []struct { + name string + val interface{} + isNull bool + }{ + { + name: "case string false", + val: "test", + isNull: false, + }, + { + name: "case string true", + val: SimulatedNullString, + isNull: true, + }, + { + name: "case []string true", + val: []string{SimulatedNullString}, + isNull: true, + }, + { + name: "case []string false", + val: []string{SimulatedNullString, 
SimulatedNullString}, + isNull: false, + }, + { + name: "case binlogdatapb.OnDDLAction true", + val: binlogdatapb.OnDDLAction(SimulatedNullInt), + isNull: true, + }, + { + name: "case int true", + val: SimulatedNullInt, + isNull: true, + }, + { + name: "case int32 true", + val: int32(SimulatedNullInt), + isNull: true, + }, + { + name: "case int64 true", + val: int64(SimulatedNullInt), + isNull: true, + }, + { + name: "case []topodatapb.TabletType true", + val: []topodatapb.TabletType{topodatapb.TabletType(SimulatedNullInt)}, + isNull: true, + }, + { + name: "case binlogdatapb.VReplicationWorkflowState true", + val: binlogdatapb.VReplicationWorkflowState(SimulatedNullInt), + isNull: true, + }, + { + name: "case default", + val: float64(1), + isNull: false, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + isNull := ValueIsSimulatedNull(tc.val) + assert.Equal(t, tc.isNull, isNull) + }) + } +} + +func TestTitle(t *testing.T) { + tt := []struct { + s string + expect string + }{ + {s: "hello world", expect: "Hello World"}, + {s: "snake_case", expect: "Snake_case"}, + {s: "TITLE CASE", expect: "TITLE CASE"}, + {s: "HelLo wOrLd", expect: "HelLo WOrLd"}, + {s: "", expect: ""}, + } + + for _, tc := range tt { + t.Run(tc.s, func(t *testing.T) { + title := Title(tc.s) + assert.Equal(t, tc.expect, title) + }) + } +} + +func TestTruncateText(t *testing.T) { + defaultLocation := TruncationLocationMiddle + defaultMaxLen := 100 + defaultTruncationIndicator := "..." 
+ + tests := []struct { + name string + text string + maxLen int + location TruncationLocation + truncationIndicator string + want string + wantErr string + }{ + { + name: "no truncation", + text: "hello world", + maxLen: defaultMaxLen, + location: defaultLocation, + want: "hello world", + }, + { + name: "no truncation - exact", + text: strings.Repeat("a", defaultMaxLen), + maxLen: defaultMaxLen, + location: defaultLocation, + want: strings.Repeat("a", defaultMaxLen), + }, + { + name: "barely too long - mid", + text: strings.Repeat("a", defaultMaxLen+1), + truncationIndicator: defaultTruncationIndicator, + maxLen: defaultMaxLen, + location: defaultLocation, + want: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + }, + { + name: "barely too long - end", + text: strings.Repeat("a", defaultMaxLen+1), + truncationIndicator: defaultTruncationIndicator, + maxLen: defaultMaxLen, + location: TruncationLocationEnd, + want: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa...", + }, + { + name: "too small", + text: strings.Repeat("a", defaultMaxLen), + truncationIndicator: defaultTruncationIndicator, + maxLen: 4, + location: defaultLocation, + wantErr: "the truncation indicator is too long for the provided text", + }, + { + name: "bad location", + text: strings.Repeat("a", defaultMaxLen+1), + truncationIndicator: defaultTruncationIndicator, + maxLen: defaultMaxLen, + location: 100, + wantErr: "invalid truncation location: 100", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + val, err := TruncateText(tt.text, tt.maxLen, tt.location, tt.truncationIndicator) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + } else { + require.NoError(t, err) + require.Equal(t, tt.want, val) + require.LessOrEqual(t, len(val), tt.maxLen) + } + }) + } +} diff --git a/go/textutil/template_test.go b/go/textutil/template_test.go new file mode 100644 
index 00000000000..077a6f6a72d --- /dev/null +++ b/go/textutil/template_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textutil + +import ( + "testing" + "text/template" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExecuteTemplate(t *testing.T) { + tmplText := "Result: {{.Value}}" + inputData := struct{ Value string }{Value: "Test"} + tmpl, err := template.New("test").Parse(tmplText) + require.NoError(t, err) + + result, err := ExecuteTemplate(tmpl, inputData) + require.NoError(t, err) + + expectedResult := "Result: Test" + assert.Equal(t, expectedResult, result) + +} + +func TestExecuteTemplateWithError(t *testing.T) { + templText := "{{.UndefinedVariable}}" + invalidInput := struct{ Name string }{Name: "foo"} + + tmpl, err := template.New("test").Parse(templText) + require.NoError(t, err) + + result, err := ExecuteTemplate(tmpl, invalidInput) + assert.Error(t, err) + assert.Equal(t, "", result) +} diff --git a/go/timer/randticker.go b/go/timer/randticker.go index 17ab08526d6..8e921f40fea 100644 --- a/go/timer/randticker.go +++ b/go/timer/randticker.go @@ -17,7 +17,7 @@ limitations under the License. 
package timer import ( - "math/rand" + "math/rand/v2" "time" ) @@ -35,9 +35,8 @@ func NewRandTicker(d, variance time.Duration) *RandTicker { c := make(chan time.Time, 1) done := make(chan struct{}) go func() { - rnd := rand.New(rand.NewSource(time.Now().UnixNano())) for { - vr := time.Duration(rnd.Int63n(int64(2*variance)) - int64(variance)) + vr := time.Duration(rand.Int64N(int64(2*variance)) - int64(variance)) tmr := time.NewTimer(d + vr) select { case <-tmr.C: diff --git a/go/timer/randticker_flaky_test.go b/go/timer/randticker_test.go similarity index 100% rename from go/timer/randticker_flaky_test.go rename to go/timer/randticker_test.go diff --git a/go/timer/rate_limiter.go b/go/timer/rate_limiter.go index 25bc2b32f61..d42a4d7e14c 100644 --- a/go/timer/rate_limiter.go +++ b/go/timer/rate_limiter.go @@ -28,7 +28,7 @@ import ( // For example, we can create a RateLimiter of 1second. Then, we can ask it, over time, to run many // tasks. It will only ever run a single task in any 1 second time frame. The rest are ignored. type RateLimiter struct { - tickerValue int64 + tickerValue atomic.Int64 lastDoValue int64 mu sync.Mutex @@ -37,7 +37,8 @@ type RateLimiter struct { // NewRateLimiter creates a new limiter with given duration. It is immediately ready to run tasks. func NewRateLimiter(d time.Duration) *RateLimiter { - r := &RateLimiter{tickerValue: 1} + r := &RateLimiter{} + r.lastDoValue = math.MinInt32 // Far enough to make a difference, but not too far to overflow. 
ctx, cancel := context.WithCancel(context.Background()) r.cancel = cancel go func() { @@ -48,7 +49,7 @@ func NewRateLimiter(d time.Duration) *RateLimiter { case <-ctx.Done(): return case <-ticker.C: - atomic.StoreInt64(&r.tickerValue, r.tickerValue+1) + r.tickerValue.Add(1) } } }() @@ -61,16 +62,29 @@ func (r *RateLimiter) Do(f func() error) (err error) { r.mu.Lock() defer r.mu.Unlock() - if r.lastDoValue >= atomic.LoadInt64(&r.tickerValue) { + if r.lastDoValue >= r.tickerValue.Load() { return nil // rate limited. Skipped. } if f != nil { err = f() } - r.lastDoValue = atomic.LoadInt64(&r.tickerValue) + r.lastDoValue = r.tickerValue.Load() return err } +// DoEmpty is a convenience method to invoke Do() with no function. +func (r *RateLimiter) DoEmpty() { + _ = r.Do(nil) +} + +// Diff returns the logical clock diff between the ticker and the last Do() call. +func (r *RateLimiter) Diff() int64 { + r.mu.Lock() + defer r.mu.Unlock() + + return r.tickerValue.Load() - r.lastDoValue +} + // Stop terminates rate limiter's operation and will not allow any more Do() executions. func (r *RateLimiter) Stop() { r.cancel() diff --git a/go/timer/rate_limiter_test.go b/go/timer/rate_limiter_test.go index 84122233996..83690b98a22 100644 --- a/go/timer/rate_limiter_test.go +++ b/go/timer/rate_limiter_test.go @@ -17,6 +17,7 @@ limitations under the License. package timer import ( + "math" "testing" "time" @@ -75,3 +76,18 @@ func TestRateLimiterStop(t *testing.T) { } assert.Equal(t, valSnapshot, val) } + +func TestRateLimiterDiff(t *testing.T) { + d := 2 * time.Second + r := NewRateLimiter(d) + require.NotNil(t, r) + defer r.Stop() + + // This assumes the last couple lines of code run faster than 2 seconds, which should be the case. + // But if you see flakiness due to slow runners, we can revisit the logic. 
+ assert.Greater(t, r.Diff(), int64(math.MaxInt32)) + time.Sleep(d + time.Second) + assert.Greater(t, r.Diff(), int64(math.MaxInt32)) + r.DoEmpty() + assert.LessOrEqual(t, r.Diff(), int64(1)) +} diff --git a/go/timer/suspendable_ticker.go b/go/timer/suspendable_ticker.go index 5257626b85f..f2694a5cab3 100644 --- a/go/timer/suspendable_ticker.go +++ b/go/timer/suspendable_ticker.go @@ -28,7 +28,7 @@ type SuspendableTicker struct { // C is user facing C chan time.Time - suspended int64 + suspended atomic.Bool } // NewSuspendableTicker creates a new suspendable ticker, indicating whether the ticker should start @@ -39,7 +39,7 @@ func NewSuspendableTicker(d time.Duration, initiallySuspended bool) *Suspendable C: make(chan time.Time), } if initiallySuspended { - s.suspended = 1 + s.suspended.Store(true) } go s.loop() return s @@ -48,12 +48,12 @@ func NewSuspendableTicker(d time.Duration, initiallySuspended bool) *Suspendable // Suspend stops sending time events on the channel C // time events sent during suspended time are lost func (s *SuspendableTicker) Suspend() { - atomic.StoreInt64(&s.suspended, 1) + s.suspended.Store(true) } // Resume re-enables time events on channel C func (s *SuspendableTicker) Resume() { - atomic.StoreInt64(&s.suspended, 0) + s.suspended.Store(false) } // Stop completely stops the timer, like time.Timer @@ -64,15 +64,23 @@ func (s *SuspendableTicker) Stop() { // TickNow generates a tick at this point in time. It may block // if nothing consumes the tick. func (s *SuspendableTicker) TickNow() { - if atomic.LoadInt64(&s.suspended) == 0 { + if !s.suspended.Load() { // not suspended s.C <- time.Now() } } +// TickAfter generates a tick after given duration has passed. +// It runs asynchronously and returns immediately. 
+func (s *SuspendableTicker) TickAfter(d time.Duration) { + time.AfterFunc(d, func() { + s.TickNow() + }) +} + func (s *SuspendableTicker) loop() { for t := range s.ticker.C { - if atomic.LoadInt64(&s.suspended) == 0 { + if !s.suspended.Load() { // not suspended s.C <- t } diff --git a/go/timer/suspendable_ticker_test.go b/go/timer/suspendable_ticker_test.go new file mode 100644 index 00000000000..1c7cf65edcf --- /dev/null +++ b/go/timer/suspendable_ticker_test.go @@ -0,0 +1,144 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package timer + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const ( + fastTickerInterval = 10 * time.Millisecond +) + +func TestInitiallySuspended(t *testing.T) { + ctx := context.Background() + t.Run("true", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + ticker := NewSuspendableTicker(fastTickerInterval, true) + defer ticker.Stop() + select { + case <-ticker.C: + assert.Fail(t, "unexpected tick. Was supposed to be suspended") + case <-ctx.Done(): + return + } + }) + t.Run("false", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + ticker := NewSuspendableTicker(fastTickerInterval, false) + defer ticker.Stop() + select { + case <-ticker.C: + return + case <-ctx.Done(): + assert.Fail(t, "unexpected timeout. 
Expected tick") + } + }) +} + +func TestSuspendableTicker(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ticker := NewSuspendableTicker(fastTickerInterval, false) + defer ticker.Stop() + + var ticks atomic.Int64 + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticks.Add(1) + } + } + }() + t.Run("ticks running", func(t *testing.T) { + time.Sleep(time.Second) + after := ticks.Load() + assert.Greater(t, after, int64(10)) // should be about 100 + }) + t.Run("ticks suspended", func(t *testing.T) { + ticker.Suspend() + before := ticks.Load() + time.Sleep(time.Second) + after := ticks.Load() + assert.Less(t, after-before, int64(10)) + }) + t.Run("ticks resumed", func(t *testing.T) { + ticker.Resume() + before := ticks.Load() + time.Sleep(time.Second) + after := ticks.Load() + assert.Greater(t, after-before, int64(10)) + }) + t.Run("ticker stopped", func(t *testing.T) { + ticker.Stop() + before := ticks.Load() + time.Sleep(time.Second) + after := ticks.Load() + assert.Less(t, after-before, int64(10)) + }) +} + +func TestSuspendableTickerTick(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ticker := NewSuspendableTicker(time.Hour, false) + defer ticker.Stop() + + var ticks atomic.Int64 + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ticks.Add(1) + } + } + }() + t.Run("nothing going on", func(t *testing.T) { + time.Sleep(time.Second) + after := ticks.Load() + assert.Zero(t, after) + }) + t.Run("tick now", func(t *testing.T) { + before := ticks.Load() + ticker.TickNow() + time.Sleep(time.Second) + after := ticks.Load() + assert.Equal(t, int64(1), after-before) + }) + t.Run("tick after", func(t *testing.T) { + before := ticks.Load() + ticker.TickAfter(2 * time.Second) + time.Sleep(time.Second) + after := ticks.Load() + assert.Zero(t, after-before) + time.Sleep(3 * time.Second) + after = ticks.Load() + assert.Equal(t, 
int64(1), after-before) + }) +} diff --git a/go/timer/timer_flaky_test.go b/go/timer/timer_test.go similarity index 88% rename from go/timer/timer_flaky_test.go rename to go/timer/timer_test.go index c504a7d0eb2..fe268938db0 100644 --- a/go/timer/timer_flaky_test.go +++ b/go/timer/timer_test.go @@ -74,3 +74,13 @@ func TestIndefinite(t *testing.T) { time.Sleep(quarter) assert.Equal(t, int64(1), numcalls.Load()) } + +func TestInterval(t *testing.T) { + timer := NewTimer(100) + in := timer.Interval() + assert.Equal(t, 100*time.Nanosecond, in) + + timer.interval.Store(200) + in = timer.Interval() + assert.Equal(t, 200*time.Nanosecond, in) +} diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go index 1811ff72511..3f59fdb3ece 100644 --- a/go/tools/asthelpergen/asthelpergen.go +++ b/go/tools/asthelpergen/asthelpergen.go @@ -29,7 +29,6 @@ import ( "golang.org/x/tools/go/packages" "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/tools/codegen" ) diff --git a/go/tools/codegen/common_test.go b/go/tools/codegen/common_test.go new file mode 100644 index 00000000000..fd2ef0035d9 --- /dev/null +++ b/go/tools/codegen/common_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package codegen + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/tools/go/packages" +) + +func TestCheckErrors(t *testing.T) { + tests := []struct { + name string + loaded []*packages.Package + expectedErr string + }{ + { + name: "Empty packages", + loaded: []*packages.Package{}, + expectedErr: "", + }, + { + name: "Non-empty packages", + loaded: []*packages.Package{ + { + Errors: []packages.Error{ + { + Pos: "", + Msg: "New error", + Kind: 7, + }, + { + Pos: "1:7", + Msg: "New error", + Kind: 7, + }, + }, + }, + }, + expectedErr: "found 2 error(s) when loading Go packages:", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := CheckErrors(tt.loaded, GeneratedInSqlparser) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} + +func TestGeneratedInSqlParser(t *testing.T) { + tests := []struct { + name string + fileName string + expectedOutput bool + }{ + { + name: "Empty file name", + fileName: "", + expectedOutput: false, + }, + { + name: "Random file name", + fileName: "random", + expectedOutput: false, + }, + { + name: "ast_format_fast.go", + fileName: "ast_format_fast.go", + expectedOutput: true, + }, + { + name: "ast_equals.go", + fileName: "ast_equals.go", + expectedOutput: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.expectedOutput, GeneratedInSqlparser(tt.fileName)) + }) + } +} diff --git a/go/tools/codegen/goimports_test.go b/go/tools/codegen/goimports_test.go new file mode 100644 index 00000000000..25883073859 --- /dev/null +++ b/go/tools/codegen/goimports_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codegen + +import ( + "testing" + + "github.com/dave/jennifer/jen" + "github.com/stretchr/testify/require" +) + +func TestFormatGenFile(t *testing.T) { + tests := []struct { + name string + file *jen.File + expectedErr string + }{ + { + name: "some-file", + file: jen.NewFile("some-file"), + expectedErr: "Error 1:13: expected ';', found '-' while formatting source", + }, + { + name: "random", + file: jen.NewFile("random"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := FormatJenFile(tt.file) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} + +func TestGoImports(t *testing.T) { + err := GoImports("") + require.ErrorContains(t, err, "exit status 2") +} + +func TestSaveJenFile(t *testing.T) { + tests := []struct { + name string + filePath string + file *jen.File + expectedErr string + }{ + { + name: "Empty file path", + filePath: "", + file: jen.NewFile("random"), + expectedErr: "open : no such file or directory", + }, + { + name: "Non empty file path", + filePath: "random", + file: jen.NewFile("random"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := SaveJenFile(tt.filePath, tt.file) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} diff --git a/go/tools/go-upgrade/go-upgrade.go b/go/tools/go-upgrade/go-upgrade.go index b3ba7ca628d..ee42040e5c0 100644 --- a/go/tools/go-upgrade/go-upgrade.go +++ 
b/go/tools/go-upgrade/go-upgrade.go @@ -415,13 +415,16 @@ func replaceGoVersionInCodebase(old, new *version.Version, workflowUpdate bool) } if !isSameMajorMinorVersion(old, new) { - err = replaceInFile( - []*regexp.Regexp{regexp.MustCompile(regexpReplaceGoModGoVersion)}, - []string{fmt.Sprintf("go %d.%d", new.Segments()[0], new.Segments()[1])}, - "./go.mod", - ) - if err != nil { - return err + goModFiles := []string{"./go.mod", "./vitess-mixin/go.mod"} + for _, file := range goModFiles { + err = replaceInFile( + []*regexp.Regexp{regexp.MustCompile(regexpReplaceGoModGoVersion)}, + []string{fmt.Sprintf("go %d.%d", new.Segments()[0], new.Segments()[1])}, + file, + ) + if err != nil { + return err + } } } return nil @@ -432,7 +435,6 @@ func updateBootstrapVersionInCodebase(old, new string, newGoVersion *version.Ver return nil } files, err := getListOfFilesInPaths([]string{ - "./docker/base", "./docker/lite", "./docker/local", "./docker/vttestserver", diff --git a/go/tools/release-notes/release_notes.go b/go/tools/release-notes/release_notes.go index 1673d6a5160..1a8105a6f38 100644 --- a/go/tools/release-notes/release_notes.go +++ b/go/tools/release-notes/release_notes.go @@ -17,7 +17,6 @@ limitations under the License. 
package main import ( - "bytes" "encoding/json" "fmt" "log" @@ -79,9 +78,7 @@ type ( } ) -var ( - releaseNotesPath = `changelog/` -) +var releaseNotesPath = `changelog/` const ( releaseNotesPathGitHub = `https://github.com/vitessio/vitess/blob/main/` @@ -141,7 +138,7 @@ func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error { // Generate the release notes rn.PathToChangeLogFileOnGH = releaseNotesPathGitHub + path.Join(rn.SubDirPath, "changelog.md") if rnFile == nil { - rnFile, err = os.OpenFile(path.Join(rn.SubDirPath, "release_notes.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + rnFile, err = os.OpenFile(path.Join(rn.SubDirPath, "release_notes.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o666) if err != nil { return err } @@ -155,7 +152,7 @@ func (rn *releaseNote) generate(rnFile, changelogFile *os.File) error { // Generate the changelog if changelogFile == nil { - changelogFile, err = os.OpenFile(path.Join(rn.SubDirPath, "changelog.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + changelogFile, err = os.OpenFile(path.Join(rn.SubDirPath, "changelog.md"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o666) if err != nil { return err } @@ -304,11 +301,11 @@ func getStringForPullRequestInfos(prPerType prsByType) (string, error) { data := createSortedPrTypeSlice(prPerType) t := template.Must(template.New("markdownTemplatePR").Parse(markdownTemplatePR)) - buff := bytes.Buffer{} - if err := t.ExecuteTemplate(&buff, "markdownTemplatePR", data); err != nil { + var buf strings.Builder + if err := t.ExecuteTemplate(&buf, "markdownTemplatePR", data); err != nil { return "", err } - return buff.String(), nil + return buf.String(), nil } func getStringForKnownIssues(issues []knownIssue) (string, error) { @@ -316,11 +313,11 @@ func getStringForKnownIssues(issues []knownIssue) (string, error) { return "", nil } t := template.Must(template.New("markdownTemplateKnownIssues").Parse(markdownTemplateKnownIssues)) - buff := bytes.Buffer{} - if err := t.ExecuteTemplate(&buff, 
"markdownTemplateKnownIssues", issues); err != nil { + var buf strings.Builder + if err := t.ExecuteTemplate(&buf, "markdownTemplateKnownIssues", issues); err != nil { return "", err } - return buff.String(), nil + return buf.String(), nil } func groupAndStringifyPullRequest(pris []pullRequestInformation) (string, error) { @@ -336,9 +333,7 @@ func groupAndStringifyPullRequest(pris []pullRequestInformation) (string, error) } func main() { - var ( - versionName, summaryFile string - ) + var versionName, summaryFile string pflag.StringVarP(&versionName, "version", "v", "", "name of the version (has to be the following format: v11.0.0)") pflag.StringVarP(&summaryFile, "summary", "s", "", "readme file on which there is a summary of the release") pflag.Parse() diff --git a/go/tools/releases/releases_test.go b/go/tools/releases/releases_test.go new file mode 100644 index 00000000000..19b3f7df88e --- /dev/null +++ b/go/tools/releases/releases_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetDirs(t *testing.T) { + tests := []struct { + name string + currentDir dir + expectedErr string + }{ + { + name: "Empty dir", + currentDir: dir{}, + expectedErr: "open : no such file or directory", + }, + { + name: "Non empty dir", + currentDir: dir{ + Path: "./", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := getDirs(tt.currentDir) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} + +func TestExecReadMeTemplateWithDir(t *testing.T) { + tests := []struct { + name string + template string + currentDir dir + expectedErr string + }{ + { + name: "Empty dir and empty template", + currentDir: dir{}, + template: "", + expectedErr: "", + }, + { + name: "Invaild directory path", + currentDir: dir{ + Path: `\./`, + }, + template: "", + expectedErr: `open \./README.md: no such file or directory`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := execReadMeTemplateWithDir(tt.currentDir, tt.template) + if tt.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + }) + } +} diff --git a/go/trace/fake_test.go b/go/trace/fake_test.go new file mode 100644 index 00000000000..d7d01333202 --- /dev/null +++ b/go/trace/fake_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNoopTracingServer(t *testing.T) { + factoryFunc := tracingBackendFactories["noop"] + tracingSvc, closer, err := factoryFunc("value") + require.NoError(t, err) + require.NoError(t, closer.Close()) + span, err := tracingSvc.NewFromString("parent", "label") + require.NoError(t, err) + require.Empty(t, span) +} diff --git a/go/trace/logger_test.go b/go/trace/logger_test.go new file mode 100644 index 00000000000..cb414515fa5 --- /dev/null +++ b/go/trace/logger_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trace + +import ( + "io" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +// If captureStdout is false, it will capture the outut of +// os.Stderr +func captureOutput(t *testing.T, f func(), captureStdout bool) string { + oldVal := os.Stderr + if captureStdout { + oldVal = os.Stdout + } + t.Cleanup(func() { + // Ensure reset even if deferred function panics + if captureStdout { + os.Stdout = oldVal + } else { + os.Stderr = oldVal + } + }) + + r, w, _ := os.Pipe() + if captureStdout { + os.Stdout = w + } else { + os.Stderr = w + } + + f() + + w.Close() + got, _ := io.ReadAll(r) + + return string(got) +} + +func TestLoggerLogAndError(t *testing.T) { + logger := traceLogger{} + + // Test Error() output + output := captureOutput(t, func() { + logger.Error("This is an error message") + }, false) + assert.Contains(t, output, "This is an error message") + + // Test Log() output + output = captureOutput(t, func() { + logger.Log("This is an log message") + }, false) + assert.Contains(t, output, "This is an log message") +} diff --git a/go/trace/opentracing_test.go b/go/trace/opentracing_test.go index 104545fe657..4a1dad369d9 100644 --- a/go/trace/opentracing_test.go +++ b/go/trace/opentracing_test.go @@ -17,12 +17,14 @@ limitations under the License. 
package trace import ( + "context" "encoding/base64" "encoding/json" "testing" "github.com/opentracing/opentracing-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestExtractMapFromString(t *testing.T) { @@ -47,3 +49,29 @@ func TestErrorConditions(t *testing.T) { _, err = extractMapFromString("this is not base64") // malformed base64 assert.Error(t, err) } + +func TestNewClientSpan(t *testing.T) { + svc := openTracingService{ + Tracer: &fakeTracer{}, + } + clientSpan := svc.NewClientSpan(nil, "test-svc", "test-label") + require.NotEmpty(t, clientSpan) + + clientSpan = svc.New(clientSpan, "client-span") + require.NotEmpty(t, clientSpan) + + spanFromCtx, ok := svc.FromContext(context.Background()) + require.False(t, ok) + require.Nil(t, spanFromCtx) + + ctx := svc.NewContext(context.TODO(), clientSpan) + require.NotNil(t, ctx) + clientSpan.Finish() + + spanFromCtx, ok = svc.FromContext(ctx) + require.True(t, ok) + require.NotEmpty(t, spanFromCtx) + + ctx = svc.NewContext(context.TODO(), &mockSpan{}) + require.Nil(t, ctx) +} diff --git a/go/trace/plugin_datadog_test.go b/go/trace/plugin_datadog_test.go new file mode 100644 index 00000000000..4dc43a80c1e --- /dev/null +++ b/go/trace/plugin_datadog_test.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trace + +import ( + "testing" + + "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/require" +) + +func TestGetOpenTracingTracer(t *testing.T) { + tracer := datadogTracer{ + actual: opentracing.GlobalTracer(), + } + require.Equal(t, opentracing.GlobalTracer(), tracer.GetOpenTracingTracer()) +} + +func TestNewDataDogTracerHostAndPortNotSet(t *testing.T) { + tracingSvc, closer, err := newDatadogTracer("svc") + expectedErr := "need host and port to datadog agent to use datadog tracing" + require.ErrorContains(t, err, expectedErr) + require.Nil(t, tracingSvc) + require.Nil(t, closer) +} diff --git a/go/trace/plugin_jaeger_test.go b/go/trace/plugin_jaeger_test.go new file mode 100644 index 00000000000..0deab36c7ce --- /dev/null +++ b/go/trace/plugin_jaeger_test.go @@ -0,0 +1,35 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trace + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewJaegerTracerFromEnv(t *testing.T) { + tracingSvc, closer, err := newJagerTracerFromEnv("noop") + require.NoError(t, err) + require.NotEmpty(t, tracingSvc) + require.NotEmpty(t, closer) + + tracingSvc, closer, err = newJagerTracerFromEnv("") + require.ErrorContains(t, err, "no service name provided") + require.Empty(t, tracingSvc) + require.Empty(t, closer) +} diff --git a/go/trace/trace_test.go b/go/trace/trace_test.go index c98a47167a8..7f1f6d8c528 100644 --- a/go/trace/trace_test.go +++ b/go/trace/trace_test.go @@ -22,7 +22,9 @@ import ( "io" "testing" + "github.com/opentracing/opentracing-go" "github.com/spf13/viper" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "vitess.io/vitess/go/viperutil/vipertest" @@ -68,13 +70,79 @@ func TestRegisterService(t *testing.T) { } } +func TestNewFromString(t *testing.T) { + tests := []struct { + parent string + label string + context context.Context + expectedLog string + isPresent bool + expectedErr string + }{ + { + parent: "", + label: "empty parent", + context: context.TODO(), + expectedLog: "", + isPresent: true, + expectedErr: "parent is empty", + }, + { + parent: "parent", + label: "non-empty parent", + expectedLog: "[key: sql-statement-type values:non-empty parent]\n", + context: context.Background(), + isPresent: false, + }, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + span, ctx, err := NewFromString(context.Background(), tt.parent, tt.label) + if tt.expectedErr == "" { + require.NoError(t, err) + require.NotEmpty(t, span) + require.Equal(t, tt.context, ctx) + + got := captureOutput(t, func() { + AnnotateSQL(span, &fakeStringer{tt.label}) + }, true) + + require.Equal(t, tt.expectedLog, got) + } else { + require.ErrorContains(t, err, tt.expectedErr) + require.Nil(t, span) + require.Nil(t, ctx) + } + + copySpan := CopySpan(context.TODO(), tt.context) + if tt.isPresent { 
+ require.Equal(t, tt.context, copySpan) + } else { + require.Equal(t, context.TODO(), copySpan) + } + }) + } +} + +func TestNilCloser(t *testing.T) { + nc := nilCloser{} + require.Nil(t, nc.Close()) +} + type fakeTracer struct { name string log []string } +func (f *fakeTracer) GetOpenTracingTracer() opentracing.Tracer { + return opentracing.GlobalTracer() +} + func (f *fakeTracer) NewFromString(parent, label string) (Span, error) { - panic("implement me") + if parent == "" { + return &mockSpan{tracer: f}, fmt.Errorf("parent is empty") + } + return &mockSpan{tracer: f}, nil } func (f *fakeTracer) New(parent Span, label string) Span { @@ -84,7 +152,10 @@ func (f *fakeTracer) New(parent Span, label string) Span { } func (f *fakeTracer) FromContext(ctx context.Context) (Span, bool) { - return nil, false + if ctx == context.Background() { + return nil, false + } + return &mockSpan{}, true } func (f *fakeTracer) NewContext(parent context.Context, span Span) context.Context { @@ -113,4 +184,13 @@ func (m *mockSpan) Finish() { func (m *mockSpan) Annotate(key string, value any) { m.tracer.log = append(m.tracer.log, fmt.Sprintf("key: %v values:%v", key, value)) + fmt.Println(m.tracer.log) +} + +type fakeStringer struct { + str string +} + +func (fs *fakeStringer) String() string { + return fs.str } diff --git a/go/vt/vtgate/planbuilder/projection.go b/go/trace/utils_test.go similarity index 55% rename from go/vt/vtgate/planbuilder/projection.go rename to go/trace/utils_test.go index cb60c079c37..63bbcfa1528 100644 --- a/go/vt/vtgate/planbuilder/projection.go +++ b/go/trace/utils_test.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Vitess Authors. +Copyright 2024 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,24 +14,28 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package planbuilder +package trace import ( - "vitess.io/vitess/go/vt/vtgate/engine" + "fmt" + "testing" + + "github.com/stretchr/testify/require" ) -type projection struct { - source logicalPlan - primitive *engine.Projection +func TestLogErrorsWhenClosing(t *testing.T) { + logFunc := LogErrorsWhenClosing(&fakeCloser{}) + + got := captureOutput(t, func() { + logFunc() + }, false) + + require.Contains(t, string(got), "test error") } -var _ logicalPlan = (*projection)(nil) +type fakeCloser struct { +} -// Primitive implements the logicalPlan interface -func (p *projection) Primitive() engine.Primitive { - if p.primitive == nil { - panic("WireUp not yet run") - } - p.primitive.Input = p.source.Primitive() - return p.primitive +func (fc *fakeCloser) Close() error { + return fmt.Errorf("test error") } diff --git a/go/unicode2/unicode.go b/go/unicode2/unicode.go index bb1942289fe..126c32fc3cd 100644 --- a/go/unicode2/unicode.go +++ b/go/unicode2/unicode.go @@ -117,7 +117,7 @@ func sortIter(t []tableIndex) { } } -// next16 finds the ranged to be added to the table. If ranges overlap between +// next16 finds the range to be added to the table. If ranges overlap between // multiple tables it clips the result to a non-overlapping range if the // elements are not fully subsumed. It returns a zero range if there are no more // ranges. diff --git a/go/unicode2/unicode_test.go b/go/unicode2/unicode_test.go new file mode 100644 index 00000000000..e955d677b49 --- /dev/null +++ b/go/unicode2/unicode_test.go @@ -0,0 +1,267 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unicode2 + +import ( + "testing" + "unicode" + + "github.com/stretchr/testify/assert" +) + +func TestMerge(t *testing.T) { + // Test for no range tables + rt := Merge() + assert.Equal(t, &unicode.RangeTable{}, rt) + + testCases := []struct { + rt1 *unicode.RangeTable + rt2 *unicode.RangeTable + expected *unicode.RangeTable + }{ + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 67, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2000, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 68, Hi: 71, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 2001, Hi: 3000, Stride: 1}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 71, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 3000, Stride: 1}, + }, + LatinOffset: 1, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 70, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2100, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 68, Hi: 72, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 2000, Hi: 3100, Stride: 1}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 72, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 3100, Stride: 1}, + }, + LatinOffset: 1, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 65, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1000, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + 
{Lo: 70, Hi: 70, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 2000, Hi: 2001, Stride: 1}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 70, Stride: 5}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1000, Stride: 1}, + {Lo: 2000, Hi: 2001, Stride: 1}, + }, + LatinOffset: 1, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 65, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1000, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 70, Hi: 71, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 2000, Hi: 2000, Stride: 1}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 65, Stride: 1}, + {Lo: 70, Hi: 71, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2000, Stride: 1000}, + }, + LatinOffset: 2, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 68, Stride: 1}, + {Lo: 100, Hi: 104, Stride: 2}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1004, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 69, Hi: 75, Stride: 2}, + }, + R32: []unicode.Range32{ + {Lo: 1005, Hi: 2000, Stride: 2}, + {Lo: 2003, Hi: 2006, Stride: 3}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 69, Stride: 1}, + {Lo: 71, Hi: 75, Stride: 2}, + {Lo: 100, Hi: 104, Stride: 2}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1005, Stride: 1}, + {Lo: 1007, Hi: 2000, Stride: 2}, + {Lo: 2003, Hi: 2006, Stride: 3}, + }, + LatinOffset: 3, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 78, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2000, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 75, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1500, Stride: 1}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 78, 
Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2000, Stride: 1}, + }, + LatinOffset: 1, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 78, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2000, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 75, Stride: 8}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1500, Stride: 3}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 78, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 2000, Stride: 1}, + }, + LatinOffset: 1, + }, + }, + { + rt1: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 78, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1500, Stride: 1}, + }, + }, + rt2: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 79, Hi: 84, Stride: 8}, + }, + R32: []unicode.Range32{ + {Lo: 1501, Hi: 2000, Stride: 500}, + }, + }, + expected: &unicode.RangeTable{ + R16: []unicode.Range16{ + {Lo: 65, Hi: 79, Stride: 1}, + }, + R32: []unicode.Range32{ + {Lo: 1000, Hi: 1501, Stride: 1}, + }, + LatinOffset: 1, + }, + }, + } + + for _, tc := range testCases { + rt := Merge(tc.rt1, tc.rt2) + + assert.Equal(t, tc.expected, rt) + } +} diff --git a/go/viperutil/internal/sync/sync.go b/go/viperutil/internal/sync/sync.go index 6608569d86c..6bee1a14e72 100644 --- a/go/viperutil/internal/sync/sync.go +++ b/go/viperutil/internal/sync/sync.go @@ -147,6 +147,7 @@ func (v *Viper) Watch(ctx context.Context, static *viper.Viper, minWaitInterval v.disk.SetConfigFile(cfg) if err := v.disk.ReadInConfig(); err != nil { + cancel() return nil, err } diff --git a/go/viperutil/internal/sync/sync_internal_test.go b/go/viperutil/internal/sync/sync_internal_test.go index cc8a163fa18..3c84913c7ce 100644 --- a/go/viperutil/internal/sync/sync_internal_test.go +++ b/go/viperutil/internal/sync/sync_internal_test.go @@ -19,7 +19,7 @@ package sync import ( "context" "encoding/json" - 
"math/rand" + "math/rand/v2" "testing" "time" @@ -132,5 +132,5 @@ func TestPersistConfig(t *testing.T) { } func jitter(min, max int) int { - return min + rand.Intn(max-min+1) + return min + rand.IntN(max-min+1) } diff --git a/go/viperutil/internal/sync/sync_test.go b/go/viperutil/internal/sync/sync_test.go index 6b8efa1b105..750baa053ea 100644 --- a/go/viperutil/internal/sync/sync_test.go +++ b/go/viperutil/internal/sync/sync_test.go @@ -19,7 +19,7 @@ package sync_test import ( "context" "encoding/json" - "math/rand" + "math/rand/v2" "os" "sync" "testing" @@ -53,7 +53,7 @@ func TestWatchConfig(t *testing.T) { return atomicWrite(tmp.Name(), data) } writeRandomConfig := func(tmp *os.File) error { - a, b := rand.Intn(100), rand.Intn(100) + a, b := rand.IntN(100), rand.IntN(100) return writeConfig(tmp, a, b) } @@ -155,5 +155,5 @@ func TestWatchConfig(t *testing.T) { } func jitter(min, max int) int { - return min + rand.Intn(max-min+1) + return min + rand.IntN(max-min+1) } diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index f7c7acd8e9c..0fd13fd984f 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -91,8 +91,8 @@ func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) { } // Tell the server that we understand the format of events // that will be used if binlog_checksum is enabled on the server. 
- if _, err := conn.ExecuteFetch("SET @master_binlog_checksum=@@global.binlog_checksum", 0, false); err != nil { - return nil, fmt.Errorf("failed to set @master_binlog_checksum=@@global.binlog_checksum: %v", err) + if _, err := conn.ExecuteFetch("SET @source_binlog_checksum = @@global.binlog_checksum, @master_binlog_checksum=@@global.binlog_checksum", 0, false); err != nil { + return nil, fmt.Errorf("failed to set @source_binlog_checksum=@@global.binlog_checksum: %v", err) } return conn, nil diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index abbf73ba506..d62fcc3a915 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -251,8 +251,8 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog var statements []FullBinlogStatement var format mysql.BinlogFormat var gtid replication.GTID - var pos = bls.startPos - var autocommit = true + pos := bls.startPos + autocommit := true var err error // Remember the RBR state. @@ -723,7 +723,7 @@ func (bls *Streamer) appendDeletes(statements []FullBinlogStatement, tce *tableC } // writeValuesAsSQL is a helper method to print the values as SQL in the -// provided bytes.Buffer. It also returns the value for the keyspaceIDColumn, +// provided strings.Builder. It also returns the value for the keyspaceIDColumn, // and the array of values for the PK, if necessary. func writeValuesAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, rs *mysql.Rows, rowIndex int, getPK bool) (sqltypes.Value, []sqltypes.Value, error) { valueIndex := 0 @@ -794,7 +794,7 @@ func writeValuesAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, rs *my } // writeIdentifiersAsSQL is a helper method to print the identifies as SQL in the -// provided bytes.Buffer. It also returns the value for the keyspaceIDColumn, +// provided strings.Builder. It also returns the value for the keyspaceIDColumn, // and the array of values for the PK, if necessary. 
func writeIdentifiersAsSQL(sql *sqlparser.TrackedBuffer, tce *tableCacheEntry, rs *mysql.Rows, rowIndex int, getPK bool) (sqltypes.Value, []sqltypes.Value, error) { valueIndex := 0 diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index d8481ca0665..1678b086719 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -53,7 +53,7 @@ func TestStreamerParseRBREvents(t *testing.T) { }, { Name: "message", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, }) @@ -302,7 +302,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { }, { Name: "delete", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, }) diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index 6d689bc5436..05685a54d3e 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -34,18 +34,19 @@ import ( "time" "github.com/spf13/pflag" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql/sqlerror" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/history" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/throttler" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -60,10 +61,20 @@ var ( // BlplQuery is the key for the stats map. BlplQuery = "Query" + // BlplMultiQuery is the key for the stats map. + BlplMultiQuery = "MultiQuery" // BlplTransaction is the key for the stats map. 
BlplTransaction = "Transaction" + // BlplBatchTransaction is the key for the stats map. + BlplBatchTransaction = "BatchTransaction" + + // Truncate values in the middle to preserve the end of the message which + // typically contains the error text. + TruncationLocation = textutil.TruncationLocationMiddle ) +var TruncationIndicator = fmt.Sprintf(" ... %s ... ", sqlparser.TruncationText) + // Stats is the internal stats of a player. It is a different // structure that is passed in so stats can be collected over the life // of multiple individual players. @@ -84,13 +95,15 @@ type Stats struct { State atomic.Value - PhaseTimings *stats.Timings - QueryTimings *stats.Timings - QueryCount *stats.CountersWithSingleLabel - CopyRowCount *stats.Counter - CopyLoopCount *stats.Counter - ErrorCounts *stats.CountersWithMultiLabels - NoopQueryCount *stats.CountersWithSingleLabel + PhaseTimings *stats.Timings + QueryTimings *stats.Timings + QueryCount *stats.CountersWithSingleLabel + BulkQueryCount *stats.CountersWithSingleLabel + TrxQueryBatchCount *stats.CountersWithSingleLabel + CopyRowCount *stats.Counter + CopyLoopCount *stats.Counter + ErrorCounts *stats.CountersWithMultiLabels + NoopQueryCount *stats.CountersWithSingleLabel VReplicationLags *stats.Timings VReplicationLagRates *stats.Rates @@ -100,6 +113,10 @@ type Stats struct { PartialQueryCount *stats.CountersWithMultiLabels PartialQueryCacheSize *stats.CountersWithMultiLabels + + ThrottledCounts *stats.CountersWithMultiLabels // By throttler and component + + DDLEventActions *stats.CountersWithSingleLabel } // RecordHeartbeat updates the time the last heartbeat from vstreamer was seen @@ -156,17 +173,21 @@ func NewStats() *Stats { bps.ReplicationLagSeconds.Store(math.MaxInt64) bps.PhaseTimings = stats.NewTimings("", "", "Phase") bps.QueryTimings = stats.NewTimings("", "", "Phase") - bps.QueryCount = stats.NewCountersWithSingleLabel("", "", "Phase", "") + bps.QueryCount = stats.NewCountersWithSingleLabel("", "", "Phase") 
+ bps.BulkQueryCount = stats.NewCountersWithSingleLabel("", "", "Statement") + bps.TrxQueryBatchCount = stats.NewCountersWithSingleLabel("", "", "Statement") bps.CopyRowCount = stats.NewCounter("", "") bps.CopyLoopCount = stats.NewCounter("", "") bps.ErrorCounts = stats.NewCountersWithMultiLabels("", "", []string{"type"}) - bps.NoopQueryCount = stats.NewCountersWithSingleLabel("", "", "Statement", "") + bps.NoopQueryCount = stats.NewCountersWithSingleLabel("", "", "Statement") bps.VReplicationLags = stats.NewTimings("", "", "") bps.VReplicationLagRates = stats.NewRates("", bps.VReplicationLags, 15*60/5, 5*time.Second) - bps.TableCopyRowCounts = stats.NewCountersWithSingleLabel("", "", "Table", "") + bps.TableCopyRowCounts = stats.NewCountersWithSingleLabel("", "", "Table") bps.TableCopyTimings = stats.NewTimings("", "", "Table") bps.PartialQueryCacheSize = stats.NewCountersWithMultiLabels("", "", []string{"type"}) bps.PartialQueryCount = stats.NewCountersWithMultiLabels("", "", []string{"type"}) + bps.ThrottledCounts = stats.NewCountersWithMultiLabels("", "", []string{"throttler", "component"}) + bps.DDLEventActions = stats.NewCountersWithSingleLabel("", "", "action") return bps } @@ -311,7 +332,7 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { return fmt.Errorf("no binlog player client factory named %v", binlogPlayerProtocol) } blplClient := clientFactory() - err = blplClient.Dial(blp.tablet) + err = blplClient.Dial(ctx, blp.tablet) if err != nil { err := fmt.Errorf("error dialing binlog server: %v", err) log.Error(err) @@ -362,13 +383,14 @@ func (blp *BinlogPlayer) applyEvents(ctx context.Context) error { if backoff == throttler.NotThrottled { break } + blp.blplStats.ThrottledCounts.Add([]string{"trx", "binlogplayer"}, 1) // We don't bother checking for context cancellation here because the // sleep will block only up to 1 second. (Usually, backoff is 1s / rate // e.g. a rate of 1000 TPS results into a backoff of 1 ms.) 
time.Sleep(backoff) } - // get the response + // Get the response. response, err := stream.Recv() // Check context before checking error, because canceled // contexts could be wrapped as regular errors. @@ -539,8 +561,7 @@ type VRSettings struct { DeferSecondaryKeys bool } -// ReadVRSettings retrieves the throttler settings for -// vreplication from the checkpoint table. +// ReadVRSettings retrieves the settings for a vreplication stream. func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { query := fmt.Sprintf("select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=%v", uid) qr, err := dbClient.ExecuteFetch(query, 1) @@ -598,22 +619,25 @@ func ReadVRSettings(dbClient DBClient, uid int32) (VRSettings, error) { // the _vt.vreplication table. func CreateVReplication(workflow string, source *binlogdatapb.BinlogSource, position string, maxTPS, maxReplicationLag, timeUpdated int64, dbName string, workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType, deferSecondaryKeys bool) string { + protoutil.SortBinlogSourceTables(source) return fmt.Sprintf("insert into _vt.vreplication "+ - "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) "+ - "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %v)", + "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options) "+ + "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %v, %s)", encodeString(workflow), encodeString(source.String()), encodeString(position), maxTPS, maxReplicationLag, - timeUpdated, binlogdatapb.VReplicationWorkflowState_Running.String(), encodeString(dbName), workflowType, workflowSubType, deferSecondaryKeys) + 
timeUpdated, binlogdatapb.VReplicationWorkflowState_Running.String(), encodeString(dbName), workflowType, + workflowSubType, deferSecondaryKeys, encodeString("{}")) } // CreateVReplicationState returns a statement to create a stopped vreplication. func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, position string, state binlogdatapb.VReplicationWorkflowState, dbName string, workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType) string { + protoutil.SortBinlogSourceTables(source) return fmt.Sprintf("insert into _vt.vreplication "+ - "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type) "+ - "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d)", + "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, options) "+ + "values (%v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %s)", encodeString(workflow), encodeString(source.String()), encodeString(position), throttler.MaxRateModuleDisabled, throttler.ReplicationLagModuleDisabled, time.Now().Unix(), state.String(), encodeString(dbName), - workflowType, workflowSubType) + workflowType, workflowSubType, encodeString("{}")) } // GenerateUpdatePos returns a statement to record the latest processed gtid in the _vt.vreplication table. @@ -652,13 +676,6 @@ func GenerateUpdateTimeThrottled(uid int32, timeThrottledUnix int64, componentTh return fmt.Sprintf("update _vt.vreplication set time_updated=%v, time_throttled=%v, component_throttled='%v' where id=%v", timeThrottledUnix, timeThrottledUnix, componentThrottled, uid), nil } -// StartVReplication returns a statement to start the replication. 
-func StartVReplication(uid int32) string { - return fmt.Sprintf( - "update _vt.vreplication set state='%v', stop_pos=NULL where id=%v", - binlogdatapb.VReplicationWorkflowState_Running.String(), uid) -} - // StartVReplicationUntil returns a statement to start the replication with a stop position. func StartVReplicationUntil(uid int32, pos string) string { return fmt.Sprintf( diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go index 148c4fb386b..5c6e28df704 100644 --- a/go/vt/binlog/binlogplayer/binlog_player_test.go +++ b/go/vt/binlog/binlogplayer/binlog_player_test.go @@ -381,8 +381,8 @@ func applyEvents(blp *BinlogPlayer) func() error { func TestCreateVReplicationKeyRange(t *testing.T) { want := "insert into _vt.vreplication " + - "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) " + - `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" key_range:{end:\"\\x80\"}', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0, false)` + "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options) " + + `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" key_range:{end:\"\\x80\"}', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0, false, '{}')` bls := binlogdatapb.BinlogSource{ Keyspace: "ks", @@ -400,8 +400,8 @@ func TestCreateVReplicationKeyRange(t *testing.T) { func TestCreateVReplicationTables(t *testing.T) { want := "insert into _vt.vreplication " + - "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) " + - `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" tables:\"a\" tables:\"b\"', 
'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0, false)` + "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options) " + + `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" tables:\"a\" tables:\"b\"', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db', 0, 0, false, '{}')` bls := binlogdatapb.BinlogSource{ Keyspace: "ks", diff --git a/go/vt/binlog/binlogplayer/client.go b/go/vt/binlog/binlogplayer/client.go index d234a439845..3aaad1a705c 100644 --- a/go/vt/binlog/binlogplayer/client.go +++ b/go/vt/binlog/binlogplayer/client.go @@ -53,7 +53,7 @@ type BinlogTransactionStream interface { // Client is the interface all clients must satisfy type Client interface { // Dial a server - Dial(tablet *topodatapb.Tablet) error + Dial(ctx context.Context, tablet *topodatapb.Tablet) error // Close the connection Close() diff --git a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index f9cd03691a5..61789f345c7 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -19,11 +19,13 @@ package binlogplayer import ( "context" "fmt" + "strings" "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" @@ -38,12 +40,14 @@ type DBClient interface { Rollback() error Close() ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) + ExecuteFetchMulti(query string, maxrows int) (qrs []*sqltypes.Result, err error) } // dbClientImpl is a real DBClient backed by a mysql connection. 
type dbClientImpl struct { dbConfig dbconfigs.Connector dbConn *mysql.Conn + parser *sqlparser.Parser } // dbClientImplWithSidecarDBReplacement is a DBClient implementation @@ -55,14 +59,15 @@ type dbClientImplWithSidecarDBReplacement struct { } // NewDBClient creates a DBClient instance -func NewDBClient(params dbconfigs.Connector) DBClient { +func NewDBClient(params dbconfigs.Connector, parser *sqlparser.Parser) DBClient { if sidecar.GetName() != sidecar.DefaultName { return &dbClientImplWithSidecarDBReplacement{ - dbClientImpl{dbConfig: params}, + dbClientImpl{dbConfig: params, parser: parser}, } } return &dbClientImpl{ dbConfig: params, + parser: parser, } } @@ -125,10 +130,14 @@ func LogError(msg string, err error) { // LimitString truncates string to specified size func LimitString(s string, limit int) string { - if len(s) > limit { + ts, err := textutil.TruncateText(s, limit, TruncationLocation, TruncationIndicator) + if err != nil { // Fallback to simple truncation + if len(s) <= limit { + return s + } return s[:limit] } - return s + return ts } func (dc *dbClientImpl) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { @@ -140,11 +149,47 @@ func (dc *dbClientImpl) ExecuteFetch(query string, maxrows int) (*sqltypes.Resul return mqr, nil } +func (dc *dbClientImpl) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + results := make([]*sqltypes.Result, 0) + mqr, more, err := dc.dbConn.ExecuteFetchMulti(query, maxrows, true) + if err != nil { + dc.handleError(err) + return nil, err + } + results = append(results, mqr) + for more { + mqr, more, _, err = dc.dbConn.ReadQueryResult(maxrows, false) + if err != nil { + dc.handleError(err) + return nil, err + } + results = append(results, mqr) + } + return results, nil +} + func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { // Replace any provided sidecar database qualifiers with the correct one. 
- uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := dcr.parser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } return dcr.dbClientImpl.ExecuteFetch(uq, maxrows) } + +func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + // Replace any provided sidecar database qualifiers with the correct one. + qps, err := dcr.parser.SplitStatementToPieces(query) + if err != nil { + return nil, err + } + for i, qp := range qps { + uq, err := dcr.parser.ReplaceTableQualifiers(qp, sidecar.DefaultName, sidecar.GetName()) + if err != nil { + return nil, err + } + qps[i] = uq + } + + return dcr.dbClientImpl.ExecuteFetchMulti(strings.Join(qps, ";"), maxrows) +} diff --git a/go/vt/binlog/binlogplayer/fake_dbclient.go b/go/vt/binlog/binlogplayer/fake_dbclient.go index 186722cf12f..750f35b3fe3 100644 --- a/go/vt/binlog/binlogplayer/fake_dbclient.go +++ b/go/vt/binlog/binlogplayer/fake_dbclient.go @@ -80,3 +80,7 @@ func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re } return nil, fmt.Errorf("unexpected: %v", query) } + +func (dc *fakeDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + return make([]*sqltypes.Result, 0), nil +} diff --git a/go/vt/binlog/binlogplayer/framework_test.go b/go/vt/binlog/binlogplayer/framework_test.go index 4bb61aa70a9..5455e7cc1bf 100644 --- a/go/vt/binlog/binlogplayer/framework_test.go +++ b/go/vt/binlog/binlogplayer/framework_test.go @@ -46,7 +46,7 @@ func newFakeBinlogClient() *fakeBinlogClient { return globalFBC } -func (fbc *fakeBinlogClient) Dial(tablet *topodatapb.Tablet) error { +func (fbc *fakeBinlogClient) Dial(ctx context.Context, tablet *topodatapb.Tablet) error { fbc.lastTablet = tablet return nil } diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index 
d64c4d40146..02e7ea28d7b 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -25,6 +25,7 @@ import ( "time" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" ) const mockClientUNameFiltered = "Filtered" @@ -41,6 +42,7 @@ type MockDBClient struct { done chan struct{} invariants map[string]*sqltypes.Result Tag string + parser *sqlparser.Parser } type mockExpect struct { @@ -83,15 +85,17 @@ func NewMockDBClient(t *testing.T) *MockDBClient { "set @@session.sql_mode": {}, "set sql_mode": {}, }, + parser: sqlparser.NewTestParser(), } } // NewMockDbaClient returns a new DBClientMock with the default "Dba" UName. func NewMockDbaClient(t *testing.T) *MockDBClient { return &MockDBClient{ - t: t, - UName: mockClientUNameDba, - done: make(chan struct{}), + t: t, + UName: mockClientUNameDba, + done: make(chan struct{}), + parser: sqlparser.NewTestParser(), } } @@ -178,6 +182,10 @@ func (dc *MockDBClient) Close() { // ExecuteFetch is part of the DBClient interface func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Result, err error) { + // Serialize ExecuteFetch to enforce a strict order on shared dbClients. 
+ dc.expectMu.Lock() + defer dc.expectMu.Unlock() + dc.t.Helper() msg := "DBClient query: %v" if dc.Tag != "" { @@ -191,8 +199,6 @@ func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re } } - dc.expectMu.Lock() - defer dc.expectMu.Unlock() if dc.currentResult >= len(dc.expect) { msg := "DBClientMock: query: %s, no more requests are expected" if dc.Tag != "" { @@ -224,3 +230,33 @@ func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re } return result.result, result.err } + +func (dc *MockDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + queries, err := dc.parser.SplitStatementToPieces(query) + if err != nil { + return nil, err + } + results := make([]*sqltypes.Result, 0, len(queries)) + for _, query := range queries { + qr, err := dc.ExecuteFetch(query, maxrows) + if err != nil { + return nil, err + } + results = append(results, qr) + } + return results, nil +} + +// AddInvariant can be used to customize the behavior of the mock client. +func (dc *MockDBClient) AddInvariant(query string, result *sqltypes.Result) { + dc.expectMu.Lock() + defer dc.expectMu.Unlock() + dc.invariants[query] = result +} + +// RemoveInvariant can be used to customize the behavior of the mock client. +func (dc *MockDBClient) RemoveInvariant(query string) { + dc.expectMu.Lock() + defer dc.expectMu.Unlock() + delete(dc.invariants, query) +} diff --git a/go/vt/binlog/binlogplayertest/player.go b/go/vt/binlog/binlogplayertest/player.go index e3468f92913..028f027ab3d 100644 --- a/go/vt/binlog/binlogplayertest/player.go +++ b/go/vt/binlog/binlogplayertest/player.go @@ -17,13 +17,12 @@ limitations under the License. 
package binlogplayertest import ( + "context" "fmt" "reflect" "strings" "testing" - "context" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -227,8 +226,8 @@ func (fake *FakeBinlogStreamer) HandlePanic(err *error) { } // Run runs the test suite -func Run(t *testing.T, bpc binlogplayer.Client, tablet *topodatapb.Tablet, fake *FakeBinlogStreamer) { - if err := bpc.Dial(tablet); err != nil { +func Run(ctx context.Context, t *testing.T, bpc binlogplayer.Client, tablet *topodatapb.Tablet, fake *FakeBinlogStreamer) { + if err := bpc.Dial(ctx, tablet); err != nil { t.Fatalf("Dial failed: %v", err) } diff --git a/go/vt/binlog/event_streamer.go b/go/vt/binlog/event_streamer.go deleted file mode 100644 index a872b089bff..00000000000 --- a/go/vt/binlog/event_streamer.go +++ /dev/null @@ -1,315 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package binlog - -import ( - "context" - "encoding/base64" - "fmt" - "strconv" - "strings" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" -) - -var ( - binlogSetInsertID = "SET INSERT_ID=" - binlogSetInsertIDLen = len(binlogSetInsertID) - streamCommentStart = "/* _stream " - streamCommentStartLen = len(streamCommentStart) -) - -type sendEventFunc func(event *querypb.StreamEvent) error - -// EventStreamer is an adapter on top of a binlog Streamer that convert -// the events into StreamEvent objects. -type EventStreamer struct { - bls *Streamer - sendEvent sendEventFunc -} - -// NewEventStreamer returns a new EventStreamer on top of a Streamer -func NewEventStreamer(cp dbconfigs.Connector, se *schema.Engine, startPos replication.Position, timestamp int64, sendEvent sendEventFunc) *EventStreamer { - evs := &EventStreamer{ - sendEvent: sendEvent, - } - evs.bls = NewStreamer(cp, se, nil, startPos, timestamp, evs.transactionToEvent) - evs.bls.extractPK = true - return evs -} - -// Stream starts streaming updates -func (evs *EventStreamer) Stream(ctx context.Context) error { - return evs.bls.Stream(ctx) -} - -func (evs *EventStreamer) transactionToEvent(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { - event := &querypb.StreamEvent{ - EventToken: eventToken, - } - var err error - var insertid int64 - for _, stmt := range statements { - switch stmt.Statement.Category { - case binlogdatapb.BinlogTransaction_Statement_BL_SET: - sql := string(stmt.Statement.Sql) - if strings.HasPrefix(sql, binlogSetInsertID) { - insertid, err = strconv.ParseInt(sql[binlogSetInsertIDLen:], 10, 64) - if err != nil { - binlogStreamerErrors.Add("EventStreamer", 1) - 
log.Errorf("%v: %s", err, sql) - } - } - case binlogdatapb.BinlogTransaction_Statement_BL_INSERT, - binlogdatapb.BinlogTransaction_Statement_BL_UPDATE, - binlogdatapb.BinlogTransaction_Statement_BL_DELETE: - var dmlStatement *querypb.StreamEvent_Statement - dmlStatement, insertid, err = evs.buildDMLStatement(stmt, insertid) - if err != nil { - dmlStatement = &querypb.StreamEvent_Statement{ - Category: querypb.StreamEvent_Statement_Error, - Sql: stmt.Statement.Sql, - } - } - event.Statements = append(event.Statements, dmlStatement) - case binlogdatapb.BinlogTransaction_Statement_BL_DDL: - ddlStatement := &querypb.StreamEvent_Statement{ - Category: querypb.StreamEvent_Statement_DDL, - Sql: stmt.Statement.Sql, - } - event.Statements = append(event.Statements, ddlStatement) - case binlogdatapb.BinlogTransaction_Statement_BL_UNRECOGNIZED: - unrecognized := &querypb.StreamEvent_Statement{ - Category: querypb.StreamEvent_Statement_Error, - Sql: stmt.Statement.Sql, - } - event.Statements = append(event.Statements, unrecognized) - default: - binlogStreamerErrors.Add("EventStreamer", 1) - log.Errorf("Unrecognized event: %v: %s", stmt.Statement.Category, stmt.Statement.Sql) - } - } - return evs.sendEvent(event) -} - -/* -buildDMLStatement recovers the PK from a FullBinlogStatement. -For RBR, the values are already in there, just need to be translated. -For SBR, parses the tuples of the full stream comment. -The _stream comment is extracted into a StreamEvent.Statement. -*/ -// Example query: insert into _table_(foo) values ('foo') /* _stream _table_ (eid id name ) (null 1 'bmFtZQ==' ); */ -// the "null" value is used for auto-increment columns. -func (evs *EventStreamer) buildDMLStatement(stmt FullBinlogStatement, insertid int64) (*querypb.StreamEvent_Statement, int64, error) { - // For RBR events, we know all this already, just extract it. - if stmt.PKNames != nil { - // We get an array of []sqltypes.Value, need to convert to querypb.Row. 
- dmlStatement := &querypb.StreamEvent_Statement{ - Category: querypb.StreamEvent_Statement_DML, - TableName: stmt.Table, - PrimaryKeyFields: stmt.PKNames, - PrimaryKeyValues: []*querypb.Row{sqltypes.RowToProto3(stmt.PKValues)}, - } - // InsertID is only needed to fill in the ID on next queries, - // but if we use RBR, it's already in the values, so just return 0. - return dmlStatement, 0, nil - } - - sql := string(stmt.Statement.Sql) - - // first extract the comment - commentIndex := strings.LastIndex(sql, streamCommentStart) - if commentIndex == -1 { - return nil, insertid, fmt.Errorf("missing stream comment") - } - dmlComment := sql[commentIndex+streamCommentStartLen:] - - // then start building the response - dmlStatement := &querypb.StreamEvent_Statement{ - Category: querypb.StreamEvent_Statement_DML, - } - tokenizer := sqlparser.NewStringTokenizer(dmlComment) - - // first parse the table name - typ, val := tokenizer.Scan() - if typ != sqlparser.ID { - return nil, insertid, fmt.Errorf("expecting table name in stream comment") - } - dmlStatement.TableName = string(val) - - // then parse the PK names - var err error - dmlStatement.PrimaryKeyFields, err = parsePkNames(tokenizer) - hasNegatives := make([]bool, len(dmlStatement.PrimaryKeyFields)) - if err != nil { - return nil, insertid, err - } - - // then parse the PK values, one at a time - for typ, _ = tokenizer.Scan(); typ != ';'; typ, _ = tokenizer.Scan() { - switch typ { - case '(': - // pkTuple is a list of pk values - var pkTuple *querypb.Row - pkTuple, insertid, err = parsePkTuple(tokenizer, insertid, dmlStatement.PrimaryKeyFields, hasNegatives) - if err != nil { - return nil, insertid, err - } - dmlStatement.PrimaryKeyValues = append(dmlStatement.PrimaryKeyValues, pkTuple) - default: - return nil, insertid, fmt.Errorf("expecting '('") - } - } - - return dmlStatement, insertid, nil -} - -// parsePkNames parses something like (eid id name ) -func parsePkNames(tokenizer *sqlparser.Tokenizer) 
([]*querypb.Field, error) { - var columns []*querypb.Field - if typ, _ := tokenizer.Scan(); typ != '(' { - return nil, fmt.Errorf("expecting '('") - } - for typ, val := tokenizer.Scan(); typ != ')'; typ, val = tokenizer.Scan() { - switch typ { - case sqlparser.ID: - columns = append(columns, &querypb.Field{ - Name: string(val), - }) - default: - return nil, fmt.Errorf("syntax error at position: %d", tokenizer.Pos) - } - } - return columns, nil -} - -// parsePkTuple parses something like (null 1 'bmFtZQ==' ). For numbers, the default -// type is Int64. If an unsigned number that can't fit in an int64 is seen, then the -// type is set to Uint64. In such cases, if a negative number was previously seen, the -// function returns an error. -func parsePkTuple(tokenizer *sqlparser.Tokenizer, insertid int64, fields []*querypb.Field, hasNegatives []bool) (*querypb.Row, int64, error) { - result := &querypb.Row{} - - index := 0 - for typ, val := tokenizer.Scan(); typ != ')'; typ, val = tokenizer.Scan() { - if index >= len(fields) { - return nil, insertid, fmt.Errorf("length mismatch in values") - } - - switch typ { - case '-': - hasNegatives[index] = true - typ2, val2 := tokenizer.Scan() - if typ2 != sqlparser.INTEGRAL { - return nil, insertid, fmt.Errorf("expecting number after '-'") - } - fullVal := append([]byte{'-'}, val2...) - if _, err := strconv.ParseInt(string(fullVal), 0, 64); err != nil { - return nil, insertid, err - } - switch fields[index].Type { - case sqltypes.Null: - fields[index].Type = sqltypes.Int64 - case sqltypes.Int64: - // no-op - default: - return nil, insertid, fmt.Errorf("incompatible negative number field with type %v", fields[index].Type) - } - - result.Lengths = append(result.Lengths, int64(len(fullVal))) - result.Values = append(result.Values, fullVal...) 
- case sqlparser.INTEGRAL: - unsigned, err := strconv.ParseUint(string(val), 0, 64) - if err != nil { - return nil, insertid, err - } - if unsigned > uint64(9223372036854775807) { - // Number is a uint64 that can't fit in an int64. - if hasNegatives[index] { - return nil, insertid, fmt.Errorf("incompatible unsigned number field with type %v", fields[index].Type) - } - switch fields[index].Type { - case sqltypes.Null, sqltypes.Int64: - fields[index].Type = sqltypes.Uint64 - case sqltypes.Uint64: - // no-op - default: - return nil, insertid, fmt.Errorf("incompatible number field with type %v", fields[index].Type) - } - } else { - // Could be int64 or uint64. - switch fields[index].Type { - case sqltypes.Null: - fields[index].Type = sqltypes.Int64 - case sqltypes.Int64, sqltypes.Uint64: - // no-op - default: - return nil, insertid, fmt.Errorf("incompatible number field with type %v", fields[index].Type) - } - } - - result.Lengths = append(result.Lengths, int64(len(val))) - result.Values = append(result.Values, val...) - case sqlparser.NULL: - switch fields[index].Type { - case sqltypes.Null: - fields[index].Type = sqltypes.Int64 - case sqltypes.Int64, sqltypes.Uint64: - // no-op - default: - return nil, insertid, fmt.Errorf("incompatible auto-increment field with type %v", fields[index].Type) - } - - v := strconv.AppendInt(nil, insertid, 10) - result.Lengths = append(result.Lengths, int64(len(v))) - result.Values = append(result.Values, v...) - insertid++ - case sqlparser.STRING: - switch fields[index].Type { - case sqltypes.Null: - fields[index].Type = sqltypes.VarBinary - case sqltypes.VarBinary: - // no-op - default: - return nil, insertid, fmt.Errorf("incompatible string field with type %v", fields[index].Type) - } - - decoded, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return nil, insertid, err - } - result.Lengths = append(result.Lengths, int64(len(decoded))) - result.Values = append(result.Values, decoded...) 
- default: - return nil, insertid, fmt.Errorf("syntax error at position: %d", tokenizer.Pos) - } - index++ - } - - if index != len(fields) { - return nil, insertid, fmt.Errorf("length mismatch in values") - } - return result, insertid, nil -} diff --git a/go/vt/binlog/event_streamer_test.go b/go/vt/binlog/event_streamer_test.go deleted file mode 100644 index 38e50240d1c..00000000000 --- a/go/vt/binlog/event_streamer_test.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package binlog - -import ( - "testing" - - "vitess.io/vitess/go/test/utils" - - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" -) - -var dmlErrorCases = []string{ - "query", - "query /* _stream 10 (eid id `name` ) (null 1 'bmFtZQ==' ); */", - "query /* _stream _table_ eid id `name` ) (null 1 'bmFtZQ==' ); */", - "query /* _stream _table_ (10 id `name` ) (null 1 'bmFtZQ==' ); */", - "query /* _stream _table_ (eid id `name` (null 1 'bmFtZQ==' ); */", - "query /* _stream _table_ (eid id `name`) (null 'aaa' 'bmFtZQ==' ); */", - "query /* _stream _table_ (eid id `name`) (null 'bmFtZQ==' ); */", - "query /* _stream _table_ (eid id `name`) (null 1.1 'bmFtZQ==' ); */", - "query /* _stream _table_ (eid id `name`) (null a 'bmFtZQ==' ); */", -} - -func TestEventErrors(t *testing.T) { - var got *querypb.StreamEvent - evs := &EventStreamer{ - sendEvent: func(event *querypb.StreamEvent) error { - got = event - return nil - }, - } - for _, sql := range dmlErrorCases { - statements := []FullBinlogStatement{ - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, - Sql: []byte(sql), - }, - }, - } - err := evs.transactionToEvent(nil, statements) - if err != nil { - t.Errorf("%s: %v", sql, err) - continue - } - want := &querypb.StreamEvent{ - Statements: []*querypb.StreamEvent_Statement{ - { - Category: querypb.StreamEvent_Statement_Error, - Sql: []byte(sql), - }, - }, - } - if !proto.Equal(got, want) { - t.Errorf("error for SQL: '%v' got: %+v, want: %+v", sql, got, want) - } - } -} - -func TestSetErrors(t *testing.T) { - evs := &EventStreamer{ - sendEvent: func(event *querypb.StreamEvent) error { - return nil - }, - } - statements := []FullBinlogStatement{ - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - 
Sql: []byte("SET INSERT_ID=abcd"), - }, - }, - } - before := binlogStreamerErrors.Counts()["EventStreamer"] - err := evs.transactionToEvent(nil, statements) - require.NoError(t, err) - got := binlogStreamerErrors.Counts()["EventStreamer"] - if got != before+1 { - t.Errorf("got: %v, want: %+v", got, before+1) - } -} - -func TestDMLEvent(t *testing.T) { - statements := []FullBinlogStatement{ - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte("SET TIMESTAMP=2"), - }, - }, - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte("SET INSERT_ID=10"), - }, - }, - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, - Sql: []byte("query /* _stream _table_ (eid id `name`) (null 1 'bmFtZQ==' ) (null 18446744073709551615 'bmFtZQ==' ); */"), - }, - }, - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_INSERT, - Sql: []byte("query"), - }, - }, - } - eventToken := &querypb.EventToken{ - Timestamp: 1, - Position: "MariaDB/0-41983-20", - } - evs := &EventStreamer{ - sendEvent: func(event *querypb.StreamEvent) error { - for _, statement := range event.Statements { - switch statement.Category { - case querypb.StreamEvent_Statement_DML: - want := `category:DML table_name:"_table_" primary_key_fields:{name:"eid" type:INT64} primary_key_fields:{name:"id" type:UINT64} primary_key_fields:{name:"name" type:VARBINARY} primary_key_values:{lengths:2 lengths:1 lengths:4 values:"101name"} primary_key_values:{lengths:2 lengths:20 lengths:4 values:"1118446744073709551615name"}` - utils.MustMatchPB(t, want, statement) - case querypb.StreamEvent_Statement_Error: - want := `sql:"query"` - utils.MustMatchPB(t, want, statement) - default: - t.Errorf("unexpected: %#v", event) - } - } - // then test the 
position - want := `timestamp:1 position:"MariaDB/0-41983-20"` - utils.MustMatchPB(t, want, event.EventToken) - return nil - }, - } - err := evs.transactionToEvent(eventToken, statements) - require.NoError(t, err) -} - -func TestDDLEvent(t *testing.T) { - statements := []FullBinlogStatement{ - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, - Sql: []byte("SET TIMESTAMP=2"), - }, - }, - { - Statement: &binlogdatapb.BinlogTransaction_Statement{ - Category: binlogdatapb.BinlogTransaction_Statement_BL_DDL, - Sql: []byte("DDL"), - }, - }, - } - eventToken := &querypb.EventToken{ - Timestamp: 1, - Position: "MariaDB/0-41983-20", - } - evs := &EventStreamer{ - sendEvent: func(event *querypb.StreamEvent) error { - for _, statement := range event.Statements { - switch statement.Category { - case querypb.StreamEvent_Statement_DDL: - want := `category:DDL sql:"DDL"` - utils.MustMatchPB(t, want, statement) - default: - t.Errorf("unexpected: %#v", event) - } - } - // then test the position - want := `timestamp:1 position:"MariaDB/0-41983-20"` - utils.MustMatchPB(t, want, event.EventToken) - return nil - }, - } - err := evs.transactionToEvent(eventToken, statements) - require.NoError(t, err) -} diff --git a/go/vt/binlog/grpcbinlogplayer/player.go b/go/vt/binlog/grpcbinlogplayer/player.go index 1d5111aa5b0..014860ccdaf 100644 --- a/go/vt/binlog/grpcbinlogplayer/player.go +++ b/go/vt/binlog/grpcbinlogplayer/player.go @@ -52,14 +52,14 @@ type client struct { c binlogservicepb.UpdateStreamClient } -func (client *client) Dial(tablet *topodatapb.Tablet) error { +func (client *client) Dial(ctx context.Context, tablet *topodatapb.Tablet) error { addr := netutil.JoinHostPort(tablet.Hostname, tablet.PortMap["grpc"]) var err error opt, err := grpcclient.SecureDialOption(cert, key, ca, crl, name) if err != nil { return err } - client.cc, err = grpcclient.Dial(addr, grpcclient.FailFast(true), opt) + client.cc, err = 
grpcclient.DialContext(ctx, addr, grpcclient.FailFast(true), opt) if err != nil { return err } diff --git a/go/vt/binlog/grpcbinlogplayer/player_test.go b/go/vt/binlog/grpcbinlogplayer/player_test.go index bde54cd2113..b290782f015 100644 --- a/go/vt/binlog/grpcbinlogplayer/player_test.go +++ b/go/vt/binlog/grpcbinlogplayer/player_test.go @@ -17,6 +17,7 @@ limitations under the License. package grpcbinlogplayer import ( + "context" "net" "testing" @@ -48,9 +49,11 @@ func TestGRPCBinlogStreamer(t *testing.T) { // Create a GRPC client to talk to the fake tablet c := &client{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // and send it to the test suite - binlogplayertest.Run(t, c, &topodatapb.Tablet{ + binlogplayertest.Run(ctx, t, c, &topodatapb.Tablet{ Hostname: host, PortMap: map[string]int32{ "grpc": int32(port), diff --git a/go/vt/binlog/keyspace_id_resolver.go b/go/vt/binlog/keyspace_id_resolver.go index 6903ba53b71..1ca198760a3 100644 --- a/go/vt/binlog/keyspace_id_resolver.go +++ b/go/vt/binlog/keyspace_id_resolver.go @@ -17,13 +17,13 @@ limitations under the License. package binlog import ( + "context" "fmt" "strings" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -47,13 +47,13 @@ type keyspaceIDResolverFactory func(*schema.Table) (int, keyspaceIDResolver, err // newKeyspaceIDResolverFactory creates a new // keyspaceIDResolverFactory for the provided keyspace and cell. 
-func newKeyspaceIDResolverFactory(ctx context.Context, ts *topo.Server, keyspace string, cell string) (keyspaceIDResolverFactory, error) { - return newKeyspaceIDResolverFactoryV3(ctx, ts, keyspace, cell) +func newKeyspaceIDResolverFactory(ctx context.Context, ts *topo.Server, keyspace string, cell string, parser *sqlparser.Parser) (keyspaceIDResolverFactory, error) { + return newKeyspaceIDResolverFactoryV3(ctx, ts, keyspace, cell, parser) } // newKeyspaceIDResolverFactoryV3 finds the SrvVSchema in the cell, // gets the keyspace part, and uses it to find the column name. -func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspace string, cell string) (keyspaceIDResolverFactory, error) { +func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspace string, cell string, parser *sqlparser.Parser) (keyspaceIDResolverFactory, error) { srvVSchema, err := ts.GetSrvVSchema(ctx, cell) if err != nil { return nil, err @@ -62,7 +62,7 @@ func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspa if !ok { return nil, fmt.Errorf("SrvVSchema has no entry for keyspace %v", keyspace) } - keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, keyspace) + keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, keyspace, parser) if err != nil { return nil, fmt.Errorf("cannot build vschema for keyspace %v: %v", keyspace, err) } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 78d61c0860c..4397eccd4da 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -70,47 +71,6 @@ type UpdateStreamControl interface { IsEnabled() bool } -// UpdateStreamControlMock is an implementation of UpdateStreamControl -// to be used 
in tests -type UpdateStreamControlMock struct { - enabled bool - sync.Mutex -} - -// NewUpdateStreamControlMock creates a new UpdateStreamControlMock -func NewUpdateStreamControlMock() *UpdateStreamControlMock { - return &UpdateStreamControlMock{} -} - -// InitDBConfig is part of UpdateStreamControl -func (m *UpdateStreamControlMock) InitDBConfig(*dbconfigs.DBConfigs) { -} - -// RegisterService is part of UpdateStreamControl -func (m *UpdateStreamControlMock) RegisterService() { -} - -// Enable is part of UpdateStreamControl -func (m *UpdateStreamControlMock) Enable() { - m.Lock() - m.enabled = true - m.Unlock() -} - -// Disable is part of UpdateStreamControl -func (m *UpdateStreamControlMock) Disable() { - m.Lock() - m.enabled = false - m.Unlock() -} - -// IsEnabled is part of UpdateStreamControl -func (m *UpdateStreamControlMock) IsEnabled() bool { - m.Lock() - defer m.Unlock() - return m.enabled -} - // UpdateStreamImpl is the real implementation of UpdateStream // and UpdateStreamControl type UpdateStreamImpl struct { @@ -126,6 +86,7 @@ type UpdateStreamImpl struct { state atomic.Int64 stateWaitGroup sync.WaitGroup streams StreamList + parser *sqlparser.Parser } // StreamList is a map of context.CancelFunc to mass-interrupt ongoing @@ -179,12 +140,13 @@ type RegisterUpdateStreamServiceFunc func(UpdateStream) var RegisterUpdateStreamServices []RegisterUpdateStreamServiceFunc // NewUpdateStream returns a new UpdateStreamImpl object -func NewUpdateStream(ts *topo.Server, keyspace string, cell string, se *schema.Engine) *UpdateStreamImpl { +func NewUpdateStream(ts *topo.Server, keyspace string, cell string, se *schema.Engine, parser *sqlparser.Parser) *UpdateStreamImpl { return &UpdateStreamImpl{ ts: ts, keyspace: keyspace, cell: cell, se: se, + parser: parser, } } @@ -275,7 +237,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi return callback(trans) }) bls := NewStreamer(updateStream.cp, updateStream.se, charset, pos, 0, f) - 
bls.resolverFactory, err = newKeyspaceIDResolverFactory(ctx, updateStream.ts, updateStream.keyspace, updateStream.cell) + bls.resolverFactory, err = newKeyspaceIDResolverFactory(ctx, updateStream.ts, updateStream.keyspace, updateStream.cell, updateStream.parser) if err != nil { return fmt.Errorf("newKeyspaceIDResolverFactory failed: %v", err) } diff --git a/go/vt/callinfo/callinfo_test.go b/go/vt/callinfo/callinfo_test.go new file mode 100644 index 00000000000..c81b7792242 --- /dev/null +++ b/go/vt/callinfo/callinfo_test.go @@ -0,0 +1,117 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package callinfo + +import ( + "context" + "testing" + + "github.com/google/safehtml" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/callinfo/fakecallinfo" +) + +var fci fakecallinfo.FakeCallInfo = fakecallinfo.FakeCallInfo{ + User: "test", + Remote: "locahost", + Method: "", + Html: safehtml.HTML{}, +} + +func TestNewContext(t *testing.T) { + tests := []struct { + name string + ctx context.Context + ci CallInfo + expectedContext context.Context + }{ + { + name: "empty", + ctx: context.Background(), + ci: nil, + expectedContext: context.WithValue(context.Background(), callInfoKey, nil), + }, + { + name: "not empty", + ctx: context.Background(), + ci: &fci, + expectedContext: context.WithValue(context.Background(), callInfoKey, &fci), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expectedContext, NewContext(tt.ctx, tt.ci)) + }) + } +} + +func TestFromContext(t *testing.T) { + tests := []struct { + name string + ctx context.Context + expectedCi CallInfo + ok bool + }{ + { + name: "empty", + ctx: context.WithValue(context.Background(), callInfoKey, nil), + expectedCi: nil, + ok: false, + }, + { + name: "not empty", + expectedCi: &fci, + ctx: context.WithValue(context.Background(), callInfoKey, &fci), + ok: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ci, ok := FromContext(tt.ctx) + require.Equal(t, tt.expectedCi, ci) + require.Equal(t, tt.ok, ok) + }) + } +} + +func TestHTMLFromContext(t *testing.T) { + tests := []struct { + name string + ctx context.Context + expectedHTML safehtml.HTML + }{ + { + name: "empty", + ctx: context.WithValue(context.Background(), callInfoKey, nil), + expectedHTML: safehtml.HTML{}, + }, + { + name: "not empty", + ctx: context.WithValue(context.Background(), callInfoKey, &fci), + expectedHTML: safehtml.HTML{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, 
tt.expectedHTML, HTMLFromContext(tt.ctx)) + }) + } +} diff --git a/go/vt/callinfo/plugin_grpc_test.go b/go/vt/callinfo/plugin_grpc_test.go new file mode 100644 index 00000000000..2c5c8f9d888 --- /dev/null +++ b/go/vt/callinfo/plugin_grpc_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package callinfo + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGRPCCallInfo(t *testing.T) { + grpcCi := gRPCCallInfoImpl{ + method: "tcp", + remoteAddr: "localhost", + } + + require.Equal(t, context.Background(), GRPCCallInfo(context.Background())) + require.Equal(t, grpcCi.remoteAddr, grpcCi.RemoteAddr()) + require.Equal(t, "gRPC", grpcCi.Username()) + require.Equal(t, "localhost:tcp(gRPC)", grpcCi.Text()) + require.Equal(t, "Method: tcp Remote Addr: localhost", grpcCi.HTML().String()) +} diff --git a/go/vt/callinfo/plugin_mysql_test.go b/go/vt/callinfo/plugin_mysql_test.go new file mode 100644 index 00000000000..abe6aa371dd --- /dev/null +++ b/go/vt/callinfo/plugin_mysql_test.go @@ -0,0 +1,35 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package callinfo + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMysqlCallInfo(t *testing.T) { + mysqlCi := mysqlCallInfoImpl{ + remoteAddr: "localhost", + user: "test", + } + + require.Equal(t, mysqlCi.remoteAddr, mysqlCi.RemoteAddr()) + require.Equal(t, mysqlCi.user, mysqlCi.Username()) + require.Equal(t, "test@localhost(Mysql)", mysqlCi.Text()) + require.Equal(t, "MySQL User: test Remote Addr: localhost", mysqlCi.HTML().String()) +} diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go index fe3a228835c..82c322e7ae9 100644 --- a/go/vt/dbconfigs/dbconfigs.go +++ b/go/vt/dbconfigs/dbconfigs.go @@ -26,13 +26,13 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vttls" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/log" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttls" "vitess.io/vitess/go/yaml2" ) @@ -123,7 +123,7 @@ func RegisterFlags(userKeys ...string) { servenv.OnParse(func(fs *pflag.FlagSet) { registerBaseFlags(fs) for _, userKey := range userKeys { - uc, cp := GlobalDBConfigs.getParams(userKey, &GlobalDBConfigs) + uc, cp := GlobalDBConfigs.getParams(userKey) registerPerUserFlags(fs, userKey, uc, cp) } }) @@ -318,9 +318,9 @@ func (dbcfgs *DBConfigs) Clone() *DBConfigs { // parameters. This is only for legacy support. 
// If no per-user parameters are supplied, then the defaultSocketFile // is used to initialize the per-user conn params. -func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string) { +func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string, collationEnv *collations.Environment) { for _, userKey := range All { - uc, cp := dbcfgs.getParams(userKey, dbcfgs) + uc, cp := dbcfgs.getParams(userKey) // TODO @rafael: For ExternalRepl we need to respect the provided host / port // At the moment this is an snowflake user connection type that it used by // vreplication to connect to external mysql hosts that are not part of a vitess @@ -338,8 +338,13 @@ func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string) { // If the connection params has a charset defined, it will not be overridden by the // global configuration. - if dbcfgs.Charset != "" && cp.Charset == "" { - cp.Charset = dbcfgs.Charset + if dbcfgs.Charset != "" && cp.Charset == collations.Unknown { + ch, err := collationEnv.ParseConnectionCharset(dbcfgs.Charset) + if err != nil { + log.Warningf("Error parsing charset %s: %v", dbcfgs.Charset, err) + ch = collationEnv.DefaultConnectionCharset() + } + cp.Charset = ch } if dbcfgs.Flags != 0 { @@ -367,7 +372,7 @@ func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string) { log.Infof("DBConfigs: %v\n", dbcfgs.String()) } -func (dbcfgs *DBConfigs) getParams(userKey string, dbc *DBConfigs) (*UserConfig, *mysql.ConnParams) { +func (dbcfgs *DBConfigs) getParams(userKey string) (*UserConfig, *mysql.ConnParams) { var uc *UserConfig var cp *mysql.ConnParams switch userKey { diff --git a/go/vt/dbconfigs/dbconfigs_test.go b/go/vt/dbconfigs/dbconfigs_test.go index a97f2526c17..029682d13b7 100644 --- a/go/vt/dbconfigs/dbconfigs_test.go +++ b/go/vt/dbconfigs/dbconfigs_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/yaml2" ) @@ -36,10 +37,10 
@@ func TestInit(t *testing.T) { dbaParams: mysql.ConnParams{Host: "host"}, Charset: "utf8", } - dbConfigs.InitWithSocket("default") - assert.Equal(t, mysql.ConnParams{UnixSocket: "socket", Charset: "utf8"}, dbConfigs.appParams) - assert.Equal(t, mysql.ConnParams{Host: "host", Charset: "utf8"}, dbConfigs.dbaParams) - assert.Equal(t, mysql.ConnParams{UnixSocket: "default", Charset: "utf8"}, dbConfigs.appdebugParams) + dbConfigs.InitWithSocket("default", collations.MySQL8()) + assert.Equal(t, mysql.ConnParams{UnixSocket: "socket", Charset: collations.CollationUtf8mb3ID}, dbConfigs.appParams) + assert.Equal(t, mysql.ConnParams{Host: "host", Charset: collations.CollationUtf8mb3ID}, dbConfigs.dbaParams) + assert.Equal(t, mysql.ConnParams{UnixSocket: "default", Charset: collations.CollationUtf8mb3ID}, dbConfigs.appdebugParams) dbConfigs = DBConfigs{ Host: "a", @@ -72,7 +73,7 @@ func TestInit(t *testing.T) { Host: "host", }, } - dbConfigs.InitWithSocket("default") + dbConfigs.InitWithSocket("default", collations.MySQL8()) want := mysql.ConnParams{ Host: "a", @@ -80,7 +81,7 @@ func TestInit(t *testing.T) { Uname: "app", Pass: "apppass", UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, Flags: 2, Flavor: "flavor", ConnectTimeoutMs: 250, @@ -91,7 +92,7 @@ func TestInit(t *testing.T) { Host: "a", Port: 1, UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, Flags: 2, Flavor: "flavor", SslCa: "d", @@ -107,7 +108,7 @@ func TestInit(t *testing.T) { Uname: "dba", Pass: "dbapass", UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, Flags: 2, Flavor: "flavor", SslCa: "d", @@ -143,21 +144,21 @@ func TestInit(t *testing.T) { }, appParams: mysql.ConnParams{ UnixSocket: "socket", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, }, dbaParams: mysql.ConnParams{ Host: "host", Flags: 2, }, } - dbConfigs.InitWithSocket("default") + dbConfigs.InitWithSocket("default", collations.MySQL8()) 
want = mysql.ConnParams{ Host: "a", Port: 1, Uname: "app", Pass: "apppass", UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, } assert.Equal(t, want, dbConfigs.appParams) want = mysql.ConnParams{ @@ -168,7 +169,7 @@ func TestInit(t *testing.T) { SslCaPath: "e", SslCert: "f", SslKey: "g", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.appdebugParams) want = mysql.ConnParams{ @@ -182,7 +183,7 @@ func TestInit(t *testing.T) { SslCaPath: "e", SslCert: "f", SslKey: "g", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.dbaParams) } @@ -201,13 +202,13 @@ func TestUseTCP(t *testing.T) { }, Charset: "utf8", } - dbConfigs.InitWithSocket("default") + dbConfigs.InitWithSocket("default", collations.MySQL8()) want := mysql.ConnParams{ Host: "a", Port: 1, Uname: "app", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.appParams) @@ -216,7 +217,7 @@ func TestUseTCP(t *testing.T) { Port: 1, Uname: "dba", UnixSocket: "b", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.dbaParams) } diff --git a/go/vt/discovery/fake_healthcheck.go b/go/vt/discovery/fake_healthcheck.go index cb959902c19..d1bde350276 100644 --- a/go/vt/discovery/fake_healthcheck.go +++ b/go/vt/discovery/fake_healthcheck.go @@ -229,7 +229,7 @@ func (fhc *FakeHealthCheck) ReplaceTablet(old, new *topodatapb.Tablet) { } // TabletConnection returns the TabletConn of the given tablet. 
-func (fhc *FakeHealthCheck) TabletConnection(alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) { +func (fhc *FakeHealthCheck) TabletConnection(ctx context.Context, alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) { aliasStr := topoproto.TabletAliasString(alias) fhc.mu.RLock() defer fhc.mu.RUnlock() @@ -252,6 +252,17 @@ func (fhc *FakeHealthCheck) CacheStatus() TabletsCacheStatusList { return tcsl } +// HealthyStatus returns the status for each healthy tablet +func (fhc *FakeHealthCheck) HealthyStatus() TabletsCacheStatusList { + tcsMap := fhc.CacheStatusMap() + tcsl := make(TabletsCacheStatusList, 0, len(tcsMap)) + for _, tcs := range tcsMap { + tcsl = append(tcsl, tcs) + } + sort.Sort(tcsl) + return tcsl +} + // CacheStatusMap returns a map of the health check cache. func (fhc *FakeHealthCheck) CacheStatusMap() map[string]*TabletsCacheStatus { tcsMap := make(map[string]*TabletsCacheStatus) diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index 9d17005d0ad..46d92c7364e 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -25,7 +25,7 @@ limitations under the License. // Alternatively, use a Watcher implementation which will constantly watch // a source (e.g. the topology) and add and remove tablets as they are // added or removed from the source. -// For a Watcher example have a look at NewCellTabletsWatcher(). +// For a Watcher example have a look at NewTopologyWatcher(). // // Internally, the HealthCheck module is connected to each tablet and has a // streaming RPC (StreamHealth) open to receive periodic health infos. 
@@ -46,6 +46,7 @@ import ( "github.com/google/safehtml/template" "github.com/google/safehtml/template/uncheckedconversions" "github.com/spf13/pflag" + "golang.org/x/sync/semaphore" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/stats" @@ -87,11 +88,19 @@ var ( // refreshKnownTablets tells us whether to process all tablets or only new tablets. refreshKnownTablets = true - // topoReadConcurrency tells us how many topo reads are allowed in parallel. - topoReadConcurrency = 32 + // healthCheckDialConcurrency tells us how many healthcheck connections can be opened to tablets at once. This should be less than the golang max thread limit of 10000. + healthCheckDialConcurrency int64 = 1024 // How much to sleep between each check. waitAvailableTabletInterval = 100 * time.Millisecond + + // HealthCheckCacheTemplate uses healthCheckTemplate with the `HealthCheck Tablet - Cache` title to create the + // HTML code required to render the cache of the HealthCheck. + HealthCheckCacheTemplate = fmt.Sprintf(healthCheckTemplate, "HealthCheck - Cache") + + // HealthCheckHealthyTemplate uses healthCheckTemplate with the `HealthCheck Tablet - Healthy Tablets` title to + // create the HTML code required to render the list of healthy tablets from the HealthCheck. + HealthCheckHealthyTemplate = fmt.Sprintf(healthCheckTemplate, "HealthCheck - Healthy Tablets") ) // See the documentation for NewHealthCheck below for an explanation of these parameters. @@ -99,13 +108,9 @@ const ( DefaultHealthCheckRetryDelay = 5 * time.Second DefaultHealthCheckTimeout = 1 * time.Minute - // DefaultTopoReadConcurrency is used as the default value for the topoReadConcurrency parameter of a TopologyWatcher. - DefaultTopoReadConcurrency int = 5 - // DefaultTopologyWatcherRefreshInterval is used as the default value for - // the refresh interval of a topology watcher. 
- DefaultTopologyWatcherRefreshInterval = 1 * time.Minute - // HealthCheckTemplate is the HTML code to display a TabletsCacheStatusList - HealthCheckTemplate = ` + // healthCheckTemplate is the HTML code to display a TabletsCacheStatusList, it takes a parameter for the title + // as the template can be used for both HealthCheck's cache and healthy tablets list. + healthCheckTemplate = ` - + @@ -167,7 +172,7 @@ func registerWebUIFlags(fs *pflag.FlagSet) { fs.StringVar(&TabletURLTemplateString, "tablet_url_template", "http://{{.GetTabletHostPort}}", "Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this.") fs.DurationVar(&refreshInterval, "tablet_refresh_interval", 1*time.Minute, "Tablet refresh interval.") fs.BoolVar(&refreshKnownTablets, "tablet_refresh_known_tablets", true, "Whether to reload the tablet's address/port map from topo in case they change.") - fs.IntVar(&topoReadConcurrency, "topo_read_concurrency", 32, "Concurrency of topo reads.") + fs.Int64Var(&healthCheckDialConcurrency, "healthcheck-dial-concurrency", 1024, "Maximum concurrency of new healthcheck connections. This should be less than the golang max thread limit of 10000.") ParseTabletURLTemplateFromFlag() } @@ -193,6 +198,9 @@ type HealthCheck interface { // CacheStatus returns a displayable version of the health check cache. CacheStatus() TabletsCacheStatusList + // HealthyStatus returns a displayable version of the health check healthy list. + HealthyStatus() TabletsCacheStatusList + // CacheStatusMap returns a map of the health check cache. CacheStatusMap() map[string]*TabletsCacheStatus @@ -206,7 +214,7 @@ type HealthCheck interface { WaitForAllServingTablets(ctx context.Context, targets []*query.Target) error // TabletConnection returns the TabletConn of the given tablet. 
- TabletConnection(alias *topodata.TabletAlias, target *query.Target) (queryservice.QueryService, error) + TabletConnection(ctx context.Context, alias *topodata.TabletAlias, target *query.Target) (queryservice.QueryService, error) // RegisterStats registers the connection counts stats RegisterStats() @@ -284,6 +292,8 @@ type HealthCheckImpl struct { subscribers map[chan *TabletHealth]struct{} // loadTablets trigger is used to immediately load a new primary tablet when the current one has been demoted loadTabletsTrigger chan struct{} + // healthCheckDialSem is used to limit how many healthcheck connections can be opened to tablets at once. + healthCheckDialSem *semaphore.Weighted } // NewHealthCheck creates a new HealthCheck object. @@ -318,6 +328,7 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur cell: localCell, retryDelay: retryDelay, healthCheckTimeout: healthCheckTimeout, + healthCheckDialSem: semaphore.NewWeighted(healthCheckDialConcurrency), healthByAlias: make(map[tabletAliasString]*tabletHealthCheck), healthData: make(map[KeyspaceShardTabletType]map[tabletAliasString]*TabletHealth), healthy: make(map[KeyspaceShardTabletType][]*TabletHealth), @@ -350,7 +361,7 @@ func NewHealthCheck(ctx context.Context, retryDelay, healthCheckTimeout time.Dur } else if len(KeyspacesToWatch) > 0 { filter = NewFilterByKeyspace(KeyspacesToWatch) } - topoWatchers = append(topoWatchers, NewCellTabletsWatcher(ctx, topoServer, hc, filter, c, refreshInterval, refreshKnownTablets, topoReadConcurrency)) + topoWatchers = append(topoWatchers, NewTopologyWatcher(ctx, topoServer, hc, filter, c, refreshInterval, refreshKnownTablets, topo.DefaultConcurrency)) } hc.topoWatchers = topoWatchers @@ -622,28 +633,55 @@ func (hc *HealthCheckImpl) CacheStatus() TabletsCacheStatusList { return tcsl } +// HealthyStatus returns a displayable version of the cache. 
+func (hc *HealthCheckImpl) HealthyStatus() TabletsCacheStatusList { + tcsMap := hc.HealthyStatusMap() + tcsl := make(TabletsCacheStatusList, 0, len(tcsMap)) + for _, tcs := range tcsMap { + tcsl = append(tcsl, tcs) + } + sort.Sort(tcsl) + return tcsl +} + func (hc *HealthCheckImpl) CacheStatusMap() map[string]*TabletsCacheStatus { tcsMap := make(map[string]*TabletsCacheStatus) hc.mu.Lock() defer hc.mu.Unlock() for _, ths := range hc.healthData { for _, th := range ths { - key := fmt.Sprintf("%v.%v.%v.%v", th.Tablet.Alias.Cell, th.Target.Keyspace, th.Target.Shard, th.Target.TabletType.String()) - var tcs *TabletsCacheStatus - var ok bool - if tcs, ok = tcsMap[key]; !ok { - tcs = &TabletsCacheStatus{ - Cell: th.Tablet.Alias.Cell, - Target: th.Target, - } - tcsMap[key] = tcs - } - tcs.TabletsStats = append(tcs.TabletsStats, th) + tabletHealthToTabletCacheStatus(th, tcsMap) } } return tcsMap } +func (hc *HealthCheckImpl) HealthyStatusMap() map[string]*TabletsCacheStatus { + tcsMap := make(map[string]*TabletsCacheStatus) + hc.mu.Lock() + defer hc.mu.Unlock() + for _, ths := range hc.healthy { + for _, th := range ths { + tabletHealthToTabletCacheStatus(th, tcsMap) + } + } + return tcsMap +} + +func tabletHealthToTabletCacheStatus(th *TabletHealth, tcsMap map[string]*TabletsCacheStatus) { + key := fmt.Sprintf("%v.%v.%v.%v", th.Tablet.Alias.Cell, th.Target.Keyspace, th.Target.Shard, th.Target.TabletType.String()) + var tcs *TabletsCacheStatus + var ok bool + if tcs, ok = tcsMap[key]; !ok { + tcs = &TabletsCacheStatus{ + Cell: th.Tablet.Alias.Cell, + Target: th.Target, + } + tcsMap[key] = tcs + } + tcs.TabletsStats = append(tcs.TabletsStats, th) +} + // Close stops the healthcheck. 
func (hc *HealthCheckImpl) Close() error { hc.mu.Lock() @@ -720,30 +758,8 @@ func (hc *HealthCheckImpl) WaitForAllServingTablets(ctx context.Context, targets return hc.waitForTablets(ctx, targets, true) } -// FilterTargetsByKeyspaces only returns the targets that are part of the provided keyspaces -func FilterTargetsByKeyspaces(keyspaces []string, targets []*query.Target) []*query.Target { - filteredTargets := make([]*query.Target, 0) - - // Keep them all if there are no keyspaces to watch - if len(KeyspacesToWatch) == 0 { - return append(filteredTargets, targets...) - } - - // Let's remove from the target shards that are not in the keyspaceToWatch list. - for _, target := range targets { - for _, keyspaceToWatch := range keyspaces { - if target.Keyspace == keyspaceToWatch { - filteredTargets = append(filteredTargets, target) - } - } - } - return filteredTargets -} - // waitForTablets is the internal method that polls for tablets. func (hc *HealthCheckImpl) waitForTablets(ctx context.Context, targets []*query.Target, requireServing bool) error { - targets = FilterTargetsByKeyspaces(KeyspacesToWatch, targets) - for { // We nil targets as we find them. allPresent := true @@ -812,7 +828,7 @@ func (hc *HealthCheckImpl) GetTabletHealth(kst KeyspaceShardTabletType, alias *t } // TabletConnection returns the Connection to a given tablet. 
-func (hc *HealthCheckImpl) TabletConnection(alias *topodata.TabletAlias, target *query.Target) (queryservice.QueryService, error) { +func (hc *HealthCheckImpl) TabletConnection(ctx context.Context, alias *topodata.TabletAlias, target *query.Target) (queryservice.QueryService, error) { hc.mu.Lock() thc := hc.healthByAlias[tabletAliasString(topoproto.TabletAliasString(alias))] hc.mu.Unlock() @@ -820,7 +836,7 @@ func (hc *HealthCheckImpl) TabletConnection(alias *topodata.TabletAlias, target // TODO: test that throws this error return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "tablet: %v is either down or nonexistent", alias) } - return thc.Connection(), nil + return thc.Connection(ctx, hc), nil } // getAliasByCell should only be called while holding hc.mu diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index 5fadc57eb2e..c87ba699234 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -672,27 +672,6 @@ func TestWaitForAllServingTablets(t *testing.T) { err = hc.WaitForAllServingTablets(ctx, targets) assert.NotNil(t, err, "error should not be nil (there are no tablets on this keyspace") - - targets = []*querypb.Target{ - - { - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - TabletType: tablet.Type, - }, - { - Keyspace: "newkeyspace", - Shard: tablet.Shard, - TabletType: tablet.Type, - }, - } - - KeyspacesToWatch = []string{tablet.Keyspace} - - err = hc.WaitForAllServingTablets(ctx, targets) - assert.Nil(t, err, "error should be nil. Keyspace with no tablets is filtered") - - KeyspacesToWatch = []string{} } // TestRemoveTablet tests the behavior when a tablet goes away. 
@@ -1267,7 +1246,7 @@ func TestTemplate(t *testing.T) { TabletsStats: ts, } templ := template.New("") - templ, err := templ.Parse(HealthCheckTemplate) + templ, err := templ.Parse(healthCheckTemplate) require.Nil(t, err, "error parsing template: %v", err) wr := &bytes.Buffer{} err = templ.Execute(wr, []*TabletsCacheStatus{tcs}) @@ -1295,7 +1274,7 @@ func TestDebugURLFormatting(t *testing.T) { TabletsStats: ts, } templ := template.New("") - templ, err := templ.Parse(HealthCheckTemplate) + templ, err := templ.Parse(healthCheckTemplate) require.Nil(t, err, "error parsing template") wr := &bytes.Buffer{} err = templ.Execute(wr, []*TabletsCacheStatus{tcs}) @@ -1304,7 +1283,7 @@ func TestDebugURLFormatting(t *testing.T) { require.Contains(t, wr.String(), expectedURL, "output missing formatted URL") } -func tabletDialer(tablet *topodatapb.Tablet, _ grpcclient.FailFast) (queryservice.QueryService, error) { +func tabletDialer(ctx context.Context, tablet *topodatapb.Tablet, _ grpcclient.FailFast) (queryservice.QueryService, error) { connMapMu.Lock() defer connMapMu.Unlock() diff --git a/go/vt/discovery/keyspace_events.go b/go/vt/discovery/keyspace_events.go index 163f240de8c..9fa457c1589 100644 --- a/go/vt/discovery/keyspace_events.go +++ b/go/vt/discovery/keyspace_events.go @@ -21,6 +21,7 @@ import ( "fmt" "sync" + "golang.org/x/sync/errgroup" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/key" @@ -93,18 +94,8 @@ func NewKeyspaceEventWatcher(ctx context.Context, topoServer srvtopo.Server, hc return kew } -type MoveTablesStatus int - -const ( - MoveTablesUnknown MoveTablesStatus = iota - // MoveTablesSwitching is set when the write traffic is the middle of being switched from the source to the target - MoveTablesSwitching - // MoveTablesSwitched is set when write traffic has been completely switched to the target - MoveTablesSwitched -) - // keyspaceState is the internal state for all the keyspaces that the KEW is -// currently watching +// currently watching. 
type keyspaceState struct { kew *KeyspaceEventWatcher keyspace string @@ -120,7 +111,7 @@ type keyspaceState struct { moveTablesState *MoveTablesState } -// Format prints the internal state for this keyspace for debug purposes +// Format prints the internal state for this keyspace for debug purposes. func (kss *keyspaceState) Format(f fmt.State, verb rune) { kss.mu.Lock() defer kss.mu.Unlock() @@ -137,9 +128,9 @@ func (kss *keyspaceState) Format(f fmt.State, verb rune) { fmt.Fprintf(f, "]\n") } -// beingResharded returns whether this keyspace is thought to be in the middle of a resharding -// operation. currentShard is the name of the shard that belongs to this keyspace and which -// we are trying to access. currentShard can _only_ be a primary shard. +// beingResharded returns whether this keyspace is thought to be in the middle of a +// resharding operation. currentShard is the name of the shard that belongs to this +// keyspace and which we are trying to access. currentShard can _only_ be a primary shard. func (kss *keyspaceState) beingResharded(currentShard string) bool { kss.mu.Lock() defer kss.mu.Unlock() @@ -179,11 +170,19 @@ type shardState struct { currentPrimary *topodatapb.TabletAlias } -// Subscribe returns a channel that will receive any KeyspaceEvents for all keyspaces in the current cell +// Subscribe returns a channel that will receive any KeyspaceEvents for all keyspaces in the +// current cell. func (kew *KeyspaceEventWatcher) Subscribe() chan *KeyspaceEvent { kew.subsMu.Lock() defer kew.subsMu.Unlock() - c := make(chan *KeyspaceEvent, 2) + // Use a decent size buffer to: + // 1. Avoid blocking the KEW + // 2. While not losing/missing any events + // 3. And processing them in the order received + // TODO: do we care about intermediate events? + // If not, then we could instead e.g. pull the first/oldest event + // from the channel, discard it, and add the current/latest. 
+ c := make(chan *KeyspaceEvent, 10) kew.subs[c] = struct{}{} return c } @@ -195,14 +194,11 @@ func (kew *KeyspaceEventWatcher) Unsubscribe(c chan *KeyspaceEvent) { delete(kew.subs, c) } -func (kew *KeyspaceEventWatcher) broadcast(th *KeyspaceEvent) { +func (kew *KeyspaceEventWatcher) broadcast(ev *KeyspaceEvent) { kew.subsMu.Lock() defer kew.subsMu.Unlock() for c := range kew.subs { - select { - case c <- th: - default: - } + c <- ev } } @@ -240,7 +236,8 @@ func (kew *KeyspaceEventWatcher) run(ctx context.Context) { } // ensureConsistentLocked checks if the current keyspace has recovered from an availability -// event, and if so, returns information about the availability event to all subscribers +// event, and if so, returns information about the availability event to all subscribers. +// Note: you MUST be holding the ks.mu when calling this function. func (kss *keyspaceState) ensureConsistentLocked() { // if this keyspace is consistent, there's no ongoing availability event if kss.consistent { @@ -285,7 +282,8 @@ func (kss *keyspaceState) ensureConsistentLocked() { } } - // clone the current moveTablesState, if any, to handle race conditions where it can get updated while we're broadcasting + // Clone the current moveTablesState, if any, to handle race conditions where it can get + // updated while we're broadcasting. 
var moveTablesState MoveTablesState if kss.moveTablesState != nil { moveTablesState = *kss.moveTablesState @@ -312,8 +310,8 @@ func (kss *keyspaceState) ensureConsistentLocked() { Serving: sstate.serving, }) - log.Infof("keyspace event resolved: %s/%s is now consistent (serving: %v)", - sstate.target.Keyspace, sstate.target.Keyspace, + log.Infof("keyspace event resolved: %s is now consistent (serving: %t)", + topoproto.KeyspaceShardString(sstate.target.Keyspace, sstate.target.Shard), sstate.serving, ) @@ -325,9 +323,10 @@ func (kss *keyspaceState) ensureConsistentLocked() { kss.kew.broadcast(ksevent) } -// onHealthCheck is the callback that updates this keyspace with event data from the HealthCheck stream. -// the HealthCheck stream applies to all the keyspaces in the cluster and emits TabletHealth events to our -// parent KeyspaceWatcher, which will mux them into their corresponding keyspaceState +// onHealthCheck is the callback that updates this keyspace with event data from the HealthCheck +// stream. The HealthCheck stream applies to all the keyspaces in the cluster and emits +// TabletHealth events to our parent KeyspaceWatcher, which will mux them into their +// corresponding keyspaceState. func (kss *keyspaceState) onHealthCheck(th *TabletHealth) { // we only care about health events on the primary if th.Target.TabletType != topodatapb.TabletType_PRIMARY { @@ -371,6 +370,17 @@ func (kss *keyspaceState) onHealthCheck(th *TabletHealth) { kss.ensureConsistentLocked() } +type MoveTablesStatus int + +const ( + MoveTablesUnknown MoveTablesStatus = iota + // MoveTablesSwitching is set when the write traffic is the middle of being switched from + // the source to the target. + MoveTablesSwitching + // MoveTablesSwitched is set when write traffic has been completely switched to the target. 
+ MoveTablesSwitched +) + type MoveTablesType int const ( @@ -384,34 +394,66 @@ type MoveTablesState struct { State MoveTablesStatus } +func (mts MoveTablesState) String() string { + var typ, state string + switch mts.Typ { + case MoveTablesRegular: + typ = "Regular" + case MoveTablesShardByShard: + typ = "ShardByShard" + default: + typ = "None" + } + switch mts.State { + case MoveTablesSwitching: + state = "Switching" + case MoveTablesSwitched: + state = "Switched" + default: + state = "Unknown" + } + return fmt.Sprintf("{Type: %s, State: %s}", typ, state) +} + func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTablesState, error) { mtState := &MoveTablesState{ Typ: MoveTablesNone, State: MoveTablesUnknown, } - // if there are no routing rules defined, then movetables is not in progress, exit early - if (vs.RoutingRules != nil && len(vs.RoutingRules.Rules) == 0) && - (vs.ShardRoutingRules != nil && len(vs.ShardRoutingRules.Rules) == 0) { + // If there are no routing rules defined, then movetables is not in progress, exit early. + if len(vs.GetRoutingRules().GetRules()) == 0 && len(vs.GetShardRoutingRules().GetRules()) == 0 { return mtState, nil } shortCtx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer cancel() - ts, _ := kss.kew.ts.GetTopoServer() - - // collect all current shard information from the topo + ts, err := kss.kew.ts.GetTopoServer() + if err != nil { + return mtState, err + } + // Collect all current shard information from the topo. 
var shardInfos []*topo.ShardInfo + mu := sync.Mutex{} + eg, ectx := errgroup.WithContext(shortCtx) for _, sstate := range kss.shards { - si, err := ts.GetShard(shortCtx, kss.keyspace, sstate.target.Shard) - if err != nil { - return nil, err - } - shardInfos = append(shardInfos, si) + eg.Go(func() error { + si, err := ts.GetShard(ectx, kss.keyspace, sstate.target.Shard) + if err != nil { + return err + } + mu.Lock() + defer mu.Unlock() + shardInfos = append(shardInfos, si) + return nil + }) + } + if err := eg.Wait(); err != nil { + return mtState, err } - // check if any shard has denied tables and if so, record one of these to check where it currently points to - // using the (shard) routing rules + // Check if any shard has denied tables and if so, record one of these to check where it + // currently points to using the (shard) routing rules. var shardsWithDeniedTables []string var oneDeniedTable string for _, si := range shardInfos { @@ -426,11 +468,11 @@ func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTa return mtState, nil } - // check if a shard by shard migration is in progress and if so detect if it has been switched - isPartialTables := vs.ShardRoutingRules != nil && len(vs.ShardRoutingRules.Rules) > 0 + // Check if a shard by shard migration is in progress and if so detect if it has been switched. 
+ isPartialTables := vs.GetShardRoutingRules() != nil && len(vs.GetShardRoutingRules().GetRules()) > 0 if isPartialTables { - srr := topotools.GetShardRoutingRulesMap(vs.ShardRoutingRules) + srr := topotools.GetShardRoutingRulesMap(vs.GetShardRoutingRules()) mtState.Typ = MoveTablesShardByShard mtState.State = MoveTablesSwitched for _, shard := range shardsWithDeniedTables { @@ -441,31 +483,32 @@ func (kss *keyspaceState) getMoveTablesStatus(vs *vschemapb.SrvVSchema) (*MoveTa break } } - log.Infof("getMoveTablesStatus: keyspace %s declaring partial move tables %v", kss.keyspace, mtState) + log.Infof("getMoveTablesStatus: keyspace %s declaring partial move tables %s", kss.keyspace, mtState.String()) return mtState, nil } - // it wasn't a shard by shard migration, but since we have denied tables it must be a regular MoveTables + // It wasn't a shard by shard migration, but since we have denied tables it must be a + // regular MoveTables. mtState.Typ = MoveTablesRegular mtState.State = MoveTablesSwitching - rr := topotools.GetRoutingRulesMap(vs.RoutingRules) + rr := topotools.GetRoutingRulesMap(vs.GetRoutingRules()) if rr != nil { r, ok := rr[oneDeniedTable] - // if a rule exists for the table and points to the target keyspace, writes have been switched + // If a rule exists for the table and points to the target keyspace, writes have been switched. if ok && len(r) > 0 && r[0] != fmt.Sprintf("%s.%s", kss.keyspace, oneDeniedTable) { mtState.State = MoveTablesSwitched log.Infof("onSrvKeyspace:: keyspace %s writes have been switched for table %s, rule %v", kss.keyspace, oneDeniedTable, r[0]) } } - log.Infof("getMoveTablesStatus: keyspace %s declaring regular move tables %v", kss.keyspace, mtState) + log.Infof("getMoveTablesStatus: keyspace %s declaring regular move tables %s", kss.keyspace, mtState.String()) return mtState, nil } -// onSrvKeyspace is the callback that updates this keyspace with fresh topology data from our topology server. 
-// this callback is called from a Watcher in the topo server whenever a change to the topology for this keyspace -// occurs. this watcher is dedicated to this keyspace, and will only yield topology metadata changes for as -// long as we're interested on this keyspace. +// onSrvKeyspace is the callback that updates this keyspace with fresh topology data from our +// topology server. this callback is called from a Watcher in the topo server whenever a change to +// the topology for this keyspace occurs. This watcher is dedicated to this keyspace, and will +// only yield topology metadata changes for as long as we're interested on this keyspace. func (kss *keyspaceState) onSrvKeyspace(newKeyspace *topodatapb.SrvKeyspace, newError error) bool { kss.mu.Lock() defer kss.mu.Unlock() @@ -479,23 +522,25 @@ func (kss *keyspaceState) onSrvKeyspace(newKeyspace *topodatapb.SrvKeyspace, new return false } - // if there's another kind of error while watching this keyspace, we assume it's temporary and related - // to the topology server, not to the keyspace itself. we'll keep waiting for more topology events. + // If there's another kind of error while watching this keyspace, we assume it's temporary and + // related to the topology server, not to the keyspace itself. we'll keep waiting for more + // topology events. if newError != nil { kss.lastError = newError log.Errorf("error while watching keyspace %q: %v", kss.keyspace, newError) return true } - // if the topology metadata for our keyspace is identical to the last one we saw there's nothing to do - // here. this is a side-effect of the way ETCD watchers work. + // If the topology metadata for our keyspace is identical to the last one we saw there's nothing to + // do here. this is a side-effect of the way ETCD watchers work. 
if proto.Equal(kss.lastKeyspace, newKeyspace) { // no changes return true } - // we only mark this keyspace as inconsistent if there has been a topology change in the PRIMARY for - // this keyspace, but we store the topology metadata for both primary and replicas for future-proofing. + // we only mark this keyspace as inconsistent if there has been a topology change in the PRIMARY + // for this keyspace, but we store the topology metadata for both primary and replicas for + // future-proofing. var oldPrimary, newPrimary *topodatapb.SrvKeyspace_KeyspacePartition if kss.lastKeyspace != nil { oldPrimary = topoproto.SrvKeyspaceGetPartition(kss.lastKeyspace, topodatapb.TabletType_PRIMARY) @@ -526,15 +571,24 @@ func (kss *keyspaceState) isServing() bool { // onSrvVSchema is called from a Watcher in the topo server whenever the SrvVSchema is updated by Vitess. // For the purposes here, we are interested in updates to the RoutingRules or ShardRoutingRules. -// In addition, the traffic switcher updates SrvVSchema when the DeniedTables attributes in a Shard record is -// modified. +// In addition, the traffic switcher updates SrvVSchema when the DeniedTables attributes in a Shard +// record is modified. func (kss *keyspaceState) onSrvVSchema(vs *vschemapb.SrvVSchema, err error) bool { + // The vschema can be nil if the server is currently shutting down. + if vs == nil { + return true + } + kss.mu.Lock() defer kss.mu.Unlock() - kss.moveTablesState, _ = kss.getMoveTablesStatus(vs) + var kerr error + if kss.moveTablesState, kerr = kss.getMoveTablesStatus(vs); err != nil { + log.Errorf("onSrvVSchema: keyspace %s failed to get move tables status: %v", kss.keyspace, kerr) + } if kss.moveTablesState != nil && kss.moveTablesState.Typ != MoveTablesNone { - // mark the keyspace as inconsistent. ensureConsistentLocked() checks if the workflow is switched, - // and if so, it will send an event to the buffering subscribers to indicate that buffering can be stopped. 
+ // Mark the keyspace as inconsistent. ensureConsistentLocked() checks if the workflow is + // switched, and if so, it will send an event to the buffering subscribers to indicate that + // buffering can be stopped. kss.consistent = false kss.ensureConsistentLocked() } @@ -556,8 +610,9 @@ func newKeyspaceState(ctx context.Context, kew *KeyspaceEventWatcher, cell, keys return kss } -// processHealthCheck is the callback that is called by the global HealthCheck stream that was initiated -// by this KeyspaceEventWatcher. it redirects the TabletHealth event to the corresponding keyspaceState +// processHealthCheck is the callback that is called by the global HealthCheck stream that was +// initiated by this KeyspaceEventWatcher. It redirects the TabletHealth event to the +// corresponding keyspaceState. func (kew *KeyspaceEventWatcher) processHealthCheck(ctx context.Context, th *TabletHealth) { kss := kew.getKeyspaceStatus(ctx, th.Target.Keyspace) if kss == nil { @@ -567,8 +622,8 @@ func (kew *KeyspaceEventWatcher) processHealthCheck(ctx context.Context, th *Tab kss.onHealthCheck(th) } -// getKeyspaceStatus returns the keyspaceState object for the corresponding keyspace, allocating it -// if we've never seen the keyspace before. +// getKeyspaceStatus returns the keyspaceState object for the corresponding keyspace, allocating +// it if we've never seen the keyspace before. func (kew *KeyspaceEventWatcher) getKeyspaceStatus(ctx context.Context, keyspace string) *keyspaceState { kew.mu.Lock() defer kew.mu.Unlock() @@ -608,15 +663,15 @@ func (kew *KeyspaceEventWatcher) TargetIsBeingResharded(ctx context.Context, tar } // PrimaryIsNotServing checks if the reason why the given target is not accessible right now is -// that the primary tablet for that shard is not serving. This is possible during a Planned Reparent Shard -// operation. Just as the operation completes, a new primary will be elected, and it will send its own healthcheck -// stating that it is serving. 
We should buffer requests until that point. -// There are use cases where people do not run with a Primary server at all, so we must verify that -// we only start buffering when a primary was present, and it went not serving. -// The shard state keeps track of the current primary and the last externally reparented time, which we can use -// to determine that there was a serving primary which now became non serving. This is only possible in a DemotePrimary -// RPC which are only called from ERS and PRS. So buffering will stop when these operations succeed. -// We return the tablet alias of the primary if it is serving. +// that the primary tablet for that shard is not serving. This is possible during a Planned +// Reparent Shard operation. Just as the operation completes, a new primary will be elected, and +// it will send its own healthcheck stating that it is serving. We should buffer requests until +// that point. There are use cases where people do not run with a Primary server at all, so we must +// verify that we only start buffering when a primary was present, and it went not serving. +// The shard state keeps track of the current primary and the last externally reparented time, which +// we can use to determine that there was a serving primary which now became non serving. This is +// only possible in a DemotePrimary RPC which are only called from ERS and PRS. So buffering will +// stop when these operations succeed. We return the tablet alias of the primary if it is serving. 
func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(ctx context.Context, target *querypb.Target) (*topodatapb.TabletAlias, bool) { if target.TabletType != topodatapb.TabletType_PRIMARY { return nil, false @@ -628,7 +683,8 @@ func (kew *KeyspaceEventWatcher) PrimaryIsNotServing(ctx context.Context, target ks.mu.Lock() defer ks.mu.Unlock() if state, ok := ks.shards[target.Shard]; ok { - // If the primary tablet was present then externallyReparented will be non-zero and currentPrimary will be not nil + // If the primary tablet was present then externallyReparented will be non-zero and + // currentPrimary will be not nil. return state.currentPrimary, !state.serving && !ks.consistent && state.externallyReparented != 0 && state.currentPrimary != nil } return nil, false diff --git a/go/vt/discovery/keyspace_events_test.go b/go/vt/discovery/keyspace_events_test.go index 43af4bf49de..bcaf48b62a8 100644 --- a/go/vt/discovery/keyspace_events_test.go +++ b/go/vt/discovery/keyspace_events_test.go @@ -19,6 +19,8 @@ package discovery import ( "context" "encoding/hex" + "sync" + "sync/atomic" "testing" "time" @@ -49,17 +51,71 @@ func TestSrvKeyspaceWithNilNewKeyspace(t *testing.T) { keyspace: keyspace, shards: make(map[string]*shardState), } - kss.lastKeyspace = &topodatapb.SrvKeyspace{ - ServedFrom: []*topodatapb.SrvKeyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_PRIMARY, - Keyspace: keyspace, - }, - }, - } + kss.lastKeyspace = &topodatapb.SrvKeyspace{} require.True(t, kss.onSrvKeyspace(nil, nil)) } +// TestKeyspaceEventConcurrency confirms that the keyspace event watcher +// does not fail to broadcast received keyspace events to subscribers. +// This verifies that no events are lost when there's a high number of +// concurrent keyspace events. 
+func TestKeyspaceEventConcurrency(t *testing.T) { + cell := "cell1" + factory := faketopo.NewFakeTopoFactory() + factory.AddCell(cell) + sts := &fakeTopoServer{} + hc := NewFakeHealthCheck(make(chan *TabletHealth)) + defer hc.Close() + kew := &KeyspaceEventWatcher{ + hc: hc, + ts: sts, + localCell: cell, + keyspaces: make(map[string]*keyspaceState), + subs: make(map[chan *KeyspaceEvent]struct{}), + } + + // Subscribe to the watcher's broadcasted keyspace events. + receiver := kew.Subscribe() + + updates := atomic.Uint32{} + updates.Store(0) + wg := sync.WaitGroup{} + concurrency := 100 + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + go func() { + for { + select { + case <-ctx.Done(): + return + case <-receiver: + updates.Add(1) + } + } + }() + // Start up concurent go-routines that will broadcast keyspace events. + for i := 1; i <= concurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + kew.broadcast(&KeyspaceEvent{}) + }() + } + wg.Wait() + for { + select { + case <-ctx.Done(): + require.Equal(t, concurrency, int(updates.Load()), "expected %d updates, got %d", concurrency, updates.Load()) + return + default: + if int(updates.Load()) == concurrency { // Pass + cancel() + return + } + } + } +} + // TestKeyspaceEventTypes confirms that the keyspace event watcher determines // that the unavailability event is caused by the correct scenario. We should // consider it to be caused by a resharding operation when the following @@ -309,6 +365,26 @@ func (f *fakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace stri return ks, nil } +// GetSrvVSchema returns the SrvVSchema for a cell. 
+func (f *fakeTopoServer) GetSrvVSchema(ctx context.Context, cell string) (*vschemapb.SrvVSchema, error) { + vs := &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "ks1": { + Sharded: true, + }, + }, + RoutingRules: &vschemapb.RoutingRules{ + Rules: []*vschemapb.RoutingRule{ + { + FromTable: "db1.t1", + ToTables: []string{"db1.t1"}, + }, + }, + }, + } + return vs, nil +} + func (f *fakeTopoServer) WatchSrvKeyspace(ctx context.Context, cell, keyspace string, callback func(*topodatapb.SrvKeyspace, error) bool) { ks, err := f.GetSrvKeyspace(ctx, cell, keyspace) callback(ks, err) @@ -318,5 +394,6 @@ func (f *fakeTopoServer) WatchSrvKeyspace(ctx context.Context, cell, keyspace st // the provided cell. It will call the callback when // a new value or an error occurs. func (f *fakeTopoServer) WatchSrvVSchema(ctx context.Context, cell string, callback func(*vschemapb.SrvVSchema, error) bool) { - + sv, err := f.GetSrvVSchema(ctx, cell) + callback(sv, err) } diff --git a/go/vt/discovery/tablet_health_check.go b/go/vt/discovery/tablet_health_check.go index 24496155e74..64450f4c8c6 100644 --- a/go/vt/discovery/tablet_health_check.go +++ b/go/vt/discovery/tablet_health_check.go @@ -19,6 +19,7 @@ package discovery import ( "context" "fmt" + "net" "strings" "sync" "sync/atomic" @@ -33,12 +34,16 @@ import ( "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" + "google.golang.org/grpc" "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/topodata" ) +// withDialerContextOnce ensures grpc.WithDialContext() is added once to the options. +var withDialerContextOnce sync.Once + // tabletHealthCheck maintains the health status of a tablet. A map of this // structure is maintained in HealthCheck. type tabletHealthCheck struct { @@ -122,8 +127,8 @@ func (thc *tabletHealthCheck) setServingState(serving bool, reason string) { } // stream streams healthcheck responses to callback. 
-func (thc *tabletHealthCheck) stream(ctx context.Context, callback func(*query.StreamHealthResponse) error) error { - conn := thc.Connection() +func (thc *tabletHealthCheck) stream(ctx context.Context, hc *HealthCheckImpl, callback func(*query.StreamHealthResponse) error) error { + conn := thc.Connection(ctx, hc) if conn == nil { // This signals the caller to retry return nil @@ -136,15 +141,35 @@ func (thc *tabletHealthCheck) stream(ctx context.Context, callback func(*query.S return err } -func (thc *tabletHealthCheck) Connection() queryservice.QueryService { +func (thc *tabletHealthCheck) Connection(ctx context.Context, hc *HealthCheckImpl) queryservice.QueryService { thc.connMu.Lock() defer thc.connMu.Unlock() - return thc.connectionLocked() + return thc.connectionLocked(ctx, hc) +} + +func healthCheckDialerFactory(hc *HealthCheckImpl) func(ctx context.Context, addr string) (net.Conn, error) { + return func(ctx context.Context, addr string) (net.Conn, error) { + // Limit the number of healthcheck connections opened in parallel to avoid high OS-thread + // usage due to blocking networking syscalls (eg: DNS lookups, TCP connection opens, + // etc). Without this limit it is possible for vtgates watching >10k tablets to hit + // the panic: 'runtime: program exceeds 10000-thread limit'. 
+ if err := hc.healthCheckDialSem.Acquire(ctx, 1); err != nil { + return nil, err + } + defer hc.healthCheckDialSem.Release(1) + var dialer net.Dialer + return dialer.DialContext(ctx, "tcp", addr) + } } -func (thc *tabletHealthCheck) connectionLocked() queryservice.QueryService { +func (thc *tabletHealthCheck) connectionLocked(ctx context.Context, hc *HealthCheckImpl) queryservice.QueryService { if thc.Conn == nil { - conn, err := tabletconn.GetDialer()(thc.Tablet, grpcclient.FailFast(true)) + withDialerContextOnce.Do(func() { + grpcclient.RegisterGRPCDialOptions(func(opts []grpc.DialOption) ([]grpc.DialOption, error) { + return append(opts, grpc.WithContextDialer(healthCheckDialerFactory(hc))), nil + }) + }) + conn, err := tabletconn.GetDialer()(ctx, thc.Tablet, grpcclient.FailFast(true)) if err != nil { thc.LastError = err return nil @@ -272,7 +297,7 @@ func (thc *tabletHealthCheck) checkConn(hc *HealthCheckImpl) { }() // Read stream health responses. - err := thc.stream(streamCtx, func(shr *query.StreamHealthResponse) error { + err := thc.stream(streamCtx, hc, func(shr *query.StreamHealthResponse) error { // We received a message. Reset the back-off. retryDelay = hc.retryDelay // Don't block on send to avoid deadlocks. 
diff --git a/go/vt/discovery/tablet_picker.go b/go/vt/discovery/tablet_picker.go index a507528d3a2..fd1ff64a3ce 100644 --- a/go/vt/discovery/tablet_picker.go +++ b/go/vt/discovery/tablet_picker.go @@ -20,7 +20,7 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "sort" "strings" "sync" @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletconn" querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) @@ -75,6 +76,16 @@ var ( } ) +// BuildTabletTypesString is a helper to build a serialized string representation of +// the tablet type(s) and optional in order clause for later use with the TabletPicker. +func BuildTabletTypesString(tabletTypes []topodatapb.TabletType, tabletSelectionPreference tabletmanagerdatapb.TabletSelectionPreference) string { + tabletTypesStr := topoproto.MakeStringTypeCSV(tabletTypes) + if tabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + tabletTypesStr = InOrderHint + tabletTypesStr + } + return tabletTypesStr +} + // GetTabletPickerRetryDelay synchronizes changes to tabletPickerRetryDelay. 
Used in tests only at the moment func GetTabletPickerRetryDelay() time.Duration { muTabletPickerRetryDelay.Lock() @@ -287,7 +298,7 @@ func (tp *TabletPicker) orderByTabletType(candidates []*topo.TabletInfo) []*topo sort.Slice(candidates, func(i, j int) bool { if orderMap[candidates[i].Type] == orderMap[candidates[j].Type] { // identical tablet types: randomize order of tablets for this type - return rand.Intn(2) == 0 // 50% chance + return rand.IntN(2) == 0 // 50% chance } return orderMap[candidates[i].Type] < orderMap[candidates[j].Type] }) @@ -428,7 +439,7 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() - tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases) + tabletMap, err := tp.ts.GetTabletMap(shortCtx, aliases, nil) if err != nil { log.Warningf("Error fetching tablets from topo: %v", err) // If we get a partial result we can still use it, otherwise return. @@ -446,7 +457,7 @@ func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletIn log.Warningf("Tablet picker failed to load tablet %v", tabletAlias) } else if topoproto.IsTypeInList(tabletInfo.Type, tp.tabletTypes) { // Try to connect to the tablet and confirm that it's usable. - if conn, err := tabletconn.GetDialer()(tabletInfo.Tablet, grpcclient.FailFast(true)); err == nil { + if conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, grpcclient.FailFast(true)); err == nil { // Ensure that the tablet is healthy and serving. 
shortCtx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer cancel() diff --git a/go/vt/discovery/topology_watcher.go b/go/vt/discovery/topology_watcher.go index b3298f55700..0b69ecb6a63 100644 --- a/go/vt/discovery/topology_watcher.go +++ b/go/vt/discovery/topology_watcher.go @@ -70,8 +70,7 @@ type TopologyWatcher struct { cell string refreshInterval time.Duration refreshKnownTablets bool - getTablets func(tw *TopologyWatcher) ([]*topodata.TabletAlias, error) - sem chan int + concurrency int ctx context.Context cancelFunc context.CancelFunc // wg keeps track of all launched Go routines. @@ -92,34 +91,28 @@ type TopologyWatcher struct { } // NewTopologyWatcher returns a TopologyWatcher that monitors all -// the tablets that it is configured to watch, and reloads them periodically if needed. -// As of now there is only one implementation: watch all tablets in a cell. -func NewTopologyWatcher(ctx context.Context, topoServer *topo.Server, hc HealthCheck, filter TabletFilter, cell string, refreshInterval time.Duration, refreshKnownTablets bool, topoReadConcurrency int, getTablets func(tw *TopologyWatcher) ([]*topodata.TabletAlias, error)) *TopologyWatcher { +// the tablets in a cell, and reloads them as needed. 
+func NewTopologyWatcher(ctx context.Context, topoServer *topo.Server, hc HealthCheck, f TabletFilter, cell string, refreshInterval time.Duration, refreshKnownTablets bool, topoReadConcurrency int) *TopologyWatcher { tw := &TopologyWatcher{ topoServer: topoServer, healthcheck: hc, - tabletFilter: filter, + tabletFilter: f, cell: cell, refreshInterval: refreshInterval, refreshKnownTablets: refreshKnownTablets, - getTablets: getTablets, - sem: make(chan int, topoReadConcurrency), + concurrency: topoReadConcurrency, tablets: make(map[string]*tabletInfo), } tw.firstLoadChan = make(chan struct{}) - // We want the span from the context, but not the cancelation that comes with it + // We want the span from the context, but not the cancellation that comes with it spanContext := trace.CopySpan(context.Background(), ctx) tw.ctx, tw.cancelFunc = context.WithCancel(spanContext) return tw } -// NewCellTabletsWatcher returns a TopologyWatcher that monitors all -// the tablets in a cell, and reloads them as needed. -func NewCellTabletsWatcher(ctx context.Context, topoServer *topo.Server, hc HealthCheck, f TabletFilter, cell string, refreshInterval time.Duration, refreshKnownTablets bool, topoReadConcurrency int) *TopologyWatcher { - return NewTopologyWatcher(ctx, topoServer, hc, f, cell, refreshInterval, refreshKnownTablets, topoReadConcurrency, func(tw *TopologyWatcher) ([]*topodata.TabletAlias, error) { - return tw.topoServer.GetTabletAliasesByCell(ctx, tw.cell) - }) +func (tw *TopologyWatcher) getTablets() ([]*topo.TabletInfo, error) { + return tw.topoServer.GetTabletsByCell(tw.ctx, tw.cell, &topo.GetTabletsByCellOptions{Concurrency: tw.concurrency}) } // Start starts the topology watcher. @@ -149,30 +142,33 @@ func (tw *TopologyWatcher) Stop() { } func (tw *TopologyWatcher) loadTablets() { - var wg sync.WaitGroup newTablets := make(map[string]*tabletInfo) + var partialResult bool - // First get the list of relevant tabletAliases. 
- tabletAliases, err := tw.getTablets(tw) + // First get the list of all tablets. + tabletInfos, err := tw.getTablets() topologyWatcherOperations.Add(topologyWatcherOpListTablets, 1) if err != nil { topologyWatcherErrors.Add(topologyWatcherOpListTablets, 1) - select { - case <-tw.ctx.Done(): + // If we get a partial result error, we just log it and process the tablets that we did manage to fetch. + if topo.IsErrType(err, topo.PartialResult) { + log.Errorf("received partial result from getTablets for cell %v: %v", tw.cell, err) + partialResult = true + } else { // For all other errors, just return. + log.Errorf("error getting tablets for cell: %v: %v", tw.cell, err) return - default: } - log.Errorf("cannot get tablets for cell: %v: %v", tw.cell, err) - return } // Accumulate a list of all known alias strings to use later // when sorting. - tabletAliasStrs := make([]string, 0, len(tabletAliases)) + tabletAliasStrs := make([]string, 0, len(tabletInfos)) tw.mu.Lock() - for _, tAlias := range tabletAliases { - aliasStr := topoproto.TabletAliasString(tAlias) + defer tw.mu.Unlock() + + for _, tInfo := range tabletInfos { + aliasStr := topoproto.TabletAliasString(tInfo.Alias) tabletAliasStrs = append(tabletAliasStrs, aliasStr) if !tw.refreshKnownTablets { @@ -182,38 +178,25 @@ func (tw *TopologyWatcher) loadTablets() { continue } } + // There's no network call here, so we just do the tablets one at a time instead of in parallel goroutines. + newTablets[aliasStr] = &tabletInfo{ + alias: aliasStr, + tablet: tInfo.Tablet, + } + } - wg.Add(1) - go func(alias *topodata.TabletAlias) { - defer wg.Done() - tw.sem <- 1 // Wait for active queue to drain. - tablet, err := tw.topoServer.GetTablet(tw.ctx, alias) - topologyWatcherOperations.Add(topologyWatcherOpGetTablet, 1) - <-tw.sem // Done; enable next request to run. 
- if err != nil { - topologyWatcherErrors.Add(topologyWatcherOpGetTablet, 1) - select { - case <-tw.ctx.Done(): - return - default: - } - log.Errorf("cannot get tablet for alias %v: %v", alias, err) - return - } - tw.mu.Lock() - aliasStr := topoproto.TabletAliasString(alias) - newTablets[aliasStr] = &tabletInfo{ - alias: aliasStr, - tablet: tablet.Tablet, + if partialResult { + // We don't want to remove any tablets from the tablets map or the healthcheck if we got a partial result + // because we don't know if they were actually deleted or if we simply failed to fetch them. + // Fill any gaps in the newTablets map using the existing tablets. + for alias, val := range tw.tablets { + if _, ok := newTablets[alias]; !ok { + tabletAliasStrs = append(tabletAliasStrs, alias) + newTablets[alias] = val } - tw.mu.Unlock() - }(tAlias) + } } - tw.mu.Unlock() - wg.Wait() - tw.mu.Lock() - for alias, newVal := range newTablets { if tw.tabletFilter != nil && !tw.tabletFilter.IsIncluded(newVal.tablet) { continue @@ -266,8 +249,6 @@ func (tw *TopologyWatcher) loadTablets() { tw.topoChecksum = crc32.ChecksumIEEE(buf.Bytes()) tw.lastRefresh = time.Now() - tw.mu.Unlock() - } // RefreshLag returns the time since the last refresh. 
diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go index 3ac567acef8..95c6e44ec43 100644 --- a/go/vt/discovery/topology_watcher_test.go +++ b/go/vt/discovery/topology_watcher_test.go @@ -18,7 +18,8 @@ package discovery import ( "context" - "math/rand" + "errors" + "math/rand/v2" "testing" "time" @@ -65,7 +66,7 @@ func TestStartAndCloseTopoWatcher(t *testing.T) { fhc := NewFakeHealthCheck(nil) defer fhc.Close() topologyWatcherOperations.ZeroAll() - tw := NewCellTabletsWatcher(context.Background(), ts, fhc, nil, "aa", 100*time.Microsecond, true, 5) + tw := NewTopologyWatcher(context.Background(), ts, fhc, nil, "aa", 100*time.Microsecond, true, 5) done := make(chan bool, 3) result := make(chan bool, 1) @@ -102,9 +103,8 @@ func TestStartAndCloseTopoWatcher(t *testing.T) { done <- true _, ok := <-result - if !ok { - t.Fatal("timed out") - } + require.True(t, ok, "timed out") + } func TestCellTabletsWatcher(t *testing.T) { @@ -125,7 +125,7 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { logger := logutil.NewMemoryLogger() topologyWatcherOperations.ZeroAll() counts := topologyWatcherOperations.Counts() - tw := NewCellTabletsWatcher(context.Background(), ts, fhc, nil, "aa", 10*time.Minute, refreshKnownTablets, 5) + tw := NewTopologyWatcher(context.Background(), ts, fhc, nil, "aa", 10*time.Minute, refreshKnownTablets, 5) counts = checkOpCounts(t, counts, map[string]int64{}) checkChecksum(t, tw, 0) @@ -143,19 +143,18 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { Keyspace: "keyspace", Shard: "shard", } - if err := ts.CreateTablet(context.Background(), tablet); err != nil { - t.Fatalf("CreateTablet failed: %v", err) - } + require.NoError(t, ts.CreateTablet(context.Background(), tablet), "CreateTablet failed for %v", tablet.Alias) + tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "AddTablet": 1}) + counts = checkOpCounts(t, counts, 
map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 1}) checkChecksum(t, tw, 3238442862) // Check the tablet is returned by GetAllTablets(). allTablets := fhc.GetAllTablets() key := TabletToMapKey(tablet) - if _, ok := allTablets[key]; !ok || len(allTablets) != 1 || !proto.Equal(allTablets[key], tablet) { - t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet) - } + assert.Len(t, allTablets, 1) + assert.Contains(t, allTablets, key) + assert.True(t, proto.Equal(tablet, allTablets[key])) // Add a second tablet to the topology. tablet2 := &topodatapb.Tablet{ @@ -170,75 +169,51 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { Keyspace: "keyspace", Shard: "shard", } - if err := ts.CreateTablet(context.Background(), tablet2); err != nil { - t.Fatalf("CreateTablet failed: %v", err) - } + require.NoError(t, ts.CreateTablet(context.Background(), tablet2), "CreateTablet failed for %v", tablet2.Alias) tw.loadTablets() - // If refreshKnownTablets is disabled, only the new tablet is read - // from the topo - if refreshKnownTablets { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "AddTablet": 1}) - } else { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "AddTablet": 1}) - } + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 1}) checkChecksum(t, tw, 2762153755) // Check the new tablet is returned by GetAllTablets(). 
allTablets = fhc.GetAllTablets() key = TabletToMapKey(tablet2) - if _, ok := allTablets[key]; !ok || len(allTablets) != 2 || !proto.Equal(allTablets[key], tablet2) { - t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet2) - } - - // Load the tablets again to show that when refreshKnownTablets is disabled, - // only the list is read from the topo and the checksum doesn't change - tw.loadTablets() - if refreshKnownTablets { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2}) - } else { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1}) - } - checkChecksum(t, tw, 2762153755) + assert.Len(t, allTablets, 2) + assert.Contains(t, allTablets, key) + assert.True(t, proto.Equal(tablet2, allTablets[key])) // same tablet, different port, should update (previous // one should go away, new one be added) // // if refreshKnownTablets is disabled, this case is *not* - // detected and the tablet remains in the topo using the + // detected and the tablet remains in the healthcheck using the // old key origTablet := tablet.CloneVT() origKey := TabletToMapKey(tablet) tablet.PortMap["vt"] = 456 - if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { + _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { t.PortMap["vt"] = 456 return nil - }); err != nil { - t.Fatalf("UpdateTabletFields failed: %v", err) - } + }) + require.Nil(t, err, "UpdateTabletFields failed") + tw.loadTablets() allTablets = fhc.GetAllTablets() key = TabletToMapKey(tablet) if refreshKnownTablets { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "ReplaceTablet": 1}) - - if _, ok := allTablets[key]; !ok || len(allTablets) != 2 || !proto.Equal(allTablets[key], tablet) { - t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet) - } - if _, ok := allTablets[origKey]; ok { - t.Errorf("fhc.GetAllTablets() 
= %+v; don't want %v", allTablets, origKey) - } + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 1}) + assert.Len(t, allTablets, 2) + assert.Contains(t, allTablets, key) + assert.True(t, proto.Equal(tablet, allTablets[key])) + assert.NotContains(t, allTablets, origKey) checkChecksum(t, tw, 2762153755) } else { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1}) - - if _, ok := allTablets[origKey]; !ok || len(allTablets) != 2 || !proto.Equal(allTablets[origKey], origTablet) { - t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, origTablet) - } - if _, ok := allTablets[key]; ok { - t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key) - } + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 0}) + assert.Len(t, allTablets, 2) + assert.Contains(t, allTablets, origKey) + assert.True(t, proto.Equal(origTablet, allTablets[origKey])) + assert.NotContains(t, allTablets, key) checkChecksum(t, tw, 2762153755) } @@ -248,94 +223,77 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { if refreshKnownTablets { origTablet := tablet.CloneVT() origTablet2 := tablet2.CloneVT() - if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { + _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { t.Hostname = tablet.Hostname t.PortMap = tablet.PortMap tablet2 = t return nil - }); err != nil { - t.Fatalf("UpdateTabletFields failed: %v", err) - } - if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { + }) + require.Nil(t, err, "UpdateTabletFields failed") + _, err = ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { t.Hostname = "host3" tablet = t return nil - }); err != nil { - t.Fatalf("UpdateTabletFields failed: %v", err) - } + }) + 
require.Nil(t, err, "UpdateTabletFields failed") tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "ReplaceTablet": 2}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 2}) allTablets = fhc.GetAllTablets() key2 := TabletToMapKey(tablet2) - if _, ok := allTablets[key2]; !ok { - t.Fatalf("tablet was lost because it's reusing an address recently used by another tablet: %v", key2) - } + assert.Contains(t, allTablets, key2, "tablet was lost because it's reusing an address recently used by another tablet: %v", key2) // Change tablets back to avoid altering later tests. - if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { + _, err = ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { t.Hostname = origTablet2.Hostname t.PortMap = origTablet2.PortMap tablet2 = t return nil - }); err != nil { - t.Fatalf("UpdateTabletFields failed: %v", err) - } - if _, err := ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { + }) + require.Nil(t, err, "UpdateTabletFields failed") + + _, err = ts.UpdateTabletFields(context.Background(), tablet.Alias, func(t *topodatapb.Tablet) error { t.Hostname = origTablet.Hostname tablet = t return nil - }); err != nil { - t.Fatalf("UpdateTabletFields failed: %v", err) - } + }) + require.Nil(t, err, "UpdateTabletFields failed") + tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 2, "ReplaceTablet": 2}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "ReplaceTablet": 2}) } // Remove the tablet and check that it is detected as being gone. 
- if err := ts.DeleteTablet(context.Background(), tablet.Alias); err != nil { - t.Fatalf("DeleteTablet failed: %v", err) - } - if _, err := topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard"); err != nil { - t.Fatalf("FixShardReplication failed: %v", err) - } + require.NoError(t, ts.DeleteTablet(context.Background(), tablet.Alias)) + + _, err = topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard") + require.Nil(t, err, "FixShardReplication failed") tw.loadTablets() - if refreshKnownTablets { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "RemoveTablet": 1}) - } else { - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "RemoveTablet": 1}) - } + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "RemoveTablet": 1}) checkChecksum(t, tw, 789108290) allTablets = fhc.GetAllTablets() + assert.Len(t, allTablets, 1) key = TabletToMapKey(tablet) - if _, ok := allTablets[key]; ok || len(allTablets) != 1 { - t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key) - } + assert.NotContains(t, allTablets, key) + key = TabletToMapKey(tablet2) - if _, ok := allTablets[key]; !ok || len(allTablets) != 1 || !proto.Equal(allTablets[key], tablet2) { - t.Errorf("fhc.GetAllTablets() = %+v; want %+v", allTablets, tablet2) - } + assert.Contains(t, allTablets, key) + assert.True(t, proto.Equal(tablet2, allTablets[key])) // Remove the other and check that it is detected as being gone. 
- if err := ts.DeleteTablet(context.Background(), tablet2.Alias); err != nil { - t.Fatalf("DeleteTablet failed: %v", err) - } - if _, err := topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard"); err != nil { - t.Fatalf("FixShardReplication failed: %v", err) - } + require.NoError(t, ts.DeleteTablet(context.Background(), tablet2.Alias)) + _, err = topo.FixShardReplication(context.Background(), ts, logger, "aa", "keyspace", "shard") + require.Nil(t, err, "FixShardReplication failed") tw.loadTablets() checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "RemoveTablet": 1}) checkChecksum(t, tw, 0) allTablets = fhc.GetAllTablets() + assert.Len(t, allTablets, 0) key = TabletToMapKey(tablet) - if _, ok := allTablets[key]; ok || len(allTablets) != 0 { - t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key) - } + assert.NotContains(t, allTablets, key) key = TabletToMapKey(tablet2) - if _, ok := allTablets[key]; ok || len(allTablets) != 0 { - t.Errorf("fhc.GetAllTablets() = %+v; don't want %v", allTablets, key) - } + assert.NotContains(t, allTablets, key) tw.Stop() } @@ -402,19 +360,13 @@ func TestFilterByShard(t *testing.T) { for _, tc := range testcases { fbs, err := NewFilterByShard(tc.filters) - if err != nil { - t.Errorf("cannot create FilterByShard for filters %v: %v", tc.filters, err) - } + require.Nil(t, err, "cannot create FilterByShard for filters %v", tc.filters) tablet := &topodatapb.Tablet{ Keyspace: tc.keyspace, Shard: tc.shard, } - - got := fbs.IsIncluded(tablet) - if got != tc.included { - t.Errorf("isIncluded(%v,%v) for filters %v returned %v but expected %v", tc.keyspace, tc.shard, tc.filters, got, tc.included) - } + require.Equal(t, tc.included, fbs.IsIncluded(tablet)) } } @@ -444,11 +396,11 @@ func TestFilterByKeyspace(t *testing.T) { f := NewFilterByKeyspace(testKeyspacesToWatch) ts := memorytopo.NewServer(ctx, testCell) defer ts.Close() - tw := 
NewCellTabletsWatcher(context.Background(), ts, hc, f, testCell, 10*time.Minute, true, 5) + tw := NewTopologyWatcher(context.Background(), ts, hc, f, testCell, 10*time.Minute, true, 5) for _, test := range testFilterByKeyspace { // Add a new tablet to the topology. - port := rand.Int31n(1000) + port := rand.Int32N(1000) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: testCell, @@ -462,22 +414,21 @@ func TestFilterByKeyspace(t *testing.T) { Shard: testShard, } - got := f.IsIncluded(tablet) - if got != test.expected { - t.Errorf("isIncluded(%v) for keyspace %v returned %v but expected %v", test.keyspace, test.keyspace, got, test.expected) - } + assert.Equal(t, test.expected, f.IsIncluded(tablet)) - if err := ts.CreateTablet(context.Background(), tablet); err != nil { - t.Errorf("CreateTablet failed: %v", err) - } + // Make this fatal because there is no point continuing if CreateTablet fails + require.NoError(t, ts.CreateTablet(context.Background(), tablet)) tw.loadTablets() key := TabletToMapKey(tablet) allTablets := hc.GetAllTablets() - if _, ok := allTablets[key]; ok != test.expected && proto.Equal(allTablets[key], tablet) != test.expected { - t.Errorf("Error adding tablet - got %v; want %v", ok, test.expected) + if test.expected { + assert.Contains(t, allTablets, key) + } else { + assert.NotContains(t, allTablets, key) } + assert.Equal(t, test.expected, proto.Equal(tablet, allTablets[key])) // Replace the tablet we added above tabletReplacement := &topodatapb.Tablet{ @@ -492,35 +443,31 @@ func TestFilterByKeyspace(t *testing.T) { Keyspace: test.keyspace, Shard: testShard, } - got = f.IsIncluded(tabletReplacement) - if got != test.expected { - t.Errorf("isIncluded(%v) for keyspace %v returned %v but expected %v", test.keyspace, test.keyspace, got, test.expected) - } - if err := ts.CreateTablet(context.Background(), tabletReplacement); err != nil { - t.Errorf("CreateTablet failed: %v", err) - } + assert.Equal(t, test.expected, 
f.IsIncluded(tabletReplacement)) + require.NoError(t, ts.CreateTablet(context.Background(), tabletReplacement)) tw.loadTablets() key = TabletToMapKey(tabletReplacement) allTablets = hc.GetAllTablets() - if _, ok := allTablets[key]; ok != test.expected && proto.Equal(allTablets[key], tabletReplacement) != test.expected { - t.Errorf("Error replacing tablet - got %v; want %v", ok, test.expected) + if test.expected { + assert.Contains(t, allTablets, key) + } else { + assert.NotContains(t, allTablets, key) } + assert.Equal(t, test.expected, proto.Equal(tabletReplacement, allTablets[key])) // Delete the tablet - if err := ts.DeleteTablet(context.Background(), tabletReplacement.Alias); err != nil { - t.Fatalf("DeleteTablet failed: %v", err) - } + require.NoError(t, ts.DeleteTablet(context.Background(), tabletReplacement.Alias)) } } -// TestFilterByKeypsaceSkipsIgnoredTablets confirms a bug fix for the case when a TopologyWatcher +// TestFilterByKeyspaceSkipsIgnoredTablets confirms a bug fix for the case when a TopologyWatcher // has a FilterByKeyspace TabletFilter configured along with refreshKnownTablets turned off. 
We want // to ensure that the TopologyWatcher: -// - does not continuosly call GetTablets for tablets that do not satisfy the filter -// - does not add or remove these filtered out tablets from the its healtcheck -func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { +// - does not continuously call GetTablets for tablets that do not satisfy the filter +// - does not add or remove these filtered out tablets from its healthcheck +func TestFilterByKeyspaceSkipsIgnoredTablets(t *testing.T) { ctx := utils.LeakCheckContext(t) ts := memorytopo.NewServer(ctx, "aa") @@ -530,7 +477,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { topologyWatcherOperations.ZeroAll() counts := topologyWatcherOperations.Counts() f := NewFilterByKeyspace(testKeyspacesToWatch) - tw := NewCellTabletsWatcher(context.Background(), ts, fhc, f, "aa", 10*time.Minute, false /*refreshKnownTablets*/, 5) + tw := NewTopologyWatcher(context.Background(), ts, fhc, f, "aa", 10*time.Minute, false /*refreshKnownTablets*/, 5) counts = checkOpCounts(t, counts, map[string]int64{}) checkChecksum(t, tw, 0) @@ -551,7 +498,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { require.NoError(t, ts.CreateTablet(context.Background(), tablet)) tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1, "AddTablet": 1}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "AddTablet": 1}) checkChecksum(t, tw, 3238442862) // Check tablet is reported by HealthCheck @@ -576,7 +523,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { require.NoError(t, ts.CreateTablet(context.Background(), tablet2)) tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 1}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0}) checkChecksum(t, tw, 2762153755) // Check the new tablet is NOT reported by HealthCheck. 
@@ -588,7 +535,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { // Load the tablets again to show that when refreshKnownTablets is disabled, // only the list is read from the topo and the checksum doesn't change tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0}) checkChecksum(t, tw, 2762153755) // With refreshKnownTablets set to false, changes to the port map for the same tablet alias @@ -600,7 +547,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { require.NoError(t, err) tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0}) checkChecksum(t, tw, 2762153755) allTablets = fhc.GetAllTablets() @@ -616,7 +563,7 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { require.NoError(t, ts.DeleteTablet(context.Background(), tablet.Alias)) tw.loadTablets() - counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "RemoveTablet": 1}) + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0, "RemoveTablet": 1}) checkChecksum(t, tw, 789108290) assert.Empty(t, fhc.GetAllTablets()) @@ -624,9 +571,90 @@ func TestFilterByKeypsaceSkipsIgnoredTablets(t *testing.T) { require.NoError(t, ts.DeleteTablet(context.Background(), tablet2.Alias)) tw.loadTablets() - checkOpCounts(t, counts, map[string]int64{"ListTablets": 1}) + checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "GetTablet": 0}) checkChecksum(t, tw, 0) assert.Empty(t, fhc.GetAllTablets()) tw.Stop() } + +func TestGetTabletErrorDoesNotRemoveFromHealthcheck(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + ts, factory := memorytopo.NewServerAndFactory(ctx, "aa") + defer ts.Close() + fhc := NewFakeHealthCheck(nil) + defer fhc.Close() + topologyWatcherOperations.ZeroAll() + counts := 
topologyWatcherOperations.Counts() + tw := NewTopologyWatcher(context.Background(), ts, fhc, nil, "aa", 10*time.Minute, true, 5) + defer tw.Stop() + + // Force fallback to getting tablets individually. + factory.AddOperationError(memorytopo.List, ".*", topo.NewError(topo.NoImplementation, "List not supported")) + + counts = checkOpCounts(t, counts, map[string]int64{}) + checkChecksum(t, tw, 0) + + // Add a tablet to the topology. + tablet1 := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "aa", + Uid: 0, + }, + Hostname: "host1", + PortMap: map[string]int32{ + "vt": 123, + }, + Keyspace: "keyspace", + Shard: "shard", + } + require.NoError(t, ts.CreateTablet(ctx, tablet1), "CreateTablet failed for %v", tablet1.Alias) + + tw.loadTablets() + counts = checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "AddTablet": 1}) + checkChecksum(t, tw, 3238442862) + + // Check the tablet is returned by GetAllTablets(). + allTablets := fhc.GetAllTablets() + key1 := TabletToMapKey(tablet1) + assert.Len(t, allTablets, 1) + assert.Contains(t, allTablets, key1) + assert.True(t, proto.Equal(tablet1, allTablets[key1])) + + // Add a second tablet to the topology. + tablet2 := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "aa", + Uid: 2, + }, + Hostname: "host2", + PortMap: map[string]int32{ + "vt": 789, + }, + Keyspace: "keyspace", + Shard: "shard", + } + require.NoError(t, ts.CreateTablet(ctx, tablet2), "CreateTablet failed for %v", tablet2.Alias) + + // Cause the Get for the first tablet to fail. + factory.AddOperationError(memorytopo.Get, "tablets/aa-0000000000/Tablet", errors.New("fake error")) + + // Ensure that a topo Get error results in a partial results error. If not, the rest of this test is invalid. + _, err := ts.GetTabletsByCell(ctx, "aa", &topo.GetTabletsByCellOptions{}) + require.ErrorContains(t, err, "partial result") + + // Now force the error during loadTablets. 
+ tw.loadTablets() + checkOpCounts(t, counts, map[string]int64{"ListTablets": 1, "AddTablet": 1}) + checkChecksum(t, tw, 2762153755) + + // Ensure the first tablet is still returned by GetAllTablets() and the second tablet has been added. + allTablets = fhc.GetAllTablets() + key2 := TabletToMapKey(tablet2) + assert.Len(t, allTablets, 2) + assert.Contains(t, allTablets, key1) + assert.Contains(t, allTablets, key2) + assert.True(t, proto.Equal(tablet1, allTablets[key1])) + assert.True(t, proto.Equal(tablet2, allTablets[key2])) +} diff --git a/go/vt/discovery/utils.go b/go/vt/discovery/utils.go index 3a601830d35..253fead89a8 100644 --- a/go/vt/discovery/utils.go +++ b/go/vt/discovery/utils.go @@ -26,28 +26,6 @@ import ( // This file contains helper filter methods to process the unfiltered list of // tablets returned by HealthCheckImpl.GetTabletHealth*. -func TabletHealthReferenceListToValue(thl []*TabletHealth) []TabletHealth { - newTh := []TabletHealth{} - for _, th := range thl { - newTh = append(newTh, *th) - } - return newTh -} - -// RemoveUnhealthyTablets filters all unhealthy tablets out. -// NOTE: Non-serving tablets are considered healthy. -func RemoveUnhealthyTablets(tabletStatsList []TabletHealth) []TabletHealth { - result := make([]TabletHealth, 0, len(tabletStatsList)) - for _, ts := range tabletStatsList { - // Note we do not check the 'Serving' flag here. - if ts.LastError != nil || ts.Stats != nil && (ts.Stats.HealthError != "" || IsReplicationLagHigh(&ts)) { - continue - } - result = append(result, ts) - } - return result -} - func ParseTabletTypesAndOrder(tabletTypesStr string) ([]topodatapb.TabletType, bool, error) { inOrder := false if strings.HasPrefix(tabletTypesStr, InOrderHint) { diff --git a/go/vt/discovery/utils_test.go b/go/vt/discovery/utils_test.go deleted file mode 100644 index 27416da44b0..00000000000 --- a/go/vt/discovery/utils_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2018 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "errors" - "testing" - - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestRemoveUnhealthyTablets(t *testing.T) { - var testcases = []struct { - desc string - input []TabletHealth - want []TabletHealth - }{{ - desc: "tablets missing Stats", - input: []TabletHealth{replica(1), replica(2)}, - want: []TabletHealth{replica(1), replica(2)}, - }, { - desc: "all tablets healthy", - input: []TabletHealth{healthy(replica(1)), healthy(replica(2))}, - want: []TabletHealth{healthy(replica(1)), healthy(replica(2))}, - }, { - desc: "one unhealthy tablet (error)", - input: []TabletHealth{healthy(replica(1)), unhealthyError(replica(2))}, - want: []TabletHealth{healthy(replica(1))}, - }, { - desc: "one error tablet", - input: []TabletHealth{healthy(replica(1)), unhealthyLastError(replica(2))}, - want: []TabletHealth{healthy(replica(1))}, - }, { - desc: "one unhealthy tablet (lag)", - input: []TabletHealth{healthy(replica(1)), unhealthyLag(replica(2))}, - want: []TabletHealth{healthy(replica(1))}, - }, { - desc: "no filtering by tablet type", - input: []TabletHealth{healthy(primary(1)), healthy(replica(2)), healthy(rdonly(3))}, - want: []TabletHealth{healthy(primary(1)), healthy(replica(2)), healthy(rdonly(3))}, - }, { - desc: "non-serving tablets won't be removed", - input: []TabletHealth{notServing(healthy(replica(1)))}, - want: 
[]TabletHealth{notServing(healthy(replica(1)))}, - }} - - for _, tc := range testcases { - t.Run(tc.desc, func(t *testing.T) { - got := RemoveUnhealthyTablets(tc.input) - if len(got) != len(tc.want) { - t.Errorf("test case '%v' failed: RemoveUnhealthyTablets(%v) = %#v, want: %#v", tc.desc, tc.input, got, tc.want) - } else { - for i := range tc.want { - if !got[i].DeepEqual(&tc.want[i]) { - t.Errorf("test case '%v' failed: RemoveUnhealthyTablets(%v) = %#v, want: %#v", tc.desc, tc.input, got, tc.want) - } - } - } - }) - } -} - -func primary(uid uint32) TabletHealth { - return minimalTabletStats(uid, topodatapb.TabletType_PRIMARY) -} - -func replica(uid uint32) TabletHealth { - return minimalTabletStats(uid, topodatapb.TabletType_REPLICA) -} - -func rdonly(uid uint32) TabletHealth { - return minimalTabletStats(uid, topodatapb.TabletType_RDONLY) -} - -func minimalTabletStats(uid uint32, tabletType topodatapb.TabletType) TabletHealth { - return TabletHealth{ - Tablet: &topodatapb.Tablet{ - Alias: &topodatapb.TabletAlias{ - Uid: uid}, - }, - Target: &querypb.Target{ - Keyspace: "test_keyspace", - Shard: "-80", - TabletType: tabletType, - }, - Serving: true, - } -} - -func healthy(ts TabletHealth) TabletHealth { - ts.Stats = &querypb.RealtimeStats{ - ReplicationLagSeconds: uint32(1), - } - return ts -} - -func unhealthyLag(ts TabletHealth) TabletHealth { - ts.Stats = &querypb.RealtimeStats{ - ReplicationLagSeconds: uint32(3600), - } - return ts -} - -func unhealthyError(ts TabletHealth) TabletHealth { - ts.Stats = &querypb.RealtimeStats{ - HealthError: "unhealthy", - } - return ts -} - -func unhealthyLastError(ts TabletHealth) TabletHealth { - ts.LastError = errors.New("err") - return ts -} - -func notServing(ts TabletHealth) TabletHealth { - ts.Serving = false - return ts -} diff --git a/go/vt/env/env.go b/go/vt/env/env.go index 70feb43186c..186f81cd585 100644 --- a/go/vt/env/env.go +++ b/go/vt/env/env.go @@ -18,7 +18,6 @@ package env import ( "errors" - "fmt" "os" 
"os/exec" "path" @@ -30,9 +29,12 @@ const ( // DefaultVtDataRoot is the default value for VTROOT environment variable DefaultVtDataRoot = "/vt" // DefaultVtRoot is only required for hooks - DefaultVtRoot = "/usr/local/vitess" + DefaultVtRoot = "/usr/local/vitess" + mysqldSbinPath = "/usr/sbin/mysqld" ) +var errMysqldNotFound = errors.New("VT_MYSQL_ROOT is not set and no mysqld could be found in your PATH") + // VtRoot returns $VTROOT or tries to guess its value if it's not set. // This is the root for the 'vt' distribution, which contains bin/vttablet // for instance. @@ -64,25 +66,30 @@ func VtDataRoot() string { } // VtMysqlRoot returns the root for the mysql distribution, -// which contains bin/mysql CLI for instance. -// If it is not set, look for mysqld in the path. +// which contains the bin/mysql CLI for instance. +// If $VT_MYSQL_ROOT is not set, look for mysqld in the $PATH. func VtMysqlRoot() (string, error) { - // if the environment variable is set, use that + // If the environment variable is set, use that. if root := os.Getenv("VT_MYSQL_ROOT"); root != "" { return root, nil } - // otherwise let's look for mysqld in the PATH. - // ensure that /usr/sbin is included, as it might not be by default - // This is the default location for mysqld from packages. - newPath := fmt.Sprintf("/usr/sbin:%s", os.Getenv("PATH")) - os.Setenv("PATH", newPath) - path, err := exec.LookPath("mysqld") + getRoot := func(path string) string { + return filepath.Dir(filepath.Dir(path)) // Strip mysqld and [s]bin parts + } + binpath, err := exec.LookPath("mysqld") if err != nil { - return "", errors.New("VT_MYSQL_ROOT is not set and no mysqld could be found in your PATH") + // First see if /usr/sbin/mysqld exists as it might not be in + // the PATH by default and this is often the default location + // used by mysqld OS system packages (apt, dnf, etc). 
+ fi, err := os.Stat(mysqldSbinPath) + if err == nil /* file exists */ && fi.Mode().IsRegular() /* not a DIR or other special file */ && + fi.Mode()&0111 != 0 /* executable by anyone */ { + return getRoot(mysqldSbinPath), nil + } + return "", errMysqldNotFound } - path = filepath.Dir(filepath.Dir(path)) // strip mysqld, and the sbin - return path, nil + return getRoot(binpath), nil } // VtMysqlBaseDir returns the Mysql base directory, which diff --git a/go/vt/env/env_test.go b/go/vt/env/env_test.go index 4aa53a25bed..f91cdf94673 100644 --- a/go/vt/env/env_test.go +++ b/go/vt/env/env_test.go @@ -18,7 +18,10 @@ package env import ( "os" + "path/filepath" "testing" + + "github.com/stretchr/testify/require" ) func TestVtDataRoot(t *testing.T) { @@ -43,3 +46,82 @@ func TestVtDataRoot(t *testing.T) { t.Errorf("The value of VtDataRoot should be %v, not %v.", passed, root) } } + +func TestVtMysqlRoot(t *testing.T) { + envVar := "VT_MYSQL_ROOT" + originalMySQLRoot := os.Getenv(envVar) + defer os.Setenv(envVar, originalMySQLRoot) + originalPATH := os.Getenv("PATH") + defer os.Setenv("PATH", originalPATH) + + // The test directory is used to create our fake mysqld binary. 
+ testDir := t.TempDir() // This is automatically cleaned up + createExecutable := func(path string) error { + fullPath := testDir + path + err := os.MkdirAll(filepath.Dir(fullPath), 0755) + require.NoError(t, err) + return os.WriteFile(fullPath, []byte("test"), 0755) + } + + type testcase struct { + name string + preFunc func() error + vtMysqlRootEnvVal string + pathEnvVal string + expect string // The return value we expect from VtMysqlRoot() + expectErr string + } + testcases := []testcase{ + { + name: "VT_MYSQL_ROOT set", + vtMysqlRootEnvVal: "/home/mysql/binaries", + }, + { + name: "VT_MYSQL_ROOT empty; PATH set without /usr/sbin", + pathEnvVal: testDir + filepath.Dir(mysqldSbinPath) + + ":/usr/bin:/sbin:/bin:/usr/local/bin:/usr/local/sbin:/home/mysql/binaries", + preFunc: func() error { + return createExecutable(mysqldSbinPath) + }, + expect: testDir + "/usr", + }, + } + + // If /usr/sbin/mysqld exists, confirm that we find it even + // when /usr/sbin is not in the PATH. + _, err := os.Stat(mysqldSbinPath) + if err == nil { + t.Logf("Found %s, confirming auto detection behavior", mysqldSbinPath) + testcases = append(testcases, testcase{ + name: "VT_MYSQL_ROOT empty; PATH empty; mysqld in /usr/sbin", + expect: "/usr", + }) + } else { + testcases = append(testcases, testcase{ // Error expected + name: "VT_MYSQL_ROOT empty; PATH empty; mysqld not in /usr/sbin", + expectErr: errMysqldNotFound.Error(), + }) + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + if tc.preFunc != nil { + err := tc.preFunc() + require.NoError(t, err) + } + os.Setenv(envVar, tc.vtMysqlRootEnvVal) + os.Setenv("PATH", tc.pathEnvVal) + path, err := VtMysqlRoot() + if tc.expectErr != "" { + require.EqualError(t, err, tc.expectErr) + } else { + require.NoError(t, err) + } + if tc.vtMysqlRootEnvVal != "" { + // This should always be returned. 
+ tc.expect = tc.vtMysqlRootEnvVal + } + require.Equal(t, tc.expect, path) + }) + } +} diff --git a/go/vt/external/golib/sqlutils/sqlite_dialect_test.go b/go/vt/external/golib/sqlutils/sqlite_dialect_test.go index 039e42eefff..1298c379adf 100644 --- a/go/vt/external/golib/sqlutils/sqlite_dialect_test.go +++ b/go/vt/external/golib/sqlutils/sqlite_dialect_test.go @@ -25,6 +25,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -244,3 +245,70 @@ func TestToSqlite3GeneralConversions(t *testing.T) { require.Equal(t, result, "select group_concat( 'abc' , 'def') as s") } } + +func TestIsCreateIndex(t *testing.T) { + tests := []struct { + input string + expected bool + }{ + {"create index my_index on my_table(column);", true}, + {"CREATE INDEX my_index ON my_table(column);", true}, + {"create unique index my_index on my_table(column);", true}, + {"CREATE UNIQUE INDEX my_index ON my_table(column);", true}, + {"create index my_index on my_table(column) where condition;", true}, + {"create unique index my_index on my_table(column) where condition;", true}, + {"create table my_table(column);", false}, + {"drop index my_index on my_table;", false}, + {"alter table my_table add index my_index (column);", false}, + {"", false}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + result := IsCreateIndex(test.input) + assert.Equal(t, test.expected, result) + }) + } +} + +func TestIsDropIndex(t *testing.T) { + tests := []struct { + input string + expected bool + }{ + {"drop index my_index on my_table;", true}, + {"DROP INDEX my_index ON my_table;", true}, + {"drop index if exists my_index on my_table;", true}, + {"DROP INDEX IF EXISTS my_index ON my_table;", true}, + {"drop table my_table;", false}, + {"create index my_index on my_table(column);", false}, + {"alter table my_table add index my_index (column);", false}, + {"", false}, + } + + for _, test := range tests { + t.Run(test.input, 
func(t *testing.T) { + result := IsDropIndex(test.input) + assert.Equal(t, test.expected, result) + }) + } +} + +func TestToSqlite3Dialect(t *testing.T) { + tests := []struct { + input string + expected string + }{ + {"create table my_table(id int);", "create table my_table(id int);"}, + {"alter table my_table add column new_col int;", "alter table my_table add column new_col int;"}, + {"insert into my_table values (1);", "insert into my_table values (1);"}, + {"", ""}, + } + + for _, test := range tests { + t.Run(test.input, func(t *testing.T) { + result := ToSqlite3Dialect(test.input) + assert.Equal(t, test.expected, result) + }) + } +} diff --git a/go/vt/external/golib/sqlutils/sqlutils.go b/go/vt/external/golib/sqlutils/sqlutils.go index eb1cb8c8941..e55460d7783 100644 --- a/go/vt/external/golib/sqlutils/sqlutils.go +++ b/go/vt/external/golib/sqlutils/sqlutils.go @@ -28,6 +28,8 @@ import ( "sync" "time" + _ "modernc.org/sqlite" + "vitess.io/vitess/go/vt/log" ) @@ -40,7 +42,7 @@ type RowMap map[string]CellData // CellData is the result of a single (atomic) column in a single row type CellData sql.NullString -func (this *CellData) MarshalJSON() ([]byte, error) { +func (this CellData) MarshalJSON() ([]byte, error) { if this.Valid { return json.Marshal(this.String) } else { @@ -86,6 +88,11 @@ func (this *RowMap) GetInt64(key string) int64 { return res } +func (this *RowMap) GetFloat64(key string) float64 { + res, _ := strconv.ParseFloat(this.GetString(key), 64) + return res +} + func (this *RowMap) GetInt32(key string) int32 { res, _ := strconv.ParseInt(this.GetString(key), 10, 32) return int32(res) @@ -135,9 +142,9 @@ func (this *RowMap) GetTime(key string) time.Time { var knownDBs = make(map[string]*sql.DB) var knownDBsMutex = &sync.Mutex{} -// GetGenericDB returns a DB instance based on uri. +// GetSQLiteDB returns a SQLite DB instance based on DB file name. 
// bool result indicates whether the DB was returned from cache; err -func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { +func GetSQLiteDB(dataSourceName string) (*sql.DB, bool, error) { knownDBsMutex.Lock() defer func() { knownDBsMutex.Unlock() @@ -145,7 +152,7 @@ func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { var exists bool if _, exists = knownDBs[dataSourceName]; !exists { - if db, err := sql.Open(driverName, dataSourceName); err == nil { + if db, err := sql.Open("sqlite", dataSourceName); err == nil { knownDBs[dataSourceName] = db } else { return db, exists, err @@ -154,12 +161,6 @@ func GetGenericDB(driverName, dataSourceName string) (*sql.DB, bool, error) { return knownDBs[dataSourceName], exists, nil } -// GetSQLiteDB returns a SQLite DB instance based on DB file name. -// bool result indicates whether the DB was returned from cache; err -func GetSQLiteDB(dbFile string) (*sql.DB, bool, error) { - return GetGenericDB("sqlite", dbFile) -} - // RowToArray is a convenience function, typically not called directly, which maps a // single read database row into a NullString func RowToArray(rows *sql.Rows, columns []string) ([]CellData, error) { diff --git a/go/vt/external/golib/sqlutils/sqlutils_test.go b/go/vt/external/golib/sqlutils/sqlutils_test.go new file mode 100644 index 00000000000..a7ac8680072 --- /dev/null +++ b/go/vt/external/golib/sqlutils/sqlutils_test.go @@ -0,0 +1,255 @@ +/* + Copyright 2024 The Vitess Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sqlutils + +import ( + "database/sql" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRowMap(t *testing.T) { + tt := []struct { + name string + rowMap RowMap + expected any + }{ + { + "GetString", + RowMap{"key": CellData{String: "value"}}, + "value", + }, + { + "GetInt64", + RowMap{"key": CellData{String: "123"}}, + int64(123), + }, + { + "GetInt32", + RowMap{"key": CellData{String: "42"}}, + int32(42), + }, + { + "GetNullInt64", + RowMap{"key": CellData{String: "789"}}, + sql.NullInt64{Int64: 789, Valid: true}, + }, + { + "GetNullInt64 Error", + RowMap{"key": CellData{String: "foo"}}, + sql.NullInt64{Valid: false}, + }, + { + "GetInt", + RowMap{"key": CellData{String: "456"}}, + 456, + }, + { + "GetUint", + RowMap{"key": CellData{String: "123"}}, + uint(123), + }, + { + "GetUint64", + RowMap{"key": CellData{String: "999"}}, + uint64(999), + }, + { + "GetUint32", + RowMap{"key": CellData{String: "888"}}, + uint32(888), + }, + { + "GetBool", + RowMap{"key": CellData{String: "1"}}, + true, + }, + { + "GetTime", + RowMap{"key": CellData{String: "2024-01-24 12:34:56.789"}}, + time.Date(2024, time.January, 24, 12, 34, 56, 789000000, time.UTC), + }, + { + "GetTime Error", + RowMap{"key": CellData{String: "invalid_time_format"}}, + time.Time{}, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + switch tc.name { + case "GetString": + assert.Equal(t, tc.expected, tc.rowMap.GetString("key")) + case "GetInt64": + assert.Equal(t, tc.expected, tc.rowMap.GetInt64("key")) + case "GetInt32": + assert.Equal(t, tc.expected, tc.rowMap.GetInt32("key")) + case "GetNullInt64": + assert.Equal(t, tc.expected, tc.rowMap.GetNullInt64("key")) + case "GetNullInt64 Error": + assert.Equal(t, tc.expected, tc.rowMap.GetNullInt64("key")) + case "GetInt": + assert.Equal(t, tc.expected, tc.rowMap.GetInt("key")) + case "GetUint": 
+ assert.Equal(t, tc.expected, tc.rowMap.GetUint("key")) + case "GetUint64": + assert.Equal(t, tc.expected, tc.rowMap.GetUint64("key")) + case "GetUint32": + assert.Equal(t, tc.expected, tc.rowMap.GetUint32("key")) + case "GetBool": + assert.Equal(t, tc.expected, tc.rowMap.GetBool("key")) + case "GetTime": + assert.Equal(t, tc.expected, tc.rowMap.GetTime("key")) + case "GetTime Error": + assert.Equal(t, tc.expected, tc.rowMap.GetTime("key")) + } + }) + } +} + +func TestNullString(t *testing.T) { + cellData := CellData{String: "test_value", Valid: true} + + result := cellData.NullString() + + expected := &sql.NullString{String: "test_value", Valid: true} + assert.Equal(t, expected, result) +} + +func TestMarshalJSON(t *testing.T) { + tt := []struct { + name string + rowData RowData + expected string + }{ + {"Valid", RowData{{String: "value", Valid: true}}, `["value"]`}, + {"Invalid", RowData{{String: "", Valid: false}}, "[null]"}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result, err := tc.rowData.MarshalJSON() + assert.NoError(t, err) + assert.Equal(t, tc.expected, string(result)) + }) + } +} + +func TestUnmarshalJSON(t *testing.T) { + tt := []struct { + name string + input string + expected CellData + isError bool + }{ + {"Valid JSON", `"value"`, CellData{String: "value", Valid: true}, false}, + {"Invalid JSON", `"invalid_json`, CellData{}, true}, + {"Null JSON", `null`, CellData{String: "", Valid: true}, false}, //?? 
+ } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + var cellData CellData + err := cellData.UnmarshalJSON([]byte(tc.input)) + + if tc.isError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expected, cellData) + } + }) + } +} + +func TestQueryRowsMap(t *testing.T) { + tt := []struct { + name string + db *sql.DB + query string + onRowFunc func(RowMap) error + args []any + shouldErr bool + }{ + {"Error", nil, "", nil, nil, true}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + err := QueryRowsMap(tc.db, tc.query, tc.onRowFunc, tc.args...) + if tc.shouldErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestExecNoPrepare(t *testing.T) { + tt := []struct { + name string + db *sql.DB + query string + args []any + shouldErr bool + expect sql.Result + }{ + {"Error", nil, "", nil, true, nil}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + out, err := ExecNoPrepare(tc.db, tc.query, tc.args...) + if tc.shouldErr { + assert.Error(t, err) + assert.Nil(t, out) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expect, out) + } + }) + } +} + +func TestArgs(t *testing.T) { + args := []any{1, "abc", true} + expected := []any{1, "abc", true} + result := Args(args...) + assert.Equal(t, expected, result) +} + +func TestNilIfZero(t *testing.T) { + tt := []struct { + name string + i int64 + expected any + }{ + {"NonZero", int64(42), int64(42)}, + {"Zero", int64(0), nil}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result := NilIfZero(tc.i) + assert.Equal(t, tc.expected, result) + }) + } +} diff --git a/go/vt/graph/graph.go b/go/vt/graph/graph.go index 54668027008..1938cf4bf1c 100644 --- a/go/vt/graph/graph.go +++ b/go/vt/graph/graph.go @@ -18,19 +18,28 @@ package graph import ( "fmt" + "maps" "slices" "strings" ) +const ( + white int = iota + grey + black +) + // Graph is a generic graph implementation. 
type Graph[C comparable] struct { - edges map[C][]C + edges map[C][]C + orderedVertices []C } // NewGraph creates a new graph for the given comparable type. func NewGraph[C comparable]() *Graph[C] { return &Graph[C]{ - edges: map[C][]C{}, + edges: map[C][]C{}, + orderedVertices: []C{}, } } @@ -41,6 +50,7 @@ func (gr *Graph[C]) AddVertex(vertex C) { return } gr.edges[vertex] = []C{} + gr.orderedVertices = append(gr.orderedVertices, vertex) } // AddEdge adds an edge to the given Graph. @@ -73,10 +83,10 @@ func (gr *Graph[C]) Empty() bool { // HasCycles checks whether the given graph has a cycle or not. // We are using a well-known DFS based colouring algorithm to check for cycles. // Look at https://cp-algorithms.com/graph/finding-cycle.html for more details on the algorithm. -func (gr *Graph[C]) HasCycles() bool { +func (gr *Graph[C]) HasCycles() (bool, []C) { // If the graph is empty, then we don't need to check anything. if gr.Empty() { - return false + return false, nil } // Initialize the coloring map. // 0 represents white. @@ -85,35 +95,65 @@ func (gr *Graph[C]) HasCycles() bool { color := map[C]int{} for vertex := range gr.edges { // If any vertex is still white, we initiate a new DFS. - if color[vertex] == 0 { - if gr.hasCyclesDfs(color, vertex) { - return true + if color[vertex] == white { + if hasCycle, cycle := gr.hasCyclesDfs(color, vertex); hasCycle { + return true, cycle + } + } + } + return false, nil +} + +// GetCycles returns all known cycles in the graph. +// It returns a map of vertices to the cycle they are part of. +// We are using a well-known DFS based colouring algorithm to check for cycles. +// Look at https://cp-algorithms.com/graph/finding-cycle.html for more details on the algorithm. +func (gr *Graph[C]) GetCycles() (vertices map[C][]C) { + // If the graph is empty, then we don't need to check anything. + if gr.Empty() { + return nil + } + vertices = make(map[C][]C) + // Initialize the coloring map. + // 0 represents white. 
+ // 1 represents grey. + // 2 represents black. + color := map[C]int{} + for _, vertex := range gr.orderedVertices { + // If any vertex is still white, we initiate a new DFS. + if color[vertex] == white { + // We clone the colors because we wnt full coverage for all vertices. + // Otherwise, the algorithm is optimal and stop more-or-less after the first cycle. + color := maps.Clone(color) + if hasCycle, cycle := gr.hasCyclesDfs(color, vertex); hasCycle { + vertices[vertex] = cycle } } } - return false + return vertices } // hasCyclesDfs is a utility function for checking for cycles in a graph. // It runs a dfs from the given vertex marking each vertex as grey. During the dfs, // if we encounter a grey vertex, we know we have a cycle. We mark the visited vertices black // on finishing the dfs. -func (gr *Graph[C]) hasCyclesDfs(color map[C]int, vertex C) bool { +func (gr *Graph[C]) hasCyclesDfs(color map[C]int, vertex C) (bool, []C) { // Mark the vertex grey. - color[vertex] = 1 + color[vertex] = grey + result := []C{vertex} // Go over all the edges. for _, end := range gr.edges[vertex] { // If we encounter a white vertex, we continue the dfs. - if color[end] == 0 { - if gr.hasCyclesDfs(color, end) { - return true + if color[end] == white { + if hasCycle, cycle := gr.hasCyclesDfs(color, end); hasCycle { + return true, append(result, cycle...) } - } else if color[end] == 1 { + } else if color[end] == grey { // We encountered a grey vertex, we have a cycle. 
- return true + return true, append(result, end) } } // Mark the vertex black before finishing - color[vertex] = 2 - return false + color[vertex] = black + return false, nil } diff --git a/go/vt/graph/graph_test.go b/go/vt/graph/graph_test.go index bc334c7d225..3f762552556 100644 --- a/go/vt/graph/graph_test.go +++ b/go/vt/graph/graph_test.go @@ -82,7 +82,8 @@ func TestIntegerGraph(t *testing.T) { } require.Equal(t, tt.wantedGraph, graph.PrintGraph()) require.Equal(t, tt.wantEmpty, graph.Empty()) - require.Equal(t, tt.wantHasCycles, graph.HasCycles()) + hasCycle, _ := graph.HasCycles() + require.Equal(t, tt.wantHasCycles, hasCycle) }) } } @@ -95,6 +96,7 @@ func TestStringGraph(t *testing.T) { wantedGraph string wantEmpty bool wantHasCycles bool + wantCycles map[string][]string }{ { name: "empty graph", @@ -137,6 +139,13 @@ E - F F - A`, wantEmpty: false, wantHasCycles: true, + wantCycles: map[string][]string{ + "A": {"A", "B", "E", "F", "A"}, + "B": {"B", "E", "F", "A", "B"}, + "D": {"D", "E", "F", "A", "B", "E"}, + "E": {"E", "F", "A", "B", "E"}, + "F": {"F", "A", "B", "E", "F"}, + }, }, } for _, tt := range testcases { @@ -147,7 +156,16 @@ F - A`, } require.Equal(t, tt.wantedGraph, graph.PrintGraph()) require.Equal(t, tt.wantEmpty, graph.Empty()) - require.Equal(t, tt.wantHasCycles, graph.HasCycles()) + hasCycle, _ := graph.HasCycles() + require.Equal(t, tt.wantHasCycles, hasCycle) + if tt.wantCycles == nil { + tt.wantCycles = map[string][]string{} + } + actualCycles := graph.GetCycles() + if actualCycles == nil { + actualCycles = map[string][]string{} + } + require.Equal(t, tt.wantCycles, actualCycles) }) } } diff --git a/go/vt/grpcclient/client.go b/go/vt/grpcclient/client.go index b2ef0d4fb28..e9209277b7c 100644 --- a/go/vt/grpcclient/client.go +++ b/go/vt/grpcclient/client.go @@ -21,6 +21,7 @@ package grpcclient import ( "context" "crypto/tls" + "sync" "time" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" @@ -39,6 +40,7 @@ import ( ) var ( + 
grpcDialOptionsMu sync.Mutex keepaliveTime = 10 * time.Second keepaliveTimeout = 10 * time.Second initialConnWindowSize int @@ -86,16 +88,11 @@ var grpcDialOptions []func(opts []grpc.DialOption) ([]grpc.DialOption, error) // RegisterGRPCDialOptions registers an implementation of AuthServer. func RegisterGRPCDialOptions(grpcDialOptionsFunc func(opts []grpc.DialOption) ([]grpc.DialOption, error)) { + grpcDialOptionsMu.Lock() + defer grpcDialOptionsMu.Unlock() grpcDialOptions = append(grpcDialOptions, grpcDialOptionsFunc) } -// Dial creates a grpc connection to the given target. -// failFast is a non-optional parameter because callers are required to specify -// what that should be. -func Dial(target string, failFast FailFast, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - return DialContext(context.Background(), target, failFast, opts...) -} - // DialContext creates a grpc connection to the given target. Setup steps are // covered by the context deadline, and, if WithBlock is specified in the dial // options, connection establishment steps are covered by the context as well. @@ -134,12 +131,14 @@ func DialContext(ctx context.Context, target string, failFast FailFast, opts ... newopts = append(newopts, opts...) var err error + grpcDialOptionsMu.Lock() for _, grpcDialOptionInitializer := range grpcDialOptions { newopts, err = grpcDialOptionInitializer(newopts) if err != nil { log.Fatalf("There was an error initializing client grpc.DialOption: %v", err) } } + grpcDialOptionsMu.Unlock() newopts = append(newopts, interceptors()...) 
diff --git a/go/vt/grpcclient/client_auth_static.go b/go/vt/grpcclient/client_auth_static.go index 22f69569956..bbb91a9fa55 100644 --- a/go/vt/grpcclient/client_auth_static.go +++ b/go/vt/grpcclient/client_auth_static.go @@ -20,24 +20,35 @@ import ( "context" "encoding/json" "os" + "os/signal" + "sync" + "syscall" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "vitess.io/vitess/go/vt/servenv" ) var ( credsFile string // registered as --grpc_auth_static_client_creds in RegisterFlags // StaticAuthClientCreds implements client interface to be able to WithPerRPCCredentials _ credentials.PerRPCCredentials = (*StaticAuthClientCreds)(nil) + + clientCreds *StaticAuthClientCreds + clientCredsCancel context.CancelFunc + clientCredsErr error + clientCredsMu sync.Mutex + clientCredsSigChan chan os.Signal ) -// StaticAuthClientCreds holder for client credentials +// StaticAuthClientCreds holder for client credentials. type StaticAuthClientCreds struct { Username string Password string } -// GetRequestMetadata gets the request metadata as a map from StaticAuthClientCreds +// GetRequestMetadata gets the request metadata as a map from StaticAuthClientCreds. func (c *StaticAuthClientCreds) GetRequestMetadata(context.Context, ...string) (map[string]string, error) { return map[string]string{ "username": c.Username, @@ -47,30 +58,82 @@ func (c *StaticAuthClientCreds) GetRequestMetadata(context.Context, ...string) ( // RequireTransportSecurity indicates whether the credentials requires transport security. // Given that people can use this with or without TLS, at the moment we are not enforcing -// transport security +// transport security. func (c *StaticAuthClientCreds) RequireTransportSecurity() bool { return false } // AppendStaticAuth optionally appends static auth credentials if provided. 
func AppendStaticAuth(opts []grpc.DialOption) ([]grpc.DialOption, error) { - if credsFile == "" { - return opts, nil - } - data, err := os.ReadFile(credsFile) + creds, err := getStaticAuthCreds() if err != nil { return nil, err } - clientCreds := &StaticAuthClientCreds{} - err = json.Unmarshal(data, clientCreds) + if creds != nil { + grpcCreds := grpc.WithPerRPCCredentials(creds) + opts = append(opts, grpcCreds) + } + return opts, nil +} + +// ResetStaticAuth resets the static auth credentials. +func ResetStaticAuth() { + clientCredsMu.Lock() + defer clientCredsMu.Unlock() + if clientCredsCancel != nil { + clientCredsCancel() + clientCredsCancel = nil + } + clientCreds = nil + clientCredsErr = nil +} + +// getStaticAuthCreds returns the static auth creds and error. +func getStaticAuthCreds() (*StaticAuthClientCreds, error) { + clientCredsMu.Lock() + defer clientCredsMu.Unlock() + if credsFile != "" && clientCreds == nil { + var ctx context.Context + ctx, clientCredsCancel = context.WithCancel(context.Background()) + go handleClientCredsSignals(ctx) + clientCreds, clientCredsErr = loadStaticAuthCredsFromFile(credsFile) + } + return clientCreds, clientCredsErr +} + +// handleClientCredsSignals handles signals to reload client creds. +func handleClientCredsSignals(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case <-clientCredsSigChan: + if newCreds, err := loadStaticAuthCredsFromFile(credsFile); err == nil { + clientCredsMu.Lock() + clientCreds = newCreds + clientCredsErr = err + clientCredsMu.Unlock() + } + } + } +} + +// loadStaticAuthCredsFromFile loads static auth credentials from a file. 
+func loadStaticAuthCredsFromFile(path string) (*StaticAuthClientCreds, error) { + data, err := os.ReadFile(path) if err != nil { return nil, err } - creds := grpc.WithPerRPCCredentials(clientCreds) - opts = append(opts, creds) - return opts, nil + creds := &StaticAuthClientCreds{} + err = json.Unmarshal(data, creds) + return creds, err } func init() { + servenv.OnInit(func() { + clientCredsSigChan = make(chan os.Signal, 1) + signal.Notify(clientCredsSigChan, syscall.SIGHUP) + _, _ = getStaticAuthCreds() // preload static auth credentials + }) RegisterGRPCDialOptions(AppendStaticAuth) } diff --git a/go/vt/grpcclient/client_auth_static_test.go b/go/vt/grpcclient/client_auth_static_test.go new file mode 100644 index 00000000000..325a3f6042c --- /dev/null +++ b/go/vt/grpcclient/client_auth_static_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package grpcclient + +import ( + "fmt" + "os" + "reflect" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func init() { + clientCredsSigChan = make(chan os.Signal, 1) +} + +func TestAppendStaticAuth(t *testing.T) { + oldCredsFile := credsFile + opts := []grpc.DialOption{ + grpc.EmptyDialOption{}, + } + + tests := []struct { + name string + cFile string + expectedLen int + expectedErr string + }{ + { + name: "creds file not set", + expectedLen: 1, + }, + { + name: "non-existent creds file", + cFile: "./testdata/unknown.json", + expectedErr: "open ./testdata/unknown.json: no such file or directory", + }, + { + name: "valid creds file", + cFile: "./testdata/credsFile.json", + expectedLen: 2, + }, + { + name: "invalid creds file", + cFile: "./testdata/invalid.json", + expectedErr: "unexpected end of JSON input", + }, + } + + for _, tt := range tests { + t.Run(tt.cFile, func(t *testing.T) { + defer func() { + credsFile = oldCredsFile + }() + + if tt.cFile != "" { + credsFile = tt.cFile + } + dialOpts, err := AppendStaticAuth(opts) + if tt.expectedErr == "" { + require.NoError(t, err) + require.Equal(t, tt.expectedLen, len(dialOpts)) + } else { + require.ErrorContains(t, err, tt.expectedErr) + } + ResetStaticAuth() + require.Nil(t, clientCredsCancel) + }) + } +} + +func TestGetStaticAuthCreds(t *testing.T) { + oldCredsFile := credsFile + defer func() { + ResetStaticAuth() + credsFile = oldCredsFile + }() + tmp, err := os.CreateTemp("", t.Name()) + assert.Nil(t, err) + defer os.Remove(tmp.Name()) + credsFile = tmp.Name() + ResetStaticAuth() + + // load old creds + fmt.Fprint(tmp, `{"Username": "old", "Password": "123456"}`) + ResetStaticAuth() + creds, err := getStaticAuthCreds() + assert.Nil(t, err) + assert.Equal(t, &StaticAuthClientCreds{Username: "old", Password: "123456"}, creds) + + // write new creds to the same file + _ = tmp.Truncate(0) + _, _ = tmp.Seek(0, 
0) + fmt.Fprint(tmp, `{"Username": "new", "Password": "123456789"}`) + + // test the creds did not change yet + creds, err = getStaticAuthCreds() + assert.Nil(t, err) + assert.Equal(t, &StaticAuthClientCreds{Username: "old", Password: "123456"}, creds) + + // test SIGHUP signal triggers reload + credsOld := creds + clientCredsSigChan <- syscall.SIGHUP + timeoutChan := time.After(time.Second * 10) + for { + select { + case <-timeoutChan: + assert.Fail(t, "timed out waiting for SIGHUP reload of static auth creds") + return + default: + // confirm new creds get loaded + creds, err = getStaticAuthCreds() + if reflect.DeepEqual(creds, credsOld) { + continue // not changed yet + } + assert.Nil(t, err) + assert.Equal(t, &StaticAuthClientCreds{Username: "new", Password: "123456789"}, creds) + return + } + } +} + +func TestLoadStaticAuthCredsFromFile(t *testing.T) { + { + f, err := os.CreateTemp("", t.Name()) + if !assert.Nil(t, err) { + assert.FailNowf(t, "cannot create temp file: %s", err.Error()) + } + defer os.Remove(f.Name()) + fmt.Fprint(f, `{ + "Username": "test", + "Password": "correct horse battery staple" + }`) + if !assert.Nil(t, err) { + assert.FailNowf(t, "cannot read auth file: %s", err.Error()) + } + + creds, err := loadStaticAuthCredsFromFile(f.Name()) + assert.Nil(t, err) + assert.Equal(t, "test", creds.Username) + assert.Equal(t, "correct horse battery staple", creds.Password) + } + { + _, err := loadStaticAuthCredsFromFile(`does-not-exist`) + assert.NotNil(t, err) + } +} diff --git a/go/vt/grpcclient/client_flaky_test.go b/go/vt/grpcclient/client_flaky_test.go deleted file mode 100644 index edc6d9be98c..00000000000 --- a/go/vt/grpcclient/client_flaky_test.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package grpcclient - -import ( - "context" - "strings" - "testing" - "time" - - "google.golang.org/grpc/credentials/insecure" - - "google.golang.org/grpc" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtgateservicepb "vitess.io/vitess/go/vt/proto/vtgateservice" -) - -func TestDialErrors(t *testing.T) { - addresses := []string{ - "badhost", - "badhost:123456", - "[::]:12346", - } - wantErr := "Unavailable" - for _, address := range addresses { - gconn, err := Dial(address, true, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatal(err) - } - vtg := vtgateservicepb.NewVitessClient(gconn) - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - _, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{}) - cancel() - gconn.Close() - if err == nil || !strings.Contains(err.Error(), wantErr) { - t.Errorf("Dial(%s, FailFast=true): %v, must contain %s", address, err, wantErr) - } - } - - wantErr = "DeadlineExceeded" - for _, address := range addresses { - gconn, err := Dial(address, false, grpc.WithTransportCredentials(insecure.NewCredentials())) - if err != nil { - t.Fatal(err) - } - vtg := vtgateservicepb.NewVitessClient(gconn) - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) - _, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{}) - cancel() - gconn.Close() - if err == nil || !strings.Contains(err.Error(), wantErr) { - t.Errorf("Dial(%s, FailFast=false): %v, must contain %s", address, err, wantErr) - } - } -} diff --git a/go/vt/grpcclient/client_test.go b/go/vt/grpcclient/client_test.go 
new file mode 100644 index 00000000000..369ec8da17b --- /dev/null +++ b/go/vt/grpcclient/client_test.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package grpcclient + +import ( + "context" + "os" + "strings" + "testing" + "time" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/credentials/insecure" + + "google.golang.org/grpc" + + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtgateservicepb "vitess.io/vitess/go/vt/proto/vtgateservice" +) + +func TestDialErrors(t *testing.T) { + addresses := []string{ + "badhost", + "badhost:123456", + "[::]:12346", + } + wantErr := "Unavailable" + for _, address := range addresses { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + gconn, err := DialContext(ctx, address, true, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + cancel() + t.Fatal(err) + } + vtg := vtgateservicepb.NewVitessClient(gconn) + _, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{}) + cancel() + gconn.Close() + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Errorf("DialContext(%s, FailFast=true): %v, must contain %s", address, err, wantErr) + } + } + + wantErr = "DeadlineExceeded" + for _, address := range addresses { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + gconn, err := DialContext(ctx, address, false, 
grpc.WithTransportCredentials(insecure.NewCredentials())) + cancel() + if err != nil { + t.Fatal(err) + } + vtg := vtgateservicepb.NewVitessClient(gconn) + ctx, cancel = context.WithTimeout(context.Background(), 10*time.Millisecond) + _, err = vtg.Execute(ctx, &vtgatepb.ExecuteRequest{}) + cancel() + gconn.Close() + if err == nil || !strings.Contains(err.Error(), wantErr) { + t.Errorf("DialContext(%s, FailFast=false): %v, must contain %s", address, err, wantErr) + } + } +} + +func TestRegisterGRPCClientFlags(t *testing.T) { + oldArgs := os.Args + defer func() { + os.Args = oldArgs + }() + + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + RegisterFlags(fs) + + // Test current values + require.Equal(t, 10*time.Second, keepaliveTime) + require.Equal(t, 10*time.Second, keepaliveTimeout) + require.Equal(t, 0, initialWindowSize) + require.Equal(t, 0, initialConnWindowSize) + require.Equal(t, "", compression) + require.Equal(t, "", credsFile) + + // Test setting flags from command-line arguments + os.Args = []string{"test", "--grpc_keepalive_time=5s", "--grpc_keepalive_timeout=5s", "--grpc_initial_conn_window_size=10", "--grpc_initial_window_size=10", "--grpc_compression=not-snappy", "--grpc_auth_static_client_creds=tempfile"} + err := fs.Parse(os.Args[1:]) + require.NoError(t, err) + + require.Equal(t, 5*time.Second, keepaliveTime) + require.Equal(t, 5*time.Second, keepaliveTimeout) + require.Equal(t, 10, initialWindowSize) + require.Equal(t, 10, initialConnWindowSize) + require.Equal(t, "not-snappy", compression) + require.Equal(t, "tempfile", credsFile) +} diff --git a/go/vt/grpcclient/glogger_test.go b/go/vt/grpcclient/glogger_test.go new file mode 100644 index 00000000000..6b394ff7ef9 --- /dev/null +++ b/go/vt/grpcclient/glogger_test.go @@ -0,0 +1,87 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package grpcclient + +import ( + "io" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func captureOutput(t *testing.T, f func()) string { + oldVal := os.Stderr + t.Cleanup(func() { + // Ensure reset even if deferred function panics + os.Stderr = oldVal + }) + + r, w, err := os.Pipe() + require.NoError(t, err) + + os.Stderr = w + + f() + + err = w.Close() + require.NoError(t, err) + + got, err := io.ReadAll(r) + require.NoError(t, err) + + return string(got) +} + +func TestGlogger(t *testing.T) { + gl := glogger{} + + output := captureOutput(t, func() { + gl.Warning("warning") + }) + require.Contains(t, output, "warning") + + output = captureOutput(t, func() { + gl.Warningln("warningln") + }) + require.Contains(t, output, "warningln\n") + + output = captureOutput(t, func() { + gl.Warningf("formatted %s", "warning") + }) + require.Contains(t, output, "formatted warning") + +} + +func TestGloggerError(t *testing.T) { + gl := glogger{} + + output := captureOutput(t, func() { + gl.Error("error message") + }) + require.Contains(t, output, "error message") + + output = captureOutput(t, func() { + gl.Errorln("error message line") + }) + require.Contains(t, output, "error message line\n") + + output = captureOutput(t, func() { + gl.Errorf("this is a %s error message", "formatted") + }) + require.Contains(t, output, "this is a formatted error message") +} diff --git a/go/vt/grpcclient/snappy_test.go b/go/vt/grpcclient/snappy_test.go new file mode 100644 index 00000000000..41d205bf04d --- /dev/null +++ b/go/vt/grpcclient/snappy_test.go @@ -0,0 +1,62 @@ +/* 
+Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package grpcclient + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestCompressDecompress(t *testing.T) { + snappComp := SnappyCompressor{} + writer, err := snappComp.Compress(&bytes.Buffer{}) + require.NoError(t, err) + require.NotEmpty(t, writer) + + reader, err := snappComp.Decompress(&bytes.Buffer{}) + require.NoError(t, err) + require.NotEmpty(t, reader) +} + +func TestAppendCompression(t *testing.T) { + oldCompression := compression + defer func() { + compression = oldCompression + }() + + dialOpts := []grpc.DialOption{} + dialOpts, err := appendCompression(dialOpts) + require.NoError(t, err) + require.Equal(t, 0, len(dialOpts)) + + // Change the compression to snappy + compression = "snappy" + + dialOpts, err = appendCompression(dialOpts) + require.NoError(t, err) + require.Equal(t, 1, len(dialOpts)) + + // Change the compression to some unknown value + compression = "unknown" + + dialOpts, err = appendCompression(dialOpts) + require.NoError(t, err) + require.Equal(t, 1, len(dialOpts)) +} diff --git a/go/vt/grpcclient/testdata/credsFile.json b/go/vt/grpcclient/testdata/credsFile.json new file mode 100644 index 00000000000..e036126f78e --- /dev/null +++ b/go/vt/grpcclient/testdata/credsFile.json @@ -0,0 +1,4 @@ +{ + "Username": "test-user", + "Password": "test-pass" +} \ No newline at end of file diff --git 
a/go/vt/grpcclient/testdata/invalid.json b/go/vt/grpcclient/testdata/invalid.json new file mode 100644 index 00000000000..81750b96f9d --- /dev/null +++ b/go/vt/grpcclient/testdata/invalid.json @@ -0,0 +1 @@ +{ \ No newline at end of file diff --git a/go/vt/hook/hook.go b/go/vt/hook/hook.go index 6cee35e4241..4f402cdcb44 100644 --- a/go/vt/hook/hook.go +++ b/go/vt/hook/hook.go @@ -17,7 +17,6 @@ limitations under the License. package hook import ( - "bytes" "context" "errors" "fmt" @@ -147,7 +146,7 @@ func (hook *Hook) ExecuteContext(ctx context.Context) (result *HookResult) { } // Run it. - var stdout, stderr bytes.Buffer + var stdout, stderr strings.Builder cmd.Stdout = &stdout cmd.Stderr = &stderr @@ -234,7 +233,7 @@ func (hook *Hook) ExecuteAsWritePipe(out io.Writer) (io.WriteCloser, WaitFunc, i return nil, nil, HOOK_GENERIC_ERROR, fmt.Errorf("failed to configure stdin: %v", err) } cmd.Stdout = out - var stderr bytes.Buffer + var stderr strings.Builder cmd.Stderr = &stderr // Start the process. @@ -273,7 +272,7 @@ func (hook *Hook) ExecuteAsReadPipe(in io.Reader) (io.Reader, WaitFunc, int, err return nil, nil, HOOK_GENERIC_ERROR, fmt.Errorf("failed to configure stdout: %v", err) } cmd.Stdin = in - var stderr bytes.Buffer + var stderr strings.Builder cmd.Stderr = &stderr // Start the process. diff --git a/go/vt/hook/hook_test.go b/go/vt/hook/hook_test.go index 041e568e5ff..f5064175768 100644 --- a/go/vt/hook/hook_test.go +++ b/go/vt/hook/hook_test.go @@ -1,10 +1,29 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + package hook import ( "context" + "io" "os" "os/exec" "path" + "strings" + "sync" "testing" "time" @@ -22,6 +41,11 @@ func TestExecuteContext(t *testing.T) { require.NoError(t, err) sleepHookPath := path.Join(vtroot, "vthook", "sleep") + + if _, err := os.Lstat(sleepHookPath); err == nil { + require.NoError(t, os.Remove(sleepHookPath)) + } + require.NoError(t, os.Symlink(sleep, sleepHookPath)) defer func() { require.NoError(t, os.Remove(sleepHookPath)) @@ -38,3 +62,234 @@ func TestExecuteContext(t *testing.T) { hr = h.Execute() assert.Equal(t, HOOK_SUCCESS, hr.ExitStatus) } + +func TestExecuteOptional(t *testing.T) { + vtroot, err := vtenv.VtRoot() + require.NoError(t, err) + + echo, err := exec.LookPath("echo") + require.NoError(t, err) + + echoHookPath := path.Join(vtroot, "vthook", "echo") + + if _, err := os.Lstat(echoHookPath); err == nil { + require.NoError(t, os.Remove(echoHookPath)) + } + + require.NoError(t, os.Symlink(echo, echoHookPath)) + defer func() { + require.NoError(t, os.Remove(echoHookPath)) + }() + tt := []struct { + name string + hookName string + parameters []string + expectedError string + }{ + { + name: "HookSuccess", + hookName: "echo", + parameters: []string{"test"}, + }, + { + name: "HookDoesNotExist", + hookName: "nonexistent-hook", + parameters: []string{"test"}, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + h := NewHook(tc.hookName, tc.parameters) + err := h.ExecuteOptional() + if tc.expectedError == "" { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.ErrorContains(t, err, tc.expectedError) + } + }) + } +} + +func TestNewHook(t *testing.T) { + h := NewHook("test-hook", []string{"arg1", "arg2"}) + assert.Equal(t, "test-hook", h.Name) + assert.Equal(t, []string{"arg1", "arg2"}, h.Parameters) +} + +func TestNewSimpleHook(t *testing.T) { + h := NewSimpleHook("simple-hook") + 
assert.Equal(t, "simple-hook", h.Name) + assert.Empty(t, h.Parameters) +} + +func TestNewHookWithEnv(t *testing.T) { + h := NewHookWithEnv("env-hook", []string{"arg1", "arg2"}, map[string]string{"KEY": "VALUE"}) + assert.Equal(t, "env-hook", h.Name) + assert.Equal(t, []string{"arg1", "arg2"}, h.Parameters) + assert.Equal(t, map[string]string{"KEY": "VALUE"}, h.ExtraEnv) +} + +func TestString(t *testing.T) { + tt := []struct { + name string + input HookResult + expected string + }{ + { + name: "HOOK_SUCCESS", + input: HookResult{ExitStatus: HOOK_SUCCESS, Stdout: "output"}, + expected: "result: HOOK_SUCCESS\nstdout:\noutput", + }, + { + name: "HOOK_DOES_NOT_EXIST", + input: HookResult{ExitStatus: HOOK_DOES_NOT_EXIST}, + expected: "result: HOOK_DOES_NOT_EXIST", + }, + { + name: "HOOK_STAT_FAILED", + input: HookResult{ExitStatus: HOOK_STAT_FAILED}, + expected: "result: HOOK_STAT_FAILED", + }, + { + name: "HOOK_CANNOT_GET_EXIT_STATUS", + input: HookResult{ExitStatus: HOOK_CANNOT_GET_EXIT_STATUS}, + expected: "result: HOOK_CANNOT_GET_EXIT_STATUS", + }, + { + name: "HOOK_INVALID_NAME", + input: HookResult{ExitStatus: HOOK_INVALID_NAME}, + expected: "result: HOOK_INVALID_NAME", + }, + { + name: "HOOK_VTROOT_ERROR", + input: HookResult{ExitStatus: HOOK_VTROOT_ERROR}, + expected: "result: HOOK_VTROOT_ERROR", + }, + { + name: "case default", + input: HookResult{ExitStatus: 42}, + expected: "result: exit(42)", + }, + { + name: "WithStderr", + input: HookResult{ExitStatus: HOOK_SUCCESS, Stderr: "error"}, + expected: "result: HOOK_SUCCESS\nstderr:\nerror", + }, + { + name: "WithStderr", + input: HookResult{ExitStatus: HOOK_SUCCESS, Stderr: "error"}, + expected: "result: HOOK_SUCCESS\nstderr:\nerror", + }, + { + name: "WithStdoutAndStderr", + input: HookResult{ExitStatus: HOOK_SUCCESS, Stdout: "output", Stderr: "error"}, + expected: "result: HOOK_SUCCESS\nstdout:\noutput\nstderr:\nerror", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result := 
tc.input.String() + assert.Equal(t, tc.expected, result) + }) + } +} + +func TestExecuteAsReadPipe(t *testing.T) { + vtroot, err := vtenv.VtRoot() + require.NoError(t, err) + + cat, err := exec.LookPath("cat") + require.NoError(t, err) + + catHookPath := path.Join(vtroot, "vthook", "cat") + + if _, err := os.Lstat(catHookPath); err == nil { + require.NoError(t, os.Remove(catHookPath)) + } + + require.NoError(t, os.Symlink(cat, catHookPath)) + defer func() { + require.NoError(t, os.Remove(catHookPath)) + }() + + h := NewHook("cat", nil) + reader, waitFunc, status, err := h.ExecuteAsReadPipe(strings.NewReader("Hello, World!\n")) + require.NoError(t, err) + defer reader.(io.Closer).Close() + + output, err := io.ReadAll(reader) + require.NoError(t, err) + assert.Equal(t, "Hello, World!\n", string(output)) + + stderr, waitErr := waitFunc() + assert.Empty(t, stderr) + assert.NoError(t, waitErr) + assert.Equal(t, HOOK_SUCCESS, status) +} + +func TestExecuteAsReadPipeErrorFindingHook(t *testing.T) { + h := NewHook("nonexistent-hook", nil) + reader, waitFunc, status, err := h.ExecuteAsReadPipe(strings.NewReader("Hello, World!\n")) + require.Error(t, err) + assert.Nil(t, reader) + assert.Nil(t, waitFunc) + assert.Equal(t, HOOK_DOES_NOT_EXIST, status) +} + +func TestExecuteAsWritePipe(t *testing.T) { + var writer strings.Builder + var writerMutex sync.Mutex + + vtroot, err := vtenv.VtRoot() + require.NoError(t, err) + + echo, err := exec.LookPath("echo") + require.NoError(t, err) + + echoHookPath := path.Join(vtroot, "vthook", "echo") + + if _, err := os.Lstat(echoHookPath); err == nil { + require.NoError(t, os.Remove(echoHookPath)) + } + + require.NoError(t, os.Symlink(echo, echoHookPath)) + defer func() { + require.NoError(t, os.Remove(echoHookPath)) + }() + + h := NewHook("echo", nil) + + writerMutex.Lock() + var writerTemp strings.Builder + _, waitFunc, status, err := h.ExecuteAsWritePipe(&writerTemp) + writerMutex.Unlock() + + require.NoError(t, err) + defer func() { + 
writerMutex.Lock() + writer.Reset() + writerMutex.Unlock() + }() + + writerMutex.Lock() + _, err = writer.Write([]byte("Hello, World!\n")) + writerMutex.Unlock() + require.NoError(t, err) + + stderr, waitErr := waitFunc() + assert.Empty(t, stderr) + assert.NoError(t, waitErr) + assert.Equal(t, HOOK_SUCCESS, status) +} + +func TestExecuteAsWritePipeErrorFindingHook(t *testing.T) { + h := NewHook("nonexistent-hook", nil) + var writer strings.Builder + writerPtr := &writer + _, _, status, err := h.ExecuteAsWritePipe(writerPtr) + assert.Error(t, err) + assert.Equal(t, HOOK_DOES_NOT_EXIST, status) +} diff --git a/go/vt/key/destination.go b/go/vt/key/destination.go index 437e980f480..77287c782e7 100644 --- a/go/vt/key/destination.go +++ b/go/vt/key/destination.go @@ -17,9 +17,8 @@ limitations under the License. package key import ( - "bytes" "encoding/hex" - "math/rand" + "math/rand/v2" "sort" "strings" @@ -48,7 +47,7 @@ type Destination interface { // DestinationsString returns a printed version of the destination array. func DestinationsString(destinations []Destination) string { - var buffer bytes.Buffer + var buffer strings.Builder buffer.WriteString("Destinations:") for i, d := range destinations { if i > 0 { @@ -155,40 +154,6 @@ func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyrange %v does not exactly match shards", KeyRangeString(kr)) } -// -// DestinationExactKeyRanges -// - -// DestinationExactKeyRanges is the destination for multiple KeyRanges. -// The KeyRanges must map exactly to one or more shards, and cannot -// start or end in the middle of a shard. -// It implements the Destination interface. -type DestinationExactKeyRanges []*topodatapb.KeyRange - -// Resolve is part of the Destination interface. 
-func (d DestinationExactKeyRanges) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { - for _, kr := range d { - if err := processExactKeyRange(allShards, kr, addShard); err != nil { - return err - } - } - return nil -} - -// String is part of the Destination interface. -func (d DestinationExactKeyRanges) String() string { - var buffer bytes.Buffer - buffer.WriteString("DestinationExactKeyRanges(") - for i, kr := range d { - if i > 0 { - buffer.WriteByte(',') - } - buffer.WriteString(KeyRangeString(kr)) - } - buffer.WriteByte(')') - return buffer.String() -} - // // DestinationKeyRange // @@ -226,38 +191,6 @@ func processKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyR return nil } -// -// DestinationKeyRanges -// - -// DestinationKeyRanges is the destination for multiple KeyRanges. -// It implements the Destination interface. -type DestinationKeyRanges []*topodatapb.KeyRange - -// Resolve is part of the Destination interface. -func (d DestinationKeyRanges) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { - for _, kr := range d { - if err := processKeyRange(allShards, kr, addShard); err != nil { - return err - } - } - return nil -} - -// String is part of the Destination interface. -func (d DestinationKeyRanges) String() string { - var buffer bytes.Buffer - buffer.WriteString("DestinationKeyRanges(") - for i, kr := range d { - if i > 0 { - buffer.WriteByte(',') - } - buffer.WriteString(KeyRangeString(kr)) - } - buffer.WriteByte(')') - return buffer.String() -} - // // DestinationKeyspaceID // @@ -318,7 +251,7 @@ func (d DestinationKeyspaceIDs) Resolve(allShards []*topodatapb.ShardReference, // String is part of the Destination interface. 
func (d DestinationKeyspaceIDs) String() string { - var buffer bytes.Buffer + var buffer strings.Builder buffer.WriteString("DestinationKeyspaceIDs(") for i, ksid := range d { if i > 0 { @@ -341,7 +274,7 @@ type DestinationAnyShardPickerRandomShard struct{} // PickShard is DestinationAnyShardPickerRandomShard's implementation. func (dp DestinationAnyShardPickerRandomShard) PickShard(shardCount int) int { - return rand.Intn(shardCount) + return rand.IntN(shardCount) } // diff --git a/go/vt/key/destination_test.go b/go/vt/key/destination_test.go index 1f51323c715..f348b9ffa25 100644 --- a/go/vt/key/destination_test.go +++ b/go/vt/key/destination_test.go @@ -17,9 +17,12 @@ limitations under the License. package key import ( - "reflect" + "encoding/hex" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -51,9 +54,7 @@ func initShardArray(t *testing.T, shardingSpec string) []*topodatapb.ShardRefere } shardKrArray, err := ParseShardingSpec(shardingSpec) - if err != nil { - t.Fatalf("ParseShardingSpec failed: %v", err) - } + require.NoError(t, err, "ParseShardingSpec failed") result := make([]*topodatapb.ShardReference, len(shardKrArray)) for i, kr := range shardKrArray { @@ -137,9 +138,7 @@ func TestDestinationExactKeyRange(t *testing.T) { keyRange = &topodatapb.KeyRange{} } else { krArray, err := ParseShardingSpec(testCase.keyRange) - if err != nil { - t.Errorf("Got error while parsing sharding spec %v", err) - } + assert.NoError(t, err, "Got error while parsing sharding spec") keyRange = krArray[0] } dkr := DestinationExactKeyRange{KeyRange: keyRange} @@ -148,12 +147,10 @@ func TestDestinationExactKeyRange(t *testing.T) { gotShards = append(gotShards, shard) return nil }) - if err != nil && err.Error() != testCase.err { - t.Errorf("gotShards: %v, want %s", err, testCase.err) - } - if !reflect.DeepEqual(testCase.shards, gotShards) { - t.Errorf("want \n%#v, got \n%#v", 
testCase.shards, gotShards) + if testCase.err != "" { + assert.ErrorContains(t, err, testCase.err) } + assert.Equal(t, testCase.shards, gotShards) } } @@ -241,21 +238,202 @@ func TestDestinationKeyRange(t *testing.T) { keyRange = &topodatapb.KeyRange{} } else { krArray, err := ParseShardingSpec(testCase.keyRange) - if err != nil { - t.Errorf("Got error while parsing sharding spec %v", err) - } + assert.NoError(t, err, "Got error while parsing sharding spec") keyRange = krArray[0] } dkr := DestinationKeyRange{KeyRange: keyRange} var gotShards []string - if err := dkr.Resolve(allShards, func(shard string) error { + err := dkr.Resolve(allShards, func(shard string) error { gotShards = append(gotShards, shard) return nil - }); err != nil { - t.Errorf("want nil, got %v", err) - } - if !reflect.DeepEqual(testCase.shards, gotShards) { - t.Errorf("want \n%#v, got \n%#v", testCase.shards, gotShards) - } + }) + assert.NoError(t, err) + assert.Equal(t, testCase.shards, gotShards) + } +} + +func TestDestinationsString(t *testing.T) { + kr2040 := &topodatapb.KeyRange{ + Start: []byte{0x20}, + End: []byte{0x40}, + } + + got := DestinationsString([]Destination{ + DestinationShard("2"), + DestinationShards{"2", "3"}, + DestinationExactKeyRange{KeyRange: kr2040}, + DestinationKeyRange{KeyRange: kr2040}, + DestinationKeyspaceID{1, 2}, + DestinationKeyspaceIDs{ + {1, 2}, + {2, 3}, + }, + DestinationAllShards{}, + DestinationNone{}, + DestinationAnyShard{}, + }) + want := "Destinations:DestinationShard(2),DestinationShards(2,3),DestinationExactKeyRange(20-40),DestinationKeyRange(20-40),DestinationKeyspaceID(0102),DestinationKeyspaceIDs(0102,0203),DestinationAllShards(),DestinationNone(),DestinationAnyShard()" + assert.Equal(t, want, got) +} + +func TestDestinationShardResolve(t *testing.T) { + allShards := initShardArray(t, "") + + ds := DestinationShard("test-destination-shard") + + var calledVar string + err := ds.Resolve(allShards, func(shard string) error { + calledVar = shard + 
return nil + }) + assert.NoError(t, err) + assert.Equal(t, "test-destination-shard", calledVar) +} + +func TestDestinationShardsResolve(t *testing.T) { + allShards := initShardArray(t, "") + + ds := DestinationShards{"ds1", "ds2"} + + var calledVar []string + err := ds.Resolve(allShards, func(shard string) error { + calledVar = append(calledVar, shard) + return nil + }) + assert.NoError(t, err) + + want := []string{"ds1", "ds2"} + assert.ElementsMatch(t, want, calledVar) +} + +func TestDestinationKeyspaceIDResolve(t *testing.T) { + allShards := initShardArray(t, "60-80-90") + + testCases := []struct { + keyspaceID string + want string + err string + }{ + {"59", "", "KeyspaceId 59 didn't match any shards"}, + // Should include start limit of keyRange + {"60", "60-80", ""}, + {"79", "60-80", ""}, + {"80", "80-90", ""}, + {"89", "80-90", ""}, + // Shouldn't include end limit of keyRange + {"90", "", "KeyspaceId 90 didn't match any shards"}, + } + + for _, tc := range testCases { + t.Run(tc.keyspaceID, func(t *testing.T) { + k, err := hex.DecodeString(tc.keyspaceID) + assert.NoError(t, err) + + ds := DestinationKeyspaceID(k) + + var calledVar string + addShard := func(shard string) error { + calledVar = shard + return nil + } + + err = ds.Resolve(allShards, addShard) + if tc.err != "" { + assert.ErrorContains(t, err, tc.err) + return + } + + assert.Equal(t, tc.want, calledVar) + }) + } + + // Expect error when allShards is empty + ds := DestinationKeyspaceID("80") + err := ds.Resolve([]*topodatapb.ShardReference{}, func(_ string) error { + return nil + }) + assert.ErrorContains(t, err, "no shard in keyspace") +} + +func TestDestinationKeyspaceIDsResolve(t *testing.T) { + allShards := initShardArray(t, "60-80-90") + + k1, err := hex.DecodeString("82") + assert.NoError(t, err) + + k2, err := hex.DecodeString("61") + assert.NoError(t, err) + + k3, err := hex.DecodeString("89") + assert.NoError(t, err) + + ds := DestinationKeyspaceIDs{k1, k2, k3} + + var calledVar []string 
+ addShard := func(shard string) error { + calledVar = append(calledVar, shard) + return nil + } + + err = ds.Resolve(allShards, addShard) + assert.NoError(t, err) + + want := []string{"80-90", "60-80", "80-90"} + assert.Equal(t, want, calledVar) +} + +func TestDestinationAllShardsResolve(t *testing.T) { + allShards := initShardArray(t, "60-80-90") + + ds := DestinationAllShards{} + + var calledVar []string + addShard := func(shard string) error { + calledVar = append(calledVar, shard) + return nil } + + err := ds.Resolve(allShards, addShard) + assert.NoError(t, err) + + want := []string{"60-80", "80-90"} + assert.ElementsMatch(t, want, calledVar) +} + +func TestDestinationNoneResolve(t *testing.T) { + allShards := initShardArray(t, "60-80-90") + + ds := DestinationNone{} + + var called bool + addShard := func(shard string) error { + called = true + return nil + } + + err := ds.Resolve(allShards, addShard) + assert.NoError(t, err) + assert.False(t, called, "addShard shouldn't be called in the case of DestinationNone") +} + +func TestDestinationAnyShardResolve(t *testing.T) { + allShards := initShardArray(t, "custom") + + ds := DestinationAnyShard{} + + var calledVar string + addShard := func(shard string) error { + calledVar = shard + return nil + } + + err := ds.Resolve(allShards, addShard) + assert.NoError(t, err) + + possibleShards := []string{"0", "1"} + assert.Contains(t, possibleShards, calledVar) + + // Expect error when allShards is empty + err = ds.Resolve([]*topodatapb.ShardReference{}, addShard) + assert.ErrorContains(t, err, "no shard in keyspace") } diff --git a/go/vt/key/key_test.go b/go/vt/key/key_test.go index 8db45aa79b9..84d4365ff0e 100644 --- a/go/vt/key/key_test.go +++ b/go/vt/key/key_test.go @@ -333,25 +333,18 @@ func TestEvenShardsKeyRange(t *testing.T) { for _, tc := range testCases { got, err := EvenShardsKeyRange(tc.i, tc.n) - if err != nil { - t.Fatalf("EvenShardsKeyRange(%v, %v) returned unexpected error: %v", tc.i, tc.n, err) - } - if 
!proto.Equal(got, tc.want) { - t.Errorf("EvenShardsKeyRange(%v, %v) = (%x, %x), want = (%x, %x)", tc.i, tc.n, got.Start, got.End, tc.want.Start, tc.want.End) - } + require.NoError(t, err) + assert.True(t, proto.Equal(got, tc.want), "got=(%x, %x), want=(%x, %x)", got.Start, got.End, tc.want.Start, tc.want.End) // Check if the string representation is equal as well. - if gotStr, want := KeyRangeString(got), tc.wantSpec; gotStr != want { - t.Errorf("EvenShardsKeyRange(%v) = %v, want = %v", got, gotStr, want) - } + gotStr := KeyRangeString(got) + assert.Equal(t, tc.wantSpec, gotStr) // Now verify that ParseKeyRangeParts() produces the same KeyRange object as // we do. parts := strings.Split(tc.wantSpec, "-") kr, _ := ParseKeyRangeParts(parts[0], parts[1]) - if !proto.Equal(got, kr) { - t.Errorf("EvenShardsKeyRange(%v, %v) != ParseKeyRangeParts(%v, %v): (%x, %x) != (%x, %x)", tc.i, tc.n, parts[0], parts[1], got.Start, got.End, kr.Start, kr.End) - } + assert.True(t, proto.Equal(got, kr), "EvenShardsKeyRange(%v, %v) != ParseKeyRangeParts(%v, %v): (%x, %x) != (%x, %x)", tc.i, tc.n, parts[0], parts[1], got.Start, got.End, kr.Start, kr.End) } } @@ -477,9 +470,7 @@ func TestKeyRangeEndEqual(t *testing.T) { first := stringToKeyRange(tcase.first) second := stringToKeyRange(tcase.second) out := KeyRangeEndEqual(first, second) - if out != tcase.out { - t.Fatalf("KeyRangeEndEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) - } + require.Equal(t, tcase.out, out) } } @@ -518,9 +509,7 @@ func TestKeyRangeStartEqual(t *testing.T) { first := stringToKeyRange(tcase.first) second := stringToKeyRange(tcase.second) out := KeyRangeStartEqual(first, second) - if out != tcase.out { - t.Fatalf("KeyRangeStartEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) - } + require.Equal(t, tcase.out, out) } } @@ -555,9 +544,7 @@ func TestKeyRangeEqual(t *testing.T) { first := stringToKeyRange(tcase.first) second := stringToKeyRange(tcase.second) 
out := KeyRangeEqual(first, second) - if out != tcase.out { - t.Fatalf("KeyRangeEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) - } + require.Equal(t, tcase.out, out) } } @@ -600,9 +587,7 @@ func TestKeyRangeContiguous(t *testing.T) { first := stringToKeyRange(tcase.first) second := stringToKeyRange(tcase.second) out := KeyRangeContiguous(first, second) - if out != tcase.out { - t.Fatalf("KeyRangeContiguous(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) - } + require.Equal(t, tcase.out, out) } } @@ -626,10 +611,8 @@ func TestEvenShardsKeyRange_Error(t *testing.T) { } for _, tc := range testCases { - kr, err := EvenShardsKeyRange(tc.i, tc.n) - if err == nil || !strings.Contains(err.Error(), tc.wantError) { - t.Fatalf("EvenShardsKeyRange(%v, %v) = (%v, %v) want error = %v", tc.i, tc.n, kr, err, tc.wantError) - } + _, err := EvenShardsKeyRange(tc.i, tc.n) + require.ErrorContains(t, err, tc.wantError) } } @@ -653,25 +636,17 @@ func TestParseShardingSpec(t *testing.T) { } for key, wanted := range goodTable { r, err := ParseShardingSpec(key) - if err != nil { - t.Errorf("Unexpected error: %v.", err) - } - if len(r) != len(wanted) { - t.Errorf("Wrong result: wanted %v, got %v", wanted, r) + assert.NoError(t, err) + if !assert.Len(t, r, len(wanted)) { continue } for i, w := range wanted { - if !proto.Equal(r[i], w) { - t.Errorf("Wrong result: wanted %v, got %v", w, r[i]) - break - } + require.Truef(t, proto.Equal(r[i], w), "wanted %v, got %v", w, r[i]) } } for _, bad := range badTable { _, err := ParseShardingSpec(bad) - if err == nil { - t.Errorf("Didn't get expected error for %v.", bad) - } + assert.Error(t, err) } } @@ -1081,27 +1056,19 @@ func TestKeyRangeContains(t *testing.T) { for _, el := range table { s, err := hex.DecodeString(el.start) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } + assert.NoError(t, err) e, err := hex.DecodeString(el.end) - if err != nil { - t.Errorf("Unexpected error: 
%v", err) - } + assert.NoError(t, err) kr := &topodatapb.KeyRange{ Start: s, End: e, } k, err := hex.DecodeString(el.kid) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if c := KeyRangeContains(kr, k); c != el.contained { - t.Errorf("Unexpected result: contains for %v and (%v-%v) yields %v.", el.kid, el.start, el.end, c) - } - if !KeyRangeContains(nil, k) { - t.Errorf("KeyRangeContains(nil, x) should always be true") - } + assert.NoError(t, err) + c := KeyRangeContains(kr, k) + assert.Equal(t, el.contained, c) + + assert.True(t, KeyRangeContains(nil, k), "KeyRangeContains(nil, x) should always be true") } } @@ -1601,3 +1568,24 @@ func stringToKeyRange(spec string) *topodatapb.KeyRange { } return kr } + +func TestKeyRangeIsPartial(t *testing.T) { + testCases := []struct { + name string + keyRange *topodatapb.KeyRange + want bool + }{ + {"nil key range", nil, false}, + {"empty start and end", &topodatapb.KeyRange{}, false}, + {"empty end", &topodatapb.KeyRange{Start: []byte("12")}, true}, + {"empty start", &topodatapb.KeyRange{End: []byte("13")}, true}, + {"non-empty start and end", &topodatapb.KeyRange{Start: []byte("12"), End: []byte("13")}, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + isPartial := KeyRangeIsPartial(tc.keyRange) + assert.Equal(t, tc.want, isPartial) + }) + } +} diff --git a/go/vt/logutil/logger.go b/go/vt/logutil/logger.go index 524ca4db4d7..47c3f124238 100644 --- a/go/vt/logutil/logger.go +++ b/go/vt/logutil/logger.go @@ -17,7 +17,6 @@ limitations under the License. package logutil import ( - "bytes" "fmt" "io" "runtime" @@ -57,7 +56,7 @@ type Logger interface { // EventToBuffer formats an individual Event into a buffer, without the // final '\n' -func EventToBuffer(event *logutilpb.Event, buf *bytes.Buffer) { +func EventToBuffer(event *logutilpb.Event, buf *strings.Builder) { // Avoid Fprintf, for speed. The format is so simple that we // can do it quickly by hand. It's worth about 3X. 
Fprintf is hard. @@ -98,8 +97,8 @@ func EventToBuffer(event *logutilpb.Event, buf *bytes.Buffer) { // EventString returns the line in one string func EventString(event *logutilpb.Event) string { - buf := new(bytes.Buffer) - EventToBuffer(event, buf) + var buf strings.Builder + EventToBuffer(event, &buf) return buf.String() } @@ -207,27 +206,6 @@ func (cl *CallbackLogger) Printf(format string, v ...any) { }) } -// ChannelLogger is a Logger that sends the logging events through a channel for -// consumption. -type ChannelLogger struct { - CallbackLogger - C chan *logutilpb.Event -} - -// NewChannelLogger returns a CallbackLogger which will write the data -// on a channel -func NewChannelLogger(size int) *ChannelLogger { - c := make(chan *logutilpb.Event, size) - return &ChannelLogger{ - CallbackLogger: CallbackLogger{ - f: func(e *logutilpb.Event) { - c <- e - }, - }, - C: c, - } -} - // MemoryLogger keeps the logging events in memory. // All protected by a mutex. type MemoryLogger struct { @@ -251,11 +229,11 @@ func NewMemoryLogger() *MemoryLogger { // String returns all the lines in one String, separated by '\n' func (ml *MemoryLogger) String() string { - buf := new(bytes.Buffer) + var buf strings.Builder ml.mu.Lock() defer ml.mu.Unlock() for _, event := range ml.Events { - EventToBuffer(event, buf) + EventToBuffer(event, &buf) buf.WriteByte('\n') } return buf.String() @@ -355,7 +333,7 @@ func (tl *TeeLogger) Printf(format string, v ...any) { const digits = "0123456789" // twoDigits adds a zero-prefixed two-digit integer to buf -func twoDigits(buf *bytes.Buffer, value int) { +func twoDigits(buf *strings.Builder, value int) { buf.WriteByte(digits[value/10]) buf.WriteByte(digits[value%10]) } @@ -363,7 +341,7 @@ func twoDigits(buf *bytes.Buffer, value int) { // nDigits adds an n-digit integer d to buf // padding with pad on the left. // It assumes d >= 0. 
-func nDigits(buf *bytes.Buffer, n, d int, pad byte) { +func nDigits(buf *strings.Builder, n, d int, pad byte) { tmp := make([]byte, n) j := n - 1 for ; j >= 0 && d > 0; j-- { @@ -377,7 +355,7 @@ func nDigits(buf *bytes.Buffer, n, d int, pad byte) { } // someDigits adds a zero-prefixed variable-width integer to buf -func someDigits(buf *bytes.Buffer, d int64) { +func someDigits(buf *strings.Builder, d int64) { // Print into the top, then copy down. tmp := make([]byte, 10) j := 10 diff --git a/go/vt/logutil/logger_test.go b/go/vt/logutil/logger_test.go index 0eb4edb2b93..ce25543da5f 100644 --- a/go/vt/logutil/logger_test.go +++ b/go/vt/logutil/logger_test.go @@ -112,44 +112,15 @@ func TestMemoryLogger(t *testing.T) { } } -func TestChannelLogger(t *testing.T) { - cl := NewChannelLogger(10) - cl.Infof("test %v", 123) - cl.Warningf("test %v", 123) - cl.Errorf("test %v", 123) - cl.Printf("test %v", 123) - close(cl.C) - - count := 0 - for e := range cl.C { - if got, want := e.Value, "test 123"; got != want { - t.Errorf("e.Value = %q, want %q", got, want) - } - if e.File != "logger_test.go" { - t.Errorf("Invalid file name: %v", e.File) - } - count++ - } - if got, want := count, 4; got != want { - t.Errorf("count = %v, want %v", got, want) - } -} - func TestTeeLogger(t *testing.T) { - ml := NewMemoryLogger() - cl := NewChannelLogger(10) - tl := NewTeeLogger(ml, cl) + ml1 := NewMemoryLogger() + ml2 := NewMemoryLogger() + tl := NewTeeLogger(ml1, ml2) tl.Infof("test infof %v %v", 1, 2) tl.Warningf("test warningf %v %v", 2, 3) tl.Errorf("test errorf %v %v", 3, 4) tl.Printf("test printf %v %v", 4, 5) - close(cl.C) - - clEvents := []*logutilpb.Event{} - for e := range cl.C { - clEvents = append(clEvents, e) - } wantEvents := []*logutilpb.Event{ {Level: logutilpb.Level_INFO, Value: "test infof 1 2"}, @@ -159,7 +130,7 @@ func TestTeeLogger(t *testing.T) { } wantFile := "logger_test.go" - for i, events := range [][]*logutilpb.Event{ml.Events, clEvents} { + for i, events := range 
[][]*logutilpb.Event{ml1.Events, ml2.Events} { if got, want := len(events), len(wantEvents); got != want { t.Fatalf("[%v] len(events) = %v, want %v", i, got, want) } diff --git a/go/vt/logutil/logutil_flaky_test.go b/go/vt/logutil/logutil_test.go similarity index 100% rename from go/vt/logutil/logutil_flaky_test.go rename to go/vt/logutil/logutil_test.go diff --git a/go/vt/logz/logz_utils_test.go b/go/vt/logz/logz_utils_test.go new file mode 100644 index 00000000000..9a8b78917ff --- /dev/null +++ b/go/vt/logz/logz_utils_test.go @@ -0,0 +1,206 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logz + +import ( + "testing" + + "net/http" + "net/http/httptest" + + "github.com/stretchr/testify/require" +) + +func TestWrappable(t *testing.T) { + tests := []struct { + input string + output string + }{ + { + input: "s", + output: "s", + }, + { + input: "val,ue", + output: "val,\u200bue", + }, + { + input: ")al,ue", + output: ")\u200bal,\u200bue", + }, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + require.Equal(t, tt.output, Wrappable(tt.input)) + }) + } +} + +func TestStartAndEndHTMLTable(t *testing.T) { + // Create a mock HTTP response writer + w := httptest.NewRecorder() + + // Call the function to be tested + StartHTMLTable(w) + + // Check the response status code + require.Equal(t, http.StatusOK, w.Code) + + // Define the expected HTML content + expectedHTML := ` + + +
HealthCheck Tablet Cache%s
Cell
+` + + // Check if the response body matches the expected HTML content + require.Contains(t, w.Body.String(), expectedHTML) + + // Call the function to be tested + EndHTMLTable(w) + + // Check the response status code + require.Equal(t, http.StatusOK, w.Code) + + expectedHTML = ` +
+ +` + + // Check if the response body matches the expected HTML content + require.Contains(t, w.Body.String(), expectedHTML) +} diff --git a/go/vt/mysqlctl/azblobbackupstorage/azblob.go b/go/vt/mysqlctl/azblobbackupstorage/azblob.go index 7058745d6c6..3ba6b187a2f 100644 --- a/go/vt/mysqlctl/azblobbackupstorage/azblob.go +++ b/go/vt/mysqlctl/azblobbackupstorage/azblob.go @@ -239,8 +239,9 @@ func (bh *AZBlobBackupHandle) AddFile(ctx context.Context, filename string, file return nil, fmt.Errorf("AddFile cannot be called on read-only backup") } // Error out if the file size it too large ( ~4.75 TB) - if filesize > azblob.BlockBlobMaxStageBlockBytes*azblob.BlockBlobMaxBlocks { - return nil, fmt.Errorf("filesize (%v) is too large to upload to az blob (max size %v)", filesize, azblob.BlockBlobMaxStageBlockBytes*azblob.BlockBlobMaxBlocks) + maxSize := int64(azblob.BlockBlobMaxStageBlockBytes * azblob.BlockBlobMaxBlocks) + if filesize > maxSize { + return nil, fmt.Errorf("filesize (%v) is too large to upload to az blob (max size %v)", filesize, maxSize) } obj := objName(bh.dir, bh.name, filename) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index e9f0b19d54a..7052dcbdf87 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -86,6 +86,8 @@ var ( // backupCompressBlocks is the number of blocks that are processed // once before the writer blocks backupCompressBlocks = 2 + + EmptyBackupMessage = "no new data to backup, skipping it" ) func init() { @@ -168,14 +170,20 @@ func Backup(ctx context.Context, params BackupParams) error { } // Take the backup, and either AbortBackup or EndBackup. 
- usable, err := be.ExecuteBackup(ctx, beParams, bh) + backupResult, err := be.ExecuteBackup(ctx, beParams, bh) logger := params.Logger var finishErr error - if usable { - finishErr = bh.EndBackup(ctx) - } else { + switch backupResult { + case BackupUnusable: logger.Errorf2(err, "backup is not usable, aborting it") finishErr = bh.AbortBackup(ctx) + case BackupEmpty: + logger.Infof(EmptyBackupMessage) + // While an empty backup is considered "successful", it should leave no trace. + // We therefore ensure to clean up any backup files/directories/entries. + finishErr = bh.AbortBackup(ctx) + case BackupUsable: + finishErr = bh.EndBackup(ctx) } if err != nil { if finishErr != nil { @@ -310,6 +318,10 @@ func ShouldRestore(ctx context.Context, params RestoreParams) (bool, error) { if err := params.Mysqld.Wait(ctx, params.Cnf); err != nil { return false, err } + if err := params.Mysqld.WaitForDBAGrants(ctx, DbaGrantWaitTime); err != nil { + params.Logger.Errorf("error waiting for the grants: %v", err) + return false, err + } return checkNoDB(ctx, params.Mysqld, params.DbName) } @@ -339,13 +351,9 @@ func ensureRestoredGTIDPurgedMatchesManifest(ctx context.Context, manifest *Back } params.Logger.Infof("Restore: @@gtid_purged does not equal manifest's GTID position. Setting @@gtid_purged to %v", gtid) // This is not good. We want to apply a new @@gtid_purged value. 
- query := "RESET MASTER" // required dialect in 5.7 - if _, err := params.Mysqld.FetchSuperQuery(ctx, query); err != nil { - return vterrors.Wrapf(err, "error issuing %v", query) - } - query = fmt.Sprintf("SET GLOBAL gtid_purged='%s'", gtid) - if _, err := params.Mysqld.FetchSuperQuery(ctx, query); err != nil { - return vterrors.Wrapf(err, "failed to apply `%s` after restore", query) + err = params.Mysqld.SetReplicationPosition(ctx, manifest.Position) + if err != nil { + return vterrors.Wrap(err, "error setting replication position") } return nil } @@ -395,6 +403,10 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) params.Logger.Errorf("mysqld is not running: %v", err) return nil, err } + if err = params.Mysqld.WaitForDBAGrants(ctx, DbaGrantWaitTime); err != nil { + params.Logger.Errorf("error waiting for the grants: %v", err) + return nil, err + } // Since this is an empty database make sure we start replication at the beginning if err := params.Mysqld.ResetReplication(ctx); err != nil { params.Logger.Errorf("error resetting replication: %v. Continuing", err) @@ -453,7 +465,7 @@ func Restore(ctx context.Context, params RestoreParams) (*BackupManifest, error) // The MySQL manual recommends restarting mysqld after running mysql_upgrade, // so that any changes made to system tables take effect. 
params.Logger.Infof("Restore: restarting mysqld after mysql_upgrade") - if err := params.Mysqld.Shutdown(context.Background(), params.Cnf, true); err != nil { + if err := params.Mysqld.Shutdown(context.Background(), params.Cnf, true, params.MysqlShutdownTimeout); err != nil { return nil, err } if err := params.Mysqld.Start(context.Background(), params.Cnf); err != nil { diff --git a/go/vt/mysqlctl/backup_blackbox_test.go b/go/vt/mysqlctl/backup_blackbox_test.go index 8de6a8679fa..15244fb8782 100644 --- a/go/vt/mysqlctl/backup_blackbox_test.go +++ b/go/vt/mysqlctl/backup_blackbox_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" @@ -47,6 +48,8 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" ) +const mysqlShutdownTimeout = 1 * time.Minute + func setBuiltinBackupMysqldDeadline(t time.Duration) time.Duration { old := mysqlctl.BuiltinBackupMysqldTimeout mysqlctl.BuiltinBackupMysqldTimeout = t @@ -136,17 +139,17 @@ func TestExecuteBackup(t *testing.T) { bh := filebackupstorage.NewBackupHandle(nil, "", "", false) // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. + // "STOP REPLICA", "START REPLICA", in that order. 
fakedb := fakesqldb.New(t) defer fakedb.Close() mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) defer mysqld.Close() - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} // mysqld.ShutdownTime = time.Minute fakeStats := backupstats.NewFakeStats() - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ Logger: logutil.NewConsoleLogger(), Mysqld: mysqld, Cnf: &mysqlctl.Mycnf{ @@ -154,16 +157,17 @@ func TestExecuteBackup(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - Stats: fakeStats, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: fakeStats, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.NoError(t, err) - assert.True(t, ok) + assert.Equal(t, mysqlctl.BackupUsable, backupResult) var destinationCloseStats int var destinationOpenStats int @@ -205,7 +209,7 @@ func TestExecuteBackup(t *testing.T) { mysqld.ExpectedExecuteSuperQueryCurrent = 0 // resest the index of what queries we've run mysqld.ShutdownTime = time.Minute // reminder that shutdownDeadline is 1s - ok, err = be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + backupResult, err = be.ExecuteBackup(ctx, mysqlctl.BackupParams{ Logger: logutil.NewConsoleLogger(), Mysqld: mysqld, Cnf: &mysqlctl.Mycnf{ @@ -213,14 +217,15 @@ func TestExecuteBackup(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: 
mysqlShutdownTimeout, }, bh) assert.Error(t, err) - assert.False(t, ok) + assert.Equal(t, mysqlctl.BackupUnusable, backupResult) } func TestExecuteBackupWithSafeUpgrade(t *testing.T) { @@ -280,18 +285,18 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { bh := filebackupstorage.NewBackupHandle(nil, "", "", false) // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. + // "STOP REPLICA", "START REPLICA", in that order. // It also needs to be allowed to receive the query to disable the innodb_fast_shutdown flag. fakedb := fakesqldb.New(t) defer fakedb.Close() mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) defer mysqld.Close() - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} mysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ "SET GLOBAL innodb_fast_shutdown=0": {}, } - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ Logger: logutil.NewConsoleLogger(), Mysqld: mysqld, Cnf: &mysqlctl.Mycnf{ @@ -299,16 +304,17 @@ func TestExecuteBackupWithSafeUpgrade(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Concurrency: 2, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, - Stats: backupstats.NewFakeStats(), - UpgradeSafe: true, + Concurrency: 2, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + Stats: backupstats.NewFakeStats(), + UpgradeSafe: true, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.NoError(t, err) - assert.True(t, ok) + assert.Equal(t, mysqlctl.BackupUsable, backupResult) } // TestExecuteBackupWithCanceledContext tests the ability of the backup function to gracefully handle cases where errors @@ -366,18 +372,18 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { be := 
&mysqlctl.BuiltinBackupEngine{} bh := filebackupstorage.NewBackupHandle(nil, "", "", false) // Spin up a fake daemon to be used in backups. It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. + // "STOP REPLICA", "START REPLICA", in that order. fakedb := fakesqldb.New(t) defer fakedb.Close() mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) defer mysqld.Close() - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} // Cancel the context deliberately cancelledCtx, cancelCtx := context.WithCancel(context.Background()) cancelCtx() - ok, err := be.ExecuteBackup(cancelledCtx, mysqlctl.BackupParams{ + backupResult, err := be.ExecuteBackup(cancelledCtx, mysqlctl.BackupParams{ Logger: logutil.NewConsoleLogger(), Mysqld: mysqld, Cnf: &mysqlctl.Mycnf{ @@ -385,18 +391,19 @@ func TestExecuteBackupWithCanceledContext(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Stats: backupstats.NewFakeStats(), - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.Error(t, err) // all four files will fail require.ErrorContains(t, err, "context canceled;context canceled;context canceled;context canceled") - assert.False(t, ok) + assert.Equal(t, mysqlctl.BackupUnusable, backupResult) } // TestExecuteRestoreWithCanceledContext tests the ability of the restore function to gracefully handle cases where errors @@ -454,14 +461,14 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { be := &mysqlctl.BuiltinBackupEngine{} bh := filebackupstorage.NewBackupHandle(nil, "", "", false) // Spin up a fake daemon to be used in backups. 
It needs to be allowed to receive: - // "STOP SLAVE", "START SLAVE", in that order. + // "STOP REPLICA", "START REPLICA", in that order. fakedb := fakesqldb.New(t) defer fakedb.Close() mysqld := mysqlctl.NewFakeMysqlDaemon(fakedb) defer mysqld.Close() - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} - ok, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ + backupResult, err := be.ExecuteBackup(ctx, mysqlctl.BackupParams{ Logger: logutil.NewConsoleLogger(), Mysqld: mysqld, Cnf: &mysqlctl.Mycnf{ @@ -469,16 +476,17 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { InnodbLogGroupHomeDir: path.Join(backupRoot, "log"), DataDir: path.Join(backupRoot, "datadir"), }, - Stats: backupstats.NewFakeStats(), - Concurrency: 2, - HookExtraEnv: map[string]string{}, - TopoServer: ts, - Keyspace: keyspace, - Shard: shard, + Stats: backupstats.NewFakeStats(), + Concurrency: 2, + HookExtraEnv: map[string]string{}, + TopoServer: ts, + Keyspace: keyspace, + Shard: shard, + MysqlShutdownTimeout: mysqlShutdownTimeout, }, bh) require.NoError(t, err) - assert.True(t, ok) + assert.Equal(t, mysqlctl.BackupUsable, backupResult) // Now try to restore the above backup. 
bh = filebackupstorage.NewBackupHandle(nil, "", "", true) @@ -486,7 +494,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { defer fakedb.Close() mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) defer mysqld.Close() - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} fakeStats := backupstats.NewFakeStats() @@ -500,19 +508,20 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { RelayLogIndexPath: path.Join(backupRoot, "relaylogindex"), RelayLogInfoPath: path.Join(backupRoot, "relayloginfo"), }, - Logger: logutil.NewConsoleLogger(), - Mysqld: mysqld, - Concurrency: 2, - HookExtraEnv: map[string]string{}, - DeleteBeforeRestore: false, - DbName: "test", - Keyspace: "test", - Shard: "-", - StartTime: time.Now(), - RestoreToPos: replication.Position{}, - RestoreToTimestamp: time.Time{}, - DryRun: false, - Stats: fakeStats, + Logger: logutil.NewConsoleLogger(), + Mysqld: mysqld, + Concurrency: 2, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + RestoreToTimestamp: time.Time{}, + DryRun: false, + Stats: fakeStats, + MysqlShutdownTimeout: mysqlShutdownTimeout, } // Successful restore. 
@@ -562,7 +571,7 @@ func TestExecuteRestoreWithTimedOutContext(t *testing.T) { defer fakedb.Close() mysqld = mysqlctl.NewFakeMysqlDaemon(fakedb) defer mysqld.Close() - mysqld.ExpectedExecuteSuperQueryList = []string{"STOP SLAVE", "START SLAVE"} + mysqld.ExpectedExecuteSuperQueryList = []string{"STOP REPLICA", "START REPLICA"} restoreParams.Mysqld = mysqld timedOutCtx, cancel := context.WithTimeout(ctx, 1*time.Second) defer cancel() @@ -593,9 +602,9 @@ func needInnoDBRedoLogSubdir() (needIt bool, err error) { return needIt, err } versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - _, capableOf, _ := mysql.GetFlavor(versionStr, nil) + capableOf := mysql.ServerVersionCapableOf(versionStr) if capableOf == nil { return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) } - return capableOf(mysql.DynamicRedoLogCapacityFlavorCapability) + return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability) } diff --git a/go/vt/mysqlctl/backup_test.go b/go/vt/mysqlctl/backup_test.go index 5b97f709c2f..d1d6a73b7bd 100644 --- a/go/vt/mysqlctl/backup_test.go +++ b/go/vt/mysqlctl/backup_test.go @@ -29,8 +29,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/mysql/replication" @@ -42,6 +44,8 @@ import ( "vitess.io/vitess/go/vt/mysqlctl/backupstorage" ) +const mysqlShutdownTimeout = 1 * time.Minute + // TestBackupExecutesBackupWithScopedParams tests that Backup passes // a Scope()-ed stats to backupengine ExecuteBackup. 
func TestBackupExecutesBackupWithScopedParams(t *testing.T) { @@ -146,9 +150,8 @@ func TestFindFilesToBackupWithoutRedoLog(t *testing.T) { rocksdbDir := path.Join(dataDir, ".rocksdb") sdiOnlyDir := path.Join(dataDir, "sdi_dir") for _, s := range []string{innodbDataDir, innodbLogDir, dataDbDir, extraDir, outsideDbDir, rocksdbDir, sdiOnlyDir} { - if err := os.MkdirAll(s, os.ModePerm); err != nil { - t.Fatalf("failed to create directory %v: %v", s, err) - } + err := os.MkdirAll(s, os.ModePerm) + require.NoErrorf(t, err, "failed to create directory %v: %v", s, err) } innodbLogFile := "innodb_log_1" @@ -420,18 +423,6 @@ func TestRestoreManifestMySQLVersionValidation(t *testing.T) { upgradeSafe bool wantErr bool }{ - { - fromVersion: "mysqld Ver 5.6.42", - toVersion: "mysqld Ver 5.7.40", - upgradeSafe: false, - wantErr: true, - }, - { - fromVersion: "mysqld Ver 5.6.42", - toVersion: "mysqld Ver 5.7.40", - upgradeSafe: true, - wantErr: false, - }, { fromVersion: "mysqld Ver 5.7.42", toVersion: "mysqld Ver 8.0.32", @@ -563,7 +554,7 @@ func createFakeBackupRestoreEnv(t *testing.T) *fakeBackupRestoreEnv { sqldb := fakesqldb.New(t) sqldb.SetNeverFail(true) mysqld := NewFakeMysqlDaemon(sqldb) - require.Nil(t, mysqld.Shutdown(ctx, nil, false)) + require.Nil(t, mysqld.Shutdown(ctx, nil, false, mysqlShutdownTimeout)) dirName, err := os.MkdirTemp("", "vt_backup_test") require.Nil(t, err) @@ -575,33 +566,35 @@ func createFakeBackupRestoreEnv(t *testing.T) *fakeBackupRestoreEnv { stats := backupstats.NewFakeStats() backupParams := BackupParams{ - Cnf: cnf, - Logger: logger, - Mysqld: mysqld, - Concurrency: 1, - HookExtraEnv: map[string]string{}, - TopoServer: nil, - Keyspace: "test", - Shard: "-", - BackupTime: time.Now(), - IncrementalFromPos: "", - Stats: stats, + Cnf: cnf, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + TopoServer: nil, + Keyspace: "test", + Shard: "-", + BackupTime: time.Now(), + IncrementalFromPos: "", + Stats: 
stats, + MysqlShutdownTimeout: mysqlShutdownTimeout, } restoreParams := RestoreParams{ - Cnf: cnf, - Logger: logger, - Mysqld: mysqld, - Concurrency: 1, - HookExtraEnv: map[string]string{}, - DeleteBeforeRestore: false, - DbName: "test", - Keyspace: "test", - Shard: "-", - StartTime: time.Now(), - RestoreToPos: replication.Position{}, - DryRun: false, - Stats: stats, + Cnf: cnf, + Logger: logger, + Mysqld: mysqld, + Concurrency: 1, + HookExtraEnv: map[string]string{}, + DeleteBeforeRestore: false, + DbName: "test", + Keyspace: "test", + Shard: "-", + StartTime: time.Now(), + RestoreToPos: replication.Position{}, + DryRun: false, + Stats: stats, + MysqlShutdownTimeout: mysqlShutdownTimeout, } manifest := BackupManifest{ @@ -674,3 +667,50 @@ func (fbe *fakeBackupRestoreEnv) setStats(stats *backupstats.FakeStats) { fbe.restoreParams.Stats = nil fbe.stats = nil } + +func TestParseBackupName(t *testing.T) { + // backup name doesn't contain 3 parts + _, _, err := ParseBackupName("dir", "asd.saddsa") + assert.ErrorContains(t, err, "cannot backup name") + + // Invalid time + bt, al, err := ParseBackupName("dir", "2024-03-18.123.tablet_id") + assert.Nil(t, bt) + assert.Nil(t, al) + assert.NoError(t, err) + + // Valid case + bt, al, err = ParseBackupName("dir", "2024-03-18.180911.cell1-42") + assert.NotNil(t, *bt, time.Date(2024, 03, 18, 18, 9, 11, 0, time.UTC)) + assert.Equal(t, "cell1", al.Cell) + assert.Equal(t, uint32(42), al.Uid) + assert.NoError(t, err) +} + +func TestShouldRestore(t *testing.T) { + env := createFakeBackupRestoreEnv(t) + + b, err := ShouldRestore(env.ctx, env.restoreParams) + assert.False(t, b) + assert.Error(t, err) + + env.restoreParams.DeleteBeforeRestore = true + b, err = ShouldRestore(env.ctx, env.restoreParams) + assert.True(t, b) + assert.NoError(t, err) + env.restoreParams.DeleteBeforeRestore = false + + env.mysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ + "SHOW DATABASES": {Rows: 
[][]sqltypes.Value{{sqltypes.NewVarBinary("any_db")}}}, + } + b, err = ShouldRestore(env.ctx, env.restoreParams) + assert.NoError(t, err) + assert.True(t, b) + + env.mysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ + "SHOW DATABASES": {Rows: [][]sqltypes.Value{{sqltypes.NewVarBinary("test")}}}, + } + b, err = ShouldRestore(env.ctx, env.restoreParams) + assert.False(t, b) + assert.NoError(t, err) +} diff --git a/go/vt/mysqlctl/backupengine.go b/go/vt/mysqlctl/backupengine.go index 5a79edbdde0..c483aff3d78 100644 --- a/go/vt/mysqlctl/backupengine.go +++ b/go/vt/mysqlctl/backupengine.go @@ -45,9 +45,17 @@ var ( backupEngineImplementation = builtinBackupEngineName ) +type BackupResult int + +const ( + BackupUnusable BackupResult = iota + BackupEmpty + BackupUsable +) + // BackupEngine is the interface to take a backup with a given engine. type BackupEngine interface { - ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) + ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (BackupResult, error) ShouldDrainForBackup(req *tabletmanagerdatapb.BackupRequest) bool } @@ -77,23 +85,26 @@ type BackupParams struct { Stats backupstats.Stats // UpgradeSafe indicates whether the backup is safe for upgrade and created with innodb_fast_shutdown=0 UpgradeSafe bool + // MysqlShutdownTimeout defines how long we wait during MySQL shutdown if that is part of the backup process. 
+ MysqlShutdownTimeout time.Duration } func (b *BackupParams) Copy() BackupParams { return BackupParams{ - Cnf: b.Cnf, - Mysqld: b.Mysqld, - Logger: b.Logger, - Concurrency: b.Concurrency, - HookExtraEnv: b.HookExtraEnv, - TopoServer: b.TopoServer, - Keyspace: b.Keyspace, - Shard: b.Shard, - TabletAlias: b.TabletAlias, - BackupTime: b.BackupTime, - IncrementalFromPos: b.IncrementalFromPos, - Stats: b.Stats, - UpgradeSafe: b.UpgradeSafe, + Cnf: b.Cnf, + Mysqld: b.Mysqld, + Logger: b.Logger, + Concurrency: b.Concurrency, + HookExtraEnv: b.HookExtraEnv, + TopoServer: b.TopoServer, + Keyspace: b.Keyspace, + Shard: b.Shard, + TabletAlias: b.TabletAlias, + BackupTime: b.BackupTime, + IncrementalFromPos: b.IncrementalFromPos, + Stats: b.Stats, + UpgradeSafe: b.UpgradeSafe, + MysqlShutdownTimeout: b.MysqlShutdownTimeout, } } @@ -130,24 +141,27 @@ type RestoreParams struct { DryRun bool // Stats let's restore engines report detailed restore timings. Stats backupstats.Stats + // MysqlShutdownTimeout defines how long we wait during MySQL shutdown if that is part of the backup process. 
+ MysqlShutdownTimeout time.Duration } func (p *RestoreParams) Copy() RestoreParams { return RestoreParams{ - Cnf: p.Cnf, - Mysqld: p.Mysqld, - Logger: p.Logger, - Concurrency: p.Concurrency, - HookExtraEnv: p.HookExtraEnv, - DeleteBeforeRestore: p.DeleteBeforeRestore, - DbName: p.DbName, - Keyspace: p.Keyspace, - Shard: p.Shard, - StartTime: p.StartTime, - RestoreToPos: p.RestoreToPos, - RestoreToTimestamp: p.RestoreToTimestamp, - DryRun: p.DryRun, - Stats: p.Stats, + Cnf: p.Cnf, + Mysqld: p.Mysqld, + Logger: p.Logger, + Concurrency: p.Concurrency, + HookExtraEnv: p.HookExtraEnv, + DeleteBeforeRestore: p.DeleteBeforeRestore, + DbName: p.DbName, + Keyspace: p.Keyspace, + Shard: p.Shard, + StartTime: p.StartTime, + RestoreToPos: p.RestoreToPos, + RestoreToTimestamp: p.RestoreToTimestamp, + DryRun: p.DryRun, + Stats: p.Stats, + MysqlShutdownTimeout: p.MysqlShutdownTimeout, } } @@ -263,6 +277,9 @@ type IncrementalBackupDetails struct { // their own custom fields by embedding this struct anonymously into their own // custom struct, as long as their custom fields don't have conflicting names. type BackupManifest struct { + // BackupName is the name of the backup, which is also the name of the directory + BackupName string + // BackupMethod is the name of the backup engine that created this backup. // If this is empty, the backup engine is assumed to be "builtin" since that // was the only engine that ever left this field empty. 
All new backup @@ -402,9 +419,9 @@ func (p *RestorePath) String() string { return sb.String() } -// FindLatestSuccessfulBackup returns the handle and manifest for the last good backup, +// findLatestSuccessfulBackup returns the handle and manifest for the last good backup, // which can be either full or increment -func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle, excludeBackupName string) (backupstorage.BackupHandle, *BackupManifest, error) { +func findLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs []backupstorage.BackupHandle, excludeBackupName string) (backupstorage.BackupHandle, *BackupManifest, error) { for index := len(bhs) - 1; index >= 0; index-- { bh := bhs[index] if bh.Name() == excludeBackupName { @@ -425,8 +442,8 @@ func FindLatestSuccessfulBackup(ctx context.Context, logger logutil.Logger, bhs return nil, nil, ErrNoCompleteBackup } -// FindLatestSuccessfulBackupPosition returns the position of the last known successful backup -func FindLatestSuccessfulBackupPosition(ctx context.Context, params BackupParams, excludeBackupName string) (backupName string, pos replication.Position, err error) { +// findLatestSuccessfulBackupPosition returns the position of the last known successful backup +func findLatestSuccessfulBackupPosition(ctx context.Context, params BackupParams, excludeBackupName string) (backupName string, pos replication.Position, err error) { bs, err := backupstorage.GetBackupStorage() if err != nil { return "", pos, err @@ -440,7 +457,7 @@ func FindLatestSuccessfulBackupPosition(ctx context.Context, params BackupParams if err != nil { return "", pos, vterrors.Wrap(err, "ListBackups failed") } - bh, manifest, err := FindLatestSuccessfulBackup(ctx, params.Logger, bhs, excludeBackupName) + bh, manifest, err := findLatestSuccessfulBackup(ctx, params.Logger, bhs, excludeBackupName) if err != nil { return "", pos, vterrors.Wrap(err, "FindLatestSuccessfulBackup failed") 
} @@ -448,6 +465,32 @@ func FindLatestSuccessfulBackupPosition(ctx context.Context, params BackupParams return bh.Name(), pos, nil } +// findBackupPosition returns the position of a given backup, assuming the backup exists. +func findBackupPosition(ctx context.Context, params BackupParams, backupName string) (pos replication.Position, err error) { + bs, err := backupstorage.GetBackupStorage() + if err != nil { + return pos, err + } + defer bs.Close() + + backupDir := GetBackupDir(params.Keyspace, params.Shard) + bhs, err := bs.ListBackups(ctx, backupDir) + if err != nil { + return pos, vterrors.Wrap(err, "ListBackups failed") + } + for _, bh := range bhs { + if bh.Name() != backupName { + continue + } + manifest, err := GetBackupManifest(ctx, bh) + if err != nil { + return pos, vterrors.Wrapf(err, "GetBackupManifest failed for backup: %v", backupName) + } + return manifest.Position, nil + } + return pos, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "could not find backup %q for %s/%s", backupName, params.Keyspace, params.Shard) +} + // FindBackupToRestore returns a path, a sequence of backup handles, to be restored. // The returned handles stand for valid backups with complete manifests. func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backupstorage.BackupHandle) (restorePath *RestorePath, err error) { @@ -549,6 +592,15 @@ func FindBackupToRestore(ctx context.Context, params RestoreParams, bhs []backup return restorePath, nil } +// See https://github.com/mysql/mysql-server/commit/9a940abe085fc75e1ffe7b72286927fdc9f11207 for the +// importance of this specific version and why downgrades within patches are allowed since that version. 
+var mysql8035 = ServerVersion{Major: 8, Minor: 0, Patch: 35} +var ltsVersions = []ServerVersion{ + {Major: 5, Minor: 7, Patch: 0}, + {Major: 8, Minor: 0, Patch: 0}, + {Major: 8, Minor: 4, Patch: 0}, +} + func validateMySQLVersionUpgradeCompatible(to string, from string, upgradeSafe bool) error { // It's always safe to use the same version. if to == from { @@ -573,6 +625,48 @@ func validateMySQLVersionUpgradeCompatible(to string, from string, upgradeSafe b return nil } + // If we're not on the same LTS stream, we have to do additional checks to see if it's safe + // to upgrade. It can only be one newer LTS version for the destination and the backup + // has to be marked as upgrade safe. + + // If something is across different LTS streams and not upgrade safe, we can't use it. + if !parsedFrom.isSameRelease(parsedTo) { + if !upgradeSafe { + if parsedTo.atLeast(parsedFrom) { + return fmt.Errorf("running MySQL version %q is newer than backup MySQL version %q which is not safe to upgrade", to, from) + } + return fmt.Errorf("running MySQL version %q is older than backup MySQL version %q", to, from) + } + + // Alright, we're across different LTS streams and the backup is upgrade safe. + // We can only upgrade to the next LTS version.
+ for i, ltsVersion := range ltsVersions { + if parsedFrom.isSameRelease(ltsVersion) { + if i < len(ltsVersions)-1 && parsedTo.isSameRelease(ltsVersions[i+1]) { + return nil + } + if parsedTo.atLeast(parsedFrom) { + return fmt.Errorf("running MySQL version %q is too new for backup MySQL version %q", to, from) + } + return fmt.Errorf("running MySQL version %q is older than backup MySQL version %q", to, from) + } + } + if parsedTo.atLeast(parsedFrom) { + return fmt.Errorf("running MySQL version %q is newer than backup MySQL version %q which is not safe to upgrade", to, from) + } + return fmt.Errorf("running MySQL version %q is older than backup MySQL version %q", to, from) + } + + // At this point we know the versions are not the same, but we're within the same version stream + // and only the patch version number mismatches. + + // Starting with MySQL 8.0.35, the data dictionary format is stable for 8.0.x, so we can upgrade + // from 8.0.35 or later here, also if the backup was taken with innodb_fast_shutdown=0. + // This also applies for any version newer like 8.4.x.
+ if parsedFrom.atLeast(mysql8035) && parsedTo.atLeast(mysql8035) { + return nil + } + if !parsedTo.atLeast(parsedFrom) { return fmt.Errorf("running MySQL version %q is older than backup MySQL version %q", to, from) } @@ -584,10 +678,10 @@ func validateMySQLVersionUpgradeCompatible(to string, from string, upgradeSafe b return fmt.Errorf("running MySQL version %q is newer than backup MySQL version %q which is not safe to upgrade", to, from) } -func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger) error { +func prepareToRestore(ctx context.Context, cnf *Mycnf, mysqld MysqlDaemon, logger logutil.Logger, mysqlShutdownTimeout time.Duration) error { // shutdown mysqld if it is running logger.Infof("Restore: shutdown mysqld") - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + if err := mysqld.Shutdown(ctx, cnf, true, mysqlShutdownTimeout); err != nil { return err } diff --git a/go/vt/mysqlctl/backupengine_test.go b/go/vt/mysqlctl/backupengine_test.go new file mode 100644 index 00000000000..77a02662460 --- /dev/null +++ b/go/vt/mysqlctl/backupengine_test.go @@ -0,0 +1,215 @@ +package mysqlctl + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestValidateMySQLVersionUpgradeCompatible(t *testing.T) { + // Test that the MySQL version is compatible with the upgrade. 
+ testCases := []struct { + name string + fromVersion string + toVersion string + upgradeSafe bool + error string + }{ + { + name: "upgrade from 5.7 to 8.0", + fromVersion: "mysqld Ver 5.7.35", + toVersion: "mysqld Ver 8.0.23", + upgradeSafe: true, + }, + { + name: "downgrade from 8.0 to 5.7", + fromVersion: "mysqld Ver 8.0.23", + toVersion: "mysqld Ver 5.7.35", + upgradeSafe: true, + error: `running MySQL version "mysqld Ver 5.7.35" is older than backup MySQL version "mysqld Ver 8.0.23"`, + }, + { + name: "upgrade from 5.7 to 8.0", + fromVersion: "mysqld Ver 5.7.35", + toVersion: "mysqld Ver 8.0.23", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.0.23" is newer than backup MySQL version "mysqld Ver 5.7.35" which is not safe to upgrade`, + }, + { + name: "downgrade from 8.0 to 5.7", + fromVersion: "mysqld Ver 8.0.23", + toVersion: "mysqld Ver 5.7.35", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 5.7.35" is older than backup MySQL version "mysqld Ver 8.0.23"`, + }, + { + name: "upgrade from 8.0.23 to 8.0.34", + fromVersion: "mysqld Ver 8.0.23", + toVersion: "mysqld Ver 8.0.34", + upgradeSafe: true, + }, + { + name: "downgrade from 8.0.34 to 8.0.23", + fromVersion: "mysqld Ver 8.0.34", + toVersion: "mysqld Ver 8.0.23", + upgradeSafe: true, + error: `running MySQL version "mysqld Ver 8.0.23" is older than backup MySQL version "mysqld Ver 8.0.34"`, + }, + { + name: "upgrade from 8.0.23 to 8.0.34", + fromVersion: "mysqld Ver 8.0.23", + toVersion: "mysqld Ver 8.0.34", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.0.34" is newer than backup MySQL version "mysqld Ver 8.0.23" which is not safe to upgrade`, + }, + { + name: "downgrade from 8.0.34 to 8.0.23", + fromVersion: "mysqld Ver 8.0.34", + toVersion: "mysqld Ver 8.0.23", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.0.23" is older than backup MySQL version "mysqld Ver 8.0.34"`, + }, + { + name: "upgrade from 8.0.32 to 8.0.36", + 
fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.36", + upgradeSafe: true, + }, + { + name: "downgrade from 8.0.36 to 8.0.32", + fromVersion: "mysqld Ver 8.0.36", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + error: `running MySQL version "mysqld Ver 8.0.32" is older than backup MySQL version "mysqld Ver 8.0.36"`, + }, + { + name: "upgrade from 8.0.32 to 8.0.36", + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.0.36", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.0.36" is newer than backup MySQL version "mysqld Ver 8.0.32" which is not safe to upgrade`, + }, + { + name: "downgrade from 8.0.36 to 8.0.32", + fromVersion: "mysqld Ver 8.0.36", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.0.32" is older than backup MySQL version "mysqld Ver 8.0.36"`, + }, + { + name: "upgrade from 8.0.35 to 8.0.36", + fromVersion: "mysqld Ver 8.0.35", + toVersion: "mysqld Ver 8.0.36", + upgradeSafe: true, + }, + { + name: "downgrade from 8.0.36 to 8.0.35", + fromVersion: "mysqld Ver 8.0.36", + toVersion: "mysqld Ver 8.0.35", + upgradeSafe: true, + }, + { + name: "upgrade from 8.0.35 to 8.0.36", + fromVersion: "mysqld Ver 8.0.35", + toVersion: "mysqld Ver 8.0.36", + upgradeSafe: false, + }, + { + name: "downgrade from 8.0.36 to 8.0.35", + fromVersion: "mysqld Ver 8.0.36", + toVersion: "mysqld Ver 8.0.35", + upgradeSafe: false, + }, + { + name: "upgrade from 8.4.0 to 8.4.1", + fromVersion: "mysqld Ver 8.4.0", + toVersion: "mysqld Ver 8.4.1", + upgradeSafe: true, + }, + { + name: "downgrade from 8.4.1 to 8.4.0", + fromVersion: "mysqld Ver 8.4.1", + toVersion: "mysqld Ver 8.4.0", + upgradeSafe: true, + }, + { + name: "upgrade from 8.4.0 to 8.4.1", + fromVersion: "mysqld Ver 8.4.0", + toVersion: "mysqld Ver 8.4.1", + upgradeSafe: false, + }, + { + name: "downgrade from 8.4.1 to 8.4.0", + fromVersion: "mysqld Ver 8.4.1", + toVersion: "mysqld Ver 8.4.0", + upgradeSafe: false, + 
}, + { + name: "upgrade from 8.0.35 to 8.4.0", + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.4.0", + upgradeSafe: true, + }, + { + name: "downgrade from 8.4.0 to 8.0.32", + fromVersion: "mysqld Ver 8.4.0", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: true, + error: `running MySQL version "mysqld Ver 8.0.32" is older than backup MySQL version "mysqld Ver 8.4.0"`, + }, + { + name: "upgrade from 8.0.32 to 8.4.0", + fromVersion: "mysqld Ver 8.0.32", + toVersion: "mysqld Ver 8.4.0", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.4.0" is newer than backup MySQL version "mysqld Ver 8.0.32" which is not safe to upgrade`, + }, + { + name: "downgrade from 8.4.0 to 8.0.32", + fromVersion: "mysqld Ver 8.4.0", + toVersion: "mysqld Ver 8.0.32", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.0.32" is older than backup MySQL version "mysqld Ver 8.4.0"`, + }, + { + name: "upgrade from 5.7.35 to 8.4.0", + fromVersion: "mysqld Ver 5.7.32", + toVersion: "mysqld Ver 8.4.0", + upgradeSafe: true, + error: `running MySQL version "mysqld Ver 8.4.0" is too new for backup MySQL version "mysqld Ver 5.7.32"`, + }, + { + name: "downgrade from 8.4.0 to 5.7.32", + fromVersion: "mysqld Ver 8.4.0", + toVersion: "mysqld Ver 5.7.32", + upgradeSafe: true, + error: `running MySQL version "mysqld Ver 5.7.32" is older than backup MySQL version "mysqld Ver 8.4.0"`, + }, + { + name: "upgrade from 5.7.32 to 8.4.0", + fromVersion: "mysqld Ver 5.7.32", + toVersion: "mysqld Ver 8.4.0", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 8.4.0" is newer than backup MySQL version "mysqld Ver 5.7.32" which is not safe to upgrade`, + }, + { + name: "downgrade from 8.4.0 to 5.7.32", + fromVersion: "mysqld Ver 8.4.0", + toVersion: "mysqld Ver 5.7.32", + upgradeSafe: false, + error: `running MySQL version "mysqld Ver 5.7.32" is older than backup MySQL version "mysqld Ver 8.4.0"`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, 
func(t *testing.T) { + err := validateMySQLVersionUpgradeCompatible(tc.toVersion, tc.fromVersion, tc.upgradeSafe) + if tc.error == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.error) + } + }) + } + +} diff --git a/go/vt/mysqlctl/binlogs_gtid.go b/go/vt/mysqlctl/binlogs_gtid.go index 3ea48663578..61ca0a87f70 100644 --- a/go/vt/mysqlctl/binlogs_gtid.go +++ b/go/vt/mysqlctl/binlogs_gtid.go @@ -104,8 +104,7 @@ func ChooseBinlogsForIncrementalBackup( // The other thing to validate, is that we can't allow a situation where the backup-GTIDs have entries not covered // by our binary log's Previous-GTIDs (padded with purged GTIDs). Because that means we can't possibly restore to // such position. - prevGTIDsUnionPurged := prevGTIDsUnion.Union(purgedGTIDSet) - if !prevGTIDsUnionPurged.Contains(backupFromGTIDSet) { + if prevGTIDsUnionPurged := prevGTIDsUnion.Union(purgedGTIDSet); !prevGTIDsUnionPurged.Contains(backupFromGTIDSet) { return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "Mismatching GTID entries. Requested backup pos has entries not found in the binary logs, and binary logs have entries not found in the requested backup pos. Neither fully contains the other.\n- Requested pos=%v\n- binlog pos=%v\n- purgedGTIDSet=%v\n- union=%v\n- union purged=%v", backupFromGTIDSet, previousGTIDsPos.GTIDSet, purgedGTIDSet, prevGTIDsUnion, prevGTIDsUnionPurged) @@ -133,7 +132,16 @@ func ChooseBinlogsForIncrementalBackup( } return binaryLogsToBackup, incrementalBackupFromGTID, incrementalBackupToGTID, nil } - return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "no binary logs to backup (increment is empty)") + if prevGTIDsUnion.Union(purgedGTIDSet).Equal(backupFromGTIDSet) { + // This means we've iterated over all binary logs, and as it turns out, the backup pos is + // identical to the Previous-GTIDs of the last binary log. 
But, we also know that we ourselves + // have flushed the binary logs so as to generate the new (now last) binary log. + // Which means, from the Pos of the backup till the time we issued FLUSH BINARY LOGS, there + // were no new GTID entries. The database performed no writes during that period, + // so we have no entries to backup and the backup is therefore empty. + return nil, "", "", nil + } + return nil, "", "", vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot find binary logs that cover requested GTID range. backupFromGTIDSet=%v, prevGTIDsUnion=%v", backupFromGTIDSet.String(), prevGTIDsUnion.String()) } // IsValidIncrementalBakcup determines whether the given manifest can be used to extend a backup diff --git a/go/vt/mysqlctl/binlogs_gtid_test.go b/go/vt/mysqlctl/binlogs_gtid_test.go index 655208e908e..ec1c220fd39 100644 --- a/go/vt/mysqlctl/binlogs_gtid_test.go +++ b/go/vt/mysqlctl/binlogs_gtid_test.go @@ -85,13 +85,13 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { name: "last binlog excluded, no binlogs found", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-331", - expectError: "no binary logs to backup", + expectBinlogs: nil, }, { name: "backup pos beyond all binlogs", previousGTIDs: basePreviousGTIDs, backupPos: "16b1039f-22b6-11ed-b765-0a43f95f28a3:1-630000", - expectError: "no binary logs to backup", + expectError: "cannot find binary logs that cover requested GTID range", }, { name: "missing GTID entries", @@ -294,13 +294,14 @@ func TestChooseBinlogsForIncrementalBackup(t *testing.T) { return } require.NoError(t, err) - require.NotEmpty(t, binlogsToBackup) assert.Equal(t, tc.expectBinlogs, binlogsToBackup) - if tc.previousGTIDs[binlogsToBackup[0]] != "" { - assert.Equal(t, tc.previousGTIDs[binlogsToBackup[0]], fromGTID) + if len(binlogsToBackup) > 0 { + if tc.previousGTIDs[binlogsToBackup[0]] != "" { + assert.Equal(t, tc.previousGTIDs[binlogsToBackup[0]], fromGTID) + } + assert.Equal(t, 
tc.previousGTIDs[binlogs[len(binlogs)-1]], toGTID) + assert.NotEqual(t, fromGTID, toGTID) } - assert.Equal(t, tc.previousGTIDs[binlogs[len(binlogs)-1]], toGTID) - assert.NotEqual(t, fromGTID, toGTID) }) } } diff --git a/go/vt/mysqlctl/builtinbackupengine.go b/go/vt/mysqlctl/builtinbackupengine.go index b56f93bfd22..3bc39e45e72 100644 --- a/go/vt/mysqlctl/builtinbackupengine.go +++ b/go/vt/mysqlctl/builtinbackupengine.go @@ -58,7 +58,7 @@ import ( const ( builtinBackupEngineName = "builtin" - autoIncrementalFromPos = "auto" + AutoIncrementalFromPos = "auto" dataDictionaryFile = "mysql.ibd" ) @@ -80,6 +80,12 @@ var ( // engines during backups. The backupstorage may be a physical file, // network, or something else. builtinBackupStorageWriteBufferSize = 2 * 1024 * 1024 /* 2 MiB */ + + // The directory where incremental restore files, namely binlog files, are extracted to. + // In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. + // The path should exist. + // When empty, the default OS temp dir is assumed. + builtinIncrementalRestorePath = "" ) // BuiltinBackupEngine encapsulates the logic of the builtin engine @@ -157,6 +163,7 @@ func registerBuiltinBackupEngineFlags(fs *pflag.FlagSet) { fs.DurationVar(&builtinBackupProgress, "builtinbackup_progress", builtinBackupProgress, "how often to send progress updates when backing up large files.") fs.UintVar(&builtinBackupFileReadBufferSize, "builtinbackup-file-read-buffer-size", builtinBackupFileReadBufferSize, "read files using an IO buffer of this many bytes. Golang defaults are used when set to 0.") fs.UintVar(&builtinBackupFileWriteBufferSize, "builtinbackup-file-write-buffer-size", builtinBackupFileWriteBufferSize, "write files using an IO buffer of this many bytes. 
Golang defaults are used when set to 0.") + fs.StringVar(&builtinIncrementalRestorePath, "builtinbackup-incremental-restore-path", builtinIncrementalRestorePath, "the directory where incremental restore files, namely binlog files, are extracted to. In k8s environments, this should be set to a directory that is shared between the vttablet and mysqld pods. The path should exist. When empty, the default OS temp dir is assumed.") } // fullPath returns the full path of the entry, based on its type @@ -203,8 +210,8 @@ func (fe *FileEntry) open(cnf *Mycnf, readOnly bool) (*os.File, error) { } // ExecuteBackup runs a backup based on given params. This could be a full or incremental backup. -// The function returns a boolean that indicates if the backup is usable, and an overall error. -func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { +// The function returns a BackupResult that indicates the usability of the backup, and an overall error. 
+func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (BackupResult, error) { params.Logger.Infof("Executing Backup at %v for keyspace/shard %v/%v on tablet %v, concurrency: %v, compress: %v, incrementalFromPos: %v", params.BackupTime, params.Keyspace, params.Shard, params.TabletAlias, params.Concurrency, backupStorageCompress, params.IncrementalFromPos) @@ -216,66 +223,63 @@ func (be *BuiltinBackupEngine) ExecuteBackup(ctx context.Context, params BackupP // getIncrementalFromPosGTIDSet turns the given string into a valid Mysql56GTIDSet func getIncrementalFromPosGTIDSet(incrementalFromPos string) (replication.Mysql56GTIDSet, error) { - pos, err := replication.DecodePositionDefaultFlavor(incrementalFromPos, replication.Mysql56FlavorID) + _, gtidSet, err := replication.DecodePositionMySQL56(incrementalFromPos) if err != nil { return nil, vterrors.Wrapf(err, "cannot decode position in incremental backup: %v", incrementalFromPos) } - if !pos.MatchesFlavor(replication.Mysql56FlavorID) { - return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "incremental backup only supports MySQL GTID positions. Got: %v", incrementalFromPos) - } - ifPosGTIDSet, ok := pos.GTIDSet.(replication.Mysql56GTIDSet) - if !ok { - return nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID value: %v", pos) - } - return ifPosGTIDSet, nil + return gtidSet, nil } // executeIncrementalBackup runs an incremental backup, based on given 'incremental_from_pos', which can be: // - A valid position // - "auto", indicating the incremental backup should begin with last successful backup end position. -func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { +// The function returns a BackupResult that indicates the usability of the backup, and an overall error. 
+func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (BackupResult, error) { // Collect MySQL status: // UUID serverUUID, err := params.Mysqld.GetServerUUID(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get server uuid") + return BackupUnusable, vterrors.Wrap(err, "can't get server uuid") } mysqlVersion, err := params.Mysqld.GetVersionString(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get MySQL version") + return BackupUnusable, vterrors.Wrap(err, "can't get MySQL version") } + // We now need to figure out the GTIDSet from which we want to take the incremental backup. The user may have + // specified a position, or they may have specified "auto", or they may have specified a backup name, in which + // case we need to find the position of that backup. var fromBackupName string - if params.IncrementalFromPos == autoIncrementalFromPos { + if params.IncrementalFromPos == AutoIncrementalFromPos { + // User has supplied "auto". params.Logger.Infof("auto evaluating incremental_from_pos") - backupName, pos, err := FindLatestSuccessfulBackupPosition(ctx, params, bh.Name()) + backupName, pos, err := findLatestSuccessfulBackupPosition(ctx, params, bh.Name()) if err != nil { - return false, err + return BackupUnusable, err } fromBackupName = backupName params.IncrementalFromPos = replication.EncodePosition(pos) params.Logger.Infof("auto evaluated incremental_from_pos: %s", params.IncrementalFromPos) } - // @@gtid_purged - getPurgedGTIDSet := func() (replication.Position, replication.Mysql56GTIDSet, error) { - gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx) + if _, _, err := replication.DecodePositionMySQL56(params.IncrementalFromPos); err != nil { + // This does not seem to be a valid position. Maybe it's a backup name? 
+ backupName := params.IncrementalFromPos + pos, err := findBackupPosition(ctx, params, backupName) if err != nil { - return gtidPurged, nil, vterrors.Wrap(err, "can't get @@gtid_purged") - } - purgedGTIDSet, ok := gtidPurged.GTIDSet.(replication.Mysql56GTIDSet) - if !ok { - return gtidPurged, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "cannot get MySQL GTID purged value: %v", gtidPurged) + return BackupUnusable, err } - return gtidPurged, purgedGTIDSet, nil + fromBackupName = backupName + params.IncrementalFromPos = replication.EncodePosition(pos) + params.Logger.Infof("evaluated incremental_from_pos using backup name %q: %s", backupName, params.IncrementalFromPos) } // params.IncrementalFromPos is a string. We want to turn that into a MySQL GTID backupFromGTIDSet, err := getIncrementalFromPosGTIDSet(params.IncrementalFromPos) if err != nil { - return false, err + return BackupUnusable, err } - // OK, we now have the formal MySQL GTID from which we want to take the incremental backip. + // OK, we now have the formal MySQL GTID from which we want to take the incremental backup. // binlogs may not contain information about purged GTIDs. e.g. some binlog.000003 may have // previous GTIDs like 00021324-1111-1111-1111-111111111111:30-60, ie 1-29 range is missing. 
This can happen @@ -285,18 +289,30 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par // ignore the purged GTIDs: if err := params.Mysqld.FlushBinaryLogs(ctx); err != nil { - return false, vterrors.Wrapf(err, "cannot flush binary logs in incremental backup") + return BackupUnusable, vterrors.Wrapf(err, "cannot flush binary logs in incremental backup") } binaryLogs, err := params.Mysqld.GetBinaryLogs(ctx) if err != nil { - return false, vterrors.Wrapf(err, "cannot get binary logs in incremental backup") + return BackupUnusable, vterrors.Wrapf(err, "cannot get binary logs in incremental backup") + } + + getPurgedGTIDSet := func() (replication.Position, replication.Mysql56GTIDSet, error) { + gtidPurged, err := params.Mysqld.GetGTIDPurged(ctx) + if err != nil { + return gtidPurged, nil, vterrors.Wrap(err, "can't get @@gtid_purged") + } + purgedGTIDSet, ok := gtidPurged.GTIDSet.(replication.Mysql56GTIDSet) + if !ok { + return gtidPurged, nil, vterrors.Errorf(vtrpc.Code_FAILED_PRECONDITION, "failed to parse a valid MySQL GTID set from value: %v", gtidPurged) + } + return gtidPurged, purgedGTIDSet, nil } // gtid_purged is important information. The restore flow uses this info to to complement binary logs' Previous-GTIDs. // It is important to only get gtid_purged _after_ we've rotated into the new binary log, because the `FLUSH BINARY LOGS` // command may also purge old logs, hence affecting the value of gtid_purged. 
gtidPurged, purgedGTIDSet, err := getPurgedGTIDSet() if err != nil { - return false, err + return BackupUnusable, err } previousGTIDs := map[string]string{} getBinlogPreviousGTIDs := func(ctx context.Context, binlog string) (gtids string, err error) { @@ -314,15 +330,19 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par } binaryLogsToBackup, incrementalBackupFromGTID, incrementalBackupToGTID, err := ChooseBinlogsForIncrementalBackup(ctx, backupFromGTIDSet, purgedGTIDSet, binaryLogs, getBinlogPreviousGTIDs) if err != nil { - return false, vterrors.Wrapf(err, "cannot get binary logs to backup in incremental backup") + return BackupUnusable, vterrors.Wrapf(err, "cannot get binary logs to backup in incremental backup") + } + if len(binaryLogsToBackup) == 0 { + // Empty backup. + return BackupEmpty, nil } incrementalBackupFromPosition, err := replication.ParsePosition(replication.Mysql56FlavorID, incrementalBackupFromGTID) if err != nil { - return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupFromGTID) + return BackupUnusable, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupFromGTID) } incrementalBackupToPosition, err := replication.ParsePosition(replication.Mysql56FlavorID, incrementalBackupToGTID) if err != nil { - return false, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupToGTID) + return BackupUnusable, vterrors.Wrapf(err, "cannot parse position %v", incrementalBackupToGTID) } // The backup position is the GTISset of the last binary log (taken from Previous-GTIDs of the one-next binary log), and we // also include gtid_purged ; this complies with the "standard" way MySQL "thinks" about GTIDs: there's gtid_executed, which includes @@ -337,16 +357,16 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par fe := FileEntry{Base: backupBinlogDir, Name: binlogFile} fullPath, err := fe.fullPath(params.Cnf) if err != nil { - return false, err + return 
BackupUnusable, err } req.BinlogFileNames = append(req.BinlogFileNames, fullPath) } resp, err := params.Mysqld.ReadBinlogFilesTimestamps(ctx, req) if err != nil { - return false, vterrors.Wrapf(err, "reading timestamps from binlog files %v", binaryLogsToBackup) + return BackupUnusable, vterrors.Wrapf(err, "reading timestamps from binlog files %v", binaryLogsToBackup) } if resp.FirstTimestampBinlog == "" || resp.LastTimestampBinlog == "" { - return false, vterrors.Errorf(vtrpc.Code_ABORTED, "empty binlog name in response. Request=%v, Response=%v", req, resp) + return BackupUnusable, vterrors.Errorf(vtrpc.Code_ABORTED, "empty binlog name in response. Request=%v, Response=%v", req, resp) } log.Infof("ReadBinlogFilesTimestampsResponse: %+v", resp) incrDetails := &IncrementalBackupDetails{ @@ -365,14 +385,14 @@ func (be *BuiltinBackupEngine) executeIncrementalBackup(ctx context.Context, par // It is a fact that incrementalBackupFromGTID is earlier or equal to params.IncrementalFromPos. // In the backup manifest file, we document incrementalBackupFromGTID, not the user's requested position. if err := be.backupFiles(ctx, params, bh, incrementalBackupToPosition, gtidPurged, incrementalBackupFromPosition, fromBackupName, binaryLogsToBackup, serverUUID, mysqlVersion, incrDetails); err != nil { - return false, err + return BackupUnusable, err } - return true, nil + return BackupUsable, nil } -// executeFullBackup returns a boolean that indicates if the backup is usable, +// executeFullBackup returns a BackupResult that indicates the usability of the backup, // and an overall error. 
-func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { +func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (BackupResult, error) { if params.IncrementalFromPos != "" { return be.executeIncrementalBackup(ctx, params, bh) @@ -384,11 +404,11 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac superReadOnly := true //nolint readOnly := true //nolint var replicationPosition replication.Position - semiSyncSource, semiSyncReplica := params.Mysqld.SemiSyncEnabled() + semiSyncSource, semiSyncReplica := params.Mysqld.SemiSyncEnabled(ctx) // See if we need to restart replication after backup. params.Logger.Infof("getting current replication status") - replicaStatus, err := params.Mysqld.ReplicationStatusWithContext(ctx) + replicaStatus, err := params.Mysqld.ReplicationStatus(ctx) switch err { case nil: replicaStartRequired = replicaStatus.Healthy() && !DisableActiveReparents @@ -396,17 +416,17 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac // keep going if we're the primary, might be a degenerate case sourceIsPrimary = true default: - return false, vterrors.Wrap(err, "can't get replica status") + return BackupUnusable, vterrors.Wrap(err, "can't get replica status") } // get the read-only flag - readOnly, err = params.Mysqld.IsReadOnly() + readOnly, err = params.Mysqld.IsReadOnly(ctx) if err != nil { - return false, vterrors.Wrap(err, "failed to get read_only status") + return BackupUnusable, vterrors.Wrap(err, "failed to get read_only status") } - superReadOnly, err = params.Mysqld.IsSuperReadOnly() + superReadOnly, err = params.Mysqld.IsSuperReadOnly(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get super_read_only status") + return BackupUnusable, vterrors.Wrap(err, "can't get super_read_only status") } log.Infof("Flag values during full 
backup, read_only: %v, super_read_only:%t", readOnly, superReadOnly) @@ -415,30 +435,30 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac // No need to set read_only because super_read_only will implicitly set read_only to true as well. if !superReadOnly { params.Logger.Infof("Enabling super_read_only on primary prior to backup") - if _, err = params.Mysqld.SetSuperReadOnly(true); err != nil { - return false, vterrors.Wrap(err, "failed to enable super_read_only") + if _, err = params.Mysqld.SetSuperReadOnly(ctx, true); err != nil { + return BackupUnusable, vterrors.Wrap(err, "failed to enable super_read_only") } defer func() { // Resetting super_read_only back to its original value params.Logger.Infof("resetting mysqld super_read_only to %v", superReadOnly) - if _, err := params.Mysqld.SetSuperReadOnly(false); err != nil { + if _, err := params.Mysqld.SetSuperReadOnly(ctx, false); err != nil { log.Error("Failed to set super_read_only back to its original value") } }() } - replicationPosition, err = params.Mysqld.PrimaryPosition() + replicationPosition, err = params.Mysqld.PrimaryPosition(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get position on primary") + return BackupUnusable, vterrors.Wrap(err, "can't get position on primary") } } else { // This is a replica - if err := params.Mysqld.StopReplication(params.HookExtraEnv); err != nil { - return false, vterrors.Wrapf(err, "can't stop replica") + if err := params.Mysqld.StopReplication(ctx, params.HookExtraEnv); err != nil { + return BackupUnusable, vterrors.Wrapf(err, "can't stop replica") } - replicaStatus, err := params.Mysqld.ReplicationStatusWithContext(ctx) + replicaStatus, err := params.Mysqld.ReplicationStatus(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get replica status") + return BackupUnusable, vterrors.Wrap(err, "can't get replica status") } replicationPosition = replicaStatus.Position } @@ -446,48 +466,51 @@ func (be 
*BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac gtidPurgedPosition, err := params.Mysqld.GetGTIDPurged(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get gtid_purged") + return BackupUnusable, vterrors.Wrap(err, "can't get gtid_purged") } serverUUID, err := params.Mysqld.GetServerUUID(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get server uuid") + return BackupUnusable, vterrors.Wrap(err, "can't get server uuid") } mysqlVersion, err := params.Mysqld.GetVersionString(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get MySQL version") + return BackupUnusable, vterrors.Wrap(err, "can't get MySQL version") } // check if we need to set innodb_fast_shutdown=0 for a backup safe for upgrades if params.UpgradeSafe { if _, err := params.Mysqld.FetchSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil { - return false, vterrors.Wrapf(err, "failed to disable fast shutdown") + return BackupUnusable, vterrors.Wrapf(err, "failed to disable fast shutdown") } } // shutdown mysqld shutdownCtx, cancel := context.WithTimeout(ctx, BuiltinBackupMysqldTimeout) - err = params.Mysqld.Shutdown(shutdownCtx, params.Cnf, true) + err = params.Mysqld.Shutdown(shutdownCtx, params.Cnf, true, params.MysqlShutdownTimeout) defer cancel() if err != nil { - return false, vterrors.Wrap(err, "can't shutdown mysqld") + return BackupUnusable, vterrors.Wrap(err, "can't shutdown mysqld") } // Backup everything, capture the error. 
backupErr := be.backupFiles(ctx, params, bh, replicationPosition, gtidPurgedPosition, replication.Position{}, "", nil, serverUUID, mysqlVersion, nil) - usable := backupErr == nil + backupResult := BackupUnusable + if backupErr == nil { + backupResult = BackupUsable + } // Try to restart mysqld, use background context in case we timed out the original context err = params.Mysqld.Start(context.Background(), params.Cnf) if err != nil { - return usable, vterrors.Wrap(err, "can't restart mysqld") + return backupResult, vterrors.Wrap(err, "can't restart mysqld") } // Resetting super_read_only back to its original value params.Logger.Infof("resetting mysqld super_read_only to %v", superReadOnly) - if _, err := params.Mysqld.SetSuperReadOnly(superReadOnly); err != nil { - return usable, err + if _, err := params.Mysqld.SetSuperReadOnly(ctx, superReadOnly); err != nil { + return backupResult, err } // Restore original mysqld state that we saved above. @@ -496,20 +519,20 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac // the plugin isn't even loaded, and the server variables don't exist. 
params.Logger.Infof("restoring semi-sync settings from before backup: primary=%v, replica=%v", semiSyncSource, semiSyncReplica) - err := params.Mysqld.SetSemiSyncEnabled(semiSyncSource, semiSyncReplica) + err := params.Mysqld.SetSemiSyncEnabled(ctx, semiSyncSource, semiSyncReplica) if err != nil { - return usable, err + return backupResult, err } } if replicaStartRequired { params.Logger.Infof("restarting mysql replication") - if err := params.Mysqld.StartReplication(params.HookExtraEnv); err != nil { - return usable, vterrors.Wrap(err, "cannot restart replica") + if err := params.Mysqld.StartReplication(ctx, params.HookExtraEnv); err != nil { + return backupResult, vterrors.Wrap(err, "cannot restart replica") } // this should be quick, but we might as well just wait - if err := WaitForReplicationStart(params.Mysqld, replicationStartDeadline); err != nil { - return usable, vterrors.Wrap(err, "replica is not restarting") + if err := WaitForReplicationStart(ctx, params.Mysqld, replicationStartDeadline); err != nil { + return backupResult, vterrors.Wrap(err, "replica is not restarting") } // Wait for a reliable value for ReplicationLagSeconds from ReplicationStatus() @@ -527,16 +550,16 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac pos, err := getPrimaryPosition(remoteCtx, tmc, params.TopoServer, params.Keyspace, params.Shard) // If we are unable to get the primary's position, return error. 
if err != nil { - return usable, err + return backupResult, err } if !replicationPosition.Equal(pos) { for { if err := ctx.Err(); err != nil { - return usable, err + return backupResult, err } - status, err := params.Mysqld.ReplicationStatusWithContext(ctx) + status, err := params.Mysqld.ReplicationStatus(ctx) if err != nil { - return usable, err + return backupResult, err } newPos := status.Position if !newPos.Equal(replicationPosition) { @@ -547,7 +570,7 @@ func (be *BuiltinBackupEngine) executeFullBackup(ctx context.Context, params Bac } } - return usable, backupErr + return backupResult, backupErr } // backupFiles finds the list of files to backup, and creates the backup. @@ -647,6 +670,7 @@ func (be *BuiltinBackupEngine) backupFiles( bm := &builtinBackupManifest{ // Common base fields BackupManifest: BackupManifest{ + BackupName: bh.Name(), BackupMethod: builtinBackupEngineName, Position: backupPosition, PurgedPosition: purgedPosition, @@ -824,6 +848,16 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara var reader io.Reader = br var writer io.Writer = bw + defer func() { + // Close the backupPipe to finish writing on destination. + if err := bw.Close(); err != nil { + createAndCopyErr = errors.Join(createAndCopyErr, vterrors.Wrapf(err, "cannot flush destination: %v", name)) + } + + if err := br.Close(); err != nil { + createAndCopyErr = errors.Join(createAndCopyErr, vterrors.Wrap(err, "failed to close the source reader")) + } + }() // Create the gzip compression pipe, if necessary. if backupStorageCompress { var compressor io.WriteCloser @@ -867,16 +901,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara } if err := createAndCopy(); err != nil { - return err - } - - // Close the backupPipe to finish writing on destination. 
- if err = bw.Close(); err != nil { - return vterrors.Wrapf(err, "cannot flush destination: %v", name) - } - - if err := br.Close(); err != nil { - return vterrors.Wrap(err, "failed to close the source reader") + return errors.Join(finalErr, err) } // Save the hash. @@ -886,7 +911,7 @@ func (be *BuiltinBackupEngine) backupFile(ctx context.Context, params BackupPara // executeRestoreFullBackup restores the files from a full backup. The underlying mysql database service is expected to be stopped. func (be *BuiltinBackupEngine) executeRestoreFullBackup(ctx context.Context, params RestoreParams, bh backupstorage.BackupHandle, bm builtinBackupManifest) error { - if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil { + if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger, params.MysqlShutdownTimeout); err != nil { return err } @@ -981,7 +1006,7 @@ func (be *BuiltinBackupEngine) restoreFiles(ctx context.Context, params RestoreP } if bm.Incremental { - createdDir, err = os.MkdirTemp("", "restore-incremental-*") + createdDir, err = os.MkdirTemp(builtinIncrementalRestorePath, "restore-incremental-*") if err != nil { return "", err } diff --git a/go/vt/mysqlctl/cmd.go b/go/vt/mysqlctl/cmd.go index 5c3bda11437..cd4fd42f181 100644 --- a/go/vt/mysqlctl/cmd.go +++ b/go/vt/mysqlctl/cmd.go @@ -23,12 +23,13 @@ package mysqlctl import ( "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" ) // CreateMysqldAndMycnf returns a Mysqld and a Mycnf object to use for working with a MySQL // installation that hasn't been set up yet. -func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) (*Mysqld, *Mycnf, error) { +func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int, collationEnv *collations.Environment) (*Mysqld, *Mycnf, error) { mycnf := NewMycnf(tabletUID, mysqlPort) // Choose a random MySQL server-id, since this is a fresh data dir. 
// We don't want to use the tablet UID as the MySQL server-id, @@ -46,20 +47,20 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) ( mycnf.SocketFile = mysqlSocket } - dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile) + dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile, collationEnv) return NewMysqld(&dbconfigs.GlobalDBConfigs), mycnf, nil } // OpenMysqldAndMycnf returns a Mysqld and a Mycnf object to use for working with a MySQL // installation that already exists. The Mycnf will be built based on the my.cnf file // of the MySQL instance. -func OpenMysqldAndMycnf(tabletUID uint32) (*Mysqld, *Mycnf, error) { +func OpenMysqldAndMycnf(tabletUID uint32, collationEnv *collations.Environment) (*Mysqld, *Mycnf, error) { // We pass a port of 0, this will be read and overwritten from the path on disk - mycnf, err := ReadMycnf(NewMycnf(tabletUID, 0)) + mycnf, err := ReadMycnf(NewMycnf(tabletUID, 0), 0) if err != nil { return nil, nil, fmt.Errorf("couldn't read my.cnf file: %v", err) } - dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile) + dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile, collationEnv) return NewMysqld(&dbconfigs.GlobalDBConfigs), mycnf, nil } diff --git a/go/vt/mysqlctl/compression_test.go b/go/vt/mysqlctl/compression_test.go index 4215761dbe7..16fde00677c 100644 --- a/go/vt/mysqlctl/compression_test.go +++ b/go/vt/mysqlctl/compression_test.go @@ -19,14 +19,13 @@ package mysqlctl import ( "bytes" "context" - "errors" "fmt" "io" - "reflect" "strings" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/logutil" @@ -47,15 +46,8 @@ func TestGetExtensionFromEngine(t *testing.T) { for _, tt := range tests { t.Run(tt.engine, func(t *testing.T) { ext, err := getExtensionFromEngine(tt.engine) - // if err != tt.err { - if !errors.Is(err, tt.err) { - t.Errorf("got err: %v; expected: %v", err, tt.err) - } - // } - - if ext != tt.extension { 
- t.Errorf("got err: %v; expected: %v", ext, tt.extension) - } + assert.ErrorIs(t, err, tt.err) + assert.Equal(t, tt.extension, ext) }) } } @@ -69,33 +61,20 @@ func TestBuiltinCompressors(t *testing.T) { var compressed, decompressed bytes.Buffer reader := bytes.NewReader(data) compressor, err := newBuiltinCompressor(engine, &compressed, logger) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + _, err = io.Copy(compressor, reader) - if err != nil { - t.Error(err) - return - } + require.NoError(t, err) + compressor.Close() decompressor, err := newBuiltinDecompressor(engine, &compressed, logger) - if err != nil { - t.Error(err) - return - } + require.NoError(t, err) + _, err = io.Copy(&decompressed, decompressor) - if err != nil { - t.Error(err) - return - } - decompressor.Close() - if len(data) != len(decompressed.Bytes()) { - t.Errorf("Different size of original (%d bytes) and uncompressed (%d bytes) data", len(data), len(decompressed.Bytes())) - } + require.NoError(t, err) - if !reflect.DeepEqual(data, decompressed.Bytes()) { - t.Error("decompressed content differs from the original") - } + decompressor.Close() + assert.Equal(t, data, decompressed.Bytes()) }) } } @@ -142,33 +121,20 @@ func TestExternalCompressors(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() compressor, err := newExternalCompressor(ctx, tt.compress, &compressed, logger) - if err != nil { - t.Error(err) - return - } + require.NoError(t, err) + _, err = io.Copy(compressor, reader) - if err != nil { - t.Error(err) - return - } + require.NoError(t, err) + compressor.Close() decompressor, err := newExternalDecompressor(ctx, tt.decompress, &compressed, logger) - if err != nil { - t.Error(err) - return - } + require.NoError(t, err) + _, err = io.Copy(&decompressed, decompressor) - if err != nil { - t.Error(err) - return - } + require.NoError(t, err) + decompressor.Close() - if len(data) != len(decompressed.Bytes()) { - 
t.Errorf("Different size of original (%d bytes) and uncompressed (%d bytes) data", len(data), len(decompressed.Bytes())) - } - if !reflect.DeepEqual(data, decompressed.Bytes()) { - t.Error("decompressed content differs from the original") - } + assert.Equal(t, data, decompressed.Bytes()) }) } @@ -190,19 +156,13 @@ func TestValidateExternalCmd(t *testing.T) { t.Run(fmt.Sprintf("Test #%d", i+1), func(t *testing.T) { CmdName := tt.cmdName path, err := validateExternalCmd(CmdName) - if tt.path != "" { - if !strings.HasSuffix(path, tt.path) { - t.Errorf("Expected path \"%s\" to include \"%s\"", path, tt.path) - } - } + + assert.Contains(t, path, tt.path) + if tt.errStr == "" { - if err != nil { - t.Errorf("Expected result \"%v\", got \"%v\"", "", err) - } + assert.NoError(t, err) } else { - if !strings.Contains(fmt.Sprintf("%v", err), tt.errStr) { - t.Errorf("Expected result \"%v\", got \"%v\"", tt.errStr, err) - } + assert.ErrorContains(t, err, tt.errStr) } }) } diff --git a/go/vt/mysqlctl/fakebackupengine.go b/go/vt/mysqlctl/fakebackupengine.go index 2b8c3208ac5..d78282e6aff 100644 --- a/go/vt/mysqlctl/fakebackupengine.go +++ b/go/vt/mysqlctl/fakebackupengine.go @@ -41,7 +41,7 @@ type FakeBackupEngineExecuteBackupCall struct { } type FakeBackupEngineExecuteBackupReturn struct { - Ok bool + Res BackupResult Err error } @@ -59,14 +59,14 @@ func (be *FakeBackupEngine) ExecuteBackup( ctx context.Context, params BackupParams, bh backupstorage.BackupHandle, -) (bool, error) { +) (BackupResult, error) { be.ExecuteBackupCalls = append(be.ExecuteBackupCalls, FakeBackupEngineExecuteBackupCall{params, bh}) if be.ExecuteBackupDuration > 0 { time.Sleep(be.ExecuteBackupDuration) } - return be.ExecuteBackupReturn.Ok, be.ExecuteBackupReturn.Err + return be.ExecuteBackupReturn.Res, be.ExecuteBackupReturn.Err } func (be *FakeBackupEngine) ExecuteRestore( diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go index ee513d63da0..35fb7359fcf 100644 --- 
a/go/vt/mysqlctl/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon.go @@ -26,11 +26,14 @@ import ( "sync/atomic" "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/proto/replicationdata" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" @@ -170,9 +173,9 @@ type FakeMysqlDaemon struct { // FetchSuperQueryResults is used by FetchSuperQuery. FetchSuperQueryMap map[string]*sqltypes.Result - // SemiSyncPrimaryEnabled represents the state of rpl_semi_sync_master_enabled. + // SemiSyncPrimaryEnabled represents the state of rpl_semi_sync_source_enabled. SemiSyncPrimaryEnabled bool - // SemiSyncReplicaEnabled represents the state of rpl_semi_sync_slave_enabled. + // SemiSyncReplicaEnabled represents the state of rpl_semi_sync_replica_enabled. SemiSyncReplicaEnabled bool // TimeoutHook is a func that can be called at the beginning of @@ -196,7 +199,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon { } if db != nil { result.appPool = dbconnpool.NewConnectionPool("AppConnPool", nil, 5, time.Minute, 0, 0) - result.appPool.Open(db.ConnParams()) + result.appPool.Open(dbconfigs.New(db.ConnParams())) } return result } @@ -220,7 +223,7 @@ func (fmd *FakeMysqlDaemon) Start(ctx context.Context, cnf *Mycnf, mysqldArgs .. } // Shutdown is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error { +func (fmd *FakeMysqlDaemon) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, mysqlShutdownTimeout time.Duration) error { if !fmd.Running { return fmt.Errorf("fake mysql daemon not running") } @@ -267,8 +270,12 @@ func (fmd *FakeMysqlDaemon) Wait(ctx context.Context, cnf *Mycnf) error { return nil } +func (fmd *FakeMysqlDaemon) WaitForDBAGrants(ctx context.Context, waitTime time.Duration) (err error) { + return nil +} + // GetMysqlPort is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) GetMysqlPort() (int32, error) { +func (fmd *FakeMysqlDaemon) GetMysqlPort(ctx context.Context) (int32, error) { if fmd.MysqlPort.Load() == -1 { return 0, fmt.Errorf("FakeMysqlDaemon.GetMysqlPort returns an error") } @@ -293,7 +300,7 @@ func (fmd *FakeMysqlDaemon) CurrentPrimaryPositionLocked(pos replication.Positio } // ReplicationStatus is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) ReplicationStatus() (replication.ReplicationStatus, error) { +func (fmd *FakeMysqlDaemon) ReplicationStatus(ctx context.Context) (replication.ReplicationStatus, error) { if fmd.ReplicationStatusError != nil { return replication.ReplicationStatus{}, fmd.ReplicationStatusError } @@ -314,7 +321,7 @@ func (fmd *FakeMysqlDaemon) ReplicationStatus() (replication.ReplicationStatus, } func (fmd *FakeMysqlDaemon) ReplicationStatusWithContext(ctx context.Context) (replication.ReplicationStatus, error) { - return fmd.ReplicationStatus() + return fmd.ReplicationStatus(ctx) } // PrimaryStatus is part of the MysqlDaemon interface. @@ -328,6 +335,10 @@ func (fmd *FakeMysqlDaemon) PrimaryStatus(ctx context.Context) (replication.Prim }, nil } +func (fmd *FakeMysqlDaemon) ReplicationConfiguration(ctx context.Context) (*replicationdata.Configuration, error) { + return nil, nil +} + // GetGTIDPurged is part of the MysqlDaemon interface. 
func (fmd *FakeMysqlDaemon) GetGTIDPurged(ctx context.Context) (replication.Position, error) { return replication.Position{}, nil @@ -383,49 +394,49 @@ func (fmd *FakeMysqlDaemon) GetPreviousGTIDs(ctx context.Context, binlog string) } // PrimaryPosition is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) PrimaryPosition() (replication.Position, error) { +func (fmd *FakeMysqlDaemon) PrimaryPosition(ctx context.Context) (replication.Position, error) { return fmd.CurrentPrimaryPosition, nil } // IsReadOnly is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) IsReadOnly() (bool, error) { +func (fmd *FakeMysqlDaemon) IsReadOnly(ctx context.Context) (bool, error) { return fmd.ReadOnly, nil } // IsSuperReadOnly is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) IsSuperReadOnly() (bool, error) { +func (fmd *FakeMysqlDaemon) IsSuperReadOnly(ctx context.Context) (bool, error) { return fmd.SuperReadOnly.Load(), nil } // SetReadOnly is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SetReadOnly(on bool) error { +func (fmd *FakeMysqlDaemon) SetReadOnly(ctx context.Context, on bool) error { fmd.ReadOnly = on return nil } // SetSuperReadOnly is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) { +func (fmd *FakeMysqlDaemon) SetSuperReadOnly(ctx context.Context, on bool) (ResetSuperReadOnlyFunc, error) { fmd.SuperReadOnly.Store(on) fmd.ReadOnly = on return nil, nil } // StartReplication is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) StartReplication(hookExtraEnv map[string]string) error { +func (fmd *FakeMysqlDaemon) StartReplication(ctx context.Context, hookExtraEnv map[string]string) error { if fmd.StartReplicationError != nil { return fmd.StartReplicationError } - return fmd.ExecuteSuperQueryList(context.Background(), []string{ - "START SLAVE", + return fmd.ExecuteSuperQueryList(ctx, []string{ + "START REPLICA", }) } // RestartReplication is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) RestartReplication(hookExtraEnv map[string]string) error { - return fmd.ExecuteSuperQueryList(context.Background(), []string{ - "STOP SLAVE", - "RESET SLAVE", - "START SLAVE", +func (fmd *FakeMysqlDaemon) RestartReplication(ctx context.Context, hookExtraEnv map[string]string) error { + return fmd.ExecuteSuperQueryList(ctx, []string{ + "STOP REPLICA", + "RESET REPLICA", + "START REPLICA", }) } @@ -435,25 +446,25 @@ func (fmd *FakeMysqlDaemon) StartReplicationUntilAfter(ctx context.Context, pos return fmt.Errorf("wrong pos for StartReplicationUntilAfter: expected %v got %v", fmd.SetReplicationPositionPos, pos) } - return fmd.ExecuteSuperQueryList(context.Background(), []string{ - "START SLAVE UNTIL AFTER", + return fmd.ExecuteSuperQueryList(ctx, []string{ + "START REPLICA UNTIL AFTER", }) } // StopReplication is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) StopReplication(hookExtraEnv map[string]string) error { +func (fmd *FakeMysqlDaemon) StopReplication(ctx context.Context, hookExtraEnv map[string]string) error { if fmd.StopReplicationError != nil { return fmd.StopReplicationError } - return fmd.ExecuteSuperQueryList(context.Background(), []string{ - "STOP SLAVE", + return fmd.ExecuteSuperQueryList(ctx, []string{ + "STOP REPLICA", }) } // StopIOThread is part of the MysqlDaemon interface. 
func (fmd *FakeMysqlDaemon) StopIOThread(ctx context.Context) error { - return fmd.ExecuteSuperQueryList(context.Background(), []string{ - "STOP SLAVE IO_THREAD", + return fmd.ExecuteSuperQueryList(ctx, []string{ + "STOP REPLICA IO_THREAD", }) } @@ -463,12 +474,13 @@ func (fmd *FakeMysqlDaemon) SetReplicationPosition(ctx context.Context, pos repl return fmt.Errorf("wrong pos for SetReplicationPosition: expected %v got %v", fmd.SetReplicationPositionPos, pos) } return fmd.ExecuteSuperQueryList(ctx, []string{ - "FAKE SET SLAVE POSITION", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", }) } // SetReplicationSource is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error { +func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host string, port int32, heartbeatInterval float64, stopReplicationBefore bool, startReplicationAfter bool) error { input := fmt.Sprintf("%v:%v", host, port) found := false for _, sourceInput := range fmd.SetReplicationSourceInputs { @@ -484,11 +496,11 @@ func (fmd *FakeMysqlDaemon) SetReplicationSource(ctx context.Context, host strin } cmds := []string{} if stopReplicationBefore { - cmds = append(cmds, "STOP SLAVE") + cmds = append(cmds, "STOP REPLICA") } - cmds = append(cmds, "FAKE SET MASTER") + cmds = append(cmds, "FAKE SET SOURCE") if startReplicationAfter { - cmds = append(cmds, "START SLAVE") + cmds = append(cmds, "START REPLICA") } fmd.CurrentSourceHost = host fmd.CurrentSourcePort = port @@ -513,8 +525,13 @@ func (fmd *FakeMysqlDaemon) WaitSourcePos(_ context.Context, pos replication.Pos return fmt.Errorf("wrong input for WaitSourcePos: expected a value in %v got %v", fmd.WaitPrimaryPositions, pos) } +// CatchupToGTID is part of the MysqlDaemon interface. 
+func (fmd *FakeMysqlDaemon) CatchupToGTID(_ context.Context, pos replication.Position) error { + return nil +} + // Promote is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) Promote(hookExtraEnv map[string]string) (replication.Position, error) { +func (fmd *FakeMysqlDaemon) Promote(ctx context.Context, hookExtraEnv map[string]string) (replication.Position, error) { if fmd.PromoteLag > 0 { time.Sleep(fmd.PromoteLag) } @@ -549,9 +566,9 @@ func (fmd *FakeMysqlDaemon) ExecuteSuperQueryList(ctx context.Context, queryList // Intercept some queries to update our status. switch query { - case "START SLAVE": + case "START REPLICA": fmd.Replicating = true - case "STOP SLAVE": + case "STOP REPLICA": fmd.Replicating = false } } @@ -657,28 +674,28 @@ func (fmd *FakeMysqlDaemon) GetAppConnection(ctx context.Context) (*dbconnpool.P // GetDbaConnection is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) GetDbaConnection(ctx context.Context) (*dbconnpool.DBConnection, error) { - return dbconnpool.NewDBConnection(ctx, fmd.db.ConnParams()) + return dbconnpool.NewDBConnection(ctx, dbconfigs.New(fmd.db.ConnParams())) } // GetAllPrivsConnection is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) GetAllPrivsConnection(ctx context.Context) (*dbconnpool.DBConnection, error) { - return dbconnpool.NewDBConnection(ctx, fmd.db.ConnParams()) + return dbconnpool.NewDBConnection(ctx, dbconfigs.New(fmd.db.ConnParams())) } // SetSemiSyncEnabled is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SetSemiSyncEnabled(primary, replica bool) error { +func (fmd *FakeMysqlDaemon) SetSemiSyncEnabled(ctx context.Context, primary, replica bool) error { fmd.SemiSyncPrimaryEnabled = primary fmd.SemiSyncReplicaEnabled = replica return nil } // SemiSyncEnabled is part of the MysqlDaemon interface. 
-func (fmd *FakeMysqlDaemon) SemiSyncEnabled() (primary, replica bool) { +func (fmd *FakeMysqlDaemon) SemiSyncEnabled(ctx context.Context) (primary, replica bool) { return fmd.SemiSyncPrimaryEnabled, fmd.SemiSyncReplicaEnabled } // SemiSyncStatus is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SemiSyncStatus() (bool, bool) { +func (fmd *FakeMysqlDaemon) SemiSyncStatus(ctx context.Context) (bool, bool) { // The fake assumes the status worked. if fmd.SemiSyncPrimaryEnabled { return true, false @@ -687,22 +704,22 @@ func (fmd *FakeMysqlDaemon) SemiSyncStatus() (bool, bool) { } // SemiSyncClients is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SemiSyncClients() uint32 { +func (fmd *FakeMysqlDaemon) SemiSyncClients(ctx context.Context) uint32 { return 0 } // SemiSyncExtensionLoaded is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SemiSyncExtensionLoaded() (bool, error) { - return true, nil +func (fmd *FakeMysqlDaemon) SemiSyncExtensionLoaded(ctx context.Context) (mysql.SemiSyncType, error) { + return mysql.SemiSyncTypeSource, nil } // SemiSyncSettings is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SemiSyncSettings() (timeout uint64, numReplicas uint32) { +func (fmd *FakeMysqlDaemon) SemiSyncSettings(ctx context.Context) (timeout uint64, numReplicas uint32) { return 10000000, 1 } // SemiSyncReplicationStatus is part of the MysqlDaemon interface. -func (fmd *FakeMysqlDaemon) SemiSyncReplicationStatus() (bool, error) { +func (fmd *FakeMysqlDaemon) SemiSyncReplicationStatus(ctx context.Context) (bool, error) { // The fake assumes the status worked. 
return fmd.SemiSyncReplicaEnabled, nil } diff --git a/go/vt/mysqlctl/grpcmysqlctlclient/client.go b/go/vt/mysqlctl/grpcmysqlctlclient/client.go index 150402a8c44..027f6709eb6 100644 --- a/go/vt/mysqlctl/grpcmysqlctlclient/client.go +++ b/go/vt/mysqlctl/grpcmysqlctlclient/client.go @@ -40,9 +40,10 @@ type client struct { c mysqlctlpb.MysqlCtlClient } -func factory(network, addr string) (mysqlctlclient.MysqlctlClient, error) { +func factory(ctx context.Context, network, addr string) (mysqlctlclient.MysqlctlClient, error) { // create the RPC client - cc, err := grpcclient.Dial( + cc, err := grpcclient.DialContext( + ctx, addr, grpcclient.FailFast(false), grpc.WithTransportCredentials(insecure.NewCredentials()), diff --git a/go/vt/mysqlctl/grpcmysqlctlserver/server.go b/go/vt/mysqlctl/grpcmysqlctlserver/server.go index 84953020534..2a703a50a84 100644 --- a/go/vt/mysqlctl/grpcmysqlctlserver/server.go +++ b/go/vt/mysqlctl/grpcmysqlctlserver/server.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/mysqlctl" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" ) @@ -43,7 +44,14 @@ func (s *server) Start(ctx context.Context, request *mysqlctlpb.StartRequest) (* // Shutdown implements the server side of the MysqlctlClient interface. func (s *server) Shutdown(ctx context.Context, request *mysqlctlpb.ShutdownRequest) (*mysqlctlpb.ShutdownResponse, error) { - return &mysqlctlpb.ShutdownResponse{}, s.mysqld.Shutdown(ctx, s.cnf, request.WaitForMysqld) + timeout, ok, err := protoutil.DurationFromProto(request.MysqlShutdownTimeout) + if err != nil { + return nil, err + } + if !ok { + timeout = mysqlctl.DefaultShutdownTimeout + } + return &mysqlctlpb.ShutdownResponse{}, s.mysqld.Shutdown(ctx, s.cnf, request.WaitForMysqld, timeout) } // RunMysqlUpgrade implements the server side of the MysqlctlClient interface. 
@@ -56,6 +64,11 @@ func (s *server) ApplyBinlogFile(ctx context.Context, request *mysqlctlpb.ApplyB return &mysqlctlpb.ApplyBinlogFileResponse{}, s.mysqld.ApplyBinlogFile(ctx, request) } +// ReadBinlogFilesTimestamps implements the server side of the MysqlctlClient interface. +func (s *server) ReadBinlogFilesTimestamps(ctx context.Context, request *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) { + return s.mysqld.ReadBinlogFilesTimestamps(ctx, request) +} + // ReinitConfig implements the server side of the MysqlctlClient interface. func (s *server) ReinitConfig(ctx context.Context, request *mysqlctlpb.ReinitConfigRequest) (*mysqlctlpb.ReinitConfigResponse, error) { return &mysqlctlpb.ReinitConfigResponse{}, s.mysqld.ReinitConfig(ctx, s.cnf) diff --git a/go/vt/mysqlctl/mycnf.go b/go/vt/mysqlctl/mycnf.go index 3af6b8e8607..c4ee062348b 100644 --- a/go/vt/mysqlctl/mycnf.go +++ b/go/vt/mysqlctl/mycnf.go @@ -28,8 +28,11 @@ import ( "os" "path" "strconv" + "time" ) +const DefaultShutdownTimeout = 5 * time.Minute + // Mycnf is a memory structure that contains a bunch of interesting // parameters to start mysqld. It can be used to generate standard // my.cnf files from a server id and mysql port. It can also be @@ -112,6 +115,10 @@ type Mycnf struct { Path string // the actual path that represents this mycnf } +const ( + myCnfWaitRetryTime = 100 * time.Millisecond +) + // TabletDir returns the tablet directory. func (cnf *Mycnf) TabletDir() string { return path.Dir(cnf.DataDir) @@ -153,17 +160,27 @@ func normKey(bkey []byte) string { // ReadMycnf will read an existing my.cnf from disk, and update the passed in Mycnf object // with values from the my.cnf on disk. 
-func ReadMycnf(mycnf *Mycnf) (*Mycnf, error) { +func ReadMycnf(mycnf *Mycnf, waitTime time.Duration) (*Mycnf, error) { f, err := os.Open(mycnf.Path) + if waitTime != 0 { + timer := time.NewTimer(waitTime) + for err != nil { + select { + case <-timer.C: + return nil, err + default: + time.Sleep(myCnfWaitRetryTime) + f, err = os.Open(mycnf.Path) + } + } + } if err != nil { return nil, err } defer f.Close() buf := bufio.NewReader(f) - if err != nil { - return nil, err - } + mycnf.mycnfMap = make(map[string]string) var lval, rval string var parts [][]byte diff --git a/go/vt/mysqlctl/mycnf_flag.go b/go/vt/mysqlctl/mycnf_flag.go index 33a18d69940..8559e5c1431 100644 --- a/go/vt/mysqlctl/mycnf_flag.go +++ b/go/vt/mysqlctl/mycnf_flag.go @@ -17,6 +17,8 @@ limitations under the License. package mysqlctl import ( + "time" + "github.com/spf13/pflag" "vitess.io/vitess/go/vt/log" @@ -51,6 +53,10 @@ var ( flagMycnfFile string ) +const ( + waitForMyCnf = 10 * time.Second +) + // RegisterFlags registers the command line flags for // specifying the values of a mycnf config file. See NewMycnfFromFlags // to get the supported modes. @@ -129,5 +135,5 @@ func NewMycnfFromFlags(uid uint32) (mycnf *Mycnf, err error) { } mycnf = NewMycnf(uid, 0) mycnf.Path = flagMycnfFile - return ReadMycnf(mycnf) + return ReadMycnf(mycnf, waitForMyCnf) } diff --git a/go/vt/mysqlctl/mycnf_gen.go b/go/vt/mysqlctl/mycnf_gen.go index b29d152707f..dd0d6c81c81 100644 --- a/go/vt/mysqlctl/mycnf_gen.go +++ b/go/vt/mysqlctl/mycnf_gen.go @@ -19,11 +19,11 @@ limitations under the License. 
package mysqlctl import ( - "bytes" "crypto/rand" "fmt" "math/big" "path" + "strings" "text/template" "github.com/spf13/pflag" @@ -54,9 +54,7 @@ const ( innodbLogSubdir = "innodb/logs" ) -var ( - tabletDir string -) +var tabletDir string func init() { for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} { @@ -149,8 +147,8 @@ func (cnf *Mycnf) fillMycnfTemplate(tmplSrc string) (string, error) { if err != nil { return "", err } - mycnfData := new(bytes.Buffer) - err = myTemplate.Execute(mycnfData, cnf) + var mycnfData strings.Builder + err = myTemplate.Execute(&mycnfData, cnf) if err != nil { return "", err } diff --git a/go/vt/mysqlctl/mycnf_test.go b/go/vt/mysqlctl/mycnf_test.go index d422ed899c4..bb3d6611c86 100644 --- a/go/vt/mysqlctl/mycnf_test.go +++ b/go/vt/mysqlctl/mycnf_test.go @@ -19,8 +19,14 @@ package mysqlctl import ( "bytes" "os" - "strings" + "sync" "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/servenv" @@ -29,6 +35,9 @@ import ( var MycnfPath = "/tmp/my.cnf" func TestMycnf(t *testing.T) { + // Remove any my.cnf file if it already exists. 
+ os.Remove(MycnfPath) + uid := uint32(11111) cnf := NewMycnf(uid, 6802) myTemplateSource := new(bytes.Buffer) @@ -39,36 +48,45 @@ func TestMycnf(t *testing.T) { f, _ := os.ReadFile("../../../config/mycnf/default.cnf") myTemplateSource.Write(f) data, err := cnf.makeMycnf(myTemplateSource.String()) - if err != nil { - t.Errorf("err: %v", err) - } else { - t.Logf("data: %v", data) - } - err = os.WriteFile(MycnfPath, []byte(data), 0666) - if err != nil { - t.Errorf("failed creating my.cnf %v", err) - } - _, err = os.ReadFile(MycnfPath) - if err != nil { - t.Errorf("failed reading, err %v", err) - return - } + require.NoError(t, err) + t.Logf("data: %v", data) + + // Since there is no my.cnf file, reading it should fail with a no such file error. mycnf := NewMycnf(uid, 0) mycnf.Path = MycnfPath - mycnf, err = ReadMycnf(mycnf) - if err != nil { - t.Errorf("failed reading, err %v", err) - } else { + _, err = ReadMycnf(mycnf, 0) + require.ErrorContains(t, err, "no such file or directory") + + // Next up we will spawn a go-routine to try and read the cnf file with a timeout. + // We will create the cnf file after some delay and verify that ReadMycnf does wait that long + // and ends up succeeding in reading the my.cnf file. + waitTime := 1 * time.Second + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + defer wg.Done() + startTime := time.Now() + var readErr error + mycnf, readErr = ReadMycnf(mycnf, 1*time.Minute) + require.NoError(t, readErr, "failed reading") t.Logf("socket file %v", mycnf.SocketFile) - } + totalTimeSpent := time.Since(startTime) + require.GreaterOrEqual(t, totalTimeSpent, waitTime) + }() + + time.Sleep(waitTime) + err = os.WriteFile(MycnfPath, []byte(data), 0666) + require.NoError(t, err, "failed creating my.cnf") + _, err = os.ReadFile(MycnfPath) + require.NoError(t, err, "failed reading") + + // Wait for ReadMycnf to finish and then verify that the data read is correct. + wg.Wait() // Tablet UID should be 11111, which determines tablet/data dir. 
- if got, want := mycnf.DataDir, "/vt_0000011111/"; !strings.Contains(got, want) { - t.Errorf("mycnf.DataDir = %v, want *%v*", got, want) - } + require.Contains(t, mycnf.DataDir, "/vt_0000011111/") // MySQL server-id should be 22222, different from Tablet UID. - if got, want := mycnf.ServerID, uint32(22222); got != want { - t.Errorf("mycnf.ServerID = %v, want %v", got, want) - } + require.EqualValues(t, uint32(22222), mycnf.ServerID) } // Run this test if any changes are made to hook handling / make_mycnf hook @@ -97,55 +115,36 @@ func NoTestMycnfHook(t *testing.T) { // this is not being passed, so it should be nil os.Setenv("MY_VAR", "myvalue") - dbconfigs.GlobalDBConfigs.InitWithSocket(cnf.SocketFile) + dbconfigs.GlobalDBConfigs.InitWithSocket(cnf.SocketFile, collations.MySQL8()) mysqld := NewMysqld(&dbconfigs.GlobalDBConfigs) servenv.OnClose(mysqld.Close) err := mysqld.InitConfig(cnf) - if err != nil { - t.Errorf("err: %v", err) - } + require.NoError(t, err) + _, err = os.ReadFile(cnf.Path) - if err != nil { - t.Errorf("failed reading, err %v", err) - return - } + require.NoError(t, err) + mycnf := NewMycnf(uid, 0) mycnf.Path = cnf.Path - mycnf, err = ReadMycnf(mycnf) + mycnf, err = ReadMycnf(mycnf, 0) if err != nil { t.Errorf("failed reading, err %v", err) } else { t.Logf("socket file %v", mycnf.SocketFile) } // Tablet UID should be 11111, which determines tablet/data dir. - if got, want := mycnf.DataDir, "/vt_0000011111/"; !strings.Contains(got, want) { - t.Errorf("mycnf.DataDir = %v, want *%v*", got, want) - } + assert.Contains(t, mycnf.DataDir, "/vt_0000011111/") + // MySQL server-id should be 22222, different from Tablet UID. 
- if got, want := mycnf.ServerID, uint32(22222); got != want { - t.Errorf("mycnf.ServerID = %v, want %v", got, want) - } + assert.Equal(t, uint32(22222), mycnf.ServerID) + // check that the env variables we set were passed correctly to the hook - if got, want := mycnf.lookup("KEYSPACE"), "test-messagedb"; got != want { - t.Errorf("Error passing env %v, got %v, want %v", "KEYSPACE", got, want) - } - if got, want := mycnf.lookup("SHARD"), "0"; got != want { - t.Errorf("Error passing env %v, got %v, want %v", "SHARD", got, want) - } - if got, want := mycnf.lookup("TABLET_TYPE"), "PRIMARY"; got != want { - t.Errorf("Error passing env %v, got %v, want %v", "TABLET_TYPE", got, want) - } - if got, want := mycnf.lookup("TABLET_ID"), "11111"; got != want { - t.Errorf("Error passing env %v, got %v, want %v", "TABLET_ID", got, want) - } - if got, want := mycnf.lookup("TABLET_DIR"), "/vt_0000011111"; !strings.Contains(got, want) { - t.Errorf("Error passing env %v, got %v, want %v", "TABLET_DIR", got, want) - } - if got, want := mycnf.lookup("MYSQL_PORT"), "15306"; got != want { - t.Errorf("Error passing env %v, got %v, want %v", "MYSQL_PORT", got, want) - } - if got := mycnf.lookup("MY_VAR"); got != "" { - t.Errorf("Unexpected env %v set to %v", "MY_VAR", got) - } + assert.Equal(t, "test-messagedb", mycnf.lookup("KEYSPACE")) + assert.Equal(t, "test-0", mycnf.lookup("SHARD")) + assert.Equal(t, "PRIMARY", mycnf.lookup("TABLET_TYPE")) + assert.Equal(t, "11111", mycnf.lookup("TABLET_ID")) + assert.Equal(t, "/vt_0000011111", mycnf.lookup("TABLET_DIR")) + assert.Equal(t, "15306", mycnf.lookup("MYSQL_PORT")) + assert.Equal(t, "", mycnf.lookup("MY_VAR")) } diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 0269d16654a..0e7eb5ca359 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -18,11 +18,14 @@ package mysqlctl import ( "context" + "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" 
"vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/proto/replicationdata" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" @@ -33,15 +36,16 @@ import ( type MysqlDaemon interface { // methods related to mysql running or not Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error - Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error + Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, mysqlShutdownTimeout time.Duration) error RunMysqlUpgrade(ctx context.Context) error ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) ReinitConfig(ctx context.Context, cnf *Mycnf) error Wait(ctx context.Context, cnf *Mycnf) error + WaitForDBAGrants(ctx context.Context, waitTime time.Duration) (err error) // GetMysqlPort returns the current port mysql is listening on. - GetMysqlPort() (int32, error) + GetMysqlPort(ctx context.Context) (int32, error) // GetServerID returns the servers ID. 
GetServerID(ctx context.Context) (uint32, error) @@ -50,22 +54,22 @@ type MysqlDaemon interface { GetServerUUID(ctx context.Context) (string, error) // replication related methods - StartReplication(hookExtraEnv map[string]string) error - RestartReplication(hookExtraEnv map[string]string) error + StartReplication(ctx context.Context, hookExtraEnv map[string]string) error + RestartReplication(ctx context.Context, hookExtraEnv map[string]string) error StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error - StopReplication(hookExtraEnv map[string]string) error + StopReplication(ctx context.Context, hookExtraEnv map[string]string) error StopIOThread(ctx context.Context) error - ReplicationStatus() (replication.ReplicationStatus, error) - ReplicationStatusWithContext(ctx context.Context) (replication.ReplicationStatus, error) + ReplicationStatus(ctx context.Context) (replication.ReplicationStatus, error) PrimaryStatus(ctx context.Context) (replication.PrimaryStatus, error) + ReplicationConfiguration(ctx context.Context) (*replicationdata.Configuration, error) GetGTIDPurged(ctx context.Context) (replication.Position, error) - SetSemiSyncEnabled(source, replica bool) error - SemiSyncEnabled() (source, replica bool) - SemiSyncExtensionLoaded() (bool, error) - SemiSyncStatus() (source, replica bool) - SemiSyncClients() (count uint32) - SemiSyncSettings() (timeout uint64, numReplicas uint32) - SemiSyncReplicationStatus() (bool, error) + SetSemiSyncEnabled(ctx context.Context, source, replica bool) error + SemiSyncEnabled(ctx context.Context) (source, replica bool) + SemiSyncExtensionLoaded(ctx context.Context) (mysql.SemiSyncType, error) + SemiSyncStatus(ctx context.Context) (source, replica bool) + SemiSyncClients(ctx context.Context) (count uint32) + SemiSyncSettings(ctx context.Context) (timeout uint64, numReplicas uint32) + SemiSyncReplicationStatus(ctx context.Context) (bool, error) ResetReplicationParameters(ctx context.Context) error 
GetBinlogInformation(ctx context.Context) (binlogFormat string, logEnabled bool, logReplicaUpdate bool, binlogRowImage string, err error) GetGTIDMode(ctx context.Context) (gtidMode string, err error) @@ -75,26 +79,26 @@ type MysqlDaemon interface { // reparenting related methods ResetReplication(ctx context.Context) error - PrimaryPosition() (replication.Position, error) - IsReadOnly() (bool, error) - IsSuperReadOnly() (bool, error) - SetReadOnly(on bool) error - SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) + PrimaryPosition(ctx context.Context) (replication.Position, error) + IsReadOnly(ctx context.Context) (bool, error) + IsSuperReadOnly(ctx context.Context) (bool, error) + SetReadOnly(ctx context.Context, on bool) error + SetSuperReadOnly(ctx context.Context, on bool) (ResetSuperReadOnlyFunc, error) SetReplicationPosition(ctx context.Context, pos replication.Position) error - SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error + SetReplicationSource(ctx context.Context, host string, port int32, heartbeatInterval float64, stopReplicationBefore bool, startReplicationAfter bool) error WaitForReparentJournal(ctx context.Context, timeCreatedNS int64) error WaitSourcePos(context.Context, replication.Position) error + CatchupToGTID(context.Context, replication.Position) error // Promote makes the current server the primary. It will not change // the read_only state of the server. 
- Promote(map[string]string) (replication.Position, error) + Promote(context.Context, map[string]string) (replication.Position, error) // Schema related methods GetSchema(ctx context.Context, dbName string, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) GetColumns(ctx context.Context, dbName, table string) ([]*querypb.Field, []string, error) GetPrimaryKeyColumns(ctx context.Context, dbName, table string) ([]string, error) - GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName, table string) ([]string, string, error) PreflightSchemaChange(ctx context.Context, dbName string, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) ApplySchemaChange(ctx context.Context, dbName string, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) diff --git a/go/vt/mysqlctl/mysqlctlclient/interface.go b/go/vt/mysqlctl/mysqlctlclient/interface.go index 4ab03a9df5b..e6f15b230db 100644 --- a/go/vt/mysqlctl/mysqlctlclient/interface.go +++ b/go/vt/mysqlctl/mysqlctlclient/interface.go @@ -70,7 +70,7 @@ type MysqlctlClient interface { } // Factory functions are registered by client implementations. -type Factory func(network, addr string) (MysqlctlClient, error) +type Factory func(ctx context.Context, network, addr string) (MysqlctlClient, error) var factories = make(map[string]Factory) @@ -83,10 +83,10 @@ func RegisterFactory(name string, factory Factory) { } // New creates a client implementation as specified by a flag. 
-func New(network, addr string) (MysqlctlClient, error) { +func New(ctx context.Context, network, addr string) (MysqlctlClient, error) { factory, ok := factories[protocol] if !ok { return nil, fmt.Errorf("unknown mysqlctl client protocol: %v", protocol) } - return factory(network, addr) + return factory(ctx, network, addr) } diff --git a/go/vt/mysqlctl/mysqlctlproto/backup_test.go b/go/vt/mysqlctl/mysqlctlproto/backup_test.go index a5e31295a0e..cccbb349aa2 100644 --- a/go/vt/mysqlctl/mysqlctlproto/backup_test.go +++ b/go/vt/mysqlctl/mysqlctlproto/backup_test.go @@ -102,8 +102,6 @@ func TestBackupHandleToProto(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.bh.testname(), func(t *testing.T) { t.Parallel() diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index ee872c214f4..952c0987c82 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -38,31 +38,39 @@ import ( "strconv" "strings" "sync" + "syscall" "time" "github.com/spf13/pflag" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/protoutil" - "vitess.io/vitess/config" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" + vtenv "vitess.io/vitess/go/vt/env" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/mysqlctlclient" + mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" - - vtenv "vitess.io/vitess/go/vt/env" - mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" - "vitess.io/vitess/go/vt/proto/vtrpc" ) -var ( +// The string we expect before the MySQL version number +// in strings containing MySQL version information. 
+const versionStringPrefix = "Ver " + +// How many bytes from MySQL error log to sample for error messages +const maxLogFileSampleSize = 4096 + +// DbaGrantWaitTime is the amount of time to wait for the grants to have applied +const DbaGrantWaitTime = 10 * time.Second +var ( // DisableActiveReparents is a flag to disable active // reparents for safety reasons. It is used in three places: // 1. in this file to skip registering the commands. @@ -86,15 +94,18 @@ var ( replicationConnectRetry = 10 * time.Second - versionRegex = regexp.MustCompile(`Ver ([0-9]+)\.([0-9]+)\.([0-9]+)`) + versionRegex = regexp.MustCompile(fmt.Sprintf(`%s([0-9]+)\.([0-9]+)\.([0-9]+)`, versionStringPrefix)) + // versionSQLQuery will return a version string directly from + // a MySQL server that is compatible with what we expect from + // mysqld --version and matches the versionRegex. Example + // result: Ver 8.0.35 MySQL Community Server - GPL + versionSQLQuery = fmt.Sprintf("select concat('%s', @@global.version, ' ', @@global.version_comment) as version", + versionStringPrefix) binlogEntryCommittedTimestampRegex = regexp.MustCompile("original_committed_timestamp=([0-9]+)") binlogEntryTimestampGTIDRegexp = regexp.MustCompile(`^#(.+) server id.*\bGTID\b`) ) -// How many bytes from MySQL error log to sample for error messages -const maxLogFileSampleSize = 4096 - // Mysqld is the object that represents a mysqld daemon running on this server. 
type Mysqld struct { dbcfgs *dbconfigs.DBConfigs @@ -107,15 +118,20 @@ type Mysqld struct { mutex sync.Mutex onTermFuncs []func() cancelWaitCmd chan struct{} + + semiSyncType mysql.SemiSyncType } func init() { for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver"} { servenv.OnParseFor(cmd, registerMySQLDFlags) } - for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver", "vtctld", "vtctldclient"} { + for _, cmd := range []string{"vtctld", "vtctldclient"} { servenv.OnParseFor(cmd, registerReparentFlags) } + for _, cmd := range []string{"vtcombo", "vttablet", "vttestserver"} { + servenv.OnParseFor(cmd, registerDeprecatedReparentFlags) + } for _, cmd := range []string{"mysqlctl", "mysqlctld", "vtcombo", "vttablet", "vttestserver"} { servenv.OnParseFor(cmd, registerPoolFlags) } @@ -132,6 +148,11 @@ func registerReparentFlags(fs *pflag.FlagSet) { fs.BoolVar(&DisableActiveReparents, "disable_active_reparents", DisableActiveReparents, "if set, do not allow active reparents. Use this to protect a cluster using external reparents.") } +func registerDeprecatedReparentFlags(fs *pflag.FlagSet) { + fs.BoolVar(&DisableActiveReparents, "disable_active_reparents", DisableActiveReparents, "if set, do not allow active reparents. Use this to protect a cluster using external reparents.") + fs.MarkDeprecated("disable_active_reparents", "Use --unmanaged flag instead for unmanaged tablets.") +} + func registerPoolFlags(fs *pflag.FlagSet) { fs.IntVar(&dbaPoolSize, "dba_pool_size", dbaPoolSize, "Size of the connection pool for dba connections") fs.DurationVar(&DbaIdleTimeout, "dba_idle_timeout", DbaIdleTimeout, "Idle timeout for dba connections") @@ -244,7 +265,7 @@ func (mysqld *Mysqld) RunMysqlUpgrade(ctx context.Context) error { // Execute as remote action on mysqlctld if requested. 
if socketFile != "" { log.Infof("executing Mysqld.RunMysqlUpgrade() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) } @@ -313,7 +334,7 @@ func (mysqld *Mysqld) Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...strin // Execute as remote action on mysqlctld if requested. if socketFile != "" { log.Infof("executing Mysqld.Start() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) } @@ -321,7 +342,7 @@ func (mysqld *Mysqld) Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...strin return client.Start(ctx, mysqldArgs...) } - if err := mysqld.startNoWait(ctx, cnf, mysqldArgs...); err != nil { + if err := mysqld.startNoWait(cnf, mysqldArgs...); err != nil { return err } @@ -329,7 +350,7 @@ func (mysqld *Mysqld) Start(ctx context.Context, cnf *Mycnf, mysqldArgs ...strin } // startNoWait is the internal version of Start, and it doesn't wait. -func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs ...string) error { +func (mysqld *Mysqld) startNoWait(cnf *Mycnf, mysqldArgs ...string) error { var name string ts := fmt.Sprintf("Mysqld.Start(%v)", time.Now().Unix()) @@ -355,6 +376,13 @@ func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs .. if err != nil { return err } + // If we're here, and the lockfile still exists for the socket, we have + // to clean that up since we know at this point we need to start MySQL. + // Having this stray lock file present means MySQL fails to start. This + // only happens when running without mysqld_safe. 
+ if err := cleanupLockfile(cnf.SocketFile, ts); err != nil { + return err + } } mysqlBaseDir, err := vtenv.VtMysqlBaseDir() if err != nil { @@ -396,7 +424,7 @@ func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs .. }() err = cmd.Start() if err != nil { - return err + return vterrors.Wrapf(err, "failed to start mysqld") } mysqld.mutex.Lock() @@ -426,6 +454,58 @@ func (mysqld *Mysqld) startNoWait(ctx context.Context, cnf *Mycnf, mysqldArgs .. return nil } +func cleanupLockfile(socket string, ts string) error { + lockPath := fmt.Sprintf("%s.lock", socket) + pid, err := os.ReadFile(lockPath) + if errors.Is(err, os.ErrNotExist) { + log.Infof("%v: no stale lock file at %s", ts, lockPath) + // If there's no lock file, we can early return here, nothing + // to clean up then. + return nil + } else if err != nil { + log.Errorf("%v: error checking if lock file exists: %v", ts, err) + // Any other errors here are unexpected. + return err + } + p, err := strconv.Atoi(string(bytes.TrimSpace(pid))) + if err != nil { + log.Errorf("%v: error parsing pid from lock file: %v", ts, err) + return err + } + if os.Getpid() == p { + log.Infof("%v: lock file at %s is ours, removing it", ts, lockPath) + return os.Remove(lockPath) + } + proc, err := os.FindProcess(p) + if err != nil { + log.Errorf("%v: error finding process: %v", ts, err) + return err + } + err = proc.Signal(syscall.Signal(0)) + if err == nil { + // If the process still exists, it's not safe to + // remove the lock file, so we have to keep it around. 
+ cmdline, err := os.ReadFile(fmt.Sprintf("/proc/%d/cmdline", p)) + if err == nil { + name := string(bytes.ReplaceAll(cmdline, []byte{0}, []byte(" "))) + log.Errorf("%v: not removing socket lock file: %v with pid %v for %q", ts, lockPath, p, name) + } else { + log.Errorf("%v: not removing socket lock file: %v with pid %v (failed to read process name: %v)", ts, lockPath, p, err) + } + return fmt.Errorf("process %v is still running", p) + } + if !errors.Is(err, os.ErrProcessDone) { + // Any errors except for the process being done + // is unexpected here. + log.Errorf("%v: error checking process %v: %v", ts, p, err) + return err + } + + // All good, process is gone and we can safely clean up the lock file. + log.Infof("%v: removing stale socket lock file: %v", ts, lockPath) + return os.Remove(lockPath) +} + // Wait returns nil when mysqld is up and accepting connections. It // will use the dba credentials to try to connect. Use wait() with // different credentials if needed. @@ -438,6 +518,44 @@ func (mysqld *Mysqld) Wait(ctx context.Context, cnf *Mycnf) error { return mysqld.wait(ctx, cnf, params) } +// WaitForDBAGrants waits for the grants to have applied for all the users. +func (mysqld *Mysqld) WaitForDBAGrants(ctx context.Context, waitTime time.Duration) (err error) { + if waitTime == 0 { + return nil + } + params, err := mysqld.dbcfgs.DbaConnector().MysqlParams() + if err != nil { + return err + } + timer := time.NewTimer(waitTime) + ctx, cancel := context.WithTimeout(ctx, waitTime) + defer cancel() + for { + conn, connErr := mysql.Connect(ctx, params) + if connErr == nil { + res, fetchErr := conn.ExecuteFetch("SHOW GRANTS", 1000, false) + conn.Close() + if fetchErr != nil { + log.Errorf("Error running SHOW GRANTS - %v", fetchErr) + } + if fetchErr == nil && res != nil && len(res.Rows) > 0 && len(res.Rows[0]) > 0 { + privileges := res.Rows[0][0].ToString() + // In MySQL 8.0, all the privileges are listed out explicitly, so we can search for SUPER in the output. 
+ // In MySQL 5.7, all the privileges are not listed explicitly, instead ALL PRIVILEGES is written, so we search for that too. + if strings.Contains(privileges, "SUPER") || strings.Contains(privileges, "ALL PRIVILEGES") { + return nil + } + } + } + select { + case <-timer.C: + return fmt.Errorf("timed out after %v waiting for the dba user to have the required permissions", waitTime) + default: + time.Sleep(100 * time.Millisecond) + } + } +} + // wait is the internal version of Wait, that takes credentials. func (mysqld *Mysqld) wait(ctx context.Context, cnf *Mycnf, params *mysql.ConnParams) error { log.Infof("Waiting for mysqld socket file (%v) to be ready...", cnf.SocketFile) @@ -472,13 +590,13 @@ func (mysqld *Mysqld) wait(ctx context.Context, cnf *Mycnf, params *mysql.ConnPa // flushed - on the order of 20-30 minutes. // // If a mysqlctld address is provided in a flag, Shutdown will run remotely. -func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool) error { +func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bool, shutdownTimeout time.Duration) error { log.Infof("Mysqld.Shutdown") // Execute as remote action on mysqlctld if requested. if socketFile != "" { log.Infof("executing Mysqld.Shutdown() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) } @@ -531,7 +649,7 @@ func (mysqld *Mysqld) Shutdown(ctx context.Context, cnf *Mycnf, waitForMysqld bo defer os.Remove(cnf) args := []string{ "--defaults-extra-file=" + cnf, - "--shutdown-timeout=300", + fmt.Sprintf("--shutdown-timeout=%d", int(shutdownTimeout.Seconds())), "--connect-timeout=30", "--wait=10", "shutdown", @@ -642,7 +760,7 @@ func (mysqld *Mysqld) Init(ctx context.Context, cnf *Mycnf, initDBSQLFile string // Start mysqld. 
We do not use Start, as we have to wait using // the root user. - if err = mysqld.startNoWait(ctx, cnf); err != nil { + if err = mysqld.startNoWait(cnf); err != nil { log.Errorf("failed starting mysqld: %v\n%v", err, readTailOfMysqldErrorLog(cnf.ErrorLogPath)) return err } @@ -780,7 +898,7 @@ func (mysqld *Mysqld) initConfig(cnf *Mycnf, outFile string) error { return err } - return os.WriteFile(outFile, []byte(configData), 0664) + return os.WriteFile(outFile, []byte(configData), 0o664) } func (mysqld *Mysqld) getMycnfTemplate() string { @@ -791,7 +909,7 @@ func (mysqld *Mysqld) getMycnfTemplate() string { } return string(data) // use only specified template } - myTemplateSource := new(bytes.Buffer) + var myTemplateSource strings.Builder myTemplateSource.WriteString("[mysqld]\n") myTemplateSource.WriteString(config.MycnfDefault) @@ -812,7 +930,13 @@ func (mysqld *Mysqld) getMycnfTemplate() string { log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version) } case 8: - versionConfig = config.MycnfMySQL80 + if mysqld.capabilities.version.Minor >= 4 { + versionConfig = config.MycnfMySQL84 + } else if mysqld.capabilities.version.Minor >= 1 || mysqld.capabilities.version.Patch >= 26 { + versionConfig = config.MycnfMySQL8026 + } else { + versionConfig = config.MycnfMySQL80 + } default: log.Infof("this version of Vitess does not include built-in support for %v %v", mysqld.capabilities.flavor, mysqld.capabilities.version) } @@ -849,7 +973,7 @@ func (mysqld *Mysqld) RefreshConfig(ctx context.Context, cnf *Mycnf) error { // Execute as remote action on mysqlctld if requested. 
if socketFile != "" { log.Infof("executing Mysqld.RefreshConfig() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) } @@ -907,7 +1031,7 @@ func (mysqld *Mysqld) ReinitConfig(ctx context.Context, cnf *Mycnf) error { // Execute as remote action on mysqlctld if requested. if socketFile != "" { log.Infof("executing Mysqld.ReinitConfig() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) } @@ -974,9 +1098,9 @@ func (mysqld *Mysqld) createTopDir(cnf *Mycnf, dir string) error { } // Teardown will shutdown the running daemon, and delete the root directory. -func (mysqld *Mysqld) Teardown(ctx context.Context, cnf *Mycnf, force bool) error { +func (mysqld *Mysqld) Teardown(ctx context.Context, cnf *Mycnf, force bool, shutdownTimeout time.Duration) error { log.Infof("mysqlctl.Teardown") - if err := mysqld.Shutdown(ctx, cnf, true); err != nil { + if err := mysqld.Shutdown(ctx, cnf, true, shutdownTimeout); err != nil { log.Warningf("failed mysqld shutdown: %v", err.Error()) if !force { return err @@ -1136,15 +1260,22 @@ func buildLdPaths() ([]string, error) { // GetVersionString is part of the MysqlExecutor interface. func (mysqld *Mysqld) GetVersionString(ctx context.Context) (string, error) { - // Execute as remote action on mysqlctld to ensure we get the actual running MySQL version. + // Try to query the mysqld instance directly. + qr, err := mysqld.FetchSuperQuery(ctx, versionSQLQuery) + if err == nil && len(qr.Rows) == 1 { + return qr.Rows[0][0].ToString(), nil + } + // Execute as remote action on mysqlctld to use the actual running MySQL + // version. 
if socketFile != "" { - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return "", fmt.Errorf("can't dial mysqlctld: %v", err) } defer client.Close() return client.VersionString(ctx) } + // Fall back to the sys exec method using mysqld --version. return GetVersionString() } @@ -1166,7 +1297,7 @@ func (mysqld *Mysqld) GetVersionComment(ctx context.Context) (string, error) { func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.ApplyBinlogFileRequest) error { if socketFile != "" { log.Infof("executing Mysqld.ApplyBinlogFile() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return fmt.Errorf("can't dial mysqlctld: %v", err) } @@ -1185,11 +1316,18 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply if err != nil { return err } + var mysqlbinlogErrFile *os.File { name, err := binaryPath(dir, "mysqlbinlog") if err != nil { return err } + mysqlbinlogErrFile, err = os.CreateTemp("", "err-mysqlbinlog-") + if err != nil { + return err + } + defer os.Remove(mysqlbinlogErrFile.Name()) + args := []string{} if gtids := req.BinlogRestorePosition; gtids != "" { args = append(args, @@ -1209,7 +1347,8 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply mysqlbinlogCmd = exec.Command(name, args...) 
mysqlbinlogCmd.Dir = dir mysqlbinlogCmd.Env = env - log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v", mysqlbinlogCmd) + mysqlbinlogCmd.Stderr = mysqlbinlogErrFile + log.Infof("ApplyBinlogFile: running mysqlbinlog command: %#v with errfile=%v", mysqlbinlogCmd, mysqlbinlogErrFile.Name()) pipe, err = mysqlbinlogCmd.StdoutPipe() // to be piped into mysql if err != nil { return err @@ -1244,7 +1383,7 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply // parameters. We do it blindly, since this will fail on MariaDB, which doesn't // have super_read_only This is safe, since we're restarting MySQL after the restore anyway log.Infof("ApplyBinlogFile: disabling super_read_only") - resetFunc, err := mysqld.SetSuperReadOnly(false) + resetFunc, err := mysqld.SetSuperReadOnly(ctx, false) if err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { log.Warningf("ApplyBinlogFile: server does not know about super_read_only, continuing anyway...") @@ -1279,6 +1418,12 @@ func (mysqld *Mysqld) ApplyBinlogFile(ctx context.Context, req *mysqlctlpb.Apply } // Wait for both to complete: if err := mysqlbinlogCmd.Wait(); err != nil { + if mysqlbinlogErrFile != nil { + errFileContent, _ := os.ReadFile(mysqlbinlogErrFile.Name()) + if len(errFileContent) > 0 { + err = vterrors.Wrapf(err, "with error output: %s", string(errFileContent)) + } + } return vterrors.Wrapf(err, "mysqlbinlog command failed") } if err := mysqlCmd.Wait(); err != nil { @@ -1379,11 +1524,11 @@ func (mysqld *Mysqld) scanBinlogTimestamp( // ReadBinlogFilesTimestamps reads all given binlog files via `mysqlbinlog` command and returns the first and last found transaction timestamps func (mysqld *Mysqld) ReadBinlogFilesTimestamps(ctx context.Context, req *mysqlctlpb.ReadBinlogFilesTimestampsRequest) (*mysqlctlpb.ReadBinlogFilesTimestampsResponse, error) { if len(req.BinlogFileNames) == 0 { - return nil, 
vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "empty binlog list in ReadBinlogFilesTimestampsRequest") + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "empty binlog list in ReadBinlogFilesTimestampsRequest") } if socketFile != "" { log.Infof("executing Mysqld.ReadBinlogFilesTimestamps() remotely via mysqlctld server: %v", socketFile) - client, err := mysqlctlclient.New("unix", socketFile) + client, err := mysqlctlclient.New(ctx, "unix", socketFile) if err != nil { return nil, fmt.Errorf("can't dial mysqlctld: %v", err) } diff --git a/go/vt/mysqlctl/mysqld_test.go b/go/vt/mysqlctl/mysqld_test.go index 435090008f2..cc31206aa0c 100644 --- a/go/vt/mysqlctl/mysqld_test.go +++ b/go/vt/mysqlctl/mysqld_test.go @@ -17,11 +17,19 @@ limitations under the License. package mysqlctl import ( + "context" + "os" + "strconv" + "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" ) type testcase struct { @@ -172,3 +180,130 @@ func TestParseBinlogEntryTimestamp(t *testing.T) { }) } } + +func TestCleanupLockfile(t *testing.T) { + t.Cleanup(func() { + os.Remove("mysql.sock.lock") + }) + ts := "prefix" + // All good if no lockfile exists + assert.NoError(t, cleanupLockfile("mysql.sock", ts)) + + // If lockfile exists, but the process is not found, we clean it up. + os.WriteFile("mysql.sock.lock", []byte("123456789"), 0o600) + assert.NoError(t, cleanupLockfile("mysql.sock", ts)) + assert.NoFileExists(t, "mysql.sock.lock") + + // If lockfile exists, but the process is not found, we clean it up. + os.WriteFile("mysql.sock.lock", []byte("123456789\n"), 0o600) + assert.NoError(t, cleanupLockfile("mysql.sock", ts)) + assert.NoFileExists(t, "mysql.sock.lock") + + // If the lockfile exists, and the process is found, but it's for ourselves, + // we clean it up. 
+ os.WriteFile("mysql.sock.lock", []byte(strconv.Itoa(os.Getpid())), 0o600) + assert.NoError(t, cleanupLockfile("mysql.sock", ts)) + assert.NoFileExists(t, "mysql.sock.lock") + + // If the lockfile exists, and the process is found, we don't clean it up. + os.WriteFile("mysql.sock.lock", []byte(strconv.Itoa(os.Getppid())), 0o600) + assert.Error(t, cleanupLockfile("mysql.sock", ts)) + assert.FileExists(t, "mysql.sock.lock") +} + +func TestRunMysqlUpgrade(t *testing.T) { + ver, err := GetVersionString() + require.NoError(t, err) + if strings.Contains(ver, "5.7") { + t.Skipf("Run the test only for 8.0") + } + + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + err = testMysqld.RunMysqlUpgrade(ctx) + assert.NoError(t, err) + + // Should not fail for older versions + testMysqld.capabilities = newCapabilitySet(FlavorMySQL, ServerVersion{Major: 8, Minor: 0, Patch: 15}) + err = testMysqld.RunMysqlUpgrade(ctx) + assert.NoError(t, err) +} + +func TestGetDbaConnection(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + + conn, err := testMysqld.GetDbaConnection(ctx) + assert.NoError(t, err) + assert.NoError(t, conn.Ping()) + defer conn.Close() +} + +func TestGetVersionString(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + str, err := testMysqld.GetVersionString(ctx) + assert.NoError(t, err) + assert.NotEmpty(t, str) + + ver := "test_version" + db.AddQuery("SELECT 1", 
&sqltypes.Result{}) + db.AddQuery(versionSQLQuery, sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), ver)) + + str, err = testMysqld.GetVersionString(ctx) + assert.Equal(t, ver, str) + assert.NoError(t, err) +} + +func TestGetVersionComment(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("select @@global.version_comment", sqltypes.MakeTestResult(sqltypes.MakeTestFields("@@global.version_comment", "varchar"), "test_version1", "test_version2")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + _, err := testMysqld.GetVersionComment(ctx) + assert.ErrorContains(t, err, "unexpected result length") + + ver := "test_version" + db.AddQuery("select @@global.version_comment", sqltypes.MakeTestResult(sqltypes.MakeTestFields("@@global.version_comment", "varchar"), ver)) + + str, err := testMysqld.GetVersionComment(ctx) + assert.NoError(t, err) + assert.Equal(t, ver, str) +} diff --git a/go/vt/mysqlctl/permissions_test.go b/go/vt/mysqlctl/permissions_test.go new file mode 100644 index 00000000000..5a8954fac15 --- /dev/null +++ b/go/vt/mysqlctl/permissions_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mysqlctl + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" +) + +func TestGetPermissions(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + testMysqld := NewFakeMysqlDaemon(db) + defer testMysqld.Close() + + testMysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ + "SELECT * FROM mysql.user ORDER BY host, user": sqltypes.MakeTestResult(sqltypes.MakeTestFields("host|user", "varchar|varchar"), "test_host1|test_user1", "test_host2|test_user2"), + "SELECT * FROM mysql.db ORDER BY host, db, user": sqltypes.MakeTestResult(sqltypes.MakeTestFields("host|user|db", "varchar|varchar|varchar"), "test_host1|test_user1|test_db1", "test_host2|test_user2|test_db2"), + } + + per, err := GetPermissions(testMysqld) + assert.NoError(t, err) + assert.Len(t, per.DbPermissions, 2) + assert.Len(t, per.UserPermissions, 2) +} diff --git a/go/vt/mysqlctl/query.go b/go/vt/mysqlctl/query.go index e4b26a1607b..154ed132062 100644 --- a/go/vt/mysqlctl/query.go +++ b/go/vt/mysqlctl/query.go @@ -249,6 +249,8 @@ func (mysqld *Mysqld) fetchStatuses(ctx context.Context, pattern string) (map[st } const ( + sourcePasswordStart = " SOURCE_PASSWORD = '" + sourcePasswordEnd = "',\n" masterPasswordStart = " MASTER_PASSWORD = '" masterPasswordEnd = "',\n" passwordStart = " PASSWORD = '" @@ -256,7 +258,17 @@ const ( ) func redactPassword(input string) string { - i := strings.Index(input, masterPasswordStart) + i := strings.Index(input, sourcePasswordStart) + // We have primary password in the query, try to redact it + if i != -1 { + j := strings.Index(input[i+len(sourcePasswordStart):], sourcePasswordEnd) + if j == -1 { + return input + } + input = input[:i+len(sourcePasswordStart)] + strings.Repeat("*", 4) + input[i+len(masterPasswordStart)+j:] + } + + i = strings.Index(input, masterPasswordStart) // We have primary password in the query, try to redact it if i != -1 { j := 
strings.Index(input[i+len(masterPasswordStart):], masterPasswordEnd) diff --git a/go/vt/mysqlctl/redo_log_test.go b/go/vt/mysqlctl/redo_log_test.go new file mode 100644 index 00000000000..ae2005bdc51 --- /dev/null +++ b/go/vt/mysqlctl/redo_log_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctl + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" +) + +func TestProcessCanDisableRedoLog(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT variable_value FROM performance_schema.global_status WHERE variable_name = 'innodb_redo_log_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1", "varchar"), "val1")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + res, err := testMysqld.ProcessCanDisableRedoLog(context.Background()) + assert.NoError(t, err) + assert.True(t, res) + + db.AddQuery("SELECT variable_value FROM performance_schema.global_status WHERE variable_name = 'innodb_redo_log_enabled'", &sqltypes.Result{}) + res, err = testMysqld.ProcessCanDisableRedoLog(context.Background()) + assert.Error(t, err) + assert.False(t, res) +} diff --git a/go/vt/mysqlctl/reparent.go 
b/go/vt/mysqlctl/reparent.go index 0cd89c59ab3..08326390f97 100644 --- a/go/vt/mysqlctl/reparent.go +++ b/go/vt/mysqlctl/reparent.go @@ -85,8 +85,7 @@ func (mysqld *Mysqld) WaitForReparentJournal(ctx context.Context, timeCreatedNS } // Promote will promote this server to be the new primary. -func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (replication.Position, error) { - ctx := context.TODO() +func (mysqld *Mysqld) Promote(ctx context.Context, hookExtraEnv map[string]string) (replication.Position, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return replication.Position{}, err @@ -96,7 +95,7 @@ func (mysqld *Mysqld) Promote(hookExtraEnv map[string]string) (replication.Posit // Since we handle replication, just stop it. cmds := []string{ conn.Conn.StopReplicationCommand(), - "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. + conn.Conn.ResetReplicationCommand(), // When using semi-sync and GTID, a replica first connects to the new primary with a given GTID set, // it can take a long time to scan the current binlog file to find the corresponding position. // This can cause commits that occur soon after the primary is promoted to take a long time waiting diff --git a/go/vt/mysqlctl/reparent_test.go b/go/vt/mysqlctl/reparent_test.go new file mode 100644 index 00000000000..d882d594233 --- /dev/null +++ b/go/vt/mysqlctl/reparent_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mysqlctl + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/logutil" +) + +func TestPopulateReparentJournal(t *testing.T) { + input := `MySQL replica position: filename 'vt-0476396352-bin.000005', position '310088991', GTID of the last change '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3, + 1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3, + 47b59de1-b368-11e9-b48b-624401d35560:1-152981, + 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, + 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, + b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262' + MySQL replica binlog position: master host '10.128.0.43', purge list '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3, 1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3, 47b59de1-b368-11e9-b48b-624401d35560:1-152981, 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262', channel name: '' + + 190809 00:15:44 [00] Streaming + 190809 00:15:44 [00] ...done + 190809 00:15:44 [00] Streaming + 190809 00:15:44 [00] ...done + xtrabackup: Transaction log of lsn (405344842034) to (406364859653) was copied. 
+ 190809 00:16:14 completed OK!` + + pos, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) + require.NoError(t, err) + + res := PopulateReparentJournal(1, "action", "primaryAlias", pos) + want := `INSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES (1, 'action', 'primaryAlias', 'MySQL56/145e508e-ae54-11e9-8ce6-46824dd1815e:1-3,1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3,47b59de1-b368-11e9-b48b-624401d35560:1-152981,557def0a-b368-11e9-84ed-f6fffd91cc57:1-3,599ef589-ae55-11e9-9688-ca1f44501925:1-14857169,b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262')` + assert.Equal(t, want, res) +} + +func TestWaitForReparentJournal(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT action_name, primary_alias, replication_position FROM _vt.reparent_journal WHERE time_created_ns=5", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "test_row")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + err := testMysqld.WaitForReparentJournal(ctx, 5) + assert.NoError(t, err) +} + +func TestPromote(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("STOP REPLICA", &sqltypes.Result{}) + db.AddQuery("RESET REPLICA ALL", &sqltypes.Result{}) + db.AddQuery("FLUSH BINARY LOGS", &sqltypes.Result{}) + db.AddQuery("SELECT @@global.gtid_executed", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + pos, err := 
testMysqld.Promote(context.Background(), map[string]string{}) + assert.NoError(t, err) + assert.Equal(t, "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:12-17", pos.String()) +} diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 90793a1abd1..07e0e3b5fad 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -29,37 +29,40 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/hook" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/replicationdata" + "vitess.io/vitess/go/vt/vterrors" ) type ResetSuperReadOnlyFunc func() error // WaitForReplicationStart waits until the deadline for replication to start. // This validates the current primary is correct and can be connected to. -func WaitForReplicationStart(mysqld MysqlDaemon, replicaStartDeadline int) error { - var rowMap map[string]string +func WaitForReplicationStart(ctx context.Context, mysqld MysqlDaemon, replicaStartDeadline int) (err error) { + var replicaStatus replication.ReplicationStatus for replicaWait := 0; replicaWait < replicaStartDeadline; replicaWait++ { - status, err := mysqld.ReplicationStatus() + replicaStatus, err = mysqld.ReplicationStatus(ctx) if err != nil { return err } - if status.Running() { + if replicaStatus.Running() { return nil } time.Sleep(time.Second) } - - errorKeys := []string{"Last_Error", "Last_IO_Error", "Last_SQL_Error"} - errs := make([]string, 0, len(errorKeys)) - for _, key := range errorKeys { - if rowMap[key] != "" { - errs = append(errs, key+": "+rowMap[key]) - } + errs := make([]string, 0, 2) + if replicaStatus.LastSQLError != "" { + errs = append(errs, "Last_SQL_Error: "+replicaStatus.LastSQLError) } + if replicaStatus.LastIOError != "" { + errs = append(errs, "Last_IO_Error: "+replicaStatus.LastIOError) + } + if len(errs) != 0 { return errors.New(strings.Join(errs, ", ")) } @@ -67,8 +70,7 @@ func 
WaitForReplicationStart(mysqld MysqlDaemon, replicaStartDeadline int) error } // StartReplication starts replication. -func (mysqld *Mysqld) StartReplication(hookExtraEnv map[string]string) error { - ctx := context.TODO() +func (mysqld *Mysqld) StartReplication(ctx context.Context, hookExtraEnv map[string]string) error { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -111,13 +113,12 @@ func (mysqld *Mysqld) StartSQLThreadUntilAfter(ctx context.Context, targetPos re } // StopReplication stops replication. -func (mysqld *Mysqld) StopReplication(hookExtraEnv map[string]string) error { +func (mysqld *Mysqld) StopReplication(ctx context.Context, hookExtraEnv map[string]string) error { h := hook.NewSimpleHook("preflight_stop_slave") h.ExtraEnv = hookExtraEnv if err := h.ExecuteOptional(); err != nil { return err } - ctx := context.TODO() conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -150,13 +151,12 @@ func (mysqld *Mysqld) StopSQLThread(ctx context.Context) error { } // RestartReplication stops, resets and starts replication. -func (mysqld *Mysqld) RestartReplication(hookExtraEnv map[string]string) error { +func (mysqld *Mysqld) RestartReplication(ctx context.Context, hookExtraEnv map[string]string) error { h := hook.NewSimpleHook("preflight_stop_slave") h.ExtraEnv = hookExtraEnv if err := h.ExecuteOptional(); err != nil { return err } - ctx := context.TODO() conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return err @@ -173,8 +173,21 @@ func (mysqld *Mysqld) RestartReplication(hookExtraEnv map[string]string) error { } // GetMysqlPort returns mysql port -func (mysqld *Mysqld) GetMysqlPort() (int32, error) { - qr, err := mysqld.FetchSuperQuery(context.TODO(), "SHOW VARIABLES LIKE 'port'") +func (mysqld *Mysqld) GetMysqlPort(ctx context.Context) (int32, error) { + // We can not use the connection pool here. 
This check runs very early + // during MySQL startup when we still might be loading things like grants. + // This means we need to use an isolated connection to avoid poisoning the + // DBA connection pool for further queries. + params, err := mysqld.dbcfgs.DbaConnector().MysqlParams() + if err != nil { + return 0, err + } + conn, err := mysql.Connect(ctx, params) + if err != nil { + return 0, err + } + defer conn.Close() + qr, err := conn.ExecuteFetch("SHOW VARIABLES LIKE 'port'", 1, false) if err != nil { return 0, err } @@ -216,8 +229,8 @@ func (mysqld *Mysqld) GetServerUUID(ctx context.Context) (string, error) { } // IsReadOnly return true if the instance is read only -func (mysqld *Mysqld) IsReadOnly() (bool, error) { - qr, err := mysqld.FetchSuperQuery(context.TODO(), "SHOW VARIABLES LIKE 'read_only'") +func (mysqld *Mysqld) IsReadOnly(ctx context.Context) (bool, error) { + qr, err := mysqld.FetchSuperQuery(ctx, "SHOW VARIABLES LIKE 'read_only'") if err != nil { return true, err } @@ -231,12 +244,13 @@ func (mysqld *Mysqld) IsReadOnly() (bool, error) { } // IsSuperReadOnly return true if the instance is super read only -func (mysqld *Mysqld) IsSuperReadOnly() (bool, error) { - qr, err := mysqld.FetchSuperQuery(context.TODO(), "SELECT @@global.super_read_only") +func (mysqld *Mysqld) IsSuperReadOnly(ctx context.Context) (bool, error) { + qr, err := mysqld.FetchSuperQuery(ctx, "SELECT @@global.super_read_only") if err != nil { return false, err } - if err == nil && len(qr.Rows) == 1 { + + if len(qr.Rows) == 1 { sro := qr.Rows[0][0].ToString() if sro == "1" || sro == "ON" { return true, nil @@ -247,29 +261,19 @@ func (mysqld *Mysqld) IsSuperReadOnly() (bool, error) { } // SetReadOnly set/unset the read_only flag -func (mysqld *Mysqld) SetReadOnly(on bool) error { - // temp logging, to be removed in v17 - var newState string - switch on { - case false: - newState = "ReadWrite" - case true: - newState = "ReadOnly" - } - log.Infof("SetReadOnly setting to : %s", 
newState) - +func (mysqld *Mysqld) SetReadOnly(ctx context.Context, on bool) error { query := "SET GLOBAL read_only = " if on { query += "ON" } else { query += "OFF" } - return mysqld.ExecuteSuperQuery(context.TODO(), query) + return mysqld.ExecuteSuperQuery(ctx, query) } // SetSuperReadOnly set/unset the super_read_only flag. // Returns a function which is called to set super_read_only back to its original value. -func (mysqld *Mysqld) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) { +func (mysqld *Mysqld) SetSuperReadOnly(ctx context.Context, on bool) (ResetSuperReadOnlyFunc, error) { // return function for switching `OFF` super_read_only var resetFunc ResetSuperReadOnlyFunc var disableFunc = func() error { @@ -285,7 +289,7 @@ func (mysqld *Mysqld) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) return err } - superReadOnlyEnabled, err := mysqld.IsSuperReadOnly() + superReadOnlyEnabled, err := mysqld.IsSuperReadOnly(ctx) if err != nil { return nil, err } @@ -315,7 +319,8 @@ func (mysqld *Mysqld) SetSuperReadOnly(on bool) (ResetSuperReadOnlyFunc, error) return resetFunc, nil } -// WaitSourcePos lets replicas wait to given replication position +// WaitSourcePos lets replicas wait for the given replication position to +// be reached. func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos replication.Position) error { // Get a connection. conn, err := getPoolReconnect(ctx, mysqld.dbaPool) @@ -324,71 +329,54 @@ func (mysqld *Mysqld) WaitSourcePos(ctx context.Context, targetPos replication.P } defer conn.Recycle() - // First check if filePos flavored Position was passed in. If so, we can't defer to the flavor in the connection, - // unless that flavor is also filePos. - waitCommandName := "WaitUntilPositionCommand" - var query string + // First check if filePos flavored Position was passed in. If so, we + // can't defer to the flavor in the connection, unless that flavor is + // also filePos. 
if targetPos.MatchesFlavor(replication.FilePosFlavorID) { - // If we are the primary, WaitUntilFilePositionCommand will fail. - // But position is most likely reached. So, check the position - // first. + // If we are the primary, WaitUntilFilePosition will fail. But + // position is most likely reached. So, check the position first. mpos, err := conn.Conn.PrimaryFilePosition() if err != nil { - return fmt.Errorf("WaitSourcePos: PrimaryFilePosition failed: %v", err) + return vterrors.Wrapf(err, "WaitSourcePos: PrimaryFilePosition failed") } if mpos.AtLeast(targetPos) { return nil } - - // Find the query to run, run it. - query, err = conn.Conn.WaitUntilFilePositionCommand(ctx, targetPos) - if err != nil { - return err - } - waitCommandName = "WaitUntilFilePositionCommand" } else { - // If we are the primary, WaitUntilPositionCommand will fail. - // But position is most likely reached. So, check the position - // first. + // If we are the primary, WaitUntilPosition will fail. But + // position is most likely reached. So, check the position first. mpos, err := conn.Conn.PrimaryPosition() if err != nil { - return fmt.Errorf("WaitSourcePos: PrimaryPosition failed: %v", err) + return vterrors.Wrapf(err, "WaitSourcePos: PrimaryPosition failed") } if mpos.AtLeast(targetPos) { return nil } - - // Find the query to run, run it. 
- query, err = conn.Conn.WaitUntilPositionCommand(ctx, targetPos) - if err != nil { - return err - } } - qr, err := mysqld.FetchSuperQuery(ctx, query) - if err != nil { - return fmt.Errorf("%v(%v) failed: %v", waitCommandName, query, err) + if err := conn.Conn.WaitUntilPosition(ctx, targetPos); err != nil { + return vterrors.Wrapf(err, "WaitSourcePos failed") } + return nil +} - if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 { - return fmt.Errorf("unexpected result format from %v(%v): %#v", waitCommandName, query, qr) - } - result := qr.Rows[0][0] - if result.IsNull() { - return fmt.Errorf("%v(%v) failed: replication is probably stopped", waitCommandName, query) +func (mysqld *Mysqld) CatchupToGTID(ctx context.Context, targetPos replication.Position) error { + params, err := mysqld.dbcfgs.ReplConnector().MysqlParams() + if err != nil { + return err } - if result.ToString() == "-1" { - return fmt.Errorf("timed out waiting for position %v", targetPos) + conn, err := getPoolReconnect(ctx, mysqld.dbaPool) + if err != nil { + return err } - return nil -} + defer conn.Recycle() -// ReplicationStatus returns the server replication status -func (mysqld *Mysqld) ReplicationStatus() (replication.ReplicationStatus, error) { - return mysqld.ReplicationStatusWithContext(context.TODO()) + cmds := conn.Conn.CatchupToGTIDCommands(params, targetPos) + return mysqld.executeSuperQueryListConn(ctx, conn, cmds) } -func (mysqld *Mysqld) ReplicationStatusWithContext(ctx context.Context) (replication.ReplicationStatus, error) { +// ReplicationStatus returns the server replication status +func (mysqld *Mysqld) ReplicationStatus(ctx context.Context) (replication.ReplicationStatus, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return replication.ReplicationStatus{}, err @@ -409,6 +397,16 @@ func (mysqld *Mysqld) PrimaryStatus(ctx context.Context) (replication.PrimarySta return conn.Conn.ShowPrimaryStatus() } +func (mysqld *Mysqld) ReplicationConfiguration(ctx 
context.Context) (*replicationdata.Configuration, error) { + conn, err := getPoolReconnect(ctx, mysqld.dbaPool) + if err != nil { + return nil, err + } + defer conn.Recycle() + + return conn.Conn.ReplicationConfiguration() +} + // GetGTIDPurged returns the gtid purged statuses func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (replication.Position, error) { conn, err := getPoolReconnect(ctx, mysqld.dbaPool) @@ -421,8 +419,8 @@ func (mysqld *Mysqld) GetGTIDPurged(ctx context.Context) (replication.Position, } // PrimaryPosition returns the primary replication position. -func (mysqld *Mysqld) PrimaryPosition() (replication.Position, error) { - conn, err := getPoolReconnect(context.TODO(), mysqld.dbaPool) +func (mysqld *Mysqld) PrimaryPosition(ctx context.Context) (replication.Position, error) { + conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return replication.Position{}, err } @@ -447,7 +445,7 @@ func (mysqld *Mysqld) SetReplicationPosition(ctx context.Context, pos replicatio // SetReplicationSource makes the provided host / port the primary. It optionally // stops replication before, and starts it after. 
-func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error { +func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, host string, port int32, heartbeatInterval float64, stopReplicationBefore bool, startReplicationAfter bool) error { params, err := mysqld.dbcfgs.ReplConnector().MysqlParams() if err != nil { return err @@ -462,7 +460,7 @@ func (mysqld *Mysqld) SetReplicationSource(ctx context.Context, host string, por if stopReplicationBefore { cmds = append(cmds, conn.Conn.StopReplicationCommand()) } - smc := conn.Conn.SetReplicationSourceCommand(params, host, port, int(replicationConnectRetry.Seconds())) + smc := conn.Conn.SetReplicationSourceCommand(params, host, port, heartbeatInterval, int(replicationConnectRetry.Seconds())) cmds = append(cmds, smc) if startReplicationAfter { cmds = append(cmds, conn.Conn.StartReplicationCommand()) @@ -494,12 +492,12 @@ func (mysqld *Mysqld) ResetReplicationParameters(ctx context.Context) error { return mysqld.executeSuperQueryListConn(ctx, conn, cmds) } -// +------+---------+---------------------+------+-------------+------+----------------------------------------------------------------+------------------+ -// | Id | User | Host | db | Command | Time | State | Info | -// +------+---------+---------------------+------+-------------+------+----------------------------------------------------------------+------------------+ -// | 9792 | vt_repl | host:port | NULL | Binlog Dump | 54 | Has sent all binlog to slave; waiting for binlog to be updated | NULL | -// | 9797 | vt_dba | localhost | NULL | Query | 0 | NULL | show processlist | -// +------+---------+---------------------+------+-------------+------+----------------------------------------------------------------+------------------+ +// 
+------+---------+---------------------+------+-------------+------+------------------------------------------------------------------+------------------+ +// | Id | User | Host | db | Command | Time | State | Info | +// +------+---------+---------------------+------+-------------+------+------------------------------------------------------------------+------------------+ +// | 9792 | vt_repl | host:port | NULL | Binlog Dump | 54 | Has sent all binlog to replica; waiting for binlog to be updated | NULL | +// | 9797 | vt_dba | localhost | NULL | Query | 0 | NULL | show processlist | +// +------+---------+---------------------+------+-------------+------+------------------------------------------------------------------+------------------+ // // Array indices for the results of SHOW PROCESSLIST. const ( @@ -516,8 +514,8 @@ const ( ) // FindReplicas gets IP addresses for all currently connected replicas. -func FindReplicas(mysqld MysqlDaemon) ([]string, error) { - qr, err := mysqld.FetchSuperQuery(context.TODO(), "SHOW PROCESSLIST") +func FindReplicas(ctx context.Context, mysqld MysqlDaemon) ([]string, error) { + qr, err := mysqld.FetchSuperQuery(ctx, "SHOW PROCESSLIST") if err != nil { return nil, err } @@ -551,31 +549,13 @@ func FindReplicas(mysqld MysqlDaemon) ([]string, error) { // GetBinlogInformation gets the binlog format, whether binlog is enabled and if updates on replica logging is enabled. 
func (mysqld *Mysqld) GetBinlogInformation(ctx context.Context) (string, bool, bool, string, error) { - qr, err := mysqld.FetchSuperQuery(ctx, "select @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates, @@global.binlog_row_image") - if err != nil { - return "", false, false, "", err - } - if len(qr.Rows) != 1 { - return "", false, false, "", errors.New("unable to read global variables binlog_format, log_bin, log_slave_updates, gtid_mode, binlog_rowge") - } - res := qr.Named().Row() - binlogFormat, err := res.ToString("@@global.binlog_format") - if err != nil { - return "", false, false, "", err - } - logBin, err := res.ToInt64("@@global.log_bin") - if err != nil { - return "", false, false, "", err - } - logReplicaUpdates, err := res.ToInt64("@@global.log_slave_updates") - if err != nil { - return "", false, false, "", err - } - binlogRowImage, err := res.ToString("@@global.binlog_row_image") + conn, err := getPoolReconnect(ctx, mysqld.dbaPool) if err != nil { return "", false, false, "", err } - return binlogFormat, logBin == 1, logReplicaUpdates == 1, binlogRowImage, nil + defer conn.Recycle() + + return conn.Conn.BinlogInformation() } // GetGTIDMode gets the GTID mode for the server @@ -627,9 +607,48 @@ func (mysqld *Mysqld) GetPreviousGTIDs(ctx context.Context, binlog string) (prev return previousGtids, nil } +var ErrNoSemiSync = errors.New("semi-sync plugin not loaded") + +func (mysqld *Mysqld) SemiSyncType(ctx context.Context) mysql.SemiSyncType { + if mysqld.semiSyncType == mysql.SemiSyncTypeUnknown { + mysqld.semiSyncType, _ = mysqld.SemiSyncExtensionLoaded(ctx) + } + return mysqld.semiSyncType +} + +func (mysqld *Mysqld) enableSemiSyncQuery(ctx context.Context) (string, error) { + switch mysqld.SemiSyncType(ctx) { + case mysql.SemiSyncTypeSource: + return "SET GLOBAL rpl_semi_sync_source_enabled = %v, GLOBAL rpl_semi_sync_replica_enabled = %v", nil + case mysql.SemiSyncTypeMaster: + return "SET GLOBAL rpl_semi_sync_master_enabled = %v, 
GLOBAL rpl_semi_sync_slave_enabled = %v", nil + } + return "", ErrNoSemiSync +} + +func (mysqld *Mysqld) semiSyncClientsQuery(ctx context.Context) (string, error) { + switch mysqld.SemiSyncType(ctx) { + case mysql.SemiSyncTypeSource: + return "SHOW STATUS LIKE 'Rpl_semi_sync_source_clients'", nil + case mysql.SemiSyncTypeMaster: + return "SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'", nil + } + return "", ErrNoSemiSync +} + +func (mysqld *Mysqld) semiSyncReplicationStatusQuery(ctx context.Context) (string, error) { + switch mysqld.SemiSyncType(ctx) { + case mysql.SemiSyncTypeSource: + return "SHOW STATUS LIKE 'rpl_semi_sync_replica_status'", nil + case mysql.SemiSyncTypeMaster: + return "SHOW STATUS LIKE 'rpl_semi_sync_slave_status'", nil + } + return "", ErrNoSemiSync +} + // SetSemiSyncEnabled enables or disables semi-sync replication for // primary and/or replica mode. -func (mysqld *Mysqld) SetSemiSyncEnabled(primary, replica bool) error { +func (mysqld *Mysqld) SetSemiSyncEnabled(ctx context.Context, primary, replica bool) error { log.Infof("Setting semi-sync mode: primary=%v, replica=%v", primary, replica) // Convert bool to int. @@ -641,9 +660,11 @@ func (mysqld *Mysqld) SetSemiSyncEnabled(primary, replica bool) error { s = 1 } - err := mysqld.ExecuteSuperQuery(context.TODO(), fmt.Sprintf( - "SET GLOBAL rpl_semi_sync_master_enabled = %v, GLOBAL rpl_semi_sync_slave_enabled = %v", - p, s)) + query, err := mysqld.enableSemiSyncQuery(ctx) + if err != nil { + return err + } + err = mysqld.ExecuteSuperQuery(ctx, fmt.Sprintf(query, p, s)) if err != nil { return fmt.Errorf("can't set semi-sync mode: %v; make sure plugins are loaded in my.cnf", err) } @@ -652,30 +673,46 @@ func (mysqld *Mysqld) SetSemiSyncEnabled(primary, replica bool) error { // SemiSyncEnabled returns whether semi-sync is enabled for primary or replica. // If the semi-sync plugin is not loaded, we assume semi-sync is disabled. 
-func (mysqld *Mysqld) SemiSyncEnabled() (primary, replica bool) { - vars, err := mysqld.fetchVariables(context.TODO(), "rpl_semi_sync_%_enabled") +func (mysqld *Mysqld) SemiSyncEnabled(ctx context.Context) (primary, replica bool) { + vars, err := mysqld.fetchVariables(ctx, "rpl_semi_sync_%_enabled") if err != nil { return false, false } - primary = vars["rpl_semi_sync_master_enabled"] == "ON" - replica = vars["rpl_semi_sync_slave_enabled"] == "ON" + switch mysqld.SemiSyncType(ctx) { + case mysql.SemiSyncTypeSource: + primary = vars["rpl_semi_sync_source_enabled"] == "ON" + replica = vars["rpl_semi_sync_replica_enabled"] == "ON" + case mysql.SemiSyncTypeMaster: + primary = vars["rpl_semi_sync_master_enabled"] == "ON" + replica = vars["rpl_semi_sync_slave_enabled"] == "ON" + } return primary, replica } // SemiSyncStatus returns the current status of semi-sync for primary and replica. -func (mysqld *Mysqld) SemiSyncStatus() (primary, replica bool) { - vars, err := mysqld.fetchStatuses(context.TODO(), "Rpl_semi_sync_%_status") +func (mysqld *Mysqld) SemiSyncStatus(ctx context.Context) (primary, replica bool) { + vars, err := mysqld.fetchStatuses(ctx, "Rpl_semi_sync_%_status") if err != nil { return false, false } - primary = vars["Rpl_semi_sync_master_status"] == "ON" - replica = vars["Rpl_semi_sync_slave_status"] == "ON" + switch mysqld.SemiSyncType(ctx) { + case mysql.SemiSyncTypeSource: + primary = vars["Rpl_semi_sync_source_status"] == "ON" + replica = vars["Rpl_semi_sync_replica_status"] == "ON" + case mysql.SemiSyncTypeMaster: + primary = vars["Rpl_semi_sync_master_status"] == "ON" + replica = vars["Rpl_semi_sync_slave_status"] == "ON" + } return primary, replica } // SemiSyncClients returns the number of semi-sync clients for the primary. 
-func (mysqld *Mysqld) SemiSyncClients() uint32 { - qr, err := mysqld.FetchSuperQuery(context.TODO(), "SHOW STATUS LIKE 'Rpl_semi_sync_master_clients'") +func (mysqld *Mysqld) SemiSyncClients(ctx context.Context) uint32 { + query, err := mysqld.semiSyncClientsQuery(ctx) + if err != nil { + return 0 + } + qr, err := mysqld.FetchSuperQuery(ctx, query) if err != nil { return 0 } @@ -688,24 +725,35 @@ func (mysqld *Mysqld) SemiSyncClients() uint32 { } // SemiSyncSettings returns the settings of semi-sync which includes the timeout and the number of replicas to wait for. -func (mysqld *Mysqld) SemiSyncSettings() (timeout uint64, numReplicas uint32) { - vars, err := mysqld.fetchVariables(context.TODO(), "rpl_semi_sync_%") +func (mysqld *Mysqld) SemiSyncSettings(ctx context.Context) (timeout uint64, numReplicas uint32) { + vars, err := mysqld.fetchVariables(ctx, "rpl_semi_sync_%") if err != nil { return 0, 0 } - timeout, _ = strconv.ParseUint(vars["rpl_semi_sync_master_timeout"], 10, 64) - numReplicasUint, _ := strconv.ParseUint(vars["rpl_semi_sync_master_wait_for_slave_count"], 10, 32) + var numReplicasUint uint64 + switch mysqld.SemiSyncType(ctx) { + case mysql.SemiSyncTypeSource: + timeout, _ = strconv.ParseUint(vars["rpl_semi_sync_source_timeout"], 10, 64) + numReplicasUint, _ = strconv.ParseUint(vars["rpl_semi_sync_source_wait_for_replica_count"], 10, 32) + case mysql.SemiSyncTypeMaster: + timeout, _ = strconv.ParseUint(vars["rpl_semi_sync_master_timeout"], 10, 64) + numReplicasUint, _ = strconv.ParseUint(vars["rpl_semi_sync_master_wait_for_slave_count"], 10, 32) + } return timeout, uint32(numReplicasUint) } // SemiSyncReplicationStatus returns whether semi-sync is currently used by replication. 
-func (mysqld *Mysqld) SemiSyncReplicationStatus() (bool, error) { - qr, err := mysqld.FetchSuperQuery(context.TODO(), "SHOW STATUS LIKE 'rpl_semi_sync_slave_status'") +func (mysqld *Mysqld) SemiSyncReplicationStatus(ctx context.Context) (bool, error) { + query, err := mysqld.semiSyncReplicationStatusQuery(ctx) + if err != nil { + return false, err + } + qr, err := mysqld.FetchSuperQuery(ctx, query) if err != nil { return false, err } if len(qr.Rows) != 1 { - return false, errors.New("no rpl_semi_sync_slave_status variable in mysql") + return false, errors.New("no rpl_semi_sync_replica_status variable in mysql") } if qr.Rows[0][1].ToString() == "ON" { return true, nil @@ -714,14 +762,12 @@ func (mysqld *Mysqld) SemiSyncReplicationStatus() (bool, error) { } // SemiSyncExtensionLoaded returns whether semi-sync plugins are loaded. -func (mysqld *Mysqld) SemiSyncExtensionLoaded() (bool, error) { - qr, err := mysqld.FetchSuperQuery(context.Background(), "SELECT COUNT(*) > 0 AS plugin_loaded FROM information_schema.plugins WHERE plugin_name LIKE 'rpl_semi_sync%'") - if err != nil { - return false, err - } - pluginPresent, err := qr.Rows[0][0].ToBool() - if err != nil { - return false, err +func (mysqld *Mysqld) SemiSyncExtensionLoaded(ctx context.Context) (mysql.SemiSyncType, error) { + conn, connErr := getPoolReconnect(ctx, mysqld.dbaPool) + if connErr != nil { + return mysql.SemiSyncTypeUnknown, connErr } - return pluginPresent, nil + defer conn.Recycle() + + return conn.Conn.SemiSyncExtensionLoaded() } diff --git a/go/vt/mysqlctl/replication_test.go b/go/vt/mysqlctl/replication_test.go index 1502ad4773e..1ca41437ea7 100644 --- a/go/vt/mysqlctl/replication_test.go +++ b/go/vt/mysqlctl/replication_test.go @@ -17,13 +17,56 @@ limitations under the License. 
package mysqlctl import ( + "context" + "fmt" + "math" + "net" "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" ) func testRedacted(t *testing.T, source, expected string) { - if r := redactPassword(source); r != expected { - t.Errorf("redactPassword bad result: %v\nWas expecting:%v", r, expected) - } + assert.Equal(t, expected, redactPassword(source)) +} + +func TestRedactSourcePassword(t *testing.T) { + + // regular test case + testRedacted(t, `CHANGE REPLICATION SOURCE TO + SOURCE_PASSWORD = 'AAA', + SOURCE_CONNECT_RETRY = 1 +`, + `CHANGE REPLICATION SOURCE TO + SOURCE_PASSWORD = '****', + SOURCE_CONNECT_RETRY = 1 +`) + + // empty password + testRedacted(t, `CHANGE REPLICATION SOURCE TO + SOURCE_PASSWORD = '', + SOURCE_CONNECT_RETRY = 1 +`, + `CHANGE REPLICATION SOURCE TO + SOURCE_PASSWORD = '****', + SOURCE_CONNECT_RETRY = 1 +`) + + // no beginning match + testRedacted(t, "aaaaaaaaaaaaaa", "aaaaaaaaaaaaaa") + + // no end match + testRedacted(t, `CHANGE REPLICATION SOURCE TO + SOURCE_PASSWORD = 'AAA`, `CHANGE REPLICATION SOURCE TO + SOURCE_PASSWORD = 'AAA`) } func TestRedactMasterPassword(t *testing.T) { @@ -72,11 +115,646 @@ func TestRedactPassword(t *testing.T) { // both primary password and password testRedacted(t, `START xxx - MASTER_PASSWORD = 'AAA', + SOURCE_PASSWORD = 'AAA', PASSWORD = 'BBB' `, `START xxx - MASTER_PASSWORD = '****', + SOURCE_PASSWORD = '****', PASSWORD = '****' `) } + +func TestWaitForReplicationStart(t *testing.T) { + db := fakesqldb.New(t) + fakemysqld := NewFakeMysqlDaemon(db) + + defer func() { + db.Close() + fakemysqld.Close() + }() + + err := WaitForReplicationStart(context.Background(), fakemysqld, 2) + assert.NoError(t, err) + + fakemysqld.ReplicationStatusError = fmt.Errorf("test error") + err = 
WaitForReplicationStart(context.Background(), fakemysqld, 2) + assert.ErrorContains(t, err, "test error") + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW REPLICA STATUS", sqltypes.MakeTestResult(sqltypes.MakeTestFields("Last_SQL_Error|Last_IO_Error", "varchar|varchar"), "test sql error|test io error")) + + err = WaitForReplicationStart(context.Background(), testMysqld, 2) + assert.ErrorContains(t, err, "Last_SQL_Error: test sql error, Last_IO_Error: test io error") +} + +func TestGetMysqlPort(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 'port'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|test_field2", "varchar|uint64"), "test_port|12")) + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + res, err := testMysqld.GetMysqlPort(ctx) + assert.Equal(t, int32(12), res) + assert.NoError(t, err) + + db.AddQuery("SHOW VARIABLES LIKE 'port'", &sqltypes.Result{}) + res, err = testMysqld.GetMysqlPort(ctx) + assert.ErrorContains(t, err, "no port variable in mysql") + assert.Equal(t, int32(0), res) +} + +func TestGetServerID(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("select @@global.server_id", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "uint64"), "12")) + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + res, err := testMysqld.GetServerID(ctx) + 
assert.Equal(t, uint32(12), res) + assert.NoError(t, err) + + db.AddQuery("select @@global.server_id", &sqltypes.Result{}) + res, err = testMysqld.GetServerID(ctx) + assert.ErrorContains(t, err, "no server_id in mysql") + assert.Equal(t, uint32(0), res) +} + +func TestGetServerUUID(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + uuid := "test_uuid" + db.AddQuery("SELECT @@global.server_uuid", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), uuid)) + + ctx := context.Background() + res, err := testMysqld.GetServerUUID(ctx) + assert.Equal(t, uuid, res) + assert.NoError(t, err) + + db.AddQuery("SELECT @@global.server_uuid", &sqltypes.Result{}) + res, err = testMysqld.GetServerUUID(ctx) + assert.Error(t, err) + assert.Equal(t, "", res) +} + +func TestWaitSourcePos(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT @@global.gtid_executed", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + err := testMysqld.WaitSourcePos(ctx, replication.Position{GTIDSet: replication.Mysql56GTIDSet{}}) + assert.NoError(t, err) + + db.AddQuery("SELECT @@global.gtid_executed", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "invalid_id")) + err = testMysqld.WaitSourcePos(ctx, replication.Position{GTIDSet: replication.Mysql56GTIDSet{}}) + assert.ErrorContains(t, err, "invalid MySQL 5.6 GTID set") +} + +func TestReplicationStatus(t 
*testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW REPLICA STATUS", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "test_status")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + res, err := testMysqld.ReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.True(t, res.ReplicationLagUnknown) + + db.AddQuery("SHOW REPLICA STATUS", &sqltypes.Result{}) + res, err = testMysqld.ReplicationStatus(context.Background()) + assert.Error(t, err) + assert.False(t, res.ReplicationLagUnknown) +} + +func TestPrimaryStatus(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW MASTER STATUS", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "test_status")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + res, err := testMysqld.PrimaryStatus(ctx) + assert.NoError(t, err) + assert.NotNil(t, res) + + db.AddQuery("SHOW MASTER STATUS", &sqltypes.Result{}) + _, err = testMysqld.PrimaryStatus(ctx) + assert.ErrorContains(t, err, "no master status") +} + +func TestReplicationConfiguration(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT * FROM performance_schema.replication_connection_configuration", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|HEARTBEAT_INTERVAL|field2", "varchar|float64|varchar"), "test_status|4.5000|test")) + db.AddQuery("select @@global.replica_net_timeout", 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("@@global.replica_net_timeout", "int64"), "9")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + replConfig, err := testMysqld.ReplicationConfiguration(ctx) + assert.NoError(t, err) + assert.NotNil(t, replConfig) + require.EqualValues(t, math.Round(replConfig.HeartbeatInterval*2), replConfig.ReplicaNetTimeout) + + db.AddQuery("SELECT * FROM performance_schema.replication_connection_configuration", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|HEARTBEAT_INTERVAL|field2", "varchar|float64|varchar"))) + replConfig, err = testMysqld.ReplicationConfiguration(ctx) + assert.NoError(t, err) + assert.Nil(t, replConfig) +} + +func TestGetGTIDPurged(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT @@global.gtid_purged", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + res, err := testMysqld.GetGTIDPurged(ctx) + assert.NoError(t, err) + assert.Equal(t, "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:12-17", res.String()) +} + +func TestPrimaryPosition(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT @@global.gtid_executed", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + res, err := testMysqld.PrimaryPosition(context.Background()) + 
assert.NoError(t, err) + assert.Equal(t, "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8:12-17", res.String()) +} + +func TestSetReplicationPosition(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("RESET MASTER", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + + pos := replication.Position{GTIDSet: replication.Mysql56GTIDSet{}} + sid := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + pos.GTIDSet = pos.GTIDSet.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 1}) + + err := testMysqld.SetReplicationPosition(ctx, pos) + assert.Error(t, err) + + // We expect this query to be executed + db.AddQuery("SET GLOBAL gtid_purged = '00010203-0405-0607-0809-0a0b0c0d0e0f:1'", &sqltypes.Result{}) + + err = testMysqld.SetReplicationPosition(ctx, pos) + assert.NoError(t, err) +} + +func TestSetReplicationSource(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("RESET MASTER", &sqltypes.Result{}) + db.AddQuery("STOP REPLICA", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + + // We expect query containing passed host and port to be executed + err := testMysqld.SetReplicationSource(ctx, "test_host", 2, 0, true, true) + assert.ErrorContains(t, err, `SOURCE_HOST = 'test_host'`) + assert.ErrorContains(t, err, `SOURCE_PORT = 2`) + assert.ErrorContains(t, err, `CHANGE REPLICATION SOURCE TO`) +} + +func TestResetReplication(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + 
db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW GLOBAL VARIABLES LIKE 'rpl_semi_sync%'", &sqltypes.Result{}) + db.AddQuery("STOP REPLICA", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + err := testMysqld.ResetReplication(ctx) + assert.ErrorContains(t, err, "RESET REPLICA ALL") + + // We expect this query to be executed + db.AddQuery("RESET REPLICA ALL", &sqltypes.Result{}) + err = testMysqld.ResetReplication(ctx) + assert.ErrorContains(t, err, "RESET MASTER") + + // We expect this query to be executed + db.AddQuery("RESET MASTER", &sqltypes.Result{}) + err = testMysqld.ResetReplication(ctx) + assert.NoError(t, err) +} + +func TestResetReplicationParameters(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW GLOBAL VARIABLES LIKE 'rpl_semi_sync%'", &sqltypes.Result{}) + db.AddQuery("STOP REPLICA", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + err := testMysqld.ResetReplicationParameters(ctx) + assert.ErrorContains(t, err, "RESET REPLICA ALL") + + // We expect this query to be executed + db.AddQuery("RESET REPLICA ALL", &sqltypes.Result{}) + err = testMysqld.ResetReplicationParameters(ctx) + assert.NoError(t, err) +} + +func TestFindReplicas(t *testing.T) { + db := fakesqldb.New(t) + fakemysqld := NewFakeMysqlDaemon(db) + + defer func() { + db.Close() + fakemysqld.Close() + }() + + fakemysqld.FetchSuperQueryMap = map[string]*sqltypes.Result{ + "SHOW PROCESSLIST": sqltypes.MakeTestResult(sqltypes.MakeTestFields("Id|User|Host|db|Command|Time|State|Info", "varchar|varchar|varchar|varchar|varchar|varchar|varchar|varchar"), "1|user1|localhost:12|db1|Binlog Dump|54|Has sent all binlog to replica|NULL"), + } + + res, err := 
FindReplicas(context.Background(), fakemysqld) + assert.NoError(t, err) + + want, err := net.LookupHost("localhost") + require.NoError(t, err) + + assert.Equal(t, want, res) +} + +func TestGetBinlogInformation(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SELECT @@global.binlog_format, @@global.log_bin, @@global.log_replica_updates, @@global.binlog_row_image", sqltypes.MakeTestResult(sqltypes.MakeTestFields("@@global.binlog_format|@@global.log_bin|@@global.log_replica_updates|@@global.binlog_row_image", "varchar|int64|int64|varchar"), "binlog|1|2|row_image")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + bin, logBin, replicaUpdate, rowImage, err := testMysqld.GetBinlogInformation(ctx) + assert.NoError(t, err) + assert.Equal(t, "binlog", bin) + assert.Equal(t, "row_image", rowImage) + assert.True(t, logBin) + assert.False(t, replicaUpdate) +} + +func TestGetGTIDMode(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + in := "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8,8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:12-17" + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("select @@global.gtid_mode", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field", "varchar"), in)) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + res, err := testMysqld.GetGTIDMode(ctx) + assert.NoError(t, err) + assert.Equal(t, in, res) +} + +func TestFlushBinaryLogs(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + 
defer testMysqld.Close() + + // We expect this query to be executed + err := testMysqld.FlushBinaryLogs(context.Background()) + assert.ErrorContains(t, err, "FLUSH BINARY LOGS") +} + +func TestGetBinaryLogs(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + db.AddQuery("SHOW BINARY LOGS", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field", "varchar"), "binlog1", "binlog2")) + + res, err := testMysqld.GetBinaryLogs(context.Background()) + assert.NoError(t, err) + assert.Len(t, res, 2) + assert.Contains(t, res, "binlog1") + assert.Contains(t, res, "binlog2") +} + +func TestGetPreviousGTIDs(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW BINLOG EVENTS IN 'binlog' LIMIT 2", sqltypes.MakeTestResult(sqltypes.MakeTestFields("Event_type|Info", "varchar|varchar"), "Previous_gtids|8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + res, err := testMysqld.GetPreviousGTIDs(ctx, "binlog") + assert.NoError(t, err) + assert.Equal(t, "8bc65c84-3fe4-11ed-a912-257f0fcdd6c9:1-8", res) +} + +func TestSetSemiSyncEnabled(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + // We expect this query to be executed + err := testMysqld.SetSemiSyncEnabled(context.Background(), true, true) + assert.ErrorIs(t, err, ErrNoSemiSync) + + // We expect this query to be executed + err = 
testMysqld.SetSemiSyncEnabled(context.Background(), true, false) + assert.ErrorIs(t, err, ErrNoSemiSync) + + // We expect this query to be executed + err = testMysqld.SetSemiSyncEnabled(context.Background(), false, true) + assert.ErrorIs(t, err, ErrNoSemiSync) +} + +func TestSemiSyncEnabled(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "rpl_semi_sync_source_enabled|OFF", "rpl_semi_sync_replica_enabled|ON")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + p, r := testMysqld.SemiSyncEnabled(context.Background()) + assert.False(t, p) + assert.True(t, r) +} + +func TestSemiSyncStatus(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "rpl_semi_sync_source_enabled|ON", "rpl_semi_sync_replica_enabled|ON")) + db.AddQuery("SHOW STATUS LIKE 'Rpl_semi_sync_%_status'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "Rpl_semi_sync_source_status|ON", "Rpl_semi_sync_replica_status|OFF")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + p, r := testMysqld.SemiSyncStatus(context.Background()) + assert.True(t, p) + assert.False(t, r) +} + +func TestSemiSyncClients(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 
'rpl_semi_sync_%_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "rpl_semi_sync_source_enabled|ON", "rpl_semi_sync_replica_enabled|ON")) + db.AddQuery("SHOW STATUS LIKE 'Rpl_semi_sync_source_clients'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|uint64"), "val1|12")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + res := testMysqld.SemiSyncClients(context.Background()) + assert.Equal(t, uint32(12), res) +} + +func TestSemiSyncSettings(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "rpl_semi_sync_source_enabled|ON", "rpl_semi_sync_replica_enabled|ON")) + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|uint64"), "rpl_semi_sync_source_timeout|123", "rpl_semi_sync_source_wait_for_replica_count|80")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + timeout, replicas := testMysqld.SemiSyncSettings(context.Background()) + assert.Equal(t, uint64(123), timeout) + assert.Equal(t, uint32(80), replicas) +} + +func TestSemiSyncReplicationStatus(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "rpl_semi_sync_source_enabled|ON", "rpl_semi_sync_replica_enabled|ON")) + db.AddQuery("SHOW STATUS LIKE 'rpl_semi_sync_replica_status'", 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|uint64"), "rpl_semi_sync_replica_status|ON")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + res, err := testMysqld.SemiSyncReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.True(t, res) + + db.AddQuery("SHOW STATUS LIKE 'rpl_semi_sync_replica_status'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|uint64"), "rpl_semi_sync_replica_status|OFF")) + + res, err = testMysqld.SemiSyncReplicationStatus(context.Background()) + assert.NoError(t, err) + assert.False(t, res) +} + +func TestSemiSyncExtensionLoaded(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", sqltypes.MakeTestResult(sqltypes.MakeTestFields("field1|field2", "varchar|varchar"), "rpl_semi_sync_source_enabled|ON", "rpl_semi_sync_replica_enabled|ON")) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + res, err := testMysqld.SemiSyncExtensionLoaded(ctx) + assert.NoError(t, err) + assert.Contains(t, []mysql.SemiSyncType{mysql.SemiSyncTypeSource, mysql.SemiSyncTypeMaster}, res) + + db.AddQuery("SHOW VARIABLES LIKE 'rpl_semi_sync_%_enabled'", &sqltypes.Result{}) + + res, err = testMysqld.SemiSyncExtensionLoaded(ctx) + assert.NoError(t, err) + assert.Equal(t, mysql.SemiSyncTypeOff, res) +} diff --git a/go/vt/mysqlctl/s3backupstorage/s3.go b/go/vt/mysqlctl/s3backupstorage/s3.go index ef3bfc37b31..cdc4e355d45 100644 --- a/go/vt/mysqlctl/s3backupstorage/s3.go +++ b/go/vt/mysqlctl/s3backupstorage/s3.go @@ -281,10 +281,21 @@ func (s3ServerSideEncryption *S3ServerSideEncryption) reset() { // S3BackupStorage implements the backupstorage.BackupStorage interface. 
type S3BackupStorage struct { - _client *s3.S3 - mu sync.Mutex - s3SSE S3ServerSideEncryption - params backupstorage.Params + _client *s3.S3 + mu sync.Mutex + s3SSE S3ServerSideEncryption + params backupstorage.Params + transport *http.Transport +} + +func newS3BackupStorage() *S3BackupStorage { + // This initialises a new transport based off http.DefaultTransport the first time and returns the same + // transport on subsequent calls so connections can be reused as part of the same transport. + tlsClientConf := &tls.Config{InsecureSkipVerify: tlsSkipVerifyCert} + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = tlsClientConf + + return &S3BackupStorage{params: backupstorage.NoParams(), transport: transport} } // ListBackups is part of the backupstorage.BackupStorage interface. @@ -424,7 +435,7 @@ func (bs *S3BackupStorage) Close() error { } func (bs *S3BackupStorage) WithParams(params backupstorage.Params) backupstorage.BackupStorage { - return &S3BackupStorage{params: params} + return &S3BackupStorage{params: params, transport: bs.transport} } var _ backupstorage.BackupStorage = (*S3BackupStorage)(nil) @@ -445,9 +456,7 @@ func (bs *S3BackupStorage) client() (*s3.S3, error) { if bs._client == nil { logLevel := getLogLevel() - tlsClientConf := &tls.Config{InsecureSkipVerify: tlsSkipVerifyCert} - httpTransport := &http.Transport{TLSClientConfig: tlsClientConf} - httpClient := &http.Client{Transport: httpTransport} + httpClient := &http.Client{Transport: bs.transport} session, err := session.NewSession() if err != nil { @@ -497,7 +506,7 @@ func objName(parts ...string) *string { } func init() { - backupstorage.BackupStorageMap["s3"] = &S3BackupStorage{params: backupstorage.NoParams()} + backupstorage.BackupStorageMap["s3"] = newS3BackupStorage() logNameMap = logNameToLogLevel{ "LogOff": aws.LogOff, diff --git a/go/vt/mysqlctl/s3backupstorage/s3_test.go b/go/vt/mysqlctl/s3backupstorage/s3_test.go index a10432b78c2..6f4207a645f 
100644 --- a/go/vt/mysqlctl/s3backupstorage/s3_test.go +++ b/go/vt/mysqlctl/s3backupstorage/s3_test.go @@ -276,3 +276,23 @@ func TestSSECustomerFileBase64Key(t *testing.T) { assert.Nil(t, sseData.customerKey, "customerKey expected to be nil") assert.Nil(t, sseData.customerMd5, "customerMd5 expected to be nil") } + +func TestNewS3Transport(t *testing.T) { + s3 := newS3BackupStorage() + + // checking some of the values are present in the returned transport and match the http.DefaultTransport. + assert.Equal(t, http.DefaultTransport.(*http.Transport).IdleConnTimeout, s3.transport.IdleConnTimeout) + assert.Equal(t, http.DefaultTransport.(*http.Transport).MaxIdleConns, s3.transport.MaxIdleConns) + assert.NotNil(t, s3.transport.DialContext) + assert.NotNil(t, s3.transport.Proxy) +} + +func TestWithParams(t *testing.T) { + bases3 := newS3BackupStorage() + s3 := bases3.WithParams(backupstorage.Params{}).(*S3BackupStorage) + // checking some of the values are present in the returned transport and match the http.DefaultTransport. + assert.Equal(t, http.DefaultTransport.(*http.Transport).IdleConnTimeout, s3.transport.IdleConnTimeout) + assert.Equal(t, http.DefaultTransport.(*http.Transport).MaxIdleConns, s3.transport.MaxIdleConns) + assert.NotNil(t, s3.transport.DialContext) + assert.NotNil(t, s3.transport.Proxy) +} diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 6f1c7c19570..2953ddc3949 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -66,12 +66,6 @@ func (mysqld *Mysqld) executeSchemaCommands(ctx context.Context, sql string) err return mysqld.executeMysqlScript(ctx, params, sql) } -func encodeEntityName(name string) string { - var buf strings.Builder - sqltypes.NewVarChar(name).EncodeSQL(&buf) - return buf.String() -} - // tableListSQL returns an IN clause "('t1', 't2'...) for a list of tables." 
func tableListSQL(tables []string) (string, error) { if len(tables) == 0 { @@ -80,7 +74,7 @@ func tableListSQL(tables []string) (string, error) { encodedTables := make([]string, len(tables)) for i, tableName := range tables { - encodedTables[i] = encodeEntityName(tableName) + encodedTables[i] = sqltypes.EncodeStringSQL(tableName) } return "(" + strings.Join(encodedTables, ", ") + ")", nil @@ -307,9 +301,13 @@ func GetColumnsList(dbName, tableName string, exec func(string, int, bool) (*sql if dbName == "" { dbName2 = "database()" } else { - dbName2 = encodeEntityName(dbName) + dbName2 = sqltypes.EncodeStringSQL(dbName) + } + sanitizedTableName, err := sqlescape.UnescapeID(tableName) + if err != nil { + return "", err } - query := fmt.Sprintf(GetColumnNamesQuery, dbName2, encodeEntityName(sqlescape.UnescapeID(tableName))) + query := fmt.Sprintf(GetColumnNamesQuery, dbName2, sqltypes.EncodeStringSQL(sanitizedTableName)) qr, err := exec(query, -1, true) if err != nil { return "", err @@ -342,9 +340,16 @@ func GetColumns(dbName, table string, exec func(string, int, bool) (*sqltypes.Re if selectColumns == "" { selectColumns = "*" } - tableSpec := sqlescape.EscapeID(sqlescape.UnescapeID(table)) + tableSpec, err := sqlescape.EnsureEscaped(table) + if err != nil { + return nil, nil, err + } if dbName != "" { - tableSpec = fmt.Sprintf("%s.%s", sqlescape.EscapeID(sqlescape.UnescapeID(dbName)), tableSpec) + dbName, err := sqlescape.EnsureEscaped(dbName) + if err != nil { + return nil, nil, err + } + tableSpec = fmt.Sprintf("%s.%s", dbName, tableSpec) } query := fmt.Sprintf(GetFieldsQuery, selectColumns, tableSpec) qr, err := exec(query, 0, true) @@ -396,7 +401,7 @@ func (mysqld *Mysqld) getPrimaryKeyColumns(ctx context.Context, dbName string, t FROM information_schema.STATISTICS WHERE TABLE_SCHEMA = %s AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary' ORDER BY table_name, SEQ_IN_INDEX` - sql = fmt.Sprintf(sql, encodeEntityName(dbName), tableList) + sql = fmt.Sprintf(sql, 
sqltypes.EncodeStringSQL(dbName), tableList) qr, err := conn.Conn.ExecuteFetch(sql, len(tables)*100, true) if err != nil { return nil, err @@ -531,6 +536,10 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan sql = "SET sql_log_bin = 0;\n" + sql } + if change.DisableForeignKeyChecks { + sql = "SET foreign_key_checks = 0;\n" + sql + } + // add a 'use XXX' in front of the SQL sql = fmt.Sprintf("USE %s;\n%s", sqlescape.EscapeID(dbName), sql) @@ -579,13 +588,7 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan // defined PRIMARY KEY then it may return the columns for // that index if it is likely the most efficient one amongst // the available PKE indexes on the table. -func (mysqld *Mysqld) GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName, table string) ([]string, string, error) { - conn, err := getPoolReconnect(ctx, mysqld.dbaPool) - if err != nil { - return nil, "", err - } - defer conn.Recycle() - +func GetPrimaryKeyEquivalentColumns(ctx context.Context, exec func(string, int, bool) (*sqltypes.Result, error), dbName, table string) ([]string, string, error) { // We use column name aliases to guarantee lower case for our named results. 
sql := ` SELECT index_cols.COLUMN_NAME AS column_name, index_cols.INDEX_NAME as index_name FROM information_schema.STATISTICS AS index_cols INNER JOIN @@ -626,10 +629,10 @@ func (mysqld *Mysqld) GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName ) AS pke ON index_cols.INDEX_NAME = pke.INDEX_NAME WHERE index_cols.TABLE_SCHEMA = %s AND index_cols.TABLE_NAME = %s AND NON_UNIQUE = 0 AND NULLABLE != 'YES' ORDER BY SEQ_IN_INDEX ASC` - encodedDbName := encodeEntityName(dbName) - encodedTable := encodeEntityName(table) + encodedDbName := sqltypes.EncodeStringSQL(dbName) + encodedTable := sqltypes.EncodeStringSQL(table) sql = fmt.Sprintf(sql, encodedDbName, encodedTable, encodedDbName, encodedTable, encodedDbName, encodedTable) - qr, err := conn.Conn.ExecuteFetch(sql, 1000, true) + qr, err := exec(sql, 1000, true) if err != nil { return nil, "", err } diff --git a/go/vt/mysqlctl/schema_test.go b/go/vt/mysqlctl/schema_test.go index fb64f8ca8ee..d73e6c13665 100644 --- a/go/vt/mysqlctl/schema_test.go +++ b/go/vt/mysqlctl/schema_test.go @@ -1,14 +1,35 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package mysqlctl import ( + "context" "fmt" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) var queryMap map[string]*sqltypes.Result @@ -103,3 +124,286 @@ func TestColumnList(t *testing.T) { require.Equal(t, `[name:"col1" type:VARCHAR]`, fmt.Sprintf("%+v", fields)) } + +func TestGetSchemaAndSchemaChange(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + db.AddQuery("SHOW CREATE DATABASE IF NOT EXISTS `fakesqldb`", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|cmd", "varchar|varchar"), "create_db|create_db_cmd")) + db.AddQuery("SHOW CREATE TABLE `fakesqldb`.`test_table`", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|cmd", "varchar|varchar"), "create_table|create_table_cmd")) + + db.AddQuery("SELECT table_name, table_type, data_length, table_rows FROM information_schema.tables WHERE table_schema = 'fakesqldb' AND table_type = 'BASE TABLE'", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name|table_type|data_length|table_rows", "varchar|varchar|uint64|uint64"), "test_table|test_type|NULL|2")) + + db.AddQuery("SELECT table_name, table_type, data_length, table_rows FROM information_schema.tables WHERE table_schema = 'fakesqldb'", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name|table_type|data_length|table_rows", "varchar|varchar|uint64|uint64"), "test_table|test_type|NULL|2")) + + query := fmt.Sprintf(GetColumnNamesQuery, sqltypes.EncodeStringSQL(db.Name()), sqltypes.EncodeStringSQL("test_table")) + 
db.AddQuery(query, &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "column_name", + Type: sqltypes.VarChar, + }}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarChar("col1")}, + {sqltypes.NewVarChar("col2")}, + }, + }) + + db.AddQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`test_table` WHERE 1 != 1", &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "col1", + Type: sqltypes.VarChar, + }, + { + Name: "col2", + Type: sqltypes.VarChar, + }, + }, + Rows: [][]sqltypes.Value{}, + }) + + tableList, err := tableListSQL([]string{"test_table"}) + require.NoError(t, err) + + query = ` + SELECT TABLE_NAME as table_name, COLUMN_NAME as column_name + FROM information_schema.STATISTICS + WHERE TABLE_SCHEMA = %s AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary' + ORDER BY table_name, SEQ_IN_INDEX` + query = fmt.Sprintf(query, sqltypes.EncodeStringSQL("fakesqldb"), tableList) + db.AddQuery(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields("TABLE_NAME|COLUMN_NAME", "varchar|varchar"), "test_table|col1", "test_table|col2")) + + ctx := context.Background() + res, err := testMysqld.GetSchema(ctx, db.Name(), &tabletmanagerdata.GetSchemaRequest{}) + assert.NoError(t, err) + assert.Equal(t, res.String(), `database_schema:"create_db_cmd" table_definitions:{name:"test_table" schema:"create_table_cmd" columns:"col1" columns:"col2" type:"test_type" row_count:2 fields:{name:"col1" type:VARCHAR} fields:{name:"col2" type:VARCHAR}}`) + + // Test ApplySchemaChange + db.AddQuery("\nSET sql_log_bin = 0", &sqltypes.Result{}) + + r, err := testMysqld.ApplySchemaChange(ctx, db.Name(), &tmutils.SchemaChange{}) + assert.NoError(t, err) + assert.Equal(t, r.BeforeSchema, r.AfterSchema, "BeforeSchema should be equal to AfterSchema as no schema change was passed") + assert.Equal(t, `database_schema:"create_db_cmd" table_definitions:{name:"test_table" schema:"create_table_cmd" columns:"col1" columns:"col2" type:"test_type" row_count:2 fields:{name:"col1" type:VARCHAR} 
fields:{name:"col2" type:VARCHAR}}`, r.BeforeSchema.String()) + + r, err = testMysqld.ApplySchemaChange(ctx, db.Name(), &tmutils.SchemaChange{ + BeforeSchema: &tabletmanagerdata.SchemaDefinition{ + DatabaseSchema: "create_db_cmd", + TableDefinitions: []*tabletmanagerdata.TableDefinition{ + { + Name: "test_table_changed", + Schema: "create_table_cmd", + Type: "test_type", + }, + }, + }, + AfterSchema: &tabletmanagerdata.SchemaDefinition{ + DatabaseSchema: "create_db_cmd", + TableDefinitions: []*tabletmanagerdata.TableDefinition{ + { + Name: "test_table", + Schema: "create_table_cmd", + Type: "test_type", + }, + }, + }, + }) + assert.NoError(t, err) + assert.Equal(t, r.BeforeSchema, r.AfterSchema) + + r, err = testMysqld.ApplySchemaChange(ctx, db.Name(), &tmutils.SchemaChange{ + BeforeSchema: &tabletmanagerdata.SchemaDefinition{ + DatabaseSchema: "create_db_cmd", + TableDefinitions: []*tabletmanagerdata.TableDefinition{ + { + Name: "test_table", + Schema: "create_table_cmd", + Type: "test_type", + }, + }, + }, + SQL: "EXPECT THIS QUERY TO BE EXECUTED;\n", + }) + assert.ErrorContains(t, err, "EXPECT THIS QUERY TO BE EXECUTED") + assert.Nil(t, r) + + // Test PreflightSchemaChange + db.AddQuery("SET sql_log_bin = 0", &sqltypes.Result{}) + db.AddQuery("\nDROP DATABASE IF EXISTS _vt_preflight", &sqltypes.Result{}) + db.AddQuery("\nCREATE DATABASE _vt_preflight", &sqltypes.Result{}) + db.AddQuery("\nUSE _vt_preflight", &sqltypes.Result{}) + db.AddQuery("\nSET foreign_key_checks = 0", &sqltypes.Result{}) + db.AddQuery("\nDROP DATABASE _vt_preflight", &sqltypes.Result{}) + + l, err := testMysqld.PreflightSchemaChange(context.Background(), db.Name(), []string{}) + assert.NoError(t, err) + assert.Empty(t, l) + + db.AddQuery("SHOW CREATE DATABASE IF NOT EXISTS `_vt_preflight`", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|cmd", "varchar|varchar"), "create_db|create_db_cmd")) + + db.AddQuery("SELECT table_name, table_type, data_length, table_rows FROM 
information_schema.tables WHERE table_schema = '_vt_preflight' AND table_type = 'BASE TABLE'", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name|table_type|data_length|table_rows", "varchar|varchar|uint64|uint64"), "test_table|test_type|NULL|2")) + db.AddQuery("SELECT table_name, table_type, data_length, table_rows FROM information_schema.tables WHERE table_schema = '_vt_preflight'", sqltypes.MakeTestResult( + sqltypes.MakeTestFields("table_name|table_type|data_length|table_rows", "varchar|varchar|uint64|uint64"), "test_table|test_type|NULL|2")) + db.AddQuery("SHOW CREATE TABLE `_vt_preflight`.`test_table`", sqltypes.MakeTestResult(sqltypes.MakeTestFields("test_field|cmd", "varchar|varchar"), "create_table|create_table_cmd")) + + query = ` + SELECT TABLE_NAME as table_name, COLUMN_NAME as column_name + FROM information_schema.STATISTICS + WHERE TABLE_SCHEMA = %s AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary' + ORDER BY table_name, SEQ_IN_INDEX` + query = fmt.Sprintf(query, sqltypes.EncodeStringSQL("_vt_preflight"), tableList) + db.AddQuery(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields("TABLE_NAME|COLUMN_NAME", "varchar|varchar"), "test_table|col1", "test_table|col2")) + + query = fmt.Sprintf(GetColumnNamesQuery, sqltypes.EncodeStringSQL("_vt_preflight"), sqltypes.EncodeStringSQL("test_table")) + db.AddQuery(query, &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "column_name", + Type: sqltypes.VarChar, + }}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarChar("col1")}, + {sqltypes.NewVarChar("col2")}, + }, + }) + + db.AddQuery("SELECT `col1`, `col2` FROM `_vt_preflight`.`test_table` WHERE 1 != 1", &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "col1", + Type: sqltypes.VarChar, + }, + { + Name: "col2", + Type: sqltypes.VarChar, + }, + }, + Rows: [][]sqltypes.Value{}, + }) + + query = "EXPECT THIS QUERY TO BE EXECUTED" + _, err = testMysqld.PreflightSchemaChange(context.Background(), db.Name(), []string{query}) + 
assert.ErrorContains(t, err, query) +} + +func TestResolveTables(t *testing.T) { + db := fakesqldb.New(t) + testMysqld := NewFakeMysqlDaemon(db) + + defer func() { + db.Close() + testMysqld.Close() + }() + + ctx := context.Background() + res, err := ResolveTables(ctx, testMysqld, db.Name(), []string{}) + assert.ErrorContains(t, err, "no schema defined") + assert.Nil(t, res) + + testMysqld.Schema = &tabletmanagerdata.SchemaDefinition{TableDefinitions: tableDefinitions{{ + Name: "table1", + Schema: "schema1", + }, { + Name: "table2", + Schema: "schema2", + }}} + + res, err = ResolveTables(ctx, testMysqld, db.Name(), []string{"table1"}) + assert.NoError(t, err) + assert.Len(t, res, 1) + + res, err = ResolveTables(ctx, testMysqld, db.Name(), []string{"table1", "table2"}) + assert.NoError(t, err) + assert.Len(t, res, 2) +} + +func TestGetColumns(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, db.Name()) + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + tableName := "test_table" + query := fmt.Sprintf(GetColumnNamesQuery, sqltypes.EncodeStringSQL(db.Name()), sqltypes.EncodeStringSQL(tableName)) + db.AddQuery(query, &sqltypes.Result{ + Fields: []*querypb.Field{{ + Name: "column_name", + Type: sqltypes.VarChar, + }}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarChar("col1")}, + {sqltypes.NewVarChar("col2")}, + }, + }) + db.AddQuery("SELECT `col1`, `col2` FROM `fakesqldb`.`test_table` WHERE 1 != 1", &sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "col1", + Type: sqltypes.VarChar, + }, + { + Name: "col2", + Type: sqltypes.VarChar, + }, + }, + Rows: [][]sqltypes.Value{}, + }) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + ctx := context.Background() + + want := sqltypes.MakeTestFields("col1|col2", "varchar|varchar") + + field, cols, err := testMysqld.GetColumns(ctx, db.Name(), tableName) + assert.Equal(t, want, field) + assert.Equal(t, 
[]string{"col1", "col2"}, cols) + assert.NoError(t, err) +} + +func TestGetPrimaryKeyColumns(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + cp := *params + dbc := dbconfigs.NewTestDBConfigs(cp, cp, db.Name()) + + db.AddQuery("SELECT 1", &sqltypes.Result{}) + + testMysqld := NewMysqld(dbc) + defer testMysqld.Close() + + tableList, err := tableListSQL([]string{"test_table"}) + require.NoError(t, err) + + query := ` + SELECT TABLE_NAME as table_name, COLUMN_NAME as column_name + FROM information_schema.STATISTICS + WHERE TABLE_SCHEMA = %s AND TABLE_NAME IN %s AND LOWER(INDEX_NAME) = 'primary' + ORDER BY table_name, SEQ_IN_INDEX` + query = fmt.Sprintf(query, sqltypes.EncodeStringSQL("fakesqldb"), tableList) + db.AddQuery(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|column_name", "varchar|varchar"), "fakesqldb|col1", "fakesqldb2|col2")) + + ctx := context.Background() + res, err := testMysqld.GetPrimaryKeyColumns(ctx, db.Name(), "test_table") + assert.NoError(t, err) + assert.Contains(t, res, "col1") + assert.Len(t, res, 1) +} diff --git a/go/vt/mysqlctl/tmutils/schema.go b/go/vt/mysqlctl/tmutils/schema.go index aae529f89b0..fe217acd8a6 100644 --- a/go/vt/mysqlctl/tmutils/schema.go +++ b/go/vt/mysqlctl/tmutils/schema.go @@ -40,31 +40,6 @@ const ( TableView = "VIEW" ) -// TableDefinitionGetColumn returns the index of a column inside a -// TableDefinition. -func TableDefinitionGetColumn(td *tabletmanagerdatapb.TableDefinition, name string) (index int, ok bool) { - lowered := strings.ToLower(name) - for i, n := range td.Columns { - if lowered == strings.ToLower(n) { - return i, true - } - } - return -1, false -} - -// TableDefinitions is a list of TableDefinition, for sorting -type TableDefinitions []*tabletmanagerdatapb.TableDefinition - -// Len returns TableDefinitions length. -func (tds TableDefinitions) Len() int { - return len(tds) -} - -// Swap used for sorting TableDefinitions. 
-func (tds TableDefinitions) Swap(i, j int) { - tds[i], tds[j] = tds[j], tds[i] -} - // TableFilter is a filter for table names and types. type TableFilter struct { includeViews bool @@ -325,12 +300,13 @@ func DiffSchemaToArray(leftName string, left *tabletmanagerdatapb.SchemaDefiniti // SchemaChange contains all necessary information to apply a schema change. // It should not be sent over the wire, it's just a set of parameters. type SchemaChange struct { - SQL string - Force bool - AllowReplication bool - BeforeSchema *tabletmanagerdatapb.SchemaDefinition - AfterSchema *tabletmanagerdatapb.SchemaDefinition - SQLMode string + SQL string + Force bool + AllowReplication bool + BeforeSchema *tabletmanagerdatapb.SchemaDefinition + AfterSchema *tabletmanagerdatapb.SchemaDefinition + SQLMode string + DisableForeignKeyChecks bool } // Equal compares two SchemaChange objects. diff --git a/go/vt/mysqlctl/utils.go b/go/vt/mysqlctl/utils.go deleted file mode 100644 index cc34be6abfe..00000000000 --- a/go/vt/mysqlctl/utils.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysqlctl - -import ( - "vitess.io/vitess/go/vt/log" -) - -type MapFunc func(index int) error - -// ConcurrentMap applies fun in a concurrent manner on integers from 0 -// to n-1 (they are assumed to be indexes of some slice containing -// items to be processed). 
The first error returned by a fun -// application will returned (subsequent errors will only be -// logged). It will use concurrency goroutines. -func ConcurrentMap(concurrency, n int, fun MapFunc) error { - errors := make(chan error) - work := make(chan int, n) - - for i := 0; i < n; i++ { - work <- i - } - close(work) - - for j := 0; j < concurrency; j++ { - go func() { - for i := range work { - errors <- fun(i) - } - }() - } - var err error - - for i := 0; i < n; i++ { - if e := <-errors; e != nil { - if err != nil { - log.Errorf("multiple errors, this one happened but it won't be returned: %v", err) - } - err = e - } - } - return err -} diff --git a/go/vt/mysqlctl/utils_test.go b/go/vt/mysqlctl/utils_test.go deleted file mode 100644 index 0fdcae92bfa..00000000000 --- a/go/vt/mysqlctl/utils_test.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mysqlctl - -import ( - "errors" - "testing" -) - -func TestConcurrentMap(t *testing.T) { - work := make([]int, 10) - result := make([]int, 10) - for i := 0; i < 10; i++ { - work[i] = i - } - mapFunc := func(i int) error { - result[i] = work[i] - return nil - } - if err := ConcurrentMap(2, 10, mapFunc); err != nil { - t.Errorf("Unexpected error: %v", err) - } - - for i := 0; i < 10; i++ { - if got, expected := result[i], work[i]; got != expected { - t.Errorf("Wrong values in result: got %v, expected %v", got, expected) - } - } - fooErr := errors.New("foo") - if err := ConcurrentMap(2, 10, func(i int) error { return fooErr }); err != fooErr { - t.Errorf("Didn't get expected error: %v", err) - } -} diff --git a/go/vt/mysqlctl/version.go b/go/vt/mysqlctl/version.go index aa454319573..3bdb76be3a6 100644 --- a/go/vt/mysqlctl/version.go +++ b/go/vt/mysqlctl/version.go @@ -36,3 +36,7 @@ func (v *ServerVersion) atLeast(compare ServerVersion) bool { } return false } + +func (v *ServerVersion) isSameRelease(compare ServerVersion) bool { + return v.Major == compare.Major && v.Minor == compare.Minor +} diff --git a/go/vt/mysqlctl/xtrabackupengine.go b/go/vt/mysqlctl/xtrabackupengine.go index d11699167d9..3f8491fdfb6 100644 --- a/go/vt/mysqlctl/xtrabackupengine.go +++ b/go/vt/mysqlctl/xtrabackupengine.go @@ -167,27 +167,27 @@ func closeFile(wc io.WriteCloser, fileName string, logger logutil.Logger, finalE } // ExecuteBackup runs a backup based on given params. This could be a full or incremental backup. -// The function returns a boolean that indicates if the backup is usable, and an overall error. -func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (bool, error) { +// The function returns a BackupResult that indicates the usability of the backup, and an overall error. 
+func (be *XtrabackupEngine) ExecuteBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (BackupResult, error) { params.Logger.Infof("Executing Backup at %v for keyspace/shard %v/%v on tablet %v, concurrency: %v, compress: %v, incrementalFromPos: %v", params.BackupTime, params.Keyspace, params.Shard, params.TabletAlias, params.Concurrency, backupStorageCompress, params.IncrementalFromPos) return be.executeFullBackup(ctx, params, bh) } -// executeFullBackup returns a boolean that indicates if the backup is usable, +// executeFullBackup returns a BackupResult that indicates the usability of the backup, // and an overall error. -func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (complete bool, finalErr error) { +func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params BackupParams, bh backupstorage.BackupHandle) (backupResult BackupResult, finalErr error) { if params.IncrementalFromPos != "" { - return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "incremental backups not supported in xtrabackup engine.") + return BackupUnusable, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "incremental backups not supported in xtrabackup engine.") } if xtrabackupUser == "" { - return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "xtrabackupUser must be specified.") + return BackupUnusable, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "xtrabackupUser must be specified.") } // an extension is required when using an external compressor if backupStorageCompress && ExternalCompressorCmd != "" && ExternalCompressorExt == "" { - return false, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, + return BackupUnusable, vterrors.New(vtrpc.Code_INVALID_ARGUMENT, "flag --external-compressor-extension not provided when using an external compressor") } @@ -198,20 +198,20 @@ func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params Backup } if err != nil { - return false, 
vterrors.Wrap(err, "unable to obtain a connection to the database") + return BackupUnusable, vterrors.Wrap(err, "unable to obtain a connection to the database") } pos, err := conn.PrimaryPosition() if err != nil { - return false, vterrors.Wrap(err, "unable to obtain primary position") + return BackupUnusable, vterrors.Wrap(err, "unable to obtain primary position") } serverUUID, err := conn.GetServerUUID() if err != nil { - return false, vterrors.Wrap(err, "can't get server uuid") + return BackupUnusable, vterrors.Wrap(err, "can't get server uuid") } mysqlVersion, err := params.Mysqld.GetVersionString(ctx) if err != nil { - return false, vterrors.Wrap(err, "can't get MySQL version") + return BackupUnusable, vterrors.Wrap(err, "can't get MySQL version") } flavor := pos.GTIDSet.Flavor() @@ -229,14 +229,14 @@ func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params Backup params.Logger.Infof("Starting backup with %v stripe(s)", numStripes) replicationPosition, err := be.backupFiles(ctx, params, bh, backupFileName, numStripes, flavor) if err != nil { - return false, err + return BackupUnusable, err } // open the MANIFEST params.Logger.Infof("Writing backup MANIFEST") mwc, err := bh.AddFile(ctx, backupManifestFileName, backupstorage.FileSizeUnknown) if err != nil { - return false, vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) + return BackupUnusable, vterrors.Wrapf(err, "cannot add %v to backup", backupManifestFileName) } defer closeFile(mwc, backupManifestFileName, params.Logger, &finalErr) @@ -244,6 +244,7 @@ func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params Backup bm := &xtraBackupManifest{ // Common base fields BackupManifest: BackupManifest{ + BackupName: bh.Name(), BackupMethod: xtrabackupEngineName, Position: replicationPosition, PurgedPosition: replicationPosition, @@ -273,14 +274,14 @@ func (be *XtrabackupEngine) executeFullBackup(ctx context.Context, params Backup data, err := 
json.MarshalIndent(bm, "", " ") if err != nil { - return false, vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName) + return BackupUnusable, vterrors.Wrapf(err, "cannot JSON encode %v", backupManifestFileName) } if _, err := mwc.Write([]byte(data)); err != nil { - return false, vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) + return BackupUnusable, vterrors.Wrapf(err, "cannot write %v", backupManifestFileName) } params.Logger.Infof("Backup completed") - return true, nil + return BackupUsable, nil } func (be *XtrabackupEngine) backupFiles( @@ -484,7 +485,7 @@ func (be *XtrabackupEngine) ExecuteRestore(ctx context.Context, params RestorePa return nil, err } - if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger); err != nil { + if err := prepareToRestore(ctx, params.Cnf, params.Mysqld, params.Logger, params.MysqlShutdownTimeout); err != nil { return nil, err } diff --git a/go/vt/mysqlctl/xtrabackupengine_test.go b/go/vt/mysqlctl/xtrabackupengine_test.go index 7a829ce4ba0..f560833d278 100644 --- a/go/vt/mysqlctl/xtrabackupengine_test.go +++ b/go/vt/mysqlctl/xtrabackupengine_test.go @@ -18,8 +18,8 @@ package mysqlctl import ( "bytes" + "crypto/rand" "io" - "math/rand" "testing" "github.com/stretchr/testify/assert" @@ -35,7 +35,7 @@ func TestFindReplicationPosition(t *testing.T) { 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262' - MySQL slave binlog position: master host '10.128.0.43', purge list '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3, 1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3, 47b59de1-b368-11e9-b48b-624401d35560:1-152981, 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262', channel name: '' + MySQL replica binlog position: master host '10.128.0.43', purge list '145e508e-ae54-11e9-8ce6-46824dd1815e:1-3, 
1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3, 47b59de1-b368-11e9-b48b-624401d35560:1-152981, 557def0a-b368-11e9-84ed-f6fffd91cc57:1-3, 599ef589-ae55-11e9-9688-ca1f44501925:1-14857169, b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262', channel name: '' 190809 00:15:44 [00] Streaming 190809 00:15:44 [00] ...done @@ -46,12 +46,8 @@ func TestFindReplicationPosition(t *testing.T) { want := "145e508e-ae54-11e9-8ce6-46824dd1815e:1-3,1e51f8be-ae54-11e9-a7c6-4280a041109b:1-3,47b59de1-b368-11e9-b48b-624401d35560:1-152981,557def0a-b368-11e9-84ed-f6fffd91cc57:1-3,599ef589-ae55-11e9-9688-ca1f44501925:1-14857169,b9ce485d-b36b-11e9-9b17-2a6e0a6011f4:1-371262" pos, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) - if err != nil { - t.Fatalf("findReplicationPosition error: %v", err) - } - if got := pos.String(); got != want { - t.Errorf("findReplicationPosition() = %v; want %v", got, want) - } + assert.NoError(t, err) + assert.Equal(t, want, pos.String()) } func TestFindReplicationPositionNoMatch(t *testing.T) { @@ -59,9 +55,7 @@ func TestFindReplicationPositionNoMatch(t *testing.T) { input := `nothing` _, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) - if err == nil { - t.Fatalf("expected error from findReplicationPosition but got nil") - } + assert.Error(t, err) } func TestFindReplicationPositionEmptyMatch(t *testing.T) { @@ -71,17 +65,14 @@ func TestFindReplicationPositionEmptyMatch(t *testing.T) { '` _, err := findReplicationPosition(input, "MySQL56", logutil.NewConsoleLogger()) - if err == nil { - t.Fatalf("expected error from findReplicationPosition but got nil") - } + assert.Error(t, err) } func TestStripeRoundTrip(t *testing.T) { - // Generate some deterministic input data. + // Generate some random input data. 
dataSize := int64(1000000) input := make([]byte, dataSize) - rng := rand.New(rand.NewSource(1)) - rng.Read(input) + rand.Read(input) test := func(blockSize int64, stripes int) { // Write it out striped across some buffers. @@ -97,16 +88,11 @@ func TestStripeRoundTrip(t *testing.T) { // Read it back and merge. outBuf := &bytes.Buffer{} written, err := io.Copy(outBuf, stripeReader(readers, blockSize)) - if err != nil { - t.Errorf("dataSize=%d, blockSize=%d, stripes=%d; copy error: %v", dataSize, blockSize, stripes, err) - } - if written != dataSize { - t.Errorf("dataSize=%d, blockSize=%d, stripes=%d; copy error: wrote %d total bytes instead of dataSize", dataSize, blockSize, stripes, written) - } + assert.NoError(t, err) + assert.Equal(t, dataSize, written) + output := outBuf.Bytes() - if !bytes.Equal(input, output) { - t.Errorf("output bytes are not the same as input") - } + assert.Equal(t, input, output) } // Test block size that evenly divides data size. diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index c0a4bd08860..5698e690f0d 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: binlogdata.proto @@ -1007,6 +1007,8 @@ type Rule struct { // such columns need to have special transofrmation of the data, from an integral format into a // string format. e.g. the value 0 needs to be converted to '0'. ConvertIntToEnum map[string]bool `protobuf:"bytes,8,rep,name=convert_int_to_enum,json=convertIntToEnum,proto3" json:"convert_int_to_enum,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // ForceUniqueKey gives vtreamer a hint for `FORCE INDEX (...)` usage. 
+ ForceUniqueKey string `protobuf:"bytes,9,opt,name=force_unique_key,json=forceUniqueKey,proto3" json:"force_unique_key,omitempty"` } func (x *Rule) Reset() { @@ -1097,6 +1099,13 @@ func (x *Rule) GetConvertIntToEnum() map[string]bool { return nil } +func (x *Rule) GetForceUniqueKey() string { + if x != nil { + return x.ForceUniqueKey + } + return "" +} + // Filter represents a list of ordered rules. The first // match wins. type Filter struct { @@ -1491,6 +1500,14 @@ type FieldEvent struct { Fields []*query.Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` Keyspace string `protobuf:"bytes,3,opt,name=keyspace,proto3" json:"keyspace,omitempty"` Shard string `protobuf:"bytes,4,opt,name=shard,proto3" json:"shard,omitempty"` + // Are ENUM and SET field values already mapped to strings in the ROW + // events? This allows us to transition VTGate VStream consumers from + // the pre v20 behavior of having to do this mapping themselves to the + // v20+ behavior of not having to do this anymore and to expect string + // values directly. + // NOTE: because this is the use case, this is ONLY ever set today in + // vstreams managed by the vstreamManager. + EnumSetStringValues bool `protobuf:"varint,25,opt,name=enum_set_string_values,json=enumSetStringValues,proto3" json:"enum_set_string_values,omitempty"` } func (x *FieldEvent) Reset() { @@ -1553,6 +1570,13 @@ func (x *FieldEvent) GetShard() string { return "" } +func (x *FieldEvent) GetEnumSetStringValues() bool { + if x != nil { + return x.EnumSetStringValues + } + return false +} + // ShardGtid contains the GTID position for one shard. // It's used in a request for requesting a starting position. 
// It's used in a response to transmit the current position @@ -3056,7 +3080,7 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x6f, 0x43, 0x68, 0x61, - 0x72, 0x73, 0x65, 0x74, 0x22, 0xdf, 0x05, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x14, 0x0a, + 0x72, 0x73, 0x65, 0x74, 0x22, 0x89, 0x06, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x14, 0x63, @@ -3087,280 +3111,247 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x26, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x54, 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, - 0x49, 0x6e, 0x74, 0x54, 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, - 0x76, 0x65, 0x72, 0x74, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x54, 0x65, 0x78, 0x74, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x60, 0x0a, 0x13, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65, - 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 
0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x54, - 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x12, 0x26, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, - 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x10, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 
0x65, - 0x22, 0x36, 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f, - 0x64, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, - 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x01, 0x22, 0xea, 0x03, 0x0a, 0x0c, 0x42, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, - 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 
0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x12, 0x26, - 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, - 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, - 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, 0xc6, 0x01, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, - 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, - 0x6f, 0x77, 0x52, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0c, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x52, 0x0b, 0x64, - 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x42, 0x69, - 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xa9, - 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, - 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, - 0x64, 
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x22, 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x67, 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, - 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, - 0x4b, 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, - 0x47, 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, - 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, - 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, + 0x49, 0x6e, 0x74, 0x54, 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x71, 
0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x4b, 0x65, 0x79, 0x1a, 0x44, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x45, 0x6e, + 0x75, 0x6d, 0x54, 0x6f, 0x54, 0x65, 0x78, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x60, 0x0a, 0x13, 0x43, 0x6f, 0x6e, + 0x76, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x43, + 0x6f, 0x6e, 0x76, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x54, 0x6f, 0x45, 0x6e, 0x75, 0x6d, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xff, 0x01, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, + 0x6c, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 
0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, + 0x52, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x13, 0x0a, 0x0f, + 0x45, 0x52, 0x52, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, + 0x10, 0x01, 0x22, 0xea, 0x03, 0x0a, 0x0c, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x09, + 0x6b, 0x65, 0x79, 
0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x2e, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, + 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, + 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, + 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, + 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x28, 0x0a, 0x10, 
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x22, + 0xc6, 0x01, 0x0a, 0x09, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a, + 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x12, 0x20, 0x0a, 0x05, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x05, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x63, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x2e, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x73, 0x1a, 0x32, 0x0a, 0x06, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0xa9, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x77, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x61, 0x6e, + 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 
0x65, + 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, + 0x6c, 0x61, 0x67, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x0a, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x6e, 0x75, 0x6d, + 0x53, 0x65, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, + 0x88, 0x01, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, - 0xbc, 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, - 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, - 0x74, 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, - 0x61, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, - 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, 0x73, 0x22, 0x8b, - 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, - 0x72, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, - 0x74, 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, - 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, - 0x52, 0x07, 
0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, - 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, + 0x74, 0x69, 0x64, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x70, 0x5f, 0x6b, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, + 0x52, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x4b, 0x73, 0x22, 0x3f, 0x0a, 0x05, 0x56, 0x47, + 0x74, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, + 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, + 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x73, 0x22, 0x41, 0x0a, 0x0d, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, - 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x5f, 0x6b, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x70, 0x4b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41, 0x0a, 0x0d, - 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, - 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, - 0xc7, 0x02, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 
0x32, - 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, - 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, - 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, - 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, - 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, - 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, - 0x22, 0xf9, 0x01, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 
0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, - 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, - 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, - 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, - 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, - 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0xc5, 0x01, 0x0a, - 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, - 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, - 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, - 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x22, 0xde, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, - 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, - 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, - 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0x69, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 
0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, - 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, - 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, - 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, - 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xbc, + 0x02, 0x0a, 0x07, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x40, 0x0a, 0x0e, 0x6d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x6d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 
0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, + 0x63, 0x61, 0x6c, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x0b, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x5f, 0x67, 0x74, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x47, 0x74, 0x69, 0x64, 0x52, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x47, 0x74, + 0x69, 0x64, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, + 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, + 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x8b, 0x04, + 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x09, 0x72, 0x6f, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, + 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x12, 0x27, 0x0a, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x47, 0x74, + 0x69, 0x64, 0x52, 0x05, 0x76, 0x67, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x07, 0x6a, 0x6f, 0x75, + 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, + 0x07, 0x6a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6d, 0x6c, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6d, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, 0x0a, + 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0b, + 0x6c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 
0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x8d, 0x01, 0x0a, 0x0c, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x5f, 0x6b, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x70, 0x4b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41, 0x0a, 0x0d, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, + 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xc7, + 0x02, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 
0x63, + 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, + 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, + 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 
0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, + 0xf9, 0x01, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 
0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, + 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, + 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, + 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, + 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, + 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, + 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x22, 0xc5, 0x01, 0x0a, 0x14, + 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, @@ -3372,61 +3363,100 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, - 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, - 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, - 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, - 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45, 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, - 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, - 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, - 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, - 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 
0x12, 0x0b, - 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, - 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, - 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, - 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, - 0x2a, 0x71, 0x0a, 0x19, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, - 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, - 0x69, 0x74, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, - 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f, 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, - 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x10, 0x06, 0x2a, 0x8d, 0x02, 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, - 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, - 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, - 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, - 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, - 0x12, 0x0a, 0x0a, 0x06, 
0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, - 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, - 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, - 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, - 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, - 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, - 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, - 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, - 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, - 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, - 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, - 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x67, 0x65, 0x74, 0x22, 0xde, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x0c, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x67, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, + 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, + 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, + 0x73, 0x74, 0x70, 0x6b, 0x22, 0x69, 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, + 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, + 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, + 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, + 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, + 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x71, 
0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, + 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, + 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, + 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, + 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, + 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45, 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, + 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, + 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, + 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, + 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, + 0x71, 0x0a, 0x19, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 
0x0b, 0x0a, 0x07, + 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, + 0x74, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f, 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, + 0x10, 0x06, 0x2a, 0x8d, 0x02, 0x0a, 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, + 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, + 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, + 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, + 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, + 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, + 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, + 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, + 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, + 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, + 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, + 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, + 0x0d, 0x0a, 0x09, 
0x53, 0x41, 0x56, 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, + 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, + 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, + 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go index fd1fe5e459d..1332681a976 100644 --- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go +++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go @@ -201,6 +201,7 @@ func (m *Rule) CloneVT() *Rule { SourceUniqueKeyColumns: m.SourceUniqueKeyColumns, TargetUniqueKeyColumns: m.TargetUniqueKeyColumns, SourceUniqueKeyTargetColumns: m.SourceUniqueKeyTargetColumns, + ForceUniqueKey: m.ForceUniqueKey, } if rhs := m.ConvertEnumToText; rhs != nil { tmpContainer := make(map[string]string, len(rhs)) @@ -370,9 +371,10 @@ func (m *FieldEvent) CloneVT() *FieldEvent { return (*FieldEvent)(nil) } r := &FieldEvent{ - TableName: m.TableName, - Keyspace: m.Keyspace, - Shard: m.Shard, + TableName: m.TableName, + Keyspace: m.Keyspace, + Shard: m.Shard, + EnumSetStringValues: m.EnumSetStringValues, } if rhs := m.Fields; rhs != nil { tmpContainer := make([]*query.Field, len(rhs)) @@ -1298,6 +1300,13 @@ func (m *Rule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.ForceUniqueKey) > 0 { + i -= len(m.ForceUniqueKey) + copy(dAtA[i:], m.ForceUniqueKey) + i = 
encodeVarint(dAtA, i, uint64(len(m.ForceUniqueKey))) + i-- + dAtA[i] = 0x4a + } if len(m.ConvertIntToEnum) > 0 { for k := range m.ConvertIntToEnum { v := m.ConvertIntToEnum[k] @@ -1794,6 +1803,18 @@ func (m *FieldEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.EnumSetStringValues { + i-- + if m.EnumSetStringValues { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } if len(m.Shard) > 0 { i -= len(m.Shard) copy(dAtA[i:], m.Shard) @@ -3362,6 +3383,10 @@ func (m *Rule) SizeVT() (n int) { n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) } } + l = len(m.ForceUniqueKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -3543,6 +3568,9 @@ func (m *FieldEvent) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.EnumSetStringValues { + n += 3 + } n += len(m.unknownFields) return n } @@ -5565,6 +5593,38 @@ func (m *Rule) UnmarshalVT(dAtA []byte) error { } m.ConvertIntToEnum[mapkey] = mapvalue iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceUniqueKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForceUniqueKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -6769,6 +6829,26 @@ func (m *FieldEvent) UnmarshalVT(dAtA []byte) error { } m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 25: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field EnumSetStringValues", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EnumSetStringValues = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/binlogservice/binlogservice.pb.go b/go/vt/proto/binlogservice/binlogservice.pb.go index 4eac50296c1..e23504604dd 100644 --- a/go/vt/proto/binlogservice/binlogservice.pb.go +++ b/go/vt/proto/binlogservice/binlogservice.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: binlogservice.proto diff --git a/go/vt/proto/logutil/logutil.pb.go b/go/vt/proto/logutil/logutil.pb.go index b2675716168..b6273fa066d 100644 --- a/go/vt/proto/logutil/logutil.pb.go +++ b/go/vt/proto/logutil/logutil.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: logutil.proto diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index 19f70887681..4be14ec22ec 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: mysqlctl.proto @@ -190,7 +190,8 @@ type ShutdownRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - WaitForMysqld bool `protobuf:"varint,1,opt,name=wait_for_mysqld,json=waitForMysqld,proto3" json:"wait_for_mysqld,omitempty"` + WaitForMysqld bool `protobuf:"varint,1,opt,name=wait_for_mysqld,json=waitForMysqld,proto3" json:"wait_for_mysqld,omitempty"` + MysqlShutdownTimeout *vttime.Duration `protobuf:"bytes,2,opt,name=mysql_shutdown_timeout,json=mysqlShutdownTimeout,proto3" json:"mysql_shutdown_timeout,omitempty"` } func (x *ShutdownRequest) Reset() { @@ -232,6 +233,13 @@ func (x *ShutdownRequest) GetWaitForMysqld() bool { return false } +func (x *ShutdownRequest) GetMysqlShutdownTimeout() *vttime.Duration { + if x != nil { + return x.MysqlShutdownTimeout + } + return nil +} + type ShutdownResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -922,129 +930,134 @@ var file_mysqlctl_proto_rawDesc = []byte{ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x41, 0x72, 0x67, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0f, 0x53, 0x68, - 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, - 0x0f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x4d, - 0x79, 0x73, 0x71, 0x6c, 0x64, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, - 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 
0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, - 0x01, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x17, 0x62, - 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x61, - 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, - 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x15, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, - 0x65, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4e, 0x0a, 0x20, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x0f, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, + 0x0a, 0x0f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x46, 
0x6f, 0x72, + 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x64, 0x12, 0x46, 0x0a, 0x16, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x5f, + 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x53, + 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x12, + 0x0a, 0x10, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, + 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x17, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x17, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x72, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 
0x15, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x44, 0x61, 0x74, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4e, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf9, 0x01, 0x0a, 0x21, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0f, 0x66, + 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, + 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 
0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, + 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x61, + 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x69, 0x6e, + 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xe6, 0x02, 0x0a, + 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 
0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, + 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xb0, 0x05, 0x0a, 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, + 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, + 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, + 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, + 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, + 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, + 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x42, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x73, 0x12, 0x2a, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, - 0x12, 0x2a, 0x0a, 0x11, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf9, 0x01, 0x0a, - 0x21, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x16, 0x66, 0x69, 0x72, - 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x12, - 0x33, 0x0a, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x52, 0x65, 0x69, 0x6e, - 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x16, 0x0a, 0x14, 
0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x17, 0x0a, 0x15, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x22, 0x31, 0x0a, 0x15, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xe6, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x12, 0x20, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 
0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x04, 0x74, 0x69, - 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0x4b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x43, 0x4f, 0x4d, 0x50, - 0x4c, 0x45, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, - 0x54, 0x45, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, - 0x03, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x04, 0x32, 0xb0, 0x05, 0x0a, - 0x08, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x43, 0x74, 0x6c, 0x12, 0x3a, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x16, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6d, 0x79, 0x73, - 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, - 0x6e, 0x12, 0x19, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, - 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x52, 
0x75, - 0x6e, 0x4d, 0x79, 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x20, 0x2e, - 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, 0x73, 0x71, - 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x75, 0x6e, 0x4d, 0x79, - 0x73, 0x71, 0x6c, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, - 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x79, 0x73, 0x71, - 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, - 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, - 0x0a, 0x19, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, - 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x2a, 0x2e, 0x6d, 0x79, - 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, - 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, - 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, - 0x6c, 0x2e, 0x52, 0x65, 0x69, 
0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, - 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, - 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, - 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, - 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, - 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, - 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x1a, 0x2b, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x61, 0x64, + 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4f, 0x0a, 0x0c, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x12, + 0x1d, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x69, 0x6e, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x52, 0x0a, 0x0d, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, + 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x27, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, + 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1080,37 +1093,39 @@ var file_mysqlctl_proto_goTypes = []interface{}{ (*VersionStringRequest)(nil), // 15: mysqlctl.VersionStringRequest (*VersionStringResponse)(nil), // 16: mysqlctl.VersionStringResponse (*BackupInfo)(nil), // 17: 
mysqlctl.BackupInfo - (*vttime.Time)(nil), // 18: vttime.Time - (*topodata.TabletAlias)(nil), // 19: topodata.TabletAlias + (*vttime.Duration)(nil), // 18: vttime.Duration + (*vttime.Time)(nil), // 19: vttime.Time + (*topodata.TabletAlias)(nil), // 20: topodata.TabletAlias } var file_mysqlctl_proto_depIdxs = []int32{ - 18, // 0: mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime:type_name -> vttime.Time - 18, // 1: mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp:type_name -> vttime.Time - 18, // 2: mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp:type_name -> vttime.Time - 19, // 3: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias - 18, // 4: mysqlctl.BackupInfo.time:type_name -> vttime.Time - 0, // 5: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status - 1, // 6: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest - 3, // 7: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest - 5, // 8: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest - 7, // 9: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest - 9, // 10: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:input_type -> mysqlctl.ReadBinlogFilesTimestampsRequest - 11, // 11: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest - 13, // 12: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest - 15, // 13: mysqlctl.MysqlCtl.VersionString:input_type -> mysqlctl.VersionStringRequest - 2, // 14: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse - 4, // 15: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse - 6, // 16: mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse - 8, // 17: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse - 10, // 18: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:output_type -> mysqlctl.ReadBinlogFilesTimestampsResponse - 12, // 19: 
mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse - 14, // 20: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse - 16, // 21: mysqlctl.MysqlCtl.VersionString:output_type -> mysqlctl.VersionStringResponse - 14, // [14:22] is the sub-list for method output_type - 6, // [6:14] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 18, // 0: mysqlctl.ShutdownRequest.mysql_shutdown_timeout:type_name -> vttime.Duration + 19, // 1: mysqlctl.ApplyBinlogFileRequest.binlog_restore_datetime:type_name -> vttime.Time + 19, // 2: mysqlctl.ReadBinlogFilesTimestampsResponse.first_timestamp:type_name -> vttime.Time + 19, // 3: mysqlctl.ReadBinlogFilesTimestampsResponse.last_timestamp:type_name -> vttime.Time + 20, // 4: mysqlctl.BackupInfo.tablet_alias:type_name -> topodata.TabletAlias + 19, // 5: mysqlctl.BackupInfo.time:type_name -> vttime.Time + 0, // 6: mysqlctl.BackupInfo.status:type_name -> mysqlctl.BackupInfo.Status + 1, // 7: mysqlctl.MysqlCtl.Start:input_type -> mysqlctl.StartRequest + 3, // 8: mysqlctl.MysqlCtl.Shutdown:input_type -> mysqlctl.ShutdownRequest + 5, // 9: mysqlctl.MysqlCtl.RunMysqlUpgrade:input_type -> mysqlctl.RunMysqlUpgradeRequest + 7, // 10: mysqlctl.MysqlCtl.ApplyBinlogFile:input_type -> mysqlctl.ApplyBinlogFileRequest + 9, // 11: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:input_type -> mysqlctl.ReadBinlogFilesTimestampsRequest + 11, // 12: mysqlctl.MysqlCtl.ReinitConfig:input_type -> mysqlctl.ReinitConfigRequest + 13, // 13: mysqlctl.MysqlCtl.RefreshConfig:input_type -> mysqlctl.RefreshConfigRequest + 15, // 14: mysqlctl.MysqlCtl.VersionString:input_type -> mysqlctl.VersionStringRequest + 2, // 15: mysqlctl.MysqlCtl.Start:output_type -> mysqlctl.StartResponse + 4, // 16: mysqlctl.MysqlCtl.Shutdown:output_type -> mysqlctl.ShutdownResponse + 6, // 17: 
mysqlctl.MysqlCtl.RunMysqlUpgrade:output_type -> mysqlctl.RunMysqlUpgradeResponse + 8, // 18: mysqlctl.MysqlCtl.ApplyBinlogFile:output_type -> mysqlctl.ApplyBinlogFileResponse + 10, // 19: mysqlctl.MysqlCtl.ReadBinlogFilesTimestamps:output_type -> mysqlctl.ReadBinlogFilesTimestampsResponse + 12, // 20: mysqlctl.MysqlCtl.ReinitConfig:output_type -> mysqlctl.ReinitConfigResponse + 14, // 21: mysqlctl.MysqlCtl.RefreshConfig:output_type -> mysqlctl.RefreshConfigResponse + 16, // 22: mysqlctl.MysqlCtl.VersionString:output_type -> mysqlctl.VersionStringResponse + 15, // [15:23] is the sub-list for method output_type + 7, // [7:15] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_mysqlctl_proto_init() } diff --git a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go index bb2ec78e03a..fab1af2f471 100644 --- a/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl_vtproto.pb.go @@ -63,7 +63,8 @@ func (m *ShutdownRequest) CloneVT() *ShutdownRequest { return (*ShutdownRequest)(nil) } r := &ShutdownRequest{ - WaitForMysqld: m.WaitForMysqld, + WaitForMysqld: m.WaitForMysqld, + MysqlShutdownTimeout: m.MysqlShutdownTimeout.CloneVT(), } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -430,6 +431,16 @@ func (m *ShutdownRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MysqlShutdownTimeout != nil { + size, err := m.MysqlShutdownTimeout.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } if m.WaitForMysqld { i-- if m.WaitForMysqld { @@ -1085,6 +1096,10 @@ func (m *ShutdownRequest) SizeVT() (n int) { if m.WaitForMysqld { n += 2 } + if 
m.MysqlShutdownTimeout != nil { + l = m.MysqlShutdownTimeout.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -1487,6 +1502,42 @@ func (m *ShutdownRequest) UnmarshalVT(dAtA []byte) error { } } m.WaitForMysqld = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MysqlShutdownTimeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MysqlShutdownTimeout == nil { + m.MysqlShutdownTimeout = &vttime.Duration{} + } + if err := m.MysqlShutdownTimeout.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/query/query.pb.go b/go/vt/proto/query/query.pb.go index b8e7e4fe05e..042791a1aec 100644 --- a/go/vt/proto/query/query.pb.go +++ b/go/vt/proto/query/query.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: query.proto @@ -482,6 +482,7 @@ const ( SchemaTableType_VIEWS SchemaTableType = 0 SchemaTableType_TABLES SchemaTableType = 1 SchemaTableType_ALL SchemaTableType = 2 + SchemaTableType_UDFS SchemaTableType = 3 ) // Enum value maps for SchemaTableType. 
@@ -490,11 +491,13 @@ var ( 0: "VIEWS", 1: "TABLES", 2: "ALL", + 3: "UDFS", } SchemaTableType_value = map[string]int32{ "VIEWS": 0, "TABLES": 1, "ALL": 2, + "UDFS": 3, } ) @@ -5066,6 +5069,8 @@ type RealtimeStats struct { TableSchemaChanged []string `protobuf:"bytes,7,rep,name=table_schema_changed,json=tableSchemaChanged,proto3" json:"table_schema_changed,omitempty"` // view_schema_changed is to provide list of views that have schema changes detected by the tablet. ViewSchemaChanged []string `protobuf:"bytes,8,rep,name=view_schema_changed,json=viewSchemaChanged,proto3" json:"view_schema_changed,omitempty"` + // udfs_changed is used to signal that the UDFs have changed on the tablet. + UdfsChanged bool `protobuf:"varint,9,opt,name=udfs_changed,json=udfsChanged,proto3" json:"udfs_changed,omitempty"` } func (x *RealtimeStats) Reset() { @@ -5156,6 +5161,13 @@ func (x *RealtimeStats) GetViewSchemaChanged() []string { return nil } +func (x *RealtimeStats) GetUdfsChanged() bool { + if x != nil { + return x.UdfsChanged + } + return false +} + // AggregateStats contains information about the health of a group of // tablets for a Target. It is used to propagate stats from a vtgate // to another, or from the Gateway layer of a vtgate to the routing @@ -5502,20 +5514,85 @@ func (x *GetSchemaRequest) GetTableNames() []string { return nil } +// UDFInfo represents the information about a UDF. 
+type UDFInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Aggregating bool `protobuf:"varint,2,opt,name=aggregating,proto3" json:"aggregating,omitempty"` + ReturnType Type `protobuf:"varint,3,opt,name=return_type,json=returnType,proto3,enum=query.Type" json:"return_type,omitempty"` +} + +func (x *UDFInfo) Reset() { + *x = UDFInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_query_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UDFInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UDFInfo) ProtoMessage() {} + +func (x *UDFInfo) ProtoReflect() protoreflect.Message { + mi := &file_query_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UDFInfo.ProtoReflect.Descriptor instead. +func (*UDFInfo) Descriptor() ([]byte, []int) { + return file_query_proto_rawDescGZIP(), []int{63} +} + +func (x *UDFInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UDFInfo) GetAggregating() bool { + if x != nil { + return x.Aggregating + } + return false +} + +func (x *UDFInfo) GetReturnType() Type { + if x != nil { + return x.ReturnType + } + return Type_NULL_TYPE +} + // GetSchemaResponse is the returned value from GetSchema type GetSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // this is for the schema definition for the requested tables. + Udfs []*UDFInfo `protobuf:"bytes,1,rep,name=udfs,proto3" json:"udfs,omitempty"` + // this is for the schema definition for the requested tables and views. 
TableDefinition map[string]string `protobuf:"bytes,2,rep,name=table_definition,json=tableDefinition,proto3" json:"table_definition,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GetSchemaResponse) Reset() { *x = GetSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_query_proto_msgTypes[63] + mi := &file_query_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5528,7 +5605,7 @@ func (x *GetSchemaResponse) String() string { func (*GetSchemaResponse) ProtoMessage() {} func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_query_proto_msgTypes[63] + mi := &file_query_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5541,7 +5618,14 @@ func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. 
func (*GetSchemaResponse) Descriptor() ([]byte, []int) { - return file_query_proto_rawDescGZIP(), []int{63} + return file_query_proto_rawDescGZIP(), []int{64} +} + +func (x *GetSchemaResponse) GetUdfs() []*UDFInfo { + if x != nil { + return x.Udfs + } + return nil } func (x *GetSchemaResponse) GetTableDefinition() map[string]string { @@ -5570,7 +5654,7 @@ type StreamEvent_Statement struct { func (x *StreamEvent_Statement) Reset() { *x = StreamEvent_Statement{} if protoimpl.UnsafeEnabled { - mi := &file_query_proto_msgTypes[65] + mi := &file_query_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5583,7 +5667,7 @@ func (x *StreamEvent_Statement) String() string { func (*StreamEvent_Statement) ProtoMessage() {} func (x *StreamEvent_Statement) ProtoReflect() protoreflect.Message { - mi := &file_query_proto_msgTypes[65] + mi := &file_query_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6389,8 +6473,8 @@ var file_query_proto_rawDesc = []byte{ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x49, 0x64, 0x22, 0x11, 0x0a, 0x0f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xf6, - 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x99, + 0x03, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x45, 0x72, 0x72, 0x6f, 
0x72, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, @@ -6413,140 +6497,152 @@ var file_query_proto_rawDesc = []byte{ 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x76, 0x69, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x41, 0x67, 0x67, 0x72, - 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, - 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x75, 0x6e, - 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x69, - 0x6e, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x6d, 0x61, 0x78, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x4d, 0x61, 
0x78, - 0x22, 0x95, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, 0x1c, 0x70, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x19, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x0e, 0x72, - 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x65, 0x61, 0x6c, - 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x6c, 0x74, - 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xae, 0x01, 0x0a, 0x13, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x64, 0x74, 0x69, 0x64, 0x12, 
0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, - 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0c, 0x70, 0x61, 0x72, - 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, - 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xb1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x2d, 0x2e, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x42, 0x0a, - 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x2a, 0x92, 0x03, 0x0a, 0x09, 0x4d, 0x79, 0x53, 0x71, 0x6c, 0x46, 0x6c, 0x61, 0x67, 0x12, - 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, - 0x54, 0x5f, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, - 0x0c, 0x50, 0x52, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x02, 0x12, - 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, - 0x41, 0x47, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x50, 0x4c, 0x45, - 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x08, 0x12, 0x0d, 0x0a, 0x09, 0x42, - 0x4c, 0x4f, 0x42, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x10, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, - 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x20, 0x12, 0x11, 0x0a, - 0x0d, 0x5a, 0x45, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x40, - 0x12, 0x10, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, - 0x80, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, - 0x80, 
0x02, 0x12, 0x18, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x43, 0x52, 0x45, - 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x04, 0x12, 0x13, 0x0a, 0x0e, - 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, - 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x45, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x10, - 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x4f, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x56, - 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x20, 0x12, 0x17, 0x0a, 0x12, - 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x57, 0x5f, 0x46, 0x4c, - 0x41, 0x47, 0x10, 0x80, 0x40, 0x12, 0x0e, 0x0a, 0x08, 0x4e, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, - 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x13, 0x0a, 0x0d, 0x50, 0x41, 0x52, 0x54, 0x5f, 0x4b, 0x45, - 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x01, 0x12, 0x10, 0x0a, 0x0a, 0x47, 0x52, - 0x4f, 0x55, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x11, 0x0a, 0x0b, - 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x04, 0x12, - 0x11, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x43, 0x4d, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, - 0x80, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x2a, 0x6b, 0x0a, 0x04, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x08, - 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x49, 0x4e, - 0x54, 0x45, 0x47, 0x52, 0x41, 0x4c, 0x10, 0x80, 0x02, 0x12, 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x55, - 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x10, 0x80, 0x04, 0x12, 0x0c, 0x0a, 0x07, 0x49, 0x53, - 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x51, 0x55, - 0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a, 0x06, 0x49, 0x53, 0x54, 0x45, 0x58, - 0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, - 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 
0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, - 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x04, 0x49, - 0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05, 0x55, 0x49, 0x4e, 0x54, 0x38, 0x10, - 0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x83, 0x02, 0x12, 0x0b, - 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x84, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, - 0x4e, 0x54, 0x32, 0x34, 0x10, 0x85, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x32, - 0x34, 0x10, 0x86, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x87, 0x02, - 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x88, 0x06, 0x12, 0x0a, 0x0a, - 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x89, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x8a, 0x06, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x33, - 0x32, 0x10, 0x8b, 0x08, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, - 0x8c, 0x08, 0x12, 0x0e, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, - 0x8d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x45, 0x10, 0x8e, 0x10, 0x12, 0x09, 0x0a, - 0x04, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x8f, 0x10, 0x12, 0x0d, 0x0a, 0x08, 0x44, 0x41, 0x54, 0x45, - 0x54, 0x49, 0x4d, 0x45, 0x10, 0x90, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, - 0x91, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x49, 0x4d, 0x41, 0x4c, 0x10, 0x12, 0x12, - 0x09, 0x0a, 0x04, 0x54, 0x45, 0x58, 0x54, 0x10, 0x93, 0x30, 0x12, 0x09, 0x0a, 0x04, 0x42, 0x4c, - 0x4f, 0x42, 0x10, 0x94, 0x50, 0x12, 0x0c, 0x0a, 0x07, 0x56, 0x41, 0x52, 0x43, 0x48, 0x41, 0x52, - 0x10, 0x95, 0x30, 0x12, 0x0e, 0x0a, 0x09, 0x56, 0x41, 0x52, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, - 0x10, 0x96, 0x50, 0x12, 0x09, 0x0a, 0x04, 0x43, 0x48, 0x41, 0x52, 0x10, 0x97, 0x30, 0x12, 0x0b, - 0x0a, 0x06, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x98, 0x50, 0x12, 0x08, 
0x0a, 0x03, 0x42, - 0x49, 0x54, 0x10, 0x99, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x9a, 0x10, - 0x12, 0x08, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x9b, 0x10, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x55, - 0x50, 0x4c, 0x45, 0x10, 0x1c, 0x12, 0x0d, 0x0a, 0x08, 0x47, 0x45, 0x4f, 0x4d, 0x45, 0x54, 0x52, - 0x59, 0x10, 0x9d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x9e, 0x10, 0x12, - 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x1f, 0x12, - 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10, 0xa0, 0x20, 0x12, 0x0b, 0x0a, 0x06, - 0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x54, - 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, - 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, - 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x31, - 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, - 0x02, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x64, 0x66, 0x73, 0x5f, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, + 0x64, 0x66, 0x73, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x22, 0xf6, 0x01, 0x0a, 0x0e, 0x41, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x30, 0x0a, + 0x14, 0x68, 0x65, 
0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, + 0x34, 0x0a, 0x16, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x14, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x5f, 0x6d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x4d, 0x69, 0x6e, 0x12, 0x3d, 0x0a, 0x1b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, + 0x6d, 0x61, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x18, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, + 0x4d, 0x61, 0x78, 0x22, 0x95, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x06, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x3f, 0x0a, + 0x1c, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 
0x74, 0x65, 0x72, 0x6d, 0x5f, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x19, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x54, 0x65, 0x72, 0x6d, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, + 0x0a, 0x0e, 0x72, 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, + 0x65, 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0d, 0x72, 0x65, + 0x61, 0x6c, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xae, 0x01, 0x0a, 0x13, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x74, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x64, 0x74, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, + 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x31, 0x0a, 0x0c, 0x70, 0x61, 0x72, + 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 
0x32, + 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0c, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x22, 0x91, 0x01, 0x0a, + 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x35, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x22, 0x6d, 0x0a, 0x07, 0x55, 0x44, 0x46, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6e, + 0x67, 0x12, 0x2c, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, + 0xd5, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x04, 0x75, 0x64, 0x66, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0e, 
0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x55, 0x44, 0x46, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x75, 0x64, 0x66, 0x73, 0x12, 0x58, 0x0a, 0x10, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x92, 0x03, 0x0a, 0x09, 0x4d, 0x79, 0x53, 0x71, + 0x6c, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00, + 0x12, 0x11, 0x0a, 0x0d, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, + 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x4d, 0x55, + 0x4c, 0x54, 0x49, 0x50, 0x4c, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x08, 0x12, 0x0d, 0x0a, 0x09, 0x42, 0x4c, 0x4f, 0x42, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x10, + 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x10, 0x20, 0x12, 0x11, 0x0a, 0x0d, 0x5a, 0x45, 0x52, 0x4f, 
0x46, 0x49, 0x4c, 0x4c, 0x5f, + 0x46, 0x4c, 0x41, 0x47, 0x10, 0x40, 0x12, 0x10, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x01, 0x12, 0x0e, 0x0a, 0x09, 0x45, 0x4e, 0x55, 0x4d, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x02, 0x12, 0x18, 0x0a, 0x13, 0x41, 0x55, 0x54, 0x4f, + 0x5f, 0x49, 0x4e, 0x43, 0x52, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x80, 0x04, 0x12, 0x13, 0x0a, 0x0e, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x5f, + 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x08, 0x12, 0x0d, 0x0a, 0x08, 0x53, 0x45, 0x54, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x10, 0x80, 0x10, 0x12, 0x1a, 0x0a, 0x15, 0x4e, 0x4f, 0x5f, 0x44, 0x45, 0x46, + 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, + 0x80, 0x20, 0x12, 0x17, 0x0a, 0x12, 0x4f, 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, + 0x4e, 0x4f, 0x57, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x40, 0x12, 0x0e, 0x0a, 0x08, 0x4e, + 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x02, 0x12, 0x13, 0x0a, 0x0d, 0x50, + 0x41, 0x52, 0x54, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x01, + 0x12, 0x10, 0x0a, 0x0a, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, + 0x80, 0x02, 0x12, 0x11, 0x0a, 0x0b, 0x55, 0x4e, 0x49, 0x51, 0x55, 0x45, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x10, 0x80, 0x80, 0x04, 0x12, 0x11, 0x0a, 0x0b, 0x42, 0x49, 0x4e, 0x43, 0x4d, 0x50, 0x5f, + 0x46, 0x4c, 0x41, 0x47, 0x10, 0x80, 0x80, 0x08, 0x1a, 0x02, 0x10, 0x01, 0x2a, 0x6b, 0x0a, 0x04, + 0x46, 0x6c, 0x61, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, + 0x0a, 0x0a, 0x49, 0x53, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x52, 0x41, 0x4c, 0x10, 0x80, 0x02, 0x12, + 0x0f, 0x0a, 0x0a, 0x49, 0x53, 0x55, 0x4e, 0x53, 0x49, 0x47, 0x4e, 0x45, 0x44, 0x10, 0x80, 0x04, + 0x12, 0x0c, 0x0a, 0x07, 0x49, 0x53, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x80, 0x08, 0x12, 0x0d, + 0x0a, 
0x08, 0x49, 0x53, 0x51, 0x55, 0x4f, 0x54, 0x45, 0x44, 0x10, 0x80, 0x10, 0x12, 0x0b, 0x0a, + 0x06, 0x49, 0x53, 0x54, 0x45, 0x58, 0x54, 0x10, 0x80, 0x20, 0x12, 0x0d, 0x0a, 0x08, 0x49, 0x53, + 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x80, 0x40, 0x2a, 0xc0, 0x03, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x55, 0x4c, 0x4c, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x04, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x81, 0x02, 0x12, 0x0a, 0x0a, 0x05, + 0x55, 0x49, 0x4e, 0x54, 0x38, 0x10, 0x82, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x31, + 0x36, 0x10, 0x83, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x31, 0x36, 0x10, 0x84, + 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x85, 0x02, 0x12, 0x0b, 0x0a, + 0x06, 0x55, 0x49, 0x4e, 0x54, 0x32, 0x34, 0x10, 0x86, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, + 0x54, 0x33, 0x32, 0x10, 0x87, 0x02, 0x12, 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, + 0x10, 0x88, 0x06, 0x12, 0x0a, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x89, 0x02, 0x12, + 0x0b, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x8a, 0x06, 0x12, 0x0c, 0x0a, 0x07, + 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x33, 0x32, 0x10, 0x8b, 0x08, 0x12, 0x0c, 0x0a, 0x07, 0x46, 0x4c, + 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x8c, 0x08, 0x12, 0x0e, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, + 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x8d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x44, 0x41, 0x54, 0x45, + 0x10, 0x8e, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x8f, 0x10, 0x12, 0x0d, + 0x0a, 0x08, 0x44, 0x41, 0x54, 0x45, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x90, 0x10, 0x12, 0x09, 0x0a, + 0x04, 0x59, 0x45, 0x41, 0x52, 0x10, 0x91, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x49, + 0x4d, 0x41, 0x4c, 0x10, 0x12, 0x12, 0x09, 0x0a, 0x04, 0x54, 0x45, 0x58, 0x54, 0x10, 0x93, 0x30, + 0x12, 0x09, 0x0a, 0x04, 0x42, 0x4c, 0x4f, 0x42, 0x10, 0x94, 0x50, 0x12, 0x0c, 0x0a, 0x07, 0x56, + 0x41, 0x52, 0x43, 0x48, 0x41, 0x52, 0x10, 
0x95, 0x30, 0x12, 0x0e, 0x0a, 0x09, 0x56, 0x41, 0x52, + 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x96, 0x50, 0x12, 0x09, 0x0a, 0x04, 0x43, 0x48, 0x41, + 0x52, 0x10, 0x97, 0x30, 0x12, 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x98, + 0x50, 0x12, 0x08, 0x0a, 0x03, 0x42, 0x49, 0x54, 0x10, 0x99, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x45, + 0x4e, 0x55, 0x4d, 0x10, 0x9a, 0x10, 0x12, 0x08, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x9b, 0x10, + 0x12, 0x09, 0x0a, 0x05, 0x54, 0x55, 0x50, 0x4c, 0x45, 0x10, 0x1c, 0x12, 0x0d, 0x0a, 0x08, 0x47, + 0x45, 0x4f, 0x4d, 0x45, 0x54, 0x52, 0x59, 0x10, 0x9d, 0x10, 0x12, 0x09, 0x0a, 0x04, 0x4a, 0x53, + 0x4f, 0x4e, 0x10, 0x9e, 0x10, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x52, 0x45, 0x53, 0x53, + 0x49, 0x4f, 0x4e, 0x10, 0x1f, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x4e, 0x55, 0x4d, 0x10, + 0xa0, 0x20, 0x12, 0x0b, 0x0a, 0x06, 0x48, 0x45, 0x58, 0x56, 0x41, 0x4c, 0x10, 0xa1, 0x20, 0x12, + 0x0b, 0x0a, 0x06, 0x42, 0x49, 0x54, 0x4e, 0x55, 0x4d, 0x10, 0xa2, 0x20, 0x2a, 0x46, 0x0a, 0x10, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x52, 0x45, 0x50, 0x41, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, + 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, + 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x3b, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x49, 0x45, 0x57, 0x53, + 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x55, 0x44, 0x46, 0x53, 0x10, + 0x03, 0x42, 0x35, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x22, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 
0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, @@ -6565,7 +6661,7 @@ func file_query_proto_rawDescGZIP() []byte { } var file_query_proto_enumTypes = make([]protoimpl.EnumInfo, 12) -var file_query_proto_msgTypes = make([]protoimpl.MessageInfo, 67) +var file_query_proto_msgTypes = make([]protoimpl.MessageInfo, 68) var file_query_proto_goTypes = []interface{}{ (MySqlFlag)(0), // 0: query.MySqlFlag (Flag)(0), // 1: query.Flag @@ -6642,21 +6738,22 @@ var file_query_proto_goTypes = []interface{}{ (*StreamHealthResponse)(nil), // 72: query.StreamHealthResponse (*TransactionMetadata)(nil), // 73: query.TransactionMetadata (*GetSchemaRequest)(nil), // 74: query.GetSchemaRequest - (*GetSchemaResponse)(nil), // 75: query.GetSchemaResponse - nil, // 76: query.BoundQuery.BindVariablesEntry - (*StreamEvent_Statement)(nil), // 77: query.StreamEvent.Statement - nil, // 78: query.GetSchemaResponse.TableDefinitionEntry - (topodata.TabletType)(0), // 79: topodata.TabletType - (*vtrpc.CallerID)(nil), // 80: vtrpc.CallerID - (*vtrpc.RPCError)(nil), // 81: vtrpc.RPCError - (*topodata.TabletAlias)(nil), // 82: topodata.TabletAlias + (*UDFInfo)(nil), // 75: query.UDFInfo + (*GetSchemaResponse)(nil), // 76: query.GetSchemaResponse + nil, // 77: query.BoundQuery.BindVariablesEntry + (*StreamEvent_Statement)(nil), // 78: query.StreamEvent.Statement + nil, // 79: query.GetSchemaResponse.TableDefinitionEntry + (topodata.TabletType)(0), // 80: topodata.TabletType + (*vtrpc.CallerID)(nil), // 81: vtrpc.CallerID + (*vtrpc.RPCError)(nil), // 82: vtrpc.RPCError + (*topodata.TabletAlias)(nil), // 83: topodata.TabletAlias } var file_query_proto_depIdxs = []int32{ - 79, // 0: query.Target.tablet_type:type_name -> topodata.TabletType + 80, // 0: query.Target.tablet_type:type_name -> topodata.TabletType 2, // 1: query.Value.type:type_name -> 
query.Type 2, // 2: query.BindVariable.type:type_name -> query.Type 15, // 3: query.BindVariable.values:type_name -> query.Value - 76, // 4: query.BoundQuery.bind_variables:type_name -> query.BoundQuery.BindVariablesEntry + 77, // 4: query.BoundQuery.bind_variables:type_name -> query.BoundQuery.BindVariablesEntry 5, // 5: query.ExecuteOptions.included_fields:type_name -> query.ExecuteOptions.IncludedFields 6, // 6: query.ExecuteOptions.workload:type_name -> query.ExecuteOptions.Workload 7, // 7: query.ExecuteOptions.transaction_isolation:type_name -> query.ExecuteOptions.TransactionIsolation @@ -6666,136 +6763,138 @@ var file_query_proto_depIdxs = []int32{ 2, // 11: query.Field.type:type_name -> query.Type 19, // 12: query.QueryResult.fields:type_name -> query.Field 20, // 13: query.QueryResult.rows:type_name -> query.Row - 77, // 14: query.StreamEvent.statements:type_name -> query.StreamEvent.Statement + 78, // 14: query.StreamEvent.statements:type_name -> query.StreamEvent.Statement 14, // 15: query.StreamEvent.event_token:type_name -> query.EventToken - 80, // 16: query.ExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 16: query.ExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 17: query.ExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 18: query.ExecuteRequest.target:type_name -> query.Target 17, // 19: query.ExecuteRequest.query:type_name -> query.BoundQuery 18, // 20: query.ExecuteRequest.options:type_name -> query.ExecuteOptions 21, // 21: query.ExecuteResponse.result:type_name -> query.QueryResult - 81, // 22: query.ResultWithError.error:type_name -> vtrpc.RPCError + 82, // 22: query.ResultWithError.error:type_name -> vtrpc.RPCError 21, // 23: query.ResultWithError.result:type_name -> query.QueryResult - 80, // 24: query.StreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 24: query.StreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 25: 
query.StreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 26: query.StreamExecuteRequest.target:type_name -> query.Target 17, // 27: query.StreamExecuteRequest.query:type_name -> query.BoundQuery 18, // 28: query.StreamExecuteRequest.options:type_name -> query.ExecuteOptions 21, // 29: query.StreamExecuteResponse.result:type_name -> query.QueryResult - 80, // 30: query.BeginRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 30: query.BeginRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 31: query.BeginRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 32: query.BeginRequest.target:type_name -> query.Target 18, // 33: query.BeginRequest.options:type_name -> query.ExecuteOptions - 82, // 34: query.BeginResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 35: query.CommitRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 34: query.BeginResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 35: query.CommitRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 36: query.CommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 37: query.CommitRequest.target:type_name -> query.Target - 80, // 38: query.RollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 38: query.RollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 39: query.RollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 40: query.RollbackRequest.target:type_name -> query.Target - 80, // 41: query.PrepareRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 41: query.PrepareRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 42: query.PrepareRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 43: query.PrepareRequest.target:type_name -> query.Target - 80, // 44: query.CommitPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 44: 
query.CommitPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 45: query.CommitPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 46: query.CommitPreparedRequest.target:type_name -> query.Target - 80, // 47: query.RollbackPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 47: query.RollbackPreparedRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 48: query.RollbackPreparedRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 49: query.RollbackPreparedRequest.target:type_name -> query.Target - 80, // 50: query.CreateTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 50: query.CreateTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 51: query.CreateTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 52: query.CreateTransactionRequest.target:type_name -> query.Target 12, // 53: query.CreateTransactionRequest.participants:type_name -> query.Target - 80, // 54: query.StartCommitRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 54: query.StartCommitRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 55: query.StartCommitRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 56: query.StartCommitRequest.target:type_name -> query.Target - 80, // 57: query.SetRollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 57: query.SetRollbackRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 58: query.SetRollbackRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 59: query.SetRollbackRequest.target:type_name -> query.Target - 80, // 60: query.ConcludeTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 60: query.ConcludeTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 61: query.ConcludeTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 62: 
query.ConcludeTransactionRequest.target:type_name -> query.Target - 80, // 63: query.ReadTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 63: query.ReadTransactionRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 64: query.ReadTransactionRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 65: query.ReadTransactionRequest.target:type_name -> query.Target 73, // 66: query.ReadTransactionResponse.metadata:type_name -> query.TransactionMetadata - 80, // 67: query.BeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 67: query.BeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 68: query.BeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 69: query.BeginExecuteRequest.target:type_name -> query.Target 17, // 70: query.BeginExecuteRequest.query:type_name -> query.BoundQuery 18, // 71: query.BeginExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 72: query.BeginExecuteResponse.error:type_name -> vtrpc.RPCError + 82, // 72: query.BeginExecuteResponse.error:type_name -> vtrpc.RPCError 21, // 73: query.BeginExecuteResponse.result:type_name -> query.QueryResult - 82, // 74: query.BeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 75: query.BeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 74: query.BeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 75: query.BeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 76: query.BeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 77: query.BeginStreamExecuteRequest.target:type_name -> query.Target 17, // 78: query.BeginStreamExecuteRequest.query:type_name -> query.BoundQuery 18, // 79: query.BeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 80: query.BeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError + 82, // 
80: query.BeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError 21, // 81: query.BeginStreamExecuteResponse.result:type_name -> query.QueryResult - 82, // 82: query.BeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 83: query.MessageStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 82: query.BeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 83: query.MessageStreamRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 84: query.MessageStreamRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 85: query.MessageStreamRequest.target:type_name -> query.Target 21, // 86: query.MessageStreamResponse.result:type_name -> query.QueryResult - 80, // 87: query.MessageAckRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 87: query.MessageAckRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 88: query.MessageAckRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 89: query.MessageAckRequest.target:type_name -> query.Target 15, // 90: query.MessageAckRequest.ids:type_name -> query.Value 21, // 91: query.MessageAckResponse.result:type_name -> query.QueryResult - 80, // 92: query.ReserveExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 81, // 92: query.ReserveExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 93: query.ReserveExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 94: query.ReserveExecuteRequest.target:type_name -> query.Target 17, // 95: query.ReserveExecuteRequest.query:type_name -> query.BoundQuery 18, // 96: query.ReserveExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 97: query.ReserveExecuteResponse.error:type_name -> vtrpc.RPCError + 82, // 97: query.ReserveExecuteResponse.error:type_name -> vtrpc.RPCError 21, // 98: query.ReserveExecuteResponse.result:type_name -> query.QueryResult - 82, // 99: 
query.ReserveExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 100: query.ReserveStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 99: query.ReserveExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 100: query.ReserveStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 101: query.ReserveStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 102: query.ReserveStreamExecuteRequest.target:type_name -> query.Target 17, // 103: query.ReserveStreamExecuteRequest.query:type_name -> query.BoundQuery 18, // 104: query.ReserveStreamExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 105: query.ReserveStreamExecuteResponse.error:type_name -> vtrpc.RPCError + 82, // 105: query.ReserveStreamExecuteResponse.error:type_name -> vtrpc.RPCError 21, // 106: query.ReserveStreamExecuteResponse.result:type_name -> query.QueryResult - 82, // 107: query.ReserveStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 108: query.ReserveBeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 107: query.ReserveStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 108: query.ReserveBeginExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 109: query.ReserveBeginExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 110: query.ReserveBeginExecuteRequest.target:type_name -> query.Target 17, // 111: query.ReserveBeginExecuteRequest.query:type_name -> query.BoundQuery 18, // 112: query.ReserveBeginExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 113: query.ReserveBeginExecuteResponse.error:type_name -> vtrpc.RPCError + 82, // 113: query.ReserveBeginExecuteResponse.error:type_name -> vtrpc.RPCError 21, // 114: query.ReserveBeginExecuteResponse.result:type_name -> query.QueryResult - 82, // 115: 
query.ReserveBeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 116: query.ReserveBeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 115: query.ReserveBeginExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 116: query.ReserveBeginStreamExecuteRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 117: query.ReserveBeginStreamExecuteRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 118: query.ReserveBeginStreamExecuteRequest.target:type_name -> query.Target 17, // 119: query.ReserveBeginStreamExecuteRequest.query:type_name -> query.BoundQuery 18, // 120: query.ReserveBeginStreamExecuteRequest.options:type_name -> query.ExecuteOptions - 81, // 121: query.ReserveBeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError + 82, // 121: query.ReserveBeginStreamExecuteResponse.error:type_name -> vtrpc.RPCError 21, // 122: query.ReserveBeginStreamExecuteResponse.result:type_name -> query.QueryResult - 82, // 123: query.ReserveBeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias - 80, // 124: query.ReleaseRequest.effective_caller_id:type_name -> vtrpc.CallerID + 83, // 123: query.ReserveBeginStreamExecuteResponse.tablet_alias:type_name -> topodata.TabletAlias + 81, // 124: query.ReleaseRequest.effective_caller_id:type_name -> vtrpc.CallerID 13, // 125: query.ReleaseRequest.immediate_caller_id:type_name -> query.VTGateCallerID 12, // 126: query.ReleaseRequest.target:type_name -> query.Target 12, // 127: query.StreamHealthResponse.target:type_name -> query.Target 70, // 128: query.StreamHealthResponse.realtime_stats:type_name -> query.RealtimeStats - 82, // 129: query.StreamHealthResponse.tablet_alias:type_name -> topodata.TabletAlias + 83, // 129: query.StreamHealthResponse.tablet_alias:type_name -> topodata.TabletAlias 3, // 130: query.TransactionMetadata.state:type_name -> query.TransactionState 12, // 131: 
query.TransactionMetadata.participants:type_name -> query.Target 12, // 132: query.GetSchemaRequest.target:type_name -> query.Target 4, // 133: query.GetSchemaRequest.table_type:type_name -> query.SchemaTableType - 78, // 134: query.GetSchemaResponse.table_definition:type_name -> query.GetSchemaResponse.TableDefinitionEntry - 16, // 135: query.BoundQuery.BindVariablesEntry.value:type_name -> query.BindVariable - 11, // 136: query.StreamEvent.Statement.category:type_name -> query.StreamEvent.Statement.Category - 19, // 137: query.StreamEvent.Statement.primary_key_fields:type_name -> query.Field - 20, // 138: query.StreamEvent.Statement.primary_key_values:type_name -> query.Row - 139, // [139:139] is the sub-list for method output_type - 139, // [139:139] is the sub-list for method input_type - 139, // [139:139] is the sub-list for extension type_name - 139, // [139:139] is the sub-list for extension extendee - 0, // [0:139] is the sub-list for field type_name + 2, // 134: query.UDFInfo.return_type:type_name -> query.Type + 75, // 135: query.GetSchemaResponse.udfs:type_name -> query.UDFInfo + 79, // 136: query.GetSchemaResponse.table_definition:type_name -> query.GetSchemaResponse.TableDefinitionEntry + 16, // 137: query.BoundQuery.BindVariablesEntry.value:type_name -> query.BindVariable + 11, // 138: query.StreamEvent.Statement.category:type_name -> query.StreamEvent.Statement.Category + 19, // 139: query.StreamEvent.Statement.primary_key_fields:type_name -> query.Field + 20, // 140: query.StreamEvent.Statement.primary_key_values:type_name -> query.Row + 141, // [141:141] is the sub-list for method output_type + 141, // [141:141] is the sub-list for method input_type + 141, // [141:141] is the sub-list for extension type_name + 141, // [141:141] is the sub-list for extension extendee + 0, // [0:141] is the sub-list for field type_name } func init() { file_query_proto_init() } @@ -7561,6 +7660,18 @@ func file_query_proto_init() { } } 
file_query_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UDFInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_query_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaResponse); i { case 0: return &v.state @@ -7572,7 +7683,7 @@ func file_query_proto_init() { return nil } } - file_query_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_query_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StreamEvent_Statement); i { case 0: return &v.state @@ -7591,7 +7702,7 @@ func file_query_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_query_proto_rawDesc, NumEnums: 12, - NumMessages: 67, + NumMessages: 68, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/query/query_vtproto.pb.go b/go/vt/proto/query/query_vtproto.pb.go index 248772ba4f4..636c950642d 100644 --- a/go/vt/proto/query/query_vtproto.pb.go +++ b/go/vt/proto/query/query_vtproto.pb.go @@ -1359,6 +1359,7 @@ func (m *RealtimeStats) CloneVT() *RealtimeStats { FilteredReplicationLagSeconds: m.FilteredReplicationLagSeconds, CpuUsage: m.CpuUsage, Qps: m.Qps, + UdfsChanged: m.UdfsChanged, } if rhs := m.TableSchemaChanged; rhs != nil { tmpContainer := make([]string, len(rhs)) @@ -1475,11 +1476,38 @@ func (m *GetSchemaRequest) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *UDFInfo) CloneVT() *UDFInfo { + if m == nil { + return (*UDFInfo)(nil) + } + r := &UDFInfo{ + Name: m.Name, + Aggregating: m.Aggregating, + ReturnType: m.ReturnType, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UDFInfo) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *GetSchemaResponse) CloneVT() *GetSchemaResponse { 
if m == nil { return (*GetSchemaResponse)(nil) } r := &GetSchemaResponse{} + if rhs := m.Udfs; rhs != nil { + tmpContainer := make([]*UDFInfo, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Udfs = tmpContainer + } if rhs := m.TableDefinition; rhs != nil { tmpContainer := make(map[string]string, len(rhs)) for k, v := range rhs { @@ -5344,6 +5372,16 @@ func (m *RealtimeStats) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.UdfsChanged { + i-- + if m.UdfsChanged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } if len(m.ViewSchemaChanged) > 0 { for iNdEx := len(m.ViewSchemaChanged) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.ViewSchemaChanged[iNdEx]) @@ -5649,6 +5687,61 @@ func (m *GetSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *UDFInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UDFInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UDFInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ReturnType != 0 { + i = encodeVarint(dAtA, i, uint64(m.ReturnType)) + i-- + dAtA[i] = 0x18 + } + if m.Aggregating { + i-- + if m.Aggregating { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *GetSchemaResponse) MarshalVT() (dAtA []byte, err 
error) { if m == nil { return nil, nil @@ -5698,6 +5791,18 @@ func (m *GetSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { dAtA[i] = 0x12 } } + if len(m.Udfs) > 0 { + for iNdEx := len(m.Udfs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Udfs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } @@ -7230,6 +7335,9 @@ func (m *RealtimeStats) SizeVT() (n int) { n += 1 + l + sov(uint64(l)) } } + if m.UdfsChanged { + n += 2 + } n += len(m.unknownFields) return n } @@ -7333,12 +7441,38 @@ func (m *GetSchemaRequest) SizeVT() (n int) { return n } +func (m *UDFInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Aggregating { + n += 2 + } + if m.ReturnType != 0 { + n += 1 + sov(uint64(m.ReturnType)) + } + n += len(m.unknownFields) + return n +} + func (m *GetSchemaResponse) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l + if len(m.Udfs) > 0 { + for _, e := range m.Udfs { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } if len(m.TableDefinition) > 0 { for k, v := range m.TableDefinition { _ = k @@ -17524,6 +17658,26 @@ func (m *RealtimeStats) UnmarshalVT(dAtA []byte) error { } m.ViewSchemaChanged = append(m.ViewSchemaChanged, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UdfsChanged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UdfsChanged = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18164,6 +18318,128 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } 
+func (m *UDFInfo) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UDFInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UDFInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Aggregating", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Aggregating = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReturnType", wireType) + } + m.ReturnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + m.ReturnType |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -18193,6 +18469,40 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Udfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Udfs = append(m.Udfs, &UDFInfo{}) + if err := m.Udfs[len(m.Udfs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TableDefinition", wireType) diff --git a/go/vt/proto/queryservice/queryservice.pb.go b/go/vt/proto/queryservice/queryservice.pb.go index babedcde966..6dd33872b40 100644 --- a/go/vt/proto/queryservice/queryservice.pb.go +++ b/go/vt/proto/queryservice/queryservice.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: queryservice.proto diff --git a/go/vt/proto/replicationdata/replicationdata.pb.go b/go/vt/proto/replicationdata/replicationdata.pb.go index ec90d6943ac..5d8256cfa2e 100644 --- a/go/vt/proto/replicationdata/replicationdata.pb.go +++ b/go/vt/proto/replicationdata/replicationdata.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: replicationdata.proto @@ -302,6 +302,64 @@ func (x *Status) GetReplicationLagUnknown() bool { return false } +// Configuration holds replication configuration information gathered from performance_schema and global variables. +type Configuration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // HeartbeatInterval controls the heartbeat interval that the primary sends to the replica + HeartbeatInterval float64 `protobuf:"fixed64,1,opt,name=heartbeat_interval,json=heartbeatInterval,proto3" json:"heartbeat_interval,omitempty"` + // ReplicaNetTimeout specifies the number of seconds to wait for more data or a heartbeat signal from the source before the replica considers the connection broken + ReplicaNetTimeout int32 `protobuf:"varint,2,opt,name=replica_net_timeout,json=replicaNetTimeout,proto3" json:"replica_net_timeout,omitempty"` +} + +func (x *Configuration) Reset() { + *x = Configuration{} + if protoimpl.UnsafeEnabled { + mi := &file_replicationdata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Configuration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Configuration) ProtoMessage() {} + +func (x *Configuration) ProtoReflect() protoreflect.Message { + mi := &file_replicationdata_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Configuration.ProtoReflect.Descriptor instead. +func (*Configuration) Descriptor() ([]byte, []int) { + return file_replicationdata_proto_rawDescGZIP(), []int{1} +} + +func (x *Configuration) GetHeartbeatInterval() float64 { + if x != nil { + return x.HeartbeatInterval + } + return 0 +} + +func (x *Configuration) GetReplicaNetTimeout() int32 { + if x != nil { + return x.ReplicaNetTimeout + } + return 0 +} + // StopReplicationStatus represents the replication status before calling StopReplication, and the replication status collected immediately after // calling StopReplication. type StopReplicationStatus struct { @@ -316,7 +374,7 @@ type StopReplicationStatus struct { func (x *StopReplicationStatus) Reset() { *x = StopReplicationStatus{} if protoimpl.UnsafeEnabled { - mi := &file_replicationdata_proto_msgTypes[1] + mi := &file_replicationdata_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -329,7 +387,7 @@ func (x *StopReplicationStatus) String() string { func (*StopReplicationStatus) ProtoMessage() {} func (x *StopReplicationStatus) ProtoReflect() protoreflect.Message { - mi := &file_replicationdata_proto_msgTypes[1] + mi := &file_replicationdata_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -342,7 +400,7 @@ func (x *StopReplicationStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationStatus.ProtoReflect.Descriptor instead. 
func (*StopReplicationStatus) Descriptor() ([]byte, []int) { - return file_replicationdata_proto_rawDescGZIP(), []int{1} + return file_replicationdata_proto_rawDescGZIP(), []int{2} } func (x *StopReplicationStatus) GetBefore() *Status { @@ -359,7 +417,7 @@ func (x *StopReplicationStatus) GetAfter() *Status { return nil } -// PrimaryStatus is the replication status for a MySQL primary (returned by 'show master status'). +// PrimaryStatus is the replication status for a MySQL primary (returned by 'show binary log status'). type PrimaryStatus struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -372,7 +430,7 @@ type PrimaryStatus struct { func (x *PrimaryStatus) Reset() { *x = PrimaryStatus{} if protoimpl.UnsafeEnabled { - mi := &file_replicationdata_proto_msgTypes[2] + mi := &file_replicationdata_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -385,7 +443,7 @@ func (x *PrimaryStatus) String() string { func (*PrimaryStatus) ProtoMessage() {} func (x *PrimaryStatus) ProtoReflect() protoreflect.Message { - mi := &file_replicationdata_proto_msgTypes[2] + mi := &file_replicationdata_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -398,7 +456,7 @@ func (x *PrimaryStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use PrimaryStatus.ProtoReflect.Descriptor instead. 
func (*PrimaryStatus) Descriptor() ([]byte, []int) { - return file_replicationdata_proto_rawDescGZIP(), []int{2} + return file_replicationdata_proto_rawDescGZIP(), []int{3} } func (x *PrimaryStatus) GetPosition() string { @@ -442,12 +500,13 @@ type FullStatus struct { SemiSyncPrimaryTimeout uint64 `protobuf:"varint,19,opt,name=semi_sync_primary_timeout,json=semiSyncPrimaryTimeout,proto3" json:"semi_sync_primary_timeout,omitempty"` SemiSyncWaitForReplicaCount uint32 `protobuf:"varint,20,opt,name=semi_sync_wait_for_replica_count,json=semiSyncWaitForReplicaCount,proto3" json:"semi_sync_wait_for_replica_count,omitempty"` SuperReadOnly bool `protobuf:"varint,21,opt,name=super_read_only,json=superReadOnly,proto3" json:"super_read_only,omitempty"` + ReplicationConfiguration *Configuration `protobuf:"bytes,22,opt,name=replication_configuration,json=replicationConfiguration,proto3" json:"replication_configuration,omitempty"` } func (x *FullStatus) Reset() { *x = FullStatus{} if protoimpl.UnsafeEnabled { - mi := &file_replicationdata_proto_msgTypes[3] + mi := &file_replicationdata_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -460,7 +519,7 @@ func (x *FullStatus) String() string { func (*FullStatus) ProtoMessage() {} func (x *FullStatus) ProtoReflect() protoreflect.Message { - mi := &file_replicationdata_proto_msgTypes[3] + mi := &file_replicationdata_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -473,7 +532,7 @@ func (x *FullStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use FullStatus.ProtoReflect.Descriptor instead. 
func (*FullStatus) Descriptor() ([]byte, []int) { - return file_replicationdata_proto_rawDescGZIP(), []int{3} + return file_replicationdata_proto_rawDescGZIP(), []int{4} } func (x *FullStatus) GetServerId() uint32 { @@ -623,6 +682,13 @@ func (x *FullStatus) GetSuperReadOnly() bool { return false } +func (x *FullStatus) GetReplicationConfiguration() *Configuration { + if x != nil { + return x.ReplicationConfiguration + } + return nil +} + var File_replicationdata_proto protoreflect.FileDescriptor var file_replicationdata_proto_rawDesc = []byte{ @@ -686,7 +752,14 @@ var file_replicationdata_proto_rawDesc = []byte{ 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, - 0x04, 0x22, 0x77, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x04, 0x22, 0x6e, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, + 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x6e, 0x65, 0x74, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x4e, 0x65, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x22, 0x77, 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, @@ -698,7 +771,7 @@ var file_replicationdata_proto_rawDesc = []byte{ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xeb, 0x07, 0x0a, + 0x66, 0x69, 0x6c, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc8, 0x08, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, @@ -761,14 +834,20 @@ var file_replicationdata_proto_rawDesc = []byte{ 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x75, 0x70, - 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, - 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4f, 0x41, 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, - 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, - 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, - 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x64, 0x61, 
0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x72, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x5b, 0x0a, 0x19, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x3b, 0x0a, 0x13, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x12, + 0x0a, 0x0e, 0x49, 0x4f, 0x41, 0x4e, 0x44, 0x53, 0x51, 0x4c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, + 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4f, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, + 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -784,24 +863,26 @@ func file_replicationdata_proto_rawDescGZIP() []byte { } var file_replicationdata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_replicationdata_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_replicationdata_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_replicationdata_proto_goTypes = []interface{}{ (StopReplicationMode)(0), // 0: replicationdata.StopReplicationMode (*Status)(nil), // 1: replicationdata.Status - (*StopReplicationStatus)(nil), // 2: replicationdata.StopReplicationStatus - (*PrimaryStatus)(nil), // 3: 
replicationdata.PrimaryStatus - (*FullStatus)(nil), // 4: replicationdata.FullStatus + (*Configuration)(nil), // 2: replicationdata.Configuration + (*StopReplicationStatus)(nil), // 3: replicationdata.StopReplicationStatus + (*PrimaryStatus)(nil), // 4: replicationdata.PrimaryStatus + (*FullStatus)(nil), // 5: replicationdata.FullStatus } var file_replicationdata_proto_depIdxs = []int32{ 1, // 0: replicationdata.StopReplicationStatus.before:type_name -> replicationdata.Status 1, // 1: replicationdata.StopReplicationStatus.after:type_name -> replicationdata.Status 1, // 2: replicationdata.FullStatus.replication_status:type_name -> replicationdata.Status - 3, // 3: replicationdata.FullStatus.primary_status:type_name -> replicationdata.PrimaryStatus - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name + 4, // 3: replicationdata.FullStatus.primary_status:type_name -> replicationdata.PrimaryStatus + 2, // 4: replicationdata.FullStatus.replication_configuration:type_name -> replicationdata.Configuration + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_replicationdata_proto_init() } @@ -823,7 +904,7 @@ func file_replicationdata_proto_init() { } } file_replicationdata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationStatus); i { + switch v := v.(*Configuration); i { case 0: return &v.state case 1: @@ -835,7 +916,7 @@ func file_replicationdata_proto_init() { } } file_replicationdata_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimaryStatus); i 
{ + switch v := v.(*StopReplicationStatus); i { case 0: return &v.state case 1: @@ -847,6 +928,18 @@ func file_replicationdata_proto_init() { } } file_replicationdata_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrimaryStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_replicationdata_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FullStatus); i { case 0: return &v.state @@ -865,7 +958,7 @@ func file_replicationdata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_replicationdata_proto_rawDesc, NumEnums: 1, - NumMessages: 4, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go index f92a42b05e4..c5bc6e385fb 100644 --- a/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go +++ b/go/vt/proto/replicationdata/replicationdata_vtproto.pb.go @@ -5,10 +5,12 @@ package replicationdata import ( + binary "encoding/binary" fmt "fmt" proto "google.golang.org/protobuf/proto" protoimpl "google.golang.org/protobuf/runtime/protoimpl" io "io" + math "math" bits "math/bits" ) @@ -58,6 +60,25 @@ func (m *Status) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *Configuration) CloneVT() *Configuration { + if m == nil { + return (*Configuration)(nil) + } + r := &Configuration{ + HeartbeatInterval: m.HeartbeatInterval, + ReplicaNetTimeout: m.ReplicaNetTimeout, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Configuration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *StopReplicationStatus) CloneVT() *StopReplicationStatus { if m == nil { return (*StopReplicationStatus)(nil) @@ -122,6 +143,7 @@ func 
(m *FullStatus) CloneVT() *FullStatus { SemiSyncPrimaryTimeout: m.SemiSyncPrimaryTimeout, SemiSyncWaitForReplicaCount: m.SemiSyncWaitForReplicaCount, SuperReadOnly: m.SuperReadOnly, + ReplicationConfiguration: m.ReplicationConfiguration.CloneVT(), } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -340,6 +362,50 @@ func (m *Status) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Configuration) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Configuration) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Configuration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ReplicaNetTimeout != 0 { + i = encodeVarint(dAtA, i, uint64(m.ReplicaNetTimeout)) + i-- + dAtA[i] = 0x10 + } + if m.HeartbeatInterval != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.HeartbeatInterval)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + func (m *StopReplicationStatus) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -470,6 +536,18 @@ func (m *FullStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ReplicationConfiguration != nil { + size, err := m.ReplicationConfiguration.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } if m.SuperReadOnly { i-- if m.SuperReadOnly { @@ -751,6 +829,22 
@@ func (m *Status) SizeVT() (n int) { return n } +func (m *Configuration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HeartbeatInterval != 0 { + n += 9 + } + if m.ReplicaNetTimeout != 0 { + n += 1 + sov(uint64(m.ReplicaNetTimeout)) + } + n += len(m.unknownFields) + return n +} + func (m *StopReplicationStatus) SizeVT() (n int) { if m == nil { return 0 @@ -865,6 +959,10 @@ func (m *FullStatus) SizeVT() (n int) { if m.SuperReadOnly { n += 3 } + if m.ReplicationConfiguration != nil { + l = m.ReplicationConfiguration.SizeVT() + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -1479,6 +1577,87 @@ func (m *Status) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *Configuration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Configuration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Configuration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatInterval", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.HeartbeatInterval = float64(math.Float64frombits(v)) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicaNetTimeout", wireType) + } + m.ReplicaNetTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + m.ReplicaNetTimeout |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StopReplicationStatus) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2278,6 +2457,42 @@ func (m *FullStatus) UnmarshalVT(dAtA []byte) error { } } m.SuperReadOnly = bool(v != 0) + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReplicationConfiguration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReplicationConfiguration == nil { + m.ReplicationConfiguration = &Configuration{} + } + if err := m.ReplicationConfiguration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/tableacl/tableacl.pb.go b/go/vt/proto/tableacl/tableacl.pb.go index 3b26ace8157..f05afb00fba 100644 --- a/go/vt/proto/tableacl/tableacl.pb.go +++ b/go/vt/proto/tableacl/tableacl.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: tableacl.proto diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go index c9039a3cfd9..6ec17060dd3 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: tabletmanagerdata.proto @@ -1627,8 +1627,10 @@ type ApplySchemaRequest struct { BeforeSchema *SchemaDefinition `protobuf:"bytes,4,opt,name=before_schema,json=beforeSchema,proto3" json:"before_schema,omitempty"` AfterSchema *SchemaDefinition `protobuf:"bytes,5,opt,name=after_schema,json=afterSchema,proto3" json:"after_schema,omitempty"` SqlMode string `protobuf:"bytes,6,opt,name=sql_mode,json=sqlMode,proto3" json:"sql_mode,omitempty"` - // BatchSize indicates how many queries to apply together + // BatchSize indicates how many queries to apply together. BatchSize int64 `protobuf:"varint,7,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` + // DisableForeignKeyChecks will result in setting foreign_key_checks to off before applying the schema. 
+ DisableForeignKeyChecks bool `protobuf:"varint,8,opt,name=disable_foreign_key_checks,json=disableForeignKeyChecks,proto3" json:"disable_foreign_key_checks,omitempty"` } func (x *ApplySchemaRequest) Reset() { @@ -1712,6 +1714,13 @@ func (x *ApplySchemaRequest) GetBatchSize() int64 { return 0 } +func (x *ApplySchemaRequest) GetDisableForeignKeyChecks() bool { + if x != nil { + return x.DisableForeignKeyChecks + } + return false +} + type ApplySchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2044,11 +2053,12 @@ type ExecuteFetchAsDbaRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Query []byte `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` - DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` - MaxRows uint64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` - DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` - ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + Query []byte `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + MaxRows uint64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` + ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + DisableForeignKeyChecks bool `protobuf:"varint,6,opt,name=disable_foreign_key_checks,json=disableForeignKeyChecks,proto3" json:"disable_foreign_key_checks,omitempty"` } func (x *ExecuteFetchAsDbaRequest) Reset() { @@ -2118,6 +2128,13 @@ func (x *ExecuteFetchAsDbaRequest) 
GetReloadSchema() bool { return false } +func (x *ExecuteFetchAsDbaRequest) GetDisableForeignKeyChecks() bool { + if x != nil { + return x.DisableForeignKeyChecks + } + return false +} + type ExecuteFetchAsDbaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2165,6 +2182,140 @@ func (x *ExecuteFetchAsDbaResponse) GetResult() *query.QueryResult { return nil } +type ExecuteMultiFetchAsDbaRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sql []byte `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"` + DbName string `protobuf:"bytes,2,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + MaxRows uint64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` + ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` + DisableForeignKeyChecks bool `protobuf:"varint,6,opt,name=disable_foreign_key_checks,json=disableForeignKeyChecks,proto3" json:"disable_foreign_key_checks,omitempty"` +} + +func (x *ExecuteMultiFetchAsDbaRequest) Reset() { + *x = ExecuteMultiFetchAsDbaRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecuteMultiFetchAsDbaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteMultiFetchAsDbaRequest) ProtoMessage() {} + +func (x *ExecuteMultiFetchAsDbaRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use ExecuteMultiFetchAsDbaRequest.ProtoReflect.Descriptor instead. +func (*ExecuteMultiFetchAsDbaRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{40} +} + +func (x *ExecuteMultiFetchAsDbaRequest) GetSql() []byte { + if x != nil { + return x.Sql + } + return nil +} + +func (x *ExecuteMultiFetchAsDbaRequest) GetDbName() string { + if x != nil { + return x.DbName + } + return "" +} + +func (x *ExecuteMultiFetchAsDbaRequest) GetMaxRows() uint64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *ExecuteMultiFetchAsDbaRequest) GetDisableBinlogs() bool { + if x != nil { + return x.DisableBinlogs + } + return false +} + +func (x *ExecuteMultiFetchAsDbaRequest) GetReloadSchema() bool { + if x != nil { + return x.ReloadSchema + } + return false +} + +func (x *ExecuteMultiFetchAsDbaRequest) GetDisableForeignKeyChecks() bool { + if x != nil { + return x.DisableForeignKeyChecks + } + return false +} + +type ExecuteMultiFetchAsDbaResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Results []*query.QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *ExecuteMultiFetchAsDbaResponse) Reset() { + *x = ExecuteMultiFetchAsDbaResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecuteMultiFetchAsDbaResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteMultiFetchAsDbaResponse) ProtoMessage() {} + +func (x *ExecuteMultiFetchAsDbaResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return 
ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteMultiFetchAsDbaResponse.ProtoReflect.Descriptor instead. +func (*ExecuteMultiFetchAsDbaResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{41} +} + +func (x *ExecuteMultiFetchAsDbaResponse) GetResults() []*query.QueryResult { + if x != nil { + return x.Results + } + return nil +} + type ExecuteFetchAsAllPrivsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2179,7 +2330,7 @@ type ExecuteFetchAsAllPrivsRequest struct { func (x *ExecuteFetchAsAllPrivsRequest) Reset() { *x = ExecuteFetchAsAllPrivsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[40] + mi := &file_tabletmanagerdata_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2192,7 +2343,7 @@ func (x *ExecuteFetchAsAllPrivsRequest) String() string { func (*ExecuteFetchAsAllPrivsRequest) ProtoMessage() {} func (x *ExecuteFetchAsAllPrivsRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[40] + mi := &file_tabletmanagerdata_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2205,7 +2356,7 @@ func (x *ExecuteFetchAsAllPrivsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsAllPrivsRequest.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsAllPrivsRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{40} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{42} } func (x *ExecuteFetchAsAllPrivsRequest) GetQuery() []byte { @@ -2247,7 +2398,7 @@ type ExecuteFetchAsAllPrivsResponse struct { func (x *ExecuteFetchAsAllPrivsResponse) Reset() { *x = ExecuteFetchAsAllPrivsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[41] + mi := &file_tabletmanagerdata_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2260,7 +2411,7 @@ func (x *ExecuteFetchAsAllPrivsResponse) String() string { func (*ExecuteFetchAsAllPrivsResponse) ProtoMessage() {} func (x *ExecuteFetchAsAllPrivsResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[41] + mi := &file_tabletmanagerdata_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2273,7 +2424,7 @@ func (x *ExecuteFetchAsAllPrivsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsAllPrivsResponse.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsAllPrivsResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{41} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{43} } func (x *ExecuteFetchAsAllPrivsResponse) GetResult() *query.QueryResult { @@ -2295,7 +2446,7 @@ type ExecuteFetchAsAppRequest struct { func (x *ExecuteFetchAsAppRequest) Reset() { *x = ExecuteFetchAsAppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[42] + mi := &file_tabletmanagerdata_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2308,7 +2459,7 @@ func (x *ExecuteFetchAsAppRequest) String() string { func (*ExecuteFetchAsAppRequest) ProtoMessage() {} func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[42] + mi := &file_tabletmanagerdata_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2321,7 +2472,7 @@ func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsAppRequest.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{42} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{44} } func (x *ExecuteFetchAsAppRequest) GetQuery() []byte { @@ -2349,7 +2500,7 @@ type ExecuteFetchAsAppResponse struct { func (x *ExecuteFetchAsAppResponse) Reset() { *x = ExecuteFetchAsAppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[43] + mi := &file_tabletmanagerdata_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2362,7 +2513,7 @@ func (x *ExecuteFetchAsAppResponse) String() string { func (*ExecuteFetchAsAppResponse) ProtoMessage() {} func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[43] + mi := &file_tabletmanagerdata_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2375,7 +2526,7 @@ func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsAppResponse.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{43} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{45} } func (x *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { @@ -2394,7 +2545,7 @@ type ReplicationStatusRequest struct { func (x *ReplicationStatusRequest) Reset() { *x = ReplicationStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[44] + mi := &file_tabletmanagerdata_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2407,7 +2558,7 @@ func (x *ReplicationStatusRequest) String() string { func (*ReplicationStatusRequest) ProtoMessage() {} func (x *ReplicationStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[44] + mi := &file_tabletmanagerdata_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2420,7 +2571,7 @@ func (x *ReplicationStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicationStatusRequest.ProtoReflect.Descriptor instead. 
func (*ReplicationStatusRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{44} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{46} } type ReplicationStatusResponse struct { @@ -2434,7 +2585,7 @@ type ReplicationStatusResponse struct { func (x *ReplicationStatusResponse) Reset() { *x = ReplicationStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[45] + mi := &file_tabletmanagerdata_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2447,7 +2598,7 @@ func (x *ReplicationStatusResponse) String() string { func (*ReplicationStatusResponse) ProtoMessage() {} func (x *ReplicationStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[45] + mi := &file_tabletmanagerdata_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2460,7 +2611,7 @@ func (x *ReplicationStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicationStatusResponse.ProtoReflect.Descriptor instead. 
func (*ReplicationStatusResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{45} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{47} } func (x *ReplicationStatusResponse) GetStatus() *replicationdata.Status { @@ -2479,7 +2630,7 @@ type PrimaryStatusRequest struct { func (x *PrimaryStatusRequest) Reset() { *x = PrimaryStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[46] + mi := &file_tabletmanagerdata_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2492,7 +2643,7 @@ func (x *PrimaryStatusRequest) String() string { func (*PrimaryStatusRequest) ProtoMessage() {} func (x *PrimaryStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[46] + mi := &file_tabletmanagerdata_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2505,7 +2656,7 @@ func (x *PrimaryStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PrimaryStatusRequest.ProtoReflect.Descriptor instead. 
func (*PrimaryStatusRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{46} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{48} } type PrimaryStatusResponse struct { @@ -2519,7 +2670,7 @@ type PrimaryStatusResponse struct { func (x *PrimaryStatusResponse) Reset() { *x = PrimaryStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[47] + mi := &file_tabletmanagerdata_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2532,7 +2683,7 @@ func (x *PrimaryStatusResponse) String() string { func (*PrimaryStatusResponse) ProtoMessage() {} func (x *PrimaryStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[47] + mi := &file_tabletmanagerdata_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2545,7 +2696,7 @@ func (x *PrimaryStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PrimaryStatusResponse.ProtoReflect.Descriptor instead. 
func (*PrimaryStatusResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{47} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{49} } func (x *PrimaryStatusResponse) GetStatus() *replicationdata.PrimaryStatus { @@ -2564,7 +2715,7 @@ type PrimaryPositionRequest struct { func (x *PrimaryPositionRequest) Reset() { *x = PrimaryPositionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[48] + mi := &file_tabletmanagerdata_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2577,7 +2728,7 @@ func (x *PrimaryPositionRequest) String() string { func (*PrimaryPositionRequest) ProtoMessage() {} func (x *PrimaryPositionRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[48] + mi := &file_tabletmanagerdata_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2590,7 +2741,7 @@ func (x *PrimaryPositionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PrimaryPositionRequest.ProtoReflect.Descriptor instead. 
func (*PrimaryPositionRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{48} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{50} } type PrimaryPositionResponse struct { @@ -2604,7 +2755,7 @@ type PrimaryPositionResponse struct { func (x *PrimaryPositionResponse) Reset() { *x = PrimaryPositionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[49] + mi := &file_tabletmanagerdata_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2617,7 +2768,7 @@ func (x *PrimaryPositionResponse) String() string { func (*PrimaryPositionResponse) ProtoMessage() {} func (x *PrimaryPositionResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[49] + mi := &file_tabletmanagerdata_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2630,7 +2781,7 @@ func (x *PrimaryPositionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PrimaryPositionResponse.ProtoReflect.Descriptor instead. 
func (*PrimaryPositionResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{49} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{51} } func (x *PrimaryPositionResponse) GetPosition() string { @@ -2651,7 +2802,7 @@ type WaitForPositionRequest struct { func (x *WaitForPositionRequest) Reset() { *x = WaitForPositionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[50] + mi := &file_tabletmanagerdata_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2664,7 +2815,7 @@ func (x *WaitForPositionRequest) String() string { func (*WaitForPositionRequest) ProtoMessage() {} func (x *WaitForPositionRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[50] + mi := &file_tabletmanagerdata_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2677,7 +2828,7 @@ func (x *WaitForPositionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitForPositionRequest.ProtoReflect.Descriptor instead. 
func (*WaitForPositionRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{50} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{52} } func (x *WaitForPositionRequest) GetPosition() string { @@ -2696,7 +2847,7 @@ type WaitForPositionResponse struct { func (x *WaitForPositionResponse) Reset() { *x = WaitForPositionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[51] + mi := &file_tabletmanagerdata_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2709,7 +2860,7 @@ func (x *WaitForPositionResponse) String() string { func (*WaitForPositionResponse) ProtoMessage() {} func (x *WaitForPositionResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[51] + mi := &file_tabletmanagerdata_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2722,7 +2873,7 @@ func (x *WaitForPositionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WaitForPositionResponse.ProtoReflect.Descriptor instead. 
func (*WaitForPositionResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{51} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{53} } type StopReplicationRequest struct { @@ -2734,7 +2885,7 @@ type StopReplicationRequest struct { func (x *StopReplicationRequest) Reset() { *x = StopReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[52] + mi := &file_tabletmanagerdata_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2747,7 +2898,7 @@ func (x *StopReplicationRequest) String() string { func (*StopReplicationRequest) ProtoMessage() {} func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[52] + mi := &file_tabletmanagerdata_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2760,7 +2911,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. 
func (*StopReplicationRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{52} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{54} } type StopReplicationResponse struct { @@ -2772,7 +2923,7 @@ type StopReplicationResponse struct { func (x *StopReplicationResponse) Reset() { *x = StopReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[53] + mi := &file_tabletmanagerdata_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2785,7 +2936,7 @@ func (x *StopReplicationResponse) String() string { func (*StopReplicationResponse) ProtoMessage() {} func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[53] + mi := &file_tabletmanagerdata_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2798,7 +2949,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. 
func (*StopReplicationResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{53} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{55} } type StopReplicationMinimumRequest struct { @@ -2813,7 +2964,7 @@ type StopReplicationMinimumRequest struct { func (x *StopReplicationMinimumRequest) Reset() { *x = StopReplicationMinimumRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[54] + mi := &file_tabletmanagerdata_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2826,7 +2977,7 @@ func (x *StopReplicationMinimumRequest) String() string { func (*StopReplicationMinimumRequest) ProtoMessage() {} func (x *StopReplicationMinimumRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[54] + mi := &file_tabletmanagerdata_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2839,7 +2990,7 @@ func (x *StopReplicationMinimumRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationMinimumRequest.ProtoReflect.Descriptor instead. 
func (*StopReplicationMinimumRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{54} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{56} } func (x *StopReplicationMinimumRequest) GetPosition() string { @@ -2867,7 +3018,7 @@ type StopReplicationMinimumResponse struct { func (x *StopReplicationMinimumResponse) Reset() { *x = StopReplicationMinimumResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[55] + mi := &file_tabletmanagerdata_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2880,7 +3031,7 @@ func (x *StopReplicationMinimumResponse) String() string { func (*StopReplicationMinimumResponse) ProtoMessage() {} func (x *StopReplicationMinimumResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[55] + mi := &file_tabletmanagerdata_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2893,7 +3044,7 @@ func (x *StopReplicationMinimumResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationMinimumResponse.ProtoReflect.Descriptor instead. 
func (*StopReplicationMinimumResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{55} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{57} } func (x *StopReplicationMinimumResponse) GetPosition() string { @@ -2914,7 +3065,7 @@ type StartReplicationRequest struct { func (x *StartReplicationRequest) Reset() { *x = StartReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[56] + mi := &file_tabletmanagerdata_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2927,7 +3078,7 @@ func (x *StartReplicationRequest) String() string { func (*StartReplicationRequest) ProtoMessage() {} func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[56] + mi := &file_tabletmanagerdata_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2940,7 +3091,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. 
func (*StartReplicationRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{56} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{58} } func (x *StartReplicationRequest) GetSemiSync() bool { @@ -2959,7 +3110,7 @@ type StartReplicationResponse struct { func (x *StartReplicationResponse) Reset() { *x = StartReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[57] + mi := &file_tabletmanagerdata_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2972,7 +3123,7 @@ func (x *StartReplicationResponse) String() string { func (*StartReplicationResponse) ProtoMessage() {} func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[57] + mi := &file_tabletmanagerdata_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2985,7 +3136,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. 
func (*StartReplicationResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{57} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{59} } type StartReplicationUntilAfterRequest struct { @@ -3000,7 +3151,7 @@ type StartReplicationUntilAfterRequest struct { func (x *StartReplicationUntilAfterRequest) Reset() { *x = StartReplicationUntilAfterRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[58] + mi := &file_tabletmanagerdata_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3013,7 +3164,7 @@ func (x *StartReplicationUntilAfterRequest) String() string { func (*StartReplicationUntilAfterRequest) ProtoMessage() {} func (x *StartReplicationUntilAfterRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[58] + mi := &file_tabletmanagerdata_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3026,7 +3177,7 @@ func (x *StartReplicationUntilAfterRequest) ProtoReflect() protoreflect.Message // Deprecated: Use StartReplicationUntilAfterRequest.ProtoReflect.Descriptor instead. 
func (*StartReplicationUntilAfterRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{58} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{60} } func (x *StartReplicationUntilAfterRequest) GetPosition() string { @@ -3052,7 +3203,7 @@ type StartReplicationUntilAfterResponse struct { func (x *StartReplicationUntilAfterResponse) Reset() { *x = StartReplicationUntilAfterResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[59] + mi := &file_tabletmanagerdata_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3065,7 +3216,7 @@ func (x *StartReplicationUntilAfterResponse) String() string { func (*StartReplicationUntilAfterResponse) ProtoMessage() {} func (x *StartReplicationUntilAfterResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[59] + mi := &file_tabletmanagerdata_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,7 +3229,7 @@ func (x *StartReplicationUntilAfterResponse) ProtoReflect() protoreflect.Message // Deprecated: Use StartReplicationUntilAfterResponse.ProtoReflect.Descriptor instead. 
func (*StartReplicationUntilAfterResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{59} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{61} } type GetReplicasRequest struct { @@ -3090,7 +3241,7 @@ type GetReplicasRequest struct { func (x *GetReplicasRequest) Reset() { *x = GetReplicasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[60] + mi := &file_tabletmanagerdata_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3103,7 +3254,7 @@ func (x *GetReplicasRequest) String() string { func (*GetReplicasRequest) ProtoMessage() {} func (x *GetReplicasRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[60] + mi := &file_tabletmanagerdata_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3116,7 +3267,7 @@ func (x *GetReplicasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetReplicasRequest.ProtoReflect.Descriptor instead. 
func (*GetReplicasRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{60} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{62} } type GetReplicasResponse struct { @@ -3130,7 +3281,7 @@ type GetReplicasResponse struct { func (x *GetReplicasResponse) Reset() { *x = GetReplicasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[61] + mi := &file_tabletmanagerdata_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3143,7 +3294,7 @@ func (x *GetReplicasResponse) String() string { func (*GetReplicasResponse) ProtoMessage() {} func (x *GetReplicasResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[61] + mi := &file_tabletmanagerdata_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3156,7 +3307,7 @@ func (x *GetReplicasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetReplicasResponse.ProtoReflect.Descriptor instead. 
func (*GetReplicasResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{61} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{63} } func (x *GetReplicasResponse) GetAddrs() []string { @@ -3175,7 +3326,7 @@ type ResetReplicationRequest struct { func (x *ResetReplicationRequest) Reset() { *x = ResetReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[62] + mi := &file_tabletmanagerdata_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3188,7 +3339,7 @@ func (x *ResetReplicationRequest) String() string { func (*ResetReplicationRequest) ProtoMessage() {} func (x *ResetReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[62] + mi := &file_tabletmanagerdata_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3201,7 +3352,7 @@ func (x *ResetReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ResetReplicationRequest.ProtoReflect.Descriptor instead. 
func (*ResetReplicationRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{62} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{64} } type ResetReplicationResponse struct { @@ -3213,7 +3364,7 @@ type ResetReplicationResponse struct { func (x *ResetReplicationResponse) Reset() { *x = ResetReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[63] + mi := &file_tabletmanagerdata_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3226,7 +3377,7 @@ func (x *ResetReplicationResponse) String() string { func (*ResetReplicationResponse) ProtoMessage() {} func (x *ResetReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[63] + mi := &file_tabletmanagerdata_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3239,7 +3390,7 @@ func (x *ResetReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ResetReplicationResponse.ProtoReflect.Descriptor instead. 
func (*ResetReplicationResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{63} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{65} } type VReplicationExecRequest struct { @@ -3253,7 +3404,7 @@ type VReplicationExecRequest struct { func (x *VReplicationExecRequest) Reset() { *x = VReplicationExecRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[64] + mi := &file_tabletmanagerdata_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3266,7 +3417,7 @@ func (x *VReplicationExecRequest) String() string { func (*VReplicationExecRequest) ProtoMessage() {} func (x *VReplicationExecRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[64] + mi := &file_tabletmanagerdata_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3279,7 +3430,7 @@ func (x *VReplicationExecRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VReplicationExecRequest.ProtoReflect.Descriptor instead. 
func (*VReplicationExecRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{64} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{66} } func (x *VReplicationExecRequest) GetQuery() string { @@ -3300,7 +3451,7 @@ type VReplicationExecResponse struct { func (x *VReplicationExecResponse) Reset() { *x = VReplicationExecResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[65] + mi := &file_tabletmanagerdata_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3313,7 +3464,7 @@ func (x *VReplicationExecResponse) String() string { func (*VReplicationExecResponse) ProtoMessage() {} func (x *VReplicationExecResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[65] + mi := &file_tabletmanagerdata_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3326,7 +3477,7 @@ func (x *VReplicationExecResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VReplicationExecResponse.ProtoReflect.Descriptor instead. 
func (*VReplicationExecResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{65} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{67} } func (x *VReplicationExecResponse) GetResult() *query.QueryResult { @@ -3348,7 +3499,7 @@ type VReplicationWaitForPosRequest struct { func (x *VReplicationWaitForPosRequest) Reset() { *x = VReplicationWaitForPosRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[66] + mi := &file_tabletmanagerdata_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3361,7 +3512,7 @@ func (x *VReplicationWaitForPosRequest) String() string { func (*VReplicationWaitForPosRequest) ProtoMessage() {} func (x *VReplicationWaitForPosRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[66] + mi := &file_tabletmanagerdata_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3374,7 +3525,7 @@ func (x *VReplicationWaitForPosRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VReplicationWaitForPosRequest.ProtoReflect.Descriptor instead. 
func (*VReplicationWaitForPosRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{66} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{68} } func (x *VReplicationWaitForPosRequest) GetId() int32 { @@ -3400,7 +3551,7 @@ type VReplicationWaitForPosResponse struct { func (x *VReplicationWaitForPosResponse) Reset() { *x = VReplicationWaitForPosResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[67] + mi := &file_tabletmanagerdata_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3413,7 +3564,7 @@ func (x *VReplicationWaitForPosResponse) String() string { func (*VReplicationWaitForPosResponse) ProtoMessage() {} func (x *VReplicationWaitForPosResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[67] + mi := &file_tabletmanagerdata_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3426,7 +3577,7 @@ func (x *VReplicationWaitForPosResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VReplicationWaitForPosResponse.ProtoReflect.Descriptor instead. 
func (*VReplicationWaitForPosResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{67} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{69} } type InitPrimaryRequest struct { @@ -3440,7 +3591,7 @@ type InitPrimaryRequest struct { func (x *InitPrimaryRequest) Reset() { *x = InitPrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[68] + mi := &file_tabletmanagerdata_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3453,7 +3604,7 @@ func (x *InitPrimaryRequest) String() string { func (*InitPrimaryRequest) ProtoMessage() {} func (x *InitPrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[68] + mi := &file_tabletmanagerdata_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3466,7 +3617,7 @@ func (x *InitPrimaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InitPrimaryRequest.ProtoReflect.Descriptor instead. 
func (*InitPrimaryRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{68} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{70} } func (x *InitPrimaryRequest) GetSemiSync() bool { @@ -3487,7 +3638,7 @@ type InitPrimaryResponse struct { func (x *InitPrimaryResponse) Reset() { *x = InitPrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[69] + mi := &file_tabletmanagerdata_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3500,7 +3651,7 @@ func (x *InitPrimaryResponse) String() string { func (*InitPrimaryResponse) ProtoMessage() {} func (x *InitPrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[69] + mi := &file_tabletmanagerdata_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3513,7 +3664,7 @@ func (x *InitPrimaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InitPrimaryResponse.ProtoReflect.Descriptor instead. 
func (*InitPrimaryResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{69} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{71} } func (x *InitPrimaryResponse) GetPosition() string { @@ -3537,7 +3688,7 @@ type PopulateReparentJournalRequest struct { func (x *PopulateReparentJournalRequest) Reset() { *x = PopulateReparentJournalRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[70] + mi := &file_tabletmanagerdata_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3550,7 +3701,7 @@ func (x *PopulateReparentJournalRequest) String() string { func (*PopulateReparentJournalRequest) ProtoMessage() {} func (x *PopulateReparentJournalRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[70] + mi := &file_tabletmanagerdata_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3563,7 +3714,7 @@ func (x *PopulateReparentJournalRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PopulateReparentJournalRequest.ProtoReflect.Descriptor instead. 
func (*PopulateReparentJournalRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{70} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{72} } func (x *PopulateReparentJournalRequest) GetTimeCreatedNs() int64 { @@ -3603,7 +3754,7 @@ type PopulateReparentJournalResponse struct { func (x *PopulateReparentJournalResponse) Reset() { *x = PopulateReparentJournalResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[71] + mi := &file_tabletmanagerdata_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3616,7 +3767,7 @@ func (x *PopulateReparentJournalResponse) String() string { func (*PopulateReparentJournalResponse) ProtoMessage() {} func (x *PopulateReparentJournalResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[71] + mi := &file_tabletmanagerdata_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3629,7 +3780,7 @@ func (x *PopulateReparentJournalResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PopulateReparentJournalResponse.ProtoReflect.Descriptor instead. 
func (*PopulateReparentJournalResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{71} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{73} } type InitReplicaRequest struct { @@ -3646,7 +3797,7 @@ type InitReplicaRequest struct { func (x *InitReplicaRequest) Reset() { *x = InitReplicaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[72] + mi := &file_tabletmanagerdata_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3659,7 +3810,7 @@ func (x *InitReplicaRequest) String() string { func (*InitReplicaRequest) ProtoMessage() {} func (x *InitReplicaRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[72] + mi := &file_tabletmanagerdata_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3672,7 +3823,7 @@ func (x *InitReplicaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InitReplicaRequest.ProtoReflect.Descriptor instead. 
func (*InitReplicaRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{72} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{74} } func (x *InitReplicaRequest) GetParent() *topodata.TabletAlias { @@ -3712,7 +3863,7 @@ type InitReplicaResponse struct { func (x *InitReplicaResponse) Reset() { *x = InitReplicaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[73] + mi := &file_tabletmanagerdata_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3725,7 +3876,7 @@ func (x *InitReplicaResponse) String() string { func (*InitReplicaResponse) ProtoMessage() {} func (x *InitReplicaResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[73] + mi := &file_tabletmanagerdata_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3738,7 +3889,7 @@ func (x *InitReplicaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InitReplicaResponse.ProtoReflect.Descriptor instead. 
func (*InitReplicaResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{73} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{75} } type DemotePrimaryRequest struct { @@ -3750,7 +3901,7 @@ type DemotePrimaryRequest struct { func (x *DemotePrimaryRequest) Reset() { *x = DemotePrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[74] + mi := &file_tabletmanagerdata_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3763,7 +3914,7 @@ func (x *DemotePrimaryRequest) String() string { func (*DemotePrimaryRequest) ProtoMessage() {} func (x *DemotePrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[74] + mi := &file_tabletmanagerdata_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3776,7 +3927,7 @@ func (x *DemotePrimaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DemotePrimaryRequest.ProtoReflect.Descriptor instead. func (*DemotePrimaryRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{74} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{76} } type DemotePrimaryResponse struct { @@ -3784,14 +3935,14 @@ type DemotePrimaryResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a primary that has been demoted. + // PrimaryStatus represents the response from calling `SHOW BINARY LOG STATUS` on a primary that has been demoted. 
PrimaryStatus *replicationdata.PrimaryStatus `protobuf:"bytes,2,opt,name=primary_status,json=primaryStatus,proto3" json:"primary_status,omitempty"` } func (x *DemotePrimaryResponse) Reset() { *x = DemotePrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[75] + mi := &file_tabletmanagerdata_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3804,7 +3955,7 @@ func (x *DemotePrimaryResponse) String() string { func (*DemotePrimaryResponse) ProtoMessage() {} func (x *DemotePrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[75] + mi := &file_tabletmanagerdata_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3817,7 +3968,7 @@ func (x *DemotePrimaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DemotePrimaryResponse.ProtoReflect.Descriptor instead. 
func (*DemotePrimaryResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{75} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{77} } func (x *DemotePrimaryResponse) GetPrimaryStatus() *replicationdata.PrimaryStatus { @@ -3838,7 +3989,7 @@ type UndoDemotePrimaryRequest struct { func (x *UndoDemotePrimaryRequest) Reset() { *x = UndoDemotePrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[76] + mi := &file_tabletmanagerdata_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3851,7 +4002,7 @@ func (x *UndoDemotePrimaryRequest) String() string { func (*UndoDemotePrimaryRequest) ProtoMessage() {} func (x *UndoDemotePrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[76] + mi := &file_tabletmanagerdata_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3864,7 +4015,7 @@ func (x *UndoDemotePrimaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UndoDemotePrimaryRequest.ProtoReflect.Descriptor instead. 
func (*UndoDemotePrimaryRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{76} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{78} } func (x *UndoDemotePrimaryRequest) GetSemiSync() bool { @@ -3883,7 +4034,7 @@ type UndoDemotePrimaryResponse struct { func (x *UndoDemotePrimaryResponse) Reset() { *x = UndoDemotePrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[77] + mi := &file_tabletmanagerdata_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3896,7 +4047,7 @@ func (x *UndoDemotePrimaryResponse) String() string { func (*UndoDemotePrimaryResponse) ProtoMessage() {} func (x *UndoDemotePrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[77] + mi := &file_tabletmanagerdata_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3909,7 +4060,7 @@ func (x *UndoDemotePrimaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UndoDemotePrimaryResponse.ProtoReflect.Descriptor instead. 
func (*UndoDemotePrimaryResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{77} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{79} } type ReplicaWasPromotedRequest struct { @@ -3921,7 +4072,7 @@ type ReplicaWasPromotedRequest struct { func (x *ReplicaWasPromotedRequest) Reset() { *x = ReplicaWasPromotedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[78] + mi := &file_tabletmanagerdata_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3934,7 +4085,7 @@ func (x *ReplicaWasPromotedRequest) String() string { func (*ReplicaWasPromotedRequest) ProtoMessage() {} func (x *ReplicaWasPromotedRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[78] + mi := &file_tabletmanagerdata_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3947,7 +4098,7 @@ func (x *ReplicaWasPromotedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicaWasPromotedRequest.ProtoReflect.Descriptor instead. 
func (*ReplicaWasPromotedRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{78} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{80} } type ReplicaWasPromotedResponse struct { @@ -3959,7 +4110,7 @@ type ReplicaWasPromotedResponse struct { func (x *ReplicaWasPromotedResponse) Reset() { *x = ReplicaWasPromotedResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[79] + mi := &file_tabletmanagerdata_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3972,7 +4123,7 @@ func (x *ReplicaWasPromotedResponse) String() string { func (*ReplicaWasPromotedResponse) ProtoMessage() {} func (x *ReplicaWasPromotedResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[79] + mi := &file_tabletmanagerdata_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3985,7 +4136,7 @@ func (x *ReplicaWasPromotedResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicaWasPromotedResponse.ProtoReflect.Descriptor instead. 
func (*ReplicaWasPromotedResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{79} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{81} } type ResetReplicationParametersRequest struct { @@ -3997,7 +4148,7 @@ type ResetReplicationParametersRequest struct { func (x *ResetReplicationParametersRequest) Reset() { *x = ResetReplicationParametersRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[80] + mi := &file_tabletmanagerdata_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4010,7 +4161,7 @@ func (x *ResetReplicationParametersRequest) String() string { func (*ResetReplicationParametersRequest) ProtoMessage() {} func (x *ResetReplicationParametersRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[80] + mi := &file_tabletmanagerdata_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4023,7 +4174,7 @@ func (x *ResetReplicationParametersRequest) ProtoReflect() protoreflect.Message // Deprecated: Use ResetReplicationParametersRequest.ProtoReflect.Descriptor instead. 
func (*ResetReplicationParametersRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{80} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{82} } type ResetReplicationParametersResponse struct { @@ -4035,7 +4186,7 @@ type ResetReplicationParametersResponse struct { func (x *ResetReplicationParametersResponse) Reset() { *x = ResetReplicationParametersResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[81] + mi := &file_tabletmanagerdata_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4048,7 +4199,7 @@ func (x *ResetReplicationParametersResponse) String() string { func (*ResetReplicationParametersResponse) ProtoMessage() {} func (x *ResetReplicationParametersResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[81] + mi := &file_tabletmanagerdata_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4061,7 +4212,7 @@ func (x *ResetReplicationParametersResponse) ProtoReflect() protoreflect.Message // Deprecated: Use ResetReplicationParametersResponse.ProtoReflect.Descriptor instead. 
func (*ResetReplicationParametersResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{81} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{83} } type FullStatusRequest struct { @@ -4073,7 +4224,7 @@ type FullStatusRequest struct { func (x *FullStatusRequest) Reset() { *x = FullStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[82] + mi := &file_tabletmanagerdata_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4086,7 +4237,7 @@ func (x *FullStatusRequest) String() string { func (*FullStatusRequest) ProtoMessage() {} func (x *FullStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[82] + mi := &file_tabletmanagerdata_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4099,7 +4250,7 @@ func (x *FullStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FullStatusRequest.ProtoReflect.Descriptor instead. 
func (*FullStatusRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{82} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{84} } type FullStatusResponse struct { @@ -4113,7 +4264,7 @@ type FullStatusResponse struct { func (x *FullStatusResponse) Reset() { *x = FullStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[83] + mi := &file_tabletmanagerdata_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4126,7 +4277,7 @@ func (x *FullStatusResponse) String() string { func (*FullStatusResponse) ProtoMessage() {} func (x *FullStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[83] + mi := &file_tabletmanagerdata_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4139,7 +4290,7 @@ func (x *FullStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FullStatusResponse.ProtoReflect.Descriptor instead. 
func (*FullStatusResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{83} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{85} } func (x *FullStatusResponse) GetStatus() *replicationdata.FullStatus { @@ -4159,12 +4310,13 @@ type SetReplicationSourceRequest struct { ForceStartReplication bool `protobuf:"varint,3,opt,name=force_start_replication,json=forceStartReplication,proto3" json:"force_start_replication,omitempty"` WaitPosition string `protobuf:"bytes,4,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` SemiSync bool `protobuf:"varint,5,opt,name=semiSync,proto3" json:"semiSync,omitempty"` + HeartbeatInterval float64 `protobuf:"fixed64,6,opt,name=heartbeat_interval,json=heartbeatInterval,proto3" json:"heartbeat_interval,omitempty"` } func (x *SetReplicationSourceRequest) Reset() { *x = SetReplicationSourceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[84] + mi := &file_tabletmanagerdata_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4177,7 +4329,7 @@ func (x *SetReplicationSourceRequest) String() string { func (*SetReplicationSourceRequest) ProtoMessage() {} func (x *SetReplicationSourceRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[84] + mi := &file_tabletmanagerdata_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4190,7 +4342,7 @@ func (x *SetReplicationSourceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetReplicationSourceRequest.ProtoReflect.Descriptor instead. 
func (*SetReplicationSourceRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{84} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{86} } func (x *SetReplicationSourceRequest) GetParent() *topodata.TabletAlias { @@ -4228,6 +4380,13 @@ func (x *SetReplicationSourceRequest) GetSemiSync() bool { return false } +func (x *SetReplicationSourceRequest) GetHeartbeatInterval() float64 { + if x != nil { + return x.HeartbeatInterval + } + return 0 +} + type SetReplicationSourceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4237,7 +4396,7 @@ type SetReplicationSourceResponse struct { func (x *SetReplicationSourceResponse) Reset() { *x = SetReplicationSourceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[85] + mi := &file_tabletmanagerdata_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4250,7 +4409,7 @@ func (x *SetReplicationSourceResponse) String() string { func (*SetReplicationSourceResponse) ProtoMessage() {} func (x *SetReplicationSourceResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[85] + mi := &file_tabletmanagerdata_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4263,7 +4422,7 @@ func (x *SetReplicationSourceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetReplicationSourceResponse.ProtoReflect.Descriptor instead. 
func (*SetReplicationSourceResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{85} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{87} } type ReplicaWasRestartedRequest struct { @@ -4278,7 +4437,7 @@ type ReplicaWasRestartedRequest struct { func (x *ReplicaWasRestartedRequest) Reset() { *x = ReplicaWasRestartedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[86] + mi := &file_tabletmanagerdata_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4291,7 +4450,7 @@ func (x *ReplicaWasRestartedRequest) String() string { func (*ReplicaWasRestartedRequest) ProtoMessage() {} func (x *ReplicaWasRestartedRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[86] + mi := &file_tabletmanagerdata_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4304,7 +4463,7 @@ func (x *ReplicaWasRestartedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicaWasRestartedRequest.ProtoReflect.Descriptor instead. 
func (*ReplicaWasRestartedRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{86} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{88} } func (x *ReplicaWasRestartedRequest) GetParent() *topodata.TabletAlias { @@ -4323,7 +4482,7 @@ type ReplicaWasRestartedResponse struct { func (x *ReplicaWasRestartedResponse) Reset() { *x = ReplicaWasRestartedResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[87] + mi := &file_tabletmanagerdata_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4336,7 +4495,7 @@ func (x *ReplicaWasRestartedResponse) String() string { func (*ReplicaWasRestartedResponse) ProtoMessage() {} func (x *ReplicaWasRestartedResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[87] + mi := &file_tabletmanagerdata_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4349,7 +4508,7 @@ func (x *ReplicaWasRestartedResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicaWasRestartedResponse.ProtoReflect.Descriptor instead. 
func (*ReplicaWasRestartedResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{87} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{89} } type StopReplicationAndGetStatusRequest struct { @@ -4363,7 +4522,7 @@ type StopReplicationAndGetStatusRequest struct { func (x *StopReplicationAndGetStatusRequest) Reset() { *x = StopReplicationAndGetStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[88] + mi := &file_tabletmanagerdata_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4376,7 +4535,7 @@ func (x *StopReplicationAndGetStatusRequest) String() string { func (*StopReplicationAndGetStatusRequest) ProtoMessage() {} func (x *StopReplicationAndGetStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[88] + mi := &file_tabletmanagerdata_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4389,7 +4548,7 @@ func (x *StopReplicationAndGetStatusRequest) ProtoReflect() protoreflect.Message // Deprecated: Use StopReplicationAndGetStatusRequest.ProtoReflect.Descriptor instead. 
func (*StopReplicationAndGetStatusRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{88} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{90} } func (x *StopReplicationAndGetStatusRequest) GetStopReplicationMode() replicationdata.StopReplicationMode { @@ -4411,7 +4570,7 @@ type StopReplicationAndGetStatusResponse struct { func (x *StopReplicationAndGetStatusResponse) Reset() { *x = StopReplicationAndGetStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[89] + mi := &file_tabletmanagerdata_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4424,7 +4583,7 @@ func (x *StopReplicationAndGetStatusResponse) String() string { func (*StopReplicationAndGetStatusResponse) ProtoMessage() {} func (x *StopReplicationAndGetStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[89] + mi := &file_tabletmanagerdata_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4437,7 +4596,7 @@ func (x *StopReplicationAndGetStatusResponse) ProtoReflect() protoreflect.Messag // Deprecated: Use StopReplicationAndGetStatusResponse.ProtoReflect.Descriptor instead. 
func (*StopReplicationAndGetStatusResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{89} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{91} } func (x *StopReplicationAndGetStatusResponse) GetStatus() *replicationdata.StopReplicationStatus { @@ -4458,7 +4617,7 @@ type PromoteReplicaRequest struct { func (x *PromoteReplicaRequest) Reset() { *x = PromoteReplicaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[90] + mi := &file_tabletmanagerdata_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4471,7 +4630,7 @@ func (x *PromoteReplicaRequest) String() string { func (*PromoteReplicaRequest) ProtoMessage() {} func (x *PromoteReplicaRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[90] + mi := &file_tabletmanagerdata_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4484,7 +4643,7 @@ func (x *PromoteReplicaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteReplicaRequest.ProtoReflect.Descriptor instead. 
func (*PromoteReplicaRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{90} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{92} } func (x *PromoteReplicaRequest) GetSemiSync() bool { @@ -4505,7 +4664,7 @@ type PromoteReplicaResponse struct { func (x *PromoteReplicaResponse) Reset() { *x = PromoteReplicaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[91] + mi := &file_tabletmanagerdata_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4518,7 +4677,7 @@ func (x *PromoteReplicaResponse) String() string { func (*PromoteReplicaResponse) ProtoMessage() {} func (x *PromoteReplicaResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[91] + mi := &file_tabletmanagerdata_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4531,7 +4690,7 @@ func (x *PromoteReplicaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteReplicaResponse.ProtoReflect.Descriptor instead. func (*PromoteReplicaResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{91} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{93} } func (x *PromoteReplicaResponse) GetPosition() string { @@ -4546,7 +4705,7 @@ type BackupRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Concurrency int64 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Concurrency int32 `protobuf:"varint,1,opt,name=concurrency,proto3" json:"concurrency,omitempty"` AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` // IncrementalFromPos indicates a position of a previous backup. 
When this value is non-empty // then the backup becomes incremental and applies as of given position. @@ -4559,7 +4718,7 @@ type BackupRequest struct { func (x *BackupRequest) Reset() { *x = BackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[92] + mi := &file_tabletmanagerdata_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4572,7 +4731,7 @@ func (x *BackupRequest) String() string { func (*BackupRequest) ProtoMessage() {} func (x *BackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[92] + mi := &file_tabletmanagerdata_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4585,10 +4744,10 @@ func (x *BackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. func (*BackupRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{92} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{94} } -func (x *BackupRequest) GetConcurrency() int64 { +func (x *BackupRequest) GetConcurrency() int32 { if x != nil { return x.Concurrency } @@ -4627,7 +4786,7 @@ type BackupResponse struct { func (x *BackupResponse) Reset() { *x = BackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[93] + mi := &file_tabletmanagerdata_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4640,7 +4799,7 @@ func (x *BackupResponse) String() string { func (*BackupResponse) ProtoMessage() {} func (x *BackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[93] + mi := &file_tabletmanagerdata_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == 
nil { @@ -4653,7 +4812,7 @@ func (x *BackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. func (*BackupResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{93} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{95} } func (x *BackupResponse) GetEvent() *logutil.Event { @@ -4683,7 +4842,7 @@ type RestoreFromBackupRequest struct { func (x *RestoreFromBackupRequest) Reset() { *x = RestoreFromBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[94] + mi := &file_tabletmanagerdata_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4696,7 +4855,7 @@ func (x *RestoreFromBackupRequest) String() string { func (*RestoreFromBackupRequest) ProtoMessage() {} func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[94] + mi := &file_tabletmanagerdata_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4709,7 +4868,7 @@ func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead. 
func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{94} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{96} } func (x *RestoreFromBackupRequest) GetBackupTime() *vttime.Time { @@ -4751,7 +4910,7 @@ type RestoreFromBackupResponse struct { func (x *RestoreFromBackupResponse) Reset() { *x = RestoreFromBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[95] + mi := &file_tabletmanagerdata_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4764,7 +4923,7 @@ func (x *RestoreFromBackupResponse) String() string { func (*RestoreFromBackupResponse) ProtoMessage() {} func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[95] + mi := &file_tabletmanagerdata_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4777,7 +4936,7 @@ func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead. func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{95} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{97} } func (x *RestoreFromBackupResponse) GetEvent() *logutil.Event { @@ -4807,13 +4966,14 @@ type CreateVReplicationWorkflowRequest struct { // AutoStart specifies if the workflow should be started when created. AutoStart bool `protobuf:"varint,9,opt,name=auto_start,json=autoStart,proto3" json:"auto_start,omitempty"` // Should the workflow stop after the copy phase. 
- StopAfterCopy bool `protobuf:"varint,10,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + StopAfterCopy bool `protobuf:"varint,10,opt,name=stop_after_copy,json=stopAfterCopy,proto3" json:"stop_after_copy,omitempty"` + Options string `protobuf:"bytes,11,opt,name=options,proto3" json:"options,omitempty"` } func (x *CreateVReplicationWorkflowRequest) Reset() { *x = CreateVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[96] + mi := &file_tabletmanagerdata_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4826,7 +4986,7 @@ func (x *CreateVReplicationWorkflowRequest) String() string { func (*CreateVReplicationWorkflowRequest) ProtoMessage() {} func (x *CreateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[96] + mi := &file_tabletmanagerdata_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4839,7 +4999,7 @@ func (x *CreateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message // Deprecated: Use CreateVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*CreateVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{96} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{98} } func (x *CreateVReplicationWorkflowRequest) GetWorkflow() string { @@ -4912,6 +5072,13 @@ func (x *CreateVReplicationWorkflowRequest) GetStopAfterCopy() bool { return false } +func (x *CreateVReplicationWorkflowRequest) GetOptions() string { + if x != nil { + return x.Options + } + return "" +} + type CreateVReplicationWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4923,7 +5090,7 @@ type CreateVReplicationWorkflowResponse struct { func (x *CreateVReplicationWorkflowResponse) Reset() { *x = CreateVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[97] + mi := &file_tabletmanagerdata_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4936,7 +5103,7 @@ func (x *CreateVReplicationWorkflowResponse) String() string { func (*CreateVReplicationWorkflowResponse) ProtoMessage() {} func (x *CreateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[97] + mi := &file_tabletmanagerdata_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4949,7 +5116,7 @@ func (x *CreateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message // Deprecated: Use CreateVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*CreateVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{97} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{99} } func (x *CreateVReplicationWorkflowResponse) GetResult() *query.QueryResult { @@ -4970,7 +5137,7 @@ type DeleteVReplicationWorkflowRequest struct { func (x *DeleteVReplicationWorkflowRequest) Reset() { *x = DeleteVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[98] + mi := &file_tabletmanagerdata_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4983,7 +5150,7 @@ func (x *DeleteVReplicationWorkflowRequest) String() string { func (*DeleteVReplicationWorkflowRequest) ProtoMessage() {} func (x *DeleteVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[98] + mi := &file_tabletmanagerdata_proto_msgTypes[100] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4996,7 +5163,7 @@ func (x *DeleteVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message // Deprecated: Use DeleteVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*DeleteVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{98} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{100} } func (x *DeleteVReplicationWorkflowRequest) GetWorkflow() string { @@ -5017,7 +5184,7 @@ type DeleteVReplicationWorkflowResponse struct { func (x *DeleteVReplicationWorkflowResponse) Reset() { *x = DeleteVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[99] + mi := &file_tabletmanagerdata_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5030,7 +5197,7 @@ func (x *DeleteVReplicationWorkflowResponse) String() string { func (*DeleteVReplicationWorkflowResponse) ProtoMessage() {} func (x *DeleteVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[99] + mi := &file_tabletmanagerdata_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5043,7 +5210,7 @@ func (x *DeleteVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message // Deprecated: Use DeleteVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*DeleteVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{99} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101} } func (x *DeleteVReplicationWorkflowResponse) GetResult() *query.QueryResult { @@ -5053,31 +5220,29 @@ func (x *DeleteVReplicationWorkflowResponse) GetResult() *query.QueryResult { return nil } -type ReadVReplicationWorkflowRequest struct { +type HasVReplicationWorkflowsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` } -func (x *ReadVReplicationWorkflowRequest) Reset() { - *x = ReadVReplicationWorkflowRequest{} +func (x *HasVReplicationWorkflowsRequest) Reset() { + *x = HasVReplicationWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[100] + mi := &file_tabletmanagerdata_proto_msgTypes[102] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReadVReplicationWorkflowRequest) String() string { +func (x *HasVReplicationWorkflowsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReadVReplicationWorkflowRequest) ProtoMessage() {} +func (*HasVReplicationWorkflowsRequest) ProtoMessage() {} -func (x *ReadVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[100] +func (x *HasVReplicationWorkflowsRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[102] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5088,39 +5253,261 @@ func (x *ReadVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReadVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
-func (*ReadVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{100} -} - -func (x *ReadVReplicationWorkflowRequest) GetWorkflow() string { - if x != nil { - return x.Workflow - } - return "" +// Deprecated: Use HasVReplicationWorkflowsRequest.ProtoReflect.Descriptor instead. +func (*HasVReplicationWorkflowsRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{102} } -type ReadVReplicationWorkflowResponse struct { +type HasVReplicationWorkflowsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` - Cells string `protobuf:"bytes,3,opt,name=cells,proto3" json:"cells,omitempty"` - TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` - TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,5,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` - DbName string `protobuf:"bytes,6,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` + Has bool `protobuf:"varint,1,opt,name=has,proto3" json:"has,omitempty"` +} + +func (x *HasVReplicationWorkflowsResponse) Reset() { + *x = HasVReplicationWorkflowsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HasVReplicationWorkflowsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HasVReplicationWorkflowsResponse) ProtoMessage() {} + +func (x *HasVReplicationWorkflowsResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_tabletmanagerdata_proto_msgTypes[103] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HasVReplicationWorkflowsResponse.ProtoReflect.Descriptor instead. +func (*HasVReplicationWorkflowsResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{103} +} + +func (x *HasVReplicationWorkflowsResponse) GetHas() bool { + if x != nil { + return x.Has + } + return false +} + +type ReadVReplicationWorkflowsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IncludeIds []int32 `protobuf:"varint,1,rep,packed,name=include_ids,json=includeIds,proto3" json:"include_ids,omitempty"` + IncludeWorkflows []string `protobuf:"bytes,2,rep,name=include_workflows,json=includeWorkflows,proto3" json:"include_workflows,omitempty"` + IncludeStates []binlogdata.VReplicationWorkflowState `protobuf:"varint,3,rep,packed,name=include_states,json=includeStates,proto3,enum=binlogdata.VReplicationWorkflowState" json:"include_states,omitempty"` + ExcludeWorkflows []string `protobuf:"bytes,4,rep,name=exclude_workflows,json=excludeWorkflows,proto3" json:"exclude_workflows,omitempty"` + ExcludeStates []binlogdata.VReplicationWorkflowState `protobuf:"varint,5,rep,packed,name=exclude_states,json=excludeStates,proto3,enum=binlogdata.VReplicationWorkflowState" json:"exclude_states,omitempty"` + ExcludeFrozen bool `protobuf:"varint,6,opt,name=exclude_frozen,json=excludeFrozen,proto3" json:"exclude_frozen,omitempty"` +} + +func (x *ReadVReplicationWorkflowsRequest) Reset() { + *x = ReadVReplicationWorkflowsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[104] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*ReadVReplicationWorkflowsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVReplicationWorkflowsRequest) ProtoMessage() {} + +func (x *ReadVReplicationWorkflowsRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[104] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVReplicationWorkflowsRequest.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowsRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{104} +} + +func (x *ReadVReplicationWorkflowsRequest) GetIncludeIds() []int32 { + if x != nil { + return x.IncludeIds + } + return nil +} + +func (x *ReadVReplicationWorkflowsRequest) GetIncludeWorkflows() []string { + if x != nil { + return x.IncludeWorkflows + } + return nil +} + +func (x *ReadVReplicationWorkflowsRequest) GetIncludeStates() []binlogdata.VReplicationWorkflowState { + if x != nil { + return x.IncludeStates + } + return nil +} + +func (x *ReadVReplicationWorkflowsRequest) GetExcludeWorkflows() []string { + if x != nil { + return x.ExcludeWorkflows + } + return nil +} + +func (x *ReadVReplicationWorkflowsRequest) GetExcludeStates() []binlogdata.VReplicationWorkflowState { + if x != nil { + return x.ExcludeStates + } + return nil +} + +func (x *ReadVReplicationWorkflowsRequest) GetExcludeFrozen() bool { + if x != nil { + return x.ExcludeFrozen + } + return false +} + +type ReadVReplicationWorkflowsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflows []*ReadVReplicationWorkflowResponse `protobuf:"bytes,1,rep,name=workflows,proto3" json:"workflows,omitempty"` +} + +func (x *ReadVReplicationWorkflowsResponse) Reset() { + *x = ReadVReplicationWorkflowsResponse{} + 
if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[105] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVReplicationWorkflowsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVReplicationWorkflowsResponse) ProtoMessage() {} + +func (x *ReadVReplicationWorkflowsResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[105] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVReplicationWorkflowsResponse.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowsResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{105} +} + +func (x *ReadVReplicationWorkflowsResponse) GetWorkflows() []*ReadVReplicationWorkflowResponse { + if x != nil { + return x.Workflows + } + return nil +} + +type ReadVReplicationWorkflowRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` +} + +func (x *ReadVReplicationWorkflowRequest) Reset() { + *x = ReadVReplicationWorkflowRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[106] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadVReplicationWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReadVReplicationWorkflowRequest) ProtoMessage() {} + +func (x *ReadVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[106] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReadVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. +func (*ReadVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{106} +} + +func (x *ReadVReplicationWorkflowRequest) GetWorkflow() string { + if x != nil { + return x.Workflow + } + return "" +} + +type ReadVReplicationWorkflowResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Cells string `protobuf:"bytes,3,opt,name=cells,proto3" json:"cells,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,4,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference TabletSelectionPreference `protobuf:"varint,5,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + DbName string `protobuf:"bytes,6,opt,name=db_name,json=dbName,proto3" json:"db_name,omitempty"` Tags string `protobuf:"bytes,7,opt,name=tags,proto3" json:"tags,omitempty"` WorkflowType binlogdata.VReplicationWorkflowType `protobuf:"varint,8,opt,name=workflow_type,json=workflowType,proto3,enum=binlogdata.VReplicationWorkflowType" json:"workflow_type,omitempty"` WorkflowSubType binlogdata.VReplicationWorkflowSubType `protobuf:"varint,9,opt,name=workflow_sub_type,json=workflowSubType,proto3,enum=binlogdata.VReplicationWorkflowSubType" json:"workflow_sub_type,omitempty"` DeferSecondaryKeys bool `protobuf:"varint,10,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` Streams []*ReadVReplicationWorkflowResponse_Stream 
`protobuf:"bytes,11,rep,name=streams,proto3" json:"streams,omitempty"` + Options string `protobuf:"bytes,12,opt,name=options,proto3" json:"options,omitempty"` } func (x *ReadVReplicationWorkflowResponse) Reset() { *x = ReadVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[101] + mi := &file_tabletmanagerdata_proto_msgTypes[107] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5133,7 +5520,7 @@ func (x *ReadVReplicationWorkflowResponse) String() string { func (*ReadVReplicationWorkflowResponse) ProtoMessage() {} func (x *ReadVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[101] + mi := &file_tabletmanagerdata_proto_msgTypes[107] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5146,7 +5533,7 @@ func (x *ReadVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*ReadVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{107} } func (x *ReadVReplicationWorkflowResponse) GetWorkflow() string { @@ -5219,6 +5606,13 @@ func (x *ReadVReplicationWorkflowResponse) GetStreams() []*ReadVReplicationWorkf return nil } +func (x *ReadVReplicationWorkflowResponse) GetOptions() string { + if x != nil { + return x.Options + } + return "" +} + type VDiffRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5235,7 +5629,7 @@ type VDiffRequest struct { func (x *VDiffRequest) Reset() { *x = VDiffRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[102] + mi := &file_tabletmanagerdata_proto_msgTypes[108] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5248,7 +5642,7 @@ func (x *VDiffRequest) String() string { func (*VDiffRequest) ProtoMessage() {} func (x *VDiffRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[102] + mi := &file_tabletmanagerdata_proto_msgTypes[108] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5261,7 +5655,7 @@ func (x *VDiffRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffRequest.ProtoReflect.Descriptor instead. 
func (*VDiffRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{102} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{108} } func (x *VDiffRequest) GetKeyspace() string { @@ -5319,7 +5713,7 @@ type VDiffResponse struct { func (x *VDiffResponse) Reset() { *x = VDiffResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[103] + mi := &file_tabletmanagerdata_proto_msgTypes[109] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5332,7 +5726,7 @@ func (x *VDiffResponse) String() string { func (*VDiffResponse) ProtoMessage() {} func (x *VDiffResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[103] + mi := &file_tabletmanagerdata_proto_msgTypes[109] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5345,7 +5739,7 @@ func (x *VDiffResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffResponse.ProtoReflect.Descriptor instead. 
func (*VDiffResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{103} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{109} } func (x *VDiffResponse) GetId() int64 { @@ -5383,7 +5777,7 @@ type VDiffPickerOptions struct { func (x *VDiffPickerOptions) Reset() { *x = VDiffPickerOptions{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[104] + mi := &file_tabletmanagerdata_proto_msgTypes[110] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5396,7 +5790,7 @@ func (x *VDiffPickerOptions) String() string { func (*VDiffPickerOptions) ProtoMessage() {} func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[104] + mi := &file_tabletmanagerdata_proto_msgTypes[110] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5409,7 +5803,7 @@ func (x *VDiffPickerOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffPickerOptions.ProtoReflect.Descriptor instead. 
func (*VDiffPickerOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{104} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{110} } func (x *VDiffPickerOptions) GetTabletTypes() string { @@ -5439,15 +5833,16 @@ type VDiffReportOptions struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"` - DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` - Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + OnlyPks bool `protobuf:"varint,1,opt,name=only_pks,json=onlyPks,proto3" json:"only_pks,omitempty"` + DebugQuery bool `protobuf:"varint,2,opt,name=debug_query,json=debugQuery,proto3" json:"debug_query,omitempty"` + Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"` + MaxSampleRows int64 `protobuf:"varint,4,opt,name=max_sample_rows,json=maxSampleRows,proto3" json:"max_sample_rows,omitempty"` } func (x *VDiffReportOptions) Reset() { *x = VDiffReportOptions{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[105] + mi := &file_tabletmanagerdata_proto_msgTypes[111] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5460,7 +5855,7 @@ func (x *VDiffReportOptions) String() string { func (*VDiffReportOptions) ProtoMessage() {} func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[105] + mi := &file_tabletmanagerdata_proto_msgTypes[111] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5473,7 +5868,7 @@ func (x *VDiffReportOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffReportOptions.ProtoReflect.Descriptor instead. 
func (*VDiffReportOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{105} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{111} } func (x *VDiffReportOptions) GetOnlyPks() bool { @@ -5497,6 +5892,13 @@ func (x *VDiffReportOptions) GetFormat() string { return "" } +func (x *VDiffReportOptions) GetMaxSampleRows() int64 { + if x != nil { + return x.MaxSampleRows + } + return 0 +} + type VDiffCoreOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5510,12 +5912,13 @@ type VDiffCoreOptions struct { TimeoutSeconds int64 `protobuf:"varint,6,opt,name=timeout_seconds,json=timeoutSeconds,proto3" json:"timeout_seconds,omitempty"` MaxExtraRowsToCompare int64 `protobuf:"varint,7,opt,name=max_extra_rows_to_compare,json=maxExtraRowsToCompare,proto3" json:"max_extra_rows_to_compare,omitempty"` UpdateTableStats bool `protobuf:"varint,8,opt,name=update_table_stats,json=updateTableStats,proto3" json:"update_table_stats,omitempty"` + MaxDiffSeconds int64 `protobuf:"varint,9,opt,name=max_diff_seconds,json=maxDiffSeconds,proto3" json:"max_diff_seconds,omitempty"` } func (x *VDiffCoreOptions) Reset() { *x = VDiffCoreOptions{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[106] + mi := &file_tabletmanagerdata_proto_msgTypes[112] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5528,7 +5931,7 @@ func (x *VDiffCoreOptions) String() string { func (*VDiffCoreOptions) ProtoMessage() {} func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[106] + mi := &file_tabletmanagerdata_proto_msgTypes[112] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5541,7 +5944,7 @@ func (x *VDiffCoreOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffCoreOptions.ProtoReflect.Descriptor instead. 
func (*VDiffCoreOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{106} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{112} } func (x *VDiffCoreOptions) GetTables() string { @@ -5600,6 +6003,13 @@ func (x *VDiffCoreOptions) GetUpdateTableStats() bool { return false } +func (x *VDiffCoreOptions) GetMaxDiffSeconds() int64 { + if x != nil { + return x.MaxDiffSeconds + } + return 0 +} + type VDiffOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5613,7 +6023,7 @@ type VDiffOptions struct { func (x *VDiffOptions) Reset() { *x = VDiffOptions{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[107] + mi := &file_tabletmanagerdata_proto_msgTypes[113] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5626,7 +6036,7 @@ func (x *VDiffOptions) String() string { func (*VDiffOptions) ProtoMessage() {} func (x *VDiffOptions) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[107] + mi := &file_tabletmanagerdata_proto_msgTypes[113] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5639,7 +6049,7 @@ func (x *VDiffOptions) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffOptions.ProtoReflect.Descriptor instead. func (*VDiffOptions) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{107} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{113} } func (x *VDiffOptions) GetPickerOptions() *VDiffPickerOptions { @@ -5663,6 +6073,12 @@ func (x *VDiffOptions) GetReportOptions() *VDiffReportOptions { return nil } +// UpdateVReplicationWorkflowRequest is used to update an existing VReplication +// workflow. 
Note that the following fields MUST have an explicit value provided +// if you do NOT wish to update the existing value to the given type's ZeroValue: +// cells, tablet_types, on_ddl, and state. +// TODO: leverage the optional modifier for these fields rather than using SimulatedNull +// values: https://github.com/vitessio/vitess/issues/15627 type UpdateVReplicationWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5679,7 +6095,7 @@ type UpdateVReplicationWorkflowRequest struct { func (x *UpdateVReplicationWorkflowRequest) Reset() { *x = UpdateVReplicationWorkflowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[108] + mi := &file_tabletmanagerdata_proto_msgTypes[114] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5692,7 +6108,7 @@ func (x *UpdateVReplicationWorkflowRequest) String() string { func (*UpdateVReplicationWorkflowRequest) ProtoMessage() {} func (x *UpdateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[108] + mi := &file_tabletmanagerdata_proto_msgTypes[114] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5705,7 +6121,7 @@ func (x *UpdateVReplicationWorkflowRequest) ProtoReflect() protoreflect.Message // Deprecated: Use UpdateVReplicationWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*UpdateVReplicationWorkflowRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{108} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{114} } func (x *UpdateVReplicationWorkflowRequest) GetWorkflow() string { @@ -5761,7 +6177,7 @@ type UpdateVReplicationWorkflowResponse struct { func (x *UpdateVReplicationWorkflowResponse) Reset() { *x = UpdateVReplicationWorkflowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[109] + mi := &file_tabletmanagerdata_proto_msgTypes[115] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5774,7 +6190,7 @@ func (x *UpdateVReplicationWorkflowResponse) String() string { func (*UpdateVReplicationWorkflowResponse) ProtoMessage() {} func (x *UpdateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[109] + mi := &file_tabletmanagerdata_proto_msgTypes[115] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5787,7 +6203,7 @@ func (x *UpdateVReplicationWorkflowResponse) ProtoReflect() protoreflect.Message // Deprecated: Use UpdateVReplicationWorkflowResponse.ProtoReflect.Descriptor instead. func (*UpdateVReplicationWorkflowResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{109} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{115} } func (x *UpdateVReplicationWorkflowResponse) GetResult() *query.QueryResult { @@ -5797,6 +6213,146 @@ func (x *UpdateVReplicationWorkflowResponse) GetResult() *query.QueryResult { return nil } +// UpdateVReplicationWorkflowsRequest is used to update multiple existing VReplication +// workflows. Note that the following fields MUST have an explicit value provided +// if you do NOT wish to update the existing values to the given type's ZeroValue: +// state, message, and stop_position. 
+// TODO: leverage the optional modifier for these fields rather than using SimulatedNull +// values: https://github.com/vitessio/vitess/issues/15627 +type UpdateVReplicationWorkflowsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AllWorkflows bool `protobuf:"varint,1,opt,name=all_workflows,json=allWorkflows,proto3" json:"all_workflows,omitempty"` + IncludeWorkflows []string `protobuf:"bytes,2,rep,name=include_workflows,json=includeWorkflows,proto3" json:"include_workflows,omitempty"` + ExcludeWorkflows []string `protobuf:"bytes,3,rep,name=exclude_workflows,json=excludeWorkflows,proto3" json:"exclude_workflows,omitempty"` + State binlogdata.VReplicationWorkflowState `protobuf:"varint,4,opt,name=state,proto3,enum=binlogdata.VReplicationWorkflowState" json:"state,omitempty"` + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + StopPosition string `protobuf:"bytes,6,opt,name=stop_position,json=stopPosition,proto3" json:"stop_position,omitempty"` +} + +func (x *UpdateVReplicationWorkflowsRequest) Reset() { + *x = UpdateVReplicationWorkflowsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[116] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateVReplicationWorkflowsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVReplicationWorkflowsRequest) ProtoMessage() {} + +func (x *UpdateVReplicationWorkflowsRequest) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[116] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVReplicationWorkflowsRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateVReplicationWorkflowsRequest) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{116} +} + +func (x *UpdateVReplicationWorkflowsRequest) GetAllWorkflows() bool { + if x != nil { + return x.AllWorkflows + } + return false +} + +func (x *UpdateVReplicationWorkflowsRequest) GetIncludeWorkflows() []string { + if x != nil { + return x.IncludeWorkflows + } + return nil +} + +func (x *UpdateVReplicationWorkflowsRequest) GetExcludeWorkflows() []string { + if x != nil { + return x.ExcludeWorkflows + } + return nil +} + +func (x *UpdateVReplicationWorkflowsRequest) GetState() binlogdata.VReplicationWorkflowState { + if x != nil { + return x.State + } + return binlogdata.VReplicationWorkflowState(0) +} + +func (x *UpdateVReplicationWorkflowsRequest) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *UpdateVReplicationWorkflowsRequest) GetStopPosition() string { + if x != nil { + return x.StopPosition + } + return "" +} + +type UpdateVReplicationWorkflowsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Result *query.QueryResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *UpdateVReplicationWorkflowsResponse) Reset() { + *x = UpdateVReplicationWorkflowsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_tabletmanagerdata_proto_msgTypes[117] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateVReplicationWorkflowsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVReplicationWorkflowsResponse) ProtoMessage() {} + +func (x *UpdateVReplicationWorkflowsResponse) ProtoReflect() protoreflect.Message { + mi := &file_tabletmanagerdata_proto_msgTypes[117] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVReplicationWorkflowsResponse.ProtoReflect.Descriptor instead. +func (*UpdateVReplicationWorkflowsResponse) Descriptor() ([]byte, []int) { + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{117} +} + +func (x *UpdateVReplicationWorkflowsResponse) GetResult() *query.QueryResult { + if x != nil { + return x.Result + } + return nil +} + type ResetSequencesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5808,7 +6364,7 @@ type ResetSequencesRequest struct { func (x *ResetSequencesRequest) Reset() { *x = ResetSequencesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[110] + mi := &file_tabletmanagerdata_proto_msgTypes[118] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5821,7 +6377,7 @@ func (x *ResetSequencesRequest) String() string { func (*ResetSequencesRequest) ProtoMessage() {} func (x *ResetSequencesRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[110] + mi := &file_tabletmanagerdata_proto_msgTypes[118] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5834,7 +6390,7 @@ func (x *ResetSequencesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ResetSequencesRequest.ProtoReflect.Descriptor instead. 
func (*ResetSequencesRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{110} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{118} } func (x *ResetSequencesRequest) GetTables() []string { @@ -5853,7 +6409,7 @@ type ResetSequencesResponse struct { func (x *ResetSequencesResponse) Reset() { *x = ResetSequencesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[111] + mi := &file_tabletmanagerdata_proto_msgTypes[119] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5866,7 +6422,7 @@ func (x *ResetSequencesResponse) String() string { func (*ResetSequencesResponse) ProtoMessage() {} func (x *ResetSequencesResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[111] + mi := &file_tabletmanagerdata_proto_msgTypes[119] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5879,7 +6435,7 @@ func (x *ResetSequencesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ResetSequencesResponse.ProtoReflect.Descriptor instead. 
func (*ResetSequencesResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{111} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{119} } type CheckThrottlerRequest struct { @@ -5893,7 +6449,7 @@ type CheckThrottlerRequest struct { func (x *CheckThrottlerRequest) Reset() { *x = CheckThrottlerRequest{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[112] + mi := &file_tabletmanagerdata_proto_msgTypes[120] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5906,7 +6462,7 @@ func (x *CheckThrottlerRequest) String() string { func (*CheckThrottlerRequest) ProtoMessage() {} func (x *CheckThrottlerRequest) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[112] + mi := &file_tabletmanagerdata_proto_msgTypes[120] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5919,7 +6475,7 @@ func (x *CheckThrottlerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckThrottlerRequest.ProtoReflect.Descriptor instead. 
func (*CheckThrottlerRequest) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{112} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{120} } func (x *CheckThrottlerRequest) GetAppName() string { @@ -5952,7 +6508,7 @@ type CheckThrottlerResponse struct { func (x *CheckThrottlerResponse) Reset() { *x = CheckThrottlerResponse{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[113] + mi := &file_tabletmanagerdata_proto_msgTypes[121] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5965,7 +6521,7 @@ func (x *CheckThrottlerResponse) String() string { func (*CheckThrottlerResponse) ProtoMessage() {} func (x *CheckThrottlerResponse) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[113] + mi := &file_tabletmanagerdata_proto_msgTypes[121] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5978,7 +6534,7 @@ func (x *CheckThrottlerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckThrottlerResponse.ProtoReflect.Descriptor instead. 
func (*CheckThrottlerResponse) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{113} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{121} } func (x *CheckThrottlerResponse) GetStatusCode() int32 { @@ -6047,7 +6603,7 @@ type ReadVReplicationWorkflowResponse_Stream struct { func (x *ReadVReplicationWorkflowResponse_Stream) Reset() { *x = ReadVReplicationWorkflowResponse_Stream{} if protoimpl.UnsafeEnabled { - mi := &file_tabletmanagerdata_proto_msgTypes[117] + mi := &file_tabletmanagerdata_proto_msgTypes[125] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6060,7 +6616,7 @@ func (x *ReadVReplicationWorkflowResponse_Stream) String() string { func (*ReadVReplicationWorkflowResponse_Stream) ProtoMessage() {} func (x *ReadVReplicationWorkflowResponse_Stream) ProtoReflect() protoreflect.Message { - mi := &file_tabletmanagerdata_proto_msgTypes[117] + mi := &file_tabletmanagerdata_proto_msgTypes[125] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6073,7 +6629,7 @@ func (x *ReadVReplicationWorkflowResponse_Stream) ProtoReflect() protoreflect.Me // Deprecated: Use ReadVReplicationWorkflowResponse_Stream.ProtoReflect.Descriptor instead. 
func (*ReadVReplicationWorkflowResponse_Stream) Descriptor() ([]byte, []int) { - return file_tabletmanagerdata_proto_rawDescGZIP(), []int{101, 0} + return file_tabletmanagerdata_proto_rawDescGZIP(), []int{107, 0} } func (x *ReadVReplicationWorkflowResponse_Stream) GetId() int32 { @@ -6350,7 +6906,7 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xf2, 0x02, 0x0a, 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, @@ -6369,542 +6925,640 @@ var file_tabletmanagerdata_proto_rawDesc = []byte{ 0x65, 0x6d, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x71, 0x6c, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x71, 0x6c, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa7, 0x01, - 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x53, 
0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x46, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, - 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x14, 0x0a, 0x12, - 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x55, 0x6e, 0x6c, - 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, - 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, - 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, - 0x6f, 0x77, 0x73, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 
0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, - 0x64, 0x22, 0x42, 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xb2, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3b, 0x0a, + 0x1a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, + 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, + 0x6e, 0x4b, 0x65, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x13, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x46, 0x0a, 0x0c, + 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 
0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x22, 0x13, 0x0a, 0x11, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x6f, 0x63, + 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x15, 0x0a, 0x13, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x16, 0x0a, 0x14, 0x55, 0x6e, 0x6c, 0x6f, 0x63, 0x6b, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8d, + 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, + 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, + 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x22, 0x42, + 0x0a, 0x14, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x22, 0xef, 
0x01, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, + 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xf0, 0x01, + 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 
0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x71, + 0x6c, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, + 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, + 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x3b, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, + 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x22, 0x4e, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x22, 0x8e, 0x01, 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, + 
0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x47, 0x0a, 0x19, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x1d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x64, - 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 
0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0x4c, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x22, 0x4b, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x22, - 0x47, 0x0a, 0x19, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x15, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x16, - 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 
0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, - 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x22, 0x4c, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, + 0x4b, 0x0a, 0x18, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x22, 0x47, 0x0a, 0x19, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x4c, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0x16, 0x0a, 0x14, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x15, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x36, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x35, 0x0a, 0x17, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 
0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x34, 0x0a, 0x16, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x35, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, - 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, - 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, - 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, - 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, - 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, - 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0x4b, 0x0a, 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, - 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, - 0x1e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, - 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x30, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, - 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, - 0x63, 0x22, 0x31, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, - 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x3a, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, - 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x31, 0x0a, 0x14, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x21, 0x0a, 0x1f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x19, 0x0a, 0x17, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x53, 0x74, + 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x5e, 0x0a, 0x1d, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, + 0x3c, 0x0a, 0x1e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, + 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, + 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, + 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x62, 0x0a, 0x21, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x22, 0x2b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 
0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x19, 0x0a, + 0x17, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x65, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x0a, 0x17, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x46, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x4b, 0x0a, + 0x1d, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x1e, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, + 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, 0x12, + 0x49, 0x6e, 0x69, 0x74, 0x50, 
0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x31, + 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0xd8, 0x01, 0x0a, 0x1e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, + 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, + 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x21, 0x0a, 0x1f, + 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 
0x61, 0x72, 0x65, 0x6e, 0x74, + 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xba, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x15, 0x0a, 0x13, + 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x64, 0x0a, 0x15, 0x44, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, + 0x72, 
0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, 0x0a, 0x19, 0x55, 0x6e, 0x64, + 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, + 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x13, 0x0a, 0x11, + 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 
0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x9c, 0x02, 0x0a, + 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x77, + 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2d, 0x0a, 0x12, + 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, + 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x53, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4b, 0x0a, 0x1a, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, - 0x15, 0x0a, 0x13, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x64, - 0x0a, 0x15, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0e, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1e, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x50, 0x72, 
0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0d, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, - 0x08, 0x01, 0x10, 0x02, 0x22, 0x36, 0x0a, 0x18, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x1b, 0x0a, 0x19, - 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, 0x73, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x13, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x12, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 
0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, - 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0xed, 0x01, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x26, - 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x4e, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, - 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, - 0x1e, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x4b, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, - 
0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, 0x1b, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, 0x53, - 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, - 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x58, 0x0a, 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x24, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, 0x0a, 0x23, 0x53, - 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, - 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, - 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 
0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, 0x0a, - 0x16, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, - 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, - 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, - 0x65, 0x22, 0x36, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xc8, 0x01, 
0x0a, 0x18, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, - 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, - 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, - 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, - 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xd4, 0x04, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7e, 0x0a, 0x22, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x70, 
0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, + 0x15, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x52, 0x13, 0x73, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x6b, 0x0a, 0x23, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x22, 0x33, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x73, 0x65, 0x6d, 0x69, 0x53, 0x79, 0x6e, 0x63, 0x22, 0x34, 0x0a, 0x16, 0x50, 0x72, 0x6f, + 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 
0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0xab, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x22, 0x36, 0x0a, + 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xc8, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 
0x6f, 0x5f, + 0x70, 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, + 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, + 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x22, 0x41, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, + 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x22, 0xee, 0x04, 0x0a, 0x21, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 
0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, + 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 
0x53, 0x75, 0x62, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, + 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, + 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, + 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3f, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x50, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x21, 0x0a, 0x1f, 0x48, 0x61, 0x73, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x34, 0x0a, 0x20, + 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x68, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x68, + 0x61, 0x73, 0x22, 0xe0, 0x02, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x25, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 
0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x12, 0x4c, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, + 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x22, 0x76, 0x0a, 0x21, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x09, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x3d, 0x0a, + 0x1f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0xae, 0x09, 0x0a, + 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 
0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, + 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, + 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x1a, 0xc1, 0x04, 0x0a, 0x06, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x62, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, + 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x62, 0x6c, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, + 0x6f, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, + 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x6d, 0x61, 0x78, 0x54, 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 
0x6e, 0x6c, + 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, + 0x65, 0x64, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, + 0x69, 0x6d, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0xd7, 0x01, + 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 
0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, + 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, + 0x75, 0x69, 0x64, 0x22, 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, + 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 
0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, + 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0x90, + 0x01, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, + 0x73, 0x22, 0xda, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x19, 0x0a, + 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, + 
0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x70, + 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x38, 0x0a, 0x19, + 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, + 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x69, 0x66, 0x66, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x6d, 0x61, 0x78, 0x44, 0x69, 0x66, 0x66, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0xf2, + 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, + 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, + 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, + 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0xef, 0x02, 0x0a, 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 
0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, + 0x64, 0x6c, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4a, + 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x50, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x9f, 0x02, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x77, 
0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, + 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 
0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x64, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x3b, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, - 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, - 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 
0x74, - 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, - 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x22, 0x50, - 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0x3f, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x22, 0x50, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x22, 0x3d, 0x0a, 0x1f, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x22, 0x94, 0x09, 0x0a, 0x20, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, - 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x49, 0x0a, - 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 
0x79, 0x70, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, - 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x54, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0xc1, 0x04, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x2a, 0x0a, 0x03, 0x62, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x62, 
0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x03, 0x62, 0x6c, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x6f, 0x73, 0x12, 0x19, - 0x0a, 0x08, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x78, - 0x5f, 0x74, 0x70, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x54, - 0x70, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x11, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, - 0x61, 0x67, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x3b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x33, - 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, - 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, - 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0xd7, 0x01, 0x0a, 0x0c, 0x56, 0x44, - 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x09, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, - 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, - 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, - 0x69, 0x66, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x6a, 0x0a, 0x0d, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x76, 0x64, 0x69, 0x66, 0x66, 0x55, 0x75, 0x69, 0x64, 0x22, - 0x79, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x74, 0x61, 0x72, 
0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x22, 0x68, 0x0a, 0x12, 0x56, 0x44, - 0x69, 0x66, 0x66, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x6b, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, - 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, - 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x74, 0x72, 0x79, - 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, - 0x65, 0x5f, 0x70, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x50, 0x63, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, - 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 
0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, - 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, - 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x70, 0x69, 0x63, 0x6b, - 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x50, 0x69, 0x63, 0x6b, 0x65, 0x72, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 0x70, 0x69, 0x63, 0x6b, 0x65, 0x72, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x72, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, - 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0d, 
0x72, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe9, 0x02, 0x0a, - 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, - 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x6f, - 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x62, 0x69, - 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x3b, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 
0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x69, 0x6e, - 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x50, 0x0a, 0x22, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, - 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2f, 0x0a, 0x15, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, - 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, - 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x16, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 
0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, - 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, - 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, - 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x65, 0x64, 0x2a, 0x3e, 0x0a, 0x19, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, - 0x4f, 0x52, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x03, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, + 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, + 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x51, 0x0a, 0x23, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2a, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x2f, 0x0a, 0x15, + 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, + 0x16, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x16, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 
0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x72, + 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2a, 0x3e, 0x0a, 0x19, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, + 0x49, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x03, 0x42, 0x30, 0x5a, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -6920,7 +7574,7 @@ func file_tabletmanagerdata_proto_rawDescGZIP() []byte { } var file_tabletmanagerdata_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_tabletmanagerdata_proto_msgTypes = make([]protoimpl.MessageInfo, 118) +var file_tabletmanagerdata_proto_msgTypes = make([]protoimpl.MessageInfo, 126) var file_tabletmanagerdata_proto_goTypes = []interface{}{ (TabletSelectionPreference)(0), // 0: tabletmanagerdata.TabletSelectionPreference (*TableDefinition)(nil), // 1: tabletmanagerdata.TableDefinition @@ -6963,173 +7617,187 @@ var 
file_tabletmanagerdata_proto_goTypes = []interface{}{ (*ExecuteQueryResponse)(nil), // 38: tabletmanagerdata.ExecuteQueryResponse (*ExecuteFetchAsDbaRequest)(nil), // 39: tabletmanagerdata.ExecuteFetchAsDbaRequest (*ExecuteFetchAsDbaResponse)(nil), // 40: tabletmanagerdata.ExecuteFetchAsDbaResponse - (*ExecuteFetchAsAllPrivsRequest)(nil), // 41: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - (*ExecuteFetchAsAllPrivsResponse)(nil), // 42: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - (*ExecuteFetchAsAppRequest)(nil), // 43: tabletmanagerdata.ExecuteFetchAsAppRequest - (*ExecuteFetchAsAppResponse)(nil), // 44: tabletmanagerdata.ExecuteFetchAsAppResponse - (*ReplicationStatusRequest)(nil), // 45: tabletmanagerdata.ReplicationStatusRequest - (*ReplicationStatusResponse)(nil), // 46: tabletmanagerdata.ReplicationStatusResponse - (*PrimaryStatusRequest)(nil), // 47: tabletmanagerdata.PrimaryStatusRequest - (*PrimaryStatusResponse)(nil), // 48: tabletmanagerdata.PrimaryStatusResponse - (*PrimaryPositionRequest)(nil), // 49: tabletmanagerdata.PrimaryPositionRequest - (*PrimaryPositionResponse)(nil), // 50: tabletmanagerdata.PrimaryPositionResponse - (*WaitForPositionRequest)(nil), // 51: tabletmanagerdata.WaitForPositionRequest - (*WaitForPositionResponse)(nil), // 52: tabletmanagerdata.WaitForPositionResponse - (*StopReplicationRequest)(nil), // 53: tabletmanagerdata.StopReplicationRequest - (*StopReplicationResponse)(nil), // 54: tabletmanagerdata.StopReplicationResponse - (*StopReplicationMinimumRequest)(nil), // 55: tabletmanagerdata.StopReplicationMinimumRequest - (*StopReplicationMinimumResponse)(nil), // 56: tabletmanagerdata.StopReplicationMinimumResponse - (*StartReplicationRequest)(nil), // 57: tabletmanagerdata.StartReplicationRequest - (*StartReplicationResponse)(nil), // 58: tabletmanagerdata.StartReplicationResponse - (*StartReplicationUntilAfterRequest)(nil), // 59: tabletmanagerdata.StartReplicationUntilAfterRequest - 
(*StartReplicationUntilAfterResponse)(nil), // 60: tabletmanagerdata.StartReplicationUntilAfterResponse - (*GetReplicasRequest)(nil), // 61: tabletmanagerdata.GetReplicasRequest - (*GetReplicasResponse)(nil), // 62: tabletmanagerdata.GetReplicasResponse - (*ResetReplicationRequest)(nil), // 63: tabletmanagerdata.ResetReplicationRequest - (*ResetReplicationResponse)(nil), // 64: tabletmanagerdata.ResetReplicationResponse - (*VReplicationExecRequest)(nil), // 65: tabletmanagerdata.VReplicationExecRequest - (*VReplicationExecResponse)(nil), // 66: tabletmanagerdata.VReplicationExecResponse - (*VReplicationWaitForPosRequest)(nil), // 67: tabletmanagerdata.VReplicationWaitForPosRequest - (*VReplicationWaitForPosResponse)(nil), // 68: tabletmanagerdata.VReplicationWaitForPosResponse - (*InitPrimaryRequest)(nil), // 69: tabletmanagerdata.InitPrimaryRequest - (*InitPrimaryResponse)(nil), // 70: tabletmanagerdata.InitPrimaryResponse - (*PopulateReparentJournalRequest)(nil), // 71: tabletmanagerdata.PopulateReparentJournalRequest - (*PopulateReparentJournalResponse)(nil), // 72: tabletmanagerdata.PopulateReparentJournalResponse - (*InitReplicaRequest)(nil), // 73: tabletmanagerdata.InitReplicaRequest - (*InitReplicaResponse)(nil), // 74: tabletmanagerdata.InitReplicaResponse - (*DemotePrimaryRequest)(nil), // 75: tabletmanagerdata.DemotePrimaryRequest - (*DemotePrimaryResponse)(nil), // 76: tabletmanagerdata.DemotePrimaryResponse - (*UndoDemotePrimaryRequest)(nil), // 77: tabletmanagerdata.UndoDemotePrimaryRequest - (*UndoDemotePrimaryResponse)(nil), // 78: tabletmanagerdata.UndoDemotePrimaryResponse - (*ReplicaWasPromotedRequest)(nil), // 79: tabletmanagerdata.ReplicaWasPromotedRequest - (*ReplicaWasPromotedResponse)(nil), // 80: tabletmanagerdata.ReplicaWasPromotedResponse - (*ResetReplicationParametersRequest)(nil), // 81: tabletmanagerdata.ResetReplicationParametersRequest - (*ResetReplicationParametersResponse)(nil), // 82: 
tabletmanagerdata.ResetReplicationParametersResponse - (*FullStatusRequest)(nil), // 83: tabletmanagerdata.FullStatusRequest - (*FullStatusResponse)(nil), // 84: tabletmanagerdata.FullStatusResponse - (*SetReplicationSourceRequest)(nil), // 85: tabletmanagerdata.SetReplicationSourceRequest - (*SetReplicationSourceResponse)(nil), // 86: tabletmanagerdata.SetReplicationSourceResponse - (*ReplicaWasRestartedRequest)(nil), // 87: tabletmanagerdata.ReplicaWasRestartedRequest - (*ReplicaWasRestartedResponse)(nil), // 88: tabletmanagerdata.ReplicaWasRestartedResponse - (*StopReplicationAndGetStatusRequest)(nil), // 89: tabletmanagerdata.StopReplicationAndGetStatusRequest - (*StopReplicationAndGetStatusResponse)(nil), // 90: tabletmanagerdata.StopReplicationAndGetStatusResponse - (*PromoteReplicaRequest)(nil), // 91: tabletmanagerdata.PromoteReplicaRequest - (*PromoteReplicaResponse)(nil), // 92: tabletmanagerdata.PromoteReplicaResponse - (*BackupRequest)(nil), // 93: tabletmanagerdata.BackupRequest - (*BackupResponse)(nil), // 94: tabletmanagerdata.BackupResponse - (*RestoreFromBackupRequest)(nil), // 95: tabletmanagerdata.RestoreFromBackupRequest - (*RestoreFromBackupResponse)(nil), // 96: tabletmanagerdata.RestoreFromBackupResponse - (*CreateVReplicationWorkflowRequest)(nil), // 97: tabletmanagerdata.CreateVReplicationWorkflowRequest - (*CreateVReplicationWorkflowResponse)(nil), // 98: tabletmanagerdata.CreateVReplicationWorkflowResponse - (*DeleteVReplicationWorkflowRequest)(nil), // 99: tabletmanagerdata.DeleteVReplicationWorkflowRequest - (*DeleteVReplicationWorkflowResponse)(nil), // 100: tabletmanagerdata.DeleteVReplicationWorkflowResponse - (*ReadVReplicationWorkflowRequest)(nil), // 101: tabletmanagerdata.ReadVReplicationWorkflowRequest - (*ReadVReplicationWorkflowResponse)(nil), // 102: tabletmanagerdata.ReadVReplicationWorkflowResponse - (*VDiffRequest)(nil), // 103: tabletmanagerdata.VDiffRequest - (*VDiffResponse)(nil), // 104: tabletmanagerdata.VDiffResponse 
- (*VDiffPickerOptions)(nil), // 105: tabletmanagerdata.VDiffPickerOptions - (*VDiffReportOptions)(nil), // 106: tabletmanagerdata.VDiffReportOptions - (*VDiffCoreOptions)(nil), // 107: tabletmanagerdata.VDiffCoreOptions - (*VDiffOptions)(nil), // 108: tabletmanagerdata.VDiffOptions - (*UpdateVReplicationWorkflowRequest)(nil), // 109: tabletmanagerdata.UpdateVReplicationWorkflowRequest - (*UpdateVReplicationWorkflowResponse)(nil), // 110: tabletmanagerdata.UpdateVReplicationWorkflowResponse - (*ResetSequencesRequest)(nil), // 111: tabletmanagerdata.ResetSequencesRequest - (*ResetSequencesResponse)(nil), // 112: tabletmanagerdata.ResetSequencesResponse - (*CheckThrottlerRequest)(nil), // 113: tabletmanagerdata.CheckThrottlerRequest - (*CheckThrottlerResponse)(nil), // 114: tabletmanagerdata.CheckThrottlerResponse - nil, // 115: tabletmanagerdata.UserPermission.PrivilegesEntry - nil, // 116: tabletmanagerdata.DbPermission.PrivilegesEntry - nil, // 117: tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry - (*ReadVReplicationWorkflowResponse_Stream)(nil), // 118: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream - (*query.Field)(nil), // 119: query.Field - (topodata.TabletType)(0), // 120: topodata.TabletType - (*vtrpc.CallerID)(nil), // 121: vtrpc.CallerID - (*query.QueryResult)(nil), // 122: query.QueryResult - (*replicationdata.Status)(nil), // 123: replicationdata.Status - (*replicationdata.PrimaryStatus)(nil), // 124: replicationdata.PrimaryStatus - (*topodata.TabletAlias)(nil), // 125: topodata.TabletAlias - (*replicationdata.FullStatus)(nil), // 126: replicationdata.FullStatus - (replicationdata.StopReplicationMode)(0), // 127: replicationdata.StopReplicationMode - (*replicationdata.StopReplicationStatus)(nil), // 128: replicationdata.StopReplicationStatus - (*logutil.Event)(nil), // 129: logutil.Event - (*vttime.Time)(nil), // 130: vttime.Time - (*binlogdata.BinlogSource)(nil), // 131: binlogdata.BinlogSource - (binlogdata.VReplicationWorkflowType)(0), 
// 132: binlogdata.VReplicationWorkflowType - (binlogdata.VReplicationWorkflowSubType)(0), // 133: binlogdata.VReplicationWorkflowSubType - (binlogdata.OnDDLAction)(0), // 134: binlogdata.OnDDLAction - (binlogdata.VReplicationWorkflowState)(0), // 135: binlogdata.VReplicationWorkflowState + (*ExecuteMultiFetchAsDbaRequest)(nil), // 41: tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + (*ExecuteMultiFetchAsDbaResponse)(nil), // 42: tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + (*ExecuteFetchAsAllPrivsRequest)(nil), // 43: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + (*ExecuteFetchAsAllPrivsResponse)(nil), // 44: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + (*ExecuteFetchAsAppRequest)(nil), // 45: tabletmanagerdata.ExecuteFetchAsAppRequest + (*ExecuteFetchAsAppResponse)(nil), // 46: tabletmanagerdata.ExecuteFetchAsAppResponse + (*ReplicationStatusRequest)(nil), // 47: tabletmanagerdata.ReplicationStatusRequest + (*ReplicationStatusResponse)(nil), // 48: tabletmanagerdata.ReplicationStatusResponse + (*PrimaryStatusRequest)(nil), // 49: tabletmanagerdata.PrimaryStatusRequest + (*PrimaryStatusResponse)(nil), // 50: tabletmanagerdata.PrimaryStatusResponse + (*PrimaryPositionRequest)(nil), // 51: tabletmanagerdata.PrimaryPositionRequest + (*PrimaryPositionResponse)(nil), // 52: tabletmanagerdata.PrimaryPositionResponse + (*WaitForPositionRequest)(nil), // 53: tabletmanagerdata.WaitForPositionRequest + (*WaitForPositionResponse)(nil), // 54: tabletmanagerdata.WaitForPositionResponse + (*StopReplicationRequest)(nil), // 55: tabletmanagerdata.StopReplicationRequest + (*StopReplicationResponse)(nil), // 56: tabletmanagerdata.StopReplicationResponse + (*StopReplicationMinimumRequest)(nil), // 57: tabletmanagerdata.StopReplicationMinimumRequest + (*StopReplicationMinimumResponse)(nil), // 58: tabletmanagerdata.StopReplicationMinimumResponse + (*StartReplicationRequest)(nil), // 59: tabletmanagerdata.StartReplicationRequest + (*StartReplicationResponse)(nil), // 
60: tabletmanagerdata.StartReplicationResponse + (*StartReplicationUntilAfterRequest)(nil), // 61: tabletmanagerdata.StartReplicationUntilAfterRequest + (*StartReplicationUntilAfterResponse)(nil), // 62: tabletmanagerdata.StartReplicationUntilAfterResponse + (*GetReplicasRequest)(nil), // 63: tabletmanagerdata.GetReplicasRequest + (*GetReplicasResponse)(nil), // 64: tabletmanagerdata.GetReplicasResponse + (*ResetReplicationRequest)(nil), // 65: tabletmanagerdata.ResetReplicationRequest + (*ResetReplicationResponse)(nil), // 66: tabletmanagerdata.ResetReplicationResponse + (*VReplicationExecRequest)(nil), // 67: tabletmanagerdata.VReplicationExecRequest + (*VReplicationExecResponse)(nil), // 68: tabletmanagerdata.VReplicationExecResponse + (*VReplicationWaitForPosRequest)(nil), // 69: tabletmanagerdata.VReplicationWaitForPosRequest + (*VReplicationWaitForPosResponse)(nil), // 70: tabletmanagerdata.VReplicationWaitForPosResponse + (*InitPrimaryRequest)(nil), // 71: tabletmanagerdata.InitPrimaryRequest + (*InitPrimaryResponse)(nil), // 72: tabletmanagerdata.InitPrimaryResponse + (*PopulateReparentJournalRequest)(nil), // 73: tabletmanagerdata.PopulateReparentJournalRequest + (*PopulateReparentJournalResponse)(nil), // 74: tabletmanagerdata.PopulateReparentJournalResponse + (*InitReplicaRequest)(nil), // 75: tabletmanagerdata.InitReplicaRequest + (*InitReplicaResponse)(nil), // 76: tabletmanagerdata.InitReplicaResponse + (*DemotePrimaryRequest)(nil), // 77: tabletmanagerdata.DemotePrimaryRequest + (*DemotePrimaryResponse)(nil), // 78: tabletmanagerdata.DemotePrimaryResponse + (*UndoDemotePrimaryRequest)(nil), // 79: tabletmanagerdata.UndoDemotePrimaryRequest + (*UndoDemotePrimaryResponse)(nil), // 80: tabletmanagerdata.UndoDemotePrimaryResponse + (*ReplicaWasPromotedRequest)(nil), // 81: tabletmanagerdata.ReplicaWasPromotedRequest + (*ReplicaWasPromotedResponse)(nil), // 82: tabletmanagerdata.ReplicaWasPromotedResponse + (*ResetReplicationParametersRequest)(nil), // 
83: tabletmanagerdata.ResetReplicationParametersRequest + (*ResetReplicationParametersResponse)(nil), // 84: tabletmanagerdata.ResetReplicationParametersResponse + (*FullStatusRequest)(nil), // 85: tabletmanagerdata.FullStatusRequest + (*FullStatusResponse)(nil), // 86: tabletmanagerdata.FullStatusResponse + (*SetReplicationSourceRequest)(nil), // 87: tabletmanagerdata.SetReplicationSourceRequest + (*SetReplicationSourceResponse)(nil), // 88: tabletmanagerdata.SetReplicationSourceResponse + (*ReplicaWasRestartedRequest)(nil), // 89: tabletmanagerdata.ReplicaWasRestartedRequest + (*ReplicaWasRestartedResponse)(nil), // 90: tabletmanagerdata.ReplicaWasRestartedResponse + (*StopReplicationAndGetStatusRequest)(nil), // 91: tabletmanagerdata.StopReplicationAndGetStatusRequest + (*StopReplicationAndGetStatusResponse)(nil), // 92: tabletmanagerdata.StopReplicationAndGetStatusResponse + (*PromoteReplicaRequest)(nil), // 93: tabletmanagerdata.PromoteReplicaRequest + (*PromoteReplicaResponse)(nil), // 94: tabletmanagerdata.PromoteReplicaResponse + (*BackupRequest)(nil), // 95: tabletmanagerdata.BackupRequest + (*BackupResponse)(nil), // 96: tabletmanagerdata.BackupResponse + (*RestoreFromBackupRequest)(nil), // 97: tabletmanagerdata.RestoreFromBackupRequest + (*RestoreFromBackupResponse)(nil), // 98: tabletmanagerdata.RestoreFromBackupResponse + (*CreateVReplicationWorkflowRequest)(nil), // 99: tabletmanagerdata.CreateVReplicationWorkflowRequest + (*CreateVReplicationWorkflowResponse)(nil), // 100: tabletmanagerdata.CreateVReplicationWorkflowResponse + (*DeleteVReplicationWorkflowRequest)(nil), // 101: tabletmanagerdata.DeleteVReplicationWorkflowRequest + (*DeleteVReplicationWorkflowResponse)(nil), // 102: tabletmanagerdata.DeleteVReplicationWorkflowResponse + (*HasVReplicationWorkflowsRequest)(nil), // 103: tabletmanagerdata.HasVReplicationWorkflowsRequest + (*HasVReplicationWorkflowsResponse)(nil), // 104: tabletmanagerdata.HasVReplicationWorkflowsResponse + 
(*ReadVReplicationWorkflowsRequest)(nil), // 105: tabletmanagerdata.ReadVReplicationWorkflowsRequest + (*ReadVReplicationWorkflowsResponse)(nil), // 106: tabletmanagerdata.ReadVReplicationWorkflowsResponse + (*ReadVReplicationWorkflowRequest)(nil), // 107: tabletmanagerdata.ReadVReplicationWorkflowRequest + (*ReadVReplicationWorkflowResponse)(nil), // 108: tabletmanagerdata.ReadVReplicationWorkflowResponse + (*VDiffRequest)(nil), // 109: tabletmanagerdata.VDiffRequest + (*VDiffResponse)(nil), // 110: tabletmanagerdata.VDiffResponse + (*VDiffPickerOptions)(nil), // 111: tabletmanagerdata.VDiffPickerOptions + (*VDiffReportOptions)(nil), // 112: tabletmanagerdata.VDiffReportOptions + (*VDiffCoreOptions)(nil), // 113: tabletmanagerdata.VDiffCoreOptions + (*VDiffOptions)(nil), // 114: tabletmanagerdata.VDiffOptions + (*UpdateVReplicationWorkflowRequest)(nil), // 115: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*UpdateVReplicationWorkflowResponse)(nil), // 116: tabletmanagerdata.UpdateVReplicationWorkflowResponse + (*UpdateVReplicationWorkflowsRequest)(nil), // 117: tabletmanagerdata.UpdateVReplicationWorkflowsRequest + (*UpdateVReplicationWorkflowsResponse)(nil), // 118: tabletmanagerdata.UpdateVReplicationWorkflowsResponse + (*ResetSequencesRequest)(nil), // 119: tabletmanagerdata.ResetSequencesRequest + (*ResetSequencesResponse)(nil), // 120: tabletmanagerdata.ResetSequencesResponse + (*CheckThrottlerRequest)(nil), // 121: tabletmanagerdata.CheckThrottlerRequest + (*CheckThrottlerResponse)(nil), // 122: tabletmanagerdata.CheckThrottlerResponse + nil, // 123: tabletmanagerdata.UserPermission.PrivilegesEntry + nil, // 124: tabletmanagerdata.DbPermission.PrivilegesEntry + nil, // 125: tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry + (*ReadVReplicationWorkflowResponse_Stream)(nil), // 126: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + (*query.Field)(nil), // 127: query.Field + (topodata.TabletType)(0), // 128: topodata.TabletType + 
(*vtrpc.CallerID)(nil), // 129: vtrpc.CallerID + (*query.QueryResult)(nil), // 130: query.QueryResult + (*replicationdata.Status)(nil), // 131: replicationdata.Status + (*replicationdata.PrimaryStatus)(nil), // 132: replicationdata.PrimaryStatus + (*topodata.TabletAlias)(nil), // 133: topodata.TabletAlias + (*replicationdata.FullStatus)(nil), // 134: replicationdata.FullStatus + (replicationdata.StopReplicationMode)(0), // 135: replicationdata.StopReplicationMode + (*replicationdata.StopReplicationStatus)(nil), // 136: replicationdata.StopReplicationStatus + (*logutil.Event)(nil), // 137: logutil.Event + (*vttime.Time)(nil), // 138: vttime.Time + (*binlogdata.BinlogSource)(nil), // 139: binlogdata.BinlogSource + (binlogdata.VReplicationWorkflowType)(0), // 140: binlogdata.VReplicationWorkflowType + (binlogdata.VReplicationWorkflowSubType)(0), // 141: binlogdata.VReplicationWorkflowSubType + (binlogdata.VReplicationWorkflowState)(0), // 142: binlogdata.VReplicationWorkflowState + (binlogdata.OnDDLAction)(0), // 143: binlogdata.OnDDLAction } var file_tabletmanagerdata_proto_depIdxs = []int32{ - 119, // 0: tabletmanagerdata.TableDefinition.fields:type_name -> query.Field + 127, // 0: tabletmanagerdata.TableDefinition.fields:type_name -> query.Field 1, // 1: tabletmanagerdata.SchemaDefinition.table_definitions:type_name -> tabletmanagerdata.TableDefinition 2, // 2: tabletmanagerdata.SchemaChangeResult.before_schema:type_name -> tabletmanagerdata.SchemaDefinition 2, // 3: tabletmanagerdata.SchemaChangeResult.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 115, // 4: tabletmanagerdata.UserPermission.privileges:type_name -> tabletmanagerdata.UserPermission.PrivilegesEntry - 116, // 5: tabletmanagerdata.DbPermission.privileges:type_name -> tabletmanagerdata.DbPermission.PrivilegesEntry + 123, // 4: tabletmanagerdata.UserPermission.privileges:type_name -> tabletmanagerdata.UserPermission.PrivilegesEntry + 124, // 5: 
tabletmanagerdata.DbPermission.privileges:type_name -> tabletmanagerdata.DbPermission.PrivilegesEntry 4, // 6: tabletmanagerdata.Permissions.user_permissions:type_name -> tabletmanagerdata.UserPermission 5, // 7: tabletmanagerdata.Permissions.db_permissions:type_name -> tabletmanagerdata.DbPermission - 117, // 8: tabletmanagerdata.ExecuteHookRequest.extra_env:type_name -> tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry + 125, // 8: tabletmanagerdata.ExecuteHookRequest.extra_env:type_name -> tabletmanagerdata.ExecuteHookRequest.ExtraEnvEntry 2, // 9: tabletmanagerdata.GetSchemaResponse.schema_definition:type_name -> tabletmanagerdata.SchemaDefinition 6, // 10: tabletmanagerdata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions - 120, // 11: tabletmanagerdata.ChangeTypeRequest.tablet_type:type_name -> topodata.TabletType + 128, // 11: tabletmanagerdata.ChangeTypeRequest.tablet_type:type_name -> topodata.TabletType 3, // 12: tabletmanagerdata.PreflightSchemaResponse.change_results:type_name -> tabletmanagerdata.SchemaChangeResult 2, // 13: tabletmanagerdata.ApplySchemaRequest.before_schema:type_name -> tabletmanagerdata.SchemaDefinition 2, // 14: tabletmanagerdata.ApplySchemaRequest.after_schema:type_name -> tabletmanagerdata.SchemaDefinition 2, // 15: tabletmanagerdata.ApplySchemaResponse.before_schema:type_name -> tabletmanagerdata.SchemaDefinition 2, // 16: tabletmanagerdata.ApplySchemaResponse.after_schema:type_name -> tabletmanagerdata.SchemaDefinition - 121, // 17: tabletmanagerdata.ExecuteQueryRequest.caller_id:type_name -> vtrpc.CallerID - 122, // 18: tabletmanagerdata.ExecuteQueryResponse.result:type_name -> query.QueryResult - 122, // 19: tabletmanagerdata.ExecuteFetchAsDbaResponse.result:type_name -> query.QueryResult - 122, // 20: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result:type_name -> query.QueryResult - 122, // 21: tabletmanagerdata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult - 123, // 22: 
tabletmanagerdata.ReplicationStatusResponse.status:type_name -> replicationdata.Status - 124, // 23: tabletmanagerdata.PrimaryStatusResponse.status:type_name -> replicationdata.PrimaryStatus - 122, // 24: tabletmanagerdata.VReplicationExecResponse.result:type_name -> query.QueryResult - 125, // 25: tabletmanagerdata.PopulateReparentJournalRequest.primary_alias:type_name -> topodata.TabletAlias - 125, // 26: tabletmanagerdata.InitReplicaRequest.parent:type_name -> topodata.TabletAlias - 124, // 27: tabletmanagerdata.DemotePrimaryResponse.primary_status:type_name -> replicationdata.PrimaryStatus - 126, // 28: tabletmanagerdata.FullStatusResponse.status:type_name -> replicationdata.FullStatus - 125, // 29: tabletmanagerdata.SetReplicationSourceRequest.parent:type_name -> topodata.TabletAlias - 125, // 30: tabletmanagerdata.ReplicaWasRestartedRequest.parent:type_name -> topodata.TabletAlias - 127, // 31: tabletmanagerdata.StopReplicationAndGetStatusRequest.stop_replication_mode:type_name -> replicationdata.StopReplicationMode - 128, // 32: tabletmanagerdata.StopReplicationAndGetStatusResponse.status:type_name -> replicationdata.StopReplicationStatus - 129, // 33: tabletmanagerdata.BackupResponse.event:type_name -> logutil.Event - 130, // 34: tabletmanagerdata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 130, // 35: tabletmanagerdata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time - 129, // 36: tabletmanagerdata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 131, // 37: tabletmanagerdata.CreateVReplicationWorkflowRequest.binlog_source:type_name -> binlogdata.BinlogSource - 120, // 38: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType - 0, // 39: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 132, // 40: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_type:type_name 
-> binlogdata.VReplicationWorkflowType - 133, // 41: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType - 122, // 42: tabletmanagerdata.CreateVReplicationWorkflowResponse.result:type_name -> query.QueryResult - 122, // 43: tabletmanagerdata.DeleteVReplicationWorkflowResponse.result:type_name -> query.QueryResult - 120, // 44: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_types:type_name -> topodata.TabletType - 0, // 45: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 132, // 46: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_type:type_name -> binlogdata.VReplicationWorkflowType - 133, // 47: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType - 118, // 48: tabletmanagerdata.ReadVReplicationWorkflowResponse.streams:type_name -> tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream - 108, // 49: tabletmanagerdata.VDiffRequest.options:type_name -> tabletmanagerdata.VDiffOptions - 122, // 50: tabletmanagerdata.VDiffResponse.output:type_name -> query.QueryResult - 105, // 51: tabletmanagerdata.VDiffOptions.picker_options:type_name -> tabletmanagerdata.VDiffPickerOptions - 107, // 52: tabletmanagerdata.VDiffOptions.core_options:type_name -> tabletmanagerdata.VDiffCoreOptions - 106, // 53: tabletmanagerdata.VDiffOptions.report_options:type_name -> tabletmanagerdata.VDiffReportOptions - 120, // 54: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType - 0, // 55: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 134, // 56: tabletmanagerdata.UpdateVReplicationWorkflowRequest.on_ddl:type_name -> binlogdata.OnDDLAction - 135, // 57: 
tabletmanagerdata.UpdateVReplicationWorkflowRequest.state:type_name -> binlogdata.VReplicationWorkflowState - 122, // 58: tabletmanagerdata.UpdateVReplicationWorkflowResponse.result:type_name -> query.QueryResult - 131, // 59: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.bls:type_name -> binlogdata.BinlogSource - 130, // 60: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_updated:type_name -> vttime.Time - 130, // 61: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.transaction_timestamp:type_name -> vttime.Time - 135, // 62: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.state:type_name -> binlogdata.VReplicationWorkflowState - 130, // 63: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_heartbeat:type_name -> vttime.Time - 130, // 64: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_throttled:type_name -> vttime.Time - 65, // [65:65] is the sub-list for method output_type - 65, // [65:65] is the sub-list for method input_type - 65, // [65:65] is the sub-list for extension type_name - 65, // [65:65] is the sub-list for extension extendee - 0, // [0:65] is the sub-list for field type_name + 129, // 17: tabletmanagerdata.ExecuteQueryRequest.caller_id:type_name -> vtrpc.CallerID + 130, // 18: tabletmanagerdata.ExecuteQueryResponse.result:type_name -> query.QueryResult + 130, // 19: tabletmanagerdata.ExecuteFetchAsDbaResponse.result:type_name -> query.QueryResult + 130, // 20: tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.results:type_name -> query.QueryResult + 130, // 21: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse.result:type_name -> query.QueryResult + 130, // 22: tabletmanagerdata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult + 131, // 23: tabletmanagerdata.ReplicationStatusResponse.status:type_name -> replicationdata.Status + 132, // 24: tabletmanagerdata.PrimaryStatusResponse.status:type_name -> replicationdata.PrimaryStatus + 130, // 25: 
tabletmanagerdata.VReplicationExecResponse.result:type_name -> query.QueryResult + 133, // 26: tabletmanagerdata.PopulateReparentJournalRequest.primary_alias:type_name -> topodata.TabletAlias + 133, // 27: tabletmanagerdata.InitReplicaRequest.parent:type_name -> topodata.TabletAlias + 132, // 28: tabletmanagerdata.DemotePrimaryResponse.primary_status:type_name -> replicationdata.PrimaryStatus + 134, // 29: tabletmanagerdata.FullStatusResponse.status:type_name -> replicationdata.FullStatus + 133, // 30: tabletmanagerdata.SetReplicationSourceRequest.parent:type_name -> topodata.TabletAlias + 133, // 31: tabletmanagerdata.ReplicaWasRestartedRequest.parent:type_name -> topodata.TabletAlias + 135, // 32: tabletmanagerdata.StopReplicationAndGetStatusRequest.stop_replication_mode:type_name -> replicationdata.StopReplicationMode + 136, // 33: tabletmanagerdata.StopReplicationAndGetStatusResponse.status:type_name -> replicationdata.StopReplicationStatus + 137, // 34: tabletmanagerdata.BackupResponse.event:type_name -> logutil.Event + 138, // 35: tabletmanagerdata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 138, // 36: tabletmanagerdata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 137, // 37: tabletmanagerdata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 139, // 38: tabletmanagerdata.CreateVReplicationWorkflowRequest.binlog_source:type_name -> binlogdata.BinlogSource + 128, // 39: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType + 0, // 40: tabletmanagerdata.CreateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 140, // 41: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_type:type_name -> binlogdata.VReplicationWorkflowType + 141, // 42: tabletmanagerdata.CreateVReplicationWorkflowRequest.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType + 130, // 43: 
tabletmanagerdata.CreateVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 130, // 44: tabletmanagerdata.DeleteVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 142, // 45: tabletmanagerdata.ReadVReplicationWorkflowsRequest.include_states:type_name -> binlogdata.VReplicationWorkflowState + 142, // 46: tabletmanagerdata.ReadVReplicationWorkflowsRequest.exclude_states:type_name -> binlogdata.VReplicationWorkflowState + 108, // 47: tabletmanagerdata.ReadVReplicationWorkflowsResponse.workflows:type_name -> tabletmanagerdata.ReadVReplicationWorkflowResponse + 128, // 48: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_types:type_name -> topodata.TabletType + 0, // 49: tabletmanagerdata.ReadVReplicationWorkflowResponse.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 140, // 50: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_type:type_name -> binlogdata.VReplicationWorkflowType + 141, // 51: tabletmanagerdata.ReadVReplicationWorkflowResponse.workflow_sub_type:type_name -> binlogdata.VReplicationWorkflowSubType + 126, // 52: tabletmanagerdata.ReadVReplicationWorkflowResponse.streams:type_name -> tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream + 114, // 53: tabletmanagerdata.VDiffRequest.options:type_name -> tabletmanagerdata.VDiffOptions + 130, // 54: tabletmanagerdata.VDiffResponse.output:type_name -> query.QueryResult + 111, // 55: tabletmanagerdata.VDiffOptions.picker_options:type_name -> tabletmanagerdata.VDiffPickerOptions + 113, // 56: tabletmanagerdata.VDiffOptions.core_options:type_name -> tabletmanagerdata.VDiffCoreOptions + 112, // 57: tabletmanagerdata.VDiffOptions.report_options:type_name -> tabletmanagerdata.VDiffReportOptions + 128, // 58: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_types:type_name -> topodata.TabletType + 0, // 59: tabletmanagerdata.UpdateVReplicationWorkflowRequest.tablet_selection_preference:type_name -> 
tabletmanagerdata.TabletSelectionPreference + 143, // 60: tabletmanagerdata.UpdateVReplicationWorkflowRequest.on_ddl:type_name -> binlogdata.OnDDLAction + 142, // 61: tabletmanagerdata.UpdateVReplicationWorkflowRequest.state:type_name -> binlogdata.VReplicationWorkflowState + 130, // 62: tabletmanagerdata.UpdateVReplicationWorkflowResponse.result:type_name -> query.QueryResult + 142, // 63: tabletmanagerdata.UpdateVReplicationWorkflowsRequest.state:type_name -> binlogdata.VReplicationWorkflowState + 130, // 64: tabletmanagerdata.UpdateVReplicationWorkflowsResponse.result:type_name -> query.QueryResult + 139, // 65: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.bls:type_name -> binlogdata.BinlogSource + 138, // 66: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_updated:type_name -> vttime.Time + 138, // 67: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.transaction_timestamp:type_name -> vttime.Time + 142, // 68: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.state:type_name -> binlogdata.VReplicationWorkflowState + 138, // 69: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_heartbeat:type_name -> vttime.Time + 138, // 70: tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.time_throttled:type_name -> vttime.Time + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_tabletmanagerdata_proto_init() } @@ -7619,7 +8287,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAllPrivsRequest); i { + switch v := v.(*ExecuteMultiFetchAsDbaRequest); i { case 0: return &v.state case 1: @@ -7631,7 +8299,7 @@ func file_tabletmanagerdata_proto_init() { 
} } file_tabletmanagerdata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAllPrivsResponse); i { + switch v := v.(*ExecuteMultiFetchAsDbaResponse); i { case 0: return &v.state case 1: @@ -7643,7 +8311,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppRequest); i { + switch v := v.(*ExecuteFetchAsAllPrivsRequest); i { case 0: return &v.state case 1: @@ -7655,7 +8323,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppResponse); i { + switch v := v.(*ExecuteFetchAsAllPrivsResponse); i { case 0: return &v.state case 1: @@ -7667,7 +8335,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationStatusRequest); i { + switch v := v.(*ExecuteFetchAsAppRequest); i { case 0: return &v.state case 1: @@ -7679,7 +8347,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationStatusResponse); i { + switch v := v.(*ExecuteFetchAsAppResponse); i { case 0: return &v.state case 1: @@ -7691,7 +8359,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimaryStatusRequest); i { + switch v := v.(*ReplicationStatusRequest); i { case 0: return &v.state case 1: @@ -7703,7 +8371,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimaryStatusResponse); i { + switch v := v.(*ReplicationStatusResponse); i { case 0: return &v.state 
case 1: @@ -7715,7 +8383,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimaryPositionRequest); i { + switch v := v.(*PrimaryStatusRequest); i { case 0: return &v.state case 1: @@ -7727,7 +8395,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimaryPositionResponse); i { + switch v := v.(*PrimaryStatusResponse); i { case 0: return &v.state case 1: @@ -7739,7 +8407,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WaitForPositionRequest); i { + switch v := v.(*PrimaryPositionRequest); i { case 0: return &v.state case 1: @@ -7751,7 +8419,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WaitForPositionResponse); i { + switch v := v.(*PrimaryPositionResponse); i { case 0: return &v.state case 1: @@ -7763,7 +8431,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationRequest); i { + switch v := v.(*WaitForPositionRequest); i { case 0: return &v.state case 1: @@ -7775,7 +8443,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationResponse); i { + switch v := v.(*WaitForPositionResponse); i { case 0: return &v.state case 1: @@ -7787,7 +8455,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationMinimumRequest); i { + switch v := 
v.(*StopReplicationRequest); i { case 0: return &v.state case 1: @@ -7799,7 +8467,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationMinimumResponse); i { + switch v := v.(*StopReplicationResponse); i { case 0: return &v.state case 1: @@ -7811,7 +8479,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationRequest); i { + switch v := v.(*StopReplicationMinimumRequest); i { case 0: return &v.state case 1: @@ -7823,7 +8491,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationResponse); i { + switch v := v.(*StopReplicationMinimumResponse); i { case 0: return &v.state case 1: @@ -7835,7 +8503,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationUntilAfterRequest); i { + switch v := v.(*StartReplicationRequest); i { case 0: return &v.state case 1: @@ -7847,7 +8515,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartReplicationUntilAfterResponse); i { + switch v := v.(*StartReplicationResponse); i { case 0: return &v.state case 1: @@ -7859,7 +8527,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicasRequest); i { + switch v := v.(*StartReplicationUntilAfterRequest); i { case 0: return &v.state case 1: @@ -7871,7 +8539,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[61].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*GetReplicasResponse); i { + switch v := v.(*StartReplicationUntilAfterResponse); i { case 0: return &v.state case 1: @@ -7883,7 +8551,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetReplicationRequest); i { + switch v := v.(*GetReplicasRequest); i { case 0: return &v.state case 1: @@ -7895,7 +8563,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetReplicationResponse); i { + switch v := v.(*GetReplicasResponse); i { case 0: return &v.state case 1: @@ -7907,7 +8575,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VReplicationExecRequest); i { + switch v := v.(*ResetReplicationRequest); i { case 0: return &v.state case 1: @@ -7919,7 +8587,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VReplicationExecResponse); i { + switch v := v.(*ResetReplicationResponse); i { case 0: return &v.state case 1: @@ -7931,7 +8599,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VReplicationWaitForPosRequest); i { + switch v := v.(*VReplicationExecRequest); i { case 0: return &v.state case 1: @@ -7943,7 +8611,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VReplicationWaitForPosResponse); i { + switch v := v.(*VReplicationExecResponse); i { case 0: return &v.state case 1: @@ -7955,7 +8623,7 @@ func file_tabletmanagerdata_proto_init() { } } 
file_tabletmanagerdata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitPrimaryRequest); i { + switch v := v.(*VReplicationWaitForPosRequest); i { case 0: return &v.state case 1: @@ -7967,7 +8635,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitPrimaryResponse); i { + switch v := v.(*VReplicationWaitForPosResponse); i { case 0: return &v.state case 1: @@ -7979,7 +8647,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PopulateReparentJournalRequest); i { + switch v := v.(*InitPrimaryRequest); i { case 0: return &v.state case 1: @@ -7991,7 +8659,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PopulateReparentJournalResponse); i { + switch v := v.(*InitPrimaryResponse); i { case 0: return &v.state case 1: @@ -8003,7 +8671,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitReplicaRequest); i { + switch v := v.(*PopulateReparentJournalRequest); i { case 0: return &v.state case 1: @@ -8015,7 +8683,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InitReplicaResponse); i { + switch v := v.(*PopulateReparentJournalResponse); i { case 0: return &v.state case 1: @@ -8027,7 +8695,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DemotePrimaryRequest); i { + switch v := v.(*InitReplicaRequest); i { case 0: return &v.state case 1: @@ -8039,7 +8707,7 @@ 
func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DemotePrimaryResponse); i { + switch v := v.(*InitReplicaResponse); i { case 0: return &v.state case 1: @@ -8051,7 +8719,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UndoDemotePrimaryRequest); i { + switch v := v.(*DemotePrimaryRequest); i { case 0: return &v.state case 1: @@ -8063,7 +8731,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UndoDemotePrimaryResponse); i { + switch v := v.(*DemotePrimaryResponse); i { case 0: return &v.state case 1: @@ -8075,7 +8743,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicaWasPromotedRequest); i { + switch v := v.(*UndoDemotePrimaryRequest); i { case 0: return &v.state case 1: @@ -8087,7 +8755,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicaWasPromotedResponse); i { + switch v := v.(*UndoDemotePrimaryResponse); i { case 0: return &v.state case 1: @@ -8099,7 +8767,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetReplicationParametersRequest); i { + switch v := v.(*ReplicaWasPromotedRequest); i { case 0: return &v.state case 1: @@ -8111,7 +8779,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetReplicationParametersResponse); i { + switch v := 
v.(*ReplicaWasPromotedResponse); i { case 0: return &v.state case 1: @@ -8123,7 +8791,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FullStatusRequest); i { + switch v := v.(*ResetReplicationParametersRequest); i { case 0: return &v.state case 1: @@ -8135,7 +8803,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FullStatusResponse); i { + switch v := v.(*ResetReplicationParametersResponse); i { case 0: return &v.state case 1: @@ -8147,7 +8815,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicationSourceRequest); i { + switch v := v.(*FullStatusRequest); i { case 0: return &v.state case 1: @@ -8159,7 +8827,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicationSourceResponse); i { + switch v := v.(*FullStatusResponse); i { case 0: return &v.state case 1: @@ -8171,7 +8839,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicaWasRestartedRequest); i { + switch v := v.(*SetReplicationSourceRequest); i { case 0: return &v.state case 1: @@ -8183,7 +8851,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicaWasRestartedResponse); i { + switch v := v.(*SetReplicationSourceResponse); i { case 0: return &v.state case 1: @@ -8195,7 +8863,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} 
{ - switch v := v.(*StopReplicationAndGetStatusRequest); i { + switch v := v.(*ReplicaWasRestartedRequest); i { case 0: return &v.state case 1: @@ -8207,7 +8875,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopReplicationAndGetStatusResponse); i { + switch v := v.(*ReplicaWasRestartedResponse); i { case 0: return &v.state case 1: @@ -8219,7 +8887,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PromoteReplicaRequest); i { + switch v := v.(*StopReplicationAndGetStatusRequest); i { case 0: return &v.state case 1: @@ -8231,7 +8899,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PromoteReplicaResponse); i { + switch v := v.(*StopReplicationAndGetStatusResponse); i { case 0: return &v.state case 1: @@ -8243,7 +8911,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupRequest); i { + switch v := v.(*PromoteReplicaRequest); i { case 0: return &v.state case 1: @@ -8255,7 +8923,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupResponse); i { + switch v := v.(*PromoteReplicaResponse); i { case 0: return &v.state case 1: @@ -8267,7 +8935,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreFromBackupRequest); i { + switch v := v.(*BackupRequest); i { case 0: return &v.state case 1: @@ -8279,7 +8947,7 @@ func file_tabletmanagerdata_proto_init() { } } 
file_tabletmanagerdata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreFromBackupResponse); i { + switch v := v.(*BackupResponse); i { case 0: return &v.state case 1: @@ -8291,7 +8959,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateVReplicationWorkflowRequest); i { + switch v := v.(*RestoreFromBackupRequest); i { case 0: return &v.state case 1: @@ -8303,7 +8971,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateVReplicationWorkflowResponse); i { + switch v := v.(*RestoreFromBackupResponse); i { case 0: return &v.state case 1: @@ -8315,7 +8983,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteVReplicationWorkflowRequest); i { + switch v := v.(*CreateVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -8327,7 +8995,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteVReplicationWorkflowResponse); i { + switch v := v.(*CreateVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -8339,7 +9007,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadVReplicationWorkflowRequest); i { + switch v := v.(*DeleteVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -8351,7 +9019,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadVReplicationWorkflowResponse); i { + switch v := 
v.(*DeleteVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -8363,7 +9031,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffRequest); i { + switch v := v.(*HasVReplicationWorkflowsRequest); i { case 0: return &v.state case 1: @@ -8375,7 +9043,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffResponse); i { + switch v := v.(*HasVReplicationWorkflowsResponse); i { case 0: return &v.state case 1: @@ -8387,7 +9055,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffPickerOptions); i { + switch v := v.(*ReadVReplicationWorkflowsRequest); i { case 0: return &v.state case 1: @@ -8399,7 +9067,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffReportOptions); i { + switch v := v.(*ReadVReplicationWorkflowsResponse); i { case 0: return &v.state case 1: @@ -8411,7 +9079,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffCoreOptions); i { + switch v := v.(*ReadVReplicationWorkflowRequest); i { case 0: return &v.state case 1: @@ -8423,7 +9091,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VDiffOptions); i { + switch v := v.(*ReadVReplicationWorkflowResponse); i { case 0: return &v.state case 1: @@ -8435,7 +9103,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*UpdateVReplicationWorkflowRequest); i { + switch v := v.(*VDiffRequest); i { case 0: return &v.state case 1: @@ -8447,7 +9115,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateVReplicationWorkflowResponse); i { + switch v := v.(*VDiffResponse); i { case 0: return &v.state case 1: @@ -8459,7 +9127,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetSequencesRequest); i { + switch v := v.(*VDiffPickerOptions); i { case 0: return &v.state case 1: @@ -8471,7 +9139,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetSequencesResponse); i { + switch v := v.(*VDiffReportOptions); i { case 0: return &v.state case 1: @@ -8483,7 +9151,7 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckThrottlerRequest); i { + switch v := v.(*VDiffCoreOptions); i { case 0: return &v.state case 1: @@ -8495,7 +9163,43 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckThrottlerResponse); i { + switch v := v.(*VDiffOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[115].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowsRequest); i { case 0: return &v.state case 1: @@ -8507,6 +9211,66 @@ func file_tabletmanagerdata_proto_init() { } } file_tabletmanagerdata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateVReplicationWorkflowsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResetSequencesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResetSequencesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckThrottlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckThrottlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tabletmanagerdata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*ReadVReplicationWorkflowResponse_Stream); i { case 0: return &v.state @@ -8525,7 +9289,7 @@ func file_tabletmanagerdata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tabletmanagerdata_proto_rawDesc, NumEnums: 1, - NumMessages: 118, + NumMessages: 126, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go index 502a4c17ff9..1978b31ed4c 100644 --- a/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go +++ b/go/vt/proto/tabletmanagerdata/tabletmanagerdata_vtproto.pb.go @@ -642,13 +642,14 @@ func (m *ApplySchemaRequest) CloneVT() *ApplySchemaRequest { return (*ApplySchemaRequest)(nil) } r := &ApplySchemaRequest{ - Sql: m.Sql, - Force: m.Force, - AllowReplication: m.AllowReplication, - BeforeSchema: m.BeforeSchema.CloneVT(), - AfterSchema: m.AfterSchema.CloneVT(), - SqlMode: m.SqlMode, - BatchSize: m.BatchSize, + Sql: m.Sql, + Force: m.Force, + AllowReplication: m.AllowReplication, + BeforeSchema: m.BeforeSchema.CloneVT(), + AfterSchema: m.AfterSchema.CloneVT(), + SqlMode: m.SqlMode, + BatchSize: m.BatchSize, + DisableForeignKeyChecks: m.DisableForeignKeyChecks, } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -792,10 +793,11 @@ func (m *ExecuteFetchAsDbaRequest) CloneVT() *ExecuteFetchAsDbaRequest { return (*ExecuteFetchAsDbaRequest)(nil) } r := &ExecuteFetchAsDbaRequest{ - DbName: m.DbName, - MaxRows: m.MaxRows, - DisableBinlogs: m.DisableBinlogs, - ReloadSchema: m.ReloadSchema, + DbName: m.DbName, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, + DisableForeignKeyChecks: m.DisableForeignKeyChecks, } if rhs := m.Query; rhs != nil { tmpBytes := make([]byte, len(rhs)) @@ -831,6 +833,56 @@ func (m *ExecuteFetchAsDbaResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *ExecuteMultiFetchAsDbaRequest) 
CloneVT() *ExecuteMultiFetchAsDbaRequest { + if m == nil { + return (*ExecuteMultiFetchAsDbaRequest)(nil) + } + r := &ExecuteMultiFetchAsDbaRequest{ + DbName: m.DbName, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, + DisableForeignKeyChecks: m.DisableForeignKeyChecks, + } + if rhs := m.Sql; rhs != nil { + tmpBytes := make([]byte, len(rhs)) + copy(tmpBytes, rhs) + r.Sql = tmpBytes + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteMultiFetchAsDbaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteMultiFetchAsDbaResponse) CloneVT() *ExecuteMultiFetchAsDbaResponse { + if m == nil { + return (*ExecuteMultiFetchAsDbaResponse)(nil) + } + r := &ExecuteMultiFetchAsDbaResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]*query.QueryResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Results = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteMultiFetchAsDbaResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *ExecuteFetchAsAllPrivsRequest) CloneVT() *ExecuteFetchAsAllPrivsRequest { if m == nil { return (*ExecuteFetchAsAllPrivsRequest)(nil) @@ -1615,6 +1667,7 @@ func (m *SetReplicationSourceRequest) CloneVT() *SetReplicationSourceRequest { ForceStartReplication: m.ForceStartReplication, WaitPosition: m.WaitPosition, SemiSync: m.SemiSync, + HeartbeatInterval: m.HeartbeatInterval, } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -1839,6 +1892,7 @@ func (m *CreateVReplicationWorkflowRequest) CloneVT() *CreateVReplicationWorkflo DeferSecondaryKeys: m.DeferSecondaryKeys, AutoStart: m.AutoStart, StopAfterCopy: m.StopAfterCopy, + Options: m.Options, 
} if rhs := m.BinlogSource; rhs != nil { tmpContainer := make([]*binlogdata.BinlogSource, len(rhs)) @@ -1922,6 +1976,106 @@ func (m *DeleteVReplicationWorkflowResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *HasVReplicationWorkflowsRequest) CloneVT() *HasVReplicationWorkflowsRequest { + if m == nil { + return (*HasVReplicationWorkflowsRequest)(nil) + } + r := &HasVReplicationWorkflowsRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *HasVReplicationWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *HasVReplicationWorkflowsResponse) CloneVT() *HasVReplicationWorkflowsResponse { + if m == nil { + return (*HasVReplicationWorkflowsResponse)(nil) + } + r := &HasVReplicationWorkflowsResponse{ + Has: m.Has, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *HasVReplicationWorkflowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadVReplicationWorkflowsRequest) CloneVT() *ReadVReplicationWorkflowsRequest { + if m == nil { + return (*ReadVReplicationWorkflowsRequest)(nil) + } + r := &ReadVReplicationWorkflowsRequest{ + ExcludeFrozen: m.ExcludeFrozen, + } + if rhs := m.IncludeIds; rhs != nil { + tmpContainer := make([]int32, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeIds = tmpContainer + } + if rhs := m.IncludeWorkflows; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeWorkflows = tmpContainer + } + if rhs := m.IncludeStates; rhs != nil { + tmpContainer := make([]binlogdata.VReplicationWorkflowState, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeStates = tmpContainer + } + if rhs := m.ExcludeWorkflows; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeWorkflows = 
tmpContainer + } + if rhs := m.ExcludeStates; rhs != nil { + tmpContainer := make([]binlogdata.VReplicationWorkflowState, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeStates = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadVReplicationWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ReadVReplicationWorkflowsResponse) CloneVT() *ReadVReplicationWorkflowsResponse { + if m == nil { + return (*ReadVReplicationWorkflowsResponse)(nil) + } + r := &ReadVReplicationWorkflowsResponse{} + if rhs := m.Workflows; rhs != nil { + tmpContainer := make([]*ReadVReplicationWorkflowResponse, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Workflows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ReadVReplicationWorkflowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *ReadVReplicationWorkflowRequest) CloneVT() *ReadVReplicationWorkflowRequest { if m == nil { return (*ReadVReplicationWorkflowRequest)(nil) @@ -1984,6 +2138,7 @@ func (m *ReadVReplicationWorkflowResponse) CloneVT() *ReadVReplicationWorkflowRe WorkflowType: m.WorkflowType, WorkflowSubType: m.WorkflowSubType, DeferSecondaryKeys: m.DeferSecondaryKeys, + Options: m.Options, } if rhs := m.TabletTypes; rhs != nil { tmpContainer := make([]topodata.TabletType, len(rhs)) @@ -2076,9 +2231,10 @@ func (m *VDiffReportOptions) CloneVT() *VDiffReportOptions { return (*VDiffReportOptions)(nil) } r := &VDiffReportOptions{ - OnlyPks: m.OnlyPks, - DebugQuery: m.DebugQuery, - Format: m.Format, + OnlyPks: m.OnlyPks, + DebugQuery: m.DebugQuery, + Format: m.Format, + MaxSampleRows: m.MaxSampleRows, } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -2104,6 
+2260,7 @@ func (m *VDiffCoreOptions) CloneVT() *VDiffCoreOptions { TimeoutSeconds: m.TimeoutSeconds, MaxExtraRowsToCompare: m.MaxExtraRowsToCompare, UpdateTableStats: m.UpdateTableStats, + MaxDiffSeconds: m.MaxDiffSeconds, } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -2185,6 +2342,55 @@ func (m *UpdateVReplicationWorkflowResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *UpdateVReplicationWorkflowsRequest) CloneVT() *UpdateVReplicationWorkflowsRequest { + if m == nil { + return (*UpdateVReplicationWorkflowsRequest)(nil) + } + r := &UpdateVReplicationWorkflowsRequest{ + AllWorkflows: m.AllWorkflows, + State: m.State, + Message: m.Message, + StopPosition: m.StopPosition, + } + if rhs := m.IncludeWorkflows; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.IncludeWorkflows = tmpContainer + } + if rhs := m.ExcludeWorkflows; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.ExcludeWorkflows = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateVReplicationWorkflowsRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *UpdateVReplicationWorkflowsResponse) CloneVT() *UpdateVReplicationWorkflowsResponse { + if m == nil { + return (*UpdateVReplicationWorkflowsResponse)(nil) + } + r := &UpdateVReplicationWorkflowsResponse{ + Result: m.Result.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *UpdateVReplicationWorkflowsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *ResetSequencesRequest) CloneVT() *ResetSequencesRequest { if m == nil { return (*ResetSequencesRequest)(nil) @@ -3659,6 +3865,16 @@ func (m *ApplySchemaRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.DisableForeignKeyChecks { + i-- + if m.DisableForeignKeyChecks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } if m.BatchSize != 0 { i = encodeVarint(dAtA, i, uint64(m.BatchSize)) i-- @@ -4041,6 +4257,16 @@ func (m *ExecuteFetchAsDbaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.DisableForeignKeyChecks { + i-- + if m.DisableForeignKeyChecks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if m.ReloadSchema { i-- if m.ReloadSchema { @@ -4126,7 +4352,7 @@ func (m *ExecuteFetchAsDbaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteMultiFetchAsDbaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4139,12 +4365,12 @@ func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteMultiFetchAsDbaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteMultiFetchAsDbaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4156,6 +4382,16 @@ func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.DisableForeignKeyChecks { + i-- + if m.DisableForeignKeyChecks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if m.ReloadSchema { i-- if m.ReloadSchema { @@ -4164,6 +4400,16 @@ func (m *ExecuteFetchAsAllPrivsRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int dAtA[i] = 0 } i-- + dAtA[i] = 0x28 + } + if m.DisableBinlogs { + i-- + if m.DisableBinlogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- dAtA[i] = 0x20 } if m.MaxRows != 0 { @@ -4178,17 +4424,17 @@ func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int i-- dAtA[i] = 0x12 } - if len(m.Query) > 0 { - i -= len(m.Query) - copy(dAtA[i:], m.Query) - i = encodeVarint(dAtA, i, uint64(len(m.Query))) + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteMultiFetchAsDbaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4201,12 +4447,12 @@ func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteMultiFetchAsDbaResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteMultiFetchAsDbaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4218,20 +4464,22 @@ func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBufferVT(dAtA []byte) (in i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Result != nil { - size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Results[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - 
dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4244,12 +4492,12 @@ func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4261,10 +4509,27 @@ func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } if m.MaxRows != 0 { i = encodeVarint(dAtA, i, uint64(m.MaxRows)) i-- - dAtA[i] = 0x10 + dAtA[i] = 0x18 + } + if len(m.DbName) > 0 { + i -= len(m.DbName) + copy(dAtA[i:], m.DbName) + i = encodeVarint(dAtA, i, uint64(len(m.DbName))) + i-- + dAtA[i] = 0x12 } if len(m.Query) > 0 { i -= len(m.Query) @@ -4276,7 +4541,7 @@ func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err return len(dAtA) - i, nil } -func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4289,12 +4554,12 @@ func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAllPrivsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4319,7 +4584,7 @@ func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4332,12 +4597,12 @@ func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicationStatusRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4349,10 +4614,22 @@ func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x10 + } + if len(m.Query) > 0 { + i -= len(m.Query) + copy(dAtA[i:], m.Query) + i = encodeVarint(dAtA, i, uint64(len(m.Query))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ExecuteFetchAsAppResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4365,12 +4642,12 @@ func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReplicationStatusResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToVT(dAtA []byte) (int, error) 
{ size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ExecuteFetchAsAppResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4382,8 +4659,8 @@ func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Status != nil { - size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -4395,7 +4672,7 @@ func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *PrimaryStatusRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ReplicationStatusRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4408,7 +4685,83 @@ func (m *PrimaryStatusRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PrimaryStatusRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReplicationStatusRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReplicationStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *ReplicationStatusResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationStatusResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) 
+} + +func (m *ReplicationStatusResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Status != nil { + size, err := m.Status.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PrimaryStatusRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PrimaryStatusRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } @@ -5890,6 +6243,12 @@ func (m *SetReplicationSourceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.HeartbeatInterval != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.HeartbeatInterval)))) + i-- + dAtA[i] = 0x31 + } if m.SemiSync { i-- if m.SemiSync { @@ -6459,6 +6818,13 @@ func (m *CreateVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Options) > 0 { + i -= len(m.Options) + copy(dAtA[i:], m.Options) + i = encodeVarint(dAtA, i, uint64(len(m.Options))) + i-- + dAtA[i] = 0x5a + } if m.StopAfterCopy { i-- if m.StopAfterCopy { @@ -6682,7 +7048,7 @@ func (m *DeleteVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) return len(dAtA) - i, nil } -func (m *ReadVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { +func (m *HasVReplicationWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6695,12 +7061,12 @@ func 
(m *ReadVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *HasVReplicationWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReadVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *HasVReplicationWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6712,17 +7078,10 @@ func (m *ReadVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (i i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Workflow) > 0 { - i -= len(m.Workflow) - copy(dAtA[i:], m.Workflow) - i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ReadVReplicationWorkflowResponse_Stream) MarshalVT() (dAtA []byte, err error) { +func (m *HasVReplicationWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6735,12 +7094,12 @@ func (m *ReadVReplicationWorkflowResponse_Stream) MarshalVT() (dAtA []byte, err return dAtA[:n], nil } -func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToVT(dAtA []byte) (int, error) { +func (m *HasVReplicationWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *HasVReplicationWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6752,113 +7111,144 @@ func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToSizedBufferVT(dAtA [] i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ComponentThrottled) > 0 { - i -= len(m.ComponentThrottled) - copy(dAtA[i:], m.ComponentThrottled) - i = 
encodeVarint(dAtA, i, uint64(len(m.ComponentThrottled))) + if m.Has { i-- - dAtA[i] = 0x72 - } - if m.TimeThrottled != nil { - size, err := m.TimeThrottled.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if m.Has { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x8 } - if m.TimeHeartbeat != nil { - size, err := m.TimeHeartbeat.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x62 + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } - if m.RowsCopied != 0 { - i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) - i-- - dAtA[i] = 0x58 + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarint(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x52 + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil } - if m.State != 0 { - i = encodeVarint(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x48 + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) } - if m.TransactionTimestamp != nil { - size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) + if m.ExcludeFrozen { i-- - dAtA[i] = 0x42 - } - if m.TimeUpdated != nil { - size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) - 
if err != nil { - return 0, err + if m.ExcludeFrozen { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x3a - } - if m.MaxReplicationLag != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxReplicationLag)) i-- dAtA[i] = 0x30 } - if m.MaxTps != 0 { - i = encodeVarint(dAtA, i, uint64(m.MaxTps)) + if len(m.ExcludeStates) > 0 { + var pksize2 int + for _, num := range m.ExcludeStates { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.ExcludeStates { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) i-- - dAtA[i] = 0x28 + dAtA[i] = 0x2a } - if len(m.StopPos) > 0 { - i -= len(m.StopPos) - copy(dAtA[i:], m.StopPos) - i = encodeVarint(dAtA, i, uint64(len(m.StopPos))) - i-- - dAtA[i] = 0x22 + if len(m.ExcludeWorkflows) > 0 { + for iNdEx := len(m.ExcludeWorkflows) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeWorkflows[iNdEx]) + copy(dAtA[i:], m.ExcludeWorkflows[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeWorkflows[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - if len(m.Pos) > 0 { - i -= len(m.Pos) - copy(dAtA[i:], m.Pos) - i = encodeVarint(dAtA, i, uint64(len(m.Pos))) + if len(m.IncludeStates) > 0 { + var pksize4 int + for _, num := range m.IncludeStates { + pksize4 += sov(uint64(num)) + } + i -= pksize4 + j3 := i + for _, num1 := range m.IncludeStates { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j3] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j3++ + } + dAtA[j3] = uint8(num) + j3++ + } + i = encodeVarint(dAtA, i, uint64(pksize4)) i-- dAtA[i] = 0x1a } - if m.Bls != nil { - size, err := m.Bls.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err + if len(m.IncludeWorkflows) > 0 { + for iNdEx := len(m.IncludeWorkflows) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IncludeWorkflows[iNdEx]) + copy(dAtA[i:], 
m.IncludeWorkflows[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.IncludeWorkflows[iNdEx]))) + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x12 } - if m.Id != 0 { - i = encodeVarint(dAtA, i, uint64(m.Id)) + if len(m.IncludeIds) > 0 { + var pksize6 int + for _, num := range m.IncludeIds { + pksize6 += sov(uint64(num)) + } + i -= pksize6 + j5 := i + for _, num1 := range m.IncludeIds { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j5] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j5++ + } + dAtA[j5] = uint8(num) + j5++ + } + i = encodeVarint(dAtA, i, uint64(pksize6)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReadVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { +func (m *ReadVReplicationWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -6871,12 +7261,12 @@ func (m *ReadVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) return dAtA[:n], nil } -func (m *ReadVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *ReadVReplicationWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ReadVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ReadVReplicationWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -6888,24 +7278,252 @@ func (m *ReadVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) ( i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Streams) > 0 { - for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if len(m.Workflows) > 0 { + for iNdEx := len(m.Workflows) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Workflows[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= size i = 
encodeVarint(dAtA, i, uint64(size)) i-- - dAtA[i] = 0x5a + dAtA[i] = 0xa } } - if m.DeferSecondaryKeys { - i-- - if m.DeferSecondaryKeys { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Workflow) > 0 { + i -= len(m.Workflow) + copy(dAtA[i:], m.Workflow) + i = encodeVarint(dAtA, i, uint64(len(m.Workflow))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowResponse_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ComponentThrottled) > 0 { + i -= len(m.ComponentThrottled) + copy(dAtA[i:], m.ComponentThrottled) + i = encodeVarint(dAtA, i, 
uint64(len(m.ComponentThrottled))) + i-- + dAtA[i] = 0x72 + } + if m.TimeThrottled != nil { + size, err := m.TimeThrottled.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x6a + } + if m.TimeHeartbeat != nil { + size, err := m.TimeHeartbeat.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if m.RowsCopied != 0 { + i = encodeVarint(dAtA, i, uint64(m.RowsCopied)) + i-- + dAtA[i] = 0x58 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x52 + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x48 + } + if m.TransactionTimestamp != nil { + size, err := m.TransactionTimestamp.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.TimeUpdated != nil { + size, err := m.TimeUpdated.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + if m.MaxReplicationLag != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxReplicationLag)) + i-- + dAtA[i] = 0x30 + } + if m.MaxTps != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxTps)) + i-- + dAtA[i] = 0x28 + } + if len(m.StopPos) > 0 { + i -= len(m.StopPos) + copy(dAtA[i:], m.StopPos) + i = encodeVarint(dAtA, i, uint64(len(m.StopPos))) + i-- + dAtA[i] = 0x22 + } + if len(m.Pos) > 0 { + i -= len(m.Pos) + copy(dAtA[i:], m.Pos) + i = encodeVarint(dAtA, i, uint64(len(m.Pos))) + i-- + dAtA[i] = 0x1a + } + if m.Bls != nil { + size, err := m.Bls.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = 
encodeVarint(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReadVReplicationWorkflowResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadVReplicationWorkflowResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ReadVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Options) > 0 { + i -= len(m.Options) + copy(dAtA[i:], m.Options) + i = encodeVarint(dAtA, i, uint64(len(m.Options))) + i-- + dAtA[i] = 0x62 + } + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Streams[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if m.DeferSecondaryKeys { + i-- + if m.DeferSecondaryKeys { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } i-- dAtA[i] = 0x50 @@ -7194,6 +7812,11 @@ func (m *VDiffReportOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MaxSampleRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxSampleRows)) + i-- + dAtA[i] = 0x20 + } if len(m.Format) > 0 { i -= len(m.Format) copy(dAtA[i:], m.Format) @@ -7254,6 +7877,11 @@ func (m *VDiffCoreOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MaxDiffSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxDiffSeconds)) + i-- + dAtA[i] = 0x48 + } if m.UpdateTableStats { i-- if 
m.UpdateTableStats { @@ -7505,7 +8133,7 @@ func (m *UpdateVReplicationWorkflowResponse) MarshalToSizedBufferVT(dAtA []byte) return len(dAtA) - i, nil } -func (m *ResetSequencesRequest) MarshalVT() (dAtA []byte, err error) { +func (m *UpdateVReplicationWorkflowsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -7518,12 +8146,12 @@ func (m *ResetSequencesRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResetSequencesRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *UpdateVReplicationWorkflowsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *ResetSequencesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *UpdateVReplicationWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -7535,21 +8163,144 @@ func (m *ResetSequencesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.Tables) > 0 { - for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Tables[iNdEx]) - copy(dAtA[i:], m.Tables[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResetSequencesResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + if len(m.StopPosition) > 0 { + i -= len(m.StopPosition) + copy(dAtA[i:], m.StopPosition) + i = encodeVarint(dAtA, i, uint64(len(m.StopPosition))) + i-- + dAtA[i] = 0x32 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + } + if m.State != 0 { + i = encodeVarint(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x20 + } + if len(m.ExcludeWorkflows) > 0 { + for iNdEx := len(m.ExcludeWorkflows) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeWorkflows[iNdEx]) + 
copy(dAtA[i:], m.ExcludeWorkflows[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ExcludeWorkflows[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.IncludeWorkflows) > 0 { + for iNdEx := len(m.IncludeWorkflows) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.IncludeWorkflows[iNdEx]) + copy(dAtA[i:], m.IncludeWorkflows[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.IncludeWorkflows[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.AllWorkflows { + i-- + if m.AllWorkflows { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpdateVReplicationWorkflowsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateVReplicationWorkflowsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *UpdateVReplicationWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Result != nil { + size, err := m.Result.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResetSequencesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetSequencesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResetSequencesRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Tables) > 0 { + for iNdEx := len(m.Tables) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Tables[iNdEx]) + copy(dAtA[i:], m.Tables[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Tables[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResetSequencesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil } size := m.SizeVT() dAtA = make([]byte, size) @@ -8229,6 +8980,9 @@ func (m *ApplySchemaRequest) SizeVT() (n int) { if m.BatchSize != 0 { n += 1 + sov(uint64(m.BatchSize)) } + if m.DisableForeignKeyChecks { + n += 2 + } n += len(m.unknownFields) return n } @@ -8353,6 +9107,9 @@ func (m *ExecuteFetchAsDbaRequest) SizeVT() (n int) { if m.ReloadSchema { n += 2 } + if m.DisableForeignKeyChecks { + n += 2 + } n += len(m.unknownFields) return n } @@ -8371,6 +9128,52 @@ func (m *ExecuteFetchAsDbaResponse) SizeVT() (n int) { return n } +func (m *ExecuteMultiFetchAsDbaRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.DbName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + if m.DisableForeignKeyChecks { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteMultiFetchAsDbaResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range m.Results { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *ExecuteFetchAsAllPrivsRequest) SizeVT() (n int) { if m == nil { return 0 @@ -8968,6 +9771,9 @@ func (m 
*SetReplicationSourceRequest) SizeVT() (n int) { if m.SemiSync { n += 2 } + if m.HeartbeatInterval != 0 { + n += 9 + } n += len(m.unknownFields) return n } @@ -9183,6 +9989,10 @@ func (m *CreateVReplicationWorkflowRequest) SizeVT() (n int) { if m.StopAfterCopy { n += 2 } + l = len(m.Options) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -9229,6 +10039,91 @@ func (m *DeleteVReplicationWorkflowResponse) SizeVT() (n int) { return n } +func (m *HasVReplicationWorkflowsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *HasVReplicationWorkflowsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Has { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.IncludeIds) > 0 { + l = 0 + for _, e := range m.IncludeIds { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if len(m.IncludeWorkflows) > 0 { + for _, s := range m.IncludeWorkflows { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.IncludeStates) > 0 { + l = 0 + for _, e := range m.IncludeStates { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if len(m.ExcludeWorkflows) > 0 { + for _, s := range m.ExcludeWorkflows { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + if len(m.ExcludeStates) > 0 { + l = 0 + for _, e := range m.ExcludeStates { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + if m.ExcludeFrozen { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ReadVReplicationWorkflowsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Workflows) > 0 { + for _, e := range m.Workflows { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *ReadVReplicationWorkflowRequest) SizeVT() 
(n int) { if m == nil { return 0 @@ -9351,6 +10246,10 @@ func (m *ReadVReplicationWorkflowResponse) SizeVT() (n int) { n += 1 + l + sov(uint64(l)) } } + l = len(m.Options) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -9448,6 +10347,9 @@ func (m *VDiffReportOptions) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.MaxSampleRows != 0 { + n += 1 + sov(uint64(m.MaxSampleRows)) + } n += len(m.unknownFields) return n } @@ -9483,6 +10385,9 @@ func (m *VDiffCoreOptions) SizeVT() (n int) { if m.UpdateTableStats { n += 2 } + if m.MaxDiffSeconds != 0 { + n += 1 + sov(uint64(m.MaxDiffSeconds)) + } n += len(m.unknownFields) return n } @@ -9559,28 +10464,78 @@ func (m *UpdateVReplicationWorkflowResponse) SizeVT() (n int) { return n } -func (m *ResetSequencesRequest) SizeVT() (n int) { +func (m *UpdateVReplicationWorkflowsRequest) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Tables) > 0 { - for _, s := range m.Tables { + if m.AllWorkflows { + n += 2 + } + if len(m.IncludeWorkflows) > 0 { + for _, s := range m.IncludeWorkflows { l = len(s) n += 1 + l + sov(uint64(l)) } } - n += len(m.unknownFields) - return n -} - -func (m *ResetSequencesResponse) SizeVT() (n int) { - if m == nil { - return 0 + if len(m.ExcludeWorkflows) > 0 { + for _, s := range m.ExcludeWorkflows { + l = len(s) + n += 1 + l + sov(uint64(l)) + } } - var l int - _ = l + if m.State != 0 { + n += 1 + sov(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.StopPosition) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *UpdateVReplicationWorkflowsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ResetSequencesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var 
l int + _ = l + if len(m.Tables) > 0 { + for _, s := range m.Tables { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResetSequencesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l n += len(m.unknownFields) return n } @@ -12930,6 +13885,26 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { break } } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableForeignKeyChecks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableForeignKeyChecks = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13692,6 +14667,26 @@ func (m *ExecuteFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { } } m.ReloadSchema = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableForeignKeyChecks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableForeignKeyChecks = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13801,7 +14796,7 @@ func (m *ExecuteFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteMultiFetchAsDbaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13824,15 +14819,15 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteMultiFetchAsDbaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteMultiFetchAsDbaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -13859,9 +14854,9 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) - if m.Query == nil { - m.Query = []byte{} + m.Sql = append(m.Sql[:0], dAtA[iNdEx:postIndex]...) + if m.Sql == nil { + m.Sql = []byte{} } iNdEx = postIndex case 2: @@ -13916,6 +14911,26 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } } case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableBinlogs = bool(v != 0) + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) } @@ -13935,6 +14950,26 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } } m.ReloadSchema = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableForeignKeyChecks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableForeignKeyChecks = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13957,7 +14992,7 @@ func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteMultiFetchAsDbaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13980,15 +15015,15 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteMultiFetchAsDbaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteMultiFetchAsDbaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14015,10 +15050,8 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &query.QueryResult{} - } - if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Results = append(m.Results, &query.QueryResult{}) + if err := m.Results[len(m.Results)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14044,7 +15077,7 @@ func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppRequest) 
UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAllPrivsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14067,10 +15100,10 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14108,6 +15141,38 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } iNdEx = postIndex case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DbName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) } @@ -14126,6 +15191,26 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { break } } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14148,7 +15233,7 @@ func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAllPrivsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14171,10 +15256,10 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAllPrivsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14235,7 +15320,7 @@ func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteFetchAsAppRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14258,40 +15343,231 @@ func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteFetchAsAppRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - 
default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { return ErrInvalidLength } - if (iNdEx + skippy) > l { + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 + m.Query = append(m.Query[:0], dAtA[iNdEx:postIndex]...) + if m.Query == nil { + m.Query = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecuteFetchAsAppResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteFetchAsAppResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &query.QueryResult{} + } + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationStatusRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationStatusResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -17242,6 +18518,17 @@ func (m *SetReplicationSourceRequest) UnmarshalVT(dAtA []byte) error { } } m.SemiSync = bool(v != 0) + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatInterval", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.HeartbeatInterval = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -17807,7 +19094,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= int64(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -18568,6 +19855,38 @@ func (m *CreateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { } } m.StopAfterCopy = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18847,7 +20166,7 @@ func (m *DeleteVReplicationWorkflowResponse) UnmarshalVT(dAtA 
[]byte) error { } return nil } -func (m *ReadVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { +func (m *HasVReplicationWorkflowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18870,44 +20189,83 @@ func (m *ReadVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: wiretype end group for non-group") + return fmt.Errorf("proto: HasVReplicationWorkflowsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HasVReplicationWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HasVReplicationWorkflowsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HasVReplicationWorkflowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HasVReplicationWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Has", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Has = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -18930,7 +20288,7 @@ func (m *ReadVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error { +func (m *ReadVReplicationWorkflowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18953,36 +20311,93 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: wiretype end group for non-group") + return fmt.Errorf("proto: ReadVReplicationWorkflowsRequest: wiretype end 
group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadVReplicationWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - m.Id = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.IncludeIds = append(m.IncludeIds, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.Id |= int32(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.IncludeIds) == 0 { + m.IncludeIds = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeIds = append(m.IncludeIds, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field 
IncludeIds", wireType) } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bls", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncludeWorkflows", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -18992,31 +20407,96 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Bls == nil { - m.Bls = &binlogdata.BinlogSource{} - } - if err := m.Bls.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.IncludeWorkflows = append(m.IncludeWorkflows, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: + if wireType == 0 { + var v binlogdata.VReplicationWorkflowState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeStates = append(m.IncludeStates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.IncludeStates) == 0 { + 
m.IncludeStates = make([]binlogdata.VReplicationWorkflowState, 0, elementCount) + } + for iNdEx < postIndex { + var v binlogdata.VReplicationWorkflowState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeStates = append(m.IncludeStates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeStates", wireType) + } + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeWorkflows", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19044,11 +20524,805 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - m.Pos = string(dAtA[iNdEx:postIndex]) + m.ExcludeWorkflows = append(m.ExcludeWorkflows, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StopPos", wireType) + case 5: + if wireType == 0 { + var v binlogdata.VReplicationWorkflowState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExcludeStates = append(m.ExcludeStates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return 
ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.ExcludeStates) == 0 { + m.ExcludeStates = make([]binlogdata.VReplicationWorkflowState, 0, elementCount) + } + for iNdEx < postIndex { + var v binlogdata.VReplicationWorkflowState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExcludeStates = append(m.ExcludeStates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeStates", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeFrozen", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExcludeFrozen = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadVReplicationWorkflowsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadVReplicationWorkflowsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadVReplicationWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflows = append(m.Workflows, &ReadVReplicationWorkflowResponse{}) + if err := m.Workflows[len(m.Workflows)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse_Stream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Bls == nil { + m.Bls = &binlogdata.BinlogSource{} + } + if err := m.Bls.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pos", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPos", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopPos = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTps", wireType) + } + m.MaxTps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTps |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLag", wireType) + } + m.MaxReplicationLag = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicationLag |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeUpdated == nil { + m.TimeUpdated = &vttime.Time{} + } + if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TransactionTimestamp == nil { + m.TransactionTimestamp = &vttime.Time{} + } + if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) + } + m.RowsCopied = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RowsCopied |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeHeartbeat", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeHeartbeat == nil { + m.TimeHeartbeat = &vttime.Time{} + } + if err := m.TimeHeartbeat.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeThrottled", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeThrottled == nil { + m.TimeThrottled = &vttime.Time{} + } + if err := m.TimeThrottled.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Workflow = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19076,32 +21350,82 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - m.StopPos = string(dAtA[iNdEx:postIndex]) + m.Cells = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d 
for field MaxTps", wireType) - } - m.MaxTps = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + case 4: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.MaxTps |= int64(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - case 6: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicationLag", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) } - m.MaxReplicationLag = 0 + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19111,16 
+21435,16 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicationLag |= int64(b&0x7F) << shift + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - case 7: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeUpdated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19130,33 +21454,29 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TimeUpdated == nil { - m.TimeUpdated = &vttime.Time{} - } - if err := m.TimeUpdated.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.DbName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TransactionTimestamp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19166,52 +21486,29 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + 
intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TransactionTimestamp == nil { - m.TransactionTimestamp = &vttime.Time{} - } - if err := m.TransactionTimestamp.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Tags = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) } - var stringLen uint64 + m.WorkflowType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19221,29 +21518,16 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.WorkflowType |= binlogdata.VReplicationWorkflowType(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: + case 9: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RowsCopied", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) } - m.RowsCopied = 0 + m.WorkflowSubType = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19253,16 +21537,16 @@ func (m 
*ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - m.RowsCopied |= int64(b&0x7F) << shift + m.WorkflowSubType |= binlogdata.VReplicationWorkflowSubType(b&0x7F) << shift if b < 0x80 { break } } - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeHeartbeat", wireType) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19272,31 +21556,15 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TimeHeartbeat == nil { - m.TimeHeartbeat = &vttime.Time{} - } - if err := m.TimeHeartbeat.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: + m.DeferSecondaryKeys = bool(v != 0) + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeThrottled", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19323,16 +21591,14 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - if m.TimeThrottled == nil { - m.TimeThrottled = &vttime.Time{} - } - if err := m.TimeThrottled.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Streams = append(m.Streams, &ReadVReplicationWorkflowResponse_Stream{}) + if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 14: + case 12: if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ComponentThrottled", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19360,7 +21626,7 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error if postIndex > l { return io.ErrUnexpectedEOF } - m.ComponentThrottled = string(dAtA[iNdEx:postIndex]) + m.Options = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -19384,7 +21650,7 @@ func (m *ReadVReplicationWorkflowResponse_Stream) UnmarshalVT(dAtA []byte) error } return nil } -func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19407,12 +21673,44 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReadVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) @@ -19447,7 +21745,7 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19475,99 +21773,11 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = string(dAtA[iNdEx:postIndex]) + m.Action = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: - if wireType == 0 { - var v topodata.TabletType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TabletTypes = append(m.TabletTypes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.TabletTypes) == 0 { - m.TabletTypes = make([]topodata.TabletType, 0, elementCount) - } - for iNdEx < postIndex { - var v topodata.TabletType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TabletTypes = append(m.TabletTypes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) - } - m.TabletSelectionPreference = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DbName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ActionArg", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19595,11 +21805,11 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DbName = string(dAtA[iNdEx:postIndex]) + m.ActionArg = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19627,69 +21837,11 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tags = string(dAtA[iNdEx:postIndex]) + m.VdiffUuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowType", wireType) - } - m.WorkflowType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkflowType |= 
binlogdata.VReplicationWorkflowType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkflowSubType", wireType) - } - m.WorkflowSubType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.WorkflowSubType |= binlogdata.VReplicationWorkflowSubType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeferSecondaryKeys", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeferSecondaryKeys = bool(v != 0) - case 11: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19716,8 +21868,10 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Streams = append(m.Streams, &ReadVReplicationWorkflowResponse_Stream{}) - if err := m.Streams[len(m.Streams)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Options == nil { + m.Options = &VDiffOptions{} + } + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -19743,7 +21897,7 @@ func (m *ReadVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { +func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -19766,17 +21920,36 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { fieldNum := 
int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffRequest: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -19786,27 +21959,31 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.Output == nil { + m.Output = &query.QueryResult{} + } + if err := m.Output.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) } var stringLen uint64 for shift 
:= uint(0); ; shift += 7 { @@ -19834,11 +22011,62 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VDiffPickerOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VDiffPickerOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19866,11 +22094,11 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Action = string(dAtA[iNdEx:postIndex]) + m.TabletTypes = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionArg", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field SourceCell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19898,11 +22126,11 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ActionArg = string(dAtA[iNdEx:postIndex]) + m.SourceCell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetCell", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19930,43 +22158,7 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VdiffUuid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &VDiffOptions{} - } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.TargetCell = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -19990,7 +22182,7 @@ func (m *VDiffRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { +func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20013,17 +22205,17 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := 
int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffResponse: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffReportOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffReportOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType) } - m.Id = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20033,16 +22225,17 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Id |= int64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } + m.OnlyPks = bool(v != 0) case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20052,31 +22245,15 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Output == nil { - m.Output = &query.QueryResult{} - } - if err := m.Output.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.DebugQuery = bool(v != 0) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VdiffUuid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) 
} var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20104,8 +22281,27 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.VdiffUuid = string(dAtA[iNdEx:postIndex]) + m.Format = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSampleRows", wireType) + } + m.MaxSampleRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSampleRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20128,7 +22324,7 @@ func (m *VDiffResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { +func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20151,15 +22347,15 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffPickerOptions: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffCoreOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffPickerOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffCoreOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20187,13 +22383,129 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TabletTypes = string(dAtA[iNdEx:postIndex]) + 
m.Tables = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceCell", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoRetry = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Checksum = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplePct", wireType) + } + m.SamplePct = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SamplePct |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutSeconds |= 
int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) + } + m.MaxExtraRowsToCompare = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - var stringLen uint64 + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20203,29 +22515,17 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SourceCell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetCell", wireType) + m.UpdateTableStats = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDiffSeconds", wireType) } - var stringLen uint64 + m.MaxDiffSeconds = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20235,24 +22535,11 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.MaxDiffSeconds |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - m.TargetCell = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20275,7 +22562,7 @@ func (m *VDiffPickerOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { +func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20298,17 +22585,17 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffReportOptions: wiretype end group for non-group") + return fmt.Errorf("proto: VDiffOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffReportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VDiffOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnlyPks", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PickerOptions", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20318,17 +22605,33 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.OnlyPks = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PickerOptions == nil { + m.PickerOptions = &VDiffPickerOptions{} + } + if err := m.PickerOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", 
wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoreOptions", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20338,17 +22641,33 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DebugQuery = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CoreOptions == nil { + m.CoreOptions = &VDiffCoreOptions{} + } + if err := m.CoreOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReportOptions", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20358,23 +22677,27 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Format = string(dAtA[iNdEx:postIndex]) + if m.ReportOptions == nil { + m.ReportOptions = &VDiffReportOptions{} + } + if err := m.ReportOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -20398,7 +22721,7 @@ func (m *VDiffReportOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffCoreOptions) UnmarshalVT(dAtA 
[]byte) error { +func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20421,15 +22744,15 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffCoreOptions: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffCoreOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20457,13 +22780,13 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20473,75 +22796,98 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.AutoRetry = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.MaxRows = 0 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxRows |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Checksum", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + if packedLen < 0 { + return ErrInvalidLength } - } - m.Checksum = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SamplePct", wireType) - } - m.SamplePct = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.SamplePct |= int64(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes 
= make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) } - case 6: + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) } - m.TimeoutSeconds = 0 + m.TabletSelectionPreference = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20551,16 +22897,16 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= int64(b&0x7F) << shift + m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift if b < 0x80 { break } } - case 7: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) } - m.MaxExtraRowsToCompare = 0 + m.OnDdl = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20570,16 +22916,16 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift + m.OnDdl |= binlogdata.OnDDLAction(b&0x7F) << shift if b < 0x80 { break } } - case 8: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - var v int + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20589,12 +22935,11 
@@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift if b < 0x80 { break } } - m.UpdateTableStats = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -20617,7 +22962,7 @@ func (m *VDiffCoreOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { +func (m *UpdateVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20640,87 +22985,15 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VDiffOptions: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PickerOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PickerOptions == nil { - m.PickerOptions = &VDiffPickerOptions{} - } - if err := m.PickerOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CoreOptions", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CoreOptions == nil { - m.CoreOptions = &VDiffCoreOptions{} - } - if err := m.CoreOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20747,10 +23020,10 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReportOptions == nil { - m.ReportOptions = &VDiffReportOptions{} + if m.Result == nil { + m.Result = &query.QueryResult{} } - if err := m.ReportOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Result.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -20776,7 +23049,7 @@ func (m *VDiffOptions) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { +func (m *UpdateVReplicationWorkflowsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20799,15 +23072,35 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowsRequest: wiretype end group for non-group") } if fieldNum <= 
0 { - return fmt.Errorf("proto: UpdateVReplicationWorkflowRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllWorkflows", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllWorkflows = bool(v != 0) + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncludeWorkflows", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20835,11 +23128,11 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Workflow = string(dAtA[iNdEx:postIndex]) + m.IncludeWorkflows = append(m.IncludeWorkflows, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeWorkflows", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20867,82 +23160,13 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.ExcludeWorkflows = append(m.ExcludeWorkflows, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType == 0 { - var v topodata.TabletType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= 
topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TabletTypes = append(m.TabletTypes, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - if elementCount != 0 && len(m.TabletTypes) == 0 { - m.TabletTypes = make([]topodata.TabletType, 0, elementCount) - } - for iNdEx < postIndex { - var v topodata.TabletType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TabletTypes = append(m.TabletTypes, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) - } case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } - m.TabletSelectionPreference = 0 + m.State = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20952,16 +23176,16 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TabletSelectionPreference |= TabletSelectionPreference(b&0x7F) << shift + m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift if b < 0x80 { break } } case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnDdl", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - 
m.OnDdl = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20971,16 +23195,29 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.OnDdl |= binlogdata.OnDDLAction(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopPosition", wireType) } - m.State = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -20990,11 +23227,24 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.State |= binlogdata.VReplicationWorkflowState(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopPosition = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -21017,7 +23267,7 @@ func (m *UpdateVReplicationWorkflowRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *UpdateVReplicationWorkflowResponse) UnmarshalVT(dAtA []byte) error { +func (m *UpdateVReplicationWorkflowsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -21040,10 +23290,10 @@ func (m *UpdateVReplicationWorkflowResponse) UnmarshalVT(dAtA 
[]byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: wiretype end group for non-group") + return fmt.Errorf("proto: UpdateVReplicationWorkflowsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateVReplicationWorkflowResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UpdateVReplicationWorkflowsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go index 608282049ba..3d7e42cf4e3 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: tabletmanagerservice.proto @@ -45,7 +45,7 @@ var file_tabletmanagerservice_proto_rawDesc = []byte{ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xca, 0x2c, 0x0a, 0x0d, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xef, 0x30, 0x0a, 0x0d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, @@ -156,257 +156,291 @@ var file_tabletmanagerservice_proto_rawDesc = []byte{ 0x73, 0x74, 0x1a, 
0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x12, 0x30, 0x2e, 0x74, + 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, - 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, 0x74, + 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, - 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 
0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, - 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, + 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, + 0x65, 
0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x62, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x12, 0x30, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, + 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x41, 0x6c, 0x6c, 0x50, 0x72, 0x69, 0x76, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, + 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 
0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x57, 0x61, 0x69, - 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, + 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, 
0x74, - 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, - 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x30, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, + 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x57, 0x61, + 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, - 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, - 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x25, - 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x12, - 0x8b, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, - 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, - 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x85, 0x01, 0x0a, 0x18, - 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x12, 0x30, 0x2e, 0x74, + 0x61, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x61, 0x69, + 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, + 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, - 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, + 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x56, 0x44, 0x69, 0x66, 0x66, 0x12, 0x1f, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x6d, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, - 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x25, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x82, - 0x01, 0x0a, 0x17, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x31, 0x2e, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, - 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, - 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, + 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 
0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, + 0x6c, 0x41, 0x66, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, + 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, + 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, + 0x01, 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, - 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, + 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x85, 0x01, 0x0a, + 0x18, 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 
0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x48, 0x61, + 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x11, 0x55, 0x6e, 0x64, - 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2b, - 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x73, 0x0a, 0x12, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, - 0x64, 0x12, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, - 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 
0x50, 0x72, - 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, + 0x61, 0x2e, 0x48, 0x61, 0x73, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x85, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x12, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a, + 0x19, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x33, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, - 0x61, 0x74, 0x61, 0x2e, 
0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, - 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x24, 0x2e, 0x74, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x12, 0x2a, 0x2e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x65, 0x63, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x16, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, + 0x12, 0x30, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x50, 0x6f, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x14, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, - 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8e, - 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x35, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x56, 0x44, 0x69, 0x66, 0x66, 0x12, + 0x1f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, + 0x69, 0x74, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x82, 0x01, 0x0a, 0x17, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 
0x61, 0x6c, 0x12, + 0x31, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4a, 0x6f, 0x75, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0b, 0x49, 0x6e, 0x69, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x67, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 
0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x12, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, - 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x72, 0x0a, 0x11, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, - 0x67, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, - 0x72, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 
0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x33, 0x5a, 0x31, 0x76, 0x69, 0x74, 0x65, - 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, - 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x0d, 0x44, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, + 0x0a, 0x11, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x6e, 0x64, 0x6f, 0x44, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x73, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, + 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x57, 0x61, 0x73, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x73, 0x12, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, + 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x0a, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x24, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 
0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x79, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x13, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x65, 0x64, 0x12, 0x2d, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, + 0x73, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x57, 0x61, 0x73, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x8e, 0x01, 0x0a, 0x1b, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x35, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6e, + 0x64, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, + 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, + 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, + 0x12, 0x72, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2b, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 
0x73, 0x74, 0x6f, 0x72, + 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, + 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, + 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x30, 0x01, 0x12, 0x67, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x33, 0x5a, + 0x31, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, + 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_tabletmanagerservice_proto_goTypes = []interface{}{ @@ -428,92 +462,100 @@ var file_tabletmanagerservice_proto_goTypes = []interface{}{ (*tabletmanagerdata.UnlockTablesRequest)(nil), // 15: tabletmanagerdata.UnlockTablesRequest (*tabletmanagerdata.ExecuteQueryRequest)(nil), // 16: tabletmanagerdata.ExecuteQueryRequest (*tabletmanagerdata.ExecuteFetchAsDbaRequest)(nil), // 17: tabletmanagerdata.ExecuteFetchAsDbaRequest - (*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)(nil), // 18: 
tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - (*tabletmanagerdata.ExecuteFetchAsAppRequest)(nil), // 19: tabletmanagerdata.ExecuteFetchAsAppRequest - (*tabletmanagerdata.ReplicationStatusRequest)(nil), // 20: tabletmanagerdata.ReplicationStatusRequest - (*tabletmanagerdata.PrimaryStatusRequest)(nil), // 21: tabletmanagerdata.PrimaryStatusRequest - (*tabletmanagerdata.PrimaryPositionRequest)(nil), // 22: tabletmanagerdata.PrimaryPositionRequest - (*tabletmanagerdata.WaitForPositionRequest)(nil), // 23: tabletmanagerdata.WaitForPositionRequest - (*tabletmanagerdata.StopReplicationRequest)(nil), // 24: tabletmanagerdata.StopReplicationRequest - (*tabletmanagerdata.StopReplicationMinimumRequest)(nil), // 25: tabletmanagerdata.StopReplicationMinimumRequest - (*tabletmanagerdata.StartReplicationRequest)(nil), // 26: tabletmanagerdata.StartReplicationRequest - (*tabletmanagerdata.StartReplicationUntilAfterRequest)(nil), // 27: tabletmanagerdata.StartReplicationUntilAfterRequest - (*tabletmanagerdata.GetReplicasRequest)(nil), // 28: tabletmanagerdata.GetReplicasRequest - (*tabletmanagerdata.CreateVReplicationWorkflowRequest)(nil), // 29: tabletmanagerdata.CreateVReplicationWorkflowRequest - (*tabletmanagerdata.DeleteVReplicationWorkflowRequest)(nil), // 30: tabletmanagerdata.DeleteVReplicationWorkflowRequest - (*tabletmanagerdata.ReadVReplicationWorkflowRequest)(nil), // 31: tabletmanagerdata.ReadVReplicationWorkflowRequest - (*tabletmanagerdata.VReplicationExecRequest)(nil), // 32: tabletmanagerdata.VReplicationExecRequest - (*tabletmanagerdata.VReplicationWaitForPosRequest)(nil), // 33: tabletmanagerdata.VReplicationWaitForPosRequest - (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 34: tabletmanagerdata.UpdateVReplicationWorkflowRequest - (*tabletmanagerdata.VDiffRequest)(nil), // 35: tabletmanagerdata.VDiffRequest - (*tabletmanagerdata.ResetReplicationRequest)(nil), // 36: tabletmanagerdata.ResetReplicationRequest - 
(*tabletmanagerdata.InitPrimaryRequest)(nil), // 37: tabletmanagerdata.InitPrimaryRequest - (*tabletmanagerdata.PopulateReparentJournalRequest)(nil), // 38: tabletmanagerdata.PopulateReparentJournalRequest - (*tabletmanagerdata.InitReplicaRequest)(nil), // 39: tabletmanagerdata.InitReplicaRequest - (*tabletmanagerdata.DemotePrimaryRequest)(nil), // 40: tabletmanagerdata.DemotePrimaryRequest - (*tabletmanagerdata.UndoDemotePrimaryRequest)(nil), // 41: tabletmanagerdata.UndoDemotePrimaryRequest - (*tabletmanagerdata.ReplicaWasPromotedRequest)(nil), // 42: tabletmanagerdata.ReplicaWasPromotedRequest - (*tabletmanagerdata.ResetReplicationParametersRequest)(nil), // 43: tabletmanagerdata.ResetReplicationParametersRequest - (*tabletmanagerdata.FullStatusRequest)(nil), // 44: tabletmanagerdata.FullStatusRequest - (*tabletmanagerdata.SetReplicationSourceRequest)(nil), // 45: tabletmanagerdata.SetReplicationSourceRequest - (*tabletmanagerdata.ReplicaWasRestartedRequest)(nil), // 46: tabletmanagerdata.ReplicaWasRestartedRequest - (*tabletmanagerdata.StopReplicationAndGetStatusRequest)(nil), // 47: tabletmanagerdata.StopReplicationAndGetStatusRequest - (*tabletmanagerdata.PromoteReplicaRequest)(nil), // 48: tabletmanagerdata.PromoteReplicaRequest - (*tabletmanagerdata.BackupRequest)(nil), // 49: tabletmanagerdata.BackupRequest - (*tabletmanagerdata.RestoreFromBackupRequest)(nil), // 50: tabletmanagerdata.RestoreFromBackupRequest - (*tabletmanagerdata.CheckThrottlerRequest)(nil), // 51: tabletmanagerdata.CheckThrottlerRequest - (*tabletmanagerdata.PingResponse)(nil), // 52: tabletmanagerdata.PingResponse - (*tabletmanagerdata.SleepResponse)(nil), // 53: tabletmanagerdata.SleepResponse - (*tabletmanagerdata.ExecuteHookResponse)(nil), // 54: tabletmanagerdata.ExecuteHookResponse - (*tabletmanagerdata.GetSchemaResponse)(nil), // 55: tabletmanagerdata.GetSchemaResponse - (*tabletmanagerdata.GetPermissionsResponse)(nil), // 56: tabletmanagerdata.GetPermissionsResponse - 
(*tabletmanagerdata.SetReadOnlyResponse)(nil), // 57: tabletmanagerdata.SetReadOnlyResponse - (*tabletmanagerdata.SetReadWriteResponse)(nil), // 58: tabletmanagerdata.SetReadWriteResponse - (*tabletmanagerdata.ChangeTypeResponse)(nil), // 59: tabletmanagerdata.ChangeTypeResponse - (*tabletmanagerdata.RefreshStateResponse)(nil), // 60: tabletmanagerdata.RefreshStateResponse - (*tabletmanagerdata.RunHealthCheckResponse)(nil), // 61: tabletmanagerdata.RunHealthCheckResponse - (*tabletmanagerdata.ReloadSchemaResponse)(nil), // 62: tabletmanagerdata.ReloadSchemaResponse - (*tabletmanagerdata.PreflightSchemaResponse)(nil), // 63: tabletmanagerdata.PreflightSchemaResponse - (*tabletmanagerdata.ApplySchemaResponse)(nil), // 64: tabletmanagerdata.ApplySchemaResponse - (*tabletmanagerdata.ResetSequencesResponse)(nil), // 65: tabletmanagerdata.ResetSequencesResponse - (*tabletmanagerdata.LockTablesResponse)(nil), // 66: tabletmanagerdata.LockTablesResponse - (*tabletmanagerdata.UnlockTablesResponse)(nil), // 67: tabletmanagerdata.UnlockTablesResponse - (*tabletmanagerdata.ExecuteQueryResponse)(nil), // 68: tabletmanagerdata.ExecuteQueryResponse - (*tabletmanagerdata.ExecuteFetchAsDbaResponse)(nil), // 69: tabletmanagerdata.ExecuteFetchAsDbaResponse - (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)(nil), // 70: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - (*tabletmanagerdata.ExecuteFetchAsAppResponse)(nil), // 71: tabletmanagerdata.ExecuteFetchAsAppResponse - (*tabletmanagerdata.ReplicationStatusResponse)(nil), // 72: tabletmanagerdata.ReplicationStatusResponse - (*tabletmanagerdata.PrimaryStatusResponse)(nil), // 73: tabletmanagerdata.PrimaryStatusResponse - (*tabletmanagerdata.PrimaryPositionResponse)(nil), // 74: tabletmanagerdata.PrimaryPositionResponse - (*tabletmanagerdata.WaitForPositionResponse)(nil), // 75: tabletmanagerdata.WaitForPositionResponse - (*tabletmanagerdata.StopReplicationResponse)(nil), // 76: tabletmanagerdata.StopReplicationResponse - 
(*tabletmanagerdata.StopReplicationMinimumResponse)(nil), // 77: tabletmanagerdata.StopReplicationMinimumResponse - (*tabletmanagerdata.StartReplicationResponse)(nil), // 78: tabletmanagerdata.StartReplicationResponse - (*tabletmanagerdata.StartReplicationUntilAfterResponse)(nil), // 79: tabletmanagerdata.StartReplicationUntilAfterResponse - (*tabletmanagerdata.GetReplicasResponse)(nil), // 80: tabletmanagerdata.GetReplicasResponse - (*tabletmanagerdata.CreateVReplicationWorkflowResponse)(nil), // 81: tabletmanagerdata.CreateVReplicationWorkflowResponse - (*tabletmanagerdata.DeleteVReplicationWorkflowResponse)(nil), // 82: tabletmanagerdata.DeleteVReplicationWorkflowResponse - (*tabletmanagerdata.ReadVReplicationWorkflowResponse)(nil), // 83: tabletmanagerdata.ReadVReplicationWorkflowResponse - (*tabletmanagerdata.VReplicationExecResponse)(nil), // 84: tabletmanagerdata.VReplicationExecResponse - (*tabletmanagerdata.VReplicationWaitForPosResponse)(nil), // 85: tabletmanagerdata.VReplicationWaitForPosResponse - (*tabletmanagerdata.UpdateVReplicationWorkflowResponse)(nil), // 86: tabletmanagerdata.UpdateVReplicationWorkflowResponse - (*tabletmanagerdata.VDiffResponse)(nil), // 87: tabletmanagerdata.VDiffResponse - (*tabletmanagerdata.ResetReplicationResponse)(nil), // 88: tabletmanagerdata.ResetReplicationResponse - (*tabletmanagerdata.InitPrimaryResponse)(nil), // 89: tabletmanagerdata.InitPrimaryResponse - (*tabletmanagerdata.PopulateReparentJournalResponse)(nil), // 90: tabletmanagerdata.PopulateReparentJournalResponse - (*tabletmanagerdata.InitReplicaResponse)(nil), // 91: tabletmanagerdata.InitReplicaResponse - (*tabletmanagerdata.DemotePrimaryResponse)(nil), // 92: tabletmanagerdata.DemotePrimaryResponse - (*tabletmanagerdata.UndoDemotePrimaryResponse)(nil), // 93: tabletmanagerdata.UndoDemotePrimaryResponse - (*tabletmanagerdata.ReplicaWasPromotedResponse)(nil), // 94: tabletmanagerdata.ReplicaWasPromotedResponse - 
(*tabletmanagerdata.ResetReplicationParametersResponse)(nil), // 95: tabletmanagerdata.ResetReplicationParametersResponse - (*tabletmanagerdata.FullStatusResponse)(nil), // 96: tabletmanagerdata.FullStatusResponse - (*tabletmanagerdata.SetReplicationSourceResponse)(nil), // 97: tabletmanagerdata.SetReplicationSourceResponse - (*tabletmanagerdata.ReplicaWasRestartedResponse)(nil), // 98: tabletmanagerdata.ReplicaWasRestartedResponse - (*tabletmanagerdata.StopReplicationAndGetStatusResponse)(nil), // 99: tabletmanagerdata.StopReplicationAndGetStatusResponse - (*tabletmanagerdata.PromoteReplicaResponse)(nil), // 100: tabletmanagerdata.PromoteReplicaResponse - (*tabletmanagerdata.BackupResponse)(nil), // 101: tabletmanagerdata.BackupResponse - (*tabletmanagerdata.RestoreFromBackupResponse)(nil), // 102: tabletmanagerdata.RestoreFromBackupResponse - (*tabletmanagerdata.CheckThrottlerResponse)(nil), // 103: tabletmanagerdata.CheckThrottlerResponse + (*tabletmanagerdata.ExecuteMultiFetchAsDbaRequest)(nil), // 18: tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + (*tabletmanagerdata.ExecuteFetchAsAllPrivsRequest)(nil), // 19: tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + (*tabletmanagerdata.ExecuteFetchAsAppRequest)(nil), // 20: tabletmanagerdata.ExecuteFetchAsAppRequest + (*tabletmanagerdata.ReplicationStatusRequest)(nil), // 21: tabletmanagerdata.ReplicationStatusRequest + (*tabletmanagerdata.PrimaryStatusRequest)(nil), // 22: tabletmanagerdata.PrimaryStatusRequest + (*tabletmanagerdata.PrimaryPositionRequest)(nil), // 23: tabletmanagerdata.PrimaryPositionRequest + (*tabletmanagerdata.WaitForPositionRequest)(nil), // 24: tabletmanagerdata.WaitForPositionRequest + (*tabletmanagerdata.StopReplicationRequest)(nil), // 25: tabletmanagerdata.StopReplicationRequest + (*tabletmanagerdata.StopReplicationMinimumRequest)(nil), // 26: tabletmanagerdata.StopReplicationMinimumRequest + (*tabletmanagerdata.StartReplicationRequest)(nil), // 27: 
tabletmanagerdata.StartReplicationRequest + (*tabletmanagerdata.StartReplicationUntilAfterRequest)(nil), // 28: tabletmanagerdata.StartReplicationUntilAfterRequest + (*tabletmanagerdata.GetReplicasRequest)(nil), // 29: tabletmanagerdata.GetReplicasRequest + (*tabletmanagerdata.CreateVReplicationWorkflowRequest)(nil), // 30: tabletmanagerdata.CreateVReplicationWorkflowRequest + (*tabletmanagerdata.DeleteVReplicationWorkflowRequest)(nil), // 31: tabletmanagerdata.DeleteVReplicationWorkflowRequest + (*tabletmanagerdata.HasVReplicationWorkflowsRequest)(nil), // 32: tabletmanagerdata.HasVReplicationWorkflowsRequest + (*tabletmanagerdata.ReadVReplicationWorkflowRequest)(nil), // 33: tabletmanagerdata.ReadVReplicationWorkflowRequest + (*tabletmanagerdata.ReadVReplicationWorkflowsRequest)(nil), // 34: tabletmanagerdata.ReadVReplicationWorkflowsRequest + (*tabletmanagerdata.VReplicationExecRequest)(nil), // 35: tabletmanagerdata.VReplicationExecRequest + (*tabletmanagerdata.VReplicationWaitForPosRequest)(nil), // 36: tabletmanagerdata.VReplicationWaitForPosRequest + (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 37: tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*tabletmanagerdata.UpdateVReplicationWorkflowsRequest)(nil), // 38: tabletmanagerdata.UpdateVReplicationWorkflowsRequest + (*tabletmanagerdata.VDiffRequest)(nil), // 39: tabletmanagerdata.VDiffRequest + (*tabletmanagerdata.ResetReplicationRequest)(nil), // 40: tabletmanagerdata.ResetReplicationRequest + (*tabletmanagerdata.InitPrimaryRequest)(nil), // 41: tabletmanagerdata.InitPrimaryRequest + (*tabletmanagerdata.PopulateReparentJournalRequest)(nil), // 42: tabletmanagerdata.PopulateReparentJournalRequest + (*tabletmanagerdata.InitReplicaRequest)(nil), // 43: tabletmanagerdata.InitReplicaRequest + (*tabletmanagerdata.DemotePrimaryRequest)(nil), // 44: tabletmanagerdata.DemotePrimaryRequest + (*tabletmanagerdata.UndoDemotePrimaryRequest)(nil), // 45: tabletmanagerdata.UndoDemotePrimaryRequest 
+ (*tabletmanagerdata.ReplicaWasPromotedRequest)(nil), // 46: tabletmanagerdata.ReplicaWasPromotedRequest + (*tabletmanagerdata.ResetReplicationParametersRequest)(nil), // 47: tabletmanagerdata.ResetReplicationParametersRequest + (*tabletmanagerdata.FullStatusRequest)(nil), // 48: tabletmanagerdata.FullStatusRequest + (*tabletmanagerdata.SetReplicationSourceRequest)(nil), // 49: tabletmanagerdata.SetReplicationSourceRequest + (*tabletmanagerdata.ReplicaWasRestartedRequest)(nil), // 50: tabletmanagerdata.ReplicaWasRestartedRequest + (*tabletmanagerdata.StopReplicationAndGetStatusRequest)(nil), // 51: tabletmanagerdata.StopReplicationAndGetStatusRequest + (*tabletmanagerdata.PromoteReplicaRequest)(nil), // 52: tabletmanagerdata.PromoteReplicaRequest + (*tabletmanagerdata.BackupRequest)(nil), // 53: tabletmanagerdata.BackupRequest + (*tabletmanagerdata.RestoreFromBackupRequest)(nil), // 54: tabletmanagerdata.RestoreFromBackupRequest + (*tabletmanagerdata.CheckThrottlerRequest)(nil), // 55: tabletmanagerdata.CheckThrottlerRequest + (*tabletmanagerdata.PingResponse)(nil), // 56: tabletmanagerdata.PingResponse + (*tabletmanagerdata.SleepResponse)(nil), // 57: tabletmanagerdata.SleepResponse + (*tabletmanagerdata.ExecuteHookResponse)(nil), // 58: tabletmanagerdata.ExecuteHookResponse + (*tabletmanagerdata.GetSchemaResponse)(nil), // 59: tabletmanagerdata.GetSchemaResponse + (*tabletmanagerdata.GetPermissionsResponse)(nil), // 60: tabletmanagerdata.GetPermissionsResponse + (*tabletmanagerdata.SetReadOnlyResponse)(nil), // 61: tabletmanagerdata.SetReadOnlyResponse + (*tabletmanagerdata.SetReadWriteResponse)(nil), // 62: tabletmanagerdata.SetReadWriteResponse + (*tabletmanagerdata.ChangeTypeResponse)(nil), // 63: tabletmanagerdata.ChangeTypeResponse + (*tabletmanagerdata.RefreshStateResponse)(nil), // 64: tabletmanagerdata.RefreshStateResponse + (*tabletmanagerdata.RunHealthCheckResponse)(nil), // 65: tabletmanagerdata.RunHealthCheckResponse + 
(*tabletmanagerdata.ReloadSchemaResponse)(nil), // 66: tabletmanagerdata.ReloadSchemaResponse + (*tabletmanagerdata.PreflightSchemaResponse)(nil), // 67: tabletmanagerdata.PreflightSchemaResponse + (*tabletmanagerdata.ApplySchemaResponse)(nil), // 68: tabletmanagerdata.ApplySchemaResponse + (*tabletmanagerdata.ResetSequencesResponse)(nil), // 69: tabletmanagerdata.ResetSequencesResponse + (*tabletmanagerdata.LockTablesResponse)(nil), // 70: tabletmanagerdata.LockTablesResponse + (*tabletmanagerdata.UnlockTablesResponse)(nil), // 71: tabletmanagerdata.UnlockTablesResponse + (*tabletmanagerdata.ExecuteQueryResponse)(nil), // 72: tabletmanagerdata.ExecuteQueryResponse + (*tabletmanagerdata.ExecuteFetchAsDbaResponse)(nil), // 73: tabletmanagerdata.ExecuteFetchAsDbaResponse + (*tabletmanagerdata.ExecuteMultiFetchAsDbaResponse)(nil), // 74: tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse)(nil), // 75: tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + (*tabletmanagerdata.ExecuteFetchAsAppResponse)(nil), // 76: tabletmanagerdata.ExecuteFetchAsAppResponse + (*tabletmanagerdata.ReplicationStatusResponse)(nil), // 77: tabletmanagerdata.ReplicationStatusResponse + (*tabletmanagerdata.PrimaryStatusResponse)(nil), // 78: tabletmanagerdata.PrimaryStatusResponse + (*tabletmanagerdata.PrimaryPositionResponse)(nil), // 79: tabletmanagerdata.PrimaryPositionResponse + (*tabletmanagerdata.WaitForPositionResponse)(nil), // 80: tabletmanagerdata.WaitForPositionResponse + (*tabletmanagerdata.StopReplicationResponse)(nil), // 81: tabletmanagerdata.StopReplicationResponse + (*tabletmanagerdata.StopReplicationMinimumResponse)(nil), // 82: tabletmanagerdata.StopReplicationMinimumResponse + (*tabletmanagerdata.StartReplicationResponse)(nil), // 83: tabletmanagerdata.StartReplicationResponse + (*tabletmanagerdata.StartReplicationUntilAfterResponse)(nil), // 84: tabletmanagerdata.StartReplicationUntilAfterResponse + 
(*tabletmanagerdata.GetReplicasResponse)(nil), // 85: tabletmanagerdata.GetReplicasResponse + (*tabletmanagerdata.CreateVReplicationWorkflowResponse)(nil), // 86: tabletmanagerdata.CreateVReplicationWorkflowResponse + (*tabletmanagerdata.DeleteVReplicationWorkflowResponse)(nil), // 87: tabletmanagerdata.DeleteVReplicationWorkflowResponse + (*tabletmanagerdata.HasVReplicationWorkflowsResponse)(nil), // 88: tabletmanagerdata.HasVReplicationWorkflowsResponse + (*tabletmanagerdata.ReadVReplicationWorkflowResponse)(nil), // 89: tabletmanagerdata.ReadVReplicationWorkflowResponse + (*tabletmanagerdata.ReadVReplicationWorkflowsResponse)(nil), // 90: tabletmanagerdata.ReadVReplicationWorkflowsResponse + (*tabletmanagerdata.VReplicationExecResponse)(nil), // 91: tabletmanagerdata.VReplicationExecResponse + (*tabletmanagerdata.VReplicationWaitForPosResponse)(nil), // 92: tabletmanagerdata.VReplicationWaitForPosResponse + (*tabletmanagerdata.UpdateVReplicationWorkflowResponse)(nil), // 93: tabletmanagerdata.UpdateVReplicationWorkflowResponse + (*tabletmanagerdata.UpdateVReplicationWorkflowsResponse)(nil), // 94: tabletmanagerdata.UpdateVReplicationWorkflowsResponse + (*tabletmanagerdata.VDiffResponse)(nil), // 95: tabletmanagerdata.VDiffResponse + (*tabletmanagerdata.ResetReplicationResponse)(nil), // 96: tabletmanagerdata.ResetReplicationResponse + (*tabletmanagerdata.InitPrimaryResponse)(nil), // 97: tabletmanagerdata.InitPrimaryResponse + (*tabletmanagerdata.PopulateReparentJournalResponse)(nil), // 98: tabletmanagerdata.PopulateReparentJournalResponse + (*tabletmanagerdata.InitReplicaResponse)(nil), // 99: tabletmanagerdata.InitReplicaResponse + (*tabletmanagerdata.DemotePrimaryResponse)(nil), // 100: tabletmanagerdata.DemotePrimaryResponse + (*tabletmanagerdata.UndoDemotePrimaryResponse)(nil), // 101: tabletmanagerdata.UndoDemotePrimaryResponse + (*tabletmanagerdata.ReplicaWasPromotedResponse)(nil), // 102: tabletmanagerdata.ReplicaWasPromotedResponse + 
(*tabletmanagerdata.ResetReplicationParametersResponse)(nil), // 103: tabletmanagerdata.ResetReplicationParametersResponse + (*tabletmanagerdata.FullStatusResponse)(nil), // 104: tabletmanagerdata.FullStatusResponse + (*tabletmanagerdata.SetReplicationSourceResponse)(nil), // 105: tabletmanagerdata.SetReplicationSourceResponse + (*tabletmanagerdata.ReplicaWasRestartedResponse)(nil), // 106: tabletmanagerdata.ReplicaWasRestartedResponse + (*tabletmanagerdata.StopReplicationAndGetStatusResponse)(nil), // 107: tabletmanagerdata.StopReplicationAndGetStatusResponse + (*tabletmanagerdata.PromoteReplicaResponse)(nil), // 108: tabletmanagerdata.PromoteReplicaResponse + (*tabletmanagerdata.BackupResponse)(nil), // 109: tabletmanagerdata.BackupResponse + (*tabletmanagerdata.RestoreFromBackupResponse)(nil), // 110: tabletmanagerdata.RestoreFromBackupResponse + (*tabletmanagerdata.CheckThrottlerResponse)(nil), // 111: tabletmanagerdata.CheckThrottlerResponse } var file_tabletmanagerservice_proto_depIdxs = []int32{ 0, // 0: tabletmanagerservice.TabletManager.Ping:input_type -> tabletmanagerdata.PingRequest @@ -534,94 +576,102 @@ var file_tabletmanagerservice_proto_depIdxs = []int32{ 15, // 15: tabletmanagerservice.TabletManager.UnlockTables:input_type -> tabletmanagerdata.UnlockTablesRequest 16, // 16: tabletmanagerservice.TabletManager.ExecuteQuery:input_type -> tabletmanagerdata.ExecuteQueryRequest 17, // 17: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:input_type -> tabletmanagerdata.ExecuteFetchAsDbaRequest - 18, // 18: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:input_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsRequest - 19, // 19: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:input_type -> tabletmanagerdata.ExecuteFetchAsAppRequest - 20, // 20: tabletmanagerservice.TabletManager.ReplicationStatus:input_type -> tabletmanagerdata.ReplicationStatusRequest - 21, // 21: tabletmanagerservice.TabletManager.PrimaryStatus:input_type -> 
tabletmanagerdata.PrimaryStatusRequest - 22, // 22: tabletmanagerservice.TabletManager.PrimaryPosition:input_type -> tabletmanagerdata.PrimaryPositionRequest - 23, // 23: tabletmanagerservice.TabletManager.WaitForPosition:input_type -> tabletmanagerdata.WaitForPositionRequest - 24, // 24: tabletmanagerservice.TabletManager.StopReplication:input_type -> tabletmanagerdata.StopReplicationRequest - 25, // 25: tabletmanagerservice.TabletManager.StopReplicationMinimum:input_type -> tabletmanagerdata.StopReplicationMinimumRequest - 26, // 26: tabletmanagerservice.TabletManager.StartReplication:input_type -> tabletmanagerdata.StartReplicationRequest - 27, // 27: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:input_type -> tabletmanagerdata.StartReplicationUntilAfterRequest - 28, // 28: tabletmanagerservice.TabletManager.GetReplicas:input_type -> tabletmanagerdata.GetReplicasRequest - 29, // 29: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:input_type -> tabletmanagerdata.CreateVReplicationWorkflowRequest - 30, // 30: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:input_type -> tabletmanagerdata.DeleteVReplicationWorkflowRequest - 31, // 31: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:input_type -> tabletmanagerdata.ReadVReplicationWorkflowRequest - 32, // 32: tabletmanagerservice.TabletManager.VReplicationExec:input_type -> tabletmanagerdata.VReplicationExecRequest - 33, // 33: tabletmanagerservice.TabletManager.VReplicationWaitForPos:input_type -> tabletmanagerdata.VReplicationWaitForPosRequest - 34, // 34: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:input_type -> tabletmanagerdata.UpdateVReplicationWorkflowRequest - 35, // 35: tabletmanagerservice.TabletManager.VDiff:input_type -> tabletmanagerdata.VDiffRequest - 36, // 36: tabletmanagerservice.TabletManager.ResetReplication:input_type -> tabletmanagerdata.ResetReplicationRequest - 37, // 37: 
tabletmanagerservice.TabletManager.InitPrimary:input_type -> tabletmanagerdata.InitPrimaryRequest - 38, // 38: tabletmanagerservice.TabletManager.PopulateReparentJournal:input_type -> tabletmanagerdata.PopulateReparentJournalRequest - 39, // 39: tabletmanagerservice.TabletManager.InitReplica:input_type -> tabletmanagerdata.InitReplicaRequest - 40, // 40: tabletmanagerservice.TabletManager.DemotePrimary:input_type -> tabletmanagerdata.DemotePrimaryRequest - 41, // 41: tabletmanagerservice.TabletManager.UndoDemotePrimary:input_type -> tabletmanagerdata.UndoDemotePrimaryRequest - 42, // 42: tabletmanagerservice.TabletManager.ReplicaWasPromoted:input_type -> tabletmanagerdata.ReplicaWasPromotedRequest - 43, // 43: tabletmanagerservice.TabletManager.ResetReplicationParameters:input_type -> tabletmanagerdata.ResetReplicationParametersRequest - 44, // 44: tabletmanagerservice.TabletManager.FullStatus:input_type -> tabletmanagerdata.FullStatusRequest - 45, // 45: tabletmanagerservice.TabletManager.SetReplicationSource:input_type -> tabletmanagerdata.SetReplicationSourceRequest - 46, // 46: tabletmanagerservice.TabletManager.ReplicaWasRestarted:input_type -> tabletmanagerdata.ReplicaWasRestartedRequest - 47, // 47: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:input_type -> tabletmanagerdata.StopReplicationAndGetStatusRequest - 48, // 48: tabletmanagerservice.TabletManager.PromoteReplica:input_type -> tabletmanagerdata.PromoteReplicaRequest - 49, // 49: tabletmanagerservice.TabletManager.Backup:input_type -> tabletmanagerdata.BackupRequest - 50, // 50: tabletmanagerservice.TabletManager.RestoreFromBackup:input_type -> tabletmanagerdata.RestoreFromBackupRequest - 51, // 51: tabletmanagerservice.TabletManager.CheckThrottler:input_type -> tabletmanagerdata.CheckThrottlerRequest - 52, // 52: tabletmanagerservice.TabletManager.Ping:output_type -> tabletmanagerdata.PingResponse - 53, // 53: tabletmanagerservice.TabletManager.Sleep:output_type -> 
tabletmanagerdata.SleepResponse - 54, // 54: tabletmanagerservice.TabletManager.ExecuteHook:output_type -> tabletmanagerdata.ExecuteHookResponse - 55, // 55: tabletmanagerservice.TabletManager.GetSchema:output_type -> tabletmanagerdata.GetSchemaResponse - 56, // 56: tabletmanagerservice.TabletManager.GetPermissions:output_type -> tabletmanagerdata.GetPermissionsResponse - 57, // 57: tabletmanagerservice.TabletManager.SetReadOnly:output_type -> tabletmanagerdata.SetReadOnlyResponse - 58, // 58: tabletmanagerservice.TabletManager.SetReadWrite:output_type -> tabletmanagerdata.SetReadWriteResponse - 59, // 59: tabletmanagerservice.TabletManager.ChangeType:output_type -> tabletmanagerdata.ChangeTypeResponse - 60, // 60: tabletmanagerservice.TabletManager.RefreshState:output_type -> tabletmanagerdata.RefreshStateResponse - 61, // 61: tabletmanagerservice.TabletManager.RunHealthCheck:output_type -> tabletmanagerdata.RunHealthCheckResponse - 62, // 62: tabletmanagerservice.TabletManager.ReloadSchema:output_type -> tabletmanagerdata.ReloadSchemaResponse - 63, // 63: tabletmanagerservice.TabletManager.PreflightSchema:output_type -> tabletmanagerdata.PreflightSchemaResponse - 64, // 64: tabletmanagerservice.TabletManager.ApplySchema:output_type -> tabletmanagerdata.ApplySchemaResponse - 65, // 65: tabletmanagerservice.TabletManager.ResetSequences:output_type -> tabletmanagerdata.ResetSequencesResponse - 66, // 66: tabletmanagerservice.TabletManager.LockTables:output_type -> tabletmanagerdata.LockTablesResponse - 67, // 67: tabletmanagerservice.TabletManager.UnlockTables:output_type -> tabletmanagerdata.UnlockTablesResponse - 68, // 68: tabletmanagerservice.TabletManager.ExecuteQuery:output_type -> tabletmanagerdata.ExecuteQueryResponse - 69, // 69: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:output_type -> tabletmanagerdata.ExecuteFetchAsDbaResponse - 70, // 70: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:output_type -> 
tabletmanagerdata.ExecuteFetchAsAllPrivsResponse - 71, // 71: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:output_type -> tabletmanagerdata.ExecuteFetchAsAppResponse - 72, // 72: tabletmanagerservice.TabletManager.ReplicationStatus:output_type -> tabletmanagerdata.ReplicationStatusResponse - 73, // 73: tabletmanagerservice.TabletManager.PrimaryStatus:output_type -> tabletmanagerdata.PrimaryStatusResponse - 74, // 74: tabletmanagerservice.TabletManager.PrimaryPosition:output_type -> tabletmanagerdata.PrimaryPositionResponse - 75, // 75: tabletmanagerservice.TabletManager.WaitForPosition:output_type -> tabletmanagerdata.WaitForPositionResponse - 76, // 76: tabletmanagerservice.TabletManager.StopReplication:output_type -> tabletmanagerdata.StopReplicationResponse - 77, // 77: tabletmanagerservice.TabletManager.StopReplicationMinimum:output_type -> tabletmanagerdata.StopReplicationMinimumResponse - 78, // 78: tabletmanagerservice.TabletManager.StartReplication:output_type -> tabletmanagerdata.StartReplicationResponse - 79, // 79: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:output_type -> tabletmanagerdata.StartReplicationUntilAfterResponse - 80, // 80: tabletmanagerservice.TabletManager.GetReplicas:output_type -> tabletmanagerdata.GetReplicasResponse - 81, // 81: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:output_type -> tabletmanagerdata.CreateVReplicationWorkflowResponse - 82, // 82: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:output_type -> tabletmanagerdata.DeleteVReplicationWorkflowResponse - 83, // 83: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:output_type -> tabletmanagerdata.ReadVReplicationWorkflowResponse - 84, // 84: tabletmanagerservice.TabletManager.VReplicationExec:output_type -> tabletmanagerdata.VReplicationExecResponse - 85, // 85: tabletmanagerservice.TabletManager.VReplicationWaitForPos:output_type -> tabletmanagerdata.VReplicationWaitForPosResponse - 86, // 86: 
tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:output_type -> tabletmanagerdata.UpdateVReplicationWorkflowResponse - 87, // 87: tabletmanagerservice.TabletManager.VDiff:output_type -> tabletmanagerdata.VDiffResponse - 88, // 88: tabletmanagerservice.TabletManager.ResetReplication:output_type -> tabletmanagerdata.ResetReplicationResponse - 89, // 89: tabletmanagerservice.TabletManager.InitPrimary:output_type -> tabletmanagerdata.InitPrimaryResponse - 90, // 90: tabletmanagerservice.TabletManager.PopulateReparentJournal:output_type -> tabletmanagerdata.PopulateReparentJournalResponse - 91, // 91: tabletmanagerservice.TabletManager.InitReplica:output_type -> tabletmanagerdata.InitReplicaResponse - 92, // 92: tabletmanagerservice.TabletManager.DemotePrimary:output_type -> tabletmanagerdata.DemotePrimaryResponse - 93, // 93: tabletmanagerservice.TabletManager.UndoDemotePrimary:output_type -> tabletmanagerdata.UndoDemotePrimaryResponse - 94, // 94: tabletmanagerservice.TabletManager.ReplicaWasPromoted:output_type -> tabletmanagerdata.ReplicaWasPromotedResponse - 95, // 95: tabletmanagerservice.TabletManager.ResetReplicationParameters:output_type -> tabletmanagerdata.ResetReplicationParametersResponse - 96, // 96: tabletmanagerservice.TabletManager.FullStatus:output_type -> tabletmanagerdata.FullStatusResponse - 97, // 97: tabletmanagerservice.TabletManager.SetReplicationSource:output_type -> tabletmanagerdata.SetReplicationSourceResponse - 98, // 98: tabletmanagerservice.TabletManager.ReplicaWasRestarted:output_type -> tabletmanagerdata.ReplicaWasRestartedResponse - 99, // 99: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:output_type -> tabletmanagerdata.StopReplicationAndGetStatusResponse - 100, // 100: tabletmanagerservice.TabletManager.PromoteReplica:output_type -> tabletmanagerdata.PromoteReplicaResponse - 101, // 101: tabletmanagerservice.TabletManager.Backup:output_type -> tabletmanagerdata.BackupResponse - 102, // 102: 
tabletmanagerservice.TabletManager.RestoreFromBackup:output_type -> tabletmanagerdata.RestoreFromBackupResponse - 103, // 103: tabletmanagerservice.TabletManager.CheckThrottler:output_type -> tabletmanagerdata.CheckThrottlerResponse - 52, // [52:104] is the sub-list for method output_type - 0, // [0:52] is the sub-list for method input_type + 18, // 18: tabletmanagerservice.TabletManager.ExecuteMultiFetchAsDba:input_type -> tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + 19, // 19: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:input_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsRequest + 20, // 20: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:input_type -> tabletmanagerdata.ExecuteFetchAsAppRequest + 21, // 21: tabletmanagerservice.TabletManager.ReplicationStatus:input_type -> tabletmanagerdata.ReplicationStatusRequest + 22, // 22: tabletmanagerservice.TabletManager.PrimaryStatus:input_type -> tabletmanagerdata.PrimaryStatusRequest + 23, // 23: tabletmanagerservice.TabletManager.PrimaryPosition:input_type -> tabletmanagerdata.PrimaryPositionRequest + 24, // 24: tabletmanagerservice.TabletManager.WaitForPosition:input_type -> tabletmanagerdata.WaitForPositionRequest + 25, // 25: tabletmanagerservice.TabletManager.StopReplication:input_type -> tabletmanagerdata.StopReplicationRequest + 26, // 26: tabletmanagerservice.TabletManager.StopReplicationMinimum:input_type -> tabletmanagerdata.StopReplicationMinimumRequest + 27, // 27: tabletmanagerservice.TabletManager.StartReplication:input_type -> tabletmanagerdata.StartReplicationRequest + 28, // 28: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:input_type -> tabletmanagerdata.StartReplicationUntilAfterRequest + 29, // 29: tabletmanagerservice.TabletManager.GetReplicas:input_type -> tabletmanagerdata.GetReplicasRequest + 30, // 30: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:input_type -> tabletmanagerdata.CreateVReplicationWorkflowRequest + 31, // 31: 
tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:input_type -> tabletmanagerdata.DeleteVReplicationWorkflowRequest + 32, // 32: tabletmanagerservice.TabletManager.HasVReplicationWorkflows:input_type -> tabletmanagerdata.HasVReplicationWorkflowsRequest + 33, // 33: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:input_type -> tabletmanagerdata.ReadVReplicationWorkflowRequest + 34, // 34: tabletmanagerservice.TabletManager.ReadVReplicationWorkflows:input_type -> tabletmanagerdata.ReadVReplicationWorkflowsRequest + 35, // 35: tabletmanagerservice.TabletManager.VReplicationExec:input_type -> tabletmanagerdata.VReplicationExecRequest + 36, // 36: tabletmanagerservice.TabletManager.VReplicationWaitForPos:input_type -> tabletmanagerdata.VReplicationWaitForPosRequest + 37, // 37: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:input_type -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 38, // 38: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflows:input_type -> tabletmanagerdata.UpdateVReplicationWorkflowsRequest + 39, // 39: tabletmanagerservice.TabletManager.VDiff:input_type -> tabletmanagerdata.VDiffRequest + 40, // 40: tabletmanagerservice.TabletManager.ResetReplication:input_type -> tabletmanagerdata.ResetReplicationRequest + 41, // 41: tabletmanagerservice.TabletManager.InitPrimary:input_type -> tabletmanagerdata.InitPrimaryRequest + 42, // 42: tabletmanagerservice.TabletManager.PopulateReparentJournal:input_type -> tabletmanagerdata.PopulateReparentJournalRequest + 43, // 43: tabletmanagerservice.TabletManager.InitReplica:input_type -> tabletmanagerdata.InitReplicaRequest + 44, // 44: tabletmanagerservice.TabletManager.DemotePrimary:input_type -> tabletmanagerdata.DemotePrimaryRequest + 45, // 45: tabletmanagerservice.TabletManager.UndoDemotePrimary:input_type -> tabletmanagerdata.UndoDemotePrimaryRequest + 46, // 46: tabletmanagerservice.TabletManager.ReplicaWasPromoted:input_type -> 
tabletmanagerdata.ReplicaWasPromotedRequest + 47, // 47: tabletmanagerservice.TabletManager.ResetReplicationParameters:input_type -> tabletmanagerdata.ResetReplicationParametersRequest + 48, // 48: tabletmanagerservice.TabletManager.FullStatus:input_type -> tabletmanagerdata.FullStatusRequest + 49, // 49: tabletmanagerservice.TabletManager.SetReplicationSource:input_type -> tabletmanagerdata.SetReplicationSourceRequest + 50, // 50: tabletmanagerservice.TabletManager.ReplicaWasRestarted:input_type -> tabletmanagerdata.ReplicaWasRestartedRequest + 51, // 51: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:input_type -> tabletmanagerdata.StopReplicationAndGetStatusRequest + 52, // 52: tabletmanagerservice.TabletManager.PromoteReplica:input_type -> tabletmanagerdata.PromoteReplicaRequest + 53, // 53: tabletmanagerservice.TabletManager.Backup:input_type -> tabletmanagerdata.BackupRequest + 54, // 54: tabletmanagerservice.TabletManager.RestoreFromBackup:input_type -> tabletmanagerdata.RestoreFromBackupRequest + 55, // 55: tabletmanagerservice.TabletManager.CheckThrottler:input_type -> tabletmanagerdata.CheckThrottlerRequest + 56, // 56: tabletmanagerservice.TabletManager.Ping:output_type -> tabletmanagerdata.PingResponse + 57, // 57: tabletmanagerservice.TabletManager.Sleep:output_type -> tabletmanagerdata.SleepResponse + 58, // 58: tabletmanagerservice.TabletManager.ExecuteHook:output_type -> tabletmanagerdata.ExecuteHookResponse + 59, // 59: tabletmanagerservice.TabletManager.GetSchema:output_type -> tabletmanagerdata.GetSchemaResponse + 60, // 60: tabletmanagerservice.TabletManager.GetPermissions:output_type -> tabletmanagerdata.GetPermissionsResponse + 61, // 61: tabletmanagerservice.TabletManager.SetReadOnly:output_type -> tabletmanagerdata.SetReadOnlyResponse + 62, // 62: tabletmanagerservice.TabletManager.SetReadWrite:output_type -> tabletmanagerdata.SetReadWriteResponse + 63, // 63: tabletmanagerservice.TabletManager.ChangeType:output_type -> 
tabletmanagerdata.ChangeTypeResponse + 64, // 64: tabletmanagerservice.TabletManager.RefreshState:output_type -> tabletmanagerdata.RefreshStateResponse + 65, // 65: tabletmanagerservice.TabletManager.RunHealthCheck:output_type -> tabletmanagerdata.RunHealthCheckResponse + 66, // 66: tabletmanagerservice.TabletManager.ReloadSchema:output_type -> tabletmanagerdata.ReloadSchemaResponse + 67, // 67: tabletmanagerservice.TabletManager.PreflightSchema:output_type -> tabletmanagerdata.PreflightSchemaResponse + 68, // 68: tabletmanagerservice.TabletManager.ApplySchema:output_type -> tabletmanagerdata.ApplySchemaResponse + 69, // 69: tabletmanagerservice.TabletManager.ResetSequences:output_type -> tabletmanagerdata.ResetSequencesResponse + 70, // 70: tabletmanagerservice.TabletManager.LockTables:output_type -> tabletmanagerdata.LockTablesResponse + 71, // 71: tabletmanagerservice.TabletManager.UnlockTables:output_type -> tabletmanagerdata.UnlockTablesResponse + 72, // 72: tabletmanagerservice.TabletManager.ExecuteQuery:output_type -> tabletmanagerdata.ExecuteQueryResponse + 73, // 73: tabletmanagerservice.TabletManager.ExecuteFetchAsDba:output_type -> tabletmanagerdata.ExecuteFetchAsDbaResponse + 74, // 74: tabletmanagerservice.TabletManager.ExecuteMultiFetchAsDba:output_type -> tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + 75, // 75: tabletmanagerservice.TabletManager.ExecuteFetchAsAllPrivs:output_type -> tabletmanagerdata.ExecuteFetchAsAllPrivsResponse + 76, // 76: tabletmanagerservice.TabletManager.ExecuteFetchAsApp:output_type -> tabletmanagerdata.ExecuteFetchAsAppResponse + 77, // 77: tabletmanagerservice.TabletManager.ReplicationStatus:output_type -> tabletmanagerdata.ReplicationStatusResponse + 78, // 78: tabletmanagerservice.TabletManager.PrimaryStatus:output_type -> tabletmanagerdata.PrimaryStatusResponse + 79, // 79: tabletmanagerservice.TabletManager.PrimaryPosition:output_type -> tabletmanagerdata.PrimaryPositionResponse + 80, // 80: 
tabletmanagerservice.TabletManager.WaitForPosition:output_type -> tabletmanagerdata.WaitForPositionResponse + 81, // 81: tabletmanagerservice.TabletManager.StopReplication:output_type -> tabletmanagerdata.StopReplicationResponse + 82, // 82: tabletmanagerservice.TabletManager.StopReplicationMinimum:output_type -> tabletmanagerdata.StopReplicationMinimumResponse + 83, // 83: tabletmanagerservice.TabletManager.StartReplication:output_type -> tabletmanagerdata.StartReplicationResponse + 84, // 84: tabletmanagerservice.TabletManager.StartReplicationUntilAfter:output_type -> tabletmanagerdata.StartReplicationUntilAfterResponse + 85, // 85: tabletmanagerservice.TabletManager.GetReplicas:output_type -> tabletmanagerdata.GetReplicasResponse + 86, // 86: tabletmanagerservice.TabletManager.CreateVReplicationWorkflow:output_type -> tabletmanagerdata.CreateVReplicationWorkflowResponse + 87, // 87: tabletmanagerservice.TabletManager.DeleteVReplicationWorkflow:output_type -> tabletmanagerdata.DeleteVReplicationWorkflowResponse + 88, // 88: tabletmanagerservice.TabletManager.HasVReplicationWorkflows:output_type -> tabletmanagerdata.HasVReplicationWorkflowsResponse + 89, // 89: tabletmanagerservice.TabletManager.ReadVReplicationWorkflow:output_type -> tabletmanagerdata.ReadVReplicationWorkflowResponse + 90, // 90: tabletmanagerservice.TabletManager.ReadVReplicationWorkflows:output_type -> tabletmanagerdata.ReadVReplicationWorkflowsResponse + 91, // 91: tabletmanagerservice.TabletManager.VReplicationExec:output_type -> tabletmanagerdata.VReplicationExecResponse + 92, // 92: tabletmanagerservice.TabletManager.VReplicationWaitForPos:output_type -> tabletmanagerdata.VReplicationWaitForPosResponse + 93, // 93: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflow:output_type -> tabletmanagerdata.UpdateVReplicationWorkflowResponse + 94, // 94: tabletmanagerservice.TabletManager.UpdateVReplicationWorkflows:output_type -> tabletmanagerdata.UpdateVReplicationWorkflowsResponse + 95, 
// 95: tabletmanagerservice.TabletManager.VDiff:output_type -> tabletmanagerdata.VDiffResponse + 96, // 96: tabletmanagerservice.TabletManager.ResetReplication:output_type -> tabletmanagerdata.ResetReplicationResponse + 97, // 97: tabletmanagerservice.TabletManager.InitPrimary:output_type -> tabletmanagerdata.InitPrimaryResponse + 98, // 98: tabletmanagerservice.TabletManager.PopulateReparentJournal:output_type -> tabletmanagerdata.PopulateReparentJournalResponse + 99, // 99: tabletmanagerservice.TabletManager.InitReplica:output_type -> tabletmanagerdata.InitReplicaResponse + 100, // 100: tabletmanagerservice.TabletManager.DemotePrimary:output_type -> tabletmanagerdata.DemotePrimaryResponse + 101, // 101: tabletmanagerservice.TabletManager.UndoDemotePrimary:output_type -> tabletmanagerdata.UndoDemotePrimaryResponse + 102, // 102: tabletmanagerservice.TabletManager.ReplicaWasPromoted:output_type -> tabletmanagerdata.ReplicaWasPromotedResponse + 103, // 103: tabletmanagerservice.TabletManager.ResetReplicationParameters:output_type -> tabletmanagerdata.ResetReplicationParametersResponse + 104, // 104: tabletmanagerservice.TabletManager.FullStatus:output_type -> tabletmanagerdata.FullStatusResponse + 105, // 105: tabletmanagerservice.TabletManager.SetReplicationSource:output_type -> tabletmanagerdata.SetReplicationSourceResponse + 106, // 106: tabletmanagerservice.TabletManager.ReplicaWasRestarted:output_type -> tabletmanagerdata.ReplicaWasRestartedResponse + 107, // 107: tabletmanagerservice.TabletManager.StopReplicationAndGetStatus:output_type -> tabletmanagerdata.StopReplicationAndGetStatusResponse + 108, // 108: tabletmanagerservice.TabletManager.PromoteReplica:output_type -> tabletmanagerdata.PromoteReplicaResponse + 109, // 109: tabletmanagerservice.TabletManager.Backup:output_type -> tabletmanagerdata.BackupResponse + 110, // 110: tabletmanagerservice.TabletManager.RestoreFromBackup:output_type -> tabletmanagerdata.RestoreFromBackupResponse + 111, // 111: 
tabletmanagerservice.TabletManager.CheckThrottler:output_type -> tabletmanagerdata.CheckThrottlerResponse + 56, // [56:112] is the sub-list for method output_type + 0, // [0:56] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go index f0665947007..9f009614794 100644 --- a/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go +++ b/go/vt/proto/tabletmanagerservice/tabletmanagerservice_grpc.pb.go @@ -47,6 +47,7 @@ type TabletManagerClient interface { UnlockTables(ctx context.Context, in *tabletmanagerdata.UnlockTablesRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteQuery(ctx context.Context, in *tabletmanagerdata.ExecuteQueryRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteQueryResponse, error) ExecuteFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) + ExecuteMultiFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteMultiFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteMultiFetchAsDbaResponse, error) ExecuteFetchAsAllPrivs(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) ExecuteFetchAsApp(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAppRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. 
@@ -72,10 +73,13 @@ type TabletManagerClient interface { // VReplication API CreateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.CreateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) DeleteVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.DeleteVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) + HasVReplicationWorkflows(ctx context.Context, in *tabletmanagerdata.HasVReplicationWorkflowsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.HasVReplicationWorkflowsResponse, error) ReadVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) + ReadVReplicationWorkflows(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowsResponse, error) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) VReplicationWaitForPos(ctx context.Context, in *tabletmanagerdata.VReplicationWaitForPosRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) UpdateVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) + UpdateVReplicationWorkflows(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowsResponse, error) // VDiff API VDiff(ctx context.Context, in *tabletmanagerdata.VDiffRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VDiffResponse, error) // ResetReplication makes the target not replicating @@ -283,6 +287,15 @@ func (c 
*tabletManagerClient) ExecuteFetchAsDba(ctx context.Context, in *tabletm return out, nil } +func (c *tabletManagerClient) ExecuteMultiFetchAsDba(ctx context.Context, in *tabletmanagerdata.ExecuteMultiFetchAsDbaRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteMultiFetchAsDbaResponse, error) { + out := new(tabletmanagerdata.ExecuteMultiFetchAsDbaResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteMultiFetchAsDba", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) ExecuteFetchAsAllPrivs(ctx context.Context, in *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) { out := new(tabletmanagerdata.ExecuteFetchAsAllPrivsResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ExecuteFetchAsAllPrivs", in, out, opts...) @@ -400,6 +413,15 @@ func (c *tabletManagerClient) DeleteVReplicationWorkflow(ctx context.Context, in return out, nil } +func (c *tabletManagerClient) HasVReplicationWorkflows(ctx context.Context, in *tabletmanagerdata.HasVReplicationWorkflowsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.HasVReplicationWorkflowsResponse, error) { + out := new(tabletmanagerdata.HasVReplicationWorkflowsResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/HasVReplicationWorkflows", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) { out := new(tabletmanagerdata.ReadVReplicationWorkflowResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflow", in, out, opts...) 
@@ -409,6 +431,15 @@ func (c *tabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, in * return out, nil } +func (c *tabletManagerClient) ReadVReplicationWorkflows(ctx context.Context, in *tabletmanagerdata.ReadVReplicationWorkflowsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.ReadVReplicationWorkflowsResponse, error) { + out := new(tabletmanagerdata.ReadVReplicationWorkflowsResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflows", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) VReplicationExec(ctx context.Context, in *tabletmanagerdata.VReplicationExecRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VReplicationExecResponse, error) { out := new(tabletmanagerdata.VReplicationExecResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VReplicationExec", in, out, opts...) @@ -436,6 +467,15 @@ func (c *tabletManagerClient) UpdateVReplicationWorkflow(ctx context.Context, in return out, nil } +func (c *tabletManagerClient) UpdateVReplicationWorkflows(ctx context.Context, in *tabletmanagerdata.UpdateVReplicationWorkflowsRequest, opts ...grpc.CallOption) (*tabletmanagerdata.UpdateVReplicationWorkflowsResponse, error) { + out := new(tabletmanagerdata.UpdateVReplicationWorkflowsResponse) + err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/UpdateVReplicationWorkflows", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *tabletManagerClient) VDiff(ctx context.Context, in *tabletmanagerdata.VDiffRequest, opts ...grpc.CallOption) (*tabletmanagerdata.VDiffResponse, error) { out := new(tabletmanagerdata.VDiffResponse) err := c.cc.Invoke(ctx, "/tabletmanagerservice.TabletManager/VDiff", in, out, opts...) 
@@ -663,6 +703,7 @@ type TabletManagerServer interface { UnlockTables(context.Context, *tabletmanagerdata.UnlockTablesRequest) (*tabletmanagerdata.UnlockTablesResponse, error) ExecuteQuery(context.Context, *tabletmanagerdata.ExecuteQueryRequest) (*tabletmanagerdata.ExecuteQueryResponse, error) ExecuteFetchAsDba(context.Context, *tabletmanagerdata.ExecuteFetchAsDbaRequest) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) + ExecuteMultiFetchAsDba(context.Context, *tabletmanagerdata.ExecuteMultiFetchAsDbaRequest) (*tabletmanagerdata.ExecuteMultiFetchAsDbaResponse, error) ExecuteFetchAsAllPrivs(context.Context, *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) ExecuteFetchAsApp(context.Context, *tabletmanagerdata.ExecuteFetchAsAppRequest) (*tabletmanagerdata.ExecuteFetchAsAppResponse, error) // ReplicationStatus returns the current replication status. @@ -688,10 +729,13 @@ type TabletManagerServer interface { // VReplication API CreateVReplicationWorkflow(context.Context, *tabletmanagerdata.CreateVReplicationWorkflowRequest) (*tabletmanagerdata.CreateVReplicationWorkflowResponse, error) DeleteVReplicationWorkflow(context.Context, *tabletmanagerdata.DeleteVReplicationWorkflowRequest) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) + HasVReplicationWorkflows(context.Context, *tabletmanagerdata.HasVReplicationWorkflowsRequest) (*tabletmanagerdata.HasVReplicationWorkflowsResponse, error) ReadVReplicationWorkflow(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowRequest) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) + ReadVReplicationWorkflows(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowsRequest) (*tabletmanagerdata.ReadVReplicationWorkflowsResponse, error) VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) VReplicationWaitForPos(context.Context, 
*tabletmanagerdata.VReplicationWaitForPosRequest) (*tabletmanagerdata.VReplicationWaitForPosResponse, error) UpdateVReplicationWorkflow(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) + UpdateVReplicationWorkflows(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowsResponse, error) // VDiff API VDiff(context.Context, *tabletmanagerdata.VDiffRequest) (*tabletmanagerdata.VDiffResponse, error) // ResetReplication makes the target not replicating @@ -788,6 +832,9 @@ func (UnimplementedTabletManagerServer) ExecuteQuery(context.Context, *tabletman func (UnimplementedTabletManagerServer) ExecuteFetchAsDba(context.Context, *tabletmanagerdata.ExecuteFetchAsDbaRequest) (*tabletmanagerdata.ExecuteFetchAsDbaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ExecuteFetchAsDba not implemented") } +func (UnimplementedTabletManagerServer) ExecuteMultiFetchAsDba(context.Context, *tabletmanagerdata.ExecuteMultiFetchAsDbaRequest) (*tabletmanagerdata.ExecuteMultiFetchAsDbaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteMultiFetchAsDba not implemented") +} func (UnimplementedTabletManagerServer) ExecuteFetchAsAllPrivs(context.Context, *tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) (*tabletmanagerdata.ExecuteFetchAsAllPrivsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ExecuteFetchAsAllPrivs not implemented") } @@ -827,9 +874,15 @@ func (UnimplementedTabletManagerServer) CreateVReplicationWorkflow(context.Conte func (UnimplementedTabletManagerServer) DeleteVReplicationWorkflow(context.Context, *tabletmanagerdata.DeleteVReplicationWorkflowRequest) (*tabletmanagerdata.DeleteVReplicationWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteVReplicationWorkflow not implemented") } +func (UnimplementedTabletManagerServer) 
HasVReplicationWorkflows(context.Context, *tabletmanagerdata.HasVReplicationWorkflowsRequest) (*tabletmanagerdata.HasVReplicationWorkflowsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method HasVReplicationWorkflows not implemented") +} func (UnimplementedTabletManagerServer) ReadVReplicationWorkflow(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowRequest) (*tabletmanagerdata.ReadVReplicationWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadVReplicationWorkflow not implemented") } +func (UnimplementedTabletManagerServer) ReadVReplicationWorkflows(context.Context, *tabletmanagerdata.ReadVReplicationWorkflowsRequest) (*tabletmanagerdata.ReadVReplicationWorkflowsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadVReplicationWorkflows not implemented") +} func (UnimplementedTabletManagerServer) VReplicationExec(context.Context, *tabletmanagerdata.VReplicationExecRequest) (*tabletmanagerdata.VReplicationExecResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method VReplicationExec not implemented") } @@ -839,6 +892,9 @@ func (UnimplementedTabletManagerServer) VReplicationWaitForPos(context.Context, func (UnimplementedTabletManagerServer) UpdateVReplicationWorkflow(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateVReplicationWorkflow not implemented") } +func (UnimplementedTabletManagerServer) UpdateVReplicationWorkflows(context.Context, *tabletmanagerdata.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdata.UpdateVReplicationWorkflowsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateVReplicationWorkflows not implemented") +} func (UnimplementedTabletManagerServer) VDiff(context.Context, *tabletmanagerdata.VDiffRequest) (*tabletmanagerdata.VDiffResponse, error) { return nil, 
status.Errorf(codes.Unimplemented, "method VDiff not implemented") } @@ -1227,6 +1283,24 @@ func _TabletManager_ExecuteFetchAsDba_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _TabletManager_ExecuteMultiFetchAsDba_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ExecuteMultiFetchAsDbaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ExecuteMultiFetchAsDba(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ExecuteMultiFetchAsDba", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ExecuteMultiFetchAsDba(ctx, req.(*tabletmanagerdata.ExecuteMultiFetchAsDbaRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_ExecuteFetchAsAllPrivs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) if err := dec(in); err != nil { @@ -1461,6 +1535,24 @@ func _TabletManager_DeleteVReplicationWorkflow_Handler(srv interface{}, ctx cont return interceptor(ctx, in, info, handler) } +func _TabletManager_HasVReplicationWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.HasVReplicationWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).HasVReplicationWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/HasVReplicationWorkflows", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(TabletManagerServer).HasVReplicationWorkflows(ctx, req.(*tabletmanagerdata.HasVReplicationWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_ReadVReplicationWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.ReadVReplicationWorkflowRequest) if err := dec(in); err != nil { @@ -1479,6 +1571,24 @@ func _TabletManager_ReadVReplicationWorkflow_Handler(srv interface{}, ctx contex return interceptor(ctx, in, info, handler) } +func _TabletManager_ReadVReplicationWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.ReadVReplicationWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).ReadVReplicationWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/ReadVReplicationWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).ReadVReplicationWorkflows(ctx, req.(*tabletmanagerdata.ReadVReplicationWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_VReplicationExec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.VReplicationExecRequest) if err := dec(in); err != nil { @@ -1533,6 +1643,24 @@ func _TabletManager_UpdateVReplicationWorkflow_Handler(srv interface{}, ctx cont return interceptor(ctx, in, info, handler) } +func _TabletManager_UpdateVReplicationWorkflows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(tabletmanagerdata.UpdateVReplicationWorkflowsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TabletManagerServer).UpdateVReplicationWorkflows(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tabletmanagerservice.TabletManager/UpdateVReplicationWorkflows", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TabletManagerServer).UpdateVReplicationWorkflows(ctx, req.(*tabletmanagerdata.UpdateVReplicationWorkflowsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _TabletManager_VDiff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(tabletmanagerdata.VDiffRequest) if err := dec(in); err != nil { @@ -1924,6 +2052,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "ExecuteFetchAsDba", Handler: _TabletManager_ExecuteFetchAsDba_Handler, }, + { + MethodName: "ExecuteMultiFetchAsDba", + Handler: _TabletManager_ExecuteMultiFetchAsDba_Handler, + }, { MethodName: "ExecuteFetchAsAllPrivs", Handler: _TabletManager_ExecuteFetchAsAllPrivs_Handler, @@ -1976,10 +2108,18 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "DeleteVReplicationWorkflow", Handler: _TabletManager_DeleteVReplicationWorkflow_Handler, }, + { + MethodName: "HasVReplicationWorkflows", + Handler: _TabletManager_HasVReplicationWorkflows_Handler, + }, { MethodName: "ReadVReplicationWorkflow", Handler: _TabletManager_ReadVReplicationWorkflow_Handler, }, + { + MethodName: "ReadVReplicationWorkflows", + Handler: _TabletManager_ReadVReplicationWorkflows_Handler, + }, { MethodName: "VReplicationExec", Handler: _TabletManager_VReplicationExec_Handler, @@ -1992,6 +2132,10 @@ var TabletManager_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpdateVReplicationWorkflow", Handler: 
_TabletManager_UpdateVReplicationWorkflow_Handler, }, + { + MethodName: "UpdateVReplicationWorkflows", + Handler: _TabletManager_UpdateVReplicationWorkflows_Handler, + }, { MethodName: "VDiff", Handler: _TabletManager_VDiff_Handler, diff --git a/go/vt/proto/throttlerdata/throttlerdata.pb.go b/go/vt/proto/throttlerdata/throttlerdata.pb.go index fb12bc09ce8..905a862af57 100644 --- a/go/vt/proto/throttlerdata/throttlerdata.pb.go +++ b/go/vt/proto/throttlerdata/throttlerdata.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: throttlerdata.proto diff --git a/go/vt/proto/throttlerservice/throttlerservice.pb.go b/go/vt/proto/throttlerservice/throttlerservice.pb.go index 9bca73e067c..7fdd1e2ce6a 100644 --- a/go/vt/proto/throttlerservice/throttlerservice.pb.go +++ b/go/vt/proto/throttlerservice/throttlerservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: throttlerservice.proto diff --git a/go/vt/proto/topodata/topodata.pb.go b/go/vt/proto/topodata/topodata.pb.go index 43ecdbce963..1d5b8470803 100644 --- a/go/vt/proto/topodata/topodata.pb.go +++ b/go/vt/proto/topodata/topodata.pb.go @@ -20,7 +20,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: topodata.proto @@ -656,9 +656,6 @@ type Keyspace struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // ServedFrom will redirect the appropriate traffic to - // another keyspace. - ServedFroms []*Keyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` // keyspace_type will determine how this keyspace is treated by // vtgate / vschema. Normal keyspaces are routable by // any query. 
Snapshot keyspaces are only accessible @@ -716,13 +713,6 @@ func (*Keyspace) Descriptor() ([]byte, []int) { return file_topodata_proto_rawDescGZIP(), []int{4} } -func (x *Keyspace) GetServedFroms() []*Keyspace_ServedFrom { - if x != nil { - return x.ServedFroms - } - return nil -} - func (x *Keyspace) GetKeyspaceType() KeyspaceType { if x != nil { return x.KeyspaceType @@ -1172,7 +1162,6 @@ type SrvKeyspace struct { // The partitions this keyspace is serving, per tablet type. Partitions []*SrvKeyspace_KeyspacePartition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"` - ServedFrom []*SrvKeyspace_ServedFrom `protobuf:"bytes,4,rep,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` // ThrottlerConfig has the configuration for the tablet server's // lag throttler, and applies to the entire keyspace, across all // shards and tablets. This is copied from the global keyspace @@ -1219,13 +1208,6 @@ func (x *SrvKeyspace) GetPartitions() []*SrvKeyspace_KeyspacePartition { return nil } -func (x *SrvKeyspace) GetServedFrom() []*SrvKeyspace_ServedFrom { - if x != nil { - return x.ServedFrom - } - return nil -} - func (x *SrvKeyspace) GetThrottlerConfig() *ThrottlerConfig { if x != nil { return x.ThrottlerConfig @@ -1666,74 +1648,6 @@ func (x *Shard_TabletControl) GetFrozen() bool { return false } -// ServedFrom indicates a relationship between a TabletType and the -// keyspace name that's serving it. 
-type Keyspace_ServedFrom struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // the tablet type (key for the map) - TabletType TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - // the cells to limit this to - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` - // the keyspace name that's serving it - Keyspace string `protobuf:"bytes,3,opt,name=keyspace,proto3" json:"keyspace,omitempty"` -} - -func (x *Keyspace_ServedFrom) Reset() { - *x = Keyspace_ServedFrom{} - if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Keyspace_ServedFrom) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Keyspace_ServedFrom) ProtoMessage() {} - -func (x *Keyspace_ServedFrom) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Keyspace_ServedFrom.ProtoReflect.Descriptor instead. 
-func (*Keyspace_ServedFrom) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{4, 0} -} - -func (x *Keyspace_ServedFrom) GetTabletType() TabletType { - if x != nil { - return x.TabletType - } - return TabletType_UNKNOWN -} - -func (x *Keyspace_ServedFrom) GetCells() []string { - if x != nil { - return x.Cells - } - return nil -} - -func (x *Keyspace_ServedFrom) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - // Node describes a tablet instance within the cell type ShardReplication_Node struct { state protoimpl.MessageState @@ -1746,7 +1660,7 @@ type ShardReplication_Node struct { func (x *ShardReplication_Node) Reset() { *x = ShardReplication_Node{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[22] + mi := &file_topodata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1759,7 +1673,7 @@ func (x *ShardReplication_Node) String() string { func (*ShardReplication_Node) ProtoMessage() {} func (x *ShardReplication_Node) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[22] + mi := &file_topodata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1798,7 +1712,7 @@ type SrvKeyspace_KeyspacePartition struct { func (x *SrvKeyspace_KeyspacePartition) Reset() { *x = SrvKeyspace_KeyspacePartition{} if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[24] + mi := &file_topodata_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1811,7 +1725,7 @@ func (x *SrvKeyspace_KeyspacePartition) String() string { func (*SrvKeyspace_KeyspacePartition) ProtoMessage() {} func (x *SrvKeyspace_KeyspacePartition) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[24] + mi := &file_topodata_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != 
nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1848,65 +1762,6 @@ func (x *SrvKeyspace_KeyspacePartition) GetShardTabletControls() []*ShardTabletC return nil } -// ServedFrom indicates a relationship between a TabletType and the -// keyspace name that's serving it. -type SrvKeyspace_ServedFrom struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // the tablet type - TabletType TabletType `protobuf:"varint,1,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - // the keyspace name that's serving it - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` -} - -func (x *SrvKeyspace_ServedFrom) Reset() { - *x = SrvKeyspace_ServedFrom{} - if protoimpl.UnsafeEnabled { - mi := &file_topodata_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SrvKeyspace_ServedFrom) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SrvKeyspace_ServedFrom) ProtoMessage() {} - -func (x *SrvKeyspace_ServedFrom) ProtoReflect() protoreflect.Message { - mi := &file_topodata_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SrvKeyspace_ServedFrom.ProtoReflect.Descriptor instead. 
-func (*SrvKeyspace_ServedFrom) Descriptor() ([]byte, []int) { - return file_topodata_proto_rawDescGZIP(), []int{11, 1} -} - -func (x *SrvKeyspace_ServedFrom) GetTabletType() TabletType { - if x != nil { - return x.TabletType - } - return TabletType_UNKNOWN -} - -func (x *SrvKeyspace_ServedFrom) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - var File_topodata_proto protoreflect.FileDescriptor var file_topodata_proto_rawDesc = []byte{ @@ -2006,181 +1861,160 @@ var file_topodata_proto_rawDesc = []byte{ 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x7a, 0x65, 0x6e, 0x4a, 0x04, 0x08, 0x03, 0x10, - 0x04, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x85, 0x04, - 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, - 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x73, 0x12, 0x3b, 0x0a, 0x0d, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 
0x31, - 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, - 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, - 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, - 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, - 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x44, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x0a, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x4a, 0x04, 
0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, - 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x8b, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, - 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, - 0x73, 0x1a, 0x40, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x38, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x39, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 
0x4f, 0x54, 0x5f, 0x46, 0x4f, - 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x4f, 0x50, 0x4f, 0x4c, 0x4f, 0x47, - 0x59, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, - 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x34, 0x0a, 0x16, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x14, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x72, - 0x61, 
0x74, 0x69, 0x6f, 0x12, 0x2b, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, - 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x22, 0xce, 0x02, 0x0a, 0x0f, 0x54, 0x68, - 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, - 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, - 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x53, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x74, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x1a, 0x5c, 0x0a, 0x12, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 
0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb6, 0x04, 0x0a, 0x0b, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, - 0x6f, 0x6d, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, - 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 
0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, - 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, - 0x1a, 0x5f, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x35, - 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 
0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x25, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, - 0x22, 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, - 0x46, 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 
0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, - 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, - 0x01, 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, - 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, - 0x41, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, - 0x09, 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, - 0x41, 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, - 0x45, 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, - 0x50, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, - 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, - 0x01, 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x04, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x05, 0x10, 
0x06, 0x22, 0xd2, 0x02, + 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0d, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x44, 0x0a, 0x10, + 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, 0x64, 0x62, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x69, 0x64, + 0x65, 0x63, 0x61, 0x72, 0x44, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x4a, 0x04, 
0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0x8b, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x1a, 0x40, + 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x22, 0xc6, 0x01, 0x0a, 0x15, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x38, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x39, + 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 
0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x4f, 0x50, 0x4f, 0x4c, 0x4f, 0x47, 0x59, 0x5f, 0x4d, + 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, + 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x22, 0x8f, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, + 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x16, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x22, 0x81, 0x01, 0x0a, 0x10, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, + 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x72, 0x61, 
0x74, 0x69, + 0x6f, 0x12, 0x2b, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x22, 0xce, 0x02, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, + 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, + 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x53, 0x65, 0x6c, 0x66, 0x12, 0x53, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x64, 0x5f, 0x61, 0x70, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, + 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x1a, 0x5c, 0x0a, 0x12, 0x54, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 
0x65, 0x64, 0x41, 0x70, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x98, 0x03, 0x0a, 0x0b, 0x53, 0x72, 0x76, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x44, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xe1, 0x01, 0x0a, 0x11, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x0a, 0x0b, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x73, 0x68, 0x61, 
0x72, 0x64, 0x5f, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, 0x73, 0x68, 0x61, 0x72, + 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, + 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x22, 0x4b, 0x0a, 0x08, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, + 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, + 0x22, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x22, 0x55, 0x0a, 0x0a, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, + 
0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x22, 0x4e, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, + 0x74, 0x6f, 0x70, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x5a, 0x0a, 0x10, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, + 0x0a, 0x0e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x56, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, + 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x4e, 0x41, 0x50, 0x53, 0x48, 0x4f, 0x54, 0x10, 0x01, + 0x2a, 0x9d, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, + 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x41, 0x53, + 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, + 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 
0x52, 0x44, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x09, + 0x0a, 0x05, 0x42, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x41, + 0x52, 0x45, 0x10, 0x04, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, 0x45, + 0x4e, 0x54, 0x41, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, + 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x07, 0x12, + 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x08, 0x1a, 0x02, 0x10, 0x01, + 0x42, 0x38, 0x0a, 0x0f, 0x69, 0x6f, 0x2e, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x5a, 0x25, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -2196,7 +2030,7 @@ func file_topodata_proto_rawDescGZIP() []byte { } var file_topodata_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 26) +var file_topodata_proto_msgTypes = make([]protoimpl.MessageInfo, 24) var file_topodata_proto_goTypes = []interface{}{ (KeyspaceType)(0), // 0: topodata.KeyspaceType (TabletType)(0), // 1: topodata.TabletType @@ -2222,12 +2056,10 @@ var file_topodata_proto_goTypes = []interface{}{ nil, // 21: topodata.Tablet.TagsEntry (*Shard_SourceShard)(nil), // 22: topodata.Shard.SourceShard (*Shard_TabletControl)(nil), // 23: topodata.Shard.TabletControl - (*Keyspace_ServedFrom)(nil), // 24: topodata.Keyspace.ServedFrom - (*ShardReplication_Node)(nil), // 25: topodata.ShardReplication.Node - nil, // 26: topodata.ThrottlerConfig.ThrottledAppsEntry - (*SrvKeyspace_KeyspacePartition)(nil), // 27: topodata.SrvKeyspace.KeyspacePartition - (*SrvKeyspace_ServedFrom)(nil), // 28: topodata.SrvKeyspace.ServedFrom - (*vttime.Time)(nil), // 
29: vttime.Time + (*ShardReplication_Node)(nil), // 24: topodata.ShardReplication.Node + nil, // 25: topodata.ThrottlerConfig.ThrottledAppsEntry + (*SrvKeyspace_KeyspacePartition)(nil), // 26: topodata.SrvKeyspace.KeyspacePartition + (*vttime.Time)(nil), // 27: vttime.Time } var file_topodata_proto_depIdxs = []int32{ 4, // 0: topodata.Tablet.alias:type_name -> topodata.TabletAlias @@ -2235,42 +2067,38 @@ var file_topodata_proto_depIdxs = []int32{ 3, // 2: topodata.Tablet.key_range:type_name -> topodata.KeyRange 1, // 3: topodata.Tablet.type:type_name -> topodata.TabletType 21, // 4: topodata.Tablet.tags:type_name -> topodata.Tablet.TagsEntry - 29, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time + 27, // 5: topodata.Tablet.primary_term_start_time:type_name -> vttime.Time 4, // 6: topodata.Shard.primary_alias:type_name -> topodata.TabletAlias - 29, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time + 27, // 7: topodata.Shard.primary_term_start_time:type_name -> vttime.Time 3, // 8: topodata.Shard.key_range:type_name -> topodata.KeyRange 22, // 9: topodata.Shard.source_shards:type_name -> topodata.Shard.SourceShard 23, // 10: topodata.Shard.tablet_controls:type_name -> topodata.Shard.TabletControl - 24, // 11: topodata.Keyspace.served_froms:type_name -> topodata.Keyspace.ServedFrom - 0, // 12: topodata.Keyspace.keyspace_type:type_name -> topodata.KeyspaceType - 29, // 13: topodata.Keyspace.snapshot_time:type_name -> vttime.Time - 13, // 14: topodata.Keyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 25, // 15: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node - 2, // 16: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type - 4, // 17: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias - 3, // 18: topodata.ShardReference.key_range:type_name -> topodata.KeyRange - 3, // 19: topodata.ShardTabletControl.key_range:type_name -> 
topodata.KeyRange - 29, // 20: topodata.ThrottledAppRule.expires_at:type_name -> vttime.Time - 26, // 21: topodata.ThrottlerConfig.throttled_apps:type_name -> topodata.ThrottlerConfig.ThrottledAppsEntry - 27, // 22: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition - 28, // 23: topodata.SrvKeyspace.served_from:type_name -> topodata.SrvKeyspace.ServedFrom - 13, // 24: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig - 17, // 25: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig - 18, // 26: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster - 3, // 27: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange - 1, // 28: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType - 1, // 29: topodata.Keyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType - 4, // 30: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias - 12, // 31: topodata.ThrottlerConfig.ThrottledAppsEntry.value:type_name -> topodata.ThrottledAppRule - 1, // 32: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType - 10, // 33: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference - 11, // 34: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl - 1, // 35: topodata.SrvKeyspace.ServedFrom.tablet_type:type_name -> topodata.TabletType - 36, // [36:36] is the sub-list for method output_type - 36, // [36:36] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // [36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + 0, // 11: topodata.Keyspace.keyspace_type:type_name -> topodata.KeyspaceType + 27, // 12: topodata.Keyspace.snapshot_time:type_name -> vttime.Time + 13, // 13: topodata.Keyspace.throttler_config:type_name 
-> topodata.ThrottlerConfig + 24, // 14: topodata.ShardReplication.nodes:type_name -> topodata.ShardReplication.Node + 2, // 15: topodata.ShardReplicationError.type:type_name -> topodata.ShardReplicationError.Type + 4, // 16: topodata.ShardReplicationError.tablet_alias:type_name -> topodata.TabletAlias + 3, // 17: topodata.ShardReference.key_range:type_name -> topodata.KeyRange + 3, // 18: topodata.ShardTabletControl.key_range:type_name -> topodata.KeyRange + 27, // 19: topodata.ThrottledAppRule.expires_at:type_name -> vttime.Time + 25, // 20: topodata.ThrottlerConfig.throttled_apps:type_name -> topodata.ThrottlerConfig.ThrottledAppsEntry + 26, // 21: topodata.SrvKeyspace.partitions:type_name -> topodata.SrvKeyspace.KeyspacePartition + 13, // 22: topodata.SrvKeyspace.throttler_config:type_name -> topodata.ThrottlerConfig + 17, // 23: topodata.ExternalVitessCluster.topo_config:type_name -> topodata.TopoConfig + 18, // 24: topodata.ExternalClusters.vitess_cluster:type_name -> topodata.ExternalVitessCluster + 3, // 25: topodata.Shard.SourceShard.key_range:type_name -> topodata.KeyRange + 1, // 26: topodata.Shard.TabletControl.tablet_type:type_name -> topodata.TabletType + 4, // 27: topodata.ShardReplication.Node.tablet_alias:type_name -> topodata.TabletAlias + 12, // 28: topodata.ThrottlerConfig.ThrottledAppsEntry.value:type_name -> topodata.ThrottledAppRule + 1, // 29: topodata.SrvKeyspace.KeyspacePartition.served_type:type_name -> topodata.TabletType + 10, // 30: topodata.SrvKeyspace.KeyspacePartition.shard_references:type_name -> topodata.ShardReference + 11, // 31: topodata.SrvKeyspace.KeyspacePartition.shard_tablet_controls:type_name -> topodata.ShardTabletControl + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for field type_name } func init() { 
file_topodata_proto_init() } @@ -2508,18 +2336,6 @@ func file_topodata_proto_init() { } } file_topodata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Keyspace_ServedFrom); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_topodata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplication_Node); i { case 0: return &v.state @@ -2531,7 +2347,7 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_topodata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SrvKeyspace_KeyspacePartition); i { case 0: return &v.state @@ -2543,18 +2359,6 @@ func file_topodata_proto_init() { return nil } } - file_topodata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SrvKeyspace_ServedFrom); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } } type x struct{} out := protoimpl.TypeBuilder{ @@ -2562,7 +2366,7 @@ func file_topodata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_topodata_proto_rawDesc, NumEnums: 3, - NumMessages: 26, + NumMessages: 24, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/topodata/topodata_vtproto.pb.go b/go/vt/proto/topodata/topodata_vtproto.pb.go index 5e675bb4ea0..78971f9db9a 100644 --- a/go/vt/proto/topodata/topodata_vtproto.pb.go +++ b/go/vt/proto/topodata/topodata_vtproto.pb.go @@ -199,30 +199,6 @@ func (m *Shard) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *Keyspace_ServedFrom) CloneVT() *Keyspace_ServedFrom { - if m == nil { - return (*Keyspace_ServedFrom)(nil) - } - r := &Keyspace_ServedFrom{ - TabletType: m.TabletType, - Keyspace: m.Keyspace, - } - if rhs := m.Cells; 
rhs != nil { - tmpContainer := make([]string, len(rhs)) - copy(tmpContainer, rhs) - r.Cells = tmpContainer - } - if len(m.unknownFields) > 0 { - r.unknownFields = make([]byte, len(m.unknownFields)) - copy(r.unknownFields, m.unknownFields) - } - return r -} - -func (m *Keyspace_ServedFrom) CloneMessageVT() proto.Message { - return m.CloneVT() -} - func (m *Keyspace) CloneVT() *Keyspace { if m == nil { return (*Keyspace)(nil) @@ -235,13 +211,6 @@ func (m *Keyspace) CloneVT() *Keyspace { ThrottlerConfig: m.ThrottlerConfig.CloneVT(), SidecarDbName: m.SidecarDbName, } - if rhs := m.ServedFroms; rhs != nil { - tmpContainer := make([]*Keyspace_ServedFrom, len(rhs)) - for k, v := range rhs { - tmpContainer[k] = v.CloneVT() - } - r.ServedFroms = tmpContainer - } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -433,25 +402,6 @@ func (m *SrvKeyspace_KeyspacePartition) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *SrvKeyspace_ServedFrom) CloneVT() *SrvKeyspace_ServedFrom { - if m == nil { - return (*SrvKeyspace_ServedFrom)(nil) - } - r := &SrvKeyspace_ServedFrom{ - TabletType: m.TabletType, - Keyspace: m.Keyspace, - } - if len(m.unknownFields) > 0 { - r.unknownFields = make([]byte, len(m.unknownFields)) - copy(r.unknownFields, m.unknownFields) - } - return r -} - -func (m *SrvKeyspace_ServedFrom) CloneMessageVT() proto.Message { - return m.CloneVT() -} - func (m *SrvKeyspace) CloneVT() *SrvKeyspace { if m == nil { return (*SrvKeyspace)(nil) @@ -466,13 +416,6 @@ func (m *SrvKeyspace) CloneVT() *SrvKeyspace { } r.Partitions = tmpContainer } - if rhs := m.ServedFrom; rhs != nil { - tmpContainer := make([]*SrvKeyspace_ServedFrom, len(rhs)) - for k, v := range rhs { - tmpContainer[k] = v.CloneVT() - } - r.ServedFrom = tmpContainer - } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1062,60 +1005,6 @@ func (m 
*Shard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Keyspace_ServedFrom) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Keyspace_ServedFrom) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *Keyspace_ServedFrom) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x1a - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *Keyspace) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -1192,18 +1081,6 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - if len(m.ServedFroms) > 0 { - for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ServedFroms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - } return len(dAtA) - i, nil } @@ -1669,51 +1546,6 @@ func (m *SrvKeyspace_KeyspacePartition) MarshalToSizedBufferVT(dAtA []byte) (int return len(dAtA) - i, nil } -func (m *SrvKeyspace_ServedFrom) MarshalVT() (dAtA []byte, err 
error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SrvKeyspace_ServedFrom) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SrvKeyspace_ServedFrom) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0x12 - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - func (m *SrvKeyspace) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -1754,18 +1586,6 @@ func (m *SrvKeyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - if len(m.ServedFrom) > 0 { - for iNdEx := len(m.ServedFrom) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ServedFrom[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x22 - } - } if len(m.Partitions) > 0 { for iNdEx := len(m.Partitions) - 1; iNdEx >= 0; iNdEx-- { size, err := m.Partitions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) @@ -2221,41 +2041,12 @@ func (m *Shard) SizeVT() (n int) { return n } -func (m *Keyspace_ServedFrom) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) - } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - 
return n -} - func (m *Keyspace) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.ServedFroms) > 0 { - for _, e := range m.ServedFroms { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } if m.KeyspaceType != 0 { n += 1 + sov(uint64(m.KeyspaceType)) } @@ -2454,23 +2245,6 @@ func (m *SrvKeyspace_KeyspacePartition) SizeVT() (n int) { return n } -func (m *SrvKeyspace_ServedFrom) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TabletType != 0 { - n += 1 + sov(uint64(m.TabletType)) - } - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - func (m *SrvKeyspace) SizeVT() (n int) { if m == nil { return 0 @@ -2483,12 +2257,6 @@ func (m *SrvKeyspace) SizeVT() (n int) { n += 1 + l + sov(uint64(l)) } } - if len(m.ServedFrom) > 0 { - for _, e := range m.ServedFrom { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } if m.ThrottlerConfig != nil { l = m.ThrottlerConfig.SizeVT() n += 1 + l + sov(uint64(l)) @@ -4029,140 +3797,6 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *Keyspace_ServedFrom) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Keyspace_ServedFrom: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Keyspace_ServedFrom: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Keyspace) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4192,40 +3826,6 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { return fmt.Errorf("proto: Keyspace: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServedFroms = append(m.ServedFroms, &Keyspace_ServedFrom{}) - if err := m.ServedFroms[len(m.ServedFroms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceType", wireType) @@ -5522,108 +5122,6 @@ func (m *SrvKeyspace_KeyspacePartition) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SrvKeyspace_ServedFrom) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SrvKeyspace_ServedFrom: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SrvKeyspace_ServedFrom: illegal tag %d (wire type %d)", fieldNum, wire) - } 
- switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *SrvKeyspace) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5687,40 +5185,6 @@ func (m *SrvKeyspace) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServedFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServedFrom = append(m.ServedFrom, &SrvKeyspace_ServedFrom{}) - if err := m.ServedFrom[len(m.ServedFrom)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ThrottlerConfig", wireType) diff --git a/go/vt/proto/vschema/vschema.pb.go b/go/vt/proto/vschema/vschema.pb.go index 8726fb35745..10ef8c1296b 100644 --- a/go/vt/proto/vschema/vschema.pb.go +++ b/go/vt/proto/vschema/vschema.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vschema.proto @@ -211,6 +211,8 @@ type Keyspace struct { RequireExplicitRouting bool `protobuf:"varint,4,opt,name=require_explicit_routing,json=requireExplicitRouting,proto3" json:"require_explicit_routing,omitempty"` // foreign_key_mode dictates how Vitess should handle foreign keys for this keyspace. 
ForeignKeyMode Keyspace_ForeignKeyMode `protobuf:"varint,5,opt,name=foreign_key_mode,json=foreignKeyMode,proto3,enum=vschema.Keyspace_ForeignKeyMode" json:"foreign_key_mode,omitempty"` + // multi_tenant_mode specifies that the keyspace is multi-tenant. Currently used during migrations with MoveTables. + MultiTenantSpec *MultiTenantSpec `protobuf:"bytes,6,opt,name=multi_tenant_spec,json=multiTenantSpec,proto3" json:"multi_tenant_spec,omitempty"` } func (x *Keyspace) Reset() { @@ -280,6 +282,70 @@ func (x *Keyspace) GetForeignKeyMode() Keyspace_ForeignKeyMode { return Keyspace_unspecified } +func (x *Keyspace) GetMultiTenantSpec() *MultiTenantSpec { + if x != nil { + return x.MultiTenantSpec + } + return nil +} + +type MultiTenantSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // tenant_column is the name of the column that specifies the tenant id. + TenantIdColumnName string `protobuf:"bytes,1,opt,name=tenant_id_column_name,json=tenantIdColumnName,proto3" json:"tenant_id_column_name,omitempty"` + // tenant_column_type is the type of the column that specifies the tenant id. 
+ TenantIdColumnType query.Type `protobuf:"varint,2,opt,name=tenant_id_column_type,json=tenantIdColumnType,proto3,enum=query.Type" json:"tenant_id_column_type,omitempty"` +} + +func (x *MultiTenantSpec) Reset() { + *x = MultiTenantSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_vschema_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultiTenantSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultiTenantSpec) ProtoMessage() {} + +func (x *MultiTenantSpec) ProtoReflect() protoreflect.Message { + mi := &file_vschema_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultiTenantSpec.ProtoReflect.Descriptor instead. +func (*MultiTenantSpec) Descriptor() ([]byte, []int) { + return file_vschema_proto_rawDescGZIP(), []int{3} +} + +func (x *MultiTenantSpec) GetTenantIdColumnName() string { + if x != nil { + return x.TenantIdColumnName + } + return "" +} + +func (x *MultiTenantSpec) GetTenantIdColumnType() query.Type { + if x != nil { + return x.TenantIdColumnType + } + return query.Type(0) +} + // Vindex is the vindex info for a Keyspace. 
type Vindex struct { state protoimpl.MessageState @@ -304,7 +370,7 @@ type Vindex struct { func (x *Vindex) Reset() { *x = Vindex{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[3] + mi := &file_vschema_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -317,7 +383,7 @@ func (x *Vindex) String() string { func (*Vindex) ProtoMessage() {} func (x *Vindex) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[3] + mi := &file_vschema_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -330,7 +396,7 @@ func (x *Vindex) ProtoReflect() protoreflect.Message { // Deprecated: Use Vindex.ProtoReflect.Descriptor instead. func (*Vindex) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{3} + return file_vschema_proto_rawDescGZIP(), []int{4} } func (x *Vindex) GetType() string { @@ -392,7 +458,7 @@ type Table struct { func (x *Table) Reset() { *x = Table{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[4] + mi := &file_vschema_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -405,7 +471,7 @@ func (x *Table) String() string { func (*Table) ProtoMessage() {} func (x *Table) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[4] + mi := &file_vschema_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -418,7 +484,7 @@ func (x *Table) ProtoReflect() protoreflect.Message { // Deprecated: Use Table.ProtoReflect.Descriptor instead. 
func (*Table) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{4} + return file_vschema_proto_rawDescGZIP(), []int{5} } func (x *Table) GetType() string { @@ -487,7 +553,7 @@ type ColumnVindex struct { func (x *ColumnVindex) Reset() { *x = ColumnVindex{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[5] + mi := &file_vschema_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -500,7 +566,7 @@ func (x *ColumnVindex) String() string { func (*ColumnVindex) ProtoMessage() {} func (x *ColumnVindex) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[5] + mi := &file_vschema_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -513,7 +579,7 @@ func (x *ColumnVindex) ProtoReflect() protoreflect.Message { // Deprecated: Use ColumnVindex.ProtoReflect.Descriptor instead. func (*ColumnVindex) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{5} + return file_vschema_proto_rawDescGZIP(), []int{6} } func (x *ColumnVindex) GetColumn() string { @@ -551,7 +617,7 @@ type AutoIncrement struct { func (x *AutoIncrement) Reset() { *x = AutoIncrement{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[6] + mi := &file_vschema_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -564,7 +630,7 @@ func (x *AutoIncrement) String() string { func (*AutoIncrement) ProtoMessage() {} func (x *AutoIncrement) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[6] + mi := &file_vschema_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -577,7 +643,7 @@ func (x *AutoIncrement) ProtoReflect() protoreflect.Message { // Deprecated: Use AutoIncrement.ProtoReflect.Descriptor 
instead. func (*AutoIncrement) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{6} + return file_vschema_proto_rawDescGZIP(), []int{7} } func (x *AutoIncrement) GetColumn() string { @@ -600,15 +666,22 @@ type Column struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Type query.Type `protobuf:"varint,2,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` - Invisible bool `protobuf:"varint,3,opt,name=invisible,proto3" json:"invisible,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type query.Type `protobuf:"varint,2,opt,name=type,proto3,enum=query.Type" json:"type,omitempty"` + Invisible bool `protobuf:"varint,3,opt,name=invisible,proto3" json:"invisible,omitempty"` + Default string `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` + CollationName string `protobuf:"bytes,5,opt,name=collation_name,json=collationName,proto3" json:"collation_name,omitempty"` + Size int32 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` + Scale int32 `protobuf:"varint,7,opt,name=scale,proto3" json:"scale,omitempty"` + Nullable *bool `protobuf:"varint,8,opt,name=nullable,proto3,oneof" json:"nullable,omitempty"` + // values contains the list of values for an enum or set column. 
+ Values []string `protobuf:"bytes,9,rep,name=values,proto3" json:"values,omitempty"` } func (x *Column) Reset() { *x = Column{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[7] + mi := &file_vschema_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -621,7 +694,7 @@ func (x *Column) String() string { func (*Column) ProtoMessage() {} func (x *Column) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[7] + mi := &file_vschema_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -634,7 +707,7 @@ func (x *Column) ProtoReflect() protoreflect.Message { // Deprecated: Use Column.ProtoReflect.Descriptor instead. func (*Column) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{7} + return file_vschema_proto_rawDescGZIP(), []int{8} } func (x *Column) GetName() string { @@ -658,6 +731,48 @@ func (x *Column) GetInvisible() bool { return false } +func (x *Column) GetDefault() string { + if x != nil { + return x.Default + } + return "" +} + +func (x *Column) GetCollationName() string { + if x != nil { + return x.CollationName + } + return "" +} + +func (x *Column) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *Column) GetScale() int32 { + if x != nil { + return x.Scale + } + return 0 +} + +func (x *Column) GetNullable() bool { + if x != nil && x.Nullable != nil { + return *x.Nullable + } + return false +} + +func (x *Column) GetValues() []string { + if x != nil { + return x.Values + } + return nil +} + // SrvVSchema is the roll-up of all the Keyspace schema for a cell. type SrvVSchema struct { state protoimpl.MessageState @@ -665,15 +780,16 @@ type SrvVSchema struct { unknownFields protoimpl.UnknownFields // keyspaces is a map of keyspace name -> Keyspace object. 
- Keyspaces map[string]*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - RoutingRules *RoutingRules `protobuf:"bytes,2,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` // table routing rules - ShardRoutingRules *ShardRoutingRules `protobuf:"bytes,3,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` + Keyspaces map[string]*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + RoutingRules *RoutingRules `protobuf:"bytes,2,opt,name=routing_rules,json=routingRules,proto3" json:"routing_rules,omitempty"` // table routing rules + ShardRoutingRules *ShardRoutingRules `protobuf:"bytes,3,opt,name=shard_routing_rules,json=shardRoutingRules,proto3" json:"shard_routing_rules,omitempty"` + KeyspaceRoutingRules *KeyspaceRoutingRules `protobuf:"bytes,4,opt,name=keyspace_routing_rules,json=keyspaceRoutingRules,proto3" json:"keyspace_routing_rules,omitempty"` } func (x *SrvVSchema) Reset() { *x = SrvVSchema{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[8] + mi := &file_vschema_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -686,7 +802,7 @@ func (x *SrvVSchema) String() string { func (*SrvVSchema) ProtoMessage() {} func (x *SrvVSchema) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[8] + mi := &file_vschema_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -699,7 +815,7 @@ func (x *SrvVSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvVSchema.ProtoReflect.Descriptor instead. 
func (*SrvVSchema) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{8} + return file_vschema_proto_rawDescGZIP(), []int{9} } func (x *SrvVSchema) GetKeyspaces() map[string]*Keyspace { @@ -723,6 +839,13 @@ func (x *SrvVSchema) GetShardRoutingRules() *ShardRoutingRules { return nil } +func (x *SrvVSchema) GetKeyspaceRoutingRules() *KeyspaceRoutingRules { + if x != nil { + return x.KeyspaceRoutingRules + } + return nil +} + // ShardRoutingRules specify the shard routing rules for the VSchema. type ShardRoutingRules struct { state protoimpl.MessageState @@ -735,7 +858,7 @@ type ShardRoutingRules struct { func (x *ShardRoutingRules) Reset() { *x = ShardRoutingRules{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[9] + mi := &file_vschema_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -748,7 +871,7 @@ func (x *ShardRoutingRules) String() string { func (*ShardRoutingRules) ProtoMessage() {} func (x *ShardRoutingRules) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[9] + mi := &file_vschema_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -761,7 +884,7 @@ func (x *ShardRoutingRules) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardRoutingRules.ProtoReflect.Descriptor instead. func (*ShardRoutingRules) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{9} + return file_vschema_proto_rawDescGZIP(), []int{10} } func (x *ShardRoutingRules) GetRules() []*ShardRoutingRule { @@ -771,7 +894,7 @@ func (x *ShardRoutingRules) GetRules() []*ShardRoutingRule { return nil } -// RoutingRule specifies a routing rule. +// ShardRoutingRule specifies a routing rule. 
type ShardRoutingRule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -785,7 +908,7 @@ type ShardRoutingRule struct { func (x *ShardRoutingRule) Reset() { *x = ShardRoutingRule{} if protoimpl.UnsafeEnabled { - mi := &file_vschema_proto_msgTypes[10] + mi := &file_vschema_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -798,7 +921,7 @@ func (x *ShardRoutingRule) String() string { func (*ShardRoutingRule) ProtoMessage() {} func (x *ShardRoutingRule) ProtoReflect() protoreflect.Message { - mi := &file_vschema_proto_msgTypes[10] + mi := &file_vschema_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -811,7 +934,7 @@ func (x *ShardRoutingRule) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardRoutingRule.ProtoReflect.Descriptor instead. func (*ShardRoutingRule) Descriptor() ([]byte, []int) { - return file_vschema_proto_rawDescGZIP(), []int{10} + return file_vschema_proto_rawDescGZIP(), []int{11} } func (x *ShardRoutingRule) GetFromKeyspace() string { @@ -835,6 +958,108 @@ func (x *ShardRoutingRule) GetShard() string { return "" } +type KeyspaceRoutingRules struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Rules []*KeyspaceRoutingRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (x *KeyspaceRoutingRules) Reset() { + *x = KeyspaceRoutingRules{} + if protoimpl.UnsafeEnabled { + mi := &file_vschema_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyspaceRoutingRules) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyspaceRoutingRules) ProtoMessage() {} + +func (x *KeyspaceRoutingRules) ProtoReflect() protoreflect.Message { + mi := &file_vschema_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x 
!= nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyspaceRoutingRules.ProtoReflect.Descriptor instead. +func (*KeyspaceRoutingRules) Descriptor() ([]byte, []int) { + return file_vschema_proto_rawDescGZIP(), []int{12} +} + +func (x *KeyspaceRoutingRules) GetRules() []*KeyspaceRoutingRule { + if x != nil { + return x.Rules + } + return nil +} + +type KeyspaceRoutingRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + FromKeyspace string `protobuf:"bytes,1,opt,name=from_keyspace,json=fromKeyspace,proto3" json:"from_keyspace,omitempty"` + ToKeyspace string `protobuf:"bytes,2,opt,name=to_keyspace,json=toKeyspace,proto3" json:"to_keyspace,omitempty"` +} + +func (x *KeyspaceRoutingRule) Reset() { + *x = KeyspaceRoutingRule{} + if protoimpl.UnsafeEnabled { + mi := &file_vschema_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyspaceRoutingRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyspaceRoutingRule) ProtoMessage() {} + +func (x *KeyspaceRoutingRule) ProtoReflect() protoreflect.Message { + mi := &file_vschema_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyspaceRoutingRule.ProtoReflect.Descriptor instead. 
+func (*KeyspaceRoutingRule) Descriptor() ([]byte, []int) { + return file_vschema_proto_rawDescGZIP(), []int{13} +} + +func (x *KeyspaceRoutingRule) GetFromKeyspace() string { + if x != nil { + return x.FromKeyspace + } + return "" +} + +func (x *KeyspaceRoutingRule) GetToKeyspace() string { + if x != nil { + return x.ToKeyspace + } + return "" +} + var File_vschema_proto protoreflect.FileDescriptor var file_vschema_proto_rawDesc = []byte{ @@ -848,7 +1073,7 @@ var file_vschema_proto_rawDesc = []byte{ 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x84, 0x04, 0x0a, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xca, 0x04, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, @@ -866,100 +1091,140 @@ var file_vschema_proto_rawDesc = []byte{ 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x66, - 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x1a, 0x4c, 0x0a, - 0x0d, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x0b, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, - 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x75, 0x6e, 0x73, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x64, 0x69, 0x73, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x75, 0x6e, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x64, 0x10, 0x03, 0x22, 0xa2, 0x01, 0x0a, 0x06, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, - 0x64, 0x65, 0x78, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x1a, 0x39, 0x0a, - 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x5f, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x69, - 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, - 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, - 0x61, 0x74, 0x69, 0x76, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6c, - 0x75, 0x6d, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x22, 0x54, 0x0a, 0x0c, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, - 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, - 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, - 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, - 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x5b, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x73, 0x69, - 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x73, - 0x69, 0x62, 0x6c, 0x65, 0x22, 0xa7, 0x02, 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 
0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, - 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, - 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x1a, 0x4f, 0x0a, - 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x44, - 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x6f, 0x6d, - 0x5f, 0x6b, 0x65, 0x79, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, - 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, - 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x44, 0x0a, + 0x11, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x70, + 0x65, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x53, 0x70, + 0x65, 0x63, 0x52, 0x0f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x53, + 0x70, 0x65, 0x63, 0x1a, 0x4c, 0x0a, 0x0d, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x1a, 0x49, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x0e, + 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x75, 0x6e, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x10, 0x00, 0x12, + 0x0c, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x10, 0x01, 0x12, 0x0d, 0x0a, + 0x09, 0x75, 0x6e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x10, 0x03, 0x22, 0x84, 0x01, 0x0a, 0x0f, 0x4d, 0x75, + 0x6c, 0x74, 0x69, 0x54, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x31, 0x0a, + 0x15, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x3e, 0x0a, 0x15, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x5f, 0x63, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x12, 0x74, 0x65, + 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, + 0x22, 0xa2, 0x01, 0x0a, 0x06, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x33, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x52, 0x06, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x02, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, 0x0f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x76, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x52, 0x0e, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x63, 0x72, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, + 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x19, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, + 0x6c, 0x69, 
0x73, 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x54, 0x0a, 0x0c, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x22, + 0x43, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x22, 0x8c, 0x02, 0x0a, 0x06, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0b, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x6e, 0x76, 0x69, 0x73, 0x69, 0x62, + 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 
0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x63, 0x6f, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6c, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x1f, 0x0a, + 0x08, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x08, 0x6e, 0x75, 0x6c, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x16, + 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 0x22, 0xfc, 0x02, 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x40, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, + 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, + 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 
0x69, 0x6e, + 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x16, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x14, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x1a, 0x4f, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x44, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, + 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x66, 0x72, 0x6f, 0x6d, 
0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x4a, 0x0a, 0x14, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x12, 0x32, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, + 0x72, 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x42, 0x26, 0x5a, 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, + 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -975,48 +1240,55 @@ func file_vschema_proto_rawDescGZIP() []byte { } var file_vschema_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var 
file_vschema_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_vschema_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_vschema_proto_goTypes = []interface{}{ (Keyspace_ForeignKeyMode)(0), // 0: vschema.Keyspace.ForeignKeyMode (*RoutingRules)(nil), // 1: vschema.RoutingRules (*RoutingRule)(nil), // 2: vschema.RoutingRule (*Keyspace)(nil), // 3: vschema.Keyspace - (*Vindex)(nil), // 4: vschema.Vindex - (*Table)(nil), // 5: vschema.Table - (*ColumnVindex)(nil), // 6: vschema.ColumnVindex - (*AutoIncrement)(nil), // 7: vschema.AutoIncrement - (*Column)(nil), // 8: vschema.Column - (*SrvVSchema)(nil), // 9: vschema.SrvVSchema - (*ShardRoutingRules)(nil), // 10: vschema.ShardRoutingRules - (*ShardRoutingRule)(nil), // 11: vschema.ShardRoutingRule - nil, // 12: vschema.Keyspace.VindexesEntry - nil, // 13: vschema.Keyspace.TablesEntry - nil, // 14: vschema.Vindex.ParamsEntry - nil, // 15: vschema.SrvVSchema.KeyspacesEntry - (query.Type)(0), // 16: query.Type + (*MultiTenantSpec)(nil), // 4: vschema.MultiTenantSpec + (*Vindex)(nil), // 5: vschema.Vindex + (*Table)(nil), // 6: vschema.Table + (*ColumnVindex)(nil), // 7: vschema.ColumnVindex + (*AutoIncrement)(nil), // 8: vschema.AutoIncrement + (*Column)(nil), // 9: vschema.Column + (*SrvVSchema)(nil), // 10: vschema.SrvVSchema + (*ShardRoutingRules)(nil), // 11: vschema.ShardRoutingRules + (*ShardRoutingRule)(nil), // 12: vschema.ShardRoutingRule + (*KeyspaceRoutingRules)(nil), // 13: vschema.KeyspaceRoutingRules + (*KeyspaceRoutingRule)(nil), // 14: vschema.KeyspaceRoutingRule + nil, // 15: vschema.Keyspace.VindexesEntry + nil, // 16: vschema.Keyspace.TablesEntry + nil, // 17: vschema.Vindex.ParamsEntry + nil, // 18: vschema.SrvVSchema.KeyspacesEntry + (query.Type)(0), // 19: query.Type } var file_vschema_proto_depIdxs = []int32{ 2, // 0: vschema.RoutingRules.rules:type_name -> vschema.RoutingRule - 12, // 1: vschema.Keyspace.vindexes:type_name -> vschema.Keyspace.VindexesEntry - 13, // 2: 
vschema.Keyspace.tables:type_name -> vschema.Keyspace.TablesEntry + 15, // 1: vschema.Keyspace.vindexes:type_name -> vschema.Keyspace.VindexesEntry + 16, // 2: vschema.Keyspace.tables:type_name -> vschema.Keyspace.TablesEntry 0, // 3: vschema.Keyspace.foreign_key_mode:type_name -> vschema.Keyspace.ForeignKeyMode - 14, // 4: vschema.Vindex.params:type_name -> vschema.Vindex.ParamsEntry - 6, // 5: vschema.Table.column_vindexes:type_name -> vschema.ColumnVindex - 7, // 6: vschema.Table.auto_increment:type_name -> vschema.AutoIncrement - 8, // 7: vschema.Table.columns:type_name -> vschema.Column - 16, // 8: vschema.Column.type:type_name -> query.Type - 15, // 9: vschema.SrvVSchema.keyspaces:type_name -> vschema.SrvVSchema.KeyspacesEntry - 1, // 10: vschema.SrvVSchema.routing_rules:type_name -> vschema.RoutingRules - 10, // 11: vschema.SrvVSchema.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 11, // 12: vschema.ShardRoutingRules.rules:type_name -> vschema.ShardRoutingRule - 4, // 13: vschema.Keyspace.VindexesEntry.value:type_name -> vschema.Vindex - 5, // 14: vschema.Keyspace.TablesEntry.value:type_name -> vschema.Table - 3, // 15: vschema.SrvVSchema.KeyspacesEntry.value:type_name -> vschema.Keyspace - 16, // [16:16] is the sub-list for method output_type - 16, // [16:16] is the sub-list for method input_type - 16, // [16:16] is the sub-list for extension type_name - 16, // [16:16] is the sub-list for extension extendee - 0, // [0:16] is the sub-list for field type_name + 4, // 4: vschema.Keyspace.multi_tenant_spec:type_name -> vschema.MultiTenantSpec + 19, // 5: vschema.MultiTenantSpec.tenant_id_column_type:type_name -> query.Type + 17, // 6: vschema.Vindex.params:type_name -> vschema.Vindex.ParamsEntry + 7, // 7: vschema.Table.column_vindexes:type_name -> vschema.ColumnVindex + 8, // 8: vschema.Table.auto_increment:type_name -> vschema.AutoIncrement + 9, // 9: vschema.Table.columns:type_name -> vschema.Column + 19, // 10: vschema.Column.type:type_name -> 
query.Type + 18, // 11: vschema.SrvVSchema.keyspaces:type_name -> vschema.SrvVSchema.KeyspacesEntry + 1, // 12: vschema.SrvVSchema.routing_rules:type_name -> vschema.RoutingRules + 11, // 13: vschema.SrvVSchema.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 13, // 14: vschema.SrvVSchema.keyspace_routing_rules:type_name -> vschema.KeyspaceRoutingRules + 12, // 15: vschema.ShardRoutingRules.rules:type_name -> vschema.ShardRoutingRule + 14, // 16: vschema.KeyspaceRoutingRules.rules:type_name -> vschema.KeyspaceRoutingRule + 5, // 17: vschema.Keyspace.VindexesEntry.value:type_name -> vschema.Vindex + 6, // 18: vschema.Keyspace.TablesEntry.value:type_name -> vschema.Table + 3, // 19: vschema.SrvVSchema.KeyspacesEntry.value:type_name -> vschema.Keyspace + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_vschema_proto_init() } @@ -1062,7 +1334,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Vindex); i { + switch v := v.(*MultiTenantSpec); i { case 0: return &v.state case 1: @@ -1074,7 +1346,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Table); i { + switch v := v.(*Vindex); i { case 0: return &v.state case 1: @@ -1086,7 +1358,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ColumnVindex); i { + switch v := v.(*Table); i { case 0: return &v.state case 1: @@ -1098,7 +1370,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AutoIncrement); i { + 
switch v := v.(*ColumnVindex); i { case 0: return &v.state case 1: @@ -1110,7 +1382,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Column); i { + switch v := v.(*AutoIncrement); i { case 0: return &v.state case 1: @@ -1122,7 +1394,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SrvVSchema); i { + switch v := v.(*Column); i { case 0: return &v.state case 1: @@ -1134,7 +1406,7 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardRoutingRules); i { + switch v := v.(*SrvVSchema); i { case 0: return &v.state case 1: @@ -1146,6 +1418,18 @@ func file_vschema_proto_init() { } } file_vschema_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShardRoutingRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vschema_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardRoutingRule); i { case 0: return &v.state @@ -1157,14 +1441,39 @@ func file_vschema_proto_init() { return nil } } + file_vschema_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyspaceRoutingRules); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vschema_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyspaceRoutingRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } + file_vschema_proto_msgTypes[8].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vschema_proto_rawDesc, NumEnums: 1, - NumMessages: 15, + NumMessages: 18, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/vschema/vschema_vtproto.pb.go b/go/vt/proto/vschema/vschema_vtproto.pb.go index 94527ef14ae..8cf523f4009 100644 --- a/go/vt/proto/vschema/vschema_vtproto.pb.go +++ b/go/vt/proto/vschema/vschema_vtproto.pb.go @@ -74,6 +74,7 @@ func (m *Keyspace) CloneVT() *Keyspace { Sharded: m.Sharded, RequireExplicitRouting: m.RequireExplicitRouting, ForeignKeyMode: m.ForeignKeyMode, + MultiTenantSpec: m.MultiTenantSpec.CloneVT(), } if rhs := m.Vindexes; rhs != nil { tmpContainer := make(map[string]*Vindex, len(rhs)) @@ -100,6 +101,25 @@ func (m *Keyspace) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *MultiTenantSpec) CloneVT() *MultiTenantSpec { + if m == nil { + return (*MultiTenantSpec)(nil) + } + r := &MultiTenantSpec{ + TenantIdColumnName: m.TenantIdColumnName, + TenantIdColumnType: m.TenantIdColumnType, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *MultiTenantSpec) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Vindex) CloneVT() *Vindex { if m == nil { return (*Vindex)(nil) @@ -210,9 +230,22 @@ func (m *Column) CloneVT() *Column { return (*Column)(nil) } r := &Column{ - Name: m.Name, - Type: m.Type, - Invisible: m.Invisible, + Name: m.Name, + Type: m.Type, + Invisible: m.Invisible, + Default: m.Default, + CollationName: m.CollationName, + Size: m.Size, + Scale: m.Scale, + } + if rhs := m.Nullable; rhs != nil { + tmpVal := *rhs + r.Nullable = &tmpVal + } + if rhs := m.Values; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Values = tmpContainer } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -230,8 +263,9 @@ func (m *SrvVSchema) CloneVT() 
*SrvVSchema { return (*SrvVSchema)(nil) } r := &SrvVSchema{ - RoutingRules: m.RoutingRules.CloneVT(), - ShardRoutingRules: m.ShardRoutingRules.CloneVT(), + RoutingRules: m.RoutingRules.CloneVT(), + ShardRoutingRules: m.ShardRoutingRules.CloneVT(), + KeyspaceRoutingRules: m.KeyspaceRoutingRules.CloneVT(), } if rhs := m.Keyspaces; rhs != nil { tmpContainer := make(map[string]*Keyspace, len(rhs)) @@ -294,6 +328,48 @@ func (m *ShardRoutingRule) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *KeyspaceRoutingRules) CloneVT() *KeyspaceRoutingRules { + if m == nil { + return (*KeyspaceRoutingRules)(nil) + } + r := &KeyspaceRoutingRules{} + if rhs := m.Rules; rhs != nil { + tmpContainer := make([]*KeyspaceRoutingRule, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Rules = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyspaceRoutingRules) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *KeyspaceRoutingRule) CloneVT() *KeyspaceRoutingRule { + if m == nil { + return (*KeyspaceRoutingRule)(nil) + } + r := &KeyspaceRoutingRule{ + FromKeyspace: m.FromKeyspace, + ToKeyspace: m.ToKeyspace, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *KeyspaceRoutingRule) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *RoutingRules) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -418,6 +494,16 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MultiTenantSpec != nil { + size, err := m.MultiTenantSpec.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } if 
m.ForeignKeyMode != 0 { i = encodeVarint(dAtA, i, uint64(m.ForeignKeyMode)) i-- @@ -490,6 +576,51 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MultiTenantSpec) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MultiTenantSpec) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MultiTenantSpec) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TenantIdColumnType != 0 { + i = encodeVarint(dAtA, i, uint64(m.TenantIdColumnType)) + i-- + dAtA[i] = 0x10 + } + if len(m.TenantIdColumnName) > 0 { + i -= len(m.TenantIdColumnName) + copy(dAtA[i:], m.TenantIdColumnName) + i = encodeVarint(dAtA, i, uint64(len(m.TenantIdColumnName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Vindex) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -787,6 +918,49 @@ func (m *Column) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if m.Nullable != nil { + i-- + if *m.Nullable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Scale != 0 { + i = encodeVarint(dAtA, i, uint64(m.Scale)) + i-- + dAtA[i] = 0x38 + } + if m.Size != 0 { + i = encodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x30 + } + if 
len(m.CollationName) > 0 { + i -= len(m.CollationName) + copy(dAtA[i:], m.CollationName) + i = encodeVarint(dAtA, i, uint64(len(m.CollationName))) + i-- + dAtA[i] = 0x2a + } + if len(m.Default) > 0 { + i -= len(m.Default) + copy(dAtA[i:], m.Default) + i = encodeVarint(dAtA, i, uint64(len(m.Default))) + i-- + dAtA[i] = 0x22 + } if m.Invisible { i-- if m.Invisible { @@ -842,6 +1016,16 @@ func (m *SrvVSchema) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.KeyspaceRoutingRules != nil { + size, err := m.KeyspaceRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 + } if m.ShardRoutingRules != nil { size, err := m.ShardRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -986,6 +1170,98 @@ func (m *ShardRoutingRule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *KeyspaceRoutingRules) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyspaceRoutingRules) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyspaceRoutingRules) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Rules[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KeyspaceRoutingRule) 
MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyspaceRoutingRule) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyspaceRoutingRule) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ToKeyspace) > 0 { + i -= len(m.ToKeyspace) + copy(dAtA[i:], m.ToKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.ToKeyspace))) + i-- + dAtA[i] = 0x12 + } + if len(m.FromKeyspace) > 0 { + i -= len(m.FromKeyspace) + copy(dAtA[i:], m.FromKeyspace) + i = encodeVarint(dAtA, i, uint64(len(m.FromKeyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarint(dAtA []byte, offset int, v uint64) int { offset -= sov(v) base := offset @@ -1074,6 +1350,27 @@ func (m *Keyspace) SizeVT() (n int) { if m.ForeignKeyMode != 0 { n += 1 + sov(uint64(m.ForeignKeyMode)) } + if m.MultiTenantSpec != nil { + l = m.MultiTenantSpec.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *MultiTenantSpec) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TenantIdColumnName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TenantIdColumnType != 0 { + n += 1 + sov(uint64(m.TenantIdColumnType)) + } n += len(m.unknownFields) return n } @@ -1203,6 +1500,29 @@ func (m *Column) SizeVT() (n int) { if m.Invisible { n += 2 } + l = len(m.Default) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.CollationName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Size != 0 { + n += 1 + sov(uint64(m.Size)) + } + if m.Scale != 0 { + n += 1 + 
sov(uint64(m.Scale)) + } + if m.Nullable != nil { + n += 2 + } + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -1234,6 +1554,10 @@ func (m *SrvVSchema) SizeVT() (n int) { l = m.ShardRoutingRules.SizeVT() n += 1 + l + sov(uint64(l)) } + if m.KeyspaceRoutingRules != nil { + l = m.KeyspaceRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -1276,15 +1600,49 @@ func (m *ShardRoutingRule) SizeVT() (n int) { return n } -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func (m *KeyspaceRoutingRules) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n } -func (m *RoutingRules) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 + +func (m *KeyspaceRoutingRule) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FromKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ToKeyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *RoutingRules) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 @@ -1828,6 +2186,144 @@ func (m *Keyspace) UnmarshalVT(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MultiTenantSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MultiTenantSpec == nil { + m.MultiTenantSpec = &MultiTenantSpec{} + } + if err := m.MultiTenantSpec.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MultiTenantSpec) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MultiTenantSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MultiTenantSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantIdColumnName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TenantIdColumnName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantIdColumnType", wireType) + } + m.TenantIdColumnType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TenantIdColumnType |= query.Type(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2725,62 +3221,43 @@ func (m *Column) UnmarshalVT(dAtA []byte) error { } } m.Invisible = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SrvVSchema) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SrvVSchema: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SrvVSchema: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Default = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CollationName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -2790,41 +3267,215 @@ func (m *SrvVSchema) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspaces == nil { - m.Keyspaces = make(map[string]*Keyspace) + m.CollationName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) } - var mapkey string - var mapvalue *Keyspace - 
for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + m.Scale = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Scale |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nullable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Nullable = &b + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SrvVSchema) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SrvVSchema: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SrvVSchema: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Keyspaces == nil { + m.Keyspaces = make(map[string]*Keyspace) + } + var mapkey string + var mapvalue *Keyspace + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } 
fieldNum := int32(wire >> 3) @@ -2977,6 +3628,42 @@ func (m *SrvVSchema) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceRoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyspaceRoutingRules == nil { + m.KeyspaceRoutingRules = &KeyspaceRoutingRules{} + } + if err := m.KeyspaceRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3231,6 +3918,206 @@ func (m *ShardRoutingRule) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *KeyspaceRoutingRules) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyspaceRoutingRules: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyspaceRoutingRules: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &KeyspaceRoutingRule{}) + if err := m.Rules[len(m.Rules)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyspaceRoutingRule) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyspaceRoutingRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyspaceRoutingRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToKeyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skip(dAtA []byte) (n int, err error) { l := len(dAtA) diff --git a/go/vt/proto/vtadmin/vtadmin.pb.go b/go/vt/proto/vtadmin/vtadmin.pb.go index 3e41edd5f7e..efefbef6f21 100644 --- a/go/vt/proto/vtadmin/vtadmin.pb.go +++ b/go/vt/proto/vtadmin/vtadmin.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vtadmin.proto @@ -89,7 +89,7 @@ func (x Tablet_ServingState) Number() protoreflect.EnumNumber { // Deprecated: Use Tablet_ServingState.Descriptor instead. 
func (Tablet_ServingState) EnumDescriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{10, 0} + return file_vtadmin_proto_rawDescGZIP(), []int{11, 0} } // Cluster represents information about a Vitess cluster. @@ -589,6 +589,61 @@ func (x *Schema) GetTableSizes() map[string]*Schema_TableSize { return nil } +type SchemaMigration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + SchemaMigration *vtctldata.SchemaMigration `protobuf:"bytes,2,opt,name=schema_migration,json=schemaMigration,proto3" json:"schema_migration,omitempty"` +} + +func (x *SchemaMigration) Reset() { + *x = SchemaMigration{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemaMigration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaMigration) ProtoMessage() {} + +func (x *SchemaMigration) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemaMigration.ProtoReflect.Descriptor instead. +func (*SchemaMigration) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{8} +} + +func (x *SchemaMigration) GetCluster() *Cluster { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *SchemaMigration) GetSchemaMigration() *vtctldata.SchemaMigration { + if x != nil { + return x.SchemaMigration + } + return nil +} + // Shard groups the vtctldata information about a shard record together with // the Vitess cluster it belongs to. 
type Shard struct { @@ -603,7 +658,7 @@ type Shard struct { func (x *Shard) Reset() { *x = Shard{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[8] + mi := &file_vtadmin_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -616,7 +671,7 @@ func (x *Shard) String() string { func (*Shard) ProtoMessage() {} func (x *Shard) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[8] + mi := &file_vtadmin_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -629,7 +684,7 @@ func (x *Shard) ProtoReflect() protoreflect.Message { // Deprecated: Use Shard.ProtoReflect.Descriptor instead. func (*Shard) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{8} + return file_vtadmin_proto_rawDescGZIP(), []int{9} } func (x *Shard) GetCluster() *Cluster { @@ -659,7 +714,7 @@ type SrvVSchema struct { func (x *SrvVSchema) Reset() { *x = SrvVSchema{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[9] + mi := &file_vtadmin_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -672,7 +727,7 @@ func (x *SrvVSchema) String() string { func (*SrvVSchema) ProtoMessage() {} func (x *SrvVSchema) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[9] + mi := &file_vtadmin_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -685,7 +740,7 @@ func (x *SrvVSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use SrvVSchema.ProtoReflect.Descriptor instead. 
func (*SrvVSchema) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{9} + return file_vtadmin_proto_rawDescGZIP(), []int{10} } func (x *SrvVSchema) GetCell() string { @@ -725,7 +780,7 @@ type Tablet struct { func (x *Tablet) Reset() { *x = Tablet{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[10] + mi := &file_vtadmin_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -738,7 +793,7 @@ func (x *Tablet) String() string { func (*Tablet) ProtoMessage() {} func (x *Tablet) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[10] + mi := &file_vtadmin_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -751,7 +806,7 @@ func (x *Tablet) ProtoReflect() protoreflect.Message { // Deprecated: Use Tablet.ProtoReflect.Descriptor instead. func (*Tablet) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{10} + return file_vtadmin_proto_rawDescGZIP(), []int{11} } func (x *Tablet) GetCluster() *Cluster { @@ -797,7 +852,7 @@ type VSchema struct { func (x *VSchema) Reset() { *x = VSchema{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[11] + mi := &file_vtadmin_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -810,7 +865,7 @@ func (x *VSchema) String() string { func (*VSchema) ProtoMessage() {} func (x *VSchema) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[11] + mi := &file_vtadmin_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -823,7 +878,7 @@ func (x *VSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use VSchema.ProtoReflect.Descriptor instead. 
func (*VSchema) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{11} + return file_vtadmin_proto_rawDescGZIP(), []int{12} } func (x *VSchema) GetCluster() *Cluster { @@ -861,7 +916,7 @@ type Vtctld struct { func (x *Vtctld) Reset() { *x = Vtctld{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[12] + mi := &file_vtadmin_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -874,7 +929,7 @@ func (x *Vtctld) String() string { func (*Vtctld) ProtoMessage() {} func (x *Vtctld) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[12] + mi := &file_vtadmin_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -887,7 +942,7 @@ func (x *Vtctld) ProtoReflect() protoreflect.Message { // Deprecated: Use Vtctld.ProtoReflect.Descriptor instead. func (*Vtctld) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{12} + return file_vtadmin_proto_rawDescGZIP(), []int{13} } func (x *Vtctld) GetHostname() string { @@ -935,7 +990,7 @@ type VTGate struct { func (x *VTGate) Reset() { *x = VTGate{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[13] + mi := &file_vtadmin_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -948,7 +1003,7 @@ func (x *VTGate) String() string { func (*VTGate) ProtoMessage() {} func (x *VTGate) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[13] + mi := &file_vtadmin_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -961,7 +1016,7 @@ func (x *VTGate) ProtoReflect() protoreflect.Message { // Deprecated: Use VTGate.ProtoReflect.Descriptor instead. 
func (*VTGate) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{13} + return file_vtadmin_proto_rawDescGZIP(), []int{14} } func (x *VTGate) GetHostname() string { @@ -1019,7 +1074,7 @@ type Workflow struct { func (x *Workflow) Reset() { *x = Workflow{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[14] + mi := &file_vtadmin_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1032,7 +1087,7 @@ func (x *Workflow) String() string { func (*Workflow) ProtoMessage() {} func (x *Workflow) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[14] + mi := &file_vtadmin_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1045,7 +1100,7 @@ func (x *Workflow) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow.ProtoReflect.Descriptor instead. func (*Workflow) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{14} + return file_vtadmin_proto_rawDescGZIP(), []int{15} } func (x *Workflow) GetCluster() *Cluster { @@ -1069,32 +1124,32 @@ func (x *Workflow) GetWorkflow() *vtctldata.Workflow { return nil } -type CreateKeyspaceRequest struct { +type ApplySchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Options *vtctldata.CreateKeyspaceRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.ApplySchemaRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` } -func (x *CreateKeyspaceRequest) Reset() { - *x = CreateKeyspaceRequest{} +func (x *ApplySchemaRequest) Reset() { + 
*x = ApplySchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[15] + mi := &file_vtadmin_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *CreateKeyspaceRequest) String() string { +func (x *ApplySchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateKeyspaceRequest) ProtoMessage() {} +func (*ApplySchemaRequest) ProtoMessage() {} -func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[15] +func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1105,83 +1160,36 @@ func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{15} +// Deprecated: Use ApplySchemaRequest.ProtoReflect.Descriptor instead. 
+func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{16} } -func (x *CreateKeyspaceRequest) GetClusterId() string { +func (x *ApplySchemaRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *CreateKeyspaceRequest) GetOptions() *vtctldata.CreateKeyspaceRequest { - if x != nil { - return x.Options - } - return nil -} - -type CreateKeyspaceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` -} - -func (x *CreateKeyspaceResponse) Reset() { - *x = CreateKeyspaceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CreateKeyspaceResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateKeyspaceResponse) ProtoMessage() {} - -func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. 
-func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{16} -} - -func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { +func (x *ApplySchemaRequest) GetRequest() *vtctldata.ApplySchemaRequest { if x != nil { - return x.Keyspace + return x.Request } return nil } -type CreateShardRequest struct { +type CancelSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Options *vtctldata.CreateShardRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.CancelSchemaMigrationRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` } -func (x *CreateShardRequest) Reset() { - *x = CreateShardRequest{} +func (x *CancelSchemaMigrationRequest) Reset() { + *x = CancelSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1189,13 +1197,13 @@ func (x *CreateShardRequest) Reset() { } } -func (x *CreateShardRequest) String() string { +func (x *CancelSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateShardRequest) ProtoMessage() {} +func (*CancelSchemaMigrationRequest) ProtoMessage() {} -func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { +func (x *CancelSchemaMigrationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1207,36 +1215,36 @@ func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
CreateShardRequest.ProtoReflect.Descriptor instead. -func (*CreateShardRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CancelSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CancelSchemaMigrationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{17} } -func (x *CreateShardRequest) GetClusterId() string { +func (x *CancelSchemaMigrationRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *CreateShardRequest) GetOptions() *vtctldata.CreateShardRequest { +func (x *CancelSchemaMigrationRequest) GetRequest() *vtctldata.CancelSchemaMigrationRequest { if x != nil { - return x.Options + return x.Request } return nil } -type DeleteKeyspaceRequest struct { +type CleanupSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Options *vtctldata.DeleteKeyspaceRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.CleanupSchemaMigrationRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` } -func (x *DeleteKeyspaceRequest) Reset() { - *x = DeleteKeyspaceRequest{} +func (x *CleanupSchemaMigrationRequest) Reset() { + *x = CleanupSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1244,13 +1252,13 @@ func (x *DeleteKeyspaceRequest) Reset() { } } -func (x *DeleteKeyspaceRequest) String() string { +func (x *CleanupSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteKeyspaceRequest) ProtoMessage() {} +func (*CleanupSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteKeyspaceRequest) 
ProtoReflect() protoreflect.Message { +func (x *CleanupSchemaMigrationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1262,36 +1270,36 @@ func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CleanupSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CleanupSchemaMigrationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{18} } -func (x *DeleteKeyspaceRequest) GetClusterId() string { +func (x *CleanupSchemaMigrationRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *DeleteKeyspaceRequest) GetOptions() *vtctldata.DeleteKeyspaceRequest { +func (x *CleanupSchemaMigrationRequest) GetRequest() *vtctldata.CleanupSchemaMigrationRequest { if x != nil { - return x.Options + return x.Request } return nil } -type DeleteShardsRequest struct { +type CompleteSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Options *vtctldata.DeleteShardsRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.CompleteSchemaMigrationRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` } -func (x *DeleteShardsRequest) Reset() { - *x = DeleteShardsRequest{} +func (x *CompleteSchemaMigrationRequest) Reset() { + *x = CompleteSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[19] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1299,13 +1307,13 @@ func (x *DeleteShardsRequest) Reset() { } } -func (x *DeleteShardsRequest) String() string { +func (x *CompleteSchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteShardsRequest) ProtoMessage() {} +func (*CompleteSchemaMigrationRequest) ProtoMessage() {} -func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { +func (x *CompleteSchemaMigrationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1317,37 +1325,36 @@ func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. -func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CompleteSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*CompleteSchemaMigrationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{19} } -func (x *DeleteShardsRequest) GetClusterId() string { +func (x *CompleteSchemaMigrationRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *DeleteShardsRequest) GetOptions() *vtctldata.DeleteShardsRequest { +func (x *CompleteSchemaMigrationRequest) GetRequest() *vtctldata.CompleteSchemaMigrationRequest { if x != nil { - return x.Options + return x.Request } return nil } -type DeleteTabletRequest struct { +type CreateKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" 
json:"allow_primary,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Options *vtctldata.CreateKeyspaceRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (x *DeleteTabletRequest) Reset() { - *x = DeleteTabletRequest{} +func (x *CreateKeyspaceRequest) Reset() { + *x = CreateKeyspaceRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1355,13 +1362,13 @@ func (x *DeleteTabletRequest) Reset() { } } -func (x *DeleteTabletRequest) String() string { +func (x *CreateKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteTabletRequest) ProtoMessage() {} +func (*CreateKeyspaceRequest) ProtoMessage() {} -func (x *DeleteTabletRequest) ProtoReflect() protoreflect.Message { +func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1373,43 +1380,35 @@ func (x *DeleteTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteTabletRequest.ProtoReflect.Descriptor instead. -func (*DeleteTabletRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{20} } -func (x *DeleteTabletRequest) GetAlias() *topodata.TabletAlias { +func (x *CreateKeyspaceRequest) GetClusterId() string { if x != nil { - return x.Alias + return x.ClusterId } - return nil + return "" } -func (x *DeleteTabletRequest) GetClusterIds() []string { +func (x *CreateKeyspaceRequest) GetOptions() *vtctldata.CreateKeyspaceRequest { if x != nil { - return x.ClusterIds + return x.Options } return nil } -func (x *DeleteTabletRequest) GetAllowPrimary() bool { - if x != nil { - return x.AllowPrimary - } - return false -} - -type DeleteTabletResponse struct { +type CreateKeyspaceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *DeleteTabletResponse) Reset() { - *x = DeleteTabletResponse{} +func (x *CreateKeyspaceResponse) Reset() { + *x = CreateKeyspaceResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1417,13 +1416,13 @@ func (x *DeleteTabletResponse) Reset() { } } -func (x *DeleteTabletResponse) String() string { +func (x *CreateKeyspaceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteTabletResponse) ProtoMessage() {} +func (*CreateKeyspaceResponse) ProtoMessage() {} -func (x *DeleteTabletResponse) ProtoReflect() protoreflect.Message { +func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1435,36 +1434,29 @@ func (x *DeleteTabletResponse) 
ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteTabletResponse.ProtoReflect.Descriptor instead. -func (*DeleteTabletResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. +func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{21} } -func (x *DeleteTabletResponse) GetStatus() string { +func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { if x != nil { - return x.Status + return x.Keyspace } - return "" + return nil } -func (x *DeleteTabletResponse) GetCluster() *Cluster { - if x != nil { - return x.Cluster - } - return nil -} - -type EmergencyFailoverShardRequest struct { +type CreateShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Options *vtctldata.EmergencyReparentShardRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Options *vtctldata.CreateShardRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (x *EmergencyFailoverShardRequest) Reset() { - *x = EmergencyFailoverShardRequest{} +func (x *CreateShardRequest) Reset() { + *x = CreateShardRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1472,13 +1464,13 @@ func (x *EmergencyFailoverShardRequest) Reset() { } } -func (x *EmergencyFailoverShardRequest) String() string { +func (x *CreateShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmergencyFailoverShardRequest) ProtoMessage() {} +func (*CreateShardRequest) ProtoMessage() {} -func (x *EmergencyFailoverShardRequest) ProtoReflect() 
protoreflect.Message { +func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1490,43 +1482,36 @@ func (x *EmergencyFailoverShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EmergencyFailoverShardRequest.ProtoReflect.Descriptor instead. -func (*EmergencyFailoverShardRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. +func (*CreateShardRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{22} } -func (x *EmergencyFailoverShardRequest) GetClusterId() string { +func (x *CreateShardRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *EmergencyFailoverShardRequest) GetOptions() *vtctldata.EmergencyReparentShardRequest { +func (x *CreateShardRequest) GetOptions() *vtctldata.CreateShardRequest { if x != nil { return x.Options } return nil } -type EmergencyFailoverShardResponse struct { +type DeleteKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - // PromotedPrimary is the tablet alias that was promoted to shard primary. - // If NewPrimary was set in the request options, then this will be the - // same tablet alias. Otherwise, it will be the alias of the tablet found - // to be most up-to-date in the shard. 
- PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` - Events []*logutil.Event `protobuf:"bytes,5,rep,name=events,proto3" json:"events,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Options *vtctldata.DeleteKeyspaceRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (x *EmergencyFailoverShardResponse) Reset() { - *x = EmergencyFailoverShardResponse{} +func (x *DeleteKeyspaceRequest) Reset() { + *x = DeleteKeyspaceRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1534,13 +1519,13 @@ func (x *EmergencyFailoverShardResponse) Reset() { } } -func (x *EmergencyFailoverShardResponse) String() string { +func (x *DeleteKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*EmergencyFailoverShardResponse) ProtoMessage() {} +func (*DeleteKeyspaceRequest) ProtoMessage() {} -func (x *EmergencyFailoverShardResponse) ProtoReflect() protoreflect.Message { +func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1552,58 +1537,36 @@ func (x *EmergencyFailoverShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use EmergencyFailoverShardResponse.ProtoReflect.Descriptor instead. -func (*EmergencyFailoverShardResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{23} } -func (x *EmergencyFailoverShardResponse) GetCluster() *Cluster { - if x != nil { - return x.Cluster - } - return nil -} - -func (x *EmergencyFailoverShardResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *EmergencyFailoverShardResponse) GetShard() string { +func (x *DeleteKeyspaceRequest) GetClusterId() string { if x != nil { - return x.Shard + return x.ClusterId } return "" } -func (x *EmergencyFailoverShardResponse) GetPromotedPrimary() *topodata.TabletAlias { - if x != nil { - return x.PromotedPrimary - } - return nil -} - -func (x *EmergencyFailoverShardResponse) GetEvents() []*logutil.Event { +func (x *DeleteKeyspaceRequest) GetOptions() *vtctldata.DeleteKeyspaceRequest { if x != nil { - return x.Events + return x.Options } return nil } -type FindSchemaRequest struct { +type DeleteShardsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,3,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Options *vtctldata.DeleteShardsRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (x *FindSchemaRequest) Reset() { - *x = FindSchemaRequest{} +func (x *DeleteShardsRequest) Reset() { + *x = DeleteShardsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1611,13 +1574,13 @@ func (x *FindSchemaRequest) Reset() { } } -func (x 
*FindSchemaRequest) String() string { +func (x *DeleteShardsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FindSchemaRequest) ProtoMessage() {} +func (*DeleteShardsRequest) ProtoMessage() {} -func (x *FindSchemaRequest) ProtoReflect() protoreflect.Message { +func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1629,56 +1592,37 @@ func (x *FindSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FindSchemaRequest.ProtoReflect.Descriptor instead. -func (*FindSchemaRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. +func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{24} } -func (x *FindSchemaRequest) GetTable() string { +func (x *DeleteShardsRequest) GetClusterId() string { if x != nil { - return x.Table + return x.ClusterId } return "" } -func (x *FindSchemaRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -func (x *FindSchemaRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { +func (x *DeleteShardsRequest) GetOptions() *vtctldata.DeleteShardsRequest { if x != nil { - return x.TableSizeOptions + return x.Options } return nil } -type GetBackupsRequest struct { +type DeleteTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - // Keyspaces, if set, limits backups to just the specified keyspaces. - // Applies to all clusters in the request. 
- Keyspaces []string `protobuf:"bytes,2,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` - // KeyspaceShards, if set, limits backups to just the specified - // keyspace/shards. Applies to all clusters in the request. - // - // This field takes precedence over Keyspaces. If KeyspaceShards is set, - // Keyspaces is ignored. - KeyspaceShards []string `protobuf:"bytes,3,rep,name=keyspace_shards,json=keyspaceShards,proto3" json:"keyspace_shards,omitempty"` - // RequestOptions controls the per-shard request options when making - // GetBackups requests to vtctlds. Note that the Keyspace and Shard fields - // of this field are ignored; it is used only to specify Limit and Detailed - // fields. - RequestOptions *vtctldata.GetBackupsRequest `protobuf:"bytes,4,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` } -func (x *GetBackupsRequest) Reset() { - *x = GetBackupsRequest{} +func (x *DeleteTabletRequest) Reset() { + *x = DeleteTabletRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1686,13 +1630,13 @@ func (x *GetBackupsRequest) Reset() { } } -func (x *GetBackupsRequest) String() string { +func (x *DeleteTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupsRequest) ProtoMessage() {} +func (*DeleteTabletRequest) ProtoMessage() {} -func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { +func (x *DeleteTabletRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1704,49 +1648,43 @@ func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. -func (*GetBackupsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteTabletRequest.ProtoReflect.Descriptor instead. +func (*DeleteTabletRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{25} } -func (x *GetBackupsRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -func (x *GetBackupsRequest) GetKeyspaces() []string { +func (x *DeleteTabletRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspaces + return x.Alias } return nil } -func (x *GetBackupsRequest) GetKeyspaceShards() []string { +func (x *DeleteTabletRequest) GetClusterIds() []string { if x != nil { - return x.KeyspaceShards + return x.ClusterIds } return nil } -func (x *GetBackupsRequest) GetRequestOptions() *vtctldata.GetBackupsRequest { +func (x *DeleteTabletRequest) GetAllowPrimary() bool { if x != nil { - return x.RequestOptions + return x.AllowPrimary } - return nil + return false } -type GetBackupsResponse struct { +type DeleteTabletResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Backups []*ClusterBackup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x *GetBackupsResponse) Reset() { - *x = GetBackupsResponse{} +func (x *DeleteTabletResponse) Reset() { + *x = DeleteTabletResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1754,13 +1692,13 @@ func (x *GetBackupsResponse) Reset() { } } -func (x 
*GetBackupsResponse) String() string { +func (x *DeleteTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupsResponse) ProtoMessage() {} +func (*DeleteTabletResponse) ProtoMessage() {} -func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { +func (x *DeleteTabletResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1772,38 +1710,36 @@ func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. -func (*GetBackupsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use DeleteTabletResponse.ProtoReflect.Descriptor instead. +func (*DeleteTabletResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{26} } -func (x *GetBackupsResponse) GetBackups() []*ClusterBackup { +func (x *DeleteTabletResponse) GetStatus() string { if x != nil { - return x.Backups + return x.Status + } + return "" +} + +func (x *DeleteTabletResponse) GetCluster() *Cluster { + if x != nil { + return x.Cluster } return nil } -type GetCellInfosRequest struct { +type EmergencyFailoverShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - // Cells, if specified, limits the response to include only CellInfo objects - // with those names. If omitted, all CellInfo objects in each cluster are - // returned. - // - // Mutually-exclusive with NamesOnly. If both are set, this field takes - // precedence. - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` - // Return only the cell names in each cluster; the actual CellInfo objects - // will be empty. 
- NamesOnly bool `protobuf:"varint,3,opt,name=names_only,json=namesOnly,proto3" json:"names_only,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Options *vtctldata.EmergencyReparentShardRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (x *GetCellInfosRequest) Reset() { - *x = GetCellInfosRequest{} +func (x *EmergencyFailoverShardRequest) Reset() { + *x = EmergencyFailoverShardRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1811,13 +1747,13 @@ func (x *GetCellInfosRequest) Reset() { } } -func (x *GetCellInfosRequest) String() string { +func (x *EmergencyFailoverShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfosRequest) ProtoMessage() {} +func (*EmergencyFailoverShardRequest) ProtoMessage() {} -func (x *GetCellInfosRequest) ProtoReflect() protoreflect.Message { +func (x *EmergencyFailoverShardRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1829,42 +1765,43 @@ func (x *GetCellInfosRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfosRequest.ProtoReflect.Descriptor instead. -func (*GetCellInfosRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use EmergencyFailoverShardRequest.ProtoReflect.Descriptor instead. 
+func (*EmergencyFailoverShardRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{27} } -func (x *GetCellInfosRequest) GetClusterIds() []string { +func (x *EmergencyFailoverShardRequest) GetClusterId() string { if x != nil { - return x.ClusterIds + return x.ClusterId } - return nil + return "" } -func (x *GetCellInfosRequest) GetCells() []string { +func (x *EmergencyFailoverShardRequest) GetOptions() *vtctldata.EmergencyReparentShardRequest { if x != nil { - return x.Cells + return x.Options } return nil } -func (x *GetCellInfosRequest) GetNamesOnly() bool { - if x != nil { - return x.NamesOnly - } - return false -} - -type GetCellInfosResponse struct { +type EmergencyFailoverShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - CellInfos []*ClusterCellInfo `protobuf:"bytes,1,rep,name=cell_infos,json=cellInfos,proto3" json:"cell_infos,omitempty"` -} + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the tablet alias that was promoted to shard primary. + // If NewPrimary was set in the request options, then this will be the + // same tablet alias. Otherwise, it will be the alias of the tablet found + // to be most up-to-date in the shard. 
+ PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,5,rep,name=events,proto3" json:"events,omitempty"` +} -func (x *GetCellInfosResponse) Reset() { - *x = GetCellInfosResponse{} +func (x *EmergencyFailoverShardResponse) Reset() { + *x = EmergencyFailoverShardResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1872,13 +1809,13 @@ func (x *GetCellInfosResponse) Reset() { } } -func (x *GetCellInfosResponse) String() string { +func (x *EmergencyFailoverShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellInfosResponse) ProtoMessage() {} +func (*EmergencyFailoverShardResponse) ProtoMessage() {} -func (x *GetCellInfosResponse) ProtoReflect() protoreflect.Message { +func (x *EmergencyFailoverShardResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1890,28 +1827,58 @@ func (x *GetCellInfosResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellInfosResponse.ProtoReflect.Descriptor instead. -func (*GetCellInfosResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use EmergencyFailoverShardResponse.ProtoReflect.Descriptor instead. 
+func (*EmergencyFailoverShardResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{28} } -func (x *GetCellInfosResponse) GetCellInfos() []*ClusterCellInfo { +func (x *EmergencyFailoverShardResponse) GetCluster() *Cluster { if x != nil { - return x.CellInfos + return x.Cluster } return nil } -type GetCellsAliasesRequest struct { +func (x *EmergencyFailoverShardResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *EmergencyFailoverShardResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *EmergencyFailoverShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if x != nil { + return x.PromotedPrimary + } + return nil +} + +func (x *EmergencyFailoverShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type FindSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,3,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` } -func (x *GetCellsAliasesRequest) Reset() { - *x = GetCellsAliasesRequest{} +func (x *FindSchemaRequest) Reset() { + *x = FindSchemaRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1919,13 +1886,13 @@ func (x *GetCellsAliasesRequest) Reset() { } } -func (x *GetCellsAliasesRequest) String() string { +func (x *FindSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellsAliasesRequest) 
ProtoMessage() {} +func (*FindSchemaRequest) ProtoMessage() {} -func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { +func (x *FindSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1937,28 +1904,56 @@ func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. -func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use FindSchemaRequest.ProtoReflect.Descriptor instead. +func (*FindSchemaRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{29} } -func (x *GetCellsAliasesRequest) GetClusterIds() []string { +func (x *FindSchemaRequest) GetTable() string { + if x != nil { + return x.Table + } + return "" +} + +func (x *FindSchemaRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type GetCellsAliasesResponse struct { +func (x *FindSchemaRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { + if x != nil { + return x.TableSizeOptions + } + return nil +} + +type GetBackupsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Aliases []*ClusterCellsAliases `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Keyspaces, if set, limits backups to just the specified keyspaces. + // Applies to all clusters in the request. + Keyspaces []string `protobuf:"bytes,2,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + // KeyspaceShards, if set, limits backups to just the specified + // keyspace/shards. Applies to all clusters in the request. + // + // This field takes precedence over Keyspaces. 
If KeyspaceShards is set, + // Keyspaces is ignored. + KeyspaceShards []string `protobuf:"bytes,3,rep,name=keyspace_shards,json=keyspaceShards,proto3" json:"keyspace_shards,omitempty"` + // RequestOptions controls the per-shard request options when making + // GetBackups requests to vtctlds. Note that the Keyspace and Shard fields + // of this field are ignored; it is used only to specify Limit and Detailed + // fields. + RequestOptions *vtctldata.GetBackupsRequest `protobuf:"bytes,4,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"` } -func (x *GetCellsAliasesResponse) Reset() { - *x = GetCellsAliasesResponse{} +func (x *GetBackupsRequest) Reset() { + *x = GetBackupsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1966,13 +1961,13 @@ func (x *GetCellsAliasesResponse) Reset() { } } -func (x *GetCellsAliasesResponse) String() string { +func (x *GetBackupsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetCellsAliasesResponse) ProtoMessage() {} +func (*GetBackupsRequest) ProtoMessage() {} -func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { +func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1984,26 +1979,49 @@ func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. -func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. 
+func (*GetBackupsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{30} } -func (x *GetCellsAliasesResponse) GetAliases() []*ClusterCellsAliases { +func (x *GetBackupsRequest) GetClusterIds() []string { if x != nil { - return x.Aliases + return x.ClusterIds } return nil } -type GetClustersRequest struct { +func (x *GetBackupsRequest) GetKeyspaces() []string { + if x != nil { + return x.Keyspaces + } + return nil +} + +func (x *GetBackupsRequest) GetKeyspaceShards() []string { + if x != nil { + return x.KeyspaceShards + } + return nil +} + +func (x *GetBackupsRequest) GetRequestOptions() *vtctldata.GetBackupsRequest { + if x != nil { + return x.RequestOptions + } + return nil +} + +type GetBackupsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Backups []*ClusterBackup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` } -func (x *GetClustersRequest) Reset() { - *x = GetClustersRequest{} +func (x *GetBackupsResponse) Reset() { + *x = GetBackupsResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2011,13 +2029,13 @@ func (x *GetClustersRequest) Reset() { } } -func (x *GetClustersRequest) String() string { +func (x *GetBackupsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetClustersRequest) ProtoMessage() {} +func (*GetBackupsResponse) ProtoMessage() {} -func (x *GetClustersRequest) ProtoReflect() protoreflect.Message { +func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2029,21 +2047,38 @@ func (x *GetClustersRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetClustersRequest.ProtoReflect.Descriptor instead. 
-func (*GetClustersRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. +func (*GetBackupsResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{31} } -type GetClustersResponse struct { +func (x *GetBackupsResponse) GetBackups() []*ClusterBackup { + if x != nil { + return x.Backups + } + return nil +} + +type GetCellInfosRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Cells, if specified, limits the response to include only CellInfo objects + // with those names. If omitted, all CellInfo objects in each cluster are + // returned. + // + // Mutually-exclusive with NamesOnly. If both are set, this field takes + // precedence. + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + // Return only the cell names in each cluster; the actual CellInfo objects + // will be empty. 
+ NamesOnly bool `protobuf:"varint,3,opt,name=names_only,json=namesOnly,proto3" json:"names_only,omitempty"` } -func (x *GetClustersResponse) Reset() { - *x = GetClustersResponse{} +func (x *GetCellInfosRequest) Reset() { + *x = GetCellInfosRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2051,13 +2086,13 @@ func (x *GetClustersResponse) Reset() { } } -func (x *GetClustersResponse) String() string { +func (x *GetCellInfosRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetClustersResponse) ProtoMessage() {} +func (*GetCellInfosRequest) ProtoMessage() {} -func (x *GetClustersResponse) ProtoReflect() protoreflect.Message { +func (x *GetCellInfosRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2069,29 +2104,42 @@ func (x *GetClustersResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetClustersResponse.ProtoReflect.Descriptor instead. -func (*GetClustersResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetCellInfosRequest.ProtoReflect.Descriptor instead. 
+func (*GetCellInfosRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{32} } -func (x *GetClustersResponse) GetClusters() []*Cluster { +func (x *GetCellInfosRequest) GetClusterIds() []string { if x != nil { - return x.Clusters + return x.ClusterIds } return nil } -type GetFullStatusRequest struct { +func (x *GetCellInfosRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +func (x *GetCellInfosRequest) GetNamesOnly() bool { + if x != nil { + return x.NamesOnly + } + return false +} + +type GetCellInfosResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Alias *topodata.TabletAlias `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"` + CellInfos []*ClusterCellInfo `protobuf:"bytes,1,rep,name=cell_infos,json=cellInfos,proto3" json:"cell_infos,omitempty"` } -func (x *GetFullStatusRequest) Reset() { - *x = GetFullStatusRequest{} +func (x *GetCellInfosResponse) Reset() { + *x = GetCellInfosResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2099,13 +2147,13 @@ func (x *GetFullStatusRequest) Reset() { } } -func (x *GetFullStatusRequest) String() string { +func (x *GetCellInfosResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetFullStatusRequest) ProtoMessage() {} +func (*GetCellInfosResponse) ProtoMessage() {} -func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { +func (x *GetCellInfosResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2117,26 +2165,19 @@ func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// 
Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. -func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetCellInfosResponse.ProtoReflect.Descriptor instead. +func (*GetCellInfosResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{33} } -func (x *GetFullStatusRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *GetFullStatusRequest) GetAlias() *topodata.TabletAlias { +func (x *GetCellInfosResponse) GetCellInfos() []*ClusterCellInfo { if x != nil { - return x.Alias + return x.CellInfos } return nil } -type GetGatesRequest struct { +type GetCellsAliasesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -2144,8 +2185,8 @@ type GetGatesRequest struct { ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *GetGatesRequest) Reset() { - *x = GetGatesRequest{} +func (x *GetCellsAliasesRequest) Reset() { + *x = GetCellsAliasesRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2153,13 +2194,13 @@ func (x *GetGatesRequest) Reset() { } } -func (x *GetGatesRequest) String() string { +func (x *GetCellsAliasesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetGatesRequest) ProtoMessage() {} +func (*GetCellsAliasesRequest) ProtoMessage() {} -func (x *GetGatesRequest) ProtoReflect() protoreflect.Message { +func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2171,28 +2212,28 @@ func (x *GetGatesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetGatesRequest.ProtoReflect.Descriptor instead. 
-func (*GetGatesRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. +func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{34} } -func (x *GetGatesRequest) GetClusterIds() []string { +func (x *GetCellsAliasesRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type GetGatesResponse struct { +type GetCellsAliasesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Gates []*VTGate `protobuf:"bytes,1,rep,name=gates,proto3" json:"gates,omitempty"` + Aliases []*ClusterCellsAliases `protobuf:"bytes,1,rep,name=aliases,proto3" json:"aliases,omitempty"` } -func (x *GetGatesResponse) Reset() { - *x = GetGatesResponse{} +func (x *GetCellsAliasesResponse) Reset() { + *x = GetCellsAliasesResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2200,13 +2241,13 @@ func (x *GetGatesResponse) Reset() { } } -func (x *GetGatesResponse) String() string { +func (x *GetCellsAliasesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetGatesResponse) ProtoMessage() {} +func (*GetCellsAliasesResponse) ProtoMessage() {} -func (x *GetGatesResponse) ProtoReflect() protoreflect.Message { +func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2218,29 +2259,26 @@ func (x *GetGatesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetGatesResponse.ProtoReflect.Descriptor instead. -func (*GetGatesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. 
+func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{35} } -func (x *GetGatesResponse) GetGates() []*VTGate { +func (x *GetCellsAliasesResponse) GetAliases() []*ClusterCellsAliases { if x != nil { - return x.Gates + return x.Aliases } return nil } -type GetKeyspaceRequest struct { +type GetClustersRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *GetKeyspaceRequest) Reset() { - *x = GetKeyspaceRequest{} +func (x *GetClustersRequest) Reset() { + *x = GetClustersRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2248,13 +2286,13 @@ func (x *GetKeyspaceRequest) Reset() { } } -func (x *GetKeyspaceRequest) String() string { +func (x *GetClustersRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspaceRequest) ProtoMessage() {} +func (*GetClustersRequest) ProtoMessage() {} -func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { +func (x *GetClustersRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2266,35 +2304,21 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetClustersRequest.ProtoReflect.Descriptor instead. 
+func (*GetClustersRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{36} } -func (x *GetKeyspaceRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *GetKeyspaceRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -type GetKeyspacesRequest struct { +type GetClustersResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` } -func (x *GetKeyspacesRequest) Reset() { - *x = GetKeyspacesRequest{} +func (x *GetClustersResponse) Reset() { + *x = GetClustersResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2302,13 +2326,13 @@ func (x *GetKeyspacesRequest) Reset() { } } -func (x *GetKeyspacesRequest) String() string { +func (x *GetClustersResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspacesRequest) ProtoMessage() {} +func (*GetClustersResponse) ProtoMessage() {} -func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { +func (x *GetClustersResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2320,28 +2344,29 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. -func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetClustersResponse.ProtoReflect.Descriptor instead. 
+func (*GetClustersResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{37} } -func (x *GetKeyspacesRequest) GetClusterIds() []string { +func (x *GetClustersResponse) GetClusters() []*Cluster { if x != nil { - return x.ClusterIds + return x.Clusters } return nil } -type GetKeyspacesResponse struct { +type GetFullStatusRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"` } -func (x *GetKeyspacesResponse) Reset() { - *x = GetKeyspacesResponse{} +func (x *GetFullStatusRequest) Reset() { + *x = GetFullStatusRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2349,13 +2374,13 @@ func (x *GetKeyspacesResponse) Reset() { } } -func (x *GetKeyspacesResponse) String() string { +func (x *GetFullStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetKeyspacesResponse) ProtoMessage() {} +func (*GetFullStatusRequest) ProtoMessage() {} -func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { +func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2367,31 +2392,35 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. -func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. 
+func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{38} } -func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { +func (x *GetFullStatusRequest) GetClusterId() string { if x != nil { - return x.Keyspaces + return x.ClusterId + } + return "" +} + +func (x *GetFullStatusRequest) GetAlias() *topodata.TabletAlias { + if x != nil { + return x.Alias } return nil } -type GetSchemaRequest struct { +type GetGatesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"` - TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,4,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *GetSchemaRequest) Reset() { - *x = GetSchemaRequest{} +func (x *GetGatesRequest) Reset() { + *x = GetGatesRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2399,13 +2428,13 @@ func (x *GetSchemaRequest) Reset() { } } -func (x *GetSchemaRequest) String() string { +func (x *GetGatesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemaRequest) ProtoMessage() {} +func (*GetGatesRequest) ProtoMessage() {} -func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { +func (x *GetGatesRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2417,50 +2446,28 @@ func (x *GetSchemaRequest) 
ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetSchemaRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetGatesRequest.ProtoReflect.Descriptor instead. +func (*GetGatesRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{39} } -func (x *GetSchemaRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *GetSchemaRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *GetSchemaRequest) GetTable() string { - if x != nil { - return x.Table - } - return "" -} - -func (x *GetSchemaRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { +func (x *GetGatesRequest) GetClusterIds() []string { if x != nil { - return x.TableSizeOptions + return x.ClusterIds } return nil } -type GetSchemasRequest struct { +type GetGatesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,2,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` + Gates []*VTGate `protobuf:"bytes,1,rep,name=gates,proto3" json:"gates,omitempty"` } -func (x *GetSchemasRequest) Reset() { - *x = GetSchemasRequest{} +func (x *GetGatesResponse) Reset() { + *x = GetGatesResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2468,13 +2475,13 @@ func (x *GetSchemasRequest) Reset() { } } -func (x *GetSchemasRequest) String() string { +func (x *GetGatesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemasRequest) ProtoMessage() {} +func (*GetGatesResponse) ProtoMessage() {} -func (x *GetSchemasRequest) 
ProtoReflect() protoreflect.Message { +func (x *GetGatesResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2486,35 +2493,29 @@ func (x *GetSchemasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemasRequest.ProtoReflect.Descriptor instead. -func (*GetSchemasRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetGatesResponse.ProtoReflect.Descriptor instead. +func (*GetGatesResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{40} } -func (x *GetSchemasRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -func (x *GetSchemasRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { +func (x *GetGatesResponse) GetGates() []*VTGate { if x != nil { - return x.TableSizeOptions + return x.Gates } return nil } -type GetSchemasResponse struct { +type GetKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Schemas []*Schema `protobuf:"bytes,1,rep,name=schemas,proto3" json:"schemas,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *GetSchemasResponse) Reset() { - *x = GetSchemasResponse{} +func (x *GetKeyspaceRequest) Reset() { + *x = GetKeyspaceRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2522,13 +2523,13 @@ func (x *GetSchemasResponse) Reset() { } } -func (x *GetSchemasResponse) String() string { +func (x *GetKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemasResponse) ProtoMessage() {} +func (*GetKeyspaceRequest) ProtoMessage() {} 
-func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message { +func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2540,37 +2541,35 @@ func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemasResponse.ProtoReflect.Descriptor instead. -func (*GetSchemasResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{41} } -func (x *GetSchemasResponse) GetSchemas() []*Schema { +func (x *GetKeyspaceRequest) GetClusterId() string { if x != nil { - return x.Schemas + return x.ClusterId } - return nil + return "" } -type GetShardReplicationPositionsRequest struct { +func (x *GetKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +type GetKeyspacesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - // Keyspaces, if set, limits replication positions to just the specified - // keyspaces. Applies to all clusters in the request. - Keyspaces []string `protobuf:"bytes,2,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` - // KeyspaceShards, if set, limits replicatin positions to just the specified - // keyspace/shards. Applies to all clusters in the request. - // - // This field takes precedence over Keyspaces. If KeyspaceShards is set, - // Keyspaces is ignored. 
- KeyspaceShards []string `protobuf:"bytes,3,rep,name=keyspace_shards,json=keyspaceShards,proto3" json:"keyspace_shards,omitempty"` } -func (x *GetShardReplicationPositionsRequest) Reset() { - *x = GetShardReplicationPositionsRequest{} +func (x *GetKeyspacesRequest) Reset() { + *x = GetKeyspacesRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2578,13 +2577,13 @@ func (x *GetShardReplicationPositionsRequest) Reset() { } } -func (x *GetShardReplicationPositionsRequest) String() string { +func (x *GetKeyspacesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardReplicationPositionsRequest) ProtoMessage() {} +func (*GetKeyspacesRequest) ProtoMessage() {} -func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { +func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2596,42 +2595,28 @@ func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use GetShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. -func (*GetShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. 
+func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{42} } -func (x *GetShardReplicationPositionsRequest) GetClusterIds() []string { +func (x *GetKeyspacesRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -func (x *GetShardReplicationPositionsRequest) GetKeyspaces() []string { - if x != nil { - return x.Keyspaces - } - return nil -} - -func (x *GetShardReplicationPositionsRequest) GetKeyspaceShards() []string { - if x != nil { - return x.KeyspaceShards - } - return nil -} - -type GetShardReplicationPositionsResponse struct { +type GetKeyspacesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ReplicationPositions []*ClusterShardReplicationPosition `protobuf:"bytes,1,rep,name=replication_positions,json=replicationPositions,proto3" json:"replication_positions,omitempty"` + Keyspaces []*Keyspace `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` } -func (x *GetShardReplicationPositionsResponse) Reset() { - *x = GetShardReplicationPositionsResponse{} +func (x *GetKeyspacesResponse) Reset() { + *x = GetKeyspacesResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2639,13 +2624,13 @@ func (x *GetShardReplicationPositionsResponse) Reset() { } } -func (x *GetShardReplicationPositionsResponse) String() string { +func (x *GetKeyspacesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardReplicationPositionsResponse) ProtoMessage() {} +func (*GetKeyspacesResponse) ProtoMessage() {} -func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { +func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2657,32 +2642,31 
@@ func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use GetShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. -func (*GetShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. +func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{43} } -func (x *GetShardReplicationPositionsResponse) GetReplicationPositions() []*ClusterShardReplicationPosition { +func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { if x != nil { - return x.ReplicationPositions + return x.Keyspaces } return nil } -type GetSrvKeyspaceRequest struct { +type GetSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is - // equivalent to specifying all cells in the topo. 
- Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"` + TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,4,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` } -func (x *GetSrvKeyspaceRequest) Reset() { - *x = GetSrvKeyspaceRequest{} +func (x *GetSchemaRequest) Reset() { + *x = GetSchemaRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2690,13 +2674,13 @@ func (x *GetSrvKeyspaceRequest) Reset() { } } -func (x *GetSrvKeyspaceRequest) String() string { +func (x *GetSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspaceRequest) ProtoMessage() {} +func (*GetSchemaRequest) ProtoMessage() {} -func (x *GetSrvKeyspaceRequest) ProtoReflect() protoreflect.Message { +func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2708,46 +2692,50 @@ func (x *GetSrvKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspaceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. 
+func (*GetSchemaRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{44} } -func (x *GetSrvKeyspaceRequest) GetClusterId() string { +func (x *GetSchemaRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *GetSrvKeyspaceRequest) GetKeyspace() string { +func (x *GetSchemaRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *GetSrvKeyspaceRequest) GetCells() []string { +func (x *GetSchemaRequest) GetTable() string { if x != nil { - return x.Cells + return x.Table + } + return "" +} + +func (x *GetSchemaRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { + if x != nil { + return x.TableSizeOptions } return nil } -type GetSrvKeyspacesRequest struct { +type GetSchemasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // An optional list of cluster IDs to filter specific clusters - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is - // equivalent to specifying all cells in the topo. 
- Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + TableSizeOptions *GetSchemaTableSizeOptions `protobuf:"bytes,2,opt,name=table_size_options,json=tableSizeOptions,proto3" json:"table_size_options,omitempty"` } -func (x *GetSrvKeyspacesRequest) Reset() { - *x = GetSrvKeyspacesRequest{} +func (x *GetSchemasRequest) Reset() { + *x = GetSchemasRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2755,13 +2743,13 @@ func (x *GetSrvKeyspacesRequest) Reset() { } } -func (x *GetSrvKeyspacesRequest) String() string { +func (x *GetSchemasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspacesRequest) ProtoMessage() {} +func (*GetSchemasRequest) ProtoMessage() {} -func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { +func (x *GetSchemasRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2773,36 +2761,35 @@ func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSchemasRequest.ProtoReflect.Descriptor instead. 
+func (*GetSchemasRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{45} } -func (x *GetSrvKeyspacesRequest) GetClusterIds() []string { +func (x *GetSchemasRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -func (x *GetSrvKeyspacesRequest) GetCells() []string { +func (x *GetSchemasRequest) GetTableSizeOptions() *GetSchemaTableSizeOptions { if x != nil { - return x.Cells + return x.TableSizeOptions } return nil } -type GetSrvKeyspacesResponse struct { +type GetSchemasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // GetSrvKeyspaces responses for each keyspace - SrvKeyspaces map[string]*vtctldata.GetSrvKeyspacesResponse `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Schemas []*Schema `protobuf:"bytes,1,rep,name=schemas,proto3" json:"schemas,omitempty"` } -func (x *GetSrvKeyspacesResponse) Reset() { - *x = GetSrvKeyspacesResponse{} +func (x *GetSchemasResponse) Reset() { + *x = GetSchemasResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2810,13 +2797,13 @@ func (x *GetSrvKeyspacesResponse) Reset() { } } -func (x *GetSrvKeyspacesResponse) String() string { +func (x *GetSchemasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvKeyspacesResponse) ProtoMessage() {} +func (*GetSchemasResponse) ProtoMessage() {} -func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { +func (x *GetSchemasResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2828,29 +2815,28 @@ func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { 
return mi.MessageOf(x) } -// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. -func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSchemasResponse.ProtoReflect.Descriptor instead. +func (*GetSchemasResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{46} } -func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*vtctldata.GetSrvKeyspacesResponse { +func (x *GetSchemasResponse) GetSchemas() []*Schema { if x != nil { - return x.SrvKeyspaces + return x.Schemas } return nil } -type GetSrvVSchemaRequest struct { +type GetSchemaMigrationsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` + ClusterRequests []*GetSchemaMigrationsRequest_ClusterRequest `protobuf:"bytes,1,rep,name=cluster_requests,json=clusterRequests,proto3" json:"cluster_requests,omitempty"` } -func (x *GetSrvVSchemaRequest) Reset() { - *x = GetSrvVSchemaRequest{} +func (x *GetSchemaMigrationsRequest) Reset() { + *x = GetSchemaMigrationsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2858,13 +2844,13 @@ func (x *GetSrvVSchemaRequest) Reset() { } } -func (x *GetSrvVSchemaRequest) String() string { +func (x *GetSchemaMigrationsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemaRequest) ProtoMessage() {} +func (*GetSchemaMigrationsRequest) ProtoMessage() {} -func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { +func (x *GetSchemaMigrationsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ 
-2876,36 +2862,28 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSchemaMigrationsRequest.ProtoReflect.Descriptor instead. +func (*GetSchemaMigrationsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{47} } -func (x *GetSrvVSchemaRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *GetSrvVSchemaRequest) GetCell() string { +func (x *GetSchemaMigrationsRequest) GetClusterRequests() []*GetSchemaMigrationsRequest_ClusterRequest { if x != nil { - return x.Cell + return x.ClusterRequests } - return "" + return nil } -type GetSrvVSchemasRequest struct { +type GetSchemaMigrationsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` + SchemaMigrations []*SchemaMigration `protobuf:"bytes,1,rep,name=schema_migrations,json=schemaMigrations,proto3" json:"schema_migrations,omitempty"` } -func (x *GetSrvVSchemasRequest) Reset() { - *x = GetSrvVSchemasRequest{} +func (x *GetSchemaMigrationsResponse) Reset() { + *x = GetSchemaMigrationsResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2913,13 +2891,13 @@ func (x *GetSrvVSchemasRequest) Reset() { } } -func (x *GetSrvVSchemasRequest) String() string { +func (x *GetSchemaMigrationsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemasRequest) ProtoMessage() {} +func (*GetSchemaMigrationsResponse) ProtoMessage() {} -func (x *GetSrvVSchemasRequest) ProtoReflect() 
protoreflect.Message { +func (x *GetSchemaMigrationsResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2931,35 +2909,37 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSchemaMigrationsResponse.ProtoReflect.Descriptor instead. +func (*GetSchemaMigrationsResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{48} } -func (x *GetSrvVSchemasRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -func (x *GetSrvVSchemasRequest) GetCells() []string { +func (x *GetSchemaMigrationsResponse) GetSchemaMigrations() []*SchemaMigration { if x != nil { - return x.Cells + return x.SchemaMigrations } return nil } -type GetSrvVSchemasResponse struct { +type GetShardReplicationPositionsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - SrvVSchemas []*SrvVSchema `protobuf:"bytes,1,rep,name=srv_v_schemas,json=srvVSchemas,proto3" json:"srv_v_schemas,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Keyspaces, if set, limits replication positions to just the specified + // keyspaces. Applies to all clusters in the request. + Keyspaces []string `protobuf:"bytes,2,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + // KeyspaceShards, if set, limits replicatin positions to just the specified + // keyspace/shards. Applies to all clusters in the request. + // + // This field takes precedence over Keyspaces. If KeyspaceShards is set, + // Keyspaces is ignored. 
+ KeyspaceShards []string `protobuf:"bytes,3,rep,name=keyspace_shards,json=keyspaceShards,proto3" json:"keyspace_shards,omitempty"` } -func (x *GetSrvVSchemasResponse) Reset() { - *x = GetSrvVSchemasResponse{} +func (x *GetShardReplicationPositionsRequest) Reset() { + *x = GetShardReplicationPositionsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2967,13 +2947,13 @@ func (x *GetSrvVSchemasResponse) Reset() { } } -func (x *GetSrvVSchemasResponse) String() string { +func (x *GetShardReplicationPositionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSrvVSchemasResponse) ProtoMessage() {} +func (*GetShardReplicationPositionsRequest) ProtoMessage() {} -func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { +func (x *GetShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2985,29 +2965,42 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. -func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. 
+func (*GetShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{49} } -func (x *GetSrvVSchemasResponse) GetSrvVSchemas() []*SrvVSchema { +func (x *GetShardReplicationPositionsRequest) GetClusterIds() []string { if x != nil { - return x.SrvVSchemas + return x.ClusterIds } return nil } -type GetSchemaTableSizeOptions struct { +func (x *GetShardReplicationPositionsRequest) GetKeyspaces() []string { + if x != nil { + return x.Keyspaces + } + return nil +} + +func (x *GetShardReplicationPositionsRequest) GetKeyspaceShards() []string { + if x != nil { + return x.KeyspaceShards + } + return nil +} + +type GetShardReplicationPositionsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AggregateSizes bool `protobuf:"varint,1,opt,name=aggregate_sizes,json=aggregateSizes,proto3" json:"aggregate_sizes,omitempty"` - IncludeNonServingShards bool `protobuf:"varint,2,opt,name=include_non_serving_shards,json=includeNonServingShards,proto3" json:"include_non_serving_shards,omitempty"` + ReplicationPositions []*ClusterShardReplicationPosition `protobuf:"bytes,1,rep,name=replication_positions,json=replicationPositions,proto3" json:"replication_positions,omitempty"` } -func (x *GetSchemaTableSizeOptions) Reset() { - *x = GetSchemaTableSizeOptions{} +func (x *GetShardReplicationPositionsResponse) Reset() { + *x = GetShardReplicationPositionsResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3015,13 +3008,13 @@ func (x *GetSchemaTableSizeOptions) Reset() { } } -func (x *GetSchemaTableSizeOptions) String() string { +func (x *GetShardReplicationPositionsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetSchemaTableSizeOptions) ProtoMessage() {} +func (*GetShardReplicationPositionsResponse) ProtoMessage() {} -func (x *GetSchemaTableSizeOptions) 
ProtoReflect() protoreflect.Message { +func (x *GetShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3033,40 +3026,32 @@ func (x *GetSchemaTableSizeOptions) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetSchemaTableSizeOptions.ProtoReflect.Descriptor instead. -func (*GetSchemaTableSizeOptions) Descriptor() ([]byte, []int) { +// Deprecated: Use GetShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. +func (*GetShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{50} } -func (x *GetSchemaTableSizeOptions) GetAggregateSizes() bool { - if x != nil { - return x.AggregateSizes - } - return false -} - -func (x *GetSchemaTableSizeOptions) GetIncludeNonServingShards() bool { +func (x *GetShardReplicationPositionsResponse) GetReplicationPositions() []*ClusterShardReplicationPosition { if x != nil { - return x.IncludeNonServingShards + return x.ReplicationPositions } - return false + return nil } -type GetTabletRequest struct { +type GetSrvKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Unique (per cluster) tablet alias. - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - // ClusterIDs is an optional parameter to narrow the scope of the search, if - // the caller knows which cluster the tablet may be in, or, to disambiguate - // if multiple clusters have a tablet with the same hostname. 
- ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetTabletRequest) Reset() { - *x = GetTabletRequest{} +func (x *GetSrvKeyspaceRequest) Reset() { + *x = GetSrvKeyspaceRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3074,13 +3059,13 @@ func (x *GetTabletRequest) Reset() { } } -func (x *GetTabletRequest) String() string { +func (x *GetSrvKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletRequest) ProtoMessage() {} +func (*GetSrvKeyspaceRequest) ProtoMessage() {} -func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { +func (x *GetSrvKeyspaceRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3092,35 +3077,46 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. -func (*GetTabletRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSrvKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*GetSrvKeyspaceRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{51} } -func (x *GetTabletRequest) GetAlias() *topodata.TabletAlias { +func (x *GetSrvKeyspaceRequest) GetClusterId() string { if x != nil { - return x.Alias + return x.ClusterId } - return nil + return "" } -func (x *GetTabletRequest) GetClusterIds() []string { +func (x *GetSrvKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.ClusterIds + return x.Keyspace + } + return "" +} + +func (x *GetSrvKeyspaceRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type GetTabletsRequest struct { +type GetSrvKeyspacesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // An optional list of cluster IDs to filter specific clusters ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Cells is a list of cells to lookup a SrvKeyspace for. Leaving this empty is + // equivalent to specifying all cells in the topo. 
+ Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetTabletsRequest) Reset() { - *x = GetTabletsRequest{} +func (x *GetSrvKeyspacesRequest) Reset() { + *x = GetSrvKeyspacesRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3128,13 +3124,13 @@ func (x *GetTabletsRequest) Reset() { } } -func (x *GetTabletsRequest) String() string { +func (x *GetSrvKeyspacesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletsRequest) ProtoMessage() {} +func (*GetSrvKeyspacesRequest) ProtoMessage() {} -func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { +func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3146,28 +3142,36 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. -func (*GetTabletsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. 
+func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{52} } -func (x *GetTabletsRequest) GetClusterIds() []string { +func (x *GetSrvKeyspacesRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type GetTabletsResponse struct { +func (x *GetSrvKeyspacesRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type GetSrvKeyspacesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablets []*Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` + // GetSrvKeyspaces responses for each keyspace + SrvKeyspaces map[string]*vtctldata.GetSrvKeyspacesResponse `protobuf:"bytes,1,rep,name=srv_keyspaces,json=srvKeyspaces,proto3" json:"srv_keyspaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *GetTabletsResponse) Reset() { - *x = GetTabletsResponse{} +func (x *GetSrvKeyspacesResponse) Reset() { + *x = GetSrvKeyspacesResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3175,13 +3179,13 @@ func (x *GetTabletsResponse) Reset() { } } -func (x *GetTabletsResponse) String() string { +func (x *GetSrvKeyspacesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTabletsResponse) ProtoMessage() {} +func (*GetSrvKeyspacesResponse) ProtoMessage() {} -func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { +func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3193,29 +3197,29 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. 
-func (*GetTabletsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. +func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{53} } -func (x *GetTabletsResponse) GetTablets() []*Tablet { +func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*vtctldata.GetSrvKeyspacesResponse { if x != nil { - return x.Tablets + return x.SrvKeyspaces } return nil } -type GetTopologyPathRequest struct { +type GetSrvVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Cell string `protobuf:"bytes,2,opt,name=cell,proto3" json:"cell,omitempty"` } -func (x *GetTopologyPathRequest) Reset() { - *x = GetTopologyPathRequest{} +func (x *GetSrvVSchemaRequest) Reset() { + *x = GetSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3223,13 +3227,13 @@ func (x *GetTopologyPathRequest) Reset() { } } -func (x *GetTopologyPathRequest) String() string { +func (x *GetSrvVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTopologyPathRequest) ProtoMessage() {} +func (*GetSrvVSchemaRequest) ProtoMessage() {} -func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { +func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3241,36 +3245,36 @@ func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. 
-func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{54} } -func (x *GetTopologyPathRequest) GetClusterId() string { +func (x *GetSrvVSchemaRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *GetTopologyPathRequest) GetPath() string { +func (x *GetSrvVSchemaRequest) GetCell() string { if x != nil { - return x.Path + return x.Cell } return "" } -type GetVSchemaRequest struct { +type GetSrvVSchemasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + Cells []string `protobuf:"bytes,2,rep,name=cells,proto3" json:"cells,omitempty"` } -func (x *GetVSchemaRequest) Reset() { - *x = GetVSchemaRequest{} +func (x *GetSrvVSchemasRequest) Reset() { + *x = GetSrvVSchemasRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3278,13 +3282,13 @@ func (x *GetVSchemaRequest) Reset() { } } -func (x *GetVSchemaRequest) String() string { +func (x *GetSrvVSchemasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemaRequest) ProtoMessage() {} +func (*GetSrvVSchemasRequest) ProtoMessage() {} -func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { +func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3296,35 +3300,35 
@@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. -func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{55} } -func (x *GetVSchemaRequest) GetClusterId() string { +func (x *GetSrvVSchemasRequest) GetClusterIds() []string { if x != nil { - return x.ClusterId + return x.ClusterIds } - return "" + return nil } -func (x *GetVSchemaRequest) GetKeyspace() string { +func (x *GetSrvVSchemasRequest) GetCells() []string { if x != nil { - return x.Keyspace + return x.Cells } - return "" + return nil } -type GetVSchemasRequest struct { +type GetSrvVSchemasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + SrvVSchemas []*SrvVSchema `protobuf:"bytes,1,rep,name=srv_v_schemas,json=srvVSchemas,proto3" json:"srv_v_schemas,omitempty"` } -func (x *GetVSchemasRequest) Reset() { - *x = GetVSchemasRequest{} +func (x *GetSrvVSchemasResponse) Reset() { + *x = GetSrvVSchemasResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3332,13 +3336,13 @@ func (x *GetVSchemasRequest) Reset() { } } -func (x *GetVSchemasRequest) String() string { +func (x *GetSrvVSchemasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemasRequest) ProtoMessage() {} +func (*GetSrvVSchemasResponse) ProtoMessage() {} -func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message { +func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != 
nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3350,28 +3354,29 @@ func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemasRequest.ProtoReflect.Descriptor instead. -func (*GetVSchemasRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. +func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{56} } -func (x *GetVSchemasRequest) GetClusterIds() []string { +func (x *GetSrvVSchemasResponse) GetSrvVSchemas() []*SrvVSchema { if x != nil { - return x.ClusterIds + return x.SrvVSchemas } return nil } -type GetVSchemasResponse struct { +type GetSchemaTableSizeOptions struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - VSchemas []*VSchema `protobuf:"bytes,1,rep,name=v_schemas,json=vSchemas,proto3" json:"v_schemas,omitempty"` + AggregateSizes bool `protobuf:"varint,1,opt,name=aggregate_sizes,json=aggregateSizes,proto3" json:"aggregate_sizes,omitempty"` + IncludeNonServingShards bool `protobuf:"varint,2,opt,name=include_non_serving_shards,json=includeNonServingShards,proto3" json:"include_non_serving_shards,omitempty"` } -func (x *GetVSchemasResponse) Reset() { - *x = GetVSchemasResponse{} +func (x *GetSchemaTableSizeOptions) Reset() { + *x = GetSchemaTableSizeOptions{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3379,13 +3384,13 @@ func (x *GetVSchemasResponse) Reset() { } } -func (x *GetVSchemasResponse) String() string { +func (x *GetSchemaTableSizeOptions) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVSchemasResponse) ProtoMessage() {} +func (*GetSchemaTableSizeOptions) ProtoMessage() {} -func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message { +func (x *GetSchemaTableSizeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3397,28 +3402,40 @@ func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVSchemasResponse.ProtoReflect.Descriptor instead. -func (*GetVSchemasResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetSchemaTableSizeOptions.ProtoReflect.Descriptor instead. +func (*GetSchemaTableSizeOptions) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{57} } -func (x *GetVSchemasResponse) GetVSchemas() []*VSchema { +func (x *GetSchemaTableSizeOptions) GetAggregateSizes() bool { if x != nil { - return x.VSchemas + return x.AggregateSizes } - return nil + return false } -type GetVtctldsRequest struct { +func (x *GetSchemaTableSizeOptions) GetIncludeNonServingShards() bool { + if x != nil { + return x.IncludeNonServingShards + } + return false +} + +type GetTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Unique (per cluster) tablet alias. + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + // ClusterIDs is an optional parameter to narrow the scope of the search, if + // the caller knows which cluster the tablet may be in, or, to disambiguate + // if multiple clusters have a tablet with the same hostname. 
+ ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *GetVtctldsRequest) Reset() { - *x = GetVtctldsRequest{} +func (x *GetTabletRequest) Reset() { + *x = GetTabletRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3426,13 +3443,13 @@ func (x *GetVtctldsRequest) Reset() { } } -func (x *GetVtctldsRequest) String() string { +func (x *GetTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVtctldsRequest) ProtoMessage() {} +func (*GetTabletRequest) ProtoMessage() {} -func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message { +func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3444,28 +3461,35 @@ func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVtctldsRequest.ProtoReflect.Descriptor instead. -func (*GetVtctldsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. 
+func (*GetTabletRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{58} } -func (x *GetVtctldsRequest) GetClusterIds() []string { +func (x *GetTabletRequest) GetAlias() *topodata.TabletAlias { + if x != nil { + return x.Alias + } + return nil +} + +func (x *GetTabletRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type GetVtctldsResponse struct { +type GetTabletsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Vtctlds []*Vtctld `protobuf:"bytes,1,rep,name=vtctlds,proto3" json:"vtctlds,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *GetVtctldsResponse) Reset() { - *x = GetVtctldsResponse{} +func (x *GetTabletsRequest) Reset() { + *x = GetTabletsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3473,13 +3497,13 @@ func (x *GetVtctldsResponse) Reset() { } } -func (x *GetVtctldsResponse) String() string { +func (x *GetTabletsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetVtctldsResponse) ProtoMessage() {} +func (*GetTabletsRequest) ProtoMessage() {} -func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message { +func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3491,31 +3515,28 @@ func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetVtctldsResponse.ProtoReflect.Descriptor instead. -func (*GetVtctldsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. 
+func (*GetTabletsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{59} } -func (x *GetVtctldsResponse) GetVtctlds() []*Vtctld { +func (x *GetTabletsRequest) GetClusterIds() []string { if x != nil { - return x.Vtctlds + return x.ClusterIds } return nil } -type GetWorkflowRequest struct { +type GetTabletsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - ActiveOnly bool `protobuf:"varint,4,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + Tablets []*Tablet `protobuf:"bytes,1,rep,name=tablets,proto3" json:"tablets,omitempty"` } -func (x *GetWorkflowRequest) Reset() { - *x = GetWorkflowRequest{} +func (x *GetTabletsResponse) Reset() { + *x = GetTabletsResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3523,13 +3544,13 @@ func (x *GetWorkflowRequest) Reset() { } } -func (x *GetWorkflowRequest) String() string { +func (x *GetTabletsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowRequest) ProtoMessage() {} +func (*GetTabletsResponse) ProtoMessage() {} -func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message { +func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3541,67 +3562,29 @@ func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowRequest.ProtoReflect.Descriptor instead. 
-func (*GetWorkflowRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. +func (*GetTabletsResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{60} } -func (x *GetWorkflowRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *GetWorkflowRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *GetWorkflowRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *GetWorkflowRequest) GetActiveOnly() bool { +func (x *GetTabletsResponse) GetTablets() []*Tablet { if x != nil { - return x.ActiveOnly + return x.Tablets } - return false + return nil } -type GetWorkflowsRequest struct { +type GetTopologyPathRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - // ActiveOnly specifies whether to return workflows that are currently - // active (running or paused) instead of all workflows. - ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` - // Keyspaces is a list of keyspaces to restrict the workflow search to. Note - // that the keyspaces list applies across all cluster IDs in the request. - // - // If, for example, you have two clusters, each with a keyspace called "foo" - // and want the workflows from "foo" in cluster1 but not from cluster2, you - // must make two requests. - // - // Keyspaces and IgnoreKeyspaces are mutually-exclusive, and Keyspaces takes - // precedence; if Keyspaces is a non-empty list, then IgnoreKeyspaces is - // ignored completely. 
- Keyspaces []string `protobuf:"bytes,3,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` - // IgnoreKeyspaces is a list of keyspaces to skip during the workflow - // search. It has the same semantics as the Keyspaces parameter, so refer to - // that documentation for more details. - IgnoreKeyspaces []string `protobuf:"bytes,4,rep,name=ignore_keyspaces,json=ignoreKeyspaces,proto3" json:"ignore_keyspaces,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` } -func (x *GetWorkflowsRequest) Reset() { - *x = GetWorkflowsRequest{} +func (x *GetTopologyPathRequest) Reset() { + *x = GetTopologyPathRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3609,13 +3592,13 @@ func (x *GetWorkflowsRequest) Reset() { } } -func (x *GetWorkflowsRequest) String() string { +func (x *GetTopologyPathRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowsRequest) ProtoMessage() {} +func (*GetTopologyPathRequest) ProtoMessage() {} -func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { +func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3627,49 +3610,36 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. -func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. 
+func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{61} } -func (x *GetWorkflowsRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -func (x *GetWorkflowsRequest) GetActiveOnly() bool { - if x != nil { - return x.ActiveOnly - } - return false -} - -func (x *GetWorkflowsRequest) GetKeyspaces() []string { +func (x *GetTopologyPathRequest) GetClusterId() string { if x != nil { - return x.Keyspaces + return x.ClusterId } - return nil + return "" } -func (x *GetWorkflowsRequest) GetIgnoreKeyspaces() []string { +func (x *GetTopologyPathRequest) GetPath() string { if x != nil { - return x.IgnoreKeyspaces + return x.Path } - return nil + return "" } -type GetWorkflowsResponse struct { +type GetVSchemaRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - WorkflowsByCluster map[string]*ClusterWorkflows `protobuf:"bytes,1,rep,name=workflows_by_cluster,json=workflowsByCluster,proto3" json:"workflows_by_cluster,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *GetWorkflowsResponse) Reset() { - *x = GetWorkflowsResponse{} +func (x *GetVSchemaRequest) Reset() { + *x = GetVSchemaRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3677,13 +3647,13 @@ func (x *GetWorkflowsResponse) Reset() { } } -func (x *GetWorkflowsResponse) String() string { +func (x *GetVSchemaRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowsResponse) ProtoMessage() {} +func (*GetVSchemaRequest) ProtoMessage() {} -func (x *GetWorkflowsResponse) ProtoReflect() 
protoreflect.Message { +func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3695,33 +3665,35 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. -func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. +func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{62} } -func (x *GetWorkflowsResponse) GetWorkflowsByCluster() map[string]*ClusterWorkflows { +func (x *GetVSchemaRequest) GetClusterId() string { if x != nil { - return x.WorkflowsByCluster + return x.ClusterId } - return nil + return "" } -type PingTabletRequest struct { +func (x *GetVSchemaRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +type GetVSchemasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Unique (per cluster) tablet alias of the standard form: "$cell-$uid" - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - // ClusterIDs is an optional parameter to narrow the scope of the search, if - // the caller knows which cluster the tablet may be in, or, to disambiguate - // if multiple clusters have a tablet with the same hostname. 
- ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *PingTabletRequest) Reset() { - *x = PingTabletRequest{} +func (x *GetVSchemasRequest) Reset() { + *x = GetVSchemasRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3729,13 +3701,13 @@ func (x *PingTabletRequest) Reset() { } } -func (x *PingTabletRequest) String() string { +func (x *GetVSchemasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PingTabletRequest) ProtoMessage() {} +func (*GetVSchemasRequest) ProtoMessage() {} -func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { +func (x *GetVSchemasRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3747,36 +3719,28 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. -func (*PingTabletRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetVSchemasRequest.ProtoReflect.Descriptor instead. 
+func (*GetVSchemasRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{63} } -func (x *PingTabletRequest) GetAlias() *topodata.TabletAlias { - if x != nil { - return x.Alias - } - return nil -} - -func (x *PingTabletRequest) GetClusterIds() []string { +func (x *GetVSchemasRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type PingTabletResponse struct { +type GetVSchemasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + VSchemas []*VSchema `protobuf:"bytes,1,rep,name=v_schemas,json=vSchemas,proto3" json:"v_schemas,omitempty"` } -func (x *PingTabletResponse) Reset() { - *x = PingTabletResponse{} +func (x *GetVSchemasResponse) Reset() { + *x = GetVSchemasResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3784,13 +3748,13 @@ func (x *PingTabletResponse) Reset() { } } -func (x *PingTabletResponse) String() string { +func (x *GetVSchemasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PingTabletResponse) ProtoMessage() {} +func (*GetVSchemasResponse) ProtoMessage() {} -func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { +func (x *GetVSchemasResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3802,36 +3766,28 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. -func (*PingTabletResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetVSchemasResponse.ProtoReflect.Descriptor instead. 
+func (*GetVSchemasResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{64} } -func (x *PingTabletResponse) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *PingTabletResponse) GetCluster() *Cluster { +func (x *GetVSchemasResponse) GetVSchemas() []*VSchema { if x != nil { - return x.Cluster + return x.VSchemas } return nil } -type PlannedFailoverShardRequest struct { +type GetVtctldsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Options *vtctldata.PlannedReparentShardRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *PlannedFailoverShardRequest) Reset() { - *x = PlannedFailoverShardRequest{} +func (x *GetVtctldsRequest) Reset() { + *x = GetVtctldsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3839,13 +3795,13 @@ func (x *PlannedFailoverShardRequest) Reset() { } } -func (x *PlannedFailoverShardRequest) String() string { +func (x *GetVtctldsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PlannedFailoverShardRequest) ProtoMessage() {} +func (*GetVtctldsRequest) ProtoMessage() {} -func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message { +func (x *GetVtctldsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3857,43 +3813,28 @@ func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlannedFailoverShardRequest.ProtoReflect.Descriptor instead. 
-func (*PlannedFailoverShardRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetVtctldsRequest.ProtoReflect.Descriptor instead. +func (*GetVtctldsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{65} } -func (x *PlannedFailoverShardRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *PlannedFailoverShardRequest) GetOptions() *vtctldata.PlannedReparentShardRequest { +func (x *GetVtctldsRequest) GetClusterIds() []string { if x != nil { - return x.Options + return x.ClusterIds } return nil } -type PlannedFailoverShardResponse struct { +type GetVtctldsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - // PromotedPrimary is the tablet alias that was promoted to shard primary. - // If NewPrimary was set in the request options, then this will be the - // same tablet alias. Otherwise, it will be the alias of the tablet found - // to be most up-to-date in the shard. 
- PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` - Events []*logutil.Event `protobuf:"bytes,5,rep,name=events,proto3" json:"events,omitempty"` + Vtctlds []*Vtctld `protobuf:"bytes,1,rep,name=vtctlds,proto3" json:"vtctlds,omitempty"` } -func (x *PlannedFailoverShardResponse) Reset() { - *x = PlannedFailoverShardResponse{} +func (x *GetVtctldsResponse) Reset() { + *x = GetVtctldsResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3901,13 +3842,13 @@ func (x *PlannedFailoverShardResponse) Reset() { } } -func (x *PlannedFailoverShardResponse) String() string { +func (x *GetVtctldsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PlannedFailoverShardResponse) ProtoMessage() {} +func (*GetVtctldsResponse) ProtoMessage() {} -func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message { +func (x *GetVtctldsResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3919,59 +3860,31 @@ func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PlannedFailoverShardResponse.ProtoReflect.Descriptor instead. -func (*PlannedFailoverShardResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetVtctldsResponse.ProtoReflect.Descriptor instead. 
+func (*GetVtctldsResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{66} } -func (x *PlannedFailoverShardResponse) GetCluster() *Cluster { +func (x *GetVtctldsResponse) GetVtctlds() []*Vtctld { if x != nil { - return x.Cluster + return x.Vtctlds } return nil } -func (x *PlannedFailoverShardResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *PlannedFailoverShardResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *PlannedFailoverShardResponse) GetPromotedPrimary() *topodata.TabletAlias { - if x != nil { - return x.PromotedPrimary - } - return nil -} - -func (x *PlannedFailoverShardResponse) GetEvents() []*logutil.Event { - if x != nil { - return x.Events - } - return nil -} - -type RebuildKeyspaceGraphRequest struct { +type GetWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - AllowPartial bool `protobuf:"varint,4,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + ActiveOnly bool `protobuf:"varint,4,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` } -func (x *RebuildKeyspaceGraphRequest) Reset() { - *x = RebuildKeyspaceGraphRequest{} +func (x *GetWorkflowRequest) Reset() { + *x = GetWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := 
&file_vtadmin_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3979,13 +3892,13 @@ func (x *RebuildKeyspaceGraphRequest) Reset() { } } -func (x *RebuildKeyspaceGraphRequest) String() string { +func (x *GetWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} +func (*GetWorkflowRequest) ProtoMessage() {} -func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { +func (x *GetWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3997,49 +3910,67 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. -func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{67} } -func (x *RebuildKeyspaceGraphRequest) GetClusterId() string { +func (x *GetWorkflowRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { +func (x *GetWorkflowRequest) GetKeyspace() string { if x != nil { return x.Keyspace } return "" } -func (x *RebuildKeyspaceGraphRequest) GetCells() []string { +func (x *GetWorkflowRequest) GetName() string { if x != nil { - return x.Cells + return x.Name } - return nil + return "" } -func (x *RebuildKeyspaceGraphRequest) GetAllowPartial() bool { +func (x *GetWorkflowRequest) GetActiveOnly() bool { if x != nil { - return x.AllowPartial + return x.ActiveOnly } return false } -type RebuildKeyspaceGraphResponse struct { +type GetWorkflowsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + ClusterIds []string `protobuf:"bytes,1,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // ActiveOnly specifies whether to return workflows that are currently + // active (running or paused) instead of all workflows. + ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + // Keyspaces is a list of keyspaces to restrict the workflow search to. Note + // that the keyspaces list applies across all cluster IDs in the request. + // + // If, for example, you have two clusters, each with a keyspace called "foo" + // and want the workflows from "foo" in cluster1 but not from cluster2, you + // must make two requests. + // + // Keyspaces and IgnoreKeyspaces are mutually-exclusive, and Keyspaces takes + // precedence; if Keyspaces is a non-empty list, then IgnoreKeyspaces is + // ignored completely. 
+ Keyspaces []string `protobuf:"bytes,3,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + // IgnoreKeyspaces is a list of keyspaces to skip during the workflow + // search. It has the same semantics as the Keyspaces parameter, so refer to + // that documentation for more details. + IgnoreKeyspaces []string `protobuf:"bytes,4,rep,name=ignore_keyspaces,json=ignoreKeyspaces,proto3" json:"ignore_keyspaces,omitempty"` } -func (x *RebuildKeyspaceGraphResponse) Reset() { - *x = RebuildKeyspaceGraphResponse{} +func (x *GetWorkflowsRequest) Reset() { + *x = GetWorkflowsRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4047,13 +3978,13 @@ func (x *RebuildKeyspaceGraphResponse) Reset() { } } -func (x *RebuildKeyspaceGraphResponse) String() string { +func (x *GetWorkflowsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} +func (*GetWorkflowsRequest) ProtoMessage() {} -func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { +func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4065,29 +3996,49 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. -func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{68} } -func (x *RebuildKeyspaceGraphResponse) GetStatus() string { +func (x *GetWorkflowsRequest) GetClusterIds() []string { if x != nil { - return x.Status + return x.ClusterIds } - return "" + return nil } -type RefreshStateRequest struct { +func (x *GetWorkflowsRequest) GetActiveOnly() bool { + if x != nil { + return x.ActiveOnly + } + return false +} + +func (x *GetWorkflowsRequest) GetKeyspaces() []string { + if x != nil { + return x.Keyspaces + } + return nil +} + +func (x *GetWorkflowsRequest) GetIgnoreKeyspaces() []string { + if x != nil { + return x.IgnoreKeyspaces + } + return nil +} + +type GetWorkflowsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + WorkflowsByCluster map[string]*ClusterWorkflows `protobuf:"bytes,1,rep,name=workflows_by_cluster,json=workflowsByCluster,proto3" json:"workflows_by_cluster,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (x *RefreshStateRequest) Reset() { - *x = RefreshStateRequest{} +func (x *GetWorkflowsResponse) Reset() { + *x = GetWorkflowsResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4095,13 +4046,13 @@ func (x *RefreshStateRequest) Reset() { } } -func (x *RefreshStateRequest) String() string { +func (x *GetWorkflowsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshStateRequest) ProtoMessage() {} +func (*GetWorkflowsResponse) ProtoMessage() {} -func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { +func (x *GetWorkflowsResponse) ProtoReflect() 
protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4113,36 +4064,29 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. -func (*RefreshStateRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. +func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{69} } -func (x *RefreshStateRequest) GetAlias() *topodata.TabletAlias { - if x != nil { - return x.Alias - } - return nil -} - -func (x *RefreshStateRequest) GetClusterIds() []string { +func (x *GetWorkflowsResponse) GetWorkflowsByCluster() map[string]*ClusterWorkflows { if x != nil { - return x.ClusterIds + return x.WorkflowsByCluster } return nil } -type RefreshStateResponse struct { +type LaunchSchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.LaunchSchemaMigrationRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` } -func (x *RefreshStateResponse) Reset() { - *x = RefreshStateResponse{} +func (x *LaunchSchemaMigrationRequest) Reset() { + *x = LaunchSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4150,13 +4094,13 @@ func (x *RefreshStateResponse) Reset() { } } -func (x *RefreshStateResponse) String() string { +func (x *LaunchSchemaMigrationRequest) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*RefreshStateResponse) ProtoMessage() {} +func (*LaunchSchemaMigrationRequest) ProtoMessage() {} -func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { +func (x *LaunchSchemaMigrationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4168,76 +4112,40 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. -func (*RefreshStateResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use LaunchSchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*LaunchSchemaMigrationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{70} } -func (x *RefreshStateResponse) GetStatus() string { +func (x *LaunchSchemaMigrationRequest) GetClusterId() string { if x != nil { - return x.Status + return x.ClusterId } return "" } -func (x *RefreshStateResponse) GetCluster() *Cluster { +func (x *LaunchSchemaMigrationRequest) GetRequest() *vtctldata.LaunchSchemaMigrationRequest { if x != nil { - return x.Cluster + return x.Request } return nil } -type ReloadSchemasRequest struct { +type PingTabletRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Keyspaces, if set, will reload schemas across one or more keyspaces. A - // keyspace not existing in a cluster will not fail the overall request. - // - // Superceded by KeyspaceShards and Tablets, in that order. - Keyspaces []string `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` - // KeyspaceShards, if set, will reload schemas across one or more shards. - // Each element must be a valid keyspace/shard according to - // topoproto.ParseKeyspaceShard. 
A shard not existing in a cluster will not - // fail the overall request. - // - // Supercedes Keyspaces, and is superceded by Tablets. - KeyspaceShards []string `protobuf:"bytes,2,rep,name=keyspace_shards,json=keyspaceShards,proto3" json:"keyspace_shards,omitempty"` - // Tablets, if set will reload schemas across one or more tablets. - // Supercedes both Keyspaces and KeyspaceShards. - Tablets []*topodata.TabletAlias `protobuf:"bytes,3,rep,name=tablets,proto3" json:"tablets,omitempty"` - // ClusterIds optionally restricts the reload operation to clusters with - // the specified IDs. An empty list of ClusterIds will operate on all - // clusters. - ClusterIds []string `protobuf:"bytes,4,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` - // Concurrency controls the number of tablets to reload at any given time. - // Its semantics depend on whether the request is for keyspace, shard, or - // tablet mode. - // - // In Keyspaces mode, Concurrency is the number of tablets to reload at once - // *per keyspace*. - // - // In KeyspaceShards mode, Concurrency is the number of tablets to reload at - // once *per shard*. - // - // In Tablets mode, Concurrency is the number of tablets to reload at once - // *per cluster*. - Concurrency uint32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` - // WaitPosition is the replication position that replicating tablets should - // reach prior to reloading their schemas. - // - // Does not apply in Tablets mode. - WaitPosition string `protobuf:"bytes,6,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` - // IncludePrimary, if set, will reload the schemas on PRIMARY tablets as - // well as REPLICA and RDONLY. - // - // Does not apply in Tablets mode. 
- IncludePrimary bool `protobuf:"varint,7,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` + // Unique (per cluster) tablet alias of the standard form: "$cell-$uid" + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + // ClusterIDs is an optional parameter to narrow the scope of the search, if + // the caller knows which cluster the tablet may be in, or, to disambiguate + // if multiple clusters have a tablet with the same hostname. + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *ReloadSchemasRequest) Reset() { - *x = ReloadSchemasRequest{} +func (x *PingTabletRequest) Reset() { + *x = PingTabletRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4245,13 +4153,13 @@ func (x *ReloadSchemasRequest) Reset() { } } -func (x *ReloadSchemasRequest) String() string { +func (x *PingTabletRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemasRequest) ProtoMessage() {} +func (*PingTabletRequest) ProtoMessage() {} -func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message { +func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4263,81 +4171,36 @@ func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemasRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemasRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. 
+func (*PingTabletRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{71} } -func (x *ReloadSchemasRequest) GetKeyspaces() []string { +func (x *PingTabletRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspaces + return x.Alias } return nil } -func (x *ReloadSchemasRequest) GetKeyspaceShards() []string { +func (x *PingTabletRequest) GetClusterIds() []string { if x != nil { - return x.KeyspaceShards + return x.ClusterIds } return nil } -func (x *ReloadSchemasRequest) GetTablets() []*topodata.TabletAlias { - if x != nil { - return x.Tablets - } - return nil -} +type PingTabletResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (x *ReloadSchemasRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -func (x *ReloadSchemasRequest) GetConcurrency() uint32 { - if x != nil { - return x.Concurrency - } - return 0 -} - -func (x *ReloadSchemasRequest) GetWaitPosition() string { - if x != nil { - return x.WaitPosition - } - return "" -} - -func (x *ReloadSchemasRequest) GetIncludePrimary() bool { - if x != nil { - return x.IncludePrimary - } - return false -} - -type ReloadSchemasResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // KeyspaceResults is the list of KeyspaceResult objects for a ReloadSchemas - // operation. It is only set when the request mandates Keyspaces mode (see - // ReloadSchemasRequest). - KeyspaceResults []*ReloadSchemasResponse_KeyspaceResult `protobuf:"bytes,1,rep,name=keyspace_results,json=keyspaceResults,proto3" json:"keyspace_results,omitempty"` - // ShardResults is the list of ShardResult objects for a ReloadSchemas - // operation. It is only set when the request mandates KeyspaceShards mode - // (see ReloadSchemasRequest). 
- ShardResults []*ReloadSchemasResponse_ShardResult `protobuf:"bytes,2,rep,name=shard_results,json=shardResults,proto3" json:"shard_results,omitempty"` - // TabletResults is the list of TabletResult objects for a ReloadSchemas - // operation. It is only set when the request mandates Tablets mode (see - // ReloadSchemasRequest). - TabletResults []*ReloadSchemasResponse_TabletResult `protobuf:"bytes,3,rep,name=tablet_results,json=tabletResults,proto3" json:"tablet_results,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x *ReloadSchemasResponse) Reset() { - *x = ReloadSchemasResponse{} +func (x *PingTabletResponse) Reset() { + *x = PingTabletResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4345,13 +4208,13 @@ func (x *ReloadSchemasResponse) Reset() { } } -func (x *ReloadSchemasResponse) String() string { +func (x *PingTabletResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemasResponse) ProtoMessage() {} +func (*PingTabletResponse) ProtoMessage() {} -func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message { +func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4363,47 +4226,36 @@ func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemasResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemasResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. 
+func (*PingTabletResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{72} } -func (x *ReloadSchemasResponse) GetKeyspaceResults() []*ReloadSchemasResponse_KeyspaceResult { - if x != nil { - return x.KeyspaceResults - } - return nil -} - -func (x *ReloadSchemasResponse) GetShardResults() []*ReloadSchemasResponse_ShardResult { +func (x *PingTabletResponse) GetStatus() string { if x != nil { - return x.ShardResults + return x.Status } - return nil + return "" } -func (x *ReloadSchemasResponse) GetTabletResults() []*ReloadSchemasResponse_TabletResult { +func (x *PingTabletResponse) GetCluster() *Cluster { if x != nil { - return x.TabletResults + return x.Cluster } return nil } -type ReloadSchemaShardRequest struct { +type PlannedFailoverShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - WaitPosition string `protobuf:"bytes,4,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` - IncludePrimary bool `protobuf:"varint,5,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` - Concurrency uint32 `protobuf:"varint,6,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Options *vtctldata.PlannedReparentShardRequest `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (x *ReloadSchemaShardRequest) Reset() { - *x = ReloadSchemaShardRequest{} +func (x *PlannedFailoverShardRequest) Reset() { + *x = PlannedFailoverShardRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[73] ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4411,13 +4263,13 @@ func (x *ReloadSchemaShardRequest) Reset() { } } -func (x *ReloadSchemaShardRequest) String() string { +func (x *PlannedFailoverShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaShardRequest) ProtoMessage() {} +func (*PlannedFailoverShardRequest) ProtoMessage() {} -func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { +func (x *PlannedFailoverShardRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4429,63 +4281,43 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. -func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use PlannedFailoverShardRequest.ProtoReflect.Descriptor instead. 
+func (*PlannedFailoverShardRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{73} } -func (x *ReloadSchemaShardRequest) GetClusterId() string { +func (x *PlannedFailoverShardRequest) GetClusterId() string { if x != nil { return x.ClusterId } return "" } -func (x *ReloadSchemaShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *ReloadSchemaShardRequest) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *ReloadSchemaShardRequest) GetWaitPosition() string { - if x != nil { - return x.WaitPosition - } - return "" -} - -func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { - if x != nil { - return x.IncludePrimary - } - return false -} - -func (x *ReloadSchemaShardRequest) GetConcurrency() uint32 { +func (x *PlannedFailoverShardRequest) GetOptions() *vtctldata.PlannedReparentShardRequest { if x != nil { - return x.Concurrency + return x.Options } - return 0 + return nil } -type ReloadSchemaShardResponse struct { +type PlannedFailoverShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + // PromotedPrimary is the tablet alias that was promoted to shard primary. + // If NewPrimary was set in the request options, then this will be the + // same tablet alias. Otherwise, it will be the alias of the tablet found + // to be most up-to-date in the shard. 
+ PromotedPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=promoted_primary,json=promotedPrimary,proto3" json:"promoted_primary,omitempty"` + Events []*logutil.Event `protobuf:"bytes,5,rep,name=events,proto3" json:"events,omitempty"` } -func (x *ReloadSchemaShardResponse) Reset() { - *x = ReloadSchemaShardResponse{} +func (x *PlannedFailoverShardResponse) Reset() { + *x = PlannedFailoverShardResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4493,13 +4325,13 @@ func (x *ReloadSchemaShardResponse) Reset() { } } -func (x *ReloadSchemaShardResponse) String() string { +func (x *PlannedFailoverShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemaShardResponse) ProtoMessage() {} +func (*PlannedFailoverShardResponse) ProtoMessage() {} -func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { +func (x *PlannedFailoverShardResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4511,29 +4343,59 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. -func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use PlannedFailoverShardResponse.ProtoReflect.Descriptor instead. 
+func (*PlannedFailoverShardResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{74} } -func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { +func (x *PlannedFailoverShardResponse) GetCluster() *Cluster { + if x != nil { + return x.Cluster + } + return nil +} + +func (x *PlannedFailoverShardResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *PlannedFailoverShardResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *PlannedFailoverShardResponse) GetPromotedPrimary() *topodata.TabletAlias { + if x != nil { + return x.PromotedPrimary + } + return nil +} + +func (x *PlannedFailoverShardResponse) GetEvents() []*logutil.Event { if x != nil { return x.Events } return nil } -type RefreshTabletReplicationSourceRequest struct { +type RebuildKeyspaceGraphRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` + AllowPartial bool `protobuf:"varint,4,opt,name=allow_partial,json=allowPartial,proto3" json:"allow_partial,omitempty"` } -func (x *RefreshTabletReplicationSourceRequest) Reset() { - *x = RefreshTabletReplicationSourceRequest{} +func (x *RebuildKeyspaceGraphRequest) Reset() { + *x = RebuildKeyspaceGraphRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4541,13 +4403,13 @@ func (x 
*RefreshTabletReplicationSourceRequest) Reset() { } } -func (x *RefreshTabletReplicationSourceRequest) String() string { +func (x *RebuildKeyspaceGraphRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshTabletReplicationSourceRequest) ProtoMessage() {} +func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} -func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Message { +func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4559,38 +4421,49 @@ func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use RefreshTabletReplicationSourceRequest.ProtoReflect.Descriptor instead. -func (*RefreshTabletReplicationSourceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. 
+func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{75} } -func (x *RefreshTabletReplicationSourceRequest) GetAlias() *topodata.TabletAlias { +func (x *RebuildKeyspaceGraphRequest) GetClusterId() string { if x != nil { - return x.Alias + return x.ClusterId } - return nil + return "" } -func (x *RefreshTabletReplicationSourceRequest) GetClusterIds() []string { +func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { if x != nil { - return x.ClusterIds + return x.Keyspace + } + return "" +} + +func (x *RebuildKeyspaceGraphRequest) GetCells() []string { + if x != nil { + return x.Cells } return nil } -type RefreshTabletReplicationSourceResponse struct { +func (x *RebuildKeyspaceGraphRequest) GetAllowPartial() bool { + if x != nil { + return x.AllowPartial + } + return false +} + +type RebuildKeyspaceGraphResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` - Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` - Cluster *Cluster `protobuf:"bytes,4,opt,name=cluster,proto3" json:"cluster,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (x *RefreshTabletReplicationSourceResponse) Reset() { - *x = RefreshTabletReplicationSourceResponse{} +func (x *RebuildKeyspaceGraphResponse) Reset() { + *x = RebuildKeyspaceGraphResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4598,13 +4471,13 @@ func (x *RefreshTabletReplicationSourceResponse) Reset() { } } -func (x *RefreshTabletReplicationSourceResponse) String() string { +func (x *RebuildKeyspaceGraphResponse) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*RefreshTabletReplicationSourceResponse) ProtoMessage() {} +func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} -func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Message { +func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4616,53 +4489,29 @@ func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use RefreshTabletReplicationSourceResponse.ProtoReflect.Descriptor instead. -func (*RefreshTabletReplicationSourceResponse) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{76} -} - -func (x *RefreshTabletReplicationSourceResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" +// Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. 
+func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{76} } -func (x *RefreshTabletReplicationSourceResponse) GetShard() string { +func (x *RebuildKeyspaceGraphResponse) GetStatus() string { if x != nil { - return x.Shard + return x.Status } return "" } -func (x *RefreshTabletReplicationSourceResponse) GetPrimary() *topodata.TabletAlias { - if x != nil { - return x.Primary - } - return nil -} - -func (x *RefreshTabletReplicationSourceResponse) GetCluster() *Cluster { - if x != nil { - return x.Cluster - } - return nil -} - -type RemoveKeyspaceCellRequest struct { +type RefreshStateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` - Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *RemoveKeyspaceCellRequest) Reset() { - *x = RemoveKeyspaceCellRequest{} +func (x *RefreshStateRequest) Reset() { + *x = RefreshStateRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4670,13 +4519,13 @@ func (x *RemoveKeyspaceCellRequest) Reset() { } } -func (x *RemoveKeyspaceCellRequest) String() string { +func (x *RefreshStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveKeyspaceCellRequest) ProtoMessage() {} +func 
(*RefreshStateRequest) ProtoMessage() {} -func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { +func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4688,56 +4537,36 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. -func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. +func (*RefreshStateRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{77} } -func (x *RemoveKeyspaceCellRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *RemoveKeyspaceCellRequest) GetCell() string { - if x != nil { - return x.Cell - } - return "" -} - -func (x *RemoveKeyspaceCellRequest) GetForce() bool { +func (x *RefreshStateRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.Force + return x.Alias } - return false + return nil } -func (x *RemoveKeyspaceCellRequest) GetRecursive() bool { +func (x *RefreshStateRequest) GetClusterIds() []string { if x != nil { - return x.Recursive + return x.ClusterIds } - return false + return nil } -type RemoveKeyspaceCellResponse struct { +type RefreshStateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x *RemoveKeyspaceCellResponse) 
Reset() { - *x = RemoveKeyspaceCellResponse{} +func (x *RefreshStateResponse) Reset() { + *x = RefreshStateResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4745,13 +4574,13 @@ func (x *RemoveKeyspaceCellResponse) Reset() { } } -func (x *RemoveKeyspaceCellResponse) String() string { +func (x *RefreshStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveKeyspaceCellResponse) ProtoMessage() {} +func (*RefreshStateResponse) ProtoMessage() {} -func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { +func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4763,29 +4592,76 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. -func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. 
+func (*RefreshStateResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{78} } -func (x *RemoveKeyspaceCellResponse) GetStatus() string { +func (x *RefreshStateResponse) GetStatus() string { if x != nil { return x.Status } return "" } -type RunHealthCheckRequest struct { +func (x *RefreshStateResponse) GetCluster() *Cluster { + if x != nil { + return x.Cluster + } + return nil +} + +type ReloadSchemasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Keyspaces, if set, will reload schemas across one or more keyspaces. A + // keyspace not existing in a cluster will not fail the overall request. + // + // Superceded by KeyspaceShards and Tablets, in that order. + Keyspaces []string `protobuf:"bytes,1,rep,name=keyspaces,proto3" json:"keyspaces,omitempty"` + // KeyspaceShards, if set, will reload schemas across one or more shards. + // Each element must be a valid keyspace/shard according to + // topoproto.ParseKeyspaceShard. A shard not existing in a cluster will not + // fail the overall request. + // + // Supercedes Keyspaces, and is superceded by Tablets. + KeyspaceShards []string `protobuf:"bytes,2,rep,name=keyspace_shards,json=keyspaceShards,proto3" json:"keyspace_shards,omitempty"` + // Tablets, if set will reload schemas across one or more tablets. + // Supercedes both Keyspaces and KeyspaceShards. + Tablets []*topodata.TabletAlias `protobuf:"bytes,3,rep,name=tablets,proto3" json:"tablets,omitempty"` + // ClusterIds optionally restricts the reload operation to clusters with + // the specified IDs. An empty list of ClusterIds will operate on all + // clusters. 
+ ClusterIds []string `protobuf:"bytes,4,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + // Concurrency controls the number of tablets to reload at any given time. + // Its semantics depend on whether the request is for keyspace, shard, or + // tablet mode. + // + // In Keyspaces mode, Concurrency is the number of tablets to reload at once + // *per keyspace*. + // + // In KeyspaceShards mode, Concurrency is the number of tablets to reload at + // once *per shard*. + // + // In Tablets mode, Concurrency is the number of tablets to reload at once + // *per cluster*. + Concurrency int32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + // WaitPosition is the replication position that replicating tablets should + // reach prior to reloading their schemas. + // + // Does not apply in Tablets mode. + WaitPosition string `protobuf:"bytes,6,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + // IncludePrimary, if set, will reload the schemas on PRIMARY tablets as + // well as REPLICA and RDONLY. + // + // Does not apply in Tablets mode. 
+ IncludePrimary bool `protobuf:"varint,7,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` } -func (x *RunHealthCheckRequest) Reset() { - *x = RunHealthCheckRequest{} +func (x *ReloadSchemasRequest) Reset() { + *x = ReloadSchemasRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4793,13 +4669,13 @@ func (x *RunHealthCheckRequest) Reset() { } } -func (x *RunHealthCheckRequest) String() string { +func (x *ReloadSchemasRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RunHealthCheckRequest) ProtoMessage() {} +func (*ReloadSchemasRequest) ProtoMessage() {} -func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { +func (x *ReloadSchemasRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4811,36 +4687,81 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. -func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ReloadSchemasRequest.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemasRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{79} } -func (x *RunHealthCheckRequest) GetAlias() *topodata.TabletAlias { +func (x *ReloadSchemasRequest) GetKeyspaces() []string { if x != nil { - return x.Alias + return x.Keyspaces } return nil } -func (x *RunHealthCheckRequest) GetClusterIds() []string { +func (x *ReloadSchemasRequest) GetKeyspaceShards() []string { + if x != nil { + return x.KeyspaceShards + } + return nil +} + +func (x *ReloadSchemasRequest) GetTablets() []*topodata.TabletAlias { + if x != nil { + return x.Tablets + } + return nil +} + +func (x *ReloadSchemasRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type RunHealthCheckResponse struct { +func (x *ReloadSchemasRequest) GetConcurrency() int32 { + if x != nil { + return x.Concurrency + } + return 0 +} + +func (x *ReloadSchemasRequest) GetWaitPosition() string { + if x != nil { + return x.WaitPosition + } + return "" +} + +func (x *ReloadSchemasRequest) GetIncludePrimary() bool { + if x != nil { + return x.IncludePrimary + } + return false +} + +type ReloadSchemasResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + // KeyspaceResults is the list of KeyspaceResult objects for a ReloadSchemas + // operation. It is only set when the request mandates Keyspaces mode (see + // ReloadSchemasRequest). + KeyspaceResults []*ReloadSchemasResponse_KeyspaceResult `protobuf:"bytes,1,rep,name=keyspace_results,json=keyspaceResults,proto3" json:"keyspace_results,omitempty"` + // ShardResults is the list of ShardResult objects for a ReloadSchemas + // operation. It is only set when the request mandates KeyspaceShards mode + // (see ReloadSchemasRequest). 
+ ShardResults []*ReloadSchemasResponse_ShardResult `protobuf:"bytes,2,rep,name=shard_results,json=shardResults,proto3" json:"shard_results,omitempty"` + // TabletResults is the list of TabletResult objects for a ReloadSchemas + // operation. It is only set when the request mandates Tablets mode (see + // ReloadSchemasRequest). + TabletResults []*ReloadSchemasResponse_TabletResult `protobuf:"bytes,3,rep,name=tablet_results,json=tabletResults,proto3" json:"tablet_results,omitempty"` } -func (x *RunHealthCheckResponse) Reset() { - *x = RunHealthCheckResponse{} +func (x *ReloadSchemasResponse) Reset() { + *x = ReloadSchemasResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4848,13 +4769,13 @@ func (x *RunHealthCheckResponse) Reset() { } } -func (x *RunHealthCheckResponse) String() string { +func (x *ReloadSchemasResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RunHealthCheckResponse) ProtoMessage() {} +func (*ReloadSchemasResponse) ProtoMessage() {} -func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { +func (x *ReloadSchemasResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4866,36 +4787,47 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. -func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use ReloadSchemasResponse.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemasResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{80} } -func (x *RunHealthCheckResponse) GetStatus() string { +func (x *ReloadSchemasResponse) GetKeyspaceResults() []*ReloadSchemasResponse_KeyspaceResult { if x != nil { - return x.Status + return x.KeyspaceResults } - return "" + return nil } -func (x *RunHealthCheckResponse) GetCluster() *Cluster { +func (x *ReloadSchemasResponse) GetShardResults() []*ReloadSchemasResponse_ShardResult { if x != nil { - return x.Cluster + return x.ShardResults } return nil } -type SetReadOnlyRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` +func (x *ReloadSchemasResponse) GetTabletResults() []*ReloadSchemasResponse_TabletResult { + if x != nil { + return x.TabletResults + } + return nil } -func (x *SetReadOnlyRequest) Reset() { - *x = SetReadOnlyRequest{} +type ReloadSchemaShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + WaitPosition string `protobuf:"bytes,4,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` + IncludePrimary bool `protobuf:"varint,5,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` + Concurrency int32 `protobuf:"varint,6,opt,name=concurrency,proto3" json:"concurrency,omitempty"` +} + +func (x *ReloadSchemaShardRequest) Reset() { + *x = 
ReloadSchemaShardRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4903,13 +4835,13 @@ func (x *SetReadOnlyRequest) Reset() { } } -func (x *SetReadOnlyRequest) String() string { +func (x *ReloadSchemaShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetReadOnlyRequest) ProtoMessage() {} +func (*ReloadSchemaShardRequest) ProtoMessage() {} -func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message { +func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4921,33 +4853,63 @@ func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetReadOnlyRequest.ProtoReflect.Descriptor instead. -func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{81} } -func (x *SetReadOnlyRequest) GetAlias() *topodata.TabletAlias { +func (x *ReloadSchemaShardRequest) GetClusterId() string { if x != nil { - return x.Alias + return x.ClusterId } - return nil + return "" } -func (x *SetReadOnlyRequest) GetClusterIds() []string { +func (x *ReloadSchemaShardRequest) GetKeyspace() string { if x != nil { - return x.ClusterIds + return x.Keyspace } - return nil + return "" } -type SetReadOnlyResponse struct { +func (x *ReloadSchemaShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *ReloadSchemaShardRequest) GetWaitPosition() string { + if x != nil { + return x.WaitPosition + } + return "" +} + +func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { + if x != nil { + return x.IncludePrimary + } + return false +} + +func (x *ReloadSchemaShardRequest) GetConcurrency() int32 { + if x != nil { + return x.Concurrency + } + return 0 +} + +type ReloadSchemaShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Events []*logutil.Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` } -func (x *SetReadOnlyResponse) Reset() { - *x = SetReadOnlyResponse{} +func (x *ReloadSchemaShardResponse) Reset() { + *x = ReloadSchemaShardResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4955,13 +4917,13 @@ func (x *SetReadOnlyResponse) Reset() { } } -func (x *SetReadOnlyResponse) String() string { +func (x *ReloadSchemaShardResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetReadOnlyResponse) ProtoMessage() {} +func (*ReloadSchemaShardResponse) ProtoMessage() {} -func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message { +func (x *ReloadSchemaShardResponse) ProtoReflect() 
protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4973,12 +4935,19 @@ func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetReadOnlyResponse.ProtoReflect.Descriptor instead. -func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. +func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{82} } -type SetReadWriteRequest struct { +func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +type RefreshTabletReplicationSourceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -4987,8 +4956,8 @@ type SetReadWriteRequest struct { ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *SetReadWriteRequest) Reset() { - *x = SetReadWriteRequest{} +func (x *RefreshTabletReplicationSourceRequest) Reset() { + *x = RefreshTabletReplicationSourceRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -4996,13 +4965,13 @@ func (x *SetReadWriteRequest) Reset() { } } -func (x *SetReadWriteRequest) String() string { +func (x *RefreshTabletReplicationSourceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetReadWriteRequest) ProtoMessage() {} +func (*RefreshTabletReplicationSourceRequest) ProtoMessage() {} -func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message { +func (x *RefreshTabletReplicationSourceRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5014,33 +4983,38 @@ func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetReadWriteRequest.ProtoReflect.Descriptor instead. -func (*SetReadWriteRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RefreshTabletReplicationSourceRequest.ProtoReflect.Descriptor instead. +func (*RefreshTabletReplicationSourceRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{83} } -func (x *SetReadWriteRequest) GetAlias() *topodata.TabletAlias { +func (x *RefreshTabletReplicationSourceRequest) GetAlias() *topodata.TabletAlias { if x != nil { return x.Alias } return nil } -func (x *SetReadWriteRequest) GetClusterIds() []string { +func (x *RefreshTabletReplicationSourceRequest) GetClusterIds() []string { if x != nil { return x.ClusterIds } return nil } -type SetReadWriteResponse struct { +type RefreshTabletReplicationSourceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + Primary *topodata.TabletAlias `protobuf:"bytes,3,opt,name=primary,proto3" json:"primary,omitempty"` + Cluster *Cluster `protobuf:"bytes,4,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x *SetReadWriteResponse) Reset() { - *x = SetReadWriteResponse{} +func (x *RefreshTabletReplicationSourceResponse) Reset() { + *x = RefreshTabletReplicationSourceResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5048,13 +5022,13 @@ func (x *SetReadWriteResponse) Reset() { } } -func (x *SetReadWriteResponse) String() string { +func (x *RefreshTabletReplicationSourceResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func 
(*SetReadWriteResponse) ProtoMessage() {} +func (*RefreshTabletReplicationSourceResponse) ProtoMessage() {} -func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message { +func (x *RefreshTabletReplicationSourceResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5066,22 +5040,53 @@ func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SetReadWriteResponse.ProtoReflect.Descriptor instead. -func (*SetReadWriteResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use RefreshTabletReplicationSourceResponse.ProtoReflect.Descriptor instead. +func (*RefreshTabletReplicationSourceResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{84} } -type StartReplicationRequest struct { +func (x *RefreshTabletReplicationSourceResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *RefreshTabletReplicationSourceResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *RefreshTabletReplicationSourceResponse) GetPrimary() *topodata.TabletAlias { + if x != nil { + return x.Primary + } + return nil +} + +func (x *RefreshTabletReplicationSourceResponse) GetCluster() *Cluster { + if x != nil { + return x.Cluster + } + return nil +} + +type RemoveKeyspaceCellRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + 
Cell string `protobuf:"bytes,3,opt,name=cell,proto3" json:"cell,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` + Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` } -func (x *StartReplicationRequest) Reset() { - *x = StartReplicationRequest{} +func (x *RemoveKeyspaceCellRequest) Reset() { + *x = RemoveKeyspaceCellRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5089,13 +5094,13 @@ func (x *StartReplicationRequest) Reset() { } } -func (x *StartReplicationRequest) String() string { +func (x *RemoveKeyspaceCellRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StartReplicationRequest) ProtoMessage() {} +func (*RemoveKeyspaceCellRequest) ProtoMessage() {} -func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { +func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5107,36 +5112,56 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. -func (*StartReplicationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{85} } -func (x *StartReplicationRequest) GetAlias() *topodata.TabletAlias { +func (x *RemoveKeyspaceCellRequest) GetClusterId() string { if x != nil { - return x.Alias + return x.ClusterId } - return nil + return "" } -func (x *StartReplicationRequest) GetClusterIds() []string { +func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { if x != nil { - return x.ClusterIds + return x.Keyspace } - return nil + return "" } -type StartReplicationResponse struct { +func (x *RemoveKeyspaceCellRequest) GetCell() string { + if x != nil { + return x.Cell + } + return "" +} + +func (x *RemoveKeyspaceCellRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + +func (x *RemoveKeyspaceCellRequest) GetRecursive() bool { + if x != nil { + return x.Recursive + } + return false +} + +type RemoveKeyspaceCellResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -func (x *StartReplicationResponse) Reset() { - *x = StartReplicationResponse{} +func (x *RemoveKeyspaceCellResponse) Reset() { + *x = RemoveKeyspaceCellResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5144,13 +5169,13 @@ func (x *StartReplicationResponse) Reset() { } } -func (x *StartReplicationResponse) String() string { +func (x *RemoveKeyspaceCellResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StartReplicationResponse) ProtoMessage() {} +func (*RemoveKeyspaceCellResponse) ProtoMessage() {} -func (x *StartReplicationResponse) ProtoReflect() 
protoreflect.Message { +func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5162,36 +5187,29 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. -func (*StartReplicationResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. +func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{86} } -func (x *StartReplicationResponse) GetStatus() string { +func (x *RemoveKeyspaceCellResponse) GetStatus() string { if x != nil { return x.Status } return "" } -func (x *StartReplicationResponse) GetCluster() *Cluster { - if x != nil { - return x.Cluster - } - return nil -} - -type StopReplicationRequest struct { +type RetrySchemaMigrationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.RetrySchemaMigrationRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` } -func (x *StopReplicationRequest) Reset() { - *x = StopReplicationRequest{} +func (x *RetrySchemaMigrationRequest) Reset() { + *x = RetrySchemaMigrationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5199,13 +5217,13 @@ func (x *StopReplicationRequest) Reset() { } } -func (x *StopReplicationRequest) String() 
string { +func (x *RetrySchemaMigrationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StopReplicationRequest) ProtoMessage() {} +func (*RetrySchemaMigrationRequest) ProtoMessage() {} -func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { +func (x *RetrySchemaMigrationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5217,36 +5235,36 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. -func (*StopReplicationRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RetrySchemaMigrationRequest.ProtoReflect.Descriptor instead. +func (*RetrySchemaMigrationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{87} } -func (x *StopReplicationRequest) GetAlias() *topodata.TabletAlias { +func (x *RetrySchemaMigrationRequest) GetClusterId() string { if x != nil { - return x.Alias + return x.ClusterId } - return nil + return "" } -func (x *StopReplicationRequest) GetClusterIds() []string { +func (x *RetrySchemaMigrationRequest) GetRequest() *vtctldata.RetrySchemaMigrationRequest { if x != nil { - return x.ClusterIds + return x.Request } return nil } -type StopReplicationResponse struct { +type RunHealthCheckRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *StopReplicationResponse) Reset() { - 
*x = StopReplicationResponse{} +func (x *RunHealthCheckRequest) Reset() { + *x = RunHealthCheckRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5254,13 +5272,13 @@ func (x *StopReplicationResponse) Reset() { } } -func (x *StopReplicationResponse) String() string { +func (x *RunHealthCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StopReplicationResponse) ProtoMessage() {} +func (*RunHealthCheckRequest) ProtoMessage() {} -func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { +func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[88] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5272,38 +5290,36 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. -func (*StopReplicationResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. +func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{88} } -func (x *StopReplicationResponse) GetStatus() string { +func (x *RunHealthCheckRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.Status + return x.Alias } - return "" + return nil } -func (x *StopReplicationResponse) GetCluster() *Cluster { +func (x *RunHealthCheckRequest) GetClusterIds() []string { if x != nil { - return x.Cluster + return x.ClusterIds } return nil } -type TabletExternallyPromotedRequest struct { +type RunHealthCheckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Tablet is the alias of the tablet that was promoted externally and should - // be updated to the shard primary in the topo. 
- Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x *TabletExternallyPromotedRequest) Reset() { - *x = TabletExternallyPromotedRequest{} +func (x *RunHealthCheckResponse) Reset() { + *x = RunHealthCheckResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5311,13 +5327,13 @@ func (x *TabletExternallyPromotedRequest) Reset() { } } -func (x *TabletExternallyPromotedRequest) String() string { +func (x *RunHealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TabletExternallyPromotedRequest) ProtoMessage() {} +func (*RunHealthCheckResponse) ProtoMessage() {} -func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message { +func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5329,39 +5345,36 @@ func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TabletExternallyPromotedRequest.ProtoReflect.Descriptor instead. -func (*TabletExternallyPromotedRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. 
+func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{89} } -func (x *TabletExternallyPromotedRequest) GetAlias() *topodata.TabletAlias { +func (x *RunHealthCheckResponse) GetStatus() string { if x != nil { - return x.Alias + return x.Status } - return nil + return "" } -func (x *TabletExternallyPromotedRequest) GetClusterIds() []string { +func (x *RunHealthCheckResponse) GetCluster() *Cluster { if x != nil { - return x.ClusterIds + return x.Cluster } return nil } -type TabletExternallyPromotedResponse struct { +type SetReadOnlyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - NewPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` - OldPrimary *topodata.TabletAlias `protobuf:"bytes,5,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *TabletExternallyPromotedResponse) Reset() { - *x = TabletExternallyPromotedResponse{} +func (x *SetReadOnlyRequest) Reset() { + *x = SetReadOnlyRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5369,13 +5382,13 @@ func (x *TabletExternallyPromotedResponse) Reset() { } } -func (x *TabletExternallyPromotedResponse) String() string { +func (x *SetReadOnlyRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TabletExternallyPromotedResponse) 
ProtoMessage() {} +func (*SetReadOnlyRequest) ProtoMessage() {} -func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message { +func (x *SetReadOnlyRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5387,57 +5400,33 @@ func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TabletExternallyPromotedResponse.ProtoReflect.Descriptor instead. -func (*TabletExternallyPromotedResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use SetReadOnlyRequest.ProtoReflect.Descriptor instead. +func (*SetReadOnlyRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{90} } -func (x *TabletExternallyPromotedResponse) GetCluster() *Cluster { - if x != nil { - return x.Cluster - } - return nil -} - -func (x *TabletExternallyPromotedResponse) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *TabletExternallyPromotedResponse) GetShard() string { - if x != nil { - return x.Shard - } - return "" -} - -func (x *TabletExternallyPromotedResponse) GetNewPrimary() *topodata.TabletAlias { +func (x *SetReadOnlyRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.NewPrimary + return x.Alias } return nil } -func (x *TabletExternallyPromotedResponse) GetOldPrimary() *topodata.TabletAlias { +func (x *SetReadOnlyRequest) GetClusterIds() []string { if x != nil { - return x.OldPrimary + return x.ClusterIds } return nil } -type TabletExternallyReparentedRequest struct { +type SetReadOnlyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` - ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" 
json:"cluster_ids,omitempty"` } -func (x *TabletExternallyReparentedRequest) Reset() { - *x = TabletExternallyReparentedRequest{} +func (x *SetReadOnlyResponse) Reset() { + *x = SetReadOnlyResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5445,13 +5434,13 @@ func (x *TabletExternallyReparentedRequest) Reset() { } } -func (x *TabletExternallyReparentedRequest) String() string { +func (x *SetReadOnlyResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TabletExternallyReparentedRequest) ProtoMessage() {} +func (*SetReadOnlyResponse) ProtoMessage() {} -func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { +func (x *SetReadOnlyResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5463,36 +5452,22 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. -func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use SetReadOnlyResponse.ProtoReflect.Descriptor instead. 
+func (*SetReadOnlyResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{91} } -func (x *TabletExternallyReparentedRequest) GetAlias() *topodata.TabletAlias { - if x != nil { - return x.Alias - } - return nil -} - -func (x *TabletExternallyReparentedRequest) GetClusterIds() []string { - if x != nil { - return x.ClusterIds - } - return nil -} - -type ValidateRequest struct { +type SetReadWriteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *ValidateRequest) Reset() { - *x = ValidateRequest{} +func (x *SetReadWriteRequest) Reset() { + *x = SetReadWriteRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5500,13 +5475,13 @@ func (x *ValidateRequest) Reset() { } } -func (x *ValidateRequest) String() string { +func (x *SetReadWriteRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateRequest) ProtoMessage() {} +func (*SetReadWriteRequest) ProtoMessage() {} -func (x *ValidateRequest) ProtoReflect() protoreflect.Message { +func (x *SetReadWriteRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5518,37 +5493,33 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. 
-func (*ValidateRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use SetReadWriteRequest.ProtoReflect.Descriptor instead. +func (*SetReadWriteRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{92} } -func (x *ValidateRequest) GetClusterId() string { +func (x *SetReadWriteRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.ClusterId + return x.Alias } - return "" + return nil } -func (x *ValidateRequest) GetPingTablets() bool { +func (x *SetReadWriteRequest) GetClusterIds() []string { if x != nil { - return x.PingTablets + return x.ClusterIds } - return false + return nil } -type ValidateKeyspaceRequest struct { +type SetReadWriteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - PingTablets bool `protobuf:"varint,3,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` } -func (x *ValidateKeyspaceRequest) Reset() { - *x = ValidateKeyspaceRequest{} +func (x *SetReadWriteResponse) Reset() { + *x = SetReadWriteResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5556,13 +5527,13 @@ func (x *ValidateKeyspaceRequest) Reset() { } } -func (x *ValidateKeyspaceRequest) String() string { +func (x *SetReadWriteResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateKeyspaceRequest) ProtoMessage() {} +func (*SetReadWriteResponse) ProtoMessage() {} -func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { +func (x *SetReadWriteResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
@@ -5574,43 +5545,22 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use SetReadWriteResponse.ProtoReflect.Descriptor instead. +func (*SetReadWriteResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{93} } -func (x *ValidateKeyspaceRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *ValidateKeyspaceRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *ValidateKeyspaceRequest) GetPingTablets() bool { - if x != nil { - return x.PingTablets - } - return false -} - -type ValidateSchemaKeyspaceRequest struct { +type StartReplicationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *ValidateSchemaKeyspaceRequest) Reset() { - *x = ValidateSchemaKeyspaceRequest{} +func (x *StartReplicationRequest) Reset() { + *x = StartReplicationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5618,13 +5568,13 @@ func (x *ValidateSchemaKeyspaceRequest) Reset() { } } -func (x *ValidateSchemaKeyspaceRequest) String() string { +func (x *StartReplicationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} +func 
(*StartReplicationRequest) ProtoMessage() {} -func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { +func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5636,38 +5586,36 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. +func (*StartReplicationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{94} } -func (x *ValidateSchemaKeyspaceRequest) GetClusterId() string { +func (x *StartReplicationRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.ClusterId + return x.Alias } - return "" + return nil } -func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { +func (x *StartReplicationRequest) GetClusterIds() []string { if x != nil { - return x.Keyspace + return x.ClusterIds } - return "" + return nil } -type ValidateShardRequest struct { +type StartReplicationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` - PingTablets bool `protobuf:"varint,4,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x 
*ValidateShardRequest) Reset() { - *x = ValidateShardRequest{} +func (x *StartReplicationResponse) Reset() { + *x = StartReplicationResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5675,13 +5623,13 @@ func (x *ValidateShardRequest) Reset() { } } -func (x *ValidateShardRequest) String() string { +func (x *StartReplicationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateShardRequest) ProtoMessage() {} +func (*StartReplicationResponse) ProtoMessage() {} -func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { +func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5693,50 +5641,36 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. -func (*ValidateShardRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. 
+func (*StartReplicationResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{95} } -func (x *ValidateShardRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *ValidateShardRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *ValidateShardRequest) GetShard() string { +func (x *StartReplicationResponse) GetStatus() string { if x != nil { - return x.Shard + return x.Status } return "" } -func (x *ValidateShardRequest) GetPingTablets() bool { +func (x *StartReplicationResponse) GetCluster() *Cluster { if x != nil { - return x.PingTablets + return x.Cluster } - return false + return nil } -type ValidateVersionKeyspaceRequest struct { +type StopReplicationRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *ValidateVersionKeyspaceRequest) Reset() { - *x = ValidateVersionKeyspaceRequest{} +func (x *StopReplicationRequest) Reset() { + *x = StopReplicationRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5744,13 +5678,13 @@ func (x *ValidateVersionKeyspaceRequest) Reset() { } } -func (x *ValidateVersionKeyspaceRequest) String() string { +func (x *StopReplicationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} +func (*StopReplicationRequest) ProtoMessage() {} -func (x *ValidateVersionKeyspaceRequest) 
ProtoReflect() protoreflect.Message { +func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5762,37 +5696,36 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. +func (*StopReplicationRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{96} } -func (x *ValidateVersionKeyspaceRequest) GetClusterId() string { +func (x *StopReplicationRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.ClusterId + return x.Alias } - return "" + return nil } -func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { +func (x *StopReplicationRequest) GetClusterIds() []string { if x != nil { - return x.Keyspace + return x.ClusterIds } - return "" + return nil } -type ValidateVersionShardRequest struct { +type StopReplicationResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` } -func (x *ValidateVersionShardRequest) Reset() { - *x = ValidateVersionShardRequest{} +func (x *StopReplicationResponse) Reset() { + *x = StopReplicationResponse{} if protoimpl.UnsafeEnabled { mi := 
&file_vtadmin_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5800,13 +5733,13 @@ func (x *ValidateVersionShardRequest) Reset() { } } -func (x *ValidateVersionShardRequest) String() string { +func (x *StopReplicationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateVersionShardRequest) ProtoMessage() {} +func (*StopReplicationResponse) ProtoMessage() {} -func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { +func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5818,44 +5751,38 @@ func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. -func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. 
+func (*StopReplicationResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{97} } -func (x *ValidateVersionShardRequest) GetClusterId() string { - if x != nil { - return x.ClusterId - } - return "" -} - -func (x *ValidateVersionShardRequest) GetKeyspace() string { +func (x *StopReplicationResponse) GetStatus() string { if x != nil { - return x.Keyspace + return x.Status } return "" } -func (x *ValidateVersionShardRequest) GetShard() string { +func (x *StopReplicationResponse) GetCluster() *Cluster { if x != nil { - return x.Shard + return x.Cluster } - return "" + return nil } -type VTExplainRequest struct { +type TabletExternallyPromotedRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` - Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"` + // Tablet is the alias of the tablet that was promoted externally and should + // be updated to the shard primary in the topo. 
+ Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *VTExplainRequest) Reset() { - *x = VTExplainRequest{} +func (x *TabletExternallyPromotedRequest) Reset() { + *x = TabletExternallyPromotedRequest{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5863,13 +5790,13 @@ func (x *VTExplainRequest) Reset() { } } -func (x *VTExplainRequest) String() string { +func (x *TabletExternallyPromotedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VTExplainRequest) ProtoMessage() {} +func (*TabletExternallyPromotedRequest) ProtoMessage() {} -func (x *VTExplainRequest) ProtoReflect() protoreflect.Message { +func (x *TabletExternallyPromotedRequest) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5881,42 +5808,39 @@ func (x *VTExplainRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VTExplainRequest.ProtoReflect.Descriptor instead. -func (*VTExplainRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use TabletExternallyPromotedRequest.ProtoReflect.Descriptor instead. 
+func (*TabletExternallyPromotedRequest) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{98} } -func (x *VTExplainRequest) GetCluster() string { - if x != nil { - return x.Cluster - } - return "" -} - -func (x *VTExplainRequest) GetKeyspace() string { +func (x *TabletExternallyPromotedRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.Alias } - return "" + return nil } -func (x *VTExplainRequest) GetSql() string { +func (x *TabletExternallyPromotedRequest) GetClusterIds() []string { if x != nil { - return x.Sql + return x.ClusterIds } - return "" + return nil } -type VTExplainResponse struct { +type TabletExternallyPromotedResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Response string `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + NewPrimary *topodata.TabletAlias `protobuf:"bytes,4,opt,name=new_primary,json=newPrimary,proto3" json:"new_primary,omitempty"` + OldPrimary *topodata.TabletAlias `protobuf:"bytes,5,opt,name=old_primary,json=oldPrimary,proto3" json:"old_primary,omitempty"` } -func (x *VTExplainResponse) Reset() { - *x = VTExplainResponse{} +func (x *TabletExternallyPromotedResponse) Reset() { + *x = TabletExternallyPromotedResponse{} if protoimpl.UnsafeEnabled { mi := &file_vtadmin_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5924,13 +5848,13 @@ func (x *VTExplainResponse) Reset() { } } -func (x *VTExplainResponse) String() string { +func (x *TabletExternallyPromotedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VTExplainResponse) ProtoMessage() {} +func 
(*TabletExternallyPromotedResponse) ProtoMessage() {} -func (x *VTExplainResponse) ProtoReflect() protoreflect.Message { +func (x *TabletExternallyPromotedResponse) ProtoReflect() protoreflect.Message { mi := &file_vtadmin_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -5942,44 +5866,72 @@ func (x *VTExplainResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VTExplainResponse.ProtoReflect.Descriptor instead. -func (*VTExplainResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use TabletExternallyPromotedResponse.ProtoReflect.Descriptor instead. +func (*TabletExternallyPromotedResponse) Descriptor() ([]byte, []int) { return file_vtadmin_proto_rawDescGZIP(), []int{99} } -func (x *VTExplainResponse) GetResponse() string { +func (x *TabletExternallyPromotedResponse) GetCluster() *Cluster { if x != nil { - return x.Response + return x.Cluster + } + return nil +} + +func (x *TabletExternallyPromotedResponse) GetKeyspace() string { + if x != nil { + return x.Keyspace } return "" } -type Schema_ShardTableSize struct { +func (x *TabletExternallyPromotedResponse) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *TabletExternallyPromotedResponse) GetNewPrimary() *topodata.TabletAlias { + if x != nil { + return x.NewPrimary + } + return nil +} + +func (x *TabletExternallyPromotedResponse) GetOldPrimary() *topodata.TabletAlias { + if x != nil { + return x.OldPrimary + } + return nil +} + +type TabletExternallyReparentedRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RowCount uint64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` - DataLength uint64 `protobuf:"varint,2,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` + Alias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=alias,proto3" 
json:"alias,omitempty"` + ClusterIds []string `protobuf:"bytes,2,rep,name=cluster_ids,json=clusterIds,proto3" json:"cluster_ids,omitempty"` } -func (x *Schema_ShardTableSize) Reset() { - *x = Schema_ShardTableSize{} +func (x *TabletExternallyReparentedRequest) Reset() { + *x = TabletExternallyReparentedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[103] + mi := &file_vtadmin_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Schema_ShardTableSize) String() string { +func (x *TabletExternallyReparentedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Schema_ShardTableSize) ProtoMessage() {} +func (*TabletExternallyReparentedRequest) ProtoMessage() {} -func (x *Schema_ShardTableSize) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[103] +func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[100] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5990,54 +5942,51 @@ func (x *Schema_ShardTableSize) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Schema_ShardTableSize.ProtoReflect.Descriptor instead. -func (*Schema_ShardTableSize) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{7, 1} +// Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. 
+func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{100} } -func (x *Schema_ShardTableSize) GetRowCount() uint64 { +func (x *TabletExternallyReparentedRequest) GetAlias() *topodata.TabletAlias { if x != nil { - return x.RowCount + return x.Alias } - return 0 + return nil } -func (x *Schema_ShardTableSize) GetDataLength() uint64 { +func (x *TabletExternallyReparentedRequest) GetClusterIds() []string { if x != nil { - return x.DataLength + return x.ClusterIds } - return 0 + return nil } -// TableSize aggregates table size information across all shards containing -// in the given keyspace and cluster, as well as per-shard size information. -type Schema_TableSize struct { +type ValidateRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - RowCount uint64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` - DataLength uint64 `protobuf:"varint,2,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` - ByShard map[string]*Schema_ShardTableSize `protobuf:"bytes,3,rep,name=by_shard,json=byShard,proto3" json:"by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + PingTablets bool `protobuf:"varint,2,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` } -func (x *Schema_TableSize) Reset() { - *x = Schema_TableSize{} +func (x *ValidateRequest) Reset() { + *x = ValidateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[104] + mi := &file_vtadmin_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *Schema_TableSize) String() string { +func (x *ValidateRequest) String() string { return protoimpl.X.MessageStringOf(x) } 
-func (*Schema_TableSize) ProtoMessage() {} +func (*ValidateRequest) ProtoMessage() {} -func (x *Schema_TableSize) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[104] +func (x *ValidateRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6048,64 +5997,114 @@ func (x *Schema_TableSize) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Schema_TableSize.ProtoReflect.Descriptor instead. -func (*Schema_TableSize) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{7, 2} +// Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. +func (*ValidateRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{101} } -func (x *Schema_TableSize) GetRowCount() uint64 { +func (x *ValidateRequest) GetClusterId() string { if x != nil { - return x.RowCount + return x.ClusterId } - return 0 + return "" } -func (x *Schema_TableSize) GetDataLength() uint64 { +func (x *ValidateRequest) GetPingTablets() bool { if x != nil { - return x.DataLength + return x.PingTablets } - return 0 + return false } -func (x *Schema_TableSize) GetByShard() map[string]*Schema_ShardTableSize { +type ValidateKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + PingTablets bool `protobuf:"varint,3,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` +} + +func (x *ValidateKeyspaceRequest) Reset() { + *x = ValidateKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[102] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateKeyspaceRequest) ProtoMessage() {} + +func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[102] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{102} +} + +func (x *ValidateKeyspaceRequest) GetClusterId() string { if x != nil { - return x.ByShard + return x.ClusterId } - return nil + return "" } -// KeyspaceResult is a grouping of a Keyspace and any log events that -// occurred in that keyspace during a schema reload (usually associated with -// partial errors - ReloadSchemas requests are best-effort). -// -// It is only set when a ReloadSchemas request mandates Keyspaces mode -// (see ReloadSchemasRequest). 
-type ReloadSchemasResponse_KeyspaceResult struct { +func (x *ValidateKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateKeyspaceRequest) GetPingTablets() bool { + if x != nil { + return x.PingTablets + } + return false +} + +type ValidateSchemaKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *ReloadSchemasResponse_KeyspaceResult) Reset() { - *x = ReloadSchemasResponse_KeyspaceResult{} +func (x *ValidateSchemaKeyspaceRequest) Reset() { + *x = ValidateSchemaKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[108] + mi := &file_vtadmin_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemasResponse_KeyspaceResult) String() string { +func (x *ValidateSchemaKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemasResponse_KeyspaceResult) ProtoMessage() {} +func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} -func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[108] +func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[103] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6116,57 +6115,53 @@ func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Messa return 
mi.MessageOf(x) } -// Deprecated: Use ReloadSchemasResponse_KeyspaceResult.ProtoReflect.Descriptor instead. -func (*ReloadSchemasResponse_KeyspaceResult) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{72, 0} +// Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{103} } -func (x *ReloadSchemasResponse_KeyspaceResult) GetKeyspace() *Keyspace { +func (x *ValidateSchemaKeyspaceRequest) GetClusterId() string { if x != nil { - return x.Keyspace + return x.ClusterId } - return nil + return "" } -func (x *ReloadSchemasResponse_KeyspaceResult) GetEvents() []*logutil.Event { +func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.Events + return x.Keyspace } - return nil + return "" } -// ShardResult is a grouping of a Shard and any log events that occurred in -// that shard during a schema reload (usually associated with partial -// errors - ReloadSchemas requests are best-effort). -// -// It is only set when a ReloadSchemas request mandates KeyspaceShards mode -// (see ReloadSchemasRequest). 
-type ReloadSchemasResponse_ShardResult struct { +type ValidateShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` - Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` + PingTablets bool `protobuf:"varint,4,opt,name=ping_tablets,json=pingTablets,proto3" json:"ping_tablets,omitempty"` } -func (x *ReloadSchemasResponse_ShardResult) Reset() { - *x = ReloadSchemasResponse_ShardResult{} +func (x *ValidateShardRequest) Reset() { + *x = ValidateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[109] + mi := &file_vtadmin_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemasResponse_ShardResult) String() string { +func (x *ValidateShardRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemasResponse_ShardResult) ProtoMessage() {} +func (*ValidateShardRequest) ProtoMessage() {} -func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[109] +func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[104] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6177,58 +6172,65 @@ func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemasResponse_ShardResult.ProtoReflect.Descriptor instead. 
-func (*ReloadSchemasResponse_ShardResult) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{72, 1} +// Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. +func (*ValidateShardRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{104} } -func (x *ReloadSchemasResponse_ShardResult) GetShard() *Shard { +func (x *ValidateShardRequest) GetClusterId() string { + if x != nil { + return x.ClusterId + } + return "" +} + +func (x *ValidateShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateShardRequest) GetShard() string { if x != nil { return x.Shard } - return nil + return "" } -func (x *ReloadSchemasResponse_ShardResult) GetEvents() []*logutil.Event { +func (x *ValidateShardRequest) GetPingTablets() bool { if x != nil { - return x.Events + return x.PingTablets } - return nil + return false } -// TabletResult is a grouping of a Tablet and the result of reloading that -// Tablet's schema. Result will either be the string "ok", or the error -// message from that tablet. Note ReloadSchemas is best-effort, so tablet's -// failing to reload is not treated as an overall failure. -// -// It is only set when a ReloadSchemas request mandates Tablets mode (see -// ReloadSchemasRequest). 
-type ReloadSchemasResponse_TabletResult struct { +type ValidateVersionKeyspaceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Tablet *Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` - Result string `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` } -func (x *ReloadSchemasResponse_TabletResult) Reset() { - *x = ReloadSchemasResponse_TabletResult{} +func (x *ValidateVersionKeyspaceRequest) Reset() { + *x = ValidateVersionKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtadmin_proto_msgTypes[110] + mi := &file_vtadmin_proto_msgTypes[105] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ReloadSchemasResponse_TabletResult) String() string { +func (x *ValidateVersionKeyspaceRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReloadSchemasResponse_TabletResult) ProtoMessage() {} +func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} -func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message { - mi := &file_vtadmin_proto_msgTypes[110] +func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[105] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6239,49 +6241,581 @@ func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ReloadSchemasResponse_TabletResult.ProtoReflect.Descriptor instead. 
-func (*ReloadSchemasResponse_TabletResult) Descriptor() ([]byte, []int) { - return file_vtadmin_proto_rawDescGZIP(), []int{72, 2} +// Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. +func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{105} } -func (x *ReloadSchemasResponse_TabletResult) GetTablet() *Tablet { +func (x *ValidateVersionKeyspaceRequest) GetClusterId() string { if x != nil { - return x.Tablet + return x.ClusterId } - return nil + return "" } -func (x *ReloadSchemasResponse_TabletResult) GetResult() string { +func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { if x != nil { - return x.Result + return x.Keyspace } return "" } -var File_vtadmin_proto protoreflect.FileDescriptor +type ValidateVersionShardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -var file_vtadmin_proto_rawDesc = []byte{ - 0x0a, 0x0d, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x07, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x1a, 0x0d, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, - 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, - 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x0d, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x0f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x2d, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x69, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x06, - 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, - 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0xd8, 0x01, 0x0a, 0x13, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,3,opt,name=shard,proto3" json:"shard,omitempty"` +} + +func (x *ValidateVersionShardRequest) Reset() { + *x = ValidateVersionShardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[106] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateVersionShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateVersionShardRequest) ProtoMessage() {} + +func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[106] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() 
== nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. +func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{106} +} + +func (x *ValidateVersionShardRequest) GetClusterId() string { + if x != nil { + return x.ClusterId + } + return "" +} + +func (x *ValidateVersionShardRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ValidateVersionShardRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +type VTExplainRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Keyspace string `protobuf:"bytes,2,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"` +} + +func (x *VTExplainRequest) Reset() { + *x = VTExplainRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[107] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VTExplainRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VTExplainRequest) ProtoMessage() {} + +func (x *VTExplainRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[107] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VTExplainRequest.ProtoReflect.Descriptor instead. 
+func (*VTExplainRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{107} +} + +func (x *VTExplainRequest) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *VTExplainRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *VTExplainRequest) GetSql() string { + if x != nil { + return x.Sql + } + return "" +} + +type VTExplainResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Response string `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *VTExplainResponse) Reset() { + *x = VTExplainResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[108] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VTExplainResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VTExplainResponse) ProtoMessage() {} + +func (x *VTExplainResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[108] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VTExplainResponse.ProtoReflect.Descriptor instead. 
+func (*VTExplainResponse) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{108} +} + +func (x *VTExplainResponse) GetResponse() string { + if x != nil { + return x.Response + } + return "" +} + +type Schema_ShardTableSize struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RowCount uint64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` + DataLength uint64 `protobuf:"varint,2,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` +} + +func (x *Schema_ShardTableSize) Reset() { + *x = Schema_ShardTableSize{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[112] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_ShardTableSize) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_ShardTableSize) ProtoMessage() {} + +func (x *Schema_ShardTableSize) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[112] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_ShardTableSize.ProtoReflect.Descriptor instead. +func (*Schema_ShardTableSize) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *Schema_ShardTableSize) GetRowCount() uint64 { + if x != nil { + return x.RowCount + } + return 0 +} + +func (x *Schema_ShardTableSize) GetDataLength() uint64 { + if x != nil { + return x.DataLength + } + return 0 +} + +// TableSize aggregates table size information across all shards containing +// in the given keyspace and cluster, as well as per-shard size information. 
+type Schema_TableSize struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RowCount uint64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` + DataLength uint64 `protobuf:"varint,2,opt,name=data_length,json=dataLength,proto3" json:"data_length,omitempty"` + ByShard map[string]*Schema_ShardTableSize `protobuf:"bytes,3,rep,name=by_shard,json=byShard,proto3" json:"by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Schema_TableSize) Reset() { + *x = Schema_TableSize{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[113] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema_TableSize) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema_TableSize) ProtoMessage() {} + +func (x *Schema_TableSize) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[113] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema_TableSize.ProtoReflect.Descriptor instead. 
+func (*Schema_TableSize) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{7, 2} +} + +func (x *Schema_TableSize) GetRowCount() uint64 { + if x != nil { + return x.RowCount + } + return 0 +} + +func (x *Schema_TableSize) GetDataLength() uint64 { + if x != nil { + return x.DataLength + } + return 0 +} + +func (x *Schema_TableSize) GetByShard() map[string]*Schema_ShardTableSize { + if x != nil { + return x.ByShard + } + return nil +} + +type GetSchemaMigrationsRequest_ClusterRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + Request *vtctldata.GetSchemaMigrationsRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +} + +func (x *GetSchemaMigrationsRequest_ClusterRequest) Reset() { + *x = GetSchemaMigrationsRequest_ClusterRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[115] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSchemaMigrationsRequest_ClusterRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSchemaMigrationsRequest_ClusterRequest) ProtoMessage() {} + +func (x *GetSchemaMigrationsRequest_ClusterRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[115] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSchemaMigrationsRequest_ClusterRequest.ProtoReflect.Descriptor instead. 
+func (*GetSchemaMigrationsRequest_ClusterRequest) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{47, 0} +} + +func (x *GetSchemaMigrationsRequest_ClusterRequest) GetClusterId() string { + if x != nil { + return x.ClusterId + } + return "" +} + +func (x *GetSchemaMigrationsRequest_ClusterRequest) GetRequest() *vtctldata.GetSchemaMigrationsRequest { + if x != nil { + return x.Request + } + return nil +} + +// KeyspaceResult is a grouping of a Keyspace and any log events that +// occurred in that keyspace during a schema reload (usually associated with +// partial errors - ReloadSchemas requests are best-effort). +// +// It is only set when a ReloadSchemas request mandates Keyspaces mode +// (see ReloadSchemasRequest). +type ReloadSchemasResponse_KeyspaceResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace *Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` +} + +func (x *ReloadSchemasResponse_KeyspaceResult) Reset() { + *x = ReloadSchemasResponse_KeyspaceResult{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[118] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReloadSchemasResponse_KeyspaceResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReloadSchemasResponse_KeyspaceResult) ProtoMessage() {} + +func (x *ReloadSchemasResponse_KeyspaceResult) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[118] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReloadSchemasResponse_KeyspaceResult.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemasResponse_KeyspaceResult) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{80, 0} +} + +func (x *ReloadSchemasResponse_KeyspaceResult) GetKeyspace() *Keyspace { + if x != nil { + return x.Keyspace + } + return nil +} + +func (x *ReloadSchemasResponse_KeyspaceResult) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +// ShardResult is a grouping of a Shard and any log events that occurred in +// that shard during a schema reload (usually associated with partial +// errors - ReloadSchemas requests are best-effort). +// +// It is only set when a ReloadSchemas request mandates KeyspaceShards mode +// (see ReloadSchemasRequest). +type ReloadSchemasResponse_ShardResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shard *Shard `protobuf:"bytes,1,opt,name=shard,proto3" json:"shard,omitempty"` + Events []*logutil.Event `protobuf:"bytes,2,rep,name=events,proto3" json:"events,omitempty"` +} + +func (x *ReloadSchemasResponse_ShardResult) Reset() { + *x = ReloadSchemasResponse_ShardResult{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[119] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReloadSchemasResponse_ShardResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReloadSchemasResponse_ShardResult) ProtoMessage() {} + +func (x *ReloadSchemasResponse_ShardResult) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[119] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReloadSchemasResponse_ShardResult.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemasResponse_ShardResult) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{80, 1} +} + +func (x *ReloadSchemasResponse_ShardResult) GetShard() *Shard { + if x != nil { + return x.Shard + } + return nil +} + +func (x *ReloadSchemasResponse_ShardResult) GetEvents() []*logutil.Event { + if x != nil { + return x.Events + } + return nil +} + +// TabletResult is a grouping of a Tablet and the result of reloading that +// Tablet's schema. Result will either be the string "ok", or the error +// message from that tablet. Note ReloadSchemas is best-effort, so tablet's +// failing to reload is not treated as an overall failure. +// +// It is only set when a ReloadSchemas request mandates Tablets mode (see +// ReloadSchemasRequest). +type ReloadSchemasResponse_TabletResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tablet *Tablet `protobuf:"bytes,1,opt,name=tablet,proto3" json:"tablet,omitempty"` + Result string `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *ReloadSchemasResponse_TabletResult) Reset() { + *x = ReloadSchemasResponse_TabletResult{} + if protoimpl.UnsafeEnabled { + mi := &file_vtadmin_proto_msgTypes[120] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReloadSchemasResponse_TabletResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReloadSchemasResponse_TabletResult) ProtoMessage() {} + +func (x *ReloadSchemasResponse_TabletResult) ProtoReflect() protoreflect.Message { + mi := &file_vtadmin_proto_msgTypes[120] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReloadSchemasResponse_TabletResult.ProtoReflect.Descriptor instead. 
+func (*ReloadSchemasResponse_TabletResult) Descriptor() ([]byte, []int) { + return file_vtadmin_proto_rawDescGZIP(), []int{80, 2} +} + +func (x *ReloadSchemasResponse_TabletResult) GetTablet() *Tablet { + if x != nil { + return x.Tablet + } + return nil +} + +func (x *ReloadSchemasResponse_TabletResult) GetResult() string { + if x != nil { + return x.Result + } + return "" +} + +var File_vtadmin_proto protoreflect.FileDescriptor + +var file_vtadmin_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x07, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x1a, 0x0d, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, + 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x0e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x0d, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x0f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x2d, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x69, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x06, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, + 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0xd8, 0x01, 0x0a, 0x13, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, @@ -6374,947 +6908,1070 @@ var file_vtadmin_proto_rawDesc = []byte{ 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5b, 0x0a, 0x05, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x26, - 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 
0x63, 0x65, 0x6c, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xe1, 0x01, 0x0a, - 0x06, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x32, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x46, 0x51, 0x44, 0x4e, 0x22, 0x39, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, - 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 
0x49, 0x4e, 0x47, 0x10, 0x02, - 0x22, 0x77, 0x0a, 0x07, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x64, 0x0a, 0x06, 0x56, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x46, - 0x51, 0x44, 0x4e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x22, - 0xaa, 0x01, 0x0a, 0x06, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, - 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, - 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, - 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x2a, - 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 
0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x51, 0x44, 0x4e, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x22, 0x83, 0x01, 0x0a, - 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x84, 0x01, 0x0a, 0x0f, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x10, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x5b, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x77, 0x6f, 0x72, 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x22, 0x72, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, - 0x6c, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 
0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x72, 0x0a, - 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x83, 0x01, + 0x0a, 0x0a, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, + 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, + 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 
0x65, 0x6d, 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x22, 0xe1, 0x01, 0x0a, 0x06, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x2a, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x51, 0x44, 0x4e, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x22, 0x39, 0x0a, 0x0c, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x45, 0x52, + 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x22, 0x77, 0x0a, 0x07, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x22, 0x64, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, + 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, + 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x22, 0xaa, 0x01, 0x0a, 0x06, 0x56, 0x54, 0x47, 0x61, 0x74, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x6f, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x6f, 0x6f, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x46, 0x51, 0x44, 0x4e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, + 0x51, 0x44, 0x4e, 0x22, 0x83, 0x01, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 
0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x6c, 0x0a, 0x12, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x37, + 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, 0x01, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, + 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 
0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x88, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5a, 0x0a, 0x14, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x82, 0x01, 0x0a, 0x1d, 0x45, 0x6d, 0x65, - 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 
0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x82, 0x01, 0x0a, 0x1d, 0x43, + 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x84, 0x01, 0x0a, 0x1e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x72, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3a, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x16, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x22, 0x6c, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x72, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x42, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe8, 0x01, - 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, - 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x6e, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 
0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, + 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x22, 0x5a, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, - 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 
0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x11, 0x46, 0x69, 0x6e, - 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, - 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, - 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x45, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0e, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x46, 0x0a, 0x12, - 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x73, 0x22, 0x6b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, - 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, - 0x79, 0x22, 0x4f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x65, 0x6c, - 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x82, 0x01, 0x0a, + 0x1d, 0x45, 0x6d, 
0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, + 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x42, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, + 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0xe8, 0x01, 0x0a, 0x1e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, + 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 
0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x9c, 0x01, 0x0a, + 0x11, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, + 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x45, 0x0a, 0x0f, 0x72, 0x65, 
0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x46, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, + 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x6b, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, + 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x4f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, + 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x63, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 
0x73, 0x22, 0x39, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, + 0x73, 0x22, 0x51, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x73, 0x22, 0x39, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x51, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x65, 0x6c, 0x6c, 0x73, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, - 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x43, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x12, 0x2c, 0x0a, - 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, 0x62, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x32, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x73, 0x22, 0x39, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61, 0x74, 0x65, 0x73, 0x22, 0x4f, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x02, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, - 0x36, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x65, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x43, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x22, + 0x62, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x22, 0x32, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x22, 0xb5, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, - 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, - 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 
0x54, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x23, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x39, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x47, 0x61, + 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x67, + 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x52, 0x05, 0x67, 0x61, 0x74, + 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x22, 0x36, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 
0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x68, 0x0a, 0x15, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 
0x28, 0x09, 0x52, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, - 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xd7, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x57, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x63, 0x0a, 0x11, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x47, 0x0a, 0x14, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x18, 
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86, 0x01, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x64, 0x73, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 
0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x07, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0xed, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5d, 0x0a, 0x10, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x1a, 0x70, 0x0a, 0x0e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x52, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x64, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x8d, 0x01, 0x0a, + 0x23, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x85, 0x01, 0x0a, + 0x24, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x15, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x68, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x4f, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, + 0xd7, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0d, 0x73, + 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x1a, 0x63, 0x0a, 0x11, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x38, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 
0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, + 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x12, 0x3b, + 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x6f, 0x6e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x60, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 
0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x34, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x73, 0x22, 0x4b, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, + 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 
0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x34, + 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x51, 0x0a, 0x16, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x22, 0x81, 0x01, - 0x0a, 0x19, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x69, 0x7a, 
0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x53, - 0x69, 0x7a, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x6e, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x4e, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x73, 0x22, 0x60, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x49, 0x64, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x29, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 
0x73, 0x22, 0x4b, 0x0a, 0x16, 0x47, 0x65, - 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x4e, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x52, 0x07, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, - 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x44, - 0x0a, 0x13, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, - 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x76, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, - 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x29, 0x0a, 0x07, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x52, 0x07, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x12, - 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xa0, 0x01, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, + 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, + 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x80, 0x01, 0x0a, 0x1c, 0x4c, 0x61, 0x75, 0x6e, 
0x63, 0x68, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, 0x50, 0x69, 0x6e, + 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, + 0x69, 0x6c, 
0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, - 0x6c, 0x79, 0x22, 0xa0, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1c, 0x0a, 0x09, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0xe1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, - 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x42, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x11, 0x50, 0x69, 0x6e, - 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, - 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x58, 0x0a, 0x12, - 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 
0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, + 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, + 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x93, 0x01, 0x0a, + 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 
0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x61, 0x6c, 0x22, 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x7e, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, - 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, - 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, - 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0x93, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 
0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, - 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x36, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, - 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x73, 0x22, 0x5a, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, - 0x02, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 
0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, - 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, - 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, - 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x63, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x22, 0xad, 0x04, 0x0a, 0x15, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 
0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, - 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x4f, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, - 0x75, 0x74, 0x69, 0x6c, 
0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x1a, 0x5b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x12, 0x24, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x63, 0x0a, 0x13, 0x52, 0x65, + 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, + 0x5a, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x02, 0x0a, 0x14, + 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x6b, 
0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, + 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xad, 0x04, + 0x0a, 0x15, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 
0x65, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x52, 0x0f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x12, 0x4f, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, - 0x4f, 0x0a, 0x0c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x27, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 
0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x22, 0xdb, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, - 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, - 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, - 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, - 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 
0x2e, 0x6c, 0x6f, - 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, - 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x5b, 0x0a, 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x24, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x4f, 0x0a, 0x0c, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x27, 0x0a, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xdb, 0x01, + 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, + 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, + 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 
0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, + 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x75, 0x0a, 0x25, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, - 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 
- 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, - 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, - 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x65, 0x0a, 0x15, 0x52, - 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x73, 0x22, 0x5c, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x22, 0x62, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, - 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, - 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, - 0x22, 0x16, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, - 0x73, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 
0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x26, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x22, 0x66, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, - 0x6c, 0x69, 
0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, - 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, - 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, - 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, - 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, - 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, - 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 
0x63, 0x65, + 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, + 0x76, 0x65, 0x22, 0x34, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x7e, 0x0a, 0x1b, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x65, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 
0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, - 0x53, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x22, 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x5c, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x62, 0x0a, + 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 
0x65, 0x72, 0x49, 0x64, + 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x16, 0x0a, + 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5e, + 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 
0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x66, + 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x5d, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x0a, 0x07, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x1f, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 
0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x20, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, + 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, + 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x71, 0x0a, 0x21, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, + 0x0a, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 
0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x53, 0x0a, 0x0f, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x22, 0x77, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, + 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5a, 0x0a, 0x1d, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, - 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5a, 0x0a, - 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, - 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x22, 0x6e, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x22, 0x5a, 0x0a, 0x10, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, - 0x03, 
0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, - 0x2f, 0x0a, 0x11, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x32, 0xd9, 0x21, 0x0a, 0x07, 0x56, 0x54, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x53, 0x0a, 0x0e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x55, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 
0x4f, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, - 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, - 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x22, 0x6e, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x22, 0x5a, 0x0a, 0x10, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, + 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x2f, 0x0a, 0x11, + 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xad, 0x27, + 0x0a, 0x07, 0x56, 
0x54, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x0b, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6a, 0x0a, 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 
0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, + 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1c, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x4d, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, + 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6b, + 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, + 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, - 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, - 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, - 0x2e, 0x76, 0x74, 0x61, 0x64, 
0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, - 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, - 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 
0x46, 0x75, 0x6c, 0x6c, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x41, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x12, 0x18, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, - 0x0a, 
0x0a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1a, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, - 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 
0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x53, 0x0a, - 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, - 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, - 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x00, 0x12, 0x47, 0x0a, - 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 
0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, - 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, - 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, - 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x3c, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, - 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, - 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 
0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, - 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, - 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, - 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, - 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, - 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 
0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, - 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x50, 0x0a, 0x0d, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 
0x61, - 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5c, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x5f, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x53, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, 0x6e, - 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 
0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, - 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, - 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, - 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, - 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x59, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x53, - 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x12, - 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, + 0x6f, 
0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, + 0x65, 0x6e, 0x63, 0x79, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x0a, 0x46, + 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, + 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, + 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 
0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, + 0x61, 0x74, 0x65, 0x73, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x61, 0x74, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x00, 0x12, 
0x4d, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1c, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x62, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x70, 0x6c, 
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1f, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x45, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 
0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, + 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x19, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, + 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x58, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, + 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 
0x61, + 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x0a, + 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x10, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x47, 0x65, + 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x73, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, + 0x65, 0x74, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x3f, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0x00, + 0x12, 0x4d, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x6a, 0x0a, 0x15, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x50, + 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x65, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, + 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x76, + 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, + 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x50, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 
0x12, 0x65, 0x0a, 0x14, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, + 0x61, 0x70, 0x68, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x83, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x12, 0x2e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x52, 0x65, 0x6c, 0x6f, 0x61, + 0x64, 
0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x11, 0x52, 0x65, 0x6c, + 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, + 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5f, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x22, 0x2e, + 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, 0x14, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x53, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, + 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x52, 0x75, + 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, + 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, + 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, + 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x59, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x0f, + 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1f, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x71, 0x0a, 0x18, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, + 0x12, 0x28, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, + 0x56, 0x61, 0x6c, 
0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x20, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6d, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 
0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, 0x0a, - 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, + 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, - 0x61, 0x69, 0x6e, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, - 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, - 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, 0x24, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x70, 0x0a, 0x17, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x67, + 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x09, 0x56, 0x54, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x19, 0x2e, 0x76, 0x74, 0x61, 
0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, + 0x54, 0x45, 0x78, 0x70, 0x6c, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1a, 0x2e, 0x76, 0x74, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x54, 0x45, 0x78, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x26, 0x5a, + 0x24, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, + 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -7330,361 +7987,410 @@ func file_vtadmin_proto_rawDescGZIP() []byte { } var file_vtadmin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 111) +var file_vtadmin_proto_msgTypes = make([]protoimpl.MessageInfo, 121) var file_vtadmin_proto_goTypes = []interface{}{ - (Tablet_ServingState)(0), // 0: vtadmin.Tablet.ServingState - (*Cluster)(nil), // 1: vtadmin.Cluster - (*ClusterBackup)(nil), // 2: vtadmin.ClusterBackup - (*ClusterCellsAliases)(nil), // 3: vtadmin.ClusterCellsAliases - (*ClusterCellInfo)(nil), // 4: vtadmin.ClusterCellInfo - (*ClusterShardReplicationPosition)(nil), // 5: vtadmin.ClusterShardReplicationPosition - (*ClusterWorkflows)(nil), // 6: vtadmin.ClusterWorkflows - (*Keyspace)(nil), // 7: vtadmin.Keyspace - (*Schema)(nil), // 8: vtadmin.Schema - (*Shard)(nil), // 9: vtadmin.Shard - (*SrvVSchema)(nil), // 10: vtadmin.SrvVSchema - (*Tablet)(nil), // 11: vtadmin.Tablet - (*VSchema)(nil), // 12: vtadmin.VSchema - (*Vtctld)(nil), // 13: vtadmin.Vtctld - (*VTGate)(nil), // 14: vtadmin.VTGate - (*Workflow)(nil), // 15: vtadmin.Workflow - (*CreateKeyspaceRequest)(nil), // 16: vtadmin.CreateKeyspaceRequest - (*CreateKeyspaceResponse)(nil), // 17: vtadmin.CreateKeyspaceResponse - (*CreateShardRequest)(nil), // 18: vtadmin.CreateShardRequest - (*DeleteKeyspaceRequest)(nil), // 19: 
vtadmin.DeleteKeyspaceRequest - (*DeleteShardsRequest)(nil), // 20: vtadmin.DeleteShardsRequest - (*DeleteTabletRequest)(nil), // 21: vtadmin.DeleteTabletRequest - (*DeleteTabletResponse)(nil), // 22: vtadmin.DeleteTabletResponse - (*EmergencyFailoverShardRequest)(nil), // 23: vtadmin.EmergencyFailoverShardRequest - (*EmergencyFailoverShardResponse)(nil), // 24: vtadmin.EmergencyFailoverShardResponse - (*FindSchemaRequest)(nil), // 25: vtadmin.FindSchemaRequest - (*GetBackupsRequest)(nil), // 26: vtadmin.GetBackupsRequest - (*GetBackupsResponse)(nil), // 27: vtadmin.GetBackupsResponse - (*GetCellInfosRequest)(nil), // 28: vtadmin.GetCellInfosRequest - (*GetCellInfosResponse)(nil), // 29: vtadmin.GetCellInfosResponse - (*GetCellsAliasesRequest)(nil), // 30: vtadmin.GetCellsAliasesRequest - (*GetCellsAliasesResponse)(nil), // 31: vtadmin.GetCellsAliasesResponse - (*GetClustersRequest)(nil), // 32: vtadmin.GetClustersRequest - (*GetClustersResponse)(nil), // 33: vtadmin.GetClustersResponse - (*GetFullStatusRequest)(nil), // 34: vtadmin.GetFullStatusRequest - (*GetGatesRequest)(nil), // 35: vtadmin.GetGatesRequest - (*GetGatesResponse)(nil), // 36: vtadmin.GetGatesResponse - (*GetKeyspaceRequest)(nil), // 37: vtadmin.GetKeyspaceRequest - (*GetKeyspacesRequest)(nil), // 38: vtadmin.GetKeyspacesRequest - (*GetKeyspacesResponse)(nil), // 39: vtadmin.GetKeyspacesResponse - (*GetSchemaRequest)(nil), // 40: vtadmin.GetSchemaRequest - (*GetSchemasRequest)(nil), // 41: vtadmin.GetSchemasRequest - (*GetSchemasResponse)(nil), // 42: vtadmin.GetSchemasResponse - (*GetShardReplicationPositionsRequest)(nil), // 43: vtadmin.GetShardReplicationPositionsRequest - (*GetShardReplicationPositionsResponse)(nil), // 44: vtadmin.GetShardReplicationPositionsResponse - (*GetSrvKeyspaceRequest)(nil), // 45: vtadmin.GetSrvKeyspaceRequest - (*GetSrvKeyspacesRequest)(nil), // 46: vtadmin.GetSrvKeyspacesRequest - (*GetSrvKeyspacesResponse)(nil), // 47: vtadmin.GetSrvKeyspacesResponse - 
(*GetSrvVSchemaRequest)(nil), // 48: vtadmin.GetSrvVSchemaRequest - (*GetSrvVSchemasRequest)(nil), // 49: vtadmin.GetSrvVSchemasRequest - (*GetSrvVSchemasResponse)(nil), // 50: vtadmin.GetSrvVSchemasResponse - (*GetSchemaTableSizeOptions)(nil), // 51: vtadmin.GetSchemaTableSizeOptions - (*GetTabletRequest)(nil), // 52: vtadmin.GetTabletRequest - (*GetTabletsRequest)(nil), // 53: vtadmin.GetTabletsRequest - (*GetTabletsResponse)(nil), // 54: vtadmin.GetTabletsResponse - (*GetTopologyPathRequest)(nil), // 55: vtadmin.GetTopologyPathRequest - (*GetVSchemaRequest)(nil), // 56: vtadmin.GetVSchemaRequest - (*GetVSchemasRequest)(nil), // 57: vtadmin.GetVSchemasRequest - (*GetVSchemasResponse)(nil), // 58: vtadmin.GetVSchemasResponse - (*GetVtctldsRequest)(nil), // 59: vtadmin.GetVtctldsRequest - (*GetVtctldsResponse)(nil), // 60: vtadmin.GetVtctldsResponse - (*GetWorkflowRequest)(nil), // 61: vtadmin.GetWorkflowRequest - (*GetWorkflowsRequest)(nil), // 62: vtadmin.GetWorkflowsRequest - (*GetWorkflowsResponse)(nil), // 63: vtadmin.GetWorkflowsResponse - (*PingTabletRequest)(nil), // 64: vtadmin.PingTabletRequest - (*PingTabletResponse)(nil), // 65: vtadmin.PingTabletResponse - (*PlannedFailoverShardRequest)(nil), // 66: vtadmin.PlannedFailoverShardRequest - (*PlannedFailoverShardResponse)(nil), // 67: vtadmin.PlannedFailoverShardResponse - (*RebuildKeyspaceGraphRequest)(nil), // 68: vtadmin.RebuildKeyspaceGraphRequest - (*RebuildKeyspaceGraphResponse)(nil), // 69: vtadmin.RebuildKeyspaceGraphResponse - (*RefreshStateRequest)(nil), // 70: vtadmin.RefreshStateRequest - (*RefreshStateResponse)(nil), // 71: vtadmin.RefreshStateResponse - (*ReloadSchemasRequest)(nil), // 72: vtadmin.ReloadSchemasRequest - (*ReloadSchemasResponse)(nil), // 73: vtadmin.ReloadSchemasResponse - (*ReloadSchemaShardRequest)(nil), // 74: vtadmin.ReloadSchemaShardRequest - (*ReloadSchemaShardResponse)(nil), // 75: vtadmin.ReloadSchemaShardResponse - (*RefreshTabletReplicationSourceRequest)(nil), // 76: 
vtadmin.RefreshTabletReplicationSourceRequest - (*RefreshTabletReplicationSourceResponse)(nil), // 77: vtadmin.RefreshTabletReplicationSourceResponse - (*RemoveKeyspaceCellRequest)(nil), // 78: vtadmin.RemoveKeyspaceCellRequest - (*RemoveKeyspaceCellResponse)(nil), // 79: vtadmin.RemoveKeyspaceCellResponse - (*RunHealthCheckRequest)(nil), // 80: vtadmin.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 81: vtadmin.RunHealthCheckResponse - (*SetReadOnlyRequest)(nil), // 82: vtadmin.SetReadOnlyRequest - (*SetReadOnlyResponse)(nil), // 83: vtadmin.SetReadOnlyResponse - (*SetReadWriteRequest)(nil), // 84: vtadmin.SetReadWriteRequest - (*SetReadWriteResponse)(nil), // 85: vtadmin.SetReadWriteResponse - (*StartReplicationRequest)(nil), // 86: vtadmin.StartReplicationRequest - (*StartReplicationResponse)(nil), // 87: vtadmin.StartReplicationResponse - (*StopReplicationRequest)(nil), // 88: vtadmin.StopReplicationRequest - (*StopReplicationResponse)(nil), // 89: vtadmin.StopReplicationResponse - (*TabletExternallyPromotedRequest)(nil), // 90: vtadmin.TabletExternallyPromotedRequest - (*TabletExternallyPromotedResponse)(nil), // 91: vtadmin.TabletExternallyPromotedResponse - (*TabletExternallyReparentedRequest)(nil), // 92: vtadmin.TabletExternallyReparentedRequest - (*ValidateRequest)(nil), // 93: vtadmin.ValidateRequest - (*ValidateKeyspaceRequest)(nil), // 94: vtadmin.ValidateKeyspaceRequest - (*ValidateSchemaKeyspaceRequest)(nil), // 95: vtadmin.ValidateSchemaKeyspaceRequest - (*ValidateShardRequest)(nil), // 96: vtadmin.ValidateShardRequest - (*ValidateVersionKeyspaceRequest)(nil), // 97: vtadmin.ValidateVersionKeyspaceRequest - (*ValidateVersionShardRequest)(nil), // 98: vtadmin.ValidateVersionShardRequest - (*VTExplainRequest)(nil), // 99: vtadmin.VTExplainRequest - (*VTExplainResponse)(nil), // 100: vtadmin.VTExplainResponse - nil, // 101: vtadmin.ClusterCellsAliases.AliasesEntry - nil, // 102: vtadmin.Keyspace.ShardsEntry - nil, // 103: 
vtadmin.Schema.TableSizesEntry - (*Schema_ShardTableSize)(nil), // 104: vtadmin.Schema.ShardTableSize - (*Schema_TableSize)(nil), // 105: vtadmin.Schema.TableSize - nil, // 106: vtadmin.Schema.TableSize.ByShardEntry - nil, // 107: vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry - nil, // 108: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry - (*ReloadSchemasResponse_KeyspaceResult)(nil), // 109: vtadmin.ReloadSchemasResponse.KeyspaceResult - (*ReloadSchemasResponse_ShardResult)(nil), // 110: vtadmin.ReloadSchemasResponse.ShardResult - (*ReloadSchemasResponse_TabletResult)(nil), // 111: vtadmin.ReloadSchemasResponse.TabletResult - (*mysqlctl.BackupInfo)(nil), // 112: mysqlctl.BackupInfo - (*topodata.CellInfo)(nil), // 113: topodata.CellInfo - (*vtctldata.ShardReplicationPositionsResponse)(nil), // 114: vtctldata.ShardReplicationPositionsResponse - (*vtctldata.Keyspace)(nil), // 115: vtctldata.Keyspace - (*tabletmanagerdata.TableDefinition)(nil), // 116: tabletmanagerdata.TableDefinition - (*vtctldata.Shard)(nil), // 117: vtctldata.Shard - (*vschema.SrvVSchema)(nil), // 118: vschema.SrvVSchema - (*topodata.Tablet)(nil), // 119: topodata.Tablet - (*vschema.Keyspace)(nil), // 120: vschema.Keyspace - (*vtctldata.Workflow)(nil), // 121: vtctldata.Workflow - (*vtctldata.CreateKeyspaceRequest)(nil), // 122: vtctldata.CreateKeyspaceRequest - (*vtctldata.CreateShardRequest)(nil), // 123: vtctldata.CreateShardRequest - (*vtctldata.DeleteKeyspaceRequest)(nil), // 124: vtctldata.DeleteKeyspaceRequest - (*vtctldata.DeleteShardsRequest)(nil), // 125: vtctldata.DeleteShardsRequest - (*topodata.TabletAlias)(nil), // 126: topodata.TabletAlias - (*vtctldata.EmergencyReparentShardRequest)(nil), // 127: vtctldata.EmergencyReparentShardRequest - (*logutil.Event)(nil), // 128: logutil.Event - (*vtctldata.GetBackupsRequest)(nil), // 129: vtctldata.GetBackupsRequest - (*vtctldata.PlannedReparentShardRequest)(nil), // 130: vtctldata.PlannedReparentShardRequest - 
(*topodata.CellsAlias)(nil), // 131: topodata.CellsAlias - (*vtctldata.GetSrvKeyspacesResponse)(nil), // 132: vtctldata.GetSrvKeyspacesResponse - (*vtctldata.CreateShardResponse)(nil), // 133: vtctldata.CreateShardResponse - (*vtctldata.DeleteKeyspaceResponse)(nil), // 134: vtctldata.DeleteKeyspaceResponse - (*vtctldata.DeleteShardsResponse)(nil), // 135: vtctldata.DeleteShardsResponse - (*vtctldata.GetFullStatusResponse)(nil), // 136: vtctldata.GetFullStatusResponse - (*vtctldata.GetTopologyPathResponse)(nil), // 137: vtctldata.GetTopologyPathResponse - (*vtctldata.ValidateResponse)(nil), // 138: vtctldata.ValidateResponse - (*vtctldata.ValidateKeyspaceResponse)(nil), // 139: vtctldata.ValidateKeyspaceResponse - (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 140: vtctldata.ValidateSchemaKeyspaceResponse - (*vtctldata.ValidateShardResponse)(nil), // 141: vtctldata.ValidateShardResponse - (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 142: vtctldata.ValidateVersionKeyspaceResponse - (*vtctldata.ValidateVersionShardResponse)(nil), // 143: vtctldata.ValidateVersionShardResponse + (Tablet_ServingState)(0), // 0: vtadmin.Tablet.ServingState + (*Cluster)(nil), // 1: vtadmin.Cluster + (*ClusterBackup)(nil), // 2: vtadmin.ClusterBackup + (*ClusterCellsAliases)(nil), // 3: vtadmin.ClusterCellsAliases + (*ClusterCellInfo)(nil), // 4: vtadmin.ClusterCellInfo + (*ClusterShardReplicationPosition)(nil), // 5: vtadmin.ClusterShardReplicationPosition + (*ClusterWorkflows)(nil), // 6: vtadmin.ClusterWorkflows + (*Keyspace)(nil), // 7: vtadmin.Keyspace + (*Schema)(nil), // 8: vtadmin.Schema + (*SchemaMigration)(nil), // 9: vtadmin.SchemaMigration + (*Shard)(nil), // 10: vtadmin.Shard + (*SrvVSchema)(nil), // 11: vtadmin.SrvVSchema + (*Tablet)(nil), // 12: vtadmin.Tablet + (*VSchema)(nil), // 13: vtadmin.VSchema + (*Vtctld)(nil), // 14: vtadmin.Vtctld + (*VTGate)(nil), // 15: vtadmin.VTGate + (*Workflow)(nil), // 16: vtadmin.Workflow + (*ApplySchemaRequest)(nil), // 17: 
vtadmin.ApplySchemaRequest + (*CancelSchemaMigrationRequest)(nil), // 18: vtadmin.CancelSchemaMigrationRequest + (*CleanupSchemaMigrationRequest)(nil), // 19: vtadmin.CleanupSchemaMigrationRequest + (*CompleteSchemaMigrationRequest)(nil), // 20: vtadmin.CompleteSchemaMigrationRequest + (*CreateKeyspaceRequest)(nil), // 21: vtadmin.CreateKeyspaceRequest + (*CreateKeyspaceResponse)(nil), // 22: vtadmin.CreateKeyspaceResponse + (*CreateShardRequest)(nil), // 23: vtadmin.CreateShardRequest + (*DeleteKeyspaceRequest)(nil), // 24: vtadmin.DeleteKeyspaceRequest + (*DeleteShardsRequest)(nil), // 25: vtadmin.DeleteShardsRequest + (*DeleteTabletRequest)(nil), // 26: vtadmin.DeleteTabletRequest + (*DeleteTabletResponse)(nil), // 27: vtadmin.DeleteTabletResponse + (*EmergencyFailoverShardRequest)(nil), // 28: vtadmin.EmergencyFailoverShardRequest + (*EmergencyFailoverShardResponse)(nil), // 29: vtadmin.EmergencyFailoverShardResponse + (*FindSchemaRequest)(nil), // 30: vtadmin.FindSchemaRequest + (*GetBackupsRequest)(nil), // 31: vtadmin.GetBackupsRequest + (*GetBackupsResponse)(nil), // 32: vtadmin.GetBackupsResponse + (*GetCellInfosRequest)(nil), // 33: vtadmin.GetCellInfosRequest + (*GetCellInfosResponse)(nil), // 34: vtadmin.GetCellInfosResponse + (*GetCellsAliasesRequest)(nil), // 35: vtadmin.GetCellsAliasesRequest + (*GetCellsAliasesResponse)(nil), // 36: vtadmin.GetCellsAliasesResponse + (*GetClustersRequest)(nil), // 37: vtadmin.GetClustersRequest + (*GetClustersResponse)(nil), // 38: vtadmin.GetClustersResponse + (*GetFullStatusRequest)(nil), // 39: vtadmin.GetFullStatusRequest + (*GetGatesRequest)(nil), // 40: vtadmin.GetGatesRequest + (*GetGatesResponse)(nil), // 41: vtadmin.GetGatesResponse + (*GetKeyspaceRequest)(nil), // 42: vtadmin.GetKeyspaceRequest + (*GetKeyspacesRequest)(nil), // 43: vtadmin.GetKeyspacesRequest + (*GetKeyspacesResponse)(nil), // 44: vtadmin.GetKeyspacesResponse + (*GetSchemaRequest)(nil), // 45: vtadmin.GetSchemaRequest + 
(*GetSchemasRequest)(nil), // 46: vtadmin.GetSchemasRequest + (*GetSchemasResponse)(nil), // 47: vtadmin.GetSchemasResponse + (*GetSchemaMigrationsRequest)(nil), // 48: vtadmin.GetSchemaMigrationsRequest + (*GetSchemaMigrationsResponse)(nil), // 49: vtadmin.GetSchemaMigrationsResponse + (*GetShardReplicationPositionsRequest)(nil), // 50: vtadmin.GetShardReplicationPositionsRequest + (*GetShardReplicationPositionsResponse)(nil), // 51: vtadmin.GetShardReplicationPositionsResponse + (*GetSrvKeyspaceRequest)(nil), // 52: vtadmin.GetSrvKeyspaceRequest + (*GetSrvKeyspacesRequest)(nil), // 53: vtadmin.GetSrvKeyspacesRequest + (*GetSrvKeyspacesResponse)(nil), // 54: vtadmin.GetSrvKeyspacesResponse + (*GetSrvVSchemaRequest)(nil), // 55: vtadmin.GetSrvVSchemaRequest + (*GetSrvVSchemasRequest)(nil), // 56: vtadmin.GetSrvVSchemasRequest + (*GetSrvVSchemasResponse)(nil), // 57: vtadmin.GetSrvVSchemasResponse + (*GetSchemaTableSizeOptions)(nil), // 58: vtadmin.GetSchemaTableSizeOptions + (*GetTabletRequest)(nil), // 59: vtadmin.GetTabletRequest + (*GetTabletsRequest)(nil), // 60: vtadmin.GetTabletsRequest + (*GetTabletsResponse)(nil), // 61: vtadmin.GetTabletsResponse + (*GetTopologyPathRequest)(nil), // 62: vtadmin.GetTopologyPathRequest + (*GetVSchemaRequest)(nil), // 63: vtadmin.GetVSchemaRequest + (*GetVSchemasRequest)(nil), // 64: vtadmin.GetVSchemasRequest + (*GetVSchemasResponse)(nil), // 65: vtadmin.GetVSchemasResponse + (*GetVtctldsRequest)(nil), // 66: vtadmin.GetVtctldsRequest + (*GetVtctldsResponse)(nil), // 67: vtadmin.GetVtctldsResponse + (*GetWorkflowRequest)(nil), // 68: vtadmin.GetWorkflowRequest + (*GetWorkflowsRequest)(nil), // 69: vtadmin.GetWorkflowsRequest + (*GetWorkflowsResponse)(nil), // 70: vtadmin.GetWorkflowsResponse + (*LaunchSchemaMigrationRequest)(nil), // 71: vtadmin.LaunchSchemaMigrationRequest + (*PingTabletRequest)(nil), // 72: vtadmin.PingTabletRequest + (*PingTabletResponse)(nil), // 73: vtadmin.PingTabletResponse + 
(*PlannedFailoverShardRequest)(nil), // 74: vtadmin.PlannedFailoverShardRequest + (*PlannedFailoverShardResponse)(nil), // 75: vtadmin.PlannedFailoverShardResponse + (*RebuildKeyspaceGraphRequest)(nil), // 76: vtadmin.RebuildKeyspaceGraphRequest + (*RebuildKeyspaceGraphResponse)(nil), // 77: vtadmin.RebuildKeyspaceGraphResponse + (*RefreshStateRequest)(nil), // 78: vtadmin.RefreshStateRequest + (*RefreshStateResponse)(nil), // 79: vtadmin.RefreshStateResponse + (*ReloadSchemasRequest)(nil), // 80: vtadmin.ReloadSchemasRequest + (*ReloadSchemasResponse)(nil), // 81: vtadmin.ReloadSchemasResponse + (*ReloadSchemaShardRequest)(nil), // 82: vtadmin.ReloadSchemaShardRequest + (*ReloadSchemaShardResponse)(nil), // 83: vtadmin.ReloadSchemaShardResponse + (*RefreshTabletReplicationSourceRequest)(nil), // 84: vtadmin.RefreshTabletReplicationSourceRequest + (*RefreshTabletReplicationSourceResponse)(nil), // 85: vtadmin.RefreshTabletReplicationSourceResponse + (*RemoveKeyspaceCellRequest)(nil), // 86: vtadmin.RemoveKeyspaceCellRequest + (*RemoveKeyspaceCellResponse)(nil), // 87: vtadmin.RemoveKeyspaceCellResponse + (*RetrySchemaMigrationRequest)(nil), // 88: vtadmin.RetrySchemaMigrationRequest + (*RunHealthCheckRequest)(nil), // 89: vtadmin.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 90: vtadmin.RunHealthCheckResponse + (*SetReadOnlyRequest)(nil), // 91: vtadmin.SetReadOnlyRequest + (*SetReadOnlyResponse)(nil), // 92: vtadmin.SetReadOnlyResponse + (*SetReadWriteRequest)(nil), // 93: vtadmin.SetReadWriteRequest + (*SetReadWriteResponse)(nil), // 94: vtadmin.SetReadWriteResponse + (*StartReplicationRequest)(nil), // 95: vtadmin.StartReplicationRequest + (*StartReplicationResponse)(nil), // 96: vtadmin.StartReplicationResponse + (*StopReplicationRequest)(nil), // 97: vtadmin.StopReplicationRequest + (*StopReplicationResponse)(nil), // 98: vtadmin.StopReplicationResponse + (*TabletExternallyPromotedRequest)(nil), // 99: vtadmin.TabletExternallyPromotedRequest + 
(*TabletExternallyPromotedResponse)(nil), // 100: vtadmin.TabletExternallyPromotedResponse + (*TabletExternallyReparentedRequest)(nil), // 101: vtadmin.TabletExternallyReparentedRequest + (*ValidateRequest)(nil), // 102: vtadmin.ValidateRequest + (*ValidateKeyspaceRequest)(nil), // 103: vtadmin.ValidateKeyspaceRequest + (*ValidateSchemaKeyspaceRequest)(nil), // 104: vtadmin.ValidateSchemaKeyspaceRequest + (*ValidateShardRequest)(nil), // 105: vtadmin.ValidateShardRequest + (*ValidateVersionKeyspaceRequest)(nil), // 106: vtadmin.ValidateVersionKeyspaceRequest + (*ValidateVersionShardRequest)(nil), // 107: vtadmin.ValidateVersionShardRequest + (*VTExplainRequest)(nil), // 108: vtadmin.VTExplainRequest + (*VTExplainResponse)(nil), // 109: vtadmin.VTExplainResponse + nil, // 110: vtadmin.ClusterCellsAliases.AliasesEntry + nil, // 111: vtadmin.Keyspace.ShardsEntry + nil, // 112: vtadmin.Schema.TableSizesEntry + (*Schema_ShardTableSize)(nil), // 113: vtadmin.Schema.ShardTableSize + (*Schema_TableSize)(nil), // 114: vtadmin.Schema.TableSize + nil, // 115: vtadmin.Schema.TableSize.ByShardEntry + (*GetSchemaMigrationsRequest_ClusterRequest)(nil), // 116: vtadmin.GetSchemaMigrationsRequest.ClusterRequest + nil, // 117: vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry + nil, // 118: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry + (*ReloadSchemasResponse_KeyspaceResult)(nil), // 119: vtadmin.ReloadSchemasResponse.KeyspaceResult + (*ReloadSchemasResponse_ShardResult)(nil), // 120: vtadmin.ReloadSchemasResponse.ShardResult + (*ReloadSchemasResponse_TabletResult)(nil), // 121: vtadmin.ReloadSchemasResponse.TabletResult + (*mysqlctl.BackupInfo)(nil), // 122: mysqlctl.BackupInfo + (*topodata.CellInfo)(nil), // 123: topodata.CellInfo + (*vtctldata.ShardReplicationPositionsResponse)(nil), // 124: vtctldata.ShardReplicationPositionsResponse + (*vtctldata.Keyspace)(nil), // 125: vtctldata.Keyspace + (*tabletmanagerdata.TableDefinition)(nil), // 126: 
tabletmanagerdata.TableDefinition + (*vtctldata.SchemaMigration)(nil), // 127: vtctldata.SchemaMigration + (*vtctldata.Shard)(nil), // 128: vtctldata.Shard + (*vschema.SrvVSchema)(nil), // 129: vschema.SrvVSchema + (*topodata.Tablet)(nil), // 130: topodata.Tablet + (*vschema.Keyspace)(nil), // 131: vschema.Keyspace + (*vtctldata.Workflow)(nil), // 132: vtctldata.Workflow + (*vtctldata.ApplySchemaRequest)(nil), // 133: vtctldata.ApplySchemaRequest + (*vtctldata.CancelSchemaMigrationRequest)(nil), // 134: vtctldata.CancelSchemaMigrationRequest + (*vtctldata.CleanupSchemaMigrationRequest)(nil), // 135: vtctldata.CleanupSchemaMigrationRequest + (*vtctldata.CompleteSchemaMigrationRequest)(nil), // 136: vtctldata.CompleteSchemaMigrationRequest + (*vtctldata.CreateKeyspaceRequest)(nil), // 137: vtctldata.CreateKeyspaceRequest + (*vtctldata.CreateShardRequest)(nil), // 138: vtctldata.CreateShardRequest + (*vtctldata.DeleteKeyspaceRequest)(nil), // 139: vtctldata.DeleteKeyspaceRequest + (*vtctldata.DeleteShardsRequest)(nil), // 140: vtctldata.DeleteShardsRequest + (*topodata.TabletAlias)(nil), // 141: topodata.TabletAlias + (*vtctldata.EmergencyReparentShardRequest)(nil), // 142: vtctldata.EmergencyReparentShardRequest + (*logutil.Event)(nil), // 143: logutil.Event + (*vtctldata.GetBackupsRequest)(nil), // 144: vtctldata.GetBackupsRequest + (*vtctldata.LaunchSchemaMigrationRequest)(nil), // 145: vtctldata.LaunchSchemaMigrationRequest + (*vtctldata.PlannedReparentShardRequest)(nil), // 146: vtctldata.PlannedReparentShardRequest + (*vtctldata.RetrySchemaMigrationRequest)(nil), // 147: vtctldata.RetrySchemaMigrationRequest + (*topodata.CellsAlias)(nil), // 148: topodata.CellsAlias + (*vtctldata.GetSchemaMigrationsRequest)(nil), // 149: vtctldata.GetSchemaMigrationsRequest + (*vtctldata.GetSrvKeyspacesResponse)(nil), // 150: vtctldata.GetSrvKeyspacesResponse + (*vtctldata.ApplySchemaResponse)(nil), // 151: vtctldata.ApplySchemaResponse + 
(*vtctldata.CancelSchemaMigrationResponse)(nil), // 152: vtctldata.CancelSchemaMigrationResponse + (*vtctldata.CleanupSchemaMigrationResponse)(nil), // 153: vtctldata.CleanupSchemaMigrationResponse + (*vtctldata.CompleteSchemaMigrationResponse)(nil), // 154: vtctldata.CompleteSchemaMigrationResponse + (*vtctldata.CreateShardResponse)(nil), // 155: vtctldata.CreateShardResponse + (*vtctldata.DeleteKeyspaceResponse)(nil), // 156: vtctldata.DeleteKeyspaceResponse + (*vtctldata.DeleteShardsResponse)(nil), // 157: vtctldata.DeleteShardsResponse + (*vtctldata.GetFullStatusResponse)(nil), // 158: vtctldata.GetFullStatusResponse + (*vtctldata.GetTopologyPathResponse)(nil), // 159: vtctldata.GetTopologyPathResponse + (*vtctldata.LaunchSchemaMigrationResponse)(nil), // 160: vtctldata.LaunchSchemaMigrationResponse + (*vtctldata.RetrySchemaMigrationResponse)(nil), // 161: vtctldata.RetrySchemaMigrationResponse + (*vtctldata.ValidateResponse)(nil), // 162: vtctldata.ValidateResponse + (*vtctldata.ValidateKeyspaceResponse)(nil), // 163: vtctldata.ValidateKeyspaceResponse + (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 164: vtctldata.ValidateSchemaKeyspaceResponse + (*vtctldata.ValidateShardResponse)(nil), // 165: vtctldata.ValidateShardResponse + (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 166: vtctldata.ValidateVersionKeyspaceResponse + (*vtctldata.ValidateVersionShardResponse)(nil), // 167: vtctldata.ValidateVersionShardResponse } var file_vtadmin_proto_depIdxs = []int32{ 1, // 0: vtadmin.ClusterBackup.cluster:type_name -> vtadmin.Cluster - 112, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo + 122, // 1: vtadmin.ClusterBackup.backup:type_name -> mysqlctl.BackupInfo 1, // 2: vtadmin.ClusterCellsAliases.cluster:type_name -> vtadmin.Cluster - 101, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry + 110, // 3: vtadmin.ClusterCellsAliases.aliases:type_name -> vtadmin.ClusterCellsAliases.AliasesEntry 
1, // 4: vtadmin.ClusterCellInfo.cluster:type_name -> vtadmin.Cluster - 113, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo + 123, // 5: vtadmin.ClusterCellInfo.cell_info:type_name -> topodata.CellInfo 1, // 6: vtadmin.ClusterShardReplicationPosition.cluster:type_name -> vtadmin.Cluster - 114, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse - 15, // 8: vtadmin.ClusterWorkflows.workflows:type_name -> vtadmin.Workflow + 124, // 7: vtadmin.ClusterShardReplicationPosition.position_info:type_name -> vtctldata.ShardReplicationPositionsResponse + 16, // 8: vtadmin.ClusterWorkflows.workflows:type_name -> vtadmin.Workflow 1, // 9: vtadmin.Keyspace.cluster:type_name -> vtadmin.Cluster - 115, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace - 102, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry + 125, // 10: vtadmin.Keyspace.keyspace:type_name -> vtctldata.Keyspace + 111, // 11: vtadmin.Keyspace.shards:type_name -> vtadmin.Keyspace.ShardsEntry 1, // 12: vtadmin.Schema.cluster:type_name -> vtadmin.Cluster - 116, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition - 103, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry - 1, // 15: vtadmin.Shard.cluster:type_name -> vtadmin.Cluster - 117, // 16: vtadmin.Shard.shard:type_name -> vtctldata.Shard - 1, // 17: vtadmin.SrvVSchema.cluster:type_name -> vtadmin.Cluster - 118, // 18: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema - 1, // 19: vtadmin.Tablet.cluster:type_name -> vtadmin.Cluster - 119, // 20: vtadmin.Tablet.tablet:type_name -> topodata.Tablet - 0, // 21: vtadmin.Tablet.state:type_name -> vtadmin.Tablet.ServingState - 1, // 22: vtadmin.VSchema.cluster:type_name -> vtadmin.Cluster - 120, // 23: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace - 1, // 24: vtadmin.Vtctld.cluster:type_name -> vtadmin.Cluster - 1, 
// 25: vtadmin.VTGate.cluster:type_name -> vtadmin.Cluster - 1, // 26: vtadmin.Workflow.cluster:type_name -> vtadmin.Cluster - 121, // 27: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow - 122, // 28: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest - 7, // 29: vtadmin.CreateKeyspaceResponse.keyspace:type_name -> vtadmin.Keyspace - 123, // 30: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest - 124, // 31: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest - 125, // 32: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest - 126, // 33: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias - 1, // 34: vtadmin.DeleteTabletResponse.cluster:type_name -> vtadmin.Cluster - 127, // 35: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest - 1, // 36: vtadmin.EmergencyFailoverShardResponse.cluster:type_name -> vtadmin.Cluster - 126, // 37: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 128, // 38: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event - 51, // 39: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions - 129, // 40: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest - 2, // 41: vtadmin.GetBackupsResponse.backups:type_name -> vtadmin.ClusterBackup - 4, // 42: vtadmin.GetCellInfosResponse.cell_infos:type_name -> vtadmin.ClusterCellInfo - 3, // 43: vtadmin.GetCellsAliasesResponse.aliases:type_name -> vtadmin.ClusterCellsAliases - 1, // 44: vtadmin.GetClustersResponse.clusters:type_name -> vtadmin.Cluster - 126, // 45: vtadmin.GetFullStatusRequest.alias:type_name -> topodata.TabletAlias - 14, // 46: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate - 7, // 47: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace - 51, // 
48: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions - 51, // 49: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions - 8, // 50: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema - 5, // 51: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition - 107, // 52: vtadmin.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry - 10, // 53: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema - 126, // 54: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias - 11, // 55: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet - 12, // 56: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema - 13, // 57: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld - 108, // 58: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry - 126, // 59: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias - 1, // 60: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster - 130, // 61: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest - 1, // 62: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster - 126, // 63: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 128, // 64: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event - 126, // 65: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias - 1, // 66: vtadmin.RefreshStateResponse.cluster:type_name -> vtadmin.Cluster - 126, // 67: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias - 109, // 68: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult - 110, // 69: 
vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult - 111, // 70: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult - 128, // 71: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event - 126, // 72: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias - 126, // 73: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias - 1, // 74: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster - 126, // 75: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias - 1, // 76: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster - 126, // 77: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias - 126, // 78: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias - 126, // 79: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias - 1, // 80: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster - 126, // 81: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias - 1, // 82: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster - 126, // 83: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias - 1, // 84: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster - 126, // 85: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias - 126, // 86: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias - 126, // 87: vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias - 131, // 88: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias - 117, // 89: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard - 105, // 90: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize 
- 106, // 91: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry - 104, // 92: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize - 132, // 93: vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> vtctldata.GetSrvKeyspacesResponse - 6, // 94: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows - 7, // 95: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace - 128, // 96: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event - 9, // 97: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard - 128, // 98: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event - 11, // 99: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet - 16, // 100: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest - 18, // 101: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest - 19, // 102: vtadmin.VTAdmin.DeleteKeyspace:input_type -> vtadmin.DeleteKeyspaceRequest - 20, // 103: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest - 21, // 104: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest - 23, // 105: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest - 25, // 106: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest - 26, // 107: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest - 28, // 108: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest - 30, // 109: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest - 32, // 110: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest - 34, // 111: vtadmin.VTAdmin.GetFullStatus:input_type -> vtadmin.GetFullStatusRequest - 35, // 112: vtadmin.VTAdmin.GetGates:input_type -> 
vtadmin.GetGatesRequest - 37, // 113: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest - 38, // 114: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest - 40, // 115: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest - 41, // 116: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest - 43, // 117: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest - 45, // 118: vtadmin.VTAdmin.GetSrvKeyspace:input_type -> vtadmin.GetSrvKeyspaceRequest - 46, // 119: vtadmin.VTAdmin.GetSrvKeyspaces:input_type -> vtadmin.GetSrvKeyspacesRequest - 48, // 120: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest - 49, // 121: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest - 52, // 122: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest - 53, // 123: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest - 55, // 124: vtadmin.VTAdmin.GetTopologyPath:input_type -> vtadmin.GetTopologyPathRequest - 56, // 125: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest - 57, // 126: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest - 59, // 127: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest - 61, // 128: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest - 62, // 129: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest - 64, // 130: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest - 66, // 131: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest - 68, // 132: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest - 70, // 133: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest - 76, // 134: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest - 72, // 135: 
vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest - 74, // 136: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest - 78, // 137: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest - 80, // 138: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest - 82, // 139: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest - 84, // 140: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest - 86, // 141: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest - 88, // 142: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest - 90, // 143: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> vtadmin.TabletExternallyPromotedRequest - 93, // 144: vtadmin.VTAdmin.Validate:input_type -> vtadmin.ValidateRequest - 94, // 145: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest - 95, // 146: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest - 96, // 147: vtadmin.VTAdmin.ValidateShard:input_type -> vtadmin.ValidateShardRequest - 97, // 148: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest - 98, // 149: vtadmin.VTAdmin.ValidateVersionShard:input_type -> vtadmin.ValidateVersionShardRequest - 99, // 150: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest - 17, // 151: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse - 133, // 152: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse - 134, // 153: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse - 135, // 154: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse - 22, // 155: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse - 24, // 156: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> 
vtadmin.EmergencyFailoverShardResponse - 8, // 157: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema - 27, // 158: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse - 29, // 159: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse - 31, // 160: vtadmin.VTAdmin.GetCellsAliases:output_type -> vtadmin.GetCellsAliasesResponse - 33, // 161: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse - 136, // 162: vtadmin.VTAdmin.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse - 36, // 163: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse - 7, // 164: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace - 39, // 165: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse - 8, // 166: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema - 42, // 167: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse - 44, // 168: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse - 132, // 169: vtadmin.VTAdmin.GetSrvKeyspace:output_type -> vtctldata.GetSrvKeyspacesResponse - 47, // 170: vtadmin.VTAdmin.GetSrvKeyspaces:output_type -> vtadmin.GetSrvKeyspacesResponse - 10, // 171: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema - 50, // 172: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse - 11, // 173: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet - 54, // 174: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse - 137, // 175: vtadmin.VTAdmin.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse - 12, // 176: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema - 58, // 177: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse - 60, // 178: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse - 15, // 179: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow - 63, // 180: 
vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse - 65, // 181: vtadmin.VTAdmin.PingTablet:output_type -> vtadmin.PingTabletResponse - 67, // 182: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse - 69, // 183: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse - 71, // 184: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse - 77, // 185: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse - 73, // 186: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse - 75, // 187: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse - 79, // 188: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse - 81, // 189: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse - 83, // 190: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse - 85, // 191: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse - 87, // 192: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse - 89, // 193: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse - 91, // 194: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse - 138, // 195: vtadmin.VTAdmin.Validate:output_type -> vtctldata.ValidateResponse - 139, // 196: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse - 140, // 197: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse - 141, // 198: vtadmin.VTAdmin.ValidateShard:output_type -> vtctldata.ValidateShardResponse - 142, // 199: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse - 143, // 200: vtadmin.VTAdmin.ValidateVersionShard:output_type -> 
vtctldata.ValidateVersionShardResponse - 100, // 201: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse - 151, // [151:202] is the sub-list for method output_type - 100, // [100:151] is the sub-list for method input_type - 100, // [100:100] is the sub-list for extension type_name - 100, // [100:100] is the sub-list for extension extendee - 0, // [0:100] is the sub-list for field type_name + 126, // 13: vtadmin.Schema.table_definitions:type_name -> tabletmanagerdata.TableDefinition + 112, // 14: vtadmin.Schema.table_sizes:type_name -> vtadmin.Schema.TableSizesEntry + 1, // 15: vtadmin.SchemaMigration.cluster:type_name -> vtadmin.Cluster + 127, // 16: vtadmin.SchemaMigration.schema_migration:type_name -> vtctldata.SchemaMigration + 1, // 17: vtadmin.Shard.cluster:type_name -> vtadmin.Cluster + 128, // 18: vtadmin.Shard.shard:type_name -> vtctldata.Shard + 1, // 19: vtadmin.SrvVSchema.cluster:type_name -> vtadmin.Cluster + 129, // 20: vtadmin.SrvVSchema.srv_v_schema:type_name -> vschema.SrvVSchema + 1, // 21: vtadmin.Tablet.cluster:type_name -> vtadmin.Cluster + 130, // 22: vtadmin.Tablet.tablet:type_name -> topodata.Tablet + 0, // 23: vtadmin.Tablet.state:type_name -> vtadmin.Tablet.ServingState + 1, // 24: vtadmin.VSchema.cluster:type_name -> vtadmin.Cluster + 131, // 25: vtadmin.VSchema.v_schema:type_name -> vschema.Keyspace + 1, // 26: vtadmin.Vtctld.cluster:type_name -> vtadmin.Cluster + 1, // 27: vtadmin.VTGate.cluster:type_name -> vtadmin.Cluster + 1, // 28: vtadmin.Workflow.cluster:type_name -> vtadmin.Cluster + 132, // 29: vtadmin.Workflow.workflow:type_name -> vtctldata.Workflow + 133, // 30: vtadmin.ApplySchemaRequest.request:type_name -> vtctldata.ApplySchemaRequest + 134, // 31: vtadmin.CancelSchemaMigrationRequest.request:type_name -> vtctldata.CancelSchemaMigrationRequest + 135, // 32: vtadmin.CleanupSchemaMigrationRequest.request:type_name -> vtctldata.CleanupSchemaMigrationRequest + 136, // 33: 
vtadmin.CompleteSchemaMigrationRequest.request:type_name -> vtctldata.CompleteSchemaMigrationRequest + 137, // 34: vtadmin.CreateKeyspaceRequest.options:type_name -> vtctldata.CreateKeyspaceRequest + 7, // 35: vtadmin.CreateKeyspaceResponse.keyspace:type_name -> vtadmin.Keyspace + 138, // 36: vtadmin.CreateShardRequest.options:type_name -> vtctldata.CreateShardRequest + 139, // 37: vtadmin.DeleteKeyspaceRequest.options:type_name -> vtctldata.DeleteKeyspaceRequest + 140, // 38: vtadmin.DeleteShardsRequest.options:type_name -> vtctldata.DeleteShardsRequest + 141, // 39: vtadmin.DeleteTabletRequest.alias:type_name -> topodata.TabletAlias + 1, // 40: vtadmin.DeleteTabletResponse.cluster:type_name -> vtadmin.Cluster + 142, // 41: vtadmin.EmergencyFailoverShardRequest.options:type_name -> vtctldata.EmergencyReparentShardRequest + 1, // 42: vtadmin.EmergencyFailoverShardResponse.cluster:type_name -> vtadmin.Cluster + 141, // 43: vtadmin.EmergencyFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 143, // 44: vtadmin.EmergencyFailoverShardResponse.events:type_name -> logutil.Event + 58, // 45: vtadmin.FindSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions + 144, // 46: vtadmin.GetBackupsRequest.request_options:type_name -> vtctldata.GetBackupsRequest + 2, // 47: vtadmin.GetBackupsResponse.backups:type_name -> vtadmin.ClusterBackup + 4, // 48: vtadmin.GetCellInfosResponse.cell_infos:type_name -> vtadmin.ClusterCellInfo + 3, // 49: vtadmin.GetCellsAliasesResponse.aliases:type_name -> vtadmin.ClusterCellsAliases + 1, // 50: vtadmin.GetClustersResponse.clusters:type_name -> vtadmin.Cluster + 141, // 51: vtadmin.GetFullStatusRequest.alias:type_name -> topodata.TabletAlias + 15, // 52: vtadmin.GetGatesResponse.gates:type_name -> vtadmin.VTGate + 7, // 53: vtadmin.GetKeyspacesResponse.keyspaces:type_name -> vtadmin.Keyspace + 58, // 54: vtadmin.GetSchemaRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions + 
58, // 55: vtadmin.GetSchemasRequest.table_size_options:type_name -> vtadmin.GetSchemaTableSizeOptions + 8, // 56: vtadmin.GetSchemasResponse.schemas:type_name -> vtadmin.Schema + 116, // 57: vtadmin.GetSchemaMigrationsRequest.cluster_requests:type_name -> vtadmin.GetSchemaMigrationsRequest.ClusterRequest + 9, // 58: vtadmin.GetSchemaMigrationsResponse.schema_migrations:type_name -> vtadmin.SchemaMigration + 5, // 59: vtadmin.GetShardReplicationPositionsResponse.replication_positions:type_name -> vtadmin.ClusterShardReplicationPosition + 117, // 60: vtadmin.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry + 11, // 61: vtadmin.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtadmin.SrvVSchema + 141, // 62: vtadmin.GetTabletRequest.alias:type_name -> topodata.TabletAlias + 12, // 63: vtadmin.GetTabletsResponse.tablets:type_name -> vtadmin.Tablet + 13, // 64: vtadmin.GetVSchemasResponse.v_schemas:type_name -> vtadmin.VSchema + 14, // 65: vtadmin.GetVtctldsResponse.vtctlds:type_name -> vtadmin.Vtctld + 118, // 66: vtadmin.GetWorkflowsResponse.workflows_by_cluster:type_name -> vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry + 145, // 67: vtadmin.LaunchSchemaMigrationRequest.request:type_name -> vtctldata.LaunchSchemaMigrationRequest + 141, // 68: vtadmin.PingTabletRequest.alias:type_name -> topodata.TabletAlias + 1, // 69: vtadmin.PingTabletResponse.cluster:type_name -> vtadmin.Cluster + 146, // 70: vtadmin.PlannedFailoverShardRequest.options:type_name -> vtctldata.PlannedReparentShardRequest + 1, // 71: vtadmin.PlannedFailoverShardResponse.cluster:type_name -> vtadmin.Cluster + 141, // 72: vtadmin.PlannedFailoverShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 143, // 73: vtadmin.PlannedFailoverShardResponse.events:type_name -> logutil.Event + 141, // 74: vtadmin.RefreshStateRequest.alias:type_name -> topodata.TabletAlias + 1, // 75: vtadmin.RefreshStateResponse.cluster:type_name -> 
vtadmin.Cluster + 141, // 76: vtadmin.ReloadSchemasRequest.tablets:type_name -> topodata.TabletAlias + 119, // 77: vtadmin.ReloadSchemasResponse.keyspace_results:type_name -> vtadmin.ReloadSchemasResponse.KeyspaceResult + 120, // 78: vtadmin.ReloadSchemasResponse.shard_results:type_name -> vtadmin.ReloadSchemasResponse.ShardResult + 121, // 79: vtadmin.ReloadSchemasResponse.tablet_results:type_name -> vtadmin.ReloadSchemasResponse.TabletResult + 143, // 80: vtadmin.ReloadSchemaShardResponse.events:type_name -> logutil.Event + 141, // 81: vtadmin.RefreshTabletReplicationSourceRequest.alias:type_name -> topodata.TabletAlias + 141, // 82: vtadmin.RefreshTabletReplicationSourceResponse.primary:type_name -> topodata.TabletAlias + 1, // 83: vtadmin.RefreshTabletReplicationSourceResponse.cluster:type_name -> vtadmin.Cluster + 147, // 84: vtadmin.RetrySchemaMigrationRequest.request:type_name -> vtctldata.RetrySchemaMigrationRequest + 141, // 85: vtadmin.RunHealthCheckRequest.alias:type_name -> topodata.TabletAlias + 1, // 86: vtadmin.RunHealthCheckResponse.cluster:type_name -> vtadmin.Cluster + 141, // 87: vtadmin.SetReadOnlyRequest.alias:type_name -> topodata.TabletAlias + 141, // 88: vtadmin.SetReadWriteRequest.alias:type_name -> topodata.TabletAlias + 141, // 89: vtadmin.StartReplicationRequest.alias:type_name -> topodata.TabletAlias + 1, // 90: vtadmin.StartReplicationResponse.cluster:type_name -> vtadmin.Cluster + 141, // 91: vtadmin.StopReplicationRequest.alias:type_name -> topodata.TabletAlias + 1, // 92: vtadmin.StopReplicationResponse.cluster:type_name -> vtadmin.Cluster + 141, // 93: vtadmin.TabletExternallyPromotedRequest.alias:type_name -> topodata.TabletAlias + 1, // 94: vtadmin.TabletExternallyPromotedResponse.cluster:type_name -> vtadmin.Cluster + 141, // 95: vtadmin.TabletExternallyPromotedResponse.new_primary:type_name -> topodata.TabletAlias + 141, // 96: vtadmin.TabletExternallyPromotedResponse.old_primary:type_name -> topodata.TabletAlias + 141, // 97: 
vtadmin.TabletExternallyReparentedRequest.alias:type_name -> topodata.TabletAlias + 148, // 98: vtadmin.ClusterCellsAliases.AliasesEntry.value:type_name -> topodata.CellsAlias + 128, // 99: vtadmin.Keyspace.ShardsEntry.value:type_name -> vtctldata.Shard + 114, // 100: vtadmin.Schema.TableSizesEntry.value:type_name -> vtadmin.Schema.TableSize + 115, // 101: vtadmin.Schema.TableSize.by_shard:type_name -> vtadmin.Schema.TableSize.ByShardEntry + 113, // 102: vtadmin.Schema.TableSize.ByShardEntry.value:type_name -> vtadmin.Schema.ShardTableSize + 149, // 103: vtadmin.GetSchemaMigrationsRequest.ClusterRequest.request:type_name -> vtctldata.GetSchemaMigrationsRequest + 150, // 104: vtadmin.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> vtctldata.GetSrvKeyspacesResponse + 6, // 105: vtadmin.GetWorkflowsResponse.WorkflowsByClusterEntry.value:type_name -> vtadmin.ClusterWorkflows + 7, // 106: vtadmin.ReloadSchemasResponse.KeyspaceResult.keyspace:type_name -> vtadmin.Keyspace + 143, // 107: vtadmin.ReloadSchemasResponse.KeyspaceResult.events:type_name -> logutil.Event + 10, // 108: vtadmin.ReloadSchemasResponse.ShardResult.shard:type_name -> vtadmin.Shard + 143, // 109: vtadmin.ReloadSchemasResponse.ShardResult.events:type_name -> logutil.Event + 12, // 110: vtadmin.ReloadSchemasResponse.TabletResult.tablet:type_name -> vtadmin.Tablet + 17, // 111: vtadmin.VTAdmin.ApplySchema:input_type -> vtadmin.ApplySchemaRequest + 18, // 112: vtadmin.VTAdmin.CancelSchemaMigration:input_type -> vtadmin.CancelSchemaMigrationRequest + 19, // 113: vtadmin.VTAdmin.CleanupSchemaMigration:input_type -> vtadmin.CleanupSchemaMigrationRequest + 20, // 114: vtadmin.VTAdmin.CompleteSchemaMigration:input_type -> vtadmin.CompleteSchemaMigrationRequest + 21, // 115: vtadmin.VTAdmin.CreateKeyspace:input_type -> vtadmin.CreateKeyspaceRequest + 23, // 116: vtadmin.VTAdmin.CreateShard:input_type -> vtadmin.CreateShardRequest + 24, // 117: vtadmin.VTAdmin.DeleteKeyspace:input_type -> 
vtadmin.DeleteKeyspaceRequest + 25, // 118: vtadmin.VTAdmin.DeleteShards:input_type -> vtadmin.DeleteShardsRequest + 26, // 119: vtadmin.VTAdmin.DeleteTablet:input_type -> vtadmin.DeleteTabletRequest + 28, // 120: vtadmin.VTAdmin.EmergencyFailoverShard:input_type -> vtadmin.EmergencyFailoverShardRequest + 30, // 121: vtadmin.VTAdmin.FindSchema:input_type -> vtadmin.FindSchemaRequest + 31, // 122: vtadmin.VTAdmin.GetBackups:input_type -> vtadmin.GetBackupsRequest + 33, // 123: vtadmin.VTAdmin.GetCellInfos:input_type -> vtadmin.GetCellInfosRequest + 35, // 124: vtadmin.VTAdmin.GetCellsAliases:input_type -> vtadmin.GetCellsAliasesRequest + 37, // 125: vtadmin.VTAdmin.GetClusters:input_type -> vtadmin.GetClustersRequest + 39, // 126: vtadmin.VTAdmin.GetFullStatus:input_type -> vtadmin.GetFullStatusRequest + 40, // 127: vtadmin.VTAdmin.GetGates:input_type -> vtadmin.GetGatesRequest + 42, // 128: vtadmin.VTAdmin.GetKeyspace:input_type -> vtadmin.GetKeyspaceRequest + 43, // 129: vtadmin.VTAdmin.GetKeyspaces:input_type -> vtadmin.GetKeyspacesRequest + 45, // 130: vtadmin.VTAdmin.GetSchema:input_type -> vtadmin.GetSchemaRequest + 46, // 131: vtadmin.VTAdmin.GetSchemas:input_type -> vtadmin.GetSchemasRequest + 48, // 132: vtadmin.VTAdmin.GetSchemaMigrations:input_type -> vtadmin.GetSchemaMigrationsRequest + 50, // 133: vtadmin.VTAdmin.GetShardReplicationPositions:input_type -> vtadmin.GetShardReplicationPositionsRequest + 52, // 134: vtadmin.VTAdmin.GetSrvKeyspace:input_type -> vtadmin.GetSrvKeyspaceRequest + 53, // 135: vtadmin.VTAdmin.GetSrvKeyspaces:input_type -> vtadmin.GetSrvKeyspacesRequest + 55, // 136: vtadmin.VTAdmin.GetSrvVSchema:input_type -> vtadmin.GetSrvVSchemaRequest + 56, // 137: vtadmin.VTAdmin.GetSrvVSchemas:input_type -> vtadmin.GetSrvVSchemasRequest + 59, // 138: vtadmin.VTAdmin.GetTablet:input_type -> vtadmin.GetTabletRequest + 60, // 139: vtadmin.VTAdmin.GetTablets:input_type -> vtadmin.GetTabletsRequest + 62, // 140: 
vtadmin.VTAdmin.GetTopologyPath:input_type -> vtadmin.GetTopologyPathRequest + 63, // 141: vtadmin.VTAdmin.GetVSchema:input_type -> vtadmin.GetVSchemaRequest + 64, // 142: vtadmin.VTAdmin.GetVSchemas:input_type -> vtadmin.GetVSchemasRequest + 66, // 143: vtadmin.VTAdmin.GetVtctlds:input_type -> vtadmin.GetVtctldsRequest + 68, // 144: vtadmin.VTAdmin.GetWorkflow:input_type -> vtadmin.GetWorkflowRequest + 69, // 145: vtadmin.VTAdmin.GetWorkflows:input_type -> vtadmin.GetWorkflowsRequest + 71, // 146: vtadmin.VTAdmin.LaunchSchemaMigration:input_type -> vtadmin.LaunchSchemaMigrationRequest + 72, // 147: vtadmin.VTAdmin.PingTablet:input_type -> vtadmin.PingTabletRequest + 74, // 148: vtadmin.VTAdmin.PlannedFailoverShard:input_type -> vtadmin.PlannedFailoverShardRequest + 76, // 149: vtadmin.VTAdmin.RebuildKeyspaceGraph:input_type -> vtadmin.RebuildKeyspaceGraphRequest + 78, // 150: vtadmin.VTAdmin.RefreshState:input_type -> vtadmin.RefreshStateRequest + 84, // 151: vtadmin.VTAdmin.RefreshTabletReplicationSource:input_type -> vtadmin.RefreshTabletReplicationSourceRequest + 80, // 152: vtadmin.VTAdmin.ReloadSchemas:input_type -> vtadmin.ReloadSchemasRequest + 82, // 153: vtadmin.VTAdmin.ReloadSchemaShard:input_type -> vtadmin.ReloadSchemaShardRequest + 86, // 154: vtadmin.VTAdmin.RemoveKeyspaceCell:input_type -> vtadmin.RemoveKeyspaceCellRequest + 88, // 155: vtadmin.VTAdmin.RetrySchemaMigration:input_type -> vtadmin.RetrySchemaMigrationRequest + 89, // 156: vtadmin.VTAdmin.RunHealthCheck:input_type -> vtadmin.RunHealthCheckRequest + 91, // 157: vtadmin.VTAdmin.SetReadOnly:input_type -> vtadmin.SetReadOnlyRequest + 93, // 158: vtadmin.VTAdmin.SetReadWrite:input_type -> vtadmin.SetReadWriteRequest + 95, // 159: vtadmin.VTAdmin.StartReplication:input_type -> vtadmin.StartReplicationRequest + 97, // 160: vtadmin.VTAdmin.StopReplication:input_type -> vtadmin.StopReplicationRequest + 99, // 161: vtadmin.VTAdmin.TabletExternallyPromoted:input_type -> 
vtadmin.TabletExternallyPromotedRequest + 102, // 162: vtadmin.VTAdmin.Validate:input_type -> vtadmin.ValidateRequest + 103, // 163: vtadmin.VTAdmin.ValidateKeyspace:input_type -> vtadmin.ValidateKeyspaceRequest + 104, // 164: vtadmin.VTAdmin.ValidateSchemaKeyspace:input_type -> vtadmin.ValidateSchemaKeyspaceRequest + 105, // 165: vtadmin.VTAdmin.ValidateShard:input_type -> vtadmin.ValidateShardRequest + 106, // 166: vtadmin.VTAdmin.ValidateVersionKeyspace:input_type -> vtadmin.ValidateVersionKeyspaceRequest + 107, // 167: vtadmin.VTAdmin.ValidateVersionShard:input_type -> vtadmin.ValidateVersionShardRequest + 108, // 168: vtadmin.VTAdmin.VTExplain:input_type -> vtadmin.VTExplainRequest + 151, // 169: vtadmin.VTAdmin.ApplySchema:output_type -> vtctldata.ApplySchemaResponse + 152, // 170: vtadmin.VTAdmin.CancelSchemaMigration:output_type -> vtctldata.CancelSchemaMigrationResponse + 153, // 171: vtadmin.VTAdmin.CleanupSchemaMigration:output_type -> vtctldata.CleanupSchemaMigrationResponse + 154, // 172: vtadmin.VTAdmin.CompleteSchemaMigration:output_type -> vtctldata.CompleteSchemaMigrationResponse + 22, // 173: vtadmin.VTAdmin.CreateKeyspace:output_type -> vtadmin.CreateKeyspaceResponse + 155, // 174: vtadmin.VTAdmin.CreateShard:output_type -> vtctldata.CreateShardResponse + 156, // 175: vtadmin.VTAdmin.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse + 157, // 176: vtadmin.VTAdmin.DeleteShards:output_type -> vtctldata.DeleteShardsResponse + 27, // 177: vtadmin.VTAdmin.DeleteTablet:output_type -> vtadmin.DeleteTabletResponse + 29, // 178: vtadmin.VTAdmin.EmergencyFailoverShard:output_type -> vtadmin.EmergencyFailoverShardResponse + 8, // 179: vtadmin.VTAdmin.FindSchema:output_type -> vtadmin.Schema + 32, // 180: vtadmin.VTAdmin.GetBackups:output_type -> vtadmin.GetBackupsResponse + 34, // 181: vtadmin.VTAdmin.GetCellInfos:output_type -> vtadmin.GetCellInfosResponse + 36, // 182: vtadmin.VTAdmin.GetCellsAliases:output_type -> 
vtadmin.GetCellsAliasesResponse + 38, // 183: vtadmin.VTAdmin.GetClusters:output_type -> vtadmin.GetClustersResponse + 158, // 184: vtadmin.VTAdmin.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse + 41, // 185: vtadmin.VTAdmin.GetGates:output_type -> vtadmin.GetGatesResponse + 7, // 186: vtadmin.VTAdmin.GetKeyspace:output_type -> vtadmin.Keyspace + 44, // 187: vtadmin.VTAdmin.GetKeyspaces:output_type -> vtadmin.GetKeyspacesResponse + 8, // 188: vtadmin.VTAdmin.GetSchema:output_type -> vtadmin.Schema + 47, // 189: vtadmin.VTAdmin.GetSchemas:output_type -> vtadmin.GetSchemasResponse + 49, // 190: vtadmin.VTAdmin.GetSchemaMigrations:output_type -> vtadmin.GetSchemaMigrationsResponse + 51, // 191: vtadmin.VTAdmin.GetShardReplicationPositions:output_type -> vtadmin.GetShardReplicationPositionsResponse + 150, // 192: vtadmin.VTAdmin.GetSrvKeyspace:output_type -> vtctldata.GetSrvKeyspacesResponse + 54, // 193: vtadmin.VTAdmin.GetSrvKeyspaces:output_type -> vtadmin.GetSrvKeyspacesResponse + 11, // 194: vtadmin.VTAdmin.GetSrvVSchema:output_type -> vtadmin.SrvVSchema + 57, // 195: vtadmin.VTAdmin.GetSrvVSchemas:output_type -> vtadmin.GetSrvVSchemasResponse + 12, // 196: vtadmin.VTAdmin.GetTablet:output_type -> vtadmin.Tablet + 61, // 197: vtadmin.VTAdmin.GetTablets:output_type -> vtadmin.GetTabletsResponse + 159, // 198: vtadmin.VTAdmin.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse + 13, // 199: vtadmin.VTAdmin.GetVSchema:output_type -> vtadmin.VSchema + 65, // 200: vtadmin.VTAdmin.GetVSchemas:output_type -> vtadmin.GetVSchemasResponse + 67, // 201: vtadmin.VTAdmin.GetVtctlds:output_type -> vtadmin.GetVtctldsResponse + 16, // 202: vtadmin.VTAdmin.GetWorkflow:output_type -> vtadmin.Workflow + 70, // 203: vtadmin.VTAdmin.GetWorkflows:output_type -> vtadmin.GetWorkflowsResponse + 160, // 204: vtadmin.VTAdmin.LaunchSchemaMigration:output_type -> vtctldata.LaunchSchemaMigrationResponse + 73, // 205: vtadmin.VTAdmin.PingTablet:output_type -> 
vtadmin.PingTabletResponse + 75, // 206: vtadmin.VTAdmin.PlannedFailoverShard:output_type -> vtadmin.PlannedFailoverShardResponse + 77, // 207: vtadmin.VTAdmin.RebuildKeyspaceGraph:output_type -> vtadmin.RebuildKeyspaceGraphResponse + 79, // 208: vtadmin.VTAdmin.RefreshState:output_type -> vtadmin.RefreshStateResponse + 85, // 209: vtadmin.VTAdmin.RefreshTabletReplicationSource:output_type -> vtadmin.RefreshTabletReplicationSourceResponse + 81, // 210: vtadmin.VTAdmin.ReloadSchemas:output_type -> vtadmin.ReloadSchemasResponse + 83, // 211: vtadmin.VTAdmin.ReloadSchemaShard:output_type -> vtadmin.ReloadSchemaShardResponse + 87, // 212: vtadmin.VTAdmin.RemoveKeyspaceCell:output_type -> vtadmin.RemoveKeyspaceCellResponse + 161, // 213: vtadmin.VTAdmin.RetrySchemaMigration:output_type -> vtctldata.RetrySchemaMigrationResponse + 90, // 214: vtadmin.VTAdmin.RunHealthCheck:output_type -> vtadmin.RunHealthCheckResponse + 92, // 215: vtadmin.VTAdmin.SetReadOnly:output_type -> vtadmin.SetReadOnlyResponse + 94, // 216: vtadmin.VTAdmin.SetReadWrite:output_type -> vtadmin.SetReadWriteResponse + 96, // 217: vtadmin.VTAdmin.StartReplication:output_type -> vtadmin.StartReplicationResponse + 98, // 218: vtadmin.VTAdmin.StopReplication:output_type -> vtadmin.StopReplicationResponse + 100, // 219: vtadmin.VTAdmin.TabletExternallyPromoted:output_type -> vtadmin.TabletExternallyPromotedResponse + 162, // 220: vtadmin.VTAdmin.Validate:output_type -> vtctldata.ValidateResponse + 163, // 221: vtadmin.VTAdmin.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse + 164, // 222: vtadmin.VTAdmin.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse + 165, // 223: vtadmin.VTAdmin.ValidateShard:output_type -> vtctldata.ValidateShardResponse + 166, // 224: vtadmin.VTAdmin.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse + 167, // 225: vtadmin.VTAdmin.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse + 
109, // 226: vtadmin.VTAdmin.VTExplain:output_type -> vtadmin.VTExplainResponse + 169, // [169:227] is the sub-list for method output_type + 111, // [111:169] is the sub-list for method input_type + 111, // [111:111] is the sub-list for extension type_name + 111, // [111:111] is the sub-list for extension extendee + 0, // [0:111] is the sub-list for field type_name } func init() { file_vtadmin_proto_init() } @@ -7765,8 +8471,68 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Keyspace); i { + file_vtadmin_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Keyspace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaMigration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Shard); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SrvVSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tablet); i { case 0: return &v.state case 1: @@ -7777,8 
+8543,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { + file_vtadmin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VSchema); i { case 0: return &v.state case 1: @@ -7789,8 +8555,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Shard); i { + file_vtadmin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Vtctld); i { case 0: return &v.state case 1: @@ -7801,8 +8567,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SrvVSchema); i { + file_vtadmin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VTGate); i { case 0: return &v.state case 1: @@ -7813,8 +8579,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tablet); i { + file_vtadmin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Workflow); i { case 0: return &v.state case 1: @@ -7825,8 +8591,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VSchema); i { + file_vtadmin_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplySchemaRequest); i { case 0: return &v.state case 1: @@ -7837,8 +8603,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Vtctld); i { + file_vtadmin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*CancelSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -7849,8 +8615,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VTGate); i { + file_vtadmin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -7861,8 +8627,8 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow); i { + file_vtadmin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CompleteSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -7873,7 +8639,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateKeyspaceRequest); i { case 0: return &v.state @@ -7885,7 +8651,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateKeyspaceResponse); i { case 0: return &v.state @@ -7897,7 +8663,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateShardRequest); i { case 0: return &v.state @@ -7909,7 +8675,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*DeleteKeyspaceRequest); i { case 0: return &v.state @@ -7921,7 +8687,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteShardsRequest); i { case 0: return &v.state @@ -7933,7 +8699,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteTabletRequest); i { case 0: return &v.state @@ -7945,7 +8711,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteTabletResponse); i { case 0: return &v.state @@ -7957,7 +8723,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EmergencyFailoverShardRequest); i { case 0: return &v.state @@ -7969,7 +8735,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EmergencyFailoverShardResponse); i { case 0: return &v.state @@ -7981,7 +8747,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*FindSchemaRequest); i { case 0: return &v.state @@ -7993,7 +8759,7 @@ func file_vtadmin_proto_init() { return nil } } - 
file_vtadmin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBackupsRequest); i { case 0: return &v.state @@ -8005,7 +8771,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBackupsResponse); i { case 0: return &v.state @@ -8017,7 +8783,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellInfosRequest); i { case 0: return &v.state @@ -8029,7 +8795,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellInfosResponse); i { case 0: return &v.state @@ -8041,7 +8807,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellsAliasesRequest); i { case 0: return &v.state @@ -8053,7 +8819,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellsAliasesResponse); i { case 0: return &v.state @@ -8065,7 +8831,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[36].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*GetClustersRequest); i { case 0: return &v.state @@ -8077,7 +8843,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetClustersResponse); i { case 0: return &v.state @@ -8089,7 +8855,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFullStatusRequest); i { case 0: return &v.state @@ -8101,7 +8867,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetGatesRequest); i { case 0: return &v.state @@ -8113,7 +8879,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetGatesResponse); i { case 0: return &v.state @@ -8125,7 +8891,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetKeyspaceRequest); i { case 0: return &v.state @@ -8137,7 +8903,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetKeyspacesRequest); i { case 0: return &v.state @@ -8149,7 +8915,7 @@ func file_vtadmin_proto_init() { 
return nil } } - file_vtadmin_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetKeyspacesResponse); i { case 0: return &v.state @@ -8161,7 +8927,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaRequest); i { case 0: return &v.state @@ -8173,7 +8939,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemasRequest); i { case 0: return &v.state @@ -8185,7 +8951,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemasResponse); i { case 0: return &v.state @@ -8197,7 +8963,31 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetShardReplicationPositionsRequest); 
i { case 0: return &v.state @@ -8209,7 +8999,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetShardReplicationPositionsResponse); i { case 0: return &v.state @@ -8221,7 +9011,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspaceRequest); i { case 0: return &v.state @@ -8233,7 +9023,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspacesRequest); i { case 0: return &v.state @@ -8245,7 +9035,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspacesResponse); i { case 0: return &v.state @@ -8257,7 +9047,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemaRequest); i { case 0: return &v.state @@ -8269,7 +9059,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemasRequest); i { case 0: return &v.state @@ -8281,7 +9071,7 @@ func file_vtadmin_proto_init() { return nil } } - 
file_vtadmin_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemasResponse); i { case 0: return &v.state @@ -8293,7 +9083,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaTableSizeOptions); i { case 0: return &v.state @@ -8305,7 +9095,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletRequest); i { case 0: return &v.state @@ -8317,7 +9107,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletsRequest); i { case 0: return &v.state @@ -8329,7 +9119,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletsResponse); i { case 0: return &v.state @@ -8341,7 +9131,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTopologyPathRequest); i { case 0: return &v.state @@ -8353,7 +9143,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[62].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*GetVSchemaRequest); i { case 0: return &v.state @@ -8365,7 +9155,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVSchemasRequest); i { case 0: return &v.state @@ -8377,7 +9167,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVSchemasResponse); i { case 0: return &v.state @@ -8389,7 +9179,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVtctldsRequest); i { case 0: return &v.state @@ -8401,7 +9191,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVtctldsResponse); i { case 0: return &v.state @@ -8413,7 +9203,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowRequest); i { case 0: return &v.state @@ -8425,7 +9215,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowsRequest); i { case 0: return &v.state @@ -8437,7 +9227,7 @@ func file_vtadmin_proto_init() { 
return nil } } - file_vtadmin_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowsResponse); i { case 0: return &v.state @@ -8449,7 +9239,19 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LaunchSchemaMigrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PingTabletRequest); i { case 0: return &v.state @@ -8461,7 +9263,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PingTabletResponse); i { case 0: return &v.state @@ -8473,7 +9275,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlannedFailoverShardRequest); i { case 0: return &v.state @@ -8485,7 +9287,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlannedFailoverShardResponse); i { case 0: return &v.state @@ -8497,7 +9299,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[75].Exporter = func(v interface{}, i int) 
interface{} { switch v := v.(*RebuildKeyspaceGraphRequest); i { case 0: return &v.state @@ -8509,7 +9311,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RebuildKeyspaceGraphResponse); i { case 0: return &v.state @@ -8521,7 +9323,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshStateRequest); i { case 0: return &v.state @@ -8533,7 +9335,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshStateResponse); i { case 0: return &v.state @@ -8545,7 +9347,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasRequest); i { case 0: return &v.state @@ -8557,7 +9359,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse); i { case 0: return &v.state @@ -8569,7 +9371,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaShardRequest); i { case 0: return &v.state @@ -8581,7 +9383,7 @@ func 
file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaShardResponse); i { case 0: return &v.state @@ -8593,7 +9395,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshTabletReplicationSourceRequest); i { case 0: return &v.state @@ -8605,7 +9407,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshTabletReplicationSourceResponse); i { case 0: return &v.state @@ -8617,7 +9419,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveKeyspaceCellRequest); i { case 0: return &v.state @@ -8629,7 +9431,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveKeyspaceCellResponse); i { case 0: return &v.state @@ -8641,7 +9443,19 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetrySchemaMigrationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_vtadmin_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunHealthCheckRequest); i { case 0: return &v.state @@ -8653,7 +9467,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunHealthCheckResponse); i { case 0: return &v.state @@ -8665,7 +9479,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetReadOnlyRequest); i { case 0: return &v.state @@ -8677,7 +9491,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetReadOnlyResponse); i { case 0: return &v.state @@ -8689,7 +9503,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetReadWriteRequest); i { case 0: return &v.state @@ -8701,7 +9515,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetReadWriteResponse); i { case 0: return &v.state @@ -8713,7 +9527,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StartReplicationRequest); i { case 0: return 
&v.state @@ -8725,7 +9539,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StartReplicationResponse); i { case 0: return &v.state @@ -8737,7 +9551,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StopReplicationRequest); i { case 0: return &v.state @@ -8749,7 +9563,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StopReplicationResponse); i { case 0: return &v.state @@ -8761,7 +9575,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TabletExternallyPromotedRequest); i { case 0: return &v.state @@ -8773,7 +9587,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TabletExternallyPromotedResponse); i { case 0: return &v.state @@ -8785,7 +9599,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TabletExternallyReparentedRequest); i { case 0: return &v.state @@ -8797,7 +9611,7 @@ func file_vtadmin_proto_init() { return nil } } - 
file_vtadmin_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateRequest); i { case 0: return &v.state @@ -8809,7 +9623,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateKeyspaceRequest); i { case 0: return &v.state @@ -8821,7 +9635,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateSchemaKeyspaceRequest); i { case 0: return &v.state @@ -8833,7 +9647,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateShardRequest); i { case 0: return &v.state @@ -8845,7 +9659,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVersionKeyspaceRequest); i { case 0: return &v.state @@ -8857,7 +9671,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVersionShardRequest); i { case 0: return &v.state @@ -8869,7 +9683,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + 
file_vtadmin_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VTExplainRequest); i { case 0: return &v.state @@ -8881,7 +9695,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VTExplainResponse); i { case 0: return &v.state @@ -8893,7 +9707,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_ShardTableSize); i { case 0: return &v.state @@ -8905,7 +9719,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_TableSize); i { case 0: return &v.state @@ -8917,7 +9731,19 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSchemaMigrationsRequest_ClusterRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtadmin_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse_KeyspaceResult); i { case 0: return &v.state @@ -8929,7 +9755,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse_ShardResult); i { case 0: return 
&v.state @@ -8941,7 +9767,7 @@ func file_vtadmin_proto_init() { return nil } } - file_vtadmin_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + file_vtadmin_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemasResponse_TabletResult); i { case 0: return &v.state @@ -8960,7 +9786,7 @@ func file_vtadmin_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtadmin_proto_rawDesc, NumEnums: 1, - NumMessages: 111, + NumMessages: 121, NumExtensions: 0, NumServices: 1, }, diff --git a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go index e0e2ce2f44f..1e377d0659f 100644 --- a/go/vt/proto/vtadmin/vtadmin_grpc.pb.go +++ b/go/vt/proto/vtadmin/vtadmin_grpc.pb.go @@ -23,6 +23,17 @@ const _ = grpc.SupportPackageIsVersion7 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type VTAdminClient interface { + // ApplySchema applies a schema to a keyspace in the given cluster. + ApplySchema(ctx context.Context, in *ApplySchemaRequest, opts ...grpc.CallOption) (*vtctldata.ApplySchemaResponse, error) + // CancelSchemaMigration cancels one or all schema migrations in the given + // cluster, terminating any running ones as needed. + CancelSchemaMigration(ctx context.Context, in *CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) + // CleanupSchemaMigration marks a schema migration in the given cluster as + // ready for artifact cleanup. + CleanupSchemaMigration(ctx context.Context, in *CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CleanupSchemaMigrationResponse, error) + // CompleteSchemaMigration completes one or all migrations in the given + // cluster executed with --postpone-completion. 
+ CompleteSchemaMigration(ctx context.Context, in *CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CompleteSchemaMigrationResponse, error) // CreateKeyspace creates a new keyspace in the given cluster. CreateKeyspace(ctx context.Context, in *CreateKeyspaceRequest, opts ...grpc.CallOption) (*CreateKeyspaceResponse, error) // CreateShard creates a new shard in the given cluster and keyspace. @@ -67,6 +78,13 @@ type VTAdminClient interface { GetSchema(ctx context.Context, in *GetSchemaRequest, opts ...grpc.CallOption) (*Schema, error) // GetSchemas returns all schemas across the specified clusters. GetSchemas(ctx context.Context, in *GetSchemasRequest, opts ...grpc.CallOption) (*GetSchemasResponse, error) + // GetSchemaMigrations returns one or more online schema migrations for the + // set of keyspaces (or all keyspaces) in the given clusters, analagous to + // repeated executions of `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different behaviors. + // See the documentation on vtctldata.GetSchemaMigrationsRequest for details. + GetSchemaMigrations(ctx context.Context, in *GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*GetSchemaMigrationsResponse, error) // GetShardReplicationPositions returns shard replication positions grouped // by cluster. GetShardReplicationPositions(ctx context.Context, in *GetShardReplicationPositionsRequest, opts ...grpc.CallOption) (*GetShardReplicationPositionsResponse, error) @@ -98,6 +116,9 @@ type VTAdminClient interface { GetWorkflow(ctx context.Context, in *GetWorkflowRequest, opts ...grpc.CallOption) (*Workflow, error) // GetWorkflows returns the Workflows for all specified clusters. GetWorkflows(ctx context.Context, in *GetWorkflowsRequest, opts ...grpc.CallOption) (*GetWorkflowsResponse, error) + // LaunchSchemaMigration launches one or all migrations in the given + // cluster executed with --postpone-launch. 
+ LaunchSchemaMigration(ctx context.Context, in *LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.LaunchSchemaMigrationResponse, error) // PingTablet checks that the specified tablet is awake and responding to // RPCs. This command can be blocked by other in-flight operations. PingTablet(ctx context.Context, in *PingTabletRequest, opts ...grpc.CallOption) (*PingTabletResponse, error) @@ -124,6 +145,9 @@ type VTAdminClient interface { ReloadSchemaShard(ctx context.Context, in *ReloadSchemaShardRequest, opts ...grpc.CallOption) (*ReloadSchemaShardResponse, error) // RemoveKeyspaceCell removes the cell from the Cells list for all shards in the keyspace, and the SrvKeyspace for that keyspace in that cell. RemoveKeyspaceCell(ctx context.Context, in *RemoveKeyspaceCellRequest, opts ...grpc.CallOption) (*RemoveKeyspaceCellResponse, error) + // RetrySchemaMigration marks a given schema migration in the given cluster + // for retry. + RetrySchemaMigration(ctx context.Context, in *RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.RetrySchemaMigrationResponse, error) // RunHealthCheck runs a healthcheck on the tablet. RunHealthCheck(ctx context.Context, in *RunHealthCheckRequest, opts ...grpc.CallOption) (*RunHealthCheckResponse, error) // SetReadOnly sets the tablet to read-only mode. @@ -176,6 +200,42 @@ func NewVTAdminClient(cc grpc.ClientConnInterface) VTAdminClient { return &vTAdminClient{cc} } +func (c *vTAdminClient) ApplySchema(ctx context.Context, in *ApplySchemaRequest, opts ...grpc.CallOption) (*vtctldata.ApplySchemaResponse, error) { + out := new(vtctldata.ApplySchemaResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/ApplySchema", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) CancelSchemaMigration(ctx context.Context, in *CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) { + out := new(vtctldata.CancelSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/CancelSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) CleanupSchemaMigration(ctx context.Context, in *CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CleanupSchemaMigrationResponse, error) { + out := new(vtctldata.CleanupSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/CleanupSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vTAdminClient) CompleteSchemaMigration(ctx context.Context, in *CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CompleteSchemaMigrationResponse, error) { + out := new(vtctldata.CompleteSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/CompleteSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) CreateKeyspace(ctx context.Context, in *CreateKeyspaceRequest, opts ...grpc.CallOption) (*CreateKeyspaceResponse, error) { out := new(CreateKeyspaceResponse) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/CreateKeyspace", in, out, opts...) @@ -329,6 +389,15 @@ func (c *vTAdminClient) GetSchemas(ctx context.Context, in *GetSchemasRequest, o return out, nil } +func (c *vTAdminClient) GetSchemaMigrations(ctx context.Context, in *GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*GetSchemaMigrationsResponse, error) { + out := new(GetSchemaMigrationsResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetSchemaMigrations", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) GetShardReplicationPositions(ctx context.Context, in *GetShardReplicationPositionsRequest, opts ...grpc.CallOption) (*GetShardReplicationPositionsResponse, error) { out := new(GetShardReplicationPositionsResponse) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/GetShardReplicationPositions", in, out, opts...) @@ -446,6 +515,15 @@ func (c *vTAdminClient) GetWorkflows(ctx context.Context, in *GetWorkflowsReques return out, nil } +func (c *vTAdminClient) LaunchSchemaMigration(ctx context.Context, in *LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.LaunchSchemaMigrationResponse, error) { + out := new(vtctldata.LaunchSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/LaunchSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) PingTablet(ctx context.Context, in *PingTabletRequest, opts ...grpc.CallOption) (*PingTabletResponse, error) { out := new(PingTabletResponse) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/PingTablet", in, out, opts...) @@ -518,6 +596,15 @@ func (c *vTAdminClient) RemoveKeyspaceCell(ctx context.Context, in *RemoveKeyspa return out, nil } +func (c *vTAdminClient) RetrySchemaMigration(ctx context.Context, in *RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.RetrySchemaMigrationResponse, error) { + out := new(vtctldata.RetrySchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/RetrySchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vTAdminClient) RunHealthCheck(ctx context.Context, in *RunHealthCheckRequest, opts ...grpc.CallOption) (*RunHealthCheckResponse, error) { out := new(RunHealthCheckResponse) err := c.cc.Invoke(ctx, "/vtadmin.VTAdmin/RunHealthCheck", in, out, opts...) 
@@ -639,6 +726,17 @@ func (c *vTAdminClient) VTExplain(ctx context.Context, in *VTExplainRequest, opt // All implementations must embed UnimplementedVTAdminServer // for forward compatibility type VTAdminServer interface { + // ApplySchema applies a schema to a keyspace in the given cluster. + ApplySchema(context.Context, *ApplySchemaRequest) (*vtctldata.ApplySchemaResponse, error) + // CancelSchemaMigration cancels one or all schema migrations in the given + // cluster, terminating any running ones as needed. + CancelSchemaMigration(context.Context, *CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) + // CleanupSchemaMigration marks a schema migration in the given cluster as + // ready for artifact cleanup. + CleanupSchemaMigration(context.Context, *CleanupSchemaMigrationRequest) (*vtctldata.CleanupSchemaMigrationResponse, error) + // CompleteSchemaMigration completes one or all migrations in the given + // cluster executed with --postpone-completion. + CompleteSchemaMigration(context.Context, *CompleteSchemaMigrationRequest) (*vtctldata.CompleteSchemaMigrationResponse, error) // CreateKeyspace creates a new keyspace in the given cluster. CreateKeyspace(context.Context, *CreateKeyspaceRequest) (*CreateKeyspaceResponse, error) // CreateShard creates a new shard in the given cluster and keyspace. @@ -683,6 +781,13 @@ type VTAdminServer interface { GetSchema(context.Context, *GetSchemaRequest) (*Schema, error) // GetSchemas returns all schemas across the specified clusters. GetSchemas(context.Context, *GetSchemasRequest) (*GetSchemasResponse, error) + // GetSchemaMigrations returns one or more online schema migrations for the + // set of keyspaces (or all keyspaces) in the given clusters, analagous to + // repeated executions of `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different behaviors. + // See the documentation on vtctldata.GetSchemaMigrationsRequest for details. 
+ GetSchemaMigrations(context.Context, *GetSchemaMigrationsRequest) (*GetSchemaMigrationsResponse, error) // GetShardReplicationPositions returns shard replication positions grouped // by cluster. GetShardReplicationPositions(context.Context, *GetShardReplicationPositionsRequest) (*GetShardReplicationPositionsResponse, error) @@ -714,6 +819,9 @@ type VTAdminServer interface { GetWorkflow(context.Context, *GetWorkflowRequest) (*Workflow, error) // GetWorkflows returns the Workflows for all specified clusters. GetWorkflows(context.Context, *GetWorkflowsRequest) (*GetWorkflowsResponse, error) + // LaunchSchemaMigration launches one or all migrations in the given + // cluster executed with --postpone-launch. + LaunchSchemaMigration(context.Context, *LaunchSchemaMigrationRequest) (*vtctldata.LaunchSchemaMigrationResponse, error) // PingTablet checks that the specified tablet is awake and responding to // RPCs. This command can be blocked by other in-flight operations. PingTablet(context.Context, *PingTabletRequest) (*PingTabletResponse, error) @@ -740,6 +848,9 @@ type VTAdminServer interface { ReloadSchemaShard(context.Context, *ReloadSchemaShardRequest) (*ReloadSchemaShardResponse, error) // RemoveKeyspaceCell removes the cell from the Cells list for all shards in the keyspace, and the SrvKeyspace for that keyspace in that cell. RemoveKeyspaceCell(context.Context, *RemoveKeyspaceCellRequest) (*RemoveKeyspaceCellResponse, error) + // RetrySchemaMigration marks a given schema migration in the given cluster + // for retry. + RetrySchemaMigration(context.Context, *RetrySchemaMigrationRequest) (*vtctldata.RetrySchemaMigrationResponse, error) // RunHealthCheck runs a healthcheck on the tablet. RunHealthCheck(context.Context, *RunHealthCheckRequest) (*RunHealthCheckResponse, error) // SetReadOnly sets the tablet to read-only mode. 
@@ -789,6 +900,18 @@ type VTAdminServer interface { type UnimplementedVTAdminServer struct { } +func (UnimplementedVTAdminServer) ApplySchema(context.Context, *ApplySchemaRequest) (*vtctldata.ApplySchemaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySchema not implemented") +} +func (UnimplementedVTAdminServer) CancelSchemaMigration(context.Context, *CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelSchemaMigration not implemented") +} +func (UnimplementedVTAdminServer) CleanupSchemaMigration(context.Context, *CleanupSchemaMigrationRequest) (*vtctldata.CleanupSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CleanupSchemaMigration not implemented") +} +func (UnimplementedVTAdminServer) CompleteSchemaMigration(context.Context, *CompleteSchemaMigrationRequest) (*vtctldata.CompleteSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteSchemaMigration not implemented") +} func (UnimplementedVTAdminServer) CreateKeyspace(context.Context, *CreateKeyspaceRequest) (*CreateKeyspaceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateKeyspace not implemented") } @@ -840,6 +963,9 @@ func (UnimplementedVTAdminServer) GetSchema(context.Context, *GetSchemaRequest) func (UnimplementedVTAdminServer) GetSchemas(context.Context, *GetSchemasRequest) (*GetSchemasResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSchemas not implemented") } +func (UnimplementedVTAdminServer) GetSchemaMigrations(context.Context, *GetSchemaMigrationsRequest) (*GetSchemaMigrationsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchemaMigrations not implemented") +} func (UnimplementedVTAdminServer) GetShardReplicationPositions(context.Context, *GetShardReplicationPositionsRequest) 
(*GetShardReplicationPositionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetShardReplicationPositions not implemented") } @@ -879,6 +1005,9 @@ func (UnimplementedVTAdminServer) GetWorkflow(context.Context, *GetWorkflowReque func (UnimplementedVTAdminServer) GetWorkflows(context.Context, *GetWorkflowsRequest) (*GetWorkflowsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetWorkflows not implemented") } +func (UnimplementedVTAdminServer) LaunchSchemaMigration(context.Context, *LaunchSchemaMigrationRequest) (*vtctldata.LaunchSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LaunchSchemaMigration not implemented") +} func (UnimplementedVTAdminServer) PingTablet(context.Context, *PingTabletRequest) (*PingTabletResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method PingTablet not implemented") } @@ -903,6 +1032,9 @@ func (UnimplementedVTAdminServer) ReloadSchemaShard(context.Context, *ReloadSche func (UnimplementedVTAdminServer) RemoveKeyspaceCell(context.Context, *RemoveKeyspaceCellRequest) (*RemoveKeyspaceCellResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RemoveKeyspaceCell not implemented") } +func (UnimplementedVTAdminServer) RetrySchemaMigration(context.Context, *RetrySchemaMigrationRequest) (*vtctldata.RetrySchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RetrySchemaMigration not implemented") +} func (UnimplementedVTAdminServer) RunHealthCheck(context.Context, *RunHealthCheckRequest) (*RunHealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RunHealthCheck not implemented") } @@ -955,6 +1087,78 @@ func RegisterVTAdminServer(s grpc.ServiceRegistrar, srv VTAdminServer) { s.RegisterService(&VTAdmin_ServiceDesc, srv) } +func _VTAdmin_ApplySchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplySchemaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).ApplySchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/ApplySchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).ApplySchema(ctx, req.(*ApplySchemaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_CancelSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).CancelSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/CancelSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).CancelSchemaMigration(ctx, req.(*CancelSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_CleanupSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CleanupSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).CleanupSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/CleanupSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).CleanupSchemaMigration(ctx, req.(*CleanupSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VTAdmin_CompleteSchemaMigration_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompleteSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).CompleteSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/CompleteSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).CompleteSchemaMigration(ctx, req.(*CompleteSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_CreateKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateKeyspaceRequest) if err := dec(in); err != nil { @@ -1261,6 +1465,24 @@ func _VTAdmin_GetSchemas_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _VTAdmin_GetSchemaMigrations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSchemaMigrationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).GetSchemaMigrations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/GetSchemaMigrations", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).GetSchemaMigrations(ctx, req.(*GetSchemaMigrationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_GetShardReplicationPositions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetShardReplicationPositionsRequest) if err := dec(in); err != nil { @@ -1495,6 +1717,24 @@ func 
_VTAdmin_GetWorkflows_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _VTAdmin_LaunchSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LaunchSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).LaunchSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/LaunchSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).LaunchSchemaMigration(ctx, req.(*LaunchSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_PingTablet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PingTabletRequest) if err := dec(in); err != nil { @@ -1639,6 +1879,24 @@ func _VTAdmin_RemoveKeyspaceCell_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _VTAdmin_RetrySchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RetrySchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VTAdminServer).RetrySchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtadmin.VTAdmin/RetrySchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VTAdminServer).RetrySchemaMigration(ctx, req.(*RetrySchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _VTAdmin_RunHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RunHealthCheckRequest) if err := dec(in); err != nil { @@ -1880,6 +2138,22 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{ ServiceName: "vtadmin.VTAdmin", HandlerType: (*VTAdminServer)(nil), Methods: []grpc.MethodDesc{ + { + MethodName: "ApplySchema", + Handler: _VTAdmin_ApplySchema_Handler, + }, + { + MethodName: "CancelSchemaMigration", + Handler: _VTAdmin_CancelSchemaMigration_Handler, + }, + { + MethodName: "CleanupSchemaMigration", + Handler: _VTAdmin_CleanupSchemaMigration_Handler, + }, + { + MethodName: "CompleteSchemaMigration", + Handler: _VTAdmin_CompleteSchemaMigration_Handler, + }, { MethodName: "CreateKeyspace", Handler: _VTAdmin_CreateKeyspace_Handler, @@ -1948,6 +2222,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetSchemas", Handler: _VTAdmin_GetSchemas_Handler, }, + { + MethodName: "GetSchemaMigrations", + Handler: _VTAdmin_GetSchemaMigrations_Handler, + }, { MethodName: "GetShardReplicationPositions", Handler: _VTAdmin_GetShardReplicationPositions_Handler, @@ -2000,6 +2278,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetWorkflows", Handler: _VTAdmin_GetWorkflows_Handler, }, + { + MethodName: "LaunchSchemaMigration", + Handler: _VTAdmin_LaunchSchemaMigration_Handler, + }, { MethodName: "PingTablet", Handler: _VTAdmin_PingTablet_Handler, @@ -2032,6 +2314,10 @@ var VTAdmin_ServiceDesc = grpc.ServiceDesc{ MethodName: "RemoveKeyspaceCell", Handler: _VTAdmin_RemoveKeyspaceCell_Handler, }, + { + MethodName: "RetrySchemaMigration", + Handler: _VTAdmin_RetrySchemaMigration_Handler, + }, { MethodName: "RunHealthCheck", Handler: _VTAdmin_RunHealthCheck_Handler, diff --git a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go index 0e4b4c6e84b..ce506cb7215 100644 --- a/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go +++ b/go/vt/proto/vtadmin/vtadmin_vtproto.pb.go @@ -261,6 +261,25 @@ func (m *Schema) CloneMessageVT() 
proto.Message { return m.CloneVT() } +func (m *SchemaMigration) CloneVT() *SchemaMigration { + if m == nil { + return (*SchemaMigration)(nil) + } + r := &SchemaMigration{ + Cluster: m.Cluster.CloneVT(), + SchemaMigration: m.SchemaMigration.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *SchemaMigration) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Shard) CloneVT() *Shard { if m == nil { return (*Shard)(nil) @@ -408,6 +427,82 @@ func (m *Workflow) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *ApplySchemaRequest) CloneVT() *ApplySchemaRequest { + if m == nil { + return (*ApplySchemaRequest)(nil) + } + r := &ApplySchemaRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplySchemaRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CancelSchemaMigrationRequest) CloneVT() *CancelSchemaMigrationRequest { + if m == nil { + return (*CancelSchemaMigrationRequest)(nil) + } + r := &CancelSchemaMigrationRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CancelSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CleanupSchemaMigrationRequest) CloneVT() *CleanupSchemaMigrationRequest { + if m == nil { + return (*CleanupSchemaMigrationRequest)(nil) + } + r := &CleanupSchemaMigrationRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func 
(m *CleanupSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *CompleteSchemaMigrationRequest) CloneVT() *CompleteSchemaMigrationRequest { + if m == nil { + return (*CompleteSchemaMigrationRequest)(nil) + } + r := &CompleteSchemaMigrationRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *CompleteSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *CreateKeyspaceRequest) CloneVT() *CreateKeyspaceRequest { if m == nil { return (*CreateKeyspaceRequest)(nil) @@ -999,6 +1094,71 @@ func (m *GetSchemasResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *GetSchemaMigrationsRequest_ClusterRequest) CloneVT() *GetSchemaMigrationsRequest_ClusterRequest { + if m == nil { + return (*GetSchemaMigrationsRequest_ClusterRequest)(nil) + } + r := &GetSchemaMigrationsRequest_ClusterRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaMigrationsRequest_ClusterRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaMigrationsRequest) CloneVT() *GetSchemaMigrationsRequest { + if m == nil { + return (*GetSchemaMigrationsRequest)(nil) + } + r := &GetSchemaMigrationsRequest{} + if rhs := m.ClusterRequests; rhs != nil { + tmpContainer := make([]*GetSchemaMigrationsRequest_ClusterRequest, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ClusterRequests = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaMigrationsRequest) CloneMessageVT() 
proto.Message { + return m.CloneVT() +} + +func (m *GetSchemaMigrationsResponse) CloneVT() *GetSchemaMigrationsResponse { + if m == nil { + return (*GetSchemaMigrationsResponse)(nil) + } + r := &GetSchemaMigrationsResponse{} + if rhs := m.SchemaMigrations; rhs != nil { + tmpContainer := make([]*SchemaMigration, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.SchemaMigrations = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetSchemaMigrationsResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *GetShardReplicationPositionsRequest) CloneVT() *GetShardReplicationPositionsRequest { if m == nil { return (*GetShardReplicationPositionsRequest)(nil) @@ -1483,6 +1643,25 @@ func (m *GetWorkflowsResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *LaunchSchemaMigrationRequest) CloneVT() *LaunchSchemaMigrationRequest { + if m == nil { + return (*LaunchSchemaMigrationRequest)(nil) + } + r := &LaunchSchemaMigrationRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *LaunchSchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *PingTabletRequest) CloneVT() *PingTabletRequest { if m == nil { return (*PingTabletRequest)(nil) @@ -1935,6 +2114,25 @@ func (m *RemoveKeyspaceCellResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *RetrySchemaMigrationRequest) CloneVT() *RetrySchemaMigrationRequest { + if m == nil { + return (*RetrySchemaMigrationRequest)(nil) + } + r := &RetrySchemaMigrationRequest{ + ClusterId: m.ClusterId, + Request: m.Request.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + 
copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *RetrySchemaMigrationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *RunHealthCheckRequest) CloneVT() *RunHealthCheckRequest { if m == nil { return (*RunHealthCheckRequest)(nil) @@ -2976,6 +3174,59 @@ func (m *Schema) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SchemaMigration) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemaMigration) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SchemaMigration) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.SchemaMigration != nil { + size, err := m.SchemaMigration.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if m.Cluster != nil { + size, err := m.Cluster.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Shard) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -3411,7 +3662,7 @@ func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *ApplySchemaRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3424,12 +3675,12 @@ func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], 
nil } -func (m *CreateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *ApplySchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3441,8 +3692,8 @@ func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Options != nil { - size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -3461,7 +3712,7 @@ func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CancelSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3474,12 +3725,12 @@ func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CreateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CancelSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3491,20 +3742,27 @@ func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } i -= 
size i = encodeVarint(dAtA, i, uint64(size)) i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CleanupSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3517,12 +3775,12 @@ func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CreateShardRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CleanupSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3534,8 +3792,8 @@ func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Options != nil { - size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -3554,7 +3812,7 @@ func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CompleteSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3567,12 +3825,12 @@ func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeleteKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m 
*DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CompleteSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3584,8 +3842,8 @@ func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.Options != nil { - size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -3604,7 +3862,7 @@ func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } -func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3617,12 +3875,12 @@ func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeleteShardsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3654,7 +3912,7 @@ func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DeleteTabletRequest) MarshalVT() (dAtA []byte, err error) { +func (m *CreateKeyspaceResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -3667,12 +3925,12 @@ func (m *DeleteTabletRequest) MarshalVT() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DeleteTabletRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceResponse) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return 
m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *DeleteTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *CreateKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -3684,27 +3942,8 @@ func (m *DeleteTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if m.AllowPrimary { - i-- - if m.AllowPrimary { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ClusterIds) > 0 { - for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ClusterIds[iNdEx]) - copy(dAtA[i:], m.ClusterIds[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Alias != nil { - size, err := m.Alias.MarshalToSizedBufferVT(dAtA[:i]) + if m.Keyspace != nil { + size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err } @@ -3716,7 +3955,219 @@ func (m *DeleteTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DeleteTabletResponse) MarshalVT() (dAtA []byte, err error) { +func (m *CreateShardRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateShardRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CreateShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteKeyspaceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteShardsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteShardsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteShardsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], 
m.unknownFields) + } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteTabletRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteTabletRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *DeleteTabletRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AllowPrimary { + i-- + if m.AllowPrimary { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Alias != nil { + size, err := m.Alias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DeleteTabletResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4772,7 +5223,7 @@ func (m *GetSchemasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *GetShardReplicationPositionsRequest) MarshalVT() (dAtA 
[]byte, err error) { +func (m *GetSchemaMigrationsRequest_ClusterRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4785,12 +5236,12 @@ func (m *GetShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err erro return dAtA[:n], nil } -func (m *GetShardReplicationPositionsRequest) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest_ClusterRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest_ClusterRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4802,37 +5253,27 @@ func (m *GetShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.KeyspaceShards) > 0 { - for iNdEx := len(m.KeyspaceShards) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.KeyspaceShards[iNdEx]) - copy(dAtA[i:], m.KeyspaceShards[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.KeyspaceShards[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Keyspaces) > 0 { - for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keyspaces[iNdEx]) - copy(dAtA[i:], m.Keyspaces[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspaces[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } - if len(m.ClusterIds) > 0 { - for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ClusterIds[iNdEx]) - copy(dAtA[i:], m.ClusterIds[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) - i-- - dAtA[i] = 0xa - } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, 
uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *GetShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) { +func (m *GetSchemaMigrationsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil } @@ -4845,12 +5286,12 @@ func (m *GetShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err err return dAtA[:n], nil } -func (m *GetShardReplicationPositionsResponse) MarshalToVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest) MarshalToVT(dAtA []byte) (int, error) { size := m.SizeVT() return m.MarshalToSizedBufferVT(dAtA[:size]) } -func (m *GetShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetSchemaMigrationsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { if m == nil { return 0, nil } @@ -4862,8 +5303,158 @@ func (m *GetShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byt i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } - if len(m.ReplicationPositions) > 0 { - for iNdEx := len(m.ReplicationPositions) - 1; iNdEx >= 0; iNdEx-- { + if len(m.ClusterRequests) > 0 { + for iNdEx := len(m.ClusterRequests) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ClusterRequests[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetSchemaMigrationsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSchemaMigrationsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetSchemaMigrationsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SchemaMigrations) > 0 { + for iNdEx := len(m.SchemaMigrations) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SchemaMigrations[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GetShardReplicationPositionsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetShardReplicationPositionsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetShardReplicationPositionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.KeyspaceShards) > 0 { + for iNdEx := len(m.KeyspaceShards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.KeyspaceShards[iNdEx]) + copy(dAtA[i:], m.KeyspaceShards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.KeyspaceShards[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Keyspaces) > 0 { + for iNdEx := len(m.Keyspaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Keyspaces[iNdEx]) + copy(dAtA[i:], m.Keyspaces[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspaces[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ClusterIds) > 0 { + for iNdEx := len(m.ClusterIds) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ClusterIds[iNdEx]) + copy(dAtA[i:], m.ClusterIds[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterIds[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return 
len(dAtA) - i, nil +} + +func (m *GetShardReplicationPositionsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetShardReplicationPositionsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetShardReplicationPositionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ReplicationPositions) > 0 { + for iNdEx := len(m.ReplicationPositions) - 1; iNdEx >= 0; iNdEx-- { size, err := m.ReplicationPositions[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) if err != nil { return 0, err @@ -5831,6 +6422,56 @@ func (m *GetWorkflowsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *LaunchSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LaunchSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *LaunchSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 
0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *PingTabletRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -6947,6 +7588,56 @@ func (m *RemoveKeyspaceCellResponse) MarshalToSizedBufferVT(dAtA []byte) (int, e return len(dAtA) - i, nil } +func (m *RetrySchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RetrySchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RetrySchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Request != nil { + size, err := m.Request.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.ClusterId) > 0 { + i -= len(m.ClusterId) + copy(dAtA[i:], m.ClusterId) + i = encodeVarint(dAtA, i, uint64(len(m.ClusterId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *RunHealthCheckRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -8274,6 +8965,24 @@ func (m *Schema) SizeVT() (n int) { return n } +func (m *SchemaMigration) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SchemaMigration != nil { + l = m.SchemaMigration.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += 
len(m.unknownFields) + return n +} + func (m *Shard) SizeVT() (n int) { if m == nil { return 0 @@ -8441,7 +9150,7 @@ func (m *Workflow) SizeVT() (n int) { return n } -func (m *CreateKeyspaceRequest) SizeVT() (n int) { +func (m *ApplySchemaRequest) SizeVT() (n int) { if m == nil { return 0 } @@ -8451,22 +9160,94 @@ func (m *CreateKeyspaceRequest) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } - if m.Options != nil { - l = m.Options.SizeVT() + if m.Request != nil { + l = m.Request.SizeVT() n += 1 + l + sov(uint64(l)) } n += len(m.unknownFields) return n } -func (m *CreateKeyspaceResponse) SizeVT() (n int) { +func (m *CancelSchemaMigrationRequest) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CleanupSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CompleteSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateKeyspaceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Options != nil { + l = m.Options.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CreateKeyspaceResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.Keyspace != nil { + l = m.Keyspace.SizeVT() n += 1 + l + sov(uint64(l)) } n += len(m.unknownFields) @@ -8951,6 +9732,56 @@ func (m *GetSchemasResponse) SizeVT() (n int) { return n } +func (m *GetSchemaMigrationsRequest_ClusterRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaMigrationsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterRequests) > 0 { + for _, e := range m.ClusterRequests { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetSchemaMigrationsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.SchemaMigrations) > 0 { + for _, e := range m.SchemaMigrations { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *GetShardReplicationPositionsRequest) SizeVT() (n int) { if m == nil { return 0 @@ -9367,6 +10198,24 @@ func (m *GetWorkflowsResponse) SizeVT() (n int) { return n } +func (m *LaunchSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *PingTabletRequest) SizeVT() (n int) { if m == nil { return 0 @@ -9800,6 +10649,24 @@ func (m *RemoveKeyspaceCellResponse) SizeVT() (n int) { return n } +func (m *RetrySchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClusterId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Request != nil { + l = m.Request.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) 
+ return n +} + func (m *RunHealthCheckRequest) SizeVT() (n int) { if m == nil { return 0 @@ -11961,6 +12828,129 @@ func (m *Schema) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *SchemaMigration) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaMigration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaMigration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaMigration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SchemaMigration == nil { + m.SchemaMigration = &vtctldata.SchemaMigration{} + } + if err := m.SchemaMigration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Shard) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -13121,7 +14111,7 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13144,10 +14134,10 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13184,7 +14174,7 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13211,10 
+14201,10 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Options == nil { - m.Options = &vtctldata.CreateKeyspaceRequest{} + if m.Request == nil { + m.Request = &vtctldata.ApplySchemaRequest{} } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13240,7 +14230,7 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *CancelSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13263,15 +14253,47 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: CancelSchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CancelSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13298,10 +14320,10 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} + if m.Request == nil { + m.Request = &vtctldata.CancelSchemaMigrationRequest{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13327,7 +14349,7 @@ func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *CleanupSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13350,10 +14372,10 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CleanupSchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CleanupSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13390,7 +14412,7 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13417,10 +14439,10 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { if postIndex 
> l { return io.ErrUnexpectedEOF } - if m.Options == nil { - m.Options = &vtctldata.CreateShardRequest{} + if m.Request == nil { + m.Request = &vtctldata.CleanupSchemaMigrationRequest{} } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13446,7 +14468,7 @@ func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *CompleteSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13469,10 +14491,10 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CompleteSchemaMigrationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CompleteSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13509,7 +14531,7 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13536,10 +14558,10 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Options == nil { - m.Options = &vtctldata.DeleteKeyspaceRequest{} + if m.Request == nil { + m.Request = &vtctldata.CompleteSchemaMigrationRequest{} } - if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13565,7 +14587,7 @@ func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { +func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13588,10 +14610,10 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CreateKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CreateKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13656,7 +14678,7 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Options == nil { - m.Options = &vtctldata.DeleteShardsRequest{} + m.Options = &vtctldata.CreateKeyspaceRequest{} } if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13684,7 +14706,7 @@ func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { +func (m *CreateKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13707,15 +14729,15 @@ func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteTabletRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CreateKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return 
fmt.Errorf("proto: CreateKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13742,16 +14764,67 @@ func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Alias == nil { - m.Alias = &topodata.TabletAlias{} + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} } - if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateShardRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateShardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13779,13 +14852,13 @@ func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -13795,12 +14868,28 @@ func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.AllowPrimary = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &vtctldata.CreateShardRequest{} + } + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -13823,7 +14912,7 @@ func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteTabletResponse) UnmarshalVT(dAtA []byte) error { +func (m *DeleteKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13846,15 +14935,15 @@ func (m *DeleteTabletResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteTabletResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13882,11 +14971,11 @@ func (m *DeleteTabletResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = string(dAtA[iNdEx:postIndex]) + m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13913,10 +15002,10 @@ func (m *DeleteTabletResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - if m.Cluster == nil { - m.Cluster = &Cluster{} + if m.Options == nil { + m.Options = &vtctldata.DeleteKeyspaceRequest{} } - if err := m.Cluster.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -13942,7 +15031,7 @@ func (m *DeleteTabletResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *EmergencyFailoverShardRequest) UnmarshalVT(dAtA []byte) error { +func (m *DeleteShardsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13965,10 +15054,10 @@ func (m *EmergencyFailoverShardRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EmergencyFailoverShardRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteShardsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmergencyFailoverShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteShardsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14033,7 +15122,7 @@ func (m *EmergencyFailoverShardRequest) UnmarshalVT(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Options == nil { - m.Options = &vtctldata.EmergencyReparentShardRequest{} + m.Options = &vtctldata.DeleteShardsRequest{} } if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err @@ -14061,7 +15150,7 @@ func (m *EmergencyFailoverShardRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { +func (m *DeleteTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14084,15 +15173,15 @@ func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if 
wireType == 4 { - return fmt.Errorf("proto: EmergencyFailoverShardResponse: wiretype end group for non-group") + return fmt.Errorf("proto: DeleteTabletRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmergencyFailoverShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DeleteTabletRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14119,16 +15208,16 @@ func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Cluster == nil { - m.Cluster = &Cluster{} + if m.Alias == nil { + m.Alias = &topodata.TabletAlias{} } - if err := m.Cluster.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14156,13 +15245,13 @@ func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrimary", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14172,63 +15261,98 @@ func (m 
*EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength + m.AllowPrimary = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLength } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Shard = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteTabletResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteTabletResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteTabletResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.PromotedPrimary == nil { - m.PromotedPrimary = &topodata.TabletAlias{} - } - if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Status = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14255,8 +15379,10 @@ func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, &logutil.Event{}) - if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14282,7 +15408,7 @@ func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FindSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *EmergencyFailoverShardRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14305,15 +15431,15 @@ func (m *FindSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FindSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: EmergencyFailoverShardRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: FindSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmergencyFailoverShardRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14341,43 +15467,11 @@ func (m *FindSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Table = string(dAtA[iNdEx:postIndex]) + m.ClusterId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14404,10 +15498,10 @@ func (m *FindSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TableSizeOptions == nil { - m.TableSizeOptions = &GetSchemaTableSizeOptions{} + if m.Options == nil { + m.Options = &vtctldata.EmergencyReparentShardRequest{} } - if err := 
m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14433,7 +15527,7 @@ func (m *FindSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { +func (m *EmergencyFailoverShardResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14456,17 +15550,17 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: EmergencyFailoverShardResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmergencyFailoverShardResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14476,27 +15570,31 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14524,11 +15622,11 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspaces = append(m.Keyspaces, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShards", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14556,11 +15654,11 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KeyspaceShards = append(m.KeyspaceShards, string(dAtA[iNdEx:postIndex])) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PromotedPrimary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14587,67 +15685,16 @@ func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RequestOptions == nil { - m.RequestOptions = &vtctldata.GetBackupsRequest{} + if m.PromotedPrimary == nil { + m.PromotedPrimary = &topodata.TabletAlias{} } - if err := m.RequestOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PromotedPrimary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) 
> l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14674,8 +15721,8 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Backups = append(m.Backups, &ClusterBackup{}) - if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, &logutil.Event{}) + if err := m.Events[len(m.Events)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14701,7 +15748,7 @@ func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { +func (m *FindSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14724,15 +15771,15 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) 
if wireType == 4 { - return fmt.Errorf("proto: GetCellInfosRequest: wiretype end group for non-group") + return fmt.Errorf("proto: FindSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FindSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14760,11 +15807,11 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + m.Table = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14792,13 +15839,13 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NamesOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14808,12 +15855,28 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.NamesOnly = 
bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -14836,7 +15899,7 @@ func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14859,17 +15922,17 @@ func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfosResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetBackupsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetBackupsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfos", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14879,23 +15942,594 @@ func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspaces = append(m.Keyspaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceShards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyspaceShards = append(m.KeyspaceShards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestOptions == nil { + m.RequestOptions = &vtctldata.GetBackupsRequest{} + } + if err := m.RequestOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBackupsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBackupsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBackupsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Backups = 
append(m.Backups, &ClusterBackup{}) + if err := m.Backups[len(m.Backups)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfosRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfosRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfosRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamesOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NamesOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfosResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfosResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfos", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CellInfos = append(m.CellInfos, &ClusterCellInfo{}) + if err := m.CellInfos[len(m.CellInfos)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.CellInfos = append(m.CellInfos, &ClusterCellInfo{}) - if err := m.CellInfos[len(m.CellInfos)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Aliases = append(m.Aliases, &ClusterCellsAliases{}) + if err := m.Aliases[len(m.Aliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14921,7 +16555,7 @@ func (m *GetCellInfosResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14944,17 +16578,68 @@ func (m *GetCellsAliasesRequest) 
UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetClustersRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClustersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -14964,23 +16649,25 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -15004,7 +16691,7 @@ func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15027,15 +16714,47 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15062,8 +16781,10 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Aliases = append(m.Aliases, &ClusterCellsAliases{}) - if err := m.Aliases[len(m.Aliases)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.Alias == nil { + m.Alias = &topodata.TabletAlias{} + } + if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15089,7 +16810,7 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15112,12 +16833,44 @@ func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetClustersRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetGatesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetGatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15140,7 +16893,7 @@ func (m *GetClustersRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15163,15 +16916,15 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetClustersResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetGatesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetGatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15198,8 +16951,8 @@ func (m *GetClustersResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Clusters = append(m.Clusters, &Cluster{}) - if err := m.Clusters[len(m.Clusters)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Gates = append(m.Gates, &VTGate{}) + if err := m.Gates[len(m.Gates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15225,7 +16978,7 @@ func (m 
*GetClustersResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15248,10 +17001,10 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15288,9 +17041,9 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Alias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -15300,27 +17053,23 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Alias == nil { - m.Alias = &topodata.TabletAlias{} - } - if err := m.Alias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -15344,7 +17093,7 @@ func (m *GetFullStatusRequest) 
UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15367,10 +17116,10 @@ func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetGatesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetGatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15427,7 +17176,7 @@ func (m *GetGatesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15450,15 +17199,15 @@ func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetGatesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetGatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15485,8 +17234,8 @@ func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Gates = 
append(m.Gates, &VTGate{}) - if err := m.Gates[len(m.Gates)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15512,7 +17261,7 @@ func (m *GetGatesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15535,10 +17284,10 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15605,6 +17354,74 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Table = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15627,7 +17444,7 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15650,10 +17467,10 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemasRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15688,6 +17505,42 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TableSizeOptions == nil { + m.TableSizeOptions = &GetSchemaTableSizeOptions{} + } + if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -15710,7 +17563,7 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15733,15 +17586,15 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemasResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15768,8 +17621,8 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspaces = append(m.Keyspaces, &Keyspace{}) - if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Schemas = append(m.Schemas, &Schema{}) + if err 
:= m.Schemas[len(m.Schemas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15795,7 +17648,7 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsRequest_ClusterRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15818,10 +17671,10 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsRequest_ClusterRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsRequest_ClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -15858,71 +17711,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Table", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Table = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15949,10 +17738,10 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TableSizeOptions == nil { - m.TableSizeOptions = &GetSchemaTableSizeOptions{} + if m.Request == nil { + m.Request = &vtctldata.GetSchemaMigrationsRequest{} } - if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15978,7 +17767,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15986,62 +17775,30 @@ func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSchemasRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
GetSchemasRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIds", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength + return ErrIntOverflow } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.ClusterIds = append(m.ClusterIds, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSchemaMigrationsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSchemaMigrationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizeOptions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRequests", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16068,10 +17825,8 @@ func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TableSizeOptions == nil { - m.TableSizeOptions = &GetSchemaTableSizeOptions{} - } - if err := m.TableSizeOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.ClusterRequests = append(m.ClusterRequests, &GetSchemaMigrationsRequest_ClusterRequest{}) + if err := m.ClusterRequests[len(m.ClusterRequests)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err 
!= nil { return err } iNdEx = postIndex @@ -16097,7 +17852,7 @@ func (m *GetSchemasRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16120,15 +17875,15 @@ func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemasResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchemaMigrations", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16155,8 +17910,8 @@ func (m *GetSchemasResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Schemas = append(m.Schemas, &Schema{}) - if err := m.Schemas[len(m.Schemas)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.SchemaMigrations = append(m.SchemaMigrations, &SchemaMigration{}) + if err := m.SchemaMigrations[len(m.SchemaMigrations)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18629,6 +20384,125 @@ func (m *GetWorkflowsResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *LaunchSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LaunchSchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LaunchSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &vtctldata.LaunchSchemaMigrationRequest{} + } + if err := m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + 
return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PingTabletRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -19868,7 +21742,7 @@ func (m *ReloadSchemasRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -20652,7 +22526,7 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -21340,6 +23214,125 @@ func (m *RemoveKeyspaceCellResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *RetrySchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RetrySchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RetrySchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Request == nil { + m.Request = &vtctldata.RetrySchemaMigrationRequest{} + } + if err := m.Request.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/go/vt/proto/vtctldata/vtctldata.pb.go b/go/vt/proto/vtctldata/vtctldata.pb.go index 5f70d625ffa..06f600f4cf5 100644 --- a/go/vt/proto/vtctldata/vtctldata.pb.go +++ b/go/vt/proto/vtctldata/vtctldata.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vtctldata.proto @@ -486,6 +486,7 @@ type MaterializeSettings struct { DeferSecondaryKeys bool `protobuf:"varint,14,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,15,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` AtomicCopy bool `protobuf:"varint,16,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` + WorkflowOptions *WorkflowOptions `protobuf:"bytes,17,opt,name=workflow_options,json=workflowOptions,proto3" json:"workflow_options,omitempty"` } func (x *MaterializeSettings) Reset() { @@ -632,6 +633,13 @@ func (x *MaterializeSettings) GetAtomicCopy() bool { return false } +func (x *MaterializeSettings) GetWorkflowOptions() *WorkflowOptions { + if x != nil { + return x.WorkflowOptions + } + return nil +} + type Keyspace struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1222,6 +1230,73 @@ func (x *Shard) GetShard() *topodata.Shard { return nil } +type WorkflowOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TenantId string `protobuf:"bytes,1,opt,name=tenant_id,json=tenantId,proto3" json:"tenant_id,omitempty"` + // Remove auto_increment clauses on tables when moving them to a sharded + // keyspace. + StripShardedAutoIncrement bool `protobuf:"varint,2,opt,name=strip_sharded_auto_increment,json=stripShardedAutoIncrement,proto3" json:"strip_sharded_auto_increment,omitempty"` + // Shards on which vreplication streams in the target keyspace are created for this workflow and to which the data + // from the source will be vreplicated. 
+ Shards []string `protobuf:"bytes,3,rep,name=shards,proto3" json:"shards,omitempty"` +} + +func (x *WorkflowOptions) Reset() { + *x = WorkflowOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowOptions) ProtoMessage() {} + +func (x *WorkflowOptions) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowOptions.ProtoReflect.Descriptor instead. +func (*WorkflowOptions) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{7} +} + +func (x *WorkflowOptions) GetTenantId() string { + if x != nil { + return x.TenantId + } + return "" +} + +func (x *WorkflowOptions) GetStripShardedAutoIncrement() bool { + if x != nil { + return x.StripShardedAutoIncrement + } + return false +} + +func (x *WorkflowOptions) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + // TODO: comment the hell out of this. type Workflow struct { state protoimpl.MessageState @@ -1243,12 +1318,15 @@ type Workflow struct { MaxVReplicationTransactionLag int64 `protobuf:"varint,8,opt,name=max_v_replication_transaction_lag,json=maxVReplicationTransactionLag,proto3" json:"max_v_replication_transaction_lag,omitempty"` // This specifies whether to defer the creation of secondary keys. DeferSecondaryKeys bool `protobuf:"varint,9,opt,name=defer_secondary_keys,json=deferSecondaryKeys,proto3" json:"defer_secondary_keys,omitempty"` + // These are additional (optional) settings for vreplication workflows. 
Previously we used to add it to the + // binlogdata.BinlogSource proto object. More details in go/vt/sidecardb/schema/vreplication.sql. + Options *WorkflowOptions `protobuf:"bytes,10,opt,name=options,proto3" json:"options,omitempty"` } func (x *Workflow) Reset() { *x = Workflow{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[7] + mi := &file_vtctldata_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1261,7 +1339,7 @@ func (x *Workflow) String() string { func (*Workflow) ProtoMessage() {} func (x *Workflow) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[7] + mi := &file_vtctldata_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1274,7 +1352,7 @@ func (x *Workflow) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow.ProtoReflect.Descriptor instead. func (*Workflow) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7} + return file_vtctldata_proto_rawDescGZIP(), []int{8} } func (x *Workflow) GetName() string { @@ -1340,6 +1418,13 @@ func (x *Workflow) GetDeferSecondaryKeys() bool { return false } +func (x *Workflow) GetOptions() *WorkflowOptions { + if x != nil { + return x.Options + } + return nil +} + type AddCellInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1352,7 +1437,7 @@ type AddCellInfoRequest struct { func (x *AddCellInfoRequest) Reset() { *x = AddCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[8] + mi := &file_vtctldata_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1365,7 +1450,7 @@ func (x *AddCellInfoRequest) String() string { func (*AddCellInfoRequest) ProtoMessage() {} func (x *AddCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := 
&file_vtctldata_proto_msgTypes[8] + mi := &file_vtctldata_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1378,7 +1463,7 @@ func (x *AddCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddCellInfoRequest.ProtoReflect.Descriptor instead. func (*AddCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{8} + return file_vtctldata_proto_rawDescGZIP(), []int{9} } func (x *AddCellInfoRequest) GetName() string { @@ -1404,7 +1489,7 @@ type AddCellInfoResponse struct { func (x *AddCellInfoResponse) Reset() { *x = AddCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[9] + mi := &file_vtctldata_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1417,7 +1502,7 @@ func (x *AddCellInfoResponse) String() string { func (*AddCellInfoResponse) ProtoMessage() {} func (x *AddCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[9] + mi := &file_vtctldata_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1430,7 +1515,7 @@ func (x *AddCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AddCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*AddCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{9} + return file_vtctldata_proto_rawDescGZIP(), []int{10} } type AddCellsAliasRequest struct { @@ -1445,7 +1530,7 @@ type AddCellsAliasRequest struct { func (x *AddCellsAliasRequest) Reset() { *x = AddCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[10] + mi := &file_vtctldata_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1458,7 +1543,7 @@ func (x *AddCellsAliasRequest) String() string { func (*AddCellsAliasRequest) ProtoMessage() {} func (x *AddCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[10] + mi := &file_vtctldata_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1471,7 +1556,7 @@ func (x *AddCellsAliasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddCellsAliasRequest.ProtoReflect.Descriptor instead. 
func (*AddCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{10} + return file_vtctldata_proto_rawDescGZIP(), []int{11} } func (x *AddCellsAliasRequest) GetName() string { @@ -1497,7 +1582,7 @@ type AddCellsAliasResponse struct { func (x *AddCellsAliasResponse) Reset() { *x = AddCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[11] + mi := &file_vtctldata_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1510,7 +1595,7 @@ func (x *AddCellsAliasResponse) String() string { func (*AddCellsAliasResponse) ProtoMessage() {} func (x *AddCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[11] + mi := &file_vtctldata_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1523,7 +1608,124 @@ func (x *AddCellsAliasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AddCellsAliasResponse.ProtoReflect.Descriptor instead. func (*AddCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{11} + return file_vtctldata_proto_rawDescGZIP(), []int{12} +} + +type ApplyKeyspaceRoutingRulesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyspaceRoutingRules *vschema.KeyspaceRoutingRules `protobuf:"bytes,1,opt,name=keyspace_routing_rules,json=keyspaceRoutingRules,proto3" json:"keyspace_routing_rules,omitempty"` + // SkipRebuild, if set, will cause ApplyKeyspaceRoutingRules to skip rebuilding the + // SrvVSchema objects in each cell in RebuildCells. + SkipRebuild bool `protobuf:"varint,2,opt,name=skip_rebuild,json=skipRebuild,proto3" json:"skip_rebuild,omitempty"` + // RebuildCells limits the SrvVSchema rebuild to the specified cells. 
If not + // provided the SrvVSchema will be rebuilt in every cell in the topology. + // + // Ignored if SkipRebuild is set. + RebuildCells []string `protobuf:"bytes,3,rep,name=rebuild_cells,json=rebuildCells,proto3" json:"rebuild_cells,omitempty"` +} + +func (x *ApplyKeyspaceRoutingRulesRequest) Reset() { + *x = ApplyKeyspaceRoutingRulesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyKeyspaceRoutingRulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyKeyspaceRoutingRulesRequest) ProtoMessage() {} + +func (x *ApplyKeyspaceRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyKeyspaceRoutingRulesRequest.ProtoReflect.Descriptor instead. +func (*ApplyKeyspaceRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{13} +} + +func (x *ApplyKeyspaceRoutingRulesRequest) GetKeyspaceRoutingRules() *vschema.KeyspaceRoutingRules { + if x != nil { + return x.KeyspaceRoutingRules + } + return nil +} + +func (x *ApplyKeyspaceRoutingRulesRequest) GetSkipRebuild() bool { + if x != nil { + return x.SkipRebuild + } + return false +} + +func (x *ApplyKeyspaceRoutingRulesRequest) GetRebuildCells() []string { + if x != nil { + return x.RebuildCells + } + return nil +} + +type ApplyKeyspaceRoutingRulesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // KeyspaceRoutingRules returns the current set of rules. 
+ KeyspaceRoutingRules *vschema.KeyspaceRoutingRules `protobuf:"bytes,1,opt,name=keyspace_routing_rules,json=keyspaceRoutingRules,proto3" json:"keyspace_routing_rules,omitempty"` +} + +func (x *ApplyKeyspaceRoutingRulesResponse) Reset() { + *x = ApplyKeyspaceRoutingRulesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyKeyspaceRoutingRulesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyKeyspaceRoutingRulesResponse) ProtoMessage() {} + +func (x *ApplyKeyspaceRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyKeyspaceRoutingRulesResponse.ProtoReflect.Descriptor instead. 
+func (*ApplyKeyspaceRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{14} +} + +func (x *ApplyKeyspaceRoutingRulesResponse) GetKeyspaceRoutingRules() *vschema.KeyspaceRoutingRules { + if x != nil { + return x.KeyspaceRoutingRules + } + return nil } type ApplyRoutingRulesRequest struct { @@ -1545,7 +1747,7 @@ type ApplyRoutingRulesRequest struct { func (x *ApplyRoutingRulesRequest) Reset() { *x = ApplyRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[12] + mi := &file_vtctldata_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1558,7 +1760,7 @@ func (x *ApplyRoutingRulesRequest) String() string { func (*ApplyRoutingRulesRequest) ProtoMessage() {} func (x *ApplyRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[12] + mi := &file_vtctldata_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1571,7 +1773,7 @@ func (x *ApplyRoutingRulesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyRoutingRulesRequest.ProtoReflect.Descriptor instead. 
func (*ApplyRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{12} + return file_vtctldata_proto_rawDescGZIP(), []int{15} } func (x *ApplyRoutingRulesRequest) GetRoutingRules() *vschema.RoutingRules { @@ -1604,7 +1806,7 @@ type ApplyRoutingRulesResponse struct { func (x *ApplyRoutingRulesResponse) Reset() { *x = ApplyRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[13] + mi := &file_vtctldata_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1617,7 +1819,7 @@ func (x *ApplyRoutingRulesResponse) String() string { func (*ApplyRoutingRulesResponse) ProtoMessage() {} func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[13] + mi := &file_vtctldata_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1630,7 +1832,7 @@ func (x *ApplyRoutingRulesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyRoutingRulesResponse.ProtoReflect.Descriptor instead. 
func (*ApplyRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{13} + return file_vtctldata_proto_rawDescGZIP(), []int{16} } type ApplyShardRoutingRulesRequest struct { @@ -1652,7 +1854,7 @@ type ApplyShardRoutingRulesRequest struct { func (x *ApplyShardRoutingRulesRequest) Reset() { *x = ApplyShardRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[14] + mi := &file_vtctldata_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1665,7 +1867,7 @@ func (x *ApplyShardRoutingRulesRequest) String() string { func (*ApplyShardRoutingRulesRequest) ProtoMessage() {} func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[14] + mi := &file_vtctldata_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1678,7 +1880,7 @@ func (x *ApplyShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyShardRoutingRulesRequest.ProtoReflect.Descriptor instead. 
func (*ApplyShardRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{14} + return file_vtctldata_proto_rawDescGZIP(), []int{17} } func (x *ApplyShardRoutingRulesRequest) GetShardRoutingRules() *vschema.ShardRoutingRules { @@ -1711,7 +1913,7 @@ type ApplyShardRoutingRulesResponse struct { func (x *ApplyShardRoutingRulesResponse) Reset() { *x = ApplyShardRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[15] + mi := &file_vtctldata_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1724,7 +1926,7 @@ func (x *ApplyShardRoutingRulesResponse) String() string { func (*ApplyShardRoutingRulesResponse) ProtoMessage() {} func (x *ApplyShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[15] + mi := &file_vtctldata_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1737,7 +1939,7 @@ func (x *ApplyShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyShardRoutingRulesResponse.ProtoReflect.Descriptor instead. 
func (*ApplyShardRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{15} + return file_vtctldata_proto_rawDescGZIP(), []int{18} } type ApplySchemaRequest struct { @@ -1769,7 +1971,7 @@ type ApplySchemaRequest struct { func (x *ApplySchemaRequest) Reset() { *x = ApplySchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[16] + mi := &file_vtctldata_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1782,7 +1984,7 @@ func (x *ApplySchemaRequest) String() string { func (*ApplySchemaRequest) ProtoMessage() {} func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[16] + mi := &file_vtctldata_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1795,7 +1997,7 @@ func (x *ApplySchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplySchemaRequest.ProtoReflect.Descriptor instead. 
func (*ApplySchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{16} + return file_vtctldata_proto_rawDescGZIP(), []int{19} } func (x *ApplySchemaRequest) GetKeyspace() string { @@ -1866,7 +2068,7 @@ type ApplySchemaResponse struct { func (x *ApplySchemaResponse) Reset() { *x = ApplySchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[17] + mi := &file_vtctldata_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1879,7 +2081,7 @@ func (x *ApplySchemaResponse) String() string { func (*ApplySchemaResponse) ProtoMessage() {} func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[17] + mi := &file_vtctldata_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1892,7 +2094,7 @@ func (x *ApplySchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplySchemaResponse.ProtoReflect.Descriptor instead. func (*ApplySchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{17} + return file_vtctldata_proto_rawDescGZIP(), []int{20} } func (x *ApplySchemaResponse) GetUuidList() []string { @@ -1920,12 +2122,14 @@ type ApplyVSchemaRequest struct { Cells []string `protobuf:"bytes,4,rep,name=cells,proto3" json:"cells,omitempty"` VSchema *vschema.Keyspace `protobuf:"bytes,5,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` Sql string `protobuf:"bytes,6,opt,name=sql,proto3" json:"sql,omitempty"` + // Strict returns an error if there are unknown vindex params. 
+ Strict bool `protobuf:"varint,7,opt,name=strict,proto3" json:"strict,omitempty"` } func (x *ApplyVSchemaRequest) Reset() { *x = ApplyVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[18] + mi := &file_vtctldata_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1938,7 +2142,7 @@ func (x *ApplyVSchemaRequest) String() string { func (*ApplyVSchemaRequest) ProtoMessage() {} func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[18] + mi := &file_vtctldata_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1951,7 +2155,7 @@ func (x *ApplyVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyVSchemaRequest.ProtoReflect.Descriptor instead. func (*ApplyVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{18} + return file_vtctldata_proto_rawDescGZIP(), []int{21} } func (x *ApplyVSchemaRequest) GetKeyspace() string { @@ -1996,18 +2200,34 @@ func (x *ApplyVSchemaRequest) GetSql() string { return "" } +func (x *ApplyVSchemaRequest) GetStrict() bool { + if x != nil { + return x.Strict + } + return false +} + type ApplyVSchemaResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields VSchema *vschema.Keyspace `protobuf:"bytes,1,opt,name=v_schema,json=vSchema,proto3" json:"v_schema,omitempty"` + // UnknownVindexParams is a map of vindex name to params that were not recognized by the vindex + // type. 
E.g.: + // + // { + // "lookup_vdx": { + // "params": ["raed_lock", "not_verify"] + // } + // } + UnknownVindexParams map[string]*ApplyVSchemaResponse_ParamList `protobuf:"bytes,2,rep,name=unknown_vindex_params,json=unknownVindexParams,proto3" json:"unknown_vindex_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *ApplyVSchemaResponse) Reset() { *x = ApplyVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[19] + mi := &file_vtctldata_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2020,7 +2240,7 @@ func (x *ApplyVSchemaResponse) String() string { func (*ApplyVSchemaResponse) ProtoMessage() {} func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[19] + mi := &file_vtctldata_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2033,7 +2253,7 @@ func (x *ApplyVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyVSchemaResponse.ProtoReflect.Descriptor instead. 
func (*ApplyVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{19} + return file_vtctldata_proto_rawDescGZIP(), []int{22} } func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { @@ -2043,6 +2263,13 @@ func (x *ApplyVSchemaResponse) GetVSchema() *vschema.Keyspace { return nil } +func (x *ApplyVSchemaResponse) GetUnknownVindexParams() map[string]*ApplyVSchemaResponse_ParamList { + if x != nil { + return x.UnknownVindexParams + } + return nil +} + type BackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2056,7 +2283,7 @@ type BackupRequest struct { AllowPrimary bool `protobuf:"varint,2,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` // Concurrency specifies the number of compression/checksum jobs to run // simultaneously. - Concurrency uint64 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Concurrency int32 `protobuf:"varint,3,opt,name=concurrency,proto3" json:"concurrency,omitempty"` // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. 
IncrementalFromPos string `protobuf:"bytes,4,opt,name=incremental_from_pos,json=incrementalFromPos,proto3" json:"incremental_from_pos,omitempty"` @@ -2068,7 +2295,7 @@ type BackupRequest struct { func (x *BackupRequest) Reset() { *x = BackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[20] + mi := &file_vtctldata_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2081,7 +2308,7 @@ func (x *BackupRequest) String() string { func (*BackupRequest) ProtoMessage() {} func (x *BackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[20] + mi := &file_vtctldata_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2094,7 +2321,7 @@ func (x *BackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupRequest.ProtoReflect.Descriptor instead. func (*BackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{20} + return file_vtctldata_proto_rawDescGZIP(), []int{23} } func (x *BackupRequest) GetTabletAlias() *topodata.TabletAlias { @@ -2111,7 +2338,7 @@ func (x *BackupRequest) GetAllowPrimary() bool { return false } -func (x *BackupRequest) GetConcurrency() uint64 { +func (x *BackupRequest) GetConcurrency() int32 { if x != nil { return x.Concurrency } @@ -2147,7 +2374,7 @@ type BackupResponse struct { func (x *BackupResponse) Reset() { *x = BackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[21] + mi := &file_vtctldata_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2160,7 +2387,7 @@ func (x *BackupResponse) String() string { func (*BackupResponse) ProtoMessage() {} func (x *BackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[21] + mi := &file_vtctldata_proto_msgTypes[24] if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2173,7 +2400,7 @@ func (x *BackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupResponse.ProtoReflect.Descriptor instead. func (*BackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{21} + return file_vtctldata_proto_rawDescGZIP(), []int{24} } func (x *BackupResponse) GetTabletAlias() *topodata.TabletAlias { @@ -2216,7 +2443,7 @@ type BackupShardRequest struct { AllowPrimary bool `protobuf:"varint,3,opt,name=allow_primary,json=allowPrimary,proto3" json:"allow_primary,omitempty"` // Concurrency specifies the number of compression/checksum jobs to run // simultaneously. - Concurrency uint64 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 // so that it's a backup that can be used for an upgrade. 
UpgradeSafe bool `protobuf:"varint,5,opt,name=upgrade_safe,json=upgradeSafe,proto3" json:"upgrade_safe,omitempty"` @@ -2228,7 +2455,7 @@ type BackupShardRequest struct { func (x *BackupShardRequest) Reset() { *x = BackupShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[22] + mi := &file_vtctldata_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2241,7 +2468,7 @@ func (x *BackupShardRequest) String() string { func (*BackupShardRequest) ProtoMessage() {} func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[22] + mi := &file_vtctldata_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2254,7 +2481,7 @@ func (x *BackupShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupShardRequest.ProtoReflect.Descriptor instead. func (*BackupShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{22} + return file_vtctldata_proto_rawDescGZIP(), []int{25} } func (x *BackupShardRequest) GetKeyspace() string { @@ -2278,7 +2505,7 @@ func (x *BackupShardRequest) GetAllowPrimary() bool { return false } -func (x *BackupShardRequest) GetConcurrency() uint64 { +func (x *BackupShardRequest) GetConcurrency() int32 { if x != nil { return x.Concurrency } @@ -2311,7 +2538,7 @@ type CancelSchemaMigrationRequest struct { func (x *CancelSchemaMigrationRequest) Reset() { *x = CancelSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[23] + mi := &file_vtctldata_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2324,7 +2551,7 @@ func (x *CancelSchemaMigrationRequest) String() string { func (*CancelSchemaMigrationRequest) ProtoMessage() {} func (x *CancelSchemaMigrationRequest) ProtoReflect() 
protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[23] + mi := &file_vtctldata_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2337,7 +2564,7 @@ func (x *CancelSchemaMigrationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelSchemaMigrationRequest.ProtoReflect.Descriptor instead. func (*CancelSchemaMigrationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{23} + return file_vtctldata_proto_rawDescGZIP(), []int{26} } func (x *CancelSchemaMigrationRequest) GetKeyspace() string { @@ -2365,7 +2592,7 @@ type CancelSchemaMigrationResponse struct { func (x *CancelSchemaMigrationResponse) Reset() { *x = CancelSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[24] + mi := &file_vtctldata_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2378,7 +2605,7 @@ func (x *CancelSchemaMigrationResponse) String() string { func (*CancelSchemaMigrationResponse) ProtoMessage() {} func (x *CancelSchemaMigrationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[24] + mi := &file_vtctldata_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2391,7 +2618,7 @@ func (x *CancelSchemaMigrationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelSchemaMigrationResponse.ProtoReflect.Descriptor instead. 
func (*CancelSchemaMigrationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{24} + return file_vtctldata_proto_rawDescGZIP(), []int{27} } func (x *CancelSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { @@ -2414,7 +2641,7 @@ type ChangeTabletTypeRequest struct { func (x *ChangeTabletTypeRequest) Reset() { *x = ChangeTabletTypeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[25] + mi := &file_vtctldata_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2427,7 +2654,7 @@ func (x *ChangeTabletTypeRequest) String() string { func (*ChangeTabletTypeRequest) ProtoMessage() {} func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[25] + mi := &file_vtctldata_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2440,7 +2667,7 @@ func (x *ChangeTabletTypeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeTabletTypeRequest.ProtoReflect.Descriptor instead. 
func (*ChangeTabletTypeRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{25} + return file_vtctldata_proto_rawDescGZIP(), []int{28} } func (x *ChangeTabletTypeRequest) GetTabletAlias() *topodata.TabletAlias { @@ -2477,7 +2704,7 @@ type ChangeTabletTypeResponse struct { func (x *ChangeTabletTypeResponse) Reset() { *x = ChangeTabletTypeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[26] + mi := &file_vtctldata_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2490,7 +2717,7 @@ func (x *ChangeTabletTypeResponse) String() string { func (*ChangeTabletTypeResponse) ProtoMessage() {} func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[26] + mi := &file_vtctldata_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2503,7 +2730,7 @@ func (x *ChangeTabletTypeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ChangeTabletTypeResponse.ProtoReflect.Descriptor instead. 
func (*ChangeTabletTypeResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{26} + return file_vtctldata_proto_rawDescGZIP(), []int{29} } func (x *ChangeTabletTypeResponse) GetBeforeTablet() *topodata.Tablet { @@ -2539,7 +2766,7 @@ type CleanupSchemaMigrationRequest struct { func (x *CleanupSchemaMigrationRequest) Reset() { *x = CleanupSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[27] + mi := &file_vtctldata_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2552,7 +2779,7 @@ func (x *CleanupSchemaMigrationRequest) String() string { func (*CleanupSchemaMigrationRequest) ProtoMessage() {} func (x *CleanupSchemaMigrationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[27] + mi := &file_vtctldata_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2565,7 +2792,7 @@ func (x *CleanupSchemaMigrationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CleanupSchemaMigrationRequest.ProtoReflect.Descriptor instead. 
func (*CleanupSchemaMigrationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{27} + return file_vtctldata_proto_rawDescGZIP(), []int{30} } func (x *CleanupSchemaMigrationRequest) GetKeyspace() string { @@ -2593,7 +2820,7 @@ type CleanupSchemaMigrationResponse struct { func (x *CleanupSchemaMigrationResponse) Reset() { *x = CleanupSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[28] + mi := &file_vtctldata_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2606,7 +2833,7 @@ func (x *CleanupSchemaMigrationResponse) String() string { func (*CleanupSchemaMigrationResponse) ProtoMessage() {} func (x *CleanupSchemaMigrationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[28] + mi := &file_vtctldata_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2619,7 +2846,7 @@ func (x *CleanupSchemaMigrationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CleanupSchemaMigrationResponse.ProtoReflect.Descriptor instead. 
func (*CleanupSchemaMigrationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{28} + return file_vtctldata_proto_rawDescGZIP(), []int{31} } func (x *CleanupSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { @@ -2641,7 +2868,7 @@ type CompleteSchemaMigrationRequest struct { func (x *CompleteSchemaMigrationRequest) Reset() { *x = CompleteSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[29] + mi := &file_vtctldata_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2654,7 +2881,7 @@ func (x *CompleteSchemaMigrationRequest) String() string { func (*CompleteSchemaMigrationRequest) ProtoMessage() {} func (x *CompleteSchemaMigrationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[29] + mi := &file_vtctldata_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2667,7 +2894,7 @@ func (x *CompleteSchemaMigrationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CompleteSchemaMigrationRequest.ProtoReflect.Descriptor instead. 
func (*CompleteSchemaMigrationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{29} + return file_vtctldata_proto_rawDescGZIP(), []int{32} } func (x *CompleteSchemaMigrationRequest) GetKeyspace() string { @@ -2695,7 +2922,7 @@ type CompleteSchemaMigrationResponse struct { func (x *CompleteSchemaMigrationResponse) Reset() { *x = CompleteSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[30] + mi := &file_vtctldata_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2708,7 +2935,7 @@ func (x *CompleteSchemaMigrationResponse) String() string { func (*CompleteSchemaMigrationResponse) ProtoMessage() {} func (x *CompleteSchemaMigrationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[30] + mi := &file_vtctldata_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2721,7 +2948,7 @@ func (x *CompleteSchemaMigrationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CompleteSchemaMigrationResponse.ProtoReflect.Descriptor instead. func (*CompleteSchemaMigrationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{30} + return file_vtctldata_proto_rawDescGZIP(), []int{33} } func (x *CompleteSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { @@ -2742,9 +2969,6 @@ type CreateKeyspaceRequest struct { Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` // AllowEmptyVSchema allows a keyspace to be created with no vschema. AllowEmptyVSchema bool `protobuf:"varint,3,opt,name=allow_empty_v_schema,json=allowEmptyVSchema,proto3" json:"allow_empty_v_schema,omitempty"` - // ServedFroms specifies a set of db_type:keyspace pairs used to serve - // traffic for the keyspace. 
- ServedFroms []*topodata.Keyspace_ServedFrom `protobuf:"bytes,6,rep,name=served_froms,json=servedFroms,proto3" json:"served_froms,omitempty"` // Type is the type of the keyspace to create. Type topodata.KeyspaceType `protobuf:"varint,7,opt,name=type,proto3,enum=topodata.KeyspaceType" json:"type,omitempty"` // BaseKeyspace specifies the base keyspace for SNAPSHOT keyspaces. It is @@ -2764,7 +2988,7 @@ type CreateKeyspaceRequest struct { func (x *CreateKeyspaceRequest) Reset() { *x = CreateKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[31] + mi := &file_vtctldata_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2777,7 +3001,7 @@ func (x *CreateKeyspaceRequest) String() string { func (*CreateKeyspaceRequest) ProtoMessage() {} func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[31] + mi := &file_vtctldata_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2790,7 +3014,7 @@ func (x *CreateKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*CreateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{31} + return file_vtctldata_proto_rawDescGZIP(), []int{34} } func (x *CreateKeyspaceRequest) GetName() string { @@ -2814,13 +3038,6 @@ func (x *CreateKeyspaceRequest) GetAllowEmptyVSchema() bool { return false } -func (x *CreateKeyspaceRequest) GetServedFroms() []*topodata.Keyspace_ServedFrom { - if x != nil { - return x.ServedFroms - } - return nil -} - func (x *CreateKeyspaceRequest) GetType() topodata.KeyspaceType { if x != nil { return x.Type @@ -2868,7 +3085,7 @@ type CreateKeyspaceResponse struct { func (x *CreateKeyspaceResponse) Reset() { *x = CreateKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[32] + mi := &file_vtctldata_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2881,7 +3098,7 @@ func (x *CreateKeyspaceResponse) String() string { func (*CreateKeyspaceResponse) ProtoMessage() {} func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[32] + mi := &file_vtctldata_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2894,7 +3111,7 @@ func (x *CreateKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*CreateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{32} + return file_vtctldata_proto_rawDescGZIP(), []int{35} } func (x *CreateKeyspaceResponse) GetKeyspace() *Keyspace { @@ -2924,7 +3141,7 @@ type CreateShardRequest struct { func (x *CreateShardRequest) Reset() { *x = CreateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[33] + mi := &file_vtctldata_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2937,7 +3154,7 @@ func (x *CreateShardRequest) String() string { func (*CreateShardRequest) ProtoMessage() {} func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[33] + mi := &file_vtctldata_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2950,7 +3167,7 @@ func (x *CreateShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateShardRequest.ProtoReflect.Descriptor instead. 
func (*CreateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{33} + return file_vtctldata_proto_rawDescGZIP(), []int{36} } func (x *CreateShardRequest) GetKeyspace() string { @@ -2999,7 +3216,7 @@ type CreateShardResponse struct { func (x *CreateShardResponse) Reset() { *x = CreateShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[34] + mi := &file_vtctldata_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3012,7 +3229,7 @@ func (x *CreateShardResponse) String() string { func (*CreateShardResponse) ProtoMessage() {} func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[34] + mi := &file_vtctldata_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3025,7 +3242,7 @@ func (x *CreateShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateShardResponse.ProtoReflect.Descriptor instead. 
func (*CreateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{34} + return file_vtctldata_proto_rawDescGZIP(), []int{37} } func (x *CreateShardResponse) GetKeyspace() *Keyspace { @@ -3061,7 +3278,7 @@ type DeleteCellInfoRequest struct { func (x *DeleteCellInfoRequest) Reset() { *x = DeleteCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[35] + mi := &file_vtctldata_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3074,7 +3291,7 @@ func (x *DeleteCellInfoRequest) String() string { func (*DeleteCellInfoRequest) ProtoMessage() {} func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[35] + mi := &file_vtctldata_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3087,7 +3304,7 @@ func (x *DeleteCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellInfoRequest.ProtoReflect.Descriptor instead. 
func (*DeleteCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{35} + return file_vtctldata_proto_rawDescGZIP(), []int{38} } func (x *DeleteCellInfoRequest) GetName() string { @@ -3113,7 +3330,7 @@ type DeleteCellInfoResponse struct { func (x *DeleteCellInfoResponse) Reset() { *x = DeleteCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[36] + mi := &file_vtctldata_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3126,7 +3343,7 @@ func (x *DeleteCellInfoResponse) String() string { func (*DeleteCellInfoResponse) ProtoMessage() {} func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[36] + mi := &file_vtctldata_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3139,7 +3356,7 @@ func (x *DeleteCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*DeleteCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{36} + return file_vtctldata_proto_rawDescGZIP(), []int{39} } type DeleteCellsAliasRequest struct { @@ -3153,7 +3370,7 @@ type DeleteCellsAliasRequest struct { func (x *DeleteCellsAliasRequest) Reset() { *x = DeleteCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[37] + mi := &file_vtctldata_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3166,7 +3383,7 @@ func (x *DeleteCellsAliasRequest) String() string { func (*DeleteCellsAliasRequest) ProtoMessage() {} func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[37] + mi := &file_vtctldata_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3179,7 +3396,7 @@ func (x *DeleteCellsAliasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellsAliasRequest.ProtoReflect.Descriptor instead. 
func (*DeleteCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{37} + return file_vtctldata_proto_rawDescGZIP(), []int{40} } func (x *DeleteCellsAliasRequest) GetName() string { @@ -3198,7 +3415,7 @@ type DeleteCellsAliasResponse struct { func (x *DeleteCellsAliasResponse) Reset() { *x = DeleteCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[38] + mi := &file_vtctldata_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3211,7 +3428,7 @@ func (x *DeleteCellsAliasResponse) String() string { func (*DeleteCellsAliasResponse) ProtoMessage() {} func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[38] + mi := &file_vtctldata_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3224,7 +3441,7 @@ func (x *DeleteCellsAliasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCellsAliasResponse.ProtoReflect.Descriptor instead. 
func (*DeleteCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{38} + return file_vtctldata_proto_rawDescGZIP(), []int{41} } type DeleteKeyspaceRequest struct { @@ -3246,7 +3463,7 @@ type DeleteKeyspaceRequest struct { func (x *DeleteKeyspaceRequest) Reset() { *x = DeleteKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[39] + mi := &file_vtctldata_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3259,7 +3476,7 @@ func (x *DeleteKeyspaceRequest) String() string { func (*DeleteKeyspaceRequest) ProtoMessage() {} func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[39] + mi := &file_vtctldata_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3272,7 +3489,7 @@ func (x *DeleteKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*DeleteKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{39} + return file_vtctldata_proto_rawDescGZIP(), []int{42} } func (x *DeleteKeyspaceRequest) GetKeyspace() string { @@ -3305,7 +3522,7 @@ type DeleteKeyspaceResponse struct { func (x *DeleteKeyspaceResponse) Reset() { *x = DeleteKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[40] + mi := &file_vtctldata_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3318,7 +3535,7 @@ func (x *DeleteKeyspaceResponse) String() string { func (*DeleteKeyspaceResponse) ProtoMessage() {} func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[40] + mi := &file_vtctldata_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3331,7 +3548,7 @@ func (x *DeleteKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*DeleteKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{40} + return file_vtctldata_proto_rawDescGZIP(), []int{43} } type DeleteShardsRequest struct { @@ -3357,7 +3574,7 @@ type DeleteShardsRequest struct { func (x *DeleteShardsRequest) Reset() { *x = DeleteShardsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[41] + mi := &file_vtctldata_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3370,7 +3587,7 @@ func (x *DeleteShardsRequest) String() string { func (*DeleteShardsRequest) ProtoMessage() {} func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[41] + mi := &file_vtctldata_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3383,7 +3600,7 @@ func (x *DeleteShardsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteShardsRequest.ProtoReflect.Descriptor instead. 
func (*DeleteShardsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{41} + return file_vtctldata_proto_rawDescGZIP(), []int{44} } func (x *DeleteShardsRequest) GetShards() []*Shard { @@ -3423,7 +3640,7 @@ type DeleteShardsResponse struct { func (x *DeleteShardsResponse) Reset() { *x = DeleteShardsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[42] + mi := &file_vtctldata_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3436,7 +3653,7 @@ func (x *DeleteShardsResponse) String() string { func (*DeleteShardsResponse) ProtoMessage() {} func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[42] + mi := &file_vtctldata_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3449,7 +3666,7 @@ func (x *DeleteShardsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteShardsResponse.ProtoReflect.Descriptor instead. 
func (*DeleteShardsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{42} + return file_vtctldata_proto_rawDescGZIP(), []int{45} } type DeleteSrvVSchemaRequest struct { @@ -3463,7 +3680,7 @@ type DeleteSrvVSchemaRequest struct { func (x *DeleteSrvVSchemaRequest) Reset() { *x = DeleteSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[43] + mi := &file_vtctldata_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3476,7 +3693,7 @@ func (x *DeleteSrvVSchemaRequest) String() string { func (*DeleteSrvVSchemaRequest) ProtoMessage() {} func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[43] + mi := &file_vtctldata_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3489,7 +3706,7 @@ func (x *DeleteSrvVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSrvVSchemaRequest.ProtoReflect.Descriptor instead. 
func (*DeleteSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{43} + return file_vtctldata_proto_rawDescGZIP(), []int{46} } func (x *DeleteSrvVSchemaRequest) GetCell() string { @@ -3508,7 +3725,7 @@ type DeleteSrvVSchemaResponse struct { func (x *DeleteSrvVSchemaResponse) Reset() { *x = DeleteSrvVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[44] + mi := &file_vtctldata_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3521,7 +3738,7 @@ func (x *DeleteSrvVSchemaResponse) String() string { func (*DeleteSrvVSchemaResponse) ProtoMessage() {} func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[44] + mi := &file_vtctldata_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3534,7 +3751,7 @@ func (x *DeleteSrvVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSrvVSchemaResponse.ProtoReflect.Descriptor instead. 
func (*DeleteSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{44} + return file_vtctldata_proto_rawDescGZIP(), []int{47} } type DeleteTabletsRequest struct { @@ -3552,7 +3769,7 @@ type DeleteTabletsRequest struct { func (x *DeleteTabletsRequest) Reset() { *x = DeleteTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[45] + mi := &file_vtctldata_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3565,7 +3782,7 @@ func (x *DeleteTabletsRequest) String() string { func (*DeleteTabletsRequest) ProtoMessage() {} func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[45] + mi := &file_vtctldata_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3578,7 +3795,7 @@ func (x *DeleteTabletsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTabletsRequest.ProtoReflect.Descriptor instead. 
func (*DeleteTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{45} + return file_vtctldata_proto_rawDescGZIP(), []int{48} } func (x *DeleteTabletsRequest) GetTabletAliases() []*topodata.TabletAlias { @@ -3604,7 +3821,7 @@ type DeleteTabletsResponse struct { func (x *DeleteTabletsResponse) Reset() { *x = DeleteTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[46] + mi := &file_vtctldata_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3617,7 +3834,7 @@ func (x *DeleteTabletsResponse) String() string { func (*DeleteTabletsResponse) ProtoMessage() {} func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[46] + mi := &file_vtctldata_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3630,7 +3847,7 @@ func (x *DeleteTabletsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTabletsResponse.ProtoReflect.Descriptor instead. 
func (*DeleteTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{46} + return file_vtctldata_proto_rawDescGZIP(), []int{49} } type EmergencyReparentShardRequest struct { @@ -3664,7 +3881,7 @@ type EmergencyReparentShardRequest struct { func (x *EmergencyReparentShardRequest) Reset() { *x = EmergencyReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[47] + mi := &file_vtctldata_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3677,7 +3894,7 @@ func (x *EmergencyReparentShardRequest) String() string { func (*EmergencyReparentShardRequest) ProtoMessage() {} func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[47] + mi := &file_vtctldata_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3690,7 +3907,7 @@ func (x *EmergencyReparentShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use EmergencyReparentShardRequest.ProtoReflect.Descriptor instead. 
func (*EmergencyReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{47} + return file_vtctldata_proto_rawDescGZIP(), []int{50} } func (x *EmergencyReparentShardRequest) GetKeyspace() string { @@ -3762,7 +3979,7 @@ type EmergencyReparentShardResponse struct { func (x *EmergencyReparentShardResponse) Reset() { *x = EmergencyReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[48] + mi := &file_vtctldata_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3775,7 +3992,7 @@ func (x *EmergencyReparentShardResponse) String() string { func (*EmergencyReparentShardResponse) ProtoMessage() {} func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[48] + mi := &file_vtctldata_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3788,7 +4005,7 @@ func (x *EmergencyReparentShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use EmergencyReparentShardResponse.ProtoReflect.Descriptor instead. 
func (*EmergencyReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{48} + return file_vtctldata_proto_rawDescGZIP(), []int{51} } func (x *EmergencyReparentShardResponse) GetKeyspace() string { @@ -3840,7 +4057,7 @@ type ExecuteFetchAsAppRequest struct { func (x *ExecuteFetchAsAppRequest) Reset() { *x = ExecuteFetchAsAppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[49] + mi := &file_vtctldata_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3853,7 +4070,7 @@ func (x *ExecuteFetchAsAppRequest) String() string { func (*ExecuteFetchAsAppRequest) ProtoMessage() {} func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[49] + mi := &file_vtctldata_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3866,7 +4083,7 @@ func (x *ExecuteFetchAsAppRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsAppRequest.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsAppRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{49} + return file_vtctldata_proto_rawDescGZIP(), []int{52} } func (x *ExecuteFetchAsAppRequest) GetTabletAlias() *topodata.TabletAlias { @@ -3908,7 +4125,7 @@ type ExecuteFetchAsAppResponse struct { func (x *ExecuteFetchAsAppResponse) Reset() { *x = ExecuteFetchAsAppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[50] + mi := &file_vtctldata_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3921,7 +4138,7 @@ func (x *ExecuteFetchAsAppResponse) String() string { func (*ExecuteFetchAsAppResponse) ProtoMessage() {} func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[50] + mi := &file_vtctldata_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3934,7 +4151,7 @@ func (x *ExecuteFetchAsAppResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsAppResponse.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsAppResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{50} + return file_vtctldata_proto_rawDescGZIP(), []int{53} } func (x *ExecuteFetchAsAppResponse) GetResult() *query.QueryResult { @@ -3969,7 +4186,7 @@ type ExecuteFetchAsDBARequest struct { func (x *ExecuteFetchAsDBARequest) Reset() { *x = ExecuteFetchAsDBARequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[51] + mi := &file_vtctldata_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3982,7 +4199,7 @@ func (x *ExecuteFetchAsDBARequest) String() string { func (*ExecuteFetchAsDBARequest) ProtoMessage() {} func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[51] + mi := &file_vtctldata_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3995,7 +4212,7 @@ func (x *ExecuteFetchAsDBARequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsDBARequest.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsDBARequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{51} + return file_vtctldata_proto_rawDescGZIP(), []int{54} } func (x *ExecuteFetchAsDBARequest) GetTabletAlias() *topodata.TabletAlias { @@ -4044,7 +4261,7 @@ type ExecuteFetchAsDBAResponse struct { func (x *ExecuteFetchAsDBAResponse) Reset() { *x = ExecuteFetchAsDBAResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[52] + mi := &file_vtctldata_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4057,7 +4274,7 @@ func (x *ExecuteFetchAsDBAResponse) String() string { func (*ExecuteFetchAsDBAResponse) ProtoMessage() {} func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[52] + mi := &file_vtctldata_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4070,7 +4287,7 @@ func (x *ExecuteFetchAsDBAResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteFetchAsDBAResponse.ProtoReflect.Descriptor instead. 
func (*ExecuteFetchAsDBAResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{52} + return file_vtctldata_proto_rawDescGZIP(), []int{55} } func (x *ExecuteFetchAsDBAResponse) GetResult() *query.QueryResult { @@ -4092,7 +4309,7 @@ type ExecuteHookRequest struct { func (x *ExecuteHookRequest) Reset() { *x = ExecuteHookRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[53] + mi := &file_vtctldata_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4105,7 +4322,7 @@ func (x *ExecuteHookRequest) String() string { func (*ExecuteHookRequest) ProtoMessage() {} func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[53] + mi := &file_vtctldata_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4118,7 +4335,7 @@ func (x *ExecuteHookRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteHookRequest.ProtoReflect.Descriptor instead. 
func (*ExecuteHookRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{53} + return file_vtctldata_proto_rawDescGZIP(), []int{56} } func (x *ExecuteHookRequest) GetTabletAlias() *topodata.TabletAlias { @@ -4146,7 +4363,7 @@ type ExecuteHookResponse struct { func (x *ExecuteHookResponse) Reset() { *x = ExecuteHookResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[54] + mi := &file_vtctldata_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4159,7 +4376,7 @@ func (x *ExecuteHookResponse) String() string { func (*ExecuteHookResponse) ProtoMessage() {} func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[54] + mi := &file_vtctldata_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4172,7 +4389,7 @@ func (x *ExecuteHookResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ExecuteHookResponse.ProtoReflect.Descriptor instead. func (*ExecuteHookResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{54} + return file_vtctldata_proto_rawDescGZIP(), []int{57} } func (x *ExecuteHookResponse) GetHookResult() *tabletmanagerdata.ExecuteHookResponse { @@ -4182,31 +4399,46 @@ func (x *ExecuteHookResponse) GetHookResult() *tabletmanagerdata.ExecuteHookResp return nil } -type FindAllShardsInKeyspaceRequest struct { +type ExecuteMultiFetchAsDBARequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + TabletAlias *topodata.TabletAlias `protobuf:"bytes,1,opt,name=tablet_alias,json=tabletAlias,proto3" json:"tablet_alias,omitempty"` + // SQL could have potentially multiple queries separated by semicolons. 
+ Sql string `protobuf:"bytes,2,opt,name=sql,proto3" json:"sql,omitempty"` + // MaxRows is an optional parameter to limit the number of rows read into the + // QueryResult. Note that this does not apply a LIMIT to a query, just how + // many rows are read from the MySQL server on the tablet side. + // + // This field is optional. Specifying a non-positive value will use whatever + // default is configured in the VtctldService. + MaxRows int64 `protobuf:"varint,3,opt,name=max_rows,json=maxRows,proto3" json:"max_rows,omitempty"` + // DisableBinlogs instructs the tablet not to use binary logging when + // executing the query. + DisableBinlogs bool `protobuf:"varint,4,opt,name=disable_binlogs,json=disableBinlogs,proto3" json:"disable_binlogs,omitempty"` + // ReloadSchema instructs the tablet to reload its schema after executing the + // query. + ReloadSchema bool `protobuf:"varint,5,opt,name=reload_schema,json=reloadSchema,proto3" json:"reload_schema,omitempty"` } -func (x *FindAllShardsInKeyspaceRequest) Reset() { - *x = FindAllShardsInKeyspaceRequest{} +func (x *ExecuteMultiFetchAsDBARequest) Reset() { + *x = ExecuteMultiFetchAsDBARequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[55] + mi := &file_vtctldata_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *FindAllShardsInKeyspaceRequest) String() string { +func (x *ExecuteMultiFetchAsDBARequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} +func (*ExecuteMultiFetchAsDBARequest) ProtoMessage() {} -func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[55] +func (x *ExecuteMultiFetchAsDBARequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == 
nil { @@ -4217,30 +4449,152 @@ func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. -func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{55} +// Deprecated: Use ExecuteMultiFetchAsDBARequest.ProtoReflect.Descriptor instead. +func (*ExecuteMultiFetchAsDBARequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{58} } -func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { +func (x *ExecuteMultiFetchAsDBARequest) GetTabletAlias() *topodata.TabletAlias { if x != nil { - return x.Keyspace + return x.TabletAlias + } + return nil +} + +func (x *ExecuteMultiFetchAsDBARequest) GetSql() string { + if x != nil { + return x.Sql } return "" } -type FindAllShardsInKeyspaceResponse struct { +func (x *ExecuteMultiFetchAsDBARequest) GetMaxRows() int64 { + if x != nil { + return x.MaxRows + } + return 0 +} + +func (x *ExecuteMultiFetchAsDBARequest) GetDisableBinlogs() bool { + if x != nil { + return x.DisableBinlogs + } + return false +} + +func (x *ExecuteMultiFetchAsDBARequest) GetReloadSchema() bool { + if x != nil { + return x.ReloadSchema + } + return false +} + +type ExecuteMultiFetchAsDBAResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Results []*query.QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *ExecuteMultiFetchAsDBAResponse) Reset() { + *x = ExecuteMultiFetchAsDBAResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } 
+} + +func (x *ExecuteMultiFetchAsDBAResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteMultiFetchAsDBAResponse) ProtoMessage() {} + +func (x *ExecuteMultiFetchAsDBAResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteMultiFetchAsDBAResponse.ProtoReflect.Descriptor instead. +func (*ExecuteMultiFetchAsDBAResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{59} +} + +func (x *ExecuteMultiFetchAsDBAResponse) GetResults() []*query.QueryResult { + if x != nil { + return x.Results + } + return nil +} + +type FindAllShardsInKeyspaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` +} + +func (x *FindAllShardsInKeyspaceRequest) Reset() { + *x = FindAllShardsInKeyspaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FindAllShardsInKeyspaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FindAllShardsInKeyspaceRequest) ProtoMessage() {} + +func (x *FindAllShardsInKeyspaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[60] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FindAllShardsInKeyspaceRequest.ProtoReflect.Descriptor instead. 
+func (*FindAllShardsInKeyspaceRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{60} +} + +func (x *FindAllShardsInKeyspaceRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +type FindAllShardsInKeyspaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Shards map[string]*Shard `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *FindAllShardsInKeyspaceResponse) Reset() { *x = FindAllShardsInKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[56] + mi := &file_vtctldata_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4253,7 +4607,7 @@ func (x *FindAllShardsInKeyspaceResponse) String() string { func (*FindAllShardsInKeyspaceResponse) ProtoMessage() {} func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[56] + mi := &file_vtctldata_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4266,7 +4620,7 @@ func (x *FindAllShardsInKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FindAllShardsInKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*FindAllShardsInKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{56} + return file_vtctldata_proto_rawDescGZIP(), []int{61} } func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { @@ -4276,6 +4630,108 @@ func (x *FindAllShardsInKeyspaceResponse) GetShards() map[string]*Shard { return nil } +type ForceCutOverSchemaMigrationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` +} + +func (x *ForceCutOverSchemaMigrationRequest) Reset() { + *x = ForceCutOverSchemaMigrationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForceCutOverSchemaMigrationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceCutOverSchemaMigrationRequest) ProtoMessage() {} + +func (x *ForceCutOverSchemaMigrationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[62] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceCutOverSchemaMigrationRequest.ProtoReflect.Descriptor instead. 
+func (*ForceCutOverSchemaMigrationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{62} +} + +func (x *ForceCutOverSchemaMigrationRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *ForceCutOverSchemaMigrationRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type ForceCutOverSchemaMigrationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RowsAffectedByShard map[string]uint64 `protobuf:"bytes,1,rep,name=rows_affected_by_shard,json=rowsAffectedByShard,proto3" json:"rows_affected_by_shard,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` +} + +func (x *ForceCutOverSchemaMigrationResponse) Reset() { + *x = ForceCutOverSchemaMigrationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ForceCutOverSchemaMigrationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceCutOverSchemaMigrationResponse) ProtoMessage() {} + +func (x *ForceCutOverSchemaMigrationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceCutOverSchemaMigrationResponse.ProtoReflect.Descriptor instead. 
+func (*ForceCutOverSchemaMigrationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{63} +} + +func (x *ForceCutOverSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { + if x != nil { + return x.RowsAffectedByShard + } + return nil +} + type GetBackupsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -4302,7 +4758,7 @@ type GetBackupsRequest struct { func (x *GetBackupsRequest) Reset() { *x = GetBackupsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[57] + mi := &file_vtctldata_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4315,7 +4771,7 @@ func (x *GetBackupsRequest) String() string { func (*GetBackupsRequest) ProtoMessage() {} func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[57] + mi := &file_vtctldata_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4328,7 +4784,7 @@ func (x *GetBackupsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupsRequest.ProtoReflect.Descriptor instead. 
func (*GetBackupsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{57} + return file_vtctldata_proto_rawDescGZIP(), []int{64} } func (x *GetBackupsRequest) GetKeyspace() string { @@ -4377,7 +4833,7 @@ type GetBackupsResponse struct { func (x *GetBackupsResponse) Reset() { *x = GetBackupsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[58] + mi := &file_vtctldata_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4390,7 +4846,7 @@ func (x *GetBackupsResponse) String() string { func (*GetBackupsResponse) ProtoMessage() {} func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[58] + mi := &file_vtctldata_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4403,7 +4859,7 @@ func (x *GetBackupsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupsResponse.ProtoReflect.Descriptor instead. 
func (*GetBackupsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{58} + return file_vtctldata_proto_rawDescGZIP(), []int{65} } func (x *GetBackupsResponse) GetBackups() []*mysqlctl.BackupInfo { @@ -4424,7 +4880,7 @@ type GetCellInfoRequest struct { func (x *GetCellInfoRequest) Reset() { *x = GetCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[59] + mi := &file_vtctldata_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4437,7 +4893,7 @@ func (x *GetCellInfoRequest) String() string { func (*GetCellInfoRequest) ProtoMessage() {} func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[59] + mi := &file_vtctldata_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4450,7 +4906,7 @@ func (x *GetCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoRequest.ProtoReflect.Descriptor instead. 
func (*GetCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{59} + return file_vtctldata_proto_rawDescGZIP(), []int{66} } func (x *GetCellInfoRequest) GetCell() string { @@ -4471,7 +4927,7 @@ type GetCellInfoResponse struct { func (x *GetCellInfoResponse) Reset() { *x = GetCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[60] + mi := &file_vtctldata_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4484,7 +4940,7 @@ func (x *GetCellInfoResponse) String() string { func (*GetCellInfoResponse) ProtoMessage() {} func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[60] + mi := &file_vtctldata_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4497,7 +4953,7 @@ func (x *GetCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*GetCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{60} + return file_vtctldata_proto_rawDescGZIP(), []int{67} } func (x *GetCellInfoResponse) GetCellInfo() *topodata.CellInfo { @@ -4516,7 +4972,7 @@ type GetCellInfoNamesRequest struct { func (x *GetCellInfoNamesRequest) Reset() { *x = GetCellInfoNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[61] + mi := &file_vtctldata_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4529,7 +4985,7 @@ func (x *GetCellInfoNamesRequest) String() string { func (*GetCellInfoNamesRequest) ProtoMessage() {} func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[61] + mi := &file_vtctldata_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4542,7 +4998,7 @@ func (x *GetCellInfoNamesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoNamesRequest.ProtoReflect.Descriptor instead. 
func (*GetCellInfoNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{61} + return file_vtctldata_proto_rawDescGZIP(), []int{68} } type GetCellInfoNamesResponse struct { @@ -4556,7 +5012,7 @@ type GetCellInfoNamesResponse struct { func (x *GetCellInfoNamesResponse) Reset() { *x = GetCellInfoNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[62] + mi := &file_vtctldata_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4569,7 +5025,7 @@ func (x *GetCellInfoNamesResponse) String() string { func (*GetCellInfoNamesResponse) ProtoMessage() {} func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[62] + mi := &file_vtctldata_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4582,7 +5038,7 @@ func (x *GetCellInfoNamesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellInfoNamesResponse.ProtoReflect.Descriptor instead. 
func (*GetCellInfoNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{62} + return file_vtctldata_proto_rawDescGZIP(), []int{69} } func (x *GetCellInfoNamesResponse) GetNames() []string { @@ -4601,7 +5057,7 @@ type GetCellsAliasesRequest struct { func (x *GetCellsAliasesRequest) Reset() { *x = GetCellsAliasesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[63] + mi := &file_vtctldata_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4614,7 +5070,7 @@ func (x *GetCellsAliasesRequest) String() string { func (*GetCellsAliasesRequest) ProtoMessage() {} func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[63] + mi := &file_vtctldata_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4627,7 +5083,7 @@ func (x *GetCellsAliasesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellsAliasesRequest.ProtoReflect.Descriptor instead. 
func (*GetCellsAliasesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{63} + return file_vtctldata_proto_rawDescGZIP(), []int{70} } type GetCellsAliasesResponse struct { @@ -4641,7 +5097,7 @@ type GetCellsAliasesResponse struct { func (x *GetCellsAliasesResponse) Reset() { *x = GetCellsAliasesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[64] + mi := &file_vtctldata_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4654,7 +5110,7 @@ func (x *GetCellsAliasesResponse) String() string { func (*GetCellsAliasesResponse) ProtoMessage() {} func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[64] + mi := &file_vtctldata_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4667,7 +5123,7 @@ func (x *GetCellsAliasesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetCellsAliasesResponse.ProtoReflect.Descriptor instead. 
func (*GetCellsAliasesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{64} + return file_vtctldata_proto_rawDescGZIP(), []int{71} } func (x *GetCellsAliasesResponse) GetAliases() map[string]*topodata.CellsAlias { @@ -4688,7 +5144,7 @@ type GetFullStatusRequest struct { func (x *GetFullStatusRequest) Reset() { *x = GetFullStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[65] + mi := &file_vtctldata_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4701,7 +5157,7 @@ func (x *GetFullStatusRequest) String() string { func (*GetFullStatusRequest) ProtoMessage() {} func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[65] + mi := &file_vtctldata_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4714,7 +5170,7 @@ func (x *GetFullStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFullStatusRequest.ProtoReflect.Descriptor instead. 
func (*GetFullStatusRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{65} + return file_vtctldata_proto_rawDescGZIP(), []int{72} } func (x *GetFullStatusRequest) GetTabletAlias() *topodata.TabletAlias { @@ -4735,7 +5191,7 @@ type GetFullStatusResponse struct { func (x *GetFullStatusResponse) Reset() { *x = GetFullStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[66] + mi := &file_vtctldata_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4748,7 +5204,7 @@ func (x *GetFullStatusResponse) String() string { func (*GetFullStatusResponse) ProtoMessage() {} func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[66] + mi := &file_vtctldata_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4761,7 +5217,7 @@ func (x *GetFullStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetFullStatusResponse.ProtoReflect.Descriptor instead. 
func (*GetFullStatusResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{66} + return file_vtctldata_proto_rawDescGZIP(), []int{73} } func (x *GetFullStatusResponse) GetStatus() *replicationdata.FullStatus { @@ -4780,7 +5236,7 @@ type GetKeyspacesRequest struct { func (x *GetKeyspacesRequest) Reset() { *x = GetKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[67] + mi := &file_vtctldata_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4793,7 +5249,7 @@ func (x *GetKeyspacesRequest) String() string { func (*GetKeyspacesRequest) ProtoMessage() {} func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[67] + mi := &file_vtctldata_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4806,7 +5262,7 @@ func (x *GetKeyspacesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspacesRequest.ProtoReflect.Descriptor instead. 
func (*GetKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{67} + return file_vtctldata_proto_rawDescGZIP(), []int{74} } type GetKeyspacesResponse struct { @@ -4820,7 +5276,7 @@ type GetKeyspacesResponse struct { func (x *GetKeyspacesResponse) Reset() { *x = GetKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[68] + mi := &file_vtctldata_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4833,7 +5289,7 @@ func (x *GetKeyspacesResponse) String() string { func (*GetKeyspacesResponse) ProtoMessage() {} func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[68] + mi := &file_vtctldata_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4846,7 +5302,7 @@ func (x *GetKeyspacesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspacesResponse.ProtoReflect.Descriptor instead. 
func (*GetKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{68} + return file_vtctldata_proto_rawDescGZIP(), []int{75} } func (x *GetKeyspacesResponse) GetKeyspaces() []*Keyspace { @@ -4867,7 +5323,7 @@ type GetKeyspaceRequest struct { func (x *GetKeyspaceRequest) Reset() { *x = GetKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[69] + mi := &file_vtctldata_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4880,7 +5336,7 @@ func (x *GetKeyspaceRequest) String() string { func (*GetKeyspaceRequest) ProtoMessage() {} func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[69] + mi := &file_vtctldata_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4893,7 +5349,7 @@ func (x *GetKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*GetKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{69} + return file_vtctldata_proto_rawDescGZIP(), []int{76} } func (x *GetKeyspaceRequest) GetKeyspace() string { @@ -4914,7 +5370,7 @@ type GetKeyspaceResponse struct { func (x *GetKeyspaceResponse) Reset() { *x = GetKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[70] + mi := &file_vtctldata_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4927,7 +5383,7 @@ func (x *GetKeyspaceResponse) String() string { func (*GetKeyspaceResponse) ProtoMessage() {} func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[70] + mi := &file_vtctldata_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4940,7 +5396,7 @@ func (x *GetKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*GetKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{70} + return file_vtctldata_proto_rawDescGZIP(), []int{77} } func (x *GetKeyspaceResponse) GetKeyspace() *Keyspace { @@ -4961,7 +5417,7 @@ type GetPermissionsRequest struct { func (x *GetPermissionsRequest) Reset() { *x = GetPermissionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[71] + mi := &file_vtctldata_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4974,7 +5430,7 @@ func (x *GetPermissionsRequest) String() string { func (*GetPermissionsRequest) ProtoMessage() {} func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[71] + mi := &file_vtctldata_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4987,7 +5443,7 @@ func (x *GetPermissionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPermissionsRequest.ProtoReflect.Descriptor instead. 
func (*GetPermissionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{71} + return file_vtctldata_proto_rawDescGZIP(), []int{78} } func (x *GetPermissionsRequest) GetTabletAlias() *topodata.TabletAlias { @@ -5008,7 +5464,7 @@ type GetPermissionsResponse struct { func (x *GetPermissionsResponse) Reset() { *x = GetPermissionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[72] + mi := &file_vtctldata_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5021,7 +5477,7 @@ func (x *GetPermissionsResponse) String() string { func (*GetPermissionsResponse) ProtoMessage() {} func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[72] + mi := &file_vtctldata_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5034,7 +5490,7 @@ func (x *GetPermissionsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetPermissionsResponse.ProtoReflect.Descriptor instead. 
func (*GetPermissionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{72} + return file_vtctldata_proto_rawDescGZIP(), []int{79} } func (x *GetPermissionsResponse) GetPermissions() *tabletmanagerdata.Permissions { @@ -5044,6 +5500,91 @@ func (x *GetPermissionsResponse) GetPermissions() *tabletmanagerdata.Permissions return nil } +type GetKeyspaceRoutingRulesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetKeyspaceRoutingRulesRequest) Reset() { + *x = GetKeyspaceRoutingRulesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetKeyspaceRoutingRulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyspaceRoutingRulesRequest) ProtoMessage() {} + +func (x *GetKeyspaceRoutingRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[80] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetKeyspaceRoutingRulesRequest.ProtoReflect.Descriptor instead. 
+func (*GetKeyspaceRoutingRulesRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{80} +} + +type GetKeyspaceRoutingRulesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyspaceRoutingRules *vschema.KeyspaceRoutingRules `protobuf:"bytes,1,opt,name=keyspace_routing_rules,json=keyspaceRoutingRules,proto3" json:"keyspace_routing_rules,omitempty"` +} + +func (x *GetKeyspaceRoutingRulesResponse) Reset() { + *x = GetKeyspaceRoutingRulesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetKeyspaceRoutingRulesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetKeyspaceRoutingRulesResponse) ProtoMessage() {} + +func (x *GetKeyspaceRoutingRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[81] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetKeyspaceRoutingRulesResponse.ProtoReflect.Descriptor instead. 
+func (*GetKeyspaceRoutingRulesResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{81} +} + +func (x *GetKeyspaceRoutingRulesResponse) GetKeyspaceRoutingRules() *vschema.KeyspaceRoutingRules { + if x != nil { + return x.KeyspaceRoutingRules + } + return nil +} + type GetRoutingRulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5053,7 +5594,7 @@ type GetRoutingRulesRequest struct { func (x *GetRoutingRulesRequest) Reset() { *x = GetRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[73] + mi := &file_vtctldata_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5066,7 +5607,7 @@ func (x *GetRoutingRulesRequest) String() string { func (*GetRoutingRulesRequest) ProtoMessage() {} func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[73] + mi := &file_vtctldata_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5079,7 +5620,7 @@ func (x *GetRoutingRulesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRoutingRulesRequest.ProtoReflect.Descriptor instead. 
func (*GetRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{73} + return file_vtctldata_proto_rawDescGZIP(), []int{82} } type GetRoutingRulesResponse struct { @@ -5093,7 +5634,7 @@ type GetRoutingRulesResponse struct { func (x *GetRoutingRulesResponse) Reset() { *x = GetRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[74] + mi := &file_vtctldata_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5106,7 +5647,7 @@ func (x *GetRoutingRulesResponse) String() string { func (*GetRoutingRulesResponse) ProtoMessage() {} func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[74] + mi := &file_vtctldata_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5119,7 +5660,7 @@ func (x *GetRoutingRulesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetRoutingRulesResponse.ProtoReflect.Descriptor instead. 
func (*GetRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{74} + return file_vtctldata_proto_rawDescGZIP(), []int{83} } func (x *GetRoutingRulesResponse) GetRoutingRules() *vschema.RoutingRules { @@ -5158,7 +5699,7 @@ type GetSchemaRequest struct { func (x *GetSchemaRequest) Reset() { *x = GetSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[75] + mi := &file_vtctldata_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5171,7 +5712,7 @@ func (x *GetSchemaRequest) String() string { func (*GetSchemaRequest) ProtoMessage() {} func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[75] + mi := &file_vtctldata_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5184,7 +5725,7 @@ func (x *GetSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaRequest.ProtoReflect.Descriptor instead. 
func (*GetSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{75} + return file_vtctldata_proto_rawDescGZIP(), []int{84} } func (x *GetSchemaRequest) GetTabletAlias() *topodata.TabletAlias { @@ -5247,7 +5788,7 @@ type GetSchemaResponse struct { func (x *GetSchemaResponse) Reset() { *x = GetSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[76] + mi := &file_vtctldata_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5260,7 +5801,7 @@ func (x *GetSchemaResponse) String() string { func (*GetSchemaResponse) ProtoMessage() {} func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[76] + mi := &file_vtctldata_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5273,7 +5814,7 @@ func (x *GetSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaResponse.ProtoReflect.Descriptor instead. 
func (*GetSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{76} + return file_vtctldata_proto_rawDescGZIP(), []int{85} } func (x *GetSchemaResponse) GetSchema() *tabletmanagerdata.SchemaDefinition { @@ -5319,7 +5860,7 @@ type GetSchemaMigrationsRequest struct { func (x *GetSchemaMigrationsRequest) Reset() { *x = GetSchemaMigrationsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[77] + mi := &file_vtctldata_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5332,7 +5873,7 @@ func (x *GetSchemaMigrationsRequest) String() string { func (*GetSchemaMigrationsRequest) ProtoMessage() {} func (x *GetSchemaMigrationsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[77] + mi := &file_vtctldata_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5345,7 +5886,7 @@ func (x *GetSchemaMigrationsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaMigrationsRequest.ProtoReflect.Descriptor instead. 
func (*GetSchemaMigrationsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{77} + return file_vtctldata_proto_rawDescGZIP(), []int{86} } func (x *GetSchemaMigrationsRequest) GetKeyspace() string { @@ -5415,7 +5956,7 @@ type GetSchemaMigrationsResponse struct { func (x *GetSchemaMigrationsResponse) Reset() { *x = GetSchemaMigrationsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[78] + mi := &file_vtctldata_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5428,7 +5969,7 @@ func (x *GetSchemaMigrationsResponse) String() string { func (*GetSchemaMigrationsResponse) ProtoMessage() {} func (x *GetSchemaMigrationsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[78] + mi := &file_vtctldata_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5441,7 +5982,7 @@ func (x *GetSchemaMigrationsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSchemaMigrationsResponse.ProtoReflect.Descriptor instead. func (*GetSchemaMigrationsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{78} + return file_vtctldata_proto_rawDescGZIP(), []int{87} } func (x *GetSchemaMigrationsResponse) GetMigrations() []*SchemaMigration { @@ -5451,6 +5992,118 @@ func (x *GetSchemaMigrationsResponse) GetMigrations() []*SchemaMigration { return nil } +type GetShardReplicationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Shard string `protobuf:"bytes,2,opt,name=shard,proto3" json:"shard,omitempty"` + // Cells is the list of cells to fetch data for. Omit to fetch data from all + // cells. 
+ Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` +} + +func (x *GetShardReplicationRequest) Reset() { + *x = GetShardReplicationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetShardReplicationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetShardReplicationRequest) ProtoMessage() {} + +func (x *GetShardReplicationRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[88] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetShardReplicationRequest.ProtoReflect.Descriptor instead. +func (*GetShardReplicationRequest) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{88} +} + +func (x *GetShardReplicationRequest) GetKeyspace() string { + if x != nil { + return x.Keyspace + } + return "" +} + +func (x *GetShardReplicationRequest) GetShard() string { + if x != nil { + return x.Shard + } + return "" +} + +func (x *GetShardReplicationRequest) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type GetShardReplicationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ShardReplicationByCell map[string]*topodata.ShardReplication `protobuf:"bytes,1,rep,name=shard_replication_by_cell,json=shardReplicationByCell,proto3" json:"shard_replication_by_cell,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *GetShardReplicationResponse) Reset() { + *x = GetShardReplicationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[89] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetShardReplicationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetShardReplicationResponse) ProtoMessage() {} + +func (x *GetShardReplicationResponse) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[89] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetShardReplicationResponse.ProtoReflect.Descriptor instead. +func (*GetShardReplicationResponse) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{89} +} + +func (x *GetShardReplicationResponse) GetShardReplicationByCell() map[string]*topodata.ShardReplication { + if x != nil { + return x.ShardReplicationByCell + } + return nil +} + type GetShardRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -5463,7 +6116,7 @@ type GetShardRequest struct { func (x *GetShardRequest) Reset() { *x = GetShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[79] + mi := &file_vtctldata_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5476,7 +6129,7 @@ func (x *GetShardRequest) String() string { func (*GetShardRequest) ProtoMessage() {} func (x *GetShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[79] + mi := &file_vtctldata_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5489,7 +6142,7 @@ func (x *GetShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. 
func (*GetShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{79} + return file_vtctldata_proto_rawDescGZIP(), []int{90} } func (x *GetShardRequest) GetKeyspace() string { @@ -5517,7 +6170,7 @@ type GetShardResponse struct { func (x *GetShardResponse) Reset() { *x = GetShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[80] + mi := &file_vtctldata_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5530,7 +6183,7 @@ func (x *GetShardResponse) String() string { func (*GetShardResponse) ProtoMessage() {} func (x *GetShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[80] + mi := &file_vtctldata_proto_msgTypes[91] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5543,7 +6196,7 @@ func (x *GetShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. 
func (*GetShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{80} + return file_vtctldata_proto_rawDescGZIP(), []int{91} } func (x *GetShardResponse) GetShard() *Shard { @@ -5562,7 +6215,7 @@ type GetShardRoutingRulesRequest struct { func (x *GetShardRoutingRulesRequest) Reset() { *x = GetShardRoutingRulesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[81] + mi := &file_vtctldata_proto_msgTypes[92] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5575,7 +6228,7 @@ func (x *GetShardRoutingRulesRequest) String() string { func (*GetShardRoutingRulesRequest) ProtoMessage() {} func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[81] + mi := &file_vtctldata_proto_msgTypes[92] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5588,7 +6241,7 @@ func (x *GetShardRoutingRulesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetShardRoutingRulesRequest.ProtoReflect.Descriptor instead. 
func (*GetShardRoutingRulesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{81} + return file_vtctldata_proto_rawDescGZIP(), []int{92} } type GetShardRoutingRulesResponse struct { @@ -5602,7 +6255,7 @@ type GetShardRoutingRulesResponse struct { func (x *GetShardRoutingRulesResponse) Reset() { *x = GetShardRoutingRulesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[82] + mi := &file_vtctldata_proto_msgTypes[93] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5615,7 +6268,7 @@ func (x *GetShardRoutingRulesResponse) String() string { func (*GetShardRoutingRulesResponse) ProtoMessage() {} func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[82] + mi := &file_vtctldata_proto_msgTypes[93] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5628,7 +6281,7 @@ func (x *GetShardRoutingRulesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetShardRoutingRulesResponse.ProtoReflect.Descriptor instead. 
func (*GetShardRoutingRulesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{82} + return file_vtctldata_proto_rawDescGZIP(), []int{93} } func (x *GetShardRoutingRulesResponse) GetShardRoutingRules() *vschema.ShardRoutingRules { @@ -5649,7 +6302,7 @@ type GetSrvKeyspaceNamesRequest struct { func (x *GetSrvKeyspaceNamesRequest) Reset() { *x = GetSrvKeyspaceNamesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[83] + mi := &file_vtctldata_proto_msgTypes[94] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5662,7 +6315,7 @@ func (x *GetSrvKeyspaceNamesRequest) String() string { func (*GetSrvKeyspaceNamesRequest) ProtoMessage() {} func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[83] + mi := &file_vtctldata_proto_msgTypes[94] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5675,7 +6328,7 @@ func (x *GetSrvKeyspaceNamesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvKeyspaceNamesRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspaceNamesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{83} + return file_vtctldata_proto_rawDescGZIP(), []int{94} } func (x *GetSrvKeyspaceNamesRequest) GetCells() []string { @@ -5697,7 +6350,7 @@ type GetSrvKeyspaceNamesResponse struct { func (x *GetSrvKeyspaceNamesResponse) Reset() { *x = GetSrvKeyspaceNamesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[84] + mi := &file_vtctldata_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5710,7 +6363,7 @@ func (x *GetSrvKeyspaceNamesResponse) String() string { func (*GetSrvKeyspaceNamesResponse) ProtoMessage() {} func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[84] + mi := &file_vtctldata_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5723,7 +6376,7 @@ func (x *GetSrvKeyspaceNamesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvKeyspaceNamesResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspaceNamesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{84} + return file_vtctldata_proto_rawDescGZIP(), []int{95} } func (x *GetSrvKeyspaceNamesResponse) GetNames() map[string]*GetSrvKeyspaceNamesResponse_NameList { @@ -5747,7 +6400,7 @@ type GetSrvKeyspacesRequest struct { func (x *GetSrvKeyspacesRequest) Reset() { *x = GetSrvKeyspacesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[85] + mi := &file_vtctldata_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5760,7 +6413,7 @@ func (x *GetSrvKeyspacesRequest) String() string { func (*GetSrvKeyspacesRequest) ProtoMessage() {} func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[85] + mi := &file_vtctldata_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5773,7 +6426,7 @@ func (x *GetSrvKeyspacesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvKeyspacesRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspacesRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{85} + return file_vtctldata_proto_rawDescGZIP(), []int{96} } func (x *GetSrvKeyspacesRequest) GetKeyspace() string { @@ -5802,7 +6455,7 @@ type GetSrvKeyspacesResponse struct { func (x *GetSrvKeyspacesResponse) Reset() { *x = GetSrvKeyspacesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[86] + mi := &file_vtctldata_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5815,7 +6468,7 @@ func (x *GetSrvKeyspacesResponse) String() string { func (*GetSrvKeyspacesResponse) ProtoMessage() {} func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[86] + mi := &file_vtctldata_proto_msgTypes[97] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5828,7 +6481,7 @@ func (x *GetSrvKeyspacesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvKeyspacesResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspacesResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{86} + return file_vtctldata_proto_rawDescGZIP(), []int{97} } func (x *GetSrvKeyspacesResponse) GetSrvKeyspaces() map[string]*topodata.SrvKeyspace { @@ -5865,7 +6518,7 @@ type UpdateThrottlerConfigRequest struct { func (x *UpdateThrottlerConfigRequest) Reset() { *x = UpdateThrottlerConfigRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[87] + mi := &file_vtctldata_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5878,7 +6531,7 @@ func (x *UpdateThrottlerConfigRequest) String() string { func (*UpdateThrottlerConfigRequest) ProtoMessage() {} func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[87] + mi := &file_vtctldata_proto_msgTypes[98] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5891,7 +6544,7 @@ func (x *UpdateThrottlerConfigRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateThrottlerConfigRequest.ProtoReflect.Descriptor instead. 
func (*UpdateThrottlerConfigRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{87} + return file_vtctldata_proto_rawDescGZIP(), []int{98} } func (x *UpdateThrottlerConfigRequest) GetKeyspace() string { @@ -5966,7 +6619,7 @@ type UpdateThrottlerConfigResponse struct { func (x *UpdateThrottlerConfigResponse) Reset() { *x = UpdateThrottlerConfigResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[88] + mi := &file_vtctldata_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5979,7 +6632,7 @@ func (x *UpdateThrottlerConfigResponse) String() string { func (*UpdateThrottlerConfigResponse) ProtoMessage() {} func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[88] + mi := &file_vtctldata_proto_msgTypes[99] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5992,7 +6645,7 @@ func (x *UpdateThrottlerConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateThrottlerConfigResponse.ProtoReflect.Descriptor instead. 
func (*UpdateThrottlerConfigResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{88} + return file_vtctldata_proto_rawDescGZIP(), []int{99} } type GetSrvVSchemaRequest struct { @@ -6006,7 +6659,7 @@ type GetSrvVSchemaRequest struct { func (x *GetSrvVSchemaRequest) Reset() { *x = GetSrvVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[89] + mi := &file_vtctldata_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6019,7 +6672,7 @@ func (x *GetSrvVSchemaRequest) String() string { func (*GetSrvVSchemaRequest) ProtoMessage() {} func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[89] + mi := &file_vtctldata_proto_msgTypes[100] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6032,7 +6685,7 @@ func (x *GetSrvVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemaRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{89} + return file_vtctldata_proto_rawDescGZIP(), []int{100} } func (x *GetSrvVSchemaRequest) GetCell() string { @@ -6053,7 +6706,7 @@ type GetSrvVSchemaResponse struct { func (x *GetSrvVSchemaResponse) Reset() { *x = GetSrvVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[90] + mi := &file_vtctldata_proto_msgTypes[101] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6066,7 +6719,7 @@ func (x *GetSrvVSchemaResponse) String() string { func (*GetSrvVSchemaResponse) ProtoMessage() {} func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[90] + mi := &file_vtctldata_proto_msgTypes[101] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6079,7 +6732,7 @@ func (x *GetSrvVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemaResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{90} + return file_vtctldata_proto_rawDescGZIP(), []int{101} } func (x *GetSrvVSchemaResponse) GetSrvVSchema() *vschema.SrvVSchema { @@ -6100,7 +6753,7 @@ type GetSrvVSchemasRequest struct { func (x *GetSrvVSchemasRequest) Reset() { *x = GetSrvVSchemasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[91] + mi := &file_vtctldata_proto_msgTypes[102] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6113,7 +6766,7 @@ func (x *GetSrvVSchemasRequest) String() string { func (*GetSrvVSchemasRequest) ProtoMessage() {} func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[91] + mi := &file_vtctldata_proto_msgTypes[102] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6126,7 +6779,7 @@ func (x *GetSrvVSchemasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemasRequest.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{91} + return file_vtctldata_proto_rawDescGZIP(), []int{102} } func (x *GetSrvVSchemasRequest) GetCells() []string { @@ -6148,7 +6801,7 @@ type GetSrvVSchemasResponse struct { func (x *GetSrvVSchemasResponse) Reset() { *x = GetSrvVSchemasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[92] + mi := &file_vtctldata_proto_msgTypes[103] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6161,7 +6814,7 @@ func (x *GetSrvVSchemasResponse) String() string { func (*GetSrvVSchemasResponse) ProtoMessage() {} func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[92] + mi := &file_vtctldata_proto_msgTypes[103] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6174,7 +6827,7 @@ func (x *GetSrvVSchemasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSrvVSchemasResponse.ProtoReflect.Descriptor instead. 
func (*GetSrvVSchemasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{92} + return file_vtctldata_proto_rawDescGZIP(), []int{103} } func (x *GetSrvVSchemasResponse) GetSrvVSchemas() map[string]*vschema.SrvVSchema { @@ -6195,7 +6848,7 @@ type GetTabletRequest struct { func (x *GetTabletRequest) Reset() { *x = GetTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[93] + mi := &file_vtctldata_proto_msgTypes[104] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6208,7 +6861,7 @@ func (x *GetTabletRequest) String() string { func (*GetTabletRequest) ProtoMessage() {} func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[93] + mi := &file_vtctldata_proto_msgTypes[104] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6221,7 +6874,7 @@ func (x *GetTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletRequest.ProtoReflect.Descriptor instead. 
func (*GetTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{93} + return file_vtctldata_proto_rawDescGZIP(), []int{104} } func (x *GetTabletRequest) GetTabletAlias() *topodata.TabletAlias { @@ -6242,7 +6895,7 @@ type GetTabletResponse struct { func (x *GetTabletResponse) Reset() { *x = GetTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[94] + mi := &file_vtctldata_proto_msgTypes[105] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6255,7 +6908,7 @@ func (x *GetTabletResponse) String() string { func (*GetTabletResponse) ProtoMessage() {} func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[94] + mi := &file_vtctldata_proto_msgTypes[105] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6268,7 +6921,7 @@ func (x *GetTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletResponse.ProtoReflect.Descriptor instead. 
func (*GetTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{94} + return file_vtctldata_proto_rawDescGZIP(), []int{105} } func (x *GetTabletResponse) GetTablet() *topodata.Tablet { @@ -6310,7 +6963,7 @@ type GetTabletsRequest struct { func (x *GetTabletsRequest) Reset() { *x = GetTabletsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[95] + mi := &file_vtctldata_proto_msgTypes[106] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6323,7 +6976,7 @@ func (x *GetTabletsRequest) String() string { func (*GetTabletsRequest) ProtoMessage() {} func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[95] + mi := &file_vtctldata_proto_msgTypes[106] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6336,7 +6989,7 @@ func (x *GetTabletsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletsRequest.ProtoReflect.Descriptor instead. 
func (*GetTabletsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{95} + return file_vtctldata_proto_rawDescGZIP(), []int{106} } func (x *GetTabletsRequest) GetKeyspace() string { @@ -6392,7 +7045,7 @@ type GetTabletsResponse struct { func (x *GetTabletsResponse) Reset() { *x = GetTabletsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[96] + mi := &file_vtctldata_proto_msgTypes[107] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6405,7 +7058,7 @@ func (x *GetTabletsResponse) String() string { func (*GetTabletsResponse) ProtoMessage() {} func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[96] + mi := &file_vtctldata_proto_msgTypes[107] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6418,7 +7071,7 @@ func (x *GetTabletsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTabletsResponse.ProtoReflect.Descriptor instead. 
func (*GetTabletsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{96} + return file_vtctldata_proto_rawDescGZIP(), []int{107} } func (x *GetTabletsResponse) GetTablets() []*topodata.Tablet { @@ -6439,7 +7092,7 @@ type GetTopologyPathRequest struct { func (x *GetTopologyPathRequest) Reset() { *x = GetTopologyPathRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[97] + mi := &file_vtctldata_proto_msgTypes[108] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6452,7 +7105,7 @@ func (x *GetTopologyPathRequest) String() string { func (*GetTopologyPathRequest) ProtoMessage() {} func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[97] + mi := &file_vtctldata_proto_msgTypes[108] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6465,7 +7118,7 @@ func (x *GetTopologyPathRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTopologyPathRequest.ProtoReflect.Descriptor instead. 
func (*GetTopologyPathRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{97} + return file_vtctldata_proto_rawDescGZIP(), []int{108} } func (x *GetTopologyPathRequest) GetPath() string { @@ -6486,7 +7139,7 @@ type GetTopologyPathResponse struct { func (x *GetTopologyPathResponse) Reset() { *x = GetTopologyPathResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[98] + mi := &file_vtctldata_proto_msgTypes[109] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6499,7 +7152,7 @@ func (x *GetTopologyPathResponse) String() string { func (*GetTopologyPathResponse) ProtoMessage() {} func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[98] + mi := &file_vtctldata_proto_msgTypes[109] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6512,7 +7165,7 @@ func (x *GetTopologyPathResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTopologyPathResponse.ProtoReflect.Descriptor instead. 
func (*GetTopologyPathResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{98} + return file_vtctldata_proto_rawDescGZIP(), []int{109} } func (x *GetTopologyPathResponse) GetCell() *TopologyCell { @@ -6538,7 +7191,7 @@ type TopologyCell struct { func (x *TopologyCell) Reset() { *x = TopologyCell{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[99] + mi := &file_vtctldata_proto_msgTypes[110] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6551,7 +7204,7 @@ func (x *TopologyCell) String() string { func (*TopologyCell) ProtoMessage() {} func (x *TopologyCell) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[99] + mi := &file_vtctldata_proto_msgTypes[110] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6564,7 +7217,7 @@ func (x *TopologyCell) ProtoReflect() protoreflect.Message { // Deprecated: Use TopologyCell.ProtoReflect.Descriptor instead. 
func (*TopologyCell) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{99} + return file_vtctldata_proto_rawDescGZIP(), []int{110} } func (x *TopologyCell) GetName() string { @@ -6606,7 +7259,7 @@ type GetVSchemaRequest struct { func (x *GetVSchemaRequest) Reset() { *x = GetVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[100] + mi := &file_vtctldata_proto_msgTypes[111] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6619,7 +7272,7 @@ func (x *GetVSchemaRequest) String() string { func (*GetVSchemaRequest) ProtoMessage() {} func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[100] + mi := &file_vtctldata_proto_msgTypes[111] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6632,7 +7285,7 @@ func (x *GetVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemaRequest.ProtoReflect.Descriptor instead. 
func (*GetVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{100} + return file_vtctldata_proto_rawDescGZIP(), []int{111} } func (x *GetVSchemaRequest) GetKeyspace() string { @@ -6653,7 +7306,7 @@ type GetVersionRequest struct { func (x *GetVersionRequest) Reset() { *x = GetVersionRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[101] + mi := &file_vtctldata_proto_msgTypes[112] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6666,7 +7319,7 @@ func (x *GetVersionRequest) String() string { func (*GetVersionRequest) ProtoMessage() {} func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[101] + mi := &file_vtctldata_proto_msgTypes[112] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6679,7 +7332,7 @@ func (x *GetVersionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVersionRequest.ProtoReflect.Descriptor instead. 
func (*GetVersionRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{101} + return file_vtctldata_proto_rawDescGZIP(), []int{112} } func (x *GetVersionRequest) GetTabletAlias() *topodata.TabletAlias { @@ -6700,7 +7353,7 @@ type GetVersionResponse struct { func (x *GetVersionResponse) Reset() { *x = GetVersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[102] + mi := &file_vtctldata_proto_msgTypes[113] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6713,7 +7366,7 @@ func (x *GetVersionResponse) String() string { func (*GetVersionResponse) ProtoMessage() {} func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[102] + mi := &file_vtctldata_proto_msgTypes[113] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6726,7 +7379,7 @@ func (x *GetVersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVersionResponse.ProtoReflect.Descriptor instead. 
func (*GetVersionResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{102} + return file_vtctldata_proto_rawDescGZIP(), []int{113} } func (x *GetVersionResponse) GetVersion() string { @@ -6747,7 +7400,7 @@ type GetVSchemaResponse struct { func (x *GetVSchemaResponse) Reset() { *x = GetVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[103] + mi := &file_vtctldata_proto_msgTypes[114] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6760,7 +7413,7 @@ func (x *GetVSchemaResponse) String() string { func (*GetVSchemaResponse) ProtoMessage() {} func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[103] + mi := &file_vtctldata_proto_msgTypes[114] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6773,7 +7426,7 @@ func (x *GetVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetVSchemaResponse.ProtoReflect.Descriptor instead. func (*GetVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{103} + return file_vtctldata_proto_rawDescGZIP(), []int{114} } func (x *GetVSchemaResponse) GetVSchema() *vschema.Keyspace { @@ -6792,14 +7445,15 @@ type GetWorkflowsRequest struct { ActiveOnly bool `protobuf:"varint,2,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` NameOnly bool `protobuf:"varint,3,opt,name=name_only,json=nameOnly,proto3" json:"name_only,omitempty"` // If you only want a specific workflow then set this field. 
- Workflow string `protobuf:"bytes,4,opt,name=workflow,proto3" json:"workflow,omitempty"` - IncludeLogs bool `protobuf:"varint,5,opt,name=include_logs,json=includeLogs,proto3" json:"include_logs,omitempty"` + Workflow string `protobuf:"bytes,4,opt,name=workflow,proto3" json:"workflow,omitempty"` + IncludeLogs bool `protobuf:"varint,5,opt,name=include_logs,json=includeLogs,proto3" json:"include_logs,omitempty"` + Shards []string `protobuf:"bytes,6,rep,name=shards,proto3" json:"shards,omitempty"` } func (x *GetWorkflowsRequest) Reset() { *x = GetWorkflowsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[104] + mi := &file_vtctldata_proto_msgTypes[115] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6812,7 +7466,7 @@ func (x *GetWorkflowsRequest) String() string { func (*GetWorkflowsRequest) ProtoMessage() {} func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[104] + mi := &file_vtctldata_proto_msgTypes[115] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6825,7 +7479,7 @@ func (x *GetWorkflowsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowsRequest.ProtoReflect.Descriptor instead. 
func (*GetWorkflowsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{104} + return file_vtctldata_proto_rawDescGZIP(), []int{115} } func (x *GetWorkflowsRequest) GetKeyspace() string { @@ -6863,6 +7517,13 @@ func (x *GetWorkflowsRequest) GetIncludeLogs() bool { return false } +func (x *GetWorkflowsRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + type GetWorkflowsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -6874,7 +7535,7 @@ type GetWorkflowsResponse struct { func (x *GetWorkflowsResponse) Reset() { *x = GetWorkflowsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[105] + mi := &file_vtctldata_proto_msgTypes[116] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6887,7 +7548,7 @@ func (x *GetWorkflowsResponse) String() string { func (*GetWorkflowsResponse) ProtoMessage() {} func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[105] + mi := &file_vtctldata_proto_msgTypes[116] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6900,7 +7561,7 @@ func (x *GetWorkflowsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetWorkflowsResponse.ProtoReflect.Descriptor instead. 
func (*GetWorkflowsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{105} + return file_vtctldata_proto_rawDescGZIP(), []int{116} } func (x *GetWorkflowsResponse) GetWorkflows() []*Workflow { @@ -6925,7 +7586,7 @@ type InitShardPrimaryRequest struct { func (x *InitShardPrimaryRequest) Reset() { *x = InitShardPrimaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[106] + mi := &file_vtctldata_proto_msgTypes[117] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6938,7 +7599,7 @@ func (x *InitShardPrimaryRequest) String() string { func (*InitShardPrimaryRequest) ProtoMessage() {} func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[106] + mi := &file_vtctldata_proto_msgTypes[117] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6951,7 +7612,7 @@ func (x *InitShardPrimaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use InitShardPrimaryRequest.ProtoReflect.Descriptor instead. 
func (*InitShardPrimaryRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{106} + return file_vtctldata_proto_rawDescGZIP(), []int{117} } func (x *InitShardPrimaryRequest) GetKeyspace() string { @@ -7000,7 +7661,7 @@ type InitShardPrimaryResponse struct { func (x *InitShardPrimaryResponse) Reset() { *x = InitShardPrimaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[107] + mi := &file_vtctldata_proto_msgTypes[118] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7013,7 +7674,7 @@ func (x *InitShardPrimaryResponse) String() string { func (*InitShardPrimaryResponse) ProtoMessage() {} func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[107] + mi := &file_vtctldata_proto_msgTypes[118] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7026,7 +7687,7 @@ func (x *InitShardPrimaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use InitShardPrimaryResponse.ProtoReflect.Descriptor instead. 
func (*InitShardPrimaryResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{107} + return file_vtctldata_proto_rawDescGZIP(), []int{118} } func (x *InitShardPrimaryResponse) GetEvents() []*logutil.Event { @@ -7048,7 +7709,7 @@ type LaunchSchemaMigrationRequest struct { func (x *LaunchSchemaMigrationRequest) Reset() { *x = LaunchSchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[108] + mi := &file_vtctldata_proto_msgTypes[119] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7061,7 +7722,7 @@ func (x *LaunchSchemaMigrationRequest) String() string { func (*LaunchSchemaMigrationRequest) ProtoMessage() {} func (x *LaunchSchemaMigrationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[108] + mi := &file_vtctldata_proto_msgTypes[119] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7074,7 +7735,7 @@ func (x *LaunchSchemaMigrationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LaunchSchemaMigrationRequest.ProtoReflect.Descriptor instead. 
func (*LaunchSchemaMigrationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{108} + return file_vtctldata_proto_rawDescGZIP(), []int{119} } func (x *LaunchSchemaMigrationRequest) GetKeyspace() string { @@ -7102,7 +7763,7 @@ type LaunchSchemaMigrationResponse struct { func (x *LaunchSchemaMigrationResponse) Reset() { *x = LaunchSchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[109] + mi := &file_vtctldata_proto_msgTypes[120] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7115,7 +7776,7 @@ func (x *LaunchSchemaMigrationResponse) String() string { func (*LaunchSchemaMigrationResponse) ProtoMessage() {} func (x *LaunchSchemaMigrationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[109] + mi := &file_vtctldata_proto_msgTypes[120] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7128,7 +7789,7 @@ func (x *LaunchSchemaMigrationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LaunchSchemaMigrationResponse.ProtoReflect.Descriptor instead. 
func (*LaunchSchemaMigrationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{109} + return file_vtctldata_proto_rawDescGZIP(), []int{120} } func (x *LaunchSchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { @@ -7155,7 +7816,7 @@ type LookupVindexCreateRequest struct { func (x *LookupVindexCreateRequest) Reset() { *x = LookupVindexCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[110] + mi := &file_vtctldata_proto_msgTypes[121] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7168,7 +7829,7 @@ func (x *LookupVindexCreateRequest) String() string { func (*LookupVindexCreateRequest) ProtoMessage() {} func (x *LookupVindexCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[110] + mi := &file_vtctldata_proto_msgTypes[121] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7181,7 +7842,7 @@ func (x *LookupVindexCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LookupVindexCreateRequest.ProtoReflect.Descriptor instead. 
func (*LookupVindexCreateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{110} + return file_vtctldata_proto_rawDescGZIP(), []int{121} } func (x *LookupVindexCreateRequest) GetKeyspace() string { @@ -7242,7 +7903,7 @@ type LookupVindexCreateResponse struct { func (x *LookupVindexCreateResponse) Reset() { *x = LookupVindexCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[111] + mi := &file_vtctldata_proto_msgTypes[122] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7255,7 +7916,7 @@ func (x *LookupVindexCreateResponse) String() string { func (*LookupVindexCreateResponse) ProtoMessage() {} func (x *LookupVindexCreateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[111] + mi := &file_vtctldata_proto_msgTypes[122] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7268,7 +7929,7 @@ func (x *LookupVindexCreateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LookupVindexCreateResponse.ProtoReflect.Descriptor instead. 
func (*LookupVindexCreateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{111} + return file_vtctldata_proto_rawDescGZIP(), []int{122} } type LookupVindexExternalizeRequest struct { @@ -7287,7 +7948,7 @@ type LookupVindexExternalizeRequest struct { func (x *LookupVindexExternalizeRequest) Reset() { *x = LookupVindexExternalizeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[112] + mi := &file_vtctldata_proto_msgTypes[123] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7300,7 +7961,7 @@ func (x *LookupVindexExternalizeRequest) String() string { func (*LookupVindexExternalizeRequest) ProtoMessage() {} func (x *LookupVindexExternalizeRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[112] + mi := &file_vtctldata_proto_msgTypes[123] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7313,7 +7974,7 @@ func (x *LookupVindexExternalizeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use LookupVindexExternalizeRequest.ProtoReflect.Descriptor instead. 
func (*LookupVindexExternalizeRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{112} + return file_vtctldata_proto_rawDescGZIP(), []int{123} } func (x *LookupVindexExternalizeRequest) GetKeyspace() string { @@ -7349,7 +8010,7 @@ type LookupVindexExternalizeResponse struct { func (x *LookupVindexExternalizeResponse) Reset() { *x = LookupVindexExternalizeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[113] + mi := &file_vtctldata_proto_msgTypes[124] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7362,7 +8023,7 @@ func (x *LookupVindexExternalizeResponse) String() string { func (*LookupVindexExternalizeResponse) ProtoMessage() {} func (x *LookupVindexExternalizeResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[113] + mi := &file_vtctldata_proto_msgTypes[124] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7375,7 +8036,7 @@ func (x *LookupVindexExternalizeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use LookupVindexExternalizeResponse.ProtoReflect.Descriptor instead. 
func (*LookupVindexExternalizeResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{113} + return file_vtctldata_proto_rawDescGZIP(), []int{124} } func (x *LookupVindexExternalizeResponse) GetWorkflowDeleted() bool { @@ -7396,7 +8057,7 @@ type MaterializeCreateRequest struct { func (x *MaterializeCreateRequest) Reset() { *x = MaterializeCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[114] + mi := &file_vtctldata_proto_msgTypes[125] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7409,7 +8070,7 @@ func (x *MaterializeCreateRequest) String() string { func (*MaterializeCreateRequest) ProtoMessage() {} func (x *MaterializeCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[114] + mi := &file_vtctldata_proto_msgTypes[125] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7422,7 +8083,7 @@ func (x *MaterializeCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MaterializeCreateRequest.ProtoReflect.Descriptor instead. 
func (*MaterializeCreateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{114} + return file_vtctldata_proto_rawDescGZIP(), []int{125} } func (x *MaterializeCreateRequest) GetSettings() *MaterializeSettings { @@ -7441,7 +8102,7 @@ type MaterializeCreateResponse struct { func (x *MaterializeCreateResponse) Reset() { *x = MaterializeCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[115] + mi := &file_vtctldata_proto_msgTypes[126] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7454,7 +8115,7 @@ func (x *MaterializeCreateResponse) String() string { func (*MaterializeCreateResponse) ProtoMessage() {} func (x *MaterializeCreateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[115] + mi := &file_vtctldata_proto_msgTypes[126] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7467,7 +8128,7 @@ func (x *MaterializeCreateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MaterializeCreateResponse.ProtoReflect.Descriptor instead. 
func (*MaterializeCreateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{115} + return file_vtctldata_proto_rawDescGZIP(), []int{126} } type MigrateCreateRequest struct { @@ -7506,7 +8167,7 @@ type MigrateCreateRequest struct { func (x *MigrateCreateRequest) Reset() { *x = MigrateCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[116] + mi := &file_vtctldata_proto_msgTypes[127] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7519,7 +8180,7 @@ func (x *MigrateCreateRequest) String() string { func (*MigrateCreateRequest) ProtoMessage() {} func (x *MigrateCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[116] + mi := &file_vtctldata_proto_msgTypes[127] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7532,7 +8193,7 @@ func (x *MigrateCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MigrateCreateRequest.ProtoReflect.Descriptor instead. 
func (*MigrateCreateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{116} + return file_vtctldata_proto_rawDescGZIP(), []int{127} } func (x *MigrateCreateRequest) GetWorkflow() string { @@ -7670,7 +8331,7 @@ type MigrateCompleteRequest struct { func (x *MigrateCompleteRequest) Reset() { *x = MigrateCompleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[117] + mi := &file_vtctldata_proto_msgTypes[128] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7683,7 +8344,7 @@ func (x *MigrateCompleteRequest) String() string { func (*MigrateCompleteRequest) ProtoMessage() {} func (x *MigrateCompleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[117] + mi := &file_vtctldata_proto_msgTypes[128] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7696,7 +8357,7 @@ func (x *MigrateCompleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MigrateCompleteRequest.ProtoReflect.Descriptor instead. 
func (*MigrateCompleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{117} + return file_vtctldata_proto_rawDescGZIP(), []int{128} } func (x *MigrateCompleteRequest) GetWorkflow() string { @@ -7753,7 +8414,7 @@ type MigrateCompleteResponse struct { func (x *MigrateCompleteResponse) Reset() { *x = MigrateCompleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[118] + mi := &file_vtctldata_proto_msgTypes[129] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7766,7 +8427,7 @@ func (x *MigrateCompleteResponse) String() string { func (*MigrateCompleteResponse) ProtoMessage() {} func (x *MigrateCompleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[118] + mi := &file_vtctldata_proto_msgTypes[129] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7779,7 +8440,7 @@ func (x *MigrateCompleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MigrateCompleteResponse.ProtoReflect.Descriptor instead. 
func (*MigrateCompleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{118} + return file_vtctldata_proto_rawDescGZIP(), []int{129} } func (x *MigrateCompleteResponse) GetSummary() string { @@ -7810,7 +8471,7 @@ type MountRegisterRequest struct { func (x *MountRegisterRequest) Reset() { *x = MountRegisterRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[119] + mi := &file_vtctldata_proto_msgTypes[130] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7823,7 +8484,7 @@ func (x *MountRegisterRequest) String() string { func (*MountRegisterRequest) ProtoMessage() {} func (x *MountRegisterRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[119] + mi := &file_vtctldata_proto_msgTypes[130] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7836,7 +8497,7 @@ func (x *MountRegisterRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MountRegisterRequest.ProtoReflect.Descriptor instead. 
func (*MountRegisterRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{119} + return file_vtctldata_proto_rawDescGZIP(), []int{130} } func (x *MountRegisterRequest) GetTopoType() string { @@ -7876,7 +8537,7 @@ type MountRegisterResponse struct { func (x *MountRegisterResponse) Reset() { *x = MountRegisterResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[120] + mi := &file_vtctldata_proto_msgTypes[131] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7889,7 +8550,7 @@ func (x *MountRegisterResponse) String() string { func (*MountRegisterResponse) ProtoMessage() {} func (x *MountRegisterResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[120] + mi := &file_vtctldata_proto_msgTypes[131] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7902,7 +8563,7 @@ func (x *MountRegisterResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MountRegisterResponse.ProtoReflect.Descriptor instead. 
func (*MountRegisterResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{120} + return file_vtctldata_proto_rawDescGZIP(), []int{131} } type MountUnregisterRequest struct { @@ -7916,7 +8577,7 @@ type MountUnregisterRequest struct { func (x *MountUnregisterRequest) Reset() { *x = MountUnregisterRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[121] + mi := &file_vtctldata_proto_msgTypes[132] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7929,7 +8590,7 @@ func (x *MountUnregisterRequest) String() string { func (*MountUnregisterRequest) ProtoMessage() {} func (x *MountUnregisterRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[121] + mi := &file_vtctldata_proto_msgTypes[132] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7942,7 +8603,7 @@ func (x *MountUnregisterRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MountUnregisterRequest.ProtoReflect.Descriptor instead. 
func (*MountUnregisterRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{121} + return file_vtctldata_proto_rawDescGZIP(), []int{132} } func (x *MountUnregisterRequest) GetName() string { @@ -7961,7 +8622,7 @@ type MountUnregisterResponse struct { func (x *MountUnregisterResponse) Reset() { *x = MountUnregisterResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[122] + mi := &file_vtctldata_proto_msgTypes[133] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -7974,7 +8635,7 @@ func (x *MountUnregisterResponse) String() string { func (*MountUnregisterResponse) ProtoMessage() {} func (x *MountUnregisterResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[122] + mi := &file_vtctldata_proto_msgTypes[133] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -7987,7 +8648,7 @@ func (x *MountUnregisterResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MountUnregisterResponse.ProtoReflect.Descriptor instead. 
func (*MountUnregisterResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{122} + return file_vtctldata_proto_rawDescGZIP(), []int{133} } type MountShowRequest struct { @@ -8001,7 +8662,7 @@ type MountShowRequest struct { func (x *MountShowRequest) Reset() { *x = MountShowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[123] + mi := &file_vtctldata_proto_msgTypes[134] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8014,7 +8675,7 @@ func (x *MountShowRequest) String() string { func (*MountShowRequest) ProtoMessage() {} func (x *MountShowRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[123] + mi := &file_vtctldata_proto_msgTypes[134] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8027,7 +8688,7 @@ func (x *MountShowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MountShowRequest.ProtoReflect.Descriptor instead. 
func (*MountShowRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{123} + return file_vtctldata_proto_rawDescGZIP(), []int{134} } func (x *MountShowRequest) GetName() string { @@ -8051,7 +8712,7 @@ type MountShowResponse struct { func (x *MountShowResponse) Reset() { *x = MountShowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[124] + mi := &file_vtctldata_proto_msgTypes[135] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8064,7 +8725,7 @@ func (x *MountShowResponse) String() string { func (*MountShowResponse) ProtoMessage() {} func (x *MountShowResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[124] + mi := &file_vtctldata_proto_msgTypes[135] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8077,7 +8738,7 @@ func (x *MountShowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MountShowResponse.ProtoReflect.Descriptor instead. 
func (*MountShowResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{124} + return file_vtctldata_proto_rawDescGZIP(), []int{135} } func (x *MountShowResponse) GetTopoType() string { @@ -8117,7 +8778,7 @@ type MountListRequest struct { func (x *MountListRequest) Reset() { *x = MountListRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[125] + mi := &file_vtctldata_proto_msgTypes[136] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8130,7 +8791,7 @@ func (x *MountListRequest) String() string { func (*MountListRequest) ProtoMessage() {} func (x *MountListRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[125] + mi := &file_vtctldata_proto_msgTypes[136] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8143,7 +8804,7 @@ func (x *MountListRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MountListRequest.ProtoReflect.Descriptor instead. 
func (*MountListRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{125} + return file_vtctldata_proto_rawDescGZIP(), []int{136} } type MountListResponse struct { @@ -8157,7 +8818,7 @@ type MountListResponse struct { func (x *MountListResponse) Reset() { *x = MountListResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[126] + mi := &file_vtctldata_proto_msgTypes[137] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8170,7 +8831,7 @@ func (x *MountListResponse) String() string { func (*MountListResponse) ProtoMessage() {} func (x *MountListResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[126] + mi := &file_vtctldata_proto_msgTypes[137] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8183,7 +8844,7 @@ func (x *MountListResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MountListResponse.ProtoReflect.Descriptor instead. func (*MountListResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{126} + return file_vtctldata_proto_rawDescGZIP(), []int{137} } func (x *MountListResponse) GetNames() []string { @@ -8227,13 +8888,14 @@ type MoveTablesCreateRequest struct { // NoRoutingRules is set to true if routing rules should not be created on the target when the workflow is created. NoRoutingRules bool `protobuf:"varint,18,opt,name=no_routing_rules,json=noRoutingRules,proto3" json:"no_routing_rules,omitempty"` // Run a single copy phase for the entire database. 
- AtomicCopy bool `protobuf:"varint,19,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` + AtomicCopy bool `protobuf:"varint,19,opt,name=atomic_copy,json=atomicCopy,proto3" json:"atomic_copy,omitempty"` + WorkflowOptions *WorkflowOptions `protobuf:"bytes,20,opt,name=workflow_options,json=workflowOptions,proto3" json:"workflow_options,omitempty"` } func (x *MoveTablesCreateRequest) Reset() { *x = MoveTablesCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[127] + mi := &file_vtctldata_proto_msgTypes[138] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8246,7 +8908,7 @@ func (x *MoveTablesCreateRequest) String() string { func (*MoveTablesCreateRequest) ProtoMessage() {} func (x *MoveTablesCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[127] + mi := &file_vtctldata_proto_msgTypes[138] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8259,7 +8921,7 @@ func (x *MoveTablesCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveTablesCreateRequest.ProtoReflect.Descriptor instead. 
func (*MoveTablesCreateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{127} + return file_vtctldata_proto_rawDescGZIP(), []int{138} } func (x *MoveTablesCreateRequest) GetWorkflow() string { @@ -8395,6 +9057,13 @@ func (x *MoveTablesCreateRequest) GetAtomicCopy() bool { return false } +func (x *MoveTablesCreateRequest) GetWorkflowOptions() *WorkflowOptions { + if x != nil { + return x.WorkflowOptions + } + return nil +} + type MoveTablesCreateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -8407,7 +9076,7 @@ type MoveTablesCreateResponse struct { func (x *MoveTablesCreateResponse) Reset() { *x = MoveTablesCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[128] + mi := &file_vtctldata_proto_msgTypes[139] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8420,7 +9089,7 @@ func (x *MoveTablesCreateResponse) String() string { func (*MoveTablesCreateResponse) ProtoMessage() {} func (x *MoveTablesCreateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[128] + mi := &file_vtctldata_proto_msgTypes[139] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8433,7 +9102,7 @@ func (x *MoveTablesCreateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveTablesCreateResponse.ProtoReflect.Descriptor instead. 
func (*MoveTablesCreateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{128} + return file_vtctldata_proto_rawDescGZIP(), []int{139} } func (x *MoveTablesCreateResponse) GetSummary() string { @@ -8455,18 +9124,19 @@ type MoveTablesCompleteRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` - TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` - KeepData bool `protobuf:"varint,4,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` - KeepRoutingRules bool `protobuf:"varint,5,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` - RenameTables bool `protobuf:"varint,6,opt,name=rename_tables,json=renameTables,proto3" json:"rename_tables,omitempty"` - DryRun bool `protobuf:"varint,7,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"` + TargetKeyspace string `protobuf:"bytes,3,opt,name=target_keyspace,json=targetKeyspace,proto3" json:"target_keyspace,omitempty"` + KeepData bool `protobuf:"varint,4,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,5,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` + RenameTables bool `protobuf:"varint,6,opt,name=rename_tables,json=renameTables,proto3" json:"rename_tables,omitempty"` + DryRun bool `protobuf:"varint,7,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` + Shards []string `protobuf:"bytes,8,rep,name=shards,proto3" json:"shards,omitempty"` } func (x *MoveTablesCompleteRequest) Reset() { *x = MoveTablesCompleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[129] + mi := 
&file_vtctldata_proto_msgTypes[140] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8479,7 +9149,7 @@ func (x *MoveTablesCompleteRequest) String() string { func (*MoveTablesCompleteRequest) ProtoMessage() {} func (x *MoveTablesCompleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[129] + mi := &file_vtctldata_proto_msgTypes[140] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8492,7 +9162,7 @@ func (x *MoveTablesCompleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveTablesCompleteRequest.ProtoReflect.Descriptor instead. func (*MoveTablesCompleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{129} + return file_vtctldata_proto_rawDescGZIP(), []int{140} } func (x *MoveTablesCompleteRequest) GetWorkflow() string { @@ -8537,6 +9207,13 @@ func (x *MoveTablesCompleteRequest) GetDryRun() bool { return false } +func (x *MoveTablesCompleteRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + type MoveTablesCompleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -8549,7 +9226,7 @@ type MoveTablesCompleteResponse struct { func (x *MoveTablesCompleteResponse) Reset() { *x = MoveTablesCompleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[130] + mi := &file_vtctldata_proto_msgTypes[141] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8562,7 +9239,7 @@ func (x *MoveTablesCompleteResponse) String() string { func (*MoveTablesCompleteResponse) ProtoMessage() {} func (x *MoveTablesCompleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[130] + mi := &file_vtctldata_proto_msgTypes[141] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -8575,7 +9252,7 @@ func (x *MoveTablesCompleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MoveTablesCompleteResponse.ProtoReflect.Descriptor instead. func (*MoveTablesCompleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{130} + return file_vtctldata_proto_rawDescGZIP(), []int{141} } func (x *MoveTablesCompleteResponse) GetSummary() string { @@ -8603,7 +9280,7 @@ type PingTabletRequest struct { func (x *PingTabletRequest) Reset() { *x = PingTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[131] + mi := &file_vtctldata_proto_msgTypes[142] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8616,7 +9293,7 @@ func (x *PingTabletRequest) String() string { func (*PingTabletRequest) ProtoMessage() {} func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[131] + mi := &file_vtctldata_proto_msgTypes[142] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8629,7 +9306,7 @@ func (x *PingTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PingTabletRequest.ProtoReflect.Descriptor instead. 
func (*PingTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{131} + return file_vtctldata_proto_rawDescGZIP(), []int{142} } func (x *PingTabletRequest) GetTabletAlias() *topodata.TabletAlias { @@ -8648,7 +9325,7 @@ type PingTabletResponse struct { func (x *PingTabletResponse) Reset() { *x = PingTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[132] + mi := &file_vtctldata_proto_msgTypes[143] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8661,7 +9338,7 @@ func (x *PingTabletResponse) String() string { func (*PingTabletResponse) ProtoMessage() {} func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[132] + mi := &file_vtctldata_proto_msgTypes[143] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8674,7 +9351,7 @@ func (x *PingTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PingTabletResponse.ProtoReflect.Descriptor instead. func (*PingTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{132} + return file_vtctldata_proto_rawDescGZIP(), []int{143} } type PlannedReparentShardRequest struct { @@ -8704,12 +9381,16 @@ type PlannedReparentShardRequest struct { // WaitReplicasTimeout time to catch up before the reparent, and an additional // WaitReplicasTimeout time to catch up after the reparent. WaitReplicasTimeout *vttime.Duration `protobuf:"bytes,5,opt,name=wait_replicas_timeout,json=waitReplicasTimeout,proto3" json:"wait_replicas_timeout,omitempty"` + // TolerableReplicationLag is the amount of replication lag that is considered + // acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. + // A value of 0 indicates that Vitess shouldn't consider the replication lag at all. 
+ TolerableReplicationLag *vttime.Duration `protobuf:"bytes,6,opt,name=tolerable_replication_lag,json=tolerableReplicationLag,proto3" json:"tolerable_replication_lag,omitempty"` } func (x *PlannedReparentShardRequest) Reset() { *x = PlannedReparentShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[133] + mi := &file_vtctldata_proto_msgTypes[144] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8722,7 +9403,7 @@ func (x *PlannedReparentShardRequest) String() string { func (*PlannedReparentShardRequest) ProtoMessage() {} func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[133] + mi := &file_vtctldata_proto_msgTypes[144] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8735,7 +9416,7 @@ func (x *PlannedReparentShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PlannedReparentShardRequest.ProtoReflect.Descriptor instead. 
func (*PlannedReparentShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{133} + return file_vtctldata_proto_rawDescGZIP(), []int{144} } func (x *PlannedReparentShardRequest) GetKeyspace() string { @@ -8773,6 +9454,13 @@ func (x *PlannedReparentShardRequest) GetWaitReplicasTimeout() *vttime.Duration return nil } +func (x *PlannedReparentShardRequest) GetTolerableReplicationLag() *vttime.Duration { + if x != nil { + return x.TolerableReplicationLag + } + return nil +} + type PlannedReparentShardResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -8793,7 +9481,7 @@ type PlannedReparentShardResponse struct { func (x *PlannedReparentShardResponse) Reset() { *x = PlannedReparentShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[134] + mi := &file_vtctldata_proto_msgTypes[145] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8806,7 +9494,7 @@ func (x *PlannedReparentShardResponse) String() string { func (*PlannedReparentShardResponse) ProtoMessage() {} func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[134] + mi := &file_vtctldata_proto_msgTypes[145] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8819,7 +9507,7 @@ func (x *PlannedReparentShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PlannedReparentShardResponse.ProtoReflect.Descriptor instead. 
func (*PlannedReparentShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{134} + return file_vtctldata_proto_rawDescGZIP(), []int{145} } func (x *PlannedReparentShardResponse) GetKeyspace() string { @@ -8865,7 +9553,7 @@ type RebuildKeyspaceGraphRequest struct { func (x *RebuildKeyspaceGraphRequest) Reset() { *x = RebuildKeyspaceGraphRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[135] + mi := &file_vtctldata_proto_msgTypes[146] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8878,7 +9566,7 @@ func (x *RebuildKeyspaceGraphRequest) String() string { func (*RebuildKeyspaceGraphRequest) ProtoMessage() {} func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[135] + mi := &file_vtctldata_proto_msgTypes[146] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8891,7 +9579,7 @@ func (x *RebuildKeyspaceGraphRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildKeyspaceGraphRequest.ProtoReflect.Descriptor instead. 
func (*RebuildKeyspaceGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{135} + return file_vtctldata_proto_rawDescGZIP(), []int{146} } func (x *RebuildKeyspaceGraphRequest) GetKeyspace() string { @@ -8924,7 +9612,7 @@ type RebuildKeyspaceGraphResponse struct { func (x *RebuildKeyspaceGraphResponse) Reset() { *x = RebuildKeyspaceGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[136] + mi := &file_vtctldata_proto_msgTypes[147] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8937,7 +9625,7 @@ func (x *RebuildKeyspaceGraphResponse) String() string { func (*RebuildKeyspaceGraphResponse) ProtoMessage() {} func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[136] + mi := &file_vtctldata_proto_msgTypes[147] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8950,7 +9638,7 @@ func (x *RebuildKeyspaceGraphResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildKeyspaceGraphResponse.ProtoReflect.Descriptor instead. 
func (*RebuildKeyspaceGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{136} + return file_vtctldata_proto_rawDescGZIP(), []int{147} } type RebuildVSchemaGraphRequest struct { @@ -8966,7 +9654,7 @@ type RebuildVSchemaGraphRequest struct { func (x *RebuildVSchemaGraphRequest) Reset() { *x = RebuildVSchemaGraphRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[137] + mi := &file_vtctldata_proto_msgTypes[148] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -8979,7 +9667,7 @@ func (x *RebuildVSchemaGraphRequest) String() string { func (*RebuildVSchemaGraphRequest) ProtoMessage() {} func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[137] + mi := &file_vtctldata_proto_msgTypes[148] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -8992,7 +9680,7 @@ func (x *RebuildVSchemaGraphRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildVSchemaGraphRequest.ProtoReflect.Descriptor instead. 
func (*RebuildVSchemaGraphRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{137} + return file_vtctldata_proto_rawDescGZIP(), []int{148} } func (x *RebuildVSchemaGraphRequest) GetCells() []string { @@ -9011,7 +9699,7 @@ type RebuildVSchemaGraphResponse struct { func (x *RebuildVSchemaGraphResponse) Reset() { *x = RebuildVSchemaGraphResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[138] + mi := &file_vtctldata_proto_msgTypes[149] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9024,7 +9712,7 @@ func (x *RebuildVSchemaGraphResponse) String() string { func (*RebuildVSchemaGraphResponse) ProtoMessage() {} func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[138] + mi := &file_vtctldata_proto_msgTypes[149] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9037,7 +9725,7 @@ func (x *RebuildVSchemaGraphResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RebuildVSchemaGraphResponse.ProtoReflect.Descriptor instead. 
func (*RebuildVSchemaGraphResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{138} + return file_vtctldata_proto_rawDescGZIP(), []int{149} } type RefreshStateRequest struct { @@ -9051,7 +9739,7 @@ type RefreshStateRequest struct { func (x *RefreshStateRequest) Reset() { *x = RefreshStateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[139] + mi := &file_vtctldata_proto_msgTypes[150] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9064,7 +9752,7 @@ func (x *RefreshStateRequest) String() string { func (*RefreshStateRequest) ProtoMessage() {} func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[139] + mi := &file_vtctldata_proto_msgTypes[150] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9077,7 +9765,7 @@ func (x *RefreshStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshStateRequest.ProtoReflect.Descriptor instead. 
func (*RefreshStateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{139} + return file_vtctldata_proto_rawDescGZIP(), []int{150} } func (x *RefreshStateRequest) GetTabletAlias() *topodata.TabletAlias { @@ -9096,7 +9784,7 @@ type RefreshStateResponse struct { func (x *RefreshStateResponse) Reset() { *x = RefreshStateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[140] + mi := &file_vtctldata_proto_msgTypes[151] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9109,7 +9797,7 @@ func (x *RefreshStateResponse) String() string { func (*RefreshStateResponse) ProtoMessage() {} func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[140] + mi := &file_vtctldata_proto_msgTypes[151] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9122,7 +9810,7 @@ func (x *RefreshStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshStateResponse.ProtoReflect.Descriptor instead. 
func (*RefreshStateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{140} + return file_vtctldata_proto_rawDescGZIP(), []int{151} } type RefreshStateByShardRequest struct { @@ -9138,7 +9826,7 @@ type RefreshStateByShardRequest struct { func (x *RefreshStateByShardRequest) Reset() { *x = RefreshStateByShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[141] + mi := &file_vtctldata_proto_msgTypes[152] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9151,7 +9839,7 @@ func (x *RefreshStateByShardRequest) String() string { func (*RefreshStateByShardRequest) ProtoMessage() {} func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[141] + mi := &file_vtctldata_proto_msgTypes[152] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9164,7 +9852,7 @@ func (x *RefreshStateByShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshStateByShardRequest.ProtoReflect.Descriptor instead. 
func (*RefreshStateByShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{141} + return file_vtctldata_proto_rawDescGZIP(), []int{152} } func (x *RefreshStateByShardRequest) GetKeyspace() string { @@ -9201,7 +9889,7 @@ type RefreshStateByShardResponse struct { func (x *RefreshStateByShardResponse) Reset() { *x = RefreshStateByShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[142] + mi := &file_vtctldata_proto_msgTypes[153] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9214,7 +9902,7 @@ func (x *RefreshStateByShardResponse) String() string { func (*RefreshStateByShardResponse) ProtoMessage() {} func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[142] + mi := &file_vtctldata_proto_msgTypes[153] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9227,7 +9915,7 @@ func (x *RefreshStateByShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RefreshStateByShardResponse.ProtoReflect.Descriptor instead. 
func (*RefreshStateByShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{142} + return file_vtctldata_proto_rawDescGZIP(), []int{153} } func (x *RefreshStateByShardResponse) GetIsPartialRefresh() bool { @@ -9255,7 +9943,7 @@ type ReloadSchemaRequest struct { func (x *ReloadSchemaRequest) Reset() { *x = ReloadSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[143] + mi := &file_vtctldata_proto_msgTypes[154] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9268,7 +9956,7 @@ func (x *ReloadSchemaRequest) String() string { func (*ReloadSchemaRequest) ProtoMessage() {} func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[143] + mi := &file_vtctldata_proto_msgTypes[154] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9281,7 +9969,7 @@ func (x *ReloadSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaRequest.ProtoReflect.Descriptor instead. 
func (*ReloadSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{143} + return file_vtctldata_proto_rawDescGZIP(), []int{154} } func (x *ReloadSchemaRequest) GetTabletAlias() *topodata.TabletAlias { @@ -9300,7 +9988,7 @@ type ReloadSchemaResponse struct { func (x *ReloadSchemaResponse) Reset() { *x = ReloadSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[144] + mi := &file_vtctldata_proto_msgTypes[155] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9313,7 +10001,7 @@ func (x *ReloadSchemaResponse) String() string { func (*ReloadSchemaResponse) ProtoMessage() {} func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[144] + mi := &file_vtctldata_proto_msgTypes[155] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9326,7 +10014,7 @@ func (x *ReloadSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaResponse.ProtoReflect.Descriptor instead. func (*ReloadSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{144} + return file_vtctldata_proto_rawDescGZIP(), []int{155} } type ReloadSchemaKeyspaceRequest struct { @@ -9340,13 +10028,13 @@ type ReloadSchemaKeyspaceRequest struct { // Concurrency is the global concurrency across all shards in the keyspace // (so, at most this many tablets will be reloaded across the keyspace at any // given point). 
- Concurrency uint32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Concurrency int32 `protobuf:"varint,4,opt,name=concurrency,proto3" json:"concurrency,omitempty"` } func (x *ReloadSchemaKeyspaceRequest) Reset() { *x = ReloadSchemaKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[145] + mi := &file_vtctldata_proto_msgTypes[156] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9359,7 +10047,7 @@ func (x *ReloadSchemaKeyspaceRequest) String() string { func (*ReloadSchemaKeyspaceRequest) ProtoMessage() {} func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[145] + mi := &file_vtctldata_proto_msgTypes[156] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9372,7 +10060,7 @@ func (x *ReloadSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*ReloadSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{145} + return file_vtctldata_proto_rawDescGZIP(), []int{156} } func (x *ReloadSchemaKeyspaceRequest) GetKeyspace() string { @@ -9396,7 +10084,7 @@ func (x *ReloadSchemaKeyspaceRequest) GetIncludePrimary() bool { return false } -func (x *ReloadSchemaKeyspaceRequest) GetConcurrency() uint32 { +func (x *ReloadSchemaKeyspaceRequest) GetConcurrency() int32 { if x != nil { return x.Concurrency } @@ -9414,7 +10102,7 @@ type ReloadSchemaKeyspaceResponse struct { func (x *ReloadSchemaKeyspaceResponse) Reset() { *x = ReloadSchemaKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[146] + mi := &file_vtctldata_proto_msgTypes[157] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9427,7 +10115,7 @@ func (x *ReloadSchemaKeyspaceResponse) String() string { func (*ReloadSchemaKeyspaceResponse) ProtoMessage() {} func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[146] + mi := &file_vtctldata_proto_msgTypes[157] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9440,7 +10128,7 @@ func (x *ReloadSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*ReloadSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{146} + return file_vtctldata_proto_rawDescGZIP(), []int{157} } func (x *ReloadSchemaKeyspaceResponse) GetEvents() []*logutil.Event { @@ -9460,13 +10148,13 @@ type ReloadSchemaShardRequest struct { WaitPosition string `protobuf:"bytes,3,opt,name=wait_position,json=waitPosition,proto3" json:"wait_position,omitempty"` IncludePrimary bool `protobuf:"varint,4,opt,name=include_primary,json=includePrimary,proto3" json:"include_primary,omitempty"` // Concurrency is the maximum number of tablets to reload at one time. - Concurrency uint32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + Concurrency int32 `protobuf:"varint,5,opt,name=concurrency,proto3" json:"concurrency,omitempty"` } func (x *ReloadSchemaShardRequest) Reset() { *x = ReloadSchemaShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[147] + mi := &file_vtctldata_proto_msgTypes[158] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9479,7 +10167,7 @@ func (x *ReloadSchemaShardRequest) String() string { func (*ReloadSchemaShardRequest) ProtoMessage() {} func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[147] + mi := &file_vtctldata_proto_msgTypes[158] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9492,7 +10180,7 @@ func (x *ReloadSchemaShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaShardRequest.ProtoReflect.Descriptor instead. 
func (*ReloadSchemaShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{147} + return file_vtctldata_proto_rawDescGZIP(), []int{158} } func (x *ReloadSchemaShardRequest) GetKeyspace() string { @@ -9523,7 +10211,7 @@ func (x *ReloadSchemaShardRequest) GetIncludePrimary() bool { return false } -func (x *ReloadSchemaShardRequest) GetConcurrency() uint32 { +func (x *ReloadSchemaShardRequest) GetConcurrency() int32 { if x != nil { return x.Concurrency } @@ -9541,7 +10229,7 @@ type ReloadSchemaShardResponse struct { func (x *ReloadSchemaShardResponse) Reset() { *x = ReloadSchemaShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[148] + mi := &file_vtctldata_proto_msgTypes[159] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9554,7 +10242,7 @@ func (x *ReloadSchemaShardResponse) String() string { func (*ReloadSchemaShardResponse) ProtoMessage() {} func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[148] + mi := &file_vtctldata_proto_msgTypes[159] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9567,7 +10255,7 @@ func (x *ReloadSchemaShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReloadSchemaShardResponse.ProtoReflect.Descriptor instead. 
func (*ReloadSchemaShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{148} + return file_vtctldata_proto_rawDescGZIP(), []int{159} } func (x *ReloadSchemaShardResponse) GetEvents() []*logutil.Event { @@ -9590,7 +10278,7 @@ type RemoveBackupRequest struct { func (x *RemoveBackupRequest) Reset() { *x = RemoveBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[149] + mi := &file_vtctldata_proto_msgTypes[160] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9603,7 +10291,7 @@ func (x *RemoveBackupRequest) String() string { func (*RemoveBackupRequest) ProtoMessage() {} func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[149] + mi := &file_vtctldata_proto_msgTypes[160] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9616,7 +10304,7 @@ func (x *RemoveBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveBackupRequest.ProtoReflect.Descriptor instead. 
func (*RemoveBackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{149} + return file_vtctldata_proto_rawDescGZIP(), []int{160} } func (x *RemoveBackupRequest) GetKeyspace() string { @@ -9649,7 +10337,7 @@ type RemoveBackupResponse struct { func (x *RemoveBackupResponse) Reset() { *x = RemoveBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[150] + mi := &file_vtctldata_proto_msgTypes[161] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9662,7 +10350,7 @@ func (x *RemoveBackupResponse) String() string { func (*RemoveBackupResponse) ProtoMessage() {} func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[150] + mi := &file_vtctldata_proto_msgTypes[161] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9675,7 +10363,7 @@ func (x *RemoveBackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveBackupResponse.ProtoReflect.Descriptor instead. 
func (*RemoveBackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{150} + return file_vtctldata_proto_rawDescGZIP(), []int{161} } type RemoveKeyspaceCellRequest struct { @@ -9697,7 +10385,7 @@ type RemoveKeyspaceCellRequest struct { func (x *RemoveKeyspaceCellRequest) Reset() { *x = RemoveKeyspaceCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[151] + mi := &file_vtctldata_proto_msgTypes[162] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9710,7 +10398,7 @@ func (x *RemoveKeyspaceCellRequest) String() string { func (*RemoveKeyspaceCellRequest) ProtoMessage() {} func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[151] + mi := &file_vtctldata_proto_msgTypes[162] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9723,7 +10411,7 @@ func (x *RemoveKeyspaceCellRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveKeyspaceCellRequest.ProtoReflect.Descriptor instead. 
func (*RemoveKeyspaceCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{151} + return file_vtctldata_proto_rawDescGZIP(), []int{162} } func (x *RemoveKeyspaceCellRequest) GetKeyspace() string { @@ -9763,7 +10451,7 @@ type RemoveKeyspaceCellResponse struct { func (x *RemoveKeyspaceCellResponse) Reset() { *x = RemoveKeyspaceCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[152] + mi := &file_vtctldata_proto_msgTypes[163] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9776,7 +10464,7 @@ func (x *RemoveKeyspaceCellResponse) String() string { func (*RemoveKeyspaceCellResponse) ProtoMessage() {} func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[152] + mi := &file_vtctldata_proto_msgTypes[163] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9789,7 +10477,7 @@ func (x *RemoveKeyspaceCellResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveKeyspaceCellResponse.ProtoReflect.Descriptor instead. 
func (*RemoveKeyspaceCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{152} + return file_vtctldata_proto_rawDescGZIP(), []int{163} } type RemoveShardCellRequest struct { @@ -9812,7 +10500,7 @@ type RemoveShardCellRequest struct { func (x *RemoveShardCellRequest) Reset() { *x = RemoveShardCellRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[153] + mi := &file_vtctldata_proto_msgTypes[164] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9825,7 +10513,7 @@ func (x *RemoveShardCellRequest) String() string { func (*RemoveShardCellRequest) ProtoMessage() {} func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[153] + mi := &file_vtctldata_proto_msgTypes[164] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9838,7 +10526,7 @@ func (x *RemoveShardCellRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveShardCellRequest.ProtoReflect.Descriptor instead. 
func (*RemoveShardCellRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{153} + return file_vtctldata_proto_rawDescGZIP(), []int{164} } func (x *RemoveShardCellRequest) GetKeyspace() string { @@ -9885,7 +10573,7 @@ type RemoveShardCellResponse struct { func (x *RemoveShardCellResponse) Reset() { *x = RemoveShardCellResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[154] + mi := &file_vtctldata_proto_msgTypes[165] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9898,7 +10586,7 @@ func (x *RemoveShardCellResponse) String() string { func (*RemoveShardCellResponse) ProtoMessage() {} func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[154] + mi := &file_vtctldata_proto_msgTypes[165] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9911,7 +10599,7 @@ func (x *RemoveShardCellResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RemoveShardCellResponse.ProtoReflect.Descriptor instead. 
func (*RemoveShardCellResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{154} + return file_vtctldata_proto_rawDescGZIP(), []int{165} } type ReparentTabletRequest struct { @@ -9927,7 +10615,7 @@ type ReparentTabletRequest struct { func (x *ReparentTabletRequest) Reset() { *x = ReparentTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[155] + mi := &file_vtctldata_proto_msgTypes[166] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9940,7 +10628,7 @@ func (x *ReparentTabletRequest) String() string { func (*ReparentTabletRequest) ProtoMessage() {} func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[155] + mi := &file_vtctldata_proto_msgTypes[166] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -9953,7 +10641,7 @@ func (x *ReparentTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReparentTabletRequest.ProtoReflect.Descriptor instead. 
func (*ReparentTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{155} + return file_vtctldata_proto_rawDescGZIP(), []int{166} } func (x *ReparentTabletRequest) GetTablet() *topodata.TabletAlias { @@ -9979,7 +10667,7 @@ type ReparentTabletResponse struct { func (x *ReparentTabletResponse) Reset() { *x = ReparentTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[156] + mi := &file_vtctldata_proto_msgTypes[167] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -9992,7 +10680,7 @@ func (x *ReparentTabletResponse) String() string { func (*ReparentTabletResponse) ProtoMessage() {} func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[156] + mi := &file_vtctldata_proto_msgTypes[167] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10005,7 +10693,7 @@ func (x *ReparentTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReparentTabletResponse.ProtoReflect.Descriptor instead. 
func (*ReparentTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{156} + return file_vtctldata_proto_rawDescGZIP(), []int{167} } func (x *ReparentTabletResponse) GetKeyspace() string { @@ -10057,7 +10745,7 @@ type ReshardCreateRequest struct { func (x *ReshardCreateRequest) Reset() { *x = ReshardCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[157] + mi := &file_vtctldata_proto_msgTypes[168] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10070,7 +10758,7 @@ func (x *ReshardCreateRequest) String() string { func (*ReshardCreateRequest) ProtoMessage() {} func (x *ReshardCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[157] + mi := &file_vtctldata_proto_msgTypes[168] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10083,7 +10771,7 @@ func (x *ReshardCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReshardCreateRequest.ProtoReflect.Descriptor instead. 
func (*ReshardCreateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{157} + return file_vtctldata_proto_rawDescGZIP(), []int{168} } func (x *ReshardCreateRequest) GetWorkflow() string { @@ -10193,7 +10881,7 @@ type RestoreFromBackupRequest struct { func (x *RestoreFromBackupRequest) Reset() { *x = RestoreFromBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[158] + mi := &file_vtctldata_proto_msgTypes[169] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10206,7 +10894,7 @@ func (x *RestoreFromBackupRequest) String() string { func (*RestoreFromBackupRequest) ProtoMessage() {} func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[158] + mi := &file_vtctldata_proto_msgTypes[169] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10219,7 +10907,7 @@ func (x *RestoreFromBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreFromBackupRequest.ProtoReflect.Descriptor instead. 
func (*RestoreFromBackupRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{158} + return file_vtctldata_proto_rawDescGZIP(), []int{169} } func (x *RestoreFromBackupRequest) GetTabletAlias() *topodata.TabletAlias { @@ -10272,7 +10960,7 @@ type RestoreFromBackupResponse struct { func (x *RestoreFromBackupResponse) Reset() { *x = RestoreFromBackupResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[159] + mi := &file_vtctldata_proto_msgTypes[170] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10285,7 +10973,7 @@ func (x *RestoreFromBackupResponse) String() string { func (*RestoreFromBackupResponse) ProtoMessage() {} func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[159] + mi := &file_vtctldata_proto_msgTypes[170] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10298,7 +10986,7 @@ func (x *RestoreFromBackupResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RestoreFromBackupResponse.ProtoReflect.Descriptor instead. 
func (*RestoreFromBackupResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{159} + return file_vtctldata_proto_rawDescGZIP(), []int{170} } func (x *RestoreFromBackupResponse) GetTabletAlias() *topodata.TabletAlias { @@ -10341,7 +11029,7 @@ type RetrySchemaMigrationRequest struct { func (x *RetrySchemaMigrationRequest) Reset() { *x = RetrySchemaMigrationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[160] + mi := &file_vtctldata_proto_msgTypes[171] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10354,7 +11042,7 @@ func (x *RetrySchemaMigrationRequest) String() string { func (*RetrySchemaMigrationRequest) ProtoMessage() {} func (x *RetrySchemaMigrationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[160] + mi := &file_vtctldata_proto_msgTypes[171] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10367,7 +11055,7 @@ func (x *RetrySchemaMigrationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RetrySchemaMigrationRequest.ProtoReflect.Descriptor instead. 
func (*RetrySchemaMigrationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{160} + return file_vtctldata_proto_rawDescGZIP(), []int{171} } func (x *RetrySchemaMigrationRequest) GetKeyspace() string { @@ -10395,7 +11083,7 @@ type RetrySchemaMigrationResponse struct { func (x *RetrySchemaMigrationResponse) Reset() { *x = RetrySchemaMigrationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[161] + mi := &file_vtctldata_proto_msgTypes[172] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10408,7 +11096,7 @@ func (x *RetrySchemaMigrationResponse) String() string { func (*RetrySchemaMigrationResponse) ProtoMessage() {} func (x *RetrySchemaMigrationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[161] + mi := &file_vtctldata_proto_msgTypes[172] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10421,7 +11109,7 @@ func (x *RetrySchemaMigrationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RetrySchemaMigrationResponse.ProtoReflect.Descriptor instead. 
func (*RetrySchemaMigrationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{161} + return file_vtctldata_proto_rawDescGZIP(), []int{172} } func (x *RetrySchemaMigrationResponse) GetRowsAffectedByShard() map[string]uint64 { @@ -10442,7 +11130,7 @@ type RunHealthCheckRequest struct { func (x *RunHealthCheckRequest) Reset() { *x = RunHealthCheckRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[162] + mi := &file_vtctldata_proto_msgTypes[173] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10455,7 +11143,7 @@ func (x *RunHealthCheckRequest) String() string { func (*RunHealthCheckRequest) ProtoMessage() {} func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[162] + mi := &file_vtctldata_proto_msgTypes[173] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10468,7 +11156,7 @@ func (x *RunHealthCheckRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RunHealthCheckRequest.ProtoReflect.Descriptor instead. 
func (*RunHealthCheckRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{162} + return file_vtctldata_proto_rawDescGZIP(), []int{173} } func (x *RunHealthCheckRequest) GetTabletAlias() *topodata.TabletAlias { @@ -10487,7 +11175,7 @@ type RunHealthCheckResponse struct { func (x *RunHealthCheckResponse) Reset() { *x = RunHealthCheckResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[163] + mi := &file_vtctldata_proto_msgTypes[174] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10500,7 +11188,7 @@ func (x *RunHealthCheckResponse) String() string { func (*RunHealthCheckResponse) ProtoMessage() {} func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[163] + mi := &file_vtctldata_proto_msgTypes[174] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10513,7 +11201,7 @@ func (x *RunHealthCheckResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RunHealthCheckResponse.ProtoReflect.Descriptor instead. 
func (*RunHealthCheckResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{163} + return file_vtctldata_proto_rawDescGZIP(), []int{174} } type SetKeyspaceDurabilityPolicyRequest struct { @@ -10528,7 +11216,7 @@ type SetKeyspaceDurabilityPolicyRequest struct { func (x *SetKeyspaceDurabilityPolicyRequest) Reset() { *x = SetKeyspaceDurabilityPolicyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[164] + mi := &file_vtctldata_proto_msgTypes[175] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10541,7 +11229,7 @@ func (x *SetKeyspaceDurabilityPolicyRequest) String() string { func (*SetKeyspaceDurabilityPolicyRequest) ProtoMessage() {} func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[164] + mi := &file_vtctldata_proto_msgTypes[175] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10554,7 +11242,7 @@ func (x *SetKeyspaceDurabilityPolicyRequest) ProtoReflect() protoreflect.Message // Deprecated: Use SetKeyspaceDurabilityPolicyRequest.ProtoReflect.Descriptor instead. 
func (*SetKeyspaceDurabilityPolicyRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{164} + return file_vtctldata_proto_rawDescGZIP(), []int{175} } func (x *SetKeyspaceDurabilityPolicyRequest) GetKeyspace() string { @@ -10583,7 +11271,7 @@ type SetKeyspaceDurabilityPolicyResponse struct { func (x *SetKeyspaceDurabilityPolicyResponse) Reset() { *x = SetKeyspaceDurabilityPolicyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[165] + mi := &file_vtctldata_proto_msgTypes[176] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10596,7 +11284,7 @@ func (x *SetKeyspaceDurabilityPolicyResponse) String() string { func (*SetKeyspaceDurabilityPolicyResponse) ProtoMessage() {} func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[165] + mi := &file_vtctldata_proto_msgTypes[176] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10609,7 +11297,7 @@ func (x *SetKeyspaceDurabilityPolicyResponse) ProtoReflect() protoreflect.Messag // Deprecated: Use SetKeyspaceDurabilityPolicyResponse.ProtoReflect.Descriptor instead. 
func (*SetKeyspaceDurabilityPolicyResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{165} + return file_vtctldata_proto_rawDescGZIP(), []int{176} } func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace { @@ -10619,159 +11307,32 @@ func (x *SetKeyspaceDurabilityPolicyResponse) GetKeyspace() *topodata.Keyspace { return nil } -type SetKeyspaceServedFromRequest struct { +type SetKeyspaceShardingInfoRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - TabletType topodata.TabletType `protobuf:"varint,2,opt,name=tablet_type,json=tabletType,proto3,enum=topodata.TabletType" json:"tablet_type,omitempty"` - Cells []string `protobuf:"bytes,3,rep,name=cells,proto3" json:"cells,omitempty"` - Remove bool `protobuf:"varint,4,opt,name=remove,proto3" json:"remove,omitempty"` - SourceKeyspace string `protobuf:"bytes,5,opt,name=source_keyspace,json=sourceKeyspace,proto3" json:"source_keyspace,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` } -func (x *SetKeyspaceServedFromRequest) Reset() { - *x = SetKeyspaceServedFromRequest{} +func (x *SetKeyspaceShardingInfoRequest) Reset() { + *x = SetKeyspaceShardingInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[166] + mi := &file_vtctldata_proto_msgTypes[177] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *SetKeyspaceServedFromRequest) String() string { +func (x *SetKeyspaceShardingInfoRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SetKeyspaceServedFromRequest) ProtoMessage() {} +func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {} -func (x *SetKeyspaceServedFromRequest) ProtoReflect() 
protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[166] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetKeyspaceServedFromRequest.ProtoReflect.Descriptor instead. -func (*SetKeyspaceServedFromRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{166} -} - -func (x *SetKeyspaceServedFromRequest) GetKeyspace() string { - if x != nil { - return x.Keyspace - } - return "" -} - -func (x *SetKeyspaceServedFromRequest) GetTabletType() topodata.TabletType { - if x != nil { - return x.TabletType - } - return topodata.TabletType(0) -} - -func (x *SetKeyspaceServedFromRequest) GetCells() []string { - if x != nil { - return x.Cells - } - return nil -} - -func (x *SetKeyspaceServedFromRequest) GetRemove() bool { - if x != nil { - return x.Remove - } - return false -} - -func (x *SetKeyspaceServedFromRequest) GetSourceKeyspace() string { - if x != nil { - return x.SourceKeyspace - } - return "" -} - -type SetKeyspaceServedFromResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Keyspace is the updated keyspace record. 
- Keyspace *topodata.Keyspace `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` -} - -func (x *SetKeyspaceServedFromResponse) Reset() { - *x = SetKeyspaceServedFromResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[167] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetKeyspaceServedFromResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetKeyspaceServedFromResponse) ProtoMessage() {} - -func (x *SetKeyspaceServedFromResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[167] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetKeyspaceServedFromResponse.ProtoReflect.Descriptor instead. -func (*SetKeyspaceServedFromResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{167} -} - -func (x *SetKeyspaceServedFromResponse) GetKeyspace() *topodata.Keyspace { - if x != nil { - return x.Keyspace - } - return nil -} - -type SetKeyspaceShardingInfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` -} - -func (x *SetKeyspaceShardingInfoRequest) Reset() { - *x = SetKeyspaceShardingInfoRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[168] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetKeyspaceShardingInfoRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetKeyspaceShardingInfoRequest) ProtoMessage() {} - -func (x 
*SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[168] +func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[177] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10784,7 +11345,7 @@ func (x *SetKeyspaceShardingInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetKeyspaceShardingInfoRequest.ProtoReflect.Descriptor instead. func (*SetKeyspaceShardingInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{168} + return file_vtctldata_proto_rawDescGZIP(), []int{177} } func (x *SetKeyspaceShardingInfoRequest) GetKeyspace() string { @@ -10813,7 +11374,7 @@ type SetKeyspaceShardingInfoResponse struct { func (x *SetKeyspaceShardingInfoResponse) Reset() { *x = SetKeyspaceShardingInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[169] + mi := &file_vtctldata_proto_msgTypes[178] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10826,7 +11387,7 @@ func (x *SetKeyspaceShardingInfoResponse) String() string { func (*SetKeyspaceShardingInfoResponse) ProtoMessage() {} func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[169] + mi := &file_vtctldata_proto_msgTypes[178] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10839,7 +11400,7 @@ func (x *SetKeyspaceShardingInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetKeyspaceShardingInfoResponse.ProtoReflect.Descriptor instead. 
func (*SetKeyspaceShardingInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{169} + return file_vtctldata_proto_rawDescGZIP(), []int{178} } func (x *SetKeyspaceShardingInfoResponse) GetKeyspace() *topodata.Keyspace { @@ -10862,7 +11423,7 @@ type SetShardIsPrimaryServingRequest struct { func (x *SetShardIsPrimaryServingRequest) Reset() { *x = SetShardIsPrimaryServingRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[170] + mi := &file_vtctldata_proto_msgTypes[179] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10875,7 +11436,7 @@ func (x *SetShardIsPrimaryServingRequest) String() string { func (*SetShardIsPrimaryServingRequest) ProtoMessage() {} func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[170] + mi := &file_vtctldata_proto_msgTypes[179] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10888,7 +11449,7 @@ func (x *SetShardIsPrimaryServingRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetShardIsPrimaryServingRequest.ProtoReflect.Descriptor instead. 
func (*SetShardIsPrimaryServingRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{170} + return file_vtctldata_proto_rawDescGZIP(), []int{179} } func (x *SetShardIsPrimaryServingRequest) GetKeyspace() string { @@ -10924,7 +11485,7 @@ type SetShardIsPrimaryServingResponse struct { func (x *SetShardIsPrimaryServingResponse) Reset() { *x = SetShardIsPrimaryServingResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[171] + mi := &file_vtctldata_proto_msgTypes[180] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -10937,7 +11498,7 @@ func (x *SetShardIsPrimaryServingResponse) String() string { func (*SetShardIsPrimaryServingResponse) ProtoMessage() {} func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[171] + mi := &file_vtctldata_proto_msgTypes[180] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -10950,7 +11511,7 @@ func (x *SetShardIsPrimaryServingResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetShardIsPrimaryServingResponse.ProtoReflect.Descriptor instead. 
func (*SetShardIsPrimaryServingResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{171} + return file_vtctldata_proto_rawDescGZIP(), []int{180} } func (x *SetShardIsPrimaryServingResponse) GetShard() *topodata.Shard { @@ -10991,7 +11552,7 @@ type SetShardTabletControlRequest struct { func (x *SetShardTabletControlRequest) Reset() { *x = SetShardTabletControlRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[172] + mi := &file_vtctldata_proto_msgTypes[181] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11004,7 +11565,7 @@ func (x *SetShardTabletControlRequest) String() string { func (*SetShardTabletControlRequest) ProtoMessage() {} func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[172] + mi := &file_vtctldata_proto_msgTypes[181] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11017,7 +11578,7 @@ func (x *SetShardTabletControlRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetShardTabletControlRequest.ProtoReflect.Descriptor instead. 
func (*SetShardTabletControlRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{172} + return file_vtctldata_proto_rawDescGZIP(), []int{181} } func (x *SetShardTabletControlRequest) GetKeyspace() string { @@ -11081,7 +11642,7 @@ type SetShardTabletControlResponse struct { func (x *SetShardTabletControlResponse) Reset() { *x = SetShardTabletControlResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[173] + mi := &file_vtctldata_proto_msgTypes[182] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11094,7 +11655,7 @@ func (x *SetShardTabletControlResponse) String() string { func (*SetShardTabletControlResponse) ProtoMessage() {} func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[173] + mi := &file_vtctldata_proto_msgTypes[182] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11107,7 +11668,7 @@ func (x *SetShardTabletControlResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetShardTabletControlResponse.ProtoReflect.Descriptor instead. 
func (*SetShardTabletControlResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{173} + return file_vtctldata_proto_rawDescGZIP(), []int{182} } func (x *SetShardTabletControlResponse) GetShard() *topodata.Shard { @@ -11129,7 +11690,7 @@ type SetWritableRequest struct { func (x *SetWritableRequest) Reset() { *x = SetWritableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[174] + mi := &file_vtctldata_proto_msgTypes[183] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11142,7 +11703,7 @@ func (x *SetWritableRequest) String() string { func (*SetWritableRequest) ProtoMessage() {} func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[174] + mi := &file_vtctldata_proto_msgTypes[183] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11155,7 +11716,7 @@ func (x *SetWritableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetWritableRequest.ProtoReflect.Descriptor instead. 
func (*SetWritableRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{174} + return file_vtctldata_proto_rawDescGZIP(), []int{183} } func (x *SetWritableRequest) GetTabletAlias() *topodata.TabletAlias { @@ -11181,7 +11742,7 @@ type SetWritableResponse struct { func (x *SetWritableResponse) Reset() { *x = SetWritableResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[175] + mi := &file_vtctldata_proto_msgTypes[184] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11194,7 +11755,7 @@ func (x *SetWritableResponse) String() string { func (*SetWritableResponse) ProtoMessage() {} func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[175] + mi := &file_vtctldata_proto_msgTypes[184] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11207,7 +11768,7 @@ func (x *SetWritableResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SetWritableResponse.ProtoReflect.Descriptor instead. 
func (*SetWritableResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{175} + return file_vtctldata_proto_rawDescGZIP(), []int{184} } type ShardReplicationAddRequest struct { @@ -11223,7 +11784,7 @@ type ShardReplicationAddRequest struct { func (x *ShardReplicationAddRequest) Reset() { *x = ShardReplicationAddRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[176] + mi := &file_vtctldata_proto_msgTypes[185] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11236,7 +11797,7 @@ func (x *ShardReplicationAddRequest) String() string { func (*ShardReplicationAddRequest) ProtoMessage() {} func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[176] + mi := &file_vtctldata_proto_msgTypes[185] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11249,7 +11810,7 @@ func (x *ShardReplicationAddRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationAddRequest.ProtoReflect.Descriptor instead. 
func (*ShardReplicationAddRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{176} + return file_vtctldata_proto_rawDescGZIP(), []int{185} } func (x *ShardReplicationAddRequest) GetKeyspace() string { @@ -11282,7 +11843,7 @@ type ShardReplicationAddResponse struct { func (x *ShardReplicationAddResponse) Reset() { *x = ShardReplicationAddResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[177] + mi := &file_vtctldata_proto_msgTypes[186] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11295,7 +11856,7 @@ func (x *ShardReplicationAddResponse) String() string { func (*ShardReplicationAddResponse) ProtoMessage() {} func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[177] + mi := &file_vtctldata_proto_msgTypes[186] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11308,7 +11869,7 @@ func (x *ShardReplicationAddResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationAddResponse.ProtoReflect.Descriptor instead. 
func (*ShardReplicationAddResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{177} + return file_vtctldata_proto_rawDescGZIP(), []int{186} } type ShardReplicationFixRequest struct { @@ -11324,7 +11885,7 @@ type ShardReplicationFixRequest struct { func (x *ShardReplicationFixRequest) Reset() { *x = ShardReplicationFixRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[178] + mi := &file_vtctldata_proto_msgTypes[187] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11337,7 +11898,7 @@ func (x *ShardReplicationFixRequest) String() string { func (*ShardReplicationFixRequest) ProtoMessage() {} func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[178] + mi := &file_vtctldata_proto_msgTypes[187] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11350,7 +11911,7 @@ func (x *ShardReplicationFixRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationFixRequest.ProtoReflect.Descriptor instead. 
func (*ShardReplicationFixRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{178} + return file_vtctldata_proto_rawDescGZIP(), []int{187} } func (x *ShardReplicationFixRequest) GetKeyspace() string { @@ -11388,7 +11949,7 @@ type ShardReplicationFixResponse struct { func (x *ShardReplicationFixResponse) Reset() { *x = ShardReplicationFixResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[179] + mi := &file_vtctldata_proto_msgTypes[188] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11401,7 +11962,7 @@ func (x *ShardReplicationFixResponse) String() string { func (*ShardReplicationFixResponse) ProtoMessage() {} func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[179] + mi := &file_vtctldata_proto_msgTypes[188] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11414,7 +11975,7 @@ func (x *ShardReplicationFixResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationFixResponse.ProtoReflect.Descriptor instead. 
func (*ShardReplicationFixResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{179} + return file_vtctldata_proto_rawDescGZIP(), []int{188} } func (x *ShardReplicationFixResponse) GetError() *topodata.ShardReplicationError { @@ -11436,7 +11997,7 @@ type ShardReplicationPositionsRequest struct { func (x *ShardReplicationPositionsRequest) Reset() { *x = ShardReplicationPositionsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[180] + mi := &file_vtctldata_proto_msgTypes[189] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11449,7 +12010,7 @@ func (x *ShardReplicationPositionsRequest) String() string { func (*ShardReplicationPositionsRequest) ProtoMessage() {} func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[180] + mi := &file_vtctldata_proto_msgTypes[189] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11462,7 +12023,7 @@ func (x *ShardReplicationPositionsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationPositionsRequest.ProtoReflect.Descriptor instead. 
func (*ShardReplicationPositionsRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{180} + return file_vtctldata_proto_rawDescGZIP(), []int{189} } func (x *ShardReplicationPositionsRequest) GetKeyspace() string { @@ -11495,7 +12056,7 @@ type ShardReplicationPositionsResponse struct { func (x *ShardReplicationPositionsResponse) Reset() { *x = ShardReplicationPositionsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[181] + mi := &file_vtctldata_proto_msgTypes[190] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11508,7 +12069,7 @@ func (x *ShardReplicationPositionsResponse) String() string { func (*ShardReplicationPositionsResponse) ProtoMessage() {} func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[181] + mi := &file_vtctldata_proto_msgTypes[190] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11521,7 +12082,7 @@ func (x *ShardReplicationPositionsResponse) ProtoReflect() protoreflect.Message // Deprecated: Use ShardReplicationPositionsResponse.ProtoReflect.Descriptor instead. 
func (*ShardReplicationPositionsResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{181} + return file_vtctldata_proto_rawDescGZIP(), []int{190} } func (x *ShardReplicationPositionsResponse) GetReplicationStatuses() map[string]*replicationdata.Status { @@ -11551,7 +12112,7 @@ type ShardReplicationRemoveRequest struct { func (x *ShardReplicationRemoveRequest) Reset() { *x = ShardReplicationRemoveRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[182] + mi := &file_vtctldata_proto_msgTypes[191] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11564,7 +12125,7 @@ func (x *ShardReplicationRemoveRequest) String() string { func (*ShardReplicationRemoveRequest) ProtoMessage() {} func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[182] + mi := &file_vtctldata_proto_msgTypes[191] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11577,7 +12138,7 @@ func (x *ShardReplicationRemoveRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationRemoveRequest.ProtoReflect.Descriptor instead. 
func (*ShardReplicationRemoveRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{182} + return file_vtctldata_proto_rawDescGZIP(), []int{191} } func (x *ShardReplicationRemoveRequest) GetKeyspace() string { @@ -11610,7 +12171,7 @@ type ShardReplicationRemoveResponse struct { func (x *ShardReplicationRemoveResponse) Reset() { *x = ShardReplicationRemoveResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[183] + mi := &file_vtctldata_proto_msgTypes[192] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11623,7 +12184,7 @@ func (x *ShardReplicationRemoveResponse) String() string { func (*ShardReplicationRemoveResponse) ProtoMessage() {} func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[183] + mi := &file_vtctldata_proto_msgTypes[192] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11636,7 +12197,7 @@ func (x *ShardReplicationRemoveResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ShardReplicationRemoveResponse.ProtoReflect.Descriptor instead. 
func (*ShardReplicationRemoveResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{183} + return file_vtctldata_proto_rawDescGZIP(), []int{192} } type SleepTabletRequest struct { @@ -11651,7 +12212,7 @@ type SleepTabletRequest struct { func (x *SleepTabletRequest) Reset() { *x = SleepTabletRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[184] + mi := &file_vtctldata_proto_msgTypes[193] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11664,7 +12225,7 @@ func (x *SleepTabletRequest) String() string { func (*SleepTabletRequest) ProtoMessage() {} func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[184] + mi := &file_vtctldata_proto_msgTypes[193] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11677,7 +12238,7 @@ func (x *SleepTabletRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SleepTabletRequest.ProtoReflect.Descriptor instead. 
func (*SleepTabletRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{184} + return file_vtctldata_proto_rawDescGZIP(), []int{193} } func (x *SleepTabletRequest) GetTabletAlias() *topodata.TabletAlias { @@ -11703,7 +12264,7 @@ type SleepTabletResponse struct { func (x *SleepTabletResponse) Reset() { *x = SleepTabletResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[185] + mi := &file_vtctldata_proto_msgTypes[194] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11716,7 +12277,7 @@ func (x *SleepTabletResponse) String() string { func (*SleepTabletResponse) ProtoMessage() {} func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[185] + mi := &file_vtctldata_proto_msgTypes[194] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11729,7 +12290,7 @@ func (x *SleepTabletResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SleepTabletResponse.ProtoReflect.Descriptor instead. 
func (*SleepTabletResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{185} + return file_vtctldata_proto_rawDescGZIP(), []int{194} } type SourceShardAddRequest struct { @@ -11753,7 +12314,7 @@ type SourceShardAddRequest struct { func (x *SourceShardAddRequest) Reset() { *x = SourceShardAddRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[186] + mi := &file_vtctldata_proto_msgTypes[195] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11766,7 +12327,7 @@ func (x *SourceShardAddRequest) String() string { func (*SourceShardAddRequest) ProtoMessage() {} func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[186] + mi := &file_vtctldata_proto_msgTypes[195] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11779,7 +12340,7 @@ func (x *SourceShardAddRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceShardAddRequest.ProtoReflect.Descriptor instead. 
func (*SourceShardAddRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{186} + return file_vtctldata_proto_rawDescGZIP(), []int{195} } func (x *SourceShardAddRequest) GetKeyspace() string { @@ -11843,7 +12404,7 @@ type SourceShardAddResponse struct { func (x *SourceShardAddResponse) Reset() { *x = SourceShardAddResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[187] + mi := &file_vtctldata_proto_msgTypes[196] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11856,7 +12417,7 @@ func (x *SourceShardAddResponse) String() string { func (*SourceShardAddResponse) ProtoMessage() {} func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[187] + mi := &file_vtctldata_proto_msgTypes[196] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11869,7 +12430,7 @@ func (x *SourceShardAddResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceShardAddResponse.ProtoReflect.Descriptor instead. 
func (*SourceShardAddResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{187} + return file_vtctldata_proto_rawDescGZIP(), []int{196} } func (x *SourceShardAddResponse) GetShard() *topodata.Shard { @@ -11892,7 +12453,7 @@ type SourceShardDeleteRequest struct { func (x *SourceShardDeleteRequest) Reset() { *x = SourceShardDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[188] + mi := &file_vtctldata_proto_msgTypes[197] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11905,7 +12466,7 @@ func (x *SourceShardDeleteRequest) String() string { func (*SourceShardDeleteRequest) ProtoMessage() {} func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[188] + mi := &file_vtctldata_proto_msgTypes[197] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11918,7 +12479,7 @@ func (x *SourceShardDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceShardDeleteRequest.ProtoReflect.Descriptor instead. 
func (*SourceShardDeleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{188} + return file_vtctldata_proto_rawDescGZIP(), []int{197} } func (x *SourceShardDeleteRequest) GetKeyspace() string { @@ -11954,7 +12515,7 @@ type SourceShardDeleteResponse struct { func (x *SourceShardDeleteResponse) Reset() { *x = SourceShardDeleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[189] + mi := &file_vtctldata_proto_msgTypes[198] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -11967,7 +12528,7 @@ func (x *SourceShardDeleteResponse) String() string { func (*SourceShardDeleteResponse) ProtoMessage() {} func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[189] + mi := &file_vtctldata_proto_msgTypes[198] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -11980,7 +12541,7 @@ func (x *SourceShardDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceShardDeleteResponse.ProtoReflect.Descriptor instead. 
func (*SourceShardDeleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{189} + return file_vtctldata_proto_rawDescGZIP(), []int{198} } func (x *SourceShardDeleteResponse) GetShard() *topodata.Shard { @@ -12001,7 +12562,7 @@ type StartReplicationRequest struct { func (x *StartReplicationRequest) Reset() { *x = StartReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[190] + mi := &file_vtctldata_proto_msgTypes[199] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12014,7 +12575,7 @@ func (x *StartReplicationRequest) String() string { func (*StartReplicationRequest) ProtoMessage() {} func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[190] + mi := &file_vtctldata_proto_msgTypes[199] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12027,7 +12588,7 @@ func (x *StartReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartReplicationRequest.ProtoReflect.Descriptor instead. 
func (*StartReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{190} + return file_vtctldata_proto_rawDescGZIP(), []int{199} } func (x *StartReplicationRequest) GetTabletAlias() *topodata.TabletAlias { @@ -12046,7 +12607,7 @@ type StartReplicationResponse struct { func (x *StartReplicationResponse) Reset() { *x = StartReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[191] + mi := &file_vtctldata_proto_msgTypes[200] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12059,7 +12620,7 @@ func (x *StartReplicationResponse) String() string { func (*StartReplicationResponse) ProtoMessage() {} func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[191] + mi := &file_vtctldata_proto_msgTypes[200] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12072,7 +12633,7 @@ func (x *StartReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartReplicationResponse.ProtoReflect.Descriptor instead. 
func (*StartReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{191} + return file_vtctldata_proto_rawDescGZIP(), []int{200} } type StopReplicationRequest struct { @@ -12086,7 +12647,7 @@ type StopReplicationRequest struct { func (x *StopReplicationRequest) Reset() { *x = StopReplicationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[192] + mi := &file_vtctldata_proto_msgTypes[201] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12099,7 +12660,7 @@ func (x *StopReplicationRequest) String() string { func (*StopReplicationRequest) ProtoMessage() {} func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[192] + mi := &file_vtctldata_proto_msgTypes[201] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12112,7 +12673,7 @@ func (x *StopReplicationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationRequest.ProtoReflect.Descriptor instead. 
func (*StopReplicationRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{192} + return file_vtctldata_proto_rawDescGZIP(), []int{201} } func (x *StopReplicationRequest) GetTabletAlias() *topodata.TabletAlias { @@ -12131,7 +12692,7 @@ type StopReplicationResponse struct { func (x *StopReplicationResponse) Reset() { *x = StopReplicationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[193] + mi := &file_vtctldata_proto_msgTypes[202] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12144,7 +12705,7 @@ func (x *StopReplicationResponse) String() string { func (*StopReplicationResponse) ProtoMessage() {} func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[193] + mi := &file_vtctldata_proto_msgTypes[202] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12157,7 +12718,7 @@ func (x *StopReplicationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StopReplicationResponse.ProtoReflect.Descriptor instead. 
func (*StopReplicationResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{193} + return file_vtctldata_proto_rawDescGZIP(), []int{202} } type TabletExternallyReparentedRequest struct { @@ -12173,7 +12734,7 @@ type TabletExternallyReparentedRequest struct { func (x *TabletExternallyReparentedRequest) Reset() { *x = TabletExternallyReparentedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[194] + mi := &file_vtctldata_proto_msgTypes[203] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12186,7 +12747,7 @@ func (x *TabletExternallyReparentedRequest) String() string { func (*TabletExternallyReparentedRequest) ProtoMessage() {} func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[194] + mi := &file_vtctldata_proto_msgTypes[203] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12199,7 +12760,7 @@ func (x *TabletExternallyReparentedRequest) ProtoReflect() protoreflect.Message // Deprecated: Use TabletExternallyReparentedRequest.ProtoReflect.Descriptor instead. 
func (*TabletExternallyReparentedRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{194} + return file_vtctldata_proto_rawDescGZIP(), []int{203} } func (x *TabletExternallyReparentedRequest) GetTablet() *topodata.TabletAlias { @@ -12223,7 +12784,7 @@ type TabletExternallyReparentedResponse struct { func (x *TabletExternallyReparentedResponse) Reset() { *x = TabletExternallyReparentedResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[195] + mi := &file_vtctldata_proto_msgTypes[204] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12236,7 +12797,7 @@ func (x *TabletExternallyReparentedResponse) String() string { func (*TabletExternallyReparentedResponse) ProtoMessage() {} func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[195] + mi := &file_vtctldata_proto_msgTypes[204] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12249,7 +12810,7 @@ func (x *TabletExternallyReparentedResponse) ProtoReflect() protoreflect.Message // Deprecated: Use TabletExternallyReparentedResponse.ProtoReflect.Descriptor instead. 
func (*TabletExternallyReparentedResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{195} + return file_vtctldata_proto_rawDescGZIP(), []int{204} } func (x *TabletExternallyReparentedResponse) GetKeyspace() string { @@ -12292,7 +12853,7 @@ type UpdateCellInfoRequest struct { func (x *UpdateCellInfoRequest) Reset() { *x = UpdateCellInfoRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[196] + mi := &file_vtctldata_proto_msgTypes[205] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12305,7 +12866,7 @@ func (x *UpdateCellInfoRequest) String() string { func (*UpdateCellInfoRequest) ProtoMessage() {} func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[196] + mi := &file_vtctldata_proto_msgTypes[205] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12318,7 +12879,7 @@ func (x *UpdateCellInfoRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellInfoRequest.ProtoReflect.Descriptor instead. 
func (*UpdateCellInfoRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{196} + return file_vtctldata_proto_rawDescGZIP(), []int{205} } func (x *UpdateCellInfoRequest) GetName() string { @@ -12347,7 +12908,7 @@ type UpdateCellInfoResponse struct { func (x *UpdateCellInfoResponse) Reset() { *x = UpdateCellInfoResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[197] + mi := &file_vtctldata_proto_msgTypes[206] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12360,7 +12921,7 @@ func (x *UpdateCellInfoResponse) String() string { func (*UpdateCellInfoResponse) ProtoMessage() {} func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[197] + mi := &file_vtctldata_proto_msgTypes[206] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12373,7 +12934,7 @@ func (x *UpdateCellInfoResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellInfoResponse.ProtoReflect.Descriptor instead. 
func (*UpdateCellInfoResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{197} + return file_vtctldata_proto_rawDescGZIP(), []int{206} } func (x *UpdateCellInfoResponse) GetName() string { @@ -12402,7 +12963,7 @@ type UpdateCellsAliasRequest struct { func (x *UpdateCellsAliasRequest) Reset() { *x = UpdateCellsAliasRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[198] + mi := &file_vtctldata_proto_msgTypes[207] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12415,7 +12976,7 @@ func (x *UpdateCellsAliasRequest) String() string { func (*UpdateCellsAliasRequest) ProtoMessage() {} func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[198] + mi := &file_vtctldata_proto_msgTypes[207] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12428,7 +12989,7 @@ func (x *UpdateCellsAliasRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellsAliasRequest.ProtoReflect.Descriptor instead. 
func (*UpdateCellsAliasRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{198} + return file_vtctldata_proto_rawDescGZIP(), []int{207} } func (x *UpdateCellsAliasRequest) GetName() string { @@ -12457,7 +13018,7 @@ type UpdateCellsAliasResponse struct { func (x *UpdateCellsAliasResponse) Reset() { *x = UpdateCellsAliasResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[199] + mi := &file_vtctldata_proto_msgTypes[208] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12470,7 +13031,7 @@ func (x *UpdateCellsAliasResponse) String() string { func (*UpdateCellsAliasResponse) ProtoMessage() {} func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[199] + mi := &file_vtctldata_proto_msgTypes[208] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12483,7 +13044,7 @@ func (x *UpdateCellsAliasResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateCellsAliasResponse.ProtoReflect.Descriptor instead. 
func (*UpdateCellsAliasResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{199} + return file_vtctldata_proto_rawDescGZIP(), []int{208} } func (x *UpdateCellsAliasResponse) GetName() string { @@ -12511,7 +13072,7 @@ type ValidateRequest struct { func (x *ValidateRequest) Reset() { *x = ValidateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[200] + mi := &file_vtctldata_proto_msgTypes[209] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12524,7 +13085,7 @@ func (x *ValidateRequest) String() string { func (*ValidateRequest) ProtoMessage() {} func (x *ValidateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[200] + mi := &file_vtctldata_proto_msgTypes[209] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12537,7 +13098,7 @@ func (x *ValidateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateRequest.ProtoReflect.Descriptor instead. 
func (*ValidateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{200} + return file_vtctldata_proto_rawDescGZIP(), []int{209} } func (x *ValidateRequest) GetPingTablets() bool { @@ -12559,7 +13120,7 @@ type ValidateResponse struct { func (x *ValidateResponse) Reset() { *x = ValidateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[201] + mi := &file_vtctldata_proto_msgTypes[210] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12572,7 +13133,7 @@ func (x *ValidateResponse) String() string { func (*ValidateResponse) ProtoMessage() {} func (x *ValidateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[201] + mi := &file_vtctldata_proto_msgTypes[210] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12585,7 +13146,7 @@ func (x *ValidateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResponse.ProtoReflect.Descriptor instead. 
func (*ValidateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{201} + return file_vtctldata_proto_rawDescGZIP(), []int{210} } func (x *ValidateResponse) GetResults() []string { @@ -12614,7 +13175,7 @@ type ValidateKeyspaceRequest struct { func (x *ValidateKeyspaceRequest) Reset() { *x = ValidateKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[202] + mi := &file_vtctldata_proto_msgTypes[211] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12627,7 +13188,7 @@ func (x *ValidateKeyspaceRequest) String() string { func (*ValidateKeyspaceRequest) ProtoMessage() {} func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[202] + mi := &file_vtctldata_proto_msgTypes[211] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12640,7 +13201,7 @@ func (x *ValidateKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*ValidateKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{202} + return file_vtctldata_proto_rawDescGZIP(), []int{211} } func (x *ValidateKeyspaceRequest) GetKeyspace() string { @@ -12669,7 +13230,7 @@ type ValidateKeyspaceResponse struct { func (x *ValidateKeyspaceResponse) Reset() { *x = ValidateKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[203] + mi := &file_vtctldata_proto_msgTypes[212] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12682,7 +13243,7 @@ func (x *ValidateKeyspaceResponse) String() string { func (*ValidateKeyspaceResponse) ProtoMessage() {} func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[203] + mi := &file_vtctldata_proto_msgTypes[212] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12695,7 +13256,7 @@ func (x *ValidateKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*ValidateKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{203} + return file_vtctldata_proto_rawDescGZIP(), []int{212} } func (x *ValidateKeyspaceResponse) GetResults() []string { @@ -12727,7 +13288,7 @@ type ValidateSchemaKeyspaceRequest struct { func (x *ValidateSchemaKeyspaceRequest) Reset() { *x = ValidateSchemaKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[204] + mi := &file_vtctldata_proto_msgTypes[213] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12740,7 +13301,7 @@ func (x *ValidateSchemaKeyspaceRequest) String() string { func (*ValidateSchemaKeyspaceRequest) ProtoMessage() {} func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[204] + mi := &file_vtctldata_proto_msgTypes[213] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12753,7 +13314,7 @@ func (x *ValidateSchemaKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSchemaKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*ValidateSchemaKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{204} + return file_vtctldata_proto_rawDescGZIP(), []int{213} } func (x *ValidateSchemaKeyspaceRequest) GetKeyspace() string { @@ -12803,7 +13364,7 @@ type ValidateSchemaKeyspaceResponse struct { func (x *ValidateSchemaKeyspaceResponse) Reset() { *x = ValidateSchemaKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[205] + mi := &file_vtctldata_proto_msgTypes[214] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12816,7 +13377,7 @@ func (x *ValidateSchemaKeyspaceResponse) String() string { func (*ValidateSchemaKeyspaceResponse) ProtoMessage() {} func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[205] + mi := &file_vtctldata_proto_msgTypes[214] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12829,7 +13390,7 @@ func (x *ValidateSchemaKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateSchemaKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*ValidateSchemaKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{205} + return file_vtctldata_proto_rawDescGZIP(), []int{214} } func (x *ValidateSchemaKeyspaceResponse) GetResults() []string { @@ -12859,7 +13420,7 @@ type ValidateShardRequest struct { func (x *ValidateShardRequest) Reset() { *x = ValidateShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[206] + mi := &file_vtctldata_proto_msgTypes[215] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12872,7 +13433,7 @@ func (x *ValidateShardRequest) String() string { func (*ValidateShardRequest) ProtoMessage() {} func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[206] + mi := &file_vtctldata_proto_msgTypes[215] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12885,7 +13446,7 @@ func (x *ValidateShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateShardRequest.ProtoReflect.Descriptor instead. 
func (*ValidateShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{206} + return file_vtctldata_proto_rawDescGZIP(), []int{215} } func (x *ValidateShardRequest) GetKeyspace() string { @@ -12920,7 +13481,7 @@ type ValidateShardResponse struct { func (x *ValidateShardResponse) Reset() { *x = ValidateShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[207] + mi := &file_vtctldata_proto_msgTypes[216] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12933,7 +13494,7 @@ func (x *ValidateShardResponse) String() string { func (*ValidateShardResponse) ProtoMessage() {} func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[207] + mi := &file_vtctldata_proto_msgTypes[216] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12946,7 +13507,7 @@ func (x *ValidateShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateShardResponse.ProtoReflect.Descriptor instead. 
func (*ValidateShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{207} + return file_vtctldata_proto_rawDescGZIP(), []int{216} } func (x *ValidateShardResponse) GetResults() []string { @@ -12967,7 +13528,7 @@ type ValidateVersionKeyspaceRequest struct { func (x *ValidateVersionKeyspaceRequest) Reset() { *x = ValidateVersionKeyspaceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[208] + mi := &file_vtctldata_proto_msgTypes[217] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -12980,7 +13541,7 @@ func (x *ValidateVersionKeyspaceRequest) String() string { func (*ValidateVersionKeyspaceRequest) ProtoMessage() {} func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[208] + mi := &file_vtctldata_proto_msgTypes[217] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -12993,7 +13554,7 @@ func (x *ValidateVersionKeyspaceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVersionKeyspaceRequest.ProtoReflect.Descriptor instead. 
func (*ValidateVersionKeyspaceRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{208} + return file_vtctldata_proto_rawDescGZIP(), []int{217} } func (x *ValidateVersionKeyspaceRequest) GetKeyspace() string { @@ -13015,7 +13576,7 @@ type ValidateVersionKeyspaceResponse struct { func (x *ValidateVersionKeyspaceResponse) Reset() { *x = ValidateVersionKeyspaceResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[209] + mi := &file_vtctldata_proto_msgTypes[218] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13028,7 +13589,7 @@ func (x *ValidateVersionKeyspaceResponse) String() string { func (*ValidateVersionKeyspaceResponse) ProtoMessage() {} func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[209] + mi := &file_vtctldata_proto_msgTypes[218] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13041,7 +13602,7 @@ func (x *ValidateVersionKeyspaceResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVersionKeyspaceResponse.ProtoReflect.Descriptor instead. 
func (*ValidateVersionKeyspaceResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{209} + return file_vtctldata_proto_rawDescGZIP(), []int{218} } func (x *ValidateVersionKeyspaceResponse) GetResults() []string { @@ -13070,7 +13631,7 @@ type ValidateVersionShardRequest struct { func (x *ValidateVersionShardRequest) Reset() { *x = ValidateVersionShardRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[210] + mi := &file_vtctldata_proto_msgTypes[219] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13083,7 +13644,7 @@ func (x *ValidateVersionShardRequest) String() string { func (*ValidateVersionShardRequest) ProtoMessage() {} func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[210] + mi := &file_vtctldata_proto_msgTypes[219] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13096,7 +13657,7 @@ func (x *ValidateVersionShardRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVersionShardRequest.ProtoReflect.Descriptor instead. 
func (*ValidateVersionShardRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{210} + return file_vtctldata_proto_rawDescGZIP(), []int{219} } func (x *ValidateVersionShardRequest) GetKeyspace() string { @@ -13124,7 +13685,7 @@ type ValidateVersionShardResponse struct { func (x *ValidateVersionShardResponse) Reset() { *x = ValidateVersionShardResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[211] + mi := &file_vtctldata_proto_msgTypes[220] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13137,7 +13698,7 @@ func (x *ValidateVersionShardResponse) String() string { func (*ValidateVersionShardResponse) ProtoMessage() {} func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[211] + mi := &file_vtctldata_proto_msgTypes[220] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13150,7 +13711,7 @@ func (x *ValidateVersionShardResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVersionShardResponse.ProtoReflect.Descriptor instead. 
func (*ValidateVersionShardResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{211} + return file_vtctldata_proto_rawDescGZIP(), []int{220} } func (x *ValidateVersionShardResponse) GetResults() []string { @@ -13174,7 +13735,7 @@ type ValidateVSchemaRequest struct { func (x *ValidateVSchemaRequest) Reset() { *x = ValidateVSchemaRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[212] + mi := &file_vtctldata_proto_msgTypes[221] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13187,7 +13748,7 @@ func (x *ValidateVSchemaRequest) String() string { func (*ValidateVSchemaRequest) ProtoMessage() {} func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[212] + mi := &file_vtctldata_proto_msgTypes[221] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13200,7 +13761,7 @@ func (x *ValidateVSchemaRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVSchemaRequest.ProtoReflect.Descriptor instead. 
func (*ValidateVSchemaRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{212} + return file_vtctldata_proto_rawDescGZIP(), []int{221} } func (x *ValidateVSchemaRequest) GetKeyspace() string { @@ -13243,7 +13804,7 @@ type ValidateVSchemaResponse struct { func (x *ValidateVSchemaResponse) Reset() { *x = ValidateVSchemaResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[213] + mi := &file_vtctldata_proto_msgTypes[222] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13256,7 +13817,7 @@ func (x *ValidateVSchemaResponse) String() string { func (*ValidateVSchemaResponse) ProtoMessage() {} func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[213] + mi := &file_vtctldata_proto_msgTypes[222] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13269,7 +13830,7 @@ func (x *ValidateVSchemaResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateVSchemaResponse.ProtoReflect.Descriptor instead. 
func (*ValidateVSchemaResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{213} + return file_vtctldata_proto_rawDescGZIP(), []int{222} } func (x *ValidateVSchemaResponse) GetResults() []string { @@ -13309,12 +13870,14 @@ type VDiffCreateRequest struct { WaitUpdateInterval *vttime.Duration `protobuf:"bytes,16,opt,name=wait_update_interval,json=waitUpdateInterval,proto3" json:"wait_update_interval,omitempty"` AutoRetry bool `protobuf:"varint,17,opt,name=auto_retry,json=autoRetry,proto3" json:"auto_retry,omitempty"` Verbose bool `protobuf:"varint,18,opt,name=verbose,proto3" json:"verbose,omitempty"` + MaxReportSampleRows int64 `protobuf:"varint,19,opt,name=max_report_sample_rows,json=maxReportSampleRows,proto3" json:"max_report_sample_rows,omitempty"` + MaxDiffDuration *vttime.Duration `protobuf:"bytes,20,opt,name=max_diff_duration,json=maxDiffDuration,proto3" json:"max_diff_duration,omitempty"` } func (x *VDiffCreateRequest) Reset() { *x = VDiffCreateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[214] + mi := &file_vtctldata_proto_msgTypes[223] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13327,7 +13890,7 @@ func (x *VDiffCreateRequest) String() string { func (*VDiffCreateRequest) ProtoMessage() {} func (x *VDiffCreateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[214] + mi := &file_vtctldata_proto_msgTypes[223] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13340,7 +13903,7 @@ func (x *VDiffCreateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffCreateRequest.ProtoReflect.Descriptor instead. 
func (*VDiffCreateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{214} + return file_vtctldata_proto_rawDescGZIP(), []int{223} } func (x *VDiffCreateRequest) GetWorkflow() string { @@ -13469,6 +14032,20 @@ func (x *VDiffCreateRequest) GetVerbose() bool { return false } +func (x *VDiffCreateRequest) GetMaxReportSampleRows() int64 { + if x != nil { + return x.MaxReportSampleRows + } + return 0 +} + +func (x *VDiffCreateRequest) GetMaxDiffDuration() *vttime.Duration { + if x != nil { + return x.MaxDiffDuration + } + return nil +} + type VDiffCreateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -13482,7 +14059,7 @@ type VDiffCreateResponse struct { func (x *VDiffCreateResponse) Reset() { *x = VDiffCreateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[215] + mi := &file_vtctldata_proto_msgTypes[224] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13495,7 +14072,7 @@ func (x *VDiffCreateResponse) String() string { func (*VDiffCreateResponse) ProtoMessage() {} func (x *VDiffCreateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[215] + mi := &file_vtctldata_proto_msgTypes[224] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13508,7 +14085,7 @@ func (x *VDiffCreateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffCreateResponse.ProtoReflect.Descriptor instead. 
func (*VDiffCreateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{215} + return file_vtctldata_proto_rawDescGZIP(), []int{224} } func (x *VDiffCreateResponse) GetUUID() string { @@ -13532,7 +14109,7 @@ type VDiffDeleteRequest struct { func (x *VDiffDeleteRequest) Reset() { *x = VDiffDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[216] + mi := &file_vtctldata_proto_msgTypes[225] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13545,7 +14122,7 @@ func (x *VDiffDeleteRequest) String() string { func (*VDiffDeleteRequest) ProtoMessage() {} func (x *VDiffDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[216] + mi := &file_vtctldata_proto_msgTypes[225] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13558,7 +14135,7 @@ func (x *VDiffDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffDeleteRequest.ProtoReflect.Descriptor instead. 
func (*VDiffDeleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{216} + return file_vtctldata_proto_rawDescGZIP(), []int{225} } func (x *VDiffDeleteRequest) GetWorkflow() string { @@ -13591,7 +14168,7 @@ type VDiffDeleteResponse struct { func (x *VDiffDeleteResponse) Reset() { *x = VDiffDeleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[217] + mi := &file_vtctldata_proto_msgTypes[226] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13604,7 +14181,7 @@ func (x *VDiffDeleteResponse) String() string { func (*VDiffDeleteResponse) ProtoMessage() {} func (x *VDiffDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[217] + mi := &file_vtctldata_proto_msgTypes[226] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13617,7 +14194,7 @@ func (x *VDiffDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffDeleteResponse.ProtoReflect.Descriptor instead. 
func (*VDiffDeleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{217} + return file_vtctldata_proto_rawDescGZIP(), []int{226} } type VDiffResumeRequest struct { @@ -13633,7 +14210,7 @@ type VDiffResumeRequest struct { func (x *VDiffResumeRequest) Reset() { *x = VDiffResumeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[218] + mi := &file_vtctldata_proto_msgTypes[227] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13646,7 +14223,7 @@ func (x *VDiffResumeRequest) String() string { func (*VDiffResumeRequest) ProtoMessage() {} func (x *VDiffResumeRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[218] + mi := &file_vtctldata_proto_msgTypes[227] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13659,7 +14236,7 @@ func (x *VDiffResumeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffResumeRequest.ProtoReflect.Descriptor instead. 
func (*VDiffResumeRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{218} + return file_vtctldata_proto_rawDescGZIP(), []int{227} } func (x *VDiffResumeRequest) GetWorkflow() string { @@ -13692,7 +14269,7 @@ type VDiffResumeResponse struct { func (x *VDiffResumeResponse) Reset() { *x = VDiffResumeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[219] + mi := &file_vtctldata_proto_msgTypes[228] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13705,7 +14282,7 @@ func (x *VDiffResumeResponse) String() string { func (*VDiffResumeResponse) ProtoMessage() {} func (x *VDiffResumeResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[219] + mi := &file_vtctldata_proto_msgTypes[228] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13718,7 +14295,7 @@ func (x *VDiffResumeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffResumeResponse.ProtoReflect.Descriptor instead. 
func (*VDiffResumeResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{219} + return file_vtctldata_proto_rawDescGZIP(), []int{228} } type VDiffShowRequest struct { @@ -13735,7 +14312,7 @@ type VDiffShowRequest struct { func (x *VDiffShowRequest) Reset() { *x = VDiffShowRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[220] + mi := &file_vtctldata_proto_msgTypes[229] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13748,7 +14325,7 @@ func (x *VDiffShowRequest) String() string { func (*VDiffShowRequest) ProtoMessage() {} func (x *VDiffShowRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[220] + mi := &file_vtctldata_proto_msgTypes[229] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13761,7 +14338,7 @@ func (x *VDiffShowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffShowRequest.ProtoReflect.Descriptor instead. 
func (*VDiffShowRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{220} + return file_vtctldata_proto_rawDescGZIP(), []int{229} } func (x *VDiffShowRequest) GetWorkflow() string { @@ -13797,7 +14374,7 @@ type VDiffShowResponse struct { func (x *VDiffShowResponse) Reset() { *x = VDiffShowResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[221] + mi := &file_vtctldata_proto_msgTypes[230] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13810,7 +14387,7 @@ func (x *VDiffShowResponse) String() string { func (*VDiffShowResponse) ProtoMessage() {} func (x *VDiffShowResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[221] + mi := &file_vtctldata_proto_msgTypes[230] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13823,7 +14400,7 @@ func (x *VDiffShowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffShowResponse.ProtoReflect.Descriptor instead. 
func (*VDiffShowResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{221} + return file_vtctldata_proto_rawDescGZIP(), []int{230} } func (x *VDiffShowResponse) GetTabletResponses() map[string]*tabletmanagerdata.VDiffResponse { @@ -13846,7 +14423,7 @@ type VDiffStopRequest struct { func (x *VDiffStopRequest) Reset() { *x = VDiffStopRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[222] + mi := &file_vtctldata_proto_msgTypes[231] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13859,7 +14436,7 @@ func (x *VDiffStopRequest) String() string { func (*VDiffStopRequest) ProtoMessage() {} func (x *VDiffStopRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[222] + mi := &file_vtctldata_proto_msgTypes[231] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13872,7 +14449,7 @@ func (x *VDiffStopRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffStopRequest.ProtoReflect.Descriptor instead. 
func (*VDiffStopRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{222} + return file_vtctldata_proto_rawDescGZIP(), []int{231} } func (x *VDiffStopRequest) GetWorkflow() string { @@ -13905,7 +14482,7 @@ type VDiffStopResponse struct { func (x *VDiffStopResponse) Reset() { *x = VDiffStopResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[223] + mi := &file_vtctldata_proto_msgTypes[232] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13918,7 +14495,7 @@ func (x *VDiffStopResponse) String() string { func (*VDiffStopResponse) ProtoMessage() {} func (x *VDiffStopResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[223] + mi := &file_vtctldata_proto_msgTypes[232] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13931,7 +14508,7 @@ func (x *VDiffStopResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VDiffStopResponse.ProtoReflect.Descriptor instead. 
func (*VDiffStopResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{223} + return file_vtctldata_proto_rawDescGZIP(), []int{232} } type WorkflowDeleteRequest struct { @@ -13939,16 +14516,17 @@ type WorkflowDeleteRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` - KeepData bool `protobuf:"varint,3,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` - KeepRoutingRules bool `protobuf:"varint,4,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + KeepData bool `protobuf:"varint,3,opt,name=keep_data,json=keepData,proto3" json:"keep_data,omitempty"` + KeepRoutingRules bool `protobuf:"varint,4,opt,name=keep_routing_rules,json=keepRoutingRules,proto3" json:"keep_routing_rules,omitempty"` + Shards []string `protobuf:"bytes,5,rep,name=shards,proto3" json:"shards,omitempty"` } func (x *WorkflowDeleteRequest) Reset() { *x = WorkflowDeleteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[224] + mi := &file_vtctldata_proto_msgTypes[233] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -13961,7 +14539,7 @@ func (x *WorkflowDeleteRequest) String() string { func (*WorkflowDeleteRequest) ProtoMessage() {} func (x *WorkflowDeleteRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[224] + mi := &file_vtctldata_proto_msgTypes[233] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -13974,7 +14552,7 @@ func (x 
*WorkflowDeleteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowDeleteRequest.ProtoReflect.Descriptor instead. func (*WorkflowDeleteRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{224} + return file_vtctldata_proto_rawDescGZIP(), []int{233} } func (x *WorkflowDeleteRequest) GetKeyspace() string { @@ -14005,6 +14583,13 @@ func (x *WorkflowDeleteRequest) GetKeepRoutingRules() bool { return false } +func (x *WorkflowDeleteRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + type WorkflowDeleteResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -14017,7 +14602,7 @@ type WorkflowDeleteResponse struct { func (x *WorkflowDeleteResponse) Reset() { *x = WorkflowDeleteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[225] + mi := &file_vtctldata_proto_msgTypes[234] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14030,7 +14615,7 @@ func (x *WorkflowDeleteResponse) String() string { func (*WorkflowDeleteResponse) ProtoMessage() {} func (x *WorkflowDeleteResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[225] + mi := &file_vtctldata_proto_msgTypes[234] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14043,7 +14628,7 @@ func (x *WorkflowDeleteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowDeleteResponse.ProtoReflect.Descriptor instead. 
func (*WorkflowDeleteResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{225} + return file_vtctldata_proto_rawDescGZIP(), []int{234} } func (x *WorkflowDeleteResponse) GetSummary() string { @@ -14065,14 +14650,15 @@ type WorkflowStatusRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` - Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Keyspace string `protobuf:"bytes,1,opt,name=keyspace,proto3" json:"keyspace,omitempty"` + Workflow string `protobuf:"bytes,2,opt,name=workflow,proto3" json:"workflow,omitempty"` + Shards []string `protobuf:"bytes,3,rep,name=shards,proto3" json:"shards,omitempty"` } func (x *WorkflowStatusRequest) Reset() { *x = WorkflowStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[226] + mi := &file_vtctldata_proto_msgTypes[235] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14085,7 +14671,7 @@ func (x *WorkflowStatusRequest) String() string { func (*WorkflowStatusRequest) ProtoMessage() {} func (x *WorkflowStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[226] + mi := &file_vtctldata_proto_msgTypes[235] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14098,7 +14684,7 @@ func (x *WorkflowStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowStatusRequest.ProtoReflect.Descriptor instead. 
func (*WorkflowStatusRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{226} + return file_vtctldata_proto_rawDescGZIP(), []int{235} } func (x *WorkflowStatusRequest) GetKeyspace() string { @@ -14115,6 +14701,13 @@ func (x *WorkflowStatusRequest) GetWorkflow() string { return "" } +func (x *WorkflowStatusRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + type WorkflowStatusResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -14129,7 +14722,7 @@ type WorkflowStatusResponse struct { func (x *WorkflowStatusResponse) Reset() { *x = WorkflowStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[227] + mi := &file_vtctldata_proto_msgTypes[236] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14142,7 +14735,7 @@ func (x *WorkflowStatusResponse) String() string { func (*WorkflowStatusResponse) ProtoMessage() {} func (x *WorkflowStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[227] + mi := &file_vtctldata_proto_msgTypes[236] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14155,7 +14748,7 @@ func (x *WorkflowStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowStatusResponse.ProtoReflect.Descriptor instead. 
func (*WorkflowStatusResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{227} + return file_vtctldata_proto_rawDescGZIP(), []int{236} } func (x *WorkflowStatusResponse) GetTableCopyState() map[string]*WorkflowStatusResponse_TableCopyState { @@ -14194,12 +14787,13 @@ type WorkflowSwitchTrafficRequest struct { Timeout *vttime.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"` DryRun bool `protobuf:"varint,9,opt,name=dry_run,json=dryRun,proto3" json:"dry_run,omitempty"` InitializeTargetSequences bool `protobuf:"varint,10,opt,name=initialize_target_sequences,json=initializeTargetSequences,proto3" json:"initialize_target_sequences,omitempty"` + Shards []string `protobuf:"bytes,11,rep,name=shards,proto3" json:"shards,omitempty"` } func (x *WorkflowSwitchTrafficRequest) Reset() { *x = WorkflowSwitchTrafficRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[228] + mi := &file_vtctldata_proto_msgTypes[237] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14212,7 +14806,7 @@ func (x *WorkflowSwitchTrafficRequest) String() string { func (*WorkflowSwitchTrafficRequest) ProtoMessage() {} func (x *WorkflowSwitchTrafficRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[228] + mi := &file_vtctldata_proto_msgTypes[237] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14225,7 +14819,7 @@ func (x *WorkflowSwitchTrafficRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowSwitchTrafficRequest.ProtoReflect.Descriptor instead. 
func (*WorkflowSwitchTrafficRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{228} + return file_vtctldata_proto_rawDescGZIP(), []int{237} } func (x *WorkflowSwitchTrafficRequest) GetKeyspace() string { @@ -14298,6 +14892,13 @@ func (x *WorkflowSwitchTrafficRequest) GetInitializeTargetSequences() bool { return false } +func (x *WorkflowSwitchTrafficRequest) GetShards() []string { + if x != nil { + return x.Shards + } + return nil +} + type WorkflowSwitchTrafficResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -14312,7 +14913,7 @@ type WorkflowSwitchTrafficResponse struct { func (x *WorkflowSwitchTrafficResponse) Reset() { *x = WorkflowSwitchTrafficResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[229] + mi := &file_vtctldata_proto_msgTypes[238] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14325,7 +14926,7 @@ func (x *WorkflowSwitchTrafficResponse) String() string { func (*WorkflowSwitchTrafficResponse) ProtoMessage() {} func (x *WorkflowSwitchTrafficResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[229] + mi := &file_vtctldata_proto_msgTypes[238] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14338,7 +14939,7 @@ func (x *WorkflowSwitchTrafficResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowSwitchTrafficResponse.ProtoReflect.Descriptor instead. 
func (*WorkflowSwitchTrafficResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{229} + return file_vtctldata_proto_rawDescGZIP(), []int{238} } func (x *WorkflowSwitchTrafficResponse) GetSummary() string { @@ -14383,7 +14984,7 @@ type WorkflowUpdateRequest struct { func (x *WorkflowUpdateRequest) Reset() { *x = WorkflowUpdateRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[230] + mi := &file_vtctldata_proto_msgTypes[239] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14396,7 +14997,7 @@ func (x *WorkflowUpdateRequest) String() string { func (*WorkflowUpdateRequest) ProtoMessage() {} func (x *WorkflowUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[230] + mi := &file_vtctldata_proto_msgTypes[239] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14409,7 +15010,7 @@ func (x *WorkflowUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowUpdateRequest.ProtoReflect.Descriptor instead. 
func (*WorkflowUpdateRequest) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{230} + return file_vtctldata_proto_rawDescGZIP(), []int{239} } func (x *WorkflowUpdateRequest) GetKeyspace() string { @@ -14438,7 +15039,7 @@ type WorkflowUpdateResponse struct { func (x *WorkflowUpdateResponse) Reset() { *x = WorkflowUpdateResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[231] + mi := &file_vtctldata_proto_msgTypes[240] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14451,7 +15052,7 @@ func (x *WorkflowUpdateResponse) String() string { func (*WorkflowUpdateResponse) ProtoMessage() {} func (x *WorkflowUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[231] + mi := &file_vtctldata_proto_msgTypes[240] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14464,7 +15065,7 @@ func (x *WorkflowUpdateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowUpdateResponse.ProtoReflect.Descriptor instead. 
func (*WorkflowUpdateResponse) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{231} + return file_vtctldata_proto_rawDescGZIP(), []int{240} } func (x *WorkflowUpdateResponse) GetSummary() string { @@ -14493,7 +15094,7 @@ type Workflow_ReplicationLocation struct { func (x *Workflow_ReplicationLocation) Reset() { *x = Workflow_ReplicationLocation{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[233] + mi := &file_vtctldata_proto_msgTypes[242] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14506,7 +15107,7 @@ func (x *Workflow_ReplicationLocation) String() string { func (*Workflow_ReplicationLocation) ProtoMessage() {} func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[233] + mi := &file_vtctldata_proto_msgTypes[242] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14519,7 +15120,7 @@ func (x *Workflow_ReplicationLocation) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow_ReplicationLocation.ProtoReflect.Descriptor instead. 
func (*Workflow_ReplicationLocation) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7, 1} + return file_vtctldata_proto_rawDescGZIP(), []int{8, 1} } func (x *Workflow_ReplicationLocation) GetKeyspace() string { @@ -14549,7 +15150,7 @@ type Workflow_ShardStream struct { func (x *Workflow_ShardStream) Reset() { *x = Workflow_ShardStream{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[234] + mi := &file_vtctldata_proto_msgTypes[243] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14562,7 +15163,7 @@ func (x *Workflow_ShardStream) String() string { func (*Workflow_ShardStream) ProtoMessage() {} func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[234] + mi := &file_vtctldata_proto_msgTypes[243] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14575,7 +15176,7 @@ func (x *Workflow_ShardStream) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow_ShardStream.ProtoReflect.Descriptor instead. func (*Workflow_ShardStream) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7, 2} + return file_vtctldata_proto_rawDescGZIP(), []int{8, 2} } func (x *Workflow_ShardStream) GetStreams() []*Workflow_Stream { @@ -14625,16 +15226,19 @@ type Workflow_Stream struct { // Note that this field being set does not necessarily mean that Logs is nil; // if there are N logs that exist for the stream, and we fail to fetch the // ith log, we will still return logs in [0, i) + (i, N]. 
- LogFetchError string `protobuf:"bytes,14,opt,name=log_fetch_error,json=logFetchError,proto3" json:"log_fetch_error,omitempty"` - Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` - RowsCopied int64 `protobuf:"varint,16,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` - ThrottlerStatus *Workflow_Stream_ThrottlerStatus `protobuf:"bytes,17,opt,name=throttler_status,json=throttlerStatus,proto3" json:"throttler_status,omitempty"` + LogFetchError string `protobuf:"bytes,14,opt,name=log_fetch_error,json=logFetchError,proto3" json:"log_fetch_error,omitempty"` + Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` + RowsCopied int64 `protobuf:"varint,16,opt,name=rows_copied,json=rowsCopied,proto3" json:"rows_copied,omitempty"` + ThrottlerStatus *Workflow_Stream_ThrottlerStatus `protobuf:"bytes,17,opt,name=throttler_status,json=throttlerStatus,proto3" json:"throttler_status,omitempty"` + TabletTypes []topodata.TabletType `protobuf:"varint,18,rep,packed,name=tablet_types,json=tabletTypes,proto3,enum=topodata.TabletType" json:"tablet_types,omitempty"` + TabletSelectionPreference tabletmanagerdata.TabletSelectionPreference `protobuf:"varint,19,opt,name=tablet_selection_preference,json=tabletSelectionPreference,proto3,enum=tabletmanagerdata.TabletSelectionPreference" json:"tablet_selection_preference,omitempty"` + Cells []string `protobuf:"bytes,20,rep,name=cells,proto3" json:"cells,omitempty"` } func (x *Workflow_Stream) Reset() { *x = Workflow_Stream{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[235] + mi := &file_vtctldata_proto_msgTypes[244] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14647,7 +15251,7 @@ func (x *Workflow_Stream) String() string { func (*Workflow_Stream) ProtoMessage() {} func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[235] + mi := 
&file_vtctldata_proto_msgTypes[244] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14660,7 +15264,7 @@ func (x *Workflow_Stream) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow_Stream.ProtoReflect.Descriptor instead. func (*Workflow_Stream) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7, 3} + return file_vtctldata_proto_rawDescGZIP(), []int{8, 3} } func (x *Workflow_Stream) GetId() int64 { @@ -14782,19 +15386,41 @@ func (x *Workflow_Stream) GetThrottlerStatus() *Workflow_Stream_ThrottlerStatus return nil } -type Workflow_Stream_CopyState struct { - state protoimpl.MessageState +func (x *Workflow_Stream) GetTabletTypes() []topodata.TabletType { + if x != nil { + return x.TabletTypes + } + return nil +} + +func (x *Workflow_Stream) GetTabletSelectionPreference() tabletmanagerdata.TabletSelectionPreference { + if x != nil { + return x.TabletSelectionPreference + } + return tabletmanagerdata.TabletSelectionPreference(0) +} + +func (x *Workflow_Stream) GetCells() []string { + if x != nil { + return x.Cells + } + return nil +} + +type Workflow_Stream_CopyState struct { + state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` - LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` + Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"` + LastPk string `protobuf:"bytes,2,opt,name=last_pk,json=lastPk,proto3" json:"last_pk,omitempty"` + StreamId int64 `protobuf:"varint,3,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` } func (x *Workflow_Stream_CopyState) Reset() { *x = Workflow_Stream_CopyState{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[236] + mi := &file_vtctldata_proto_msgTypes[245] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14807,7 +15433,7 @@ func (x *Workflow_Stream_CopyState) String() string { func (*Workflow_Stream_CopyState) ProtoMessage() {} func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[236] + mi := &file_vtctldata_proto_msgTypes[245] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14820,7 +15446,7 @@ func (x *Workflow_Stream_CopyState) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow_Stream_CopyState.ProtoReflect.Descriptor instead. func (*Workflow_Stream_CopyState) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 0} + return file_vtctldata_proto_rawDescGZIP(), []int{8, 3, 0} } func (x *Workflow_Stream_CopyState) GetTable() string { @@ -14837,6 +15463,13 @@ func (x *Workflow_Stream_CopyState) GetLastPk() string { return "" } +func (x *Workflow_Stream_CopyState) GetStreamId() int64 { + if x != nil { + return x.StreamId + } + return 0 +} + type Workflow_Stream_Log struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -14855,7 +15488,7 @@ type Workflow_Stream_Log struct { func (x *Workflow_Stream_Log) Reset() { *x = Workflow_Stream_Log{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[237] + mi := &file_vtctldata_proto_msgTypes[246] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14868,7 +15501,7 @@ func (x *Workflow_Stream_Log) String() string { func (*Workflow_Stream_Log) ProtoMessage() {} func (x *Workflow_Stream_Log) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[237] + mi := &file_vtctldata_proto_msgTypes[246] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14881,7 +15514,7 @@ func (x *Workflow_Stream_Log) 
ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow_Stream_Log.ProtoReflect.Descriptor instead. func (*Workflow_Stream_Log) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 1} + return file_vtctldata_proto_rawDescGZIP(), []int{8, 3, 1} } func (x *Workflow_Stream_Log) GetId() int64 { @@ -14952,7 +15585,7 @@ type Workflow_Stream_ThrottlerStatus struct { func (x *Workflow_Stream_ThrottlerStatus) Reset() { *x = Workflow_Stream_ThrottlerStatus{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[238] + mi := &file_vtctldata_proto_msgTypes[247] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -14965,7 +15598,7 @@ func (x *Workflow_Stream_ThrottlerStatus) String() string { func (*Workflow_Stream_ThrottlerStatus) ProtoMessage() {} func (x *Workflow_Stream_ThrottlerStatus) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[238] + mi := &file_vtctldata_proto_msgTypes[247] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -14978,7 +15611,7 @@ func (x *Workflow_Stream_ThrottlerStatus) ProtoReflect() protoreflect.Message { // Deprecated: Use Workflow_Stream_ThrottlerStatus.ProtoReflect.Descriptor instead. 
func (*Workflow_Stream_ThrottlerStatus) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{7, 3, 2} + return file_vtctldata_proto_rawDescGZIP(), []int{8, 3, 2} } func (x *Workflow_Stream_ThrottlerStatus) GetComponentThrottled() string { @@ -14995,6 +15628,53 @@ func (x *Workflow_Stream_ThrottlerStatus) GetTimeThrottled() *vttime.Time { return nil } +type ApplyVSchemaResponse_ParamList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Params []string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty"` +} + +func (x *ApplyVSchemaResponse_ParamList) Reset() { + *x = ApplyVSchemaResponse_ParamList{} + if protoimpl.UnsafeEnabled { + mi := &file_vtctldata_proto_msgTypes[250] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApplyVSchemaResponse_ParamList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyVSchemaResponse_ParamList) ProtoMessage() {} + +func (x *ApplyVSchemaResponse_ParamList) ProtoReflect() protoreflect.Message { + mi := &file_vtctldata_proto_msgTypes[250] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyVSchemaResponse_ParamList.ProtoReflect.Descriptor instead. 
+func (*ApplyVSchemaResponse_ParamList) Descriptor() ([]byte, []int) { + return file_vtctldata_proto_rawDescGZIP(), []int{22, 1} +} + +func (x *ApplyVSchemaResponse_ParamList) GetParams() []string { + if x != nil { + return x.Params + } + return nil +} + type GetSrvKeyspaceNamesResponse_NameList struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -15006,7 +15686,7 @@ type GetSrvKeyspaceNamesResponse_NameList struct { func (x *GetSrvKeyspaceNamesResponse_NameList) Reset() { *x = GetSrvKeyspaceNamesResponse_NameList{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[246] + mi := &file_vtctldata_proto_msgTypes[259] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15019,7 +15699,7 @@ func (x *GetSrvKeyspaceNamesResponse_NameList) String() string { func (*GetSrvKeyspaceNamesResponse_NameList) ProtoMessage() {} func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[246] + mi := &file_vtctldata_proto_msgTypes[259] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15032,7 +15712,7 @@ func (x *GetSrvKeyspaceNamesResponse_NameList) ProtoReflect() protoreflect.Messa // Deprecated: Use GetSrvKeyspaceNamesResponse_NameList.ProtoReflect.Descriptor instead. 
func (*GetSrvKeyspaceNamesResponse_NameList) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{84, 1} + return file_vtctldata_proto_rawDescGZIP(), []int{95, 1} } func (x *GetSrvKeyspaceNamesResponse_NameList) GetNames() []string { @@ -15055,7 +15735,7 @@ type MoveTablesCreateResponse_TabletInfo struct { func (x *MoveTablesCreateResponse_TabletInfo) Reset() { *x = MoveTablesCreateResponse_TabletInfo{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[250] + mi := &file_vtctldata_proto_msgTypes[263] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15068,7 +15748,7 @@ func (x *MoveTablesCreateResponse_TabletInfo) String() string { func (*MoveTablesCreateResponse_TabletInfo) ProtoMessage() {} func (x *MoveTablesCreateResponse_TabletInfo) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[250] + mi := &file_vtctldata_proto_msgTypes[263] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15081,7 +15761,7 @@ func (x *MoveTablesCreateResponse_TabletInfo) ProtoReflect() protoreflect.Messag // Deprecated: Use MoveTablesCreateResponse_TabletInfo.ProtoReflect.Descriptor instead. 
func (*MoveTablesCreateResponse_TabletInfo) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{128, 0} + return file_vtctldata_proto_rawDescGZIP(), []int{139, 0} } func (x *MoveTablesCreateResponse_TabletInfo) GetTablet() *topodata.TabletAlias { @@ -15111,7 +15791,7 @@ type WorkflowDeleteResponse_TabletInfo struct { func (x *WorkflowDeleteResponse_TabletInfo) Reset() { *x = WorkflowDeleteResponse_TabletInfo{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[260] + mi := &file_vtctldata_proto_msgTypes[273] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15124,7 +15804,7 @@ func (x *WorkflowDeleteResponse_TabletInfo) String() string { func (*WorkflowDeleteResponse_TabletInfo) ProtoMessage() {} func (x *WorkflowDeleteResponse_TabletInfo) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[260] + mi := &file_vtctldata_proto_msgTypes[273] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15137,7 +15817,7 @@ func (x *WorkflowDeleteResponse_TabletInfo) ProtoReflect() protoreflect.Message // Deprecated: Use WorkflowDeleteResponse_TabletInfo.ProtoReflect.Descriptor instead. 
func (*WorkflowDeleteResponse_TabletInfo) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{225, 0} + return file_vtctldata_proto_rawDescGZIP(), []int{234, 0} } func (x *WorkflowDeleteResponse_TabletInfo) GetTablet() *topodata.TabletAlias { @@ -15170,7 +15850,7 @@ type WorkflowStatusResponse_TableCopyState struct { func (x *WorkflowStatusResponse_TableCopyState) Reset() { *x = WorkflowStatusResponse_TableCopyState{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[261] + mi := &file_vtctldata_proto_msgTypes[274] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15183,7 +15863,7 @@ func (x *WorkflowStatusResponse_TableCopyState) String() string { func (*WorkflowStatusResponse_TableCopyState) ProtoMessage() {} func (x *WorkflowStatusResponse_TableCopyState) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[261] + mi := &file_vtctldata_proto_msgTypes[274] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15196,7 +15876,7 @@ func (x *WorkflowStatusResponse_TableCopyState) ProtoReflect() protoreflect.Mess // Deprecated: Use WorkflowStatusResponse_TableCopyState.ProtoReflect.Descriptor instead. 
func (*WorkflowStatusResponse_TableCopyState) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{227, 0} + return file_vtctldata_proto_rawDescGZIP(), []int{236, 0} } func (x *WorkflowStatusResponse_TableCopyState) GetRowsCopied() int64 { @@ -15257,7 +15937,7 @@ type WorkflowStatusResponse_ShardStreamState struct { func (x *WorkflowStatusResponse_ShardStreamState) Reset() { *x = WorkflowStatusResponse_ShardStreamState{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[262] + mi := &file_vtctldata_proto_msgTypes[275] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15270,7 +15950,7 @@ func (x *WorkflowStatusResponse_ShardStreamState) String() string { func (*WorkflowStatusResponse_ShardStreamState) ProtoMessage() {} func (x *WorkflowStatusResponse_ShardStreamState) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[262] + mi := &file_vtctldata_proto_msgTypes[275] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15283,7 +15963,7 @@ func (x *WorkflowStatusResponse_ShardStreamState) ProtoReflect() protoreflect.Me // Deprecated: Use WorkflowStatusResponse_ShardStreamState.ProtoReflect.Descriptor instead. 
func (*WorkflowStatusResponse_ShardStreamState) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{227, 1} + return file_vtctldata_proto_rawDescGZIP(), []int{236, 1} } func (x *WorkflowStatusResponse_ShardStreamState) GetId() int32 { @@ -15339,7 +16019,7 @@ type WorkflowStatusResponse_ShardStreams struct { func (x *WorkflowStatusResponse_ShardStreams) Reset() { *x = WorkflowStatusResponse_ShardStreams{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[263] + mi := &file_vtctldata_proto_msgTypes[276] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15352,7 +16032,7 @@ func (x *WorkflowStatusResponse_ShardStreams) String() string { func (*WorkflowStatusResponse_ShardStreams) ProtoMessage() {} func (x *WorkflowStatusResponse_ShardStreams) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[263] + mi := &file_vtctldata_proto_msgTypes[276] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15365,7 +16045,7 @@ func (x *WorkflowStatusResponse_ShardStreams) ProtoReflect() protoreflect.Messag // Deprecated: Use WorkflowStatusResponse_ShardStreams.ProtoReflect.Descriptor instead. 
func (*WorkflowStatusResponse_ShardStreams) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{227, 2} + return file_vtctldata_proto_rawDescGZIP(), []int{236, 2} } func (x *WorkflowStatusResponse_ShardStreams) GetStreams() []*WorkflowStatusResponse_ShardStreamState { @@ -15389,7 +16069,7 @@ type WorkflowUpdateResponse_TabletInfo struct { func (x *WorkflowUpdateResponse_TabletInfo) Reset() { *x = WorkflowUpdateResponse_TabletInfo{} if protoimpl.UnsafeEnabled { - mi := &file_vtctldata_proto_msgTypes[266] + mi := &file_vtctldata_proto_msgTypes[279] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -15402,7 +16082,7 @@ func (x *WorkflowUpdateResponse_TabletInfo) String() string { func (*WorkflowUpdateResponse_TabletInfo) ProtoMessage() {} func (x *WorkflowUpdateResponse_TabletInfo) ProtoReflect() protoreflect.Message { - mi := &file_vtctldata_proto_msgTypes[266] + mi := &file_vtctldata_proto_msgTypes[279] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -15415,7 +16095,7 @@ func (x *WorkflowUpdateResponse_TabletInfo) ProtoReflect() protoreflect.Message // Deprecated: Use WorkflowUpdateResponse_TabletInfo.ProtoReflect.Descriptor instead. 
func (*WorkflowUpdateResponse_TabletInfo) Descriptor() ([]byte, []int) { - return file_vtctldata_proto_rawDescGZIP(), []int{231, 0} + return file_vtctldata_proto_rawDescGZIP(), []int{240, 0} } func (x *WorkflowUpdateResponse_TabletInfo) GetTablet() *topodata.TabletAlias { @@ -15465,7 +16145,7 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0x83, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x64, 0x6c, 0x22, 0xca, 0x06, 0x0a, 0x13, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, @@ -15514,478 +16194,506 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, - 0x43, 0x6f, 0x70, 0x79, 0x22, 0x4e, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x22, 0xc0, 
0x13, 0x0a, 0x0f, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2f, 0x0a, 0x13, - 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, - 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x72, 0x61, - 0x74, 0x65, 0x67, 0x79, 0x52, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x18, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x61, 0x64, 0x64, 0x65, 0x64, 0x41, - 0x74, 0x12, 0x2f, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x65, 0x64, 0x5f, 0x61, - 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x41, 0x74, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x61, 0x74, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x52, 0x07, 0x72, 0x65, 0x61, 0x64, 0x79, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x12, 0x6c, 0x69, 0x76, 0x65, - 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0d, + 0x43, 0x6f, 0x70, 0x79, 0x12, 0x45, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4e, 0x0a, 0x08, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc0, 0x13, 0x0a, 0x0f, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 
0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x2f, 0x0a, 0x13, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3f, 0x0a, 0x08, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x08, 0x73, 0x74, 0x72, + 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x27, 0x0a, 0x08, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, + 0x07, 0x61, 0x64, 0x64, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2f, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 
0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, 0x0a, 0x08, 0x72, 0x65, 0x61, + 0x64, 0x79, 0x5f, 0x61, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 0x65, 0x61, 0x64, 0x79, + 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x3b, 0x0a, 0x12, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x11, 0x6c, 0x69, 0x76, 0x65, 0x6e, + 0x65, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, + 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, + 0x0d, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x52, 0x11, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x0d, 0x63, 0x6c, 0x65, 0x61, 0x6e, 
0x65, - 0x64, 0x5f, 0x75, 0x70, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6c, 0x65, - 0x61, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1c, - 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x17, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x10, 
0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x64, 0x6c, 0x5f, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x64, 0x6c, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, - 0x0a, 0x0b, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x1a, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x74, 0x61, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, - 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x1b, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, - 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x1c, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, - 0x2a, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x65, - 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, - 0x79, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, - 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, - 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x12, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, - 0x63, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 
0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x13, 0x70, 0x6f, 0x73, 0x74, 0x70, - 0x6f, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x43, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x18, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x5f, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1b, 0x64, 0x72, 0x6f, 0x70, - 0x70, 0x65, 0x64, 0x4e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6c, 0x75, - 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x61, 0x6e, - 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x65, 0x64, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, - 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, - 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, - 0x65, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, - 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, - 0x52, 
0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x75, - 0x69, 0x64, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, - 0x65, 0x64, 0x55, 0x75, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x76, 0x69, 0x65, - 0x77, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, - 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, - 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x76, - 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x69, - 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x49, 0x6e, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x72, 0x5f, - 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x2b, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x75, 0x73, 0x65, 0x72, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, - 0x6c, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x69, - 0x61, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x12, 0x38, 0x0a, 0x11, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, - 0x2d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, - 0x65, 0x64, 0x41, 0x74, 0x12, 0x2f, 0x0a, 
0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x2e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x0c, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, - 0x6e, 0x65, 0x5f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x31, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, - 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x0f, 0x63, 0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, - 0x12, 0x34, 0x0a, 0x16, 0x69, 0x73, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, - 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x14, 0x69, 0x73, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x0b, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x34, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x76, 0x69, 0x65, - 0x77, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x14, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, - 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x61, 0x74, 
0x18, 0x35, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x52, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, - 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x18, 0x36, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, - 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, - 0x53, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x0a, 0x0a, 0x06, 0x56, - 0x49, 0x54, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, - 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x50, 0x54, 0x4f, 0x53, 0x43, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x49, 0x52, - 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x10, 0x04, - 0x1a, 0x02, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, - 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, - 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, - 0x55, 0x45, 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, - 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0c, 0x0a, - 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x06, 0x12, 0x0a, 0x0a, 0x06, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x22, 0x5e, 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 
0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xbf, 0x0f, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6d, 0x65, 0x52, 0x0b, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x65, 0x64, 0x55, 0x70, 0x41, 0x74, 0x12, + 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, + 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, + 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 
0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x25, 0x0a, 0x0e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x46, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x64, 0x64, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x64, 0x64, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x74, 0x61, 0x53, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, + 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x72, 0x6f, 0x77, + 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x61, 
0x62, + 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x64, 0x64, 0x65, 0x64, 0x5f, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0f, 0x61, 0x64, 0x64, 0x65, 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x1f, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x3f, 0x0a, + 0x12, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x61, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, + 0x0a, 0x13, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, + 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x70, 0x6f, 0x73, + 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x37, 0x0a, 0x18, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x69, 0x71, 0x75, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x15, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1f, 0x64, 0x72, 0x6f, 0x70, + 0x70, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 
0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1b, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4e, 0x6f, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x32, + 0x0a, 0x15, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, + 0x78, 0x70, 0x61, 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x6f, 0x74, 0x65, 0x73, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, + 0x76, 0x65, 0x72, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, + 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x26, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, + 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x76, 0x65, + 0x72, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x72, 0x65, 0x76, 0x65, 0x72, 0x74, 0x65, 0x64, 0x55, 0x75, 0x69, 0x64, 0x12, 0x17, 0x0a, + 0x07, 0x69, 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, + 0x69, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x2a, 0x0a, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, + 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x69, 0x76, + 0x65, 0x6e, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, + 0x2a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x76, 0x69, 0x74, 0x65, 
0x73, 0x73, 0x4c, 0x69, 0x76, + 0x65, 0x6e, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x2e, + 0x0a, 0x13, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x5f, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x02, 0x52, 0x11, 0x75, 0x73, 0x65, + 0x72, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x2c, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x70, 0x65, 0x63, 0x69, 0x61, 0x6c, 0x50, 0x6c, 0x61, + 0x6e, 0x12, 0x38, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x2f, 0x0a, 0x0c, + 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x2f, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x6c, 0x65, 0x64, 0x41, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, 0x5f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x6f, 0x73, 0x74, 0x70, 0x6f, 0x6e, 0x65, + 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, + 0x31, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x63, 
0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x18, 0x32, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x63, 0x75, 0x74, 0x6f, 0x76, 0x65, 0x72, 0x41, + 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x69, 0x73, 0x5f, 0x69, 0x6d, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x33, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x69, 0x73, 0x49, 0x6d, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, + 0x0b, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x34, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x52, 0x0a, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3d, 0x0a, 0x14, + 0x72, 0x65, 0x61, 0x64, 0x79, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x61, 0x74, 0x18, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x11, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, + 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x19, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x36, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x08, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x49, 0x54, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0a, + 0x0a, 0x06, 0x4f, 0x4e, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x48, + 0x4f, 0x53, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x54, 0x4f, 0x53, 0x43, 0x10, 0x02, + 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x49, 0x52, 
0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, + 0x4d, 0x59, 0x53, 0x51, 0x4c, 0x10, 0x04, 0x1a, 0x02, 0x10, 0x01, 0x22, 0x71, 0x0a, 0x06, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x02, + 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, + 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, + 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, + 0x10, 0x06, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x07, 0x22, 0x5e, + 0x0a, 0x05, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x87, + 0x01, 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x65, 0x6e, 0x61, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x3f, 0x0a, 0x1c, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x65, 0x64, + 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x74, 0x72, 0x69, 0x70, 0x53, 
0x68, 0x61, 0x72, + 0x64, 0x65, 0x64, 0x41, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0xcf, 0x11, 0x0a, 0x08, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, 0x61, - 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6c, 0x61, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x56, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x4a, 0x0a, - 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, - 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 
0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x6d, 0x61, - 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, - 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x60, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x49, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b, 0x53, 0x68, 
0x61, 0x72, 0x64, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x46, 0x0a, 0x0f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, - 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, - 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, - 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x1a, - 0xe7, 0x08, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, - 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 
0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, - 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, - 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x14, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x45, 0x0a, 0x0b, - 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x43, 0x6f, - 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 
0x65, 0x52, 0x0a, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4c, 0x6f, - 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, - 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, - 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, - 0x70, 0x69, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, - 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0x3a, 0x0a, 0x09, 0x43, - 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, - 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6b, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 
0x02, 0x69, 0x64, 0x12, - 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, - 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x1a, 0x77, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, - 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, - 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, 0x65, - 0x54, 0x68, 
0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64, 0x64, - 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14, 0x41, - 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17, 0x0a, - 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x6d, + 0x61, 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6c, 0x61, 0x67, 0x18, 0x04, 0x20, 0x01, 
0x28, 0x03, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x56, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x4a, + 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x2a, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x75, 0x62, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x48, 0x0a, 0x21, 0x6d, + 0x61, 0x78, 0x5f, 0x76, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1d, 0x6d, 0x61, 0x78, 0x56, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 
0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x60, 0x0a, + 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x49, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0xb9, 0x01, 0x0a, 0x0b, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x12, 0x46, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 
0x74, 0x72, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x1a, 0xc1, 0x0a, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x3d, 0x0a, 0x0d, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, + 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x69, 0x6e, 0x6c, 0x6f, + 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x70, 0x50, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x52, 0x14, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, + 0x63, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x6c, 0x6f, + 0x67, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0f, + 
0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, + 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x55, 0x0a, 0x10, 0x74, + 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x0f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x1a, + 0x57, 0x0a, 0x09, 0x43, 0x6f, 0x70, 
0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x6b, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x73, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x1a, 0xe6, 0x01, 0x0a, 0x03, 0x4c, 0x6f, 0x67, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x64, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x2b, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x1a, 0x77, 0x0a, 0x0f, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 
0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x54, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x33, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x74, 0x69, 0x6d, + 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x22, 0x59, 0x0a, 0x12, 0x41, 0x64, + 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, 0x0a, 0x14, + 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x17, + 0x0a, 0x15, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xbf, 0x01, 0x0a, 0x20, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x4b, 
0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x16, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x14, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x78, 0x0a, 0x21, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, + 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x14, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x18, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 
0x68, + 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x41, 0x70, 0x70, 0x6c, 0x79, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb3, 0x01, 0x0a, 0x1d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, - 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, - 0x11, 0x73, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, - 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xce, 0x02, 0x0a, - 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, - 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, 0x6c, 0x53, 0x74, 0x72, 0x61, - 0x74, 0x65, 0x67, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, - 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, - 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, - 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 
0x68, 0x53, 0x69, 0x7a, - 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xe8, 0x01, - 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, - 0x73, 0x74, 0x12, 0x6c, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, - 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, - 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc3, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x08, 
0x52, 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, - 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x2c, - 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, - 0x73, 0x71, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x22, 0x44, - 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x22, 0xa2, 0x01, 0x0a, - 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, - 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x22, 0xe2, 0x01, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 
0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x53, 0x61, 0x66, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, - 0x72, 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x22, 0x4e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x1d, 0x43, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, - 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, - 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, - 0x0a, 0x07, 0x64, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, - 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, - 0x74, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x0b, 0x61, 0x66, 0x74, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, - 0x1e, 0x0a, 0x0b, 0x77, 0x61, 0x73, 0x5f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x77, 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, - 0x4f, 0x0a, 0x1d, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x6c, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xce, 0x02, 0x0a, 0x12, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, - 0x22, 0xe1, 0x01, 0x0a, 0x1e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 
0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, + 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x21, + 0x0a, 0x0c, 0x64, 0x64, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x64, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, + 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2b, + 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x44, 0x0a, 0x15, 0x77, + 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, + 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x12, 0x2c, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, + 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x62, 0x61, 0x74, 
0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0xe8, 0x01, 0x0a, 0x13, 0x41, + 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x75, 0x69, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x75, 0x75, 0x69, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x6c, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, + 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 
0x0c, 0x73, 0x6b, 0x69, + 0x70, 0x5f, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0b, 0x73, 0x6b, 0x69, 0x70, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, + 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x76, + 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x72, + 0x69, 0x63, 0x74, 0x22, 0xca, 0x02, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, + 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x15, 0x75, 0x6e, + 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, + 0x77, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, + 0x74, 0x72, 
0x79, 0x52, 0x13, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x71, 0x0a, 0x18, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x23, 0x0a, 0x09, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x63, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x69, 
0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x12, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, + 0x6f, 0x6d, 0x50, 0x6f, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x0e, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0xe2, 0x01, + 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 
0x77, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, + 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x21, 0x0a, + 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x73, 0x61, 0x66, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x53, 0x61, 0x66, 0x65, + 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, + 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x72, 0x6f, 0x6d, 0x50, + 0x6f, 0x73, 0x22, 0x4e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, + 0x69, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x1d, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, @@ -15994,52 +16702,88 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x1e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xe3, 0x01, 0x0a, 0x1f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x16, 0x72, 0x6f, - 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9b, 0x01, 0x0a, 0x17, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x64, 0x62, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x06, 0x64, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, + 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, + 0x75, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x35, 0x0a, 0x0d, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0c, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x0a, 0x0c, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x0b, + 0x61, 0x66, 0x74, 0x65, 0x72, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1e, 0x0a, 0x0b, 0x77, + 0x61, 0x73, 0x5f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x77, 0x61, 0x73, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x4f, 0x0a, 0x1d, 0x43, + 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xe1, 0x01, 0x0a, + 0x1e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x77, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x42, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, + 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, + 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x50, 0x0a, 0x1e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, + 0x69, 0x64, 0x22, 0xe3, 0x01, 0x0a, 0x1f, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x03, 0x0a, - 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, - 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x12, 0x2f, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, - 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x40, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, - 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 
0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, - 0x6f, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, - 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, - 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x44, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 
0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, + 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, + 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdd, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x2f, 0x0a, 0x14, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2a, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x73, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x09, + 0x52, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x31, + 0x0a, 0x0d, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x52, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, + 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x5f, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, + 0x44, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0x49, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, @@ -16192,562 +16936,639 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x3c, 0x0a, 0x1e, 0x46, - 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, - 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, - 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, - 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, - 0x0b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, - 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, - 
0x69, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x79, 0x73, 0x71, 0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x73, 0x22, 0x28, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, - 0x6e, 0x66, 0x6f, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, - 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 
0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, - 0x73, 0x1a, 0x50, 0x0a, 0x0c, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, - 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x50, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x0a, 0x68, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xd4, 0x01, 0x0a, 0x1d, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x71, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x71, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x61, 0x78, + 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x52, 0x6f, 0x77, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x62, 0x69, 
0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x23, 0x0a, + 0x0d, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x22, 0x4e, 0x0a, 0x1e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x22, 0xbe, 0x01, 0x0a, 0x1f, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x1a, 0x4b, 0x0a, 0x0b, 
0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x54, 0x0a, 0x22, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x75, 0x74, 0x4f, 0x76, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xeb, 0x01, 0x0a, 0x23, 0x46, 0x6f, 0x72, 0x63, + 0x65, 0x43, 0x75, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7c, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x47, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x6f, 0x72, 0x63, + 0x65, 0x43, 0x75, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 
0x46, 0x0a, + 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x07, + 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6d, 0x79, 0x73, 0x71, 
0x6c, 0x63, 0x74, 0x6c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x22, 0x28, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, + 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x19, + 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x18, 0x47, 0x65, 0x74, + 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, + 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, + 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x49, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x41, 0x6c, 0x69, 
0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x1a, 0x50, 0x0a, 0x0c, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x50, + 0x0a, 0x14, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6c, 0x6c, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x15, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, + 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, - 0x51, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, - 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x18, - 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x55, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, - 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, - 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 
0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, - 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, - 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, - 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x10, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x6e, - 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, 0x73, 
0x63, 0x68, 0x65, 0x6d, - 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0xb8, 0x02, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, - 0x75, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, - 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x28, 0x0a, 0x06, 0x72, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, - 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x05, - 0x6f, 0x72, 
0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, - 0x6b, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x22, - 0x59, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, - 0x0a, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, - 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x22, 0x30, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x22, 0x46, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x51, 0x0a, 0x15, 0x47, 0x65, + 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 
0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x5a, 0x0a, + 0x16, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x70, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x76, 0x0a, 0x1f, 0x47, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, + 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, + 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x14, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 
0x55, 0x0a, + 0x17, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x22, 0xb0, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, + 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x4f, 0x6e, 0x6c, + 0x79, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x73, + 0x5f, 0x6f, 0x6e, 0x6c, 
0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x73, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x50, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xb8, 0x02, 0x0a, 0x1a, 0x47, 0x65, + 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x6d, 0x69, 0x67, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x10, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x28, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x05, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x52, 0x05, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x73, 0x6b, 0x69, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, + 0x73, 0x6b, 0x69, 0x70, 0x22, 0x59, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, - 
0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, 0x75, - 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, - 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, - 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, 0x0a, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x4c, - 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x2e, 0x53, 0x72, 0x76, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, - 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x11, - 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x72, - 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x02, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, - 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x69, 0x73, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x69, 0x73, 0x61, - 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x63, 0x75, 
0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x74, 0x12, 0x2d, - 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x2f, 0x0a, - 0x14, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x3f, - 0x0a, 0x0d, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x70, 0x70, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, 0x6c, - 0x65, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x22, - 0x1f, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, - 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, 0x15, - 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d, 0x0a, 0x15, - 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x16, - 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x53, - 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x02, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7d, 0x0a, 0x19, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x79, 0x5f, 0x63, 0x65, + 0x6c, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 
0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, + 0x43, 0x65, 0x6c, 0x6c, 0x1a, 0x65, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3a, 0x0a, 0x10, 0x47, 0x65, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x6f, 0x75, 
0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x22, 0x6a, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x13, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x72, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x11, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x22, 0x32, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x69, 0x0a, + 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x20, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x4a, 0x0a, 0x16, 0x47, 0x65, + 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x72, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x59, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x72, + 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0c, 0x73, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x1a, 0x56, 0x0a, + 0x11, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x02, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 
0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, - 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x69, 0x73, + 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, + 0x6c, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x74, 0x12, + 0x2d, 0x0a, 0x13, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x73, 0x65, 0x6c, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x6c, 0x66, 0x12, 0x2f, + 0x0a, 0x14, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x61, 0x73, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 
0x11, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x41, 0x73, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x3f, 0x0a, 0x0d, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x61, 0x70, 0x70, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, 0x52, 0x75, + 0x6c, 0x65, 0x52, 0x0c, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x41, 0x70, 0x70, + 0x22, 0x1f, 0x0a, 0x1d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x2a, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x4e, 0x0a, + 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x0c, 0x73, 0x72, 0x76, 0x5f, 0x76, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x0a, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x2d, 0x0a, + 0x15, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0xc5, 0x01, 0x0a, + 0x16, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x0d, 0x73, 0x72, 0x76, 0x5f, 0x76, + 0x5f, 0x73, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, + 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0b, 0x73, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, + 0x53, 0x0a, 0x10, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x53, + 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 
0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2c, 0x0a, - 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x17, 0x47, - 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x04, 0x63, - 0x65, 0x6c, 0x6c, 0x22, 0x66, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, - 0x65, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, 0x0a, 0x11, 0x47, - 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4d, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 
0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x12, 0x47, - 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, - 0xae, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, - 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, 0x6c, - 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x21, 0x0a, - 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 
0x64, 0x65, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4c, 0x6f, 0x67, 0x73, - 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, - 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x22, 0x3d, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 
0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, - 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, - 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, - 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, - 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4e, 0x0a, - 0x1c, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdf, 0x01, - 0x0a, 0x1d, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x5f, 0x62, 0x79, 
0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, - 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, - 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xff, 0x02, 0x0a, 0x19, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x76, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x06, - 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x42, 0x0a, 
0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, - 0x75, 0x65, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x77, 0x69, - 0x74, 0x68, 0x5f, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, - 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, - 0x79, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x77, 0x0a, 0x1e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 
0x12, + 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x2c, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, + 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x46, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x04, + 0x63, 0x65, 0x6c, 0x6c, 0x22, 
0x66, 0x0a, 0x0c, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, + 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, 0x2f, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4c, 0x0a, 0x1f, 0x4c, 0x6f, 0x6f, 0x6b, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4d, 0x0a, + 0x11, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x2e, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x08, 0x76, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x07, 0x76, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x22, 0xc6, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, + 0x6e, 0x6c, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, + 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x6e, + 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x4f, 0x6e, + 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x21, + 0x0a, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4c, 0x6f, 0x67, + 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0x49, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 
0x31, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x17, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x12, 0x52, 0x0a, 0x1a, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x17, 0x70, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x45, 0x6c, 0x65, 0x63, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x44, 0x0a, 0x15, + 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, + 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x22, 0x42, 0x0a, 0x18, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, + 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x4e, 0x0a, 0x1c, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdf, 0x01, 0x0a, 0x1d, 0x4c, 0x61, 0x75, 0x6e, 0x63, + 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, + 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, + 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, + 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 
0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x02, 0x0a, 0x19, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, + 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x06, 0x76, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x42, 0x0a, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x5f, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, + 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x57, 0x69, 0x74, 0x68, 0x4f, 0x77, + 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x65, 
0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, + 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x4c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x1e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x56, 0x0a, 0x18, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, - 0x69, 0x6e, 0x67, 0x73, 0x52, 0x08, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x1b, - 0x0a, 0x19, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdd, 0x05, 0x0a, 0x14, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 
0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, - 0x74, 
0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, - 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, - 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x26, 0x0a, 0x0f, - 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, - 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x66, 0x6f, 0x72, - 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x73, - 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, - 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, - 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 
0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, - 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x52, - 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x16, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x22, 0x4c, 0x0a, 0x1f, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, + 0x56, 0x0a, 0x18, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 
0x65, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x73, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x08, 0x73, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x1b, 0x0a, 0x19, 0x4d, 0x61, 0x74, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdd, 0x05, 0x0a, 0x14, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6b, - 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x5f, 0x74, 0x61, 
0x62, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, - 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, - 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, - 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5b, 0x0a, 0x17, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, - 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x22, 0x85, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, - 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, - 0x70, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, - 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, - 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x4d, 0x6f, 0x75, - 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x0a, 0x10, 0x4d, - 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x82, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, - 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, - 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, - 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x70, - 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, - 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, - 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x4d, 0x6f, 0x75, 0x6e, - 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a, 0x11, - 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xbb, 0x06, 0x0a, 0x17, 0x4d, 0x6f, 0x76, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 
0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, - 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, - 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, - 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, - 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x73, 0x12, 0x1d, 
0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 
0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x6c, 0x6c, 0x5f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x6c, 0x6c, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x15, - 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, - 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 
0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, - 0x79, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x46, 0x6f, + 0x79, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, - 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, - 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x12, + 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x5f, 0x63, - 0x6f, 0x70, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x74, 0x6f, 0x6d, 0x69, - 0x63, 0x43, 0x6f, 0x70, 0x79, 0x22, 0xd5, 0x01, 0x0a, 0x18, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, + 0x75, 0x6c, 0x65, 0x73, 0x22, 0xe6, 0x01, 0x0a, 0x16, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, + 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5b, 0x0a, + 0x17, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, + 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x85, 0x01, 0x0a, 0x14, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x0a, 0x10, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x82, 0x01, 0x0a, + 0x11, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x70, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x6f, 0x70, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 
0x70, 0x6f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x70, 0x6f, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x12, 0x0a, 0x10, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x22, 0x82, 0x07, 0x0a, 0x17, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, + 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, + 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 
0x1b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, + 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x61, 0x6c, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x61, 0x6c, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x09, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, + 0x65, 0x18, 
0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, + 0x6c, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x26, + 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, + 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, + 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x66, + 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x46, 0x6f, 0x72, 0x65, 0x69, 0x67, 0x6e, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, + 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x6e, 0x6f, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x6e, + 0x6f, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x61, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x45, + 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x18, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x48, 0x0a, 0x07, @@ -16760,7 +17581,7 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0xe9, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x81, 0x02, 0x0a, 0x19, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, @@ -16775,388 +17596,322 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x22, 0x5e, 0x0a, 0x1a, 0x4d, 0x6f, 0x76, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 
0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, - 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, - 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x89, - 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, - 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, - 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 
0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, - 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, - 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, - 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, - 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, + 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 
0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x73, 0x22, 0x5e, 0x0a, 0x1a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, + 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x22, 0x4d, 0x0a, 0x11, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x22, 0x14, 0x0a, 0x12, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd7, 0x02, 0x0a, 0x1b, 0x50, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, - 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 
0x52, - 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, - 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, - 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, - 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, - 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, - 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, - 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, - 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, - 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, - 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, - 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, - 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, - 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, - 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, - 0x73, 
0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, - 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, - 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, + 0x12, 0x3a, 0x0a, 0x0d, 0x61, 0x76, 0x6f, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0c, + 0x61, 0x76, 0x6f, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x44, 0x0a, 0x15, + 0x77, 0x61, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, + 0x61, 0x69, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x19, 0x74, 
0x6f, 0x6c, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x74, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x62, + 0x6c, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, + 0x22, 0xba, 0x01, 0x0a, 0x1c, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x40, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x5f, + 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x65, 0x64, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, + 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 
0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x32, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, + 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x64, 0x0a, 0x1a, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, + 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x1c, 0x0a, 0x1a, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, - 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, + 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 
0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x69, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, 0x72, + 0x65, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x72, + 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x66, + 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x16, 0x0a, 0x14, + 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, + 0x0a, 
0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, + 0x22, 0x46, 0x0a, 0x1c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbc, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6c, + 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, - 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, - 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x8f, 0x04, 0x0a, 0x14, 0x52, 0x65, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, + 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x77, 0x61, 0x69, 0x74, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 
0x63, + 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x22, 0x43, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x6f, 0x61, + 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x5b, 0x0a, 0x13, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x7f, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, + 0x6c, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, + 0x6f, 0x72, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, + 0x76, 0x65, 0x22, 0x1c, 0x0a, 
0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x9b, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x19, + 0x0a, 0x17, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x46, 0x0a, 0x15, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x22, 0x7b, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, + 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x8f, + 0x04, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, + 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 
0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x70, 0x79, + 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, 0x5f, 0x64, 0x64, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, + 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, + 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, + 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, + 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, + 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x22, 0x82, 0x02, 0x0a, 0x18, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, + 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 
0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, + 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, + 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x09, 0x52, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x23, - 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, - 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, - 0x63, 0x6f, 0x70, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x6b, 0x69, 0x70, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6e, - 0x5f, 0x64, 0x64, 0x6c, 
0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x6e, 0x44, 0x64, - 0x6c, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x74, 0x6f, 0x70, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6f, 0x70, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x70, - 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x66, - 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x5f, 0x6b, 0x65, 0x79, - 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x64, 0x65, 0x66, 0x65, 0x72, 0x53, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x61, - 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x22, 0x82, 0x02, 0x0a, 0x18, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x2d, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x54, 0x69, 0x6d, 0x65, - 0x12, 0x24, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, - 0x6f, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x54, 0x6f, 0x50, 0x6f, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 
0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, - 0x3e, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x12, 0x72, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, - 0xad, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x1b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x75, 0x69, 0x64, 0x22, 0xdd, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 
0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, + 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, + 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x24, 0x0a, 0x05, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6c, 0x6f, 0x67, 0x75, 0x74, - 0x69, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, - 0x4d, 0x0a, 0x1b, 0x52, 0x65, 0x74, 0x72, 
0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0xdd, - 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x75, 0x0a, 0x16, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, - 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x40, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, - 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x72, 0x6f, 0x77, 0x73, 0x41, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x46, 0x0a, 0x18, 0x52, 0x6f, 0x77, 0x73, 0x41, 0x66, - 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x51, - 0x0a, 0x15, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 
0x62, 0x6c, 0x65, - 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, - 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, - 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x22, 0xc8, 0x01, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 
0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x35, - 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x4f, 0x0a, 0x1d, - 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, - 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, - 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, - 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 
0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, - 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, - 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 
0x01, - 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, - 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, - 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, - 0x0a, 0x0c, 0x74, 0x61, 0x62, 
0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x52, 0x75, 0x6e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x6d, 0x0a, 0x22, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x64, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x55, 0x0a, 0x23, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, + 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 
0x6b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x5e, 0x0a, 0x1e, 0x53, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, + 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x51, 0x0a, 0x1f, 0x53, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, + 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x72, 0x0a, 0x1f, 0x53, 0x65, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, + 0x01, 
0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x22, 0x49, + 0x0a, 0x20, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, + 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x8e, 0x02, 0x0a, 0x1c, 0x53, 0x65, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x0a, 0x0c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x54, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x35, 0x0a, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, + 0x69, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x32, + 0x0a, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, + 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x22, 0x46, 0x0a, 0x1d, 0x53, 0x65, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x22, 0x6a, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x15, + 0x0a, 0x13, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -17164,135 +17919,220 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 
0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x22, 0x1d, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x62, 0x0a, 0x1a, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x63, 0x65, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, + 0x65, 0x6c, 0x6c, 0x22, 0x54, 0x0a, 0x1b, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x54, 0x0a, 
0x20, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, + 0xaa, 0x03, 0x0a, 0x21, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, + 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x09, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x1a, 0x5f, 0x0a, 0x18, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4e, 0x0a, 0x0e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x26, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8b, 0x01, 0x0a, + 0x1d, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, + 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 
0x61, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x12, + 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, + 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x2c, 0x0a, 0x08, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x6c, + 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xf0, 0x01, 0x0a, 0x15, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, + 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 
0x75, 0x72, + 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, + 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, + 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 
0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x2f, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x16, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x5e, 0x0a, 0x18, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, - 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 
0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x65, 
0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, - 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, - 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, - 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, - 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, - 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, - 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, - 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, - 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 
0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, - 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x4b, 0x65, 
0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0x42, 0x0a, 0x19, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x25, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x53, 0x0a, 0x17, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x1a, + 0x0a, 0x18, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x16, 0x53, 0x74, + 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x61, + 0x6c, 0x69, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, + 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x19, + 0x0a, 0x17, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x52, 0x0a, 0x21, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, + 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x22, 0xc6, 0x01, + 0x0a, 0x22, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x0a, 0x0b, 0x6e, 0x65, 0x77, 0x5f, 0x70, 0x72, + 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, + 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x0a, 0x6e, 0x65, 0x77, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, + 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 
0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x6f, 0x6c, 0x64, 0x50, + 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x5c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x5d, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x65, 0x6c, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x63, 0x65, 0x6c, 0x6c, 0x49, + 0x6e, 0x66, 0x6f, 0x22, 0x64, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, + 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x22, 0x65, 0x0a, 0x18, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x43, 0x65, 
0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x0b, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x0a, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x22, 0x34, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfb, 0x01, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x62, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x5f, 0x62, 0x79, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, + 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x1a, 0x69, 0x0a, 0x16, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x58, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x70, + 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0xfc, + 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x61, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x37, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, + 0x6c, 
0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, + 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, + 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, + 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, + 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 
0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, @@ -17302,364 +18142,345 @@ var file_vtctldata_proto_rawDesc = []byte{ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xd8, 0x01, 0x0a, 0x1d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, + 0x0c, 0x70, 0x69, 
0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x12, 0x26, 0x0a, - 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x50, 0x72, - 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x88, - 0x02, 0x0a, 0x1e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x10, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x14, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61, 0x62, 0x6c, - 
0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x22, 0x31, 0x0a, 0x15, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x3c, 0x0a, 0x1e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, - 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, - 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 
0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, - 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 
0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x06, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, - 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 
0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x12, 0x55, 0x0a, 0x1e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x09, 0x6f, 0x6e, - 0x6c, 0x79, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, - 0x6e, 0x6c, 0x79, 0x50, 0x4b, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, - 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, - 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 
0x12, - 0x0a, 0x04, 0x77, 0x61, 0x69, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x61, - 0x69, 0x74, 0x12, 0x42, 0x0a, 0x14, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x61, 0x69, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, - 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, - 0x65, 0x74, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, - 0x52, 0x65, 0x74, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x22, - 0x29, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x22, 0x6b, 0x0a, 0x12, 0x56, 0x44, - 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, 0x67, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x61, 0x72, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, - 0x0a, 0x12, 0x56, 0x44, 0x69, 
0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x15, 0x0a, - 0x13, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, - 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, + 0x68, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, + 0x61, 0x72, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, 0x52, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4f, + 0x0a, 0x1b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, + 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x68, 0x61, 0x72, 0x64, 0x22, + 0x38, 0x0a, 0x1c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x98, 0x01, 0x0a, 0x16, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x63, 0x6c, + 0x75, 0x64, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, + 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x73, + 0x18, 
0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x56, + 0x69, 0x65, 0x77, 0x73, 0x22, 0xfa, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x1a, 0x63, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x88, 0x07, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 
0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x61, 0x72, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x72, 0x67, 0x22, - 0xd7, 0x01, 0x0a, 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, - 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x73, 0x1a, 0x64, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x10, 0x56, 0x44, 0x69, - 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 
0x69, + 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x65, 0x6c, 0x6c, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, + 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, + 0x12, 0x6c, 0x0a, 0x1b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x19, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x55, 0x0a, 0x1e, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1b, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x69, 0x74, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x09, 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x70, 0x5f, 0x6b, + 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x6e, 0x6c, 0x79, 0x50, 0x4b, 0x73, + 0x12, 0x2c, 0x0a, 0x12, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x38, + 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x6f, 0x77, 0x73, + 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x77, 0x73, 0x54, + 0x6f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x61, 0x69, 0x74, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x61, 0x69, 0x74, 0x12, 0x42, 0x0a, 0x14, + 0x77, 0x61, 0x69, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, + 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x61, + 0x69, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x52, 
0x65, 0x74, 0x72, 0x79, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, + 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x72, + 0x6f, 0x77, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x52, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x6f, 0x77, 0x73, 0x12, 0x3c, + 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x69, 0x66, 0x66, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6d, 0x61, 0x78, + 0x44, 0x69, 0x66, 0x66, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x29, 0x0a, 0x13, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x22, 0x6b, 0x0a, 0x12, 0x56, 0x44, 0x69, 0x66, 0x66, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, - 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x15, - 0x57, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x0a, - 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, 0x65, - 0x65, 0x70, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 
0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x4f, 0x0a, 0x15, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0xe6, 0x07, - 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 
0x72, 0x65, - 0x61, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x66, - 0x66, 0x69, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xe8, 0x01, 0x0a, 0x0e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, - 0x6f, 0x77, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0a, 0x72, 0x6f, 0x77, 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x6f, 0x77, 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x09, 0x72, 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, - 0x6f, 0x77, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x72, 0x6f, 0x77, 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, - 0x74, 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, - 0x70, 0x69, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x43, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x02, 0x52, 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, - 0x61, 0x67, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, - 0x66, 0x6f, 0x1a, 0x5c, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, - 0x6d, 0x73, 0x12, 0x4c, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, - 0x1a, 0x73, 0x0a, 0x13, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x6f, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7, 0x03, 0x0a, 0x1c, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, - 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, - 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x0b, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x4f, - 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 
0x6e, 0x5f, 0x6c, 0x61, 0x67, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, - 0x3c, 0x0a, 0x1a, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, - 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x76, 0x65, 0x72, - 0x73, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, - 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x09, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x07, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, - 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, - 0x75, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, - 0x12, 0x3e, 0x0a, 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, - 0x22, 0xa7, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, - 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 
0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x72, 0x79, - 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x90, 0x01, 0x0a, 0x15, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x5b, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xd1, 0x01, - 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x61, 0x72, 0x67, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, 0x69, 0x66, 
0x66, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6d, 0x0a, 0x12, 0x56, + 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, + 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x69, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x72, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x61, 0x72, 0x67, 0x22, 0xd7, 0x01, 0x0a, + 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, + 0x6f, 0x77, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, + 0x1a, 0x64, 0x0a, 0x14, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6b, 0x0a, 0x10, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb2, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 
0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, + 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, + 0x65, 0x70, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, + 0x65, 0x65, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2c, 0x0a, 0x12, 0x6b, 0x65, 0x65, 0x70, 0x5f, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, - 0x06, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x55, - 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x4f, 0x56, 0x45, 0x54, 0x41, - 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, - 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x10, 0x02, 0x2a, 0x38, 0x0a, - 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x08, - 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x53, 0x43, 0x45, - 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x53, 0x43, 0x45, - 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, 0x76, 0x69, 0x74, 0x65, 0x73, - 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, - 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x22, 0x67, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 
0x66, 0x6c, + 0x6f, 0x77, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0xe6, 0x07, 0x0a, 0x16, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, + 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x58, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xe8, 0x01, 0x0a, 0x0e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, + 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x6f, 0x77, 0x73, + 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, + 0x6f, 0x77, 0x73, 0x43, 
0x6f, 0x70, 0x69, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x6f, 0x77, + 0x73, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, + 0x6f, 0x77, 0x73, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x6f, 0x77, 0x73, + 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x02, 0x52, 0x0e, 0x72, 0x6f, 0x77, 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x70, 0x69, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, + 0x70, 0x69, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x02, 0x52, + 0x0f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x61, 0x67, 0x65, + 0x1a, 0xbc, 0x01, 0x0a, 0x10, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2d, 0x0a, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x1a, + 0x5c, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, + 0x4c, 0x0a, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x07, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0x73, 0x0a, + 0x13, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, + 0x70, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x6f, 0x0a, 0x11, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x44, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xef, 0x03, 0x0a, 0x1c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, + 0x6c, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x74, 0x6f, 0x70, 0x6f, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x1b, 0x6d, + 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, + 0x61, 0x67, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x61, 0x67, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x18, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x64, 0x72, 0x79, 0x5f, 0x72, 0x75, 0x6e, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x3e, 0x0a, + 0x1b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x19, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x68, 0x61, 0x72, 0x64, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x1d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 
0x74, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x72, 0x79, 0x5f, 0x72, + 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, + 0x90, 0x01, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x5f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x46, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, + 0x55, 0x0a, 0x0a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2d, 0x0a, + 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x6f, 0x70, 0x6f, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x2a, 0x4a, 0x0a, 0x15, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, + 0x0a, 0x0a, 0x06, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, + 0x4f, 0x56, 0x45, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x45, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x49, 0x4e, 0x44, 0x45, 0x58, + 0x10, 0x02, 0x2a, 0x38, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x72, 0x64, 0x65, 0x72, + 0x69, 0x6e, 0x67, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x42, 0x28, 0x5a, 0x26, + 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -17675,7 +18496,7 @@ func file_vtctldata_proto_rawDescGZIP() []byte { } var file_vtctldata_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_vtctldata_proto_msgTypes = make([]protoimpl.MessageInfo, 267) +var file_vtctldata_proto_msgTypes = 
make([]protoimpl.MessageInfo, 280) var file_vtctldata_proto_goTypes = []interface{}{ (MaterializationIntent)(0), // 0: vtctldata.MaterializationIntent (QueryOrdering)(0), // 1: vtctldata.QueryOrdering @@ -17688,514 +18509,542 @@ var file_vtctldata_proto_goTypes = []interface{}{ (*Keyspace)(nil), // 8: vtctldata.Keyspace (*SchemaMigration)(nil), // 9: vtctldata.SchemaMigration (*Shard)(nil), // 10: vtctldata.Shard - (*Workflow)(nil), // 11: vtctldata.Workflow - (*AddCellInfoRequest)(nil), // 12: vtctldata.AddCellInfoRequest - (*AddCellInfoResponse)(nil), // 13: vtctldata.AddCellInfoResponse - (*AddCellsAliasRequest)(nil), // 14: vtctldata.AddCellsAliasRequest - (*AddCellsAliasResponse)(nil), // 15: vtctldata.AddCellsAliasResponse - (*ApplyRoutingRulesRequest)(nil), // 16: vtctldata.ApplyRoutingRulesRequest - (*ApplyRoutingRulesResponse)(nil), // 17: vtctldata.ApplyRoutingRulesResponse - (*ApplyShardRoutingRulesRequest)(nil), // 18: vtctldata.ApplyShardRoutingRulesRequest - (*ApplyShardRoutingRulesResponse)(nil), // 19: vtctldata.ApplyShardRoutingRulesResponse - (*ApplySchemaRequest)(nil), // 20: vtctldata.ApplySchemaRequest - (*ApplySchemaResponse)(nil), // 21: vtctldata.ApplySchemaResponse - (*ApplyVSchemaRequest)(nil), // 22: vtctldata.ApplyVSchemaRequest - (*ApplyVSchemaResponse)(nil), // 23: vtctldata.ApplyVSchemaResponse - (*BackupRequest)(nil), // 24: vtctldata.BackupRequest - (*BackupResponse)(nil), // 25: vtctldata.BackupResponse - (*BackupShardRequest)(nil), // 26: vtctldata.BackupShardRequest - (*CancelSchemaMigrationRequest)(nil), // 27: vtctldata.CancelSchemaMigrationRequest - (*CancelSchemaMigrationResponse)(nil), // 28: vtctldata.CancelSchemaMigrationResponse - (*ChangeTabletTypeRequest)(nil), // 29: vtctldata.ChangeTabletTypeRequest - (*ChangeTabletTypeResponse)(nil), // 30: vtctldata.ChangeTabletTypeResponse - (*CleanupSchemaMigrationRequest)(nil), // 31: vtctldata.CleanupSchemaMigrationRequest - (*CleanupSchemaMigrationResponse)(nil), // 32: 
vtctldata.CleanupSchemaMigrationResponse - (*CompleteSchemaMigrationRequest)(nil), // 33: vtctldata.CompleteSchemaMigrationRequest - (*CompleteSchemaMigrationResponse)(nil), // 34: vtctldata.CompleteSchemaMigrationResponse - (*CreateKeyspaceRequest)(nil), // 35: vtctldata.CreateKeyspaceRequest - (*CreateKeyspaceResponse)(nil), // 36: vtctldata.CreateKeyspaceResponse - (*CreateShardRequest)(nil), // 37: vtctldata.CreateShardRequest - (*CreateShardResponse)(nil), // 38: vtctldata.CreateShardResponse - (*DeleteCellInfoRequest)(nil), // 39: vtctldata.DeleteCellInfoRequest - (*DeleteCellInfoResponse)(nil), // 40: vtctldata.DeleteCellInfoResponse - (*DeleteCellsAliasRequest)(nil), // 41: vtctldata.DeleteCellsAliasRequest - (*DeleteCellsAliasResponse)(nil), // 42: vtctldata.DeleteCellsAliasResponse - (*DeleteKeyspaceRequest)(nil), // 43: vtctldata.DeleteKeyspaceRequest - (*DeleteKeyspaceResponse)(nil), // 44: vtctldata.DeleteKeyspaceResponse - (*DeleteShardsRequest)(nil), // 45: vtctldata.DeleteShardsRequest - (*DeleteShardsResponse)(nil), // 46: vtctldata.DeleteShardsResponse - (*DeleteSrvVSchemaRequest)(nil), // 47: vtctldata.DeleteSrvVSchemaRequest - (*DeleteSrvVSchemaResponse)(nil), // 48: vtctldata.DeleteSrvVSchemaResponse - (*DeleteTabletsRequest)(nil), // 49: vtctldata.DeleteTabletsRequest - (*DeleteTabletsResponse)(nil), // 50: vtctldata.DeleteTabletsResponse - (*EmergencyReparentShardRequest)(nil), // 51: vtctldata.EmergencyReparentShardRequest - (*EmergencyReparentShardResponse)(nil), // 52: vtctldata.EmergencyReparentShardResponse - (*ExecuteFetchAsAppRequest)(nil), // 53: vtctldata.ExecuteFetchAsAppRequest - (*ExecuteFetchAsAppResponse)(nil), // 54: vtctldata.ExecuteFetchAsAppResponse - (*ExecuteFetchAsDBARequest)(nil), // 55: vtctldata.ExecuteFetchAsDBARequest - (*ExecuteFetchAsDBAResponse)(nil), // 56: vtctldata.ExecuteFetchAsDBAResponse - (*ExecuteHookRequest)(nil), // 57: vtctldata.ExecuteHookRequest - (*ExecuteHookResponse)(nil), // 58: 
vtctldata.ExecuteHookResponse - (*FindAllShardsInKeyspaceRequest)(nil), // 59: vtctldata.FindAllShardsInKeyspaceRequest - (*FindAllShardsInKeyspaceResponse)(nil), // 60: vtctldata.FindAllShardsInKeyspaceResponse - (*GetBackupsRequest)(nil), // 61: vtctldata.GetBackupsRequest - (*GetBackupsResponse)(nil), // 62: vtctldata.GetBackupsResponse - (*GetCellInfoRequest)(nil), // 63: vtctldata.GetCellInfoRequest - (*GetCellInfoResponse)(nil), // 64: vtctldata.GetCellInfoResponse - (*GetCellInfoNamesRequest)(nil), // 65: vtctldata.GetCellInfoNamesRequest - (*GetCellInfoNamesResponse)(nil), // 66: vtctldata.GetCellInfoNamesResponse - (*GetCellsAliasesRequest)(nil), // 67: vtctldata.GetCellsAliasesRequest - (*GetCellsAliasesResponse)(nil), // 68: vtctldata.GetCellsAliasesResponse - (*GetFullStatusRequest)(nil), // 69: vtctldata.GetFullStatusRequest - (*GetFullStatusResponse)(nil), // 70: vtctldata.GetFullStatusResponse - (*GetKeyspacesRequest)(nil), // 71: vtctldata.GetKeyspacesRequest - (*GetKeyspacesResponse)(nil), // 72: vtctldata.GetKeyspacesResponse - (*GetKeyspaceRequest)(nil), // 73: vtctldata.GetKeyspaceRequest - (*GetKeyspaceResponse)(nil), // 74: vtctldata.GetKeyspaceResponse - (*GetPermissionsRequest)(nil), // 75: vtctldata.GetPermissionsRequest - (*GetPermissionsResponse)(nil), // 76: vtctldata.GetPermissionsResponse - (*GetRoutingRulesRequest)(nil), // 77: vtctldata.GetRoutingRulesRequest - (*GetRoutingRulesResponse)(nil), // 78: vtctldata.GetRoutingRulesResponse - (*GetSchemaRequest)(nil), // 79: vtctldata.GetSchemaRequest - (*GetSchemaResponse)(nil), // 80: vtctldata.GetSchemaResponse - (*GetSchemaMigrationsRequest)(nil), // 81: vtctldata.GetSchemaMigrationsRequest - (*GetSchemaMigrationsResponse)(nil), // 82: vtctldata.GetSchemaMigrationsResponse - (*GetShardRequest)(nil), // 83: vtctldata.GetShardRequest - (*GetShardResponse)(nil), // 84: vtctldata.GetShardResponse - (*GetShardRoutingRulesRequest)(nil), // 85: vtctldata.GetShardRoutingRulesRequest - 
(*GetShardRoutingRulesResponse)(nil), // 86: vtctldata.GetShardRoutingRulesResponse - (*GetSrvKeyspaceNamesRequest)(nil), // 87: vtctldata.GetSrvKeyspaceNamesRequest - (*GetSrvKeyspaceNamesResponse)(nil), // 88: vtctldata.GetSrvKeyspaceNamesResponse - (*GetSrvKeyspacesRequest)(nil), // 89: vtctldata.GetSrvKeyspacesRequest - (*GetSrvKeyspacesResponse)(nil), // 90: vtctldata.GetSrvKeyspacesResponse - (*UpdateThrottlerConfigRequest)(nil), // 91: vtctldata.UpdateThrottlerConfigRequest - (*UpdateThrottlerConfigResponse)(nil), // 92: vtctldata.UpdateThrottlerConfigResponse - (*GetSrvVSchemaRequest)(nil), // 93: vtctldata.GetSrvVSchemaRequest - (*GetSrvVSchemaResponse)(nil), // 94: vtctldata.GetSrvVSchemaResponse - (*GetSrvVSchemasRequest)(nil), // 95: vtctldata.GetSrvVSchemasRequest - (*GetSrvVSchemasResponse)(nil), // 96: vtctldata.GetSrvVSchemasResponse - (*GetTabletRequest)(nil), // 97: vtctldata.GetTabletRequest - (*GetTabletResponse)(nil), // 98: vtctldata.GetTabletResponse - (*GetTabletsRequest)(nil), // 99: vtctldata.GetTabletsRequest - (*GetTabletsResponse)(nil), // 100: vtctldata.GetTabletsResponse - (*GetTopologyPathRequest)(nil), // 101: vtctldata.GetTopologyPathRequest - (*GetTopologyPathResponse)(nil), // 102: vtctldata.GetTopologyPathResponse - (*TopologyCell)(nil), // 103: vtctldata.TopologyCell - (*GetVSchemaRequest)(nil), // 104: vtctldata.GetVSchemaRequest - (*GetVersionRequest)(nil), // 105: vtctldata.GetVersionRequest - (*GetVersionResponse)(nil), // 106: vtctldata.GetVersionResponse - (*GetVSchemaResponse)(nil), // 107: vtctldata.GetVSchemaResponse - (*GetWorkflowsRequest)(nil), // 108: vtctldata.GetWorkflowsRequest - (*GetWorkflowsResponse)(nil), // 109: vtctldata.GetWorkflowsResponse - (*InitShardPrimaryRequest)(nil), // 110: vtctldata.InitShardPrimaryRequest - (*InitShardPrimaryResponse)(nil), // 111: vtctldata.InitShardPrimaryResponse - (*LaunchSchemaMigrationRequest)(nil), // 112: vtctldata.LaunchSchemaMigrationRequest - 
(*LaunchSchemaMigrationResponse)(nil), // 113: vtctldata.LaunchSchemaMigrationResponse - (*LookupVindexCreateRequest)(nil), // 114: vtctldata.LookupVindexCreateRequest - (*LookupVindexCreateResponse)(nil), // 115: vtctldata.LookupVindexCreateResponse - (*LookupVindexExternalizeRequest)(nil), // 116: vtctldata.LookupVindexExternalizeRequest - (*LookupVindexExternalizeResponse)(nil), // 117: vtctldata.LookupVindexExternalizeResponse - (*MaterializeCreateRequest)(nil), // 118: vtctldata.MaterializeCreateRequest - (*MaterializeCreateResponse)(nil), // 119: vtctldata.MaterializeCreateResponse - (*MigrateCreateRequest)(nil), // 120: vtctldata.MigrateCreateRequest - (*MigrateCompleteRequest)(nil), // 121: vtctldata.MigrateCompleteRequest - (*MigrateCompleteResponse)(nil), // 122: vtctldata.MigrateCompleteResponse - (*MountRegisterRequest)(nil), // 123: vtctldata.MountRegisterRequest - (*MountRegisterResponse)(nil), // 124: vtctldata.MountRegisterResponse - (*MountUnregisterRequest)(nil), // 125: vtctldata.MountUnregisterRequest - (*MountUnregisterResponse)(nil), // 126: vtctldata.MountUnregisterResponse - (*MountShowRequest)(nil), // 127: vtctldata.MountShowRequest - (*MountShowResponse)(nil), // 128: vtctldata.MountShowResponse - (*MountListRequest)(nil), // 129: vtctldata.MountListRequest - (*MountListResponse)(nil), // 130: vtctldata.MountListResponse - (*MoveTablesCreateRequest)(nil), // 131: vtctldata.MoveTablesCreateRequest - (*MoveTablesCreateResponse)(nil), // 132: vtctldata.MoveTablesCreateResponse - (*MoveTablesCompleteRequest)(nil), // 133: vtctldata.MoveTablesCompleteRequest - (*MoveTablesCompleteResponse)(nil), // 134: vtctldata.MoveTablesCompleteResponse - (*PingTabletRequest)(nil), // 135: vtctldata.PingTabletRequest - (*PingTabletResponse)(nil), // 136: vtctldata.PingTabletResponse - (*PlannedReparentShardRequest)(nil), // 137: vtctldata.PlannedReparentShardRequest - (*PlannedReparentShardResponse)(nil), // 138: vtctldata.PlannedReparentShardResponse - 
(*RebuildKeyspaceGraphRequest)(nil), // 139: vtctldata.RebuildKeyspaceGraphRequest - (*RebuildKeyspaceGraphResponse)(nil), // 140: vtctldata.RebuildKeyspaceGraphResponse - (*RebuildVSchemaGraphRequest)(nil), // 141: vtctldata.RebuildVSchemaGraphRequest - (*RebuildVSchemaGraphResponse)(nil), // 142: vtctldata.RebuildVSchemaGraphResponse - (*RefreshStateRequest)(nil), // 143: vtctldata.RefreshStateRequest - (*RefreshStateResponse)(nil), // 144: vtctldata.RefreshStateResponse - (*RefreshStateByShardRequest)(nil), // 145: vtctldata.RefreshStateByShardRequest - (*RefreshStateByShardResponse)(nil), // 146: vtctldata.RefreshStateByShardResponse - (*ReloadSchemaRequest)(nil), // 147: vtctldata.ReloadSchemaRequest - (*ReloadSchemaResponse)(nil), // 148: vtctldata.ReloadSchemaResponse - (*ReloadSchemaKeyspaceRequest)(nil), // 149: vtctldata.ReloadSchemaKeyspaceRequest - (*ReloadSchemaKeyspaceResponse)(nil), // 150: vtctldata.ReloadSchemaKeyspaceResponse - (*ReloadSchemaShardRequest)(nil), // 151: vtctldata.ReloadSchemaShardRequest - (*ReloadSchemaShardResponse)(nil), // 152: vtctldata.ReloadSchemaShardResponse - (*RemoveBackupRequest)(nil), // 153: vtctldata.RemoveBackupRequest - (*RemoveBackupResponse)(nil), // 154: vtctldata.RemoveBackupResponse - (*RemoveKeyspaceCellRequest)(nil), // 155: vtctldata.RemoveKeyspaceCellRequest - (*RemoveKeyspaceCellResponse)(nil), // 156: vtctldata.RemoveKeyspaceCellResponse - (*RemoveShardCellRequest)(nil), // 157: vtctldata.RemoveShardCellRequest - (*RemoveShardCellResponse)(nil), // 158: vtctldata.RemoveShardCellResponse - (*ReparentTabletRequest)(nil), // 159: vtctldata.ReparentTabletRequest - (*ReparentTabletResponse)(nil), // 160: vtctldata.ReparentTabletResponse - (*ReshardCreateRequest)(nil), // 161: vtctldata.ReshardCreateRequest - (*RestoreFromBackupRequest)(nil), // 162: vtctldata.RestoreFromBackupRequest - (*RestoreFromBackupResponse)(nil), // 163: vtctldata.RestoreFromBackupResponse - (*RetrySchemaMigrationRequest)(nil), // 164: 
vtctldata.RetrySchemaMigrationRequest - (*RetrySchemaMigrationResponse)(nil), // 165: vtctldata.RetrySchemaMigrationResponse - (*RunHealthCheckRequest)(nil), // 166: vtctldata.RunHealthCheckRequest - (*RunHealthCheckResponse)(nil), // 167: vtctldata.RunHealthCheckResponse - (*SetKeyspaceDurabilityPolicyRequest)(nil), // 168: vtctldata.SetKeyspaceDurabilityPolicyRequest - (*SetKeyspaceDurabilityPolicyResponse)(nil), // 169: vtctldata.SetKeyspaceDurabilityPolicyResponse - (*SetKeyspaceServedFromRequest)(nil), // 170: vtctldata.SetKeyspaceServedFromRequest - (*SetKeyspaceServedFromResponse)(nil), // 171: vtctldata.SetKeyspaceServedFromResponse - (*SetKeyspaceShardingInfoRequest)(nil), // 172: vtctldata.SetKeyspaceShardingInfoRequest - (*SetKeyspaceShardingInfoResponse)(nil), // 173: vtctldata.SetKeyspaceShardingInfoResponse - (*SetShardIsPrimaryServingRequest)(nil), // 174: vtctldata.SetShardIsPrimaryServingRequest - (*SetShardIsPrimaryServingResponse)(nil), // 175: vtctldata.SetShardIsPrimaryServingResponse - (*SetShardTabletControlRequest)(nil), // 176: vtctldata.SetShardTabletControlRequest - (*SetShardTabletControlResponse)(nil), // 177: vtctldata.SetShardTabletControlResponse - (*SetWritableRequest)(nil), // 178: vtctldata.SetWritableRequest - (*SetWritableResponse)(nil), // 179: vtctldata.SetWritableResponse - (*ShardReplicationAddRequest)(nil), // 180: vtctldata.ShardReplicationAddRequest - (*ShardReplicationAddResponse)(nil), // 181: vtctldata.ShardReplicationAddResponse - (*ShardReplicationFixRequest)(nil), // 182: vtctldata.ShardReplicationFixRequest - (*ShardReplicationFixResponse)(nil), // 183: vtctldata.ShardReplicationFixResponse - (*ShardReplicationPositionsRequest)(nil), // 184: vtctldata.ShardReplicationPositionsRequest - (*ShardReplicationPositionsResponse)(nil), // 185: vtctldata.ShardReplicationPositionsResponse - (*ShardReplicationRemoveRequest)(nil), // 186: vtctldata.ShardReplicationRemoveRequest - (*ShardReplicationRemoveResponse)(nil), // 187: 
vtctldata.ShardReplicationRemoveResponse - (*SleepTabletRequest)(nil), // 188: vtctldata.SleepTabletRequest - (*SleepTabletResponse)(nil), // 189: vtctldata.SleepTabletResponse - (*SourceShardAddRequest)(nil), // 190: vtctldata.SourceShardAddRequest - (*SourceShardAddResponse)(nil), // 191: vtctldata.SourceShardAddResponse - (*SourceShardDeleteRequest)(nil), // 192: vtctldata.SourceShardDeleteRequest - (*SourceShardDeleteResponse)(nil), // 193: vtctldata.SourceShardDeleteResponse - (*StartReplicationRequest)(nil), // 194: vtctldata.StartReplicationRequest - (*StartReplicationResponse)(nil), // 195: vtctldata.StartReplicationResponse - (*StopReplicationRequest)(nil), // 196: vtctldata.StopReplicationRequest - (*StopReplicationResponse)(nil), // 197: vtctldata.StopReplicationResponse - (*TabletExternallyReparentedRequest)(nil), // 198: vtctldata.TabletExternallyReparentedRequest - (*TabletExternallyReparentedResponse)(nil), // 199: vtctldata.TabletExternallyReparentedResponse - (*UpdateCellInfoRequest)(nil), // 200: vtctldata.UpdateCellInfoRequest - (*UpdateCellInfoResponse)(nil), // 201: vtctldata.UpdateCellInfoResponse - (*UpdateCellsAliasRequest)(nil), // 202: vtctldata.UpdateCellsAliasRequest - (*UpdateCellsAliasResponse)(nil), // 203: vtctldata.UpdateCellsAliasResponse - (*ValidateRequest)(nil), // 204: vtctldata.ValidateRequest - (*ValidateResponse)(nil), // 205: vtctldata.ValidateResponse - (*ValidateKeyspaceRequest)(nil), // 206: vtctldata.ValidateKeyspaceRequest - (*ValidateKeyspaceResponse)(nil), // 207: vtctldata.ValidateKeyspaceResponse - (*ValidateSchemaKeyspaceRequest)(nil), // 208: vtctldata.ValidateSchemaKeyspaceRequest - (*ValidateSchemaKeyspaceResponse)(nil), // 209: vtctldata.ValidateSchemaKeyspaceResponse - (*ValidateShardRequest)(nil), // 210: vtctldata.ValidateShardRequest - (*ValidateShardResponse)(nil), // 211: vtctldata.ValidateShardResponse - (*ValidateVersionKeyspaceRequest)(nil), // 212: vtctldata.ValidateVersionKeyspaceRequest - 
(*ValidateVersionKeyspaceResponse)(nil), // 213: vtctldata.ValidateVersionKeyspaceResponse - (*ValidateVersionShardRequest)(nil), // 214: vtctldata.ValidateVersionShardRequest - (*ValidateVersionShardResponse)(nil), // 215: vtctldata.ValidateVersionShardResponse - (*ValidateVSchemaRequest)(nil), // 216: vtctldata.ValidateVSchemaRequest - (*ValidateVSchemaResponse)(nil), // 217: vtctldata.ValidateVSchemaResponse - (*VDiffCreateRequest)(nil), // 218: vtctldata.VDiffCreateRequest - (*VDiffCreateResponse)(nil), // 219: vtctldata.VDiffCreateResponse - (*VDiffDeleteRequest)(nil), // 220: vtctldata.VDiffDeleteRequest - (*VDiffDeleteResponse)(nil), // 221: vtctldata.VDiffDeleteResponse - (*VDiffResumeRequest)(nil), // 222: vtctldata.VDiffResumeRequest - (*VDiffResumeResponse)(nil), // 223: vtctldata.VDiffResumeResponse - (*VDiffShowRequest)(nil), // 224: vtctldata.VDiffShowRequest - (*VDiffShowResponse)(nil), // 225: vtctldata.VDiffShowResponse - (*VDiffStopRequest)(nil), // 226: vtctldata.VDiffStopRequest - (*VDiffStopResponse)(nil), // 227: vtctldata.VDiffStopResponse - (*WorkflowDeleteRequest)(nil), // 228: vtctldata.WorkflowDeleteRequest - (*WorkflowDeleteResponse)(nil), // 229: vtctldata.WorkflowDeleteResponse - (*WorkflowStatusRequest)(nil), // 230: vtctldata.WorkflowStatusRequest - (*WorkflowStatusResponse)(nil), // 231: vtctldata.WorkflowStatusResponse - (*WorkflowSwitchTrafficRequest)(nil), // 232: vtctldata.WorkflowSwitchTrafficRequest - (*WorkflowSwitchTrafficResponse)(nil), // 233: vtctldata.WorkflowSwitchTrafficResponse - (*WorkflowUpdateRequest)(nil), // 234: vtctldata.WorkflowUpdateRequest - (*WorkflowUpdateResponse)(nil), // 235: vtctldata.WorkflowUpdateResponse - nil, // 236: vtctldata.Workflow.ShardStreamsEntry - (*Workflow_ReplicationLocation)(nil), // 237: vtctldata.Workflow.ReplicationLocation - (*Workflow_ShardStream)(nil), // 238: vtctldata.Workflow.ShardStream - (*Workflow_Stream)(nil), // 239: vtctldata.Workflow.Stream - 
(*Workflow_Stream_CopyState)(nil), // 240: vtctldata.Workflow.Stream.CopyState - (*Workflow_Stream_Log)(nil), // 241: vtctldata.Workflow.Stream.Log - (*Workflow_Stream_ThrottlerStatus)(nil), // 242: vtctldata.Workflow.Stream.ThrottlerStatus - nil, // 243: vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry - nil, // 244: vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry - nil, // 245: vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry - nil, // 246: vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry - nil, // 247: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - nil, // 248: vtctldata.GetCellsAliasesResponse.AliasesEntry - nil, // 249: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry - (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 250: vtctldata.GetSrvKeyspaceNamesResponse.NameList - nil, // 251: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - nil, // 252: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - nil, // 253: vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry - (*MoveTablesCreateResponse_TabletInfo)(nil), // 254: vtctldata.MoveTablesCreateResponse.TabletInfo - nil, // 255: vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry - nil, // 256: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - nil, // 257: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - nil, // 258: vtctldata.ValidateResponse.ResultsByKeyspaceEntry - nil, // 259: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - nil, // 260: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - nil, // 261: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - nil, // 262: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - nil, // 263: vtctldata.VDiffShowResponse.TabletResponsesEntry - (*WorkflowDeleteResponse_TabletInfo)(nil), // 264: vtctldata.WorkflowDeleteResponse.TabletInfo - (*WorkflowStatusResponse_TableCopyState)(nil), // 265: 
vtctldata.WorkflowStatusResponse.TableCopyState - (*WorkflowStatusResponse_ShardStreamState)(nil), // 266: vtctldata.WorkflowStatusResponse.ShardStreamState - (*WorkflowStatusResponse_ShardStreams)(nil), // 267: vtctldata.WorkflowStatusResponse.ShardStreams - nil, // 268: vtctldata.WorkflowStatusResponse.TableCopyStateEntry - nil, // 269: vtctldata.WorkflowStatusResponse.ShardStreamsEntry - (*WorkflowUpdateResponse_TabletInfo)(nil), // 270: vtctldata.WorkflowUpdateResponse.TabletInfo - (*logutil.Event)(nil), // 271: logutil.Event - (tabletmanagerdata.TabletSelectionPreference)(0), // 272: tabletmanagerdata.TabletSelectionPreference - (*topodata.Keyspace)(nil), // 273: topodata.Keyspace - (*vttime.Time)(nil), // 274: vttime.Time - (*topodata.TabletAlias)(nil), // 275: topodata.TabletAlias - (*vttime.Duration)(nil), // 276: vttime.Duration - (*topodata.Shard)(nil), // 277: topodata.Shard - (*topodata.CellInfo)(nil), // 278: topodata.CellInfo - (*vschema.RoutingRules)(nil), // 279: vschema.RoutingRules - (*vschema.ShardRoutingRules)(nil), // 280: vschema.ShardRoutingRules - (*vtrpc.CallerID)(nil), // 281: vtrpc.CallerID - (*vschema.Keyspace)(nil), // 282: vschema.Keyspace - (topodata.TabletType)(0), // 283: topodata.TabletType - (*topodata.Tablet)(nil), // 284: topodata.Tablet - (*topodata.Keyspace_ServedFrom)(nil), // 285: topodata.Keyspace.ServedFrom - (topodata.KeyspaceType)(0), // 286: topodata.KeyspaceType - (*query.QueryResult)(nil), // 287: query.QueryResult - (*tabletmanagerdata.ExecuteHookRequest)(nil), // 288: tabletmanagerdata.ExecuteHookRequest - (*tabletmanagerdata.ExecuteHookResponse)(nil), // 289: tabletmanagerdata.ExecuteHookResponse - (*mysqlctl.BackupInfo)(nil), // 290: mysqlctl.BackupInfo - (*replicationdata.FullStatus)(nil), // 291: replicationdata.FullStatus - (*tabletmanagerdata.Permissions)(nil), // 292: tabletmanagerdata.Permissions - (*tabletmanagerdata.SchemaDefinition)(nil), // 293: tabletmanagerdata.SchemaDefinition - 
(*topodata.ThrottledAppRule)(nil), // 294: topodata.ThrottledAppRule - (*vschema.SrvVSchema)(nil), // 295: vschema.SrvVSchema - (*topodata.ShardReplicationError)(nil), // 296: topodata.ShardReplicationError - (*topodata.KeyRange)(nil), // 297: topodata.KeyRange - (*topodata.CellsAlias)(nil), // 298: topodata.CellsAlias - (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 299: tabletmanagerdata.UpdateVReplicationWorkflowRequest - (*topodata.Shard_TabletControl)(nil), // 300: topodata.Shard.TabletControl - (*binlogdata.BinlogSource)(nil), // 301: binlogdata.BinlogSource - (*topodata.SrvKeyspace)(nil), // 302: topodata.SrvKeyspace - (*replicationdata.Status)(nil), // 303: replicationdata.Status - (*tabletmanagerdata.VDiffResponse)(nil), // 304: tabletmanagerdata.VDiffResponse + (*WorkflowOptions)(nil), // 11: vtctldata.WorkflowOptions + (*Workflow)(nil), // 12: vtctldata.Workflow + (*AddCellInfoRequest)(nil), // 13: vtctldata.AddCellInfoRequest + (*AddCellInfoResponse)(nil), // 14: vtctldata.AddCellInfoResponse + (*AddCellsAliasRequest)(nil), // 15: vtctldata.AddCellsAliasRequest + (*AddCellsAliasResponse)(nil), // 16: vtctldata.AddCellsAliasResponse + (*ApplyKeyspaceRoutingRulesRequest)(nil), // 17: vtctldata.ApplyKeyspaceRoutingRulesRequest + (*ApplyKeyspaceRoutingRulesResponse)(nil), // 18: vtctldata.ApplyKeyspaceRoutingRulesResponse + (*ApplyRoutingRulesRequest)(nil), // 19: vtctldata.ApplyRoutingRulesRequest + (*ApplyRoutingRulesResponse)(nil), // 20: vtctldata.ApplyRoutingRulesResponse + (*ApplyShardRoutingRulesRequest)(nil), // 21: vtctldata.ApplyShardRoutingRulesRequest + (*ApplyShardRoutingRulesResponse)(nil), // 22: vtctldata.ApplyShardRoutingRulesResponse + (*ApplySchemaRequest)(nil), // 23: vtctldata.ApplySchemaRequest + (*ApplySchemaResponse)(nil), // 24: vtctldata.ApplySchemaResponse + (*ApplyVSchemaRequest)(nil), // 25: vtctldata.ApplyVSchemaRequest + (*ApplyVSchemaResponse)(nil), // 26: vtctldata.ApplyVSchemaResponse + 
(*BackupRequest)(nil), // 27: vtctldata.BackupRequest + (*BackupResponse)(nil), // 28: vtctldata.BackupResponse + (*BackupShardRequest)(nil), // 29: vtctldata.BackupShardRequest + (*CancelSchemaMigrationRequest)(nil), // 30: vtctldata.CancelSchemaMigrationRequest + (*CancelSchemaMigrationResponse)(nil), // 31: vtctldata.CancelSchemaMigrationResponse + (*ChangeTabletTypeRequest)(nil), // 32: vtctldata.ChangeTabletTypeRequest + (*ChangeTabletTypeResponse)(nil), // 33: vtctldata.ChangeTabletTypeResponse + (*CleanupSchemaMigrationRequest)(nil), // 34: vtctldata.CleanupSchemaMigrationRequest + (*CleanupSchemaMigrationResponse)(nil), // 35: vtctldata.CleanupSchemaMigrationResponse + (*CompleteSchemaMigrationRequest)(nil), // 36: vtctldata.CompleteSchemaMigrationRequest + (*CompleteSchemaMigrationResponse)(nil), // 37: vtctldata.CompleteSchemaMigrationResponse + (*CreateKeyspaceRequest)(nil), // 38: vtctldata.CreateKeyspaceRequest + (*CreateKeyspaceResponse)(nil), // 39: vtctldata.CreateKeyspaceResponse + (*CreateShardRequest)(nil), // 40: vtctldata.CreateShardRequest + (*CreateShardResponse)(nil), // 41: vtctldata.CreateShardResponse + (*DeleteCellInfoRequest)(nil), // 42: vtctldata.DeleteCellInfoRequest + (*DeleteCellInfoResponse)(nil), // 43: vtctldata.DeleteCellInfoResponse + (*DeleteCellsAliasRequest)(nil), // 44: vtctldata.DeleteCellsAliasRequest + (*DeleteCellsAliasResponse)(nil), // 45: vtctldata.DeleteCellsAliasResponse + (*DeleteKeyspaceRequest)(nil), // 46: vtctldata.DeleteKeyspaceRequest + (*DeleteKeyspaceResponse)(nil), // 47: vtctldata.DeleteKeyspaceResponse + (*DeleteShardsRequest)(nil), // 48: vtctldata.DeleteShardsRequest + (*DeleteShardsResponse)(nil), // 49: vtctldata.DeleteShardsResponse + (*DeleteSrvVSchemaRequest)(nil), // 50: vtctldata.DeleteSrvVSchemaRequest + (*DeleteSrvVSchemaResponse)(nil), // 51: vtctldata.DeleteSrvVSchemaResponse + (*DeleteTabletsRequest)(nil), // 52: vtctldata.DeleteTabletsRequest + (*DeleteTabletsResponse)(nil), // 53: 
vtctldata.DeleteTabletsResponse + (*EmergencyReparentShardRequest)(nil), // 54: vtctldata.EmergencyReparentShardRequest + (*EmergencyReparentShardResponse)(nil), // 55: vtctldata.EmergencyReparentShardResponse + (*ExecuteFetchAsAppRequest)(nil), // 56: vtctldata.ExecuteFetchAsAppRequest + (*ExecuteFetchAsAppResponse)(nil), // 57: vtctldata.ExecuteFetchAsAppResponse + (*ExecuteFetchAsDBARequest)(nil), // 58: vtctldata.ExecuteFetchAsDBARequest + (*ExecuteFetchAsDBAResponse)(nil), // 59: vtctldata.ExecuteFetchAsDBAResponse + (*ExecuteHookRequest)(nil), // 60: vtctldata.ExecuteHookRequest + (*ExecuteHookResponse)(nil), // 61: vtctldata.ExecuteHookResponse + (*ExecuteMultiFetchAsDBARequest)(nil), // 62: vtctldata.ExecuteMultiFetchAsDBARequest + (*ExecuteMultiFetchAsDBAResponse)(nil), // 63: vtctldata.ExecuteMultiFetchAsDBAResponse + (*FindAllShardsInKeyspaceRequest)(nil), // 64: vtctldata.FindAllShardsInKeyspaceRequest + (*FindAllShardsInKeyspaceResponse)(nil), // 65: vtctldata.FindAllShardsInKeyspaceResponse + (*ForceCutOverSchemaMigrationRequest)(nil), // 66: vtctldata.ForceCutOverSchemaMigrationRequest + (*ForceCutOverSchemaMigrationResponse)(nil), // 67: vtctldata.ForceCutOverSchemaMigrationResponse + (*GetBackupsRequest)(nil), // 68: vtctldata.GetBackupsRequest + (*GetBackupsResponse)(nil), // 69: vtctldata.GetBackupsResponse + (*GetCellInfoRequest)(nil), // 70: vtctldata.GetCellInfoRequest + (*GetCellInfoResponse)(nil), // 71: vtctldata.GetCellInfoResponse + (*GetCellInfoNamesRequest)(nil), // 72: vtctldata.GetCellInfoNamesRequest + (*GetCellInfoNamesResponse)(nil), // 73: vtctldata.GetCellInfoNamesResponse + (*GetCellsAliasesRequest)(nil), // 74: vtctldata.GetCellsAliasesRequest + (*GetCellsAliasesResponse)(nil), // 75: vtctldata.GetCellsAliasesResponse + (*GetFullStatusRequest)(nil), // 76: vtctldata.GetFullStatusRequest + (*GetFullStatusResponse)(nil), // 77: vtctldata.GetFullStatusResponse + (*GetKeyspacesRequest)(nil), // 78: vtctldata.GetKeyspacesRequest + 
(*GetKeyspacesResponse)(nil), // 79: vtctldata.GetKeyspacesResponse + (*GetKeyspaceRequest)(nil), // 80: vtctldata.GetKeyspaceRequest + (*GetKeyspaceResponse)(nil), // 81: vtctldata.GetKeyspaceResponse + (*GetPermissionsRequest)(nil), // 82: vtctldata.GetPermissionsRequest + (*GetPermissionsResponse)(nil), // 83: vtctldata.GetPermissionsResponse + (*GetKeyspaceRoutingRulesRequest)(nil), // 84: vtctldata.GetKeyspaceRoutingRulesRequest + (*GetKeyspaceRoutingRulesResponse)(nil), // 85: vtctldata.GetKeyspaceRoutingRulesResponse + (*GetRoutingRulesRequest)(nil), // 86: vtctldata.GetRoutingRulesRequest + (*GetRoutingRulesResponse)(nil), // 87: vtctldata.GetRoutingRulesResponse + (*GetSchemaRequest)(nil), // 88: vtctldata.GetSchemaRequest + (*GetSchemaResponse)(nil), // 89: vtctldata.GetSchemaResponse + (*GetSchemaMigrationsRequest)(nil), // 90: vtctldata.GetSchemaMigrationsRequest + (*GetSchemaMigrationsResponse)(nil), // 91: vtctldata.GetSchemaMigrationsResponse + (*GetShardReplicationRequest)(nil), // 92: vtctldata.GetShardReplicationRequest + (*GetShardReplicationResponse)(nil), // 93: vtctldata.GetShardReplicationResponse + (*GetShardRequest)(nil), // 94: vtctldata.GetShardRequest + (*GetShardResponse)(nil), // 95: vtctldata.GetShardResponse + (*GetShardRoutingRulesRequest)(nil), // 96: vtctldata.GetShardRoutingRulesRequest + (*GetShardRoutingRulesResponse)(nil), // 97: vtctldata.GetShardRoutingRulesResponse + (*GetSrvKeyspaceNamesRequest)(nil), // 98: vtctldata.GetSrvKeyspaceNamesRequest + (*GetSrvKeyspaceNamesResponse)(nil), // 99: vtctldata.GetSrvKeyspaceNamesResponse + (*GetSrvKeyspacesRequest)(nil), // 100: vtctldata.GetSrvKeyspacesRequest + (*GetSrvKeyspacesResponse)(nil), // 101: vtctldata.GetSrvKeyspacesResponse + (*UpdateThrottlerConfigRequest)(nil), // 102: vtctldata.UpdateThrottlerConfigRequest + (*UpdateThrottlerConfigResponse)(nil), // 103: vtctldata.UpdateThrottlerConfigResponse + (*GetSrvVSchemaRequest)(nil), // 104: vtctldata.GetSrvVSchemaRequest + 
(*GetSrvVSchemaResponse)(nil), // 105: vtctldata.GetSrvVSchemaResponse + (*GetSrvVSchemasRequest)(nil), // 106: vtctldata.GetSrvVSchemasRequest + (*GetSrvVSchemasResponse)(nil), // 107: vtctldata.GetSrvVSchemasResponse + (*GetTabletRequest)(nil), // 108: vtctldata.GetTabletRequest + (*GetTabletResponse)(nil), // 109: vtctldata.GetTabletResponse + (*GetTabletsRequest)(nil), // 110: vtctldata.GetTabletsRequest + (*GetTabletsResponse)(nil), // 111: vtctldata.GetTabletsResponse + (*GetTopologyPathRequest)(nil), // 112: vtctldata.GetTopologyPathRequest + (*GetTopologyPathResponse)(nil), // 113: vtctldata.GetTopologyPathResponse + (*TopologyCell)(nil), // 114: vtctldata.TopologyCell + (*GetVSchemaRequest)(nil), // 115: vtctldata.GetVSchemaRequest + (*GetVersionRequest)(nil), // 116: vtctldata.GetVersionRequest + (*GetVersionResponse)(nil), // 117: vtctldata.GetVersionResponse + (*GetVSchemaResponse)(nil), // 118: vtctldata.GetVSchemaResponse + (*GetWorkflowsRequest)(nil), // 119: vtctldata.GetWorkflowsRequest + (*GetWorkflowsResponse)(nil), // 120: vtctldata.GetWorkflowsResponse + (*InitShardPrimaryRequest)(nil), // 121: vtctldata.InitShardPrimaryRequest + (*InitShardPrimaryResponse)(nil), // 122: vtctldata.InitShardPrimaryResponse + (*LaunchSchemaMigrationRequest)(nil), // 123: vtctldata.LaunchSchemaMigrationRequest + (*LaunchSchemaMigrationResponse)(nil), // 124: vtctldata.LaunchSchemaMigrationResponse + (*LookupVindexCreateRequest)(nil), // 125: vtctldata.LookupVindexCreateRequest + (*LookupVindexCreateResponse)(nil), // 126: vtctldata.LookupVindexCreateResponse + (*LookupVindexExternalizeRequest)(nil), // 127: vtctldata.LookupVindexExternalizeRequest + (*LookupVindexExternalizeResponse)(nil), // 128: vtctldata.LookupVindexExternalizeResponse + (*MaterializeCreateRequest)(nil), // 129: vtctldata.MaterializeCreateRequest + (*MaterializeCreateResponse)(nil), // 130: vtctldata.MaterializeCreateResponse + (*MigrateCreateRequest)(nil), // 131: 
vtctldata.MigrateCreateRequest + (*MigrateCompleteRequest)(nil), // 132: vtctldata.MigrateCompleteRequest + (*MigrateCompleteResponse)(nil), // 133: vtctldata.MigrateCompleteResponse + (*MountRegisterRequest)(nil), // 134: vtctldata.MountRegisterRequest + (*MountRegisterResponse)(nil), // 135: vtctldata.MountRegisterResponse + (*MountUnregisterRequest)(nil), // 136: vtctldata.MountUnregisterRequest + (*MountUnregisterResponse)(nil), // 137: vtctldata.MountUnregisterResponse + (*MountShowRequest)(nil), // 138: vtctldata.MountShowRequest + (*MountShowResponse)(nil), // 139: vtctldata.MountShowResponse + (*MountListRequest)(nil), // 140: vtctldata.MountListRequest + (*MountListResponse)(nil), // 141: vtctldata.MountListResponse + (*MoveTablesCreateRequest)(nil), // 142: vtctldata.MoveTablesCreateRequest + (*MoveTablesCreateResponse)(nil), // 143: vtctldata.MoveTablesCreateResponse + (*MoveTablesCompleteRequest)(nil), // 144: vtctldata.MoveTablesCompleteRequest + (*MoveTablesCompleteResponse)(nil), // 145: vtctldata.MoveTablesCompleteResponse + (*PingTabletRequest)(nil), // 146: vtctldata.PingTabletRequest + (*PingTabletResponse)(nil), // 147: vtctldata.PingTabletResponse + (*PlannedReparentShardRequest)(nil), // 148: vtctldata.PlannedReparentShardRequest + (*PlannedReparentShardResponse)(nil), // 149: vtctldata.PlannedReparentShardResponse + (*RebuildKeyspaceGraphRequest)(nil), // 150: vtctldata.RebuildKeyspaceGraphRequest + (*RebuildKeyspaceGraphResponse)(nil), // 151: vtctldata.RebuildKeyspaceGraphResponse + (*RebuildVSchemaGraphRequest)(nil), // 152: vtctldata.RebuildVSchemaGraphRequest + (*RebuildVSchemaGraphResponse)(nil), // 153: vtctldata.RebuildVSchemaGraphResponse + (*RefreshStateRequest)(nil), // 154: vtctldata.RefreshStateRequest + (*RefreshStateResponse)(nil), // 155: vtctldata.RefreshStateResponse + (*RefreshStateByShardRequest)(nil), // 156: vtctldata.RefreshStateByShardRequest + (*RefreshStateByShardResponse)(nil), // 157: 
vtctldata.RefreshStateByShardResponse + (*ReloadSchemaRequest)(nil), // 158: vtctldata.ReloadSchemaRequest + (*ReloadSchemaResponse)(nil), // 159: vtctldata.ReloadSchemaResponse + (*ReloadSchemaKeyspaceRequest)(nil), // 160: vtctldata.ReloadSchemaKeyspaceRequest + (*ReloadSchemaKeyspaceResponse)(nil), // 161: vtctldata.ReloadSchemaKeyspaceResponse + (*ReloadSchemaShardRequest)(nil), // 162: vtctldata.ReloadSchemaShardRequest + (*ReloadSchemaShardResponse)(nil), // 163: vtctldata.ReloadSchemaShardResponse + (*RemoveBackupRequest)(nil), // 164: vtctldata.RemoveBackupRequest + (*RemoveBackupResponse)(nil), // 165: vtctldata.RemoveBackupResponse + (*RemoveKeyspaceCellRequest)(nil), // 166: vtctldata.RemoveKeyspaceCellRequest + (*RemoveKeyspaceCellResponse)(nil), // 167: vtctldata.RemoveKeyspaceCellResponse + (*RemoveShardCellRequest)(nil), // 168: vtctldata.RemoveShardCellRequest + (*RemoveShardCellResponse)(nil), // 169: vtctldata.RemoveShardCellResponse + (*ReparentTabletRequest)(nil), // 170: vtctldata.ReparentTabletRequest + (*ReparentTabletResponse)(nil), // 171: vtctldata.ReparentTabletResponse + (*ReshardCreateRequest)(nil), // 172: vtctldata.ReshardCreateRequest + (*RestoreFromBackupRequest)(nil), // 173: vtctldata.RestoreFromBackupRequest + (*RestoreFromBackupResponse)(nil), // 174: vtctldata.RestoreFromBackupResponse + (*RetrySchemaMigrationRequest)(nil), // 175: vtctldata.RetrySchemaMigrationRequest + (*RetrySchemaMigrationResponse)(nil), // 176: vtctldata.RetrySchemaMigrationResponse + (*RunHealthCheckRequest)(nil), // 177: vtctldata.RunHealthCheckRequest + (*RunHealthCheckResponse)(nil), // 178: vtctldata.RunHealthCheckResponse + (*SetKeyspaceDurabilityPolicyRequest)(nil), // 179: vtctldata.SetKeyspaceDurabilityPolicyRequest + (*SetKeyspaceDurabilityPolicyResponse)(nil), // 180: vtctldata.SetKeyspaceDurabilityPolicyResponse + (*SetKeyspaceShardingInfoRequest)(nil), // 181: vtctldata.SetKeyspaceShardingInfoRequest + (*SetKeyspaceShardingInfoResponse)(nil), 
// 182: vtctldata.SetKeyspaceShardingInfoResponse + (*SetShardIsPrimaryServingRequest)(nil), // 183: vtctldata.SetShardIsPrimaryServingRequest + (*SetShardIsPrimaryServingResponse)(nil), // 184: vtctldata.SetShardIsPrimaryServingResponse + (*SetShardTabletControlRequest)(nil), // 185: vtctldata.SetShardTabletControlRequest + (*SetShardTabletControlResponse)(nil), // 186: vtctldata.SetShardTabletControlResponse + (*SetWritableRequest)(nil), // 187: vtctldata.SetWritableRequest + (*SetWritableResponse)(nil), // 188: vtctldata.SetWritableResponse + (*ShardReplicationAddRequest)(nil), // 189: vtctldata.ShardReplicationAddRequest + (*ShardReplicationAddResponse)(nil), // 190: vtctldata.ShardReplicationAddResponse + (*ShardReplicationFixRequest)(nil), // 191: vtctldata.ShardReplicationFixRequest + (*ShardReplicationFixResponse)(nil), // 192: vtctldata.ShardReplicationFixResponse + (*ShardReplicationPositionsRequest)(nil), // 193: vtctldata.ShardReplicationPositionsRequest + (*ShardReplicationPositionsResponse)(nil), // 194: vtctldata.ShardReplicationPositionsResponse + (*ShardReplicationRemoveRequest)(nil), // 195: vtctldata.ShardReplicationRemoveRequest + (*ShardReplicationRemoveResponse)(nil), // 196: vtctldata.ShardReplicationRemoveResponse + (*SleepTabletRequest)(nil), // 197: vtctldata.SleepTabletRequest + (*SleepTabletResponse)(nil), // 198: vtctldata.SleepTabletResponse + (*SourceShardAddRequest)(nil), // 199: vtctldata.SourceShardAddRequest + (*SourceShardAddResponse)(nil), // 200: vtctldata.SourceShardAddResponse + (*SourceShardDeleteRequest)(nil), // 201: vtctldata.SourceShardDeleteRequest + (*SourceShardDeleteResponse)(nil), // 202: vtctldata.SourceShardDeleteResponse + (*StartReplicationRequest)(nil), // 203: vtctldata.StartReplicationRequest + (*StartReplicationResponse)(nil), // 204: vtctldata.StartReplicationResponse + (*StopReplicationRequest)(nil), // 205: vtctldata.StopReplicationRequest + (*StopReplicationResponse)(nil), // 206: 
vtctldata.StopReplicationResponse + (*TabletExternallyReparentedRequest)(nil), // 207: vtctldata.TabletExternallyReparentedRequest + (*TabletExternallyReparentedResponse)(nil), // 208: vtctldata.TabletExternallyReparentedResponse + (*UpdateCellInfoRequest)(nil), // 209: vtctldata.UpdateCellInfoRequest + (*UpdateCellInfoResponse)(nil), // 210: vtctldata.UpdateCellInfoResponse + (*UpdateCellsAliasRequest)(nil), // 211: vtctldata.UpdateCellsAliasRequest + (*UpdateCellsAliasResponse)(nil), // 212: vtctldata.UpdateCellsAliasResponse + (*ValidateRequest)(nil), // 213: vtctldata.ValidateRequest + (*ValidateResponse)(nil), // 214: vtctldata.ValidateResponse + (*ValidateKeyspaceRequest)(nil), // 215: vtctldata.ValidateKeyspaceRequest + (*ValidateKeyspaceResponse)(nil), // 216: vtctldata.ValidateKeyspaceResponse + (*ValidateSchemaKeyspaceRequest)(nil), // 217: vtctldata.ValidateSchemaKeyspaceRequest + (*ValidateSchemaKeyspaceResponse)(nil), // 218: vtctldata.ValidateSchemaKeyspaceResponse + (*ValidateShardRequest)(nil), // 219: vtctldata.ValidateShardRequest + (*ValidateShardResponse)(nil), // 220: vtctldata.ValidateShardResponse + (*ValidateVersionKeyspaceRequest)(nil), // 221: vtctldata.ValidateVersionKeyspaceRequest + (*ValidateVersionKeyspaceResponse)(nil), // 222: vtctldata.ValidateVersionKeyspaceResponse + (*ValidateVersionShardRequest)(nil), // 223: vtctldata.ValidateVersionShardRequest + (*ValidateVersionShardResponse)(nil), // 224: vtctldata.ValidateVersionShardResponse + (*ValidateVSchemaRequest)(nil), // 225: vtctldata.ValidateVSchemaRequest + (*ValidateVSchemaResponse)(nil), // 226: vtctldata.ValidateVSchemaResponse + (*VDiffCreateRequest)(nil), // 227: vtctldata.VDiffCreateRequest + (*VDiffCreateResponse)(nil), // 228: vtctldata.VDiffCreateResponse + (*VDiffDeleteRequest)(nil), // 229: vtctldata.VDiffDeleteRequest + (*VDiffDeleteResponse)(nil), // 230: vtctldata.VDiffDeleteResponse + (*VDiffResumeRequest)(nil), // 231: vtctldata.VDiffResumeRequest + 
(*VDiffResumeResponse)(nil), // 232: vtctldata.VDiffResumeResponse + (*VDiffShowRequest)(nil), // 233: vtctldata.VDiffShowRequest + (*VDiffShowResponse)(nil), // 234: vtctldata.VDiffShowResponse + (*VDiffStopRequest)(nil), // 235: vtctldata.VDiffStopRequest + (*VDiffStopResponse)(nil), // 236: vtctldata.VDiffStopResponse + (*WorkflowDeleteRequest)(nil), // 237: vtctldata.WorkflowDeleteRequest + (*WorkflowDeleteResponse)(nil), // 238: vtctldata.WorkflowDeleteResponse + (*WorkflowStatusRequest)(nil), // 239: vtctldata.WorkflowStatusRequest + (*WorkflowStatusResponse)(nil), // 240: vtctldata.WorkflowStatusResponse + (*WorkflowSwitchTrafficRequest)(nil), // 241: vtctldata.WorkflowSwitchTrafficRequest + (*WorkflowSwitchTrafficResponse)(nil), // 242: vtctldata.WorkflowSwitchTrafficResponse + (*WorkflowUpdateRequest)(nil), // 243: vtctldata.WorkflowUpdateRequest + (*WorkflowUpdateResponse)(nil), // 244: vtctldata.WorkflowUpdateResponse + nil, // 245: vtctldata.Workflow.ShardStreamsEntry + (*Workflow_ReplicationLocation)(nil), // 246: vtctldata.Workflow.ReplicationLocation + (*Workflow_ShardStream)(nil), // 247: vtctldata.Workflow.ShardStream + (*Workflow_Stream)(nil), // 248: vtctldata.Workflow.Stream + (*Workflow_Stream_CopyState)(nil), // 249: vtctldata.Workflow.Stream.CopyState + (*Workflow_Stream_Log)(nil), // 250: vtctldata.Workflow.Stream.Log + (*Workflow_Stream_ThrottlerStatus)(nil), // 251: vtctldata.Workflow.Stream.ThrottlerStatus + nil, // 252: vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry + nil, // 253: vtctldata.ApplyVSchemaResponse.UnknownVindexParamsEntry + (*ApplyVSchemaResponse_ParamList)(nil), // 254: vtctldata.ApplyVSchemaResponse.ParamList + nil, // 255: vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 256: vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 257: vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 258: 
vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + nil, // 259: vtctldata.ForceCutOverSchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 260: vtctldata.GetCellsAliasesResponse.AliasesEntry + nil, // 261: vtctldata.GetShardReplicationResponse.ShardReplicationByCellEntry + nil, // 262: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry + (*GetSrvKeyspaceNamesResponse_NameList)(nil), // 263: vtctldata.GetSrvKeyspaceNamesResponse.NameList + nil, // 264: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + nil, // 265: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + nil, // 266: vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry + (*MoveTablesCreateResponse_TabletInfo)(nil), // 267: vtctldata.MoveTablesCreateResponse.TabletInfo + nil, // 268: vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + nil, // 269: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + nil, // 270: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + nil, // 271: vtctldata.ValidateResponse.ResultsByKeyspaceEntry + nil, // 272: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + nil, // 273: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + nil, // 274: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + nil, // 275: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + nil, // 276: vtctldata.VDiffShowResponse.TabletResponsesEntry + (*WorkflowDeleteResponse_TabletInfo)(nil), // 277: vtctldata.WorkflowDeleteResponse.TabletInfo + (*WorkflowStatusResponse_TableCopyState)(nil), // 278: vtctldata.WorkflowStatusResponse.TableCopyState + (*WorkflowStatusResponse_ShardStreamState)(nil), // 279: vtctldata.WorkflowStatusResponse.ShardStreamState + (*WorkflowStatusResponse_ShardStreams)(nil), // 280: vtctldata.WorkflowStatusResponse.ShardStreams + nil, // 281: vtctldata.WorkflowStatusResponse.TableCopyStateEntry + nil, // 282: vtctldata.WorkflowStatusResponse.ShardStreamsEntry + 
(*WorkflowUpdateResponse_TabletInfo)(nil), // 283: vtctldata.WorkflowUpdateResponse.TabletInfo + (*logutil.Event)(nil), // 284: logutil.Event + (tabletmanagerdata.TabletSelectionPreference)(0), // 285: tabletmanagerdata.TabletSelectionPreference + (*topodata.Keyspace)(nil), // 286: topodata.Keyspace + (*vttime.Time)(nil), // 287: vttime.Time + (*topodata.TabletAlias)(nil), // 288: topodata.TabletAlias + (*vttime.Duration)(nil), // 289: vttime.Duration + (*topodata.Shard)(nil), // 290: topodata.Shard + (*topodata.CellInfo)(nil), // 291: topodata.CellInfo + (*vschema.KeyspaceRoutingRules)(nil), // 292: vschema.KeyspaceRoutingRules + (*vschema.RoutingRules)(nil), // 293: vschema.RoutingRules + (*vschema.ShardRoutingRules)(nil), // 294: vschema.ShardRoutingRules + (*vtrpc.CallerID)(nil), // 295: vtrpc.CallerID + (*vschema.Keyspace)(nil), // 296: vschema.Keyspace + (topodata.TabletType)(0), // 297: topodata.TabletType + (*topodata.Tablet)(nil), // 298: topodata.Tablet + (topodata.KeyspaceType)(0), // 299: topodata.KeyspaceType + (*query.QueryResult)(nil), // 300: query.QueryResult + (*tabletmanagerdata.ExecuteHookRequest)(nil), // 301: tabletmanagerdata.ExecuteHookRequest + (*tabletmanagerdata.ExecuteHookResponse)(nil), // 302: tabletmanagerdata.ExecuteHookResponse + (*mysqlctl.BackupInfo)(nil), // 303: mysqlctl.BackupInfo + (*replicationdata.FullStatus)(nil), // 304: replicationdata.FullStatus + (*tabletmanagerdata.Permissions)(nil), // 305: tabletmanagerdata.Permissions + (*tabletmanagerdata.SchemaDefinition)(nil), // 306: tabletmanagerdata.SchemaDefinition + (*topodata.ThrottledAppRule)(nil), // 307: topodata.ThrottledAppRule + (*vschema.SrvVSchema)(nil), // 308: vschema.SrvVSchema + (*topodata.ShardReplicationError)(nil), // 309: topodata.ShardReplicationError + (*topodata.KeyRange)(nil), // 310: topodata.KeyRange + (*topodata.CellsAlias)(nil), // 311: topodata.CellsAlias + (*tabletmanagerdata.UpdateVReplicationWorkflowRequest)(nil), // 312: 
tabletmanagerdata.UpdateVReplicationWorkflowRequest + (*topodata.Shard_TabletControl)(nil), // 313: topodata.Shard.TabletControl + (*binlogdata.BinlogSource)(nil), // 314: binlogdata.BinlogSource + (*topodata.ShardReplication)(nil), // 315: topodata.ShardReplication + (*topodata.SrvKeyspace)(nil), // 316: topodata.SrvKeyspace + (*replicationdata.Status)(nil), // 317: replicationdata.Status + (*tabletmanagerdata.VDiffResponse)(nil), // 318: tabletmanagerdata.VDiffResponse } var file_vtctldata_proto_depIdxs = []int32{ - 271, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event + 284, // 0: vtctldata.ExecuteVtctlCommandResponse.event:type_name -> logutil.Event 6, // 1: vtctldata.MaterializeSettings.table_settings:type_name -> vtctldata.TableMaterializeSettings 0, // 2: vtctldata.MaterializeSettings.materialization_intent:type_name -> vtctldata.MaterializationIntent - 272, // 3: vtctldata.MaterializeSettings.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 273, // 4: vtctldata.Keyspace.keyspace:type_name -> topodata.Keyspace - 2, // 5: vtctldata.SchemaMigration.strategy:type_name -> vtctldata.SchemaMigration.Strategy - 274, // 6: vtctldata.SchemaMigration.added_at:type_name -> vttime.Time - 274, // 7: vtctldata.SchemaMigration.requested_at:type_name -> vttime.Time - 274, // 8: vtctldata.SchemaMigration.ready_at:type_name -> vttime.Time - 274, // 9: vtctldata.SchemaMigration.started_at:type_name -> vttime.Time - 274, // 10: vtctldata.SchemaMigration.liveness_timestamp:type_name -> vttime.Time - 274, // 11: vtctldata.SchemaMigration.completed_at:type_name -> vttime.Time - 274, // 12: vtctldata.SchemaMigration.cleaned_up_at:type_name -> vttime.Time - 3, // 13: vtctldata.SchemaMigration.status:type_name -> vtctldata.SchemaMigration.Status - 275, // 14: vtctldata.SchemaMigration.tablet:type_name -> topodata.TabletAlias - 276, // 15: vtctldata.SchemaMigration.artifact_retention:type_name -> vttime.Duration - 274, 
// 16: vtctldata.SchemaMigration.last_throttled_at:type_name -> vttime.Time - 274, // 17: vtctldata.SchemaMigration.cancelled_at:type_name -> vttime.Time - 274, // 18: vtctldata.SchemaMigration.reviewed_at:type_name -> vttime.Time - 274, // 19: vtctldata.SchemaMigration.ready_to_complete_at:type_name -> vttime.Time - 277, // 20: vtctldata.Shard.shard:type_name -> topodata.Shard - 237, // 21: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation - 237, // 22: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation - 236, // 23: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry - 278, // 24: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 279, // 25: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules - 280, // 26: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 276, // 27: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration - 281, // 28: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID - 243, // 29: vtctldata.ApplySchemaResponse.rows_affected_by_shard:type_name -> vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry - 282, // 30: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace - 282, // 31: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 275, // 32: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 33: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 271, // 34: vtctldata.BackupResponse.event:type_name -> logutil.Event - 244, // 35: vtctldata.CancelSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry - 275, // 36: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias - 283, // 37: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> 
topodata.TabletType - 284, // 38: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet - 284, // 39: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet - 245, // 40: vtctldata.CleanupSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry - 246, // 41: vtctldata.CompleteSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry - 285, // 42: vtctldata.CreateKeyspaceRequest.served_froms:type_name -> topodata.Keyspace.ServedFrom - 286, // 43: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType - 274, // 44: vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time - 8, // 45: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 8, // 46: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace - 10, // 47: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard - 10, // 48: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard - 275, // 49: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 275, // 50: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 275, // 51: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias - 276, // 52: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 275, // 53: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 271, // 54: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event - 275, // 55: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias - 287, // 56: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult - 275, // 57: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias - 
287, // 58: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult - 275, // 59: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias - 288, // 60: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest - 289, // 61: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse - 247, // 62: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry - 290, // 63: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo - 278, // 64: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 248, // 65: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry - 275, // 66: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias - 291, // 67: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus - 8, // 68: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace - 8, // 69: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace - 275, // 70: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias - 292, // 71: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions - 279, // 72: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules - 275, // 73: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 293, // 74: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition - 3, // 75: vtctldata.GetSchemaMigrationsRequest.status:type_name -> vtctldata.SchemaMigration.Status - 276, // 76: vtctldata.GetSchemaMigrationsRequest.recent:type_name -> vttime.Duration - 1, // 77: vtctldata.GetSchemaMigrationsRequest.order:type_name -> vtctldata.QueryOrdering - 9, // 78: vtctldata.GetSchemaMigrationsResponse.migrations:type_name -> 
vtctldata.SchemaMigration - 10, // 79: vtctldata.GetShardResponse.shard:type_name -> vtctldata.Shard - 280, // 80: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules - 249, // 81: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry - 251, // 82: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry - 294, // 83: vtctldata.UpdateThrottlerConfigRequest.throttled_app:type_name -> topodata.ThrottledAppRule - 295, // 84: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema - 252, // 85: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry - 275, // 86: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 284, // 87: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet - 275, // 88: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias - 283, // 89: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType - 284, // 90: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet - 103, // 91: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell - 275, // 92: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias - 282, // 93: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace - 11, // 94: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow - 275, // 95: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias - 276, // 96: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration - 271, // 97: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event - 253, // 98: vtctldata.LaunchSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry - 
282, // 99: vtctldata.LookupVindexCreateRequest.vindex:type_name -> vschema.Keyspace - 283, // 100: vtctldata.LookupVindexCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 101: vtctldata.LookupVindexCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 7, // 102: vtctldata.MaterializeCreateRequest.settings:type_name -> vtctldata.MaterializeSettings - 283, // 103: vtctldata.MigrateCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 104: vtctldata.MigrateCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 283, // 105: vtctldata.MoveTablesCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 106: vtctldata.MoveTablesCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 254, // 107: vtctldata.MoveTablesCreateResponse.details:type_name -> vtctldata.MoveTablesCreateResponse.TabletInfo - 275, // 108: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 109: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias - 275, // 110: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias - 276, // 111: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration - 275, // 112: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias - 271, // 113: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event - 275, // 114: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 115: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias - 271, // 116: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event - 271, // 117: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event - 275, // 118: vtctldata.ReparentTabletRequest.tablet:type_name -> 
topodata.TabletAlias - 275, // 119: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias - 283, // 120: vtctldata.ReshardCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 121: vtctldata.ReshardCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 275, // 122: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias - 274, // 123: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time - 274, // 124: vtctldata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time - 275, // 125: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias - 271, // 126: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event - 255, // 127: vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry - 275, // 128: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias - 273, // 129: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace - 283, // 130: vtctldata.SetKeyspaceServedFromRequest.tablet_type:type_name -> topodata.TabletType - 273, // 131: vtctldata.SetKeyspaceServedFromResponse.keyspace:type_name -> topodata.Keyspace - 273, // 132: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace - 277, // 133: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard - 283, // 134: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType - 277, // 135: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard - 275, // 136: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 137: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias - 296, // 138: vtctldata.ShardReplicationFixResponse.error:type_name -> 
topodata.ShardReplicationError - 256, // 139: vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry - 257, // 140: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry - 275, // 141: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 142: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias - 276, // 143: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration - 297, // 144: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange - 277, // 145: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard - 277, // 146: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard - 275, // 147: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 148: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias - 275, // 149: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias - 275, // 150: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias - 275, // 151: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias - 278, // 152: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo - 278, // 153: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo - 298, // 154: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias - 298, // 155: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias - 258, // 156: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry - 259, // 157: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry - 
260, // 158: vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry - 261, // 159: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry - 262, // 160: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry - 283, // 161: vtctldata.VDiffCreateRequest.tablet_types:type_name -> topodata.TabletType - 272, // 162: vtctldata.VDiffCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference - 276, // 163: vtctldata.VDiffCreateRequest.filtered_replication_wait_time:type_name -> vttime.Duration - 276, // 164: vtctldata.VDiffCreateRequest.wait_update_interval:type_name -> vttime.Duration - 263, // 165: vtctldata.VDiffShowResponse.tablet_responses:type_name -> vtctldata.VDiffShowResponse.TabletResponsesEntry - 264, // 166: vtctldata.WorkflowDeleteResponse.details:type_name -> vtctldata.WorkflowDeleteResponse.TabletInfo - 268, // 167: vtctldata.WorkflowStatusResponse.table_copy_state:type_name -> vtctldata.WorkflowStatusResponse.TableCopyStateEntry - 269, // 168: vtctldata.WorkflowStatusResponse.shard_streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamsEntry - 283, // 169: vtctldata.WorkflowSwitchTrafficRequest.tablet_types:type_name -> topodata.TabletType - 276, // 170: vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed:type_name -> vttime.Duration - 276, // 171: vtctldata.WorkflowSwitchTrafficRequest.timeout:type_name -> vttime.Duration - 299, // 172: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVReplicationWorkflowRequest - 270, // 173: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo - 238, // 174: vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream - 239, // 175: 
vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream - 300, // 176: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl - 275, // 177: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias - 301, // 178: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource - 274, // 179: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time - 274, // 180: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time - 240, // 181: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState - 241, // 182: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log - 242, // 183: vtctldata.Workflow.Stream.throttler_status:type_name -> vtctldata.Workflow.Stream.ThrottlerStatus - 274, // 184: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time - 274, // 185: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time - 274, // 186: vtctldata.Workflow.Stream.ThrottlerStatus.time_throttled:type_name -> vttime.Time - 10, // 187: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard - 298, // 188: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias - 250, // 189: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList - 302, // 190: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace - 295, // 191: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema - 275, // 192: vtctldata.MoveTablesCreateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias - 303, // 193: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status - 284, // 194: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet - 207, // 195: 
vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse - 211, // 196: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 211, // 197: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 211, // 198: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 211, // 199: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse - 304, // 200: vtctldata.VDiffShowResponse.TabletResponsesEntry.value:type_name -> tabletmanagerdata.VDiffResponse - 275, // 201: vtctldata.WorkflowDeleteResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias - 275, // 202: vtctldata.WorkflowStatusResponse.ShardStreamState.tablet:type_name -> topodata.TabletAlias - 266, // 203: vtctldata.WorkflowStatusResponse.ShardStreams.streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamState - 265, // 204: vtctldata.WorkflowStatusResponse.TableCopyStateEntry.value:type_name -> vtctldata.WorkflowStatusResponse.TableCopyState - 267, // 205: vtctldata.WorkflowStatusResponse.ShardStreamsEntry.value:type_name -> vtctldata.WorkflowStatusResponse.ShardStreams - 275, // 206: vtctldata.WorkflowUpdateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias - 207, // [207:207] is the sub-list for method output_type - 207, // [207:207] is the sub-list for method input_type - 207, // [207:207] is the sub-list for extension type_name - 207, // [207:207] is the sub-list for extension extendee - 0, // [0:207] is the sub-list for field type_name + 285, // 3: vtctldata.MaterializeSettings.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 11, // 4: vtctldata.MaterializeSettings.workflow_options:type_name -> vtctldata.WorkflowOptions + 286, // 5: vtctldata.Keyspace.keyspace:type_name -> 
topodata.Keyspace + 2, // 6: vtctldata.SchemaMigration.strategy:type_name -> vtctldata.SchemaMigration.Strategy + 287, // 7: vtctldata.SchemaMigration.added_at:type_name -> vttime.Time + 287, // 8: vtctldata.SchemaMigration.requested_at:type_name -> vttime.Time + 287, // 9: vtctldata.SchemaMigration.ready_at:type_name -> vttime.Time + 287, // 10: vtctldata.SchemaMigration.started_at:type_name -> vttime.Time + 287, // 11: vtctldata.SchemaMigration.liveness_timestamp:type_name -> vttime.Time + 287, // 12: vtctldata.SchemaMigration.completed_at:type_name -> vttime.Time + 287, // 13: vtctldata.SchemaMigration.cleaned_up_at:type_name -> vttime.Time + 3, // 14: vtctldata.SchemaMigration.status:type_name -> vtctldata.SchemaMigration.Status + 288, // 15: vtctldata.SchemaMigration.tablet:type_name -> topodata.TabletAlias + 289, // 16: vtctldata.SchemaMigration.artifact_retention:type_name -> vttime.Duration + 287, // 17: vtctldata.SchemaMigration.last_throttled_at:type_name -> vttime.Time + 287, // 18: vtctldata.SchemaMigration.cancelled_at:type_name -> vttime.Time + 287, // 19: vtctldata.SchemaMigration.reviewed_at:type_name -> vttime.Time + 287, // 20: vtctldata.SchemaMigration.ready_to_complete_at:type_name -> vttime.Time + 290, // 21: vtctldata.Shard.shard:type_name -> topodata.Shard + 246, // 22: vtctldata.Workflow.source:type_name -> vtctldata.Workflow.ReplicationLocation + 246, // 23: vtctldata.Workflow.target:type_name -> vtctldata.Workflow.ReplicationLocation + 245, // 24: vtctldata.Workflow.shard_streams:type_name -> vtctldata.Workflow.ShardStreamsEntry + 11, // 25: vtctldata.Workflow.options:type_name -> vtctldata.WorkflowOptions + 291, // 26: vtctldata.AddCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 292, // 27: vtctldata.ApplyKeyspaceRoutingRulesRequest.keyspace_routing_rules:type_name -> vschema.KeyspaceRoutingRules + 292, // 28: vtctldata.ApplyKeyspaceRoutingRulesResponse.keyspace_routing_rules:type_name -> vschema.KeyspaceRoutingRules + 293, // 
29: vtctldata.ApplyRoutingRulesRequest.routing_rules:type_name -> vschema.RoutingRules + 294, // 30: vtctldata.ApplyShardRoutingRulesRequest.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 289, // 31: vtctldata.ApplySchemaRequest.wait_replicas_timeout:type_name -> vttime.Duration + 295, // 32: vtctldata.ApplySchemaRequest.caller_id:type_name -> vtrpc.CallerID + 252, // 33: vtctldata.ApplySchemaResponse.rows_affected_by_shard:type_name -> vtctldata.ApplySchemaResponse.RowsAffectedByShardEntry + 296, // 34: vtctldata.ApplyVSchemaRequest.v_schema:type_name -> vschema.Keyspace + 296, // 35: vtctldata.ApplyVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 253, // 36: vtctldata.ApplyVSchemaResponse.unknown_vindex_params:type_name -> vtctldata.ApplyVSchemaResponse.UnknownVindexParamsEntry + 288, // 37: vtctldata.BackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 38: vtctldata.BackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 284, // 39: vtctldata.BackupResponse.event:type_name -> logutil.Event + 255, // 40: vtctldata.CancelSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CancelSchemaMigrationResponse.RowsAffectedByShardEntry + 288, // 41: vtctldata.ChangeTabletTypeRequest.tablet_alias:type_name -> topodata.TabletAlias + 297, // 42: vtctldata.ChangeTabletTypeRequest.db_type:type_name -> topodata.TabletType + 298, // 43: vtctldata.ChangeTabletTypeResponse.before_tablet:type_name -> topodata.Tablet + 298, // 44: vtctldata.ChangeTabletTypeResponse.after_tablet:type_name -> topodata.Tablet + 256, // 45: vtctldata.CleanupSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CleanupSchemaMigrationResponse.RowsAffectedByShardEntry + 257, // 46: vtctldata.CompleteSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.CompleteSchemaMigrationResponse.RowsAffectedByShardEntry + 299, // 47: vtctldata.CreateKeyspaceRequest.type:type_name -> topodata.KeyspaceType + 287, // 48: 
vtctldata.CreateKeyspaceRequest.snapshot_time:type_name -> vttime.Time + 8, // 49: vtctldata.CreateKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 8, // 50: vtctldata.CreateShardResponse.keyspace:type_name -> vtctldata.Keyspace + 10, // 51: vtctldata.CreateShardResponse.shard:type_name -> vtctldata.Shard + 10, // 52: vtctldata.DeleteShardsRequest.shards:type_name -> vtctldata.Shard + 288, // 53: vtctldata.DeleteTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 288, // 54: vtctldata.EmergencyReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 288, // 55: vtctldata.EmergencyReparentShardRequest.ignore_replicas:type_name -> topodata.TabletAlias + 289, // 56: vtctldata.EmergencyReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 288, // 57: vtctldata.EmergencyReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 284, // 58: vtctldata.EmergencyReparentShardResponse.events:type_name -> logutil.Event + 288, // 59: vtctldata.ExecuteFetchAsAppRequest.tablet_alias:type_name -> topodata.TabletAlias + 300, // 60: vtctldata.ExecuteFetchAsAppResponse.result:type_name -> query.QueryResult + 288, // 61: vtctldata.ExecuteFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias + 300, // 62: vtctldata.ExecuteFetchAsDBAResponse.result:type_name -> query.QueryResult + 288, // 63: vtctldata.ExecuteHookRequest.tablet_alias:type_name -> topodata.TabletAlias + 301, // 64: vtctldata.ExecuteHookRequest.tablet_hook_request:type_name -> tabletmanagerdata.ExecuteHookRequest + 302, // 65: vtctldata.ExecuteHookResponse.hook_result:type_name -> tabletmanagerdata.ExecuteHookResponse + 288, // 66: vtctldata.ExecuteMultiFetchAsDBARequest.tablet_alias:type_name -> topodata.TabletAlias + 300, // 67: vtctldata.ExecuteMultiFetchAsDBAResponse.results:type_name -> query.QueryResult + 258, // 68: vtctldata.FindAllShardsInKeyspaceResponse.shards:type_name -> vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry + 
259, // 69: vtctldata.ForceCutOverSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.ForceCutOverSchemaMigrationResponse.RowsAffectedByShardEntry + 303, // 70: vtctldata.GetBackupsResponse.backups:type_name -> mysqlctl.BackupInfo + 291, // 71: vtctldata.GetCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 260, // 72: vtctldata.GetCellsAliasesResponse.aliases:type_name -> vtctldata.GetCellsAliasesResponse.AliasesEntry + 288, // 73: vtctldata.GetFullStatusRequest.tablet_alias:type_name -> topodata.TabletAlias + 304, // 74: vtctldata.GetFullStatusResponse.status:type_name -> replicationdata.FullStatus + 8, // 75: vtctldata.GetKeyspacesResponse.keyspaces:type_name -> vtctldata.Keyspace + 8, // 76: vtctldata.GetKeyspaceResponse.keyspace:type_name -> vtctldata.Keyspace + 288, // 77: vtctldata.GetPermissionsRequest.tablet_alias:type_name -> topodata.TabletAlias + 305, // 78: vtctldata.GetPermissionsResponse.permissions:type_name -> tabletmanagerdata.Permissions + 292, // 79: vtctldata.GetKeyspaceRoutingRulesResponse.keyspace_routing_rules:type_name -> vschema.KeyspaceRoutingRules + 293, // 80: vtctldata.GetRoutingRulesResponse.routing_rules:type_name -> vschema.RoutingRules + 288, // 81: vtctldata.GetSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 306, // 82: vtctldata.GetSchemaResponse.schema:type_name -> tabletmanagerdata.SchemaDefinition + 3, // 83: vtctldata.GetSchemaMigrationsRequest.status:type_name -> vtctldata.SchemaMigration.Status + 289, // 84: vtctldata.GetSchemaMigrationsRequest.recent:type_name -> vttime.Duration + 1, // 85: vtctldata.GetSchemaMigrationsRequest.order:type_name -> vtctldata.QueryOrdering + 9, // 86: vtctldata.GetSchemaMigrationsResponse.migrations:type_name -> vtctldata.SchemaMigration + 261, // 87: vtctldata.GetShardReplicationResponse.shard_replication_by_cell:type_name -> vtctldata.GetShardReplicationResponse.ShardReplicationByCellEntry + 10, // 88: vtctldata.GetShardResponse.shard:type_name -> 
vtctldata.Shard + 294, // 89: vtctldata.GetShardRoutingRulesResponse.shard_routing_rules:type_name -> vschema.ShardRoutingRules + 262, // 90: vtctldata.GetSrvKeyspaceNamesResponse.names:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry + 264, // 91: vtctldata.GetSrvKeyspacesResponse.srv_keyspaces:type_name -> vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry + 307, // 92: vtctldata.UpdateThrottlerConfigRequest.throttled_app:type_name -> topodata.ThrottledAppRule + 308, // 93: vtctldata.GetSrvVSchemaResponse.srv_v_schema:type_name -> vschema.SrvVSchema + 265, // 94: vtctldata.GetSrvVSchemasResponse.srv_v_schemas:type_name -> vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry + 288, // 95: vtctldata.GetTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 298, // 96: vtctldata.GetTabletResponse.tablet:type_name -> topodata.Tablet + 288, // 97: vtctldata.GetTabletsRequest.tablet_aliases:type_name -> topodata.TabletAlias + 297, // 98: vtctldata.GetTabletsRequest.tablet_type:type_name -> topodata.TabletType + 298, // 99: vtctldata.GetTabletsResponse.tablets:type_name -> topodata.Tablet + 114, // 100: vtctldata.GetTopologyPathResponse.cell:type_name -> vtctldata.TopologyCell + 288, // 101: vtctldata.GetVersionRequest.tablet_alias:type_name -> topodata.TabletAlias + 296, // 102: vtctldata.GetVSchemaResponse.v_schema:type_name -> vschema.Keyspace + 12, // 103: vtctldata.GetWorkflowsResponse.workflows:type_name -> vtctldata.Workflow + 288, // 104: vtctldata.InitShardPrimaryRequest.primary_elect_tablet_alias:type_name -> topodata.TabletAlias + 289, // 105: vtctldata.InitShardPrimaryRequest.wait_replicas_timeout:type_name -> vttime.Duration + 284, // 106: vtctldata.InitShardPrimaryResponse.events:type_name -> logutil.Event + 266, // 107: vtctldata.LaunchSchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.LaunchSchemaMigrationResponse.RowsAffectedByShardEntry + 296, // 108: vtctldata.LookupVindexCreateRequest.vindex:type_name -> 
vschema.Keyspace + 297, // 109: vtctldata.LookupVindexCreateRequest.tablet_types:type_name -> topodata.TabletType + 285, // 110: vtctldata.LookupVindexCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 7, // 111: vtctldata.MaterializeCreateRequest.settings:type_name -> vtctldata.MaterializeSettings + 297, // 112: vtctldata.MigrateCreateRequest.tablet_types:type_name -> topodata.TabletType + 285, // 113: vtctldata.MigrateCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 297, // 114: vtctldata.MoveTablesCreateRequest.tablet_types:type_name -> topodata.TabletType + 285, // 115: vtctldata.MoveTablesCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 11, // 116: vtctldata.MoveTablesCreateRequest.workflow_options:type_name -> vtctldata.WorkflowOptions + 267, // 117: vtctldata.MoveTablesCreateResponse.details:type_name -> vtctldata.MoveTablesCreateResponse.TabletInfo + 288, // 118: vtctldata.PingTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 119: vtctldata.PlannedReparentShardRequest.new_primary:type_name -> topodata.TabletAlias + 288, // 120: vtctldata.PlannedReparentShardRequest.avoid_primary:type_name -> topodata.TabletAlias + 289, // 121: vtctldata.PlannedReparentShardRequest.wait_replicas_timeout:type_name -> vttime.Duration + 289, // 122: vtctldata.PlannedReparentShardRequest.tolerable_replication_lag:type_name -> vttime.Duration + 288, // 123: vtctldata.PlannedReparentShardResponse.promoted_primary:type_name -> topodata.TabletAlias + 284, // 124: vtctldata.PlannedReparentShardResponse.events:type_name -> logutil.Event + 288, // 125: vtctldata.RefreshStateRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 126: vtctldata.ReloadSchemaRequest.tablet_alias:type_name -> topodata.TabletAlias + 284, // 127: vtctldata.ReloadSchemaKeyspaceResponse.events:type_name -> logutil.Event + 284, // 
128: vtctldata.ReloadSchemaShardResponse.events:type_name -> logutil.Event + 288, // 129: vtctldata.ReparentTabletRequest.tablet:type_name -> topodata.TabletAlias + 288, // 130: vtctldata.ReparentTabletResponse.primary:type_name -> topodata.TabletAlias + 297, // 131: vtctldata.ReshardCreateRequest.tablet_types:type_name -> topodata.TabletType + 285, // 132: vtctldata.ReshardCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 288, // 133: vtctldata.RestoreFromBackupRequest.tablet_alias:type_name -> topodata.TabletAlias + 287, // 134: vtctldata.RestoreFromBackupRequest.backup_time:type_name -> vttime.Time + 287, // 135: vtctldata.RestoreFromBackupRequest.restore_to_timestamp:type_name -> vttime.Time + 288, // 136: vtctldata.RestoreFromBackupResponse.tablet_alias:type_name -> topodata.TabletAlias + 284, // 137: vtctldata.RestoreFromBackupResponse.event:type_name -> logutil.Event + 268, // 138: vtctldata.RetrySchemaMigrationResponse.rows_affected_by_shard:type_name -> vtctldata.RetrySchemaMigrationResponse.RowsAffectedByShardEntry + 288, // 139: vtctldata.RunHealthCheckRequest.tablet_alias:type_name -> topodata.TabletAlias + 286, // 140: vtctldata.SetKeyspaceDurabilityPolicyResponse.keyspace:type_name -> topodata.Keyspace + 286, // 141: vtctldata.SetKeyspaceShardingInfoResponse.keyspace:type_name -> topodata.Keyspace + 290, // 142: vtctldata.SetShardIsPrimaryServingResponse.shard:type_name -> topodata.Shard + 297, // 143: vtctldata.SetShardTabletControlRequest.tablet_type:type_name -> topodata.TabletType + 290, // 144: vtctldata.SetShardTabletControlResponse.shard:type_name -> topodata.Shard + 288, // 145: vtctldata.SetWritableRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 146: vtctldata.ShardReplicationAddRequest.tablet_alias:type_name -> topodata.TabletAlias + 309, // 147: vtctldata.ShardReplicationFixResponse.error:type_name -> topodata.ShardReplicationError + 269, // 148: 
vtctldata.ShardReplicationPositionsResponse.replication_statuses:type_name -> vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry + 270, // 149: vtctldata.ShardReplicationPositionsResponse.tablet_map:type_name -> vtctldata.ShardReplicationPositionsResponse.TabletMapEntry + 288, // 150: vtctldata.ShardReplicationRemoveRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 151: vtctldata.SleepTabletRequest.tablet_alias:type_name -> topodata.TabletAlias + 289, // 152: vtctldata.SleepTabletRequest.duration:type_name -> vttime.Duration + 310, // 153: vtctldata.SourceShardAddRequest.key_range:type_name -> topodata.KeyRange + 290, // 154: vtctldata.SourceShardAddResponse.shard:type_name -> topodata.Shard + 290, // 155: vtctldata.SourceShardDeleteResponse.shard:type_name -> topodata.Shard + 288, // 156: vtctldata.StartReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 157: vtctldata.StopReplicationRequest.tablet_alias:type_name -> topodata.TabletAlias + 288, // 158: vtctldata.TabletExternallyReparentedRequest.tablet:type_name -> topodata.TabletAlias + 288, // 159: vtctldata.TabletExternallyReparentedResponse.new_primary:type_name -> topodata.TabletAlias + 288, // 160: vtctldata.TabletExternallyReparentedResponse.old_primary:type_name -> topodata.TabletAlias + 291, // 161: vtctldata.UpdateCellInfoRequest.cell_info:type_name -> topodata.CellInfo + 291, // 162: vtctldata.UpdateCellInfoResponse.cell_info:type_name -> topodata.CellInfo + 311, // 163: vtctldata.UpdateCellsAliasRequest.cells_alias:type_name -> topodata.CellsAlias + 311, // 164: vtctldata.UpdateCellsAliasResponse.cells_alias:type_name -> topodata.CellsAlias + 271, // 165: vtctldata.ValidateResponse.results_by_keyspace:type_name -> vtctldata.ValidateResponse.ResultsByKeyspaceEntry + 272, // 166: vtctldata.ValidateKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry + 273, // 167: 
vtctldata.ValidateSchemaKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry + 274, // 168: vtctldata.ValidateVersionKeyspaceResponse.results_by_shard:type_name -> vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry + 275, // 169: vtctldata.ValidateVSchemaResponse.results_by_shard:type_name -> vtctldata.ValidateVSchemaResponse.ResultsByShardEntry + 297, // 170: vtctldata.VDiffCreateRequest.tablet_types:type_name -> topodata.TabletType + 285, // 171: vtctldata.VDiffCreateRequest.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 289, // 172: vtctldata.VDiffCreateRequest.filtered_replication_wait_time:type_name -> vttime.Duration + 289, // 173: vtctldata.VDiffCreateRequest.wait_update_interval:type_name -> vttime.Duration + 289, // 174: vtctldata.VDiffCreateRequest.max_diff_duration:type_name -> vttime.Duration + 276, // 175: vtctldata.VDiffShowResponse.tablet_responses:type_name -> vtctldata.VDiffShowResponse.TabletResponsesEntry + 277, // 176: vtctldata.WorkflowDeleteResponse.details:type_name -> vtctldata.WorkflowDeleteResponse.TabletInfo + 281, // 177: vtctldata.WorkflowStatusResponse.table_copy_state:type_name -> vtctldata.WorkflowStatusResponse.TableCopyStateEntry + 282, // 178: vtctldata.WorkflowStatusResponse.shard_streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamsEntry + 297, // 179: vtctldata.WorkflowSwitchTrafficRequest.tablet_types:type_name -> topodata.TabletType + 289, // 180: vtctldata.WorkflowSwitchTrafficRequest.max_replication_lag_allowed:type_name -> vttime.Duration + 289, // 181: vtctldata.WorkflowSwitchTrafficRequest.timeout:type_name -> vttime.Duration + 312, // 182: vtctldata.WorkflowUpdateRequest.tablet_request:type_name -> tabletmanagerdata.UpdateVReplicationWorkflowRequest + 283, // 183: vtctldata.WorkflowUpdateResponse.details:type_name -> vtctldata.WorkflowUpdateResponse.TabletInfo + 247, // 184: 
vtctldata.Workflow.ShardStreamsEntry.value:type_name -> vtctldata.Workflow.ShardStream + 248, // 185: vtctldata.Workflow.ShardStream.streams:type_name -> vtctldata.Workflow.Stream + 313, // 186: vtctldata.Workflow.ShardStream.tablet_controls:type_name -> topodata.Shard.TabletControl + 288, // 187: vtctldata.Workflow.Stream.tablet:type_name -> topodata.TabletAlias + 314, // 188: vtctldata.Workflow.Stream.binlog_source:type_name -> binlogdata.BinlogSource + 287, // 189: vtctldata.Workflow.Stream.transaction_timestamp:type_name -> vttime.Time + 287, // 190: vtctldata.Workflow.Stream.time_updated:type_name -> vttime.Time + 249, // 191: vtctldata.Workflow.Stream.copy_states:type_name -> vtctldata.Workflow.Stream.CopyState + 250, // 192: vtctldata.Workflow.Stream.logs:type_name -> vtctldata.Workflow.Stream.Log + 251, // 193: vtctldata.Workflow.Stream.throttler_status:type_name -> vtctldata.Workflow.Stream.ThrottlerStatus + 297, // 194: vtctldata.Workflow.Stream.tablet_types:type_name -> topodata.TabletType + 285, // 195: vtctldata.Workflow.Stream.tablet_selection_preference:type_name -> tabletmanagerdata.TabletSelectionPreference + 287, // 196: vtctldata.Workflow.Stream.Log.created_at:type_name -> vttime.Time + 287, // 197: vtctldata.Workflow.Stream.Log.updated_at:type_name -> vttime.Time + 287, // 198: vtctldata.Workflow.Stream.ThrottlerStatus.time_throttled:type_name -> vttime.Time + 254, // 199: vtctldata.ApplyVSchemaResponse.UnknownVindexParamsEntry.value:type_name -> vtctldata.ApplyVSchemaResponse.ParamList + 10, // 200: vtctldata.FindAllShardsInKeyspaceResponse.ShardsEntry.value:type_name -> vtctldata.Shard + 311, // 201: vtctldata.GetCellsAliasesResponse.AliasesEntry.value:type_name -> topodata.CellsAlias + 315, // 202: vtctldata.GetShardReplicationResponse.ShardReplicationByCellEntry.value:type_name -> topodata.ShardReplication + 263, // 203: vtctldata.GetSrvKeyspaceNamesResponse.NamesEntry.value:type_name -> vtctldata.GetSrvKeyspaceNamesResponse.NameList + 316, 
// 204: vtctldata.GetSrvKeyspacesResponse.SrvKeyspacesEntry.value:type_name -> topodata.SrvKeyspace + 308, // 205: vtctldata.GetSrvVSchemasResponse.SrvVSchemasEntry.value:type_name -> vschema.SrvVSchema + 288, // 206: vtctldata.MoveTablesCreateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 317, // 207: vtctldata.ShardReplicationPositionsResponse.ReplicationStatusesEntry.value:type_name -> replicationdata.Status + 298, // 208: vtctldata.ShardReplicationPositionsResponse.TabletMapEntry.value:type_name -> topodata.Tablet + 216, // 209: vtctldata.ValidateResponse.ResultsByKeyspaceEntry.value:type_name -> vtctldata.ValidateKeyspaceResponse + 220, // 210: vtctldata.ValidateKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 220, // 211: vtctldata.ValidateSchemaKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 220, // 212: vtctldata.ValidateVersionKeyspaceResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 220, // 213: vtctldata.ValidateVSchemaResponse.ResultsByShardEntry.value:type_name -> vtctldata.ValidateShardResponse + 318, // 214: vtctldata.VDiffShowResponse.TabletResponsesEntry.value:type_name -> tabletmanagerdata.VDiffResponse + 288, // 215: vtctldata.WorkflowDeleteResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 288, // 216: vtctldata.WorkflowStatusResponse.ShardStreamState.tablet:type_name -> topodata.TabletAlias + 279, // 217: vtctldata.WorkflowStatusResponse.ShardStreams.streams:type_name -> vtctldata.WorkflowStatusResponse.ShardStreamState + 278, // 218: vtctldata.WorkflowStatusResponse.TableCopyStateEntry.value:type_name -> vtctldata.WorkflowStatusResponse.TableCopyState + 280, // 219: vtctldata.WorkflowStatusResponse.ShardStreamsEntry.value:type_name -> vtctldata.WorkflowStatusResponse.ShardStreams + 288, // 220: vtctldata.WorkflowUpdateResponse.TabletInfo.tablet:type_name -> topodata.TabletAlias + 221, // [221:221] is 
the sub-list for method output_type + 221, // [221:221] is the sub-list for method input_type + 221, // [221:221] is the sub-list for extension type_name + 221, // [221:221] is the sub-list for extension extendee + 0, // [0:221] is the sub-list for field type_name } func init() { file_vtctldata_proto_init() } @@ -18289,7 +19138,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Workflow); i { + switch v := v.(*WorkflowOptions); i { case 0: return &v.state case 1: @@ -18301,7 +19150,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellInfoRequest); i { + switch v := v.(*Workflow); i { case 0: return &v.state case 1: @@ -18313,7 +19162,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellInfoResponse); i { + switch v := v.(*AddCellInfoRequest); i { case 0: return &v.state case 1: @@ -18325,7 +19174,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellsAliasRequest); i { + switch v := v.(*AddCellInfoResponse); i { case 0: return &v.state case 1: @@ -18337,7 +19186,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddCellsAliasResponse); i { + switch v := v.(*AddCellsAliasRequest); i { case 0: return &v.state case 1: @@ -18349,7 +19198,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRoutingRulesRequest); i { + switch v := v.(*AddCellsAliasResponse); i { case 0: return &v.state case 1: @@ -18361,7 +19210,7 @@ func file_vtctldata_proto_init() { } } 
file_vtctldata_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyRoutingRulesResponse); i { + switch v := v.(*ApplyKeyspaceRoutingRulesRequest); i { case 0: return &v.state case 1: @@ -18373,7 +19222,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyShardRoutingRulesRequest); i { + switch v := v.(*ApplyKeyspaceRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -18385,7 +19234,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyShardRoutingRulesResponse); i { + switch v := v.(*ApplyRoutingRulesRequest); i { case 0: return &v.state case 1: @@ -18397,7 +19246,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplySchemaRequest); i { + switch v := v.(*ApplyRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -18409,7 +19258,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplySchemaResponse); i { + switch v := v.(*ApplyShardRoutingRulesRequest); i { case 0: return &v.state case 1: @@ -18421,7 +19270,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyVSchemaRequest); i { + switch v := v.(*ApplyShardRoutingRulesResponse); i { case 0: return &v.state case 1: @@ -18433,7 +19282,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyVSchemaResponse); i { + switch v := v.(*ApplySchemaRequest); i { case 0: return &v.state case 1: @@ -18445,7 +19294,7 @@ func file_vtctldata_proto_init() { } } 
file_vtctldata_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupRequest); i { + switch v := v.(*ApplySchemaResponse); i { case 0: return &v.state case 1: @@ -18457,7 +19306,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupResponse); i { + switch v := v.(*ApplyVSchemaRequest); i { case 0: return &v.state case 1: @@ -18469,7 +19318,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BackupShardRequest); i { + switch v := v.(*ApplyVSchemaResponse); i { case 0: return &v.state case 1: @@ -18481,7 +19330,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelSchemaMigrationRequest); i { + switch v := v.(*BackupRequest); i { case 0: return &v.state case 1: @@ -18493,7 +19342,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelSchemaMigrationResponse); i { + switch v := v.(*BackupResponse); i { case 0: return &v.state case 1: @@ -18505,7 +19354,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeRequest); i { + switch v := v.(*BackupShardRequest); i { case 0: return &v.state case 1: @@ -18517,7 +19366,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChangeTabletTypeResponse); i { + switch v := v.(*CancelSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -18529,7 +19378,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*CleanupSchemaMigrationRequest); i { + switch v := v.(*CancelSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -18541,7 +19390,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CleanupSchemaMigrationResponse); i { + switch v := v.(*ChangeTabletTypeRequest); i { case 0: return &v.state case 1: @@ -18553,7 +19402,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompleteSchemaMigrationRequest); i { + switch v := v.(*ChangeTabletTypeResponse); i { case 0: return &v.state case 1: @@ -18565,7 +19414,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompleteSchemaMigrationResponse); i { + switch v := v.(*CleanupSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -18577,7 +19426,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceRequest); i { + switch v := v.(*CleanupSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -18589,7 +19438,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateKeyspaceResponse); i { + switch v := v.(*CompleteSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -18601,7 +19450,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateShardRequest); i { + switch v := v.(*CompleteSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -18613,7 +19462,7 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*CreateShardResponse); i { + switch v := v.(*CreateKeyspaceRequest); i { case 0: return &v.state case 1: @@ -18625,6 +19474,42 @@ func file_vtctldata_proto_init() { } } file_vtctldata_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateKeyspaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateShardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateShardResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DeleteCellInfoRequest); i { case 0: return &v.state @@ -18636,8 +19521,56 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellInfoResponse); i { + file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellInfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellsAliasRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCellsAliasResponse); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteKeyspaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteKeyspaceResponse); i { case 0: return &v.state case 1: @@ -18648,8 +19581,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasRequest); i { + file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteShardsRequest); i { case 0: return &v.state case 1: @@ -18660,8 +19593,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCellsAliasResponse); i { + file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteShardsResponse); i { case 0: return &v.state case 1: @@ -18672,8 +19605,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSrvVSchemaRequest); i { case 0: return &v.state case 1: @@ -18684,8 +19617,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*DeleteSrvVSchemaResponse); i { case 0: return &v.state case 1: @@ -18696,8 +19629,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsRequest); i { + file_vtctldata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTabletsRequest); i { case 0: return &v.state case 1: @@ -18708,8 +19641,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteShardsResponse); i { + file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTabletsResponse); i { case 0: return &v.state case 1: @@ -18720,8 +19653,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSrvVSchemaRequest); i { + file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmergencyReparentShardRequest); i { case 0: return &v.state case 1: @@ -18732,8 +19665,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSrvVSchemaResponse); i { + file_vtctldata_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EmergencyReparentShardResponse); i { case 0: return &v.state case 1: @@ -18744,8 +19677,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsRequest); i { + file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsAppRequest); i { case 0: return &v.state case 1: @@ -18756,8 +19689,8 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTabletsResponse); i { + file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsAppResponse); i { case 0: return &v.state case 1: @@ -18768,8 +19701,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardRequest); i { + file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsDBARequest); i { case 0: return &v.state case 1: @@ -18780,8 +19713,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmergencyReparentShardResponse); i { + file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteFetchAsDBAResponse); i { case 0: return &v.state case 1: @@ -18792,8 +19725,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppRequest); i { + file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteHookRequest); i { case 0: return &v.state case 1: @@ -18804,8 +19737,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsAppResponse); i { + file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteHookResponse); i { case 0: return &v.state case 1: @@ -18816,8 +19749,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[51].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsDBARequest); i { + file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteMultiFetchAsDBARequest); i { case 0: return &v.state case 1: @@ -18828,8 +19761,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteFetchAsDBAResponse); i { + file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecuteMultiFetchAsDBAResponse); i { case 0: return &v.state case 1: @@ -18840,8 +19773,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteHookRequest); i { + file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindAllShardsInKeyspaceRequest); i { case 0: return &v.state case 1: @@ -18852,8 +19785,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecuteHookResponse); i { + file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FindAllShardsInKeyspaceResponse); i { case 0: return &v.state case 1: @@ -18864,8 +19797,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FindAllShardsInKeyspaceRequest); i { + file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForceCutOverSchemaMigrationRequest); i { case 0: return &v.state case 1: @@ -18876,8 +19809,8 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*FindAllShardsInKeyspaceResponse); i { + file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForceCutOverSchemaMigrationResponse); i { case 0: return &v.state case 1: @@ -18888,7 +19821,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBackupsRequest); i { case 0: return &v.state @@ -18900,7 +19833,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetBackupsResponse); i { case 0: return &v.state @@ -18912,7 +19845,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellInfoRequest); i { case 0: return &v.state @@ -18924,7 +19857,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellInfoResponse); i { case 0: return &v.state @@ -18936,7 +19869,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellInfoNamesRequest); i { case 0: return &v.state @@ -18948,7 +19881,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + 
file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellInfoNamesResponse); i { case 0: return &v.state @@ -18960,7 +19893,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellsAliasesRequest); i { case 0: return &v.state @@ -18972,7 +19905,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetCellsAliasesResponse); i { case 0: return &v.state @@ -18984,7 +19917,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFullStatusRequest); i { case 0: return &v.state @@ -18996,7 +19929,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetFullStatusResponse); i { case 0: return &v.state @@ -19008,7 +19941,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetKeyspacesRequest); i { case 0: return &v.state @@ -19020,7 +19953,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { 
switch v := v.(*GetKeyspacesResponse); i { case 0: return &v.state @@ -19032,7 +19965,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetKeyspaceRequest); i { case 0: return &v.state @@ -19044,7 +19977,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetKeyspaceResponse); i { case 0: return &v.state @@ -19056,7 +19989,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetPermissionsRequest); i { case 0: return &v.state @@ -19068,7 +20001,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetPermissionsResponse); i { case 0: return &v.state @@ -19080,7 +20013,31 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspaceRoutingRulesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetKeyspaceRoutingRulesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetRoutingRulesRequest); i { case 0: return &v.state @@ -19092,7 +20049,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetRoutingRulesResponse); i { case 0: return &v.state @@ -19104,7 +20061,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaRequest); i { case 0: return &v.state @@ -19116,7 +20073,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaResponse); i { case 0: return &v.state @@ -19128,7 +20085,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaMigrationsRequest); i { case 0: return &v.state @@ -19140,7 +20097,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSchemaMigrationsResponse); i { case 0: return &v.state @@ -19152,7 +20109,31 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { + 
file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardReplicationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetShardReplicationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetShardRequest); i { case 0: return &v.state @@ -19164,7 +20145,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetShardResponse); i { case 0: return &v.state @@ -19176,7 +20157,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetShardRoutingRulesRequest); i { case 0: return &v.state @@ -19188,7 +20169,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetShardRoutingRulesResponse); i { case 0: return &v.state @@ -19200,7 +20181,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspaceNamesRequest); i { case 0: return &v.state @@ -19212,7 +20193,7 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspaceNamesResponse); i { case 0: return &v.state @@ -19224,7 +20205,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspacesRequest); i { case 0: return &v.state @@ -19236,7 +20217,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspacesResponse); i { case 0: return &v.state @@ -19248,7 +20229,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateThrottlerConfigRequest); i { case 0: return &v.state @@ -19260,7 +20241,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateThrottlerConfigResponse); i { case 0: return &v.state @@ -19272,7 +20253,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemaRequest); i { case 0: return &v.state @@ -19284,7 +20265,7 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemaResponse); i { case 0: return &v.state @@ -19296,7 +20277,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemasRequest); i { case 0: return &v.state @@ -19308,7 +20289,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvVSchemasResponse); i { case 0: return &v.state @@ -19320,7 +20301,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletRequest); i { case 0: return &v.state @@ -19332,7 +20313,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletResponse); i { case 0: return &v.state @@ -19344,7 +20325,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletsRequest); i { case 0: return &v.state @@ -19356,7 +20337,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { + 
file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTabletsResponse); i { case 0: return &v.state @@ -19368,7 +20349,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTopologyPathRequest); i { case 0: return &v.state @@ -19380,7 +20361,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetTopologyPathResponse); i { case 0: return &v.state @@ -19392,7 +20373,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TopologyCell); i { case 0: return &v.state @@ -19404,7 +20385,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVSchemaRequest); i { case 0: return &v.state @@ -19416,7 +20397,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVersionRequest); i { case 0: return &v.state @@ -19428,7 +20409,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*GetVersionResponse); i { case 0: return &v.state @@ -19440,7 +20421,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetVSchemaResponse); i { case 0: return &v.state @@ -19452,7 +20433,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowsRequest); i { case 0: return &v.state @@ -19464,7 +20445,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowsResponse); i { case 0: return &v.state @@ -19476,7 +20457,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InitShardPrimaryRequest); i { case 0: return &v.state @@ -19488,7 +20469,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*InitShardPrimaryResponse); i { case 0: return &v.state @@ -19500,7 +20481,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LaunchSchemaMigrationRequest); i { case 0: return &v.state @@ -19512,7 +20493,7 
@@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LaunchSchemaMigrationResponse); i { case 0: return &v.state @@ -19524,7 +20505,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LookupVindexCreateRequest); i { case 0: return &v.state @@ -19536,7 +20517,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LookupVindexCreateResponse); i { case 0: return &v.state @@ -19548,7 +20529,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LookupVindexExternalizeRequest); i { case 0: return &v.state @@ -19560,7 +20541,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*LookupVindexExternalizeResponse); i { case 0: return &v.state @@ -19572,7 +20553,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MaterializeCreateRequest); i { case 0: return &v.state @@ -19584,7 +20565,7 @@ func file_vtctldata_proto_init() { 
return nil } } - file_vtctldata_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MaterializeCreateResponse); i { case 0: return &v.state @@ -19596,7 +20577,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MigrateCreateRequest); i { case 0: return &v.state @@ -19608,7 +20589,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MigrateCompleteRequest); i { case 0: return &v.state @@ -19620,7 +20601,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MigrateCompleteResponse); i { case 0: return &v.state @@ -19632,7 +20613,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountRegisterRequest); i { case 0: return &v.state @@ -19644,7 +20625,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[120].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountRegisterResponse); i { case 0: return &v.state @@ -19656,7 +20637,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[121].Exporter = func(v 
interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountUnregisterRequest); i { case 0: return &v.state @@ -19668,7 +20649,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountUnregisterResponse); i { case 0: return &v.state @@ -19680,7 +20661,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountShowRequest); i { case 0: return &v.state @@ -19692,7 +20673,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountShowResponse); i { case 0: return &v.state @@ -19704,7 +20685,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountListRequest); i { case 0: return &v.state @@ -19716,7 +20697,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MountListResponse); i { case 0: return &v.state @@ -19728,7 +20709,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[127].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[138].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*MoveTablesCreateRequest); i { case 0: return &v.state @@ -19740,7 +20721,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[128].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveTablesCreateResponse); i { case 0: return &v.state @@ -19752,7 +20733,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[129].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveTablesCompleteRequest); i { case 0: return &v.state @@ -19764,7 +20745,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[130].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveTablesCompleteResponse); i { case 0: return &v.state @@ -19776,7 +20757,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PingTabletRequest); i { case 0: return &v.state @@ -19788,7 +20769,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PingTabletResponse); i { case 0: return &v.state @@ -19800,7 +20781,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[133].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*PlannedReparentShardRequest); i { case 0: return &v.state @@ -19812,7 +20793,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[134].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlannedReparentShardResponse); i { case 0: return &v.state @@ -19824,7 +20805,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[135].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RebuildKeyspaceGraphRequest); i { case 0: return &v.state @@ -19836,7 +20817,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[136].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RebuildKeyspaceGraphResponse); i { case 0: return &v.state @@ -19848,7 +20829,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[137].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RebuildVSchemaGraphRequest); i { case 0: return &v.state @@ -19860,7 +20841,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[138].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RebuildVSchemaGraphResponse); i { case 0: return &v.state @@ -19872,7 +20853,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[139].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshStateRequest); i { case 0: 
return &v.state @@ -19884,7 +20865,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[140].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshStateResponse); i { case 0: return &v.state @@ -19896,7 +20877,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[141].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshStateByShardRequest); i { case 0: return &v.state @@ -19908,7 +20889,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[142].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RefreshStateByShardResponse); i { case 0: return &v.state @@ -19920,7 +20901,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[143].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaRequest); i { case 0: return &v.state @@ -19932,7 +20913,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[144].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaResponse); i { case 0: return &v.state @@ -19944,7 +20925,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[145].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaKeyspaceRequest); i { case 0: return &v.state @@ -19956,7 +20937,7 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[146].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaKeyspaceResponse); i { case 0: return &v.state @@ -19968,7 +20949,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[147].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaShardRequest); i { case 0: return &v.state @@ -19980,7 +20961,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[148].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReloadSchemaShardResponse); i { case 0: return &v.state @@ -19992,7 +20973,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[149].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveBackupRequest); i { case 0: return &v.state @@ -20004,7 +20985,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[150].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveBackupResponse); i { case 0: return &v.state @@ -20016,7 +20997,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[151].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveKeyspaceCellRequest); i { case 0: return &v.state @@ -20028,7 +21009,7 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[152].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveKeyspaceCellResponse); i { case 0: return &v.state @@ -20040,7 +21021,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[153].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveShardCellRequest); i { case 0: return &v.state @@ -20052,7 +21033,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[154].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RemoveShardCellResponse); i { case 0: return &v.state @@ -20064,7 +21045,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[155].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReparentTabletRequest); i { case 0: return &v.state @@ -20076,7 +21057,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[156].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReparentTabletResponse); i { case 0: return &v.state @@ -20088,7 +21069,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[157].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReshardCreateRequest); i { case 0: return &v.state @@ -20100,7 +21081,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[158].Exporter = func(v interface{}, i 
int) interface{} { + file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RestoreFromBackupRequest); i { case 0: return &v.state @@ -20112,7 +21093,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[159].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RestoreFromBackupResponse); i { case 0: return &v.state @@ -20124,7 +21105,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[160].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetrySchemaMigrationRequest); i { case 0: return &v.state @@ -20136,7 +21117,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[161].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RetrySchemaMigrationResponse); i { case 0: return &v.state @@ -20148,7 +21129,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[162].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunHealthCheckRequest); i { case 0: return &v.state @@ -20160,7 +21141,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[163].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*RunHealthCheckResponse); i { case 0: return &v.state @@ -20172,7 +21153,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[164].Exporter = func(v interface{}, i int) interface{} { + 
file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetKeyspaceDurabilityPolicyRequest); i { case 0: return &v.state @@ -20184,7 +21165,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[165].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetKeyspaceDurabilityPolicyResponse); i { case 0: return &v.state @@ -20196,31 +21177,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[166].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceServedFromRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vtctldata_proto_msgTypes[167].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetKeyspaceServedFromResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_vtctldata_proto_msgTypes[168].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetKeyspaceShardingInfoRequest); i { case 0: return &v.state @@ -20232,7 +21189,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[169].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetKeyspaceShardingInfoResponse); i { case 0: return &v.state @@ -20244,7 +21201,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[170].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*SetShardIsPrimaryServingRequest); i { case 0: return &v.state @@ -20256,7 +21213,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[171].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetShardIsPrimaryServingResponse); i { case 0: return &v.state @@ -20268,7 +21225,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[172].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetShardTabletControlRequest); i { case 0: return &v.state @@ -20280,7 +21237,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[173].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetShardTabletControlResponse); i { case 0: return &v.state @@ -20292,7 +21249,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[174].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetWritableRequest); i { case 0: return &v.state @@ -20304,7 +21261,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[175].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SetWritableResponse); i { case 0: return &v.state @@ -20316,7 +21273,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[176].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[185].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationAddRequest); i { case 0: 
return &v.state @@ -20328,7 +21285,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[177].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationAddResponse); i { case 0: return &v.state @@ -20340,7 +21297,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[178].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[187].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationFixRequest); i { case 0: return &v.state @@ -20352,7 +21309,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[179].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationFixResponse); i { case 0: return &v.state @@ -20364,7 +21321,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[180].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[189].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationPositionsRequest); i { case 0: return &v.state @@ -20376,7 +21333,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[181].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[190].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationPositionsResponse); i { case 0: return &v.state @@ -20388,7 +21345,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[182].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[191].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationRemoveRequest); i { case 0: return &v.state @@ -20400,7 
+21357,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[183].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[192].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ShardReplicationRemoveResponse); i { case 0: return &v.state @@ -20412,7 +21369,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[184].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[193].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SleepTabletRequest); i { case 0: return &v.state @@ -20424,7 +21381,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[185].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[194].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SleepTabletResponse); i { case 0: return &v.state @@ -20436,7 +21393,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[186].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[195].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceShardAddRequest); i { case 0: return &v.state @@ -20448,7 +21405,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[187].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[196].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceShardAddResponse); i { case 0: return &v.state @@ -20460,7 +21417,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[188].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[197].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceShardDeleteRequest); i { case 0: return &v.state @@ -20472,7 +21429,7 @@ func file_vtctldata_proto_init() { return nil } } - 
file_vtctldata_proto_msgTypes[189].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[198].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*SourceShardDeleteResponse); i { case 0: return &v.state @@ -20484,7 +21441,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[190].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[199].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StartReplicationRequest); i { case 0: return &v.state @@ -20496,7 +21453,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[191].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[200].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StartReplicationResponse); i { case 0: return &v.state @@ -20508,7 +21465,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[192].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[201].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StopReplicationRequest); i { case 0: return &v.state @@ -20520,7 +21477,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[193].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[202].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*StopReplicationResponse); i { case 0: return &v.state @@ -20532,7 +21489,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[194].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[203].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TabletExternallyReparentedRequest); i { case 0: return &v.state @@ -20544,7 +21501,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[195].Exporter = func(v 
interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[204].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TabletExternallyReparentedResponse); i { case 0: return &v.state @@ -20556,7 +21513,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[196].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[205].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateCellInfoRequest); i { case 0: return &v.state @@ -20568,7 +21525,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[197].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[206].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateCellInfoResponse); i { case 0: return &v.state @@ -20580,7 +21537,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[198].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[207].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateCellsAliasRequest); i { case 0: return &v.state @@ -20592,7 +21549,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[199].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[208].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateCellsAliasResponse); i { case 0: return &v.state @@ -20604,7 +21561,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[200].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[209].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateRequest); i { case 0: return &v.state @@ -20616,7 +21573,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[201].Exporter = func(v interface{}, i int) interface{} { + 
file_vtctldata_proto_msgTypes[210].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResponse); i { case 0: return &v.state @@ -20628,7 +21585,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[202].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[211].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateKeyspaceRequest); i { case 0: return &v.state @@ -20640,7 +21597,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[203].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[212].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateKeyspaceResponse); i { case 0: return &v.state @@ -20652,7 +21609,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[204].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[213].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateSchemaKeyspaceRequest); i { case 0: return &v.state @@ -20664,7 +21621,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[205].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[214].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateSchemaKeyspaceResponse); i { case 0: return &v.state @@ -20676,7 +21633,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[206].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[215].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateShardRequest); i { case 0: return &v.state @@ -20688,7 +21645,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[207].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[216].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*ValidateShardResponse); i { case 0: return &v.state @@ -20700,7 +21657,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[208].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[217].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVersionKeyspaceRequest); i { case 0: return &v.state @@ -20712,7 +21669,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[209].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[218].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVersionKeyspaceResponse); i { case 0: return &v.state @@ -20724,7 +21681,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[210].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[219].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVersionShardRequest); i { case 0: return &v.state @@ -20736,7 +21693,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[211].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[220].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVersionShardResponse); i { case 0: return &v.state @@ -20748,7 +21705,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[212].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[221].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateVSchemaRequest); i { case 0: return &v.state @@ -20760,7 +21717,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[213].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[222].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*ValidateVSchemaResponse); i { case 0: return &v.state @@ -20772,7 +21729,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[214].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[223].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffCreateRequest); i { case 0: return &v.state @@ -20784,7 +21741,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[215].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[224].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffCreateResponse); i { case 0: return &v.state @@ -20796,7 +21753,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[216].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[225].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffDeleteRequest); i { case 0: return &v.state @@ -20808,7 +21765,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[217].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[226].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffDeleteResponse); i { case 0: return &v.state @@ -20820,7 +21777,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[218].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[227].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffResumeRequest); i { case 0: return &v.state @@ -20832,7 +21789,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[219].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[228].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffResumeResponse); i { case 0: return &v.state @@ -20844,7 +21801,7 @@ func 
file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[220].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[229].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffShowRequest); i { case 0: return &v.state @@ -20856,7 +21813,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[221].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[230].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffShowResponse); i { case 0: return &v.state @@ -20868,7 +21825,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[222].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[231].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffStopRequest); i { case 0: return &v.state @@ -20880,7 +21837,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[223].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[232].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*VDiffStopResponse); i { case 0: return &v.state @@ -20892,7 +21849,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[224].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[233].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowDeleteRequest); i { case 0: return &v.state @@ -20904,7 +21861,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[225].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[234].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowDeleteResponse); i { case 0: return &v.state @@ -20916,7 +21873,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[226].Exporter = 
func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[235].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowStatusRequest); i { case 0: return &v.state @@ -20928,7 +21885,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[227].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[236].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowStatusResponse); i { case 0: return &v.state @@ -20940,7 +21897,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[228].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[237].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowSwitchTrafficRequest); i { case 0: return &v.state @@ -20952,7 +21909,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[229].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[238].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowSwitchTrafficResponse); i { case 0: return &v.state @@ -20964,7 +21921,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[230].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[239].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowUpdateRequest); i { case 0: return &v.state @@ -20976,7 +21933,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[231].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[240].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowUpdateResponse); i { case 0: return &v.state @@ -20988,7 +21945,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[233].Exporter = func(v interface{}, i int) interface{} { + 
file_vtctldata_proto_msgTypes[242].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_ReplicationLocation); i { case 0: return &v.state @@ -21000,7 +21957,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[234].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[243].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_ShardStream); i { case 0: return &v.state @@ -21012,7 +21969,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[235].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[244].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_Stream); i { case 0: return &v.state @@ -21024,7 +21981,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[236].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[245].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_Stream_CopyState); i { case 0: return &v.state @@ -21036,7 +21993,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[237].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[246].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_Stream_Log); i { case 0: return &v.state @@ -21048,7 +22005,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[238].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[247].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Workflow_Stream_ThrottlerStatus); i { case 0: return &v.state @@ -21060,7 +22017,19 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[246].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[250].Exporter = func(v interface{}, 
i int) interface{} { + switch v := v.(*ApplyVSchemaResponse_ParamList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vtctldata_proto_msgTypes[259].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetSrvKeyspaceNamesResponse_NameList); i { case 0: return &v.state @@ -21072,7 +22041,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[250].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[263].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MoveTablesCreateResponse_TabletInfo); i { case 0: return &v.state @@ -21084,7 +22053,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[260].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[273].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowDeleteResponse_TabletInfo); i { case 0: return &v.state @@ -21096,7 +22065,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[261].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[274].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowStatusResponse_TableCopyState); i { case 0: return &v.state @@ -21108,7 +22077,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[262].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[275].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowStatusResponse_ShardStreamState); i { case 0: return &v.state @@ -21120,7 +22089,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[263].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[276].Exporter = func(v interface{}, i int) interface{} { switch v := 
v.(*WorkflowStatusResponse_ShardStreams); i { case 0: return &v.state @@ -21132,7 +22101,7 @@ func file_vtctldata_proto_init() { return nil } } - file_vtctldata_proto_msgTypes[266].Exporter = func(v interface{}, i int) interface{} { + file_vtctldata_proto_msgTypes[279].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowUpdateResponse_TabletInfo); i { case 0: return &v.state @@ -21151,7 +22120,7 @@ func file_vtctldata_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vtctldata_proto_rawDesc, NumEnums: 4, - NumMessages: 267, + NumMessages: 280, NumExtensions: 0, NumServices: 0, }, diff --git a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go index b453eb2a0b2..b3721495678 100644 --- a/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go +++ b/go/vt/proto/vtctldata/vtctldata_vtproto.pb.go @@ -111,6 +111,7 @@ func (m *MaterializeSettings) CloneVT() *MaterializeSettings { DeferSecondaryKeys: m.DeferSecondaryKeys, TabletSelectionPreference: m.TabletSelectionPreference, AtomicCopy: m.AtomicCopy, + WorkflowOptions: m.WorkflowOptions.CloneVT(), } if rhs := m.TableSettings; rhs != nil { tmpContainer := make([]*TableMaterializeSettings, len(rhs)) @@ -245,6 +246,30 @@ func (m *Shard) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *WorkflowOptions) CloneVT() *WorkflowOptions { + if m == nil { + return (*WorkflowOptions)(nil) + } + r := &WorkflowOptions{ + TenantId: m.TenantId, + StripShardedAutoIncrement: m.StripShardedAutoIncrement, + } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *WorkflowOptions) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Workflow_ReplicationLocation) CloneVT() 
*Workflow_ReplicationLocation { if m == nil { return (*Workflow_ReplicationLocation)(nil) @@ -305,8 +330,9 @@ func (m *Workflow_Stream_CopyState) CloneVT() *Workflow_Stream_CopyState { return (*Workflow_Stream_CopyState)(nil) } r := &Workflow_Stream_CopyState{ - Table: m.Table, - LastPk: m.LastPk, + Table: m.Table, + LastPk: m.LastPk, + StreamId: m.StreamId, } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -368,20 +394,21 @@ func (m *Workflow_Stream) CloneVT() *Workflow_Stream { return (*Workflow_Stream)(nil) } r := &Workflow_Stream{ - Id: m.Id, - Shard: m.Shard, - Tablet: m.Tablet.CloneVT(), - BinlogSource: m.BinlogSource.CloneVT(), - Position: m.Position, - StopPosition: m.StopPosition, - State: m.State, - DbName: m.DbName, - TransactionTimestamp: m.TransactionTimestamp.CloneVT(), - TimeUpdated: m.TimeUpdated.CloneVT(), - Message: m.Message, - LogFetchError: m.LogFetchError, - RowsCopied: m.RowsCopied, - ThrottlerStatus: m.ThrottlerStatus.CloneVT(), + Id: m.Id, + Shard: m.Shard, + Tablet: m.Tablet.CloneVT(), + BinlogSource: m.BinlogSource.CloneVT(), + Position: m.Position, + StopPosition: m.StopPosition, + State: m.State, + DbName: m.DbName, + TransactionTimestamp: m.TransactionTimestamp.CloneVT(), + TimeUpdated: m.TimeUpdated.CloneVT(), + Message: m.Message, + LogFetchError: m.LogFetchError, + RowsCopied: m.RowsCopied, + ThrottlerStatus: m.ThrottlerStatus.CloneVT(), + TabletSelectionPreference: m.TabletSelectionPreference, } if rhs := m.CopyStates; rhs != nil { tmpContainer := make([]*Workflow_Stream_CopyState, len(rhs)) @@ -402,6 +429,16 @@ func (m *Workflow_Stream) CloneVT() *Workflow_Stream { copy(tmpContainer, rhs) r.Tags = tmpContainer } + if rhs := m.TabletTypes; rhs != nil { + tmpContainer := make([]topodata.TabletType, len(rhs)) + copy(tmpContainer, rhs) + r.TabletTypes = tmpContainer + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + 
} if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -426,6 +463,7 @@ func (m *Workflow) CloneVT() *Workflow { WorkflowSubType: m.WorkflowSubType, MaxVReplicationTransactionLag: m.MaxVReplicationTransactionLag, DeferSecondaryKeys: m.DeferSecondaryKeys, + Options: m.Options.CloneVT(), } if rhs := m.ShardStreams; rhs != nil { tmpContainer := make(map[string]*Workflow_ShardStream, len(rhs)) @@ -519,6 +557,48 @@ func (m *AddCellsAliasResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *ApplyKeyspaceRoutingRulesRequest) CloneVT() *ApplyKeyspaceRoutingRulesRequest { + if m == nil { + return (*ApplyKeyspaceRoutingRulesRequest)(nil) + } + r := &ApplyKeyspaceRoutingRulesRequest{ + KeyspaceRoutingRules: m.KeyspaceRoutingRules.CloneVT(), + SkipRebuild: m.SkipRebuild, + } + if rhs := m.RebuildCells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.RebuildCells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyKeyspaceRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ApplyKeyspaceRoutingRulesResponse) CloneVT() *ApplyKeyspaceRoutingRulesResponse { + if m == nil { + return (*ApplyKeyspaceRoutingRulesResponse)(nil) + } + r := &ApplyKeyspaceRoutingRulesResponse{ + KeyspaceRoutingRules: m.KeyspaceRoutingRules.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyKeyspaceRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *ApplyRoutingRulesRequest) CloneVT() *ApplyRoutingRulesRequest { if m == nil { return (*ApplyRoutingRulesRequest)(nil) @@ -670,6 +750,7 @@ func (m *ApplyVSchemaRequest) CloneVT() *ApplyVSchemaRequest { 
DryRun: m.DryRun, VSchema: m.VSchema.CloneVT(), Sql: m.Sql, + Strict: m.Strict, } if rhs := m.Cells; rhs != nil { tmpContainer := make([]string, len(rhs)) @@ -687,6 +768,27 @@ func (m *ApplyVSchemaRequest) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *ApplyVSchemaResponse_ParamList) CloneVT() *ApplyVSchemaResponse_ParamList { + if m == nil { + return (*ApplyVSchemaResponse_ParamList)(nil) + } + r := &ApplyVSchemaResponse_ParamList{} + if rhs := m.Params; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Params = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ApplyVSchemaResponse_ParamList) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *ApplyVSchemaResponse) CloneVT() *ApplyVSchemaResponse { if m == nil { return (*ApplyVSchemaResponse)(nil) @@ -694,6 +796,13 @@ func (m *ApplyVSchemaResponse) CloneVT() *ApplyVSchemaResponse { r := &ApplyVSchemaResponse{ VSchema: m.VSchema.CloneVT(), } + if rhs := m.UnknownVindexParams; rhs != nil { + tmpContainer := make(map[string]*ApplyVSchemaResponse_ParamList, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.UnknownVindexParams = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -951,13 +1060,6 @@ func (m *CreateKeyspaceRequest) CloneVT() *CreateKeyspaceRequest { DurabilityPolicy: m.DurabilityPolicy, SidecarDbName: m.SidecarDbName, } - if rhs := m.ServedFroms; rhs != nil { - tmpContainer := make([]*topodata.Keyspace_ServedFrom, len(rhs)) - for k, v := range rhs { - tmpContainer[k] = v.CloneVT() - } - r.ServedFroms = tmpContainer - } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -1424,6 +1526,51 @@ func (m *ExecuteHookResponse) 
CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *ExecuteMultiFetchAsDBARequest) CloneVT() *ExecuteMultiFetchAsDBARequest { + if m == nil { + return (*ExecuteMultiFetchAsDBARequest)(nil) + } + r := &ExecuteMultiFetchAsDBARequest{ + TabletAlias: m.TabletAlias.CloneVT(), + Sql: m.Sql, + MaxRows: m.MaxRows, + DisableBinlogs: m.DisableBinlogs, + ReloadSchema: m.ReloadSchema, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteMultiFetchAsDBARequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *ExecuteMultiFetchAsDBAResponse) CloneVT() *ExecuteMultiFetchAsDBAResponse { + if m == nil { + return (*ExecuteMultiFetchAsDBAResponse)(nil) + } + r := &ExecuteMultiFetchAsDBAResponse{} + if rhs := m.Results; rhs != nil { + tmpContainer := make([]*query.QueryResult, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.Results = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ExecuteMultiFetchAsDBAResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *FindAllShardsInKeyspaceRequest) CloneVT() *FindAllShardsInKeyspaceRequest { if m == nil { return (*FindAllShardsInKeyspaceRequest)(nil) @@ -1465,6 +1612,48 @@ func (m *FindAllShardsInKeyspaceResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *ForceCutOverSchemaMigrationRequest) CloneVT() *ForceCutOverSchemaMigrationRequest { + if m == nil { + return (*ForceCutOverSchemaMigrationRequest)(nil) + } + r := &ForceCutOverSchemaMigrationRequest{ + Keyspace: m.Keyspace, + Uuid: m.Uuid, + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ForceCutOverSchemaMigrationRequest) CloneMessageVT() 
proto.Message { + return m.CloneVT() +} + +func (m *ForceCutOverSchemaMigrationResponse) CloneVT() *ForceCutOverSchemaMigrationResponse { + if m == nil { + return (*ForceCutOverSchemaMigrationResponse)(nil) + } + r := &ForceCutOverSchemaMigrationResponse{} + if rhs := m.RowsAffectedByShard; rhs != nil { + tmpContainer := make(map[string]uint64, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v + } + r.RowsAffectedByShard = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *ForceCutOverSchemaMigrationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *GetBackupsRequest) CloneVT() *GetBackupsRequest { if m == nil { return (*GetBackupsRequest)(nil) @@ -1769,6 +1958,40 @@ func (m *GetPermissionsResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *GetKeyspaceRoutingRulesRequest) CloneVT() *GetKeyspaceRoutingRulesRequest { + if m == nil { + return (*GetKeyspaceRoutingRulesRequest)(nil) + } + r := &GetKeyspaceRoutingRulesRequest{} + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspaceRoutingRulesRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetKeyspaceRoutingRulesResponse) CloneVT() *GetKeyspaceRoutingRulesResponse { + if m == nil { + return (*GetKeyspaceRoutingRulesResponse)(nil) + } + r := &GetKeyspaceRoutingRulesResponse{ + KeyspaceRoutingRules: m.KeyspaceRoutingRules.CloneVT(), + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetKeyspaceRoutingRulesResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *GetRoutingRulesRequest) CloneVT() *GetRoutingRulesRequest { if m == nil { return (*GetRoutingRulesRequest)(nil) @@ 
-1901,6 +2124,53 @@ func (m *GetSchemaMigrationsResponse) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *GetShardReplicationRequest) CloneVT() *GetShardReplicationRequest { + if m == nil { + return (*GetShardReplicationRequest)(nil) + } + r := &GetShardReplicationRequest{ + Keyspace: m.Keyspace, + Shard: m.Shard, + } + if rhs := m.Cells; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Cells = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetShardReplicationRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + +func (m *GetShardReplicationResponse) CloneVT() *GetShardReplicationResponse { + if m == nil { + return (*GetShardReplicationResponse)(nil) + } + r := &GetShardReplicationResponse{} + if rhs := m.ShardReplicationByCell; rhs != nil { + tmpContainer := make(map[string]*topodata.ShardReplication, len(rhs)) + for k, v := range rhs { + tmpContainer[k] = v.CloneVT() + } + r.ShardReplicationByCell = tmpContainer + } + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *GetShardReplicationResponse) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *GetShardRequest) CloneVT() *GetShardRequest { if m == nil { return (*GetShardRequest)(nil) @@ -2441,6 +2711,11 @@ func (m *GetWorkflowsRequest) CloneVT() *GetWorkflowsRequest { Workflow: m.Workflow, IncludeLogs: m.IncludeLogs, } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -2944,6 +3219,7 @@ func (m *MoveTablesCreateRequest) CloneVT() *MoveTablesCreateRequest { AutoStart: m.AutoStart, NoRoutingRules: 
m.NoRoutingRules, AtomicCopy: m.AtomicCopy, + WorkflowOptions: m.WorkflowOptions.CloneVT(), } if rhs := m.Cells; rhs != nil { tmpContainer := make([]string, len(rhs)) @@ -3037,6 +3313,11 @@ func (m *MoveTablesCompleteRequest) CloneVT() *MoveTablesCompleteRequest { RenameTables: m.RenameTables, DryRun: m.DryRun, } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -3110,11 +3391,12 @@ func (m *PlannedReparentShardRequest) CloneVT() *PlannedReparentShardRequest { return (*PlannedReparentShardRequest)(nil) } r := &PlannedReparentShardRequest{ - Keyspace: m.Keyspace, - Shard: m.Shard, - NewPrimary: m.NewPrimary.CloneVT(), - AvoidPrimary: m.AvoidPrimary.CloneVT(), - WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + Keyspace: m.Keyspace, + Shard: m.Shard, + NewPrimary: m.NewPrimary.CloneVT(), + AvoidPrimary: m.AvoidPrimary.CloneVT(), + WaitReplicasTimeout: m.WaitReplicasTimeout.CloneVT(), + TolerableReplicationLag: m.TolerableReplicationLag.CloneVT(), } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) @@ -3781,50 +4063,6 @@ func (m *SetKeyspaceDurabilityPolicyResponse) CloneMessageVT() proto.Message { return m.CloneVT() } -func (m *SetKeyspaceServedFromRequest) CloneVT() *SetKeyspaceServedFromRequest { - if m == nil { - return (*SetKeyspaceServedFromRequest)(nil) - } - r := &SetKeyspaceServedFromRequest{ - Keyspace: m.Keyspace, - TabletType: m.TabletType, - Remove: m.Remove, - SourceKeyspace: m.SourceKeyspace, - } - if rhs := m.Cells; rhs != nil { - tmpContainer := make([]string, len(rhs)) - copy(tmpContainer, rhs) - r.Cells = tmpContainer - } - if len(m.unknownFields) > 0 { - r.unknownFields = make([]byte, len(m.unknownFields)) - copy(r.unknownFields, m.unknownFields) - } - return r -} - -func (m *SetKeyspaceServedFromRequest) 
CloneMessageVT() proto.Message { - return m.CloneVT() -} - -func (m *SetKeyspaceServedFromResponse) CloneVT() *SetKeyspaceServedFromResponse { - if m == nil { - return (*SetKeyspaceServedFromResponse)(nil) - } - r := &SetKeyspaceServedFromResponse{ - Keyspace: m.Keyspace.CloneVT(), - } - if len(m.unknownFields) > 0 { - r.unknownFields = make([]byte, len(m.unknownFields)) - copy(r.unknownFields, m.unknownFields) - } - return r -} - -func (m *SetKeyspaceServedFromResponse) CloneMessageVT() proto.Message { - return m.CloneVT() -} - func (m *SetKeyspaceShardingInfoRequest) CloneVT() *SetKeyspaceShardingInfoRequest { if m == nil { return (*SetKeyspaceShardingInfoRequest)(nil) @@ -4796,6 +5034,8 @@ func (m *VDiffCreateRequest) CloneVT() *VDiffCreateRequest { WaitUpdateInterval: m.WaitUpdateInterval.CloneVT(), AutoRetry: m.AutoRetry, Verbose: m.Verbose, + MaxReportSampleRows: m.MaxReportSampleRows, + MaxDiffDuration: m.MaxDiffDuration.CloneVT(), } if rhs := m.SourceCells; rhs != nil { tmpContainer := make([]string, len(rhs)) @@ -5007,6 +5247,11 @@ func (m *WorkflowDeleteRequest) CloneVT() *WorkflowDeleteRequest { KeepData: m.KeepData, KeepRoutingRules: m.KeepRoutingRules, } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -5070,6 +5315,11 @@ func (m *WorkflowStatusRequest) CloneVT() *WorkflowStatusRequest { Keyspace: m.Keyspace, Workflow: m.Workflow, } + if rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -5206,6 +5456,11 @@ func (m *WorkflowSwitchTrafficRequest) CloneVT() *WorkflowSwitchTrafficRequest { copy(tmpContainer, rhs) r.TabletTypes = tmpContainer } + if 
rhs := m.Shards; rhs != nil { + tmpContainer := make([]string, len(rhs)) + copy(tmpContainer, rhs) + r.Shards = tmpContainer + } if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) copy(r.unknownFields, m.unknownFields) @@ -5479,6 +5734,18 @@ func (m *MaterializeSettings) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.WorkflowOptions != nil { + size, err := m.WorkflowOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } if m.AtomicCopy { i-- if m.AtomicCopy { @@ -6242,6 +6509,65 @@ func (m *Shard) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *WorkflowOptions) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WorkflowOptions) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *WorkflowOptions) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.StripShardedAutoIncrement { + i-- + if m.StripShardedAutoIncrement { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.TenantId) > 0 { + i -= len(m.TenantId) + copy(dAtA[i:], m.TenantId) + i = encodeVarint(dAtA, i, uint64(len(m.TenantId))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Workflow_ReplicationLocation) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -6388,6 +6714,11 @@ func (m *Workflow_Stream_CopyState) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.StreamId != 0 { + i = encodeVarint(dAtA, i, uint64(m.StreamId)) + i-- + dAtA[i] = 0x18 + } if len(m.LastPk) > 0 { i -= len(m.LastPk) copy(dAtA[i:], m.LastPk) @@ -6574,6 +6905,47 @@ func (m *Workflow_Stream) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if m.TabletSelectionPreference != 0 { + i = encodeVarint(dAtA, i, uint64(m.TabletSelectionPreference)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if len(m.TabletTypes) > 0 { + var pksize2 int + for _, num := range m.TabletTypes { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num1 := range m.TabletTypes { + num := uint64(num1) + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } if m.ThrottlerStatus != nil { size, err := m.ThrottlerStatus.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -6753,6 +7125,16 @@ func (m *Workflow) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Options != nil { + size, err := m.Options.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } if m.DeferSecondaryKeys { i-- if m.DeferSecondaryKeys { @@ 
-7004,6 +7386,111 @@ func (m *AddCellsAliasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) return len(dAtA) - i, nil } +func (m *ApplyKeyspaceRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyKeyspaceRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ApplyKeyspaceRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RebuildCells) > 0 { + for iNdEx := len(m.RebuildCells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RebuildCells[iNdEx]) + copy(dAtA[i:], m.RebuildCells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.RebuildCells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.SkipRebuild { + i-- + if m.SkipRebuild { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.KeyspaceRoutingRules != nil { + size, err := m.KeyspaceRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ApplyKeyspaceRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyKeyspaceRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ApplyKeyspaceRoutingRulesResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.KeyspaceRoutingRules != nil { + size, err := m.KeyspaceRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ApplyRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -7380,6 +7867,16 @@ func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.Strict { + i-- + if m.Strict { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } if len(m.Sql) > 0 { i -= len(m.Sql) copy(dAtA[i:], m.Sql) @@ -7436,6 +7933,48 @@ func (m *ApplyVSchemaRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ApplyVSchemaResponse_ParamList) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ApplyVSchemaResponse_ParamList) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ApplyVSchemaResponse_ParamList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Params) > 0 { + for iNdEx := len(m.Params) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Params[iNdEx]) + copy(dAtA[i:], m.Params[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Params[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + 
return len(dAtA) - i, nil +} + func (m *ApplyVSchemaResponse) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -7466,6 +8005,28 @@ func (m *ApplyVSchemaResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.UnknownVindexParams) > 0 { + for k := range m.UnknownVindexParams { + v := m.UnknownVindexParams[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } if m.VSchema != nil { size, err := m.VSchema.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -8178,18 +8739,6 @@ func (m *CreateKeyspaceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i-- dAtA[i] = 0x38 } - if len(m.ServedFroms) > 0 { - for iNdEx := len(m.ServedFroms) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.ServedFroms[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0x32 - } - } if m.AllowEmptyVSchema { i-- if m.AllowEmptyVSchema { @@ -9401,6 +9950,126 @@ func (m *ExecuteHookResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ExecuteMultiFetchAsDBARequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteMultiFetchAsDBARequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExecuteMultiFetchAsDBARequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + 
return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ReloadSchema { + i-- + if m.ReloadSchema { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.DisableBinlogs { + i-- + if m.DisableBinlogs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.MaxRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxRows)) + i-- + dAtA[i] = 0x18 + } + if len(m.Sql) > 0 { + i -= len(m.Sql) + copy(dAtA[i:], m.Sql) + i = encodeVarint(dAtA, i, uint64(len(m.Sql))) + i-- + dAtA[i] = 0x12 + } + if m.TabletAlias != nil { + size, err := m.TabletAlias.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecuteMultiFetchAsDBAResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecuteMultiFetchAsDBAResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExecuteMultiFetchAsDBAResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Results) > 0 { + for iNdEx := len(m.Results) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Results[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *FindAllShardsInKeyspaceRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, 
nil @@ -9496,6 +10165,103 @@ func (m *FindAllShardsInKeyspaceResponse) MarshalToSizedBufferVT(dAtA []byte) (i return len(dAtA) - i, nil } +func (m *ForceCutOverSchemaMigrationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForceCutOverSchemaMigrationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ForceCutOverSchemaMigrationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Uuid) > 0 { + i -= len(m.Uuid) + copy(dAtA[i:], m.Uuid) + i = encodeVarint(dAtA, i, uint64(len(m.Uuid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ForceCutOverSchemaMigrationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ForceCutOverSchemaMigrationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ForceCutOverSchemaMigrationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.RowsAffectedByShard) > 0 { + for k := range m.RowsAffectedByShard { 
+ v := m.RowsAffectedByShard[k] + baseI := i + i = encodeVarint(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *GetBackupsRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -10187,6 +10953,82 @@ func (m *GetPermissionsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error return len(dAtA) - i, nil } +func (m *GetKeyspaceRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspaceRoutingRulesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetKeyspaceRoutingRulesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetKeyspaceRoutingRulesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetKeyspaceRoutingRulesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetKeyspaceRoutingRulesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) 
+ } + if m.KeyspaceRoutingRules != nil { + size, err := m.KeyspaceRoutingRules.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *GetRoutingRulesRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -10536,6 +11378,117 @@ func (m *GetSchemaMigrationsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, return len(dAtA) - i, nil } +func (m *GetShardReplicationRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetShardReplicationRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetShardReplicationRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Cells) > 0 { + for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cells[iNdEx]) + copy(dAtA[i:], m.Cells[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Shard) > 0 { + i -= len(m.Shard) + copy(dAtA[i:], m.Shard) + i = encodeVarint(dAtA, i, uint64(len(m.Shard))) + i-- + dAtA[i] = 0x12 + } + if len(m.Keyspace) > 0 { + i -= len(m.Keyspace) + copy(dAtA[i:], m.Keyspace) + i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetShardReplicationResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *GetShardReplicationResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetShardReplicationResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ShardReplicationByCell) > 0 { + for k := range m.ShardReplicationByCell { + v := m.ShardReplicationByCell[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *GetShardRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -11827,6 +12780,15 @@ func (m *GetWorkflowsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } if m.IncludeLogs { i-- if m.IncludeLogs { @@ -13147,6 +14109,18 @@ func (m *MoveTablesCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, erro i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.WorkflowOptions != nil { + size, err := m.WorkflowOptions.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } if m.AtomicCopy { i-- if m.AtomicCopy { @@ -13467,6 +14441,15 @@ func (m 
*MoveTablesCompleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } if m.DryRun { i-- if m.DryRun { @@ -13679,6 +14662,16 @@ func (m *PlannedReparentShardRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.TolerableReplicationLag != nil { + size, err := m.TolerableReplicationLag.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } if m.WaitReplicasTimeout != nil { size, err := m.WaitReplicasTimeout.MarshalToSizedBufferVT(dAtA[:i]) if err != nil { @@ -15392,120 +16385,6 @@ func (m *SetKeyspaceDurabilityPolicyResponse) MarshalToSizedBufferVT(dAtA []byte return len(dAtA) - i, nil } -func (m *SetKeyspaceServedFromRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetKeyspaceServedFromRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetKeyspaceServedFromRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.SourceKeyspace) > 0 { - i -= len(m.SourceKeyspace) - copy(dAtA[i:], m.SourceKeyspace) - i = encodeVarint(dAtA, i, uint64(len(m.SourceKeyspace))) - i-- - dAtA[i] = 0x2a - } - if m.Remove { - i-- - if m.Remove { - dAtA[i] = 1 - } else { 
- dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Cells) > 0 { - for iNdEx := len(m.Cells) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Cells[iNdEx]) - copy(dAtA[i:], m.Cells[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Cells[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.TabletType != 0 { - i = encodeVarint(dAtA, i, uint64(m.TabletType)) - i-- - dAtA[i] = 0x10 - } - if len(m.Keyspace) > 0 { - i -= len(m.Keyspace) - copy(dAtA[i:], m.Keyspace) - i = encodeVarint(dAtA, i, uint64(len(m.Keyspace))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SetKeyspaceServedFromResponse) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetKeyspaceServedFromResponse) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetKeyspaceServedFromResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if m.Keyspace != nil { - size, err := m.Keyspace.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *SetKeyspaceShardingInfoRequest) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -17908,6 +18787,25 @@ func (m *VDiffCreateRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MaxDiffDuration != nil { + size, err := m.MaxDiffDuration.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1 + 
i-- + dAtA[i] = 0xa2 + } + if m.MaxReportSampleRows != 0 { + i = encodeVarint(dAtA, i, uint64(m.MaxReportSampleRows)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } if m.Verbose { i-- if m.Verbose { @@ -18521,6 +19419,15 @@ func (m *WorkflowDeleteRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } if m.KeepRoutingRules { i-- if m.KeepRoutingRules { @@ -18693,6 +19600,15 @@ func (m *WorkflowStatusRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } if len(m.Workflow) > 0 { i -= len(m.Workflow) copy(dAtA[i:], m.Workflow) @@ -19010,6 +19926,15 @@ func (m *WorkflowSwitchTrafficRequest) MarshalToSizedBufferVT(dAtA []byte) (int, i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.Shards) > 0 { + for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Shards[iNdEx]) + copy(dAtA[i:], m.Shards[iNdEx]) + i = encodeVarint(dAtA, i, uint64(len(m.Shards[iNdEx]))) + i-- + dAtA[i] = 0x5a + } + } if m.InitializeTargetSequences { i-- if m.InitializeTargetSequences { @@ -19465,6 +20390,10 @@ func (m *MaterializeSettings) SizeVT() (n int) { if m.AtomicCopy { n += 3 } + if m.WorkflowOptions != nil { + l = m.WorkflowOptions.SizeVT() + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -19716,6 +20645,29 @@ func (m *Shard) SizeVT() (n int) { return n } +func (m *WorkflowOptions) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l 
= len(m.TenantId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.StripShardedAutoIncrement { + n += 2 + } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *Workflow_ReplicationLocation) SizeVT() (n int) { if m == nil { return 0 @@ -19775,6 +20727,9 @@ func (m *Workflow_Stream_CopyState) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.StreamId != 0 { + n += 1 + sov(uint64(m.StreamId)) + } n += len(m.unknownFields) return n } @@ -19914,6 +20869,22 @@ func (m *Workflow_Stream) SizeVT() (n int) { l = m.ThrottlerStatus.SizeVT() n += 2 + l + sov(uint64(l)) } + if len(m.TabletTypes) > 0 { + l = 0 + for _, e := range m.TabletTypes { + l += sov(uint64(e)) + } + n += 2 + sov(uint64(l)) + l + } + if m.TabletSelectionPreference != 0 { + n += 2 + sov(uint64(m.TabletSelectionPreference)) + } + if len(m.Cells) > 0 { + for _, s := range m.Cells { + l = len(s) + n += 2 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -19966,6 +20937,10 @@ func (m *Workflow) SizeVT() (n int) { if m.DeferSecondaryKeys { n += 2 } + if m.Options != nil { + l = m.Options.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -20028,6 +21003,43 @@ func (m *AddCellsAliasResponse) SizeVT() (n int) { return n } +func (m *ApplyKeyspaceRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KeyspaceRoutingRules != nil { + l = m.KeyspaceRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.SkipRebuild { + n += 2 + } + if len(m.RebuildCells) > 0 { + for _, s := range m.RebuildCells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyKeyspaceRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KeyspaceRoutingRules != nil { + l = m.KeyspaceRoutingRules.SizeVT() + n += 1 + l + 
sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *ApplyRoutingRulesRequest) SizeVT() (n int) { if m == nil { return 0 @@ -20193,6 +21205,25 @@ func (m *ApplyVSchemaRequest) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if m.Strict { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ApplyVSchemaResponse_ParamList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Params) > 0 { + for _, s := range m.Params { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -20207,6 +21238,19 @@ func (m *ApplyVSchemaResponse) SizeVT() (n int) { l = m.VSchema.SizeVT() n += 1 + l + sov(uint64(l)) } + if len(m.UnknownVindexParams) > 0 { + for k, v := range m.UnknownVindexParams { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } n += len(m.unknownFields) return n } @@ -20460,12 +21504,6 @@ func (m *CreateKeyspaceRequest) SizeVT() (n int) { if m.AllowEmptyVSchema { n += 2 } - if len(m.ServedFroms) > 0 { - for _, e := range m.ServedFroms { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } if m.Type != 0 { n += 1 + sov(uint64(m.Type)) } @@ -20894,6 +21932,49 @@ func (m *ExecuteHookResponse) SizeVT() (n int) { return n } +func (m *ExecuteMultiFetchAsDBARequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TabletAlias != nil { + l = m.TabletAlias.SizeVT() + n += 1 + l + sov(uint64(l)) + } + l = len(m.Sql) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.MaxRows != 0 { + n += 1 + sov(uint64(m.MaxRows)) + } + if m.DisableBinlogs { + n += 2 + } + if m.ReloadSchema { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *ExecuteMultiFetchAsDBAResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Results) > 0 { + for _, e := range 
m.Results { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + func (m *FindAllShardsInKeyspaceRequest) SizeVT() (n int) { if m == nil { return 0 @@ -20931,6 +22012,42 @@ func (m *FindAllShardsInKeyspaceResponse) SizeVT() (n int) { return n } +func (m *ForceCutOverSchemaMigrationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Uuid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ForceCutOverSchemaMigrationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RowsAffectedByShard) > 0 { + for k, v := range m.RowsAffectedByShard { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + sov(uint64(v)) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + func (m *GetBackupsRequest) SizeVT() (n int) { if m == nil { return 0 @@ -21171,6 +22288,30 @@ func (m *GetPermissionsResponse) SizeVT() (n int) { return n } +func (m *GetKeyspaceRoutingRulesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetKeyspaceRoutingRulesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KeyspaceRoutingRules != nil { + l = m.KeyspaceRoutingRules.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *GetRoutingRulesRequest) SizeVT() (n int) { if m == nil { return 0 @@ -21301,6 +22442,53 @@ func (m *GetSchemaMigrationsResponse) SizeVT() (n int) { return n } +func (m *GetShardReplicationRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Keyspace) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Shard) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Cells) > 0 { + for _, 
s := range m.Cells { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *GetShardReplicationResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ShardReplicationByCell) > 0 { + for k, v := range m.ShardReplicationByCell { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + sov(uint64(l)) + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + l + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + func (m *GetShardRequest) SizeVT() (n int) { if m == nil { return 0 @@ -21787,6 +22975,12 @@ func (m *GetWorkflowsRequest) SizeVT() (n int) { if m.IncludeLogs { n += 2 } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -22339,6 +23533,10 @@ func (m *MoveTablesCreateRequest) SizeVT() (n int) { if m.AtomicCopy { n += 3 } + if m.WorkflowOptions != nil { + l = m.WorkflowOptions.SizeVT() + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -22406,6 +23604,12 @@ func (m *MoveTablesCompleteRequest) SizeVT() (n int) { if m.DryRun { n += 2 } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -22480,6 +23684,10 @@ func (m *PlannedReparentShardRequest) SizeVT() (n int) { l = m.WaitReplicasTimeout.SizeVT() n += 1 + l + sov(uint64(l)) } + if m.TolerableReplicationLag != nil { + l = m.TolerableReplicationLag.SizeVT() + n += 1 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -23093,50 +24301,6 @@ func (m *SetKeyspaceDurabilityPolicyResponse) SizeVT() (n int) { return n } -func (m *SetKeyspaceServedFromRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Keyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.TabletType != 0 { - n += 1 + 
sov(uint64(m.TabletType)) - } - if len(m.Cells) > 0 { - for _, s := range m.Cells { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - if m.Remove { - n += 2 - } - l = len(m.SourceKeyspace) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *SetKeyspaceServedFromResponse) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Keyspace != nil { - l = m.Keyspace.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - func (m *SetKeyspaceShardingInfoRequest) SizeVT() (n int) { if m == nil { return 0 @@ -24109,6 +25273,13 @@ func (m *VDiffCreateRequest) SizeVT() (n int) { if m.Verbose { n += 3 } + if m.MaxReportSampleRows != 0 { + n += 2 + sov(uint64(m.MaxReportSampleRows)) + } + if m.MaxDiffDuration != nil { + l = m.MaxDiffDuration.SizeVT() + n += 2 + l + sov(uint64(l)) + } n += len(m.unknownFields) return n } @@ -24288,6 +25459,12 @@ func (m *WorkflowDeleteRequest) SizeVT() (n int) { if m.KeepRoutingRules { n += 2 } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -24343,6 +25520,12 @@ func (m *WorkflowStatusRequest) SizeVT() (n int) { if l > 0 { n += 1 + l + sov(uint64(l)) } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -24511,6 +25694,12 @@ func (m *WorkflowSwitchTrafficRequest) SizeVT() (n int) { if m.InitializeTargetSequences { n += 2 } + if len(m.Shards) > 0 { + for _, s := range m.Shards { + l = len(s) + n += 1 + l + sov(uint64(l)) + } + } n += len(m.unknownFields) return n } @@ -25421,6 +26610,42 @@ func (m *MaterializeSettings) UnmarshalVT(dAtA []byte) error { } } m.AtomicCopy = bool(v != 0) + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkflowOptions == nil { + m.WorkflowOptions = &WorkflowOptions{} + } + if err := m.WorkflowOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -27288,6 +28513,141 @@ func (m *Shard) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *WorkflowOptions) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TenantId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.TenantId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StripShardedAutoIncrement", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StripShardedAutoIncrement = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Workflow_ReplicationLocation) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -27635,6 +28995,25 @@ func (m *Workflow_Stream_CopyState) UnmarshalVT(dAtA []byte) error { } m.LastPk = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamId", wireType) + } + m.StreamId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StreamId |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -28623,6 +30002,126 @@ func (m *Workflow_Stream) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 18: + if wireType == 0 { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + if elementCount != 0 && len(m.TabletTypes) == 0 { + m.TabletTypes = make([]topodata.TabletType, 0, elementCount) + } + for iNdEx < postIndex { + var v topodata.TabletType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= topodata.TabletType(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TabletTypes = append(m.TabletTypes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field TabletTypes", wireType) + } + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletSelectionPreference", wireType) + } + m.TabletSelectionPreference = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TabletSelectionPreference |= tabletmanagerdata.TabletSelectionPreference(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -29029,6 +30528,42 @@ func (m *Workflow) UnmarshalVT(dAtA []byte) error { } } m.DeferSecondaryKeys = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &WorkflowOptions{} + } + if err := m.Options.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -29387,7 +30922,7 @@ func (m *AddCellsAliasResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m *ApplyKeyspaceRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29410,15 +30945,15 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ApplyKeyspaceRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplyKeyspaceRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceRoutingRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29445,10 +30980,10 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoutingRules == nil { - m.RoutingRules = &vschema.RoutingRules{} + if m.KeyspaceRoutingRules == nil { + m.KeyspaceRoutingRules = &vschema.KeyspaceRoutingRules{} } - if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.KeyspaceRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); 
err != nil { return err } iNdEx = postIndex @@ -29526,7 +31061,7 @@ func (m *ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *ApplyKeyspaceRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29549,12 +31084,48 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: ApplyKeyspaceRoutingRulesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplyKeyspaceRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceRoutingRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KeyspaceRoutingRules == nil { + m.KeyspaceRoutingRules = &vschema.KeyspaceRoutingRules{} + } + if err := m.KeyspaceRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -29577,7 +31148,7 @@ func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { +func (m 
*ApplyRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29600,15 +31171,15 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ApplyRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplyRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29635,10 +31206,10 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ShardRoutingRules == nil { - m.ShardRoutingRules = &vschema.ShardRoutingRules{} + if m.RoutingRules == nil { + m.RoutingRules = &vschema.RoutingRules{} } - if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -29716,7 +31287,7 @@ func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *ApplyRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29739,10 +31310,10 @@ func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: 
wiretype end group for non-group") + return fmt.Errorf("proto: ApplyRoutingRulesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplyRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -29767,7 +31338,7 @@ func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *ApplyShardRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29790,17 +31361,17 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplyShardRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardRoutingRules", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -29810,27 +31381,217 @@ func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } 
if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + if m.ShardRoutingRules == nil { + m.ShardRoutingRules = &vschema.ShardRoutingRules{} + } + if err := m.ShardRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipRebuild", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipRebuild = bool(v != 0) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RebuildCells", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RebuildCells = append(m.RebuildCells, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyShardRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyShardRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplySchemaRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplySchemaRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplySchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30466,6 +32227,109 @@ func (m *ApplyVSchemaRequest) UnmarshalVT(dAtA []byte) error { } m.Sql = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Strict", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Strict = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplyVSchemaResponse_ParamList) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplyVSchemaResponse_ParamList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplyVSchemaResponse_ParamList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Params = append(m.Params, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skip(dAtA[iNdEx:]) @@ -30553,6 +32417,135 @@ func (m *ApplyVSchemaResponse) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnknownVindexParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UnknownVindexParams == nil { + m.UnknownVindexParams = make(map[string]*ApplyVSchemaResponse_ParamList) + } + var mapkey string + var mapvalue *ApplyVSchemaResponse_ParamList + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ApplyVSchemaResponse_ParamList{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UnknownVindexParams[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -30674,7 +32667,7 @@ func (m *BackupRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint64(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -31067,7 +33060,7 @@ func (m *BackupShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint64(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -32353,40 +34346,6 @@ func (m *CreateKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } } m.AllowEmptyVSchema = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServedFroms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - m.ServedFroms = append(m.ServedFroms, &topodata.Keyspace_ServedFrom{}) - if err := m.ServedFroms[len(m.ServedFroms)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) @@ -35059,7 +37018,7 @@ func (m *ExecuteHookResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteMultiFetchAsDBARequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35082,15 +37041,51 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecuteMultiFetchAsDBARequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecuteMultiFetchAsDBARequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := 
m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sql", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35118,8 +37113,67 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Sql = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxRows", wireType) + } + m.MaxRows = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxRows |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisableBinlogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableBinlogs = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloadSchema", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReloadSchema = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -35142,7 +37196,175 @@ func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *ExecuteMultiFetchAsDBAResponse) UnmarshalVT(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecuteMultiFetchAsDBAResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecuteMultiFetchAsDBAResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Results = append(m.Results, &query.QueryResult{}) + if err := m.Results[len(m.Results)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FindAllShardsInKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35322,6 +37544,285 @@ func (m *FindAllShardsInKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } +func (m *ForceCutOverSchemaMigrationRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForceCutOverSchemaMigrationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForceCutOverSchemaMigrationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ForceCutOverSchemaMigrationResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ForceCutOverSchemaMigrationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ForceCutOverSchemaMigrationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RowsAffectedByShard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RowsAffectedByShard == nil { + m.RowsAffectedByShard = make(map[string]uint64) + } + var mapkey string + var mapvalue uint64 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.RowsAffectedByShard[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *GetBackupsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -35663,7 +38164,684 @@ func (m *GetCellInfoRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CellInfo == nil { + m.CellInfo = &topodata.CellInfo{} + } + if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } 
+ if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Aliases == nil { + m.Aliases = make(map[string]*topodata.CellsAlias) + } + var mapkey string + var mapvalue *topodata.CellsAlias + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.CellsAlias{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Aliases[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TabletAlias", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TabletAlias == nil { + m.TabletAlias = &topodata.TabletAlias{} + } + if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetFullStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetFullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &replicationdata.FullStatus{} + } + if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35686,15 +38864,15 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CellInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35721,10 +38899,8 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CellInfo == nil { - m.CellInfo = &topodata.CellInfo{} - } - if err := m.CellInfo.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Keyspaces = append(m.Keyspaces, &Keyspace{}) + if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35750,58 +38926,7 @@ func (m *GetCellInfoResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellInfoNamesRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire 
& 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoNamesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35824,15 +38949,15 @@ func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellInfoNamesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellInfoNamesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35860,7 +38985,7 @@ func (m *GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -35884,58 +39009,7 @@ func (m 
*GetCellInfoNamesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetCellsAliasesRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35958,15 +39032,15 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetCellsAliasesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetCellsAliasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35993,105 +39067,12 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Aliases == nil { - m.Aliases = make(map[string]*topodata.CellsAlias) + if m.Keyspace == nil { + m.Keyspace = &Keyspace{} } - var mapkey string - var mapvalue *topodata.CellsAlias - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLength - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLength - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &topodata.CellsAlias{} - if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Aliases[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -36115,7 +39096,7 @@ func (m *GetCellsAliasesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36138,10 +39119,10 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusRequest: wiretype end group for 
non-group") + return fmt.Errorf("proto: GetPermissionsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -36202,7 +39183,7 @@ func (m *GetFullStatusRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36225,15 +39206,15 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetFullStatusResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetFullStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36260,10 +39241,10 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Status == nil { - m.Status = &replicationdata.FullStatus{} + if m.Permissions == nil { + m.Permissions = &tabletmanagerdata.Permissions{} } - if err := m.Status.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36289,7 +39270,7 @@ func (m *GetFullStatusResponse) UnmarshalVT(dAtA []byte) error { } 
return nil } -func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36312,10 +39293,10 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -36340,7 +39321,7 @@ func (m *GetKeyspacesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetKeyspaceRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36363,15 +39344,15 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspacesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetKeyspaceRoutingRulesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetKeyspaceRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspaces", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeyspaceRoutingRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36398,8 +39379,10 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA 
[]byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspaces = append(m.Keyspaces, &Keyspace{}) - if err := m.Keyspaces[len(m.Keyspaces)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if m.KeyspaceRoutingRules == nil { + m.KeyspaceRoutingRules = &vschema.KeyspaceRoutingRules{} + } + if err := m.KeyspaceRoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36425,7 +39408,7 @@ func (m *GetKeyspacesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36448,44 +39431,12 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetRoutingRulesRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ 
-36508,7 +39459,7 @@ func (m *GetKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36531,15 +39482,15 @@ func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetKeyspaceResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetRoutingRulesResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetKeyspaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36566,10 +39517,10 @@ func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Keyspace == nil { - m.Keyspace = &Keyspace{} + if m.RoutingRules == nil { + m.RoutingRules = &vschema.RoutingRules{} } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36595,7 +39546,7 @@ func (m *GetKeyspaceResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36618,10 +39569,10 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
GetPermissionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -36660,62 +39611,43 @@ func (m *GetPermissionsRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetPermissionsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetPermissionsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Permissions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -36725,79 +39657,104 @@ func (m *GetPermissionsResponse) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Permissions == nil { - m.Permissions = &tabletmanagerdata.Permissions{} + m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) } - if err := m.Permissions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + m.IncludeViews = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.TableNamesOnly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.TableSizesOnly = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetRoutingRulesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetRoutingRulesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.TableSchemaOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -36820,7 +39777,7 @@ func (m *GetRoutingRulesRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36843,15 +39800,15 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
GetRoutingRulesResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetRoutingRulesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingRules", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36878,10 +39835,10 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RoutingRules == nil { - m.RoutingRules = &vschema.RoutingRules{} + if m.Schema == nil { + m.Schema = &tabletmanagerdata.SchemaDefinition{} } - if err := m.RoutingRules.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Schema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36907,7 +39864,7 @@ func (m *GetRoutingRulesResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36930,17 +39887,17 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field TabletAlias", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -36950,31 +39907,27 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TabletAlias == nil { - m.TabletAlias = &topodata.TabletAlias{} - } - if err := m.TabletAlias.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37002,11 +39955,11 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tables = append(m.Tables, string(dAtA[iNdEx:postIndex])) + m.Uuid = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludeTables", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37034,13 +39987,13 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExcludeTables = append(m.ExcludeTables, string(dAtA[iNdEx:postIndex])) + m.MigrationContext = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 0 
{ - return fmt.Errorf("proto: wrong wireType = %d for field IncludeViews", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -37050,17 +40003,16 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Status |= SchemaMigration_Status(b&0x7F) << shift if b < 0x80 { break } } - m.IncludeViews = bool(v != 0) case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableNamesOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recent", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -37070,17 +40022,33 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.TableNamesOnly = bool(v != 0) + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Recent == nil { + m.Recent = &vttime.Duration{} + } + if err := m.Recent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TableSizesOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) } - var v int + m.Order = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -37090,17 +40058,16 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Order |= QueryOrdering(b&0x7F) << shift if b < 0x80 { break } } - m.TableSizesOnly = bool(v != 0) case 7: if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field TableSchemaOnly", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var v int + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -37110,12 +40077,30 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Skip", wireType) + } + m.Skip = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Skip |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.TableSchemaOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -37138,7 +40123,7 @@ func (m *GetSchemaRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -37161,15 +40146,15 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetSchemaMigrationsResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetSchemaMigrationsResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schema", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Migrations", wireType) } var msglen int for shift := uint(0); ; shift += 
7 { @@ -37196,10 +40181,8 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Schema == nil { - m.Schema = &tabletmanagerdata.SchemaDefinition{} - } - if err := m.Schema.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + m.Migrations = append(m.Migrations, &SchemaMigration{}) + if err := m.Migrations[len(m.Migrations)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -37225,7 +40208,7 @@ func (m *GetSchemaResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetShardReplicationRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -37248,10 +40231,10 @@ func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaMigrationsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardReplicationRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaMigrationsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardReplicationRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -37288,7 +40271,7 @@ func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Shard", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37316,11 +40299,11 @@ func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Uuid = string(dAtA[iNdEx:postIndex]) + m.Shard = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field MigrationContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -37348,120 +40331,8 @@ func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.MigrationContext = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= SchemaMigration_Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Recent", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Recent == nil { - m.Recent = &vttime.Duration{} - } - if err := m.Recent.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - m.Order = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Order |= QueryOrdering(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Skip", wireType) - } - m.Skip = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Skip |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -37484,7 +40355,7 @@ func (m *GetSchemaMigrationsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { +func (m *GetShardReplicationResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -37507,15 +40378,15 @@ func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetSchemaMigrationsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetShardReplicationResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetSchemaMigrationsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetShardReplicationResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Migrations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShardReplicationByCell", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -37542,10 +40413,105 @@ func (m *GetSchemaMigrationsResponse) UnmarshalVT(dAtA []byte) error { if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Migrations = append(m.Migrations, &SchemaMigration{}) - if err := m.Migrations[len(m.Migrations)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.ShardReplicationByCell == nil { + m.ShardReplicationByCell = make(map[string]*topodata.ShardReplication) + } + var mapkey string + var mapvalue *topodata.ShardReplication + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &topodata.ShardReplication{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } 
+ iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.ShardReplicationByCell[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -40617,6 +43583,38 @@ func (m *GetWorkflowsRequest) UnmarshalVT(dAtA []byte) error { } } m.IncludeLogs = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -44187,6 +47185,42 @@ func (m *MoveTablesCreateRequest) UnmarshalVT(dAtA []byte) error { } } m.AtomicCopy = bool(v != 0) + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkflowOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkflowOptions == nil { + m.WorkflowOptions = &WorkflowOptions{} + } 
+ if err := m.WorkflowOptions.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -44606,6 +47640,38 @@ func (m *MoveTablesCompleteRequest) UnmarshalVT(dAtA []byte) error { } } m.DryRun = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -45082,6 +48148,42 @@ func (m *PlannedReparentShardRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerableReplicationLag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TolerableReplicationLag == nil { + m.TolerableReplicationLag = &vttime.Duration{} + } + if err := m.TolerableReplicationLag.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skip(dAtA[iNdEx:]) @@ -46262,7 +49364,7 @@ func (m *ReloadSchemaKeyspaceRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -46533,7 +49635,7 @@ func (m *ReloadSchemaShardRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Concurrency |= uint32(b&0x7F) << shift + m.Concurrency |= int32(b&0x7F) << shift if b < 0x80 { break } @@ -48700,173 +51802,7 @@ func (m *RunHealthCheckRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { +func (m *RunHealthCheckResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48889,48 +51825,12 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: wiretype end group for non-group") + return fmt.Errorf("proto: RunHealthCheckResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunHealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Keyspace == nil { - m.Keyspace = 
&topodata.Keyspace{} - } - if err := m.Keyspace.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -48953,7 +51853,7 @@ func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceDurabilityPolicyRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -48976,10 +51876,10 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceServedFromRequest: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetKeyspaceServedFromRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -49015,79 +51915,8 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TabletType", wireType) - } - m.TabletType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TabletType |= topodata.TabletType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cells", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cells = append(m.Cells, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Remove", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Remove = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceKeyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DurabilityPolicy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -49115,7 +51944,7 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SourceKeyspace = string(dAtA[iNdEx:postIndex]) + m.DurabilityPolicy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -49139,7 +51968,7 @@ func (m *SetKeyspaceServedFromRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { +func (m *SetKeyspaceDurabilityPolicyResponse) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -49162,10 +51991,10 @@ func (m *SetKeyspaceServedFromResponse) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetKeyspaceServedFromResponse: wiretype end group for non-group") + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return 
fmt.Errorf("proto: SetKeyspaceServedFromResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SetKeyspaceDurabilityPolicyResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -55389,36 +58218,151 @@ func (m *VDiffCreateRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FilteredReplicationWaitTime == nil { - m.FilteredReplicationWaitTime = &vttime.Duration{} + if m.FilteredReplicationWaitTime == nil { + m.FilteredReplicationWaitTime = &vttime.Duration{} + } + if err := m.FilteredReplicationWaitTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DebugQuery = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OnlyPKs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OnlyPKs = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpdateTableStats = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) + } + 
m.MaxExtraRowsToCompare = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Wait = bool(v != 0) + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitUpdateInterval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WaitUpdateInterval == nil { + m.WaitUpdateInterval = &vttime.Duration{} } - if err := m.FilteredReplicationWaitTime.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.WaitUpdateInterval.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DebugQuery", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DebugQuery = bool(v != 0) - case 12: + case 17: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OnlyPKs", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -55435,10 +58379,10 @@ func (m *VDiffCreateRequest) UnmarshalVT(dAtA []byte) error { break } } - m.OnlyPKs = bool(v != 0) - case 13: + m.AutoRetry = bool(v != 0) + case 18: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateTableStats", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -55455,31 +58399,12 @@ func (m *VDiffCreateRequest) UnmarshalVT(dAtA []byte) error { break } } - m.UpdateTableStats = bool(v != 0) - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxExtraRowsToCompare", wireType) - } - m.MaxExtraRowsToCompare = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxExtraRowsToCompare |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: + m.Verbose = bool(v != 0) + case 19: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxReportSampleRows", wireType) } - var v int + m.MaxReportSampleRows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -55489,15 +58414,14 @@ func (m *VDiffCreateRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + m.MaxReportSampleRows |= int64(b&0x7F) << shift if b < 0x80 { break } } - m.Wait = bool(v != 0) - case 16: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WaitUpdateInterval", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxDiffDuration", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -55524,53 +58448,13 @@ func (m *VDiffCreateRequest) 
UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WaitUpdateInterval == nil { - m.WaitUpdateInterval = &vttime.Duration{} + if m.MaxDiffDuration == nil { + m.MaxDiffDuration = &vttime.Duration{} } - if err := m.WaitUpdateInterval.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + if err := m.MaxDiffDuration.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoRetry", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AutoRetry = bool(v != 0) - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Verbose = bool(v != 0) default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -56490,145 +59374,177 @@ func (m *VDiffStopRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Uuid = string(dAtA[iNdEx:postIndex]) + m.TargetKeyspace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uuid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uuid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VDiffStopResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VDiffStopResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VDiffStopResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WorkflowDeleteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WorkflowDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyspace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VDiffStopResponse) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VDiffStopResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VDiffStopResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkflowDeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkflowDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyspace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -56656,13 +59572,13 @@ func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Keyspace = string(dAtA[iNdEx:postIndex]) + m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workflow", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -56672,27 +59588,15 @@ func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Workflow = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + m.KeepData = bool(v != 0) + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -56709,12 +59613,12 @@ func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { break } } - m.KeepData = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepRoutingRules", wireType) + m.KeepRoutingRules = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -56724,12 +59628,24 @@ func (m *WorkflowDeleteRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.KeepRoutingRules = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -57069,6 +59985,38 @@ func (m *WorkflowStatusRequest) UnmarshalVT(dAtA []byte) error { } m.Workflow = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -58245,6 +61193,38 @@ func (m *WorkflowSwitchTrafficRequest) UnmarshalVT(dAtA []byte) error { } } m.InitializeTargetSequences = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Shards", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Shards = append(m.Shards, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/go/vt/proto/vtctlservice/vtctlservice.pb.go b/go/vt/proto/vtctlservice/vtctlservice.pb.go index 41231828a3d..3bd9b3f847a 100644 --- a/go/vt/proto/vtctlservice/vtctlservice.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vtctlservice.proto @@ -51,7 +51,7 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0x93, 0x50, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x32, 0xda, 0x54, 0x0a, 0x06, 0x56, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x64, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -73,630 +73,666 @@ var file_vtctlservice_proto_rawDesc = []byte{ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, - 0x6c, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, - 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x41, 0x70, 0x70, + 0x6c, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 
0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, + 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, - 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, - 0x12, 0x4b, 0x0a, 0x0b, 0x42, 0x61, 0x63, 0x6b, 
0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x6c, 0x0a, - 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x43, 0x6c, - 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 
0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, + 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x41, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x70, 0x12, 0x18, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4b, 0x0a, 0x0b, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x6c, 0x0a, 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, + 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x27, 0x2e, 0x76, 
0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, + 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x43, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 
0x74, 0x61, 0x2e, 0x43, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x57, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, - 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, - 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, - 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, + 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 
0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1f, 0x2e, 
0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x45, + 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, - 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, - 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, - 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, + 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x6d, 0x65, 0x72, 
+ 0x67, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, + 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, + 0x73, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, + 0x0a, 0x11, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, + 0x44, 0x42, 0x41, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, + 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4c, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x12, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x65, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x12, 
0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, - 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x65, 0x48, 0x6f, 0x6f, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6f, + 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, + 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, + 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x41, 0x73, 0x44, 0x42, 0x41, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x72, 0x0a, 0x17, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, + 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 
0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, - 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, - 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, - 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 
0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, - 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, - 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1a, 0x2e, 
0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, - 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, - 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x46, 0x69, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x53, 0x68, 0x61, 0x72, 0x64, 0x73, 0x49, + 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x75, 0x74, 0x4f, + 0x76, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2d, 0x2e, 
0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, + 0x6f, 0x72, 0x63, 0x65, 0x43, 0x75, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x6f, + 0x72, 0x63, 0x65, 0x43, 0x75, 0x74, 0x4f, 0x76, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, + 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, + 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x5d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 
0x6c, 0x49, 0x6e, 0x66, 0x6f, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, + 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, 0x6c, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6c, + 0x6c, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 
0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, + 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, + 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x50, + 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, + 0x75, 0x6c, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, + 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x1a, + 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, + 0x0a, 0x14, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, + 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, + 0x61, 0x72, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, - 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x72, 0x76, 
0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, + 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x68, 0x72, 0x6f, 0x74, - 0x74, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 
0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, - 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x20, 0x2e, 0x76, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x47, + 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, - 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, - 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, - 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, - 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, - 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, - 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, - 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 
0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x63, 0x0a, 0x12, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x4c, 0x6f, 0x6f, 
0x6b, 0x75, 0x70, 0x56, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x61, 0x74, - 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x23, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0d, 0x4d, - 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x4d, 0x6f, 
0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4d, 0x6f, 0x75, 0x6e, - 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, - 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, - 0x77, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, - 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, - 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, - 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x22, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, - 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, - 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, + 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x72, 0x76, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 
0x61, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, + 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x74, 0x73, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, + 0x50, 0x61, 0x74, 0x68, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, 0x61, 0x74, 0x68, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x50, + 0x61, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, + 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, - 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, - 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, - 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, - 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, - 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, - 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 
0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, - 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, + 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x49, 0x6e, + 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, + 0x6e, 0x69, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x4c, 0x61, 0x75, + 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x63, + 0x68, 
0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x24, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, + 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x45, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x60, 0x0a, 0x11, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, + 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x0f, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x6e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 
0x64, 0x61, 0x74, + 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, + 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5b, 0x0a, 0x10, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, + 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, + 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x43, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, + 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x62, 
0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, + 0x0a, 0x14, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, + 0x68, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 
0x65, 0x66, 0x72, 0x65, 0x73, 0x68, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, + 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x66, 0x0a, 0x13, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, + 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 
0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, - 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, - 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 
0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 
0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, - 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, - 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, - 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, + 0x68, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x79, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6c, 0x6f, 0x61, + 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x52, 0x65, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6c, 0x6f, + 0x61, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x00, 0x12, 0x63, 0x0a, 0x12, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, + 0x12, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, + 0x6c, 0x6c, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x43, 0x65, 0x6c, + 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, + 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x20, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 
0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x62, 0x0a, 0x11, 0x52, + 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x42, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x69, 0x0a, 0x14, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, + 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, + 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 
0x57, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, - 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x52, 0x75, + 0x6e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x75, 0x6e, 0x48, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x7e, 0x0a, 0x1b, 0x53, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 
0x00, 0x12, 0x75, 0x0a, 0x18, - 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, - 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, - 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, - 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, + 0x63, 0x79, 0x12, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, + 0x6c, 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, + 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x12, + 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x76, 0x74, + 0x63, 
0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x49, 0x73, 0x50, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x53, 0x65, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, - 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, - 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, - 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x57, + 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x12, + 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, - 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x66, 0x0a, 0x13, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x12, 0x25, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x78, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, - 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, - 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 
0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, - 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 
0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, - 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x6f, 0x6e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x53, 0x68, 
0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x28, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x6c, 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6c, + 0x65, 0x65, 0x70, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, + 0x72, 0x64, 0x41, 0x64, 0x64, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, 0x64, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x41, + 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 
0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, + 0x0a, 0x10, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, - 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, - 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, - 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, - 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 
0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, - 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, + 0x74, 0x61, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, + 0x0f, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7b, 0x0a, 0x1a, 0x54, 0x61, 0x62, + 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 
0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x6c, 0x79, 0x52, 0x65, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x43, 0x65, 0x6c, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, + 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x5d, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, + 0x69, 0x61, 0x73, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, 0x6c, 0x69, 0x61, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x65, 0x6c, 0x6c, 0x73, 0x41, + 0x6c, 0x69, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x45, + 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x76, 0x74, 0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 
0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, + 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x28, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x0d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x1f, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x68, 0x61, 0x72, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x72, 0x0a, 0x17, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x29, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 
0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2a, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x69, 0x0a, 0x14, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x12, 0x26, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 
0x65, 0x56, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1d, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x1d, - 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, - 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, - 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x48, 0x0a, 0x09, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76, - 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, - 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, - 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x56, 0x44, 0x69, - 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, - 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x21, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x22, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x56, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x1d, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, + 0x68, 0x6f, 0x77, 0x12, 0x1b, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1c, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, + 0x66, 0x66, 0x53, 0x68, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x48, 0x0a, 0x09, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x1b, 0x2e, + 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, + 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x76, 0x74, 
0x63, + 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x44, 0x69, 0x66, 0x66, 0x53, 0x74, 0x6f, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x12, 0x27, + 0x6c, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x15, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x12, 0x27, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, + 0x61, 0x2e, 0x57, 0x6f, 0x72, 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, + 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x77, 0x69, 0x74, - 0x63, 0x68, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, - 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, - 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, - 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x76, + 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, + 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x64, 0x61, 0x74, 0x61, 0x2e, 
0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0x2b, 0x5a, 0x29, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, + 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x63, 0x74, 0x6c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_vtctlservice_proto_goTypes = []interface{}{ @@ -705,219 +741,229 @@ var file_vtctlservice_proto_goTypes = []interface{}{ (*vtctldata.AddCellsAliasRequest)(nil), // 2: vtctldata.AddCellsAliasRequest (*vtctldata.ApplyRoutingRulesRequest)(nil), // 3: vtctldata.ApplyRoutingRulesRequest (*vtctldata.ApplySchemaRequest)(nil), // 4: vtctldata.ApplySchemaRequest - (*vtctldata.ApplyShardRoutingRulesRequest)(nil), // 5: vtctldata.ApplyShardRoutingRulesRequest - (*vtctldata.ApplyVSchemaRequest)(nil), // 6: vtctldata.ApplyVSchemaRequest - (*vtctldata.BackupRequest)(nil), // 7: vtctldata.BackupRequest - (*vtctldata.BackupShardRequest)(nil), // 8: vtctldata.BackupShardRequest - (*vtctldata.CancelSchemaMigrationRequest)(nil), // 9: vtctldata.CancelSchemaMigrationRequest - (*vtctldata.ChangeTabletTypeRequest)(nil), // 10: vtctldata.ChangeTabletTypeRequest - (*vtctldata.CleanupSchemaMigrationRequest)(nil), // 11: vtctldata.CleanupSchemaMigrationRequest - (*vtctldata.CompleteSchemaMigrationRequest)(nil), // 12: vtctldata.CompleteSchemaMigrationRequest - (*vtctldata.CreateKeyspaceRequest)(nil), // 13: vtctldata.CreateKeyspaceRequest - (*vtctldata.CreateShardRequest)(nil), // 14: vtctldata.CreateShardRequest - (*vtctldata.DeleteCellInfoRequest)(nil), // 15: vtctldata.DeleteCellInfoRequest - (*vtctldata.DeleteCellsAliasRequest)(nil), // 16: vtctldata.DeleteCellsAliasRequest - (*vtctldata.DeleteKeyspaceRequest)(nil), // 17: vtctldata.DeleteKeyspaceRequest - (*vtctldata.DeleteShardsRequest)(nil), // 18: 
vtctldata.DeleteShardsRequest - (*vtctldata.DeleteSrvVSchemaRequest)(nil), // 19: vtctldata.DeleteSrvVSchemaRequest - (*vtctldata.DeleteTabletsRequest)(nil), // 20: vtctldata.DeleteTabletsRequest - (*vtctldata.EmergencyReparentShardRequest)(nil), // 21: vtctldata.EmergencyReparentShardRequest - (*vtctldata.ExecuteFetchAsAppRequest)(nil), // 22: vtctldata.ExecuteFetchAsAppRequest - (*vtctldata.ExecuteFetchAsDBARequest)(nil), // 23: vtctldata.ExecuteFetchAsDBARequest - (*vtctldata.ExecuteHookRequest)(nil), // 24: vtctldata.ExecuteHookRequest - (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 25: vtctldata.FindAllShardsInKeyspaceRequest - (*vtctldata.GetBackupsRequest)(nil), // 26: vtctldata.GetBackupsRequest - (*vtctldata.GetCellInfoRequest)(nil), // 27: vtctldata.GetCellInfoRequest - (*vtctldata.GetCellInfoNamesRequest)(nil), // 28: vtctldata.GetCellInfoNamesRequest - (*vtctldata.GetCellsAliasesRequest)(nil), // 29: vtctldata.GetCellsAliasesRequest - (*vtctldata.GetFullStatusRequest)(nil), // 30: vtctldata.GetFullStatusRequest - (*vtctldata.GetKeyspaceRequest)(nil), // 31: vtctldata.GetKeyspaceRequest - (*vtctldata.GetKeyspacesRequest)(nil), // 32: vtctldata.GetKeyspacesRequest - (*vtctldata.GetPermissionsRequest)(nil), // 33: vtctldata.GetPermissionsRequest - (*vtctldata.GetRoutingRulesRequest)(nil), // 34: vtctldata.GetRoutingRulesRequest - (*vtctldata.GetSchemaRequest)(nil), // 35: vtctldata.GetSchemaRequest - (*vtctldata.GetSchemaMigrationsRequest)(nil), // 36: vtctldata.GetSchemaMigrationsRequest - (*vtctldata.GetShardRequest)(nil), // 37: vtctldata.GetShardRequest - (*vtctldata.GetShardRoutingRulesRequest)(nil), // 38: vtctldata.GetShardRoutingRulesRequest - (*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 39: vtctldata.GetSrvKeyspaceNamesRequest - (*vtctldata.GetSrvKeyspacesRequest)(nil), // 40: vtctldata.GetSrvKeyspacesRequest - (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 41: vtctldata.UpdateThrottlerConfigRequest - 
(*vtctldata.GetSrvVSchemaRequest)(nil), // 42: vtctldata.GetSrvVSchemaRequest - (*vtctldata.GetSrvVSchemasRequest)(nil), // 43: vtctldata.GetSrvVSchemasRequest - (*vtctldata.GetTabletRequest)(nil), // 44: vtctldata.GetTabletRequest - (*vtctldata.GetTabletsRequest)(nil), // 45: vtctldata.GetTabletsRequest - (*vtctldata.GetTopologyPathRequest)(nil), // 46: vtctldata.GetTopologyPathRequest - (*vtctldata.GetVersionRequest)(nil), // 47: vtctldata.GetVersionRequest - (*vtctldata.GetVSchemaRequest)(nil), // 48: vtctldata.GetVSchemaRequest - (*vtctldata.GetWorkflowsRequest)(nil), // 49: vtctldata.GetWorkflowsRequest - (*vtctldata.InitShardPrimaryRequest)(nil), // 50: vtctldata.InitShardPrimaryRequest - (*vtctldata.LaunchSchemaMigrationRequest)(nil), // 51: vtctldata.LaunchSchemaMigrationRequest - (*vtctldata.LookupVindexCreateRequest)(nil), // 52: vtctldata.LookupVindexCreateRequest - (*vtctldata.LookupVindexExternalizeRequest)(nil), // 53: vtctldata.LookupVindexExternalizeRequest - (*vtctldata.MaterializeCreateRequest)(nil), // 54: vtctldata.MaterializeCreateRequest - (*vtctldata.MigrateCreateRequest)(nil), // 55: vtctldata.MigrateCreateRequest - (*vtctldata.MountRegisterRequest)(nil), // 56: vtctldata.MountRegisterRequest - (*vtctldata.MountUnregisterRequest)(nil), // 57: vtctldata.MountUnregisterRequest - (*vtctldata.MountShowRequest)(nil), // 58: vtctldata.MountShowRequest - (*vtctldata.MountListRequest)(nil), // 59: vtctldata.MountListRequest - (*vtctldata.MoveTablesCreateRequest)(nil), // 60: vtctldata.MoveTablesCreateRequest - (*vtctldata.MoveTablesCompleteRequest)(nil), // 61: vtctldata.MoveTablesCompleteRequest - (*vtctldata.PingTabletRequest)(nil), // 62: vtctldata.PingTabletRequest - (*vtctldata.PlannedReparentShardRequest)(nil), // 63: vtctldata.PlannedReparentShardRequest - (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 64: vtctldata.RebuildKeyspaceGraphRequest - (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 65: vtctldata.RebuildVSchemaGraphRequest - 
(*vtctldata.RefreshStateRequest)(nil), // 66: vtctldata.RefreshStateRequest - (*vtctldata.RefreshStateByShardRequest)(nil), // 67: vtctldata.RefreshStateByShardRequest - (*vtctldata.ReloadSchemaRequest)(nil), // 68: vtctldata.ReloadSchemaRequest - (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 69: vtctldata.ReloadSchemaKeyspaceRequest - (*vtctldata.ReloadSchemaShardRequest)(nil), // 70: vtctldata.ReloadSchemaShardRequest - (*vtctldata.RemoveBackupRequest)(nil), // 71: vtctldata.RemoveBackupRequest - (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 72: vtctldata.RemoveKeyspaceCellRequest - (*vtctldata.RemoveShardCellRequest)(nil), // 73: vtctldata.RemoveShardCellRequest - (*vtctldata.ReparentTabletRequest)(nil), // 74: vtctldata.ReparentTabletRequest - (*vtctldata.ReshardCreateRequest)(nil), // 75: vtctldata.ReshardCreateRequest - (*vtctldata.RestoreFromBackupRequest)(nil), // 76: vtctldata.RestoreFromBackupRequest - (*vtctldata.RetrySchemaMigrationRequest)(nil), // 77: vtctldata.RetrySchemaMigrationRequest - (*vtctldata.RunHealthCheckRequest)(nil), // 78: vtctldata.RunHealthCheckRequest - (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 79: vtctldata.SetKeyspaceDurabilityPolicyRequest - (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 80: vtctldata.SetShardIsPrimaryServingRequest - (*vtctldata.SetShardTabletControlRequest)(nil), // 81: vtctldata.SetShardTabletControlRequest - (*vtctldata.SetWritableRequest)(nil), // 82: vtctldata.SetWritableRequest - (*vtctldata.ShardReplicationAddRequest)(nil), // 83: vtctldata.ShardReplicationAddRequest - (*vtctldata.ShardReplicationFixRequest)(nil), // 84: vtctldata.ShardReplicationFixRequest - (*vtctldata.ShardReplicationPositionsRequest)(nil), // 85: vtctldata.ShardReplicationPositionsRequest - (*vtctldata.ShardReplicationRemoveRequest)(nil), // 86: vtctldata.ShardReplicationRemoveRequest - (*vtctldata.SleepTabletRequest)(nil), // 87: vtctldata.SleepTabletRequest - (*vtctldata.SourceShardAddRequest)(nil), // 88: 
vtctldata.SourceShardAddRequest - (*vtctldata.SourceShardDeleteRequest)(nil), // 89: vtctldata.SourceShardDeleteRequest - (*vtctldata.StartReplicationRequest)(nil), // 90: vtctldata.StartReplicationRequest - (*vtctldata.StopReplicationRequest)(nil), // 91: vtctldata.StopReplicationRequest - (*vtctldata.TabletExternallyReparentedRequest)(nil), // 92: vtctldata.TabletExternallyReparentedRequest - (*vtctldata.UpdateCellInfoRequest)(nil), // 93: vtctldata.UpdateCellInfoRequest - (*vtctldata.UpdateCellsAliasRequest)(nil), // 94: vtctldata.UpdateCellsAliasRequest - (*vtctldata.ValidateRequest)(nil), // 95: vtctldata.ValidateRequest - (*vtctldata.ValidateKeyspaceRequest)(nil), // 96: vtctldata.ValidateKeyspaceRequest - (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 97: vtctldata.ValidateSchemaKeyspaceRequest - (*vtctldata.ValidateShardRequest)(nil), // 98: vtctldata.ValidateShardRequest - (*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 99: vtctldata.ValidateVersionKeyspaceRequest - (*vtctldata.ValidateVersionShardRequest)(nil), // 100: vtctldata.ValidateVersionShardRequest - (*vtctldata.ValidateVSchemaRequest)(nil), // 101: vtctldata.ValidateVSchemaRequest - (*vtctldata.VDiffCreateRequest)(nil), // 102: vtctldata.VDiffCreateRequest - (*vtctldata.VDiffDeleteRequest)(nil), // 103: vtctldata.VDiffDeleteRequest - (*vtctldata.VDiffResumeRequest)(nil), // 104: vtctldata.VDiffResumeRequest - (*vtctldata.VDiffShowRequest)(nil), // 105: vtctldata.VDiffShowRequest - (*vtctldata.VDiffStopRequest)(nil), // 106: vtctldata.VDiffStopRequest - (*vtctldata.WorkflowDeleteRequest)(nil), // 107: vtctldata.WorkflowDeleteRequest - (*vtctldata.WorkflowStatusRequest)(nil), // 108: vtctldata.WorkflowStatusRequest - (*vtctldata.WorkflowSwitchTrafficRequest)(nil), // 109: vtctldata.WorkflowSwitchTrafficRequest - (*vtctldata.WorkflowUpdateRequest)(nil), // 110: vtctldata.WorkflowUpdateRequest - (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 111: vtctldata.ExecuteVtctlCommandResponse - 
(*vtctldata.AddCellInfoResponse)(nil), // 112: vtctldata.AddCellInfoResponse - (*vtctldata.AddCellsAliasResponse)(nil), // 113: vtctldata.AddCellsAliasResponse - (*vtctldata.ApplyRoutingRulesResponse)(nil), // 114: vtctldata.ApplyRoutingRulesResponse - (*vtctldata.ApplySchemaResponse)(nil), // 115: vtctldata.ApplySchemaResponse - (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 116: vtctldata.ApplyShardRoutingRulesResponse - (*vtctldata.ApplyVSchemaResponse)(nil), // 117: vtctldata.ApplyVSchemaResponse - (*vtctldata.BackupResponse)(nil), // 118: vtctldata.BackupResponse - (*vtctldata.CancelSchemaMigrationResponse)(nil), // 119: vtctldata.CancelSchemaMigrationResponse - (*vtctldata.ChangeTabletTypeResponse)(nil), // 120: vtctldata.ChangeTabletTypeResponse - (*vtctldata.CleanupSchemaMigrationResponse)(nil), // 121: vtctldata.CleanupSchemaMigrationResponse - (*vtctldata.CompleteSchemaMigrationResponse)(nil), // 122: vtctldata.CompleteSchemaMigrationResponse - (*vtctldata.CreateKeyspaceResponse)(nil), // 123: vtctldata.CreateKeyspaceResponse - (*vtctldata.CreateShardResponse)(nil), // 124: vtctldata.CreateShardResponse - (*vtctldata.DeleteCellInfoResponse)(nil), // 125: vtctldata.DeleteCellInfoResponse - (*vtctldata.DeleteCellsAliasResponse)(nil), // 126: vtctldata.DeleteCellsAliasResponse - (*vtctldata.DeleteKeyspaceResponse)(nil), // 127: vtctldata.DeleteKeyspaceResponse - (*vtctldata.DeleteShardsResponse)(nil), // 128: vtctldata.DeleteShardsResponse - (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 129: vtctldata.DeleteSrvVSchemaResponse - (*vtctldata.DeleteTabletsResponse)(nil), // 130: vtctldata.DeleteTabletsResponse - (*vtctldata.EmergencyReparentShardResponse)(nil), // 131: vtctldata.EmergencyReparentShardResponse - (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 132: vtctldata.ExecuteFetchAsAppResponse - (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 133: vtctldata.ExecuteFetchAsDBAResponse - (*vtctldata.ExecuteHookResponse)(nil), // 134: 
vtctldata.ExecuteHookResponse - (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 135: vtctldata.FindAllShardsInKeyspaceResponse - (*vtctldata.GetBackupsResponse)(nil), // 136: vtctldata.GetBackupsResponse - (*vtctldata.GetCellInfoResponse)(nil), // 137: vtctldata.GetCellInfoResponse - (*vtctldata.GetCellInfoNamesResponse)(nil), // 138: vtctldata.GetCellInfoNamesResponse - (*vtctldata.GetCellsAliasesResponse)(nil), // 139: vtctldata.GetCellsAliasesResponse - (*vtctldata.GetFullStatusResponse)(nil), // 140: vtctldata.GetFullStatusResponse - (*vtctldata.GetKeyspaceResponse)(nil), // 141: vtctldata.GetKeyspaceResponse - (*vtctldata.GetKeyspacesResponse)(nil), // 142: vtctldata.GetKeyspacesResponse - (*vtctldata.GetPermissionsResponse)(nil), // 143: vtctldata.GetPermissionsResponse - (*vtctldata.GetRoutingRulesResponse)(nil), // 144: vtctldata.GetRoutingRulesResponse - (*vtctldata.GetSchemaResponse)(nil), // 145: vtctldata.GetSchemaResponse - (*vtctldata.GetSchemaMigrationsResponse)(nil), // 146: vtctldata.GetSchemaMigrationsResponse - (*vtctldata.GetShardResponse)(nil), // 147: vtctldata.GetShardResponse - (*vtctldata.GetShardRoutingRulesResponse)(nil), // 148: vtctldata.GetShardRoutingRulesResponse - (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 149: vtctldata.GetSrvKeyspaceNamesResponse - (*vtctldata.GetSrvKeyspacesResponse)(nil), // 150: vtctldata.GetSrvKeyspacesResponse - (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 151: vtctldata.UpdateThrottlerConfigResponse - (*vtctldata.GetSrvVSchemaResponse)(nil), // 152: vtctldata.GetSrvVSchemaResponse - (*vtctldata.GetSrvVSchemasResponse)(nil), // 153: vtctldata.GetSrvVSchemasResponse - (*vtctldata.GetTabletResponse)(nil), // 154: vtctldata.GetTabletResponse - (*vtctldata.GetTabletsResponse)(nil), // 155: vtctldata.GetTabletsResponse - (*vtctldata.GetTopologyPathResponse)(nil), // 156: vtctldata.GetTopologyPathResponse - (*vtctldata.GetVersionResponse)(nil), // 157: vtctldata.GetVersionResponse - 
(*vtctldata.GetVSchemaResponse)(nil), // 158: vtctldata.GetVSchemaResponse - (*vtctldata.GetWorkflowsResponse)(nil), // 159: vtctldata.GetWorkflowsResponse - (*vtctldata.InitShardPrimaryResponse)(nil), // 160: vtctldata.InitShardPrimaryResponse - (*vtctldata.LaunchSchemaMigrationResponse)(nil), // 161: vtctldata.LaunchSchemaMigrationResponse - (*vtctldata.LookupVindexCreateResponse)(nil), // 162: vtctldata.LookupVindexCreateResponse - (*vtctldata.LookupVindexExternalizeResponse)(nil), // 163: vtctldata.LookupVindexExternalizeResponse - (*vtctldata.MaterializeCreateResponse)(nil), // 164: vtctldata.MaterializeCreateResponse - (*vtctldata.WorkflowStatusResponse)(nil), // 165: vtctldata.WorkflowStatusResponse - (*vtctldata.MountRegisterResponse)(nil), // 166: vtctldata.MountRegisterResponse - (*vtctldata.MountUnregisterResponse)(nil), // 167: vtctldata.MountUnregisterResponse - (*vtctldata.MountShowResponse)(nil), // 168: vtctldata.MountShowResponse - (*vtctldata.MountListResponse)(nil), // 169: vtctldata.MountListResponse - (*vtctldata.MoveTablesCompleteResponse)(nil), // 170: vtctldata.MoveTablesCompleteResponse - (*vtctldata.PingTabletResponse)(nil), // 171: vtctldata.PingTabletResponse - (*vtctldata.PlannedReparentShardResponse)(nil), // 172: vtctldata.PlannedReparentShardResponse - (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 173: vtctldata.RebuildKeyspaceGraphResponse - (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 174: vtctldata.RebuildVSchemaGraphResponse - (*vtctldata.RefreshStateResponse)(nil), // 175: vtctldata.RefreshStateResponse - (*vtctldata.RefreshStateByShardResponse)(nil), // 176: vtctldata.RefreshStateByShardResponse - (*vtctldata.ReloadSchemaResponse)(nil), // 177: vtctldata.ReloadSchemaResponse - (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 178: vtctldata.ReloadSchemaKeyspaceResponse - (*vtctldata.ReloadSchemaShardResponse)(nil), // 179: vtctldata.ReloadSchemaShardResponse - (*vtctldata.RemoveBackupResponse)(nil), // 180: 
vtctldata.RemoveBackupResponse - (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 181: vtctldata.RemoveKeyspaceCellResponse - (*vtctldata.RemoveShardCellResponse)(nil), // 182: vtctldata.RemoveShardCellResponse - (*vtctldata.ReparentTabletResponse)(nil), // 183: vtctldata.ReparentTabletResponse - (*vtctldata.RestoreFromBackupResponse)(nil), // 184: vtctldata.RestoreFromBackupResponse - (*vtctldata.RetrySchemaMigrationResponse)(nil), // 185: vtctldata.RetrySchemaMigrationResponse - (*vtctldata.RunHealthCheckResponse)(nil), // 186: vtctldata.RunHealthCheckResponse - (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 187: vtctldata.SetKeyspaceDurabilityPolicyResponse - (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 188: vtctldata.SetShardIsPrimaryServingResponse - (*vtctldata.SetShardTabletControlResponse)(nil), // 189: vtctldata.SetShardTabletControlResponse - (*vtctldata.SetWritableResponse)(nil), // 190: vtctldata.SetWritableResponse - (*vtctldata.ShardReplicationAddResponse)(nil), // 191: vtctldata.ShardReplicationAddResponse - (*vtctldata.ShardReplicationFixResponse)(nil), // 192: vtctldata.ShardReplicationFixResponse - (*vtctldata.ShardReplicationPositionsResponse)(nil), // 193: vtctldata.ShardReplicationPositionsResponse - (*vtctldata.ShardReplicationRemoveResponse)(nil), // 194: vtctldata.ShardReplicationRemoveResponse - (*vtctldata.SleepTabletResponse)(nil), // 195: vtctldata.SleepTabletResponse - (*vtctldata.SourceShardAddResponse)(nil), // 196: vtctldata.SourceShardAddResponse - (*vtctldata.SourceShardDeleteResponse)(nil), // 197: vtctldata.SourceShardDeleteResponse - (*vtctldata.StartReplicationResponse)(nil), // 198: vtctldata.StartReplicationResponse - (*vtctldata.StopReplicationResponse)(nil), // 199: vtctldata.StopReplicationResponse - (*vtctldata.TabletExternallyReparentedResponse)(nil), // 200: vtctldata.TabletExternallyReparentedResponse - (*vtctldata.UpdateCellInfoResponse)(nil), // 201: vtctldata.UpdateCellInfoResponse - 
(*vtctldata.UpdateCellsAliasResponse)(nil), // 202: vtctldata.UpdateCellsAliasResponse - (*vtctldata.ValidateResponse)(nil), // 203: vtctldata.ValidateResponse - (*vtctldata.ValidateKeyspaceResponse)(nil), // 204: vtctldata.ValidateKeyspaceResponse - (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 205: vtctldata.ValidateSchemaKeyspaceResponse - (*vtctldata.ValidateShardResponse)(nil), // 206: vtctldata.ValidateShardResponse - (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 207: vtctldata.ValidateVersionKeyspaceResponse - (*vtctldata.ValidateVersionShardResponse)(nil), // 208: vtctldata.ValidateVersionShardResponse - (*vtctldata.ValidateVSchemaResponse)(nil), // 209: vtctldata.ValidateVSchemaResponse - (*vtctldata.VDiffCreateResponse)(nil), // 210: vtctldata.VDiffCreateResponse - (*vtctldata.VDiffDeleteResponse)(nil), // 211: vtctldata.VDiffDeleteResponse - (*vtctldata.VDiffResumeResponse)(nil), // 212: vtctldata.VDiffResumeResponse - (*vtctldata.VDiffShowResponse)(nil), // 213: vtctldata.VDiffShowResponse - (*vtctldata.VDiffStopResponse)(nil), // 214: vtctldata.VDiffStopResponse - (*vtctldata.WorkflowDeleteResponse)(nil), // 215: vtctldata.WorkflowDeleteResponse - (*vtctldata.WorkflowSwitchTrafficResponse)(nil), // 216: vtctldata.WorkflowSwitchTrafficResponse - (*vtctldata.WorkflowUpdateResponse)(nil), // 217: vtctldata.WorkflowUpdateResponse + (*vtctldata.ApplyKeyspaceRoutingRulesRequest)(nil), // 5: vtctldata.ApplyKeyspaceRoutingRulesRequest + (*vtctldata.ApplyShardRoutingRulesRequest)(nil), // 6: vtctldata.ApplyShardRoutingRulesRequest + (*vtctldata.ApplyVSchemaRequest)(nil), // 7: vtctldata.ApplyVSchemaRequest + (*vtctldata.BackupRequest)(nil), // 8: vtctldata.BackupRequest + (*vtctldata.BackupShardRequest)(nil), // 9: vtctldata.BackupShardRequest + (*vtctldata.CancelSchemaMigrationRequest)(nil), // 10: vtctldata.CancelSchemaMigrationRequest + (*vtctldata.ChangeTabletTypeRequest)(nil), // 11: vtctldata.ChangeTabletTypeRequest + 
(*vtctldata.CleanupSchemaMigrationRequest)(nil), // 12: vtctldata.CleanupSchemaMigrationRequest + (*vtctldata.CompleteSchemaMigrationRequest)(nil), // 13: vtctldata.CompleteSchemaMigrationRequest + (*vtctldata.CreateKeyspaceRequest)(nil), // 14: vtctldata.CreateKeyspaceRequest + (*vtctldata.CreateShardRequest)(nil), // 15: vtctldata.CreateShardRequest + (*vtctldata.DeleteCellInfoRequest)(nil), // 16: vtctldata.DeleteCellInfoRequest + (*vtctldata.DeleteCellsAliasRequest)(nil), // 17: vtctldata.DeleteCellsAliasRequest + (*vtctldata.DeleteKeyspaceRequest)(nil), // 18: vtctldata.DeleteKeyspaceRequest + (*vtctldata.DeleteShardsRequest)(nil), // 19: vtctldata.DeleteShardsRequest + (*vtctldata.DeleteSrvVSchemaRequest)(nil), // 20: vtctldata.DeleteSrvVSchemaRequest + (*vtctldata.DeleteTabletsRequest)(nil), // 21: vtctldata.DeleteTabletsRequest + (*vtctldata.EmergencyReparentShardRequest)(nil), // 22: vtctldata.EmergencyReparentShardRequest + (*vtctldata.ExecuteFetchAsAppRequest)(nil), // 23: vtctldata.ExecuteFetchAsAppRequest + (*vtctldata.ExecuteFetchAsDBARequest)(nil), // 24: vtctldata.ExecuteFetchAsDBARequest + (*vtctldata.ExecuteHookRequest)(nil), // 25: vtctldata.ExecuteHookRequest + (*vtctldata.ExecuteMultiFetchAsDBARequest)(nil), // 26: vtctldata.ExecuteMultiFetchAsDBARequest + (*vtctldata.FindAllShardsInKeyspaceRequest)(nil), // 27: vtctldata.FindAllShardsInKeyspaceRequest + (*vtctldata.ForceCutOverSchemaMigrationRequest)(nil), // 28: vtctldata.ForceCutOverSchemaMigrationRequest + (*vtctldata.GetBackupsRequest)(nil), // 29: vtctldata.GetBackupsRequest + (*vtctldata.GetCellInfoRequest)(nil), // 30: vtctldata.GetCellInfoRequest + (*vtctldata.GetCellInfoNamesRequest)(nil), // 31: vtctldata.GetCellInfoNamesRequest + (*vtctldata.GetCellsAliasesRequest)(nil), // 32: vtctldata.GetCellsAliasesRequest + (*vtctldata.GetFullStatusRequest)(nil), // 33: vtctldata.GetFullStatusRequest + (*vtctldata.GetKeyspaceRequest)(nil), // 34: vtctldata.GetKeyspaceRequest + 
(*vtctldata.GetKeyspacesRequest)(nil), // 35: vtctldata.GetKeyspacesRequest + (*vtctldata.GetKeyspaceRoutingRulesRequest)(nil), // 36: vtctldata.GetKeyspaceRoutingRulesRequest + (*vtctldata.GetPermissionsRequest)(nil), // 37: vtctldata.GetPermissionsRequest + (*vtctldata.GetRoutingRulesRequest)(nil), // 38: vtctldata.GetRoutingRulesRequest + (*vtctldata.GetSchemaRequest)(nil), // 39: vtctldata.GetSchemaRequest + (*vtctldata.GetSchemaMigrationsRequest)(nil), // 40: vtctldata.GetSchemaMigrationsRequest + (*vtctldata.GetShardReplicationRequest)(nil), // 41: vtctldata.GetShardReplicationRequest + (*vtctldata.GetShardRequest)(nil), // 42: vtctldata.GetShardRequest + (*vtctldata.GetShardRoutingRulesRequest)(nil), // 43: vtctldata.GetShardRoutingRulesRequest + (*vtctldata.GetSrvKeyspaceNamesRequest)(nil), // 44: vtctldata.GetSrvKeyspaceNamesRequest + (*vtctldata.GetSrvKeyspacesRequest)(nil), // 45: vtctldata.GetSrvKeyspacesRequest + (*vtctldata.UpdateThrottlerConfigRequest)(nil), // 46: vtctldata.UpdateThrottlerConfigRequest + (*vtctldata.GetSrvVSchemaRequest)(nil), // 47: vtctldata.GetSrvVSchemaRequest + (*vtctldata.GetSrvVSchemasRequest)(nil), // 48: vtctldata.GetSrvVSchemasRequest + (*vtctldata.GetTabletRequest)(nil), // 49: vtctldata.GetTabletRequest + (*vtctldata.GetTabletsRequest)(nil), // 50: vtctldata.GetTabletsRequest + (*vtctldata.GetTopologyPathRequest)(nil), // 51: vtctldata.GetTopologyPathRequest + (*vtctldata.GetVersionRequest)(nil), // 52: vtctldata.GetVersionRequest + (*vtctldata.GetVSchemaRequest)(nil), // 53: vtctldata.GetVSchemaRequest + (*vtctldata.GetWorkflowsRequest)(nil), // 54: vtctldata.GetWorkflowsRequest + (*vtctldata.InitShardPrimaryRequest)(nil), // 55: vtctldata.InitShardPrimaryRequest + (*vtctldata.LaunchSchemaMigrationRequest)(nil), // 56: vtctldata.LaunchSchemaMigrationRequest + (*vtctldata.LookupVindexCreateRequest)(nil), // 57: vtctldata.LookupVindexCreateRequest + (*vtctldata.LookupVindexExternalizeRequest)(nil), // 58: 
vtctldata.LookupVindexExternalizeRequest + (*vtctldata.MaterializeCreateRequest)(nil), // 59: vtctldata.MaterializeCreateRequest + (*vtctldata.MigrateCreateRequest)(nil), // 60: vtctldata.MigrateCreateRequest + (*vtctldata.MountRegisterRequest)(nil), // 61: vtctldata.MountRegisterRequest + (*vtctldata.MountUnregisterRequest)(nil), // 62: vtctldata.MountUnregisterRequest + (*vtctldata.MountShowRequest)(nil), // 63: vtctldata.MountShowRequest + (*vtctldata.MountListRequest)(nil), // 64: vtctldata.MountListRequest + (*vtctldata.MoveTablesCreateRequest)(nil), // 65: vtctldata.MoveTablesCreateRequest + (*vtctldata.MoveTablesCompleteRequest)(nil), // 66: vtctldata.MoveTablesCompleteRequest + (*vtctldata.PingTabletRequest)(nil), // 67: vtctldata.PingTabletRequest + (*vtctldata.PlannedReparentShardRequest)(nil), // 68: vtctldata.PlannedReparentShardRequest + (*vtctldata.RebuildKeyspaceGraphRequest)(nil), // 69: vtctldata.RebuildKeyspaceGraphRequest + (*vtctldata.RebuildVSchemaGraphRequest)(nil), // 70: vtctldata.RebuildVSchemaGraphRequest + (*vtctldata.RefreshStateRequest)(nil), // 71: vtctldata.RefreshStateRequest + (*vtctldata.RefreshStateByShardRequest)(nil), // 72: vtctldata.RefreshStateByShardRequest + (*vtctldata.ReloadSchemaRequest)(nil), // 73: vtctldata.ReloadSchemaRequest + (*vtctldata.ReloadSchemaKeyspaceRequest)(nil), // 74: vtctldata.ReloadSchemaKeyspaceRequest + (*vtctldata.ReloadSchemaShardRequest)(nil), // 75: vtctldata.ReloadSchemaShardRequest + (*vtctldata.RemoveBackupRequest)(nil), // 76: vtctldata.RemoveBackupRequest + (*vtctldata.RemoveKeyspaceCellRequest)(nil), // 77: vtctldata.RemoveKeyspaceCellRequest + (*vtctldata.RemoveShardCellRequest)(nil), // 78: vtctldata.RemoveShardCellRequest + (*vtctldata.ReparentTabletRequest)(nil), // 79: vtctldata.ReparentTabletRequest + (*vtctldata.ReshardCreateRequest)(nil), // 80: vtctldata.ReshardCreateRequest + (*vtctldata.RestoreFromBackupRequest)(nil), // 81: vtctldata.RestoreFromBackupRequest + 
(*vtctldata.RetrySchemaMigrationRequest)(nil), // 82: vtctldata.RetrySchemaMigrationRequest + (*vtctldata.RunHealthCheckRequest)(nil), // 83: vtctldata.RunHealthCheckRequest + (*vtctldata.SetKeyspaceDurabilityPolicyRequest)(nil), // 84: vtctldata.SetKeyspaceDurabilityPolicyRequest + (*vtctldata.SetShardIsPrimaryServingRequest)(nil), // 85: vtctldata.SetShardIsPrimaryServingRequest + (*vtctldata.SetShardTabletControlRequest)(nil), // 86: vtctldata.SetShardTabletControlRequest + (*vtctldata.SetWritableRequest)(nil), // 87: vtctldata.SetWritableRequest + (*vtctldata.ShardReplicationAddRequest)(nil), // 88: vtctldata.ShardReplicationAddRequest + (*vtctldata.ShardReplicationFixRequest)(nil), // 89: vtctldata.ShardReplicationFixRequest + (*vtctldata.ShardReplicationPositionsRequest)(nil), // 90: vtctldata.ShardReplicationPositionsRequest + (*vtctldata.ShardReplicationRemoveRequest)(nil), // 91: vtctldata.ShardReplicationRemoveRequest + (*vtctldata.SleepTabletRequest)(nil), // 92: vtctldata.SleepTabletRequest + (*vtctldata.SourceShardAddRequest)(nil), // 93: vtctldata.SourceShardAddRequest + (*vtctldata.SourceShardDeleteRequest)(nil), // 94: vtctldata.SourceShardDeleteRequest + (*vtctldata.StartReplicationRequest)(nil), // 95: vtctldata.StartReplicationRequest + (*vtctldata.StopReplicationRequest)(nil), // 96: vtctldata.StopReplicationRequest + (*vtctldata.TabletExternallyReparentedRequest)(nil), // 97: vtctldata.TabletExternallyReparentedRequest + (*vtctldata.UpdateCellInfoRequest)(nil), // 98: vtctldata.UpdateCellInfoRequest + (*vtctldata.UpdateCellsAliasRequest)(nil), // 99: vtctldata.UpdateCellsAliasRequest + (*vtctldata.ValidateRequest)(nil), // 100: vtctldata.ValidateRequest + (*vtctldata.ValidateKeyspaceRequest)(nil), // 101: vtctldata.ValidateKeyspaceRequest + (*vtctldata.ValidateSchemaKeyspaceRequest)(nil), // 102: vtctldata.ValidateSchemaKeyspaceRequest + (*vtctldata.ValidateShardRequest)(nil), // 103: vtctldata.ValidateShardRequest + 
(*vtctldata.ValidateVersionKeyspaceRequest)(nil), // 104: vtctldata.ValidateVersionKeyspaceRequest + (*vtctldata.ValidateVersionShardRequest)(nil), // 105: vtctldata.ValidateVersionShardRequest + (*vtctldata.ValidateVSchemaRequest)(nil), // 106: vtctldata.ValidateVSchemaRequest + (*vtctldata.VDiffCreateRequest)(nil), // 107: vtctldata.VDiffCreateRequest + (*vtctldata.VDiffDeleteRequest)(nil), // 108: vtctldata.VDiffDeleteRequest + (*vtctldata.VDiffResumeRequest)(nil), // 109: vtctldata.VDiffResumeRequest + (*vtctldata.VDiffShowRequest)(nil), // 110: vtctldata.VDiffShowRequest + (*vtctldata.VDiffStopRequest)(nil), // 111: vtctldata.VDiffStopRequest + (*vtctldata.WorkflowDeleteRequest)(nil), // 112: vtctldata.WorkflowDeleteRequest + (*vtctldata.WorkflowStatusRequest)(nil), // 113: vtctldata.WorkflowStatusRequest + (*vtctldata.WorkflowSwitchTrafficRequest)(nil), // 114: vtctldata.WorkflowSwitchTrafficRequest + (*vtctldata.WorkflowUpdateRequest)(nil), // 115: vtctldata.WorkflowUpdateRequest + (*vtctldata.ExecuteVtctlCommandResponse)(nil), // 116: vtctldata.ExecuteVtctlCommandResponse + (*vtctldata.AddCellInfoResponse)(nil), // 117: vtctldata.AddCellInfoResponse + (*vtctldata.AddCellsAliasResponse)(nil), // 118: vtctldata.AddCellsAliasResponse + (*vtctldata.ApplyRoutingRulesResponse)(nil), // 119: vtctldata.ApplyRoutingRulesResponse + (*vtctldata.ApplySchemaResponse)(nil), // 120: vtctldata.ApplySchemaResponse + (*vtctldata.ApplyKeyspaceRoutingRulesResponse)(nil), // 121: vtctldata.ApplyKeyspaceRoutingRulesResponse + (*vtctldata.ApplyShardRoutingRulesResponse)(nil), // 122: vtctldata.ApplyShardRoutingRulesResponse + (*vtctldata.ApplyVSchemaResponse)(nil), // 123: vtctldata.ApplyVSchemaResponse + (*vtctldata.BackupResponse)(nil), // 124: vtctldata.BackupResponse + (*vtctldata.CancelSchemaMigrationResponse)(nil), // 125: vtctldata.CancelSchemaMigrationResponse + (*vtctldata.ChangeTabletTypeResponse)(nil), // 126: vtctldata.ChangeTabletTypeResponse + 
(*vtctldata.CleanupSchemaMigrationResponse)(nil), // 127: vtctldata.CleanupSchemaMigrationResponse + (*vtctldata.CompleteSchemaMigrationResponse)(nil), // 128: vtctldata.CompleteSchemaMigrationResponse + (*vtctldata.CreateKeyspaceResponse)(nil), // 129: vtctldata.CreateKeyspaceResponse + (*vtctldata.CreateShardResponse)(nil), // 130: vtctldata.CreateShardResponse + (*vtctldata.DeleteCellInfoResponse)(nil), // 131: vtctldata.DeleteCellInfoResponse + (*vtctldata.DeleteCellsAliasResponse)(nil), // 132: vtctldata.DeleteCellsAliasResponse + (*vtctldata.DeleteKeyspaceResponse)(nil), // 133: vtctldata.DeleteKeyspaceResponse + (*vtctldata.DeleteShardsResponse)(nil), // 134: vtctldata.DeleteShardsResponse + (*vtctldata.DeleteSrvVSchemaResponse)(nil), // 135: vtctldata.DeleteSrvVSchemaResponse + (*vtctldata.DeleteTabletsResponse)(nil), // 136: vtctldata.DeleteTabletsResponse + (*vtctldata.EmergencyReparentShardResponse)(nil), // 137: vtctldata.EmergencyReparentShardResponse + (*vtctldata.ExecuteFetchAsAppResponse)(nil), // 138: vtctldata.ExecuteFetchAsAppResponse + (*vtctldata.ExecuteFetchAsDBAResponse)(nil), // 139: vtctldata.ExecuteFetchAsDBAResponse + (*vtctldata.ExecuteHookResponse)(nil), // 140: vtctldata.ExecuteHookResponse + (*vtctldata.ExecuteMultiFetchAsDBAResponse)(nil), // 141: vtctldata.ExecuteMultiFetchAsDBAResponse + (*vtctldata.FindAllShardsInKeyspaceResponse)(nil), // 142: vtctldata.FindAllShardsInKeyspaceResponse + (*vtctldata.ForceCutOverSchemaMigrationResponse)(nil), // 143: vtctldata.ForceCutOverSchemaMigrationResponse + (*vtctldata.GetBackupsResponse)(nil), // 144: vtctldata.GetBackupsResponse + (*vtctldata.GetCellInfoResponse)(nil), // 145: vtctldata.GetCellInfoResponse + (*vtctldata.GetCellInfoNamesResponse)(nil), // 146: vtctldata.GetCellInfoNamesResponse + (*vtctldata.GetCellsAliasesResponse)(nil), // 147: vtctldata.GetCellsAliasesResponse + (*vtctldata.GetFullStatusResponse)(nil), // 148: vtctldata.GetFullStatusResponse + 
(*vtctldata.GetKeyspaceResponse)(nil), // 149: vtctldata.GetKeyspaceResponse + (*vtctldata.GetKeyspacesResponse)(nil), // 150: vtctldata.GetKeyspacesResponse + (*vtctldata.GetKeyspaceRoutingRulesResponse)(nil), // 151: vtctldata.GetKeyspaceRoutingRulesResponse + (*vtctldata.GetPermissionsResponse)(nil), // 152: vtctldata.GetPermissionsResponse + (*vtctldata.GetRoutingRulesResponse)(nil), // 153: vtctldata.GetRoutingRulesResponse + (*vtctldata.GetSchemaResponse)(nil), // 154: vtctldata.GetSchemaResponse + (*vtctldata.GetSchemaMigrationsResponse)(nil), // 155: vtctldata.GetSchemaMigrationsResponse + (*vtctldata.GetShardReplicationResponse)(nil), // 156: vtctldata.GetShardReplicationResponse + (*vtctldata.GetShardResponse)(nil), // 157: vtctldata.GetShardResponse + (*vtctldata.GetShardRoutingRulesResponse)(nil), // 158: vtctldata.GetShardRoutingRulesResponse + (*vtctldata.GetSrvKeyspaceNamesResponse)(nil), // 159: vtctldata.GetSrvKeyspaceNamesResponse + (*vtctldata.GetSrvKeyspacesResponse)(nil), // 160: vtctldata.GetSrvKeyspacesResponse + (*vtctldata.UpdateThrottlerConfigResponse)(nil), // 161: vtctldata.UpdateThrottlerConfigResponse + (*vtctldata.GetSrvVSchemaResponse)(nil), // 162: vtctldata.GetSrvVSchemaResponse + (*vtctldata.GetSrvVSchemasResponse)(nil), // 163: vtctldata.GetSrvVSchemasResponse + (*vtctldata.GetTabletResponse)(nil), // 164: vtctldata.GetTabletResponse + (*vtctldata.GetTabletsResponse)(nil), // 165: vtctldata.GetTabletsResponse + (*vtctldata.GetTopologyPathResponse)(nil), // 166: vtctldata.GetTopologyPathResponse + (*vtctldata.GetVersionResponse)(nil), // 167: vtctldata.GetVersionResponse + (*vtctldata.GetVSchemaResponse)(nil), // 168: vtctldata.GetVSchemaResponse + (*vtctldata.GetWorkflowsResponse)(nil), // 169: vtctldata.GetWorkflowsResponse + (*vtctldata.InitShardPrimaryResponse)(nil), // 170: vtctldata.InitShardPrimaryResponse + (*vtctldata.LaunchSchemaMigrationResponse)(nil), // 171: vtctldata.LaunchSchemaMigrationResponse + 
(*vtctldata.LookupVindexCreateResponse)(nil), // 172: vtctldata.LookupVindexCreateResponse + (*vtctldata.LookupVindexExternalizeResponse)(nil), // 173: vtctldata.LookupVindexExternalizeResponse + (*vtctldata.MaterializeCreateResponse)(nil), // 174: vtctldata.MaterializeCreateResponse + (*vtctldata.WorkflowStatusResponse)(nil), // 175: vtctldata.WorkflowStatusResponse + (*vtctldata.MountRegisterResponse)(nil), // 176: vtctldata.MountRegisterResponse + (*vtctldata.MountUnregisterResponse)(nil), // 177: vtctldata.MountUnregisterResponse + (*vtctldata.MountShowResponse)(nil), // 178: vtctldata.MountShowResponse + (*vtctldata.MountListResponse)(nil), // 179: vtctldata.MountListResponse + (*vtctldata.MoveTablesCompleteResponse)(nil), // 180: vtctldata.MoveTablesCompleteResponse + (*vtctldata.PingTabletResponse)(nil), // 181: vtctldata.PingTabletResponse + (*vtctldata.PlannedReparentShardResponse)(nil), // 182: vtctldata.PlannedReparentShardResponse + (*vtctldata.RebuildKeyspaceGraphResponse)(nil), // 183: vtctldata.RebuildKeyspaceGraphResponse + (*vtctldata.RebuildVSchemaGraphResponse)(nil), // 184: vtctldata.RebuildVSchemaGraphResponse + (*vtctldata.RefreshStateResponse)(nil), // 185: vtctldata.RefreshStateResponse + (*vtctldata.RefreshStateByShardResponse)(nil), // 186: vtctldata.RefreshStateByShardResponse + (*vtctldata.ReloadSchemaResponse)(nil), // 187: vtctldata.ReloadSchemaResponse + (*vtctldata.ReloadSchemaKeyspaceResponse)(nil), // 188: vtctldata.ReloadSchemaKeyspaceResponse + (*vtctldata.ReloadSchemaShardResponse)(nil), // 189: vtctldata.ReloadSchemaShardResponse + (*vtctldata.RemoveBackupResponse)(nil), // 190: vtctldata.RemoveBackupResponse + (*vtctldata.RemoveKeyspaceCellResponse)(nil), // 191: vtctldata.RemoveKeyspaceCellResponse + (*vtctldata.RemoveShardCellResponse)(nil), // 192: vtctldata.RemoveShardCellResponse + (*vtctldata.ReparentTabletResponse)(nil), // 193: vtctldata.ReparentTabletResponse + (*vtctldata.RestoreFromBackupResponse)(nil), // 194: 
vtctldata.RestoreFromBackupResponse + (*vtctldata.RetrySchemaMigrationResponse)(nil), // 195: vtctldata.RetrySchemaMigrationResponse + (*vtctldata.RunHealthCheckResponse)(nil), // 196: vtctldata.RunHealthCheckResponse + (*vtctldata.SetKeyspaceDurabilityPolicyResponse)(nil), // 197: vtctldata.SetKeyspaceDurabilityPolicyResponse + (*vtctldata.SetShardIsPrimaryServingResponse)(nil), // 198: vtctldata.SetShardIsPrimaryServingResponse + (*vtctldata.SetShardTabletControlResponse)(nil), // 199: vtctldata.SetShardTabletControlResponse + (*vtctldata.SetWritableResponse)(nil), // 200: vtctldata.SetWritableResponse + (*vtctldata.ShardReplicationAddResponse)(nil), // 201: vtctldata.ShardReplicationAddResponse + (*vtctldata.ShardReplicationFixResponse)(nil), // 202: vtctldata.ShardReplicationFixResponse + (*vtctldata.ShardReplicationPositionsResponse)(nil), // 203: vtctldata.ShardReplicationPositionsResponse + (*vtctldata.ShardReplicationRemoveResponse)(nil), // 204: vtctldata.ShardReplicationRemoveResponse + (*vtctldata.SleepTabletResponse)(nil), // 205: vtctldata.SleepTabletResponse + (*vtctldata.SourceShardAddResponse)(nil), // 206: vtctldata.SourceShardAddResponse + (*vtctldata.SourceShardDeleteResponse)(nil), // 207: vtctldata.SourceShardDeleteResponse + (*vtctldata.StartReplicationResponse)(nil), // 208: vtctldata.StartReplicationResponse + (*vtctldata.StopReplicationResponse)(nil), // 209: vtctldata.StopReplicationResponse + (*vtctldata.TabletExternallyReparentedResponse)(nil), // 210: vtctldata.TabletExternallyReparentedResponse + (*vtctldata.UpdateCellInfoResponse)(nil), // 211: vtctldata.UpdateCellInfoResponse + (*vtctldata.UpdateCellsAliasResponse)(nil), // 212: vtctldata.UpdateCellsAliasResponse + (*vtctldata.ValidateResponse)(nil), // 213: vtctldata.ValidateResponse + (*vtctldata.ValidateKeyspaceResponse)(nil), // 214: vtctldata.ValidateKeyspaceResponse + (*vtctldata.ValidateSchemaKeyspaceResponse)(nil), // 215: vtctldata.ValidateSchemaKeyspaceResponse + 
(*vtctldata.ValidateShardResponse)(nil), // 216: vtctldata.ValidateShardResponse + (*vtctldata.ValidateVersionKeyspaceResponse)(nil), // 217: vtctldata.ValidateVersionKeyspaceResponse + (*vtctldata.ValidateVersionShardResponse)(nil), // 218: vtctldata.ValidateVersionShardResponse + (*vtctldata.ValidateVSchemaResponse)(nil), // 219: vtctldata.ValidateVSchemaResponse + (*vtctldata.VDiffCreateResponse)(nil), // 220: vtctldata.VDiffCreateResponse + (*vtctldata.VDiffDeleteResponse)(nil), // 221: vtctldata.VDiffDeleteResponse + (*vtctldata.VDiffResumeResponse)(nil), // 222: vtctldata.VDiffResumeResponse + (*vtctldata.VDiffShowResponse)(nil), // 223: vtctldata.VDiffShowResponse + (*vtctldata.VDiffStopResponse)(nil), // 224: vtctldata.VDiffStopResponse + (*vtctldata.WorkflowDeleteResponse)(nil), // 225: vtctldata.WorkflowDeleteResponse + (*vtctldata.WorkflowSwitchTrafficResponse)(nil), // 226: vtctldata.WorkflowSwitchTrafficResponse + (*vtctldata.WorkflowUpdateResponse)(nil), // 227: vtctldata.WorkflowUpdateResponse } var file_vtctlservice_proto_depIdxs = []int32{ 0, // 0: vtctlservice.Vtctl.ExecuteVtctlCommand:input_type -> vtctldata.ExecuteVtctlCommandRequest @@ -925,225 +971,235 @@ var file_vtctlservice_proto_depIdxs = []int32{ 2, // 2: vtctlservice.Vtctld.AddCellsAlias:input_type -> vtctldata.AddCellsAliasRequest 3, // 3: vtctlservice.Vtctld.ApplyRoutingRules:input_type -> vtctldata.ApplyRoutingRulesRequest 4, // 4: vtctlservice.Vtctld.ApplySchema:input_type -> vtctldata.ApplySchemaRequest - 5, // 5: vtctlservice.Vtctld.ApplyShardRoutingRules:input_type -> vtctldata.ApplyShardRoutingRulesRequest - 6, // 6: vtctlservice.Vtctld.ApplyVSchema:input_type -> vtctldata.ApplyVSchemaRequest - 7, // 7: vtctlservice.Vtctld.Backup:input_type -> vtctldata.BackupRequest - 8, // 8: vtctlservice.Vtctld.BackupShard:input_type -> vtctldata.BackupShardRequest - 9, // 9: vtctlservice.Vtctld.CancelSchemaMigration:input_type -> vtctldata.CancelSchemaMigrationRequest - 10, // 10: 
vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest - 11, // 11: vtctlservice.Vtctld.CleanupSchemaMigration:input_type -> vtctldata.CleanupSchemaMigrationRequest - 12, // 12: vtctlservice.Vtctld.CompleteSchemaMigration:input_type -> vtctldata.CompleteSchemaMigrationRequest - 13, // 13: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest - 14, // 14: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest - 15, // 15: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest - 16, // 16: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest - 17, // 17: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest - 18, // 18: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest - 19, // 19: vtctlservice.Vtctld.DeleteSrvVSchema:input_type -> vtctldata.DeleteSrvVSchemaRequest - 20, // 20: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest - 21, // 21: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest - 22, // 22: vtctlservice.Vtctld.ExecuteFetchAsApp:input_type -> vtctldata.ExecuteFetchAsAppRequest - 23, // 23: vtctlservice.Vtctld.ExecuteFetchAsDBA:input_type -> vtctldata.ExecuteFetchAsDBARequest - 24, // 24: vtctlservice.Vtctld.ExecuteHook:input_type -> vtctldata.ExecuteHookRequest - 25, // 25: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest - 26, // 26: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest - 27, // 27: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest - 28, // 28: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest - 29, // 29: vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest - 30, // 30: vtctlservice.Vtctld.GetFullStatus:input_type -> 
vtctldata.GetFullStatusRequest - 31, // 31: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest - 32, // 32: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest - 33, // 33: vtctlservice.Vtctld.GetPermissions:input_type -> vtctldata.GetPermissionsRequest - 34, // 34: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest - 35, // 35: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest - 36, // 36: vtctlservice.Vtctld.GetSchemaMigrations:input_type -> vtctldata.GetSchemaMigrationsRequest - 37, // 37: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest - 38, // 38: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest - 39, // 39: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest - 40, // 40: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest - 41, // 41: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest - 42, // 42: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest - 43, // 43: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest - 44, // 44: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest - 45, // 45: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest - 46, // 46: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest - 47, // 47: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest - 48, // 48: vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest - 49, // 49: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest - 50, // 50: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest - 51, // 51: vtctlservice.Vtctld.LaunchSchemaMigration:input_type -> 
vtctldata.LaunchSchemaMigrationRequest - 52, // 52: vtctlservice.Vtctld.LookupVindexCreate:input_type -> vtctldata.LookupVindexCreateRequest - 53, // 53: vtctlservice.Vtctld.LookupVindexExternalize:input_type -> vtctldata.LookupVindexExternalizeRequest - 54, // 54: vtctlservice.Vtctld.MaterializeCreate:input_type -> vtctldata.MaterializeCreateRequest - 55, // 55: vtctlservice.Vtctld.MigrateCreate:input_type -> vtctldata.MigrateCreateRequest - 56, // 56: vtctlservice.Vtctld.MountRegister:input_type -> vtctldata.MountRegisterRequest - 57, // 57: vtctlservice.Vtctld.MountUnregister:input_type -> vtctldata.MountUnregisterRequest - 58, // 58: vtctlservice.Vtctld.MountShow:input_type -> vtctldata.MountShowRequest - 59, // 59: vtctlservice.Vtctld.MountList:input_type -> vtctldata.MountListRequest - 60, // 60: vtctlservice.Vtctld.MoveTablesCreate:input_type -> vtctldata.MoveTablesCreateRequest - 61, // 61: vtctlservice.Vtctld.MoveTablesComplete:input_type -> vtctldata.MoveTablesCompleteRequest - 62, // 62: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest - 63, // 63: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest - 64, // 64: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest - 65, // 65: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest - 66, // 66: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest - 67, // 67: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest - 68, // 68: vtctlservice.Vtctld.ReloadSchema:input_type -> vtctldata.ReloadSchemaRequest - 69, // 69: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest - 70, // 70: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest - 71, // 71: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest - 72, // 72: 
vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest - 73, // 73: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest - 74, // 74: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest - 75, // 75: vtctlservice.Vtctld.ReshardCreate:input_type -> vtctldata.ReshardCreateRequest - 76, // 76: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest - 77, // 77: vtctlservice.Vtctld.RetrySchemaMigration:input_type -> vtctldata.RetrySchemaMigrationRequest - 78, // 78: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest - 79, // 79: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest - 80, // 80: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest - 81, // 81: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest - 82, // 82: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest - 83, // 83: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest - 84, // 84: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest - 85, // 85: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest - 86, // 86: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest - 87, // 87: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest - 88, // 88: vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest - 89, // 89: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest - 90, // 90: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest - 91, // 91: vtctlservice.Vtctld.StopReplication:input_type -> 
vtctldata.StopReplicationRequest - 92, // 92: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest - 93, // 93: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest - 94, // 94: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest - 95, // 95: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest - 96, // 96: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest - 97, // 97: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest - 98, // 98: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest - 99, // 99: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest - 100, // 100: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest - 101, // 101: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest - 102, // 102: vtctlservice.Vtctld.VDiffCreate:input_type -> vtctldata.VDiffCreateRequest - 103, // 103: vtctlservice.Vtctld.VDiffDelete:input_type -> vtctldata.VDiffDeleteRequest - 104, // 104: vtctlservice.Vtctld.VDiffResume:input_type -> vtctldata.VDiffResumeRequest - 105, // 105: vtctlservice.Vtctld.VDiffShow:input_type -> vtctldata.VDiffShowRequest - 106, // 106: vtctlservice.Vtctld.VDiffStop:input_type -> vtctldata.VDiffStopRequest - 107, // 107: vtctlservice.Vtctld.WorkflowDelete:input_type -> vtctldata.WorkflowDeleteRequest - 108, // 108: vtctlservice.Vtctld.WorkflowStatus:input_type -> vtctldata.WorkflowStatusRequest - 109, // 109: vtctlservice.Vtctld.WorkflowSwitchTraffic:input_type -> vtctldata.WorkflowSwitchTrafficRequest - 110, // 110: vtctlservice.Vtctld.WorkflowUpdate:input_type -> vtctldata.WorkflowUpdateRequest - 111, // 111: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse - 112, // 
112: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse - 113, // 113: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse - 114, // 114: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse - 115, // 115: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse - 116, // 116: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse - 117, // 117: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse - 118, // 118: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse - 118, // 119: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse - 119, // 120: vtctlservice.Vtctld.CancelSchemaMigration:output_type -> vtctldata.CancelSchemaMigrationResponse - 120, // 121: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse - 121, // 122: vtctlservice.Vtctld.CleanupSchemaMigration:output_type -> vtctldata.CleanupSchemaMigrationResponse - 122, // 123: vtctlservice.Vtctld.CompleteSchemaMigration:output_type -> vtctldata.CompleteSchemaMigrationResponse - 123, // 124: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse - 124, // 125: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse - 125, // 126: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse - 126, // 127: vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse - 127, // 128: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse - 128, // 129: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse - 129, // 130: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse - 130, // 131: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse - 131, // 132: 
vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse - 132, // 133: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse - 133, // 134: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse - 134, // 135: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse - 135, // 136: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse - 136, // 137: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse - 137, // 138: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse - 138, // 139: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse - 139, // 140: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse - 140, // 141: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse - 141, // 142: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse - 142, // 143: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse - 143, // 144: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse - 144, // 145: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse - 145, // 146: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse - 146, // 147: vtctlservice.Vtctld.GetSchemaMigrations:output_type -> vtctldata.GetSchemaMigrationsResponse - 147, // 148: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse - 148, // 149: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse - 149, // 150: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse - 150, // 151: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse - 151, // 152: 
vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse - 152, // 153: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse - 153, // 154: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse - 154, // 155: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse - 155, // 156: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse - 156, // 157: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse - 157, // 158: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse - 158, // 159: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse - 159, // 160: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse - 160, // 161: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse - 161, // 162: vtctlservice.Vtctld.LaunchSchemaMigration:output_type -> vtctldata.LaunchSchemaMigrationResponse - 162, // 163: vtctlservice.Vtctld.LookupVindexCreate:output_type -> vtctldata.LookupVindexCreateResponse - 163, // 164: vtctlservice.Vtctld.LookupVindexExternalize:output_type -> vtctldata.LookupVindexExternalizeResponse - 164, // 165: vtctlservice.Vtctld.MaterializeCreate:output_type -> vtctldata.MaterializeCreateResponse - 165, // 166: vtctlservice.Vtctld.MigrateCreate:output_type -> vtctldata.WorkflowStatusResponse - 166, // 167: vtctlservice.Vtctld.MountRegister:output_type -> vtctldata.MountRegisterResponse - 167, // 168: vtctlservice.Vtctld.MountUnregister:output_type -> vtctldata.MountUnregisterResponse - 168, // 169: vtctlservice.Vtctld.MountShow:output_type -> vtctldata.MountShowResponse - 169, // 170: vtctlservice.Vtctld.MountList:output_type -> vtctldata.MountListResponse - 165, // 171: vtctlservice.Vtctld.MoveTablesCreate:output_type -> vtctldata.WorkflowStatusResponse - 170, // 172: 
vtctlservice.Vtctld.MoveTablesComplete:output_type -> vtctldata.MoveTablesCompleteResponse - 171, // 173: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse - 172, // 174: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse - 173, // 175: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse - 174, // 176: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse - 175, // 177: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse - 176, // 178: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse - 177, // 179: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse - 178, // 180: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse - 179, // 181: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse - 180, // 182: vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse - 181, // 183: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse - 182, // 184: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse - 183, // 185: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse - 165, // 186: vtctlservice.Vtctld.ReshardCreate:output_type -> vtctldata.WorkflowStatusResponse - 184, // 187: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse - 185, // 188: vtctlservice.Vtctld.RetrySchemaMigration:output_type -> vtctldata.RetrySchemaMigrationResponse - 186, // 189: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse - 187, // 190: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse - 188, // 191: 
vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse - 189, // 192: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse - 190, // 193: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse - 191, // 194: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse - 192, // 195: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse - 193, // 196: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse - 194, // 197: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse - 195, // 198: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse - 196, // 199: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse - 197, // 200: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse - 198, // 201: vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse - 199, // 202: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse - 200, // 203: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse - 201, // 204: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse - 202, // 205: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse - 203, // 206: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse - 204, // 207: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse - 205, // 208: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse - 206, // 209: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse - 207, // 210: 
vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse - 208, // 211: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse - 209, // 212: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse - 210, // 213: vtctlservice.Vtctld.VDiffCreate:output_type -> vtctldata.VDiffCreateResponse - 211, // 214: vtctlservice.Vtctld.VDiffDelete:output_type -> vtctldata.VDiffDeleteResponse - 212, // 215: vtctlservice.Vtctld.VDiffResume:output_type -> vtctldata.VDiffResumeResponse - 213, // 216: vtctlservice.Vtctld.VDiffShow:output_type -> vtctldata.VDiffShowResponse - 214, // 217: vtctlservice.Vtctld.VDiffStop:output_type -> vtctldata.VDiffStopResponse - 215, // 218: vtctlservice.Vtctld.WorkflowDelete:output_type -> vtctldata.WorkflowDeleteResponse - 165, // 219: vtctlservice.Vtctld.WorkflowStatus:output_type -> vtctldata.WorkflowStatusResponse - 216, // 220: vtctlservice.Vtctld.WorkflowSwitchTraffic:output_type -> vtctldata.WorkflowSwitchTrafficResponse - 217, // 221: vtctlservice.Vtctld.WorkflowUpdate:output_type -> vtctldata.WorkflowUpdateResponse - 111, // [111:222] is the sub-list for method output_type - 0, // [0:111] is the sub-list for method input_type + 5, // 5: vtctlservice.Vtctld.ApplyKeyspaceRoutingRules:input_type -> vtctldata.ApplyKeyspaceRoutingRulesRequest + 6, // 6: vtctlservice.Vtctld.ApplyShardRoutingRules:input_type -> vtctldata.ApplyShardRoutingRulesRequest + 7, // 7: vtctlservice.Vtctld.ApplyVSchema:input_type -> vtctldata.ApplyVSchemaRequest + 8, // 8: vtctlservice.Vtctld.Backup:input_type -> vtctldata.BackupRequest + 9, // 9: vtctlservice.Vtctld.BackupShard:input_type -> vtctldata.BackupShardRequest + 10, // 10: vtctlservice.Vtctld.CancelSchemaMigration:input_type -> vtctldata.CancelSchemaMigrationRequest + 11, // 11: vtctlservice.Vtctld.ChangeTabletType:input_type -> vtctldata.ChangeTabletTypeRequest + 12, // 12: 
vtctlservice.Vtctld.CleanupSchemaMigration:input_type -> vtctldata.CleanupSchemaMigrationRequest + 13, // 13: vtctlservice.Vtctld.CompleteSchemaMigration:input_type -> vtctldata.CompleteSchemaMigrationRequest + 14, // 14: vtctlservice.Vtctld.CreateKeyspace:input_type -> vtctldata.CreateKeyspaceRequest + 15, // 15: vtctlservice.Vtctld.CreateShard:input_type -> vtctldata.CreateShardRequest + 16, // 16: vtctlservice.Vtctld.DeleteCellInfo:input_type -> vtctldata.DeleteCellInfoRequest + 17, // 17: vtctlservice.Vtctld.DeleteCellsAlias:input_type -> vtctldata.DeleteCellsAliasRequest + 18, // 18: vtctlservice.Vtctld.DeleteKeyspace:input_type -> vtctldata.DeleteKeyspaceRequest + 19, // 19: vtctlservice.Vtctld.DeleteShards:input_type -> vtctldata.DeleteShardsRequest + 20, // 20: vtctlservice.Vtctld.DeleteSrvVSchema:input_type -> vtctldata.DeleteSrvVSchemaRequest + 21, // 21: vtctlservice.Vtctld.DeleteTablets:input_type -> vtctldata.DeleteTabletsRequest + 22, // 22: vtctlservice.Vtctld.EmergencyReparentShard:input_type -> vtctldata.EmergencyReparentShardRequest + 23, // 23: vtctlservice.Vtctld.ExecuteFetchAsApp:input_type -> vtctldata.ExecuteFetchAsAppRequest + 24, // 24: vtctlservice.Vtctld.ExecuteFetchAsDBA:input_type -> vtctldata.ExecuteFetchAsDBARequest + 25, // 25: vtctlservice.Vtctld.ExecuteHook:input_type -> vtctldata.ExecuteHookRequest + 26, // 26: vtctlservice.Vtctld.ExecuteMultiFetchAsDBA:input_type -> vtctldata.ExecuteMultiFetchAsDBARequest + 27, // 27: vtctlservice.Vtctld.FindAllShardsInKeyspace:input_type -> vtctldata.FindAllShardsInKeyspaceRequest + 28, // 28: vtctlservice.Vtctld.ForceCutOverSchemaMigration:input_type -> vtctldata.ForceCutOverSchemaMigrationRequest + 29, // 29: vtctlservice.Vtctld.GetBackups:input_type -> vtctldata.GetBackupsRequest + 30, // 30: vtctlservice.Vtctld.GetCellInfo:input_type -> vtctldata.GetCellInfoRequest + 31, // 31: vtctlservice.Vtctld.GetCellInfoNames:input_type -> vtctldata.GetCellInfoNamesRequest + 32, // 32: 
vtctlservice.Vtctld.GetCellsAliases:input_type -> vtctldata.GetCellsAliasesRequest + 33, // 33: vtctlservice.Vtctld.GetFullStatus:input_type -> vtctldata.GetFullStatusRequest + 34, // 34: vtctlservice.Vtctld.GetKeyspace:input_type -> vtctldata.GetKeyspaceRequest + 35, // 35: vtctlservice.Vtctld.GetKeyspaces:input_type -> vtctldata.GetKeyspacesRequest + 36, // 36: vtctlservice.Vtctld.GetKeyspaceRoutingRules:input_type -> vtctldata.GetKeyspaceRoutingRulesRequest + 37, // 37: vtctlservice.Vtctld.GetPermissions:input_type -> vtctldata.GetPermissionsRequest + 38, // 38: vtctlservice.Vtctld.GetRoutingRules:input_type -> vtctldata.GetRoutingRulesRequest + 39, // 39: vtctlservice.Vtctld.GetSchema:input_type -> vtctldata.GetSchemaRequest + 40, // 40: vtctlservice.Vtctld.GetSchemaMigrations:input_type -> vtctldata.GetSchemaMigrationsRequest + 41, // 41: vtctlservice.Vtctld.GetShardReplication:input_type -> vtctldata.GetShardReplicationRequest + 42, // 42: vtctlservice.Vtctld.GetShard:input_type -> vtctldata.GetShardRequest + 43, // 43: vtctlservice.Vtctld.GetShardRoutingRules:input_type -> vtctldata.GetShardRoutingRulesRequest + 44, // 44: vtctlservice.Vtctld.GetSrvKeyspaceNames:input_type -> vtctldata.GetSrvKeyspaceNamesRequest + 45, // 45: vtctlservice.Vtctld.GetSrvKeyspaces:input_type -> vtctldata.GetSrvKeyspacesRequest + 46, // 46: vtctlservice.Vtctld.UpdateThrottlerConfig:input_type -> vtctldata.UpdateThrottlerConfigRequest + 47, // 47: vtctlservice.Vtctld.GetSrvVSchema:input_type -> vtctldata.GetSrvVSchemaRequest + 48, // 48: vtctlservice.Vtctld.GetSrvVSchemas:input_type -> vtctldata.GetSrvVSchemasRequest + 49, // 49: vtctlservice.Vtctld.GetTablet:input_type -> vtctldata.GetTabletRequest + 50, // 50: vtctlservice.Vtctld.GetTablets:input_type -> vtctldata.GetTabletsRequest + 51, // 51: vtctlservice.Vtctld.GetTopologyPath:input_type -> vtctldata.GetTopologyPathRequest + 52, // 52: vtctlservice.Vtctld.GetVersion:input_type -> vtctldata.GetVersionRequest + 53, // 53: 
vtctlservice.Vtctld.GetVSchema:input_type -> vtctldata.GetVSchemaRequest + 54, // 54: vtctlservice.Vtctld.GetWorkflows:input_type -> vtctldata.GetWorkflowsRequest + 55, // 55: vtctlservice.Vtctld.InitShardPrimary:input_type -> vtctldata.InitShardPrimaryRequest + 56, // 56: vtctlservice.Vtctld.LaunchSchemaMigration:input_type -> vtctldata.LaunchSchemaMigrationRequest + 57, // 57: vtctlservice.Vtctld.LookupVindexCreate:input_type -> vtctldata.LookupVindexCreateRequest + 58, // 58: vtctlservice.Vtctld.LookupVindexExternalize:input_type -> vtctldata.LookupVindexExternalizeRequest + 59, // 59: vtctlservice.Vtctld.MaterializeCreate:input_type -> vtctldata.MaterializeCreateRequest + 60, // 60: vtctlservice.Vtctld.MigrateCreate:input_type -> vtctldata.MigrateCreateRequest + 61, // 61: vtctlservice.Vtctld.MountRegister:input_type -> vtctldata.MountRegisterRequest + 62, // 62: vtctlservice.Vtctld.MountUnregister:input_type -> vtctldata.MountUnregisterRequest + 63, // 63: vtctlservice.Vtctld.MountShow:input_type -> vtctldata.MountShowRequest + 64, // 64: vtctlservice.Vtctld.MountList:input_type -> vtctldata.MountListRequest + 65, // 65: vtctlservice.Vtctld.MoveTablesCreate:input_type -> vtctldata.MoveTablesCreateRequest + 66, // 66: vtctlservice.Vtctld.MoveTablesComplete:input_type -> vtctldata.MoveTablesCompleteRequest + 67, // 67: vtctlservice.Vtctld.PingTablet:input_type -> vtctldata.PingTabletRequest + 68, // 68: vtctlservice.Vtctld.PlannedReparentShard:input_type -> vtctldata.PlannedReparentShardRequest + 69, // 69: vtctlservice.Vtctld.RebuildKeyspaceGraph:input_type -> vtctldata.RebuildKeyspaceGraphRequest + 70, // 70: vtctlservice.Vtctld.RebuildVSchemaGraph:input_type -> vtctldata.RebuildVSchemaGraphRequest + 71, // 71: vtctlservice.Vtctld.RefreshState:input_type -> vtctldata.RefreshStateRequest + 72, // 72: vtctlservice.Vtctld.RefreshStateByShard:input_type -> vtctldata.RefreshStateByShardRequest + 73, // 73: vtctlservice.Vtctld.ReloadSchema:input_type -> 
vtctldata.ReloadSchemaRequest + 74, // 74: vtctlservice.Vtctld.ReloadSchemaKeyspace:input_type -> vtctldata.ReloadSchemaKeyspaceRequest + 75, // 75: vtctlservice.Vtctld.ReloadSchemaShard:input_type -> vtctldata.ReloadSchemaShardRequest + 76, // 76: vtctlservice.Vtctld.RemoveBackup:input_type -> vtctldata.RemoveBackupRequest + 77, // 77: vtctlservice.Vtctld.RemoveKeyspaceCell:input_type -> vtctldata.RemoveKeyspaceCellRequest + 78, // 78: vtctlservice.Vtctld.RemoveShardCell:input_type -> vtctldata.RemoveShardCellRequest + 79, // 79: vtctlservice.Vtctld.ReparentTablet:input_type -> vtctldata.ReparentTabletRequest + 80, // 80: vtctlservice.Vtctld.ReshardCreate:input_type -> vtctldata.ReshardCreateRequest + 81, // 81: vtctlservice.Vtctld.RestoreFromBackup:input_type -> vtctldata.RestoreFromBackupRequest + 82, // 82: vtctlservice.Vtctld.RetrySchemaMigration:input_type -> vtctldata.RetrySchemaMigrationRequest + 83, // 83: vtctlservice.Vtctld.RunHealthCheck:input_type -> vtctldata.RunHealthCheckRequest + 84, // 84: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:input_type -> vtctldata.SetKeyspaceDurabilityPolicyRequest + 85, // 85: vtctlservice.Vtctld.SetShardIsPrimaryServing:input_type -> vtctldata.SetShardIsPrimaryServingRequest + 86, // 86: vtctlservice.Vtctld.SetShardTabletControl:input_type -> vtctldata.SetShardTabletControlRequest + 87, // 87: vtctlservice.Vtctld.SetWritable:input_type -> vtctldata.SetWritableRequest + 88, // 88: vtctlservice.Vtctld.ShardReplicationAdd:input_type -> vtctldata.ShardReplicationAddRequest + 89, // 89: vtctlservice.Vtctld.ShardReplicationFix:input_type -> vtctldata.ShardReplicationFixRequest + 90, // 90: vtctlservice.Vtctld.ShardReplicationPositions:input_type -> vtctldata.ShardReplicationPositionsRequest + 91, // 91: vtctlservice.Vtctld.ShardReplicationRemove:input_type -> vtctldata.ShardReplicationRemoveRequest + 92, // 92: vtctlservice.Vtctld.SleepTablet:input_type -> vtctldata.SleepTabletRequest + 93, // 93: 
vtctlservice.Vtctld.SourceShardAdd:input_type -> vtctldata.SourceShardAddRequest + 94, // 94: vtctlservice.Vtctld.SourceShardDelete:input_type -> vtctldata.SourceShardDeleteRequest + 95, // 95: vtctlservice.Vtctld.StartReplication:input_type -> vtctldata.StartReplicationRequest + 96, // 96: vtctlservice.Vtctld.StopReplication:input_type -> vtctldata.StopReplicationRequest + 97, // 97: vtctlservice.Vtctld.TabletExternallyReparented:input_type -> vtctldata.TabletExternallyReparentedRequest + 98, // 98: vtctlservice.Vtctld.UpdateCellInfo:input_type -> vtctldata.UpdateCellInfoRequest + 99, // 99: vtctlservice.Vtctld.UpdateCellsAlias:input_type -> vtctldata.UpdateCellsAliasRequest + 100, // 100: vtctlservice.Vtctld.Validate:input_type -> vtctldata.ValidateRequest + 101, // 101: vtctlservice.Vtctld.ValidateKeyspace:input_type -> vtctldata.ValidateKeyspaceRequest + 102, // 102: vtctlservice.Vtctld.ValidateSchemaKeyspace:input_type -> vtctldata.ValidateSchemaKeyspaceRequest + 103, // 103: vtctlservice.Vtctld.ValidateShard:input_type -> vtctldata.ValidateShardRequest + 104, // 104: vtctlservice.Vtctld.ValidateVersionKeyspace:input_type -> vtctldata.ValidateVersionKeyspaceRequest + 105, // 105: vtctlservice.Vtctld.ValidateVersionShard:input_type -> vtctldata.ValidateVersionShardRequest + 106, // 106: vtctlservice.Vtctld.ValidateVSchema:input_type -> vtctldata.ValidateVSchemaRequest + 107, // 107: vtctlservice.Vtctld.VDiffCreate:input_type -> vtctldata.VDiffCreateRequest + 108, // 108: vtctlservice.Vtctld.VDiffDelete:input_type -> vtctldata.VDiffDeleteRequest + 109, // 109: vtctlservice.Vtctld.VDiffResume:input_type -> vtctldata.VDiffResumeRequest + 110, // 110: vtctlservice.Vtctld.VDiffShow:input_type -> vtctldata.VDiffShowRequest + 111, // 111: vtctlservice.Vtctld.VDiffStop:input_type -> vtctldata.VDiffStopRequest + 112, // 112: vtctlservice.Vtctld.WorkflowDelete:input_type -> vtctldata.WorkflowDeleteRequest + 113, // 113: vtctlservice.Vtctld.WorkflowStatus:input_type -> 
vtctldata.WorkflowStatusRequest + 114, // 114: vtctlservice.Vtctld.WorkflowSwitchTraffic:input_type -> vtctldata.WorkflowSwitchTrafficRequest + 115, // 115: vtctlservice.Vtctld.WorkflowUpdate:input_type -> vtctldata.WorkflowUpdateRequest + 116, // 116: vtctlservice.Vtctl.ExecuteVtctlCommand:output_type -> vtctldata.ExecuteVtctlCommandResponse + 117, // 117: vtctlservice.Vtctld.AddCellInfo:output_type -> vtctldata.AddCellInfoResponse + 118, // 118: vtctlservice.Vtctld.AddCellsAlias:output_type -> vtctldata.AddCellsAliasResponse + 119, // 119: vtctlservice.Vtctld.ApplyRoutingRules:output_type -> vtctldata.ApplyRoutingRulesResponse + 120, // 120: vtctlservice.Vtctld.ApplySchema:output_type -> vtctldata.ApplySchemaResponse + 121, // 121: vtctlservice.Vtctld.ApplyKeyspaceRoutingRules:output_type -> vtctldata.ApplyKeyspaceRoutingRulesResponse + 122, // 122: vtctlservice.Vtctld.ApplyShardRoutingRules:output_type -> vtctldata.ApplyShardRoutingRulesResponse + 123, // 123: vtctlservice.Vtctld.ApplyVSchema:output_type -> vtctldata.ApplyVSchemaResponse + 124, // 124: vtctlservice.Vtctld.Backup:output_type -> vtctldata.BackupResponse + 124, // 125: vtctlservice.Vtctld.BackupShard:output_type -> vtctldata.BackupResponse + 125, // 126: vtctlservice.Vtctld.CancelSchemaMigration:output_type -> vtctldata.CancelSchemaMigrationResponse + 126, // 127: vtctlservice.Vtctld.ChangeTabletType:output_type -> vtctldata.ChangeTabletTypeResponse + 127, // 128: vtctlservice.Vtctld.CleanupSchemaMigration:output_type -> vtctldata.CleanupSchemaMigrationResponse + 128, // 129: vtctlservice.Vtctld.CompleteSchemaMigration:output_type -> vtctldata.CompleteSchemaMigrationResponse + 129, // 130: vtctlservice.Vtctld.CreateKeyspace:output_type -> vtctldata.CreateKeyspaceResponse + 130, // 131: vtctlservice.Vtctld.CreateShard:output_type -> vtctldata.CreateShardResponse + 131, // 132: vtctlservice.Vtctld.DeleteCellInfo:output_type -> vtctldata.DeleteCellInfoResponse + 132, // 133: 
vtctlservice.Vtctld.DeleteCellsAlias:output_type -> vtctldata.DeleteCellsAliasResponse + 133, // 134: vtctlservice.Vtctld.DeleteKeyspace:output_type -> vtctldata.DeleteKeyspaceResponse + 134, // 135: vtctlservice.Vtctld.DeleteShards:output_type -> vtctldata.DeleteShardsResponse + 135, // 136: vtctlservice.Vtctld.DeleteSrvVSchema:output_type -> vtctldata.DeleteSrvVSchemaResponse + 136, // 137: vtctlservice.Vtctld.DeleteTablets:output_type -> vtctldata.DeleteTabletsResponse + 137, // 138: vtctlservice.Vtctld.EmergencyReparentShard:output_type -> vtctldata.EmergencyReparentShardResponse + 138, // 139: vtctlservice.Vtctld.ExecuteFetchAsApp:output_type -> vtctldata.ExecuteFetchAsAppResponse + 139, // 140: vtctlservice.Vtctld.ExecuteFetchAsDBA:output_type -> vtctldata.ExecuteFetchAsDBAResponse + 140, // 141: vtctlservice.Vtctld.ExecuteHook:output_type -> vtctldata.ExecuteHookResponse + 141, // 142: vtctlservice.Vtctld.ExecuteMultiFetchAsDBA:output_type -> vtctldata.ExecuteMultiFetchAsDBAResponse + 142, // 143: vtctlservice.Vtctld.FindAllShardsInKeyspace:output_type -> vtctldata.FindAllShardsInKeyspaceResponse + 143, // 144: vtctlservice.Vtctld.ForceCutOverSchemaMigration:output_type -> vtctldata.ForceCutOverSchemaMigrationResponse + 144, // 145: vtctlservice.Vtctld.GetBackups:output_type -> vtctldata.GetBackupsResponse + 145, // 146: vtctlservice.Vtctld.GetCellInfo:output_type -> vtctldata.GetCellInfoResponse + 146, // 147: vtctlservice.Vtctld.GetCellInfoNames:output_type -> vtctldata.GetCellInfoNamesResponse + 147, // 148: vtctlservice.Vtctld.GetCellsAliases:output_type -> vtctldata.GetCellsAliasesResponse + 148, // 149: vtctlservice.Vtctld.GetFullStatus:output_type -> vtctldata.GetFullStatusResponse + 149, // 150: vtctlservice.Vtctld.GetKeyspace:output_type -> vtctldata.GetKeyspaceResponse + 150, // 151: vtctlservice.Vtctld.GetKeyspaces:output_type -> vtctldata.GetKeyspacesResponse + 151, // 152: vtctlservice.Vtctld.GetKeyspaceRoutingRules:output_type -> 
vtctldata.GetKeyspaceRoutingRulesResponse + 152, // 153: vtctlservice.Vtctld.GetPermissions:output_type -> vtctldata.GetPermissionsResponse + 153, // 154: vtctlservice.Vtctld.GetRoutingRules:output_type -> vtctldata.GetRoutingRulesResponse + 154, // 155: vtctlservice.Vtctld.GetSchema:output_type -> vtctldata.GetSchemaResponse + 155, // 156: vtctlservice.Vtctld.GetSchemaMigrations:output_type -> vtctldata.GetSchemaMigrationsResponse + 156, // 157: vtctlservice.Vtctld.GetShardReplication:output_type -> vtctldata.GetShardReplicationResponse + 157, // 158: vtctlservice.Vtctld.GetShard:output_type -> vtctldata.GetShardResponse + 158, // 159: vtctlservice.Vtctld.GetShardRoutingRules:output_type -> vtctldata.GetShardRoutingRulesResponse + 159, // 160: vtctlservice.Vtctld.GetSrvKeyspaceNames:output_type -> vtctldata.GetSrvKeyspaceNamesResponse + 160, // 161: vtctlservice.Vtctld.GetSrvKeyspaces:output_type -> vtctldata.GetSrvKeyspacesResponse + 161, // 162: vtctlservice.Vtctld.UpdateThrottlerConfig:output_type -> vtctldata.UpdateThrottlerConfigResponse + 162, // 163: vtctlservice.Vtctld.GetSrvVSchema:output_type -> vtctldata.GetSrvVSchemaResponse + 163, // 164: vtctlservice.Vtctld.GetSrvVSchemas:output_type -> vtctldata.GetSrvVSchemasResponse + 164, // 165: vtctlservice.Vtctld.GetTablet:output_type -> vtctldata.GetTabletResponse + 165, // 166: vtctlservice.Vtctld.GetTablets:output_type -> vtctldata.GetTabletsResponse + 166, // 167: vtctlservice.Vtctld.GetTopologyPath:output_type -> vtctldata.GetTopologyPathResponse + 167, // 168: vtctlservice.Vtctld.GetVersion:output_type -> vtctldata.GetVersionResponse + 168, // 169: vtctlservice.Vtctld.GetVSchema:output_type -> vtctldata.GetVSchemaResponse + 169, // 170: vtctlservice.Vtctld.GetWorkflows:output_type -> vtctldata.GetWorkflowsResponse + 170, // 171: vtctlservice.Vtctld.InitShardPrimary:output_type -> vtctldata.InitShardPrimaryResponse + 171, // 172: vtctlservice.Vtctld.LaunchSchemaMigration:output_type -> 
vtctldata.LaunchSchemaMigrationResponse + 172, // 173: vtctlservice.Vtctld.LookupVindexCreate:output_type -> vtctldata.LookupVindexCreateResponse + 173, // 174: vtctlservice.Vtctld.LookupVindexExternalize:output_type -> vtctldata.LookupVindexExternalizeResponse + 174, // 175: vtctlservice.Vtctld.MaterializeCreate:output_type -> vtctldata.MaterializeCreateResponse + 175, // 176: vtctlservice.Vtctld.MigrateCreate:output_type -> vtctldata.WorkflowStatusResponse + 176, // 177: vtctlservice.Vtctld.MountRegister:output_type -> vtctldata.MountRegisterResponse + 177, // 178: vtctlservice.Vtctld.MountUnregister:output_type -> vtctldata.MountUnregisterResponse + 178, // 179: vtctlservice.Vtctld.MountShow:output_type -> vtctldata.MountShowResponse + 179, // 180: vtctlservice.Vtctld.MountList:output_type -> vtctldata.MountListResponse + 175, // 181: vtctlservice.Vtctld.MoveTablesCreate:output_type -> vtctldata.WorkflowStatusResponse + 180, // 182: vtctlservice.Vtctld.MoveTablesComplete:output_type -> vtctldata.MoveTablesCompleteResponse + 181, // 183: vtctlservice.Vtctld.PingTablet:output_type -> vtctldata.PingTabletResponse + 182, // 184: vtctlservice.Vtctld.PlannedReparentShard:output_type -> vtctldata.PlannedReparentShardResponse + 183, // 185: vtctlservice.Vtctld.RebuildKeyspaceGraph:output_type -> vtctldata.RebuildKeyspaceGraphResponse + 184, // 186: vtctlservice.Vtctld.RebuildVSchemaGraph:output_type -> vtctldata.RebuildVSchemaGraphResponse + 185, // 187: vtctlservice.Vtctld.RefreshState:output_type -> vtctldata.RefreshStateResponse + 186, // 188: vtctlservice.Vtctld.RefreshStateByShard:output_type -> vtctldata.RefreshStateByShardResponse + 187, // 189: vtctlservice.Vtctld.ReloadSchema:output_type -> vtctldata.ReloadSchemaResponse + 188, // 190: vtctlservice.Vtctld.ReloadSchemaKeyspace:output_type -> vtctldata.ReloadSchemaKeyspaceResponse + 189, // 191: vtctlservice.Vtctld.ReloadSchemaShard:output_type -> vtctldata.ReloadSchemaShardResponse + 190, // 192: 
vtctlservice.Vtctld.RemoveBackup:output_type -> vtctldata.RemoveBackupResponse + 191, // 193: vtctlservice.Vtctld.RemoveKeyspaceCell:output_type -> vtctldata.RemoveKeyspaceCellResponse + 192, // 194: vtctlservice.Vtctld.RemoveShardCell:output_type -> vtctldata.RemoveShardCellResponse + 193, // 195: vtctlservice.Vtctld.ReparentTablet:output_type -> vtctldata.ReparentTabletResponse + 175, // 196: vtctlservice.Vtctld.ReshardCreate:output_type -> vtctldata.WorkflowStatusResponse + 194, // 197: vtctlservice.Vtctld.RestoreFromBackup:output_type -> vtctldata.RestoreFromBackupResponse + 195, // 198: vtctlservice.Vtctld.RetrySchemaMigration:output_type -> vtctldata.RetrySchemaMigrationResponse + 196, // 199: vtctlservice.Vtctld.RunHealthCheck:output_type -> vtctldata.RunHealthCheckResponse + 197, // 200: vtctlservice.Vtctld.SetKeyspaceDurabilityPolicy:output_type -> vtctldata.SetKeyspaceDurabilityPolicyResponse + 198, // 201: vtctlservice.Vtctld.SetShardIsPrimaryServing:output_type -> vtctldata.SetShardIsPrimaryServingResponse + 199, // 202: vtctlservice.Vtctld.SetShardTabletControl:output_type -> vtctldata.SetShardTabletControlResponse + 200, // 203: vtctlservice.Vtctld.SetWritable:output_type -> vtctldata.SetWritableResponse + 201, // 204: vtctlservice.Vtctld.ShardReplicationAdd:output_type -> vtctldata.ShardReplicationAddResponse + 202, // 205: vtctlservice.Vtctld.ShardReplicationFix:output_type -> vtctldata.ShardReplicationFixResponse + 203, // 206: vtctlservice.Vtctld.ShardReplicationPositions:output_type -> vtctldata.ShardReplicationPositionsResponse + 204, // 207: vtctlservice.Vtctld.ShardReplicationRemove:output_type -> vtctldata.ShardReplicationRemoveResponse + 205, // 208: vtctlservice.Vtctld.SleepTablet:output_type -> vtctldata.SleepTabletResponse + 206, // 209: vtctlservice.Vtctld.SourceShardAdd:output_type -> vtctldata.SourceShardAddResponse + 207, // 210: vtctlservice.Vtctld.SourceShardDelete:output_type -> vtctldata.SourceShardDeleteResponse + 208, // 211: 
vtctlservice.Vtctld.StartReplication:output_type -> vtctldata.StartReplicationResponse + 209, // 212: vtctlservice.Vtctld.StopReplication:output_type -> vtctldata.StopReplicationResponse + 210, // 213: vtctlservice.Vtctld.TabletExternallyReparented:output_type -> vtctldata.TabletExternallyReparentedResponse + 211, // 214: vtctlservice.Vtctld.UpdateCellInfo:output_type -> vtctldata.UpdateCellInfoResponse + 212, // 215: vtctlservice.Vtctld.UpdateCellsAlias:output_type -> vtctldata.UpdateCellsAliasResponse + 213, // 216: vtctlservice.Vtctld.Validate:output_type -> vtctldata.ValidateResponse + 214, // 217: vtctlservice.Vtctld.ValidateKeyspace:output_type -> vtctldata.ValidateKeyspaceResponse + 215, // 218: vtctlservice.Vtctld.ValidateSchemaKeyspace:output_type -> vtctldata.ValidateSchemaKeyspaceResponse + 216, // 219: vtctlservice.Vtctld.ValidateShard:output_type -> vtctldata.ValidateShardResponse + 217, // 220: vtctlservice.Vtctld.ValidateVersionKeyspace:output_type -> vtctldata.ValidateVersionKeyspaceResponse + 218, // 221: vtctlservice.Vtctld.ValidateVersionShard:output_type -> vtctldata.ValidateVersionShardResponse + 219, // 222: vtctlservice.Vtctld.ValidateVSchema:output_type -> vtctldata.ValidateVSchemaResponse + 220, // 223: vtctlservice.Vtctld.VDiffCreate:output_type -> vtctldata.VDiffCreateResponse + 221, // 224: vtctlservice.Vtctld.VDiffDelete:output_type -> vtctldata.VDiffDeleteResponse + 222, // 225: vtctlservice.Vtctld.VDiffResume:output_type -> vtctldata.VDiffResumeResponse + 223, // 226: vtctlservice.Vtctld.VDiffShow:output_type -> vtctldata.VDiffShowResponse + 224, // 227: vtctlservice.Vtctld.VDiffStop:output_type -> vtctldata.VDiffStopResponse + 225, // 228: vtctlservice.Vtctld.WorkflowDelete:output_type -> vtctldata.WorkflowDeleteResponse + 175, // 229: vtctlservice.Vtctld.WorkflowStatus:output_type -> vtctldata.WorkflowStatusResponse + 226, // 230: vtctlservice.Vtctld.WorkflowSwitchTraffic:output_type -> vtctldata.WorkflowSwitchTrafficResponse + 227, 
// 231: vtctlservice.Vtctld.WorkflowUpdate:output_type -> vtctldata.WorkflowUpdateResponse + 116, // [116:232] is the sub-list for method output_type + 0, // [0:116] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go index f0a73530047..37448e9d850 100644 --- a/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go +++ b/go/vt/proto/vtctlservice/vtctlservice_grpc.pb.go @@ -150,6 +150,8 @@ type VtctldClient interface { ApplyRoutingRules(ctx context.Context, in *vtctldata.ApplyRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.ApplyRoutingRulesResponse, error) // ApplySchema applies a schema to a keyspace. ApplySchema(ctx context.Context, in *vtctldata.ApplySchemaRequest, opts ...grpc.CallOption) (*vtctldata.ApplySchemaResponse, error) + // ApplyKeyspaceRoutingRules applies the VSchema keyspace routing rules. + ApplyKeyspaceRoutingRules(ctx context.Context, in *vtctldata.ApplyKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.ApplyKeyspaceRoutingRulesResponse, error) // ApplyShardRoutingRules applies the VSchema shard routing rules. ApplyShardRoutingRules(ctx context.Context, in *vtctldata.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.ApplyShardRoutingRulesResponse, error) // ApplyVSchema applies a vschema to a keyspace. @@ -159,7 +161,7 @@ type VtctldClient interface { Backup(ctx context.Context, in *vtctldata.BackupRequest, opts ...grpc.CallOption) (Vtctld_BackupClient, error) // BackupShard chooses a tablet in the shard and uses it to create a backup. BackupShard(ctx context.Context, in *vtctldata.BackupShardRequest, opts ...grpc.CallOption) (Vtctld_BackupShardClient, error) - // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. 
+ // CancelSchemaMigration cancels one or all migrations, terminating any running ones as needed. CancelSchemaMigration(ctx context.Context, in *vtctldata.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.CancelSchemaMigrationResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a @@ -205,9 +207,13 @@ type VtctldClient interface { ExecuteFetchAsDBA(ctx context.Context, in *vtctldata.ExecuteFetchAsDBARequest, opts ...grpc.CallOption) (*vtctldata.ExecuteFetchAsDBAResponse, error) // ExecuteHook runs the hook on the tablet. ExecuteHook(ctx context.Context, in *vtctldata.ExecuteHookRequest, opts ...grpc.CallOption) (*vtctldata.ExecuteHookResponse, error) + // ExecuteMultiFetchAsDBA executes one or more SQL queries on the remote tablet as the DBA user. + ExecuteMultiFetchAsDBA(ctx context.Context, in *vtctldata.ExecuteMultiFetchAsDBARequest, opts ...grpc.CallOption) (*vtctldata.ExecuteMultiFetchAsDBAResponse, error) // FindAllShardsInKeyspace returns a map of shard names to shard references // for a given keyspace. FindAllShardsInKeyspace(ctx context.Context, in *vtctldata.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.FindAllShardsInKeyspaceResponse, error) + // ForceCutOverSchemaMigration marks a schema migration for forced cut-over. + ForceCutOverSchemaMigration(ctx context.Context, in *vtctldata.ForceCutOverSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.ForceCutOverSchemaMigrationResponse, error) // GetBackups returns all the backups for a shard. GetBackups(ctx context.Context, in *vtctldata.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldata.GetBackupsResponse, error) // GetCellInfo returns the information for a cell. 
@@ -224,6 +230,8 @@ type VtctldClient interface { GetKeyspace(ctx context.Context, in *vtctldata.GetKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceResponse, error) // GetKeyspaces returns the keyspace struct of all keyspaces in the topo. GetKeyspaces(ctx context.Context, in *vtctldata.GetKeyspacesRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspacesResponse, error) + // GetKeyspaceRoutingRules returns the VSchema keyspace routing rules. + GetKeyspaceRoutingRules(ctx context.Context, in *vtctldata.GetKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceRoutingRulesResponse, error) // GetPermissions returns the permissions set on the remote tablet. GetPermissions(ctx context.Context, in *vtctldata.GetPermissionsRequest, opts ...grpc.CallOption) (*vtctldata.GetPermissionsResponse, error) // GetRoutingRules returns the VSchema routing rules. @@ -237,6 +245,8 @@ type VtctldClient interface { // Different fields in the request message result in different filtering // behaviors. See the documentation on GetSchemaMigrationsRequest for details. GetSchemaMigrations(ctx context.Context, in *vtctldata.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldata.GetSchemaMigrationsResponse, error) + // GetShardReplication returns the replication graph for a shard in a cell. + GetShardReplication(ctx context.Context, in *vtctldata.GetShardReplicationRequest, opts ...grpc.CallOption) (*vtctldata.GetShardReplicationResponse, error) // GetShard returns information about a shard in the topology. GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) // GetShardRoutingRules returns the VSchema shard routing rules. 
@@ -496,6 +506,15 @@ func (c *vtctldClient) ApplySchema(ctx context.Context, in *vtctldata.ApplySchem return out, nil } +func (c *vtctldClient) ApplyKeyspaceRoutingRules(ctx context.Context, in *vtctldata.ApplyKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.ApplyKeyspaceRoutingRulesResponse, error) { + out := new(vtctldata.ApplyKeyspaceRoutingRulesResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ApplyKeyspaceRoutingRules", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) ApplyShardRoutingRules(ctx context.Context, in *vtctldata.ApplyShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.ApplyShardRoutingRulesResponse, error) { out := new(vtctldata.ApplyShardRoutingRulesResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ApplyShardRoutingRules", in, out, opts...) @@ -722,6 +741,15 @@ func (c *vtctldClient) ExecuteHook(ctx context.Context, in *vtctldata.ExecuteHoo return out, nil } +func (c *vtctldClient) ExecuteMultiFetchAsDBA(ctx context.Context, in *vtctldata.ExecuteMultiFetchAsDBARequest, opts ...grpc.CallOption) (*vtctldata.ExecuteMultiFetchAsDBAResponse, error) { + out := new(vtctldata.ExecuteMultiFetchAsDBAResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ExecuteMultiFetchAsDBA", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldata.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldata.FindAllShardsInKeyspaceResponse, error) { out := new(vtctldata.FindAllShardsInKeyspaceResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/FindAllShardsInKeyspace", in, out, opts...) 
@@ -731,6 +759,15 @@ func (c *vtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldat return out, nil } +func (c *vtctldClient) ForceCutOverSchemaMigration(ctx context.Context, in *vtctldata.ForceCutOverSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldata.ForceCutOverSchemaMigrationResponse, error) { + out := new(vtctldata.ForceCutOverSchemaMigrationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/ForceCutOverSchemaMigration", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) GetBackups(ctx context.Context, in *vtctldata.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldata.GetBackupsResponse, error) { out := new(vtctldata.GetBackupsResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetBackups", in, out, opts...) @@ -794,6 +831,15 @@ func (c *vtctldClient) GetKeyspaces(ctx context.Context, in *vtctldata.GetKeyspa return out, nil } +func (c *vtctldClient) GetKeyspaceRoutingRules(ctx context.Context, in *vtctldata.GetKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldata.GetKeyspaceRoutingRulesResponse, error) { + out := new(vtctldata.GetKeyspaceRoutingRulesResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetKeyspaceRoutingRules", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) GetPermissions(ctx context.Context, in *vtctldata.GetPermissionsRequest, opts ...grpc.CallOption) (*vtctldata.GetPermissionsResponse, error) { out := new(vtctldata.GetPermissionsResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetPermissions", in, out, opts...) 
@@ -830,6 +876,15 @@ func (c *vtctldClient) GetSchemaMigrations(ctx context.Context, in *vtctldata.Ge return out, nil } +func (c *vtctldClient) GetShardReplication(ctx context.Context, in *vtctldata.GetShardReplicationRequest, opts ...grpc.CallOption) (*vtctldata.GetShardReplicationResponse, error) { + out := new(vtctldata.GetShardReplicationResponse) + err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetShardReplication", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vtctldClient) GetShard(ctx context.Context, in *vtctldata.GetShardRequest, opts ...grpc.CallOption) (*vtctldata.GetShardResponse, error) { out := new(vtctldata.GetShardResponse) err := c.cc.Invoke(ctx, "/vtctlservice.Vtctld/GetShard", in, out, opts...) @@ -1537,6 +1592,8 @@ type VtctldServer interface { ApplyRoutingRules(context.Context, *vtctldata.ApplyRoutingRulesRequest) (*vtctldata.ApplyRoutingRulesResponse, error) // ApplySchema applies a schema to a keyspace. ApplySchema(context.Context, *vtctldata.ApplySchemaRequest) (*vtctldata.ApplySchemaResponse, error) + // ApplyKeyspaceRoutingRules applies the VSchema keyspace routing rules. + ApplyKeyspaceRoutingRules(context.Context, *vtctldata.ApplyKeyspaceRoutingRulesRequest) (*vtctldata.ApplyKeyspaceRoutingRulesResponse, error) // ApplyShardRoutingRules applies the VSchema shard routing rules. ApplyShardRoutingRules(context.Context, *vtctldata.ApplyShardRoutingRulesRequest) (*vtctldata.ApplyShardRoutingRulesResponse, error) // ApplyVSchema applies a vschema to a keyspace. @@ -1546,7 +1603,7 @@ type VtctldServer interface { Backup(*vtctldata.BackupRequest, Vtctld_BackupServer) error // BackupShard chooses a tablet in the shard and uses it to create a backup. BackupShard(*vtctldata.BackupShardRequest, Vtctld_BackupShardServer) error - // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. 
+ // CancelSchemaMigration cancels one or all migrations, terminating any running ones as needed. CancelSchemaMigration(context.Context, *vtctldata.CancelSchemaMigrationRequest) (*vtctldata.CancelSchemaMigrationResponse, error) // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a @@ -1592,9 +1649,13 @@ type VtctldServer interface { ExecuteFetchAsDBA(context.Context, *vtctldata.ExecuteFetchAsDBARequest) (*vtctldata.ExecuteFetchAsDBAResponse, error) // ExecuteHook runs the hook on the tablet. ExecuteHook(context.Context, *vtctldata.ExecuteHookRequest) (*vtctldata.ExecuteHookResponse, error) + // ExecuteMultiFetchAsDBA executes one or more SQL queries on the remote tablet as the DBA user. + ExecuteMultiFetchAsDBA(context.Context, *vtctldata.ExecuteMultiFetchAsDBARequest) (*vtctldata.ExecuteMultiFetchAsDBAResponse, error) // FindAllShardsInKeyspace returns a map of shard names to shard references // for a given keyspace. FindAllShardsInKeyspace(context.Context, *vtctldata.FindAllShardsInKeyspaceRequest) (*vtctldata.FindAllShardsInKeyspaceResponse, error) + // ForceCutOverSchemaMigration marks a schema migration for forced cut-over. + ForceCutOverSchemaMigration(context.Context, *vtctldata.ForceCutOverSchemaMigrationRequest) (*vtctldata.ForceCutOverSchemaMigrationResponse, error) // GetBackups returns all the backups for a shard. GetBackups(context.Context, *vtctldata.GetBackupsRequest) (*vtctldata.GetBackupsResponse, error) // GetCellInfo returns the information for a cell. @@ -1611,6 +1672,8 @@ type VtctldServer interface { GetKeyspace(context.Context, *vtctldata.GetKeyspaceRequest) (*vtctldata.GetKeyspaceResponse, error) // GetKeyspaces returns the keyspace struct of all keyspaces in the topo. GetKeyspaces(context.Context, *vtctldata.GetKeyspacesRequest) (*vtctldata.GetKeyspacesResponse, error) + // GetKeyspaceRoutingRules returns the VSchema keyspace routing rules. 
+ GetKeyspaceRoutingRules(context.Context, *vtctldata.GetKeyspaceRoutingRulesRequest) (*vtctldata.GetKeyspaceRoutingRulesResponse, error) // GetPermissions returns the permissions set on the remote tablet. GetPermissions(context.Context, *vtctldata.GetPermissionsRequest) (*vtctldata.GetPermissionsResponse, error) // GetRoutingRules returns the VSchema routing rules. @@ -1624,6 +1687,8 @@ type VtctldServer interface { // Different fields in the request message result in different filtering // behaviors. See the documentation on GetSchemaMigrationsRequest for details. GetSchemaMigrations(context.Context, *vtctldata.GetSchemaMigrationsRequest) (*vtctldata.GetSchemaMigrationsResponse, error) + // GetShardReplication returns the replication graph for a shard in a cell. + GetShardReplication(context.Context, *vtctldata.GetShardReplicationRequest) (*vtctldata.GetShardReplicationResponse, error) // GetShard returns information about a shard in the topology. GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) // GetShardRoutingRules returns the VSchema shard routing rules. 
@@ -1856,6 +1921,9 @@ func (UnimplementedVtctldServer) ApplyRoutingRules(context.Context, *vtctldata.A func (UnimplementedVtctldServer) ApplySchema(context.Context, *vtctldata.ApplySchemaRequest) (*vtctldata.ApplySchemaResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplySchema not implemented") } +func (UnimplementedVtctldServer) ApplyKeyspaceRoutingRules(context.Context, *vtctldata.ApplyKeyspaceRoutingRulesRequest) (*vtctldata.ApplyKeyspaceRoutingRulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyKeyspaceRoutingRules not implemented") +} func (UnimplementedVtctldServer) ApplyShardRoutingRules(context.Context, *vtctldata.ApplyShardRoutingRulesRequest) (*vtctldata.ApplyShardRoutingRulesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplyShardRoutingRules not implemented") } @@ -1916,9 +1984,15 @@ func (UnimplementedVtctldServer) ExecuteFetchAsDBA(context.Context, *vtctldata.E func (UnimplementedVtctldServer) ExecuteHook(context.Context, *vtctldata.ExecuteHookRequest) (*vtctldata.ExecuteHookResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ExecuteHook not implemented") } +func (UnimplementedVtctldServer) ExecuteMultiFetchAsDBA(context.Context, *vtctldata.ExecuteMultiFetchAsDBARequest) (*vtctldata.ExecuteMultiFetchAsDBAResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteMultiFetchAsDBA not implemented") +} func (UnimplementedVtctldServer) FindAllShardsInKeyspace(context.Context, *vtctldata.FindAllShardsInKeyspaceRequest) (*vtctldata.FindAllShardsInKeyspaceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FindAllShardsInKeyspace not implemented") } +func (UnimplementedVtctldServer) ForceCutOverSchemaMigration(context.Context, *vtctldata.ForceCutOverSchemaMigrationRequest) (*vtctldata.ForceCutOverSchemaMigrationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
ForceCutOverSchemaMigration not implemented") +} func (UnimplementedVtctldServer) GetBackups(context.Context, *vtctldata.GetBackupsRequest) (*vtctldata.GetBackupsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetBackups not implemented") } @@ -1940,6 +2014,9 @@ func (UnimplementedVtctldServer) GetKeyspace(context.Context, *vtctldata.GetKeys func (UnimplementedVtctldServer) GetKeyspaces(context.Context, *vtctldata.GetKeyspacesRequest) (*vtctldata.GetKeyspacesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetKeyspaces not implemented") } +func (UnimplementedVtctldServer) GetKeyspaceRoutingRules(context.Context, *vtctldata.GetKeyspaceRoutingRulesRequest) (*vtctldata.GetKeyspaceRoutingRulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetKeyspaceRoutingRules not implemented") +} func (UnimplementedVtctldServer) GetPermissions(context.Context, *vtctldata.GetPermissionsRequest) (*vtctldata.GetPermissionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetPermissions not implemented") } @@ -1952,6 +2029,9 @@ func (UnimplementedVtctldServer) GetSchema(context.Context, *vtctldata.GetSchema func (UnimplementedVtctldServer) GetSchemaMigrations(context.Context, *vtctldata.GetSchemaMigrationsRequest) (*vtctldata.GetSchemaMigrationsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetSchemaMigrations not implemented") } +func (UnimplementedVtctldServer) GetShardReplication(context.Context, *vtctldata.GetShardReplicationRequest) (*vtctldata.GetShardReplicationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetShardReplication not implemented") +} func (UnimplementedVtctldServer) GetShard(context.Context, *vtctldata.GetShardRequest) (*vtctldata.GetShardResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetShard not implemented") } @@ -2259,6 +2339,24 @@ func _Vtctld_ApplySchema_Handler(srv 
interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Vtctld_ApplyKeyspaceRoutingRules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ApplyKeyspaceRoutingRulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ApplyKeyspaceRoutingRules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ApplyKeyspaceRoutingRules", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ApplyKeyspaceRoutingRules(ctx, req.(*vtctldata.ApplyKeyspaceRoutingRulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_ApplyShardRoutingRules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.ApplyShardRoutingRulesRequest) if err := dec(in); err != nil { @@ -2625,6 +2723,24 @@ func _Vtctld_ExecuteHook_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _Vtctld_ExecuteMultiFetchAsDBA_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ExecuteMultiFetchAsDBARequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ExecuteMultiFetchAsDBA(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ExecuteMultiFetchAsDBA", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ExecuteMultiFetchAsDBA(ctx, req.(*vtctldata.ExecuteMultiFetchAsDBARequest)) + } + return interceptor(ctx, in, info, handler) +} + func 
_Vtctld_FindAllShardsInKeyspace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.FindAllShardsInKeyspaceRequest) if err := dec(in); err != nil { @@ -2643,6 +2759,24 @@ func _Vtctld_FindAllShardsInKeyspace_Handler(srv interface{}, ctx context.Contex return interceptor(ctx, in, info, handler) } +func _Vtctld_ForceCutOverSchemaMigration_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.ForceCutOverSchemaMigrationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).ForceCutOverSchemaMigration(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/ForceCutOverSchemaMigration", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).ForceCutOverSchemaMigration(ctx, req.(*vtctldata.ForceCutOverSchemaMigrationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_GetBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.GetBackupsRequest) if err := dec(in); err != nil { @@ -2769,6 +2903,24 @@ func _Vtctld_GetKeyspaces_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +func _Vtctld_GetKeyspaceRoutingRules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetKeyspaceRoutingRulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetKeyspaceRoutingRules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/vtctlservice.Vtctld/GetKeyspaceRoutingRules", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetKeyspaceRoutingRules(ctx, req.(*vtctldata.GetKeyspaceRoutingRulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_GetPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.GetPermissionsRequest) if err := dec(in); err != nil { @@ -2841,6 +2993,24 @@ func _Vtctld_GetSchemaMigrations_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _Vtctld_GetShardReplication_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(vtctldata.GetShardReplicationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VtctldServer).GetShardReplication(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vtctlservice.Vtctld/GetShardReplication", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VtctldServer).GetShardReplication(ctx, req.(*vtctldata.GetShardReplicationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Vtctld_GetShard_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(vtctldata.GetShardRequest) if err := dec(in); err != nil { @@ -4199,6 +4369,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "ApplySchema", Handler: _Vtctld_ApplySchema_Handler, }, + { + MethodName: "ApplyKeyspaceRoutingRules", + Handler: _Vtctld_ApplyKeyspaceRoutingRules_Handler, + }, { MethodName: "ApplyShardRoutingRules", Handler: _Vtctld_ApplyShardRoutingRules_Handler, @@ -4271,10 +4445,18 @@ var Vtctld_ServiceDesc = 
grpc.ServiceDesc{ MethodName: "ExecuteHook", Handler: _Vtctld_ExecuteHook_Handler, }, + { + MethodName: "ExecuteMultiFetchAsDBA", + Handler: _Vtctld_ExecuteMultiFetchAsDBA_Handler, + }, { MethodName: "FindAllShardsInKeyspace", Handler: _Vtctld_FindAllShardsInKeyspace_Handler, }, + { + MethodName: "ForceCutOverSchemaMigration", + Handler: _Vtctld_ForceCutOverSchemaMigration_Handler, + }, { MethodName: "GetBackups", Handler: _Vtctld_GetBackups_Handler, @@ -4303,6 +4485,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetKeyspaces", Handler: _Vtctld_GetKeyspaces_Handler, }, + { + MethodName: "GetKeyspaceRoutingRules", + Handler: _Vtctld_GetKeyspaceRoutingRules_Handler, + }, { MethodName: "GetPermissions", Handler: _Vtctld_GetPermissions_Handler, @@ -4319,6 +4505,10 @@ var Vtctld_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetSchemaMigrations", Handler: _Vtctld_GetSchemaMigrations_Handler, }, + { + MethodName: "GetShardReplication", + Handler: _Vtctld_GetShardReplication_Handler, + }, { MethodName: "GetShard", Handler: _Vtctld_GetShard_Handler, diff --git a/go/vt/proto/vtgate/vtgate.pb.go b/go/vt/proto/vtgate/vtgate.pb.go index aee90d134a4..62790b78b6d 100644 --- a/go/vt/proto/vtgate/vtgate.pb.go +++ b/go/vt/proto/vtgate/vtgate.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vtgate.proto diff --git a/go/vt/proto/vtgateservice/vtgateservice.pb.go b/go/vt/proto/vtgateservice/vtgateservice.pb.go index 2008d486dc9..b293fd0631b 100644 --- a/go/vt/proto/vtgateservice/vtgateservice.pb.go +++ b/go/vt/proto/vtgateservice/vtgateservice.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vtgateservice.proto diff --git a/go/vt/proto/vtrpc/vtrpc.pb.go b/go/vt/proto/vtrpc/vtrpc.pb.go index 0c82dc34bf5..2466b71513e 100644 --- a/go/vt/proto/vtrpc/vtrpc.pb.go +++ b/go/vt/proto/vtrpc/vtrpc.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vtrpc.proto diff --git a/go/vt/proto/vttest/vttest.pb.go b/go/vt/proto/vttest/vttest.pb.go index 4b4f269d38c..6295c1b6e68 100644 --- a/go/vt/proto/vttest/vttest.pb.go +++ b/go/vt/proto/vttest/vttest.pb.go @@ -41,7 +41,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vttest.proto @@ -134,8 +134,6 @@ type Keyspace struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // shards inside this keyspace. Ignored if redirect is set. Shards []*Shard `protobuf:"bytes,2,rep,name=shards,proto3" json:"shards,omitempty"` - // redirects all traffic to another keyspace. If set, shards is ignored. - ServedFrom string `protobuf:"bytes,5,opt,name=served_from,json=servedFrom,proto3" json:"served_from,omitempty"` // number of replica tablets to instantiate. This includes the primary tablet. ReplicaCount int32 `protobuf:"varint,6,opt,name=replica_count,json=replicaCount,proto3" json:"replica_count,omitempty"` // number of rdonly tablets to instantiate. 
@@ -188,13 +186,6 @@ func (x *Keyspace) GetShards() []*Shard { return nil } -func (x *Keyspace) GetServedFrom() string { - if x != nil { - return x.ServedFrom - } - return "" -} - func (x *Keyspace) GetReplicaCount() int32 { if x != nil { return x.ReplicaCount @@ -285,31 +276,30 @@ var file_vttest_proto_rawDesc = []byte{ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x64, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x62, - 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x22, 0xba, 0x01, 0x0a, + 0x4e, 0x61, 0x6d, 0x65, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x22, 0x9f, 0x01, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x74, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x06, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x66, - 0x72, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x64, - 0x6f, 0x6e, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0b, 0x72, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x04, 0x08, - 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x92, 0x01, 0x0a, 0x0e, 0x56, 0x54, - 0x54, 0x65, 0x73, 
0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x12, 0x2e, 0x0a, 0x09, - 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x76, 0x74, 0x74, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x63, 0x65, 0x6c, - 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x75, - 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, - 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x25, - 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, - 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, - 0x74, 0x74, 0x65, 0x73, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x72, 0x64, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x64, 0x6f, + 0x6e, 0x6c, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0b, 0x72, 0x64, 0x6f, 0x6e, 0x6c, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x04, 0x08, 0x03, + 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x92, + 0x01, 0x0a, 0x0e, 0x56, 0x54, 0x54, 0x65, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, + 0x79, 0x12, 0x2e, 0x0a, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x76, 0x74, 0x74, 0x65, 0x73, 
0x74, 0x2e, 0x4b, 0x65, + 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x05, 0x63, 0x65, 0x6c, 0x6c, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x76, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x75, + 0x6c, 0x65, 0x73, 0x42, 0x25, 0x5a, 0x23, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, 0x69, 0x6f, + 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x74, 0x74, 0x65, 0x73, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/go/vt/proto/vttest/vttest_vtproto.pb.go b/go/vt/proto/vttest/vttest_vtproto.pb.go index f1dee298011..8000a036a5f 100644 --- a/go/vt/proto/vttest/vttest_vtproto.pb.go +++ b/go/vt/proto/vttest/vttest_vtproto.pb.go @@ -45,7 +45,6 @@ func (m *Keyspace) CloneVT() *Keyspace { } r := &Keyspace{ Name: m.Name, - ServedFrom: m.ServedFrom, ReplicaCount: m.ReplicaCount, RdonlyCount: m.RdonlyCount, } @@ -184,13 +183,6 @@ func (m *Keyspace) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x30 } - if len(m.ServedFrom) > 0 { - i -= len(m.ServedFrom) - copy(dAtA[i:], m.ServedFrom) - i = encodeVarint(dAtA, i, uint64(len(m.ServedFrom))) - i-- - dAtA[i] = 0x2a - } if len(m.Shards) > 0 { for iNdEx := len(m.Shards) - 1; iNdEx >= 0; iNdEx-- { size, err := m.Shards[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) @@ -322,10 +314,6 @@ func (m *Keyspace) SizeVT() (n int) { n += 1 + l + sov(uint64(l)) } } - l = len(m.ServedFrom) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } if m.ReplicaCount != 0 { n += 1 + sov(uint64(m.ReplicaCount)) } @@ -578,38 +566,6 @@ 
func (m *Keyspace) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServedFrom", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServedFrom = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReplicaCount", wireType) diff --git a/go/vt/proto/vttime/vttime.pb.go b/go/vt/proto/vttime/vttime.pb.go index 5cdf3f616ce..9395edfd883 100644 --- a/go/vt/proto/vttime/vttime.pb.go +++ b/go/vt/proto/vttime/vttime.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.34.1 // protoc v3.21.3 // source: vttime.proto diff --git a/go/vt/schema/ddl_strategy.go b/go/vt/schema/ddl_strategy.go index bc33c8cb3cf..e3b03c3f330 100644 --- a/go/vt/schema/ddl_strategy.go +++ b/go/vt/schema/ddl_strategy.go @@ -27,9 +27,10 @@ import ( ) var ( - strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`) - cutOverThresholdFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, cutOverThresholdFlag)) - retainArtifactsFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, retainArtifactsFlag)) + strategyParserRegexp = regexp.MustCompile(`^([\S]+)\s+(.*)$`) + cutOverThresholdFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, cutOverThresholdFlag)) + forceCutOverAfterFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, forceCutOverAfterFlag)) + retainArtifactsFlagRegexp = regexp.MustCompile(fmt.Sprintf(`^[-]{1,2}%s=(.*?)$`, retainArtifactsFlag)) ) const ( @@ -45,6 +46,7 @@ const ( preferInstantDDL = "prefer-instant-ddl" fastRangeRotationFlag = "fast-range-rotation" cutOverThresholdFlag = "cut-over-threshold" + forceCutOverAfterFlag = "force-cut-over-after" retainArtifactsFlag = "retain-artifacts" vreplicationTestSuite = "vreplication-test-suite" allowForeignKeysFlag = "unsafe-allow-foreign-keys" @@ -116,6 +118,17 @@ func ParseDDLStrategy(strategyVariable string) (*DDLStrategySetting, error) { if _, err := setting.RetainArtifactsDuration(); err != nil { return nil, err } + cutoverAfter, err := setting.ForceCutOverAfter() + if err != nil { + return nil, err + } + switch setting.Strategy { + case DDLStrategyVitess, DDLStrategyOnline: + default: + if cutoverAfter != 0 { + return nil, fmt.Errorf("--force-cut-over-after is only valid in 'vitess' strategy. 
Found %v value in '%v' strategy", cutoverAfter, setting.Strategy) + } + } switch setting.Strategy { case DDLStrategyVitess, DDLStrategyOnline, DDLStrategyMySQL, DDLStrategyDirect: @@ -194,11 +207,6 @@ func (setting *DDLStrategySetting) IsPreferInstantDDL() bool { return setting.hasFlag(preferInstantDDL) } -// IsFastRangeRotationFlag checks if strategy options include --fast-range-rotation -func (setting *DDLStrategySetting) IsFastRangeRotationFlag() bool { - return setting.hasFlag(fastRangeRotationFlag) -} - // isCutOverThresholdFlag returns true when given option denotes a `--cut-over-threshold=[...]` flag func isCutOverThresholdFlag(opt string) (string, bool) { submatch := cutOverThresholdFlagRegexp.FindStringSubmatch(opt) @@ -208,6 +216,15 @@ func isCutOverThresholdFlag(opt string) (string, bool) { return submatch[1], true } +// isForceCutOverFlag returns true when given option denotes a `--force-cut-over-after=[...]` flag +func isForceCutOverFlag(opt string) (string, bool) { + submatch := forceCutOverAfterFlagRegexp.FindStringSubmatch(opt) + if len(submatch) == 0 { + return "", false + } + return submatch[1], true +} + // isRetainArtifactsFlag returns true when given option denotes a `--retain-artifacts=[...]` flag func isRetainArtifactsFlag(opt string) (string, bool) { submatch := retainArtifactsFlagRegexp.FindStringSubmatch(opt) @@ -235,6 +252,24 @@ func (setting *DDLStrategySetting) CutOverThreshold() (d time.Duration, err erro return d, err } +// ForceCutOverAfter returns a the duration threshold indicated by --force-cut-over-after +func (setting *DDLStrategySetting) ForceCutOverAfter() (d time.Duration, err error) { + // We do some ugly manual parsing of --cut-over-threshold value + opts, _ := shlex.Split(setting.Options) + for _, opt := range opts { + if val, isCutOver := isForceCutOverFlag(opt); isCutOver { + // value is possibly quoted + if s, err := strconv.Unquote(val); err == nil { + val = s + } + if val != "" { + d, err = time.ParseDuration(val) + } 
+ } + } + return d, err +} + // RetainArtifactsDuration returns a the duration indicated by --retain-artifacts func (setting *DDLStrategySetting) RetainArtifactsDuration() (d time.Duration, err error) { // We do some ugly manual parsing of --retain-artifacts @@ -276,12 +311,15 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string { if _, ok := isCutOverThresholdFlag(opt); ok { continue } + if _, ok := isForceCutOverFlag(opt); ok { + continue + } if _, ok := isRetainArtifactsFlag(opt); ok { continue } switch { case isFlag(opt, declarativeFlag): - case isFlag(opt, skipTopoFlag): + case isFlag(opt, skipTopoFlag): // deprecated flag, parsed for backwards compatibility case isFlag(opt, singletonFlag): case isFlag(opt, singletonContextFlag): case isFlag(opt, allowZeroInDateFlag): @@ -290,7 +328,7 @@ func (setting *DDLStrategySetting) RuntimeOptions() []string { case isFlag(opt, inOrderCompletionFlag): case isFlag(opt, allowConcurrentFlag): case isFlag(opt, preferInstantDDL): - case isFlag(opt, fastRangeRotationFlag): + case isFlag(opt, fastRangeRotationFlag): // deprecated flag, parsed for backwards compatibility case isFlag(opt, vreplicationTestSuite): case isFlag(opt, allowForeignKeysFlag): case isFlag(opt, analyzeTableFlag): diff --git a/go/vt/schema/ddl_strategy_test.go b/go/vt/schema/ddl_strategy_test.go index ba7d029b8b7..f27f0963e80 100644 --- a/go/vt/schema/ddl_strategy_test.go +++ b/go/vt/schema/ddl_strategy_test.go @@ -198,6 +198,7 @@ func TestParseDDLStrategy(t *testing.T) { allowForeignKeys bool analyzeTable bool cutOverThreshold time.Duration + forceCutOverAfter time.Duration expireArtifacts time.Duration runtimeOptions string expectError string @@ -320,6 +321,25 @@ func TestParseDDLStrategy(t *testing.T) { runtimeOptions: "", cutOverThreshold: 5 * time.Minute, }, + { + strategyVariable: "vitess --force-cut-over-after=3m", + strategy: DDLStrategyVitess, + options: "--force-cut-over-after=3m", + runtimeOptions: "", + forceCutOverAfter: 3 * 
time.Minute, + }, + { + strategyVariable: "vitess --force-cut-over-after=r3m", + strategy: DDLStrategyVitess, + runtimeOptions: "", + expectError: "time: invalid duration", + }, + { + strategyVariable: "gh-ost --force-cut-over-after=3m", + strategy: DDLStrategyVitess, + runtimeOptions: "", + expectError: "--force-cut-over-after is only valid in 'vitess' strategy", + }, { strategyVariable: "vitess --retain-artifacts=4m", strategy: DDLStrategyVitess, @@ -338,14 +358,12 @@ func TestParseDDLStrategy(t *testing.T) { { strategyVariable: "vitess --alow-concrrnt", // intentional typo strategy: DDLStrategyVitess, - options: "", runtimeOptions: "", expectError: "invalid flags", }, { strategyVariable: "vitess --declarative --max-load=Threads_running=100", strategy: DDLStrategyVitess, - options: "--declarative --max-load=Threads_running=100", runtimeOptions: "--max-load=Threads_running=100", expectError: "invalid flags", }, @@ -366,12 +384,14 @@ func TestParseDDLStrategy(t *testing.T) { assert.Equal(t, ts.isPostponeLaunch, setting.IsPostponeLaunch()) assert.Equal(t, ts.isAllowConcurrent, setting.IsAllowConcurrent()) assert.Equal(t, ts.fastOverRevertible, setting.IsPreferInstantDDL()) - assert.Equal(t, ts.fastRangeRotation, setting.IsFastRangeRotationFlag()) assert.Equal(t, ts.allowForeignKeys, setting.IsAllowForeignKeysFlag()) assert.Equal(t, ts.analyzeTable, setting.IsAnalyzeTableFlag()) cutOverThreshold, err := setting.CutOverThreshold() assert.NoError(t, err) assert.Equal(t, ts.cutOverThreshold, cutOverThreshold) + forceCutOverAfter, err := setting.ForceCutOverAfter() + assert.NoError(t, err) + assert.Equal(t, ts.forceCutOverAfter, forceCutOverAfter) runtimeOptions := strings.Join(setting.RuntimeOptions(), " ") assert.Equal(t, ts.runtimeOptions, runtimeOptions) diff --git a/go/vt/schema/name.go b/go/vt/schema/name.go index 42d3878b302..c9754129c39 100644 --- a/go/vt/schema/name.go +++ b/go/vt/schema/name.go @@ -17,6 +17,8 @@ limitations under the License. 
package schema import ( + "fmt" + "regexp" "strings" "time" @@ -27,6 +29,30 @@ const ( readableTimeFormat = "20060102150405" ) +const ( + InternalTableNameExpression string = `^_vt_([a-zA-Z0-9]{3})_([0-f]{32})_([0-9]{14})_$` +) + +type InternalTableHint string + +const ( + InternalTableUnknownHint InternalTableHint = "nil" + InternalTableGCHoldHint InternalTableHint = "hld" + InternalTableGCPurgeHint InternalTableHint = "prg" + InternalTableGCEvacHint InternalTableHint = "evc" + InternalTableGCDropHint InternalTableHint = "drp" + InternalTableVreplicationHint InternalTableHint = "vrp" +) + +func (h InternalTableHint) String() string { + return string(h) +} + +var ( + // internalTableNameRegexp parses new internal table name format, e.g. _vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_ + internalTableNameRegexp = regexp.MustCompile(InternalTableNameExpression) +) + // CreateUUIDWithDelimiter creates a globally unique ID, with a given delimiter // example results: // - 1876a01a-354d-11eb-9a79-f8e4e33000bb (delimiter = "-") @@ -55,12 +81,53 @@ func ToReadableTimestamp(t time.Time) string { return t.Format(readableTimeFormat) } +// ReadableTimestamp returns the current timestamp, in seconds resolution, that is human readable +func ReadableTimestamp() string { + return ToReadableTimestamp(time.Now()) +} + +func condenseUUID(uuid string) string { + uuid = strings.ReplaceAll(uuid, "-", "") + uuid = strings.ReplaceAll(uuid, "_", "") + return uuid +} + +// isCondensedUUID answers 'true' when the given string is a condensed UUID, e.g.: +// a0638f6bec7b11ea9bf8000d3a9b8a9a +func isCondensedUUID(uuid string) bool { + return condensedUUIDRegexp.MatchString(uuid) +} + +// generateGCTableName creates an internal table name, based on desired hint and time, and with optional preset UUID. +// If uuid is given, then it must be in condensed-UUID format. If empty, the function auto-generates a UUID. 
+func GenerateInternalTableName(hint string, uuid string, t time.Time) (tableName string, err error) { + if len(hint) != 3 { + return "", fmt.Errorf("Invalid hint: %s, expected 3 characters", hint) + } + if uuid == "" { + uuid, err = CreateUUIDWithDelimiter("") + } else { + uuid = condenseUUID(uuid) + } + if err != nil { + return "", err + } + if !isCondensedUUID(uuid) { + return "", fmt.Errorf("Invalid UUID: %s, expected condensed 32 hexadecimals", uuid) + } + timestamp := ToReadableTimestamp(t) + return fmt.Sprintf("_vt_%s_%s_%s_", hint, uuid, timestamp), nil +} + // IsInternalOperationTableName answers 'true' when the given table name stands for an internal Vitess // table used for operations such as: // - Online DDL (gh-ost, pt-online-schema-change) // - Table GC (renamed before drop) // Apps such as VStreamer may choose to ignore such tables. func IsInternalOperationTableName(tableName string) bool { + if internalTableNameRegexp.MatchString(tableName) { + return true + } if IsGCTableName(tableName) { return true } @@ -69,3 +136,21 @@ func IsInternalOperationTableName(tableName string) bool { } return false } + +// AnalyzeInternalTableName analyzes a table name, and assumign it's a vitess internal table name, extracts +// the hint, uuid and time out of the name. +// An internal table name can be e.g. 
`_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_`, analyzed like so: +// - hint is `hld` +// - UUID is `6ace8bcef73211ea87e9f875a4d24e90` +// - Time is 2020-09-15 12:04:10 +func AnalyzeInternalTableName(tableName string) (isInternalTable bool, hint string, uuid string, t time.Time, err error) { + submatch := internalTableNameRegexp.FindStringSubmatch(tableName) + if len(submatch) == 0 { + return false, hint, uuid, t, nil + } + t, err = time.Parse(readableTimeFormat, submatch[3]) + if err != nil { + return false, hint, uuid, t, err + } + return true, submatch[1], submatch[2], t, nil +} diff --git a/go/vt/schema/name_test.go b/go/vt/schema/name_test.go index ab72f80644e..7d1d086cc45 100644 --- a/go/vt/schema/name_test.go +++ b/go/vt/schema/name_test.go @@ -18,8 +18,10 @@ package schema import ( "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNameIsGCTableName(t *testing.T) { @@ -69,6 +71,14 @@ func TestIsInternalOperationTableName(t *testing.T) { "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", "_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_evc_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_vrp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_gho_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_ghc_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_xyz_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", } for _, tableName := range names { assert.True(t, IsInternalOperationTableName(tableName)) @@ -93,3 +103,124 @@ func TestIsInternalOperationTableName(t *testing.T) { assert.False(t, IsInternalOperationTableName(tableName)) } } + +func TestAnalyzeInternalTableName(t *testing.T) { + baseTime, 
err := time.Parse(time.RFC1123, "Tue, 15 Sep 2020 12:04:10 UTC") + assert.NoError(t, err) + tt := []struct { + tableName string + hint string + t time.Time + isInternal bool + }{ + { + tableName: "_84371a37_6153_11eb_9917_f875a4d24e90_20210128122816_vrepl", + isInternal: false, + }, + { + tableName: "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + isInternal: false, + }, + { + tableName: "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + isInternal: false, + }, + { + tableName: "_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + isInternal: false, + }, + { + tableName: "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + isInternal: false, + }, + { + tableName: "_vt_drop_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + isInternal: false, + }, + { + tableName: "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + hint: "drp", + t: baseTime, + isInternal: true, + }, + { + tableName: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + hint: "hld", + t: baseTime, + isInternal: true, + }, + { + tableName: "_vt_xyz_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + hint: "xyz", + t: baseTime, + isInternal: true, + }, + { + tableName: "_vt_xyz_6ace8bcef73211ea87e9f875a4d24e90_20200915129999_", + isInternal: false, + }, + } + for _, ts := range tt { + t.Run(ts.tableName, func(t *testing.T) { + isInternal, hint, uuid, tm, err := AnalyzeInternalTableName(ts.tableName) + assert.Equal(t, ts.isInternal, isInternal) + if ts.isInternal { + assert.NoError(t, err) + assert.True(t, isCondensedUUID(uuid)) + assert.Equal(t, ts.hint, hint) + assert.Equal(t, ts.t, tm) + } + }) + } +} + +func TestToReadableTimestamp(t *testing.T) { + ti, err := time.Parse(time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") + assert.NoError(t, err) + + readableTimestamp := ToReadableTimestamp(ti) + assert.Equal(t, "20150225110639", readableTimestamp) +} + +func TestGenerateInternalTableName(t *testing.T) { + ti, err := 
time.Parse(time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") + assert.NoError(t, err) + + { + uuid := "6ace8bcef73211ea87e9f875a4d24e90" + tableName, err := GenerateInternalTableName(InternalTableGCPurgeHint.String(), uuid, ti) + require.NoError(t, err) + assert.Equal(t, "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20150225110639_", tableName) + assert.True(t, IsInternalOperationTableName(tableName)) + } + { + uuid := "4e5dcf80_354b_11eb_82cd_f875a4d24e90" + tableName, err := GenerateInternalTableName(InternalTableGCPurgeHint.String(), uuid, ti) + require.NoError(t, err) + assert.Equal(t, "_vt_prg_4e5dcf80354b11eb82cdf875a4d24e90_20150225110639_", tableName) + assert.True(t, IsInternalOperationTableName(tableName)) + } + { + uuid := "4e5dcf80-354b-11eb-82cd-f875a4d24e90" + tableName, err := GenerateInternalTableName(InternalTableGCPurgeHint.String(), uuid, ti) + require.NoError(t, err) + assert.Equal(t, "_vt_prg_4e5dcf80354b11eb82cdf875a4d24e90_20150225110639_", tableName) + assert.True(t, IsInternalOperationTableName(tableName)) + } + { + uuid := "" + tableName, err := GenerateInternalTableName(InternalTableGCPurgeHint.String(), uuid, ti) + require.NoError(t, err) + assert.True(t, IsInternalOperationTableName(tableName)) + } + { + uuid := "4e5dcf80_354b_11eb_82cd_f875a4d24e90_00001111" + _, err := GenerateInternalTableName(InternalTableGCPurgeHint.String(), uuid, ti) + require.ErrorContains(t, err, "Invalid UUID") + } + { + uuid := "6ace8bcef73211ea87e9f875a4d24e90" + _, err := GenerateInternalTableName("abcdefg", uuid, ti) + require.ErrorContains(t, err, "Invalid hint") + } +} diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go index a06866e996a..57ed075cf38 100644 --- a/go/vt/schema/online_ddl.go +++ b/go/vt/schema/online_ddl.go @@ -37,6 +37,16 @@ var ( migrationContextValidatorRegexp = regexp.MustCompile(`^[\w:-]*$`) ) +var ( + onlineDDLInternalTableHintsMap = map[string]bool{ + "vrp": true, // vreplication + "gho": true, // gh-ost + "ghc": true, // 
gh-ost + "del": true, // gh-ost + "new": true, // pt-osc + } +) + var ( // ErrDirectDDLDisabled is returned when direct DDL is disabled, and a user attempts to run a DDL statement ErrDirectDDLDisabled = errors.New("direct DDL is disabled") @@ -108,17 +118,10 @@ type OnlineDDL struct { WasReadyToComplete int64 `json:"was_ready_to_complete,omitempty"` } -// FromJSON creates an OnlineDDL from json -func FromJSON(bytes []byte) (*OnlineDDL, error) { - onlineDDL := &OnlineDDL{} - err := json.Unmarshal(bytes, onlineDDL) - return onlineDDL, err -} - // ParseOnlineDDLStatement parses the given SQL into a statement and returns the action type of the DDL statement, or error // if the statement is not a DDL -func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action sqlparser.DDLAction, err error) { - stmt, err := sqlparser.Parse(sql) +func ParseOnlineDDLStatement(sql string, parser *sqlparser.Parser) (ddlStmt sqlparser.DDLStatement, action sqlparser.DDLAction, err error) { + stmt, err := parser.Parse(sql) if err != nil { return nil, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error parsing statement: SQL=%s, error=%+v", sql, err) } @@ -129,10 +132,10 @@ func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action return ddlStmt, action, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported query type: %s", sql) } -func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting) error { +func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, parser *sqlparser.Parser) error { // SQL statement sanity checks: if !ddlStmt.IsFullyParsed() { - if _, err := sqlparser.ParseStrictDDL(sql); err != nil { + if _, err := parser.ParseStrictDDL(sql); err != nil { // More information about the reason why the statement is not fully parsed: return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "%v", err) } @@ -154,12 
+157,12 @@ func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStr } // NewOnlineDDLs takes a single DDL statement, normalizes it (potentially break down into multiple statements), and generates one or more OnlineDDL instances, one for each normalized statement -func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDLs [](*OnlineDDL), err error) { +func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string, parser *sqlparser.Parser) (onlineDDLs []*OnlineDDL, err error) { appendOnlineDDL := func(tableName string, ddlStmt sqlparser.DDLStatement) error { - if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting); err != nil { + if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting, parser); err != nil { return err } - onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID) + onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID, parser) if err != nil { return err } @@ -190,7 +193,7 @@ func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, } // NewOnlineDDL creates a schema change request with self generated UUID and RequestTime -func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDL *OnlineDDL, err error) { +func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string, parser *sqlparser.Parser) (onlineDDL *OnlineDDL, err error) { if ddlStrategySetting == nil { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "NewOnlineDDL: found nil 
DDLStrategySetting") } @@ -224,7 +227,7 @@ func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting sql = fmt.Sprintf("revert vitess_migration '%s'", uuid) } - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { isLegacyRevertStatement := false // query validation and rebuilding @@ -347,9 +350,9 @@ func (onlineDDL *OnlineDDL) ToJSON() ([]byte, error) { } // sqlWithoutComments returns the SQL statement without comment directives. Useful for tests -func (onlineDDL *OnlineDDL) sqlWithoutComments() (sql string, err error) { +func (onlineDDL *OnlineDDL) sqlWithoutComments(parser *sqlparser.Parser) (sql string, err error) { sql = onlineDDL.SQL - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { // query validation and rebuilding if _, err := legacyParseRevertUUID(sql); err == nil { @@ -373,18 +376,18 @@ func (onlineDDL *OnlineDDL) sqlWithoutComments() (sql string, err error) { } // GetAction extracts the DDL action type from the online DDL statement -func (onlineDDL *OnlineDDL) GetAction() (action sqlparser.DDLAction, err error) { - if _, err := onlineDDL.GetRevertUUID(); err == nil { +func (onlineDDL *OnlineDDL) GetAction(parser *sqlparser.Parser) (action sqlparser.DDLAction, err error) { + if _, err := onlineDDL.GetRevertUUID(parser); err == nil { return sqlparser.RevertDDLAction, nil } - _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL) + _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL, parser) return action, err } // IsView returns 'true' when the statement affects a VIEW -func (onlineDDL *OnlineDDL) IsView() bool { - stmt, _, err := ParseOnlineDDLStatement(onlineDDL.SQL) +func (onlineDDL *OnlineDDL) IsView(parser *sqlparser.Parser) bool { + stmt, _, err := ParseOnlineDDLStatement(onlineDDL.SQL, parser) if err != nil { return false } @@ -396,8 +399,8 @@ func (onlineDDL *OnlineDDL) IsView() bool { } // GetActionStr returns a string representation of the DDL action -func 
(onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionStr string, err error) { - action, err = onlineDDL.GetAction() +func (onlineDDL *OnlineDDL) GetActionStr(parser *sqlparser.Parser) (action sqlparser.DDLAction, actionStr string, err error) { + action, err = onlineDDL.GetAction(parser) if err != nil { return action, actionStr, err } @@ -417,11 +420,11 @@ func (onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionSt // GetRevertUUID works when this migration is a revert for another migration. It returns the UUID // fo the reverted migration. // The function returns error when this is not a revert migration. -func (onlineDDL *OnlineDDL) GetRevertUUID() (uuid string, err error) { +func (onlineDDL *OnlineDDL) GetRevertUUID(parser *sqlparser.Parser) (uuid string, err error) { if uuid, err := legacyParseRevertUUID(onlineDDL.SQL); err == nil { return uuid, nil } - if stmt, err := sqlparser.Parse(onlineDDL.SQL); err == nil { + if stmt, err := parser.Parse(onlineDDL.SQL); err == nil { if revert, ok := stmt.(*sqlparser.RevertMigration); ok { return revert.UUID, nil } @@ -461,6 +464,12 @@ func OnlineDDLToGCUUID(uuid string) string { // by pt-online-schema-change. // There is no guarantee that the tables _was indeed_ generated by an online DDL flow. func IsOnlineDDLTableName(tableName string) bool { + // Try new naming format (e.g. 
`_vt_vrp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_`): + // The new naming format is accepted in v19, and actually _used_ in v20 + if isInternal, hint, _, _, _ := AnalyzeInternalTableName(tableName); isInternal { + return onlineDDLInternalTableHintsMap[hint] + } + if onlineDDLGeneratedTableNameRegexp.MatchString(tableName) { return true } diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go index c616d64a698..c443f6b28ce 100644 --- a/go/vt/schema/online_ddl_test.go +++ b/go/vt/schema/online_ddl_test.go @@ -52,13 +52,14 @@ func TestIsOnlineDDLUUID(t *testing.T) { } func TestGetGCUUID(t *testing.T) { + parser := sqlparser.NewTestParser() uuids := map[string]bool{} count := 20 for i := 0; i < count; i++ { - onlineDDL, err := NewOnlineDDL("ks", "tbl", "alter table t drop column c", NewDDLStrategySetting(DDLStrategyDirect, ""), "", "") + onlineDDL, err := NewOnlineDDL("ks", "tbl", "alter table t drop column c", NewDDLStrategySetting(DDLStrategyDirect, ""), "", "", parser) assert.NoError(t, err) gcUUID := onlineDDL.GetGCUUID() - assert.True(t, IsGCUUID(gcUUID)) + assert.True(t, isCondensedUUID(gcUUID)) uuids[gcUUID] = true } assert.Equal(t, count, len(uuids)) @@ -86,10 +87,11 @@ func TestGetActionStr(t *testing.T) { isError: true, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { onlineDDL := &OnlineDDL{SQL: ts.statement} - _, actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr(parser) if ts.isError { assert.Error(t, err) } else { @@ -101,31 +103,50 @@ func TestGetActionStr(t *testing.T) { } func TestIsOnlineDDLTableName(t *testing.T) { - names := []string{ - "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_gho", - "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_ghc", - "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_del", - "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114013_new", - 
"_84371a37_6153_11eb_9917_f875a4d24e90_20210128122816_vrepl", - "_table_old", - "__table_old", - } - for _, tableName := range names { - assert.True(t, IsOnlineDDLTableName(tableName)) - } - irrelevantNames := []string{ - "t", - "_table_new", - "__table_new", - "_table_gho", - "_table_ghc", - "_table_del", - "_table_vrepl", - "table_old", - } - for _, tableName := range irrelevantNames { - assert.False(t, IsOnlineDDLTableName(tableName)) - } + t.Run("accept", func(t *testing.T) { + names := []string{ + "_vt_vrp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_gho_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_ghc_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_del_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_new_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_gho", + "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_ghc", + "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114014_del", + "_4e5dcf80_354b_11eb_82cd_f875a4d24e90_20201203114013_new", + "_84371a37_6153_11eb_9917_f875a4d24e90_20210128122816_vrepl", + "_table_old", + "__table_old", + } + for _, tableName := range names { + t.Run(tableName, func(t *testing.T) { + assert.True(t, IsOnlineDDLTableName(tableName)) + }) + } + }) + t.Run("reject", func(t *testing.T) { + irrelevantNames := []string{ + "_vt_vrp_6ace8bcef73211ea87e9f875a4d24e90_20200915999999_", // time error + "_vt_xyz_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", // unrecognized hint + "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", // GC table + "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", // GC table + "_vt_evc_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", // GC table + "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", // GC table + "t", + "_table_new", + "__table_new", + "_table_gho", + "_table_ghc", + "_table_del", + "_table_vrepl", + "table_old", + } + for _, tableName := range irrelevantNames 
{ + t.Run(tableName, func(t *testing.T) { + assert.False(t, IsOnlineDDLTableName(tableName)) + }) + } + }) } func TestGetRevertUUID(t *testing.T) { @@ -147,10 +168,11 @@ func TestGetRevertUUID(t *testing.T) { isError: true, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { onlineDDL := &OnlineDDL{SQL: ts.statement} - uuid, err := onlineDDL.GetRevertUUID() + uuid, err := onlineDDL.GetRevertUUID(parser) if ts.isError { assert.Error(t, err) return @@ -162,10 +184,10 @@ func TestGetRevertUUID(t *testing.T) { migrationContext := "354b-11eb-82cd-f875a4d24e90" for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { - onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.statement, NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "") + onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.statement, NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "", parser) assert.NoError(t, err) require.NotNil(t, onlineDDL) - uuid, err := onlineDDL.GetRevertUUID() + uuid, err := onlineDDL.GetRevertUUID(parser) if ts.isError { assert.Error(t, err) return @@ -209,11 +231,12 @@ func TestNewOnlineDDL(t *testing.T) { NewDDLStrategySetting(DDLStrategyOnline, "-singleton"), } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.sql, func(t *testing.T) { for _, stgy := range strategies { t.Run(stgy.ToString(), func(t *testing.T) { - onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.sql, stgy, migrationContext, "") + onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.sql, stgy, migrationContext, "", parser) if ts.isError { assert.Error(t, err) return @@ -231,19 +254,20 @@ func TestNewOnlineDDL(t *testing.T) { t.Run("explicit UUID", func(t *testing.T) { var err error var onlineDDL *OnlineDDL + parser := sqlparser.NewTestParser() - onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "") + onlineDDL, err = 
NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "", parser) assert.NoError(t, err) assert.True(t, IsOnlineDDLUUID(onlineDDL.UUID)) - _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "abc") + _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "abc", parser) assert.Error(t, err) - onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "4e5dcf80_354b_11eb_82cd_f875a4d24e90") + onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "4e5dcf80_354b_11eb_82cd_f875a4d24e90", parser) assert.NoError(t, err) assert.Equal(t, "4e5dcf80_354b_11eb_82cd_f875a4d24e90", onlineDDL.UUID) - _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, " 4e5dcf80_354b_11eb_82cd_f875a4d24e90") + _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, " 4e5dcf80_354b_11eb_82cd_f875a4d24e90", parser) assert.Error(t, err) }) } @@ -284,9 +308,10 @@ func TestNewOnlineDDLs(t *testing.T) { "CREATE TABLE if not exists t (id bigint unsigned NOT NULL AUTO_INCREMENT, ts datetime(6) DEFAULT NULL, error_column NO_SUCH_TYPE NOT NULL, PRIMARY KEY (id)) ENGINE=InnoDB": {isError: true, expectErrorText: "near"}, } migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for query, expect := range tests { t.Run(query, func(t *testing.T) { - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) if expect.parseError { assert.Error(t, err) return @@ -299,7 +324,7 @@ func TestNewOnlineDDLs(t *testing.T) { } 
assert.True(t, ok) - onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "") + onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "", parser) if expect.isError { assert.Error(t, err) assert.Contains(t, err.Error(), expect.expectErrorText) @@ -309,12 +334,12 @@ func TestNewOnlineDDLs(t *testing.T) { sqls := []string{} for _, onlineDDL := range onlineDDLs { - sql, err := onlineDDL.sqlWithoutComments() + sql, err := onlineDDL.sqlWithoutComments(parser) assert.NoError(t, err) sql = strings.ReplaceAll(sql, "\n", "") sql = strings.ReplaceAll(sql, "\t", "") sqls = append(sqls, sql) - assert.Equal(t, expect.isView, onlineDDL.IsView()) + assert.Equal(t, expect.isView, onlineDDL.IsView(parser)) } assert.Equal(t, expect.sqls, sqls) }) @@ -328,12 +353,13 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { } migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for _, query := range queries { t.Run(query, func(t *testing.T) { for _, allowForeignKeys := range []bool{false, true} { testName := fmt.Sprintf("%t", allowForeignKeys) t.Run(testName, func(t *testing.T) { - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) require.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) require.True(t, ok) @@ -342,7 +368,7 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { if allowForeignKeys { flags = "--unsafe-allow-foreign-keys" } - onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), migrationContext, "") + onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), migrationContext, "", parser) if allowForeignKeys { assert.NoError(t, err) } else { @@ -351,7 +377,7 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { } for _, onlineDDL := range onlineDDLs { - sql, err := 
onlineDDL.sqlWithoutComments() + sql, err := onlineDDL.sqlWithoutComments(parser) assert.NoError(t, err) assert.NotEmpty(t, sql) } @@ -373,12 +399,13 @@ func TestOnlineDDLFromCommentedStatement(t *testing.T) { } strategySetting := NewDDLStrategySetting(DDLStrategyGhost, `-singleton -declarative --max-load="Threads_running=5"`) migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for _, query := range queries { t.Run(query, func(t *testing.T) { - o1, err := NewOnlineDDL("ks", "t", query, strategySetting, migrationContext, "") + o1, err := NewOnlineDDL("ks", "t", query, strategySetting, migrationContext, "", parser) require.NoError(t, err) - stmt, err := sqlparser.Parse(o1.SQL) + stmt, err := parser.Parse(o1.SQL) require.NoError(t, err) o2, err := OnlineDDLFromCommentedStatement(stmt) diff --git a/go/vt/schema/parser.go b/go/vt/schema/parser.go index 6bf15057d13..78ec4ec36e6 100644 --- a/go/vt/schema/parser.go +++ b/go/vt/schema/parser.go @@ -19,10 +19,9 @@ package schema import ( "fmt" "regexp" - "strconv" "strings" - "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" ) @@ -113,31 +112,71 @@ func ParseSetValues(setColumnType string) string { // returns the (unquoted) text values // Expected input: `'x-small','small','medium','large','x-large'` // Unexpected input: `enum('x-small','small','medium','large','x-large')` -func parseEnumOrSetTokens(enumOrSetValues string) (tokens []string) { - if submatch := enumValuesRegexp.FindStringSubmatch(enumOrSetValues); len(submatch) > 0 { - // input should not contain `enum(...)` column definition, just the comma delimited list - return tokens - } - if submatch := setValuesRegexp.FindStringSubmatch(enumOrSetValues); len(submatch) > 0 { - // input should not contain `enum(...)` column definition, just the comma delimited list - return tokens - } - tokens = textutil.SplitDelimitedList(enumOrSetValues) - for i := range tokens { - if 
strings.HasPrefix(tokens[i], `'`) && strings.HasSuffix(tokens[i], `'`) { - tokens[i] = strings.Trim(tokens[i], `'`) +func parseEnumOrSetTokens(enumOrSetValues string) []string { + // We need to track both the start of the current value and current + // position, since there might be quoted quotes inside the value + // which we need to handle. + start := 0 + pos := 1 + var tokens []string + for { + // If the input does not start with a quote, it's not a valid enum/set definition + if enumOrSetValues[start] != '\'' { + return nil + } + i := strings.IndexByte(enumOrSetValues[pos:], '\'') + // If there's no closing quote, we have invalid input + if i < 0 { + return nil + } + // We're at the end here of the last quoted value, + // so we add the last token and return them. + if i == len(enumOrSetValues[pos:])-1 { + tok, err := sqltypes.DecodeStringSQL(enumOrSetValues[start:]) + if err != nil { + return nil + } + tokens = append(tokens, tok) + return tokens + } + // MySQL double quotes things as escape value, so if we see another + // single quote, we skip the character and remove it from the input. + if enumOrSetValues[pos+i+1] == '\'' { + pos = pos + i + 2 + continue } + // Next value needs to be a comma as a separator, otherwise + // the data is invalid so we return nil. + if enumOrSetValues[pos+i+1] != ',' { + return nil + } + // If we're at the end of the input here, it's invalid + // since we have a trailing comma which is not what MySQL + // returns. 
+ if pos+i+1 == len(enumOrSetValues) { + return nil + } + + tok, err := sqltypes.DecodeStringSQL(enumOrSetValues[start : pos+i+1]) + if err != nil { + return nil + } + + tokens = append(tokens, tok) + // We add 2 to the position to skip the closing quote & comma + start = pos + i + 2 + pos = start + 1 } - return tokens } // ParseEnumOrSetTokensMap parses the comma delimited part of an enum column definition -// and returns a map where ["1"] is the first token, and [""] is th elast token -func ParseEnumOrSetTokensMap(enumOrSetValues string) map[string]string { +// and returns a map where [1] is the first token, and [] is the last. +func ParseEnumOrSetTokensMap(enumOrSetValues string) map[int]string { tokens := parseEnumOrSetTokens(enumOrSetValues) - tokensMap := map[string]string{} + tokensMap := map[int]string{} for i, token := range tokens { - tokensMap[strconv.Itoa(i+1)] = token + // SET and ENUM values are 1 indexed. + tokensMap[i+1] = token } return tokensMap } diff --git a/go/vt/schema/parser_test.go b/go/vt/schema/parser_test.go index 5154411e829..fe9264c29b9 100644 --- a/go/vt/schema/parser_test.go +++ b/go/vt/schema/parser_test.go @@ -89,6 +89,19 @@ func TestParseEnumValues(t *testing.T) { assert.Equal(t, input, enumValues) } } + + { + inputs := []string{ + ``, + `abc`, + `func('x small','small','medium','large','x large')`, + `set('x small','small','medium','large','x large')`, + } + for _, input := range inputs { + enumValues := ParseEnumValues(input) + assert.Equal(t, input, enumValues) + } + } } func TestParseSetValues(t *testing.T) { @@ -125,6 +138,18 @@ func TestParseEnumTokens(t *testing.T) { expect := []string{"x-small", "small", "medium", "large", "x-large"} assert.Equal(t, expect, enumTokens) } + { + input := `'x small','small','medium','large','x large'` + enumTokens := parseEnumOrSetTokens(input) + expect := []string{"x small", "small", "medium", "large", "x large"} + assert.Equal(t, expect, enumTokens) + } + { + input := `'with '' quote','and 
\n newline'` + enumTokens := parseEnumOrSetTokens(input) + expect := []string{"with ' quote", "and \n newline"} + assert.Equal(t, expect, enumTokens) + } { input := `enum('x-small','small','medium','large','x-large')` enumTokens := parseEnumOrSetTokens(input) @@ -142,12 +167,12 @@ func TestParseEnumTokensMap(t *testing.T) { input := `'x-small','small','medium','large','x-large'` enumTokensMap := ParseEnumOrSetTokensMap(input) - expect := map[string]string{ - "1": "x-small", - "2": "small", - "3": "medium", - "4": "large", - "5": "x-large", + expect := map[int]string{ + 1: "x-small", + 2: "small", + 3: "medium", + 4: "large", + 5: "x-large", } assert.Equal(t, expect, enumTokensMap) } @@ -158,7 +183,7 @@ func TestParseEnumTokensMap(t *testing.T) { } for _, input := range inputs { enumTokensMap := ParseEnumOrSetTokensMap(input) - expect := map[string]string{} + expect := map[int]string{} assert.Equal(t, expect, enumTokensMap) } } diff --git a/go/vt/schema/tablegc.go b/go/vt/schema/tablegc.go index 872fb42dbe5..fc1b8361fb4 100644 --- a/go/vt/schema/tablegc.go +++ b/go/vt/schema/tablegc.go @@ -44,44 +44,68 @@ const ( TableDroppedGCState TableGCState = "" ) +func (s TableGCState) TableHint() InternalTableHint { + if hint, ok := gcStatesTableHints[s]; ok { + return hint + } + return InternalTableUnknownHint +} + const ( - GCTableNameExpression string = `^_vt_(HOLD|PURGE|EVAC|DROP)_([0-f]{32})_([0-9]{14})$` + OldGCTableNameExpression string = `^_vt_(HOLD|PURGE|EVAC|DROP)_([0-f]{32})_([0-9]{14})$` + // GCTableNameExpression parses new internal table name format, e.g. 
_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_ + GCTableNameExpression string = `^_vt_(hld|prg|evc|drp)_([0-f]{32})_([0-9]{14})_$` ) var ( - gcUUIDRegexp = regexp.MustCompile(`^[0-f]{32}$`) - gcTableNameRegexp = regexp.MustCompile(GCTableNameExpression) - - gcStates = map[string]TableGCState{ - string(HoldTableGCState): HoldTableGCState, - string(PurgeTableGCState): PurgeTableGCState, - string(EvacTableGCState): EvacTableGCState, - string(DropTableGCState): DropTableGCState, - } + condensedUUIDRegexp = regexp.MustCompile(`^[0-f]{32}$`) + oldGCTableNameRegexp = regexp.MustCompile(OldGCTableNameExpression) + + gcStates = map[string]TableGCState{} + gcStatesTableHints = map[TableGCState]InternalTableHint{} ) -// IsGCUUID answers 'true' when the given string is an GC UUID, e.g.: -// a0638f6bec7b11ea9bf8000d3a9b8a9a -func IsGCUUID(uuid string) bool { - return gcUUIDRegexp.MatchString(uuid) +func init() { + gcStatesTableHints[HoldTableGCState] = InternalTableGCHoldHint + gcStatesTableHints[PurgeTableGCState] = InternalTableGCPurgeHint + gcStatesTableHints[EvacTableGCState] = InternalTableGCEvacHint + gcStatesTableHints[DropTableGCState] = InternalTableGCDropHint + for _, gcState := range []TableGCState{HoldTableGCState, PurgeTableGCState, EvacTableGCState, DropTableGCState} { + gcStates[string(gcState)] = gcState + gcStates[gcState.TableHint().String()] = gcState + } } // generateGCTableName creates a GC table name, based on desired state and time, and with optional preset UUID. // If uuid is given, then it must be in GC-UUID format. If empty, the function auto-generates a UUID. 
-func generateGCTableName(state TableGCState, uuid string, t time.Time) (tableName string, err error) { +func generateGCTableNameOldFormat(state TableGCState, uuid string, t time.Time) (tableName string, err error) { if uuid == "" { uuid, err = CreateUUIDWithDelimiter("") } if err != nil { return "", err } - if !IsGCUUID(uuid) { + if !isCondensedUUID(uuid) { return "", fmt.Errorf("Not a valid GC UUID format: %s", uuid) } timestamp := ToReadableTimestamp(t) return fmt.Sprintf("_vt_%s_%s_%s", state, uuid, timestamp), nil } +// generateGCTableName creates a GC table name, based on desired state and time, and with optional preset UUID. +// If uuid is given, then it must be in GC-UUID format. If empty, the function auto-generates a UUID. +func generateGCTableName(state TableGCState, uuid string, t time.Time) (tableName string, err error) { + for k, v := range gcStates { + if v != state { + continue + } + if len(k) == 3 && k != string(state) { // the "new" format + return GenerateInternalTableName(k, uuid, t) + } + } + return "", fmt.Errorf("Unknown GC state: %v", state) +} + // GenerateGCTableName creates a GC table name, based on desired state and time, and with random UUID func GenerateGCTableName(state TableGCState, t time.Time) (tableName string, err error) { return generateGCTableName(state, "", t) @@ -90,17 +114,33 @@ func GenerateGCTableName(state TableGCState, t time.Time) (tableName string, err // AnalyzeGCTableName analyzes a given table name to see if it's a GC table, and if so, parse out // its state, uuid, and timestamp func AnalyzeGCTableName(tableName string) (isGCTable bool, state TableGCState, uuid string, t time.Time, err error) { - submatch := gcTableNameRegexp.FindStringSubmatch(tableName) + // Try new naming format (e.g. 
`_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_`): + // The new naming format is accepted in v19, and actually _used_ in v20 + if isInternal, hint, uuid, t, err := AnalyzeInternalTableName(tableName); isInternal { + gcState, ok := gcStates[hint] + return ok, gcState, uuid, t, err + } + // Try old naming formats. These names will not be generated in v20. + // TODO(shlomi): the code below should be removed in v21 + submatch := oldGCTableNameRegexp.FindStringSubmatch(tableName) if len(submatch) == 0 { return false, state, uuid, t, nil } + gcState, ok := gcStates[submatch[1]] + if !ok { + return false, state, uuid, t, nil + } t, err = time.Parse(readableTimeFormat, submatch[3]) - return true, TableGCState(submatch[1]), submatch[2], t, err + if err != nil { + return false, state, uuid, t, err + } + return true, gcState, submatch[2], t, nil } // IsGCTableName answers 'true' when the given table name stands for a GC table func IsGCTableName(tableName string) bool { - return gcTableNameRegexp.MatchString(tableName) + isGC, _, _, _, _ := AnalyzeGCTableName(tableName) + return isGC } // GenerateRenameStatementWithUUID generates a "RENAME TABLE" statement, where a table is renamed to a GC table, with preset UUID @@ -112,11 +152,25 @@ func GenerateRenameStatementWithUUID(fromTableName string, state TableGCState, u return fmt.Sprintf("RENAME TABLE `%s` TO %s", fromTableName, toTableName), toTableName, nil } +// generateRenameStatementWithUUIDOldFormat generates a "RENAME TABLE" statement, where a table is renamed to a GC table, with preset UUID +func generateRenameStatementWithUUIDOldFormat(fromTableName string, state TableGCState, uuid string, t time.Time) (statement string, toTableName string, err error) { + toTableName, err = generateGCTableNameOldFormat(state, uuid, t) + if err != nil { + return "", "", err + } + return fmt.Sprintf("RENAME TABLE `%s` TO %s", fromTableName, toTableName), toTableName, nil +} + // GenerateRenameStatement generates a "RENAME TABLE" 
statement, where a table is renamed to a GC table. func GenerateRenameStatement(fromTableName string, state TableGCState, t time.Time) (statement string, toTableName string, err error) { return GenerateRenameStatementWithUUID(fromTableName, state, "", t) } +// GenerateRenameStatementOldFormat generates a "RENAME TABLE" statement, where a table is renamed to a GC table, using the old table name format. +func GenerateRenameStatementOldFormat(fromTableName string, state TableGCState, t time.Time) (statement string, toTableName string, err error) { + return generateRenameStatementWithUUIDOldFormat(fromTableName, state, "", t) +} + // ParseGCLifecycle parses a comma separated list of gc states and returns a map of indicated states func ParseGCLifecycle(gcLifecycle string) (states map[TableGCState]bool, err error) { states = make(map[TableGCState]bool) diff --git a/go/vt/schema/tablegc_test.go b/go/vt/schema/tablegc_test.go index 90b31ff90fa..3f4e4e7bc09 100644 --- a/go/vt/schema/tablegc_test.go +++ b/go/vt/schema/tablegc_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package schema import ( + "regexp" "testing" "time" @@ -24,28 +25,105 @@ import ( "github.com/stretchr/testify/require" ) +func TestGCStates(t *testing.T) { + // These are all hard coded + require.Equal(t, HoldTableGCState, gcStates["hld"]) + require.Equal(t, HoldTableGCState, gcStates["HOLD"]) + require.Equal(t, PurgeTableGCState, gcStates["prg"]) + require.Equal(t, PurgeTableGCState, gcStates["PURGE"]) + require.Equal(t, EvacTableGCState, gcStates["evc"]) + require.Equal(t, EvacTableGCState, gcStates["EVAC"]) + require.Equal(t, DropTableGCState, gcStates["drp"]) + require.Equal(t, DropTableGCState, gcStates["DROP"]) + _, ok := gcStates["purge"] + require.False(t, ok) + _, ok = gcStates["vrp"] + require.False(t, ok) + require.Equal(t, 2*4, len(gcStates)) // 4 states, 2 forms each +} + func TestIsGCTableName(t *testing.T) { tm := time.Now() states := []TableGCState{HoldTableGCState, PurgeTableGCState, EvacTableGCState, DropTableGCState} for _, state := range states { for i := 0; i < 10; i++ { - tableName, err := generateGCTableName(state, "", tm) + tableName, err := generateGCTableNameOldFormat(state, "", tm) + assert.NoError(t, err) + assert.Truef(t, IsGCTableName(tableName), "table name: %s", tableName) + + tableName, err = generateGCTableName(state, "6ace8bcef73211ea87e9f875a4d24e90", tm) + assert.NoError(t, err) + assert.Truef(t, IsGCTableName(tableName), "table name: %s", tableName) + + tableName, err = GenerateGCTableName(state, tm) assert.NoError(t, err) - assert.True(t, IsGCTableName(tableName)) + assert.Truef(t, IsGCTableName(tableName), "table name: %s", tableName) } } - names := []string{ - "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_202009151204100", - "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410 ", - "__vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", - "_vt_DROP_6ace8bcef73211ea87e9f875a4d2_20200915120410", - "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915", - "_vt_OTHER_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", - 
"_vt_OTHER_6ace8bcef73211ea87e9f875a4d24e90_zz20200915120410", - } - for _, tableName := range names { - assert.False(t, IsGCTableName(tableName)) - } + t.Run("accept", func(t *testing.T) { + names := []string{ + "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + } + for _, tableName := range names { + t.Run(tableName, func(t *testing.T) { + assert.True(t, IsGCTableName(tableName)) + }) + } + }) + t.Run("reject", func(t *testing.T) { + names := []string{ + "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_202009151204100", + "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410 ", + "__vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_DROP_6ace8bcef73211ea87e9f875a4d2_20200915120410", + "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915", + "_vt_OTHER_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_OTHER_6ace8bcef73211ea87e9f875a4d24e90_zz20200915120410", + "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915999999", + "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915999999_", + } + for _, tableName := range names { + t.Run(tableName, func(t *testing.T) { + assert.False(t, IsGCTableName(tableName)) + }) + } + }) + + t.Run("explicit regexp", func(t *testing.T) { + // NewGCTableNameExpression regexp is used externally by vreplication. It's a redundant form of + // InternalTableNameExpression, but is nonetheless required. 
We verify it works correctly + re := regexp.MustCompile(GCTableNameExpression) + t.Run("accept", func(t *testing.T) { + names := []string{ + "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_evc_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + } + for _, tableName := range names { + t.Run(tableName, func(t *testing.T) { + assert.True(t, IsGCTableName(tableName)) + assert.True(t, re.MatchString(tableName)) + }) + } + }) + t.Run("reject", func(t *testing.T) { + names := []string{ + "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "_vt_vrp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_gho_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + } + for _, tableName := range names { + t.Run(tableName, func(t *testing.T) { + assert.False(t, re.MatchString(tableName)) + }) + } + }) + }) + } func TestAnalyzeGCTableName(t *testing.T) { @@ -55,35 +133,68 @@ func TestAnalyzeGCTableName(t *testing.T) { tableName string state TableGCState t time.Time + isGC bool }{ { tableName: "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", state: DropTableGCState, t: baseTime, + isGC: true, }, { tableName: "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", state: HoldTableGCState, t: baseTime, + isGC: true, }, { tableName: "_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", state: EvacTableGCState, t: baseTime, + isGC: true, }, { tableName: "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", state: PurgeTableGCState, t: baseTime, + isGC: true, + }, + { + tableName: "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20200915999999", // time error + isGC: false, + }, + { + tableName: "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + state: DropTableGCState, + t: baseTime, + isGC: true, + }, + { + tableName: 
"_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + state: HoldTableGCState, + t: baseTime, + isGC: true, + }, + { + tableName: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_20200915999999_", // time error + isGC: false, + }, + { + tableName: "_vt_xyz_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + isGC: false, }, } for _, ts := range tt { - isGC, state, uuid, tm, err := AnalyzeGCTableName(ts.tableName) - assert.NoError(t, err) - assert.True(t, isGC) - assert.True(t, IsGCUUID(uuid)) - assert.Equal(t, ts.state, state) - assert.Equal(t, ts.t, tm) + t.Run(ts.tableName, func(t *testing.T) { + isGC, state, uuid, tm, err := AnalyzeGCTableName(ts.tableName) + assert.Equal(t, ts.isGC, isGC) + if ts.isGC { + assert.NoError(t, err) + assert.True(t, isCondensedUUID(uuid)) + assert.Equal(t, ts.state, state) + assert.Equal(t, ts.t, tm) + } + }) } } diff --git a/go/vt/schemadiff/analysis.go b/go/vt/schemadiff/analysis.go new file mode 100644 index 00000000000..ae0f22559f2 --- /dev/null +++ b/go/vt/schemadiff/analysis.go @@ -0,0 +1,68 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemadiff + +import ( + "vitess.io/vitess/go/vt/sqlparser" +) + +// AlterTableRotatesRangePartition answers `true` when the given ALTER TABLE statement performs any sort +// of range partition rotation, that is applicable immediately and without moving data. 
+// Such would be: +// - Dropping any partition(s) +// - Adding a new partition (empty, at the end of the list) +func AlterTableRotatesRangePartition(createTable *sqlparser.CreateTable, alterTable *sqlparser.AlterTable) (bool, error) { + // Validate original table is partitioned by RANGE + if createTable.TableSpec.PartitionOption == nil { + return false, nil + } + if createTable.TableSpec.PartitionOption.Type != sqlparser.RangeType { + return false, nil + } + + spec := alterTable.PartitionSpec + if spec == nil { + return false, nil + } + errorResult := func(conflictingNode sqlparser.SQLNode) error { + return &PartitionSpecNonExclusiveError{ + Table: alterTable.Table.Name.String(), + PartitionSpec: spec, + ConflictingStatement: sqlparser.CanonicalString(conflictingNode), + } + } + if len(alterTable.AlterOptions) > 0 { + // This should never happen, unless someone programmatically tampered with the AlterTable AST. + return false, errorResult(alterTable.AlterOptions[0]) + } + if alterTable.PartitionOption != nil { + // This should never happen, unless someone programmatically tampered with the AlterTable AST. + return false, errorResult(alterTable.PartitionOption) + } + switch spec.Action { + case sqlparser.AddAction: + if len(spec.Definitions) > 1 { + // This should never happen, unless someone programmatically tampered with the AlterTable AST. + return false, errorResult(spec.Definitions[1]) + } + return true, nil + case sqlparser.DropAction: + return true, nil + default: + return false, nil + } +} diff --git a/go/vt/schemadiff/analysis_test.go b/go/vt/schemadiff/analysis_test.go new file mode 100644 index 00000000000..b0092fb7aac --- /dev/null +++ b/go/vt/schemadiff/analysis_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemadiff + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +// TestAlterTableRotatesRangePartition exercises AlterTableRotatesRangePartition with ALTER TABLE statements that +// do and do not constitute a range partition rotation. In MySQL, a single ALTER TABLE statement +// cannot apply multiple rotation commands, nor can it mix rotation commands with other types of changes. +func TestAlterTableRotatesRangePartition(t *testing.T) { + tcases := []struct { + create string + alter string + expect bool + }{ + { + alter: "ALTER TABLE t ADD PARTITION (PARTITION p1 VALUES LESS THAN (10))", + expect: true, + }, + { + alter: "ALTER TABLE t DROP PARTITION p1", + expect: true, + }, + { + alter: "ALTER TABLE t DROP PARTITION p1, p2", + expect: true, + }, + { + alter: "ALTER TABLE t TRUNCATE PARTITION p3", + }, + { + alter: "ALTER TABLE t COALESCE PARTITION 3", + }, + { + alter: "ALTER TABLE t partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + }, + { + alter: "ALTER TABLE t ADD COLUMN c1 INT, DROP COLUMN c2", + }, + } + + for _, tcase := range tcases { + t.Run(tcase.alter, func(t *testing.T) { + if tcase.create == "" { + tcase.create = "CREATE TABLE t (id int PRIMARY KEY) PARTITION BY RANGE (id) (PARTITION p0 VALUES LESS THAN (10))" + } + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(tcase.create) + require.NoError(t, err) + createTable, ok := 
stmt.(*sqlparser.CreateTable) + require.True(t, ok) + + stmt, err = sqlparser.NewTestParser().ParseStrictDDL(tcase.alter) + require.NoError(t, err) + alterTable, ok := stmt.(*sqlparser.AlterTable) + require.True(t, ok) + + result, err := AlterTableRotatesRangePartition(createTable, alterTable) + require.NoError(t, err) + assert.Equal(t, tcase.expect, result) + }) + } +} diff --git a/go/vt/schemadiff/annotations.go b/go/vt/schemadiff/annotations.go new file mode 100644 index 00000000000..f17344c1085 --- /dev/null +++ b/go/vt/schemadiff/annotations.go @@ -0,0 +1,246 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemadiff + +import ( + "strings" +) + +// TextualAnnotationType is an enum for the type of annotation that can be applied to a line of text. +type TextualAnnotationType int + +const ( + UnchangedTextualAnnotationType TextualAnnotationType = iota + AddedTextualAnnotationType + RemovedTextualAnnotationType +) + +// AnnotatedText is a some text and its annotation type. The text is usually single-line, but it +// can be multi-line, as in the case of partition specs. +type AnnotatedText struct { + text string + typ TextualAnnotationType +} + +// TextualAnnotations is a sequence of annotated texts. It is the annotated representation of a statement. 
+type TextualAnnotations struct { + texts []*AnnotatedText + hasAnyChanges bool +} + +func NewTextualAnnotations() *TextualAnnotations { + return &TextualAnnotations{} +} + +func (a *TextualAnnotations) Len() int { + return len(a.texts) +} + +func (a *TextualAnnotations) mark(text string, typ TextualAnnotationType) { + a.texts = append(a.texts, &AnnotatedText{text: text, typ: typ}) + if typ != UnchangedTextualAnnotationType { + a.hasAnyChanges = true + } +} + +func (a *TextualAnnotations) MarkAdded(text string) { + a.mark(text, AddedTextualAnnotationType) +} + +func (a *TextualAnnotations) MarkRemoved(text string) { + a.mark(text, RemovedTextualAnnotationType) +} + +func (a *TextualAnnotations) MarkUnchanged(text string) { + a.mark(text, UnchangedTextualAnnotationType) +} + +// ByType returns the subset of annotations by given type. +func (a *TextualAnnotations) ByType(typ TextualAnnotationType) (r []*AnnotatedText) { + for _, text := range a.texts { + if text.typ == typ { + r = append(r, text) + } + } + return r +} + +func (a *TextualAnnotations) Added() (r []*AnnotatedText) { + return a.ByType(AddedTextualAnnotationType) +} + +func (a *TextualAnnotations) Removed() (r []*AnnotatedText) { + return a.ByType(RemovedTextualAnnotationType) +} + +// Export beautifies the annotated text and returns it as a string. 
+func (a *TextualAnnotations) Export() string { + textLines := make([]string, 0, len(a.texts)) + for _, annotatedText := range a.texts { + switch annotatedText.typ { + case AddedTextualAnnotationType: + annotatedText.text = "+" + annotatedText.text + case RemovedTextualAnnotationType: + annotatedText.text = "-" + annotatedText.text + default: + // text unchanged + if a.hasAnyChanges { + // If there is absolutely no change, we don't add a space anywhere + annotatedText.text = " " + annotatedText.text + } + } + textLines = append(textLines, annotatedText.text) + } + return strings.Join(textLines, "\n") +} + +// annotatedStatement returns a new TextualAnnotations object that annotates the given statement with the given annotations. +// The given annotations were created by the diffing algorithm, and represent the CanonicalString of some node. +// However, the given statement is just some text, and we need to find the annotations (some of which may be multi-line) +// inside our text, and return a per-line annotation. +func annotatedStatement(stmt string, annotationType TextualAnnotationType, annotations *TextualAnnotations) *TextualAnnotations { + stmtLines := strings.Split(stmt, "\n") + result := NewTextualAnnotations() + annotationLines := map[string]bool{} // single-line breakdown of all annotations + for _, annotation := range annotations.ByType(annotationType) { + // An annotated text could be multiline. Partition specs are such. + lines := strings.Split(annotation.text, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + annotationLines[line] = true + } + } + } + annotationLinesMutations := map[string](map[string]bool){} + // Mutations are expected ways to find an annotation inside a `CREATE TABLE` statement. + for annotationLine := range annotationLines { + possibleMutations := map[string]bool{ + annotationLine: true, + ") " + annotationLine: true, // e.g. ") ENGINE=InnoDB" + ") " + annotationLine + ",": true, // e.g. 
") ENGINE=InnoDB,[\n ROW_FORMAT=COMPRESSED]" + "(" + annotationLine + ")": true, // e.g. "(PARTITION p0 VALUES LESS THAN (10)) + "(" + annotationLine + ",": true, // e.g. "(PARTITION p0 VALUES LESS THAN (10), + annotationLine + ",": true, // e.g. "i int unsigned," + annotationLine + ")": true, // e.g. "PARTITION p9 VALUES LESS THAN (90))" + } + annotationLinesMutations[annotationLine] = possibleMutations + } + for i := range stmtLines { + lineAnnotated := false + trimmedLine := strings.TrimSpace(stmtLines[i]) + if trimmedLine == "" { + continue + } + for annotationLine := range annotationLines { + if lineAnnotated { + break + } + possibleMutations := annotationLinesMutations[annotationLine] + if possibleMutations[trimmedLine] { + // Annotate this line! + result.mark(stmtLines[i], annotationType) + lineAnnotated = true + // No need to match this annotation again + delete(annotationLines, annotationLine) + delete(possibleMutations, annotationLine) + } + } + if !lineAnnotated { + result.MarkUnchanged(stmtLines[i]) + } + } + return result +} + +// annotateAll blindly annotates all lines of the given statement with the given annotation type. +func annotateAll(stmt string, annotationType TextualAnnotationType) *TextualAnnotations { + stmtLines := strings.Split(stmt, "\n") + result := NewTextualAnnotations() + for _, line := range stmtLines { + result.mark(line, annotationType) + } + return result +} + +// unifiedAnnotated takes two annotations of from, to statements and returns a unified annotation. 
// unifiedAnnotated takes the `from` and `to` annotations of a statement pair and
// merges them into one unified representation: removed lines (from `from`) and
// added lines (from `to`) are interleaved with the shared, unchanged lines.
// The merge walks both sequences with two cursors; removed lines are flushed
// before added lines at each position.
func unifiedAnnotated(from *TextualAnnotations, to *TextualAnnotations) *TextualAnnotations {
	unified := NewTextualAnnotations()
	fromIndex := 0
	toIndex := 0
	for fromIndex < from.Len() || toIndex < to.Len() {
		matchingLine := ""
		if fromIndex < from.Len() {
			fromLine := from.texts[fromIndex]
			if fromLine.typ == RemovedTextualAnnotationType {
				// Emit removals eagerly; the `to` cursor does not advance.
				unified.MarkRemoved(fromLine.text)
				fromIndex++
				continue
			}
			matchingLine = fromLine.text
		}
		if toIndex < to.Len() {
			toLine := to.texts[toIndex]
			if toLine.typ == AddedTextualAnnotationType {
				// Emit additions eagerly; the `from` cursor does not advance.
				unified.MarkAdded(toLine.text)
				toIndex++
				continue
			}
			if matchingLine == "" {
				// `from` is exhausted; take the unchanged line from `to`.
				matchingLine = toLine.text
			}
		}
		// Both cursors sit on an unchanged line (assumed identical on both
		// sides at this point); advance them in lockstep.
		unified.MarkUnchanged(matchingLine)
		fromIndex++
		toIndex++
	}
	return unified
}

// annotatedDiff returns the annotated representations of the from and to entities, and their unified representation.
// When entityAnnotations is nil, a modified entity is rendered as a complete
// rewrite (all `from` lines removed, all `to` lines added).
func annotatedDiff(diff EntityDiff, entityAnnotations *TextualAnnotations) (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) {
	fromEntity, toEntity := diff.Entities()
	// Handle the infamous golang interface is not-nil but underlying object is:
	if fromEntity != nil && fromEntity.Create() == nil {
		fromEntity = nil
	}
	if toEntity != nil && toEntity.Create() == nil {
		toEntity = nil
	}
	switch {
	case fromEntity == nil && toEntity == nil:
		// Will only get here if using mockup entities, as generated by EntityDiffByStatement.
		return nil, nil, nil
	case fromEntity == nil:
		// A new entity was created: everything in `to` is an addition.
		from = NewTextualAnnotations()
		to = annotateAll(toEntity.Create().CanonicalStatementString(), AddedTextualAnnotationType)
	case toEntity == nil:
		// An entity was dropped: everything in `from` is a removal.
		from = annotateAll(fromEntity.Create().CanonicalStatementString(), RemovedTextualAnnotationType)
		to = NewTextualAnnotations()
	case entityAnnotations == nil:
		// Entity was modified, and we have no prior info about entity annotations. Treat this is as a complete rewrite.
		from = annotateAll(fromEntity.Create().CanonicalStatementString(), RemovedTextualAnnotationType)
		to = annotateAll(toEntity.Create().CanonicalStatementString(), AddedTextualAnnotationType)
	default:
		// Entity was modified, and we have prior info about entity annotations.
		from = annotatedStatement(fromEntity.Create().CanonicalStatementString(), RemovedTextualAnnotationType, entityAnnotations)
		to = annotatedStatement(toEntity.Create().CanonicalStatementString(), AddedTextualAnnotationType, entityAnnotations)
	}
	return from, to, unifiedAnnotated(from, to)
}
+*/ + +package schemadiff + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestAnnotateAll(t *testing.T) { + stmt := `create table t( + id int, + name varchar(100), + primary key(id) +) engine=innodb` + annotations := annotateAll(stmt, RemovedTextualAnnotationType) + assert.Equal(t, 5, annotations.Len()) + expect := `-create table t( +- id int, +- name varchar(100), +- primary key(id) +-) engine=innodb` + assert.Equal(t, expect, annotations.Export()) +} + +func TestUnifiedAnnotated(t *testing.T) { + tcases := []struct { + name string + from string + to string + fromAnnotations *TextualAnnotations + toAnnotations *TextualAnnotations + expected string + }{ + { + "no change", + "CREATE TABLE t1 (a int)", + "CREATE TABLE t1 (a int)", + &TextualAnnotations{}, + &TextualAnnotations{}, + "CREATE TABLE `t1` (\n\t`a` int\n)", + }, + { + "simple", + "CREATE TABLE t1 (a int)", + "CREATE TABLE t1 (a int, b int)", + &TextualAnnotations{}, + &TextualAnnotations{texts: []*AnnotatedText{{text: "`b` int", typ: AddedTextualAnnotationType}}}, + " CREATE TABLE `t1` (\n \t`a` int\n+\t`b` int\n )", + }, + } + parser := sqlparser.NewTestParser() + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + fromStmt, err := parser.ParseStrictDDL(tcase.from) + require.NoError(t, err) + annotatedFrom := annotatedStatement(sqlparser.CanonicalString(fromStmt), RemovedTextualAnnotationType, tcase.fromAnnotations) + toStmt, err := parser.ParseStrictDDL(tcase.to) + require.NoError(t, err) + annotatedTo := annotatedStatement(sqlparser.CanonicalString(toStmt), AddedTextualAnnotationType, tcase.toAnnotations) + unified := unifiedAnnotated(annotatedFrom, annotatedTo) + export := unified.Export() + assert.Equalf(t, tcase.expected, export, "from: %v, to: %v", annotatedFrom.Export(), annotatedTo.Export()) + }) + } +} + +func TestUnifiedAnnotatedAll(t *testing.T) { + stmt := `create 
table t( + id int, + name varchar(100), + primary key(id) +) engine=innodb` + annotatedTo := annotateAll(stmt, AddedTextualAnnotationType) + annotatedFrom := NewTextualAnnotations() + unified := unifiedAnnotated(annotatedFrom, annotatedTo) + expect := `+create table t( ++ id int, ++ name varchar(100), ++ primary key(id) ++) engine=innodb` + assert.Equal(t, expect, unified.Export()) +} diff --git a/go/vt/schemadiff/capability.go b/go/vt/schemadiff/capability.go new file mode 100644 index 00000000000..ad93e40838f --- /dev/null +++ b/go/vt/schemadiff/capability.go @@ -0,0 +1,245 @@ +package schemadiff + +import ( + "strings" + + "vitess.io/vitess/go/mysql/capabilities" + "vitess.io/vitess/go/vt/sqlparser" +) + +const ( + maxColumnsForInstantAddColumn = 1022 +) + +// alterOptionAvailableViaInstantDDL checks if the specific alter option is eligible to run via ALGORITHM=INSTANT +// reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-online-ddl-operations.html +func alterOptionCapableOfInstantDDL(alterOption sqlparser.AlterOption, createTable *sqlparser.CreateTable, capableOf capabilities.CapableOf) (bool, error) { + // A table with FULLTEXT index won't support adding/removing columns instantly. 
+ tableHasFulltextIndex := false + for _, key := range createTable.TableSpec.Indexes { + if key.Info.Type == sqlparser.IndexTypeFullText { + tableHasFulltextIndex = true + break + } + } + findColumn := func(colName string) *sqlparser.ColumnDefinition { + if createTable == nil { + return nil + } + for _, col := range createTable.TableSpec.Columns { + if strings.EqualFold(colName, col.Name.String()) { + return col + } + } + return nil + } + findIndexCoveringColumn := func(colName string) *sqlparser.IndexDefinition { + for _, index := range createTable.TableSpec.Indexes { + for _, col := range index.Columns { + if col.Column.String() == colName { + return index + } + } + } + return nil + } + findTableOption := func(optName string) *sqlparser.TableOption { + if createTable == nil { + return nil + } + for _, opt := range createTable.TableSpec.Options { + if strings.EqualFold(optName, opt.Name) { + return opt + } + } + return nil + } + tableIsCompressed := false + if opt := findTableOption("ROW_FORMAT"); opt != nil { + if strings.EqualFold(opt.String, "COMPRESSED") { + tableIsCompressed = true + } + } + + isGeneratedColumn := func(col *sqlparser.ColumnDefinition) (bool, sqlparser.ColumnStorage) { + if col == nil { + return false, 0 + } + if col.Type.Options == nil { + return false, 0 + } + if col.Type.Options.As == nil { + return false, 0 + } + return true, col.Type.Options.Storage + } + colStringStrippedDown := func(col *sqlparser.ColumnDefinition, stripDefault bool, stripEnum bool) string { + strippedCol := sqlparser.CloneRefOfColumnDefinition(col) + if stripDefault { + strippedCol.Type.Options.Default = nil + strippedCol.Type.Options.DefaultLiteral = false + } + if stripEnum { + strippedCol.Type.EnumValues = nil + } + return sqlparser.CanonicalString(strippedCol) + } + hasPrefix := func(vals []string, prefix []string) bool { + if len(vals) < len(prefix) { + return false + } + for i := range prefix { + if vals[i] != prefix[i] { + return false + } + } + return true + } 
+ // Up to 8.0.26 we could only ADD COLUMN as last column + switch opt := alterOption.(type) { + case *sqlparser.ChangeColumn: + // We do not support INSTANT for renaming a column (ALTER TABLE ...CHANGE) because: + // 1. We discourage column rename + // 2. We do not produce CHANGE statements in declarative diff + // 3. The success of the operation depends on whether the column is referenced by a foreign key + // in another table. Which is a bit too much to compute here. + return false, nil + case *sqlparser.AddColumns: + if tableHasFulltextIndex { + // not supported if the table has a FULLTEXT index + return false, nil + } + // Not supported in COMPRESSED tables + if tableIsCompressed { + return false, nil + } + for _, column := range opt.Columns { + if isGenerated, storage := isGeneratedColumn(column); isGenerated { + if storage == sqlparser.StoredStorage { + // Adding a generated "STORED" column is unsupported + return false, nil + } + } + } + if opt.First || opt.After != nil { + // not a "last" column. Only supported as of 8.0.29 + return capableOf(capabilities.InstantAddDropColumnFlavorCapability) + } + // Adding a *last* column is supported in 8.0 + return capableOf(capabilities.InstantAddLastColumnFlavorCapability) + case *sqlparser.DropColumn: + col := findColumn(opt.Name.Name.String()) + if col == nil { + // column not found + return false, nil + } + if tableHasFulltextIndex { + // not supported if the table has a FULLTEXT index + return false, nil + } + // Not supported in COMPRESSED tables + if tableIsCompressed { + return false, nil + } + if findIndexCoveringColumn(opt.Name.Name.String()) != nil { + // not supported if the column is part of an index + return false, nil + } + if isGenerated, _ := isGeneratedColumn(col); isGenerated { + // supported by all 8.0 versions + // Note: according to the docs dropping a STORED generated column is not INSTANT-able, + // but in practice this is supported. 
This is why we don't test for STORED here, like + // we did for `AddColumns`. + return capableOf(capabilities.InstantAddDropVirtualColumnFlavorCapability) + } + return capableOf(capabilities.InstantAddDropColumnFlavorCapability) + case *sqlparser.ModifyColumn: + if col := findColumn(opt.NewColDefinition.Name.String()); col != nil { + // Check if only diff is change of default. + // We temporarily remove the DEFAULT expression (if any) from both + // table and ALTER statement, and compare the columns: if they're otherwise equal, + // then the only change can be an addition/change/removal of DEFAULT, which + // is instant-table. + tableColDefinition := colStringStrippedDown(col, true, false) + newColDefinition := colStringStrippedDown(opt.NewColDefinition, true, false) + if tableColDefinition == newColDefinition { + return capableOf(capabilities.InstantChangeColumnDefaultFlavorCapability) + } + // Check if: + // 1. this an ENUM/SET + // 2. and the change is to append values to the end of the list + // 3. and the number of added values does not increase the storage size for the enum/set + // 4. while still not caring about a change in the default value + if len(col.Type.EnumValues) > 0 && len(opt.NewColDefinition.Type.EnumValues) > 0 { + // both are enum or set + if !hasPrefix(opt.NewColDefinition.Type.EnumValues, col.Type.EnumValues) { + return false, nil + } + // we know the new column definition is identical to, or extends, the old definition. + // Now validate storage: + if strings.EqualFold(col.Type.Type, "enum") { + if len(col.Type.EnumValues) <= 255 && len(opt.NewColDefinition.Type.EnumValues) > 255 { + // this increases the SET storage size (1 byte for up to 8 values, 2 bytes beyond) + return false, nil + } + } + if strings.EqualFold(col.Type.Type, "set") { + if (len(col.Type.EnumValues)+7)/8 != (len(opt.NewColDefinition.Type.EnumValues)+7)/8 { + // this increases the SET storage size (1 byte for up to 8 values, 2 bytes for 8-15, etc.) 
+ return false, nil + } + } + // Now don't care about change of default: + tableColDefinition := colStringStrippedDown(col, true, true) + newColDefinition := colStringStrippedDown(opt.NewColDefinition, true, true) + if tableColDefinition == newColDefinition { + return capableOf(capabilities.InstantExpandEnumCapability) + } + } + } + return false, nil + default: + return false, nil + } +} + +// AlterTableCapableOfInstantDDL checks if the specific ALTER TABLE is eligible to run via ALGORITHM=INSTANT, given the existing table schema and +// the MySQL server capabilities. +// The function is intentionally public, as it is intended to be used by other packages, such as onlineddl. +func AlterTableCapableOfInstantDDL(alterTable *sqlparser.AlterTable, createTable *sqlparser.CreateTable, capableOf capabilities.CapableOf) (bool, error) { + if capableOf == nil { + return false, nil + } + capable, err := capableOf(capabilities.InstantDDLFlavorCapability) + if err != nil { + return false, err + } + if !capable { + return false, nil + } + if alterTable.PartitionOption != nil || alterTable.PartitionSpec != nil { + // no INSTANT for partitions + return false, nil + } + // For the ALTER statement to qualify for ALGORITHM=INSTANT, all alter options must each qualify. 
+ numAddedColumns := 0 + for _, alterOption := range alterTable.AlterOptions { + instantOK, err := alterOptionCapableOfInstantDDL(alterOption, createTable, capableOf) + if err != nil { + return false, err + } + if !instantOK { + return false, nil + } + switch opt := alterOption.(type) { + case *sqlparser.AddColumns: + numAddedColumns += len(opt.Columns) + } + } + if len(createTable.TableSpec.Columns)+numAddedColumns > maxColumnsForInstantAddColumn { + // Per MySQL docs: + // > The maximum number of columns in the internal representation of the table cannot exceed 1022 after column addition with the INSTANT algorithm + return false, nil + } + return true, nil +} diff --git a/go/vt/schemadiff/capability_test.go b/go/vt/schemadiff/capability_test.go new file mode 100644 index 00000000000..39134ea7a8a --- /dev/null +++ b/go/vt/schemadiff/capability_test.go @@ -0,0 +1,260 @@ +package schemadiff + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/capabilities" + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestAlterTableCapableOfInstantDDL(t *testing.T) { + capableOf := func(capability capabilities.FlavorCapability) (bool, error) { + switch capability { + case + capabilities.InstantDDLFlavorCapability, + capabilities.InstantAddLastColumnFlavorCapability, + capabilities.InstantAddDropVirtualColumnFlavorCapability, + capabilities.InstantAddDropColumnFlavorCapability, + capabilities.InstantChangeColumnDefaultFlavorCapability, + capabilities.InstantExpandEnumCapability: + return true, nil + } + return false, nil + } + incapableOf := func(capability capabilities.FlavorCapability) (bool, error) { + return false, nil + } + parser := sqlparser.NewTestParser() + + tcases := []struct { + name string + create string + alter string + expectCapableOfInstantDDL bool + capableOf capabilities.CapableOf + }{ + { + name: "add column", + create: "create table t1 (id int, i1 int)", + alter: "alter table t1 
add column i2 int", + expectCapableOfInstantDDL: true, + }, + { + name: "add multiple columns", + create: "create table t1 (id int, i1 int)", + alter: "alter table t1 add column i2 int, add column i3 int, add column i4 int", + expectCapableOfInstantDDL: true, + }, + { + name: "add last column", + create: "create table t1 (id int, i1 int)", + alter: "alter table t1 add column i2 int after i1", + expectCapableOfInstantDDL: true, + }, + { + name: "add mid column", + create: "create table t1 (id int, i1 int)", + alter: "alter table t1 add column i2 int after id", + expectCapableOfInstantDDL: true, + }, + { + name: "add mid column, incapable", + create: "create table t1 (id int, i1 int)", + alter: "alter table t1 add column i2 int after id", + capableOf: incapableOf, + expectCapableOfInstantDDL: false, + }, + { + name: "add column fails on COMPRESSED tables", + create: "create table t1 (id int, i1 int) row_format=compressed", + alter: "alter table t1 add column i2 int", + expectCapableOfInstantDDL: false, + }, + { + name: "add column fails on table with FULLTEXT index", + create: "create table t(id int, name varchar(128), primary key(id), fulltext key (name))", + alter: "alter table t1 add column i2 int", + expectCapableOfInstantDDL: false, + }, + { + name: "add columns max capacity", + create: `create table t(i1 int, i2 int, i3 int, i4 int, i5 int, i6 int, i7 int, i8 int, i9 int, i10 int, i11 int, i12 int, i13 int, i14 int, i15 int, i16 int, i17 int, i18 int, i19 int, i20 int, i21 int, i22 int, i23 int, i24 int, i25 int, i26 int, i27 int, i28 int, i29 int, i30 int, i31 int, i32 int, i33 int, i34 int, i35 int, i36 int, i37 int, i38 int, i39 int, i40 int, i41 int, i42 int, i43 int, i44 int, i45 int, i46 int, i47 int, i48 int, i49 int, i50 int, i51 int, i52 int, i53 int, i54 int, i55 int, i56 int, i57 int, i58 int, i59 int, i60 int, i61 int, i62 int, i63 int, i64 int, i65 int, i66 int, i67 int, i68 int, i69 int, i70 int, i71 int, i72 int, i73 int, i74 int, i75 int, i76 
int, i77 int, i78 int, i79 int, i80 int, i81 int, i82 int, i83 int, i84 int, i85 int, i86 int, i87 int, i88 int, i89 int, i90 int, i91 int, i92 int, i93 int, i94 int, i95 int, i96 int, i97 int, i98 int, i99 int, i100 int, i101 int, i102 int, i103 int, i104 int, i105 int, i106 int, i107 int, i108 int, i109 int, i110 int, i111 int, i112 int, i113 int, i114 int, i115 int, i116 int, i117 int, i118 int, i119 int, i120 int, i121 int, i122 int, i123 int, i124 int, i125 int, i126 int, i127 int, i128 int, i129 int, i130 int, i131 int, i132 int, i133 int, i134 int, i135 int, i136 int, i137 int, i138 int, i139 int, i140 int, i141 int, i142 int, i143 int, i144 int, i145 int, i146 int, i147 int, i148 int, i149 int, i150 int, i151 int, i152 int, i153 int, i154 int, i155 int, i156 int, i157 int, i158 int, i159 int, i160 int, i161 int, i162 int, i163 int, i164 int, i165 int, i166 int, i167 int, i168 int, i169 int, i170 int, i171 int, i172 int, i173 int, i174 int, i175 int, i176 int, i177 int, i178 int, i179 int, i180 int, i181 int, i182 int, i183 int, i184 int, i185 int, i186 int, i187 int, i188 int, i189 int, i190 int, i191 int, i192 int, i193 int, i194 int, i195 int, i196 int, i197 int, i198 int, i199 int, i200 int, + i201 int, i202 int, i203 int, i204 int, i205 int, i206 int, i207 int, i208 int, i209 int, i210 int, i211 int, i212 int, i213 int, i214 int, i215 int, i216 int, i217 int, i218 int, i219 int, i220 int, i221 int, i222 int, i223 int, i224 int, i225 int, i226 int, i227 int, i228 int, i229 int, i230 int, i231 int, i232 int, i233 int, i234 int, i235 int, i236 int, i237 int, i238 int, i239 int, i240 int, i241 int, i242 int, i243 int, i244 int, i245 int, i246 int, i247 int, i248 int, i249 int, i250 int, i251 int, i252 int, i253 int, i254 int, i255 int, i256 int, i257 int, i258 int, i259 int, i260 int, i261 int, i262 int, i263 int, i264 int, i265 int, i266 int, i267 int, i268 int, i269 int, i270 int, i271 int, i272 int, i273 int, i274 int, i275 int, i276 int, i277 int, i278 
int, i279 int, i280 int, i281 int, i282 int, i283 int, i284 int, i285 int, i286 int, i287 int, i288 int, i289 int, i290 int, i291 int, i292 int, i293 int, i294 int, i295 int, i296 int, i297 int, i298 int, i299 int, i300 int, i301 int, i302 int, i303 int, i304 int, i305 int, i306 int, i307 int, i308 int, i309 int, i310 int, i311 int, i312 int, i313 int, i314 int, i315 int, i316 int, i317 int, i318 int, i319 int, i320 int, i321 int, i322 int, i323 int, i324 int, i325 int, i326 int, i327 int, i328 int, i329 int, i330 int, i331 int, i332 int, i333 int, i334 int, i335 int, i336 int, i337 int, i338 int, i339 int, i340 int, i341 int, i342 int, i343 int, i344 int, i345 int, i346 int, i347 int, i348 int, i349 int, i350 int, i351 int, i352 int, i353 int, i354 int, i355 int, i356 int, i357 int, i358 int, i359 int, i360 int, i361 int, i362 int, i363 int, i364 int, i365 int, i366 int, i367 int, i368 int, i369 int, i370 int, i371 int, i372 int, i373 int, i374 int, i375 int, i376 int, i377 int, i378 int, i379 int, i380 int, i381 int, i382 int, i383 int, i384 int, i385 int, i386 int, i387 int, i388 int, i389 int, i390 int, i391 int, i392 int, i393 int, i394 int, i395 int, i396 int, i397 int, i398 int, i399 int, + i400 int, i401 int, i402 int, i403 int, i404 int, i405 int, i406 int, i407 int, i408 int, i409 int, i410 int, i411 int, i412 int, i413 int, i414 int, i415 int, i416 int, i417 int, i418 int, i419 int, i420 int, i421 int, i422 int, i423 int, i424 int, i425 int, i426 int, i427 int, i428 int, i429 int, i430 int, i431 int, i432 int, i433 int, i434 int, i435 int, i436 int, i437 int, i438 int, i439 int, i440 int, i441 int, i442 int, i443 int, i444 int, i445 int, i446 int, i447 int, i448 int, i449 int, i450 int, i451 int, i452 int, i453 int, i454 int, i455 int, i456 int, i457 int, i458 int, i459 int, i460 int, i461 int, i462 int, i463 int, i464 int, i465 int, i466 int, i467 int, i468 int, i469 int, i470 int, i471 int, i472 int, i473 int, i474 int, i475 int, i476 int, i477 int, 
i478 int, i479 int, i480 int, i481 int, i482 int, i483 int, i484 int, i485 int, i486 int, i487 int, i488 int, i489 int, i490 int, i491 int, i492 int, i493 int, i494 int, i495 int, i496 int, i497 int, i498 int, i499 int, i500 int, i501 int, i502 int, i503 int, i504 int, i505 int, i506 int, i507 int, i508 int, i509 int, i510 int, i511 int, i512 int, i513 int, i514 int, i515 int, i516 int, i517 int, i518 int, i519 int, i520 int, i521 int, i522 int, i523 int, i524 int, i525 int, i526 int, i527 int, i528 int, i529 int, i530 int, i531 int, i532 int, i533 int, i534 int, i535 int, i536 int, i537 int, i538 int, i539 int, i540 int, i541 int, i542 int, i543 int, i544 int, i545 int, i546 int, i547 int, i548 int, i549 int, i550 int, i551 int, i552 int, i553 int, i554 int, i555 int, i556 int, i557 int, i558 int, i559 int, i560 int, i561 int, i562 int, i563 int, i564 int, i565 int, i566 int, i567 int, i568 int, i569 int, i570 int, i571 int, i572 int, i573 int, i574 int, i575 int, i576 int, i577 int, i578 int, i579 int, i580 int, i581 int, i582 int, i583 int, i584 int, i585 int, i586 int, i587 int, i588 int, i589 int, i590 int, i591 int, i592 int, i593 int, i594 int, i595 int, i596 int, i597 int, i598 int, i599 int, + i600 int, i601 int, i602 int, i603 int, i604 int, i605 int, i606 int, i607 int, i608 int, i609 int, i610 int, i611 int, i612 int, i613 int, i614 int, i615 int, i616 int, i617 int, i618 int, i619 int, i620 int, i621 int, i622 int, i623 int, i624 int, i625 int, i626 int, i627 int, i628 int, i629 int, i630 int, i631 int, i632 int, i633 int, i634 int, i635 int, i636 int, i637 int, i638 int, i639 int, i640 int, i641 int, i642 int, i643 int, i644 int, i645 int, i646 int, i647 int, i648 int, i649 int, i650 int, i651 int, i652 int, i653 int, i654 int, i655 int, i656 int, i657 int, i658 int, i659 int, i660 int, i661 int, i662 int, i663 int, i664 int, i665 int, i666 int, i667 int, i668 int, i669 int, i670 int, i671 int, i672 int, i673 int, i674 int, i675 int, i676 int, i677 
int, i678 int, i679 int, i680 int, i681 int, i682 int, i683 int, i684 int, i685 int, i686 int, i687 int, i688 int, i689 int, i690 int, i691 int, i692 int, i693 int, i694 int, i695 int, i696 int, i697 int, i698 int, i699 int, i700 int, i701 int, i702 int, i703 int, i704 int, i705 int, i706 int, i707 int, i708 int, i709 int, i710 int, i711 int, i712 int, i713 int, i714 int, i715 int, i716 int, i717 int, i718 int, i719 int, i720 int, i721 int, i722 int, i723 int, i724 int, i725 int, i726 int, i727 int, i728 int, i729 int, i730 int, i731 int, i732 int, i733 int, i734 int, i735 int, i736 int, i737 int, i738 int, i739 int, i740 int, i741 int, i742 int, i743 int, i744 int, i745 int, i746 int, i747 int, i748 int, i749 int, i750 int, i751 int, i752 int, i753 int, i754 int, i755 int, i756 int, i757 int, i758 int, i759 int, i760 int, i761 int, i762 int, i763 int, i764 int, i765 int, i766 int, i767 int, i768 int, i769 int, i770 int, i771 int, i772 int, i773 int, i774 int, i775 int, i776 int, i777 int, i778 int, i779 int, i780 int, i781 int, i782 int, i783 int, i784 int, i785 int, i786 int, i787 int, i788 int, i789 int, i790 int, i791 int, i792 int, i793 int, i794 int, i795 int, i796 int, i797 int, i798 int, i799 int, + i800 int, i801 int, i802 int, i803 int, i804 int, i805 int, i806 int, i807 int, i808 int, i809 int, i810 int, i811 int, i812 int, i813 int, i814 int, i815 int, i816 int, i817 int, i818 int, i819 int, i820 int, i821 int, i822 int, i823 int, i824 int, i825 int, i826 int, i827 int, i828 int, i829 int, i830 int, i831 int, i832 int, i833 int, i834 int, i835 int, i836 int, i837 int, i838 int, i839 int, i840 int, i841 int, i842 int, i843 int, i844 int, i845 int, i846 int, i847 int, i848 int, i849 int, i850 int, i851 int, i852 int, i853 int, i854 int, i855 int, i856 int, i857 int, i858 int, i859 int, i860 int, i861 int, i862 int, i863 int, i864 int, i865 int, i866 int, i867 int, i868 int, i869 int, i870 int, i871 int, i872 int, i873 int, i874 int, i875 int, i876 int, 
i877 int, i878 int, i879 int, i880 int, i881 int, i882 int, i883 int, i884 int, i885 int, i886 int, i887 int, i888 int, i889 int, i890 int, i891 int, i892 int, i893 int, i894 int, i895 int, i896 int, i897 int, i898 int, i899 int, i900 int, i901 int, i902 int, i903 int, i904 int, i905 int, i906 int, i907 int, i908 int, i909 int, i910 int, i911 int, i912 int, i913 int, i914 int, i915 int, i916 int, i917 int, i918 int, i919 int, i920 int, i921 int, i922 int, i923 int, i924 int, i925 int, i926 int, i927 int, i928 int, i929 int, i930 int, i931 int, i932 int, i933 int, i934 int, i935 int, i936 int, i937 int, i938 int, i939 int, i940 int, i941 int, i942 int, i943 int, i944 int, i945 int, i946 int, i947 int, i948 int, i949 int, i950 int, i951 int, i952 int, i953 int, i954 int, i955 int, i956 int, i957 int, i958 int, i959 int, i960 int, i961 int, i962 int, i963 int, i964 int, i965 int, i966 int, i967 int, i968 int, i969 int, i970 int, i971 int, i972 int, i973 int, i974 int, i975 int, i976 int, i977 int, i978 int, i979 int, i980 int, i981 int, i982 int, i983 int, i984 int, i985 int, i986 int, i987 int, i988 int, i989 int, i990 int, i991 int, i992 int, i993 int, i994 int, i995 int, i996 int, i997 int, i998 int, i999 int, + i1000 int, i1001 int, i1002 int, i1003 int, i1004 int, i1005 int, i1006 int, i1007 int, i1008 int, i1009 int, i1010 int, i1011 int, i1012 int, i1013 int, i1014 int, i1015 int, i1016 int, i1017 int, i1018 int, i1019 int, i1020 int, i1021 int)`, + alter: "alter table t1 add column j1 int, add column j2 int", + expectCapableOfInstantDDL: false, + }, + { + name: "add virtual column", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t add column i2 int generated always as (i1 + 1) virtual", + expectCapableOfInstantDDL: true, + }, + { + name: "add stored column", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t add column i2 int generated always as (i1 + 1) stored", + 
expectCapableOfInstantDDL: false, + }, + { + name: "drop virtual column", + create: "create table t(id int, i1 int not null, i2 int generated always as (i1 + 1) virtual, primary key(id))", + alter: "alter table t drop column i2", + expectCapableOfInstantDDL: true, + }, + { + name: "drop stored virtual column", + create: "create table t(id int, i1 int not null, i2 int generated always as (i1 + 1) stored, primary key(id))", + alter: "alter table t drop column i2", + expectCapableOfInstantDDL: true, + }, + { + name: "drop mid column", + create: "create table t(id int, i1 int not null, i2 int not null, primary key(id))", + alter: "alter table t drop column i1", + expectCapableOfInstantDDL: true, + }, + { + name: "fail due to row_format=compressed", + create: "create table t(id int, i1 int not null, i2 int not null, primary key(id)) row_format=compressed", + alter: "alter table t drop column i1", + expectCapableOfInstantDDL: false, + }, + { + name: "drop column fail due to index", + create: "create table t(id int, i1 int not null, i2 int not null, primary key(id), key i1_idx (i1))", + alter: "alter table t drop column i1", + expectCapableOfInstantDDL: false, + }, + { + name: "drop column fail due to multicolumn index", + create: "create table t(id int, i1 int not null, i2 int not null, primary key(id), key i21_idx (i2, i1))", + alter: "alter table t drop column i1", + expectCapableOfInstantDDL: false, + }, + { + name: "drop column fail due to fulltext index in table", + create: "create table t(id int, i1 int not null, name varchar(128), primary key(id), fulltext key (name))", + alter: "alter table t drop column i1", + expectCapableOfInstantDDL: false, + }, + { + name: "add two columns", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t add column i2 int not null after id, add column i3 int not null", + expectCapableOfInstantDDL: true, + }, + { + name: "multiple add/drop columns", + create: "create table t(id int, i1 int not 
null, primary key(id))", + alter: "alter table t add column i2 int not null after id, add column i3 int not null, drop column i1", + expectCapableOfInstantDDL: true, + }, + // change/remove column default + { + name: "set a default column value", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t modify column i1 int not null default 0", + expectCapableOfInstantDDL: true, + }, + { + name: "change a default column value", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t modify column i1 int not null default 3", + expectCapableOfInstantDDL: true, + }, + { + name: "change a default column value on a table with FULLTEXT index", + create: "create table t(id int, i1 int not null, name varchar(128), primary key(id), fulltext key (name))", + alter: "alter table t modify column i1 int not null default 3", + expectCapableOfInstantDDL: true, + }, + { + name: "change default column value to null", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t modify column i1 int default null", + expectCapableOfInstantDDL: false, + }, + { + name: "fail because on top of changing the default value, the datatype is changed, too", + create: "create table t(id int, i1 int not null, primary key(id))", + alter: "alter table t modify column i1 bigint not null default 3", + expectCapableOfInstantDDL: false, + }, + { + name: "set column dfault value to null", + create: "create table t(id int, i1 int, primary key(id))", + alter: "alter table t modify column i1 int default null", + expectCapableOfInstantDDL: true, + }, + // enum/set: + { + name: "change enum default value", + create: "create table t(id int, c1 enum('a', 'b', 'c'), primary key(id))", + alter: "alter table t modify column c1 enum('a', 'b', 'c') default 'b'", + expectCapableOfInstantDDL: true, + }, + { + name: "enum append", + create: "create table t(id int, c1 enum('a', 'b', 'c'), primary key(id))", + 
alter: "alter table t modify column c1 enum('a', 'b', 'c', 'd')", + expectCapableOfInstantDDL: true, + }, + { + name: "enum append with changed default", + create: "create table t(id int, c1 enum('a', 'b', 'c') default 'a', primary key(id))", + alter: "alter table t modify column c1 enum('a', 'b', 'c', 'd') default 'd'", + expectCapableOfInstantDDL: true, + }, + { + name: "enum: fail insert in middle", + create: "create table t(id int, c1 enum('a', 'b', 'c'), primary key(id))", + alter: "alter table t modify column c1 enum('a', 'b', 'x', 'c')", + expectCapableOfInstantDDL: false, + }, + { + name: "enum: fail change", + create: "create table t(id int, c1 enum('a', 'b', 'c'), primary key(id))", + alter: "alter table t modify column c1 enum('a', 'x', 'c')", + expectCapableOfInstantDDL: false, + }, + { + name: "set: append", + create: "create table t(id int, c1 set('a', 'b', 'c'), primary key(id))", + alter: "alter table t modify column c1 set('a', 'b', 'c', 'd')", + expectCapableOfInstantDDL: true, + }, + { + name: "fail set append when over threshold", // (increase from 8 to 9 values => storage goes from 1 byte to 2 bytes) + create: "create table t(id int, c1 set('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'), primary key(id))", + alter: "alter table t modify column c1 set('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i')", + expectCapableOfInstantDDL: false, + }, + } + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + if tcase.capableOf == nil { + tcase.capableOf = capableOf + } + createTable, err := parser.ParseStrictDDL(tcase.create) + require.NoError(t, err, "failed to parse a CREATE TABLE statement from %q", tcase.create) + createTableStmt, ok := createTable.(*sqlparser.CreateTable) + require.True(t, ok) + + alterTable, err := parser.ParseStrictDDL(tcase.alter) + require.NoError(t, err, "failed to parse a ALTER TABLE statement from %q", tcase.alter) + alterTableStmt, ok := alterTable.(*sqlparser.AlterTable) + require.True(t, ok) + + isCapableOf, err 
:= AlterTableCapableOfInstantDDL(alterTableStmt, createTableStmt, tcase.capableOf) + require.NoError(t, err) + assert.Equal(t, tcase.expectCapableOfInstantDDL, isCapableOf) + }) + } +} diff --git a/go/vt/schemadiff/column.go b/go/vt/schemadiff/column.go index 4b8022ac289..da2145f3ab0 100644 --- a/go/vt/schemadiff/column.go +++ b/go/vt/schemadiff/column.go @@ -19,6 +19,7 @@ package schemadiff import ( "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" ) @@ -79,14 +80,140 @@ func NewColumnDefinitionEntity(c *sqlparser.ColumnDefinition) *ColumnDefinitionE // ColumnDiff compares this table statement with another table statement, and sees what it takes to // change this table to look like the other table. -// It returns an AlterTable statement if changes are found, or nil if not. -// the other table may be of different name; its name is ignored. -func (c *ColumnDefinitionEntity) ColumnDiff(other *ColumnDefinitionEntity, _ *DiffHints) *ModifyColumnDiff { +// It returns an ModifyColumnDiff statement if changes are found, or nil if not. +// The function also requires the charset/collate on the source & target tables. This is because the column's +// charset & collation, if undefined, are really defined by the table's charset & collation. +// +// Anecdotally, in CreateTableEntity.normalize() we actually actively strip away the charset/collate properties +// from the column definition, to get a cleaner table definition. +// +// Things get complicated when we consider hints.TableCharsetCollateStrategy. Consider this test case: +// +// from: "create table t (a varchar(64)) default charset=latin1", +// to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", +// +// In both cases, the column is really a latin1. But the tables themselves have different collations. +// We need to denormalize the column's charset/collate properties, so that the comparison can be done. 
+func (c *ColumnDefinitionEntity) ColumnDiff( + env *Environment, + tableName string, + other *ColumnDefinitionEntity, + t1cc *charsetCollate, + t2cc *charsetCollate, + hints *DiffHints, +) (*ModifyColumnDiff, error) { + if c.IsTextual() || other.IsTextual() { + // We will now denormalize the columns charset & collate as needed (if empty, populate from table.) + // Normalizing _this_ column definition: + if c.columnDefinition.Type.Charset.Name != "" && c.columnDefinition.Type.Options.Collate == "" { + // Charset defined without collation. Assign the default collation for that charset. + collation := env.CollationEnv().DefaultCollationForCharset(c.columnDefinition.Type.Charset.Name) + if collation == collations.Unknown { + return nil, &UnknownColumnCharsetCollationError{Column: c.columnDefinition.Name.String(), Charset: t1cc.charset} + } + defer func() { + c.columnDefinition.Type.Options.Collate = "" + }() + c.columnDefinition.Type.Options.Collate = env.CollationEnv().LookupName(collation) + } + if c.columnDefinition.Type.Charset.Name == "" && c.columnDefinition.Type.Options.Collate != "" { + // Column has explicit collation but no charset. We can infer the charset from the collation. + collationID := env.CollationEnv().LookupByName(c.columnDefinition.Type.Options.Collate) + charset := env.CollationEnv().LookupCharsetName(collationID) + if charset == "" { + return nil, &UnknownColumnCollationCharsetError{Column: c.columnDefinition.Name.String(), Collation: c.columnDefinition.Type.Options.Collate} + } + defer func() { + c.columnDefinition.Type.Charset.Name = "" + }() + c.columnDefinition.Type.Charset.Name = charset + } + if c.columnDefinition.Type.Charset.Name == "" { + // Still nothing? Assign the table's charset/collation. 
+ defer func() { + c.columnDefinition.Type.Charset.Name = "" + c.columnDefinition.Type.Options.Collate = "" + }() + c.columnDefinition.Type.Charset.Name = t1cc.charset + if c.columnDefinition.Type.Options.Collate == "" { + defer func() { + c.columnDefinition.Type.Options.Collate = "" + }() + c.columnDefinition.Type.Options.Collate = t1cc.collate + } + if c.columnDefinition.Type.Options.Collate = t1cc.collate; c.columnDefinition.Type.Options.Collate == "" { + collation := env.CollationEnv().DefaultCollationForCharset(t1cc.charset) + if collation == collations.Unknown { + return nil, &UnknownColumnCharsetCollationError{Column: c.columnDefinition.Name.String(), Charset: t1cc.charset} + } + c.columnDefinition.Type.Options.Collate = env.CollationEnv().LookupName(collation) + } + } + // Normalizing _the other_ column definition: + if other.columnDefinition.Type.Charset.Name != "" && other.columnDefinition.Type.Options.Collate == "" { + // Charset defined without collation. Assign the default collation for that charset. + collation := env.CollationEnv().DefaultCollationForCharset(other.columnDefinition.Type.Charset.Name) + if collation == collations.Unknown { + return nil, &UnknownColumnCharsetCollationError{Column: other.columnDefinition.Name.String(), Charset: t2cc.charset} + } + defer func() { + other.columnDefinition.Type.Options.Collate = "" + }() + other.columnDefinition.Type.Options.Collate = env.CollationEnv().LookupName(collation) + } + if other.columnDefinition.Type.Charset.Name == "" && other.columnDefinition.Type.Options.Collate != "" { + // Column has explicit collation but no charset. We can infer the charset from the collation. 
+ collationID := env.CollationEnv().LookupByName(other.columnDefinition.Type.Options.Collate) + charset := env.CollationEnv().LookupCharsetName(collationID) + if charset == "" { + return nil, &UnknownColumnCollationCharsetError{Column: other.columnDefinition.Name.String(), Collation: other.columnDefinition.Type.Options.Collate} + } + defer func() { + other.columnDefinition.Type.Charset.Name = "" + }() + other.columnDefinition.Type.Charset.Name = charset + } + + if other.columnDefinition.Type.Charset.Name == "" { + // Still nothing? Assign the table's charset/collation. + defer func() { + other.columnDefinition.Type.Charset.Name = "" + other.columnDefinition.Type.Options.Collate = "" + }() + other.columnDefinition.Type.Charset.Name = t2cc.charset + if other.columnDefinition.Type.Options.Collate = t2cc.collate; other.columnDefinition.Type.Options.Collate == "" { + collation := env.CollationEnv().DefaultCollationForCharset(t2cc.charset) + if collation == collations.Unknown { + return nil, &UnknownColumnCharsetCollationError{Column: other.columnDefinition.Name.String(), Charset: t2cc.charset} + } + other.columnDefinition.Type.Options.Collate = env.CollationEnv().LookupName(collation) + } + } + } + if sqlparser.Equals.RefOfColumnDefinition(c.columnDefinition, other.columnDefinition) { - return nil + return nil, nil } - return NewModifyColumnDiffByDefinition(other.columnDefinition) + getEnumValuesMap := func(enumValues []string) map[string]int { + m := make(map[string]int, len(enumValues)) + for i, enumValue := range enumValues { + m[enumValue] = i + } + return m + } + switch hints.EnumReorderStrategy { + case EnumReorderStrategyReject: + otherEnumValuesMap := getEnumValuesMap(other.columnDefinition.Type.EnumValues) + for ordinal, enumValue := range c.columnDefinition.Type.EnumValues { + if otherOrdinal, ok := otherEnumValuesMap[enumValue]; ok { + if ordinal != otherOrdinal { + return nil, &EnumValueOrdinalChangedError{Table: tableName, Column: 
c.columnDefinition.Name.String(), Value: enumValue, Ordinal: ordinal, NewOrdinal: otherOrdinal} + } + } + } + } + return NewModifyColumnDiffByDefinition(other.columnDefinition), nil } // IsTextual returns true when this column is of textual type, and is capable of having a character set property diff --git a/go/vt/schemadiff/diff.go b/go/vt/schemadiff/diff.go index fce1e5e99db..ff0d861516b 100644 --- a/go/vt/schemadiff/diff.go +++ b/go/vt/schemadiff/diff.go @@ -24,14 +24,44 @@ func AllSubsequent(diff EntityDiff) (diffs []EntityDiff) { return diffs } +// AtomicDiffs attempts to break a given diff statement into its smallest components. +// This isn't necessarily the _correct_ thing to do, as MySQL goes, but it assists in +// identifying the distinct changes that are being made. +// Currently, the only implementation is to break up `ALTER TABLE ... DROP PARTITION` statements. +func AtomicDiffs(diff EntityDiff) []EntityDiff { + if diff == nil || diff.IsEmpty() { + return nil + } + trivial := func() []EntityDiff { + return []EntityDiff{diff} + } + switch diff := diff.(type) { + case *AlterTableEntityDiff: + alterTable := diff.alterTable + // Examine the scenario where we have e.g. `ALTER TABLE ... DROP PARTITION p1, p2, p3` + // and explode it into separate diffs + if alterTable.PartitionSpec != nil && alterTable.PartitionSpec.Action == sqlparser.DropAction && len(alterTable.PartitionSpec.Names) > 1 { + var distinctDiffs []EntityDiff + for i := range alterTable.PartitionSpec.Names { + clone := diff.Clone() + cloneAlterTableEntityDiff := clone.(*AlterTableEntityDiff) + cloneAlterTableEntityDiff.alterTable.PartitionSpec.Names = cloneAlterTableEntityDiff.alterTable.PartitionSpec.Names[i : i+1] + distinctDiffs = append(distinctDiffs, clone) + } + return distinctDiffs + } + } + return trivial() +} + // DiffCreateTablesQueries compares two `CREATE TABLE ...` queries (in string form) and returns the diff from table1 to table2. // Either or both of the queries can be empty. 
Based on this, the diff could be // nil, CreateTable, DropTable or AlterTable -func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { +func DiffCreateTablesQueries(env *Environment, query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { var fromCreateTable *sqlparser.CreateTable var ok bool if query1 != "" { - stmt, err := sqlparser.ParseStrictDDL(query1) + stmt, err := env.Parser().ParseStrictDDL(query1) if err != nil { return nil, err } @@ -42,7 +72,7 @@ func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (En } var toCreateTable *sqlparser.CreateTable if query2 != "" { - stmt, err := sqlparser.ParseStrictDDL(query2) + stmt, err := env.Parser().ParseStrictDDL(query2) if err != nil { return nil, err } @@ -51,34 +81,34 @@ func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (En return nil, ErrExpectedCreateTable } } - return DiffTables(fromCreateTable, toCreateTable, hints) + return DiffTables(env, fromCreateTable, toCreateTable, hints) } // DiffTables compares two tables and returns the diff from table1 to table2. // Either or both of the CreateTable statements can be nil. 
Based on this, the diff could be // nil, CreateTable, DropTable or AlterTable -func DiffTables(create1 *sqlparser.CreateTable, create2 *sqlparser.CreateTable, hints *DiffHints) (EntityDiff, error) { +func DiffTables(env *Environment, create1 *sqlparser.CreateTable, create2 *sqlparser.CreateTable, hints *DiffHints) (EntityDiff, error) { switch { case create1 == nil && create2 == nil: return nil, nil case create1 == nil: - c2, err := NewCreateTableEntity(create2) + c2, err := NewCreateTableEntity(env, create2) if err != nil { return nil, err } return c2.Create(), nil case create2 == nil: - c1, err := NewCreateTableEntity(create1) + c1, err := NewCreateTableEntity(env, create1) if err != nil { return nil, err } return c1.Drop(), nil default: - c1, err := NewCreateTableEntity(create1) + c1, err := NewCreateTableEntity(env, create1) if err != nil { return nil, err } - c2, err := NewCreateTableEntity(create2) + c2, err := NewCreateTableEntity(env, create2) if err != nil { return nil, err } @@ -89,11 +119,11 @@ func DiffTables(create1 *sqlparser.CreateTable, create2 *sqlparser.CreateTable, // DiffCreateViewsQueries compares two `CREATE TABLE ...` queries (in string form) and returns the diff from table1 to table2. // Either or both of the queries can be empty. 
Based on this, the diff could be // nil, CreateView, DropView or AlterView -func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { +func DiffCreateViewsQueries(env *Environment, query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { var fromCreateView *sqlparser.CreateView var ok bool if query1 != "" { - stmt, err := sqlparser.ParseStrictDDL(query1) + stmt, err := env.Parser().ParseStrictDDL(query1) if err != nil { return nil, err } @@ -104,7 +134,7 @@ func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (Ent } var toCreateView *sqlparser.CreateView if query2 != "" { - stmt, err := sqlparser.ParseStrictDDL(query2) + stmt, err := env.Parser().ParseStrictDDL(query2) if err != nil { return nil, err } @@ -113,34 +143,34 @@ func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (Ent return nil, ErrExpectedCreateView } } - return DiffViews(fromCreateView, toCreateView, hints) + return DiffViews(env, fromCreateView, toCreateView, hints) } // DiffViews compares two views and returns the diff from view1 to view2 // Either or both of the CreateView statements can be nil. 
Based on this, the diff could be // nil, CreateView, DropView or AlterView -func DiffViews(create1 *sqlparser.CreateView, create2 *sqlparser.CreateView, hints *DiffHints) (EntityDiff, error) { +func DiffViews(env *Environment, create1 *sqlparser.CreateView, create2 *sqlparser.CreateView, hints *DiffHints) (EntityDiff, error) { switch { case create1 == nil && create2 == nil: return nil, nil case create1 == nil: - c2, err := NewCreateViewEntity(create2) + c2, err := NewCreateViewEntity(env, create2) if err != nil { return nil, err } return c2.Create(), nil case create2 == nil: - c1, err := NewCreateViewEntity(create1) + c1, err := NewCreateViewEntity(env, create1) if err != nil { return nil, err } return c1.Drop(), nil default: - c1, err := NewCreateViewEntity(create1) + c1, err := NewCreateViewEntity(env, create1) if err != nil { return nil, err } - c2, err := NewCreateViewEntity(create2) + c2, err := NewCreateViewEntity(env, create2) if err != nil { return nil, err } @@ -151,12 +181,12 @@ func DiffViews(create1 *sqlparser.CreateView, create2 *sqlparser.CreateView, hin // DiffSchemasSQL compares two schemas and returns the rich diff that turns // 1st schema into 2nd. Schemas are build from SQL, each of which can contain an arbitrary number of // CREATE TABLE and CREATE VIEW statements. -func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) (*SchemaDiff, error) { - schema1, err := NewSchemaFromSQL(sql1) +func DiffSchemasSQL(env *Environment, sql1 string, sql2 string, hints *DiffHints) (*SchemaDiff, error) { + schema1, err := NewSchemaFromSQL(env, sql1) if err != nil { return nil, err } - schema2, err := NewSchemaFromSQL(sql2) + schema2, err := NewSchemaFromSQL(env, sql2) if err != nil { return nil, err } @@ -165,12 +195,34 @@ func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) (*SchemaDiff, er // DiffSchemas compares two schemas and returns the list of diffs that turn // 1st schema into 2nd. Any of the schemas may be nil. 
-func DiffSchemas(schema1 *Schema, schema2 *Schema, hints *DiffHints) (*SchemaDiff, error) { +func DiffSchemas(env *Environment, schema1 *Schema, schema2 *Schema, hints *DiffHints) (*SchemaDiff, error) { if schema1 == nil { - schema1 = newEmptySchema() + schema1 = newEmptySchema(env) } if schema2 == nil { - schema2 = newEmptySchema() + schema2 = newEmptySchema(env) } return schema1.SchemaDiff(schema2, hints) } + +// EntityDiffByStatement is a helper function that returns a simplified and incomplete EntityDiff based on the given SQL statement. +// It is useful for testing purposes as a quick mean to wrap a statement with a diff. +func EntityDiffByStatement(statement sqlparser.Statement) EntityDiff { + switch stmt := statement.(type) { + case *sqlparser.CreateTable: + return &CreateTableEntityDiff{createTable: stmt} + case *sqlparser.RenameTable: + return &RenameTableEntityDiff{renameTable: stmt} + case *sqlparser.AlterTable: + return &AlterTableEntityDiff{alterTable: stmt} + case *sqlparser.DropTable: + return &DropTableEntityDiff{dropTable: stmt} + case *sqlparser.CreateView: + return &CreateViewEntityDiff{createView: stmt} + case *sqlparser.AlterView: + return &AlterViewEntityDiff{alterView: stmt} + case *sqlparser.DropView: + return &DropViewEntityDiff{dropView: stmt} + } + return nil +} diff --git a/go/vt/schemadiff/diff_test.go b/go/vt/schemadiff/diff_test.go index d2a170f4752..d78308c90e0 100644 --- a/go/vt/schemadiff/diff_test.go +++ b/go/vt/schemadiff/diff_test.go @@ -18,26 +18,33 @@ package schemadiff import ( "context" + "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" ) func TestDiffTables(t *testing.T) { + env57, err := vtenv.New(vtenv.Options{MySQLServerVersion: "5.7.9"}) + require.NoError(t, err) tt := []struct { - name string - from string - to string - diff string - cdiff string - fromName string - 
toName string - action string - isError bool - hints *DiffHints + name string + from string + to string + diff string + cdiff string + fromName string + toName string + action string + expectError string + hints *DiffHints + env *Environment + annotated []string }{ { name: "identical", @@ -53,6 +60,9 @@ func TestDiffTables(t *testing.T) { action: "alter", fromName: "t", toName: "t", + annotated: []string{ + " CREATE TABLE `t` (", " \t`id` int,", "+\t`i` int,", " \tPRIMARY KEY (`id`)", " )", + }, }, { name: "change of columns, boolean type", @@ -101,6 +111,9 @@ func TestDiffTables(t *testing.T) { cdiff: "CREATE TABLE `t` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", action: "create", toName: "t", + annotated: []string{ + "+CREATE TABLE `t` (", "+\t`id` int,", "+\tPRIMARY KEY (`id`)", "+)", + }, }, { name: "drop", @@ -109,6 +122,9 @@ func TestDiffTables(t *testing.T) { cdiff: "DROP TABLE `t`", action: "drop", fromName: "t", + annotated: []string{ + "-CREATE TABLE `t` (", "-\t`id` int,", "-\tPRIMARY KEY (`id`)", "-)", + }, }, { name: "none", @@ -189,16 +205,134 @@ func TestDiffTables(t *testing.T) { TableQualifierHint: TableQualifierDeclared, }, }, + { + name: "changing table level defaults with column specific settings, ignore charset", + from: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin) default charset=latin1", + to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", + hints: &DiffHints{ + AlterTableAlgorithmStrategy: AlterTableAlgorithmStrategyCopy, + TableCharsetCollateStrategy: TableCharsetCollateIgnoreAlways, + }, + }, + { + name: "changing table level defaults with column specific settings based on collation, ignore charset", + from: "create table t (a varchar(64) COLLATE latin1_bin) default charset=utf8mb4", + to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", + hints: &DiffHints{ + AlterTableAlgorithmStrategy: AlterTableAlgorithmStrategyCopy, + TableCharsetCollateStrategy: 
TableCharsetCollateIgnoreAlways, + }, + }, + { + name: "error on unknown collation", + from: "create table t (a varchar(64) COLLATE latin1_nonexisting) default charset=utf8mb4", + to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", + hints: &DiffHints{ + AlterTableAlgorithmStrategy: AlterTableAlgorithmStrategyCopy, + TableCharsetCollateStrategy: TableCharsetCollateIgnoreAlways, + }, + expectError: (&UnknownColumnCollationCharsetError{Column: "a", Collation: "latin1_nonexisting"}).Error(), + }, + { + name: "error on unknown charset", + from: "create table t (a varchar(64)) default charset=latin_nonexisting collate=''", + to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", + hints: &DiffHints{ + AlterTableAlgorithmStrategy: AlterTableAlgorithmStrategyCopy, + }, + expectError: (&UnknownColumnCharsetCollationError{Column: "a", Charset: "latin_nonexisting"}).Error(), + }, + { + name: "changing table level defaults with column specific settings", + from: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin) default charset=latin1", + to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", + diff: "alter table t charset utf8mb4, algorithm = COPY", + cdiff: "ALTER TABLE `t` CHARSET utf8mb4, ALGORITHM = COPY", + action: "alter", + fromName: "t", + hints: &DiffHints{ + AlterTableAlgorithmStrategy: AlterTableAlgorithmStrategyCopy, + TableCharsetCollateStrategy: TableCharsetCollateStrict, + }, + }, + { + name: "changing table level defaults with column specific settings, table already normalized", + from: "create table t (a varchar(64)) default charset=latin1", + to: "create table t (a varchar(64) CHARACTER SET latin1 COLLATE latin1_bin)", + diff: "alter table t modify column a varchar(64) character set latin1 collate latin1_bin, charset utf8mb4, algorithm = COPY", + cdiff: "ALTER TABLE `t` MODIFY COLUMN `a` varchar(64) CHARACTER SET latin1 COLLATE latin1_bin, CHARSET utf8mb4, ALGORITHM 
= COPY", + action: "alter", + fromName: "t", + hints: &DiffHints{ + AlterTableAlgorithmStrategy: AlterTableAlgorithmStrategyCopy, + TableCharsetCollateStrategy: TableCharsetCollateStrict, + }, + }, + { + name: "changing table level charset to default", + from: `create table t (i int) default charset=latin1`, + to: `create table t (i int)`, + action: "alter", + diff: "alter table t charset utf8mb4", + cdiff: "ALTER TABLE `t` CHARSET utf8mb4", + }, + { + name: "no changes with normalization and utf8mb4", + from: `CREATE TABLE IF NOT EXISTS tables + ( + TABLE_SCHEMA varchar(64) NOT NULL, + TABLE_NAME varchar(64) NOT NULL, + CREATE_STATEMENT longtext, + CREATE_TIME BIGINT, + PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME) + ) engine = InnoDB`, + to: "CREATE TABLE `tables` (" + + "`TABLE_SCHEMA` varchar(64) NOT NULL," + + "`TABLE_NAME` varchar(64) NOT NULL," + + "`CREATE_STATEMENT` longtext," + + "`CREATE_TIME` bigint DEFAULT NULL," + + "PRIMARY KEY (`TABLE_SCHEMA`,`TABLE_NAME`)" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + hints: &DiffHints{ + TableCharsetCollateStrategy: TableCharsetCollateIgnoreAlways, + }, + }, + { + name: "no changes with normalization and utf8mb3", + from: `CREATE TABLE IF NOT EXISTS tables + ( + TABLE_SCHEMA varchar(64) NOT NULL, + TABLE_NAME varchar(64) NOT NULL, + CREATE_STATEMENT longtext, + CREATE_TIME BIGINT, + PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME) + ) engine = InnoDB`, + to: "CREATE TABLE `tables` (" + + "`TABLE_SCHEMA` varchar(64) NOT NULL," + + "`TABLE_NAME` varchar(64) NOT NULL," + + "`CREATE_STATEMENT` longtext," + + "`CREATE_TIME` bigint DEFAULT NULL," + + "PRIMARY KEY (`TABLE_SCHEMA`,`TABLE_NAME`)" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci", + hints: &DiffHints{ + TableCharsetCollateStrategy: TableCharsetCollateIgnoreAlways, + }, + env: NewEnv(env57, collations.CollationUtf8mb3ID), + }, } + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { var fromCreateTable 
*sqlparser.CreateTable - hints := &DiffHints{} + hints := EmptyDiffHints() if ts.hints != nil { hints = ts.hints } + if ts.env != nil { + env = ts.env + } if ts.from != "" { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := env.Parser().ParseStrictDDL(ts.from) assert.NoError(t, err) var ok bool fromCreateTable, ok = fromStmt.(*sqlparser.CreateTable) @@ -206,7 +340,7 @@ func TestDiffTables(t *testing.T) { } var toCreateTable *sqlparser.CreateTable if ts.to != "" { - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := env.Parser().ParseStrictDDL(ts.to) assert.NoError(t, err) var ok bool toCreateTable, ok = toStmt.(*sqlparser.CreateTable) @@ -218,22 +352,26 @@ func TestDiffTables(t *testing.T) { // Technically, DiffCreateTablesQueries calls DiffTables, // but we expose both to users of this library. so we want to make sure // both work as expected irrespective of any relationship between them. - dq, dqerr := DiffCreateTablesQueries(ts.from, ts.to, hints) - d, err := DiffTables(fromCreateTable, toCreateTable, hints) + dq, dqerr := DiffCreateTablesQueries(env, ts.from, ts.to, hints) + d, err := DiffTables(env, fromCreateTable, toCreateTable, hints) switch { - case ts.isError: - assert.Error(t, err) - assert.Error(t, dqerr) + case ts.expectError != "": + assert.ErrorContains(t, err, ts.expectError) + assert.ErrorContains(t, dqerr, ts.expectError) case ts.diff == "": assert.NoError(t, err) assert.NoError(t, dqerr) - assert.Nil(t, d) - assert.Nil(t, dq) + if !assert.Nil(t, d) { + assert.Failf(t, "found unexpected diff", "%v", d.CanonicalStatementString()) + } + if !assert.Nil(t, dq) { + assert.Failf(t, "found unexpected diff", "%v", dq.CanonicalStatementString()) + } default: assert.NoError(t, err) require.NotNil(t, d) require.False(t, d.IsEmpty()) - { + t.Run("statement", func(t *testing.T) { diff := d.StatementString() assert.Equal(t, ts.diff, diff) action, err := DDLActionStr(d) @@ -241,7 +379,7 @@ func TestDiffTables(t *testing.T) { 
assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(diff) + _, err = env.Parser().ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := d.Entities() @@ -251,8 +389,8 @@ func TestDiffTables(t *testing.T) { if ts.toName != "" { assert.Equal(t, ts.toName, eTo.Name()) } - } - { + }) + t.Run("canonical", func(t *testing.T) { canonicalDiff := d.CanonicalStatementString() assert.Equal(t, ts.cdiff, canonicalDiff) action, err := DDLActionStr(d) @@ -260,9 +398,20 @@ func TestDiffTables(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(canonicalDiff) + _, err = env.Parser().ParseStrictDDL(canonicalDiff) assert.NoError(t, err) - } + }) + t.Run("annotations", func(t *testing.T) { + from, to, unified := d.Annotated() + require.NotNil(t, from) + require.NotNil(t, to) + require.NotNil(t, unified) + if ts.annotated != nil { + // Optional test for assorted scenarios. 
+ unifiedExport := unified.Export() + assert.Equal(t, ts.annotated, strings.Split(unifiedExport, "\n")) + } + }) // let's also check dq, and also validate that dq's statement is identical to d's assert.NoError(t, dqerr) require.NotNil(t, dq) @@ -276,15 +425,16 @@ func TestDiffTables(t *testing.T) { func TestDiffViews(t *testing.T) { tt := []struct { - name string - from string - to string - diff string - cdiff string - fromName string - toName string - action string - isError bool + name string + from string + to string + diff string + cdiff string + fromName string + toName string + action string + isError bool + annotated []string }{ { name: "identical", @@ -300,6 +450,10 @@ func TestDiffViews(t *testing.T) { action: "alter", fromName: "v1", toName: "v1", + annotated: []string{ + "-CREATE VIEW `v1`(`col1`, `col2`, `col3`) AS SELECT `a`, `b`, `c` FROM `t`", + "+CREATE VIEW `v1`(`col1`, `col2`, `colother`) AS SELECT `a`, `b`, `c` FROM `t`", + }, }, { name: "create", @@ -308,6 +462,9 @@ func TestDiffViews(t *testing.T) { cdiff: "CREATE VIEW `v1` AS SELECT `a`, `b`, `c` FROM `t`", action: "create", toName: "v1", + annotated: []string{ + "+CREATE VIEW `v1` AS SELECT `a`, `b`, `c` FROM `t`", + }, }, { name: "drop", @@ -316,17 +473,21 @@ func TestDiffViews(t *testing.T) { cdiff: "DROP VIEW `v1`", action: "drop", fromName: "v1", + annotated: []string{ + "-CREATE VIEW `v1` AS SELECT `a`, `b`, `c` FROM `t`", + }, }, { name: "none", }, } - hints := &DiffHints{} + hints := EmptyDiffHints() + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { var fromCreateView *sqlparser.CreateView if ts.from != "" { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := env.Parser().ParseStrictDDL(ts.from) assert.NoError(t, err) var ok bool fromCreateView, ok = fromStmt.(*sqlparser.CreateView) @@ -334,7 +495,7 @@ func TestDiffViews(t *testing.T) { } var toCreateView *sqlparser.CreateView if ts.to != "" { - toStmt, err := 
sqlparser.ParseStrictDDL(ts.to) + toStmt, err := env.Parser().ParseStrictDDL(ts.to) assert.NoError(t, err) var ok bool toCreateView, ok = toStmt.(*sqlparser.CreateView) @@ -346,8 +507,8 @@ func TestDiffViews(t *testing.T) { // Technically, DiffCreateTablesQueries calls DiffTables, // but we expose both to users of this library. so we want to make sure // both work as expected irrespective of any relationship between them. - dq, dqerr := DiffCreateViewsQueries(ts.from, ts.to, hints) - d, err := DiffViews(fromCreateView, toCreateView, hints) + dq, dqerr := DiffCreateViewsQueries(env, ts.from, ts.to, hints) + d, err := DiffViews(env, fromCreateView, toCreateView, hints) switch { case ts.isError: assert.Error(t, err) @@ -355,8 +516,12 @@ func TestDiffViews(t *testing.T) { case ts.diff == "": assert.NoError(t, err) assert.NoError(t, dqerr) - assert.Nil(t, d) - assert.Nil(t, dq) + if !assert.Nil(t, d) { + assert.Failf(t, "found unexpected diff", "%v", d.CanonicalStatementString()) + } + if !assert.Nil(t, dq) { + assert.Failf(t, "found unexpected diff", "%v", dq.CanonicalStatementString()) + } default: assert.NoError(t, err) require.NotNil(t, d) @@ -369,7 +534,7 @@ func TestDiffViews(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(diff) + _, err = env.Parser().ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := d.Entities() @@ -388,10 +553,15 @@ func TestDiffViews(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(canonicalDiff) + _, err = env.Parser().ParseStrictDDL(canonicalDiff) assert.NoError(t, err) } - + if ts.annotated != nil { + // Optional test for assorted scenarios. 
+ _, _, unified := d.Annotated() + unifiedExport := unified.Export() + assert.Equal(t, ts.annotated, strings.Split(unifiedExport, "\n")) + } // let's also check dq, and also validate that dq's statement is identical to d's assert.NoError(t, dqerr) require.NotNil(t, dq) @@ -399,6 +569,11 @@ func TestDiffViews(t *testing.T) { diff := dq.StatementString() assert.Equal(t, ts.diff, diff) } + if d != nil && !d.IsEmpty() { + // Validate Clone() works + clone := d.Clone() + assert.Equal(t, d.Statement(), clone.Statement()) + } }) } } @@ -413,6 +588,8 @@ func TestDiffSchemas(t *testing.T) { cdiffs []string expectError string tableRename int + annotated []string + fkStrategy int }{ { name: "identical tables", @@ -549,6 +726,9 @@ func TestDiffSchemas(t *testing.T) { cdiffs: []string{ "CREATE TABLE `t` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", }, + annotated: []string{ + "+CREATE TABLE `t` (\n+\t`id` int,\n+\tPRIMARY KEY (`id`)\n+)", + }, }, { name: "drop table", @@ -559,6 +739,9 @@ func TestDiffSchemas(t *testing.T) { cdiffs: []string{ "DROP TABLE `t`", }, + annotated: []string{ + "-CREATE TABLE `t` (\n-\t`id` int,\n-\tPRIMARY KEY (`id`)\n-)", + }, }, { name: "create, alter, drop tables", @@ -574,6 +757,11 @@ func TestDiffSchemas(t *testing.T) { "ALTER TABLE `t2` MODIFY COLUMN `id` bigint", "CREATE TABLE `t4` (\n\t`id` int,\n\tPRIMARY KEY (`id`)\n)", }, + annotated: []string{ + "-CREATE TABLE `t1` (\n-\t`id` int,\n-\tPRIMARY KEY (`id`)\n-)", + " CREATE TABLE `t2` (\n-\t`id` int,\n+\t`id` bigint,\n \tPRIMARY KEY (`id`)\n )", + "+CREATE TABLE `t4` (\n+\t`id` int,\n+\tPRIMARY KEY (`id`)\n+)", + }, }, { name: "identical tables: drop and create", @@ -599,6 +787,9 @@ func TestDiffSchemas(t *testing.T) { "RENAME TABLE `t2a` TO `t2b`", }, tableRename: TableRenameHeuristicStatement, + annotated: []string{ + "-CREATE TABLE `t2a` (\n-\t`id` int unsigned,\n-\tPRIMARY KEY (`id`)\n-)\n+CREATE TABLE `t2b` (\n+\t`id` int unsigned,\n+\tPRIMARY KEY (`id`)\n+)", + }, }, { name: "drop and create 
all", @@ -667,6 +858,45 @@ func TestDiffSchemas(t *testing.T) { "CREATE TABLE `t5` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `f5` (`i`),\n\tCONSTRAINT `f5` FOREIGN KEY (`i`) REFERENCES `t7` (`id`)\n)", }, }, + { + name: "create tables with foreign keys, with invalid fk reference", + from: "create table t (id int primary key)", + to: ` + create table t (id int primary key); + create table t11 (id int primary key, i int, constraint f1101a foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1201a foreign key (i) references t9 (id) on delete set null); + `, + expectError: "table `t12` foreign key references nonexistent table `t9`", + }, + { + name: "create tables with foreign keys, with invalid fk reference", + from: "create table t (id int primary key)", + to: ` + create table t (id int primary key); + create table t11 (id int primary key, i int, constraint f1101b foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1201b foreign key (i) references t9 (id) on delete set null); + `, + expectError: "table `t12` foreign key references nonexistent table `t9`", + fkStrategy: ForeignKeyCheckStrategyIgnore, + }, + { + name: "create tables with foreign keys, with valid cycle", + from: "create table t (id int primary key)", + to: ` + create table t (id int primary key); + create table t11 (id int primary key, i int, constraint f1101c foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1201c foreign key (i) references t11 (id) on delete set null); + `, + diffs: []string{ + "create table t11 (\n\tid int,\n\ti int,\n\tprimary key (id),\n\tkey f1101c (i),\n\tconstraint f1101c foreign key (i) references t12 (id) on delete restrict\n)", + "create table t12 (\n\tid int,\n\ti int,\n\tprimary key (id),\n\tkey f1201c (i),\n\tconstraint f1201c foreign key (i) references t11 (id) on 
delete set null\n)", + }, + cdiffs: []string{ + "CREATE TABLE `t11` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `f1101c` (`i`),\n\tCONSTRAINT `f1101c` FOREIGN KEY (`i`) REFERENCES `t12` (`id`) ON DELETE RESTRICT\n)", + "CREATE TABLE `t12` (\n\t`id` int,\n\t`i` int,\n\tPRIMARY KEY (`id`),\n\tKEY `f1201c` (`i`),\n\tCONSTRAINT `f1201c` FOREIGN KEY (`i`) REFERENCES `t11` (`id`) ON DELETE SET NULL\n)", + }, + fkStrategy: ForeignKeyCheckStrategyIgnore, + }, { name: "drop tables with foreign keys, expect specific order", from: "create table t7(id int primary key); create table t5 (id int primary key, i int, constraint f5 foreign key (i) references t7(id)); create table t4 (id int primary key, i int, constraint f4 foreign key (i) references t7(id));", @@ -796,17 +1026,19 @@ func TestDiffSchemas(t *testing.T) { }, }, } + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { hints := &DiffHints{ - TableRenameStrategy: ts.tableRename, + TableRenameStrategy: ts.tableRename, + ForeignKeyCheckStrategy: ts.fkStrategy, } - diff, err := DiffSchemasSQL(ts.from, ts.to, hints) + diff, err := DiffSchemasSQL(env, ts.from, ts.to, hints) if ts.expectError != "" { require.Error(t, err) assert.Contains(t, err.Error(), ts.expectError) } else { - assert.NoError(t, err) + require.NoError(t, err) diffs, err := diff.OrderedDiffs(ctx) assert.NoError(t, err) @@ -827,21 +1059,47 @@ func TestDiffSchemas(t *testing.T) { // validate we can parse back the diff statements for _, s := range statements { - _, err := sqlparser.ParseStrictDDL(s) + _, err := env.Parser().ParseStrictDDL(s) assert.NoError(t, err) } for _, s := range cstatements { - _, err := sqlparser.ParseStrictDDL(s) + _, err := env.Parser().ParseStrictDDL(s) assert.NoError(t, err) } + // Validate Clone() works + for _, d := range diffs { + if d.IsEmpty() { + continue + } + dFrom, dTo := d.Entities() + clone := d.Clone() + clonedFrom, clonedTo := clone.Entities() + assert.Equal(t, 
d.CanonicalStatementString(), clone.CanonicalStatementString()) + if dFrom != nil { + assert.Equal(t, dFrom.Name(), clonedFrom.Name()) + } + if dTo != nil { + assert.Equal(t, dTo.Name(), clonedTo.Name()) + } + } + + if ts.annotated != nil { + // Optional test for assorted scenarios. + if assert.Equalf(t, len(diffs), len(ts.annotated), "%+v", cstatements) { + for i, d := range diffs { + _, _, unified := d.Annotated() + assert.Equal(t, ts.annotated[i], unified.Export()) + } + } + } { // Validate "apply()" on "from" converges with "to" - schema1, err := NewSchemaFromSQL(ts.from) + schema1, err := NewSchemaFromSQL(env, ts.from) require.NoError(t, err) schema1SQL := schema1.ToSQL() - schema2, err := NewSchemaFromSQL(ts.to) + schema2, err := NewSchemaFromSQL(env, ts.to) require.NoError(t, err) applied, err := schema1.Apply(diffs) require.NoError(t, err) @@ -891,13 +1149,14 @@ func TestSchemaApplyError(t *testing.T) { to: "create table t(id int); create view v1 as select * from t; create view v2 as select * from t", }, } - hints := &DiffHints{} + hints := EmptyDiffHints() + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { // Validate "apply()" on "from" converges with "to" - schema1, err := NewSchemaFromSQL(ts.from) + schema1, err := NewSchemaFromSQL(env, ts.from) assert.NoError(t, err) - schema2, err := NewSchemaFromSQL(ts.to) + schema2, err := NewSchemaFromSQL(env, ts.to) assert.NoError(t, err) { @@ -925,3 +1184,80 @@ func TestSchemaApplyError(t *testing.T) { }) } } + +func TestEntityDiffByStatement(t *testing.T) { + env := NewTestEnv() + + tcases := []struct { + query string + valid bool + expectAnotated bool + }{ + { + query: "create table t1(id int primary key)", + valid: true, + expectAnotated: true, + }, + { + query: "alter table t1 add column i int", + valid: true, + }, + { + query: "rename table t1 to t2", + valid: true, + }, + { + query: "drop table t1", + valid: true, + }, + { + query: "create view v1 as select * from t1", + 
valid: true, + expectAnotated: true, + }, + { + query: "alter view v1 as select * from t2", + valid: true, + }, + { + query: "drop view v1", + valid: true, + }, + { + query: "drop database d1", + valid: false, + }, + { + query: "optimize table t1", + valid: false, + }, + } + + for _, tcase := range tcases { + t.Run(tcase.query, func(t *testing.T) { + stmt, err := env.Parser().ParseStrictDDL(tcase.query) + require.NoError(t, err) + entityDiff := EntityDiffByStatement(stmt) + if !tcase.valid { + require.Nil(t, entityDiff) + return + } + require.NotNil(t, entityDiff) + require.NotNil(t, entityDiff.Statement()) + require.Equal(t, stmt, entityDiff.Statement()) + + annotatedFrom, annotatedTo, annotatedUnified := entityDiff.Annotated() + // EntityDiffByStatement doesn't have real entities behind it, just a wrapper around a statement. + // Therefore, there are no annotations. + if tcase.expectAnotated { + assert.NotNil(t, annotatedFrom) + assert.NotNil(t, annotatedTo) + assert.NotNil(t, annotatedUnified) + } else { + assert.Nil(t, annotatedFrom) + assert.Nil(t, annotatedTo) + assert.Nil(t, annotatedUnified) + } + }) + } +} diff --git a/go/vt/schemadiff/env.go b/go/vt/schemadiff/env.go new file mode 100644 index 00000000000..9037de40b01 --- /dev/null +++ b/go/vt/schemadiff/env.go @@ -0,0 +1,25 @@ +package schemadiff + +import ( + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/vtenv" +) + +type Environment struct { + *vtenv.Environment + DefaultColl collations.ID +} + +func NewTestEnv() *Environment { + return &Environment{ + Environment: vtenv.NewTestEnv(), + DefaultColl: collations.MySQL8().DefaultConnectionCharset(), + } +} + +func NewEnv(env *vtenv.Environment, defaultColl collations.ID) *Environment { + return &Environment{ + Environment: env, + DefaultColl: defaultColl, + } +} diff --git a/go/vt/schemadiff/errors.go b/go/vt/schemadiff/errors.go index 8317fbe9cea..a941c406be0 100644 --- a/go/vt/schemadiff/errors.go +++ b/go/vt/schemadiff/errors.go @@ 
-22,6 +22,7 @@ import ( "strings" "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/vt/sqlparser" ) var ( @@ -282,10 +283,30 @@ type ForeignKeyDependencyUnresolvedError struct { } func (e *ForeignKeyDependencyUnresolvedError) Error() string { - return fmt.Sprintf("table %s has unresolved/loop foreign key dependencies", + return fmt.Sprintf("table %s has unresolved foreign key dependencies", sqlescape.EscapeID(e.Table)) } +type ForeignKeyNonexistentReferencedTableError struct { + Table string + ReferencedTable string +} + +func (e *ForeignKeyNonexistentReferencedTableError) Error() string { + return fmt.Sprintf("table %s foreign key references nonexistent table %s", + sqlescape.EscapeID(e.Table), sqlescape.EscapeID(e.ReferencedTable)) +} + +type ForeignKeyReferencesViewError struct { + Table string + ReferencedView string +} + +func (e *ForeignKeyReferencesViewError) Error() string { + return fmt.Sprintf("table %s foreign key references view %s", + sqlescape.EscapeID(e.Table), sqlescape.EscapeID(e.ReferencedView)) +} + type InvalidColumnInForeignKeyConstraintError struct { Table string Constraint string @@ -403,3 +424,66 @@ type EntityNotFoundError struct { func (e *EntityNotFoundError) Error() string { return fmt.Sprintf("entity %s not found", sqlescape.EscapeID(e.Name)) } + +type EnumValueOrdinalChangedError struct { + Table string + Column string + Value string + Ordinal int + NewOrdinal int +} + +func (e *EnumValueOrdinalChangedError) Error() string { + return fmt.Sprintf("ordinal of %s changed in enum or set column %s.%s, from %d to %d", e.Value, sqlescape.EscapeID(e.Table), sqlescape.EscapeID(e.Column), e.Ordinal, e.NewOrdinal) +} + +type UnknownColumnCharsetCollationError struct { + Column string + Charset string +} + +func (e *UnknownColumnCharsetCollationError) Error() string { + return fmt.Sprintf("unable to determine collation for column %s with charset %q", sqlescape.EscapeID(e.Column), e.Charset) +} + +type UnknownColumnCollationCharsetError struct 
{ + Column string + Collation string +} + +func (e *UnknownColumnCollationCharsetError) Error() string { + return fmt.Sprintf("unable to determine charset for column %s with collation %q", sqlescape.EscapeID(e.Column), e.Collation) +} + +type SubsequentDiffRejectedError struct { + Table string + Diffs []EntityDiff +} + +func (e *SubsequentDiffRejectedError) Error() string { + var b strings.Builder + b.WriteString(fmt.Sprintf("multiple changes not allowed on table %s. Found:", sqlescape.EscapeID(e.Table))) + for _, d := range e.Diffs { + b.WriteString("\n") + b.WriteString(d.CanonicalStatementString()) + } + return b.String() +} + +// PartitionSpecNonExclusiveError is returned when a partition spec change is found alongside other changes. +// for example, in MySQL it is invalid to both DROP PARTITION (a partition spec change) and ADD COLUMN +// in the same ALTER TABLE statement. In fact, even two partition spec changes in the same ALTER TABLE +// statement are not allowed. +// This error should never be encountered in normal circumstances, because: +// - `sqlparser` should not allow such statements to be parsed. +// - schemadiff's `Diff()` function will never generate a single `ALTER TABLE` statement with such multiple changes. +// The error is used for integrity checks only, and should be considered a bug if encountered. +type PartitionSpecNonExclusiveError struct { + Table string + PartitionSpec *sqlparser.PartitionSpec + ConflictingStatement string +} + +func (e *PartitionSpecNonExclusiveError) Error() string { + return fmt.Sprintf("ALTER TABLE on %s, may only have a single partition spec change, and other changes are not allowed. 
Found spec: %s; and change: %s", sqlescape.EscapeID(e.Table), sqlparser.CanonicalString(e.PartitionSpec), e.ConflictingStatement) +} diff --git a/go/vt/schemadiff/schema.go b/go/vt/schemadiff/schema.go index 9180012676f..efad76d9a33 100644 --- a/go/vt/schemadiff/schema.go +++ b/go/vt/schemadiff/schema.go @@ -17,16 +17,12 @@ limitations under the License. package schemadiff import ( - "bytes" "errors" - "fmt" - "io" "sort" "strings" - "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -41,10 +37,12 @@ type Schema struct { foreignKeyParents []*CreateTableEntity // subset of tables foreignKeyChildren []*CreateTableEntity // subset of tables + + env *Environment } // newEmptySchema is used internally to initialize a Schema object -func newEmptySchema() *Schema { +func newEmptySchema(env *Environment) *Schema { schema := &Schema{ tables: []*CreateTableEntity{}, views: []*CreateViewEntity{}, @@ -53,13 +51,15 @@ func newEmptySchema() *Schema { foreignKeyParents: []*CreateTableEntity{}, foreignKeyChildren: []*CreateTableEntity{}, + + env: env, } return schema } // NewSchemaFromEntities creates a valid and normalized schema based on list of entities -func NewSchemaFromEntities(entities []Entity) (*Schema, error) { - schema := newEmptySchema() +func NewSchemaFromEntities(env *Environment, entities []Entity) (*Schema, error) { + schema := newEmptySchema(env) for _, e := range entities { switch c := e.(type) { case *CreateTableEntity: @@ -70,23 +70,23 @@ func NewSchemaFromEntities(entities []Entity) (*Schema, error) { return nil, &UnsupportedEntityError{Entity: c.Name(), Statement: c.Create().CanonicalStatementString()} } } - err := schema.normalize() + err := schema.normalize(EmptyDiffHints()) return schema, err } // NewSchemaFromStatements creates a valid and normalized schema based on list of valid statements -func 
NewSchemaFromStatements(statements []sqlparser.Statement) (*Schema, error) { +func NewSchemaFromStatements(env *Environment, statements []sqlparser.Statement) (*Schema, error) { entities := make([]Entity, 0, len(statements)) for _, s := range statements { switch stmt := s.(type) { case *sqlparser.CreateTable: - c, err := NewCreateTableEntity(stmt) + c, err := NewCreateTableEntity(env, stmt) if err != nil { return nil, err } entities = append(entities, c) case *sqlparser.CreateView: - v, err := NewCreateViewEntity(stmt) + v, err := NewCreateViewEntity(env, stmt) if err != nil { return nil, err } @@ -95,41 +95,33 @@ func NewSchemaFromStatements(statements []sqlparser.Statement) (*Schema, error) return nil, &UnsupportedStatementError{Statement: sqlparser.CanonicalString(s)} } } - return NewSchemaFromEntities(entities) + return NewSchemaFromEntities(env, entities) } // NewSchemaFromQueries creates a valid and normalized schema based on list of queries -func NewSchemaFromQueries(queries []string) (*Schema, error) { +func NewSchemaFromQueries(env *Environment, queries []string) (*Schema, error) { statements := make([]sqlparser.Statement, 0, len(queries)) for _, q := range queries { - stmt, err := sqlparser.ParseStrictDDL(q) + stmt, err := env.Parser().ParseStrictDDL(q) if err != nil { return nil, err } statements = append(statements, stmt) } - return NewSchemaFromStatements(statements) + return NewSchemaFromStatements(env, statements) } // NewSchemaFromSQL creates a valid and normalized schema based on a SQL blob that contains // CREATE statements for various objects (tables, views) -func NewSchemaFromSQL(sql string) (*Schema, error) { - var statements []sqlparser.Statement - tokenizer := sqlparser.NewStringTokenizer(sql) - for { - stmt, err := sqlparser.ParseNextStrictDDL(tokenizer) - if err != nil { - if errors.Is(err, io.EOF) { - break - } - return nil, fmt.Errorf("could not parse statement in SQL: %v: %w", sql, err) - } - statements = append(statements, stmt) +func 
NewSchemaFromSQL(env *Environment, sql string) (*Schema, error) { + statements, err := env.Parser().SplitStatements(sql) + if err != nil { + return nil, err } - return NewSchemaFromStatements(statements) + return NewSchemaFromStatements(env, statements) } -// getForeignKeyParentTableNames analyzes a CREATE TABLE definition and extracts all referened foreign key tables names. +// getForeignKeyParentTableNames analyzes a CREATE TABLE definition and extracts all referenced foreign key tables names. // A table name may appear twice in the result output, if it is referenced by more than one foreign key func getForeignKeyParentTableNames(createTable *sqlparser.CreateTable) (names []string) { for _, cs := range createTable.TableSpec.Constraints { @@ -161,7 +153,7 @@ func getViewDependentTableNames(createView *sqlparser.CreateView) (names []strin // normalize is called as part of Schema creation process. The user may only get a hold of normalized schema. // It validates some cross-entity constraints, and orders entity based on dependencies (e.g. tables, views that read from tables, 2nd level views, etc.) -func (s *Schema) normalize() error { +func (s *Schema) normalize(hints *DiffHints) error { var errs error s.named = make(map[string]Entity, len(s.tables)+len(s.views)) @@ -215,6 +207,18 @@ func (s *Schema) normalize() error { return true } + // Utility map and function to only record one foreign-key error per table. We make this limitation + // because the search algorithm below could review the same table twice, thus potentially unnecessarily duplicating + // found errors. + entityFkErrors := map[string]error{} + addEntityFkError := func(e Entity, err error) error { + if _, ok := entityFkErrors[e.Name()]; ok { + // error already recorded for this entity + return nil + } + entityFkErrors[e.Name()] = err + return err + } // We now iterate all tables. 
We iterate "dependency levels": // - first we want all tables that don't have foreign keys or which only reference themselves // - then we only want tables that reference 1st level tables. these are 2nd level tables @@ -240,6 +244,18 @@ func (s *Schema) normalize() error { if referencedTableName != name { nonSelfReferenceNames = append(nonSelfReferenceNames, referencedTableName) } + referencedEntity, ok := s.named[referencedTableName] + if !ok { + if hints.ForeignKeyCheckStrategy == ForeignKeyCheckStrategyStrict { + errs = errors.Join(errs, addEntityFkError(t, &ForeignKeyNonexistentReferencedTableError{Table: name, ReferencedTable: referencedTableName})) + continue + } + } + if _, ok := referencedEntity.(*CreateViewEntity); ok { + errs = errors.Join(errs, addEntityFkError(t, &ForeignKeyReferencesViewError{Table: name, ReferencedView: referencedTableName})) + continue + } + fkParents[referencedTableName] = true } if allNamesFoundInLowerLevel(nonSelfReferenceNames, iterationLevel) { @@ -258,6 +274,19 @@ func (s *Schema) normalize() error { s.foreignKeyParents = append(s.foreignKeyParents, t) } } + if len(dependencyLevels) != len(s.tables) { + // We have leftover tables. This can happen if there's foreign key loops + for _, t := range s.tables { + if _, ok := dependencyLevels[t.Name()]; ok { + // known table + continue + } + // Table is part of a loop or references a loop + s.sorted = append(s.sorted, t) + dependencyLevels[t.Name()] = iterationLevel // all in same level + } + } + // We now iterate all views. We iterate "dependency levels": // - first we want all views that only depend on tables. These are 1st level views. // - then we only want views that depend on 1st level views or on tables. These are 2nd level views. @@ -267,7 +296,7 @@ func (s *Schema) normalize() error { // It's possible that there's never been any tables in this schema. Which means // iterationLevel remains zero. // To deal with views, we must have iterationLevel at least 1. 
This is because any view reads - // from _something_: at the very least it reads from DUAL (inplicitly or explicitly). Which + // from _something_: at the very least it reads from DUAL (implicitly or explicitly). Which // puts the view at a higher level. if iterationLevel < 1 { iterationLevel = 1 @@ -293,6 +322,7 @@ func (s *Schema) normalize() error { } iterationLevel++ } + if len(s.sorted) != len(s.tables)+len(s.views) { // We have leftover tables or views. This can happen if the schema definition is invalid: // - a table's foreign key references a nonexistent table @@ -303,7 +333,8 @@ func (s *Schema) normalize() error { if _, ok := dependencyLevels[t.Name()]; !ok { // We _know_ that in this iteration, at least one foreign key is not found. // We return the first one. - return &ForeignKeyDependencyUnresolvedError{Table: t.Name()} + errs = errors.Join(errs, addEntityFkError(t, &ForeignKeyDependencyUnresolvedError{Table: t.Name()})) + s.sorted = append(s.sorted, t) } } for _, v := range s.views { @@ -328,12 +359,29 @@ func (s *Schema) normalize() error { return errors.Join(errs, err) } } - colTypeEqualForForeignKey := func(a, b *sqlparser.ColumnType) bool { - return a.Type == b.Type && - a.Unsigned == b.Unsigned && - a.Zerofill == b.Zerofill && - sqlparser.Equals.ColumnCharset(a.Charset, b.Charset) && - sqlparser.Equals.SliceOfString(a.EnumValues, b.EnumValues) + colTypeCompatibleForForeignKey := func(child, parent *sqlparser.ColumnType) bool { + if child.Type == parent.Type { + return true + } + if child.Type == "char" && parent.Type == "varchar" { + return true + } + if child.Type == "varchar" && parent.Type == "char" { + return true + } + return false + } + colTypeEqualForForeignKey := func(child, parent *sqlparser.ColumnType) bool { + if colTypeCompatibleForForeignKey(child, parent) && + child.Unsigned == parent.Unsigned && + child.Zerofill == parent.Zerofill && + sqlparser.Equals.ColumnCharset(child.Charset, parent.Charset) && + child.Options.Collate == 
parent.Options.Collate && + sqlparser.Equals.SliceOfString(child.EnumValues, parent.EnumValues) { + // Complete identify (other than precision which is ignored) + return true + } + return false } // Now validate foreign key columns: @@ -357,7 +405,12 @@ func (s *Schema) normalize() error { continue } referencedTableName := check.ReferenceDefinition.ReferencedTable.Name.String() - referencedTable := s.Table(referencedTableName) // we know this exists because we validated foreign key dependencies earlier on + referencedTable := s.Table(referencedTableName) + if referencedTable == nil { + // This can happen because earlier, when we validated existence of reference table, we took note + // of nonexisting tables, but kept on going. + continue + } referencedColumns := map[string]*sqlparser.ColumnDefinition{} for _, col := range referencedTable.CreateTable.TableSpec.Columns { @@ -452,7 +505,7 @@ func (s *Schema) diff(other *Schema, hints *DiffHints) (diffs []EntityDiff, err if _, ok := other.named[e.Name()]; !ok { // other schema does not have the entity // Entities are sorted in foreign key CREATE TABLE valid order (create parents first, then children). - // When issuing DROPs, we want to reverse that order. We want to first frop children, then parents. + // When issuing DROPs, we want to reverse that order. We want to first do it for children, then parents. // Instead of analyzing all relationships again, we just reverse the entire order of DROPs, foreign key // related or not. dropDiffs = append([]EntityDiff{e.Drop()}, dropDiffs...) @@ -613,7 +666,7 @@ func (s *Schema) ToQueries() []string { // ToSQL returns a SQL blob with ordered sequence of queries which can be applied to create the schema func (s *Schema) ToSQL() string { - var buf bytes.Buffer + var buf strings.Builder for _, query := range s.ToQueries() { buf.WriteString(query) buf.WriteString(";\n") @@ -624,7 +677,7 @@ func (s *Schema) ToSQL() string { // copy returns a shallow copy of the schema. 
This is used when applying changes for example. // applying changes will ensure we copy new entities themselves separately. func (s *Schema) copy() *Schema { - dup := newEmptySchema() + dup := newEmptySchema(s.env) dup.tables = make([]*CreateTableEntity, len(s.tables)) copy(dup.tables, s.tables) dup.views = make([]*CreateViewEntity, len(s.views)) @@ -640,7 +693,7 @@ func (s *Schema) copy() *Schema { // apply attempts to apply given list of diffs to this object. // These diffs are CREATE/DROP/ALTER TABLE/VIEW. -func (s *Schema) apply(diffs []EntityDiff) error { +func (s *Schema) apply(diffs []EntityDiff, hints *DiffHints) error { for _, diff := range diffs { switch diff := diff.(type) { case *CreateTableEntityDiff: @@ -750,7 +803,7 @@ func (s *Schema) apply(diffs []EntityDiff) error { return &UnsupportedApplyOperationError{Statement: diff.CanonicalStatementString()} } } - if err := s.normalize(); err != nil { + if err := s.normalize(hints); err != nil { return err } return nil @@ -761,23 +814,23 @@ func (s *Schema) apply(diffs []EntityDiff) error { // The operation does not modify this object. Instead, if successful, a new (modified) Schema is returned. func (s *Schema) Apply(diffs []EntityDiff) (*Schema, error) { dup := s.copy() - if err := dup.apply(diffs); err != nil { + if err := dup.apply(diffs, EmptyDiffHints()); err != nil { return nil, err } return dup, nil } -// SchemaDiff calulates a rich diff between this schema and the given schema. It builds on top of diff(): +// SchemaDiff calculates a rich diff between this schema and the given schema. 
It builds on top of diff(): // on top of the list of diffs that can take this schema into the given schema, this function also // evaluates the dependencies between those diffs, if any, and the resulting SchemaDiff object offers OrderedDiffs(), -// the safe ordering of diffs that, when appleid sequentially, does not produce any conflicts and keeps schema valid +// the safe ordering of diffs that, when applied sequentially, does not produce any conflicts and keeps schema valid // at each step. func (s *Schema) SchemaDiff(other *Schema, hints *DiffHints) (*SchemaDiff, error) { diffs, err := s.diff(other, hints) if err != nil { return nil, err } - schemaDiff := NewSchemaDiff(s) + schemaDiff := NewSchemaDiff(s, hints) schemaDiff.loadDiffs(diffs) // Utility function to see whether the given diff has dependencies on diffs that operate on any of the given named entities, @@ -916,12 +969,31 @@ func (s *Schema) SchemaDiff(other *Schema, hints *DiffHints) (*SchemaDiff, error // No need to handle. Any dependencies will be resolved by any of the other cases } } + + // Check and assign capabilities: + // Reminder: schemadiff assumes a MySQL flavor, so we only check for MySQL capabilities. + if capableOf := capabilities.MySQLVersionCapableOf(s.env.MySQLVersion()); capableOf != nil { + for _, diff := range schemaDiff.UnorderedDiffs() { + switch diff := diff.(type) { + case *AlterTableEntityDiff: + instantDDLCapable, err := AlterTableCapableOfInstantDDL(diff.AlterTable(), diff.from.CreateTable, capableOf) + if err != nil { + return nil, err + } + if instantDDLCapable { + diff.instantDDLCapability = InstantDDLCapabilityPossible + } else { + diff.instantDDLCapability = InstantDDLCapabilityImpossible + } + } + } + } return schemaDiff, nil } func (s *Schema) ValidateViewReferences() error { var errs error - schemaInformation := newDeclarativeSchemaInformation() + schemaInformation := newDeclarativeSchemaInformation(s.env) // Remember that s.Entities() is already ordered by dependency. 
ie. tables first, then views // that only depend on those tables (or on dual), then 2nd tier views, etc. @@ -955,7 +1027,7 @@ func (s *Schema) ValidateViewReferences() error { Column: e.Column, Ambiguous: true, } - case *semantics.ColumnNotFoundError: + case semantics.ColumnNotFoundError: return &InvalidColumnReferencedInViewError{ View: view.Name(), Column: e.Column.Name.String(), @@ -988,7 +1060,7 @@ func (s *Schema) getEntityColumnNames(entityName string, schemaInformation *decl case *CreateViewEntity: return s.getViewColumnNames(entity, schemaInformation) } - return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected entity type for %v", entityName) + return nil, &UnsupportedEntityError{Entity: entity.Name(), Statement: entity.Create().CanonicalStatementString()} } // getTableColumnNames returns the names of columns in given table. @@ -1000,10 +1072,8 @@ func (s *Schema) getTableColumnNames(t *CreateTableEntity) (columnNames []*sqlpa } // getViewColumnNames returns the names of aliased columns returned by a given view. 
-func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *declarativeSchemaInformation) ( - columnNames []*sqlparser.IdentifierCI, - err error, -) { +func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *declarativeSchemaInformation) ([]*sqlparser.IdentifierCI, error) { + var columnNames []*sqlparser.IdentifierCI for _, node := range v.Select.GetColumns() { switch node := node.(type) { case *sqlparser.StarExpr: @@ -1033,8 +1103,5 @@ func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *decl } } - if err != nil { - return nil, err - } return columnNames, nil } diff --git a/go/vt/schemadiff/schema_diff.go b/go/vt/schemadiff/schema_diff.go index 8fef7c29d28..3fbc1e6c9d3 100644 --- a/go/vt/schemadiff/schema_diff.go +++ b/go/vt/schemadiff/schema_diff.go @@ -92,7 +92,7 @@ func permutateDiffs(ctx context.Context, diffs []EntityDiff, callback func([]Ent if len(diffs) == 0 { return false, nil } - // Sort by a heristic (DROPs first, ALTERs next, CREATEs last). This ordering is then used first in the permutation + // Sort by a heuristic (DROPs first, ALTERs next, CREATEs last). This ordering is then used first in the permutation // search and serves as seed for the rest of permutations. return permDiff(ctx, diffs, callback, 0) @@ -165,6 +165,7 @@ func permDiff(ctx context.Context, a []EntityDiff, callback func([]EntityDiff) ( // Operations on SchemaDiff are not concurrency-safe. 
type SchemaDiff struct { schema *Schema + hints *DiffHints diffs []EntityDiff diffMap map[string]EntityDiff // key is diff's CanonicalStatementString() @@ -173,9 +174,10 @@ type SchemaDiff struct { r *mathutil.EquivalenceRelation // internal structure to help determine diffs } -func NewSchemaDiff(schema *Schema) *SchemaDiff { +func NewSchemaDiff(schema *Schema, hints *DiffHints) *SchemaDiff { return &SchemaDiff{ schema: schema, + hints: hints, dependencies: make(map[string]*DiffDependency), diffMap: make(map[string]EntityDiff), r: mathutil.NewEquivalenceRelation(), @@ -296,7 +298,7 @@ func (d *SchemaDiff) OrderedDiffs(ctx context.Context) ([]EntityDiff, error) { for i, diff := range d.UnorderedDiffs() { unorderedDiffsMap[diff.CanonicalStatementString()] = i } - // The order of classes in the quivalence relation is, generally speaking, loyal to the order of original diffs. + // The order of classes in the equivalence relation is, generally speaking, loyal to the order of original diffs. for _, class := range d.r.OrderedClasses() { classDiffs := []EntityDiff{} // Which diffs are in this equivalence class? @@ -318,7 +320,7 @@ func (d *SchemaDiff) OrderedDiffs(ctx context.Context) ([]EntityDiff, error) { // We want to apply the changes one by one, and validate the schema after each change for i := range permutatedDiffs { // apply inline - if err := permutationSchema.apply(permutatedDiffs[i : i+1]); err != nil { + if err := permutationSchema.apply(permutatedDiffs[i:i+1], d.hints); err != nil { // permutation is invalid return false // continue searching } @@ -341,5 +343,39 @@ func (d *SchemaDiff) OrderedDiffs(ctx context.Context) ([]EntityDiff, error) { // Done taking care of this equivalence class. } + if d.hints.ForeignKeyCheckStrategy != ForeignKeyCheckStrategyStrict { + // We may have allowed invalid foreign key dependencies along the way. But we must then validate the final schema + // to ensure that all foreign keys are valid. 
+ hints := *d.hints + hints.ForeignKeyCheckStrategy = ForeignKeyCheckStrategyStrict + if err := lastGoodSchema.normalize(&hints); err != nil { + return nil, &ImpossibleApplyDiffOrderError{ + UnorderedDiffs: d.UnorderedDiffs(), + ConflictingDiffs: d.UnorderedDiffs(), + } + } + } return orderedDiffs, nil } + +// InstantDDLCapability returns an overall summary of the ability of the diffs to run with ALGORITHM=INSTANT. +// It is a convenience method, whose logic anyone can reimplement. +func (d *SchemaDiff) InstantDDLCapability() InstantDDLCapability { + // The general logic: we return "InstantDDLCapabilityPossible" if there is one or more diffs that is capable of + // ALGORITHM=INSTANT, and zero or more diffs that are irrelevant, and no diffs that are impossible to run with + // ALGORITHM=INSTANT. + capability := InstantDDLCapabilityIrrelevant + for _, diff := range d.UnorderedDiffs() { + switch diff.InstantDDLCapability() { + case InstantDDLCapabilityUnknown: + return InstantDDLCapabilityUnknown // Early break + case InstantDDLCapabilityImpossible: + return InstantDDLCapabilityImpossible // Early break + case InstantDDLCapabilityPossible: + capability = InstantDDLCapabilityPossible + case InstantDDLCapabilityIrrelevant: + // do nothing + } + } + return capability +} diff --git a/go/vt/schemadiff/schema_diff_test.go b/go/vt/schemadiff/schema_diff_test.go index df7d893356f..270449841d9 100644 --- a/go/vt/schemadiff/schema_diff_test.go +++ b/go/vt/schemadiff/schema_diff_test.go @@ -23,6 +23,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/vtenv" ) func TestPermutations(t *testing.T) { @@ -160,14 +163,15 @@ func TestPermutations(t *testing.T) { }, } hints := &DiffHints{RangeRotationStrategy: RangeRotationDistinctStatements} + env := NewTestEnv() for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + 
fromSchema, err := NewSchemaFromQueries(env, tc.fromQueries) require.NoError(t, err) require.NotNil(t, fromSchema) - toSchema, err := NewSchemaFromQueries(tc.toQueries) + toSchema, err := NewSchemaFromQueries(env, tc.toQueries) require.NoError(t, err) require.NotNil(t, toSchema) @@ -258,19 +262,25 @@ func TestSchemaDiff(t *testing.T) { } ) tt := []struct { - name string - fromQueries []string - toQueries []string - expectDiffs int - expectDeps int - sequential bool - conflictingDiffs int - entityOrder []string // names of tables/views in expected diff order + name string + fromQueries []string + toQueries []string + expectDiffs int + expectDeps int + sequential bool + conflictingDiffs int + entityOrder []string // names of tables/views in expected diff order + mysqlServerVersion string + instantCapability InstantDDLCapability + fkStrategy int + expectError string + expectOrderedError string }{ { - name: "no change", - toQueries: createQueries, - entityOrder: []string{}, + name: "no change", + toQueries: createQueries, + entityOrder: []string{}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "three unrelated changes", @@ -280,8 +290,44 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id from t1", "create view v2 as select 1 from dual", }, - expectDiffs: 3, - entityOrder: []string{"t1", "t2", "v2"}, + expectDiffs: 3, + entityOrder: []string{"t1", "t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, + }, + { + name: "two identical tables, one with explicit charset, one without", + fromQueries: []string{ + "create table foobar (id int primary key, foo varchar(64) character set utf8mb3 collate utf8mb3_bin)", + }, + toQueries: []string{ + "create table foobar (id int primary key, foo varchar(64) collate utf8mb3_bin)", + }, + entityOrder: []string{}, + instantCapability: InstantDDLCapabilityIrrelevant, + }, + + { + name: "instant DDL possible on 8.0.32", + toQueries: []string{ + "create table t1 (id int primary key, ts 
timestamp, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t1", + }, + expectDiffs: 1, + entityOrder: []string{"t1"}, + instantCapability: InstantDDLCapabilityPossible, + }, + { + name: "instant DDL impossible on 8.0.17", + toQueries: []string{ + "create table t1 (id int primary key, ts timestamp, info int not null);", + "create table t2 (id int primary key, ts timestamp);", + "create view v1 as select id from t1", + }, + mysqlServerVersion: "8.0.17", + expectDiffs: 1, + entityOrder: []string{"t1"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "three unrelated changes 2", @@ -290,8 +336,9 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, v varchar);", "create view v2 as select 1 from dual", }, - expectDiffs: 3, - entityOrder: []string{"v1", "t2", "v2"}, + expectDiffs: 3, + entityOrder: []string{"v1", "t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, // Subsequent { @@ -301,9 +348,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, v varchar, fulltext key ftk1 (v));", "create view v1 as select id from t1", }, - expectDiffs: 1, - expectDeps: 0, - entityOrder: []string{"t2"}, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { // MySQL limitation: you cannot add two FULLTEXT keys in a single statement. 
`schemadiff` complies @@ -314,10 +362,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, v varchar, fulltext key ftk1 (v), fulltext key ftk2 (v));", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t2", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t2", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add partition", @@ -331,9 +380,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp) partition by range (id) (partition p0 values less than (0), partition p1 values less than (1), partition p2 values less than (2));", "create view v1 as select id from t1", }, - expectDiffs: 1, - expectDeps: 0, - entityOrder: []string{"t2"}, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { // In MySQL, you cannot ALTER TABLE ADD COLUMN ..., ADD PARTITION in a single statement @@ -348,10 +398,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, v varchar) partition by range (id) (partition p0 values less than (0), partition p1 values less than (1), partition p2 values less than (2));", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t2", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t2", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add view", @@ -359,8 +410,9 @@ func TestSchemaDiff(t *testing.T) { createQueries, "create view v2 as select id from t2", ), - expectDiffs: 1, - entityOrder: []string{"v2"}, + expectDiffs: 1, + entityOrder: []string{"v2"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "add view, alter table", @@ -370,9 +422,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select 
id from t1", "create view v2 as select id from t2", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t2", "v2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "alter view, alter table", @@ -381,10 +434,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp);", "create view v1 as select the_id from t1", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t1", "v1"}, - conflictingDiffs: 2, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "v1"}, + conflictingDiffs: 2, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "alter table, add view", @@ -394,9 +448,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id from t1", "create view v2 as select id, v from t2", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t2", "v2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "create view depending on 2 tables, alter table", @@ -406,9 +461,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id from t1", "create view v2 as select info, v from t1, t2", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t2", "v2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "create view depending on 2 tables, alter other table", @@ -420,9 +476,10 @@ func TestSchemaDiff(t *testing.T) { "create view v2 as select info, ts from t1, t2", // "create view v2 as select info, ts from t1, t2", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t1", "v2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "create view depending on 2 tables, alter both tables", @@ -432,9 +489,10 @@ func TestSchemaDiff(t 
*testing.T) { "create view v1 as select id from t1", "create view v2 as select info, ts from t1, t2", }, - expectDiffs: 3, - expectDeps: 2, - entityOrder: []string{"t1", "t2", "v2"}, + expectDiffs: 3, + expectDeps: 2, + entityOrder: []string{"t1", "t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "alter view depending on 2 tables, uses new column, alter tables", @@ -444,9 +502,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id from t1", "create view v2 as select info, v from t1, t2", }, - expectDiffs: 3, - expectDeps: 2, - entityOrder: []string{"t1", "t2", "v2"}, + expectDiffs: 3, + expectDeps: 2, + entityOrder: []string{"t1", "t2", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "drop view", @@ -454,9 +513,10 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id int primary key, info int not null);", "create table t2 (id int primary key, ts timestamp);", }, - expectDiffs: 1, - expectDeps: 0, - entityOrder: []string{"v1"}, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"v1"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "drop view, alter dependent table", @@ -464,27 +524,30 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id int primary key, info int not null, dt datetime);", "create table t2 (id int primary key, ts timestamp);", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"v1", "t1"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "t1"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "drop view, drop dependent table", toQueries: []string{ "create table t2 (id int primary key, ts timestamp);", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"v1", "t1"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "t1"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "drop view, drop unrelated table", toQueries: []string{ "create table t1 (id int primary key, info int not 
null);", }, - expectDiffs: 2, - expectDeps: 0, - entityOrder: []string{"v1", "t2"}, + expectDiffs: 2, + expectDeps: 0, + entityOrder: []string{"v1", "t2"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "alter view, drop table", @@ -492,9 +555,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp);", "create view v1 as select id from t2", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"v1", "t1"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "t1"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "alter view, add view", @@ -504,9 +568,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id, info from t1", "create view v2 as select info from v1", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"v1", "v2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v1", "v2"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "alter view, add view, 2", @@ -516,9 +581,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id, ts from v2", "create view v2 as select id, ts from t2", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"v2", "v1"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"v2", "v1"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "alter table, alter view, add view", @@ -528,9 +594,10 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select ts from t2", "create view v2 as select v from t2", }, - expectDiffs: 3, - expectDeps: 2, - entityOrder: []string{"t2", "v1", "v2"}, + expectDiffs: 3, + expectDeps: 2, + entityOrder: []string{"t2", "v1", "v2"}, + instantCapability: InstantDDLCapabilityPossible, }, { name: "alter table, alter view, impossible sequence", @@ -542,9 +609,10 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id int primary key, newcol int not null);", "create view v1 as select id, newcol from t1", }, - expectDiffs: 2, - 
expectDeps: 1, - conflictingDiffs: 2, + expectDiffs: 2, + expectDeps: 1, + conflictingDiffs: 2, + instantCapability: InstantDDLCapabilityPossible, }, // FKs @@ -554,8 +622,9 @@ func TestSchemaDiff(t *testing.T) { createQueries, "create table t3 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", ), - expectDiffs: 1, - entityOrder: []string{"t3"}, + expectDiffs: 1, + entityOrder: []string{"t3"}, + instantCapability: InstantDDLCapabilityIrrelevant, }, { name: "create two tables with fk", @@ -564,10 +633,38 @@ func TestSchemaDiff(t *testing.T) { "create table tp (id int primary key, info int not null);", "create table t3 (id int primary key, ts timestamp, tp_id int, foreign key (tp_id) references tp (id) on delete no action);", ), - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"tp", "t3"}, - sequential: true, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"tp", "t3"}, + sequential: true, + instantCapability: InstantDDLCapabilityIrrelevant, + }, + { + name: "create two tables valid fk cycle", + toQueries: append( + createQueries, + "create table t11 (id int primary key, i int, constraint f1101 foreign key (i) references t12 (id) on delete restrict);", + "create table t12 (id int primary key, i int, constraint f1201 foreign key (i) references t11 (id) on delete set null);", + ), + expectDiffs: 2, + expectDeps: 2, + sequential: true, + fkStrategy: ForeignKeyCheckStrategyStrict, + expectOrderedError: "no valid applicable order for diffs", + }, + { + name: "create two tables valid fk cycle, fk ignore", + toQueries: append( + createQueries, + "create table t12 (id int primary key, i int, constraint f1201 foreign key (i) references t11 (id) on delete set null);", + "create table t11 (id int primary key, i int, constraint f1101 foreign key (i) references t12 (id) on delete restrict);", + ), + expectDiffs: 2, + expectDeps: 2, + entityOrder: []string{"t11", "t12"}, // Note that the tables were 
reordered lexicographically + sequential: true, + instantCapability: InstantDDLCapabilityIrrelevant, + fkStrategy: ForeignKeyCheckStrategyIgnore, }, { name: "add FK", @@ -576,9 +673,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", "create view v1 as select id from t1", }, - expectDiffs: 1, - expectDeps: 0, - entityOrder: []string{"t2"}, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add FK pointing to new table", @@ -588,10 +686,55 @@ func TestSchemaDiff(t *testing.T) { "create table tp (id int primary key, info int not null);", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"tp", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"tp", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, + }, + { + name: "add two valid fk cycle references", + toQueries: []string{ + "create table t1 (id int primary key, info int not null, i int, constraint f1 foreign key (i) references t2 (id) on delete restrict);", + "create table t2 (id int primary key, ts timestamp, i int, constraint f2 foreign key (i) references t1 (id) on delete set null);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 2, + sequential: false, + fkStrategy: ForeignKeyCheckStrategyStrict, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, + }, + { + name: "add a table and a valid fk cycle references", + toQueries: []string{ + "create table t0 (id int primary key, info int not null, i int, constraint f1 foreign key (i) references t2 (id) on delete restrict);", + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, i int, constraint f2 foreign key (i) references t0 (id) 
on delete set null);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 2, + sequential: true, + fkStrategy: ForeignKeyCheckStrategyStrict, + entityOrder: []string{"t0", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, + }, + { + name: "add a table and a valid fk cycle references, lexicographically desc", + toQueries: []string{ + "create table t1 (id int primary key, info int not null);", + "create table t2 (id int primary key, ts timestamp, i int, constraint f2 foreign key (i) references t9 (id) on delete set null);", + "create table t9 (id int primary key, info int not null, i int, constraint f1 foreign key (i) references t2 (id) on delete restrict);", + "create view v1 as select id from t1", + }, + expectDiffs: 2, + expectDeps: 2, + sequential: true, + fkStrategy: ForeignKeyCheckStrategyStrict, + entityOrder: []string{"t9", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add FK, unrelated alter", @@ -600,9 +743,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add FK, add unrelated column", @@ -611,9 +755,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add FK, alter unrelated column", @@ -622,9 +767,10 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary 
key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add FK, alter referenced column", @@ -633,10 +779,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_id bigint, foreign key (t1_id) references t1 (id) on delete no action);", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add column. create FK table referencing new column", @@ -646,10 +793,11 @@ func TestSchemaDiff(t *testing.T) { "create view v1 as select id from t1", "create table t3 (id int primary key, ts timestamp, t1_p int, foreign key (t1_p) references t1 (p) on delete no action);", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t1", "t3"}, - sequential: true, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t1", "t3"}, + sequential: true, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add column. add FK referencing new column", @@ -658,10 +806,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_p int, foreign key (t1_p) references t1 (p) on delete no action);", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add column. 
add FK referencing new column, alphabetically desc", @@ -670,21 +819,24 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, p int, key p_idx (p));", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t2", "t1"}, - }, { + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t2", "t1"}, + instantCapability: InstantDDLCapabilityImpossible, + }, + { name: "add index on parent. add FK to index column", toQueries: []string{ "create table t1 (id int primary key, info int not null, key info_idx(info));", "create table t2 (id int primary key, ts timestamp, t1_info int not null, constraint parent_info_fk foreign key (t1_info) references t1 (info));", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add index on parent with existing index. 
add FK to index column", @@ -698,10 +850,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_info int not null, constraint parent_info_fk foreign key (t1_info) references t1 (info));", "create view v1 as select id from t1", }, - expectDiffs: 2, - expectDeps: 1, - sequential: false, - entityOrder: []string{"t1", "t2"}, + expectDiffs: 2, + expectDeps: 1, + sequential: false, + entityOrder: []string{"t1", "t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "modify fk column types, fail", @@ -713,10 +866,11 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id bigint primary key);", "create table t2 (id int primary key, ts timestamp, t1_id bigint, foreign key (t1_id) references t1 (id) on delete no action);", }, - expectDiffs: 2, - expectDeps: 0, - sequential: false, - conflictingDiffs: 1, + expectDiffs: 2, + expectDeps: 0, + sequential: false, + conflictingDiffs: 1, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add hierarchical constraints", @@ -734,10 +888,11 @@ func TestSchemaDiff(t *testing.T) { "create table t4 (id int primary key, ref int, key ref_idx (ref), foreign key (ref) references t3 (id) on delete no action);", "create table t5 (id int primary key, ref int, key ref_idx (ref), foreign key (ref) references t1 (id) on delete no action);", }, - expectDiffs: 4, - expectDeps: 2, // t2<->t3, t3<->t4 - sequential: false, - entityOrder: []string{"t2", "t3", "t4", "t5"}, + expectDiffs: 4, + expectDeps: 2, // t2<->t3, t3<->t4 + sequential: false, + entityOrder: []string{"t2", "t3", "t4", "t5"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "drop fk", @@ -746,10 +901,11 @@ func TestSchemaDiff(t *testing.T) { "create table t2 (id int primary key, ts timestamp, t1_id int, foreign key (t1_id) references t1 (id) on delete no action);", "create view v1 as select id from t1", }, - toQueries: createQueries, - expectDiffs: 1, - expectDeps: 0, - entityOrder: 
[]string{"t2"}, + toQueries: createQueries, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t2"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "drop fk, drop table", @@ -760,9 +916,10 @@ func TestSchemaDiff(t *testing.T) { toQueries: []string{ "create table t2 (id int primary key, ts timestamp, t1_id int);", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t2", "t1"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "t1"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "drop fk, drop column", @@ -774,9 +931,10 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id int primary key, info int not null);", "create table t2 (id int primary key, ts timestamp, t1_p int);", }, - expectDiffs: 2, - expectDeps: 1, - entityOrder: []string{"t2", "t1"}, + expectDiffs: 2, + expectDeps: 1, + entityOrder: []string{"t2", "t1"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "reverse fk", @@ -788,9 +946,10 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id int primary key, p int, key p_idx (p), foreign key (p) references t2 (p) on delete no action);", "create table t2 (id int primary key, p int, key p_idx (p));", }, - expectDiffs: 2, - expectDeps: 2, - entityOrder: []string{"t2", "t1"}, + expectDiffs: 2, + expectDeps: 2, + entityOrder: []string{"t2", "t1"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "add and drop FK, add and drop column, impossible order", @@ -802,10 +961,11 @@ func TestSchemaDiff(t *testing.T) { "create table t1 (id int primary key, q int, key q_idx (q));", "create table t2 (id int primary key, q int, key q_idx (q), foreign key (q) references t1 (q) on delete no action);", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - conflictingDiffs: 2, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + conflictingDiffs: 2, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "two identical foreign keys in table, drop 
one", @@ -817,9 +977,10 @@ func TestSchemaDiff(t *testing.T) { "create table parent (id int primary key)", "create table t1 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id))", }, - expectDiffs: 1, - expectDeps: 0, - entityOrder: []string{"t1"}, + expectDiffs: 1, + expectDeps: 0, + entityOrder: []string{"t1"}, + instantCapability: InstantDDLCapabilityImpossible, }, { name: "test", @@ -830,27 +991,42 @@ func TestSchemaDiff(t *testing.T) { "CREATE TABLE t1 (id bigint NOT NULL, name varchar(255), PRIMARY KEY (id), KEY idx_name (name))", "CREATE TABLE t3 (id bigint NOT NULL, name varchar(255), t1_id bigint, PRIMARY KEY (id), KEY t1_id (t1_id), KEY nameidx (name), CONSTRAINT t3_ibfk_1 FOREIGN KEY (t1_id) REFERENCES t1 (id) ON DELETE CASCADE ON UPDATE CASCADE, CONSTRAINT t3_ibfk_2 FOREIGN KEY (name) REFERENCES t1 (name) ON DELETE CASCADE ON UPDATE CASCADE)", }, - expectDiffs: 2, - expectDeps: 1, - sequential: true, - entityOrder: []string{"t1", "t3"}, + expectDiffs: 2, + expectDeps: 1, + sequential: true, + entityOrder: []string{"t1", "t3"}, + instantCapability: InstantDDLCapabilityImpossible, }, } - hints := &DiffHints{RangeRotationStrategy: RangeRotationDistinctStatements} + baseHints := &DiffHints{ + RangeRotationStrategy: RangeRotationDistinctStatements, + } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { + vtenv, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: tc.mysqlServerVersion, + }) + require.NoError(t, err) + env := NewEnv(vtenv, collations.CollationUtf8mb4ID) + if tc.fromQueries == nil { tc.fromQueries = createQueries } - fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + fromSchema, err := NewSchemaFromQueries(env, tc.fromQueries) require.NoError(t, err) require.NotNil(t, fromSchema) - toSchema, err := NewSchemaFromQueries(tc.toQueries) + toSchema, err := NewSchemaFromQueries(env, tc.toQueries) require.NoError(t, err) require.NotNil(t, toSchema) - schemaDiff, err := 
fromSchema.SchemaDiff(toSchema, hints) + hints := *baseHints + hints.ForeignKeyCheckStrategy = tc.fkStrategy + schemaDiff, err := fromSchema.SchemaDiff(toSchema, &hints) + if tc.expectError != "" { + assert.ErrorContains(t, err, tc.expectError) + return + } require.NoError(t, err) allDiffs := schemaDiff.UnorderedDiffs() @@ -869,6 +1045,10 @@ func TestSchemaDiff(t *testing.T) { assert.Equal(t, tc.sequential, schemaDiff.HasSequentialExecutionDependencies()) orderedDiffs, err := schemaDiff.OrderedDiffs(ctx) + if tc.expectOrderedError != "" { + assert.ErrorContains(t, err, tc.expectOrderedError) + return + } if tc.conflictingDiffs > 0 { assert.Error(t, err) impossibleOrderErr, ok := err.(*ImpossibleApplyDiffOrderError) @@ -902,6 +1082,9 @@ func TestSchemaDiff(t *testing.T) { _, err := schemaDiff.r.ElementClass(s) require.NoError(t, err) } + instantCapability := schemaDiff.InstantDDLCapability() + assert.Equal(t, tc.instantCapability, instantCapability) }) + } } diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go index 79bf44117e2..7eab685f0d7 100644 --- a/go/vt/schemadiff/schema_test.go +++ b/go/vt/schemadiff/schema_test.go @@ -18,7 +18,7 @@ package schemadiff import ( "fmt" - "math/rand" + "math/rand/v2" "sort" "strings" "testing" @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/errors" + vterrors "vitess.io/vitess/go/errors" "vitess.io/vitess/go/vt/sqlparser" ) @@ -83,7 +83,7 @@ var schemaTestExpectSortedViewNames = []string{ var schemaTestToSQL = "CREATE TABLE `t1` (\n\t`id` int\n);\nCREATE TABLE `t2` (\n\t`id` int\n);\nCREATE TABLE `t3` (\n\t`id` int,\n\t`type` enum('foo', 'bar') NOT NULL DEFAULT 'foo'\n);\nCREATE TABLE `t5` (\n\t`id` int\n);\nCREATE VIEW `v0` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v3` AS SELECT *, `id` + 1 AS `id_plus`, `id` + 2 FROM `t3` AS `t3`;\nCREATE VIEW `v9` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v1` AS SELECT * FROM `v3`;\nCREATE VIEW 
`v2` AS SELECT * FROM `v3`, `t2`;\nCREATE VIEW `v4` AS SELECT * FROM `t2` AS `something_else`, `v3`;\nCREATE VIEW `v5` AS SELECT * FROM `t1`, (SELECT * FROM `v3`) AS `some_alias`;\nCREATE VIEW `v6` AS SELECT * FROM `v4`;\n" func TestNewSchemaFromQueries(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(NewTestEnv(), schemaTestCreateQueries) assert.NoError(t, err) require.NotNil(t, schema) @@ -93,7 +93,7 @@ func TestNewSchemaFromQueries(t *testing.T) { } func TestNewSchemaFromSQL(t *testing.T) { - schema, err := NewSchemaFromSQL(strings.Join(schemaTestCreateQueries, ";")) + schema, err := NewSchemaFromSQL(NewTestEnv(), strings.Join(schemaTestCreateQueries, ";")) assert.NoError(t, err) require.NotNil(t, schema) @@ -107,7 +107,7 @@ func TestNewSchemaFromQueriesWithDuplicate(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v2 as select * from v1, t2", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(NewTestEnv(), queries) assert.Error(t, err) assert.EqualError(t, err, (&ApplyDuplicateEntityError{Entity: "v2"}).Error()) } @@ -117,7 +117,7 @@ func TestNewSchemaFromQueriesUnresolved(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v7 as select * from v8, t2", ) - schema, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(NewTestEnv(), queries) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) v := schema.sorted[len(schema.sorted)-1] @@ -130,7 +130,7 @@ func TestNewSchemaFromQueriesUnresolvedAlias(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v7 as select * from something_else as t1, t2", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(NewTestEnv(), queries) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } @@ -140,7 +140,7 @@ func 
TestNewSchemaFromQueriesViewFromDual(t *testing.T) { queries := []string{ "create view v20 as select 1 from dual", } - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(NewTestEnv(), queries) assert.NoError(t, err) } @@ -149,7 +149,7 @@ func TestNewSchemaFromQueriesViewFromDualImplicit(t *testing.T) { queries := []string{ "create view v20 as select 1", } - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(NewTestEnv(), queries) assert.NoError(t, err) } @@ -159,14 +159,14 @@ func TestNewSchemaFromQueriesLoop(t *testing.T) { "create view v7 as select * from v8, t2", "create view v8 as select * from t1, v7", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(NewTestEnv(), queries) require.Error(t, err) - err = errors.UnwrapFirst(err) + err = vterrors.UnwrapFirst(err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } func TestToSQL(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(NewTestEnv(), schemaTestCreateQueries) assert.NoError(t, err) require.NotNil(t, schema) @@ -175,7 +175,7 @@ func TestToSQL(t *testing.T) { } func TestCopy(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(NewTestEnv(), schemaTestCreateQueries) assert.NoError(t, err) require.NotNil(t, schema) @@ -222,7 +222,7 @@ func TestGetViewDependentTableNames(t *testing.T) { } for _, ts := range tt { t.Run(ts.view, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.view) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.view) require.NoError(t, err) createView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) @@ -262,7 +262,7 @@ func TestGetForeignKeyParentTableNames(t *testing.T) { } for _, ts := range tt { t.Run(ts.table, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.table) + stmt, err := 
sqlparser.NewTestParser().ParseStrictDDL(ts.table) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -298,7 +298,7 @@ func TestTableForeignKeyOrdering(t *testing.T) { "v13", "v09", } - schema, err := NewSchemaFromQueries(fkQueries) + schema, err := NewSchemaFromQueries(NewTestEnv(), fkQueries) require.NoError(t, err) assert.NotNil(t, schema) @@ -331,8 +331,77 @@ func TestInvalidSchema(t *testing.T) { expectErr: &ForeignKeyColumnCountMismatchError{Table: "t11", Constraint: "f11", ColumnCount: 2, ReferencedTable: "t11", ReferencedColumnCount: 1}, }, { - schema: "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", - expectErr: &ForeignKeyDependencyUnresolvedError{Table: "t11"}, + schema: "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12 (id) on delete restrict)", + expectErr: &ForeignKeyNonexistentReferencedTableError{Table: "t11", ReferencedTable: "t12"}, + }, + { + schema: "create view v as select 1 as id from dual; create table t11 (id int primary key, i int, constraint fv foreign key (i) references v (id) on delete restrict)", + expectErr: &ForeignKeyReferencesViewError{Table: "t11", ReferencedView: "v"}, + }, + { + // t11 self loop + schema: "create table t11 (id int primary key, i int, constraint f11 foreign key (i) references t11 (id) on delete restrict)", + }, + { + // t12<->t11 + schema: ` + create table t11 (id int primary key, i int, constraint f1103 foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1203 foreign key (i) references t11 (id) on delete restrict) + `, + }, + { + // t12<->t11 + schema: ` + create table t11 (id int primary key, i int, constraint f1101 foreign key (i) references t12 (i) on delete restrict); + create table t12 (id int primary key, i int, constraint f1201 foreign key (i) references t11 (i) on delete set null) + `, + 
}, + { + // t10, t12<->t11 + schema: ` + create table t10(id int primary key); + create table t11 (id int primary key, i int, constraint f1102 foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1202 foreign key (i) references t11 (id) on delete restrict) + `, + }, + { + // t10, t12<->t11<-t13 + schema: ` + create table t10(id int primary key); + create table t11 (id int primary key, i int, constraint f1104 foreign key (i) references t12 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1204 foreign key (i) references t11 (id) on delete restrict); + create table t13 (id int primary key, i int, constraint f13 foreign key (i) references t11 (id) on delete restrict)`, + }, + { + // t10 + // ^ + // | + //t12<->t11<-t13 + schema: ` + create table t10(id int primary key); + create table t11 (id int primary key, i int, i10 int, constraint f111205 foreign key (i) references t12 (id) on delete restrict, constraint f111005 foreign key (i10) references t10 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1205 foreign key (id) references t11 (i) on delete restrict); + create table t13 (id int primary key, i int, constraint f1305 foreign key (i) references t11 (id) on delete restrict) + `, + }, + { + // t10, t12<->t11<-t13<-t14 + schema: ` + create table t10(id int primary key); + create table t11 (id int primary key, i int, i10 int, constraint f1106 foreign key (i) references t12 (id) on delete restrict, constraint f111006 foreign key (i10) references t10 (id) on delete restrict); + create table t12 (id int primary key, i int, constraint f1206 foreign key (i) references t11 (id) on delete restrict); + create table t13 (id int primary key, i int, constraint f1306 foreign key (i) references t11 (id) on delete restrict); + create table t14 (id int primary key, i int, constraint f1406 foreign key (i) references t13 (id) on delete restrict) + `, + 
}, + { + // t10, t12<-t11<-t13<-t12 + schema: ` + create table t10(id int primary key); + create table t11 (id int primary key, i int, key i_idx (i), i10 int, constraint f1107 foreign key (i) references t12 (id), constraint f111007 foreign key (i10) references t10 (id)); + create table t12 (id int primary key, i int, key i_idx (i), constraint f1207 foreign key (id) references t13 (i)); + create table t13 (id int primary key, i int, key i_idx (i), constraint f1307 foreign key (i) references t11 (i)); + `, }, { schema: "create table t11 (id int primary key, i int, key ix(i), constraint f11 foreign key (i) references t11(id2) on delete restrict)", @@ -362,19 +431,51 @@ func TestInvalidSchema(t *testing.T) { schema: "create table t10(id bigint primary key); create table t11 (id int primary key, i varchar(100), key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"}, }, + { + schema: "create table t10(id int primary key, pid int null, key (pid)); create table t11 (id int primary key, pid int unsigned, key ix(pid), constraint f10 foreign key (pid) references t10(pid))", + expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "pid", ReferencedTable: "t10", ReferencedColumn: "pid"}, + }, + { + // NULL vs NOT NULL should be fine + schema: "create table t10(id int primary key, pid int null, key (pid)); create table t11 (id int primary key, pid int not null, key ix(pid), constraint f10 foreign key (pid) references t10(pid))", + }, + { + // NOT NULL vs NULL should be fine + schema: "create table t10(id int primary key, pid int not null, key (pid)); create table t11 (id int primary key, pid int null, key ix(pid), constraint f10 foreign key (pid) references t10(pid))", + }, { // InnoDB allows different string length schema: "create table t10(id varchar(50) primary key); create table 
t11 (id int primary key, i varchar(100), key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", }, + { + // explicit charset/collation + schema: "create table t10(id varchar(50) charset utf8mb4 collate utf8mb4_0900_ai_ci primary key); create table t11 (id int primary key, i varchar(100) charset utf8mb4 collate utf8mb4_0900_ai_ci, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", + }, + { + // allowed: varchar->char + schema: "create table t10(id varchar(50) charset utf8mb4 collate utf8mb4_0900_ai_ci primary key); create table t11 (id int primary key, i char(100) charset utf8mb4 collate utf8mb4_0900_ai_ci, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", + }, + { + // allowed: char->varchar + schema: "create table t10(id char(50) charset utf8mb4 collate utf8mb4_0900_ai_ci primary key); create table t11 (id int primary key, i varchar(50) charset utf8mb4 collate utf8mb4_0900_ai_ci, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", + }, { schema: "create table t10(id varchar(50) charset utf8mb3 primary key); create table t11 (id int primary key, i varchar(100) charset utf8mb4, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"}, }, + { + schema: "create table t10(id varchar(50) charset utf8mb4 collate utf8mb4_0900_ai_ci primary key); create table t11 (id int primary key, i varchar(100) charset utf8mb4 collate utf8mb4_general_ci, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", + expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"}, + }, + { + schema: "create table t10(id VARCHAR(50) charset utf8mb4 collate utf8mb4_0900_ai_ci primary key); create table t11 
(id int primary key, i VARCHAR(100) charset utf8mb4 collate utf8mb4_general_ci, key ix(i), constraint f10 foreign key (i) references t10(id) on delete restrict)", + expectErr: &ForeignKeyColumnTypeMismatchError{Table: "t11", Constraint: "f10", Column: "i", ReferencedTable: "t10", ReferencedColumn: "id"}, + }, } for _, ts := range tt { t.Run(ts.schema, func(t *testing.T) { - _, err := NewSchemaFromSQL(ts.schema) + _, err := NewSchemaFromSQL(NewTestEnv(), ts.schema) if ts.expectErr == nil { assert.NoError(t, err) } else { @@ -388,12 +489,21 @@ func TestInvalidSchema(t *testing.T) { func TestInvalidTableForeignKeyReference(t *testing.T) { { fkQueries := []string{ + "create table t10 (id int primary key)", "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", "create table t15(id int, primary key(id))", } - _, err := NewSchemaFromQueries(fkQueries) + s, err := NewSchemaFromQueries(NewTestEnv(), fkQueries) assert.Error(t, err) - assert.EqualError(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error()) + // Even though there's errors, we still expect the schema to have been created. + assert.NotNil(t, s) + // Even though t11 caused an error, we still expect the schema to have parsed all tables. + assert.Equalf(t, 3, len(s.Entities()), "found: %+v", s.EntityNames()) + t11 := s.Table("t11") + assert.NotNil(t, t11) + // validate t11 table definition is complete, even though it was invalid. 
+ assert.Equal(t, "create table t11 (\n\tid int,\n\ti int,\n\tprimary key (id),\n\tkey f12 (i),\n\tconstraint f12 foreign key (i) references t12 (id) on delete restrict\n)", t11.Create().StatementString()) + assert.EqualError(t, err, (&ForeignKeyNonexistentReferencedTableError{Table: "t11", ReferencedTable: "t12"}).Error()) } { fkQueries := []string{ @@ -401,9 +511,37 @@ func TestInvalidTableForeignKeyReference(t *testing.T) { "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(id) on delete restrict)", } - _, err := NewSchemaFromQueries(fkQueries) + _, err := NewSchemaFromQueries(NewTestEnv(), fkQueries) + assert.NoError(t, err) + } + { + fkQueries := []string{ + "create table t13 (id int primary key, i int, constraint f11 foreign key (i) references t11(i) on delete restrict)", + "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(i) on delete restrict)", + "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(i) on delete restrict)", + } + _, err := NewSchemaFromQueries(NewTestEnv(), fkQueries) + assert.NoError(t, err) + } + { + fkQueries := []string{ + "create table t13 (id int primary key, i int, constraint f11 foreign key (i) references t11(id) on delete restrict)", + "create table t11 (id int primary key, i int, constraint f0 foreign key (i) references t0(id) on delete restrict)", + "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(id) on delete restrict)", + } + _, err := NewSchemaFromQueries(NewTestEnv(), fkQueries) + assert.Error(t, err) + assert.ErrorContains(t, err, (&ForeignKeyNonexistentReferencedTableError{Table: "t11", ReferencedTable: "t0"}).Error()) + } + { + fkQueries := []string{ + "create table t13 (id int primary key, i int, constraint f11 foreign key (i) references 
t11(id) on delete restrict, constraint f12 foreign key (i) references t12(id) on delete restrict)", + "create table t11 (id int primary key, i int, constraint f0 foreign key (i) references t0(id) on delete restrict)", + "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(id) on delete restrict)", + } + _, err := NewSchemaFromQueries(NewTestEnv(), fkQueries) assert.Error(t, err) - assert.EqualError(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error()) + assert.ErrorContains(t, err, (&ForeignKeyNonexistentReferencedTableError{Table: "t11", ReferencedTable: "t0"}).Error()) } } @@ -424,7 +562,7 @@ func TestGetEntityColumnNames(t *testing.T) { "create view vb as select *, now() from v8", } - schema, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(NewTestEnv(), queries) require.NoError(t, err) require.NotNil(t, schema) @@ -446,7 +584,7 @@ func TestGetEntityColumnNames(t *testing.T) { entities := schema.Entities() require.Equal(t, len(entities), len(expectedColNames)) - tcmap := newDeclarativeSchemaInformation() + tcmap := newDeclarativeSchemaInformation(NewTestEnv()) // we test by order of dependency: for _, e := range entities { tbl := e.Name() @@ -702,13 +840,13 @@ func TestViewReferences(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - schema, err := NewSchemaFromQueries(ts.queries) + schema, err := NewSchemaFromQueries(NewTestEnv(), ts.queries) if ts.expectErr == nil { require.NoError(t, err) require.NotNil(t, schema) } else { require.Error(t, err) - err = errors.UnwrapFirst(err) + err = vterrors.UnwrapFirst(err) require.Equal(t, ts.expectErr, err, "received error: %v", err) } }) @@ -716,7 +854,7 @@ func TestViewReferences(t *testing.T) { } // TestMassiveSchema loads thousands of tables into one schema, and thousands of tables, some of which are different, into another schema. -// It compares the two shemas. +// It compares the two schemas. 
// The objective of this test is to verify that execution time is _reasonable_. Since this will run in GitHub CI, which is very slow, we allow // for 1 minute total for all operations. func TestMassiveSchema(t *testing.T) { @@ -794,9 +932,9 @@ func TestMassiveSchema(t *testing.T) { queries1 = append(queries1, query) tableNames[tableName] = true } - schema0, err = NewSchemaFromQueries(queries0) + schema0, err = NewSchemaFromQueries(NewTestEnv(), queries0) require.NoError(t, err) - schema1, err = NewSchemaFromQueries(queries1) + schema1, err = NewSchemaFromQueries(NewTestEnv(), queries1) require.NoError(t, err) require.Equal(t, countModifiedTables, modifyTables) @@ -813,7 +951,7 @@ func TestMassiveSchema(t *testing.T) { }) t.Run("evaluating diff", func(t *testing.T) { - schemaDiff, err := schema0.SchemaDiff(schema1, &DiffHints{}) + schemaDiff, err := schema0.SchemaDiff(schema1, EmptyDiffHints()) require.NoError(t, err) diffs := schemaDiff.UnorderedDiffs() require.NotEmpty(t, diffs) diff --git a/go/vt/schemadiff/semantics.go b/go/vt/schemadiff/semantics.go index da9c6b1e2a9..ccbf654f566 100644 --- a/go/vt/schemadiff/semantics.go +++ b/go/vt/schemadiff/semantics.go @@ -22,6 +22,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -34,15 +35,17 @@ var semanticKS = &vindexes.Keyspace{ var _ semantics.SchemaInformation = (*declarativeSchemaInformation)(nil) -// declarativeSchemaInformation is a utility wrapper arounf FakeSI, and adds a few utility functions +// declarativeSchemaInformation is a utility wrapper around FakeSI, and adds a few utility functions // to make it more simple and accessible to schemadiff's logic. 
type declarativeSchemaInformation struct { Tables map[string]*vindexes.Table + env *Environment } -func newDeclarativeSchemaInformation() *declarativeSchemaInformation { +func newDeclarativeSchemaInformation(env *Environment) *declarativeSchemaInformation { return &declarativeSchemaInformation{ Tables: make(map[string]*vindexes.Table), + env: env, } } @@ -53,7 +56,11 @@ func (si *declarativeSchemaInformation) FindTableOrVindex(tablename sqlparser.Ta } func (si *declarativeSchemaInformation) ConnCollation() collations.ID { - return 45 + return si.env.DefaultColl +} + +func (si *declarativeSchemaInformation) Environment() *vtenv.Environment { + return si.env.Environment } func (si *declarativeSchemaInformation) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { @@ -64,6 +71,14 @@ func (si *declarativeSchemaInformation) KeyspaceError(keyspace string) error { return nil } +func (si *declarativeSchemaInformation) GetAggregateUDFs() []string { + return nil +} + +func (si *declarativeSchemaInformation) GetForeignKeyChecksState() *bool { + return nil +} + // addTable adds a fake table with an empty column list func (si *declarativeSchemaInformation) addTable(tableName string) { tbl := &vindexes.Table{ diff --git a/go/vt/schemadiff/table.go b/go/vt/schemadiff/table.go index 3f256889721..def83fa7f19 100644 --- a/go/vt/schemadiff/table.go +++ b/go/vt/schemadiff/table.go @@ -19,25 +19,31 @@ package schemadiff import ( "fmt" "math" + "slices" "sort" "strconv" "strings" golcs "github.com/yudai/golcs" - "vitess.io/vitess/go/mysql/collations/colldata" - - "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/ptr" "vitess.io/vitess/go/vt/sqlparser" ) +type charsetCollate struct { + charset string + collate string +} + type AlterTableEntityDiff struct { - from *CreateTableEntity - to *CreateTableEntity - alterTable *sqlparser.AlterTable + from *CreateTableEntity + to *CreateTableEntity + alterTable *sqlparser.AlterTable + annotations 
*TextualAnnotations canonicalStatementString string subsequentDiff *AlterTableEntityDiff + instantDDLCapability InstantDDLCapability } // IsEmpty implements EntityDiff @@ -55,6 +61,10 @@ func (d *AlterTableEntityDiff) Entities() (from Entity, to Entity) { return d.from, d.to } +func (d *AlterTableEntityDiff) Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) { + return annotatedDiff(d, d.annotations) +} + // Statement implements EntityDiff func (d *AlterTableEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ -121,6 +131,37 @@ func (d *AlterTableEntityDiff) addSubsequentDiff(diff *AlterTableEntityDiff) { } } +// InstantDDLCapability implements EntityDiff +func (d *AlterTableEntityDiff) InstantDDLCapability() InstantDDLCapability { + if d == nil { + return InstantDDLCapabilityUnknown + } + return d.instantDDLCapability +} + +// Clone implements EntityDiff +func (d *AlterTableEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + ann := *d.annotations + clone := &AlterTableEntityDiff{ + alterTable: sqlparser.CloneRefOfAlterTable(d.alterTable), + instantDDLCapability: d.instantDDLCapability, + annotations: &ann, + } + if d.from != nil { + clone.from = d.from.Clone().(*CreateTableEntity) + } + if d.to != nil { + clone.to = d.to.Clone().(*CreateTableEntity) + } + if d.subsequentDiff != nil { + clone.subsequentDiff = d.subsequentDiff.Clone().(*AlterTableEntityDiff) + } + return clone +} + type CreateTableEntityDiff struct { to *CreateTableEntity createTable *sqlparser.CreateTable @@ -143,6 +184,10 @@ func (d *CreateTableEntityDiff) Entities() (from Entity, to Entity) { return nil, &CreateTableEntity{CreateTable: d.createTable} } +func (d *CreateTableEntityDiff) Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) { + return annotatedDiff(d, nil) +} + // Statement implements EntityDiff func (d *CreateTableEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ 
-189,6 +234,25 @@ func (d *CreateTableEntityDiff) SubsequentDiff() EntityDiff { func (d *CreateTableEntityDiff) SetSubsequentDiff(EntityDiff) { } +// InstantDDLCapability implements EntityDiff +func (d *CreateTableEntityDiff) InstantDDLCapability() InstantDDLCapability { + return InstantDDLCapabilityIrrelevant +} + +// Clone implements EntityDiff +func (d *CreateTableEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + clone := &CreateTableEntityDiff{ + createTable: sqlparser.CloneRefOfCreateTable(d.createTable), + } + if d.to != nil { + clone.to = d.to.Clone().(*CreateTableEntity) + } + return clone +} + type DropTableEntityDiff struct { from *CreateTableEntity dropTable *sqlparser.DropTable @@ -211,6 +275,10 @@ func (d *DropTableEntityDiff) Entities() (from Entity, to Entity) { return d.from, nil } +func (d *DropTableEntityDiff) Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) { + return annotatedDiff(d, nil) +} + // Statement implements EntityDiff func (d *DropTableEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ -257,6 +325,25 @@ func (d *DropTableEntityDiff) SubsequentDiff() EntityDiff { func (d *DropTableEntityDiff) SetSubsequentDiff(EntityDiff) { } +// InstantDDLCapability implements EntityDiff +func (d *DropTableEntityDiff) InstantDDLCapability() InstantDDLCapability { + return InstantDDLCapabilityIrrelevant +} + +// Clone implements EntityDiff +func (d *DropTableEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + clone := &DropTableEntityDiff{ + dropTable: sqlparser.CloneRefOfDropTable(d.dropTable), + } + if d.from != nil { + clone.from = d.from.Clone().(*CreateTableEntity) + } + return clone +} + type RenameTableEntityDiff struct { from *CreateTableEntity to *CreateTableEntity @@ -280,6 +367,10 @@ func (d *RenameTableEntityDiff) Entities() (from Entity, to Entity) { return d.from, d.to } +func (d *RenameTableEntityDiff) Annotated() (from *TextualAnnotations, to 
*TextualAnnotations, unified *TextualAnnotations) { + return annotatedDiff(d, nil) +} + // Statement implements EntityDiff func (d *RenameTableEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ -326,16 +417,39 @@ func (d *RenameTableEntityDiff) SubsequentDiff() EntityDiff { func (d *RenameTableEntityDiff) SetSubsequentDiff(EntityDiff) { } +// InstantDDLCapability implements EntityDiff +func (d *RenameTableEntityDiff) InstantDDLCapability() InstantDDLCapability { + return InstantDDLCapabilityIrrelevant +} + +// Clone implements EntityDiff +func (d *RenameTableEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + clone := &RenameTableEntityDiff{ + renameTable: sqlparser.CloneRefOfRenameTable(d.renameTable), + } + if d.from != nil { + clone.from = d.from.Clone().(*CreateTableEntity) + } + if d.to != nil { + clone.to = d.to.Clone().(*CreateTableEntity) + } + return clone +} + // CreateTableEntity stands for a TABLE construct. It contains the table's CREATE statement. 
type CreateTableEntity struct { *sqlparser.CreateTable + Env *Environment } -func NewCreateTableEntity(c *sqlparser.CreateTable) (*CreateTableEntity, error) { +func NewCreateTableEntity(env *Environment, c *sqlparser.CreateTable) (*CreateTableEntity, error) { if !c.IsFullyParsed() { return nil, &NotFullyParsedError{Entity: c.Table.Name.String(), Statement: sqlparser.CanonicalString(c)} } - entity := &CreateTableEntity{CreateTable: c} + entity := &CreateTableEntity{CreateTable: c, Env: env} entity.normalize() return entity, nil } @@ -362,12 +476,12 @@ func (c *CreateTableEntity) normalizeTableOptions() { switch opt.Name { case "charset": opt.String = strings.ToLower(opt.String) - if charset, ok := collationEnv.CharsetAlias(opt.String); ok { + if charset, ok := c.Env.CollationEnv().CharsetAlias(opt.String); ok { opt.String = charset } case "collate": opt.String = strings.ToLower(opt.String) - if collation, ok := collationEnv.CollationAlias(opt.String); ok { + if collation, ok := c.Env.CollationEnv().CollationAlias(opt.String); ok { opt.String = collation } case "engine": @@ -387,7 +501,7 @@ func (c *CreateTableEntity) GetCharset() string { for _, opt := range c.CreateTable.TableSpec.Options { if strings.ToLower(opt.Name) == "charset" { opt.String = strings.ToLower(opt.String) - if charsetName, ok := collationEnv.CharsetAlias(opt.String); ok { + if charsetName, ok := c.Env.CollationEnv().CharsetAlias(opt.String); ok { return charsetName } return opt.String @@ -402,7 +516,7 @@ func (c *CreateTableEntity) GetCollation() string { for _, opt := range c.CreateTable.TableSpec.Options { if strings.ToLower(opt.Name) == "collate" { opt.String = strings.ToLower(opt.String) - if collationName, ok := collationEnv.CollationAlias(opt.String); ok { + if collationName, ok := c.Env.CollationEnv().CollationAlias(opt.String); ok { return collationName } return opt.String @@ -412,45 +526,27 @@ func (c *CreateTableEntity) GetCollation() string { } func (c *CreateTableEntity) Clone() 
Entity { - return &CreateTableEntity{CreateTable: sqlparser.CloneRefOfCreateTable(c.CreateTable)} + return &CreateTableEntity{CreateTable: sqlparser.CloneRefOfCreateTable(c.CreateTable), Env: c.Env} } -// Right now we assume MySQL 8.0 for the collation normalization handling. -const mysqlCollationVersion = "8.0.0" - -var collationEnv = collations.NewEnvironment(mysqlCollationVersion) - -func defaultCharset() string { - collation := colldata.Lookup(collations.ID(collationEnv.DefaultConnectionCharset())) - if collation == nil { - return "" +func getTableCharsetCollate(env *Environment, tableOptions *sqlparser.TableOptions) *charsetCollate { + cc := &charsetCollate{ + charset: env.CollationEnv().LookupCharsetName(env.DefaultColl), + collate: env.CollationEnv().LookupName(env.DefaultColl), } - return collation.Charset().Name() -} - -func defaultCharsetCollation(charset string) string { - collation := collationEnv.DefaultCollationForCharset(charset) - if collation == collations.Unknown { - return "" + for _, option := range *tableOptions { + if strings.EqualFold(option.Name, "charset") { + cc.charset = option.String + } + if strings.EqualFold(option.Name, "collate") { + cc.collate = option.String + } } - return collationEnv.LookupName(collation) + return cc } func (c *CreateTableEntity) normalizeColumnOptions() { - tableCharset := defaultCharset() - tableCollation := "" - for _, option := range c.CreateTable.TableSpec.Options { - switch strings.ToUpper(option.Name) { - case "CHARSET": - tableCharset = option.String - case "COLLATE": - tableCollation = option.String - } - } - defaultCollation := defaultCharsetCollation(tableCharset) - if tableCollation == "" { - tableCollation = defaultCollation - } + cc := getTableCharsetCollate(c.Env, &c.CreateTable.TableSpec.Options) for _, col := range c.CreateTable.TableSpec.Columns { if col.Type.Options == nil { @@ -497,13 +593,13 @@ func (c *CreateTableEntity) normalizeColumnOptions() { // Map any charset aliases to the real 
charset. This applies mainly right // now to utf8 being an alias for utf8mb3. - if charset, ok := collationEnv.CharsetAlias(col.Type.Charset.Name); ok { + if charset, ok := c.Env.CollationEnv().CharsetAlias(col.Type.Charset.Name); ok { col.Type.Charset.Name = charset } // Map any collation aliases to the real collation. This applies mainly right // now to utf8 being an alias for utf8mb3 collations. - if collation, ok := collationEnv.CollationAlias(col.Type.Options.Collate); ok { + if collation, ok := c.Env.CollationEnv().CollationAlias(col.Type.Options.Collate); ok { col.Type.Options.Collate = collation } @@ -521,10 +617,7 @@ func (c *CreateTableEntity) normalizeColumnOptions() { // "show create table" reports it as a tinyint(1). if col.Type.Type == "boolean" { col.Type.Type = "tinyint" - col.Type.Length = &sqlparser.Literal{ - Type: sqlparser.IntVal, - Val: "1", - } + col.Type.Length = ptr.Of(1) if col.Type.Options.Default != nil { val, ok := col.Type.Options.Default.(sqlparser.BoolVal) @@ -553,16 +646,14 @@ func (c *CreateTableEntity) normalizeColumnOptions() { col.Type.Type = "double" } - if col.Type.Length != nil && col.Type.Scale == nil && col.Type.Length.Type == sqlparser.IntVal { - if l, err := strconv.ParseInt(col.Type.Length.Val, 10, 64); err == nil { - // See https://dev.mysql.com/doc/refman/8.0/en/floating-point-types.html, but the docs are - // subtly wrong. We use a float for a precision of 24, not a double as the documentation - // mentioned. Validated against the actual behavior of MySQL. - if l <= 24 { - col.Type.Type = "float" - } else { - col.Type.Type = "double" - } + if col.Type.Length != nil && col.Type.Scale == nil { + // See https://dev.mysql.com/doc/refman/8.0/en/floating-point-types.html, but the docs are + // subtly wrong. We use a float for a precision of 24, not a double as the documentation + // mentioned. Validated against the actual behavior of MySQL. 
+ if *col.Type.Length <= 24 { + col.Type.Type = "float" + } else { + col.Type.Type = "double" } col.Type.Length = nil } @@ -571,13 +662,13 @@ func (c *CreateTableEntity) normalizeColumnOptions() { if _, ok := charsetTypes[col.Type.Type]; ok { // If the charset is explicitly configured and it mismatches, we don't normalize // anything for charsets or collations and move on. - if col.Type.Charset.Name != "" && col.Type.Charset.Name != tableCharset { + if col.Type.Charset.Name != "" && col.Type.Charset.Name != cc.charset { continue } // Alright, first check if both charset and collation are the same as // the table level options, in that case we can remove both since that's equivalent. - if col.Type.Charset.Name == tableCharset && col.Type.Options.Collate == tableCollation { + if col.Type.Charset.Name == cc.charset && col.Type.Options.Collate == cc.collate { col.Type.Charset.Name = "" col.Type.Options.Collate = "" } @@ -595,13 +686,13 @@ func (c *CreateTableEntity) normalizeColumnOptions() { if col.Type.Charset.Name != "" { col.Type.Charset.Name = "" if col.Type.Options.Collate == "" { - col.Type.Options.Collate = defaultCollation + col.Type.Options.Collate = c.Env.CollationEnv().LookupName(c.Env.DefaultColl) } } // We now have one case left, which is when we have set a collation but it's the same // as the table level. In that case, we can clear it since that is equivalent. 
- if col.Type.Options.Collate == tableCollation { + if col.Type.Options.Collate == cc.collate { col.Type.Options.Collate = "" } } @@ -618,7 +709,7 @@ func (c *CreateTableEntity) normalizeIndexOptions() { } func isBool(colType *sqlparser.ColumnType) bool { - return colType.Type == sqlparser.KeywordString(sqlparser.TINYINT) && colType.Length != nil && sqlparser.CanonicalString(colType.Length) == "1" + return colType.Type == sqlparser.KeywordString(sqlparser.TINYINT) && colType.Length != nil && *colType.Length == 1 } func (c *CreateTableEntity) normalizePartitionOptions() { @@ -826,35 +917,36 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints alterTable.Table.Qualifier = other.Table.Qualifier } - diffedTableCharset := "" + t1cc := getTableCharsetCollate(c.Env, &c.CreateTable.TableSpec.Options) + t2cc := getTableCharsetCollate(c.Env, &other.CreateTable.TableSpec.Options) + var parentAlterTableEntityDiff *AlterTableEntityDiff var partitionSpecs []*sqlparser.PartitionSpec var superfluousFulltextKeys []*sqlparser.AddIndexDefinition - { - t1Options := c.CreateTable.TableSpec.Options - t2Options := other.CreateTable.TableSpec.Options - diffedTableCharset = c.diffTableCharset(t1Options, t2Options) - } + annotations := NewTextualAnnotations() { // diff columns // ordered columns for both tables: + t1Columns := c.CreateTable.TableSpec.Columns t2Columns := other.CreateTable.TableSpec.Columns - c.diffColumns(alterTable, t1Columns, t2Columns, hints, diffedTableCharset != "") + if err := c.diffColumns(alterTable, annotations, t1Columns, t2Columns, hints, t1cc, t2cc); err != nil { + return nil, err + } } { // diff keys // ordered keys for both tables: t1Keys := c.CreateTable.TableSpec.Indexes t2Keys := other.CreateTable.TableSpec.Indexes - superfluousFulltextKeys = c.diffKeys(alterTable, t1Keys, t2Keys, hints) + superfluousFulltextKeys = c.diffKeys(alterTable, annotations, t1Keys, t2Keys, hints) } { // diff constraints // ordered constraints for 
both tables: t1Constraints := c.CreateTable.TableSpec.Constraints t2Constraints := other.CreateTable.TableSpec.Constraints - c.diffConstraints(alterTable, c.Name(), t1Constraints, other.Name(), t2Constraints, hints) + c.diffConstraints(alterTable, annotations, c.Name(), t1Constraints, other.Name(), t2Constraints, hints) } { // diff partitions @@ -862,7 +954,7 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints t1Partitions := c.CreateTable.TableSpec.PartitionOption t2Partitions := other.CreateTable.TableSpec.PartitionOption var err error - partitionSpecs, err = c.diffPartitions(alterTable, t1Partitions, t2Partitions, hints) + partitionSpecs, err = c.diffPartitions(alterTable, annotations, t1Partitions, t2Partitions, hints) if err != nil { return nil, err } @@ -872,14 +964,14 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints // ordered keys for both tables: t1Options := c.CreateTable.TableSpec.Options t2Options := other.CreateTable.TableSpec.Options - if err := c.diffOptions(alterTable, t1Options, t2Options, hints); err != nil { + if err := c.diffOptions(alterTable, annotations, t1Options, t2Options, hints); err != nil { return nil, err } } tableSpecHasChanged := len(alterTable.AlterOptions) > 0 || alterTable.PartitionOption != nil || alterTable.PartitionSpec != nil newAlterTableEntityDiff := func(alterTable *sqlparser.AlterTable) *AlterTableEntityDiff { - d := &AlterTableEntityDiff{alterTable: alterTable, from: c, to: other} + d := &AlterTableEntityDiff{alterTable: alterTable, from: c, to: other, annotations: annotations} var algorithmValue sqlparser.AlgorithmValue @@ -923,25 +1015,21 @@ func (c *CreateTableEntity) TableDiff(other *CreateTableEntity, hints *DiffHints } sortAlterOptions(parentAlterTableEntityDiff) + if hints.SubsequentDiffStrategy == SubsequentDiffStrategyReject { + if allSubsequent := AllSubsequent(parentAlterTableEntityDiff); len(allSubsequent) > 1 { + return nil, 
&SubsequentDiffRejectedError{Table: c.Name(), Diffs: allSubsequent} + } + } + return parentAlterTableEntityDiff, nil } func (c *CreateTableEntity) diffTableCharset( - t1Options sqlparser.TableOptions, - t2Options sqlparser.TableOptions, + t1cc *charsetCollate, + t2cc *charsetCollate, ) string { - getcharset := func(options sqlparser.TableOptions) string { - for _, option := range options { - if strings.EqualFold(option.Name, "CHARSET") { - return option.String - } - } - return "" - } - t1Charset := getcharset(t1Options) - t2Charset := getcharset(t2Options) - if t1Charset != t2Charset { - return t2Charset + if t1cc.charset != t2cc.charset { + return t2cc.charset } return "" } @@ -991,6 +1079,7 @@ func isDefaultTableOptionValue(option *sqlparser.TableOption) bool { } func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, + annotations *TextualAnnotations, t1Options sqlparser.TableOptions, t2Options sqlparser.TableOptions, hints *DiffHints, @@ -1019,7 +1108,7 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, case "CHARSET": switch hints.TableCharsetCollateStrategy { case TableCharsetCollateStrict: - tableOption = &sqlparser.TableOption{String: ""} + tableOption = &sqlparser.TableOption{Name: "CHARSET", String: c.Env.CollationEnv().LookupCharsetName(c.Env.DefaultColl), CaseSensitive: true} // in all other strategies we ignore the charset } case "CHECKSUM": @@ -1076,11 +1165,16 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, if tableOption != nil { tableOption.Name = t1Option.Name alterTableOptions = append(alterTableOptions, tableOption) + annotations.MarkRemoved(sqlparser.CanonicalString(sqlparser.TableOptions{t1Option})) } } - } // changed options + modifyTableOption := func(option1, option2 *sqlparser.TableOption) { + alterTableOptions = append(alterTableOptions, option2) + annotations.MarkRemoved(sqlparser.CanonicalString(sqlparser.TableOptions{option1})) + 
annotations.MarkAdded(sqlparser.CanonicalString(sqlparser.TableOptions{option2})) + } for _, t2Option := range t2Options { if t1Option, ok := t1OptionsMap[t2Option.Name]; ok { options1 := sqlparser.TableOptions{t1Option} @@ -1092,10 +1186,10 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, case "CHARSET", "COLLATE": switch hints.TableCharsetCollateStrategy { case TableCharsetCollateStrict: - alterTableOptions = append(alterTableOptions, t2Option) + modifyTableOption(t1Option, t2Option) case TableCharsetCollateIgnoreEmpty: if t1Option.String != "" && t2Option.String != "" { - alterTableOptions = append(alterTableOptions, t2Option) + modifyTableOption(t1Option, t2Option) } // if one is empty, we ignore case TableCharsetCollateIgnoreAlways: @@ -1104,7 +1198,7 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, case "AUTO_INCREMENT": switch hints.AutoIncrementStrategy { case AutoIncrementApplyAlways: - alterTableOptions = append(alterTableOptions, t2Option) + modifyTableOption(t1Option, t2Option) case AutoIncrementApplyHigher: option1AutoIncrement, err := strconv.ParseInt(t1Option.Value.Val, 10, 64) if err != nil { @@ -1116,18 +1210,22 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, } if option2AutoIncrement > option1AutoIncrement { // never decrease AUTO_INCREMENT. 
Only increase - alterTableOptions = append(alterTableOptions, t2Option) + modifyTableOption(t1Option, t2Option) } case AutoIncrementIgnore: // do not apply } default: // Apply the new options - alterTableOptions = append(alterTableOptions, t2Option) + modifyTableOption(t1Option, t2Option) } } } } + addTableOption := func(option *sqlparser.TableOption) { + alterTableOptions = append(alterTableOptions, option) + annotations.MarkAdded(sqlparser.CanonicalString(sqlparser.TableOptions{option})) + } // added options for _, t2Option := range t2Options { if _, ok := t1OptionsMap[t2Option.Name]; !ok { @@ -1135,18 +1233,18 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, case "CHARSET", "COLLATE": switch hints.TableCharsetCollateStrategy { case TableCharsetCollateStrict: - alterTableOptions = append(alterTableOptions, t2Option) + addTableOption(t2Option) // in all other strategies we ignore the charset } case "AUTO_INCREMENT": switch hints.AutoIncrementStrategy { case AutoIncrementApplyAlways, AutoIncrementApplyHigher: - alterTableOptions = append(alterTableOptions, t2Option) + addTableOption(t2Option) case AutoIncrementIgnore: // do not apply } default: - alterTableOptions = append(alterTableOptions, t2Option) + addTableOption(t2Option) } } } @@ -1159,10 +1257,11 @@ func (c *CreateTableEntity) diffOptions(alterTable *sqlparser.AlterTable, // rangePartitionsAddedRemoved returns true when: // - both table partitions are RANGE type -// - there is exactly one consequitive non-empty shared sequence of partitions (same names, same range values, in same order) +// - there is exactly one consecutive non-empty shared sequence of partitions (same names, same range values, in same order) // - table1 may have non-empty list of partitions _preceding_ this sequence, and table2 may not // - table2 may have non-empty list of partitions _following_ this sequence, and table1 may not func (c *CreateTableEntity) isRangePartitionsRotation( + annotations 
*TextualAnnotations, t1Partitions *sqlparser.PartitionOption, t2Partitions *sqlparser.PartitionOption, ) (bool, []*sqlparser.PartitionSpec, error) { @@ -1173,62 +1272,77 @@ func (c *CreateTableEntity) isRangePartitionsRotation( if t1Partitions.Type != sqlparser.RangeType { return false, nil, nil } - definitions1 := t1Partitions.Definitions + definitions1 := slices.Clone(t1Partitions.Definitions) definitions2 := t2Partitions.Definitions - // there has to be a non-empty shared list, therefore both definitions must be non-empty: if len(definitions1) == 0 { return false, nil, nil } if len(definitions2) == 0 { return false, nil, nil } + definitions2map := make(map[string]*sqlparser.PartitionDefinition, len(definitions2)) + for _, definition := range definitions2 { + definitions2map[sqlparser.CanonicalString(definition)] = definition + } + // Find dropped partitions: var droppedPartitions1 []*sqlparser.PartitionDefinition - // It's OK for prefix of t1 partitions to be nonexistent in t2 (as they may have been rotated away in t2) - for len(definitions1) > 0 && !sqlparser.Equals.RefOfPartitionDefinition(definitions1[0], definitions2[0]) { - droppedPartitions1 = append(droppedPartitions1, definitions1[0]) - definitions1 = definitions1[1:] + for i := len(definitions1) - 1; i >= 0; i-- { + definition := definitions1[i] + if _, ok := definitions2map[sqlparser.CanonicalString(definition)]; !ok { + // In range partitioning, it's allowed to drop any partition, whether it's the first, somewhere in the middle, or last. + droppedPartitions1 = append(droppedPartitions1, definition) + // We remove the definition from the list, so that we can then compare the remaining definitions + definitions1 = append(definitions1[:i], definitions1[i+1:]...) + } } + slices.Reverse(droppedPartitions1) if len(definitions1) == 0 { - // We've exhaused definition1 trying to find a shared partition with definitions2. Nothing found. - // so there is no shared sequence between the two tables. 
+ // Nothing shared between the two partition lists. return false, nil, nil } + // In range partitioning, it's only allowed to ADD one partition at the end of the range. + // We allow multiple here, and the diff mechanism will later split them to subsequent diffs. + + // Let's now validate that any added partitions in t2Partitions are strictly a suffix of t1Partitions if len(definitions1) > len(definitions2) { return false, nil, nil } - // To save computation, and because we've already shown that sqlparser.EqualsRefOfPartitionDefinition(definitions1[0], definitions2[0]), nil, - // we can skip one element - definitions1 = definitions1[1:] - definitions2 = definitions2[1:] - // Now let's ensure that whatever is remaining in definitions1 is an exact match for a prefix of definitions2 - // It's ok if we end up with leftover elements in definition2 - for len(definitions1) > 0 { - if !sqlparser.Equals.RefOfPartitionDefinition(definitions1[0], definitions2[0]) { + for i := range definitions1 { + if !sqlparser.Equals.RefOfPartitionDefinition(definitions1[i], definitions2[i]) { + // Not a suffix return false, nil, nil } - definitions1 = definitions1[1:] - definitions2 = definitions2[1:] } - addedPartitions2 := definitions2 - partitionSpecs := make([]*sqlparser.PartitionSpec, 0, len(droppedPartitions1)+len(addedPartitions2)) - for _, p := range droppedPartitions1 { + // And the suffix is any remaining definitions + addedPartitions2 := definitions2[len(definitions1):] + + var partitionSpecs []*sqlparser.PartitionSpec + // Dropped partitions: + if len(droppedPartitions1) > 0 { + // A single DROP PARTITION clause can specify multiple partition names partitionSpec := &sqlparser.PartitionSpec{ Action: sqlparser.DropAction, - Names: []sqlparser.IdentifierCI{p.Name}, + } + for _, p := range droppedPartitions1 { + partitionSpec.Names = append(partitionSpec.Names, p.Name) + annotations.MarkRemoved(sqlparser.CanonicalString(p)) } partitionSpecs = append(partitionSpecs, partitionSpec) 
} + // Added partitions: for _, p := range addedPartitions2 { partitionSpec := &sqlparser.PartitionSpec{ Action: sqlparser.AddAction, Definitions: []*sqlparser.PartitionDefinition{p}, } partitionSpecs = append(partitionSpecs, partitionSpec) + annotations.MarkAdded(sqlparser.CanonicalString(p)) } return true, partitionSpecs, nil } func (c *CreateTableEntity) diffPartitions(alterTable *sqlparser.AlterTable, + annotations *TextualAnnotations, t1Partitions *sqlparser.PartitionOption, t2Partitions *sqlparser.PartitionOption, hints *DiffHints, @@ -1239,6 +1353,7 @@ func (c *CreateTableEntity) diffPartitions(alterTable *sqlparser.AlterTable, case t1Partitions == nil: // add partitioning alterTable.PartitionOption = t2Partitions + annotations.MarkAdded(sqlparser.CanonicalString(t2Partitions)) case t2Partitions == nil: // remove partitioning partitionSpec := &sqlparser.PartitionSpec{ @@ -1246,14 +1361,15 @@ func (c *CreateTableEntity) diffPartitions(alterTable *sqlparser.AlterTable, IsAll: true, } alterTable.PartitionSpec = partitionSpec + annotations.MarkRemoved(sqlparser.CanonicalString(t1Partitions)) case sqlparser.Equals.RefOfPartitionOption(t1Partitions, t2Partitions): // identical partitioning return nil, nil default: // partitioning was changed - // For most cases, we produce a complete re-partitioing schema: we don't try and figure out the minimal + // For most cases, we produce a complete re-partitioning schema: we don't try and figure out the minimal // needed change. For example, maybe the minimal change is to REORGANIZE a specific partition and split - // into two, thus unaffecting the rest of the partitions. But we don't evaluate that, we just set a + // into two, thus not affecting the rest of the partitions. But we don't evaluate that, we just set a // complete new ALTER TABLE ... PARTITION BY statement. // The idea is that it doesn't matter: we're not looking to do optimal in-place ALTERs, we run // Online DDL alters, where we create a new table anyway. 
Thus, the optimization is meaningless. @@ -1261,7 +1377,7 @@ func (c *CreateTableEntity) diffPartitions(alterTable *sqlparser.AlterTable, // Having said that, we _do_ analyze the scenario of a RANGE partitioning rotation of partitions: // where zero or more partitions may have been dropped from the earlier range, and zero or more // partitions have been added with a later range: - isRotation, partitionSpecs, err := c.isRangePartitionsRotation(t1Partitions, t2Partitions) + isRotation, partitionSpecs, err := c.isRangePartitionsRotation(annotations, t1Partitions, t2Partitions) if err != nil { return nil, err } @@ -1276,11 +1392,14 @@ func (c *CreateTableEntity) diffPartitions(alterTable *sqlparser.AlterTable, } } alterTable.PartitionOption = t2Partitions + annotations.MarkRemoved(sqlparser.CanonicalString(t1Partitions)) + annotations.MarkAdded(sqlparser.CanonicalString(t2Partitions)) } return nil, nil } func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, + annotations *TextualAnnotations, t1Name string, t1Constraints []*sqlparser.ConstraintDefinition, t2Name string, @@ -1333,6 +1452,7 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, // constraint exists in t1 but not in t2, hence it is dropped dropConstraint := dropConstraintStatement(t1Constraint) alterTable.AlterOptions = append(alterTable.AlterOptions, dropConstraint) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Constraint)) } else { t2ConstraintsCountMap[constraintName]-- } @@ -1359,6 +1479,8 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, Enforced: check2Details.Enforced, } alterTable.AlterOptions = append(alterTable.AlterOptions, alterConstraint) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Constraint)) + annotations.MarkAdded(sqlparser.CanonicalString(t2Constraint)) continue } @@ -1369,6 +1491,8 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, } alterTable.AlterOptions = 
append(alterTable.AlterOptions, dropConstraint) alterTable.AlterOptions = append(alterTable.AlterOptions, addConstraint) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Constraint)) + annotations.MarkAdded(sqlparser.CanonicalString(t2Constraint)) } } else { // constraint exists in t2 but not in t1, hence it is added @@ -1376,11 +1500,13 @@ func (c *CreateTableEntity) diffConstraints(alterTable *sqlparser.AlterTable, ConstraintDefinition: t2Constraint, } alterTable.AlterOptions = append(alterTable.AlterOptions, addConstraint) + annotations.MarkAdded(sqlparser.CanonicalString(t2Constraint)) } } } func (c *CreateTableEntity) diffKeys(alterTable *sqlparser.AlterTable, + annotations *TextualAnnotations, t1Keys []*sqlparser.IndexDefinition, t2Keys []*sqlparser.IndexDefinition, hints *DiffHints, @@ -1412,6 +1538,7 @@ func (c *CreateTableEntity) diffKeys(alterTable *sqlparser.AlterTable, // column exists in t1 but not in t2, hence it is dropped dropKey := dropKeyStatement(t1Key.Info) alterTable.AlterOptions = append(alterTable.AlterOptions, dropKey) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Key)) } } @@ -1430,6 +1557,8 @@ func (c *CreateTableEntity) diffKeys(alterTable *sqlparser.AlterTable, Name: t2Key.Info.Name, Invisible: newVisibility, }) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Key)) + annotations.MarkAdded(sqlparser.CanonicalString(t2Key)) continue } @@ -1440,6 +1569,8 @@ func (c *CreateTableEntity) diffKeys(alterTable *sqlparser.AlterTable, } alterTable.AlterOptions = append(alterTable.AlterOptions, dropKey) alterTable.AlterOptions = append(alterTable.AlterOptions, addKey) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Key)) + annotations.MarkAdded(sqlparser.CanonicalString(t2Key)) } } else { // key exists in t2 but not in t1, hence it is added @@ -1452,11 +1583,13 @@ func (c *CreateTableEntity) diffKeys(alterTable *sqlparser.AlterTable, // Special case: MySQL does not support multiple ADD FULLTEXT KEY statements in a single 
ALTER superfluousFulltextKeys = append(superfluousFulltextKeys, addKey) addedAsSuperfluousStatement = true + annotations.MarkAdded(sqlparser.CanonicalString(t2Key)) } addedFulltextKeys++ } if !addedAsSuperfluousStatement { alterTable.AlterOptions = append(alterTable.AlterOptions, addKey) + annotations.MarkAdded(sqlparser.CanonicalString(t2Key)) } } } @@ -1533,11 +1666,13 @@ func evaluateColumnReordering(t1SharedColumns, t2SharedColumns []*sqlparser.Colu // It returns an AlterTable statement if changes are found, or nil if not. // the other table may be of different name; its name is ignored. func (c *CreateTableEntity) diffColumns(alterTable *sqlparser.AlterTable, + annotations *TextualAnnotations, t1Columns []*sqlparser.ColumnDefinition, t2Columns []*sqlparser.ColumnDefinition, hints *DiffHints, - tableCharsetChanged bool, -) { + t1cc *charsetCollate, + t2cc *charsetCollate, +) error { getColumnsMap := func(cols []*sqlparser.ColumnDefinition) map[string]*columnDetails { var prevCol *columnDetails m := map[string]*columnDetails{} @@ -1574,6 +1709,7 @@ func (c *CreateTableEntity) diffColumns(alterTable *sqlparser.AlterTable, Name: getColName(&t1Col.Name), } dropColumns = append(dropColumns, dropColumn) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Col)) } } @@ -1599,13 +1735,16 @@ func (c *CreateTableEntity) diffColumns(alterTable *sqlparser.AlterTable, t2ColEntity := NewColumnDefinitionEntity(t2Col) // check diff between before/after columns: - modifyColumnDiff := t1ColEntity.ColumnDiff(t2ColEntity, hints) + modifyColumnDiff, err := t1ColEntity.ColumnDiff(c.Env, c.Name(), t2ColEntity, t1cc, t2cc, hints) + if err != nil { + return err + } if modifyColumnDiff == nil { // even if there's no apparent change, there can still be implicit changes - // it is possible that the table charset is changed. the column may be some col1 TEXT NOT NULL, possibly in both varsions 1 and 2, - // but implicitly the column has changed its characters set. 
So we need to explicitly ass a MODIFY COLUMN statement, so that + // it is possible that the table charset is changed. the column may be some col1 TEXT NOT NULL, possibly in both versions 1 and 2, + // but implicitly the column has changed its character set. So we need to explicitly add a MODIFY COLUMN statement, so that // MySQL rebuilds it. - if tableCharsetChanged && t2ColEntity.IsTextual() && t2Col.Type.Charset.Name == "" { + if t1cc.charset != t2cc.charset && t2ColEntity.IsTextual() && t2Col.Type.Charset.Name == "" { modifyColumnDiff = NewModifyColumnDiffByDefinition(t2Col) } } @@ -1626,6 +1765,8 @@ func (c *CreateTableEntity) diffColumns(alterTable *sqlparser.AlterTable, if modifyColumnDiff != nil { // column definition or ordering has changed modifyColumns = append(modifyColumns, modifyColumnDiff.modifyColumn) + annotations.MarkRemoved(sqlparser.CanonicalString(t1Col.col)) + annotations.MarkAdded(sqlparser.CanonicalString(t2Col)) } } // Evaluate added columns @@ -1651,6 +1792,7 @@ func (c *CreateTableEntity) diffColumns(alterTable *sqlparser.AlterTable, } expectAppendIndex++ addColumns = append(addColumns, addColumn) + annotations.MarkAdded(sqlparser.CanonicalString(t2Col)) } } dropColumns, addColumns, renameColumns := heuristicallyDetectColumnRenames(dropColumns, addColumns, t1ColumnsMap, t2ColumnsMap, hints) @@ -1666,6 +1808,7 @@ func (c *CreateTableEntity) diffColumns(alterTable *sqlparser.AlterTable, for _, c := range addColumns { alterTable.AlterOptions = append(alterTable.AlterOptions, c) } + return nil } func heuristicallyDetectColumnRenames( @@ -1684,7 +1827,7 @@ func heuristicallyDetectColumnRenames( // - the DROP and ADD column definitions are identical other than the column name, and // - the DROPped and ADDded column are both FIRST, or they come AFTER the same column, and // - the DROPped and ADDded column are both last, or they come before the same column - // This v1 chcek therefore cannot handle a case where two successive columns are renamed. 
+ // This v1 check therefore cannot handle a case where two successive columns are renamed. // the problem is complex, and with successive renamed, or drops and adds, it can be // impossible to tell apart different scenarios. // At any case, once we heuristically decide that we found a RENAME, we cancel the DROP, @@ -1742,6 +1885,9 @@ func (c *CreateTableEntity) primaryKeyColumns() []*sqlparser.IndexColumn { // Create implements Entity interface func (c *CreateTableEntity) Create() EntityDiff { + if c == nil { + return nil + } return &CreateTableEntityDiff{to: c, createTable: c.CreateTable} } diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go index e2ef58c1a6f..1168f53f3b6 100644 --- a/go/vt/schemadiff/table_test.go +++ b/go/vt/schemadiff/table_test.go @@ -28,24 +28,27 @@ import ( func TestCreateTableDiff(t *testing.T) { tt := []struct { - name string - from string - to string - fromName string - toName string - diff string - diffs []string - cdiff string - cdiffs []string - isError bool - errorMsg string - autoinc int - rotation int - fulltext int - colrename int - constraint int - charset int - algorithm int + name string + from string + to string + fromName string + toName string + diff string + diffs []string + cdiff string + cdiffs []string + errorMsg string + autoinc int + rotation int + fulltext int + colrename int + constraint int + charset int + algorithm int + enumreorder int + subsequent int + textdiffs []string + atomicdiffs []string }{ { name: "identical", @@ -70,6 +73,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t (Id int not null, primary key(id))", diff: "alter table t modify column Id int not null", cdiff: "ALTER TABLE `t` MODIFY COLUMN `Id` int NOT NULL", + textdiffs: []string{ + "- `id` int NOT NULL,", + "+ `Id` int NOT NULL,", + }, }, { name: "identical, name change", @@ -107,6 +114,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` int not null default 0)", 
diff: "alter table t1 add column i int not null default 0", cdiff: "ALTER TABLE `t1` ADD COLUMN `i` int NOT NULL DEFAULT 0", + textdiffs: []string{ + "+ `i` int NOT NULL DEFAULT 0,", + }, }, { name: "dropped column", @@ -116,6 +126,9 @@ func TestCreateTableDiff(t *testing.T) { cdiff: "ALTER TABLE `t1` DROP COLUMN `i`", fromName: "t1", toName: "t2", + textdiffs: []string{ + "- `i` int NOT NULL DEFAULT 0,", + }, }, { name: "modified column", @@ -123,6 +136,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` bigint unsigned default null)", diff: "alter table t1 modify column i bigint unsigned", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `i` bigint unsigned", + textdiffs: []string{ + "- `i` int NOT NULL DEFAULT 0,", + "+ `i` bigint unsigned,", + }, }, { name: "added column, dropped column, modified column", @@ -130,6 +147,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, ts timestamp null, `i` bigint unsigned default null)", diff: "alter table t1 drop column c, modify column i bigint unsigned, add column ts timestamp null after id", cdiff: "ALTER TABLE `t1` DROP COLUMN `c`, MODIFY COLUMN `i` bigint unsigned, ADD COLUMN `ts` timestamp NULL AFTER `id`", + textdiffs: []string{ + "- `c` char(3) DEFAULT '',", + "- `i` int NOT NULL DEFAULT 0,", + "+ `i` bigint unsigned,", + "+ `ts` timestamp NULL,", + }, }, // columns, rename { @@ -138,6 +161,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i2 int not null, c char(3) default '')", diff: "alter table t1 drop column i1, add column i2 int not null after id", cdiff: "ALTER TABLE `t1` DROP COLUMN `i1`, ADD COLUMN `i2` int NOT NULL AFTER `id`", + textdiffs: []string{ + "- `i1` int NOT NULL,", + "+ `i2` int NOT NULL,", + }, }, { name: "rename mid column. 
statement", @@ -146,6 +173,10 @@ func TestCreateTableDiff(t *testing.T) { colrename: ColumnRenameHeuristicStatement, diff: "alter table t1 rename column i1 to i2", cdiff: "ALTER TABLE `t1` RENAME COLUMN `i1` TO `i2`", + textdiffs: []string{ + "- `i1` int NOT NULL,", + "+ `i2` int NOT NULL,", + }, }, { name: "rename last column. statement", @@ -154,6 +185,10 @@ func TestCreateTableDiff(t *testing.T) { colrename: ColumnRenameHeuristicStatement, diff: "alter table t1 rename column i1 to i2", cdiff: "ALTER TABLE `t1` RENAME COLUMN `i1` TO `i2`", + textdiffs: []string{ + "- `i1` int NOT NULL,", + "+ `i2` int NOT NULL,", + }, }, { name: "rename two columns. statement", @@ -162,6 +197,12 @@ func TestCreateTableDiff(t *testing.T) { colrename: ColumnRenameHeuristicStatement, diff: "alter table t1 rename column i1 to i2, rename column v1 to v2", cdiff: "ALTER TABLE `t1` RENAME COLUMN `i1` TO `i2`, RENAME COLUMN `v1` TO `v2`", + textdiffs: []string{ + "- `i1` int NOT NULL,", + "- `v1` varchar(32),", + "+ `i2` int NOT NULL,", + "+ `v2` varchar(32),", + }, }, { name: "rename mid column and add an index. statement", @@ -170,6 +211,11 @@ func TestCreateTableDiff(t *testing.T) { colrename: ColumnRenameHeuristicStatement, diff: "alter table t1 rename column i1 to i2, add key i2_idx (i2)", cdiff: "ALTER TABLE `t1` RENAME COLUMN `i1` TO `i2`, ADD KEY `i2_idx` (`i2`)", + textdiffs: []string{ + "- `i1` int NOT NULL,", + "+ `i2` int NOT NULL,", + "+ KEY `i2_idx` (`i2`)", + }, }, { // in a future iteration, this will generate a RENAME for both column, like in the previous test. 
Until then, we do not RENAME two successive columns @@ -179,6 +225,12 @@ func TestCreateTableDiff(t *testing.T) { colrename: ColumnRenameHeuristicStatement, diff: "alter table t1 drop column i1, drop column v1, add column i2 int not null, add column v2 varchar(32)", cdiff: "ALTER TABLE `t1` DROP COLUMN `i1`, DROP COLUMN `v1`, ADD COLUMN `i2` int NOT NULL, ADD COLUMN `v2` varchar(32)", + textdiffs: []string{ + "- `i1` int NOT NULL,", + "- `v1` varchar(32),", + "+ `i2` int NOT NULL,", + "+ `v2` varchar(32),", + }, }, // columns, reordering { @@ -187,6 +239,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, a int, c int, b int, d int)", diff: "alter table t1 modify column c int after a", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`", + textdiffs: []string{ + "+ `c` int,", + "- `c` int,", + }, }, { name: "reorder column, far jump", @@ -194,6 +250,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (a int, b int, c int, d int, id int primary key)", diff: "alter table t1 modify column id int after d", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `id` int AFTER `d`", + textdiffs: []string{ + "- `id` int,", + "+ `id` int,", + }, }, { name: "reorder column, far jump with case sentivity", @@ -201,6 +261,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (a int, B int, c int, d int, id int primary key)", diff: "alter table t1 modify column B int, modify column id int after d", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `B` int, MODIFY COLUMN `id` int AFTER `d`", + textdiffs: []string{ + "- `id` int,", + "+ `id` int,", + "- `b` int,", + "+ `B` int,", + }, }, { name: "reorder column, far jump, another reorder", @@ -208,6 +274,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (a int, c int, b int, d int, id int primary key)", diff: "alter table t1 modify column c int after a, modify column id int after d", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, MODIFY COLUMN `id` int 
AFTER `d`", + textdiffs: []string{ + "- `id` int,", + "+ `id` int,", + "- `c` int,", + "+ `c` int,", + }, }, { name: "reorder column, far jump, another reorder 2", @@ -215,6 +287,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (c int, a int, b int, d int, id int primary key)", diff: "alter table t1 modify column c int first, modify column id int after d", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int FIRST, MODIFY COLUMN `id` int AFTER `d`", + textdiffs: []string{ + "- `id` int,", + "+ `id` int,", + "- `c` int,", + "+ `c` int,", + }, }, { name: "reorder column, far jump, another reorder 3", @@ -222,6 +300,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (a int, c int, b int, d int, id int primary key, e int, f int)", diff: "alter table t1 modify column c int after a, modify column id int after d", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, MODIFY COLUMN `id` int AFTER `d`", + textdiffs: []string{ + "- `id` int,", + "+ `id` int,", + "- `c` int,", + "+ `c` int,", + }, }, { name: "reorder column, far jump, another reorder, removed columns", @@ -229,6 +313,14 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (a int, c int, f int, e int, id int primary key, g int)", diff: "alter table t1 drop column b, drop column d, modify column f int after c, modify column id int after e", cdiff: "ALTER TABLE `t1` DROP COLUMN `b`, DROP COLUMN `d`, MODIFY COLUMN `f` int AFTER `c`, MODIFY COLUMN `id` int AFTER `e`", + textdiffs: []string{ + "- `b` int,", + "- `d` int,", + "- `id` int,", + "+ `id` int,", + "- `f` int,", + "+ `f` int,", + }, }, { name: "two reorders", @@ -236,6 +328,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, b int, a int, c int, e int, d int, f int)", diff: "alter table t1 modify column b int after id, modify column e int after c", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `b` int AFTER `id`, MODIFY COLUMN `e` int AFTER `c`", + textdiffs: []string{ + "- `b` 
int,", + "+ `b` int,", + "- `e` int,", + "+ `e` int,", + }, }, { name: "two reorders, added and removed columns", @@ -243,6 +341,18 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (g int, id int primary key, h int, b int, a int, i int, e int, d int, j int, f int, k int)", diff: "alter table t1 drop column c, modify column b int after id, modify column e int after a, add column g int first, add column h int after id, add column i int after a, add column j int after d, add column k int", cdiff: "ALTER TABLE `t1` DROP COLUMN `c`, MODIFY COLUMN `b` int AFTER `id`, MODIFY COLUMN `e` int AFTER `a`, ADD COLUMN `g` int FIRST, ADD COLUMN `h` int AFTER `id`, ADD COLUMN `i` int AFTER `a`, ADD COLUMN `j` int AFTER `d`, ADD COLUMN `k` int", + textdiffs: []string{ + "- `c` int,", + "- `b` int,", + "+ `b` int,", + "- `e` int,", + "+ `e` int,", + "+ `g` int,", + "+ `h` int,", + "+ `i` int,", + "+ `j` int,", + "+ `k` int,", + }, }, { name: "reorder column and change data type", @@ -250,6 +360,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, a int, c bigint, b int, d int)", diff: "alter table t1 modify column c bigint after a", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` bigint AFTER `a`", + textdiffs: []string{ + "- `c` int,", + "+ `c` bigint,", + }, }, { name: "reorder column, first", @@ -257,6 +371,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (c int, id int primary key, a int, b int, d int)", diff: "alter table t1 modify column c int first", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int FIRST", + textdiffs: []string{ + "- `c` int,", + "+ `c` int,", + }, }, { name: "add multiple columns", @@ -264,6 +382,11 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, a int, b int, c int, d int)", diff: "alter table t1 add column b int, add column c int, add column d int", cdiff: "ALTER TABLE `t1` ADD COLUMN `b` int, ADD COLUMN `c` int, ADD COLUMN `d` int", + textdiffs: []string{ 
+ "+ `b` int,", + "+ `c` int,", + "+ `d` int,", + }, }, { name: "added column in middle", @@ -271,6 +394,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, a int, b int, x int, c int, d int)", diff: "alter table t1 add column x int after b", cdiff: "ALTER TABLE `t1` ADD COLUMN `x` int AFTER `b`", + textdiffs: []string{ + "+ `x` int,", + }, }, { name: "added multiple column in middle", @@ -278,6 +404,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (w int, x int, id int primary key, y int, a int, z int)", diff: "alter table t1 add column w int first, add column x int after w, add column y int after id, add column z int", cdiff: "ALTER TABLE `t1` ADD COLUMN `w` int FIRST, ADD COLUMN `x` int AFTER `w`, ADD COLUMN `y` int AFTER `id`, ADD COLUMN `z` int", + textdiffs: []string{ + "+ `w` int,", + "+ `x` int,", + "+ `y` int,", + "+ `z` int,", + }, }, { name: "added column first, reorder column", @@ -285,6 +417,11 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (x int, a int, id int primary key)", diff: "alter table t1 modify column a int first, add column x int first", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `a` int FIRST, ADD COLUMN `x` int FIRST", + textdiffs: []string{ + "- `a` int,", + "+ `a` int,", + "+ `x` int,", + }, }, { name: "added column in middle, add column on end, reorder column", @@ -292,6 +429,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, a int, b int, x int, d int, c int, y int)", diff: "alter table t1 modify column d int after b, add column x int after b, add column y int", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `d` int AFTER `b`, ADD COLUMN `x` int AFTER `b`, ADD COLUMN `y` int", + textdiffs: []string{ + "- `d` int,", + "+ `d` int,", + "+ `x` int,", + "+ `y` int,", + }, }, { name: "added column in middle, add column on end, reorder column 2", @@ -299,7 +442,119 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary 
key, a int, c int, x int, b int, d int, y int)", diff: "alter table t1 modify column c int after a, add column x int after c, add column y int", cdiff: "ALTER TABLE `t1` MODIFY COLUMN `c` int AFTER `a`, ADD COLUMN `x` int AFTER `c`, ADD COLUMN `y` int", + textdiffs: []string{ + "- `c` int,", + "+ `c` int,", + "+ `x` int,", + "+ `y` int,", + }, + }, + // enum + { + name: "expand enum", + from: "create table t1 (id int primary key, e enum('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e enum('a', 'b', 'c', 'd'))", + diff: "alter table t1 modify column e enum('a', 'b', 'c', 'd')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` enum('a', 'b', 'c', 'd')", + textdiffs: []string{ + "- `e` enum('a', 'b', 'c'),", + "+ `e` enum('a', 'b', 'c', 'd'),", + }, + }, + { + name: "truncate enum", + from: "create table t1 (id int primary key, e enum('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e enum('a', 'b'))", + diff: "alter table t1 modify column e enum('a', 'b')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` enum('a', 'b')", + textdiffs: []string{ + "- `e` enum('a', 'b', 'c'),", + "+ `e` enum('a', 'b'),", + }, + }, + { + name: "rename enum value", + from: "create table t1 (id int primary key, e enum('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e enum('a', 'b', 'd'))", + diff: "alter table t1 modify column e enum('a', 'b', 'd')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` enum('a', 'b', 'd')", + textdiffs: []string{ + "- `e` enum('a', 'b', 'c'),", + "+ `e` enum('a', 'b', 'd'),", + }, + }, + { + name: "reorder enum, fail", + from: "create table t1 (id int primary key, e enum('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e enum('b', 'a', 'c'))", + enumreorder: EnumReorderStrategyReject, + errorMsg: (&EnumValueOrdinalChangedError{Table: "t1", Column: "e", Value: "'a'", Ordinal: 0, NewOrdinal: 1}).Error(), + }, + { + name: "reorder enum, allow", + from: "create table t1 (id int primary key, e enum('a', 'b', 'c'))", + 
to: "create table t2 (id int primary key, e enum('b', 'a', 'c'))", + diff: "alter table t1 modify column e enum('b', 'a', 'c')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` enum('b', 'a', 'c')", + enumreorder: EnumReorderStrategyAllow, + textdiffs: []string{ + "- `e` enum('a', 'b', 'c'),", + "+ `e` enum('b', 'a', 'c'),", + }, + }, + { + name: "expand set", + from: "create table t1 (id int primary key, e set('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e set('a', 'b', 'c', 'd'))", + diff: "alter table t1 modify column e set('a', 'b', 'c', 'd')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` set('a', 'b', 'c', 'd')", + textdiffs: []string{ + "- `e` set('a', 'b', 'c'),", + "+ `e` set('a', 'b', 'c', 'd'),", + }, + }, + { + name: "truncate set", + from: "create table t1 (id int primary key, e set('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e set('a', 'b'))", + diff: "alter table t1 modify column e set('a', 'b')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` set('a', 'b')", + textdiffs: []string{ + "- `e` set('a', 'b', 'c'),", + "+ `e` set('a', 'b'),", + }, + }, + { + name: "rename set value", + from: "create table t1 (id int primary key, e set('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e set('a', 'b', 'd'))", + diff: "alter table t1 modify column e set('a', 'b', 'd')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` set('a', 'b', 'd')", + textdiffs: []string{ + "- `e` set('a', 'b', 'c'),", + "+ `e` set('a', 'b', 'd'),", + }, + }, + { + name: "reorder set, fail", + from: "create table t1 (id int primary key, e set('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e set('b', 'a', 'c'))", + enumreorder: EnumReorderStrategyReject, + errorMsg: (&EnumValueOrdinalChangedError{Table: "t1", Column: "e", Value: "'a'", Ordinal: 0, NewOrdinal: 1}).Error(), + }, + { + name: "reorder set, allow", + from: "create table t1 (id int primary key, e set('a', 'b', 'c'))", + to: "create table t2 (id int primary key, e set('b', 
'a', 'c'))", + diff: "alter table t1 modify column e set('b', 'a', 'c')", + cdiff: "ALTER TABLE `t1` MODIFY COLUMN `e` set('b', 'a', 'c')", + enumreorder: EnumReorderStrategyAllow, + textdiffs: []string{ + "- `e` set('a', 'b', 'c'),", + "+ `e` set('b', 'a', 'c'),", + }, }, + // keys { name: "added key", @@ -307,6 +562,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` int, key `i_idx` (i))", diff: "alter table t1 add key i_idx (i)", cdiff: "ALTER TABLE `t1` ADD KEY `i_idx` (`i`)", + textdiffs: []string{ + "+ KEY `i_idx` (`i`)", + }, }, { name: "added key without name", @@ -314,6 +572,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` int, key (i))", diff: "alter table t1 add key i (i)", cdiff: "ALTER TABLE `t1` ADD KEY `i` (`i`)", + textdiffs: []string{ + "+ KEY `i` (`i`)", + }, }, { name: "added key without name, conflicting name", @@ -321,6 +582,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` int, key i(i), key (i))", diff: "alter table t1 add key i_2 (i)", cdiff: "ALTER TABLE `t1` ADD KEY `i_2` (`i`)", + textdiffs: []string{ + "+ KEY `i_2` (`i`)", + }, }, { name: "added key without name, conflicting name 2", @@ -328,6 +592,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` int, key i(i), key i_2(i), key (i))", diff: "alter table t1 add key i_3 (i)", cdiff: "ALTER TABLE `t1` ADD KEY `i_3` (`i`)", + textdiffs: []string{ + "+ KEY `i_3` (`i`)", + }, }, { name: "added column and key", @@ -335,6 +602,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, `i` int, key `i_idx` (i))", diff: "alter table t1 add column i int, add key i_idx (i)", cdiff: "ALTER TABLE `t1` ADD COLUMN `i` int, ADD KEY `i_idx` (`i`)", + textdiffs: []string{ + "+ `i` int", + "+ KEY `i_idx` (`i`)", + }, }, { name: "modify column primary key", @@ -342,6 +613,9 @@ func TestCreateTableDiff(t 
*testing.T) { to: "create table t2 (id int primary key)", diff: "alter table t1 add primary key (id)", cdiff: "ALTER TABLE `t1` ADD PRIMARY KEY (`id`)", + textdiffs: []string{ + "+ PRIMARY KEY (`id`)", + }, }, { name: "added primary key", @@ -349,6 +623,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int, primary key(id))", diff: "alter table t1 add primary key (id)", cdiff: "ALTER TABLE `t1` ADD PRIMARY KEY (`id`)", + textdiffs: []string{ + "+ PRIMARY KEY (`id`)", + }, }, { name: "dropped primary key", @@ -356,6 +633,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int)", diff: "alter table t1 drop primary key", cdiff: "ALTER TABLE `t1` DROP PRIMARY KEY", + textdiffs: []string{ + "- PRIMARY KEY (`id`)", + }, }, { name: "dropped key", @@ -363,6 +643,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (`id` int primary key, i int)", diff: "alter table t1 drop key i_idx", cdiff: "ALTER TABLE `t1` DROP KEY `i_idx`", + textdiffs: []string{ + "- KEY `i_idx` (`i`)", + }, }, { name: "dropped key 2", @@ -370,6 +653,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (`id` int, i int, primary key (id))", diff: "alter table t1 drop key i_idx", cdiff: "ALTER TABLE `t1` DROP KEY `i_idx`", + textdiffs: []string{ + "- KEY `i_idx` (`i`)", + }, }, { name: "modified key", @@ -377,6 +663,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (`id` int primary key, i int, key i_idx(i, id))", diff: "alter table t1 drop key i_idx, add key i_idx (i, id)", cdiff: "ALTER TABLE `t1` DROP KEY `i_idx`, ADD KEY `i_idx` (`i`, `id`)", + textdiffs: []string{ + "- KEY `i_idx` (`i`)", + "+ KEY `i_idx` (`i`, `id`)", + }, }, { name: "modified primary key", @@ -384,6 +674,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (`id` int, i int, primary key(id, i),key i_idx(`i`))", diff: "alter table t1 drop primary key, add primary key (id, i)", cdiff: "ALTER TABLE `t1` DROP PRIMARY KEY, ADD PRIMARY 
KEY (`id`, `i`)", + textdiffs: []string{ + "- PRIMARY KEY (`id`)", + "+ PRIMARY KEY (`id`, `i`)", + }, }, { name: "alternative primary key definition, no diff", @@ -398,6 +692,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (the_id int primary key, info int not null);", diff: "alter table t1 drop primary key, drop column id, add column the_id int first, add primary key (the_id)", cdiff: "ALTER TABLE `t1` DROP PRIMARY KEY, DROP COLUMN `id`, ADD COLUMN `the_id` int FIRST, ADD PRIMARY KEY (`the_id`)", + textdiffs: []string{ + "- PRIMARY KEY (`id`)", + "- `id` int,", + "+ `the_id` int,", + "+ PRIMARY KEY (`the_id`)", + }, }, { name: "reordered key, no diff", @@ -425,6 +725,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (`id` int primary key, i int, key i2_idx (`i`, id), key i_idx3(id), key i_idx ( i ) )", diff: "alter table t1 add key i_idx3 (id)", cdiff: "ALTER TABLE `t1` ADD KEY `i_idx3` (`id`)", + textdiffs: []string{ + "+ KEY `i_idx3` (`id`)", + }, }, { name: "key made visible", @@ -432,6 +735,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (`id` int primary key, i int, key i_idx(i))", diff: "alter table t1 alter index i_idx visible", cdiff: "ALTER TABLE `t1` ALTER INDEX `i_idx` VISIBLE", + textdiffs: []string{ + "- KEY `i_idx` (`i`) INVISIBLE", + "+ KEY `i_idx` (`i`)", + }, }, { name: "key made invisible", @@ -439,6 +746,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (`id` int primary key, i int, key i_idx(i) invisible)", diff: "alter table t1 alter index i_idx invisible", cdiff: "ALTER TABLE `t1` ALTER INDEX `i_idx` INVISIBLE", + textdiffs: []string{ + "- KEY `i_idx` (`i`)", + "+ KEY `i_idx` (`i`) INVISIBLE", + }, }, { name: "key made invisible with different case", @@ -446,6 +757,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (`id` int primary key, i int, key i_idx(i) INVISIBLE)", diff: "alter table t1 alter index i_idx invisible", cdiff: "ALTER TABLE `t1` ALTER 
INDEX `i_idx` INVISIBLE", + textdiffs: []string{ + "- KEY `i_idx` (`i`)", + "+ KEY `i_idx` (`i`) INVISIBLE", + }, }, // FULLTEXT keys { @@ -454,6 +769,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name))", diff: "alter table t1 add fulltext key name_ft (`name`)", cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name_ft` (`name`)", + textdiffs: []string{ + "+ FULLTEXT KEY `name_ft` (`name`)", + }, }, { name: "add one fulltext key with explicit parser", @@ -461,6 +779,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key, name tinytext not null, fulltext key name_ft(name) with parser ngram)", diff: "alter table t1 add fulltext key name_ft (`name`) with parser ngram", cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name_ft` (`name`) WITH PARSER ngram", + textdiffs: []string{ + "+ FULLTEXT KEY `name_ft` (`name`) WITH PARSER ngram", + }, }, { name: "add one fulltext key and one normal key", @@ -468,6 +789,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key, name tinytext not null, key name_idx(name(32)), fulltext key name_ft(name))", diff: "alter table t1 add key name_idx (`name`(32)), add fulltext key name_ft (`name`)", cdiff: "ALTER TABLE `t1` ADD KEY `name_idx` (`name`(32)), ADD FULLTEXT KEY `name_ft` (`name`)", + textdiffs: []string{ + "+ KEY `name_idx` (`name`(32)),", + "+ FULLTEXT KEY `name_ft` (`name`)", + }, }, { name: "add two fulltext keys, distinct statements", @@ -475,6 +800,17 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key, name1 tinytext not null, name2 tinytext not null, fulltext key name1_ft(name1), fulltext key name2_ft(name2))", diffs: []string{"alter table t1 add fulltext key name1_ft (name1)", "alter table t1 add fulltext key name2_ft (name2)"}, cdiffs: []string{"ALTER TABLE `t1` ADD FULLTEXT KEY `name1_ft` (`name1`)", "ALTER TABLE `t1` ADD FULLTEXT KEY `name2_ft` 
(`name2`)"}, + textdiffs: []string{ + "+ FULLTEXT KEY `name1_ft` (`name1`)", + "+ FULLTEXT KEY `name2_ft` (`name2`)", + }, + }, + { + name: "add two fulltext keys, distinct statements, reject", + from: "create table t1 (id int primary key, name1 tinytext not null, name2 tinytext not null)", + to: "create table t1 (id int primary key, name1 tinytext not null, name2 tinytext not null, fulltext key name1_ft(name1), fulltext key name2_ft(name2))", + subsequent: SubsequentDiffStrategyReject, + errorMsg: (&SubsequentDiffRejectedError{Table: "t1"}).Error(), }, { name: "add two fulltext keys, unify statements", @@ -483,6 +819,23 @@ func TestCreateTableDiff(t *testing.T) { fulltext: FullTextKeyUnifyStatements, diff: "alter table t1 add fulltext key name1_ft (name1), add fulltext key name2_ft (name2)", cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name1_ft` (`name1`), ADD FULLTEXT KEY `name2_ft` (`name2`)", + textdiffs: []string{ + "+ FULLTEXT KEY `name1_ft` (`name1`)", + "+ FULLTEXT KEY `name2_ft` (`name2`)", + }, + }, + { + name: "add two fulltext keys, unify statements, no reject", + from: "create table t1 (id int primary key, name1 tinytext not null, name2 tinytext not null)", + to: "create table t1 (id int primary key, name1 tinytext not null, name2 tinytext not null, fulltext key name1_ft(name1), fulltext key name2_ft(name2))", + fulltext: FullTextKeyUnifyStatements, + subsequent: SubsequentDiffStrategyReject, + diff: "alter table t1 add fulltext key name1_ft (name1), add fulltext key name2_ft (name2)", + cdiff: "ALTER TABLE `t1` ADD FULLTEXT KEY `name1_ft` (`name1`), ADD FULLTEXT KEY `name2_ft` (`name2`)", + textdiffs: []string{ + "+ FULLTEXT KEY `name1_ft` (`name1`)", + "+ FULLTEXT KEY `name2_ft` (`name2`)", + }, }, { name: "no fulltext diff", @@ -525,6 +878,10 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check check1, add constraint chk_abc123 check (i < 5)", cdiff: "ALTER TABLE `t1` DROP CHECK `check1`, ADD CONSTRAINT `chk_abc123` CHECK (`i` 
< 5)", constraint: ConstraintNamesStrict, + textdiffs: []string{ + "- CONSTRAINT `check1` CHECK (`i` < 5)", + "+ CONSTRAINT `chk_abc123` CHECK (`i` < 5)", + }, }, { name: "check constraints, different name, ignore vitess, non vitess names", @@ -533,6 +890,10 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check check1, add constraint chk_abc123 check (i < 5)", cdiff: "ALTER TABLE `t1` DROP CHECK `check1`, ADD CONSTRAINT `chk_abc123` CHECK (`i` < 5)", constraint: ConstraintNamesIgnoreVitess, + textdiffs: []string{ + "- CONSTRAINT `check1` CHECK (`i` < 5)", + "+ CONSTRAINT `chk_abc123` CHECK (`i` < 5)", + }, }, { name: "check constraints, different name, ignore vitess, vitess names, no match", @@ -541,6 +902,10 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check check1, add constraint check2_7fp024p4rxvr858tsaggvf9dw check (i < 5)", cdiff: "ALTER TABLE `t1` DROP CHECK `check1`, ADD CONSTRAINT `check2_7fp024p4rxvr858tsaggvf9dw` CHECK (`i` < 5)", constraint: ConstraintNamesIgnoreVitess, + textdiffs: []string{ + "- CONSTRAINT `check1` CHECK (`i` < 5)", + "+ CONSTRAINT `check2_7fp024p4rxvr858tsaggvf9dw` CHECK (`i` < 5)", + }, }, { name: "check constraints, different name, ignore vitess, vitess names match", @@ -598,6 +963,9 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 add constraint check3 check (i != 3)", cdiff: "ALTER TABLE `t1` ADD CONSTRAINT `check3` CHECK (`i` != 3)", constraint: ConstraintNamesIgnoreAll, + textdiffs: []string{ + "+ CONSTRAINT `check3` CHECK (`i` != 3)", + }, }, { name: "check constraints, remove", @@ -606,6 +974,9 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check check3", cdiff: "ALTER TABLE `t1` DROP CHECK `check3`", constraint: ConstraintNamesIgnoreAll, + textdiffs: []string{ + "- CONSTRAINT `check3` CHECK (`i` != 3)", + }, }, { name: "check constraints, remove duplicate", @@ -614,6 +985,9 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table 
t1 drop check check3", cdiff: "ALTER TABLE `t1` DROP CHECK `check3`", constraint: ConstraintNamesIgnoreAll, + textdiffs: []string{ + "- CONSTRAINT `check3` CHECK (`i` > 2)", + }, }, { name: "check constraints, remove, ignore vitess, no match", @@ -622,6 +996,13 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check chk_123abc, drop check check3, drop check chk_789def, add constraint check1 check (i < 5), add constraint check2 check (i > 2)", cdiff: "ALTER TABLE `t1` DROP CHECK `chk_123abc`, DROP CHECK `check3`, DROP CHECK `chk_789def`, ADD CONSTRAINT `check1` CHECK (`i` < 5), ADD CONSTRAINT `check2` CHECK (`i` > 2)", constraint: ConstraintNamesIgnoreVitess, + textdiffs: []string{ + "- CONSTRAINT `chk_123abc` CHECK (`i` > 2)", + "- CONSTRAINT `check3` CHECK (`i` != 3)", + "- CONSTRAINT `chk_789def` CHECK (`i` < 5)", + "+ CONSTRAINT `check1` CHECK (`i` < 5)", + "+ CONSTRAINT `check2` CHECK (`i` > 2)", + }, }, { name: "check constraints, remove, ignore vitess, match", @@ -630,6 +1011,9 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check check3", cdiff: "ALTER TABLE `t1` DROP CHECK `check3`", constraint: ConstraintNamesIgnoreVitess, + textdiffs: []string{ + "- CONSTRAINT `check3` CHECK (`i` != 3)", + }, }, { name: "check constraints, remove, strict", @@ -638,6 +1022,13 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop check chk_123abc, drop check check3, drop check chk_789def, add constraint check1 check (i < 5), add constraint check2 check (i > 2)", cdiff: "ALTER TABLE `t1` DROP CHECK `chk_123abc`, DROP CHECK `check3`, DROP CHECK `chk_789def`, ADD CONSTRAINT `check1` CHECK (`i` < 5), ADD CONSTRAINT `check2` CHECK (`i` > 2)", constraint: ConstraintNamesStrict, + textdiffs: []string{ + "- CONSTRAINT `chk_123abc` CHECK (`i` > 2)", + "- CONSTRAINT `check3` CHECK (`i` != 3)", + "- CONSTRAINT `chk_789def` CHECK (`i` < 5)", + "+ CONSTRAINT `check1` CHECK (`i` < 5)", + "+ CONSTRAINT `check2` CHECK (`i` > 2)", 
+ }, }, // foreign keys { @@ -646,6 +1037,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key i_idex (i))", diff: "alter table t1 drop foreign key f", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`", + textdiffs: []string{ + "- CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "add foreign key", @@ -653,6 +1047,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id))", diff: "alter table t1 add constraint f foreign key (i) references parent (id)", cdiff: "ALTER TABLE `t1` ADD CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + textdiffs: []string{ + "+ CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "add foreign key and index", @@ -660,6 +1057,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key ix(i), constraint f foreign key (i) references parent(id))", diff: "alter table t1 add key ix (i), add constraint f foreign key (i) references parent (id)", cdiff: "ALTER TABLE `t1` ADD KEY `ix` (`i`), ADD CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + textdiffs: []string{ + "+ KEY `ix` (`i`)", + "+ CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "identical foreign key", @@ -672,6 +1073,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key ix(i), constraint f2 foreign key (i) references parent(id) on delete cascade)", diff: "alter table t1 drop foreign key f1, add constraint f2 foreign key (i) references parent (id) on delete cascade", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f1`, ADD CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE CASCADE", + textdiffs: []string{ + "- CONSTRAINT `f1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + "+ CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + 
}, }, { name: "similar foreign key under different name, ignore names", @@ -685,6 +1090,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key i_idex (i), constraint f1 foreign key (i) references parent(id))", diff: "alter table t1 drop foreign key f2", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f2`", + textdiffs: []string{ + "- CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "two identical foreign keys, dropping one, ignore vitess names", @@ -693,6 +1101,9 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop foreign key f2", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f2`", constraint: ConstraintNamesIgnoreVitess, + textdiffs: []string{ + "- CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "two identical foreign keys, dropping one, ignore all names", @@ -701,6 +1112,9 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 drop foreign key f2", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f2`", constraint: ConstraintNamesIgnoreAll, + textdiffs: []string{ + "- CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "add two identical foreign key constraints, ignore all names", @@ -709,6 +1123,10 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t1 add constraint f1 foreign key (i) references parent (id), add constraint f2 foreign key (i) references parent (id)", cdiff: "ALTER TABLE `t1` ADD CONSTRAINT `f1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`), ADD CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", constraint: ConstraintNamesIgnoreAll, + textdiffs: []string{ + "+ CONSTRAINT `f1` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + "+ CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`)", + }, }, { name: "implicit foreign key indexes", @@ -731,6 +1149,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key ix(i), constraint f foreign key 
(i) references parent(id) on delete set null)", diff: "alter table t1 drop foreign key f, add constraint f foreign key (i) references parent (id) on delete set null", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`, ADD CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE SET NULL", + textdiffs: []string{ + "- CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE CASCADE", + "+ CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE SET NULL", + }, }, { name: "drop and add foreign key", @@ -738,6 +1160,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key ix(i), constraint f2 foreign key (i) references parent(id) on delete set null)", diff: "alter table t1 drop foreign key f, add constraint f2 foreign key (i) references parent (id) on delete set null", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`, ADD CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE SET NULL", + textdiffs: []string{ + "- CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE CASCADE", + "+ CONSTRAINT `f2` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE SET NULL", + }, }, { name: "ignore different foreign key order", @@ -751,6 +1177,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t2 (id int primary key, i int, key f(i))", diff: "alter table t1 drop foreign key f", cdiff: "ALTER TABLE `t1` DROP FOREIGN KEY `f`", + textdiffs: []string{ + "- CONSTRAINT `f` FOREIGN KEY (`i`) REFERENCES `parent` (`id`) ON DELETE CASCADE", + }, }, // partitions { @@ -759,6 +1188,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key, a int) partition by hash (id) partitions 4", diff: "alter table t1 add column a int", cdiff: "ALTER TABLE `t1` ADD COLUMN `a` int", + textdiffs: []string{ + "+ `a` int", + }, }, { name: "partitioning, column case", @@ -766,13 +1198,33 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int 
primary key, a int) partition by hash (ID) partitions 4", diff: "alter table t1 add column a int \npartition by hash (ID) partitions 4", cdiff: "ALTER TABLE `t1` ADD COLUMN `a` int \nPARTITION BY HASH (`ID`) PARTITIONS 4", + textdiffs: []string{ + "+ `a` int", + "-PARTITION BY HASH (`id`) PARTITIONS 4", + "+PARTITION BY HASH (`ID`) PARTITIONS 4", + }, + }, + { + name: "add partitioning", + from: "create table t1 (id int primary key, a int)", + to: "create table t1 (id int primary key, a int) partition by hash (id) partitions 4", + diff: "alter table t1 \npartition by hash (id) partitions 4", + cdiff: "ALTER TABLE `t1` \nPARTITION BY HASH (`id`) PARTITIONS 4", + textdiffs: []string{ + "+PARTITION BY HASH (`id`) PARTITIONS 4", + }, }, + { name: "remove partitioning", from: "create table t1 (id int primary key) partition by hash (id) partitions 4", to: "create table t1 (id int primary key, a int)", diff: "alter table t1 add column a int remove partitioning", cdiff: "ALTER TABLE `t1` ADD COLUMN `a` int REMOVE PARTITIONING", + textdiffs: []string{ + "+ `a` int", + "-PARTITION BY HASH (`id`) PARTITIONS 4", + }, }, { name: "remove partitioning 2", @@ -780,6 +1232,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key)", diff: "alter table t1 remove partitioning", cdiff: "ALTER TABLE `t1` REMOVE PARTITIONING", + textdiffs: []string{ + "-PARTITION BY HASH (`id`) PARTITIONS 4", + }, }, { name: "change partitioning hash", @@ -787,6 +1242,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) partition by hash (id) partitions 5", diff: "alter table t1 \npartition by hash (id) partitions 5", cdiff: "ALTER TABLE `t1` \nPARTITION BY HASH (`id`) PARTITIONS 5", + textdiffs: []string{ + "-PARTITION BY HASH (`id`) PARTITIONS 4", + "+PARTITION BY HASH (`id`) PARTITIONS 5", + }, }, { name: "change partitioning key", @@ -794,6 +1253,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) 
partition by hash (id) partitions 5", diff: "alter table t1 \npartition by hash (id) partitions 5", cdiff: "ALTER TABLE `t1` \nPARTITION BY HASH (`id`) PARTITIONS 5", + textdiffs: []string{ + "-PARTITION BY KEY (`id`) PARTITIONS 2", + "+PARTITION BY HASH (`id`) PARTITIONS 5", + }, }, { name: "change partitioning list", @@ -801,6 +1264,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) partition by list (id) (partition p1 values in(11,21), partition p2 values in (12,22))", diff: "alter table t1 \npartition by list (id)\n(partition p1 values in (11, 21),\n partition p2 values in (12, 22))", cdiff: "ALTER TABLE `t1` \nPARTITION BY LIST (`id`)\n(PARTITION `p1` VALUES IN (11, 21),\n PARTITION `p2` VALUES IN (12, 22))", + textdiffs: []string{ + "-PARTITION BY KEY (`id`) PARTITIONS 2", + "+PARTITION BY LIST (`id`)", + "+(PARTITION `p1` VALUES IN (11, 21),", + "+ PARTITION `p2` VALUES IN (12, 22))", + }, }, { name: "change partitioning range: rotate", @@ -808,6 +1277,16 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20), partition p3 values less than (30), partition p4 values less than (40))", diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (20),\n partition p3 values less than (30),\n partition p4 values less than (40))", cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (20),\n PARTITION `p3` VALUES LESS THAN (30),\n PARTITION `p4` VALUES LESS THAN (40))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p2` VALUES LESS THAN (20),", + "+ PARTITION `p3` VALUES LESS THAN (30),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, }, { name: "change partitioning range: ignore rotate", @@ 
-815,6 +1294,34 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20), partition p3 values less than (30), partition p4 values less than (40))", rotation: RangeRotationIgnore, }, + { + name: "change partitioning range: don't rotate, single partition", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20))", + rotation: RangeRotationFullSpec, + diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (20))", + cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (20))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p2` VALUES LESS THAN (20))", + }, + }, + { + name: "change partitioning range: don't rotate, single partition", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20))", + rotation: RangeRotationDistinctStatements, + diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (20))", + cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (20))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p2` VALUES LESS THAN (20))", + }, + }, { name: "change partitioning range: statements, drop", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", @@ -822,6 +1329,20 @@ func TestCreateTableDiff(t *testing.T) { rotation: 
RangeRotationDistinctStatements, diff: "alter table t1 drop partition p1", cdiff: "ALTER TABLE `t1` DROP PARTITION `p1`", + textdiffs: []string{ + "-(PARTITION `p1` VALUES LESS THAN (10),", + }, + }, + { + name: "change partitioning range: statements, drop middle", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p3 values less than (30))", + rotation: RangeRotationDistinctStatements, + diff: "alter table t1 drop partition p2", + cdiff: "ALTER TABLE `t1` DROP PARTITION `p2`", + textdiffs: []string{ + "- PARTITION `p2` VALUES LESS THAN (20),", + }, }, { name: "change partitioning range: statements, add", @@ -830,14 +1351,25 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationDistinctStatements, diff: "alter table t1 add partition (partition p3 values less than (30))", cdiff: "ALTER TABLE `t1` ADD PARTITION (PARTITION `p3` VALUES LESS THAN (30))", + textdiffs: []string{ + "+ PARTITION `p3` VALUES LESS THAN (30)", + }, }, { - name: "change partitioning range: statements, multiple drops", + name: "change partitioning range: statements, multiple drops, distinct", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", to: "create table t1 (id int primary key) partition by range (id) (partition p3 values less than (30))", rotation: RangeRotationDistinctStatements, - diffs: []string{"alter table t1 drop partition p1", "alter table t1 drop partition p2"}, - cdiffs: []string{"ALTER TABLE `t1` DROP PARTITION `p1`", "ALTER TABLE `t1` DROP PARTITION `p2`"}, + diffs: []string{"alter table t1 drop partition p1, p2"}, + cdiffs: []string{"ALTER TABLE `t1` DROP PARTITION `p1`, `p2`"}, + textdiffs: 
[]string{ + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + }, + atomicdiffs: []string{ + "ALTER TABLE `t1` DROP PARTITION `p1`", + "ALTER TABLE `t1` DROP PARTITION `p2`", + }, }, { name: "change partitioning range: statements, multiple adds", @@ -846,6 +1378,10 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationDistinctStatements, diffs: []string{"alter table t1 add partition (partition p2 values less than (20))", "alter table t1 add partition (partition p3 values less than (30))"}, cdiffs: []string{"ALTER TABLE `t1` ADD PARTITION (PARTITION `p2` VALUES LESS THAN (20))", "ALTER TABLE `t1` ADD PARTITION (PARTITION `p3` VALUES LESS THAN (30))"}, + textdiffs: []string{ + "+ PARTITION `p2` VALUES LESS THAN (20),", + "+ PARTITION `p3` VALUES LESS THAN (30)", + }, }, { name: "change partitioning range: statements, multiple, assorted", @@ -854,14 +1390,39 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationDistinctStatements, diffs: []string{"alter table t1 drop partition p1", "alter table t1 add partition (partition p4 values less than (40))"}, cdiffs: []string{"ALTER TABLE `t1` DROP PARTITION `p1`", "ALTER TABLE `t1` ADD PARTITION (PARTITION `p4` VALUES LESS THAN (40))"}, + textdiffs: []string{ + "-(PARTITION `p1` VALUES LESS THAN (10),", + "+ PARTITION `p4` VALUES LESS THAN (40)", + }, + }, + { + name: "change partitioning range: statements, multiple, reject", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20), partition p3 values less than (30), partition p4 values less than (40))", + rotation: RangeRotationDistinctStatements, + subsequent: SubsequentDiffStrategyReject, + errorMsg: (&SubsequentDiffRejectedError{Table: "t1"}).Error(), }, { name: "change partitioning 
range: mixed with nonpartition changes", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", to: "create table t1 (id int primary key, i int) partition by range (id) (partition p3 values less than (30))", rotation: RangeRotationDistinctStatements, - diffs: []string{"alter table t1 add column i int", "alter table t1 drop partition p1", "alter table t1 drop partition p2"}, - cdiffs: []string{"ALTER TABLE `t1` ADD COLUMN `i` int", "ALTER TABLE `t1` DROP PARTITION `p1`", "ALTER TABLE `t1` DROP PARTITION `p2`"}, + diffs: []string{"alter table t1 add column i int", "alter table t1 drop partition p1, p2"}, + cdiffs: []string{"ALTER TABLE `t1` ADD COLUMN `i` int", "ALTER TABLE `t1` DROP PARTITION `p1`, `p2`"}, + textdiffs: []string{ + "+ `i` int", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + }, + }, + { + name: "change partitioning range: mixed with nonpartition changes, reject", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key, i int) partition by range (id) (partition p3 values less than (30))", + rotation: RangeRotationDistinctStatements, + subsequent: SubsequentDiffStrategyReject, + errorMsg: (&SubsequentDiffRejectedError{Table: "t1"}).Error(), }, { name: "change partitioning range: single partition change, mixed with nonpartition changes", @@ -870,6 +1431,10 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationDistinctStatements, diffs: []string{"alter table t1 add column i int", "alter table t1 drop partition p1"}, cdiffs: []string{"ALTER TABLE `t1` ADD COLUMN `i` int", "ALTER TABLE `t1` DROP PARTITION `p1`"}, + textdiffs: []string{ + "+ `i` int", + "-(PARTITION `p1` VALUES LESS THAN (10),", + }, }, { name: 
"change partitioning range: mixed with nonpartition changes, full spec", @@ -878,38 +1443,135 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationFullSpec, diff: "alter table t1 add column i int \npartition by range (id)\n(partition p3 values less than (30))", cdiff: "ALTER TABLE `t1` ADD COLUMN `i` int \nPARTITION BY RANGE (`id`)\n(PARTITION `p3` VALUES LESS THAN (30))", - }, - { - name: "change partitioning range: ignore rotate, not a rotation", + textdiffs: []string{ + "+ `i` int", + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p3` VALUES LESS THAN (30))", + }, + }, + { + name: "change partitioning range: not a rotation, ignore", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (25), partition p3 values less than (30), partition p4 values less than (40))", rotation: RangeRotationIgnore, diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (25),\n partition p3 values less than (30),\n partition p4 values less than (40))", cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (25),\n PARTITION `p3` VALUES LESS THAN (30),\n PARTITION `p4` VALUES LESS THAN (40))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p2` VALUES LESS THAN (25)", + "+ PARTITION `p3` VALUES LESS THAN (30),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, + }, + { + name: "change partitioning range: not a rotation, ignore 2", + from: "create 
table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (25), partition p3 values less than (30), partition p4 values less than (40))", + rotation: RangeRotationIgnore, + diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (25),\n partition p3 values less than (30),\n partition p4 values less than (40))", + cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (25),\n PARTITION `p3` VALUES LESS THAN (30),\n PARTITION `p4` VALUES LESS THAN (40))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p2` VALUES LESS THAN (25)", + "+ PARTITION `p3` VALUES LESS THAN (30),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, + }, + { + name: "change partitioning range: complex rotate, ignore", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20), partition p3 values less than (35), partition p4 values less than (40))", + rotation: RangeRotationIgnore, }, { - name: "change partitioning range: ignore rotate, not a rotation 2", + name: "change partitioning range: complex rotate, distinct", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20), partition p3 values less than (35), 
partition p4 values less than (40))", + rotation: RangeRotationDistinctStatements, + diffs: []string{"alter table t1 drop partition p1, p3", "alter table t1 add partition (partition p3 values less than (35))", "alter table t1 add partition (partition p4 values less than (40))"}, + cdiffs: []string{"ALTER TABLE `t1` DROP PARTITION `p1`, `p3`", "ALTER TABLE `t1` ADD PARTITION (PARTITION `p3` VALUES LESS THAN (35))", "ALTER TABLE `t1` ADD PARTITION (PARTITION `p4` VALUES LESS THAN (40))"}, + textdiffs: []string{ + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+ PARTITION `p3` VALUES LESS THAN (35),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, + atomicdiffs: []string{ + "ALTER TABLE `t1` DROP PARTITION `p1`", + "ALTER TABLE `t1` DROP PARTITION `p3`", + }, + }, + { + name: "change partitioning range: complex rotate 2, ignore", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (20), partition pX values less than (30), partition p4 values less than (40))", rotation: RangeRotationIgnore, - diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (20),\n partition p3 values less than (35),\n partition p4 values less than (40))", - cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (20),\n PARTITION `p3` VALUES LESS THAN (35),\n PARTITION `p4` VALUES LESS THAN (40))", }, { - name: "change partitioning range: ignore rotate, not a rotation 3", + name: "change partitioning range: complex rotate 2, distinct", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", to: "create table t1 (id int primary key) partition by range 
(id) (partition p2 values less than (20), partition pX values less than (30), partition p4 values less than (40))", - rotation: RangeRotationIgnore, - diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (20),\n partition pX values less than (30),\n partition p4 values less than (40))", - cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (20),\n PARTITION `pX` VALUES LESS THAN (30),\n PARTITION `p4` VALUES LESS THAN (40))", + rotation: RangeRotationDistinctStatements, + diffs: []string{"alter table t1 drop partition p1, p3", "alter table t1 add partition (partition pX values less than (30))", "alter table t1 add partition (partition p4 values less than (40))"}, + cdiffs: []string{"ALTER TABLE `t1` DROP PARTITION `p1`, `p3`", "ALTER TABLE `t1` ADD PARTITION (PARTITION `pX` VALUES LESS THAN (30))", "ALTER TABLE `t1` ADD PARTITION (PARTITION `p4` VALUES LESS THAN (40))"}, + textdiffs: []string{ + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+ PARTITION `pX` VALUES LESS THAN (30),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, + atomicdiffs: []string{ + "ALTER TABLE `t1` DROP PARTITION `p1`", + "ALTER TABLE `t1` DROP PARTITION `p3`", + }, + }, + { + name: "change partitioning range: not a rotation", + from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", + to: "create table t1 (id int primary key) partition by range (id) (partition p2 values less than (25), partition p3 values less than (30), partition p4 values less than (40))", + rotation: RangeRotationDistinctStatements, + diff: "alter table t1 \npartition by range (id)\n(partition p2 values less than (25),\n partition p3 values less than (30),\n partition p4 values less than (40))", + cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p2` VALUES LESS THAN (25),\n 
PARTITION `p3` VALUES LESS THAN (30),\n PARTITION `p4` VALUES LESS THAN (40))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p2` VALUES LESS THAN (25)", + "+ PARTITION `p3` VALUES LESS THAN (30),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, }, { - name: "change partitioning range: ignore rotate, not a rotation 4", + name: "change partitioning range: ignore rotate, not a rotation 2", from: "create table t1 (id int primary key) partition by range (id) (partition p1 values less than (10), partition p2 values less than (20), partition p3 values less than (30))", to: "create table t1 (id int primary key) partition by range (id) (partition pX values less than (20), partition p3 values less than (30), partition p4 values less than (40))", rotation: RangeRotationIgnore, diff: "alter table t1 \npartition by range (id)\n(partition pX values less than (20),\n partition p3 values less than (30),\n partition p4 values less than (40))", cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `pX` VALUES LESS THAN (20),\n PARTITION `p3` VALUES LESS THAN (30),\n PARTITION `p4` VALUES LESS THAN (40))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `pX` VALUES LESS THAN (20)", + "+ PARTITION `p3` VALUES LESS THAN (30),", + "+ PARTITION `p4` VALUES LESS THAN (40))", + }, }, { name: "change partitioning range: ignore rotate, nothing shared", @@ -918,6 +1580,16 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationIgnore, diff: "alter table t1 \npartition by range (id)\n(partition p4 values less than (40),\n partition p5 values less than (50),\n partition p6 values less than 
(60))", cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `p4` VALUES LESS THAN (40),\n PARTITION `p5` VALUES LESS THAN (50),\n PARTITION `p6` VALUES LESS THAN (60))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `p4` VALUES LESS THAN (40)", + "+ PARTITION `p5` VALUES LESS THAN (50),", + "+ PARTITION `p6` VALUES LESS THAN (60))", + }, }, { name: "change partitioning range: ignore rotate, no names shared, definitions shared", @@ -926,6 +1598,16 @@ func TestCreateTableDiff(t *testing.T) { rotation: RangeRotationIgnore, diff: "alter table t1 \npartition by range (id)\n(partition pA values less than (20),\n partition pB values less than (30),\n partition pC values less than (40))", cdiff: "ALTER TABLE `t1` \nPARTITION BY RANGE (`id`)\n(PARTITION `pA` VALUES LESS THAN (20),\n PARTITION `pB` VALUES LESS THAN (30),\n PARTITION `pC` VALUES LESS THAN (40))", + textdiffs: []string{ + "-PARTITION BY RANGE (`id`)", + "-(PARTITION `p1` VALUES LESS THAN (10),", + "- PARTITION `p2` VALUES LESS THAN (20),", + "- PARTITION `p3` VALUES LESS THAN (30))", + "+PARTITION BY RANGE (`id`)", + "+(PARTITION `pA` VALUES LESS THAN (20)", + "+ PARTITION `pB` VALUES LESS THAN (30),", + "+ PARTITION `pC` VALUES LESS THAN (40))", + }, }, // @@ -956,6 +1638,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) row_format=compressed", diff: "alter table t1 row_format COMPRESSED", cdiff: "ALTER TABLE `t1` ROW_FORMAT COMPRESSED", + textdiffs: []string{ + "+) ROW_FORMAT COMPRESSED", + }, }, { name: "add table option 2", @@ -963,6 +1648,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) character set=utf8, row_format=compressed", diff: "alter table t1 row_format COMPRESSED", cdiff: "ALTER TABLE `t1` ROW_FORMAT COMPRESSED", + 
textdiffs: []string{ + "+ ROW_FORMAT COMPRESSED", + }, }, { name: "add table option 3", @@ -970,6 +1658,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) row_format=compressed, character set=utf8", diff: "alter table t1 row_format COMPRESSED", cdiff: "ALTER TABLE `t1` ROW_FORMAT COMPRESSED", + textdiffs: []string{ + "+) ROW_FORMAT COMPRESSED", + }, }, { name: "add table option 3", @@ -977,6 +1668,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) row_format=compressed, character set=utf8, checksum=1", diff: "alter table t1 row_format COMPRESSED checksum 1", cdiff: "ALTER TABLE `t1` ROW_FORMAT COMPRESSED CHECKSUM 1", + textdiffs: []string{ + "+) ROW_FORMAT COMPRESSED", + "+ CHECKSUM 1", + }, }, { name: "modify table option 1", @@ -984,6 +1679,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) character set=utf8mb4", diff: "alter table t1 charset utf8mb4", cdiff: "ALTER TABLE `t1` CHARSET utf8mb4", + textdiffs: []string{ + "-) CHARSET utf8mb3", + "+) CHARSET utf8mb4", + }, }, { name: "modify table option 2", @@ -991,6 +1690,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) character set=utf8mb4", diff: "alter table t1 charset utf8mb4", cdiff: "ALTER TABLE `t1` CHARSET utf8mb4", + textdiffs: []string{ + "-) CHARSET utf8mb3", + "+) CHARSET utf8mb4", + }, }, { name: "modify table option 3", @@ -998,6 +1701,10 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) charset=utf8mb4", diff: "alter table t1 charset utf8mb4", cdiff: "ALTER TABLE `t1` CHARSET utf8mb4", + textdiffs: []string{ + "-) CHARSET utf8mb3", + "+) CHARSET utf8mb4", + }, }, { name: "modify table option 4", @@ -1005,6 +1712,12 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) row_format=compressed, character set=utf8mb4, checksum=1", diff: "alter table t1 charset utf8mb4 row_format 
COMPRESSED checksum 1", cdiff: "ALTER TABLE `t1` CHARSET utf8mb4 ROW_FORMAT COMPRESSED CHECKSUM 1", + textdiffs: []string{ + "-) CHARSET utf8mb3", + "+) ROW_FORMAT COMPRESSED,", + "+ CHARSET utf8mb4,", + "+ CHECKSUM 1", + }, }, { name: "remove table option 1", @@ -1117,6 +1830,14 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t (id int, primary key(id)) COLLATE utf8mb4_0900_ai_ci", charset: TableCharsetCollateIgnoreEmpty, }, + { + name: "non empty collate with ignore empty table collate", + from: "create table t (id int, primary key(id)) COLLATE utf8mb4_0900_bin", + to: "create table t (id int, primary key(id)) COLLATE utf8mb4_0900_ai_ci", + charset: TableCharsetCollateIgnoreEmpty, + diff: "alter table t collate utf8mb4_0900_ai_ci", + cdiff: "ALTER TABLE `t` COLLATE utf8mb4_0900_ai_ci", + }, { name: "ignore empty table charset and collate in target", from: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_ai_ci", @@ -1149,6 +1870,62 @@ func TestCreateTableDiff(t *testing.T) { diff: "alter table t modify column t1 varchar(128) not null, modify column t2 varchar(128) not null, modify column t3 tinytext, charset utf8mb4", cdiff: "ALTER TABLE `t` MODIFY COLUMN `t1` varchar(128) NOT NULL, MODIFY COLUMN `t2` varchar(128) NOT NULL, MODIFY COLUMN `t3` tinytext, CHARSET utf8mb4", }, + { + name: "change table collation", + from: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_ai_ci", + to: "create table t (id int, primary key(id)) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_bin", + diff: "alter table t collate utf8mb4_0900_bin", + cdiff: "ALTER TABLE `t` COLLATE utf8mb4_0900_bin", + }, + { + name: "change table collation with textual column", + from: "create table t (id int, t varchar(192) not null) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_ai_ci", + to: "create table t (id int, t varchar(192) not null) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_bin", + diff: "alter 
table t modify column t varchar(192) not null, collate utf8mb4_0900_bin", + cdiff: "ALTER TABLE `t` MODIFY COLUMN `t` varchar(192) NOT NULL, COLLATE utf8mb4_0900_bin", + }, + { + name: "change table collation with textual column that has collation", + from: "create table t (id int, t varchar(192) not null collate utf8mb4_0900_bin) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_ai_ci", + to: "create table t (id int, t varchar(192) not null collate utf8mb4_0900_bin) DEFAULT CHARSET = utf8mb4 COLLATE utf8mb4_0900_bin", + diff: "alter table t collate utf8mb4_0900_bin", + cdiff: "ALTER TABLE `t` COLLATE utf8mb4_0900_bin", + }, + { + name: "ignore identical implicit charset", + from: "create table t (id int primary key, v varchar(64) character set utf8mb3 collate utf8mb3_bin)", + to: "create table t (id int primary key, v varchar(64) collate utf8mb3_bin)", + }, + { + name: "ignore identical implicit ascii charset", + from: "create table t (id int primary key, v varchar(64) character set ascii collate ascii_general_ci)", + to: "create table t (id int primary key, v varchar(64) collate ascii_general_ci)", + }, + { + name: "ignore identical implicit collation", + from: "create table t (id int primary key, v varchar(64) character set utf8mb3 collate utf8mb3_general_ci)", + to: "create table t (id int primary key, v varchar(64) character set utf8mb3)", + }, + { + name: "ignore identical implicit collation, reverse", + from: "create table t (id int primary key, v varchar(64) character set utf8mb3)", + to: "create table t (id int primary key, v varchar(64) character set utf8mb3 collate utf8mb3_general_ci)", + }, + { + name: "implicit charset and implciit collation", + from: "create table t (id int primary key, v varchar(64) character set utf8mb3)", + to: "create table t (id int primary key, v varchar(64) collate utf8mb3_general_ci)", + }, + { + name: "ignore identical implicit ascii collation", + from: "create table t (id int primary key, v varchar(64) character set ascii 
collate ascii_general_ci)", + to: "create table t (id int primary key, v varchar(64) character set ascii)", + }, + { + name: "implicit charset and implciit collation, ascii", + from: "create table t (id int primary key, v varchar(64) collate ascii_general_ci)", + to: "create table t (id int primary key, v varchar(64) character set ascii)", + }, { name: "normalized unsigned attribute", from: "create table t1 (id int primary key)", @@ -1162,6 +1939,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) engine=innodb, character set=utf8", diff: "alter table t1 engine InnoDB", cdiff: "ALTER TABLE `t1` ENGINE InnoDB", + textdiffs: []string{ + "+) ENGINE InnoDB", + }, }, { name: "normalized ENGINE MyISAM value", @@ -1169,6 +1949,20 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key) engine=myisam, character set=utf8", diff: "alter table t1 engine MyISAM", cdiff: "ALTER TABLE `t1` ENGINE MyISAM", + textdiffs: []string{ + "+) ENGINE MyISAM", + }, + }, + { + name: "modify ENGINE option", + from: "create table t1 (id int primary key) engine=myisam", + to: "create table t1 (id int primary key) engine=InnoDB", + diff: "alter table t1 engine InnoDB", + cdiff: "ALTER TABLE `t1` ENGINE InnoDB", + textdiffs: []string{ + "-) ENGINE MyISAM", + "+) ENGINE InnoDB", + }, }, { name: "normalized ENGINE MEMORY value", @@ -1204,6 +1998,9 @@ func TestCreateTableDiff(t *testing.T) { to: "create table t1 (id int primary key)", diff: "alter table t1 comment ''", cdiff: "ALTER TABLE `t1` COMMENT ''", + textdiffs: []string{ + "-) COMMENT 'foo'", + }, }, // expressions { @@ -1265,21 +2062,22 @@ func TestCreateTableDiff(t *testing.T) { }, } standardHints := DiffHints{} + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := env.Parser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := 
fromStmt.(*sqlparser.CreateTable) require.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := env.Parser().ParseStrictDDL(ts.to) require.NoError(t, err) toCreateTable, ok := toStmt.(*sqlparser.CreateTable) require.True(t, ok) - c, err := NewCreateTableEntity(fromCreateTable) + c, err := NewCreateTableEntity(env, fromCreateTable) require.NoError(t, err) - other, err := NewCreateTableEntity(toCreateTable) + other, err := NewCreateTableEntity(env, toCreateTable) require.NoError(t, err) hints := standardHints @@ -1290,6 +2088,8 @@ func TestCreateTableDiff(t *testing.T) { hints.FullTextKeyStrategy = ts.fulltext hints.TableCharsetCollateStrategy = ts.charset hints.AlterTableAlgorithmStrategy = ts.algorithm + hints.EnumReorderStrategy = ts.enumreorder + hints.SubsequentDiffStrategy = ts.subsequent alter, err := c.Diff(other, &hints) require.Equal(t, len(ts.diffs), len(ts.cdiffs)) @@ -1297,13 +2097,20 @@ func TestCreateTableDiff(t *testing.T) { ts.diff = ts.diffs[0] ts.cdiff = ts.cdiffs[0] } - switch { - case ts.isError: - require.Error(t, err) - if ts.errorMsg != "" { - assert.Contains(t, err.Error(), ts.errorMsg) - } - case ts.diff == "": + + if ts.diff != "" { + _, err := env.Parser().ParseStrictDDL(ts.diff) + require.NoError(t, err) + } + if ts.cdiff != "" { + _, err := env.Parser().ParseStrictDDL(ts.cdiff) + require.NoError(t, err) + } + if ts.errorMsg != "" { + require.ErrorContains(t, err, ts.errorMsg) + return + } + if ts.diff == "" { assert.NoError(t, err) assert.True(t, alter.IsEmpty(), "expected empty diff, found changes") if !alter.IsEmpty() { @@ -1312,60 +2119,127 @@ func TestCreateTableDiff(t *testing.T) { t.Logf("c: %v", sqlparser.CanonicalString(c.CreateTable)) t.Logf("other: %v", sqlparser.CanonicalString(other.CreateTable)) } - default: - assert.NoError(t, err) - require.NotNil(t, alter) - assert.False(t, alter.IsEmpty(), "expected changes, found empty diff") + assert.Empty(t, ts.textdiffs) + assert.Empty(t, AtomicDiffs(alter)) + 
return + } - { - diff := alter.StatementString() - assert.Equal(t, ts.diff, diff) + // Expecting diff + assert.NoError(t, err) + require.NotNil(t, alter) + assert.False(t, alter.IsEmpty(), "expected changes, found empty diff") - if len(ts.diffs) > 0 { + { + diff := alter.StatementString() + assert.Equal(t, ts.diff, diff) - allSubsequentDiffs := AllSubsequent(alter) - require.Equal(t, len(ts.diffs), len(allSubsequentDiffs)) - require.Equal(t, len(ts.cdiffs), len(allSubsequentDiffs)) - for i := range ts.diffs { - assert.Equal(t, ts.diffs[i], allSubsequentDiffs[i].StatementString()) - assert.Equal(t, ts.cdiffs[i], allSubsequentDiffs[i].CanonicalStatementString()) - } - } - // validate we can parse back the statement - _, err := sqlparser.ParseStrictDDL(diff) - assert.NoError(t, err) + if len(ts.diffs) > 0 { - // Validate "from/to" entities - eFrom, eTo := alter.Entities() - if ts.fromName != "" { - assert.Equal(t, ts.fromName, eFrom.Name()) + allSubsequentDiffs := AllSubsequent(alter) + allSubsequentDiffsStatements := []string{} + for _, d := range allSubsequentDiffs { + allSubsequentDiffsStatements = append(allSubsequentDiffsStatements, d.CanonicalStatementString()) } - if ts.toName != "" { - assert.Equal(t, ts.toName, eTo.Name()) + require.Equal(t, len(ts.diffs), len(allSubsequentDiffs), allSubsequentDiffsStatements) + require.Equal(t, len(ts.cdiffs), len(allSubsequentDiffs), allSubsequentDiffsStatements) + for i := range ts.diffs { + assert.Equal(t, ts.diffs[i], allSubsequentDiffs[i].StatementString()) + assert.Equal(t, ts.cdiffs[i], allSubsequentDiffs[i].CanonicalStatementString()) } + } + // validate we can parse back the statement + _, err := env.Parser().ParseStrictDDL(diff) + assert.NoError(t, err) - { // Validate "apply()" on "from" converges with "to" - applied, err := c.Apply(alter) - assert.NoError(t, err) - require.NotNil(t, applied) - appliedDiff, err := eTo.Diff(applied, &hints) - require.NoError(t, err) - assert.True(t, appliedDiff.IsEmpty(), "expected 
empty diff, found changes: %v.\nc=%v\n,alter=%v\n,eTo=%v\napplied=%v\n", - appliedDiff.CanonicalStatementString(), - c.Create().CanonicalStatementString(), - alter.CanonicalStatementString(), - eTo.Create().CanonicalStatementString(), - applied.Create().CanonicalStatementString(), - ) - } + // Validate "from/to" entities + eFrom, eTo := alter.Entities() + if ts.fromName != "" { + assert.Equal(t, ts.fromName, eFrom.Name()) } - { - cdiff := alter.CanonicalStatementString() - assert.Equal(t, ts.cdiff, cdiff) - _, err := sqlparser.ParseStrictDDL(cdiff) + if ts.toName != "" { + assert.Equal(t, ts.toName, eTo.Name()) + } + + { // Validate "apply()" on "from" converges with "to" + applied, err := c.Apply(alter) assert.NoError(t, err) + require.NotNil(t, applied) + appliedDiff, err := eTo.Diff(applied, &hints) + require.NoError(t, err) + assert.True(t, appliedDiff.IsEmpty(), "expected empty diff, found changes: %v.\nc=%v\n,alter=%v\n,eTo=%v\napplied=%v\n", + appliedDiff.CanonicalStatementString(), + c.Create().CanonicalStatementString(), + alter.CanonicalStatementString(), + eTo.Create().CanonicalStatementString(), + applied.Create().CanonicalStatementString(), + ) + } + // Validate atomic diffs + atomicDiffs := AtomicDiffs(alter) + if len(ts.atomicdiffs) > 0 { + assert.Equal(t, len(ts.atomicdiffs), len(atomicDiffs), "%+v", atomicDiffs) + for i := range ts.atomicdiffs { + assert.Equal(t, ts.atomicdiffs[i], atomicDiffs[i].CanonicalStatementString()) + } + } else { + assert.Equal(t, 1, len(atomicDiffs)) + assert.Equal(t, alter.CanonicalStatementString(), atomicDiffs[0].CanonicalStatementString()) } + { // Validate annotations + alterEntityDiff, ok := alter.(*AlterTableEntityDiff) + require.True(t, ok) + annotatedFrom, annotatedTo, annotatedUnified := alterEntityDiff.Annotated() + annotatedFromString := annotatedFrom.Export() + annotatedToString := annotatedTo.Export() + annotatedUnifiedString := annotatedUnified.Export() + { + eFromStatementString := 
eFrom.Create().CanonicalStatementString() + for _, annotation := range alterEntityDiff.annotations.Removed() { + require.NotEmpty(t, annotation.text) + assert.Contains(t, eFromStatementString, annotation.text) + } + if len(alterEntityDiff.annotations.Removed()) == 0 { + assert.Empty(t, annotatedFrom.Removed()) + assert.Equal(t, eFromStatementString, annotatedFromString) + } else { + assert.NotEmpty(t, annotatedFrom.Removed()) + assert.NotEqual(t, eFromStatementString, annotatedFromString) + } + } + { + eToStatementString := eTo.Create().CanonicalStatementString() + for _, annotation := range alterEntityDiff.annotations.Added() { + require.NotEmpty(t, annotation.text) + assert.Contains(t, eToStatementString, annotation.text) + } + if len(alterEntityDiff.annotations.Added()) == 0 { + assert.Empty(t, annotatedTo.Added()) + assert.Equal(t, eToStatementString, annotatedToString) + } else { + assert.NotEmpty(t, annotatedTo.Added()) + assert.NotEqual(t, eToStatementString, annotatedToString) + } + } + if len(ts.textdiffs) > 0 { // Still incomplete. 
+ // For this test, we should validate the given diffs + uniqueDiffs := make(map[string]bool) + for _, textdiff := range ts.textdiffs { + uniqueDiffs[textdiff] = true + } + require.Equal(t, len(uniqueDiffs), len(ts.textdiffs)) // integrity of test + for _, textdiff := range ts.textdiffs { + assert.Containsf(t, annotatedUnifiedString, textdiff, annotatedUnifiedString) + } + assert.Equalf(t, len(annotatedUnified.Removed())+len(annotatedUnified.Added()), len(ts.textdiffs), annotatedUnifiedString) + } + } + } + { + cdiff := alter.CanonicalStatementString() + assert.Equal(t, ts.cdiff, cdiff) + _, err := env.Parser().ParseStrictDDL(cdiff) + assert.NoError(t, err) } }) } @@ -1647,6 +2521,7 @@ func TestValidate(t *testing.T) { alter: "alter table t add column i int", expectErr: &ApplyDuplicatePartitionError{Table: "t1", Partition: "p2"}, }, + // More columns and indexes { name: "change to visible with alter column", from: "create table t (id int, i int invisible, primary key (id))", @@ -1857,19 +2732,20 @@ func TestValidate(t *testing.T) { }, } hints := DiffHints{} + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := env.Parser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - stmt, err = sqlparser.ParseStrictDDL(ts.alter) + stmt, err = env.Parser().ParseStrictDDL(ts.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) - from, err := NewCreateTableEntity(fromCreateTable) + from, err := NewCreateTableEntity(env, fromCreateTable) require.NoError(t, err) a := &AlterTableEntityDiff{from: from, alterTable: alterTable} applied, err := from.Apply(a) @@ -1888,12 +2764,12 @@ func TestValidate(t *testing.T) { require.True(t, ok) applied = c.normalize() - stmt, err := sqlparser.ParseStrictDDL(ts.to) + stmt, err := env.Parser().ParseStrictDDL(ts.to) require.NoError(t, err) 
toCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - to, err := NewCreateTableEntity(toCreateTable) + to, err := NewCreateTableEntity(env, toCreateTable) require.NoError(t, err) diff, err := applied.Diff(to, &hints) require.NoError(t, err) @@ -2074,6 +2950,16 @@ func TestNormalize(t *testing.T) { from: "create table t (id int signed primary key, v varchar(255) charset utf8mb3 collate utf8_unicode_ci) charset utf8mb3 collate utf8_unicode_ci", to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255),\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb3,\n COLLATE utf8mb3_unicode_ci", }, + { + name: "remove column charset if collation is explicit and implies specified charset", + from: "create table t (id int primary key, v varchar(255) charset utf8mb4 collate utf8mb4_german2_ci)", + to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255) COLLATE utf8mb4_german2_ci,\n\tPRIMARY KEY (`id`)\n)", + }, + { + name: "ascii charset and collation", + from: "create table t (id int primary key, v varchar(255) charset ascii collate ascii_general_ci) charset utf8mb3 collate utf8_general_ci", + to: "CREATE TABLE `t` (\n\t`id` int,\n\t`v` varchar(255) CHARACTER SET ascii COLLATE ascii_general_ci,\n\tPRIMARY KEY (`id`)\n) CHARSET utf8mb3,\n COLLATE utf8mb3_general_ci", + }, { name: "correct case table options for engine", from: "create table t (id int signed primary key) engine innodb", @@ -2170,14 +3056,15 @@ func TestNormalize(t *testing.T) { to: "CREATE TABLE `t` (\n\t`id` tinyint(1),\n\t`b` tinyint(1),\n\tPRIMARY KEY (`id`)\n)", }, } + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := env.Parser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - from, err := NewCreateTableEntity(fromCreateTable) + from, err := NewCreateTableEntity(env, fromCreateTable) require.NoError(t, err) assert.Equal(t, ts.to, 
sqlparser.CanonicalString(from)) }) @@ -2261,11 +3148,12 @@ func TestIndexesCoveringForeignKeyColumns(t *testing.T) { }, } - stmt, err := sqlparser.ParseStrictDDL(sql) + env := NewTestEnv() + stmt, err := env.Parser().ParseStrictDDL(sql) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - c, err := NewCreateTableEntity(createTable) + c, err := NewCreateTableEntity(env, createTable) require.NoError(t, err) tableColumns := map[string]sqlparser.IdentifierCI{} for _, col := range c.CreateTable.TableSpec.Columns { diff --git a/go/vt/schemadiff/types.go b/go/vt/schemadiff/types.go index 86e5a8d06bf..2049387140c 100644 --- a/go/vt/schemadiff/types.go +++ b/go/vt/schemadiff/types.go @@ -17,9 +17,21 @@ limitations under the License. package schemadiff import ( + "strings" + + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/vt/sqlparser" ) +type InstantDDLCapability int + +const ( + InstantDDLCapabilityUnknown InstantDDLCapability = iota + InstantDDLCapabilityIrrelevant + InstantDDLCapabilityImpossible + InstantDDLCapabilityPossible +) + // Entity stands for a database object we can diff: // - A table // - A view @@ -55,6 +67,11 @@ type EntityDiff interface { SubsequentDiff() EntityDiff // SetSubsequentDiff updates the existing subsequent diff to the given one SetSubsequentDiff(EntityDiff) + // InstantDDLCapability returns the ability of this diff to run with ALGORITHM=INSTANT + InstantDDLCapability() InstantDDLCapability + // Clone returns a deep copy of the entity diff, and of all referenced entities. 
+ Clone() EntityDiff + Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) } const ( @@ -108,6 +125,21 @@ const ( AlterTableAlgorithmStrategyCopy ) +const ( + EnumReorderStrategyAllow int = iota + EnumReorderStrategyReject +) + +const ( + ForeignKeyCheckStrategyStrict int = iota + ForeignKeyCheckStrategyIgnore +) + +const ( + SubsequentDiffStrategyAllow int = iota + SubsequentDiffStrategyReject +) + // DiffHints is an assortment of rules for diffing entities type DiffHints struct { StrictIndexOrdering bool @@ -120,6 +152,13 @@ type DiffHints struct { TableCharsetCollateStrategy int TableQualifierHint int AlterTableAlgorithmStrategy int + EnumReorderStrategy int + ForeignKeyCheckStrategy int + SubsequentDiffStrategy int +} + +func EmptyDiffHints() *DiffHints { + return &DiffHints{} } const ( @@ -127,3 +166,21 @@ const ( ApplyDiffsInOrder = "ApplyDiffsInOrder" ApplyDiffsSequential = "ApplyDiffsSequential" ) + +type ForeignKeyTableColumns struct { + Table string + Columns []string +} + +func (f ForeignKeyTableColumns) Escaped() string { + var b strings.Builder + b.WriteString(sqlescape.EscapeID(f.Table)) + b.WriteString(" (") + escapedColumns := make([]string, len(f.Columns)) + for i, column := range f.Columns { + escapedColumns[i] = sqlescape.EscapeID(column) + } + b.WriteString(strings.Join(escapedColumns, ", ")) + b.WriteString(")") + return b.String() +} diff --git a/go/vt/schemadiff/view.go b/go/vt/schemadiff/view.go index 4e32dfd9910..c4d48ac66cc 100644 --- a/go/vt/schemadiff/view.go +++ b/go/vt/schemadiff/view.go @@ -45,6 +45,10 @@ func (d *AlterViewEntityDiff) Entities() (from Entity, to Entity) { return d.from, d.to } +func (d *AlterViewEntityDiff) Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) { + return annotatedDiff(d, nil) +} + // Statement implements EntityDiff func (d *AlterViewEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ -91,6 +95,28 @@ func (d 
*AlterViewEntityDiff) SubsequentDiff() EntityDiff { func (d *AlterViewEntityDiff) SetSubsequentDiff(EntityDiff) { } +// InstantDDLCapability implements EntityDiff +func (d *AlterViewEntityDiff) InstantDDLCapability() InstantDDLCapability { + return InstantDDLCapabilityIrrelevant +} + +// Clone implements EntityDiff +func (d *AlterViewEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + clone := &AlterViewEntityDiff{ + alterView: sqlparser.CloneRefOfAlterView(d.alterView), + } + if d.from != nil { + clone.from = d.from.Clone().(*CreateViewEntity) + } + if d.to != nil { + clone.to = d.to.Clone().(*CreateViewEntity) + } + return clone +} + type CreateViewEntityDiff struct { createView *sqlparser.CreateView @@ -113,6 +139,10 @@ func (d *CreateViewEntityDiff) Entities() (from Entity, to Entity) { return nil, &CreateViewEntity{CreateView: d.createView} } +func (d *CreateViewEntityDiff) Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified *TextualAnnotations) { + return annotatedDiff(d, nil) +} + // Statement implements EntityDiff func (d *CreateViewEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ -159,6 +189,21 @@ func (d *CreateViewEntityDiff) SubsequentDiff() EntityDiff { func (d *CreateViewEntityDiff) SetSubsequentDiff(EntityDiff) { } +// InstantDDLCapability implements EntityDiff +func (d *CreateViewEntityDiff) InstantDDLCapability() InstantDDLCapability { + return InstantDDLCapabilityIrrelevant +} + +// Clone implements EntityDiff +func (d *CreateViewEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + return &CreateViewEntityDiff{ + createView: sqlparser.CloneRefOfCreateView(d.createView), + } +} + type DropViewEntityDiff struct { from *CreateViewEntity dropView *sqlparser.DropView @@ -181,6 +226,10 @@ func (d *DropViewEntityDiff) Entities() (from Entity, to Entity) { return d.from, nil } +func (d *DropViewEntityDiff) Annotated() (from *TextualAnnotations, to *TextualAnnotations, unified 
*TextualAnnotations) { + return annotatedDiff(d, nil) +} + // Statement implements EntityDiff func (d *DropViewEntityDiff) Statement() sqlparser.Statement { if d == nil { @@ -227,16 +276,36 @@ func (d *DropViewEntityDiff) SubsequentDiff() EntityDiff { func (d *DropViewEntityDiff) SetSubsequentDiff(EntityDiff) { } +// InstantDDLCapability implements EntityDiff +func (d *DropViewEntityDiff) InstantDDLCapability() InstantDDLCapability { + return InstantDDLCapabilityIrrelevant +} + +// Clone implements EntityDiff +func (d *DropViewEntityDiff) Clone() EntityDiff { + if d == nil { + return nil + } + clone := &DropViewEntityDiff{ + dropView: sqlparser.CloneRefOfDropView(d.dropView), + } + if d.from != nil { + clone.from = d.from.Clone().(*CreateViewEntity) + } + return clone +} + // CreateViewEntity stands for a VIEW construct. It contains the view's CREATE statement. type CreateViewEntity struct { *sqlparser.CreateView + env *Environment } -func NewCreateViewEntity(c *sqlparser.CreateView) (*CreateViewEntity, error) { +func NewCreateViewEntity(env *Environment, c *sqlparser.CreateView) (*CreateViewEntity, error) { if !c.IsFullyParsed() { return nil, &NotFullyParsedError{Entity: c.ViewName.Name.String(), Statement: sqlparser.CanonicalString(c)} } - entity := &CreateViewEntity{CreateView: c} + entity := &CreateViewEntity{CreateView: c, env: env} entity.normalize() return entity, nil } @@ -296,6 +365,9 @@ func (c *CreateViewEntity) ViewDiff(other *CreateViewEntity, _ *DiffHints) (*Alt // Create implements Entity interface func (c *CreateViewEntity) Create() EntityDiff { + if c == nil { + return nil + } return &CreateViewEntityDiff{createView: c.CreateView} } diff --git a/go/vt/schemadiff/view_test.go b/go/vt/schemadiff/view_test.go index 939308d056c..d1a26c3cdaa 100644 --- a/go/vt/schemadiff/view_test.go +++ b/go/vt/schemadiff/view_test.go @@ -145,22 +145,23 @@ func TestCreateViewDiff(t *testing.T) { cdiff: "ALTER ALGORITHM = TEMPTABLE VIEW `v1` AS SELECT `a` FROM `t`", }, 
} - hints := &DiffHints{} + hints := EmptyDiffHints() + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := env.Parser().ParseStrictDDL(ts.from) assert.NoError(t, err) fromCreateView, ok := fromStmt.(*sqlparser.CreateView) assert.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := env.Parser().ParseStrictDDL(ts.to) assert.NoError(t, err) toCreateView, ok := toStmt.(*sqlparser.CreateView) assert.True(t, ok) - c, err := NewCreateViewEntity(fromCreateView) + c, err := NewCreateViewEntity(env, fromCreateView) require.NoError(t, err) - other, err := NewCreateViewEntity(toCreateView) + other, err := NewCreateViewEntity(env, toCreateView) require.NoError(t, err) alter, err := c.Diff(other, hints) switch { @@ -177,7 +178,7 @@ func TestCreateViewDiff(t *testing.T) { diff := alter.StatementString() assert.Equal(t, ts.diff, diff) // validate we can parse back the statement - _, err := sqlparser.ParseStrictDDL(diff) + _, err := env.Parser().ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := alter.Entities() @@ -195,11 +196,20 @@ func TestCreateViewDiff(t *testing.T) { require.NoError(t, err) assert.True(t, appliedDiff.IsEmpty(), "expected empty diff, found changes: %v", appliedDiff.CanonicalStatementString()) } + // Validate Clone() works + { + clone := alter.Clone() + alterClone, ok := clone.(*AlterViewEntityDiff) + require.True(t, ok) + assert.Equal(t, eFrom.Create().CanonicalStatementString(), alterClone.from.Create().CanonicalStatementString()) + alterClone.from.CreateView.ViewName.Name = sqlparser.NewIdentifierCS("something_else") + assert.NotEqual(t, eFrom.Create().CanonicalStatementString(), alterClone.from.Create().CanonicalStatementString()) + } } { cdiff := alter.CanonicalStatementString() assert.Equal(t, ts.cdiff, cdiff) - _, err := sqlparser.ParseStrictDDL(cdiff) + _, err := env.Parser().ParseStrictDDL(cdiff) assert.NoError(t, err) } 
} @@ -239,14 +249,15 @@ func TestNormalizeView(t *testing.T) { to: "CREATE SQL SECURITY INVOKER VIEW `v1` AS SELECT `a`, `b`, `c` FROM `t`", }, } + env := NewTestEnv() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := env.Parser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) - from, err := NewCreateViewEntity(fromCreateView) + from, err := NewCreateViewEntity(env, fromCreateView) require.NoError(t, err) assert.Equal(t, ts.to, sqlparser.CanonicalString(from)) }) diff --git a/go/vt/schemamanager/local_controller_test.go b/go/vt/schemamanager/local_controller_test.go index 9b6b7c5369e..1784a76e133 100644 --- a/go/vt/schemamanager/local_controller_test.go +++ b/go/vt/schemamanager/local_controller_test.go @@ -21,10 +21,11 @@ import ( "fmt" "os" "path" - "reflect" "strings" "testing" + "github.com/stretchr/testify/require" + querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -32,64 +33,50 @@ func TestLocalControllerNoSchemaChanges(t *testing.T) { schemaChangeDir := t.TempDir() controller := NewLocalController(schemaChangeDir) ctx := context.Background() - if err := controller.Open(ctx); err != nil { - t.Fatalf("Open should succeed, but got error: %v", err) - } + err := controller.Open(ctx) + require.NoError(t, err) + defer controller.Close() data, err := controller.Read(ctx) - if err != nil { - t.Fatalf("Read should succeed, but got error: %v", err) - } - if len(data) != 0 { - t.Fatalf("there is no schema change, Read should return empty data") - } + require.NoError(t, err) + require.Empty(t, data, "there is no schema change, Read should return empty data") } func TestLocalControllerOpen(t *testing.T) { controller := NewLocalController("") ctx := context.Background() - if err := controller.Open(ctx); err == nil || !strings.Contains(err.Error(), "no such file or directory") { - t.Fatalf("Open should fail, no such dir, but got: 
%v", err) - } + err := controller.Open(ctx) + require.ErrorContains(t, err, "no such file or directory", "Open should fail, no such dir") schemaChangeDir := t.TempDir() // create a file under schema change dir - _, err := os.Create(path.Join(schemaChangeDir, "create_test_table.sql")) - if err != nil { - t.Fatalf("failed to create sql file, error: %v", err) - } + _, err = os.Create(path.Join(schemaChangeDir, "create_test_table.sql")) + require.NoError(t, err, "failed to create sql file") controller = NewLocalController(schemaChangeDir) - if err := controller.Open(ctx); err != nil { - t.Fatalf("Open should succeed") - } + err = controller.Open(ctx) + require.NoError(t, err) + data, err := controller.Read(ctx) - if err != nil { - t.Fatalf("Read should succeed, but got error: %v", err) - } - if len(data) != 0 { - t.Fatalf("there is no schema change, Read should return empty data") - } + require.NoError(t, err) + require.Empty(t, data, "there is no schema change, Read should return empty data") + controller.Close() testKeyspaceDir := path.Join(schemaChangeDir, "test_keyspace") - if err := os.MkdirAll(testKeyspaceDir, os.ModePerm); err != nil { - t.Fatalf("failed to create test_keyspace dir, error: %v", err) - } + err = os.MkdirAll(testKeyspaceDir, os.ModePerm) + require.NoError(t, err, "failed to create test_keyspace dir") controller = NewLocalController(schemaChangeDir) - if err := controller.Open(ctx); err != nil { - t.Fatalf("Open should succeed") - } + err = controller.Open(ctx) + require.NoError(t, err) + data, err = controller.Read(ctx) - if err != nil { - t.Fatalf("Read should succeed, but got error: %v", err) - } - if len(data) != 0 { - t.Fatalf("there is no schema change, Read should return empty data") - } + require.NoError(t, err) + require.Empty(t, data, "there is no schema change, Read should return empty data") + controller.Close() } @@ -97,14 +84,11 @@ func TestLocalControllerSchemaChange(t *testing.T) { schemaChangeDir := t.TempDir() testKeyspaceInputDir 
:= path.Join(schemaChangeDir, "test_keyspace/input") - if err := os.MkdirAll(testKeyspaceInputDir, os.ModePerm); err != nil { - t.Fatalf("failed to create test_keyspace dir, error: %v", err) - } + err := os.MkdirAll(testKeyspaceInputDir, os.ModePerm) + require.NoError(t, err, "failed to create test_keyspace dir") file, err := os.Create(path.Join(testKeyspaceInputDir, "create_test_table.sql")) - if err != nil { - t.Fatalf("failed to create sql file, error: %v", err) - } + require.NoError(t, err, "failed to create sql file") sqls := []string{ "create table test_table_01 (id int)", @@ -117,51 +101,36 @@ func TestLocalControllerSchemaChange(t *testing.T) { controller := NewLocalController(schemaChangeDir) ctx := context.Background() - if err := controller.Open(ctx); err != nil { - t.Fatalf("Open should succeed, but got error: %v", err) - } + err = controller.Open(ctx) + require.NoError(t, err) defer controller.Close() data, err := controller.Read(ctx) - if err != nil { - t.Fatalf("Read should succeed, but got error: %v", err) - } - - if !reflect.DeepEqual(sqls, data) { - t.Fatalf("expect to get sqls: %v, but got: %v", sqls, data) - } - - if controller.Keyspace() != "test_keyspace" { - t.Fatalf("expect to get keyspace: 'test_keyspace', but got: '%s'", - controller.Keyspace()) - } + require.NoError(t, err) + require.Equal(t, sqls, data) + require.Equal(t, "test_keyspace", controller.Keyspace()) // test various callbacks - if err := controller.OnReadSuccess(ctx); err != nil { - t.Fatalf("OnReadSuccess should succeed, but got error: %v", err) - } + err = controller.OnReadSuccess(ctx) + require.NoError(t, err) - if err := controller.OnReadFail(ctx, fmt.Errorf("read fail")); err != nil { - t.Fatalf("OnReadFail should succeed, but got error: %v", err) - } + err = controller.OnReadFail(ctx, fmt.Errorf("read fail")) + require.NoError(t, err) errorPath := path.Join(controller.errorDir, controller.sqlFilename) - if err := controller.OnValidationSuccess(ctx); err != nil { - 
t.Fatalf("OnReadSuccess should succeed, but got error: %v", err) - } + err = controller.OnValidationSuccess(ctx) + require.NoError(t, err) // move sql file from error dir to input dir for OnValidationFail test os.Rename(errorPath, controller.sqlPath) - if err := controller.OnValidationFail(ctx, fmt.Errorf("validation fail")); err != nil { - t.Fatalf("OnValidationFail should succeed, but got error: %v", err) - } + err = controller.OnValidationFail(ctx, fmt.Errorf("validation fail")) + require.NoError(t, err) - if _, err := os.Stat(errorPath); os.IsNotExist(err) { - t.Fatalf("sql file should be moved to error dir, error: %v", err) - } + _, err = os.Stat(errorPath) + require.Falsef(t, os.IsNotExist(err), "sql file should be moved to error dir, error: %v", err) // move sql file from error dir to input dir for OnExecutorComplete test os.Rename(errorPath, controller.sqlPath) @@ -175,16 +144,14 @@ func TestLocalControllerSchemaChange(t *testing.T) { } logPath := path.Join(controller.logDir, controller.sqlFilename) completePath := path.Join(controller.completeDir, controller.sqlFilename) - if err := controller.OnExecutorComplete(ctx, result); err != nil { - t.Fatalf("OnExecutorComplete should succeed, but got error: %v", err) - } - if _, err := os.Stat(completePath); os.IsNotExist(err) { - t.Fatalf("sql file should be moved to complete dir, error: %v", err) - } + err = controller.OnExecutorComplete(ctx, result) + require.NoError(t, err) - if _, err := os.Stat(logPath); os.IsNotExist(err) { - t.Fatalf("sql file should be moved to log dir, error: %v", err) - } + _, err = os.Stat(completePath) + require.Falsef(t, os.IsNotExist(err), "sql file should be moved to complete dir, error: %v", err) + + _, err = os.Stat(logPath) + require.Falsef(t, os.IsNotExist(err), "sql file should be moved to log dir, error: %v", err) // move sql file from error dir to input dir for OnExecutorComplete test os.Rename(completePath, controller.sqlPath) @@ -197,11 +164,9 @@ func 
TestLocalControllerSchemaChange(t *testing.T) { }}, } - if err := controller.OnExecutorComplete(ctx, result); err != nil { - t.Fatalf("OnExecutorComplete should succeed, but got error: %v", err) - } + err = controller.OnExecutorComplete(ctx, result) + require.NoError(t, err) - if _, err := os.Stat(errorPath); os.IsNotExist(err) { - t.Fatalf("sql file should be moved to error dir, error: %v", err) - } + _, err = os.Stat(errorPath) + require.Falsef(t, os.IsNotExist(err), "sql file should be moved to error dir, error: %v", err) } diff --git a/go/vt/schemamanager/plain_controller_test.go b/go/vt/schemamanager/plain_controller_test.go index ca3352cda82..ea6c845d433 100644 --- a/go/vt/schemamanager/plain_controller_test.go +++ b/go/vt/schemamanager/plain_controller_test.go @@ -21,6 +21,8 @@ import ( "testing" "context" + + "github.com/stretchr/testify/require" ) func TestPlainController(t *testing.T) { @@ -28,50 +30,31 @@ func TestPlainController(t *testing.T) { controller := NewPlainController([]string{sql}, "test_keyspace") ctx := context.Background() err := controller.Open(ctx) - if err != nil { - t.Fatalf("controller.Open should succeed, but got error: %v", err) - } + require.NoError(t, err) keyspace := controller.Keyspace() - if keyspace != "test_keyspace" { - t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) - } + require.Equal(t, "test_keyspace", keyspace) sqls, err := controller.Read(ctx) - if err != nil { - t.Fatalf("controller.Read should succeed, but got error: %v", err) - } - if len(sqls) != 1 { - t.Fatalf("controller should only get one sql, but got: %v", sqls) - } - if sqls[0] != sql { - t.Fatalf("expect to get sql: '%s', but got: '%s'", sql, sqls[0]) - } + require.NoError(t, err) + require.Len(t, sqls, 1, "controller should only get one sql") + require.Equal(t, sql, sqls[0]) + defer controller.Close() err = controller.OnReadSuccess(ctx) - if err != nil { - t.Fatalf("OnDataSourcerReadSuccess should succeed") - } + 
require.NoError(t, err) errReadFail := fmt.Errorf("read fail") err = controller.OnReadFail(ctx, errReadFail) - if err != errReadFail { - t.Fatalf("should get error:%v, but get: %v", errReadFail, err) - } + require.ErrorIs(t, err, errReadFail) err = controller.OnValidationSuccess(ctx) - if err != nil { - t.Fatalf("OnValidationSuccess should succeed") - } + require.NoError(t, err) errValidationFail := fmt.Errorf("validation fail") err = controller.OnValidationFail(ctx, errValidationFail) - if err != errValidationFail { - t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) - } + require.ErrorIs(t, err, errValidationFail) err = controller.OnExecutorComplete(ctx, &ExecuteResult{}) - if err != nil { - t.Fatalf("OnExecutorComplete should succeed") - } + require.NoError(t, err) } diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 154d985bba4..129600d0527 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -20,10 +20,12 @@ import ( "context" "errors" "fmt" - "strings" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" @@ -59,10 +61,7 @@ func TestSchemaManagerControllerOpenFail(t *testing.T) { ctx := context.Background() _, err := Run(ctx, controller, newFakeExecutor(t)) - if err != errControllerOpen { - t.Fatalf("controller.Open fail, should get error: %v, but get error: %v", - errControllerOpen, err) - } + require.ErrorIs(t, err, errControllerOpen) } func TestSchemaManagerControllerReadFail(t *testing.T) { @@ -70,13 +69,8 @@ func TestSchemaManagerControllerReadFail(t *testing.T) { []string{"select * from test_db"}, false, true, false) ctx := context.Background() _, err := Run(ctx, controller, newFakeExecutor(t)) - if err != errControllerRead { - t.Fatalf("controller.Read fail, should get error: %v, but 
get error: %v", - errControllerRead, err) - } - if !controller.onReadFailTriggered { - t.Fatalf("OnReadFail should be called") - } + require.ErrorIs(t, err, errControllerRead) + require.True(t, controller.onReadFailTriggered, "OnReadFail should be called") } func TestSchemaManagerValidationFail(t *testing.T) { @@ -85,22 +79,18 @@ func TestSchemaManagerValidationFail(t *testing.T) { ctx := context.Background() _, err := Run(ctx, controller, newFakeExecutor(t)) - if err == nil || !strings.Contains(err.Error(), "failed to parse sql") { - t.Fatalf("run schema change should fail due to executor.Validate fail, but got: %v", err) - } + require.ErrorContains(t, err, "failed to parse sql", "run schema change should fail due to executor.Validate fail") } func TestSchemaManagerExecutorOpenFail(t *testing.T) { controller := newFakeController( []string{"create table test_table (pk int);"}, false, false, false) controller.SetKeyspace("unknown_keyspace") - executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() _, err := Run(ctx, controller, executor) - if err == nil || !strings.Contains(err.Error(), "unknown_keyspace") { - t.Fatalf("run schema change should fail due to executor.Open fail, but got: %v", err) - } + require.ErrorContains(t, err, "unknown_keyspace", "run schema change should fail due to executor.Open fail") } func TestSchemaManagerRun(t *testing.T) { @@ -125,33 +115,19 @@ func TestSchemaManagerRun(t *testing.T) { }) fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) - executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), 
testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() resp, err := Run(ctx, controller, executor) - if len(resp.UUIDs) > 0 { - t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) - } - - if err != nil { - t.Fatalf("schema change should success but get error: %v", err) - } - if !controller.onReadSuccessTriggered { - t.Fatalf("OnReadSuccess should be called") - } - if controller.onReadFailTriggered { - t.Fatalf("OnReadFail should not be called") - } - if !controller.onValidationSuccessTriggered { - t.Fatalf("OnValidateSuccess should be called") - } - if controller.onValidationFailTriggered { - t.Fatalf("OnValidationFail should not be called") - } - if !controller.onExecutorCompleteTriggered { - t.Fatalf("OnExecutorComplete should be called") - } + require.Lenf(t, resp.UUIDs, 0, "response should contain an empty list of UUIDs") + require.NoError(t, err) + + require.True(t, controller.onReadSuccessTriggered, "OnReadSuccess should be called") + require.False(t, controller.onReadFailTriggered, "OnReadFail should not be called") + require.True(t, controller.onValidationSuccessTriggered, "OnValidateSuccess should be called") + require.False(t, controller.onValidationFailTriggered, "OnValidationFail should not be called") + require.True(t, controller.onExecutorCompleteTriggered, "OnExecutorComplete should be called") }) } } @@ -176,17 +152,12 @@ func TestSchemaManagerExecutorFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, 
logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() resp, err := Run(ctx, controller, executor) - if len(resp.UUIDs) > 0 { - t.Fatalf("response should contain an empty list of UUIDs, found %v", len(resp.UUIDs)) - } - - if err == nil || !strings.Contains(err.Error(), "schema change failed") { - t.Fatalf("schema change should fail, but got err: %v", err) - } + require.Lenf(t, resp.UUIDs, 0, "response should contain an empty list of UUIDs") + require.ErrorContains(t, err, "schema change failed", "schema change should fail") } func TestSchemaManagerExecutorBatchVsStrategyFail(t *testing.T) { @@ -196,7 +167,7 @@ func TestSchemaManagerExecutorBatchVsStrategyFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("online") ctx := context.Background() @@ -212,7 +183,7 @@ func TestSchemaManagerExecutorBatchVsQueriesFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("direct") ctx := context.Background() @@ -228,7 +199,7 @@ func TestSchemaManagerExecutorBatchVsUUIDsFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", 
&tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("direct") executor.SetUUIDList([]string{"4e5dcf80_354b_11eb_82cd_f875a4d24e90"}) @@ -247,19 +218,15 @@ func TestSchemaManagerRegisterControllerFactory(t *testing.T) { }) _, err := GetControllerFactory("unknown") - if err == nil || !strings.Contains(err.Error(), "there is no data sourcer factory") { - t.Fatalf("controller factory is not registered, GetControllerFactory should return an error, but got: %v", err) - } + require.ErrorContains(t, err, "there is no data sourcer factory", "controller factory is not registered, GetControllerFactory should return an error") + _, err = GetControllerFactory("test_controller") - if err != nil { - t.Fatalf("GetControllerFactory should succeed, but get an error: %v", err) - } + require.NoError(t, err) + func() { defer func() { err := recover() - if err == nil { - t.Fatalf("RegisterControllerFactory should fail, it registers a registered ControllerFactory") - } + require.NotNil(t, err, "RegisterControllerFactory should fail, it registers a registered ControllerFactory") }() RegisterControllerFactory( "test_controller", @@ -271,7 +238,7 @@ func TestSchemaManagerRegisterControllerFactory(t *testing.T) { } func newFakeExecutor(t *testing.T) *TabletExecutor { - return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) } func newFakeTabletManagerClient() 
*fakeTabletManagerClient { @@ -334,13 +301,13 @@ func newFakeTopo(t *testing.T) *topo.Server { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "test_cell") - if err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}); err != nil { - t.Fatalf("CreateKeyspace failed: %v", err) - } + err := ts.CreateKeyspace(ctx, "test_keyspace", &topodatapb.Keyspace{}) + require.NoError(t, err) + for i, shard := range []string{"0", "1", "2"} { - if err := ts.CreateShard(ctx, "test_keyspace", shard); err != nil { - t.Fatalf("CreateShard(%v) failed: %v", shard, err) - } + err = ts.CreateShard(ctx, "test_keyspace", shard) + require.NoError(t, err) + tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "test_cell", @@ -349,22 +316,23 @@ func newFakeTopo(t *testing.T) *topo.Server { Keyspace: "test_keyspace", Shard: shard, } - if err := ts.CreateTablet(ctx, tablet); err != nil { - t.Fatalf("CreateTablet failed: %v", err) - } - if _, err := ts.UpdateShardFields(ctx, "test_keyspace", shard, func(si *topo.ShardInfo) error { + + err = ts.CreateTablet(ctx, tablet) + require.NoError(t, err) + + _, err = ts.UpdateShardFields(ctx, "test_keyspace", shard, func(si *topo.ShardInfo) error { si.Shard.PrimaryAlias = tablet.Alias return nil - }); err != nil { - t.Fatalf("UpdateShardFields failed: %v", err) - } - } - if err := ts.CreateKeyspace(ctx, "unsharded_keyspace", &topodatapb.Keyspace{}); err != nil { - t.Fatalf("CreateKeyspace failed: %v", err) - } - if err := ts.CreateShard(ctx, "unsharded_keyspace", "0"); err != nil { - t.Fatalf("CreateShard(%v) failed: %v", "0", err) + }) + require.NoError(t, err) } + + err = ts.CreateKeyspace(ctx, "unsharded_keyspace", &topodatapb.Keyspace{}) + require.NoError(t, err) + + err = ts.CreateShard(ctx, "unsharded_keyspace", "0") + require.NoError(t, err) + tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "test_cell", @@ -373,15 +341,14 @@ func newFakeTopo(t 
*testing.T) *topo.Server { Keyspace: "test_keyspace", Shard: "0", } - if err := ts.CreateTablet(ctx, tablet); err != nil { - t.Fatalf("CreateTablet failed: %v", err) - } - if _, err := ts.UpdateShardFields(ctx, "unsharded_keyspace", "0", func(si *topo.ShardInfo) error { + err = ts.CreateTablet(ctx, tablet) + require.NoError(t, err) + + _, err = ts.UpdateShardFields(ctx, "unsharded_keyspace", "0", func(si *topo.ShardInfo) error { si.Shard.PrimaryAlias = tablet.Alias return nil - }); err != nil { - t.Fatalf("UpdateShardFields failed: %v", err) - } + }) + require.NoError(t, err) return ts } diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index a56a95d5034..592c64e7073 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -53,10 +53,11 @@ type TabletExecutor struct { ddlStrategySetting *schema.DDLStrategySetting uuids []string batchSize int64 + parser *sqlparser.Parser } // NewTabletExecutor creates a new TabletExecutor instance -func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64) *TabletExecutor { +func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64, parser *sqlparser.Parser) *TabletExecutor { return &TabletExecutor{ ts: ts, tmc: tmc, @@ -65,6 +66,7 @@ func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.Ta waitReplicasTimeout: waitReplicasTimeout, migrationContext: migrationContext, batchSize: batchSize, + parser: parser, } } @@ -105,16 +107,12 @@ func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { return nil } exec.keyspace = keyspace - shardNames, err := exec.ts.GetShardNames(ctx, keyspace) + shards, err := exec.ts.FindAllShardsInKeyspace(ctx, keyspace, nil) if err != nil { - return 
fmt.Errorf("unable to get shard names for keyspace: %s, error: %v", keyspace, err) + return fmt.Errorf("unable to get shards for keyspace: %s, error: %v", keyspace, err) } - exec.tablets = make([]*topodatapb.Tablet, len(shardNames)) - for i, shardName := range shardNames { - shardInfo, err := exec.ts.GetShard(ctx, keyspace, shardName) - if err != nil { - return fmt.Errorf("unable to get shard info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) - } + exec.tablets = make([]*topodatapb.Tablet, 0, len(shards)) + for shardName, shardInfo := range shards { if !shardInfo.HasPrimary() { return fmt.Errorf("shard: %s does not have a primary", shardName) } @@ -122,7 +120,7 @@ func (exec *TabletExecutor) Open(ctx context.Context, keyspace string) error { if err != nil { return fmt.Errorf("unable to get primary tablet info, keyspace: %s, shard: %s, error: %v", keyspace, shardName, err) } - exec.tablets[i] = tabletInfo.Tablet + exec.tablets = append(exec.tablets, tabletInfo.Tablet) } if len(exec.tablets) == 0 { @@ -146,7 +144,7 @@ func (exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { func (exec *TabletExecutor) parseDDLs(sqls []string) error { for _, sql := range sqls { - stmt, err := sqlparser.Parse(sql) + stmt, err := exec.parser.Parse(sql) if err != nil { return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } @@ -204,14 +202,14 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided return executeViaFetch() } // Analyze what type of query this is: - stmt, err := sqlparser.Parse(sql) + stmt, err := exec.parser.Parse(sql) if err != nil { return false, err } switch stmt := stmt.(type) { case sqlparser.DDLStatement: if exec.isOnlineSchemaDDL(stmt) { - onlineDDLs, err := schema.NewOnlineDDLs(exec.keyspace, sql, stmt, exec.ddlStrategySetting, exec.migrationContext, providedUUID) + onlineDDLs, err := schema.NewOnlineDDLs(exec.keyspace, sql, stmt, 
exec.ddlStrategySetting, exec.migrationContext, providedUUID, exec.parser) if err != nil { execResult.ExecutorErr = err.Error() return false, err @@ -227,7 +225,7 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided } case *sqlparser.RevertMigration: strategySetting := schema.NewDDLStrategySetting(schema.DDLStrategyOnline, exec.ddlStrategySetting.Options) - onlineDDL, err := schema.NewOnlineDDL(exec.keyspace, "", sqlparser.String(stmt), strategySetting, exec.migrationContext, providedUUID) + onlineDDL, err := schema.NewOnlineDDL(exec.keyspace, "", sqlparser.String(stmt), strategySetting, exec.migrationContext, providedUUID, exec.parser) if err != nil { execResult.ExecutorErr = err.Error() return false, err @@ -265,9 +263,9 @@ func batchSQLs(sqls []string, batchSize int) (batchedSQLs []string) { // allSQLsAreCreateQueries returns 'true' when all given queries are CREATE TABLE|VIEW // This function runs pretty fast even for thousands of tables (its overhead is insignificant compared with // the time it would take to apply the changes). -func allSQLsAreCreateQueries(sqls []string) (bool, error) { +func allSQLsAreCreateQueries(sqls []string, parser *sqlparser.Parser) (bool, error) { for _, sql := range sqls { - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } @@ -377,7 +375,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute if exec.hasProvidedUUIDs() { return errorExecResult(fmt.Errorf("--batch-size conflicts with --uuid-list. 
Batching does not support UUIDs.")) } - allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls) + allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls, exec.parser) if err != nil { return errorExecResult(err) } @@ -444,16 +442,16 @@ func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult // applyAllowZeroInDate takes a SQL string which may contain one or more statements, // and, assuming those are DDLs, adds a /*vt+ allowZeroInDate=true */ directive to all of them, // returning the result again as one long SQL. -func applyAllowZeroInDate(sql string) (string, error) { +func applyAllowZeroInDate(sql string, parser *sqlparser.Parser) (string, error) { // sql may be a batch of multiple statements - sqls, err := sqlparser.SplitStatementToPieces(sql) + sqls, err := parser.SplitStatementToPieces(sql) if err != nil { return sql, err } var modifiedSqls []string for _, singleSQL := range sqls { // --allow-zero-in-date Applies to DDLs - stmt, err := sqlparser.Parse(singleSQL) + stmt, err := parser.Parse(singleSQL) if err != nil { return sql, err } @@ -486,16 +484,20 @@ func (exec *TabletExecutor) executeOneTablet( } else { if exec.ddlStrategySetting != nil && exec.ddlStrategySetting.IsAllowZeroInDateFlag() { // --allow-zero-in-date Applies to DDLs - sql, err = applyAllowZeroInDate(sql) + sql, err = applyAllowZeroInDate(sql, exec.parser) if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} return } } - result, err = exec.tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + request := &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ Query: []byte(sql), MaxRows: 10, - }) + } + if exec.ddlStrategySetting != nil && exec.ddlStrategySetting.IsAllowForeignKeysFlag() { + request.DisableForeignKeyChecks = true + } + result, err = exec.tmc.ExecuteFetchAsDba(ctx, tablet, false, request) } if err != nil { diff --git a/go/vt/schemamanager/tablet_executor_test.go 
b/go/vt/schemamanager/tablet_executor_test.go index 175e10dfb66..a683ef4d22e 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -19,11 +19,11 @@ package schemamanager import ( "context" "fmt" - "strings" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -43,15 +43,13 @@ func TestTabletExecutorOpen(t *testing.T) { executor := newFakeExecutor(t) ctx := context.Background() - if err := executor.Open(ctx, "test_keyspace"); err != nil { - t.Fatalf("executor.Open should succeed") - } + err := executor.Open(ctx, "test_keyspace") + require.NoError(t, err) defer executor.Close() - if err := executor.Open(ctx, "test_keyspace"); err != nil { - t.Fatalf("open an opened executor should also succeed") - } + err = executor.Open(ctx, "test_keyspace") + require.NoError(t, err, "open an opened executor should also succeed") } func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { @@ -69,13 +67,12 @@ func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { } // This will create the Keyspace, Shard and Tablet record. // Since this is a replica tablet, the Shard will have no primary. 
- if err := ts.InitTablet(ctx, tablet, false /*allowPrimaryOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/); err != nil { - t.Fatalf("InitTablet failed: %v", err) - } - executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) - if err := executor.Open(ctx, "test_keyspace"); err == nil || !strings.Contains(err.Error(), "does not have a primary") { - t.Fatalf("executor.Open() = '%v', want error", err) - } + err := ts.InitTablet(ctx, tablet, false /*allowPrimaryOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/) + require.NoError(t, err) + + executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) + err = executor.Open(ctx, "test_keyspace") + require.ErrorContains(t, err, "does not have a primary") executor.Close() } @@ -105,7 +102,7 @@ func TestTabletExecutorValidate(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() sqls := []string{ @@ -115,42 +112,37 @@ func TestTabletExecutorValidate(t *testing.T) { "ALTER SCHEMA db_name CHARACTER SET = utf8mb4", } - if err := executor.Validate(ctx, sqls); err == nil { - t.Fatalf("validate should fail because executor is closed") - } + err := executor.Validate(ctx, sqls) + require.Error(t, err, "validate should fail because executor is closed") executor.Open(ctx, "test_keyspace") defer executor.Close() // schema changes with DMLs should fail - if err := executor.Validate(ctx, []string{ - "INSERT INTO test_table VALUES(1)"}); err == nil { - 
t.Fatalf("schema changes are for DDLs") - } + err = executor.Validate(ctx, []string{ + "INSERT INTO test_table VALUES(1)", + }) + require.Error(t, err, "schema changes are for DDLs") // validates valid ddls - if err := executor.Validate(ctx, sqls); err != nil { - t.Fatalf("executor.Validate should succeed, but got error: %v", err) - } + err = executor.Validate(ctx, sqls) + require.NoError(t, err) // alter a table with more than 100,000 rows - if err := executor.Validate(ctx, []string{ + err = executor.Validate(ctx, []string{ "ALTER TABLE test_table_03 ADD COLUMN new_id bigint(20)", - }); err != nil { - t.Fatalf("executor.Validate should not fail, even for a table with more than 100,000 rows") - } + }) + require.NoError(t, err, "executor.Validate should not fail, even for a table with more than 100,000 rows") - if err := executor.Validate(ctx, []string{ + err = executor.Validate(ctx, []string{ "TRUNCATE TABLE test_table_04", - }); err != nil { - t.Fatalf("executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") - } + }) + require.NoError(t, err, "executor.Validate should succeed, truncate a table with more than 2,000,000 rows is allowed") - if err := executor.Validate(ctx, []string{ + err = executor.Validate(ctx, []string{ "DROP TABLE test_table_04", - }); err != nil { - t.Fatalf("executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") - } + }) + require.NoError(t, err, "executor.Validate should succeed, drop a table with more than 2,000,000 rows is allowed") } func TestTabletExecutorDML(t *testing.T) { @@ -179,17 +171,17 @@ func TestTabletExecutorDML(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() 
executor.Open(ctx, "unsharded_keyspace") defer executor.Close() // schema changes with DMLs should fail - if err := executor.Validate(ctx, []string{ - "INSERT INTO test_table VALUES(1)"}); err != nil { - t.Fatalf("executor.Validate should succeed, for DML to unsharded keyspace") - } + err := executor.Validate(ctx, []string{ + "INSERT INTO test_table VALUES(1)", + }) + require.NoError(t, err, "executor.Validate should succeed, for DML to unsharded keyspace") } func TestTabletExecutorExecute(t *testing.T) { @@ -199,9 +191,7 @@ func TestTabletExecutorExecute(t *testing.T) { sqls := []string{"DROP TABLE unknown_table"} result := executor.Execute(ctx, sqls) - if result.ExecutorErr == "" { - t.Fatalf("execute should fail, call execute.Open first") - } + require.NotEmpty(t, result.ExecutorErr, "execute should fail, call execute.Open first") } func TestIsOnlineSchemaDDL(t *testing.T) { @@ -269,12 +259,13 @@ func TestIsOnlineSchemaDDL(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { e := &TabletExecutor{} err := e.SetDDLStrategy(ts.ddlStrategy) assert.NoError(t, err) - stmt, err := sqlparser.Parse(ts.query) + stmt, err := parser.Parse(ts.query) assert.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) @@ -402,7 +393,7 @@ func TestAllSQLsAreCreateQueries(t *testing.T) { for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - result, err := allSQLsAreCreateQueries(tcase.sqls) + result, err := allSQLsAreCreateQueries(tcase.sqls, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tcase.expect, result) }) @@ -437,7 +428,7 @@ func TestApplyAllowZeroInDate(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - result, err := applyAllowZeroInDate(tcase.sql) + result, err := applyAllowZeroInDate(tcase.sql, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tcase.expect, result) }) diff --git a/go/vt/schemamanager/ui_controller_test.go 
b/go/vt/schemamanager/ui_controller_test.go index 1823717e0e3..de1b0bcda66 100644 --- a/go/vt/schemamanager/ui_controller_test.go +++ b/go/vt/schemamanager/ui_controller_test.go @@ -19,10 +19,11 @@ package schemamanager import ( "fmt" "net/http/httptest" - "strings" "testing" "context" + + "github.com/stretchr/testify/require" ) func TestUIController(t *testing.T) { @@ -32,68 +33,37 @@ func TestUIController(t *testing.T) { ctx := context.Background() err := controller.Open(ctx) - if err != nil { - t.Fatalf("controller.Open should succeed, but got error: %v", err) - } + require.NoError(t, err) keyspace := controller.Keyspace() - if keyspace != "test_keyspace" { - t.Fatalf("expect to get keyspace: 'test_keyspace', but got keyspace: '%s'", keyspace) - } + require.Equal(t, "test_keyspace", keyspace) sqls, err := controller.Read(ctx) - if err != nil { - t.Fatalf("controller.Read should succeed, but got error: %v", err) - } - if len(sqls) != 1 { - t.Fatalf("controller should only get one sql, but got: %v", sqls) - } - if sqls[0] != sql { - t.Fatalf("expect to get sql: '%s', but got: '%s'", sql, sqls[0]) - } + require.NoError(t, err) + require.Len(t, sqls, 1, "controller should only get one sql") + require.Equal(t, sql, sqls[0]) + defer controller.Close() + err = controller.OnReadSuccess(ctx) - if err != nil { - t.Fatalf("OnDataSourcerReadSuccess should succeed") - } - if !strings.Contains(response.Body.String(), "OnReadSuccess, sqls") { - t.Fatalf("controller.OnReadSuccess should write to http response") - } + require.NoError(t, err) + require.Contains(t, response.Body.String(), "OnReadSuccess, sqls", "controller.OnReadSuccess should write to http response") + errReadFail := fmt.Errorf("read fail") err = controller.OnReadFail(ctx, errReadFail) - if err != errReadFail { - t.Fatalf("should get error:%v, but get: %v", errReadFail, err) - } - - if !strings.Contains(response.Body.String(), "OnReadFail, error") { - t.Fatalf("controller.OnReadFail should write to http 
response") - } + require.ErrorIs(t, err, errReadFail) + require.Contains(t, response.Body.String(), "OnReadFail, error", "controller.OnReadFail should write to http response") err = controller.OnValidationSuccess(ctx) - if err != nil { - t.Fatalf("OnValidationSuccess should succeed") - } - - if !strings.Contains(response.Body.String(), "OnValidationSuccess, sqls") { - t.Fatalf("controller.OnValidationSuccess should write to http response") - } + require.NoError(t, err) + require.Contains(t, response.Body.String(), "OnValidationSuccess, sqls", "controller.OnValidationSuccess should write to http response") errValidationFail := fmt.Errorf("validation fail") err = controller.OnValidationFail(ctx, errValidationFail) - if err != errValidationFail { - t.Fatalf("should get error:%v, but get: %v", errValidationFail, err) - } - - if !strings.Contains(response.Body.String(), "OnValidationFail, error") { - t.Fatalf("controller.OnValidationFail should write to http response") - } + require.ErrorIs(t, err, errValidationFail) + require.Contains(t, response.Body.String(), "OnValidationFail, error", "controller.OnValidationFail should write to http response") err = controller.OnExecutorComplete(ctx, &ExecuteResult{}) - if err != nil { - t.Fatalf("OnExecutorComplete should succeed") - } - - if !strings.Contains(response.Body.String(), "Executor succeeds") { - t.Fatalf("controller.OnExecutorComplete should write to http response") - } + require.NoError(t, err) + require.Contains(t, response.Body.String(), "Executor succeeds", "controller.OnExecutorComplete should write to http response") } diff --git a/go/vt/servenv/buildinfo.go b/go/vt/servenv/buildinfo.go index 15e34217dae..d55e01d84c0 100644 --- a/go/vt/servenv/buildinfo.go +++ b/go/vt/servenv/buildinfo.go @@ -33,6 +33,7 @@ var ( buildTime = "" buildGitRev = "" buildGitBranch = "" + statsBuildVersion *stats.String jenkinsBuildNumberStr = "" // version registers the command line flag to expose build info. 
@@ -121,6 +122,8 @@ func init() { stats.NewString("BuildHost").Set(AppVersion.buildHost) stats.NewString("BuildUser").Set(AppVersion.buildUser) stats.NewGauge("BuildTimestamp", "build timestamp").Set(AppVersion.buildTime) + statsBuildVersion = stats.NewString("BuildVersion") + statsBuildVersion.Set(AppVersion.version) stats.NewString("BuildGitRev").Set(AppVersion.buildGitRev) stats.NewString("BuildGitBranch").Set(AppVersion.buildGitBranch) stats.NewGauge("BuildNumber", "build number").Set(AppVersion.jenkinsBuildNumber) diff --git a/go/vt/servenv/buildinfo_test.go b/go/vt/servenv/buildinfo_test.go index be35511a036..bc972df03ea 100644 --- a/go/vt/servenv/buildinfo_test.go +++ b/go/vt/servenv/buildinfo_test.go @@ -47,3 +47,8 @@ func TestVersionString(t *testing.T) { assert.Equal(t, "8.0.30-Vitess", v.MySQLVersion()) } + +func TestBuildVersionStats(t *testing.T) { + buildVersion := statsBuildVersion.Get() + assert.Equal(t, buildVersion, versionName) +} diff --git a/go/vt/servenv/exporter.go b/go/vt/servenv/exporter.go index a3d23dc4b74..7adb3e18f2c 100644 --- a/go/vt/servenv/exporter.go +++ b/go/vt/servenv/exporter.go @@ -19,6 +19,7 @@ package servenv import ( "expvar" "net/http" + "net/url" "sync" "time" @@ -150,9 +151,10 @@ func (e *Exporter) URLPrefix() string { // There are two other places where this logic is duplicated: // status.go and go/vt/vtgate/discovery/healthcheck.go. if e.name == "" { - return e.name + return "" } - return "/" + e.name + prefix, _ := url.JoinPath("/", e.name) + return prefix } // HandleFunc sets or overwrites the handler for url. If Exporter has a name, diff --git a/go/vt/servenv/grpc_server.go b/go/vt/servenv/grpc_server.go index 7a41cca389a..96fe3c25ea9 100644 --- a/go/vt/servenv/grpc_server.go +++ b/go/vt/servenv/grpc_server.go @@ -99,6 +99,9 @@ var ( // even when there are no active streams (RPCs). If false, and client sends ping when // there are no active streams, server will send GOAWAY and close the connection. 
gRPCKeepAliveEnforcementPolicyPermitWithoutStream bool + + gRPCKeepaliveTime = 10 * time.Second + gRPCKeepaliveTimeout = 10 * time.Second ) // TLS variables. @@ -141,6 +144,8 @@ func RegisterGRPCServerFlags() { fs.StringVar(&gRPCCRL, "grpc_crl", gRPCCRL, "path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake") fs.BoolVar(&gRPCEnableOptionalTLS, "grpc_enable_optional_tls", gRPCEnableOptionalTLS, "enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port") fs.StringVar(&gRPCServerCA, "grpc_server_ca", gRPCServerCA, "path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients") + fs.DurationVar(&gRPCKeepaliveTime, "grpc_server_keepalive_time", gRPCKeepaliveTime, "After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive.") + fs.DurationVar(&gRPCKeepaliveTimeout, "grpc_server_keepalive_timeout", gRPCKeepaliveTimeout, "After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that the connection is closed.") }) } @@ -233,6 +238,8 @@ func createGRPCServer() { ka := keepalive.ServerParameters{ MaxConnectionAge: gRPCMaxConnectionAge, MaxConnectionAgeGrace: gRPCMaxConnectionAgeGrace, + Time: gRPCKeepaliveTime, + Timeout: gRPCKeepaliveTimeout, } opts = append(opts, grpc.KeepaliveParams(ka)) diff --git a/go/vt/servenv/http.go b/go/vt/servenv/http.go index f4b001383d1..c4b14e9b4e6 100644 --- a/go/vt/servenv/http.go +++ b/go/vt/servenv/http.go @@ -46,6 +46,10 @@ func HTTPServe(l net.Listener) error { // HTTPRegisterProfile registers the default pprof HTTP endpoints with the internal servenv mux. 
func HTTPRegisterProfile() { + if !httpPprof { + return + } + HTTPHandleFunc("/debug/pprof/", pprof.Index) HTTPHandleFunc("/debug/pprof/cmdline", pprof.Cmdline) HTTPHandleFunc("/debug/pprof/profile", pprof.Profile) diff --git a/go/vt/servenv/mysql.go b/go/vt/servenv/mysql.go index 94019a1c42c..c0af2a7ee39 100644 --- a/go/vt/servenv/mysql.go +++ b/go/vt/servenv/mysql.go @@ -17,13 +17,17 @@ limitations under the License. package servenv import ( + "fmt" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/mysql/config" ) // mySQLServerVersion is what Vitess will present as it's version during the connection handshake, // and as the value to the @@version system variable. If nothing is provided, Vitess will report itself as // a specific MySQL version with the vitess version appended to it -var mySQLServerVersion = "8.0.30-Vitess" +var mySQLServerVersion = fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion) // RegisterMySQLServerFlags installs the flags needed to specify or expose a // particular MySQL server version from Vitess. @@ -36,14 +40,6 @@ func MySQLServerVersion() string { return mySQLServerVersion } -// SetMySQLServerVersionForTest sets the value of the `--mysql_server_version` -// flag. It is intended for use in tests that require a specific MySQL server -// version (for example, collations) that cannot specify that via the command -// line. 
-func SetMySQLServerVersionForTest(version string) { - mySQLServerVersion = version -} - func init() { for _, cmd := range []string{ "mysqlctl", @@ -51,6 +47,7 @@ func init() { "vtbackup", "vtcombo", "vtctl", + "vtctld", "vtctldclient", "vtexplain", "vtgate", diff --git a/go/vt/servenv/pprof.go b/go/vt/servenv/pprof.go index d1d8e99588f..957c0504c00 100644 --- a/go/vt/servenv/pprof.go +++ b/go/vt/servenv/pprof.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "os" - "os/signal" "path/filepath" "runtime" "runtime/pprof" @@ -28,7 +27,6 @@ import ( "strconv" "strings" "sync/atomic" - "syscall" "github.com/spf13/pflag" @@ -37,6 +35,7 @@ import ( var ( pprofFlag []string + httpPprof bool ) type profmode string @@ -298,47 +297,9 @@ func (prof *profile) init() (start func(), stop func()) { } } -func pprofInit() { - prof, err := parseProfileFlag(pprofFlag) - if err != nil { - log.Fatal(err) - } - if prof != nil { - start, stop := prof.init() - startSignal := make(chan os.Signal, 1) - stopSignal := make(chan os.Signal, 1) - - if prof.waitSig { - signal.Notify(startSignal, syscall.SIGUSR1) - } else { - start() - signal.Notify(stopSignal, syscall.SIGUSR1) - } - - go func() { - for { - <-startSignal - start() - signal.Reset(syscall.SIGUSR1) - signal.Notify(stopSignal, syscall.SIGUSR1) - } - }() - - go func() { - for { - <-stopSignal - stop() - signal.Reset(syscall.SIGUSR1) - signal.Notify(startSignal, syscall.SIGUSR1) - } - }() - - OnTerm(stop) - } -} - func init() { OnParse(func(fs *pflag.FlagSet) { + fs.BoolVar(&httpPprof, "pprof-http", httpPprof, "enable pprof http endpoints") fs.StringSliceVar(&pprofFlag, "pprof", pprofFlag, "enable profiling") }) OnInit(pprofInit) diff --git a/go/vt/servenv/pprof_unix.go b/go/vt/servenv/pprof_unix.go new file mode 100644 index 00000000000..097abc08720 --- /dev/null +++ b/go/vt/servenv/pprof_unix.go @@ -0,0 +1,66 @@ +//go:build !windows + +/* +Copyright 2023 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servenv + +import ( + "os" + "os/signal" + "syscall" + + "vitess.io/vitess/go/vt/log" +) + +func pprofInit() { + prof, err := parseProfileFlag(pprofFlag) + if err != nil { + log.Fatal(err) + } + if prof != nil { + start, stop := prof.init() + startSignal := make(chan os.Signal, 1) + stopSignal := make(chan os.Signal, 1) + + if prof.waitSig { + signal.Notify(startSignal, syscall.SIGUSR1) + } else { + start() + signal.Notify(stopSignal, syscall.SIGUSR1) + } + + go func() { + for { + <-startSignal + start() + signal.Reset(syscall.SIGUSR1) + signal.Notify(stopSignal, syscall.SIGUSR1) + } + }() + + go func() { + for { + <-stopSignal + stop() + signal.Reset(syscall.SIGUSR1) + signal.Notify(startSignal, syscall.SIGUSR1) + } + }() + + OnTerm(stop) + } +} diff --git a/go/vt/servenv/pprof_windows.go b/go/vt/servenv/pprof_windows.go new file mode 100644 index 00000000000..7ec4be816df --- /dev/null +++ b/go/vt/servenv/pprof_windows.go @@ -0,0 +1,27 @@ +//go:build windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servenv + +import ( + "vitess.io/vitess/go/vt/log" +) + +func pprofInit() { + log.Warningf("pprof is not supported on Windows") +} diff --git a/go/vt/servenv/run.go b/go/vt/servenv/run.go index 6f028786eaf..29b15a40008 100644 --- a/go/vt/servenv/run.go +++ b/go/vt/servenv/run.go @@ -62,18 +62,18 @@ func Run(bindAddress string, port int) { l.Close() startTime := time.Now() - log.Infof("Entering lameduck mode for at least %v", lameduckPeriod) + log.Infof("Entering lameduck mode for at least %v", timeouts.LameduckPeriod) log.Infof("Firing asynchronous OnTerm hooks") go onTermHooks.Fire() - fireOnTermSyncHooks(onTermTimeout) - if remain := lameduckPeriod - time.Since(startTime); remain > 0 { + fireOnTermSyncHooks(timeouts.OnTermTimeout) + if remain := timeouts.LameduckPeriod - time.Since(startTime); remain > 0 { log.Infof("Sleeping an extra %v after OnTermSync to finish lameduck period", remain) time.Sleep(remain) } log.Info("Shutting down gracefully") - fireOnCloseHooks(onCloseTimeout) + fireOnCloseHooks(timeouts.OnCloseTimeout) ListeningURL = url.URL{} } diff --git a/go/vt/servenv/servenv.go b/go/vt/servenv/servenv.go index e7c28855997..4aa9818eb7d 100644 --- a/go/vt/servenv/servenv.go +++ b/go/vt/servenv/servenv.go @@ -33,11 +33,8 @@ import ( "fmt" "net/url" "os" - "os/signal" - "runtime/debug" "strings" "sync" - "syscall" "time" "github.com/spf13/cobra" @@ -78,24 +75,33 @@ var ( // Flags specific to Init, Run, and RunDefault functions. 
var ( - lameduckPeriod = 50 * time.Millisecond - onTermTimeout = 10 * time.Second - onCloseTimeout = 10 * time.Second catchSigpipe bool maxStackSize = 64 * 1024 * 1024 initStartTime time.Time // time when tablet init started: for debug purposes to time how long a tablet init takes tableRefreshInterval int ) +type TimeoutFlags struct { + LameduckPeriod time.Duration + OnTermTimeout time.Duration + OnCloseTimeout time.Duration +} + +var timeouts = &TimeoutFlags{ + LameduckPeriod: 50 * time.Millisecond, + OnTermTimeout: 10 * time.Second, + OnCloseTimeout: 10 * time.Second, +} + // RegisterFlags installs the flags used by Init, Run, and RunDefault. // // This must be called before servenv.ParseFlags if using any of those // functions. func RegisterFlags() { OnParse(func(fs *pflag.FlagSet) { - fs.DurationVar(&lameduckPeriod, "lameduck-period", lameduckPeriod, "keep running at least this long after SIGTERM before stopping") - fs.DurationVar(&onTermTimeout, "onterm_timeout", onTermTimeout, "wait no more than this for OnTermSync handlers before stopping") - fs.DurationVar(&onCloseTimeout, "onclose_timeout", onCloseTimeout, "wait no more than this for OnClose handlers before stopping") + fs.DurationVar(&timeouts.LameduckPeriod, "lameduck-period", timeouts.LameduckPeriod, "keep running at least this long after SIGTERM before stopping") + fs.DurationVar(&timeouts.OnTermTimeout, "onterm_timeout", timeouts.OnTermTimeout, "wait no more than this for OnTermSync handlers before stopping") + fs.DurationVar(&timeouts.OnCloseTimeout, "onclose_timeout", timeouts.OnCloseTimeout, "wait no more than this for OnClose handlers before stopping") fs.BoolVar(&catchSigpipe, "catch-sigpipe", catchSigpipe, "catch and ignore SIGPIPE on stdout and stderr if specified") fs.IntVar(&maxStackSize, "max-stack-size", maxStackSize, "configure the maximum stack size in bytes") fs.IntVar(&tableRefreshInterval, "table-refresh-interval", tableRefreshInterval, "interval in milliseconds to refresh tables in 
status page with refreshRequired class") @@ -105,67 +111,26 @@ func RegisterFlags() { }) } -func GetInitStartTime() time.Time { - mu.Lock() - defer mu.Unlock() - return initStartTime -} +func RegisterFlagsWithTimeouts(tf *TimeoutFlags) { + OnParse(func(fs *pflag.FlagSet) { + fs.DurationVar(&tf.LameduckPeriod, "lameduck-period", tf.LameduckPeriod, "keep running at least this long after SIGTERM before stopping") + fs.DurationVar(&tf.OnTermTimeout, "onterm_timeout", tf.OnTermTimeout, "wait no more than this for OnTermSync handlers before stopping") + fs.DurationVar(&tf.OnCloseTimeout, "onclose_timeout", tf.OnCloseTimeout, "wait no more than this for OnClose handlers before stopping") + fs.BoolVar(&catchSigpipe, "catch-sigpipe", catchSigpipe, "catch and ignore SIGPIPE on stdout and stderr if specified") + fs.IntVar(&maxStackSize, "max-stack-size", maxStackSize, "configure the maximum stack size in bytes") + fs.IntVar(&tableRefreshInterval, "table-refresh-interval", tableRefreshInterval, "interval in milliseconds to refresh tables in status page with refreshRequired class") -// Init is the first phase of the server startup. 
-func Init() { - mu.Lock() - defer mu.Unlock() - initStartTime = time.Now() + // pid_file.go + fs.StringVar(&pidFile, "pid_file", pidFile, "If set, the process will write its pid to the named file, and delete it on graceful shutdown.") - // Uptime metric - _ = stats.NewGaugeFunc("Uptime", "Uptime in nanoseconds", func() int64 { - return int64(time.Since(serverStart).Nanoseconds()) + timeouts = tf }) +} - // Ignore SIGPIPE if specified - // The Go runtime catches SIGPIPE for us on all fds except stdout/stderr - // See https://golang.org/pkg/os/signal/#hdr-SIGPIPE - if catchSigpipe { - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGPIPE) - go func() { - <-sigChan - log.Warning("Caught SIGPIPE (ignoring all future SIGPIPEs)") - signal.Ignore(syscall.SIGPIPE) - }() - } - - // Add version tag to every info log - log.Infof(AppVersion.String()) - if inited { - log.Fatal("servenv.Init called second time") - } - inited = true - - // Once you run as root, you pretty much destroy the chances of a - // non-privileged user starting the program correctly. - if uid := os.Getuid(); uid == 0 { - log.Exitf("servenv.Init: running this as root makes no sense") - } - - // We used to set this limit directly, but you pretty much have to - // use a root account to allow increasing a limit reliably. Dropping - // privileges is also tricky. The best strategy is to make a shell - // script set up the limits as root and switch users before starting - // the server. - fdLimit := &syscall.Rlimit{} - if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, fdLimit); err != nil { - log.Errorf("max-open-fds failed: %v", err) - } - fdl := stats.NewGauge("MaxFds", "File descriptor limit") - fdl.Set(int64(fdLimit.Cur)) - - // Limit the stack size. We don't need huge stacks and smaller limits mean - // any infinite recursion fires earlier and on low memory systems avoids - // out of memory issues in favor of a stack overflow error. 
- debug.SetMaxStack(maxStackSize) - - onInitHooks.Fire() +func GetInitStartTime() time.Time { + mu.Lock() + defer mu.Unlock() + return initStartTime } func populateListeningURL(port int32) { diff --git a/go/vt/servenv/servenv_unix.go b/go/vt/servenv/servenv_unix.go new file mode 100644 index 00000000000..17fa85c4167 --- /dev/null +++ b/go/vt/servenv/servenv_unix.go @@ -0,0 +1,87 @@ +//go:build !windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servenv + +import ( + "os" + "os/signal" + "runtime/debug" + "syscall" + "time" + + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/log" +) + +// Init is the first phase of the server startup. 
+func Init() { + mu.Lock() + defer mu.Unlock() + initStartTime = time.Now() + + // Uptime metric + _ = stats.NewGaugeFunc("Uptime", "Uptime in nanoseconds", func() int64 { + return int64(time.Since(serverStart).Nanoseconds()) + }) + + // Ignore SIGPIPE if specified + // The Go runtime catches SIGPIPE for us on all fds except stdout/stderr + // See https://golang.org/pkg/os/signal/#hdr-SIGPIPE + if catchSigpipe { + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGPIPE) + go func() { + <-sigChan + log.Warning("Caught SIGPIPE (ignoring all future SIGPIPEs)") + signal.Ignore(syscall.SIGPIPE) + }() + } + + // Add version tag to every info log + log.Infof(AppVersion.String()) + if inited { + log.Fatal("servenv.Init called second time") + } + inited = true + + // Once you run as root, you pretty much destroy the chances of a + // non-privileged user starting the program correctly. + if uid := os.Getuid(); uid == 0 { + log.Exitf("servenv.Init: running this as root makes no sense") + } + + // We used to set this limit directly, but you pretty much have to + // use a root account to allow increasing a limit reliably. Dropping + // privileges is also tricky. The best strategy is to make a shell + // script set up the limits as root and switch users before starting + // the server. + fdLimit := &syscall.Rlimit{} + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, fdLimit); err != nil { + log.Errorf("max-open-fds failed: %v", err) + } + fdl := stats.NewGauge("MaxFds", "File descriptor limit") + fdl.Set(int64(fdLimit.Cur)) + + // Limit the stack size. We don't need huge stacks and smaller limits mean + // any infinite recursion fires earlier and on low memory systems avoids + // out of memory issues in favor of a stack overflow error. 
+ debug.SetMaxStack(maxStackSize) + + onInitHooks.Fire() +} diff --git a/go/vt/servenv/servenv_windows.go b/go/vt/servenv/servenv_windows.go new file mode 100644 index 00000000000..bd610b1f245 --- /dev/null +++ b/go/vt/servenv/servenv_windows.go @@ -0,0 +1,21 @@ +//go:build windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package servenv + +func Init() {} diff --git a/go/vt/servenv/status.go b/go/vt/servenv/status.go index ac912fd881e..422e6907a76 100644 --- a/go/vt/servenv/status.go +++ b/go/vt/servenv/status.go @@ -17,11 +17,11 @@ limitations under the License. 
package servenv import ( - "bytes" "fmt" "io" "net" "net/http" + "net/url" "os" "path/filepath" "runtime" @@ -172,7 +172,8 @@ func newStatusPage(name string) *statusPage { registerDebugBlockProfileRate() registerDebugMutexProfileFraction() } else { - HTTPHandleFunc("/"+name+StatusURLPath(), sp.statusHandler) + pat, _ := url.JoinPath("/", name, StatusURLPath()) + HTTPHandleFunc(pat, sp.statusHandler) } return sp } @@ -256,7 +257,7 @@ func (sp *statusPage) statusHandler(w http.ResponseWriter, r *http.Request) { } func (sp *statusPage) reparse(sections []section) (*template.Template, error) { - var buf bytes.Buffer + var buf strings.Builder io.WriteString(&buf, `{{define "status"}}`) io.WriteString(&buf, statusHTML) @@ -301,7 +302,7 @@ func registerDebugBlockProfileRate() { runtime.SetBlockProfileRate(rate) log.Infof("Set block profile rate to: %d", rate) w.Header().Set("Content-Type", "text/plain") - w.Write([]byte(message)) + io.WriteString(w, message) }) } @@ -329,7 +330,7 @@ func registerDebugMutexProfileFraction() { runtime.SetMutexProfileFraction(fraction) log.Infof("Set mutex profiling fraction to: %d", fraction) w.Header().Set("Content-Type", "text/plain") - w.Write([]byte(message)) + io.WriteString(w, message) }) } diff --git a/go/vt/servenv/truncate_query.go b/go/vt/servenv/truncate_query.go new file mode 100644 index 00000000000..fdb618c5c6a --- /dev/null +++ b/go/vt/servenv/truncate_query.go @@ -0,0 +1,34 @@ +package servenv + +import ( + "github.com/spf13/pflag" +) + +var ( + // TruncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. + TruncateUILen = 512 + + // TruncateErrLen truncate queries in error logs to the given length. 0 means unlimited. 
+ TruncateErrLen = 0 +) + +func registerQueryTruncationFlags(fs *pflag.FlagSet) { + fs.IntVar(&TruncateUILen, "sql-max-length-ui", TruncateUILen, "truncate queries in debug UIs to the given length (default 512)") + fs.IntVar(&TruncateErrLen, "sql-max-length-errors", TruncateErrLen, "truncate queries in error logs to the given length (default unlimited)") +} + +func init() { + for _, cmd := range []string{ + "vtgate", + "vttablet", + "vtcombo", + "vtctld", + "vtctl", + "vtexplain", + "vtbackup", + "vttestserver", + "vtbench", + } { + OnParseFor(cmd, registerQueryTruncationFlags) + } +} diff --git a/go/vt/servenv/version.go b/go/vt/servenv/version.go index 23bac777b4d..ca036720381 100644 --- a/go/vt/servenv/version.go +++ b/go/vt/servenv/version.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Vitess Authors. +Copyright 2024 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,9 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES -// DO NOT EDIT - package servenv -const versionName = "19.0.0-SNAPSHOT" +// DO NOT EDIT +// THIS FILE IS AUTO-GENERATED DURING NEW RELEASES BY THE VITESS-RELEASER + +const versionName = "20.0.0-SNAPSHOT" diff --git a/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql b/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql index 40fdeef2683..2926ec76f28 100644 --- a/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql +++ b/go/vt/sidecardb/schema/onlineddl/schema_migrations.sql @@ -71,6 +71,8 @@ CREATE TABLE IF NOT EXISTS schema_migrations `reviewed_timestamp` timestamp NULL DEFAULT NULL, `ready_to_complete_timestamp` timestamp NULL DEFAULT NULL, `removed_foreign_key_names` text NOT NULL, + `last_cutover_attempt_timestamp` timestamp NULL DEFAULT NULL, + `force_cutover` tinyint unsigned NOT NULL DEFAULT '0', PRIMARY KEY (`id`), UNIQUE KEY `uuid_idx` (`migration_uuid`), KEY `keyspace_shard_idx` (`keyspace`(64), `shard`(64)), diff --git a/go/vt/sidecardb/schema/schemaengine/tables.sql b/go/vt/sidecardb/schema/schemaengine/tables.sql index 00fd0194d67..3aadc7c9635 100644 --- a/go/vt/sidecardb/schema/schemaengine/tables.sql +++ b/go/vt/sidecardb/schema/schemaengine/tables.sql @@ -16,8 +16,8 @@ limitations under the License. CREATE TABLE IF NOT EXISTS tables ( - TABLE_SCHEMA varchar(64) NOT NULL, - TABLE_NAME varchar(64) NOT NULL, + TABLE_SCHEMA varchar(64) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, + TABLE_NAME varchar(64) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, CREATE_STATEMENT longtext, CREATE_TIME BIGINT, PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME) diff --git a/go/vt/sidecardb/schema/schemaengine/udfs.sql b/go/vt/sidecardb/schema/schemaengine/udfs.sql new file mode 100644 index 00000000000..90c6143fbd6 --- /dev/null +++ b/go/vt/sidecardb/schema/schemaengine/udfs.sql @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +CREATE TABLE IF NOT EXISTS udfs +( + FUNCTION_NAME varchar(128) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, + FUNCTION_RETURN_TYPE varchar(20) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, + FUNCTION_TYPE varchar(20) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, + PRIMARY KEY (FUNCTION_NAME) +) engine = InnoDB diff --git a/go/vt/sidecardb/schema/schemaengine/views.sql b/go/vt/sidecardb/schema/schemaengine/views.sql index 1fee077202f..dd242e6567f 100644 --- a/go/vt/sidecardb/schema/schemaengine/views.sql +++ b/go/vt/sidecardb/schema/schemaengine/views.sql @@ -16,8 +16,8 @@ limitations under the License. CREATE TABLE IF NOT EXISTS views ( - TABLE_SCHEMA varchar(64) NOT NULL, - TABLE_NAME varchar(64) NOT NULL, + TABLE_SCHEMA varchar(64) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, + TABLE_NAME varchar(64) CHARACTER SET `utf8mb3` COLLATE `utf8mb3_bin` NOT NULL, CREATE_STATEMENT longtext, VIEW_DEFINITION longtext NOT NULL, PRIMARY KEY (TABLE_SCHEMA, TABLE_NAME) diff --git a/go/vt/sidecardb/schema/schematracker/schemacopy.sql b/go/vt/sidecardb/schema/schematracker/schemacopy.sql deleted file mode 100644 index 296bb34df14..00000000000 --- a/go/vt/sidecardb/schema/schematracker/schemacopy.sql +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -CREATE TABLE IF NOT EXISTS schemacopy -( - `table_schema` varchar(64) NOT NULL, - `table_name` varchar(64) NOT NULL, - `column_name` varchar(64) NOT NULL, - `ordinal_position` bigint unsigned NOT NULL, - `character_set_name` varchar(32) DEFAULT NULL, - `collation_name` varchar(32) DEFAULT NULL, - `data_type` varchar(64) NOT NULL, - `column_key` varchar(3) NOT NULL, - PRIMARY KEY (`table_schema`, `table_name`, `ordinal_position`) -) ENGINE = InnoDB diff --git a/go/vt/sidecardb/schema/vreplication/vreplication.sql b/go/vt/sidecardb/schema/vreplication/vreplication.sql index ce9badfd98f..8d2ec41d1a6 100644 --- a/go/vt/sidecardb/schema/vreplication/vreplication.sql +++ b/go/vt/sidecardb/schema/vreplication/vreplication.sql @@ -38,6 +38,12 @@ CREATE TABLE IF NOT EXISTS vreplication `component_throttled` varchar(255) NOT NULL DEFAULT '', `workflow_sub_type` int NOT NULL DEFAULT '0', `defer_secondary_keys` tinyint(1) NOT NULL DEFAULT '0', + /* + The options column is used to store any applicable additional attributes for a vreplication workflow. + Currently used for optional flag(s): + - `tenant-id`: used to specify the tenant id for a multi-tenant migration. 
(MoveTables only) + */ + `options` json NOT NULL, PRIMARY KEY (`id`), KEY `workflow_idx` (`workflow`(64)) ) ENGINE = InnoDB diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index 0bb64611607..0947355cd46 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -29,7 +29,10 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/history" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/mysql/fakesqldb" @@ -45,8 +48,9 @@ import ( ) const ( - sidecarDBExistsQuery = "select 'true' as 'dbexists' from information_schema.SCHEMATA where SCHEMA_NAME = %a" - showCreateTableQuery = "show create table %s.%s" + sidecarDBExistsQuery = "select 'true' as 'dbexists' from information_schema.SCHEMATA where SCHEMA_NAME = %a" + showCreateTableQuery = "show create table %s.%s" + sidecarCollationQuery = "select @@global.collation_server" maxDDLErrorHistoryLength = 100 @@ -113,8 +117,8 @@ func init() { })) } -func validateSchemaDefinition(name, schema string) (string, error) { - stmt, err := sqlparser.ParseStrictDDL(schema) +func validateSchemaDefinition(name, schema string, parser *sqlparser.Parser) (string, error) { + stmt, err := parser.ParseStrictDDL(schema) if err != nil { return "", err @@ -142,7 +146,7 @@ func validateSchemaDefinition(name, schema string) (string, error) { // loadSchemaDefinitions loads the embedded schema definitions // into a slice of sidecarTables for processing. 
-func loadSchemaDefinitions() { +func loadSchemaDefinitions(parser *sqlparser.Parser) { sqlFileExtension := ".sql" err := fs.WalkDir(schemaLocation, ".", func(path string, entry fs.DirEntry, err error) error { if err != nil { @@ -171,7 +175,7 @@ func loadSchemaDefinitions() { panic(err) } var normalizedSchema string - if normalizedSchema, err = validateSchemaDefinition(name, string(schema)); err != nil { + if normalizedSchema, err = validateSchemaDefinition(name, string(schema), parser); err != nil { return err } sidecarTables = append(sidecarTables, &sidecarTable{name: name, module: module, path: path, schema: normalizedSchema}) @@ -194,8 +198,10 @@ func printCallerDetails() { type schemaInit struct { ctx context.Context + env *vtenv.Environment exec Exec dbCreated bool // The first upgrade/create query will also create the sidecar database if required. + coll collations.ID } // Exec is a callback that has to be passed to Init() to @@ -227,15 +233,18 @@ func getDDLErrorHistory() []*ddlError { // Init creates or upgrades the sidecar database based on // the declarative schema defined for all tables. 
-func Init(ctx context.Context, exec Exec) error { +func Init(ctx context.Context, env *vtenv.Environment, exec Exec) error { printCallerDetails() // for debug purposes only, remove in v17 log.Infof("Starting sidecardb.Init()") - once.Do(loadSchemaDefinitions) + once.Do(func() { + loadSchemaDefinitions(env.Parser()) + }) si := &schemaInit{ ctx: ctx, exec: exec, + env: env, } // There are paths in the tablet initialization where we @@ -264,6 +273,10 @@ func Init(ctx context.Context, exec Exec) error { } defer resetSQLMode() + if si.coll, err = si.collation(); err != nil { + return err + } + for _, table := range sidecarTables { if err := si.ensureSchema(table); err != nil { return err @@ -337,6 +350,22 @@ func (si *schemaInit) setCurrentDatabase(dbName string) error { return err } +func (si *schemaInit) collation() (collations.ID, error) { + rs, err := si.exec(si.ctx, sidecarCollationQuery, 2, false) + if err != nil { + log.Error(err) + return collations.Unknown, err + } + + switch len(rs.Rows) { + case 1: + return si.env.CollationEnv().LookupByName(rs.Rows[0][0].ToString()), nil + default: + // This should never happen. + return collations.Unknown, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid results for SidecarDB query %q as it produced %d rows", sidecarCollationQuery, len(rs.Rows)) + } +} + // Gets existing schema of a table in the sidecar database. func (si *schemaInit) getCurrentSchema(tableName string) (string, error) { var currentTableSchema string @@ -361,7 +390,7 @@ func (si *schemaInit) getCurrentSchema(tableName string) (string, error) { } // findTableSchemaDiff gets the diff which needs to be applied -// to the current table schema in order toreach the desired one. +// to the current table schema in order to reach the desired one. // The result will be an empty string if they match. // This will be a CREATE statement if the table does not exist // or an ALTER if the table exists but has a different schema. 
@@ -370,7 +399,8 @@ func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (s TableCharsetCollateStrategy: schemadiff.TableCharsetCollateIgnoreAlways, AlterTableAlgorithmStrategy: schemadiff.AlterTableAlgorithmStrategyCopy, } - diff, err := schemadiff.DiffCreateTablesQueries(current, desired, hints) + env := schemadiff.NewEnv(si.env, si.coll) + diff, err := schemadiff.DiffCreateTablesQueries(env, current, desired, hints) if err != nil { return "", err } @@ -458,8 +488,10 @@ func (t *sidecarTable) String() string { // AddSchemaInitQueries adds sidecar database schema related // queries to a mock db. // This is for unit tests only! -func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { - once.Do(loadSchemaDefinitions) +func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool, parser *sqlparser.Parser) { + once.Do(func() { + loadSchemaDefinitions(parser) + }) result := &sqltypes.Result{} for _, q := range sidecar.DBInitQueryPatterns { db.AddQueryPattern(q, result) @@ -485,10 +517,15 @@ func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { sqlModeResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "sql_mode", "varchar"), - "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION", + config.DefaultSQLMode, ) db.AddQuery("select @@session.sql_mode as sql_mode", sqlModeResult) - + collationResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "@@global.collation_server ", + "varchar"), + "utf8mb4_0900_ai_ci", + ) + db.AddQuery("select @@global.collation_server", collationResult) db.AddQuery("set @@session.sql_mode=''", &sqltypes.Result{}) } diff --git a/go/vt/sidecardb/sidecardb_test.go b/go/vt/sidecardb/sidecardb_test.go index 22147c960e9..55c2c6cd6b5 100644 --- a/go/vt/sidecardb/sidecardb_test.go +++ b/go/vt/sidecardb/sidecardb_test.go @@ -25,7 +25,9 @@ import ( "testing" "vitess.io/vitess/go/constants/sidecar" + 
"vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/require" @@ -41,12 +43,13 @@ func TestInitErrors(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - AddSchemaInitQueries(db, false) + env := vtenv.NewTestEnv() + AddSchemaInitQueries(db, false, env.Parser()) ddlErrorCount.Set(0) ddlCount.Set(0) - cp := db.ConnParams() + cp := dbconfigs.New(db.ConnParams()) conn, err := cp.Connect(ctx) require.NoError(t, err) @@ -69,7 +72,7 @@ func TestInitErrors(t *testing.T) { } // simulate errors for the table creation DDLs applied for tables specified in schemaErrors - stmt, err := sqlparser.Parse(query) + stmt, err := env.Parser().Parse(query) if err != nil { return nil, err } @@ -85,7 +88,7 @@ func TestInitErrors(t *testing.T) { } require.Equal(t, int64(0), getDDLCount()) - err = Init(ctx, exec) + err = Init(ctx, env, exec) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)-len(schemaErrors)), getDDLCount()) require.Equal(t, int64(len(schemaErrors)), getDDLErrorCount()) @@ -124,11 +127,12 @@ func TestMiscSidecarDB(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - AddSchemaInitQueries(db, false) + env := vtenv.NewTestEnv() + AddSchemaInitQueries(db, false, env.Parser()) db.AddQuery("use dbname", &sqltypes.Result{}) db.AddQueryPattern("set @@session.sql_mode=.*", &sqltypes.Result{}) - cp := db.ConnParams() + cp := dbconfigs.New(db.ConnParams()) conn, err := cp.Connect(ctx) require.NoError(t, err) exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { @@ -149,22 +153,22 @@ func TestMiscSidecarDB(t *testing.T) { require.NoError(t, err) db.AddQuery(dbeq, result) db.AddQuery(sidecar.GetCreateQuery(), &sqltypes.Result{}) - AddSchemaInitQueries(db, false) + AddSchemaInitQueries(db, false, env.Parser()) // tests init on empty db ddlErrorCount.Set(0) ddlCount.Set(0) require.Equal(t, int64(0), getDDLCount()) - err = Init(ctx, exec) + 
err = Init(ctx, env, exec) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)), getDDLCount()) // Include the table DDLs in the expected queries. // This causes them to NOT be created again. - AddSchemaInitQueries(db, true) + AddSchemaInitQueries(db, true, env.Parser()) // tests init on already inited db - err = Init(ctx, exec) + err = Init(ctx, env, exec) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)), getDDLCount()) @@ -172,6 +176,7 @@ func TestMiscSidecarDB(t *testing.T) { si := &schemaInit{ ctx: ctx, exec: exec, + env: env, } err = si.setCurrentDatabase(sidecar.GetIdentifier()) @@ -196,9 +201,10 @@ func TestValidateSchema(t *testing.T) { {"invalid table name", "t1", "create table if not exists t2(i int)", true}, {"qualifier", "t1", "create table if not exists vt_product.t1(i int)", true}, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { - _, err := validateSchemaDefinition(tc.name, tc.schema) + _, err := validateSchemaDefinition(tc.name, tc.schema, parser) if tc.mustError { require.Error(t, err) } else { @@ -220,13 +226,15 @@ func TestAlterTableAlgorithm(t *testing.T) { {"add column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t1(i int, i1 int)"}, {"modify column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t(i float)"}, } - si := &schemaInit{} + si := &schemaInit{ + env: vtenv.NewTestEnv(), + } copyAlgo := sqlparser.AlgorithmValue("COPY") for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { diff, err := si.findTableSchemaDiff(tc.tableName, tc.currentSchema, tc.desiredSchema) require.NoError(t, err) - stmt, err := sqlparser.Parse(diff) + stmt, err := si.env.Parser().Parse(diff) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index b4015f7937b..ea0773d99cc 
100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -344,8 +344,8 @@ func IsDMLStatement(stmt Statement) bool { // TableFromStatement returns the qualified table name for the query. // This works only for select statements. -func TableFromStatement(sql string) (TableName, error) { - stmt, err := Parse(sql) +func (p *Parser) TableFromStatement(sql string) (TableName, error) { + stmt, err := p.Parse(sql) if err != nil { return TableName{}, err } diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index 9f6a451770e..0a2de52ef19 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -145,8 +145,9 @@ func TestSplitAndExpression(t *testing.T) { sql: "select * from t where (a = 1 and ((b = 1 and c = 1)))", out: []string{"a = 1", "b = 1", "c = 1"}, }} + parser := NewTestParser() for _, tcase := range testcases { - stmt, err := Parse(tcase.sql) + stmt, err := parser.Parse(tcase.sql) assert.NoError(t, err) var expr Expr if where := stmt.(*Select).Where; where != nil { @@ -259,9 +260,9 @@ func TestTableFromStatement(t *testing.T) { in: "bad query", out: "syntax error at position 4 near 'bad'", }} - + parser := NewTestParser() for _, tc := range testcases { - name, err := TableFromStatement(tc.in) + name, err := parser.TableFromStatement(tc.in) var got string if err != nil { got = err.Error() @@ -288,8 +289,9 @@ func TestGetTableName(t *testing.T) { out: "", }} + parser := NewTestParser() for _, tc := range testcases { - tree, err := Parse(tc.in) + tree, err := parser.Parse(tc.in) if err != nil { t.Error(err) continue diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 1ff48b8be78..0026764970e 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -53,16 +53,17 @@ type ( Commented } + OrderAndLimit interface { + AddOrder(*Order) + SetLimit(*Limit) + } + // SelectStatement any SELECT statement. 
SelectStatement interface { Statement InsertRows + OrderAndLimit iSelectStatement() - AddOrder(*Order) - SetOrderBy(OrderBy) - GetOrderBy() OrderBy - GetLimit() *Limit - SetLimit(*Limit) GetLock() Lock SetLock(lock Lock) SetInto(into *SelectInto) @@ -72,6 +73,9 @@ type ( GetColumns() SelectExprs Commented IsDistinct() bool + GetOrderBy() OrderBy + SetOrderBy(OrderBy) + GetLimit() *Limit } // DDLStatement represents any DDL Statement @@ -265,7 +269,7 @@ type ( Comments *ParsedComments SelectExprs SelectExprs Where *Where - GroupBy GroupBy + GroupBy *GroupBy Having *Where Windows NamedWindows OrderBy OrderBy @@ -337,6 +341,7 @@ type ( Partitions Partitions Columns Columns Rows InsertRows + RowAlias *RowAlias OnDup OnDup } @@ -352,7 +357,7 @@ type ( With *With Comments *ParsedComments Ignore Ignore - TableExprs TableExprs + TableExprs []TableExpr Exprs UpdateExprs Where *Where OrderBy OrderBy @@ -365,8 +370,8 @@ type ( With *With Ignore Ignore Comments *ParsedComments + TableExprs []TableExpr Targets TableNames - TableExprs TableExprs Partitions Partitions Where *Where OrderBy OrderBy @@ -712,6 +717,10 @@ type ( IndexType int8 ) +var _ OrderAndLimit = (*Select)(nil) +var _ OrderAndLimit = (*Update)(nil) +var _ OrderAndLimit = (*Delete)(nil) + func (*Union) iStatement() {} func (*Select) iStatement() {} func (*Stream) iStatement() {} @@ -1807,15 +1816,15 @@ type ColumnType struct { Options *ColumnTypeOptions // Numeric field options - Length *Literal + Length *int Unsigned bool Zerofill bool - Scale *Literal + Scale *int // Text field options Charset ColumnCharset - // Enum values + // Enum and Set column definition values EnumValues []string } @@ -2308,8 +2317,9 @@ type ( // Argument represents bindvariable expression Argument struct { - Name string - Type sqltypes.Type + Name string + Type sqltypes.Type + Size, Scale int32 } // NullVal represents a NULL value. 
@@ -2398,7 +2408,7 @@ type ( FuncExpr struct { Qualifier IdentifierCS Name IdentifierCI - Exprs SelectExprs + Exprs Exprs } // ValuesFuncExpr represents a function call. @@ -2874,8 +2884,9 @@ type ( } Count struct { - Args Exprs - Distinct bool + Args Exprs + Distinct bool + OverClause *OverClause } CountStar struct { @@ -2906,66 +2917,81 @@ type ( // The solution we employed was to add a dummy field `_ bool` to the otherwise empty struct `CountStar`. // This ensures that each instance of `CountStar` is treated as a separate object, // even in the context of out semantic state which uses these objects as map keys. + OverClause *OverClause } Avg struct { - Arg Expr - Distinct bool + Arg Expr + Distinct bool + OverClause *OverClause } Max struct { - Arg Expr - Distinct bool + Arg Expr + Distinct bool + OverClause *OverClause } Min struct { - Arg Expr - Distinct bool + Arg Expr + Distinct bool + OverClause *OverClause } Sum struct { - Arg Expr - Distinct bool + Arg Expr + Distinct bool + OverClause *OverClause } BitAnd struct { - Arg Expr + Arg Expr + OverClause *OverClause } BitOr struct { - Arg Expr + Arg Expr + OverClause *OverClause } BitXor struct { - Arg Expr + Arg Expr + OverClause *OverClause } Std struct { - Arg Expr + Arg Expr + OverClause *OverClause } StdDev struct { - Arg Expr + Arg Expr + OverClause *OverClause } StdPop struct { - Arg Expr + Arg Expr + OverClause *OverClause } StdSamp struct { - Arg Expr + Arg Expr + OverClause *OverClause } VarPop struct { - Arg Expr + Arg Expr + OverClause *OverClause } VarSamp struct { - Arg Expr + Arg Expr + OverClause *OverClause } Variance struct { - Arg Expr + Arg Expr + OverClause *OverClause } // GroupConcatExpr represents a call to GROUP_CONCAT @@ -3419,13 +3445,16 @@ func (ListArg) iColTuple() {} // ConvertType represents the type in call to CONVERT(expr, type) type ConvertType struct { Type string - Length *Literal - Scale *Literal + Length *int + Scale *int Charset ColumnCharset } // GroupBy represents a 
GROUP BY clause. -type GroupBy []Expr +type GroupBy struct { + Exprs []Expr + WithRollup bool +} // OrderBy represents an ORDER By clause. type OrderBy []*Order @@ -3468,6 +3497,11 @@ type SetExpr struct { // OnDup represents an ON DUPLICATE KEY clause. type OnDup UpdateExprs +type RowAlias struct { + TableName IdentifierCS + Columns Columns +} + // IdentifierCI is a case insensitive SQL identifier. It will be escaped with // backquotes if necessary. type IdentifierCI struct { diff --git a/go/vt/sqlparser/ast_clone.go b/go/vt/sqlparser/ast_clone.go index b29b4c90047..ab215fdf994 100644 --- a/go/vt/sqlparser/ast_clone.go +++ b/go/vt/sqlparser/ast_clone.go @@ -205,8 +205,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfGeomFromWKBExpr(in) case *GeomPropertyFuncExpr: return CloneRefOfGeomPropertyFuncExpr(in) - case GroupBy: - return CloneGroupBy(in) + case *GroupBy: + return CloneRefOfGroupBy(in) case *GroupConcatExpr: return CloneRefOfGroupConcatExpr(in) case IdentifierCI: @@ -423,6 +423,8 @@ func CloneSQLNode(in SQLNode) SQLNode { return CloneRefOfRollback(in) case RootNode: return CloneRootNode(in) + case *RowAlias: + return CloneRefOfRowAlias(in) case *SRollback: return CloneRefOfSRollback(in) case *Savepoint: @@ -799,6 +801,7 @@ func CloneRefOfAvg(n *Avg) *Avg { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -842,6 +845,7 @@ func CloneRefOfBitAnd(n *BitAnd) *BitAnd { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -852,6 +856,7 @@ func CloneRefOfBitOr(n *BitOr) *BitOr { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -862,6 +867,7 @@ func CloneRefOfBitXor(n *BitXor) *BitXor { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -964,8 +970,8 @@ func CloneRefOfColumnType(n *ColumnType) *ColumnType { } out := *n 
out.Options = CloneRefOfColumnTypeOptions(n.Options) - out.Length = CloneRefOfLiteral(n.Length) - out.Scale = CloneRefOfLiteral(n.Scale) + out.Length = CloneRefOfInt(n.Length) + out.Scale = CloneRefOfInt(n.Scale) out.Charset = CloneColumnCharset(n.Charset) out.EnumValues = CloneSliceOfString(n.EnumValues) return &out @@ -1054,8 +1060,8 @@ func CloneRefOfConvertType(n *ConvertType) *ConvertType { return nil } out := *n - out.Length = CloneRefOfLiteral(n.Length) - out.Scale = CloneRefOfLiteral(n.Scale) + out.Length = CloneRefOfInt(n.Length) + out.Scale = CloneRefOfInt(n.Scale) out.Charset = CloneColumnCharset(n.Charset) return &out } @@ -1077,6 +1083,7 @@ func CloneRefOfCount(n *Count) *Count { } out := *n out.Args = CloneExprs(n.Args) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -1086,6 +1093,7 @@ func CloneRefOfCountStar(n *CountStar) *CountStar { return nil } out := *n + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -1175,8 +1183,8 @@ func CloneRefOfDelete(n *Delete) *Delete { out := *n out.With = CloneRefOfWith(n.With) out.Comments = CloneRefOfParsedComments(n.Comments) + out.TableExprs = CloneSliceOfTableExpr(n.TableExprs) out.Targets = CloneTableNames(n.Targets) - out.TableExprs = CloneTableExprs(n.TableExprs) out.Partitions = ClonePartitions(n.Partitions) out.Where = CloneRefOfWhere(n.Where) out.OrderBy = CloneOrderBy(n.OrderBy) @@ -1405,7 +1413,7 @@ func CloneRefOfFuncExpr(n *FuncExpr) *FuncExpr { out := *n out.Qualifier = CloneIdentifierCS(n.Qualifier) out.Name = CloneIdentifierCI(n.Name) - out.Exprs = CloneSelectExprs(n.Exprs) + out.Exprs = CloneExprs(n.Exprs) return &out } @@ -1536,16 +1544,14 @@ func CloneRefOfGeomPropertyFuncExpr(n *GeomPropertyFuncExpr) *GeomPropertyFuncEx return &out } -// CloneGroupBy creates a deep clone of the input. -func CloneGroupBy(n GroupBy) GroupBy { +// CloneRefOfGroupBy creates a deep clone of the input. 
+func CloneRefOfGroupBy(n *GroupBy) *GroupBy { if n == nil { return nil } - res := make(GroupBy, len(n)) - for i, x := range n { - res[i] = CloneExpr(x) - } - return res + out := *n + out.Exprs = CloneSliceOfExpr(n.Exprs) + return &out } // CloneRefOfGroupConcatExpr creates a deep clone of the input. @@ -1626,6 +1632,7 @@ func CloneRefOfInsert(n *Insert) *Insert { out.Partitions = ClonePartitions(n.Partitions) out.Columns = CloneColumns(n.Columns) out.Rows = CloneInsertRows(n.Rows) + out.RowAlias = CloneRefOfRowAlias(n.RowAlias) out.OnDup = CloneOnDup(n.OnDup) return &out } @@ -2117,6 +2124,7 @@ func CloneRefOfMax(n *Max) *Max { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -2138,6 +2146,7 @@ func CloneRefOfMin(n *Min) *Min { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -2684,6 +2693,17 @@ func CloneRootNode(n RootNode) RootNode { return *CloneRefOfRootNode(&n) } +// CloneRefOfRowAlias creates a deep clone of the input. +func CloneRefOfRowAlias(n *RowAlias) *RowAlias { + if n == nil { + return nil + } + out := *n + out.TableName = CloneIdentifierCS(n.TableName) + out.Columns = CloneColumns(n.Columns) + return &out +} + // CloneRefOfSRollback creates a deep clone of the input. 
func CloneRefOfSRollback(n *SRollback) *SRollback { if n == nil { @@ -2716,7 +2736,7 @@ func CloneRefOfSelect(n *Select) *Select { out.Comments = CloneRefOfParsedComments(n.Comments) out.SelectExprs = CloneSelectExprs(n.SelectExprs) out.Where = CloneRefOfWhere(n.Where) - out.GroupBy = CloneGroupBy(n.GroupBy) + out.GroupBy = CloneRefOfGroupBy(n.GroupBy) out.Having = CloneRefOfWhere(n.Having) out.Windows = CloneNamedWindows(n.Windows) out.OrderBy = CloneOrderBy(n.OrderBy) @@ -2879,6 +2899,7 @@ func CloneRefOfStd(n *Std) *Std { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -2889,6 +2910,7 @@ func CloneRefOfStdDev(n *StdDev) *StdDev { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -2899,6 +2921,7 @@ func CloneRefOfStdPop(n *StdPop) *StdPop { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -2909,6 +2932,7 @@ func CloneRefOfStdSamp(n *StdSamp) *StdSamp { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -3002,6 +3026,7 @@ func CloneRefOfSum(n *Sum) *Sum { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -3143,7 +3168,7 @@ func CloneRefOfUpdate(n *Update) *Update { out := *n out.With = CloneRefOfWith(n.With) out.Comments = CloneRefOfParsedComments(n.Comments) - out.TableExprs = CloneTableExprs(n.TableExprs) + out.TableExprs = CloneSliceOfTableExpr(n.TableExprs) out.Exprs = CloneUpdateExprs(n.Exprs) out.Where = CloneRefOfWhere(n.Where) out.OrderBy = CloneOrderBy(n.OrderBy) @@ -3271,6 +3296,7 @@ func CloneRefOfVarPop(n *VarPop) *VarPop { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -3281,6 +3307,7 @@ func CloneRefOfVarSamp(n *VarSamp) *VarSamp { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = 
CloneRefOfOverClause(n.OverClause) return &out } @@ -3301,6 +3328,7 @@ func CloneRefOfVariance(n *Variance) *Variance { } out := *n out.Arg = CloneExpr(n.Arg) + out.OverClause = CloneRefOfOverClause(n.OverClause) return &out } @@ -4334,6 +4362,15 @@ func CloneRefOfColumnTypeOptions(n *ColumnTypeOptions) *ColumnTypeOptions { return &out } +// CloneRefOfInt creates a deep clone of the input. +func CloneRefOfInt(n *int) *int { + if n == nil { + return nil + } + out := *n + return &out +} + // CloneColumnCharset creates a deep clone of the input. func CloneColumnCharset(n ColumnCharset) ColumnCharset { return *CloneRefOfColumnCharset(&n) @@ -4349,6 +4386,18 @@ func CloneSliceOfString(n []string) []string { return res } +// CloneSliceOfTableExpr creates a deep clone of the input. +func CloneSliceOfTableExpr(n []TableExpr) []TableExpr { + if n == nil { + return nil + } + res := make([]TableExpr, len(n)) + for i, x := range n { + res[i] = CloneTableExpr(x) + } + return res +} + // CloneSliceOfRefOfVariable creates a deep clone of the input. func CloneSliceOfRefOfVariable(n []*Variable) []*Variable { if n == nil { @@ -4361,6 +4410,18 @@ func CloneSliceOfRefOfVariable(n []*Variable) []*Variable { return res } +// CloneSliceOfExpr creates a deep clone of the input. +func CloneSliceOfExpr(n []Expr) []Expr { + if n == nil { + return nil + } + res := make([]Expr, len(n)) + for i, x := range n { + res[i] = CloneExpr(x) + } + return res +} + // CloneRefOfIdentifierCI creates a deep clone of the input. func CloneRefOfIdentifierCI(n *IdentifierCI) *IdentifierCI { if n == nil { @@ -4403,18 +4464,6 @@ func CloneSliceOfRefOfIndexOption(n []*IndexOption) []*IndexOption { return res } -// CloneSliceOfExpr creates a deep clone of the input. -func CloneSliceOfExpr(n []Expr) []Expr { - if n == nil { - return nil - } - res := make([]Expr, len(n)) - for i, x := range n { - res[i] = CloneExpr(x) - } - return res -} - // CloneSliceOfRefOfJSONObjectParam creates a deep clone of the input. 
func CloneSliceOfRefOfJSONObjectParam(n []*JSONObjectParam) []*JSONObjectParam { if n == nil { @@ -4510,15 +4559,6 @@ func CloneComments(n Comments) Comments { return res } -// CloneRefOfInt creates a deep clone of the input. -func CloneRefOfInt(n *int) *int { - if n == nil { - return nil - } - out := *n - return &out -} - // CloneSliceOfRefOfPartitionDefinition creates a deep clone of the input. func CloneSliceOfRefOfPartitionDefinition(n []*PartitionDefinition) []*PartitionDefinition { if n == nil { @@ -4553,18 +4593,6 @@ func CloneRefOfRootNode(n *RootNode) *RootNode { return &out } -// CloneSliceOfTableExpr creates a deep clone of the input. -func CloneSliceOfTableExpr(n []TableExpr) []TableExpr { - if n == nil { - return nil - } - res := make([]TableExpr, len(n)) - for i, x := range n { - res[i] = CloneTableExpr(x) - } - return res -} - // CloneRefOfTableName creates a deep clone of the input. func CloneRefOfTableName(n *TableName) *TableName { if n == nil { @@ -4666,7 +4694,7 @@ func CloneRefOfIndexColumn(n *IndexColumn) *IndexColumn { } out := *n out.Column = CloneIdentifierCI(n.Column) - out.Length = CloneRefOfLiteral(n.Length) + out.Length = CloneRefOfInt(n.Length) out.Expression = CloneExpr(n.Expression) return &out } diff --git a/go/vt/sqlparser/ast_copy_on_rewrite.go b/go/vt/sqlparser/ast_copy_on_rewrite.go index 86dda29ebcf..68eea685405 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite.go @@ -204,8 +204,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfGeomFromWKBExpr(n, parent) case *GeomPropertyFuncExpr: return c.copyOnRewriteRefOfGeomPropertyFuncExpr(n, parent) - case GroupBy: - return c.copyOnRewriteGroupBy(n, parent) + case *GroupBy: + return c.copyOnRewriteRefOfGroupBy(n, parent) case *GroupConcatExpr: return c.copyOnRewriteRefOfGroupConcatExpr(n, parent) case IdentifierCI: @@ -422,6 +422,8 @@ func (c *cow) copyOnRewriteSQLNode(n SQLNode, 
parent SQLNode) (out SQLNode, chan return c.copyOnRewriteRefOfRollback(n, parent) case RootNode: return c.copyOnRewriteRootNode(n, parent) + case *RowAlias: + return c.copyOnRewriteRefOfRowAlias(n, parent) case *SRollback: return c.copyOnRewriteRefOfSRollback(n, parent) case *Savepoint: @@ -1074,9 +1076,11 @@ func (c *cow) copyOnRewriteRefOfAvg(n *Avg, parent SQLNode) (out SQLNode, change out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -1158,9 +1162,11 @@ func (c *cow) copyOnRewriteRefOfBitAnd(n *BitAnd, parent SQLNode) (out SQLNode, out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -1180,9 +1186,11 @@ func (c *cow) copyOnRewriteRefOfBitOr(n *BitOr, parent SQLNode) (out SQLNode, ch out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -1202,9 +1210,11 @@ func (c *cow) copyOnRewriteRefOfBitXor(n *BitXor, parent SQLNode) (out SQLNode, out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := 
c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -1445,18 +1455,6 @@ func (c *cow) copyOnRewriteRefOfColumnType(n *ColumnType, parent SQLNode) (out S } out = n if c.pre == nil || c.pre(n, parent) { - _Length, changedLength := c.copyOnRewriteRefOfLiteral(n.Length, n) - _Scale, changedScale := c.copyOnRewriteRefOfLiteral(n.Scale, n) - if changedLength || changedScale { - res := *n - res.Length, _ = _Length.(*Literal) - res.Scale, _ = _Scale.(*Literal) - out = &res - if c.cloned != nil { - c.cloned(n, out) - } - changed = true - } } if c.post != nil { out, changed = c.postVisit(out, parent, changed) @@ -1616,18 +1614,6 @@ func (c *cow) copyOnRewriteRefOfConvertType(n *ConvertType, parent SQLNode) (out } out = n if c.pre == nil || c.pre(n, parent) { - _Length, changedLength := c.copyOnRewriteRefOfLiteral(n.Length, n) - _Scale, changedScale := c.copyOnRewriteRefOfLiteral(n.Scale, n) - if changedLength || changedScale { - res := *n - res.Length, _ = _Length.(*Literal) - res.Scale, _ = _Scale.(*Literal) - out = &res - if c.cloned != nil { - c.cloned(n, out) - } - changed = true - } } if c.post != nil { out, changed = c.postVisit(out, parent, changed) @@ -1663,9 +1649,11 @@ func (c *cow) copyOnRewriteRefOfCount(n *Count, parent SQLNode) (out SQLNode, ch out = n if c.pre == nil || c.pre(n, parent) { _Args, changedArgs := c.copyOnRewriteExprs(n.Args, n) - if changedArgs { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArgs || changedOverClause { res := *n res.Args, _ = _Args.(Exprs) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -1684,6 +1672,16 @@ func (c *cow) copyOnRewriteRefOfCountStar(n *CountStar, parent SQLNode) (out SQL } out = n if c.pre == nil || c.pre(n, parent) { + _OverClause, 
changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedOverClause { + res := *n + res.OverClause, _ = _OverClause.(*OverClause) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } } if c.post != nil { out, changed = c.postVisit(out, parent, changed) @@ -1850,18 +1848,26 @@ func (c *cow) copyOnRewriteRefOfDelete(n *Delete, parent SQLNode) (out SQLNode, if c.pre == nil || c.pre(n, parent) { _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n) _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n) + var changedTableExprs bool + _TableExprs := make([]TableExpr, len(n.TableExprs)) + for x, el := range n.TableExprs { + this, changed := c.copyOnRewriteTableExpr(el, n) + _TableExprs[x] = this.(TableExpr) + if changed { + changedTableExprs = true + } + } _Targets, changedTargets := c.copyOnRewriteTableNames(n.Targets, n) - _TableExprs, changedTableExprs := c.copyOnRewriteTableExprs(n.TableExprs, n) _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n) _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n) _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n) _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n) - if changedWith || changedComments || changedTargets || changedTableExprs || changedPartitions || changedWhere || changedOrderBy || changedLimit { + if changedWith || changedComments || changedTableExprs || changedTargets || changedPartitions || changedWhere || changedOrderBy || changedLimit { res := *n res.With, _ = _With.(*With) res.Comments, _ = _Comments.(*ParsedComments) + res.TableExprs = _TableExprs res.Targets, _ = _Targets.(TableNames) - res.TableExprs, _ = _TableExprs.(TableExprs) res.Partitions, _ = _Partitions.(Partitions) res.Where, _ = _Where.(*Where) res.OrderBy, _ = _OrderBy.(OrderBy) @@ -2339,12 +2345,12 @@ func (c *cow) copyOnRewriteRefOfFuncExpr(n *FuncExpr, parent SQLNode) (out SQLNo if c.pre == nil || c.pre(n, 
parent) { _Qualifier, changedQualifier := c.copyOnRewriteIdentifierCS(n.Qualifier, n) _Name, changedName := c.copyOnRewriteIdentifierCI(n.Name, n) - _Exprs, changedExprs := c.copyOnRewriteSelectExprs(n.Exprs, n) + _Exprs, changedExprs := c.copyOnRewriteExprs(n.Exprs, n) if changedQualifier || changedName || changedExprs { res := *n res.Qualifier, _ = _Qualifier.(IdentifierCS) res.Name, _ = _Name.(IdentifierCI) - res.Exprs, _ = _Exprs.(SelectExprs) + res.Exprs, _ = _Exprs.(Exprs) out = &res if c.cloned != nil { c.cloned(n, out) @@ -2633,22 +2639,29 @@ func (c *cow) copyOnRewriteRefOfGeomPropertyFuncExpr(n *GeomPropertyFuncExpr, pa } return } -func (c *cow) copyOnRewriteGroupBy(n GroupBy, parent SQLNode) (out SQLNode, changed bool) { +func (c *cow) copyOnRewriteRefOfGroupBy(n *GroupBy, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false } out = n if c.pre == nil || c.pre(n, parent) { - res := make(GroupBy, len(n)) - for x, el := range n { - this, change := c.copyOnRewriteExpr(el, n) - res[x] = this.(Expr) - if change { - changed = true + var changedExprs bool + _Exprs := make([]Expr, len(n.Exprs)) + for x, el := range n.Exprs { + this, changed := c.copyOnRewriteExpr(el, n) + _Exprs[x] = this.(Expr) + if changed { + changedExprs = true } } - if changed { - out = res + if changedExprs { + res := *n + res.Exprs = _Exprs + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true } } if c.post != nil { @@ -2810,14 +2823,16 @@ func (c *cow) copyOnRewriteRefOfInsert(n *Insert, parent SQLNode) (out SQLNode, _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n) _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n) _Rows, changedRows := c.copyOnRewriteInsertRows(n.Rows, n) + _RowAlias, changedRowAlias := c.copyOnRewriteRefOfRowAlias(n.RowAlias, n) _OnDup, changedOnDup := c.copyOnRewriteOnDup(n.OnDup, n) - if changedComments || changedTable || changedPartitions || changedColumns || 
changedRows || changedOnDup { + if changedComments || changedTable || changedPartitions || changedColumns || changedRows || changedRowAlias || changedOnDup { res := *n res.Comments, _ = _Comments.(*ParsedComments) res.Table, _ = _Table.(*AliasedTableExpr) res.Partitions, _ = _Partitions.(Partitions) res.Columns, _ = _Columns.(Columns) res.Rows, _ = _Rows.(InsertRows) + res.RowAlias, _ = _RowAlias.(*RowAlias) res.OnDup, _ = _OnDup.(OnDup) out = &res if c.cloned != nil { @@ -3886,9 +3901,11 @@ func (c *cow) copyOnRewriteRefOfMax(n *Max, parent SQLNode) (out SQLNode, change out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -3932,9 +3949,11 @@ func (c *cow) copyOnRewriteRefOfMin(n *Min, parent SQLNode) (out SQLNode, change out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -5078,6 +5097,30 @@ func (c *cow) copyOnRewriteRootNode(n RootNode, parent SQLNode) (out SQLNode, ch } return } +func (c *cow) copyOnRewriteRefOfRowAlias(n *RowAlias, parent SQLNode) (out SQLNode, changed bool) { + if n == nil || c.cursor.stop { + return n, false + } + out = n + if c.pre == nil || c.pre(n, parent) { + _TableName, changedTableName := c.copyOnRewriteIdentifierCS(n.TableName, n) + _Columns, changedColumns := c.copyOnRewriteColumns(n.Columns, n) + if changedTableName || changedColumns { + res := *n + res.TableName, _ = _TableName.(IdentifierCS) + 
res.Columns, _ = _Columns.(Columns) + out = &res + if c.cloned != nil { + c.cloned(n, out) + } + changed = true + } + } + if c.post != nil { + out, changed = c.postVisit(out, parent, changed) + } + return +} func (c *cow) copyOnRewriteRefOfSRollback(n *SRollback, parent SQLNode) (out SQLNode, changed bool) { if n == nil || c.cursor.stop { return n, false @@ -5141,7 +5184,7 @@ func (c *cow) copyOnRewriteRefOfSelect(n *Select, parent SQLNode) (out SQLNode, _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n) _SelectExprs, changedSelectExprs := c.copyOnRewriteSelectExprs(n.SelectExprs, n) _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n) - _GroupBy, changedGroupBy := c.copyOnRewriteGroupBy(n.GroupBy, n) + _GroupBy, changedGroupBy := c.copyOnRewriteRefOfGroupBy(n.GroupBy, n) _Having, changedHaving := c.copyOnRewriteRefOfWhere(n.Having, n) _Windows, changedWindows := c.copyOnRewriteNamedWindows(n.Windows, n) _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n) @@ -5154,7 +5197,7 @@ func (c *cow) copyOnRewriteRefOfSelect(n *Select, parent SQLNode) (out SQLNode, res.Comments, _ = _Comments.(*ParsedComments) res.SelectExprs, _ = _SelectExprs.(SelectExprs) res.Where, _ = _Where.(*Where) - res.GroupBy, _ = _GroupBy.(GroupBy) + res.GroupBy, _ = _GroupBy.(*GroupBy) res.Having, _ = _Having.(*Where) res.Windows, _ = _Windows.(NamedWindows) res.OrderBy, _ = _OrderBy.(OrderBy) @@ -5457,9 +5500,11 @@ func (c *cow) copyOnRewriteRefOfStd(n *Std, parent SQLNode) (out SQLNode, change out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -5479,9 +5524,11 @@ func (c *cow) copyOnRewriteRefOfStdDev(n *StdDev, parent SQLNode) 
(out SQLNode, out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -5501,9 +5548,11 @@ func (c *cow) copyOnRewriteRefOfStdPop(n *StdPop, parent SQLNode) (out SQLNode, out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -5523,9 +5572,11 @@ func (c *cow) copyOnRewriteRefOfStdSamp(n *StdSamp, parent SQLNode) (out SQLNode out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -5718,9 +5769,11 @@ func (c *cow) copyOnRewriteRefOfSum(n *Sum, parent SQLNode) (out SQLNode, change out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -6023,7 +6076,15 @@ func (c *cow) copyOnRewriteRefOfUpdate(n *Update, parent SQLNode) (out SQLNode, if c.pre == nil || c.pre(n, parent) { _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n) 
_Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n) - _TableExprs, changedTableExprs := c.copyOnRewriteTableExprs(n.TableExprs, n) + var changedTableExprs bool + _TableExprs := make([]TableExpr, len(n.TableExprs)) + for x, el := range n.TableExprs { + this, changed := c.copyOnRewriteTableExpr(el, n) + _TableExprs[x] = this.(TableExpr) + if changed { + changedTableExprs = true + } + } _Exprs, changedExprs := c.copyOnRewriteUpdateExprs(n.Exprs, n) _Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n) _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n) @@ -6032,7 +6093,7 @@ func (c *cow) copyOnRewriteRefOfUpdate(n *Update, parent SQLNode) (out SQLNode, res := *n res.With, _ = _With.(*With) res.Comments, _ = _Comments.(*ParsedComments) - res.TableExprs, _ = _TableExprs.(TableExprs) + res.TableExprs = _TableExprs res.Exprs, _ = _Exprs.(UpdateExprs) res.Where, _ = _Where.(*Where) res.OrderBy, _ = _OrderBy.(OrderBy) @@ -6285,9 +6346,11 @@ func (c *cow) copyOnRewriteRefOfVarPop(n *VarPop, parent SQLNode) (out SQLNode, out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -6307,9 +6370,11 @@ func (c *cow) copyOnRewriteRefOfVarSamp(n *VarSamp, parent SQLNode) (out SQLNode out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) @@ -6351,9 +6416,11 @@ func (c *cow) copyOnRewriteRefOfVariance(n *Variance, 
parent SQLNode) (out SQLNo out = n if c.pre == nil || c.pre(n, parent) { _Arg, changedArg := c.copyOnRewriteExpr(n.Arg, n) - if changedArg { + _OverClause, changedOverClause := c.copyOnRewriteRefOfOverClause(n.OverClause, n) + if changedArg || changedOverClause { res := *n res.Arg, _ = _Arg.(Expr) + res.OverClause, _ = _OverClause.(*OverClause) out = &res if c.cloned != nil { c.cloned(n, out) diff --git a/go/vt/sqlparser/ast_copy_on_rewrite_test.go b/go/vt/sqlparser/ast_copy_on_rewrite_test.go index 389b2a4bc29..bb2bd5b886e 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite_test.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite_test.go @@ -24,8 +24,9 @@ import ( ) func TestCopyOnRewrite(t *testing.T) { + parser := NewTestParser() // rewrite an expression without changing the original - expr, err := ParseExpr("a = b") + expr, err := parser.ParseExpr("a = b") require.NoError(t, err) out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) { col, ok := cursor.Node().(*ColName) @@ -42,9 +43,10 @@ func TestCopyOnRewrite(t *testing.T) { } func TestCopyOnRewriteDeeper(t *testing.T) { + parser := NewTestParser() // rewrite an expression without changing the original. 
the changed happens deep in the syntax tree, // here we are testing that all ancestors up to the root are cloned correctly - expr, err := ParseExpr("a + b * c = 12") + expr, err := parser.ParseExpr("a + b * c = 12") require.NoError(t, err) var path []string out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) { @@ -72,8 +74,9 @@ func TestCopyOnRewriteDeeper(t *testing.T) { } func TestDontCopyWithoutRewrite(t *testing.T) { + parser := NewTestParser() // when no rewriting happens, we want the original back - expr, err := ParseExpr("a = b") + expr, err := parser.ParseExpr("a = b") require.NoError(t, err) out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) {}, nil) @@ -81,9 +84,10 @@ func TestDontCopyWithoutRewrite(t *testing.T) { } func TestStopTreeWalk(t *testing.T) { + parser := NewTestParser() // stop walking down part of the AST original := "a = b + c" - expr, err := ParseExpr(original) + expr, err := parser.ParseExpr(original) require.NoError(t, err) out := CopyOnRewrite(expr, func(node, parent SQLNode) bool { _, ok := node.(*BinaryExpr) @@ -102,9 +106,10 @@ func TestStopTreeWalk(t *testing.T) { } func TestStopTreeWalkButStillVisit(t *testing.T) { + parser := NewTestParser() // here we are asserting that even when we stop at the binary expression, we still visit it in the post visitor original := "1337 = b + c" - expr, err := ParseExpr(original) + expr, err := parser.ParseExpr(original) require.NoError(t, err) out := CopyOnRewrite(expr, func(node, parent SQLNode) bool { _, ok := node.(*BinaryExpr) diff --git a/go/vt/sqlparser/ast_equals.go b/go/vt/sqlparser/ast_equals.go index 9beed3a8242..386731a47ad 100644 --- a/go/vt/sqlparser/ast_equals.go +++ b/go/vt/sqlparser/ast_equals.go @@ -572,12 +572,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RefOfGeomPropertyFuncExpr(a, b) - case GroupBy: - b, ok := inB.(GroupBy) + case *GroupBy: + b, ok := inB.(*GroupBy) if !ok { return false } - return 
cmp.GroupBy(a, b) + return cmp.RefOfGroupBy(a, b) case *GroupConcatExpr: b, ok := inB.(*GroupConcatExpr) if !ok { @@ -1226,6 +1226,12 @@ func (cmp *Comparator) SQLNode(inA, inB SQLNode) bool { return false } return cmp.RootNode(a, b) + case *RowAlias: + b, ok := inB.(*RowAlias) + if !ok { + return false + } + return cmp.RefOfRowAlias(a, b) case *SRollback: b, ok := inB.(*SRollback) if !ok { @@ -1863,6 +1869,8 @@ func (cmp *Comparator) RefOfArgument(a, b *Argument) bool { return false } return a.Name == b.Name && + a.Size == b.Size && + a.Scale == b.Scale && a.Type == b.Type } @@ -1911,7 +1919,8 @@ func (cmp *Comparator) RefOfAvg(a, b *Avg) bool { return false } return a.Distinct == b.Distinct && - cmp.Expr(a.Arg, b.Arg) + cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfBegin does deep equals between the two objects. @@ -1960,7 +1969,8 @@ func (cmp *Comparator) RefOfBitAnd(a, b *BitAnd) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfBitOr does deep equals between the two objects. @@ -1971,7 +1981,8 @@ func (cmp *Comparator) RefOfBitOr(a, b *BitOr) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfBitXor does deep equals between the two objects. @@ -1982,7 +1993,8 @@ func (cmp *Comparator) RefOfBitXor(a, b *BitXor) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfCallProc does deep equals between the two objects. 
@@ -2112,8 +2124,8 @@ func (cmp *Comparator) RefOfColumnType(a, b *ColumnType) bool { a.Unsigned == b.Unsigned && a.Zerofill == b.Zerofill && cmp.RefOfColumnTypeOptions(a.Options, b.Options) && - cmp.RefOfLiteral(a.Length, b.Length) && - cmp.RefOfLiteral(a.Scale, b.Scale) && + cmp.RefOfInt(a.Length, b.Length) && + cmp.RefOfInt(a.Scale, b.Scale) && cmp.ColumnCharset(a.Charset, b.Charset) && cmp.SliceOfString(a.EnumValues, b.EnumValues) } @@ -2213,8 +2225,8 @@ func (cmp *Comparator) RefOfConvertType(a, b *ConvertType) bool { return false } return a.Type == b.Type && - cmp.RefOfLiteral(a.Length, b.Length) && - cmp.RefOfLiteral(a.Scale, b.Scale) && + cmp.RefOfInt(a.Length, b.Length) && + cmp.RefOfInt(a.Scale, b.Scale) && cmp.ColumnCharset(a.Charset, b.Charset) } @@ -2239,7 +2251,8 @@ func (cmp *Comparator) RefOfCount(a, b *Count) bool { return false } return a.Distinct == b.Distinct && - cmp.Exprs(a.Args, b.Args) + cmp.Exprs(a.Args, b.Args) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfCountStar does deep equals between the two objects. @@ -2250,7 +2263,7 @@ func (cmp *Comparator) RefOfCountStar(a, b *CountStar) bool { if a == nil || b == nil { return false } - return true + return cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfCreateDatabase does deep equals between the two objects. 
@@ -2362,8 +2375,8 @@ func (cmp *Comparator) RefOfDelete(a, b *Delete) bool { return cmp.RefOfWith(a.With, b.With) && a.Ignore == b.Ignore && cmp.RefOfParsedComments(a.Comments, b.Comments) && + cmp.SliceOfTableExpr(a.TableExprs, b.TableExprs) && cmp.TableNames(a.Targets, b.Targets) && - cmp.TableExprs(a.TableExprs, b.TableExprs) && cmp.Partitions(a.Partitions, b.Partitions) && cmp.RefOfWhere(a.Where, b.Where) && cmp.OrderBy(a.OrderBy, b.OrderBy) && @@ -2631,7 +2644,7 @@ func (cmp *Comparator) RefOfFuncExpr(a, b *FuncExpr) bool { } return cmp.IdentifierCS(a.Qualifier, b.Qualifier) && cmp.IdentifierCI(a.Name, b.Name) && - cmp.SelectExprs(a.Exprs, b.Exprs) + cmp.Exprs(a.Exprs, b.Exprs) } // RefOfGTIDFuncExpr does deep equals between the two objects. @@ -2779,17 +2792,16 @@ func (cmp *Comparator) RefOfGeomPropertyFuncExpr(a, b *GeomPropertyFuncExpr) boo cmp.Expr(a.Geom, b.Geom) } -// GroupBy does deep equals between the two objects. -func (cmp *Comparator) GroupBy(a, b GroupBy) bool { - if len(a) != len(b) { - return false +// RefOfGroupBy does deep equals between the two objects. +func (cmp *Comparator) RefOfGroupBy(a, b *GroupBy) bool { + if a == b { + return true } - for i := 0; i < len(a); i++ { - if !cmp.Expr(a[i], b[i]) { - return false - } + if a == nil || b == nil { + return false } - return true + return a.WithRollup == b.WithRollup && + cmp.SliceOfExpr(a.Exprs, b.Exprs) } // RefOfGroupConcatExpr does deep equals between the two objects. 
@@ -2885,6 +2897,7 @@ func (cmp *Comparator) RefOfInsert(a, b *Insert) bool { cmp.Partitions(a.Partitions, b.Partitions) && cmp.Columns(a.Columns, b.Columns) && cmp.InsertRows(a.Rows, b.Rows) && + cmp.RefOfRowAlias(a.RowAlias, b.RowAlias) && cmp.OnDup(a.OnDup, b.OnDup) } @@ -3441,7 +3454,8 @@ func (cmp *Comparator) RefOfMax(a, b *Max) bool { return false } return a.Distinct == b.Distinct && - cmp.Expr(a.Arg, b.Arg) + cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfMemberOfExpr does deep equals between the two objects. @@ -3465,7 +3479,8 @@ func (cmp *Comparator) RefOfMin(a, b *Min) bool { return false } return a.Distinct == b.Distinct && - cmp.Expr(a.Arg, b.Arg) + cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfModifyColumn does deep equals between the two objects. @@ -4088,6 +4103,18 @@ func (cmp *Comparator) RootNode(a, b RootNode) bool { return cmp.SQLNode(a.SQLNode, b.SQLNode) } +// RefOfRowAlias does deep equals between the two objects. +func (cmp *Comparator) RefOfRowAlias(a, b *RowAlias) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return cmp.IdentifierCS(a.TableName, b.TableName) && + cmp.Columns(a.Columns, b.Columns) +} + // RefOfSRollback does deep equals between the two objects. 
func (cmp *Comparator) RefOfSRollback(a, b *SRollback) bool { if a == b { @@ -4127,7 +4154,7 @@ func (cmp *Comparator) RefOfSelect(a, b *Select) bool { cmp.RefOfParsedComments(a.Comments, b.Comments) && cmp.SelectExprs(a.SelectExprs, b.SelectExprs) && cmp.RefOfWhere(a.Where, b.Where) && - cmp.GroupBy(a.GroupBy, b.GroupBy) && + cmp.RefOfGroupBy(a.GroupBy, b.GroupBy) && cmp.RefOfWhere(a.Having, b.Having) && cmp.NamedWindows(a.Windows, b.Windows) && cmp.OrderBy(a.OrderBy, b.OrderBy) && @@ -4317,7 +4344,8 @@ func (cmp *Comparator) RefOfStd(a, b *Std) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfStdDev does deep equals between the two objects. @@ -4328,7 +4356,8 @@ func (cmp *Comparator) RefOfStdDev(a, b *StdDev) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfStdPop does deep equals between the two objects. @@ -4339,7 +4368,8 @@ func (cmp *Comparator) RefOfStdPop(a, b *StdPop) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfStdSamp does deep equals between the two objects. @@ -4350,7 +4380,8 @@ func (cmp *Comparator) RefOfStdSamp(a, b *StdSamp) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfStream does deep equals between the two objects. @@ -4457,7 +4488,8 @@ func (cmp *Comparator) RefOfSum(a, b *Sum) bool { return false } return a.Distinct == b.Distinct && - cmp.Expr(a.Arg, b.Arg) + cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // TableExprs does deep equals between the two objects. 
@@ -4621,7 +4653,7 @@ func (cmp *Comparator) RefOfUpdate(a, b *Update) bool { return cmp.RefOfWith(a.With, b.With) && cmp.RefOfParsedComments(a.Comments, b.Comments) && a.Ignore == b.Ignore && - cmp.TableExprs(a.TableExprs, b.TableExprs) && + cmp.SliceOfTableExpr(a.TableExprs, b.TableExprs) && cmp.UpdateExprs(a.Exprs, b.Exprs) && cmp.RefOfWhere(a.Where, b.Where) && cmp.OrderBy(a.OrderBy, b.OrderBy) && @@ -4761,7 +4793,8 @@ func (cmp *Comparator) RefOfVarPop(a, b *VarPop) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfVarSamp does deep equals between the two objects. @@ -4772,7 +4805,8 @@ func (cmp *Comparator) RefOfVarSamp(a, b *VarSamp) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // RefOfVariable does deep equals between the two objects. @@ -4795,7 +4829,8 @@ func (cmp *Comparator) RefOfVariance(a, b *Variance) bool { if a == nil || b == nil { return false } - return cmp.Expr(a.Arg, b.Arg) + return cmp.Expr(a.Arg, b.Arg) && + cmp.RefOfOverClause(a.OverClause, b.OverClause) } // VindexParam does deep equals between the two objects. @@ -7173,6 +7208,17 @@ func (cmp *Comparator) RefOfColumnTypeOptions(a, b *ColumnTypeOptions) bool { cmp.RefOfLiteral(a.SRID, b.SRID) } +// RefOfInt does deep equals between the two objects. +func (cmp *Comparator) RefOfInt(a, b *int) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return *a == *b +} + // ColumnCharset does deep equals between the two objects. func (cmp *Comparator) ColumnCharset(a, b ColumnCharset) bool { return a.Name == b.Name && @@ -7192,6 +7238,19 @@ func (cmp *Comparator) SliceOfString(a, b []string) bool { return true } +// SliceOfTableExpr does deep equals between the two objects. 
+func (cmp *Comparator) SliceOfTableExpr(a, b []TableExpr) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !cmp.TableExpr(a[i], b[i]) { + return false + } + } + return true +} + // SliceOfRefOfVariable does deep equals between the two objects. func (cmp *Comparator) SliceOfRefOfVariable(a, b []*Variable) bool { if len(a) != len(b) { @@ -7205,6 +7264,19 @@ func (cmp *Comparator) SliceOfRefOfVariable(a, b []*Variable) bool { return true } +// SliceOfExpr does deep equals between the two objects. +func (cmp *Comparator) SliceOfExpr(a, b []Expr) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !cmp.Expr(a[i], b[i]) { + return false + } + } + return true +} + // RefOfIdentifierCI does deep equals between the two objects. func (cmp *Comparator) RefOfIdentifierCI(a, b *IdentifierCI) bool { if a == b { @@ -7254,19 +7326,6 @@ func (cmp *Comparator) SliceOfRefOfIndexOption(a, b []*IndexOption) bool { return true } -// SliceOfExpr does deep equals between the two objects. -func (cmp *Comparator) SliceOfExpr(a, b []Expr) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if !cmp.Expr(a[i], b[i]) { - return false - } - } - return true -} - // SliceOfRefOfJSONObjectParam does deep equals between the two objects. func (cmp *Comparator) SliceOfRefOfJSONObjectParam(a, b []*JSONObjectParam) bool { if len(a) != len(b) { @@ -7371,17 +7430,6 @@ func (cmp *Comparator) Comments(a, b Comments) bool { return true } -// RefOfInt does deep equals between the two objects. -func (cmp *Comparator) RefOfInt(a, b *int) bool { - if a == b { - return true - } - if a == nil || b == nil { - return false - } - return *a == *b -} - // SliceOfRefOfPartitionDefinition does deep equals between the two objects. 
func (cmp *Comparator) SliceOfRefOfPartitionDefinition(a, b []*PartitionDefinition) bool { if len(a) != len(b) { @@ -7419,19 +7467,6 @@ func (cmp *Comparator) RefOfRootNode(a, b *RootNode) bool { return cmp.SQLNode(a.SQLNode, b.SQLNode) } -// SliceOfTableExpr does deep equals between the two objects. -func (cmp *Comparator) SliceOfTableExpr(a, b []TableExpr) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if !cmp.TableExpr(a[i], b[i]) { - return false - } - } - return true -} - // RefOfTableName does deep equals between the two objects. func (cmp *Comparator) RefOfTableName(a, b *TableName) bool { if a == b { @@ -7551,7 +7586,7 @@ func (cmp *Comparator) RefOfIndexColumn(a, b *IndexColumn) bool { return false } return cmp.IdentifierCI(a.Column, b.Column) && - cmp.RefOfLiteral(a.Length, b.Length) && + cmp.RefOfInt(a.Length, b.Length) && cmp.Expr(a.Expression, b.Expression) && a.Direction == b.Direction } diff --git a/go/vt/sqlparser/ast_format.go b/go/vt/sqlparser/ast_format.go index 3176ea2c12e..caba74fb567 100644 --- a/go/vt/sqlparser/ast_format.go +++ b/go/vt/sqlparser/ast_format.go @@ -54,7 +54,8 @@ func (node *Select) Format(buf *TrackedBuffer) { buf.astPrintf(node, "%v%v%v", node.Where, - node.GroupBy, node.Having) + node.GroupBy, + node.Having) if node.Windows != nil { buf.astPrintf(node, " %v", node.Windows) @@ -74,6 +75,10 @@ func (node *CommentOnly) Format(buf *TrackedBuffer) { // Format formats the node. 
func (node *Union) Format(buf *TrackedBuffer) { + if node.With != nil { + buf.astPrintf(node, "%v", node.With) + } + if requiresParen(node.Left) { buf.astPrintf(node, "(%v)", node.Left) } else { @@ -113,20 +118,20 @@ func (node *Stream) Format(buf *TrackedBuffer) { func (node *Insert) Format(buf *TrackedBuffer) { switch node.Action { case InsertAct: - buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v%v", InsertStr, node.Comments, node.Ignore.ToString(), - node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.OnDup) + node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.RowAlias, node.OnDup) case ReplaceAct: - buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v%v", ReplaceStr, node.Comments, node.Ignore.ToString(), - node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.OnDup) + node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.RowAlias, node.OnDup) default: - buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v", + buf.astPrintf(node, "%s %v%sinto %v%v%v %v%v%v", "Unkown Insert Action", node.Comments, node.Ignore.ToString(), - node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.OnDup) + node.Table.Expr, node.Partitions, node.Columns, node.Rows, node.RowAlias, node.OnDup) } } @@ -158,9 +163,14 @@ func (node *Update) Format(buf *TrackedBuffer) { if node.With != nil { buf.astPrintf(node, "%v", node.With) } - buf.astPrintf(node, "update %v%s%v set %v%v%v%v", - node.Comments, node.Ignore.ToString(), node.TableExprs, - node.Exprs, node.Where, node.OrderBy, node.Limit) + buf.astPrintf(node, "update %v%s", + node.Comments, node.Ignore.ToString()) + prefix := "" + for _, expr := range node.TableExprs { + buf.astPrintf(node, "%s%v", prefix, expr) + prefix = ", " + } + buf.astPrintf(node, " set %v%v%v%v", node.Exprs, node.Where, node.OrderBy, node.Limit) } // Format formats the node. 
@@ -172,10 +182,15 @@ func (node *Delete) Format(buf *TrackedBuffer) { if node.Ignore { buf.literal("ignore ") } - if node.Targets != nil { + if node.Targets != nil && !node.IsSingleAliasExpr() { buf.astPrintf(node, "%v ", node.Targets) } - buf.astPrintf(node, "from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit) + prefix := "from " + for _, expr := range node.TableExprs { + buf.astPrintf(node, "%s%v", prefix, expr) + prefix = ", " + } + buf.astPrintf(node, "%v%v%v%v", node.Partitions, node.Where, node.OrderBy, node.Limit) } // Format formats the node. @@ -289,6 +304,10 @@ func (node *AlterMigration) Format(buf *TrackedBuffer) { alterType = "unthrottle" case UnthrottleAllMigrationType: alterType = "unthrottle all" + case ForceCutOverMigrationType: + alterType = "force_cutover" + case ForceCutOverAllMigrationType: + alterType = "force_cutover all" } buf.astPrintf(node, " %#s", alterType) if node.Expire != "" { @@ -682,10 +701,10 @@ func (ct *ColumnType) Format(buf *TrackedBuffer) { buf.astPrintf(ct, "%#s", ct.Type) if ct.Length != nil && ct.Scale != nil { - buf.astPrintf(ct, "(%v,%v)", ct.Length, ct.Scale) + buf.astPrintf(ct, "(%d,%d)", *ct.Length, *ct.Scale) } else if ct.Length != nil { - buf.astPrintf(ct, "(%v)", ct.Length) + buf.astPrintf(ct, "(%d)", *ct.Length) } if ct.EnumValues != nil { @@ -810,7 +829,7 @@ func (idx *IndexDefinition) Format(buf *TrackedBuffer) { } else { buf.astPrintf(idx, "%v", col.Column) if col.Length != nil { - buf.astPrintf(idx, "(%v)", col.Length) + buf.astPrintf(idx, "(%d)", *col.Length) } } if col.Direction == DescOrder { @@ -831,7 +850,7 @@ func (idx *IndexDefinition) Format(buf *TrackedBuffer) { // Format formats the node. 
func (ii *IndexInfo) Format(buf *TrackedBuffer) { - if !ii.ConstraintName.IsEmpty() { + if ii.ConstraintName.NotEmpty() { buf.astPrintf(ii, "constraint %v ", ii.ConstraintName) } switch ii.Type { @@ -847,7 +866,7 @@ func (ii *IndexInfo) Format(buf *TrackedBuffer) { case IndexTypeFullText: buf.astPrintf(ii, "%s %s", keywordStrings[FULLTEXT], keywordStrings[KEY]) } - if !ii.Name.IsEmpty() { + if ii.Name.NotEmpty() { buf.astPrintf(ii, " %v", ii.Name) } } @@ -883,7 +902,7 @@ func (node VindexParam) Format(buf *TrackedBuffer) { // Format formats the node. func (c *ConstraintDefinition) Format(buf *TrackedBuffer) { - if !c.Name.IsEmpty() { + if c.Name.NotEmpty() { buf.astPrintf(c, "constraint %v ", c.Name) } c.Details.Format(buf) @@ -1114,7 +1133,7 @@ func (node *StarExpr) Format(buf *TrackedBuffer) { // Format formats the node. func (node *AliasedExpr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "%v", node.Expr) - if !node.As.IsEmpty() { + if node.As.NotEmpty() { buf.astPrintf(node, " as %v", node.As) } } @@ -1163,7 +1182,7 @@ func (node TableExprs) Format(buf *TrackedBuffer) { // Format formats the node. func (node *AliasedTableExpr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "%v%v", node.Expr, node.Partitions) - if !node.As.IsEmpty() { + if node.As.NotEmpty() { buf.astPrintf(node, " as %v", node.As) if len(node.Columns) != 0 { buf.astPrintf(node, "%v", node.Columns) @@ -1189,7 +1208,7 @@ func (node TableName) Format(buf *TrackedBuffer) { if node.IsEmpty() { return } - if !node.Qualifier.IsEmpty() { + if node.Qualifier.NotEmpty() { buf.astPrintf(node, "%v.", node.Qualifier) } buf.astPrintf(node, "%v", node.Name) @@ -1227,7 +1246,7 @@ func (node IndexHints) Format(buf *TrackedBuffer) { // Format formats the node. 
func (node *IndexHint) Format(buf *TrackedBuffer) { - buf.astPrintf(node, " %sindex ", node.Type.ToString()) + buf.astPrintf(node, " %s ", node.Type.ToString()) if node.ForType != NoForType { buf.astPrintf(node, "for %s ", node.ForType.ToString()) } @@ -1339,7 +1358,15 @@ func (node *Argument) Format(buf *TrackedBuffer) { // For bind variables that are statically typed, emit their type as an adjacent comment. // This comment will be ignored by older versions of Vitess (and by MySQL) but will provide // type safety when using the query as a cache key. - buf.astPrintf(node, " /* %s */", node.Type.String()) + buf.astPrintf(node, " /* %s", node.Type.String()) + if node.Size != 0 || node.Scale != 0 { + buf.astPrintf(node, "(%d", node.Size) + if node.Scale != 0 { + buf.astPrintf(node, ",%d", node.Scale) + } + buf.WriteString(")") + } + buf.WriteString(" */") } } @@ -1544,7 +1571,7 @@ func (node *CollateExpr) Format(buf *TrackedBuffer) { // Format formats the node. func (node *FuncExpr) Format(buf *TrackedBuffer) { - if !node.Qualifier.IsEmpty() { + if node.Qualifier.NotEmpty() { buf.astPrintf(node, "%v.", node.Qualifier) } // Function names should not be back-quoted even @@ -1598,7 +1625,7 @@ func (node *JSONStorageSizeExpr) Format(buf *TrackedBuffer) { // Format formats the node func (node *OverClause) Format(buf *TrackedBuffer) { buf.WriteString("over") - if !node.WindowName.IsEmpty() { + if node.WindowName.NotEmpty() { buf.astPrintf(node, " %v", node.WindowName) } if node.WindowSpec != nil { @@ -1608,7 +1635,7 @@ func (node *OverClause) Format(buf *TrackedBuffer) { // Format formats the node func (node *WindowSpecification) Format(buf *TrackedBuffer) { - if !node.Name.IsEmpty() { + if node.Name.NotEmpty() { buf.astPrintf(node, " %v", node.Name) } if node.PartitionClause != nil { @@ -1838,9 +1865,9 @@ func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) { func (node *ConvertType) Format(buf *TrackedBuffer) { buf.astPrintf(node, "%#s", node.Type) if node.Length != 
nil { - buf.astPrintf(node, "(%v", node.Length) + buf.astPrintf(node, "(%d", *node.Length) if node.Scale != nil { - buf.astPrintf(node, ", %v", node.Scale) + buf.astPrintf(node, ", %d", *node.Scale) } buf.astPrintf(node, ")") } @@ -1896,12 +1923,18 @@ func (node *When) Format(buf *TrackedBuffer) { } // Format formats the node. -func (node GroupBy) Format(buf *TrackedBuffer) { +func (node *GroupBy) Format(buf *TrackedBuffer) { + if node == nil || len(node.Exprs) == 0 { + return + } prefix := " group by " - for _, n := range node { + for _, n := range node.Exprs { buf.astPrintf(node, "%s%v", prefix, n) prefix = ", " } + if node.WithRollup { + buf.literal(" with rollup") + } } // Format formats the node. @@ -1992,6 +2025,18 @@ func (node OnDup) Format(buf *TrackedBuffer) { buf.astPrintf(node, " on duplicate key update %v", UpdateExprs(node)) } +func (node *RowAlias) Format(buf *TrackedBuffer) { + if node == nil { + return + } + + buf.astPrintf(node, " as %v", node.TableName) + + if node.Columns != nil { + buf.astPrintf(node, " %v", node.Columns) + } +} + // Format formats the node. func (node IdentifierCI) Format(buf *TrackedBuffer) { if node.IsEmpty() { @@ -2020,7 +2065,7 @@ func (node *ShowBasic) Format(buf *TrackedBuffer) { if !node.Tbl.IsEmpty() { buf.astPrintf(node, " from %v", node.Tbl) } - if !node.DbName.IsEmpty() { + if node.DbName.NotEmpty() { buf.astPrintf(node, " from %v", node.DbName) } buf.astPrintf(node, "%v", node.Filter) @@ -2070,7 +2115,7 @@ func (node *CreateDatabase) Format(buf *TrackedBuffer) { // Format formats the node. 
func (node *AlterDatabase) Format(buf *TrackedBuffer) { buf.literal("alter database") - if !node.DBName.IsEmpty() { + if node.DBName.NotEmpty() { buf.astPrintf(node, " %v", node.DBName) } if node.UpdateDataDirectory { @@ -2354,7 +2399,7 @@ func (node *DropColumn) Format(buf *TrackedBuffer) { // Format formats the node func (node *DropKey) Format(buf *TrackedBuffer) { buf.astPrintf(node, "drop %s", node.Type.ToString()) - if !node.Name.IsEmpty() { + if node.Name.NotEmpty() { buf.astPrintf(node, " %v", node.Name) } } @@ -2685,10 +2730,16 @@ func (node *Count) Format(buf *TrackedBuffer) { buf.literal(DistinctStr) } buf.astPrintf(node, "%v)", node.Args) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *CountStar) Format(buf *TrackedBuffer) { buf.WriteString("count(*)") + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *AnyValue) Format(buf *TrackedBuffer) { @@ -2701,6 +2752,9 @@ func (node *Avg) Format(buf *TrackedBuffer) { buf.literal(DistinctStr) } buf.astPrintf(node, "%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *Max) Format(buf *TrackedBuffer) { @@ -2709,6 +2763,9 @@ func (node *Max) Format(buf *TrackedBuffer) { buf.literal(DistinctStr) } buf.astPrintf(node, "%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *Min) Format(buf *TrackedBuffer) { @@ -2717,6 +2774,9 @@ func (node *Min) Format(buf *TrackedBuffer) { buf.literal(DistinctStr) } buf.astPrintf(node, "%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *Sum) Format(buf *TrackedBuffer) { @@ -2725,46 +2785,79 @@ func (node *Sum) Format(buf *TrackedBuffer) { buf.literal(DistinctStr) } buf.astPrintf(node, "%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *BitAnd) Format(buf *TrackedBuffer) { 
buf.astPrintf(node, "bit_and(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *BitOr) Format(buf *TrackedBuffer) { buf.astPrintf(node, "bit_or(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *BitXor) Format(buf *TrackedBuffer) { buf.astPrintf(node, "bit_xor(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *Std) Format(buf *TrackedBuffer) { buf.astPrintf(node, "std(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *StdDev) Format(buf *TrackedBuffer) { buf.astPrintf(node, "stddev(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *StdPop) Format(buf *TrackedBuffer) { buf.astPrintf(node, "stddev_pop(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *StdSamp) Format(buf *TrackedBuffer) { buf.astPrintf(node, "stddev_samp(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *VarPop) Format(buf *TrackedBuffer) { buf.astPrintf(node, "var_pop(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *VarSamp) Format(buf *TrackedBuffer) { buf.astPrintf(node, "var_samp(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } func (node *Variance) Format(buf *TrackedBuffer) { buf.astPrintf(node, "variance(%v)", node.Arg) + if node.OverClause != nil { + buf.astPrintf(node, " %v", node.OverClause) + } } // Format formats the node. 
diff --git a/go/vt/sqlparser/ast_format_fast.go b/go/vt/sqlparser/ast_format_fast.go index b99c96c87ab..3858cd56715 100644 --- a/go/vt/sqlparser/ast_format_fast.go +++ b/go/vt/sqlparser/ast_format_fast.go @@ -86,6 +86,10 @@ func (node *CommentOnly) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node. func (node *Union) FormatFast(buf *TrackedBuffer) { + if node.With != nil { + node.With.FormatFast(buf) + } + if requiresParen(node.Left) { buf.WriteByte('(') node.Left.FormatFast(buf) @@ -155,6 +159,8 @@ func (node *Insert) FormatFast(buf *TrackedBuffer) { node.Rows.FormatFast(buf) + node.RowAlias.FormatFast(buf) + node.OnDup.FormatFast(buf) case ReplaceAct: @@ -174,6 +180,8 @@ func (node *Insert) FormatFast(buf *TrackedBuffer) { node.Rows.FormatFast(buf) + node.RowAlias.FormatFast(buf) + node.OnDup.FormatFast(buf) default: @@ -193,6 +201,8 @@ func (node *Insert) FormatFast(buf *TrackedBuffer) { node.Rows.FormatFast(buf) + node.RowAlias.FormatFast(buf) + node.OnDup.FormatFast(buf) } @@ -234,17 +244,17 @@ func (node *Update) FormatFast(buf *TrackedBuffer) { buf.WriteString("update ") node.Comments.FormatFast(buf) buf.WriteString(node.Ignore.ToString()) - node.TableExprs.FormatFast(buf) + prefix := "" + for _, expr := range node.TableExprs { + buf.WriteString(prefix) + expr.FormatFast(buf) + prefix = ", " + } buf.WriteString(" set ") - node.Exprs.FormatFast(buf) - node.Where.FormatFast(buf) - node.OrderBy.FormatFast(buf) - node.Limit.FormatFast(buf) - } // FormatFast formats the node. 
@@ -257,12 +267,16 @@ func (node *Delete) FormatFast(buf *TrackedBuffer) { if node.Ignore { buf.WriteString("ignore ") } - if node.Targets != nil { + if node.Targets != nil && !node.IsSingleAliasExpr() { node.Targets.FormatFast(buf) buf.WriteByte(' ') } - buf.WriteString("from ") - node.TableExprs.FormatFast(buf) + prefix := "from " + for _, expr := range node.TableExprs { + buf.WriteString(prefix) + expr.FormatFast(buf) + prefix = ", " + } node.Partitions.FormatFast(buf) node.Where.FormatFast(buf) node.OrderBy.FormatFast(buf) @@ -416,6 +430,10 @@ func (node *AlterMigration) FormatFast(buf *TrackedBuffer) { alterType = "unthrottle" case UnthrottleAllMigrationType: alterType = "unthrottle all" + case ForceCutOverMigrationType: + alterType = "force_cutover" + case ForceCutOverAllMigrationType: + alterType = "force_cutover all" } buf.WriteByte(' ') buf.WriteString(alterType) @@ -899,14 +917,14 @@ func (ct *ColumnType) FormatFast(buf *TrackedBuffer) { if ct.Length != nil && ct.Scale != nil { buf.WriteByte('(') - ct.Length.FormatFast(buf) + buf.WriteString(fmt.Sprintf("%d", *ct.Length)) buf.WriteByte(',') - ct.Scale.FormatFast(buf) + buf.WriteString(fmt.Sprintf("%d", *ct.Scale)) buf.WriteByte(')') } else if ct.Length != nil { buf.WriteByte('(') - ct.Length.FormatFast(buf) + buf.WriteString(fmt.Sprintf("%d", *ct.Length)) buf.WriteByte(')') } @@ -1103,7 +1121,7 @@ func (idx *IndexDefinition) FormatFast(buf *TrackedBuffer) { col.Column.FormatFast(buf) if col.Length != nil { buf.WriteByte('(') - col.Length.FormatFast(buf) + buf.WriteString(fmt.Sprintf("%d", *col.Length)) buf.WriteByte(')') } } @@ -1128,7 +1146,7 @@ func (idx *IndexDefinition) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node. 
func (ii *IndexInfo) FormatFast(buf *TrackedBuffer) { - if !ii.ConstraintName.IsEmpty() { + if ii.ConstraintName.NotEmpty() { buf.WriteString("constraint ") ii.ConstraintName.FormatFast(buf) buf.WriteByte(' ') @@ -1154,7 +1172,7 @@ func (ii *IndexInfo) FormatFast(buf *TrackedBuffer) { buf.WriteByte(' ') buf.WriteString(keywordStrings[KEY]) } - if !ii.Name.IsEmpty() { + if ii.Name.NotEmpty() { buf.WriteByte(' ') ii.Name.FormatFast(buf) } @@ -1196,7 +1214,7 @@ func (node VindexParam) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node. func (c *ConstraintDefinition) FormatFast(buf *TrackedBuffer) { - if !c.Name.IsEmpty() { + if c.Name.NotEmpty() { buf.WriteString("constraint ") c.Name.FormatFast(buf) buf.WriteByte(' ') @@ -1474,7 +1492,7 @@ func (node *StarExpr) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node. func (node *AliasedExpr) FormatFast(buf *TrackedBuffer) { node.Expr.FormatFast(buf) - if !node.As.IsEmpty() { + if node.As.NotEmpty() { buf.WriteString(" as ") node.As.FormatFast(buf) } @@ -1530,7 +1548,7 @@ func (node TableExprs) FormatFast(buf *TrackedBuffer) { func (node *AliasedTableExpr) FormatFast(buf *TrackedBuffer) { node.Expr.FormatFast(buf) node.Partitions.FormatFast(buf) - if !node.As.IsEmpty() { + if node.As.NotEmpty() { buf.WriteString(" as ") node.As.FormatFast(buf) if len(node.Columns) != 0 { @@ -1558,7 +1576,7 @@ func (node TableName) FormatFast(buf *TrackedBuffer) { if node.IsEmpty() { return } - if !node.Qualifier.IsEmpty() { + if node.Qualifier.NotEmpty() { node.Qualifier.FormatFast(buf) buf.WriteByte('.') } @@ -1608,7 +1626,7 @@ func (node IndexHints) FormatFast(buf *TrackedBuffer) { func (node *IndexHint) FormatFast(buf *TrackedBuffer) { buf.WriteByte(' ') buf.WriteString(node.Type.ToString()) - buf.WriteString("index ") + buf.WriteByte(' ') if node.ForType != NoForType { buf.WriteString("for ") buf.WriteString(node.ForType.ToString()) @@ -1762,6 +1780,15 @@ func (node *Argument) FormatFast(buf *TrackedBuffer) { 
// type safety when using the query as a cache key. buf.WriteString(" /* ") buf.WriteString(node.Type.String()) + if node.Size != 0 || node.Scale != 0 { + buf.WriteByte('(') + buf.WriteString(fmt.Sprintf("%d", node.Size)) + if node.Scale != 0 { + buf.WriteByte(',') + buf.WriteString(fmt.Sprintf("%d", node.Scale)) + } + buf.WriteString(")") + } buf.WriteString(" */") } } @@ -2064,7 +2091,7 @@ func (node *CollateExpr) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node. func (node *FuncExpr) FormatFast(buf *TrackedBuffer) { - if !node.Qualifier.IsEmpty() { + if node.Qualifier.NotEmpty() { node.Qualifier.FormatFast(buf) buf.WriteByte('.') } @@ -2138,7 +2165,7 @@ func (node *JSONStorageSizeExpr) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node func (node *OverClause) FormatFast(buf *TrackedBuffer) { buf.WriteString("over") - if !node.WindowName.IsEmpty() { + if node.WindowName.NotEmpty() { buf.WriteByte(' ') node.WindowName.FormatFast(buf) } @@ -2151,7 +2178,7 @@ func (node *OverClause) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node func (node *WindowSpecification) FormatFast(buf *TrackedBuffer) { - if !node.Name.IsEmpty() { + if node.Name.NotEmpty() { buf.WriteByte(' ') node.Name.FormatFast(buf) } @@ -2478,10 +2505,10 @@ func (node *ConvertType) FormatFast(buf *TrackedBuffer) { buf.WriteString(node.Type) if node.Length != nil { buf.WriteByte('(') - node.Length.FormatFast(buf) + buf.WriteString(fmt.Sprintf("%d", *node.Length)) if node.Scale != nil { buf.WriteString(", ") - node.Scale.FormatFast(buf) + buf.WriteString(fmt.Sprintf("%d", *node.Scale)) } buf.WriteByte(')') } @@ -2550,13 +2577,19 @@ func (node *When) FormatFast(buf *TrackedBuffer) { } // FormatFast formats the node. 
-func (node GroupBy) FormatFast(buf *TrackedBuffer) { +func (node *GroupBy) FormatFast(buf *TrackedBuffer) { + if node == nil || len(node.Exprs) == 0 { + return + } prefix := " group by " - for _, n := range node { + for _, n := range node.Exprs { buf.WriteString(prefix) n.FormatFast(buf) prefix = ", " } + if node.WithRollup { + buf.WriteString(" with rollup") + } } // FormatFast formats the node. @@ -2661,6 +2694,20 @@ func (node OnDup) FormatFast(buf *TrackedBuffer) { UpdateExprs(node).FormatFast(buf) } +func (node *RowAlias) FormatFast(buf *TrackedBuffer) { + if node == nil { + return + } + + buf.WriteString(" as ") + node.TableName.FormatFast(buf) + + if node.Columns != nil { + buf.WriteByte(' ') + node.Columns.FormatFast(buf) + } +} + // FormatFast formats the node. func (node IdentifierCI) FormatFast(buf *TrackedBuffer) { if node.IsEmpty() { @@ -2690,7 +2737,7 @@ func (node *ShowBasic) FormatFast(buf *TrackedBuffer) { buf.WriteString(" from ") node.Tbl.FormatFast(buf) } - if !node.DbName.IsEmpty() { + if node.DbName.NotEmpty() { buf.WriteString(" from ") node.DbName.FormatFast(buf) } @@ -2751,7 +2798,7 @@ func (node *CreateDatabase) FormatFast(buf *TrackedBuffer) { // FormatFast formats the node. 
func (node *AlterDatabase) FormatFast(buf *TrackedBuffer) { buf.WriteString("alter database") - if !node.DBName.IsEmpty() { + if node.DBName.NotEmpty() { buf.WriteByte(' ') node.DBName.FormatFast(buf) } @@ -3118,7 +3165,7 @@ func (node *DropColumn) FormatFast(buf *TrackedBuffer) { func (node *DropKey) FormatFast(buf *TrackedBuffer) { buf.WriteString("drop ") buf.WriteString(node.Type.ToString()) - if !node.Name.IsEmpty() { + if node.Name.NotEmpty() { buf.WriteByte(' ') node.Name.FormatFast(buf) } @@ -3556,10 +3603,18 @@ func (node *Count) FormatFast(buf *TrackedBuffer) { } node.Args.FormatFast(buf) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *CountStar) FormatFast(buf *TrackedBuffer) { buf.WriteString("count(*)") + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *AnyValue) FormatFast(buf *TrackedBuffer) { @@ -3575,6 +3630,10 @@ func (node *Avg) FormatFast(buf *TrackedBuffer) { } buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *Max) FormatFast(buf *TrackedBuffer) { @@ -3584,6 +3643,10 @@ func (node *Max) FormatFast(buf *TrackedBuffer) { } buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *Min) FormatFast(buf *TrackedBuffer) { @@ -3593,6 +3656,10 @@ func (node *Min) FormatFast(buf *TrackedBuffer) { } buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *Sum) FormatFast(buf *TrackedBuffer) { @@ -3602,66 +3669,110 @@ func (node *Sum) FormatFast(buf *TrackedBuffer) { } buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node 
*BitAnd) FormatFast(buf *TrackedBuffer) { buf.WriteString("bit_and(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *BitOr) FormatFast(buf *TrackedBuffer) { buf.WriteString("bit_or(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *BitXor) FormatFast(buf *TrackedBuffer) { buf.WriteString("bit_xor(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *Std) FormatFast(buf *TrackedBuffer) { buf.WriteString("std(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *StdDev) FormatFast(buf *TrackedBuffer) { buf.WriteString("stddev(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *StdPop) FormatFast(buf *TrackedBuffer) { buf.WriteString("stddev_pop(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *StdSamp) FormatFast(buf *TrackedBuffer) { buf.WriteString("stddev_samp(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *VarPop) FormatFast(buf *TrackedBuffer) { buf.WriteString("var_pop(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } func (node *VarSamp) FormatFast(buf *TrackedBuffer) { buf.WriteString("var_samp(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + 
node.OverClause.FormatFast(buf) + } } func (node *Variance) FormatFast(buf *TrackedBuffer) { buf.WriteString("variance(") buf.printExpr(node, node.Arg, true) buf.WriteByte(')') + if node.OverClause != nil { + buf.WriteByte(' ') + node.OverClause.FormatFast(buf) + } } // FormatFast formats the node. diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index 951d9879bdb..df201676fae 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -24,13 +24,13 @@ import ( "strconv" "strings" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/log" - + "vitess.io/vitess/go/mysql/datetime" + "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // Walk calls postVisit on every node. @@ -71,7 +71,7 @@ type IndexColumn struct { // Only one of Column or Expression can be specified // Length is an optional field which is only applicable when Column is used Column IdentifierCI - Length *Literal + Length *int Expression Expr Direction OrderDirection } @@ -79,8 +79,8 @@ type IndexColumn struct { // LengthScaleOption is used for types that have an optional length // and scale type LengthScaleOption struct { - Length *Literal - Scale *Literal + Length *int + Scale *int } // IndexOption is used for trailing options for indexes: COMMENT, KEY_BLOCK_SIZE, USING, WITH PARSER @@ -357,6 +357,20 @@ func (node *ParsedComments) AddQueryHint(queryHint string) (Comments, error) { return newComments, nil } +// FkChecksStateString prints the foreign key checks state. 
+func FkChecksStateString(state *bool) string { + if state == nil { + return "" + } + switch *state { + case false: + return "Off" + case true: + return "On" + } + return "" +} + // ParseParams parses the vindex parameter list, pulling out the special-case // "owner" parameter func (node *VindexSpec) ParseParams() (string, map[string]string) { @@ -400,7 +414,7 @@ func (node *AliasedTableExpr) RemoveHints() *AliasedTableExpr { // TableName returns a TableName pointing to this table expr func (node *AliasedTableExpr) TableName() (TableName, error) { - if !node.As.IsEmpty() { + if node.As.NotEmpty() { return TableName{Name: node.As}, nil } @@ -417,6 +431,7 @@ func (node TableName) IsEmpty() bool { // If Name is empty, Qualifier is also empty. return node.Name.IsEmpty() } +func (node TableName) NonEmpty() bool { return !node.Name.IsEmpty() } // NewWhere creates a WHERE or HAVING clause out // of a Expr. If the expression is nil, it returns nil. @@ -549,6 +564,20 @@ func NewTypedArgument(in string, t sqltypes.Type) *Argument { return &Argument{Name: in, Type: t} } +func NewTypedArgumentFromLiteral(in string, lit *Literal) (*Argument, error) { + arg := &Argument{Name: in, Type: lit.SQLType()} + switch arg.Type { + case sqltypes.Decimal: + siz, scale := decimal.SizeAndScaleFromString(lit.Val) + arg.Scale = scale + arg.Size = siz + case sqltypes.Datetime, sqltypes.Time: + siz := datetime.SizeFromString(lit.Val) + arg.Size = siz + } + return arg, nil +} + // NewListArg builds a new ListArg. 
func NewListArg(in string) ListArg { return ListArg(in) @@ -759,10 +788,19 @@ func NewLimitWithoutOffset(rowCount int) *Limit { } // NewSelect is used to create a select statement -func NewSelect(comments Comments, exprs SelectExprs, selectOptions []string, into *SelectInto, from TableExprs, where *Where, groupBy GroupBy, having *Where, windows NamedWindows) *Select { +func NewSelect( + comments Comments, + exprs SelectExprs, + selectOptions []string, + into *SelectInto, + from TableExprs, + where *Where, + groupBy *GroupBy, + having *Where, + windows NamedWindows, +) *Select { var cache *bool var distinct, straightJoinHint, sqlFoundRows bool - for _, option := range selectOptions { switch strings.ToLower(option) { case DistinctStr: @@ -868,6 +906,11 @@ func (node IdentifierCI) IsEmpty() bool { return node.val == "" } +// NonEmpty returns true if the name is not empty. +func (node IdentifierCI) NotEmpty() bool { + return !node.IsEmpty() +} + // String returns the unescaped column name. It must // not be used for SQL generation. Use sqlparser.String // instead. The Stringer conformance is for usage @@ -905,6 +948,16 @@ func (node IdentifierCI) EqualString(str string) bool { return node.Lowered() == strings.ToLower(str) } +// EqualsAnyString returns true if any of these strings match +func (node IdentifierCI) EqualsAnyString(str []string) bool { + for _, s := range str { + if node.EqualString(s) { + return true + } + } + return false +} + // MarshalJSON marshals into JSON. func (node IdentifierCI) MarshalJSON() ([]byte, error) { return json.Marshal(node.val) @@ -935,6 +988,11 @@ func (node IdentifierCS) IsEmpty() bool { return node.v == "" } +// NonEmpty returns true if TabIdent is not empty. +func (node IdentifierCS) NotEmpty() bool { + return !node.IsEmpty() +} + // String returns the unescaped table name. It must // not be used for SQL generation. Use sqlparser.String // instead. 
The Stringer conformance is for usage @@ -1119,13 +1177,25 @@ func (node *Select) AddHaving(expr Expr) { // AddGroupBy adds a grouping expression, unless it's already present func (node *Select) AddGroupBy(expr Expr) { - for _, gb := range node.GroupBy { + if node.GroupBy == nil { + node.GroupBy = &GroupBy{Exprs: []Expr{expr}} + return + } + for _, gb := range node.GroupBy.Exprs { if Equals.Expr(gb, expr) { // group by columns are sets - duplicates don't add anything, so we can just skip these return } } - node.GroupBy = append(node.GroupBy, expr) + node.GroupBy.Exprs = append(node.GroupBy.Exprs, expr) +} + +// GroupByExprs returns the group by expressions +func (node *Select) GroupByExprs() []Expr { + if node.GroupBy == nil { + return nil + } + return node.GroupBy.Exprs } // AddWhere adds the boolean expression to the @@ -1315,6 +1385,16 @@ func (lock Lock) ToString() string { return NoLockStr case ForUpdateLock: return ForUpdateStr + case ForUpdateLockNoWait: + return ForUpdateNoWaitStr + case ForUpdateLockSkipLocked: + return ForUpdateSkipLockedStr + case ForShareLock: + return ForShareStr + case ForShareLockNoWait: + return ForShareNoWaitStr + case ForShareLockSkipLocked: + return ForShareSkipLockedStr case ShareModeLock: return ShareModeStr default: @@ -1526,14 +1606,28 @@ func (ty IndexHintType) ToString() string { case UseOp: return UseStr case IgnoreOp: - return IgnoreStr + return IgnoreIndexStr case ForceOp: return ForceStr + case UseVindexOp: + return UseVindexStr + case IgnoreVindexOp: + return IgnoreVindexStr default: return "Unknown IndexHintType" } } +// IsVindexHint returns if the given hint is a Vindex hint or not. 
+func (ty IndexHintType) IsVindexHint() bool { + switch ty { + case UseVindexOp, IgnoreVindexOp: + return true + default: + return false + } +} + // ToString returns the type as a string func (ty IndexHintForType) ToString() string { switch ty { @@ -1783,10 +1877,6 @@ func (ty ExplainType) ToString() string { return TreeStr case JSONType: return JSONStr - case VitessType: - return VitessStr - case VTExplainType: - return VTExplainStr case TraditionalType: return TraditionalStr case AnalyzeType: @@ -1838,6 +1928,26 @@ func (node DatabaseOptionType) ToString() string { } } +// IsCommutative returns whether the join type supports rearranging or not. +func (joinType JoinType) IsCommutative() bool { + switch joinType { + case StraightJoinType, LeftJoinType, RightJoinType, NaturalLeftJoinType, NaturalRightJoinType: + return false + default: + return true + } +} + +// IsInner returns whether the join type is an inner join or not. +func (joinType JoinType) IsInner() bool { + switch joinType { + case StraightJoinType, NaturalJoinType, NormalJoinType: + return true + default: + return false + } +} + // ToString returns the type as a string func (ty LockType) ToString() string { switch ty { @@ -1929,6 +2039,8 @@ func (ty ShowCommandType) ToString() string { return VitessVariablesStr case VschemaTables: return VschemaTablesStr + case VschemaKeyspaces: + return VschemaKeyspacesStr case VschemaVindexes: return VschemaVindexesStr case Warnings: @@ -2099,12 +2211,17 @@ func GetAllSelects(selStmt SelectStatement) []*Select { // ColumnName returns the alias if one was provided, otherwise prints the AST func (ae *AliasedExpr) ColumnName() string { - if !ae.As.IsEmpty() { + if ae.As.NotEmpty() { return ae.As.String() } - if col, ok := ae.Expr.(*ColName); ok { - return col.Name.String() + switch node := ae.Expr.(type) { + case *ColName: + return node.Name.String() + case *Literal: + if node.Type == StrVal { + return node.Val + } } return String(ae.Expr) @@ -2120,23 +2237,47 @@ func (s 
SelectExprs) AllAggregation() bool { return true } -// RemoveKeyspaceFromColName removes the Qualifier.Qualifier on all ColNames in the expression tree -func RemoveKeyspaceFromColName(expr Expr) { - RemoveKeyspace(expr) +// RemoveKeyspaceInCol removes the Qualifier.Qualifier on all ColNames in the AST +func RemoveKeyspaceInCol(in SQLNode) { + // Walk will only return an error if we return an error from the inner func. safe to ignore here + _ = Walk(func(node SQLNode) (kontinue bool, err error) { + if col, ok := node.(*ColName); ok && col.Qualifier.Qualifier.NotEmpty() { + col.Qualifier.Qualifier = NewIdentifierCS("") + } + + return true, nil + }, in) } -// RemoveKeyspace removes the Qualifier.Qualifier on all ColNames in the AST -func RemoveKeyspace(in SQLNode) { +// RemoveKeyspaceInTables removes the Qualifier on all TableNames in the AST +func RemoveKeyspaceInTables(in SQLNode) { // Walk will only return an error if we return an error from the inner func. safe to ignore here - _ = Walk(func(node SQLNode) (kontinue bool, err error) { - switch col := node.(type) { + Rewrite(in, nil, func(cursor *Cursor) bool { + if tbl, ok := cursor.Node().(TableName); ok && tbl.Qualifier.NotEmpty() { + tbl.Qualifier = NewIdentifierCS("") + cursor.Replace(tbl) + } + + return true + }) +} + +// RemoveKeyspace removes the Qualifier.Qualifier on all ColNames and Qualifier on all TableNames in the AST +func RemoveKeyspace(in SQLNode) { + Rewrite(in, nil, func(cursor *Cursor) bool { + switch expr := cursor.Node().(type) { case *ColName: - if !col.Qualifier.Qualifier.IsEmpty() { - col.Qualifier.Qualifier = NewIdentifierCS("") + if expr.Qualifier.Qualifier.NotEmpty() { + expr.Qualifier.Qualifier = NewIdentifierCS("") + } + case TableName: + if expr.Qualifier.NotEmpty() { + expr.Qualifier = NewIdentifierCS("") + cursor.Replace(expr) } } - return true, nil - }, in) + return true + }) } func convertStringToInt(integer string) int { @@ -2487,6 +2628,16 @@ func (ra ReferenceAction) 
IsRestrict() bool { } } +// IsCascade returns true if the reference action is of cascade type. +func (ra ReferenceAction) IsCascade() bool { + switch ra { + case Cascade: + return true + default: + return false + } +} + // IsLiteral returns true if the expression is of a literal type. func IsLiteral(expr Expr) bool { switch expr.(type) { @@ -2497,6 +2648,131 @@ func IsLiteral(expr Expr) bool { } } +// AppendString appends a string to the expression provided. +// This is intended to be used in the parser only for concatenating multiple strings together. +func AppendString(expr Expr, in string) Expr { + switch node := expr.(type) { + case *Literal: + node.Val = node.Val + in + return node + case *UnaryExpr: + node.Expr = AppendString(node.Expr, in) + return node + case *IntroducerExpr: + node.Expr = AppendString(node.Expr, in) + return node + } + return nil +} + func (ct *ColumnType) Invisible() bool { return ct.Options.Invisible != nil && *ct.Options.Invisible } + +func (node *Delete) IsSingleAliasExpr() bool { + if len(node.Targets) > 1 { + return false + } + if len(node.TableExprs) != 1 { + return false + } + _, isAliasExpr := node.TableExprs[0].(*AliasedTableExpr) + return isAliasExpr +} + +func MultiTable(node []TableExpr) bool { + if len(node) > 1 { + return true + } + _, singleTbl := node[0].(*AliasedTableExpr) + return !singleTbl +} + +func (node *Update) AddOrder(order *Order) { + node.OrderBy = append(node.OrderBy, order) +} + +func (node *Update) SetLimit(limit *Limit) { + node.Limit = limit +} + +func (node *Delete) AddOrder(order *Order) { + node.OrderBy = append(node.OrderBy, order) +} + +func (node *Delete) SetLimit(limit *Limit) { + node.Limit = limit +} + +func (node *Select) GetFrom() []TableExpr { + return node.From +} + +func (node *Select) SetFrom(exprs []TableExpr) { + node.From = exprs +} + +func (node *Select) GetWherePredicate() Expr { + if node.Where == nil { + return nil + } + return node.Where.Expr +} + +func (node *Select) 
SetWherePredicate(expr Expr) { + node.Where = &Where{ + Type: WhereClause, + Expr: expr, + } +} +func (node *Delete) GetFrom() []TableExpr { + return node.TableExprs +} + +func (node *Delete) SetFrom(exprs []TableExpr) { + node.TableExprs = exprs +} + +func (node *Delete) GetWherePredicate() Expr { + if node.Where == nil { + return nil + } + return node.Where.Expr +} + +func (node *Delete) SetWherePredicate(expr Expr) { + node.Where = &Where{ + Type: WhereClause, + Expr: expr, + } +} + +func (node *Update) GetFrom() []TableExpr { + return node.TableExprs +} + +func (node *Update) SetFrom(exprs []TableExpr) { + node.TableExprs = exprs +} + +func (node *Update) GetWherePredicate() Expr { + if node.Where == nil { + return nil + } + return node.Where.Expr +} + +func (node *Update) SetWherePredicate(expr Expr) { + node.Where = &Where{ + Type: WhereClause, + Expr: expr, + } +} + +// GetHighestOrderLock returns the higher level lock between the current lock and the new lock +func (lock Lock) GetHighestOrderLock(newLock Lock) Lock { + if newLock > lock { + return newLock + } + return lock +} diff --git a/go/vt/sqlparser/ast_rewrite.go b/go/vt/sqlparser/ast_rewrite.go index 0121695fe8c..8f0926951f3 100644 --- a/go/vt/sqlparser/ast_rewrite.go +++ b/go/vt/sqlparser/ast_rewrite.go @@ -204,8 +204,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfGeomFromWKBExpr(parent, node, replacer) case *GeomPropertyFuncExpr: return a.rewriteRefOfGeomPropertyFuncExpr(parent, node, replacer) - case GroupBy: - return a.rewriteGroupBy(parent, node, replacer) + case *GroupBy: + return a.rewriteRefOfGroupBy(parent, node, replacer) case *GroupConcatExpr: return a.rewriteRefOfGroupConcatExpr(parent, node, replacer) case IdentifierCI: @@ -422,6 +422,8 @@ func (a *application) rewriteSQLNode(parent SQLNode, node SQLNode, replacer repl return a.rewriteRefOfRollback(parent, node, replacer) case RootNode: return a.rewriteRootNode(parent, node, 
replacer) + case *RowAlias: + return a.rewriteRefOfRowAlias(parent, node, replacer) case *SRollback: return a.rewriteRefOfSRollback(parent, node, replacer) case *Savepoint: @@ -1284,6 +1286,11 @@ func (a *application) rewriteRefOfAvg(parent SQLNode, node *Avg, replacer replac }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Avg).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -1419,6 +1426,11 @@ func (a *application) rewriteRefOfBitAnd(parent SQLNode, node *BitAnd, replacer }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*BitAnd).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -1451,6 +1463,11 @@ func (a *application) rewriteRefOfBitOr(parent SQLNode, node *BitOr, replacer re }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*BitOr).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -1483,6 +1500,11 @@ func (a *application) rewriteRefOfBitXor(parent SQLNode, node *BitXor, replacer }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*BitXor).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -1817,20 +1839,12 @@ func (a *application) rewriteRefOfColumnType(parent SQLNode, node *ColumnType, r return true } } - if !a.rewriteRefOfLiteral(node, node.Length, func(newNode, parent SQLNode) { - parent.(*ColumnType).Length = newNode.(*Literal) - }) { - return false - } - if !a.rewriteRefOfLiteral(node, node.Scale, func(newNode, parent SQLNode) { - parent.(*ColumnType).Scale = newNode.(*Literal) 
- }) { - return false - } if a.post != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } if !a.post(&a.cur) { return false } @@ -2082,20 +2096,12 @@ func (a *application) rewriteRefOfConvertType(parent SQLNode, node *ConvertType, return true } } - if !a.rewriteRefOfLiteral(node, node.Length, func(newNode, parent SQLNode) { - parent.(*ConvertType).Length = newNode.(*Literal) - }) { - return false - } - if !a.rewriteRefOfLiteral(node, node.Scale, func(newNode, parent SQLNode) { - parent.(*ConvertType).Scale = newNode.(*Literal) - }) { - return false - } if a.post != nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node + if a.pre == nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + } if !a.post(&a.cur) { return false } @@ -2156,6 +2162,11 @@ func (a *application) rewriteRefOfCount(parent SQLNode, node *Count, replacer re }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Count).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -2183,12 +2194,15 @@ func (a *application) rewriteRefOfCountStar(parent SQLNode, node *CountStar, rep return true } } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*CountStar).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { - if a.pre == nil { - a.cur.replacer = replacer - a.cur.parent = parent - a.cur.node = node - } + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node if !a.post(&a.cur) { return false } @@ -2455,16 +2469,20 @@ func (a *application) rewriteRefOfDelete(parent SQLNode, node *Delete, replacer }) { return false } + for x, el := range node.TableExprs { + if !a.rewriteTableExpr(node, el, func(idx int) replacerFunc 
{ + return func(newNode, parent SQLNode) { + parent.(*Delete).TableExprs[idx] = newNode.(TableExpr) + } + }(x)) { + return false + } + } if !a.rewriteTableNames(node, node.Targets, func(newNode, parent SQLNode) { parent.(*Delete).Targets = newNode.(TableNames) }) { return false } - if !a.rewriteTableExprs(node, node.TableExprs, func(newNode, parent SQLNode) { - parent.(*Delete).TableExprs = newNode.(TableExprs) - }) { - return false - } if !a.rewritePartitions(node, node.Partitions, func(newNode, parent SQLNode) { parent.(*Delete).Partitions = newNode.(Partitions) }) { @@ -3150,8 +3168,8 @@ func (a *application) rewriteRefOfFuncExpr(parent SQLNode, node *FuncExpr, repla }) { return false } - if !a.rewriteSelectExprs(node, node.Exprs, func(newNode, parent SQLNode) { - parent.(*FuncExpr).Exprs = newNode.(SelectExprs) + if !a.rewriteExprs(node, node.Exprs, func(newNode, parent SQLNode) { + parent.(*FuncExpr).Exprs = newNode.(Exprs) }) { return false } @@ -3602,7 +3620,7 @@ func (a *application) rewriteRefOfGeomPropertyFuncExpr(parent SQLNode, node *Geo } return true } -func (a *application) rewriteGroupBy(parent SQLNode, node GroupBy, replacer replacerFunc) bool { +func (a *application) rewriteRefOfGroupBy(parent SQLNode, node *GroupBy, replacer replacerFunc) bool { if node == nil { return true } @@ -3610,20 +3628,14 @@ func (a *application) rewriteGroupBy(parent SQLNode, node GroupBy, replacer repl a.cur.replacer = replacer a.cur.parent = parent a.cur.node = node - kontinue := !a.pre(&a.cur) - if a.cur.revisit { - node = a.cur.node.(GroupBy) - a.cur.revisit = false - return a.rewriteGroupBy(parent, node, replacer) - } - if kontinue { + if !a.pre(&a.cur) { return true } } - for x, el := range node { + for x, el := range node.Exprs { if !a.rewriteExpr(node, el, func(idx int) replacerFunc { return func(newNode, parent SQLNode) { - parent.(GroupBy)[idx] = newNode.(Expr) + parent.(*GroupBy).Exprs[idx] = newNode.(Expr) } }(x)) { return false @@ -3887,6 +3899,11 @@ func (a 
*application) rewriteRefOfInsert(parent SQLNode, node *Insert, replacer }) { return false } + if !a.rewriteRefOfRowAlias(node, node.RowAlias, func(newNode, parent SQLNode) { + parent.(*Insert).RowAlias = newNode.(*RowAlias) + }) { + return false + } if !a.rewriteOnDup(node, node.OnDup, func(newNode, parent SQLNode) { parent.(*Insert).OnDup = newNode.(OnDup) }) { @@ -5498,6 +5515,11 @@ func (a *application) rewriteRefOfMax(parent SQLNode, node *Max, replacer replac }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Max).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -5567,6 +5589,11 @@ func (a *application) rewriteRefOfMin(parent SQLNode, node *Min, replacer replac }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Min).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -7278,6 +7305,38 @@ func (a *application) rewriteRootNode(parent SQLNode, node RootNode, replacer re } return true } +func (a *application) rewriteRefOfRowAlias(parent SQLNode, node *RowAlias, replacer replacerFunc) bool { + if node == nil { + return true + } + if a.pre != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.pre(&a.cur) { + return true + } + } + if !a.rewriteIdentifierCS(node, node.TableName, func(newNode, parent SQLNode) { + parent.(*RowAlias).TableName = newNode.(IdentifierCS) + }) { + return false + } + if !a.rewriteColumns(node, node.Columns, func(newNode, parent SQLNode) { + parent.(*RowAlias).Columns = newNode.(Columns) + }) { + return false + } + if a.post != nil { + a.cur.replacer = replacer + a.cur.parent = parent + a.cur.node = node + if !a.post(&a.cur) { + return false + } + } + return true +} func (a *application) rewriteRefOfSRollback(parent 
SQLNode, node *SRollback, replacer replacerFunc) bool { if node == nil { return true @@ -7373,8 +7432,8 @@ func (a *application) rewriteRefOfSelect(parent SQLNode, node *Select, replacer }) { return false } - if !a.rewriteGroupBy(node, node.GroupBy, func(newNode, parent SQLNode) { - parent.(*Select).GroupBy = newNode.(GroupBy) + if !a.rewriteRefOfGroupBy(node, node.GroupBy, func(newNode, parent SQLNode) { + parent.(*Select).GroupBy = newNode.(*GroupBy) }) { return false } @@ -7841,6 +7900,11 @@ func (a *application) rewriteRefOfStd(parent SQLNode, node *Std, replacer replac }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Std).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -7873,6 +7937,11 @@ func (a *application) rewriteRefOfStdDev(parent SQLNode, node *StdDev, replacer }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*StdDev).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -7905,6 +7974,11 @@ func (a *application) rewriteRefOfStdPop(parent SQLNode, node *StdPop, replacer }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*StdPop).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -7937,6 +8011,11 @@ func (a *application) rewriteRefOfStdSamp(parent SQLNode, node *StdSamp, replace }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*StdSamp).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -8223,6 +8302,11 @@ func (a *application) rewriteRefOfSum(parent SQLNode, node *Sum, replacer 
replac }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Sum).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -8680,10 +8764,14 @@ func (a *application) rewriteRefOfUpdate(parent SQLNode, node *Update, replacer }) { return false } - if !a.rewriteTableExprs(node, node.TableExprs, func(newNode, parent SQLNode) { - parent.(*Update).TableExprs = newNode.(TableExprs) - }) { - return false + for x, el := range node.TableExprs { + if !a.rewriteTableExpr(node, el, func(idx int) replacerFunc { + return func(newNode, parent SQLNode) { + parent.(*Update).TableExprs[idx] = newNode.(TableExpr) + } + }(x)) { + return false + } } if !a.rewriteUpdateExprs(node, node.Exprs, func(newNode, parent SQLNode) { parent.(*Update).Exprs = newNode.(UpdateExprs) @@ -9084,6 +9172,11 @@ func (a *application) rewriteRefOfVarPop(parent SQLNode, node *VarPop, replacer }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*VarPop).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -9116,6 +9209,11 @@ func (a *application) rewriteRefOfVarSamp(parent SQLNode, node *VarSamp, replace }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*VarSamp).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent @@ -9180,6 +9278,11 @@ func (a *application) rewriteRefOfVariance(parent SQLNode, node *Variance, repla }) { return false } + if !a.rewriteRefOfOverClause(node, node.OverClause, func(newNode, parent SQLNode) { + parent.(*Variance).OverClause = newNode.(*OverClause) + }) { + return false + } if a.post != nil { a.cur.replacer = replacer a.cur.parent = parent diff --git 
a/go/vt/sqlparser/ast_rewriting.go b/go/vt/sqlparser/ast_rewriting.go index 45711f8d535..64de1f9d920 100644 --- a/go/vt/sqlparser/ast_rewriting.go +++ b/go/vt/sqlparser/ast_rewriting.go @@ -51,6 +51,7 @@ func PrepareAST( selectLimit int, setVarComment string, sysVars map[string]string, + fkChecksState *bool, views VSchemaViews, ) (*RewriteASTResult, error) { if parameterize { @@ -59,7 +60,7 @@ func PrepareAST( return nil, err } } - return RewriteAST(in, keyspace, selectLimit, setVarComment, sysVars, views) + return RewriteAST(in, keyspace, selectLimit, setVarComment, sysVars, fkChecksState, views) } // RewriteAST rewrites the whole AST, replacing function calls and adding column aliases to queries. @@ -70,9 +71,10 @@ func RewriteAST( selectLimit int, setVarComment string, sysVars map[string]string, + fkChecksState *bool, views VSchemaViews, ) (*RewriteASTResult, error) { - er := newASTRewriter(keyspace, selectLimit, setVarComment, sysVars, views) + er := newASTRewriter(keyspace, selectLimit, setVarComment, sysVars, fkChecksState, views) er.shouldRewriteDatabaseFunc = shouldRewriteDatabaseFunc(in) result := SafeRewrite(in, er.rewriteDown, er.rewriteUp) if er.err != nil { @@ -121,16 +123,18 @@ type astRewriter struct { keyspace string selectLimit int setVarComment string + fkChecksState *bool sysVars map[string]string views VSchemaViews } -func newASTRewriter(keyspace string, selectLimit int, setVarComment string, sysVars map[string]string, views VSchemaViews) *astRewriter { +func newASTRewriter(keyspace string, selectLimit int, setVarComment string, sysVars map[string]string, fkChecksState *bool, views VSchemaViews) *astRewriter { return &astRewriter{ bindVars: &BindVarNeeds{}, keyspace: keyspace, selectLimit: selectLimit, setVarComment: setVarComment, + fkChecksState: fkChecksState, sysVars: sysVars, views: views, } @@ -154,7 +158,7 @@ const ( ) func (er *astRewriter) rewriteAliasedExpr(node *AliasedExpr) (*BindVarNeeds, error) { - inner := 
newASTRewriter(er.keyspace, er.selectLimit, er.setVarComment, er.sysVars, er.views) + inner := newASTRewriter(er.keyspace, er.selectLimit, er.setVarComment, er.sysVars, nil, er.views) inner.shouldRewriteDatabaseFunc = er.shouldRewriteDatabaseFunc tmp := SafeRewrite(node.Expr, inner.rewriteDown, inner.rewriteUp) newExpr, ok := tmp.(Expr) @@ -177,13 +181,19 @@ func (er *astRewriter) rewriteDown(node SQLNode, _ SQLNode) bool { func (er *astRewriter) rewriteUp(cursor *Cursor) bool { // Add SET_VAR comment to this node if it supports it and is needed - if supportOptimizerHint, supportsOptimizerHint := cursor.Node().(SupportOptimizerHint); supportsOptimizerHint && er.setVarComment != "" { - newComments, err := supportOptimizerHint.GetParsedComments().AddQueryHint(er.setVarComment) - if err != nil { - er.err = err - return false + if supportOptimizerHint, supportsOptimizerHint := cursor.Node().(SupportOptimizerHint); supportsOptimizerHint { + if er.setVarComment != "" { + newComments, err := supportOptimizerHint.GetParsedComments().AddQueryHint(er.setVarComment) + if err != nil { + er.err = err + return false + } + supportOptimizerHint.SetComments(newComments) + } + if er.fkChecksState != nil { + newComments := supportOptimizerHint.GetParsedComments().SetMySQLSetVarValue(sysvars.ForeignKeyChecks, FkChecksStateString(er.fkChecksState)) + supportOptimizerHint.SetComments(newComments) } - supportOptimizerHint.SetComments(newComments) } switch node := cursor.Node().(type) { @@ -307,7 +317,7 @@ func (er *astRewriter) visitSelect(node *Select) { } aliasedExpr, ok := col.(*AliasedExpr) - if !ok || !aliasedExpr.As.IsEmpty() { + if !ok || aliasedExpr.As.NotEmpty() { continue } buf := NewTrackedBuffer(nil) @@ -443,7 +453,7 @@ func (er *astRewriter) unnestSubQueries(cursor *Cursor, subquery *Subquery) { if len(sel.SelectExprs) != 1 || len(sel.OrderBy) != 0 || - len(sel.GroupBy) != 0 || + sel.GroupBy != nil || len(sel.From) != 1 || sel.Where != nil || sel.Having != nil || @@ -506,7 
+516,7 @@ func (er *astRewriter) existsRewrite(cursor *Cursor, node *ExistsExpr) { return } - if len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation() { + if sel.GroupBy == nil && sel.SelectExprs.AllAggregation() { // in these situations, we are guaranteed to always get a non-empty result, // so we can replace the EXISTS with a literal true cursor.Replace(BoolVal(true)) diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go index 2ed92201296..3ad9a5298c4 100644 --- a/go/vt/sqlparser/ast_rewriting_test.go +++ b/go/vt/sqlparser/ast_rewriting_test.go @@ -37,12 +37,12 @@ type testCaseSysVar struct { } type myTestCase struct { - in, expected string - liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool - ddlStrategy, migrationContext, sessionUUID, sessionEnableSystemSettings bool - udv int - autocommit, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool - sqlSelectLimit, transactionMode, workload, version, versionComment bool + in, expected string + liid, db, foundRows, rowCount, rawGTID, rawTimeout, sessTrackGTID bool + ddlStrategy, migrationContext, sessionUUID, sessionEnableSystemSettings bool + udv int + autocommit, foreignKeyChecks, clientFoundRows, skipQueryPlanCache, socket, queryTimeout bool + sqlSelectLimit, transactionMode, workload, version, versionComment bool } func TestRewrites(in *testing.T) { @@ -296,6 +296,7 @@ func TestRewrites(in *testing.T) { in: "SHOW VARIABLES", expected: "SHOW VARIABLES", autocommit: true, + foreignKeyChecks: true, clientFoundRows: true, skipQueryPlanCache: true, sqlSelectLimit: true, @@ -316,6 +317,7 @@ func TestRewrites(in *testing.T) { in: "SHOW GLOBAL VARIABLES", expected: "SHOW GLOBAL VARIABLES", autocommit: true, + foreignKeyChecks: true, clientFoundRows: true, skipQueryPlanCache: true, sqlSelectLimit: true, @@ -333,11 +335,11 @@ func TestRewrites(in *testing.T) { socket: true, queryTimeout: true, }} - + parser := NewTestParser() for _, tc := range tests { 
in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST( @@ -346,11 +348,12 @@ func TestRewrites(in *testing.T) { SQLSelectLimitUnset, "", nil, + nil, &fakeViews{}, ) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) s := String(expected) @@ -362,6 +365,7 @@ func TestRewrites(in *testing.T) { assert.Equal(tc.rowCount, result.NeedsFuncResult(RowCountName), "should need row count") assert.Equal(tc.udv, len(result.NeedUserDefinedVariables), "count of user defined variables") assert.Equal(tc.autocommit, result.NeedsSysVar(sysvars.Autocommit.Name), "should need :__vtautocommit") + assert.Equal(tc.foreignKeyChecks, result.NeedsSysVar(sysvars.ForeignKeyChecks), "should need :__vtforeignKeyChecks") assert.Equal(tc.clientFoundRows, result.NeedsSysVar(sysvars.ClientFoundRows.Name), "should need :__vtclientFoundRows") assert.Equal(tc.skipQueryPlanCache, result.NeedsSysVar(sysvars.SkipQueryPlanCache.Name), "should need :__vtskipQueryPlanCache") assert.Equal(tc.sqlSelectLimit, result.NeedsSysVar(sysvars.SQLSelectLimit.Name), "should need :__vtsqlSelectLimit") @@ -388,7 +392,8 @@ func (*fakeViews) FindView(name TableName) SelectStatement { if name.Name.String() != "user_details" { return nil } - statement, err := Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id") + parser := NewTestParser() + statement, err := parser.Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id") if err != nil { return nil } @@ -430,16 +435,17 @@ func TestRewritesWithSetVarComment(in *testing.T) { setVarComment: "AA(a)", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - 
stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) - result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, tc.setVarComment, nil, &fakeViews{}) + result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, tc.setVarComment, nil, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) @@ -478,16 +484,17 @@ func TestRewritesSysVar(in *testing.T) { expected: "select :__vttransaction_isolation as `@@session.transaction_isolation` from dual", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) - result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", tc.sysVar, &fakeViews{}) + result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", tc.sysVar, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) @@ -528,16 +535,17 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) { expected: "SELECT 2 as `(select 2 from dual)` from DUAL", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) - result, err := RewriteAST(stmt, "sys", SQLSelectLimitUnset, "", nil, &fakeViews{}) + result, err := RewriteAST(stmt, "sys", SQLSelectLimitUnset, "", nil, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, 
String(expected), String(result.AST)) diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 97b93a80379..f01b47cbd7b 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -30,8 +30,9 @@ import ( ) func TestAppend(t *testing.T) { + parser := NewTestParser() query := "select * from t where a = 1" - tree, err := Parse(query) + tree, err := parser.Parse(query) require.NoError(t, err) var b strings.Builder Append(&b, tree) @@ -49,9 +50,10 @@ func TestAppend(t *testing.T) { } func TestSelect(t *testing.T) { - e1, err := ParseExpr("a = 1") + parser := NewTestParser() + e1, err := parser.ParseExpr("a = 1") require.NoError(t, err) - e2, err := ParseExpr("b = 2") + e2, err := parser.ParseExpr("b = 2") require.NoError(t, err) t.Run("single predicate where", func(t *testing.T) { sel := &Select{} @@ -81,7 +83,8 @@ func TestSelect(t *testing.T) { } func TestUpdate(t *testing.T) { - tree, err := Parse("update t set a = 1") + parser := NewTestParser() + tree, err := parser.Parse("update t set a = 1") require.NoError(t, err) upd, ok := tree.(*Update) @@ -103,11 +106,12 @@ func TestUpdate(t *testing.T) { } func TestRemoveHints(t *testing.T) { + parser := NewTestParser() for _, query := range []string{ "select * from t use index (i)", "select * from t force index (i)", } { - tree, err := Parse(query) + tree, err := parser.Parse(query) if err != nil { t.Fatal(err) } @@ -124,16 +128,17 @@ func TestRemoveHints(t *testing.T) { } func TestAddOrder(t *testing.T) { - src, err := Parse("select foo, bar from baz order by foo") + parser := NewTestParser() + src, err := parser.Parse("select foo, bar from baz order by foo") require.NoError(t, err) order := src.(*Select).OrderBy[0] - dst, err := Parse("select * from t") + dst, err := parser.Parse("select * from t") require.NoError(t, err) dst.(*Select).AddOrder(order) buf := NewTrackedBuffer(nil) dst.Format(buf) require.Equal(t, "select * from t order by foo asc", buf.String()) - dst, err = 
Parse("select * from t union select * from s") + dst, err = parser.Parse("select * from t union select * from s") require.NoError(t, err) dst.(*Union).AddOrder(order) buf = NewTrackedBuffer(nil) @@ -142,16 +147,17 @@ func TestAddOrder(t *testing.T) { } func TestSetLimit(t *testing.T) { - src, err := Parse("select foo, bar from baz limit 4") + parser := NewTestParser() + src, err := parser.Parse("select foo, bar from baz limit 4") require.NoError(t, err) limit := src.(*Select).Limit - dst, err := Parse("select * from t") + dst, err := parser.Parse("select * from t") require.NoError(t, err) dst.(*Select).SetLimit(limit) buf := NewTrackedBuffer(nil) dst.Format(buf) require.Equal(t, "select * from t limit 4", buf.String()) - dst, err = Parse("select * from t union select * from s") + dst, err = parser.Parse("select * from t union select * from s") require.NoError(t, err) dst.(*Union).SetLimit(limit) buf = NewTrackedBuffer(nil) @@ -213,8 +219,9 @@ func TestDDL(t *testing.T) { }, affected: []string{"a", "b"}, }} + parser := NewTestParser() for _, tcase := range testcases { - got, err := Parse(tcase.query) + got, err := parser.Parse(tcase.query) if err != nil { t.Fatal(err) } @@ -232,7 +239,8 @@ func TestDDL(t *testing.T) { } func TestSetAutocommitON(t *testing.T) { - stmt, err := Parse("SET autocommit=ON") + parser := NewTestParser() + stmt, err := parser.Parse("SET autocommit=ON") require.NoError(t, err) s, ok := stmt.(*Set) if !ok { @@ -257,7 +265,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement expression is not Literal: %T", e.Expr) } - stmt, err = Parse("SET @@session.autocommit=ON") + stmt, err = parser.Parse("SET @@session.autocommit=ON") require.NoError(t, err) s, ok = stmt.(*Set) if !ok { @@ -284,7 +292,8 @@ func TestSetAutocommitON(t *testing.T) { } func TestSetAutocommitOFF(t *testing.T) { - stmt, err := Parse("SET autocommit=OFF") + parser := NewTestParser() + stmt, err := parser.Parse("SET autocommit=OFF") require.NoError(t, err) s, ok 
:= stmt.(*Set) if !ok { @@ -309,7 +318,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement expression is not Literal: %T", e.Expr) } - stmt, err = Parse("SET @@session.autocommit=OFF") + stmt, err = parser.Parse("SET @@session.autocommit=OFF") require.NoError(t, err) s, ok = stmt.(*Set) if !ok { @@ -491,9 +500,10 @@ func TestReplaceExpr(t *testing.T) { out: "case a when b then c when d then c else :a end", }} to := NewArgument("a") + parser := NewTestParser() for _, tcase := range tcases { t.Run(tcase.in, func(t *testing.T) { - tree, err := Parse(tcase.in) + tree, err := parser.Parse(tcase.in) require.NoError(t, err) var from *Subquery _ = Walk(func(node SQLNode) (kontinue bool, err error) { @@ -687,6 +697,77 @@ func TestColumns_FindColumn(t *testing.T) { } } +func TestSplitStatements(t *testing.T) { + testcases := []struct { + input string + stmts int + wantErr bool + }{ + { + input: "select * from table1; \t; \n; \n\t\t ;select * from table1;", + stmts: 2, + }, { + input: "select * from table1", + stmts: 1, + }, { + input: "select * from table1;", + stmts: 1, + }, { + input: "select * from table1; ", + stmts: 1, + }, { + input: "select * from table1; select * from table2;", + stmts: 2, + }, { + input: "create /*vt+ directive=true */ table t1 (id int); create table t2 (id int); create table t3 (id int)", + stmts: 3, + }, { + input: "create /*vt+ directive=true */ table t1 (id int); create table t2 (id int); create table t3 (id int);", + stmts: 3, + }, { + input: "select * from /* comment ; */ table1;", + stmts: 1, + }, { + input: "select * from table1 where semi = ';';", + stmts: 1, + }, { + input: "CREATE TABLE `total_data` (`id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'id', " + + "`region` varchar(32) NOT NULL COMMENT 'region name, like zh; th; kepler'," + + "`data_size` bigint NOT NULL DEFAULT '0' COMMENT 'data size;'," + + "`createtime` datetime NOT NULL DEFAULT NOW() COMMENT 'create time;'," + + "`comment` varchar(100) NOT NULL DEFAULT '' 
COMMENT 'comment'," + + "PRIMARY KEY (`id`))", + stmts: 1, + }, { + input: "create table t1 (id int primary key); create table t2 (id int primary key);", + stmts: 2, + }, { + input: ";;; create table t1 (id int primary key);;; ;create table t2 (id int primary key);", + stmts: 2, + }, { + input: ";create table t1 ;create table t2 (id;", + wantErr: true, + }, { + // Ignore quoted semicolon + input: ";create table t1 ';';;;create table t2 (id;", + wantErr: true, + }, + } + + parser := NewTestParser() + for _, tcase := range testcases { + t.Run(tcase.input, func(t *testing.T) { + statements, err := parser.SplitStatements(tcase.input) + if tcase.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.stmts, len(statements)) + } + }) + } +} + func TestSplitStatementToPieces(t *testing.T) { testcases := []struct { input string @@ -735,16 +816,21 @@ func TestSplitStatementToPieces(t *testing.T) { // Ignore quoted semicolon input: ";create table t1 ';';;;create table t2 (id;", output: "create table t1 ';';create table t2 (id", + }, { + // Ignore quoted semicolon + input: "stop replica; start replica", + output: "stop replica; start replica", }, } + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - stmtPieces, err := SplitStatementToPieces(tcase.input) + stmtPieces, err := parser.SplitStatementToPieces(tcase.input) require.NoError(t, err) out := strings.Join(stmtPieces, ";") @@ -766,13 +852,15 @@ func TestDefaultStatus(t *testing.T) { } func TestShowTableStatus(t *testing.T) { + parser := NewTestParser() query := "Show Table Status FROM customer" - tree, err := Parse(query) + tree, err := parser.Parse(query) require.NoError(t, err) require.NotNil(t, tree) } func BenchmarkStringTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { 
queries := loadQueries(b, trace) @@ -782,7 +870,7 @@ func BenchmarkStringTraces(b *testing.B) { parsed := make([]Statement, 0, len(queries)) for _, q := range queries { - pp, err := Parse(q) + pp, err := parser.Parse(q) if err != nil { b.Fatal(err) } diff --git a/go/vt/sqlparser/ast_visit.go b/go/vt/sqlparser/ast_visit.go index a88d689f102..07013a3f2d8 100644 --- a/go/vt/sqlparser/ast_visit.go +++ b/go/vt/sqlparser/ast_visit.go @@ -204,8 +204,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfGeomFromWKBExpr(in, f) case *GeomPropertyFuncExpr: return VisitRefOfGeomPropertyFuncExpr(in, f) - case GroupBy: - return VisitGroupBy(in, f) + case *GroupBy: + return VisitRefOfGroupBy(in, f) case *GroupConcatExpr: return VisitRefOfGroupConcatExpr(in, f) case IdentifierCI: @@ -422,6 +422,8 @@ func VisitSQLNode(in SQLNode, f Visit) error { return VisitRefOfRollback(in, f) case RootNode: return VisitRootNode(in, f) + case *RowAlias: + return VisitRefOfRowAlias(in, f) case *SRollback: return VisitRefOfSRollback(in, f) case *Savepoint: @@ -884,6 +886,9 @@ func VisitRefOfAvg(in *Avg, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfBegin(in *Begin, f Visit) error { @@ -938,6 +943,9 @@ func VisitRefOfBitAnd(in *BitAnd, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfBitOr(in *BitOr, f Visit) error { @@ -950,6 +958,9 @@ func VisitRefOfBitOr(in *BitOr, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfBitXor(in *BitXor, f Visit) error { @@ -962,6 +973,9 @@ func VisitRefOfBitXor(in *BitXor, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + 
if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfCallProc(in *CallProc, f Visit) error { @@ -1105,12 +1119,6 @@ func VisitRefOfColumnType(in *ColumnType, f Visit) error { if cont, err := f(in); err != nil || !cont { return err } - if err := VisitRefOfLiteral(in.Length, f); err != nil { - return err - } - if err := VisitRefOfLiteral(in.Scale, f); err != nil { - return err - } return nil } func VisitColumns(in Columns, f Visit) error { @@ -1218,12 +1226,6 @@ func VisitRefOfConvertType(in *ConvertType, f Visit) error { if cont, err := f(in); err != nil || !cont { return err } - if err := VisitRefOfLiteral(in.Length, f); err != nil { - return err - } - if err := VisitRefOfLiteral(in.Scale, f); err != nil { - return err - } return nil } func VisitRefOfConvertUsingExpr(in *ConvertUsingExpr, f Visit) error { @@ -1248,6 +1250,9 @@ func VisitRefOfCount(in *Count, f Visit) error { if err := VisitExprs(in.Args, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfCountStar(in *CountStar, f Visit) error { @@ -1257,6 +1262,9 @@ func VisitRefOfCountStar(in *CountStar, f Visit) error { if cont, err := f(in); err != nil || !cont { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfCreateDatabase(in *CreateDatabase, f Visit) error { @@ -1377,10 +1385,12 @@ func VisitRefOfDelete(in *Delete, f Visit) error { if err := VisitRefOfParsedComments(in.Comments, f); err != nil { return err } - if err := VisitTableNames(in.Targets, f); err != nil { - return err + for _, el := range in.TableExprs { + if err := VisitTableExpr(el, f); err != nil { + return err + } } - if err := VisitTableExprs(in.TableExprs, f); err != nil { + if err := VisitTableNames(in.Targets, f); err != nil { return err } if err := VisitPartitions(in.Partitions, f); err != nil { @@ -1684,7 +1694,7 @@ func 
VisitRefOfFuncExpr(in *FuncExpr, f Visit) error { if err := VisitIdentifierCI(in.Name, f); err != nil { return err } - if err := VisitSelectExprs(in.Exprs, f); err != nil { + if err := VisitExprs(in.Exprs, f); err != nil { return err } return nil @@ -1872,14 +1882,14 @@ func VisitRefOfGeomPropertyFuncExpr(in *GeomPropertyFuncExpr, f Visit) error { } return nil } -func VisitGroupBy(in GroupBy, f Visit) error { +func VisitRefOfGroupBy(in *GroupBy, f Visit) error { if in == nil { return nil } if cont, err := f(in); err != nil || !cont { return err } - for _, el := range in { + for _, el := range in.Exprs { if err := VisitExpr(el, f); err != nil { return err } @@ -1993,6 +2003,9 @@ func VisitRefOfInsert(in *Insert, f Visit) error { if err := VisitInsertRows(in.Rows, f); err != nil { return err } + if err := VisitRefOfRowAlias(in.RowAlias, f); err != nil { + return err + } if err := VisitOnDup(in.OnDup, f); err != nil { return err } @@ -2660,6 +2673,9 @@ func VisitRefOfMax(in *Max, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfMemberOfExpr(in *MemberOfExpr, f Visit) error { @@ -2687,6 +2703,9 @@ func VisitRefOfMin(in *Min, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfModifyColumn(in *ModifyColumn, f Visit) error { @@ -3415,6 +3434,21 @@ func VisitRootNode(in RootNode, f Visit) error { } return nil } +func VisitRefOfRowAlias(in *RowAlias, f Visit) error { + if in == nil { + return nil + } + if cont, err := f(in); err != nil || !cont { + return err + } + if err := VisitIdentifierCS(in.TableName, f); err != nil { + return err + } + if err := VisitColumns(in.Columns, f); err != nil { + return err + } + return nil +} func VisitRefOfSRollback(in *SRollback, f Visit) error { if in == nil { return nil @@ 
-3463,7 +3497,7 @@ func VisitRefOfSelect(in *Select, f Visit) error { if err := VisitRefOfWhere(in.Where, f); err != nil { return err } - if err := VisitGroupBy(in.GroupBy, f); err != nil { + if err := VisitRefOfGroupBy(in.GroupBy, f); err != nil { return err } if err := VisitRefOfWhere(in.Having, f); err != nil { @@ -3665,6 +3699,9 @@ func VisitRefOfStd(in *Std, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfStdDev(in *StdDev, f Visit) error { @@ -3677,6 +3714,9 @@ func VisitRefOfStdDev(in *StdDev, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfStdPop(in *StdPop, f Visit) error { @@ -3689,6 +3729,9 @@ func VisitRefOfStdPop(in *StdPop, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfStdSamp(in *StdSamp, f Visit) error { @@ -3701,6 +3744,9 @@ func VisitRefOfStdSamp(in *StdSamp, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfStream(in *Stream, f Visit) error { @@ -3826,6 +3872,9 @@ func VisitRefOfSum(in *Sum, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitTableExprs(in TableExprs, f Visit) error { @@ -4014,8 +4063,10 @@ func VisitRefOfUpdate(in *Update, f Visit) error { if err := VisitRefOfParsedComments(in.Comments, f); err != nil { return err } - if err := VisitTableExprs(in.TableExprs, f); err != nil { - return err + for _, el := range in.TableExprs { + if err := VisitTableExpr(el, f); err != 
nil { + return err + } } if err := VisitUpdateExprs(in.Exprs, f); err != nil { return err @@ -4188,6 +4239,9 @@ func VisitRefOfVarPop(in *VarPop, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfVarSamp(in *VarSamp, f Visit) error { @@ -4200,6 +4254,9 @@ func VisitRefOfVarSamp(in *VarSamp, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitRefOfVariable(in *Variable, f Visit) error { @@ -4224,6 +4281,9 @@ func VisitRefOfVariance(in *Variance, f Visit) error { if err := VisitExpr(in.Arg, f); err != nil { return err } + if err := VisitRefOfOverClause(in.OverClause, f); err != nil { + return err + } return nil } func VisitVindexParam(in VindexParam, f Visit) error { diff --git a/go/vt/sqlparser/cached_size.go b/go/vt/sqlparser/cached_size.go index d86b8a21155..94e004e5e5b 100644 --- a/go/vt/sqlparser/cached_size.go +++ b/go/vt/sqlparser/cached_size.go @@ -351,7 +351,7 @@ func (cached *Argument) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field Name string size += hack.RuntimeAllocSize(int64(len(cached.Name))) @@ -407,12 +407,14 @@ func (cached *Avg) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *Begin) CachedSize(alloc bool) int64 { @@ -506,12 +508,14 @@ func (cached *BitAnd) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if 
cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *BitOr) CachedSize(alloc bool) int64 { @@ -520,12 +524,14 @@ func (cached *BitOr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *BitXor) CachedSize(alloc bool) int64 { @@ -534,12 +540,14 @@ func (cached *BitXor) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *CallProc) CachedSize(alloc bool) int64 { @@ -723,10 +731,10 @@ func (cached *ColumnType) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Type))) // field Options *vitess.io/vitess/go/vt/sqlparser.ColumnTypeOptions size += cached.Options.CachedSize(true) - // field Length *vitess.io/vitess/go/vt/sqlparser.Literal - size += cached.Length.CachedSize(true) - // field Scale *vitess.io/vitess/go/vt/sqlparser.Literal - size += cached.Scale.CachedSize(true) + // field Length *int + size += hack.RuntimeAllocSize(int64(8)) + // field Scale *int + size += hack.RuntimeAllocSize(int64(8)) // field Charset vitess.io/vitess/go/vt/sqlparser.ColumnCharset size += cached.Charset.CachedSize(false) // field EnumValues []string @@ -905,10 +913,10 @@ func (cached *ConvertType) CachedSize(alloc bool) int64 { } // field Type string size += 
hack.RuntimeAllocSize(int64(len(cached.Type))) - // field Length *vitess.io/vitess/go/vt/sqlparser.Literal - size += cached.Length.CachedSize(true) - // field Scale *vitess.io/vitess/go/vt/sqlparser.Literal - size += cached.Scale.CachedSize(true) + // field Length *int + size += hack.RuntimeAllocSize(int64(8)) + // field Scale *int + size += hack.RuntimeAllocSize(int64(8)) // field Charset vitess.io/vitess/go/vt/sqlparser.ColumnCharset size += cached.Charset.CachedSize(false) return size @@ -935,7 +943,7 @@ func (cached *Count) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(32) + size += int64(48) } // field Args vitess.io/vitess/go/vt/sqlparser.Exprs { @@ -946,6 +954,8 @@ func (cached *Count) CachedSize(alloc bool) int64 { } } } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *CountStar) CachedSize(alloc bool) int64 { @@ -954,8 +964,10 @@ func (cached *CountStar) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(8) + size += int64(16) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *CreateDatabase) CachedSize(alloc bool) int64 { @@ -1106,14 +1118,7 @@ func (cached *Delete) CachedSize(alloc bool) int64 { size += cached.With.CachedSize(true) // field Comments *vitess.io/vitess/go/vt/sqlparser.ParsedComments size += cached.Comments.CachedSize(true) - // field Targets vitess.io/vitess/go/vt/sqlparser.TableNames - { - size += hack.RuntimeAllocSize(int64(cap(cached.Targets)) * int64(32)) - for _, elem := range cached.Targets { - size += elem.CachedSize(false) - } - } - // field TableExprs vitess.io/vitess/go/vt/sqlparser.TableExprs + // field TableExprs []vitess.io/vitess/go/vt/sqlparser.TableExpr { size += hack.RuntimeAllocSize(int64(cap(cached.TableExprs)) * int64(16)) for _, elem := range cached.TableExprs { @@ -1122,6 +1127,13 @@ 
func (cached *Delete) CachedSize(alloc bool) int64 { } } } + // field Targets vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += hack.RuntimeAllocSize(int64(cap(cached.Targets)) * int64(32)) + for _, elem := range cached.Targets { + size += elem.CachedSize(false) + } + } // field Partitions vitess.io/vitess/go/vt/sqlparser.Partitions { size += hack.RuntimeAllocSize(int64(cap(cached.Partitions)) * int64(32)) @@ -1432,7 +1444,7 @@ func (cached *FuncExpr) CachedSize(alloc bool) int64 { size += cached.Qualifier.CachedSize(false) // field Name vitess.io/vitess/go/vt/sqlparser.IdentifierCI size += cached.Name.CachedSize(false) - // field Exprs vitess.io/vitess/go/vt/sqlparser.SelectExprs + // field Exprs vitess.io/vitess/go/vt/sqlparser.Exprs { size += hack.RuntimeAllocSize(int64(cap(cached.Exprs)) * int64(16)) for _, elem := range cached.Exprs { @@ -1665,6 +1677,25 @@ func (cached *GeomPropertyFuncExpr) CachedSize(alloc bool) int64 { } return size } +func (cached *GroupBy) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Exprs []vitess.io/vitess/go/vt/sqlparser.Expr + { + size += hack.RuntimeAllocSize(int64(cap(cached.Exprs)) * int64(16)) + for _, elem := range cached.Exprs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} func (cached *GroupConcatExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1731,8 +1762,8 @@ func (cached *IndexColumn) CachedSize(alloc bool) int64 { } // field Column vitess.io/vitess/go/vt/sqlparser.IdentifierCI size += cached.Column.CachedSize(false) - // field Length *vitess.io/vitess/go/vt/sqlparser.Literal - size += cached.Length.CachedSize(true) + // field Length *int + size += hack.RuntimeAllocSize(int64(8)) // field Expression vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Expression.(cachedObject); ok { size += cc.CachedSize(true) @@ -1842,6 +1873,8 @@ 
func (cached *Insert) CachedSize(alloc bool) int64 { if cc, ok := cached.Rows.(cachedObject); ok { size += cc.CachedSize(true) } + // field RowAlias *vitess.io/vitess/go/vt/sqlparser.RowAlias + size += cached.RowAlias.CachedSize(true) // field OnDup vitess.io/vitess/go/vt/sqlparser.OnDup { size += hack.RuntimeAllocSize(int64(cap(cached.OnDup)) * int64(8)) @@ -2727,12 +2760,14 @@ func (cached *Max) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *MemberOfExpr) CachedSize(alloc bool) int64 { @@ -2759,12 +2794,14 @@ func (cached *Min) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *ModifyColumn) CachedSize(alloc bool) int64 { @@ -3064,12 +3101,24 @@ func (cached *ParsedQuery) CachedSize(alloc bool) int64 { } // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) - // field bindLocations []vitess.io/vitess/go/vt/sqlparser.bindLocation + // field bindLocations []vitess.io/vitess/go/vt/sqlparser.BindLocation { size += hack.RuntimeAllocSize(int64(cap(cached.bindLocations)) * int64(16)) } return size } +func (cached *Parser) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field version string + size += hack.RuntimeAllocSize(int64(len(cached.version))) + return size +} func (cached *PartitionDefinition) CachedSize(alloc bool) int64 { if 
cached == nil { return int64(0) @@ -3561,6 +3610,25 @@ func (cached *RevertMigration) CachedSize(alloc bool) int64 { size += cached.Comments.CachedSize(true) return size } +func (cached *RowAlias) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field TableName vitess.io/vitess/go/vt/sqlparser.IdentifierCS + size += cached.TableName.CachedSize(false) + // field Columns vitess.io/vitess/go/vt/sqlparser.Columns + { + size += hack.RuntimeAllocSize(int64(cap(cached.Columns)) * int64(32)) + for _, elem := range cached.Columns { + size += elem.CachedSize(false) + } + } + return size +} func (cached *SRollback) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -3591,7 +3659,7 @@ func (cached *Select) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(192) + size += int64(176) } // field Cache *bool size += hack.RuntimeAllocSize(int64(1)) @@ -3619,15 +3687,8 @@ func (cached *Select) CachedSize(alloc bool) int64 { } // field Where *vitess.io/vitess/go/vt/sqlparser.Where size += cached.Where.CachedSize(true) - // field GroupBy vitess.io/vitess/go/vt/sqlparser.GroupBy - { - size += hack.RuntimeAllocSize(int64(cap(cached.GroupBy)) * int64(16)) - for _, elem := range cached.GroupBy { - if cc, ok := elem.(cachedObject); ok { - size += cc.CachedSize(true) - } - } - } + // field GroupBy *vitess.io/vitess/go/vt/sqlparser.GroupBy + size += cached.GroupBy.CachedSize(true) // field Having *vitess.io/vitess/go/vt/sqlparser.Where size += cached.Having.CachedSize(true) // field Windows vitess.io/vitess/go/vt/sqlparser.NamedWindows @@ -3843,12 +3904,14 @@ func (cached *Std) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause 
*vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *StdDev) CachedSize(alloc bool) int64 { @@ -3857,12 +3920,14 @@ func (cached *StdDev) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *StdPop) CachedSize(alloc bool) int64 { @@ -3871,12 +3936,14 @@ func (cached *StdPop) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *StdSamp) CachedSize(alloc bool) int64 { @@ -3885,12 +3952,14 @@ func (cached *StdSamp) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *Stream) CachedSize(alloc bool) int64 { @@ -4012,12 +4081,14 @@ func (cached *Sum) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *TableAndLockType) CachedSize(alloc bool) int64 { @@ -4226,7 
+4297,7 @@ func (cached *Update) CachedSize(alloc bool) int64 { size += cached.With.CachedSize(true) // field Comments *vitess.io/vitess/go/vt/sqlparser.ParsedComments size += cached.Comments.CachedSize(true) - // field TableExprs vitess.io/vitess/go/vt/sqlparser.TableExprs + // field TableExprs []vitess.io/vitess/go/vt/sqlparser.TableExpr { size += hack.RuntimeAllocSize(int64(cap(cached.TableExprs)) * int64(16)) for _, elem := range cached.TableExprs { @@ -4371,12 +4442,14 @@ func (cached *VarPop) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *VarSamp) CachedSize(alloc bool) int64 { @@ -4385,12 +4458,14 @@ func (cached *VarSamp) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *Variable) CachedSize(alloc bool) int64 { @@ -4411,12 +4486,14 @@ func (cached *Variance) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field Arg vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Arg.(cachedObject); ok { size += cc.CachedSize(true) } + // field OverClause *vitess.io/vitess/go/vt/sqlparser.OverClause + size += cached.OverClause.CachedSize(true) return size } func (cached *VindexParam) CachedSize(alloc bool) int64 { diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go index 84b73f8e81c..780f1e67594 100644 --- a/go/vt/sqlparser/comments.go +++ 
b/go/vt/sqlparser/comments.go @@ -17,11 +17,13 @@ limitations under the License. package sqlparser import ( + "fmt" "strconv" "strings" "unicode" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" @@ -60,6 +62,9 @@ const ( // MaxPriorityValue specifies the maximum value allowed for the priority query directive. Valid priority values are // between zero and MaxPriorityValue. MaxPriorityValue = 100 + + // OptimizerHintSetVar is the optimizer hint used in MySQL to set the value of a specific session variable for a query. + OptimizerHintSetVar = "SET_VAR" ) var ErrInvalidPriority = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Invalid priority value specified in query") @@ -266,6 +271,206 @@ func (c *ParsedComments) Directives() *CommentDirectives { return c._directives } +// GetMySQLSetVarValue gets the value of the given variable if it is part of a /*+ SET_VAR() */ MySQL optimizer hint. +func (c *ParsedComments) GetMySQLSetVarValue(key string) string { + if c == nil { + // If we have no parsed comments, then we return an empty string. + return "" + } + for _, commentStr := range c.comments { + // Skip all the comments that don't start with the query optimizer prefix. + if commentStr[0:3] != queryOptimizerPrefix { + continue + } + + pos := 4 + for pos < len(commentStr) { + // Go over the entire comment and extract an optimizer hint. + // We get back the final position of the cursor, along with the start and end of + // the optimizer hint name and content. + finalPos, ohNameStart, ohNameEnd, ohContentStart, ohContentEnd := getOptimizerHint(pos, commentStr) + pos = finalPos + 1 + // If we didn't find an optimizer hint or if it was malformed, we skip it. + if ohContentEnd == -1 { + break + } + // Construct the name and the content from the starts and ends. 
+ ohName := commentStr[ohNameStart:ohNameEnd] + ohContent := commentStr[ohContentStart:ohContentEnd] + // Check if the optimizer hint name matches `SET_VAR`. + if strings.EqualFold(strings.TrimSpace(ohName), OptimizerHintSetVar) { + // If it does, then we cut the string at the first occurrence of "=". + // That gives us the name of the variable, and the value that it is being set to. + // If the variable matches what we are looking for, we return its value. + setVarName, setVarValue, isValid := strings.Cut(ohContent, "=") + if !isValid { + continue + } + if strings.EqualFold(strings.TrimSpace(setVarName), key) { + return strings.TrimSpace(setVarValue) + } + } + } + + // MySQL only parses the first comment that has the optimizer hint prefix. The following ones are ignored. + return "" + } + return "" +} + +// SetMySQLSetVarValue updates or sets the value of the given variable as part of a /*+ SET_VAR() */ MySQL optimizer hint. +func (c *ParsedComments) SetMySQLSetVarValue(key string, value string) (newComments Comments) { + if c == nil { + // If we have no parsed comments, then we create a new one with the required optimizer hint and return it. + newComments = append(newComments, fmt.Sprintf("/*+ %v(%v=%v) */", OptimizerHintSetVar, key, value)) + return + } + seenFirstOhComment := false + for _, commentStr := range c.comments { + // Skip all the comments that don't start with the query optimizer prefix. + // Also, since MySQL only parses the first comment that has the optimizer hint prefix and ignores the following ones, + // we skip over all the comments that come after we have seen the first comment with the optimizer hint. + if seenFirstOhComment || commentStr[0:3] != queryOptimizerPrefix { + newComments = append(newComments, commentStr) + continue + } + + seenFirstOhComment = true + finalComment := "/*+" + keyPresent := false + pos := 4 + for pos < len(commentStr) { + // Go over the entire comment and extract an optimizer hint. 
+ // We get back the final position of the cursor, along with the start and end of + // the optimizer hint name and content. + finalPos, ohNameStart, ohNameEnd, ohContentStart, ohContentEnd := getOptimizerHint(pos, commentStr) + pos = finalPos + 1 + // If we didn't find an optimizer hint or if it was malformed, we skip it. + if ohContentEnd == -1 { + break + } + // Construct the name and the content from the starts and ends. + ohName := commentStr[ohNameStart:ohNameEnd] + ohContent := commentStr[ohContentStart:ohContentEnd] + // Check if the optimizer hint name matches `SET_VAR`. + if strings.EqualFold(strings.TrimSpace(ohName), OptimizerHintSetVar) { + // If it does, then we cut the string at the first occurrence of "=". + // That gives us the name of the variable, and the value that it is being set to. + // If the variable matches what we are looking for, we can change its value. + // Otherwise we add the comment as is to our final comments and move on. + setVarName, _, isValid := strings.Cut(ohContent, "=") + if !isValid || !strings.EqualFold(strings.TrimSpace(setVarName), key) { + finalComment += fmt.Sprintf(" %v(%v)", ohName, ohContent) + continue + } + if strings.EqualFold(strings.TrimSpace(setVarName), key) { + keyPresent = true + finalComment += fmt.Sprintf(" %v(%v=%v)", ohName, strings.TrimSpace(setVarName), value) + } + } else { + // If it doesn't match, we add it to our final comment and move on. + finalComment += fmt.Sprintf(" %v(%v)", ohName, ohContent) + } + } + // If we haven't found any SET_VAR optimizer hint with the matching variable, + // then we add a new optimizer hint to introduce this variable. + if !keyPresent { + finalComment += fmt.Sprintf(" %v(%v=%v)", OptimizerHintSetVar, key, value) + } + + finalComment += " */" + newComments = append(newComments, finalComment) + } + // If we have not seen even a single comment that has the optimizer hint prefix, + // then we add a new optimizer hint to introduce this variable. 
+ if !seenFirstOhComment { + newComments = append(newComments, fmt.Sprintf("/*+ %v(%v=%v) */", OptimizerHintSetVar, key, value)) + } + return newComments +} + +// getOptimizerHint goes over the comment string from the given initial position. +// It returns back the final position of the cursor, along with the start and end of +// the optimizer hint name and content. +func getOptimizerHint(initialPos int, commentStr string) (pos int, ohNameStart int, ohNameEnd int, ohContentStart int, ohContentEnd int) { + ohContentEnd = -1 + // skip spaces as they aren't interesting. + pos = skipBlanks(initialPos, commentStr) + ohNameStart = pos + pos++ + // All characters until we get a space of a opening bracket are part of the optimizer hint name. + for pos < len(commentStr) { + if commentStr[pos] == ' ' || commentStr[pos] == '(' { + break + } + pos++ + } + // Mark the end of the optimizer hint name and skip spaces. + ohNameEnd = pos + pos = skipBlanks(pos, commentStr) + // Verify that the comment is not malformed. If it doesn't contain an opening bracket + // at the current position, then something is wrong. + if pos >= len(commentStr) || commentStr[pos] != '(' { + return + } + // Seeing the opening bracket, marks the start of the optimizer hint content. + // We skip over the comment until we see the end of the parenthesis. + pos++ + ohContentStart = pos + pos = skipUntilParenthesisEnd(pos, commentStr) + ohContentEnd = pos + return +} + +// skipUntilParenthesisEnd reads the comment string given the initial position and skips over until +// it has seen the end of opening bracket. +func skipUntilParenthesisEnd(pos int, commentStr string) int { + for pos < len(commentStr) { + switch commentStr[pos] { + case ')': + // If we see a closing bracket, we have found the ending of our parenthesis. + return pos + case '\'': + // If we see a single quote character, then it signifies the start of a new string. + // We wait until we see the end of this string. 
+ pos++ + pos = skipUntilCharacter(pos, commentStr, '\'') + case '"': + // If we see a double quote character, then it signifies the start of a new string. + // We wait until we see the end of this string. + pos++ + pos = skipUntilCharacter(pos, commentStr, '"') + } + pos++ + } + + return pos +} + +// skipUntilCharacter skips until the given character has been seen in the comment string, given the starting position. +func skipUntilCharacter(pos int, commentStr string, ch byte) int { + for pos < len(commentStr) { + if commentStr[pos] != ch { + pos++ + continue + } + break + } + return pos +} + +// skipBlanks skips over space characters from the comment string, given the starting position. +func skipBlanks(pos int, commentStr string) int { + for pos < len(commentStr) { + if commentStr[pos] == ' ' { + pos++ + continue + } + break + } + return pos +} + func (c *ParsedComments) Length() int { if c == nil { return 0 @@ -349,6 +554,27 @@ func AllowScatterDirective(stmt Statement) bool { return checkDirective(stmt, DirectiveAllowScatter) } +// ForeignKeyChecksState returns the state of foreign_key_checks variable if it is part of a SET_VAR optimizer hint in the comments. +func ForeignKeyChecksState(stmt Statement) *bool { + cmt, ok := stmt.(Commented) + if ok { + fkChecksVal := cmt.GetParsedComments().GetMySQLSetVarValue(sysvars.ForeignKeyChecks) + // If the value of the `foreign_key_checks` optimizer hint is something that doesn't make sense, + // then MySQL just ignores it and treats it like the case, where it is unspecified. We are choosing + // to have the same behaviour here. If the value doesn't match any of the acceptable values, we return nil, + // that signifies that no value was specified. 
+ switch strings.ToLower(fkChecksVal) { + case "on", "1": + fkState := true + return &fkState + case "off", "0": + fkState := false + return &fkState + } + } + return nil +} + func checkDirective(stmt Statement, key string) bool { cmt, ok := stmt.(Commented) if ok { diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go index b3c1bf9fec8..42d02e35652 100644 --- a/go/vt/sqlparser/comments_test.go +++ b/go/vt/sqlparser/comments_test.go @@ -18,12 +18,12 @@ package sqlparser import ( "fmt" - "reflect" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/sysvars" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -142,15 +142,9 @@ func TestSplitComments(t *testing.T) { gotSQL, gotComments := SplitMarginComments(testCase.input) gotLeadingComments, gotTrailingComments := gotComments.Leading, gotComments.Trailing - if gotSQL != testCase.outSQL { - t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) - } - if gotLeadingComments != testCase.outLeadingComments { - t.Errorf("test input: '%s', got LeadingComments\n%+v, want\n%+v", testCase.input, gotLeadingComments, testCase.outLeadingComments) - } - if gotTrailingComments != testCase.outTrailingComments { - t.Errorf("test input: '%s', got TrailingComments\n%+v, want\n%+v", testCase.input, gotTrailingComments, testCase.outTrailingComments) - } + assert.Equal(t, testCase.outSQL, gotSQL, "SQL mismatch") + assert.Equal(t, testCase.outLeadingComments, gotLeadingComments, "LeadingComments mismatch") + assert.Equal(t, testCase.outTrailingComments, gotTrailingComments, "TrailingCommints mismatch") }) } } @@ -224,10 +218,7 @@ a`, }} for _, testCase := range testCases { gotSQL := StripLeadingComments(testCase.input) - - if gotSQL != testCase.outSQL { - t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) - } + assert.Equal(t, 
testCase.outSQL, gotSQL) } } @@ -253,10 +244,8 @@ func TestExtractMysqlComment(t *testing.T) { }} for _, testCase := range testCases { gotVersion, gotSQL := ExtractMysqlComment(testCase.input) + assert.Equal(t, testCase.outVersion, gotVersion, "version mismatch") - if gotVersion != testCase.outVersion { - t.Errorf("test input: '%s', got version\n%+v, want\n%+v", testCase.input, gotVersion, testCase.outVersion) - } if gotSQL != testCase.outSQL { t.Errorf("test input: '%s', got SQL\n%+v, want\n%+v", testCase.input, gotSQL, testCase.outSQL) } @@ -321,6 +310,7 @@ func TestExtractCommentDirectives(t *testing.T) { }, }} + parser := NewTestParser() for _, testCase := range testCases { t.Run(testCase.input, func(t *testing.T) { sqls := []string{ @@ -338,7 +328,7 @@ func TestExtractCommentDirectives(t *testing.T) { for _, sql := range sqls { t.Run(sql, func(t *testing.T) { var comments *ParsedComments - stmt, _ := Parse(sql) + stmt, _ := parser.Parse(sql) switch s := stmt.(type) { case *Select: comments = s.Comments @@ -367,9 +357,8 @@ func TestExtractCommentDirectives(t *testing.T) { require.Nil(t, vals) return } - if !reflect.DeepEqual(vals.m, testCase.vals) { - t.Errorf("test input: '%v', got vals %T:\n%+v, want %T\n%+v", testCase.input, vals, vals, testCase.vals, testCase.vals) - } + + assert.Equal(t, testCase.vals, vals.m) }) } }) @@ -393,19 +382,20 @@ func TestExtractCommentDirectives(t *testing.T) { } func TestSkipQueryPlanCacheDirective(t *testing.T) { - stmt, _ := Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") + parser := NewTestParser() + stmt, _ := parser.Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("insert into user(id) values (1), (2)") + stmt, _ = parser.Parse("insert into user(id) values (1), (2)") assert.True(t, CachePlan(stmt)) - stmt, _ = Parse("update /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") + stmt, _ = parser.Parse("update /*vt+ 
SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") + stmt, _ = parser.Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") + stmt, _ = parser.Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") assert.False(t, CachePlan(stmt)) } @@ -426,9 +416,10 @@ func TestIgnoreMaxPayloadSizeDirective(t *testing.T) { {"show create table users", false}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := IgnoreMaxPayloadSizeDirective(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) }) @@ -452,9 +443,10 @@ func TestIgnoreMaxMaxMemoryRowsDirective(t *testing.T) { {"show create table users", false}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := IgnoreMaxMaxMemoryRowsDirective(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) }) @@ -478,9 +470,10 @@ func TestConsolidator(t *testing.T) { {"select /*vt+ CONSOLIDATOR=enabled_replicas */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_ENABLED_REPLICAS}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := Consolidator(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("Consolidator(stmt) returned %v but expected %v", got, test.expected)) }) @@ -535,19 +528,136 @@ func TestGetPriorityFromStatement(t *testing.T) { }, } + parser := NewTestParser() for _, 
testCase := range testCases { - theThestCase := testCase - t.Run(theThestCase.query, func(t *testing.T) { + t.Run(testCase.query, func(t *testing.T) { t.Parallel() - stmt, err := Parse(theThestCase.query) + stmt, err := parser.Parse(testCase.query) assert.NoError(t, err) actualPriority, actualError := GetPriorityFromStatement(stmt) - if theThestCase.expectedError != nil { - assert.ErrorIs(t, actualError, theThestCase.expectedError) + if testCase.expectedError != nil { + assert.ErrorIs(t, actualError, testCase.expectedError) } else { assert.NoError(t, err) - assert.Equal(t, theThestCase.expectedPriority, actualPriority) + assert.Equal(t, testCase.expectedPriority, actualPriority) + } + }) + } +} + +// TestGetMySQLSetVarValue tests the functionality of GetMySQLSetVarValue +func TestGetMySQLSetVarValue(t *testing.T) { + tests := []struct { + name string + comments []string + valToFind string + want string + }{ + { + name: "SET_VAR clause in the middle", + comments: []string{"/*+ NO_RANGE_OPTIMIZATION(t3 PRIMARY, f2_idx) SET_VAR(foreign_key_checks=OFF) NO_ICP(t1, t2) */"}, + valToFind: sysvars.ForeignKeyChecks, + want: "OFF", + }, + { + name: "Single SET_VAR clause", + comments: []string{"/*+ SET_VAR(sort_buffer_size = 16M) */"}, + valToFind: "sort_buffer_size", + want: "16M", + }, + { + name: "No comments", + comments: nil, + valToFind: "sort_buffer_size", + want: "", + }, + { + name: "Multiple SET_VAR clauses", + comments: []string{"/*+ SET_VAR(sort_buffer_size = 16M) */", "/*+ SET_VAR(optimizer_switch = 'mrr_cost_b(ased=of\"f') */", "/*+ SET_VAR( foReiGn_key_checks = On) */"}, + valToFind: sysvars.ForeignKeyChecks, + want: "", + }, + { + name: "Verify casing", + comments: []string{"/*+ SET_VAR(optimizer_switch = 'mrr_cost_b(ased=of\"f') SET_VAR( foReiGn_key_checks = On) */"}, + valToFind: sysvars.ForeignKeyChecks, + want: "On", + }, + { + name: "Leading comment is a normal comment", + comments: []string{"/* This is a normal comment */", "/*+ MAX_EXECUTION_TIME(1000) 
SET_VAR( foreign_key_checks = 1) */"}, + valToFind: sysvars.ForeignKeyChecks, + want: "1", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ParsedComments{ + comments: tt.comments, + } + assert.Equal(t, tt.want, c.GetMySQLSetVarValue(tt.valToFind)) + }) + } +} + +func TestSetMySQLSetVarValue(t *testing.T) { + tests := []struct { + name string + comments []string + key string + value string + commentsWanted Comments + }{ + { + name: "SET_VAR clause in the middle", + comments: []string{"/*+ NO_RANGE_OPTIMIZATION(t3 PRIMARY, f2_idx) SET_VAR(foreign_key_checks=OFF) NO_ICP(t1, t2) */"}, + key: sysvars.ForeignKeyChecks, + value: "On", + commentsWanted: []string{"/*+ NO_RANGE_OPTIMIZATION(t3 PRIMARY, f2_idx) SET_VAR(foreign_key_checks=On) NO_ICP(t1, t2) */"}, + }, + { + name: "Single SET_VAR clause", + comments: []string{"/*+ SET_VAR(sort_buffer_size = 16M) */"}, + key: "sort_buffer_size", + value: "1Mb", + commentsWanted: []string{"/*+ SET_VAR(sort_buffer_size=1Mb) */"}, + }, + { + name: "No comments", + comments: nil, + key: "sort_buffer_size", + value: "13M", + commentsWanted: []string{"/*+ SET_VAR(sort_buffer_size=13M) */"}, + }, + { + name: "Multiple SET_VAR clauses", + comments: []string{"/*+ SET_VAR(sort_buffer_size = 16M) */", "/*+ SET_VAR(optimizer_switch = 'mrr_cost_b(ased=of\"f') */", "/*+ SET_VAR( foReiGn_key_checks = On) */"}, + key: sysvars.ForeignKeyChecks, + value: "1", + commentsWanted: []string{"/*+ SET_VAR(sort_buffer_size = 16M) SET_VAR(foreign_key_checks=1) */", "/*+ SET_VAR(optimizer_switch = 'mrr_cost_b(ased=of\"f') */", "/*+ SET_VAR( foReiGn_key_checks = On) */"}, + }, + { + name: "Verify casing", + comments: []string{"/*+ SET_VAR(optimizer_switch = 'mrr_cost_b(ased=of\"f') SET_VAR( foReiGn_key_checks = On) */"}, + key: sysvars.ForeignKeyChecks, + value: "off", + commentsWanted: []string{"/*+ SET_VAR(optimizer_switch = 'mrr_cost_b(ased=of\"f') SET_VAR(foReiGn_key_checks=off) */"}, + }, + { + name: "Leading comment 
is a normal comment", + comments: []string{"/* This is a normal comment */", "/*+ MAX_EXECUTION_TIME(1000) SET_VAR( foreign_key_checks = 1) */"}, + key: sysvars.ForeignKeyChecks, + value: "Off", + commentsWanted: []string{"/* This is a normal comment */", "/*+ MAX_EXECUTION_TIME(1000) SET_VAR(foreign_key_checks=Off) */"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ParsedComments{ + comments: tt.comments, } + newComments := c.SetMySQLSetVarValue(tt.key, tt.value) + require.EqualValues(t, tt.commentsWanted, newComments) }) } } diff --git a/go/vt/sqlparser/constants.go b/go/vt/sqlparser/constants.go index 3848c53f3e0..b1f33184ec0 100644 --- a/go/vt/sqlparser/constants.go +++ b/go/vt/sqlparser/constants.go @@ -27,9 +27,14 @@ const ( SQLCalcFoundRowsStr = "sql_calc_found_rows " // Select.Lock - NoLockStr = "" - ForUpdateStr = " for update" - ShareModeStr = " lock in share mode" + NoLockStr = "" + ForUpdateStr = " for update" + ForUpdateNoWaitStr = " for update nowait" + ForUpdateSkipLockedStr = " for update skip locked" + ForShareStr = " for share" + ForShareNoWaitStr = " for share nowait" + ForShareSkipLockedStr = " for share skip locked" + ShareModeStr = " lock in share mode" // Select.Cache SQLCacheStr = "sql_cache " @@ -117,10 +122,15 @@ const ( NaturalLeftJoinStr = "natural left join" NaturalRightJoinStr = "natural right join" - // Index hints. - UseStr = "use " + // IgnoreStr string. IgnoreStr = "ignore " - ForceStr = "force " + + // Index hints. + UseStr = "use index" + IgnoreIndexStr = "ignore index" + ForceStr = "force index" + UseVindexStr = "use vindex" + IgnoreVindexStr = "ignore vindex" // Index hints For types. 
JoinForStr = "join" @@ -257,10 +267,8 @@ const ( EmptyStr = "" TreeStr = "tree" JSONStr = "json" - VitessStr = "vitess" TraditionalStr = "traditional" AnalyzeStr = "analyze" - VTExplainStr = "vtexplain" QueriesStr = "queries" AllVExplainStr = "all" PlanStr = "plan" @@ -309,6 +317,7 @@ const ( VitessTargetStr = " vitess_target" VitessVariablesStr = " vitess_metadata variables" VschemaTablesStr = " vschema tables" + VschemaKeyspacesStr = " vschema keyspaces" VschemaVindexesStr = " vschema vindexes" WarningsStr = " warnings" @@ -513,8 +522,13 @@ const ( // Constants for Enum Type - Lock const ( NoLock Lock = iota - ForUpdateLock ShareModeLock + ForShareLock + ForShareLockNoWait + ForShareLockSkipLocked + ForUpdateLock + ForUpdateLockNoWait + ForUpdateLockSkipLocked ) // Constants for Enum Type - TrimType @@ -663,8 +677,8 @@ const ( NotRegexpOp ) -func Inverse(in ComparisonExprOperator) ComparisonExprOperator { - switch in { +func (op ComparisonExprOperator) Inverse() ComparisonExprOperator { + switch op { case EqualOp: return NotEqualOp case LessThanOp: @@ -695,6 +709,15 @@ func Inverse(in ComparisonExprOperator) ComparisonExprOperator { panic("unreachable") } +func (op ComparisonExprOperator) IsCommutative() bool { + switch op { + case EqualOp, NotEqualOp, NullSafeEqualOp: + return true + default: + return false + } +} + // Constant for Enum Type - IsExprOperator const ( IsNullOp IsExprOperator = iota @@ -751,6 +774,8 @@ const ( UseOp IndexHintType = iota IgnoreOp ForceOp + UseVindexOp + IgnoreVindexOp ) // Constant for Enum Type - IndexHintForType @@ -799,8 +824,6 @@ const ( EmptyType ExplainType = iota TreeType JSONType - VitessType - VTExplainType TraditionalType AnalyzeType ) @@ -881,6 +904,7 @@ const ( VitessTarget VitessVariables VschemaTables + VschemaKeyspaces VschemaVindexes Warnings Keyspace @@ -916,6 +940,8 @@ const ( ThrottleAllMigrationType UnthrottleMigrationType UnthrottleAllMigrationType + ForceCutOverMigrationType + ForceCutOverAllMigrationType ) // 
ColumnStorage constants diff --git a/go/vt/sqlparser/goyacc/goyacc.go b/go/vt/sqlparser/goyacc/goyacc.go index 5864b5090b4..51650b0891e 100644 --- a/go/vt/sqlparser/goyacc/goyacc.go +++ b/go/vt/sqlparser/goyacc/goyacc.go @@ -49,7 +49,6 @@ import ( "bufio" "bytes" "fmt" - "go/format" "os" "regexp" "sort" @@ -58,6 +57,8 @@ import ( "unicode" "github.com/spf13/pflag" + + "vitess.io/vitess/go/tools/codegen" ) // the following are adjustable @@ -3326,7 +3327,7 @@ func exit(status int) { if ftable != nil { ftable.Flush() ftable = nil - gofmt() + _ = codegen.GoImports(oflag) } if foutput != nil { foutput.Flush() @@ -3339,18 +3340,6 @@ func exit(status int) { os.Exit(status) } -func gofmt() { - src, err := os.ReadFile(oflag) - if err != nil { - return - } - src, err = format.Source(src) - if err != nil { - return - } - os.WriteFile(oflag, src, 0666) -} - const fastAppendHelperText = ` func $$Iaddr(v any) __yyunsafe__.Pointer { type h struct { diff --git a/go/vt/sqlparser/impossible_query.go b/go/vt/sqlparser/impossible_query.go index 512931f1db7..a6bf1ea8736 100644 --- a/go/vt/sqlparser/impossible_query.go +++ b/go/vt/sqlparser/impossible_query.go @@ -27,6 +27,9 @@ package sqlparser func FormatImpossibleQuery(buf *TrackedBuffer, node SQLNode) { switch node := node.(type) { case *Select: + if node.With != nil { + node.With.Format(buf) + } buf.Myprintf("select %v from ", node.SelectExprs) var prefix string for _, n := range node.From { diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go index 36c329d8e0a..ef5aa80bff1 100644 --- a/go/vt/sqlparser/keywords.go +++ b/go/vt/sqlparser/keywords.go @@ -285,6 +285,7 @@ var keywords = []keyword{ {"following", FOLLOWING}, {"for", FOR}, {"force", FORCE}, + {"force_cutover", FORCE_CUTOVER}, {"foreign", FOREIGN}, {"format", FORMAT}, {"format_bytes", FORMAT_BYTES}, @@ -413,6 +414,7 @@ var keywords = []keyword{ {"localtimestamp", LOCALTIMESTAMP}, {"locate", LOCATE}, {"lock", LOCK}, + {"locked", LOCKED}, {"logs", LOGS}, 
{"long", UNUSED}, {"longblob", LONGBLOB}, @@ -420,6 +422,7 @@ var keywords = []keyword{ {"loop", UNUSED}, {"low_priority", LOW_PRIORITY}, {"ltrim", LTRIM}, + {"mid", MID}, {"min", MIN}, {"manifest", MANIFEST}, {"master_bind", UNUSED}, @@ -457,6 +460,7 @@ var keywords = []keyword{ {"none", NONE}, {"not", NOT}, {"now", NOW}, + {"nowait", NOWAIT}, {"no_write_to_binlog", NO_WRITE_TO_BINLOG}, {"nth_value", NTH_VALUE}, {"ntile", NTILE}, @@ -549,6 +553,7 @@ var keywords = []keyword{ {"right", RIGHT}, {"rlike", RLIKE}, {"rollback", ROLLBACK}, + {"rollup", ROLLUP}, {"row", ROW}, {"row_format", ROW_FORMAT}, {"row_number", ROW_NUMBER}, @@ -575,6 +580,7 @@ var keywords = []keyword{ {"signal", UNUSED}, {"signed", SIGNED}, {"simple", SIMPLE}, + {"skip", SKIP}, {"slow", SLOW}, {"smallint", SMALLINT}, {"snapshot", SNAPSHOT}, @@ -716,6 +722,7 @@ var keywords = []keyword{ {"unicode", UNICODE}, {"union", UNION}, {"unique", UNIQUE}, + {"unknown", UNKNOWN}, {"unlock", UNLOCK}, {"unsigned", UNSIGNED}, {"unthrottle", UNTHROTTLE}, @@ -814,14 +821,6 @@ func (cit *caseInsensitiveTable) LookupString(name string) (int, bool) { return 0, false } -func (cit *caseInsensitiveTable) Lookup(name []byte) (int, bool) { - hash := fnv1aI(offset64, name) - if candidate, ok := cit.h[hash]; ok { - return candidate.id, candidate.match(name) - } - return 0, false -} - func init() { for _, kw := range keywords { if kw.id == UNUSED { @@ -849,16 +848,6 @@ func KeywordString(id int) string { const offset64 = uint64(14695981039346656037) const prime64 = uint64(1099511628211) -func fnv1aI(h uint64, s []byte) uint64 { - for _, c := range s { - if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - } - h = (h ^ uint64(c)) * prime64 - } - return h -} - func fnv1aIstr(h uint64, s string) uint64 { for i := 0; i < len(s); i++ { c := s[i] diff --git a/go/vt/sqlparser/keywords_test.go b/go/vt/sqlparser/keywords_test.go index 0209ee20352..d386339a57f 100644 --- a/go/vt/sqlparser/keywords_test.go +++ 
b/go/vt/sqlparser/keywords_test.go @@ -32,6 +32,7 @@ func TestCompatibility(t *testing.T) { require.NoError(t, err) defer file.Close() + parser := NewTestParser() scanner := bufio.NewScanner(file) skipStep := 4 for scanner.Scan() { @@ -46,7 +47,7 @@ func TestCompatibility(t *testing.T) { word = "`" + word + "`" } sql := fmt.Sprintf("create table %s(c1 int)", word) - _, err := ParseStrictDDL(sql) + _, err := parser.ParseStrictDDL(sql) if err != nil { t.Errorf("%s is not compatible with mysql", word) } diff --git a/go/vt/sqlparser/like_filter_test.go b/go/vt/sqlparser/like_filter_test.go index 242e45e2f8d..3249eb152b9 100644 --- a/go/vt/sqlparser/like_filter_test.go +++ b/go/vt/sqlparser/like_filter_test.go @@ -30,7 +30,8 @@ func TestEmptyLike(t *testing.T) { } func TestLikePrefixRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like 'key%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like 'key%'") if e != nil { t.Error(e) } @@ -42,7 +43,8 @@ func TestLikePrefixRegexp(t *testing.T) { } func TestLikeAnyCharsRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '%val1%val2%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '%val1%val2%'") if e != nil { t.Error(e) } @@ -54,7 +56,8 @@ func TestLikeAnyCharsRegexp(t *testing.T) { } func TestSingleAndMultipleCharsRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '_val1_val2%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '_val1_val2%'") if e != nil { t.Error(e) } @@ -66,7 +69,8 @@ func TestSingleAndMultipleCharsRegexp(t *testing.T) { } func TestSpecialCharactersRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '?.*?'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '?.*?'") if e != nil { t.Error(e) } @@ -78,7 +82,8 @@ func 
TestSpecialCharactersRegexp(t *testing.T) { } func TestQuoteLikeSpecialCharacters(t *testing.T) { - show, e := Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) + parser := NewTestParser() + show, e := parser.Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) if e != nil { t.Error(e) } diff --git a/go/vt/sqlparser/literal.go b/go/vt/sqlparser/literal.go index 71fed3d7d16..bde53798a19 100644 --- a/go/vt/sqlparser/literal.go +++ b/go/vt/sqlparser/literal.go @@ -87,8 +87,8 @@ func LiteralToValue(lit *Literal) (sqltypes.Value, error) { buf := datetime.Date_YYYY_MM_DD.Format(datetime.DateTime{Date: d}, 0) return sqltypes.NewDate(hack.String(buf)), nil case TimeVal: - t, l, ok := datetime.ParseTime(lit.Val, -1) - if !ok { + t, l, state := datetime.ParseTime(lit.Val, -1) + if state != datetime.TimeOK { return sqltypes.Value{}, fmt.Errorf("invalid time literal: %v", lit.Val) } buf := datetime.Time_hh_mm_ss.Format(datetime.DateTime{Time: t}, uint8(l)) diff --git a/go/vt/sqlparser/normalizer.go b/go/vt/sqlparser/normalizer.go index 3cc5fc4cb60..48311a39a7a 100644 --- a/go/vt/sqlparser/normalizer.go +++ b/go/vt/sqlparser/normalizer.go @@ -148,8 +148,8 @@ func (nz *normalizer) walkUpSelect(cursor *Cursor) bool { } parent := cursor.Parent() switch parent.(type) { - case *Order, GroupBy: - return false + case *Order, *GroupBy: + return true case *Limit: nz.convertLiteral(node, cursor) default: @@ -165,7 +165,7 @@ func validateLiteral(node *Literal) error { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Incorrect DATE value: '%s'", node.Val) } case TimeVal: - if _, _, ok := datetime.ParseTime(node.Val, -1); !ok { + if _, _, state := datetime.ParseTime(node.Val, -1); state != datetime.TimeOK { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Incorrect TIME value: '%s'", node.Val) } case TimestampVal: @@ -207,7 +207,12 @@ func (nz *normalizer) convertLiteralDedup(node *Literal, cursor *Cursor) { } // 
Modify the AST node to a bindvar. - cursor.Replace(NewTypedArgument(bvname, node.SQLType())) + arg, err := NewTypedArgumentFromLiteral(bvname, node) + if err != nil { + nz.err = err + return + } + cursor.Replace(arg) } // convertLiteral converts an Literal without the dedup. @@ -224,7 +229,12 @@ func (nz *normalizer) convertLiteral(node *Literal, cursor *Cursor) { bvname := nz.reserved.nextUnusedVar() nz.bindVars[bvname] = bval - cursor.Replace(NewTypedArgument(bvname, node.SQLType())) + arg, err := NewTypedArgumentFromLiteral(bvname, node) + if err != nil { + nz.err = err + return + } + cursor.Replace(arg) } // convertComparison attempts to convert IN clauses to @@ -268,7 +278,12 @@ func (nz *normalizer) parameterize(left, right Expr) Expr { return nil } bvname := nz.decideBindVarName(lit, col, bval) - return NewTypedArgument(bvname, lit.SQLType()) + arg, err := NewTypedArgumentFromLiteral(bvname, lit) + if err != nil { + nz.err = err + return nil + } + return arg } func (nz *normalizer) decideBindVarName(lit *Literal, col *ColName, bval *querypb.BindVariable) string { diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index 7c8c5e7a963..19b0cfbcac6 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -17,12 +17,12 @@ limitations under the License. 
package sqlparser import ( - "bytes" "fmt" - "math/rand" + "math/rand/v2" "reflect" "regexp" "strconv" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -75,14 +75,28 @@ func TestNormalize(t *testing.T) { }, { // float val in: "select * from t where foobar = 1.2", - outstmt: "select * from t where foobar = :foobar /* DECIMAL */", + outstmt: "select * from t where foobar = :foobar /* DECIMAL(2,1) */", outbv: map[string]*querypb.BindVariable{ "foobar": sqltypes.DecimalBindVariable("1.2"), }, + }, { + // datetime val + in: "select * from t where foobar = timestamp'2012-02-29 12:34:56.123456'", + outstmt: "select * from t where foobar = :foobar /* DATETIME(6) */", + outbv: map[string]*querypb.BindVariable{ + "foobar": sqltypes.ValueBindVariable(sqltypes.NewDatetime("2012-02-29 12:34:56.123456")), + }, + }, { + // time val + in: "select * from t where foobar = time'12:34:56.123456'", + outstmt: "select * from t where foobar = :foobar /* TIME(6) */", + outbv: map[string]*querypb.BindVariable{ + "foobar": sqltypes.ValueBindVariable(sqltypes.NewTime("12:34:56.123456")), + }, }, { // multiple vals in: "select * from t where foo = 1.2 and bar = 2", - outstmt: "select * from t where foo = :foo /* DECIMAL */ and bar = :bar /* INT64 */", + outstmt: "select * from t where foo = :foo /* DECIMAL(2,1) */ and bar = :bar /* INT64 */", outbv: map[string]*querypb.BindVariable{ "foo": sqltypes.DecimalBindVariable("1.2"), "bar": sqltypes.Int64BindVariable(2), @@ -379,10 +393,29 @@ func TestNormalize(t *testing.T) { "v1": sqltypes.HexValBindVariable([]byte("x'31'")), "v2": sqltypes.Int64BindVariable(31), }, + }, { + // ORDER BY and GROUP BY variable + in: "select a, b from t group by 1, field(a,1,2,3) order by 1 asc, field(a,1,2,3)", + outstmt: "select a, b from t group by 1, field(a, :bv1 /* INT64 */, :bv2 /* INT64 */, :bv3 /* INT64 */) order by 1 asc, field(a, :bv1 /* INT64 */, :bv2 /* INT64 */, :bv3 /* INT64 */) asc", + outbv: map[string]*querypb.BindVariable{ + "bv1": 
sqltypes.Int64BindVariable(1), + "bv2": sqltypes.Int64BindVariable(2), + "bv3": sqltypes.Int64BindVariable(3), + }, + }, { + // list in on duplicate key update + in: "insert into t(a, b) values (1, 2) on duplicate key update b = if(values(b) in (1, 2), b, values(b))", + outstmt: "insert into t(a, b) values (:bv1 /* INT64 */, :bv2 /* INT64 */) on duplicate key update b = if(values(b) in ::bv3, b, values(b))", + outbv: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + "bv2": sqltypes.Int64BindVariable(2), + "bv3": sqltypes.TestBindVariable([]any{1, 2}), + }, }} + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(t, err) known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) @@ -407,9 +440,10 @@ func TestNormalizeInvalidDates(t *testing.T) { in: "select timestamp'foo'", err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect DATETIME value: '%s'", "foo"), }} + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(t, err) known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) @@ -419,12 +453,13 @@ func TestNormalizeInvalidDates(t *testing.T) { } func TestNormalizeValidSQL(t *testing.T) { + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.partialDDL || tcase.ignoreNormalizerTest { return } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err, tcase.input) // Skip the test for the queries that do not run the normalizer if !CanNormalize(tree) { @@ -438,7 +473,7 @@ func TestNormalizeValidSQL(t *testing.T) { if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { return } - _, err = Parse(normalizerOutput) + _, err = 
parser.Parse(normalizerOutput) require.NoError(t, err, normalizerOutput) }) } @@ -454,7 +489,8 @@ func TestNormalizeOneCasae(t *testing.T) { if testOne.input == "" { t.Skip("empty test case") } - tree, err := Parse(testOne.input) + parser := NewTestParser() + tree, err := parser.Parse(testOne.input) require.NoError(t, err, testOne.input) // Skip the test for the queries that do not run the normalizer if !CanNormalize(tree) { @@ -468,12 +504,13 @@ func TestNormalizeOneCasae(t *testing.T) { if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { return } - _, err = Parse(normalizerOutput) + _, err = parser.Parse(normalizerOutput) require.NoError(t, err, normalizerOutput) } func TestGetBindVars(t *testing.T) { - stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") + parser := NewTestParser() + stmt, err := parser.Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") if err != nil { t.Fatal(err) } @@ -497,8 +534,9 @@ Prior to skip: BenchmarkNormalize-8 500000 3620 ns/op 1461 B/op 55 allocs/op */ func BenchmarkNormalize(b *testing.B) { + parser := NewTestParser() sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" - ast, reservedVars, err := Parse2(sql) + ast, reservedVars, err := parser.Parse2(sql) if err != nil { b.Fatal(err) } @@ -508,6 +546,7 @@ func BenchmarkNormalize(b *testing.B) { } func BenchmarkNormalizeTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -518,7 +557,7 @@ func BenchmarkNormalizeTraces(b *testing.B) { parsed := make([]Statement, 0, len(queries)) reservedVars := make([]BindVars, 0, len(queries)) for _, q := range queries { - pp, kb, err := Parse2(q) + pp, kb, err := parser.Parse2(q) if err != nil { b.Fatal(err) } @@ -540,6 +579,7 @@ func BenchmarkNormalizeTraces(b *testing.B) { func BenchmarkNormalizeVTGate(b *testing.B) { 
const keyspace = "main_keyspace" + parser := NewTestParser() queries := loadQueries(b, "lobsters.sql.gz") if len(queries) > 10000 { @@ -551,7 +591,7 @@ func BenchmarkNormalizeVTGate(b *testing.B) { for i := 0; i < b.N; i++ { for _, sql := range queries { - stmt, reservedVars, err := Parse2(sql) + stmt, reservedVars, err := parser.Parse2(sql) if err != nil { b.Fatal(err) } @@ -573,6 +613,7 @@ func BenchmarkNormalizeVTGate(b *testing.B) { SQLSelectLimitUnset, "", nil, /*sysvars*/ + nil, nil, /*views*/ ) if err != nil { @@ -598,9 +639,9 @@ func randtmpl(template string) string { for i, c := range result { switch c { case '#': - result[i] = numberBytes[rand.Intn(len(numberBytes))] + result[i] = numberBytes[rand.IntN(len(numberBytes))] case '@': - result[i] = letterBytes[rand.Intn(len(letterBytes))] + result[i] = letterBytes[rand.IntN(len(letterBytes))] } } return string(result) @@ -610,7 +651,7 @@ func randString(n int) string { const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" b := make([]byte, n) for i := range b { - b[i] = letterBytes[rand.Intn(len(letterBytes))] + b[i] = letterBytes[rand.IntN(len(letterBytes))] } return string(b) } @@ -625,19 +666,19 @@ values func BenchmarkNormalizeTPCCInsert(b *testing.B) { generateInsert := func(rows int) string { - var query bytes.Buffer + var query strings.Builder query.WriteString("INSERT IGNORE INTO customer0 (c_id, c_d_id, c_w_id, c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, c_since, c_credit, c_credit_lim, c_discount, c_balance, c_ytd_payment, c_payment_cnt, c_delivery_cnt, c_data) values ") for i := 0; i < rows; i++ { fmt.Fprintf(&query, "(%d, %d, %d, '%s','OE','%s','%s', '%s', '%s', '%s', '%s','%s',NOW(),'%s',50000,%f,-10,10,1,0,'%s' )", rand.Int(), rand.Int(), rand.Int(), - "first-"+randString(rand.Intn(10)), + "first-"+randString(rand.IntN(10)), randtmpl("last-@@@@"), randtmpl("street1-@@@@@@@@@@@@"), randtmpl("street2-@@@@@@@@@@@@"), 
randtmpl("city-@@@@@@@@@@@@"), randtmpl("@@"), randtmpl("zip-#####"), randtmpl("################"), - "GC", rand.Float64(), randString(300+rand.Intn(200)), + "GC", rand.Float64(), randString(300+rand.IntN(200)), ) if i < rows-1 { query.WriteString(", ") @@ -846,9 +887,10 @@ func benchmarkNormalization(b *testing.B, sqls []string) { b.Helper() b.ReportAllocs() b.ResetTimer() + parser := NewTestParser() for i := 0; i < b.N; i++ { for _, sql := range sqls { - stmt, reserved, err := Parse2(sql) + stmt, reserved, err := parser.Parse2(sql) if err != nil { b.Fatalf("%v: %q", err, sql) } @@ -864,6 +906,7 @@ func benchmarkNormalization(b *testing.B, sqls []string) { "", nil, nil, + nil, ) if err != nil { b.Fatal(err) diff --git a/go/vt/sqlparser/parse_next_test.go b/go/vt/sqlparser/parse_next_test.go index 2e55fbb8a9a..687bb7fbb51 100644 --- a/go/vt/sqlparser/parse_next_test.go +++ b/go/vt/sqlparser/parse_next_test.go @@ -17,7 +17,6 @@ limitations under the License. package sqlparser import ( - "bytes" "io" "strings" "testing" @@ -29,13 +28,14 @@ import ( // TestParseNextValid concatenates all the valid SQL test cases and check it can read // them as one long string. func TestParseNextValid(t *testing.T) { - var sql bytes.Buffer + var sql strings.Builder for _, tcase := range validSQL { sql.WriteString(strings.TrimSuffix(tcase.input, ";")) sql.WriteRune(';') } - tokens := NewStringTokenizer(sql.String()) + parser := NewTestParser() + tokens := parser.NewStringTokenizer(sql.String()) for _, tcase := range validSQL { want := tcase.output if want == "" { @@ -55,7 +55,8 @@ func TestParseNextValid(t *testing.T) { func TestIgnoreSpecialComments(t *testing.T) { input := `SELECT 1;/*! 
ALTER TABLE foo DISABLE KEYS */;SELECT 2;` - tokenizer := NewStringTokenizer(input) + parser := NewTestParser() + tokenizer := parser.NewStringTokenizer(input) tokenizer.SkipSpecialComments = true one, err := ParseNextStrictDDL(tokenizer) require.NoError(t, err) @@ -68,6 +69,7 @@ func TestIgnoreSpecialComments(t *testing.T) { // TestParseNextErrors tests all the error cases, and ensures a valid // SQL statement can be passed afterwards. func TestParseNextErrors(t *testing.T) { + parser := NewTestParser() for _, tcase := range invalidSQL { if tcase.excludeMulti { // Skip tests which leave unclosed strings, or comments. @@ -75,7 +77,7 @@ func TestParseNextErrors(t *testing.T) { } t.Run(tcase.input, func(t *testing.T) { sql := tcase.input + "; select 1 from t" - tokens := NewStringTokenizer(sql) + tokens := parser.NewStringTokenizer(sql) // The first statement should be an error _, err := ParseNextStrictDDL(tokens) @@ -134,9 +136,9 @@ func TestParseNextEdgeCases(t *testing.T) { input: "create table a ignore me this is garbage; select 1 from a", want: []string{"create table a", "select 1 from a"}, }} - + parser := NewTestParser() for _, test := range tests { - tokens := NewStringTokenizer(test.input) + tokens := parser.NewStringTokenizer(test.input) for i, want := range test.want { tree, err := ParseNext(tokens) @@ -166,7 +168,8 @@ func TestParseNextStrictNonStrict(t *testing.T) { want := []string{"create table a", "select 1 from a"} // First go through as expected with non-strict DDL parsing. - tokens := NewStringTokenizer(input) + parser := NewTestParser() + tokens := parser.NewStringTokenizer(input) for i, want := range want { tree, err := ParseNext(tokens) if err != nil { @@ -178,7 +181,7 @@ func TestParseNextStrictNonStrict(t *testing.T) { } // Now try again with strict parsing and observe the expected error. 
- tokens = NewStringTokenizer(input) + tokens = parser.NewStringTokenizer(input) _, err := ParseNextStrictDDL(tokens) if err == nil || !strings.Contains(err.Error(), "ignore") { t.Fatalf("ParseNext(%q) err = %q, want ignore", input, err) diff --git a/go/vt/sqlparser/parse_table.go b/go/vt/sqlparser/parse_table.go index 8766994ecfd..d522a855054 100644 --- a/go/vt/sqlparser/parse_table.go +++ b/go/vt/sqlparser/parse_table.go @@ -23,8 +23,8 @@ import ( // ParseTable parses the input as a qualified table name. // It handles all valid literal escaping. -func ParseTable(input string) (keyspace, table string, err error) { - tokenizer := NewStringTokenizer(input) +func (p *Parser) ParseTable(input string) (keyspace, table string, err error) { + tokenizer := p.NewStringTokenizer(input) // Start, want ID token, value := tokenizer.Scan() diff --git a/go/vt/sqlparser/parse_table_test.go b/go/vt/sqlparser/parse_table_test.go index 09e7ea44177..5f187cbc6d0 100644 --- a/go/vt/sqlparser/parse_table_test.go +++ b/go/vt/sqlparser/parse_table_test.go @@ -56,8 +56,9 @@ func TestParseTable(t *testing.T) { input: "k.t.", err: true, }} + parser := NewTestParser() for _, tcase := range testcases { - keyspace, table, err := ParseTable(tcase.input) + keyspace, table, err := parser.ParseTable(tcase.input) assert.Equal(t, tcase.keyspace, keyspace) assert.Equal(t, tcase.table, table) if tcase.err { diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index 1837a104e4c..93f74cfacbc 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -18,23 +18,21 @@ package sqlparser import ( "bufio" - "bytes" "compress/gzip" "fmt" "io" - "math/rand" + "math/rand/v2" "os" "path" "strings" "sync" "testing" - "vitess.io/vitess/go/test/utils" - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/utils" ) var ( @@ -507,6 +505,9 @@ var ( }, { input: "WITH topsales2003 AS (SELECT 
salesRepEmployeeNumber employeeNumber, SUM(quantityOrdered * priceEach) sales FROM orders INNER JOIN orderdetails USING (orderNumber) INNER JOIN customers USING (customerNumber) WHERE YEAR(shippedDate) = 2003 AND status = 'Shipped' GROUP BY salesRepEmployeeNumber ORDER BY sales DESC LIMIT 5)SELECT employeeNumber, firstName, lastName, sales FROM employees JOIN topsales2003 USING (employeeNumber)", output: "with topsales2003 as (select salesRepEmployeeNumber as employeeNumber, sum(quantityOrdered * priceEach) as sales from orders join orderdetails using (orderNumber) join customers using (customerNumber) where YEAR(shippedDate) = 2003 and `status` = 'Shipped' group by salesRepEmployeeNumber order by sales desc limit 5) select employeeNumber, firstName, lastName, sales from employees join topsales2003 using (employeeNumber)", + }, { + input: "WITH count_a AS (SELECT COUNT(`id`) AS `num` FROM `tbl_a`), count_b AS (SELECT COUNT(`id`) AS `num` FROM tbl_b) SELECT 'a', `num` FROM `count_a` UNION SELECT 'b', `num` FROM `count_b`", + output: "with count_a as (select count(id) as num from tbl_a) , count_b as (select count(id) as num from tbl_b) select 'a', num from count_a union select 'b', num from count_b", }, { input: "select 1 from t", }, { @@ -693,8 +694,18 @@ var ( input: "select /* distinct */ distinct 1 from t", }, { input: "select /* straight_join */ straight_join 1 from t", + }, { + input: "select /* for share */ 1 from t for share", + }, { + input: "select /* for share */ 1 from t for share nowait", + }, { + input: "select /* for share */ 1 from t for share skip locked", }, { input: "select /* for update */ 1 from t for update", + }, { + input: "select /* for update */ 1 from t for update nowait", + }, { + input: "select /* for update */ 1 from t for update skip locked", }, { input: "select /* lock in share mode */ 1 from t lock in share mode", }, { @@ -1140,6 +1151,8 @@ var ( output: "select * from t where id = (select a from t1 union select b from t2 order by a 
asc limit 1)", }, { input: "select /* order by asc */ 1 from t order by a asc", + }, { + input: "select a, b, c, count(*), sum(foo) from t group by a, b, c with rollup", }, { input: "select /* order by desc */ 1 from t order by a desc", }, { @@ -1287,6 +1300,12 @@ var ( input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(b), c = d", }, { input: "insert /* bool in on duplicate */ into a values (1, 2, 3) on duplicate key update b = values(a.b), c = d", + }, { + input: "insert into a values (1, 2, 3) as `a_values`", + output: "insert into a values (1, 2, 3) as a_values", + }, { + input: "insert into a values (1, 2, 3) as `a_values` (`foo`, bar, baz)", + output: "insert into a values (1, 2, 3) as a_values (foo, bar, baz)", }, { input: "insert /* bool expression on duplicate */ into a values (1, 2) on duplicate key update b = func(a), c = a > d", }, { @@ -1351,7 +1370,7 @@ var ( input: "delete /* limit */ from a limit b", }, { input: "delete /* alias where */ t.* from a as t where t.id = 2", - output: "delete /* alias where */ t from a as t where t.id = 2", + output: "delete /* alias where */ from a as t where t.id = 2", }, { input: "delete t.* from t, t1", output: "delete t from t, t1", @@ -1756,6 +1775,24 @@ var ( }, { input: "alter schema d collate = 'utf8_bin' character set = geostd8 character set = geostd8", output: "alter database d collate 'utf8_bin' character set geostd8 character set geostd8", + }, { + input: `DROP INDEX Indexes ON mydb.mytable`, + output: "alter table mydb.mytable drop key `Indexes`", + }, { + input: `create index Indexes on b (col1)`, + output: "alter table b add key `Indexes` (col1)", + }, { + input: `create fulltext index Indexes on b (col1)`, + output: "alter table b add fulltext key `Indexes` (col1)", + }, { + input: `create spatial index Indexes on b (col1)`, + output: "alter table b add spatial key `Indexes` (col1)", + }, { + input: "alter table a alter index indexes visible, alter index 
indexes invisible", + output: "alter table a alter index `indexes` visible, alter index `indexes` invisible", + }, { + input: "alter table a add spatial key indexes (column1)", + output: "alter table a add spatial key `indexes` (column1)", }, { input: "create table a", partialDDL: true, @@ -2369,6 +2406,8 @@ var ( input: "show vitess_targets", }, { input: "show vschema tables", + }, { + input: "show vschema keyspaces", }, { input: "show vschema vindexes", }, { @@ -2410,6 +2449,13 @@ var ( input: "alter vitess_migration complete all", }, { input: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' cancel", + }, { + input: "alter vitess_migration force_cutover all", + }, { + input: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' force_cutover", + }, { + input: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' FORCE_CUTOVER", + output: "alter vitess_migration '9748c3b7_7fdb_11eb_ac2c_f875a4d24e90' force_cutover", }, { input: "alter vitess_migration cancel all", }, { @@ -2447,6 +2493,10 @@ var ( }, { input: "show foobar like select * from table where syntax is 'ignored'", output: "show foobar", + }, { + // Making sure "force_cutover" is not a keyword + input: "select force_cutover from t", + output: "select `force_cutover` from t", }, { input: "use db", output: "use db", @@ -2502,16 +2552,6 @@ var ( input: "explain format = tree select * from t", }, { input: "explain format = json select * from t", - }, { - input: "explain format = vtexplain select * from t", - }, { - input: "explain format = vitess select * from t", - }, { - input: "describe format = vitess select * from t", - output: "explain format = vitess select * from t", - }, { - input: "describe format = vtexplain select * from t", - output: "explain format = vtexplain select * from t", }, { input: "explain delete from t", }, { @@ -2648,6 +2688,20 @@ var ( }, { input: "SELECT id FROM blog_posts USE INDEX (PRIMARY) WHERE id = 10", output: "select id from blog_posts use 
index (`PRIMARY`) where id = 10", + }, { + input: "select * from payment_pulls ignore vindex (lookup_vindex_name) where customer_id in (1, 10) and payment_id = 5", + }, { + input: "select * from payment_pulls ignore vindex (lookup_vindex_name, x, t) order by id", + output: "select * from payment_pulls ignore vindex (lookup_vindex_name, x, t) order by id asc", + }, { + input: "select * from payment_pulls use vindex (lookup_vindex_name) where customer_id in (1, 10) and payment_id = 5", + }, { + input: "select * from payment_pulls use vindex (lookup_vindex_name, x, t) order by id", + output: "select * from payment_pulls use vindex (lookup_vindex_name, x, t) order by id asc", + }, { + input: "select * from payment_pulls use vindex (lookup_vindex_name, x, t) ignore vindex (x, t)", + }, { + input: "select * from payment_pulls use vindex (lookup_vindex_name, x, t) ignore vindex (x, t) join tab ignore vindex (y)", }, { input: "select name, group_concat(score) from t group by name", output: "select `name`, group_concat(score) from t group by `name`", @@ -3668,6 +3722,72 @@ var ( }, { input: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`, output: `select * from t1 where col1 like 'ks\_' and col2 = 'ks\_' and col1 like 'ks_' and col2 = 'ks_'`, + }, { + input: "select 1 from dual where 'bac' = 'b' 'a' 'c'", + output: "select 1 from dual where 'bac' = 'bac'", + }, { + input: "select 'b' 'a' 'c'", + output: "select 'bac' from dual", + }, { + input: "select 1 where 'bac' = N'b' 'a' 'c'", + output: "select 1 from dual where 'bac' = N'bac'", + /*We need to ignore this test because, after the normalizer, we change the produced NChar + string into an introducer expression, so the vttablet will never see a NChar string */ + ignoreNormalizerTest: true, + }, { + input: "select _ascii 'b' 'a' 'c'", + output: "select _ascii 'bac' from dual", + }, { + input: "SELECT time, subject, AVG(val) OVER (PARTITION BY time, subject) AS window_result 
FROM observations GROUP BY time, subject;", + output: "select `time`, subject, avg(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, BIT_AND(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, bit_and(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, BIT_OR(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, bit_or(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, BIT_XOR(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, bit_xor(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, COUNT(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, count(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, COUNT(*) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, count(*) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, MAX(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, max(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, MIN(val) OVER (PARTITION BY 
time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, min(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, STD(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, std(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, STDDEV(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, stddev(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, STDDEV_POP(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, stddev_pop(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, STDDEV_SAMP(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, stddev_samp(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, SUM(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, sum(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, VAR_POP(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, var_pop(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: 
"SELECT time, subject, VAR_SAMP(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, var_samp(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT time, subject, VARIANCE(val) OVER (PARTITION BY time, subject) AS window_result FROM observations GROUP BY time, subject;", + output: "select `time`, subject, variance(val) over ( partition by `time`, subject) as window_result from observations group by `time`, subject", + }, { + input: "SELECT id, coalesce( (SELECT Json_arrayagg(Json_array(id)) FROM (SELECT *, Row_number() over (ORDER BY users.order ASC) FROM unsharded as users WHERE users.purchaseorderid = orders.id) users), json_array()) AS users, coalesce( (SELECT json_arrayagg(json_array(id)) FROM (SELECT *, row_number() over (ORDER BY tests.order ASC) FROM unsharded as tests WHERE tests.purchaseorderid = orders.id) tests), json_array()) AS tests FROM unsharded as orders WHERE orders.id = 'xxx'", + output: "select id, coalesce((select Json_arrayagg(json_array(id)) from (select *, row_number() over ( order by users.`order` asc) from unsharded as users where users.purchaseorderid = orders.id) as users), json_array()) as users, coalesce((select json_arrayagg(json_array(id)) from (select *, row_number() over ( order by tests.`order` asc) from unsharded as tests where tests.purchaseorderid = orders.id) as tests), json_array()) as tests from unsharded as orders where orders.id = 'xxx'", }, { input: `kill connection 18446744073709551615`, }, { @@ -3675,16 +3795,20 @@ var ( }, { input: `kill 18446744073709551615`, output: `kill connection 18446744073709551615`, + }, { + input: `select * from tbl where foo is unknown or bar is not unknown`, + output: `select * from tbl where foo is null or bar is not null`, }} ) func TestValid(t *testing.T) { + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, 
func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err, tcase.input) out := String(tree) assert.Equal(t, tcase.output, out) @@ -3716,15 +3840,16 @@ func TestParallelValid(t *testing.T) { wg := sync.WaitGroup{} wg.Add(parallelism) + parser := NewTestParser() for i := 0; i < parallelism; i++ { go func() { defer wg.Done() for j := 0; j < numIters; j++ { - tcase := validSQL[rand.Intn(len(validSQL))] + tcase := validSQL[rand.IntN(len(validSQL))] if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err) continue @@ -3923,9 +4048,10 @@ func TestInvalid(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) require.Error(t, err) require.Contains(t, err.Error(), tcase.err) }) @@ -4063,12 +4189,13 @@ func TestIntroducers(t *testing.T) { input: "select _utf8mb3 'x'", output: "select _utf8mb3 'x' from dual", }} + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) assert.NoError(t, err) out := String(tree) assert.Equal(t, tcase.output, out) @@ -4157,11 +4284,12 @@ func TestCaseSensitivity(t *testing.T) { }, { input: "select /* use */ 1 from t1 use index (A) where b = 1", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4256,11 +4384,12 @@ func TestKeywords(t *testing.T) { output: "select 
current_user(), current_user() from dual", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4333,11 +4462,12 @@ func TestConvert(t *testing.T) { input: "select cast(json_keys(c) as char(64) array) from t", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4381,7 +4511,7 @@ func TestConvert(t *testing.T) { }} for _, tcase := range invalidSQL { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -4419,12 +4549,13 @@ func TestSelectInto(t *testing.T) { output: "alter vschema create vindex my_vdx using `hash`", }} + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err) out := String(tree) assert.Equal(t, tcase.output, out) @@ -4443,7 +4574,7 @@ func TestSelectInto(t *testing.T) { }} for _, tcase := range invalidSQL { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -4480,8 +4611,9 @@ func TestPositionedErr(t *testing.T) { output: PositionedErr{"syntax error", 34, ""}, }} + parser := NewTestParser() for _, tcase := range invalidSQL { - tkn := NewStringTokenizer(tcase.input) + tkn := parser.NewStringTokenizer(tcase.input) _, err := ParseNext(tkn) if posErr, ok := err.(PositionedErr); !ok { 
@@ -4530,11 +4662,12 @@ func TestSubStr(t *testing.T) { output: `select substr(substr('foo', 1), 2) from t`, }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4554,8 +4687,9 @@ func TestLoadData(t *testing.T) { "load data infile 'x.txt' into table 'c'", "load data from s3 'x.txt' into table x"} + parser := NewTestParser() for _, tcase := range validSQL { - _, err := Parse(tcase) + _, err := parser.Parse(tcase) require.NoError(t, err) } } @@ -4662,6 +4796,7 @@ func TestCreateTable(t *testing.T) { primary key (id), spatial key geom (geom), fulltext key fts (full_name), + fulltext key indexes (full_name), unique key by_username (username), unique key by_username2 (username), unique key by_username3 (username), @@ -4678,6 +4813,7 @@ func TestCreateTable(t *testing.T) { primary key (id), spatial key geom (geom), fulltext key fts (full_name), + fulltext key ` + "`indexes`" + ` (full_name), unique key by_username (username), unique key by_username2 (username), unique key by_username3 (username), @@ -4862,6 +4998,7 @@ func TestCreateTable(t *testing.T) { primary key (id, username), key by_email (email(10), username), constraint second_ibfk_1 foreign key (k, j) references t2 (a, b), + constraint indexes foreign key (k, j) references t2 (a, b), constraint second_ibfk_1 foreign key (k, j) references t2 (a, b) on delete restrict, constraint second_ibfk_1 foreign key (k, j) references t2 (a, b) on delete no action, constraint second_ibfk_1 foreign key (k, j) references t2 (a, b) on delete cascade on update set default, @@ -4898,6 +5035,7 @@ func TestCreateTable(t *testing.T) { primary key (id, username), key by_email (email(10), username), constraint second_ibfk_1 foreign key (k, j) references t2 (a, b), + constraint ` + "`indexes`" + ` foreign key (k, j) 
references t2 (a, b), constraint second_ibfk_1 foreign key (k, j) references t2 (a, b) on delete restrict, constraint second_ibfk_1 foreign key (k, j) references t2 (a, b) on delete no action, constraint second_ibfk_1 foreign key (k, j) references t2 (a, b) on delete cascade on update set default, @@ -5732,10 +5870,11 @@ partition by range (YEAR(purchased)) subpartition by hash (TO_DAYS(purchased)) output: "create table t (\n\tid int,\n\tinfo JSON,\n\tkey zips ((cast(info -> '$.field' as unsigned array)))\n)", }, } + parser := NewTestParser() for _, test := range createTableQueries { sql := strings.TrimSpace(test.input) t.Run(sql, func(t *testing.T) { - tree, err := ParseStrictDDL(sql) + tree, err := parser.ParseStrictDDL(sql) require.NoError(t, err) got := String(tree) expected := test.output @@ -5758,7 +5897,8 @@ func TestOne(t *testing.T) { return } sql := strings.TrimSpace(testOne.input) - tree, err := Parse(sql) + parser := NewTestParser() + tree, err := parser.Parse(sql) require.NoError(t, err) got := String(tree) expected := testOne.output @@ -5787,8 +5927,9 @@ func TestCreateTableLike(t *testing.T) { "create table ks.a like unsharded_ks.b", }, } + parser := NewTestParser() for _, tcase := range testCases { - tree, err := ParseStrictDDL(tcase.input) + tree, err := parser.ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -5817,8 +5958,9 @@ func TestCreateTableEscaped(t *testing.T) { "\tprimary key (`delete`)\n" + ")", }} + parser := NewTestParser() for _, tcase := range testCases { - tree, err := ParseStrictDDL(tcase.input) + tree, err := parser.ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -5944,6 +6086,18 @@ var ( input: "create table 2t.3t2 (c1 bigint not null, c2 text, primary key(c1))", output: "syntax error at position 18 near '.3'", excludeMulti: true, + }, { + input: "ALTER TABLE t ADD PARTITION (PARTITION p10 VALUES LESS THAN (10)), ADD 
PARTITION (PARTITION p20 VALUES LESS THAN (20))", + output: "syntax error at position 67", + }, { + input: "ALTER TABLE t DROP PARTITION p1, DROP PARTITION p2", + output: "syntax error at position 38 near 'DROP'", + }, { + input: "ALTER TABLE t DROP PARTITION p1, ADD COLUMN c INT", + output: "syntax error at position 37 near 'ADD'", + }, { + input: "ALTER TABLE t ADD COLUMN c INT, DROP PARTITION p1", + output: "syntax error at position 47 near 'PARTITION'", }, { input: "execute stmt1 using a, @b", output: "syntax error at position 22 near 'a'", @@ -5963,9 +6117,10 @@ var ( ) func TestErrors(t *testing.T) { + parser := NewTestParser() for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { - _, err := ParseStrictDDL(tcase.input) + _, err := parser.ParseStrictDDL(tcase.input) require.Error(t, err, tcase.output) require.Equal(t, tcase.output, err.Error()) }) @@ -5998,8 +6153,9 @@ func TestSkipToEnd(t *testing.T) { input: "create table a bb 'a;'; select * from t", output: "extra characters encountered after end of DDL: 'select'", }} + parser := NewTestParser() for _, tcase := range testcases { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -6031,8 +6187,9 @@ func loadQueries(t testing.TB, filename string) (queries []string) { } func TestParseDjangoQueries(t *testing.T) { + parser := NewTestParser() for _, query := range loadQueries(t, "django_queries.txt") { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { t.Errorf("failed to parse %q: %v", query, err) } @@ -6040,8 +6197,9 @@ func TestParseDjangoQueries(t *testing.T) { } func TestParseLobstersQueries(t *testing.T) { + parser := NewTestParser() for _, query := range loadQueries(t, "lobsters.sql.gz") { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { t.Errorf("failed to parse %q: %v", query, err) } @@ -6056,14 +6214,14 @@ 
func TestParseVersionedComments(t *testing.T) { }{ { input: `CREATE TABLE table1 (id int) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 /*!50900 PARTITION BY RANGE (id) (PARTITION x VALUES LESS THAN (5) ENGINE = InnoDB, PARTITION t VALUES LESS THAN (20) ENGINE = InnoDB) */`, - mysqlVersion: "50401", + mysqlVersion: "5.4.1", output: `create table table1 ( id int ) ENGINE InnoDB, CHARSET utf8mb4`, }, { input: `CREATE TABLE table1 (id int) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 /*!50900 PARTITION BY RANGE (id) (PARTITION x VALUES LESS THAN (5) ENGINE = InnoDB, PARTITION t VALUES LESS THAN (20) ENGINE = InnoDB) */`, - mysqlVersion: "80001", + mysqlVersion: "8.0.1", output: `create table table1 ( id int ) ENGINE InnoDB, @@ -6076,10 +6234,9 @@ partition by range (id) for _, testcase := range testcases { t.Run(testcase.input+":"+testcase.mysqlVersion, func(t *testing.T) { - oldMySQLVersion := mySQLParserVersion - defer func() { mySQLParserVersion = oldMySQLVersion }() - mySQLParserVersion = testcase.mysqlVersion - tree, err := Parse(testcase.input) + parser, err := New(Options{MySQLServerVersion: testcase.mysqlVersion}) + require.NoError(t, err) + tree, err := parser.Parse(testcase.input) require.NoError(t, err, testcase.input) out := String(tree) require.Equal(t, testcase.output, out) @@ -6088,6 +6245,7 @@ partition by range (id) } func BenchmarkParseTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -6099,7 +6257,7 @@ func BenchmarkParseTraces(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { b.Fatal(err) } @@ -6116,16 +6274,17 @@ func BenchmarkParseStress(b *testing.B) { sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = 
iiii and iiii = jjjj order by kkkk, llll limit 3, 4" ) + parser := NewTestParser() for i, sql := range []string{sql1, sql2} { b.Run(fmt.Sprintf("sql%d", i), func(b *testing.B) { - var buf bytes.Buffer + var buf strings.Builder buf.WriteString(sql) querySQL := buf.String() b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := Parse(querySQL) + _, err := parser.Parse(querySQL) if err != nil { b.Fatal(err) } @@ -6143,7 +6302,7 @@ func BenchmarkParse3(b *testing.B) { // Size of value is 1/10 size of query. Then we add // 10 such values to the where clause. - var baseval bytes.Buffer + var baseval strings.Builder for i := 0; i < benchQuerySize/100; i++ { // Add an escape character: This will force the upcoming // tokenizer improvement to still create a copy of the string. @@ -6155,7 +6314,7 @@ func BenchmarkParse3(b *testing.B) { } } - var buf bytes.Buffer + var buf strings.Builder buf.WriteString("select a from t1 where v = 1") for i := 0; i < 10; i++ { fmt.Fprintf(&buf, " and v%d = \"%d%s\"", i, i, baseval.String()) @@ -6164,8 +6323,9 @@ func BenchmarkParse3(b *testing.B) { b.ResetTimer() b.ReportAllocs() + parser := NewTestParser() for i := 0; i < b.N; i++ { - if _, err := Parse(benchQuery); err != nil { + if _, err := parser.Parse(benchQuery); err != nil { b.Fatal(err) } } @@ -6216,6 +6376,7 @@ func escapeNewLines(in string) string { } func testFile(t *testing.T, filename, tempDir string) { + parser := NewTestParser() t.Run(filename, func(t *testing.T) { fail := false expected := strings.Builder{} @@ -6225,7 +6386,7 @@ func testFile(t *testing.T, filename, tempDir string) { tcase.output = tcase.input } expected.WriteString(fmt.Sprintf("%sINPUT\n%s\nEND\n", tcase.comments, escapeNewLines(tcase.input))) - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if tcase.errStr != "" { errPresent := "" if err != nil { @@ -6328,7 +6489,7 @@ func parsePartial(r *bufio.Reader, readType []string, lineno int, fileName strin if 
returnTypeNumber != -1 { break } - panic(fmt.Errorf("error reading file %s: line %d: %s - Expected keyword", fileName, lineno, err.Error())) + panic(fmt.Errorf("error reading file %s: line %d: Expected keyword", fileName, lineno)) } input := "" for { diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go index b6b03a1901a..a612e555ee8 100644 --- a/go/vt/sqlparser/parsed_query.go +++ b/go/vt/sqlparser/parsed_query.go @@ -21,12 +21,7 @@ import ( "fmt" "strings" - "vitess.io/vitess/go/bytes2" - vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -34,11 +29,12 @@ import ( // bind locations are precomputed for fast substitutions. type ParsedQuery struct { Query string - bindLocations []bindLocation + bindLocations []BindLocation + truncateUILen int } -type bindLocation struct { - offset, length int +type BindLocation struct { + Offset, Length int } // NewParsedQuery returns a ParsedQuery of the ast. 
@@ -67,8 +63,8 @@ func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVaria func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) error { current := 0 for _, loc := range pq.bindLocations { - buf.WriteString(pq.Query[current:loc.offset]) - name := pq.Query[loc.offset : loc.offset+loc.length] + buf.WriteString(pq.Query[current:loc.Offset]) + name := pq.Query[loc.Offset : loc.Offset+loc.Length] if encodable, ok := extras[name[1:]]; ok { encodable.EncodeSQL(buf) } else { @@ -78,86 +74,19 @@ func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*qu } EncodeValue(buf, supplied) } - current = loc.offset + loc.length + current = loc.Offset + loc.Length } buf.WriteString(pq.Query[current:]) return nil } -// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that -// the fields in the row are in the same order as the placeholders in this query. The fields might include generated -// columns which are dropped, by checking against skipFields, before binding the variables -// note: there can be more fields than bind locations since extra columns might be requested from the source if not all -// primary keys columns are present in the target table, for example. 
Also some values in the row may not correspond for -// values from the database on the source: sum/count for aggregation queries, for example -func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error { - if len(fields) < len(pq.bindLocations) { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", - len(fields), len(pq.bindLocations)) - } - - type colInfo struct { - typ querypb.Type - length int64 - offset int64 - } - rowInfo := make([]*colInfo, 0) - - offset := int64(0) - for i, field := range fields { // collect info required for fields to be bound - length := row.Lengths[i] - if !skipFields[strings.ToLower(field.Name)] { - rowInfo = append(rowInfo, &colInfo{ - typ: field.Type, - length: length, - offset: offset, - }) - } - if length > 0 { - offset += row.Lengths[i] - } - } - - // bind field values to locations - var offsetQuery int - for i, loc := range pq.bindLocations { - col := rowInfo[i] - buf.WriteString(pq.Query[offsetQuery:loc.offset]) - typ := col.typ - - switch typ { - case querypb.Type_TUPLE: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) - case querypb.Type_JSON: - if col.length < 0 { // An SQL NULL and not an actual JSON value - buf.WriteString(sqltypes.NullStr) - } else { // A JSON value (which may be a JSON null literal value) - buf2 := row.Values[col.offset : col.offset+col.length] - vv, err := vjson.MarshalSQLValue(buf2) - if err != nil { - return err - } - buf.WriteString(vv.RawStr()) - } - default: - if col.length < 0 { - // -1 means a null variable; serialize it directly - buf.WriteString(sqltypes.NullStr) - } else { - vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) - vv.EncodeSQLBytes2(buf) - } - } - offsetQuery = loc.offset + loc.length - } - buf.WriteString(pq.Query[offsetQuery:]) - return nil +func (pq *ParsedQuery) BindLocations() 
[]BindLocation { + return pq.bindLocations } // MarshalJSON is a custom JSON marshaler for ParsedQuery. -// Note that any queries longer that 512 bytes will be truncated. func (pq *ParsedQuery) MarshalJSON() ([]byte, error) { - return json.Marshal(TruncateForUI(pq.Query)) + return json.Marshal(pq.Query) } // EncodeValue encodes one bind variable value into the query. diff --git a/go/vt/sqlparser/parsed_query_test.go b/go/vt/sqlparser/parsed_query_test.go index 8c89a51984d..ef59676883f 100644 --- a/go/vt/sqlparser/parsed_query_test.go +++ b/go/vt/sqlparser/parsed_query_test.go @@ -27,7 +27,8 @@ import ( ) func TestNewParsedQuery(t *testing.T) { - stmt, err := Parse("select * from a where id =:id") + parser := NewTestParser() + stmt, err := parser.Parse("select * from a where id =:id") if err != nil { t.Error(err) return @@ -35,7 +36,7 @@ func TestNewParsedQuery(t *testing.T) { pq := NewParsedQuery(stmt) want := &ParsedQuery{ Query: "select * from a where id = :id", - bindLocations: []bindLocation{{offset: 27, length: 3}}, + bindLocations: []BindLocation{{Offset: 27, Length: 3}}, } if !reflect.DeepEqual(pq, want) { t.Errorf("GenerateParsedQuery: %+v, want %+v", pq, want) @@ -135,8 +136,9 @@ func TestGenerateQuery(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range tcases { - tree, err := Parse(tcase.query) + tree, err := parser.Parse(tcase.query) if err != nil { t.Errorf("parse failed for %s: %v", tcase.desc, err) continue diff --git a/go/vt/sqlparser/parser.go b/go/vt/sqlparser/parser.go index ae630ce3dea..d4948396ae5 100644 --- a/go/vt/sqlparser/parser.go +++ b/go/vt/sqlparser/parser.go @@ -17,22 +17,20 @@ limitations under the License. 
package sqlparser import ( + "errors" "fmt" "io" "strconv" "strings" "sync" - "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -var versionFlagSync sync.Once - // parserPool is a pool for parser objects. var parserPool = sync.Pool{ New: func() any { @@ -43,9 +41,6 @@ var parserPool = sync.Pool{ // zeroParser is a zero-initialized parser to help reinitialize the parser for pooling. var zeroParser yyParserImpl -// mySQLParserVersion is the version of MySQL that the parser would emulate -var mySQLParserVersion string - // yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a // particularly good reason to use yyParse directly, since it immediately discards its parser. // @@ -80,12 +75,12 @@ func yyParsePooled(yylex yyLexer) int { // bind variables that were found in the original SQL query. If a DDL statement // is partially parsed but still contains a syntax error, the // error is ignored and the DDL is returned anyway. 
-func Parse2(sql string) (Statement, BindVars, error) { - tokenizer := NewStringTokenizer(sql) +func (p *Parser) Parse2(sql string) (Statement, BindVars, error) { + tokenizer := p.NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { if tokenizer.partialDDL != nil { if typ, val := tokenizer.Scan(); typ != 0 { - return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", string(val)) + return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", val) } log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) switch x := tokenizer.partialDDL.(type) { @@ -105,30 +100,8 @@ func Parse2(sql string) (Statement, BindVars, error) { return tokenizer.ParseTree, tokenizer.BindVars, nil } -func checkParserVersionFlag() { - if flag.Parsed() { - versionFlagSync.Do(func() { - convVersion, err := convertMySQLVersionToCommentVersion(servenv.MySQLServerVersion()) - if err != nil { - log.Fatalf("unable to parse mysql version: %v", err) - } - mySQLParserVersion = convVersion - }) - } -} - -// SetParserVersion sets the mysql parser version -func SetParserVersion(version string) { - mySQLParserVersion = version -} - -// GetParserVersion returns the version of the mysql parser -func GetParserVersion() string { - return mySQLParserVersion -} - -// convertMySQLVersionToCommentVersion converts the MySQL version into comment version format. -func convertMySQLVersionToCommentVersion(version string) (string, error) { +// ConvertMySQLVersionToCommentVersion converts the MySQL version into comment version format. 
+func ConvertMySQLVersionToCommentVersion(version string) (string, error) { var res = make([]int, 3) idx := 0 val := "" @@ -166,8 +139,8 @@ func convertMySQLVersionToCommentVersion(version string) (string, error) { } // ParseExpr parses an expression and transforms it to an AST -func ParseExpr(sql string) (Expr, error) { - stmt, err := Parse("select " + sql) +func (p *Parser) ParseExpr(sql string) (Expr, error) { + stmt, err := p.Parse("select " + sql) if err != nil { return nil, err } @@ -176,15 +149,15 @@ func ParseExpr(sql string) (Expr, error) { } // Parse behaves like Parse2 but does not return a set of bind variables -func Parse(sql string) (Statement, error) { - stmt, _, err := Parse2(sql) +func (p *Parser) Parse(sql string) (Statement, error) { + stmt, _, err := p.Parse2(sql) return stmt, err } // ParseStrictDDL is the same as Parse except it errors on // partially parsed DDL statements. -func ParseStrictDDL(sql string) (Statement, error) { - tokenizer := NewStringTokenizer(sql) +func (p *Parser) ParseStrictDDL(sql string) (Statement, error) { + tokenizer := p.NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { return nil, tokenizer.LastError } @@ -194,17 +167,11 @@ func ParseStrictDDL(sql string) (Statement, error) { return tokenizer.ParseTree, nil } -// ParseTokenizer is a raw interface to parse from the given tokenizer. -// This does not used pooled parsers, and should not be used in general. -func ParseTokenizer(tokenizer *Tokenizer) int { - return yyParse(tokenizer) -} - // ParseNext parses a single SQL statement from the tokenizer // returning a Statement which is the AST representation of the query. // The tokenizer will always read up to the end of the statement, allowing for // the next call to ParseNext to parse any subsequent SQL statements. When -// there are no more statements to parse, a error of io.EOF is returned. +// there are no more statements to parse, an error of io.EOF is returned. 
func ParseNext(tokenizer *Tokenizer) (Statement, error) { return parseNext(tokenizer, false) } @@ -243,10 +210,10 @@ func parseNext(tokenizer *Tokenizer, strict bool) (Statement, error) { // ErrEmpty is a sentinel error returned when parsing empty statements. var ErrEmpty = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.EmptyQuery, "Query was empty") -// SplitStatement returns the first sql statement up to either a ; or EOF +// SplitStatement returns the first sql statement up to either a ';' or EOF // and the remainder from the given buffer -func SplitStatement(blob string) (string, string, error) { - tokenizer := NewStringTokenizer(blob) +func (p *Parser) SplitStatement(blob string) (string, string, error) { + tokenizer := p.NewStringTokenizer(blob) tkn := 0 for { tkn, _ = tokenizer.Scan() @@ -263,9 +230,25 @@ func SplitStatement(blob string) (string, string, error) { return blob, "", nil } +// SplitStatements splits a given blob into multiple SQL statements. +func (p *Parser) SplitStatements(blob string) (statements []Statement, err error) { + tokenizer := p.NewStringTokenizer(blob) + for { + stmt, err := ParseNext(tokenizer) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, err + } + statements = append(statements, stmt) + } + return statements, nil +} + // SplitStatementToPieces split raw sql statement that may have multi sql pieces to sql pieces // returns the sql pieces blob contains; or error if sql cannot be parsed -func SplitStatementToPieces(blob string) (pieces []string, err error) { +func (p *Parser) SplitStatementToPieces(blob string) (pieces []string, err error) { // fast path: the vast majority of SQL statements do not have semicolons in them if blob == "" { return nil, nil @@ -273,12 +256,15 @@ func SplitStatementToPieces(blob string) (pieces []string, err error) { switch strings.IndexByte(blob, ';') { case -1: // if there is no semicolon, return blob as a whole return []string{blob}, nil - case len(blob) - 1: // 
if there's a single semicolon and it's the last character, return blob without it + case len(blob) - 1: // if there's a single semicolon, and it's the last character, return blob without it return []string{blob[:len(blob)-1]}, nil } pieces = make([]string, 0, 16) - tokenizer := NewStringTokenizer(blob) + // It's safe here to not case about version specific tokenization + // because we are only interested in semicolons and splitting + // statements. + tokenizer := p.NewStringTokenizer(blob) tkn := 0 var stmt string @@ -313,6 +299,49 @@ loop: return } -func IsMySQL80AndAbove() bool { - return mySQLParserVersion >= "80000" +func (p *Parser) IsMySQL80AndAbove() bool { + return p.version >= "80000" +} + +func (p *Parser) SetTruncateErrLen(l int) { + p.truncateErrLen = l +} + +type Options struct { + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int +} + +type Parser struct { + version string + truncateUILen int + truncateErrLen int +} + +func New(opts Options) (*Parser, error) { + if opts.MySQLServerVersion == "" { + opts.MySQLServerVersion = config.DefaultMySQLVersion + } + convVersion, err := ConvertMySQLVersionToCommentVersion(opts.MySQLServerVersion) + if err != nil { + return nil, err + } + return &Parser{ + version: convVersion, + truncateUILen: opts.TruncateUILen, + truncateErrLen: opts.TruncateErrLen, + }, nil +} + +func NewTestParser() *Parser { + convVersion, err := ConvertMySQLVersionToCommentVersion(config.DefaultMySQLVersion) + if err != nil { + panic(err) + } + return &Parser{ + version: convVersion, + truncateUILen: 512, + truncateErrLen: 0, + } } diff --git a/go/vt/sqlparser/parser_test.go b/go/vt/sqlparser/parser_test.go index 537cc598da7..5cb15317f29 100644 --- a/go/vt/sqlparser/parser_test.go +++ b/go/vt/sqlparser/parser_test.go @@ -51,9 +51,10 @@ func TestEmptyErrorAndComments(t *testing.T) { output: "select 1 from dual", }, } + parser := NewTestParser() for _, testcase := range testcases { t.Run(testcase.input, func(t *testing.T) { 
- res, err := Parse(testcase.input) + res, err := parser.Parse(testcase.input) if testcase.err != nil { require.Equal(t, testcase.err, err) } else { @@ -63,7 +64,7 @@ func TestEmptyErrorAndComments(t *testing.T) { }) t.Run(testcase.input+"-Strict DDL", func(t *testing.T) { - res, err := ParseStrictDDL(testcase.input) + res, err := parser.ParseStrictDDL(testcase.input) if testcase.err != nil { require.Equal(t, testcase.err, err) } else { diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go index a6cbffee351..0d29d8b87e3 100644 --- a/go/vt/sqlparser/precedence_test.go +++ b/go/vt/sqlparser/precedence_test.go @@ -18,7 +18,6 @@ package sqlparser import ( "fmt" - "math/rand" "testing" "time" @@ -53,8 +52,9 @@ func TestAndOrPrecedence(t *testing.T) { input: "select * from a where a=b or c=d and e=f", output: "(a = b or (c = d and e = f))", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -77,8 +77,9 @@ func TestPlusStarPrecedence(t *testing.T) { input: "select 1*2+3 from a", output: "((1 * 2) + 3)", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -104,8 +105,9 @@ func TestIsPrecedence(t *testing.T) { input: "select * from a where (a=1 and b=2) is true", output: "((a = 1 and b = 2) is true)", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -158,9 +160,10 @@ func TestParens(t *testing.T) { {in: "0 <=> (1 and 0)", expected: "0 <=> (1 and 0)"}, } + parser := NewTestParser() for _, tc := range tests { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse("select " + tc.in) + stmt, err := parser.Parse("select " + tc.in) require.NoError(t, err) out := 
String(stmt) require.Equal(t, "select "+tc.expected+" from dual", out) @@ -171,12 +174,10 @@ func TestParens(t *testing.T) { func TestRandom(t *testing.T) { // The purpose of this test is to find discrepancies between Format and parsing. If for example our precedence rules are not consistent between the two, this test should find it. // The idea is to generate random queries, and pass them through the parser and then the unparser, and one more time. The result of the first unparse should be the same as the second result. - seed := time.Now().UnixNano() - r := rand.New(rand.NewSource(seed)) - fmt.Printf("seed is %d\n", seed) - g := NewGenerator(r, 5) + g := NewGenerator(5) endBy := time.Now().Add(1 * time.Second) + parser := NewTestParser() for { if time.Now().After(endBy) { break @@ -186,7 +187,7 @@ func TestRandom(t *testing.T) { inputQ := "select " + String(randomExpr) + " from t" // When it's parsed and unparsed - parsedInput, err := Parse(inputQ) + parsedInput, err := parser.Parse(inputQ) require.NoError(t, err, inputQ) // Then the unparsing should be the same as the input query diff --git a/go/vt/sqlparser/predicate_rewriting.go b/go/vt/sqlparser/predicate_rewriting.go index 40e9a953f57..234a2f4acd5 100644 --- a/go/vt/sqlparser/predicate_rewriting.go +++ b/go/vt/sqlparser/predicate_rewriting.go @@ -16,17 +16,16 @@ limitations under the License. package sqlparser -import ( - "vitess.io/vitess/go/vt/log" -) +import "slices" // RewritePredicate walks the input AST and rewrites any boolean logic into a simpler form // This simpler form is CNF plus logic for extracting predicates from OR, plus logic for turning ORs into IN -// Note: In order to re-plan, we need to empty the accumulated metadata in the AST, -// so ColName.Metadata will be nil:ed out as part of this rewrite func RewritePredicate(ast SQLNode) SQLNode { - for { - printExpr(ast) + original := CloneSQLNode(ast) + + // Beware: converting to CNF in this loop might cause exponential formula growth. 
+ // We bail out early to prevent going overboard. + for loop := 0; loop < 15; loop++ { exprChanged := false stopOnChange := func(SQLNode, SQLNode) bool { return !exprChanged @@ -37,9 +36,8 @@ func RewritePredicate(ast SQLNode) SQLNode { return true } - rewritten, state := simplifyExpression(e) - if ch, isChange := state.(changed); isChange { - printRule(ch.rule, ch.exprMatched) + rewritten, changed := simplifyExpression(e) + if changed { exprChanged = true cursor.Replace(rewritten) } @@ -50,9 +48,11 @@ func RewritePredicate(ast SQLNode) SQLNode { return ast } } + + return original } -func simplifyExpression(expr Expr) (Expr, rewriteState) { +func simplifyExpression(expr Expr) (Expr, bool) { switch expr := expr.(type) { case *NotExpr: return simplifyNot(expr) @@ -63,105 +63,91 @@ func simplifyExpression(expr Expr) (Expr, rewriteState) { case *AndExpr: return simplifyAnd(expr) } - return expr, noChange{} + return expr, false } -func simplifyNot(expr *NotExpr) (Expr, rewriteState) { +func simplifyNot(expr *NotExpr) (Expr, bool) { switch child := expr.Expr.(type) { case *NotExpr: - return child.Expr, - newChange("NOT NOT A => A", f(expr)) + return child.Expr, true case *OrExpr: - return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, - newChange("NOT (A OR B) => NOT A AND NOT B", f(expr)) + // not(or(a,b)) => and(not(a),not(b)) + return &AndExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, true case *AndExpr: - return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, - newChange("NOT (A AND B) => NOT A OR NOT B", f(expr)) + // not(and(a,b)) => or(not(a), not(b)) + return &OrExpr{Right: &NotExpr{Expr: child.Right}, Left: &NotExpr{Expr: child.Left}}, true } - return expr, noChange{} + return expr, false } -// ExtractINFromOR will add additional predicated to an OR. 
-// this rewriter should not be used in a fixed point way, since it returns the original expression with additions, -// and it will therefor OOM before it stops rewriting -func ExtractINFromOR(expr *OrExpr) []Expr { - // we check if we have two comparisons on either side of the OR - // that we can add as an ANDed comparison. - // WHERE (a = 5 and B) or (a = 6 AND C) => - // WHERE (a = 5 AND B) OR (a = 6 AND C) AND a IN (5,6) - // This rewrite makes it possible to find a better route than Scatter if the `a` column has a helpful vindex - lftPredicates := SplitAndExpression(nil, expr.Left) - rgtPredicates := SplitAndExpression(nil, expr.Right) - var ins []Expr - for _, lft := range lftPredicates { - l, ok := lft.(*ComparisonExpr) - if !ok { - continue - } - for _, rgt := range rgtPredicates { - r, ok := rgt.(*ComparisonExpr) - if !ok { - continue - } - in, state := tryTurningOrIntoIn(l, r) - if state.changed() { - ins = append(ins, in) - } - } +func simplifyOr(expr *OrExpr) (Expr, bool) { + res, rewritten := distinctOr(expr) + if rewritten { + return res, true } - return uniquefy(ins) -} - -func simplifyOr(expr *OrExpr) (Expr, rewriteState) { or := expr // first we search for ANDs and see how they can be simplified land, lok := or.Left.(*AndExpr) rand, rok := or.Right.(*AndExpr) - switch { - case lok && rok: + + if lok && rok { // (<> AND <>) OR (<> AND <>) + // or(and(T1,T2), and(T2, T3)) => and(T1, or(T2, T2)) var a, b, c Expr - var change changed switch { case Equals.Expr(land.Left, rand.Left): - change = newChange("(A and B) or (A and C) => A AND (B OR C)", f(expr)) a, b, c = land.Left, land.Right, rand.Right + return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, true case Equals.Expr(land.Left, rand.Right): - change = newChange("(A and B) or (C and A) => A AND (B OR C)", f(expr)) a, b, c = land.Left, land.Right, rand.Left + return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, true case Equals.Expr(land.Right, rand.Left): - change = newChange("(B 
and A) or (A and C) => A AND (B OR C)", f(expr)) a, b, c = land.Right, land.Left, rand.Right + return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, true case Equals.Expr(land.Right, rand.Right): - change = newChange("(B and A) or (C and A) => A AND (B OR C)", f(expr)) a, b, c = land.Right, land.Left, rand.Left - default: - return expr, noChange{} + return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, true } - return &AndExpr{Left: a, Right: &OrExpr{Left: b, Right: c}}, change - case lok: - // (<> AND <>) OR <> + } + + // (<> AND <>) OR <> + if lok { // Simplification if Equals.Expr(or.Right, land.Left) || Equals.Expr(or.Right, land.Right) { - return or.Right, newChange("(A AND B) OR A => A", f(expr)) + // or(and(a,b), c) => c where c=a or c=b + return or.Right, true } + // Distribution Law - return &AndExpr{Left: &OrExpr{Left: land.Left, Right: or.Right}, Right: &OrExpr{Left: land.Right, Right: or.Right}}, - newChange("(A AND B) OR C => (A OR C) AND (B OR C)", f(expr)) - case rok: - // <> OR (<> AND <>) + // or(c, and(a,b)) => and(or(c,a), or(c,b)) + return &AndExpr{ + Left: &OrExpr{ + Left: land.Left, + Right: or.Right, + }, + Right: &OrExpr{ + Left: land.Right, + Right: or.Right, + }, + }, true + } + + // <> OR (<> AND <>) + if rok { // Simplification if Equals.Expr(or.Left, rand.Left) || Equals.Expr(or.Left, rand.Right) { - return or.Left, newChange("A OR (A AND B) => A", f(expr)) + // or(a,and(b,c)) => a + return or.Left, true } + // Distribution Law + // or(and(a,b), c) => and(or(c,a), or(c,b)) return &AndExpr{ - Left: &OrExpr{Left: or.Left, Right: rand.Left}, - Right: &OrExpr{Left: or.Left, Right: rand.Right}, - }, - newChange("C OR (A AND B) => (C OR A) AND (C OR B)", f(expr)) + Left: &OrExpr{Left: or.Left, Right: rand.Left}, + Right: &OrExpr{Left: or.Left, Right: rand.Right}, + }, true } // next, we want to try to turn multiple ORs into an IN when possible @@ -169,63 +155,223 @@ func simplifyOr(expr *OrExpr) (Expr, rewriteState) { rgtCmp, 
rok := or.Right.(*ComparisonExpr) if lok && rok { newExpr, rewritten := tryTurningOrIntoIn(lftCmp, rgtCmp) - if rewritten.changed() { - return newExpr, rewritten + if rewritten { + // or(a=x,a=y) => in(a,[x,y]) + return newExpr, true } } // Try to make distinct - return distinctOr(expr) + result, changed := distinctOr(expr) + if changed { + return result, true + } + return result, false +} + +func simplifyXor(expr *XorExpr) (Expr, bool) { + // xor(a,b) => and(or(a,b), not(and(a,b)) + return &AndExpr{ + Left: &OrExpr{Left: expr.Left, Right: expr.Right}, + Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}}, + }, true } -func tryTurningOrIntoIn(l, r *ComparisonExpr) (Expr, rewriteState) { +func simplifyAnd(expr *AndExpr) (Expr, bool) { + res, rewritten := distinctAnd(expr) + if rewritten { + return res, true + } + and := expr + if or, ok := and.Left.(*OrExpr); ok { + // Simplification + // and(or(a,b),c) => c when c=a or c=b + if Equals.Expr(or.Left, and.Right) { + return and.Right, true + } + if Equals.Expr(or.Right, and.Right) { + return and.Right, true + } + } + if or, ok := and.Right.(*OrExpr); ok { + // Simplification + if Equals.Expr(or.Left, and.Left) { + return and.Left, true + } + if Equals.Expr(or.Right, and.Left) { + return and.Left, true + } + } + + return expr, false +} + +// ExtractINFromOR rewrites the OR expression into an IN clause. +// Each side of each ORs has to be an equality comparison expression and the column names have to +// match for all sides of each comparison. 
+// This rewriter takes a query that looks like this WHERE a = 1 and b = 11 or a = 2 and b = 12 or a = 3 and b = 13 +// And rewrite that to WHERE (a, b) IN ((1,11), (2,12), (3,13)) +func ExtractINFromOR(expr *OrExpr) []Expr { + var varNames []*ColName + var values []Exprs + orSlice := orToSlice(expr) + for _, expr := range orSlice { + andSlice := andToSlice(expr) + if len(andSlice) == 0 { + return nil + } + + var currentVarNames []*ColName + var currentValues []Expr + for _, comparisonExpr := range andSlice { + if comparisonExpr.Operator != EqualOp { + return nil + } + + var colName *ColName + if left, ok := comparisonExpr.Left.(*ColName); ok { + colName = left + currentValues = append(currentValues, comparisonExpr.Right) + } + + if right, ok := comparisonExpr.Right.(*ColName); ok { + if colName != nil { + return nil + } + colName = right + currentValues = append(currentValues, comparisonExpr.Left) + } + + if colName == nil { + return nil + } + + currentVarNames = append(currentVarNames, colName) + } + + if len(varNames) == 0 { + varNames = currentVarNames + } else if !slices.EqualFunc(varNames, currentVarNames, func(col1, col2 *ColName) bool { return col1.Equal(col2) }) { + return nil + } + + values = append(values, currentValues) + } + + var nameTuple ValTuple + for _, name := range varNames { + nameTuple = append(nameTuple, name) + } + + var valueTuple ValTuple + for _, value := range values { + valueTuple = append(valueTuple, ValTuple(value)) + } + + return []Expr{&ComparisonExpr{ + Operator: InOp, + Left: nameTuple, + Right: valueTuple, + }} +} + +func orToSlice(expr *OrExpr) []Expr { + var exprs []Expr + + handleOrSide := func(e Expr) { + switch e := e.(type) { + case *OrExpr: + exprs = append(exprs, orToSlice(e)...) 
+ default: + exprs = append(exprs, e) + } + } + + handleOrSide(expr.Left) + handleOrSide(expr.Right) + return exprs +} + +func andToSlice(expr Expr) []*ComparisonExpr { + var andExpr *AndExpr + switch expr := expr.(type) { + case *AndExpr: + andExpr = expr + case *ComparisonExpr: + return []*ComparisonExpr{expr} + default: + return nil + } + + var exprs []*ComparisonExpr + handleAndSide := func(e Expr) bool { + switch e := e.(type) { + case *AndExpr: + slice := andToSlice(e) + if slice == nil { + return false + } + exprs = append(exprs, slice...) + case *ComparisonExpr: + exprs = append(exprs, e) + default: + return false + } + return true + } + + if !handleAndSide(andExpr.Left) { + return nil + } + if !handleAndSide(andExpr.Right) { + return nil + } + + return exprs +} + +func tryTurningOrIntoIn(l, r *ComparisonExpr) (Expr, bool) { // looks for A = X OR A = Y and turns them into A IN (X, Y) col, ok := l.Left.(*ColName) if !ok || !Equals.Expr(col, r.Left) { - return nil, noChange{} + return nil, false } var tuple ValTuple - var ruleStr string + switch l.Operator { case EqualOp: tuple = ValTuple{l.Right} - ruleStr = "A = <>" case InOp: lft, ok := l.Right.(ValTuple) if !ok { - return nil, noChange{} + return nil, false } tuple = lft - ruleStr = "A IN (<>, <>)" default: - return nil, noChange{} + return nil, false } - ruleStr += " OR " - switch r.Operator { case EqualOp: tuple = append(tuple, r.Right) - ruleStr += "A = <>" + case InOp: lft, ok := r.Right.(ValTuple) if !ok { - return nil, noChange{} + return nil, false } tuple = append(tuple, lft...) 
- ruleStr += "A IN (<>, <>)" + default: - return nil, noChange{} + return nil, false } - ruleStr += " => A IN (<>, <>)" - return &ComparisonExpr{ Operator: InOp, Left: col, Right: uniquefy(tuple), - }, newChange(ruleStr, f(&OrExpr{Left: l, Right: r})) + }, true } func uniquefy(tuple ValTuple) (output ValTuple) { @@ -241,45 +387,7 @@ outer: return } -func simplifyXor(expr *XorExpr) (Expr, rewriteState) { - // DeMorgan Rewriter - return &AndExpr{ - Left: &OrExpr{Left: expr.Left, Right: expr.Right}, - Right: &NotExpr{Expr: &AndExpr{Left: expr.Left, Right: expr.Right}}, - }, newChange("(A XOR B) => (A OR B) AND NOT (A AND B)", f(expr)) -} - -func simplifyAnd(expr *AndExpr) (Expr, rewriteState) { - res, rewritten := distinctAnd(expr) - if rewritten.changed() { - return res, rewritten - } - and := expr - if or, ok := and.Left.(*OrExpr); ok { - // Simplification - - if Equals.Expr(or.Left, and.Right) { - return and.Right, newChange("(A OR B) AND A => A", f(expr)) - } - if Equals.Expr(or.Right, and.Right) { - return and.Right, newChange("(A OR B) AND B => B", f(expr)) - } - } - if or, ok := and.Right.(*OrExpr); ok { - // Simplification - if Equals.Expr(or.Left, and.Left) { - return and.Left, newChange("A AND (A OR B) => A", f(expr)) - } - if Equals.Expr(or.Right, and.Left) { - return and.Left, newChange("A AND (B OR A) => A", f(expr)) - } - } - - return expr, noChange{} -} - -func distinctOr(in *OrExpr) (Expr, rewriteState) { - var skipped []*OrExpr +func distinctOr(in *OrExpr) (result Expr, changed bool) { todo := []*OrExpr{in} var leaves []Expr for len(todo) > 0 { @@ -296,27 +404,23 @@ func distinctOr(in *OrExpr) (Expr, rewriteState) { addAnd(curr.Left) addAnd(curr.Right) } - original := len(leaves) + var predicates []Expr outer1: - for len(leaves) > 0 { - curr := leaves[0] - leaves = leaves[1:] + for _, curr := range leaves { for _, alreadyIn := range predicates { if Equals.Expr(alreadyIn, curr) { - if log.V(0) { - skipped = append(skipped, &OrExpr{Left: alreadyIn, 
Right: curr}) - } + changed = true continue outer1 } } predicates = append(predicates, curr) } - if original == len(predicates) { - return in, noChange{} + if !changed { + return in, false } - var result Expr + for i, curr := range predicates { if i == 0 { result = curr @@ -325,25 +429,10 @@ outer1: result = &OrExpr{Left: result, Right: curr} } - return result, newChange("A OR A => A", func() Expr { - var result Expr - for _, orExpr := range skipped { - if result == nil { - result = orExpr - continue - } - - result = &OrExpr{ - Left: result, - Right: orExpr, - } - } - return result - }) + return } -func distinctAnd(in *AndExpr) (Expr, rewriteState) { - var skipped []*AndExpr +func distinctAnd(in *AndExpr) (result Expr, changed bool) { todo := []*AndExpr{in} var leaves []Expr for len(todo) > 0 { @@ -359,25 +448,23 @@ func distinctAnd(in *AndExpr) (Expr, rewriteState) { addExpr(curr.Left) addExpr(curr.Right) } - original := len(leaves) var predicates []Expr outer1: for _, curr := range leaves { for _, alreadyIn := range predicates { if Equals.Expr(alreadyIn, curr) { - if log.V(0) { - skipped = append(skipped, &AndExpr{Left: alreadyIn, Right: curr}) - } + changed = true continue outer1 } } predicates = append(predicates, curr) } - if original == len(predicates) { - return in, noChange{} + + if !changed { + return in, false } - var result Expr + for i, curr := range predicates { if i == 0 { result = curr @@ -385,62 +472,5 @@ outer1: } result = &AndExpr{Left: result, Right: curr} } - return AndExpressions(leaves...), newChange("A AND A => A", func() Expr { - var result Expr - for _, andExpr := range skipped { - if result == nil { - result = andExpr - continue - } - - result = &AndExpr{ - Left: result, - Right: andExpr, - } - } - return result - }) -} - -type ( - rewriteState interface { - changed() bool - } - noChange struct{} - - // changed makes it possible to make sure we have a rule string for each change we do in the expression tree - changed struct { - rule string 
- - // ExprMatched is a function here so building of this expression can be paid only when we are debug logging - exprMatched func() Expr - } -) - -func (noChange) changed() bool { return false } -func (changed) changed() bool { return true } - -// f returns a function that returns the expression. It's short by design, so it interferes minimally -// used for logging -func f(e Expr) func() Expr { - return func() Expr { return e } -} - -func printRule(rule string, expr func() Expr) { - if log.V(10) { - log.Infof("Rule: %s ON %s", rule, String(expr())) - } -} - -func printExpr(expr SQLNode) { - if log.V(10) { - log.Infof("Current: %s", String(expr)) - } -} - -func newChange(rule string, exprMatched func() Expr) changed { - return changed{ - rule: rule, - exprMatched: exprMatched, - } + return AndExpressions(leaves...), true } diff --git a/go/vt/sqlparser/predicate_rewriting_test.go b/go/vt/sqlparser/predicate_rewriting_test.go index 34e23597894..ceb4b276017 100644 --- a/go/vt/sqlparser/predicate_rewriting_test.go +++ b/go/vt/sqlparser/predicate_rewriting_test.go @@ -86,13 +86,14 @@ func TestSimplifyExpression(in *testing.T) { expected: "A and (B or C)", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) - expr, didRewrite := simplifyExpression(expr) - assert.True(t, didRewrite.changed()) + expr, changed := simplifyExpression(expr) + assert.True(t, changed) assert.Equal(t, tc.expected, String(expr)) }) } @@ -129,11 +130,38 @@ func TestRewritePredicate(in *testing.T) { }, { in: "A and (B or A)", expected: "A", + }, { + in: "(a = 1 and b = 41) or (a = 2 and b = 42)", + // this might look weird, but it allows the planner to either a or b in a vindex operation + expected: "a in (1, 2) and (a = 1 or b = 42) and ((b = 41 or a = 2) and b in (41, 42))", + }, { + in: "(a = 1 and b = 41) or (a = 2 and b = 42) or (a = 3 and b = 43)", + expected: 
"a in (1, 2, 3) and (a in (1, 2) or b = 43) and ((a = 1 or b = 42 or a = 3) and (a = 1 or b = 42 or b = 43)) and ((b = 41 or a = 2 or a = 3) and (b = 41 or a = 2 or b = 43) and ((b in (41, 42) or a = 3) and b in (41, 42, 43)))", + }, { + // the following two tests show some pathological cases that would grow too much, and so we abort the rewriting + in: "a = 1 and b = 41 or a = 2 and b = 42 or a = 3 and b = 43 or a = 4 and b = 44 or a = 5 and b = 45 or a = 6 and b = 46", + expected: "a = 1 and b = 41 or a = 2 and b = 42 or a = 3 and b = 43 or a = 4 and b = 44 or a = 5 and b = 45 or a = 6 and b = 46", + }, { + in: "a = 5 and B or a = 6 and C", + expected: "a in (5, 6) and (a = 5 or C) and ((B or a = 6) and (B or C))", + }, { + in: "(a = 5 and b = 1 or b = 2 and a = 6)", + expected: "(a = 5 or b = 2) and a in (5, 6) and (b in (1, 2) and (b = 1 or a = 6))", + }, { + in: "(a in (1,5) and B or C and a = 6)", + expected: "(a in (1, 5) or C) and a in (1, 5, 6) and ((B or C) and (B or a = 6))", + }, { + in: "(a in (1, 5) and B or C and a in (5, 7))", + expected: "(a in (1, 5) or C) and a in (1, 5, 7) and ((B or C) and (B or a in (5, 7)))", + }, { + in: "not n0 xor not (n2 and n3) xor (not n2 and (n1 xor n1) xor (n0 xor n0 xor n2))", + expected: "not n0 xor not (n2 and n3) xor (not n2 and (n1 xor n1) xor (n0 xor n0 xor n2))", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) output := RewritePredicate(expr) @@ -147,28 +175,17 @@ func TestExtractINFromOR(in *testing.T) { in string expected string }{{ - in: "(A and B) or (B and A)", - expected: "", - }, { - in: "(a = 5 and B) or A", - expected: "", - }, { - in: "a = 5 and B or a = 6 and C", - expected: "a in (5, 6)", + in: "a = 1 and b = 41 or a = 2 and b = 42 or a = 3 and b = 43 or a = 4 and b = 44 or a = 5 and b = 45 or a = 6 and b = 46", + expected: "(a, b) in ((1, 41), (2, 42), (3, 43), 
(4, 44), (5, 45), (6, 46))", }, { - in: "(a = 5 and b = 1 or b = 2 and a = 6)", - expected: "a in (5, 6) and b in (1, 2)", - }, { - in: "(a in (1,5) and B or C and a = 6)", - expected: "a in (1, 5, 6)", - }, { - in: "(a in (1, 5) and B or C and a in (5, 7))", - expected: "a in (1, 5, 7)", + in: "a = 1 or a = 2 or a = 3 or a = 4 or a = 5 or a = 6", + expected: "(a) in ((1), (2), (3), (4), (5), (6))", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) output := ExtractINFromOR(expr.(*OrExpr)) diff --git a/go/vt/sqlparser/random_expr.go b/go/vt/sqlparser/random_expr.go index 6eed8145ed2..f5b394b36fe 100644 --- a/go/vt/sqlparser/random_expr.go +++ b/go/vt/sqlparser/random_expr.go @@ -18,7 +18,7 @@ package sqlparser import ( "fmt" - "math/rand" + "math/rand/v2" ) // This file is used to generate random expressions to be used for testing @@ -32,7 +32,7 @@ const ( type ( ExprGenerator interface { - Generate(r *rand.Rand, config ExprGeneratorConfig) Expr + Generate(config ExprGeneratorConfig) Expr } QueryGenerator interface { @@ -53,7 +53,6 @@ type ( } Generator struct { - r *rand.Rand depth int maxDepth int isAggregate bool @@ -120,9 +119,8 @@ func (egc ExprGeneratorConfig) IsAggregateConfig() ExprGeneratorConfig { return egc } -func NewGenerator(r *rand.Rand, maxDepth int, exprGenerators ...ExprGenerator) *Generator { +func NewGenerator(maxDepth int, exprGenerators ...ExprGenerator) *Generator { g := Generator{ - r: r, maxDepth: maxDepth, exprGenerators: exprGenerators, } @@ -190,7 +188,7 @@ func (g *Generator) Expression(genConfig ExprGeneratorConfig) Expr { // don't create expressions from the expression exprGenerators if we haven't created an aggregation yet if _, ok := generator.(QueryGenerator); ok || genConfig.AggrRule != IsAggregate { options = append(options, func() Expr { - expr := generator.Generate(g.r, genConfig) + expr := 
generator.Generate(genConfig) if expr == nil { return g.randomLiteral() } @@ -208,7 +206,7 @@ func (g *Generator) Expression(genConfig ExprGeneratorConfig) Expr { // if an arbitrary number of columns may be generated, randomly choose 1-3 columns if numCols == 0 { - numCols = g.r.Intn(3) + 1 + numCols = rand.IntN(3) + 1 } if numCols == 1 { @@ -244,7 +242,7 @@ func (g *Generator) makeAggregateIfNecessary(genConfig ExprGeneratorConfig, expr } func (g *Generator) randomAggregate(genConfig ExprGeneratorConfig) Expr { - isDistinct := g.r.Intn(10) < 1 + isDistinct := rand.IntN(10) < 1 options := []exprF{ func() Expr { return &CountStar{} }, @@ -324,7 +322,7 @@ func (g *Generator) subqueryExpr(genConfig ExprGeneratorConfig) Expr { for _, generator := range g.exprGenerators { if qg, ok := generator.(QueryGenerator); ok { options = append(options, func() Expr { - expr := qg.Generate(g.r, genConfig) + expr := qg.Generate(genConfig) if expr == nil { return g.randomTupleLiteral(genConfig) } @@ -342,7 +340,7 @@ func (g *Generator) subqueryExpr(genConfig ExprGeneratorConfig) Expr { func (g *Generator) randomTupleLiteral(genConfig ExprGeneratorConfig) Expr { if genConfig.NumCols == 0 { - genConfig.NumCols = g.r.Intn(3) + 1 + genConfig.NumCols = rand.IntN(3) + 1 } tuple := ValTuple{} @@ -372,11 +370,11 @@ func (g *Generator) randomBool(prob float32) bool { if prob < 0 || prob > 1 { prob = 0.5 } - return g.r.Float32() < prob + return rand.Float32() < prob } func (g *Generator) intLiteral() Expr { - t := fmt.Sprintf("%d", g.r.Intn(100)-g.r.Intn(100)) + t := fmt.Sprintf("%d", rand.IntN(100)-rand.IntN(100)) //nolint SA4000 return NewIntLiteral(t) } @@ -404,10 +402,10 @@ func (g *Generator) comparison(genConfig ExprGeneratorConfig) Expr { defer g.exit() // specifc 1-3 columns - numCols := g.r.Intn(3) + 1 + numCols := rand.IntN(3) + 1 cmp := &ComparisonExpr{ - Operator: comparisonOps[g.r.Intn(len(comparisonOps))], + Operator: comparisonOps[rand.IntN(len(comparisonOps))], Left: 
g.Expression(genConfig.SetNumCols(numCols)), Right: g.Expression(genConfig.SetNumCols(numCols)), } @@ -427,7 +425,7 @@ func (g *Generator) caseExpr(genConfig ExprGeneratorConfig) Expr { elseExpr = g.Expression(genConfig) } - size := g.r.Intn(2) + 1 + size := rand.IntN(2) + 1 var whens []*When for i := 0; i < size; i++ { var cond Expr @@ -457,7 +455,7 @@ func (g *Generator) arithmetic(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() - op := arithmeticOps[g.r.Intn(len(arithmeticOps))] + op := arithmeticOps[rand.IntN(len(arithmeticOps))] return &BinaryExpr{ Operator: op, @@ -469,11 +467,11 @@ func (g *Generator) arithmetic(genConfig ExprGeneratorConfig) Expr { type exprF func() Expr func (g *Generator) randomOf(options []exprF) Expr { - return options[g.r.Intn(len(options))]() + return options[rand.IntN(len(options))]() } func (g *Generator) randomOfS(options []string) string { - return options[g.r.Intn(len(options))] + return options[rand.IntN(len(options))] } func (g *Generator) andExpr(genConfig ExprGeneratorConfig) Expr { @@ -513,7 +511,7 @@ func (g *Generator) inExpr(genConfig ExprGeneratorConfig) Expr { g.enter() defer g.exit() - size := g.r.Intn(3) + 2 + size := rand.IntN(3) + 2 inExprGenConfig := NewExprGeneratorConfig(genConfig.AggrRule, "", size, true) tuple1 := g.Expression(inExprGenConfig) tuple2 := ValTuple{g.Expression(inExprGenConfig)} @@ -556,7 +554,7 @@ func (g *Generator) isExpr(genConfig ExprGeneratorConfig) Expr { ops := []IsExprOperator{IsNullOp, IsNotNullOp, IsTrueOp, IsNotTrueOp, IsFalseOp, IsNotFalseOp} return &IsExpr{ - Right: ops[g.r.Intn(len(ops))], + Right: ops[rand.IntN(len(ops))], Left: g.Expression(genConfig), } } diff --git a/go/vt/sqlparser/redact_query.go b/go/vt/sqlparser/redact_query.go index 194ad1ca64d..e6b8c009c68 100644 --- a/go/vt/sqlparser/redact_query.go +++ b/go/vt/sqlparser/redact_query.go @@ -19,11 +19,11 @@ package sqlparser import querypb "vitess.io/vitess/go/vt/proto/query" // RedactSQLQuery returns a sql 
string with the params stripped out for display -func RedactSQLQuery(sql string) (string, error) { +func (p *Parser) RedactSQLQuery(sql string) (string, error) { bv := map[string]*querypb.BindVariable{} sqlStripped, comments := SplitMarginComments(sql) - stmt, reservedVars, err := Parse2(sqlStripped) + stmt, reservedVars, err := p.Parse2(sqlStripped) if err != nil { return "", err } diff --git a/go/vt/sqlparser/redact_query_test.go b/go/vt/sqlparser/redact_query_test.go index 1cfd6d83af3..042f0f5b5f2 100644 --- a/go/vt/sqlparser/redact_query_test.go +++ b/go/vt/sqlparser/redact_query_test.go @@ -23,8 +23,9 @@ import ( ) func TestRedactSQLStatements(t *testing.T) { + parser := NewTestParser() sql := "select a,b,c from t where x = 1234 and y = 1234 and z = 'apple'" - redactedSQL, err := RedactSQLQuery(sql) + redactedSQL, err := parser.RedactSQLQuery(sql) if err != nil { t.Fatalf("redacting sql failed: %v", err) } diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go index 3044e04f8b0..628d6fbd0a4 100644 --- a/go/vt/sqlparser/rewriter_test.go +++ b/go/vt/sqlparser/rewriter_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package sqlparser import ( - "math/rand" "testing" "github.com/stretchr/testify/assert" @@ -26,7 +25,7 @@ import ( ) func BenchmarkVisitLargeExpression(b *testing.B) { - gen := NewGenerator(rand.New(rand.NewSource(1)), 5) + gen := NewGenerator(5) exp := gen.Expression(ExprGeneratorConfig{}) depth := 0 @@ -43,7 +42,8 @@ func BenchmarkVisitLargeExpression(b *testing.B) { func TestReplaceWorksInLaterCalls(t *testing.T) { q := "select * from tbl1" - stmt, err := Parse(q) + parser := NewTestParser() + stmt, err := parser.Parse(q) require.NoError(t, err) count := 0 Rewrite(stmt, func(cursor *Cursor) bool { @@ -67,7 +67,8 @@ func TestReplaceWorksInLaterCalls(t *testing.T) { func TestReplaceAndRevisitWorksInLaterCalls(t *testing.T) { q := "select * from tbl1" - stmt, err := Parse(q) + parser := NewTestParser() + stmt, err := parser.Parse(q) require.NoError(t, err) count := 0 Rewrite(stmt, func(cursor *Cursor) bool { @@ -94,7 +95,8 @@ func TestReplaceAndRevisitWorksInLaterCalls(t *testing.T) { } func TestChangeValueTypeGivesError(t *testing.T) { - parse, err := Parse("select * from a join b on a.id = b.id") + parser := NewTestParser() + parse, err := parser.Parse("select * from a join b on a.id = b.id") require.NoError(t, err) defer func() { diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index d837b38da7a..2fa91d9fda5 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -6,6 +6,8 @@ package sqlparser import ( __yyfmt__ "fmt" __yyunsafe__ "unsafe" + + "vitess.io/vitess/go/ptr" ) //line sql.y:17 @@ -34,722 +36,728 @@ func markBindVariable(yylex yyLexer, bvar string) { } const MEMBER = 57346 -const FUNCTION_CALL_NON_KEYWORD = 57347 -const STRING_TYPE_PREFIX_NON_KEYWORD = 57348 -const LEX_ERROR = 57349 -const UNION = 57350 -const SELECT = 57351 -const STREAM = 57352 -const VSTREAM = 57353 -const INSERT = 57354 -const UPDATE = 57355 -const DELETE = 57356 -const FROM = 57357 -const WHERE = 57358 -const GROUP = 57359 -const HAVING = 57360 -const ORDER 
= 57361 -const BY = 57362 -const LIMIT = 57363 -const OFFSET = 57364 -const FOR = 57365 -const ALL = 57366 -const DISTINCT = 57367 -const AS = 57368 -const EXISTS = 57369 -const ASC = 57370 -const DESC = 57371 -const INTO = 57372 -const DUPLICATE = 57373 -const DEFAULT = 57374 -const SET = 57375 -const LOCK = 57376 -const UNLOCK = 57377 -const KEYS = 57378 -const DO = 57379 -const CALL = 57380 -const DISTINCTROW = 57381 -const PARSER = 57382 -const GENERATED = 57383 -const ALWAYS = 57384 -const OUTFILE = 57385 -const S3 = 57386 -const DATA = 57387 -const LOAD = 57388 -const LINES = 57389 -const TERMINATED = 57390 -const ESCAPED = 57391 -const ENCLOSED = 57392 -const DUMPFILE = 57393 -const CSV = 57394 -const HEADER = 57395 -const MANIFEST = 57396 -const OVERWRITE = 57397 -const STARTING = 57398 -const OPTIONALLY = 57399 -const VALUES = 57400 -const LAST_INSERT_ID = 57401 -const NEXT = 57402 -const VALUE = 57403 -const SHARE = 57404 -const MODE = 57405 -const SQL_NO_CACHE = 57406 -const SQL_CACHE = 57407 -const SQL_CALC_FOUND_ROWS = 57408 -const JOIN = 57409 -const STRAIGHT_JOIN = 57410 -const LEFT = 57411 -const RIGHT = 57412 -const INNER = 57413 -const OUTER = 57414 -const CROSS = 57415 -const NATURAL = 57416 -const USE = 57417 -const FORCE = 57418 -const ON = 57419 -const USING = 57420 -const INPLACE = 57421 -const COPY = 57422 -const INSTANT = 57423 -const ALGORITHM = 57424 -const NONE = 57425 -const SHARED = 57426 -const EXCLUSIVE = 57427 -const SUBQUERY_AS_EXPR = 57428 -const STRING = 57429 -const ID = 57430 -const AT_ID = 57431 -const AT_AT_ID = 57432 -const HEX = 57433 -const NCHAR_STRING = 57434 -const INTEGRAL = 57435 -const FLOAT = 57436 -const DECIMAL = 57437 -const HEXNUM = 57438 -const COMMENT = 57439 -const COMMENT_KEYWORD = 57440 -const BITNUM = 57441 -const BIT_LITERAL = 57442 -const COMPRESSION = 57443 -const VALUE_ARG = 57444 -const LIST_ARG = 57445 -const OFFSET_ARG = 57446 -const JSON_PRETTY = 57447 -const JSON_STORAGE_SIZE = 57448 -const 
JSON_STORAGE_FREE = 57449 -const JSON_CONTAINS = 57450 -const JSON_CONTAINS_PATH = 57451 -const JSON_EXTRACT = 57452 -const JSON_KEYS = 57453 -const JSON_OVERLAPS = 57454 -const JSON_SEARCH = 57455 -const JSON_VALUE = 57456 -const EXTRACT = 57457 -const NULL = 57458 -const TRUE = 57459 -const FALSE = 57460 -const OFF = 57461 -const DISCARD = 57462 -const IMPORT = 57463 -const ENABLE = 57464 -const DISABLE = 57465 -const TABLESPACE = 57466 -const VIRTUAL = 57467 -const STORED = 57468 -const BOTH = 57469 -const LEADING = 57470 -const TRAILING = 57471 -const KILL = 57472 -const EMPTY_FROM_CLAUSE = 57473 -const LOWER_THAN_CHARSET = 57474 -const CHARSET = 57475 -const UNIQUE = 57476 -const KEY = 57477 -const EXPRESSION_PREC_SETTER = 57478 -const OR = 57479 -const XOR = 57480 -const AND = 57481 -const NOT = 57482 -const BETWEEN = 57483 -const CASE = 57484 -const WHEN = 57485 -const THEN = 57486 -const ELSE = 57487 -const END = 57488 -const LE = 57489 -const GE = 57490 -const NE = 57491 -const NULL_SAFE_EQUAL = 57492 -const IS = 57493 -const LIKE = 57494 -const REGEXP = 57495 -const RLIKE = 57496 -const IN = 57497 -const ASSIGNMENT_OPT = 57498 -const SHIFT_LEFT = 57499 -const SHIFT_RIGHT = 57500 -const DIV = 57501 -const MOD = 57502 -const UNARY = 57503 -const COLLATE = 57504 -const BINARY = 57505 -const UNDERSCORE_ARMSCII8 = 57506 -const UNDERSCORE_ASCII = 57507 -const UNDERSCORE_BIG5 = 57508 -const UNDERSCORE_BINARY = 57509 -const UNDERSCORE_CP1250 = 57510 -const UNDERSCORE_CP1251 = 57511 -const UNDERSCORE_CP1256 = 57512 -const UNDERSCORE_CP1257 = 57513 -const UNDERSCORE_CP850 = 57514 -const UNDERSCORE_CP852 = 57515 -const UNDERSCORE_CP866 = 57516 -const UNDERSCORE_CP932 = 57517 -const UNDERSCORE_DEC8 = 57518 -const UNDERSCORE_EUCJPMS = 57519 -const UNDERSCORE_EUCKR = 57520 -const UNDERSCORE_GB18030 = 57521 -const UNDERSCORE_GB2312 = 57522 -const UNDERSCORE_GBK = 57523 -const UNDERSCORE_GEOSTD8 = 57524 -const UNDERSCORE_GREEK = 57525 -const UNDERSCORE_HEBREW = 57526 
-const UNDERSCORE_HP8 = 57527 -const UNDERSCORE_KEYBCS2 = 57528 -const UNDERSCORE_KOI8R = 57529 -const UNDERSCORE_KOI8U = 57530 -const UNDERSCORE_LATIN1 = 57531 -const UNDERSCORE_LATIN2 = 57532 -const UNDERSCORE_LATIN5 = 57533 -const UNDERSCORE_LATIN7 = 57534 -const UNDERSCORE_MACCE = 57535 -const UNDERSCORE_MACROMAN = 57536 -const UNDERSCORE_SJIS = 57537 -const UNDERSCORE_SWE7 = 57538 -const UNDERSCORE_TIS620 = 57539 -const UNDERSCORE_UCS2 = 57540 -const UNDERSCORE_UJIS = 57541 -const UNDERSCORE_UTF16 = 57542 -const UNDERSCORE_UTF16LE = 57543 -const UNDERSCORE_UTF32 = 57544 -const UNDERSCORE_UTF8 = 57545 -const UNDERSCORE_UTF8MB4 = 57546 -const UNDERSCORE_UTF8MB3 = 57547 -const INTERVAL = 57548 -const WINDOW_EXPR = 57549 -const JSON_EXTRACT_OP = 57550 -const JSON_UNQUOTE_EXTRACT_OP = 57551 -const CREATE = 57552 -const ALTER = 57553 -const DROP = 57554 -const RENAME = 57555 -const ANALYZE = 57556 -const ADD = 57557 -const FLUSH = 57558 -const CHANGE = 57559 -const MODIFY = 57560 -const DEALLOCATE = 57561 -const REVERT = 57562 -const QUERIES = 57563 -const SCHEMA = 57564 -const TABLE = 57565 -const INDEX = 57566 -const VIEW = 57567 -const TO = 57568 -const IGNORE = 57569 -const IF = 57570 -const PRIMARY = 57571 -const COLUMN = 57572 -const SPATIAL = 57573 -const FULLTEXT = 57574 -const KEY_BLOCK_SIZE = 57575 -const CHECK = 57576 -const INDEXES = 57577 -const ACTION = 57578 -const CASCADE = 57579 -const CONSTRAINT = 57580 -const FOREIGN = 57581 -const NO = 57582 -const REFERENCES = 57583 -const RESTRICT = 57584 -const SHOW = 57585 -const DESCRIBE = 57586 -const EXPLAIN = 57587 -const DATE = 57588 -const ESCAPE = 57589 -const REPAIR = 57590 -const OPTIMIZE = 57591 -const TRUNCATE = 57592 -const COALESCE = 57593 -const EXCHANGE = 57594 -const REBUILD = 57595 -const PARTITIONING = 57596 -const REMOVE = 57597 -const PREPARE = 57598 -const EXECUTE = 57599 -const MAXVALUE = 57600 -const PARTITION = 57601 -const REORGANIZE = 57602 -const LESS = 57603 -const THAN = 57604 
-const PROCEDURE = 57605 -const TRIGGER = 57606 -const VINDEX = 57607 -const VINDEXES = 57608 -const DIRECTORY = 57609 -const NAME = 57610 -const UPGRADE = 57611 -const STATUS = 57612 -const VARIABLES = 57613 -const WARNINGS = 57614 -const CASCADED = 57615 -const DEFINER = 57616 -const OPTION = 57617 -const SQL = 57618 -const UNDEFINED = 57619 -const SEQUENCE = 57620 -const MERGE = 57621 -const TEMPORARY = 57622 -const TEMPTABLE = 57623 -const INVOKER = 57624 -const SECURITY = 57625 -const FIRST = 57626 -const AFTER = 57627 -const LAST = 57628 -const VITESS_MIGRATION = 57629 -const CANCEL = 57630 -const RETRY = 57631 -const LAUNCH = 57632 -const COMPLETE = 57633 -const CLEANUP = 57634 -const THROTTLE = 57635 -const UNTHROTTLE = 57636 -const EXPIRE = 57637 -const RATIO = 57638 -const VITESS_THROTTLER = 57639 -const BEGIN = 57640 -const START = 57641 -const TRANSACTION = 57642 -const COMMIT = 57643 -const ROLLBACK = 57644 -const SAVEPOINT = 57645 -const RELEASE = 57646 -const WORK = 57647 -const CONSISTENT = 57648 -const SNAPSHOT = 57649 -const BIT = 57650 -const TINYINT = 57651 -const SMALLINT = 57652 -const MEDIUMINT = 57653 -const INT = 57654 -const INTEGER = 57655 -const BIGINT = 57656 -const INTNUM = 57657 -const REAL = 57658 -const DOUBLE = 57659 -const FLOAT_TYPE = 57660 -const FLOAT4_TYPE = 57661 -const FLOAT8_TYPE = 57662 -const DECIMAL_TYPE = 57663 -const NUMERIC = 57664 -const TIME = 57665 -const TIMESTAMP = 57666 -const DATETIME = 57667 -const YEAR = 57668 -const CHAR = 57669 -const VARCHAR = 57670 -const BOOL = 57671 -const CHARACTER = 57672 -const VARBINARY = 57673 -const NCHAR = 57674 -const TEXT = 57675 -const TINYTEXT = 57676 -const MEDIUMTEXT = 57677 -const LONGTEXT = 57678 -const BLOB = 57679 -const TINYBLOB = 57680 -const MEDIUMBLOB = 57681 -const LONGBLOB = 57682 -const JSON = 57683 -const JSON_SCHEMA_VALID = 57684 -const JSON_SCHEMA_VALIDATION_REPORT = 57685 -const ENUM = 57686 -const GEOMETRY = 57687 -const POINT = 57688 -const LINESTRING = 
57689 -const POLYGON = 57690 -const GEOMCOLLECTION = 57691 -const GEOMETRYCOLLECTION = 57692 -const MULTIPOINT = 57693 -const MULTILINESTRING = 57694 -const MULTIPOLYGON = 57695 -const ASCII = 57696 -const UNICODE = 57697 -const NULLX = 57698 -const AUTO_INCREMENT = 57699 -const APPROXNUM = 57700 -const SIGNED = 57701 -const UNSIGNED = 57702 -const ZEROFILL = 57703 -const PURGE = 57704 -const BEFORE = 57705 -const CODE = 57706 -const COLLATION = 57707 -const COLUMNS = 57708 -const DATABASES = 57709 -const ENGINES = 57710 -const EVENT = 57711 -const EXTENDED = 57712 -const FIELDS = 57713 -const FULL = 57714 -const FUNCTION = 57715 -const GTID_EXECUTED = 57716 -const KEYSPACES = 57717 -const OPEN = 57718 -const PLUGINS = 57719 -const PRIVILEGES = 57720 -const PROCESSLIST = 57721 -const SCHEMAS = 57722 -const TABLES = 57723 -const TRIGGERS = 57724 -const USER = 57725 -const VGTID_EXECUTED = 57726 -const VITESS_KEYSPACES = 57727 -const VITESS_METADATA = 57728 -const VITESS_MIGRATIONS = 57729 -const VITESS_REPLICATION_STATUS = 57730 -const VITESS_SHARDS = 57731 -const VITESS_TABLETS = 57732 -const VITESS_TARGET = 57733 -const VSCHEMA = 57734 -const VITESS_THROTTLED_APPS = 57735 -const NAMES = 57736 -const GLOBAL = 57737 -const SESSION = 57738 -const ISOLATION = 57739 -const LEVEL = 57740 -const READ = 57741 -const WRITE = 57742 -const ONLY = 57743 -const REPEATABLE = 57744 -const COMMITTED = 57745 -const UNCOMMITTED = 57746 -const SERIALIZABLE = 57747 -const ADDDATE = 57748 -const CURRENT_TIMESTAMP = 57749 -const DATABASE = 57750 -const CURRENT_DATE = 57751 -const CURDATE = 57752 -const DATE_ADD = 57753 -const DATE_SUB = 57754 -const NOW = 57755 -const SUBDATE = 57756 -const CURTIME = 57757 -const CURRENT_TIME = 57758 -const LOCALTIME = 57759 -const LOCALTIMESTAMP = 57760 -const CURRENT_USER = 57761 -const UTC_DATE = 57762 -const UTC_TIME = 57763 -const UTC_TIMESTAMP = 57764 -const SYSDATE = 57765 -const DAY = 57766 -const DAY_HOUR = 57767 -const DAY_MICROSECOND = 57768 
-const DAY_MINUTE = 57769 -const DAY_SECOND = 57770 -const HOUR = 57771 -const HOUR_MICROSECOND = 57772 -const HOUR_MINUTE = 57773 -const HOUR_SECOND = 57774 -const MICROSECOND = 57775 -const MINUTE = 57776 -const MINUTE_MICROSECOND = 57777 -const MINUTE_SECOND = 57778 -const MONTH = 57779 -const QUARTER = 57780 -const SECOND = 57781 -const SECOND_MICROSECOND = 57782 -const YEAR_MONTH = 57783 -const WEEK = 57784 -const SQL_TSI_DAY = 57785 -const SQL_TSI_WEEK = 57786 -const SQL_TSI_HOUR = 57787 -const SQL_TSI_MINUTE = 57788 -const SQL_TSI_MONTH = 57789 -const SQL_TSI_QUARTER = 57790 -const SQL_TSI_SECOND = 57791 -const SQL_TSI_MICROSECOND = 57792 -const SQL_TSI_YEAR = 57793 -const REPLACE = 57794 -const CONVERT = 57795 -const CAST = 57796 -const SUBSTR = 57797 -const SUBSTRING = 57798 -const SEPARATOR = 57799 -const TIMESTAMPADD = 57800 -const TIMESTAMPDIFF = 57801 -const WEIGHT_STRING = 57802 -const LTRIM = 57803 -const RTRIM = 57804 -const TRIM = 57805 -const JSON_ARRAY = 57806 -const JSON_OBJECT = 57807 -const JSON_QUOTE = 57808 -const JSON_DEPTH = 57809 -const JSON_TYPE = 57810 -const JSON_LENGTH = 57811 -const JSON_VALID = 57812 -const JSON_ARRAY_APPEND = 57813 -const JSON_ARRAY_INSERT = 57814 -const JSON_INSERT = 57815 -const JSON_MERGE = 57816 -const JSON_MERGE_PATCH = 57817 -const JSON_MERGE_PRESERVE = 57818 -const JSON_REMOVE = 57819 -const JSON_REPLACE = 57820 -const JSON_SET = 57821 -const JSON_UNQUOTE = 57822 -const COUNT = 57823 -const AVG = 57824 -const MAX = 57825 -const MIN = 57826 -const SUM = 57827 -const GROUP_CONCAT = 57828 -const BIT_AND = 57829 -const BIT_OR = 57830 -const BIT_XOR = 57831 -const STD = 57832 -const STDDEV = 57833 -const STDDEV_POP = 57834 -const STDDEV_SAMP = 57835 -const VAR_POP = 57836 -const VAR_SAMP = 57837 -const VARIANCE = 57838 -const ANY_VALUE = 57839 -const REGEXP_INSTR = 57840 -const REGEXP_LIKE = 57841 -const REGEXP_REPLACE = 57842 -const REGEXP_SUBSTR = 57843 -const ExtractValue = 57844 -const UpdateXML = 57845 
-const GET_LOCK = 57846 -const RELEASE_LOCK = 57847 -const RELEASE_ALL_LOCKS = 57848 -const IS_FREE_LOCK = 57849 -const IS_USED_LOCK = 57850 -const LOCATE = 57851 -const POSITION = 57852 -const ST_GeometryCollectionFromText = 57853 -const ST_GeometryFromText = 57854 -const ST_LineStringFromText = 57855 -const ST_MultiLineStringFromText = 57856 -const ST_MultiPointFromText = 57857 -const ST_MultiPolygonFromText = 57858 -const ST_PointFromText = 57859 -const ST_PolygonFromText = 57860 -const ST_GeometryCollectionFromWKB = 57861 -const ST_GeometryFromWKB = 57862 -const ST_LineStringFromWKB = 57863 -const ST_MultiLineStringFromWKB = 57864 -const ST_MultiPointFromWKB = 57865 -const ST_MultiPolygonFromWKB = 57866 -const ST_PointFromWKB = 57867 -const ST_PolygonFromWKB = 57868 -const ST_AsBinary = 57869 -const ST_AsText = 57870 -const ST_Dimension = 57871 -const ST_Envelope = 57872 -const ST_IsSimple = 57873 -const ST_IsEmpty = 57874 -const ST_GeometryType = 57875 -const ST_X = 57876 -const ST_Y = 57877 -const ST_Latitude = 57878 -const ST_Longitude = 57879 -const ST_EndPoint = 57880 -const ST_IsClosed = 57881 -const ST_Length = 57882 -const ST_NumPoints = 57883 -const ST_StartPoint = 57884 -const ST_PointN = 57885 -const ST_Area = 57886 -const ST_Centroid = 57887 -const ST_ExteriorRing = 57888 -const ST_InteriorRingN = 57889 -const ST_NumInteriorRings = 57890 -const ST_NumGeometries = 57891 -const ST_GeometryN = 57892 -const ST_LongFromGeoHash = 57893 -const ST_PointFromGeoHash = 57894 -const ST_LatFromGeoHash = 57895 -const ST_GeoHash = 57896 -const ST_AsGeoJSON = 57897 -const ST_GeomFromGeoJSON = 57898 -const MATCH = 57899 -const AGAINST = 57900 -const BOOLEAN = 57901 -const LANGUAGE = 57902 -const WITH = 57903 -const QUERY = 57904 -const EXPANSION = 57905 -const WITHOUT = 57906 -const VALIDATION = 57907 -const UNUSED = 57908 -const ARRAY = 57909 -const BYTE = 57910 -const CUME_DIST = 57911 -const DESCRIPTION = 57912 -const DENSE_RANK = 57913 -const EMPTY = 57914 
-const EXCEPT = 57915 -const FIRST_VALUE = 57916 -const GROUPING = 57917 -const GROUPS = 57918 -const JSON_TABLE = 57919 -const LAG = 57920 -const LAST_VALUE = 57921 -const LATERAL = 57922 -const LEAD = 57923 -const NTH_VALUE = 57924 -const NTILE = 57925 -const OF = 57926 -const OVER = 57927 -const PERCENT_RANK = 57928 -const RANK = 57929 -const RECURSIVE = 57930 -const ROW_NUMBER = 57931 -const SYSTEM = 57932 -const WINDOW = 57933 -const ACTIVE = 57934 -const ADMIN = 57935 -const AUTOEXTEND_SIZE = 57936 -const BUCKETS = 57937 -const CLONE = 57938 -const COLUMN_FORMAT = 57939 -const COMPONENT = 57940 -const DEFINITION = 57941 -const ENFORCED = 57942 -const ENGINE_ATTRIBUTE = 57943 -const EXCLUDE = 57944 -const FOLLOWING = 57945 -const GET_MASTER_PUBLIC_KEY = 57946 -const HISTOGRAM = 57947 -const HISTORY = 57948 -const INACTIVE = 57949 -const INVISIBLE = 57950 -const LOCKED = 57951 -const MASTER_COMPRESSION_ALGORITHMS = 57952 -const MASTER_PUBLIC_KEY_PATH = 57953 -const MASTER_TLS_CIPHERSUITES = 57954 -const MASTER_ZSTD_COMPRESSION_LEVEL = 57955 -const NESTED = 57956 -const NETWORK_NAMESPACE = 57957 -const NOWAIT = 57958 -const NULLS = 57959 -const OJ = 57960 -const OLD = 57961 -const OPTIONAL = 57962 -const ORDINALITY = 57963 -const ORGANIZATION = 57964 -const OTHERS = 57965 -const PARTIAL = 57966 -const PATH = 57967 -const PERSIST = 57968 -const PERSIST_ONLY = 57969 -const PRECEDING = 57970 -const PRIVILEGE_CHECKS_USER = 57971 -const PROCESS = 57972 -const RANDOM = 57973 -const REFERENCE = 57974 -const REQUIRE_ROW_FORMAT = 57975 -const RESOURCE = 57976 -const RESPECT = 57977 -const RESTART = 57978 -const RETAIN = 57979 -const REUSE = 57980 -const ROLE = 57981 -const SECONDARY = 57982 -const SECONDARY_ENGINE = 57983 -const SECONDARY_ENGINE_ATTRIBUTE = 57984 -const SECONDARY_LOAD = 57985 -const SECONDARY_UNLOAD = 57986 -const SIMPLE = 57987 -const SKIP = 57988 -const SRID = 57989 -const THREAD_PRIORITY = 57990 -const TIES = 57991 -const UNBOUNDED = 57992 -const VCPU 
= 57993 -const VISIBLE = 57994 -const RETURNING = 57995 -const FORMAT_BYTES = 57996 -const FORMAT_PICO_TIME = 57997 -const PS_CURRENT_THREAD_ID = 57998 -const PS_THREAD_ID = 57999 -const GTID_SUBSET = 58000 -const GTID_SUBTRACT = 58001 -const WAIT_FOR_EXECUTED_GTID_SET = 58002 -const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 58003 -const FORMAT = 58004 -const TREE = 58005 -const VITESS = 58006 -const TRADITIONAL = 58007 -const VTEXPLAIN = 58008 -const VEXPLAIN = 58009 -const PLAN = 58010 -const LOCAL = 58011 -const LOW_PRIORITY = 58012 -const NO_WRITE_TO_BINLOG = 58013 -const LOGS = 58014 -const ERROR = 58015 -const GENERAL = 58016 -const HOSTS = 58017 -const OPTIMIZER_COSTS = 58018 -const USER_RESOURCES = 58019 -const SLOW = 58020 -const CHANNEL = 58021 -const RELAY = 58022 -const EXPORT = 58023 -const CURRENT = 58024 -const ROW = 58025 -const ROWS = 58026 -const AVG_ROW_LENGTH = 58027 -const CONNECTION = 58028 -const CHECKSUM = 58029 -const DELAY_KEY_WRITE = 58030 -const ENCRYPTION = 58031 -const ENGINE = 58032 -const INSERT_METHOD = 58033 -const MAX_ROWS = 58034 -const MIN_ROWS = 58035 -const PACK_KEYS = 58036 -const PASSWORD = 58037 -const FIXED = 58038 -const DYNAMIC = 58039 -const COMPRESSED = 58040 -const REDUNDANT = 58041 -const COMPACT = 58042 -const ROW_FORMAT = 58043 -const STATS_AUTO_RECALC = 58044 -const STATS_PERSISTENT = 58045 -const STATS_SAMPLE_PAGES = 58046 -const STORAGE = 58047 -const MEMORY = 58048 -const DISK = 58049 -const PARTITIONS = 58050 -const LINEAR = 58051 -const RANGE = 58052 -const LIST = 58053 -const SUBPARTITION = 58054 -const SUBPARTITIONS = 58055 -const HASH = 58056 +const MULTIPLE_TEXT_LITERAL = 57347 +const FUNCTION_CALL_NON_KEYWORD = 57348 +const STRING_TYPE_PREFIX_NON_KEYWORD = 57349 +const LEX_ERROR = 57350 +const UNION = 57351 +const SELECT = 57352 +const STREAM = 57353 +const VSTREAM = 57354 +const INSERT = 57355 +const UPDATE = 57356 +const DELETE = 57357 +const FROM = 57358 +const WHERE = 57359 +const GROUP = 57360 +const 
HAVING = 57361 +const ORDER = 57362 +const BY = 57363 +const LIMIT = 57364 +const OFFSET = 57365 +const FOR = 57366 +const ALL = 57367 +const DISTINCT = 57368 +const AS = 57369 +const EXISTS = 57370 +const ASC = 57371 +const DESC = 57372 +const INTO = 57373 +const DUPLICATE = 57374 +const DEFAULT = 57375 +const SET = 57376 +const LOCK = 57377 +const UNLOCK = 57378 +const KEYS = 57379 +const DO = 57380 +const CALL = 57381 +const DISTINCTROW = 57382 +const PARSER = 57383 +const GENERATED = 57384 +const ALWAYS = 57385 +const OUTFILE = 57386 +const S3 = 57387 +const DATA = 57388 +const LOAD = 57389 +const LINES = 57390 +const TERMINATED = 57391 +const ESCAPED = 57392 +const ENCLOSED = 57393 +const DUMPFILE = 57394 +const CSV = 57395 +const HEADER = 57396 +const MANIFEST = 57397 +const OVERWRITE = 57398 +const STARTING = 57399 +const OPTIONALLY = 57400 +const VALUES = 57401 +const LAST_INSERT_ID = 57402 +const NEXT = 57403 +const VALUE = 57404 +const SHARE = 57405 +const MODE = 57406 +const SQL_NO_CACHE = 57407 +const SQL_CACHE = 57408 +const SQL_CALC_FOUND_ROWS = 57409 +const JOIN = 57410 +const STRAIGHT_JOIN = 57411 +const LEFT = 57412 +const RIGHT = 57413 +const INNER = 57414 +const OUTER = 57415 +const CROSS = 57416 +const NATURAL = 57417 +const USE = 57418 +const FORCE = 57419 +const ON = 57420 +const USING = 57421 +const INPLACE = 57422 +const COPY = 57423 +const INSTANT = 57424 +const ALGORITHM = 57425 +const NONE = 57426 +const SHARED = 57427 +const EXCLUSIVE = 57428 +const SUBQUERY_AS_EXPR = 57429 +const STRING = 57430 +const ID = 57431 +const AT_ID = 57432 +const AT_AT_ID = 57433 +const HEX = 57434 +const NCHAR_STRING = 57435 +const INTEGRAL = 57436 +const FLOAT = 57437 +const DECIMAL = 57438 +const HEXNUM = 57439 +const COMMENT = 57440 +const COMMENT_KEYWORD = 57441 +const BITNUM = 57442 +const BIT_LITERAL = 57443 +const COMPRESSION = 57444 +const VALUE_ARG = 57445 +const LIST_ARG = 57446 +const OFFSET_ARG = 57447 +const JSON_PRETTY = 57448 +const 
JSON_STORAGE_SIZE = 57449 +const JSON_STORAGE_FREE = 57450 +const JSON_CONTAINS = 57451 +const JSON_CONTAINS_PATH = 57452 +const JSON_EXTRACT = 57453 +const JSON_KEYS = 57454 +const JSON_OVERLAPS = 57455 +const JSON_SEARCH = 57456 +const JSON_VALUE = 57457 +const EXTRACT = 57458 +const NULL = 57459 +const UNKNOWN = 57460 +const TRUE = 57461 +const FALSE = 57462 +const OFF = 57463 +const DISCARD = 57464 +const IMPORT = 57465 +const ENABLE = 57466 +const DISABLE = 57467 +const TABLESPACE = 57468 +const VIRTUAL = 57469 +const STORED = 57470 +const BOTH = 57471 +const LEADING = 57472 +const TRAILING = 57473 +const KILL = 57474 +const EMPTY_FROM_CLAUSE = 57475 +const LOWER_THAN_CHARSET = 57476 +const CHARSET = 57477 +const UNIQUE = 57478 +const KEY = 57479 +const EXPRESSION_PREC_SETTER = 57480 +const OR = 57481 +const XOR = 57482 +const AND = 57483 +const NOT = 57484 +const BETWEEN = 57485 +const CASE = 57486 +const WHEN = 57487 +const THEN = 57488 +const ELSE = 57489 +const END = 57490 +const LE = 57491 +const GE = 57492 +const NE = 57493 +const NULL_SAFE_EQUAL = 57494 +const IS = 57495 +const LIKE = 57496 +const REGEXP = 57497 +const RLIKE = 57498 +const IN = 57499 +const ASSIGNMENT_OPT = 57500 +const SHIFT_LEFT = 57501 +const SHIFT_RIGHT = 57502 +const DIV = 57503 +const MOD = 57504 +const UNARY = 57505 +const COLLATE = 57506 +const BINARY = 57507 +const UNDERSCORE_ARMSCII8 = 57508 +const UNDERSCORE_ASCII = 57509 +const UNDERSCORE_BIG5 = 57510 +const UNDERSCORE_BINARY = 57511 +const UNDERSCORE_CP1250 = 57512 +const UNDERSCORE_CP1251 = 57513 +const UNDERSCORE_CP1256 = 57514 +const UNDERSCORE_CP1257 = 57515 +const UNDERSCORE_CP850 = 57516 +const UNDERSCORE_CP852 = 57517 +const UNDERSCORE_CP866 = 57518 +const UNDERSCORE_CP932 = 57519 +const UNDERSCORE_DEC8 = 57520 +const UNDERSCORE_EUCJPMS = 57521 +const UNDERSCORE_EUCKR = 57522 +const UNDERSCORE_GB18030 = 57523 +const UNDERSCORE_GB2312 = 57524 +const UNDERSCORE_GBK = 57525 +const UNDERSCORE_GEOSTD8 = 57526 +const 
UNDERSCORE_GREEK = 57527 +const UNDERSCORE_HEBREW = 57528 +const UNDERSCORE_HP8 = 57529 +const UNDERSCORE_KEYBCS2 = 57530 +const UNDERSCORE_KOI8R = 57531 +const UNDERSCORE_KOI8U = 57532 +const UNDERSCORE_LATIN1 = 57533 +const UNDERSCORE_LATIN2 = 57534 +const UNDERSCORE_LATIN5 = 57535 +const UNDERSCORE_LATIN7 = 57536 +const UNDERSCORE_MACCE = 57537 +const UNDERSCORE_MACROMAN = 57538 +const UNDERSCORE_SJIS = 57539 +const UNDERSCORE_SWE7 = 57540 +const UNDERSCORE_TIS620 = 57541 +const UNDERSCORE_UCS2 = 57542 +const UNDERSCORE_UJIS = 57543 +const UNDERSCORE_UTF16 = 57544 +const UNDERSCORE_UTF16LE = 57545 +const UNDERSCORE_UTF32 = 57546 +const UNDERSCORE_UTF8 = 57547 +const UNDERSCORE_UTF8MB4 = 57548 +const UNDERSCORE_UTF8MB3 = 57549 +const INTERVAL = 57550 +const WINDOW_EXPR = 57551 +const JSON_EXTRACT_OP = 57552 +const JSON_UNQUOTE_EXTRACT_OP = 57553 +const CREATE = 57554 +const ALTER = 57555 +const DROP = 57556 +const RENAME = 57557 +const ANALYZE = 57558 +const ADD = 57559 +const FLUSH = 57560 +const CHANGE = 57561 +const MODIFY = 57562 +const DEALLOCATE = 57563 +const REVERT = 57564 +const QUERIES = 57565 +const SCHEMA = 57566 +const TABLE = 57567 +const INDEX = 57568 +const VIEW = 57569 +const TO = 57570 +const IGNORE = 57571 +const IF = 57572 +const PRIMARY = 57573 +const COLUMN = 57574 +const SPATIAL = 57575 +const FULLTEXT = 57576 +const KEY_BLOCK_SIZE = 57577 +const CHECK = 57578 +const INDEXES = 57579 +const ACTION = 57580 +const CASCADE = 57581 +const CONSTRAINT = 57582 +const FOREIGN = 57583 +const NO = 57584 +const REFERENCES = 57585 +const RESTRICT = 57586 +const SHOW = 57587 +const DESCRIBE = 57588 +const EXPLAIN = 57589 +const DATE = 57590 +const ESCAPE = 57591 +const REPAIR = 57592 +const OPTIMIZE = 57593 +const TRUNCATE = 57594 +const COALESCE = 57595 +const EXCHANGE = 57596 +const REBUILD = 57597 +const PARTITIONING = 57598 +const REMOVE = 57599 +const PREPARE = 57600 +const EXECUTE = 57601 +const MAXVALUE = 57602 +const PARTITION = 57603 +const 
REORGANIZE = 57604 +const LESS = 57605 +const THAN = 57606 +const PROCEDURE = 57607 +const TRIGGER = 57608 +const VINDEX = 57609 +const VINDEXES = 57610 +const DIRECTORY = 57611 +const NAME = 57612 +const UPGRADE = 57613 +const STATUS = 57614 +const VARIABLES = 57615 +const WARNINGS = 57616 +const CASCADED = 57617 +const DEFINER = 57618 +const OPTION = 57619 +const SQL = 57620 +const UNDEFINED = 57621 +const SEQUENCE = 57622 +const MERGE = 57623 +const TEMPORARY = 57624 +const TEMPTABLE = 57625 +const INVOKER = 57626 +const SECURITY = 57627 +const FIRST = 57628 +const AFTER = 57629 +const LAST = 57630 +const VITESS_MIGRATION = 57631 +const CANCEL = 57632 +const RETRY = 57633 +const LAUNCH = 57634 +const COMPLETE = 57635 +const CLEANUP = 57636 +const THROTTLE = 57637 +const UNTHROTTLE = 57638 +const FORCE_CUTOVER = 57639 +const EXPIRE = 57640 +const RATIO = 57641 +const VITESS_THROTTLER = 57642 +const BEGIN = 57643 +const START = 57644 +const TRANSACTION = 57645 +const COMMIT = 57646 +const ROLLBACK = 57647 +const SAVEPOINT = 57648 +const RELEASE = 57649 +const WORK = 57650 +const CONSISTENT = 57651 +const SNAPSHOT = 57652 +const BIT = 57653 +const TINYINT = 57654 +const SMALLINT = 57655 +const MEDIUMINT = 57656 +const INT = 57657 +const INTEGER = 57658 +const BIGINT = 57659 +const INTNUM = 57660 +const REAL = 57661 +const DOUBLE = 57662 +const FLOAT_TYPE = 57663 +const FLOAT4_TYPE = 57664 +const FLOAT8_TYPE = 57665 +const DECIMAL_TYPE = 57666 +const NUMERIC = 57667 +const TIME = 57668 +const TIMESTAMP = 57669 +const DATETIME = 57670 +const YEAR = 57671 +const CHAR = 57672 +const VARCHAR = 57673 +const BOOL = 57674 +const CHARACTER = 57675 +const VARBINARY = 57676 +const NCHAR = 57677 +const TEXT = 57678 +const TINYTEXT = 57679 +const MEDIUMTEXT = 57680 +const LONGTEXT = 57681 +const BLOB = 57682 +const TINYBLOB = 57683 +const MEDIUMBLOB = 57684 +const LONGBLOB = 57685 +const JSON = 57686 +const JSON_SCHEMA_VALID = 57687 +const JSON_SCHEMA_VALIDATION_REPORT = 57688 
+const ENUM = 57689 +const GEOMETRY = 57690 +const POINT = 57691 +const LINESTRING = 57692 +const POLYGON = 57693 +const GEOMCOLLECTION = 57694 +const GEOMETRYCOLLECTION = 57695 +const MULTIPOINT = 57696 +const MULTILINESTRING = 57697 +const MULTIPOLYGON = 57698 +const ASCII = 57699 +const UNICODE = 57700 +const NULLX = 57701 +const AUTO_INCREMENT = 57702 +const APPROXNUM = 57703 +const SIGNED = 57704 +const UNSIGNED = 57705 +const ZEROFILL = 57706 +const PURGE = 57707 +const BEFORE = 57708 +const CODE = 57709 +const COLLATION = 57710 +const COLUMNS = 57711 +const DATABASES = 57712 +const ENGINES = 57713 +const EVENT = 57714 +const EXTENDED = 57715 +const FIELDS = 57716 +const FULL = 57717 +const FUNCTION = 57718 +const GTID_EXECUTED = 57719 +const KEYSPACES = 57720 +const OPEN = 57721 +const PLUGINS = 57722 +const PRIVILEGES = 57723 +const PROCESSLIST = 57724 +const SCHEMAS = 57725 +const TABLES = 57726 +const TRIGGERS = 57727 +const USER = 57728 +const VGTID_EXECUTED = 57729 +const VITESS_KEYSPACES = 57730 +const VITESS_METADATA = 57731 +const VITESS_MIGRATIONS = 57732 +const VITESS_REPLICATION_STATUS = 57733 +const VITESS_SHARDS = 57734 +const VITESS_TABLETS = 57735 +const VITESS_TARGET = 57736 +const VSCHEMA = 57737 +const VITESS_THROTTLED_APPS = 57738 +const NAMES = 57739 +const GLOBAL = 57740 +const SESSION = 57741 +const ISOLATION = 57742 +const LEVEL = 57743 +const READ = 57744 +const WRITE = 57745 +const ONLY = 57746 +const REPEATABLE = 57747 +const COMMITTED = 57748 +const UNCOMMITTED = 57749 +const SERIALIZABLE = 57750 +const ADDDATE = 57751 +const CURRENT_TIMESTAMP = 57752 +const DATABASE = 57753 +const CURRENT_DATE = 57754 +const CURDATE = 57755 +const DATE_ADD = 57756 +const DATE_SUB = 57757 +const NOW = 57758 +const SUBDATE = 57759 +const CURTIME = 57760 +const CURRENT_TIME = 57761 +const LOCALTIME = 57762 +const LOCALTIMESTAMP = 57763 +const CURRENT_USER = 57764 +const UTC_DATE = 57765 +const UTC_TIME = 57766 +const UTC_TIMESTAMP = 57767 +const 
SYSDATE = 57768 +const DAY = 57769 +const DAY_HOUR = 57770 +const DAY_MICROSECOND = 57771 +const DAY_MINUTE = 57772 +const DAY_SECOND = 57773 +const HOUR = 57774 +const HOUR_MICROSECOND = 57775 +const HOUR_MINUTE = 57776 +const HOUR_SECOND = 57777 +const MICROSECOND = 57778 +const MINUTE = 57779 +const MINUTE_MICROSECOND = 57780 +const MINUTE_SECOND = 57781 +const MONTH = 57782 +const QUARTER = 57783 +const SECOND = 57784 +const SECOND_MICROSECOND = 57785 +const YEAR_MONTH = 57786 +const WEEK = 57787 +const SQL_TSI_DAY = 57788 +const SQL_TSI_WEEK = 57789 +const SQL_TSI_HOUR = 57790 +const SQL_TSI_MINUTE = 57791 +const SQL_TSI_MONTH = 57792 +const SQL_TSI_QUARTER = 57793 +const SQL_TSI_SECOND = 57794 +const SQL_TSI_MICROSECOND = 57795 +const SQL_TSI_YEAR = 57796 +const REPLACE = 57797 +const CONVERT = 57798 +const CAST = 57799 +const SUBSTR = 57800 +const SUBSTRING = 57801 +const MID = 57802 +const SEPARATOR = 57803 +const TIMESTAMPADD = 57804 +const TIMESTAMPDIFF = 57805 +const WEIGHT_STRING = 57806 +const LTRIM = 57807 +const RTRIM = 57808 +const TRIM = 57809 +const JSON_ARRAY = 57810 +const JSON_OBJECT = 57811 +const JSON_QUOTE = 57812 +const JSON_DEPTH = 57813 +const JSON_TYPE = 57814 +const JSON_LENGTH = 57815 +const JSON_VALID = 57816 +const JSON_ARRAY_APPEND = 57817 +const JSON_ARRAY_INSERT = 57818 +const JSON_INSERT = 57819 +const JSON_MERGE = 57820 +const JSON_MERGE_PATCH = 57821 +const JSON_MERGE_PRESERVE = 57822 +const JSON_REMOVE = 57823 +const JSON_REPLACE = 57824 +const JSON_SET = 57825 +const JSON_UNQUOTE = 57826 +const COUNT = 57827 +const AVG = 57828 +const MAX = 57829 +const MIN = 57830 +const SUM = 57831 +const GROUP_CONCAT = 57832 +const BIT_AND = 57833 +const BIT_OR = 57834 +const BIT_XOR = 57835 +const STD = 57836 +const STDDEV = 57837 +const STDDEV_POP = 57838 +const STDDEV_SAMP = 57839 +const VAR_POP = 57840 +const VAR_SAMP = 57841 +const VARIANCE = 57842 +const ANY_VALUE = 57843 +const REGEXP_INSTR = 57844 +const REGEXP_LIKE = 57845 +const 
REGEXP_REPLACE = 57846 +const REGEXP_SUBSTR = 57847 +const ExtractValue = 57848 +const UpdateXML = 57849 +const GET_LOCK = 57850 +const RELEASE_LOCK = 57851 +const RELEASE_ALL_LOCKS = 57852 +const IS_FREE_LOCK = 57853 +const IS_USED_LOCK = 57854 +const LOCATE = 57855 +const POSITION = 57856 +const ST_GeometryCollectionFromText = 57857 +const ST_GeometryFromText = 57858 +const ST_LineStringFromText = 57859 +const ST_MultiLineStringFromText = 57860 +const ST_MultiPointFromText = 57861 +const ST_MultiPolygonFromText = 57862 +const ST_PointFromText = 57863 +const ST_PolygonFromText = 57864 +const ST_GeometryCollectionFromWKB = 57865 +const ST_GeometryFromWKB = 57866 +const ST_LineStringFromWKB = 57867 +const ST_MultiLineStringFromWKB = 57868 +const ST_MultiPointFromWKB = 57869 +const ST_MultiPolygonFromWKB = 57870 +const ST_PointFromWKB = 57871 +const ST_PolygonFromWKB = 57872 +const ST_AsBinary = 57873 +const ST_AsText = 57874 +const ST_Dimension = 57875 +const ST_Envelope = 57876 +const ST_IsSimple = 57877 +const ST_IsEmpty = 57878 +const ST_GeometryType = 57879 +const ST_X = 57880 +const ST_Y = 57881 +const ST_Latitude = 57882 +const ST_Longitude = 57883 +const ST_EndPoint = 57884 +const ST_IsClosed = 57885 +const ST_Length = 57886 +const ST_NumPoints = 57887 +const ST_StartPoint = 57888 +const ST_PointN = 57889 +const ST_Area = 57890 +const ST_Centroid = 57891 +const ST_ExteriorRing = 57892 +const ST_InteriorRingN = 57893 +const ST_NumInteriorRings = 57894 +const ST_NumGeometries = 57895 +const ST_GeometryN = 57896 +const ST_LongFromGeoHash = 57897 +const ST_PointFromGeoHash = 57898 +const ST_LatFromGeoHash = 57899 +const ST_GeoHash = 57900 +const ST_AsGeoJSON = 57901 +const ST_GeomFromGeoJSON = 57902 +const MATCH = 57903 +const AGAINST = 57904 +const BOOLEAN = 57905 +const LANGUAGE = 57906 +const WITH = 57907 +const QUERY = 57908 +const EXPANSION = 57909 +const WITHOUT = 57910 +const VALIDATION = 57911 +const ROLLUP = 57912 +const UNUSED = 57913 +const ARRAY = 
57914 +const BYTE = 57915 +const CUME_DIST = 57916 +const DESCRIPTION = 57917 +const DENSE_RANK = 57918 +const EMPTY = 57919 +const EXCEPT = 57920 +const FIRST_VALUE = 57921 +const GROUPING = 57922 +const GROUPS = 57923 +const JSON_TABLE = 57924 +const LAG = 57925 +const LAST_VALUE = 57926 +const LATERAL = 57927 +const LEAD = 57928 +const NTH_VALUE = 57929 +const NTILE = 57930 +const OF = 57931 +const OVER = 57932 +const PERCENT_RANK = 57933 +const RANK = 57934 +const RECURSIVE = 57935 +const ROW_NUMBER = 57936 +const SYSTEM = 57937 +const WINDOW = 57938 +const ACTIVE = 57939 +const ADMIN = 57940 +const AUTOEXTEND_SIZE = 57941 +const BUCKETS = 57942 +const CLONE = 57943 +const COLUMN_FORMAT = 57944 +const COMPONENT = 57945 +const DEFINITION = 57946 +const ENFORCED = 57947 +const ENGINE_ATTRIBUTE = 57948 +const EXCLUDE = 57949 +const FOLLOWING = 57950 +const GET_MASTER_PUBLIC_KEY = 57951 +const HISTOGRAM = 57952 +const HISTORY = 57953 +const INACTIVE = 57954 +const INVISIBLE = 57955 +const LOCKED = 57956 +const MASTER_COMPRESSION_ALGORITHMS = 57957 +const MASTER_PUBLIC_KEY_PATH = 57958 +const MASTER_TLS_CIPHERSUITES = 57959 +const MASTER_ZSTD_COMPRESSION_LEVEL = 57960 +const NESTED = 57961 +const NETWORK_NAMESPACE = 57962 +const NOWAIT = 57963 +const NULLS = 57964 +const OJ = 57965 +const OLD = 57966 +const OPTIONAL = 57967 +const ORDINALITY = 57968 +const ORGANIZATION = 57969 +const OTHERS = 57970 +const PARTIAL = 57971 +const PATH = 57972 +const PERSIST = 57973 +const PERSIST_ONLY = 57974 +const PRECEDING = 57975 +const PRIVILEGE_CHECKS_USER = 57976 +const PROCESS = 57977 +const RANDOM = 57978 +const REFERENCE = 57979 +const REQUIRE_ROW_FORMAT = 57980 +const RESOURCE = 57981 +const RESPECT = 57982 +const RESTART = 57983 +const RETAIN = 57984 +const REUSE = 57985 +const ROLE = 57986 +const SECONDARY = 57987 +const SECONDARY_ENGINE = 57988 +const SECONDARY_ENGINE_ATTRIBUTE = 57989 +const SECONDARY_LOAD = 57990 +const SECONDARY_UNLOAD = 57991 +const SIMPLE = 57992 
+const SKIP = 57993 +const SRID = 57994 +const THREAD_PRIORITY = 57995 +const TIES = 57996 +const UNBOUNDED = 57997 +const VCPU = 57998 +const VISIBLE = 57999 +const RETURNING = 58000 +const FORMAT_BYTES = 58001 +const FORMAT_PICO_TIME = 58002 +const PS_CURRENT_THREAD_ID = 58003 +const PS_THREAD_ID = 58004 +const GTID_SUBSET = 58005 +const GTID_SUBTRACT = 58006 +const WAIT_FOR_EXECUTED_GTID_SET = 58007 +const WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS = 58008 +const FORMAT = 58009 +const TREE = 58010 +const VITESS = 58011 +const TRADITIONAL = 58012 +const VTEXPLAIN = 58013 +const VEXPLAIN = 58014 +const PLAN = 58015 +const LOCAL = 58016 +const LOW_PRIORITY = 58017 +const NO_WRITE_TO_BINLOG = 58018 +const LOGS = 58019 +const ERROR = 58020 +const GENERAL = 58021 +const HOSTS = 58022 +const OPTIMIZER_COSTS = 58023 +const USER_RESOURCES = 58024 +const SLOW = 58025 +const CHANNEL = 58026 +const RELAY = 58027 +const EXPORT = 58028 +const CURRENT = 58029 +const ROW = 58030 +const ROWS = 58031 +const AVG_ROW_LENGTH = 58032 +const CONNECTION = 58033 +const CHECKSUM = 58034 +const DELAY_KEY_WRITE = 58035 +const ENCRYPTION = 58036 +const ENGINE = 58037 +const INSERT_METHOD = 58038 +const MAX_ROWS = 58039 +const MIN_ROWS = 58040 +const PACK_KEYS = 58041 +const PASSWORD = 58042 +const FIXED = 58043 +const DYNAMIC = 58044 +const COMPRESSED = 58045 +const REDUNDANT = 58046 +const COMPACT = 58047 +const ROW_FORMAT = 58048 +const STATS_AUTO_RECALC = 58049 +const STATS_PERSISTENT = 58050 +const STATS_SAMPLE_PAGES = 58051 +const STORAGE = 58052 +const MEMORY = 58053 +const DISK = 58054 +const PARTITIONS = 58055 +const LINEAR = 58056 +const RANGE = 58057 +const LIST = 58058 +const SUBPARTITION = 58059 +const SUBPARTITIONS = 58060 +const HASH = 58061 var yyToknames = [...]string{ "$end", "error", "$unk", "MEMBER", + "MULTIPLE_TEXT_LITERAL", "FUNCTION_CALL_NON_KEYWORD", "STRING_TYPE_PREFIX_NON_KEYWORD", "LEX_ERROR", @@ -865,6 +873,7 @@ var yyToknames = [...]string{ "JSON_VALUE", "EXTRACT", 
"NULL", + "UNKNOWN", "TRUE", "FALSE", "OFF", @@ -1057,6 +1066,7 @@ var yyToknames = [...]string{ "CLEANUP", "THROTTLE", "UNTHROTTLE", + "FORCE_CUTOVER", "EXPIRE", "RATIO", "VITESS_THROTTLER", @@ -1219,6 +1229,7 @@ var yyToknames = [...]string{ "CAST", "SUBSTR", "SUBSTRING", + "MID", "SEPARATOR", "TIMESTAMPADD", "TIMESTAMPDIFF", @@ -1328,6 +1339,7 @@ var yyToknames = [...]string{ "EXPANSION", "WITHOUT", "VALIDATION", + "ROLLUP", "UNUSED", "ARRAY", "BYTE", @@ -1492,2795 +1504,3028 @@ var yyExca = [...]int{ 1, -1, -2, 0, -1, 2, - 13, 51, - 14, 51, + 14, 49, + 15, 49, -2, 40, -1, 52, - 1, 159, - 732, 159, - -2, 167, + 1, 157, + 737, 157, + -2, 165, -1, 53, - 136, 167, - 178, 167, - 347, 167, + 138, 165, + 180, 165, + 350, 165, -2, 523, -1, 61, - 36, 774, - 241, 774, - 252, 774, - 287, 788, - 288, 788, - -2, 776, + 37, 777, + 243, 777, + 254, 777, + 289, 791, + 290, 791, + -2, 779, -1, 66, - 243, 812, - -2, 810, + 245, 815, + -2, 813, -1, 122, - 240, 1587, - -2, 133, + 242, 1602, + -2, 131, -1, 124, - 1, 160, - 732, 160, - -2, 167, + 1, 158, + 737, 158, + -2, 165, -1, 135, - 137, 408, - 246, 408, + 139, 408, + 248, 408, -2, 512, -1, 154, - 136, 167, - 178, 167, - 347, 167, + 138, 165, + 180, 165, + 350, 165, -2, 532, - -1, 733, - 164, 41, - -2, 45, - -1, 939, - 87, 1604, - -2, 1458, - -1, 940, - 87, 1605, - 223, 1609, - -2, 1459, - -1, 941, - 223, 1608, + -1, 737, + 166, 41, + -2, 43, + -1, 944, + 88, 1619, + -2, 1463, + -1, 945, + 88, 1620, + 225, 1624, + -2, 1464, + -1, 946, + 225, 1623, -2, 42, - -1, 1024, - 60, 886, - -2, 901, - -1, 1111, - 251, 43, - 256, 43, + -1, 1030, + 61, 887, + -2, 900, + -1, 1118, + 253, 1093, + 258, 1093, -2, 419, - -1, 1196, + -1, 1203, 1, 580, - 732, 580, - -2, 167, - -1, 1498, - 223, 1609, - -2, 1459, - -1, 1707, - 60, 887, - -2, 906, - -1, 1708, - 60, 888, - -2, 907, - -1, 1759, - 136, 167, - 178, 167, - 347, 167, + 737, 580, + -2, 165, + -1, 1506, + 225, 1624, + -2, 1464, + -1, 1717, + 61, 888, + -2, 904, + -1, 1718, + 61, 889, + -2, 
905, + -1, 1774, + 138, 165, + 180, 165, + 350, 165, -2, 458, - -1, 1840, - 137, 408, - 246, 408, + -1, 1855, + 139, 408, + 248, 408, -2, 512, - -1, 1849, - 251, 44, - 256, 44, + -1, 1864, + 253, 1094, + 258, 1094, -2, 420, - -1, 2287, - 223, 1613, - -2, 1607, - -1, 2288, - 223, 1609, - -2, 1605, - -1, 2388, - 136, 167, - 178, 167, - 347, 167, + -1, 2304, + 225, 1628, + -2, 1622, + -1, 2305, + 225, 1624, + -2, 1620, + -1, 2408, + 138, 165, + 180, 165, + 350, 165, -2, 459, - -1, 2395, - 26, 188, - -2, 190, - -1, 2849, - 78, 98, - 88, 98, - -2, 965, - -1, 2918, - 707, 698, - -2, 672, - -1, 3126, - 50, 1555, - -2, 1549, - -1, 3941, - 707, 698, - -2, 686, - -1, 4028, - 90, 630, - 95, 630, - 105, 630, - 180, 630, - 181, 630, - 182, 630, - 183, 630, - 184, 630, - 185, 630, - 186, 630, - 187, 630, - 188, 630, - 189, 630, - 190, 630, - 191, 630, - 192, 630, - 193, 630, - 194, 630, - 195, 630, - 196, 630, - 197, 630, - 198, 630, - 199, 630, - 200, 630, - 201, 630, - 202, 630, - 203, 630, - 204, 630, - 205, 630, - 206, 630, - 207, 630, - 208, 630, - 209, 630, - 210, 630, - 211, 630, - 212, 630, - 213, 630, - 214, 630, - 215, 630, - 216, 630, - 217, 630, - 218, 630, - 219, 630, - 220, 630, - 221, 630, - -2, 1976, + -1, 2415, + 27, 186, + -2, 188, + -1, 2869, + 79, 96, + 89, 96, + -2, 963, + -1, 2938, + 712, 700, + -2, 674, + -1, 3160, + 51, 1567, + -2, 1561, + -1, 3994, + 712, 700, + -2, 688, + -1, 4086, + 91, 632, + 96, 632, + 106, 632, + 182, 632, + 183, 632, + 184, 632, + 185, 632, + 186, 632, + 187, 632, + 188, 632, + 189, 632, + 190, 632, + 191, 632, + 192, 632, + 193, 632, + 194, 632, + 195, 632, + 196, 632, + 197, 632, + 198, 632, + 199, 632, + 200, 632, + 201, 632, + 202, 632, + 203, 632, + 204, 632, + 205, 632, + 206, 632, + 207, 632, + 208, 632, + 209, 632, + 210, 632, + 211, 632, + 212, 632, + 213, 632, + 214, 632, + 215, 632, + 216, 632, + 217, 632, + 218, 632, + 219, 632, + 220, 632, + 221, 632, + 222, 632, + 223, 632, + -2, 1993, } const yyPrivate = 57344 -const 
yyLast = 54976 +const yyLast = 56041 var yyAct = [...]int{ - 955, 3603, 3604, 87, 3602, 4026, 4103, 3922, 943, 3278, - 4116, 4007, 4070, 1263, 950, 3554, 942, 2081, 4071, 2385, - 1968, 3995, 3906, 3831, 2316, 3178, 3407, 3185, 3227, 2093, - 3236, 3241, 3238, 3237, 3235, 3240, 1762, 1261, 3139, 2024, - 3904, 3239, 5, 3541, 2745, 2318, 3256, 3079, 2459, 737, - 3193, 3255, 3143, 3140, 3452, 3446, 3641, 2982, 2340, 3127, - 2809, 731, 3438, 764, 904, 2356, 903, 2422, 908, 3972, - 3258, 42, 1818, 732, 2883, 3285, 2964, 2915, 2447, 1722, - 3472, 2427, 2885, 2884, 2359, 2373, 1022, 1073, 87, 2490, - 2360, 1041, 163, 1143, 1019, 2834, 1865, 2815, 2361, 41, - 3137, 2271, 2785, 1709, 2801, 2239, 43, 1022, 2283, 2238, - 2077, 2116, 2468, 2956, 2446, 2032, 149, 2348, 2429, 1847, - 1106, 1101, 1083, 2507, 2876, 1751, 2851, 1731, 2363, 1119, - 100, 2822, 1688, 1510, 104, 2120, 2336, 105, 2052, 1437, - 1422, 1964, 1854, 747, 3142, 1077, 1080, 2444, 1109, 1112, - 1946, 1081, 2418, 2419, 1021, 1107, 1025, 1108, 1750, 1058, - 1736, 1060, 2189, 735, 3636, 2128, 1031, 2147, 1040, 742, - 3894, 2783, 2341, 107, 1470, 1043, 1028, 2023, 1252, 85, - 1976, 1813, 167, 127, 125, 126, 99, 1026, 1192, 905, - 1494, 1017, 1839, 132, 1027, 133, 1053, 741, 1029, 734, - 98, 4104, 1259, 3542, 1238, 2284, 106, 1514, 2461, 2462, - 2463, 84, 1519, 93, 3224, 3957, 2461, 724, 2938, 2937, - 2505, 2906, 3534, 1048, 1052, 4053, 1016, 2972, 2973, 3953, - 1034, 3952, 2313, 2314, 669, 128, 2039, 3497, 2038, 2037, - 1074, 3958, 2036, 2035, 2034, 1145, 134, 1148, 2007, 1208, - 666, 1684, 667, 4047, 2553, 2781, 3123, 4074, 1162, 1163, - 1164, 1931, 1167, 1168, 1169, 1170, 1123, 3083, 1173, 1174, - 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, - 1185, 1186, 1187, 1188, 1189, 1122, 1067, 1035, 1156, 1068, - 725, 1018, 2811, 95, 1209, 3607, 2, 1020, 1090, 1085, - 709, 1098, 3931, 2908, 1149, 1152, 1153, 128, 1097, 1096, - 1095, 95, 3246, 4126, 1433, 4069, 4094, 1454, 2494, 4109, - 1042, 4057, 4055, 3412, 
3411, 709, 727, 2931, 3907, 2928, - 2055, 2746, 3953, 2044, 95, 3607, 3304, 909, 3827, 3246, - 1165, 111, 112, 113, 4108, 116, 4056, 4054, 122, 190, - 1015, 191, 3243, 3826, 661, 703, 1066, 1070, 907, 1066, - 1070, 907, 2493, 1147, 1146, 4084, 722, 723, 3244, 190, - 703, 3547, 3837, 129, 3548, 128, 1010, 1011, 1012, 1013, - 4051, 95, 3606, 1024, 3566, 3555, 172, 959, 960, 961, - 3996, 4004, 1716, 129, 3250, 3244, 2487, 1099, 1424, 3836, - 2086, 703, 4031, 3324, 703, 1828, 172, 3175, 3176, 86, - 2782, 1055, 1056, 700, 959, 960, 961, 86, 3174, 2945, - 2946, 3250, 3606, 1245, 2860, 1247, 2971, 2859, 86, 2865, - 2861, 2559, 2562, 2825, 2380, 2381, 1752, 4008, 1753, 4036, - 2379, 2955, 169, 2016, 2017, 170, 1256, 1228, 2492, 1094, - 1008, 1201, 1202, 1451, 1007, 1452, 1453, 4034, 2826, 3923, - 1972, 685, 169, 1244, 1246, 170, 4040, 4041, 189, 1229, - 2872, 1222, 3282, 1216, 683, 3195, 3196, 3654, 1217, 2398, - 2397, 3936, 4035, 1204, 1233, 1234, 3565, 95, 189, 86, - 1191, 3017, 88, 703, 2438, 95, 703, 2560, 3280, 2818, - 2819, 703, 3312, 2315, 3310, 1216, 95, 1092, 3247, 4075, - 1217, 2551, 2015, 4012, 680, 717, 2019, 2432, 1215, 721, - 1214, 1748, 1692, 695, 715, 1471, 3286, 2957, 1434, 2983, - 4076, 3878, 703, 3879, 2916, 3247, 2344, 2469, 690, 2941, - 4012, 2508, 4106, 1921, 2344, 1947, 1249, 3301, 693, 1472, - 1473, 1474, 1475, 1476, 1477, 1478, 1480, 1479, 1481, 1482, - 1423, 1255, 3273, 1230, 3283, 1223, 704, 95, 1254, 1242, - 3274, 1231, 1232, 1243, 1166, 2512, 2554, 2555, 2557, 2556, - 1237, 704, 173, 1248, 3194, 1059, 1197, 1922, 1235, 1923, - 3281, 179, 2959, 2514, 3536, 3535, 3197, 1973, 1236, 2529, - 2532, 2530, 173, 2531, 1172, 1171, 2510, 1121, 1241, 3811, - 2472, 179, 704, 2985, 1832, 704, 670, 2511, 672, 686, - 3611, 706, 2357, 705, 676, 3449, 674, 678, 687, 679, - 2513, 673, 1103, 684, 3532, 1093, 675, 688, 689, 692, - 696, 697, 698, 694, 691, 1141, 682, 707, 2515, 4048, - 1102, 1140, 1139, 1138, 1103, 1137, 3018, 2521, 2517, 2519, - 2520, 2518, 2522, 
2523, 1136, 1135, 2431, 1134, 1129, 1142, - 1695, 3197, 4081, 4127, 1485, 3082, 1069, 1063, 1061, 1069, - 1063, 1061, 1078, 2995, 2994, 2993, 1078, 1115, 2987, 1078, - 2991, 1151, 2986, 1076, 2984, 1114, 1965, 1114, 2445, 2989, - 1260, 1150, 1260, 1260, 704, 2909, 1054, 704, 2988, 2960, - 1120, 2498, 704, 2497, 3217, 164, 1114, 1117, 1118, 1961, - 1078, 1425, 1159, 2940, 1111, 1115, 2990, 2992, 2342, 2343, - 1826, 1825, 1824, 2926, 1962, 164, 2342, 2343, 1822, 1207, - 660, 2954, 4049, 704, 2953, 1110, 3919, 1749, 3486, 3531, - 1022, 1495, 1500, 1501, 3468, 1504, 1506, 1507, 1508, 1509, - 2976, 1512, 1513, 1515, 1515, 2943, 1515, 1515, 1520, 1520, - 1520, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, - 1532, 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, - 1542, 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, - 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, - 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, - 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, - 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, - 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, - 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, - 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, - 1622, 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, - 1632, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, - 1642, 1643, 1644, 1492, 1250, 3930, 2907, 1645, 1415, 1647, - 1648, 1649, 1650, 1651, 1416, 1417, 956, 1488, 1489, 1490, - 1491, 1520, 1520, 1520, 1520, 1520, 1520, 1502, 3495, 3496, - 708, 2491, 2930, 3450, 956, 1100, 1658, 1659, 1660, 1661, - 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, - 1496, 701, 1933, 1932, 1934, 1935, 1936, 956, 1438, 89, - 2874, 1062, 165, 3605, 1062, 1505, 702, 1685, 4010, 177, - 1203, 1200, 3302, 4039, 1213, 1212, 2929, 1218, 1219, 1220, - 1221, 1516, 165, 1517, 1518, 1432, 3248, 3249, 94, 177, - 1438, 1521, 1522, 3395, 1195, 4010, 94, 2561, 3564, 
3252, - 4009, 1257, 1258, 3605, 2435, 1132, 1853, 94, 1130, 2560, - 185, 2910, 2963, 3248, 3249, 1715, 1485, 4038, 1121, 1226, - 1691, 2786, 2788, 2856, 2489, 2821, 3252, 4009, 2758, 1022, - 185, 2089, 1951, 1022, 1682, 1486, 1487, 1089, 1740, 1022, - 1091, 1646, 1206, 3091, 2436, 3090, 2816, 1121, 1482, 668, - 124, 2434, 2386, 166, 171, 168, 174, 175, 176, 178, - 180, 181, 182, 183, 1485, 1683, 1448, 3173, 94, 184, - 186, 187, 188, 166, 171, 168, 174, 175, 176, 178, - 180, 181, 182, 183, 1716, 2437, 2585, 1465, 1037, 184, - 186, 187, 188, 1253, 1952, 2433, 3944, 1699, 1448, 119, - 1144, 1703, 2574, 4120, 1239, 1977, 3527, 1021, 1121, 1477, - 1478, 1480, 1479, 1481, 1482, 1852, 1211, 3462, 2509, 2028, - 1958, 1120, 3003, 1701, 1754, 2121, 1702, 104, 2129, 2121, - 105, 2594, 1683, 1652, 1653, 1654, 1655, 1656, 1657, 1094, - 2899, 1086, 2130, 1454, 1158, 4085, 1689, 3650, 1088, 1087, - 1120, 1094, 1190, 2057, 1133, 1124, 1114, 1131, 1676, 1453, - 1126, 1452, 1453, 2585, 1127, 1125, 107, 2058, 1483, 1484, - 2056, 3502, 120, 3501, 1444, 2966, 2476, 1436, 2966, 1862, - 2965, 1454, 1948, 2965, 1949, 1128, 2113, 1950, 1861, 1851, - 2787, 2486, 2484, 1132, 1130, 1454, 1697, 1092, 2566, 2567, - 2568, 4077, 1829, 1830, 1831, 3487, 1444, 1845, 1033, 3974, - 3912, 1120, 4122, 3561, 3319, 3562, 1194, 1114, 1117, 1118, - 2488, 1078, 1700, 1718, 1225, 1111, 1115, 4090, 1716, 2481, - 1970, 1916, 1838, 1454, 1686, 1227, 1867, 1018, 1868, 1721, - 1870, 1872, 1698, 1196, 1876, 1878, 1880, 1882, 1884, 1857, - 1020, 1898, 1240, 1978, 3975, 3913, 1121, 1855, 1855, 1260, - 1210, 3819, 1745, 1746, 1454, 1121, 2481, 3818, 2485, 1906, - 1907, 1856, 1941, 2127, 709, 1912, 1913, 3809, 3577, 1451, - 1716, 1452, 1453, 1821, 2105, 2094, 2095, 2096, 2097, 2107, - 2098, 2099, 2100, 2112, 2108, 2101, 2102, 2109, 2110, 2111, - 2103, 2104, 2106, 1848, 3576, 2483, 1716, 1835, 1836, 1955, - 1834, 1953, 1954, 2126, 1956, 1957, 3509, 1451, 1716, 1452, - 1453, 1193, 1859, 1939, 4118, 1093, 3005, 4119, 
1454, 4117, - 1704, 1451, 1928, 1452, 1453, 2276, 1940, 1093, 86, 44, - 45, 88, 1902, 1454, 4128, 2046, 2048, 2049, 3840, 1894, - 2621, 3508, 1897, 3498, 1899, 1966, 1748, 3225, 92, 1120, - 3213, 1157, 48, 76, 77, 1154, 74, 78, 1120, 1451, - 2047, 1452, 1453, 1124, 1114, 75, 3277, 2881, 1126, 959, - 960, 961, 1127, 1125, 2880, 1454, 2879, 1938, 2441, 128, - 1097, 1096, 1095, 1454, 1942, 1926, 1927, 1827, 1925, 1924, - 1451, 1914, 1452, 1453, 62, 1473, 1474, 1475, 1476, 1477, - 1478, 1480, 1479, 1481, 1482, 1983, 95, 1475, 1476, 1477, - 1478, 1480, 1479, 1481, 1482, 1260, 1260, 4088, 1716, 1979, - 1980, 4129, 1908, 1905, 1904, 1419, 1903, 1874, 2005, 87, - 1696, 4078, 87, 1984, 3939, 1454, 3492, 709, 2863, 709, - 1991, 1992, 1993, 1458, 1459, 1460, 1461, 1462, 1463, 1464, - 1456, 2004, 83, 1471, 1451, 2975, 1452, 1453, 1443, 1440, - 1441, 1442, 1447, 1449, 1446, 3938, 1445, 4018, 1716, 1451, - 3916, 1452, 1453, 2457, 2456, 3915, 1439, 1472, 1473, 1474, - 1475, 1476, 1477, 1478, 1480, 1479, 1481, 1482, 2455, 2454, - 1443, 1440, 1441, 1442, 1447, 1449, 1446, 42, 1445, 3914, - 42, 2084, 2084, 2082, 2082, 2085, 3814, 1454, 1439, 3798, - 1471, 1451, 3797, 1452, 1453, 2453, 2452, 1981, 2591, 1451, - 4079, 1452, 1453, 1454, 1985, 3649, 1987, 1988, 1989, 1990, - 3647, 2050, 2633, 1994, 1472, 1473, 1474, 1475, 1476, 1477, - 1478, 1480, 1479, 1481, 1482, 2006, 51, 54, 57, 56, - 59, 3573, 73, 2807, 4105, 82, 79, 1472, 1473, 1474, - 1475, 1476, 1477, 1478, 1480, 1479, 1481, 1482, 1725, 1454, - 1471, 1451, 2167, 1452, 1453, 1682, 110, 4065, 1716, 61, - 91, 90, 3181, 1681, 71, 72, 58, 109, 1680, 108, - 1679, 2590, 80, 81, 1472, 1473, 1474, 1475, 1476, 1477, - 1478, 1480, 1479, 1481, 1482, 85, 1683, 3481, 85, 2029, - 2054, 1454, 2631, 2156, 1726, 1450, 1716, 2012, 2013, 2807, - 4003, 1716, 1450, 1716, 3932, 2807, 3982, 3182, 2276, 2807, - 3978, 3845, 2273, 2061, 63, 64, 110, 65, 66, 67, - 68, 2275, 2059, 1451, 3506, 1452, 1453, 109, 3491, 108, - 3287, 3184, 101, 4016, 1716, 954, 
3284, 1716, 103, 1451, - 2287, 1452, 1453, 102, 3965, 1716, 3844, 2286, 2060, 3179, - 2062, 2063, 2064, 2065, 2066, 2067, 2069, 2071, 2072, 2073, - 2074, 2075, 2076, 3216, 2285, 1496, 2088, 3195, 3196, 3215, - 2131, 2132, 2133, 2134, 3180, 4014, 1716, 103, 60, 2890, - 2274, 2877, 2122, 2272, 2145, 1451, 1678, 1452, 1453, 2166, - 2542, 2148, 2541, 2115, 2117, 2503, 2150, 1716, 1716, 3802, - 2155, 2151, 3545, 3929, 2152, 2153, 2154, 2502, 3186, 2149, - 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 1471, - 1454, 2573, 3822, 1716, 101, 2339, 2365, 1451, 2321, 1452, - 1453, 103, 2181, 1454, 2287, 102, 1716, 2290, 2291, 2807, - 3810, 2354, 2008, 1472, 1473, 1474, 1475, 1476, 1477, 1478, - 1480, 1479, 1481, 1482, 104, 3545, 1716, 105, 2285, 2807, - 3543, 3801, 2395, 1454, 2481, 1716, 3466, 1716, 89, 1454, - 2713, 1716, 3206, 3205, 2823, 104, 3194, 1974, 105, 3203, - 3204, 3553, 2179, 1454, 2053, 3201, 3202, 2917, 3197, 1937, - 1716, 2895, 2190, 2332, 2367, 1454, 3201, 3200, 1083, 1454, - 2831, 1716, 2560, 2939, 3891, 1716, 2394, 2349, 2350, 1454, - 1817, 2920, 2404, 2405, 2406, 2407, 1929, 3889, 1716, 3461, - 2399, 1454, 2400, 2401, 2402, 2403, 2913, 2914, 2390, 1034, - 1454, 1083, 2389, 2289, 2320, 1919, 2292, 2293, 2410, 2411, - 2412, 2413, 2371, 1454, 2807, 2806, 2831, 3886, 1716, 2326, - 2308, 2327, 1915, 3868, 1716, 2482, 1451, 1911, 1452, 1453, - 2424, 2393, 2263, 2264, 2265, 2266, 2267, 2334, 1910, 1451, - 1909, 1452, 1453, 2470, 1727, 2430, 1251, 94, 2352, 3437, - 1716, 2587, 1716, 3430, 1716, 2087, 1716, 2376, 2377, 2375, - 2823, 1454, 2331, 3427, 1716, 109, 2392, 1067, 2391, 1451, - 1068, 1452, 1453, 1454, 3183, 1451, 3927, 1452, 1453, 1729, - 1817, 1816, 2467, 2481, 3425, 1716, 2440, 2310, 1454, 1451, - 1450, 1452, 1453, 2190, 1454, 3463, 2852, 3387, 1716, 3970, - 1454, 1451, 2852, 1452, 1453, 1451, 1454, 1452, 1453, 2425, - 2414, 2416, 2417, 2421, 3943, 1451, 2803, 1452, 1453, 2439, - 2807, 2475, 2443, 1123, 2478, 2451, 2479, 1451, 2581, 1452, - 1453, 
103, 3461, 1855, 1450, 2495, 1451, 2831, 1452, 1453, - 1760, 1759, 1122, 3138, 2425, 1728, 2477, 2474, 2473, 1451, - 3168, 1452, 1453, 70, 3461, 3510, 3416, 3385, 1716, 2853, - 2560, 2496, 2830, 2499, 3203, 2853, 190, 2500, 2501, 2855, - 3111, 1454, 3381, 1716, 2378, 2560, 2587, 2911, 3378, 1716, - 2713, 2618, 1717, 1719, 3376, 1716, 2617, 2481, 2464, 2587, - 129, 1454, 151, 2347, 1720, 2565, 1454, 1451, 2311, 1452, - 1453, 2087, 2030, 172, 2014, 1960, 3511, 3512, 3513, 1451, - 2506, 1452, 1453, 1747, 3228, 1105, 1104, 2831, 1454, 1506, - 95, 1506, 4044, 3985, 1451, 3833, 1452, 1453, 1023, 1723, - 1451, 3799, 1452, 1453, 162, 1454, 1451, 2577, 1452, 1453, - 150, 3661, 1451, 1454, 1452, 1453, 3526, 3523, 3504, 3329, - 3328, 1819, 2423, 2287, 2535, 3374, 1716, 3275, 1454, 169, - 2286, 3230, 170, 3226, 1454, 2921, 2420, 1471, 2415, 2409, - 1467, 2408, 1468, 1944, 3514, 3372, 1716, 2580, 1454, 2887, - 3432, 1841, 1842, 161, 160, 189, 1469, 1483, 1484, 1466, - 1850, 1472, 1473, 1474, 1475, 1476, 1477, 1478, 1480, 1479, - 1481, 1482, 3370, 1716, 1846, 2550, 95, 1451, 1815, 1452, - 1453, 121, 1195, 2886, 3187, 1890, 3279, 1454, 3191, 3428, - 2558, 3515, 3516, 3517, 3834, 3190, 1454, 1451, 3806, 1452, - 1453, 2438, 1451, 2324, 1452, 1453, 2010, 1454, 3473, 3474, - 4100, 1454, 3368, 1716, 4098, 2569, 1454, 4072, 3366, 1716, - 2054, 3479, 2629, 3951, 1451, 1454, 1452, 1453, 3873, 3192, - 1454, 2887, 3364, 1716, 3188, 1454, 1891, 1892, 1893, 3189, - 3476, 1451, 2571, 1452, 1453, 1454, 3222, 3221, 3220, 1451, - 3138, 1452, 1453, 2900, 2536, 1454, 155, 1843, 158, 665, - 1840, 2583, 156, 157, 1451, 3478, 1452, 1453, 2011, 173, - 1451, 2582, 1452, 1453, 3631, 2593, 3630, 2570, 179, 2572, - 3362, 1716, 3157, 3156, 1451, 3947, 1452, 1453, 2575, 1724, - 2576, 3360, 1716, 3835, 2544, 2545, 1716, 2578, 2330, 2547, - 3358, 1716, 3160, 1454, 3467, 2338, 2757, 3161, 2548, 3356, - 1716, 1454, 3158, 3116, 3354, 1716, 3115, 3159, 3911, 3352, - 1716, 3640, 3642, 1451, 3629, 1452, 1453, 1454, 
2627, 3350, - 1716, 3457, 1451, 726, 1452, 1453, 1454, 3125, 2789, 3348, - 1716, 1959, 1006, 1451, 3199, 1452, 1453, 1451, 1454, 1452, - 1453, 2870, 1451, 1454, 1452, 1453, 1022, 2084, 1454, 2082, - 2792, 1451, 3454, 1452, 1453, 2891, 1451, 1038, 1452, 1453, - 3453, 1451, 1161, 1452, 1453, 1039, 1160, 2828, 2829, 3295, - 2886, 1451, 1886, 1452, 1453, 2790, 2365, 3334, 1716, 1022, - 2848, 1451, 101, 1452, 1453, 3317, 1716, 2969, 1418, 2129, - 2600, 3459, 164, 102, 2793, 3162, 2795, 2840, 2841, 103, - 2927, 2778, 1716, 2130, 2053, 129, 2827, 2615, 2808, 4114, - 2776, 1716, 3218, 1454, 2539, 3128, 3130, 1454, 4023, 1887, - 1888, 1889, 2751, 1716, 3131, 3928, 1454, 2728, 1716, 1451, - 3829, 1452, 1453, 3528, 101, 42, 3198, 1451, 2844, 1452, - 1453, 103, 1454, 2335, 2845, 102, 2804, 2847, 2528, 1454, - 1689, 2817, 2527, 1451, 2780, 1452, 1453, 2349, 2350, 1454, - 2846, 2526, 1451, 3114, 1452, 1453, 1046, 1047, 2525, 2873, - 2875, 3113, 2800, 2524, 1451, 1683, 1452, 1453, 3899, 1451, - 3439, 1452, 1453, 1454, 1451, 2820, 1452, 1453, 159, 2805, - 2564, 2866, 108, 2925, 3898, 110, 2850, 2720, 1716, 3637, - 3876, 2711, 1716, 3648, 3646, 1454, 109, 109, 108, 2854, - 2709, 1716, 3645, 3638, 2857, 3524, 3458, 2430, 3456, 2864, - 1454, 2867, 3231, 2465, 1454, 1833, 2696, 1716, 1045, 2936, - 1454, 2124, 3447, 2694, 1716, 2889, 2125, 110, 2823, 3615, - 2892, 2893, 1454, 3393, 2878, 4102, 4101, 1454, 109, 1451, - 108, 1452, 1453, 1451, 1454, 1452, 1453, 2888, 4101, 103, - 110, 2803, 1451, 3019, 1452, 1453, 2619, 2692, 1716, 1454, - 2896, 109, 2185, 2901, 2902, 2903, 2322, 2897, 1451, 1741, - 1452, 1453, 1733, 4102, 2933, 1451, 3917, 1452, 1453, 2690, - 1716, 1838, 114, 115, 3490, 1451, 152, 1452, 1453, 153, - 1036, 3, 97, 1, 2688, 1716, 2922, 2923, 2686, 1716, - 2027, 1454, 2912, 10, 3389, 2979, 2980, 1454, 2932, 1451, - 2025, 1452, 1453, 9, 1014, 1421, 2684, 1716, 1454, 165, - 1420, 2682, 1716, 1454, 2026, 3494, 177, 8, 2680, 1716, - 4033, 1451, 681, 1452, 1453, 2312, 1687, 1454, 
4073, 4029, - 2996, 2958, 2269, 2678, 1716, 1454, 1451, 2977, 1452, 1453, - 1451, 2961, 1452, 1453, 4030, 1930, 1451, 1920, 1452, 1453, - 3556, 2237, 3830, 3234, 2471, 3522, 2428, 185, 1451, 1113, - 1452, 1453, 2302, 1451, 154, 1452, 1453, 2387, 2388, 1454, - 1451, 3998, 1452, 1453, 1454, 2676, 1716, 2934, 118, 1717, - 2309, 2674, 1716, 1071, 2997, 1451, 117, 1452, 1453, 3000, - 1116, 1224, 2672, 1716, 2466, 3546, 2871, 2670, 1716, 2396, - 166, 171, 168, 174, 175, 176, 178, 180, 181, 182, - 183, 2668, 1716, 1766, 2333, 1454, 184, 186, 187, 188, - 2882, 1764, 1765, 1763, 1768, 1767, 3303, 1451, 2978, 1452, - 1453, 2620, 3021, 1451, 3394, 1452, 1453, 3077, 2967, 2018, - 716, 2968, 2843, 710, 1451, 192, 1452, 1453, 1755, 1451, - 1454, 1452, 1453, 2666, 1716, 1734, 3408, 1155, 2664, 1716, - 671, 3207, 2504, 1451, 677, 1452, 1453, 2981, 1503, 2009, - 3112, 1451, 2858, 1452, 1453, 2998, 1065, 1057, 2323, 2794, - 1064, 3807, 3146, 3084, 3451, 3124, 3095, 3126, 3086, 2810, - 3129, 3122, 3910, 3639, 2365, 3983, 1454, 3012, 2868, 2662, - 1716, 2274, 1730, 2274, 2272, 1451, 2272, 1452, 1453, 3057, - 1451, 3415, 1452, 1453, 2592, 2119, 2442, 3145, 1493, 87, - 2364, 3610, 2365, 2365, 2365, 2365, 2365, 1454, 2999, 3067, - 3068, 3069, 3070, 3071, 2657, 1716, 1454, 2045, 739, 738, - 736, 3085, 2365, 3087, 2796, 2365, 2824, 1457, 944, 3095, - 2784, 1451, 1742, 1452, 1453, 2835, 3094, 1454, 2833, 3150, - 1970, 2832, 2367, 2537, 3167, 1454, 2372, 3475, 3471, 4025, - 1454, 2366, 2362, 2802, 895, 894, 3110, 3106, 3119, 748, - 2653, 1716, 740, 1454, 730, 893, 1451, 1025, 1452, 1453, - 2367, 2367, 2367, 2367, 2367, 3117, 1454, 892, 3120, 3261, - 3132, 3133, 1454, 3262, 2942, 3276, 3107, 3108, 3109, 3251, - 2367, 3326, 3151, 2367, 2944, 3154, 3149, 2869, 1026, 3259, - 3325, 3152, 3153, 3118, 3155, 1027, 3169, 104, 3163, 3170, - 105, 3171, 1451, 1454, 1452, 1453, 3272, 1435, 1706, 1084, - 3300, 2651, 1716, 3934, 2563, 3323, 1705, 3941, 3177, 2644, - 1716, 3242, 3540, 1454, 2642, 1716, 
3223, 3209, 3059, 3210, - 3061, 2918, 3208, 1451, 2458, 1452, 1453, 3322, 69, 46, - 3211, 3212, 1451, 3905, 1452, 1453, 3072, 3073, 3074, 3075, - 2774, 3135, 3260, 3263, 3971, 3264, 2773, 887, 884, 2430, - 1454, 3232, 3253, 1451, 3612, 1452, 1453, 3613, 3614, 3080, - 3270, 1451, 3081, 1452, 1453, 3141, 1451, 3954, 1452, 1453, - 3141, 1454, 3955, 883, 3956, 2174, 1431, 2769, 1428, 1451, - 4046, 1452, 1453, 2020, 3288, 96, 36, 3291, 3290, 35, - 34, 33, 1451, 32, 1452, 1453, 26, 2768, 1451, 3298, - 1452, 1453, 25, 24, 23, 3308, 3305, 3306, 3254, 3307, - 22, 29, 3309, 19, 3311, 21, 3313, 2836, 2839, 2840, - 2841, 2837, 20, 2838, 2842, 18, 3245, 3473, 3474, 1451, - 4068, 1452, 1453, 4113, 2767, 1471, 123, 55, 52, 1506, - 50, 131, 130, 1506, 2579, 53, 49, 1198, 2584, 1451, - 47, 1452, 1453, 31, 30, 2766, 3233, 17, 16, 1472, - 1473, 1474, 1475, 1476, 1477, 1478, 1480, 1479, 1481, 1482, - 15, 2588, 14, 2589, 3410, 13, 12, 11, 2596, 7, - 6, 3414, 2598, 2599, 39, 38, 1451, 37, 1452, 1453, - 3299, 2605, 2606, 2607, 2608, 2609, 2610, 2611, 2612, 2613, - 2614, 28, 2616, 27, 40, 4, 2905, 1451, 2460, 1452, - 1453, 0, 0, 0, 3144, 0, 0, 0, 0, 2365, - 0, 3440, 3441, 3443, 0, 2622, 2623, 2624, 2625, 2626, - 0, 2628, 3488, 3448, 0, 2630, 3455, 0, 0, 2635, - 2636, 728, 2637, 0, 1454, 2640, 0, 2641, 2643, 2645, - 2646, 2647, 2648, 2649, 2650, 2652, 2654, 2655, 2656, 2658, - 1454, 2660, 2661, 2663, 2665, 2667, 2669, 2671, 2673, 2675, - 2677, 2679, 2681, 2683, 2685, 2687, 2689, 2691, 2693, 2695, - 2697, 2698, 2699, 3482, 2701, 3477, 2703, 2367, 2705, 2706, - 3460, 2708, 2710, 2712, 3480, 3260, 3263, 2715, 3264, 3445, - 0, 2719, 3489, 3483, 0, 2724, 2725, 2726, 2727, 3505, - 0, 3507, 3293, 3294, 0, 0, 0, 0, 2738, 2739, - 2740, 2741, 2742, 2743, 3550, 3551, 2747, 2748, 2765, 1454, - 0, 0, 3470, 0, 2750, 3499, 3500, 2113, 3417, 2756, - 3419, 3420, 3421, 1454, 2764, 2759, 2760, 2761, 2762, 2763, - 1044, 3484, 3485, 1050, 1050, 0, 2770, 2771, 0, 2772, - 0, 0, 2775, 2777, 2333, 0, 2779, 0, 0, 
0, - 1454, 0, 0, 0, 1454, 0, 2791, 0, 0, 0, - 1451, 0, 1452, 1453, 0, 1454, 0, 0, 3533, 1454, - 0, 0, 3537, 3538, 3539, 0, 1451, 0, 1452, 1453, - 1454, 3552, 2836, 2839, 2840, 2841, 2837, 0, 2838, 2842, - 0, 0, 0, 2755, 1454, 3568, 0, 3529, 3530, 0, - 0, 0, 0, 1454, 0, 0, 0, 2754, 0, 0, - 0, 1454, 0, 0, 0, 2105, 2094, 2095, 2096, 2097, - 2107, 2098, 2099, 2100, 2112, 2108, 2101, 2102, 2109, 2110, - 2111, 2103, 2104, 2106, 2753, 0, 1454, 0, 2752, 0, - 1454, 0, 0, 0, 0, 1451, 0, 1452, 1453, 2749, - 1454, 0, 0, 2744, 1454, 0, 0, 0, 0, 1451, - 1454, 1452, 1453, 0, 2737, 0, 0, 0, 1454, 3628, - 0, 3632, 3633, 1454, 0, 0, 0, 3618, 2736, 3619, - 3620, 3621, 1454, 3608, 0, 0, 1451, 2735, 1452, 1453, - 1451, 0, 1452, 1453, 3145, 2734, 87, 3634, 3145, 0, - 0, 1451, 0, 1452, 1453, 1451, 0, 1452, 1453, 0, - 1454, 0, 0, 0, 0, 0, 1451, 0, 1452, 1453, - 2733, 0, 3572, 0, 2732, 0, 2084, 0, 2082, 3663, - 1451, 3635, 1452, 1453, 2731, 3655, 3644, 3643, 2730, 1451, - 0, 1452, 1453, 0, 2729, 3651, 3653, 1451, 1454, 1452, - 1453, 0, 2723, 0, 0, 0, 0, 2722, 0, 1454, - 0, 0, 0, 3813, 42, 0, 2721, 0, 0, 0, - 3667, 0, 1451, 0, 1452, 1453, 1451, 0, 1452, 1453, - 0, 0, 0, 0, 0, 0, 1451, 0, 1452, 1453, - 1451, 0, 1452, 1453, 2718, 0, 1451, 0, 1452, 1453, - 0, 0, 0, 3805, 1451, 3804, 1452, 1453, 0, 1451, - 0, 1452, 1453, 0, 0, 3820, 0, 0, 1451, 0, - 1452, 1453, 3825, 3832, 3824, 0, 1714, 1710, 3803, 0, - 0, 0, 2717, 0, 3870, 3871, 3007, 3008, 3009, 3010, - 3011, 1711, 3657, 2716, 3664, 3665, 1451, 0, 1452, 1453, - 0, 0, 2084, 0, 2082, 3874, 3016, 0, 3815, 3816, - 3817, 0, 0, 0, 0, 0, 2328, 2329, 1713, 0, - 1712, 3599, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3141, 0, 0, 1451, 3145, 1452, 1453, 0, 0, - 3877, 0, 3659, 3808, 3880, 1451, 0, 1452, 1453, 1523, - 1524, 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, 1533, - 1534, 1535, 1536, 1537, 1538, 1539, 1540, 1541, 1543, 1544, - 1545, 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, + 960, 3647, 3648, 87, 3646, 4084, 4161, 3975, 948, 3312, + 4174, 
4065, 4129, 4128, 1271, 955, 1984, 947, 2099, 3597, + 2405, 4053, 3957, 3212, 3219, 913, 3447, 3880, 42, 2333, + 2111, 3261, 3270, 3275, 3272, 1777, 3173, 3955, 3584, 3271, + 1269, 3269, 3028, 2040, 3274, 3273, 5, 2335, 3290, 3227, + 2479, 3111, 3289, 741, 3177, 3174, 3493, 3487, 3689, 3002, + 2829, 2360, 3171, 3477, 3161, 769, 3027, 735, 909, 2442, + 736, 4026, 908, 3292, 1833, 2903, 3319, 2984, 2935, 2467, + 1733, 2447, 1080, 2904, 2510, 2905, 1028, 2379, 87, 163, + 2376, 2393, 1048, 2854, 1025, 43, 2835, 1055, 2381, 1126, + 1880, 2821, 2257, 2048, 3515, 1150, 41, 1028, 2805, 2380, + 2256, 1027, 2095, 1031, 2289, 2488, 2976, 1862, 149, 2466, + 2368, 2527, 1090, 2133, 2449, 2896, 1113, 1108, 1766, 2871, + 2383, 1746, 1050, 1698, 1519, 104, 2139, 105, 2070, 2062, + 1445, 100, 1430, 1869, 1980, 1087, 3176, 751, 2464, 1084, + 1119, 1961, 1088, 2438, 1114, 1115, 1765, 1065, 2439, 1067, + 1751, 1116, 746, 1037, 3684, 1720, 2803, 1502, 2166, 2147, + 1478, 1034, 2039, 1259, 85, 2842, 1992, 3448, 132, 167, + 3504, 1047, 3676, 1033, 107, 2361, 127, 125, 126, 1854, + 133, 1199, 1023, 93, 1032, 1060, 910, 1035, 745, 739, + 1523, 738, 99, 1267, 98, 1245, 4162, 2481, 2482, 2483, + 1059, 3585, 3258, 1528, 4010, 84, 2481, 2958, 2957, 2525, + 2926, 3577, 1022, 4111, 106, 2992, 1946, 2993, 4006, 2055, + 4005, 2054, 128, 2330, 2331, 2053, 1130, 2052, 3540, 2051, + 4011, 1040, 728, 134, 1155, 2050, 1081, 673, 2023, 1215, + 670, 2801, 671, 2575, 4105, 3157, 4132, 1152, 1163, 4184, + 1441, 2514, 1097, 1092, 3651, 3115, 4127, 1737, 4152, 3451, + 1169, 1170, 1171, 3450, 1174, 1175, 1176, 1177, 2, 1074, + 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, + 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1738, 1129, 1462, + 1049, 1026, 1075, 1041, 128, 2513, 2831, 3984, 3280, 1024, + 1735, 1104, 1103, 1102, 1105, 729, 1216, 1156, 1159, 1160, + 2357, 3277, 2356, 111, 112, 113, 2928, 116, 713, 3280, + 122, 4006, 4167, 191, 2951, 2354, 665, 95, 95, 95, + 1736, 4115, 4113, 3958, 2766, 
3651, 713, 1021, 726, 727, + 95, 3650, 914, 1172, 707, 2060, 2073, 4166, 1016, 1017, + 1018, 1019, 2948, 3338, 3278, 1030, 4114, 4112, 1073, 1077, + 912, 190, 128, 4080, 3876, 3875, 1154, 963, 964, 965, + 3590, 86, 1153, 3591, 4142, 3278, 3886, 4109, 86, 3609, + 1432, 3284, 707, 1062, 1063, 129, 1106, 190, 963, 964, + 965, 3598, 1727, 86, 4054, 2845, 4062, 2582, 172, 2507, + 3885, 2104, 3284, 4089, 707, 3364, 1843, 2802, 4094, 3209, + 3210, 129, 3208, 1073, 1077, 912, 1096, 2991, 4066, 1098, + 2846, 1767, 3650, 1768, 172, 704, 4092, 1459, 2399, 1460, + 1461, 2400, 2401, 2032, 2033, 2579, 4098, 4099, 1252, 2975, + 1254, 2885, 3703, 1264, 1198, 3229, 3230, 1235, 1101, 95, + 1208, 1209, 4093, 1014, 169, 2880, 95, 170, 2879, 707, + 1458, 2881, 2580, 1013, 3608, 1442, 3976, 1240, 1241, 1223, + 1236, 95, 2512, 689, 1224, 1229, 2892, 707, 1251, 1253, + 169, 189, 1211, 170, 1223, 3346, 687, 2418, 2417, 1224, + 1988, 2838, 2839, 3344, 4070, 3281, 2332, 1222, 3316, 1221, + 4133, 3049, 707, 3314, 2573, 2031, 1099, 189, 2035, 721, + 725, 719, 1763, 3320, 1446, 4070, 3281, 3989, 1101, 1173, + 1093, 4134, 1702, 2977, 2936, 2489, 684, 1095, 1094, 707, + 3928, 1936, 3929, 2961, 2551, 699, 2552, 2528, 2553, 3307, + 1962, 86, 2532, 1431, 88, 2364, 2534, 3308, 1256, 4164, + 694, 1238, 1239, 1263, 3228, 2458, 708, 1261, 1244, 1262, + 3860, 697, 1204, 2979, 3579, 1237, 3231, 2576, 3578, 2577, + 1230, 1847, 1242, 2554, 1179, 1937, 1099, 1938, 2452, 1178, + 2530, 1479, 1243, 1128, 2531, 1249, 1066, 3575, 1128, 1250, + 2492, 3317, 2929, 1139, 708, 173, 3315, 2533, 1109, 1255, + 2175, 2535, 1110, 3335, 179, 1480, 1481, 1482, 1483, 1484, + 1485, 1486, 1488, 1487, 1489, 1490, 708, 1149, 3655, 95, + 2377, 173, 1456, 1110, 1248, 2965, 2966, 1148, 1989, 674, + 179, 676, 690, 1147, 710, 1100, 709, 680, 1146, 678, + 682, 691, 683, 4106, 677, 1145, 688, 1144, 1143, 679, + 692, 693, 696, 700, 701, 702, 698, 695, 3490, 686, + 711, 3231, 1142, 1141, 1136, 1705, 3114, 3050, 4185, 4139, + 1085, 708, 
2541, 2537, 2539, 2540, 2538, 2542, 2543, 2544, + 1076, 1070, 1068, 1085, 1121, 1061, 1127, 1981, 1122, 708, + 3251, 1127, 2465, 2960, 1268, 1085, 1268, 1268, 2167, 1083, + 1158, 2980, 3574, 2169, 2518, 1100, 1121, 2174, 2170, 1841, + 1157, 2171, 2172, 2173, 708, 2517, 2168, 2176, 2177, 2178, + 2179, 2180, 2181, 2182, 2183, 2184, 1977, 3142, 2451, 164, + 1433, 1452, 3140, 1166, 1444, 1076, 1070, 1068, 2362, 2363, + 1764, 708, 1140, 1840, 1028, 1503, 1508, 1509, 1839, 1512, + 1514, 1515, 1516, 1517, 1518, 164, 1521, 1522, 1524, 1524, + 2946, 1524, 1524, 1529, 1529, 1529, 1532, 1533, 1534, 1535, + 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, 1544, 1545, + 1546, 1547, 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, + 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, + 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, + 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, + 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, + 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, 1615, + 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 1625, + 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, + 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, + 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1446, 1257, + 2930, 1500, 1654, 3983, 1656, 1657, 1658, 1659, 1660, 1424, + 1425, 1948, 1947, 1949, 1950, 1951, 1529, 1529, 1529, 1529, + 1529, 1529, 2927, 3538, 3539, 1440, 3649, 1107, 1210, 1207, + 1423, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, + 1676, 1677, 1678, 1679, 1680, 712, 4097, 1496, 1497, 1498, + 1499, 1504, 4068, 94, 961, 961, 961, 1510, 2511, 2950, + 94, 1513, 1695, 4029, 2894, 2581, 705, 1069, 3491, 1525, + 3435, 1526, 1527, 4068, 1868, 94, 3282, 3283, 165, 3607, + 1202, 706, 1220, 2963, 4067, 177, 1692, 1530, 1531, 3286, + 4096, 1219, 1137, 1225, 1226, 1227, 1228, 3282, 3283, 2580, + 1233, 1978, 1837, 2949, 165, 4067, 1456, 3649, 
1493, 1214, + 3286, 177, 664, 89, 1128, 3123, 1701, 1265, 1266, 2509, + 1493, 2974, 1069, 4107, 2973, 1028, 185, 1494, 1495, 1028, + 3336, 1726, 2806, 2808, 1128, 1028, 3971, 1451, 1448, 1449, + 1450, 1455, 1457, 1454, 1967, 1453, 1128, 3529, 2983, 3511, + 1709, 2876, 185, 3122, 1713, 1447, 2841, 2778, 2107, 1128, + 1027, 1755, 1655, 1213, 4178, 2836, 672, 1693, 2455, 166, + 171, 168, 174, 175, 176, 178, 180, 181, 182, 183, + 2131, 2406, 1493, 1867, 1490, 184, 186, 187, 188, 124, + 3207, 2605, 1473, 707, 2996, 166, 171, 168, 174, 175, + 176, 178, 180, 181, 182, 183, 2594, 1044, 1246, 2456, + 1260, 184, 186, 187, 188, 1452, 2454, 1127, 1727, 1993, + 1711, 1966, 1712, 94, 104, 1218, 105, 1165, 3997, 1661, + 1662, 1663, 1664, 1665, 1666, 1693, 2364, 1127, 1101, 1197, + 119, 1138, 1131, 1121, 1699, 1151, 3570, 1133, 3503, 1127, + 2457, 1134, 1132, 2148, 1686, 1121, 1124, 1125, 2529, 1085, + 2453, 2044, 1127, 1118, 1122, 1974, 2986, 2149, 1121, 1124, + 1125, 2985, 1085, 107, 1769, 3023, 1118, 1122, 2123, 2112, + 2113, 2114, 2115, 2125, 2116, 2117, 2118, 2130, 2126, 2119, + 2120, 2127, 2128, 2129, 2121, 2122, 2124, 1117, 2605, 1860, + 2140, 2919, 2807, 1201, 3003, 1707, 1232, 3215, 2140, 4143, + 2614, 1844, 1845, 1846, 120, 2986, 2508, 1234, 1708, 1710, + 2985, 1460, 1461, 1461, 1870, 1870, 3698, 3545, 1986, 1128, + 1931, 1853, 4135, 1462, 1732, 1729, 3544, 1882, 2496, 1883, + 1128, 1885, 1887, 1913, 1872, 1891, 1893, 1895, 1897, 1899, + 1026, 1024, 3216, 1877, 1876, 1696, 1268, 1247, 1760, 1761, + 1970, 1828, 1968, 1969, 1866, 1971, 1972, 1973, 1994, 1871, + 1217, 2501, 1963, 2506, 1964, 2134, 3218, 1965, 1921, 1922, + 1462, 2504, 1203, 1836, 1927, 1928, 4176, 2501, 3005, 4177, + 1851, 4175, 2610, 1139, 3213, 1462, 1863, 1137, 4030, 2146, + 4186, 1850, 1849, 3530, 1039, 708, 4180, 2075, 1714, 1200, + 2294, 2505, 3868, 3229, 3230, 3867, 3963, 1462, 4148, 1727, + 3214, 2076, 1491, 1492, 2074, 1100, 1874, 2503, 3858, 2362, + 2363, 3621, 1127, 2066, 2067, 2587, 2588, 1131, 
1121, 3604, + 2641, 3605, 1133, 1127, 4031, 1164, 1134, 1132, 3620, 1161, + 731, 3552, 1917, 1909, 3220, 1982, 1912, 3551, 1914, 3015, + 3014, 3013, 3964, 3541, 3007, 2609, 3011, 1135, 3006, 3259, + 3004, 1459, 3247, 1460, 1461, 3009, 1842, 1485, 1486, 1488, + 1487, 1489, 1490, 128, 3008, 2901, 2900, 4187, 1956, 2899, + 1104, 1103, 1102, 1483, 1484, 1485, 1486, 1488, 1487, 1489, + 1490, 2461, 3010, 3012, 1462, 3311, 1957, 1941, 1999, 1462, + 1954, 1451, 1448, 1449, 1450, 1455, 1457, 1454, 1459, 1453, + 1460, 1461, 3228, 1268, 1268, 1940, 1939, 1995, 1996, 1447, + 2021, 1727, 2601, 1459, 3231, 1460, 1461, 87, 2145, 1929, + 87, 2000, 3025, 2066, 2067, 2064, 2065, 1923, 2007, 2008, + 2009, 1479, 1955, 1427, 1475, 1459, 1476, 1460, 1461, 2020, + 1920, 1919, 42, 1918, 1479, 42, 2995, 1889, 1706, 2063, + 1477, 1491, 1492, 1474, 1953, 1480, 1481, 1482, 1483, 1484, + 1485, 1486, 1488, 1487, 1489, 1490, 3535, 713, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1488, 1487, 1489, 1490, 1479, + 1481, 1482, 1483, 1484, 1485, 1486, 1488, 1487, 1489, 1490, + 2102, 2102, 2100, 2100, 2103, 963, 964, 965, 713, 4136, + 1462, 2653, 1763, 1480, 1481, 1482, 1483, 1484, 1485, 1486, + 1488, 1487, 1489, 1490, 2883, 713, 2068, 2477, 2476, 1479, + 1692, 2593, 1459, 1943, 1460, 1461, 1997, 1459, 3992, 1460, + 1461, 3217, 3991, 2001, 3967, 2003, 2004, 2005, 2006, 1479, + 3966, 1462, 2010, 1480, 1481, 1482, 1483, 1484, 1485, 1486, + 1488, 1487, 1489, 1490, 2022, 2475, 2474, 2473, 2472, 2827, + 4163, 3353, 2186, 1480, 1481, 1482, 1483, 1484, 1485, 1486, + 1488, 1487, 1489, 1490, 1480, 1481, 1482, 1483, 1484, 1485, + 1486, 1488, 1487, 1489, 1490, 4146, 1727, 1942, 85, 1462, + 3965, 85, 2045, 1466, 1467, 1468, 1469, 1470, 1471, 1472, + 1464, 1693, 2072, 2651, 1462, 2294, 2028, 2029, 1462, 2291, + 110, 110, 3863, 1458, 1727, 1462, 4123, 1727, 2293, 1462, + 2135, 109, 109, 108, 108, 3847, 2077, 1727, 2827, 1727, + 1462, 1740, 959, 103, 1462, 3846, 101, 101, 1459, 3697, + 1460, 1461, 3695, 103, 1458, 
1727, 1727, 102, 102, 1462, + 2304, 2106, 2303, 2827, 4061, 2827, 4040, 2079, 2078, 3617, + 2080, 2081, 2082, 2083, 2084, 2085, 2087, 2089, 2090, 2091, + 2092, 2093, 2094, 2208, 4076, 1727, 2302, 1741, 1691, 1459, + 1504, 1460, 1461, 2150, 2151, 2152, 2153, 2827, 4036, 4074, + 1727, 1727, 1727, 4072, 1727, 2292, 2290, 2164, 2141, 2185, + 3941, 1727, 1727, 1727, 3939, 1727, 3948, 1727, 3588, 3982, + 3871, 1727, 1462, 2827, 3859, 3936, 1727, 3502, 1690, 3918, + 1727, 1458, 1689, 1462, 3588, 1727, 3985, 1459, 3549, 1460, + 1461, 2827, 3586, 3895, 3476, 1727, 4024, 3534, 2501, 1727, + 1462, 2385, 1459, 2200, 1460, 1461, 1459, 3321, 1460, 1461, + 2304, 3318, 2374, 1459, 1462, 1460, 1461, 1459, 2301, 1460, + 1461, 2307, 2308, 3509, 1727, 104, 2387, 105, 1459, 3250, + 1460, 1461, 1459, 3221, 1460, 1461, 2302, 3225, 2415, 2369, + 2370, 2733, 1727, 1462, 3224, 3249, 104, 1459, 105, 1460, + 1461, 3240, 3239, 3894, 2349, 3237, 3238, 3469, 1727, 1462, + 3235, 3236, 3851, 1462, 2910, 2337, 2897, 2071, 3466, 1727, + 3235, 3234, 103, 1090, 2851, 1727, 2580, 2959, 3226, 1832, + 2940, 2933, 2934, 3222, 1688, 3464, 1727, 3850, 3223, 1681, + 2424, 2425, 2426, 2427, 2564, 2872, 2872, 2419, 2410, 2420, + 2421, 2422, 2423, 2563, 2409, 2391, 1090, 1462, 2827, 2826, + 1459, 1040, 1460, 1461, 2523, 2430, 2431, 2432, 2433, 2105, + 1727, 1459, 2522, 1460, 1461, 2350, 2359, 2338, 3427, 1727, + 3596, 1727, 2343, 2024, 2344, 2352, 2325, 2444, 1459, 1990, + 1460, 1461, 2413, 1952, 3425, 1727, 3996, 1944, 3421, 1727, + 2490, 2450, 1459, 2372, 1460, 1461, 1934, 1462, 2873, 2873, + 2396, 2397, 2395, 1930, 1926, 1074, 1925, 1924, 2875, 2580, + 2412, 2411, 1462, 1832, 1831, 109, 1462, 1775, 1774, 3202, + 1462, 1459, 2487, 1460, 1461, 1742, 1258, 2843, 1075, 2580, + 2824, 2937, 3418, 1727, 2460, 1462, 2915, 1459, 1130, 1460, + 1461, 1459, 1462, 1460, 1461, 103, 3506, 2843, 1870, 1462, + 2414, 2827, 86, 44, 45, 88, 2851, 2850, 2445, 2441, + 2434, 2436, 2437, 3172, 2495, 2459, 3455, 2498, 2463, 2499, + 
3237, 2471, 92, 3145, 3502, 2515, 48, 76, 77, 1462, + 74, 78, 3416, 1727, 1458, 1459, 1462, 1460, 1461, 75, + 2502, 2494, 2497, 2445, 2493, 2398, 2733, 3414, 1727, 2851, + 1129, 3412, 1727, 2822, 2638, 3410, 1727, 2637, 3505, 2516, + 2519, 1462, 2851, 2501, 2520, 2521, 1462, 2484, 62, 3502, + 3408, 1727, 1462, 2367, 2603, 1731, 2328, 3406, 1727, 2105, + 95, 3262, 2046, 2030, 2602, 1459, 1976, 1460, 1461, 1762, + 1029, 2585, 1112, 2526, 1728, 1730, 1462, 1111, 2501, 95, + 1459, 1462, 1460, 1461, 1459, 4102, 1460, 1461, 1459, 3553, + 1460, 1461, 4043, 3313, 3404, 1727, 3882, 1514, 1734, 1514, + 1905, 3402, 1727, 1459, 3848, 1460, 1461, 83, 3710, 3569, + 1459, 3566, 1460, 1461, 1462, 2597, 3547, 1459, 3369, 1460, + 1461, 3368, 1834, 1462, 2443, 3883, 3400, 1727, 3309, 3264, + 1462, 3890, 2304, 2557, 2303, 3260, 2907, 3398, 1727, 1462, + 3554, 3555, 3556, 1462, 1479, 2941, 2440, 1459, 95, 1460, + 1461, 1906, 1907, 1908, 1459, 2458, 1460, 1461, 2600, 2435, + 2341, 3396, 1727, 2429, 1462, 1202, 3394, 1727, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1488, 1487, 1489, 1490, 1459, + 1462, 1460, 1461, 2572, 1459, 2428, 1460, 1461, 1462, 1959, + 1459, 1865, 1460, 1461, 1861, 1830, 121, 2578, 4158, 3392, + 1727, 51, 54, 57, 56, 59, 4156, 73, 3390, 1727, + 82, 79, 2906, 2586, 1459, 3524, 1460, 1461, 4130, 1459, + 2026, 1460, 1461, 2589, 3388, 1727, 3516, 3517, 3374, 1727, + 4004, 1462, 2072, 3923, 61, 91, 90, 3519, 3522, 71, + 72, 58, 3256, 3255, 3254, 1462, 1694, 80, 81, 3351, + 1727, 3172, 1459, 2920, 1460, 1461, 2558, 1462, 669, 3557, + 2907, 1459, 1901, 1460, 1461, 2798, 1727, 1462, 1459, 3521, + 1460, 1461, 1739, 2796, 1727, 1462, 3191, 1459, 3190, 1460, + 1461, 1459, 2027, 1460, 1461, 2613, 2591, 2590, 1462, 2592, + 63, 64, 3194, 65, 66, 67, 68, 3195, 2595, 3675, + 2596, 3674, 1459, 4000, 1460, 1461, 3558, 3559, 3560, 1902, + 1903, 1904, 2598, 2777, 3192, 3884, 2771, 1727, 1459, 3193, + 1460, 1461, 2566, 2567, 2358, 2347, 1459, 2569, 1460, 1461, + 2748, 1727, 730, 1042, 
3510, 3150, 2570, 1462, 2647, 3149, + 2765, 3962, 2740, 1727, 3688, 2809, 3690, 2131, 1462, 3673, + 3162, 3164, 2731, 1727, 60, 3196, 1462, 2860, 2861, 3165, + 2729, 1727, 3495, 3498, 1028, 2102, 3159, 2100, 2812, 1459, + 3494, 1460, 1461, 2716, 1727, 1975, 1045, 1012, 3233, 1462, + 2890, 2911, 1043, 1459, 1046, 1460, 1461, 2848, 2849, 2148, + 1462, 1168, 1167, 101, 2810, 1459, 2385, 1460, 1461, 1028, + 2868, 3329, 2649, 2149, 102, 1459, 2906, 1460, 1461, 2989, + 1426, 2947, 42, 1459, 129, 1460, 1461, 2813, 103, 2815, + 1462, 2865, 2714, 1727, 2867, 3500, 1459, 4172, 1460, 1461, + 2847, 2828, 101, 2712, 1727, 3252, 1462, 2071, 2561, 103, + 1462, 2710, 1727, 102, 89, 2123, 2112, 2113, 2114, 2115, + 2125, 2116, 2117, 2118, 2130, 2126, 2119, 2120, 2127, 2128, + 2129, 2121, 2122, 2124, 2708, 1727, 2369, 2370, 4079, 1699, + 2837, 1462, 2800, 3981, 3878, 1459, 1727, 1460, 1461, 1462, + 3232, 2864, 2353, 2866, 1053, 1054, 1459, 3148, 1460, 1461, + 110, 2550, 2893, 2895, 1459, 3147, 1460, 1461, 1744, 2549, + 2820, 109, 1693, 108, 2870, 2706, 1727, 2548, 2886, 2840, + 2547, 2825, 103, 2945, 2546, 2545, 1462, 1459, 3478, 1460, + 1461, 2704, 1727, 2584, 108, 2702, 1727, 3947, 1459, 1462, + 1460, 1461, 2874, 110, 3946, 3926, 3696, 2877, 2450, 3694, + 3693, 1462, 3686, 3567, 109, 2884, 108, 2887, 1462, 3499, + 2956, 3497, 3265, 1462, 94, 2485, 2700, 1727, 1459, 1462, + 1460, 1461, 1848, 1052, 1743, 2898, 2143, 2909, 110, 3488, + 109, 2144, 2912, 2913, 1459, 3685, 1460, 1461, 1459, 109, + 1460, 1461, 2843, 2908, 4160, 4159, 3, 3659, 2824, 3051, + 2639, 1462, 2339, 2916, 1756, 2917, 1748, 4159, 2921, 2922, + 2923, 2698, 1727, 114, 115, 2953, 4160, 2204, 3968, 1459, + 1462, 1460, 1461, 1853, 2696, 1727, 3533, 1459, 2043, 1460, + 1461, 10, 97, 1, 2942, 2943, 2694, 1727, 1462, 2041, + 1020, 1429, 9, 2692, 1727, 1462, 2999, 3000, 2690, 1727, + 2042, 2952, 1462, 8, 2688, 1727, 1428, 3537, 1462, 4091, + 685, 2329, 1462, 1697, 1459, 4131, 1460, 1461, 1462, 4087, + 4088, 70, 1945, 1935, 
3599, 2255, 3879, 1459, 3268, 1460, + 1461, 2981, 3016, 1462, 2997, 2978, 2686, 1727, 1462, 1459, + 2491, 1460, 1461, 3565, 1462, 2448, 1459, 2287, 1460, 1461, + 1120, 1459, 154, 1460, 1461, 2684, 1727, 1459, 2407, 1460, + 1461, 1462, 3034, 3035, 3036, 3037, 3038, 3039, 3040, 3041, + 3042, 3043, 2408, 2682, 1727, 1462, 4056, 2319, 118, 1078, + 2677, 1727, 117, 1123, 2954, 1231, 3017, 2673, 1727, 1459, + 2486, 1460, 1461, 2671, 1727, 1728, 2326, 2664, 1727, 3589, + 2891, 2416, 1781, 2662, 1727, 1779, 1780, 1778, 1459, 1783, + 1460, 1461, 1782, 4028, 2856, 2859, 2860, 2861, 2857, 1727, + 2858, 2862, 3337, 2640, 4137, 3434, 1459, 2034, 1460, 1461, + 3980, 2351, 720, 1459, 3053, 1460, 1461, 3109, 2863, 714, + 1459, 2987, 1460, 1461, 2988, 192, 1459, 3855, 1460, 1461, + 1459, 1770, 1460, 1461, 1749, 1462, 1459, 1162, 1460, 1461, + 1694, 3571, 675, 3241, 2998, 2524, 681, 1511, 2025, 3001, + 1462, 1459, 3146, 1460, 1461, 1462, 1459, 3018, 1460, 1461, + 2878, 3116, 1459, 1072, 1460, 1461, 1064, 2340, 2814, 3127, + 1071, 2932, 3118, 3856, 3180, 2385, 3492, 3158, 3044, 1459, + 3160, 1460, 1461, 2830, 3163, 2198, 3089, 2292, 2290, 2292, + 2290, 3156, 1462, 1459, 3961, 1460, 1461, 3179, 3687, 87, + 2387, 4041, 2385, 2385, 2385, 2385, 2385, 2888, 1745, 3454, + 3099, 3100, 3101, 3102, 3103, 2612, 2138, 2462, 1462, 1501, + 3471, 2384, 2385, 3654, 1031, 2385, 2061, 2387, 2387, 2387, + 2387, 2387, 3127, 743, 3117, 3467, 3119, 3184, 1462, 742, + 3433, 1986, 1462, 740, 2816, 2844, 1462, 2387, 1465, 3201, + 2387, 3126, 949, 2804, 1757, 1462, 3139, 3141, 3143, 2855, + 3144, 1462, 3154, 3138, 2853, 2281, 2282, 2283, 2284, 2285, + 3153, 2852, 3151, 1459, 2559, 1460, 1461, 3429, 2392, 3166, + 3167, 3518, 2306, 1462, 3514, 2309, 2310, 4083, 1459, 3285, + 1460, 1461, 1462, 1459, 1033, 1460, 1461, 2386, 2382, 3293, + 3183, 3186, 3187, 3366, 3189, 1032, 3197, 2823, 104, 3203, + 105, 3185, 3204, 1462, 3188, 900, 899, 3205, 1462, 752, + 744, 2327, 1462, 3365, 734, 3211, 1462, 3357, 962, 898, + 
1459, 3355, 1460, 1461, 897, 3295, 3296, 3242, 2962, 3244, + 3243, 2902, 3310, 2964, 2889, 3306, 2794, 3152, 1443, 1716, + 1719, 2348, 1091, 3334, 3987, 2583, 1459, 1462, 1460, 1461, + 3363, 1715, 3245, 3246, 3294, 3994, 3266, 3297, 2793, 3276, + 2450, 3298, 3287, 3583, 3169, 3257, 1459, 2789, 1460, 1461, + 1459, 3304, 1460, 1461, 1459, 2938, 1460, 1461, 2478, 69, + 46, 1462, 3175, 1459, 3956, 1460, 1461, 3175, 2788, 1459, + 4025, 1460, 1461, 2787, 892, 3322, 889, 2786, 3325, 3324, + 3656, 2785, 3657, 3658, 3112, 3113, 3332, 4007, 1462, 4008, + 888, 1459, 4009, 1460, 1461, 2193, 1439, 3342, 3339, 3340, + 1459, 3341, 1460, 1461, 3343, 1436, 3345, 4104, 3347, 2036, + 96, 1462, 2784, 36, 35, 34, 3358, 3359, 3360, 3361, + 3362, 1459, 33, 1460, 1461, 32, 1459, 26, 1460, 1461, + 1459, 25, 1460, 1461, 1459, 24, 1460, 1461, 23, 22, + 29, 1514, 1725, 1721, 19, 1514, 2775, 21, 20, 18, + 3279, 4126, 4171, 3267, 123, 55, 52, 1722, 2599, 50, + 131, 3479, 2604, 3481, 130, 1459, 53, 1460, 1461, 49, + 1205, 47, 31, 2774, 3449, 30, 17, 16, 15, 14, + 13, 3453, 2345, 2346, 1724, 2607, 1723, 2608, 12, 11, + 1725, 1721, 7, 2616, 6, 39, 2773, 2618, 2619, 1459, + 38, 1460, 1461, 37, 28, 1722, 2625, 2626, 2627, 2628, + 2629, 2630, 2631, 2632, 2633, 2634, 3333, 2636, 27, 3178, + 40, 4, 2925, 2385, 2480, 3480, 1459, 3482, 1460, 1461, + 1717, 1718, 1724, 3484, 1723, 0, 3531, 3489, 0, 0, + 2642, 2643, 2644, 2645, 2646, 3496, 2648, 0, 2387, 1459, + 2650, 1460, 1461, 1462, 2655, 2656, 3501, 2657, 0, 0, + 2660, 2661, 2663, 2665, 2666, 2667, 2668, 2669, 2670, 2672, + 2674, 2675, 2676, 2678, 732, 2680, 2681, 2683, 2685, 2687, + 2689, 2691, 2693, 2695, 2697, 2699, 2701, 2703, 2705, 2707, + 2709, 2711, 2713, 2715, 2717, 2718, 2719, 3288, 2721, 3523, + 2723, 3294, 2725, 2726, 3297, 2728, 2730, 2732, 3298, 3532, + 3526, 2735, 3525, 3520, 0, 2739, 3548, 1462, 3550, 2744, + 2745, 2746, 2747, 0, 3327, 3328, 1462, 0, 3593, 3594, + 1462, 0, 2758, 2759, 2760, 2761, 2762, 2763, 2772, 1462, + 2767, 2768, 
3456, 1462, 3458, 3459, 3460, 3486, 2770, 1462, + 0, 3542, 3543, 2776, 1462, 0, 0, 0, 2779, 2780, + 2781, 2782, 2783, 0, 1462, 0, 0, 0, 0, 2790, + 2791, 0, 2792, 0, 0, 2795, 2797, 2351, 1462, 2799, + 3513, 0, 0, 1051, 1462, 0, 1057, 1057, 0, 2811, + 0, 1459, 0, 1460, 1461, 0, 0, 1462, 0, 3527, + 3528, 1462, 2769, 0, 3576, 0, 3595, 0, 3580, 3581, + 3582, 2764, 1462, 0, 0, 2757, 0, 0, 0, 0, + 0, 0, 3611, 0, 2756, 0, 0, 0, 2755, 0, + 0, 0, 0, 0, 2754, 1462, 0, 0, 0, 2753, + 0, 0, 0, 2620, 0, 1462, 0, 0, 0, 2752, + 0, 1462, 0, 0, 0, 1459, 0, 1460, 1461, 1462, + 2635, 0, 0, 2751, 1459, 0, 1460, 1461, 1459, 2750, + 1460, 1461, 1462, 0, 0, 0, 1462, 1459, 0, 1460, + 1461, 1459, 2749, 1460, 1461, 0, 2743, 1459, 0, 1460, + 1461, 0, 1459, 0, 1460, 1461, 1462, 2742, 3672, 0, + 0, 3679, 1459, 3681, 1460, 1461, 0, 3662, 1462, 3663, + 3664, 3665, 0, 1462, 0, 3652, 1459, 0, 1460, 1461, + 2741, 0, 1459, 1462, 1460, 1461, 3179, 0, 0, 87, + 2738, 3179, 0, 3682, 0, 1459, 2737, 1460, 1461, 1459, + 1462, 1460, 1461, 0, 2736, 1462, 0, 0, 0, 0, + 1459, 0, 1460, 1461, 42, 1462, 0, 2734, 0, 1462, + 2102, 2727, 2100, 3712, 3683, 3616, 0, 0, 0, 3692, + 3691, 0, 3704, 1459, 3702, 1460, 1461, 0, 3699, 0, + 3701, 2724, 0, 1459, 0, 1460, 1461, 0, 0, 1459, + 0, 1460, 1461, 2722, 0, 0, 3862, 1459, 2720, 1460, + 1461, 0, 0, 0, 0, 0, 0, 3716, 2679, 0, + 1459, 0, 1460, 1461, 1459, 0, 1460, 1461, 0, 0, + 3572, 3573, 0, 0, 0, 2659, 0, 0, 0, 0, + 2658, 0, 0, 0, 1459, 0, 1460, 1461, 3854, 3853, + 2654, 0, 0, 0, 2652, 0, 1459, 0, 1460, 1461, + 3869, 1459, 0, 1460, 1461, 0, 3873, 3874, 0, 3881, + 0, 1459, 3852, 1460, 1461, 0, 0, 0, 3920, 3921, + 0, 0, 3029, 3030, 3031, 3032, 3033, 3706, 1459, 0, + 1460, 1461, 0, 1459, 3680, 1460, 1461, 2102, 0, 2100, + 3924, 0, 3048, 1459, 0, 1460, 1461, 1459, 0, 1460, + 1461, 0, 0, 3864, 3865, 3866, 2856, 2859, 2860, 2861, + 2857, 0, 2858, 2862, 0, 0, 3516, 3517, 0, 3175, + 0, 3713, 3714, 3969, 3179, 0, 3708, 0, 3927, 0, + 0, 0, 3930, 0, 0, 0, 0, 0, 1532, 1533, + 1534, 
1535, 1536, 1537, 1538, 1539, 1540, 1541, 1542, 1543, + 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1552, 1553, 1554, 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, - 1615, 1616, 1617, 1618, 1620, 1621, 1622, 1623, 1624, 1625, - 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1634, 1635, - 1641, 1642, 1643, 1644, 1658, 1659, 1660, 1661, 1662, 1663, - 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 3921, 3918, - 3903, 3144, 3875, 3900, 3901, 3144, 3902, 1454, 0, 1714, - 1710, 1454, 3935, 0, 0, 0, 1454, 0, 0, 0, - 0, 0, 0, 1454, 1711, 0, 0, 0, 3920, 0, - 87, 0, 0, 0, 3147, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1454, 1707, - 1708, 1713, 3165, 1712, 0, 0, 3924, 0, 0, 0, - 0, 3937, 0, 0, 0, 0, 3940, 0, 0, 3942, - 3812, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1454, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 3909, 2714, 0, 0, 0, 2707, 0, 0, 42, 0, - 2704, 0, 0, 0, 0, 0, 0, 2702, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1455, 3960, 0, 3980, 3961, 0, 0, 0, 87, - 0, 0, 2700, 0, 0, 0, 3926, 0, 0, 0, - 0, 0, 0, 1451, 3969, 1452, 1453, 1451, 0, 1452, - 1453, 1511, 1451, 0, 1452, 1453, 3976, 0, 0, 1451, - 3986, 1452, 1453, 0, 2659, 4011, 0, 3997, 3984, 0, - 3945, 0, 3989, 3994, 3991, 3990, 3988, 3993, 0, 0, - 3297, 3832, 4000, 3992, 1451, 0, 1452, 1453, 0, 0, - 0, 0, 3144, 4021, 0, 0, 0, 42, 0, 0, - 0, 0, 3314, 3315, 4024, 3316, 4042, 3318, 3320, 4032, - 0, 4037, 4066, 4050, 0, 0, 1451, 4011, 1452, 1453, - 4052, 3327, 1783, 0, 4063, 0, 3331, 3332, 3333, 3335, - 3336, 3337, 3338, 3339, 3340, 3341, 3342, 3343, 3344, 3345, - 3346, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, - 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3380, 3382, - 3383, 3384, 3386, 1970, 4067, 3388, 4083, 3390, 
3391, 3392, - 4082, 4093, 3396, 3397, 3398, 3399, 3400, 3401, 3402, 3403, - 3404, 3405, 3406, 2084, 4099, 2082, 4096, 4095, 4086, 4097, - 4092, 3413, 4062, 3981, 4011, 3418, 1454, 4107, 0, 3422, - 3423, 0, 3424, 3426, 4115, 3429, 3431, 3141, 3433, 3434, - 3435, 3436, 4123, 4121, 0, 1454, 3442, 0, 0, 1454, - 3949, 0, 0, 0, 1454, 0, 0, 0, 3959, 1454, - 0, 0, 4132, 4133, 3871, 4131, 0, 0, 1454, 0, - 0, 2084, 0, 2082, 4130, 0, 0, 0, 0, 3933, - 0, 3464, 3465, 0, 0, 3469, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1771, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2639, 4080, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2638, - 0, 0, 0, 2634, 0, 0, 0, 0, 2632, 0, - 0, 0, 4058, 2597, 0, 0, 0, 0, 0, 0, - 0, 0, 2586, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1451, 0, 1452, 1453, 0, 1732, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1451, 1784, 1452, 1453, 1451, 0, 1452, 1453, 0, - 1451, 3544, 1452, 1453, 0, 1451, 0, 1452, 1453, 0, - 0, 0, 0, 0, 1451, 1820, 1452, 1453, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3563, 0, 0, 3567, - 0, 0, 0, 0, 1797, 1800, 1801, 1802, 1803, 1804, - 1805, 0, 1806, 1807, 1809, 1810, 1808, 1811, 1812, 1785, - 1786, 1787, 1788, 1769, 1770, 1798, 3578, 1772, 0, 1773, - 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, 0, 0, - 1782, 1789, 1790, 1791, 1792, 0, 1793, 1794, 1795, 1796, - 0, 0, 0, 1690, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 940, 0, 0, 0, 0, 0, 0, 0, 0, - 3601, 0, 0, 1975, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 3609, 0, 0, 0, 0, 0, 0, - 0, 3616, 663, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1009, 0, 0, 0, 0, 195, 0, 0, - 195, 0, 0, 0, 714, 0, 0, 0, 0, 720, + 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, + 1625, 1626, 1627, 1629, 1630, 1631, 1632, 1633, 1634, 1635, + 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1650, + 1651, 1652, 1653, 1667, 1668, 1669, 1670, 1671, 1672, 1673, + 1674, 1675, 1676, 1677, 1678, 1679, 1680, 3925, 3954, 
3953, + 3970, 0, 0, 1462, 0, 3944, 0, 1462, 3178, 0, + 0, 3988, 3950, 3178, 3952, 0, 0, 0, 1462, 0, + 0, 0, 0, 0, 0, 1700, 0, 0, 0, 87, + 3181, 0, 3019, 0, 0, 0, 0, 3972, 0, 0, + 3973, 0, 0, 0, 0, 0, 0, 0, 3199, 0, + 0, 0, 0, 0, 42, 3857, 3977, 0, 0, 0, + 3993, 0, 3990, 0, 0, 4124, 0, 0, 0, 0, + 0, 0, 3995, 0, 0, 1798, 0, 0, 3861, 0, + 0, 0, 0, 0, 667, 0, 0, 0, 2617, 0, + 0, 0, 2611, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2606, 1015, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 4013, 0, 0, 4014, 0, 0, 0, 4038, 0, 0, + 0, 0, 87, 0, 0, 0, 0, 0, 1463, 0, + 0, 1459, 4023, 1460, 1461, 1459, 1086, 1460, 1461, 0, + 0, 0, 0, 0, 0, 4032, 1459, 42, 1460, 1461, + 0, 0, 3091, 0, 3093, 0, 4044, 0, 4069, 1520, + 0, 0, 0, 0, 4055, 4042, 3331, 4047, 4052, 4049, + 3104, 3105, 3106, 3107, 4048, 0, 4046, 3881, 4058, 4051, + 4050, 0, 0, 0, 0, 0, 3974, 4077, 3348, 3349, + 0, 3350, 3352, 3354, 0, 0, 3178, 0, 0, 0, + 0, 0, 0, 0, 4100, 4090, 4095, 4082, 0, 0, + 0, 4108, 1786, 0, 0, 4069, 0, 0, 4110, 3367, + 4121, 0, 0, 0, 3371, 3372, 3373, 3375, 3376, 3377, + 3378, 3379, 3380, 3381, 3382, 3383, 3384, 3385, 3386, 3387, + 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, + 3409, 3411, 3413, 3415, 3417, 3419, 3420, 3422, 3423, 3424, + 3426, 4125, 1986, 3428, 4140, 3430, 3431, 3432, 4141, 4151, + 3436, 3437, 3438, 3439, 3440, 3441, 3442, 3443, 3444, 3445, + 3446, 4155, 2102, 4157, 2100, 4154, 4153, 4144, 4150, 3452, + 4120, 4034, 4069, 3457, 0, 4165, 1799, 3461, 3462, 4039, + 3463, 3465, 4173, 3468, 3470, 3175, 3472, 3473, 3474, 3475, + 4181, 4179, 0, 0, 0, 0, 3483, 0, 0, 0, + 0, 0, 0, 0, 3979, 0, 0, 0, 0, 0, + 4190, 4191, 3921, 4189, 1798, 0, 0, 0, 0, 0, + 2102, 0, 2100, 4188, 0, 0, 0, 0, 0, 0, + 0, 3507, 3508, 3986, 0, 3512, 0, 0, 3998, 1812, + 1815, 1816, 1817, 1818, 1819, 1820, 0, 1821, 1822, 1824, + 1825, 1823, 1826, 1827, 1800, 1801, 1802, 1803, 1784, 1785, + 1813, 0, 1787, 0, 1788, 1789, 1790, 1791, 1792, 1793, + 1794, 1795, 1796, 0, 0, 1797, 1804, 1805, 1806, 1807, + 0, 1808, 
1809, 1810, 1811, 0, 0, 0, 0, 0, + 0, 0, 4116, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4033, 0, 1747, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3587, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1786, 0, 0, 0, 1835, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 945, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3606, 0, 0, 3610, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3622, 0, 0, 0, 0, 0, 0, 0, + 0, 195, 0, 0, 195, 0, 0, 0, 718, 0, + 0, 0, 0, 724, 0, 1799, 0, 0, 0, 0, + 0, 0, 0, 0, 195, 0, 0, 0, 4138, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 1010, 0, 2294, 1694, 0, 1011, 0, 0, + 0, 0, 0, 0, 0, 0, 3645, 2101, 0, 0, + 0, 1814, 0, 1991, 0, 724, 195, 724, 0, 3653, + 0, 0, 0, 0, 0, 0, 3660, 0, 1812, 1815, + 1816, 1817, 1818, 1819, 1820, 0, 1821, 1822, 1824, 1825, + 1823, 1826, 1827, 1800, 1801, 1802, 1803, 1784, 1785, 1813, + 0, 1787, 0, 1788, 1789, 1790, 1791, 1792, 1793, 1794, + 1795, 1796, 0, 0, 1797, 1804, 1805, 1806, 1807, 0, + 1808, 1809, 1810, 1811, 0, 0, 0, 0, 0, 0, + 1206, 0, 1212, 968, 969, 970, 971, 972, 973, 974, + 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, + 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, + 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, + 1005, 1006, 1007, 1008, 1009, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1435, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3870, 0, 0, 0, 0, + 0, 0, 0, 0, 3877, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3887, 3888, 3889, 0, 3891, 0, + 3892, 3893, 0, 0, 0, 3896, 3897, 3898, 3899, 3900, + 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, + 3911, 3912, 3913, 3914, 3915, 3916, 3917, 0, 3919, 3922, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3931, 3932, 3933, 3934, 3935, 3937, + 3938, 3940, 3942, 3943, 3945, 3643, 0, 0, 3949, 0, + 0, 0, 3951, 2056, 2057, 2058, 2059, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2069, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 
1814, 0, 0, 0, 0, 0, 0, 3978, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 2108, 2109, 0, 0, 0, 0, 2132, + 0, 0, 2136, 2137, 0, 0, 0, 2142, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, + 2162, 2163, 0, 2165, 0, 0, 0, 2187, 2188, 2189, + 2190, 2191, 2192, 2194, 0, 2199, 0, 2201, 2202, 2203, + 0, 2205, 2206, 2207, 0, 2209, 2210, 2211, 2212, 2213, + 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, 2222, 2223, + 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, 2232, 2233, + 2234, 2235, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, + 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, + 2254, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, + 2267, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, + 2277, 2278, 2279, 2280, 0, 0, 0, 0, 0, 2286, + 0, 2288, 0, 2295, 2296, 2297, 2298, 2299, 2300, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2311, 2312, 2313, 2314, 2315, 2316, 2317, 2318, + 0, 2320, 2321, 2322, 2323, 2324, 1759, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 4003, 0, 0, 1776, 0, 0, 0, 0, + 0, 0, 3960, 0, 0, 0, 0, 0, 0, 0, + 0, 1057, 0, 0, 0, 0, 4018, 0, 0, 0, + 0, 0, 4021, 0, 4022, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2365, 2366, + 0, 0, 0, 0, 0, 0, 0, 4037, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 195, 0, 2404, 0, 0, 0, 0, 0, + 0, 0, 0, 4063, 4064, 0, 0, 0, 1915, 0, + 0, 0, 0, 0, 0, 0, 0, 4071, 4073, 4075, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 724, + 0, 724, 724, 0, 0, 4081, 0, 0, 0, 0, + 0, 0, 0, 1960, 0, 0, 0, 4103, 0, 0, + 0, 724, 195, 0, 0, 2446, 0, 0, 0, 0, + 1987, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1998, 0, 0, 0, + 1506, 190, 0, 2002, 0, 4122, 0, 0, 0, 0, + 0, 0, 2931, 0, 2013, 2014, 2015, 2016, 2017, 2018, + 2019, 0, 0, 0, 0, 129, 0, 151, 1694, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 172, 4145, + 4147, 4149, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1010, 0, 0, 0, 162, + 1011, 0, 4170, 0, 0, 150, 0, 0, 0, 0, + 2101, 0, 0, 0, 4002, 0, 0, 0, 0, 0, + 
4182, 4183, 4012, 0, 169, 0, 0, 170, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1856, 1857, 161, + 160, 189, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1694, 0, 968, 969, 970, 971, + 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, + 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 0, 0, + 0, 1506, 0, 0, 0, 0, 0, 0, 0, 0, + 2049, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 155, 1858, 158, 0, 1855, 0, 156, 157, + 0, 0, 0, 0, 0, 173, 0, 0, 0, 0, + 0, 0, 0, 0, 179, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 195, + 0, 0, 0, 724, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1079, 0, 195, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 720, 195, 720, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3821, 0, 0, 0, - 0, 0, 0, 0, 0, 3828, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1799, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 3838, 3839, 0, 3841, 0, - 3842, 3843, 0, 0, 0, 3846, 3847, 3848, 3849, 3850, - 3851, 3852, 3853, 3854, 3855, 3856, 3857, 3858, 3859, 3860, - 3861, 3862, 3863, 3864, 3865, 3866, 3867, 0, 3869, 3872, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3881, 3882, 3883, 3884, 3885, 3887, - 3888, 3890, 3892, 3893, 3895, 0, 0, 0, 0, 0, - 0, 0, 2040, 2041, 2042, 2043, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2051, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3925, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 2090, 2091, 0, 0, 0, 0, 2114, 1050, - 1050, 2118, 0, 0, 0, 2123, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2135, 2136, 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, - 0, 2146, 0, 0, 0, 2168, 2169, 2170, 2171, 2172, - 2173, 2175, 0, 2180, 0, 2182, 2183, 2184, 0, 2186, - 2187, 2188, 0, 2191, 2192, 2193, 2194, 2195, 2196, 2197, - 2198, 2199, 2200, 2201, 2202, 2203, 
2204, 2205, 2206, 2207, - 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, - 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, - 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, 2240, - 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, - 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, - 2261, 2262, 0, 0, 0, 0, 1783, 2268, 0, 2270, - 0, 2277, 2278, 2279, 2280, 2281, 2282, 1050, 0, 1050, - 1050, 1050, 1050, 1050, 0, 0, 0, 0, 0, 0, - 2294, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 0, 2303, - 2304, 2305, 2306, 2307, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3950, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1050, 0, - 3966, 0, 0, 0, 0, 0, 3967, 3968, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2345, 2346, 0, 0, 0, 0, 0, 0, 3979, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2384, 0, 0, 0, - 0, 0, 0, 0, 4005, 4006, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 190, 0, 0, 4013, 4015, - 4017, 0, 1771, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 129, - 0, 151, 0, 4045, 0, 0, 0, 0, 0, 0, - 0, 0, 172, 0, 0, 0, 0, 2426, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1199, 0, 1205, 0, 0, 0, - 0, 4064, 0, 162, 0, 0, 0, 0, 0, 150, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 195, 0, 195, 0, 1784, 0, 169, 0, - 0, 170, 0, 0, 0, 4087, 4089, 4091, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 138, 139, 161, 160, 189, 0, 1427, 0, 0, 0, - 0, 720, 0, 720, 720, 0, 0, 0, 4112, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 720, 195, 0, 4124, 4125, 1797, 1800, - 1801, 1802, 1803, 1804, 1805, 0, 1806, 1807, 1809, 1810, - 1808, 1811, 1812, 1785, 1786, 1787, 1788, 1769, 1770, 1798, - 0, 1772, 1498, 1773, 1774, 1775, 1776, 1777, 1778, 1779, - 1780, 1781, 0, 0, 1782, 1789, 1790, 1791, 1792, 0, - 1793, 1794, 1795, 1796, 957, 0, 2276, 0, 0, 958, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2083, - 0, 0, 0, 0, 0, 155, 136, 158, 143, 135, - 0, 156, 157, 0, 0, 0, 0, 0, 173, 0, - 0, 0, 0, 0, 0, 0, 0, 179, 144, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 147, 145, 
140, 141, 142, 146, 0, 0, - 0, 0, 0, 0, 137, 0, 0, 0, 0, 0, - 0, 0, 0, 148, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 964, 965, 966, 967, 968, 969, - 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, - 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, - 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, - 1000, 1001, 1002, 1003, 1004, 1005, 0, 0, 0, 0, + 0, 0, 195, 0, 0, 2615, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2621, 2622, 2623, 2624, 0, + 0, 0, 0, 724, 0, 0, 195, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 724, 0, + 0, 0, 0, 0, 0, 195, 0, 0, 0, 724, + 0, 0, 0, 0, 0, 0, 0, 0, 1520, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2595, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2601, 2602, 2603, 2604, 0, 0, 0, - 0, 164, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1511, 0, 0, 1799, + 0, 0, 724, 0, 724, 0, 0, 0, 0, 164, + 0, 0, 724, 0, 0, 1506, 724, 0, 0, 724, + 724, 724, 724, 0, 724, 0, 724, 724, 0, 724, + 724, 724, 724, 724, 724, 0, 0, 0, 0, 0, + 0, 0, 1506, 724, 724, 1506, 724, 1506, 195, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 195, - 0, 0, 0, 720, 720, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1744, 0, 0, - 0, 0, 195, 0, 0, 0, 0, 159, 0, 0, - 0, 0, 0, 0, 0, 0, 1761, 0, 0, 0, - 0, 0, 720, 0, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 720, 0, 0, - 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 720, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1498, 0, 0, 0, 0, 1900, - 720, 720, 0, 720, 0, 720, 720, 0, 720, 720, - 720, 720, 720, 720, 0, 152, 0, 0, 153, 1732, - 0, 1498, 0, 0, 1498, 720, 1498, 195, 0, 0, - 0, 0, 0, 0, 1945, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 195, 165, 0, - 0, 1971, 0, 0, 0, 177, 0, 0, 0, 0, - 720, 0, 195, 0, 0, 0, 0, 1982, 0, 0, - 0, 0, 0, 0, 1986, 0, 720, 0, 195, 195, - 0, 0, 0, 0, 0, 1997, 1998, 1999, 2000, 2001, - 2002, 2003, 0, 0, 0, 195, 185, 0, 0, 0, - 0, 0, 
195, 0, 0, 0, 0, 0, 0, 0, - 0, 195, 195, 195, 195, 195, 195, 195, 195, 195, - 720, 0, 0, 0, 0, 0, 0, 0, 939, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, + 0, 724, 0, 195, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 724, 0, 0, 724, 0, 195, + 195, 0, 0, 0, 0, 159, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, + 0, 0, 0, 195, 0, 0, 0, 0, 0, 0, + 0, 0, 195, 195, 195, 195, 195, 195, 195, 195, + 195, 724, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1747, 0, 2371, 0, 0, 0, 0, 0, + 0, 0, 2375, 0, 2378, 0, 0, 2049, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 152, 0, 0, 153, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 165, 0, + 0, 0, 0, 0, 0, 177, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 185, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 724, 724, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 724, 0, 0, 0, 0, 0, 0, 0, 0, + 195, 0, 0, 0, 0, 0, 0, 0, 0, 166, 171, 168, 174, 175, 176, 178, 180, 181, 182, 183, 0, 0, 0, 0, 0, 184, 186, 187, 188, 0, + 0, 0, 0, 0, 0, 2049, 0, 0, 0, 0, + 2994, 0, 2536, 0, 0, 0, 0, 0, 0, 724, + 0, 0, 2555, 2556, 0, 0, 2560, 0, 0, 1506, + 0, 0, 0, 0, 3020, 3021, 3022, 0, 2565, 3024, + 0, 0, 3026, 0, 0, 2568, 0, 1506, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3045, 3046, 3047, 0, 0, 0, 0, 0, + 0, 2571, 0, 0, 3052, 0, 0, 3054, 3055, 3056, + 0, 0, 0, 3057, 3058, 0, 0, 3059, 0, 3060, + 0, 0, 0, 0, 0, 0, 3061, 0, 3062, 0, + 0, 0, 3063, 0, 3064, 0, 0, 3065, 0, 3066, + 0, 3067, 0, 3068, 0, 3069, 0, 3070, 0, 3071, + 0, 3072, 0, 3073, 0, 3074, 0, 3075, 0, 3076, + 0, 3077, 0, 3078, 0, 3079, 0, 3080, 0, 3081, + 0, 3082, 0, 0, 0, 3083, 0, 3084, 0, 3085, + 0, 0, 3086, 0, 3087, 0, 3088, 0, 2258, 3090, + 0, 0, 3092, 0, 0, 3094, 3095, 3096, 3097, 0, + 0, 0, 0, 3098, 2258, 2258, 2258, 2258, 2258, 0, + 0, 2305, 0, 0, 0, 0, 0, 0, 0, 3108, + 0, 0, 0, 0, 0, 0, 0, 3121, 0, 0, + 
3125, 0, 0, 0, 0, 0, 0, 0, 0, 3128, + 3129, 3130, 3131, 3132, 3133, 0, 0, 0, 3134, 3135, + 0, 3136, 0, 3137, 0, 0, 195, 0, 0, 0, + 0, 724, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1057, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, + 724, 0, 0, 0, 0, 0, 0, 0, 3170, 0, + 0, 0, 0, 0, 195, 0, 0, 0, 724, 0, + 0, 2305, 195, 0, 195, 0, 195, 195, 0, 0, + 0, 0, 0, 3200, 0, 0, 0, 0, 0, 0, + 0, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 699, 0, 0, 0, 0, 0, 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 3263, 0, 0, 0, 724, 0, + 0, 0, 0, 0, 0, 724, 724, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 724, 0, 0, 0, 0, + 0, 724, 724, 944, 0, 724, 0, 724, 0, 0, + 0, 0, 0, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2974, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 719, 0, - 719, 0, 0, 0, 0, 0, 0, 0, 0, 1050, - 0, 0, 3001, 3002, 0, 0, 3004, 0, 0, 3006, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 2033, 0, 0, 0, 0, 0, 720, 720, 0, 3013, - 3014, 3015, 0, 0, 0, 0, 0, 0, 0, 720, - 0, 3020, 0, 0, 3022, 3023, 3024, 0, 195, 0, - 3025, 3026, 0, 0, 3027, 0, 3028, 0, 0, 0, - 0, 0, 0, 3029, 0, 3030, 0, 0, 0, 3031, - 0, 3032, 0, 0, 3033, 0, 3034, 0, 3035, 0, - 3036, 0, 3037, 0, 3038, 0, 3039, 0, 3040, 0, - 3041, 0, 3042, 0, 3043, 0, 3044, 720, 3045, 0, - 3046, 0, 3047, 0, 3048, 0, 3049, 1498, 3050, 0, - 0, 0, 3051, 0, 3052, 0, 3053, 0, 0, 3054, - 0, 3055, 0, 3056, 1498, 2240, 3058, 0, 0, 3060, - 0, 0, 3062, 3063, 3064, 3065, 0, 0, 0, 0, - 3066, 2240, 2240, 2240, 2240, 2240, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3076, 0, 0, 0, - 0, 0, 0, 0, 3089, 0, 0, 3093, 0, 1050, - 0, 0, 0, 0, 0, 0, 3096, 3097, 3098, 3099, - 3100, 3101, 0, 0, 0, 3102, 3103, 0, 3104, 0, - 3105, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 2869, 0, 724, 0, + 0, 0, 0, 724, 0, 0, 0, 724, 724, 0, + 3356, 0, 0, 0, 0, 703, 0, 0, 0, 0, + 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3370, 0, 0, 0, + 0, 0, 0, 0, 0, 195, 0, 0, 
0, 0, + 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, + 190, 0, 195, 195, 0, 0, 195, 0, 195, 0, + 2918, 0, 0, 723, 0, 723, 0, 0, 195, 0, + 0, 0, 0, 0, 129, 195, 151, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 172, 0, 0, + 0, 0, 0, 0, 0, 0, 901, 0, 0, 0, + 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, + 724, 0, 0, 0, 0, 0, 0, 0, 162, 0, + 0, 0, 0, 0, 150, 0, 2967, 2968, 2969, 2970, + 2971, 2972, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 169, 0, 0, 170, 0, 0, 0, + 0, 0, 0, 2049, 2982, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 722, 0, 138, 139, 161, 160, + 189, 0, 0, 0, 0, 0, 2990, 0, 0, 0, + 0, 1506, 0, 2305, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1082, 0, 1089, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3568, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3592, 0, + 0, 155, 136, 158, 143, 135, 0, 156, 157, 0, + 0, 0, 0, 0, 173, 0, 0, 0, 0, 0, + 0, 0, 0, 179, 144, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 147, 145, + 140, 141, 142, 146, 0, 0, 0, 0, 0, 0, + 137, 0, 0, 0, 0, 0, 3612, 0, 3613, 148, + 3614, 0, 3615, 0, 0, 0, 0, 0, 0, 0, + 3618, 3619, 0, 0, 0, 0, 0, 0, 0, 3623, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3624, 0, 3625, 0, 3626, 0, 3627, + 0, 3628, 0, 3629, 0, 3630, 0, 3631, 0, 3632, + 0, 3633, 0, 3634, 0, 3635, 0, 3636, 0, 3637, + 0, 3638, 0, 3639, 0, 0, 3640, 0, 0, 0, + 3641, 0, 3642, 0, 195, 0, 0, 0, 3644, 0, + 0, 0, 195, 0, 0, 0, 0, 0, 164, 0, + 0, 0, 0, 724, 0, 0, 0, 0, 0, 0, + 3661, 0, 0, 0, 0, 0, 0, 724, 0, 3666, + 0, 3667, 3668, 0, 3669, 0, 3670, 0, 0, 0, + 0, 3671, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 195, 0, 0, 0, 0, 195, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3700, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3709, 0, 0, 3711, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3715, 0, 0, 0, + 0, 0, 0, 0, 159, 0, 3253, 0, 0, 0, + 0, 0, 3849, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 724, 0, 0, 0, 0, 0, + 195, 0, 3291, 0, 0, 0, 0, 195, 0, 0, + 0, 0, 0, 0, 0, 0, 3305, 0, 0, 0, + 0, 724, 0, 0, 0, 0, 0, 0, 724, 0, + 0, 0, 
724, 724, 0, 0, 3323, 724, 0, 3326, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1506, 724, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 195, 195, 195, + 195, 195, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 152, 0, 0, 153, 0, 0, 0, + 0, 0, 0, 195, 195, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 723, 1422, 723, + 723, 0, 0, 0, 0, 0, 195, 165, 0, 0, + 3959, 0, 0, 0, 177, 0, 0, 0, 0, 723, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 724, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1505, 0, + 0, 0, 0, 0, 0, 185, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 724, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3485, 0, 0, 0, 0, 0, 166, 171, + 168, 174, 175, 176, 178, 180, 181, 182, 183, 0, + 0, 0, 0, 0, 184, 186, 187, 188, 0, 0, + 190, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1852, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 129, 0, 151, 0, 0, 0, + 1270, 0, 1270, 1270, 0, 0, 0, 172, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1434, 0, 0, 0, 0, 3546, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 162, 0, + 0, 0, 724, 0, 150, 0, 0, 3561, 0, 0, + 3562, 3563, 3564, 0, 724, 0, 0, 0, 0, 0, + 0, 0, 0, 169, 0, 4001, 170, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 724, 1856, 1857, 161, 160, + 189, 0, 0, 0, 0, 0, 0, 0, 0, 1505, + 195, 0, 0, 724, 0, 0, 0, 0, 0, 0, + 0, 4015, 0, 0, 4016, 0, 4017, 724, 0, 0, + 0, 1506, 0, 0, 724, 724, 1506, 195, 195, 195, + 195, 195, 0, 0, 0, 0, 0, 0, 0, 195, + 0, 0, 0, 0, 0, 195, 0, 195, 0, 0, + 195, 195, 195, 0, 0, 0, 0, 0, 0, 0, + 0, 723, 723, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 155, 1858, 158, 0, 1855, 195, 156, 157, 0, + 0, 723, 0, 0, 173, 0, 0, 0, 0, 724, + 0, 0, 1506, 179, 0, 0, 723, 724, 4101, 0, + 0, 0, 195, 0, 0, 0, 0, 723, 0, 0, + 0, 0, 0, 0, 0, 0, 195, 723, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4117, 0, 4118, + 0, 4119, 0, 0, 0, 0, 195, 0, 0, 195, + 723, 0, 723, 0, 0, 0, 0, 0, 0, 0, + 723, 0, 0, 1505, 723, 0, 0, 723, 723, 723, + 723, 0, 723, 0, 723, 723, 0, 723, 723, 723, + 723, 723, 723, 0, 0, 0, 0, 0, 0, 0, 
+ 1505, 723, 723, 1505, 723, 1505, 0, 723, 0, 0, + 0, 0, 0, 0, 1703, 1704, 0, 0, 0, 0, + 0, 4168, 0, 4169, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 164, 723, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 1753, 723, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1771, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1829, 0, 0, 0, 0, 724, 0, 0, 0, 0, + 1838, 0, 0, 0, 0, 0, 0, 0, 0, 723, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1082, 0, 1864, 0, 0, 0, 0, + 0, 0, 195, 1873, 0, 0, 0, 1875, 0, 0, + 1878, 1879, 1881, 1881, 159, 1881, 0, 1881, 1881, 0, + 1890, 1881, 1881, 1881, 1881, 1881, 0, 0, 0, 0, + 0, 0, 0, 0, 1910, 1911, 0, 1082, 0, 0, + 1916, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 195, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1958, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1979, 0, 195, 1983, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, + 195, 195, 195, 0, 0, 0, 0, 0, 0, 0, + 724, 724, 0, 152, 0, 0, 153, 0, 0, 0, + 0, 0, 1270, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 723, 723, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 165, 0, 723, + 0, 0, 0, 0, 177, 0, 0, 0, 0, 724, + 724, 724, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 3136, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 185, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 723, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1505, 0, 0, + 0, 0, 0, 0, 902, 0, 2110, 0, 0, 0, + 0, 0, 0, 0, 0, 1505, 0, 0, 166, 171, + 168, 174, 175, 176, 178, 180, 181, 182, 183, 3999, + 0, 0, 0, 0, 184, 186, 187, 188, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2288, 0, - 3166, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 957, 0, 0, 0, 0, 958, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2083, 0, - 0, 0, 195, 0, 0, 0, 0, 720, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2351, 0, 0, - 0, 3229, 0, 0, 0, 2355, 0, 2358, 0, 0, - 2033, 0, 195, 0, 0, 720, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 195, 0, 0, 0, 720, - 0, 0, 2288, 195, 0, 195, 0, 195, 195, 0, + 193, 0, 0, 668, 0, 0, 0, 0, 0, 1270, + 1270, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 2037, 668, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1038, + 0, 0, 0, 0, 0, 724, 0, 724, 0, 195, + 0, 0, 0, 0, 0, 0, 1058, 1058, 0, 0, + 0, 0, 0, 0, 0, 668, 0, 0, 1506, 0, + 0, 0, 195, 0, 0, 724, 0, 724, 0, 0, + 2096, 0, 0, 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 720, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, - 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, - 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, - 1001, 1002, 1003, 1004, 1005, 0, 0, 3321, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 720, - 0, 3330, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 720, 0, 0, 0, - 0, 0, 720, 0, 0, 190, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1837, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 129, - 0, 151, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 172, 0, 0, 0, 0, 0, 0, 720, - 0, 0, 0, 0, 720, 0, 0, 0, 720, 720, - 0, 0, 0, 0, 0, 0, 0, 0, 2033, 0, - 0, 0, 0, 162, 0, 2516, 0, 0, 0, 150, - 0, 0, 0, 0, 2533, 2534, 0, 0, 2538, 0, - 0, 0, 0, 0, 0, 0, 195, 0, 169, 0, - 2543, 170, 0, 195, 0, 0, 0, 2546, 719, 1414, - 719, 719, 195, 195, 0, 0, 195, 0, 195, 0, - 1841, 1842, 161, 160, 189, 0, 0, 0, 195, 0, - 719, 0, 0, 2549, 0, 195, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1497, - 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, - 720, 0, 0, 0, 0, 0, 0, 0, 3525, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3549, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 155, 1843, 158, 0, 1840, - 0, 156, 157, 0, 0, 0, 0, 0, 173, 1498, - 0, 2288, 0, 0, 0, 0, 0, 179, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3569, - 0, 3570, 0, 0, 3571, 0, 0, 3574, 3575, 0, - 0, 0, 0, 0, 0, 0, 3579, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 897, 0, 0, - 3580, 0, 3581, 0, 3582, 0, 3583, 0, 3584, 0, - 3585, 0, 3586, 0, 3587, 0, 3588, 0, 3589, 0, - 3590, 0, 3591, 0, 3592, 0, 3593, 0, 3594, 0, - 3595, 0, 0, 3596, 0, 0, 0, 3597, 0, 3598, - 0, 0, 0, 0, 0, 3600, 0, 
0, 0, 0, - 0, 0, 0, 193, 0, 0, 664, 0, 0, 1497, - 0, 0, 0, 0, 0, 0, 0, 0, 3617, 0, - 0, 164, 0, 0, 0, 0, 664, 3622, 0, 3623, - 3624, 0, 3625, 0, 3626, 0, 0, 0, 0, 3627, - 0, 0, 1032, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1051, - 1051, 0, 0, 0, 3652, 0, 0, 0, 664, 0, - 719, 719, 0, 0, 0, 3660, 0, 0, 3662, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 3666, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3800, 0, 0, 719, - 0, 0, 0, 195, 0, 0, 0, 159, 0, 0, - 0, 195, 0, 0, 719, 0, 0, 0, 0, 0, - 0, 0, 720, 0, 0, 1814, 0, 0, 0, 0, - 0, 0, 0, 720, 2849, 1823, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 195, 719, 0, - 1849, 0, 195, 0, 0, 0, 0, 0, 1858, 0, - 0, 1497, 1860, 0, 0, 1863, 1864, 719, 719, 0, - 719, 0, 719, 719, 0, 719, 719, 719, 719, 719, - 719, 0, 0, 0, 0, 0, 0, 0, 1497, 1895, - 1896, 1497, 719, 1497, 0, 1901, 0, 0, 2898, 0, - 0, 0, 0, 0, 896, 152, 0, 0, 153, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3908, - 720, 0, 0, 0, 0, 0, 195, 719, 0, 0, - 0, 0, 0, 195, 0, 0, 0, 0, 165, 0, - 1963, 0, 0, 719, 0, 177, 0, 720, 0, 0, - 0, 0, 0, 0, 720, 0, 0, 0, 0, 0, - 0, 0, 0, 720, 2947, 2948, 2949, 2950, 2951, 2952, - 0, 0, 718, 0, 0, 0, 0, 0, 0, 1498, - 0, 0, 0, 0, 0, 0, 185, 719, 0, 2033, - 2962, 0, 195, 195, 195, 195, 195, 195, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2970, 0, 0, 195, 195, 0, - 0, 0, 0, 0, 1075, 0, 1082, 0, 0, 166, - 171, 168, 174, 175, 176, 178, 180, 181, 182, 183, - 0, 0, 195, 0, 0, 184, 186, 187, 188, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 720, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 724, 0, 0, 0, 723, + 0, 0, 0, 0, 0, 0, 0, 0, 195, 0, + 0, 724, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 724, 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 723, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 720, 0, 0, 0, 0, 0, 0, - 0, 0, 3948, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
0, 719, 719, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 719, 0, 3962, 0, - 0, 3963, 0, 3964, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 724, 0, + 0, 0, 1270, 0, 0, 724, 0, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, + 0, 0, 0, 723, 723, 723, 724, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2342, 723, 0, 0, 0, 0, 0, 723, + 723, 0, 0, 723, 0, 723, 0, 0, 0, 0, + 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 2355, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1753, + 0, 0, 1270, 0, 0, 0, 723, 0, 0, 0, + 0, 723, 0, 0, 0, 723, 723, 0, 0, 0, + 0, 0, 1082, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 719, 0, 0, 0, 720, 0, - 0, 0, 0, 0, 1497, 0, 0, 0, 0, 0, - 720, 0, 0, 2092, 0, 0, 0, 0, 0, 0, - 0, 1497, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 4043, 0, 0, - 0, 720, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 195, 0, 0, 664, 720, - 664, 0, 0, 0, 0, 0, 4059, 0, 4060, 0, - 4061, 0, 0, 720, 0, 0, 0, 1498, 0, 0, - 720, 720, 1498, 195, 195, 195, 195, 195, 0, 0, - 0, 0, 0, 0, 0, 195, 0, 0, 0, 0, - 0, 195, 0, 195, 0, 0, 195, 195, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 664, 0, 0, 0, 3219, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 4110, 0, 4111, 0, 0, 0, 0, 0, 1499, 0, - 3257, 0, 195, 0, 0, 719, 0, 0, 0, 0, - 0, 0, 0, 0, 3271, 720, 0, 0, 1498, 0, - 0, 0, 0, 720, 0, 0, 0, 0, 195, 0, - 0, 0, 0, 0, 3289, 0, 0, 3292, 0, 0, - 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 719, 0, 0, 0, 0, 0, - 0, 0, 195, 0, 0, 195, 0, 0, 0, 0, + 0, 0, 0, 0, 724, 0, 0, 0, 0, 1089, + 0, 0, 0, 0, 0, 0, 2468, 2469, 2470, 0, + 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1082, 0, 724, 195, + 0, 0, 1089, 1873, 0, 0, 1873, 0, 1873, 0, + 0, 0, 0, 0, 2500, 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 719, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 719, 0, 0, 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 719, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1082, + 0, 0, 0, 0, 2096, 0, 0, 0, 2096, 2096, + 0, 0, 0, 0, 
724, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 724, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1506, 724, 0, 724, 0, 1505, + 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 724, 2305, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 195, 724, 0, 0, 0, 0, 668, + 0, 668, 0, 0, 0, 95, 0, 0, 1010, 0, + 0, 2574, 950, 1011, 963, 964, 965, 951, 0, 0, + 952, 953, 0, 954, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 724, 0, 0, 959, 0, 966, + 967, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 724, 0, + 0, 668, 0, 195, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 724, 0, 724, + 0, 0, 0, 0, 1270, 0, 0, 3299, 3300, 1507, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 968, + 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, + 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, + 1009, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 719, 0, 0, 720, - 0, 0, 0, 2448, 2449, 2450, 3444, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1499, 0, - 0, 0, 0, 719, 0, 0, 0, 0, 0, 719, - 1858, 0, 0, 1858, 195, 1858, 0, 0, 0, 0, - 0, 2480, 0, 0, 1262, 0, 1262, 1262, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1426, 0, 0, 0, - 0, 0, 0, 0, 0, 664, 719, 0, 0, 0, - 0, 719, 0, 0, 0, 719, 719, 0, 0, 0, - 195, 3503, 0, 0, 0, 0, 0, 0, 1032, 0, + 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3301, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3518, 0, 0, 3519, 3520, 3521, 0, 0, 195, - 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 195, - 664, 0, 195, 195, 195, 0, 0, 0, 0, 0, - 0, 0, 720, 720, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1499, 0, 0, 0, 0, 0, 0, 719, 0, 0, - 0, 720, 720, 720, 720, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1499, 0, 0, - 1499, 0, 1499, 664, 0, 0, 0, 0, 0, 0, + 2882, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, - 0, 0, 0, 1917, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, - 0, 0, 0, 0, 0, 0, 1497, 0, 719, 0, - 0, 0, 0, 0, 1969, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 664, 0, 0, 0, 0, 0, 0, 664, 0, - 0, 0, 0, 0, 0, 0, 0, 1995, 1996, 664, - 664, 664, 664, 664, 664, 664, 0, 0, 0, 0, + 3302, 3303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, + 1507, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 723, + 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, + 723, 723, 0, 0, 2817, 723, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2832, 0, + 0, 1505, 723, 0, 915, 0, 0, 0, 668, 0, + 919, 0, 0, 0, 916, 917, 0, 0, 0, 918, + 920, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1038, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1693, 1694, 0, 0, - 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 668, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 723, 0, 0, + 0, 0, 0, 0, 0, 2914, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1498, 0, 0, 0, 0, 720, 0, - 720, 0, 0, 0, 0, 1738, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1756, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2355, 0, 1507, 0, 0, 0, 0, 2939, + 0, 0, 0, 1873, 1873, 723, 0, 0, 2944, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1507, 0, 0, 1507, 2955, 1507, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 720, 0, - 0, 0, 0, 0, 1075, 0, 0, 0, 0, 0, - 0, 195, 0, 0, 720, 0, 0, 0, 0, 0, - 0, 0, 0, 1866, 1866, 0, 1866, 720, 1866, 1866, - 0, 1875, 1866, 1866, 1866, 1866, 1866, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1075, 0, - 0, 0, 0, 0, 664, 0, 0, 0, 0, 719, + 0, 0, 0, 0, 0, 0, 0, 1932, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1943, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1967, - 720, 0, 0, 0, 720, 720, 0, 0, 0, 0, - 0, 0, 0, 1499, 0, 2862, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1051, 1051, 0, 0, 0, - 1499, 0, 0, 720, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1262, 0, 0, 0, 0, 0, 0, + 0, 0, 668, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1985, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 719, 0, 0, + 0, 0, 0, 0, 0, 668, 0, 0, 0, 0, + 0, 0, 668, 0, 0, 0, 0, 0, 0, 0, + 2096, 2011, 2012, 668, 668, 668, 668, 668, 668, 668, + 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 2096, 0, + 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 719, 0, 0, 0, 0, 0, - 0, 719, 0, 0, 0, 1858, 1858, 0, 0, 0, - 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1497, 2935, 0, 0, + 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 723, 0, 0, 0, 1505, + 0, 0, 723, 723, 1505, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1051, 1969, 1051, 1051, 1051, 1051, 1051, - 0, 720, 0, 0, 0, 0, 0, 0, 3946, 0, - 0, 0, 0, 0, 0, 0, 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 720, 195, 0, 1917, 1262, - 1262, 0, 719, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 2021, 0, 1051, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1032, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 664, 0, 0, 0, 0, 0, 0, 1969, 664, - 719, 664, 0, 664, 2374, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 720, - 2078, 0, 0, 0, 0, 0, 0, 0, 0, 1498, - 0, 720, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3248, 0, 0, 0, 0, + 0, 0, 0, 3110, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1270, 0, 723, 0, 0, + 1505, 0, 0, 0, 0, 723, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 668, + 0, 0, 0, 0, 0, 0, 1881, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 720, 2288, 0, 0, 0, + 0, 0, 0, 0, 3155, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 3330, 1270, 0, + 0, 0, 0, 0, 0, 3182, 1881, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1507, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 720, 0, 0, + 0, 0, 0, 0, 0, 0, 1507, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 195, 720, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 719, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 719, 0, 0, 
- 0, 0, 0, 0, 720, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 720, 0, 719, 0, - 0, 195, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 720, 719, 720, 0, 0, - 0, 1262, 0, 0, 0, 0, 0, 0, 0, 0, - 719, 0, 0, 0, 1497, 0, 0, 719, 719, 1497, - 0, 0, 664, 0, 0, 0, 0, 0, 0, 664, - 0, 0, 0, 0, 0, 0, 0, 0, 664, 664, - 0, 0, 664, 0, 2540, 0, 0, 0, 0, 0, - 2325, 0, 0, 0, 664, 0, 0, 0, 0, 0, - 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2337, 0, - 3214, 0, 0, 0, 0, 0, 0, 664, 0, 0, - 0, 0, 1738, 0, 0, 1262, 0, 0, 0, 0, - 0, 0, 719, 0, 0, 1497, 0, 0, 0, 0, - 719, 0, 0, 95, 0, 1075, 957, 0, 0, 0, - 945, 958, 959, 960, 961, 946, 0, 0, 947, 948, - 0, 949, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 954, 962, 963, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3296, 0, 0, 1499, 0, 1969, 0, 0, - 0, 0, 1082, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3265, 3266, 0, 0, 0, 1075, - 0, 0, 0, 0, 0, 1082, 964, 965, 966, 967, - 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, - 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, - 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, - 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 0, 0, - 0, 0, 1075, 0, 0, 0, 0, 2078, 0, 0, - 0, 2078, 2078, 0, 0, 0, 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3267, + 1082, 0, 0, 0, 0, 0, 0, 0, 2355, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1985, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3268, 3269, 0, 0, - 0, 0, 0, 2552, 0, 3493, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 664, - 0, 0, 0, 0, 0, 0, 0, 1917, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 719, - 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 664, 1262, 0, 0, 0, 664, 0, - 910, 0, 0, 0, 0, 0, 914, 0, 0, 0, - 911, 912, 0, 0, 0, 
913, 915, 0, 719, 719, - 719, 719, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1932, 0, 0, 0, 0, + 0, 0, 0, 0, 3536, 0, 0, 0, 0, 0, + 0, 1058, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1038, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 668, 0, 0, 1829, 0, 723, 723, + 1985, 668, 0, 668, 0, 668, 2394, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 664, 0, 0, 0, 0, 0, 0, 2904, + 0, 0, 0, 0, 0, 0, 0, 723, 723, 723, + 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1499, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 664, 664, - 664, 664, 664, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 664, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1497, 0, 0, 0, 0, 719, 0, 719, 0, 0, - 0, 0, 0, 0, 0, 1051, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2797, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2812, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 719, 0, 0, 0, 0, + 0, 2355, 2355, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 719, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 3600, 3601, 3602, 3603, 668, 0, 0, 0, 0, 0, + 0, 668, 0, 723, 0, 723, 0, 0, 0, 0, + 0, 668, 668, 0, 0, 668, 0, 2562, 0, 0, + 0, 0, 0, 0, 0, 0, 1505, 668, 0, 0, + 0, 0, 0, 723, 668, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2894, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 719, 0, 0, - 2337, 719, 719, 0, 0, 0, 0, 2919, 0, 0, - 0, 0, 0, 0, 0, 1051, 2924, 0, 0, 0, + 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 719, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, - 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1499, 0, 0, 0, 0, 1499, 664, - 664, 664, 664, 664, 0, 0, 0, 0, 0, 0, - 0, 3164, 0, 0, 0, 0, 0, 1917, 0, 664, - 0, 0, 664, 3172, 1969, 0, 0, 0, 2078, 0, + 1507, 0, 1985, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3677, 0, 3677, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 3688, 3690, 3689, 3753, 3754, 3755, 3756, 3757, - 3758, 3759, 789, 0, 0, 0, 0, 0, 664, 0, - 0, 0, 0, 0, 0, 0, 2078, 0, 719, 0, - 0, 0, 0, 0, 1499, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 664, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, - 0, 0, 719, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 664, 0, - 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3705, 0, 3707, 0, + 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, + 0, 0, 0, 723, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 723, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 2355, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 3872, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1270, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 719, 0, 0, 0, - 0, 3078, 0, 0, 0, 0, 1497, 0, 719, 0, - 0, 0, 0, 1262, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 719, 719, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1866, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 719, 0, 0, 0, 0, 0, - 0, 0, 3121, 0, 0, 0, 0, 0, 0, 719, - 0, 0, 0, 0, 0, 0, 1262, 0, 0, 0, - 0, 0, 0, 3148, 1866, 0, 0, 0, 0, 0, - 664, 0, 0, 0, 0, 0, 0, 0, 3694, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 719, 0, 3702, 3703, 0, 0, 3778, 3777, 3776, - 0, 0, 3774, 3775, 3773, 0, 0, 0, 0, 0, - 0, 0, 0, 719, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 664, 0, 0, 0, - 0, 0, 719, 0, 719, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1075, 0, - 0, 0, 0, 0, 0, 664, 2337, 3779, 910, 0, - 765, 766, 3780, 3781, 914, 3782, 768, 769, 911, 912, - 0, 763, 767, 913, 915, 664, 0, 0, 664, 664, - 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 3685, 
- 3686, 3687, 3691, 3692, 3693, 3704, 3751, 3752, 3760, 3762, - 866, 3761, 3763, 3764, 3765, 3768, 3769, 3770, 3771, 3766, - 3767, 3772, 3668, 3672, 3669, 3670, 3671, 3683, 3673, 3674, - 3675, 3676, 3677, 3678, 3679, 3680, 3681, 3682, 3684, 3783, - 3784, 3785, 3786, 3787, 3788, 3697, 3701, 3700, 3698, 3699, - 3695, 3696, 3723, 3722, 3724, 3725, 3726, 3727, 3728, 3729, - 3731, 3730, 3732, 3733, 3734, 3735, 3736, 3737, 3705, 3706, - 3709, 3710, 3708, 3707, 3711, 3720, 3721, 3712, 3713, 3714, - 3715, 3716, 3717, 3719, 3718, 3738, 3739, 3740, 3741, 3742, - 3744, 3743, 3747, 3748, 3746, 3745, 3750, 3749, 0, 0, - 0, 0, 3409, 0, 0, 0, 0, 0, 0, 0, - 916, 0, 917, 0, 0, 921, 0, 0, 0, 923, - 922, 0, 924, 886, 885, 0, 0, 918, 919, 0, - 920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3677, + 0, 0, 0, 0, 0, 0, 3677, 0, 3677, 0, + 0, 0, 0, 668, 0, 0, 0, 0, 0, 0, + 0, 1932, 723, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2355, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 668, 0, 0, 0, 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1917, 0, 0, 0, 0, 3789, 3790, 3791, 3792, 3793, - 3794, 3795, 3796, 0, 0, 0, 0, 0, 0, 1499, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1505, 723, 0, 723, 0, 0, 0, 668, + 0, 0, 0, 0, 0, 0, 2924, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2337, 2337, 0, 0, 0, + 723, 723, 0, 0, 0, 2355, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1917, 0, 0, + 0, 0, 1507, 0, 0, 0, 0, 0, 0, 2355, + 0, 0, 723, 0, 0, 668, 668, 668, 668, 668, + 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3557, 3558, 3559, 3560, 0, 0, + 0, 0, 668, 668, 0, 0, 0, 0, 0, 0, + 0, 0, 723, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 668, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 4019, 723, 0, 0, 0, + 0, 0, 0, 0, 0, 4027, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 723, 2355, 723, 4035, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1270, 1270, 0, 3737, 3739, 3738, 3802, + 3803, 3804, 3805, 3806, 3807, 3808, 794, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4027, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2355, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1829, 0, + 4085, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1058, 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1507, 0, 0, 0, 0, 1507, 668, 668, 668, 668, + 668, 0, 0, 0, 0, 0, 0, 0, 3198, 0, + 0, 0, 0, 0, 1932, 0, 668, 0, 0, 668, + 3206, 1985, 0, 0, 3743, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 3751, + 3752, 0, 0, 3827, 3826, 3825, 0, 0, 3823, 3824, + 3822, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1507, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3828, 915, 668, 770, 771, 3829, 3830, + 919, 3831, 773, 774, 916, 917, 0, 768, 772, 918, + 920, 0, 0, 0, 0, 668, 0, 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3656, 0, 3658, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3734, 3735, 3736, 3740, + 3741, 3742, 3753, 3800, 3801, 3809, 3811, 871, 3810, 3812, + 3813, 3814, 3817, 3818, 3819, 3820, 3815, 3816, 3821, 3717, + 3721, 3718, 3719, 3720, 3732, 3722, 3723, 3724, 3725, 3726, + 3727, 3728, 3729, 3730, 3731, 3733, 3832, 3833, 3834, 3835, + 3836, 3837, 3746, 3750, 3749, 3747, 3748, 3744, 3745, 3772, + 3771, 3773, 3774, 3775, 3776, 3777, 3778, 3780, 3779, 3781, + 3782, 3783, 3784, 3785, 3786, 3754, 3755, 3758, 3759, 3757, + 
3756, 3760, 3769, 3770, 3761, 3762, 3763, 3764, 3765, 3766, + 3768, 3767, 3787, 3788, 3789, 3790, 3791, 3793, 3792, 3796, + 3797, 3795, 3794, 3799, 3798, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 921, 0, + 922, 0, 0, 926, 0, 0, 0, 928, 927, 0, + 929, 891, 890, 0, 0, 923, 924, 0, 925, 0, + 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 668, 0, 0, + 0, 0, 0, 3838, 3839, 3840, 3841, 3842, 3843, 3844, + 3845, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 2337, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1917, 0, 0, 0, 3823, 0, 0, + 0, 0, 0, 0, 0, 0, 668, 0, 0, 668, + 668, 668, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1262, 0, 664, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 3896, 0, 0, 0, 3896, 3896, 0, - 0, 0, 0, 0, 0, 1499, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 2337, 0, 0, 0, - 0, 0, 3999, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1917, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1969, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 2337, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2337, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1932, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1507, 0, 0, + 0, 1932, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 3973, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 3977, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1262, 1262, + 0, 0, 0, 0, 0, 0, 0, 1932, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 4019, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4027, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 3973, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 2337, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 392, 3409, 0, - 4027, 1397, 1383, 520, 0, 1325, 1400, 1294, 1313, 1410, - 1316, 1319, 1362, 1272, 1340, 411, 1310, 1265, 1298, 1267, - 1305, 1268, 1296, 1327, 269, 1293, 1385, 1344, 1399, 362, - 266, 1274, 1299, 425, 1315, 203, 1364, 481, 251, 373, - 370, 575, 281, 272, 268, 249, 315, 381, 423, 510, - 417, 1406, 366, 1350, 0, 491, 396, 0, 0, 0, - 1329, 1389, 1338, 1376, 1324, 1363, 1282, 1349, 1401, 1311, - 1359, 1402, 321, 247, 323, 202, 408, 492, 285, 0, - 0, 0, 0, 4001, 941, 0, 0, 0, 0, 4002, - 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, - 0, 347, 356, 355, 336, 337, 339, 341, 346, 353, - 359, 1307, 1356, 1396, 1308, 1358, 264, 319, 271, 263, - 572, 1407, 1388, 1271, 1337, 1395, 1332, 0, 0, 228, - 1398, 1331, 0, 1361, 0, 1413, 1266, 1352, 0, 1269, - 1273, 1409, 1393, 1302, 274, 0, 0, 0, 0, 0, - 0, 0, 1328, 1339, 1373, 1377, 1322, 0, 0, 0, - 0, 0, 0, 0, 0, 1300, 0, 1348, 0, 0, - 0, 1278, 1270, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1326, 0, 0, 0, 0, - 1281, 0, 1301, 1374, 0, 1264, 296, 1275, 397, 256, - 0, 448, 1381, 1392, 1323, 616, 1394, 1321, 1320, 1368, - 1279, 1387, 1314, 361, 1277, 328, 197, 224, 0, 1312, - 407, 456, 468, 1386, 1297, 1306, 252, 1304, 466, 421, - 594, 232, 283, 453, 427, 464, 435, 286, 1347, 1366, - 465, 368, 577, 445, 591, 617, 618, 262, 401, 603, - 514, 611, 635, 225, 259, 415, 499, 597, 488, 393, - 573, 574, 
327, 487, 294, 201, 365, 623, 223, 474, - 367, 241, 230, 579, 600, 288, 451, 630, 212, 509, - 589, 238, 478, 0, 0, 638, 246, 498, 214, 586, - 497, 389, 324, 325, 213, 0, 452, 267, 292, 0, - 0, 257, 410, 581, 582, 255, 639, 227, 610, 219, - 1276, 609, 403, 576, 587, 390, 379, 218, 585, 388, - 378, 332, 351, 352, 279, 305, 442, 371, 443, 304, - 306, 399, 398, 400, 206, 598, 0, 207, 0, 493, - 599, 640, 447, 211, 233, 234, 236, 1292, 278, 282, - 290, 293, 301, 302, 311, 363, 414, 441, 437, 446, - 1382, 571, 592, 604, 615, 621, 622, 624, 625, 626, - 627, 628, 631, 629, 402, 309, 489, 331, 369, 1371, - 1412, 420, 467, 239, 596, 490, 199, 1286, 1291, 1284, - 0, 253, 254, 1353, 567, 1287, 1285, 1342, 1343, 1288, - 1403, 1404, 1405, 1390, 641, 642, 643, 644, 645, 646, - 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, - 657, 658, 636, 500, 506, 501, 502, 503, 504, 505, - 0, 507, 1375, 1280, 0, 1289, 1290, 1384, 583, 584, - 659, 380, 480, 593, 333, 345, 348, 338, 357, 0, - 358, 334, 335, 340, 342, 343, 344, 349, 350, 354, - 360, 248, 209, 386, 394, 570, 310, 215, 216, 217, - 516, 517, 518, 519, 607, 608, 612, 204, 457, 458, - 459, 460, 291, 602, 307, 463, 462, 329, 330, 375, - 444, 532, 534, 545, 549, 551, 553, 559, 562, 533, - 535, 546, 550, 552, 554, 560, 563, 522, 524, 526, - 528, 541, 540, 537, 565, 566, 543, 548, 527, 539, - 544, 557, 564, 561, 521, 525, 529, 538, 556, 555, - 536, 547, 558, 542, 530, 523, 531, 1346, 196, 220, - 364, 1408, 449, 287, 637, 606, 601, 205, 222, 1283, - 261, 1295, 1303, 0, 1309, 1317, 1318, 1330, 1333, 1334, - 1335, 1336, 1354, 1355, 1357, 1365, 1367, 1370, 1372, 1379, - 1391, 1411, 198, 200, 208, 221, 231, 235, 242, 260, - 275, 277, 284, 297, 308, 316, 317, 320, 326, 376, - 382, 383, 384, 385, 404, 405, 406, 409, 412, 413, - 416, 418, 419, 422, 426, 430, 431, 432, 434, 436, - 438, 450, 455, 469, 470, 471, 472, 473, 476, 477, - 482, 483, 484, 485, 486, 494, 495, 508, 578, 580, - 595, 613, 619, 475, 299, 300, 439, 440, 312, 313, - 633, 634, 
298, 590, 620, 588, 632, 614, 433, 374, - 1345, 1351, 377, 280, 303, 318, 1360, 605, 496, 226, - 461, 289, 250, 1378, 1380, 210, 245, 229, 258, 273, - 276, 322, 387, 395, 424, 429, 295, 270, 243, 454, - 240, 479, 511, 512, 513, 515, 391, 265, 428, 1341, - 1369, 372, 568, 569, 314, 392, 0, 0, 0, 1397, - 1383, 520, 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, - 1362, 1272, 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, - 1296, 1327, 269, 1293, 1385, 1344, 1399, 362, 266, 1274, - 1299, 425, 1315, 203, 1364, 481, 251, 373, 370, 575, - 281, 272, 268, 249, 315, 381, 423, 510, 417, 1406, - 366, 1350, 0, 491, 396, 0, 0, 0, 1329, 1389, - 1338, 1376, 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, - 321, 247, 323, 202, 408, 492, 285, 0, 0, 0, - 0, 0, 194, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 237, 0, 0, 244, 0, 0, 0, 347, - 356, 355, 336, 337, 339, 341, 346, 353, 359, 1307, - 1356, 1396, 1308, 1358, 264, 319, 271, 263, 572, 1407, - 1388, 1271, 1337, 1395, 1332, 0, 0, 228, 1398, 1331, - 0, 1361, 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, - 1393, 1302, 274, 0, 0, 0, 0, 0, 0, 0, - 1328, 1339, 1373, 1377, 1322, 0, 0, 0, 0, 0, - 0, 3173, 0, 1300, 0, 1348, 0, 0, 0, 1278, - 1270, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1326, 0, 0, 0, 0, 1281, 0, - 1301, 1374, 0, 1264, 296, 1275, 397, 256, 0, 448, - 1381, 1392, 1323, 616, 1394, 1321, 1320, 1368, 1279, 1387, - 1314, 361, 1277, 328, 197, 224, 0, 1312, 407, 456, - 468, 1386, 1297, 1306, 252, 1304, 466, 421, 594, 232, - 283, 453, 427, 464, 435, 286, 1347, 1366, 465, 368, - 577, 445, 591, 617, 618, 262, 401, 603, 514, 611, - 635, 225, 259, 415, 499, 597, 488, 393, 573, 574, - 327, 487, 294, 201, 365, 623, 223, 474, 367, 241, - 230, 579, 600, 288, 451, 630, 212, 509, 589, 238, - 478, 0, 0, 638, 246, 498, 214, 586, 497, 389, - 324, 325, 213, 0, 452, 267, 292, 0, 0, 257, - 410, 581, 582, 255, 639, 227, 610, 219, 1276, 609, - 403, 576, 587, 390, 379, 218, 585, 388, 
378, 332, - 351, 352, 279, 305, 442, 371, 443, 304, 306, 399, - 398, 400, 206, 598, 0, 207, 0, 493, 599, 640, - 447, 211, 233, 234, 236, 1292, 278, 282, 290, 293, - 301, 302, 311, 363, 414, 441, 437, 446, 1382, 571, - 592, 604, 615, 621, 622, 624, 625, 626, 627, 628, - 631, 629, 402, 309, 489, 331, 369, 1371, 1412, 420, - 467, 239, 596, 490, 199, 1286, 1291, 1284, 0, 253, - 254, 1353, 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, - 1405, 1390, 641, 642, 643, 644, 645, 646, 647, 648, - 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, - 636, 500, 506, 501, 502, 503, 504, 505, 0, 507, - 1375, 1280, 0, 1289, 1290, 1384, 583, 584, 659, 380, - 480, 593, 333, 345, 348, 338, 357, 0, 358, 334, - 335, 340, 342, 343, 344, 349, 350, 354, 360, 248, - 209, 386, 394, 570, 310, 215, 216, 217, 516, 517, - 518, 519, 607, 608, 612, 204, 457, 458, 459, 460, - 291, 602, 307, 463, 462, 329, 330, 375, 444, 532, - 534, 545, 549, 551, 553, 559, 562, 533, 535, 546, - 550, 552, 554, 560, 563, 522, 524, 526, 528, 541, - 540, 537, 565, 566, 543, 548, 527, 539, 544, 557, - 564, 561, 521, 525, 529, 538, 556, 555, 536, 547, - 558, 542, 530, 523, 531, 1346, 196, 220, 364, 1408, - 449, 287, 637, 606, 601, 205, 222, 1283, 261, 1295, - 1303, 0, 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, - 1354, 1355, 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, - 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, - 284, 297, 308, 316, 317, 320, 326, 376, 382, 383, - 384, 385, 404, 405, 406, 409, 412, 413, 416, 418, - 419, 422, 426, 430, 431, 432, 434, 436, 438, 450, - 455, 469, 470, 471, 472, 473, 476, 477, 482, 483, - 484, 485, 486, 494, 495, 508, 578, 580, 595, 613, - 619, 475, 299, 300, 439, 440, 312, 313, 633, 634, - 298, 590, 620, 588, 632, 614, 433, 374, 1345, 1351, - 377, 280, 303, 318, 1360, 605, 496, 226, 461, 289, - 250, 1378, 1380, 210, 245, 229, 258, 273, 276, 322, - 387, 395, 424, 429, 295, 270, 243, 454, 240, 479, - 511, 512, 513, 515, 391, 265, 428, 1341, 1369, 372, - 568, 569, 314, 392, 0, 0, 
0, 1397, 1383, 520, - 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, - 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, - 269, 1293, 1385, 1344, 1399, 362, 266, 1274, 1299, 425, - 1315, 203, 1364, 481, 251, 373, 370, 575, 281, 272, - 268, 249, 315, 381, 423, 510, 417, 1406, 366, 1350, - 0, 491, 396, 0, 0, 0, 1329, 1389, 1338, 1376, - 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, - 323, 202, 408, 492, 285, 0, 0, 0, 0, 0, - 709, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 237, 0, 0, 244, 0, 0, 0, 347, 356, 355, - 336, 337, 339, 341, 346, 353, 359, 1307, 1356, 1396, - 1308, 1358, 264, 319, 271, 263, 572, 1407, 1388, 1271, - 1337, 1395, 1332, 0, 0, 228, 1398, 1331, 0, 1361, - 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, - 274, 0, 0, 0, 0, 0, 0, 0, 1328, 1339, - 1373, 1377, 1322, 0, 0, 0, 0, 0, 0, 3134, - 0, 1300, 0, 1348, 0, 0, 0, 1278, 1270, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1326, 0, 0, 0, 0, 1281, 0, 1301, 1374, - 0, 1264, 296, 1275, 397, 256, 0, 448, 1381, 1392, - 1323, 616, 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, - 1277, 328, 197, 224, 0, 1312, 407, 456, 468, 1386, - 1297, 1306, 252, 1304, 466, 421, 594, 232, 283, 453, - 427, 464, 435, 286, 1347, 1366, 465, 368, 577, 445, - 591, 617, 618, 262, 401, 603, 514, 611, 635, 225, - 259, 415, 499, 597, 488, 393, 573, 574, 327, 487, - 294, 201, 365, 623, 223, 474, 367, 241, 230, 579, - 600, 288, 451, 630, 212, 509, 589, 238, 478, 0, - 0, 638, 246, 498, 214, 586, 497, 389, 324, 325, - 213, 0, 452, 267, 292, 0, 0, 257, 410, 581, - 582, 255, 639, 227, 610, 219, 1276, 609, 403, 576, - 587, 390, 379, 218, 585, 388, 378, 332, 351, 352, - 279, 305, 442, 371, 443, 304, 306, 399, 398, 400, - 206, 598, 0, 207, 0, 493, 599, 640, 447, 211, - 233, 234, 236, 1292, 278, 282, 290, 293, 301, 302, - 311, 363, 414, 441, 437, 446, 1382, 571, 592, 604, - 615, 621, 622, 624, 625, 626, 627, 628, 631, 629, - 402, 
309, 489, 331, 369, 1371, 1412, 420, 467, 239, - 596, 490, 199, 1286, 1291, 1284, 0, 253, 254, 1353, - 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, - 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, 636, 500, - 506, 501, 502, 503, 504, 505, 0, 507, 1375, 1280, - 0, 1289, 1290, 1384, 583, 584, 659, 380, 480, 593, - 333, 345, 348, 338, 357, 0, 358, 334, 335, 340, - 342, 343, 344, 349, 350, 354, 360, 248, 209, 386, - 394, 570, 310, 215, 216, 217, 516, 517, 518, 519, - 607, 608, 612, 204, 457, 458, 459, 460, 291, 602, - 307, 463, 462, 329, 330, 375, 444, 532, 534, 545, - 549, 551, 553, 559, 562, 533, 535, 546, 550, 552, - 554, 560, 563, 522, 524, 526, 528, 541, 540, 537, - 565, 566, 543, 548, 527, 539, 544, 557, 564, 561, - 521, 525, 529, 538, 556, 555, 536, 547, 558, 542, - 530, 523, 531, 1346, 196, 220, 364, 1408, 449, 287, - 637, 606, 601, 205, 222, 1283, 261, 1295, 1303, 0, - 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, - 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, - 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, - 308, 316, 317, 320, 326, 376, 382, 383, 384, 385, - 404, 405, 406, 409, 412, 413, 416, 418, 419, 422, - 426, 430, 431, 432, 434, 436, 438, 450, 455, 469, - 470, 471, 472, 473, 476, 477, 482, 483, 484, 485, - 486, 494, 495, 508, 578, 580, 595, 613, 619, 475, - 299, 300, 439, 440, 312, 313, 633, 634, 298, 590, - 620, 588, 632, 614, 433, 374, 1345, 1351, 377, 280, - 303, 318, 1360, 605, 496, 226, 461, 289, 250, 1378, - 1380, 210, 245, 229, 258, 273, 276, 322, 387, 395, - 424, 429, 295, 270, 243, 454, 240, 479, 511, 512, - 513, 515, 391, 265, 428, 1341, 1369, 372, 568, 569, - 314, 392, 0, 0, 0, 1397, 1383, 520, 0, 1325, - 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, 1340, 411, - 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, 269, 1293, - 1385, 1344, 1399, 362, 266, 1274, 1299, 425, 1315, 203, - 1364, 481, 251, 373, 370, 575, 281, 272, 268, 249, - 315, 381, 423, 510, 
417, 1406, 366, 1350, 0, 491, - 396, 0, 0, 0, 1329, 1389, 1338, 1376, 1324, 1363, - 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, 323, 202, - 408, 492, 285, 0, 0, 0, 0, 0, 941, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, - 0, 244, 0, 0, 0, 347, 356, 355, 336, 337, - 339, 341, 346, 353, 359, 1307, 1356, 1396, 1308, 1358, - 264, 319, 271, 263, 572, 1407, 1388, 1271, 1337, 1395, - 1332, 0, 0, 228, 1398, 1331, 0, 1361, 0, 1413, - 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, 274, 0, - 0, 0, 0, 0, 0, 0, 1328, 1339, 1373, 1377, - 1322, 0, 0, 0, 0, 0, 0, 2353, 0, 1300, - 0, 1348, 0, 0, 0, 1278, 1270, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1326, - 0, 0, 0, 0, 1281, 0, 1301, 1374, 0, 1264, - 296, 1275, 397, 256, 0, 448, 1381, 1392, 1323, 616, - 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, 1277, 328, - 197, 224, 0, 1312, 407, 456, 468, 1386, 1297, 1306, - 252, 1304, 466, 421, 594, 232, 283, 453, 427, 464, - 435, 286, 1347, 1366, 465, 368, 577, 445, 591, 617, - 618, 262, 401, 603, 514, 611, 635, 225, 259, 415, - 499, 597, 488, 393, 573, 574, 327, 487, 294, 201, - 365, 623, 223, 474, 367, 241, 230, 579, 600, 288, - 451, 630, 212, 509, 589, 238, 478, 0, 0, 638, - 246, 498, 214, 586, 497, 389, 324, 325, 213, 0, - 452, 267, 292, 0, 0, 257, 410, 581, 582, 255, - 639, 227, 610, 219, 1276, 609, 403, 576, 587, 390, - 379, 218, 585, 388, 378, 332, 351, 352, 279, 305, - 442, 371, 443, 304, 306, 399, 398, 400, 206, 598, - 0, 207, 0, 493, 599, 640, 447, 211, 233, 234, - 236, 1292, 278, 282, 290, 293, 301, 302, 311, 363, - 414, 441, 437, 446, 1382, 571, 592, 604, 615, 621, - 622, 624, 625, 626, 627, 628, 631, 629, 402, 309, - 489, 331, 369, 1371, 1412, 420, 467, 239, 596, 490, - 199, 1286, 1291, 1284, 0, 253, 254, 1353, 567, 1287, - 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, 641, 642, - 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, - 653, 654, 655, 656, 657, 658, 636, 500, 506, 501, - 502, 
503, 504, 505, 0, 507, 1375, 1280, 0, 1289, - 1290, 1384, 583, 584, 659, 380, 480, 593, 333, 345, - 348, 338, 357, 0, 358, 334, 335, 340, 342, 343, - 344, 349, 350, 354, 360, 248, 209, 386, 394, 570, - 310, 215, 216, 217, 516, 517, 518, 519, 607, 608, - 612, 204, 457, 458, 459, 460, 291, 602, 307, 463, - 462, 329, 330, 375, 444, 532, 534, 545, 549, 551, - 553, 559, 562, 533, 535, 546, 550, 552, 554, 560, - 563, 522, 524, 526, 528, 541, 540, 537, 565, 566, - 543, 548, 527, 539, 544, 557, 564, 561, 521, 525, - 529, 538, 556, 555, 536, 547, 558, 542, 530, 523, - 531, 1346, 196, 220, 364, 1408, 449, 287, 637, 606, - 601, 205, 222, 1283, 261, 1295, 1303, 0, 1309, 1317, - 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, 1357, 1365, - 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, 208, 221, - 231, 235, 242, 260, 275, 277, 284, 297, 308, 316, - 317, 320, 326, 376, 382, 383, 384, 385, 404, 405, - 406, 409, 412, 413, 416, 418, 419, 422, 426, 430, - 431, 432, 434, 436, 438, 450, 455, 469, 470, 471, - 472, 473, 476, 477, 482, 483, 484, 485, 486, 494, - 495, 508, 578, 580, 595, 613, 619, 475, 299, 300, - 439, 440, 312, 313, 633, 634, 298, 590, 620, 588, - 632, 614, 433, 374, 1345, 1351, 377, 280, 303, 318, - 1360, 605, 496, 226, 461, 289, 250, 1378, 1380, 210, - 245, 229, 258, 273, 276, 322, 387, 395, 424, 429, - 295, 270, 243, 454, 240, 479, 511, 512, 513, 515, - 391, 265, 428, 1341, 1369, 372, 568, 569, 314, 392, - 0, 0, 0, 1397, 1383, 520, 0, 1325, 1400, 1294, - 1313, 1410, 1316, 1319, 1362, 1272, 1340, 411, 1310, 1265, - 1298, 1267, 1305, 1268, 1296, 1327, 269, 1293, 1385, 1344, - 1399, 362, 266, 1274, 1299, 425, 1315, 203, 1364, 481, - 251, 373, 370, 575, 281, 272, 268, 249, 315, 381, - 423, 510, 417, 1406, 366, 1350, 0, 491, 396, 0, - 0, 0, 1329, 1389, 1338, 1376, 1324, 1363, 1282, 1349, - 1401, 1311, 1359, 1402, 321, 247, 323, 202, 408, 492, - 285, 0, 95, 0, 0, 0, 709, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, - 0, 0, 0, 347, 356, 355, 336, 337, 339, 341, - 346, 353, 
359, 1307, 1356, 1396, 1308, 1358, 264, 319, - 271, 263, 572, 1407, 1388, 1271, 1337, 1395, 1332, 0, - 0, 228, 1398, 1331, 0, 1361, 0, 1413, 1266, 1352, - 0, 1269, 1273, 1409, 1393, 1302, 274, 0, 0, 0, - 0, 0, 0, 0, 1328, 1339, 1373, 1377, 1322, 0, - 0, 0, 0, 0, 0, 0, 0, 1300, 0, 1348, - 0, 0, 0, 1278, 1270, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1326, 0, 0, - 0, 0, 1281, 0, 1301, 1374, 0, 1264, 296, 1275, - 397, 256, 0, 448, 1381, 1392, 1323, 616, 1394, 1321, - 1320, 1368, 1279, 1387, 1314, 361, 1277, 328, 197, 224, - 0, 1312, 407, 456, 468, 1386, 1297, 1306, 252, 1304, - 466, 421, 594, 232, 283, 453, 427, 464, 435, 286, - 1347, 1366, 465, 368, 577, 445, 591, 617, 618, 262, - 401, 603, 514, 611, 635, 225, 259, 415, 499, 597, - 488, 393, 573, 574, 327, 487, 294, 201, 365, 623, - 223, 474, 367, 241, 230, 579, 600, 288, 451, 630, - 212, 509, 589, 238, 478, 0, 0, 638, 246, 498, - 214, 586, 497, 389, 324, 325, 213, 0, 452, 267, - 292, 0, 0, 257, 410, 581, 582, 255, 639, 227, - 610, 219, 1276, 609, 403, 576, 587, 390, 379, 218, - 585, 388, 378, 332, 351, 352, 279, 305, 442, 371, - 443, 304, 306, 399, 398, 400, 206, 598, 0, 207, - 0, 493, 599, 640, 447, 211, 233, 234, 236, 1292, - 278, 282, 290, 293, 301, 302, 311, 363, 414, 441, - 437, 446, 1382, 571, 592, 604, 615, 621, 622, 624, - 625, 626, 627, 628, 631, 629, 402, 309, 489, 331, - 369, 1371, 1412, 420, 467, 239, 596, 490, 199, 1286, - 1291, 1284, 0, 253, 254, 1353, 567, 1287, 1285, 1342, - 1343, 1288, 1403, 1404, 1405, 1390, 641, 642, 643, 644, - 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 636, 500, 506, 501, 502, 503, - 504, 505, 0, 507, 1375, 1280, 0, 1289, 1290, 1384, - 583, 584, 659, 380, 480, 593, 333, 345, 348, 338, - 357, 0, 358, 334, 335, 340, 342, 343, 344, 349, - 350, 354, 360, 248, 209, 386, 394, 570, 310, 215, - 216, 217, 516, 517, 518, 519, 607, 608, 612, 204, - 457, 458, 459, 460, 
291, 602, 307, 463, 462, 329, - 330, 375, 444, 532, 534, 545, 549, 551, 553, 559, - 562, 533, 535, 546, 550, 552, 554, 560, 563, 522, - 524, 526, 528, 541, 540, 537, 565, 566, 543, 548, - 527, 539, 544, 557, 564, 561, 521, 525, 529, 538, - 556, 555, 536, 547, 558, 542, 530, 523, 531, 1346, - 196, 220, 364, 1408, 449, 287, 637, 606, 601, 205, - 222, 1283, 261, 1295, 1303, 0, 1309, 1317, 1318, 1330, - 1333, 1334, 1335, 1336, 1354, 1355, 1357, 1365, 1367, 1370, - 1372, 1379, 1391, 1411, 198, 200, 208, 221, 231, 235, - 242, 260, 275, 277, 284, 297, 308, 316, 317, 320, - 326, 376, 382, 383, 384, 385, 404, 405, 406, 409, - 412, 413, 416, 418, 419, 422, 426, 430, 431, 432, - 434, 436, 438, 450, 455, 469, 470, 471, 472, 473, - 476, 477, 482, 483, 484, 485, 486, 494, 495, 508, - 578, 580, 595, 613, 619, 475, 299, 300, 439, 440, - 312, 313, 633, 634, 298, 590, 620, 588, 632, 614, - 433, 374, 1345, 1351, 377, 280, 303, 318, 1360, 605, - 496, 226, 461, 289, 250, 1378, 1380, 210, 245, 229, - 258, 273, 276, 322, 387, 395, 424, 429, 295, 270, - 243, 454, 240, 479, 511, 512, 513, 515, 391, 265, - 428, 1341, 1369, 372, 568, 569, 314, 392, 0, 0, - 0, 1397, 1383, 520, 0, 1325, 1400, 1294, 1313, 1410, - 1316, 1319, 1362, 1272, 1340, 411, 1310, 1265, 1298, 1267, - 1305, 1268, 1296, 1327, 269, 1293, 1385, 1344, 1399, 362, - 266, 1274, 1299, 425, 1315, 203, 1364, 481, 251, 373, - 370, 575, 281, 272, 268, 249, 315, 381, 423, 510, - 417, 1406, 366, 1350, 0, 491, 396, 0, 0, 0, - 1329, 1389, 1338, 1376, 1324, 1363, 1282, 1349, 1401, 1311, - 1359, 1402, 321, 247, 323, 202, 408, 492, 285, 0, - 0, 0, 0, 0, 194, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, - 0, 347, 356, 355, 336, 337, 339, 341, 346, 353, - 359, 1307, 1356, 1396, 1308, 1358, 264, 319, 271, 263, - 572, 1407, 1388, 1271, 1337, 1395, 1332, 0, 0, 228, - 1398, 1331, 0, 1361, 0, 1413, 1266, 1352, 0, 1269, - 1273, 1409, 1393, 1302, 274, 0, 0, 0, 0, 0, - 0, 0, 1328, 1339, 1373, 1377, 1322, 0, 0, 0, - 0, 0, 0, 0, 0, 1300, 0, 1348, 
0, 0, - 0, 1278, 1270, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1326, 0, 0, 0, 0, - 1281, 0, 1301, 1374, 0, 1264, 296, 1275, 397, 256, - 0, 448, 1381, 1392, 1323, 616, 1394, 1321, 1320, 1368, - 1279, 1387, 1314, 361, 1277, 328, 197, 224, 0, 1312, - 407, 456, 468, 1386, 1297, 1306, 252, 1304, 466, 421, - 594, 232, 283, 453, 427, 464, 435, 286, 1347, 1366, - 465, 368, 577, 445, 591, 617, 618, 262, 401, 603, - 514, 611, 635, 225, 259, 415, 499, 597, 488, 393, - 573, 574, 327, 487, 294, 201, 365, 623, 223, 474, - 367, 241, 230, 579, 600, 288, 451, 630, 212, 509, - 589, 238, 478, 0, 0, 638, 246, 498, 214, 586, - 497, 389, 324, 325, 213, 0, 452, 267, 292, 0, - 0, 257, 410, 581, 582, 255, 639, 227, 610, 219, - 1276, 609, 403, 576, 587, 390, 379, 218, 585, 388, - 378, 332, 351, 352, 279, 305, 442, 371, 443, 304, - 306, 399, 398, 400, 206, 598, 0, 207, 0, 493, - 599, 640, 447, 211, 233, 234, 236, 1292, 278, 282, - 290, 293, 301, 302, 311, 363, 414, 441, 437, 446, - 1382, 571, 592, 604, 615, 621, 622, 624, 625, 626, - 627, 628, 631, 629, 402, 309, 489, 331, 369, 1371, - 1412, 420, 467, 239, 596, 490, 199, 1286, 1291, 1284, - 0, 253, 254, 1353, 567, 1287, 1285, 1342, 1343, 1288, - 1403, 1404, 1405, 1390, 641, 642, 643, 644, 645, 646, - 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, - 657, 658, 636, 500, 506, 501, 502, 503, 504, 505, - 0, 507, 1375, 1280, 0, 1289, 1290, 1384, 583, 584, - 659, 380, 480, 593, 333, 345, 348, 338, 357, 0, - 358, 334, 335, 340, 342, 343, 344, 349, 350, 354, - 360, 248, 209, 386, 394, 570, 310, 215, 216, 217, - 516, 517, 518, 519, 607, 608, 612, 204, 457, 458, - 459, 460, 291, 602, 307, 463, 462, 329, 330, 375, - 444, 532, 534, 545, 549, 551, 553, 559, 562, 533, - 535, 546, 550, 552, 554, 560, 563, 522, 524, 526, - 528, 541, 540, 537, 565, 566, 543, 548, 527, 539, - 544, 557, 564, 561, 521, 525, 529, 538, 556, 555, - 536, 547, 558, 542, 530, 523, 531, 
1346, 196, 220, - 364, 1408, 449, 287, 637, 606, 601, 205, 222, 1283, - 261, 1295, 1303, 0, 1309, 1317, 1318, 1330, 1333, 1334, - 1335, 1336, 1354, 1355, 1357, 1365, 1367, 1370, 1372, 1379, - 1391, 1411, 198, 200, 208, 221, 231, 235, 242, 260, - 275, 277, 284, 297, 308, 316, 317, 320, 326, 376, - 382, 383, 384, 385, 404, 405, 406, 409, 412, 413, - 416, 418, 419, 422, 426, 430, 431, 432, 434, 436, - 438, 450, 455, 469, 470, 471, 472, 473, 476, 477, - 482, 483, 484, 485, 486, 494, 495, 508, 578, 580, - 595, 613, 619, 475, 299, 300, 439, 440, 312, 313, - 633, 634, 298, 590, 620, 588, 632, 614, 433, 374, - 1345, 1351, 377, 280, 303, 318, 1360, 605, 496, 226, - 461, 289, 250, 1378, 1380, 210, 245, 229, 258, 273, - 276, 322, 387, 395, 424, 429, 295, 270, 243, 454, - 240, 479, 511, 512, 513, 515, 391, 265, 428, 1341, - 1369, 372, 568, 569, 314, 392, 0, 0, 0, 1397, - 1383, 520, 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, - 1362, 1272, 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, - 1296, 1327, 269, 1293, 1385, 1344, 1399, 362, 266, 1274, - 1299, 425, 1315, 203, 1364, 481, 251, 373, 370, 575, - 281, 272, 268, 249, 315, 381, 423, 510, 417, 1406, - 366, 1350, 0, 491, 396, 0, 0, 0, 1329, 1389, - 1338, 1376, 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, - 321, 247, 323, 202, 408, 492, 285, 0, 0, 0, - 0, 0, 709, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 237, 0, 0, 244, 0, 0, 0, 347, - 356, 355, 336, 337, 339, 341, 346, 353, 359, 1307, - 1356, 1396, 1308, 1358, 264, 319, 271, 263, 572, 1407, - 1388, 1271, 1337, 1395, 1332, 0, 0, 228, 1398, 1331, - 0, 1361, 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, - 1393, 1302, 274, 0, 0, 0, 0, 0, 0, 0, - 1328, 1339, 1373, 1377, 1322, 0, 0, 0, 0, 0, - 0, 0, 0, 1300, 0, 1348, 0, 0, 0, 1278, - 1270, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1326, 0, 0, 0, 0, 1281, 0, - 1301, 1374, 0, 1264, 296, 1275, 397, 256, 0, 448, - 1381, 1392, 1323, 616, 1394, 1321, 1320, 1368, 1279, 
1387, - 1314, 361, 1277, 328, 197, 224, 0, 1312, 407, 456, - 468, 1386, 1297, 1306, 252, 1304, 466, 421, 594, 232, - 283, 453, 427, 464, 435, 286, 1347, 1366, 465, 368, - 577, 445, 591, 617, 618, 262, 401, 603, 514, 611, - 635, 225, 259, 415, 499, 597, 488, 393, 573, 574, - 327, 487, 294, 201, 365, 623, 223, 474, 367, 241, - 230, 579, 600, 288, 451, 630, 212, 509, 589, 238, - 478, 0, 0, 638, 246, 498, 214, 586, 497, 389, - 324, 325, 213, 0, 452, 267, 292, 0, 0, 257, - 410, 581, 582, 255, 639, 227, 610, 219, 1276, 609, - 403, 576, 587, 390, 379, 218, 585, 388, 378, 332, - 351, 352, 279, 305, 442, 371, 443, 304, 306, 399, - 398, 400, 206, 598, 0, 207, 0, 493, 599, 640, - 447, 211, 233, 234, 236, 1292, 278, 282, 290, 293, - 301, 302, 311, 363, 414, 441, 437, 446, 1382, 571, - 592, 604, 615, 621, 622, 624, 625, 626, 627, 628, - 631, 629, 402, 309, 489, 331, 369, 1371, 1412, 420, - 467, 239, 596, 490, 199, 1286, 1291, 1284, 0, 253, - 254, 1353, 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, - 1405, 1390, 641, 642, 643, 644, 645, 646, 647, 648, - 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, - 636, 500, 506, 501, 502, 503, 504, 505, 0, 507, - 1375, 1280, 0, 1289, 1290, 1384, 583, 584, 659, 380, - 480, 593, 333, 345, 348, 338, 357, 0, 358, 334, - 335, 340, 342, 343, 344, 349, 350, 354, 360, 248, - 209, 386, 394, 570, 310, 215, 216, 217, 516, 517, - 518, 519, 607, 608, 612, 204, 457, 458, 459, 460, - 291, 602, 307, 463, 462, 329, 330, 375, 444, 532, - 534, 545, 549, 551, 553, 559, 562, 533, 535, 546, - 550, 552, 554, 560, 563, 522, 524, 526, 528, 541, - 540, 537, 565, 566, 543, 548, 527, 539, 544, 557, - 564, 561, 521, 525, 529, 538, 556, 555, 536, 547, - 558, 542, 530, 523, 531, 1346, 196, 220, 364, 1408, - 449, 287, 637, 606, 601, 205, 222, 1283, 261, 1295, - 1303, 0, 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, - 1354, 1355, 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1932, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 668, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 393, 0, 0, 0, 0, 1405, 1391, 523, 0, 1333, + 1408, 1302, 1321, 1418, 1324, 1327, 1370, 1280, 1348, 413, + 1318, 1273, 1306, 1275, 1313, 1276, 1304, 1335, 269, 1301, + 1393, 1352, 1407, 363, 266, 1282, 1307, 427, 1323, 203, + 1372, 484, 251, 374, 371, 578, 281, 272, 268, 249, + 316, 382, 425, 513, 419, 1414, 367, 1358, 0, 494, + 398, 0, 0, 1507, 1337, 1397, 1346, 1384, 1332, 1371, + 1290, 1357, 1409, 1319, 1367, 1410, 322, 247, 324, 202, + 410, 495, 285, 0, 0, 0, 0, 4059, 946, 0, + 0, 0, 4057, 4060, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 348, 357, 356, 337, 338, + 340, 342, 347, 354, 360, 1315, 1364, 602, 1404, 1316, + 1366, 264, 320, 271, 263, 575, 1415, 1396, 1279, 1345, + 1403, 1340, 1932, 0, 228, 1406, 1339, 0, 1369, 0, + 1421, 1274, 1360, 0, 1277, 1281, 1417, 1401, 1310, 274, + 0, 0, 0, 0, 0, 0, 0, 1336, 1347, 1381, + 1385, 1330, 0, 0, 0, 0, 0, 0, 0, 0, + 1308, 0, 1356, 0, 0, 0, 1286, 1278, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1985, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1334, 0, 0, 0, 0, 1289, 0, 1309, 1382, 0, + 1272, 296, 1283, 399, 256, 0, 450, 1389, 1400, 1331, + 620, 1402, 1329, 1328, 1376, 1287, 1395, 1322, 362, 1285, + 329, 197, 224, 0, 1320, 409, 458, 470, 1394, 1305, + 1314, 252, 1312, 468, 423, 597, 232, 283, 455, 429, + 466, 437, 286, 1355, 1374, 467, 369, 580, 447, 594, + 621, 622, 262, 403, 607, 517, 615, 639, 225, 259, + 417, 502, 600, 491, 394, 576, 577, 328, 490, 294, + 201, 
366, 627, 223, 476, 368, 241, 230, 582, 604, + 298, 288, 453, 634, 212, 512, 592, 238, 480, 0, + 0, 642, 246, 501, 214, 589, 500, 390, 325, 326, + 213, 0, 454, 267, 292, 0, 0, 257, 412, 584, + 585, 255, 643, 227, 614, 219, 1284, 613, 405, 579, + 590, 391, 380, 218, 588, 389, 379, 333, 352, 353, + 279, 306, 444, 372, 445, 305, 307, 401, 400, 402, + 206, 601, 0, 207, 0, 496, 603, 644, 449, 211, + 233, 234, 236, 1300, 278, 282, 290, 293, 302, 303, + 312, 364, 416, 443, 439, 448, 1390, 574, 595, 608, + 619, 625, 626, 628, 629, 630, 631, 632, 635, 633, + 404, 310, 492, 332, 370, 1379, 1420, 422, 469, 239, + 599, 493, 199, 1294, 1299, 1292, 0, 253, 254, 1361, + 570, 1295, 1293, 1350, 1351, 1296, 1411, 1412, 1413, 1398, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 640, 503, + 509, 504, 505, 506, 507, 508, 0, 510, 1383, 1288, + 0, 1297, 1298, 395, 1392, 586, 587, 663, 381, 483, + 596, 334, 346, 349, 339, 358, 0, 359, 335, 336, + 341, 343, 344, 345, 350, 351, 355, 361, 248, 209, + 387, 396, 573, 311, 215, 216, 217, 519, 520, 521, + 522, 611, 612, 616, 204, 459, 460, 461, 462, 291, + 606, 308, 465, 464, 330, 331, 376, 446, 535, 537, + 548, 552, 554, 556, 562, 565, 536, 538, 549, 553, + 555, 557, 563, 566, 525, 527, 529, 531, 544, 543, + 540, 568, 569, 546, 551, 530, 542, 547, 560, 567, + 564, 524, 528, 532, 541, 559, 558, 539, 550, 561, + 545, 533, 526, 534, 1354, 196, 220, 365, 1416, 451, + 287, 641, 610, 481, 605, 205, 222, 1291, 261, 1303, + 1311, 0, 1317, 1325, 1326, 1338, 1341, 1342, 1343, 1344, + 1362, 1363, 1365, 1373, 1375, 1378, 1380, 1387, 1399, 1419, 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, - 284, 297, 308, 316, 317, 320, 326, 376, 382, 383, - 384, 385, 404, 405, 406, 409, 412, 413, 416, 418, - 419, 422, 426, 430, 431, 432, 434, 436, 438, 450, - 455, 469, 470, 471, 472, 473, 476, 477, 482, 483, - 484, 485, 486, 494, 495, 508, 578, 580, 595, 613, - 619, 475, 299, 300, 439, 440, 312, 313, 633, 634, - 298, 
590, 620, 588, 632, 614, 433, 374, 1345, 1351, - 377, 280, 303, 318, 1360, 605, 496, 226, 461, 289, - 250, 1378, 1380, 210, 245, 229, 258, 273, 276, 322, - 387, 395, 424, 429, 295, 270, 243, 454, 240, 479, - 511, 512, 513, 515, 391, 265, 428, 1341, 1369, 372, - 568, 569, 314, 392, 0, 0, 0, 1397, 1383, 520, - 0, 1325, 1400, 1294, 1313, 1410, 1316, 1319, 1362, 1272, - 1340, 411, 1310, 1265, 1298, 1267, 1305, 1268, 1296, 1327, - 269, 1293, 1385, 1344, 1399, 362, 266, 1274, 1299, 425, - 1315, 203, 1364, 481, 251, 373, 370, 575, 281, 272, - 268, 249, 315, 381, 423, 510, 417, 1406, 366, 1350, - 0, 491, 396, 0, 0, 0, 1329, 1389, 1338, 1376, - 1324, 1363, 1282, 1349, 1401, 1311, 1359, 1402, 321, 247, - 323, 202, 408, 492, 285, 0, 0, 0, 0, 0, - 941, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 237, 0, 0, 244, 0, 0, 0, 347, 356, 355, - 336, 337, 339, 341, 346, 353, 359, 1307, 1356, 1396, - 1308, 1358, 264, 319, 271, 263, 572, 1407, 1388, 1271, - 1337, 1395, 1332, 0, 0, 228, 1398, 1331, 0, 1361, - 0, 1413, 1266, 1352, 0, 1269, 1273, 1409, 1393, 1302, - 274, 0, 0, 0, 0, 0, 0, 0, 1328, 1339, - 1373, 1377, 1322, 0, 0, 0, 0, 0, 0, 0, - 0, 1300, 0, 1348, 0, 0, 0, 1278, 1270, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1326, 0, 0, 0, 0, 1281, 0, 1301, 1374, - 0, 1264, 296, 1275, 397, 256, 0, 448, 1381, 1392, - 1323, 616, 1394, 1321, 1320, 1368, 1279, 1387, 1314, 361, - 1277, 328, 197, 224, 0, 1312, 407, 456, 468, 1386, - 1297, 1306, 252, 1304, 466, 421, 594, 232, 283, 453, - 427, 464, 435, 286, 1347, 1366, 465, 368, 577, 445, - 591, 617, 618, 262, 401, 603, 514, 611, 635, 225, - 259, 415, 499, 597, 488, 393, 573, 574, 327, 487, - 294, 201, 365, 623, 223, 474, 367, 241, 230, 579, - 600, 288, 451, 630, 212, 509, 589, 238, 478, 0, - 0, 638, 246, 498, 214, 586, 497, 389, 324, 325, - 213, 0, 452, 267, 292, 0, 0, 257, 410, 581, - 582, 255, 639, 227, 610, 219, 1276, 609, 403, 576, - 587, 390, 379, 218, 585, 388, 
378, 332, 351, 352, - 279, 305, 442, 371, 443, 304, 306, 399, 398, 400, - 206, 598, 0, 207, 0, 493, 599, 640, 447, 211, - 233, 234, 236, 1292, 278, 282, 290, 293, 301, 302, - 311, 363, 414, 441, 437, 446, 1382, 571, 592, 604, - 615, 621, 622, 624, 625, 626, 627, 628, 631, 629, - 402, 309, 489, 331, 369, 1371, 1412, 420, 467, 239, - 596, 490, 199, 1286, 1291, 1284, 0, 253, 254, 1353, - 567, 1287, 1285, 1342, 1343, 1288, 1403, 1404, 1405, 1390, - 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, 636, 500, - 506, 501, 502, 503, 504, 505, 0, 507, 1375, 1280, - 0, 1289, 1290, 1384, 583, 584, 659, 380, 480, 593, - 333, 345, 348, 338, 357, 0, 358, 334, 335, 340, - 342, 343, 344, 349, 350, 354, 360, 248, 209, 386, - 394, 570, 310, 215, 216, 217, 516, 517, 518, 519, - 607, 608, 612, 204, 457, 458, 459, 460, 291, 602, - 307, 463, 462, 329, 330, 375, 444, 532, 534, 545, - 549, 551, 553, 559, 562, 533, 535, 546, 550, 552, - 554, 560, 563, 522, 524, 526, 528, 541, 540, 537, - 565, 566, 543, 548, 527, 539, 544, 557, 564, 561, - 521, 525, 529, 538, 556, 555, 536, 547, 558, 542, - 530, 523, 531, 1346, 196, 220, 364, 1408, 449, 287, - 637, 606, 601, 205, 222, 1283, 261, 1295, 1303, 0, - 1309, 1317, 1318, 1330, 1333, 1334, 1335, 1336, 1354, 1355, - 1357, 1365, 1367, 1370, 1372, 1379, 1391, 1411, 198, 200, + 284, 297, 309, 317, 318, 321, 327, 377, 383, 384, + 385, 386, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 485, 486, + 487, 488, 489, 497, 498, 511, 581, 583, 598, 617, + 623, 477, 300, 301, 441, 442, 313, 314, 637, 638, + 299, 593, 624, 591, 636, 618, 435, 375, 1353, 1359, + 378, 280, 304, 319, 1368, 609, 499, 226, 463, 289, + 250, 1386, 1388, 210, 245, 229, 258, 273, 276, 323, + 388, 397, 426, 431, 295, 270, 243, 456, 240, 482, + 514, 515, 516, 518, 392, 265, 430, 1349, 1377, 373, + 571, 572, 315, 393, 0, 0, 0, 0, 1405, 1391, + 523, 0, 1333, 1408, 
1302, 1321, 1418, 1324, 1327, 1370, + 1280, 1348, 413, 1318, 1273, 1306, 1275, 1313, 1276, 1304, + 1335, 269, 1301, 1393, 1352, 1407, 363, 266, 1282, 1307, + 427, 1323, 203, 1372, 484, 251, 374, 371, 578, 281, + 272, 268, 249, 316, 382, 425, 513, 419, 1414, 367, + 1358, 0, 494, 398, 0, 0, 0, 1337, 1397, 1346, + 1384, 1332, 1371, 1290, 1357, 1409, 1319, 1367, 1410, 322, + 247, 324, 202, 410, 495, 285, 0, 0, 0, 0, + 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 348, 357, + 356, 337, 338, 340, 342, 347, 354, 360, 1315, 1364, + 602, 1404, 1316, 1366, 264, 320, 271, 263, 575, 1415, + 1396, 1279, 1345, 1403, 1340, 0, 0, 228, 1406, 1339, + 0, 1369, 0, 1421, 1274, 1360, 0, 1277, 1281, 1417, + 1401, 1310, 274, 0, 0, 0, 0, 0, 0, 0, + 1336, 1347, 1381, 1385, 1330, 0, 0, 0, 0, 0, + 0, 3207, 0, 1308, 0, 1356, 0, 0, 0, 1286, + 1278, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1334, 0, 0, 0, 0, 1289, 0, + 1309, 1382, 0, 1272, 296, 1283, 399, 256, 0, 450, + 1389, 1400, 1331, 620, 1402, 1329, 1328, 1376, 1287, 1395, + 1322, 362, 1285, 329, 197, 224, 0, 1320, 409, 458, + 470, 1394, 1305, 1314, 252, 1312, 468, 423, 597, 232, + 283, 455, 429, 466, 437, 286, 1355, 1374, 467, 369, + 580, 447, 594, 621, 622, 262, 403, 607, 517, 615, + 639, 225, 259, 417, 502, 600, 491, 394, 576, 577, + 328, 490, 294, 201, 366, 627, 223, 476, 368, 241, + 230, 582, 604, 298, 288, 453, 634, 212, 512, 592, + 238, 480, 0, 0, 642, 246, 501, 214, 589, 500, + 390, 325, 326, 213, 0, 454, 267, 292, 0, 0, + 257, 412, 584, 585, 255, 643, 227, 614, 219, 1284, + 613, 405, 579, 590, 391, 380, 218, 588, 389, 379, + 333, 352, 353, 279, 306, 444, 372, 445, 305, 307, + 401, 400, 402, 206, 601, 0, 207, 0, 496, 603, + 644, 449, 211, 233, 234, 236, 1300, 278, 282, 290, + 293, 302, 303, 312, 364, 416, 443, 439, 448, 1390, + 574, 595, 608, 619, 625, 626, 628, 629, 630, 631, + 632, 635, 633, 404, 310, 492, 332, 370, 1379, 
1420, + 422, 469, 239, 599, 493, 199, 1294, 1299, 1292, 0, + 253, 254, 1361, 570, 1295, 1293, 1350, 1351, 1296, 1411, + 1412, 1413, 1398, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 640, 503, 509, 504, 505, 506, 507, 508, 0, + 510, 1383, 1288, 0, 1297, 1298, 395, 1392, 586, 587, + 663, 381, 483, 596, 334, 346, 349, 339, 358, 0, + 359, 335, 336, 341, 343, 344, 345, 350, 351, 355, + 361, 248, 209, 387, 396, 573, 311, 215, 216, 217, + 519, 520, 521, 522, 611, 612, 616, 204, 459, 460, + 461, 462, 291, 606, 308, 465, 464, 330, 331, 376, + 446, 535, 537, 548, 552, 554, 556, 562, 565, 536, + 538, 549, 553, 555, 557, 563, 566, 525, 527, 529, + 531, 544, 543, 540, 568, 569, 546, 551, 530, 542, + 547, 560, 567, 564, 524, 528, 532, 541, 559, 558, + 539, 550, 561, 545, 533, 526, 534, 1354, 196, 220, + 365, 1416, 451, 287, 641, 610, 481, 605, 205, 222, + 1291, 261, 1303, 1311, 0, 1317, 1325, 1326, 1338, 1341, + 1342, 1343, 1344, 1362, 1363, 1365, 1373, 1375, 1378, 1380, + 1387, 1399, 1419, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 309, 317, 318, 321, 327, + 377, 383, 384, 385, 386, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 485, 486, 487, 488, 489, 497, 498, 511, 581, + 583, 598, 617, 623, 477, 300, 301, 441, 442, 313, + 314, 637, 638, 299, 593, 624, 591, 636, 618, 435, + 375, 1353, 1359, 378, 280, 304, 319, 1368, 609, 499, + 226, 463, 289, 250, 1386, 1388, 210, 245, 229, 258, + 273, 276, 323, 388, 397, 426, 431, 295, 270, 243, + 456, 240, 482, 514, 515, 516, 518, 392, 265, 430, + 1349, 1377, 373, 571, 572, 315, 393, 0, 0, 0, + 0, 1405, 1391, 523, 0, 1333, 1408, 1302, 1321, 1418, + 1324, 1327, 1370, 1280, 1348, 413, 1318, 1273, 1306, 1275, + 1313, 1276, 1304, 1335, 269, 1301, 1393, 1352, 1407, 363, + 266, 1282, 1307, 427, 1323, 203, 1372, 484, 251, 374, + 371, 578, 281, 272, 268, 249, 316, 382, 425, 513, + 419, 1414, 
367, 1358, 0, 494, 398, 0, 0, 0, + 1337, 1397, 1346, 1384, 1332, 1371, 1290, 1357, 1409, 1319, + 1367, 1410, 322, 247, 324, 202, 410, 495, 285, 0, + 0, 0, 0, 0, 713, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 348, 357, 356, 337, 338, 340, 342, 347, 354, + 360, 1315, 1364, 602, 1404, 1316, 1366, 264, 320, 271, + 263, 575, 1415, 1396, 1279, 1345, 1403, 1340, 0, 0, + 228, 1406, 1339, 0, 1369, 0, 1421, 1274, 1360, 0, + 1277, 1281, 1417, 1401, 1310, 274, 0, 0, 0, 0, + 0, 0, 0, 1336, 1347, 1381, 1385, 1330, 0, 0, + 0, 0, 0, 0, 3168, 0, 1308, 0, 1356, 0, + 0, 0, 1286, 1278, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1334, 0, 0, 0, + 0, 1289, 0, 1309, 1382, 0, 1272, 296, 1283, 399, + 256, 0, 450, 1389, 1400, 1331, 620, 1402, 1329, 1328, + 1376, 1287, 1395, 1322, 362, 1285, 329, 197, 224, 0, + 1320, 409, 458, 470, 1394, 1305, 1314, 252, 1312, 468, + 423, 597, 232, 283, 455, 429, 466, 437, 286, 1355, + 1374, 467, 369, 580, 447, 594, 621, 622, 262, 403, + 607, 517, 615, 639, 225, 259, 417, 502, 600, 491, + 394, 576, 577, 328, 490, 294, 201, 366, 627, 223, + 476, 368, 241, 230, 582, 604, 298, 288, 453, 634, + 212, 512, 592, 238, 480, 0, 0, 642, 246, 501, + 214, 589, 500, 390, 325, 326, 213, 0, 454, 267, + 292, 0, 0, 257, 412, 584, 585, 255, 643, 227, + 614, 219, 1284, 613, 405, 579, 590, 391, 380, 218, + 588, 389, 379, 333, 352, 353, 279, 306, 444, 372, + 445, 305, 307, 401, 400, 402, 206, 601, 0, 207, + 0, 496, 603, 644, 449, 211, 233, 234, 236, 1300, + 278, 282, 290, 293, 302, 303, 312, 364, 416, 443, + 439, 448, 1390, 574, 595, 608, 619, 625, 626, 628, + 629, 630, 631, 632, 635, 633, 404, 310, 492, 332, + 370, 1379, 1420, 422, 469, 239, 599, 493, 199, 1294, + 1299, 1292, 0, 253, 254, 1361, 570, 1295, 1293, 1350, + 1351, 1296, 1411, 1412, 1413, 1398, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 662, 640, 503, 509, 504, 505, 506, 
+ 507, 508, 0, 510, 1383, 1288, 0, 1297, 1298, 395, + 1392, 586, 587, 663, 381, 483, 596, 334, 346, 349, + 339, 358, 0, 359, 335, 336, 341, 343, 344, 345, + 350, 351, 355, 361, 248, 209, 387, 396, 573, 311, + 215, 216, 217, 519, 520, 521, 522, 611, 612, 616, + 204, 459, 460, 461, 462, 291, 606, 308, 465, 464, + 330, 331, 376, 446, 535, 537, 548, 552, 554, 556, + 562, 565, 536, 538, 549, 553, 555, 557, 563, 566, + 525, 527, 529, 531, 544, 543, 540, 568, 569, 546, + 551, 530, 542, 547, 560, 567, 564, 524, 528, 532, + 541, 559, 558, 539, 550, 561, 545, 533, 526, 534, + 1354, 196, 220, 365, 1416, 451, 287, 641, 610, 481, + 605, 205, 222, 1291, 261, 1303, 1311, 0, 1317, 1325, + 1326, 1338, 1341, 1342, 1343, 1344, 1362, 1363, 1365, 1373, + 1375, 1378, 1380, 1387, 1399, 1419, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 309, 317, + 318, 321, 327, 377, 383, 384, 385, 386, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 485, 486, 487, 488, 489, 497, + 498, 511, 581, 583, 598, 617, 623, 477, 300, 301, + 441, 442, 313, 314, 637, 638, 299, 593, 624, 591, + 636, 618, 435, 375, 1353, 1359, 378, 280, 304, 319, + 1368, 609, 499, 226, 463, 289, 250, 1386, 1388, 210, + 245, 229, 258, 273, 276, 323, 388, 397, 426, 431, + 295, 270, 243, 456, 240, 482, 514, 515, 516, 518, + 392, 265, 430, 1349, 1377, 373, 571, 572, 315, 393, + 0, 0, 0, 0, 1405, 1391, 523, 0, 1333, 1408, + 1302, 1321, 1418, 1324, 1327, 1370, 1280, 1348, 413, 1318, + 1273, 1306, 1275, 1313, 1276, 1304, 1335, 269, 1301, 1393, + 1352, 1407, 363, 266, 1282, 1307, 427, 1323, 203, 1372, + 484, 251, 374, 371, 578, 281, 272, 268, 249, 316, + 382, 425, 513, 419, 1414, 367, 1358, 0, 494, 398, + 0, 0, 0, 1337, 1397, 1346, 1384, 1332, 1371, 1290, + 1357, 1409, 1319, 1367, 1410, 322, 247, 324, 202, 410, + 495, 285, 0, 0, 0, 0, 0, 946, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 348, 357, 356, 337, 338, 340, + 342, 
347, 354, 360, 1315, 1364, 602, 1404, 1316, 1366, + 264, 320, 271, 263, 575, 1415, 1396, 1279, 1345, 1403, + 1340, 0, 0, 228, 1406, 1339, 0, 1369, 0, 1421, + 1274, 1360, 0, 1277, 1281, 1417, 1401, 1310, 274, 0, + 0, 0, 0, 0, 0, 0, 1336, 1347, 1381, 1385, + 1330, 0, 0, 0, 0, 0, 0, 2373, 0, 1308, + 0, 1356, 0, 0, 0, 1286, 1278, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1334, + 0, 0, 0, 0, 1289, 0, 1309, 1382, 0, 1272, + 296, 1283, 399, 256, 0, 450, 1389, 1400, 1331, 620, + 1402, 1329, 1328, 1376, 1287, 1395, 1322, 362, 1285, 329, + 197, 224, 0, 1320, 409, 458, 470, 1394, 1305, 1314, + 252, 1312, 468, 423, 597, 232, 283, 455, 429, 466, + 437, 286, 1355, 1374, 467, 369, 580, 447, 594, 621, + 622, 262, 403, 607, 517, 615, 639, 225, 259, 417, + 502, 600, 491, 394, 576, 577, 328, 490, 294, 201, + 366, 627, 223, 476, 368, 241, 230, 582, 604, 298, + 288, 453, 634, 212, 512, 592, 238, 480, 0, 0, + 642, 246, 501, 214, 589, 500, 390, 325, 326, 213, + 0, 454, 267, 292, 0, 0, 257, 412, 584, 585, + 255, 643, 227, 614, 219, 1284, 613, 405, 579, 590, + 391, 380, 218, 588, 389, 379, 333, 352, 353, 279, + 306, 444, 372, 445, 305, 307, 401, 400, 402, 206, + 601, 0, 207, 0, 496, 603, 644, 449, 211, 233, + 234, 236, 1300, 278, 282, 290, 293, 302, 303, 312, + 364, 416, 443, 439, 448, 1390, 574, 595, 608, 619, + 625, 626, 628, 629, 630, 631, 632, 635, 633, 404, + 310, 492, 332, 370, 1379, 1420, 422, 469, 239, 599, + 493, 199, 1294, 1299, 1292, 0, 253, 254, 1361, 570, + 1295, 1293, 1350, 1351, 1296, 1411, 1412, 1413, 1398, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 661, 662, 640, 503, 509, + 504, 505, 506, 507, 508, 0, 510, 1383, 1288, 0, + 1297, 1298, 395, 1392, 586, 587, 663, 381, 483, 596, + 334, 346, 349, 339, 358, 0, 359, 335, 336, 341, + 343, 344, 345, 350, 351, 355, 361, 248, 209, 387, + 396, 573, 311, 215, 216, 217, 519, 520, 521, 522, + 611, 612, 
616, 204, 459, 460, 461, 462, 291, 606, + 308, 465, 464, 330, 331, 376, 446, 535, 537, 548, + 552, 554, 556, 562, 565, 536, 538, 549, 553, 555, + 557, 563, 566, 525, 527, 529, 531, 544, 543, 540, + 568, 569, 546, 551, 530, 542, 547, 560, 567, 564, + 524, 528, 532, 541, 559, 558, 539, 550, 561, 545, + 533, 526, 534, 1354, 196, 220, 365, 1416, 451, 287, + 641, 610, 481, 605, 205, 222, 1291, 261, 1303, 1311, + 0, 1317, 1325, 1326, 1338, 1341, 1342, 1343, 1344, 1362, + 1363, 1365, 1373, 1375, 1378, 1380, 1387, 1399, 1419, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 309, 317, 318, 321, 327, 377, 383, 384, 385, + 386, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 485, 486, 487, + 488, 489, 497, 498, 511, 581, 583, 598, 617, 623, + 477, 300, 301, 441, 442, 313, 314, 637, 638, 299, + 593, 624, 591, 636, 618, 435, 375, 1353, 1359, 378, + 280, 304, 319, 1368, 609, 499, 226, 463, 289, 250, + 1386, 1388, 210, 245, 229, 258, 273, 276, 323, 388, + 397, 426, 431, 295, 270, 243, 456, 240, 482, 514, + 515, 516, 518, 392, 265, 430, 1349, 1377, 373, 571, + 572, 315, 393, 0, 0, 0, 0, 1405, 1391, 523, + 0, 1333, 1408, 1302, 1321, 1418, 1324, 1327, 1370, 1280, + 1348, 413, 1318, 1273, 1306, 1275, 1313, 1276, 1304, 1335, + 269, 1301, 1393, 1352, 1407, 363, 266, 1282, 1307, 427, + 1323, 203, 1372, 484, 251, 374, 371, 578, 281, 272, + 268, 249, 316, 382, 425, 513, 419, 1414, 367, 1358, + 0, 494, 398, 0, 0, 0, 1337, 1397, 1346, 1384, + 1332, 1371, 1290, 1357, 1409, 1319, 1367, 1410, 322, 247, + 324, 202, 410, 495, 285, 0, 95, 0, 0, 0, + 713, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 237, 0, 0, 244, 0, 0, 0, 348, 357, 356, + 337, 338, 340, 342, 347, 354, 360, 1315, 1364, 602, + 1404, 1316, 1366, 264, 320, 271, 263, 575, 1415, 1396, + 1279, 1345, 1403, 1340, 0, 0, 228, 1406, 1339, 0, + 1369, 0, 1421, 1274, 1360, 0, 1277, 1281, 1417, 1401, + 1310, 274, 0, 0, 0, 0, 0, 0, 0, 1336, + 1347, 1381, 1385, 
1330, 0, 0, 0, 0, 0, 0, + 0, 0, 1308, 0, 1356, 0, 0, 0, 1286, 1278, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1334, 0, 0, 0, 0, 1289, 0, 1309, + 1382, 0, 1272, 296, 1283, 399, 256, 0, 450, 1389, + 1400, 1331, 620, 1402, 1329, 1328, 1376, 1287, 1395, 1322, + 362, 1285, 329, 197, 224, 0, 1320, 409, 458, 470, + 1394, 1305, 1314, 252, 1312, 468, 423, 597, 232, 283, + 455, 429, 466, 437, 286, 1355, 1374, 467, 369, 580, + 447, 594, 621, 622, 262, 403, 607, 517, 615, 639, + 225, 259, 417, 502, 600, 491, 394, 576, 577, 328, + 490, 294, 201, 366, 627, 223, 476, 368, 241, 230, + 582, 604, 298, 288, 453, 634, 212, 512, 592, 238, + 480, 0, 0, 642, 246, 501, 214, 589, 500, 390, + 325, 326, 213, 0, 454, 267, 292, 0, 0, 257, + 412, 584, 585, 255, 643, 227, 614, 219, 1284, 613, + 405, 579, 590, 391, 380, 218, 588, 389, 379, 333, + 352, 353, 279, 306, 444, 372, 445, 305, 307, 401, + 400, 402, 206, 601, 0, 207, 0, 496, 603, 644, + 449, 211, 233, 234, 236, 1300, 278, 282, 290, 293, + 302, 303, 312, 364, 416, 443, 439, 448, 1390, 574, + 595, 608, 619, 625, 626, 628, 629, 630, 631, 632, + 635, 633, 404, 310, 492, 332, 370, 1379, 1420, 422, + 469, 239, 599, 493, 199, 1294, 1299, 1292, 0, 253, + 254, 1361, 570, 1295, 1293, 1350, 1351, 1296, 1411, 1412, + 1413, 1398, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, + 640, 503, 509, 504, 505, 506, 507, 508, 0, 510, + 1383, 1288, 0, 1297, 1298, 395, 1392, 586, 587, 663, + 381, 483, 596, 334, 346, 349, 339, 358, 0, 359, + 335, 336, 341, 343, 344, 345, 350, 351, 355, 361, + 248, 209, 387, 396, 573, 311, 215, 216, 217, 519, + 520, 521, 522, 611, 612, 616, 204, 459, 460, 461, + 462, 291, 606, 308, 465, 464, 330, 331, 376, 446, + 535, 537, 548, 552, 554, 556, 562, 565, 536, 538, + 549, 553, 555, 557, 563, 566, 525, 527, 529, 531, + 544, 543, 540, 568, 569, 546, 551, 530, 542, 547, + 560, 567, 564, 524, 528, 
532, 541, 559, 558, 539, + 550, 561, 545, 533, 526, 534, 1354, 196, 220, 365, + 1416, 451, 287, 641, 610, 481, 605, 205, 222, 1291, + 261, 1303, 1311, 0, 1317, 1325, 1326, 1338, 1341, 1342, + 1343, 1344, 1362, 1363, 1365, 1373, 1375, 1378, 1380, 1387, + 1399, 1419, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 309, 317, 318, 321, 327, 377, + 383, 384, 385, 386, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 485, 486, 487, 488, 489, 497, 498, 511, 581, 583, + 598, 617, 623, 477, 300, 301, 441, 442, 313, 314, + 637, 638, 299, 593, 624, 591, 636, 618, 435, 375, + 1353, 1359, 378, 280, 304, 319, 1368, 609, 499, 226, + 463, 289, 250, 1386, 1388, 210, 245, 229, 258, 273, + 276, 323, 388, 397, 426, 431, 295, 270, 243, 456, + 240, 482, 514, 515, 516, 518, 392, 265, 430, 1349, + 1377, 373, 571, 572, 315, 393, 0, 0, 0, 0, + 1405, 1391, 523, 0, 1333, 1408, 1302, 1321, 1418, 1324, + 1327, 1370, 1280, 1348, 413, 1318, 1273, 1306, 1275, 1313, + 1276, 1304, 1335, 269, 1301, 1393, 1352, 1407, 363, 266, + 1282, 1307, 427, 1323, 203, 1372, 484, 251, 374, 371, + 578, 281, 272, 268, 249, 316, 382, 425, 513, 419, + 1414, 367, 1358, 0, 494, 398, 0, 0, 0, 1337, + 1397, 1346, 1384, 1332, 1371, 1290, 1357, 1409, 1319, 1367, + 1410, 322, 247, 324, 202, 410, 495, 285, 0, 0, + 0, 0, 0, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 348, 357, 356, 337, 338, 340, 342, 347, 354, 360, + 1315, 1364, 602, 1404, 1316, 1366, 264, 320, 271, 263, + 575, 1415, 1396, 1279, 1345, 1403, 1340, 0, 0, 228, + 1406, 1339, 0, 1369, 0, 1421, 1274, 1360, 0, 1277, + 1281, 1417, 1401, 1310, 274, 0, 0, 0, 0, 0, + 0, 0, 1336, 1347, 1381, 1385, 1330, 0, 0, 0, + 0, 0, 0, 0, 0, 1308, 0, 1356, 0, 0, + 0, 1286, 1278, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1334, 0, 0, 0, 0, + 1289, 0, 1309, 1382, 0, 1272, 296, 
1283, 399, 256, + 0, 450, 1389, 1400, 1331, 620, 1402, 1329, 1328, 1376, + 1287, 1395, 1322, 362, 1285, 329, 197, 224, 0, 1320, + 409, 458, 470, 1394, 1305, 1314, 252, 1312, 468, 423, + 597, 232, 283, 455, 429, 466, 437, 286, 1355, 1374, + 467, 369, 580, 447, 594, 621, 622, 262, 403, 607, + 517, 615, 639, 225, 259, 417, 502, 600, 491, 394, + 576, 577, 328, 490, 294, 201, 366, 627, 223, 476, + 368, 241, 230, 582, 604, 298, 288, 453, 634, 212, + 512, 592, 238, 480, 0, 0, 642, 246, 501, 214, + 589, 500, 390, 325, 326, 213, 0, 454, 267, 292, + 0, 0, 257, 412, 584, 585, 255, 643, 227, 614, + 219, 1284, 613, 405, 579, 590, 391, 380, 218, 588, + 389, 379, 333, 352, 353, 279, 306, 444, 372, 445, + 305, 307, 401, 400, 402, 206, 601, 0, 207, 0, + 496, 603, 644, 449, 211, 233, 234, 236, 1300, 278, + 282, 290, 293, 302, 303, 312, 364, 416, 443, 439, + 448, 1390, 574, 595, 608, 619, 625, 626, 628, 629, + 630, 631, 632, 635, 633, 404, 310, 492, 332, 370, + 1379, 1420, 422, 469, 239, 599, 493, 199, 1294, 1299, + 1292, 0, 253, 254, 1361, 570, 1295, 1293, 1350, 1351, + 1296, 1411, 1412, 1413, 1398, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 640, 503, 509, 504, 505, 506, 507, + 508, 0, 510, 1383, 1288, 0, 1297, 1298, 395, 1392, + 586, 587, 663, 381, 483, 596, 334, 346, 349, 339, + 358, 0, 359, 335, 336, 341, 343, 344, 345, 350, + 351, 355, 361, 248, 209, 387, 396, 573, 311, 215, + 216, 217, 519, 520, 521, 522, 611, 612, 616, 204, + 459, 460, 461, 462, 291, 606, 308, 465, 464, 330, + 331, 376, 446, 535, 537, 548, 552, 554, 556, 562, + 565, 536, 538, 549, 553, 555, 557, 563, 566, 525, + 527, 529, 531, 544, 543, 540, 568, 569, 546, 551, + 530, 542, 547, 560, 567, 564, 524, 528, 532, 541, + 559, 558, 539, 550, 561, 545, 533, 526, 534, 1354, + 196, 220, 365, 1416, 451, 287, 641, 610, 481, 605, + 205, 222, 1291, 261, 1303, 1311, 0, 1317, 1325, 1326, + 1338, 1341, 1342, 1343, 1344, 1362, 1363, 1365, 1373, 1375, + 1378, 1380, 1387, 1399, 
1419, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 309, 317, 318, + 321, 327, 377, 383, 384, 385, 386, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 485, 486, 487, 488, 489, 497, 498, + 511, 581, 583, 598, 617, 623, 477, 300, 301, 441, + 442, 313, 314, 637, 638, 299, 593, 624, 591, 636, + 618, 435, 375, 1353, 1359, 378, 280, 304, 319, 1368, + 609, 499, 226, 463, 289, 250, 1386, 1388, 210, 245, + 229, 258, 273, 276, 323, 388, 397, 426, 431, 295, + 270, 243, 456, 240, 482, 514, 515, 516, 518, 392, + 265, 430, 1349, 1377, 373, 571, 572, 315, 393, 0, + 0, 0, 0, 1405, 1391, 523, 0, 1333, 1408, 1302, + 1321, 1418, 1324, 1327, 1370, 1280, 1348, 413, 1318, 1273, + 1306, 1275, 1313, 1276, 1304, 1335, 269, 1301, 1393, 1352, + 1407, 363, 266, 1282, 1307, 427, 1323, 203, 1372, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 1414, 367, 1358, 0, 494, 398, 0, + 0, 0, 1337, 1397, 1346, 1384, 1332, 1371, 1290, 1357, + 1409, 1319, 1367, 1410, 322, 247, 324, 202, 410, 495, + 285, 0, 0, 0, 0, 0, 713, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 348, 357, 356, 337, 338, 340, 342, + 347, 354, 360, 1315, 1364, 602, 1404, 1316, 1366, 264, + 320, 271, 263, 575, 1415, 1396, 1279, 1345, 1403, 1340, + 0, 0, 228, 1406, 1339, 0, 1369, 0, 1421, 1274, + 1360, 0, 1277, 1281, 1417, 1401, 1310, 274, 0, 0, + 0, 0, 0, 0, 0, 1336, 1347, 1381, 1385, 1330, + 0, 0, 0, 0, 0, 0, 0, 0, 1308, 0, + 1356, 0, 0, 0, 1286, 1278, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1334, 0, + 0, 0, 0, 1289, 0, 1309, 1382, 0, 1272, 296, + 1283, 399, 256, 0, 450, 1389, 1400, 1331, 620, 1402, + 1329, 1328, 1376, 1287, 1395, 1322, 362, 1285, 329, 197, + 224, 0, 1320, 409, 458, 470, 1394, 1305, 1314, 252, + 1312, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 1355, 1374, 467, 369, 580, 447, 
594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 584, 585, 255, + 643, 227, 614, 219, 1284, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 352, 353, 279, 306, + 444, 372, 445, 305, 307, 401, 400, 402, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 1300, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 448, 1390, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 633, 404, 310, + 492, 332, 370, 1379, 1420, 422, 469, 239, 599, 493, + 199, 1294, 1299, 1292, 0, 253, 254, 1361, 570, 1295, + 1293, 1350, 1351, 1296, 1411, 1412, 1413, 1398, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 1383, 1288, 0, 1297, + 1298, 395, 1392, 586, 587, 663, 381, 483, 596, 334, + 346, 349, 339, 358, 0, 359, 335, 336, 341, 343, + 344, 345, 350, 351, 355, 361, 248, 209, 387, 396, + 573, 311, 215, 216, 217, 519, 520, 521, 522, 611, + 612, 616, 204, 459, 460, 461, 462, 291, 606, 308, + 465, 464, 330, 331, 376, 446, 535, 537, 548, 552, + 554, 556, 562, 565, 536, 538, 549, 553, 555, 557, + 563, 566, 525, 527, 529, 531, 544, 543, 540, 568, + 569, 546, 551, 530, 542, 547, 560, 567, 564, 524, + 528, 532, 541, 559, 558, 539, 550, 561, 545, 533, + 526, 534, 1354, 196, 220, 365, 1416, 451, 287, 641, + 610, 481, 605, 205, 222, 1291, 261, 1303, 1311, 0, + 1317, 1325, 1326, 1338, 1341, 1342, 1343, 1344, 1362, 1363, + 1365, 1373, 1375, 1378, 1380, 1387, 1399, 1419, 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, - 308, 316, 317, 320, 326, 376, 382, 383, 384, 385, - 404, 405, 406, 409, 412, 413, 416, 418, 419, 422, - 426, 430, 431, 432, 434, 436, 438, 450, 455, 469, - 470, 471, 472, 473, 476, 477, 482, 
483, 484, 485, - 486, 494, 495, 508, 578, 580, 595, 613, 619, 475, - 299, 300, 439, 440, 312, 313, 633, 634, 298, 590, - 620, 588, 632, 614, 433, 374, 1345, 1351, 377, 280, - 303, 318, 1360, 605, 496, 226, 461, 289, 250, 1378, - 1380, 210, 245, 229, 258, 273, 276, 322, 387, 395, - 424, 429, 295, 270, 243, 454, 240, 479, 511, 512, - 513, 515, 391, 265, 428, 1341, 1369, 372, 568, 569, - 314, 392, 0, 0, 0, 0, 0, 520, 0, 761, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 411, - 0, 0, 0, 0, 749, 0, 0, 0, 269, 754, - 0, 0, 0, 362, 266, 0, 0, 425, 0, 203, - 0, 481, 251, 373, 370, 575, 281, 272, 268, 249, - 315, 381, 423, 510, 417, 760, 366, 0, 0, 491, - 396, 0, 0, 0, 0, 0, 756, 757, 0, 0, - 0, 0, 0, 0, 0, 0, 321, 247, 323, 202, - 408, 492, 285, 0, 95, 0, 0, 957, 941, 733, - 907, 945, 958, 959, 960, 961, 946, 0, 237, 947, - 948, 244, 949, 0, 906, 791, 793, 792, 856, 857, - 858, 859, 860, 861, 862, 789, 954, 962, 963, 0, - 264, 319, 271, 263, 572, 0, 0, 2176, 2177, 2178, - 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, - 0, 729, 746, 0, 759, 0, 0, 0, 274, 0, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 300, 301, 441, 442, 313, 314, 637, 638, 299, 593, + 624, 591, 636, 618, 435, 375, 1353, 1359, 378, 280, + 304, 319, 1368, 609, 499, 226, 463, 289, 250, 1386, + 1388, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 1349, 1377, 373, 571, 572, + 315, 393, 0, 0, 0, 0, 1405, 1391, 523, 0, + 1333, 1408, 1302, 1321, 1418, 1324, 1327, 1370, 1280, 1348, + 413, 1318, 1273, 1306, 1275, 1313, 1276, 1304, 1335, 269, + 1301, 1393, 1352, 1407, 363, 266, 1282, 1307, 427, 1323, + 203, 1372, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 1414, 367, 1358, 0, + 494, 398, 0, 0, 0, 1337, 1397, 1346, 1384, 1332, + 
1371, 1290, 1357, 1409, 1319, 1367, 1410, 322, 247, 324, + 202, 410, 495, 285, 0, 0, 0, 0, 0, 946, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 348, 357, 356, 337, + 338, 340, 342, 347, 354, 360, 1315, 1364, 602, 1404, + 1316, 1366, 264, 320, 271, 263, 575, 1415, 1396, 1279, + 1345, 1403, 1340, 0, 0, 228, 1406, 1339, 0, 1369, + 0, 1421, 1274, 1360, 0, 1277, 1281, 1417, 1401, 1310, + 274, 0, 0, 0, 0, 0, 0, 0, 1336, 1347, + 1381, 1385, 1330, 0, 0, 0, 0, 0, 0, 0, + 0, 1308, 0, 1356, 0, 0, 0, 1286, 1278, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1334, 0, 0, 0, 0, 1289, 0, 1309, 1382, + 0, 1272, 296, 1283, 399, 256, 0, 450, 1389, 1400, + 1331, 620, 1402, 1329, 1328, 1376, 1287, 1395, 1322, 362, + 1285, 329, 197, 224, 0, 1320, 409, 458, 470, 1394, + 1305, 1314, 252, 1312, 468, 423, 597, 232, 283, 455, + 429, 466, 437, 286, 1355, 1374, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 584, 585, 255, 643, 227, 614, 219, 1284, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 352, + 353, 279, 306, 444, 372, 445, 305, 307, 401, 400, + 402, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 1300, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 1390, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 631, 632, 635, + 633, 404, 310, 492, 332, 370, 1379, 1420, 422, 469, + 239, 599, 493, 199, 1294, 1299, 1292, 0, 253, 254, + 1361, 570, 1295, 1293, 1350, 1351, 1296, 1411, 1412, 1413, + 1398, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 1383, + 1288, 0, 1297, 1298, 395, 1392, 586, 587, 
663, 381, + 483, 596, 334, 346, 349, 339, 358, 0, 359, 335, + 336, 341, 343, 344, 345, 350, 351, 355, 361, 248, + 209, 387, 396, 573, 311, 215, 216, 217, 519, 520, + 521, 522, 611, 612, 616, 204, 459, 460, 461, 462, + 291, 606, 308, 465, 464, 330, 331, 376, 446, 535, + 537, 548, 552, 554, 556, 562, 565, 536, 538, 549, + 553, 555, 557, 563, 566, 525, 527, 529, 531, 544, + 543, 540, 568, 569, 546, 551, 530, 542, 547, 560, + 567, 564, 524, 528, 532, 541, 559, 558, 539, 550, + 561, 545, 533, 526, 534, 1354, 196, 220, 365, 1416, + 451, 287, 641, 610, 481, 605, 205, 222, 1291, 261, + 1303, 1311, 0, 1317, 1325, 1326, 1338, 1341, 1342, 1343, + 1344, 1362, 1363, 1365, 1373, 1375, 1378, 1380, 1387, 1399, + 1419, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 309, 317, 318, 321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 300, 301, 441, 442, 313, 314, 637, + 638, 299, 593, 624, 591, 636, 618, 435, 375, 1353, + 1359, 378, 280, 304, 319, 1368, 609, 499, 226, 463, + 289, 250, 1386, 1388, 210, 245, 229, 258, 273, 276, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 1349, 1377, + 373, 571, 572, 315, 393, 0, 0, 0, 0, 0, + 0, 523, 0, 766, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 753, 0, + 0, 0, 269, 758, 0, 0, 0, 363, 266, 0, + 0, 427, 0, 203, 0, 484, 251, 374, 371, 578, + 281, 272, 268, 249, 316, 382, 425, 513, 419, 765, + 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, + 760, 761, 0, 0, 0, 0, 0, 0, 0, 0, + 322, 247, 324, 202, 410, 495, 285, 0, 95, 0, + 0, 1010, 946, 737, 912, 950, 1011, 963, 964, 965, + 951, 0, 237, 952, 953, 244, 954, 0, 911, 796, + 798, 797, 861, 862, 863, 864, 865, 866, 867, 794, + 959, 602, 966, 967, 0, 264, 320, 271, 263, 575, + 0, 0, 2195, 2196, 2197, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 733, 750, 0, 764, + 0, 0, 
0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 743, 744, 0, 0, 0, - 0, 901, 0, 745, 0, 0, 753, 964, 965, 966, - 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, - 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, - 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, - 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 755, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 296, 0, 397, 256, 0, 448, 900, 0, 0, 616, - 0, 0, 898, 0, 0, 0, 0, 361, 0, 328, - 197, 224, 0, 0, 407, 456, 468, 0, 0, 0, - 951, 0, 466, 421, 594, 232, 283, 453, 427, 464, - 435, 286, 0, 0, 465, 368, 577, 445, 591, 617, - 618, 262, 401, 603, 514, 611, 635, 225, 259, 415, - 499, 597, 488, 393, 573, 574, 327, 487, 294, 201, - 365, 623, 223, 474, 367, 241, 230, 579, 600, 288, - 451, 630, 212, 509, 589, 238, 478, 0, 0, 638, - 246, 498, 214, 586, 497, 389, 324, 325, 213, 0, - 452, 267, 292, 0, 0, 257, 410, 952, 953, 255, - 639, 797, 610, 219, 0, 609, 403, 576, 587, 390, - 379, 218, 585, 388, 378, 332, 805, 806, 279, 305, - 882, 881, 880, 304, 306, 878, 879, 877, 206, 598, - 0, 207, 0, 493, 599, 640, 447, 211, 233, 234, - 236, 0, 278, 282, 290, 293, 301, 302, 311, 363, - 414, 441, 437, 446, 0, 571, 592, 604, 615, 621, - 622, 624, 625, 626, 627, 628, 631, 629, 402, 309, - 489, 331, 369, 0, 0, 420, 467, 239, 596, 490, - 888, 910, 899, 765, 766, 889, 890, 914, 891, 768, - 769, 911, 912, 762, 763, 767, 913, 915, 641, 642, - 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, - 653, 654, 655, 656, 657, 658, 636, 500, 506, 501, - 502, 503, 504, 505, 0, 507, 902, 752, 751, 0, - 758, 0, 787, 788, 790, 794, 795, 796, 807, 854, - 855, 863, 865, 866, 864, 867, 868, 869, 872, 873, - 874, 875, 870, 871, 876, 770, 774, 771, 772, 773, - 785, 775, 776, 777, 778, 779, 780, 781, 782, 783, - 784, 786, 925, 926, 927, 928, 929, 930, 800, 804, - 803, 801, 802, 798, 799, 826, 825, 827, 828, 829, - 830, 831, 832, 834, 833, 835, 836, 837, 838, 839, - 840, 808, 809, 812, 813, 811, 810, 814, 823, 824, - 815, 816, 817, 818, 
819, 820, 822, 821, 841, 842, - 843, 844, 845, 847, 846, 850, 851, 849, 848, 853, - 852, 750, 196, 220, 364, 0, 449, 287, 637, 606, - 601, 205, 222, 916, 261, 917, 0, 0, 921, 0, - 0, 0, 923, 922, 0, 924, 886, 885, 0, 0, - 918, 919, 0, 920, 0, 0, 198, 200, 208, 221, - 231, 235, 242, 260, 275, 277, 284, 297, 308, 316, - 317, 320, 326, 376, 382, 383, 384, 385, 404, 405, - 406, 409, 412, 413, 416, 418, 419, 422, 426, 430, - 431, 432, 434, 436, 438, 450, 455, 469, 470, 471, - 472, 473, 476, 477, 482, 483, 484, 485, 486, 494, - 495, 508, 578, 580, 595, 613, 619, 475, 931, 932, - 933, 934, 935, 936, 937, 938, 298, 590, 620, 588, - 632, 614, 433, 374, 0, 0, 377, 280, 303, 318, - 0, 605, 496, 226, 461, 289, 250, 956, 0, 210, - 245, 229, 258, 273, 276, 322, 387, 395, 424, 429, - 295, 270, 243, 454, 240, 479, 511, 512, 513, 515, - 391, 265, 428, 392, 0, 372, 568, 569, 314, 520, - 0, 761, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 411, 0, 0, 0, 0, 749, 0, 0, 0, - 269, 754, 0, 0, 0, 362, 266, 0, 0, 425, - 0, 203, 0, 481, 251, 373, 370, 575, 281, 272, - 268, 249, 315, 381, 423, 510, 417, 760, 366, 0, - 0, 491, 396, 0, 0, 0, 0, 0, 756, 757, - 0, 0, 0, 0, 0, 0, 2382, 0, 321, 247, - 323, 202, 408, 492, 285, 0, 95, 0, 0, 957, - 941, 733, 907, 945, 958, 959, 960, 961, 946, 0, - 237, 947, 948, 244, 949, 0, 906, 791, 793, 792, - 856, 857, 858, 859, 860, 861, 862, 789, 954, 962, - 963, 2383, 264, 319, 271, 263, 572, 0, 0, 0, + 747, 748, 0, 0, 0, 0, 906, 0, 749, 0, + 0, 757, 968, 969, 970, 971, 972, 973, 974, 975, + 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, + 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, + 1006, 1007, 1008, 1009, 759, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 399, 256, 0, + 450, 905, 0, 0, 620, 0, 0, 903, 0, 0, + 0, 0, 362, 0, 329, 197, 224, 0, 0, 409, + 458, 470, 0, 0, 0, 956, 0, 468, 423, 597, + 232, 283, 455, 429, 466, 437, 286, 0, 0, 467, + 369, 580, 447, 594, 621, 622, 262, 403, 607, 517, + 615, 639, 225, 259, 
417, 502, 600, 491, 394, 576, + 577, 328, 490, 294, 201, 366, 627, 223, 476, 368, + 241, 230, 582, 604, 298, 288, 453, 634, 212, 512, + 592, 238, 480, 0, 0, 642, 246, 501, 214, 589, + 500, 390, 325, 326, 213, 0, 454, 267, 292, 0, + 0, 257, 412, 957, 958, 255, 643, 802, 614, 219, + 0, 613, 405, 579, 590, 391, 380, 218, 588, 389, + 379, 333, 810, 811, 279, 306, 887, 886, 885, 305, + 307, 883, 884, 882, 206, 601, 0, 207, 0, 496, + 603, 644, 449, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 302, 303, 312, 364, 416, 443, 439, 448, + 0, 574, 595, 608, 619, 625, 626, 628, 629, 630, + 631, 632, 635, 633, 404, 310, 492, 332, 370, 0, + 0, 422, 469, 239, 599, 493, 893, 915, 904, 770, + 771, 894, 895, 919, 896, 773, 774, 916, 917, 767, + 768, 772, 918, 920, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 640, 503, 509, 504, 505, 506, 507, 508, + 0, 510, 907, 756, 755, 0, 762, 763, 0, 792, + 793, 795, 799, 800, 801, 812, 859, 860, 868, 870, + 871, 869, 872, 873, 874, 877, 878, 879, 880, 875, + 876, 881, 775, 779, 776, 777, 778, 790, 780, 781, + 782, 783, 784, 785, 786, 787, 788, 789, 791, 930, + 931, 932, 933, 934, 935, 805, 809, 808, 806, 807, + 803, 804, 831, 830, 832, 833, 834, 835, 836, 837, + 839, 838, 840, 841, 842, 843, 844, 845, 813, 814, + 817, 818, 816, 815, 819, 828, 829, 820, 821, 822, + 823, 824, 825, 827, 826, 846, 847, 848, 849, 850, + 852, 851, 855, 856, 854, 853, 858, 857, 754, 196, + 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, + 222, 921, 261, 922, 0, 0, 926, 0, 0, 0, + 928, 927, 0, 929, 891, 890, 0, 0, 923, 924, + 0, 925, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 309, 317, 318, 321, + 327, 377, 383, 384, 385, 386, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 485, 486, 487, 488, 489, 497, 498, 511, + 581, 583, 598, 617, 623, 477, 936, 937, 938, 939, + 940, 941, 942, 943, 299, 593, 624, 591, 636, 
618, + 435, 375, 0, 0, 378, 280, 304, 319, 0, 609, + 499, 226, 463, 289, 250, 961, 0, 210, 245, 229, + 258, 273, 276, 323, 388, 397, 426, 431, 295, 270, + 243, 456, 240, 482, 514, 515, 516, 518, 392, 265, + 430, 393, 0, 373, 571, 572, 315, 0, 523, 0, + 766, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 753, 0, 0, 0, 269, + 758, 0, 0, 0, 363, 266, 0, 0, 427, 0, + 203, 0, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 765, 367, 0, 0, + 494, 398, 0, 0, 0, 0, 0, 760, 761, 0, + 0, 0, 0, 0, 0, 2402, 0, 322, 247, 324, + 202, 410, 495, 285, 0, 95, 0, 0, 1010, 946, + 737, 912, 950, 1011, 963, 964, 965, 951, 0, 237, + 952, 953, 244, 954, 0, 911, 796, 798, 797, 861, + 862, 863, 864, 865, 866, 867, 794, 959, 602, 966, + 967, 2403, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, - 0, 0, 0, 729, 746, 0, 759, 0, 0, 0, + 0, 0, 0, 733, 750, 0, 764, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 743, 744, 0, - 0, 0, 0, 901, 0, 745, 0, 0, 753, 964, - 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, - 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, - 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, - 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, - 1005, 755, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 296, 0, 397, 256, 0, 448, 900, 0, - 0, 616, 0, 0, 898, 0, 0, 0, 0, 361, - 0, 328, 197, 224, 0, 0, 407, 456, 468, 0, - 0, 0, 951, 0, 466, 421, 594, 232, 283, 453, - 427, 464, 435, 286, 0, 0, 465, 368, 577, 445, - 591, 617, 618, 262, 401, 603, 514, 611, 635, 225, - 259, 415, 499, 597, 488, 393, 573, 574, 327, 487, - 294, 201, 365, 623, 223, 474, 367, 241, 230, 579, - 600, 288, 451, 630, 212, 509, 589, 238, 478, 0, - 0, 638, 246, 498, 214, 586, 497, 389, 324, 325, - 213, 0, 452, 267, 292, 0, 0, 257, 410, 952, - 953, 255, 639, 797, 610, 219, 0, 609, 403, 576, - 587, 390, 379, 218, 585, 388, 378, 332, 805, 806, - 279, 305, 882, 881, 880, 304, 306, 878, 879, 877, - 206, 598, 0, 207, 0, 493, 599, 640, 447, 211, - 233, 234, 236, 0, 
278, 282, 290, 293, 301, 302, - 311, 363, 414, 441, 437, 446, 0, 571, 592, 604, - 615, 621, 622, 624, 625, 626, 627, 628, 631, 629, - 402, 309, 489, 331, 369, 0, 0, 420, 467, 239, - 596, 490, 888, 910, 899, 765, 766, 889, 890, 914, - 891, 768, 769, 911, 912, 762, 763, 767, 913, 915, - 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, 636, 500, - 506, 501, 502, 503, 504, 505, 0, 507, 902, 752, - 751, 0, 758, 0, 787, 788, 790, 794, 795, 796, - 807, 854, 855, 863, 865, 866, 864, 867, 868, 869, - 872, 873, 874, 875, 870, 871, 876, 770, 774, 771, - 772, 773, 785, 775, 776, 777, 778, 779, 780, 781, - 782, 783, 784, 786, 925, 926, 927, 928, 929, 930, - 800, 804, 803, 801, 802, 798, 799, 826, 825, 827, - 828, 829, 830, 831, 832, 834, 833, 835, 836, 837, - 838, 839, 840, 808, 809, 812, 813, 811, 810, 814, - 823, 824, 815, 816, 817, 818, 819, 820, 822, 821, - 841, 842, 843, 844, 845, 847, 846, 850, 851, 849, - 848, 853, 852, 750, 196, 220, 364, 0, 449, 287, - 637, 606, 601, 205, 222, 916, 261, 917, 0, 0, - 921, 0, 0, 0, 923, 922, 0, 924, 886, 885, - 0, 0, 918, 919, 0, 920, 0, 0, 198, 200, - 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, - 308, 316, 317, 320, 326, 376, 382, 383, 384, 385, - 404, 405, 406, 409, 412, 413, 416, 418, 419, 422, - 426, 430, 431, 432, 434, 436, 438, 450, 455, 469, - 470, 471, 472, 473, 476, 477, 482, 483, 484, 485, - 486, 494, 495, 508, 578, 580, 595, 613, 619, 475, - 931, 932, 933, 934, 935, 936, 937, 938, 298, 590, - 620, 588, 632, 614, 433, 374, 0, 0, 377, 280, - 303, 318, 0, 605, 496, 226, 461, 289, 250, 956, - 0, 210, 245, 229, 258, 273, 276, 322, 387, 395, - 424, 429, 295, 270, 243, 454, 240, 479, 511, 512, - 513, 515, 391, 265, 428, 0, 392, 372, 568, 569, - 314, 86, 520, 0, 761, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 749, - 0, 0, 0, 269, 754, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 760, 366, 0, 0, 491, 396, 0, 0, 0, 
0, - 0, 756, 757, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 95, - 0, 0, 957, 941, 733, 907, 945, 958, 959, 960, - 961, 946, 0, 237, 947, 948, 244, 949, 0, 906, - 791, 793, 792, 856, 857, 858, 859, 860, 861, 862, - 789, 954, 962, 963, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 0, 0, 0, 0, 0, 0, 729, 746, 0, 759, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 747, 748, 0, + 0, 0, 0, 906, 0, 749, 0, 0, 757, 968, + 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, + 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, + 1009, 759, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 296, 0, 399, 256, 0, 450, 905, 0, + 0, 620, 0, 0, 903, 0, 0, 0, 0, 362, + 0, 329, 197, 224, 0, 0, 409, 458, 470, 0, + 0, 0, 956, 0, 468, 423, 597, 232, 283, 455, + 429, 466, 437, 286, 0, 0, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 957, 958, 255, 643, 802, 614, 219, 0, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 810, + 811, 279, 306, 887, 886, 885, 305, 307, 883, 884, + 882, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 0, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 631, 632, 635, + 633, 404, 310, 492, 332, 370, 0, 0, 422, 469, + 239, 599, 493, 893, 915, 904, 770, 771, 894, 895, + 919, 896, 773, 774, 916, 917, 767, 768, 772, 918, + 920, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 907, + 756, 755, 0, 762, 763, 0, 792, 793, 795, 799, + 800, 801, 812, 859, 860, 868, 870, 871, 869, 872, 
+ 873, 874, 877, 878, 879, 880, 875, 876, 881, 775, + 779, 776, 777, 778, 790, 780, 781, 782, 783, 784, + 785, 786, 787, 788, 789, 791, 930, 931, 932, 933, + 934, 935, 805, 809, 808, 806, 807, 803, 804, 831, + 830, 832, 833, 834, 835, 836, 837, 839, 838, 840, + 841, 842, 843, 844, 845, 813, 814, 817, 818, 816, + 815, 819, 828, 829, 820, 821, 822, 823, 824, 825, + 827, 826, 846, 847, 848, 849, 850, 852, 851, 855, + 856, 854, 853, 858, 857, 754, 196, 220, 365, 0, + 451, 287, 641, 610, 481, 605, 205, 222, 921, 261, + 922, 0, 0, 926, 0, 0, 0, 928, 927, 0, + 929, 891, 890, 0, 0, 923, 924, 0, 925, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 309, 317, 318, 321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 936, 937, 938, 939, 940, 941, 942, + 943, 299, 593, 624, 591, 636, 618, 435, 375, 0, + 0, 378, 280, 304, 319, 0, 609, 499, 226, 463, + 289, 250, 961, 0, 210, 245, 229, 258, 273, 276, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 393, 0, + 373, 571, 572, 315, 86, 523, 0, 766, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 753, 0, 0, 0, 269, 758, 0, 0, + 0, 363, 266, 0, 0, 427, 0, 203, 0, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 765, 367, 0, 0, 494, 398, 0, + 0, 0, 0, 0, 760, 761, 0, 0, 0, 0, + 0, 0, 0, 0, 322, 247, 324, 202, 410, 495, + 285, 0, 95, 0, 0, 1010, 946, 737, 912, 950, + 1011, 963, 964, 965, 951, 0, 237, 952, 953, 244, + 954, 0, 911, 796, 798, 797, 861, 862, 863, 864, + 865, 866, 867, 794, 959, 602, 966, 967, 0, 264, + 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 733, 750, 0, 764, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 743, 744, 0, 0, 0, 0, 901, 0, 745, 0, - 0, 753, 964, 965, 966, 967, 968, 969, 970, 971, + 0, 0, 0, 0, 
747, 748, 0, 0, 0, 0, + 906, 0, 749, 0, 0, 757, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, - 1002, 1003, 1004, 1005, 755, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 900, 0, 0, 616, 0, 0, 898, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 951, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 952, 953, 255, 639, 797, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 805, 806, 279, 305, 882, 881, 880, 304, 306, - 878, 879, 877, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 888, 910, 899, 765, 766, - 889, 890, 914, 891, 768, 769, 911, 912, 762, 763, - 767, 913, 915, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 902, 752, 751, 0, 758, 0, 787, 788, 790, - 794, 795, 796, 807, 854, 855, 863, 865, 866, 864, - 867, 868, 869, 872, 873, 874, 875, 870, 871, 876, - 770, 774, 771, 772, 773, 785, 775, 776, 777, 778, - 779, 780, 781, 782, 783, 784, 786, 925, 926, 927, - 928, 929, 930, 800, 804, 803, 801, 802, 798, 799, - 826, 825, 827, 828, 829, 830, 831, 832, 834, 833, - 835, 836, 837, 838, 839, 840, 808, 809, 812, 813, - 811, 810, 814, 823, 824, 815, 816, 817, 818, 819, - 820, 822, 821, 841, 842, 843, 844, 845, 847, 846, - 850, 851, 849, 848, 853, 852, 750, 196, 
220, 364, - 94, 449, 287, 637, 606, 601, 205, 222, 916, 261, - 917, 0, 0, 921, 0, 0, 0, 923, 922, 0, - 924, 886, 885, 0, 0, 918, 919, 0, 920, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 931, 932, 933, 934, 935, 936, 937, - 938, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 956, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 761, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 749, 0, 0, 0, 269, 754, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 760, 366, 0, 0, 491, 396, 0, 0, - 0, 0, 0, 756, 757, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 95, 0, 0, 957, 941, 733, 907, 945, 958, - 959, 960, 961, 946, 0, 237, 947, 948, 244, 949, - 0, 906, 791, 793, 792, 856, 857, 858, 859, 860, - 861, 862, 789, 954, 962, 963, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 729, 746, - 0, 759, 0, 0, 0, 274, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 743, 744, 0, 0, 0, 0, 901, 0, - 745, 0, 0, 753, 964, 965, 966, 967, 968, 969, - 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, - 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, - 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, - 1000, 1001, 1002, 1003, 1004, 1005, 755, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 900, 0, 0, 616, 0, 0, 898, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 951, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 3987, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 
514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 952, 953, 255, 639, 797, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 805, 806, 279, 305, 882, 881, 880, - 304, 306, 878, 879, 877, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 888, 910, 899, - 765, 766, 889, 890, 914, 891, 768, 769, 911, 912, - 762, 763, 767, 913, 915, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 902, 752, 751, 0, 758, 0, 787, - 788, 790, 794, 795, 796, 807, 854, 855, 863, 865, - 866, 864, 867, 868, 869, 872, 873, 874, 875, 870, - 871, 876, 770, 774, 771, 772, 773, 785, 775, 776, - 777, 778, 779, 780, 781, 782, 783, 784, 786, 925, - 926, 927, 928, 929, 930, 800, 804, 803, 801, 802, - 798, 799, 826, 825, 827, 828, 829, 830, 831, 832, - 834, 833, 835, 836, 837, 838, 839, 840, 808, 809, - 812, 813, 811, 810, 814, 823, 824, 815, 816, 817, - 818, 819, 820, 822, 821, 841, 842, 843, 844, 845, - 847, 846, 850, 851, 849, 848, 853, 852, 750, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 916, 261, 917, 0, 0, 921, 0, 0, 0, 923, - 922, 0, 924, 886, 885, 0, 0, 918, 919, 0, - 920, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 931, 932, 933, 934, 935, - 936, 937, 938, 298, 590, 620, 
588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 956, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 761, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 749, 0, 0, 0, 269, 754, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 760, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 756, 757, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 95, 0, 1716, 957, 941, 733, 907, - 945, 958, 959, 960, 961, 946, 0, 237, 947, 948, - 244, 949, 0, 906, 791, 793, 792, 856, 857, 858, - 859, 860, 861, 862, 789, 954, 962, 963, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, - 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, - 729, 746, 0, 759, 0, 0, 0, 274, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 743, 744, 0, 0, 0, 0, - 901, 0, 745, 0, 0, 753, 964, 965, 966, 967, + 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 759, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 399, 256, 0, 450, 905, 0, 0, 620, 0, + 0, 903, 0, 0, 0, 0, 362, 0, 329, 197, + 224, 0, 0, 409, 458, 470, 0, 0, 0, 956, + 0, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 0, 0, 467, 369, 580, 447, 594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 957, 958, 255, + 643, 802, 614, 219, 0, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 810, 811, 279, 306, + 887, 886, 885, 305, 307, 883, 884, 882, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 0, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 448, 0, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 633, 404, 310, + 492, 332, 370, 0, 0, 422, 469, 239, 599, 493, 
+ 893, 915, 904, 770, 771, 894, 895, 919, 896, 773, + 774, 916, 917, 767, 768, 772, 918, 920, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 907, 756, 755, 0, + 762, 763, 0, 792, 793, 795, 799, 800, 801, 812, + 859, 860, 868, 870, 871, 869, 872, 873, 874, 877, + 878, 879, 880, 875, 876, 881, 775, 779, 776, 777, + 778, 790, 780, 781, 782, 783, 784, 785, 786, 787, + 788, 789, 791, 930, 931, 932, 933, 934, 935, 805, + 809, 808, 806, 807, 803, 804, 831, 830, 832, 833, + 834, 835, 836, 837, 839, 838, 840, 841, 842, 843, + 844, 845, 813, 814, 817, 818, 816, 815, 819, 828, + 829, 820, 821, 822, 823, 824, 825, 827, 826, 846, + 847, 848, 849, 850, 852, 851, 855, 856, 854, 853, + 858, 857, 754, 196, 220, 365, 94, 451, 287, 641, + 610, 481, 605, 205, 222, 921, 261, 922, 0, 0, + 926, 0, 0, 0, 928, 927, 0, 929, 891, 890, + 0, 0, 923, 924, 0, 925, 0, 0, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 936, 937, 938, 939, 940, 941, 942, 943, 299, 593, + 624, 591, 636, 618, 435, 375, 0, 0, 378, 280, + 304, 319, 0, 609, 499, 226, 463, 289, 250, 961, + 0, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 393, 0, 373, 571, 572, + 315, 0, 523, 0, 766, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 753, + 0, 0, 0, 269, 758, 0, 0, 0, 363, 266, + 0, 0, 427, 0, 203, 0, 484, 251, 374, 371, + 578, 281, 272, 268, 249, 316, 382, 425, 513, 419, + 765, 367, 0, 0, 494, 398, 0, 0, 0, 0, + 0, 760, 761, 0, 0, 0, 0, 0, 0, 0, + 0, 322, 247, 324, 202, 410, 495, 285, 0, 95, + 0, 0, 1010, 946, 737, 912, 950, 1011, 963, 964, + 965, 951, 0, 237, 952, 953, 244, 954, 0, 
911, + 796, 798, 797, 861, 862, 863, 864, 865, 866, 867, + 794, 959, 602, 966, 967, 0, 264, 320, 271, 263, + 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 733, 750, 0, + 764, 0, 0, 0, 274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 747, 748, 0, 0, 0, 0, 906, 0, 749, + 0, 0, 757, 968, 969, 970, 971, 972, 973, 974, + 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, + 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, + 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, + 1005, 1006, 1007, 1008, 1009, 759, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 399, 256, + 0, 450, 905, 0, 0, 620, 0, 0, 903, 0, + 0, 0, 0, 362, 0, 329, 197, 224, 0, 0, + 409, 458, 470, 0, 0, 0, 956, 0, 468, 423, + 597, 232, 283, 455, 429, 466, 437, 286, 4045, 0, + 467, 369, 580, 447, 594, 621, 622, 262, 403, 607, + 517, 615, 639, 225, 259, 417, 502, 600, 491, 394, + 576, 577, 328, 490, 294, 201, 366, 627, 223, 476, + 368, 241, 230, 582, 604, 298, 288, 453, 634, 212, + 512, 592, 238, 480, 0, 0, 642, 246, 501, 214, + 589, 500, 390, 325, 326, 213, 0, 454, 267, 292, + 0, 0, 257, 412, 957, 958, 255, 643, 802, 614, + 219, 0, 613, 405, 579, 590, 391, 380, 218, 588, + 389, 379, 333, 810, 811, 279, 306, 887, 886, 885, + 305, 307, 883, 884, 882, 206, 601, 0, 207, 0, + 496, 603, 644, 449, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 302, 303, 312, 364, 416, 443, 439, + 448, 0, 574, 595, 608, 619, 625, 626, 628, 629, + 630, 631, 632, 635, 633, 404, 310, 492, 332, 370, + 0, 0, 422, 469, 239, 599, 493, 893, 915, 904, + 770, 771, 894, 895, 919, 896, 773, 774, 916, 917, + 767, 768, 772, 918, 920, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 640, 503, 509, 504, 505, 506, 507, + 508, 0, 510, 907, 756, 755, 0, 762, 763, 0, + 792, 793, 795, 799, 800, 801, 812, 859, 860, 868, + 870, 871, 869, 872, 873, 874, 877, 878, 879, 880, + 875, 876, 881, 775, 779, 776, 777, 778, 790, 780, + 781, 782, 783, 784, 785, 786, 787, 788, 789, 791, + 930, 931, 932, 933, 
934, 935, 805, 809, 808, 806, + 807, 803, 804, 831, 830, 832, 833, 834, 835, 836, + 837, 839, 838, 840, 841, 842, 843, 844, 845, 813, + 814, 817, 818, 816, 815, 819, 828, 829, 820, 821, + 822, 823, 824, 825, 827, 826, 846, 847, 848, 849, + 850, 852, 851, 855, 856, 854, 853, 858, 857, 754, + 196, 220, 365, 0, 451, 287, 641, 610, 481, 605, + 205, 222, 921, 261, 922, 0, 0, 926, 0, 0, + 0, 928, 927, 0, 929, 891, 890, 0, 0, 923, + 924, 0, 925, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 309, 317, 318, + 321, 327, 377, 383, 384, 385, 386, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 485, 486, 487, 488, 489, 497, 498, + 511, 581, 583, 598, 617, 623, 477, 936, 937, 938, + 939, 940, 941, 942, 943, 299, 593, 624, 591, 636, + 618, 435, 375, 0, 0, 378, 280, 304, 319, 0, + 609, 499, 226, 463, 289, 250, 961, 0, 210, 245, + 229, 258, 273, 276, 323, 388, 397, 426, 431, 295, + 270, 243, 456, 240, 482, 514, 515, 516, 518, 392, + 265, 430, 393, 0, 373, 571, 572, 315, 0, 523, + 0, 766, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 753, 0, 0, 0, + 269, 758, 0, 0, 0, 363, 266, 0, 0, 427, + 0, 203, 0, 484, 251, 374, 371, 578, 281, 272, + 268, 249, 316, 382, 425, 513, 419, 765, 367, 0, + 0, 494, 398, 0, 0, 0, 0, 0, 760, 761, + 0, 0, 0, 0, 0, 0, 0, 0, 322, 247, + 324, 202, 410, 495, 285, 0, 95, 0, 1727, 1010, + 946, 737, 912, 950, 1011, 963, 964, 965, 951, 0, + 237, 952, 953, 244, 954, 0, 911, 796, 798, 797, + 861, 862, 863, 864, 865, 866, 867, 794, 959, 602, + 966, 967, 0, 264, 320, 271, 263, 575, 0, 0, + 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 0, 0, 0, 0, 733, 750, 0, 764, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 747, 748, + 0, 0, 0, 0, 906, 0, 749, 0, 0, 757, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, - 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 
755, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 900, 0, 0, 616, 0, - 0, 898, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 951, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 952, 953, 255, 639, - 797, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 805, 806, 279, 305, 882, - 881, 880, 304, 306, 878, 879, 877, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 888, - 910, 899, 765, 766, 889, 890, 914, 891, 768, 769, - 911, 912, 762, 763, 767, 913, 915, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 902, 752, 751, 0, 758, - 0, 787, 788, 790, 794, 795, 796, 807, 854, 855, - 863, 865, 866, 864, 867, 868, 869, 872, 873, 874, - 875, 870, 871, 876, 770, 774, 771, 772, 773, 785, - 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, - 786, 925, 926, 927, 928, 929, 930, 800, 804, 803, - 801, 802, 798, 799, 826, 825, 827, 828, 829, 830, - 831, 832, 834, 833, 835, 836, 837, 838, 839, 840, - 808, 809, 812, 813, 811, 810, 814, 823, 824, 815, - 816, 817, 818, 819, 820, 822, 821, 841, 842, 843, - 844, 845, 847, 846, 850, 851, 849, 848, 853, 852, - 750, 196, 220, 364, 0, 449, 287, 637, 606, 601, - 205, 222, 916, 261, 917, 0, 0, 921, 0, 0, - 0, 923, 922, 0, 924, 886, 885, 0, 0, 918, - 919, 0, 920, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 
383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 931, 932, 933, - 934, 935, 936, 937, 938, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 956, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 761, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 749, 0, 0, 0, 269, - 754, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 760, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 756, 757, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 95, 0, 0, 957, 941, - 733, 907, 945, 958, 959, 960, 961, 946, 0, 237, - 947, 948, 244, 949, 0, 906, 791, 793, 792, 856, - 857, 858, 859, 860, 861, 862, 789, 954, 962, 963, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, - 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 729, 746, 0, 759, 0, 0, 0, 274, + 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, + 1008, 1009, 759, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 399, 256, 0, 450, 905, + 0, 0, 620, 0, 0, 903, 0, 0, 0, 0, + 362, 0, 329, 197, 224, 0, 0, 409, 458, 470, + 0, 0, 0, 956, 0, 468, 423, 597, 232, 283, + 455, 429, 466, 437, 286, 0, 0, 467, 369, 580, + 447, 594, 621, 622, 262, 403, 607, 517, 615, 639, + 225, 259, 417, 502, 600, 491, 394, 576, 577, 328, + 490, 294, 201, 366, 627, 223, 476, 368, 241, 230, + 582, 604, 298, 288, 453, 634, 212, 512, 592, 238, + 480, 0, 0, 642, 246, 501, 214, 589, 500, 390, + 325, 326, 213, 0, 454, 267, 292, 0, 0, 257, + 412, 957, 958, 255, 643, 802, 614, 219, 0, 613, + 405, 579, 590, 391, 380, 218, 588, 389, 379, 333, + 810, 811, 279, 306, 887, 886, 885, 305, 307, 883, + 884, 882, 206, 601, 0, 207, 0, 496, 603, 644, + 449, 211, 233, 234, 236, 0, 278, 282, 
290, 293, + 302, 303, 312, 364, 416, 443, 439, 448, 0, 574, + 595, 608, 619, 625, 626, 628, 629, 630, 631, 632, + 635, 633, 404, 310, 492, 332, 370, 0, 0, 422, + 469, 239, 599, 493, 893, 915, 904, 770, 771, 894, + 895, 919, 896, 773, 774, 916, 917, 767, 768, 772, + 918, 920, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, + 640, 503, 509, 504, 505, 506, 507, 508, 0, 510, + 907, 756, 755, 0, 762, 763, 0, 792, 793, 795, + 799, 800, 801, 812, 859, 860, 868, 870, 871, 869, + 872, 873, 874, 877, 878, 879, 880, 875, 876, 881, + 775, 779, 776, 777, 778, 790, 780, 781, 782, 783, + 784, 785, 786, 787, 788, 789, 791, 930, 931, 932, + 933, 934, 935, 805, 809, 808, 806, 807, 803, 804, + 831, 830, 832, 833, 834, 835, 836, 837, 839, 838, + 840, 841, 842, 843, 844, 845, 813, 814, 817, 818, + 816, 815, 819, 828, 829, 820, 821, 822, 823, 824, + 825, 827, 826, 846, 847, 848, 849, 850, 852, 851, + 855, 856, 854, 853, 858, 857, 754, 196, 220, 365, + 0, 451, 287, 641, 610, 481, 605, 205, 222, 921, + 261, 922, 0, 0, 926, 0, 0, 0, 928, 927, + 0, 929, 891, 890, 0, 0, 923, 924, 0, 925, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 309, 317, 318, 321, 327, 377, + 383, 384, 385, 386, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 485, 486, 487, 488, 489, 497, 498, 511, 581, 583, + 598, 617, 623, 477, 936, 937, 938, 939, 940, 941, + 942, 943, 299, 593, 624, 591, 636, 618, 435, 375, + 0, 0, 378, 280, 304, 319, 0, 609, 499, 226, + 463, 289, 250, 961, 0, 210, 245, 229, 258, 273, + 276, 323, 388, 397, 426, 431, 295, 270, 243, 456, + 240, 482, 514, 515, 516, 518, 392, 265, 430, 393, + 0, 373, 571, 572, 315, 0, 523, 0, 766, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 753, 0, 0, 0, 269, 758, 0, + 0, 0, 363, 266, 0, 0, 427, 0, 203, 0, + 484, 251, 374, 371, 578, 281, 272, 268, 249, 316, + 382, 425, 513, 419, 765, 367, 0, 0, 494, 398, + 0, 0, 
0, 0, 0, 760, 761, 0, 0, 0, + 0, 0, 0, 0, 0, 322, 247, 324, 202, 410, + 495, 285, 0, 95, 0, 0, 1010, 946, 737, 912, + 950, 1011, 963, 964, 965, 951, 0, 237, 952, 953, + 244, 954, 0, 911, 796, 798, 797, 861, 862, 863, + 864, 865, 866, 867, 794, 959, 602, 966, 967, 0, + 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, + 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, + 0, 733, 750, 0, 764, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 743, 744, 1049, 0, - 0, 0, 901, 0, 745, 0, 0, 753, 964, 965, - 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, - 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, - 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, - 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, - 755, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 900, 0, 0, - 616, 0, 0, 898, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 951, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 952, 953, - 255, 639, 797, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 805, 806, 279, - 305, 882, 881, 880, 304, 306, 878, 879, 877, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 888, 910, 899, 765, 766, 889, 890, 914, 891, - 768, 769, 911, 912, 762, 763, 767, 913, 915, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 902, 752, 751, - 0, 758, 0, 787, 788, 790, 794, 795, 796, 807, - 854, 855, 863, 865, 866, 
864, 867, 868, 869, 872, - 873, 874, 875, 870, 871, 876, 770, 774, 771, 772, - 773, 785, 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 786, 925, 926, 927, 928, 929, 930, 800, - 804, 803, 801, 802, 798, 799, 826, 825, 827, 828, - 829, 830, 831, 832, 834, 833, 835, 836, 837, 838, - 839, 840, 808, 809, 812, 813, 811, 810, 814, 823, - 824, 815, 816, 817, 818, 819, 820, 822, 821, 841, - 842, 843, 844, 845, 847, 846, 850, 851, 849, 848, - 853, 852, 750, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 916, 261, 917, 0, 0, 921, - 0, 0, 0, 923, 922, 0, 924, 886, 885, 0, - 0, 918, 919, 0, 920, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 931, - 932, 933, 934, 935, 936, 937, 938, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 956, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 761, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 0, 749, 0, 0, - 0, 269, 754, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 760, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 756, - 757, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, - 957, 941, 733, 907, 945, 958, 959, 960, 961, 946, - 0, 237, 947, 948, 244, 949, 0, 906, 791, 793, - 792, 856, 857, 858, 859, 860, 861, 862, 789, 954, - 962, 963, 0, 264, 319, 271, 263, 572, 0, 0, - 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, - 0, 0, 0, 0, 729, 746, 0, 759, 0, 0, - 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 743, 744, - 0, 0, 0, 0, 901, 0, 745, 0, 0, 753, - 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 
+ 0, 0, 0, 0, 0, 747, 748, 1056, 0, 0, + 0, 906, 0, 749, 0, 0, 757, 968, 969, 970, + 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, + 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, + 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 759, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 296, 0, 399, 256, 0, 450, 905, 0, 0, 620, + 0, 0, 903, 0, 0, 0, 0, 362, 0, 329, + 197, 224, 0, 0, 409, 458, 470, 0, 0, 0, + 956, 0, 468, 423, 597, 232, 283, 455, 429, 466, + 437, 286, 0, 0, 467, 369, 580, 447, 594, 621, + 622, 262, 403, 607, 517, 615, 639, 225, 259, 417, + 502, 600, 491, 394, 576, 577, 328, 490, 294, 201, + 366, 627, 223, 476, 368, 241, 230, 582, 604, 298, + 288, 453, 634, 212, 512, 592, 238, 480, 0, 0, + 642, 246, 501, 214, 589, 500, 390, 325, 326, 213, + 0, 454, 267, 292, 0, 0, 257, 412, 957, 958, + 255, 643, 802, 614, 219, 0, 613, 405, 579, 590, + 391, 380, 218, 588, 389, 379, 333, 810, 811, 279, + 306, 887, 886, 885, 305, 307, 883, 884, 882, 206, + 601, 0, 207, 0, 496, 603, 644, 449, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 302, 303, 312, + 364, 416, 443, 439, 448, 0, 574, 595, 608, 619, + 625, 626, 628, 629, 630, 631, 632, 635, 633, 404, + 310, 492, 332, 370, 0, 0, 422, 469, 239, 599, + 493, 893, 915, 904, 770, 771, 894, 895, 919, 896, + 773, 774, 916, 917, 767, 768, 772, 918, 920, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 661, 662, 640, 503, 509, + 504, 505, 506, 507, 508, 0, 510, 907, 756, 755, + 0, 762, 763, 0, 792, 793, 795, 799, 800, 801, + 812, 859, 860, 868, 870, 871, 869, 872, 873, 874, + 877, 878, 879, 880, 875, 876, 881, 775, 779, 776, + 777, 778, 790, 780, 781, 782, 783, 784, 785, 786, + 787, 788, 789, 791, 930, 931, 932, 933, 934, 935, + 805, 809, 808, 806, 807, 803, 804, 831, 830, 832, + 833, 834, 835, 836, 837, 839, 838, 840, 841, 842, + 843, 844, 845, 813, 814, 817, 818, 816, 815, 819, + 828, 829, 820, 821, 822, 823, 824, 825, 827, 826, + 846, 847, 848, 
849, 850, 852, 851, 855, 856, 854, + 853, 858, 857, 754, 196, 220, 365, 0, 451, 287, + 641, 610, 481, 605, 205, 222, 921, 261, 922, 0, + 0, 926, 0, 0, 0, 928, 927, 0, 929, 891, + 890, 0, 0, 923, 924, 0, 925, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 309, 317, 318, 321, 327, 377, 383, 384, 385, + 386, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 485, 486, 487, + 488, 489, 497, 498, 511, 581, 583, 598, 617, 623, + 477, 936, 937, 938, 939, 940, 941, 942, 943, 299, + 593, 624, 591, 636, 618, 435, 375, 0, 0, 378, + 280, 304, 319, 0, 609, 499, 226, 463, 289, 250, + 961, 0, 210, 245, 229, 258, 273, 276, 323, 388, + 397, 426, 431, 295, 270, 243, 456, 240, 482, 514, + 515, 516, 518, 392, 265, 430, 393, 0, 373, 571, + 572, 315, 0, 523, 0, 766, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 753, 0, 0, 0, 269, 758, 0, 0, 0, 363, + 266, 0, 0, 427, 0, 203, 0, 484, 251, 374, + 371, 578, 281, 272, 268, 249, 316, 382, 425, 513, + 419, 765, 367, 0, 0, 494, 398, 0, 0, 0, + 0, 0, 760, 761, 0, 0, 0, 0, 0, 0, + 0, 0, 322, 247, 324, 202, 410, 495, 285, 0, + 95, 0, 0, 1010, 946, 737, 912, 950, 1011, 963, + 964, 965, 951, 0, 237, 952, 953, 244, 954, 0, + 911, 796, 798, 797, 861, 862, 863, 864, 865, 866, + 867, 794, 959, 602, 966, 967, 0, 264, 320, 271, + 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 733, 750, + 0, 764, 0, 0, 0, 274, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 747, 748, 0, 0, 0, 0, 906, 0, + 749, 0, 0, 757, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, - 1004, 1005, 755, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 900, - 0, 0, 616, 0, 0, 898, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 951, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 
577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 952, 953, 255, 639, 797, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 805, - 806, 279, 305, 882, 881, 880, 304, 306, 878, 879, - 877, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 888, 910, 899, 765, 766, 889, 890, - 914, 891, 768, 769, 911, 912, 762, 763, 767, 913, - 915, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 902, - 752, 751, 0, 758, 0, 787, 788, 790, 794, 795, - 796, 807, 854, 855, 863, 865, 866, 864, 867, 868, - 869, 872, 873, 874, 875, 870, 871, 876, 770, 774, - 771, 772, 773, 785, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 786, 925, 926, 927, 928, 929, - 930, 800, 804, 803, 801, 802, 798, 799, 826, 825, - 827, 828, 829, 830, 831, 832, 834, 833, 835, 836, - 837, 838, 839, 840, 808, 809, 812, 813, 811, 810, - 814, 823, 824, 815, 816, 817, 818, 819, 820, 822, - 821, 841, 842, 843, 844, 845, 847, 846, 850, 851, - 849, 848, 853, 852, 750, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 916, 261, 917, 0, - 0, 921, 0, 0, 0, 923, 922, 0, 924, 886, - 885, 0, 0, 918, 919, 0, 920, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 931, 932, 
933, 934, 935, 936, 937, 938, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 956, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 761, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 749, - 0, 0, 0, 269, 754, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 760, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 756, 757, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 95, - 0, 0, 957, 941, 733, 907, 945, 958, 959, 960, - 961, 946, 0, 237, 947, 948, 244, 949, 0, 906, - 791, 793, 792, 856, 857, 858, 859, 860, 861, 862, - 789, 954, 962, 963, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 0, 0, 0, 0, 0, 0, 729, 746, 0, 759, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 1004, 1005, 1006, 1007, 1008, 1009, 759, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 399, + 256, 0, 450, 905, 0, 0, 620, 0, 0, 903, + 0, 0, 0, 0, 362, 0, 329, 197, 224, 0, + 0, 409, 458, 470, 0, 0, 0, 956, 0, 468, + 423, 597, 232, 283, 455, 429, 466, 437, 286, 0, + 0, 467, 369, 580, 447, 594, 621, 622, 262, 403, + 607, 517, 615, 639, 225, 259, 417, 502, 600, 491, + 394, 576, 577, 328, 490, 294, 201, 366, 627, 223, + 476, 368, 241, 230, 582, 604, 298, 288, 453, 634, + 212, 512, 592, 238, 480, 0, 0, 642, 246, 501, + 214, 589, 500, 390, 325, 326, 213, 0, 454, 267, + 292, 0, 0, 257, 412, 957, 958, 255, 643, 802, + 614, 219, 0, 613, 405, 579, 590, 391, 380, 218, + 588, 389, 379, 333, 810, 811, 279, 306, 887, 886, + 885, 305, 307, 883, 884, 882, 206, 601, 0, 207, + 0, 496, 603, 644, 449, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 302, 303, 312, 364, 416, 443, + 439, 448, 0, 574, 595, 608, 619, 625, 626, 628, + 629, 630, 631, 632, 635, 633, 404, 310, 492, 332, + 370, 0, 0, 422, 469, 239, 599, 493, 893, 915, + 904, 770, 771, 894, 895, 919, 896, 773, 774, 916, + 917, 
767, 768, 772, 918, 920, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 662, 640, 503, 509, 504, 505, 506, + 507, 508, 0, 510, 907, 756, 755, 0, 762, 763, + 0, 792, 793, 795, 799, 800, 801, 812, 859, 860, + 868, 870, 871, 869, 872, 873, 874, 877, 878, 879, + 880, 875, 876, 881, 775, 779, 776, 777, 778, 790, + 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, + 791, 930, 931, 932, 933, 934, 935, 805, 809, 808, + 806, 807, 803, 804, 831, 830, 832, 833, 834, 835, + 836, 837, 839, 838, 840, 841, 842, 843, 844, 845, + 813, 814, 817, 818, 816, 815, 819, 828, 829, 820, + 821, 822, 823, 824, 825, 827, 826, 846, 847, 848, + 849, 850, 852, 851, 855, 856, 854, 853, 858, 857, + 754, 196, 220, 365, 0, 451, 287, 641, 610, 481, + 605, 205, 222, 921, 261, 922, 0, 0, 926, 0, + 0, 0, 928, 927, 0, 929, 891, 890, 0, 0, + 923, 924, 0, 925, 0, 0, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 309, 317, + 318, 321, 327, 377, 383, 384, 385, 386, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 485, 486, 487, 488, 489, 497, + 498, 511, 581, 583, 598, 617, 623, 477, 936, 937, + 938, 939, 940, 941, 942, 943, 299, 593, 624, 591, + 636, 618, 435, 375, 0, 0, 378, 280, 304, 319, + 0, 609, 499, 226, 463, 289, 250, 961, 0, 210, + 245, 229, 258, 273, 276, 323, 388, 397, 426, 431, + 295, 270, 243, 456, 240, 482, 514, 515, 516, 518, + 392, 265, 430, 393, 0, 373, 571, 572, 315, 0, + 523, 0, 766, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 753, 0, 0, + 0, 269, 758, 0, 0, 0, 363, 266, 0, 0, + 427, 0, 203, 0, 484, 251, 374, 371, 578, 281, + 272, 268, 249, 316, 382, 425, 513, 419, 765, 367, + 0, 0, 494, 398, 0, 0, 0, 0, 0, 760, + 761, 0, 0, 0, 0, 0, 0, 0, 0, 322, + 247, 324, 202, 410, 495, 285, 0, 95, 0, 0, + 1010, 946, 737, 912, 950, 1011, 963, 964, 965, 951, + 0, 237, 952, 953, 244, 954, 0, 911, 796, 798, + 797, 861, 862, 863, 864, 865, 866, 867, 794, 959, + 
602, 966, 967, 0, 264, 320, 271, 263, 575, 0, + 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, + 0, 0, 0, 0, 0, 733, 750, 0, 764, 0, + 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 747, + 748, 0, 0, 0, 0, 906, 0, 749, 0, 0, + 757, 968, 969, 970, 971, 972, 973, 974, 975, 976, + 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, + 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, + 1007, 1008, 1009, 3124, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 296, 0, 399, 256, 0, 450, + 905, 0, 0, 620, 0, 0, 903, 0, 0, 0, + 0, 362, 0, 329, 197, 224, 0, 0, 409, 458, + 470, 0, 0, 0, 956, 0, 468, 423, 597, 232, + 283, 455, 429, 466, 437, 286, 0, 0, 467, 369, + 580, 447, 594, 621, 622, 262, 403, 607, 517, 615, + 639, 225, 259, 417, 502, 600, 491, 394, 576, 577, + 328, 490, 294, 201, 366, 627, 223, 476, 368, 241, + 230, 582, 604, 298, 288, 453, 634, 212, 512, 592, + 238, 480, 0, 0, 642, 246, 501, 214, 589, 500, + 390, 325, 326, 213, 0, 454, 267, 292, 0, 0, + 257, 412, 957, 958, 255, 643, 802, 614, 219, 0, + 613, 405, 579, 590, 391, 380, 218, 588, 389, 379, + 333, 810, 811, 279, 306, 887, 886, 885, 305, 307, + 883, 884, 882, 206, 601, 0, 207, 0, 496, 603, + 644, 449, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 302, 303, 312, 364, 416, 443, 439, 448, 0, + 574, 595, 608, 619, 625, 626, 628, 629, 630, 631, + 632, 635, 633, 404, 310, 492, 332, 370, 0, 0, + 422, 469, 239, 599, 493, 893, 915, 904, 770, 771, + 894, 895, 919, 896, 773, 774, 916, 917, 767, 768, + 772, 918, 920, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 640, 503, 509, 504, 505, 506, 507, 508, 0, + 510, 907, 756, 755, 0, 762, 763, 0, 792, 793, + 795, 799, 800, 801, 812, 859, 860, 868, 870, 871, + 869, 872, 873, 874, 877, 878, 879, 880, 875, 876, + 881, 775, 779, 776, 777, 778, 790, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 789, 791, 930, 931, + 932, 933, 934, 935, 805, 809, 808, 806, 807, 803, + 804, 831, 830, 832, 833, 834, 
835, 836, 837, 839, + 838, 840, 841, 842, 843, 844, 845, 813, 814, 817, + 818, 816, 815, 819, 828, 829, 820, 821, 822, 823, + 824, 825, 827, 826, 846, 847, 848, 849, 850, 852, + 851, 855, 856, 854, 853, 858, 857, 754, 196, 220, + 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, + 921, 261, 922, 0, 0, 926, 0, 0, 0, 928, + 927, 0, 929, 891, 890, 0, 0, 923, 924, 0, + 925, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 309, 317, 318, 321, 327, + 377, 383, 384, 385, 386, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 485, 486, 487, 488, 489, 497, 498, 511, 581, + 583, 598, 617, 623, 477, 936, 937, 938, 939, 940, + 941, 942, 943, 299, 593, 624, 591, 636, 618, 435, + 375, 0, 0, 378, 280, 304, 319, 0, 609, 499, + 226, 463, 289, 250, 961, 0, 210, 245, 229, 258, + 273, 276, 323, 388, 397, 426, 431, 295, 270, 243, + 456, 240, 482, 514, 515, 516, 518, 392, 265, 430, + 393, 0, 373, 571, 572, 315, 0, 523, 0, 766, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 753, 0, 0, 0, 269, 758, + 0, 0, 0, 363, 266, 0, 0, 427, 0, 203, + 0, 484, 251, 374, 371, 578, 281, 272, 268, 249, + 316, 382, 425, 513, 419, 765, 367, 0, 0, 494, + 398, 0, 0, 0, 0, 0, 760, 761, 0, 0, + 0, 0, 0, 0, 0, 0, 322, 247, 324, 202, + 410, 495, 285, 0, 95, 0, 0, 1010, 946, 737, + 912, 950, 1011, 963, 964, 965, 951, 0, 237, 952, + 953, 244, 954, 0, 911, 796, 798, 797, 861, 862, + 863, 864, 865, 866, 867, 794, 959, 602, 966, 967, + 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 733, 750, 0, 764, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 743, 744, 0, 0, 0, 0, 901, 0, 745, 0, - 0, 753, 964, 965, 966, 967, 968, 969, 970, 971, - 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, - 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, - 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, - 1002, 1003, 1004, 1005, 3092, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 900, 
0, 0, 616, 0, 0, 898, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 951, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 952, 953, 255, 639, 797, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 805, 806, 279, 305, 882, 881, 880, 304, 306, - 878, 879, 877, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 888, 910, 899, 765, 766, - 889, 890, 914, 891, 768, 769, 911, 912, 762, 763, - 767, 913, 915, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 902, 752, 751, 0, 758, 0, 787, 788, 790, - 794, 795, 796, 807, 854, 855, 863, 865, 866, 864, - 867, 868, 869, 872, 873, 874, 875, 870, 871, 876, - 770, 774, 771, 772, 773, 785, 775, 776, 777, 778, - 779, 780, 781, 782, 783, 784, 786, 925, 926, 927, - 928, 929, 930, 800, 804, 803, 801, 802, 798, 799, - 826, 825, 827, 828, 829, 830, 831, 832, 834, 833, - 835, 836, 837, 838, 839, 840, 808, 809, 812, 813, - 811, 810, 814, 823, 824, 815, 816, 817, 818, 819, - 820, 822, 821, 841, 842, 843, 844, 845, 847, 846, - 850, 851, 849, 848, 853, 852, 750, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 916, 261, - 917, 0, 0, 921, 0, 0, 0, 923, 922, 0, - 924, 886, 885, 0, 0, 918, 919, 0, 920, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 
426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 931, 932, 933, 934, 935, 936, 937, - 938, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 956, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 761, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 749, 0, 0, 0, 269, 754, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 760, 366, 0, 0, 491, 396, 0, 0, - 0, 0, 0, 756, 757, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 95, 0, 0, 957, 941, 733, 907, 945, 958, - 959, 960, 961, 946, 0, 237, 947, 948, 244, 949, - 0, 906, 791, 793, 792, 856, 857, 858, 859, 860, - 861, 862, 789, 954, 962, 963, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 729, 746, - 0, 759, 0, 0, 0, 274, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 743, 744, 0, 0, 0, 0, 901, 0, - 745, 0, 0, 753, 964, 965, 966, 967, 968, 969, + 0, 0, 0, 0, 0, 0, 747, 748, 0, 0, + 0, 0, 906, 0, 749, 0, 0, 757, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, - 1000, 1001, 1002, 1003, 1004, 1005, 3088, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 900, 0, 0, 616, 0, 0, 898, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 951, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 
0, 0, 257, 410, 952, 953, 255, 639, 797, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 805, 806, 279, 305, 882, 881, 880, - 304, 306, 878, 879, 877, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 888, 910, 899, - 765, 766, 889, 890, 914, 891, 768, 769, 911, 912, - 762, 763, 767, 913, 915, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 902, 752, 751, 0, 758, 0, 787, - 788, 790, 794, 795, 796, 807, 854, 855, 863, 865, - 866, 864, 867, 868, 869, 872, 873, 874, 875, 870, - 871, 876, 770, 774, 771, 772, 773, 785, 775, 776, - 777, 778, 779, 780, 781, 782, 783, 784, 786, 925, - 926, 927, 928, 929, 930, 800, 804, 803, 801, 802, - 798, 799, 826, 825, 827, 828, 829, 830, 831, 832, - 834, 833, 835, 836, 837, 838, 839, 840, 808, 809, - 812, 813, 811, 810, 814, 823, 824, 815, 816, 817, - 818, 819, 820, 822, 821, 841, 842, 843, 844, 845, - 847, 846, 850, 851, 849, 848, 853, 852, 750, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 916, 261, 917, 0, 0, 921, 0, 0, 0, 923, - 922, 0, 924, 886, 885, 0, 0, 918, 919, 0, - 920, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 931, 932, 933, 934, 935, - 936, 937, 938, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 956, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 
520, 0, 761, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 749, 0, 0, 0, 269, 754, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 760, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 756, 757, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 95, 0, 0, 957, 941, 1070, 907, - 945, 958, 959, 960, 961, 946, 0, 237, 947, 948, - 244, 949, 0, 906, 791, 793, 792, 856, 857, 858, - 859, 860, 861, 862, 789, 954, 962, 963, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, - 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, - 0, 746, 0, 759, 0, 0, 0, 274, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 743, 744, 0, 0, 0, 0, - 901, 0, 745, 0, 0, 753, 964, 965, 966, 967, - 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, - 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, - 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, - 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, 755, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 900, 0, 0, 616, 0, - 0, 898, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 951, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 952, 953, 255, 639, - 797, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 805, 806, 279, 305, 882, - 881, 880, 304, 306, 878, 879, 877, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 888, - 910, 899, 765, 766, 889, 890, 914, 891, 768, 769, - 911, 912, 762, 763, 767, 913, 915, 641, 642, 
643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 902, 752, 751, 0, 758, - 0, 787, 788, 790, 794, 795, 796, 807, 854, 855, - 863, 865, 866, 864, 867, 868, 869, 872, 873, 874, - 875, 870, 871, 876, 770, 774, 771, 772, 773, 785, - 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, - 786, 925, 926, 927, 928, 929, 930, 800, 804, 803, - 801, 802, 798, 799, 826, 825, 827, 828, 829, 830, - 831, 832, 834, 833, 835, 836, 837, 838, 839, 840, - 808, 809, 812, 813, 811, 810, 814, 823, 824, 815, - 816, 817, 818, 819, 820, 822, 821, 841, 842, 843, - 844, 845, 847, 846, 850, 851, 849, 848, 853, 852, - 750, 196, 220, 364, 0, 449, 287, 637, 606, 601, - 205, 222, 916, 261, 917, 0, 0, 921, 0, 0, - 0, 923, 922, 0, 924, 886, 885, 0, 0, 918, - 919, 0, 920, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 931, 932, 933, - 934, 935, 936, 937, 938, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 956, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 761, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 749, 0, 0, 0, 269, - 754, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 760, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 756, 757, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 95, 0, 0, 957, 941, - 1070, 907, 945, 958, 959, 960, 961, 946, 0, 237, - 947, 948, 244, 949, 0, 906, 791, 793, 792, 856, - 857, 858, 859, 860, 861, 862, 789, 954, 962, 963, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, 
- 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 0, 746, 0, 759, 0, 0, 0, 274, + 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, + 3120, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 399, 256, 0, 450, 905, 0, 0, + 620, 0, 0, 903, 0, 0, 0, 0, 362, 0, + 329, 197, 224, 0, 0, 409, 458, 470, 0, 0, + 0, 956, 0, 468, 423, 597, 232, 283, 455, 429, + 466, 437, 286, 0, 0, 467, 369, 580, 447, 594, + 621, 622, 262, 403, 607, 517, 615, 639, 225, 259, + 417, 502, 600, 491, 394, 576, 577, 328, 490, 294, + 201, 366, 627, 223, 476, 368, 241, 230, 582, 604, + 298, 288, 453, 634, 212, 512, 592, 238, 480, 0, + 0, 642, 246, 501, 214, 589, 500, 390, 325, 326, + 213, 0, 454, 267, 292, 0, 0, 257, 412, 957, + 958, 255, 643, 802, 614, 219, 0, 613, 405, 579, + 590, 391, 380, 218, 588, 389, 379, 333, 810, 811, + 279, 306, 887, 886, 885, 305, 307, 883, 884, 882, + 206, 601, 0, 207, 0, 496, 603, 644, 449, 211, + 233, 234, 236, 0, 278, 282, 290, 293, 302, 303, + 312, 364, 416, 443, 439, 448, 0, 574, 595, 608, + 619, 625, 626, 628, 629, 630, 631, 632, 635, 633, + 404, 310, 492, 332, 370, 0, 0, 422, 469, 239, + 599, 493, 893, 915, 904, 770, 771, 894, 895, 919, + 896, 773, 774, 916, 917, 767, 768, 772, 918, 920, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 640, 503, + 509, 504, 505, 506, 507, 508, 0, 510, 907, 756, + 755, 0, 762, 763, 0, 792, 793, 795, 799, 800, + 801, 812, 859, 860, 868, 870, 871, 869, 872, 873, + 874, 877, 878, 879, 880, 875, 876, 881, 775, 779, + 776, 777, 778, 790, 780, 781, 782, 783, 784, 785, + 786, 787, 788, 789, 791, 930, 931, 932, 933, 934, + 935, 805, 809, 808, 806, 807, 803, 804, 831, 830, + 832, 833, 834, 835, 836, 837, 839, 838, 840, 841, + 842, 843, 844, 845, 813, 814, 817, 818, 816, 815, + 819, 828, 829, 820, 821, 822, 823, 824, 825, 827, + 826, 846, 847, 848, 849, 850, 852, 851, 855, 856, + 854, 853, 858, 857, 754, 196, 220, 365, 0, 451, + 287, 641, 610, 481, 605, 205, 222, 921, 261, 922, + 0, 0, 926, 0, 0, 0, 928, 
927, 0, 929, + 891, 890, 0, 0, 923, 924, 0, 925, 0, 0, + 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, + 284, 297, 309, 317, 318, 321, 327, 377, 383, 384, + 385, 386, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 485, 486, + 487, 488, 489, 497, 498, 511, 581, 583, 598, 617, + 623, 477, 936, 937, 938, 939, 940, 941, 942, 943, + 299, 593, 624, 591, 636, 618, 435, 375, 0, 0, + 378, 280, 304, 319, 0, 609, 499, 226, 463, 289, + 250, 961, 0, 210, 245, 229, 258, 273, 276, 323, + 388, 397, 426, 431, 295, 270, 243, 456, 240, 482, + 514, 515, 516, 518, 392, 265, 430, 393, 0, 373, + 571, 572, 315, 0, 523, 0, 766, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 753, 0, 0, 0, 269, 758, 0, 0, 0, + 363, 266, 0, 0, 427, 0, 203, 0, 484, 251, + 374, 371, 578, 281, 272, 268, 249, 316, 382, 425, + 513, 419, 765, 367, 0, 0, 494, 398, 0, 0, + 0, 0, 0, 760, 761, 0, 0, 0, 0, 0, + 0, 0, 0, 322, 247, 324, 202, 410, 495, 285, + 0, 95, 0, 0, 1010, 946, 1077, 912, 950, 1011, + 963, 964, 965, 951, 0, 237, 952, 953, 244, 954, + 0, 911, 796, 798, 797, 861, 862, 863, 864, 865, + 866, 867, 794, 959, 602, 966, 967, 0, 264, 320, + 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, + 750, 0, 764, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 747, 748, 0, 0, 0, 0, 906, + 0, 749, 0, 0, 757, 968, 969, 970, 971, 972, + 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, + 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, + 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 1008, 1009, 759, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 399, 256, 0, 450, 905, 0, 0, 620, 0, 0, + 903, 0, 0, 0, 0, 362, 0, 329, 197, 224, + 0, 0, 409, 458, 470, 0, 0, 0, 956, 0, + 468, 423, 597, 232, 283, 455, 429, 466, 437, 286, + 0, 0, 467, 369, 580, 447, 594, 621, 622, 262, + 403, 607, 517, 615, 639, 225, 259, 417, 502, 600, + 491, 394, 576, 577, 328, 490, 294, 201, 366, 
627, + 223, 476, 368, 241, 230, 582, 604, 298, 288, 453, + 634, 212, 512, 592, 238, 480, 0, 0, 642, 246, + 501, 214, 589, 500, 390, 325, 326, 213, 0, 454, + 267, 292, 0, 0, 257, 412, 957, 958, 255, 643, + 802, 614, 219, 0, 613, 405, 579, 590, 391, 380, + 218, 588, 389, 379, 333, 810, 811, 279, 306, 887, + 886, 885, 305, 307, 883, 884, 882, 206, 601, 0, + 207, 0, 496, 603, 644, 449, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 302, 303, 312, 364, 416, + 443, 439, 448, 0, 574, 595, 608, 619, 625, 626, + 628, 629, 630, 631, 632, 635, 633, 404, 310, 492, + 332, 370, 0, 0, 422, 469, 239, 599, 493, 893, + 915, 904, 770, 771, 894, 895, 919, 896, 773, 774, + 916, 917, 767, 768, 772, 918, 920, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 662, 640, 503, 509, 504, 505, + 506, 507, 508, 0, 510, 907, 756, 755, 0, 762, + 763, 0, 792, 793, 795, 799, 800, 801, 812, 859, + 860, 868, 870, 871, 869, 872, 873, 874, 877, 878, + 879, 880, 875, 876, 881, 775, 779, 776, 777, 778, + 790, 780, 781, 782, 783, 784, 785, 786, 787, 788, + 789, 791, 930, 931, 932, 933, 934, 935, 805, 809, + 808, 806, 807, 803, 804, 831, 830, 832, 833, 834, + 835, 836, 837, 839, 838, 840, 841, 842, 843, 844, + 845, 813, 814, 817, 818, 816, 815, 819, 828, 829, + 820, 821, 822, 823, 824, 825, 827, 826, 846, 847, + 848, 849, 850, 852, 851, 855, 856, 854, 853, 858, + 857, 754, 196, 220, 365, 0, 451, 287, 641, 610, + 481, 605, 205, 222, 921, 261, 922, 0, 0, 926, + 0, 0, 0, 928, 927, 0, 929, 891, 890, 0, + 0, 923, 924, 0, 925, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 309, + 317, 318, 321, 327, 377, 383, 384, 385, 386, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 485, 486, 487, 488, 489, + 497, 498, 511, 581, 583, 598, 617, 623, 477, 936, + 937, 938, 939, 940, 941, 942, 943, 299, 593, 624, + 591, 636, 618, 435, 375, 0, 0, 378, 280, 304, + 319, 0, 609, 499, 226, 
463, 289, 250, 961, 0, + 210, 245, 229, 258, 273, 276, 323, 388, 397, 426, + 431, 295, 270, 243, 456, 240, 482, 514, 515, 516, + 518, 392, 265, 430, 393, 0, 373, 571, 572, 315, + 0, 523, 0, 766, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 753, 0, + 0, 0, 269, 758, 0, 0, 0, 363, 266, 0, + 0, 427, 0, 203, 0, 484, 251, 374, 371, 578, + 281, 272, 268, 249, 316, 382, 425, 513, 419, 765, + 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, + 760, 761, 0, 0, 0, 0, 0, 0, 0, 0, + 322, 247, 324, 202, 410, 495, 285, 0, 95, 0, + 0, 1010, 946, 1077, 912, 950, 1011, 963, 964, 965, + 951, 0, 237, 952, 953, 244, 954, 0, 911, 796, + 798, 797, 861, 862, 863, 864, 865, 866, 867, 794, + 959, 602, 966, 967, 0, 264, 320, 271, 263, 575, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 0, 0, 750, 0, 764, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 743, 744, 0, 0, - 0, 0, 901, 0, 745, 0, 0, 753, 964, 965, - 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, + 747, 748, 0, 0, 0, 0, 906, 0, 749, 0, + 0, 757, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1004, 1005, - 2070, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 900, 0, 0, - 616, 0, 0, 898, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 951, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 952, 953, - 255, 639, 797, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 805, 806, 279, - 305, 882, 881, 880, 304, 306, 878, 879, 877, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 
282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 888, 910, 899, 765, 766, 889, 890, 914, 891, - 768, 769, 911, 912, 762, 763, 767, 913, 915, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 902, 752, 751, - 0, 758, 0, 787, 788, 790, 794, 795, 796, 807, - 854, 855, 863, 865, 866, 864, 867, 868, 869, 872, - 873, 874, 875, 870, 871, 876, 770, 774, 771, 772, - 773, 785, 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 786, 925, 926, 927, 928, 929, 930, 800, - 804, 803, 801, 802, 798, 799, 826, 825, 827, 828, - 829, 830, 831, 832, 834, 833, 835, 836, 837, 838, - 839, 840, 808, 809, 812, 813, 811, 810, 814, 823, - 824, 815, 816, 817, 818, 819, 820, 822, 821, 841, - 842, 843, 844, 845, 847, 846, 850, 851, 849, 848, - 853, 852, 750, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 916, 261, 917, 0, 0, 921, - 0, 0, 0, 923, 922, 0, 924, 886, 885, 0, - 0, 918, 919, 0, 920, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 931, - 932, 933, 934, 935, 936, 937, 938, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 956, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 761, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 0, 749, 0, 0, - 0, 269, 754, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 760, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 
756, - 757, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, - 957, 941, 1070, 907, 945, 958, 959, 960, 961, 946, - 0, 237, 947, 948, 244, 949, 0, 906, 791, 793, - 792, 856, 857, 858, 859, 860, 861, 862, 789, 954, - 962, 963, 0, 264, 319, 271, 263, 572, 0, 0, + 1006, 1007, 1008, 1009, 2088, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 399, 256, 0, + 450, 905, 0, 0, 620, 0, 0, 903, 0, 0, + 0, 0, 362, 0, 329, 197, 224, 0, 0, 409, + 458, 470, 0, 0, 0, 956, 0, 468, 423, 597, + 232, 283, 455, 429, 466, 437, 286, 0, 0, 467, + 369, 580, 447, 594, 621, 622, 262, 403, 607, 517, + 615, 639, 225, 259, 417, 502, 600, 491, 394, 576, + 577, 328, 490, 294, 201, 366, 627, 223, 476, 368, + 241, 230, 582, 604, 298, 288, 453, 634, 212, 512, + 592, 238, 480, 0, 0, 642, 246, 501, 214, 589, + 500, 390, 325, 326, 213, 0, 454, 267, 292, 0, + 0, 257, 412, 957, 958, 255, 643, 802, 614, 219, + 0, 613, 405, 579, 590, 391, 380, 218, 588, 389, + 379, 333, 810, 811, 279, 306, 887, 886, 885, 305, + 307, 883, 884, 882, 206, 601, 0, 207, 0, 496, + 603, 644, 449, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 302, 303, 312, 364, 416, 443, 439, 448, + 0, 574, 595, 608, 619, 625, 626, 628, 629, 630, + 631, 632, 635, 633, 404, 310, 492, 332, 370, 0, + 0, 422, 469, 239, 599, 493, 893, 915, 904, 770, + 771, 894, 895, 919, 896, 773, 774, 916, 917, 767, + 768, 772, 918, 920, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 640, 503, 509, 504, 505, 506, 507, 508, + 0, 510, 907, 756, 755, 0, 762, 763, 0, 792, + 793, 795, 799, 800, 801, 812, 859, 860, 868, 870, + 871, 869, 872, 873, 874, 877, 878, 879, 880, 875, + 876, 881, 775, 779, 776, 777, 778, 790, 780, 781, + 782, 783, 784, 785, 786, 787, 788, 789, 791, 930, + 931, 932, 933, 934, 935, 805, 809, 808, 806, 807, + 803, 804, 831, 830, 832, 833, 834, 835, 836, 837, + 839, 838, 840, 841, 842, 843, 844, 845, 813, 814, + 817, 818, 816, 815, 819, 828, 829, 820, 821, 822, + 823, 824, 825, 827, 826, 
846, 847, 848, 849, 850, + 852, 851, 855, 856, 854, 853, 858, 857, 754, 196, + 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, + 222, 921, 261, 922, 0, 0, 926, 0, 0, 0, + 928, 927, 0, 929, 891, 890, 0, 0, 923, 924, + 0, 925, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 309, 317, 318, 321, + 327, 377, 383, 384, 385, 386, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 485, 486, 487, 488, 489, 497, 498, 511, + 581, 583, 598, 617, 623, 477, 936, 937, 938, 939, + 940, 941, 942, 943, 299, 593, 624, 591, 636, 618, + 435, 375, 0, 0, 378, 280, 304, 319, 0, 609, + 499, 226, 463, 289, 250, 961, 0, 210, 245, 229, + 258, 273, 276, 323, 388, 397, 426, 431, 295, 270, + 243, 456, 240, 482, 514, 515, 516, 518, 392, 265, + 430, 393, 0, 373, 571, 572, 315, 0, 523, 0, + 766, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 753, 0, 0, 0, 269, + 758, 0, 0, 0, 363, 266, 0, 0, 427, 0, + 203, 0, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 765, 367, 0, 0, + 494, 398, 0, 0, 0, 0, 0, 760, 761, 0, + 0, 0, 0, 0, 0, 0, 0, 322, 247, 324, + 202, 410, 495, 285, 0, 95, 0, 0, 1010, 946, + 1077, 912, 950, 1011, 963, 964, 965, 951, 0, 237, + 952, 953, 244, 954, 0, 911, 796, 798, 797, 861, + 862, 863, 864, 865, 866, 867, 794, 959, 602, 966, + 967, 0, 264, 320, 271, 263, 575, 0, 0, 0, + 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, + 0, 0, 0, 0, 750, 0, 764, 0, 0, 0, + 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 747, 748, 0, + 0, 0, 0, 906, 0, 749, 0, 0, 757, 968, + 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, + 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, + 1009, 2086, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 296, 0, 399, 256, 0, 450, 905, 0, + 0, 620, 0, 0, 903, 0, 0, 0, 0, 362, + 0, 329, 197, 224, 0, 0, 409, 458, 470, 0, + 0, 0, 956, 0, 468, 423, 597, 232, 
283, 455, + 429, 466, 437, 286, 0, 0, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 957, 958, 255, 643, 802, 614, 219, 0, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 810, + 811, 279, 306, 887, 886, 885, 305, 307, 883, 884, + 882, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 0, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 631, 632, 635, + 633, 404, 310, 492, 332, 370, 0, 0, 422, 469, + 239, 599, 493, 893, 915, 904, 770, 771, 894, 895, + 919, 896, 773, 774, 916, 917, 767, 768, 772, 918, + 920, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 907, + 756, 755, 0, 762, 763, 0, 792, 793, 795, 799, + 800, 801, 812, 859, 860, 868, 870, 871, 869, 872, + 873, 874, 877, 878, 879, 880, 875, 876, 881, 775, + 779, 776, 777, 778, 790, 780, 781, 782, 783, 784, + 785, 786, 787, 788, 789, 791, 930, 931, 932, 933, + 934, 935, 805, 809, 808, 806, 807, 803, 804, 831, + 830, 832, 833, 834, 835, 836, 837, 839, 838, 840, + 841, 842, 843, 844, 845, 813, 814, 817, 818, 816, + 815, 819, 828, 829, 820, 821, 822, 823, 824, 825, + 827, 826, 846, 847, 848, 849, 850, 852, 851, 855, + 856, 854, 853, 858, 857, 754, 196, 220, 365, 0, + 451, 287, 641, 610, 481, 605, 205, 222, 921, 261, + 922, 0, 0, 926, 0, 0, 0, 928, 927, 0, + 929, 891, 890, 0, 0, 923, 924, 0, 925, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 309, 317, 318, 321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 
489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 936, 937, 938, 939, 940, 941, 942, + 943, 299, 593, 624, 591, 636, 618, 435, 375, 0, + 0, 378, 280, 304, 319, 0, 609, 499, 226, 463, + 289, 250, 961, 0, 210, 245, 229, 258, 273, 276, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 393, 0, + 373, 571, 572, 315, 0, 523, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 0, 0, 0, 0, 269, 0, 0, 0, + 0, 363, 266, 0, 0, 427, 0, 203, 0, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 0, 367, 0, 0, 494, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 322, 247, 324, 202, 410, 495, + 285, 0, 0, 0, 0, 0, 713, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 348, 357, 356, 337, 338, 340, 342, + 347, 354, 360, 0, 0, 602, 0, 0, 0, 264, + 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 1128, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 399, 256, 0, 450, 0, 0, 1127, 620, 0, + 0, 0, 0, 0, 1124, 1125, 362, 1085, 329, 197, + 224, 1118, 1122, 409, 458, 470, 0, 0, 0, 252, + 0, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 0, 0, 467, 369, 580, 447, 594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 584, 585, 255, + 643, 227, 614, 219, 0, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 352, 353, 279, 306, + 444, 372, 445, 305, 307, 401, 400, 402, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 0, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 
448, 0, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 633, 404, 310, + 492, 332, 370, 0, 0, 422, 469, 239, 599, 493, + 199, 0, 0, 0, 0, 253, 254, 0, 570, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 0, 0, 0, 0, + 0, 395, 0, 586, 587, 663, 381, 483, 596, 334, + 346, 349, 339, 358, 0, 359, 335, 336, 341, 343, + 344, 345, 350, 351, 355, 361, 248, 209, 387, 396, + 573, 311, 215, 216, 217, 519, 520, 521, 522, 611, + 612, 616, 204, 459, 460, 461, 462, 291, 606, 308, + 465, 464, 330, 331, 376, 446, 535, 537, 548, 552, + 554, 556, 562, 565, 536, 538, 549, 553, 555, 557, + 563, 566, 525, 527, 529, 531, 544, 543, 540, 568, + 569, 546, 551, 530, 542, 547, 560, 567, 564, 524, + 528, 532, 541, 559, 558, 539, 550, 561, 545, 533, + 526, 534, 0, 196, 220, 365, 0, 451, 287, 641, + 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 300, 301, 441, 442, 313, 314, 637, 638, 299, 593, + 624, 591, 636, 618, 435, 375, 0, 0, 378, 280, + 304, 319, 0, 609, 499, 226, 463, 289, 250, 0, + 0, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 393, 0, 373, 571, 572, + 315, 0, 523, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 363, 266, + 0, 0, 427, 0, 203, 0, 484, 251, 374, 371, + 578, 281, 272, 268, 249, 316, 382, 425, 513, 419, + 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 322, 247, 324, 202, 410, 495, 285, 0, 0, + 0, 0, 1688, 946, 0, 0, 1685, 0, 0, 
0, + 0, 1683, 0, 237, 1684, 1682, 244, 1687, 0, 911, + 348, 357, 356, 337, 338, 340, 342, 347, 354, 360, + 0, 0, 602, 0, 0, 0, 264, 320, 271, 263, + 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 399, 256, + 0, 450, 0, 0, 0, 620, 0, 0, 0, 0, + 0, 0, 0, 362, 0, 329, 197, 224, 0, 0, + 409, 458, 470, 0, 0, 0, 252, 0, 468, 423, + 597, 232, 283, 455, 429, 466, 437, 286, 0, 0, + 467, 369, 580, 447, 594, 621, 622, 262, 403, 607, + 517, 615, 639, 225, 259, 417, 502, 600, 491, 394, + 576, 577, 328, 490, 294, 201, 366, 627, 223, 476, + 368, 241, 230, 582, 604, 298, 288, 453, 634, 212, + 512, 592, 238, 480, 0, 0, 642, 246, 501, 214, + 589, 500, 390, 325, 326, 213, 0, 454, 267, 292, + 0, 0, 257, 412, 584, 585, 255, 643, 227, 614, + 219, 0, 613, 405, 579, 590, 391, 380, 218, 588, + 389, 379, 333, 352, 353, 279, 306, 444, 372, 445, + 305, 307, 401, 400, 402, 206, 601, 0, 207, 0, + 496, 603, 644, 449, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 302, 303, 312, 364, 416, 443, 439, + 448, 0, 574, 595, 608, 619, 625, 626, 628, 629, + 630, 631, 632, 635, 633, 404, 310, 492, 332, 370, + 0, 0, 422, 469, 239, 599, 493, 199, 0, 0, + 0, 0, 253, 254, 0, 570, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 640, 503, 509, 504, 505, 506, 507, + 508, 0, 510, 0, 0, 0, 0, 0, 395, 0, + 586, 587, 663, 381, 483, 596, 334, 346, 349, 339, + 358, 0, 359, 335, 336, 341, 343, 344, 345, 350, + 351, 355, 361, 248, 209, 387, 396, 573, 311, 215, + 216, 217, 519, 520, 521, 522, 611, 612, 616, 204, + 459, 460, 461, 462, 291, 606, 308, 465, 464, 330, + 331, 376, 446, 535, 537, 548, 552, 554, 556, 562, + 565, 536, 538, 549, 553, 555, 557, 
563, 566, 525, + 527, 529, 531, 544, 543, 540, 568, 569, 546, 551, + 530, 542, 547, 560, 567, 564, 524, 528, 532, 541, + 559, 558, 539, 550, 561, 545, 533, 526, 534, 0, + 196, 220, 365, 0, 451, 287, 641, 610, 481, 605, + 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, + 235, 242, 260, 275, 277, 284, 297, 309, 317, 318, + 321, 327, 377, 383, 384, 385, 386, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 485, 486, 487, 488, 489, 497, 498, + 511, 581, 583, 598, 617, 623, 477, 300, 301, 441, + 442, 313, 314, 637, 638, 299, 593, 624, 591, 636, + 618, 435, 375, 0, 0, 378, 280, 304, 319, 0, + 609, 499, 226, 463, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 323, 388, 397, 426, 431, 295, + 270, 243, 456, 240, 482, 514, 515, 516, 518, 392, + 265, 430, 393, 0, 373, 571, 572, 315, 86, 523, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 0, + 269, 0, 0, 0, 0, 363, 266, 0, 0, 427, + 0, 203, 0, 484, 251, 374, 371, 578, 281, 272, + 268, 249, 316, 382, 425, 513, 419, 0, 367, 0, + 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 322, 247, + 324, 202, 410, 495, 285, 0, 95, 0, 0, 0, + 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 237, 0, 0, 244, 0, 0, 0, 348, 357, 356, + 337, 338, 340, 342, 347, 354, 360, 0, 0, 602, + 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, - 0, 0, 0, 0, 0, 746, 0, 759, 0, 0, - 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 743, 744, - 0, 0, 0, 0, 901, 0, 745, 0, 0, 753, - 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, - 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, - 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, - 994, 995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, - 1004, 1005, 2068, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 900, - 0, 0, 616, 0, 0, 898, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 951, 0, 
466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 952, 953, 255, 639, 797, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 805, - 806, 279, 305, 882, 881, 880, 304, 306, 878, 879, - 877, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 888, 910, 899, 765, 766, 889, 890, - 914, 891, 768, 769, 911, 912, 762, 763, 767, 913, - 915, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 902, - 752, 751, 0, 758, 0, 787, 788, 790, 794, 795, - 796, 807, 854, 855, 863, 865, 866, 864, 867, 868, - 869, 872, 873, 874, 875, 870, 871, 876, 770, 774, - 771, 772, 773, 785, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 786, 925, 926, 927, 928, 929, - 930, 800, 804, 803, 801, 802, 798, 799, 826, 825, - 827, 828, 829, 830, 831, 832, 834, 833, 835, 836, - 837, 838, 839, 840, 808, 809, 812, 813, 811, 810, - 814, 823, 824, 815, 816, 817, 818, 819, 820, 822, - 821, 841, 842, 843, 844, 845, 847, 846, 850, 851, - 849, 848, 853, 852, 750, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 916, 261, 917, 0, - 0, 921, 0, 0, 0, 923, 922, 0, 924, 886, - 885, 0, 0, 918, 919, 0, 920, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 
485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 931, 932, 933, 934, 935, 936, 937, 938, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 956, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 709, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 1121, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 1120, 616, 0, 0, 0, 0, 0, - 1117, 1118, 361, 1078, 328, 197, 224, 1111, 1115, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 
441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 399, 256, 0, 450, 0, + 0, 0, 620, 0, 0, 0, 0, 0, 0, 0, + 362, 0, 329, 197, 224, 0, 0, 409, 458, 470, + 0, 0, 0, 252, 0, 468, 423, 597, 232, 283, + 455, 429, 466, 437, 286, 0, 0, 467, 369, 580, + 447, 594, 621, 622, 262, 403, 607, 517, 615, 639, + 225, 259, 417, 502, 600, 491, 394, 576, 577, 328, + 490, 294, 201, 366, 627, 223, 476, 368, 241, 230, + 582, 604, 298, 288, 453, 634, 212, 512, 592, 238, + 480, 0, 0, 642, 246, 501, 214, 589, 500, 390, + 325, 326, 213, 0, 454, 267, 292, 0, 0, 257, + 412, 584, 585, 255, 643, 227, 614, 219, 0, 613, + 405, 579, 590, 391, 380, 218, 588, 389, 379, 333, + 352, 353, 279, 306, 444, 372, 445, 305, 307, 401, + 400, 402, 206, 601, 0, 207, 0, 496, 603, 644, + 449, 211, 233, 234, 236, 0, 278, 282, 290, 293, + 302, 303, 312, 364, 416, 443, 439, 448, 0, 574, + 595, 608, 619, 625, 626, 628, 629, 630, 631, 632, + 635, 633, 404, 310, 492, 332, 370, 0, 0, 422, + 469, 239, 599, 493, 199, 0, 0, 0, 0, 253, + 254, 0, 570, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 645, 646, 
647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, + 640, 503, 509, 504, 505, 506, 507, 508, 0, 510, + 0, 0, 0, 0, 0, 395, 0, 586, 587, 663, + 381, 483, 596, 334, 346, 349, 339, 358, 0, 359, + 335, 336, 341, 343, 344, 345, 350, 351, 355, 361, + 248, 209, 387, 396, 573, 311, 215, 216, 217, 519, + 520, 521, 522, 611, 612, 616, 204, 459, 460, 461, + 462, 291, 606, 308, 465, 464, 330, 331, 376, 446, + 535, 537, 548, 552, 554, 556, 562, 565, 536, 538, + 549, 553, 555, 557, 563, 566, 525, 527, 529, 531, + 544, 543, 540, 568, 569, 546, 551, 530, 542, 547, + 560, 567, 564, 524, 528, 532, 541, 559, 558, 539, + 550, 561, 545, 533, 526, 534, 0, 196, 220, 365, + 94, 451, 287, 641, 610, 481, 605, 205, 222, 0, + 261, 0, 0, 0, 0, 0, 0, 2389, 0, 0, + 2388, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 309, 317, 318, 321, 327, 377, + 383, 384, 385, 386, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 485, 486, 487, 488, 489, 497, 498, 511, 581, 583, + 598, 617, 623, 477, 300, 301, 441, 442, 313, 314, + 637, 638, 299, 593, 624, 591, 636, 618, 435, 375, + 0, 0, 378, 280, 304, 319, 0, 609, 499, 226, + 463, 289, 250, 0, 0, 210, 245, 229, 258, 273, + 276, 323, 388, 397, 426, 431, 295, 270, 243, 456, + 240, 482, 514, 515, 516, 518, 392, 265, 430, 1750, + 0, 373, 571, 572, 315, 0, 523, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 1752, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 363, 266, 0, 0, 427, 0, 203, 0, + 484, 251, 374, 371, 578, 281, 272, 268, 249, 316, + 382, 425, 513, 419, 0, 367, 0, 0, 494, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 322, 247, 324, 202, 410, + 495, 285, 0, 0, 0, 0, 1754, 713, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 348, 357, 356, 337, 338, 340, + 342, 347, 354, 360, 0, 0, 602, 0, 0, 0, + 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, + 0, 0, 0, 228, 0, 0, 0, 1459, 0, 1460, + 1461, 
0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 1678, 941, 0, 0, 1675, 0, - 0, 0, 0, 1673, 0, 237, 1674, 1672, 244, 1677, - 0, 906, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -4288,71 +4533,71 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 
452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, + 296, 0, 399, 256, 0, 450, 0, 0, 0, 620, + 0, 0, 0, 0, 0, 0, 0, 362, 0, 329, + 197, 224, 0, 0, 409, 458, 470, 0, 0, 0, + 252, 0, 468, 423, 597, 232, 283, 455, 429, 466, + 437, 286, 0, 0, 467, 369, 580, 447, 594, 621, + 622, 262, 403, 607, 517, 615, 639, 225, 259, 417, + 502, 600, 491, 394, 576, 577, 328, 490, 294, 201, + 366, 627, 223, 476, 368, 241, 230, 582, 604, 298, + 288, 453, 634, 212, 512, 592, 238, 480, 0, 0, + 642, 246, 501, 214, 589, 500, 390, 325, 326, 213, + 0, 454, 267, 292, 0, 0, 257, 412, 584, 585, + 255, 643, 227, 614, 219, 0, 613, 405, 579, 590, + 391, 380, 218, 588, 389, 379, 333, 352, 353, 279, + 306, 444, 372, 445, 305, 307, 401, 400, 402, 206, + 601, 0, 207, 0, 496, 603, 644, 449, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 302, 303, 312, + 364, 416, 443, 439, 448, 0, 574, 595, 608, 619, + 625, 626, 628, 629, 630, 631, 632, 635, 633, 404, + 310, 492, 332, 370, 0, 0, 422, 469, 239, 599, + 493, 199, 0, 0, 0, 0, 253, 254, 0, 570, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 
535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 656, 657, 658, 659, 660, 661, 662, 640, 503, 509, + 504, 505, 506, 507, 508, 0, 510, 0, 0, 0, + 0, 0, 395, 0, 586, 587, 663, 381, 483, 596, + 334, 346, 349, 339, 358, 0, 359, 335, 336, 341, + 343, 344, 345, 350, 351, 355, 361, 248, 209, 387, + 396, 573, 311, 215, 216, 217, 519, 520, 521, 522, + 611, 612, 616, 204, 459, 460, 461, 462, 291, 606, + 308, 465, 464, 330, 331, 376, 446, 535, 537, 548, + 552, 554, 556, 562, 565, 536, 538, 549, 553, 555, + 557, 563, 566, 525, 527, 529, 531, 544, 543, 540, + 568, 569, 546, 551, 530, 542, 547, 560, 567, 564, + 524, 528, 532, 541, 559, 558, 539, 550, 561, 545, + 533, 526, 534, 0, 196, 220, 365, 0, 451, 287, + 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 0, 392, 372, 568, 569, 314, 86, 520, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 411, - 0, 0, 0, 0, 0, 0, 0, 0, 269, 0, - 0, 0, 0, 362, 266, 0, 0, 425, 0, 203, - 0, 481, 251, 373, 370, 575, 281, 272, 268, 249, - 315, 381, 423, 510, 417, 0, 366, 0, 0, 491, - 396, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 321, 247, 323, 202, - 408, 492, 285, 0, 95, 0, 0, 0, 194, 0, - 0, 0, 0, 
0, 0, 0, 0, 0, 237, 0, - 0, 244, 0, 0, 0, 347, 356, 355, 336, 337, - 339, 341, 346, 353, 359, 0, 0, 0, 0, 0, - 264, 319, 271, 263, 572, 0, 0, 0, 0, 0, - 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 309, 317, 318, 321, 327, 377, 383, 384, 385, + 386, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 485, 486, 487, + 488, 489, 497, 498, 511, 581, 583, 598, 617, 623, + 477, 300, 301, 441, 442, 313, 314, 637, 638, 299, + 593, 624, 591, 636, 618, 435, 375, 0, 0, 378, + 280, 304, 319, 0, 609, 499, 226, 463, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 323, 388, + 397, 426, 431, 295, 270, 243, 456, 240, 482, 514, + 515, 516, 518, 392, 265, 430, 393, 0, 373, 571, + 572, 315, 86, 523, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 0, 0, 0, 0, 269, 0, 0, 0, 0, 363, + 266, 0, 0, 427, 0, 203, 0, 484, 251, 374, + 371, 578, 281, 272, 268, 249, 316, 382, 425, 513, + 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 322, 247, 324, 202, 410, 495, 285, 0, + 95, 0, 1727, 0, 713, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 348, 357, 356, 337, 338, 340, 342, 347, 354, + 360, 0, 0, 602, 0, 0, 0, 264, 320, 271, + 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, + 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -4360,72 +4605,144 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 399, + 256, 0, 450, 0, 0, 0, 620, 0, 0, 0, + 0, 0, 0, 0, 362, 0, 329, 197, 224, 0, + 0, 409, 458, 470, 0, 0, 0, 252, 0, 468, + 423, 597, 232, 283, 455, 429, 466, 437, 286, 0, + 0, 467, 369, 580, 447, 594, 621, 622, 262, 403, + 607, 517, 615, 639, 225, 259, 417, 502, 600, 491, + 394, 
576, 577, 328, 490, 294, 201, 366, 627, 223, + 476, 368, 241, 230, 582, 604, 298, 288, 453, 634, + 212, 512, 592, 238, 480, 0, 0, 642, 246, 501, + 214, 589, 500, 390, 325, 326, 213, 0, 454, 267, + 292, 0, 0, 257, 412, 584, 585, 255, 643, 227, + 614, 219, 0, 613, 405, 579, 590, 391, 380, 218, + 588, 389, 379, 333, 352, 353, 279, 306, 444, 372, + 445, 305, 307, 401, 400, 402, 206, 601, 0, 207, + 0, 496, 603, 644, 449, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 302, 303, 312, 364, 416, 443, + 439, 448, 0, 574, 595, 608, 619, 625, 626, 628, + 629, 630, 631, 632, 635, 633, 404, 310, 492, 332, + 370, 0, 0, 422, 469, 239, 599, 493, 199, 0, + 0, 0, 0, 253, 254, 0, 570, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 662, 640, 503, 509, 504, 505, 506, + 507, 508, 0, 510, 0, 0, 0, 0, 0, 395, + 0, 586, 587, 663, 381, 483, 596, 334, 346, 349, + 339, 358, 0, 359, 335, 336, 341, 343, 344, 345, + 350, 351, 355, 361, 248, 209, 387, 396, 573, 311, + 215, 216, 217, 519, 520, 521, 522, 611, 612, 616, + 204, 459, 460, 461, 462, 291, 606, 308, 465, 464, + 330, 331, 376, 446, 535, 537, 548, 552, 554, 556, + 562, 565, 536, 538, 549, 553, 555, 557, 563, 566, + 525, 527, 529, 531, 544, 543, 540, 568, 569, 546, + 551, 530, 542, 547, 560, 567, 564, 524, 528, 532, + 541, 559, 558, 539, 550, 561, 545, 533, 526, 534, + 0, 196, 220, 365, 94, 451, 287, 641, 610, 481, + 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 296, 0, 397, 256, 0, 448, 0, 0, 0, 616, - 0, 0, 0, 0, 0, 0, 0, 361, 0, 328, - 197, 224, 0, 0, 407, 456, 468, 0, 0, 0, - 252, 0, 466, 421, 594, 232, 283, 453, 427, 464, - 435, 286, 0, 0, 465, 368, 577, 445, 591, 617, - 618, 262, 401, 603, 514, 611, 635, 225, 259, 415, - 499, 597, 488, 393, 573, 574, 327, 487, 294, 201, - 365, 623, 223, 474, 367, 241, 230, 579, 600, 288, - 451, 630, 212, 509, 589, 238, 478, 0, 0, 638, - 246, 498, 214, 586, 497, 389, 324, 325, 213, 0, - 452, 267, 292, 0, 0, 257, 
410, 581, 582, 255, - 639, 227, 610, 219, 0, 609, 403, 576, 587, 390, - 379, 218, 585, 388, 378, 332, 351, 352, 279, 305, - 442, 371, 443, 304, 306, 399, 398, 400, 206, 598, - 0, 207, 0, 493, 599, 640, 447, 211, 233, 234, - 236, 0, 278, 282, 290, 293, 301, 302, 311, 363, - 414, 441, 437, 446, 0, 571, 592, 604, 615, 621, - 622, 624, 625, 626, 627, 628, 631, 629, 402, 309, - 489, 331, 369, 0, 0, 420, 467, 239, 596, 490, - 199, 0, 0, 0, 0, 253, 254, 0, 567, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 641, 642, - 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, - 653, 654, 655, 656, 657, 658, 636, 500, 506, 501, - 502, 503, 504, 505, 0, 507, 0, 0, 0, 0, - 0, 0, 583, 584, 659, 380, 480, 593, 333, 345, - 348, 338, 357, 0, 358, 334, 335, 340, 342, 343, - 344, 349, 350, 354, 360, 248, 209, 386, 394, 570, - 310, 215, 216, 217, 516, 517, 518, 519, 607, 608, - 612, 204, 457, 458, 459, 460, 291, 602, 307, 463, - 462, 329, 330, 375, 444, 532, 534, 545, 549, 551, - 553, 559, 562, 533, 535, 546, 550, 552, 554, 560, - 563, 522, 524, 526, 528, 541, 540, 537, 565, 566, - 543, 548, 527, 539, 544, 557, 564, 561, 521, 525, - 529, 538, 556, 555, 536, 547, 558, 542, 530, 523, - 531, 0, 196, 220, 364, 94, 449, 287, 637, 606, - 601, 205, 222, 0, 261, 0, 0, 0, 0, 0, - 0, 2369, 0, 0, 2368, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, - 231, 235, 242, 260, 275, 277, 284, 297, 308, 316, - 317, 320, 326, 376, 382, 383, 384, 385, 404, 405, - 406, 409, 412, 413, 416, 418, 419, 422, 426, 430, - 431, 432, 434, 436, 438, 450, 455, 469, 470, 471, - 472, 473, 476, 477, 482, 483, 484, 485, 486, 494, - 495, 508, 578, 580, 595, 613, 619, 475, 299, 300, - 439, 440, 312, 313, 633, 634, 298, 590, 620, 588, - 632, 614, 433, 374, 0, 0, 377, 280, 303, 318, - 0, 605, 496, 226, 461, 289, 250, 0, 0, 210, - 245, 229, 258, 273, 276, 322, 387, 395, 424, 429, - 295, 270, 243, 454, 240, 479, 511, 512, 513, 515, - 391, 265, 428, 1735, 0, 372, 568, 569, 314, 520, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 411, 0, 0, 0, 1737, 0, 0, 
0, 0, - 269, 0, 0, 0, 0, 362, 266, 0, 0, 425, - 0, 203, 0, 481, 251, 373, 370, 575, 281, 272, - 268, 249, 315, 381, 423, 510, 417, 0, 366, 0, - 0, 491, 396, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 321, 247, - 323, 202, 408, 492, 285, 0, 0, 0, 0, 1739, - 709, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 237, 0, 0, 244, 0, 0, 0, 347, 356, 355, - 336, 337, 339, 341, 346, 353, 359, 0, 0, 0, - 0, 0, 264, 319, 271, 263, 572, 0, 0, 0, - 0, 0, 0, 0, 0, 228, 0, 0, 0, 1451, - 0, 1452, 1453, 0, 0, 0, 0, 0, 0, 0, - 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 231, 235, 242, 260, 275, 277, 284, 297, 309, 317, + 318, 321, 327, 377, 383, 384, 385, 386, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 485, 486, 487, 488, 489, 497, + 498, 511, 581, 583, 598, 617, 623, 477, 300, 301, + 441, 442, 313, 314, 637, 638, 299, 593, 624, 591, + 636, 618, 435, 375, 0, 0, 378, 280, 304, 319, + 0, 609, 499, 226, 463, 289, 250, 0, 0, 210, + 245, 229, 258, 273, 276, 323, 388, 397, 426, 431, + 295, 270, 243, 456, 240, 482, 514, 515, 516, 518, + 392, 265, 430, 393, 0, 373, 571, 572, 315, 0, + 523, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 363, 266, 0, 0, + 427, 0, 203, 0, 484, 251, 374, 371, 578, 281, + 272, 268, 249, 316, 382, 425, 513, 419, 0, 367, + 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 322, + 247, 324, 202, 410, 495, 285, 0, 95, 0, 0, + 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 348, 357, + 356, 337, 338, 340, 342, 347, 354, 360, 0, 0, + 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, + 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 296, 0, 399, 256, 0, 450, + 0, 0, 
0, 620, 0, 0, 0, 0, 0, 0, + 0, 362, 0, 329, 197, 224, 0, 0, 409, 458, + 470, 0, 0, 0, 252, 0, 468, 423, 597, 232, + 283, 455, 429, 466, 437, 286, 0, 0, 467, 369, + 580, 447, 594, 621, 622, 262, 403, 607, 517, 615, + 639, 225, 259, 417, 502, 600, 491, 394, 576, 577, + 328, 490, 294, 201, 366, 627, 223, 476, 368, 241, + 230, 582, 604, 298, 288, 453, 634, 212, 512, 592, + 238, 480, 0, 0, 642, 246, 501, 214, 589, 500, + 390, 325, 326, 213, 0, 454, 267, 292, 0, 0, + 257, 412, 584, 585, 255, 643, 227, 614, 219, 0, + 613, 405, 579, 590, 391, 380, 218, 588, 389, 379, + 333, 352, 353, 279, 306, 444, 372, 445, 305, 307, + 401, 400, 402, 206, 601, 0, 207, 0, 496, 603, + 644, 449, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 302, 303, 312, 364, 416, 443, 439, 448, 0, + 574, 595, 608, 619, 625, 626, 628, 629, 630, 631, + 632, 635, 633, 404, 310, 492, 332, 370, 0, 0, + 422, 469, 239, 599, 493, 199, 0, 0, 0, 0, + 253, 254, 0, 570, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 640, 503, 509, 504, 505, 506, 507, 508, 0, + 510, 0, 0, 0, 0, 0, 395, 0, 586, 587, + 663, 381, 483, 596, 334, 346, 349, 339, 358, 0, + 359, 335, 336, 341, 343, 344, 345, 350, 351, 355, + 361, 248, 209, 387, 396, 573, 311, 215, 216, 217, + 519, 520, 521, 522, 611, 612, 616, 204, 459, 460, + 461, 462, 291, 606, 308, 465, 464, 330, 331, 376, + 446, 535, 537, 548, 552, 554, 556, 562, 565, 536, + 538, 549, 553, 555, 557, 563, 566, 525, 527, 529, + 531, 544, 543, 540, 568, 569, 546, 551, 530, 542, + 547, 560, 567, 564, 524, 528, 532, 541, 559, 558, + 539, 550, 561, 545, 533, 526, 534, 0, 196, 220, + 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, + 0, 261, 0, 0, 0, 0, 0, 0, 2389, 0, + 0, 2388, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, + 260, 275, 277, 284, 297, 309, 317, 318, 321, 327, + 377, 383, 384, 385, 386, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 
471, 472, 473, 474, 475, 478, + 479, 485, 486, 487, 488, 489, 497, 498, 511, 581, + 583, 598, 617, 623, 477, 300, 301, 441, 442, 313, + 314, 637, 638, 299, 593, 624, 591, 636, 618, 435, + 375, 0, 0, 378, 280, 304, 319, 0, 609, 499, + 226, 463, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 323, 388, 397, 426, 431, 295, 270, 243, + 456, 240, 482, 514, 515, 516, 518, 392, 265, 430, + 393, 0, 373, 571, 572, 315, 0, 523, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 2336, 0, 0, 0, 0, 269, 0, + 0, 0, 0, 363, 266, 0, 0, 427, 0, 203, + 0, 484, 251, 374, 371, 578, 281, 272, 268, 249, + 316, 382, 425, 513, 419, 0, 367, 0, 0, 494, + 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 322, 247, 324, 202, + 410, 495, 285, 0, 0, 0, 0, 1933, 194, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 348, 357, 356, 337, 338, + 340, 342, 347, 354, 360, 0, 0, 602, 0, 0, + 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -4433,68 +4750,142 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 296, 0, 397, 256, 0, 448, 0, 0, - 0, 616, 0, 0, 0, 0, 0, 0, 0, 361, - 0, 328, 197, 224, 0, 0, 407, 456, 468, 0, - 0, 0, 252, 0, 466, 421, 594, 232, 283, 453, - 427, 464, 435, 286, 0, 0, 465, 368, 577, 445, - 591, 617, 618, 262, 401, 603, 514, 611, 635, 225, - 259, 415, 499, 597, 488, 393, 573, 574, 327, 487, - 294, 201, 365, 623, 223, 474, 367, 241, 230, 579, - 600, 288, 451, 630, 212, 509, 589, 238, 478, 0, - 0, 638, 246, 498, 214, 586, 497, 389, 324, 325, - 213, 0, 452, 267, 292, 0, 0, 257, 410, 581, - 582, 255, 639, 227, 610, 219, 0, 609, 403, 576, - 587, 390, 379, 218, 585, 388, 378, 332, 351, 352, - 279, 305, 442, 371, 443, 304, 306, 399, 398, 400, - 206, 598, 0, 207, 0, 493, 599, 640, 447, 211, - 233, 234, 236, 0, 278, 282, 290, 293, 301, 302, - 311, 363, 414, 
441, 437, 446, 0, 571, 592, 604, - 615, 621, 622, 624, 625, 626, 627, 628, 631, 629, - 402, 309, 489, 331, 369, 0, 0, 420, 467, 239, - 596, 490, 199, 0, 0, 0, 0, 253, 254, 0, - 567, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, 636, 500, - 506, 501, 502, 503, 504, 505, 0, 507, 0, 0, - 0, 0, 0, 0, 583, 584, 659, 380, 480, 593, - 333, 345, 348, 338, 357, 0, 358, 334, 335, 340, - 342, 343, 344, 349, 350, 354, 360, 248, 209, 386, - 394, 570, 310, 215, 216, 217, 516, 517, 518, 519, - 607, 608, 612, 204, 457, 458, 459, 460, 291, 602, - 307, 463, 462, 329, 330, 375, 444, 532, 534, 545, - 549, 551, 553, 559, 562, 533, 535, 546, 550, 552, - 554, 560, 563, 522, 524, 526, 528, 541, 540, 537, - 565, 566, 543, 548, 527, 539, 544, 557, 564, 561, - 521, 525, 529, 538, 556, 555, 536, 547, 558, 542, - 530, 523, 531, 0, 196, 220, 364, 0, 449, 287, - 637, 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, - 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, - 308, 316, 317, 320, 326, 376, 382, 383, 384, 385, - 404, 405, 406, 409, 412, 413, 416, 418, 419, 422, - 426, 430, 431, 432, 434, 436, 438, 450, 455, 469, - 470, 471, 472, 473, 476, 477, 482, 483, 484, 485, - 486, 494, 495, 508, 578, 580, 595, 613, 619, 475, - 299, 300, 439, 440, 312, 313, 633, 634, 298, 590, - 620, 588, 632, 614, 433, 374, 0, 0, 377, 280, - 303, 318, 0, 605, 496, 226, 461, 289, 250, 0, - 0, 210, 245, 229, 258, 273, 276, 322, 387, 395, - 424, 429, 295, 270, 243, 454, 240, 479, 511, 512, - 513, 515, 391, 265, 428, 0, 392, 372, 568, 569, - 314, 86, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 95, - 0, 1716, 0, 709, 0, 0, 0, 0, 0, 0, - 
0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 296, 0, 399, 256, 0, 450, 0, 0, 0, + 620, 0, 0, 0, 0, 0, 0, 0, 362, 0, + 329, 197, 224, 0, 0, 409, 458, 470, 0, 0, + 0, 252, 0, 468, 423, 597, 232, 283, 455, 429, + 466, 437, 286, 0, 2334, 467, 369, 580, 447, 594, + 621, 622, 262, 403, 607, 517, 615, 639, 225, 259, + 417, 502, 600, 491, 394, 576, 577, 328, 490, 294, + 201, 366, 627, 223, 476, 368, 241, 230, 582, 604, + 298, 288, 453, 634, 212, 512, 592, 238, 480, 0, + 0, 642, 246, 501, 214, 589, 500, 390, 325, 326, + 213, 0, 454, 267, 292, 0, 0, 257, 412, 584, + 585, 255, 643, 227, 614, 219, 0, 613, 405, 579, + 590, 391, 380, 218, 588, 389, 379, 333, 352, 353, + 279, 306, 444, 372, 445, 305, 307, 401, 400, 402, + 206, 601, 0, 207, 0, 496, 603, 644, 449, 211, + 233, 234, 236, 0, 278, 282, 290, 293, 302, 303, + 312, 364, 416, 443, 439, 448, 0, 574, 595, 608, + 619, 625, 626, 628, 629, 630, 631, 632, 635, 633, + 404, 310, 492, 332, 370, 0, 0, 422, 469, 239, + 599, 493, 199, 0, 0, 0, 0, 253, 254, 0, + 570, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 640, 503, + 509, 504, 505, 506, 507, 508, 0, 510, 0, 0, + 0, 0, 0, 395, 0, 586, 587, 663, 381, 483, + 596, 334, 346, 349, 339, 358, 0, 359, 335, 336, + 341, 343, 344, 345, 350, 351, 355, 361, 248, 209, + 387, 396, 573, 311, 215, 216, 217, 519, 520, 521, + 522, 611, 612, 616, 204, 459, 460, 461, 462, 291, + 606, 308, 465, 464, 330, 331, 376, 446, 535, 537, + 548, 552, 554, 556, 562, 565, 536, 538, 549, 553, + 555, 557, 563, 566, 525, 527, 529, 531, 544, 543, + 540, 568, 569, 546, 551, 530, 542, 547, 560, 567, + 564, 524, 528, 532, 541, 559, 558, 539, 550, 561, + 545, 533, 526, 534, 0, 196, 220, 365, 0, 451, + 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, 
+ 284, 297, 309, 317, 318, 321, 327, 377, 383, 384, + 385, 386, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 485, 486, + 487, 488, 489, 497, 498, 511, 581, 583, 598, 617, + 623, 477, 300, 301, 441, 442, 313, 314, 637, 638, + 299, 593, 624, 591, 636, 618, 435, 375, 0, 0, + 378, 280, 304, 319, 0, 609, 499, 226, 463, 289, + 250, 0, 0, 210, 245, 229, 258, 273, 276, 323, + 388, 397, 426, 431, 295, 270, 243, 456, 240, 482, + 514, 515, 516, 518, 392, 265, 430, 393, 0, 373, + 571, 572, 315, 0, 523, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 363, 266, 0, 0, 427, 0, 203, 0, 484, 251, + 374, 371, 578, 281, 272, 268, 249, 316, 382, 425, + 513, 419, 0, 367, 0, 0, 494, 398, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 322, 247, 324, 202, 410, 495, 285, + 0, 0, 0, 0, 0, 713, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 348, 357, 356, 337, 338, 340, 342, 347, + 354, 360, 0, 0, 602, 0, 0, 0, 264, 320, + 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 1079, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 399, 256, 0, 450, 0, 0, 0, 620, 0, 0, + 0, 0, 0, 0, 0, 362, 1085, 329, 197, 224, + 1083, 0, 409, 458, 470, 0, 0, 0, 252, 0, + 468, 423, 597, 232, 283, 455, 429, 466, 437, 286, + 0, 0, 467, 369, 580, 447, 594, 621, 622, 262, + 403, 607, 517, 615, 639, 225, 259, 417, 502, 600, + 491, 394, 576, 577, 328, 490, 294, 201, 366, 627, + 223, 476, 368, 241, 230, 582, 604, 298, 288, 453, + 634, 212, 512, 592, 238, 480, 0, 0, 642, 246, + 501, 214, 589, 500, 390, 325, 326, 213, 0, 454, + 267, 292, 0, 0, 257, 412, 584, 585, 255, 643, + 227, 614, 219, 0, 613, 405, 579, 590, 391, 380, + 218, 
588, 389, 379, 333, 352, 353, 279, 306, 444, + 372, 445, 305, 307, 401, 400, 402, 206, 601, 0, + 207, 0, 496, 603, 644, 449, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 302, 303, 312, 364, 416, + 443, 439, 448, 0, 574, 595, 608, 619, 625, 626, + 628, 629, 630, 631, 632, 635, 633, 404, 310, 492, + 332, 370, 0, 0, 422, 469, 239, 599, 493, 199, + 0, 0, 0, 0, 253, 254, 0, 570, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 662, 640, 503, 509, 504, 505, + 506, 507, 508, 0, 510, 0, 0, 0, 0, 0, + 395, 0, 586, 587, 663, 381, 483, 596, 334, 346, + 349, 339, 358, 0, 359, 335, 336, 341, 343, 344, + 345, 350, 351, 355, 361, 248, 209, 387, 396, 573, + 311, 215, 216, 217, 519, 520, 521, 522, 611, 612, + 616, 204, 459, 460, 461, 462, 291, 606, 308, 465, + 464, 330, 331, 376, 446, 535, 537, 548, 552, 554, + 556, 562, 565, 536, 538, 549, 553, 555, 557, 563, + 566, 525, 527, 529, 531, 544, 543, 540, 568, 569, + 546, 551, 530, 542, 547, 560, 567, 564, 524, 528, + 532, 541, 559, 558, 539, 550, 561, 545, 533, 526, + 534, 0, 196, 220, 365, 0, 451, 287, 641, 610, + 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 309, + 317, 318, 321, 327, 377, 383, 384, 385, 386, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 485, 486, 487, 488, 489, + 497, 498, 511, 581, 583, 598, 617, 623, 477, 300, + 301, 441, 442, 313, 314, 637, 638, 299, 593, 624, + 591, 636, 618, 435, 375, 0, 0, 378, 280, 304, + 319, 0, 609, 499, 226, 463, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 323, 388, 397, 426, + 431, 295, 270, 243, 456, 240, 482, 514, 515, 516, + 518, 392, 265, 430, 393, 0, 373, 571, 572, 315, + 0, 523, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 2336, 0, 0, + 0, 0, 269, 0, 0, 0, 0, 363, 266, 0, + 0, 427, 0, 203, 0, 484, 251, 374, 
371, 578, + 281, 272, 268, 249, 316, 382, 425, 513, 419, 0, + 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 322, 247, 324, 202, 410, 495, 285, 0, 0, 0, + 0, 1933, 194, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 244, 0, 0, 0, 348, + 357, 356, 337, 338, 340, 342, 347, 354, 360, 0, + 0, 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, @@ -4505,285 +4896,287 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 
329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 94, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 0, 0, 0, 296, 0, 399, 256, 0, + 450, 0, 0, 0, 620, 0, 0, 0, 0, 0, + 0, 0, 362, 0, 329, 197, 224, 0, 0, 409, + 458, 470, 0, 0, 0, 252, 0, 468, 423, 597, + 232, 283, 455, 429, 466, 437, 286, 0, 0, 467, + 369, 580, 447, 594, 621, 622, 262, 403, 607, 517, + 615, 639, 225, 259, 417, 502, 600, 491, 394, 576, + 577, 328, 490, 294, 201, 366, 627, 223, 476, 368, + 241, 230, 582, 604, 298, 288, 453, 634, 212, 512, + 592, 238, 480, 0, 0, 642, 246, 501, 214, 589, + 500, 390, 325, 326, 213, 0, 454, 267, 292, 0, + 0, 257, 412, 584, 585, 255, 643, 227, 614, 219, + 0, 613, 405, 579, 590, 391, 380, 218, 588, 389, + 379, 333, 352, 353, 279, 306, 444, 372, 445, 305, + 307, 401, 400, 402, 206, 601, 0, 207, 0, 496, + 603, 644, 449, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 302, 303, 312, 364, 416, 443, 439, 448, + 0, 574, 595, 608, 619, 625, 626, 628, 629, 630, + 631, 632, 635, 633, 404, 310, 492, 332, 370, 0, + 0, 422, 469, 239, 599, 493, 199, 0, 0, 0, + 0, 253, 254, 0, 570, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 640, 503, 509, 504, 505, 506, 507, 508, + 0, 510, 0, 0, 0, 0, 0, 395, 0, 586, + 587, 663, 381, 483, 596, 334, 346, 349, 339, 358, + 0, 359, 335, 336, 341, 343, 344, 345, 350, 351, + 355, 361, 248, 209, 387, 396, 573, 311, 215, 216, + 217, 519, 520, 521, 522, 611, 612, 616, 204, 459, + 460, 461, 462, 291, 606, 308, 465, 464, 330, 331, + 376, 446, 535, 537, 548, 552, 554, 556, 562, 565, + 536, 538, 549, 553, 555, 557, 563, 566, 525, 527, + 529, 531, 544, 543, 540, 568, 569, 546, 551, 530, + 542, 547, 560, 567, 564, 524, 528, 532, 541, 559, + 558, 539, 550, 561, 
545, 533, 526, 534, 0, 196, + 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, + 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 309, 317, 318, 321, + 327, 377, 383, 384, 385, 386, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 485, 486, 487, 488, 489, 497, 498, 511, + 581, 583, 598, 617, 623, 477, 300, 301, 441, 442, + 313, 314, 637, 638, 299, 593, 624, 591, 636, 618, + 435, 375, 0, 0, 378, 280, 304, 319, 0, 609, + 499, 226, 463, 289, 250, 0, 0, 210, 245, 229, + 258, 273, 276, 323, 388, 397, 426, 431, 295, 270, + 243, 456, 240, 482, 514, 515, 516, 518, 392, 265, + 430, 393, 0, 373, 571, 572, 315, 0, 523, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 363, 266, 0, 0, 427, 0, + 203, 0, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 0, 367, 0, 0, + 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 322, 247, 324, + 202, 410, 495, 285, 0, 0, 0, 1727, 0, 713, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 348, 357, 356, 337, + 338, 340, 342, 347, 354, 360, 0, 0, 602, 0, + 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, + 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 
391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 95, 0, 0, 0, 194, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 296, 0, 399, 256, 0, 450, 0, 0, + 0, 620, 0, 0, 0, 3678, 0, 0, 0, 362, + 0, 329, 197, 224, 0, 0, 409, 458, 470, 0, + 0, 0, 252, 0, 468, 423, 597, 232, 283, 455, + 429, 466, 437, 286, 0, 0, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 584, 585, 255, 643, 227, 614, 219, 0, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 352, + 353, 279, 306, 444, 372, 445, 305, 307, 401, 400, + 402, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 0, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 631, 632, 635, + 633, 404, 310, 492, 332, 370, 0, 0, 422, 469, + 239, 599, 493, 199, 0, 0, 0, 0, 253, 254, + 0, 570, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 0, + 0, 0, 0, 0, 395, 0, 586, 587, 663, 381, + 483, 596, 334, 346, 349, 339, 358, 0, 359, 
335, + 336, 341, 343, 344, 345, 350, 351, 355, 361, 248, + 209, 387, 396, 573, 311, 215, 216, 217, 519, 520, + 521, 522, 611, 612, 616, 204, 459, 460, 461, 462, + 291, 606, 308, 465, 464, 330, 331, 376, 446, 535, + 537, 548, 552, 554, 556, 562, 565, 536, 538, 549, + 553, 555, 557, 563, 566, 525, 527, 529, 531, 544, + 543, 540, 568, 569, 546, 551, 530, 542, 547, 560, + 567, 564, 524, 528, 532, 541, 559, 558, 539, 550, + 561, 545, 533, 526, 534, 0, 196, 220, 365, 0, + 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 
462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 0, 261, 0, 0, 0, 0, 0, 0, 2369, 0, - 0, 2368, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 2319, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 0, 0, 0, 1918, 194, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, - 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 309, 317, 318, 321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 300, 301, 441, 442, 313, 314, 637, + 638, 299, 593, 624, 591, 636, 618, 435, 375, 0, + 0, 378, 280, 304, 319, 0, 609, 499, 226, 463, + 289, 
250, 0, 0, 210, 245, 229, 258, 273, 276, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 393, 0, + 373, 571, 572, 315, 0, 523, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 0, 0, 0, 0, 269, 0, 0, 0, + 0, 363, 266, 0, 0, 427, 0, 203, 0, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 0, 367, 0, 0, 494, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 322, 247, 324, 202, 410, 495, + 285, 0, 0, 0, 0, 2097, 713, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 348, 357, 356, 337, 338, 340, 342, + 347, 354, 360, 0, 0, 602, 0, 0, 0, 264, + 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2098, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 2317, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 
641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 0, 399, 256, 0, 450, 0, 0, 0, 620, 0, + 0, 0, 0, 0, 0, 0, 362, 0, 329, 197, + 224, 0, 0, 409, 458, 470, 0, 0, 0, 252, + 0, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 0, 0, 467, 369, 580, 447, 594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 584, 585, 255, + 643, 227, 614, 219, 0, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 352, 353, 279, 306, + 444, 372, 445, 305, 307, 401, 400, 402, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 0, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 448, 0, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 633, 404, 310, + 492, 332, 370, 0, 0, 422, 469, 239, 599, 493, + 199, 0, 0, 0, 0, 253, 254, 0, 570, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 0, 0, 0, 0, + 0, 395, 0, 586, 587, 663, 381, 483, 596, 334, + 346, 349, 339, 358, 0, 359, 335, 336, 341, 343, + 344, 345, 350, 351, 355, 361, 
248, 209, 387, 396, + 573, 311, 215, 216, 217, 519, 520, 521, 522, 611, + 612, 616, 204, 459, 460, 461, 462, 291, 606, 308, + 465, 464, 330, 331, 376, 446, 535, 537, 548, 552, + 554, 556, 562, 565, 536, 538, 549, 553, 555, 557, + 563, 566, 525, 527, 529, 531, 544, 543, 540, 568, + 569, 546, 551, 530, 542, 547, 560, 567, 564, 524, + 528, 532, 541, 559, 558, 539, 550, 561, 545, 533, + 526, 534, 0, 196, 220, 365, 0, 451, 287, 641, + 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 300, 301, 441, 442, 313, 314, 637, 638, 299, 593, + 624, 591, 636, 618, 435, 375, 0, 0, 378, 280, + 304, 319, 0, 609, 499, 226, 463, 289, 250, 0, + 0, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 393, 0, 373, 571, 572, + 315, 0, 523, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 363, 266, + 0, 0, 427, 0, 203, 0, 484, 251, 374, 371, + 578, 281, 272, 268, 249, 316, 382, 425, 513, 419, + 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 322, 247, 324, 202, 410, 495, 285, 0, 0, + 0, 0, 2833, 713, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 348, 357, 356, 337, 338, 340, 342, 347, 354, 360, + 0, 0, 602, 0, 0, 0, 264, 320, 271, 263, + 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2834, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 
399, 256, + 0, 450, 0, 0, 0, 620, 0, 0, 0, 0, + 0, 0, 0, 362, 0, 329, 197, 224, 0, 0, + 409, 458, 470, 0, 0, 0, 252, 0, 468, 423, + 597, 232, 283, 455, 429, 466, 437, 286, 0, 0, + 467, 369, 580, 447, 594, 621, 622, 262, 403, 607, + 517, 615, 639, 225, 259, 417, 502, 600, 491, 394, + 576, 577, 328, 490, 294, 201, 366, 627, 223, 476, + 368, 241, 230, 582, 604, 298, 288, 453, 634, 212, + 512, 592, 238, 480, 0, 0, 642, 246, 501, 214, + 589, 500, 390, 325, 326, 213, 0, 454, 267, 292, + 0, 0, 257, 412, 584, 585, 255, 643, 227, 614, + 219, 0, 613, 405, 579, 590, 391, 380, 218, 588, + 389, 379, 333, 352, 353, 279, 306, 444, 372, 445, + 305, 307, 401, 400, 402, 206, 601, 0, 207, 0, + 496, 603, 644, 449, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 302, 303, 312, 364, 416, 443, 439, + 448, 0, 574, 595, 608, 619, 625, 626, 628, 629, + 630, 631, 632, 635, 633, 404, 310, 492, 332, 370, + 0, 0, 422, 469, 239, 599, 493, 199, 0, 0, + 0, 0, 253, 254, 0, 570, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 640, 503, 509, 504, 505, 506, 507, + 508, 0, 510, 0, 0, 0, 0, 0, 395, 0, + 586, 587, 663, 381, 483, 596, 334, 346, 349, 339, + 358, 0, 359, 335, 336, 341, 343, 344, 345, 350, + 351, 355, 361, 248, 209, 387, 396, 573, 311, 215, + 216, 217, 519, 520, 521, 522, 611, 612, 616, 204, + 459, 460, 461, 462, 291, 606, 308, 465, 464, 330, + 331, 376, 446, 535, 537, 548, 552, 554, 556, 562, + 565, 536, 538, 549, 553, 555, 557, 563, 566, 525, + 527, 529, 531, 544, 543, 540, 568, 569, 546, 551, + 530, 542, 547, 560, 567, 564, 524, 528, 532, 541, + 559, 558, 539, 550, 561, 545, 533, 526, 534, 0, + 196, 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 
438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 0, 709, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, - 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, - 0, 0, 0, 0, 0, 0, 0, 0, 1072, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 0, 0, 0, 0, 361, 1078, - 328, 197, 224, 1076, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 
293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 2319, 0, 0, 0, - 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, 
- 1918, 194, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 235, 242, 260, 275, 277, 284, 297, 309, 317, 318, + 321, 327, 377, 383, 384, 385, 386, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 485, 486, 487, 488, 489, 497, 498, + 511, 581, 583, 598, 617, 623, 477, 300, 301, 441, + 442, 313, 314, 637, 638, 299, 593, 624, 591, 636, + 618, 435, 375, 0, 0, 378, 280, 304, 319, 0, + 609, 499, 226, 463, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 323, 388, 397, 426, 431, 295, + 270, 243, 456, 240, 482, 514, 515, 516, 518, 392, + 265, 430, 393, 0, 373, 571, 572, 315, 0, 523, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 0, + 269, 0, 0, 0, 0, 363, 266, 0, 0, 427, + 0, 203, 0, 484, 251, 374, 371, 578, 281, 272, + 268, 249, 316, 382, 425, 513, 419, 0, 367, 0, + 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 322, 247, + 324, 202, 410, 495, 285, 0, 0, 0, 0, 0, + 713, 0, 0, 0, 0, 2818, 0, 0, 0, 0, + 237, 0, 0, 244, 2819, 0, 0, 348, 357, 356, + 337, 338, 340, 342, 347, 354, 360, 0, 0, 602, + 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, @@ -4794,360 +5187,72 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 
257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 
0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 1716, 0, 709, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 3897, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 
349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 2079, 709, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 2080, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 
296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 
438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 0, 0, 0, 2813, 709, 0, 0, + 0, 0, 0, 296, 0, 399, 256, 0, 450, 0, + 0, 0, 620, 0, 0, 0, 0, 0, 0, 0, + 362, 0, 329, 197, 224, 0, 0, 409, 458, 470, + 0, 0, 0, 252, 0, 468, 423, 597, 232, 283, + 455, 429, 466, 437, 286, 0, 0, 467, 369, 580, + 447, 594, 621, 622, 262, 403, 607, 517, 615, 639, + 225, 259, 417, 502, 600, 491, 394, 576, 577, 328, + 490, 294, 201, 366, 627, 223, 476, 368, 241, 230, + 582, 604, 298, 288, 453, 634, 212, 512, 592, 238, + 480, 0, 0, 642, 246, 501, 214, 589, 500, 390, + 325, 326, 213, 0, 454, 267, 292, 0, 0, 257, + 412, 584, 585, 255, 643, 227, 614, 219, 0, 613, + 405, 579, 590, 391, 380, 218, 588, 389, 379, 333, + 352, 353, 279, 306, 444, 372, 445, 305, 307, 401, + 400, 402, 206, 601, 0, 207, 0, 496, 603, 644, + 449, 211, 233, 234, 236, 0, 278, 282, 290, 293, + 302, 303, 312, 364, 416, 443, 439, 448, 0, 574, + 595, 608, 619, 625, 626, 628, 629, 630, 631, 632, + 635, 633, 404, 310, 492, 332, 370, 0, 0, 422, + 469, 239, 599, 493, 199, 0, 0, 0, 0, 253, + 254, 0, 570, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, + 640, 503, 509, 504, 505, 506, 507, 508, 0, 510, + 0, 0, 0, 0, 0, 395, 0, 586, 587, 663, + 381, 483, 596, 334, 346, 349, 339, 358, 0, 359, 
+ 335, 336, 341, 343, 344, 345, 350, 351, 355, 361, + 248, 209, 387, 396, 573, 311, 215, 216, 217, 519, + 520, 521, 522, 611, 612, 616, 204, 459, 460, 461, + 462, 291, 606, 308, 465, 464, 330, 331, 376, 446, + 535, 537, 548, 552, 554, 556, 562, 565, 536, 538, + 549, 553, 555, 557, 563, 566, 525, 527, 529, 531, + 544, 543, 540, 568, 569, 546, 551, 530, 542, 547, + 560, 567, 564, 524, 528, 532, 541, 559, 558, 539, + 550, 561, 545, 533, 526, 534, 0, 196, 220, 365, + 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, + 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 309, 317, 318, 321, 327, 377, + 383, 384, 385, 386, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 485, 486, 487, 488, 489, 497, 498, 511, 581, 583, + 598, 617, 623, 477, 300, 301, 441, 442, 313, 314, + 637, 638, 299, 593, 624, 591, 636, 618, 435, 375, + 0, 0, 378, 280, 304, 319, 0, 609, 499, 226, + 463, 289, 250, 0, 0, 210, 245, 229, 258, 273, + 276, 323, 388, 397, 426, 431, 295, 270, 243, 456, + 240, 482, 514, 515, 516, 518, 392, 265, 430, 393, + 0, 373, 571, 572, 315, 0, 523, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 0, 0, 0, 0, 269, 1773, 0, + 0, 0, 363, 266, 0, 0, 427, 0, 203, 0, + 484, 251, 374, 371, 578, 281, 272, 268, 249, 316, + 382, 425, 513, 419, 0, 367, 0, 0, 494, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 322, 247, 324, 202, 410, + 495, 285, 0, 0, 0, 0, 1772, 713, 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, - 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, - 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 2814, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, - 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 
422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 0, 709, - 0, 0, 0, 0, 2798, 0, 0, 0, 0, 237, - 0, 0, 244, 2799, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, - 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 
233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, - 0, 269, 1758, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 
323, 202, 408, 492, 285, 0, 0, 0, 0, - 1757, 709, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, - 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, + 244, 0, 0, 0, 348, 357, 356, 337, 338, 340, + 342, 347, 354, 360, 0, 0, 602, 0, 0, 0, + 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, + 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -5155,141 +5260,69 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 
360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 296, 0, 399, 256, 0, 450, 0, 0, 0, 620, + 0, 0, 0, 0, 0, 0, 0, 362, 0, 329, + 197, 224, 0, 0, 409, 458, 470, 0, 0, 0, + 252, 0, 468, 423, 597, 232, 283, 455, 429, 466, + 437, 286, 0, 0, 467, 369, 580, 447, 594, 621, + 622, 262, 403, 607, 517, 615, 639, 225, 259, 417, + 502, 600, 491, 394, 576, 577, 328, 490, 294, 201, + 366, 627, 223, 476, 368, 241, 230, 582, 604, 298, + 288, 453, 634, 212, 512, 592, 238, 480, 0, 0, + 642, 246, 501, 214, 589, 500, 390, 325, 326, 213, + 0, 454, 267, 292, 0, 0, 257, 412, 584, 585, + 255, 643, 227, 614, 219, 0, 613, 405, 579, 590, + 391, 380, 218, 588, 389, 379, 333, 352, 353, 279, + 306, 444, 372, 445, 305, 307, 401, 400, 402, 206, + 601, 0, 207, 0, 496, 603, 644, 449, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 302, 303, 312, + 364, 416, 443, 439, 448, 0, 574, 595, 608, 619, + 625, 626, 628, 629, 630, 631, 632, 635, 633, 404, + 310, 492, 332, 370, 0, 0, 422, 469, 239, 599, + 493, 199, 0, 0, 0, 0, 253, 254, 0, 570, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 661, 662, 640, 503, 509, + 504, 505, 506, 507, 508, 0, 510, 0, 0, 0, + 0, 0, 395, 0, 586, 587, 663, 381, 483, 596, + 334, 346, 349, 339, 358, 0, 359, 335, 336, 341, + 343, 344, 345, 350, 351, 355, 361, 248, 209, 387, + 396, 573, 311, 215, 216, 217, 519, 520, 521, 522, + 611, 612, 616, 204, 459, 460, 461, 462, 291, 606, + 308, 465, 464, 330, 331, 376, 446, 535, 537, 548, + 552, 554, 556, 562, 565, 536, 538, 549, 553, 555, + 557, 563, 566, 525, 
527, 529, 531, 544, 543, 540, + 568, 569, 546, 551, 530, 542, 547, 560, 567, 564, + 524, 528, 532, 541, 559, 558, 539, 550, 561, 545, + 533, 526, 534, 0, 196, 220, 365, 0, 451, 287, + 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 711, 712, 713, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 
415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 
0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 0, 709, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 297, 309, 317, 318, 321, 327, 377, 383, 384, 385, + 386, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 485, 486, 487, + 488, 489, 497, 498, 511, 581, 583, 598, 617, 623, + 477, 300, 301, 441, 442, 313, 314, 637, 638, 299, + 593, 624, 591, 636, 618, 435, 375, 0, 0, 378, + 280, 304, 319, 0, 609, 499, 226, 463, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 323, 388, + 397, 426, 431, 295, 270, 243, 456, 240, 482, 514, + 515, 516, 518, 392, 265, 430, 393, 0, 373, 571, + 572, 315, 0, 523, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 0, 0, 0, 0, 269, 0, 0, 0, 0, 363, + 266, 0, 0, 427, 0, 203, 0, 484, 251, 374, + 371, 578, 281, 272, 268, 249, 316, 382, 425, 513, + 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 322, 247, 324, 202, 410, 495, 285, 0, + 0, 0, 0, 0, 715, 716, 717, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 348, 357, 356, 337, 338, 340, 342, 347, 354, + 360, 0, 0, 602, 0, 0, 0, 264, 320, 271, + 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -5299,141 +5332,142 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 
296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 4020, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 399, + 256, 0, 450, 0, 0, 0, 620, 0, 0, 0, + 0, 0, 0, 0, 362, 0, 329, 197, 224, 0, + 0, 409, 458, 470, 0, 0, 0, 252, 0, 468, + 423, 597, 232, 283, 455, 429, 466, 437, 286, 0, + 0, 467, 369, 580, 447, 594, 621, 622, 262, 403, + 607, 517, 615, 
639, 225, 259, 417, 502, 600, 491, + 394, 576, 577, 328, 490, 294, 201, 366, 627, 223, + 476, 368, 241, 230, 582, 604, 298, 288, 453, 634, + 212, 512, 592, 238, 480, 0, 0, 642, 246, 501, + 214, 589, 500, 390, 325, 326, 213, 0, 454, 267, + 292, 0, 0, 257, 412, 584, 585, 255, 643, 227, + 614, 219, 0, 613, 405, 579, 590, 391, 380, 218, + 588, 389, 379, 333, 352, 353, 279, 306, 444, 372, + 445, 305, 307, 401, 400, 402, 206, 601, 0, 207, + 0, 496, 603, 644, 449, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 302, 303, 312, 364, 416, 443, + 439, 448, 0, 574, 595, 608, 619, 625, 626, 628, + 629, 630, 631, 632, 635, 633, 404, 310, 492, 332, + 370, 0, 0, 422, 469, 239, 599, 493, 199, 0, + 0, 0, 0, 253, 254, 0, 570, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 662, 640, 503, 509, 504, 505, 506, + 507, 508, 0, 510, 0, 0, 0, 0, 0, 395, + 0, 586, 587, 663, 381, 483, 596, 334, 346, 349, + 339, 358, 0, 359, 335, 336, 341, 343, 344, 345, + 350, 351, 355, 361, 248, 209, 387, 396, 573, 311, + 215, 216, 217, 519, 520, 521, 522, 611, 612, 616, + 204, 459, 460, 461, 462, 291, 606, 308, 465, 464, + 330, 331, 376, 446, 535, 537, 548, 552, 554, 556, + 562, 565, 536, 538, 549, 553, 555, 557, 563, 566, + 525, 527, 529, 531, 544, 543, 540, 568, 569, 546, + 551, 530, 542, 547, 560, 567, 564, 524, 528, 532, + 541, 559, 558, 539, 550, 561, 545, 533, 526, 534, + 0, 196, 220, 365, 0, 451, 287, 641, 610, 481, + 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 309, 317, + 318, 321, 327, 377, 383, 384, 385, 386, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 485, 486, 487, 488, 489, 497, + 498, 511, 581, 583, 598, 617, 623, 477, 300, 301, + 441, 442, 313, 314, 637, 638, 299, 593, 624, 591, + 636, 618, 435, 375, 0, 0, 378, 280, 304, 319, + 0, 
609, 499, 226, 463, 289, 250, 0, 0, 210, + 245, 229, 258, 273, 276, 323, 388, 397, 426, 431, + 295, 270, 243, 456, 240, 482, 514, 515, 516, 518, + 392, 265, 430, 393, 0, 373, 571, 572, 315, 0, + 523, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 363, 266, 0, 0, + 427, 0, 203, 0, 484, 251, 374, 371, 578, 281, + 272, 268, 249, 316, 382, 425, 513, 419, 0, 367, + 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 322, + 247, 324, 202, 410, 495, 285, 0, 0, 0, 0, + 0, 713, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 348, 357, + 356, 337, 338, 340, 342, 347, 354, 360, 0, 0, + 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, + 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 296, 0, 399, 256, 0, 450, + 0, 0, 0, 620, 0, 0, 0, 4020, 0, 0, + 0, 362, 0, 329, 197, 224, 0, 0, 409, 458, + 470, 0, 0, 0, 252, 0, 468, 423, 597, 232, + 283, 455, 429, 466, 437, 286, 0, 0, 467, 369, + 580, 447, 594, 621, 622, 262, 403, 607, 517, 615, + 639, 225, 259, 417, 502, 600, 491, 394, 576, 577, + 328, 490, 294, 201, 366, 627, 223, 476, 368, 241, + 230, 582, 604, 298, 288, 453, 634, 212, 512, 592, + 238, 480, 0, 0, 642, 246, 501, 214, 589, 500, + 390, 325, 326, 213, 0, 454, 267, 292, 0, 0, + 257, 412, 584, 585, 255, 643, 227, 614, 219, 0, + 613, 405, 579, 590, 391, 380, 218, 588, 389, 379, + 333, 352, 353, 279, 306, 444, 372, 445, 305, 307, + 401, 400, 402, 206, 601, 0, 207, 0, 496, 603, + 644, 449, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 302, 303, 312, 364, 416, 443, 439, 448, 0, + 574, 595, 608, 619, 625, 626, 628, 629, 630, 631, + 632, 635, 633, 404, 310, 492, 332, 370, 0, 0, + 422, 469, 239, 599, 493, 199, 0, 0, 0, 0, + 253, 254, 0, 570, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 640, 503, 509, 504, 505, 506, 507, 508, 0, + 510, 0, 0, 0, 0, 0, 395, 0, 586, 587, + 663, 381, 483, 596, 334, 346, 349, 339, 358, 0, + 359, 335, 336, 341, 343, 344, 345, 350, 351, 355, + 361, 248, 209, 387, 396, 573, 311, 215, 216, 217, + 519, 520, 521, 522, 611, 612, 616, 204, 459, 460, + 461, 462, 291, 606, 308, 465, 464, 330, 331, 376, + 446, 535, 537, 548, 552, 554, 556, 562, 565, 536, + 538, 549, 553, 555, 557, 563, 566, 525, 527, 529, + 531, 544, 543, 540, 568, 569, 546, 551, 530, 542, + 547, 560, 567, 564, 524, 528, 532, 541, 559, 558, + 539, 550, 561, 545, 533, 526, 534, 0, 196, 220, + 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 0, 0, 0, 1918, 194, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, - 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, - 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 274, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, - 205, 222, 0, 
261, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 0, 709, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 260, 275, 277, 284, 297, 309, 317, 318, 321, 327, + 377, 383, 384, 385, 386, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 485, 486, 487, 488, 489, 497, 498, 511, 581, + 583, 598, 617, 623, 477, 300, 301, 441, 442, 313, + 314, 637, 638, 299, 593, 624, 591, 636, 618, 435, + 375, 0, 0, 378, 280, 304, 319, 0, 609, 499, + 226, 463, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 323, 388, 397, 426, 431, 295, 270, 243, + 456, 240, 482, 514, 515, 516, 518, 392, 265, 430, + 393, 0, 373, 571, 572, 315, 0, 523, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 0, 0, 0, 0, 269, 0, + 0, 0, 0, 363, 266, 0, 0, 427, 0, 203, + 0, 484, 251, 374, 371, 578, 281, 272, 268, 249, + 316, 382, 425, 513, 419, 0, 367, 0, 0, 494, + 
398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 322, 247, 324, 202, + 410, 495, 285, 0, 0, 0, 0, 1933, 194, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 348, 357, 356, 337, 338, + 340, 342, 347, 354, 360, 0, 0, 602, 0, 0, + 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -5444,140 +5478,141 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 3897, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 
550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, - 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, - 0, 709, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, - 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 399, 256, 0, 450, 0, 0, 0, + 620, 0, 0, 0, 0, 0, 0, 0, 362, 0, + 329, 197, 224, 0, 0, 409, 458, 470, 0, 0, + 0, 252, 0, 468, 423, 597, 232, 283, 455, 429, + 466, 437, 286, 0, 0, 467, 369, 580, 447, 594, + 621, 622, 262, 403, 607, 517, 615, 639, 225, 259, + 417, 502, 600, 491, 394, 576, 577, 328, 490, 294, + 201, 366, 627, 223, 
476, 368, 241, 230, 582, 604, + 298, 288, 453, 634, 212, 512, 592, 238, 480, 0, + 0, 642, 246, 501, 214, 589, 500, 390, 325, 326, + 213, 0, 454, 267, 292, 0, 0, 257, 412, 584, + 585, 255, 643, 227, 614, 219, 0, 613, 405, 579, + 590, 391, 380, 218, 588, 389, 379, 333, 352, 353, + 279, 306, 444, 372, 445, 305, 307, 401, 400, 402, + 206, 601, 0, 207, 0, 496, 603, 644, 449, 211, + 233, 234, 236, 0, 278, 282, 290, 293, 302, 303, + 312, 364, 416, 443, 439, 448, 0, 574, 595, 608, + 619, 625, 626, 628, 629, 630, 631, 632, 635, 633, + 404, 310, 492, 332, 370, 0, 0, 422, 469, 239, + 599, 493, 199, 0, 0, 0, 0, 253, 254, 0, + 570, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 640, 503, + 509, 504, 505, 506, 507, 508, 0, 510, 0, 0, + 0, 0, 0, 395, 0, 586, 587, 663, 381, 483, + 596, 334, 346, 349, 339, 358, 0, 359, 335, 336, + 341, 343, 344, 345, 350, 351, 355, 361, 248, 209, + 387, 396, 573, 311, 215, 216, 217, 519, 520, 521, + 522, 611, 612, 616, 204, 459, 460, 461, 462, 291, + 606, 308, 465, 464, 330, 331, 376, 446, 535, 537, + 548, 552, 554, 556, 562, 565, 536, 538, 549, 553, + 555, 557, 563, 566, 525, 527, 529, 531, 544, 543, + 540, 568, 569, 546, 551, 530, 542, 547, 560, 567, + 564, 524, 528, 532, 541, 559, 558, 539, 550, 561, + 545, 533, 526, 534, 0, 196, 220, 365, 0, 451, + 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, + 284, 297, 309, 317, 318, 321, 327, 377, 383, 384, + 385, 386, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 485, 486, + 487, 488, 489, 497, 498, 511, 581, 583, 598, 617, + 623, 477, 300, 301, 441, 442, 313, 314, 637, 638, + 299, 593, 624, 591, 636, 618, 435, 375, 0, 0, + 378, 280, 304, 319, 0, 609, 499, 226, 463, 289, + 250, 0, 0, 210, 245, 229, 258, 273, 276, 323, + 388, 397, 426, 
431, 295, 270, 243, 456, 240, 482, + 514, 515, 516, 518, 392, 265, 430, 393, 0, 373, + 571, 572, 315, 0, 523, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 363, 266, 0, 0, 427, 0, 203, 0, 484, 251, + 374, 371, 578, 281, 272, 268, 249, 316, 382, 425, + 513, 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 
536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 322, 247, 324, 202, 410, 495, 285, + 0, 0, 0, 0, 0, 713, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 348, 357, 356, 337, 338, 340, 342, 347, + 354, 360, 0, 0, 602, 0, 0, 0, 264, 320, + 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 399, 256, 0, 450, 0, 0, 0, 620, 0, 0, + 0, 3678, 0, 0, 0, 362, 0, 329, 197, 224, + 0, 0, 409, 458, 470, 0, 0, 0, 252, 0, + 468, 423, 597, 232, 283, 455, 429, 466, 437, 286, + 0, 0, 467, 369, 580, 447, 594, 621, 622, 262, + 403, 607, 517, 615, 639, 225, 259, 417, 502, 600, + 491, 394, 576, 577, 328, 490, 294, 201, 366, 627, + 223, 476, 368, 241, 230, 582, 604, 298, 288, 453, + 634, 212, 512, 592, 238, 480, 0, 0, 642, 246, + 501, 214, 589, 500, 390, 325, 326, 213, 0, 454, + 267, 292, 0, 0, 257, 412, 584, 585, 255, 643, + 227, 614, 219, 0, 613, 405, 579, 590, 391, 380, + 218, 588, 389, 379, 333, 352, 353, 279, 306, 444, + 372, 445, 305, 307, 401, 400, 402, 206, 601, 0, + 207, 0, 496, 603, 644, 449, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 302, 303, 312, 364, 416, + 443, 439, 448, 0, 574, 595, 608, 619, 625, 626, + 628, 629, 630, 631, 632, 635, 633, 404, 310, 492, + 332, 370, 0, 0, 422, 469, 239, 599, 493, 199, + 0, 0, 0, 0, 253, 254, 0, 570, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 662, 640, 503, 509, 504, 505, + 506, 507, 508, 0, 510, 0, 0, 0, 0, 0, + 395, 0, 586, 587, 663, 381, 483, 596, 334, 346, + 349, 339, 358, 0, 359, 335, 336, 341, 343, 344, + 345, 350, 351, 355, 361, 248, 209, 387, 396, 573, + 311, 215, 216, 217, 
519, 520, 521, 522, 611, 612, + 616, 204, 459, 460, 461, 462, 291, 606, 308, 465, + 464, 330, 331, 376, 446, 535, 537, 548, 552, 554, + 556, 562, 565, 536, 538, 549, 553, 555, 557, 563, + 566, 525, 527, 529, 531, 544, 543, 540, 568, 569, + 546, 551, 530, 542, 547, 560, 567, 564, 524, 528, + 532, 541, 559, 558, 539, 550, 561, 545, 533, 526, + 534, 0, 196, 220, 365, 0, 451, 287, 641, 610, + 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 2370, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 194, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 309, + 317, 318, 321, 327, 377, 383, 384, 385, 386, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 485, 486, 487, 488, 489, + 497, 498, 511, 581, 583, 598, 617, 623, 477, 300, + 301, 441, 442, 313, 314, 637, 638, 299, 593, 624, 
+ 591, 636, 618, 435, 375, 0, 0, 378, 280, 304, + 319, 0, 609, 499, 226, 463, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 323, 388, 397, 426, + 431, 295, 270, 243, 456, 240, 482, 514, 515, 516, + 518, 392, 265, 430, 393, 0, 373, 571, 572, 315, + 0, 523, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 0, 0, + 0, 0, 269, 0, 0, 0, 0, 363, 266, 0, + 0, 427, 0, 203, 0, 484, 251, 374, 371, 578, + 281, 272, 268, 249, 316, 382, 425, 513, 419, 0, + 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 322, 247, 324, 202, 410, 495, 285, 0, 95, 0, + 0, 0, 713, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 244, 0, 0, 0, 348, + 357, 356, 337, 338, 340, 342, 347, 354, 360, 0, + 0, 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, @@ -5588,71 +5623,72 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 
647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 399, 256, 0, + 450, 0, 0, 0, 620, 0, 0, 0, 0, 0, + 0, 0, 362, 0, 329, 197, 224, 0, 0, 409, + 458, 470, 0, 0, 0, 252, 0, 468, 423, 597, + 232, 283, 455, 429, 466, 437, 286, 0, 0, 467, + 369, 580, 447, 594, 621, 622, 262, 403, 607, 517, + 615, 639, 225, 259, 417, 502, 600, 491, 394, 576, + 577, 328, 490, 294, 201, 366, 627, 223, 476, 368, + 
241, 230, 582, 604, 298, 288, 453, 634, 212, 512, + 592, 238, 480, 0, 0, 642, 246, 501, 214, 589, + 500, 390, 325, 326, 213, 0, 454, 267, 292, 0, + 0, 257, 412, 584, 585, 255, 643, 227, 614, 219, + 0, 613, 405, 579, 590, 391, 380, 218, 588, 389, + 379, 333, 352, 353, 279, 306, 444, 372, 445, 305, + 307, 401, 400, 402, 206, 601, 0, 207, 0, 496, + 603, 644, 449, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 302, 303, 312, 364, 416, 443, 439, 448, + 0, 574, 595, 608, 619, 625, 626, 628, 629, 630, + 631, 632, 635, 633, 404, 310, 492, 332, 370, 0, + 0, 422, 469, 239, 599, 493, 199, 0, 0, 0, + 0, 253, 254, 0, 570, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 640, 503, 509, 504, 505, 506, 507, 508, + 0, 510, 0, 0, 0, 0, 0, 395, 0, 586, + 587, 663, 381, 483, 596, 334, 346, 349, 339, 358, + 0, 359, 335, 336, 341, 343, 344, 345, 350, 351, + 355, 361, 248, 209, 387, 396, 573, 311, 215, 216, + 217, 519, 520, 521, 522, 611, 612, 616, 204, 459, + 460, 461, 462, 291, 606, 308, 465, 464, 330, 331, + 376, 446, 535, 537, 548, 552, 554, 556, 562, 565, + 536, 538, 549, 553, 555, 557, 563, 566, 525, 527, + 529, 531, 544, 543, 540, 568, 569, 546, 551, 530, + 542, 547, 560, 567, 564, 524, 528, 532, 541, 559, + 558, 539, 550, 561, 545, 533, 526, 534, 0, 196, + 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, + 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 309, 317, 318, 321, + 327, 377, 383, 384, 385, 386, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 485, 486, 487, 488, 489, 497, 498, 511, + 581, 583, 598, 617, 623, 477, 300, 301, 441, 442, + 313, 314, 637, 638, 299, 593, 624, 591, 636, 618, + 435, 375, 0, 0, 378, 280, 304, 319, 0, 609, + 499, 226, 463, 289, 250, 0, 0, 210, 245, 229, + 258, 273, 276, 323, 388, 397, 426, 431, 295, 
270, + 243, 456, 240, 482, 514, 515, 516, 518, 392, 265, + 430, 393, 0, 373, 571, 572, 315, 0, 523, 0, + 0, 0, 0, 2390, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 363, 266, 0, 0, 427, 0, + 203, 0, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 0, 367, 0, 0, + 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 322, 247, 324, + 202, 410, 495, 285, 0, 0, 0, 0, 0, 194, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 348, 357, 356, 337, + 338, 340, 342, 347, 354, 360, 0, 0, 602, 0, + 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, + 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 1739, 709, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -5660,69 +5696,69 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 
301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 296, 0, 399, 256, 0, 450, 0, 0, + 0, 620, 0, 0, 0, 0, 0, 0, 0, 362, + 0, 329, 197, 224, 0, 0, 409, 458, 470, 0, + 0, 0, 252, 0, 468, 423, 597, 232, 283, 455, + 429, 466, 437, 286, 0, 0, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 584, 585, 255, 643, 227, 614, 219, 0, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 352, + 353, 279, 306, 444, 372, 445, 305, 307, 401, 400, + 402, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 0, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 631, 632, 635, + 633, 404, 310, 492, 332, 370, 0, 0, 422, 469, + 239, 599, 493, 199, 0, 0, 0, 0, 253, 254, + 0, 570, 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 0, + 0, 0, 0, 0, 395, 0, 586, 587, 663, 381, + 483, 596, 334, 346, 349, 339, 358, 0, 359, 335, + 336, 341, 343, 344, 345, 350, 351, 355, 361, 248, + 209, 387, 396, 573, 311, 215, 216, 217, 519, 520, + 521, 522, 611, 612, 616, 204, 459, 460, 461, 462, + 291, 606, 308, 465, 464, 330, 331, 376, 446, 535, + 537, 548, 552, 554, 556, 562, 565, 536, 538, 549, + 553, 555, 557, 563, 566, 525, 527, 529, 531, 544, + 543, 540, 568, 569, 546, 551, 530, 542, 547, 560, + 567, 564, 524, 528, 532, 541, 559, 558, 539, 550, + 561, 545, 533, 526, 534, 0, 196, 220, 365, 0, + 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 0, 0, 0, 0, 194, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, - 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 309, 317, 318, 
321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 300, 301, 441, 442, 313, 314, 637, + 638, 299, 593, 624, 591, 636, 618, 435, 375, 0, + 0, 378, 280, 304, 319, 0, 609, 499, 226, 463, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 393, 0, + 373, 571, 572, 315, 0, 523, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 0, 0, 0, 0, 269, 0, 0, 0, + 0, 363, 266, 0, 0, 427, 0, 203, 0, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 0, 367, 0, 0, 494, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 322, 247, 324, 202, 410, 495, + 285, 0, 0, 0, 0, 1754, 713, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 348, 357, 356, 337, 338, 340, 342, + 347, 354, 360, 0, 0, 602, 0, 0, 0, 264, + 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -5733,140 +5769,141 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 
233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 2031, 449, 287, 637, 606, 601, + 0, 399, 256, 0, 450, 0, 0, 0, 620, 0, + 0, 0, 0, 0, 0, 0, 362, 0, 329, 197, + 224, 0, 0, 409, 458, 470, 0, 0, 0, 252, + 0, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 0, 0, 467, 369, 580, 447, 594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 584, 585, 255, + 643, 227, 614, 219, 0, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 352, 353, 279, 306, + 444, 372, 445, 305, 307, 401, 400, 402, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 0, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 448, 0, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 633, 404, 310, + 492, 332, 370, 0, 0, 422, 469, 239, 599, 493, + 199, 0, 0, 0, 0, 253, 254, 0, 570, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 645, 646, 
+ 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 0, 0, 0, 0, + 0, 395, 0, 586, 587, 663, 381, 483, 596, 334, + 346, 349, 339, 358, 0, 359, 335, 336, 341, 343, + 344, 345, 350, 351, 355, 361, 248, 209, 387, 396, + 573, 311, 215, 216, 217, 519, 520, 521, 522, 611, + 612, 616, 204, 459, 460, 461, 462, 291, 606, 308, + 465, 464, 330, 331, 376, 446, 535, 537, 548, 552, + 554, 556, 562, 565, 536, 538, 549, 553, 555, 557, + 563, 566, 525, 527, 529, 531, 544, 543, 540, 568, + 569, 546, 551, 530, 542, 547, 560, 567, 564, 524, + 528, 532, 541, 559, 558, 539, 550, 561, 545, 533, + 526, 534, 0, 196, 220, 365, 0, 451, 287, 641, + 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 300, 301, 441, 442, 313, 314, 637, 638, 299, 593, + 624, 591, 636, 618, 435, 375, 0, 0, 378, 280, + 304, 319, 0, 609, 499, 226, 463, 289, 250, 0, + 0, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 393, 0, 373, 571, 572, + 315, 0, 523, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 363, 266, + 0, 0, 427, 0, 203, 0, 484, 251, 374, 371, + 578, 281, 272, 268, 249, 316, 382, 425, 513, 419, + 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 322, 247, 324, 202, 410, 495, 285, 0, 0, + 0, 0, 0, 194, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 348, 357, 356, 337, 338, 340, 342, 347, 354, 360, + 0, 0, 602, 0, 0, 0, 264, 320, 271, 263, + 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 399, 256, + 0, 450, 0, 0, 0, 620, 0, 0, 0, 0, + 0, 0, 0, 362, 0, 329, 197, 224, 0, 0, + 409, 458, 470, 0, 0, 0, 252, 0, 468, 423, + 597, 232, 283, 455, 429, 466, 437, 286, 0, 0, + 467, 369, 580, 447, 594, 621, 622, 262, 403, 607, + 517, 615, 639, 225, 259, 417, 502, 600, 491, 394, + 576, 577, 328, 490, 294, 201, 366, 627, 223, 476, + 368, 241, 230, 582, 604, 298, 288, 453, 634, 212, + 512, 592, 238, 480, 0, 0, 642, 246, 501, 214, + 589, 500, 390, 325, 326, 213, 0, 454, 267, 292, + 0, 0, 257, 412, 584, 585, 255, 643, 227, 614, + 219, 0, 613, 405, 579, 590, 391, 380, 218, 588, + 389, 379, 333, 352, 353, 279, 306, 444, 372, 445, + 305, 307, 401, 400, 402, 206, 601, 0, 207, 0, + 496, 603, 644, 449, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 302, 303, 312, 364, 416, 443, 439, + 448, 0, 574, 595, 608, 619, 625, 626, 628, 629, + 630, 631, 632, 635, 633, 404, 310, 492, 332, 370, + 0, 0, 422, 469, 239, 599, 493, 199, 0, 0, + 0, 0, 253, 254, 0, 570, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 640, 503, 509, 504, 505, 506, 507, + 508, 0, 510, 0, 0, 0, 0, 0, 395, 0, + 586, 587, 663, 381, 483, 596, 334, 346, 349, 339, + 358, 0, 359, 335, 336, 341, 343, 344, 345, 350, + 351, 355, 361, 248, 209, 387, 396, 573, 311, 215, + 216, 217, 519, 520, 521, 522, 611, 612, 616, 204, + 459, 460, 461, 462, 291, 606, 308, 465, 464, 330, + 331, 376, 446, 535, 537, 548, 552, 554, 556, 562, + 565, 536, 538, 549, 553, 555, 557, 563, 566, 525, + 527, 529, 531, 544, 543, 540, 568, 569, 546, 551, + 530, 542, 547, 560, 567, 564, 524, 528, 532, 541, + 559, 558, 539, 550, 561, 545, 533, 526, 534, 0, + 196, 220, 365, 2047, 451, 287, 641, 610, 481, 605, 205, 
222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 2022, 709, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, - 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 
267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 
1885, 0, 0, 0, 0, 0, - 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, - 0, 709, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 235, 242, 260, 275, 277, 284, 297, 309, 317, 318, + 321, 327, 377, 383, 384, 385, 386, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 485, 486, 487, 488, 489, 497, 498, + 511, 581, 583, 598, 617, 623, 477, 300, 301, 441, + 442, 313, 314, 637, 638, 299, 593, 624, 591, 636, + 618, 435, 375, 0, 0, 378, 280, 304, 319, 0, + 609, 499, 226, 463, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 323, 388, 397, 426, 431, 295, + 270, 243, 456, 240, 482, 514, 515, 516, 518, 392, + 265, 430, 393, 0, 373, 571, 572, 315, 0, 523, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 0, + 269, 0, 0, 0, 0, 363, 266, 0, 0, 427, + 0, 203, 0, 484, 251, 374, 371, 578, 281, 272, + 268, 249, 316, 382, 425, 513, 419, 0, 367, 0, + 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 322, 247, + 324, 202, 410, 495, 285, 0, 0, 0, 0, 2038, + 713, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 237, 0, 0, 244, 0, 0, 0, 348, 357, 356, + 337, 338, 340, 342, 347, 354, 360, 0, 0, 602, + 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, @@ -5877,141 +5914,142 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 
262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 
303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 1883, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 709, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 399, 256, 0, 450, 0, + 0, 0, 620, 0, 0, 0, 0, 0, 0, 0, + 362, 0, 329, 197, 224, 0, 0, 409, 458, 470, + 0, 0, 0, 252, 0, 468, 423, 597, 232, 283, + 455, 429, 466, 437, 286, 0, 0, 467, 369, 580, + 447, 594, 621, 622, 262, 403, 607, 517, 615, 639, + 225, 259, 417, 502, 600, 491, 394, 576, 577, 328, + 490, 294, 201, 366, 627, 223, 476, 368, 241, 230, + 582, 604, 298, 288, 453, 634, 212, 512, 592, 238, + 480, 0, 0, 642, 246, 501, 214, 589, 500, 390, + 325, 326, 213, 0, 454, 267, 292, 0, 0, 257, + 412, 584, 585, 255, 643, 227, 614, 219, 0, 613, + 405, 579, 590, 391, 380, 218, 588, 389, 379, 333, + 352, 353, 279, 306, 444, 372, 445, 305, 307, 401, + 400, 402, 206, 601, 0, 207, 0, 496, 603, 644, + 449, 211, 233, 234, 236, 0, 278, 282, 290, 293, + 302, 303, 312, 364, 416, 443, 439, 448, 0, 574, + 595, 608, 619, 625, 626, 628, 629, 630, 631, 632, + 635, 633, 404, 310, 492, 332, 370, 0, 0, 422, + 469, 239, 599, 493, 199, 0, 0, 0, 0, 253, + 254, 0, 570, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, + 640, 503, 509, 504, 505, 506, 507, 508, 0, 510, + 0, 0, 0, 0, 0, 395, 0, 586, 587, 663, 
+ 381, 483, 596, 334, 346, 349, 339, 358, 0, 359, + 335, 336, 341, 343, 344, 345, 350, 351, 355, 361, + 248, 209, 387, 396, 573, 311, 215, 216, 217, 519, + 520, 521, 522, 611, 612, 616, 204, 459, 460, 461, + 462, 291, 606, 308, 465, 464, 330, 331, 376, 446, + 535, 537, 548, 552, 554, 556, 562, 565, 536, 538, + 549, 553, 555, 557, 563, 566, 525, 527, 529, 531, + 544, 543, 540, 568, 569, 546, 551, 530, 542, 547, + 560, 567, 564, 524, 528, 532, 541, 559, 558, 539, + 550, 561, 545, 533, 526, 534, 0, 196, 220, 365, + 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, + 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 309, 317, 318, 321, 327, 377, + 383, 384, 385, 386, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 485, 486, 487, 488, 489, 497, 498, 511, 581, 583, + 598, 617, 623, 477, 300, 301, 441, 442, 313, 314, + 637, 638, 299, 593, 624, 591, 636, 618, 435, 375, + 0, 0, 378, 280, 304, 319, 0, 609, 499, 226, + 463, 289, 250, 0, 0, 210, 245, 229, 258, 273, + 276, 323, 388, 397, 426, 431, 295, 270, 243, 456, + 240, 482, 514, 515, 516, 518, 392, 265, 430, 393, + 0, 373, 571, 572, 315, 0, 523, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 1900, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 363, 266, 0, 0, 427, 0, 203, 0, + 484, 251, 374, 371, 578, 281, 272, 268, 249, 316, + 382, 425, 513, 419, 0, 367, 0, 0, 494, 398, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 322, 247, 324, 202, 410, + 495, 285, 0, 0, 0, 0, 0, 713, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 348, 357, 356, 337, 338, 340, + 342, 347, 354, 360, 0, 0, 602, 0, 0, 0, + 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, + 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 
430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 1881, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 296, 0, 399, 256, 0, 450, 0, 0, 0, 620, + 0, 0, 0, 0, 0, 0, 0, 362, 0, 329, + 197, 224, 0, 0, 409, 458, 470, 0, 0, 0, + 252, 0, 468, 423, 597, 232, 283, 455, 429, 466, + 437, 286, 0, 0, 467, 369, 580, 447, 594, 621, + 622, 262, 403, 607, 517, 615, 639, 225, 259, 417, + 502, 600, 491, 394, 576, 577, 328, 490, 294, 201, + 366, 627, 223, 476, 368, 241, 230, 582, 604, 298, + 288, 453, 634, 212, 512, 592, 238, 480, 0, 0, + 642, 246, 501, 214, 589, 500, 390, 325, 326, 213, + 0, 454, 267, 292, 0, 0, 257, 412, 584, 585, + 255, 643, 227, 614, 219, 0, 613, 405, 579, 590, + 391, 380, 218, 588, 389, 379, 333, 352, 353, 279, + 306, 444, 372, 445, 305, 307, 401, 400, 402, 206, + 601, 0, 207, 0, 496, 603, 644, 449, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 302, 303, 312, + 364, 416, 443, 439, 448, 0, 574, 595, 608, 619, + 625, 626, 628, 629, 630, 631, 632, 635, 633, 404, + 310, 492, 332, 370, 0, 0, 422, 469, 239, 599, + 493, 199, 0, 0, 0, 0, 253, 254, 0, 570, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 661, 662, 640, 503, 509, + 504, 505, 506, 507, 508, 0, 510, 0, 0, 0, + 0, 0, 395, 0, 586, 587, 663, 381, 483, 596, + 334, 346, 349, 339, 358, 0, 359, 335, 336, 341, + 343, 344, 345, 350, 351, 355, 361, 248, 209, 387, + 396, 573, 311, 
215, 216, 217, 519, 520, 521, 522, + 611, 612, 616, 204, 459, 460, 461, 462, 291, 606, + 308, 465, 464, 330, 331, 376, 446, 535, 537, 548, + 552, 554, 556, 562, 565, 536, 538, 549, 553, 555, + 557, 563, 566, 525, 527, 529, 531, 544, 543, 540, + 568, 569, 546, 551, 530, 542, 547, 560, 567, 564, + 524, 528, 532, 541, 559, 558, 539, 550, 561, 545, + 533, 526, 534, 0, 196, 220, 365, 0, 451, 287, + 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 0, 709, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 309, 317, 318, 321, 327, 377, 383, 384, 385, + 386, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 485, 486, 487, + 488, 489, 497, 498, 511, 581, 583, 598, 617, 623, + 477, 300, 301, 441, 442, 313, 314, 637, 638, 299, + 593, 624, 591, 636, 618, 435, 375, 0, 0, 378, + 280, 304, 319, 0, 609, 499, 226, 463, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 323, 388, + 397, 426, 431, 295, 270, 243, 456, 240, 482, 514, + 515, 516, 518, 392, 265, 430, 393, 0, 373, 571, + 572, 315, 0, 523, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 1898, 0, 0, + 0, 0, 0, 0, 269, 0, 0, 0, 0, 363, + 266, 0, 0, 427, 0, 203, 0, 484, 251, 374, + 371, 578, 281, 272, 268, 249, 316, 382, 425, 513, + 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 322, 247, 324, 202, 410, 495, 285, 0, + 0, 0, 0, 0, 713, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 348, 357, 356, 337, 338, 340, 342, 347, 354, + 360, 0, 0, 602, 0, 0, 0, 264, 320, 271, + 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -6021,141 +6059,142 
@@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 0, 0, 0, 0, 0, 0, 296, 0, 399, + 256, 0, 450, 0, 0, 0, 620, 0, 0, 0, + 0, 0, 0, 0, 362, 0, 329, 197, 224, 0, + 0, 409, 458, 470, 0, 0, 
0, 252, 0, 468, + 423, 597, 232, 283, 455, 429, 466, 437, 286, 0, + 0, 467, 369, 580, 447, 594, 621, 622, 262, 403, + 607, 517, 615, 639, 225, 259, 417, 502, 600, 491, + 394, 576, 577, 328, 490, 294, 201, 366, 627, 223, + 476, 368, 241, 230, 582, 604, 298, 288, 453, 634, + 212, 512, 592, 238, 480, 0, 0, 642, 246, 501, + 214, 589, 500, 390, 325, 326, 213, 0, 454, 267, + 292, 0, 0, 257, 412, 584, 585, 255, 643, 227, + 614, 219, 0, 613, 405, 579, 590, 391, 380, 218, + 588, 389, 379, 333, 352, 353, 279, 306, 444, 372, + 445, 305, 307, 401, 400, 402, 206, 601, 0, 207, + 0, 496, 603, 644, 449, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 302, 303, 312, 364, 416, 443, + 439, 448, 0, 574, 595, 608, 619, 625, 626, 628, + 629, 630, 631, 632, 635, 633, 404, 310, 492, 332, + 370, 0, 0, 422, 469, 239, 599, 493, 199, 0, + 0, 0, 0, 253, 254, 0, 570, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 662, 640, 503, 509, 504, 505, 506, + 507, 508, 0, 510, 0, 0, 0, 0, 0, 395, + 0, 586, 587, 663, 381, 483, 596, 334, 346, 349, + 339, 358, 0, 359, 335, 336, 341, 343, 344, 345, + 350, 351, 355, 361, 248, 209, 387, 396, 573, 311, + 215, 216, 217, 519, 520, 521, 522, 611, 612, 616, + 204, 459, 460, 461, 462, 291, 606, 308, 465, 464, + 330, 331, 376, 446, 535, 537, 548, 552, 554, 556, + 562, 565, 536, 538, 549, 553, 555, 557, 563, 566, + 525, 527, 529, 531, 544, 543, 540, 568, 569, 546, + 551, 530, 542, 547, 560, 567, 564, 524, 528, 532, + 541, 559, 558, 539, 550, 561, 545, 533, 526, 534, + 0, 196, 220, 365, 0, 451, 287, 641, 610, 481, + 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 309, 317, + 318, 321, 327, 377, 383, 384, 385, 386, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 452, 457, 471, 472, 473, + 474, 475, 478, 479, 485, 486, 487, 488, 489, 497, + 498, 511, 581, 583, 
598, 617, 623, 477, 300, 301, + 441, 442, 313, 314, 637, 638, 299, 593, 624, 591, + 636, 618, 435, 375, 0, 0, 378, 280, 304, 319, + 0, 609, 499, 226, 463, 289, 250, 0, 0, 210, + 245, 229, 258, 273, 276, 323, 388, 397, 426, 431, + 295, 270, 243, 456, 240, 482, 514, 515, 516, 518, + 392, 265, 430, 393, 0, 373, 571, 572, 315, 0, + 523, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 1896, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 363, 266, 0, 0, + 427, 0, 203, 0, 484, 251, 374, 371, 578, 281, + 272, 268, 249, 316, 382, 425, 513, 419, 0, 367, + 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 322, + 247, 324, 202, 410, 495, 285, 0, 0, 0, 0, + 0, 713, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 348, 357, + 356, 337, 338, 340, 342, 347, 354, 360, 0, 0, + 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, + 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 296, 0, 399, 256, 0, 450, + 0, 0, 0, 620, 0, 0, 0, 0, 0, 0, + 0, 362, 0, 329, 197, 224, 0, 0, 409, 458, + 470, 0, 0, 0, 252, 0, 468, 423, 597, 232, + 283, 455, 429, 466, 437, 286, 0, 0, 467, 369, + 580, 447, 594, 621, 622, 262, 403, 607, 517, 615, + 639, 225, 259, 417, 502, 600, 491, 394, 576, 577, + 328, 490, 294, 201, 366, 627, 223, 476, 368, 241, + 230, 582, 604, 298, 288, 453, 634, 212, 512, 592, + 238, 480, 0, 0, 642, 246, 501, 214, 589, 500, + 390, 325, 326, 213, 0, 454, 267, 292, 0, 0, + 257, 412, 584, 585, 255, 643, 227, 614, 219, 0, + 613, 405, 579, 590, 391, 380, 218, 588, 389, 379, + 333, 352, 353, 279, 306, 444, 372, 445, 305, 307, + 401, 400, 402, 206, 601, 0, 207, 0, 496, 603, + 644, 449, 211, 233, 234, 236, 0, 278, 282, 290, + 293, 302, 303, 312, 364, 416, 443, 439, 448, 0, + 574, 595, 608, 619, 625, 626, 628, 629, 630, 
631, + 632, 635, 633, 404, 310, 492, 332, 370, 0, 0, + 422, 469, 239, 599, 493, 199, 0, 0, 0, 0, + 253, 254, 0, 570, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 640, 503, 509, 504, 505, 506, 507, 508, 0, + 510, 0, 0, 0, 0, 0, 395, 0, 586, 587, + 663, 381, 483, 596, 334, 346, 349, 339, 358, 0, + 359, 335, 336, 341, 343, 344, 345, 350, 351, 355, + 361, 248, 209, 387, 396, 573, 311, 215, 216, 217, + 519, 520, 521, 522, 611, 612, 616, 204, 459, 460, + 461, 462, 291, 606, 308, 465, 464, 330, 331, 376, + 446, 535, 537, 548, 552, 554, 556, 562, 565, 536, + 538, 549, 553, 555, 557, 563, 566, 525, 527, 529, + 531, 544, 543, 540, 568, 569, 546, 551, 530, 542, + 547, 560, 567, 564, 524, 528, 532, 541, 559, 558, + 539, 550, 561, 545, 533, 526, 534, 0, 196, 220, + 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 1879, 0, 0, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 0, 0, 0, 0, 709, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 
339, - 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, - 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 
561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, - 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 1877, 0, 0, 0, 0, 0, 0, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 0, 709, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, + 260, 275, 277, 284, 297, 309, 317, 318, 321, 327, + 377, 383, 384, 385, 386, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 485, 486, 487, 488, 489, 497, 498, 511, 581, + 583, 598, 617, 623, 477, 300, 301, 441, 442, 313, + 314, 637, 638, 299, 593, 624, 591, 636, 618, 435, + 375, 0, 0, 378, 280, 304, 319, 0, 609, 499, + 226, 463, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 323, 388, 397, 426, 431, 295, 270, 243, + 456, 240, 482, 514, 515, 516, 518, 392, 265, 430, + 393, 0, 373, 571, 572, 315, 0, 523, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 1894, 0, 0, 0, 0, 0, 0, 269, 
0, + 0, 0, 0, 363, 266, 0, 0, 427, 0, 203, + 0, 484, 251, 374, 371, 578, 281, 272, 268, 249, + 316, 382, 425, 513, 419, 0, 367, 0, 0, 494, + 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 322, 247, 324, 202, + 410, 495, 285, 0, 0, 0, 0, 0, 713, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 348, 357, 356, 337, 338, + 340, 342, 347, 354, 360, 0, 0, 602, 0, 0, + 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -6166,140 +6205,141 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 
612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 1873, 0, 0, 0, 0, 0, - 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, - 0, 709, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, - 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 399, 256, 0, 450, 0, 0, 0, + 620, 0, 0, 0, 0, 0, 0, 0, 362, 0, + 329, 197, 224, 0, 0, 409, 458, 470, 0, 0, + 0, 252, 0, 468, 423, 597, 232, 283, 455, 429, + 466, 437, 286, 0, 0, 467, 369, 580, 
447, 594, + 621, 622, 262, 403, 607, 517, 615, 639, 225, 259, + 417, 502, 600, 491, 394, 576, 577, 328, 490, 294, + 201, 366, 627, 223, 476, 368, 241, 230, 582, 604, + 298, 288, 453, 634, 212, 512, 592, 238, 480, 0, + 0, 642, 246, 501, 214, 589, 500, 390, 325, 326, + 213, 0, 454, 267, 292, 0, 0, 257, 412, 584, + 585, 255, 643, 227, 614, 219, 0, 613, 405, 579, + 590, 391, 380, 218, 588, 389, 379, 333, 352, 353, + 279, 306, 444, 372, 445, 305, 307, 401, 400, 402, + 206, 601, 0, 207, 0, 496, 603, 644, 449, 211, + 233, 234, 236, 0, 278, 282, 290, 293, 302, 303, + 312, 364, 416, 443, 439, 448, 0, 574, 595, 608, + 619, 625, 626, 628, 629, 630, 631, 632, 635, 633, + 404, 310, 492, 332, 370, 0, 0, 422, 469, 239, + 599, 493, 199, 0, 0, 0, 0, 253, 254, 0, + 570, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 640, 503, + 509, 504, 505, 506, 507, 508, 0, 510, 0, 0, + 0, 0, 0, 395, 0, 586, 587, 663, 381, 483, + 596, 334, 346, 349, 339, 358, 0, 359, 335, 336, + 341, 343, 344, 345, 350, 351, 355, 361, 248, 209, + 387, 396, 573, 311, 215, 216, 217, 519, 520, 521, + 522, 611, 612, 616, 204, 459, 460, 461, 462, 291, + 606, 308, 465, 464, 330, 331, 376, 446, 535, 537, + 548, 552, 554, 556, 562, 565, 536, 538, 549, 553, + 555, 557, 563, 566, 525, 527, 529, 531, 544, 543, + 540, 568, 569, 546, 551, 530, 542, 547, 560, 567, + 564, 524, 528, 532, 541, 559, 558, 539, 550, 561, + 545, 533, 526, 534, 0, 196, 220, 365, 0, 451, + 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, + 284, 297, 309, 317, 318, 321, 327, 377, 383, 384, + 385, 386, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 485, 486, + 487, 488, 489, 497, 498, 511, 581, 583, 598, 617, + 623, 477, 300, 301, 441, 442, 313, 314, 637, 638, + 299, 593, 624, 591, 636, 
618, 435, 375, 0, 0, + 378, 280, 304, 319, 0, 609, 499, 226, 463, 289, + 250, 0, 0, 210, 245, 229, 258, 273, 276, 323, + 388, 397, 426, 431, 295, 270, 243, 456, 240, 482, + 514, 515, 516, 518, 392, 265, 430, 393, 0, 373, + 571, 572, 315, 0, 523, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 1892, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 363, 266, 0, 0, 427, 0, 203, 0, 484, 251, + 374, 371, 578, 281, 272, 268, 249, 316, 382, 425, + 513, 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 
552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, + 0, 0, 0, 322, 247, 324, 202, 410, 495, 285, + 0, 0, 0, 0, 0, 713, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 348, 357, 356, 337, 338, 340, 342, 347, + 354, 360, 0, 0, 602, 0, 0, 0, 264, 320, + 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 399, 256, 0, 450, 0, 0, 0, 620, 0, 0, + 0, 0, 0, 0, 0, 362, 0, 329, 197, 224, + 0, 0, 409, 458, 470, 0, 0, 0, 252, 0, + 468, 423, 597, 232, 283, 455, 429, 466, 437, 286, + 0, 0, 467, 369, 580, 447, 594, 621, 622, 262, + 403, 607, 517, 615, 639, 225, 259, 417, 502, 600, + 491, 394, 576, 577, 328, 490, 294, 201, 366, 627, + 223, 476, 368, 241, 230, 582, 604, 298, 288, 453, + 634, 212, 512, 592, 238, 480, 0, 0, 642, 246, + 501, 214, 589, 500, 390, 325, 326, 213, 0, 454, + 267, 292, 0, 0, 257, 412, 584, 585, 255, 643, + 227, 614, 219, 0, 613, 405, 579, 590, 391, 380, + 218, 588, 389, 379, 333, 352, 353, 279, 306, 444, + 372, 445, 305, 307, 401, 400, 402, 206, 601, 0, + 207, 0, 496, 603, 644, 449, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 302, 303, 312, 364, 416, + 443, 439, 448, 0, 574, 595, 608, 619, 625, 626, + 628, 629, 630, 631, 632, 635, 633, 404, 310, 492, + 332, 370, 0, 0, 422, 469, 239, 599, 493, 199, + 0, 0, 0, 0, 253, 254, 0, 570, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 662, 640, 503, 509, 504, 505, + 506, 507, 508, 0, 510, 0, 0, 0, 0, 0, + 395, 0, 586, 587, 663, 381, 483, 
596, 334, 346, + 349, 339, 358, 0, 359, 335, 336, 341, 343, 344, + 345, 350, 351, 355, 361, 248, 209, 387, 396, 573, + 311, 215, 216, 217, 519, 520, 521, 522, 611, 612, + 616, 204, 459, 460, 461, 462, 291, 606, 308, 465, + 464, 330, 331, 376, 446, 535, 537, 548, 552, 554, + 556, 562, 565, 536, 538, 549, 553, 555, 557, 563, + 566, 525, 527, 529, 531, 544, 543, 540, 568, 569, + 546, 551, 530, 542, 547, 560, 567, 564, 524, 528, + 532, 541, 559, 558, 539, 550, 561, 545, 533, 526, + 534, 0, 196, 220, 365, 0, 451, 287, 641, 610, + 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 1871, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 709, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, + 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 309, + 317, 318, 321, 327, 377, 383, 384, 385, 386, 406, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 
478, 479, 485, 486, 487, 488, 489, + 497, 498, 511, 581, 583, 598, 617, 623, 477, 300, + 301, 441, 442, 313, 314, 637, 638, 299, 593, 624, + 591, 636, 618, 435, 375, 0, 0, 378, 280, 304, + 319, 0, 609, 499, 226, 463, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 323, 388, 397, 426, + 431, 295, 270, 243, 456, 240, 482, 514, 515, 516, + 518, 392, 265, 430, 393, 0, 373, 571, 572, 315, + 0, 523, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 1888, 0, 0, 0, 0, + 0, 0, 269, 0, 0, 0, 0, 363, 266, 0, + 0, 427, 0, 203, 0, 484, 251, 374, 371, 578, + 281, 272, 268, 249, 316, 382, 425, 513, 419, 0, + 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 322, 247, 324, 202, 410, 495, 285, 0, 0, 0, + 0, 0, 713, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 244, 0, 0, 0, 348, + 357, 356, 337, 338, 340, 342, 347, 354, 360, 0, + 0, 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, @@ -6310,71 +6350,72 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 
331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 1869, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 0, 0, 0, 0, 0, 296, 0, 399, 256, 0, + 450, 0, 0, 0, 620, 0, 0, 0, 0, 0, + 0, 0, 362, 0, 329, 197, 224, 0, 0, 409, + 458, 470, 0, 0, 0, 252, 0, 468, 423, 597, + 232, 283, 455, 429, 466, 437, 286, 0, 0, 467, + 369, 580, 447, 
594, 621, 622, 262, 403, 607, 517, + 615, 639, 225, 259, 417, 502, 600, 491, 394, 576, + 577, 328, 490, 294, 201, 366, 627, 223, 476, 368, + 241, 230, 582, 604, 298, 288, 453, 634, 212, 512, + 592, 238, 480, 0, 0, 642, 246, 501, 214, 589, + 500, 390, 325, 326, 213, 0, 454, 267, 292, 0, + 0, 257, 412, 584, 585, 255, 643, 227, 614, 219, + 0, 613, 405, 579, 590, 391, 380, 218, 588, 389, + 379, 333, 352, 353, 279, 306, 444, 372, 445, 305, + 307, 401, 400, 402, 206, 601, 0, 207, 0, 496, + 603, 644, 449, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 302, 303, 312, 364, 416, 443, 439, 448, + 0, 574, 595, 608, 619, 625, 626, 628, 629, 630, + 631, 632, 635, 633, 404, 310, 492, 332, 370, 0, + 0, 422, 469, 239, 599, 493, 199, 0, 0, 0, + 0, 253, 254, 0, 570, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 640, 503, 509, 504, 505, 506, 507, 508, + 0, 510, 0, 0, 0, 0, 0, 395, 0, 586, + 587, 663, 381, 483, 596, 334, 346, 349, 339, 358, + 0, 359, 335, 336, 341, 343, 344, 345, 350, 351, + 355, 361, 248, 209, 387, 396, 573, 311, 215, 216, + 217, 519, 520, 521, 522, 611, 612, 616, 204, 459, + 460, 461, 462, 291, 606, 308, 465, 464, 330, 331, + 376, 446, 535, 537, 548, 552, 554, 556, 562, 565, + 536, 538, 549, 553, 555, 557, 563, 566, 525, 527, + 529, 531, 544, 543, 540, 568, 569, 546, 551, 530, + 542, 547, 560, 567, 564, 524, 528, 532, 541, 559, + 558, 539, 550, 561, 545, 533, 526, 534, 0, 196, + 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, + 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 309, 317, 318, 321, + 327, 377, 383, 384, 385, 386, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 485, 486, 487, 488, 489, 497, 498, 511, + 581, 583, 598, 617, 623, 477, 300, 301, 441, 442, + 313, 314, 637, 638, 299, 593, 624, 591, 636, 618, 
+ 435, 375, 0, 0, 378, 280, 304, 319, 0, 609, + 499, 226, 463, 289, 250, 0, 0, 210, 245, 229, + 258, 273, 276, 323, 388, 397, 426, 431, 295, 270, + 243, 456, 240, 482, 514, 515, 516, 518, 392, 265, + 430, 393, 0, 373, 571, 572, 315, 0, 523, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 1886, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 363, 266, 0, 0, 427, 0, + 203, 0, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 0, 367, 0, 0, + 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 322, 247, 324, + 202, 410, 495, 285, 0, 0, 0, 0, 0, 713, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, + 0, 0, 244, 0, 0, 0, 348, 357, 356, 337, + 338, 340, 342, 347, 354, 360, 0, 0, 602, 0, + 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, + 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 0, 709, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -6382,69 +6423,69 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 
279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 296, 0, 399, 256, 0, 450, 0, 0, + 0, 620, 0, 0, 0, 0, 0, 0, 0, 362, + 0, 329, 197, 224, 0, 0, 409, 458, 470, 0, + 0, 0, 252, 0, 468, 423, 597, 232, 283, 455, + 429, 466, 437, 286, 0, 0, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 584, 585, 255, 643, 227, 614, 219, 0, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 352, + 353, 279, 306, 444, 372, 445, 305, 307, 401, 400, + 402, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 0, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 
631, 632, 635, + 633, 404, 310, 492, 332, 370, 0, 0, 422, 469, + 239, 599, 493, 199, 0, 0, 0, 0, 253, 254, + 0, 570, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 0, + 0, 0, 0, 0, 395, 0, 586, 587, 663, 381, + 483, 596, 334, 346, 349, 339, 358, 0, 359, 335, + 336, 341, 343, 344, 345, 350, 351, 355, 361, 248, + 209, 387, 396, 573, 311, 215, 216, 217, 519, 520, + 521, 522, 611, 612, 616, 204, 459, 460, 461, 462, + 291, 606, 308, 465, 464, 330, 331, 376, 446, 535, + 537, 548, 552, 554, 556, 562, 565, 536, 538, 549, + 553, 555, 557, 563, 566, 525, 527, 529, 531, 544, + 543, 540, 568, 569, 546, 551, 530, 542, 547, 560, + 567, 564, 524, 528, 532, 541, 559, 558, 539, 550, + 561, 545, 533, 526, 534, 0, 196, 220, 365, 0, + 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 247, 323, 202, 408, - 492, 285, 0, 1844, 0, 0, 0, 709, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, - 341, 346, 353, 359, 
0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, + 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, + 277, 284, 297, 309, 317, 318, 321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 300, 301, 441, 442, 313, 314, 637, + 638, 299, 593, 624, 591, 636, 618, 435, 375, 0, + 0, 378, 280, 304, 319, 0, 609, 499, 226, 463, + 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 393, 0, + 373, 571, 572, 315, 0, 523, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 1884, + 0, 0, 0, 0, 0, 0, 269, 0, 0, 0, + 0, 363, 266, 0, 0, 427, 0, 203, 0, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 0, 367, 0, 0, 494, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 322, 247, 324, 202, 410, 495, + 285, 0, 0, 0, 0, 0, 713, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 348, 357, 356, 337, 338, 340, 342, + 347, 354, 360, 0, 0, 602, 0, 0, 0, 264, + 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -6455,140 +6496,141 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 0, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 
379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, + 0, 399, 256, 0, 450, 0, 0, 0, 620, 0, + 0, 0, 0, 0, 0, 0, 362, 0, 329, 197, + 224, 0, 0, 409, 458, 470, 0, 0, 0, 252, + 0, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 0, 0, 467, 369, 580, 447, 594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 584, 585, 255, + 643, 227, 614, 219, 0, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 352, 353, 279, 306, + 444, 372, 445, 305, 307, 401, 400, 402, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 0, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 448, 0, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 
633, 404, 310, + 492, 332, 370, 0, 0, 422, 469, 239, 599, 493, + 199, 0, 0, 0, 0, 253, 254, 0, 570, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 0, 0, 0, 0, + 0, 395, 0, 586, 587, 663, 381, 483, 596, 334, + 346, 349, 339, 358, 0, 359, 335, 336, 341, 343, + 344, 345, 350, 351, 355, 361, 248, 209, 387, 396, + 573, 311, 215, 216, 217, 519, 520, 521, 522, 611, + 612, 616, 204, 459, 460, 461, 462, 291, 606, 308, + 465, 464, 330, 331, 376, 446, 535, 537, 548, 552, + 554, 556, 562, 565, 536, 538, 549, 553, 555, 557, + 563, 566, 525, 527, 529, 531, 544, 543, 540, 568, + 569, 546, 551, 530, 542, 547, 560, 567, 564, 524, + 528, 532, 541, 559, 558, 539, 550, 561, 545, 533, + 526, 534, 0, 196, 220, 365, 0, 451, 287, 641, + 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 300, 301, 441, 442, 313, 314, 637, 638, 299, 593, + 624, 591, 636, 618, 435, 375, 0, 0, 378, 280, + 304, 319, 0, 609, 499, 226, 463, 289, 250, 0, + 0, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 393, 0, 373, 571, 572, + 315, 0, 523, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 413, 0, 0, 0, 0, 0, + 0, 0, 0, 269, 0, 0, 0, 0, 363, 266, + 0, 0, 427, 0, 203, 0, 484, 251, 374, 371, + 578, 281, 272, 268, 249, 316, 382, 425, 513, 419, + 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 322, 247, 324, 202, 410, 495, 285, 0, 1859, + 0, 0, 0, 713, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, + 348, 357, 356, 337, 338, 340, 
342, 347, 354, 360, + 0, 0, 602, 0, 0, 0, 264, 320, 271, 263, + 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 296, 0, 399, 256, + 0, 450, 0, 0, 0, 620, 0, 0, 0, 0, + 0, 0, 0, 362, 0, 329, 197, 224, 0, 0, + 409, 458, 470, 0, 0, 0, 252, 0, 468, 423, + 597, 232, 283, 455, 429, 466, 437, 286, 0, 0, + 467, 369, 580, 447, 594, 621, 622, 262, 403, 607, + 517, 615, 639, 225, 259, 417, 502, 600, 491, 394, + 576, 577, 328, 490, 294, 201, 366, 627, 223, 476, + 368, 241, 230, 582, 604, 298, 288, 453, 634, 212, + 512, 592, 238, 480, 0, 0, 642, 246, 501, 214, + 589, 500, 390, 325, 326, 213, 0, 454, 267, 292, + 0, 0, 257, 412, 584, 585, 255, 643, 227, 614, + 219, 0, 613, 405, 579, 590, 391, 380, 218, 588, + 389, 379, 333, 352, 353, 279, 306, 444, 372, 445, + 305, 307, 401, 400, 402, 206, 601, 0, 207, 0, + 496, 603, 644, 449, 211, 233, 234, 236, 0, 278, + 282, 290, 293, 302, 303, 312, 364, 416, 443, 439, + 448, 0, 574, 595, 608, 619, 625, 626, 628, 629, + 630, 631, 632, 635, 633, 404, 310, 492, 332, 370, + 0, 0, 422, 469, 239, 599, 493, 199, 0, 0, + 0, 0, 253, 254, 0, 570, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, + 660, 661, 662, 640, 503, 509, 504, 505, 506, 507, + 508, 0, 510, 0, 0, 0, 0, 0, 395, 0, + 586, 587, 663, 381, 483, 596, 334, 346, 349, 339, + 358, 0, 359, 335, 336, 341, 343, 344, 345, 350, + 351, 355, 361, 248, 209, 387, 396, 573, 311, 215, + 216, 217, 519, 520, 521, 522, 611, 612, 616, 204, + 459, 460, 461, 462, 291, 606, 308, 465, 464, 330, + 331, 376, 446, 535, 537, 548, 552, 554, 556, 562, + 565, 536, 538, 549, 553, 555, 557, 563, 566, 525, + 527, 529, 531, 544, 543, 540, 568, 569, 546, 551, + 530, 542, 547, 
560, 567, 564, 524, 528, 532, 541, + 559, 558, 539, 550, 561, 545, 533, 526, 534, 0, + 196, 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 0, 0, 0, 1743, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 0, 194, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, - 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 
474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 213, - 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 404, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 
270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, - 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 95, 0, 0, - 0, 941, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, + 235, 242, 260, 275, 277, 284, 297, 309, 317, 318, + 321, 327, 377, 383, 384, 385, 386, 406, 407, 408, + 411, 414, 415, 418, 420, 421, 424, 428, 432, 433, + 434, 436, 438, 440, 452, 457, 471, 472, 473, 474, + 475, 478, 479, 485, 486, 487, 488, 489, 497, 498, + 511, 581, 583, 598, 617, 623, 477, 300, 301, 441, + 442, 313, 314, 637, 638, 299, 593, 624, 591, 636, + 618, 435, 375, 0, 0, 378, 280, 304, 319, 0, + 609, 499, 226, 463, 289, 250, 0, 0, 210, 245, + 229, 258, 273, 276, 323, 388, 397, 426, 431, 295, + 270, 243, 456, 240, 482, 514, 515, 516, 518, 392, + 265, 430, 393, 0, 373, 571, 572, 315, 0, 523, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 413, 0, 0, 0, 0, 0, 0, 0, 1758, + 269, 0, 0, 0, 0, 363, 266, 0, 0, 427, + 0, 203, 0, 484, 251, 374, 371, 578, 281, 272, + 268, 249, 316, 382, 425, 513, 419, 0, 367, 0, + 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 322, 247, + 324, 202, 410, 495, 285, 0, 0, 0, 0, 0, + 194, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 237, 0, 0, 244, 0, 0, 0, 348, 357, 356, + 337, 338, 340, 342, 347, 354, 360, 0, 0, 602, + 0, 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, @@ -6599,141 +6641,142 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 
0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 
578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 194, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 296, 0, 399, 256, 0, 450, 0, + 0, 0, 620, 0, 0, 0, 0, 0, 0, 0, + 362, 0, 329, 197, 224, 0, 0, 409, 458, 470, + 0, 0, 0, 252, 0, 468, 423, 597, 232, 283, + 455, 429, 466, 437, 286, 0, 0, 467, 369, 580, + 447, 594, 621, 622, 262, 403, 607, 517, 615, 639, + 225, 259, 417, 502, 600, 491, 394, 576, 577, 328, + 490, 294, 201, 366, 627, 223, 476, 368, 241, 230, + 582, 604, 298, 288, 453, 634, 212, 512, 592, 238, + 480, 0, 0, 642, 246, 501, 214, 589, 500, 390, + 325, 326, 213, 0, 454, 267, 292, 0, 0, 257, + 412, 584, 585, 255, 643, 227, 614, 219, 0, 613, + 405, 579, 590, 391, 380, 218, 588, 389, 379, 333, + 352, 353, 279, 306, 444, 372, 445, 305, 307, 401, + 400, 402, 206, 601, 0, 207, 0, 496, 603, 644, + 449, 211, 233, 234, 236, 0, 278, 282, 290, 293, + 302, 303, 312, 364, 416, 443, 439, 448, 0, 574, + 595, 608, 619, 625, 626, 628, 629, 630, 631, 632, + 635, 633, 404, 310, 492, 332, 370, 0, 0, 422, + 469, 239, 599, 493, 199, 0, 0, 0, 0, 253, + 254, 0, 570, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 654, 655, 656, 657, 658, 659, 660, 
661, 662, + 640, 503, 509, 504, 505, 506, 507, 508, 0, 510, + 0, 0, 0, 0, 0, 395, 0, 586, 587, 663, + 381, 483, 596, 334, 346, 349, 339, 358, 0, 359, + 335, 336, 341, 343, 344, 345, 350, 351, 355, 361, + 248, 209, 387, 396, 573, 311, 215, 216, 217, 519, + 520, 521, 522, 611, 612, 616, 204, 459, 460, 461, + 462, 291, 606, 308, 465, 464, 330, 331, 376, 446, + 535, 537, 548, 552, 554, 556, 562, 565, 536, 538, + 549, 553, 555, 557, 563, 566, 525, 527, 529, 531, + 544, 543, 540, 568, 569, 546, 551, 530, 542, 547, + 560, 567, 564, 524, 528, 532, 541, 559, 558, 539, + 550, 561, 545, 533, 526, 534, 0, 196, 220, 365, + 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, + 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, + 275, 277, 284, 297, 309, 317, 318, 321, 327, 377, + 383, 384, 385, 386, 406, 407, 408, 411, 414, 415, + 418, 420, 421, 424, 428, 432, 433, 434, 436, 438, + 440, 452, 457, 471, 472, 473, 474, 475, 478, 479, + 485, 486, 487, 488, 489, 497, 498, 511, 581, 583, + 598, 617, 623, 477, 300, 301, 441, 442, 313, 314, + 637, 638, 299, 593, 624, 591, 636, 618, 435, 375, + 0, 0, 378, 280, 304, 319, 0, 609, 499, 226, + 463, 289, 250, 0, 0, 210, 245, 229, 258, 273, + 276, 323, 388, 397, 426, 431, 295, 270, 243, 456, + 240, 482, 514, 515, 516, 518, 392, 265, 430, 393, + 0, 373, 571, 572, 315, 0, 523, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 413, 0, + 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, + 0, 0, 363, 266, 0, 0, 427, 0, 203, 0, + 484, 251, 374, 371, 578, 281, 272, 268, 249, 316, + 382, 425, 513, 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 322, 247, 324, 202, 410, + 495, 285, 0, 95, 0, 0, 0, 946, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, + 244, 0, 0, 0, 348, 357, 356, 337, 338, 340, + 342, 347, 354, 360, 0, 0, 602, 0, 0, 0, + 264, 320, 271, 263, 575, 0, 0, 0, 0, 0, + 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1430, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 
317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 1429, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, - 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, + 296, 0, 399, 256, 0, 450, 0, 0, 0, 620, + 0, 0, 0, 0, 0, 0, 0, 362, 0, 329, + 197, 224, 0, 0, 409, 458, 470, 0, 0, 0, + 252, 0, 468, 423, 597, 232, 283, 455, 429, 466, + 437, 286, 0, 0, 467, 369, 580, 447, 594, 621, + 622, 262, 403, 607, 517, 615, 639, 225, 259, 417, + 502, 600, 491, 394, 576, 577, 328, 490, 294, 201, + 366, 627, 223, 476, 368, 241, 230, 582, 604, 298, + 288, 453, 634, 212, 512, 592, 238, 480, 0, 0, + 642, 246, 501, 214, 589, 500, 390, 325, 326, 213, + 0, 454, 267, 292, 0, 0, 257, 412, 584, 585, + 255, 643, 227, 614, 219, 0, 613, 405, 579, 590, + 391, 380, 218, 588, 389, 379, 333, 352, 353, 279, + 306, 444, 372, 445, 305, 307, 401, 400, 402, 206, + 601, 0, 207, 0, 496, 603, 644, 449, 211, 233, + 234, 236, 0, 278, 282, 290, 293, 302, 303, 312, + 364, 416, 443, 439, 448, 0, 574, 595, 608, 619, + 625, 626, 628, 629, 630, 631, 632, 635, 633, 404, + 310, 492, 332, 370, 0, 0, 422, 469, 239, 599, + 493, 199, 0, 0, 0, 0, 253, 254, 0, 570, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 645, + 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, + 656, 657, 658, 659, 660, 661, 662, 640, 503, 509, + 504, 505, 506, 507, 508, 0, 510, 0, 0, 0, + 0, 0, 395, 0, 586, 587, 663, 381, 483, 596, + 334, 346, 349, 339, 
358, 0, 359, 335, 336, 341, + 343, 344, 345, 350, 351, 355, 361, 248, 209, 387, + 396, 573, 311, 215, 216, 217, 519, 520, 521, 522, + 611, 612, 616, 204, 459, 460, 461, 462, 291, 606, + 308, 465, 464, 330, 331, 376, 446, 535, 537, 548, + 552, 554, 556, 562, 565, 536, 538, 549, 553, 555, + 557, 563, 566, 525, 527, 529, 531, 544, 543, 540, + 568, 569, 546, 551, 530, 542, 547, 560, 567, 564, + 524, 528, 532, 541, 559, 558, 539, 550, 561, 545, + 533, 526, 534, 0, 196, 220, 365, 0, 451, 287, + 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 0, 194, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, + 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, + 297, 309, 317, 318, 321, 327, 377, 383, 384, 385, + 386, 406, 407, 408, 411, 414, 415, 418, 420, 421, + 424, 428, 432, 433, 434, 436, 438, 440, 452, 457, + 471, 472, 473, 474, 475, 478, 479, 485, 486, 487, + 488, 489, 497, 498, 511, 581, 583, 598, 617, 623, + 477, 300, 301, 441, 442, 313, 314, 637, 638, 299, + 593, 624, 591, 636, 618, 435, 375, 0, 0, 378, + 280, 304, 319, 0, 609, 499, 226, 463, 289, 250, + 0, 0, 210, 245, 229, 258, 273, 276, 323, 388, + 397, 426, 431, 295, 270, 243, 456, 240, 482, 514, + 515, 516, 518, 392, 265, 430, 393, 0, 373, 571, + 572, 315, 0, 523, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 413, 0, 0, 0, 0, + 0, 0, 0, 0, 269, 0, 0, 0, 0, 363, + 266, 0, 0, 427, 0, 203, 0, 484, 251, 374, + 371, 578, 281, 272, 268, 249, 316, 382, 425, 513, + 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 322, 247, 324, 202, 410, 495, 285, 0, + 0, 0, 0, 0, 194, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 237, 0, 0, 244, 0, 0, + 0, 348, 357, 356, 337, 338, 340, 342, 347, 354, + 360, 0, 0, 602, 0, 0, 0, 264, 320, 271, + 263, 575, 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -6743,288 +6786,363 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, + 0, 0, 0, 0, 0, 1438, 0, 296, 0, 399, + 256, 
0, 450, 0, 0, 0, 620, 0, 0, 0, + 0, 0, 0, 0, 362, 0, 329, 197, 224, 0, + 0, 409, 458, 470, 0, 0, 0, 252, 0, 468, + 423, 597, 232, 283, 455, 429, 466, 437, 286, 0, + 0, 467, 369, 580, 447, 594, 621, 622, 262, 403, + 607, 517, 615, 639, 225, 259, 417, 502, 600, 491, + 394, 576, 577, 328, 490, 294, 201, 366, 627, 223, + 476, 368, 241, 230, 582, 604, 298, 288, 453, 634, + 212, 512, 592, 238, 480, 0, 0, 642, 246, 501, + 214, 589, 500, 390, 325, 326, 213, 0, 454, 267, + 292, 0, 0, 257, 412, 584, 585, 255, 643, 227, + 614, 219, 0, 613, 405, 579, 590, 391, 380, 218, + 588, 389, 379, 333, 352, 353, 279, 306, 444, 372, + 445, 305, 307, 401, 400, 402, 206, 601, 0, 207, + 0, 496, 603, 644, 449, 211, 233, 234, 236, 0, + 278, 282, 290, 293, 302, 303, 312, 364, 416, 443, + 439, 448, 0, 574, 595, 608, 619, 625, 626, 628, + 629, 630, 631, 632, 635, 633, 404, 310, 492, 332, + 370, 0, 0, 422, 469, 239, 599, 493, 199, 0, + 0, 0, 0, 253, 254, 0, 570, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 662, 640, 503, 509, 504, 505, 506, + 507, 508, 0, 510, 0, 0, 0, 0, 0, 395, + 0, 586, 587, 663, 381, 483, 596, 334, 346, 349, + 339, 358, 0, 359, 335, 336, 341, 343, 344, 345, + 350, 351, 355, 361, 248, 209, 387, 396, 573, 311, + 215, 216, 217, 519, 520, 521, 522, 611, 612, 616, + 204, 459, 460, 461, 462, 291, 606, 308, 465, 464, + 330, 331, 376, 446, 535, 537, 548, 552, 554, 556, + 562, 565, 536, 538, 549, 553, 555, 557, 563, 566, + 525, 527, 529, 531, 544, 543, 540, 568, 569, 546, + 551, 530, 542, 547, 560, 567, 564, 524, 528, 532, + 541, 559, 558, 539, 550, 561, 545, 533, 526, 534, + 0, 196, 220, 365, 0, 451, 287, 641, 610, 481, + 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, + 231, 235, 242, 260, 275, 277, 284, 297, 309, 317, + 318, 321, 327, 377, 383, 384, 385, 386, 406, 407, + 408, 411, 414, 415, 418, 420, 421, 424, 428, 432, + 433, 434, 436, 438, 440, 
452, 457, 471, 472, 473, + 474, 475, 478, 479, 485, 486, 487, 488, 489, 497, + 498, 511, 581, 583, 598, 617, 623, 477, 300, 301, + 441, 442, 313, 314, 637, 638, 1437, 593, 624, 591, + 636, 618, 435, 375, 0, 0, 378, 280, 304, 319, + 0, 609, 499, 226, 463, 289, 250, 0, 0, 210, + 245, 229, 258, 273, 276, 323, 388, 397, 426, 431, + 295, 270, 243, 456, 240, 482, 514, 515, 516, 518, + 392, 265, 430, 393, 0, 373, 571, 572, 315, 0, + 523, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 413, 0, 0, 0, 0, 0, 0, 0, + 0, 269, 0, 0, 0, 0, 363, 266, 0, 0, + 427, 0, 203, 0, 484, 251, 374, 371, 578, 281, + 272, 268, 249, 316, 382, 425, 513, 419, 0, 367, + 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 322, + 247, 324, 202, 410, 495, 285, 0, 0, 0, 0, + 0, 194, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 237, 0, 0, 244, 0, 0, 0, 348, 357, + 356, 337, 338, 340, 342, 347, 354, 360, 0, 0, + 602, 0, 0, 0, 264, 320, 271, 263, 575, 0, + 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 296, 0, 399, 256, 0, 450, + 0, 0, 0, 620, 0, 0, 0, 0, 0, 0, + 0, 362, 0, 329, 197, 224, 0, 0, 409, 458, + 470, 0, 0, 0, 252, 0, 468, 423, 597, 232, + 283, 455, 429, 466, 437, 286, 0, 0, 467, 369, + 580, 447, 594, 621, 622, 262, 403, 607, 517, 615, + 639, 225, 259, 417, 502, 600, 491, 394, 576, 577, + 328, 490, 294, 201, 366, 627, 223, 476, 368, 241, + 230, 582, 604, 298, 288, 453, 634, 212, 512, 592, + 238, 480, 0, 0, 642, 246, 501, 214, 589, 500, + 390, 325, 326, 213, 0, 454, 267, 292, 0, 0, + 257, 412, 584, 585, 255, 643, 227, 614, 219, 0, + 613, 405, 579, 590, 391, 380, 218, 588, 389, 379, + 333, 352, 353, 279, 306, 444, 372, 445, 305, 307, + 401, 400, 402, 206, 601, 0, 207, 0, 496, 603, + 644, 449, 211, 233, 234, 236, 0, 278, 282, 290, 
+ 293, 302, 303, 312, 364, 416, 443, 439, 448, 0, + 574, 595, 608, 619, 625, 626, 628, 629, 630, 631, + 632, 635, 633, 404, 310, 492, 332, 370, 0, 0, + 422, 469, 239, 599, 493, 199, 0, 0, 0, 0, + 253, 254, 0, 570, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 645, 646, 647, 648, 649, 650, 651, + 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, + 662, 640, 503, 509, 504, 505, 506, 507, 508, 0, + 510, 0, 0, 0, 0, 0, 395, 0, 586, 587, + 663, 381, 483, 596, 334, 346, 349, 339, 358, 0, + 359, 335, 336, 341, 343, 344, 345, 350, 351, 355, + 361, 248, 209, 387, 396, 573, 311, 215, 216, 217, + 519, 520, 521, 522, 611, 612, 616, 204, 459, 460, + 461, 462, 291, 606, 308, 465, 464, 330, 331, 376, + 446, 535, 537, 548, 552, 554, 556, 562, 565, 536, + 538, 549, 553, 555, 557, 563, 566, 525, 527, 529, + 531, 544, 543, 540, 568, 569, 546, 551, 530, 542, + 547, 560, 567, 564, 524, 528, 532, 541, 559, 558, + 539, 550, 561, 545, 533, 526, 534, 0, 196, 220, + 365, 0, 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1030, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1036, 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 392, 0, 372, 568, 569, 314, 520, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 411, 0, - 0, 0, 0, 0, 0, 0, 0, 269, 0, 0, - 0, 0, 362, 266, 0, 0, 425, 0, 203, 0, - 481, 251, 373, 370, 575, 281, 272, 268, 249, 315, - 381, 423, 510, 417, 0, 366, 0, 0, 491, 396, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 321, 
247, 323, 202, 408, - 492, 285, 0, 0, 0, 0, 0, 194, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 237, 0, 0, - 244, 0, 0, 0, 347, 356, 355, 336, 337, 339, - 341, 346, 353, 359, 0, 0, 0, 0, 0, 264, - 319, 271, 263, 572, 0, 0, 0, 0, 0, 0, - 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 260, 275, 277, 284, 297, 309, 317, 318, 321, 327, + 377, 383, 384, 385, 386, 406, 407, 408, 411, 414, + 415, 418, 420, 421, 424, 428, 432, 433, 434, 436, + 438, 440, 452, 457, 471, 472, 473, 474, 475, 478, + 479, 485, 486, 487, 488, 489, 497, 498, 511, 581, + 583, 598, 617, 623, 477, 300, 301, 441, 442, 313, + 314, 637, 638, 299, 593, 624, 591, 636, 618, 435, + 375, 0, 0, 378, 280, 304, 319, 0, 609, 499, + 226, 463, 289, 250, 0, 0, 210, 245, 229, 258, + 273, 276, 323, 388, 397, 426, 431, 295, 270, 243, + 456, 240, 482, 514, 515, 516, 518, 392, 265, 430, + 393, 0, 373, 571, 572, 315, 0, 523, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 413, + 0, 0, 0, 0, 0, 0, 0, 0, 269, 0, + 0, 0, 0, 363, 266, 0, 0, 427, 0, 203, + 0, 484, 251, 374, 371, 578, 281, 272, 268, 249, + 316, 382, 425, 513, 419, 0, 367, 0, 0, 494, + 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 322, 247, 324, 202, + 410, 495, 285, 0, 0, 0, 0, 0, 194, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 237, 0, + 0, 244, 0, 0, 0, 348, 357, 356, 337, 338, + 340, 342, 347, 354, 360, 0, 0, 602, 0, 0, + 0, 264, 320, 271, 263, 575, 0, 0, 0, 0, + 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, - 0, 397, 256, 0, 448, 0, 662, 0, 616, 0, - 0, 0, 0, 0, 0, 0, 361, 0, 328, 197, - 224, 0, 0, 407, 456, 468, 0, 0, 0, 252, - 0, 466, 421, 594, 232, 283, 453, 427, 464, 435, - 286, 0, 0, 465, 368, 577, 445, 591, 617, 618, - 262, 401, 603, 514, 611, 635, 225, 259, 415, 499, - 597, 488, 393, 573, 574, 
327, 487, 294, 201, 365, - 623, 223, 474, 367, 241, 230, 579, 600, 288, 451, - 630, 212, 509, 589, 238, 478, 0, 0, 638, 246, - 498, 214, 586, 497, 389, 324, 325, 213, 0, 452, - 267, 292, 0, 0, 257, 410, 581, 582, 255, 639, - 227, 610, 219, 0, 609, 403, 576, 587, 390, 379, - 218, 585, 388, 378, 332, 351, 352, 279, 305, 442, - 371, 443, 304, 306, 399, 398, 400, 206, 598, 0, - 207, 0, 493, 599, 640, 447, 211, 233, 234, 236, - 0, 278, 282, 290, 293, 301, 302, 311, 363, 414, - 441, 437, 446, 0, 571, 592, 604, 615, 621, 622, - 624, 625, 626, 627, 628, 631, 629, 402, 309, 489, - 331, 369, 0, 0, 420, 467, 239, 596, 490, 199, - 0, 0, 0, 0, 253, 254, 0, 567, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 641, 642, 643, - 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, - 654, 655, 656, 657, 658, 636, 500, 506, 501, 502, - 503, 504, 505, 0, 507, 0, 0, 0, 0, 0, - 0, 583, 584, 659, 380, 480, 593, 333, 345, 348, - 338, 357, 0, 358, 334, 335, 340, 342, 343, 344, - 349, 350, 354, 360, 248, 209, 386, 394, 570, 310, - 215, 216, 217, 516, 517, 518, 519, 607, 608, 612, - 204, 457, 458, 459, 460, 291, 602, 307, 463, 462, - 329, 330, 375, 444, 532, 534, 545, 549, 551, 553, - 559, 562, 533, 535, 546, 550, 552, 554, 560, 563, - 522, 524, 526, 528, 541, 540, 537, 565, 566, 543, - 548, 527, 539, 544, 557, 564, 561, 521, 525, 529, - 538, 556, 555, 536, 547, 558, 542, 530, 523, 531, - 0, 196, 220, 364, 0, 449, 287, 637, 606, 601, - 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, - 235, 242, 260, 275, 277, 284, 297, 308, 316, 317, - 320, 326, 376, 382, 383, 384, 385, 404, 405, 406, - 409, 412, 413, 416, 418, 419, 422, 426, 430, 431, - 432, 434, 436, 438, 450, 455, 469, 470, 471, 472, - 473, 476, 477, 482, 483, 484, 485, 486, 494, 495, - 508, 578, 580, 595, 613, 619, 475, 299, 300, 439, - 440, 312, 313, 633, 634, 298, 590, 620, 588, 632, - 614, 433, 374, 0, 0, 377, 280, 303, 318, 0, - 605, 496, 226, 461, 289, 250, 0, 0, 210, 245, - 229, 258, 273, 276, 
322, 387, 395, 424, 429, 295, - 270, 243, 454, 240, 479, 511, 512, 513, 515, 391, - 265, 428, 392, 0, 372, 568, 569, 314, 520, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 411, 0, 0, 0, 0, 0, 0, 0, 0, 269, - 0, 0, 0, 0, 362, 266, 0, 0, 425, 0, - 203, 0, 481, 251, 373, 370, 575, 281, 272, 268, - 249, 315, 381, 423, 510, 417, 0, 366, 0, 0, - 491, 396, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 321, 247, 323, - 202, 408, 492, 285, 0, 0, 0, 0, 0, 709, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 237, - 0, 0, 244, 0, 0, 0, 347, 356, 355, 336, - 337, 339, 341, 346, 353, 359, 0, 0, 0, 0, - 0, 264, 319, 271, 263, 572, 0, 0, 0, 0, - 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 296, 0, 399, 256, 0, 450, 0, 666, 0, + 620, 0, 0, 0, 0, 0, 0, 0, 362, 0, + 329, 197, 224, 0, 0, 409, 458, 470, 0, 0, + 0, 252, 0, 468, 423, 597, 232, 283, 455, 429, + 466, 437, 286, 0, 0, 467, 369, 580, 447, 594, + 621, 622, 262, 403, 607, 517, 615, 639, 225, 259, + 417, 502, 600, 491, 394, 576, 577, 328, 490, 294, + 201, 366, 627, 223, 476, 368, 241, 230, 582, 604, + 298, 288, 453, 634, 212, 512, 592, 238, 480, 0, + 0, 642, 246, 501, 214, 589, 500, 390, 325, 326, + 213, 0, 454, 267, 292, 0, 0, 257, 412, 584, + 585, 255, 643, 227, 614, 219, 0, 613, 405, 579, + 590, 391, 380, 218, 588, 389, 379, 333, 352, 353, + 279, 306, 444, 372, 445, 305, 307, 401, 400, 402, + 206, 601, 0, 207, 0, 496, 603, 644, 449, 211, + 233, 234, 236, 0, 278, 282, 290, 293, 302, 303, + 312, 364, 416, 443, 439, 448, 0, 574, 595, 608, + 619, 625, 626, 628, 629, 630, 631, 632, 635, 633, + 404, 310, 492, 332, 370, 0, 0, 422, 469, 239, + 599, 493, 199, 0, 0, 0, 0, 253, 254, 0, + 570, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 662, 640, 503, + 509, 504, 505, 506, 507, 508, 0, 510, 0, 0, + 0, 0, 0, 395, 0, 586, 
587, 663, 381, 483, + 596, 334, 346, 349, 339, 358, 0, 359, 335, 336, + 341, 343, 344, 345, 350, 351, 355, 361, 248, 209, + 387, 396, 573, 311, 215, 216, 217, 519, 520, 521, + 522, 611, 612, 616, 204, 459, 460, 461, 462, 291, + 606, 308, 465, 464, 330, 331, 376, 446, 535, 537, + 548, 552, 554, 556, 562, 565, 536, 538, 549, 553, + 555, 557, 563, 566, 525, 527, 529, 531, 544, 543, + 540, 568, 569, 546, 551, 530, 542, 547, 560, 567, + 564, 524, 528, 532, 541, 559, 558, 539, 550, 561, + 545, 533, 526, 534, 0, 196, 220, 365, 0, 451, + 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 198, 200, 208, 221, 231, 235, 242, 260, 275, 277, + 284, 297, 309, 317, 318, 321, 327, 377, 383, 384, + 385, 386, 406, 407, 408, 411, 414, 415, 418, 420, + 421, 424, 428, 432, 433, 434, 436, 438, 440, 452, + 457, 471, 472, 473, 474, 475, 478, 479, 485, 486, + 487, 488, 489, 497, 498, 511, 581, 583, 598, 617, + 623, 477, 300, 301, 441, 442, 313, 314, 637, 638, + 299, 593, 624, 591, 636, 618, 435, 375, 0, 0, + 378, 280, 304, 319, 0, 609, 499, 226, 463, 289, + 250, 0, 0, 210, 245, 229, 258, 273, 276, 323, + 388, 397, 426, 431, 295, 270, 243, 456, 240, 482, + 514, 515, 516, 518, 392, 265, 430, 393, 0, 373, + 571, 572, 315, 0, 523, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 413, 0, 0, 0, + 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, + 363, 266, 0, 0, 427, 0, 203, 0, 484, 251, + 374, 371, 578, 281, 272, 268, 249, 316, 382, 425, + 513, 419, 0, 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 296, 0, 397, 256, 0, 448, 0, 0, 0, - 616, 0, 0, 0, 0, 0, 0, 0, 361, 0, - 328, 197, 224, 0, 0, 407, 456, 468, 0, 0, - 0, 252, 0, 466, 421, 594, 232, 283, 453, 427, - 464, 435, 286, 0, 0, 465, 368, 577, 445, 591, - 617, 618, 262, 401, 603, 514, 611, 635, 225, 259, - 415, 499, 597, 488, 393, 573, 574, 327, 487, 294, - 201, 365, 623, 223, 474, 367, 241, 230, 579, 600, - 288, 451, 630, 212, 509, 589, 238, 478, 0, 0, - 638, 246, 498, 214, 586, 497, 389, 324, 325, 
213, - 0, 452, 267, 292, 0, 0, 257, 410, 581, 582, - 255, 639, 227, 610, 219, 0, 609, 403, 576, 587, - 390, 379, 218, 585, 388, 378, 332, 351, 352, 279, - 305, 442, 371, 443, 304, 306, 399, 398, 400, 206, - 598, 0, 207, 0, 493, 599, 640, 447, 211, 233, - 234, 236, 0, 278, 282, 290, 293, 301, 302, 311, - 363, 414, 441, 437, 446, 0, 571, 592, 604, 615, - 621, 622, 624, 625, 626, 627, 628, 631, 629, 402, - 309, 489, 331, 369, 0, 0, 420, 467, 239, 596, - 490, 199, 0, 0, 0, 0, 253, 254, 0, 567, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 641, - 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, - 652, 653, 654, 655, 656, 657, 658, 636, 500, 506, - 501, 502, 503, 504, 505, 0, 507, 0, 0, 0, - 0, 0, 0, 583, 584, 659, 380, 480, 593, 333, - 345, 348, 338, 357, 0, 358, 334, 335, 340, 342, - 343, 344, 349, 350, 354, 360, 248, 209, 386, 394, - 570, 310, 215, 216, 217, 516, 517, 518, 519, 607, - 608, 612, 204, 457, 458, 459, 460, 291, 602, 307, - 463, 462, 329, 330, 375, 444, 532, 534, 545, 549, - 551, 553, 559, 562, 533, 535, 546, 550, 552, 554, - 560, 563, 522, 524, 526, 528, 541, 540, 537, 565, - 566, 543, 548, 527, 539, 544, 557, 564, 561, 521, - 525, 529, 538, 556, 555, 536, 547, 558, 542, 530, - 523, 531, 0, 196, 220, 364, 0, 449, 287, 637, - 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, + 0, 0, 0, 322, 247, 324, 202, 410, 495, 285, + 0, 0, 0, 0, 0, 713, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, + 0, 0, 348, 357, 356, 337, 338, 340, 342, 347, + 354, 360, 0, 0, 602, 0, 0, 0, 264, 320, + 271, 263, 575, 0, 0, 0, 0, 0, 0, 0, + 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 296, 0, + 399, 256, 0, 450, 0, 0, 0, 620, 0, 0, + 0, 0, 0, 0, 0, 362, 0, 329, 197, 224, + 0, 0, 409, 458, 470, 0, 0, 0, 252, 0, + 468, 423, 597, 232, 283, 
455, 429, 466, 437, 286, + 0, 0, 467, 369, 580, 447, 594, 621, 622, 262, + 403, 607, 517, 615, 639, 225, 259, 417, 502, 600, + 491, 394, 576, 577, 328, 490, 294, 201, 366, 627, + 223, 476, 368, 241, 230, 582, 604, 298, 288, 453, + 634, 212, 512, 592, 238, 480, 0, 0, 642, 246, + 501, 214, 589, 500, 390, 325, 326, 213, 0, 454, + 267, 292, 0, 0, 257, 412, 584, 585, 255, 643, + 227, 614, 219, 0, 613, 405, 579, 590, 391, 380, + 218, 588, 389, 379, 333, 352, 353, 279, 306, 444, + 372, 445, 305, 307, 401, 400, 402, 206, 601, 0, + 207, 0, 496, 603, 644, 449, 211, 233, 234, 236, + 0, 278, 282, 290, 293, 302, 303, 312, 364, 416, + 443, 439, 448, 0, 574, 595, 608, 619, 625, 626, + 628, 629, 630, 631, 632, 635, 633, 404, 310, 492, + 332, 370, 0, 0, 422, 469, 239, 599, 493, 199, + 0, 0, 0, 0, 253, 254, 0, 570, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 645, 646, 647, + 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 662, 640, 503, 509, 504, 505, + 506, 507, 508, 0, 510, 0, 0, 0, 0, 0, + 395, 0, 586, 587, 663, 381, 483, 596, 334, 346, + 349, 339, 358, 0, 359, 335, 336, 341, 343, 344, + 345, 350, 351, 355, 361, 248, 209, 387, 396, 573, + 311, 215, 216, 217, 519, 520, 521, 522, 611, 612, + 616, 204, 459, 460, 461, 462, 291, 606, 308, 465, + 464, 330, 331, 376, 446, 535, 537, 548, 552, 554, + 556, 562, 565, 536, 538, 549, 553, 555, 557, 563, + 566, 525, 527, 529, 531, 544, 543, 540, 568, 569, + 546, 551, 530, 542, 547, 560, 567, 564, 524, 528, + 532, 541, 559, 558, 539, 550, 561, 545, 533, 526, + 534, 0, 196, 220, 365, 0, 451, 287, 641, 610, + 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, - 221, 231, 235, 242, 260, 275, 277, 284, 297, 308, - 316, 317, 320, 326, 376, 382, 383, 384, 385, 4028, - 405, 406, 409, 412, 413, 416, 418, 419, 422, 426, - 430, 431, 432, 434, 436, 438, 450, 455, 469, 470, - 471, 472, 473, 476, 477, 482, 483, 484, 485, 486, - 494, 495, 508, 578, 580, 595, 613, 619, 475, 299, - 300, 439, 440, 
312, 313, 633, 634, 298, 590, 620, - 588, 632, 614, 433, 374, 0, 0, 377, 280, 303, - 318, 0, 605, 496, 226, 461, 289, 250, 0, 0, - 210, 245, 229, 258, 273, 276, 322, 387, 395, 424, - 429, 295, 270, 243, 454, 240, 479, 511, 512, 513, - 515, 391, 265, 428, 392, 0, 372, 568, 569, 314, - 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 411, 0, 0, 0, 0, 0, 0, 0, - 0, 269, 0, 0, 0, 0, 362, 266, 0, 0, - 425, 0, 203, 0, 481, 251, 373, 370, 575, 281, - 272, 268, 249, 315, 381, 423, 510, 417, 0, 366, - 0, 0, 491, 396, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 321, - 247, 323, 202, 408, 492, 285, 0, 0, 0, 0, - 0, 709, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 237, 0, 0, 244, 0, 0, 0, 347, 356, - 355, 336, 337, 339, 341, 346, 353, 359, 0, 0, - 0, 0, 0, 264, 319, 271, 263, 572, 0, 0, - 0, 0, 0, 0, 0, 0, 228, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, + 221, 231, 235, 242, 260, 275, 277, 284, 297, 309, + 317, 318, 321, 327, 377, 383, 384, 385, 386, 4086, + 407, 408, 411, 414, 415, 418, 420, 421, 424, 428, + 432, 433, 434, 436, 438, 440, 452, 457, 471, 472, + 473, 474, 475, 478, 479, 485, 486, 487, 488, 489, + 497, 498, 511, 581, 583, 598, 617, 623, 477, 300, + 301, 441, 442, 313, 314, 637, 638, 299, 593, 624, + 591, 636, 618, 435, 375, 0, 0, 378, 280, 304, + 319, 0, 609, 499, 226, 463, 289, 250, 0, 0, + 210, 245, 229, 258, 273, 276, 323, 388, 397, 426, + 431, 295, 270, 243, 456, 240, 482, 514, 515, 516, + 518, 392, 265, 430, 393, 0, 373, 571, 572, 315, + 0, 523, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 413, 0, 0, 0, 0, 0, 0, + 0, 0, 269, 0, 0, 0, 0, 363, 266, 0, + 0, 427, 0, 203, 0, 484, 251, 374, 371, 578, + 281, 272, 268, 249, 316, 382, 425, 513, 419, 0, + 367, 0, 0, 494, 398, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 322, 247, 324, 202, 410, 495, 285, 0, 0, 0, + 0, 0, 713, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 237, 0, 0, 244, 0, 0, 0, 348, + 357, 356, 337, 338, 340, 342, 347, 354, 360, 0, + 0, 602, 0, 0, 0, 264, 320, 271, 263, 575, + 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 296, 0, 397, 256, 0, 448, 0, - 0, 0, 616, 0, 0, 0, 0, 0, 0, 0, - 361, 0, 328, 197, 224, 0, 0, 407, 456, 468, - 0, 0, 0, 252, 0, 466, 421, 594, 232, 283, - 453, 427, 464, 435, 286, 0, 0, 465, 368, 577, - 445, 591, 617, 618, 262, 401, 603, 514, 611, 635, - 225, 259, 415, 499, 597, 488, 393, 573, 574, 327, - 487, 294, 201, 365, 623, 223, 474, 367, 241, 230, - 579, 600, 288, 451, 630, 212, 509, 589, 238, 478, - 0, 0, 638, 246, 498, 214, 586, 497, 389, 324, - 325, 213, 0, 452, 267, 292, 0, 0, 257, 410, - 581, 582, 255, 639, 227, 610, 219, 0, 609, 403, - 576, 587, 390, 379, 218, 585, 388, 378, 332, 351, - 352, 279, 305, 442, 371, 443, 304, 306, 399, 398, - 400, 206, 598, 0, 207, 0, 493, 599, 640, 447, - 211, 233, 234, 236, 0, 278, 282, 290, 293, 301, - 302, 311, 363, 414, 441, 437, 446, 0, 571, 592, - 604, 615, 621, 622, 624, 625, 626, 627, 628, 631, - 629, 402, 309, 489, 331, 369, 0, 0, 420, 467, - 239, 596, 490, 199, 0, 0, 0, 0, 253, 254, - 0, 567, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 641, 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, 658, 636, - 500, 506, 501, 502, 503, 504, 505, 0, 507, 0, - 0, 0, 0, 0, 0, 583, 584, 659, 380, 480, - 593, 333, 345, 348, 338, 357, 0, 358, 334, 335, - 340, 342, 343, 344, 349, 350, 354, 360, 248, 209, - 386, 394, 570, 310, 215, 216, 217, 516, 517, 518, - 519, 607, 608, 612, 204, 457, 458, 459, 460, 291, - 602, 307, 463, 462, 329, 330, 375, 444, 532, 534, - 545, 549, 551, 553, 559, 562, 533, 535, 546, 550, - 552, 554, 560, 563, 522, 524, 526, 528, 541, 540, - 537, 565, 566, 543, 548, 527, 539, 544, 557, 564, - 561, 521, 525, 529, 538, 556, 555, 536, 547, 558, - 542, 530, 523, 531, 0, 196, 220, 364, 0, 449, - 287, 637, 606, 601, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, - 200, 208, 221, 231, 235, 242, 260, 275, 277, 284, - 297, 308, 316, 317, 320, 326, 376, 382, 383, 384, - 385, 404, 405, 406, 409, 412, 413, 416, 418, 419, - 422, 426, 430, 431, 432, 434, 436, 438, 450, 455, - 469, 470, 471, 472, 473, 476, 477, 482, 483, 484, - 485, 486, 494, 495, 508, 578, 580, 595, 613, 619, - 475, 299, 300, 439, 440, 312, 313, 633, 634, 298, - 590, 620, 588, 632, 614, 433, 374, 0, 0, 377, - 280, 303, 318, 0, 605, 496, 226, 461, 289, 250, - 0, 0, 210, 245, 229, 258, 273, 276, 322, 387, - 395, 424, 429, 295, 270, 243, 454, 240, 479, 511, - 512, 513, 515, 391, 265, 428, 392, 0, 372, 568, - 569, 314, 520, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 411, 0, 0, 0, 0, 0, - 0, 0, 0, 269, 0, 0, 0, 0, 362, 266, - 0, 0, 425, 0, 203, 0, 481, 251, 373, 370, - 575, 281, 272, 268, 249, 315, 381, 423, 510, 417, - 0, 366, 0, 0, 491, 396, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 321, 247, 323, 202, 408, 492, 285, 0, 0, - 0, 0, 0, 941, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 237, 0, 0, 244, 0, 0, 0, - 347, 356, 355, 336, 337, 339, 341, 346, 353, 359, - 0, 0, 0, 0, 0, 264, 319, 271, 263, 572, - 0, 0, 0, 0, 0, 0, 0, 0, 228, 0, + 0, 0, 0, 0, 0, 296, 0, 399, 256, 0, + 450, 0, 0, 0, 620, 0, 0, 0, 0, 0, + 0, 0, 362, 0, 329, 197, 224, 0, 0, 409, + 458, 470, 0, 0, 0, 252, 0, 468, 423, 597, + 232, 283, 455, 429, 466, 437, 286, 0, 0, 467, + 369, 580, 447, 594, 621, 622, 262, 403, 607, 517, + 615, 639, 225, 259, 417, 502, 600, 491, 394, 576, + 577, 328, 490, 294, 201, 366, 627, 223, 476, 368, + 241, 230, 582, 604, 298, 288, 453, 634, 212, 512, + 592, 238, 480, 0, 0, 642, 246, 501, 214, 589, + 500, 390, 325, 326, 213, 0, 454, 267, 292, 0, + 0, 257, 412, 584, 585, 255, 643, 227, 614, 219, + 0, 613, 405, 579, 590, 391, 380, 218, 588, 389, + 379, 333, 352, 353, 279, 306, 444, 372, 445, 305, + 307, 401, 400, 402, 206, 601, 0, 207, 0, 496, + 603, 644, 449, 211, 233, 234, 236, 0, 278, 282, + 290, 293, 302, 303, 312, 364, 416, 443, 439, 448, + 
0, 574, 595, 608, 619, 625, 626, 628, 629, 630, + 631, 632, 635, 633, 404, 310, 492, 332, 370, 0, + 0, 422, 469, 239, 599, 493, 199, 0, 0, 0, + 0, 253, 254, 0, 570, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, + 661, 662, 640, 503, 509, 504, 505, 506, 507, 508, + 0, 510, 0, 0, 0, 0, 0, 395, 0, 586, + 587, 663, 381, 483, 596, 334, 346, 349, 339, 358, + 0, 359, 335, 336, 341, 343, 344, 345, 350, 351, + 355, 361, 248, 209, 387, 396, 573, 311, 215, 216, + 217, 519, 520, 521, 522, 611, 612, 616, 204, 459, + 460, 461, 462, 291, 606, 308, 465, 464, 330, 331, + 376, 446, 535, 537, 548, 552, 554, 556, 562, 565, + 536, 538, 549, 553, 555, 557, 563, 566, 525, 527, + 529, 531, 544, 543, 540, 568, 569, 546, 551, 530, + 542, 547, 560, 567, 564, 524, 528, 532, 541, 559, + 558, 539, 550, 561, 545, 533, 526, 534, 0, 196, + 220, 365, 0, 451, 287, 641, 610, 481, 605, 205, + 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, + 242, 260, 275, 277, 284, 297, 309, 317, 318, 321, + 327, 377, 383, 384, 385, 386, 406, 407, 408, 411, + 414, 415, 418, 420, 421, 424, 428, 432, 433, 434, + 436, 438, 440, 452, 457, 471, 472, 473, 474, 475, + 478, 479, 485, 486, 487, 488, 489, 497, 498, 511, + 581, 583, 598, 617, 623, 477, 300, 301, 441, 442, + 313, 314, 637, 638, 299, 593, 624, 591, 636, 618, + 435, 375, 0, 0, 378, 280, 304, 319, 0, 609, + 499, 226, 463, 289, 250, 0, 0, 210, 245, 229, + 258, 273, 276, 323, 388, 397, 426, 431, 295, 270, + 243, 456, 240, 482, 514, 515, 516, 518, 392, 265, + 430, 393, 0, 373, 571, 572, 315, 0, 523, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 413, 0, 0, 0, 0, 0, 0, 0, 0, 269, + 0, 0, 0, 0, 363, 266, 0, 0, 427, 0, + 203, 0, 484, 251, 374, 371, 578, 281, 272, 268, + 249, 316, 382, 425, 513, 419, 0, 367, 0, 0, + 494, 398, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 322, 247, 324, + 202, 410, 495, 285, 0, 0, 0, 0, 0, 946, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 
237, + 0, 0, 244, 0, 0, 0, 348, 357, 356, 337, + 338, 340, 342, 347, 354, 360, 0, 0, 602, 0, + 0, 0, 264, 320, 271, 263, 575, 0, 0, 0, + 0, 0, 0, 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, + 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -7032,71 +7150,71 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 296, 0, 397, 256, 0, - 448, 0, 0, 0, 616, 0, 0, 0, 0, 0, - 0, 0, 361, 0, 328, 197, 224, 0, 0, 407, - 456, 468, 0, 0, 0, 252, 0, 466, 421, 594, - 232, 283, 453, 427, 464, 435, 286, 0, 0, 465, - 368, 577, 445, 591, 617, 618, 262, 401, 603, 514, - 611, 635, 225, 259, 415, 499, 597, 488, 393, 573, - 574, 327, 487, 294, 201, 365, 623, 223, 474, 367, - 241, 230, 579, 600, 288, 451, 630, 212, 509, 589, - 238, 478, 0, 0, 638, 246, 498, 214, 586, 497, - 389, 324, 325, 213, 0, 452, 267, 292, 0, 0, - 257, 410, 581, 582, 255, 639, 227, 610, 219, 0, - 609, 403, 576, 587, 390, 379, 218, 585, 388, 378, - 332, 351, 352, 279, 305, 442, 371, 443, 304, 306, - 399, 398, 400, 206, 598, 0, 207, 0, 493, 599, - 640, 447, 211, 233, 234, 236, 0, 278, 282, 290, - 293, 301, 302, 311, 363, 414, 441, 437, 446, 0, - 571, 592, 604, 615, 621, 622, 624, 625, 626, 627, - 628, 631, 629, 402, 309, 489, 331, 369, 0, 0, - 420, 467, 239, 596, 490, 199, 0, 0, 0, 0, - 253, 254, 0, 567, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 641, 642, 643, 644, 645, 646, 647, - 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, - 658, 636, 500, 506, 501, 502, 503, 504, 505, 0, - 507, 0, 0, 0, 0, 0, 0, 583, 584, 659, - 380, 480, 593, 333, 345, 348, 338, 357, 0, 358, - 334, 335, 340, 342, 343, 344, 349, 350, 354, 360, - 248, 209, 386, 394, 570, 310, 215, 216, 217, 516, - 517, 518, 519, 607, 608, 612, 204, 457, 458, 459, - 460, 291, 602, 307, 463, 462, 329, 330, 375, 444, - 532, 534, 545, 549, 551, 553, 559, 562, 533, 535, - 546, 550, 
552, 554, 560, 563, 522, 524, 526, 528, - 541, 540, 537, 565, 566, 543, 548, 527, 539, 544, - 557, 564, 561, 521, 525, 529, 538, 556, 555, 536, - 547, 558, 542, 530, 523, 531, 0, 196, 220, 364, - 0, 449, 287, 637, 606, 601, 205, 222, 0, 261, + 0, 0, 296, 0, 399, 256, 0, 450, 0, 0, + 0, 620, 0, 0, 0, 0, 0, 0, 0, 362, + 0, 329, 197, 224, 0, 0, 409, 458, 470, 0, + 0, 0, 252, 0, 468, 423, 597, 232, 283, 455, + 429, 466, 437, 286, 0, 0, 467, 369, 580, 447, + 594, 621, 622, 262, 403, 607, 517, 615, 639, 225, + 259, 417, 502, 600, 491, 394, 576, 577, 328, 490, + 294, 201, 366, 627, 223, 476, 368, 241, 230, 582, + 604, 298, 288, 453, 634, 212, 512, 592, 238, 480, + 0, 0, 642, 246, 501, 214, 589, 500, 390, 325, + 326, 213, 0, 454, 267, 292, 0, 0, 257, 412, + 584, 585, 255, 643, 227, 614, 219, 0, 613, 405, + 579, 590, 391, 380, 218, 588, 389, 379, 333, 352, + 353, 279, 306, 444, 372, 445, 305, 307, 401, 400, + 402, 206, 601, 0, 207, 0, 496, 603, 644, 449, + 211, 233, 234, 236, 0, 278, 282, 290, 293, 302, + 303, 312, 364, 416, 443, 439, 448, 0, 574, 595, + 608, 619, 625, 626, 628, 629, 630, 631, 632, 635, + 633, 404, 310, 492, 332, 370, 0, 0, 422, 469, + 239, 599, 493, 199, 0, 0, 0, 0, 253, 254, + 0, 570, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 640, + 503, 509, 504, 505, 506, 507, 508, 0, 510, 0, + 0, 0, 0, 0, 395, 0, 586, 587, 663, 381, + 483, 596, 334, 346, 349, 339, 358, 0, 359, 335, + 336, 341, 343, 344, 345, 350, 351, 355, 361, 248, + 209, 387, 396, 573, 311, 215, 216, 217, 519, 520, + 521, 522, 611, 612, 616, 204, 459, 460, 461, 462, + 291, 606, 308, 465, 464, 330, 331, 376, 446, 535, + 537, 548, 552, 554, 556, 562, 565, 536, 538, 549, + 553, 555, 557, 563, 566, 525, 527, 529, 531, 544, + 543, 540, 568, 569, 546, 551, 530, 542, 547, 560, + 567, 564, 524, 528, 532, 541, 559, 558, 539, 550, + 561, 545, 533, 526, 534, 0, 196, 220, 365, 0, + 451, 287, 641, 610, 481, 605, 205, 222, 0, 261, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, 260, 275, - 277, 284, 297, 308, 316, 317, 320, 326, 376, 382, - 383, 384, 385, 404, 405, 406, 409, 412, 413, 416, - 418, 419, 422, 426, 430, 431, 432, 434, 436, 438, - 450, 455, 469, 470, 471, 472, 473, 476, 477, 482, - 483, 484, 485, 486, 494, 495, 508, 578, 580, 595, - 613, 619, 475, 299, 300, 439, 440, 312, 313, 633, - 634, 298, 590, 620, 588, 632, 614, 433, 374, 0, - 0, 377, 280, 303, 318, 0, 605, 496, 226, 461, + 277, 284, 297, 309, 317, 318, 321, 327, 377, 383, + 384, 385, 386, 406, 407, 408, 411, 414, 415, 418, + 420, 421, 424, 428, 432, 433, 434, 436, 438, 440, + 452, 457, 471, 472, 473, 474, 475, 478, 479, 485, + 486, 487, 488, 489, 497, 498, 511, 581, 583, 598, + 617, 623, 477, 300, 301, 441, 442, 313, 314, 637, + 638, 299, 593, 624, 591, 636, 618, 435, 375, 0, + 0, 378, 280, 304, 319, 0, 609, 499, 226, 463, 289, 250, 0, 0, 210, 245, 229, 258, 273, 276, - 322, 387, 395, 424, 429, 295, 270, 243, 454, 240, - 479, 511, 512, 513, 515, 391, 265, 428, 392, 0, - 372, 568, 569, 314, 520, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 411, 0, 0, 0, - 0, 0, 0, 0, 0, 269, 0, 0, 0, 0, - 362, 266, 0, 0, 425, 0, 203, 0, 481, 251, - 373, 370, 575, 281, 272, 268, 249, 315, 381, 423, - 510, 417, 0, 366, 0, 0, 491, 396, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 321, 247, 323, 202, 408, 492, 285, - 0, 0, 0, 0, 0, 194, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 237, 0, 0, 244, 0, - 0, 0, 347, 356, 355, 336, 337, 339, 341, 346, - 353, 359, 0, 0, 0, 0, 0, 264, 319, 271, - 263, 572, 0, 0, 0, 0, 0, 0, 0, 0, - 228, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, + 323, 388, 397, 426, 431, 295, 270, 243, 456, 240, + 482, 514, 515, 516, 518, 392, 265, 430, 393, 0, + 373, 571, 572, 315, 0, 523, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 413, 0, 0, + 0, 0, 0, 0, 0, 0, 269, 0, 0, 0, + 0, 363, 266, 0, 0, 427, 0, 203, 0, 484, + 251, 374, 371, 578, 281, 272, 268, 249, 316, 382, + 425, 513, 419, 0, 367, 0, 0, 
494, 398, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 322, 247, 324, 202, 410, 495, + 285, 0, 0, 0, 0, 0, 194, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 237, 0, 0, 244, + 0, 0, 0, 348, 357, 356, 337, 338, 340, 342, + 347, 354, 360, 0, 0, 602, 0, 0, 0, 264, + 320, 271, 263, 575, 0, 0, 0, 0, 0, 0, + 0, 0, 228, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -7104,80 +7222,81 @@ var yyAct = [...]int{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 296, 0, 397, - 256, 0, 448, 0, 0, 0, 616, 0, 0, 0, - 0, 0, 0, 0, 361, 0, 328, 197, 224, 0, - 0, 407, 456, 468, 0, 0, 0, 252, 0, 466, - 421, 594, 232, 283, 453, 427, 464, 435, 286, 0, - 0, 465, 368, 577, 445, 591, 617, 618, 262, 401, - 603, 514, 611, 635, 225, 259, 415, 499, 597, 488, - 393, 573, 574, 327, 487, 294, 201, 365, 623, 223, - 474, 367, 241, 230, 579, 600, 288, 451, 630, 212, - 509, 589, 238, 478, 0, 0, 638, 246, 498, 214, - 586, 497, 389, 324, 325, 213, 0, 452, 267, 292, - 0, 0, 257, 410, 581, 582, 255, 639, 227, 610, - 219, 0, 609, 403, 576, 587, 390, 379, 218, 585, - 388, 378, 332, 351, 352, 279, 305, 442, 371, 443, - 304, 306, 399, 398, 400, 206, 598, 0, 207, 0, - 493, 599, 640, 447, 211, 233, 234, 236, 0, 278, - 282, 290, 293, 301, 302, 311, 363, 414, 441, 437, - 446, 0, 571, 592, 604, 615, 621, 622, 624, 625, - 626, 627, 628, 631, 629, 402, 309, 489, 331, 369, - 0, 0, 420, 467, 239, 596, 490, 199, 0, 0, - 0, 0, 253, 254, 0, 567, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 641, 642, 643, 644, 645, - 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, - 656, 657, 658, 636, 500, 506, 501, 502, 503, 504, - 505, 0, 507, 0, 0, 0, 0, 0, 0, 583, - 584, 659, 380, 480, 593, 333, 345, 348, 338, 357, - 0, 358, 334, 335, 340, 342, 343, 344, 349, 350, - 354, 360, 248, 209, 386, 394, 570, 310, 215, 216, - 217, 516, 517, 518, 519, 607, 608, 612, 204, 457, - 458, 459, 460, 291, 602, 307, 
463, 462, 329, 330, - 375, 444, 532, 534, 545, 549, 551, 553, 559, 562, - 533, 535, 546, 550, 552, 554, 560, 563, 522, 524, - 526, 528, 541, 540, 537, 565, 566, 543, 548, 527, - 539, 544, 557, 564, 561, 521, 525, 529, 538, 556, - 555, 536, 547, 558, 542, 530, 523, 531, 0, 196, - 220, 364, 0, 449, 287, 637, 606, 601, 205, 222, - 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 296, + 0, 399, 256, 0, 450, 0, 0, 0, 620, 0, + 0, 0, 0, 0, 0, 0, 362, 0, 329, 197, + 224, 0, 0, 409, 458, 470, 0, 0, 0, 252, + 0, 468, 423, 597, 232, 283, 455, 429, 466, 437, + 286, 0, 0, 467, 369, 580, 447, 594, 621, 622, + 262, 403, 607, 517, 615, 639, 225, 259, 417, 502, + 600, 491, 394, 576, 577, 328, 490, 294, 201, 366, + 627, 223, 476, 368, 241, 230, 582, 604, 298, 288, + 453, 634, 212, 512, 592, 238, 480, 0, 0, 642, + 246, 501, 214, 589, 500, 390, 325, 326, 213, 0, + 454, 267, 292, 0, 0, 257, 412, 584, 585, 255, + 643, 227, 614, 219, 0, 613, 405, 579, 590, 391, + 380, 218, 588, 389, 379, 333, 352, 353, 279, 306, + 444, 372, 445, 305, 307, 401, 400, 402, 206, 601, + 0, 207, 0, 496, 603, 644, 449, 211, 233, 234, + 236, 0, 278, 282, 290, 293, 302, 303, 312, 364, + 416, 443, 439, 448, 0, 574, 595, 608, 619, 625, + 626, 628, 629, 630, 631, 632, 635, 633, 404, 310, + 492, 332, 370, 0, 0, 422, 469, 239, 599, 493, + 199, 0, 0, 0, 0, 253, 254, 0, 570, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 662, 640, 503, 509, 504, + 505, 506, 507, 508, 0, 510, 0, 0, 0, 0, + 0, 395, 0, 586, 587, 663, 381, 483, 596, 334, + 346, 349, 339, 358, 0, 359, 335, 336, 341, 343, + 344, 345, 350, 351, 355, 361, 248, 209, 387, 396, + 573, 311, 215, 216, 217, 519, 520, 521, 522, 611, + 612, 616, 204, 459, 460, 461, 462, 291, 606, 308, + 465, 464, 330, 331, 376, 446, 535, 537, 548, 552, + 554, 556, 562, 565, 536, 538, 549, 553, 555, 557, + 563, 566, 525, 527, 529, 531, 544, 543, 540, 568, + 569, 546, 551, 530, 542, 547, 560, 567, 564, 
524, + 528, 532, 541, 559, 558, 539, 550, 561, 545, 533, + 526, 534, 0, 196, 220, 365, 0, 451, 287, 641, + 610, 481, 605, 205, 222, 0, 261, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 198, 200, 208, 221, 231, 235, 242, - 260, 275, 277, 284, 297, 308, 316, 317, 320, 326, - 376, 382, 383, 384, 385, 404, 405, 406, 409, 412, - 413, 416, 418, 419, 422, 426, 430, 431, 432, 434, - 436, 438, 450, 455, 469, 470, 471, 472, 473, 476, - 477, 482, 483, 484, 485, 486, 494, 495, 508, 578, - 580, 595, 613, 619, 475, 299, 300, 439, 440, 312, - 313, 633, 634, 298, 590, 620, 588, 632, 614, 433, - 374, 0, 0, 377, 280, 303, 318, 0, 605, 496, - 226, 461, 289, 250, 0, 0, 210, 245, 229, 258, - 273, 276, 322, 387, 395, 424, 429, 295, 270, 243, - 454, 240, 479, 511, 512, 513, 515, 391, 265, 428, - 0, 0, 372, 568, 569, 314, + 0, 0, 0, 0, 0, 0, 0, 0, 198, 200, + 208, 221, 231, 235, 242, 260, 275, 277, 284, 297, + 309, 317, 318, 321, 327, 377, 383, 384, 385, 386, + 406, 407, 408, 411, 414, 415, 418, 420, 421, 424, + 428, 432, 433, 434, 436, 438, 440, 452, 457, 471, + 472, 473, 474, 475, 478, 479, 485, 486, 487, 488, + 489, 497, 498, 511, 581, 583, 598, 617, 623, 477, + 300, 301, 441, 442, 313, 314, 637, 638, 299, 593, + 624, 591, 636, 618, 435, 375, 0, 0, 378, 280, + 304, 319, 0, 609, 499, 226, 463, 289, 250, 0, + 0, 210, 245, 229, 258, 273, 276, 323, 388, 397, + 426, 431, 295, 270, 243, 456, 240, 482, 514, 515, + 516, 518, 392, 265, 430, 0, 0, 373, 571, 572, + 315, } var yyPact = [...]int{ - -1000, -1000, 1319, -1000, -532, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 1962, -1000, -533, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 2401, 2519, -1000, -1000, -1000, -1000, 2569, -1000, 1002, - 2074, -1000, 2363, 4957, -1000, 54244, 500, -1000, 51356, -436, - 860, 234, 35472, 
-1000, 207, -1000, 193, 52800, 199, -1000, - -1000, -1000, -1000, -436, 21030, 2277, 56, 52, 54244, -1000, - -1000, -1000, -1000, -353, 2542, 2069, -1000, 408, -1000, -1000, - -1000, -1000, -1000, -1000, 50634, -1000, 1098, -1000, -1000, 2359, - 2349, 2577, 915, 2304, -1000, 2461, 2069, -1000, 21030, 2498, - 2432, 20308, 20308, 462, -1000, -1000, 268, -1000, -1000, 30418, - 54244, 38360, 890, -1000, 2363, -1000, -1000, -1000, 219, -1000, - 378, 1978, -1000, 1977, -1000, 469, 899, 392, 871, 868, - 391, 389, 388, 379, 377, 376, 375, 369, 398, -1000, - 938, 938, -218, -219, 361, 450, 448, 448, 1108, 479, - 2320, 2316, -1000, -1000, 938, 938, 938, 396, 938, 938, - 938, 938, 321, 320, 938, 938, 938, 938, 938, 938, - 938, 938, 938, 938, 938, 938, 938, 938, 938, 938, - 938, 902, 2363, 300, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 2398, 2471, -1000, -1000, -1000, -1000, 2579, -1000, 1052, + 2098, -1000, 2371, 6321, -1000, 55304, 770, -1000, 52396, -441, + 885, 254, 36402, -1000, 201, -1000, 194, 53850, 197, -1000, + -1000, -1000, -1000, -441, 21862, 2321, 72, 62, 55304, -1000, + -1000, -1000, -1000, -361, 2549, 2050, -1000, 378, -1000, -1000, + -1000, -1000, -1000, -1000, 51669, -1000, 1203, -1000, -1000, 2377, + 2359, 2309, 942, 2322, -1000, 2492, 2050, -1000, 21862, 2532, + 2449, 21135, 21135, 449, -1000, -1000, 276, -1000, -1000, 31313, + 55304, 39310, 287, -1000, 2371, -1000, -1000, -1000, 216, -1000, + 334, 1978, -1000, 1973, -1000, 919, 1089, 396, 863, 474, + 395, 394, 380, 379, 377, 370, 365, 359, 354, -1000, + 991, 991, -204, -210, 388, 467, 445, 445, 1100, 498, + 2345, 2344, -1000, -1000, 991, 991, 991, 349, 991, 991, + 991, 991, 303, 298, 991, 991, 991, 991, 991, 991, + 991, 991, 991, 991, 991, 991, 991, 991, 991, 991, + 991, 897, 2371, 284, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, @@ -7224,66 +7343,68 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 54244, 210, 54244, -1000, 809, 499, -1000, -1000, -440, 1090, - 1090, 122, 1090, 1090, 1090, 1090, 184, 973, 49, -1000, - 182, 284, 197, 293, 1079, 183, -1000, -1000, 258, 1079, - 1806, -1000, 923, 281, 163, -1000, 1090, 1090, -1000, 13785, - 209, 13785, 13785, -1000, 2345, -1000, -1000, -1000, -1000, -1000, - 1333, -1000, -1000, -1000, -1000, -18, 478, -1000, -1000, -1000, - -1000, 52800, 49912, 290, -1000, -1000, 769, 1852, 1371, 21030, - 1291, 913, -1000, -1000, 1976, 876, -1000, -1000, -1000, -1000, - -1000, 800, -1000, 23196, 23196, 23196, 23196, -1000, -1000, 1983, - 49190, 1983, 1983, 23196, 1983, 23196, 1983, 1983, 1983, 21030, - 1983, 1983, 1983, 1983, -1000, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, -1000, -1000, -1000, -1000, 1983, 808, 1983, 1983, - 1983, 1983, 1983, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 1983, 1983, 1983, 1983, 1983, 1983, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 1983, 
1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, -1000, + -1000, -1000, -1000, -1000, 55304, 217, 55304, -1000, 838, 767, + -1000, -1000, -445, 1108, 1108, 108, 1108, 1108, 1108, 1108, + 196, 963, 56, -1000, 191, 272, 188, 279, 1092, 206, + -1000, -1000, 268, 1092, 1855, -1000, 948, 278, 168, -1000, + 1108, 1108, -1000, 14568, 236, 14568, 14568, -1000, 2366, -1000, + -1000, -1000, -1000, -1000, 1360, -1000, -1000, -1000, -1000, -29, + 495, -1000, -1000, -1000, -1000, 53850, 50942, 235, -1000, -1000, + 343, 1652, 1400, 21862, 1459, 926, -1000, -1000, 1308, 902, + -1000, -1000, -1000, -1000, -1000, 800, -1000, 24043, 24043, 24043, + 24043, -1000, -1000, 1981, 50215, 1981, 1981, 24043, 1981, 24043, + 1981, 1981, 1981, 1981, 21862, 1981, 1981, 1981, 1981, -1000, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, -1000, -1000, -1000, + -1000, 1981, 837, 1981, 1981, 1981, 1981, 1981, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 1981, 1981, 1981, 1981, 1981, + 1981, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, -1000, -1000, -1000, 1758, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 26951, 1651, 1647, 
1607, -1000, + 18954, 1981, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 26084, 1500, 1498, 1493, -1000, 18142, 1983, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 55304, -1000, 1981, 226, 53850, 53850, 400, + 1371, -1000, -1000, 2492, 2050, -1000, 2549, 2514, 378, -1000, + 3145, 1622, 1632, 1576, 2050, 1956, 55304, -1000, 2000, -1000, + -1000, -1000, -328, -371, 2199, 1606, 1854, -1000, -1000, -1000, + -1000, 2465, 21862, -1000, -1000, 2570, -1000, 28405, 836, 2568, + 49488, -1000, 449, 449, 1970, 430, 14, -1000, -1000, -1000, + -1000, 1020, 35675, -1000, -1000, -1000, -1000, -1000, 1848, 55304, + -1000, -1000, 4160, 53850, -1000, 2097, -1000, 1844, -1000, 2024, + 21862, 2072, 760, 53850, 515, 510, 476, -1000, -56, -1000, + -1000, -1000, -1000, -1000, -1000, 991, 991, 991, -1000, 308, + 2531, 6321, 7041, -1000, -1000, -1000, 48761, 2096, 53850, -1000, + 2093, -1000, 1124, 835, 874, 874, 53850, -1000, -1000, 54577, + 53850, 1114, 1113, 53850, 53850, 53850, 53850, -1000, 48034, -1000, + 47307, 46580, 1370, 53850, 45853, 45126, 44399, 43672, 42945, -1000, + 2219, -1000, 2057, -1000, -1000, -1000, 54577, 53850, 53850, 54577, + 53850, 54577, 55304, 53850, -1000, -1000, 401, -1000, -1000, 1366, + 1364, 1363, 991, 991, 1350, 1836, 1835, 1833, 991, 991, + 1342, 1832, 37856, 1825, 282, 1329, 1328, 1310, 1510, 1816, + 193, 1812, 1377, 1355, 1309, 53850, 2091, 55304, -1000, 257, + 1043, 953, 1011, 2371, 2319, 1967, 491, 759, 53850, 450, + 450, 53850, -1000, 15301, 55304, 255, -1000, 1808, 21862, -1000, + 1103, 1092, 1092, -1000, -1000, -1000, -1000, -1000, -1000, 1108, + 55304, 1103, -1000, -1000, -1000, 1092, 1108, 55304, 1108, 1108, + 1108, 1108, 1092, 1092, 1092, 1108, 55304, 55304, 
55304, 55304, + 55304, 55304, 55304, 55304, 55304, 14568, 948, 1108, -446, -1000, + 1802, -1000, -1000, -1000, 2194, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54244, -1000, 1983, - 218, 52800, 52800, 397, 1334, -1000, -1000, 2461, 2069, -1000, - 2542, 2477, 408, -1000, 3785, 1628, 1721, 1532, 2069, 1956, - 54244, -1000, 1992, -1000, -1000, -1000, -1000, 2207, 1524, 1804, - -1000, -1000, -1000, -1000, 1907, 21030, -1000, -1000, 2557, -1000, - 27529, 805, 2554, 48468, -1000, 462, 462, 1975, 430, 22, - -1000, -1000, -1000, -1000, 962, 34750, -1000, -1000, -1000, -1000, - -1000, 1902, 54244, -1000, -1000, 4793, 1173, -1000, 2071, -1000, - 1842, -1000, 2014, 21030, 2080, 498, 1173, 491, 490, 489, - -1000, -64, -1000, -1000, -1000, -1000, -1000, -1000, 938, 938, - 938, -1000, 343, 2495, 4957, 6237, -1000, -1000, -1000, 47746, - 2067, 1173, -1000, 2053, -1000, 1041, 859, 870, 870, 1173, - -1000, -1000, 53522, 1173, 1040, 1031, 1173, 1173, 52800, 52800, - -1000, 47024, -1000, 46302, 45580, 1331, 52800, 44858, 44136, 43414, - 42692, 41970, -1000, 2330, -1000, 2133, -1000, -1000, -1000, 53522, - 1173, 1173, 53522, 52800, 53522, 54244, 1173, -1000, -1000, 360, - -1000, -1000, 1330, 1328, 1327, 938, 938, 1326, 1800, 1798, - 1787, 938, 938, 1295, 1782, 36916, 1765, 286, 1293, 1292, - 1289, 1290, 1746, 229, 1719, 1281, 1230, 1288, 52800, 2036, - 54244, -1000, 254, 945, 994, 958, 2363, 2276, 1967, 476, - 494, 1173, 451, 451, 52800, -1000, 14513, 54244, 217, -1000, - 1707, 21030, -1000, 1080, 1079, 1079, -1000, -1000, -1000, -1000, - -1000, -1000, 1090, 54244, 1080, -1000, -1000, -1000, 1079, 1090, - 54244, 1090, 1090, 1090, 1090, 1079, 1079, 1079, 1090, 54244, - 54244, 54244, 54244, 54244, 54244, 54244, 54244, 54244, 13785, 923, - 1090, -441, -1000, 1672, -1000, -1000, 2171, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -7298,328 +7419,333 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 14568, 14568, -1000, -1000, -1000, -1000, -1000, 1964, + -1000, 189, 23, 195, -1000, 42218, 541, 1007, -1000, 541, + -1000, -1000, -1000, 1963, 41491, -1000, -449, -455, -457, -459, + -1000, -1000, -1000, -463, -465, -1000, -1000, -1000, 21862, 21862, + 21862, 21862, -251, -1000, 1323, 24043, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 21862, 249, 1159, 24043, 24043, 24043, 24043, + 24043, 24043, 24043, 25497, 24770, 24043, 24043, 24043, 24043, 24043, + 24043, -1000, -1000, 33494, 5084, 5084, 902, 902, 902, 902, + -1000, -168, 1960, 54577, -1000, -1000, -1000, 833, 21862, 21862, + 902, -1000, 1426, 1991, 18954, 21862, 21862, 21862, 21862, 1050, + 1400, 54577, 21862, -1000, 1576, -1000, -1000, -1000, -1000, 1341, + -1000, -1000, 1127, 2353, 2353, 2353, 2353, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 2353, 21862, 264, + 264, 734, 21862, 21862, 21862, 21862, 21862, 21862, 17500, 21862, + 21862, 24043, 21862, 21862, 21862, 1576, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 1576, 21862, 1532, 21862, 21862, + 
21862, 21862, 21862, 21862, 21862, 16767, 21862, 21862, 21862, 21862, + 21862, -1000, -1000, -1000, -1000, -1000, -1000, 21862, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 1576, 21862, 21862, 21862, 21862, + 21862, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 1621, 1633, 1575, 21862, -1000, 1957, -1000, -186, + 30586, 21862, 1796, 2566, 2071, 53850, -1000, -1000, -1000, -1000, + 2492, -1000, 2492, 1621, 3097, 2254, 21135, -1000, -1000, 3097, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1801, + -1000, 55304, 1956, 2445, 53850, -1000, -309, -1000, -311, 2250, + 1795, 955, -1000, 21862, 21862, 1954, -1000, 1770, 55304, -1000, + -251, -1000, 40764, -1000, -1000, 13835, 55304, 352, 55304, -1000, + 29859, 40037, 331, -1000, 14, 1926, -1000, 20, 21, 18227, + 901, -1000, -1000, -1000, 388, 26224, 1880, 901, 118, -1000, + -1000, -1000, 2024, -1000, 2024, 2024, 2024, 2024, 955, 955, + 955, 955, -1000, -1000, -1000, -1000, -1000, 2087, 2065, -1000, + 2024, 2024, 2024, 2024, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 13785, 13785, -1000, -1000, -1000, -1000, - -1000, 1966, -1000, 189, 26, 196, -1000, 41248, 480, 957, - -1000, 480, -1000, -1000, -1000, 1964, 40526, -1000, -445, -446, - -447, -450, -1000, -1000, -1000, -451, -453, -1000, -1000, -1000, - 21030, 21030, 21030, 21030, -268, -1000, 1216, 23196, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 21030, 224, 997, 23196, 23196, - 23196, 23196, 23196, 23196, 23196, 24640, 23918, 23196, 23196, 23196, - 23196, 23196, 23196, -1000, -1000, 32584, 5973, 5973, 876, 876, - 876, 876, -1000, -175, 1963, 53522, -1000, -1000, -1000, 798, - 21030, 21030, 876, -1000, 1173, 2944, 18142, 20308, 20308, 21030, - 967, 1371, 53522, 21030, -1000, 1532, -1000, -1000, -1000, -1000, - 1207, -1000, -1000, 1093, 2354, 2354, 2354, 2354, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 
21030, 21030, 21030, 2354, 21030, - 1270, 1270, 833, 21030, 21030, 21030, 21030, 21030, 21030, 16697, - 21030, 21030, 23196, 21030, 21030, 21030, 1532, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 1532, 21030, 1536, 21030, - 21030, 21030, 21030, 21030, 21030, 20308, 15969, 20308, 20308, 20308, - 20308, 20308, -1000, -1000, -1000, -1000, -1000, -1000, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 1532, 21030, 21030, 21030, - 21030, 21030, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 1568, 1629, 1527, 21030, -1000, 1960, -1000, -184, - 29696, 21030, 1658, 2551, 2105, 52800, -1000, -1000, -1000, -1000, - 2461, -1000, 2461, 1568, 3572, 2218, 20308, -1000, -1000, 3572, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1667, -1000, - 54244, 1956, 2407, 52800, 2222, 1655, 357, -1000, 21030, 21030, - 1955, -1000, 1799, 54244, -1000, -268, -1000, 39804, -1000, -1000, - 13057, 54244, 346, 54244, -1000, 28974, 39082, 265, -1000, 22, - 1936, -1000, 25, 17, 17419, 864, -1000, -1000, -1000, 361, - 25362, 1737, 864, 103, -1000, -1000, -1000, 2014, -1000, 2014, - 2014, 2014, 2014, 357, 357, 357, 357, -1000, -1000, -1000, - -1000, -1000, 2034, 2032, -1000, 2014, 2014, 2014, 2014, -1000, + -1000, 2061, 2061, 2061, 2048, 2048, 2026, 2026, 427, -1000, + 21862, 486, 39310, 2427, 1304, 2659, 257, 455, 2066, 53850, + 53850, 53850, 455, -1000, 1486, 1484, 1446, -1000, -524, 1948, + -1000, -1000, 2524, -1000, -1000, 906, 1168, 
1164, 894, 53850, + 232, 322, -1000, 440, -1000, 39310, 53850, 1098, 874, 53850, + -1000, 53850, -1000, -1000, -1000, -1000, -1000, 53850, -1000, -1000, + 1944, -1000, 1979, 1198, 1152, 1182, 1144, 1944, -1000, -1000, + -173, 1944, -1000, 1944, -1000, 1944, -1000, 1944, -1000, 1944, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 985, + 336, -369, 53850, 232, 480, -1000, 469, 33494, -1000, -1000, + -1000, 33494, 33494, -1000, -1000, -1000, -1000, 1791, 1783, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 2031, 2031, 2031, 2029, - 2029, 2015, 2015, 435, -1000, 21030, 416, 38360, 2419, 1282, - 1219, 254, 453, 2103, 1173, 1173, 1173, 453, -1000, 1425, - 1398, 1383, -1000, -518, 1950, -1000, -1000, 2493, -1000, -1000, - 960, 1057, 1056, 1117, 52800, 236, 334, -1000, 431, -1000, - 38360, 1173, 1028, 870, 1173, -1000, 1173, -1000, -1000, -1000, - -1000, -1000, 1173, -1000, -1000, 1949, -1000, 1845, 1168, 1055, - 1131, 1054, 1949, -1000, -1000, -182, 1949, -1000, 1949, -1000, - 1949, -1000, 1949, -1000, 1949, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 981, 304, -307, 52800, 236, 470, - -1000, 468, 32584, -1000, -1000, -1000, 32584, 32584, -1000, -1000, - -1000, -1000, 1637, 1625, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -508, 55304, + -1000, 252, 1004, 306, 310, 314, 55304, 375, 2480, 2479, + 2475, 2472, 2464, 2456, 248, 297, 55304, 55304, 450, 2168, + 55304, 2400, 55304, -1000, -1000, -1000, -1000, -1000, 1772, 1763, + -1000, 1400, 55304, -1000, -1000, 1108, 1108, -1000, -1000, 55304, + 1108, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1108, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -502, 54244, -1000, 248, 956, 324, 335, - 353, 54244, 352, 2439, 2434, 2427, 2418, 2414, 
305, 316, - 54244, 54244, 451, 2157, 54244, 2377, 54244, -1000, -1000, -1000, - -1000, -1000, 1622, 1620, -1000, 1371, 54244, -1000, -1000, 1090, - 1090, -1000, -1000, 54244, 1090, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 1090, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 54244, - -1000, -1000, -1000, -1000, -18, 187, -1000, -1000, 52800, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -104, -1000, - 801, 15, 409, -1000, -1000, -1000, -1000, -1000, 2457, -1000, - 1371, 1009, 1006, -1000, 1983, -1000, -1000, 1069, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 224, 23196, 23196, 23196, - 1598, 828, 1429, 1392, 1229, 1239, 1239, 929, 23196, 929, - 23196, 863, 863, 863, 863, 863, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 1616, -1000, 1983, 53522, 1817, 15969, - 1952, 2163, 1532, 908, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 55304, -1000, -1000, -1000, -1000, + -29, 187, -1000, -1000, 53850, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -108, -1000, 717, 26, 383, -1000, + -1000, -1000, -1000, -1000, 2489, -1000, 1400, 1077, 1078, -1000, + 1981, -1000, -1000, 1213, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 249, 24043, 24043, 24043, 1396, 840, + 1416, 1427, 1332, 1223, 1223, 1205, 24043, 1205, 24043, 907, + 907, 907, 907, 907, -1000, -1000, -1000, -1000, -1000, -1000, + 1758, -1000, 1753, -1000, 1981, 54577, 1790, 16767, 1405, 1965, + 1576, 921, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 4134, 1813, -1000, 1813, 1503, 971, - -1000, 21030, 1532, 4125, -1000, -1000, 1532, 1532, 21030, -1000, - -1000, 21030, 21030, 21030, 21030, 1219, 1219, 1219, 1219, 1219, - 1219, 1219, 1219, 1219, 1219, 21030, 1219, 1948, -1000, -1000, + -1000, -1000, 3864, 1576, 1652, 
1576, 1276, 3853, 1058, -1000, + 21862, 1576, 3849, -1000, -1000, 1576, 1576, 21862, -1000, -1000, + 21862, 21862, 21862, 21862, 2659, 2659, 2659, 2659, 2659, 2659, + 2659, 2659, 2659, 2659, 21862, 2659, 1938, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 1943, 2541, 1314, 1219, - 1219, 1219, 1219, 1219, 21030, 2187, -1000, -1000, -1000, 1534, - 4120, 1369, 4115, 1219, 1219, -1000, 1219, 4111, 4092, 1532, - 1852, 2876, 2871, 1219, 1219, 1219, 1219, 1219, 2863, 2802, - 1219, 1219, 2756, 1219, 3876, 1219, 2721, 2680, 2675, 2633, - 2619, 2614, 2603, 2597, 2555, 2540, 2533, 2528, 2510, 2506, - 2491, 2469, 2435, 2428, 1219, 1219, 1219, 3844, 1219, 3819, - 1219, 3812, 1219, 1219, 3807, 2412, 2403, 1532, 1942, -1000, - 3803, 1219, 3525, 3514, 3476, 2399, 3448, 3439, 3434, 1219, - 1219, 1219, 2329, 3426, 3420, 3416, 3406, 3402, 3377, 3369, - 3360, 3346, 1219, 1527, 1527, 1527, 1527, 1527, 3335, -271, - 1219, 1532, -1000, -1000, -1000, -1000, -1000, 3331, 2324, 3320, - 3316, 3289, 3275, 1532, 1938, 1983, 795, -1000, -1000, 1813, - 1532, 1532, 1813, 1813, 3206, 3190, 3017, 2996, 2959, 2939, - 1219, 1219, -1000, 1219, 2908, 2902, 2312, 2303, 1532, -1000, - 1527, 54244, -1000, -431, -1000, -7, 936, 1983, -1000, 36916, - 1532, -1000, 5074, -1000, 1233, -1000, -1000, -1000, -1000, -1000, - 34028, 1951, 3572, -1000, -1000, 1983, 1776, -1000, -1000, 357, - 90, 33306, 857, 857, 128, 1371, 1371, 21030, -1000, -1000, - -1000, -1000, -1000, -1000, 792, 2512, 400, 1983, -1000, 1979, - 3285, -1000, -1000, -1000, 2402, 26807, -1000, -1000, 1983, 1983, - 54244, 1937, 1931, -1000, 790, -1000, 1255, 1936, 22, 8, - -1000, -1000, -1000, -1000, 1371, -1000, 1348, 356, 341, -1000, - 438, -1000, -1000, -1000, -1000, 2290, 92, -1000, -1000, -1000, - 365, 357, -1000, -1000, -1000, -1000, -1000, -1000, 1611, 1611, - -1000, -1000, -1000, -1000, -1000, 1280, -1000, -1000, -1000, -1000, - 1278, -1000, -1000, 1271, 
-1000, -1000, 2641, 2129, 416, -1000, - -1000, 938, 1609, -1000, -1000, 2305, 938, 938, 52800, -1000, - -1000, 1722, 2419, 248, 54244, 986, 2156, -1000, 2103, 2103, - 2103, 54244, -1000, -1000, -1000, -1000, -1000, -1000, -504, 165, - 618, -1000, -1000, -1000, 2008, 52800, 1758, -1000, 232, -1000, - 1718, -1000, 52800, -1000, 1742, 2028, 1173, 1173, -1000, -1000, - -1000, 52800, 1983, -1000, -1000, -1000, -1000, 493, 2358, 297, - -1000, -1000, -290, -1000, -1000, 236, 232, 53522, 1173, 864, - -1000, -1000, -1000, -1000, -1000, -505, 1734, 481, 239, 329, - 54244, 54244, 54244, 54244, 54244, 54244, 512, -1000, -1000, 35, - -1000, -1000, 215, -1000, -1000, -1000, -1000, 215, -1000, -1000, - -1000, -1000, 307, 466, -1000, 54244, 54244, 914, -1000, -1000, - -1000, -1000, -1000, 1079, -1000, -1000, 1079, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2343, - 54244, 10, -471, -1000, -468, 21030, -1000, -1000, -1000, -1000, - 1312, 496, 1429, 23196, 23196, 2944, 2944, 23196, -1000, -1000, - -1000, 350, 350, 32584, -1000, 23196, 21030, 20308, -1000, -1000, - 21030, 21030, 961, -1000, 21030, 1167, -1000, 21030, -1000, -1000, - 1527, 1219, 1219, 1219, 1219, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 1896, -1000, 21030, 21030, 21030, - 1532, 312, -1000, -1000, -1000, -1000, -1000, 2538, -1000, 21030, - -1000, 32584, 21030, 21030, 21030, -1000, -1000, -1000, 21030, 21030, - -1000, -1000, 21030, -1000, 21030, -1000, -1000, -1000, -1000, -1000, - -1000, 21030, -1000, 21030, -1000, -1000, -1000, 21030, -1000, 21030, - -1000, -1000, 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, -1000, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, -1000, 21030, -1000, 21030, - -1000, 21030, -1000, 21030, 21030, -1000, 
21030, 21030, 21030, -1000, - 21030, 21030, 21030, 21030, -1000, -1000, -1000, -1000, 21030, 21030, - 21030, 21030, 21030, 21030, 21030, 21030, 21030, 21030, -1000, -1000, - -1000, -1000, -1000, -1000, 21030, -1000, 38360, 23, -271, 1536, - 23, 1536, 22474, 813, 811, 21752, -1000, 20308, 15241, -1000, - -1000, -1000, -1000, -1000, 21030, 21030, 21030, 21030, 21030, 21030, - -1000, -1000, -1000, 21030, 21030, -1000, 21030, -1000, 21030, -1000, - -1000, -1000, -1000, -1000, 936, -1000, 870, 870, 870, 52800, - -1000, -1000, -1000, -1000, 1932, -1000, 2438, -1000, 2238, 2235, - 2536, 2512, -1000, 28974, 3572, -1000, -1000, 52800, -423, -1000, - 2270, 2357, 857, 857, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 12329, 2461, 21030, 2153, 53522, 247, -1000, 28252, 52800, - 53522, 28974, 28974, 28974, 28974, 28974, -1000, 2196, 2195, -1000, - 2225, 2215, 2318, 54244, -1000, 1568, 1732, -1000, 21030, 31140, - 1922, 28974, -1000, -1000, 28974, 54244, 11601, -1000, -1000, 2, - -13, -1000, -1000, -1000, -1000, 361, -1000, -1000, 1550, 2400, - 2282, -1000, -1000, -1000, -1000, -1000, 1728, -1000, 1717, 1926, - 1711, 1704, 304, -1000, 2047, 2326, 938, 938, -1000, 1254, - -1000, 1173, 1599, 1593, -1000, -1000, -1000, 472, -1000, 2375, - 54244, 2151, 2150, 2149, -1000, -515, 1251, 2026, 1982, 21030, - 2024, 2492, 1882, 52800, -1000, -1000, 53522, -1000, 294, -1000, - 416, 52800, -1000, -1000, -1000, 334, 54244, -1000, 8486, -1000, - -1000, -1000, 232, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 54244, 271, -1000, 2020, 1274, -1000, -1000, 2088, -1000, -1000, - -1000, -1000, -1000, 216, 190, 1566, 213, 1560, 213, -1000, - 54244, 911, 2129, 54244, -1000, -1000, -1000, 1090, 1090, -1000, - -1000, 2325, -1000, 1173, 1219, 23196, 23196, -1000, 876, -1000, - -1000, 384, -248, 2014, 2014, -1000, 2014, 2015, -1000, 2014, - 174, 2014, 172, 2014, -1000, -1000, 1532, 1532, -1000, 1527, - -1000, 2287, 1181, -1000, 1371, 21030, 2889, -1000, -1000, -1000, - -1000, -1000, -71, 2842, 
2833, 1219, -1000, 2013, 2012, 21030, - 1219, 1532, 2279, 1219, 1219, 1219, 1219, 1219, 1219, 1219, - 1219, 1219, 1219, 1219, 1219, 2231, 2221, 2211, 2206, 2201, - 2192, 2183, 2172, 2124, 2110, 2104, 2064, 2037, 2017, 1946, - 1940, 1219, 1219, 1934, 1219, 1919, 1859, -1000, 1371, 1527, - 2516, 1527, 1219, 1219, 2445, 313, 1219, 1702, 1702, 1702, - 1702, 1702, 1527, 1527, 1527, 1527, 1219, 52800, -1000, -271, - -1000, -1000, -310, -311, -1000, 1532, -271, 1918, 23196, 1219, - 23196, 23196, 23196, 1219, 1532, -1000, 1846, 1825, 2081, 1815, - 1219, 2042, 1219, 1219, 1219, 1811, -1000, 2447, 2447, 2447, - 1696, 1233, 54244, -1000, -1000, -1000, -1000, 2512, 2505, 1899, - -1000, -1000, 90, 573, -1000, 2294, 2357, -1000, 2488, 2261, - 2486, -1000, -1000, -1000, -1000, -1000, 1371, -1000, 2350, 1916, - -1000, 955, 1857, -1000, -1000, 19586, 1698, 2226, 531, 1696, - 1894, 3285, 2111, 2143, 3000, -1000, -1000, -1000, -1000, 2178, - -1000, 2134, -1000, -1000, 1992, -1000, 1519, 346, 28974, 1778, - 1778, -1000, 525, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 1076, 8486, 2571, -1000, 1558, -1000, 1346, 205, 1247, -1000, - -1000, 938, 938, -1000, 1025, 1023, -1000, 54244, 2011, -1000, - 357, 1554, 357, 1245, -1000, -1000, 1210, -1000, -1000, -1000, - -1000, 1973, 2092, -1000, -1000, -1000, -1000, 54244, -1000, -1000, - 54244, 54244, 54244, 2010, 2485, -1000, 21030, 2009, 944, 2334, - 52800, 52800, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, 471, 938, -485, 311, 310, 938, 938, - 938, -526, -1000, -1000, 1691, 1687, -1000, -207, -1000, 21030, - -1000, -1000, -1000, -1000, -1000, 1273, 1273, 1500, 1498, 1493, - -1000, 1992, -1000, -1000, -1000, 1712, -1000, -1000, -193, 52800, - 52800, 52800, 52800, -1000, -1000, -1000, 1107, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 876, - 1532, 412, -195, 1532, -1000, -1000, 357, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21030, 
-1000, 21030, - -1000, 1371, 21030, 2461, 1461, 21030, 21030, -1000, 1198, 1172, - 1219, -1000, -1000, -1000, 21030, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21030, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - -1000, 21030, -1000, -1000, -1000, 21030, -1000, 21030, -1000, 21030, - -1000, -1000, -1000, 21030, 303, 350, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1532, 344, -1000, - -1000, -1000, -1000, 2514, -1000, 1532, 21030, 2944, -1000, 2944, - 2944, 2944, -1000, -1000, -1000, 21030, -1000, 21030, 21030, -1000, - 21030, -1000, 21030, -1000, -1000, -1000, -1000, 21030, 1983, 2237, - 1983, 1983, 31140, -1000, -1000, 2505, 2471, 2483, 2247, 2249, - 2249, 2294, -1000, 2482, 2474, -1000, 1440, 2473, 1435, 999, - -1000, 53522, 21030, 247, -1000, 419, 52800, 247, 52800, -1000, - 2478, -1000, -1000, 21030, 2004, -1000, 21030, -1000, -1000, -1000, - -1000, 5973, 2512, 1778, -1000, -1000, 887, -1000, 21030, -1000, - 9314, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1422, - 1419, -1000, -1000, 1994, 21030, -1000, -1000, -1000, 1692, 1630, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1992, -1000, - -1000, -1000, -1000, 334, -510, 2089, 52800, 1171, -1000, 1671, - 1882, 330, 247, 1416, 938, 938, 938, 1161, 1155, 36916, - 1654, -1000, 52800, 413, -1000, 334, -1000, -229, -244, 1219, - -1000, -1000, 2394, -1000, -1000, 15241, -1000, -1000, 1988, 2096, - -1000, -1000, -1000, -1000, 2210, -178, -208, -1000, -1000, 1219, - 1219, 1250, 1532, -1000, 1219, 1219, 1577, 1542, -1000, 1219, - 1219, 1219, 1219, 1219, 1219, 1219, 1219, 1219, 1219, 1219, - 1219, 1219, 1219, 1219, 1219, 1219, 1219, 1219, 1219, 1527, - 1785, -1000, 303, 1532, 2131, -1000, -1000, 5973, -1000, -1000, - 2478, 2470, 23, 
-1000, -1000, 230, 23, 1371, 985, 1532, - 1532, 985, 1779, 1219, 1749, 1736, 1219, 1219, 31862, -1000, - 2464, 2448, 37638, 37638, 936, 2471, -280, 21030, 21030, 2243, - 1123, -1000, -1000, -1000, -1000, 1409, 1385, -1000, 1380, -1000, - 2563, -1000, 1371, -1000, 247, -1000, 523, 1857, -1000, 2461, - 1371, 52800, 1371, 76, 2478, -1000, 1219, -1000, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, 1983, - 1983, 1983, 1983, 1983, 1983, 1983, 1983, -1000, -1000, 52800, - 1837, -1000, -1000, 2389, 1634, 164, -1000, 1535, 1882, -1000, - -1000, 206, -1000, 21030, -1000, 36916, 1375, 1344, -1000, -1000, - -1000, -1000, -526, -1000, -1000, -1000, -1000, -1000, -1000, 408, - 1876, -1000, 934, 52800, 54244, -1000, 2202, -1000, -1000, -1000, - 21030, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 1935, 2564, 1313, 2659, 2659, + 2659, 2659, 2659, 21862, 2376, -1000, -1000, -1000, 1544, 3535, + 1356, 3531, 2659, 2659, -1000, 2659, 3521, 3516, 1576, 2644, + 2638, 2659, 2659, 2659, 2659, 2659, 2634, 2628, 2659, 2659, + 2621, 2659, 3499, 2659, 2614, 2596, 2577, 2545, 2539, 2534, + 2527, 2515, 2502, 2457, 2426, 2422, 2406, 2365, 2342, 2334, + 2323, 2274, 2659, 2659, 2659, 3489, 2659, 3484, 2659, 3472, + 2659, 2659, 3452, 2261, 2253, 1576, 
1927, -1000, 3448, 2659, + 3435, 3427, 3421, 2243, 3411, 3388, 3377, 2659, 2659, 2659, + 2231, 3373, 3360, 3354, 3340, 3330, 3325, 3319, 3315, 3306, + 2659, 1575, 1575, 1575, 1575, 1575, 3302, -263, 2659, 1576, + -1000, -1000, -1000, -1000, -1000, 3293, 2217, 3229, 3087, 3064, + 3037, 1576, 1981, 832, -1000, -1000, 1575, 1576, 1576, 1575, + 1575, 3003, 2972, 2968, 2964, 2959, 2938, 2659, 2659, -1000, + 2659, 2929, 2907, 2174, 2166, 1576, -1000, 1575, 55304, -1000, + -440, -1000, -3, 956, 1981, -1000, 37856, 1576, -1000, 4361, + -1000, 1217, -1000, -1000, -1000, -1000, -1000, 34948, 1934, -1000, + -1000, -1000, -1000, 1981, 1779, -1000, -1000, -1000, -1000, 955, + 93, 34221, 884, 884, 127, 1400, 1400, 21862, -1000, -1000, + -1000, -1000, -1000, -1000, 831, 2555, 371, 1981, -1000, 1943, + 2676, -1000, -1000, -1000, 2444, 27678, -1000, -1000, 1981, 1981, + 55304, 1840, 1839, -1000, 826, -1000, 1430, 1926, 14, 46, + -1000, -1000, -1000, -1000, 1400, -1000, 1443, 355, 362, -1000, + 452, -1000, -1000, -1000, -1000, 2328, 105, -1000, -1000, -1000, + 374, 955, -1000, -1000, -1000, -1000, -1000, -1000, 1735, 1735, + -1000, -1000, -1000, -1000, -1000, 1292, -1000, -1000, -1000, -1000, + 1289, -1000, -1000, 1288, -1000, -1000, 2901, 2167, 486, -1000, + -1000, 991, 1733, -1000, -1000, 2330, 991, 991, 53850, -1000, + -1000, 1866, 2427, 252, 55304, 1055, 2165, -1000, 2066, 2066, + 2066, 55304, -1000, -1000, -1000, -1000, -1000, -1000, -510, 186, + 514, -1000, -1000, -1000, 5102, 53850, 1752, -1000, 230, -1000, + 1861, -1000, 53850, -1000, 1750, 2047, 53850, 53850, -1000, -1000, + -1000, 53850, 1981, -1000, -1000, -1000, -1000, 528, 2368, 329, + -1000, -1000, -288, -1000, -1000, 232, 230, 54577, 53850, 901, + -1000, -1000, -1000, -1000, -1000, -511, 1747, 459, 241, 544, + 55304, 55304, 55304, 55304, 55304, 55304, 790, -1000, -1000, 40, + -1000, -1000, 218, -1000, -1000, -1000, -1000, -1000, 218, -1000, + -1000, -1000, -1000, -1000, 286, 466, -1000, 55304, 55304, 959, + -1000, 
-1000, -1000, -1000, -1000, 1092, -1000, -1000, 1092, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2364, + 55304, 8, -478, -1000, -474, 21862, -1000, -1000, -1000, -1000, + 1321, 828, 1416, 24043, 24043, 1991, 1991, 24043, -1000, -1000, + -1000, 1023, 1023, 33494, -1000, 24043, 21862, -1000, -1000, 21862, + 21862, 21862, 1022, -1000, 21862, 1291, -1000, 21862, -1000, -263, + 1575, 2659, 2659, 2659, 2659, -263, -263, -263, -263, -263, + -263, -263, -263, -263, -263, 1915, -1000, 21862, 21862, 21862, + 1576, 330, -1000, -1000, -1000, -1000, -1000, 2563, -1000, 21862, + -1000, 33494, 21862, 21862, 21862, -1000, -1000, -1000, 21862, 21862, + -1000, -1000, 21862, -1000, 21862, -1000, -1000, -1000, -1000, -1000, + -1000, 21862, -1000, 21862, -1000, -1000, -1000, 21862, -1000, 21862, + -1000, -1000, 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, -1000, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, -1000, 21862, -1000, 21862, + -1000, 21862, -1000, 21862, 21862, -1000, 21862, 21862, 21862, -1000, + 21862, 21862, 21862, 21862, -1000, -1000, -1000, -1000, 21862, 21862, + 21862, 21862, 21862, 21862, 21862, 21862, 21862, 21862, -1000, -1000, + -1000, -1000, -1000, -1000, 21862, -1000, 39310, 19, -263, 1532, + 19, 1532, 23316, 829, 791, 22589, -1000, 21862, 16034, -1000, + -1000, -1000, -1000, -1000, 21862, 21862, 21862, 21862, 21862, 21862, + -1000, -1000, -1000, 21862, 21862, -1000, 21862, -1000, 21862, -1000, + -1000, -1000, -1000, -1000, 956, -1000, 458, 453, 874, 53850, + -1000, -1000, -1000, -1000, 1904, -1000, 2461, -1000, 2270, 2266, + 2562, 2555, 21135, -1000, 29859, -1000, -1000, 53850, -429, -1000, + 2308, 2291, 884, 884, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 13102, 2492, 21862, 2163, 54577, 251, -1000, 29132, 
53850, + 54577, 29859, 29859, 29859, 29859, 29859, -1000, 2200, 2198, -1000, + 2236, 2214, 2277, 55304, -1000, 1621, 1745, -1000, 21862, 32040, + 1860, 29859, -1000, -1000, 29859, 55304, 12369, -1000, -1000, 3, + -4, -1000, -1000, -1000, -1000, 388, -1000, -1000, 1174, 2443, + 2325, -1000, -1000, -1000, -1000, -1000, 1741, -1000, 1731, 1901, + 1726, 1722, 336, -1000, 2043, 2361, 991, 991, -1000, 1275, + -1000, 1426, 1714, 1698, -1000, -1000, -1000, 456, -1000, 2397, + 55304, 2156, 2155, 2154, -1000, -522, 1272, 2037, 1968, 21862, + 2031, 2521, 1882, 53850, -1000, -1000, 54577, -1000, 262, -1000, + 486, 53850, -1000, -1000, -1000, 322, 55304, -1000, 8317, -1000, + -1000, -1000, 230, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 55304, 256, -1000, 2030, 1312, -1000, -1000, 2004, -1000, -1000, + -1000, -1000, -1000, 229, 224, 1680, 207, 1676, 207, -1000, + 55304, 910, 2167, 55304, -1000, -1000, -1000, 1108, 1108, -1000, + -1000, 2356, -1000, 1426, 2659, 24043, 24043, -1000, 902, -1000, + -1000, 448, -226, 2024, 2024, -1000, 2024, 2026, -1000, 2024, + 170, 2024, 162, 2024, -1000, -1000, 1576, 1576, -1000, 1575, + 2150, 1557, 2892, -1000, 1400, 21862, 2888, -1000, -1000, -263, + -263, -263, -263, -263, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -63, 2884, 2864, 2659, -1000, 2023, + 2020, 21862, 2659, 1576, 2129, 2659, 2659, 2659, 2659, 2659, + 2659, 2659, 2659, 2659, 2659, 2659, 2659, 2125, 2109, 2100, + 2067, 2062, 2038, 2027, 2002, 1995, 1958, 1951, 1936, 1932, + 1928, 1913, 1863, 2659, 2659, 1819, 2659, 1815, 1799, -1000, + 1400, 1575, 2838, 1575, 2659, 2659, 2801, 295, 2659, 1712, + 1712, 1712, 1712, 1712, 1575, 1575, 1575, 1575, 2659, 53850, + -1000, -263, -1000, -1000, -366, -370, -1000, 1576, -263, 1897, + 24043, 2659, 24043, 24043, 24043, 2659, 1576, -1000, 1756, 1739, + 2796, 1728, 2659, 2781, 2659, 2659, 2659, 1665, -1000, 2484, + 1981, 2484, 1981, 2484, 1669, 1217, 55304, -1000, -1000, -1000, + -1000, 2555, 2541, -1000, 1887, 
-1000, 93, 615, -1000, 2303, + 2291, -1000, 2520, 2302, 2518, -1000, -1000, -1000, -1000, -1000, + 1400, -1000, 2383, 1905, -1000, 994, 1939, -1000, -1000, 20408, + 1694, 2265, 824, 1669, 1950, 2676, 2138, 2149, 3618, -1000, + -1000, -1000, -1000, 2191, -1000, 2160, -1000, -1000, 2000, -1000, + 2116, 352, 29859, 1930, 1930, -1000, 822, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 1183, 8317, 2592, -1000, 1666, -1000, + 1395, 205, 1266, -1000, -1000, 991, 991, -1000, 1096, 1087, + -1000, 55304, 2018, -1000, 955, 1657, 955, 1260, -1000, -1000, + 1254, -1000, -1000, -1000, -1000, 2046, 2216, -1000, -1000, -1000, + -1000, 55304, -1000, -1000, 55304, 55304, 55304, 2013, 2512, -1000, + 21862, 2011, 992, 2701, 53850, 53850, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 432, 991, -491, + 292, 288, 991, 991, 991, -523, -1000, -1000, 1662, 1655, + -1000, -202, -1000, 21862, -1000, -1000, -1000, -1000, -1000, 1418, + 1418, 1651, 1647, 1607, -1000, 2000, -1000, -1000, -1000, 1800, + -1000, -1000, -181, 53850, 53850, 53850, 53850, -1000, -1000, -1000, + 1242, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 902, 1576, 399, -194, 1576, -1000, -1000, + 955, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 21862, -1000, 21862, -1000, 21862, 1400, 21862, -1000, -1000, + -1000, -1000, -1000, 2492, 1588, 21862, 21862, -1000, 1251, 1234, + 2659, -1000, -1000, -1000, 21862, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21862, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, + 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, 21862, -1000, + -1000, 21862, -1000, -1000, -1000, 21862, -1000, 21862, -1000, 21862, + -1000, -1000, -1000, 21862, 312, 1023, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 1576, 
350, -1000, + -1000, -1000, 2561, -1000, 1576, 21862, 1991, -1000, 1991, 1991, + 1991, -1000, -1000, -1000, 21862, -1000, 21862, 21862, -1000, 21862, + -1000, 21862, -1000, -1000, -1000, -1000, 21862, 1981, 2271, 38583, + 1981, 38583, 1981, 32040, -1000, -1000, 2541, 2546, 2511, 2279, + 2282, 2282, 2303, -1000, 2509, 2508, -1000, 1571, 2505, 1568, + 1086, -1000, 54577, 21862, -1000, 251, 37856, -1000, 393, 53850, + 251, 53850, -1000, 2540, -1000, -1000, 21862, 2010, -1000, 21862, + -1000, -1000, -1000, -1000, 5084, 2555, 1930, -1000, -1000, 918, + -1000, 21862, -1000, 10267, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 1564, 1554, -1000, -1000, 2006, 21862, -1000, -1000, + -1000, 1757, 1732, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 2000, -1000, -1000, -1000, -1000, 322, -515, 2687, 53850, + 1231, -1000, 1644, 1882, 289, 251, 1541, 991, 991, 991, + 1218, 1215, 37856, 1641, -1000, 53850, 411, -1000, 322, -1000, + -211, -212, 2659, -1000, -1000, 2437, -1000, -1000, 16034, -1000, + -1000, 1998, 2036, -1000, -1000, -1000, -1000, 2241, -171, -198, + -1000, -1000, 2659, 2659, 2659, 2032, 1576, -1000, 2659, 2659, + 1723, 1663, -1000, 2659, 2659, 2659, 2659, 2659, 2659, 2659, + 2659, 2659, 2659, 2659, 2659, 2659, 2659, 2659, 2659, 2659, + 2659, 2659, 2659, 1575, 1650, -1000, 312, 1576, 2145, -1000, + -1000, 5084, -1000, -1000, 2540, 2504, 19, -1000, -1000, 237, + 19, 1400, 1028, 1576, 1576, 1028, 1646, 2659, 1635, 1631, + 2659, 2659, 32767, -1000, 2503, 2496, 1637, -1000, -1000, 38583, + 1637, 38583, 956, 2546, -270, 21862, 21862, 2275, 1238, -1000, + -1000, -1000, -1000, 1519, 1469, -1000, 1463, -1000, 2584, -1000, + 1400, -1000, 1981, 251, -1000, 811, 1939, -1000, 2492, 1400, + 53850, 1400, 90, 2540, -1000, 2659, -1000, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 
1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, 1981, + 1981, 1981, 1981, 1981, 1981, 1981, -1000, -1000, 53850, 2670, + -1000, -1000, 2436, 1639, 167, -1000, 1656, 1882, -1000, -1000, + 250, -1000, 21862, -1000, 37856, 1461, 1457, -1000, -1000, -1000, + -1000, -523, -1000, -1000, -1000, -1000, -1000, -1000, 378, 1817, + -1000, 974, 53850, 55304, -1000, 2229, -1000, -1000, -1000, -1000, + 21862, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21030, -1000, - 1532, 2126, -1000, -358, -1000, -486, 21030, -271, -1000, -1000, - -271, -1000, -1000, -1000, -1000, -1000, 21030, -1000, -1000, 21030, - -1000, 21030, -1000, -1000, 1576, -1000, -1000, -1000, -1000, -1000, - 1576, 1576, -1000, -280, -1000, 1861, -1000, 52800, 1371, 1852, - -1000, 1122, -1000, -1000, -1000, -1000, -1000, 53522, 1857, 52800, - -1000, 1541, 1532, 1983, 2461, -1000, 1537, -1000, 408, -1000, - 1986, 1982, -1000, -1000, -1000, 18864, -1000, -1000, -1000, -1000, - -1000, 267, -188, 15241, 10873, 1531, -1000, -187, 1219, 1527, - -1000, -461, -1000, -1000, -1000, -1000, 291, -1000, -1000, 1852, - -1000, -1000, 1607, 1565, 1379, 36194, -1000, -1000, -1000, -1000, - -280, -1000, -1000, 2382, -1000, -1000, 1751, -1000, -1000, 31140, - 52078, -1000, -172, 338, -188, 21030, 1985, 1532, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -37, -1000, -1000, 519, - -1000, -1000, -1000, 
2088, -199, -1000, -1000, -1000, 318, -475, - -298, -299, 23196, -1000, 21030, -1000, 21030, -1000, 21030, -1000, - -1000, -1000, 52800, 1983, -1000, 1489, -1000, 3989, -327, 2120, - -1000, -132, -1000, -1000, -1000, 1072, 1341, -1000, -1000, -1000, - -1000, -1000, -1000, 1431, 52800, -1000, 421, -1000, -1000, 14513, - -193, -215, 992, -1000, -1000, -1000, -1000, -1000, 2944, 1329, - 1129, 1219, -1000, 52800, -1000, 52078, -322, 864, 5973, -1000, - 2117, 2113, 2522, -1000, -1000, -1000, -1000, -1000, -1000, -529, - 1465, 250, -1000, -1000, -1000, 318, -301, -1000, 21030, -1000, - 21030, -1000, 1532, -1000, -1000, 2372, 76, -1000, 2560, -1000, - 2534, 1060, 1060, -1000, 1106, -529, -1000, -1000, -1000, -1000, - 1219, 1219, -1000, -329, -1000, -1000, -1000, -1000, -1000, 420, - 1302, -1000, -1000, -1000, -1000, -1000, 5973, -1000, -1000, -1000, - 263, 263, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 21862, -1000, + 1576, 2142, -1000, -364, -1000, -492, 21862, -263, -1000, -1000, + -263, -1000, -1000, -1000, -1000, -1000, 21862, -1000, -1000, 21862, + -1000, 21862, -1000, -1000, 1637, -1000, -1000, -1000, 37129, -1000, + 1637, -1000, 1637, -1000, -270, -1000, 1667, -1000, 53850, 1400, + 381, -1000, 1220, -1000, -1000, -1000, -1000, -1000, 54577, 53850, + 1939, 53850, -1000, -1000, 1618, 1576, 1981, 2492, -1000, 1586, + -1000, 378, -1000, 1994, 1968, -1000, -1000, -1000, 19681, -1000, + -1000, -1000, -1000, -1000, 283, -178, 16034, 11636, 1584, -1000, + -176, 2659, 1575, -1000, -467, -1000, -1000, -1000, -1000, 280, + -1000, -1000, 1652, -1000, -1000, 1624, 1620, 1605, -1000, -1000, + -1000, -1000, -1000, -1000, -270, -1000, -1000, 2431, -1000, -214, + -1000, -1000, 1648, 1559, -1000, -1000, -1000, 32040, 53123, -1000, + -165, 316, -178, 21862, 1987, 1576, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -38, -1000, -1000, 798, -1000, -1000, + -1000, 2004, -196, -1000, -1000, -1000, 301, -482, -283, -284, + 24043, -1000, 21862, 
-1000, 21862, -1000, 21862, -1000, 53850, 1981, + -1000, -1000, -1000, 1547, -1000, 3891, -381, 2130, -1000, -136, + -1000, -1000, -1000, 1112, 1428, -1000, -1000, -1000, -1000, -1000, + -1000, 2664, 53850, -1000, 426, -1000, -1000, 15301, -181, -200, + 1064, -1000, -1000, -1000, -1000, -1000, 1991, 1516, 1229, 2659, + -1000, 53850, -1000, 53123, -375, 901, 5084, -1000, 2118, 2110, + 2560, -1000, -1000, -1000, -1000, -1000, -1000, -529, 1490, 265, + -1000, -1000, -1000, 301, -293, -1000, 21862, -1000, 21862, -1000, + 1576, -1000, -1000, 2389, 90, -1000, 2582, -1000, 2572, 1030, + 1030, -1000, 1209, -529, -1000, -1000, -1000, -1000, 2659, 2659, + -1000, -388, -1000, -1000, -1000, -1000, -1000, 423, 1267, -1000, + -1000, -1000, -1000, -1000, 5084, -1000, -1000, -1000, 231, 231, + -1000, -1000, } var yyPgo = [...]int{ - 0, 3158, 3156, 28, 6, 41, 35, 3155, 3154, 3153, - 177, 3151, 3137, 3135, 3134, 3130, 3129, 2624, 2610, 2600, - 3127, 3126, 3125, 3122, 3120, 3108, 3107, 3104, 3103, 39, - 106, 68, 99, 211, 213, 3100, 176, 166, 198, 3097, - 3096, 3095, 116, 192, 83, 82, 195, 3092, 3091, 74, - 3090, 3088, 3087, 185, 184, 183, 1040, 3086, 182, 112, - 48, 3083, 3080, 3076, 3075, 3072, 3065, 3063, 3061, 3060, - 3054, 3053, 3052, 3046, 3043, 3041, 3040, 3039, 3036, 296, - 3035, 3033, 21, 3030, 76, 3028, 3026, 3025, 3024, 3023, - 11, 3022, 3017, 26, 44, 3012, 3009, 47, 3008, 3007, - 3004, 2998, 2997, 69, 2994, 22, 2983, 40, 2979, 2978, - 121, 2974, 2971, 2966, 43, 2962, 2961, 2957, 29, 167, - 2956, 2955, 139, 2954, 2953, 2950, 165, 206, 2949, 2239, - 205, 108, 111, 2948, 2947, 103, 188, 2946, 123, 2927, - 2924, 2915, 150, 2914, 3191, 2913, 2909, 64, 70, 199, - 2907, 2895, 163, 66, 8, 16, 17, 2894, 2892, 63, - 73, 2889, 101, 2885, 2884, 104, 84, 2883, 90, 98, - 2882, 2881, 5, 7, 2879, 1, 4, 2, 80, 2878, - 2877, 115, 2876, 2873, 2871, 95, 2868, 2865, 4363, 2862, - 85, 128, 102, 62, 2860, 171, 131, 2858, 2857, 2856, - 2854, 2850, 49, 2849, 2848, 2847, 138, 251, 162, 2831, - 144, 
337, 52, 143, 2830, 189, 77, 197, 190, 2828, - 2825, 135, 133, 2824, 2821, 55, 164, 191, 2812, 94, - 127, 117, 168, 91, 130, 2808, 2805, 56, 60, 2803, - 2802, 2801, 2800, 174, 2799, 2797, 59, 2795, 54, 2794, - 186, 2792, 136, 79, 2791, 170, 169, 2790, 61, 2789, - 2788, 65, 96, 100, 38, 2787, 158, 161, 125, 172, - 2786, 2782, 53, 2780, 2779, 2778, 196, 292, 2774, 2772, - 294, 178, 141, 147, 89, 2771, 299, 2770, 2767, 13, - 4391, 6814, 2766, 37, 160, 2765, 2758, 6537, 20, 45, - 24, 2755, 204, 2753, 2752, 2750, 2749, 217, 202, 110, - 159, 57, 2744, 2741, 2736, 36, 2735, 2734, 2733, 2732, - 2731, 2723, 72, 34, 33, 32, 212, 58, 19, 97, - 153, 152, 67, 2709, 2706, 2705, 124, 87, 2704, 157, - 155, 120, 129, 2701, 180, 142, 119, 2700, 93, 31, - 2696, 2693, 2688, 2681, 92, 2678, 2677, 2674, 2669, 151, - 146, 118, 78, 2666, 81, 114, 149, 145, 51, 2665, - 46, 2664, 2663, 30, 193, 23, 2662, 15, 105, 109, - 2661, 5648, 181, 2660, 9, 298, 148, 2657, 2655, 10, - 12, 18, 2654, 2639, 2638, 2636, 132, 2635, 2632, 2630, - 2625, 27, 50, 25, 14, 113, 75, 2620, 2615, 140, - 2614, 2593, 2592, 0, 1005, 126, 2591, 207, + 0, 3204, 3202, 31, 6, 45, 44, 3201, 3200, 3198, + 172, 3184, 3183, 3180, 3175, 3174, 3172, 2630, 2619, 2608, + 3169, 3168, 3160, 3159, 3158, 3157, 3156, 3155, 3152, 43, + 95, 25, 106, 215, 193, 3151, 171, 163, 197, 3150, + 3149, 3146, 118, 189, 83, 85, 190, 3144, 3140, 75, + 3139, 3136, 3135, 188, 187, 186, 1089, 3134, 179, 115, + 50, 3132, 3131, 3130, 3129, 3128, 3127, 3124, 3120, 3119, + 3118, 3115, 3111, 3107, 3105, 3102, 3095, 3094, 3093, 278, + 3090, 3089, 21, 3087, 77, 3085, 3076, 3075, 3072, 3070, + 11, 3069, 3067, 26, 42, 66, 3065, 3064, 51, 3063, + 3062, 3060, 3056, 3054, 71, 3050, 22, 3044, 37, 3040, + 3039, 127, 3038, 3035, 3025, 38, 3023, 3019, 3015, 30, + 168, 3011, 3010, 140, 3005, 3004, 3003, 169, 224, 3002, + 2248, 3001, 97, 3000, 2999, 2998, 165, 191, 2995, 121, + 2994, 2993, 2992, 151, 2988, 3254, 2986, 2985, 68, 73, + 201, 2984, 2979, 199, 72, 8, 2978, 
17, 18, 2974, + 2970, 65, 70, 2969, 114, 2966, 2965, 101, 87, 2957, + 109, 98, 2948, 2947, 5, 7, 2937, 1, 4, 2, + 104, 2934, 2931, 103, 2928, 2924, 2921, 93, 2914, 2909, + 3875, 2904, 91, 130, 108, 63, 2903, 166, 175, 2902, + 2898, 2895, 2894, 2893, 53, 2889, 2883, 2876, 138, 1275, + 123, 2873, 146, 352, 54, 147, 2871, 196, 78, 198, + 167, 2869, 2866, 136, 134, 2865, 2859, 57, 164, 192, + 2858, 94, 131, 120, 181, 92, 141, 2857, 2851, 58, + 60, 2848, 2844, 2841, 2834, 170, 2833, 2830, 64, 2827, + 56, 2826, 202, 2824, 335, 80, 2823, 182, 162, 2820, + 67, 2818, 2817, 90, 100, 62, 36, 2816, 156, 159, + 128, 185, 2813, 2810, 55, 2802, 2798, 2797, 195, 306, + 2796, 2795, 316, 173, 144, 148, 84, 2793, 263, 2792, + 2787, 14, 4355, 6376, 177, 40, 160, 2784, 2781, 7734, + 16, 47, 29, 2775, 205, 2769, 180, 2768, 2762, 2757, + 242, 203, 112, 157, 59, 2755, 2753, 2752, 2743, 35, + 2742, 2739, 2737, 2736, 2735, 2732, 41, 39, 34, 74, + 213, 61, 20, 96, 158, 153, 69, 2731, 2730, 2729, + 125, 82, 2720, 155, 154, 126, 99, 2715, 176, 143, + 117, 2713, 105, 33, 2712, 2709, 2708, 2706, 89, 2702, + 2688, 2682, 2680, 152, 145, 124, 79, 2675, 81, 119, + 150, 149, 52, 2673, 48, 2670, 2658, 32, 178, 27, + 2656, 19, 102, 110, 2655, 6223, 2654, 9, 262, 161, + 2653, 2652, 10, 13, 12, 2650, 2649, 2645, 2643, 133, + 2641, 2640, 2639, 2637, 24, 49, 23, 15, 116, 139, + 76, 2636, 2621, 142, 2620, 2613, 2612, 0, 1031, 129, + 2576, 200, } -//line sql.y:8575 +//line sql.y:8655 type yySymType struct { union any empty struct{} @@ -7843,6 +7969,11 @@ func (st *yySymType) fromFirstLastTypeUnion() FromFirstLastType { return v } +func (st *yySymType) groupByUnion() *GroupBy { + v, _ := st.union.(*GroupBy) + return v +} + func (st *yySymType) ignoreUnion() Ignore { v, _ := st.union.(Ignore) return v @@ -7903,6 +8034,11 @@ func (st *yySymType) insertActionUnion() InsertAction { return v } +func (st *yySymType) intPtrUnion() *int { + v, _ := st.union.(*int) + return v +} + func (st *yySymType) integerUnion() 
int { v, _ := st.union.(int) return v @@ -8103,6 +8239,11 @@ func (st *yySymType) revertMigrationUnion() *RevertMigration { return v } +func (st *yySymType) rowAliasUnion() *RowAlias { + v, _ := st.union.(*RowAlias) + return v +} + func (st *yySymType) scopeUnion() Scope { v, _ := st.union.(Scope) return v @@ -8304,59 +8445,59 @@ func (st *yySymType) withUnion() *With { } var yyR1 = [...]int{ - 0, 411, 412, 412, 7, 7, 7, 7, 7, 7, + 0, 415, 416, 416, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 258, 381, 382, 382, 256, 256, 28, 74, 36, - 36, 35, 35, 38, 38, 37, 31, 31, 31, 32, - 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, - 33, 33, 29, 29, 29, 29, 30, 30, 30, 30, - 30, 15, 16, 34, 34, 17, 17, 108, 108, 18, - 19, 19, 19, 19, 415, 415, 183, 183, 181, 181, - 182, 182, 261, 261, 20, 265, 265, 267, 267, 267, - 267, 257, 257, 257, 21, 21, 266, 266, 268, 268, - 268, 271, 271, 271, 271, 310, 310, 310, 22, 22, - 22, 22, 22, 128, 128, 384, 384, 383, 377, 377, - 376, 376, 375, 380, 380, 379, 379, 378, 40, 41, - 50, 50, 50, 50, 51, 52, 385, 385, 350, 57, - 57, 56, 56, 56, 56, 56, 56, 58, 58, 54, - 54, 53, 53, 55, 55, 352, 352, 338, 338, 351, - 351, 351, 351, 351, 351, 351, 337, 337, 139, 139, - 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, - 235, 235, 235, 235, 235, 235, 235, 400, 400, 400, - 399, 399, 236, 236, 236, 236, 236, 236, 236, 236, - 148, 148, 159, 159, 159, 159, 159, 159, 146, 146, - 147, 145, 145, 145, 153, 153, 153, 153, 153, 153, - 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, - 153, 404, 404, 404, 404, 404, 404, 404, 404, 404, - 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, - 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, - 404, 404, 404, 404, 404, 404, 404, 404, 404, 404, - 404, 404, 404, 158, 158, 154, 154, 154, 155, 155, - 155, 156, 156, 401, 401, 401, 401, 315, 315, 315, - 315, 318, 318, 316, 316, 316, 316, 316, 316, 316, - 316, 316, 317, 317, 317, 317, 317, 317, 317, 319, - 319, 
319, 319, 319, 320, 320, 320, 320, 320, 320, - 320, 320, 320, 320, 320, 320, 320, 320, 320, 320, - 321, 321, 321, 321, 321, 321, 321, 321, 336, 336, - 322, 322, 330, 330, 331, 331, 332, 332, 332, 333, - 333, 333, 334, 334, 327, 327, 327, 327, 327, 327, - 327, 327, 327, 329, 329, 328, 328, 328, 339, 364, - 364, 363, 363, 361, 361, 361, 361, 361, 361, 361, - 361, 348, 348, 358, 358, 358, 358, 358, 347, 347, - 343, 343, 343, 344, 344, 345, 345, 342, 342, 346, - 346, 360, 360, 359, 359, 340, 340, 341, 341, 366, - 402, 402, 402, 402, 402, 403, 403, 367, 392, 394, - 394, 394, 393, 393, 390, 391, 389, 389, 389, 389, - 389, 84, 84, 84, 284, 284, 285, 285, 356, 356, - 355, 355, 355, 357, 357, 354, 354, 354, 354, 354, - 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, - 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, - 354, 354, 354, 354, 354, 354, 279, 279, 279, 388, - 388, 388, 388, 388, 388, 387, 387, 387, 353, 353, - 353, 353, 386, 386, 59, 59, 216, 216, 405, 405, - 406, 406, 406, 47, 47, 47, 47, 47, 47, 46, + 7, 260, 385, 258, 258, 28, 74, 36, 36, 35, + 35, 38, 38, 37, 31, 31, 31, 32, 32, 32, + 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, + 29, 29, 29, 29, 30, 30, 30, 30, 30, 15, + 16, 34, 34, 17, 17, 109, 109, 18, 19, 19, + 19, 19, 419, 419, 185, 185, 183, 183, 184, 184, + 263, 263, 20, 267, 267, 269, 269, 269, 269, 259, + 259, 259, 21, 21, 268, 268, 270, 270, 270, 273, + 273, 273, 273, 313, 313, 313, 22, 22, 22, 22, + 22, 129, 129, 387, 387, 386, 381, 381, 380, 380, + 379, 384, 384, 383, 383, 382, 40, 41, 50, 50, + 50, 50, 51, 52, 388, 388, 354, 57, 57, 56, + 56, 56, 56, 56, 56, 58, 58, 54, 54, 53, + 53, 55, 55, 356, 356, 342, 342, 355, 355, 355, + 355, 355, 355, 355, 341, 341, 140, 140, 237, 237, + 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, + 237, 237, 237, 237, 237, 403, 403, 403, 402, 402, + 238, 238, 238, 238, 238, 238, 238, 238, 149, 149, + 161, 161, 161, 161, 161, 161, 147, 147, 148, 146, + 146, 146, 154, 154, 154, 154, 154, 154, 154, 154, + 154, 154, 154, 
154, 154, 154, 154, 154, 154, 407, + 407, 407, 407, 407, 407, 407, 407, 407, 407, 407, + 407, 407, 407, 407, 407, 407, 407, 407, 407, 407, + 407, 407, 407, 407, 407, 407, 407, 407, 407, 407, + 407, 407, 407, 407, 407, 407, 407, 407, 407, 407, + 407, 160, 160, 155, 155, 155, 157, 157, 156, 156, + 156, 158, 158, 404, 404, 404, 404, 319, 319, 319, + 319, 322, 322, 320, 320, 320, 320, 320, 320, 320, + 320, 320, 321, 321, 321, 321, 321, 321, 321, 323, + 323, 323, 323, 323, 324, 324, 324, 324, 324, 324, + 324, 324, 324, 324, 324, 324, 324, 324, 324, 324, + 325, 325, 325, 325, 325, 325, 325, 325, 340, 340, + 329, 329, 334, 334, 335, 335, 336, 336, 336, 337, + 337, 337, 338, 338, 331, 331, 331, 331, 331, 331, + 331, 331, 331, 333, 333, 332, 332, 332, 343, 368, + 368, 367, 367, 365, 365, 365, 365, 365, 365, 365, + 365, 352, 352, 362, 362, 362, 362, 362, 351, 351, + 347, 347, 347, 348, 348, 349, 349, 346, 346, 350, + 350, 364, 364, 363, 363, 344, 344, 345, 345, 370, + 405, 405, 405, 405, 405, 406, 406, 371, 395, 397, + 397, 397, 396, 396, 393, 394, 392, 392, 392, 392, + 392, 84, 84, 84, 286, 286, 287, 287, 360, 360, + 359, 359, 359, 361, 361, 358, 358, 358, 358, 358, + 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, + 358, 358, 358, 358, 358, 358, 358, 358, 358, 358, + 358, 358, 358, 358, 358, 358, 281, 281, 281, 391, + 391, 391, 391, 391, 391, 390, 390, 390, 357, 357, + 357, 357, 389, 389, 59, 59, 218, 218, 408, 408, + 410, 410, 410, 47, 47, 47, 47, 47, 47, 46, 46, 46, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, @@ -8364,170 +8505,172 @@ var yyR1 = [...]int{ 43, 43, 43, 43, 43, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 23, 23, 110, 110, 111, 111, 111, - 111, 113, 113, 113, 369, 369, 60, 60, 3, 3, - 171, 173, 174, 174, 172, 172, 172, 172, 172, 172, - 62, 62, 61, 61, 176, 175, 177, 177, 177, 1, - 1, 2, 2, 4, 4, 374, 374, 374, 374, 374, 
- 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, - 374, 374, 374, 374, 374, 374, 374, 335, 335, 335, - 368, 368, 370, 112, 112, 112, 112, 112, 112, 112, - 112, 112, 112, 116, 115, 115, 114, 117, 117, 117, - 117, 117, 117, 117, 117, 372, 372, 372, 63, 63, - 373, 323, 324, 325, 5, 6, 349, 371, 124, 124, - 24, 39, 39, 25, 25, 25, 25, 26, 26, 64, - 67, 67, 65, 65, 65, 65, 65, 65, 65, 65, + 23, 23, 23, 23, 23, 23, 23, 111, 111, 112, + 112, 112, 112, 114, 114, 114, 373, 373, 60, 60, + 3, 3, 173, 175, 176, 176, 174, 174, 174, 174, + 174, 174, 62, 62, 61, 61, 178, 177, 179, 179, + 179, 1, 1, 2, 2, 4, 4, 378, 378, 378, + 378, 378, 378, 378, 378, 378, 378, 378, 378, 378, + 378, 378, 378, 378, 378, 378, 378, 378, 378, 339, + 339, 339, 372, 372, 374, 113, 113, 113, 113, 113, + 113, 113, 113, 113, 113, 117, 116, 116, 115, 118, + 118, 118, 118, 118, 118, 118, 118, 376, 376, 376, + 63, 63, 377, 326, 327, 328, 5, 6, 353, 375, + 125, 125, 24, 39, 39, 25, 25, 25, 25, 26, + 26, 64, 67, 67, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, - 65, 65, 65, 65, 278, 278, 287, 287, 277, 277, - 302, 302, 302, 280, 280, 280, 281, 281, 398, 398, - 398, 274, 274, 66, 66, 66, 303, 303, 303, 303, - 69, 69, 407, 407, 408, 408, 409, 409, 409, 70, - 71, 71, 305, 305, 306, 306, 72, 73, 85, 85, - 85, 85, 85, 85, 85, 86, 86, 86, 86, 109, - 109, 109, 10, 10, 10, 10, 81, 81, 81, 9, - 9, 11, 68, 68, 75, 395, 395, 396, 397, 397, - 397, 397, 76, 78, 27, 27, 27, 27, 27, 27, - 134, 134, 122, 122, 122, 122, 122, 122, 122, 122, - 122, 122, 122, 122, 129, 129, 129, 123, 123, 416, - 79, 80, 80, 127, 127, 127, 120, 120, 120, 126, - 126, 126, 12, 12, 13, 260, 260, 14, 14, 131, - 131, 133, 133, 133, 133, 133, 135, 135, 135, 135, - 135, 135, 135, 130, 130, 132, 132, 132, 132, 295, - 295, 295, 294, 294, 165, 165, 167, 166, 166, 168, - 168, 169, 169, 169, 169, 214, 214, 191, 191, 
253, - 253, 254, 254, 252, 252, 259, 259, 255, 255, 255, - 255, 262, 262, 170, 170, 170, 170, 178, 178, 179, - 179, 180, 180, 304, 304, 300, 300, 300, 299, 299, - 184, 184, 184, 186, 185, 185, 185, 185, 187, 187, - 189, 189, 188, 188, 190, 195, 195, 194, 194, 192, - 192, 192, 192, 193, 193, 193, 193, 196, 196, 144, - 144, 144, 144, 144, 144, 144, 144, 157, 157, 157, - 157, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 243, 243, 149, 149, 149, 149, 149, 149, - 149, 149, 149, 149, 149, 149, 149, 149, 149, 152, - 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, - 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, - 152, 152, 152, 152, 219, 219, 218, 218, 87, 87, - 87, 88, 88, 89, 89, 89, 89, 89, 90, 90, - 90, 90, 90, 90, 90, 92, 92, 91, 91, 209, - 209, 292, 292, 93, 94, 94, 97, 97, 96, 95, - 95, 101, 101, 98, 98, 100, 100, 99, 102, 102, - 103, 104, 104, 275, 275, 197, 197, 205, 205, 205, - 205, 198, 198, 198, 198, 198, 198, 198, 206, 206, - 206, 213, 207, 207, 203, 203, 201, 201, 201, 201, - 201, 201, 201, 201, 201, 201, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, - 202, 202, 202, 202, 202, 202, 202, 202, 202, 
202, - 202, 202, 202, 202, 202, 202, 202, 162, 162, 162, - 162, 224, 224, 150, 150, 150, 150, 150, 150, 150, - 150, 150, 150, 150, 150, 150, 150, 150, 151, 151, - 163, 163, 163, 163, 164, 164, 164, 164, 164, 164, - 164, 312, 312, 118, 118, 118, 118, 118, 118, 118, - 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, - 118, 118, 118, 119, 119, 119, 119, 119, 119, 119, + 65, 65, 65, 65, 65, 65, 65, 280, 280, 289, + 289, 279, 279, 304, 304, 304, 282, 282, 282, 283, + 283, 401, 401, 401, 276, 276, 66, 66, 66, 305, + 305, 305, 305, 69, 69, 411, 411, 412, 412, 413, + 413, 413, 70, 71, 71, 308, 308, 309, 309, 72, + 73, 85, 85, 85, 85, 85, 86, 86, 86, 86, + 110, 110, 110, 10, 10, 10, 10, 81, 81, 81, + 9, 9, 11, 68, 68, 75, 398, 398, 399, 400, + 400, 400, 400, 76, 78, 27, 27, 27, 27, 27, + 27, 135, 135, 123, 123, 123, 123, 123, 123, 123, + 123, 123, 123, 123, 123, 130, 130, 130, 124, 124, + 420, 79, 80, 80, 128, 128, 128, 121, 121, 121, + 127, 127, 127, 12, 12, 13, 262, 262, 14, 14, + 134, 134, 133, 133, 136, 136, 136, 136, 136, 136, + 136, 131, 131, 132, 132, 132, 132, 297, 297, 297, + 296, 296, 167, 167, 169, 168, 168, 170, 170, 171, + 171, 171, 171, 216, 216, 193, 193, 255, 255, 256, + 256, 254, 254, 261, 261, 257, 257, 257, 257, 264, + 264, 172, 172, 172, 172, 180, 180, 181, 181, 182, + 182, 307, 307, 302, 302, 302, 301, 301, 186, 186, + 186, 188, 187, 187, 187, 187, 189, 189, 191, 191, + 190, 190, 192, 197, 197, 196, 196, 194, 194, 194, + 194, 194, 194, 195, 195, 195, 195, 198, 198, 145, + 145, 145, 145, 145, 145, 145, 145, 409, 409, 159, + 159, 159, 159, 162, 162, 162, 162, 162, 162, 162, + 162, 162, 162, 162, 245, 245, 150, 150, 150, 150, + 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, + 150, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, + 153, 153, 153, 153, 153, 153, 221, 221, 220, 220, + 87, 87, 87, 88, 88, 89, 89, 89, 89, 89, + 90, 90, 90, 90, 90, 90, 90, 92, 92, 91, + 91, 211, 211, 294, 294, 93, 94, 94, 95, 
95, + 98, 98, 97, 96, 96, 102, 102, 99, 99, 101, + 101, 100, 103, 103, 104, 105, 105, 277, 277, 199, + 199, 207, 207, 207, 207, 200, 200, 200, 200, 200, + 200, 200, 208, 208, 208, 215, 209, 209, 205, 205, + 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, + 203, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, + 204, 204, 164, 164, 164, 164, 226, 226, 151, 151, + 151, 151, 151, 151, 151, 151, 151, 151, 151, 151, + 151, 151, 151, 152, 152, 165, 165, 165, 165, 166, + 166, 166, 166, 166, 166, 166, 315, 315, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, - 119, 417, 417, 326, 326, 326, 204, 204, 204, 204, - 204, 125, 125, 125, 125, 125, 309, 309, 309, 313, - 313, 313, 311, 311, 311, 311, 311, 311, 311, 311, - 311, 311, 311, 311, 311, 311, 311, 314, 314, 222, - 222, 121, 121, 220, 220, 221, 223, 223, 215, 215, - 215, 215, 217, 217, 200, 200, 200, 225, 225, 226, - 226, 105, 106, 106, 107, 107, 227, 227, 229, 228, - 228, 230, 231, 231, 231, 232, 232, 233, 233, 233, - 49, 49, 49, 49, 49, 44, 44, 44, 44, 45, - 45, 45, 45, 136, 136, 136, 136, 138, 
138, 137, - 137, 82, 82, 83, 83, 83, 142, 142, 143, 143, - 143, 140, 140, 141, 141, 250, 250, 234, 234, 234, - 241, 241, 241, 237, 237, 239, 239, 239, 240, 240, - 240, 238, 247, 247, 249, 249, 248, 248, 244, 244, - 245, 245, 246, 246, 246, 242, 242, 199, 199, 199, - 199, 199, 251, 251, 251, 251, 263, 263, 210, 210, - 212, 212, 211, 211, 161, 264, 264, 272, 269, 269, - 270, 270, 296, 296, 296, 273, 273, 286, 286, 282, - 282, 283, 283, 276, 276, 288, 288, 288, 77, 208, - 208, 365, 365, 362, 291, 291, 293, 293, 297, 297, - 301, 301, 298, 298, 8, 410, 410, 410, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, - 289, 289, 289, 289, 289, 289, 289, 289, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 
290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, - 290, 290, 413, 414, 307, 308, 308, 308, + 119, 119, 119, 119, 119, 119, 119, 119, 120, 120, + 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120, 120, 120, 120, 120, 421, 421, 330, 330, + 330, 206, 206, 206, 206, 206, 126, 126, 126, 126, + 
126, 312, 312, 312, 316, 316, 316, 314, 314, 314, + 314, 314, 314, 314, 314, 314, 314, 314, 314, 314, + 314, 314, 317, 317, 224, 224, 122, 122, 222, 222, + 223, 225, 225, 217, 217, 217, 217, 219, 219, 202, + 202, 202, 227, 227, 318, 318, 228, 228, 106, 107, + 107, 108, 108, 229, 229, 231, 230, 230, 232, 233, + 233, 233, 234, 234, 235, 235, 235, 49, 49, 49, + 49, 49, 44, 44, 44, 44, 45, 45, 45, 45, + 137, 137, 137, 137, 139, 139, 138, 138, 82, 82, + 83, 83, 83, 143, 143, 144, 144, 144, 141, 141, + 142, 142, 252, 252, 252, 252, 252, 252, 252, 236, + 236, 236, 243, 243, 243, 239, 239, 241, 241, 241, + 242, 242, 242, 240, 249, 249, 251, 251, 250, 250, + 246, 246, 247, 247, 248, 248, 248, 244, 244, 201, + 201, 201, 201, 201, 253, 253, 253, 253, 306, 306, + 306, 265, 265, 212, 212, 214, 214, 213, 213, 163, + 266, 266, 274, 271, 271, 272, 272, 298, 298, 298, + 275, 275, 288, 288, 284, 284, 285, 285, 278, 278, + 290, 290, 290, 77, 210, 210, 369, 369, 366, 293, + 293, 295, 295, 299, 299, 303, 303, 300, 300, 8, + 414, 414, 414, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, + 291, 291, 291, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 
292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 
292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, + 292, 417, 418, 310, 311, 311, 311, } var yyR2 = [...]int{ @@ -8535,32 +8678,32 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 0, 1, 1, 0, 1, 1, 1, 2, 3, 2, - 3, 0, 1, 3, 1, 4, 3, 3, 4, 3, - 2, 3, 4, 3, 4, 2, 7, 1, 3, 3, - 3, 3, 1, 2, 1, 1, 3, 2, 3, 3, - 2, 5, 7, 10, 9, 7, 8, 1, 1, 10, - 11, 9, 8, 8, 1, 1, 1, 3, 1, 3, - 1, 3, 0, 4, 3, 1, 3, 3, 3, 3, - 3, 1, 1, 2, 5, 4, 1, 3, 3, 2, - 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, - 6, 12, 2, 0, 2, 0, 2, 1, 0, 2, - 1, 3, 3, 0, 1, 1, 3, 3, 6, 4, - 7, 8, 8, 8, 6, 3, 1, 1, 5, 0, - 1, 1, 1, 1, 2, 2, 2, 0, 1, 4, - 4, 4, 4, 4, 4, 2, 4, 1, 3, 1, - 1, 3, 4, 3, 3, 3, 5, 10, 0, 2, - 0, 2, 3, 5, 3, 4, 2, 3, 2, 3, - 3, 3, 3, 2, 2, 4, 4, 1, 1, 1, - 1, 1, 0, 2, 2, 3, 3, 2, 2, 2, - 1, 1, 2, 2, 2, 2, 2, 2, 1, 1, - 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 2, 3, 2, 3, 0, + 1, 3, 1, 4, 3, 3, 4, 3, 2, 3, + 4, 3, 4, 2, 7, 1, 3, 3, 3, 3, + 1, 2, 1, 1, 3, 2, 3, 3, 2, 5, + 7, 10, 9, 7, 8, 1, 1, 10, 11, 9, + 8, 8, 1, 1, 1, 3, 1, 3, 1, 3, + 0, 4, 3, 1, 3, 3, 3, 3, 3, 1, + 1, 2, 5, 4, 1, 3, 3, 2, 2, 2, + 2, 2, 1, 1, 1, 1, 2, 2, 6, 12, + 2, 0, 2, 0, 2, 1, 0, 2, 1, 3, + 3, 0, 1, 1, 3, 3, 6, 4, 7, 8, + 8, 8, 6, 3, 1, 1, 5, 0, 1, 1, + 1, 1, 2, 2, 2, 0, 1, 4, 4, 4, + 4, 4, 4, 2, 4, 1, 3, 1, 1, 3, + 4, 3, 3, 3, 5, 10, 0, 2, 0, 2, + 3, 5, 3, 4, 2, 3, 2, 3, 3, 3, + 3, 2, 2, 4, 4, 1, 1, 1, 1, 1, + 0, 2, 2, 3, 3, 2, 2, 2, 1, 1, + 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, + 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 2, 1, 3, 
1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, @@ -8591,108 +8734,110 @@ var yyR2 = [...]int{ 3, 3, 3, 2, 2, 3, 4, 4, 2, 11, 3, 6, 8, 6, 6, 6, 13, 8, 6, 6, 10, 7, 5, 5, 5, 7, 5, 5, 5, 5, - 5, 7, 7, 5, 5, 0, 6, 5, 6, 4, - 5, 0, 8, 9, 0, 3, 0, 1, 0, 3, - 8, 4, 1, 3, 3, 6, 7, 7, 8, 4, - 0, 1, 0, 1, 3, 3, 1, 1, 2, 1, - 1, 0, 2, 0, 2, 5, 3, 7, 4, 4, - 4, 4, 3, 3, 3, 7, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 2, 0, 2, 2, - 1, 3, 2, 0, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 3, 1, 3, 3, 0, 2, 2, - 2, 2, 2, 2, 2, 4, 4, 3, 0, 1, - 4, 3, 4, 4, 3, 3, 3, 2, 1, 3, - 3, 3, 5, 7, 7, 6, 5, 3, 2, 4, - 5, 5, 3, 3, 7, 3, 3, 3, 3, 4, - 7, 5, 2, 4, 4, 4, 4, 4, 5, 5, - 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, - 4, 4, 4, 4, 4, 2, 3, 3, 3, 3, - 5, 2, 3, 3, 2, 3, 4, 4, 4, 3, - 4, 4, 5, 3, 0, 1, 0, 1, 1, 1, - 0, 2, 2, 0, 2, 2, 0, 2, 0, 1, - 1, 1, 1, 2, 1, 3, 1, 1, 1, 1, - 1, 3, 0, 1, 1, 3, 3, 2, 2, 1, - 1, 5, 0, 1, 0, 1, 2, 3, 0, 3, - 3, 3, 3, 3, 1, 0, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 0, 1, 1, 4, - 4, 4, 2, 2, 3, 1, 3, 2, 1, 2, - 1, 2, 2, 4, 3, 3, 6, 4, 7, 6, - 1, 3, 2, 2, 2, 2, 1, 1, 1, 3, - 2, 1, 1, 1, 0, 1, 1, 0, 3, 0, - 2, 0, 2, 1, 2, 2, 0, 1, 1, 0, - 1, 1, 5, 5, 4, 0, 2, 4, 4, 0, - 1, 0, 1, 2, 3, 4, 1, 1, 1, 1, - 1, 1, 1, 1, 3, 1, 2, 3, 5, 0, - 1, 2, 1, 1, 0, 1, 2, 1, 3, 1, - 1, 1, 4, 3, 1, 1, 2, 3, 7, 0, - 3, 0, 1, 1, 3, 1, 3, 1, 1, 3, - 3, 1, 3, 4, 4, 4, 3, 2, 4, 0, - 1, 0, 2, 0, 1, 0, 1, 2, 1, 1, - 1, 2, 2, 1, 2, 3, 2, 3, 2, 2, - 2, 1, 1, 3, 3, 0, 1, 1, 2, 6, - 5, 6, 6, 0, 2, 3, 3, 0, 2, 3, - 3, 3, 2, 3, 1, 3, 6, 3, 4, 3, - 1, 3, 4, 5, 6, 3, 4, 5, 6, 3, - 4, 1, 1, 1, 3, 3, 3, 3, 3, 3, - 5, 5, 3, 3, 3, 3, 3, 3, 1, 1, - 1, 1, 1, 3, 1, 1, 1, 2, 2, 2, - 2, 1, 1, 2, 7, 7, 6, 6, 2, 2, - 5, 6, 3, 3, 1, 3, 1, 3, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 2, 2, 4, 2, 4, 0, 1, 2, 5, 0, - 3, 0, 1, 4, 4, 2, 0, 1, 1, 2, - 2, 1, 1, 2, 2, 0, 1, 1, 1, 1, - 5, 1, 3, 0, 3, 1, 1, 1, 2, 1, - 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 3, 4, 6, 4, 4, 8, 6, - 8, 6, 5, 4, 10, 2, 2, 1, 2, 2, - 2, 2, 2, 4, 5, 5, 5, 5, 5, 4, 
- 4, 4, 4, 4, 4, 4, 4, 4, 4, 8, - 4, 8, 8, 6, 5, 4, 4, 4, 4, 4, - 7, 4, 4, 6, 6, 6, 8, 6, 6, 4, - 4, 3, 4, 6, 6, 4, 4, 6, 4, 6, - 4, 4, 4, 4, 4, 4, 6, 4, 6, 4, - 4, 4, 6, 4, 6, 4, 4, 6, 4, 6, + 5, 7, 7, 5, 5, 5, 5, 0, 6, 5, + 6, 4, 5, 0, 8, 9, 0, 3, 0, 1, + 0, 3, 8, 4, 1, 3, 3, 6, 7, 7, + 8, 4, 0, 1, 0, 1, 3, 3, 1, 1, + 2, 1, 1, 0, 2, 0, 2, 5, 3, 7, + 4, 4, 4, 4, 3, 3, 3, 7, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 2, 0, + 2, 2, 1, 3, 2, 0, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 1, 3, 3, 0, + 2, 2, 2, 2, 2, 2, 2, 4, 4, 3, + 0, 1, 4, 3, 4, 4, 3, 3, 3, 2, + 1, 3, 3, 3, 5, 7, 7, 6, 5, 3, + 2, 4, 5, 5, 3, 3, 7, 3, 3, 3, + 3, 4, 7, 5, 2, 4, 4, 4, 4, 4, + 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 2, 2, 4, 4, 4, 4, 4, 2, 3, 3, + 3, 3, 3, 5, 2, 3, 3, 2, 3, 4, + 4, 4, 3, 4, 4, 5, 3, 0, 1, 0, + 1, 1, 1, 0, 2, 2, 0, 2, 2, 0, + 2, 0, 1, 1, 1, 1, 2, 1, 3, 1, + 1, 1, 1, 1, 3, 0, 1, 1, 3, 3, + 2, 2, 1, 1, 5, 0, 1, 0, 1, 2, + 3, 0, 3, 3, 3, 1, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, + 4, 4, 4, 2, 2, 3, 1, 3, 2, 1, + 2, 1, 2, 2, 4, 3, 3, 6, 4, 7, + 6, 1, 3, 2, 2, 2, 2, 1, 1, 1, + 3, 2, 1, 1, 1, 0, 1, 1, 0, 3, + 0, 2, 0, 2, 1, 2, 2, 0, 1, 1, + 0, 1, 1, 5, 5, 4, 0, 2, 4, 4, + 0, 1, 1, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 3, 1, 2, 3, 5, 0, 1, 2, + 1, 1, 0, 1, 2, 1, 3, 1, 1, 1, + 4, 3, 1, 1, 2, 3, 7, 0, 3, 0, + 1, 1, 3, 1, 3, 1, 1, 3, 3, 1, + 3, 4, 4, 4, 3, 2, 4, 0, 1, 0, + 2, 0, 1, 0, 1, 2, 1, 1, 1, 2, + 2, 1, 2, 3, 2, 3, 2, 2, 2, 1, + 1, 3, 3, 0, 1, 1, 2, 6, 5, 6, + 6, 5, 5, 0, 2, 3, 3, 0, 2, 3, + 3, 3, 2, 3, 1, 3, 6, 1, 1, 3, + 4, 3, 1, 3, 4, 5, 6, 3, 4, 5, + 6, 3, 4, 1, 1, 1, 3, 3, 3, 3, + 3, 3, 5, 5, 3, 3, 3, 3, 3, 3, + 1, 1, 1, 1, 1, 3, 1, 1, 1, 2, + 2, 2, 2, 1, 1, 2, 7, 7, 6, 6, + 2, 2, 5, 6, 3, 3, 1, 3, 1, 3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 4, 2, 4, 0, 1, 2, + 5, 0, 3, 0, 1, 4, 4, 2, 1, 0, + 0, 1, 1, 2, 2, 1, 1, 2, 2, 0, + 1, 1, 1, 1, 5, 1, 3, 0, 3, 1, + 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 3, 4, 6, + 4, 4, 8, 8, 6, 8, 6, 5, 4, 10, + 2, 2, 1, 2, 2, 2, 2, 2, 5, 6, + 6, 6, 6, 6, 
5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 8, 4, 8, 8, 6, 5, + 4, 4, 4, 4, 4, 7, 4, 4, 6, 6, + 6, 8, 6, 6, 4, 4, 3, 4, 6, 6, + 4, 4, 6, 4, 6, 4, 4, 4, 4, 4, + 4, 6, 4, 6, 4, 4, 4, 6, 4, 6, + 4, 4, 6, 4, 6, 4, 6, 8, 4, 6, + 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, 6, 8, - 4, 6, 8, 4, 6, 8, 4, 6, 8, 4, - 6, 8, 4, 6, 8, 4, 6, 8, 4, 4, - 4, 6, 4, 6, 4, 8, 6, 4, 4, 6, - 4, 6, 8, 4, 6, 8, 4, 4, 6, 8, - 6, 4, 6, 6, 8, 10, 7, 8, 8, 9, - 4, 4, 4, 4, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, - 6, 4, 6, 5, 9, 6, 9, 8, 6, 8, - 8, 8, 6, 1, 1, 1, 1, 1, 1, 1, - 1, 0, 2, 6, 8, 10, 12, 14, 6, 8, - 8, 10, 12, 14, 6, 8, 10, 12, 6, 8, - 4, 4, 3, 4, 6, 6, 4, 6, 4, 6, - 8, 0, 2, 1, 1, 1, 1, 1, 1, 1, + 4, 6, 8, 4, 4, 4, 6, 4, 6, 4, + 8, 6, 4, 4, 6, 4, 6, 8, 4, 6, + 8, 4, 4, 6, 8, 6, 4, 6, 6, 8, + 10, 7, 8, 8, 9, 4, 4, 4, 4, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 4, + 4, 4, 4, 4, 4, 6, 4, 6, 5, 9, + 6, 9, 8, 6, 8, 8, 8, 6, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 2, 6, 8, + 10, 12, 14, 6, 8, 8, 10, 12, 14, 6, + 8, 10, 12, 6, 8, 4, 4, 3, 4, 6, + 6, 4, 6, 4, 6, 8, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 0, 2, 0, 2, 3, 4, 4, 4, 4, - 4, 0, 3, 4, 7, 3, 1, 1, 1, 0, - 5, 5, 2, 3, 1, 2, 2, 1, 2, 1, - 2, 2, 1, 2, 2, 1, 1, 0, 1, 0, - 1, 0, 2, 1, 2, 4, 0, 2, 1, 1, - 3, 5, 1, 1, 1, 2, 2, 0, 3, 0, - 2, 2, 1, 3, 0, 1, 0, 1, 3, 1, - 3, 2, 0, 1, 1, 0, 1, 2, 4, 4, - 0, 2, 2, 1, 1, 3, 3, 3, 3, 3, - 3, 3, 3, 0, 3, 3, 3, 0, 3, 1, - 1, 0, 4, 0, 1, 1, 0, 3, 1, 3, - 2, 1, 1, 0, 1, 2, 4, 9, 3, 5, - 0, 3, 3, 0, 1, 0, 2, 2, 0, 2, - 2, 2, 0, 2, 1, 2, 3, 3, 0, 2, - 1, 2, 3, 4, 3, 0, 1, 2, 1, 5, - 4, 4, 1, 3, 3, 5, 0, 5, 1, 3, - 1, 2, 3, 4, 1, 1, 3, 3, 1, 2, - 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, - 2, 0, 3, 0, 1, 0, 1, 1, 5, 0, - 1, 0, 1, 2, 1, 1, 1, 1, 1, 1, - 0, 1, 1, 1, 3, 0, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 2, 0, 2, + 3, 4, 4, 4, 4, 4, 0, 3, 4, 7, + 3, 1, 1, 1, 0, 5, 5, 2, 3, 1, + 2, 2, 1, 2, 1, 2, 2, 
1, 2, 2, + 1, 1, 0, 1, 0, 1, 0, 2, 1, 2, + 4, 0, 2, 1, 1, 3, 5, 1, 1, 1, + 2, 2, 0, 4, 0, 2, 0, 2, 2, 1, + 3, 0, 1, 0, 1, 3, 1, 3, 2, 0, + 1, 1, 0, 1, 2, 4, 4, 0, 2, 2, + 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, + 0, 3, 3, 3, 0, 3, 1, 1, 0, 4, + 0, 1, 1, 0, 3, 1, 3, 2, 1, 1, + 0, 1, 2, 3, 4, 2, 3, 4, 4, 9, + 3, 5, 0, 3, 3, 0, 1, 0, 2, 2, + 0, 2, 2, 2, 0, 2, 1, 2, 3, 3, + 0, 2, 1, 2, 3, 4, 3, 0, 1, 3, + 1, 6, 5, 4, 1, 3, 3, 5, 0, 2, + 5, 0, 5, 1, 3, 1, 2, 3, 4, 1, + 1, 3, 3, 1, 2, 1, 1, 1, 1, 1, + 1, 1, 0, 1, 0, 2, 0, 3, 0, 1, + 0, 1, 1, 5, 0, 1, 0, 1, 2, 1, + 1, 1, 1, 1, 1, 0, 1, 1, 1, 3, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -8754,857 +8899,869 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 0, 0, 1, 1, + 1, 1, 1, 0, 0, 1, 1, } var yyChk = [...]int{ - -1000, -411, -79, -416, -7, -29, -15, -16, -17, -18, + -1000, -415, -79, -420, -7, -29, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -64, -67, -65, -66, -69, -70, -71, -72, -73, -9, -11, -68, -27, -28, -74, -75, -76, -77, -78, -12, -13, -14, - -8, -32, -31, -30, 10, 11, -108, -35, 33, -40, - -50, 227, -51, -41, 228, -52, 230, 229, 267, 231, - 379, 260, 75, 315, 316, 318, 319, 320, 321, -109, - 684, 265, 266, 233, 37, 46, 34, 35, 38, 237, - 273, 274, 236, 133, -33, -36, 9, -413, 12, 469, - 262, 261, 29, -34, 578, 87, -80, -412, 732, -250, - -234, 23, 34, 30, -233, -229, -127, -234, 21, 19, - 8, -79, -79, -79, 13, 14, -79, -350, -352, 87, - 160, 87, -79, -57, -56, -54, -53, -55, -58, 32, - -47, -48, -374, -46, -43, 232, 229, 277, 123, 124, - 267, 268, 269, 231, 251, 266, 270, 265, 286, -42, - 82, 34, 578, 581, -357, 228, 234, 235, 230, 470, - 126, 125, 76, -354, 374, 611, 702, -58, 704, 101, - 104, 703, 45, 241, 705, 706, 707, 618, 708, 250, - 709, 710, 711, 712, 718, 659, 719, 720, 721, 127, - 8, -79, -301, -297, 91, -290, 575, 253, 609, 423, - 610, 
302, 82, 42, 514, 584, 371, 374, 611, 499, - 702, 380, 315, 331, 325, 504, 505, 506, 354, 346, - 576, 612, 585, 305, 254, 290, 696, 344, 136, 704, - 309, 613, 268, 381, 382, 614, 383, 101, 318, 420, - 717, 308, 615, 715, 104, 703, 323, 80, 498, 52, - 699, 45, 263, 428, 429, 342, 236, 338, 705, 291, - 616, 587, 284, 126, 123, 724, 37, 334, 51, 31, - 714, 125, 50, 706, 151, 617, 707, 618, 385, 361, - 690, 49, 386, 269, 619, 85, 274, 580, 312, 698, - 387, 519, 335, 388, 301, 713, 233, 620, 679, 671, - 672, 389, 390, 691, 366, 362, 367, 521, 621, 412, - 503, 391, 675, 676, 731, 53, 622, 623, 692, 124, - 624, 79, 708, 81, 329, 330, 625, 299, 252, 524, - 525, 414, 358, 481, 488, 489, 111, 112, 484, 113, - 490, 114, 491, 492, 493, 482, 115, 108, 483, 494, - 495, 359, 360, 116, 496, 110, 109, 485, 487, 117, - 497, 250, 36, 392, 577, 303, 59, 307, 278, 415, - 47, 364, 728, 46, 686, 526, 626, 689, 357, 353, - 478, 54, 627, 628, 629, 630, 500, 709, 356, 328, - 352, 723, 4, 296, 501, 710, 63, 235, 369, 368, - 370, 285, 411, 349, 631, 632, 633, 257, 83, 634, - 339, 22, 635, 636, 393, 292, 637, 57, 638, 639, - 418, 266, 640, 55, 711, 40, 641, 271, 725, 712, - 642, 643, 644, 685, 645, 273, 646, 395, 647, 673, - 674, 394, 363, 365, 527, 280, 396, 379, 238, 579, - 648, 313, 333, 270, 716, 649, 258, 515, 516, 517, - 518, 697, 523, 522, 272, 277, 265, 419, 259, 650, - 651, 652, 653, 654, 306, 670, 655, 656, 319, 718, - 479, 44, 657, 658, 659, 660, 661, 300, 295, 413, - 422, 62, 84, 376, 662, 663, 695, 327, 324, 293, - 460, 462, 463, 464, 465, 466, 461, 468, 664, 316, - 56, 719, 720, 721, 287, 722, 507, 508, 509, 510, - 10, 561, 544, 572, 545, 562, 546, 555, 547, 563, - 571, 573, 528, 536, 529, 537, 567, 550, 564, 556, - 549, 548, 570, 553, 557, 530, 538, 568, 554, 531, - 539, 532, 540, 533, 541, 566, 565, 558, 569, 534, - 542, 560, 535, 543, 559, 551, 552, 431, 729, 730, - 502, 398, 127, 297, 298, 48, 350, 279, 665, 310, - 666, 340, 341, 475, 476, 355, 326, 351, 682, 317, - 680, 281, 
399, 480, 267, 667, 421, 294, 372, 377, - 311, 583, 520, 286, 400, 694, 582, 511, 512, 348, - 345, 288, 513, 668, 684, 401, 242, 282, 283, 669, - 681, 402, 403, 304, 404, 405, 406, 407, 408, 410, - 314, 409, 683, 677, 678, 289, 459, 581, 322, 343, - 378, 441, 442, 443, 444, 445, 446, 447, 448, 449, - 450, 451, 452, 453, 454, 455, 456, 457, 458, 477, - 240, -79, 240, -188, -297, -129, 686, 688, 179, -269, - 382, -287, 384, 397, 392, 402, 390, -278, 393, 395, - 280, -398, 412, 240, 399, 227, 385, 394, 403, 404, - 304, 410, 405, 314, 409, 289, 406, 407, 408, -381, - 179, 707, 722, 136, 347, 389, 387, 413, 686, 91, - -303, 91, 92, 93, -290, 317, -305, 322, -291, -381, - -290, 320, -79, -79, -307, -307, -129, -207, -144, 144, - -157, -258, -160, 92, -149, -152, -201, -202, -203, -204, - -158, -217, -256, 168, 169, 176, 145, -213, -161, 27, - 574, 471, 470, 179, 32, 222, 69, 70, 473, 147, - 58, 12, 436, 437, -159, 426, 427, 438, 432, 433, - 498, 500, 501, 502, 499, 504, 505, 506, 507, 508, - 509, 510, 511, 512, 513, 503, 514, 475, 476, 118, - 477, 108, 110, 109, 478, 479, 480, 344, 526, 527, - 521, 524, 525, 523, 522, 359, 360, 481, 544, 545, - 549, 548, 546, 547, 550, 553, 554, 555, 556, 557, - 558, 560, 559, 551, 552, 529, 528, 530, 531, 532, - 533, 534, 535, 537, 536, 538, 539, 540, 541, 542, - 543, 561, 562, 563, 564, 565, 567, 566, 571, 570, - 568, 569, 573, 572, 482, 483, 111, 112, 113, 114, - 115, 116, 117, 484, 487, 485, 486, 488, 489, 490, - 495, 496, 491, 492, 493, 494, 497, 370, 368, 369, - 365, 364, 363, -89, -101, 600, 599, -102, 423, 428, - 429, 431, -150, -151, -163, -164, -291, -297, 245, 425, - 239, 174, 469, -153, -147, -215, 107, 93, -31, -211, - 424, 434, 435, 439, 430, 440, 586, 588, 603, 604, - 606, 591, 596, 595, 598, 515, 516, 517, 518, 519, - 520, 671, 672, 673, 674, 675, 676, 677, 678, -381, - -290, 91, -155, -154, -197, 94, 99, 102, 103, 105, - -404, 263, 340, 341, 119, -413, 700, 90, 95, 96, - 97, 98, 120, 121, 180, 181, 182, 183, 184, 185, - 
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, - 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, - 216, 217, 218, 219, 220, 221, 45, 398, 398, -188, - -79, -79, -79, -79, -410, 703, 579, -227, -127, -229, - -33, -31, -413, 9, -79, -31, -32, -30, -36, -38, - 605, -37, -297, 100, -234, -250, 13, 163, 43, 51, - -232, -233, -34, -31, -144, 20, 24, 25, -132, 170, - -144, -297, -132, -276, 244, -79, -79, -265, -310, 317, - -267, 413, 686, 412, -257, -270, 91, -256, -269, 411, - 92, -351, 160, -337, -341, -291, 255, -367, 251, -188, - -360, -359, -291, -413, -128, -286, 241, 249, 248, 137, - -385, 140, 297, 425, 239, -53, -54, -55, -269, 178, - 706, -110, 272, 276, 88, 88, -341, -340, -339, -386, - 276, 255, -366, -358, 247, 256, -347, 248, 249, -342, - 241, 138, -386, -342, 246, 256, 251, 255, 276, 276, - 127, 276, 127, 276, 276, 276, 276, 276, 276, 276, - 276, 276, 271, -348, 152, -348, 582, 582, -354, -386, - 251, 241, -386, -386, 247, -288, -342, 243, 26, 243, - 36, 36, -348, -348, -348, -269, 178, -348, -348, -348, - -348, 284, 284, -348, -348, -348, -348, -348, -348, -348, - -348, -348, -348, -348, -348, -348, -348, -348, -348, -348, - 240, -385, -136, 409, 304, 82, -56, 286, -39, -188, - -286, 241, 242, -385, 273, -188, 223, 240, 689, -280, - 160, 16, -280, -277, 398, 396, 383, 388, -280, -280, - -280, -280, 287, 381, -343, 241, 36, 252, 398, 287, - 381, 287, 288, 287, 288, 391, 401, 287, -302, 15, - 163, 425, 386, 390, 280, 240, 281, 242, 400, 288, - -302, 90, -281, 160, 287, 398, 283, -280, -280, -308, - -413, -293, -291, -289, 232, 24, 143, 26, 28, 146, - 179, 130, 20, 147, 38, 234, 347, 251, 178, 247, - 470, 227, 73, 586, 426, 433, 424, 432, 436, 472, - 473, 425, 384, 32, 14, 588, 29, 261, 25, 39, - 172, 229, 150, 589, 264, 27, 262, 118, 121, 591, - 23, 76, 256, 15, 249, 41, 17, 592, 593, 18, - 245, 244, 163, 241, 71, 12, 222, 30, 159, 67, - 594, 138, 133, 595, 596, 597, 598, 131, 69, 160, - 21, 726, 434, 
435, 34, 687, 574, 275, 174, 74, - 60, 688, 144, 430, 599, 600, 119, 601, 122, 77, - 693, 140, 19, 72, 43, 602, 276, 603, 246, 727, - 604, 416, 605, 161, 230, 469, 70, 162, 700, 606, - 701, 239, 397, 9, 474, 33, 260, 248, 129, 68, - 440, 607, 240, 149, 243, 132, 120, 8, 137, 35, - 13, 75, 78, 437, 438, 439, 58, 128, 578, 148, - 16, 608, 417, 142, -381, 689, -308, -308, 33, 92, - -407, -408, -409, 578, 416, 243, -291, -188, -85, 679, - 231, -86, 685, 24, 238, -134, 398, -122, 179, 707, - 690, 691, 692, 689, 395, 697, 695, 693, 287, 694, - 88, 140, 142, 143, 4, -144, 159, -198, 152, 153, - 154, 155, 156, 157, 158, 164, 163, 144, 146, 160, - -243, 141, 165, 166, 167, 168, 169, 170, 171, 173, - 172, 174, 175, 161, 162, 178, 225, 226, -152, -152, - -152, -152, -213, -219, -218, -413, -215, -381, -290, -297, - -413, -413, -152, -275, -413, -149, -413, -413, -413, -413, - -222, -144, -413, -413, -417, -413, -417, -417, -417, -326, - -413, -326, -326, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, -413, -413, -413, -413, 223, -413, -413, -413, - -413, -413, -326, -326, -326, -326, -326, -326, -413, -413, - -413, -413, -413, -413, -413, -413, -413, -413, -413, -413, - -413, -413, 103, 99, 102, 94, -217, 105, 90, 90, - 90, 90, -31, -32, -207, -413, -307, -395, -396, -191, - -188, -413, 304, -291, 
-291, 273, 96, -232, -34, -31, - -227, -233, -229, -31, -79, -120, -133, 64, 65, -135, - 25, 39, 68, 66, 24, -414, 89, -414, -250, -414, - 88, -38, -253, 87, 62, 44, 90, 90, 88, 22, - -228, -230, -144, 15, -295, 4, -294, 26, -291, 90, - 223, 15, -189, 30, -188, -276, -276, 88, 91, 317, - -266, -268, 414, 416, 152, -296, -291, 90, 32, 89, - 88, -188, -315, -318, -320, -319, -321, -316, -317, 344, - 345, 179, 348, 350, 351, 352, 353, 354, 355, 356, - 357, 358, 361, 33, 263, 340, 341, 342, 343, 362, - 363, 364, 365, 367, 368, 369, 370, 325, 346, 576, - 326, 327, 328, 329, 330, 331, 333, 334, 337, 335, - 336, 338, 339, -382, -381, 87, 89, 88, -322, 87, - -144, -136, 240, -381, 241, 241, 241, -79, 469, -348, - -348, -348, 271, 20, -46, -43, -374, 19, -42, -43, - 232, 123, 124, 229, 87, -337, 87, -346, -382, -381, - 87, 138, 246, 137, -345, -342, -345, -346, -381, -215, - -381, 138, 138, -381, -381, -262, -291, -262, -262, 24, - -262, 24, -262, 24, 96, -291, -262, 24, -262, 24, - -262, 24, -262, 24, -262, 24, 32, 79, 80, 81, - 32, 83, 84, 85, -215, -381, -381, -215, -337, -215, - -188, -381, -269, 96, 96, 96, -348, -348, 96, 90, - 90, 90, -348, -348, 96, 90, -299, -297, 90, 90, - -387, 257, 301, 303, 96, 96, 96, 96, 32, 90, - -388, 32, 714, 713, 715, 716, 717, 90, 96, 32, - 96, 32, 96, -291, 87, -188, -142, 291, 227, 229, - 232, 77, 90, 307, 308, 305, 310, 311, 152, 45, - 88, 243, 240, -381, -282, 245, -282, -291, -298, -297, - -289, -188, 243, 380, 90, -144, -344, 15, 163, -302, - -302, -280, -188, -344, -302, -280, -188, -280, -280, -280, - -280, -302, -302, -302, -280, -297, -297, -188, -188, -188, - -188, -188, -188, -188, -308, -281, -280, 689, 90, -274, - 15, 77, -308, -308, 88, 323, 417, 418, -306, 320, - -81, -291, 90, -10, -29, -18, -17, -19, 152, -10, - 88, 578, -181, -188, 689, 689, 689, 689, 689, 689, - -144, -144, -144, -144, 601, -205, 119, 144, 120, 121, - -160, -144, -206, -211, -213, 106, 163, 146, 160, -243, - -149, -152, -149, -149, -149, -149, -149, 
-149, 222, -149, - 222, -149, -149, -149, -149, -149, -149, -309, -291, 90, - 179, -156, -155, 105, -404, -156, 575, 88, -218, 223, - -144, -144, -381, -118, 442, 443, 444, 445, 447, 448, - 449, 452, 453, 457, 458, 441, 459, 446, 451, 454, - 455, 456, 450, 343, -144, -130, -132, -130, -144, -220, - -221, 148, -215, -144, -414, -414, 96, 170, -126, 25, - 39, -126, -126, -126, -126, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -126, -144, -119, 441, 459, - 446, 451, 454, 455, 456, 450, 343, 460, 461, 462, - 463, 464, 465, 466, 467, 468, -119, -118, -144, -144, - -144, -144, -144, -144, -87, -144, 130, 131, 132, -207, - -144, -149, -144, -144, -144, -414, -144, -144, -144, -208, - -207, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -380, -379, -378, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -207, -207, -207, -207, -207, -144, -414, - -144, -162, -147, 96, -258, 105, 92, -144, -144, -144, - -144, -144, -144, -131, -130, -293, -298, -289, -290, -130, - -131, -131, -130, -130, -144, -144, -144, -144, -144, -144, - -144, -144, -414, -144, -144, -144, -144, -144, -250, -414, - -207, 88, -397, 416, 417, 687, -300, 276, -299, 26, - -208, 90, 15, -260, 78, -291, -232, -232, 64, 65, - 60, -130, -135, -414, -37, 26, -252, -291, 63, 90, - -327, -269, 371, 372, 179, -144, -144, 88, -231, 28, - 29, -188, -294, 170, -298, -188, -261, 276, -188, -166, - -168, -169, -170, -191, -214, -413, -171, -31, 597, 594, - 15, -181, -182, -190, -297, -267, -310, -266, 88, 415, - 417, 418, 77, 122, -144, -328, 178, -356, -355, -354, - -337, -339, -340, -341, 89, -328, -333, 377, 376, -322, - -322, -322, -322, -322, -327, -327, -327, -327, 87, 
87, - -322, -322, -322, -322, -330, 87, -330, -330, -331, -330, - 87, -331, -332, 87, -332, -367, -144, -364, -363, -361, - -362, 250, 101, 669, 625, 578, 618, 659, 78, -359, - -231, 96, -414, -142, -283, 245, -365, -362, -381, -381, - -381, -283, 91, 90, 91, 90, 91, 90, -111, -60, - -1, 726, 727, 728, 88, 20, -338, -337, -59, 301, - -370, -371, 276, -366, -360, -346, 138, -345, -346, -346, - -381, 88, 30, 127, 127, 127, 127, 578, 229, 33, - -284, 617, 144, 669, 625, -337, -59, 243, 243, -309, - -309, -309, 90, 90, -279, 722, -181, -138, 293, 152, - 282, 282, 240, 295, 240, 295, -188, 306, 309, 307, - 308, 305, 310, 311, 24, 24, 24, 24, 24, 294, - 296, 298, 284, -188, -188, -282, 77, -183, -188, 27, - -297, 90, 90, -188, -280, -280, -188, -280, -280, -188, - -409, 324, -291, 358, 680, 681, 683, 682, -122, 416, - 88, 578, 23, -123, 23, -413, 119, 120, 121, -206, - -149, -152, -149, 143, 264, -149, -149, -413, -215, -414, - -293, 26, 88, 78, -414, 168, 88, 88, -414, -414, - 88, 15, -223, -221, 150, -144, -414, 88, -414, -414, - -207, -144, -144, -144, -144, -414, -414, -414, -414, -414, - -414, -414, -414, -414, -414, -207, -414, 88, 88, 15, - -313, 26, -414, -414, -414, -414, -414, -222, -414, 15, - -414, 78, 88, 163, 88, -414, -414, -414, 88, 88, - -414, -414, 88, -414, 88, -414, -414, -414, -414, -414, - -414, 88, -414, 88, -414, -414, -414, 88, -414, 88, - -414, -414, 88, -414, 88, -414, 88, -414, 88, -414, - 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, - 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, - 88, -414, 88, -414, 88, -414, 88, -414, -414, -414, - 88, -414, 88, -414, 88, -414, -414, 88, -414, 88, - -414, 88, -414, 88, 88, -414, 88, 88, 88, -414, - 88, 88, 88, 88, -414, -414, -414, -414, 88, 88, - 88, 88, 88, 88, 88, 88, 88, 88, -414, -414, - -414, -414, -414, -414, 88, -94, 602, -414, -414, 88, - -414, 88, 88, 88, 88, 88, -414, -413, 223, -414, - -414, -414, -414, -414, 88, 88, 88, 88, 88, 88, - -414, -414, -414, 88, 88, -414, 88, -414, 88, -414, 
- -396, 686, 417, -195, -194, -192, 75, 244, 76, -413, - -299, -414, -156, -258, -259, -258, -200, -291, 96, 105, - -234, -165, -167, 15, -135, -213, 89, 88, -327, -238, - -244, -277, -291, 90, 179, -329, 179, -329, 371, 372, - -230, 223, -196, 16, -199, 33, 58, -29, -413, -413, - 33, 88, -184, -186, -185, -187, 67, 71, 73, 68, - 69, 70, 74, -304, 26, -31, -166, -31, -413, -188, - -181, -415, 15, 78, -415, 88, 223, -268, -271, 419, - 416, 422, -381, 90, -110, 88, -354, -341, -235, -139, - 41, -334, 378, -327, 585, -327, -336, 90, -336, 96, - 96, 96, 89, -49, -44, -45, 34, 82, -361, -348, - 90, 40, -348, -348, -291, 89, -231, -138, -188, 144, - 77, -365, -365, -365, -297, -2, 725, 731, 138, 87, - 383, 19, -252, 88, 89, -216, 302, 89, -112, -291, - 89, 87, -346, -346, -291, -413, 240, 32, 32, 669, - 625, 617, -59, -216, -215, -381, -328, 724, 723, 89, - 242, 300, -143, 436, -140, 90, 91, -188, -188, -188, - -188, -188, -188, 232, 229, 406, -405, 312, -405, 285, - 243, -181, -188, 88, -84, 259, 254, -302, -302, 34, - -188, 416, 698, 696, -144, 143, 264, -160, -152, -118, - -118, -149, -311, 179, 344, 263, 342, 338, 358, 349, - 376, 340, 377, 335, 334, 333, -311, -309, -149, -207, - -132, -144, -144, 151, -144, 149, -144, -414, -414, -414, - -414, -414, -227, -144, -144, -144, -414, 179, 344, 15, - -144, -309, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -378, -144, -207, - -144, -207, -144, -144, -144, -144, -144, -379, -379, -379, - -379, -379, -207, -207, -207, -207, -144, -413, -291, -97, - -96, -95, 652, 244, -94, -162, -97, -162, 222, -144, - 222, 222, 222, -144, -131, -293, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -192, -342, -342, -342, - -262, 88, -273, 23, 15, 58, 58, -165, -196, -166, - -135, -291, -241, 679, -247, 47, -245, -246, 48, -242, - 49, 57, -329, -329, 170, 
-232, -144, -263, 77, -264, - -272, -215, -210, -212, -211, -413, -251, -414, -291, -262, - -264, -168, -169, -169, -168, -169, 67, 67, 67, 72, - 67, 72, 67, -185, -297, -414, -144, -300, 78, -166, - -166, -190, -297, 170, 416, 420, 421, -354, -403, 119, - 144, 32, 77, 374, 101, -401, 178, 614, 664, 669, - 625, 618, 659, -402, 246, 137, 138, 258, 26, 42, - 89, 88, 89, 88, 89, 89, 88, -285, -284, -45, - -44, -348, -348, 96, -381, 90, 90, 242, 27, -188, - 77, 77, 77, -113, 729, 96, 87, -3, 82, -144, - 87, 20, -337, -215, -372, -323, -373, -324, -325, -5, - -6, -349, -116, 58, 101, -63, 45, 241, 709, 710, - 127, -413, 722, -364, -252, -368, -370, -188, -148, -413, - -159, -146, -145, -147, -153, 168, 169, 263, 340, 341, - -216, -188, -137, 291, 299, 87, -141, 92, -384, 78, - 282, 374, 282, 374, 90, -406, 313, 90, -406, -188, - -84, -49, -188, -280, -280, 34, -381, -414, -160, -152, - -125, 163, 578, -314, 584, -322, -322, -322, -332, -322, - 330, -322, 330, -322, -414, -414, -414, 88, -414, 23, - -414, -144, 88, -121, 474, 88, 88, -414, 87, 87, - -144, -414, -414, -414, 88, -414, -414, -414, -414, -414, - -414, -414, -414, -414, -414, -414, -414, -414, 88, -414, - 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, - 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, - 88, -414, 88, -414, 88, -414, 88, -414, 88, -414, - -414, 88, -414, -414, -414, 88, -414, 88, -414, 88, - -414, -414, -414, 88, -312, 670, -414, -414, -414, -414, - -414, -414, -414, -414, -414, -414, -414, -93, -292, -291, - -94, 634, 634, -414, -94, -224, 88, -149, -414, -149, - -149, -149, -414, -414, -414, 88, -414, 88, 88, -414, - 88, -414, 88, -414, -414, -414, -414, 88, -193, 23, - -193, -193, -414, -258, -188, -196, -225, 17, -238, 52, - 350, -249, -248, 56, 48, -246, 20, 50, 20, 31, - -263, 88, 152, 88, -414, -414, 88, 58, 223, -414, - -196, -179, -178, 77, 78, -180, 77, -178, 67, 67, - -253, 88, -261, -166, -196, -196, 223, 119, -413, -148, - 13, 90, 90, -381, -400, 713, 714, 32, 96, -348, - 
-348, 138, 138, -188, 87, -327, 90, -327, 96, 96, - 32, 83, 84, 85, 32, 79, 80, 81, -188, -188, - -188, -188, -369, 87, 20, -144, 87, 152, 89, -252, - -252, 278, 163, -348, 707, 284, 284, -348, -348, -348, - -115, -114, 729, 89, -414, 88, -335, 578, 581, -144, - -154, -154, -253, 89, -377, 578, -383, -291, -291, -291, - -291, 96, 98, -414, 576, 74, 579, -414, -327, -144, - -144, -144, -232, 90, -144, -144, 96, 96, -414, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -144, - -144, -144, -144, -144, -144, -144, -144, -144, -144, -207, - -144, -414, -176, -175, -177, 690, 119, 32, -311, -414, - -209, 276, -100, -99, -98, 15, -414, -144, -118, -118, - -118, -118, -144, -144, -144, -144, -144, -144, -413, 67, - 19, 17, -413, -413, -300, -225, -226, 18, 20, -239, - 54, -237, 53, -237, -248, 20, 20, 90, 20, 90, - 138, -272, -144, -212, 58, -29, -291, -210, -291, -227, - -144, 87, -144, -156, -196, -196, -144, -202, 498, 500, - 501, 502, 499, 504, 505, 506, 507, 508, 509, 510, - 511, 512, 513, 503, 514, 475, 476, 477, 108, 110, - 109, 478, 479, 480, 344, 526, 527, 521, 524, 525, - 523, 522, 359, 360, 481, 544, 545, 549, 548, 546, - 547, 550, 553, 554, 555, 556, 557, 558, 560, 559, - 551, 552, 529, 528, 530, 531, 532, 533, 534, 535, - 537, 536, 538, 539, 540, 541, 542, 543, 561, 562, - 563, 564, 565, 567, 566, 571, 570, 568, 569, 573, - 572, 482, 483, 111, 112, 113, 114, 115, 116, 117, - 484, 487, 485, 488, 489, 490, 495, 496, 491, 492, - 493, 494, 497, 370, 368, 369, 365, 364, 363, 423, - 428, 429, 431, 515, 516, 517, 518, 519, 520, 671, - 672, 673, 674, 675, 676, 677, 678, 90, 90, 87, - -144, 89, 89, -253, -368, -60, 89, -254, -252, 96, - 89, 279, -211, -413, 90, -348, -348, -348, 96, 96, - -299, -414, 88, -291, -402, -370, 582, 582, -414, 26, - -376, -375, -293, 87, 78, 63, 577, 580, -414, -414, - 88, -414, -414, -414, 89, 89, -414, -414, -414, -414, - -414, -414, -414, -414, -414, -414, -414, -414, -414, -414, - -414, -414, -414, -414, -414, -414, -414, 
-414, 88, -414, - -175, -177, -414, 77, -156, -227, 20, -97, 301, 303, - -97, -414, -414, -414, -414, -414, 88, -414, -414, 88, - -414, 88, -414, -414, -255, -414, -291, 246, 20, 20, - -255, -255, -195, -226, -107, -106, -105, 608, -144, -207, - -240, 55, 77, 122, 90, 90, 90, 13, -210, 223, - -232, -252, -173, 383, -227, -414, -252, 89, 26, 89, - 731, 138, 89, -211, -124, -413, 275, -299, 90, 90, - -114, -117, -29, 88, 152, -252, -188, 63, -144, -207, - -414, 77, 589, 690, -92, -91, -88, 701, 727, -207, - -94, -94, -144, -144, -144, 88, -414, -414, -414, -107, - 88, -104, -103, -291, 77, 122, -264, -291, 89, -414, - -413, -232, 89, -236, -29, 87, -3, 275, -323, -373, - -324, -325, -5, -6, -349, -82, 578, -375, -353, -297, - -293, 90, 96, 89, 578, -414, -414, -90, 146, 699, - 667, -154, 222, -414, 88, -414, 88, -414, 88, -291, - 246, -105, 88, 26, -300, -174, -172, -291, 631, -393, - -392, 574, -403, -399, 119, 144, 101, -401, 669, 625, - 128, 129, -82, -144, 87, -414, -83, 290, 686, 223, - -384, 579, -90, 700, 645, 620, 645, 620, -149, -144, - -144, -144, -103, -413, -414, 88, 23, -315, -62, 642, - -390, -391, 77, -394, 389, 641, 662, 119, 90, 89, - -252, 251, -298, -377, 580, 143, -118, -414, 88, -414, - 88, -414, -93, -172, 638, -328, -156, -391, 77, -390, - 77, 14, 13, -4, 730, 89, 292, -90, 645, 620, - -144, -144, -414, -61, 27, -173, -389, 259, 254, 257, - 33, -389, 96, -4, -414, -414, 642, 253, 32, 119, - -156, -176, -175, -175, + -8, -32, -31, -30, 11, 12, -109, -35, 34, -40, + -50, 229, -51, -41, 230, -52, 232, 231, 269, 233, + 382, 262, 76, 318, 319, 321, 322, 323, 324, -110, + 689, 267, 268, 235, 38, 47, 35, 36, 39, 239, + 275, 276, 238, 135, -33, -36, 10, -417, 13, 472, + 264, 263, 30, -34, 582, 88, -80, -416, 737, -252, + -236, 24, 35, 31, -235, -231, -128, -236, 22, 20, + 9, -79, -79, -79, 14, 15, -79, -354, -356, 88, + 162, 88, -79, -57, -56, -54, -53, -55, -58, 33, + -47, -48, -378, -46, -43, 234, 231, 279, 125, 126, + 269, 270, 271, 233, 253, 268, 
272, 267, 288, -42, + 83, 35, 582, 585, -361, 230, 236, 237, 232, 473, + 128, 127, 77, -358, 377, 616, 707, -58, 709, 102, + 105, 708, 46, 243, 710, 711, 712, 623, 713, 252, + 714, 715, 716, 717, 723, 664, 724, 725, 726, 129, + 9, -79, -303, -299, 92, -292, 579, 255, 614, 426, + 615, 304, 83, 43, 518, 589, 374, 377, 616, 503, + 707, 383, 318, 334, 328, 508, 509, 510, 357, 349, + 580, 617, 590, 307, 256, 292, 701, 347, 138, 709, + 311, 618, 270, 384, 385, 619, 386, 102, 321, 423, + 722, 310, 620, 720, 105, 708, 326, 81, 502, 53, + 704, 46, 265, 431, 432, 345, 238, 341, 710, 293, + 621, 592, 286, 128, 125, 729, 38, 337, 52, 32, + 719, 127, 51, 711, 153, 622, 712, 623, 388, 364, + 695, 50, 389, 271, 624, 86, 276, 584, 315, 703, + 390, 523, 338, 391, 303, 718, 235, 625, 314, 684, + 676, 677, 392, 393, 696, 369, 365, 370, 525, 626, + 415, 507, 394, 680, 681, 736, 54, 627, 628, 697, + 126, 629, 80, 713, 82, 332, 333, 630, 301, 254, + 528, 529, 417, 361, 485, 492, 493, 112, 113, 488, + 114, 494, 115, 495, 496, 497, 486, 116, 109, 487, + 498, 499, 362, 363, 117, 500, 111, 110, 489, 491, + 118, 501, 252, 37, 395, 581, 305, 60, 309, 280, + 418, 48, 367, 733, 47, 691, 530, 631, 694, 360, + 356, 482, 55, 632, 633, 634, 635, 504, 714, 359, + 331, 355, 728, 4, 298, 477, 505, 715, 64, 237, + 372, 371, 373, 287, 414, 352, 636, 637, 638, 259, + 84, 639, 342, 23, 640, 641, 396, 294, 642, 58, + 643, 644, 421, 268, 645, 56, 716, 41, 646, 273, + 730, 717, 647, 648, 649, 690, 650, 275, 651, 398, + 652, 678, 679, 397, 366, 368, 531, 282, 399, 382, + 240, 583, 653, 316, 336, 272, 721, 654, 260, 519, + 520, 521, 522, 702, 527, 526, 274, 279, 267, 422, + 261, 655, 656, 657, 658, 659, 308, 675, 660, 661, + 322, 587, 723, 483, 45, 662, 663, 664, 665, 666, + 302, 297, 416, 425, 63, 85, 379, 667, 668, 700, + 330, 327, 295, 463, 465, 466, 467, 468, 469, 464, + 471, 669, 319, 57, 724, 725, 726, 289, 727, 511, + 512, 513, 514, 11, 565, 548, 576, 549, 566, 550, + 559, 551, 567, 575, 577, 532, 540, 
533, 541, 571, + 554, 568, 560, 553, 552, 574, 557, 561, 534, 542, + 572, 558, 535, 543, 536, 544, 537, 545, 570, 569, + 562, 573, 538, 546, 564, 539, 547, 563, 555, 556, + 434, 734, 735, 506, 401, 129, 299, 300, 49, 353, + 281, 670, 312, 671, 343, 344, 479, 480, 358, 329, + 354, 687, 320, 685, 283, 402, 484, 269, 672, 424, + 296, 375, 121, 380, 313, 588, 524, 288, 403, 699, + 586, 515, 516, 351, 348, 290, 517, 673, 689, 404, + 244, 284, 285, 674, 686, 405, 406, 306, 407, 408, + 409, 410, 411, 413, 317, 412, 688, 682, 683, 291, + 462, 585, 325, 346, 381, 444, 445, 446, 447, 448, + 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, + 459, 460, 461, 481, 242, -79, 242, -190, -299, -130, + 691, 693, 181, -271, 385, -289, 387, 400, 395, 405, + 393, -280, 396, 398, 282, -401, 415, 242, 402, 229, + 388, 397, 406, 407, 306, 413, 408, 317, 412, 291, + 409, 410, 411, -385, 181, 712, 727, 138, 350, 392, + 390, 416, 691, 92, -305, 92, 93, 94, -292, 320, + -308, 325, -293, -385, -292, 323, -79, -79, -310, -310, + -130, -209, -145, 146, -159, -260, -162, 93, -150, -153, + -203, -204, -205, -206, -160, -219, -258, 170, 171, 178, + 147, -215, -163, 28, 578, 474, 473, 181, 33, 224, + 70, 71, 476, 477, 149, 59, 13, 439, 440, -161, + 429, 430, 441, 435, 436, 502, 504, 505, 506, 503, + 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, + 507, 518, 479, 480, 119, 481, 109, 111, 110, 482, + 483, 484, 347, 530, 531, 525, 528, 529, 527, 526, + 362, 363, 485, 548, 549, 553, 552, 550, 551, 554, + 557, 558, 559, 560, 561, 562, 564, 563, 555, 556, + 533, 532, 534, 535, 536, 537, 538, 539, 541, 540, + 542, 543, 544, 545, 546, 547, 565, 566, 567, 568, + 569, 571, 570, 575, 574, 572, 573, 577, 576, 486, + 487, 112, 113, 114, 115, 116, 117, 118, 488, 491, + 489, 490, 492, 493, 494, 499, 500, 495, 496, 497, + 498, 501, 373, 371, 372, 368, 367, 366, -89, -102, + 605, 604, -103, 426, 431, 432, 434, -151, -152, -165, + -166, -293, -299, 247, 428, 241, 176, 472, -154, -148, + -217, 108, 94, -31, -213, 
427, 437, 438, 442, 433, + 443, 591, 593, 608, 609, 611, 596, 601, 600, 603, + 519, 520, 521, 522, 523, 524, 676, 677, 678, 679, + 680, 681, 682, 683, -385, -292, 92, -157, -155, -199, + 95, 100, 103, 104, 106, -407, 265, 343, 344, 120, + -417, 705, -156, 97, 98, 99, 122, 123, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, + 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 91, 96, 46, 401, 401, -190, -79, -79, -79, -79, + -414, 708, 583, -229, -128, -231, -33, -31, -417, 10, + -79, -31, -32, -30, -36, -38, 610, -37, -299, 101, + -236, -252, 14, 63, 165, 44, 52, -234, -235, -34, + -31, -145, 21, 25, 26, -132, 172, -145, -299, -132, + -278, 246, -79, -79, -267, -313, 320, -269, 416, 691, + 415, -259, -272, 92, -258, -271, 414, 93, -355, 162, + -341, -345, -293, 257, -371, 253, -190, -364, -363, -293, + -417, -129, -288, 243, 251, 250, 139, -388, 142, 299, + 428, 241, -53, -54, -55, -271, 180, 711, -111, 274, + 278, 89, 89, -345, -344, -343, -389, 278, 257, -370, + -362, 249, 258, -351, 250, 251, -346, 243, 140, -389, + -346, 248, 258, 253, 257, 278, 278, 129, 278, 129, + 278, 278, 278, 278, 278, 278, 278, 278, 278, 273, + -352, 154, -352, 586, 586, -358, -389, 253, 243, -389, + -389, 249, -290, -346, 245, 27, 245, 37, 37, -352, + -352, -352, -271, 180, -352, -352, -352, -352, 286, 286, + -352, -352, -352, -352, -352, -352, -352, -352, -352, -352, + -352, -352, -352, -352, -352, -352, -352, 242, -388, -137, + 412, 306, 83, -56, 288, -39, -190, -288, 243, 244, + -388, 275, -190, 225, 242, 694, -282, 162, 17, -282, + -279, 401, 399, 386, 391, -282, -282, -282, -282, 289, + 384, -347, 243, 37, 254, 401, 289, 384, 289, 290, + 289, 290, 394, 404, 289, -304, 16, 165, 428, 389, + 393, 282, 242, 283, 244, 403, 290, -304, 91, -283, + 162, 289, 401, 395, 285, -282, -282, -311, -417, -295, + -293, -291, 234, 25, 145, 27, 29, 148, 181, 132, + 21, 149, 39, 236, 
350, 253, 180, 249, 473, 229, + 74, 591, 429, 436, 427, 435, 439, 475, 476, 428, + 387, 33, 15, 593, 30, 263, 26, 40, 174, 231, + 152, 594, 266, 28, 264, 119, 123, 596, 24, 77, + 258, 16, 251, 42, 18, 597, 598, 19, 247, 246, + 165, 243, 72, 13, 224, 31, 161, 68, 599, 140, + 135, 600, 601, 602, 603, 133, 70, 162, 22, 731, + 437, 438, 35, 692, 578, 277, 176, 75, 61, 693, + 146, 433, 604, 605, 120, 606, 124, 78, 698, 142, + 20, 73, 44, 607, 278, 608, 248, 732, 609, 419, + 610, 163, 232, 472, 71, 164, 705, 611, 706, 241, + 400, 10, 478, 34, 262, 250, 131, 69, 443, 612, + 242, 151, 245, 134, 122, 9, 139, 36, 14, 76, + 79, 440, 441, 442, 59, 130, 582, 150, 17, 613, + 420, 144, -385, 694, -311, -311, 34, 93, -411, -412, + -413, 582, 419, 245, -293, -190, -85, 684, 233, -86, + 690, 25, 240, -135, 401, -123, 181, 712, 695, 696, + 697, 694, 398, 702, 700, 698, 289, 699, 89, 142, + 144, 145, 4, -145, 161, -200, 154, 155, 156, 157, + 158, 159, 160, 166, 165, 146, 148, 162, -245, 143, + 167, 168, 169, 170, 171, 172, 173, 175, 174, 176, + 177, 163, 164, 180, 227, 228, -153, -153, -153, -153, + -215, -221, -220, -417, -217, -385, -292, -299, -417, -417, + -153, -277, -417, -150, -417, -417, -417, -417, -417, -224, + -145, -417, -417, -421, -417, -421, -421, -421, -330, -417, + -330, -330, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, 
-417, -417, -417, -417, -417, + -417, -417, -417, -417, -417, 225, -417, -417, -417, -417, + -417, -330, -330, -330, -330, -330, -330, -417, -417, -417, + -417, -417, -417, -417, -417, -417, -417, -417, -417, -417, + -417, 91, 104, 100, 103, 95, -219, 106, 91, 91, + 91, 91, -31, -32, -209, -417, -310, -398, -399, -193, + -190, -417, 306, -293, -293, 275, 97, -234, -34, -31, + -229, -235, -231, -31, -79, -121, -134, 65, 66, -133, + -136, 26, 40, 69, 67, 25, -418, 90, -418, -252, + -418, 89, -38, -255, 88, 638, 668, 638, 668, 63, + 45, 91, 91, 89, 23, -230, -232, -145, 16, -297, + 4, -296, 27, -293, 91, 225, 16, -191, 31, -190, + -278, -278, 89, 92, 320, -268, -270, 417, 419, 154, + -298, -293, 91, 33, 90, 89, -190, -319, -322, -324, + -323, -325, -320, -321, 347, 348, 181, 351, 353, 354, + 355, 356, 357, 358, 359, 360, 361, 364, 34, 265, + 343, 344, 345, 346, 365, 366, 367, 368, 370, 371, + 372, 373, 328, 349, 580, 329, 330, 331, 332, 333, + 334, 336, 337, 340, 338, 339, 341, 342, -294, -293, + 88, 90, 89, -329, 88, -145, -137, 242, -293, 243, + 243, 243, -79, 472, -352, -352, -352, 273, 21, -46, + -43, -378, 20, -42, -43, 234, 125, 126, 231, 88, + -341, 88, -350, -294, -293, 88, 140, 248, 139, -349, + -346, -349, -350, -293, -217, -293, 140, 140, -293, -293, + -264, -293, -264, -264, 25, -264, 25, -264, 25, 97, + -293, -264, 25, -264, 25, -264, 25, -264, 25, -264, + 25, 33, 80, 81, 82, 33, 84, 85, 86, -217, + -293, -293, -217, -341, -217, -190, -293, -271, 97, 97, + 97, -352, -352, 97, 91, 91, 91, -352, -352, 97, + 91, -301, -299, 91, 91, -390, 259, 303, 305, 97, + 97, 97, 97, 33, 91, -391, 33, 719, 718, 720, + 721, 722, 91, 97, 33, 97, 33, 97, -293, 88, + -190, -143, 293, 229, 231, 234, 78, 91, 309, 310, + 307, 312, 313, 314, 154, 46, 89, 245, 242, -293, + -284, 247, -284, -293, -300, -299, -291, -190, 245, 383, + 91, -145, -348, 16, 165, -304, -304, -282, -190, -348, + -304, -282, -190, -282, -282, -282, -282, -304, -304, -304, + -282, -299, -299, -190, -190, 
-190, -190, -190, -190, -190, + -311, -283, -282, 694, 91, -276, 16, 78, -311, -311, + 89, 326, 420, 421, -309, 323, -81, -293, 91, -10, + -29, -18, -17, -19, 154, -10, 89, 582, -183, -190, + 694, 694, 694, 694, 694, 694, -145, -145, -145, -145, + 606, -207, -409, 146, 122, 123, 120, 121, -162, -145, + -208, -213, -215, 107, 165, 148, 162, -245, -150, -153, + -150, -150, -150, -150, -150, -150, 224, -150, 224, -150, + -150, -150, -150, -150, -150, -312, -293, 91, 181, -158, + -157, 106, -407, -158, 579, 89, -220, 225, -145, -145, + -385, -119, 445, 446, 447, 448, 450, 451, 452, 455, + 456, 460, 461, 444, 462, 449, 454, 457, 458, 459, + 453, 346, -145, -210, -209, -210, -145, -145, -222, -223, + 150, -217, -145, -418, -418, 97, 172, -127, 26, 40, + -127, -127, -127, -127, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -127, -145, -120, 444, 462, 449, + 454, 457, 458, 459, 453, 346, 463, 464, 465, 466, + 467, 468, 469, 470, 471, -120, -119, -145, -145, -145, + -145, -145, -145, -87, -145, 132, 133, 134, -209, -145, + -150, -145, -145, -145, -418, -145, -145, -145, -210, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -384, -383, -382, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -209, -209, -209, -209, -209, -145, -418, -145, -164, + -148, 97, -260, 106, 93, -145, -145, -145, -145, -145, + -145, -210, -295, -300, -291, -292, -209, -210, -210, -209, + -209, -145, -145, -145, -145, -145, -145, -145, -145, -418, + -145, -145, -145, -145, -145, -252, -418, -209, 89, -400, + 419, 420, 692, -302, 278, -301, 27, -210, 91, 16, + -262, 79, -293, -234, -234, 65, 66, 61, -131, -132, + -136, -418, -37, 27, -254, -293, 631, 631, 64, 
91, + -331, -271, 374, 375, 181, -145, -145, 89, -233, 29, + 30, -190, -296, 172, -300, -190, -263, 278, -190, -168, + -170, -171, -172, -193, -216, -417, -173, -31, 602, 599, + 16, -183, -184, -192, -299, -269, -313, -268, 89, 418, + 420, 421, 78, 124, -145, -332, 180, -360, -359, -358, + -341, -343, -344, -345, 90, -332, -337, 380, 379, -329, + -329, -329, -329, -329, -331, -331, -331, -331, 88, 88, + -329, -329, -329, -329, -334, 88, -334, -334, -335, -334, + 88, -335, -336, 88, -336, -371, -145, -368, -367, -365, + -366, 252, 102, 674, 630, 582, 623, 664, 79, -363, + -233, 97, -418, -143, -285, 247, -369, -366, -293, -293, + -293, -285, 92, 91, 92, 91, 92, 91, -112, -60, + -1, 731, 732, 733, 89, 21, -342, -341, -59, 303, + -374, -375, 278, -370, -364, -350, 140, -349, -350, -350, + -293, 89, 31, 129, 129, 129, 129, 582, 231, 34, + -286, 622, 146, 674, 630, -341, -59, 245, 245, -312, + -312, -312, 91, 91, -281, 727, -183, -139, 295, 154, + 284, 284, 242, 297, 242, 297, -190, 308, 311, 309, + 310, 307, 312, 313, 314, 25, 25, 25, 25, 25, + 25, 296, 298, 300, 286, -190, -190, -284, 78, -185, + -190, 28, -299, 91, 91, -190, -282, -282, -190, -282, + -282, -190, -413, 327, -293, 361, 685, 687, -123, 419, + 89, 582, 24, -124, 24, -417, -409, 122, 123, -208, + -150, -153, -150, 145, 266, -150, -150, -417, -217, -418, + -295, 27, 89, 79, -418, 170, 89, -418, -418, 89, + 16, 89, -225, -223, 152, -145, -418, 89, -418, -418, + -209, -145, -145, -145, -145, -418, -418, -418, -418, -418, + -418, -418, -418, -418, -418, -209, -418, 89, 89, 16, + -316, 27, -418, -418, -418, -418, -418, -224, -418, 16, + -418, 79, 89, 165, 89, -418, -418, -418, 89, 89, + -418, -418, 89, -418, 89, -418, -418, -418, -418, -418, + -418, 89, -418, 89, -418, -418, -418, 89, -418, 89, + -418, -418, 89, -418, 89, -418, 89, -418, 89, -418, + 89, -418, 89, -418, 89, -418, 89, -418, 89, -418, + 89, -418, 89, -418, 89, -418, 89, -418, 89, -418, + 89, -418, 89, -418, 89, -418, 89, -418, -418, -418, + 89, 
-418, 89, -418, 89, -418, -418, 89, -418, 89, + -418, 89, -418, 89, 89, -418, 89, 89, 89, -418, + 89, 89, 89, 89, -418, -418, -418, -418, 89, 89, + 89, 89, 89, 89, 89, 89, 89, 89, -418, -418, + -418, -418, -418, -418, 89, -94, 607, -418, -418, 89, + -418, 89, 89, 89, 89, 89, -418, -417, 225, -418, + -418, -418, -418, -418, 89, 89, 89, 89, 89, 89, + -418, -418, -418, 89, 89, -418, 89, -418, 89, -418, + -399, 691, 420, -197, -196, -194, 76, 246, 77, -417, + -301, -418, -158, -260, -261, -260, -202, -293, 97, 106, + -236, -167, 89, -169, 16, -215, 90, 89, -331, -240, + -246, -279, -293, 91, 181, -333, 181, -333, 374, 375, + -232, 225, -198, 17, -201, 34, 59, -29, -417, -417, + 34, 89, -186, -188, -187, -189, 68, 72, 74, 69, + 70, 71, 75, -307, 27, -31, -168, -31, -417, -190, + -183, -419, 16, 79, -419, 89, 225, -270, -273, 422, + 419, 425, -385, 91, -111, 89, -358, -345, -237, -140, + 42, -338, 381, -331, 590, -331, -340, 91, -340, 97, + 97, 97, 90, -49, -44, -45, 35, 83, -365, -352, + 91, 41, -352, -352, -293, 90, -233, -139, -190, 146, + 78, -369, -369, -369, -299, -2, 730, 736, 140, 88, + 386, 20, -254, 89, 90, -218, 304, 90, -113, -293, + 90, 88, -350, -350, -293, -417, 242, 33, 33, 674, + 630, 622, -59, -218, -217, -293, -332, 729, 728, 90, + 244, 302, -144, 439, -141, 91, 92, -190, -190, -190, + -190, -190, -190, 234, 231, 409, -408, 315, -408, 287, + 245, -183, -190, 89, -84, 261, 256, -304, -304, 35, + -190, 419, 703, 701, -145, 145, 266, -162, -153, -119, + -119, -150, -314, 181, 347, 265, 345, 341, 361, 352, + 379, 343, 380, 338, 337, 336, -314, -312, -150, -209, + -145, -145, -145, 153, -145, 151, -145, -95, -94, -418, + -418, -418, -418, -418, -95, -95, -95, -95, -95, -95, + -95, -95, -95, -95, -229, -145, -145, -145, -418, 181, + 347, 16, -145, -312, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, 
-145, -145, -382, + -145, -209, -145, -209, -145, -145, -145, -145, -145, -383, + -383, -383, -383, -383, -209, -209, -209, -209, -145, -417, + -293, -98, -97, -96, 657, 246, -94, -164, -98, -164, + 224, -145, 224, 224, 224, -145, -210, -295, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -194, -346, + 284, -346, 284, -346, -264, 89, -275, 24, 16, 59, + 59, -167, -198, -132, -168, -293, -243, 684, -249, 48, + -247, -248, 49, -244, 50, 58, -333, -333, 172, -234, + -145, -265, 78, -266, -274, -217, -212, -214, -213, -417, + -253, -418, -293, -264, -266, -170, -171, -171, -170, -171, + 68, 68, 68, 73, 68, 73, 68, -187, -299, -418, + -145, -302, 79, -168, -168, -192, -299, 172, 419, 423, + 424, -358, -406, 120, 146, 33, 78, 377, 102, -404, + 180, 619, 669, 674, 630, 623, 664, -405, 248, 139, + 140, 260, 27, 43, 90, 89, 90, 89, 90, 90, + 89, -287, -286, -45, -44, -352, -352, 97, -385, 91, + 91, 244, 28, -190, 78, 78, 78, -114, 734, 97, + 88, -3, 83, -145, 88, 21, -341, -217, -376, -326, + -377, -327, -328, -5, -6, -353, -117, 59, 102, -63, + 46, 243, 714, 715, 129, -417, 727, -368, -254, -372, + -374, -190, -149, -417, -161, -147, -146, -148, -154, 170, + 171, 265, 343, 344, -218, -190, -138, 293, 301, 88, + -142, 93, -387, 79, 284, 377, 284, 377, 91, -410, + 316, 91, -410, -190, -84, -49, -190, -282, -282, 35, + -385, -418, -162, -153, -126, 165, 582, -317, 589, -329, + -329, -329, -336, -329, 333, -329, 333, -329, -418, -418, + -418, 89, -418, 24, -418, 89, -145, 89, -95, -95, + -95, -95, -95, -122, 478, 89, 89, -418, 88, 88, + -145, -418, -418, -418, 89, -418, -418, -418, -418, -418, + -418, -418, -418, -418, -418, -418, -418, -418, 89, -418, + 89, -418, 89, -418, 89, -418, 89, -418, 89, -418, + 89, -418, 89, -418, 89, -418, 89, -418, 89, -418, + 89, -418, 89, -418, 89, -418, 89, -418, 89, -418, + -418, 89, -418, -418, -418, 89, -418, 89, -418, 89, + -418, -418, -418, 89, -315, 675, -418, -418, -418, -418, + -418, -418, -418, -418, -418, -418, -418, 
-93, -294, -94, + 639, 639, -418, -94, -226, 89, -150, -418, -150, -150, + -150, -418, -418, -418, 89, -418, 89, 89, -418, 89, + -418, 89, -418, -418, -418, -418, 89, -195, 24, -417, + -195, -417, -195, -418, -260, -190, -198, -227, 18, -240, + 53, 353, -251, -250, 57, 49, -248, 21, 51, 21, + 32, -265, 89, 154, -306, 89, 27, -418, -418, 89, + 59, 225, -418, -198, -181, -180, 78, 79, -182, 78, + -180, 68, 68, -255, 89, -263, -168, -198, -198, 225, + 120, -417, -149, 14, 91, 91, -385, -403, 718, 719, + 33, 97, -352, -352, 140, 140, -190, 88, -331, 91, + -331, 97, 97, 33, 84, 85, 86, 33, 80, 81, + 82, -190, -190, -190, -190, -373, 88, 21, -145, 88, + 154, 90, -254, -254, 280, 165, -352, 712, 286, 286, + -352, -352, -352, -116, -115, 734, 90, -418, 89, -339, + 582, 585, -145, -155, -155, -255, 90, -381, 582, -386, + -293, -293, -293, -293, 97, 99, -418, 580, 75, 583, + -418, -331, -145, -145, -145, -145, -234, 91, -145, -145, + 97, 97, -418, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -145, -145, -145, -145, -145, -145, -145, + -145, -145, -145, -209, -145, -418, -178, -177, -179, 695, + 120, 33, -314, -418, -211, 278, -101, -100, -99, 16, + -418, -145, -119, -119, -119, -119, -145, -145, -145, -145, + -145, -145, -417, 68, 20, 18, -257, -293, 248, -417, + -257, -417, -302, -227, -228, 19, 21, -241, 55, -239, + 54, -239, -250, 21, 21, 91, 21, 91, 140, -274, + -145, -214, -301, 59, -29, -293, -212, -293, -229, -145, + 88, -145, -158, -198, -198, -145, -204, 502, 504, 505, + 506, 503, 508, 509, 510, 511, 512, 513, 514, 515, + 516, 517, 507, 518, 479, 480, 481, 109, 111, 110, + 482, 483, 484, 347, 530, 531, 525, 528, 529, 527, + 526, 362, 363, 485, 548, 549, 553, 552, 550, 551, + 554, 557, 558, 559, 560, 561, 562, 564, 563, 555, + 556, 533, 532, 534, 535, 536, 537, 538, 539, 541, + 540, 542, 543, 544, 545, 546, 547, 565, 566, 567, + 568, 569, 571, 570, 575, 574, 572, 573, 577, 576, + 486, 487, 112, 113, 114, 115, 116, 117, 118, 488, + 491, 489, 492, 493, 
494, 499, 500, 495, 496, 497, + 498, 501, 373, 371, 372, 368, 367, 366, 426, 431, + 432, 434, 519, 520, 521, 522, 523, 524, 676, 677, + 678, 679, 680, 681, 682, 683, 91, 91, 88, -145, + 90, 90, -255, -372, -60, 90, -256, -254, 97, 90, + 281, -213, -417, 91, -352, -352, -352, 97, 97, -301, + -418, 89, -293, -405, -374, 586, 586, -418, 27, -380, + -379, -295, 88, 79, 64, 581, 584, -418, -418, -418, + 89, -418, -418, -418, 90, 90, -418, -418, -418, -418, + -418, -418, -418, -418, -418, -418, -418, -418, -418, -418, + -418, -418, -418, -418, -418, -418, -418, -418, 89, -418, + -177, -179, -418, 78, -158, -229, 21, -98, 303, 305, + -98, -418, -418, -418, -418, -418, 89, -418, -418, 89, + -418, 89, -418, -418, -257, -418, 21, 21, 89, -418, + -257, -418, -257, -197, -228, -108, -107, -106, 613, -145, + -209, -242, 56, 78, 124, 91, 91, 91, 14, -417, + -212, 225, -306, -234, -254, -175, 386, -229, -418, -254, + 90, 27, 90, 736, 140, 90, -213, -125, -417, 277, + -301, 91, 91, -115, -118, -29, 89, 154, -254, -190, + 64, -145, -209, -418, 78, 594, 695, -92, -91, -88, + 706, 732, -209, -94, -94, -145, -145, -145, -418, -293, + 248, -418, -418, -108, 89, -105, -104, -293, -318, 582, + 78, 124, -266, -254, -306, -293, 90, -418, -417, -234, + 90, -238, -29, 88, -3, 277, -326, -377, -327, -328, + -5, -6, -353, -82, 582, -379, -357, -299, -295, 91, + 97, 90, 582, -418, -418, -90, 148, 704, 672, -155, + 224, -418, 89, -418, 89, -418, 89, -106, 89, 27, + 587, -418, -302, -176, -174, -293, 636, -396, -395, 578, + -406, -402, 120, 146, 102, -404, 674, 630, 130, 131, + -82, -145, 88, -418, -83, 292, 691, 225, -387, 583, + -90, 705, 650, 625, 650, 625, -150, -145, -145, -145, + -104, -417, -418, 89, 24, -319, -62, 647, -393, -394, + 78, -397, 392, 646, 667, 120, 91, 90, -254, 253, + -300, -381, 584, 145, -119, -418, 89, -418, 89, -418, + -93, -174, 643, -332, -158, -394, 78, -393, 78, 15, + 14, -4, 735, 90, 294, -90, 650, 625, -145, -145, + -418, -61, 28, -175, -392, 261, 256, 259, 34, 
-392, + 97, -4, -418, -418, 647, 255, 33, 120, -158, -178, + -177, -177, } var yyDef = [...]int{ - 879, -2, -2, 881, 2, 4, 5, 6, 7, 8, + 880, -2, -2, 882, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, - 39, 72, 74, 75, 879, 879, 879, 0, 879, 0, - 0, 879, -2, -2, 879, 1610, 0, 879, 0, 874, - 0, -2, 794, 800, 0, 809, -2, 0, 0, 879, - 879, 2234, 2234, 874, 0, 0, 0, 0, 0, 879, - 879, 879, 879, 1615, 1476, 52, 879, 0, 87, 88, - 829, 830, 831, 67, 0, 2232, 880, 1, 3, 73, - 77, 0, 0, 0, 60, 1485, 0, 80, 0, 0, - 883, 0, 0, 1593, 879, 879, 0, 128, 129, 0, - 0, 0, -2, 132, -2, 161, 162, 163, 0, 168, - 605, 526, 578, 524, 563, -2, 512, 0, 0, 0, + 39, 70, 72, 73, 880, 880, 880, 0, 880, 0, + 0, 880, -2, -2, 880, 1625, 0, 880, 0, 875, + 0, -2, 797, 803, 0, 812, -2, 0, 0, 880, + 880, 2253, 2253, 875, 0, 0, 0, 0, 0, 880, + 880, 880, 880, 1630, 1483, 50, 880, 0, 85, 86, + 830, 831, 832, 65, 0, 2251, 881, 1, 3, 71, + 75, 0, 0, 0, 58, 1492, 0, 78, 0, 0, + 884, 0, 0, 1608, 880, 880, 0, 126, 127, 0, + 0, 0, -2, 130, -2, 159, 160, 161, 0, 166, + 607, 526, 578, 524, 563, -2, 512, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 529, - 401, 401, 0, 0, -2, 512, 512, 512, 1595, 0, + 401, 401, 0, 0, -2, 512, 512, 512, 1610, 0, 0, 0, 560, 463, 401, 401, 401, 0, 401, 401, 401, 401, 0, 0, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, 401, - 401, 1503, 167, 1611, 1608, 1609, 1768, 1769, 1770, 1771, - 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1781, - 1782, 1783, 1784, 1785, 1786, 1787, 1788, 1789, 1790, 1791, - 1792, 1793, 1794, 1795, 1796, 1797, 1798, 1799, 1800, 1801, - 1802, 1803, 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1820, 1821, - 1822, 1823, 1824, 1825, 1826, 1827, 1828, 1829, 1830, 1831, - 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, - 1842, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1851, - 
1852, 1853, 1854, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, - 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1879, 1880, 1881, - 1882, 1883, 1884, 1885, 1886, 1887, 1888, 1889, 1890, 1891, - 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1900, 1901, - 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, - 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, - 1922, 1923, 1924, 1925, 1926, 1927, 1928, 1929, 1930, 1931, - 1932, 1933, 1934, 1935, 1936, 1937, 1938, 1939, 1940, 1941, - 1942, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1951, - 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, - 1962, 1963, 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, - 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, - 1982, 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, 1991, - 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, - 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, - 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, - 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, - 2032, 2033, 2034, 2035, 2036, 2037, 2038, 2039, 2040, 2041, - 2042, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2050, 2051, - 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, - 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, - 2072, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, - 2082, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, - 2092, 2093, 2094, 2095, 2096, 2097, 2098, 2099, 2100, 2101, - 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, - 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, - 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, - 2132, 2133, 2134, 2135, 2136, 2137, 2138, 2139, 2140, 2141, - 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, - 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, - 2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, - 2172, 2173, 
2174, 2175, 2176, 2177, 2178, 2179, 2180, 2181, - 2182, 2183, 2184, 2185, 2186, 2187, 2188, 2189, 2190, 2191, - 2192, 2193, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, - 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, - 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2219, 2220, 2221, - 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2229, 2230, 2231, - 0, 1587, 0, 718, 982, 0, 875, 876, 0, 783, - 783, 0, 783, 783, 783, 783, 0, 0, 0, 732, - 0, 0, 0, 0, 780, 0, 748, 749, 0, 780, - 0, 755, 786, 0, 0, 761, 783, 783, 764, 2235, - 0, 2235, 2235, 1578, 0, 777, 775, 789, 790, 42, - 793, 796, 797, 798, 799, 802, 0, 813, 816, 1604, - 1605, 0, 818, 825, 842, 843, 0, 47, 1132, 0, - 1004, 0, 1010, -2, 1021, 1038, 1039, 1040, 1041, 1042, - 1044, 1045, 1046, 0, 0, 0, 0, 1051, 1052, 0, - 0, 0, 0, 0, 1113, 0, 0, 0, 0, 1449, - 0, 0, 1411, 1411, 1147, 1411, 1411, 1413, 1413, 1413, - 1820, 1958, 1966, 2142, 1781, 1787, 1788, 1789, 2088, 2089, - 2090, 2091, 2179, 2180, 2184, 1882, 1776, 2155, 2156, 0, - 2231, 1919, 1927, 1928, 1952, 2052, 2165, 1799, 1947, 2016, - 1879, 1901, 1902, 2034, 2035, 1923, 1924, 1905, 2094, 2096, - 2112, 2113, 2098, 2100, 2109, 2115, 2120, 2099, 2111, 2116, - 2129, 2133, 2136, 2137, 2138, 2106, 2104, 2117, 2121, 2123, - 2125, 2131, 2134, 2107, 2105, 2118, 2122, 2124, 2126, 2132, - 2135, 2093, 2097, 2101, 2110, 2128, 2108, 2127, 2102, 2114, - 2119, 2130, 2103, 2095, 1917, 1920, 1908, 1909, 1911, 1913, - 1918, 1925, 1931, 1910, 1930, 1929, 0, 1906, 1907, 1912, - 1922, 1926, 1914, 1915, 1916, 1921, 1932, 1972, 1971, 1970, - 2015, 1943, 2014, 0, 0, 0, 0, 0, 1771, 1825, - 1826, 2139, 1333, 1334, 1335, 1336, 0, 0, 0, 0, - 0, 0, 0, 293, 294, 1462, 1463, 46, 1131, 1574, - 1413, 1413, 1413, 1413, 1413, 1413, 1073, 1074, 1075, 1076, - 1077, 1101, 1102, 1108, 1109, 2029, 2030, 2031, 2032, 1863, - 2174, 1871, 1872, 2011, 2012, 1884, 1885, 2205, 2206, -2, - -2, -2, 234, 235, 236, 237, 238, 239, 240, 241, - 0, 1824, 2153, 2154, 230, 0, 0, 298, 299, 295, - 296, 297, 
1115, 1116, 251, 252, 253, 254, 255, 256, - 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, - 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, - 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, - 287, 288, 289, 290, 291, 292, 2234, 0, 852, 0, - 0, 0, 0, 0, 0, 1616, 1617, 1485, 0, 1477, - 1476, 65, 0, 879, -2, 0, 0, 0, 0, 49, - 0, 54, 939, 882, 79, 78, 1525, 0, 0, 0, - 61, 1486, 69, 71, 1487, 0, 884, 885, 0, 915, - 919, 0, 0, 0, 1594, 1593, 1593, 104, 0, 0, - 105, 125, 126, 127, 0, 0, 111, 112, 1580, 1581, - 45, 0, 0, 179, 180, 0, 43, 428, 0, 175, - 0, 421, 360, 0, 1503, 0, 0, 0, 0, 0, - 879, 0, 1588, 156, 157, 164, 165, 166, 401, 401, - 401, 575, 0, 0, 167, 167, 533, 534, 535, 0, - 0, -2, 426, 0, 513, 0, 0, 415, 415, 419, - 417, 418, 0, 0, 0, 0, 0, 0, 0, 0, - 552, 0, 553, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 666, 0, 402, 0, 573, 574, 464, 0, - 0, 0, 0, 0, 0, 0, 0, 1596, 1597, 0, - 550, 551, 0, 0, 0, 401, 401, 0, 0, 0, - 0, 401, 401, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 155, 1516, 0, 0, 0, -2, 0, 710, 0, - 0, 0, 1589, 1589, 0, 717, 0, 0, 0, 722, - 0, 0, 723, 0, 780, 780, 778, 779, 725, 726, - 727, 728, 783, 0, 0, 410, 411, 412, 780, 783, - 0, 783, 783, 783, 783, 780, 780, 780, 783, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2235, 786, - 783, 0, 756, 0, 757, 758, 759, 762, 763, 765, - 2236, 2237, 1606, 1607, 1618, 1619, 1620, 1621, 1622, 1623, - 1624, 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632, 1633, - 1634, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, - 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, - 1654, 1655, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, - 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1672, 1673, - 1674, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 1682, 1683, - 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1693, - 1694, 1695, 1696, 1697, 1698, 1699, 1700, 1701, 1702, 1703, - 1704, 1705, 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, - 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1722, 1723, - 1724, 
1725, 1726, 1727, 1728, 1729, 1730, 1731, 1732, 1733, - 1734, 1735, 1736, 1737, 1738, 1739, 1740, 1741, 1742, 1743, - 1744, 1745, 1746, 1747, 1748, 1749, 1750, 1751, 1752, 1753, - 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, - 1764, 1765, 1766, 1767, 2235, 2235, 769, 773, 1579, 795, - 801, 803, 804, 0, 0, 814, 817, 836, 51, 1870, - 824, 51, 826, 827, 828, 854, 855, 860, 0, 0, - 0, 0, 866, 867, 868, 0, 0, 871, 872, 873, - 0, 0, 0, 0, 0, 1002, 0, 0, 1121, 1122, - 1123, 1124, 1125, 1126, 1127, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1022, 1023, 0, 0, 0, 1047, 1048, - 1049, 1050, 1053, 0, 1064, 0, 1066, 1458, -2, 0, - 0, 0, 1058, 1059, 0, 0, 0, 0, 0, 0, - 0, 1450, 0, 0, 1145, 0, 1146, 1148, 1149, 1150, - 0, 1151, 1152, 889, 889, 889, 889, 889, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 889, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1599, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 143, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 899, 0, 0, 899, 899, - 0, 0, 222, 223, 224, 225, 226, 227, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 242, 243, 244, 245, 246, 247, 300, 248, - 249, 250, 1131, 0, 0, 0, 48, 844, 845, 0, - 965, 1599, 0, 0, 895, 0, 1614, 59, 68, 70, - 1485, 63, 1485, 0, 901, 0, 0, -2, -2, 902, - 908, 909, 910, 911, 912, 56, 2233, 57, 0, 76, - 0, 50, 0, 0, 0, 0, 374, 1528, 0, 0, - 1478, 1479, 1482, 0, 916, 1964, 920, 0, 922, 923, - 0, 0, 102, 0, 981, 0, 0, 0, 113, 0, - 115, 116, 0, 0, 0, 385, 1582, 1583, 1584, -2, - 408, 0, 385, 369, 308, 309, 310, 360, 312, 360, - 360, 360, 360, 374, 374, 374, 374, 343, 344, 345, - 346, 347, 0, 0, 329, 360, 360, 360, 360, 350, - 351, 352, 353, 354, 355, 356, 357, 313, 314, 315, - 316, 317, 318, 319, 320, 321, 362, 362, 362, 362, - 362, 366, 366, 0, 44, 0, 389, 0, 1482, 0, - 0, 1516, 1591, 1601, 0, 0, 0, 
1591, 134, 0, - 0, 0, 576, 616, 527, 564, 577, 0, 530, 531, - -2, 0, 0, 512, 0, 514, 0, 409, 0, -2, - 0, 419, 0, 415, 419, 416, 419, 407, 420, 554, - 555, 556, 0, 558, 559, 646, 951, 0, 0, 0, - 0, 0, 652, 653, 654, 0, 656, 657, 658, 659, - 660, 661, 662, 663, 664, 665, 565, 566, 567, 568, - 569, 570, 571, 572, 0, 0, 0, 0, 514, 0, - 561, 0, 0, 465, 466, 467, 0, 0, 470, 471, - 472, 473, 0, 0, 476, 477, 478, 968, 969, 479, - 480, 505, 506, 507, 481, 482, 483, 484, 485, 486, - 487, 499, 500, 501, 502, 503, 504, 488, 489, 490, - 491, 492, 493, 496, 0, 149, 1507, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1589, 0, 0, 0, 0, 898, 983, 1612, - 1613, 719, 0, 0, 784, 785, 0, 413, 414, 783, - 783, 729, 770, 0, 783, 733, 771, 734, 736, 735, - 737, 750, 751, 783, 740, 781, 782, 741, 742, 743, - 744, 745, 746, 747, 766, 752, 753, 754, 787, 0, - 791, 792, 767, 768, 0, 0, 807, 808, 0, 815, - 839, 837, 838, 840, 832, 833, 834, 835, 0, 841, - 0, 0, 857, 98, 862, 863, 864, 865, 877, 870, - 1133, 999, 1000, 1001, 0, 1003, 1007, 0, 1117, 1119, - 1009, 1005, 1011, 1128, 1129, 1130, 0, 0, 0, 0, - 0, 1015, 1019, 1024, 1025, 1026, 1027, 1028, 0, 1029, - 0, 1032, 1033, 1034, 1035, 1036, 1037, 1043, 1426, 1427, - 1428, 1062, 301, 302, 0, 1063, 0, 0, 0, 0, - 0, 0, 0, 0, 1373, 1374, 1375, 1376, 1377, 1378, - 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, - 1389, 1390, 1391, 1392, 1132, 0, 913, 0, 0, 1456, - 1453, 0, 0, 0, 1412, 1414, 0, 0, 0, 890, - 891, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1393, 1394, - 1395, 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, 1404, - 1405, 1406, 1407, 1408, 1409, 1410, 0, 0, 1429, 0, - 0, 0, 0, 0, 1449, 0, 1068, 1069, 1070, 0, - 0, 0, 0, 0, 0, 1191, 0, 0, 0, 0, - 1600, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 144, 145, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
1337, 1338, 1339, 1340, 41, 0, 0, 0, - 0, 0, 0, 0, 900, 1460, 0, -2, -2, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1362, 0, 0, 0, 0, 0, 0, 1572, - 0, 0, 847, 848, 850, 0, 985, 0, 966, 0, - 0, 853, 0, 894, 0, 897, 62, 64, 906, 907, - 0, 924, 903, 58, 53, 0, 0, 943, 1526, 374, - 1548, 0, 383, 383, 380, 1488, 1489, 0, 1481, 1483, - 1484, 81, 921, 917, 0, 997, 0, 0, 980, 0, - 927, 929, 930, 931, 963, 0, 934, 935, 0, 0, - 0, 0, 0, 100, 982, 106, 0, 114, 0, 0, - 119, 120, 107, 108, 109, 110, 0, 605, -2, 460, - 181, 183, 184, 185, 176, -2, 372, 370, 371, 311, + 401, 1510, 165, 1626, 1623, 1624, 1783, 1784, 1785, 1786, + 1787, 1788, 1789, 1790, 1791, 1792, 1793, 1794, 1795, 1796, + 1797, 1798, 1799, 1800, 1801, 1802, 1803, 1804, 1805, 1806, + 1807, 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, + 1837, 1838, 1839, 1840, 1841, 1842, 1843, 1844, 1845, 1846, + 1847, 1848, 1849, 1850, 1851, 1852, 1853, 1854, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, + 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, 1885, 1886, + 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1894, 1895, 1896, + 1897, 1898, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, + 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, + 1917, 1918, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, + 1927, 1928, 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1943, 1944, 1945, 1946, + 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, + 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, + 1977, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, + 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, + 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, + 2007, 2008, 
2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, + 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, + 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, + 2037, 2038, 2039, 2040, 2041, 2042, 2043, 2044, 2045, 2046, + 2047, 2048, 2049, 2050, 2051, 2052, 2053, 2054, 2055, 2056, + 2057, 2058, 2059, 2060, 2061, 2062, 2063, 2064, 2065, 2066, + 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, + 2077, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 2086, + 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2095, 2096, + 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, + 2107, 2108, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2116, + 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, + 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2135, 2136, + 2137, 2138, 2139, 2140, 2141, 2142, 2143, 2144, 2145, 2146, + 2147, 2148, 2149, 2150, 2151, 2152, 2153, 2154, 2155, 2156, + 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2166, + 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, + 2177, 2178, 2179, 2180, 2181, 2182, 2183, 2184, 2185, 2186, + 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, + 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2205, 2206, + 2207, 2208, 2209, 2210, 2211, 2212, 2213, 2214, 2215, 2216, + 2217, 2218, 2219, 2220, 2221, 2222, 2223, 2224, 2225, 2226, + 2227, 2228, 2229, 2230, 2231, 2232, 2233, 2234, 2235, 2236, + 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, + 2247, 2248, 2249, 2250, 0, 1602, 0, 720, 980, 0, + 876, 877, 0, 786, 786, 0, 786, 786, 786, 786, + 0, 0, 0, 734, 0, 0, 0, 0, 783, 0, + 750, 751, 0, 783, 0, 757, 789, 0, 0, 764, + 786, 786, 767, 2254, 0, 2254, 2254, 1593, 0, 780, + 778, 792, 793, 42, 796, 799, 800, 801, 802, 805, + 0, 816, 819, 1619, 1620, 0, 821, 826, 843, 844, + 0, 45, 1136, 0, 1004, 0, 1012, -2, 1023, 1040, + 1041, 1042, 1043, 1044, 1046, 1047, 1048, 0, 0, 0, + 0, 1053, 1054, 0, 0, 0, 0, 0, 1117, 0, + 0, 0, 0, 1982, 1454, 0, 0, 1416, 1416, 1152, + 
1416, 1416, 1418, 1418, 1418, 1835, 1974, 1983, 2160, 1796, + 1802, 1803, 1804, 2106, 2107, 2108, 2109, 2198, 2199, 2203, + 1898, 1791, 2173, 2174, 0, 2250, 1935, 1943, 1944, 1968, + 2070, 2183, 1814, 1963, 2033, 1895, 1917, 1918, 2051, 2052, + 1939, 1940, 1921, 2112, 2114, 2130, 2131, 2116, 2118, 2127, + 2133, 2138, 2117, 2129, 2134, 2147, 2151, 2154, 2155, 2156, + 2124, 2122, 2135, 2139, 2141, 2143, 2149, 2152, 2125, 2123, + 2136, 2140, 2142, 2144, 2150, 2153, 2111, 2115, 2119, 2128, + 2146, 2126, 2145, 2120, 2132, 2137, 2148, 2121, 2113, 1933, + 1936, 1924, 1925, 1927, 1929, 1934, 1941, 1947, 1926, 1946, + 1945, 0, 1922, 1923, 1928, 1938, 1942, 1930, 1931, 1932, + 1937, 1948, 1989, 1988, 1987, 2032, 1959, 2031, 0, 0, + 0, 0, 0, 1786, 1840, 1841, 2157, 1338, 1339, 1340, + 1341, 0, 0, 0, 0, 0, 0, 0, 291, 292, + 1467, 1468, 44, 1135, 1589, 1418, 1418, 1418, 1418, 1418, + 1418, 1075, 1076, 1077, 1078, 1079, 1105, 1106, 1112, 1113, + 2046, 2047, 2048, 2049, 1878, 2193, 1887, 1888, 2028, 2029, + 1900, 1901, 2224, 2225, -2, -2, -2, 232, 233, 234, + 235, 236, 237, 238, 239, 0, 1839, 2171, 2172, 228, + 0, 0, 296, 293, 294, 295, 1119, 1120, 249, 250, + 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, + 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, + 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, + 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, + 298, 299, 2253, 0, 853, 0, 0, 0, 0, 0, + 0, 1631, 1632, 1492, 0, 1484, 1483, 63, 0, 880, + -2, 0, 0, 0, 0, 47, 0, 52, 937, 883, + 77, 76, 1532, 1535, 0, 0, 0, 59, 1493, 67, + 69, 1494, 0, 885, 886, 0, 913, 917, 0, 0, + 0, 1609, 1608, 1608, 102, 0, 0, 103, 123, 124, + 125, 0, 0, 109, 110, 1595, 1596, 43, 0, 0, + 177, 178, 0, 1093, 428, 0, 173, 0, 421, 360, + 0, 1510, 0, 0, 0, 0, 0, 880, 0, 1603, + 154, 155, 162, 163, 164, 401, 401, 401, 575, 0, + 0, 165, 165, 533, 534, 535, 0, 0, -2, 426, + 0, 513, 0, 0, 415, 415, 419, 417, 418, 0, + 0, 0, 0, 0, 0, 0, 0, 552, 0, 553, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 668, + 0, 402, 0, 573, 
574, 464, 0, 0, 0, 0, + 0, 0, 0, 0, 1611, 1612, 0, 550, 551, 0, + 0, 0, 401, 401, 0, 0, 0, 0, 401, 401, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 153, 1523, + 0, 0, 0, -2, 0, 712, 0, 0, 0, 1604, + 1604, 0, 719, 0, 0, 0, 724, 0, 0, 725, + 0, 783, 783, 781, 782, 727, 728, 729, 730, 786, + 0, 0, 410, 411, 412, 783, 786, 0, 786, 786, + 786, 786, 783, 783, 783, 786, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2254, 789, 786, 0, 758, + 0, 759, 760, 761, 762, 765, 766, 768, 2255, 2256, + 1621, 1622, 1633, 1634, 1635, 1636, 1637, 1638, 1639, 1640, + 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, + 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, + 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, + 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, + 1691, 1692, 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1700, + 1701, 1702, 1703, 1704, 1705, 1706, 1707, 1708, 1709, 1710, + 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, + 1721, 1722, 1723, 1724, 1725, 1726, 1727, 1728, 1729, 1730, + 1731, 1732, 1733, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, 1749, 1750, + 1751, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, + 1761, 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, 1770, + 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, + 1781, 1782, 2254, 2254, 772, 776, 1594, 798, 804, 806, + 807, 0, 0, 817, 820, 837, 49, 1886, 825, 49, + 827, 828, 829, 855, 856, 861, 0, 0, 0, 0, + 867, 868, 869, 0, 0, 872, 873, 874, 0, 0, + 0, 0, 0, 1002, 0, 0, 1125, 1126, 1127, 1128, + 1129, 1130, 1131, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1024, 1025, 0, 0, 0, 1049, 1050, 1051, 1052, + 1055, 0, 1066, 0, 1068, 1463, -2, 0, 0, 0, + 1060, 1061, 0, 0, 0, 1614, 1614, 0, 0, 0, + 1455, 0, 0, 1150, 0, 1151, 1153, 1154, 1155, 0, + 1156, 1157, 890, 890, 890, 890, 890, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 890, 0, 0, + 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1614, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 141, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1614, 0, 0, 1614, 1614, 0, + 0, 220, 221, 222, 223, 224, 225, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 297, 240, 241, 242, 243, 244, 245, 300, 246, + 247, 248, 1135, 0, 0, 0, 46, 845, 846, 0, + 963, 1614, 0, 0, 896, 0, 1629, 57, 66, 68, + 1492, 61, 1492, 0, 900, 0, 0, -2, -2, 901, + 902, 906, 907, 908, 909, 910, 54, 2252, 55, 0, + 74, 0, 48, 0, 0, 1533, 0, 1536, 0, 0, + 0, 374, 1540, 0, 0, 1485, 1486, 1489, 0, 914, + 1980, 918, 0, 920, 921, 0, 0, 100, 0, 979, + 0, 0, 0, 111, 0, 113, 114, 0, 0, 0, + 385, 1597, 1598, 1599, -2, 408, 0, 385, 369, 308, + 309, 310, 360, 312, 360, 360, 360, 360, 374, 374, + 374, 374, 343, 344, 345, 346, 347, 0, 0, 329, + 360, 360, 360, 360, 350, 351, 352, 353, 354, 355, + 356, 357, 313, 314, 315, 316, 317, 318, 319, 320, + 321, 362, 362, 362, 362, 362, 366, 366, 0, 1094, + 0, 389, 0, 1489, 0, 0, 1523, 1606, 1616, 0, + 0, 0, 1606, 132, 0, 0, 0, 576, 618, 527, + 564, 577, 0, 530, 531, -2, 0, 0, 512, 0, + 514, 0, 409, 0, -2, 0, 419, 0, 415, 419, + 416, 419, 407, 420, 554, 555, 556, 0, 558, 559, + 648, 949, 0, 0, 0, 0, 0, 654, 655, 656, + 0, 658, 659, 660, 661, 662, 663, 664, 665, 666, + 667, 565, 566, 567, 568, 569, 570, 571, 572, 0, + 0, 0, 0, 514, 0, 561, 0, 0, 465, 466, + 467, 0, 0, 470, 471, 472, 473, 0, 0, 476, + 477, 478, 966, 967, 479, 480, 505, 506, 507, 481, + 482, 483, 484, 485, 486, 487, 499, 500, 501, 502, + 503, 504, 488, 489, 490, 491, 492, 493, 496, 0, + 147, 1514, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1604, 0, + 0, 0, 0, 899, 981, 1627, 1628, 721, 0, 0, + 787, 788, 0, 413, 414, 786, 786, 731, 773, 0, + 786, 735, 774, 736, 738, 737, 739, 752, 753, 786, + 742, 784, 785, 743, 744, 
745, 746, 747, 748, 749, + 769, 754, 755, 756, 790, 0, 794, 795, 770, 771, + 0, 0, 810, 811, 0, 818, 840, 838, 839, 841, + 833, 834, 835, 836, 0, 842, 0, 0, 858, 96, + 863, 864, 865, 866, 878, 871, 1137, 999, 1000, 1001, + 0, 1003, 1009, 0, 1121, 1123, 1007, 1008, 1011, 1005, + 1013, 1132, 1133, 1134, 0, 0, 0, 0, 0, 1017, + 1021, 1026, 1027, 1028, 1029, 1030, 0, 1031, 0, 1034, + 1035, 1036, 1037, 1038, 1039, 1045, 1431, 1432, 1433, 1064, + 301, 302, 0, 1065, 0, 0, 0, 0, 0, 0, + 0, 0, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, + 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1136, 0, 1615, 0, 0, 0, 1461, 1458, + 0, 0, 0, 1417, 1419, 0, 0, 0, 891, 892, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1398, 1399, 1400, + 1401, 1402, 1403, 1404, 1405, 1406, 1407, 1408, 1409, 1410, + 1411, 1412, 1413, 1414, 1415, 0, 0, 1434, 0, 0, + 0, 0, 0, 1454, 0, 1070, 1071, 1072, 0, 0, + 0, 0, 0, 0, 1196, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 142, 143, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1342, 1343, 1344, 1345, 41, 0, 0, 0, 0, 0, + 0, 0, 1465, 0, -2, -2, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1367, + 0, 0, 0, 0, 0, 0, 1587, 0, 0, 848, + 849, 851, 0, 983, 0, 964, 0, 0, 854, 0, + 895, 0, 898, 60, 62, 904, 905, 0, 922, 911, + 903, 56, 51, 0, 0, 941, 1534, 1537, 1538, 374, + 1560, 0, 383, 383, 380, 1495, 1496, 0, 1488, 1490, + 1491, 79, 919, 915, 0, 997, 0, 0, 978, 0, + 925, 927, 928, 929, 961, 0, 932, 933, 0, 0, + 0, 0, 0, 98, 980, 104, 0, 112, 0, 0, + 117, 118, 105, 106, 107, 108, 0, 607, -2, 460, + 179, 181, 182, 183, 174, -2, 372, 370, 371, 311, 374, 374, 337, 338, 339, 340, 341, 342, 0, 0, 330, 331, 332, 333, 322, 0, 323, 324, 325, 364, - 0, 326, 327, 0, 328, 427, 0, 1490, 390, 391, + 0, 326, 327, 0, 328, 427, 0, 1497, 390, 391, 393, 401, 0, 396, 397, 
0, 401, 401, 0, 422, - 423, 0, 1482, 1507, 0, 0, 0, 1602, 1601, 1601, - 1601, 0, 169, 170, 171, 172, 173, 174, 641, 0, - 0, 617, 639, 640, 167, 0, 0, 177, 516, 515, - 0, 673, 0, 425, 0, 0, 419, 419, 404, 405, - 557, 0, 0, 648, 649, 650, 651, 0, 0, 0, + 423, 0, 1489, 1514, 0, 0, 0, 1617, 1616, 1616, + 1616, 0, 167, 168, 169, 170, 171, 172, 643, 0, + 0, 619, 641, 642, 165, 0, 0, 175, 516, 515, + 0, 675, 0, 425, 0, 0, 419, 419, 404, 405, + 557, 0, 0, 650, 651, 652, 653, 0, 0, 0, 543, 454, 0, 544, 545, 514, 516, 0, 0, 385, 468, 469, 474, 475, 494, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 592, 593, 594, - 597, 599, 518, 603, 596, 598, 600, 518, 604, 1504, - 1505, 1506, 0, 0, 711, 0, 0, 451, 96, 1590, - 716, 720, 721, 780, 739, 772, 780, 731, 738, 760, - 805, 806, 811, 819, 820, 821, 822, 823, 861, 0, - 0, 0, 0, 869, 0, 0, 1008, 1118, 1120, 1012, - 0, 1016, 1020, 0, 0, 0, 0, 0, 1067, 1065, - 1460, 0, 0, 0, 1114, 0, 0, 0, 1136, 1137, - 0, 0, 0, 1454, 0, 0, 1143, 0, 1415, 1153, - 0, 0, 0, 0, 0, 1159, 1160, 1161, 1162, 1163, - 1164, 1165, 1166, 1167, 1168, 1476, 1170, 0, 0, 0, - 0, 0, 1175, 1176, 1177, 1178, 1179, 0, 1181, 0, - 1182, 0, 0, 0, 0, 1189, 1190, 1192, 0, 0, - 1195, 1196, 0, 1198, 0, 1200, 1201, 1202, 1203, 1204, - 1205, 0, 1207, 0, 1209, 1210, 1211, 0, 1213, 0, - 1215, 1216, 0, 1218, 0, 1220, 0, 1223, 0, 1226, - 0, 1229, 0, 1232, 0, 1235, 0, 1238, 0, 1241, - 0, 1244, 0, 1247, 0, 1250, 0, 1253, 0, 1256, - 0, 1259, 0, 1262, 0, 1265, 0, 1268, 1269, 1270, - 0, 1272, 0, 1274, 0, 1277, 1278, 0, 1280, 0, - 1283, 0, 1286, 0, 0, 1287, 0, 0, 0, 1291, - 0, 0, 0, 0, 1300, 1301, 1302, 1303, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1314, 1315, - 1316, 1317, 1318, 1319, 0, 1321, 0, 1096, 0, 0, - 1096, 0, 0, 0, 0, 0, 1134, 899, 0, 1416, - 1417, 1418, 1419, 1420, 0, 0, 0, 0, 0, 0, - 1360, 1361, 1363, 0, 0, 1366, 0, 1368, 0, 1573, - 846, 849, 851, 937, 986, 987, 0, 0, 0, 0, - 967, 1598, 892, 893, 896, 945, 0, 1464, 0, 0, - 924, 997, 925, 0, 904, 55, 940, 0, 1530, 1529, - 1542, 1555, 383, 383, 
377, 378, 384, 379, 381, 382, - 1480, 0, 1485, 0, 1566, 0, 0, 1558, 0, 0, - 0, 0, 0, 0, 0, 0, 970, 0, 0, 973, - 0, 0, 0, 0, 964, 935, 0, 936, 0, -2, - 0, 0, 94, 95, 0, 0, 0, 117, 118, 0, - 0, 124, 386, 387, 158, 167, 462, 182, 435, 0, + 597, 599, 518, 603, 605, 596, 598, 600, 518, 604, + 606, 1511, 1512, 1513, 0, 0, 713, 0, 0, 451, + 94, 1605, 718, 722, 723, 783, 741, 775, 783, 733, + 740, 763, 808, 809, 814, 822, 823, 824, 862, 0, + 0, 0, 0, 870, 0, 0, 1010, 1122, 1124, 1014, + 0, 1018, 1022, 0, 0, 0, 0, 0, 1069, 1067, + 1465, 0, 0, 0, 1118, 0, 0, 1140, 1141, 0, + 0, 0, 0, 1459, 0, 0, 1148, 0, 1420, 1099, + 0, 0, 0, 0, 0, 1099, 1099, 1099, 1099, 1099, + 1099, 1099, 1099, 1099, 1099, 1483, 1175, 0, 0, 0, + 0, 0, 1180, 1181, 1182, 1183, 1184, 0, 1186, 0, + 1187, 0, 0, 0, 0, 1194, 1195, 1197, 0, 0, + 1200, 1201, 0, 1203, 0, 1205, 1206, 1207, 1208, 1209, + 1210, 0, 1212, 0, 1214, 1215, 1216, 0, 1218, 0, + 1220, 1221, 0, 1223, 0, 1225, 0, 1228, 0, 1231, + 0, 1234, 0, 1237, 0, 1240, 0, 1243, 0, 1246, + 0, 1249, 0, 1252, 0, 1255, 0, 1258, 0, 1261, + 0, 1264, 0, 1267, 0, 1270, 0, 1273, 1274, 1275, + 0, 1277, 0, 1279, 0, 1282, 1283, 0, 1285, 0, + 1288, 0, 1291, 0, 0, 1292, 0, 0, 0, 1296, + 0, 0, 0, 0, 1305, 1306, 1307, 1308, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1319, 1320, + 1321, 1322, 1323, 1324, 0, 1326, 0, 1100, 0, 0, + 1100, 0, 0, 0, 0, 0, 1138, 1614, 0, 1421, + 1422, 1423, 1424, 1425, 0, 0, 0, 0, 0, 0, + 1365, 1366, 1368, 0, 0, 1371, 0, 1373, 0, 1588, + 847, 850, 852, 935, 984, 985, 0, 0, 0, 0, + 965, 1613, 893, 894, 897, 943, 0, 1469, 0, 0, + 922, 997, 0, 923, 0, 53, 938, 0, 1542, 1541, + 1554, 1567, 383, 383, 377, 378, 384, 379, 381, 382, + 1487, 0, 1492, 0, 1581, 0, 0, 1570, 0, 0, + 0, 0, 0, 0, 0, 0, 968, 0, 0, 971, + 0, 0, 0, 0, 962, 933, 0, 934, 0, -2, + 0, 0, 92, 93, 0, 0, 0, 115, 116, 0, + 0, 122, 386, 387, 156, 165, 462, 180, 435, 0, 0, 307, 373, 334, 335, 336, 0, 358, 0, 0, - 0, 0, 456, 130, 1494, 1493, 401, 401, 392, 0, - 395, 0, 0, 0, 1603, 361, 424, 0, 148, 0, 
- 0, 0, 0, 0, 154, 611, 0, 0, 618, 0, - 0, 0, 525, 0, 536, 537, 0, 645, -2, 707, - 389, 0, 403, 406, 952, 0, 0, 538, 0, 541, + 0, 0, 456, 128, 1501, 1500, 401, 401, 392, 0, + 395, 0, 0, 0, 1618, 361, 424, 0, 146, 0, + 0, 0, 0, 0, 152, 613, 0, 0, 620, 0, + 0, 0, 525, 0, 536, 537, 0, 647, -2, 709, + 389, 0, 403, 406, 950, 0, 0, 538, 0, 541, 542, 455, 516, 547, 548, 562, 549, 497, 498, 495, - 0, 0, 1517, 1518, 1523, 1521, 1522, 135, 583, 585, + 0, 0, 1524, 1525, 1530, 1528, 1529, 133, 583, 585, 589, 584, 588, 0, 0, 0, 520, 0, 520, 581, - 0, 451, 1490, 0, 715, 452, 453, 783, 783, 856, - 99, 0, 859, 0, 0, 0, 0, 1013, 1017, 1030, - 1031, 1421, 1447, 360, 360, 1434, 360, 366, 1437, 360, - 1439, 360, 1442, 360, 1445, 1446, 0, 0, 1060, 0, - 914, 0, 0, 1142, 1457, 0, 0, 1154, 1155, 1156, - 1157, 1158, 1451, 0, 0, 0, 1174, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 146, 147, 0, - 0, 0, 0, 0, 0, 1371, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1091, 1095, 0, - 1097, 1098, 0, 0, 1323, 0, 0, 1341, 0, 0, - 0, 0, 0, 0, 0, 1461, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 988, 993, 993, 993, - 0, 0, 0, 1585, 1586, 1465, 1466, 997, 1467, 926, - 905, 944, 1548, 0, 1541, 0, -2, 1550, 0, 0, - 0, 1556, 375, 376, 918, 82, 998, 85, 0, 1566, - 1575, 0, 1557, 1568, 1570, 0, 0, 0, 1562, 0, - 997, 928, 959, 961, 0, 956, 971, 972, 974, 0, - 976, 0, 978, 979, 939, 933, 0, 102, 0, 997, - 997, 101, 0, 984, 121, 122, 123, 461, 186, 191, - 0, 0, 0, 196, 0, 198, 0, 0, 0, 203, - 204, 401, 401, 436, 0, 304, 306, 0, 0, 189, - 374, 0, 374, 0, 365, 367, 0, 437, 457, 1491, - 1492, 0, 0, 394, 398, 399, 400, 0, 1592, 150, - 0, 0, 0, 614, 0, 642, 0, 0, 0, 0, - 0, 0, 178, 517, 674, 675, 676, 677, 678, 679, - 680, 681, 682, 0, 401, 0, 0, 0, 401, 401, - 401, 0, 699, 388, 0, 0, 670, 667, 539, 0, - 220, 221, 228, 229, 231, 0, 0, 0, 0, 0, - 546, 939, 1508, 1509, 1510, 0, 1520, 1524, 138, 0, - 0, 0, 0, 591, 595, 601, 0, 519, 602, 712, - 713, 714, 97, 
724, 730, 858, 878, 1006, 1014, 1018, - 0, 0, 0, 0, 1448, 1432, 374, 1435, 1436, 1438, - 1440, 1441, 1443, 1444, 1056, 1057, 1061, 0, 1139, 0, - 1141, 1455, 0, 1485, 0, 0, 0, 1173, 0, 0, - 0, 1184, 1183, 1185, 0, 1187, 1188, 1193, 1194, 1197, - 1199, 1206, 1208, 1212, 1214, 1217, 1219, 1221, 0, 1224, - 0, 1227, 0, 1230, 0, 1233, 0, 1236, 0, 1239, - 0, 1242, 0, 1245, 0, 1248, 0, 1251, 0, 1254, - 0, 1257, 0, 1260, 0, 1263, 0, 1266, 0, 1271, - 1273, 0, 1276, 1279, 1281, 0, 1284, 0, 1288, 0, - 1290, 1292, 1293, 0, 0, 0, 1304, 1305, 1306, 1307, - 1308, 1309, 1310, 1311, 1312, 1313, 1320, 0, 1089, 1092, - 1322, 1099, 1100, 1105, 1325, 0, 0, 0, 1328, 0, - 0, 0, 1332, 1135, 1343, 0, 1348, 0, 0, 1354, - 0, 1358, 0, 1364, 1365, 1367, 1369, 0, 0, 0, - 0, 0, 965, 946, 66, 1467, 1469, 0, 1535, 1533, - 1533, 1543, 1544, 0, 0, 1551, 0, 0, 0, 0, - 86, 0, 0, 0, 1571, 0, 0, 0, 0, 103, - 1476, 953, 960, 0, 0, 954, 0, 955, 975, 977, - 932, 0, 997, 997, 92, 93, 0, 192, 0, 194, - 0, 197, 199, 200, 201, 207, 208, 209, 202, 0, - 0, 303, 305, 0, 0, 348, 359, 349, 0, 0, - 1495, 1496, 1497, 1498, 1499, 1500, 1501, 1502, 939, 151, - 152, 153, 606, 0, 616, 0, 941, 0, 609, 0, - 528, 0, 0, 0, 401, 401, 401, 0, 0, 0, - 0, 684, 0, 0, 647, 0, 655, 0, 0, 0, - 232, 233, 0, 1519, 582, 0, 136, 137, 0, 0, - 587, 521, 522, 1054, 0, 0, 0, 1055, 1433, 0, - 0, 0, 0, 1452, 0, 0, 0, 0, 1180, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1296, 0, 0, 0, 636, 637, 0, 1372, 1094, - 1476, 0, 1096, 1106, 1107, 0, 1096, 1342, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 994, - 0, 0, 0, 0, 985, 1469, 1474, 0, 0, 1538, - 0, 1531, 1534, 1532, 1545, 0, 0, 1552, 0, 1554, - 0, 1576, 1577, 1569, 0, 1561, 1564, 1560, 1563, 1485, - 957, 0, 962, 0, 1476, 91, 0, 195, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 205, 206, 0, - 0, 363, 368, 0, 0, 0, 607, 0, 942, 619, - 610, 0, 697, 0, 701, 0, 0, 0, 704, 705, - 706, 683, 0, 687, 429, 671, 668, 669, 540, 0, - 139, 140, 0, 0, 0, 1422, 0, 1425, 1138, 1140, - 0, 1169, 1171, 1172, 1430, 1431, 1186, 1222, 1225, 1228, - 1231, 1234, 1237, 1240, 1243, 1246, 1249, 1252, 1255, 1258, - 1261, 1264, 1267, 1275, 1282, 1285, 1289, 1294, 0, 1297, - 0, 0, 1298, 0, 638, 1085, 0, 0, 1103, 1104, - 0, 1327, 1329, 1330, 1331, 1344, 0, 1349, 1350, 0, - 1355, 0, 1359, 1370, 0, 990, 947, 948, 995, 996, - 0, 0, 938, 1474, 84, 1475, 1472, 0, 1470, 1468, - 1527, 0, 1536, 1537, 1546, 1547, 1553, 0, 1559, 0, - 89, 0, 0, 0, 1485, 193, 0, 212, 0, 615, - 0, 618, 608, 695, 696, 0, 708, 700, 702, 703, - 685, -2, 1511, 0, 0, 0, 590, 1423, 0, 0, - 1299, 0, 634, 635, 1093, 1086, 0, 1071, 1072, 1090, - 1324, 1326, 0, 0, 0, 0, 989, 991, 992, 83, - 0, 1471, 1111, 0, 1539, 1540, 1567, 1565, 958, 965, - 0, 90, 442, 435, 1511, 0, 0, 0, 688, 689, - 690, 691, 692, 693, 694, 579, 1513, 141, 142, 0, - 509, 510, 511, 135, 0, 1144, 1295, 1087, 0, 0, - 0, 0, 0, 1345, 0, 1351, 0, 1356, 0, 949, - 950, 1473, 0, 0, 620, 0, 622, 0, -2, 430, - 443, 0, 187, 213, 214, 0, 0, 217, 218, 219, - 210, 211, 131, 0, 0, 709, 0, 1514, 1515, 0, - 138, 0, 0, 1078, 1079, 1080, 1081, 1083, 0, 0, - 0, 0, 1112, 1091, 621, 0, 0, 385, 0, 631, - 431, 432, 0, 438, 439, 440, 441, 215, 216, 643, - 0, 0, 508, 586, 1424, 0, 0, 1346, 0, 1352, - 0, 1357, 0, 623, 624, 632, 0, 433, 0, 434, - 0, 0, 0, 612, 0, 643, 1512, 1088, 1082, 1084, - 0, 0, 1110, 0, 633, 629, 444, 446, 447, 0, - 0, 445, 644, 613, 1347, 1353, 0, 448, 449, 450, - 625, 626, 627, 628, + 0, 451, 1497, 0, 717, 452, 453, 786, 786, 857, + 97, 0, 860, 0, 0, 0, 0, 1015, 1019, 1032, + 1033, 1426, 1452, 360, 360, 1439, 360, 366, 1442, 360, + 1444, 360, 1447, 360, 1450, 1451, 0, 0, 1062, 0, 
+ 0, 0, 0, 1147, 1462, 0, 0, 1158, 1098, 1099, + 1099, 1099, 1099, 1099, 1164, 1165, 1166, 1167, 1168, 1169, + 1170, 1171, 1172, 1173, 1456, 0, 0, 0, 1179, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 144, + 145, 0, 0, 0, 0, 0, 0, 1376, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1093, + 1097, 0, 1101, 1102, 0, 0, 1328, 0, 0, 1346, + 0, 0, 0, 0, 0, 0, 0, 1466, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 986, 993, + 0, 993, 0, 993, 0, 0, 0, 1600, 1601, 1470, + 1471, 997, 1472, 912, 924, 942, 1560, 0, 1553, 0, + -2, 1562, 0, 0, 0, 1568, 375, 376, 916, 80, + 998, 83, 0, 1581, 1590, 0, 1578, 1583, 1585, 0, + 0, 0, 1574, 0, 997, 926, 957, 959, 0, 954, + 969, 970, 972, 0, 974, 0, 976, 977, 937, 931, + 0, 100, 0, 997, 997, 99, 0, 982, 119, 120, + 121, 461, 184, 189, 0, 0, 0, 194, 0, 196, + 0, 0, 0, 201, 202, 401, 401, 436, 0, 304, + 306, 0, 0, 187, 374, 0, 374, 0, 365, 367, + 0, 437, 457, 1498, 1499, 0, 0, 394, 398, 399, + 400, 0, 1607, 148, 0, 0, 0, 616, 0, 644, + 0, 0, 0, 0, 0, 0, 176, 517, 676, 677, + 678, 679, 680, 681, 682, 683, 684, 0, 401, 0, + 0, 0, 401, 401, 401, 0, 701, 388, 0, 0, + 672, 669, 539, 0, 218, 219, 226, 227, 229, 0, + 0, 0, 0, 0, 546, 937, 1515, 1516, 1517, 0, + 1527, 1531, 136, 0, 0, 0, 0, 591, 595, 601, + 0, 519, 602, 714, 715, 716, 95, 726, 732, 859, + 879, 1006, 1016, 1020, 0, 0, 0, 0, 1453, 1437, + 374, 1440, 1441, 1443, 1445, 1446, 1448, 1449, 1058, 1059, + 1063, 0, 1144, 0, 1146, 0, 1460, 0, 1159, 1160, + 1161, 1162, 1163, 1492, 0, 0, 0, 1178, 0, 0, + 0, 1189, 1188, 1190, 0, 1192, 1193, 1198, 1199, 1202, + 1204, 1211, 1213, 1217, 1219, 1222, 1224, 1226, 0, 1229, + 0, 1232, 0, 1235, 0, 1238, 0, 1241, 0, 1244, + 0, 1247, 0, 1250, 0, 1253, 0, 1256, 0, 1259, + 0, 1262, 0, 1265, 0, 1268, 0, 1271, 0, 1276, + 1278, 0, 1281, 1284, 1286, 0, 1289, 0, 1293, 0, + 1295, 1297, 1298, 0, 0, 0, 1309, 1310, 1311, 1312, + 1313, 1314, 1315, 1316, 1317, 1318, 1325, 0, 1091, 1327, + 1103, 1104, 1109, 
1330, 0, 0, 0, 1333, 0, 0, + 0, 1337, 1139, 1348, 0, 1353, 0, 0, 1359, 0, + 1363, 0, 1369, 1370, 1372, 1374, 0, 0, 0, 0, + 0, 0, 0, 963, 944, 64, 1472, 1476, 0, 1547, + 1545, 1545, 1555, 1556, 0, 0, 1563, 0, 0, 0, + 0, 84, 0, 0, 1569, 0, 0, 1586, 0, 0, + 0, 0, 101, 1483, 951, 958, 0, 0, 952, 0, + 953, 973, 975, 930, 0, 997, 997, 90, 91, 0, + 190, 0, 192, 0, 195, 197, 198, 199, 205, 206, + 207, 200, 0, 0, 303, 305, 0, 0, 348, 359, + 349, 0, 0, 1502, 1503, 1504, 1505, 1506, 1507, 1508, + 1509, 937, 149, 150, 151, 608, 0, 618, 0, 939, + 0, 611, 0, 528, 0, 0, 0, 401, 401, 401, + 0, 0, 0, 0, 686, 0, 0, 649, 0, 657, + 0, 0, 0, 230, 231, 0, 1526, 582, 0, 134, + 135, 0, 0, 587, 521, 522, 1056, 0, 0, 0, + 1057, 1438, 0, 0, 0, 0, 0, 1457, 0, 0, + 0, 0, 1185, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1301, 0, 0, 0, 638, + 639, 0, 1377, 1096, 1483, 0, 1100, 1110, 1111, 0, + 1100, 1347, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 994, 0, 0, 0, 945, 946, 0, + 0, 0, 983, 1476, 1481, 0, 0, 1550, 0, 1543, + 1546, 1544, 1557, 0, 0, 1564, 0, 1566, 0, 1591, + 1592, 1584, 1579, 0, 1573, 1576, 1578, 1575, 1492, 955, + 0, 960, 0, 1483, 89, 0, 193, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 203, 204, 0, 0, + 363, 368, 0, 0, 0, 609, 0, 940, 621, 612, + 0, 699, 0, 703, 0, 0, 0, 706, 707, 708, + 685, 0, 689, 429, 673, 670, 671, 540, 0, 137, + 138, 0, 0, 0, 1427, 0, 1430, 1142, 1145, 1143, + 0, 1174, 1176, 1177, 1435, 1436, 1191, 1227, 1230, 1233, + 1236, 1239, 1242, 1245, 1248, 1251, 1254, 1257, 1260, 1263, + 1266, 1269, 1272, 1280, 1287, 1290, 1294, 1299, 0, 1302, + 0, 0, 1303, 0, 640, 1087, 0, 0, 1107, 1108, 
+ 0, 1332, 1334, 1335, 1336, 1349, 0, 1354, 1355, 0, + 1360, 0, 1364, 1375, 0, 988, 995, 996, 0, 991, + 0, 992, 0, 936, 1481, 82, 1482, 1479, 0, 1477, + 1474, 1539, 0, 1548, 1549, 1558, 1559, 1565, 0, 0, + 1578, 0, 1572, 87, 0, 0, 0, 1492, 191, 0, + 210, 0, 617, 0, 620, 610, 697, 698, 0, 710, + 702, 704, 705, 687, -2, 1518, 0, 0, 0, 590, + 1428, 0, 0, 1304, 0, 636, 637, 1095, 1088, 0, + 1073, 1074, 1092, 1329, 1331, 0, 0, 0, 987, 947, + 948, 989, 990, 81, 0, 1478, 1115, 0, 1473, 0, + 1551, 1552, 1582, 0, 1571, 1577, 956, 963, 0, 88, + 442, 435, 1518, 0, 0, 0, 690, 691, 692, 693, + 694, 695, 696, 579, 1520, 139, 140, 0, 509, 510, + 511, 133, 0, 1149, 1300, 1089, 0, 0, 0, 0, + 0, 1350, 0, 1356, 0, 1361, 0, 1480, 0, 0, + 1475, 1580, 622, 0, 624, 0, -2, 430, 443, 0, + 185, 211, 212, 0, 0, 215, 216, 217, 208, 209, + 129, 0, 0, 711, 0, 1521, 1522, 0, 136, 0, + 0, 1080, 1081, 1082, 1083, 1085, 0, 0, 0, 0, + 1116, 1093, 623, 0, 0, 385, 0, 633, 431, 432, + 0, 438, 439, 440, 441, 213, 214, 645, 0, 0, + 508, 586, 1429, 0, 0, 1351, 0, 1357, 0, 1362, + 0, 625, 626, 634, 0, 433, 0, 434, 0, 0, + 0, 614, 0, 645, 1519, 1090, 1084, 1086, 0, 0, + 1114, 0, 635, 631, 444, 446, 447, 0, 0, 445, + 646, 615, 1352, 1358, 0, 448, 449, 450, 627, 628, + 629, 630, } var yyTok1 = [...]int{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 145, 3, 3, 3, 173, 165, 3, - 87, 89, 170, 168, 88, 169, 223, 171, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 732, - 153, 152, 154, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 147, 3, 3, 3, 175, 167, 3, + 88, 90, 172, 170, 89, 171, 225, 173, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 737, + 155, 154, 156, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 175, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 177, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 141, 3, 176, + 3, 3, 3, 3, 143, 3, 178, } var yyTok2 = [...]int{ @@ -9616,19 +9773,19 @@ var yyTok2 = [...]int{ 52, 
53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 90, 91, 92, 93, 94, + 82, 83, 84, 85, 86, 87, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, - 135, 136, 137, 138, 139, 140, 142, 143, 144, 146, - 147, 148, 149, 150, 151, 155, 156, 157, 158, 159, - 160, 161, 162, 163, 164, 166, 167, 172, 174, 177, - 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 135, 136, 137, 138, 139, 140, 141, 142, 144, 145, + 146, 148, 149, 150, 151, 152, 153, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 168, 169, 174, + 176, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, - 218, 219, 220, 221, 222, 224, 225, 226, 227, 228, + 218, 219, 220, 221, 222, 223, 224, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, @@ -9728,7 +9885,8 @@ var yyTok3 = [...]int{ 58040, 715, 58041, 716, 58042, 717, 58043, 718, 58044, 719, 58045, 720, 58046, 721, 58047, 722, 58048, 723, 58049, 724, 58050, 725, 58051, 726, 58052, 727, 58053, 728, 58054, 729, - 58055, 730, 58056, 731, 0, + 58055, 730, 58056, 731, 58057, 732, 58058, 733, 58059, 734, + 58060, 735, 58061, 736, 0, } var yyErrorMessages = [...]struct { @@ -10078,7 +10236,7 @@ yydefault: case 1: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:599 +//line sql.y:616 { stmt := yyDollar[2].statementUnion() // If the statement is empty and we have comments @@ -10092,199 +10250,187 @@ yydefault: } case 2: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:612 +//line sql.y:629 { } case 3: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:613 +//line sql.y:630 { } 
case 4: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:617 +//line sql.y:634 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL case 40: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:656 +//line sql.y:673 { setParseTree(yylex, nil) } case 41: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:662 - { - yyLOCAL = NewVariableExpression(yyDollar[1].str, SingleAt) - } - yyVAL.union = yyLOCAL - case 42: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:668 - { - yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) - } - case 43: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:673 +//line sql.y:679 { - yyVAL.identifierCI = NewIdentifierCI("") + yyLOCAL = NewVariableExpression(yyDollar[1].str, SingleAt) } - case 44: + yyVAL.union = yyLOCAL + case 42: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:677 +//line sql.y:685 { - yyVAL.identifierCI = yyDollar[1].identifierCI + yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 45: + case 43: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:683 +//line sql.y:691 { yyLOCAL = NewVariableExpression(string(yyDollar[1].str), SingleAt) } yyVAL.union = yyLOCAL - case 46: + case 44: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:687 +//line sql.y:695 { yyLOCAL = NewVariableExpression(string(yyDollar[1].str), DoubleAt) } yyVAL.union = yyLOCAL - case 47: + case 45: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:693 +//line sql.y:701 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 48: + case 46: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:699 +//line sql.y:707 { yyLOCAL = &Load{} } yyVAL.union = yyLOCAL - case 49: + case 47: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *With -//line sql.y:705 +//line sql.y:713 { yyLOCAL = &With{CTEs: yyDollar[2].ctesUnion(), Recursive: false} } yyVAL.union = yyLOCAL - case 50: + case 48: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *With 
-//line sql.y:709 +//line sql.y:717 { yyLOCAL = &With{CTEs: yyDollar[3].ctesUnion(), Recursive: true} } yyVAL.union = yyLOCAL - case 51: + case 49: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *With -//line sql.y:714 +//line sql.y:722 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 52: + case 50: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *With -//line sql.y:718 +//line sql.y:726 { yyLOCAL = yyDollar[1].withUnion() } yyVAL.union = yyLOCAL - case 53: + case 51: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:724 +//line sql.y:732 { yySLICE := (*[]*CommonTableExpr)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].cteUnion()) } - case 54: + case 52: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*CommonTableExpr -//line sql.y:728 +//line sql.y:736 { yyLOCAL = []*CommonTableExpr{yyDollar[1].cteUnion()} } yyVAL.union = yyLOCAL - case 55: + case 53: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *CommonTableExpr -//line sql.y:734 +//line sql.y:742 { yyLOCAL = &CommonTableExpr{ID: yyDollar[1].identifierCS, Columns: yyDollar[2].columnsUnion(), Subquery: yyDollar[4].subqueryUnion()} } yyVAL.union = yyLOCAL - case 56: + case 54: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:740 +//line sql.y:748 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 57: + case 55: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:744 +//line sql.y:752 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 58: + case 56: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:748 +//line sql.y:756 { setLockInSelect(yyDollar[2].selStmtUnion(), yyDollar[3].lockUnion()) yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 59: + case 57: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:771 +//line sql.y:779 { yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion()) yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = 
yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 60: + case 58: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:777 +//line sql.y:785 { yyDollar[1].selStmtUnion().SetLimit(yyDollar[2].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 61: + case 59: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:782 +//line sql.y:790 { yyDollar[1].selStmtUnion().SetOrderBy(yyDollar[2].orderByUnion()) yyDollar[1].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 62: + case 60: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:788 +//line sql.y:796 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion()) @@ -10292,20 +10438,20 @@ yydefault: yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 63: + case 61: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:795 +//line sql.y:803 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetLimit(yyDollar[3].limitUnion()) yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 64: + case 62: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:801 +//line sql.y:809 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) yyDollar[2].selStmtUnion().SetOrderBy(yyDollar[3].orderByUnion()) @@ -10313,175 +10459,175 @@ yydefault: yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 65: + case 63: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:808 +//line sql.y:816 { yyDollar[2].selStmtUnion().SetWith(yyDollar[1].withUnion()) } - case 66: + case 64: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:812 +//line sql.y:820 { yyLOCAL = NewSelect(Comments(yyDollar[2].strs), SelectExprs{&Nextval{Expr: yyDollar[5].exprUnion()}}, []string{yyDollar[3].str} 
/*options*/, nil, TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}, nil /*where*/, nil /*groupBy*/, nil /*having*/, nil) } yyVAL.union = yyLOCAL - case 67: + case 65: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:818 +//line sql.y:826 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 68: + case 66: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:822 +//line sql.y:830 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 69: + case 67: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:826 +//line sql.y:834 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 70: + case 68: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:830 +//line sql.y:838 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 71: + case 69: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:834 +//line sql.y:842 { yyLOCAL = &Union{Left: yyDollar[1].selStmtUnion(), Distinct: yyDollar[2].booleanUnion(), Right: yyDollar[3].selStmtUnion()} } yyVAL.union = yyLOCAL - case 72: + case 70: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:840 +//line sql.y:848 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 73: + case 71: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:844 +//line sql.y:852 { setLockInSelect(yyDollar[1].selStmtUnion(), yyDollar[2].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 74: + case 72: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:849 +//line sql.y:857 { yyLOCAL = yyDollar[1].selStmtUnion() } 
yyVAL.union = yyLOCAL - case 75: + case 73: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:853 +//line sql.y:861 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 76: + case 74: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:859 +//line sql.y:867 { yyLOCAL = yyDollar[2].selStmtUnion() } yyVAL.union = yyLOCAL - case 77: + case 75: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:863 +//line sql.y:871 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 78: + case 76: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:868 +//line sql.y:876 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyDollar[1].selStmtUnion().SetLock(yyDollar[3].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 79: + case 77: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:874 +//line sql.y:882 { yyDollar[1].selStmtUnion().SetInto(yyDollar[3].selectIntoUnion()) yyDollar[1].selStmtUnion().SetLock(yyDollar[2].lockUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 80: + case 78: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:880 +//line sql.y:888 { yyDollar[1].selStmtUnion().SetInto(yyDollar[2].selectIntoUnion()) yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 81: + case 79: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:887 +//line sql.y:895 { yyLOCAL = &Stream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: yyDollar[5].tableName} } yyVAL.union = yyLOCAL - case 82: + case 80: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:893 +//line sql.y:901 { yyLOCAL = &VStream{Comments: Comments(yyDollar[2].strs).Parsed(), SelectExpr: yyDollar[3].selectExprUnion(), Table: 
yyDollar[5].tableName, Where: NewWhere(WhereClause, yyDollar[6].exprUnion()), Limit: yyDollar[7].limitUnion()} } yyVAL.union = yyLOCAL - case 83: + case 81: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:901 +//line sql.y:909 { - yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].selectIntoUnion() /*into*/, yyDollar[6].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[7].exprUnion()), GroupBy(yyDollar[8].exprsUnion()), NewWhere(HavingClause, yyDollar[9].exprUnion()), yyDollar[10].namedWindowsUnion()) + yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, yyDollar[5].selectIntoUnion() /*into*/, yyDollar[6].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[7].exprUnion()), yyDollar[8].groupByUnion(), NewWhere(HavingClause, yyDollar[9].exprUnion()), yyDollar[10].namedWindowsUnion()) } yyVAL.union = yyLOCAL - case 84: + case 82: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL SelectStatement -//line sql.y:905 +//line sql.y:913 { - yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, nil, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), GroupBy(yyDollar[7].exprsUnion()), NewWhere(HavingClause, yyDollar[8].exprUnion()), yyDollar[9].namedWindowsUnion()) + yyLOCAL = NewSelect(Comments(yyDollar[2].strs), yyDollar[4].selectExprsUnion() /*SelectExprs*/, yyDollar[3].strs /*options*/, nil, yyDollar[5].tableExprsUnion() /*from*/, NewWhere(WhereClause, yyDollar[6].exprUnion()), yyDollar[7].groupByUnion(), NewWhere(HavingClause, yyDollar[8].exprUnion()), yyDollar[9].namedWindowsUnion()) } yyVAL.union = yyLOCAL - case 85: + case 83: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:911 +//line sql.y:919 { // insert_data returns a *Insert pre-filled with 
Columns & Values ins := yyDollar[6].insUnion() @@ -10494,10 +10640,10 @@ yydefault: yyLOCAL = ins } yyVAL.union = yyLOCAL - case 86: + case 84: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:923 +//line sql.y:931 { cols := make(Columns, 0, len(yyDollar[7].updateExprsUnion())) vals := make(ValTuple, 0, len(yyDollar[8].updateExprsUnion())) @@ -10508,329 +10654,329 @@ yydefault: yyLOCAL = &Insert{Action: yyDollar[1].insertActionUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Ignore: yyDollar[3].ignoreUnion(), Table: getAliasedTableExprFromTableName(yyDollar[4].tableName), Partitions: yyDollar[5].partitionsUnion(), Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprsUnion())} } yyVAL.union = yyLOCAL - case 87: + case 85: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL InsertAction -//line sql.y:935 +//line sql.y:943 { yyLOCAL = InsertAct } yyVAL.union = yyLOCAL - case 88: + case 86: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL InsertAction -//line sql.y:939 +//line sql.y:947 { yyLOCAL = ReplaceAct } yyVAL.union = yyLOCAL - case 89: + case 87: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Statement -//line sql.y:945 +//line sql.y:953 { yyLOCAL = &Update{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: yyDollar[5].tableExprsUnion(), Exprs: yyDollar[7].updateExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion()), OrderBy: yyDollar[9].orderByUnion(), Limit: yyDollar[10].limitUnion()} } yyVAL.union = yyLOCAL - case 90: + case 88: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:951 +//line sql.y:959 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[6].tableName, As: yyDollar[7].identifierCS}}, Partitions: yyDollar[8].partitionsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion()), 
OrderBy: yyDollar[10].orderByUnion(), Limit: yyDollar[11].limitUnion()} } yyVAL.union = yyLOCAL - case 91: + case 89: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Statement -//line sql.y:955 +//line sql.y:963 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[6].tableNamesUnion(), TableExprs: yyDollar[8].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[9].exprUnion())} } yyVAL.union = yyLOCAL - case 92: + case 90: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:959 +//line sql.y:967 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } yyVAL.union = yyLOCAL - case 93: + case 91: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:963 +//line sql.y:971 { yyLOCAL = &Delete{With: yyDollar[1].withUnion(), Comments: Comments(yyDollar[3].strs).Parsed(), Ignore: yyDollar[4].ignoreUnion(), Targets: yyDollar[5].tableNamesUnion(), TableExprs: yyDollar[7].tableExprsUnion(), Where: NewWhere(WhereClause, yyDollar[8].exprUnion())} } yyVAL.union = yyLOCAL - case 94: + case 92: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:968 +//line sql.y:976 { } - case 95: + case 93: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:969 +//line sql.y:977 { } - case 96: + case 94: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:973 +//line sql.y:981 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 97: + case 95: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:977 +//line sql.y:985 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 98: + case 96: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:983 +//line sql.y:991 { 
yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 99: + case 97: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:987 +//line sql.y:995 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 100: + case 98: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableNames -//line sql.y:993 +//line sql.y:1001 { yyLOCAL = TableNames{yyDollar[1].tableName} } yyVAL.union = yyLOCAL - case 101: + case 99: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:997 +//line sql.y:1005 { yySLICE := (*TableNames)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableName) } - case 102: + case 100: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Partitions -//line sql.y:1002 +//line sql.y:1010 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 103: + case 101: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Partitions -//line sql.y:1006 +//line sql.y:1014 { yyLOCAL = yyDollar[3].partitionsUnion() } yyVAL.union = yyLOCAL - case 104: + case 102: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:1012 +//line sql.y:1020 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[3].setExprsUnion()) } yyVAL.union = yyLOCAL - case 105: + case 103: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SetExprs -//line sql.y:1018 +//line sql.y:1026 { yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } yyVAL.union = yyLOCAL - case 106: + case 104: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1022 +//line sql.y:1030 { yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].setExprUnion()) } - case 107: + case 105: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1028 +//line sql.y:1036 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: NewStrLiteral("on")} } yyVAL.union = yyLOCAL - case 108: + case 106: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1032 +//line sql.y:1040 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: 
NewStrLiteral("off")} } yyVAL.union = yyLOCAL - case 109: + case 107: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1036 +//line sql.y:1044 { yyLOCAL = &SetExpr{Var: yyDollar[1].variableUnion(), Expr: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 110: + case 108: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1040 +//line sql.y:1048 { yyLOCAL = &SetExpr{Var: NewSetVariable(string(yyDollar[1].str), SessionScope), Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 111: + case 109: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:1046 +//line sql.y:1054 { yyLOCAL = NewSetVariable(string(yyDollar[1].str), SessionScope) } yyVAL.union = yyLOCAL - case 112: + case 110: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Variable -//line sql.y:1050 +//line sql.y:1058 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 113: + case 111: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Variable -//line sql.y:1054 +//line sql.y:1062 { yyLOCAL = NewSetVariable(string(yyDollar[2].str), yyDollar[1].scopeUnion()) } yyVAL.union = yyLOCAL - case 114: + case 112: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:1060 +//line sql.y:1068 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), UpdateSetExprsScope(yyDollar[5].setExprsUnion(), yyDollar[3].scopeUnion())) } yyVAL.union = yyLOCAL - case 115: + case 113: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:1064 +//line sql.y:1072 { yyLOCAL = NewSetStatement(Comments(yyDollar[2].strs).Parsed(), yyDollar[4].setExprsUnion()) } yyVAL.union = yyLOCAL - case 116: + case 114: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SetExprs -//line sql.y:1070 +//line sql.y:1078 { yyLOCAL = SetExprs{yyDollar[1].setExprUnion()} } yyVAL.union = yyLOCAL - case 117: + case 115: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1074 +//line sql.y:1082 { yySLICE := (*SetExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, 
yyDollar[3].setExprUnion()) } - case 118: + case 116: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1080 +//line sql.y:1088 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionIsolationStr, NextTxScope), Expr: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 119: + case 117: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1084 +//line sql.y:1092 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("off")} } yyVAL.union = yyLOCAL - case 120: + case 118: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SetExpr -//line sql.y:1088 +//line sql.y:1096 { yyLOCAL = &SetExpr{Var: NewSetVariable(TransactionReadOnlyStr, NextTxScope), Expr: NewStrLiteral("on")} } yyVAL.union = yyLOCAL - case 121: + case 119: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1094 +//line sql.y:1102 { yyVAL.str = RepeatableReadStr } - case 122: + case 120: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1098 +//line sql.y:1106 { yyVAL.str = ReadCommittedStr } - case 123: + case 121: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1102 +//line sql.y:1110 { yyVAL.str = ReadUncommittedStr } - case 124: + case 122: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1106 +//line sql.y:1114 { yyVAL.str = SerializableStr } - case 125: + case 123: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1112 +//line sql.y:1120 { yyLOCAL = SessionScope } yyVAL.union = yyLOCAL - case 126: + case 124: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1116 +//line sql.y:1124 { yyLOCAL = SessionScope } yyVAL.union = yyLOCAL - case 127: + case 125: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Scope -//line sql.y:1120 +//line sql.y:1128 { yyLOCAL = GlobalScope } yyVAL.union = yyLOCAL - case 128: + case 126: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1126 +//line sql.y:1134 { yyDollar[1].createTableUnion().TableSpec = yyDollar[2].tableSpecUnion() 
yyDollar[1].createTableUnion().FullyParsed = true yyLOCAL = yyDollar[1].createTableUnion() } yyVAL.union = yyLOCAL - case 129: + case 127: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1132 +//line sql.y:1140 { // Create table [name] like [name] yyDollar[1].createTableUnion().OptLike = yyDollar[2].optLikeUnion() @@ -10838,10 +10984,10 @@ yydefault: yyLOCAL = yyDollar[1].createTableUnion() } yyVAL.union = yyLOCAL - case 130: + case 128: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:1139 +//line sql.y:1147 { indexDef := yyDollar[1].alterTableUnion().AlterOptions[0].(*AddIndexDefinition).IndexDefinition indexDef.Columns = yyDollar[3].indexColumnsUnion() @@ -10851,413 +10997,413 @@ yydefault: yyLOCAL = yyDollar[1].alterTableUnion() } yyVAL.union = yyLOCAL - case 131: + case 129: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Statement -//line sql.y:1148 +//line sql.y:1156 { yyLOCAL = &CreateView{ViewName: yyDollar[8].tableName, Comments: Comments(yyDollar[2].strs).Parsed(), IsReplace: yyDollar[3].booleanUnion(), Algorithm: yyDollar[4].str, Definer: yyDollar[5].definerUnion(), Security: yyDollar[6].str, Columns: yyDollar[9].columnsUnion(), Select: yyDollar[11].selStmtUnion(), CheckOption: yyDollar[12].str} } yyVAL.union = yyLOCAL - case 132: + case 130: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:1152 +//line sql.y:1160 { yyDollar[1].createDatabaseUnion().FullyParsed = true yyDollar[1].createDatabaseUnion().CreateOptions = yyDollar[2].databaseOptionsUnion() yyLOCAL = yyDollar[1].createDatabaseUnion() } yyVAL.union = yyLOCAL - case 133: + case 131: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:1159 +//line sql.y:1167 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 134: + case 132: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:1163 +//line sql.y:1171 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 135: + case 133: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1168 +//line 
sql.y:1176 { yyVAL.identifierCI = NewIdentifierCI("") } - case 136: + case 134: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1172 +//line sql.y:1180 { yyVAL.identifierCI = yyDollar[2].identifierCI } - case 137: + case 135: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1178 +//line sql.y:1186 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 138: + case 136: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1183 +//line sql.y:1191 { var v []VindexParam yyLOCAL = v } yyVAL.union = yyLOCAL - case 139: + case 137: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1188 +//line sql.y:1196 { yyLOCAL = yyDollar[2].vindexParamsUnion() } yyVAL.union = yyLOCAL - case 140: + case 138: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []VindexParam -//line sql.y:1194 +//line sql.y:1202 { yyLOCAL = make([]VindexParam, 0, 4) yyLOCAL = append(yyLOCAL, yyDollar[1].vindexParam) } yyVAL.union = yyLOCAL - case 141: + case 139: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1199 +//line sql.y:1207 { yySLICE := (*[]VindexParam)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].vindexParam) } - case 142: + case 140: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1205 +//line sql.y:1213 { yyVAL.vindexParam = VindexParam{Key: yyDollar[1].identifierCI, Val: yyDollar[3].str} } - case 143: + case 141: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1210 +//line sql.y:1218 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 144: + case 142: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1214 +//line sql.y:1222 { yyLOCAL = yyDollar[1].jsonObjectParamsUnion() } yyVAL.union = yyLOCAL - case 145: + case 143: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JSONObjectParam -//line sql.y:1220 +//line sql.y:1228 { yyLOCAL = []*JSONObjectParam{yyDollar[1].jsonObjectParam} } yyVAL.union = yyLOCAL - case 146: + case 144: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1224 +//line sql.y:1232 { 
yySLICE := (*[]*JSONObjectParam)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].jsonObjectParam) } - case 147: + case 145: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1230 +//line sql.y:1238 { yyVAL.jsonObjectParam = &JSONObjectParam{Key: yyDollar[1].exprUnion(), Value: yyDollar[3].exprUnion()} } - case 148: + case 146: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *CreateTable -//line sql.y:1236 +//line sql.y:1244 { yyLOCAL = &CreateTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[6].tableName, IfNotExists: yyDollar[5].booleanUnion(), Temp: yyDollar[3].booleanUnion()} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 149: + case 147: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1243 +//line sql.y:1251 { yyLOCAL = &AlterTable{Comments: Comments(yyDollar[2].strs).Parsed(), Table: yyDollar[4].tableName} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 150: + case 148: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1250 +//line sql.y:1258 { yyLOCAL = &AlterTable{Table: yyDollar[7].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[4].identifierCI}, Options: yyDollar[5].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 151: + case 149: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1255 +//line sql.y:1263 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: IndexTypeFullText}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 152: + case 150: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1260 +//line sql.y:1268 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: 
&IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: IndexTypeSpatial}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 153: + case 151: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *AlterTable -//line sql.y:1265 +//line sql.y:1273 { yyLOCAL = &AlterTable{Table: yyDollar[8].tableName, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition: &IndexDefinition{Info: &IndexInfo{Name: yyDollar[5].identifierCI, Type: IndexTypeUnique}, Options: yyDollar[6].indexOptionsUnion()}}}} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 154: + case 152: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *CreateDatabase -//line sql.y:1272 +//line sql.y:1280 { yyLOCAL = &CreateDatabase{Comments: Comments(yyDollar[4].strs).Parsed(), DBName: yyDollar[6].identifierCS, IfNotExists: yyDollar[5].booleanUnion()} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 155: + case 153: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *AlterDatabase -//line sql.y:1279 +//line sql.y:1287 { yyLOCAL = &AlterDatabase{} setDDL(yylex, yyLOCAL) } yyVAL.union = yyLOCAL - case 158: + case 156: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1290 +//line sql.y:1298 { yyLOCAL = yyDollar[2].tableSpecUnion() yyLOCAL.Options = yyDollar[4].tableOptionsUnion() yyLOCAL.PartitionOption = yyDollar[5].partitionOptionUnion() } yyVAL.union = yyLOCAL - case 159: + case 157: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1297 +//line sql.y:1305 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 160: + case 158: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1301 +//line sql.y:1309 { yyLOCAL = yyDollar[1].databaseOptionsUnion() } yyVAL.union = yyLOCAL - case 161: + case 159: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1307 +//line sql.y:1315 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 162: + case 160: 
yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1311 +//line sql.y:1319 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 163: + case 161: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []DatabaseOption -//line sql.y:1315 +//line sql.y:1323 { yyLOCAL = []DatabaseOption{yyDollar[1].databaseOption} } yyVAL.union = yyLOCAL - case 164: + case 162: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1319 +//line sql.y:1327 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 165: + case 163: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1323 +//line sql.y:1331 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 166: + case 164: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1327 +//line sql.y:1335 { yySLICE := (*[]DatabaseOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].databaseOption) } - case 167: + case 165: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:1333 +//line sql.y:1341 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 168: + case 166: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:1337 +//line sql.y:1345 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 169: + case 167: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1343 +//line sql.y:1351 { yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 170: + case 168: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1347 +//line sql.y:1355 { yyVAL.databaseOption = DatabaseOption{Type: CharacterSetType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 171: + case 169: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1353 +//line sql.y:1361 { yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: string(yyDollar[4].str), IsDefault: 
yyDollar[1].booleanUnion()} } - case 172: + case 170: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1357 +//line sql.y:1365 { yyVAL.databaseOption = DatabaseOption{Type: CollateType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 173: + case 171: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1363 +//line sql.y:1371 { yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: string(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 174: + case 172: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1367 +//line sql.y:1375 { yyVAL.databaseOption = DatabaseOption{Type: EncryptionType, Value: encodeSQLString(yyDollar[4].str), IsDefault: yyDollar[1].booleanUnion()} } - case 175: + case 173: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *OptLike -//line sql.y:1373 +//line sql.y:1381 { yyLOCAL = &OptLike{LikeTable: yyDollar[2].tableName} } yyVAL.union = yyLOCAL - case 176: + case 174: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *OptLike -//line sql.y:1377 +//line sql.y:1385 { yyLOCAL = &OptLike{LikeTable: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 177: + case 175: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColumnDefinition -//line sql.y:1383 +//line sql.y:1391 { yyLOCAL = []*ColumnDefinition{yyDollar[1].columnDefinitionUnion()} } yyVAL.union = yyLOCAL - case 178: + case 176: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1387 +//line sql.y:1395 { yySLICE := (*[]*ColumnDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].columnDefinitionUnion()) } - case 179: + case 177: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1393 +//line sql.y:1401 { yyLOCAL = &TableSpec{} yyLOCAL.AddColumn(yyDollar[1].columnDefinitionUnion()) } yyVAL.union = yyLOCAL - case 180: + case 178: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *TableSpec -//line sql.y:1398 +//line sql.y:1406 { yyLOCAL = &TableSpec{} yyLOCAL.AddConstraint(yyDollar[1].constraintDefinitionUnion()) } yyVAL.union = 
yyLOCAL - case 181: + case 179: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1403 +//line sql.y:1411 { yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) } - case 182: + case 180: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1407 +//line sql.y:1415 { yyVAL.tableSpecUnion().AddColumn(yyDollar[3].columnDefinitionUnion()) yyVAL.tableSpecUnion().AddConstraint(yyDollar[4].constraintDefinitionUnion()) } - case 183: + case 181: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1412 +//line sql.y:1420 { yyVAL.tableSpecUnion().AddIndex(yyDollar[3].indexDefinitionUnion()) } - case 184: + case 182: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1416 +//line sql.y:1424 { yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } - case 185: + case 183: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1420 +//line sql.y:1428 { yyVAL.tableSpecUnion().AddConstraint(yyDollar[3].constraintDefinitionUnion()) } - case 186: + case 184: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColumnDefinition -//line sql.y:1431 +//line sql.y:1439 { yyDollar[2].columnType.Options = yyDollar[4].columnTypeOptionsUnion() if yyDollar[2].columnType.Options.Collate == "" { @@ -11267,10 +11413,10 @@ yydefault: yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType} } yyVAL.union = yyLOCAL - case 187: + case 185: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL *ColumnDefinition -//line sql.y:1440 +//line sql.y:1448 { yyDollar[2].columnType.Options = yyDollar[9].columnTypeOptionsUnion() yyDollar[2].columnType.Options.As = yyDollar[7].exprUnion() @@ -11279,279 +11425,271 @@ yydefault: yyLOCAL = &ColumnDefinition{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType} } yyVAL.union = yyLOCAL - case 188: + case 186: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:1449 +//line sql.y:1457 { yyVAL.str = "" } - case 189: + case 187: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1453 +//line sql.y:1461 { yyVAL.str = "" } - case 190: + case 188: 
yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1462 +//line sql.y:1470 { yyLOCAL = &ColumnTypeOptions{Null: nil, Default: nil, OnUpdate: nil, Autoincrement: false, KeyOpt: ColKeyNone, Comment: nil, As: nil, Invisible: nil, Format: UnspecifiedFormat, EngineAttribute: nil, SecondaryEngineAttribute: nil} } yyVAL.union = yyLOCAL - case 191: + case 189: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1466 +//line sql.y:1474 { - val := true - yyDollar[1].columnTypeOptionsUnion().Null = &val + yyDollar[1].columnTypeOptionsUnion().Null = ptr.Of(true) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 192: + case 190: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1472 +//line sql.y:1479 { - val := false - yyDollar[1].columnTypeOptionsUnion().Null = &val + yyDollar[1].columnTypeOptionsUnion().Null = ptr.Of(false) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 193: + case 191: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1478 +//line sql.y:1484 { yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[4].exprUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 194: + case 192: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1483 +//line sql.y:1489 { yyDollar[1].columnTypeOptionsUnion().Default = yyDollar[3].exprUnion() yyDollar[1].columnTypeOptionsUnion().DefaultLiteral = true yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 195: + case 193: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1489 +//line sql.y:1495 { yyDollar[1].columnTypeOptionsUnion().OnUpdate = yyDollar[4].exprUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 196: + case 194: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1494 +//line sql.y:1500 { 
yyDollar[1].columnTypeOptionsUnion().Autoincrement = true yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 197: + case 195: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1499 +//line sql.y:1505 { yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 198: + case 196: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1504 +//line sql.y:1510 { yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 199: + case 197: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1509 +//line sql.y:1515 { yyDollar[1].columnTypeOptionsUnion().Collate = encodeSQLString(yyDollar[3].str) } - case 200: + case 198: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1513 +//line sql.y:1519 { yyDollar[1].columnTypeOptionsUnion().Collate = string(yyDollar[3].identifierCI.String()) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 201: + case 199: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1518 +//line sql.y:1524 { yyDollar[1].columnTypeOptionsUnion().Format = yyDollar[3].columnFormatUnion() } - case 202: + case 200: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1522 +//line sql.y:1528 { yyDollar[1].columnTypeOptionsUnion().SRID = NewIntLiteral(yyDollar[3].str) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 203: + case 201: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1527 +//line sql.y:1533 { - val := false - yyDollar[1].columnTypeOptionsUnion().Invisible = &val + yyDollar[1].columnTypeOptionsUnion().Invisible = ptr.Of(false) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 204: + case 202: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL 
*ColumnTypeOptions -//line sql.y:1533 +//line sql.y:1538 { - val := true - yyDollar[1].columnTypeOptionsUnion().Invisible = &val + yyDollar[1].columnTypeOptionsUnion().Invisible = ptr.Of(true) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 205: + case 203: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1539 +//line sql.y:1543 { yyDollar[1].columnTypeOptionsUnion().EngineAttribute = NewStrLiteral(yyDollar[4].str) } - case 206: + case 204: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:1543 +//line sql.y:1547 { yyDollar[1].columnTypeOptionsUnion().SecondaryEngineAttribute = NewStrLiteral(yyDollar[4].str) } - case 207: + case 205: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat -//line sql.y:1549 +//line sql.y:1553 { yyLOCAL = FixedFormat } yyVAL.union = yyLOCAL - case 208: + case 206: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat -//line sql.y:1553 +//line sql.y:1557 { yyLOCAL = DynamicFormat } yyVAL.union = yyLOCAL - case 209: + case 207: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnFormat -//line sql.y:1557 +//line sql.y:1561 { yyLOCAL = DefaultFormat } yyVAL.union = yyLOCAL - case 210: + case 208: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnStorage -//line sql.y:1563 +//line sql.y:1567 { yyLOCAL = VirtualStorage } yyVAL.union = yyLOCAL - case 211: + case 209: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnStorage -//line sql.y:1567 +//line sql.y:1571 { yyLOCAL = StoredStorage } yyVAL.union = yyLOCAL - case 212: + case 210: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1572 +//line sql.y:1576 { yyLOCAL = &ColumnTypeOptions{} } yyVAL.union = yyLOCAL - case 213: + case 211: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1576 +//line sql.y:1580 { yyDollar[1].columnTypeOptionsUnion().Storage = yyDollar[2].columnStorageUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 214: + case 212: yyDollar = yyS[yypt-2 : 
yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1581 +//line sql.y:1585 { - val := true - yyDollar[1].columnTypeOptionsUnion().Null = &val + yyDollar[1].columnTypeOptionsUnion().Null = ptr.Of(true) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 215: + case 213: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1587 +//line sql.y:1590 { - val := false - yyDollar[1].columnTypeOptionsUnion().Null = &val + yyDollar[1].columnTypeOptionsUnion().Null = ptr.Of(false) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 216: + case 214: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1593 +//line sql.y:1595 { yyDollar[1].columnTypeOptionsUnion().Comment = NewStrLiteral(yyDollar[3].str) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 217: + case 215: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1598 +//line sql.y:1600 { yyDollar[1].columnTypeOptionsUnion().KeyOpt = yyDollar[2].colKeyOptUnion() yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 218: + case 216: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1603 +//line sql.y:1605 { - val := false - yyDollar[1].columnTypeOptionsUnion().Invisible = &val + yyDollar[1].columnTypeOptionsUnion().Invisible = ptr.Of(false) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 219: + case 217: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColumnTypeOptions -//line sql.y:1609 +//line sql.y:1610 { - val := true - yyDollar[1].columnTypeOptionsUnion().Invisible = &val + yyDollar[1].columnTypeOptionsUnion().Invisible = ptr.Of(true) yyLOCAL = yyDollar[1].columnTypeOptionsUnion() } yyVAL.union = yyLOCAL - case 220: + case 218: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1617 @@ -11559,7 +11697,7 @@ yydefault: yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - 
case 222: + case 220: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1624 @@ -11567,7 +11705,7 @@ yydefault: yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_timestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 223: + case 221: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1628 @@ -11575,7 +11713,7 @@ yydefault: yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtime"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 224: + case 222: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1632 @@ -11583,7 +11721,7 @@ yydefault: yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("localtimestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 225: + case 223: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1636 @@ -11591,7 +11729,7 @@ yydefault: yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_timestamp"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 226: + case 224: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1640 @@ -11599,7 +11737,7 @@ yydefault: yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("now"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 227: + case 225: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1644 @@ -11607,7 +11745,7 @@ yydefault: yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("sysdate"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 230: + case 228: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1654 @@ -11615,7 +11753,7 @@ yydefault: yyLOCAL = &NullVal{} } yyVAL.union = yyLOCAL - case 232: + case 230: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1661 @@ -11623,7 +11761,7 @@ yydefault: yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 233: + case 231: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1665 @@ -11631,7 +11769,7 @@ yydefault: yyLOCAL = &UnaryExpr{Operator: 
UMinusOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 234: + case 232: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1671 @@ -11639,7 +11777,7 @@ yydefault: yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 235: + case 233: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1675 @@ -11647,7 +11785,7 @@ yydefault: yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 236: + case 234: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1679 @@ -11655,7 +11793,7 @@ yydefault: yyLOCAL = yyDollar[1].boolValUnion() } yyVAL.union = yyLOCAL - case 237: + case 235: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1683 @@ -11663,7 +11801,7 @@ yydefault: yyLOCAL = NewHexLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 238: + case 236: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1687 @@ -11671,7 +11809,7 @@ yydefault: yyLOCAL = NewHexNumLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 239: + case 237: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1691 @@ -11679,7 +11817,7 @@ yydefault: yyLOCAL = NewBitLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 240: + case 238: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1695 @@ -11687,7 +11825,7 @@ yydefault: yyLOCAL = NewBitLiteral("0b" + yyDollar[1].str) } yyVAL.union = yyLOCAL - case 241: + case 239: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1699 @@ -11695,7 +11833,7 @@ yydefault: yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 242: + case 240: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1703 @@ -11703,7 +11841,7 @@ yydefault: yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral("0b" + yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 243: + case 241: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1707 @@ -11711,7 +11849,7 @@ yydefault: yyLOCAL = 
&IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexNumLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 244: + case 242: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1711 @@ -11719,7 +11857,7 @@ yydefault: yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewBitLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 245: + case 243: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1715 @@ -11727,7 +11865,7 @@ yydefault: yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewHexLiteral(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 246: + case 244: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1719 @@ -11735,7 +11873,7 @@ yydefault: yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 247: + case 245: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1723 @@ -11744,7 +11882,7 @@ yydefault: yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: arg} } yyVAL.union = yyLOCAL - case 248: + case 246: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1728 @@ -11752,7 +11890,7 @@ yydefault: yyLOCAL = NewDateLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 249: + case 247: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1732 @@ -11760,7 +11898,7 @@ yydefault: yyLOCAL = NewTimeLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 250: + case 248: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr //line sql.y:1736 @@ -11768,259 +11906,259 @@ yydefault: yyLOCAL = NewTimestampLiteral(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 251: + case 249: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1742 { yyVAL.str = Armscii8Str } - case 252: + case 250: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1746 { yyVAL.str = ASCIIStr } - case 253: + case 251: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1750 { yyVAL.str = Big5Str } - case 254: + case 252: yyDollar = yyS[yypt-1 : yypt+1] 
//line sql.y:1754 { yyVAL.str = UBinaryStr } - case 255: + case 253: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1758 { yyVAL.str = Cp1250Str } - case 256: + case 254: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1762 { yyVAL.str = Cp1251Str } - case 257: + case 255: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1766 { yyVAL.str = Cp1256Str } - case 258: + case 256: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1770 { yyVAL.str = Cp1257Str } - case 259: + case 257: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1774 { yyVAL.str = Cp850Str } - case 260: + case 258: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1778 { yyVAL.str = Cp852Str } - case 261: + case 259: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1782 { yyVAL.str = Cp866Str } - case 262: + case 260: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1786 { yyVAL.str = Cp932Str } - case 263: + case 261: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1790 { yyVAL.str = Dec8Str } - case 264: + case 262: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1794 { yyVAL.str = EucjpmsStr } - case 265: + case 263: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1798 { yyVAL.str = EuckrStr } - case 266: + case 264: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1802 { yyVAL.str = Gb18030Str } - case 267: + case 265: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1806 { yyVAL.str = Gb2312Str } - case 268: + case 266: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1810 { yyVAL.str = GbkStr } - case 269: + case 267: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1814 { yyVAL.str = Geostd8Str } - case 270: + case 268: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1818 { yyVAL.str = GreekStr } - case 271: + case 269: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1822 { yyVAL.str = HebrewStr } - case 272: + case 270: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1826 { yyVAL.str = Hp8Str } - case 273: + case 271: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1830 { yyVAL.str = Keybcs2Str } - case 274: + case 272: yyDollar = yyS[yypt-1 : yypt+1] //line 
sql.y:1834 { yyVAL.str = Koi8rStr } - case 275: + case 273: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1838 { yyVAL.str = Koi8uStr } - case 276: + case 274: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1842 { yyVAL.str = Latin1Str } - case 277: + case 275: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1846 { yyVAL.str = Latin2Str } - case 278: + case 276: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1850 { yyVAL.str = Latin5Str } - case 279: + case 277: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1854 { yyVAL.str = Latin7Str } - case 280: + case 278: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1858 { yyVAL.str = MacceStr } - case 281: + case 279: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1862 { yyVAL.str = MacromanStr } - case 282: + case 280: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1866 { yyVAL.str = SjisStr } - case 283: + case 281: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1870 { yyVAL.str = Swe7Str } - case 284: + case 282: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1874 { yyVAL.str = Tis620Str } - case 285: + case 283: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1878 { yyVAL.str = Ucs2Str } - case 286: + case 284: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1882 { yyVAL.str = UjisStr } - case 287: + case 285: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1886 { yyVAL.str = Utf16Str } - case 288: + case 286: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1890 { yyVAL.str = Utf16leStr } - case 289: + case 287: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1894 { yyVAL.str = Utf32Str } - case 290: + case 288: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1898 { yyVAL.str = Utf8mb3Str } - case 291: + case 289: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1902 { yyVAL.str = Utf8mb4Str } - case 292: + case 290: yyDollar = yyS[yypt-1 : yypt+1] //line sql.y:1906 { yyVAL.str = Utf8mb3Str } - case 295: + case 293: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1916 @@ -12028,7 +12166,7 @@ yydefault: yyLOCAL = NewIntLiteral(yyDollar[1].str) } 
yyVAL.union = yyLOCAL - case 296: + case 294: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1920 @@ -12036,7 +12174,7 @@ yydefault: yyLOCAL = NewFloatLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 297: + case 295: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1924 @@ -12044,10 +12182,26 @@ yydefault: yyLOCAL = NewDecimalLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 298: + case 296: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr //line sql.y:1930 + { + yyLOCAL = yyDollar[1].exprUnion() + } + yyVAL.union = yyLOCAL + case 297: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Expr +//line sql.y:1934 + { + yyLOCAL = AppendString(yyDollar[1].exprUnion(), yyDollar[2].str) + } + yyVAL.union = yyLOCAL + case 298: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL Expr +//line sql.y:1940 { yyLOCAL = NewStrLiteral(yyDollar[1].str) } @@ -12055,7 +12209,7 @@ yydefault: case 299: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1934 +//line sql.y:1944 { yyLOCAL = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral(yyDollar[1].str)} } @@ -12063,7 +12217,7 @@ yydefault: case 300: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:1938 +//line sql.y:1948 { yyLOCAL = &IntroducerExpr{CharacterSet: yyDollar[1].str, Expr: NewStrLiteral(yyDollar[2].str)} } @@ -12071,7 +12225,7 @@ yydefault: case 301: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1944 +//line sql.y:1954 { yyLOCAL = yyDollar[1].exprUnion() } @@ -12079,7 +12233,7 @@ yydefault: case 302: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:1948 +//line sql.y:1958 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } @@ -12087,7 +12241,7 @@ yydefault: case 303: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1954 +//line sql.y:1964 { yyLOCAL = ColKeyPrimary } @@ -12095,7 +12249,7 @@ yydefault: case 304: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1958 +//line 
sql.y:1968 { yyLOCAL = ColKeyUnique } @@ -12103,7 +12257,7 @@ yydefault: case 305: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1962 +//line sql.y:1972 { yyLOCAL = ColKeyUniqueKey } @@ -12111,14 +12265,14 @@ yydefault: case 306: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColumnKeyOption -//line sql.y:1966 +//line sql.y:1976 { yyLOCAL = ColKey } yyVAL.union = yyLOCAL case 307: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:1972 +//line sql.y:1982 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Unsigned = yyDollar[2].booleanUnion() @@ -12126,74 +12280,74 @@ yydefault: } case 311: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:1983 +//line sql.y:1993 { yyVAL.columnType = yyDollar[1].columnType - yyVAL.columnType.Length = yyDollar[2].literalUnion() + yyVAL.columnType.Length = yyDollar[2].intPtrUnion() } case 312: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1988 +//line sql.y:1998 { yyVAL.columnType = yyDollar[1].columnType } case 313: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1994 +//line sql.y:2004 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 314: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:1998 +//line sql.y:2008 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 315: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2002 +//line sql.y:2012 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 316: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2006 +//line sql.y:2016 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 317: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2010 +//line sql.y:2020 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 318: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2014 +//line sql.y:2024 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 319: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2018 +//line sql.y:2028 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } 
case 320: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2022 +//line sql.y:2032 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 321: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2026 +//line sql.y:2036 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 322: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2032 +//line sql.y:2042 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12201,7 +12355,7 @@ yydefault: } case 323: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2038 +//line sql.y:2048 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12209,7 +12363,7 @@ yydefault: } case 324: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2044 +//line sql.y:2054 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12217,7 +12371,7 @@ yydefault: } case 325: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2050 +//line sql.y:2060 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12225,7 +12379,7 @@ yydefault: } case 326: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2056 +//line sql.y:2066 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12233,7 +12387,7 @@ yydefault: } case 327: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2062 +//line sql.y:2072 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12241,7 +12395,7 @@ yydefault: } case 328: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2068 +//line sql.y:2078 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length @@ -12249,265 +12403,265 @@ 
yydefault: } case 329: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2076 +//line sql.y:2086 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 330: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2080 +//line sql.y:2090 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } case 331: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2084 +//line sql.y:2094 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } case 332: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2088 +//line sql.y:2098 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } case 333: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2092 +//line sql.y:2102 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } case 334: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2098 +//line sql.y:2108 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion(), Charset: yyDollar[3].columnCharset} } case 335: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2102 +//line sql.y:2112 { // CHAR BYTE is an alias for binary. 
See also: // https://dev.mysql.com/doc/refman/8.0/en/string-type-syntax.html - yyVAL.columnType = &ColumnType{Type: "binary", Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: "binary", Length: yyDollar[2].intPtrUnion()} } case 336: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2108 +//line sql.y:2118 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion(), Charset: yyDollar[3].columnCharset} } case 337: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2112 +//line sql.y:2122 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } case 338: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2116 +//line sql.y:2126 { - yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } case 339: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2120 +//line sql.y:2130 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 340: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2124 +//line sql.y:2134 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 341: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2128 +//line sql.y:2138 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 342: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2132 +//line sql.y:2142 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), Charset: yyDollar[2].columnCharset} } case 343: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2136 +//line sql.y:2146 { yyVAL.columnType = 
&ColumnType{Type: string(yyDollar[1].str)} } case 344: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2140 +//line sql.y:2150 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 345: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2144 +//line sql.y:2154 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 346: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2148 +//line sql.y:2158 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 347: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2152 +//line sql.y:2162 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 348: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2156 +//line sql.y:2166 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} } case 349: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2161 +//line sql.y:2171 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].columnCharset} } case 350: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2167 +//line sql.y:2177 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 351: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2171 +//line sql.y:2181 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 352: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2175 +//line sql.y:2185 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 353: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2179 +//line sql.y:2189 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 354: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2183 +//line sql.y:2193 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 355: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2187 +//line sql.y:2197 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 356: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2191 +//line 
sql.y:2201 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 357: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2195 +//line sql.y:2205 { yyVAL.columnType = &ColumnType{Type: string(yyDollar[1].str)} } case 358: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2201 +//line sql.y:2211 { yyVAL.strs = make([]string, 0, 4) yyVAL.strs = append(yyVAL.strs, encodeSQLString(yyDollar[1].str)) } case 359: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2206 +//line sql.y:2216 { yyVAL.strs = append(yyDollar[1].strs, encodeSQLString(yyDollar[3].str)) } case 360: yyDollar = yyS[yypt-0 : yypt+1] - var yyLOCAL *Literal -//line sql.y:2211 + var yyLOCAL *int +//line sql.y:2221 { yyLOCAL = nil } yyVAL.union = yyLOCAL case 361: yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL *Literal -//line sql.y:2215 + var yyLOCAL *int +//line sql.y:2225 { - yyLOCAL = NewIntLiteral(yyDollar[2].str) + yyLOCAL = ptr.Of(convertStringToInt(yyDollar[2].str)) } yyVAL.union = yyLOCAL case 362: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2220 +//line sql.y:2230 { yyVAL.LengthScaleOption = LengthScaleOption{} } case 363: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2224 +//line sql.y:2234 { yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].str), - Scale: NewIntLiteral(yyDollar[4].str), + Length: ptr.Of(convertStringToInt(yyDollar[2].str)), + Scale: ptr.Of(convertStringToInt(yyDollar[4].str)), } } case 364: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2233 +//line sql.y:2243 { yyVAL.LengthScaleOption = yyDollar[1].LengthScaleOption } case 365: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2237 +//line sql.y:2247 { yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].str), + Length: ptr.Of(convertStringToInt(yyDollar[2].str)), } } case 366: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2244 +//line sql.y:2254 { yyVAL.LengthScaleOption = LengthScaleOption{} } case 367: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2248 +//line 
sql.y:2258 { yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].str), + Length: ptr.Of(convertStringToInt(yyDollar[2].str)), } } case 368: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2254 +//line sql.y:2264 { yyVAL.LengthScaleOption = LengthScaleOption{ - Length: NewIntLiteral(yyDollar[2].str), - Scale: NewIntLiteral(yyDollar[4].str), + Length: ptr.Of(convertStringToInt(yyDollar[2].str)), + Scale: ptr.Of(convertStringToInt(yyDollar[4].str)), } } case 369: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2262 +//line sql.y:2272 { yyLOCAL = false } @@ -12515,7 +12669,7 @@ yydefault: case 370: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2266 +//line sql.y:2276 { yyLOCAL = true } @@ -12523,7 +12677,7 @@ yydefault: case 371: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2270 +//line sql.y:2280 { yyLOCAL = false } @@ -12531,7 +12685,7 @@ yydefault: case 372: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2275 +//line sql.y:2285 { yyLOCAL = false } @@ -12539,66 +12693,66 @@ yydefault: case 373: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2279 +//line sql.y:2289 { yyLOCAL = true } yyVAL.union = yyLOCAL case 374: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2284 +//line sql.y:2294 { yyVAL.columnCharset = ColumnCharset{} } case 375: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2288 +//line sql.y:2298 { yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].identifierCI.String()), Binary: yyDollar[3].booleanUnion()} } case 376: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2292 +//line sql.y:2302 { yyVAL.columnCharset = ColumnCharset{Name: encodeSQLString(yyDollar[2].str), Binary: yyDollar[3].booleanUnion()} } case 377: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2296 +//line sql.y:2306 { yyVAL.columnCharset = ColumnCharset{Name: string(yyDollar[2].str)} } case 378: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2300 +//line sql.y:2310 { // ASCII: 
Shorthand for CHARACTER SET latin1. yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: yyDollar[2].booleanUnion()} } case 379: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2305 +//line sql.y:2315 { // UNICODE: Shorthand for CHARACTER SET ucs2. yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: yyDollar[2].booleanUnion()} } case 380: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2310 +//line sql.y:2320 { // BINARY: Shorthand for default CHARACTER SET but with binary collation yyVAL.columnCharset = ColumnCharset{Name: "", Binary: true} } case 381: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2315 +//line sql.y:2325 { // BINARY ASCII: Shorthand for CHARACTER SET latin1 with binary collation yyVAL.columnCharset = ColumnCharset{Name: "latin1", Binary: true} } case 382: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2320 +//line sql.y:2330 { // BINARY UNICODE: Shorthand for CHARACTER SET ucs2 with binary collation yyVAL.columnCharset = ColumnCharset{Name: "ucs2", Binary: true} @@ -12606,7 +12760,7 @@ yydefault: case 383: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2326 +//line sql.y:2336 { yyLOCAL = false } @@ -12614,33 +12768,33 @@ yydefault: case 384: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2330 +//line sql.y:2340 { yyLOCAL = true } yyVAL.union = yyLOCAL case 385: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2335 +//line sql.y:2345 { yyVAL.str = "" } case 386: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2339 +//line sql.y:2349 { yyVAL.str = string(yyDollar[2].identifierCI.String()) } case 387: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2343 +//line sql.y:2353 { yyVAL.str = encodeSQLString(yyDollar[2].str) } case 388: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *IndexDefinition -//line sql.y:2349 +//line sql.y:2359 { yyLOCAL = &IndexDefinition{Info: yyDollar[1].indexInfoUnion(), Columns: yyDollar[3].indexColumnsUnion(), Options: yyDollar[5].indexOptionsUnion()} } @@ -12648,7 +12802,7 @@ yydefault: case 
389: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2354 +//line sql.y:2364 { yyLOCAL = nil } @@ -12656,7 +12810,7 @@ yydefault: case 390: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2358 +//line sql.y:2368 { yyLOCAL = yyDollar[1].indexOptionsUnion() } @@ -12664,14 +12818,14 @@ yydefault: case 391: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:2364 +//line sql.y:2374 { yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } yyVAL.union = yyLOCAL case 392: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2368 +//line sql.y:2378 { yySLICE := (*[]*IndexOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].indexOptionUnion()) @@ -12679,7 +12833,7 @@ yydefault: case 393: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2374 +//line sql.y:2384 { yyLOCAL = yyDollar[1].indexOptionUnion() } @@ -12687,7 +12841,7 @@ yydefault: case 394: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2378 +//line sql.y:2388 { // should not be string yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} @@ -12696,7 +12850,7 @@ yydefault: case 395: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2383 +//line sql.y:2393 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[2].str)} } @@ -12704,7 +12858,7 @@ yydefault: case 396: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2387 +//line sql.y:2397 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)} } @@ -12712,7 +12866,7 @@ yydefault: case 397: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2391 +//line sql.y:2401 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str)} } @@ -12720,7 +12874,7 @@ yydefault: case 398: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2395 +//line sql.y:2405 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str) 
+ " " + string(yyDollar[2].str), String: yyDollar[3].identifierCI.String()} } @@ -12728,7 +12882,7 @@ yydefault: case 399: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2399 +//line sql.y:2409 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -12736,27 +12890,27 @@ yydefault: case 400: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:2403 +//line sql.y:2413 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL case 401: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2409 +//line sql.y:2419 { yyVAL.str = "" } case 402: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2413 +//line sql.y:2423 { yyVAL.str = string(yyDollar[1].str) } case 403: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2419 +//line sql.y:2429 { yyLOCAL = &IndexInfo{Type: IndexTypePrimary, ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI("PRIMARY")} } @@ -12764,7 +12918,7 @@ yydefault: case 404: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2423 +//line sql.y:2433 { yyLOCAL = &IndexInfo{Type: IndexTypeSpatial, Name: NewIdentifierCI(yyDollar[3].str)} } @@ -12772,7 +12926,7 @@ yydefault: case 405: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2427 +//line sql.y:2437 { yyLOCAL = &IndexInfo{Type: IndexTypeFullText, Name: NewIdentifierCI(yyDollar[3].str)} } @@ -12780,7 +12934,7 @@ yydefault: case 406: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2431 +//line sql.y:2441 { yyLOCAL = &IndexInfo{Type: IndexTypeUnique, ConstraintName: NewIdentifierCI(yyDollar[1].str), Name: NewIdentifierCI(yyDollar[4].str)} } @@ -12788,100 +12942,100 @@ yydefault: case 407: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexInfo -//line sql.y:2435 +//line sql.y:2445 { yyLOCAL = &IndexInfo{Type: IndexTypeDefault, Name: NewIdentifierCI(yyDollar[2].str)} } 
yyVAL.union = yyLOCAL case 408: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2440 +//line sql.y:2450 { yyVAL.str = "" } case 409: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2444 +//line sql.y:2454 { yyVAL.str = yyDollar[2].str } case 410: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2450 +//line sql.y:2460 { yyVAL.str = string(yyDollar[1].str) } case 411: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2454 +//line sql.y:2464 { yyVAL.str = string(yyDollar[1].str) } case 412: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2458 +//line sql.y:2468 { yyVAL.str = string(yyDollar[1].str) } case 413: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2464 +//line sql.y:2474 { yyVAL.str = string(yyDollar[1].str) } case 414: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2468 +//line sql.y:2478 { yyVAL.str = string(yyDollar[1].str) } case 415: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2473 +//line sql.y:2483 { yyVAL.str = "" } case 416: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2477 +//line sql.y:2487 { yyVAL.str = yyDollar[1].str } case 417: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2483 +//line sql.y:2493 { yyVAL.str = string(yyDollar[1].str) } case 418: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2487 +//line sql.y:2497 { yyVAL.str = string(yyDollar[1].str) } case 419: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2492 +//line sql.y:2502 { yyVAL.str = "" } case 420: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2496 +//line sql.y:2506 { yyVAL.str = string(yyDollar[1].identifierCI.String()) } case 421: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexColumn -//line sql.y:2502 +//line sql.y:2512 { yyLOCAL = []*IndexColumn{yyDollar[1].indexColumnUnion()} } yyVAL.union = yyLOCAL case 422: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2506 +//line sql.y:2516 { yySLICE := (*[]*IndexColumn)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].indexColumnUnion()) @@ -12889,15 +13043,15 @@ yydefault: case 423: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
*IndexColumn -//line sql.y:2512 +//line sql.y:2522 { - yyLOCAL = &IndexColumn{Column: yyDollar[1].identifierCI, Length: yyDollar[2].literalUnion(), Direction: yyDollar[3].orderDirectionUnion()} + yyLOCAL = &IndexColumn{Column: yyDollar[1].identifierCI, Length: yyDollar[2].intPtrUnion(), Direction: yyDollar[3].orderDirectionUnion()} } yyVAL.union = yyLOCAL case 424: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *IndexColumn -//line sql.y:2516 +//line sql.y:2526 { yyLOCAL = &IndexColumn{Expression: yyDollar[2].exprUnion(), Direction: yyDollar[4].orderDirectionUnion()} } @@ -12905,7 +13059,7 @@ yydefault: case 425: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2522 +//line sql.y:2532 { yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()} } @@ -12913,7 +13067,7 @@ yydefault: case 426: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2526 +//line sql.y:2536 { yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } @@ -12921,7 +13075,7 @@ yydefault: case 427: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2532 +//line sql.y:2542 { yyLOCAL = &ConstraintDefinition{Name: yyDollar[2].identifierCI, Details: yyDollar[3].constraintInfoUnion()} } @@ -12929,7 +13083,7 @@ yydefault: case 428: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConstraintDefinition -//line sql.y:2536 +//line sql.y:2546 { yyLOCAL = &ConstraintDefinition{Details: yyDollar[1].constraintInfoUnion()} } @@ -12937,7 +13091,7 @@ yydefault: case 429: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL ConstraintInfo -//line sql.y:2542 +//line sql.y:2552 { yyLOCAL = &ForeignKeyDefinition{IndexName: NewIdentifierCI(yyDollar[3].str), Source: yyDollar[5].columnsUnion(), ReferenceDefinition: yyDollar[7].referenceDefinitionUnion()} } @@ -12945,7 +13099,7 @@ yydefault: case 430: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *ReferenceDefinition -//line 
sql.y:2548 +//line sql.y:2558 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion()} } @@ -12953,7 +13107,7 @@ yydefault: case 431: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2552 +//line sql.y:2562 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion()} } @@ -12961,7 +13115,7 @@ yydefault: case 432: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2556 +//line sql.y:2566 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion()} } @@ -12969,7 +13123,7 @@ yydefault: case 433: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2560 +//line sql.y:2570 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnDelete: yyDollar[7].referenceActionUnion(), OnUpdate: yyDollar[8].referenceActionUnion()} } @@ -12977,7 +13131,7 @@ yydefault: case 434: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2564 +//line sql.y:2574 { yyLOCAL = &ReferenceDefinition{ReferencedTable: yyDollar[2].tableName, ReferencedColumns: yyDollar[4].columnsUnion(), Match: yyDollar[6].matchActionUnion(), OnUpdate: yyDollar[7].referenceActionUnion(), OnDelete: yyDollar[8].referenceActionUnion()} } @@ -12985,7 +13139,7 @@ yydefault: case 435: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2569 +//line sql.y:2579 { yyLOCAL = nil } @@ -12993,7 +13147,7 @@ yydefault: case 436: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ReferenceDefinition -//line sql.y:2573 
+//line sql.y:2583 { yyLOCAL = yyDollar[1].referenceDefinitionUnion() } @@ -13001,7 +13155,7 @@ yydefault: case 437: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL ConstraintInfo -//line sql.y:2579 +//line sql.y:2589 { yyLOCAL = &CheckConstraintDefinition{Expr: yyDollar[3].exprUnion(), Enforced: yyDollar[5].booleanUnion()} } @@ -13009,7 +13163,7 @@ yydefault: case 438: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2585 +//line sql.y:2595 { yyLOCAL = yyDollar[2].matchActionUnion() } @@ -13017,7 +13171,7 @@ yydefault: case 439: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2591 +//line sql.y:2601 { yyLOCAL = Full } @@ -13025,7 +13179,7 @@ yydefault: case 440: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2595 +//line sql.y:2605 { yyLOCAL = Partial } @@ -13033,7 +13187,7 @@ yydefault: case 441: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2599 +//line sql.y:2609 { yyLOCAL = Simple } @@ -13041,7 +13195,7 @@ yydefault: case 442: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2604 +//line sql.y:2614 { yyLOCAL = DefaultMatch } @@ -13049,7 +13203,7 @@ yydefault: case 443: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL MatchAction -//line sql.y:2608 +//line sql.y:2618 { yyLOCAL = yyDollar[1].matchActionUnion() } @@ -13057,7 +13211,7 @@ yydefault: case 444: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2614 +//line sql.y:2624 { yyLOCAL = yyDollar[3].referenceActionUnion() } @@ -13065,7 +13219,7 @@ yydefault: case 445: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2620 +//line sql.y:2630 { yyLOCAL = yyDollar[3].referenceActionUnion() } @@ -13073,7 +13227,7 @@ yydefault: case 446: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2626 +//line sql.y:2636 { yyLOCAL = Restrict } @@ -13081,7 +13235,7 @@ yydefault: case 447: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ReferenceAction -//line 
sql.y:2630 +//line sql.y:2640 { yyLOCAL = Cascade } @@ -13089,7 +13243,7 @@ yydefault: case 448: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2634 +//line sql.y:2644 { yyLOCAL = NoAction } @@ -13097,7 +13251,7 @@ yydefault: case 449: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2638 +//line sql.y:2648 { yyLOCAL = SetDefault } @@ -13105,33 +13259,33 @@ yydefault: case 450: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ReferenceAction -//line sql.y:2642 +//line sql.y:2652 { yyLOCAL = SetNull } yyVAL.union = yyLOCAL case 451: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2647 +//line sql.y:2657 { yyVAL.str = "" } case 452: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2651 +//line sql.y:2661 { yyVAL.str = string(yyDollar[1].str) } case 453: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2655 +//line sql.y:2665 { yyVAL.str = string(yyDollar[1].str) } case 454: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2661 +//line sql.y:2671 { yyLOCAL = true } @@ -13139,7 +13293,7 @@ yydefault: case 455: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:2665 +//line sql.y:2675 { yyLOCAL = false } @@ -13147,7 +13301,7 @@ yydefault: case 456: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2670 +//line sql.y:2680 { yyLOCAL = true } @@ -13155,7 +13309,7 @@ yydefault: case 457: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2674 +//line sql.y:2684 { yyLOCAL = yyDollar[1].booleanUnion() } @@ -13163,7 +13317,7 @@ yydefault: case 458: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2679 +//line sql.y:2689 { yyLOCAL = nil } @@ -13171,7 +13325,7 @@ yydefault: case 459: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2683 +//line sql.y:2693 { yyLOCAL = yyDollar[1].tableOptionsUnion() } @@ -13179,21 +13333,21 @@ yydefault: case 460: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2689 +//line sql.y:2699 { 
yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } yyVAL.union = yyLOCAL case 461: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2693 +//line sql.y:2703 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableOptionUnion()) } case 462: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2697 +//line sql.y:2707 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) @@ -13201,14 +13355,14 @@ yydefault: case 463: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableOptions -//line sql.y:2703 +//line sql.y:2713 { yyLOCAL = TableOptions{yyDollar[1].tableOptionUnion()} } yyVAL.union = yyLOCAL case 464: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2707 +//line sql.y:2717 { yySLICE := (*TableOptions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].tableOptionUnion()) @@ -13216,7 +13370,7 @@ yydefault: case 465: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2713 +//line sql.y:2723 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13224,7 +13378,7 @@ yydefault: case 466: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2717 +//line sql.y:2727 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13232,7 +13386,7 @@ yydefault: case 467: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2721 +//line sql.y:2731 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13240,7 +13394,7 @@ yydefault: case 468: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2725 +//line sql.y:2735 { yyLOCAL = &TableOption{Name: (string(yyDollar[2].str)), String: yyDollar[4].str, CaseSensitive: true} } @@ -13248,7 +13402,7 @@ yydefault: case 469: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2729 +//line sql.y:2739 { yyLOCAL = 
&TableOption{Name: string(yyDollar[2].str), String: yyDollar[4].str, CaseSensitive: true} } @@ -13256,7 +13410,7 @@ yydefault: case 470: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2733 +//line sql.y:2743 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13264,7 +13418,7 @@ yydefault: case 471: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2737 +//line sql.y:2747 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13272,7 +13426,7 @@ yydefault: case 472: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2741 +//line sql.y:2751 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13280,7 +13434,7 @@ yydefault: case 473: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2745 +//line sql.y:2755 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13288,7 +13442,7 @@ yydefault: case 474: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2749 +//line sql.y:2759 { yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } @@ -13296,7 +13450,7 @@ yydefault: case 475: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2753 +//line sql.y:2763 { yyLOCAL = &TableOption{Name: (string(yyDollar[1].str) + " " + string(yyDollar[2].str)), Value: NewStrLiteral(yyDollar[4].str)} } @@ -13304,7 +13458,7 @@ yydefault: case 476: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2757 +//line sql.y:2767 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13312,7 +13466,7 @@ yydefault: case 477: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2761 +//line sql.y:2771 { yyLOCAL = &TableOption{Name: 
string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13320,7 +13474,7 @@ yydefault: case 478: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2765 +//line sql.y:2775 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: yyDollar[3].identifierCS.String(), CaseSensitive: true} } @@ -13328,7 +13482,7 @@ yydefault: case 479: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2769 +//line sql.y:2779 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13336,7 +13490,7 @@ yydefault: case 480: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2773 +//line sql.y:2783 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } @@ -13344,7 +13498,7 @@ yydefault: case 481: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2777 +//line sql.y:2787 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13352,7 +13506,7 @@ yydefault: case 482: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2781 +//line sql.y:2791 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13360,7 +13514,7 @@ yydefault: case 483: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2785 +//line sql.y:2795 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13368,7 +13522,7 @@ yydefault: case 484: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2789 +//line sql.y:2799 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13376,7 +13530,7 @@ yydefault: case 485: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2793 +//line sql.y:2803 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } @@ -13384,7 +13538,7 @@ 
yydefault: case 486: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2797 +//line sql.y:2807 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13392,7 +13546,7 @@ yydefault: case 487: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2801 +//line sql.y:2811 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } @@ -13400,7 +13554,7 @@ yydefault: case 488: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2805 +//line sql.y:2815 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewStrLiteral(yyDollar[3].str)} } @@ -13408,7 +13562,7 @@ yydefault: case 489: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2809 +//line sql.y:2819 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13416,7 +13570,7 @@ yydefault: case 490: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2813 +//line sql.y:2823 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } @@ -13424,7 +13578,7 @@ yydefault: case 491: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2817 +//line sql.y:2827 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13432,7 +13586,7 @@ yydefault: case 492: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2821 +//line sql.y:2831 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: string(yyDollar[3].str)} } @@ -13440,7 +13594,7 @@ yydefault: case 493: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2825 +//line sql.y:2835 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Value: NewIntLiteral(yyDollar[3].str)} } @@ -13448,7 +13602,7 @@ yydefault: case 494: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2829 +//line sql.y:2839 { 
yyLOCAL = &TableOption{Name: string(yyDollar[1].str), String: (yyDollar[3].identifierCI.String() + yyDollar[4].str), CaseSensitive: true} } @@ -13456,63 +13610,63 @@ yydefault: case 495: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *TableOption -//line sql.y:2833 +//line sql.y:2843 { yyLOCAL = &TableOption{Name: string(yyDollar[1].str), Tables: yyDollar[4].tableNamesUnion()} } yyVAL.union = yyLOCAL case 496: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2838 +//line sql.y:2848 { yyVAL.str = "" } case 497: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2842 +//line sql.y:2852 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 498: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2846 +//line sql.y:2856 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 508: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2865 +//line sql.y:2875 { yyVAL.str = String(TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}) } case 509: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2869 +//line sql.y:2879 { yyVAL.str = yyDollar[1].identifierCI.String() } case 510: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2873 +//line sql.y:2883 { yyVAL.str = encodeSQLString(yyDollar[1].str) } case 511: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:2877 +//line sql.y:2887 { yyVAL.str = string(yyDollar[1].str) } case 512: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2882 +//line sql.y:2892 { yyVAL.str = "" } case 514: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:2888 +//line sql.y:2898 { yyLOCAL = false } @@ -13520,7 +13674,7 @@ yydefault: case 515: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:2892 +//line sql.y:2902 { yyLOCAL = true } @@ -13528,7 +13682,7 @@ yydefault: case 516: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ColName -//line sql.y:2897 +//line sql.y:2907 { yyLOCAL = nil } @@ -13536,27 +13690,27 @@ yydefault: case 517: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ColName 
-//line sql.y:2901 +//line sql.y:2911 { yyLOCAL = yyDollar[2].colNameUnion() } yyVAL.union = yyLOCAL case 518: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:2906 +//line sql.y:2916 { yyVAL.str = "" } case 519: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:2910 +//line sql.y:2920 { yyVAL.str = string(yyDollar[2].str) } case 520: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Literal -//line sql.y:2915 +//line sql.y:2925 { yyLOCAL = nil } @@ -13564,7 +13718,7 @@ yydefault: case 521: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Literal -//line sql.y:2919 +//line sql.y:2929 { yyLOCAL = NewIntLiteral(yyDollar[2].str) } @@ -13572,7 +13726,7 @@ yydefault: case 522: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Literal -//line sql.y:2923 +//line sql.y:2933 { yyLOCAL = NewDecimalLiteral(yyDollar[2].str) } @@ -13580,7 +13734,7 @@ yydefault: case 523: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2928 +//line sql.y:2938 { yyLOCAL = nil } @@ -13588,14 +13742,14 @@ yydefault: case 524: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2932 +//line sql.y:2942 { yyLOCAL = yyDollar[1].alterOptionsUnion() } yyVAL.union = yyLOCAL case 525: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:2936 +//line sql.y:2946 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, &OrderByOption{Cols: yyDollar[5].columnsUnion()}) @@ -13603,14 +13757,14 @@ yydefault: case 526: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2940 +//line sql.y:2950 { yyLOCAL = yyDollar[1].alterOptionsUnion() } yyVAL.union = yyLOCAL case 527: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2944 +//line sql.y:2954 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionsUnion()...) 
@@ -13618,7 +13772,7 @@ yydefault: case 528: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2948 +//line sql.y:2958 { yyLOCAL = append(append(yyDollar[1].alterOptionsUnion(), yyDollar[3].alterOptionsUnion()...), &OrderByOption{Cols: yyDollar[7].columnsUnion()}) } @@ -13626,21 +13780,21 @@ yydefault: case 529: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:2954 +//line sql.y:2964 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL case 530: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2958 +//line sql.y:2968 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) } case 531: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:2962 +//line sql.y:2972 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) @@ -13648,7 +13802,7 @@ yydefault: case 532: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2968 +//line sql.y:2978 { yyLOCAL = yyDollar[1].tableOptionsUnion() } @@ -13656,7 +13810,7 @@ yydefault: case 533: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2972 +//line sql.y:2982 { yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } @@ -13664,7 +13818,7 @@ yydefault: case 534: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2976 +//line sql.y:2986 { yyLOCAL = &AddConstraintDefinition{ConstraintDefinition: yyDollar[2].constraintDefinitionUnion()} } @@ -13672,7 +13826,7 @@ yydefault: case 535: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2980 +//line sql.y:2990 { yyLOCAL = &AddIndexDefinition{IndexDefinition: yyDollar[2].indexDefinitionUnion()} } @@ -13680,7 +13834,7 @@ yydefault: case 536: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2984 +//line sql.y:2994 { yyLOCAL = &AddColumns{Columns: 
yyDollar[4].columnDefinitionsUnion()} } @@ -13688,7 +13842,7 @@ yydefault: case 537: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2988 +//line sql.y:2998 { yyLOCAL = &AddColumns{Columns: []*ColumnDefinition{yyDollar[3].columnDefinitionUnion()}, First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()} } @@ -13696,7 +13850,7 @@ yydefault: case 538: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2992 +//line sql.y:3002 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: true} } @@ -13704,7 +13858,7 @@ yydefault: case 539: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL AlterOption -//line sql.y:2996 +//line sql.y:3006 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[6].exprUnion(), DefaultLiteral: true} } @@ -13712,7 +13866,7 @@ yydefault: case 540: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3000 +//line sql.y:3010 { yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), DropDefault: false, DefaultVal: yyDollar[7].exprUnion()} } @@ -13720,25 +13874,23 @@ yydefault: case 541: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3004 +//line sql.y:3014 { - val := false - yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val} + yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: ptr.Of(false)} } yyVAL.union = yyLOCAL case 542: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3009 +//line sql.y:3018 { - val := true - yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: &val} + yyLOCAL = &AlterColumn{Column: yyDollar[3].colNameUnion(), Invisible: ptr.Of(true)} } yyVAL.union = yyLOCAL case 543: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3014 +//line sql.y:3022 { yyLOCAL = &AlterCheck{Name: yyDollar[3].identifierCI, Enforced: yyDollar[4].booleanUnion()} } @@ -13746,7 +13898,7 @@ yydefault: case 
544: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3018 +//line sql.y:3026 { yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: false} } @@ -13754,7 +13906,7 @@ yydefault: case 545: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3022 +//line sql.y:3030 { yyLOCAL = &AlterIndex{Name: yyDollar[3].identifierCI, Invisible: true} } @@ -13762,7 +13914,7 @@ yydefault: case 546: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3026 +//line sql.y:3034 { yyLOCAL = &ChangeColumn{OldColumn: yyDollar[3].colNameUnion(), NewColDefinition: yyDollar[4].columnDefinitionUnion(), First: yyDollar[5].booleanUnion(), After: yyDollar[6].colNameUnion()} } @@ -13770,7 +13922,7 @@ yydefault: case 547: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3030 +//line sql.y:3038 { yyLOCAL = &ModifyColumn{NewColDefinition: yyDollar[3].columnDefinitionUnion(), First: yyDollar[4].booleanUnion(), After: yyDollar[5].colNameUnion()} } @@ -13778,7 +13930,7 @@ yydefault: case 548: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3034 +//line sql.y:3042 { yyLOCAL = &RenameColumn{OldName: yyDollar[3].colNameUnion(), NewName: yyDollar[5].colNameUnion()} } @@ -13786,7 +13938,7 @@ yydefault: case 549: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3038 +//line sql.y:3046 { yyLOCAL = &AlterCharset{CharacterSet: yyDollar[4].str, Collate: yyDollar[5].str} } @@ -13794,7 +13946,7 @@ yydefault: case 550: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3042 +//line sql.y:3050 { yyLOCAL = &KeyState{Enable: false} } @@ -13802,7 +13954,7 @@ yydefault: case 551: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3046 +//line sql.y:3054 { yyLOCAL = &KeyState{Enable: true} } @@ -13810,7 +13962,7 @@ yydefault: case 552: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3050 +//line sql.y:3058 { yyLOCAL = 
&TablespaceOperation{Import: false} } @@ -13818,7 +13970,7 @@ yydefault: case 553: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3054 +//line sql.y:3062 { yyLOCAL = &TablespaceOperation{Import: true} } @@ -13826,7 +13978,7 @@ yydefault: case 554: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3058 +//line sql.y:3066 { yyLOCAL = &DropColumn{Name: yyDollar[3].colNameUnion()} } @@ -13834,7 +13986,7 @@ yydefault: case 555: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3062 +//line sql.y:3070 { yyLOCAL = &DropKey{Type: NormalKeyType, Name: yyDollar[3].identifierCI} } @@ -13842,7 +13994,7 @@ yydefault: case 556: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3066 +//line sql.y:3074 { yyLOCAL = &DropKey{Type: PrimaryKeyType} } @@ -13850,7 +14002,7 @@ yydefault: case 557: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3070 +//line sql.y:3078 { yyLOCAL = &DropKey{Type: ForeignKeyType, Name: yyDollar[4].identifierCI} } @@ -13858,7 +14010,7 @@ yydefault: case 558: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3074 +//line sql.y:3082 { yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI} } @@ -13866,7 +14018,7 @@ yydefault: case 559: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3078 +//line sql.y:3086 { yyLOCAL = &DropKey{Type: CheckKeyType, Name: yyDollar[3].identifierCI} } @@ -13874,7 +14026,7 @@ yydefault: case 560: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3082 +//line sql.y:3090 { yyLOCAL = &Force{} } @@ -13882,7 +14034,7 @@ yydefault: case 561: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3086 +//line sql.y:3094 { yyLOCAL = &RenameTableName{Table: yyDollar[3].tableName} } @@ -13890,7 +14042,7 @@ yydefault: case 562: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3090 +//line sql.y:3098 { yyLOCAL = 
&RenameIndex{OldName: yyDollar[3].identifierCI, NewName: yyDollar[5].identifierCI} } @@ -13898,14 +14050,14 @@ yydefault: case 563: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:3096 +//line sql.y:3104 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL case 564: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3100 +//line sql.y:3108 { yySLICE := (*[]AlterOption)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].alterOptionUnion()) @@ -13913,7 +14065,7 @@ yydefault: case 565: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3106 +//line sql.y:3114 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } @@ -13921,7 +14073,7 @@ yydefault: case 566: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3110 +//line sql.y:3118 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } @@ -13929,7 +14081,7 @@ yydefault: case 567: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3114 +//line sql.y:3122 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } @@ -13937,7 +14089,7 @@ yydefault: case 568: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3118 +//line sql.y:3126 { yyLOCAL = AlgorithmValue(string(yyDollar[3].str)) } @@ -13945,7 +14097,7 @@ yydefault: case 569: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3122 +//line sql.y:3130 { yyLOCAL = &LockOption{Type: DefaultType} } @@ -13953,7 +14105,7 @@ yydefault: case 570: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3126 +//line sql.y:3134 { yyLOCAL = &LockOption{Type: NoneType} } @@ -13961,7 +14113,7 @@ yydefault: case 571: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3130 +//line sql.y:3138 { yyLOCAL = &LockOption{Type: SharedType} } @@ -13969,7 +14121,7 @@ yydefault: case 572: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3134 +//line sql.y:3142 { yyLOCAL = &LockOption{Type: 
ExclusiveType} } @@ -13977,7 +14129,7 @@ yydefault: case 573: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3138 +//line sql.y:3146 { yyLOCAL = &Validation{With: true} } @@ -13985,7 +14137,7 @@ yydefault: case 574: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL AlterOption -//line sql.y:3142 +//line sql.y:3150 { yyLOCAL = &Validation{With: false} } @@ -13993,7 +14145,7 @@ yydefault: case 575: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3148 +//line sql.y:3156 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -14004,7 +14156,7 @@ yydefault: case 576: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3155 +//line sql.y:3163 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -14015,7 +14167,7 @@ yydefault: case 577: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3162 +//line sql.y:3170 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().AlterOptions = yyDollar[2].alterOptionsUnion() @@ -14026,7 +14178,7 @@ yydefault: case 578: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3169 +//line sql.y:3177 { yyDollar[1].alterTableUnion().FullyParsed = true yyDollar[1].alterTableUnion().PartitionSpec = yyDollar[2].partSpecUnion() @@ -14036,7 +14188,7 @@ yydefault: case 579: yyDollar = yyS[yypt-11 : yypt+1] var yyLOCAL Statement -//line sql.y:3175 +//line sql.y:3183 { yyLOCAL = &AlterView{ViewName: yyDollar[7].tableName, Comments: Comments(yyDollar[2].strs).Parsed(), Algorithm: yyDollar[3].str, Definer: yyDollar[4].definerUnion(), Security: yyDollar[5].str, Columns: yyDollar[8].columnsUnion(), Select: yyDollar[10].selStmtUnion(), CheckOption: yyDollar[11].str} } @@ -14044,7 +14196,7 @@ yydefault: case 580: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3185 +//line 
sql.y:3193 { yyDollar[1].alterDatabaseUnion().FullyParsed = true yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS @@ -14055,7 +14207,7 @@ yydefault: case 581: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3192 +//line sql.y:3200 { yyDollar[1].alterDatabaseUnion().FullyParsed = true yyDollar[1].alterDatabaseUnion().DBName = yyDollar[2].identifierCS @@ -14066,7 +14218,7 @@ yydefault: case 582: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:3199 +//line sql.y:3207 { yyLOCAL = &AlterVschema{ Action: CreateVindexDDLAction, @@ -14082,7 +14234,7 @@ yydefault: case 583: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3211 +//line sql.y:3219 { yyLOCAL = &AlterVschema{ Action: DropVindexDDLAction, @@ -14096,7 +14248,7 @@ yydefault: case 584: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3221 +//line sql.y:3229 { yyLOCAL = &AlterVschema{Action: AddVschemaTableDDLAction, Table: yyDollar[6].tableName} } @@ -14104,7 +14256,7 @@ yydefault: case 585: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3225 +//line sql.y:3233 { yyLOCAL = &AlterVschema{Action: DropVschemaTableDDLAction, Table: yyDollar[6].tableName} } @@ -14112,7 +14264,7 @@ yydefault: case 586: yyDollar = yyS[yypt-13 : yypt+1] var yyLOCAL Statement -//line sql.y:3229 +//line sql.y:3237 { yyLOCAL = &AlterVschema{ Action: AddColVindexDDLAction, @@ -14129,7 +14281,7 @@ yydefault: case 587: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Statement -//line sql.y:3242 +//line sql.y:3250 { yyLOCAL = &AlterVschema{ Action: DropColVindexDDLAction, @@ -14143,7 +14295,7 @@ yydefault: case 588: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3252 +//line sql.y:3260 { yyLOCAL = &AlterVschema{Action: AddSequenceDDLAction, Table: yyDollar[6].tableName} } @@ -14151,7 +14303,7 @@ yydefault: case 589: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3256 +//line sql.y:3264 { 
yyLOCAL = &AlterVschema{Action: DropSequenceDDLAction, Table: yyDollar[6].tableName} } @@ -14159,7 +14311,7 @@ yydefault: case 590: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Statement -//line sql.y:3260 +//line sql.y:3268 { yyLOCAL = &AlterVschema{ Action: AddAutoIncDDLAction, @@ -14174,7 +14326,7 @@ yydefault: case 591: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3271 +//line sql.y:3279 { yyLOCAL = &AlterVschema{ Action: DropAutoIncDDLAction, @@ -14185,7 +14337,7 @@ yydefault: case 592: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3278 +//line sql.y:3286 { yyLOCAL = &AlterMigration{ Type: RetryMigrationType, @@ -14196,7 +14348,7 @@ yydefault: case 593: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3285 +//line sql.y:3293 { yyLOCAL = &AlterMigration{ Type: CleanupMigrationType, @@ -14207,7 +14359,7 @@ yydefault: case 594: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3292 +//line sql.y:3300 { yyLOCAL = &AlterMigration{ Type: LaunchMigrationType, @@ -14218,7 +14370,7 @@ yydefault: case 595: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3299 +//line sql.y:3307 { yyLOCAL = &AlterMigration{ Type: LaunchMigrationType, @@ -14230,7 +14382,7 @@ yydefault: case 596: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3307 +//line sql.y:3315 { yyLOCAL = &AlterMigration{ Type: LaunchAllMigrationType, @@ -14240,7 +14392,7 @@ yydefault: case 597: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3313 +//line sql.y:3321 { yyLOCAL = &AlterMigration{ Type: CompleteMigrationType, @@ -14251,7 +14403,7 @@ yydefault: case 598: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3320 +//line sql.y:3328 { yyLOCAL = &AlterMigration{ Type: CompleteAllMigrationType, @@ -14261,7 +14413,7 @@ yydefault: case 599: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3326 +//line sql.y:3334 { yyLOCAL = 
&AlterMigration{ Type: CancelMigrationType, @@ -14272,7 +14424,7 @@ yydefault: case 600: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3333 +//line sql.y:3341 { yyLOCAL = &AlterMigration{ Type: CancelAllMigrationType, @@ -14282,7 +14434,7 @@ yydefault: case 601: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3339 +//line sql.y:3347 { yyLOCAL = &AlterMigration{ Type: ThrottleMigrationType, @@ -14295,7 +14447,7 @@ yydefault: case 602: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3348 +//line sql.y:3356 { yyLOCAL = &AlterMigration{ Type: ThrottleAllMigrationType, @@ -14307,7 +14459,7 @@ yydefault: case 603: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3356 +//line sql.y:3364 { yyLOCAL = &AlterMigration{ Type: UnthrottleMigrationType, @@ -14318,7 +14470,7 @@ yydefault: case 604: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3363 +//line sql.y:3371 { yyLOCAL = &AlterMigration{ Type: UnthrottleAllMigrationType, @@ -14326,17 +14478,38 @@ yydefault: } yyVAL.union = yyLOCAL case 605: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:3377 + { + yyLOCAL = &AlterMigration{ + Type: ForceCutOverMigrationType, + UUID: string(yyDollar[4].str), + } + } + yyVAL.union = yyLOCAL + case 606: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Statement +//line sql.y:3384 + { + yyLOCAL = &AlterMigration{ + Type: ForceCutOverAllMigrationType, + } + } + yyVAL.union = yyLOCAL + case 607: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3370 +//line sql.y:3391 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 606: + case 608: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3374 +//line sql.y:3395 { yyDollar[3].partitionOptionUnion().Partitions = yyDollar[4].integerUnion() yyDollar[3].partitionOptionUnion().SubPartition = yyDollar[5].subPartitionUnion() @@ -14344,10 +14517,10 @@ yydefault: yyLOCAL = 
yyDollar[3].partitionOptionUnion() } yyVAL.union = yyLOCAL - case 607: + case 609: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3383 +//line sql.y:3404 { yyLOCAL = &PartitionOption{ IsLinear: yyDollar[1].booleanUnion(), @@ -14356,10 +14529,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 608: + case 610: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3391 +//line sql.y:3412 { yyLOCAL = &PartitionOption{ IsLinear: yyDollar[1].booleanUnion(), @@ -14369,10 +14542,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 609: + case 611: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3400 +//line sql.y:3421 { yyLOCAL = &PartitionOption{ Type: yyDollar[1].partitionByTypeUnion(), @@ -14380,10 +14553,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 610: + case 612: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionOption -//line sql.y:3407 +//line sql.y:3428 { yyLOCAL = &PartitionOption{ Type: yyDollar[1].partitionByTypeUnion(), @@ -14391,18 +14564,18 @@ yydefault: } } yyVAL.union = yyLOCAL - case 611: + case 613: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3415 +//line sql.y:3436 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 612: + case 614: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3419 +//line sql.y:3440 { yyLOCAL = &SubPartition{ IsLinear: yyDollar[3].booleanUnion(), @@ -14412,10 +14585,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 613: + case 615: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL *SubPartition -//line sql.y:3428 +//line sql.y:3449 { yyLOCAL = &SubPartition{ IsLinear: yyDollar[3].booleanUnion(), @@ -14426,682 +14599,678 @@ yydefault: } } yyVAL.union = yyLOCAL - case 614: + case 616: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3439 +//line sql.y:3460 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 615: + case 617: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
[]*PartitionDefinition -//line sql.y:3443 +//line sql.y:3464 { yyLOCAL = yyDollar[2].partDefsUnion() } yyVAL.union = yyLOCAL - case 616: + case 618: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3448 +//line sql.y:3469 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 617: + case 619: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3452 +//line sql.y:3473 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 618: + case 620: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3457 +//line sql.y:3478 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 619: + case 621: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3461 +//line sql.y:3482 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 620: + case 622: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL TableExpr -//line sql.y:3467 +//line sql.y:3488 { yyLOCAL = &JSONTableExpr{Expr: yyDollar[3].exprUnion(), Filter: yyDollar[5].exprUnion(), Columns: yyDollar[6].jtColumnListUnion(), Alias: yyDollar[8].identifierCS} } yyVAL.union = yyLOCAL - case 621: + case 623: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL []*JtColumnDefinition -//line sql.y:3473 +//line sql.y:3494 { yyLOCAL = yyDollar[3].jtColumnListUnion() } yyVAL.union = yyLOCAL - case 622: + case 624: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*JtColumnDefinition -//line sql.y:3479 +//line sql.y:3500 { yyLOCAL = []*JtColumnDefinition{yyDollar[1].jtColumnDefinitionUnion()} } yyVAL.union = yyLOCAL - case 623: + case 625: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3483 +//line sql.y:3504 { yySLICE := (*[]*JtColumnDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].jtColumnDefinitionUnion()) } - case 624: + case 626: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3489 +//line sql.y:3510 { yyLOCAL = &JtColumnDefinition{JtOrdinal: &JtOrdinalColDef{Name: yyDollar[1].identifierCI}} } yyVAL.union = yyLOCAL - case 625: + case 627: yyDollar = yyS[yypt-6 
: yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3493 +//line sql.y:3514 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 626: + case 628: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3499 +//line sql.y:3520 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 627: + case 629: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3505 +//line sql.y:3526 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 628: + case 630: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3511 +//line sql.y:3532 { yyDollar[2].columnType.Options = &ColumnTypeOptions{Collate: yyDollar[3].str} jtPath := &JtPathColDef{Name: yyDollar[1].identifierCI, Type: yyDollar[2].columnType, JtColExists: yyDollar[4].booleanUnion(), Path: yyDollar[6].exprUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} yyLOCAL = &JtColumnDefinition{JtPath: jtPath} } yyVAL.union = yyLOCAL - case 629: + case 631: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *JtColumnDefinition -//line sql.y:3517 
+//line sql.y:3538 { jtNestedPath := &JtNestedPathColDef{Path: yyDollar[3].exprUnion(), Columns: yyDollar[4].jtColumnListUnion()} yyLOCAL = &JtColumnDefinition{JtNestedPath: jtNestedPath} } yyVAL.union = yyLOCAL - case 630: + case 632: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3523 +//line sql.y:3544 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 631: + case 633: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3527 +//line sql.y:3548 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 632: + case 634: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3531 +//line sql.y:3552 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 633: + case 635: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3535 +//line sql.y:3556 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 634: + case 636: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3541 +//line sql.y:3562 { yyLOCAL = yyDollar[1].jtOnResponseUnion() } yyVAL.union = yyLOCAL - case 635: + case 637: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3547 +//line sql.y:3568 { yyLOCAL = yyDollar[1].jtOnResponseUnion() } yyVAL.union = yyLOCAL - case 636: + case 638: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3553 +//line sql.y:3574 { yyLOCAL = &JtOnResponse{ResponseType: ErrorJSONType} } yyVAL.union = yyLOCAL - case 637: + case 639: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3557 +//line sql.y:3578 { yyLOCAL = &JtOnResponse{ResponseType: NullJSONType} } yyVAL.union = yyLOCAL - case 638: + case 640: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *JtOnResponse -//line sql.y:3561 +//line sql.y:3582 { yyLOCAL = &JtOnResponse{ResponseType: DefaultJSONType, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 639: + case 641: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL PartitionByType -//line sql.y:3567 +//line sql.y:3588 { yyLOCAL = RangeType } yyVAL.union = yyLOCAL - 
case 640: + case 642: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL PartitionByType -//line sql.y:3571 +//line sql.y:3592 { yyLOCAL = ListType } yyVAL.union = yyLOCAL - case 641: + case 643: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3576 +//line sql.y:3597 { yyLOCAL = -1 } yyVAL.union = yyLOCAL - case 642: + case 644: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:3580 +//line sql.y:3601 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 643: + case 645: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:3585 +//line sql.y:3606 { yyLOCAL = -1 } yyVAL.union = yyLOCAL - case 644: + case 646: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:3589 +//line sql.y:3610 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 645: + case 647: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3595 +//line sql.y:3616 { yyLOCAL = &PartitionSpec{Action: AddAction, Definitions: []*PartitionDefinition{yyDollar[4].partDefUnion()}} } yyVAL.union = yyLOCAL - case 646: + case 648: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3599 +//line sql.y:3620 { yyLOCAL = &PartitionSpec{Action: DropAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 647: + case 649: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3603 +//line sql.y:3624 { yyLOCAL = &PartitionSpec{Action: ReorganizeAction, Names: yyDollar[3].partitionsUnion(), Definitions: yyDollar[6].partDefsUnion()} } yyVAL.union = yyLOCAL - case 648: + case 650: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3607 +//line sql.y:3628 { yyLOCAL = &PartitionSpec{Action: DiscardAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 649: + case 651: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3611 +//line sql.y:3632 { yyLOCAL = &PartitionSpec{Action: DiscardAction, 
IsAll: true} } yyVAL.union = yyLOCAL - case 650: + case 652: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3615 +//line sql.y:3636 { yyLOCAL = &PartitionSpec{Action: ImportAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 651: + case 653: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3619 +//line sql.y:3640 { yyLOCAL = &PartitionSpec{Action: ImportAction, IsAll: true} } yyVAL.union = yyLOCAL - case 652: + case 654: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3623 +//line sql.y:3644 { yyLOCAL = &PartitionSpec{Action: TruncateAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 653: + case 655: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3627 +//line sql.y:3648 { yyLOCAL = &PartitionSpec{Action: TruncateAction, IsAll: true} } yyVAL.union = yyLOCAL - case 654: + case 656: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3631 +//line sql.y:3652 { yyLOCAL = &PartitionSpec{Action: CoalesceAction, Number: NewIntLiteral(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 655: + case 657: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3635 +//line sql.y:3656 { yyLOCAL = &PartitionSpec{Action: ExchangeAction, Names: Partitions{yyDollar[3].identifierCI}, TableName: yyDollar[6].tableName, WithoutValidation: yyDollar[7].booleanUnion()} } yyVAL.union = yyLOCAL - case 656: + case 658: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3639 +//line sql.y:3660 { yyLOCAL = &PartitionSpec{Action: AnalyzeAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 657: + case 659: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3643 +//line sql.y:3664 { yyLOCAL = &PartitionSpec{Action: AnalyzeAction, IsAll: true} } yyVAL.union = yyLOCAL - case 658: + case 660: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL 
*PartitionSpec -//line sql.y:3647 +//line sql.y:3668 { yyLOCAL = &PartitionSpec{Action: CheckAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 659: + case 661: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3651 +//line sql.y:3672 { yyLOCAL = &PartitionSpec{Action: CheckAction, IsAll: true} } yyVAL.union = yyLOCAL - case 660: + case 662: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3655 +//line sql.y:3676 { yyLOCAL = &PartitionSpec{Action: OptimizeAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 661: + case 663: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3659 +//line sql.y:3680 { yyLOCAL = &PartitionSpec{Action: OptimizeAction, IsAll: true} } yyVAL.union = yyLOCAL - case 662: + case 664: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3663 +//line sql.y:3684 { yyLOCAL = &PartitionSpec{Action: RebuildAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 663: + case 665: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3667 +//line sql.y:3688 { yyLOCAL = &PartitionSpec{Action: RebuildAction, IsAll: true} } yyVAL.union = yyLOCAL - case 664: + case 666: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3671 +//line sql.y:3692 { yyLOCAL = &PartitionSpec{Action: RepairAction, Names: yyDollar[3].partitionsUnion()} } yyVAL.union = yyLOCAL - case 665: + case 667: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3675 +//line sql.y:3696 { yyLOCAL = &PartitionSpec{Action: RepairAction, IsAll: true} } yyVAL.union = yyLOCAL - case 666: + case 668: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionSpec -//line sql.y:3679 +//line sql.y:3700 { yyLOCAL = &PartitionSpec{Action: UpgradeAction} } yyVAL.union = yyLOCAL - case 667: + case 669: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3684 +//line sql.y:3705 { 
yyLOCAL = false } yyVAL.union = yyLOCAL - case 668: + case 670: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:3688 +//line sql.y:3709 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 669: + case 671: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:3692 +//line sql.y:3713 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 670: + case 672: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*PartitionDefinition -//line sql.y:3698 +//line sql.y:3719 { yyLOCAL = []*PartitionDefinition{yyDollar[1].partDefUnion()} } yyVAL.union = yyLOCAL - case 671: + case 673: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3702 +//line sql.y:3723 { yySLICE := (*[]*PartitionDefinition)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].partDefUnion()) } - case 672: + case 674: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:3708 +//line sql.y:3729 { yyVAL.partDefUnion().Options = yyDollar[2].partitionDefinitionOptionsUnion() } - case 673: + case 675: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3713 +//line sql.y:3734 { yyLOCAL = &PartitionDefinitionOptions{} } yyVAL.union = yyLOCAL - case 674: + case 676: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3717 +//line sql.y:3738 { yyDollar[1].partitionDefinitionOptionsUnion().ValueRange = yyDollar[2].partitionValueRangeUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 675: + case 677: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3722 +//line sql.y:3743 { yyDollar[1].partitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 676: + case 678: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3727 +//line sql.y:3748 { yyDollar[1].partitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion() yyLOCAL = 
yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 677: + case 679: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3732 +//line sql.y:3753 { yyDollar[1].partitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 678: + case 680: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3737 +//line sql.y:3758 { yyDollar[1].partitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 679: + case 681: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3742 +//line sql.y:3763 { - val := yyDollar[2].integerUnion() - yyDollar[1].partitionDefinitionOptionsUnion().MaxRows = &val + yyDollar[1].partitionDefinitionOptionsUnion().MaxRows = ptr.Of(yyDollar[2].integerUnion()) yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 680: + case 682: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3748 +//line sql.y:3768 { - val := yyDollar[2].integerUnion() - yyDollar[1].partitionDefinitionOptionsUnion().MinRows = &val + yyDollar[1].partitionDefinitionOptionsUnion().MinRows = ptr.Of(yyDollar[2].integerUnion()) yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 681: + case 683: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3754 +//line sql.y:3773 { yyDollar[1].partitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 682: + case 684: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinitionOptions -//line sql.y:3759 +//line sql.y:3778 { yyDollar[1].partitionDefinitionOptionsUnion().SubPartitionDefinitions = 
yyDollar[2].subPartitionDefinitionsUnion() yyLOCAL = yyDollar[1].partitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 683: + case 685: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SubPartitionDefinitions -//line sql.y:3765 +//line sql.y:3784 { yyLOCAL = yyDollar[2].subPartitionDefinitionsUnion() } yyVAL.union = yyLOCAL - case 684: + case 686: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SubPartitionDefinitions -//line sql.y:3771 +//line sql.y:3790 { yyLOCAL = SubPartitionDefinitions{yyDollar[1].subPartitionDefinitionUnion()} } yyVAL.union = yyLOCAL - case 685: + case 687: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3775 +//line sql.y:3794 { yySLICE := (*SubPartitionDefinitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].subPartitionDefinitionUnion()) } - case 686: + case 688: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SubPartitionDefinition -//line sql.y:3781 +//line sql.y:3800 { yyLOCAL = &SubPartitionDefinition{Name: yyDollar[2].identifierCI, Options: yyDollar[3].subPartitionDefinitionOptionsUnion()} } yyVAL.union = yyLOCAL - case 687: + case 689: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3786 +//line sql.y:3805 { yyLOCAL = &SubPartitionDefinitionOptions{} } yyVAL.union = yyLOCAL - case 688: + case 690: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3790 +//line sql.y:3809 { yyDollar[1].subPartitionDefinitionOptionsUnion().Comment = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 689: + case 691: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3795 +//line sql.y:3814 { yyDollar[1].subPartitionDefinitionOptionsUnion().Engine = yyDollar[2].partitionEngineUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 690: + case 692: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL 
*SubPartitionDefinitionOptions -//line sql.y:3800 +//line sql.y:3819 { yyDollar[1].subPartitionDefinitionOptionsUnion().DataDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 691: + case 693: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3805 +//line sql.y:3824 { yyDollar[1].subPartitionDefinitionOptionsUnion().IndexDirectory = yyDollar[2].literalUnion() yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 692: + case 694: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3810 +//line sql.y:3829 { - val := yyDollar[2].integerUnion() - yyDollar[1].subPartitionDefinitionOptionsUnion().MaxRows = &val + yyDollar[1].subPartitionDefinitionOptionsUnion().MaxRows = ptr.Of(yyDollar[2].integerUnion()) yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 693: + case 695: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3816 +//line sql.y:3834 { - val := yyDollar[2].integerUnion() - yyDollar[1].subPartitionDefinitionOptionsUnion().MinRows = &val + yyDollar[1].subPartitionDefinitionOptionsUnion().MinRows = ptr.Of(yyDollar[2].integerUnion()) yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 694: + case 696: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *SubPartitionDefinitionOptions -//line sql.y:3822 +//line sql.y:3839 { yyDollar[1].subPartitionDefinitionOptionsUnion().TableSpace = yyDollar[2].str yyLOCAL = yyDollar[1].subPartitionDefinitionOptionsUnion() } yyVAL.union = yyLOCAL - case 695: + case 697: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3829 +//line sql.y:3846 { yyLOCAL = &PartitionValueRange{ Type: LessThanType, @@ -15109,10 +15278,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 696: + case 698: yyDollar = yyS[yypt-4 : 
yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3836 +//line sql.y:3853 { yyLOCAL = &PartitionValueRange{ Type: LessThanType, @@ -15120,10 +15289,10 @@ yydefault: } } yyVAL.union = yyLOCAL - case 697: + case 699: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *PartitionValueRange -//line sql.y:3843 +//line sql.y:3860 { yyLOCAL = &PartitionValueRange{ Type: InType, @@ -15131,131 +15300,131 @@ yydefault: } } yyVAL.union = yyLOCAL - case 698: + case 700: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:3851 +//line sql.y:3868 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 699: + case 701: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:3855 +//line sql.y:3872 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 700: + case 702: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *PartitionEngine -//line sql.y:3861 +//line sql.y:3878 { yyLOCAL = &PartitionEngine{Storage: yyDollar[1].booleanUnion(), Name: yyDollar[4].identifierCS.String()} } yyVAL.union = yyLOCAL - case 701: + case 703: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Literal -//line sql.y:3867 +//line sql.y:3884 { yyLOCAL = NewStrLiteral(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 702: + case 704: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Literal -//line sql.y:3873 +//line sql.y:3890 { yyLOCAL = NewStrLiteral(yyDollar[4].str) } yyVAL.union = yyLOCAL - case 703: + case 705: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Literal -//line sql.y:3879 +//line sql.y:3896 { yyLOCAL = NewStrLiteral(yyDollar[4].str) } yyVAL.union = yyLOCAL - case 704: + case 706: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3885 +//line sql.y:3902 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 705: + case 707: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:3891 +//line sql.y:3908 { yyLOCAL = convertStringToInt(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 706: + case 708: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3897 +//line sql.y:3914 
{ yyVAL.str = yyDollar[3].identifierCS.String() } - case 707: + case 709: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *PartitionDefinition -//line sql.y:3903 +//line sql.y:3920 { yyLOCAL = &PartitionDefinition{Name: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 708: + case 710: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:3909 +//line sql.y:3926 { yyVAL.str = "" } - case 709: + case 711: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:3913 +//line sql.y:3930 { yyVAL.str = "" } - case 710: + case 712: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3919 +//line sql.y:3936 { yyLOCAL = &RenameTable{TablePairs: yyDollar[3].renameTablePairsUnion()} } yyVAL.union = yyLOCAL - case 711: + case 713: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*RenameTablePair -//line sql.y:3925 +//line sql.y:3942 { yyLOCAL = []*RenameTablePair{{FromTable: yyDollar[1].tableName, ToTable: yyDollar[3].tableName}} } yyVAL.union = yyLOCAL - case 712: + case 714: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:3929 +//line sql.y:3946 { yySLICE := (*[]*RenameTablePair)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, &RenameTablePair{FromTable: yyDollar[3].tableName, ToTable: yyDollar[5].tableName}) } - case 713: + case 715: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3935 +//line sql.y:3952 { yyLOCAL = &DropTable{FromTables: yyDollar[6].tableNamesUnion(), IfExists: yyDollar[5].booleanUnion(), Comments: Comments(yyDollar[2].strs).Parsed(), Temp: yyDollar[3].booleanUnion()} } yyVAL.union = yyLOCAL - case 714: + case 716: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3939 +//line sql.y:3956 { // Change this to an alter statement if yyDollar[4].identifierCI.Lowered() == "primary" { @@ -15265,1335 +15434,1327 @@ yydefault: } } yyVAL.union = yyLOCAL - case 715: + case 717: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:3948 +//line sql.y:3965 { yyLOCAL = &DropView{FromTables: yyDollar[5].tableNamesUnion(), 
Comments: Comments(yyDollar[2].strs).Parsed(), IfExists: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 716: + case 718: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3952 +//line sql.y:3969 { yyLOCAL = &DropDatabase{Comments: Comments(yyDollar[2].strs).Parsed(), DBName: yyDollar[5].identifierCS, IfExists: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 717: + case 719: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3958 +//line sql.y:3975 { yyLOCAL = &TruncateTable{Table: yyDollar[3].tableName} } yyVAL.union = yyLOCAL - case 718: + case 720: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:3962 +//line sql.y:3979 { yyLOCAL = &TruncateTable{Table: yyDollar[2].tableName} } yyVAL.union = yyLOCAL - case 719: + case 721: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:3968 +//line sql.y:3985 { yyLOCAL = &Analyze{IsLocal: yyDollar[2].booleanUnion(), Table: yyDollar[4].tableName} } yyVAL.union = yyLOCAL - case 720: + case 722: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3974 +//line sql.y:3991 { yyLOCAL = &PurgeBinaryLogs{To: string(yyDollar[5].str)} } yyVAL.union = yyLOCAL - case 721: + case 723: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:3978 +//line sql.y:3995 { yyLOCAL = &PurgeBinaryLogs{Before: string(yyDollar[5].str)} } yyVAL.union = yyLOCAL - case 722: + case 724: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3984 +//line sql.y:4001 { yyLOCAL = &Show{&ShowBasic{Command: Charset, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 723: + case 725: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3988 +//line sql.y:4005 { yyLOCAL = &Show{&ShowBasic{Command: Collation, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 724: + case 726: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:3992 +//line sql.y:4009 { yyLOCAL = 
&Show{&ShowBasic{Full: yyDollar[2].booleanUnion(), Command: Column, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 725: + case 727: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:3996 +//line sql.y:4013 { yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 726: + case 728: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4000 +//line sql.y:4017 { yyLOCAL = &Show{&ShowBasic{Command: Database, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 727: + case 729: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4004 +//line sql.y:4021 { yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 728: + case 730: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4008 +//line sql.y:4025 { yyLOCAL = &Show{&ShowBasic{Command: Keyspace, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 729: + case 731: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4012 +//line sql.y:4029 { yyLOCAL = &Show{&ShowBasic{Command: Function, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 730: + case 732: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:4016 +//line sql.y:4033 { yyLOCAL = &Show{&ShowBasic{Command: Index, Tbl: yyDollar[5].tableName, DbName: yyDollar[6].identifierCS, Filter: yyDollar[7].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 731: + case 733: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4020 +//line sql.y:4037 { yyLOCAL = &Show{&ShowBasic{Command: OpenTable, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 732: + case 734: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4024 +//line sql.y:4041 
{ yyLOCAL = &Show{&ShowBasic{Command: Privilege}} } yyVAL.union = yyLOCAL - case 733: + case 735: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4028 +//line sql.y:4045 { yyLOCAL = &Show{&ShowBasic{Command: Procedure, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 734: + case 736: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4032 +//line sql.y:4049 { yyLOCAL = &Show{&ShowBasic{Command: StatusSession, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 735: + case 737: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4036 +//line sql.y:4053 { yyLOCAL = &Show{&ShowBasic{Command: StatusGlobal, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 736: + case 738: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4040 +//line sql.y:4057 { yyLOCAL = &Show{&ShowBasic{Command: VariableSession, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 737: + case 739: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4044 +//line sql.y:4061 { yyLOCAL = &Show{&ShowBasic{Command: VariableGlobal, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 738: + case 740: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4048 +//line sql.y:4065 { yyLOCAL = &Show{&ShowBasic{Command: TableStatus, DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 739: + case 741: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4052 +//line sql.y:4069 { yyLOCAL = &Show{&ShowBasic{Command: Table, Full: yyDollar[2].booleanUnion(), DbName: yyDollar[4].identifierCS, Filter: yyDollar[5].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 740: + case 742: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4056 +//line sql.y:4073 { yyLOCAL = &Show{&ShowBasic{Command: Trigger, DbName: yyDollar[3].identifierCS, Filter: 
yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 741: + case 743: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4060 +//line sql.y:4077 { yyLOCAL = &Show{&ShowCreate{Command: CreateDb, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 742: + case 744: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4064 +//line sql.y:4081 { yyLOCAL = &Show{&ShowCreate{Command: CreateE, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 743: + case 745: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4068 +//line sql.y:4085 { yyLOCAL = &Show{&ShowCreate{Command: CreateF, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 744: + case 746: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4072 +//line sql.y:4089 { yyLOCAL = &Show{&ShowCreate{Command: CreateProc, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 745: + case 747: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4076 +//line sql.y:4093 { yyLOCAL = &Show{&ShowCreate{Command: CreateTbl, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 746: + case 748: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4080 +//line sql.y:4097 { yyLOCAL = &Show{&ShowCreate{Command: CreateTr, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 747: + case 749: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4084 +//line sql.y:4101 { yyLOCAL = &Show{&ShowCreate{Command: CreateV, Op: yyDollar[4].tableName}} } yyVAL.union = yyLOCAL - case 748: + case 750: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4088 +//line sql.y:4105 { yyLOCAL = &Show{&ShowBasic{Command: Engines}} } yyVAL.union = yyLOCAL - case 749: + case 751: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4092 +//line sql.y:4109 { yyLOCAL = &Show{&ShowBasic{Command: Plugins}} } yyVAL.union = yyLOCAL - case 750: + case 752: yyDollar = yyS[yypt-4 : 
yypt+1] var yyLOCAL Statement -//line sql.y:4096 +//line sql.y:4113 { yyLOCAL = &Show{&ShowBasic{Command: GtidExecGlobal, DbName: yyDollar[4].identifierCS}} } yyVAL.union = yyLOCAL - case 751: + case 753: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4100 +//line sql.y:4117 { yyLOCAL = &Show{&ShowBasic{Command: VGtidExecGlobal, DbName: yyDollar[4].identifierCS}} } yyVAL.union = yyLOCAL - case 752: + case 754: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4104 +//line sql.y:4121 { yyLOCAL = &Show{&ShowBasic{Command: VitessVariables, Filter: yyDollar[4].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 753: + case 755: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4108 +//line sql.y:4125 { yyLOCAL = &Show{&ShowBasic{Command: VitessMigrations, Filter: yyDollar[4].showFilterUnion(), DbName: yyDollar[3].identifierCS}} } yyVAL.union = yyLOCAL - case 754: + case 756: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4112 +//line sql.y:4129 { yyLOCAL = &ShowMigrationLogs{UUID: string(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 755: + case 757: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4116 +//line sql.y:4133 { yyLOCAL = &ShowThrottledApps{} } yyVAL.union = yyLOCAL - case 756: + case 758: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4120 +//line sql.y:4137 { yyLOCAL = &Show{&ShowBasic{Command: VitessReplicationStatus, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 757: + case 759: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4124 +//line sql.y:4141 { yyLOCAL = &ShowThrottlerStatus{} } yyVAL.union = yyLOCAL - case 758: + case 760: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4128 +//line sql.y:4145 { yyLOCAL = &Show{&ShowBasic{Command: VschemaTables}} } yyVAL.union = yyLOCAL - case 759: + case 761: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Statement +//line 
sql.y:4149 + { + yyLOCAL = &Show{&ShowBasic{Command: VschemaKeyspaces}} + } + yyVAL.union = yyLOCAL + case 762: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4132 +//line sql.y:4153 { yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes}} } yyVAL.union = yyLOCAL - case 760: + case 763: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4136 +//line sql.y:4157 { yyLOCAL = &Show{&ShowBasic{Command: VschemaVindexes, Tbl: yyDollar[5].tableName}} } yyVAL.union = yyLOCAL - case 761: + case 764: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4140 +//line sql.y:4161 { yyLOCAL = &Show{&ShowBasic{Command: Warnings}} } yyVAL.union = yyLOCAL - case 762: + case 765: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4144 +//line sql.y:4165 { yyLOCAL = &Show{&ShowBasic{Command: VitessShards, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 763: + case 766: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4148 +//line sql.y:4169 { yyLOCAL = &Show{&ShowBasic{Command: VitessTablets, Filter: yyDollar[3].showFilterUnion()}} } yyVAL.union = yyLOCAL - case 764: + case 767: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4152 +//line sql.y:4173 { yyLOCAL = &Show{&ShowBasic{Command: VitessTarget}} } yyVAL.union = yyLOCAL - case 765: + case 768: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4159 +//line sql.y:4180 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].identifierCI.String())}} } yyVAL.union = yyLOCAL - case 766: + case 769: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4163 +//line sql.y:4184 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 767: + case 770: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4167 +//line sql.y:4188 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + 
yyDollar[3].identifierCI.String()}} } yyVAL.union = yyLOCAL - case 768: + case 771: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4171 +//line sql.y:4192 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 769: + case 772: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4175 +//line sql.y:4196 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}} } yyVAL.union = yyLOCAL - case 770: + case 773: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4179 +//line sql.y:4200 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}} } yyVAL.union = yyLOCAL - case 771: + case 774: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4183 +//line sql.y:4204 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str) + " " + string(yyDollar[3].str) + " " + String(yyDollar[4].tableName)}} } yyVAL.union = yyLOCAL - case 772: + case 775: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4187 +//line sql.y:4208 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[3].str)}} } yyVAL.union = yyLOCAL - case 773: + case 776: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4191 +//line sql.y:4212 { yyLOCAL = &Show{&ShowOther{Command: string(yyDollar[2].str)}} } yyVAL.union = yyLOCAL - case 774: + case 777: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4197 +//line sql.y:4218 { yyVAL.str = "" } - case 775: + case 778: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4201 +//line sql.y:4222 { yyVAL.str = "extended " } - case 776: + case 779: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4207 +//line sql.y:4228 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 777: + case 780: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4211 +//line sql.y:4232 { yyLOCAL = true } yyVAL.union = yyLOCAL - 
case 778: + case 781: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4217 +//line sql.y:4238 { yyVAL.str = string(yyDollar[1].str) } - case 779: + case 782: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4221 +//line sql.y:4242 { yyVAL.str = string(yyDollar[1].str) } - case 780: + case 783: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4227 +//line sql.y:4248 { yyVAL.identifierCS = NewIdentifierCS("") } - case 781: + case 784: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4231 +//line sql.y:4252 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 782: + case 785: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4235 +//line sql.y:4256 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 783: + case 786: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4241 +//line sql.y:4262 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 784: + case 787: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4245 +//line sql.y:4266 { yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 785: + case 788: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4249 +//line sql.y:4270 { yyLOCAL = &ShowFilter{Filter: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 786: + case 789: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4255 +//line sql.y:4276 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 787: + case 790: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ShowFilter -//line sql.y:4259 +//line sql.y:4280 { yyLOCAL = &ShowFilter{Like: string(yyDollar[2].str)} } yyVAL.union = yyLOCAL - case 788: + case 791: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4265 +//line sql.y:4286 { yyVAL.empty = struct{}{} } - case 789: + case 792: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4269 +//line sql.y:4290 { yyVAL.empty = struct{}{} } - case 790: + case 793: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4273 +//line sql.y:4294 { yyVAL.empty = struct{}{} } - case 791: + case 794: 
yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4279 +//line sql.y:4300 { yyVAL.str = string(yyDollar[1].str) } - case 792: + case 795: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4283 +//line sql.y:4304 { yyVAL.str = string(yyDollar[1].str) } - case 793: + case 796: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4289 +//line sql.y:4310 { yyLOCAL = &Use{DBName: yyDollar[2].identifierCS} } yyVAL.union = yyLOCAL - case 794: + case 797: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4293 +//line sql.y:4314 { yyLOCAL = &Use{DBName: IdentifierCS{v: ""}} } yyVAL.union = yyLOCAL - case 795: + case 798: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4297 +//line sql.y:4318 { yyLOCAL = &Use{DBName: NewIdentifierCS(yyDollar[2].identifierCS.String() + "@" + string(yyDollar[3].str))} } yyVAL.union = yyLOCAL - case 796: + case 799: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4304 +//line sql.y:4325 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 797: + case 800: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4308 +//line sql.y:4329 { yyVAL.identifierCS = NewIdentifierCS("@" + string(yyDollar[1].str)) } - case 798: + case 801: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4312 +//line sql.y:4333 { yyVAL.identifierCS = NewIdentifierCS("@@" + string(yyDollar[1].str)) } - case 799: + case 802: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4316 +//line sql.y:4337 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 800: + case 803: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4323 +//line sql.y:4344 { yyLOCAL = &Begin{} } yyVAL.union = yyLOCAL - case 801: + case 804: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4327 +//line sql.y:4348 { yyLOCAL = &Begin{TxAccessModes: yyDollar[3].txAccessModesUnion()} } yyVAL.union = yyLOCAL - case 802: + case 805: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4332 
+//line sql.y:4353 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 803: + case 806: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4336 +//line sql.y:4357 { yyLOCAL = yyDollar[1].txAccessModesUnion() } yyVAL.union = yyLOCAL - case 804: + case 807: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []TxAccessMode -//line sql.y:4342 +//line sql.y:4363 { yyLOCAL = []TxAccessMode{yyDollar[1].txAccessModeUnion()} } yyVAL.union = yyLOCAL - case 805: + case 808: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4346 +//line sql.y:4367 { yySLICE := (*[]TxAccessMode)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].txAccessModeUnion()) } - case 806: + case 809: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4352 +//line sql.y:4373 { yyLOCAL = WithConsistentSnapshot } yyVAL.union = yyLOCAL - case 807: + case 810: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4356 +//line sql.y:4377 { yyLOCAL = ReadWrite } yyVAL.union = yyLOCAL - case 808: + case 811: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TxAccessMode -//line sql.y:4360 +//line sql.y:4381 { yyLOCAL = ReadOnly } yyVAL.union = yyLOCAL - case 809: + case 812: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4367 +//line sql.y:4388 { yyLOCAL = &Commit{} } yyVAL.union = yyLOCAL - case 810: + case 813: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4373 +//line sql.y:4394 { yyLOCAL = &Rollback{} } yyVAL.union = yyLOCAL - case 811: + case 814: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4377 +//line sql.y:4398 { yyLOCAL = &SRollback{Name: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 812: + case 815: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4382 +//line sql.y:4403 { yyVAL.empty = struct{}{} } - case 813: + case 816: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4384 +//line sql.y:4405 { yyVAL.empty = struct{}{} } - case 814: + case 817: yyDollar = yyS[yypt-0 : yypt+1] 
-//line sql.y:4387 +//line sql.y:4408 { yyVAL.empty = struct{}{} } - case 815: + case 818: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4389 +//line sql.y:4410 { yyVAL.empty = struct{}{} } - case 816: + case 819: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4393 +//line sql.y:4414 { yyLOCAL = &Savepoint{Name: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 817: + case 820: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4399 +//line sql.y:4420 { yyLOCAL = &Release{Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 818: + case 821: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4404 +//line sql.y:4425 { yyLOCAL = EmptyType } yyVAL.union = yyLOCAL - case 819: + case 822: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4408 +//line sql.y:4429 { yyLOCAL = JSONType } yyVAL.union = yyLOCAL - case 820: + case 823: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4412 +//line sql.y:4433 { yyLOCAL = TreeType } yyVAL.union = yyLOCAL - case 821: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL ExplainType -//line sql.y:4416 - { - yyLOCAL = VitessType - } - yyVAL.union = yyLOCAL - case 822: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL ExplainType -//line sql.y:4420 - { - yyLOCAL = VTExplainType - } - yyVAL.union = yyLOCAL - case 823: + case 824: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4424 +//line sql.y:4437 { yyLOCAL = TraditionalType } yyVAL.union = yyLOCAL - case 824: + case 825: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ExplainType -//line sql.y:4428 +//line sql.y:4441 { yyLOCAL = AnalyzeType } yyVAL.union = yyLOCAL - case 825: + case 826: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4433 +//line sql.y:4446 { yyLOCAL = PlanVExplainType } yyVAL.union = yyLOCAL - case 826: + case 827: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4437 +//line sql.y:4450 { yyLOCAL 
= PlanVExplainType } yyVAL.union = yyLOCAL - case 827: + case 828: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4441 +//line sql.y:4454 { yyLOCAL = AllVExplainType } yyVAL.union = yyLOCAL - case 828: + case 829: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL VExplainType -//line sql.y:4445 +//line sql.y:4458 { yyLOCAL = QueriesVExplainType } yyVAL.union = yyLOCAL - case 829: + case 830: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4451 +//line sql.y:4464 { yyVAL.str = yyDollar[1].str } - case 830: + case 831: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4455 +//line sql.y:4468 { yyVAL.str = yyDollar[1].str } - case 831: + case 832: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4459 +//line sql.y:4472 { yyVAL.str = yyDollar[1].str } - case 832: + case 833: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4465 +//line sql.y:4478 { yyLOCAL = yyDollar[1].selStmtUnion() } yyVAL.union = yyLOCAL - case 833: + case 834: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4469 +//line sql.y:4482 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 834: + case 835: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4473 +//line sql.y:4486 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 835: + case 836: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Statement -//line sql.y:4477 +//line sql.y:4490 { yyLOCAL = yyDollar[1].statementUnion() } yyVAL.union = yyLOCAL - case 836: + case 837: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4482 +//line sql.y:4495 { yyVAL.str = "" } - case 837: + case 838: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4486 +//line sql.y:4499 { yyVAL.str = yyDollar[1].identifierCI.val } - case 838: + case 839: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4490 +//line sql.y:4503 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 839: + case 840: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4496 +//line sql.y:4509 
{ yyLOCAL = &ExplainTab{Table: yyDollar[3].tableName, Wild: yyDollar[4].str} } yyVAL.union = yyLOCAL - case 840: + case 841: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4500 +//line sql.y:4513 { yyLOCAL = &ExplainStmt{Type: yyDollar[3].explainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()} } yyVAL.union = yyLOCAL - case 841: + case 842: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4506 +//line sql.y:4519 { yyLOCAL = &VExplainStmt{Type: yyDollar[3].vexplainTypeUnion(), Statement: yyDollar[4].statementUnion(), Comments: Comments(yyDollar[2].strs).Parsed()} } yyVAL.union = yyLOCAL - case 842: + case 843: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4512 +//line sql.y:4525 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 843: + case 844: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4516 +//line sql.y:4529 { yyLOCAL = &OtherAdmin{} } yyVAL.union = yyLOCAL - case 844: + case 845: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4522 +//line sql.y:4535 { yyLOCAL = &LockTables{Tables: yyDollar[3].tableAndLockTypesUnion()} } yyVAL.union = yyLOCAL - case 845: + case 846: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableAndLockTypes -//line sql.y:4528 +//line sql.y:4541 { yyLOCAL = TableAndLockTypes{yyDollar[1].tableAndLockTypeUnion()} } yyVAL.union = yyLOCAL - case 846: + case 847: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4532 +//line sql.y:4545 { yySLICE := (*TableAndLockTypes)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableAndLockTypeUnion()) } - case 847: + case 848: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *TableAndLockType -//line sql.y:4538 +//line sql.y:4551 { yyLOCAL = &TableAndLockType{Table: yyDollar[1].aliasedTableNameUnion(), Lock: yyDollar[2].lockTypeUnion()} } yyVAL.union = yyLOCAL - case 848: + case 849: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LockType 
-//line sql.y:4544 +//line sql.y:4557 { yyLOCAL = Read } yyVAL.union = yyLOCAL - case 849: + case 850: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL LockType -//line sql.y:4548 +//line sql.y:4561 { yyLOCAL = ReadLocal } yyVAL.union = yyLOCAL - case 850: + case 851: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LockType -//line sql.y:4552 +//line sql.y:4565 { yyLOCAL = Write } yyVAL.union = yyLOCAL - case 851: + case 852: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL LockType -//line sql.y:4556 +//line sql.y:4569 { yyLOCAL = LowPriorityWrite } yyVAL.union = yyLOCAL - case 852: + case 853: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Statement -//line sql.y:4562 +//line sql.y:4575 { yyLOCAL = &UnlockTables{} } yyVAL.union = yyLOCAL - case 853: + case 854: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4568 +//line sql.y:4581 { yyLOCAL = &RevertMigration{Comments: Comments(yyDollar[2].strs).Parsed(), UUID: string(yyDollar[4].str)} } yyVAL.union = yyLOCAL - case 854: + case 855: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4574 +//line sql.y:4587 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), FlushOptions: yyDollar[3].strs} } yyVAL.union = yyLOCAL - case 855: + case 856: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:4578 +//line sql.y:4591 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion()} } yyVAL.union = yyLOCAL - case 856: + case 857: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:4582 +//line sql.y:4595 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), WithLock: true} } yyVAL.union = yyLOCAL - case 857: + case 858: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4586 +//line sql.y:4599 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion()} } yyVAL.union = yyLOCAL - case 858: + case 859: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Statement -//line sql.y:4590 +//line sql.y:4603 { yyLOCAL = &Flush{IsLocal: 
yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), WithLock: true} } yyVAL.union = yyLOCAL - case 859: + case 860: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Statement -//line sql.y:4594 +//line sql.y:4607 { yyLOCAL = &Flush{IsLocal: yyDollar[2].booleanUnion(), TableNames: yyDollar[4].tableNamesUnion(), ForExport: true} } yyVAL.union = yyLOCAL - case 860: + case 861: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4600 +//line sql.y:4613 { yyVAL.strs = []string{yyDollar[1].str} } - case 861: + case 862: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4604 +//line sql.y:4617 { yyVAL.strs = append(yyDollar[1].strs, yyDollar[3].str) } - case 862: - yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4610 - { - yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) - } case 863: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4614 +//line sql.y:4623 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 864: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4618 +//line sql.y:4627 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 865: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4622 +//line sql.y:4631 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 866: - yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4626 + yyDollar = yyS[yypt-2 : yypt+1] +//line sql.y:4635 { - yyVAL.str = string(yyDollar[1].str) + yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } case 867: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4630 +//line sql.y:4639 { yyVAL.str = string(yyDollar[1].str) } case 868: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4634 +//line sql.y:4643 { yyVAL.str = string(yyDollar[1].str) } case 869: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:4647 + { + yyVAL.str = string(yyDollar[1].str) + } + case 870: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4638 +//line sql.y:4651 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) + 
yyDollar[3].str } - case 870: + case 871: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4642 +//line sql.y:4655 { yyVAL.str = string(yyDollar[1].str) + " " + string(yyDollar[2].str) } - case 871: + case 872: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4646 +//line sql.y:4659 { yyVAL.str = string(yyDollar[1].str) } - case 872: + case 873: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4650 +//line sql.y:4663 { yyVAL.str = string(yyDollar[1].str) } - case 873: + case 874: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4654 +//line sql.y:4667 { yyVAL.str = string(yyDollar[1].str) } - case 874: + case 875: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4659 +//line sql.y:4672 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 875: + case 876: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4663 +//line sql.y:4676 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 876: + case 877: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4667 +//line sql.y:4680 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 877: + case 878: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4672 +//line sql.y:4685 { yyVAL.str = "" } - case 878: + case 879: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4676 +//line sql.y:4689 { yyVAL.str = " " + string(yyDollar[1].str) + " " + string(yyDollar[2].str) + " " + yyDollar[3].identifierCI.String() } - case 879: + case 880: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4681 +//line sql.y:4694 { setAllowComments(yylex, true) } - case 880: + case 881: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4685 +//line sql.y:4698 { yyVAL.strs = yyDollar[2].strs setAllowComments(yylex, false) } - case 881: + case 882: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4691 +//line sql.y:4704 { yyVAL.strs = nil } - case 882: + case 883: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4695 +//line sql.y:4708 { yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str) } - case 883: + case 884: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool 
-//line sql.y:4701 +//line sql.y:4714 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 884: + case 885: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:4705 +//line sql.y:4718 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 885: + case 886: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL bool -//line sql.y:4709 +//line sql.y:4722 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 886: + case 887: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4714 +//line sql.y:4727 { yyVAL.str = "" } - case 887: + case 888: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4718 +//line sql.y:4731 { yyVAL.str = SQLNoCacheStr } - case 888: + case 889: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4722 +//line sql.y:4735 { yyVAL.str = SQLCacheStr } - case 889: + case 890: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:4727 +//line sql.y:4740 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 890: + case 891: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4731 +//line sql.y:4744 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 891: + case 892: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:4735 +//line sql.y:4748 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 892: + case 893: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4741 +//line sql.y:4754 { yyLOCAL = &PrepareStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Statement: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 893: + case 894: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:4745 +//line sql.y:4758 { yyLOCAL = &PrepareStmt{ Name: yyDollar[3].identifierCI, @@ -16602,595 +16763,573 @@ yydefault: } } yyVAL.union = yyLOCAL - case 894: + case 895: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4755 +//line sql.y:4768 { yyLOCAL = &ExecuteStmt{Name: yyDollar[3].identifierCI, Comments: Comments(yyDollar[2].strs).Parsed(), Arguments: yyDollar[4].variablesUnion()} } yyVAL.union = 
yyLOCAL - case 895: + case 896: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4760 +//line sql.y:4773 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 896: + case 897: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4764 +//line sql.y:4777 { yyLOCAL = yyDollar[2].variablesUnion() } yyVAL.union = yyLOCAL - case 897: + case 898: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4770 +//line sql.y:4783 { yyLOCAL = &DeallocateStmt{Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 898: + case 899: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Statement -//line sql.y:4774 +//line sql.y:4787 { yyLOCAL = &DeallocateStmt{Comments: Comments(yyDollar[2].strs).Parsed(), Name: yyDollar[4].identifierCI} } yyVAL.union = yyLOCAL - case 899: - yyDollar = yyS[yypt-0 : yypt+1] - var yyLOCAL SelectExprs -//line sql.y:4779 - { - yyLOCAL = nil - } - yyVAL.union = yyLOCAL case 900: - yyDollar = yyS[yypt-1 : yypt+1] - var yyLOCAL SelectExprs -//line sql.y:4783 + yyDollar = yyS[yypt-0 : yypt+1] +//line sql.y:4792 { - yyLOCAL = yyDollar[1].selectExprsUnion() + yyVAL.strs = nil } - yyVAL.union = yyLOCAL case 901: - yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4788 + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:4796 { - yyVAL.strs = nil + yyVAL.strs = yyDollar[1].strs } case 902: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4792 +//line sql.y:4802 { yyVAL.strs = []string{yyDollar[1].str} } case 903: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4796 - { // TODO: This is a hack since I couldn't get it to work in a nicer way. 
I got 'conflicts: 8 shift/reduce' - yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str} - } - case 904: - yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4800 +//line sql.y:4806 { - yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str} - } - case 905: - yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:4804 - { - yyVAL.strs = []string{yyDollar[1].str, yyDollar[2].str, yyDollar[3].str, yyDollar[4].str} + yyVAL.strs = append(yyDollar[1].strs, yyDollar[2].str) } - case 906: + case 904: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4810 +//line sql.y:4812 { yyVAL.str = SQLNoCacheStr } - case 907: + case 905: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4814 +//line sql.y:4816 { yyVAL.str = SQLCacheStr } - case 908: + case 906: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4818 +//line sql.y:4820 { yyVAL.str = DistinctStr } - case 909: + case 907: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4822 +//line sql.y:4824 { yyVAL.str = DistinctStr } - case 910: + case 908: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4826 +//line sql.y:4828 { yyVAL.str = StraightJoinHint } - case 911: + case 909: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4830 +//line sql.y:4832 { yyVAL.str = SQLCalcFoundRowsStr } - case 912: + case 910: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4834 +//line sql.y:4836 { yyVAL.str = AllStr // These are not picked up by NewSelect, and so ALL will be dropped. 
But this is OK, since it's redundant anyway } - case 913: + case 911: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExprs -//line sql.y:4840 +//line sql.y:4842 { yyLOCAL = SelectExprs{yyDollar[1].selectExprUnion()} } yyVAL.union = yyLOCAL - case 914: + case 912: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4844 +//line sql.y:4846 { yySLICE := (*SelectExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].selectExprUnion()) } - case 915: + case 913: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4850 +//line sql.y:4852 { yyLOCAL = &StarExpr{} } yyVAL.union = yyLOCAL - case 916: + case 914: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4854 +//line sql.y:4856 { yyLOCAL = &AliasedExpr{Expr: yyDollar[1].exprUnion(), As: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 917: + case 915: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4858 +//line sql.y:4860 { yyLOCAL = &StarExpr{TableName: TableName{Name: yyDollar[1].identifierCS}} } yyVAL.union = yyLOCAL - case 918: + case 916: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL SelectExpr -//line sql.y:4862 +//line sql.y:4864 { yyLOCAL = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}} } yyVAL.union = yyLOCAL - case 919: + case 917: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:4867 +//line sql.y:4869 { yyVAL.identifierCI = IdentifierCI{} } - case 920: + case 918: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4871 +//line sql.y:4873 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 921: + case 919: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:4875 +//line sql.y:4877 { yyVAL.identifierCI = yyDollar[2].identifierCI } - case 923: + case 921: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:4882 +//line sql.y:4884 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 924: + case 922: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4887 +//line 
sql.y:4889 { yyLOCAL = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewIdentifierCS("dual")}}} } yyVAL.union = yyLOCAL - case 925: + case 923: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4891 +//line sql.y:4893 { yyLOCAL = yyDollar[1].tableExprsUnion() } yyVAL.union = yyLOCAL - case 926: + case 924: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4897 +//line sql.y:4899 { yyLOCAL = yyDollar[2].tableExprsUnion() } yyVAL.union = yyLOCAL - case 927: + case 925: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExprs -//line sql.y:4903 +//line sql.y:4905 { yyLOCAL = TableExprs{yyDollar[1].tableExprUnion()} } yyVAL.union = yyLOCAL - case 928: + case 926: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4907 +//line sql.y:4909 { yySLICE := (*TableExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].tableExprUnion()) } - case 931: + case 929: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4917 +//line sql.y:4919 { yyLOCAL = yyDollar[1].aliasedTableNameUnion() } yyVAL.union = yyLOCAL - case 932: + case 930: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4921 +//line sql.y:4923 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].derivedTableUnion(), As: yyDollar[3].identifierCS, Columns: yyDollar[4].columnsUnion()} } yyVAL.union = yyLOCAL - case 933: + case 931: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4925 +//line sql.y:4927 { yyLOCAL = &ParenTableExpr{Exprs: yyDollar[2].tableExprsUnion()} } yyVAL.union = yyLOCAL - case 934: + case 932: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TableExpr -//line sql.y:4929 +//line sql.y:4931 { yyLOCAL = yyDollar[1].tableExprUnion() } yyVAL.union = yyLOCAL - case 935: + case 933: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *DerivedTable -//line sql.y:4935 +//line sql.y:4937 { yyLOCAL = &DerivedTable{Lateral: false, Select: yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 936: + case 934: yyDollar = 
yyS[yypt-2 : yypt+1] var yyLOCAL *DerivedTable -//line sql.y:4939 +//line sql.y:4941 { yyLOCAL = &DerivedTable{Lateral: true, Select: yyDollar[2].selStmtUnion()} } yyVAL.union = yyLOCAL - case 937: + case 935: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *AliasedTableExpr -//line sql.y:4945 +//line sql.y:4947 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].identifierCS, Hints: yyDollar[3].indexHintsUnion()} } yyVAL.union = yyLOCAL - case 938: + case 936: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL *AliasedTableExpr -//line sql.y:4949 +//line sql.y:4951 { yyLOCAL = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitionsUnion(), As: yyDollar[6].identifierCS, Hints: yyDollar[7].indexHintsUnion()} } yyVAL.union = yyLOCAL - case 939: + case 937: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:4954 +//line sql.y:4956 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 940: + case 938: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:4958 +//line sql.y:4960 { yyLOCAL = yyDollar[2].columnsUnion() } yyVAL.union = yyLOCAL - case 941: + case 939: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Columns -//line sql.y:4963 +//line sql.y:4965 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 942: + case 940: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4967 +//line sql.y:4969 { yyLOCAL = yyDollar[1].columnsUnion() } yyVAL.union = yyLOCAL - case 943: + case 941: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4973 +//line sql.y:4975 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 944: + case 942: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4977 +//line sql.y:4979 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 945: + case 943: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*Variable -//line sql.y:4983 +//line sql.y:4985 { yyLOCAL = []*Variable{yyDollar[1].variableUnion()} } 
yyVAL.union = yyLOCAL - case 946: + case 944: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:4987 +//line sql.y:4989 { yySLICE := (*[]*Variable)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].variableUnion()) } - case 947: + case 945: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4993 +//line sql.y:4995 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 948: + case 946: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:4997 +//line sql.y:4999 { yyLOCAL = Columns{NewIdentifierCI(string(yyDollar[1].str))} } yyVAL.union = yyLOCAL - case 949: + case 947: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5001 +//line sql.y:5003 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 950: + case 948: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5005 +//line sql.y:5007 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, NewIdentifierCI(string(yyDollar[3].str))) } - case 951: + case 949: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Partitions -//line sql.y:5011 +//line sql.y:5013 { yyLOCAL = Partitions{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 952: + case 950: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5015 +//line sql.y:5017 { yySLICE := (*Partitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 953: + case 951: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5028 +//line sql.y:5030 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 954: + case 952: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5032 +//line sql.y:5034 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: 
yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 955: + case 953: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5036 +//line sql.y:5038 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion(), Condition: yyDollar[4].joinCondition} } yyVAL.union = yyLOCAL - case 956: + case 954: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL TableExpr -//line sql.y:5040 +//line sql.y:5042 { yyLOCAL = &JoinTableExpr{LeftExpr: yyDollar[1].tableExprUnion(), Join: yyDollar[2].joinTypeUnion(), RightExpr: yyDollar[3].tableExprUnion()} } yyVAL.union = yyLOCAL - case 957: + case 955: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5046 +//line sql.y:5048 { yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()} } - case 958: + case 956: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:5048 +//line sql.y:5050 { yyVAL.joinCondition = &JoinCondition{Using: yyDollar[3].columnsUnion()} } - case 959: + case 957: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5052 +//line sql.y:5054 { yyVAL.joinCondition = &JoinCondition{} } - case 960: + case 958: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5054 +//line sql.y:5056 { yyVAL.joinCondition = yyDollar[1].joinCondition } - case 961: + case 959: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5058 +//line sql.y:5060 { yyVAL.joinCondition = &JoinCondition{} } - case 962: + case 960: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5060 +//line sql.y:5062 { yyVAL.joinCondition = &JoinCondition{On: yyDollar[2].exprUnion()} } - case 963: + case 961: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5063 +//line sql.y:5065 { yyVAL.empty = struct{}{} } - case 964: + case 962: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5065 +//line sql.y:5067 { yyVAL.empty = struct{}{} } - case 965: + case 963: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5068 +//line sql.y:5070 { yyVAL.identifierCS = NewIdentifierCS("") } - case 966: + case 964: yyDollar = 
yyS[yypt-1 : yypt+1] -//line sql.y:5072 +//line sql.y:5074 { yyVAL.identifierCS = yyDollar[1].identifierCS } - case 967: + case 965: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5076 +//line sql.y:5078 { yyVAL.identifierCS = yyDollar[2].identifierCS } - case 969: + case 967: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5083 +//line sql.y:5085 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 970: + case 968: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL JoinType -//line sql.y:5089 +//line sql.y:5091 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 971: + case 969: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5093 +//line sql.y:5095 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 972: + case 970: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5097 +//line sql.y:5099 { yyLOCAL = NormalJoinType } yyVAL.union = yyLOCAL - case 973: + case 971: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL JoinType -//line sql.y:5103 +//line sql.y:5105 { yyLOCAL = StraightJoinType } yyVAL.union = yyLOCAL - case 974: + case 972: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5109 +//line sql.y:5111 { yyLOCAL = LeftJoinType } yyVAL.union = yyLOCAL - case 975: + case 973: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL JoinType -//line sql.y:5113 +//line sql.y:5115 { yyLOCAL = LeftJoinType } yyVAL.union = yyLOCAL - case 976: + case 974: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5117 +//line sql.y:5119 { yyLOCAL = RightJoinType } yyVAL.union = yyLOCAL - case 977: + case 975: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL JoinType -//line sql.y:5121 +//line sql.y:5123 { yyLOCAL = RightJoinType } yyVAL.union = yyLOCAL - case 978: + case 976: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5127 +//line sql.y:5129 { yyLOCAL = NaturalJoinType } yyVAL.union = yyLOCAL - case 979: + case 977: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL JoinType -//line sql.y:5131 
+//line sql.y:5133 { if yyDollar[2].joinTypeUnion() == LeftJoinType { yyLOCAL = NaturalLeftJoinType @@ -17199,103 +17338,119 @@ yydefault: } } yyVAL.union = yyLOCAL - case 980: + case 978: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5141 +//line sql.y:5143 { yyVAL.tableName = yyDollar[2].tableName } - case 981: + case 979: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5145 +//line sql.y:5147 { yyVAL.tableName = yyDollar[1].tableName } - case 982: + case 980: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5151 +//line sql.y:5153 { yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS} } - case 983: + case 981: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5155 +//line sql.y:5157 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS} } - case 984: + case 982: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5161 +//line sql.y:5163 { yyVAL.tableName = TableName{Name: yyDollar[1].identifierCS} } - case 985: + case 983: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5166 +//line sql.y:5168 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 986: + case 984: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5170 +//line sql.y:5172 { yyLOCAL = yyDollar[1].indexHintsUnion() } yyVAL.union = yyLOCAL - case 987: + case 985: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IndexHints -//line sql.y:5176 +//line sql.y:5178 { yyLOCAL = IndexHints{yyDollar[1].indexHintUnion()} } yyVAL.union = yyLOCAL - case 988: + case 986: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:5180 +//line sql.y:5182 { yySLICE := (*IndexHints)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].indexHintUnion()) } - case 989: + case 987: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5186 +//line sql.y:5188 { yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 990: + case 988: yyDollar = yyS[yypt-5 : 
yypt+1] var yyLOCAL *IndexHint -//line sql.y:5190 +//line sql.y:5192 { yyLOCAL = &IndexHint{Type: UseOp, ForType: yyDollar[3].indexHintForTypeUnion()} } yyVAL.union = yyLOCAL - case 991: + case 989: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5194 +//line sql.y:5196 { yyLOCAL = &IndexHint{Type: IgnoreOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL - case 992: + case 990: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *IndexHint -//line sql.y:5198 +//line sql.y:5200 { yyLOCAL = &IndexHint{Type: ForceOp, ForType: yyDollar[3].indexHintForTypeUnion(), Indexes: yyDollar[5].columnsUnion()} } yyVAL.union = yyLOCAL + case 991: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *IndexHint +//line sql.y:5204 + { + yyLOCAL = &IndexHint{Type: UseVindexOp, Indexes: yyDollar[4].columnsUnion()} + } + yyVAL.union = yyLOCAL + case 992: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *IndexHint +//line sql.y:5208 + { + yyLOCAL = &IndexHint{Type: IgnoreVindexOp, Indexes: yyDollar[4].columnsUnion()} + } + yyVAL.union = yyLOCAL case 993: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5203 +//line sql.y:5213 { yyLOCAL = NoForType } @@ -17303,7 +17458,7 @@ yydefault: case 994: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5207 +//line sql.y:5217 { yyLOCAL = JoinForType } @@ -17311,7 +17466,7 @@ yydefault: case 995: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5211 +//line sql.y:5221 { yyLOCAL = OrderByForType } @@ -17319,7 +17474,7 @@ yydefault: case 996: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL IndexHintForType -//line sql.y:5215 +//line sql.y:5225 { yyLOCAL = GroupByForType } @@ -17327,7 +17482,7 @@ yydefault: case 997: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:5221 +//line sql.y:5231 { yyLOCAL = nil } @@ -17335,7 +17490,7 @@ yydefault: case 998: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr 
-//line sql.y:5225 +//line sql.y:5235 { yyLOCAL = yyDollar[2].exprUnion() } @@ -17343,7 +17498,7 @@ yydefault: case 999: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5232 +//line sql.y:5242 { yyLOCAL = &OrExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } @@ -17351,7 +17506,7 @@ yydefault: case 1000: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5236 +//line sql.y:5246 { yyLOCAL = &XorExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } @@ -17359,7 +17514,7 @@ yydefault: case 1001: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5240 +//line sql.y:5250 { yyLOCAL = &AndExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].exprUnion()} } @@ -17367,7 +17522,7 @@ yydefault: case 1002: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5244 +//line sql.y:5254 { yyLOCAL = &NotExpr{Expr: yyDollar[2].exprUnion()} } @@ -17375,7 +17530,7 @@ yydefault: case 1003: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5248 +//line sql.y:5258 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: yyDollar[3].isExprOperatorUnion()} } @@ -17383,7 +17538,7 @@ yydefault: case 1004: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5252 +//line sql.y:5262 { yyLOCAL = yyDollar[1].exprUnion() } @@ -17391,7 +17546,7 @@ yydefault: case 1005: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5256 +//line sql.y:5266 { yyLOCAL = &AssignmentExpr{Left: yyDollar[1].variableUnion(), Right: yyDollar[3].exprUnion()} } @@ -17399,417 +17554,427 @@ yydefault: case 1006: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5260 +//line sql.y:5270 { yyLOCAL = &MemberOfExpr{Value: yyDollar[1].exprUnion(), JSONArr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL case 1007: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:5276 + { + } + case 1008: + yyDollar = yyS[yypt-1 : yypt+1] +//line sql.y:5279 + { + } + case 1009: yyDollar = yyS[yypt-3 : yypt+1] var 
yyLOCAL Expr -//line sql.y:5266 +//line sql.y:5284 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNullOp} } yyVAL.union = yyLOCAL - case 1008: + case 1010: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5270 +//line sql.y:5288 { yyLOCAL = &IsExpr{Left: yyDollar[1].exprUnion(), Right: IsNotNullOp} } yyVAL.union = yyLOCAL - case 1009: + case 1011: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5274 +//line sql.y:5292 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: yyDollar[2].comparisonExprOperatorUnion(), Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1010: + case 1012: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5278 +//line sql.y:5296 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1011: + case 1013: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5284 +//line sql.y:5302 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: InOp, Right: yyDollar[3].colTupleUnion()} } yyVAL.union = yyLOCAL - case 1012: + case 1014: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5288 +//line sql.y:5306 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotInOp, Right: yyDollar[4].colTupleUnion()} } yyVAL.union = yyLOCAL - case 1013: + case 1015: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5292 +//line sql.y:5310 { yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: true, From: yyDollar[3].exprUnion(), To: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1014: + case 1016: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5296 +//line sql.y:5314 { yyLOCAL = &BetweenExpr{Left: yyDollar[1].exprUnion(), IsBetween: false, From: yyDollar[4].exprUnion(), To: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1015: + case 1017: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5300 +//line sql.y:5318 { yyLOCAL = &ComparisonExpr{Left: 
yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1016: + case 1018: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5304 +//line sql.y:5322 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1017: + case 1019: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5308 +//line sql.y:5326 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: LikeOp, Right: yyDollar[3].exprUnion(), Escape: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1018: + case 1020: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5312 +//line sql.y:5330 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotLikeOp, Right: yyDollar[4].exprUnion(), Escape: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1019: + case 1021: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5316 +//line sql.y:5334 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: RegexpOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1020: + case 1022: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5320 +//line sql.y:5338 { yyLOCAL = &ComparisonExpr{Left: yyDollar[1].exprUnion(), Operator: NotRegexpOp, Right: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1021: + case 1023: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5324 +//line sql.y:5342 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1022: + case 1024: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5330 +//line sql.y:5348 { } - case 1023: + case 1025: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5333 +//line sql.y:5351 { } - case 1024: + case 1026: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5339 +//line sql.y:5357 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitOrOp, Right: yyDollar[3].exprUnion()} 
} yyVAL.union = yyLOCAL - case 1025: + case 1027: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5343 +//line sql.y:5361 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitAndOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1026: + case 1028: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5347 +//line sql.y:5365 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftLeftOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1027: + case 1029: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5351 +//line sql.y:5369 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ShiftRightOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1028: + case 1030: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5355 +//line sql.y:5373 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: PlusOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1029: + case 1031: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5359 +//line sql.y:5377 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MinusOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1030: + case 1032: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5363 +//line sql.y:5381 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Interval: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1031: + case 1033: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5367 +//line sql.y:5385 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: yyDollar[1].exprUnion(), Unit: yyDollar[5].intervalTypeUnion(), Interval: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1032: + case 1034: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5371 +//line sql.y:5389 { yyLOCAL = 
&BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: MultOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1033: + case 1035: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5375 +//line sql.y:5393 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: DivOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1034: + case 1036: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5379 +//line sql.y:5397 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1035: + case 1037: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5383 +//line sql.y:5401 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: IntDivOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1036: + case 1038: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5387 +//line sql.y:5405 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: ModOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1037: + case 1039: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5391 +//line sql.y:5409 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: BitXorOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1038: + case 1040: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5395 +//line sql.y:5413 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1039: + case 1041: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5401 +//line sql.y:5419 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1040: + case 1042: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5405 +//line sql.y:5423 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1041: + case 1043: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5409 +//line sql.y:5427 { yyLOCAL = yyDollar[1].exprUnion() } 
yyVAL.union = yyLOCAL - case 1042: + case 1044: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5413 +//line sql.y:5431 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1043: + case 1045: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5417 +//line sql.y:5435 { yyLOCAL = &CollateExpr{Expr: yyDollar[1].exprUnion(), Collation: yyDollar[3].str} } yyVAL.union = yyLOCAL - case 1044: + case 1046: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5421 +//line sql.y:5439 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1045: + case 1047: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5425 +//line sql.y:5443 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1046: + case 1048: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5429 +//line sql.y:5447 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 1047: + case 1049: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5433 +//line sql.y:5451 { yyLOCAL = yyDollar[2].exprUnion() // TODO: do we really want to ignore unary '+' before any kind of literals? 
} yyVAL.union = yyLOCAL - case 1048: + case 1050: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5437 +//line sql.y:5455 { yyLOCAL = &UnaryExpr{Operator: UMinusOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1049: + case 1051: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5441 +//line sql.y:5459 { yyLOCAL = &UnaryExpr{Operator: TildaOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1050: + case 1052: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5445 +//line sql.y:5463 { yyLOCAL = &UnaryExpr{Operator: BangOp, Expr: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1051: + case 1053: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5449 +//line sql.y:5467 { yyLOCAL = yyDollar[1].subqueryUnion() } yyVAL.union = yyLOCAL - case 1052: + case 1054: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5453 +//line sql.y:5471 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1053: + case 1055: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5457 +//line sql.y:5475 { yyLOCAL = &ExistsExpr{Subquery: yyDollar[2].subqueryUnion()} } yyVAL.union = yyLOCAL - case 1054: + case 1056: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:5461 +//line sql.y:5479 { yyLOCAL = &MatchExpr{Columns: yyDollar[2].colNamesUnion(), Expr: yyDollar[5].exprUnion(), Option: yyDollar[6].matchExprOptionUnion()} } yyVAL.union = yyLOCAL - case 1055: + case 1057: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:5465 +//line sql.y:5483 { yyLOCAL = &CastExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion(), Array: yyDollar[6].booleanUnion()} } yyVAL.union = yyLOCAL - case 1056: + case 1058: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5469 +//line sql.y:5487 { yyLOCAL = &ConvertExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1057: + case 1059: 
yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5473 +//line sql.y:5491 { yyLOCAL = &ConvertUsingExpr{Expr: yyDollar[3].exprUnion(), Type: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 1058: + case 1060: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5477 +//line sql.y:5495 { // From: https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#operator_binary // To convert a string expression to a binary string, these constructs are equivalent: @@ -17818,3169 +17983,3194 @@ yydefault: yyLOCAL = &ConvertExpr{Expr: yyDollar[2].exprUnion(), Type: &ConvertType{Type: yyDollar[1].str}} } yyVAL.union = yyLOCAL - case 1059: + case 1061: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5485 +//line sql.y:5503 { yyLOCAL = &Default{ColName: yyDollar[2].str} } yyVAL.union = yyLOCAL - case 1060: + case 1062: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5489 +//line sql.y:5507 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion(), Interval: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1061: + case 1063: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5493 +//line sql.y:5511 { yyLOCAL = &IntervalFuncExpr{Expr: yyDollar[3].exprUnion(), Exprs: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1062: + case 1064: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5497 +//line sql.y:5515 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONExtractOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1063: + case 1065: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:5501 +//line sql.y:5519 { yyLOCAL = &BinaryExpr{Left: yyDollar[1].exprUnion(), Operator: JSONUnquoteExtractOp, Right: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1064: + case 1066: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5507 +//line sql.y:5525 { 
yyLOCAL = yyDollar[1].colNamesUnion() } yyVAL.union = yyLOCAL - case 1065: + case 1067: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5511 +//line sql.y:5529 { yyLOCAL = yyDollar[2].colNamesUnion() } yyVAL.union = yyLOCAL - case 1066: + case 1068: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*ColName -//line sql.y:5517 +//line sql.y:5535 { yyLOCAL = []*ColName{yyDollar[1].colNameUnion()} } yyVAL.union = yyLOCAL - case 1067: + case 1069: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5521 +//line sql.y:5539 { yySLICE := (*[]*ColName)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].colNameUnion()) } - case 1068: + case 1070: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5527 +//line sql.y:5545 { yyLOCAL = BothTrimType } yyVAL.union = yyLOCAL - case 1069: + case 1071: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5531 +//line sql.y:5549 { yyLOCAL = LeadingTrimType } yyVAL.union = yyLOCAL - case 1070: + case 1072: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL TrimType -//line sql.y:5535 +//line sql.y:5553 { yyLOCAL = TrailingTrimType } yyVAL.union = yyLOCAL - case 1071: + case 1073: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FrameUnitType -//line sql.y:5541 +//line sql.y:5559 { yyLOCAL = FrameRowsType } yyVAL.union = yyLOCAL - case 1072: + case 1074: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FrameUnitType -//line sql.y:5545 +//line sql.y:5563 { yyLOCAL = FrameRangeType } yyVAL.union = yyLOCAL - case 1073: + case 1075: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5552 +//line sql.y:5570 { yyLOCAL = CumeDistExprType } yyVAL.union = yyLOCAL - case 1074: + case 1076: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5556 +//line sql.y:5574 { yyLOCAL = DenseRankExprType } yyVAL.union = yyLOCAL - case 1075: + case 1077: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5560 +//line 
sql.y:5578 { yyLOCAL = PercentRankExprType } yyVAL.union = yyLOCAL - case 1076: + case 1078: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5564 +//line sql.y:5582 { yyLOCAL = RankExprType } yyVAL.union = yyLOCAL - case 1077: + case 1079: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ArgumentLessWindowExprType -//line sql.y:5568 +//line sql.y:5586 { yyLOCAL = RowNumberExprType } yyVAL.union = yyLOCAL - case 1078: + case 1080: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5574 +//line sql.y:5592 { yyLOCAL = &FramePoint{Type: CurrentRowType} } yyVAL.union = yyLOCAL - case 1079: + case 1081: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5578 +//line sql.y:5596 { yyLOCAL = &FramePoint{Type: UnboundedPrecedingType} } yyVAL.union = yyLOCAL - case 1080: + case 1082: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5582 +//line sql.y:5600 { yyLOCAL = &FramePoint{Type: UnboundedFollowingType} } yyVAL.union = yyLOCAL - case 1081: + case 1083: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5586 +//line sql.y:5604 { yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1082: + case 1084: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5590 +//line sql.y:5608 { yyLOCAL = &FramePoint{Type: ExprPrecedingType, Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1083: + case 1085: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5594 +//line sql.y:5612 { yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1084: + case 1086: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *FramePoint -//line sql.y:5598 +//line sql.y:5616 { yyLOCAL = &FramePoint{Type: ExprFollowingType, Expr: yyDollar[2].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL 
- case 1085: + case 1087: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5603 +//line sql.y:5621 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1086: + case 1088: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5607 +//line sql.y:5625 { yyLOCAL = yyDollar[1].frameClauseUnion() } yyVAL.union = yyLOCAL - case 1087: + case 1089: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5613 +//line sql.y:5631 { yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[2].framePointUnion()} } yyVAL.union = yyLOCAL - case 1088: + case 1090: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *FrameClause -//line sql.y:5617 +//line sql.y:5635 { yyLOCAL = &FrameClause{Unit: yyDollar[1].frameUnitTypeUnion(), Start: yyDollar[3].framePointUnion(), End: yyDollar[5].framePointUnion()} } yyVAL.union = yyLOCAL - case 1089: + case 1091: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:5622 +//line sql.y:5640 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1090: + case 1092: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Exprs -//line sql.y:5626 +//line sql.y:5644 { yyLOCAL = yyDollar[3].exprsUnion() } yyVAL.union = yyLOCAL - case 1091: + case 1093: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5631 +//line sql.y:5649 { + yyVAL.identifierCI = IdentifierCI{} } - case 1092: + case 1094: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:5634 +//line sql.y:5653 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 1093: + case 1095: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *WindowSpecification -//line sql.y:5640 +//line sql.y:5659 { yyLOCAL = &WindowSpecification{Name: yyDollar[1].identifierCI, PartitionClause: yyDollar[2].exprsUnion(), OrderClause: yyDollar[3].orderByUnion(), FrameClause: yyDollar[4].frameClauseUnion()} } yyVAL.union = yyLOCAL - case 1094: + case 1096: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *OverClause -//line sql.y:5646 +//line sql.y:5665 { yyLOCAL = &OverClause{WindowSpec: 
yyDollar[3].windowSpecificationUnion()} } yyVAL.union = yyLOCAL - case 1095: + case 1097: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *OverClause -//line sql.y:5650 +//line sql.y:5669 { yyLOCAL = &OverClause{WindowName: yyDollar[2].identifierCI} } yyVAL.union = yyLOCAL - case 1096: + case 1098: + yyDollar = yyS[yypt-1 : yypt+1] + var yyLOCAL *OverClause +//line sql.y:5675 + { + yyLOCAL = yyDollar[1].overClauseUnion() + } + yyVAL.union = yyLOCAL + case 1099: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *OverClause +//line sql.y:5679 + { + yyLOCAL = nil + } + yyVAL.union = yyLOCAL + case 1100: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *NullTreatmentClause -//line sql.y:5655 +//line sql.y:5684 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1098: + case 1102: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *NullTreatmentClause -//line sql.y:5662 +//line sql.y:5691 { yyLOCAL = &NullTreatmentClause{yyDollar[1].nullTreatmentTypeUnion()} } yyVAL.union = yyLOCAL - case 1099: + case 1103: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL NullTreatmentType -//line sql.y:5668 +//line sql.y:5697 { yyLOCAL = RespectNullsType } yyVAL.union = yyLOCAL - case 1100: + case 1104: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL NullTreatmentType -//line sql.y:5672 +//line sql.y:5701 { yyLOCAL = IgnoreNullsType } yyVAL.union = yyLOCAL - case 1101: + case 1105: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FirstOrLastValueExprType -//line sql.y:5678 +//line sql.y:5707 { yyLOCAL = FirstValueExprType } yyVAL.union = yyLOCAL - case 1102: + case 1106: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL FirstOrLastValueExprType -//line sql.y:5682 +//line sql.y:5711 { yyLOCAL = LastValueExprType } yyVAL.union = yyLOCAL - case 1103: + case 1107: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL FromFirstLastType -//line sql.y:5688 +//line sql.y:5717 { yyLOCAL = FromFirstType } yyVAL.union = yyLOCAL - case 1104: + case 1108: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL FromFirstLastType -//line sql.y:5692 +//line 
sql.y:5721 { yyLOCAL = FromLastType } yyVAL.union = yyLOCAL - case 1105: + case 1109: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *FromFirstLastClause -//line sql.y:5697 +//line sql.y:5726 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1107: + case 1111: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *FromFirstLastClause -//line sql.y:5704 +//line sql.y:5733 { yyLOCAL = &FromFirstLastClause{yyDollar[1].fromFirstLastTypeUnion()} } yyVAL.union = yyLOCAL - case 1108: + case 1112: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LagLeadExprType -//line sql.y:5710 +//line sql.y:5739 { yyLOCAL = LagExprType } yyVAL.union = yyLOCAL - case 1109: + case 1113: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL LagLeadExprType -//line sql.y:5714 +//line sql.y:5743 { yyLOCAL = LeadExprType } yyVAL.union = yyLOCAL - case 1110: + case 1114: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *WindowDefinition -//line sql.y:5720 +//line sql.y:5749 { yyLOCAL = &WindowDefinition{Name: yyDollar[1].identifierCI, WindowSpec: yyDollar[4].windowSpecificationUnion()} } yyVAL.union = yyLOCAL - case 1111: + case 1115: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL WindowDefinitions -//line sql.y:5726 +//line sql.y:5755 { yyLOCAL = WindowDefinitions{yyDollar[1].windowDefinitionUnion()} } yyVAL.union = yyLOCAL - case 1112: + case 1116: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5730 +//line sql.y:5759 { yySLICE := (*WindowDefinitions)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].windowDefinitionUnion()) } - case 1113: + case 1117: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:5736 +//line sql.y:5765 { yyVAL.str = "" } - case 1114: + case 1118: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5740 +//line sql.y:5769 { yyVAL.str = string(yyDollar[2].identifierCI.String()) } - case 1115: + case 1119: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL BoolVal -//line sql.y:5746 +//line sql.y:5775 { yyLOCAL = BoolVal(true) } yyVAL.union = yyLOCAL - case 1116: + case 1120: yyDollar = yyS[yypt-1 : yypt+1] var 
yyLOCAL BoolVal -//line sql.y:5750 +//line sql.y:5779 { yyLOCAL = BoolVal(false) } yyVAL.union = yyLOCAL - case 1117: + case 1121: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5757 +//line sql.y:5786 { yyLOCAL = IsTrueOp } yyVAL.union = yyLOCAL - case 1118: + case 1122: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5761 +//line sql.y:5790 { yyLOCAL = IsNotTrueOp } yyVAL.union = yyLOCAL - case 1119: + case 1123: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5765 +//line sql.y:5794 { yyLOCAL = IsFalseOp } yyVAL.union = yyLOCAL - case 1120: + case 1124: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL IsExprOperator -//line sql.y:5769 +//line sql.y:5798 { yyLOCAL = IsNotFalseOp } yyVAL.union = yyLOCAL - case 1121: + case 1125: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5775 +//line sql.y:5804 { yyLOCAL = EqualOp } yyVAL.union = yyLOCAL - case 1122: + case 1126: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5779 +//line sql.y:5808 { yyLOCAL = LessThanOp } yyVAL.union = yyLOCAL - case 1123: + case 1127: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5783 +//line sql.y:5812 { yyLOCAL = GreaterThanOp } yyVAL.union = yyLOCAL - case 1124: + case 1128: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5787 +//line sql.y:5816 { yyLOCAL = LessEqualOp } yyVAL.union = yyLOCAL - case 1125: + case 1129: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5791 +//line sql.y:5820 { yyLOCAL = GreaterEqualOp } yyVAL.union = yyLOCAL - case 1126: + case 1130: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5795 +//line sql.y:5824 { yyLOCAL = NotEqualOp } yyVAL.union = yyLOCAL - case 1127: + case 1131: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ComparisonExprOperator -//line sql.y:5799 +//line sql.y:5828 { yyLOCAL 
= NullSafeEqualOp } yyVAL.union = yyLOCAL - case 1128: + case 1132: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5805 +//line sql.y:5834 { yyLOCAL = yyDollar[1].valTupleUnion() } yyVAL.union = yyLOCAL - case 1129: + case 1133: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5809 +//line sql.y:5838 { yyLOCAL = yyDollar[1].subqueryUnion() } yyVAL.union = yyLOCAL - case 1130: + case 1134: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ColTuple -//line sql.y:5813 +//line sql.y:5842 { yyLOCAL = ListArg(yyDollar[1].str[2:]) markBindVariable(yylex, yyDollar[1].str[2:]) } yyVAL.union = yyLOCAL - case 1131: + case 1135: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Subquery -//line sql.y:5820 +//line sql.y:5849 { yyLOCAL = &Subquery{yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1132: + case 1136: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Exprs -//line sql.y:5826 +//line sql.y:5855 { yyLOCAL = Exprs{yyDollar[1].exprUnion()} } yyVAL.union = yyLOCAL - case 1133: + case 1137: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:5830 +//line sql.y:5859 { yySLICE := (*Exprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].exprUnion()) } - case 1134: + case 1138: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5840 +//line sql.y:5869 { - yyLOCAL = &FuncExpr{Name: yyDollar[1].identifierCI, Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: yyDollar[1].identifierCI, Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1135: + case 1139: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5844 +//line sql.y:5873 { - yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCI, Exprs: yyDollar[5].selectExprsUnion()} + yyLOCAL = &FuncExpr{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCI, Exprs: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1136: + case 1140: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line 
sql.y:5854 +//line sql.y:5883 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1137: + case 1141: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5858 +//line sql.y:5887 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1138: + case 1142: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5862 +//line sql.y:5891 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1139: + case 1143: + yyDollar = yyS[yypt-8 : yypt+1] + var yyLOCAL Expr +//line sql.y:5895 + { + yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} + } + yyVAL.union = yyLOCAL + case 1144: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5866 +//line sql.y:5899 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1140: + case 1145: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5870 +//line sql.y:5903 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion(), To: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1141: + case 1146: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5874 +//line sql.y:5907 { yyLOCAL = &SubstrExpr{Name: yyDollar[3].exprUnion(), From: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1142: + case 1147: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5878 +//line sql.y:5911 { yyLOCAL = &CaseExpr{Expr: yyDollar[2].exprUnion(), Whens: yyDollar[3].whensUnion(), Else: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 
1143: + case 1148: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:5882 +//line sql.y:5915 { yyLOCAL = &ValuesFuncExpr{Name: yyDollar[3].colNameUnion()} } yyVAL.union = yyLOCAL - case 1144: + case 1149: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:5886 +//line sql.y:5919 { yyLOCAL = &InsertExpr{Str: yyDollar[3].exprUnion(), Pos: yyDollar[5].exprUnion(), Len: yyDollar[7].exprUnion(), NewStr: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1145: + case 1150: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5890 +//line sql.y:5923 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1146: + case 1151: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5901 +//line sql.y:5934 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("utc_date")} } yyVAL.union = yyLOCAL - case 1147: + case 1152: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:5905 +//line sql.y:5938 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1148: + case 1153: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5911 +//line sql.y:5944 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("current_date")} } yyVAL.union = yyLOCAL - case 1149: + case 1154: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5915 +//line sql.y:5948 { yyLOCAL = &FuncExpr{Name: NewIdentifierCI("curdate")} } yyVAL.union = yyLOCAL - case 1150: + case 1155: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5919 +//line sql.y:5952 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("utc_time"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 1151: + case 1156: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5924 +//line sql.y:5957 { yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("curtime"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL - case 1152: - yyDollar = yyS[yypt-2 : yypt+1] - var yyLOCAL Expr -//line sql.y:5929 - { - yyLOCAL 
= &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].integerUnion()} - } - yyVAL.union = yyLOCAL - case 1153: - yyDollar = yyS[yypt-4 : yypt+1] - var yyLOCAL Expr -//line sql.y:5933 - { - yyLOCAL = &CountStar{} - } - yyVAL.union = yyLOCAL - case 1154: - yyDollar = yyS[yypt-5 : yypt+1] - var yyLOCAL Expr -//line sql.y:5937 - { - yyLOCAL = &Count{Distinct: yyDollar[3].booleanUnion(), Args: yyDollar[4].exprsUnion()} - } - yyVAL.union = yyLOCAL - case 1155: - yyDollar = yyS[yypt-5 : yypt+1] - var yyLOCAL Expr -//line sql.y:5941 - { - yyLOCAL = &Max{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} - } - yyVAL.union = yyLOCAL - case 1156: - yyDollar = yyS[yypt-5 : yypt+1] - var yyLOCAL Expr -//line sql.y:5945 - { - yyLOCAL = &Min{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} - } - yyVAL.union = yyLOCAL case 1157: - yyDollar = yyS[yypt-5 : yypt+1] + yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:5949 +//line sql.y:5962 { - yyLOCAL = &Sum{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} + yyLOCAL = &CurTimeFuncExpr{Name: NewIdentifierCI("current_time"), Fsp: yyDollar[2].integerUnion()} } yyVAL.union = yyLOCAL case 1158: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5953 +//line sql.y:5966 { - yyLOCAL = &Avg{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion()} + yyLOCAL = &CountStar{OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL case 1159: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5957 +//line sql.y:5970 { - yyLOCAL = &BitAnd{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &Count{Distinct: yyDollar[3].booleanUnion(), Args: yyDollar[4].exprsUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL case 1160: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5961 +//line sql.y:5974 { - yyLOCAL = &BitOr{Arg: 
yyDollar[3].exprUnion()} + yyLOCAL = &Max{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL case 1161: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5965 +//line sql.y:5978 { - yyLOCAL = &BitXor{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &Min{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL case 1162: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5969 +//line sql.y:5982 { - yyLOCAL = &Std{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &Sum{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL case 1163: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:5973 +//line sql.y:5986 { - yyLOCAL = &StdDev{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &Avg{Distinct: yyDollar[3].booleanUnion(), Arg: yyDollar[4].exprUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL case 1164: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5977 +//line sql.y:5990 { - yyLOCAL = &StdPop{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &BitAnd{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL case 1165: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5981 +//line sql.y:5994 { - yyLOCAL = &StdSamp{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &BitOr{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL case 1166: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5985 +//line sql.y:5998 { - yyLOCAL = &VarPop{Arg: yyDollar[3].exprUnion()} + yyLOCAL = 
&BitXor{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL case 1167: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5989 +//line sql.y:6002 { - yyLOCAL = &VarSamp{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &Std{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL case 1168: - yyDollar = yyS[yypt-4 : yypt+1] + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:5993 +//line sql.y:6006 { - yyLOCAL = &Variance{Arg: yyDollar[3].exprUnion()} + yyLOCAL = &StdDev{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL case 1169: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:6010 + { + yyLOCAL = &StdPop{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1170: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:6014 + { + yyLOCAL = &StdSamp{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1171: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:6018 + { + yyLOCAL = &VarPop{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1172: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:6022 + { + yyLOCAL = &VarSamp{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1173: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL Expr +//line sql.y:6026 + { + yyLOCAL = &Variance{Arg: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} + } + yyVAL.union = yyLOCAL + case 1174: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:5997 +//line sql.y:6030 { yyLOCAL = &GroupConcatExpr{Distinct: yyDollar[3].booleanUnion(), Exprs: yyDollar[4].exprsUnion(), OrderBy: yyDollar[5].orderByUnion(), 
Separator: yyDollar[6].str, Limit: yyDollar[7].limitUnion()} } yyVAL.union = yyLOCAL - case 1170: + case 1175: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6001 +//line sql.y:6034 { yyLOCAL = &AnyValue{Arg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1171: + case 1176: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6005 +//line sql.y:6038 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprTimestampadd, Date: yyDollar[7].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: yyDollar[3].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1172: + case 1177: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6009 +//line sql.y:6042 { yyLOCAL = &TimestampDiffExpr{Unit: yyDollar[3].intervalTypeUnion(), Expr1: yyDollar[5].exprUnion(), Expr2: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1173: + case 1178: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6013 +//line sql.y:6046 { yyLOCAL = &ExtractFuncExpr{IntervalType: yyDollar[3].intervalTypeUnion(), Expr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1174: + case 1179: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:6017 +//line sql.y:6050 { yyLOCAL = &WeightStringFuncExpr{Expr: yyDollar[3].exprUnion(), As: yyDollar[4].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1175: + case 1180: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6021 +//line sql.y:6054 { yyLOCAL = &JSONPrettyExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1176: + case 1181: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6025 +//line sql.y:6058 { yyLOCAL = &JSONStorageFreeExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1177: + case 1182: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6029 +//line sql.y:6062 { yyLOCAL = &JSONStorageSizeExpr{JSONVal: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1178: + case 1183: yyDollar = yyS[yypt-4 : 
yypt+1] var yyLOCAL Expr -//line sql.y:6033 +//line sql.y:6066 { yyLOCAL = &TrimFuncExpr{TrimFuncType: LTrimType, Type: LeadingTrimType, StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1179: + case 1184: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6037 +//line sql.y:6070 { yyLOCAL = &TrimFuncExpr{TrimFuncType: RTrimType, Type: TrailingTrimType, StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1180: + case 1185: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:6041 +//line sql.y:6074 { yyLOCAL = &TrimFuncExpr{Type: yyDollar[3].trimTypeUnion(), TrimArg: yyDollar[4].exprUnion(), StringArg: yyDollar[6].exprUnion()} } yyVAL.union = yyLOCAL - case 1181: + case 1186: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6045 +//line sql.y:6078 { yyLOCAL = &TrimFuncExpr{StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1182: + case 1187: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6049 +//line sql.y:6082 { yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1183: + case 1188: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6053 +//line sql.y:6086 { yyLOCAL = &CharExpr{Exprs: yyDollar[3].exprsUnion(), Charset: yyDollar[5].str} } yyVAL.union = yyLOCAL - case 1184: + case 1189: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6057 +//line sql.y:6090 { yyLOCAL = &TrimFuncExpr{TrimArg: yyDollar[3].exprUnion(), StringArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1185: + case 1190: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6061 +//line sql.y:6094 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1186: + case 1191: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6065 +//line sql.y:6098 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion(), Pos: 
yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1187: + case 1192: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6069 +//line sql.y:6102 { yyLOCAL = &LocateExpr{SubStr: yyDollar[3].exprUnion(), Str: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1188: + case 1193: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6073 +//line sql.y:6106 { yyLOCAL = &LockingFunc{Type: GetLock, Name: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1189: + case 1194: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6077 +//line sql.y:6110 { yyLOCAL = &LockingFunc{Type: IsFreeLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1190: + case 1195: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6081 +//line sql.y:6114 { yyLOCAL = &LockingFunc{Type: IsUsedLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1191: + case 1196: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:6085 +//line sql.y:6118 { yyLOCAL = &LockingFunc{Type: ReleaseAllLocks} } yyVAL.union = yyLOCAL - case 1192: + case 1197: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6089 +//line sql.y:6122 { yyLOCAL = &LockingFunc{Type: ReleaseLock, Name: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1193: + case 1198: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6093 +//line sql.y:6126 { yyLOCAL = &JSONSchemaValidFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1194: + case 1199: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6097 +//line sql.y:6130 { yyLOCAL = &JSONSchemaValidationReportFuncExpr{Schema: yyDollar[3].exprUnion(), Document: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1195: + case 1200: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6101 +//line sql.y:6134 { yyLOCAL = &JSONArrayExpr{Params: 
yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1196: + case 1201: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6105 +//line sql.y:6138 { yyLOCAL = &GeomFormatExpr{FormatType: BinaryFormat, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1197: + case 1202: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6109 +//line sql.y:6142 { yyLOCAL = &GeomFormatExpr{FormatType: BinaryFormat, Geom: yyDollar[3].exprUnion(), AxisOrderOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1198: + case 1203: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6113 +//line sql.y:6146 { yyLOCAL = &GeomFormatExpr{FormatType: TextFormat, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1199: + case 1204: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6117 +//line sql.y:6150 { yyLOCAL = &GeomFormatExpr{FormatType: TextFormat, Geom: yyDollar[3].exprUnion(), AxisOrderOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1200: + case 1205: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6121 +//line sql.y:6154 { yyLOCAL = &GeomPropertyFuncExpr{Property: IsEmpty, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1201: + case 1206: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6125 +//line sql.y:6158 { yyLOCAL = &GeomPropertyFuncExpr{Property: IsSimple, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1202: + case 1207: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6129 +//line sql.y:6162 { yyLOCAL = &GeomPropertyFuncExpr{Property: Dimension, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1203: + case 1208: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6133 +//line sql.y:6166 { yyLOCAL = &GeomPropertyFuncExpr{Property: Envelope, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1204: + case 1209: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6137 
+//line sql.y:6170 { yyLOCAL = &GeomPropertyFuncExpr{Property: GeometryType, Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1205: + case 1210: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6141 +//line sql.y:6174 { yyLOCAL = &PointPropertyFuncExpr{Property: Latitude, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1206: + case 1211: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6145 +//line sql.y:6178 { yyLOCAL = &PointPropertyFuncExpr{Property: Latitude, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1207: + case 1212: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6149 +//line sql.y:6182 { yyLOCAL = &PointPropertyFuncExpr{Property: Longitude, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1208: + case 1213: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6153 +//line sql.y:6186 { yyLOCAL = &PointPropertyFuncExpr{Property: Longitude, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1209: + case 1214: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6157 +//line sql.y:6190 { yyLOCAL = &LinestrPropertyFuncExpr{Property: EndPoint, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1210: + case 1215: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6161 +//line sql.y:6194 { yyLOCAL = &LinestrPropertyFuncExpr{Property: IsClosed, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1211: + case 1216: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6165 +//line sql.y:6198 { yyLOCAL = &LinestrPropertyFuncExpr{Property: Length, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1212: + case 1217: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6169 +//line sql.y:6202 { yyLOCAL = &LinestrPropertyFuncExpr{Property: Length, Linestring: yyDollar[3].exprUnion(), 
PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1213: + case 1218: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6173 +//line sql.y:6206 { yyLOCAL = &LinestrPropertyFuncExpr{Property: NumPoints, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1214: + case 1219: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6177 +//line sql.y:6210 { yyLOCAL = &LinestrPropertyFuncExpr{Property: PointN, Linestring: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1215: + case 1220: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6181 +//line sql.y:6214 { yyLOCAL = &LinestrPropertyFuncExpr{Property: StartPoint, Linestring: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1216: + case 1221: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6185 +//line sql.y:6218 { yyLOCAL = &PointPropertyFuncExpr{Property: XCordinate, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1217: + case 1222: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6189 +//line sql.y:6222 { yyLOCAL = &PointPropertyFuncExpr{Property: XCordinate, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1218: + case 1223: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6193 +//line sql.y:6226 { yyLOCAL = &PointPropertyFuncExpr{Property: YCordinate, Point: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1219: + case 1224: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6197 +//line sql.y:6230 { yyLOCAL = &PointPropertyFuncExpr{Property: YCordinate, Point: yyDollar[3].exprUnion(), ValueToSet: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1220: + case 1225: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6201 +//line sql.y:6234 { yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = 
yyLOCAL - case 1221: + case 1226: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6205 +//line sql.y:6238 { yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1222: + case 1227: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6209 +//line sql.y:6242 { yyLOCAL = &GeomFromTextExpr{Type: GeometryFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1223: + case 1228: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6213 +//line sql.y:6246 { yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1224: + case 1229: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6217 +//line sql.y:6250 { yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1225: + case 1230: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6221 +//line sql.y:6254 { yyLOCAL = &GeomFromTextExpr{Type: GeometryCollectionFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1226: + case 1231: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6225 +//line sql.y:6258 { yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1227: + case 1232: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6229 +//line sql.y:6262 { yyLOCAL = &GeomFromTextExpr{Type: LineStringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1228: + case 1233: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6233 +//line sql.y:6266 { yyLOCAL = &GeomFromTextExpr{Type: 
LineStringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1229: + case 1234: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6237 +//line sql.y:6270 { yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1230: + case 1235: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6241 +//line sql.y:6274 { yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1231: + case 1236: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6245 +//line sql.y:6278 { yyLOCAL = &GeomFromTextExpr{Type: MultiLinestringFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1232: + case 1237: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6249 +//line sql.y:6282 { yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1233: + case 1238: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6253 +//line sql.y:6286 { yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1234: + case 1239: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6257 +//line sql.y:6290 { yyLOCAL = &GeomFromTextExpr{Type: MultiPointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1235: + case 1240: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6261 +//line sql.y:6294 { yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1236: + case 1241: yyDollar = 
yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6265 +//line sql.y:6298 { yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1237: + case 1242: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6269 +//line sql.y:6302 { yyLOCAL = &GeomFromTextExpr{Type: MultiPolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1238: + case 1243: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6273 +//line sql.y:6306 { yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1239: + case 1244: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6277 +//line sql.y:6310 { yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1240: + case 1245: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6281 +//line sql.y:6314 { yyLOCAL = &GeomFromTextExpr{Type: PointFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1241: + case 1246: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6285 +//line sql.y:6318 { yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1242: + case 1247: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6289 +//line sql.y:6322 { yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1243: + case 1248: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6293 +//line sql.y:6326 { yyLOCAL = &GeomFromTextExpr{Type: PolygonFromText, WktText: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), 
AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1244: + case 1249: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6297 +//line sql.y:6330 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1245: + case 1250: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6301 +//line sql.y:6334 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1246: + case 1251: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6305 +//line sql.y:6338 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1247: + case 1252: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6309 +//line sql.y:6342 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1248: + case 1253: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6313 +//line sql.y:6346 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1249: + case 1254: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6317 +//line sql.y:6350 { yyLOCAL = &GeomFromWKBExpr{Type: GeometryCollectionFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1250: + case 1255: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6321 +//line sql.y:6354 { yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1251: + case 1256: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6325 +//line sql.y:6358 { yyLOCAL = 
&GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1252: + case 1257: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6329 +//line sql.y:6362 { yyLOCAL = &GeomFromWKBExpr{Type: LineStringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1253: + case 1258: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6333 +//line sql.y:6366 { yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1254: + case 1259: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6337 +//line sql.y:6370 { yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1255: + case 1260: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6341 +//line sql.y:6374 { yyLOCAL = &GeomFromWKBExpr{Type: MultiLinestringFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1256: + case 1261: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6345 +//line sql.y:6378 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1257: + case 1262: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6349 +//line sql.y:6382 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1258: + case 1263: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6353 +//line sql.y:6386 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 
1259: + case 1264: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6357 +//line sql.y:6390 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1260: + case 1265: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6361 +//line sql.y:6394 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1261: + case 1266: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6365 +//line sql.y:6398 { yyLOCAL = &GeomFromWKBExpr{Type: MultiPolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1262: + case 1267: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6369 +//line sql.y:6402 { yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1263: + case 1268: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6373 +//line sql.y:6406 { yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1264: + case 1269: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6377 +//line sql.y:6410 { yyLOCAL = &GeomFromWKBExpr{Type: PointFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1265: + case 1270: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6381 +//line sql.y:6414 { yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1266: + case 1271: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6385 +//line sql.y:6418 { yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion()} } 
yyVAL.union = yyLOCAL - case 1267: + case 1272: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6389 +//line sql.y:6422 { yyLOCAL = &GeomFromWKBExpr{Type: PolygonFromWKB, WkbBlob: yyDollar[3].exprUnion(), Srid: yyDollar[5].exprUnion(), AxisOrderOpt: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1268: + case 1273: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6393 +//line sql.y:6426 { yyLOCAL = &PolygonPropertyFuncExpr{Property: Area, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1269: + case 1274: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6397 +//line sql.y:6430 { yyLOCAL = &PolygonPropertyFuncExpr{Property: Centroid, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1270: + case 1275: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6401 +//line sql.y:6434 { yyLOCAL = &PolygonPropertyFuncExpr{Property: ExteriorRing, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1271: + case 1276: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6405 +//line sql.y:6438 { yyLOCAL = &PolygonPropertyFuncExpr{Property: InteriorRingN, Polygon: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1272: + case 1277: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6409 +//line sql.y:6442 { yyLOCAL = &PolygonPropertyFuncExpr{Property: NumInteriorRings, Polygon: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1273: + case 1278: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6413 +//line sql.y:6446 { yyLOCAL = &GeomCollPropertyFuncExpr{Property: GeometryN, GeomColl: yyDollar[3].exprUnion(), PropertyDefArg: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1274: + case 1279: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6417 +//line sql.y:6450 { yyLOCAL = &GeomCollPropertyFuncExpr{Property: NumGeometries, GeomColl: yyDollar[3].exprUnion()} } 
yyVAL.union = yyLOCAL - case 1275: + case 1280: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6421 +//line sql.y:6454 { yyLOCAL = &GeoHashFromLatLongExpr{Longitude: yyDollar[3].exprUnion(), Latitude: yyDollar[5].exprUnion(), MaxLength: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1276: + case 1281: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6425 +//line sql.y:6458 { yyLOCAL = &GeoHashFromPointExpr{Point: yyDollar[3].exprUnion(), MaxLength: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1277: + case 1282: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6429 +//line sql.y:6462 { yyLOCAL = &GeomFromGeoHashExpr{GeomType: LatitudeFromHash, GeoHash: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1278: + case 1283: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6433 +//line sql.y:6466 { yyLOCAL = &GeomFromGeoHashExpr{GeomType: LongitudeFromHash, GeoHash: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1279: + case 1284: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6437 +//line sql.y:6470 { yyLOCAL = &GeomFromGeoHashExpr{GeomType: PointFromHash, GeoHash: yyDollar[3].exprUnion(), SridOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1280: + case 1285: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6441 +//line sql.y:6474 { yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1281: + case 1286: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6445 +//line sql.y:6478 { yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion(), HigherDimHandlerOpt: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1282: + case 1287: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6449 +//line sql.y:6482 { yyLOCAL = &GeomFromGeoJSONExpr{GeoJSON: yyDollar[3].exprUnion(), HigherDimHandlerOpt: yyDollar[5].exprUnion(), Srid: yyDollar[7].exprUnion()} } yyVAL.union = 
yyLOCAL - case 1283: + case 1288: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6453 +//line sql.y:6486 { yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1284: + case 1289: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6457 +//line sql.y:6490 { yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion(), MaxDecimalDigits: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1285: + case 1290: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6461 +//line sql.y:6494 { yyLOCAL = &GeoJSONFromGeomExpr{Geom: yyDollar[3].exprUnion(), MaxDecimalDigits: yyDollar[5].exprUnion(), Bitmask: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1286: + case 1291: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6465 +//line sql.y:6498 { yyLOCAL = &JSONObjectExpr{Params: yyDollar[3].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1287: + case 1292: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6469 +//line sql.y:6502 { yyLOCAL = &JSONQuoteExpr{StringArg: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1288: + case 1293: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6473 +//line sql.y:6506 { yyLOCAL = &JSONContainsExpr{Target: yyDollar[3].exprUnion(), Candidate: yyDollar[5].exprsUnion()[0], PathList: yyDollar[5].exprsUnion()[1:]} } yyVAL.union = yyLOCAL - case 1289: + case 1294: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6477 +//line sql.y:6510 { yyLOCAL = &JSONContainsPathExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), PathList: yyDollar[7].exprsUnion()} } yyVAL.union = yyLOCAL - case 1290: + case 1295: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6481 +//line sql.y:6514 { yyLOCAL = &JSONExtractExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1291: + case 1296: yyDollar = yyS[yypt-4 : yypt+1] var 
yyLOCAL Expr -//line sql.y:6485 +//line sql.y:6518 { yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1292: + case 1297: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6489 +//line sql.y:6522 { yyLOCAL = &JSONKeysExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1293: + case 1298: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6493 +//line sql.y:6526 { yyLOCAL = &JSONOverlapsExpr{JSONDoc1: yyDollar[3].exprUnion(), JSONDoc2: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1294: + case 1299: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6497 +//line sql.y:6530 { yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1295: + case 1300: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6501 +//line sql.y:6534 { yyLOCAL = &JSONSearchExpr{JSONDoc: yyDollar[3].exprUnion(), OneOrAll: yyDollar[5].exprUnion(), SearchStr: yyDollar[7].exprUnion(), EscapeChar: yyDollar[9].exprsUnion()[0], PathList: yyDollar[9].exprsUnion()[1:]} } yyVAL.union = yyLOCAL - case 1296: + case 1301: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL Expr -//line sql.y:6505 +//line sql.y:6538 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion()} } yyVAL.union = yyLOCAL - case 1297: + case 1302: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6509 +//line sql.y:6542 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion()} } yyVAL.union = yyLOCAL - case 1298: + case 1303: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6513 +//line sql.y:6546 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), 
Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), ErrorOnResponse: yyDollar[7].jtOnResponseUnion()} } yyVAL.union = yyLOCAL - case 1299: + case 1304: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Expr -//line sql.y:6517 +//line sql.y:6550 { yyLOCAL = &JSONValueExpr{JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion(), ReturningType: yyDollar[6].convertTypeUnion(), EmptyOnResponse: yyDollar[7].jtOnResponseUnion(), ErrorOnResponse: yyDollar[8].jtOnResponseUnion()} } yyVAL.union = yyLOCAL - case 1300: + case 1305: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6521 +//line sql.y:6554 { yyLOCAL = &JSONAttributesExpr{Type: DepthAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1301: + case 1306: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6525 +//line sql.y:6558 { yyLOCAL = &JSONAttributesExpr{Type: ValidAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1302: + case 1307: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6529 +//line sql.y:6562 { yyLOCAL = &JSONAttributesExpr{Type: TypeAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1303: + case 1308: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6533 +//line sql.y:6566 { yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1304: + case 1309: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6537 +//line sql.y:6570 { yyLOCAL = &JSONAttributesExpr{Type: LengthAttributeType, JSONDoc: yyDollar[3].exprUnion(), Path: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1305: + case 1310: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6541 +//line sql.y:6574 { yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayAppendType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1306: + 
case 1311: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6545 +//line sql.y:6578 { yyLOCAL = &JSONValueModifierExpr{Type: JSONArrayInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1307: + case 1312: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6549 +//line sql.y:6582 { yyLOCAL = &JSONValueModifierExpr{Type: JSONInsertType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1308: + case 1313: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6553 +//line sql.y:6586 { yyLOCAL = &JSONValueModifierExpr{Type: JSONReplaceType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1309: + case 1314: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6557 +//line sql.y:6590 { yyLOCAL = &JSONValueModifierExpr{Type: JSONSetType, JSONDoc: yyDollar[3].exprUnion(), Params: yyDollar[5].jsonObjectParamsUnion()} } yyVAL.union = yyLOCAL - case 1310: + case 1315: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6561 +//line sql.y:6594 { yyLOCAL = &JSONValueMergeExpr{Type: JSONMergeType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1311: + case 1316: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6565 +//line sql.y:6598 { yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePatchType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1312: + case 1317: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6569 +//line sql.y:6602 { yyLOCAL = &JSONValueMergeExpr{Type: JSONMergePreserveType, JSONDoc: yyDollar[3].exprUnion(), JSONDocList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1313: + case 1318: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6573 +//line sql.y:6606 { 
yyLOCAL = &JSONRemoveExpr{JSONDoc: yyDollar[3].exprUnion(), PathList: yyDollar[5].exprsUnion()} } yyVAL.union = yyLOCAL - case 1314: + case 1319: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6577 +//line sql.y:6610 { yyLOCAL = &JSONUnquoteExpr{JSONValue: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1315: + case 1320: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6581 +//line sql.y:6614 { yyLOCAL = &MultiPolygonExpr{PolygonParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1316: + case 1321: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6585 +//line sql.y:6618 { yyLOCAL = &MultiPointExpr{PointParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1317: + case 1322: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6589 +//line sql.y:6622 { yyLOCAL = &MultiLinestringExpr{LinestringParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1318: + case 1323: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6593 +//line sql.y:6626 { yyLOCAL = &PolygonExpr{LinestringParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1319: + case 1324: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6597 +//line sql.y:6630 { yyLOCAL = &LineStringExpr{PointParams: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1320: + case 1325: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6601 +//line sql.y:6634 { yyLOCAL = &PointExpr{XCordinate: yyDollar[3].exprUnion(), YCordinate: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1321: + case 1326: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6605 +//line sql.y:6638 { yyLOCAL = &ArgumentLessWindowExpr{Type: yyDollar[1].argumentLessWindowExprTypeUnion(), OverClause: yyDollar[4].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1322: + case 1327: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6609 +//line sql.y:6642 { yyLOCAL = 
&FirstOrLastValueExpr{Type: yyDollar[1].firstOrLastValueExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1323: + case 1328: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Expr -//line sql.y:6613 +//line sql.y:6646 { yyLOCAL = &NtileExpr{N: yyDollar[3].exprUnion(), OverClause: yyDollar[5].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1324: + case 1329: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Expr -//line sql.y:6617 +//line sql.y:6650 { yyLOCAL = &NTHValueExpr{Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), FromFirstLastClause: yyDollar[7].fromFirstLastClauseUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1325: + case 1330: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6621 +//line sql.y:6654 { yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), NullTreatmentClause: yyDollar[5].nullTreatmentClauseUnion(), OverClause: yyDollar[6].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1326: + case 1331: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL Expr -//line sql.y:6625 +//line sql.y:6658 { yyLOCAL = &LagLeadExpr{Type: yyDollar[1].lagLeadExprTypeUnion(), Expr: yyDollar[3].exprUnion(), N: yyDollar[5].exprUnion(), Default: yyDollar[6].exprUnion(), NullTreatmentClause: yyDollar[8].nullTreatmentClauseUnion(), OverClause: yyDollar[9].overClauseUnion()} } yyVAL.union = yyLOCAL - case 1327: + case 1332: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6629 +//line sql.y:6662 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1328: + case 1333: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6633 +//line 
sql.y:6666 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprAdddate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: IntervalNone} } yyVAL.union = yyLOCAL - case 1329: + case 1334: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6637 +//line sql.y:6670 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprDateAdd, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1330: + case 1335: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6641 +//line sql.y:6674 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprDateSub, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1331: + case 1336: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6645 +//line sql.y:6678 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[6].exprUnion(), Unit: yyDollar[7].intervalTypeUnion()} } yyVAL.union = yyLOCAL - case 1332: + case 1337: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6649 +//line sql.y:6682 { yyLOCAL = &IntervalDateExpr{Syntax: IntervalDateExprSubdate, Date: yyDollar[3].exprUnion(), Interval: yyDollar[5].exprUnion(), Unit: IntervalNone} } yyVAL.union = yyLOCAL - case 1337: + case 1342: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6659 +//line sql.y:6692 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1338: + case 1343: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6663 +//line sql.y:6696 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1339: + case 1344: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:6667 +//line sql.y:6700 { yyLOCAL = yyDollar[1].variableUnion() } yyVAL.union = yyLOCAL - case 1340: + case 1345: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line 
sql.y:6671 +//line sql.y:6704 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 1341: + case 1346: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:6676 +//line sql.y:6709 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1342: + case 1347: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:6680 +//line sql.y:6713 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1343: + case 1348: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6686 +//line sql.y:6719 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1344: + case 1349: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6690 +//line sql.y:6723 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1345: + case 1350: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6694 +//line sql.y:6727 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1346: + case 1351: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Expr -//line sql.y:6698 +//line sql.y:6731 { yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion()} } yyVAL.union = yyLOCAL - case 1347: + case 1352: yyDollar = yyS[yypt-14 : yypt+1] var yyLOCAL Expr -//line sql.y:6702 +//line sql.y:6735 { // Match type is kept expression as TRIM( ' m ') is accepted yyLOCAL = &RegexpInstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), ReturnOption: yyDollar[11].exprUnion(), MatchType: 
yyDollar[13].exprUnion()} } yyVAL.union = yyLOCAL - case 1348: + case 1353: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6707 +//line sql.y:6740 { yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1349: + case 1354: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6711 +//line sql.y:6744 { yyLOCAL = &RegexpLikeExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), MatchType: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1350: + case 1355: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6715 +//line sql.y:6748 { yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1351: + case 1356: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6719 +//line sql.y:6752 { yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1352: + case 1357: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Expr -//line sql.y:6723 +//line sql.y:6756 { yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion()} } yyVAL.union = yyLOCAL - case 1353: + case 1358: yyDollar = yyS[yypt-14 : yypt+1] var yyLOCAL Expr -//line sql.y:6727 +//line sql.y:6760 { // Match type is kept expression as TRIM( ' m ') is accepted yyLOCAL = &RegexpReplaceExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Repl: yyDollar[7].exprUnion(), Position: yyDollar[9].exprUnion(), Occurrence: yyDollar[11].exprUnion(), MatchType: yyDollar[13].exprUnion()} } yyVAL.union = yyLOCAL - case 1354: + case 1359: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6732 +//line sql.y:6765 { 
yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1355: + case 1360: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6736 +//line sql.y:6769 { yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1356: + case 1361: yyDollar = yyS[yypt-10 : yypt+1] var yyLOCAL Expr -//line sql.y:6740 +//line sql.y:6773 { yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion()} } yyVAL.union = yyLOCAL - case 1357: + case 1362: yyDollar = yyS[yypt-12 : yypt+1] var yyLOCAL Expr -//line sql.y:6744 +//line sql.y:6777 { // Match type is kept expression as TRIM( ' m ') is accepted yyLOCAL = &RegexpSubstrExpr{Expr: yyDollar[3].exprUnion(), Pattern: yyDollar[5].exprUnion(), Position: yyDollar[7].exprUnion(), Occurrence: yyDollar[9].exprUnion(), MatchType: yyDollar[11].exprUnion()} } yyVAL.union = yyLOCAL - case 1358: + case 1363: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6751 +//line sql.y:6784 { yyLOCAL = &ExtractValueExpr{Fragment: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1359: + case 1364: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6755 +//line sql.y:6788 { yyLOCAL = &UpdateXMLExpr{Target: yyDollar[3].exprUnion(), XPathExpr: yyDollar[5].exprUnion(), NewXML: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1360: + case 1365: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6761 +//line sql.y:6794 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: FormatBytesType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1361: + case 1366: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6765 +//line sql.y:6798 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: 
FormatPicoTimeType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1362: + case 1367: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Expr -//line sql.y:6769 +//line sql.y:6802 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsCurrentThreadIDType} } yyVAL.union = yyLOCAL - case 1363: + case 1368: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6773 +//line sql.y:6806 { yyLOCAL = &PerformanceSchemaFuncExpr{Type: PsThreadIDType, Argument: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1364: + case 1369: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6779 +//line sql.y:6812 { yyLOCAL = >IDFuncExpr{Type: GTIDSubsetType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1365: + case 1370: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6783 +//line sql.y:6816 { yyLOCAL = >IDFuncExpr{Type: GTIDSubtractType, Set1: yyDollar[3].exprUnion(), Set2: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1366: + case 1371: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6787 +//line sql.y:6820 { yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1367: + case 1372: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6791 +//line sql.y:6824 { yyLOCAL = >IDFuncExpr{Type: WaitForExecutedGTIDSetType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 1368: + case 1373: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6795 +//line sql.y:6828 { yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1369: + case 1374: yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL Expr -//line sql.y:6799 +//line sql.y:6832 { yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion()} } yyVAL.union = yyLOCAL - case 
1370: + case 1375: yyDollar = yyS[yypt-8 : yypt+1] var yyLOCAL Expr -//line sql.y:6803 +//line sql.y:6836 { yyLOCAL = >IDFuncExpr{Type: WaitUntilSQLThreadAfterGTIDSType, Set1: yyDollar[3].exprUnion(), Timeout: yyDollar[5].exprUnion(), Channel: yyDollar[7].exprUnion()} } yyVAL.union = yyLOCAL - case 1371: + case 1376: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6808 +//line sql.y:6841 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1372: + case 1377: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:6812 +//line sql.y:6845 { yyLOCAL = yyDollar[2].convertTypeUnion() } yyVAL.union = yyLOCAL - case 1373: + case 1378: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6818 +//line sql.y:6851 { yyLOCAL = IntervalDayHour } yyVAL.union = yyLOCAL - case 1374: + case 1379: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6822 +//line sql.y:6855 { yyLOCAL = IntervalDayMicrosecond } yyVAL.union = yyLOCAL - case 1375: + case 1380: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6826 +//line sql.y:6859 { yyLOCAL = IntervalDayMinute } yyVAL.union = yyLOCAL - case 1376: + case 1381: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6830 +//line sql.y:6863 { yyLOCAL = IntervalDaySecond } yyVAL.union = yyLOCAL - case 1377: + case 1382: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6834 +//line sql.y:6867 { yyLOCAL = IntervalHourMicrosecond } yyVAL.union = yyLOCAL - case 1378: + case 1383: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6838 +//line sql.y:6871 { yyLOCAL = IntervalHourMinute } yyVAL.union = yyLOCAL - case 1379: + case 1384: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6842 +//line sql.y:6875 { yyLOCAL = IntervalHourSecond } yyVAL.union = yyLOCAL - case 1380: + case 1385: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6846 +//line sql.y:6879 
{ yyLOCAL = IntervalMinuteMicrosecond } yyVAL.union = yyLOCAL - case 1381: + case 1386: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6850 +//line sql.y:6883 { yyLOCAL = IntervalMinuteSecond } yyVAL.union = yyLOCAL - case 1382: + case 1387: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6854 +//line sql.y:6887 { yyLOCAL = IntervalSecondMicrosecond } yyVAL.union = yyLOCAL - case 1383: + case 1388: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6858 +//line sql.y:6891 { yyLOCAL = IntervalYearMonth } yyVAL.union = yyLOCAL - case 1384: + case 1389: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6862 +//line sql.y:6895 { yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1385: + case 1390: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6866 +//line sql.y:6899 { yyLOCAL = IntervalWeek } yyVAL.union = yyLOCAL - case 1386: + case 1391: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6870 +//line sql.y:6903 { yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1387: + case 1392: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6874 +//line sql.y:6907 { yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1388: + case 1393: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6878 +//line sql.y:6911 { yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1389: + case 1394: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6882 +//line sql.y:6915 { yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1390: + case 1395: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6886 +//line sql.y:6919 { yyLOCAL = IntervalSecond } yyVAL.union = yyLOCAL - case 1391: + case 1396: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6890 +//line sql.y:6923 { yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1392: + case 1397: 
yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6894 +//line sql.y:6927 { yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1393: + case 1398: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6900 +//line sql.y:6933 { yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1394: + case 1399: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6904 +//line sql.y:6937 { yyLOCAL = IntervalWeek } yyVAL.union = yyLOCAL - case 1395: + case 1400: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6908 +//line sql.y:6941 { yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1396: + case 1401: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6912 +//line sql.y:6945 { yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1397: + case 1402: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6916 +//line sql.y:6949 { yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1398: + case 1403: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6920 +//line sql.y:6953 { yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1399: + case 1404: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6924 +//line sql.y:6957 { yyLOCAL = IntervalSecond } yyVAL.union = yyLOCAL - case 1400: + case 1405: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6928 +//line sql.y:6961 { yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1401: + case 1406: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6932 +//line sql.y:6965 { yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1402: + case 1407: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6936 +//line sql.y:6969 { yyLOCAL = IntervalDay } yyVAL.union = yyLOCAL - case 1403: + case 1408: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6940 +//line sql.y:6973 { yyLOCAL = 
IntervalWeek } yyVAL.union = yyLOCAL - case 1404: + case 1409: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6944 +//line sql.y:6977 { yyLOCAL = IntervalHour } yyVAL.union = yyLOCAL - case 1405: + case 1410: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6948 +//line sql.y:6981 { yyLOCAL = IntervalMinute } yyVAL.union = yyLOCAL - case 1406: + case 1411: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6952 +//line sql.y:6985 { yyLOCAL = IntervalMonth } yyVAL.union = yyLOCAL - case 1407: + case 1412: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6956 +//line sql.y:6989 { yyLOCAL = IntervalQuarter } yyVAL.union = yyLOCAL - case 1408: + case 1413: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6960 +//line sql.y:6993 { yyLOCAL = IntervalSecond } yyVAL.union = yyLOCAL - case 1409: + case 1414: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6964 +//line sql.y:6997 { yyLOCAL = IntervalMicrosecond } yyVAL.union = yyLOCAL - case 1410: + case 1415: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL IntervalType -//line sql.y:6968 +//line sql.y:7001 { yyLOCAL = IntervalYear } yyVAL.union = yyLOCAL - case 1413: + case 1418: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL int -//line sql.y:6978 +//line sql.y:7011 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 1414: + case 1419: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL int -//line sql.y:6982 +//line sql.y:7015 { yyLOCAL = 0 } yyVAL.union = yyLOCAL - case 1415: + case 1420: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL int -//line sql.y:6986 +//line sql.y:7019 { yyLOCAL = convertStringToInt(yyDollar[2].str) } yyVAL.union = yyLOCAL - case 1416: + case 1421: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:6996 +//line sql.y:7029 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: 
yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1417: + case 1422: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:7000 +//line sql.y:7033 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1418: + case 1423: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:7004 +//line sql.y:7037 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1419: + case 1424: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:7008 +//line sql.y:7041 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1420: + case 1425: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Expr -//line sql.y:7012 +//line sql.y:7045 { - yyLOCAL = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: yyDollar[3].selectExprsUnion()} + yyLOCAL = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: yyDollar[3].exprsUnion()} } yyVAL.union = yyLOCAL - case 1421: + case 1426: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:7018 +//line sql.y:7051 { yyLOCAL = NoOption } yyVAL.union = yyLOCAL - case 1422: + case 1427: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:7022 +//line sql.y:7055 { yyLOCAL = BooleanModeOpt } yyVAL.union = yyLOCAL - case 1423: + case 1428: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:7026 +//line sql.y:7059 { yyLOCAL = NaturalLanguageModeOpt } yyVAL.union = yyLOCAL - case 1424: + case 1429: yyDollar = yyS[yypt-7 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:7030 +//line sql.y:7063 { yyLOCAL = 
NaturalLanguageModeWithQueryExpansionOpt } yyVAL.union = yyLOCAL - case 1425: + case 1430: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL MatchExprOption -//line sql.y:7034 +//line sql.y:7067 { yyLOCAL = QueryExpansionOpt } yyVAL.union = yyLOCAL - case 1426: + case 1431: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7040 +//line sql.y:7073 { yyVAL.str = string(yyDollar[1].identifierCI.String()) } - case 1427: + case 1432: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7044 +//line sql.y:7077 { yyVAL.str = string(yyDollar[1].str) } - case 1428: + case 1433: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7048 +//line sql.y:7081 { yyVAL.str = string(yyDollar[1].str) } - case 1429: + case 1434: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7054 +//line sql.y:7087 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1430: + case 1435: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7058 +//line sql.y:7091 { - yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)} + yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: ptr.Of(convertStringToInt(yyDollar[4].str))} } yyVAL.union = yyLOCAL - case 1431: + case 1436: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7062 +//line sql.y:7095 { - yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: NewIntLiteral(yyDollar[4].str)} + yyLOCAL = &ConvertType{Type: string(yyDollar[2].str), Length: ptr.Of(convertStringToInt(yyDollar[4].str))} } yyVAL.union = yyLOCAL - case 1432: + case 1437: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7068 +//line sql.y:7101 { - yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } yyVAL.union = yyLOCAL - case 1433: + case 1438: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7072 +//line sql.y:7105 { - yyLOCAL = 
&ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion(), Charset: yyDollar[3].columnCharset} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion(), Charset: yyDollar[3].columnCharset} } yyVAL.union = yyLOCAL - case 1434: + case 1439: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7076 +//line sql.y:7109 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1435: + case 1440: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7080 +//line sql.y:7113 { - yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } yyVAL.union = yyLOCAL - case 1436: + case 1441: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7084 +//line sql.y:7117 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} yyLOCAL.Length = yyDollar[2].LengthScaleOption.Length yyLOCAL.Scale = yyDollar[2].LengthScaleOption.Scale } yyVAL.union = yyLOCAL - case 1437: + case 1442: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7090 +//line sql.y:7123 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1438: + case 1443: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7094 +//line sql.y:7127 { - yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } yyVAL.union = yyLOCAL - case 1439: + case 1444: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7098 +//line sql.y:7131 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1440: + case 1445: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7102 +//line sql.y:7135 { yyLOCAL = &ConvertType{Type: 
string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1441: + case 1446: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7106 +//line sql.y:7139 { - yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } yyVAL.union = yyLOCAL - case 1442: + case 1447: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7110 +//line sql.y:7143 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1443: + case 1448: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7114 +//line sql.y:7147 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1444: + case 1449: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7118 +//line sql.y:7151 { - yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].literalUnion()} + yyLOCAL = &ConvertType{Type: string(yyDollar[1].str), Length: yyDollar[2].intPtrUnion()} } yyVAL.union = yyLOCAL - case 1445: + case 1450: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7122 +//line sql.y:7155 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1446: + case 1451: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ConvertType -//line sql.y:7126 +//line sql.y:7159 { yyLOCAL = &ConvertType{Type: string(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1447: + case 1452: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7132 +//line sql.y:7165 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1448: + case 1453: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:7136 +//line sql.y:7169 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1449: + case 1454: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:7141 +//line sql.y:7174 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1450: + case 
1455: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7145 +//line sql.y:7178 { yyLOCAL = yyDollar[1].exprUnion() } yyVAL.union = yyLOCAL - case 1451: + case 1456: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7150 +//line sql.y:7183 { yyVAL.str = string("") } - case 1452: + case 1457: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7154 +//line sql.y:7187 { yyVAL.str = encodeSQLString(yyDollar[2].str) } - case 1453: + case 1458: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*When -//line sql.y:7160 +//line sql.y:7193 { yyLOCAL = []*When{yyDollar[1].whenUnion()} } yyVAL.union = yyLOCAL - case 1454: + case 1459: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7164 +//line sql.y:7197 { yySLICE := (*[]*When)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[2].whenUnion()) } - case 1455: + case 1460: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *When -//line sql.y:7170 +//line sql.y:7203 { yyLOCAL = &When{Cond: yyDollar[2].exprUnion(), Val: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1456: + case 1461: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:7175 +//line sql.y:7208 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1457: + case 1462: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7179 +//line sql.y:7212 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1458: + case 1463: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ColName -//line sql.y:7185 +//line sql.y:7218 { yyLOCAL = &ColName{Name: yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 1459: + case 1464: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *ColName -//line sql.y:7189 +//line sql.y:7222 { yyLOCAL = &ColName{Name: NewIdentifierCI(string(yyDollar[1].str))} } yyVAL.union = yyLOCAL - case 1460: + case 1465: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *ColName -//line sql.y:7193 +//line sql.y:7226 { yyLOCAL = &ColName{Qualifier: TableName{Name: yyDollar[1].identifierCS}, Name: yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - 
case 1461: + case 1466: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *ColName -//line sql.y:7197 +//line sql.y:7230 { yyLOCAL = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].identifierCS, Name: yyDollar[3].identifierCS}, Name: yyDollar[5].identifierCI} } yyVAL.union = yyLOCAL - case 1462: + case 1467: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7203 +//line sql.y:7236 { yyLOCAL = yyDollar[1].colNameUnion() } yyVAL.union = yyLOCAL - case 1463: + case 1468: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7207 +//line sql.y:7240 { yyLOCAL = &Offset{V: convertStringToInt(yyDollar[1].str)} } yyVAL.union = yyLOCAL - case 1464: + case 1469: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7213 +//line sql.y:7246 { // TODO(sougou): Deprecate this construct. if yyDollar[1].identifierCI.Lowered() != "value" { @@ -20990,426 +21180,442 @@ yydefault: yyLOCAL = NewIntLiteral("1") } yyVAL.union = yyLOCAL - case 1465: + case 1470: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7222 +//line sql.y:7255 { yyLOCAL = NewIntLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1466: + case 1471: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7226 +//line sql.y:7259 { yyLOCAL = parseBindVariable(yylex, yyDollar[1].str[1:]) } yyVAL.union = yyLOCAL - case 1467: + case 1472: yyDollar = yyS[yypt-0 : yypt+1] - var yyLOCAL Exprs -//line sql.y:7231 + var yyLOCAL *GroupBy +//line sql.y:7264 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1468: - yyDollar = yyS[yypt-3 : yypt+1] - var yyLOCAL Exprs -//line sql.y:7235 + case 1473: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL *GroupBy +//line sql.y:7268 { - yyLOCAL = yyDollar[3].exprsUnion() + yyLOCAL = &GroupBy{Exprs: yyDollar[3].exprsUnion(), WithRollup: yyDollar[4].booleanUnion()} } yyVAL.union = yyLOCAL - case 1469: + case 1474: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL bool +//line sql.y:7273 + { + yyLOCAL = false + } + yyVAL.union = yyLOCAL + case 
1475: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL bool +//line sql.y:7277 + { + yyLOCAL = true + } + yyVAL.union = yyLOCAL + case 1476: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Expr -//line sql.y:7240 +//line sql.y:7283 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1470: + case 1477: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Expr -//line sql.y:7244 +//line sql.y:7287 { yyLOCAL = yyDollar[2].exprUnion() } yyVAL.union = yyLOCAL - case 1471: + case 1478: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *NamedWindow -//line sql.y:7250 +//line sql.y:7293 { yyLOCAL = &NamedWindow{yyDollar[2].windowDefinitionsUnion()} } yyVAL.union = yyLOCAL - case 1472: + case 1479: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:7256 +//line sql.y:7299 { yyLOCAL = NamedWindows{yyDollar[1].namedWindowUnion()} } yyVAL.union = yyLOCAL - case 1473: + case 1480: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7260 +//line sql.y:7303 { yySLICE := (*NamedWindows)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].namedWindowUnion()) } - case 1474: + case 1481: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:7265 +//line sql.y:7308 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1475: + case 1482: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL NamedWindows -//line sql.y:7269 +//line sql.y:7312 { yyLOCAL = yyDollar[1].namedWindowsUnion() } yyVAL.union = yyLOCAL - case 1476: + case 1483: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7274 +//line sql.y:7317 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1477: + case 1484: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7278 +//line sql.y:7321 { yyLOCAL = yyDollar[1].orderByUnion() } yyVAL.union = yyLOCAL - case 1478: + case 1485: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL OrderBy -//line sql.y:7284 +//line sql.y:7327 { yyLOCAL = yyDollar[3].orderByUnion() } yyVAL.union = yyLOCAL - case 1479: + case 1486: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL 
OrderBy -//line sql.y:7290 +//line sql.y:7333 { yyLOCAL = OrderBy{yyDollar[1].orderUnion()} } yyVAL.union = yyLOCAL - case 1480: + case 1487: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7294 +//line sql.y:7337 { yySLICE := (*OrderBy)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].orderUnion()) } - case 1481: + case 1488: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Order -//line sql.y:7300 +//line sql.y:7343 { yyLOCAL = &Order{Expr: yyDollar[1].exprUnion(), Direction: yyDollar[2].orderDirectionUnion()} } yyVAL.union = yyLOCAL - case 1482: + case 1489: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:7305 +//line sql.y:7348 { yyLOCAL = AscOrder } yyVAL.union = yyLOCAL - case 1483: + case 1490: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:7309 +//line sql.y:7352 { yyLOCAL = AscOrder } yyVAL.union = yyLOCAL - case 1484: + case 1491: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL OrderDirection -//line sql.y:7313 +//line sql.y:7356 { yyLOCAL = DescOrder } yyVAL.union = yyLOCAL - case 1485: + case 1492: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Limit -//line sql.y:7318 +//line sql.y:7361 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1486: + case 1493: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Limit -//line sql.y:7322 +//line sql.y:7365 { yyLOCAL = yyDollar[1].limitUnion() } yyVAL.union = yyLOCAL - case 1487: + case 1494: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Limit -//line sql.y:7328 +//line sql.y:7371 { yyLOCAL = &Limit{Rowcount: yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1488: + case 1495: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Limit -//line sql.y:7332 +//line sql.y:7375 { yyLOCAL = &Limit{Offset: yyDollar[2].exprUnion(), Rowcount: yyDollar[4].exprUnion()} } yyVAL.union = yyLOCAL - case 1489: + case 1496: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Limit -//line sql.y:7336 +//line sql.y:7379 { yyLOCAL = &Limit{Offset: yyDollar[4].exprUnion(), Rowcount: 
yyDollar[2].exprUnion()} } yyVAL.union = yyLOCAL - case 1490: + case 1497: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7341 +//line sql.y:7384 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1491: + case 1498: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7345 +//line sql.y:7388 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1492: + case 1499: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7349 +//line sql.y:7392 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion(), yyDollar[2].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1493: + case 1500: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7353 +//line sql.y:7396 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1494: + case 1501: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []AlterOption -//line sql.y:7357 +//line sql.y:7400 { yyLOCAL = []AlterOption{yyDollar[1].alterOptionUnion()} } yyVAL.union = yyLOCAL - case 1495: + case 1502: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7364 +//line sql.y:7407 { yyLOCAL = &LockOption{Type: DefaultType} } yyVAL.union = yyLOCAL - case 1496: + case 1503: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7368 +//line sql.y:7411 { yyLOCAL = &LockOption{Type: NoneType} } yyVAL.union = yyLOCAL - case 1497: + case 1504: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7372 +//line sql.y:7415 { yyLOCAL = &LockOption{Type: SharedType} } yyVAL.union = yyLOCAL - case 1498: + case 1505: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7376 +//line sql.y:7419 { yyLOCAL = &LockOption{Type: ExclusiveType} } yyVAL.union = yyLOCAL - case 1499: + case 1506: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7382 +//line sql.y:7425 { yyLOCAL = 
AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1500: + case 1507: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7386 +//line sql.y:7429 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1501: + case 1508: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7390 +//line sql.y:7433 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1502: + case 1509: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL AlterOption -//line sql.y:7394 +//line sql.y:7437 { yyLOCAL = AlgorithmValue(yyDollar[3].str) } yyVAL.union = yyLOCAL - case 1503: + case 1510: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7399 +//line sql.y:7442 { yyVAL.str = "" } - case 1504: + case 1511: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7403 +//line sql.y:7446 { yyVAL.str = string(yyDollar[3].str) } - case 1505: + case 1512: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7407 +//line sql.y:7450 { yyVAL.str = string(yyDollar[3].str) } - case 1506: + case 1513: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7411 +//line sql.y:7454 { yyVAL.str = string(yyDollar[3].str) } - case 1507: + case 1514: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7416 +//line sql.y:7459 { yyVAL.str = "" } - case 1508: + case 1515: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7420 +//line sql.y:7463 { yyVAL.str = yyDollar[3].str } - case 1509: + case 1516: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7426 +//line sql.y:7469 { yyVAL.str = string(yyDollar[1].str) } - case 1510: + case 1517: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7430 +//line sql.y:7473 { yyVAL.str = string(yyDollar[1].str) } - case 1511: + case 1518: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7435 +//line sql.y:7478 { yyVAL.str = "" } - case 1512: + case 1519: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:7439 +//line sql.y:7482 { yyVAL.str = yyDollar[2].str } - case 1513: + case 1520: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7444 +//line sql.y:7487 { 
yyVAL.str = "cascaded" } - case 1514: + case 1521: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7448 +//line sql.y:7491 { yyVAL.str = string(yyDollar[1].str) } - case 1515: + case 1522: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7452 +//line sql.y:7495 { yyVAL.str = string(yyDollar[1].str) } - case 1516: + case 1523: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL *Definer -//line sql.y:7457 +//line sql.y:7500 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1517: + case 1524: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Definer -//line sql.y:7461 +//line sql.y:7504 { yyLOCAL = yyDollar[3].definerUnion() } yyVAL.union = yyLOCAL - case 1518: + case 1525: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL *Definer -//line sql.y:7467 +//line sql.y:7510 { yyLOCAL = &Definer{ Name: string(yyDollar[1].str), } } yyVAL.union = yyLOCAL - case 1519: + case 1526: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Definer -//line sql.y:7473 +//line sql.y:7516 { yyLOCAL = &Definer{ Name: string(yyDollar[1].str), } } yyVAL.union = yyLOCAL - case 1520: + case 1527: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *Definer -//line sql.y:7479 +//line sql.y:7522 { yyLOCAL = &Definer{ Name: yyDollar[1].str, @@ -21417,369 +21623,433 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1521: + case 1528: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7488 +//line sql.y:7531 { yyVAL.str = encodeSQLString(yyDollar[1].str) } - case 1522: + case 1529: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7492 +//line sql.y:7535 { yyVAL.str = formatIdentifier(yyDollar[1].str) } - case 1523: + case 1530: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7497 +//line sql.y:7540 { yyVAL.str = "" } - case 1524: + case 1531: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7501 +//line sql.y:7544 { yyVAL.str = formatAddress(yyDollar[1].str) } - case 1525: + case 1532: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL Lock -//line sql.y:7507 +//line sql.y:7550 { yyLOCAL = ForUpdateLock } yyVAL.union = yyLOCAL - case 1526: + case 1533: + 
yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Lock +//line sql.y:7554 + { + yyLOCAL = ForUpdateLockNoWait + } + yyVAL.union = yyLOCAL + case 1534: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Lock +//line sql.y:7558 + { + yyLOCAL = ForUpdateLockSkipLocked + } + yyVAL.union = yyLOCAL + case 1535: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL Lock +//line sql.y:7562 + { + yyLOCAL = ForShareLock + } + yyVAL.union = yyLOCAL + case 1536: + yyDollar = yyS[yypt-3 : yypt+1] + var yyLOCAL Lock +//line sql.y:7566 + { + yyLOCAL = ForShareLockNoWait + } + yyVAL.union = yyLOCAL + case 1537: + yyDollar = yyS[yypt-4 : yypt+1] + var yyLOCAL Lock +//line sql.y:7570 + { + yyLOCAL = ForShareLockSkipLocked + } + yyVAL.union = yyLOCAL + case 1538: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL Lock -//line sql.y:7511 +//line sql.y:7574 { yyLOCAL = ShareModeLock } yyVAL.union = yyLOCAL - case 1527: + case 1539: yyDollar = yyS[yypt-9 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7517 +//line sql.y:7580 { yyLOCAL = &SelectInto{Type: IntoOutfileS3, FileName: encodeSQLString(yyDollar[4].str), Charset: yyDollar[5].columnCharset, FormatOption: yyDollar[6].str, ExportOption: yyDollar[7].str, Manifest: yyDollar[8].str, Overwrite: yyDollar[9].str} } yyVAL.union = yyLOCAL - case 1528: + case 1540: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7521 +//line sql.y:7584 { yyLOCAL = &SelectInto{Type: IntoDumpfile, FileName: encodeSQLString(yyDollar[3].str), Charset: ColumnCharset{}, FormatOption: "", ExportOption: "", Manifest: "", Overwrite: ""} } yyVAL.union = yyLOCAL - case 1529: + case 1541: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *SelectInto -//line sql.y:7525 +//line sql.y:7588 { yyLOCAL = &SelectInto{Type: IntoOutfile, FileName: encodeSQLString(yyDollar[3].str), Charset: yyDollar[4].columnCharset, FormatOption: "", ExportOption: yyDollar[5].str, Manifest: "", Overwrite: ""} } yyVAL.union = yyLOCAL - case 1530: + case 1542: yyDollar = yyS[yypt-0 : yypt+1] -//line 
sql.y:7530 +//line sql.y:7593 { yyVAL.str = "" } - case 1531: + case 1543: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7534 +//line sql.y:7597 { yyVAL.str = " format csv" + yyDollar[3].str } - case 1532: + case 1544: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7538 +//line sql.y:7601 { yyVAL.str = " format text" + yyDollar[3].str } - case 1533: + case 1545: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7543 +//line sql.y:7606 { yyVAL.str = "" } - case 1534: + case 1546: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7547 +//line sql.y:7610 { yyVAL.str = " header" } - case 1535: + case 1547: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7552 +//line sql.y:7615 { yyVAL.str = "" } - case 1536: + case 1548: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7556 +//line sql.y:7619 { yyVAL.str = " manifest on" } - case 1537: + case 1549: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7560 +//line sql.y:7623 { yyVAL.str = " manifest off" } - case 1538: + case 1550: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7565 +//line sql.y:7628 { yyVAL.str = "" } - case 1539: + case 1551: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7569 +//line sql.y:7632 { yyVAL.str = " overwrite on" } - case 1540: + case 1552: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7573 +//line sql.y:7636 { yyVAL.str = " overwrite off" } - case 1541: + case 1553: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7579 +//line sql.y:7642 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1542: + case 1554: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7584 +//line sql.y:7647 { yyVAL.str = "" } - case 1543: + case 1555: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7588 +//line sql.y:7651 { yyVAL.str = " lines" + yyDollar[2].str } - case 1544: + case 1556: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7594 +//line sql.y:7657 { yyVAL.str = yyDollar[1].str } - case 1545: + case 1557: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7598 +//line sql.y:7661 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 
1546: + case 1558: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7604 +//line sql.y:7667 { yyVAL.str = " starting by " + encodeSQLString(yyDollar[3].str) } - case 1547: + case 1559: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7608 +//line sql.y:7671 { yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 1548: + case 1560: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7613 +//line sql.y:7676 { yyVAL.str = "" } - case 1549: + case 1561: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7617 +//line sql.y:7680 { yyVAL.str = " " + yyDollar[1].str + yyDollar[2].str } - case 1550: + case 1562: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7623 +//line sql.y:7686 { yyVAL.str = yyDollar[1].str } - case 1551: + case 1563: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7627 +//line sql.y:7690 { yyVAL.str = yyDollar[1].str + yyDollar[2].str } - case 1552: + case 1564: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7633 +//line sql.y:7696 { yyVAL.str = " terminated by " + encodeSQLString(yyDollar[3].str) } - case 1553: + case 1565: yyDollar = yyS[yypt-4 : yypt+1] -//line sql.y:7637 +//line sql.y:7700 { yyVAL.str = yyDollar[1].str + " enclosed by " + encodeSQLString(yyDollar[4].str) } - case 1554: + case 1566: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7641 +//line sql.y:7704 { yyVAL.str = " escaped by " + encodeSQLString(yyDollar[3].str) } - case 1555: + case 1567: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7646 +//line sql.y:7709 { yyVAL.str = "" } - case 1556: + case 1568: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7650 +//line sql.y:7713 { yyVAL.str = " optionally" } - case 1557: - yyDollar = yyS[yypt-2 : yypt+1] + case 1569: + yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *Insert -//line sql.y:7663 +//line sql.y:7726 { - yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion()} + yyLOCAL = &Insert{Rows: yyDollar[2].valuesUnion(), RowAlias: yyDollar[3].rowAliasUnion()} } yyVAL.union = yyLOCAL - case 1558: + case 1570: yyDollar = yyS[yypt-1 : yypt+1] var 
yyLOCAL *Insert -//line sql.y:7667 +//line sql.y:7730 { yyLOCAL = &Insert{Rows: yyDollar[1].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1559: - yyDollar = yyS[yypt-5 : yypt+1] + case 1571: + yyDollar = yyS[yypt-6 : yypt+1] var yyLOCAL *Insert -//line sql.y:7671 +//line sql.y:7734 { - yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion()} + yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[5].valuesUnion(), RowAlias: yyDollar[6].rowAliasUnion()} } yyVAL.union = yyLOCAL - case 1560: - yyDollar = yyS[yypt-4 : yypt+1] + case 1572: + yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL *Insert -//line sql.y:7675 +//line sql.y:7738 { - yyLOCAL = &Insert{Columns: []IdentifierCI{}, Rows: yyDollar[4].valuesUnion()} + yyLOCAL = &Insert{Columns: []IdentifierCI{}, Rows: yyDollar[4].valuesUnion(), RowAlias: yyDollar[5].rowAliasUnion()} } yyVAL.union = yyLOCAL - case 1561: + case 1573: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL *Insert -//line sql.y:7679 +//line sql.y:7742 { yyLOCAL = &Insert{Columns: yyDollar[2].columnsUnion(), Rows: yyDollar[4].selStmtUnion()} } yyVAL.union = yyLOCAL - case 1562: + case 1574: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Columns -//line sql.y:7685 +//line sql.y:7748 { yyLOCAL = Columns{yyDollar[1].identifierCI} } yyVAL.union = yyLOCAL - case 1563: + case 1575: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Columns -//line sql.y:7689 +//line sql.y:7752 { yyLOCAL = Columns{yyDollar[3].identifierCI} } yyVAL.union = yyLOCAL - case 1564: + case 1576: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7693 +//line sql.y:7756 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].identifierCI) } - case 1565: + case 1577: yyDollar = yyS[yypt-5 : yypt+1] -//line sql.y:7697 +//line sql.y:7760 { yySLICE := (*Columns)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[5].identifierCI) } - case 1566: + case 1578: + yyDollar = yyS[yypt-0 : yypt+1] + var yyLOCAL *RowAlias +//line 
sql.y:7765 + { + yyLOCAL = nil + } + yyVAL.union = yyLOCAL + case 1579: + yyDollar = yyS[yypt-2 : yypt+1] + var yyLOCAL *RowAlias +//line sql.y:7769 + { + yyLOCAL = &RowAlias{TableName: yyDollar[2].identifierCS} + } + yyVAL.union = yyLOCAL + case 1580: + yyDollar = yyS[yypt-5 : yypt+1] + var yyLOCAL *RowAlias +//line sql.y:7773 + { + yyLOCAL = &RowAlias{TableName: yyDollar[2].identifierCS, Columns: yyDollar[4].columnsUnion()} + } + yyVAL.union = yyLOCAL + case 1581: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7702 +//line sql.y:7778 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1567: + case 1582: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7706 +//line sql.y:7782 { yyLOCAL = yyDollar[5].updateExprsUnion() } yyVAL.union = yyLOCAL - case 1568: + case 1583: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Values -//line sql.y:7712 +//line sql.y:7788 { yyLOCAL = Values{yyDollar[1].valTupleUnion()} } yyVAL.union = yyLOCAL - case 1569: + case 1584: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7716 +//line sql.y:7792 { yySLICE := (*Values)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].valTupleUnion()) } - case 1570: + case 1585: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7722 +//line sql.y:7798 { yyLOCAL = yyDollar[1].valTupleUnion() } yyVAL.union = yyLOCAL - case 1571: + case 1586: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7726 +//line sql.y:7802 { yyLOCAL = ValTuple{} } yyVAL.union = yyLOCAL - case 1572: + case 1587: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7732 +//line sql.y:7808 { yyLOCAL = ValTuple(yyDollar[2].exprsUnion()) } yyVAL.union = yyLOCAL - case 1573: + case 1588: yyDollar = yyS[yypt-4 : yypt+1] var yyLOCAL ValTuple -//line sql.y:7736 +//line sql.y:7812 { yyLOCAL = ValTuple(yyDollar[3].exprsUnion()) } yyVAL.union = yyLOCAL - case 1574: + case 1589: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7741 
+//line sql.y:7817 { if len(yyDollar[1].valTupleUnion()) == 1 { yyLOCAL = yyDollar[1].valTupleUnion()[0] @@ -21788,300 +22058,300 @@ yydefault: } } yyVAL.union = yyLOCAL - case 1575: + case 1590: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL UpdateExprs -//line sql.y:7751 +//line sql.y:7827 { yyLOCAL = UpdateExprs{yyDollar[1].updateExprUnion()} } yyVAL.union = yyLOCAL - case 1576: + case 1591: yyDollar = yyS[yypt-3 : yypt+1] -//line sql.y:7755 +//line sql.y:7831 { yySLICE := (*UpdateExprs)(yyIaddr(yyVAL.union)) *yySLICE = append(*yySLICE, yyDollar[3].updateExprUnion()) } - case 1577: + case 1592: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL *UpdateExpr -//line sql.y:7761 +//line sql.y:7837 { yyLOCAL = &UpdateExpr{Name: yyDollar[1].colNameUnion(), Expr: yyDollar[3].exprUnion()} } yyVAL.union = yyLOCAL - case 1579: + case 1594: yyDollar = yyS[yypt-2 : yypt+1] -//line sql.y:7768 +//line sql.y:7844 { yyVAL.str = "charset" } - case 1582: + case 1597: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7778 +//line sql.y:7854 { yyLOCAL = NewStrLiteral(yyDollar[1].identifierCI.String()) } yyVAL.union = yyLOCAL - case 1583: + case 1598: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7782 +//line sql.y:7858 { yyLOCAL = NewStrLiteral(yyDollar[1].str) } yyVAL.union = yyLOCAL - case 1584: + case 1599: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Expr -//line sql.y:7786 +//line sql.y:7862 { yyLOCAL = &Default{} } yyVAL.union = yyLOCAL - case 1587: + case 1602: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7795 +//line sql.y:7871 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1588: + case 1603: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL bool -//line sql.y:7797 +//line sql.y:7873 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1589: + case 1604: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7800 +//line sql.y:7876 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1590: + case 1605: yyDollar = yyS[yypt-2 : yypt+1] var 
yyLOCAL bool -//line sql.y:7802 +//line sql.y:7878 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1591: + case 1606: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL bool -//line sql.y:7805 +//line sql.y:7881 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1592: + case 1607: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL bool -//line sql.y:7807 +//line sql.y:7883 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1593: + case 1608: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Ignore -//line sql.y:7810 +//line sql.y:7886 { yyLOCAL = false } yyVAL.union = yyLOCAL - case 1594: + case 1609: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Ignore -//line sql.y:7812 +//line sql.y:7888 { yyLOCAL = true } yyVAL.union = yyLOCAL - case 1595: + case 1610: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7815 +//line sql.y:7891 { yyVAL.empty = struct{}{} } - case 1596: + case 1611: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7817 +//line sql.y:7893 { yyVAL.empty = struct{}{} } - case 1597: + case 1612: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7819 +//line sql.y:7895 { yyVAL.empty = struct{}{} } - case 1598: + case 1613: yyDollar = yyS[yypt-5 : yypt+1] var yyLOCAL Statement -//line sql.y:7823 +//line sql.y:7899 { yyLOCAL = &CallProc{Name: yyDollar[2].tableName, Params: yyDollar[4].exprsUnion()} } yyVAL.union = yyLOCAL - case 1599: + case 1614: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL Exprs -//line sql.y:7828 +//line sql.y:7904 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1600: + case 1615: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL Exprs -//line sql.y:7832 +//line sql.y:7908 { yyLOCAL = yyDollar[1].exprsUnion() } yyVAL.union = yyLOCAL - case 1601: + case 1616: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:7837 +//line sql.y:7913 { yyLOCAL = nil } yyVAL.union = yyLOCAL - case 1602: + case 1617: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL []*IndexOption -//line sql.y:7839 +//line sql.y:7915 { yyLOCAL = []*IndexOption{yyDollar[1].indexOptionUnion()} } 
yyVAL.union = yyLOCAL - case 1603: + case 1618: yyDollar = yyS[yypt-2 : yypt+1] var yyLOCAL *IndexOption -//line sql.y:7843 +//line sql.y:7919 { yyLOCAL = &IndexOption{Name: string(yyDollar[1].str), String: string(yyDollar[2].identifierCI.String())} } yyVAL.union = yyLOCAL - case 1604: + case 1619: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7849 +//line sql.y:7925 { yyVAL.identifierCI = yyDollar[1].identifierCI } - case 1605: + case 1620: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7853 +//line sql.y:7929 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 1607: + case 1622: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7860 +//line sql.y:7936 { yyVAL.identifierCI = NewIdentifierCI(string(yyDollar[1].str)) } - case 1608: + case 1623: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7866 +//line sql.y:7942 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1609: + case 1624: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7870 +//line sql.y:7946 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1610: + case 1625: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:7876 +//line sql.y:7952 { yyVAL.identifierCS = NewIdentifierCS("") } - case 1611: + case 1626: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7880 +//line sql.y:7956 { yyVAL.identifierCS = yyDollar[1].identifierCS } - case 1613: + case 1628: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:7887 +//line sql.y:7963 { yyVAL.identifierCS = NewIdentifierCS(string(yyDollar[1].str)) } - case 1614: + case 1629: yyDollar = yyS[yypt-3 : yypt+1] var yyLOCAL Statement -//line sql.y:7893 +//line sql.y:7969 { yyLOCAL = &Kill{Type: yyDollar[2].killTypeUnion(), ProcesslistID: convertStringToUInt64(yyDollar[3].str)} } yyVAL.union = yyLOCAL - case 1615: + case 1630: yyDollar = yyS[yypt-0 : yypt+1] var yyLOCAL KillType -//line sql.y:7899 +//line sql.y:7975 { yyLOCAL = ConnectionType } yyVAL.union = yyLOCAL - case 1616: + case 1631: yyDollar = yyS[yypt-1 : yypt+1] var 
yyLOCAL KillType -//line sql.y:7903 +//line sql.y:7979 { yyLOCAL = ConnectionType } yyVAL.union = yyLOCAL - case 1617: + case 1632: yyDollar = yyS[yypt-1 : yypt+1] var yyLOCAL KillType -//line sql.y:7907 +//line sql.y:7983 { yyLOCAL = QueryType } yyVAL.union = yyLOCAL - case 2232: + case 2251: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8550 +//line sql.y:8630 { } - case 2233: + case 2252: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8555 +//line sql.y:8635 { } - case 2234: + case 2253: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:8559 +//line sql.y:8639 { skipToEnd(yylex) } - case 2235: + case 2254: yyDollar = yyS[yypt-0 : yypt+1] -//line sql.y:8564 +//line sql.y:8644 { skipToEnd(yylex) } - case 2236: + case 2255: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8568 +//line sql.y:8648 { skipToEnd(yylex) } - case 2237: + case 2256: yyDollar = yyS[yypt-1 : yypt+1] -//line sql.y:8572 +//line sql.y:8652 { skipToEnd(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index fcb481725e9..98ac245f4ad 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -16,6 +16,8 @@ limitations under the License. 
%{ package sqlparser +import "vitess.io/vitess/go/ptr" + func setParseTree(yylex yyLexer, stmt Statement) { yylex.(*Tokenizer).ParseTree = stmt } @@ -82,6 +84,7 @@ func markBindVariable(yylex yyLexer, bvar string) { ctes []*CommonTableExpr order *Order limit *Limit + rowAlias *RowAlias updateExpr *UpdateExpr setExpr *SetExpr @@ -159,6 +162,7 @@ func markBindVariable(yylex yyLexer, bvar string) { selectExprs SelectExprs tableOptions TableOptions starExpr StarExpr + groupBy *GroupBy colKeyOpt ColumnKeyOption referenceAction ReferenceAction @@ -191,6 +195,7 @@ func markBindVariable(yylex yyLexer, bvar string) { partitionByType PartitionByType definer *Definer integer int + intPtr *int JSONTableExpr *JSONTableExpr jtColumnDefinition *JtColumnDefinition @@ -202,6 +207,15 @@ func markBindVariable(yylex yyLexer, bvar string) { // These precedence rules are there to handle shift-reduce conflicts. %nonassoc MEMBER +// MULTIPLE_TEXT_LITERAL is used to resolve shift-reduce conflicts occuring due to multiple STRING symbols occuring one after the other. +// According to the ANSI standard, these strings should be concatenated together. +// The shift-reduce conflict occurrs because after seeing a STRING, if we see another one, then we can either shift to concatenate them or +// reduce the STRING into a text_literal, eventually into a simple_expr and use the coming string as an alias. +// The way to fix this conflict is to give shifting higher precedence than reducing. +// Adding no precedence also works, since shifting is the default, but it reports a conflict which we can avoid by adding this precedence rule. +// In order to ensure lower precedence of reduction, this rule has to come before the precedence declaration of STRING. +// This precedence should not be used anywhere else other than with rules where text_literal is being reduced. 
+%nonassoc MULTIPLE_TEXT_LITERAL // FUNCTION_CALL_NON_KEYWORD is used to resolve shift-reduce conflicts occuring due to function_call_generic symbol and // having special parsing for functions whose names are non-reserved keywords. The shift-reduce conflict occurrs because // after seeing a non-reserved keyword, if we see '(', then we can either shift to use the special parsing grammar rule or @@ -244,7 +258,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token VALUE_ARG LIST_ARG OFFSET_ARG %token JSON_PRETTY JSON_STORAGE_SIZE JSON_STORAGE_FREE JSON_CONTAINS JSON_CONTAINS_PATH JSON_EXTRACT JSON_KEYS JSON_OVERLAPS JSON_SEARCH JSON_VALUE %token EXTRACT -%token NULL TRUE FALSE OFF +%token NULL UNKNOWN TRUE FALSE OFF %token DISCARD IMPORT ENABLE DISABLE TABLESPACE %token VIRTUAL STORED %token BOTH LEADING TRAILING @@ -303,7 +317,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token SEQUENCE MERGE TEMPORARY TEMPTABLE INVOKER SECURITY FIRST AFTER LAST // Migration tokens -%token VITESS_MIGRATION CANCEL RETRY LAUNCH COMPLETE CLEANUP THROTTLE UNTHROTTLE EXPIRE RATIO +%token VITESS_MIGRATION CANCEL RETRY LAUNCH COMPLETE CLEANUP THROTTLE UNTHROTTLE FORCE_CUTOVER EXPIRE RATIO // Throttler tokens %token VITESS_THROTTLER @@ -343,7 +357,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token SQL_TSI_DAY SQL_TSI_WEEK SQL_TSI_HOUR SQL_TSI_MINUTE SQL_TSI_MONTH SQL_TSI_QUARTER SQL_TSI_SECOND SQL_TSI_MICROSECOND SQL_TSI_YEAR %token REPLACE %token CONVERT CAST -%token SUBSTR SUBSTRING +%token SUBSTR SUBSTRING MID %token SEPARATOR %token TIMESTAMPADD TIMESTAMPDIFF %token WEIGHT_STRING @@ -362,7 +376,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %token ST_Area ST_Centroid ST_ExteriorRing ST_InteriorRingN ST_NumInteriorRings ST_NumGeometries ST_GeometryN ST_LongFromGeoHash ST_PointFromGeoHash ST_LatFromGeoHash ST_GeoHash ST_AsGeoJSON ST_GeomFromGeoJSON // Match -%token MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION WITHOUT VALIDATION +%token 
MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION WITHOUT VALIDATION ROLLUP // MySQL reserved words that are unused by this grammar will map to this token. %token UNUSED ARRAY BYTE CUME_DIST DESCRIPTION DENSE_RANK EMPTY EXCEPT FIRST_VALUE GROUPING GROUPS JSON_TABLE LAG LAST_VALUE LATERAL LEAD @@ -435,6 +449,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %type frame_clause frame_clause_opt %type window_spec %type over_clause +%type over_clause_opt %type null_treatment_type %type null_treatment_clause null_treatment_clause_opt %type from_first_last_type @@ -457,14 +472,14 @@ func markBindVariable(yylex yyLexer, bvar string) { %type cache_opt separator_opt flush_option for_channel_opt maxvalue %type match_option %type distinct_opt union_op replace_opt local_opt -%type select_expression_list select_expression_list_opt +%type select_expression_list %type select_expression -%type select_options flush_option_list +%type select_options select_options_opt flush_option_list %type select_option algorithm_view security_view security_view_opt %type generated_always_opt user_username address_opt %type definer_opt user %type expression signed_literal signed_literal_or_null null_as_literal now_or_signed_literal signed_literal bit_expr regular_expressions xml_expressions -%type simple_expr literal NUM_literal text_literal text_literal_or_arg bool_pri literal_or_null now predicate tuple_expression null_int_variable_arg performance_schema_function_expressions gtid_function_expressions +%type simple_expr literal NUM_literal text_start text_literal text_literal_or_arg bool_pri literal_or_null now predicate tuple_expression null_int_variable_arg performance_schema_function_expressions gtid_function_expressions %type from_opt table_references from_clause %type table_reference table_factor join_table json_table_function %type jt_column @@ -497,7 +512,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %type when_expression_list %type when_expression %type expression_opt 
else_expression_opt default_with_comma_opt -%type group_by_opt +%type group_by_opt %type having_opt %type order_by_opt order_list order_by_clause %type order @@ -529,15 +544,17 @@ func markBindVariable(yylex yyLexer, bvar string) { %type sql_id sql_id_opt reserved_sql_id col_alias as_ci_opt %type charset_value %type table_id reserved_table_id table_alias as_opt_id table_id_opt from_database_opt use_table_name +%type row_alias_opt %type as_opt work_opt savepoint_opt %type skip_to_end ddl_skip_to_end %type charset %type set_session_or_global %type convert_type returning_type_opt convert_type_weight_string -%type array_opt +%type array_opt rollup_opt %type column_type %type int_type decimal_type numeric_type time_type char_type spatial_type -%type length_opt partition_comment partition_data_directory partition_index_directory +%type partition_comment partition_data_directory partition_index_directory +%type length_opt %type func_datetime_precision %type charset_opt %type collate_opt @@ -572,7 +589,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %type vindex_param_list vindex_params_opt %type json_object_param %type json_object_param_list json_object_param_opt -%type ci_identifier ci_identifier_opt vindex_type vindex_type_opt +%type ci_identifier vindex_type vindex_type_opt %type database_or_schema column_opt insert_method_options row_format_options %type fk_reference_action fk_on_delete fk_on_update %type fk_match fk_match_opt fk_match_action @@ -585,7 +602,7 @@ func markBindVariable(yylex yyLexer, bvar string) { %type keys %type reference_definition reference_definition_opt %type underscore_charsets -%type expire_opt +%type expire_opt null_or_unknown %type ratio_opt %type tx_chacteristics_opt tx_chars %type tx_char @@ -669,15 +686,6 @@ ci_identifier: $$ = NewIdentifierCI(string($1)) } -ci_identifier_opt: - { - $$ = NewIdentifierCI("") - } -| ci_identifier - { - $$ = $1 - } - variable_expr: AT_ID { @@ -703,11 +711,11 @@ load_statement: with_clause: WITH 
with_list { - $$ = &With{CTEs: $2, Recursive: false} + $$ = &With{CTEs: $2, Recursive: false} } | WITH RECURSIVE with_list { - $$ = &With{CTEs: $3, Recursive: true} + $$ = &With{CTEs: $3, Recursive: true} } with_clause_opt: @@ -716,33 +724,33 @@ with_clause_opt: } | with_clause { - $$ = $1 + $$ = $1 } with_list: with_list ',' common_table_expr { - $$ = append($1, $3) + $$ = append($1, $3) } | common_table_expr { - $$ = []*CommonTableExpr{$1} + $$ = []*CommonTableExpr{$1} } common_table_expr: table_id column_list_opt AS subquery { - $$ = &CommonTableExpr{ID: $1, Columns: $2, Subquery: $4} + $$ = &CommonTableExpr{ID: $1, Columns: $2, Subquery: $4} } query_expression_parens: openb query_expression_parens closeb { - $$ = $2 + $$ = $2 } | openb query_expression closeb { - $$ = $2 + $$ = $2 } | openb query_expression locking_clause closeb { @@ -769,117 +777,117 @@ query_expression_parens: query_expression: query_expression_body order_by_opt limit_opt { - $1.SetOrderBy($2) - $1.SetLimit($3) - $$ = $1 + $1.SetOrderBy($2) + $1.SetLimit($3) + $$ = $1 } | query_expression_parens limit_clause { - $1.SetLimit($2) - $$ = $1 + $1.SetLimit($2) + $$ = $1 } | query_expression_parens order_by_clause limit_opt { - $1.SetOrderBy($2) - $1.SetLimit($3) - $$ = $1 + $1.SetOrderBy($2) + $1.SetLimit($3) + $$ = $1 } | with_clause query_expression_body order_by_opt limit_opt { - $2.SetWith($1) - $2.SetOrderBy($3) - $2.SetLimit($4) - $$ = $2 + $2.SetWith($1) + $2.SetOrderBy($3) + $2.SetLimit($4) + $$ = $2 } | with_clause query_expression_parens limit_clause { - $2.SetWith($1) - $2.SetLimit($3) - $$ = $2 + $2.SetWith($1) + $2.SetLimit($3) + $$ = $2 } | with_clause query_expression_parens order_by_clause limit_opt { - $2.SetWith($1) - $2.SetOrderBy($3) - $2.SetLimit($4) - $$ = $2 + $2.SetWith($1) + $2.SetOrderBy($3) + $2.SetLimit($4) + $$ = $2 } | with_clause query_expression_parens { - $2.SetWith($1) + $2.SetWith($1) } | SELECT comment_opt cache_opt NEXT num_val for_from table_name { - $$ = 
NewSelect(Comments($2), SelectExprs{&Nextval{Expr: $5}}, []string{$3}/*options*/, nil, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/, nil) + $$ = NewSelect(Comments($2), SelectExprs{&Nextval{Expr: $5}}, []string{$3}/*options*/, nil, TableExprs{&AliasedTableExpr{Expr: $7}}, nil/*where*/, nil/*groupBy*/, nil/*having*/, nil) } query_expression_body: query_primary { - $$ = $1 + $$ = $1 } | query_expression_body union_op query_primary { - $$ = &Union{Left: $1, Distinct: $2, Right: $3} + $$ = &Union{Left: $1, Distinct: $2, Right: $3} } | query_expression_parens union_op query_primary { - $$ = &Union{Left: $1, Distinct: $2, Right: $3} + $$ = &Union{Left: $1, Distinct: $2, Right: $3} } | query_expression_body union_op query_expression_parens { - $$ = &Union{Left: $1, Distinct: $2, Right: $3} + $$ = &Union{Left: $1, Distinct: $2, Right: $3} } | query_expression_parens union_op query_expression_parens { - $$ = &Union{Left: $1, Distinct: $2, Right: $3} + $$ = &Union{Left: $1, Distinct: $2, Right: $3} } select_statement: query_expression { - $$ = $1 + $$ = $1 } | query_expression locking_clause { setLockInSelect($1, $2) - $$ = $1 + $$ = $1 } | query_expression_parens { - $$ = $1 + $$ = $1 } | select_stmt_with_into { - $$ = $1 + $$ = $1 } select_stmt_with_into: openb select_stmt_with_into closeb { - $$ = $2; + $$ = $2 } | query_expression into_clause { - $1.SetInto($2) - $$ = $1 + $1.SetInto($2) + $$ = $1 } | query_expression into_clause locking_clause { - $1.SetInto($2) - $1.SetLock($3) - $$ = $1 + $1.SetInto($2) + $1.SetLock($3) + $$ = $1 } | query_expression locking_clause into_clause { - $1.SetInto($3) - $1.SetLock($2) - $$ = $1 + $1.SetInto($3) + $1.SetLock($2) + $$ = $1 } | query_expression_parens into_clause { - $1.SetInto($2) - $$ = $1 + $1.SetInto($2) + $$ = $1 } stream_statement: @@ -897,13 +905,13 @@ vstream_statement: // query_primary is an unparenthesized SELECT with no order by clause or beyond. 
query_primary: // 1 2 3 4 5 6 7 8 9 10 - SELECT comment_opt select_options select_expression_list into_clause from_opt where_expression_opt group_by_opt having_opt named_windows_list_opt + SELECT comment_opt select_options_opt select_expression_list into_clause from_opt where_expression_opt group_by_opt having_opt named_windows_list_opt { - $$ = NewSelect(Comments($2), $4/*SelectExprs*/, $3/*options*/, $5/*into*/, $6/*from*/, NewWhere(WhereClause, $7), GroupBy($8), NewWhere(HavingClause, $9), $10) + $$ = NewSelect(Comments($2), $4/*SelectExprs*/, $3/*options*/, $5/*into*/, $6/*from*/, NewWhere(WhereClause, $7), $8, NewWhere(HavingClause, $9), $10) } -| SELECT comment_opt select_options select_expression_list from_opt where_expression_opt group_by_opt having_opt named_windows_list_opt +| SELECT comment_opt select_options_opt select_expression_list from_opt where_expression_opt group_by_opt having_opt named_windows_list_opt { - $$ = NewSelect(Comments($2), $4/*SelectExprs*/, $3/*options*/, nil, $5/*from*/, NewWhere(WhereClause, $6), GroupBy($7), NewWhere(HavingClause, $8), $9) + $$ = NewSelect(Comments($2), $4/*SelectExprs*/, $3/*options*/, nil, $5/*from*/, NewWhere(WhereClause, $6), $7, NewWhere(HavingClause, $8), $9) } insert_statement: @@ -1004,7 +1012,7 @@ opt_partition_clause: } | PARTITION openb partition_list closeb { - $$ = $3 + $$ = $3 } set_statement: @@ -1246,22 +1254,22 @@ alter_table_prefix: } create_index_prefix: - CREATE comment_opt INDEX ci_identifier using_opt ON table_name + CREATE comment_opt INDEX sql_id using_opt ON table_name { $$ = &AlterTable{Table: $7, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$4}, Options:$5}}}} setDDL(yylex, $$) } -| CREATE comment_opt FULLTEXT INDEX ci_identifier using_opt ON table_name +| CREATE comment_opt FULLTEXT INDEX sql_id using_opt ON table_name { $$ = &AlterTable{Table: $8, AlterOptions: 
[]AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$5, Type: IndexTypeFullText}, Options:$6}}}} setDDL(yylex, $$) } -| CREATE comment_opt SPATIAL INDEX ci_identifier using_opt ON table_name +| CREATE comment_opt SPATIAL INDEX sql_id using_opt ON table_name { $$ = &AlterTable{Table: $8, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$5, Type: IndexTypeSpatial}, Options:$6}}}} setDDL(yylex, $$) } -| CREATE comment_opt UNIQUE INDEX ci_identifier using_opt ON table_name +| CREATE comment_opt UNIQUE INDEX sql_id using_opt ON table_name { $$ = &AlterTable{Table: $8, AlterOptions: []AlterOption{&AddIndexDefinition{IndexDefinition:&IndexDefinition{Info: &IndexInfo{Name:$5, Type: IndexTypeUnique}, Options:$6}}}} setDDL(yylex, $$) @@ -1431,7 +1439,7 @@ column_definition: { $2.Options = $4 if $2.Options.Collate == "" { - $2.Options.Collate = $3 + $2.Options.Collate = $3 } $2.Options.Reference = $5 $$ = &ColumnDefinition{Name: $1, Type: $2} @@ -1464,20 +1472,18 @@ column_attribute_list_opt: } | column_attribute_list_opt NULL { - val := true - $1.Null = &val + $1.Null = ptr.Of(true) $$ = $1 } | column_attribute_list_opt NOT NULL { - val := false - $1.Null = &val + $1.Null = ptr.Of(false) $$ = $1 } | column_attribute_list_opt DEFAULT openb expression closeb { - $1.Default = $4 - $$ = $1 + $1.Default = $4 + $$ = $1 } | column_attribute_list_opt DEFAULT now_or_signed_literal { @@ -1525,14 +1531,12 @@ column_attribute_list_opt: } | column_attribute_list_opt VISIBLE { - val := false - $1.Invisible = &val + $1.Invisible = ptr.Of(false) $$ = $1 } | column_attribute_list_opt INVISIBLE { - val := true - $1.Invisible = &val + $1.Invisible = ptr.Of(true) $$ = $1 } | column_attribute_list_opt ENGINE_ATTRIBUTE equal_opt STRING @@ -1547,25 +1551,25 @@ column_attribute_list_opt: column_format: FIXED { - $$ = FixedFormat + $$ = FixedFormat } | DYNAMIC { - $$ = DynamicFormat + $$ = DynamicFormat } | 
DEFAULT { - $$ = DefaultFormat + $$ = DefaultFormat } column_storage: VIRTUAL { - $$ = VirtualStorage + $$ = VirtualStorage } | STORED { - $$ = StoredStorage + $$ = StoredStorage } generated_column_attribute_list_opt: @@ -1579,14 +1583,12 @@ generated_column_attribute_list_opt: } | generated_column_attribute_list_opt NULL { - val := true - $1.Null = &val + $1.Null = ptr.Of(true) $$ = $1 } | generated_column_attribute_list_opt NOT NULL { - val := false - $1.Null = &val + $1.Null = ptr.Of(false) $$ = $1 } | generated_column_attribute_list_opt COMMENT_KEYWORD STRING @@ -1601,21 +1603,19 @@ generated_column_attribute_list_opt: } | generated_column_attribute_list_opt VISIBLE { - val := false - $1.Invisible = &val + $1.Invisible = ptr.Of(false) $$ = $1 } | generated_column_attribute_list_opt INVISIBLE { - val := true - $1.Invisible = &val + $1.Invisible = ptr.Of(true) $$ = $1 } now_or_signed_literal: now { - $$ = $1 + $$ = $1 } | signed_literal_or_null @@ -1659,41 +1659,41 @@ NULL literal | '+' NUM_literal { - $$= $2 + $$= $2 } | '-' NUM_literal { - $$ = &UnaryExpr{Operator: UMinusOp, Expr: $2} + $$ = &UnaryExpr{Operator: UMinusOp, Expr: $2} } literal: -text_literal +text_literal %prec MULTIPLE_TEXT_LITERAL { - $$= $1 + $$= $1 } | NUM_literal { - $$= $1 + $$= $1 } | boolean_value { - $$ = $1 + $$ = $1 } | HEX { - $$ = NewHexLiteral($1) + $$ = NewHexLiteral($1) } | HEXNUM { - $$ = NewHexNumLiteral($1) + $$ = NewHexNumLiteral($1) } | BITNUM { - $$ = NewBitLiteral($1) + $$ = NewBitLiteral($1) } | BIT_LITERAL { - $$ = NewBitLiteral("0b" + $1) + $$ = NewBitLiteral("0b" + $1) } | VALUE_ARG { @@ -1701,19 +1701,19 @@ text_literal } | underscore_charsets BIT_LITERAL %prec UNARY { - $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewBitLiteral("0b" + $2)} + $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewBitLiteral("0b" + $2)} } | underscore_charsets HEXNUM %prec UNARY { - $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewHexNumLiteral($2)} + $$ = &IntroducerExpr{CharacterSet: $1, 
Expr: NewHexNumLiteral($2)} } | underscore_charsets BITNUM %prec UNARY { - $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewBitLiteral($2)} + $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewBitLiteral($2)} } | underscore_charsets HEX %prec UNARY { - $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewHexLiteral($2)} + $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewHexLiteral($2)} } | underscore_charsets column_name_or_offset %prec UNARY { @@ -1726,15 +1726,15 @@ text_literal } | DATE STRING { - $$ = NewDateLiteral($2) + $$ = NewDateLiteral($2) } | TIME STRING { - $$ = NewTimeLiteral($2) + $$ = NewTimeLiteral($2) } | TIMESTAMP STRING { - $$ = NewTimestampLiteral($2) + $$ = NewTimestampLiteral($2) } underscore_charsets: @@ -1926,21 +1926,31 @@ INTEGRAL } text_literal: +text_start + { + $$ = $1 + } +| text_literal STRING + { + $$ = AppendString($1, $2) + } + +text_start: STRING { - $$ = NewStrLiteral($1) + $$ = NewStrLiteral($1) } | NCHAR_STRING { - $$ = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral($1)} + $$ = &UnaryExpr{Operator: NStringOp, Expr: NewStrLiteral($1)} } | underscore_charsets STRING %prec UNARY { - $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewStrLiteral($2)} + $$ = &IntroducerExpr{CharacterSet: $1, Expr: NewStrLiteral($2)} } text_literal_or_arg: - text_literal + text_literal %prec MULTIPLE_TEXT_LITERAL { $$ = $1 } @@ -2213,7 +2223,7 @@ length_opt: } | '(' INTEGRAL ')' { - $$ = NewIntLiteral($2) + $$ = ptr.Of(convertStringToInt($2)) } double_length_opt: @@ -2223,8 +2233,8 @@ double_length_opt: | '(' INTEGRAL ',' INTEGRAL ')' { $$ = LengthScaleOption{ - Length: NewIntLiteral($2), - Scale: NewIntLiteral($4), + Length: ptr.Of(convertStringToInt($2)), + Scale: ptr.Of(convertStringToInt($4)), } } @@ -2236,7 +2246,7 @@ double_length_opt | '(' INTEGRAL ')' { $$ = LengthScaleOption{ - Length: NewIntLiteral($2), + Length: ptr.Of(convertStringToInt($2)), } } @@ -2247,14 +2257,14 @@ decimal_length_opt: | '(' INTEGRAL ')' { $$ = LengthScaleOption{ - Length: 
NewIntLiteral($2), + Length: ptr.Of(convertStringToInt($2)), } } | '(' INTEGRAL ',' INTEGRAL ')' { $$ = LengthScaleOption{ - Length: NewIntLiteral($2), - Scale: NewIntLiteral($4), + Length: ptr.Of(convertStringToInt($2)), + Scale: ptr.Of(convertStringToInt($4)), } } @@ -2492,7 +2502,7 @@ name_opt: { $$ = "" } -| ci_identifier +| sql_id { $$ = string($1.String()) } @@ -2518,7 +2528,7 @@ index_column: } constraint_definition: - CONSTRAINT ci_identifier_opt constraint_info + CONSTRAINT sql_id_opt constraint_info { $$ = &ConstraintDefinition{Name: $2, Details: $3} } @@ -2528,7 +2538,7 @@ constraint_definition: } check_constraint_definition: - CONSTRAINT ci_identifier_opt check_constraint_info + CONSTRAINT sql_id_opt check_constraint_info { $$ = &ConstraintDefinition{Name: $2, Details: $3} } @@ -2998,27 +3008,25 @@ alter_option: } | ALTER column_opt column_name SET DEFAULT openb expression closeb { - $$ = &AlterColumn{Column: $3, DropDefault:false, DefaultVal:$7} + $$ = &AlterColumn{Column: $3, DropDefault:false, DefaultVal:$7} } | ALTER column_opt column_name SET VISIBLE { - val := false - $$ = &AlterColumn{Column: $3, Invisible:&val} + $$ = &AlterColumn{Column: $3, Invisible: ptr.Of(false)} } | ALTER column_opt column_name SET INVISIBLE { - val := true - $$ = &AlterColumn{Column: $3, Invisible:&val} + $$ = &AlterColumn{Column: $3, Invisible: ptr.Of(true)} } -| ALTER CHECK ci_identifier enforced +| ALTER CHECK sql_id enforced { $$ = &AlterCheck{Name: $3, Enforced: $4} } -| ALTER INDEX ci_identifier VISIBLE +| ALTER INDEX sql_id VISIBLE { $$ = &AlterIndex{Name: $3, Invisible: false} } -| ALTER INDEX ci_identifier INVISIBLE +| ALTER INDEX sql_id INVISIBLE { $$ = &AlterIndex{Name: $3, Invisible: true} } @@ -3058,7 +3066,7 @@ alter_option: { $$ = &DropColumn{Name:$3} } -| DROP index_or_key ci_identifier +| DROP index_or_key sql_id { $$ = &DropKey{Type:NormalKeyType, Name:$3} } @@ -3066,15 +3074,15 @@ alter_option: { $$ = &DropKey{Type:PrimaryKeyType} } -| DROP FOREIGN KEY 
ci_identifier +| DROP FOREIGN KEY sql_id { $$ = &DropKey{Type:ForeignKeyType, Name:$4} } -| DROP CHECK ci_identifier +| DROP CHECK sql_id { $$ = &DropKey{Type:CheckKeyType, Name:$3} } -| DROP CONSTRAINT ci_identifier +| DROP CONSTRAINT sql_id { $$ = &DropKey{Type:CheckKeyType, Name:$3} } @@ -3086,7 +3094,7 @@ alter_option: { $$ = &RenameTableName{Table:$3} } -| RENAME index_or_key ci_identifier TO ci_identifier +| RENAME index_or_key sql_id TO sql_id { $$ = &RenameIndex{OldName:$3, NewName:$5} } @@ -3104,43 +3112,43 @@ alter_commands_modifier_list: alter_commands_modifier: ALGORITHM equal_opt DEFAULT { - $$ = AlgorithmValue(string($3)) + $$ = AlgorithmValue(string($3)) } | ALGORITHM equal_opt INPLACE { - $$ = AlgorithmValue(string($3)) + $$ = AlgorithmValue(string($3)) } | ALGORITHM equal_opt COPY { - $$ = AlgorithmValue(string($3)) + $$ = AlgorithmValue(string($3)) } | ALGORITHM equal_opt INSTANT { - $$ = AlgorithmValue(string($3)) + $$ = AlgorithmValue(string($3)) } | LOCK equal_opt DEFAULT { - $$ = &LockOption{Type:DefaultType} + $$ = &LockOption{Type:DefaultType} } | LOCK equal_opt NONE { - $$ = &LockOption{Type:NoneType} + $$ = &LockOption{Type:NoneType} } | LOCK equal_opt SHARED { - $$ = &LockOption{Type:SharedType} + $$ = &LockOption{Type:SharedType} } | LOCK equal_opt EXCLUSIVE { - $$ = &LockOption{Type:ExclusiveType} + $$ = &LockOption{Type:ExclusiveType} } | WITH VALIDATION { - $$ = &Validation{With:true} + $$ = &Validation{With:true} } | WITHOUT VALIDATION { - $$ = &Validation{With:false} + $$ = &Validation{With:false} } alter_statement: @@ -3365,6 +3373,19 @@ alter_statement: Type: UnthrottleAllMigrationType, } } +| ALTER comment_opt VITESS_MIGRATION STRING FORCE_CUTOVER + { + $$ = &AlterMigration{ + Type: ForceCutOverMigrationType, + UUID: string($4), + } + } +| ALTER comment_opt VITESS_MIGRATION FORCE_CUTOVER ALL + { + $$ = &AlterMigration{ + Type: ForceCutOverAllMigrationType, + } + } partitions_options_opt: { @@ -3372,16 +3393,16 @@ 
partitions_options_opt: } | PARTITION BY partitions_options_beginning partitions_opt subpartition_opt partition_definitions_opt { - $3.Partitions = $4 - $3.SubPartition = $5 - $3.Definitions = $6 - $$ = $3 + $3.Partitions = $4 + $3.SubPartition = $5 + $3.Definitions = $6 + $$ = $3 } partitions_options_beginning: linear_opt HASH '(' expression ')' { - $$ = &PartitionOption { + $$ = &PartitionOption { IsLinear: $1, Type: HashType, Expr: $4, @@ -3389,7 +3410,7 @@ partitions_options_beginning: } | linear_opt KEY algorithm_opt '(' column_list_empty ')' { - $$ = &PartitionOption { + $$ = &PartitionOption { IsLinear: $1, Type: KeyType, KeyAlgorithm: $3, @@ -3398,7 +3419,7 @@ partitions_options_beginning: } | range_or_list '(' expression ')' { - $$ = &PartitionOption { + $$ = &PartitionOption { Type: $1, Expr: $3, } @@ -3740,14 +3761,12 @@ partition_definition_attribute_list_opt: } | partition_definition_attribute_list_opt partition_max_rows { - val := $2 - $1.MaxRows = &val + $1.MaxRows = ptr.Of($2) $$ = $1 } | partition_definition_attribute_list_opt partition_min_rows { - val := $2 - $1.MinRows = &val + $1.MinRows = ptr.Of($2) $$ = $1 } | partition_definition_attribute_list_opt partition_tablespace_name @@ -3808,14 +3827,12 @@ subpartition_definition_attribute_list_opt: } | subpartition_definition_attribute_list_opt partition_max_rows { - val := $2 - $1.MaxRows = &val + $1.MaxRows = ptr.Of($2) $$ = $1 } | subpartition_definition_attribute_list_opt partition_min_rows { - val := $2 - $1.MinRows = &val + $1.MinRows = ptr.Of($2) $$ = $1 } | subpartition_definition_attribute_list_opt partition_tablespace_name @@ -3935,13 +3952,13 @@ drop_statement: { $$ = &DropTable{FromTables: $6, IfExists: $5, Comments: Comments($2).Parsed(), Temp: $3} } -| DROP comment_opt INDEX ci_identifier ON table_name algorithm_lock_opt +| DROP comment_opt INDEX sql_id ON table_name algorithm_lock_opt { // Change this to an alter statement if $4.Lowered() == "primary" { - $$ = 
&AlterTable{FullyParsed:true, Table: $6,AlterOptions: append([]AlterOption{&DropKey{Type:PrimaryKeyType}},$7...)} + $$ = &AlterTable{FullyParsed:true, Table: $6,AlterOptions: append([]AlterOption{&DropKey{Type:PrimaryKeyType}},$7...)} } else { - $$ = &AlterTable{FullyParsed: true, Table: $6,AlterOptions: append([]AlterOption{&DropKey{Type:NormalKeyType, Name:$4}},$7...)} + $$ = &AlterTable{FullyParsed: true, Table: $6,AlterOptions: append([]AlterOption{&DropKey{Type:NormalKeyType, Name:$4}},$7...)} } } | DROP comment_opt VIEW exists_opt view_name_list restrict_or_cascade_opt @@ -4128,6 +4145,10 @@ show_statement: { $$ = &Show{&ShowBasic{Command: VschemaTables}} } +| SHOW VSCHEMA KEYSPACES + { + $$ = &Show{&ShowBasic{Command: VschemaKeyspaces}} + } | SHOW VSCHEMA VINDEXES { $$ = &Show{&ShowBasic{Command: VschemaVindexes}} @@ -4215,11 +4236,11 @@ full_opt: columns_or_fields: COLUMNS { - $$ = string($1) + $$ = string($1) } | FIELDS { - $$ = string($1) + $$ = string($1) } from_database_opt: @@ -4253,11 +4274,11 @@ like_or_where_opt: like_opt: /* empty */ { - $$ = nil + $$ = nil } | LIKE STRING { - $$ = &ShowFilter{Like:string($2)} + $$ = &ShowFilter{Like:string($2)} } session_or_local_opt: @@ -4412,14 +4433,6 @@ explain_format_opt: { $$ = TreeType } -| FORMAT '=' VITESS - { - $$ = VitessType - } -| FORMAT '=' VTEXPLAIN - { - $$ = VTExplainType - } | FORMAT '=' TRADITIONAL { $$ = TraditionalType @@ -4712,15 +4725,15 @@ union_op: cache_opt: { - $$ = "" + $$ = "" } | SQL_NO_CACHE { - $$ = SQLNoCacheStr + $$ = SQLNoCacheStr } | SQL_CACHE { - $$ = SQLCacheStr + $$ = SQLCacheStr } distinct_opt: @@ -4775,34 +4788,23 @@ deallocate_statement: $$ = &DeallocateStmt{Comments: Comments($2).Parsed(), Name: $4} } -select_expression_list_opt: +select_options_opt: { $$ = nil } -| select_expression_list +| select_options { $$ = $1 } select_options: - { - $$ = nil - } -| select_option +select_option { $$ = []string{$1} } -| select_option select_option // TODO: figure out a way to do this 
recursively instead. - { // TODO: This is a hack since I couldn't get it to work in a nicer way. I got 'conflicts: 8 shift/reduce' - $$ = []string{$1, $2} - } -| select_option select_option select_option +| select_options select_option { - $$ = []string{$1, $2, $3} - } -| select_option select_option select_option select_option - { - $$ = []string{$1, $2, $3, $4} + $$ = append($1, $2) } select_option: @@ -4889,7 +4891,7 @@ from_opt: } | from_clause { - $$ = $1 + $$ = $1 } from_clause: @@ -5130,9 +5132,9 @@ natural_join: | NATURAL outer_join { if $2 == LeftJoinType { - $$ = NaturalLeftJoinType + $$ = NaturalLeftJoinType } else { - $$ = NaturalRightJoinType + $$ = NaturalRightJoinType } } @@ -5198,6 +5200,14 @@ index_hint: { $$ = &IndexHint{Type: ForceOp, ForType: $3, Indexes: $5} } +| USE VINDEX openb index_list closeb + { + $$ = &IndexHint{Type: UseVindexOp, Indexes: $4 } + } +| IGNORE VINDEX openb index_list closeb + { + $$ = &IndexHint{Type: IgnoreVindexOp, Indexes: $4} + } index_hint_for_opt: { @@ -5230,19 +5240,19 @@ where_expression_opt: expression: expression OR expression %prec OR { - $$ = &OrExpr{Left: $1, Right: $3} + $$ = &OrExpr{Left: $1, Right: $3} } | expression XOR expression %prec XOR { - $$ = &XorExpr{Left: $1, Right: $3} + $$ = &XorExpr{Left: $1, Right: $3} } | expression AND expression %prec AND { - $$ = &AndExpr{Left: $1, Right: $3} + $$ = &AndExpr{Left: $1, Right: $3} } | NOT expression %prec NOT { - $$ = &NotExpr{Expr: $2} + $$ = &NotExpr{Expr: $2} } | bool_pri IS is_suffix %prec IS { @@ -5250,43 +5260,51 @@ expression: } | bool_pri %prec EXPRESSION_PREC_SETTER { - $$ = $1 + $$ = $1 } | user_defined_variable ASSIGNMENT_OPT expression %prec ASSIGNMENT_OPT { - $$ = &AssignmentExpr{Left: $1, Right: $3} + $$ = &AssignmentExpr{Left: $1, Right: $3} } | expression MEMBER OF openb expression closeb { $$ = &MemberOfExpr{Value: $1, JSONArr:$5 } } +null_or_unknown: + NULL + { + } +| UNKNOWN + { + } + bool_pri: -bool_pri IS NULL %prec IS +bool_pri IS 
null_or_unknown %prec IS { $$ = &IsExpr{Left: $1, Right: IsNullOp} } -| bool_pri IS NOT NULL %prec IS +| bool_pri IS NOT null_or_unknown %prec IS { - $$ = &IsExpr{Left: $1, Right: IsNotNullOp} + $$ = &IsExpr{Left: $1, Right: IsNotNullOp} } | bool_pri compare predicate { - $$ = &ComparisonExpr{Left: $1, Operator: $2, Right: $3} + $$ = &ComparisonExpr{Left: $1, Operator: $2, Right: $3} } | predicate %prec EXPRESSION_PREC_SETTER { - $$ = $1 + $$ = $1 } predicate: bit_expr IN col_tuple { - $$ = &ComparisonExpr{Left: $1, Operator: InOp, Right: $3} + $$ = &ComparisonExpr{Left: $1, Operator: InOp, Right: $3} } | bit_expr NOT IN col_tuple { - $$ = &ComparisonExpr{Left: $1, Operator: NotInOp, Right: $4} + $$ = &ComparisonExpr{Left: $1, Operator: NotInOp, Right: $4} } | bit_expr BETWEEN bit_expr AND predicate { @@ -5294,27 +5312,27 @@ bit_expr IN col_tuple } | bit_expr NOT BETWEEN bit_expr AND predicate { - $$ = &BetweenExpr{Left: $1, IsBetween: false, From: $4, To: $6} + $$ = &BetweenExpr{Left: $1, IsBetween: false, From: $4, To: $6} } | bit_expr LIKE simple_expr { - $$ = &ComparisonExpr{Left: $1, Operator: LikeOp, Right: $3} + $$ = &ComparisonExpr{Left: $1, Operator: LikeOp, Right: $3} } | bit_expr NOT LIKE simple_expr { - $$ = &ComparisonExpr{Left: $1, Operator: NotLikeOp, Right: $4} + $$ = &ComparisonExpr{Left: $1, Operator: NotLikeOp, Right: $4} } | bit_expr LIKE simple_expr ESCAPE simple_expr %prec LIKE { - $$ = &ComparisonExpr{Left: $1, Operator: LikeOp, Right: $3, Escape: $5} + $$ = &ComparisonExpr{Left: $1, Operator: LikeOp, Right: $3, Escape: $5} } | bit_expr NOT LIKE simple_expr ESCAPE simple_expr %prec LIKE { - $$ = &ComparisonExpr{Left: $1, Operator: NotLikeOp, Right: $4, Escape: $6} + $$ = &ComparisonExpr{Left: $1, Operator: NotLikeOp, Right: $4, Escape: $6} } | bit_expr regexp_symbol bit_expr { - $$ = &ComparisonExpr{Left: $1, Operator: RegexpOp, Right: $3} + $$ = &ComparisonExpr{Left: $1, Operator: RegexpOp, Right: $3} } | bit_expr NOT regexp_symbol bit_expr 
{ @@ -5322,7 +5340,7 @@ bit_expr IN col_tuple } | bit_expr %prec EXPRESSION_PREC_SETTER { - $$ = $1 + $$ = $1 } regexp_symbol: @@ -5337,109 +5355,109 @@ regexp_symbol: bit_expr: bit_expr '|' bit_expr %prec '|' { - $$ = &BinaryExpr{Left: $1, Operator: BitOrOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: BitOrOp, Right: $3} } | bit_expr '&' bit_expr %prec '&' { - $$ = &BinaryExpr{Left: $1, Operator: BitAndOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: BitAndOp, Right: $3} } | bit_expr SHIFT_LEFT bit_expr %prec SHIFT_LEFT { - $$ = &BinaryExpr{Left: $1, Operator: ShiftLeftOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: ShiftLeftOp, Right: $3} } | bit_expr SHIFT_RIGHT bit_expr %prec SHIFT_RIGHT { - $$ = &BinaryExpr{Left: $1, Operator: ShiftRightOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: ShiftRightOp, Right: $3} } | bit_expr '+' bit_expr %prec '+' { - $$ = &BinaryExpr{Left: $1, Operator: PlusOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: PlusOp, Right: $3} } | bit_expr '-' bit_expr %prec '-' { - $$ = &BinaryExpr{Left: $1, Operator: MinusOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: MinusOp, Right: $3} } | bit_expr '+' INTERVAL bit_expr interval %prec '+' { - $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: $1, Unit: $5, Interval: $4} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAdd, Date: $1, Unit: $5, Interval: $4} } | bit_expr '-' INTERVAL bit_expr interval %prec '-' { - $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: $1, Unit: $5, Interval: $4} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinarySub, Date: $1, Unit: $5, Interval: $4} } | bit_expr '*' bit_expr %prec '*' { - $$ = &BinaryExpr{Left: $1, Operator: MultOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: MultOp, Right: $3} } | bit_expr '/' bit_expr %prec '/' { - $$ = &BinaryExpr{Left: $1, Operator: DivOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: DivOp, Right: $3} } | bit_expr '%' bit_expr %prec '%' { - $$ = 
&BinaryExpr{Left: $1, Operator: ModOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: ModOp, Right: $3} } | bit_expr DIV bit_expr %prec DIV { - $$ = &BinaryExpr{Left: $1, Operator: IntDivOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: IntDivOp, Right: $3} } | bit_expr MOD bit_expr %prec MOD { - $$ = &BinaryExpr{Left: $1, Operator: ModOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: ModOp, Right: $3} } | bit_expr '^' bit_expr %prec '^' { - $$ = &BinaryExpr{Left: $1, Operator: BitXorOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: BitXorOp, Right: $3} } | simple_expr %prec EXPRESSION_PREC_SETTER { - $$ = $1 + $$ = $1 } simple_expr: function_call_keyword { - $$ = $1 + $$ = $1 } | function_call_nonkeyword { - $$ = $1 + $$ = $1 } | function_call_generic { - $$ = $1 + $$ = $1 } | function_call_conflict { - $$ = $1 + $$ = $1 } | simple_expr COLLATE charset %prec UNARY { - $$ = &CollateExpr{Expr: $1, Collation: $3} + $$ = &CollateExpr{Expr: $1, Collation: $3} } | literal_or_null { - $$ = $1 + $$ = $1 } | column_name_or_offset { - $$ = $1 + $$ = $1 } | variable_expr { - $$ = $1 + $$ = $1 } | '+' simple_expr %prec UNARY { - $$= $2; // TODO: do we really want to ignore unary '+' before any kind of literals? + $$ = $2 // TODO: do we really want to ignore unary '+' before any kind of literals? } | '-' simple_expr %prec UNARY { - $$ = &UnaryExpr{Operator: UMinusOp, Expr: $2} + $$ = &UnaryExpr{Operator: UMinusOp, Expr: $2} } | '~' simple_expr %prec UNARY { - $$ = &UnaryExpr{Operator: TildaOp, Expr: $2} + $$ = &UnaryExpr{Operator: TildaOp, Expr: $2} } | '!' 
simple_expr %prec UNARY { @@ -5447,19 +5465,19 @@ function_call_keyword } | subquery { - $$= $1 + $$= $1 } | tuple_expression { - $$ = $1 + $$ = $1 } | EXISTS subquery { - $$ = &ExistsExpr{Subquery: $2} + $$ = &ExistsExpr{Subquery: $2} } | MATCH column_names_opt_paren AGAINST openb bit_expr match_option closeb { - $$ = &MatchExpr{Columns: $2, Expr: $5, Option: $6} + $$ = &MatchExpr{Columns: $2, Expr: $5, Option: $6} } | CAST openb expression AS convert_type array_opt closeb { @@ -5487,7 +5505,7 @@ function_call_keyword } | INTERVAL bit_expr interval '+' bit_expr %prec INTERVAL { - $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: $5, Unit: $3, Interval: $2} + $$ = &IntervalDateExpr{Syntax: IntervalDateExprBinaryAddLeft, Date: $5, Unit: $3, Interval: $2} } | INTERVAL openb expression ',' expression_list closeb { @@ -5495,11 +5513,11 @@ function_call_keyword } | column_name_or_offset JSON_EXTRACT_OP text_literal_or_arg { - $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3} } | column_name_or_offset JSON_UNQUOTE_EXTRACT_OP text_literal_or_arg { - $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3} + $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3} } column_names_opt_paren: @@ -5629,6 +5647,7 @@ window_partition_clause_opt: sql_id_opt: { + $$ = IdentifierCI{} } | sql_id { @@ -5651,6 +5670,16 @@ over_clause: $$ = &OverClause{WindowName: $2} } +over_clause_opt: + over_clause + { + $$ = $1 + } +| + { + $$ = nil + } + null_treatment_clause_opt: { $$ = nil @@ -5818,7 +5847,7 @@ col_tuple: subquery: query_expression_parens %prec SUBQUERY_AS_EXPR { - $$ = &Subquery{$1} + $$ = &Subquery{$1} } expression_list: @@ -5836,11 +5865,11 @@ expression_list: introduce side effects due to being a simple identifier */ function_call_generic: - sql_id openb select_expression_list_opt closeb + sql_id openb expression_list_opt closeb { $$ = &FuncExpr{Name: $1, 
Exprs: $3} } -| table_id '.' reserved_sql_id openb select_expression_list_opt closeb +| table_id '.' reserved_sql_id openb expression_list_opt closeb { $$ = &FuncExpr{Qualifier: $1, Name: $3, Exprs: $5} } @@ -5850,11 +5879,11 @@ function_call_generic: as a result */ function_call_keyword: - LEFT openb select_expression_list closeb + LEFT openb expression_list_opt closeb { $$ = &FuncExpr{Name: NewIdentifierCI("left"), Exprs: $3} } -| RIGHT openb select_expression_list closeb +| RIGHT openb expression_list_opt closeb { $$ = &FuncExpr{Name: NewIdentifierCI("right"), Exprs: $3} } @@ -5862,17 +5891,21 @@ function_call_keyword: { $$ = &SubstrExpr{Name: $3, From: $5, To: $7} } +| MID openb expression ',' expression ',' expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } | SUBSTRING openb expression ',' expression closeb { $$ = &SubstrExpr{Name: $3, From: $5} } | SUBSTRING openb expression FROM expression FOR expression closeb { - $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} } | SUBSTRING openb expression FROM expression closeb { - $$ = &SubstrExpr{Name: $3, From: $5} + $$ = &SubstrExpr{Name: $3, From: $5} } | CASE expression_opt when_expression_list else_expression_opt END { @@ -5903,7 +5936,7 @@ UTC_DATE func_paren_opt } | now { - $$ = $1 + $$ = $1 } // curdate /* doesn't support fsp */ @@ -5929,69 +5962,69 @@ UTC_DATE func_paren_opt { $$ = &CurTimeFuncExpr{Name:NewIdentifierCI("current_time"), Fsp: $2} } -| COUNT openb '*' closeb +| COUNT openb '*' closeb over_clause_opt { - $$ = &CountStar{} + $$ = &CountStar{OverClause: $5} } -| COUNT openb distinct_opt expression_list closeb +| COUNT openb distinct_opt expression_list closeb over_clause_opt { - $$ = &Count{Distinct:$3, Args:$4} + $$ = &Count{Distinct:$3, Args:$4, OverClause: $6} } -| MAX openb distinct_opt expression closeb +| MAX openb distinct_opt expression closeb over_clause_opt { - $$ = &Max{Distinct:$3, Arg:$4} + $$ = &Max{Distinct:$3, Arg:$4, 
OverClause: $6} } -| MIN openb distinct_opt expression closeb +| MIN openb distinct_opt expression closeb over_clause_opt { - $$ = &Min{Distinct:$3, Arg:$4} + $$ = &Min{Distinct:$3, Arg:$4, OverClause: $6} } -| SUM openb distinct_opt expression closeb +| SUM openb distinct_opt expression closeb over_clause_opt { - $$ = &Sum{Distinct:$3, Arg:$4} + $$ = &Sum{Distinct:$3, Arg:$4, OverClause: $6} } -| AVG openb distinct_opt expression closeb +| AVG openb distinct_opt expression closeb over_clause_opt { - $$ = &Avg{Distinct:$3, Arg:$4} + $$ = &Avg{Distinct:$3, Arg:$4, OverClause: $6} } -| BIT_AND openb expression closeb +| BIT_AND openb expression closeb over_clause_opt { - $$ = &BitAnd{Arg:$3} + $$ = &BitAnd{Arg:$3, OverClause: $5} } -| BIT_OR openb expression closeb +| BIT_OR openb expression closeb over_clause_opt { - $$ = &BitOr{Arg:$3} + $$ = &BitOr{Arg:$3, OverClause: $5} } -| BIT_XOR openb expression closeb +| BIT_XOR openb expression closeb over_clause_opt { - $$ = &BitXor{Arg:$3} + $$ = &BitXor{Arg:$3, OverClause: $5} } -| STD openb expression closeb +| STD openb expression closeb over_clause_opt { - $$ = &Std{Arg:$3} + $$ = &Std{Arg:$3, OverClause: $5} } -| STDDEV openb expression closeb +| STDDEV openb expression closeb over_clause_opt { - $$ = &StdDev{Arg:$3} + $$ = &StdDev{Arg:$3, OverClause: $5} } -| STDDEV_POP openb expression closeb +| STDDEV_POP openb expression closeb over_clause_opt { - $$ = &StdPop{Arg:$3} + $$ = &StdPop{Arg:$3, OverClause: $5} } -| STDDEV_SAMP openb expression closeb +| STDDEV_SAMP openb expression closeb over_clause_opt { - $$ = &StdSamp{Arg:$3} + $$ = &StdSamp{Arg:$3, OverClause: $5} } -| VAR_POP openb expression closeb +| VAR_POP openb expression closeb over_clause_opt { - $$ = &VarPop{Arg:$3} + $$ = &VarPop{Arg:$3, OverClause: $5} } -| VAR_SAMP openb expression closeb +| VAR_SAMP openb expression closeb over_clause_opt { - $$ = &VarSamp{Arg:$3} + $$ = &VarSamp{Arg:$3, OverClause: $5} } -| VARIANCE openb expression closeb +| 
VARIANCE openb expression closeb over_clause_opt { - $$ = &Variance{Arg:$3} + $$ = &Variance{Arg:$3, OverClause: $5} } | GROUP_CONCAT openb distinct_opt expression_list order_by_opt separator_opt limit_opt closeb { @@ -6391,31 +6424,31 @@ UTC_DATE func_paren_opt } | ST_Area openb expression closeb { - $$ = &PolygonPropertyFuncExpr{ Property: Area, Polygon: $3 } + $$ = &PolygonPropertyFuncExpr{ Property: Area, Polygon: $3 } } | ST_Centroid openb expression closeb { - $$ = &PolygonPropertyFuncExpr{ Property: Centroid, Polygon: $3 } + $$ = &PolygonPropertyFuncExpr{ Property: Centroid, Polygon: $3 } } | ST_ExteriorRing openb expression closeb { - $$ = &PolygonPropertyFuncExpr{ Property: ExteriorRing, Polygon: $3 } + $$ = &PolygonPropertyFuncExpr{ Property: ExteriorRing, Polygon: $3 } } | ST_InteriorRingN openb expression ',' expression closeb { - $$ = &PolygonPropertyFuncExpr{ Property: InteriorRingN, Polygon: $3, PropertyDefArg: $5 } + $$ = &PolygonPropertyFuncExpr{ Property: InteriorRingN, Polygon: $3, PropertyDefArg: $5 } } | ST_NumInteriorRings openb expression closeb { - $$ = &PolygonPropertyFuncExpr{ Property: NumInteriorRings, Polygon: $3 } + $$ = &PolygonPropertyFuncExpr{ Property: NumInteriorRings, Polygon: $3 } } | ST_GeometryN openb expression ',' expression closeb { - $$ = &GeomCollPropertyFuncExpr{ Property: GeometryN, GeomColl: $3, PropertyDefArg: $5 } + $$ = &GeomCollPropertyFuncExpr{ Property: GeometryN, GeomColl: $3, PropertyDefArg: $5 } } | ST_NumGeometries openb expression closeb { - $$ = &GeomCollPropertyFuncExpr{ Property: NumGeometries, GeomColl: $3 } + $$ = &GeomCollPropertyFuncExpr{ Property: NumGeometries, GeomColl: $3 } } | ST_GeoHash openb expression ',' expression ',' expression closeb { @@ -6816,83 +6849,83 @@ returning_type_opt: interval: DAY_HOUR { - $$=IntervalDayHour + $$=IntervalDayHour } | DAY_MICROSECOND { - $$=IntervalDayMicrosecond + $$=IntervalDayMicrosecond } | DAY_MINUTE { - $$=IntervalDayMinute + $$=IntervalDayMinute } | 
DAY_SECOND { - $$=IntervalDaySecond + $$=IntervalDaySecond } | HOUR_MICROSECOND { - $$=IntervalHourMicrosecond + $$=IntervalHourMicrosecond } | HOUR_MINUTE { - $$=IntervalHourMinute + $$=IntervalHourMinute } | HOUR_SECOND { - $$=IntervalHourSecond + $$=IntervalHourSecond } | MINUTE_MICROSECOND { - $$=IntervalMinuteMicrosecond + $$=IntervalMinuteMicrosecond } | MINUTE_SECOND { - $$=IntervalMinuteSecond + $$=IntervalMinuteSecond } | SECOND_MICROSECOND { - $$=IntervalSecondMicrosecond + $$=IntervalSecondMicrosecond } | YEAR_MONTH { - $$=IntervalYearMonth + $$=IntervalYearMonth } | DAY { - $$=IntervalDay + $$=IntervalDay } | WEEK { - $$=IntervalWeek + $$=IntervalWeek } | HOUR { - $$=IntervalHour + $$=IntervalHour } | MINUTE { - $$=IntervalMinute + $$=IntervalMinute } | MONTH { - $$=IntervalMonth + $$=IntervalMonth } | QUARTER { - $$=IntervalQuarter + $$=IntervalQuarter } | SECOND { - $$=IntervalSecond + $$=IntervalSecond } | MICROSECOND { - $$=IntervalMicrosecond + $$=IntervalMicrosecond } | YEAR { - $$=IntervalYear + $$=IntervalYear } timestampadd_interval: @@ -6976,7 +7009,7 @@ func_paren_opt: func_datetime_precision: /* empty */ { - $$ = 0 + $$ = 0 } | openb closeb { @@ -6984,7 +7017,7 @@ func_datetime_precision: } | openb INTEGRAL closeb { - $$ = convertStringToInt($2) + $$ = convertStringToInt($2) } /* @@ -6992,23 +7025,23 @@ func_datetime_precision: the names are non-reserved, they need a dedicated rule so as not to conflict */ function_call_conflict: - IF openb select_expression_list closeb + IF openb expression_list closeb { $$ = &FuncExpr{Name: NewIdentifierCI("if"), Exprs: $3} } -| DATABASE openb select_expression_list_opt closeb +| DATABASE openb expression_list_opt closeb { $$ = &FuncExpr{Name: NewIdentifierCI("database"), Exprs: $3} } -| SCHEMA openb select_expression_list_opt closeb +| SCHEMA openb expression_list_opt closeb { $$ = &FuncExpr{Name: NewIdentifierCI("schema"), Exprs: $3} } -| MOD openb select_expression_list closeb +| MOD openb 
expression_list closeb { $$ = &FuncExpr{Name: NewIdentifierCI("mod"), Exprs: $3} } -| REPLACE openb select_expression_list closeb +| REPLACE openb expression_list closeb { $$ = &FuncExpr{Name: NewIdentifierCI("replace"), Exprs: $3} } @@ -7056,11 +7089,11 @@ convert_type_weight_string: } | AS BINARY '(' INTEGRAL ')' { - $$ = &ConvertType{Type: string($2), Length: NewIntLiteral($4)} + $$ = &ConvertType{Type: string($2), Length: ptr.Of(convertStringToInt($4))} } | AS CHAR '(' INTEGRAL ')' { - $$ = &ConvertType{Type: string($2), Length: NewIntLiteral($4)} + $$ = &ConvertType{Type: string($2), Length: ptr.Of(convertStringToInt($4))} } convert_type: @@ -7231,11 +7264,21 @@ group_by_opt: { $$ = nil } -| GROUP BY expression_list +| GROUP BY expression_list rollup_opt { - $$ = $3 + $$ = &GroupBy{Exprs: $3, WithRollup: $4} + } + +rollup_opt: + { + $$ = false + } +| WITH ROLLUP + { + $$ = true } + having_opt: { $$ = nil @@ -7276,7 +7319,7 @@ order_by_opt: } | order_by_clause { - $$ = $1 + $$ = $1 } order_by_clause: @@ -7320,7 +7363,7 @@ limit_opt: } | limit_clause { - $$ = $1 + $$ = $1 } limit_clause: @@ -7343,19 +7386,19 @@ algorithm_lock_opt: } | lock_index algorithm_index { - $$ = []AlterOption{$1,$2} + $$ = []AlterOption{$1,$2} } | algorithm_index lock_index { - $$ = []AlterOption{$1,$2} + $$ = []AlterOption{$1,$2} } | algorithm_index { - $$ = []AlterOption{$1} + $$ = []AlterOption{$1} } | lock_index { - $$ = []AlterOption{$1} + $$ = []AlterOption{$1} } @@ -7507,6 +7550,26 @@ FOR UPDATE { $$ = ForUpdateLock } +| FOR UPDATE NOWAIT + { + $$ = ForUpdateLockNoWait + } +| FOR UPDATE SKIP LOCKED + { + $$ = ForUpdateLockSkipLocked + } +| FOR SHARE + { + $$ = ForShareLock + } +| FOR SHARE NOWAIT + { + $$ = ForShareLockNoWait + } +| FOR SHARE SKIP LOCKED + { + $$ = ForShareLockSkipLocked + } | LOCK IN SHARE MODE { $$ = ShareModeLock @@ -7515,15 +7578,15 @@ FOR UPDATE into_clause: INTO OUTFILE S3 STRING charset_opt format_opt export_options manifest_opt overwrite_opt { -$$ = 
&SelectInto{Type:IntoOutfileS3, FileName:encodeSQLString($4), Charset:$5, FormatOption:$6, ExportOption:$7, Manifest:$8, Overwrite:$9} + $$ = &SelectInto{Type:IntoOutfileS3, FileName:encodeSQLString($4), Charset:$5, FormatOption:$6, ExportOption:$7, Manifest:$8, Overwrite:$9} } | INTO DUMPFILE STRING { -$$ = &SelectInto{Type:IntoDumpfile, FileName:encodeSQLString($3), Charset:ColumnCharset{}, FormatOption:"", ExportOption:"", Manifest:"", Overwrite:""} + $$ = &SelectInto{Type:IntoDumpfile, FileName:encodeSQLString($3), Charset:ColumnCharset{}, FormatOption:"", ExportOption:"", Manifest:"", Overwrite:""} } | INTO OUTFILE STRING charset_opt export_options { -$$ = &SelectInto{Type:IntoOutfile, FileName:encodeSQLString($3), Charset:$4, FormatOption:"", ExportOption:$5, Manifest:"", Overwrite:""} + $$ = &SelectInto{Type:IntoOutfile, FileName:encodeSQLString($3), Charset:$4, FormatOption:"", ExportOption:$5, Manifest:"", Overwrite:""} } format_opt: @@ -7659,21 +7722,21 @@ optionally_opt: // Because the rules are together, the parser can keep shifting // the tokens until it disambiguates a as sql_id and select as keyword. 
insert_data: - VALUES tuple_list + VALUES tuple_list row_alias_opt { - $$ = &Insert{Rows: $2} + $$ = &Insert{Rows: $2, RowAlias: $3} } | select_statement { $$ = &Insert{Rows: $1} } -| openb ins_column_list closeb VALUES tuple_list +| openb ins_column_list closeb VALUES tuple_list row_alias_opt { - $$ = &Insert{Columns: $2, Rows: $5} + $$ = &Insert{Columns: $2, Rows: $5, RowAlias: $6} } -| openb closeb VALUES tuple_list +| openb closeb VALUES tuple_list row_alias_opt { - $$ = &Insert{Columns: []IdentifierCI{}, Rows: $4} + $$ = &Insert{Columns: []IdentifierCI{}, Rows: $4, RowAlias: $5} } | openb ins_column_list closeb select_statement { @@ -7698,6 +7761,19 @@ ins_column_list: $$ = append($$, $5) } +row_alias_opt: + { + $$ = nil + } +| AS table_alias + { + $$ = &RowAlias{TableName: $2} + } +| AS table_alias openb column_list closeb + { + $$ = &RowAlias{TableName: $2, Columns: $4} + } + on_dup_opt: { $$ = nil @@ -7740,9 +7816,9 @@ tuple_expression: row_tuple { if len($1) == 1 { - $$ = $1[0] + $$ = $1[0] } else { - $$ = $1 + $$ = $1 } } @@ -8180,6 +8256,7 @@ non_reserved_keyword: | FIXED | FLUSH | FOLLOWING +| FORCE_CUTOVER | FORMAT | FORMAT_BYTES %prec FUNCTION_CALL_NON_KEYWORD | FORMAT_PICO_TIME %prec FUNCTION_CALL_NON_KEYWORD @@ -8276,6 +8353,7 @@ non_reserved_keyword: | MEMORY | MEMBER | MERGE +| MID %prec FUNCTION_CALL_NON_KEYWORD | MIN %prec FUNCTION_CALL_NON_KEYWORD | MIN_ROWS | MODE @@ -8361,6 +8439,7 @@ non_reserved_keyword: | REUSE | ROLE | ROLLBACK +| ROLLUP | ROW_FORMAT | RTRIM %prec FUNCTION_CALL_NON_KEYWORD | S3 @@ -8481,6 +8560,7 @@ non_reserved_keyword: | UNCOMMITTED | UNDEFINED | UNICODE +| UNKNOWN | UNSIGNED | UNTHROTTLE | UNUSED diff --git a/go/vt/sqlparser/testdata/select_cases.txt b/go/vt/sqlparser/testdata/select_cases.txt index 661045add7d..835a4ad4931 100644 --- a/go/vt/sqlparser/testdata/select_cases.txt +++ b/go/vt/sqlparser/testdata/select_cases.txt @@ -710,7 +710,7 @@ INPUT select substr(null,null,null),mid(null,null,null); END OUTPUT -select 
substr(null, null, null), mid(null, null, null) from dual +select substr(null, null, null), substr(null, null, null) from dual END INPUT select * from t1 where a=if(b<10,_ucs2 0x00C0,_ucs2 0x0062); @@ -1040,7 +1040,7 @@ INPUT select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1 group by 1; END OUTPUT -select t1.a, group_concat(c order by (select mid(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) desc) as grp from t1 group by 1 +select t1.a, group_concat(c order by (select substr(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) desc) as grp from t1 group by 1 END INPUT select a as like_lll from t1 where a like 'lll%'; @@ -1147,8 +1147,8 @@ END INPUT select a, group_concat(b order by b) from t1 group by a with rollup; END -ERROR -syntax error at position 61 near 'with' +OUTPUT +select a, group_concat(b order by b asc) from t1 group by a with rollup END INPUT select t1.a, t2.a, t2.b, bit_count(t2.b) from t1 left join t2 on t1.a=t2.a; @@ -1166,7 +1166,7 @@ INPUT select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); END OUTPUT -select a from t1 where mid(a + 0, 6, 3) in (mid(20040106123400, 6, 3)) +select a from t1 where substr(a + 0, 6, 3) in (substr(20040106123400, 6, 3)) END INPUT select word, word=binary 0xdf as t from t1 having t > 0; @@ -2996,7 +2996,7 @@ INPUT select concat(a, if(b>10, 'x' 'æ', 'y' 'ß')) from t1; END OUTPUT -select concat(a, if(b > 10, 'x' as `æ`, 'y' as `ß`)) from t1 +select concat(a, if(b > 10, 'xæ', 'yß')) from t1 END INPUT select * from (t1 natural join t2) natural join (t3 join (t4 natural join t5) on (b < z)); @@ -3596,7 +3596,7 @@ INPUT select mid(@my_uuid,15,1); END OUTPUT -select mid(@my_uuid, 15, 1) from dual +select substr(@my_uuid, 15, 1) from dual END INPUT select collation(concat_ws(_latin2'a',_latin2'b')), coercibility(concat_ws(_latin2'a',_latin2'b')); @@ -3806,7 +3806,7 @@ INPUT select 
format('f','')<=replace(1,1,mid(0xd9,2,1)); END OUTPUT -select format('f', '') <= replace(1, 1, mid(0xd9, 2, 1)) from dual +select format('f', '') <= replace(1, 1, substr(0xd9, 2, 1)) from dual END INPUT select substring('hello', 4294967295, 4294967295); @@ -3866,7 +3866,7 @@ INPUT select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1; END OUTPUT -select group_concat(c order by (select mid(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) asc) as grp from t1 +select group_concat(c order by (select substr(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) asc) as grp from t1 END INPUT select a1,a2,b,min(c) from t1 where ((a1 > 'a') or (a1 < '9')) and ((a2 >= 'b') and (a2 < 'z')) and (b = 'a') and ((c < 'h112') or (c = 'j121') or (c > 'k121' and c < 'm122') or (c > 'o122')) group by a1,a2,b; @@ -7597,8 +7597,8 @@ END INPUT select a, a is not false, a is not true, a is not unknown from t1; END -ERROR -syntax error at position 58 near 'unknown' +OUTPUT +select a, a is not false, a is not true, a is not null from t1 END INPUT select cast(NULL as signed), cast(1/0 as signed); @@ -7633,8 +7633,8 @@ END INPUT select a, group_concat(distinct b) from t1 group by a with rollup; END -ERROR -syntax error at position 59 near 'with' +OUTPUT +select a, group_concat(distinct b) from t1 group by a with rollup END INPUT select quote(trim(concat(' ', 'a'))); @@ -8384,7 +8384,7 @@ INPUT select quote(concat('abc'', 'cba')); END ERROR -syntax error at position 37 near '));' +syntax error at position 33 near 'cba' END INPUT select max(t1.a1), max(t2.a1) from t1, t2 where t2.a2=9; @@ -8671,8 +8671,8 @@ END INPUT select a, a is false, a is true, a is unknown from t1; END -ERROR -syntax error at position 46 near 'unknown' +OUTPUT +select a, a is false, a is true, a is null from t1 END INPUT select std(e) from bug22555 group by i; @@ -10376,7 +10376,7 @@ INPUT select concat(a, if(b>10, 'x' 'x', 'y' 
'y')) from t1; END OUTPUT -select concat(a, if(b > 10, 'x' as x, 'y' as y)) from t1 +select concat(a, if(b > 10, 'xx', 'yy')) from t1 END INPUT select * from `information_schema`.`COLUMNS` where `TABLE_NAME` = NULL; @@ -11107,8 +11107,8 @@ END INPUT select a, count(a) from t1 group by a with rollup; END -ERROR -syntax error at position 43 near 'with' +OUTPUT +select a, count(a) from t1 group by a with rollup END INPUT select * from t1,t2 left join t3 on (t2.i=t3.i) order by t1.i,t2.i,t3.i; @@ -11191,8 +11191,8 @@ END INPUT select f1, group_concat(f1+1) from t1 group by f1 with rollup; END -ERROR -syntax error at position 55 near 'with' +OUTPUT +select f1, group_concat(f1 + 1) from t1 group by f1 with rollup END INPUT select substring('hello', -18446744073709551617, -18446744073709551617); @@ -12014,7 +12014,7 @@ INPUT select mid('hello',1,null),mid('hello',null,1),mid(null,1,1); END OUTPUT -select mid('hello', 1, null), mid('hello', null, 1), mid(null, 1, 1) from dual +select substr('hello', 1, null), substr('hello', null, 1), substr(null, 1, 1) from dual END INPUT select locate(_ujis 0xa2a1,_ujis 0xa1a2a1a3); @@ -12380,7 +12380,7 @@ INPUT select group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a) desc) as grp from t1; END OUTPUT -select group_concat(c order by (select mid(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) desc) as grp from t1 +select group_concat(c order by (select substr(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) desc) as grp from t1 END INPUT select a.text, b.id, b.betreff from t2 a inner join t3 b on a.id = b.forum inner join t1 c on b.id = c.thread where match(b.betreff) against ('+abc' in boolean mode) union select a.text, b.id, b.betreff from t2 a inner join t3 b on a.id = b.forum inner join t1 c on b.id = c.thread where match(c.beitrag) against ('+abc' in boolean mode) order by match(betreff) against ('+abc' in boolean mode) desc; @@ -12656,7 +12656,7 @@ INPUT 
select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); END OUTPUT -select a from t1 where mid(a + 0, 6, 3) = mid(20040106123400, 6, 3) +select a from t1 where substr(a + 0, 6, 3) = substr(20040106123400, 6, 3) END INPUT select concat('',str_to_date('8:11:2.123456 03-01-02','%H:%i:%S.%f %y-%m-%d')); @@ -13039,8 +13039,8 @@ END INPUT select a, group_concat(b) from t1 group by a with rollup; END -ERROR -syntax error at position 50 near 'with' +OUTPUT +select a, group_concat(b) from t1 group by a with rollup END INPUT select insert('txs',2,1,'hi'),insert('is ',4,0,'a'),insert('txxxxt',2,4,'es'); @@ -13585,8 +13585,8 @@ END INPUT select a, group_concat(distinct b order by b) from t1 group by a with rollup; END -ERROR -syntax error at position 70 near 'with' +OUTPUT +select a, group_concat(distinct b order by b asc) from t1 group by a with rollup END INPUT select 4|||| delimiter 'abcd'|||| select 5; @@ -16772,7 +16772,7 @@ INPUT select 'The cost of accessing t1 (dont care if it changes' '^'; END OUTPUT -select 'The cost of accessing t1 (dont care if it changes' as `^` from dual +select 'The cost of accessing t1 (dont care if it changes^' from dual END INPUT select * from t1 natural left join t2 where (i is not null)=0; @@ -18872,7 +18872,7 @@ INPUT select 'hello' 'monty'; END OUTPUT -select 'hello' as monty from dual +select 'hellomonty' from dual END INPUT select collation(bin(130)), coercibility(bin(130)); @@ -21266,7 +21266,7 @@ INPUT select t1.a, group_concat(c order by (select mid(group_concat(c order by a),1,5) from t2 where t2.a=t1.a)) as grp from t1 group by 1; END OUTPUT -select t1.a, group_concat(c order by (select mid(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) asc) as grp from t1 group by 1 +select t1.a, group_concat(c order by (select substr(group_concat(c order by a asc), 1, 5) from t2 where t2.a = t1.a) asc) as grp from t1 group by 1 END INPUT select hex(inet_aton('127')); @@ -21296,7 +21296,7 @@ INPUT select 
left('hello',2),right('hello',2),substring('hello',2,2),mid('hello',1,5); END OUTPUT -select left('hello', 2), right('hello', 2), substr('hello', 2, 2), mid('hello', 1, 5) from dual +select left('hello', 2), right('hello', 2), substr('hello', 2, 2), substr('hello', 1, 5) from dual END INPUT select date_add("1997-12-31",INTERVAL "1 1" YEAR_MONTH); @@ -22087,8 +22087,8 @@ END INPUT select count(distinct (f1+1)) from t1 group by f1 with rollup; END -ERROR -syntax error at position 55 near 'with' +OUTPUT +select count(distinct f1 + 1) from t1 group by f1 with rollup END INPUT select -9223372036854775808; diff --git a/go/vt/sqlparser/testdata/union_cases.txt b/go/vt/sqlparser/testdata/union_cases.txt index 8e2def0e04e..2f3505a84ff 100644 --- a/go/vt/sqlparser/testdata/union_cases.txt +++ b/go/vt/sqlparser/testdata/union_cases.txt @@ -109,8 +109,8 @@ END INPUT SELECT a, SUM(a), SUM(a)+1 FROM (SELECT a FROM t1 UNION select 2) d GROUP BY a WITH ROLLUP; END -ERROR -syntax error at position 86 near 'WITH' +OUTPUT +select a, sum(a), sum(a) + 1 from (select a from t1 union select 2 from dual) as d group by a with rollup END INPUT SELECT * FROM t1 UNION ALL SELECT * FROM t2 ORDER BY a LIMIT 5 OFFSET 6; @@ -374,7 +374,7 @@ INPUT SELECT product, country_id , year, SUM(profit) FROM t1 GROUP BY product, country_id, year WITH CUBE UNION ALL SELECT product, country_id , year, SUM(profit) FROM t1 GROUP BY product, country_id, year WITH ROLLUP; END ERROR -syntax error at position 95 near 'WITH' +syntax error at position 100 near 'CUBE' END INPUT select (with recursive dt as (select t1.a as a union select a+1 from dt where a<10) select dt1.a from dt dt1 where dt1.a=t1.a ) as subq from t1; @@ -493,8 +493,8 @@ END INPUT SELECT a, SUM(a), SUM(a)+1 FROM (SELECT 1 a UNION select 2) d GROUP BY a WITH ROLLUP; END -ERROR -syntax error at position 80 near 'WITH' +OUTPUT +select a, sum(a), sum(a) + 1 from (select 1 as a from dual union select 2 from dual) as d group by a with rollup END INPUT 
SELECT LOCATION FROM T1 WHERE EVENT_ID=2 UNION ALL SELECT LOCATION FROM T1 WHERE EVENT_ID=3; @@ -619,8 +619,8 @@ END INPUT SELECT a, SUM(a), SUM(a)+1, CONCAT(SUM(a),'x'), SUM(a)+SUM(a), SUM(a) FROM (SELECT 1 a, 2 b UNION SELECT 2,3 UNION SELECT 5,6 ) d GROUP BY a WITH ROLLUP; END -ERROR -syntax error at position 152 near 'WITH' +OUTPUT +select a, sum(a), sum(a) + 1, CONCAT(sum(a), 'x'), sum(a) + sum(a), sum(a) from (select 1 as a, 2 as b from dual union select 2, 3 from dual union select 5, 6 from dual) as d group by a with rollup END INPUT select st_equals(st_union(ST_GeomFromText('point(1 1)'), ST_GeomFromText('multipoint(2 2, 3 3)')), st_intersection(ST_GeomFromText('point(0 0)'), ST_GeomFromText('point(1 1)'))); @@ -889,8 +889,8 @@ END INPUT SELECT a, SUM(a), SUM(a)+1, CONCAT(SUM(a),'x'), SUM(a)+SUM(a), SUM(a) FROM (SELECT 1 a, 2 b UNION SELECT 2,3 UNION SELECT 5,6 ) d GROUP BY a WITH ROLLUP ORDER BY GROUPING(a),a; END -ERROR -syntax error at position 154 near 'WITH' +OUTPUT +select a, sum(a), sum(a) + 1, CONCAT(sum(a), 'x'), sum(a) + sum(a), sum(a) from (select 1 as a, 2 as b from dual union select 2, 3 from dual union select 5, 6 from dual) as d group by a with rollup order by GROUPING(a) asc, a asc END INPUT SELECT ST_ASTEXT(ST_UNION(ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())'), ST_GEOMFROMTEXT('GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION(GEOMETRYCOLLECTION())))'))) as geom; @@ -1004,7 +1004,7 @@ INPUT SELECT 1 FOR SHARE UNION SELECT 2; END ERROR -syntax error at position 19 near 'SHARE' +syntax error at position 25 near 'UNION' END INPUT SELECT ST_AsText(ST_Union(shore, boundary)) FROM lakes, named_places WHERE lakes.name = 'Blue Lake' AND named_places.name = 'Goose Island'; @@ -1087,8 +1087,8 @@ END INPUT SELECT a, SUM(a), SUM(a)+1, CONCAT(SUM(a),'x'), SUM(a)+SUM(a), SUM(a) FROM (SELECT 1 a, 2 b UNION SELECT 2,3 UNION SELECT 5,6 ) d GROUP BY a WITH ROLLUP ORDER BY SUM(a); END -ERROR -syntax error at position 154 near 'WITH' 
+OUTPUT +select a, sum(a), sum(a) + 1, CONCAT(sum(a), 'x'), sum(a) + sum(a), sum(a) from (select 1 as a, 2 as b from dual union select 2, 3 from dual union select 5, 6 from dual) as d group by a with rollup order by sum(a) asc END INPUT select 'a' union select concat('a', -0.0); diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 2b82e619445..58f575f8642 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -44,18 +44,18 @@ type Tokenizer struct { multi bool specialComment *Tokenizer - Pos int - buf string + Pos int + buf string + parser *Parser } // NewStringTokenizer creates a new Tokenizer for the // sql string. -func NewStringTokenizer(sql string) *Tokenizer { - checkParserVersionFlag() - +func (p *Parser) NewStringTokenizer(sql string) *Tokenizer { return &Tokenizer{ buf: sql, BindVars: make(map[string]struct{}), + parser: p, } } @@ -680,9 +680,9 @@ func (tkn *Tokenizer) scanMySQLSpecificComment() (int, string) { commentVersion, sql := ExtractMysqlComment(tkn.buf[start:tkn.Pos]) - if mySQLParserVersion >= commentVersion { + if tkn.parser.version >= commentVersion { // Only add the special comment to the tokenizer if the version of MySQL is higher or equal to the comment version - tkn.specialComment = NewStringTokenizer(sql) + tkn.specialComment = tkn.parser.NewStringTokenizer(sql) } return tkn.Scan() diff --git a/go/vt/sqlparser/token_test.go b/go/vt/sqlparser/token_test.go index 0fd43b8f86c..b6848d35f06 100644 --- a/go/vt/sqlparser/token_test.go +++ b/go/vt/sqlparser/token_test.go @@ -74,9 +74,10 @@ func TestLiteralID(t *testing.T) { out: "@x @y", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - tkn := NewStringTokenizer(tcase.in) + tkn := parser.NewStringTokenizer(tcase.in) id, out := tkn.Scan() require.Equal(t, tcase.id, id) require.Equal(t, tcase.out, string(out)) @@ -148,9 +149,10 @@ func TestString(t *testing.T) { want: "hello", }} + parser := NewTestParser() for 
_, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - id, got := NewStringTokenizer(tcase.in).Scan() + id, got := parser.NewStringTokenizer(tcase.in).Scan() require.Equal(t, tcase.id, id, "Scan(%q) = (%s), want (%s)", tcase.in, tokenName(id), tokenName(tcase.id)) require.Equal(t, tcase.want, string(got)) }) @@ -193,9 +195,10 @@ func TestSplitStatement(t *testing.T) { sql: "", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - sql, rem, err := SplitStatement(tcase.in) + sql, rem, err := parser.SplitStatement(tcase.in) if err != nil { t.Errorf("EndOfStatementPosition(%s): ERROR: %v", tcase.in, err) return @@ -218,27 +221,28 @@ func TestVersion(t *testing.T) { in string id []int }{{ - version: "50709", + version: "5.7.9", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{FROM, IN, EXISTS, 0}, }, { - version: "80101", + version: "8.1.1", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{FROM, IN, EXISTS, 0}, }, { - version: "80201", + version: "8.2.1", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{SELECT, FROM, IN, EXISTS, 0}, }, { - version: "80102", + version: "8.1.2", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{SELECT, FROM, IN, EXISTS, 0}, }} for _, tcase := range testcases { t.Run(tcase.version+"_"+tcase.in, func(t *testing.T) { - mySQLParserVersion = tcase.version - tok := NewStringTokenizer(tcase.in) + parser, err := New(Options{MySQLServerVersion: tcase.version}) + require.NoError(t, err) + tok := parser.NewStringTokenizer(tcase.in) for _, expectedID := range tcase.id { id, _ := tok.Scan() require.Equal(t, expectedID, id) @@ -306,9 +310,10 @@ func TestIntegerAndID(t *testing.T) { out: "3.2", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - tkn := NewStringTokenizer(tcase.in) + tkn := parser.NewStringTokenizer(tcase.in) id, out := tkn.Scan() require.Equal(t, tcase.id, id) expectedOut := tcase.out diff --git 
a/go/vt/sqlparser/tracked_buffer.go b/go/vt/sqlparser/tracked_buffer.go index aab0c1a1331..aec206f3b3d 100644 --- a/go/vt/sqlparser/tracked_buffer.go +++ b/go/vt/sqlparser/tracked_buffer.go @@ -34,7 +34,7 @@ type NodeFormatter func(buf *TrackedBuffer, node SQLNode) // want to generate a query that's different from the default. type TrackedBuffer struct { *strings.Builder - bindLocations []bindLocation + bindLocations []BindLocation nodeFormatter NodeFormatter literal func(string) (int, error) fast bool @@ -288,9 +288,9 @@ func areBothISExpr(op Expr, val Expr) bool { // WriteArg writes a value argument into the buffer along with // tracking information for future substitutions. func (buf *TrackedBuffer) WriteArg(prefix, arg string) { - buf.bindLocations = append(buf.bindLocations, bindLocation{ - offset: buf.Len(), - length: len(prefix) + len(arg), + buf.bindLocations = append(buf.bindLocations, BindLocation{ + Offset: buf.Len(), + Length: len(prefix) + len(arg), }) buf.WriteString(prefix) buf.WriteString(arg) diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go index 2375441b34e..4dff65634e8 100644 --- a/go/vt/sqlparser/tracked_buffer_test.go +++ b/go/vt/sqlparser/tracked_buffer_test.go @@ -278,16 +278,17 @@ func TestCanonicalOutput(t *testing.T) { }, } + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { - tree, err := Parse(tc.input) + tree, err := parser.Parse(tc.input) require.NoError(t, err, tc.input) out := CanonicalString(tree) require.Equal(t, tc.canonical, out, "bad serialization") // Make sure we've generated a valid query! 
- rereadStmt, err := Parse(out) + rereadStmt, err := parser.Parse(out) require.NoError(t, err, out) out = CanonicalString(rereadStmt) require.Equal(t, tc.canonical, out, "bad serialization") diff --git a/go/vt/sqlparser/truncate_query.go b/go/vt/sqlparser/truncate_query.go index 4bb63730fd2..996ceeb20cf 100644 --- a/go/vt/sqlparser/truncate_query.go +++ b/go/vt/sqlparser/truncate_query.go @@ -16,73 +16,36 @@ limitations under the License. package sqlparser -import ( - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/servenv" -) - -var ( - // truncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. - truncateUILen = 512 - - // truncateErrLen truncate queries in error logs to the given length. 0 means unlimited. - truncateErrLen = 0 -) - const TruncationText = "[TRUNCATED]" -func registerQueryTruncationFlags(fs *pflag.FlagSet) { - fs.IntVar(&truncateUILen, "sql-max-length-ui", truncateUILen, "truncate queries in debug UIs to the given length (default 512)") - fs.IntVar(&truncateErrLen, "sql-max-length-errors", truncateErrLen, "truncate queries in error logs to the given length (default unlimited)") -} - -func init() { - for _, cmd := range []string{ - "vtgate", - "vttablet", - "vtcombo", - "vtctld", - "vtctl", - "vtexplain", - "vtbackup", - "vttestserver", - "vtbench", - } { - servenv.OnParseFor(cmd, registerQueryTruncationFlags) - } -} - // GetTruncateErrLen is a function used to read the value of truncateErrLen -func GetTruncateErrLen() int { - return truncateErrLen -} - -// SetTruncateErrLen is a function used to override the value of truncateErrLen -// It is only meant to be used from tests and not from production code. 
-func SetTruncateErrLen(errLen int) { - truncateErrLen = errLen +func (p *Parser) GetTruncateErrLen() int { + return p.truncateErrLen } -func truncateQuery(query string, max int) string { +func TruncateQuery(query string, max int) string { sql, comments := SplitMarginComments(query) - if max == 0 || len(sql) <= max { + if max == 0 || len(sql) <= max || len(sql) < len(TruncationText) { return comments.Leading + sql + comments.Trailing } + if max < len(TruncationText)+1 { + max = len(TruncationText) + 1 + } + return comments.Leading + sql[:max-(len(TruncationText)+1)] + " " + TruncationText + comments.Trailing } // TruncateForUI is used when displaying queries on various Vitess status pages // to keep the pages small enough to load and render properly -func TruncateForUI(query string) string { - return truncateQuery(query, truncateUILen) +func (p *Parser) TruncateForUI(query string) string { + return TruncateQuery(query, p.truncateUILen) } // TruncateForLog is used when displaying queries as part of error logs // to avoid overwhelming logging systems with potentially long queries and // bind value data. 
-func TruncateForLog(query string) string { - return truncateQuery(query, truncateErrLen) +func (p *Parser) TruncateForLog(query string) string { + return TruncateQuery(query, p.truncateErrLen) } diff --git a/go/vt/sqlparser/truncate_query_test.go b/go/vt/sqlparser/truncate_query_test.go index e5fc2fc0a9c..64d53ae7b10 100644 --- a/go/vt/sqlparser/truncate_query_test.go +++ b/go/vt/sqlparser/truncate_query_test.go @@ -13,6 +13,21 @@ func TestTruncateQuery(t *testing.T) { max int want string }{ + { + query: "select 111", + max: 2, + want: "select 111", + }, + { + query: "select 1111", + max: 2, + want: " [TRUNCATED]", + }, + { + query: "select 11111", + max: 2, + want: " [TRUNCATED]", + }, { query: "select * from test where name = 'abc'", max: 30, @@ -26,7 +41,7 @@ func TestTruncateQuery(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("%s-%d", tt.query, tt.max), func(t *testing.T) { - assert.Equalf(t, tt.want, truncateQuery(tt.query, tt.max), "truncateQuery(%v, %v)", tt.query, tt.max) + assert.Equalf(t, tt.want, TruncateQuery(tt.query, tt.max), "TruncateQuery(%v, %v)", tt.query, tt.max) }) } } diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go index 0f3c66f2ea3..b785128917f 100644 --- a/go/vt/sqlparser/utils.go +++ b/go/vt/sqlparser/utils.go @@ -19,24 +19,25 @@ package sqlparser import ( "fmt" "sort" + "strings" querypb "vitess.io/vitess/go/vt/proto/query" ) // QueryMatchesTemplates sees if the given query has the same fingerprint as one of the given templates // (one is enough) -func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, err error) { +func (p *Parser) QueryMatchesTemplates(query string, queryTemplates []string) (match bool, err error) { if len(queryTemplates) == 0 { return false, fmt.Errorf("No templates found") } bv := make(map[string]*querypb.BindVariable) normalize := func(q string) (string, error) { - q, err := NormalizeAlphabetically(q) + q, err := p.NormalizeAlphabetically(q) if err != nil { 
return "", err } - stmt, reservedVars, err := Parse2(q) + stmt, reservedVars, err := p.Parse2(q) if err != nil { return "", err } @@ -69,8 +70,8 @@ func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, e // NormalizeAlphabetically rewrites given query such that: // - WHERE 'AND' expressions are reordered alphabetically -func NormalizeAlphabetically(query string) (normalized string, err error) { - stmt, err := Parse(query) +func (p *Parser) NormalizeAlphabetically(query string) (normalized string, err error) { + stmt, err := p.Parse(query) if err != nil { return normalized, err } @@ -118,12 +119,12 @@ func NormalizeAlphabetically(query string) (normalized string, err error) { // replaces any cases of the provided database name with the // specified replacement name. // Note: both database names provided should be unescaped strings. -func ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { +func (p *Parser) ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { if newdb == olddb { // Nothing to do here. 
return query, nil } - in, err := Parse(query) + in, err := p.Parse(query) if err != nil { return "", err } @@ -135,14 +136,14 @@ func ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { upd := Rewrite(in, func(cursor *Cursor) bool { switch node := cursor.Node().(type) { case TableName: - if !node.Qualifier.IsEmpty() && + if node.Qualifier.NotEmpty() && node.Qualifier.String() == oldQualifier.String() { node.Qualifier = newQualifier cursor.Replace(node) modified = true } case *ShowBasic: // for things like 'show tables from _vt' - if !node.DbName.IsEmpty() && + if node.DbName.NotEmpty() && node.DbName.String() == oldQualifier.String() { node.DbName = newQualifier cursor.Replace(node) @@ -160,3 +161,22 @@ func ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { } return query, nil } + +// ReplaceTableQualifiersMultiQuery accepts a multi-query string and modifies it +// via ReplaceTableQualifiers, one query at a time. +func (p *Parser) ReplaceTableQualifiersMultiQuery(multiQuery, olddb, newdb string) (string, error) { + queries, err := p.SplitStatementToPieces(multiQuery) + if err != nil { + return multiQuery, err + } + var modifiedQueries []string + for _, query := range queries { + // Replace any provided sidecar database qualifiers with the correct one. 
+ query, err := p.ReplaceTableQualifiers(query, olddb, newdb) + if err != nil { + return query, err + } + modifiedQueries = append(modifiedQueries, query) + } + return strings.Join(modifiedQueries, ";"), nil +} diff --git a/go/vt/sqlparser/utils_test.go b/go/vt/sqlparser/utils_test.go index 63c9b10ba43..64339211917 100644 --- a/go/vt/sqlparser/utils_test.go +++ b/go/vt/sqlparser/utils_test.go @@ -47,8 +47,9 @@ func TestNormalizeAlphabetically(t *testing.T) { out: "select * from tbl where b = 4 or a = 3", }} + parser := NewTestParser() for _, tc := range testcases { - normalized, err := NormalizeAlphabetically(tc.in) + normalized, err := parser.NormalizeAlphabetically(tc.in) assert.NoError(t, err) assert.Equal(t, tc.out, normalized) } @@ -173,9 +174,10 @@ func TestQueryMatchesTemplates(t *testing.T) { out: true, }, } + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - match, err := QueryMatchesTemplates(tc.q, tc.tmpl) + match, err := parser.QueryMatchesTemplates(tc.q, tc.tmpl) assert.NoError(t, err) assert.Equal(t, tc.out, match) }) @@ -263,9 +265,79 @@ func TestReplaceTableQualifiers(t *testing.T) { out: "set names 'binary'", }, } + parser := NewTestParser() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ReplaceTableQualifiers(tt.in, origDB, tt.newdb) + got, err := parser.ReplaceTableQualifiers(tt.in, origDB, tt.newdb) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.out, got, "RemoveTableQualifiers(); in: %s, out: %s", tt.in, got) + }) + } +} + +func TestReplaceTableQualifiersMultiQuery(t *testing.T) { + origDB := "_vt" + tests := []struct { + name string + in string + newdb string + out string + wantErr bool + }{ + { + name: "invalid select", + in: "select frog bar person", + out: "", + wantErr: true, + }, + { + name: "simple select", + in: "select * from _vt.foo", + out: "select * from foo", + }, + { + name: "simple select with new 
db", + in: "select * from _vt.foo", + newdb: "_vt_test", + out: "select * from _vt_test.foo", + }, + { + name: "simple select with new db same", + in: "select * from _vt.foo where id=1", // should be unchanged + newdb: "_vt", + out: "select * from _vt.foo where id=1", + }, + { + name: "simple select with new db needing escaping", + in: "select * from _vt.foo", + newdb: "1_vt-test", + out: "select * from `1_vt-test`.foo", + }, + { + name: "multi query", + in: "select * from _vt.foo ; select * from _vt.bar", + out: "select * from foo;select * from bar", + }, + { + name: "multi query with new db", + in: "select * from _vt.foo ; select * from _vt.bar", + newdb: "_vt_test", + out: "select * from _vt_test.foo;select * from _vt_test.bar", + }, + { + name: "multi query with error", + in: "select * from _vt.foo ; select * from _vt.bar ; sel ect fr om wh at", + wantErr: true, + }, + } + parser := NewTestParser() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parser.ReplaceTableQualifiersMultiQuery(tt.in, origDB, tt.newdb) if tt.wantErr { require.Error(t, err) } else { diff --git a/go/vt/sqlparser/version_test.go b/go/vt/sqlparser/version_test.go index 718b5804aad..8b570404cee 100644 --- a/go/vt/sqlparser/version_test.go +++ b/go/vt/sqlparser/version_test.go @@ -49,7 +49,7 @@ func TestConvertMySQLVersion(t *testing.T) { for _, tcase := range testcases { t.Run(tcase.version, func(t *testing.T) { - output, err := convertMySQLVersionToCommentVersion(tcase.version) + output, err := ConvertMySQLVersionToCommentVersion(tcase.version) if tcase.error != "" { require.EqualError(t, err, tcase.error) } else { diff --git a/go/vt/sqlparser/walker_test.go b/go/vt/sqlparser/walker_test.go index 560ed2ff470..80c8a4683ce 100644 --- a/go/vt/sqlparser/walker_test.go +++ b/go/vt/sqlparser/walker_test.go @@ -18,7 +18,6 @@ package sqlparser import ( "fmt" - "math/rand" "testing" "github.com/stretchr/testify/require" @@ -27,7 +26,7 @@ import ( func 
BenchmarkWalkLargeExpression(b *testing.B) { for i := 0; i < 10; i++ { b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { - exp := NewGenerator(rand.New(rand.NewSource(int64(i*100))), 5).Expression(ExprGeneratorConfig{}) + exp := NewGenerator(5).Expression(ExprGeneratorConfig{}) count := 0 for i := 0; i < b.N; i++ { err := Walk(func(node SQLNode) (kontinue bool, err error) { @@ -43,7 +42,7 @@ func BenchmarkWalkLargeExpression(b *testing.B) { func BenchmarkRewriteLargeExpression(b *testing.B) { for i := 1; i < 7; i++ { b.Run(fmt.Sprintf("%d", i), func(b *testing.B) { - exp := NewGenerator(rand.New(rand.NewSource(int64(i*100))), i).Expression(ExprGeneratorConfig{}) + exp := NewGenerator(i).Expression(ExprGeneratorConfig{}) count := 0 for i := 0; i < b.N; i++ { _ = Rewrite(exp, func(_ *Cursor) bool { diff --git a/go/vt/srvtopo/discover.go b/go/vt/srvtopo/discover.go index 91aaea9daf6..2997dc42e21 100644 --- a/go/vt/srvtopo/discover.go +++ b/go/vt/srvtopo/discover.go @@ -29,20 +29,23 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// FindAllTargets goes through all serving shards in the topology -// for the provided tablet types. It returns one Target object per -// keyspace / shard / matching TabletType. -func FindAllTargets(ctx context.Context, ts Server, cell string, tabletTypes []topodatapb.TabletType) ([]*querypb.Target, error) { - ksNames, err := ts.GetSrvKeyspaceNames(ctx, cell, true) - if err != nil { - return nil, err +// FindAllTargets goes through all serving shards in the topology for the provided keyspaces +// and tablet types. If no keyspaces are provided all available keyspaces in the topo are +// fetched. It returns one Target object per keyspace/shard/matching TabletType. 
+func FindAllTargets(ctx context.Context, ts Server, cell string, keyspaces []string, tabletTypes []topodatapb.TabletType) ([]*querypb.Target, error) { + var err error + if len(keyspaces) == 0 { + keyspaces, err = ts.GetSrvKeyspaceNames(ctx, cell, true) + if err != nil { + return nil, err + } } var targets []*querypb.Target var wg sync.WaitGroup var mu sync.Mutex var errRecorder concurrency.AllErrorRecorder - for _, ksName := range ksNames { + for _, ksName := range keyspaces { wg.Add(1) go func(keyspace string) { defer wg.Done() diff --git a/go/vt/srvtopo/discover_test.go b/go/vt/srvtopo/discover_test.go index ca4774a1b84..75c5f25cc6e 100644 --- a/go/vt/srvtopo/discover_test.go +++ b/go/vt/srvtopo/discover_test.go @@ -18,11 +18,13 @@ package srvtopo import ( "context" - "reflect" "sort" "testing" "time" + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/topo/memorytopo" querypb "vitess.io/vitess/go/vt/proto/query" @@ -59,19 +61,16 @@ func TestFindAllTargets(t *testing.T) { srvTopoCacheTTL = 1 * time.Second }() - rs := NewResilientServer(ctx, ts, "TestFindAllKeyspaceShards") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // No keyspace / shards. - ks, err := FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if len(ks) > 0 { - t.Errorf("why did I get anything? %v", ks) - } + ks, err := FindAllTargets(ctx, rs, "cell1", []string{"test_keyspace"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) + assert.NoError(t, err) + assert.Len(t, ks, 0) // Add one. 
- if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace", &topodatapb.SrvKeyspace{ + assert.NoError(t, ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace", &topodatapb.SrvKeyspace{ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ { ServedType: topodatapb.TabletType_PRIMARY, @@ -82,28 +81,34 @@ func TestFindAllTargets(t *testing.T) { }, }, }, - }); err != nil { - t.Fatalf("can't add srvKeyspace: %v", err) - } + })) // Get it. - ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ks, []*querypb.Target{ + ks, err = FindAllTargets(ctx, rs, "cell1", []string{"test_keyspace"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) + assert.NoError(t, err) + assert.EqualValues(t, []*querypb.Target{ { Cell: "cell1", Keyspace: "test_keyspace", Shard: "test_shard0", TabletType: topodatapb.TabletType_PRIMARY, }, - }) { - t.Errorf("got wrong value: %v", ks) - } + }, ks) + + // Get any keyspace. + ks, err = FindAllTargets(ctx, rs, "cell1", nil, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}) + assert.NoError(t, err) + assert.EqualValues(t, []*querypb.Target{ + { + Cell: "cell1", + Keyspace: "test_keyspace", + Shard: "test_shard0", + TabletType: topodatapb.TabletType_PRIMARY, + }, + }, ks) // Add another one. - if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace2", &topodatapb.SrvKeyspace{ + assert.NoError(t, ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace2", &topodatapb.SrvKeyspace{ Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ { ServedType: topodatapb.TabletType_PRIMARY, @@ -122,17 +127,13 @@ func TestFindAllTargets(t *testing.T) { }, }, }, - }); err != nil { - t.Fatalf("can't add srvKeyspace: %v", err) - } + })) - // Get it for all types. 
- ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } + // Get it for any keyspace, all types. + ks, err = FindAllTargets(ctx, rs, "cell1", nil, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA}) + assert.NoError(t, err) sort.Sort(TargetArray(ks)) - if !reflect.DeepEqual(ks, []*querypb.Target{ + assert.EqualValues(t, []*querypb.Target{ { Cell: "cell1", Keyspace: "test_keyspace", @@ -151,23 +152,40 @@ func TestFindAllTargets(t *testing.T) { Shard: "test_shard2", TabletType: topodatapb.TabletType_REPLICA, }, - }) { - t.Errorf("got wrong value: %v", ks) - } + }, ks) - // Only get the REPLICA targets. - ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ks, []*querypb.Target{ + // Only get 1 keyspace for all types. + ks, err = FindAllTargets(ctx, rs, "cell1", []string{"test_keyspace2"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA}) + assert.NoError(t, err) + assert.EqualValues(t, []*querypb.Target{ + { + Cell: "cell1", + Keyspace: "test_keyspace2", + Shard: "test_shard1", + TabletType: topodatapb.TabletType_PRIMARY, + }, { Cell: "cell1", Keyspace: "test_keyspace2", Shard: "test_shard2", TabletType: topodatapb.TabletType_REPLICA, }, - }) { - t.Errorf("got wrong value: %v", ks) - } + }, ks) + + // Only get the REPLICA targets for any keyspace. + ks, err = FindAllTargets(ctx, rs, "cell1", []string{}, []topodatapb.TabletType{topodatapb.TabletType_REPLICA}) + assert.NoError(t, err) + assert.Equal(t, []*querypb.Target{ + { + Cell: "cell1", + Keyspace: "test_keyspace2", + Shard: "test_shard2", + TabletType: topodatapb.TabletType_REPLICA, + }, + }, ks) + + // Get non-existent keyspace. 
+ ks, err = FindAllTargets(ctx, rs, "cell1", []string{"doesnt-exist"}, []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA}) + assert.NoError(t, err) + assert.Len(t, ks, 0) } diff --git a/go/vt/srvtopo/resilient_server.go b/go/vt/srvtopo/resilient_server.go index d1521952ab0..78fc9134bce 100644 --- a/go/vt/srvtopo/resilient_server.go +++ b/go/vt/srvtopo/resilient_server.go @@ -70,7 +70,6 @@ const ( // - return the last known value of the data if there is an error type ResilientServer struct { topoServer *topo.Server - counts *stats.CountersWithSingleLabel *SrvKeyspaceWatcher *SrvVSchemaWatcher @@ -79,22 +78,13 @@ type ResilientServer struct { // NewResilientServer creates a new ResilientServer // based on the provided topo.Server. -func NewResilientServer(ctx context.Context, base *topo.Server, counterPrefix string) *ResilientServer { +func NewResilientServer(ctx context.Context, base *topo.Server, counts *stats.CountersWithSingleLabel) *ResilientServer { if srvTopoCacheRefresh > srvTopoCacheTTL { log.Fatalf("srv_topo_cache_refresh must be less than or equal to srv_topo_cache_ttl") } - var metric string - if counterPrefix == "" { - metric = counterPrefix + "Counts" - } else { - metric = "" - } - counts := stats.NewCountersWithSingleLabel(metric, "Resilient srvtopo server operations", "type") - return &ResilientServer{ topoServer: base, - counts: counts, SrvKeyspaceWatcher: NewSrvKeyspaceWatcher(ctx, base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), SrvVSchemaWatcher: NewSrvVSchemaWatcher(ctx, base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), SrvKeyspaceNamesQuery: NewSrvKeyspaceNamesQuery(base, counts, srvTopoCacheRefresh, srvTopoCacheTTL), diff --git a/go/vt/srvtopo/resilient_server_test.go b/go/vt/srvtopo/resilient_server_test.go index c237d43f300..dbcf48ce176 100644 --- a/go/vt/srvtopo/resilient_server_test.go +++ b/go/vt/srvtopo/resilient_server_test.go @@ -28,6 +28,7 @@ import ( "github.com/google/safehtml/template" + 
"vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/key" "github.com/stretchr/testify/assert" @@ -53,7 +54,8 @@ func TestGetSrvKeyspace(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspace") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // Ask for a not-yet-created keyspace _, err := rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") @@ -175,7 +177,7 @@ func TestGetSrvKeyspace(t *testing.T) { // Now simulate a topo service error and see that the last value is // cached for at least half of the expected ttl. errorTestStart := time.Now() - errorReqsBefore := rs.counts.Counts()[errorCategory] + errorReqsBefore := counts.Counts()[errorCategory] forceErr := topo.NewError(topo.Timeout, "test topo error") factory.SetError(forceErr) @@ -271,7 +273,7 @@ func TestGetSrvKeyspace(t *testing.T) { // Check that the expected number of errors were counted during the // interval - errorReqs := rs.counts.Counts()[errorCategory] + errorReqs := counts.Counts()[errorCategory] expectedErrors := int64(time.Since(errorTestStart) / srvTopoCacheRefresh) if errorReqs-errorReqsBefore > expectedErrors { t.Errorf("expected <= %v error requests got %d", expectedErrors, errorReqs-errorReqsBefore) @@ -370,7 +372,8 @@ func TestSrvKeyspaceCachedError(t *testing.T) { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ctx, ts, "TestSrvKeyspaceCachedErrors") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // Ask for an unknown keyspace, should get an error. 
_, err := rs.GetSrvKeyspace(ctx, "test_cell", "unknown_ks") @@ -401,7 +404,8 @@ func TestGetSrvKeyspaceCreated(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "test_cell") defer ts.Close() - rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceCreated") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // Set SrvKeyspace with value. want := &topodatapb.SrvKeyspace{} @@ -435,7 +439,8 @@ func TestWatchSrvVSchema(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "test_cell") - rs := NewResilientServer(ctx, ts, "TestWatchSrvVSchema") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // mu protects watchValue and watchErr. mu := sync.Mutex{} @@ -529,7 +534,8 @@ func TestGetSrvKeyspaceNames(t *testing.T) { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceNames") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // Set SrvKeyspace with value want := &topodatapb.SrvKeyspace{} @@ -614,7 +620,7 @@ func TestGetSrvKeyspaceNames(t *testing.T) { // Check that we only checked the topo service 1 or 2 times during the // period where we got the cached error. 
- cachedReqs, ok := rs.counts.Counts()[cachedCategory] + cachedReqs, ok := counts.Counts()[cachedCategory] if !ok || cachedReqs > 2 { t.Errorf("expected <= 2 cached requests got %v", cachedReqs) } @@ -640,7 +646,7 @@ func TestGetSrvKeyspaceNames(t *testing.T) { t.Errorf("GetSrvKeyspaceNames got %v want %v", names, wantNames) } - errorReqs, ok := rs.counts.Counts()[errorCategory] + errorReqs, ok := counts.Counts()[errorCategory] if !ok || errorReqs == 0 { t.Errorf("expected non-zero error requests got %v", errorReqs) } @@ -684,8 +690,8 @@ func TestSrvKeyspaceWatcher(t *testing.T) { srvTopoCacheTTL = 1 * time.Second srvTopoCacheRefresh = 1 * time.Second }() - - rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceWatcher") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) var wmu sync.Mutex var wseen []watched @@ -811,7 +817,8 @@ func TestSrvKeyspaceListener(t *testing.T) { srvTopoCacheRefresh = 1 * time.Second }() - rs := NewResilientServer(ctx, ts, "TestGetSrvKeyspaceWatcher") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) cancelCtx, cancelFunc := context.WithCancel(context.Background()) var callbackCount atomic.Int32 diff --git a/go/vt/srvtopo/resolver.go b/go/vt/srvtopo/resolver.go index 98d77e259ef..0ccfb0fd872 100644 --- a/go/vt/srvtopo/resolver.go +++ b/go/vt/srvtopo/resolver.go @@ -41,7 +41,7 @@ type Gateway interface { queryservice.QueryService // QueryServiceByAlias returns a QueryService - QueryServiceByAlias(alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) + QueryServiceByAlias(ctx context.Context, alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) // GetServingKeyspaces returns list of serving keyspaces. 
GetServingKeyspaces() []string @@ -83,24 +83,6 @@ type ResolvedShard struct { Gateway Gateway } -// ResolvedShardEqual is an equality check on *ResolvedShard. -func ResolvedShardEqual(rs1, rs2 *ResolvedShard) bool { - return proto.Equal(rs1.Target, rs2.Target) -} - -// ResolvedShardsEqual is an equality check on []*ResolvedShard. -func ResolvedShardsEqual(rss1, rss2 []*ResolvedShard) bool { - if len(rss1) != len(rss2) { - return false - } - for i, rs1 := range rss1 { - if !ResolvedShardEqual(rs1, rss2[i]) { - return false - } - } - return true -} - // WithKeyspace returns a ResolvedShard with a new keyspace keeping other parameters the same func (rs *ResolvedShard) WithKeyspace(newKeyspace string) *ResolvedShard { return &ResolvedShard{ @@ -114,8 +96,7 @@ func (rs *ResolvedShard) WithKeyspace(newKeyspace string) *ResolvedShard { } } -// GetKeyspaceShards return all the shards in a keyspace. It follows -// redirection if ServedFrom is set. It is only valid for the local cell. +// GetKeyspaceShards return all the shards in a keyspace. It is only valid for the local cell. // Do not use it to further resolve shards, instead use the Resolve* methods. func (r *Resolver) GetKeyspaceShards(ctx context.Context, keyspace string, tabletType topodatapb.TabletType) (string, *topodatapb.SrvKeyspace, []*topodatapb.ShardReference, error) { srvKeyspace, err := r.topoServ.GetSrvKeyspace(ctx, r.localCell, keyspace) @@ -123,17 +104,6 @@ func (r *Resolver) GetKeyspaceShards(ctx context.Context, keyspace string, table return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace %v fetch error: %v", keyspace, err) } - // check if the keyspace has been redirected for this tabletType. 
- for _, sf := range srvKeyspace.ServedFrom { - if sf.TabletType == tabletType { - keyspace = sf.Keyspace - srvKeyspace, err = r.topoServ.GetSrvKeyspace(ctx, r.localCell, keyspace) - if err != nil { - return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace %v fetch error: %v", keyspace, err) - } - } - } - partition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType) if partition == nil { return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No partition found for tabletType %v in keyspace %v", topoproto.TabletTypeLString(tabletType), keyspace) diff --git a/go/vt/srvtopo/resolver_test.go b/go/vt/srvtopo/resolver_test.go index 95e6dbe620c..fae8bef1fb2 100644 --- a/go/vt/srvtopo/resolver_test.go +++ b/go/vt/srvtopo/resolver_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -33,10 +34,11 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -func initResolver(t *testing.T, ctx context.Context, name string) *Resolver { +func initResolver(t *testing.T, ctx context.Context) *Resolver { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - rs := NewResilientServer(ctx, ts, name) + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + rs := NewResilientServer(ctx, ts, counts) // Create sharded keyspace and shards. 
if err := ts.CreateKeyspace(ctx, "sks", &topodatapb.Keyspace{}); err != nil { @@ -97,7 +99,7 @@ func initResolver(t *testing.T, ctx context.Context, name string) *Resolver { func TestResolveDestinations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - resolver := initResolver(t, ctx, "TestResolveDestinations") + resolver := initResolver(t, ctx) id1 := &querypb.Value{ Type: sqltypes.VarChar, diff --git a/go/vt/srvtopo/status.go b/go/vt/srvtopo/status.go index b3069be6c38..9a5e8684b7f 100644 --- a/go/vt/srvtopo/status.go +++ b/go/vt/srvtopo/status.go @@ -133,12 +133,6 @@ var partitions = template.Must(template.New("partitions").Parse(` {{ end }}
{{ end }} -{{if .ServedFrom }} -ServedFrom:
-{{ range .ServedFrom }} - {{ .TabletType }}: {{ .Keyspace}}
-{{ end }} -{{ end }} `)) // StatusAsHTML returns an HTML version of our status. diff --git a/go/vt/srvtopo/watch.go b/go/vt/srvtopo/watch.go index 36d8fd428bd..4a0ccda2d59 100644 --- a/go/vt/srvtopo/watch.go +++ b/go/vt/srvtopo/watch.go @@ -23,6 +23,7 @@ import ( "time" "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" ) @@ -204,8 +205,11 @@ func (entry *watchEntry) onErrorLocked(ctx context.Context, err error, init bool entry.value = nil } } else { - entry.lastError = fmt.Errorf("ResilientWatch stream failed for %v: %w", entry.key, err) - log.Errorf("%v", entry.lastError) + if !topo.IsErrType(err, topo.Interrupted) { + // No need to log if we're explicitly interrupted. + entry.lastError = fmt.Errorf("ResilientWatch stream failed for %v: %w", entry.key, err) + log.Errorf("%v", entry.lastError) + } // Even though we didn't get a new value, update the lastValueTime // here since the watch was successfully running before and we want @@ -224,8 +228,7 @@ func (entry *watchEntry) onErrorLocked(ctx context.Context, err error, init bool if len(entry.listeners) > 0 && !topo.IsErrType(err, topo.Interrupted) { go func() { - time.Sleep(entry.rw.cacheRefreshInterval) - + _ = timer.SleepContext(ctx, entry.rw.cacheRefreshInterval) entry.mutex.Lock() entry.ensureWatchingLocked(ctx) entry.mutex.Unlock() diff --git a/go/vt/srvtopo/watch_srvkeyspace.go b/go/vt/srvtopo/watch_srvkeyspace.go index cefe95c6951..ac2d8c0bac1 100644 --- a/go/vt/srvtopo/watch_srvkeyspace.go +++ b/go/vt/srvtopo/watch_srvkeyspace.go @@ -40,7 +40,7 @@ func (k *srvKeyspaceKey) String() string { func NewSrvKeyspaceWatcher(ctx context.Context, topoServer *topo.Server, counts *stats.CountersWithSingleLabel, cacheRefresh, cacheTTL time.Duration) *SrvKeyspaceWatcher { watch := func(entry *watchEntry) { key := entry.key.(*srvKeyspaceKey) - requestCtx, requestCancel := context.WithCancel(context.Background()) + requestCtx, requestCancel := 
context.WithCancel(ctx) defer requestCancel() current, changes, err := topoServer.WatchSrvKeyspace(requestCtx, key.cell, key.keyspace) diff --git a/go/vt/srvtopo/watch_srvvschema.go b/go/vt/srvtopo/watch_srvvschema.go index 1b5536e623d..c758211375d 100644 --- a/go/vt/srvtopo/watch_srvvschema.go +++ b/go/vt/srvtopo/watch_srvvschema.go @@ -21,8 +21,9 @@ import ( "time" "vitess.io/vitess/go/stats" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/topo" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) type SrvVSchemaWatcher struct { diff --git a/go/vt/sysvars/sysvars.go b/go/vt/sysvars/sysvars.go index 98da8ff07b7..c8037563ca1 100644 --- a/go/vt/sysvars/sysvars.go +++ b/go/vt/sysvars/sysvars.go @@ -57,6 +57,8 @@ var ( off = "0" utf8mb4 = "'utf8mb4'" + ForeignKeyChecks = "foreign_key_checks" + Autocommit = SystemVariable{Name: "autocommit", IsBoolean: true, Default: on} Charset = SystemVariable{Name: "charset", Default: utf8mb4, IdentifierAsString: true} ClientFoundRows = SystemVariable{Name: "client_found_rows", IsBoolean: true, Default: off} @@ -186,7 +188,7 @@ var ( {Name: "end_markers_in_json", IsBoolean: true, SupportSetVar: true}, {Name: "eq_range_index_dive_limit", SupportSetVar: true}, {Name: "explicit_defaults_for_timestamp"}, - {Name: "foreign_key_checks", IsBoolean: true, SupportSetVar: true}, + {Name: ForeignKeyChecks, IsBoolean: true, SupportSetVar: true}, {Name: "group_concat_max_len", SupportSetVar: true}, {Name: "information_schema_stats_expiry"}, {Name: "max_heap_table_size", SupportSetVar: true}, diff --git a/go/vt/tableacl/testlib/testlib.go b/go/vt/tableacl/testlib/testlib.go index bdde9ae800f..1ebe8b7d564 100644 --- a/go/vt/tableacl/testlib/testlib.go +++ b/go/vt/tableacl/testlib/testlib.go @@ -19,7 +19,7 @@ package testlib import ( "errors" "fmt" - "math/rand" + "math/rand/v2" "testing" querypb "vitess.io/vitess/go/vt/proto/query" @@ -30,7 +30,7 @@ import ( // TestSuite tests a concrete acl.Factory implementation. 
func TestSuite(t *testing.T, factory acl.Factory) { - name := fmt.Sprintf("tableacl-test-%d", rand.Int63()) + name := fmt.Sprintf("tableacl-test-%d", rand.Int64()) tableacl.Register(name, factory) tableacl.SetDefaultACL(name) diff --git a/go/vt/throttler/aggregated_interval_history_test.go b/go/vt/throttler/aggregated_interval_history_test.go index 6a77d57af07..f9348c10920 100644 --- a/go/vt/throttler/aggregated_interval_history_test.go +++ b/go/vt/throttler/aggregated_interval_history_test.go @@ -19,6 +19,8 @@ package throttler import ( "testing" "time" + + "github.com/stretchr/testify/assert" ) func TestAggregatedIntervalHistory(t *testing.T) { @@ -26,7 +28,6 @@ func TestAggregatedIntervalHistory(t *testing.T) { h.addPerThread(0, record{sinceZero(0 * time.Second), 1000}) h.addPerThread(1, record{sinceZero(0 * time.Second), 2000}) - if got, want := h.average(sinceZero(250*time.Millisecond), sinceZero(750*time.Millisecond)), 3000.0; got != want { - t.Errorf("average(0.25s, 0.75s) across both threads = %v, want = %v", got, want) - } + got := h.average(sinceZero(250*time.Millisecond), sinceZero(750*time.Millisecond)) + assert.Equal(t, 3000.0, got) } diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 126b9098236..cd2a4e11307 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -18,7 +18,7 @@ package main import ( "context" - "math/rand" + "math/rand/v2" "net/http" "sync" "testing" @@ -26,6 +26,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -114,9 +116,9 @@ type replica struct { wg sync.WaitGroup } -func newReplica(lagUpdateInterval, degrationInterval, degrationDuration time.Duration, ts *topo.Server) *replica { +func newReplica(env *vtenv.Environment, lagUpdateInterval, degrationInterval, degrationDuration time.Duration, ts *topo.Server) *replica { t 
:= &testing.T{} - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) fakeTablet := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, nil, testlib.TabletKeyspaceShard(t, "ks", "-80")) fakeTablet.StartActionLoop(t, wr) @@ -187,7 +189,7 @@ func (r *replica) processReplicationStream() { actualRate = 0 } if !r.nextDegration.IsZero() && time.Now().After(r.nextDegration) && r.currentDegrationEnd.IsZero() { - degradedRate := rand.Int63n(rate) + degradedRate := rand.Int64N(rate) log.Infof("degrading the replica for %.f seconds from %v TPS to %v", r.degrationDuration.Seconds(), rate, degradedRate) r.throttler.SetMaxRate(degradedRate) r.currentDegrationEnd = time.Now().Add(r.degrationDuration) @@ -308,7 +310,15 @@ func main() { log.Infof("start rate set to: %v", rate) ts := memorytopo.NewServer(context.Background(), "cell1") - replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts) + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatal(err) + } + replica := newReplica(env, lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts) primary := &primary{replica: replica} client := newClient(context.Background(), primary, replica, ts) client.run() diff --git a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go deleted file mode 100644 index 1518d7ea8d8..00000000000 --- a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package grpcthrottlerclient contains the gRPC version of the throttler client protocol. -package grpcthrottlerclient - -import ( - "flag" - - "context" - - "google.golang.org/grpc" - - "vitess.io/vitess/go/vt/grpcclient" - "vitess.io/vitess/go/vt/throttler/throttlerclient" - "vitess.io/vitess/go/vt/vterrors" - - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" - throttlerservicepb "vitess.io/vitess/go/vt/proto/throttlerservice" -) - -var ( - cert = flag.String("throttler_client_grpc_cert", "", "the cert to use to connect") - key = flag.String("throttler_client_grpc_key", "", "the key to use to connect") - ca = flag.String("throttler_client_grpc_ca", "", "the server ca to use to validate servers when connecting") - crl = flag.String("throttler_client_grpc_crl", "", "the server crl to use to validate server certificates when connecting") - name = flag.String("throttler_client_grpc_server_name", "", "the server name to use to validate server certificate") -) - -type client struct { - conn *grpc.ClientConn - gRPCClient throttlerservicepb.ThrottlerClient -} - -func factory(addr string) (throttlerclient.Client, error) { - opt, err := grpcclient.SecureDialOption(*cert, *key, *ca, *crl, *name) - if err != nil { - return nil, err - } - conn, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) - if err != nil { - return nil, err - } - gRPCClient := throttlerservicepb.NewThrottlerClient(conn) - - return &client{conn, gRPCClient}, nil -} - -// MaxRates is part of the throttlerclient.Client interface and returns the -// current max rate for each 
throttler of the process. -func (c *client) MaxRates(ctx context.Context) (map[string]int64, error) { - response, err := c.gRPCClient.MaxRates(ctx, &throttlerdatapb.MaxRatesRequest{}) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Rates, nil -} - -// SetMaxRate is part of the throttlerclient.Client interface and sets the rate -// on all throttlers of the server. -func (c *client) SetMaxRate(ctx context.Context, rate int64) ([]string, error) { - request := &throttlerdatapb.SetMaxRateRequest{ - Rate: rate, - } - - response, err := c.gRPCClient.SetMaxRate(ctx, request) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Names, nil -} - -// GetConfiguration is part of the throttlerclient.Client interface. -func (c *client) GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { - response, err := c.gRPCClient.GetConfiguration(ctx, &throttlerdatapb.GetConfigurationRequest{ - ThrottlerName: throttlerName, - }) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Configurations, nil -} - -// UpdateConfiguration is part of the throttlerclient.Client interface. -func (c *client) UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { - response, err := c.gRPCClient.UpdateConfiguration(ctx, &throttlerdatapb.UpdateConfigurationRequest{ - ThrottlerName: throttlerName, - Configuration: configuration, - CopyZeroValues: copyZeroValues, - }) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Names, nil -} - -// ResetConfiguration is part of the throttlerclient.Client interface. 
-func (c *client) ResetConfiguration(ctx context.Context, throttlerName string) ([]string, error) { - response, err := c.gRPCClient.ResetConfiguration(ctx, &throttlerdatapb.ResetConfigurationRequest{ - ThrottlerName: throttlerName, - }) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Names, nil -} - -// Close is part of the throttlerclient.Client interface. -func (c *client) Close() { - c.conn.Close() -} - -func init() { - throttlerclient.RegisterFactory("grpc", factory) -} diff --git a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient_test.go b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient_test.go deleted file mode 100644 index d3ae3c40a33..00000000000 --- a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient_test.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package grpcthrottlerclient - -import ( - "fmt" - "net" - "testing" - - "google.golang.org/grpc" - - "vitess.io/vitess/go/vt/throttler" - "vitess.io/vitess/go/vt/throttler/grpcthrottlerserver" - "vitess.io/vitess/go/vt/throttler/throttlerclienttest" -) - -// TestThrottlerServer tests the gRPC implementation using a throttler client -// and server. -func TestThrottlerServer(t *testing.T) { - // Use the global manager which is a singleton. - port := startGRPCServer(t, throttler.GlobalManager) - - // Create a ThrottlerClient gRPC client to talk to the throttler. 
- client, err := factory(fmt.Sprintf("localhost:%v", port)) - if err != nil { - t.Fatalf("Cannot create client: %v", err) - } - defer client.Close() - - throttlerclienttest.TestSuite(t, client) -} - -// TestThrottlerServerPanics tests the panic handling of the gRPC throttler -// server implementation. -func TestThrottlerServerPanics(t *testing.T) { - // For testing the panic handling, use a fake Manager instead. - port := startGRPCServer(t, &throttlerclienttest.FakeManager{}) - - // Create a ThrottlerClient gRPC client to talk to the throttler. - client, err := factory(fmt.Sprintf("localhost:%v", port)) - if err != nil { - t.Fatalf("Cannot create client: %v", err) - } - defer client.Close() - - throttlerclienttest.TestSuitePanics(t, client) -} - -func startGRPCServer(t *testing.T, m throttler.Manager) int { - // Listen on a random port. - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Cannot listen: %v", err) - } - - s := grpc.NewServer() - grpcthrottlerserver.RegisterServer(s, m) - // Call Serve() after our service has been registered. Otherwise, the test - // will fail with the error "grpc: Server.RegisterService after Server.Serve". - go s.Serve(listener) - return listener.Addr().(*net.TCPAddr).Port -} diff --git a/go/vt/throttler/interval_history_test.go b/go/vt/throttler/interval_history_test.go index 7bad56e41c1..ec30b1c23c9 100644 --- a/go/vt/throttler/interval_history_test.go +++ b/go/vt/throttler/interval_history_test.go @@ -17,9 +17,11 @@ limitations under the License. 
package throttler import ( - "strings" "testing" "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIntervalHistory_AverageIncludesPartialIntervals(t *testing.T) { @@ -33,9 +35,8 @@ func TestIntervalHistory_AverageIncludesPartialIntervals(t *testing.T) { h.add(record{sinceZero(3 * time.Second), 10000000}) // Rate within [1s, 2s) = 1000 and within [2s, 3s) = 2000 = average of 1500 want := 1500.0 - if got := h.average(sinceZero(1500*time.Millisecond), sinceZero(2500*time.Millisecond)); got != want { - t.Errorf("average(1.5s, 2.5s) = %v, want = %v", got, want) - } + got := h.average(sinceZero(1500*time.Millisecond), sinceZero(2500*time.Millisecond)) + assert.Equal(t, want, got) } func TestIntervalHistory_AverageRangeSmallerThanInterval(t *testing.T) { @@ -43,9 +44,8 @@ func TestIntervalHistory_AverageRangeSmallerThanInterval(t *testing.T) { h.add(record{sinceZero(0 * time.Second), 10000}) want := 10000.0 - if got := h.average(sinceZero(250*time.Millisecond), sinceZero(750*time.Millisecond)); got != want { - t.Errorf("average(0.25s, 0.75s) = %v, want = %v", got, want) - } + got := h.average(sinceZero(250*time.Millisecond), sinceZero(750*time.Millisecond)) + assert.Equal(t, want, got) } func TestIntervalHistory_GapsCountedAsZero(t *testing.T) { @@ -55,22 +55,17 @@ func TestIntervalHistory_GapsCountedAsZero(t *testing.T) { h.add(record{sinceZero(3 * time.Second), 1000}) want := 500.0 - if got := h.average(sinceZero(0*time.Second), sinceZero(4*time.Second)); got != want { - t.Errorf("average(0s, 4s) = %v, want = %v", got, want) - } + got := h.average(sinceZero(0*time.Second), sinceZero(4*time.Second)) + assert.Equal(t, want, got) } func TestIntervalHistory_AddNoDuplicateInterval(t *testing.T) { defer func() { r := recover() + require.NotNil(t, r, "add() did not panic") - if r == nil { - t.Fatal("add() did not panic") - } want := "BUG: cannot add record because it is already covered by a previous entry" - if 
!strings.Contains(r.(string), want) { - t.Fatalf("add() did panic for the wrong reason: got = %v, want = %v", r, want) - } + require.Contains(t, r, want, "add() did panic for the wrong reason") }() h := newIntervalHistory(10, 1*time.Second) @@ -82,14 +77,10 @@ func TestIntervalHistory_AddNoDuplicateInterval(t *testing.T) { func TestIntervalHistory_RecordDoesNotStartAtInterval(t *testing.T) { defer func() { r := recover() + require.NotNil(t, r, "add() did not panic") - if r == nil { - t.Fatal("add() did not panic") - } want := "BUG: cannot add record because it does not start at the beginning of the interval" - if !strings.Contains(r.(string), want) { - t.Fatalf("add() did panic for the wrong reason: got = %v, want = %v", r, want) - } + require.Contains(t, r, want, "add() did panic for the wrong reason") }() h := newIntervalHistory(1, 1*time.Second) diff --git a/go/vt/throttler/manager_test.go b/go/vt/throttler/manager_test.go index e6c3359b242..3d61d4d6b68 100644 --- a/go/vt/throttler/manager_test.go +++ b/go/vt/throttler/manager_test.go @@ -20,10 +20,12 @@ import ( "fmt" "reflect" "sort" - "strings" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -60,12 +62,11 @@ func (f *managerTestFixture) tearDown() { func TestManager_Registration(t *testing.T) { m := newManager() t1, err := newThrottler(m, "t1", "TPS", 1 /* threadCount */, MaxRateModuleDisabled, ReplicationLagModuleDisabled, time.Now) - if err != nil { - t.Fatal(err) - } - if err := m.registerThrottler("t1", t1); err == nil { - t.Fatalf("manager should not accept a duplicate registration of a throttler: %v", err) - } + require.NoError(t, err) + + err = m.registerThrottler("t1", t1) + require.Error(t, err, "manager should not accept a duplicate registration of a throttler") + t1.Close() // Unregistering an unregistered throttler should log an error. 
@@ -81,18 +82,16 @@ func TestManager_SetMaxRate(t *testing.T) { // Test SetMaxRate(). want := []string{"t1", "t2"} - if got := f.m.SetMaxRate(23); !reflect.DeepEqual(got, want) { - t.Errorf("manager did not set the rate on all throttlers. got = %v, want = %v", got, want) - } + got := f.m.SetMaxRate(23) + assert.Equal(t, want, got, "manager did not set the rate on all throttlers") // Test MaxRates(). wantRates := map[string]int64{ "t1": 23, "t2": 23, } - if gotRates := f.m.MaxRates(); !reflect.DeepEqual(gotRates, wantRates) { - t.Errorf("manager did not set the rate on all throttlers. got = %v, want = %v", gotRates, wantRates) - } + gotRates := f.m.MaxRates() + assert.Equal(t, wantRates, gotRates, "manager did not set the rate on all throttlers") } func TestManager_GetConfiguration(t *testing.T) { @@ -108,24 +107,16 @@ func TestManager_GetConfiguration(t *testing.T) { "t2": defaultMaxReplicationLagModuleConfig.Clone().Configuration, } got, err := f.m.GetConfiguration("" /* all */) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, want) { - t.Errorf("manager did not return the correct initial config for all throttlers. got = %v, want = %v", got, want) - } + require.NoError(t, err) + assert.Equal(t, want, got, "manager did not return the correct initial config for all throttlers") // Test GetConfiguration() when a specific throttler is requested. wantT2 := map[string]*throttlerdatapb.Configuration{ "t2": defaultMaxReplicationLagModuleConfig.Clone().Configuration, } gotT2, err := f.m.GetConfiguration("t2") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotT2, wantT2) { - t.Errorf("manager did not return the correct initial config for throttler: %v got = %v, want = %v", "t2", gotT2, wantT2) - } + require.NoError(t, err) + assert.Equal(t, wantT2, gotT2, "manager did not return the correct initial config for throttler: t2") // Now change the config and then reset it back. 
newConfig := &throttlerdatapb.Configuration{ @@ -133,42 +124,35 @@ func TestManager_GetConfiguration(t *testing.T) { IgnoreNSlowestReplicas: defaultIgnoreNSlowestReplicas + 1, } allNames, err := f.m.UpdateConfiguration("", newConfig, false /* copyZeroValues */) - if err != nil { - t.Fatal(err) - } - // Verify it was changed. - if err := checkConfig(f.m, []string{"t1", "t2"}, allNames, defaultTargetLag+1, defaultIgnoreNSlowestReplicas+1); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + err = checkConfig(f.m, []string{"t1", "t2"}, allNames, defaultTargetLag+1, defaultIgnoreNSlowestReplicas+1) + require.NoError(t, err) + // Reset only "t2". - if names, err := f.m.ResetConfiguration("t2"); err != nil || !reflect.DeepEqual(names, []string{"t2"}) { - t.Fatalf("Reset failed or returned wrong throttler names: %v err: %v", names, err) - } + names, err := f.m.ResetConfiguration("t2") + require.NoError(t, err) + assert.Equal(t, []string{"t2"}, names, "Reset failed or returned wrong throttler names") + gotT2AfterReset, err := f.m.GetConfiguration("t2") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotT2AfterReset, wantT2) { - t.Errorf("manager did not return the correct initial config for throttler %v after reset: got = %v, want = %v", "t2", gotT2AfterReset, wantT2) - } + require.NoError(t, err) + assert.Equal(t, wantT2, gotT2AfterReset, "manager did not return the correct initial config for throttler t2 after reset") + // Reset all throttlers. 
- if names, err := f.m.ResetConfiguration(""); err != nil || !reflect.DeepEqual(names, []string{"t1", "t2"}) { - t.Fatalf("Reset failed or returned wrong throttler names: %v err: %v", names, err) - } + + names, err = f.m.ResetConfiguration("") + require.NoError(t, err) + assert.Equal(t, []string{"t1", "t2"}, names, "Reset failed or returned wrong throttler names") + gotAfterReset, err := f.m.GetConfiguration("") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotAfterReset, want) { - t.Errorf("manager did not return the correct initial config for all throttlers after reset. got = %v, want = %v", got, want) - } + require.NoError(t, err) + assert.Equal(t, want, gotAfterReset, "manager did not return the correct initial config for all throttlers after reset") } func TestManager_UpdateConfiguration_Error(t *testing.T) { f := &managerTestFixture{} - if err := f.setUp(); err != nil { - t.Fatal(err) - } + err := f.setUp() + require.NoError(t, err) defer f.tearDown() // Check that errors from Verify() are correctly propagated. @@ -176,21 +160,15 @@ func TestManager_UpdateConfiguration_Error(t *testing.T) { // max < 2 is not allowed. MaxReplicationLagSec: 1, } - if _, err := f.m.UpdateConfiguration("t2", invalidConfig, false /* copyZeroValues */); err == nil { - t.Fatal("expected error but got nil") - } else { - want := "max_replication_lag_sec must be >= 2" - if !strings.Contains(err.Error(), want) { - t.Fatalf("received wrong error. got = %v, want contains = %v", err, want) - } - } + _, err = f.m.UpdateConfiguration("t2", invalidConfig, false /* copyZeroValues */) + wantErr := "max_replication_lag_sec must be >= 2" + require.ErrorContains(t, err, wantErr) } func TestManager_UpdateConfiguration_Partial(t *testing.T) { f := &managerTestFixture{} - if err := f.setUp(); err != nil { - t.Fatal(err) - } + err := f.setUp() + require.NoError(t, err) defer f.tearDown() // Verify that a partial update only updates that one field. 
@@ -199,47 +177,40 @@ func TestManager_UpdateConfiguration_Partial(t *testing.T) { IgnoreNSlowestReplicas: wantIgnoreNSlowestReplicas, } names, err := f.m.UpdateConfiguration("t2", partialConfig, false /* copyZeroValues */) - if err != nil { - t.Fatal(err) - } - if err := checkConfig(f.m, []string{"t2"}, names, defaultTargetLag, wantIgnoreNSlowestReplicas); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + err = checkConfig(f.m, []string{"t2"}, names, defaultTargetLag, wantIgnoreNSlowestReplicas) + require.NoError(t, err) + // Repeat test for all throttlers. allNames, err := f.m.UpdateConfiguration("" /* all */, partialConfig, false /* copyZeroValues */) - if err != nil { - t.Fatal(err) - } - if err := checkConfig(f.m, []string{"t1", "t2"}, allNames, defaultTargetLag, wantIgnoreNSlowestReplicas); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + err = checkConfig(f.m, []string{"t1", "t2"}, allNames, defaultTargetLag, wantIgnoreNSlowestReplicas) + require.NoError(t, err) } func TestManager_UpdateConfiguration_ZeroValues(t *testing.T) { f := &managerTestFixture{} - if err := f.setUp(); err != nil { - t.Fatal(err) - } + err := f.setUp() + require.NoError(t, err) defer f.tearDown() // Test the explicit copy of zero values. zeroValueConfig := defaultMaxReplicationLagModuleConfig.Configuration.CloneVT() zeroValueConfig.IgnoreNSlowestReplicas = 0 names, err := f.m.UpdateConfiguration("t2", zeroValueConfig, true /* copyZeroValues */) - if err != nil { - t.Fatal(err) - } - if err := checkConfig(f.m, []string{"t2"}, names, defaultTargetLag, 0); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + err = checkConfig(f.m, []string{"t2"}, names, defaultTargetLag, 0) + require.NoError(t, err) + // Repeat test for all throttlers. 
allNames, err := f.m.UpdateConfiguration("" /* all */, zeroValueConfig, true /* copyZeroValues */) - if err != nil { - t.Fatal(err) - } - if err := checkConfig(f.m, []string{"t1", "t2"}, allNames, defaultTargetLag, 0); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + err = checkConfig(f.m, []string{"t1", "t2"}, allNames, defaultTargetLag, 0) + require.NoError(t, err) } func checkConfig(m *managerImpl, throttlers []string, updatedThrottlers []string, targetLag int64, ignoreNSlowestReplicas int32) error { diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index f08c9211205..ac184fe7be8 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -382,7 +382,6 @@ logResult: r.Reason += clearReason } - log.Infof("%v", r) m.results.add(r) } diff --git a/go/vt/throttler/max_replication_lag_module_test.go b/go/vt/throttler/max_replication_lag_module_test.go index 6379b067412..77be6501e4c 100644 --- a/go/vt/throttler/max_replication_lag_module_test.go +++ b/go/vt/throttler/max_replication_lag_module_test.go @@ -23,10 +23,10 @@ import ( "time" "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/log" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -51,32 +51,33 @@ const ( ) type testFixture struct { + tb testing.TB m *MaxReplicationLagModule ratesHistory *fakeRatesHistory } -func newTestFixtureWithMaxReplicationLag(maxReplicationLag int64) (*testFixture, error) { +func newTestFixtureWithMaxReplicationLag(tb testing.TB, maxReplicationLag int64) *testFixture { config := NewMaxReplicationLagModuleConfig(maxReplicationLag) - return newTestFixture(config) + return newTestFixture(tb, config) } -func newTestFixture(config MaxReplicationLagModuleConfig) (*testFixture, error) { +func 
newTestFixture(tb testing.TB, config MaxReplicationLagModuleConfig) *testFixture { ratesHistory := newFakeRatesHistory() fc := &fakeClock{} // Do not start at 0*time.Second because than the code cannot distinguish // between a legimate value and a zero time.Time value. fc.setNow(1 * time.Second) m, err := NewMaxReplicationLagModule(config, ratesHistory.aggregatedIntervalHistory, fc.now) - if err != nil { - return nil, err - } + require.NoError(tb, err) + // Updates for the throttler go into a big channel and will be ignored. m.rateUpdateChan = make(chan<- struct{}, 1000) return &testFixture{ + tb: tb, m: m, ratesHistory: ratesHistory, - }, nil + } } // process does the same thing as MaxReplicationLagModule.ProcessRecords() does @@ -91,17 +92,10 @@ func (tf *testFixture) recalculateRate(lagRecord replicationLagRecord) { tf.m.recalculateRate(lagRecord) } -func (tf *testFixture) checkState(state state, rate int64, lastRateChange time.Time) error { - if got, want := tf.m.currentState, state; got != want { - return fmt.Errorf("module in wrong state. got = %v, want = %v", got, want) - } - if got, want := tf.m.MaxRate(), rate; got != want { - return fmt.Errorf("module has wrong MaxRate(). got = %v, want = %v", got, want) - } - if got, want := tf.m.lastRateChange, lastRateChange; got != want { - return fmt.Errorf("module has wrong lastRateChange time. 
got = %v, want = %v", got, want) - } - return nil +func (tf *testFixture) checkState(state state, rate int64, lastRateChange time.Time) { + require.Equal(tf.tb, state, tf.m.currentState, "module in wrong state") + require.Equal(tf.tb, rate, tf.m.MaxRate(), "module has wrong MaxRate()") + require.Equal(tf.tb, lastRateChange, tf.m.lastRateChange, "module has wrong lastRateChange time") } func TestNewMaxReplicationLagModule_recalculateRate(t *testing.T) { @@ -128,14 +122,11 @@ func TestNewMaxReplicationLagModule_recalculateRate(t *testing.T) { }, } - for _, aTestCase := range testCases { - theCase := aTestCase - + for _, theCase := range testCases { t.Run(theCase.name, func(t *testing.T) { t.Parallel() - fixture, err := newTestFixtureWithMaxReplicationLag(5) - assert.NoError(t, err) + fixture := newTestFixtureWithMaxReplicationLag(t, 5) if theCase.expectPanic { assert.Panics(t, func() { fixture.recalculateRate(theCase.lagRecord) }) @@ -146,15 +137,9 @@ func TestNewMaxReplicationLagModule_recalculateRate(t *testing.T) { } func TestMaxReplicationLagModule_RateNotZeroWhenDisabled(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(ReplicationLagModuleDisabled) - if err != nil { - t.Fatal(err) - } - + tf := newTestFixtureWithMaxReplicationLag(t, ReplicationLagModuleDisabled) // Initial rate must not be zero. It's ReplicationLagModuleDisabled instead. - if err := tf.checkState(stateIncreaseRate, ReplicationLagModuleDisabled, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, ReplicationLagModuleDisabled, sinceZero(1*time.Second)) } func TestMaxReplicationLagModule_InitialStateAndWait(t *testing.T) { @@ -162,34 +147,24 @@ func TestMaxReplicationLagModule_InitialStateAndWait(t *testing.T) { // Overwrite the default config to make sure we test a non-default value. 
config.InitialRate = 123 config.MaxDurationBetweenIncreasesSec = 23 - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // Initial rate must be config.InitialRate. - if err := tf.checkState(stateIncreaseRate, config.InitialRate, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, config.InitialRate, sinceZero(1*time.Second)) + // After startup, the next increment won't happen until // config.MaxDurationBetweenIncreasesSec elapsed. - if got, want := tf.m.nextAllowedChangeAfterInit, sinceZero(config.MaxDurationBetweenIncreases()+1*time.Second); got != want { - t.Fatalf("got = %v, want = %v", got, want) - } + require.Equal(t, sinceZero(config.MaxDurationBetweenIncreases()+1*time.Second), tf.m.nextAllowedChangeAfterInit) } // TestMaxReplicationLagModule_Increase tests only the continuous increase of the // rate and assumes that we are well below the replica capacity. func TestMaxReplicationLagModule_Increase(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // We start at config.InitialRate. - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) + // After the initial wait period of 62s // (config.MaxDurationBetweenIncreasesSec), regular increments start. @@ -200,31 +175,25 @@ func TestMaxReplicationLagModule_Increase(t *testing.T) { tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. // r2 becomes the "replica under test". 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) + // We have to wait at least config.MinDurationBetweenIncreasesSec (40s) before // the next increase. - if got, want := tf.m.replicaUnderTest.nextAllowedChange, sinceZero(70*time.Second).Add(tf.m.config.MinDurationBetweenIncreases()); got != want { - t.Fatalf("got = %v, want = %v", got, want) - } + require.Equal(t, sinceZero(70*time.Second).Add(tf.m.config.MinDurationBetweenIncreases()), tf.m.replicaUnderTest.nextAllowedChange) + // r2 @ 75s, 0s lag tf.ratesHistory.add(sinceZero(70*time.Second), 100) tf.ratesHistory.add(sinceZero(74*time.Second), 200) tf.process(lagRecord(sinceZero(75*time.Second), r2, 0)) // Lag record was ignored because it's within the wait period. - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 80s, 0s lag tf.ratesHistory.add(sinceZero(79*time.Second), 200) tf.process(lagRecord(sinceZero(80*time.Second), r1, 0)) // The r1 lag update was ignored because an increment "under test" is always // locked in with the replica which triggered the increase (r2 this time). - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // No increase is possible for the next 20 seconds. 
@@ -232,25 +201,19 @@ func TestMaxReplicationLagModule_Increase(t *testing.T) { tf.ratesHistory.add(sinceZero(80*time.Second), 200) tf.ratesHistory.add(sinceZero(89*time.Second), 200) tf.process(lagRecord(sinceZero(90*time.Second), r2, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 100s, 0s lag tf.ratesHistory.add(sinceZero(99*time.Second), 200) tf.process(lagRecord(sinceZero(100*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // Next rate increase is possible after testing the rate for 40s. // r2 @ 110s, 0s lag tf.ratesHistory.add(sinceZero(109*time.Second), 200) tf.process(lagRecord(sinceZero(110*time.Second), r2, 0)) - if err := tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)) } // TestMaxReplicationLagModule_ReplicaUnderTest_LastErrorOrNotUp is @@ -258,19 +221,14 @@ func TestMaxReplicationLagModule_Increase(t *testing.T) { // test that the system makes progress if the currently tracked replica has // LastError set or is no longer tracked. func TestMaxReplicationLagModule_ReplicaUnderTest_LastErrorOrNotUp(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // r2 @ 70s, 0s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. // r2 becomes the "replica under test". 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r2 @ 75s, 0s lag, LastError set rError := lagRecord(sinceZero(75*time.Second), r2, 0) @@ -284,9 +242,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_LastErrorOrNotUp(t *testing.T) // We ignore r2 as "replica under test" because it has LastError set. // Instead, we act on r1. // r1 becomes the "replica under test". - if err := tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)) // We'll simulate a shutdown of r1 i.e. we're no longer tracking it. // r1 @ 115s, 0s lag, !Up @@ -302,36 +258,27 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_LastErrorOrNotUp(t *testing.T) // We ignore r1 as "replica under test" because it has !Up set. // Instead, we act on r2. // r2 becomes the "replica under test". - if err := tf.checkState(stateIncreaseRate, 800, sinceZero(150*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 800, sinceZero(150*time.Second)) } // TestMaxReplicationLagModule_ReplicaUnderTest_Timeout tests the safe guard // that a "replica under test" which didn't report its lag for a while will be // cleared such that any other replica can become the new "replica under test". func TestMaxReplicationLagModule_ReplicaUnderTest_Timeout(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // r2 @ 70s, 0s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. // r2 becomes the "replica under test". 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 80s, 0s lag (ignored because r2 is the "replica under test") tf.ratesHistory.add(sinceZero(70*time.Second), 100) tf.ratesHistory.add(sinceZero(79*time.Second), 200) tf.process(lagRecord(sinceZero(80*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r2 as "replica under test" did not report its lag for too long. // We'll ignore it from now and let other replicas trigger rate changes. @@ -340,28 +287,21 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_Timeout(t *testing.T) { // (last rate change + test duration + max duration between increases). tf.ratesHistory.add(sinceZero(172*time.Second), 200) tf.process(lagRecord(sinceZero(173*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 400, sinceZero(173*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 400, sinceZero(173*time.Second)) } // TestMaxReplicationLagModule_ReplicaUnderTest_IncreaseToDecrease verifies that // the current "replica under test" is ignored when our state changes from // "stateIncreaseRate" to "stateDecreaseAndGuessRate". func TestMaxReplicationLagModule_ReplicaUnderTest_IncreaseToDecrease(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // r2 @ 70s, 0s lag (triggers the increase state) tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. // r2 becomes the "replica under test". 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 80s, 0s lag // This lag record is required in the next step to correctly calculate how @@ -370,16 +310,12 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_IncreaseToDecrease(t *testing. tf.ratesHistory.add(sinceZero(79*time.Second), 200) tf.process(lagRecord(sinceZero(80*time.Second), r1, 0)) // r1 remains the "replica under test". - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r2 @ 90s, 0s lag (ignored because the test duration is not up yet) tf.ratesHistory.add(sinceZero(89*time.Second), 200) tf.process(lagRecord(sinceZero(90*time.Second), r2, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 100s, 3s lag (above target, provokes a decrease) tf.ratesHistory.add(sinceZero(99*time.Second), 200) @@ -387,18 +323,14 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_IncreaseToDecrease(t *testing. // r1 becomes the "replica under test". // r1's high lag triggered the decrease state and therefore we did not wait // for the pending increase of "replica under test" r2. - if err := tf.checkState(stateDecreaseAndGuessRate, 140, sinceZero(100*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateDecreaseAndGuessRate, 140, sinceZero(100*time.Second)) // r2 lag records are ignored while r1 is the "replica under test". 
// r2 @ 110s, 0s lag tf.ratesHistory.add(sinceZero(100*time.Second), 200) tf.ratesHistory.add(sinceZero(109*time.Second), 140) tf.process(lagRecord(sinceZero(110*time.Second), r2, 0)) - if err := tf.checkState(stateDecreaseAndGuessRate, 140, sinceZero(100*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateDecreaseAndGuessRate, 140, sinceZero(100*time.Second)) // r1 leaves the "replica under test" as soon as the test duration is up // or its lag improved to a better state. @@ -408,19 +340,14 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_IncreaseToDecrease(t *testing. tf.ratesHistory.add(sinceZero(118*time.Second), 140) tf.process(lagRecord(sinceZero(119*time.Second), r1, 0)) // Rate increases to 170, the middle of: [good, bad] = [140, 200]. - if err := tf.checkState(stateIncreaseRate, 170, sinceZero(119*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 170, sinceZero(119*time.Second)) } // TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency verifies // that the current "replica under test" is ignored when our state changes from // "stateDecreaseAndGuessRate" to "stateEmergency". func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // INCREASE @@ -429,9 +356,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing // much r1 lags behind due to the rate increase. 
tf.ratesHistory.add(sinceZero(19*time.Second), 100) tf.process(lagRecord(sinceZero(20*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) // DECREASE @@ -439,9 +364,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing tf.ratesHistory.add(sinceZero(39*time.Second), 100) tf.process(lagRecord(sinceZero(40*time.Second), r1, 3)) // r1 becomes the "replica under test". - if err := tf.checkState(stateDecreaseAndGuessRate, 70, sinceZero(40*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateDecreaseAndGuessRate, 70, sinceZero(40*time.Second)) // EMERGENCY @@ -451,9 +374,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing tf.ratesHistory.add(sinceZero(49*time.Second), 70) tf.process(lagRecord(sinceZero(50*time.Second), r2, 10)) // r1 overrides r2 as new "replica under test". - if err := tf.checkState(stateEmergency, 35, sinceZero(50*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 35, sinceZero(50*time.Second)) // r1 lag becomes worse than the r1 lag now. We don't care and keep r1 as // "replica under test" for now. @@ -461,9 +382,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing tf.ratesHistory.add(sinceZero(50*time.Second), 70) tf.ratesHistory.add(sinceZero(59*time.Second), 35) tf.process(lagRecord(sinceZero(60*time.Second), r1, 15)) - if err := tf.checkState(stateEmergency, 35, sinceZero(50*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 35, sinceZero(50*time.Second)) // INCREASE @@ -472,9 +391,7 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing tf.ratesHistory.add(sinceZero(69*time.Second), 35) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // r2 becomes the new "replica under test". 
- if err := tf.checkState(stateIncreaseRate, 70, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 70, sinceZero(70*time.Second)) // EMERGENCY @@ -484,23 +401,16 @@ func TestMaxReplicationLagModule_ReplicaUnderTest_DecreaseToEmergency(t *testing tf.ratesHistory.add(sinceZero(79*time.Second), 70) tf.process(lagRecord(sinceZero(80*time.Second), r1, 15)) // r1 becomes the new "replica under test". - if err := tf.checkState(stateEmergency, 35, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 35, sinceZero(80*time.Second)) } // TestMaxReplicationLagModule_Increase_BadRateUpperBound verifies that a // known bad rate is always the upper bound for any rate increase. func TestMaxReplicationLagModule_Increase_BadRateUpperBound(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // We start at config.InitialRate. - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) // Assume that a bad value of 150 was set @ 30s and log error if err := tf.m.memory.markBad(150, sinceZero(30*time.Second)); err != nil { @@ -514,24 +424,17 @@ func TestMaxReplicationLagModule_Increase_BadRateUpperBound(t *testing.T) { // [0s, 69s]. // However, this would go over the bad rate. Therefore, the new rate will be // the middle of [100, 150] ([actual rate, bad rate]). - if err := tf.checkState(stateIncreaseRate, 125, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 125, sinceZero(70*time.Second)) } // TestMaxReplicationLagModule_Increase_MinimumProgress verifies that the // calculated new rate is never identical to the current rate and at least by // "memoryGranularity" higher. 
func TestMaxReplicationLagModule_Increase_MinimumProgress(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // We start at config.InitialRate. - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) // Assume that a bad value of 105 was set @ 30s. tf.m.memory.markBad(105, sinceZero(30*time.Second)) @@ -547,26 +450,19 @@ func TestMaxReplicationLagModule_Increase_MinimumProgress(t *testing.T) { // But then the new rate is identical to the old set rate of 100. // In such a case, we always advance the new rate by "memoryGranularity" // (which is currently 5). - if err := tf.checkState(stateIncreaseRate, 105, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 105, sinceZero(70*time.Second)) } // TestMaxReplicationLagModule_Decrease verifies that we correctly calculate the // replication rate in the decreaseAndGuessRate state. func TestMaxReplicationLagModule_Decrease(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(5) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 5) // r2 @ 70s, 0s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r2 @ 90s, 3s lag (above target, provokes a decrease) tf.ratesHistory.add(sinceZero(70*time.Second), 100) @@ -581,9 +477,7 @@ func TestMaxReplicationLagModule_Decrease(t *testing.T) { // Since this backlog is spread across SpreadBacklogAcrossSec (20s), // the guessed rate gets further reduced by 30 QPS (600 queries / 20s). // Hence, the rate is set to 140 QPS (170 - 30). - if err := tf.checkState(stateDecreaseAndGuessRate, 140, sinceZero(90*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateDecreaseAndGuessRate, 140, sinceZero(90*time.Second)) } // TestMaxReplicationLagModule_Decrease_NoReplicaHistory skips decreasing the @@ -591,44 +485,33 @@ func TestMaxReplicationLagModule_Decrease(t *testing.T) { // replication lag value since the last rate change for r2. Therefore, we cannot // reliably guess its rate and wait for the next available record. func TestMaxReplicationLagModule_Decrease_NoReplicaHistory(t *testing.T) { - tf, err := newTestFixtureWithMaxReplicationLag(10) - if err != nil { - t.Fatal(err) - } + tf := newTestFixtureWithMaxReplicationLag(t, 10) // r2 @ 70s, 0s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 80s, 3s lag (above target, but no decrease triggered) tf.ratesHistory.add(sinceZero(70*time.Second), 100) tf.ratesHistory.add(sinceZero(79*time.Second), 200) tf.process(lagRecord(sinceZero(80*time.Second), r1, 3)) // Rate was decreased by 25% (half the emergency decrease) as safety measure. 
- if err := tf.checkState(stateDecreaseAndGuessRate, 150, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateDecreaseAndGuessRate, 150, sinceZero(80*time.Second)) // r2 @ 90s, 0s lag tf.ratesHistory.add(sinceZero(80*time.Second), 200) tf.ratesHistory.add(sinceZero(89*time.Second), 150) tf.process(lagRecord(sinceZero(90*time.Second), r2, 0)) // r2 is ignored because r1 is the "replica under test". - if err := tf.checkState(stateDecreaseAndGuessRate, 150, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateDecreaseAndGuessRate, 150, sinceZero(80*time.Second)) // r1 recovers after the rate decrease and triggers a new increase. // r1 @ 100s, 0s lag tf.ratesHistory.add(sinceZero(99*time.Second), 150) tf.process(lagRecord(sinceZero(100*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 300, sinceZero(100*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 300, sinceZero(100*time.Second)) } func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_REPLICA(t *testing.T) { @@ -648,33 +531,22 @@ func testIgnoreNSlowestReplicas(t *testing.T, r1UID, r2UID uint32) { config.IgnoreNSlowestRdonlys = 1 typ = "RDONLY" } - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // r1 @ 80s, 0s lag tf.ratesHistory.add(sinceZero(79*time.Second), 100) tf.process(lagRecord(sinceZero(80*time.Second), r1UID, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)) // r2 @ 90s, 10s lag tf.ratesHistory.add(sinceZero(80*time.Second), 100) tf.ratesHistory.add(sinceZero(90*time.Second), 200) tf.process(lagRecord(sinceZero(90*time.Second), r2UID, 10)) // Although r2's lag is high, it's ignored because it's the 1 slowest replica. 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)) results := tf.m.results.latestValues() - if got, want := len(results), 2; got != want { - t.Fatalf("skipped replica should have been recorded on the results page. got = %v, want = %v", got, want) - } - if got, want := results[0].Reason, fmt.Sprintf("skipping this replica because it's among the 1 slowest %v tablets", typ); got != want { - t.Fatalf("skipped replica should have been recorded as skipped on the results page. got = %v, want = %v", got, want) - } + require.Len(t, results, 2, "skipped replica should have been recorded on the results page") + require.Equal(t, fmt.Sprintf("skipping this replica because it's among the 1 slowest %v tablets", typ), results[0].Reason, "skipped replica should have been recorded as skipped on the results page.") // r1 @ 100s, 20s lag tf.ratesHistory.add(sinceZero(99*time.Second), 200) @@ -682,27 +554,20 @@ func testIgnoreNSlowestReplicas(t *testing.T, r1UID, r2UID uint32) { // r1 would become the new 1 slowest replica. However, we do not ignore it // because then we would ignore all known replicas in a row. // => react to the high lag and reduce the rate by 50% from 200 to 100. - if err := tf.checkState(stateEmergency, 100, sinceZero(100*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 100, sinceZero(100*time.Second)) } func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_NotEnoughReplicas(t *testing.T) { config := NewMaxReplicationLagModuleConfig(5) config.IgnoreNSlowestReplicas = 1 - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // r2 @ 70s, 10s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 10)) // r2 is the 1 slowest replica. However, it's not ignored because then we // would ignore all replicas. 
Therefore, we react to its lag increase. - if err := tf.checkState(stateEmergency, 50, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 50, sinceZero(70*time.Second)) } // TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IsIgnoredDuringIncrease @@ -715,45 +580,34 @@ func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_NotEnoughReplicas(t *tes func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IsIgnoredDuringIncrease(t *testing.T) { config := NewMaxReplicationLagModuleConfig(5) config.IgnoreNSlowestReplicas = 1 - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // r2 @ 70s, 0s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 200 based on actual rate of 100 within [0s, 69s]. - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 80s, 0s lag tf.ratesHistory.add(sinceZero(70*time.Second), 100) tf.ratesHistory.add(sinceZero(79*time.Second), 200) tf.process(lagRecord(sinceZero(80*time.Second), r1, 0)) // Lag record was ignored because it's within the wait period. - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r2 becomes slow and will be ignored now. // r2 @ 90s, 10s lag tf.ratesHistory.add(sinceZero(89*time.Second), 200) tf.m.replicaLagCache.add(lagRecord(sinceZero(90*time.Second), r2, 10)) // We ignore the 1 slowest replica and do not decrease despite r2's high lag. 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r1 @ 110s, 0s lag tf.ratesHistory.add(sinceZero(109*time.Second), 200) tf.process(lagRecord(sinceZero(110*time.Second), r1, 0)) // Meanwhile, r1 is doing fine and will trigger the next increase because // we're no longer waiting for the ignored r2. - if err := tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)) } // TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IncludeRdonly is the same @@ -764,55 +618,36 @@ func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IncludeRdonly(t *testing // We ignore up to 1 REPLICA and 1 RDONLY tablet. config.IgnoreNSlowestReplicas = 1 config.IgnoreNSlowestRdonlys = 1 - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // r1 @ 80s, 0s lag tf.ratesHistory.add(sinceZero(79*time.Second), 100) tf.process(lagRecord(sinceZero(80*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)) // rdonly1 @ 85s, 0s lag tf.ratesHistory.add(sinceZero(80*time.Second), 100) tf.ratesHistory.add(sinceZero(84*time.Second), 200) tf.process(lagRecord(sinceZero(85*time.Second), rdonly1, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)) // r2 @ 90s, 10s lag tf.ratesHistory.add(sinceZero(89*time.Second), 200) tf.process(lagRecord(sinceZero(90*time.Second), r2, 10)) // Although r2's lag is high, it's ignored because it's the 1 slowest REPLICA tablet. 
- if err := tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)) results := tf.m.results.latestValues() - if got, want := len(results), 3; got != want { - t.Fatalf("skipped replica should have been recorded on the results page. got = %v, want = %v", got, want) - } - if got, want := results[0].Reason, "skipping this replica because it's among the 1 slowest REPLICA tablets"; got != want { - t.Fatalf("skipped replica should have been recorded as skipped on the results page. got = %v, want = %v", got, want) - } + require.Len(t, results, 3, "skipped replica should have been recorded on the results page") + require.Equal(t, "skipping this replica because it's among the 1 slowest REPLICA tablets", results[0].Reason, "skipped replica should have been recorded as skipped on the results page") // rdonly2 @ 95s, 10s lag tf.ratesHistory.add(sinceZero(94*time.Second), 200) tf.process(lagRecord(sinceZero(95*time.Second), rdonly2, 10)) // Although rdonly2's lag is high, it's ignored because it's the 1 slowest RDONLY tablet. - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(80*time.Second)) results = tf.m.results.latestValues() - if got, want := len(results), 4; got != want { - t.Fatalf("skipped replica should have been recorded on the results page. got = %v, want = %v", got, want) - } - if got, want := results[0].Reason, "skipping this replica because it's among the 1 slowest RDONLY tablets"; got != want { - t.Fatalf("skipped replica should have been recorded as skipped on the results page. 
got = %v, want = %v", got, want) - } + require.Len(t, results, 4, "skipped replica should have been recorded on the results page") + require.Equal(t, "skipping this replica because it's among the 1 slowest RDONLY tablets", results[0].Reason, "skipped replica should have been recorded as skipped on the results page") // r1 @ 100s, 11s lag tf.ratesHistory.add(sinceZero(99*time.Second), 200) @@ -820,9 +655,7 @@ func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IncludeRdonly(t *testing // r1 would become the new 1 slowest REPLICA tablet. However, we do not ignore // it because then we would ignore all known replicas in a row. // => react to the high lag and reduce the rate by 50% from 200 to 100. - if err := tf.checkState(stateEmergency, 100, sinceZero(100*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 100, sinceZero(100*time.Second)) // r2 and rdonly are omitted here for brevity. @@ -831,9 +664,7 @@ func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IncludeRdonly(t *testing // r1 @ 120s, 0s lag tf.ratesHistory.add(sinceZero(119*time.Second), 100) tf.process(lagRecord(sinceZero(120*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(120*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(120*time.Second)) // rdonly1 @ 125s, 11s lag tf.ratesHistory.add(sinceZero(120*time.Second), 100) @@ -842,9 +673,7 @@ func TestMaxReplicationLagModule_IgnoreNSlowestReplicas_IncludeRdonly(t *testing // rdonly1 would become the new 1 slowest RDONLY tablet. However, we do not // ignore it because then we would ignore all known replicas in a row. // => react to the high lag and reduce the rate by 50% from 200 to 100. 
- if err := tf.checkState(stateEmergency, 100, sinceZero(125*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 100, sinceZero(125*time.Second)) } // TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues verifies that a @@ -863,30 +692,21 @@ func TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues(t *testing.T) { // Use a very aggressive aging rate to verify that bad rates do not age while // we're in the "emergency" state. config.AgeBadRateAfterSec = 21 - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // INCREASE (necessary to set a "good" rate in the memory) // r2 @ 70s, 0s lag tf.ratesHistory.add(sinceZero(69*time.Second), 100) tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) - if err := tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 200, sinceZero(70*time.Second)) // r2 @ 110s, 0s lag tf.ratesHistory.add(sinceZero(70*time.Second), 100) tf.ratesHistory.add(sinceZero(109*time.Second), 200) tf.process(lagRecord(sinceZero(110*time.Second), r2, 0)) - if err := tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)); err != nil { - t.Fatal(err) - } - if got, want := tf.m.memory.highestGood(), int64(200); got != want { - t.Fatalf("wrong good rate: got = %v, want = %v", got, want) - } + tf.checkState(stateIncreaseRate, 400, sinceZero(110*time.Second)) + require.Equal(t, int64(200), tf.m.memory.highestGood(), "wrong good rate") // DECREASE (necessary to set a "bad" rate in the memory) @@ -894,12 +714,8 @@ func TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues(t *testing.T) { tf.ratesHistory.add(sinceZero(110*time.Second), 200) tf.ratesHistory.add(sinceZero(129*time.Second), 400) tf.process(lagRecord(sinceZero(130*time.Second), r2, 3)) - if err := tf.checkState(stateDecreaseAndGuessRate, 280, sinceZero(130*time.Second)); err != nil { - t.Fatal(err) - } - if got, 
want := tf.m.memory.lowestBad(), int64(400); got != want { - t.Fatalf("wrong bad rate: got = %v, want = %v", got, want) - } + tf.checkState(stateDecreaseAndGuessRate, 280, sinceZero(130*time.Second)) + require.Equal(t, int64(400), tf.m.memory.lowestBad(), "wrong bad rate") // memory: [good, bad] now is [200, 400]. @@ -910,12 +726,8 @@ func TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues(t *testing.T) { tf.ratesHistory.add(sinceZero(130*time.Second), 400) tf.ratesHistory.add(sinceZero(139*time.Second), 280) tf.process(lagRecord(sinceZero(140*time.Second), r1, 3600)) - if err := tf.checkState(stateEmergency, 140, sinceZero(140*time.Second)); err != nil { - t.Fatal(err) - } - if got, want := tf.m.memory.lowestBad(), int64(280); got != want { - t.Fatalf("bad rate should change when we transition to the emergency state: got = %v, want = %v", got, want) - } + tf.checkState(stateEmergency, 140, sinceZero(140*time.Second)) + require.Equal(t, int64(280), tf.m.memory.lowestBad(), "bad rate should change when we transition to the emergency state") // memory: [good, bad] now is [200, 280]. @@ -923,9 +735,7 @@ func TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues(t *testing.T) { tf.ratesHistory.add(sinceZero(140*time.Second), 280) tf.ratesHistory.add(sinceZero(149*time.Second), 140) tf.process(lagRecord(sinceZero(150*time.Second), r2, 0)) - if err := tf.checkState(stateEmergency, 140, sinceZero(140*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateEmergency, 140, sinceZero(140*time.Second)) tf.ratesHistory.add(sinceZero(160*time.Second), 140) // r1 keeps to drive the throttler rate down, but not the bad rate. 
@@ -940,20 +750,14 @@ func TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues(t *testing.T) { tf.ratesHistory.add(r1Time, int64(rates[i-1])) } tf.process(lagRecord(r1Time, r1, 3600)) - if err := tf.checkState(stateEmergency, int64(rates[i]), r1Time); err != nil { - t.Fatalf("time=%d: %v", tm, err) - } - if got, want := tf.m.memory.lowestBad(), int64(280); got != want { - t.Fatalf("time=%d: bad rate must not change when the old state is the emergency state: got = %v, want = %v", tm, got, want) - } + tf.checkState(stateEmergency, int64(rates[i]), r1Time) + require.Equal(t, int64(280), tf.m.memory.lowestBad(), "bad rate must not change when the old state is the emergency state") // r2 @ s, 0s lag (ignored because r1 is the "replica under test") r2Time := sinceZero(time.Duration(tm+10) * time.Second) tf.ratesHistory.add(r2Time, int64(rates[i])) tf.process(lagRecord(r2Time, r2, 0)) - if err := tf.checkState(stateEmergency, int64(rates[i]), r1Time); err != nil { - t.Fatalf("time=%d: %v", tm, err) - } + tf.checkState(stateEmergency, int64(rates[i]), r1Time) } // INCREASE @@ -966,25 +770,18 @@ func TestMaxReplicationLagModule_EmergencyDoesNotChangeBadValues(t *testing.T) { tf.ratesHistory.add(sinceZero(339*time.Second), 1) tf.process(lagRecord(sinceZero(340*time.Second), r1, 0)) // New rate is 240, the middle of [good, bad] = [200, 240]. - if err := tf.checkState(stateIncreaseRate, 240, sinceZero(340*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 240, sinceZero(340*time.Second)) } func TestMaxReplicationLagModule_NoIncreaseIfMaxRateWasNotApproached(t *testing.T) { config := NewMaxReplicationLagModuleConfig(5) - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // r1 @ 20s, 0s lag // This lag record is required in the next step to correctly calculate how // much r1 lags behind due to the rate increase. 
tf.process(lagRecord(sinceZero(20*time.Second), r1, 0)) - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) // Master gets 10 QPS in second 69. // r1 @ 70s, 0s lag. @@ -993,9 +790,7 @@ func TestMaxReplicationLagModule_NoIncreaseIfMaxRateWasNotApproached(t *testing. tf.ratesHistory.add(sinceZero(69*time.Second), 10) tf.process(lagRecord(sinceZero(70*time.Second), r1, 0)) // r1 becomes the "replica under test". - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) } // lagRecord creates a fake record using a fake LegacyTabletStats object. @@ -1034,15 +829,10 @@ func tabletStats(uid, lag uint32) discovery.TabletHealth { func TestApplyLatestConfig(t *testing.T) { config := NewMaxReplicationLagModuleConfig(5) - tf, err := newTestFixture(config) - if err != nil { - t.Fatal(err) - } + tf := newTestFixture(t, config) // We start at config.InitialRate. - if err := tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 100, sinceZero(1*time.Second)) // Change the default MaxIncrease from 100% to 200% and test that it's // correctly propagated. config.MaxIncrease = 2 @@ -1053,9 +843,7 @@ func TestApplyLatestConfig(t *testing.T) { tf.process(lagRecord(sinceZero(70*time.Second), r2, 0)) // Rate was increased to 300 based on an actual rate of 100 within [0s, 69s]. // That's a 200% increase. - if err := tf.checkState(stateIncreaseRate, 300, sinceZero(70*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 300, sinceZero(70*time.Second)) // Now reset the config to its default values. 
tf.m.resetConfiguration() @@ -1066,7 +854,5 @@ func TestApplyLatestConfig(t *testing.T) { tf.ratesHistory.add(sinceZero(80*time.Second), 300) tf.ratesHistory.add(sinceZero(109*time.Second), 300) tf.process(lagRecord(sinceZero(110*time.Second), r2, 0)) - if err := tf.checkState(stateIncreaseRate, 600, sinceZero(110*time.Second)); err != nil { - t.Fatal(err) - } + tf.checkState(stateIncreaseRate, 600, sinceZero(110*time.Second)) } diff --git a/go/vt/throttler/memory_test.go b/go/vt/throttler/memory_test.go index 899e175672a..7dcc13301f7 100644 --- a/go/vt/throttler/memory_test.go +++ b/go/vt/throttler/memory_test.go @@ -20,168 +20,157 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/log" + "github.com/stretchr/testify/require" ) func TestMemory(t *testing.T) { m := newMemory(5, 1*time.Second, 0.10) // Add several good rates. - if err := m.markGood(201); err != nil { - log.Errorf("m.markGood(201) failed :%v ", err) - } + err := m.markGood(201) + require.NoError(t, err) want200 := int64(200) - if got := m.highestGood(); got != want200 { - t.Fatalf("memory with one good entry: got = %v, want = %v", got, want200) - } + got := m.highestGood() + require.Equal(t, want200, got, "memory with one good entry") - //log error - if err := m.markGood(101); err != nil { - log.Errorf("m.markGood(101) failed :%v ", err) - } + err = m.markGood(101) + require.NoError(t, err) - if got := m.highestGood(); got != want200 { - t.Fatalf("wrong order within memory: got = %v, want = %v", got, want200) - } + got = m.highestGood() + require.Equal(t, want200, got, "wrong order within memory") - //log error - if err := m.markGood(301); err != nil { - log.Errorf(" m.markGood(301) failed :%v ", err) - } + err = m.markGood(301) + require.NoError(t, err) want300 := int64(300) - if got := m.highestGood(); got != want300 { - t.Fatalf("wrong order within memory: got = %v, want = %v", got, want300) - } - m.markGood(306) + got = m.highestGood() + require.Equal(t, want300, got, "wrong order within 
memory") + + err = m.markGood(306) + require.NoError(t, err) + want305 := int64(305) - if got := m.highestGood(); got != want305 { - t.Fatalf("wrong order within memory: got = %v, want = %v", got, want305) - } + got = m.highestGood() + require.Equal(t, want305, got, "wrong order within memory") // 300 and 305 will turn from good to bad. - if got := m.lowestBad(); got != 0 { - t.Fatalf("lowestBad should return zero value when no bad rate is recorded yet: got = %v", got) - } - - //log error - if err := m.markBad(300, sinceZero(0)); err != nil { - log.Errorf(" m.markBad(300, sinceZero(0)) failed :%v ", err) - } - - if got, want := m.lowestBad(), want300; got != want { - t.Fatalf("bad rate was not recorded: got = %v, want = %v", got, want) - } - if got := m.highestGood(); got != want200 { - t.Fatalf("new lower bad rate did not invalidate previous good rates: got = %v, want = %v", got, want200) - } - - //log error - if err := m.markBad(311, sinceZero(0)); err != nil { - log.Errorf(" m.markBad(311, sinceZero(0)) failed :%v ", err) - } - - if got := m.lowestBad(); got != want300 { - t.Fatalf("bad rates higher than the current one should be ignored: got = %v, want = %v", got, want300) - } + got = m.lowestBad() + require.Equal(t, int64(0), got, "lowestBad should return zero value when no bad rate is recorded yet") + + err = m.markBad(300, sinceZero(0)) + require.NoError(t, err) + + got = m.lowestBad() + require.Equal(t, want300, got, "bad rate was not recorded") + + got = m.highestGood() + require.Equal(t, want200, got, "new lower bad rate did not invalidate previous good rates") + + err = m.markBad(311, sinceZero(0)) + require.NoError(t, err) + + got = m.lowestBad() + require.Equal(t, want300, got, "bad rates higher than the current one should be ignored") // a good 601 will be ignored because the first bad is at 300. 
- if err := m.markGood(601); err == nil { - t.Fatal("good rates cannot go beyond the lowest bad rate: should have returned an error") - } - if got := m.lowestBad(); got != want300 { - t.Fatalf("good rates cannot go beyond the lowest bad rate: got = %v, want = %v", got, want300) - } - if got := m.highestGood(); got != want200 { - t.Fatalf("good rates beyond the lowest bad rate must be ignored: got = %v, want = %v", got, want200) - } + err = m.markGood(601) + require.Error(t, err, "good rates cannot go beyond the lowest bad rate") + + got = m.lowestBad() + require.Equal(t, want300, got, "good rates cannot go beyond the lowest bad rate") + + got = m.highestGood() + require.Equal(t, want200, got, "good rates beyond the lowest bad rate must be ignored") // 199 will be rounded up to 200. - err := m.markBad(199, sinceZero(0)) + err = m.markBad(199, sinceZero(0)) + require.NoError(t, err) - if err != nil { - t.Fatalf(" m.markBad(199, sinceZero(0)) failed :%v ", err) - } + got = m.lowestBad() + require.Equal(t, want200, got, "bad rate was not updated") - if got := m.lowestBad(); got != want200 { - t.Fatalf("bad rate was not updated: got = %v, want = %v", got, want200) - } want100 := int64(100) - if got := m.highestGood(); got != want100 { - t.Fatalf("previous highest good rate was not marked as bad: got = %v, want = %v", got, want100) - } + got = m.highestGood() + require.Equal(t, want100, got, "previous highest good rate was not marked as bad") } func TestMemory_markDownIgnoresDrasticBadValues(t *testing.T) { m := newMemory(1, 1*time.Second, 0.10) good := int64(1000) bad := int64(1001) - m.markGood(good) - m.markBad(bad, sinceZero(0)) - if got := m.highestGood(); got != good { - t.Fatalf("good rate was not correctly inserted: got = %v, want = %v", got, good) - } - if got := m.lowestBad(); got != bad { - t.Fatalf("bad rate was not correctly inserted: got = %v, want = %v", got, bad) - } - - if err := m.markBad(500, sinceZero(0)); err == nil { - t.Fatal("bad rate should have 
been ignored and an error should have been returned") - } - if got := m.highestGood(); got != good { - t.Fatalf("bad rate should have been ignored: got = %v, want = %v", got, good) - } - if got := m.lowestBad(); got != bad { - t.Fatalf("bad rate should have been ignored: got = %v, want = %v", got, bad) - } + + err := m.markGood(good) + require.NoError(t, err) + + err = m.markBad(bad, sinceZero(0)) + require.NoError(t, err) + + got := m.highestGood() + require.Equal(t, good, got, "good rate was not correctly inserted") + + got = m.lowestBad() + require.Equal(t, bad, got, "bad rate was not correctly inserted") + + err = m.markBad(500, sinceZero(0)) + require.Error(t, err, "bad rate should have been ignored and an error should have been returned") + + got = m.highestGood() + require.Equal(t, good, got, "bad rate should have been ignored") + + got = m.lowestBad() + require.Equal(t, bad, got, "bad rate should have been ignored") } func TestMemory_Aging(t *testing.T) { m := newMemory(1, 2*time.Second, 0.10) - m.markBad(100, sinceZero(0)) - if got, want := m.lowestBad(), int64(100); got != want { - t.Fatalf("bad rate was not correctly inserted: got = %v, want = %v", got, want) - } + err := m.markBad(100, sinceZero(0)) + require.NoError(t, err) + + got := m.lowestBad() + require.Equal(t, int64(100), got, "bad rate was not correctly inserted") // Bad rate successfully ages by 10%. m.ageBadRate(sinceZero(2 * time.Second)) - if got, want := m.lowestBad(), int64(110); got != want { - t.Fatalf("bad rate should have been increased due to its age: got = %v, want = %v", got, want) - } + + got = m.lowestBad() + require.Equal(t, int64(110), got, "bad rate should have been increased due to its age") // A recent aging resets the age timer. 
m.ageBadRate(sinceZero(2 * time.Second)) - if got, want := m.lowestBad(), int64(110); got != want { - t.Fatalf("a bad rate should not age again until the age is up again: got = %v, want = %v", got, want) - } + got = m.lowestBad() + require.Equal(t, int64(110), got, "a bad rate should not age again until the age is up again") // The age timer will be reset if the bad rate changes. - m.markBad(100, sinceZero(3*time.Second)) + err = m.markBad(100, sinceZero(3*time.Second)) + require.NoError(t, err) + m.ageBadRate(sinceZero(4 * time.Second)) - if got, want := m.lowestBad(), int64(100); got != want { - t.Fatalf("bad rate must not age yet: got = %v, want = %v", got, want) - } + + got = m.lowestBad() + require.Equal(t, int64(100), got, "bad rate must not age yet") // The age timer won't be reset when the rate stays the same. - m.markBad(100, sinceZero(4*time.Second)) + err = m.markBad(100, sinceZero(4*time.Second)) + require.NoError(t, err) + m.ageBadRate(sinceZero(5 * time.Second)) - if got, want := m.lowestBad(), int64(110); got != want { - t.Fatalf("bad rate should have aged again: got = %v, want = %v", got, want) - } + + got = m.lowestBad() + require.Equal(t, int64(110), got, "bad rate should have aged again") // Update the aging config. It will be effective immediately. m.updateAgingConfiguration(1*time.Second, 0.05) m.ageBadRate(sinceZero(6 * time.Second)) - if got, want := m.lowestBad(), int64(115); got != want { - t.Fatalf("bad rate should have aged after the configuration update: got = %v, want = %v", got, want) - } + + got = m.lowestBad() + require.Equal(t, int64(115), got, "bad rate should have aged after the configuration update") // If the new bad rate is not higher, it should increase by the memory granularity at least. 
m.markBad(5, sinceZero(10*time.Second)) m.ageBadRate(sinceZero(11 * time.Second)) - if got, want := m.lowestBad(), int64(5+memoryGranularity); got != want { - t.Fatalf("bad rate should have aged after the configuration update: got = %v, want = %v", got, want) - } + + got = m.lowestBad() + require.Equal(t, int64(5+memoryGranularity), got, "bad rate should have aged after the configuration update") } diff --git a/go/vt/throttler/replication_lag_cache_test.go b/go/vt/throttler/replication_lag_cache_test.go index 312f97e1999..135c0f03956 100644 --- a/go/vt/throttler/replication_lag_cache_test.go +++ b/go/vt/throttler/replication_lag_cache_test.go @@ -20,6 +20,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/discovery" ) @@ -33,44 +35,39 @@ func TestReplicationLagCache(t *testing.T) { // If there is no entry yet, a zero struct is returned. zeroEntry := c.atOrAfter(r1Key, sinceZero(0*time.Second)) - if !zeroEntry.isZero() { - t.Fatalf("atOrAfter() should have returned a zero entry but did not: %v", zeroEntry) - } + require.True(t, zeroEntry.isZero(), "atOrAfter() should have returned a zero entry") // First entry at 1s. c.add(lagRecord(sinceZero(1*time.Second), r1, 1)) - if got, want := c.latest(r1Key).time, sinceZero(1*time.Second); got != want { - t.Fatalf("latest(r1) = %v, want = %v", got, want) - } + got, want := c.latest(r1Key).time, sinceZero(1*time.Second) + require.Equal(t, want, got) // Second entry at 2s makes the cache full. 
c.add(lagRecord(sinceZero(2*time.Second), r1, 2)) - if got, want := c.latest(r1Key).time, sinceZero(2*time.Second); got != want { - t.Fatalf("latest(r1) = %v, want = %v", got, want) - } - if got, want := c.atOrAfter(r1Key, sinceZero(1*time.Second)).time, sinceZero(1*time.Second); got != want { - t.Fatalf("atOrAfter(r1) = %v, want = %v", got, want) - } + got, want = c.latest(r1Key).time, sinceZero(2*time.Second) + require.Equal(t, want, got) + + got, want = c.atOrAfter(r1Key, sinceZero(1*time.Second)).time, sinceZero(1*time.Second) + require.Equal(t, want, got) // Third entry at 3s evicts the 1s entry. c.add(lagRecord(sinceZero(3*time.Second), r1, 3)) - if got, want := c.latest(r1Key).time, sinceZero(3*time.Second); got != want { - t.Fatalf("latest(r1) = %v, want = %v", got, want) - } + got, want = c.latest(r1Key).time, sinceZero(3*time.Second) + require.Equal(t, want, got) + // Requesting an entry at 1s or after gets us the entry for 2s. - if got, want := c.atOrAfter(r1Key, sinceZero(1*time.Second)).time, sinceZero(2*time.Second); got != want { - t.Fatalf("atOrAfter(r1) = %v, want = %v", got, want) - } + got, want = c.atOrAfter(r1Key, sinceZero(1*time.Second)).time, sinceZero(2*time.Second) + require.Equal(t, want, got) // Wrap around one more time. Entries at 4s and 5s should be left. 
c.add(lagRecord(sinceZero(4*time.Second), r1, 4)) c.add(lagRecord(sinceZero(5*time.Second), r1, 5)) - if got, want := c.latest(r1Key).time, sinceZero(5*time.Second); got != want { - t.Fatalf("latest(r1) = %v, want = %v", got, want) - } - if got, want := c.atOrAfter(r1Key, sinceZero(1*time.Second)).time, sinceZero(4*time.Second); got != want { - t.Fatalf("atOrAfter(r1) = %v, want = %v", got, want) - } + + got, want = c.latest(r1Key).time, sinceZero(5*time.Second) + require.Equal(t, want, got) + + got, want = c.atOrAfter(r1Key, sinceZero(1*time.Second)).time, sinceZero(4*time.Second) + require.Equal(t, want, got) } func TestReplicationLagCache_SortByLag(t *testing.T) { @@ -80,14 +77,10 @@ func TestReplicationLagCache_SortByLag(t *testing.T) { c.add(lagRecord(sinceZero(1*time.Second), r1, 30)) c.sortByLag(1 /* ignoreNSlowestReplicas */, 30 /* minimumReplicationLag */) - if c.slowReplicas[r1Key] { - t.Fatal("the only replica tracked should not get ignored") - } + require.False(t, c.slowReplicas[r1Key], "the only replica tracked should not get ignored") c.add(lagRecord(sinceZero(1*time.Second), r2, 1)) c.sortByLag(1 /* ignoreNSlowestReplicas */, 1 /* minimumReplicationLag */) - if !c.slowReplicas[r1Key] { - t.Fatal("r1 should be tracked as a slow replica") - } + require.True(t, c.slowReplicas[r1Key], "r1 should be tracked as a slow replica") } diff --git a/go/vt/throttler/result.go b/go/vt/throttler/result.go index 179711116a3..0976a180877 100644 --- a/go/vt/throttler/result.go +++ b/go/vt/throttler/result.go @@ -17,8 +17,8 @@ limitations under the License. 
package throttler import ( - "bytes" "fmt" + "strings" "sync" "text/template" "time" @@ -81,7 +81,7 @@ type result struct { } func (r result) String() string { - var b bytes.Buffer + var b strings.Builder if err := resultStringTemplate.Execute(&b, r); err != nil { panic(fmt.Sprintf("failed to Execute() template: %v", err)) } diff --git a/go/vt/throttler/result_test.go b/go/vt/throttler/result_test.go index 9efc7df9412..8cc5357ef7b 100644 --- a/go/vt/throttler/result_test.go +++ b/go/vt/throttler/result_test.go @@ -17,9 +17,10 @@ limitations under the License. package throttler import ( - "reflect" "testing" "time" + + "github.com/stretchr/testify/require" ) var ( @@ -127,9 +128,7 @@ reason: emergency state decreased the rate`, for _, tc := range testcases { got := tc.r.String() - if got != tc.want { - t.Fatalf("record.String() = %v, want = %v for full record: %#v", got, tc.want, tc.r) - } + require.Equal(t, tc.want, got) } } @@ -143,19 +142,16 @@ func TestResultRing(t *testing.T) { // Use the ring partially. rr.add(r1) - if got, want := rr.latestValues(), []result{r1}; !reflect.DeepEqual(got, want) { - t.Fatalf("items not correctly added to resultRing. got = %v, want = %v", got, want) - } + got, want := rr.latestValues(), []result{r1} + require.Equal(t, want, got, "items not correctly added to resultRing") // Use it fully. rr.add(r2) - if got, want := rr.latestValues(), []result{r2, r1}; !reflect.DeepEqual(got, want) { - t.Fatalf("items not correctly added to resultRing. got = %v, want = %v", got, want) - } + got, want = rr.latestValues(), []result{r2, r1} + require.Equal(t, want, got, "items not correctly added to resultRing") // Let it wrap. rr.add(r3) - if got, want := rr.latestValues(), []result{r3, r2}; !reflect.DeepEqual(got, want) { - t.Fatalf("resultRing did not wrap correctly. 
got = %v, want = %v", got, want) - } + got, want = rr.latestValues(), []result{r3, r2} + require.Equal(t, want, got, "resultRing did not wrap correctly") } diff --git a/go/vt/throttler/thread_throttler_test.go b/go/vt/throttler/thread_throttler_test.go index 7cb27e76487..2f97a66c6bc 100644 --- a/go/vt/throttler/thread_throttler_test.go +++ b/go/vt/throttler/thread_throttler_test.go @@ -19,6 +19,8 @@ package throttler import ( "testing" "time" + + "github.com/stretchr/testify/require" ) func TestThrottle_NoBurst(t *testing.T) { @@ -28,11 +30,10 @@ func TestThrottle_NoBurst(t *testing.T) { // 1. This means that in any time interval of length t seconds, the throttler should // not allow more than floor(2*t+1) requests. For example, in the interval [1500ms, 1501ms], of // length 1ms, we shouldn't be able to send more than floor(2*10^-3+1)=1 requests. - if gotBackoff := tt.throttle(sinceZero(1500 * time.Millisecond)); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: backoff = %v", gotBackoff) - } + gotBackoff := tt.throttle(sinceZero(1500 * time.Millisecond)) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") + wantBackoff := 499 * time.Millisecond - if gotBackoff := tt.throttle(sinceZero(1501 * time.Millisecond)); gotBackoff != wantBackoff { - t.Fatalf("throttler should have throttled us. 
got = %v, want = %v", gotBackoff, wantBackoff) - } + gotBackoff = tt.throttle(sinceZero(1501 * time.Millisecond)) + require.Equal(t, wantBackoff, gotBackoff, "throttler should have throttled us") } diff --git a/go/vt/throttler/throttler.go b/go/vt/throttler/throttler.go index 83a1c52225e..909888bd0d4 100644 --- a/go/vt/throttler/throttler.go +++ b/go/vt/throttler/throttler.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/topodata" throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" ) @@ -50,7 +51,7 @@ const ( // MaxRateModuleDisabled can be set in NewThrottler() to disable throttling // by a fixed rate. - MaxRateModuleDisabled = math.MaxInt64 + MaxRateModuleDisabled = int64(math.MaxInt64) // InvalidMaxRate is a constant which will fail in a NewThrottler() call. // It should be used when returning maxRate in an error case. @@ -58,7 +59,7 @@ const ( // ReplicationLagModuleDisabled can be set in NewThrottler() to disable // throttling based on the MySQL replication lag. - ReplicationLagModuleDisabled = math.MaxInt64 + ReplicationLagModuleDisabled = int64(math.MaxInt64) // InvalidMaxReplicationLag is a constant which will fail in a NewThrottler() // call. It should be used when returning maxReplicationlag in an error case. @@ -224,6 +225,28 @@ func (t *Throttler) Throttle(threadID int) time.Duration { return t.threadThrottlers[threadID].throttle(t.nowFunc()) } +// MaxLag returns the max of all the last replication lag values seen across all tablets of +// the provided type, excluding ignored tablets. 
+func (t *Throttler) MaxLag(tabletType topodata.TabletType) uint32 { + cache := t.maxReplicationLagModule.lagCacheByType(tabletType) + + var maxLag uint32 + cacheEntries := cache.entries + + for key := range cacheEntries { + if cache.isIgnored(key) { + continue + } + + lag := cache.latest(key).Stats.ReplicationLagSeconds + if lag > maxLag { + maxLag = lag + } + } + + return maxLag +} + // ThreadFinished marks threadID as finished and redistributes the thread's // rate allotment across the other threads. // After ThreadFinished() is called, Throttle() must not be called anymore. diff --git a/go/vt/throttler/throttler_test.go b/go/vt/throttler/throttler_test.go index 0bb0ed0387a..b33bb2ca255 100644 --- a/go/vt/throttler/throttler_test.go +++ b/go/vt/throttler/throttler_test.go @@ -18,9 +18,10 @@ package throttler import ( "runtime" - "strings" "testing" "time" + + "github.com/stretchr/testify/require" ) // The main purpose of the benchmarks below is to demonstrate the functionality @@ -176,35 +177,30 @@ func TestThrottle(t *testing.T) { // 2 QPS should divide the current second into two chunks of 500 ms: // a) [1s, 1.5s), b) [1.5s, 2s) // First call goes through since the chunk is not "used" yet. - if gotBackoff := throttler.Throttle(0); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: backoff = %v", gotBackoff) - } + gotBackoff := throttler.Throttle(0) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") // Next call should tell us to backoff until we reach the second chunk. fc.setNow(1000 * time.Millisecond) wantBackoff := 500 * time.Millisecond - if gotBackoff := throttler.Throttle(0); gotBackoff != wantBackoff { - t.Fatalf("throttler should have throttled us. 
got = %v, want = %v", gotBackoff, wantBackoff) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, wantBackoff, gotBackoff, "throttler should have throttled us") // Some time elpased, but we are still in the first chunk and must backoff. fc.setNow(1111 * time.Millisecond) wantBackoff2 := 389 * time.Millisecond - if gotBackoff := throttler.Throttle(0); gotBackoff != wantBackoff2 { - t.Fatalf("throttler should have still throttled us. got = %v, want = %v", gotBackoff, wantBackoff2) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, wantBackoff2, gotBackoff, "throttler should have still throttled us") // Enough time elapsed that we are in the second chunk now. fc.setNow(1500 * time.Millisecond) - if gotBackoff := throttler.Throttle(0); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: backoff = %v", gotBackoff) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") // We're in the third chunk and are allowed to issue the third request. fc.setNow(2001 * time.Millisecond) - if gotBackoff := throttler.Throttle(0); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: backoff = %v", gotBackoff) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") } func TestThrottle_RateRemainderIsDistributedAcrossThreads(t *testing.T) { @@ -216,9 +212,8 @@ func TestThrottle_RateRemainderIsDistributedAcrossThreads(t *testing.T) { fc.setNow(1000 * time.Millisecond) // Out of 5 QPS, each thread gets 1 and two threads get 1 query extra. 
for threadID := 0; threadID < 2; threadID++ { - if gotBackoff := throttler.Throttle(threadID); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled thread %d: backoff = %v", threadID, gotBackoff) - } + gotBackoff := throttler.Throttle(threadID) + require.Equalf(t, NotThrottled, gotBackoff, "throttler should not have throttled thread %d", threadID) } fc.setNow(1500 * time.Millisecond) @@ -229,21 +224,18 @@ func TestThrottle_RateRemainderIsDistributedAcrossThreads(t *testing.T) { threadsWithMoreThanOneQPS++ } else { wantBackoff := 500 * time.Millisecond - if gotBackoff != wantBackoff { - t.Fatalf("throttler did throttle us with the wrong backoff time. got = %v, want = %v", gotBackoff, wantBackoff) - } + require.Equal(t, wantBackoff, gotBackoff, "throttler did throttle us with the wrong backoff time") } } if want := 2; threadsWithMoreThanOneQPS != want { - t.Fatalf("wrong number of threads were throttled: %v != %v", threadsWithMoreThanOneQPS, want) + require.Equal(t, want, threadsWithMoreThanOneQPS, "wrong number of threads were throttled") } // Now, all threads are throttled. for threadID := 0; threadID < 2; threadID++ { wantBackoff := 500 * time.Millisecond - if gotBackoff := throttler.Throttle(threadID); gotBackoff != wantBackoff { - t.Fatalf("throttler should have throttled thread %d. got = %v, want = %v", threadID, gotBackoff, wantBackoff) - } + gotBackoff := throttler.Throttle(threadID) + require.Equalf(t, wantBackoff, gotBackoff, "throttler should have throttled thread %d", threadID) } } @@ -256,16 +248,14 @@ func TestThreadFinished(t *testing.T) { // [1000ms, 2000ms): Each thread consumes their 1 QPS. 
fc.setNow(1000 * time.Millisecond) for threadID := 0; threadID < 2; threadID++ { - if gotBackoff := throttler.Throttle(threadID); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled thread %d: backoff = %v", threadID, gotBackoff) - } + gotBackoff := throttler.Throttle(threadID) + require.Equalf(t, NotThrottled, gotBackoff, "throttler should not have throttled thread %d", threadID) } // Now they would be throttled. wantBackoff := 1000 * time.Millisecond for threadID := 0; threadID < 2; threadID++ { - if gotBackoff := throttler.Throttle(threadID); gotBackoff != wantBackoff { - t.Fatalf("throttler should have throttled thread %d. got = %v, want = %v", threadID, gotBackoff, wantBackoff) - } + gotBackoff := throttler.Throttle(threadID) + require.Equalf(t, wantBackoff, gotBackoff, "throttler should have throttled thread %d", threadID) } // [2000ms, 3000ms): One thread finishes, other one gets remaining 1 QPS extra. @@ -288,29 +278,23 @@ func TestThreadFinished(t *testing.T) { } // Consume 2 QPS. - if gotBackoff := throttler.Throttle(0); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: backoff = %v", gotBackoff) - } + gotBackoff := throttler.Throttle(0) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") + fc.setNow(2500 * time.Millisecond) - if gotBackoff := throttler.Throttle(0); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: backoff = %v", gotBackoff) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") // 2 QPS are consumed. Thread 0 should be throttled now. wantBackoff2 := 500 * time.Millisecond - if gotBackoff := throttler.Throttle(0); gotBackoff != wantBackoff2 { - t.Fatalf("throttler should have throttled us. 
got = %v, want = %v", gotBackoff, wantBackoff2) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, wantBackoff2, gotBackoff, "throttler should have throttled us") // Throttle() from a finished thread will panic. defer func() { msg := recover() - if msg == nil { - t.Fatal("Throttle() from a thread which called ThreadFinished() should panic") - } - if !strings.Contains(msg.(string), "already finished") { - t.Fatalf("Throttle() after ThreadFinished() panic'd for wrong reason: %v", msg) - } + require.NotNil(t, msg) + require.Contains(t, msg, "already finished", "Throttle() after ThreadFinished() panic'd for wrong reason") }() throttler.Throttle(1) } @@ -326,19 +310,18 @@ func TestThrottle_MaxRateIsZero(t *testing.T) { fc.setNow(1000 * time.Millisecond) wantBackoff := 1000 * time.Millisecond - if gotBackoff := throttler.Throttle(0); gotBackoff != wantBackoff { - t.Fatalf("throttler should have throttled us. got = %v, want = %v", gotBackoff, wantBackoff) - } + gotBackoff := throttler.Throttle(0) + require.Equal(t, wantBackoff, gotBackoff, "throttler should have throttled us") + fc.setNow(1111 * time.Millisecond) wantBackoff2 := 1000 * time.Millisecond - if gotBackoff := throttler.Throttle(0); gotBackoff != wantBackoff2 { - t.Fatalf("throttler should have throttled us. got = %v, want = %v", gotBackoff, wantBackoff2) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, wantBackoff2, gotBackoff, "throttler should have throttled us") + fc.setNow(2000 * time.Millisecond) wantBackoff3 := 1000 * time.Millisecond - if gotBackoff := throttler.Throttle(0); gotBackoff != wantBackoff3 { - t.Fatalf("throttler should have throttled us. 
got = %v, want = %v", gotBackoff, wantBackoff3) - } + gotBackoff = throttler.Throttle(0) + require.Equal(t, wantBackoff3, gotBackoff, "throttler should have throttled us") } func TestThrottle_MaxRateDisabled(t *testing.T) { @@ -349,9 +332,8 @@ func TestThrottle_MaxRateDisabled(t *testing.T) { fc.setNow(1000 * time.Millisecond) // No QPS set. 10 requests in a row are fine. for i := 0; i < 10; i++ { - if gotBackoff := throttler.Throttle(0); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled us: request = %v, backoff = %v", i, gotBackoff) - } + gotBackoff := throttler.Throttle(0) + require.Equal(t, NotThrottled, gotBackoff, "throttler should not have throttled us") } } @@ -368,15 +350,13 @@ func TestThrottle_MaxRateLowerThanThreadCount(t *testing.T) { // must not starve. fc.setNow(1000 * time.Millisecond) for threadID := 0; threadID < 1; threadID++ { - if gotBackoff := throttler.Throttle(threadID); gotBackoff != NotThrottled { - t.Fatalf("throttler should not have throttled thread %d: backoff = %v", threadID, gotBackoff) - } + gotBackoff := throttler.Throttle(threadID) + require.Equalf(t, NotThrottled, gotBackoff, "throttler should not have throttled thread %d", threadID) } wantBackoff := 1000 * time.Millisecond for threadID := 0; threadID < 1; threadID++ { - if gotBackoff := throttler.Throttle(threadID); gotBackoff != wantBackoff { - t.Fatalf("throttler should have throttled thread %d: got = %v, want = %v", threadID, gotBackoff, wantBackoff) - } + gotBackoff := throttler.Throttle(threadID) + require.Equalf(t, wantBackoff, gotBackoff, "throttler should have throttled thread %d", threadID) } } @@ -400,12 +380,8 @@ func TestClose(t *testing.T) { defer func() { msg := recover() - if msg == nil { - t.Fatal("Throttle() after Close() should panic") - } - if !strings.Contains(msg.(string), "must not access closed Throttler") { - t.Fatalf("Throttle() after ThreadFinished() panic'd for wrong reason: %v", msg) - } + require.NotNil(t, msg) + 
require.Contains(t, msg, "must not access closed Throttler", "Throttle() after ThreadFinished() panic'd for wrong reason") }() throttler.Throttle(0) } @@ -417,12 +393,8 @@ func TestThreadFinished_SecondCallPanics(t *testing.T) { defer func() { msg := recover() - if msg == nil { - t.Fatal("Second ThreadFinished() after ThreadFinished() should panic") - } - if !strings.Contains(msg.(string), "already finished") { - t.Fatalf("ThreadFinished() after ThreadFinished() panic'd for wrong reason: %v", msg) - } + require.NotNil(t, msg) + require.Contains(t, msg, "already finished", "Throttle() after ThreadFinished() panic'd for wrong reason") }() throttler.ThreadFinished(0) } diff --git a/go/vt/throttler/throttlerclient/throttlerclient.go b/go/vt/throttler/throttlerclient/throttlerclient.go deleted file mode 100644 index cf01ccb1239..00000000000 --- a/go/vt/throttler/throttlerclient/throttlerclient.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package throttlerclient defines the generic RPC client interface for the -// throttler service. It has to be implemented for the different RPC frameworks -// e.g. gRPC. -package throttlerclient - -import ( - "fmt" - "log" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/servenv" - - "context" - - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" -) - -// protocol specifics which RPC client implementation should be used. 
-var protocol = "grpc" - -func init() { - servenv.OnParseFor("vttablet", registerFlags) -} - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&protocol, "throttler_client_protocol", protocol, "the protocol to use to talk to the integrated throttler service") -} - -// Client defines the generic RPC interface for the throttler service. -type Client interface { - // MaxRates returns the current max rate for each throttler of the process. - MaxRates(ctx context.Context) (map[string]int64, error) - - // SetMaxRate allows to change the current max rate for all throttlers - // of the process. - // It returns the names of the updated throttlers. - SetMaxRate(ctx context.Context, rate int64) ([]string, error) - - // GetConfiguration returns the configuration of the MaxReplicationlag module - // for the given throttler or all throttlers if "throttlerName" is empty. - GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdatapb.Configuration, error) - - // UpdateConfiguration (partially) updates the configuration of the - // MaxReplicationlag module for the given throttler or all throttlers if - // "throttlerName" is empty. - // If "copyZeroValues" is true, fields with zero values will be copied - // as well. - // The function returns the names of the updated throttlers. - UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) - - // ResetConfiguration resets the configuration of the MaxReplicationlag module - // to the initial configuration for the given throttler or all throttlers if - // "throttlerName" is empty. - // The function returns the names of the updated throttlers. - ResetConfiguration(ctx context.Context, throttlerName string) ([]string, error) - - // Close will terminate the connection and free resources. - Close() -} - -// Factory has to be implemented and must create a new RPC client for a given -// "addr". 
-type Factory func(addr string) (Client, error) - -var factories = make(map[string]Factory) - -// RegisterFactory allows a client implementation to register itself. -func RegisterFactory(name string, factory Factory) { - if _, ok := factories[name]; ok { - log.Fatalf("RegisterFactory: %s already exists", name) - } - factories[name] = factory -} - -// New will return a client for the selected RPC implementation. -func New(addr string) (Client, error) { - factory, ok := factories[protocol] - if !ok { - return nil, fmt.Errorf("unknown throttler client protocol: %v", protocol) - } - return factory(addr) -} diff --git a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go b/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go deleted file mode 100644 index 38fd9d76286..00000000000 --- a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package throttlerclienttest contains the testsuite against which each -// RPC implementation of the throttlerclient interface must be tested. -package throttlerclienttest - -// NOTE: This file is not test-only code because it is referenced by -// tests in other packages and therefore it has to be regularly -// visible. - -// NOTE: This code is in its own package such that its dependencies -// (e.g. zookeeper) won't be drawn into production binaries as well. 
- -import ( - "reflect" - "strings" - "testing" - - "context" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/throttler" - "vitess.io/vitess/go/vt/throttler/throttlerclient" - - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" -) - -// TestSuite runs the test suite on the given throttlerclient and throttlerserver. -func TestSuite(t *testing.T, c throttlerclient.Client) { - tf := &testFixture{} - if err := tf.setUp(); err != nil { - t.Fatal(err) - } - defer tf.tearDown() - - tf.maxRates(t, c) - - tf.setMaxRate(t, c) - - tf.configuration(t, c) -} - -// TestSuitePanics tests the panic handling of each RPC method. Unlike TestSuite -// it does not use the real throttler.managerImpl. Instead, it uses FakeManager -// which allows us to panic on each RPC. -func TestSuitePanics(t *testing.T, c throttlerclient.Client) { - maxRatesPanics(t, c) - - setMaxRatePanics(t, c) - - getConfigurationPanics(t, c) - - updateConfigurationPanics(t, c) - - resetConfigurationPanics(t, c) -} - -var throttlerNames = []string{"t1", "t2"} - -type testFixture struct { - throttlers []*throttler.Throttler -} - -func (tf *testFixture) setUp() error { - for _, name := range throttlerNames { - t, err := throttler.NewThrottler(name, "TPS", 1 /* threadCount */, 1, throttler.ReplicationLagModuleDisabled) - if err != nil { - return err - } - tf.throttlers = append(tf.throttlers, t) - } - return nil -} - -func (tf *testFixture) tearDown() { - for _, t := range tf.throttlers { - t.Close() - } -} - -func (tf *testFixture) maxRates(t *testing.T, client throttlerclient.Client) { - _, err := client.SetMaxRate(context.Background(), 23) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - - got, err := client.MaxRates(context.Background()) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - want := map[string]int64{ - "t1": 23, - "t2": 23, - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("rate was not updated on all registered 
throttlers. got = %v, want = %v", got, throttlerNames) - } -} - -func (tf *testFixture) setMaxRate(t *testing.T, client throttlerclient.Client) { - got, err := client.SetMaxRate(context.Background(), 23) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - - if !reflect.DeepEqual(got, throttlerNames) { - t.Fatalf("rate was not updated on all registered throttlers. got = %v, want = %v", got, throttlerNames) - } -} - -func (tf *testFixture) configuration(t *testing.T, client throttlerclient.Client) { - initialConfigs, err := client.GetConfiguration(context.Background(), "" /* all */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - - // Test UpdateConfiguration. - config := &throttlerdatapb.Configuration{ - TargetReplicationLagSec: 1, - MaxReplicationLagSec: 2, - InitialRate: 3, - MaxIncrease: 0.4, - EmergencyDecrease: 0.5, - MinDurationBetweenIncreasesSec: 6, - MaxDurationBetweenIncreasesSec: 7, - MinDurationBetweenDecreasesSec: 8, - SpreadBacklogAcrossSec: 9, - IgnoreNSlowestReplicas: 10, - IgnoreNSlowestRdonlys: 11, - AgeBadRateAfterSec: 12, - BadRateIncrease: 0.13, - MaxRateApproachThreshold: 0.9, - } - names, err := client.UpdateConfiguration(context.Background(), "t2", config /* false */, true /* copyZeroValues */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if got, want := names, []string{"t2"}; !reflect.DeepEqual(got, want) { - t.Fatalf("returned names of updated throttlers is wrong. got = %v, want = %v", got, want) - } - - // Test GetConfiguration. - configs, err := client.GetConfiguration(context.Background(), "t2") - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if len(configs) != 1 || configs["t2"] == nil { - t.Fatalf("wrong named configuration returned. got = %v, want configuration for t2", configs) - } - if got, want := configs["t2"], config; !proto.Equal(got, want) { - t.Fatalf("did not read updated config. 
got = %v, want = %v", got, want) - } - - // Reset should return the initial configs. - namesForReset, err := client.ResetConfiguration(context.Background(), "" /* all */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if got, want := namesForReset, throttlerNames; !reflect.DeepEqual(got, want) { - t.Fatalf("returned names of reset throttlers is wrong. got = %v, want = %v", got, want) - } - - // Verify that it was correctly set. - configsAfterReset, err := client.GetConfiguration(context.Background(), "" /* all */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if got, want := configsAfterReset, initialConfigs; !reflect.DeepEqual(got, want) { - t.Fatalf("wrong configurations after reset. got = %v, want = %v", got, want) - } -} - -// FakeManager implements the throttler.Manager interface and panics on all -// methods defined in the interface. -type FakeManager struct { -} - -const panicMsg = "RPC server implementation should handle this" - -// MaxRates implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) MaxRates() map[string]int64 { - panic(panicMsg) -} - -// SetMaxRate implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) SetMaxRate(int64) []string { - panic(panicMsg) -} - -// GetConfiguration implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) GetConfiguration(throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { - panic(panicMsg) -} - -// UpdateConfiguration implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { - panic(panicMsg) -} - -// ResetConfiguration implements the throttler.Manager interface. It always panics. 
-func (fm *FakeManager) ResetConfiguration(throttlerName string) ([]string, error) { - panic(panicMsg) -} - -// Test methods which test for each RPC that panics are caught. - -func maxRatesPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.MaxRates(context.Background()) - if !errorFromPanicHandler(err) { - t.Fatalf("MaxRates RPC implementation does not catch panics properly: %v", err) - } -} - -func setMaxRatePanics(t *testing.T, client throttlerclient.Client) { - _, err := client.SetMaxRate(context.Background(), 23) - if !errorFromPanicHandler(err) { - t.Fatalf("SetMaxRate RPC implementation does not catch panics properly: %v", err) - } -} - -func getConfigurationPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.GetConfiguration(context.Background(), "") - if !errorFromPanicHandler(err) { - t.Fatalf("GetConfiguration RPC implementation does not catch panics properly: %v", err) - } -} - -func updateConfigurationPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.UpdateConfiguration(context.Background(), "", nil, false) - if !errorFromPanicHandler(err) { - t.Fatalf("UpdateConfiguration RPC implementation does not catch panics properly: %v", err) - } -} - -func resetConfigurationPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.ResetConfiguration(context.Background(), "") - if !errorFromPanicHandler(err) { - t.Fatalf("ResetConfiguration RPC implementation does not catch panics properly: %v", err) - } -} - -func errorFromPanicHandler(err error) bool { - if err == nil || !strings.Contains(err.Error(), panicMsg) { - return false - } - return true -} diff --git a/go/vt/throttler/throttlerlogz_test.go b/go/vt/throttler/throttlerlogz_test.go index 6fdb137577c..22927f3f201 100644 --- a/go/vt/throttler/throttlerlogz_test.go +++ b/go/vt/throttler/throttlerlogz_test.go @@ -19,8 +19,9 @@ package throttler import ( "net/http" "net/http/httptest" - "strings" "testing" + + 
"github.com/stretchr/testify/require" ) func TestThrottlerlogzHandler_MissingSlash(t *testing.T) { @@ -30,9 +31,8 @@ func TestThrottlerlogzHandler_MissingSlash(t *testing.T) { throttlerlogzHandler(response, request, m) - if got, want := response.Body.String(), "invalid /throttlerlogz path"; !strings.Contains(got, want) { - t.Fatalf("/throttlerlogz without the slash does not work (the Go HTTP server does automatically redirect in practice though). got = %v, want = %v", got, want) - } + got := response.Body.String() + require.Contains(t, got, "invalid /throttlerlogz path", "/throttlerlogz without the slash does not work (the Go HTTP server does automatically redirect in practice though)") } func TestThrottlerlogzHandler_NonExistantThrottler(t *testing.T) { @@ -41,9 +41,8 @@ func TestThrottlerlogzHandler_NonExistantThrottler(t *testing.T) { throttlerlogzHandler(response, request, newManager()) - if got, want := response.Body.String(), `throttler not found`; !strings.Contains(got, want) { - t.Fatalf("/throttlerlogz page for non-existent t1 should not succeed. got = %v, want = %v", got, want) - } + got := response.Body.String() + require.Contains(t, got, "throttler not found", "/throttlerlogz page for non-existent t1 should not succeed") } func TestThrottlerlogzHandler(t *testing.T) { @@ -152,8 +151,6 @@ func TestThrottlerlogzHandler(t *testing.T) { throttlerlogzHandler(response, request, f.m) got := response.Body.String() - if !strings.Contains(got, tc.want) { - t.Fatalf("testcase '%v': result not shown in log. 
got = %v, want = %v", tc.desc, got, tc.want) - } + require.Containsf(t, got, tc.want, "testcase '%v': result not shown in log", tc.desc) } } diff --git a/go/vt/throttler/throttlerz_test.go b/go/vt/throttler/throttlerz_test.go index be40598468a..9fd95603439 100644 --- a/go/vt/throttler/throttlerz_test.go +++ b/go/vt/throttler/throttlerz_test.go @@ -19,8 +19,9 @@ package throttler import ( "net/http" "net/http/httptest" - "strings" "testing" + + "github.com/stretchr/testify/require" ) func TestThrottlerzHandler_MissingSlash(t *testing.T) { @@ -30,9 +31,8 @@ func TestThrottlerzHandler_MissingSlash(t *testing.T) { throttlerzHandler(response, request, m) - if got, want := response.Body.String(), "invalid /throttlerz path"; !strings.Contains(got, want) { - t.Fatalf("/throttlerz without the slash does not work (the Go HTTP server does automatically redirect in practice though). got = %v, want = %v", got, want) - } + got := response.Body.String() + require.Contains(t, got, "invalid /throttlerz path", "/throttlerz without the slash does not work (the Go HTTP server does automatically redirect in practice though)") } func TestThrottlerzHandler_List(t *testing.T) { @@ -47,12 +47,9 @@ func TestThrottlerzHandler_List(t *testing.T) { throttlerzHandler(response, request, f.m) - if got, want := response.Body.String(), `
t1`; !strings.Contains(got, want) { - t.Fatalf("list does not include 't1'. got = %v, want = %v", got, want) - } - if got, want := response.Body.String(), `t2`; !strings.Contains(got, want) { - t.Fatalf("list does not include 't1'. got = %v, want = %v", got, want) - } + got := response.Body.String() + require.Contains(t, got, `t1`, "list does not include 't1'") + require.Contains(t, got, `t2`, "list does not include 't2'") } func TestThrottlerzHandler_Details(t *testing.T) { @@ -67,7 +64,6 @@ func TestThrottlerzHandler_Details(t *testing.T) { throttlerzHandler(response, request, f.m) - if got, want := response.Body.String(), `Details for Throttler 't1'`; !strings.Contains(got, want) { - t.Fatalf("details for 't1' not shown. got = %v, want = %v", got, want) - } + got := response.Body.String() + require.Contains(t, got, `Details for Throttler 't1'`, "details for 't1' not shown") } diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go index 5c79e45b906..1a6e0ae70ba 100644 --- a/go/vt/tlstest/tlstest_test.go +++ b/go/vt/tlstest/tlstest_test.go @@ -28,6 +28,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/vt/vttls" ) @@ -89,21 +90,20 @@ func testClientServer(t *testing.T, combineCerts bool) { dialer := new(net.Dialer) dialer.Timeout = 10 * time.Second - wg := sync.WaitGroup{} - // // Positive case: accept on server side, connect a client, send data. 
// - var clientErr error - wg.Add(1) - go func() { - defer wg.Done() - clientConn, clientErr := tls.DialWithDialer(dialer, "tcp", addr, clientConfig) - if clientErr == nil { - _, _ = clientConn.Write([]byte{42}) - clientConn.Close() + var clientEG errgroup.Group + clientEG.Go(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addr, clientConfig) + if err != nil { + return err } - }() + + _, _ = conn.Write([]byte{42}) + _ = conn.Close() + return nil + }) serverConn, err := listener.Accept() if err != nil { @@ -119,10 +119,8 @@ func testClientServer(t *testing.T, combineCerts bool) { } serverConn.Close() - wg.Wait() - - if clientErr != nil { - t.Fatalf("Dial failed: %v", clientErr) + if err := clientEG.Wait(); err != nil { + t.Fatalf("client dial failed: %v", err) } // @@ -142,21 +140,23 @@ func testClientServer(t *testing.T, combineCerts bool) { t.Fatalf("TLSClientConfig failed: %v", err) } - var serverErr error - wg.Add(1) - go func() { + var serverEG errgroup.Group + serverEG.Go(func() error { // We expect the Accept to work, but the first read to fail. - defer wg.Done() - serverConn, serverErr := listener.Accept() + conn, err := listener.Accept() + if err != nil { + return err + } + // This will fail. - if serverErr == nil { - result := make([]byte, 1) - if n, err := serverConn.Read(result); err == nil { - fmt.Printf("Was able to read from server: %v\n", n) - } - serverConn.Close() + result := make([]byte, 1) + if n, err := conn.Read(result); err == nil { + return fmt.Errorf("unexpectedly able to read %d bytes from server", n) } - }() + + _ = conn.Close() + return nil + }) // When using TLS 1.2, the Dial will fail. // With TLS 1.3, the Dial will succeed and the first Read will fail. 
@@ -167,9 +167,9 @@ func testClientServer(t *testing.T, combineCerts bool) { } return } - wg.Wait() - if serverErr != nil { - t.Fatalf("Connection failed: %v", serverErr) + + if err := serverEG.Wait(); err != nil { + t.Fatalf("server read failed: %v", err) } data := make([]byte, 1) diff --git a/go/vt/topo/consultopo/error.go b/go/vt/topo/consultopo/error.go index 42f474e065b..62167a4d295 100644 --- a/go/vt/topo/consultopo/error.go +++ b/go/vt/topo/consultopo/error.go @@ -40,15 +40,16 @@ var ( // are either application-level errors, or context errors. func convertError(err error, nodePath string) error { // Unwrap errors from the Go HTTP client. - if urlErr, ok := err.(*url.Error); ok { + var urlErr *url.Error + if errors.As(err, &urlErr) { err = urlErr.Err } // Convert specific sentinel values. - switch err { - case context.Canceled: + switch { + case errors.Is(err, context.Canceled): return topo.NewError(topo.Interrupted, nodePath) - case context.DeadlineExceeded: + case errors.Is(err, context.DeadlineExceeded): return topo.NewError(topo.Timeout, nodePath) } diff --git a/go/vt/topo/consultopo/version.go b/go/vt/topo/consultopo/version.go index 49071136024..6157ba4dc71 100644 --- a/go/vt/topo/consultopo/version.go +++ b/go/vt/topo/consultopo/version.go @@ -18,8 +18,6 @@ package consultopo import ( "fmt" - - "vitess.io/vitess/go/vt/topo" ) // ConsulVersion is consul's idea of a version. @@ -31,13 +29,3 @@ type ConsulVersion uint64 func (v ConsulVersion) String() string { return fmt.Sprintf("%v", uint64(v)) } - -// VersionFromInt is used by old-style functions to create a proper -// Version: if version is -1, returns nil. Otherwise returns the -// ConsulVersion object. 
-func VersionFromInt(version int64) topo.Version { - if version == -1 { - return nil - } - return ConsulVersion(version) -} diff --git a/go/vt/topo/errors.go b/go/vt/topo/errors.go index a645f1aa178..3be4b60b103 100644 --- a/go/vt/topo/errors.go +++ b/go/vt/topo/errors.go @@ -36,6 +36,7 @@ const ( NoUpdateNeeded NoImplementation NoReadOnlyImplementation + ResourceExhausted ) // Error represents a topo error. @@ -68,6 +69,8 @@ func NewError(code ErrorCode, node string) error { message = fmt.Sprintf("no such topology implementation %s", node) case NoReadOnlyImplementation: message = fmt.Sprintf("no read-only topology implementation %s", node) + case ResourceExhausted: + message = fmt.Sprintf("server resource exhausted: %s", node) default: message = fmt.Sprintf("unknown code: %s", node) } diff --git a/go/vt/topo/etcd2topo/error.go b/go/vt/topo/etcd2topo/error.go index e784fecd9b9..5e13d0bdf8d 100644 --- a/go/vt/topo/etcd2topo/error.go +++ b/go/vt/topo/etcd2topo/error.go @@ -45,7 +45,8 @@ func convertError(err error, nodePath string) error { return nil } - if typeErr, ok := err.(rpctypes.EtcdError); ok { + var typeErr rpctypes.EtcdError + if errors.As(err, &typeErr) { switch typeErr.Code() { case codes.NotFound: return topo.NewError(topo.NoNode, nodePath) @@ -61,6 +62,8 @@ func convertError(err error, nodePath string) error { // etcd primary election is failing, so timeout // also sounds reasonable there. 
return topo.NewError(topo.Timeout, nodePath) + case codes.ResourceExhausted: + return topo.NewError(topo.ResourceExhausted, nodePath) } return err } @@ -74,15 +77,17 @@ func convertError(err error, nodePath string) error { return topo.NewError(topo.Interrupted, nodePath) case codes.DeadlineExceeded: return topo.NewError(topo.Timeout, nodePath) + case codes.ResourceExhausted: + return topo.NewError(topo.ResourceExhausted, nodePath) default: return err } } - switch err { - case context.Canceled: + switch { + case errors.Is(err, context.Canceled): return topo.NewError(topo.Interrupted, nodePath) - case context.DeadlineExceeded: + case errors.Is(err, context.DeadlineExceeded): return topo.NewError(topo.Timeout, nodePath) default: return err diff --git a/go/vt/topo/etcd2topo/server_test.go b/go/vt/topo/etcd2topo/server_test.go index 732829ee78b..bbf9f8e9164 100644 --- a/go/vt/topo/etcd2topo/server_test.go +++ b/go/vt/topo/etcd2topo/server_test.go @@ -22,9 +22,12 @@ import ( "os" "os/exec" "path" + "strings" "testing" "time" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/testfiles" "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -36,12 +39,14 @@ import ( ) // startEtcd starts an etcd subprocess, and waits for it to be ready. -func startEtcd(t *testing.T) string { +func startEtcd(t *testing.T, port int) (string, *exec.Cmd) { // Create a temporary directory. dataDir := t.TempDir() // Get our two ports to listen to. - port := testfiles.GoVtTopoEtcd2topoPort + if port == 0 { + port = testfiles.GoVtTopoEtcd2topoPort + } name := "vitess_unit_test" clientAddr := fmt.Sprintf("http://localhost:%v", port) peerAddr := fmt.Sprintf("http://localhost:%v", port+1) @@ -94,7 +99,7 @@ func startEtcd(t *testing.T) string { } }) - return clientAddr + return clientAddr, cmd } // startEtcdWithTLS starts an etcd subprocess with TLS setup, and waits for it to be ready. 
@@ -219,7 +224,7 @@ func TestEtcd2TLS(t *testing.T) { func TestEtcd2Topo(t *testing.T) { // Start a single etcd in the background. - clientAddr := startEtcd(t) + clientAddr, _ := startEtcd(t, 0) testIndex := 0 newServer := func() *topo.Server { @@ -257,6 +262,105 @@ func TestEtcd2Topo(t *testing.T) { ts.Close() } +// TestEtcd2TopoGetTabletsPartialResults confirms that GetTablets handles partial results +// correctly when etcd2 is used along with the normal vtctldclient <-> vtctld client/server +// path. +func TestEtcd2TopoGetTabletsPartialResults(t *testing.T) { + ctx := context.Background() + cells := []string{"cell1", "cell2"} + root := "/vitess" + // Start three etcd instances in the background. One will serve the global topo data + // while the other two will serve the cell topo data. + globalClientAddr, _ := startEtcd(t, 0) + cellClientAddrs := make([]string, len(cells)) + cellClientCmds := make([]*exec.Cmd, len(cells)) + cellTSs := make([]*topo.Server, len(cells)) + for i := 0; i < len(cells); i++ { + addr, cmd := startEtcd(t, testfiles.GoVtTopoEtcd2topoPort+(i+100*i)) + cellClientAddrs[i] = addr + cellClientCmds[i] = cmd + } + require.Equal(t, len(cells), len(cellTSs)) + + // Setup the global topo server. + globalTS, err := topo.OpenServer("etcd2", globalClientAddr, path.Join(root, topo.GlobalCell)) + require.NoError(t, err, "OpenServer() failed for global topo server: %v", err) + + // Setup the cell topo servers. + for i, cell := range cells { + cellTSs[i], err = topo.OpenServer("etcd2", cellClientAddrs[i], path.Join(root, topo.GlobalCell)) + require.NoError(t, err, "OpenServer() failed for cell %s topo server: %v", cell, err) + } + + // Create the CellInfo and Tablet records/keys. 
+ for i, cell := range cells { + err = globalTS.CreateCellInfo(ctx, cell, &topodatapb.CellInfo{ + ServerAddress: cellClientAddrs[i], + Root: path.Join(root, cell), + }) + require.NoError(t, err, "CreateCellInfo() failed in global cell for cell %s: %v", cell, err) + ta := &topodatapb.TabletAlias{ + Cell: cell, + Uid: uint32(100 + i), + } + err = globalTS.CreateTablet(ctx, &topodatapb.Tablet{Alias: ta}) + require.NoError(t, err, "CreateTablet() failed in cell %s: %v", cell, err) + } + + // This returns stdout and stderr lines as a slice of strings along with the command error. + getTablets := func(strict bool) ([]string, []string, error) { + cmd := exec.Command("vtctldclient", "--server", "internal", "--topo-implementation", "etcd2", "--topo-global-server-address", globalClientAddr, "GetTablets", fmt.Sprintf("--strict=%t", strict)) + var stdout, stderr strings.Builder + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + // Trim any leading and trailing newlines so we don't have an empty string at + // either end of the slices which throws off the logical number of lines produced. + var stdoutLines, stderrLines []string + if stdout.Len() > 0 { // Otherwise we'll have a 1 element slice with an empty string + stdoutLines = strings.Split(strings.Trim(stdout.String(), "\n"), "\n") + } + if stderr.Len() > 0 { // Otherwise we'll have a 1 element slice with an empty string + stderrLines = strings.Split(strings.Trim(stderr.String(), "\n"), "\n") + } + return stdoutLines, stderrLines, err + } + + // Execute the vtctldclient command. + stdout, stderr, err := getTablets(false) + require.NoError(t, err, "Unexpected error: %v, output: %s", err, strings.Join(stdout, "\n")) + // We get each of the single tablets in each cell. + require.Len(t, stdout, len(cells)) + // And no error message. + require.Len(t, stderr, 0, "Unexpected error message: %s", strings.Join(stderr, "\n")) + + // Stop the last cell topo server. 
+ cmd := cellClientCmds[len(cells)-1] + require.NotNil(t, cmd) + err = cmd.Process.Kill() + require.NoError(t, err) + _ = cmd.Wait() + + // Execute the vtctldclient command to get partial results. + stdout, stderr, err = getTablets(false) + require.NoError(t, err, "Unexpected error: %v, output: %s", err, strings.Join(stdout, "\n")) + // We get partial results, missing the tablet from the last cell. + require.Len(t, stdout, len(cells)-1, "Unexpected output: %s", strings.Join(stdout, "\n")) + // We get an error message for the cell that was unreachable. + require.Greater(t, len(stderr), 0, "Unexpected error message: %s", strings.Join(stderr, "\n")) + + // Execute the vtctldclient command with strict enabled. + _, stderr, err = getTablets(true) + require.Error(t, err) // We get an error + // We still get an error message printed to the console for the cell that was unreachable. + require.Greater(t, len(stderr), 0, "Unexpected error message: %s", strings.Join(stderr, "\n")) + + globalTS.Close() + for _, cellTS := range cellTSs { + cellTS.Close() + } +} + // testKeyspaceLock tests etcd-specific heartbeat (TTL). // Note TTL granularity is in seconds, even though the API uses time.Duration. // So we have to wait a long time in these tests. diff --git a/go/vt/topo/etcd2topo/version.go b/go/vt/topo/etcd2topo/version.go index 5fc0a704af8..004719e1522 100644 --- a/go/vt/topo/etcd2topo/version.go +++ b/go/vt/topo/etcd2topo/version.go @@ -18,8 +18,6 @@ package etcd2topo import ( "fmt" - - "vitess.io/vitess/go/vt/topo" ) // EtcdVersion is etcd's idea of a version. @@ -31,13 +29,3 @@ type EtcdVersion int64 func (v EtcdVersion) String() string { return fmt.Sprintf("%v", int64(v)) } - -// VersionFromInt is used by old-style functions to create a proper -// Version: if version is -1, returns nil. Otherwise returns the -// EtcdVersion object. 
-func VersionFromInt(version int64) topo.Version { - if version == -1 { - return nil - } - return EtcdVersion(version) -} diff --git a/go/vt/topo/etcd2topo/watch.go b/go/vt/topo/etcd2topo/watch.go index cdc9be44b21..92db205f6d8 100644 --- a/go/vt/topo/etcd2topo/watch.go +++ b/go/vt/topo/etcd2topo/watch.go @@ -51,7 +51,10 @@ func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, < } wd := &topo.WatchData{ Contents: initial.Kvs[0].Value, - Version: EtcdVersion(initial.Kvs[0].ModRevision), + // ModRevision is used for the topo.Version value as we get the new Revision value back + // when updating the file/key within a transaction in file.go and so this is the opaque + // version that we can use to enforce serializabile writes for the file/key. + Version: EtcdVersion(initial.Kvs[0].ModRevision), } // Create an outer context that will be canceled on return and will cancel all inner watches. @@ -76,7 +79,7 @@ func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, < defer close(notifications) defer outerCancel() - var currVersion = initial.Header.Revision + var rev = initial.Header.Revision var watchRetries int for { select { @@ -107,9 +110,9 @@ func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, < // Cancel inner context on retry and create new one. 
watchCancel() watchCtx, watchCancel = context.WithCancel(ctx) - newWatcher := s.cli.Watch(watchCtx, nodePath, clientv3.WithRev(currVersion)) + newWatcher := s.cli.Watch(watchCtx, nodePath, clientv3.WithRev(rev)) if newWatcher == nil { - log.Warningf("watch %v failed and get a nil channel returned, currVersion: %v", nodePath, currVersion) + log.Warningf("watch %v failed and get a nil channel returned, rev: %v", nodePath, rev) } else { watcher = newWatcher } @@ -126,14 +129,14 @@ func (s *Server) Watch(ctx context.Context, filePath string) (*topo.WatchData, < return } - currVersion = wresp.Header.GetRevision() + rev = wresp.Header.GetRevision() for _, ev := range wresp.Events { switch ev.Type { case mvccpb.PUT: notifications <- &topo.WatchData{ Contents: ev.Kv.Value, - Version: EtcdVersion(ev.Kv.Version), + Version: EtcdVersion(ev.Kv.ModRevision), } case mvccpb.DELETE: // Node is gone, send a final notice. @@ -200,7 +203,7 @@ func (s *Server) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Wa defer close(notifications) defer outerCancel() - var currVersion = initial.Header.Revision + var rev = initial.Header.Revision var watchRetries int for { select { @@ -228,9 +231,9 @@ func (s *Server) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Wa watchCancel() watchCtx, watchCancel = context.WithCancel(ctx) - newWatcher := s.cli.Watch(watchCtx, nodePath, clientv3.WithRev(currVersion), clientv3.WithPrefix()) + newWatcher := s.cli.Watch(watchCtx, nodePath, clientv3.WithRev(rev), clientv3.WithPrefix()) if newWatcher == nil { - log.Warningf("watch %v failed and get a nil channel returned, currVersion: %v", nodePath, currVersion) + log.Warningf("watch %v failed and get a nil channel returned, rev: %v", nodePath, rev) } else { watcher = newWatcher } @@ -247,7 +250,7 @@ func (s *Server) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Wa return } - currVersion = wresp.Header.GetRevision() + rev = wresp.Header.GetRevision() for _, ev := range 
+// current reasoning behind using ModRevision. This can be changed in the future
+// but should be done so intentionally, thus this test ensures we don't change the
+// behavior accidentally/unintentionally.
+func TestWatchTopoVersion(t *testing.T) { + ctx := utils.LeakCheckContext(t) + etcdServerAddr, _ := startEtcd(t, 0) + root := "/vitess/test" + name := "testkey" + path := path.Join(root, name) + value := "testval" + // We use these two variables to ensure that we receive all of the changes in + // our watch. + changesMade := atomic.Int64{} // This is accessed across goroutines + changesSeen := int64(0) + client, err := clientv3.New(clientv3.Config{ + Endpoints: []string{etcdServerAddr}, + DialTimeout: 5 * time.Second, + }) + require.NoError(t, err) + serverRunningCh := make(chan struct{}) + server := &Server{ + cli: client, + root: root, + running: serverRunningCh, + } + defer server.Close() + + // Create the key as the vitess topo server requires that it exist before you + // can watch it (the lower level etcd watch does not require this). + client.Put(ctx, path, fmt.Sprintf("%s-%d", value, changesMade.Load())) + changesMade.Add(1) + + var data <-chan *topo.WatchData + _, data, err = server.Watch(ctx, name) + require.NoError(t, err, "Server.Watch() error = %v", err) + + // Coordinate between the goroutines on the delete so that we don't miss + // N changes when restarting the watch. + token := make(chan struct{}) + defer close(token) + + // Run a goroutine that updates the key we're watching. + go func() { + cur := changesMade.Load() + 1 + batchSize := int64(10) + for i := cur; i <= cur+batchSize; i++ { + client.Put(ctx, path, fmt.Sprintf("%s-%d", value, i)) + changesMade.Add(1) + select { + case <-ctx.Done(): + return + default: + } + } + // Delete the key to ensure that our version continues to be monotonically + // increasing. + client.Delete(ctx, path) + changesMade.Add(1) + // Let the main goroutine process the delete and restart the watch before + // we make more changes. 
+ token <- struct{}{} + cur = changesMade.Load() + 1 + for i := cur; i <= cur+batchSize; i++ { + client.Put(ctx, path, fmt.Sprintf("%s-%d", value, i)) + changesMade.Add(1) + select { + case <-ctx.Done(): + return + default: + } + } + }() + + // When using ModRevision as the logical key version, the Revision is initially + // 1 as we're at the first change of the keyspace (it has been created). This + // means that the first time we receive a change in the watch, we should expect + // the key's topo.Version to be 2 as it's the second change to the keyspace. + // We start with 1 as we increment this every time we receive a change in the + // watch. + expectedVersion := int64(1) + + // Consider the test done when we've been watching the key for 10 seconds. We + // should receive all of the changes made within 1 second but we allow for a lot + // of extra time to prevent flakiness when the host is very slow for any reason. + watchCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + for { + select { + case <-watchCtx.Done(): + require.Equal(t, changesMade.Load(), changesSeen, "expected %d changes, got %d", changesMade.Load(), changesSeen) + return // Success, we're done + case <-ctx.Done(): + require.FailNow(t, "test context cancelled") + case <-serverRunningCh: + require.FailNow(t, "topo server is no longer running") + case wd := <-data: + changesSeen++ + expectedVersion++ + if wd.Err != nil { + if topo.IsErrType(wd.Err, topo.NoNode) { + // This was our delete. We'll restart the watch. + // Note that the lower level etcd watch doesn't treat delete as + // any special kind of change/event, it's another change to the + // key, but our topo server Watch treats this as an implicit end + // of the watch and it terminates it. + // We create the key again as the vitess topo server requires + // that it exist before watching it. 
+ client.Put(ctx, path, fmt.Sprintf("%s-%d", value, changesMade.Load())) + changesMade.Add(1) + _, data, err = server.Watch(ctx, name) + require.NoError(t, err, "Server.Watch() error = %v", err) + <-token // Tell the goroutine making changes to continue + continue + } + require.FailNow(t, "unexpected error in watch data", "error: %v", wd.Err) + } + gotVersion := int64(wd.Version.(EtcdVersion)) + require.Equal(t, expectedVersion, gotVersion, "expected version %d, got %d", expectedVersion, gotVersion) + } + } +} diff --git a/go/vt/topo/events/keyspace_change_syslog.go b/go/vt/topo/events/keyspace_change_syslog.go index d7f456ae6b8..7404c3ca882 100644 --- a/go/vt/topo/events/keyspace_change_syslog.go +++ b/go/vt/topo/events/keyspace_change_syslog.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topo/events/keyspace_change_syslog_test.go b/go/vt/topo/events/keyspace_change_syslog_test.go index 1367cf27b23..8ba7225a025 100644 --- a/go/vt/topo/events/keyspace_change_syslog_test.go +++ b/go/vt/topo/events/keyspace_change_syslog_test.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topo/events/shard_change_syslog.go b/go/vt/topo/events/shard_change_syslog.go index 3f6422a9175..2055e4268ec 100644 --- a/go/vt/topo/events/shard_change_syslog.go +++ b/go/vt/topo/events/shard_change_syslog.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topo/events/shard_change_syslog_test.go b/go/vt/topo/events/shard_change_syslog_test.go index fc721bae923..bdac457853e 100644 --- a/go/vt/topo/events/shard_change_syslog_test.go +++ b/go/vt/topo/events/shard_change_syslog_test.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. 
diff --git a/go/vt/topo/events/tablet_change_syslog.go b/go/vt/topo/events/tablet_change_syslog.go index e2dae020c8e..55de46674dc 100644 --- a/go/vt/topo/events/tablet_change_syslog.go +++ b/go/vt/topo/events/tablet_change_syslog.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topo/events/tablet_change_syslog_test.go b/go/vt/topo/events/tablet_change_syslog_test.go index 4a5bb4d7ea9..7ecabf3f7fb 100644 --- a/go/vt/topo/events/tablet_change_syslog_test.go +++ b/go/vt/topo/events/tablet_change_syslog_test.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topo/faketopo/faketopo.go b/go/vt/topo/faketopo/faketopo.go index 8601d28f5b6..69ccf08a969 100644 --- a/go/vt/topo/faketopo/faketopo.go +++ b/go/vt/topo/faketopo/faketopo.go @@ -21,12 +21,11 @@ import ( "strings" "sync" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - - "vitess.io/vitess/go/vt/topo" ) // FakeFactory implements the Factory interface. This is supposed to be used only for testing diff --git a/go/vt/topo/helpers/compare_test.go b/go/vt/topo/helpers/compare_test.go index d31eedee2e9..82924e522f5 100644 --- a/go/vt/topo/helpers/compare_test.go +++ b/go/vt/topo/helpers/compare_test.go @@ -17,9 +17,10 @@ limitations under the License. 
package helpers import ( + "context" "testing" - "context" + "vitess.io/vitess/go/vt/sqlparser" ) func TestBasicCompare(t *testing.T) { @@ -32,7 +33,7 @@ func TestBasicCompare(t *testing.T) { t.Fatalf("Compare keyspaces is not failing when topos are not in sync") } - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) err = CompareKeyspaces(ctx, fromTS, toTS) if err != nil { diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index 0df706eba31..6dff1c6ac22 100644 --- a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -25,6 +25,7 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,7 +34,7 @@ import ( ) // CopyKeyspaces will create the keyspaces in the destination topo. -func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { +func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlparser.Parser) error { keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { return fmt.Errorf("GetKeyspaces: %w", err) @@ -57,7 +58,7 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { vs, err := fromTS.GetVSchema(ctx, keyspace) switch { case err == nil: - _, err = vindexes.BuildKeyspace(vs) + _, err = vindexes.BuildKeyspace(vs, parser) if err != nil { log.Errorf("BuildKeyspace(%v): %v", keyspace, err) break diff --git a/go/vt/topo/helpers/copy_test.go b/go/vt/topo/helpers/copy_test.go index 2086a2e6552..142c6eb49ac 100644 --- a/go/vt/topo/helpers/copy_test.go +++ b/go/vt/topo/helpers/copy_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -104,7 +106,7 @@ func TestBasic(t *testing.T) { fromTS, toTS := createSetup(ctx, t) // check 
keyspace copy - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) keyspaces, err := toTS.GetKeyspaces(ctx) if err != nil { t.Fatalf("toTS.GetKeyspaces failed: %v", err) @@ -112,7 +114,7 @@ func TestBasic(t *testing.T) { if len(keyspaces) != 1 || keyspaces[0] != "test_keyspace" { t.Fatalf("unexpected keyspaces: %v", keyspaces) } - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) // check shard copy CopyShards(ctx, fromTS, toTS) diff --git a/go/vt/topo/helpers/tee.go b/go/vt/topo/helpers/tee.go deleted file mode 100644 index b2178144087..00000000000 --- a/go/vt/topo/helpers/tee.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helpers - -import ( - "context" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo" -) - -// TeeFactory is an implementation of topo.Factory that uses a primary -// underlying topo.Server for all changes, but also duplicates the -// changes to a secondary topo.Server. It also locks both topo servers -// when needed. It is meant to be used during transitions from one -// topo.Server to another. -// -// - primary: we read everything from it, and write to it. We also create -// LeaderParticipation from it. -// - secondary: we write to it as well, but we usually don't fail. 
-// - we lock primary/secondary if reverseLockOrder is False, -// -// or secondary/primary if reverseLockOrder is True. -type TeeFactory struct { - primary *topo.Server - secondary *topo.Server - reverseLockOrder bool -} - -// HasGlobalReadOnlyCell is part of the topo.Factory interface. -func (f *TeeFactory) HasGlobalReadOnlyCell(serverAddr, root string) bool { - return false -} - -// Create is part of the topo.Factory interface. -func (f *TeeFactory) Create(cell, serverAddr, root string) (topo.Conn, error) { - ctx := context.Background() - primaryConn, err := f.primary.ConnForCell(ctx, cell) - if err != nil { - return nil, err - } - secondaryConn, err := f.secondary.ConnForCell(ctx, cell) - if err != nil { - return nil, err - } - - lockFirst := primaryConn - lockSecond := secondaryConn - if f.reverseLockOrder { - lockFirst = secondaryConn - lockSecond = primaryConn - } - - return &TeeConn{ - primary: primaryConn, - secondary: secondaryConn, - lockFirst: lockFirst, - lockSecond: lockSecond, - }, nil -} - -// NewTee returns a new topo.Server object. It uses a TeeFactory. -func NewTee(primary, secondary *topo.Server, reverseLockOrder bool) (*topo.Server, error) { - f := &TeeFactory{ - primary: primary, - secondary: secondary, - reverseLockOrder: reverseLockOrder, - } - return topo.NewWithFactory(f, "" /*serverAddress*/, "" /*root*/) -} - -// TeeConn implements the topo.Conn interface. -type TeeConn struct { - primary topo.Conn - secondary topo.Conn - - lockFirst topo.Conn - lockSecond topo.Conn -} - -// Close is part of the topo.Conn interface. -func (c *TeeConn) Close() { - c.primary.Close() - c.secondary.Close() -} - -// ListDir is part of the topo.Conn interface. -func (c *TeeConn) ListDir(ctx context.Context, dirPath string, full bool) ([]topo.DirEntry, error) { - return c.primary.ListDir(ctx, dirPath, full) -} - -// Create is part of the topo.Conn interface. 
-func (c *TeeConn) Create(ctx context.Context, filePath string, contents []byte) (topo.Version, error) { - primaryVersion, err := c.primary.Create(ctx, filePath, contents) - if err != nil { - return nil, err - } - - // This is critical enough that we want to fail. However, we support - // an unconditional update if the file already exists. - _, err = c.secondary.Create(ctx, filePath, contents) - if topo.IsErrType(err, topo.NodeExists) { - _, err = c.secondary.Update(ctx, filePath, contents, nil) - } - if err != nil { - return nil, err - } - - return primaryVersion, nil -} - -// Update is part of the topo.Conn interface. -func (c *TeeConn) Update(ctx context.Context, filePath string, contents []byte, version topo.Version) (topo.Version, error) { - primaryVersion, err := c.primary.Update(ctx, filePath, contents, version) - if err != nil { - // Failed on primary, not updating secondary. - return nil, err - } - - // Always do an unconditional update on secondary. - if _, err = c.secondary.Update(ctx, filePath, contents, nil); err != nil { - log.Warningf("secondary.Update(%v,unconditonal) failed: %v", filePath, err) - } - return primaryVersion, nil -} - -// Get is part of the topo.Conn interface. -func (c *TeeConn) Get(ctx context.Context, filePath string) ([]byte, topo.Version, error) { - return c.primary.Get(ctx, filePath) -} - -// List is part of the topo.Conn interface. -func (c *TeeConn) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo, error) { - return c.primary.List(ctx, filePathPrefix) -} - -// Delete is part of the topo.Conn interface. -func (c *TeeConn) Delete(ctx context.Context, filePath string, version topo.Version) error { - // If primary fails, no need to go further. - if err := c.primary.Delete(ctx, filePath, version); err != nil { - return err - } - - // Always do an unconditonal delete on secondary. 
- if err := c.secondary.Delete(ctx, filePath, nil); err != nil && !topo.IsErrType(err, topo.NoNode) { - // Secondary didn't work, and the node wasn't gone already. - log.Warningf("secondary.Delete(%v) failed: %v", filePath, err) - } - - return nil -} - -// Watch is part of the topo.Conn interface -func (c *TeeConn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-chan *topo.WatchData, error) { - return c.primary.Watch(ctx, filePath) -} - -func (c *TeeConn) WatchRecursive(ctx context.Context, path string) ([]*topo.WatchDataRecursive, <-chan *topo.WatchDataRecursive, error) { - return c.primary.WatchRecursive(ctx, path) -} - -// -// Lock management. -// - -// teeTopoLockDescriptor implements the topo.LockDescriptor interface. -type teeTopoLockDescriptor struct { - c *TeeConn - dirPath string - firstLockDescriptor topo.LockDescriptor - secondLockDescriptor topo.LockDescriptor -} - -// Lock is part of the topo.Conn interface. -func (c *TeeConn) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - return c.lock(ctx, dirPath, contents) -} - -// TryLock is part of the topo.Conn interface. Its implementation is same as Lock -func (c *TeeConn) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - return c.Lock(ctx, dirPath, contents) -} - -// Lock is part of the topo.Conn interface. -func (c *TeeConn) lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { - // Lock lockFirst. - fLD, err := c.lockFirst.Lock(ctx, dirPath, contents) - if err != nil { - return nil, err - } - - // Lock lockSecond. - sLD, err := c.lockSecond.Lock(ctx, dirPath, contents) - if err != nil { - if err := fLD.Unlock(ctx); err != nil { - log.Warningf("Failed to unlock lockFirst after failed lockSecond lock for %v: %v", dirPath, err) - } - return nil, err - } - - // Remember both locks in teeTopoLockDescriptor. 
- return &teeTopoLockDescriptor{ - c: c, - dirPath: dirPath, - firstLockDescriptor: fLD, - secondLockDescriptor: sLD, - }, nil -} - -// Check is part of the topo.LockDescriptor interface. -func (ld *teeTopoLockDescriptor) Check(ctx context.Context) error { - if err := ld.firstLockDescriptor.Check(ctx); err != nil { - return err - } - return ld.secondLockDescriptor.Check(ctx) -} - -// Unlock is part of the topo.LockDescriptor interface. -func (ld *teeTopoLockDescriptor) Unlock(ctx context.Context) error { - // Unlock lockSecond, then lockFirst. - serr := ld.secondLockDescriptor.Unlock(ctx) - ferr := ld.firstLockDescriptor.Unlock(ctx) - - if serr != nil { - if ferr != nil { - log.Warningf("First Unlock(%v) failed: %v", ld.dirPath, ferr) - } - return serr - } - return ferr -} - -// NewLeaderParticipation is part of the topo.Conn interface. -func (c *TeeConn) NewLeaderParticipation(name, id string) (topo.LeaderParticipation, error) { - return c.primary.NewLeaderParticipation(name, id) -} diff --git a/go/vt/topo/helpers/tee_test.go b/go/vt/topo/helpers/tee_test.go deleted file mode 100644 index 4dda901c300..00000000000 --- a/go/vt/topo/helpers/tee_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helpers - -import ( - "reflect" - "testing" - - "github.com/stretchr/testify/require" - - "context" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -func TestTee(t *testing.T) { - ctx := context.Background() - - // create the setup, copy the data - fromTS, toTS := createSetup(ctx, t) - CopyKeyspaces(ctx, fromTS, toTS) - CopyShards(ctx, fromTS, toTS) - CopyTablets(ctx, fromTS, toTS) - - // create a tee and check it implements the interface. - teeTS, err := NewTee(fromTS, toTS, true) - require.NoError(t, err) - - // create a keyspace, make sure it is on both sides - if err := teeTS.CreateKeyspace(ctx, "keyspace2", &topodatapb.Keyspace{}); err != nil { - t.Fatalf("tee.CreateKeyspace(keyspace2) failed: %v", err) - } - teeKeyspaces, err := teeTS.GetKeyspaces(ctx) - if err != nil { - t.Fatalf("tee.GetKeyspaces() failed: %v", err) - } - expected := []string{"keyspace2", "test_keyspace"} - if !reflect.DeepEqual(expected, teeKeyspaces) { - t.Errorf("teeKeyspaces mismatch, got %+v, want %+v", teeKeyspaces, expected) - } - fromKeyspaces, err := fromTS.GetKeyspaces(ctx) - if err != nil { - t.Fatalf("fromTS.GetKeyspaces() failed: %v", err) - } - expected = []string{"keyspace2", "test_keyspace"} - if !reflect.DeepEqual(expected, fromKeyspaces) { - t.Errorf("fromKeyspaces mismatch, got %+v, want %+v", fromKeyspaces, expected) - } - toKeyspaces, err := toTS.GetKeyspaces(ctx) - if err != nil { - t.Fatalf("toTS.GetKeyspaces() failed: %v", err) - } - expected = []string{"keyspace2", "test_keyspace"} - if !reflect.DeepEqual(expected, toKeyspaces) { - t.Errorf("toKeyspaces mismatch, got %+v, want %+v", toKeyspaces, expected) - } -} diff --git a/go/vt/topo/helpers/tee_topo_test.go b/go/vt/topo/helpers/tee_topo_test.go deleted file mode 100644 index 8a4c5690846..00000000000 --- a/go/vt/topo/helpers/tee_topo_test.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helpers - -import ( - "testing" - - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/topo/test" -) - -func TestTeeTopo(t *testing.T) { - ctx := utils.LeakCheckContext(t) - test.TopoServerTestSuite(t, ctx, func() *topo.Server { - s1 := memorytopo.NewServer(ctx, test.LocalCellName) - s2 := memorytopo.NewServer(ctx, test.LocalCellName) - tee, err := NewTee(s1, s2, false) - if err != nil { - t.Fatalf("NewTee() failed: %v", err) - } - return tee - }, []string{"checkTryLock", "checkShardWithLock"}) -} diff --git a/go/vt/topo/keyspace.go b/go/vt/topo/keyspace.go index feb80c374e5..dced769ca78 100755 --- a/go/vt/topo/keyspace.go +++ b/go/vt/topo/keyspace.go @@ -19,8 +19,16 @@ package topo import ( "context" "path" + "sort" + "sync" + + "github.com/spf13/pflag" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/event" @@ -31,7 +39,25 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -// This file contains keyspace utility functions +// This file contains keyspace utility functions. + +// Default concurrency to use in order to avoid overhwelming the topo server. +var DefaultConcurrency = 32 + +// shardKeySuffix is the suffix of a shard key. 
+// The full key looks like this: +// /vitess/global/keyspaces/customer/shards/80-/Shard +const shardKeySuffix = "Shard" + +func registerFlags(fs *pflag.FlagSet) { + fs.IntVar(&DefaultConcurrency, "topo_read_concurrency", DefaultConcurrency, "Concurrency of topo reads.") +} + +func init() { + servenv.OnParseFor("vtcombo", registerFlags) + servenv.OnParseFor("vtctld", registerFlags) + servenv.OnParseFor("vtgate", registerFlags) +} // KeyspaceInfo is a meta struct that contains metadata to give the // data more context and convenience. This is the main way we interact @@ -58,110 +84,6 @@ func ValidateKeyspaceName(name string) error { return validateObjectName(name) } -// GetServedFrom returns a Keyspace_ServedFrom record if it exists. -func (ki *KeyspaceInfo) GetServedFrom(tabletType topodatapb.TabletType) *topodatapb.Keyspace_ServedFrom { - for _, ksf := range ki.ServedFroms { - if ksf.TabletType == tabletType { - return ksf - } - } - return nil -} - -// CheckServedFromMigration makes sure a requested migration is safe -func (ki *KeyspaceInfo) CheckServedFromMigration(tabletType topodatapb.TabletType, cells []string, keyspace string, remove bool) error { - // primary is a special case with a few extra checks - if tabletType == topodatapb.TabletType_PRIMARY { - // TODO(deepthi): these master references will go away when we delete legacy resharding - if !remove { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot add master back to %v", ki.keyspace) - } - if len(cells) > 0 { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot migrate only some cells for master removal in keyspace %v", ki.keyspace) - } - if len(ki.ServedFroms) > 1 { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot migrate master into %v until everything else is migrated", ki.keyspace) - } - } - - // we can't remove a type we don't have - if ki.GetServedFrom(tabletType) == nil && remove { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "supplied 
type cannot be migrated") - } - - // check the keyspace is consistent in any case - for _, ksf := range ki.ServedFroms { - if ksf.Keyspace != keyspace { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "inconsistent keyspace specified in migration: %v != %v for type %v", keyspace, ksf.Keyspace, ksf.TabletType) - } - } - - return nil -} - -// UpdateServedFromMap handles ServedFromMap. It can add or remove -// records, cells, ... -func (ki *KeyspaceInfo) UpdateServedFromMap(tabletType topodatapb.TabletType, cells []string, keyspace string, remove bool, allCells []string) error { - // check parameters to be sure - if err := ki.CheckServedFromMigration(tabletType, cells, keyspace, remove); err != nil { - return err - } - - ksf := ki.GetServedFrom(tabletType) - if ksf == nil { - // the record doesn't exist - if remove { - if len(ki.ServedFroms) == 0 { - ki.ServedFroms = nil - } - log.Warningf("Trying to remove KeyspaceServedFrom for missing type %v in keyspace %v", tabletType, ki.keyspace) - } else { - ki.ServedFroms = append(ki.ServedFroms, &topodatapb.Keyspace_ServedFrom{ - TabletType: tabletType, - Cells: cells, - Keyspace: keyspace, - }) - } - return nil - } - - if remove { - result, emptyList := removeCells(ksf.Cells, cells, allCells) - if emptyList { - // we don't have any cell left, we need to clear this record - var newServedFroms []*topodatapb.Keyspace_ServedFrom - for _, k := range ki.ServedFroms { - if k != ksf { - newServedFroms = append(newServedFroms, k) - } - } - ki.ServedFroms = newServedFroms - } else { - ksf.Cells = result - } - } else { - if ksf.Keyspace != keyspace { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot UpdateServedFromMap on existing record for keyspace %v, different keyspace: %v != %v", ki.keyspace, ksf.Keyspace, keyspace) - } - ksf.Cells = addCells(ksf.Cells, cells) - } - return nil -} - -// ComputeCellServedFrom returns the ServedFrom list for a cell -func (ki *KeyspaceInfo) ComputeCellServedFrom(cell string) 
[]*topodatapb.SrvKeyspace_ServedFrom { - var result []*topodatapb.SrvKeyspace_ServedFrom - for _, ksf := range ki.ServedFroms { - if InCellList(cell, ksf.Cells) { - result = append(result, &topodatapb.SrvKeyspace_ServedFrom{ - TabletType: ksf.TabletType, - Keyspace: ksf.Keyspace, - }) - } - } - return result -} - // CreateKeyspace wraps the underlying Conn.Create // and dispatches the event. func (ts *Server) CreateKeyspace(ctx context.Context, keyspace string, value *topodatapb.Keyspace) error { @@ -270,56 +192,171 @@ func (ts *Server) UpdateKeyspace(ctx context.Context, ki *KeyspaceInfo) error { return nil } -// FindAllShardsInKeyspace reads and returns all the existing shards in -// a keyspace. It doesn't take any lock. -func (ts *Server) FindAllShardsInKeyspace(ctx context.Context, keyspace string) (map[string]*ShardInfo, error) { - shards, err := ts.GetShardNames(ctx, keyspace) +// FindAllShardsInKeyspaceOptions controls the behavior of +// Server.FindAllShardsInKeyspace. +type FindAllShardsInKeyspaceOptions struct { + // Concurrency controls the maximum number of concurrent calls to GetShard. + // If <= 0, Concurrency is set to 1. + Concurrency int +} + +// FindAllShardsInKeyspace reads and returns all the existing shards in a +// keyspace. It doesn't take any lock. +// +// If opt is non-nil, it is used to configure the method's behavior. Otherwise, +// the default options are used. +func (ts *Server) FindAllShardsInKeyspace(ctx context.Context, keyspace string, opt *FindAllShardsInKeyspaceOptions) (map[string]*ShardInfo, error) { + // Apply any necessary defaults. + if opt == nil { + opt = &FindAllShardsInKeyspaceOptions{} + } + if opt.Concurrency <= 0 { + opt.Concurrency = DefaultConcurrency + } + + // Unescape the keyspace name as this can e.g. come from the VSchema where + // a keyspace/database name will need to be SQL escaped if it has special + // characters such as a dash. 
+ keyspace, err := sqlescape.UnescapeID(keyspace) if err != nil { - return nil, vterrors.Wrapf(err, "failed to get list of shards for keyspace '%v'", keyspace) + return nil, vterrors.Wrapf(err, "FindAllShardsInKeyspace(%s) invalid keyspace name", keyspace) + } + + // First try to get all shards using List if we can. + buildResultFromList := func(kvpairs []KVInfo) (map[string]*ShardInfo, error) { + result := make(map[string]*ShardInfo, len(kvpairs)) + for _, entry := range kvpairs { + // The shard key looks like this: /vitess/global/keyspaces/commerce/shards/-80/Shard + shardKey := string(entry.Key) + // We don't want keys that aren't Shards. For example: + // /vitess/global/keyspaces/commerce/shards/0/locks/7587876423742065323 + // This example key can happen with Shards because you can get a shard + // lock in the topo via TopoServer.LockShard(). + if path.Base(shardKey) != shardKeySuffix { + continue + } + shardName := path.Base(path.Dir(shardKey)) // The base part of the dir is "-80" + // Validate the extracted shard name. + if _, _, err := ValidateShardName(shardName); err != nil { + return nil, vterrors.Wrapf(err, "FindAllShardsInKeyspace(%s): unexpected shard key/path %q contains invalid shard name/range %q", + keyspace, shardKey, shardName) + } + shard := &topodatapb.Shard{} + if err := shard.UnmarshalVT(entry.Value); err != nil { + return nil, vterrors.Wrapf(err, "FindAllShardsInKeyspace(%s): invalid data found for shard %q in %q", + keyspace, shardName, shardKey) + } + result[shardName] = &ShardInfo{ + keyspace: keyspace, + shardName: shardName, + version: entry.Version, + Shard: shard, + } + } + return result, nil + } + shardsPath := path.Join(KeyspacesPath, keyspace, ShardsPath) + listRes, err := ts.globalCell.List(ctx, shardsPath) + if err == nil { // We have everything we need to build the result + return buildResultFromList(listRes) + } + if IsErrType(err, NoNode) { + // The path doesn't exist, let's see if the keyspace exists. 
+ if _, kerr := ts.GetKeyspace(ctx, keyspace); kerr != nil { + return nil, vterrors.Wrapf(err, "FindAllShardsInKeyspace(%s): List", keyspace) + } + // We simply have no shards. + return make(map[string]*ShardInfo, 0), nil + } + // Currently the ZooKeeper implementation does not support index prefix + // scans so we fall back to concurrently fetching the shards one by one. + // It is also possible that the response containing all shards is too + // large in which case we also fall back to the one by one fetch. + if !IsErrType(err, NoImplementation) && !IsErrType(err, ResourceExhausted) { + return nil, vterrors.Wrapf(err, "FindAllShardsInKeyspace(%s): List", keyspace) } - result := make(map[string]*ShardInfo, len(shards)) + // Fall back to the shard by shard method. + shards, err := ts.GetShardNames(ctx, keyspace) + if err != nil { + return nil, vterrors.Wrapf(err, "failed to get list of shard names for keyspace '%s'", keyspace) + } + + // Keyspaces with a large number of shards and geographically distributed + // topo instances may experience significant latency fetching shard records. + // + // A prior version of this logic used unbounded concurrency to fetch shard + // records which resulted in overwhelming topo server instances: + // https://github.com/vitessio/vitess/pull/5436. + // + // However, removing the concurrency altogether can cause large operations + // to fail due to timeout. The caller chooses the appropriate concurrency + // level so that certain paths can be optimized (such as vtctld + // RebuildKeyspace calls, which do not run on every vttablet). 
+ var ( + mu sync.Mutex + result = make(map[string]*ShardInfo, len(shards)) + ) + + eg, ctx := errgroup.WithContext(ctx) + eg.SetLimit(int(opt.Concurrency)) + for _, shard := range shards { - si, err := ts.GetShard(ctx, keyspace, shard) - if err != nil { - if IsErrType(err, NoNode) { - log.Warningf("GetShard(%v, %v) returned ErrNoNode, consider checking the topology.", keyspace, shard) - } else { - return nil, vterrors.Wrapf(err, "GetShard(%v, %v) failed", keyspace, shard) + shard := shard + + eg.Go(func() error { + si, err := ts.GetShard(ctx, keyspace, shard) + switch { + case IsErrType(err, NoNode): + log.Warningf("GetShard(%s, %s) returned ErrNoNode, consider checking the topology.", keyspace, shard) + return nil + case err == nil: + mu.Lock() + result[shard] = si + mu.Unlock() + + return nil + default: + return vterrors.Wrapf(err, "GetShard(%s, %s) failed", keyspace, shard) } - } - result[shard] = si + }) } + + if err := eg.Wait(); err != nil { + return nil, err + } + return result, nil } // GetServingShards returns all shards where the primary is serving. func (ts *Server) GetServingShards(ctx context.Context, keyspace string) ([]*ShardInfo, error) { - shards, err := ts.GetShardNames(ctx, keyspace) + shards, err := ts.FindAllShardsInKeyspace(ctx, keyspace, nil) if err != nil { return nil, vterrors.Wrapf(err, "failed to get list of shards for keyspace '%v'", keyspace) } result := make([]*ShardInfo, 0, len(shards)) for _, shard := range shards { - si, err := ts.GetShard(ctx, keyspace, shard) - if err != nil { - return nil, vterrors.Wrapf(err, "GetShard(%v, %v) failed", keyspace, shard) - } - if !si.IsPrimaryServing { + if !shard.IsPrimaryServing { continue } - result = append(result, si) + result = append(result, shard) } if len(result) == 0 { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "%v has no serving shards", keyspace) } + // Sort the shards by KeyRange for deterministic results. 
+ sort.Slice(result, func(i, j int) bool { + return key.KeyRangeLess(result[i].KeyRange, result[j].KeyRange) + }) + return result, nil } // GetOnlyShard returns the single ShardInfo of an unsharded keyspace. func (ts *Server) GetOnlyShard(ctx context.Context, keyspace string) (*ShardInfo, error) { - allShards, err := ts.FindAllShardsInKeyspace(ctx, keyspace) + allShards, err := ts.FindAllShardsInKeyspace(ctx, keyspace, nil) if err != nil { return nil, err } diff --git a/go/vt/topo/keyspace_external_test.go b/go/vt/topo/keyspace_external_test.go new file mode 100644 index 00000000000..bfcb2f591a9 --- /dev/null +++ b/go/vt/topo/keyspace_external_test.go @@ -0,0 +1,221 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topo_test + +import ( + "context" + "fmt" + "slices" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqlescape" + "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestServerFindAllShardsInKeyspace(t *testing.T) { + const defaultKeyspace = "keyspace" + tests := []struct { + name string + shards int + keyspace string // If you want to override the default + opt *topo.FindAllShardsInKeyspaceOptions + }{ + { + name: "negative concurrency", + shards: 1, + // Ensure this doesn't panic. 
+ opt: &topo.FindAllShardsInKeyspaceOptions{Concurrency: -1}, + }, + { + name: "unsharded", + shards: 1, + // Make sure the defaults apply as expected. + opt: nil, + }, + { + name: "sharded", + shards: 32, + opt: &topo.FindAllShardsInKeyspaceOptions{Concurrency: 8}, + }, + { + name: "SQL escaped keyspace", + shards: 32, + keyspace: "`my-keyspace`", + opt: &topo.FindAllShardsInKeyspaceOptions{Concurrency: 8}, + }, + } + + for _, tt := range tests { + keyspace := defaultKeyspace + if tt.keyspace != "" { + // Most calls such as CreateKeyspace will not accept invalid characters + // in the value so we'll only use the original test case value in + // FindAllShardsInKeyspace. This allows us to test and confirm that + // FindAllShardsInKeyspace can handle SQL escaped or backtick'd names. + keyspace, _ = sqlescape.UnescapeID(tt.keyspace) + } else { + tt.keyspace = defaultKeyspace + } + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := memorytopo.NewServer(ctx) + defer ts.Close() + + // Create an ephemeral keyspace and generate shard records within + // the keyspace to fetch later. + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{})) + + shards, err := key.GenerateShardRanges(tt.shards) + require.NoError(t, err) + + for _, s := range shards { + require.NoError(t, ts.CreateShard(ctx, keyspace, s)) + } + + // Verify that we return a complete list of shards and that each + // key range is present in the output. 
+ out, err := ts.FindAllShardsInKeyspace(ctx, tt.keyspace, tt.opt) + require.NoError(t, err) + require.Len(t, out, tt.shards) + + for _, s := range shards { + if _, ok := out[s]; !ok { + t.Errorf("shard %q was not found", s) + } + } + }) + } +} + +func TestServerGetServingShards(t *testing.T) { + keyspace := "ks1" + errNoListImpl := topo.NewError(topo.NoImplementation, "don't be doing no listing round here") + + // This is needed because memorytopo doesn't implement locks using + // keys in the topo. So we simulate the behavior of other topo server + // implementations and how they implement TopoServer.LockShard(). + createSimulatedShardLock := func(ctx context.Context, ts *topo.Server, keyspace, shard string) error { + conn, err := ts.ConnForCell(ctx, topo.GlobalCell) + if err != nil { + return err + } + lockKey := fmt.Sprintf("keyspaces/%s/shards/%s/locks/1234", keyspace, shard) + _, err = conn.Create(ctx, lockKey, []byte("lock")) + return err + } + + tests := []struct { + shards int // Number of shards to create + err string // Error message we expect, if any + fallback bool // Should we fallback to the shard by shard method + }{ + { + shards: 0, + err: fmt.Sprintf("%s has no serving shards", keyspace), + }, + { + shards: 2, + }, + { + shards: 128, + }, + { + shards: 512, + fallback: true, + }, + { + shards: 1024, + }, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("%d shards with fallback = %t", tt.shards, tt.fallback), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, factory := memorytopo.NewServerAndFactory(ctx) + defer ts.Close() + stats := factory.GetCallStats() + require.NotNil(t, stats) + + if tt.fallback { + factory.AddOperationError(memorytopo.List, ".*", errNoListImpl) + } + + err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}) + require.NoError(t, err) + var shardNames []string + if tt.shards > 0 { + shardNames, err = key.GenerateShardRanges(tt.shards) + require.NoError(t, err) + 
require.Equal(t, tt.shards, len(shardNames)) + for _, shardName := range shardNames { + err = ts.CreateShard(ctx, keyspace, shardName) + require.NoError(t, err) + } + // A shard lock typically becomes a key in the topo like this: + // /vitess/global/keyspaces//shards//locks/XXXX + // We want to confirm that this key is ignored when building + // the results. + err = createSimulatedShardLock(ctx, ts, keyspace, shardNames[0]) + require.NoError(t, err) + } + + // Verify that we return a complete list of shards and that each + // key range is present in the output. + stats.ResetAll() // We only want the stats for GetServingShards + shardInfos, err := ts.GetServingShards(ctx, keyspace) + if tt.err != "" { + require.EqualError(t, err, tt.err) + return + } + require.NoError(t, err) + require.Len(t, shardInfos, tt.shards) + for _, shardName := range shardNames { + f := func(si *topo.ShardInfo) bool { + return key.KeyRangeString(si.Shard.KeyRange) == shardName + } + require.True(t, slices.ContainsFunc(shardInfos, f), "shard %q was not found in the results", + shardName) + } + + // Now we check the stats based on the number of shards and whether or not + // we should have had a List error and fell back to the shard by shard method. 
+ callcounts := stats.Counts() + require.NotNil(t, callcounts) + require.Equal(t, int64(1), callcounts["List"]) // We should always try + switch { + case tt.fallback: // We get the shards one by one from the list + require.Equal(t, int64(1), callcounts["ListDir"]) // GetShardNames + require.Equal(t, int64(tt.shards), callcounts["Get"]) // GetShard + case tt.shards < 1: // We use a Get to check that the keyspace exists + require.Equal(t, int64(0), callcounts["ListDir"]) + require.Equal(t, int64(1), callcounts["Get"]) + default: // We should not make any ListDir or Get calls + require.Equal(t, int64(0), callcounts["ListDir"]) + require.Equal(t, int64(0), callcounts["Get"]) + } + }) + } +} diff --git a/go/vt/topo/keyspace_test.go b/go/vt/topo/keyspace_test.go deleted file mode 100644 index 1abf873b8e0..00000000000 --- a/go/vt/topo/keyspace_test.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package topo - -import ( - "reflect" - "testing" - - topodatapb "vitess.io/vitess/go/vt/proto/topodata" -) - -// This file tests the keyspace related object functionalities. 
- -func TestUpdateServedFromMap(t *testing.T) { - // TODO(deepthi): delete this test once legacy resharding code is deleted - ki := &KeyspaceInfo{ - keyspace: "ks", - version: nil, - Keyspace: &topodatapb.Keyspace{ - ServedFroms: []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_RDONLY, - Cells: nil, - Keyspace: "source", - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "source", - }, - }, - }, - } - allCells := []string{"first", "second", "third"} - - // migrate one cell - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"first"}, "source", true, allCells); err != nil || !reflect.DeepEqual(ki.ServedFroms, []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"second", "third"}, - Keyspace: "source", - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "source", - }, - }) { - t.Fatalf("one cell add failed: %v", ki) - } - - // re-add that cell, going back - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"first"}, "source", false, nil); err != nil || !reflect.DeepEqual(ki.ServedFroms, []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"second", "third", "first"}, - Keyspace: "source", - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "source", - }, - }) { - t.Fatalf("going back should have remove the record: %#v", ki.Keyspace.ServedFroms) - } - - // now remove the cell again - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"first"}, "source", true, allCells); err != nil || !reflect.DeepEqual(ki.ServedFroms, []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"second", "third"}, - Keyspace: "source", - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "source", - }, - }) { - t.Fatalf("one cell add failed: %v", ki) 
- } - - // couple error cases - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"second"}, "othersource", true, allCells); err == nil || (err.Error() != "inconsistent keyspace specified in migration: othersource != source for type MASTER" && err.Error() != "inconsistent keyspace specified in migration: othersource != source for type RDONLY") { - t.Fatalf("different keyspace should fail: %v", err) - } - if err := ki.UpdateServedFromMap(topodatapb.TabletType_PRIMARY, nil, "source", true, allCells); err == nil || err.Error() != "cannot migrate master into ks until everything else is migrated" { - t.Fatalf("migrate the master early should have failed: %v", err) - } - - // now remove all cells - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, []string{"second", "third"}, "source", true, allCells); err != nil || !reflect.DeepEqual(ki.ServedFroms, []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "source", - }, - }) { - t.Fatalf("remove all cells failed: %v", ki) - } - if err := ki.UpdateServedFromMap(topodatapb.TabletType_RDONLY, nil, "source", true, allCells); err == nil || err.Error() != "supplied type cannot be migrated" { - t.Fatalf("migrate rdonly again should have failed: %v", err) - } - - // finally migrate the primary - if err := ki.UpdateServedFromMap(topodatapb.TabletType_PRIMARY, []string{"second"}, "source", true, allCells); err == nil || err.Error() != "cannot migrate only some cells for master removal in keyspace ks" { - t.Fatalf("migrate master with cells should have failed: %v", err) - } - if err := ki.UpdateServedFromMap(topodatapb.TabletType_PRIMARY, nil, "source", true, allCells); err != nil || ki.ServedFroms != nil { - t.Fatalf("migrate the master failed: %v", ki) - } - - // error case again - if err := ki.UpdateServedFromMap(topodatapb.TabletType_PRIMARY, nil, "source", true, allCells); err == nil || err.Error() != "supplied type cannot be migrated" { - 
t.Fatalf("migrate the master again should have failed: %v", err) - } -} - -func TestComputeCellServedFrom(t *testing.T) { - ki := &KeyspaceInfo{ - keyspace: "ks", - version: nil, - Keyspace: &topodatapb.Keyspace{ - ServedFroms: []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "source", - }, - { - TabletType: topodatapb.TabletType_REPLICA, - Cells: []string{"c1", "c2"}, - Keyspace: "source", - }, - }, - }, - } - - m := ki.ComputeCellServedFrom("c3") - if !reflect.DeepEqual(m, []*topodatapb.SrvKeyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_PRIMARY, - Keyspace: "source", - }, - }) { - t.Fatalf("c3 failed: %v", m) - } - - m = ki.ComputeCellServedFrom("c2") - if !reflect.DeepEqual(m, []*topodatapb.SrvKeyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_PRIMARY, - Keyspace: "source", - }, - { - TabletType: topodatapb.TabletType_REPLICA, - Keyspace: "source", - }, - }) { - t.Fatalf("c2 failed: %v", m) - } -} diff --git a/go/vt/topo/locks.go b/go/vt/topo/locks.go index 8d30d85e891..6325124c429 100644 --- a/go/vt/topo/locks.go +++ b/go/vt/topo/locks.go @@ -127,21 +127,6 @@ var locksKey locksKeyType // - a context with a locksInfo structure for future reference. // - an unlock method // - an error if anything failed. -// -// We lock a keyspace for the following operations to be guaranteed -// exclusive operation: -// * changing a keyspace sharding info fields (is this one necessary?) -// * changing a keyspace 'ServedFrom' field (is this one necessary?) -// * resharding operations: -// - horizontal resharding: includes changing the shard's 'ServedType', -// as well as the associated horizontal resharding operations. -// - vertical resharding: includes changing the keyspace 'ServedFrom' -// field, as well as the associated vertical resharding operations. 
-// - 'vtctl SetShardIsPrimaryServing' emergency operations -// - 'vtctl SetShardTabletControl' emergency operations -// - 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete' emergency operations -// -// * keyspace-wide schema changes func (ts *Server) LockKeyspace(ctx context.Context, keyspace, action string) (context.Context, func(*error), error) { i, ok := ctx.Value(locksKey).(*locksInfo) if !ok { diff --git a/go/vt/topo/memorytopo/directory.go b/go/vt/topo/memorytopo/directory.go index f68c87a2166..b8fa11a9d52 100644 --- a/go/vt/topo/memorytopo/directory.go +++ b/go/vt/topo/memorytopo/directory.go @@ -27,6 +27,8 @@ import ( // ListDir is part of the topo.Conn interface. func (c *Conn) ListDir(ctx context.Context, dirPath string, full bool) ([]topo.DirEntry, error) { + c.factory.callstats.Add([]string{"ListDir"}, 1) + if err := c.dial(ctx); err != nil { return nil, err } @@ -37,6 +39,9 @@ func (c *Conn) ListDir(ctx context.Context, dirPath string, full bool) ([]topo.D if c.factory.err != nil { return nil, c.factory.err } + if err := c.factory.getOperationError(ListDir, dirPath); err != nil { + return nil, err + } isRoot := false if dirPath == "" || dirPath == "/" { diff --git a/go/vt/topo/memorytopo/election.go b/go/vt/topo/memorytopo/election.go index 868a2c53287..1b6d2292f5c 100644 --- a/go/vt/topo/memorytopo/election.go +++ b/go/vt/topo/memorytopo/election.go @@ -24,15 +24,21 @@ import ( "vitess.io/vitess/go/vt/topo" ) -// NewLeaderParticipation is part of the topo.Server interface +// NewLeaderParticipation is part of the topo.Conn interface. func (c *Conn) NewLeaderParticipation(name, id string) (topo.LeaderParticipation, error) { - if c.closed { + c.factory.callstats.Add([]string{"NewLeaderParticipation"}, 1) + + if c.closed.Load() { return nil, ErrConnectionClosed } c.factory.mu.Lock() defer c.factory.mu.Unlock() + if err := c.factory.getOperationError(NewLeaderParticipation, id); err != nil { + return nil, err + } + // Make sure the global path exists. 
electionPath := path.Join(electionsPath, name) if n := c.factory.getOrCreatePath(c.cell, electionPath); n == nil { @@ -72,7 +78,7 @@ type cLeaderParticipation struct { // WaitForLeadership is part of the topo.LeaderParticipation interface. func (mp *cLeaderParticipation) WaitForLeadership() (context.Context, error) { - if mp.c.closed { + if mp.c.closed.Load() { return nil, ErrConnectionClosed } @@ -120,7 +126,7 @@ func (mp *cLeaderParticipation) Stop() { // GetCurrentLeaderID is part of the topo.LeaderParticipation interface func (mp *cLeaderParticipation) GetCurrentLeaderID(ctx context.Context) (string, error) { - if mp.c.closed { + if mp.c.closed.Load() { return "", ErrConnectionClosed } @@ -139,7 +145,7 @@ func (mp *cLeaderParticipation) GetCurrentLeaderID(ctx context.Context) (string, // WaitForNewLeader is part of the topo.LeaderParticipation interface func (mp *cLeaderParticipation) WaitForNewLeader(ctx context.Context) (<-chan string, error) { - if mp.c.closed { + if mp.c.closed.Load() { return nil, ErrConnectionClosed } diff --git a/go/vt/topo/memorytopo/file.go b/go/vt/topo/memorytopo/file.go index 0007203799f..86722477e53 100644 --- a/go/vt/topo/memorytopo/file.go +++ b/go/vt/topo/memorytopo/file.go @@ -30,6 +30,8 @@ import ( // Create is part of topo.Conn interface. func (c *Conn) Create(ctx context.Context, filePath string, contents []byte) (topo.Version, error) { + c.factory.callstats.Add([]string{"Create"}, 1) + if err := c.dial(ctx); err != nil { return nil, err } @@ -44,6 +46,9 @@ func (c *Conn) Create(ctx context.Context, filePath string, contents []byte) (to if c.factory.err != nil { return nil, c.factory.err } + if err := c.factory.getOperationError(Create, filePath); err != nil { + return nil, err + } // Get the parent dir. dir, file := path.Split(filePath) @@ -74,6 +79,8 @@ func (c *Conn) Create(ctx context.Context, filePath string, contents []byte) (to // Update is part of topo.Conn interface. 
func (c *Conn) Update(ctx context.Context, filePath string, contents []byte, version topo.Version) (topo.Version, error) { + c.factory.callstats.Add([]string{"Update"}, 1) + if err := c.dial(ctx); err != nil { return nil, err } @@ -88,6 +95,9 @@ func (c *Conn) Update(ctx context.Context, filePath string, contents []byte, ver if c.factory.err != nil { return nil, c.factory.err } + if err := c.factory.getOperationError(Update, filePath); err != nil { + return nil, err + } // Get the parent dir, we'll need it in case of creation. dir, file := path.Split(filePath) @@ -152,6 +162,8 @@ func (c *Conn) Update(ctx context.Context, filePath string, contents []byte, ver // Get is part of topo.Conn interface. func (c *Conn) Get(ctx context.Context, filePath string) ([]byte, topo.Version, error) { + c.factory.callstats.Add([]string{"Get"}, 1) + if err := c.dial(ctx); err != nil { return nil, nil, err } @@ -162,6 +174,9 @@ func (c *Conn) Get(ctx context.Context, filePath string) ([]byte, topo.Version, if c.factory.err != nil { return nil, nil, c.factory.err } + if err := c.factory.getOperationError(Get, filePath); err != nil { + return nil, nil, err + } // Get the node. n := c.factory.nodeByPath(c.cell, filePath) @@ -177,6 +192,8 @@ func (c *Conn) Get(ctx context.Context, filePath string) ([]byte, topo.Version, // List is part of the topo.Conn interface. func (c *Conn) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo, error) { + c.factory.callstats.Add([]string{"List"}, 1) + if err := c.dial(ctx); err != nil { return nil, err } @@ -187,6 +204,9 @@ func (c *Conn) List(ctx context.Context, filePathPrefix string) ([]topo.KVInfo, if c.factory.err != nil { return nil, c.factory.err } + if err := c.factory.getOperationError(List, filePathPrefix); err != nil { + return nil, err + } dir, file := path.Split(filePathPrefix) // Get the node to list. @@ -236,6 +256,8 @@ func gatherChildren(n *node, dirPath string) []topo.KVInfo { // Delete is part of topo.Conn interface. 
func (c *Conn) Delete(ctx context.Context, filePath string, version topo.Version) error { + c.factory.callstats.Add([]string{"Delete"}, 1) + if err := c.dial(ctx); err != nil { return err } @@ -246,6 +268,9 @@ func (c *Conn) Delete(ctx context.Context, filePath string, version topo.Version if c.factory.err != nil { return c.factory.err } + if err := c.factory.getOperationError(Delete, filePath); err != nil { + return err + } // Get the parent dir. dir, file := path.Split(filePath) diff --git a/go/vt/topo/memorytopo/lock.go b/go/vt/topo/memorytopo/lock.go index c15fb9099bb..d0943c7058d 100644 --- a/go/vt/topo/memorytopo/lock.go +++ b/go/vt/topo/memorytopo/lock.go @@ -42,11 +42,29 @@ type memoryTopoLockDescriptor struct { // TryLock is part of the topo.Conn interface. Its implementation is same as Lock func (c *Conn) TryLock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { + c.factory.callstats.Add([]string{"TryLock"}, 1) + + c.factory.mu.Lock() + err := c.factory.getOperationError(TryLock, dirPath) + c.factory.mu.Unlock() + if err != nil { + return nil, err + } + return c.Lock(ctx, dirPath, contents) } // Lock is part of the topo.Conn interface. 
func (c *Conn) Lock(ctx context.Context, dirPath, contents string) (topo.LockDescriptor, error) { + c.factory.callstats.Add([]string{"Lock"}, 1) + + c.factory.mu.Lock() + err := c.factory.getOperationError(Lock, dirPath) + c.factory.mu.Unlock() + if err != nil { + return nil, err + } + return c.lock(ctx, dirPath, contents) } @@ -112,7 +130,7 @@ func (ld *memoryTopoLockDescriptor) Unlock(ctx context.Context) error { } func (c *Conn) unlock(ctx context.Context, dirPath string) error { - if c.closed { + if c.closed.Load() { return ErrConnectionClosed } diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go index f24b2f6c89e..9d703a2869a 100644 --- a/go/vt/topo/memorytopo/memorytopo.go +++ b/go/vt/topo/memorytopo/memorytopo.go @@ -22,10 +22,13 @@ package memorytopo import ( "context" "errors" - "math/rand" + "math/rand/v2" + "regexp" "strings" "sync" + "sync/atomic" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" @@ -48,6 +51,25 @@ const ( UnreachableServerAddr = "unreachable" ) +// Operation is one of the operations defined by topo.Conn +type Operation int + +// The following is the list of topo.Conn operations +const ( + ListDir = Operation(iota) + Create + Update + Get + List + Delete + Lock + TryLock + Watch + WatchRecursive + NewLeaderParticipation + Close +) + // Factory is a memory-based implementation of topo.Factory. It // takes a file-system like approach, with directories at each level // being an actual directory node. This is meant to be closer to @@ -70,6 +92,18 @@ type Factory struct { // err is used for testing purposes to force queries / watches // to return the given error err error + // operationErrors is used for testing purposes to fake errors from + // operations and paths matching the spec + operationErrors map[Operation][]errorSpec + // callstats allows us to keep track of how many topo.Conn calls + // we make (Create, Get, Update, Delete, List, ListDir, etc). 
+ callstats *stats.CountersWithMultiLabels +} + +type errorSpec struct { + op Operation + pathPattern *regexp.Regexp + err error } // HasGlobalReadOnlyCell is part of the topo.Factory interface. @@ -105,6 +139,10 @@ func (f *Factory) SetError(err error) { } } +func (f *Factory) GetCallStats() *stats.CountersWithMultiLabels { + return f.callstats +} + // Lock blocks all requests to the topo and is exposed to allow tests to // simulate an unresponsive topo server func (f *Factory) Lock() { @@ -123,13 +161,13 @@ type Conn struct { factory *Factory cell string serverAddr string - closed bool + closed atomic.Bool } // dial returns immediately, unless the Conn points to the sentinel // UnreachableServerAddr, in which case it will block until the context expires. func (c *Conn) dial(ctx context.Context) error { - if c.closed { + if c.closed.Load() { return ErrConnectionClosed } if c.serverAddr == UnreachableServerAddr { @@ -141,7 +179,8 @@ func (c *Conn) dial(ctx context.Context) error { // Close is part of the topo.Conn interface. func (c *Conn) Close() { - c.closed = true + c.factory.callstats.Add([]string{"Close"}, 1) + c.closed.Store(true) } type watch struct { @@ -235,8 +274,10 @@ func (n *node) PropagateWatchError(err error) { // in case of a problem. 
func NewServerAndFactory(ctx context.Context, cells ...string) (*topo.Server, *Factory) { f := &Factory{ - cells: make(map[string]*node), - generation: uint64(rand.Int63n(1 << 60)), + cells: make(map[string]*node), + generation: uint64(rand.Int64N(1 << 60)), + callstats: stats.NewCountersWithMultiLabels("", "", []string{"Call"}), + operationErrors: make(map[Operation][]errorSpec), } f.cells[topo.GlobalCell] = f.newDirectory(topo.GlobalCell, nil) @@ -348,3 +389,24 @@ func (f *Factory) recursiveDelete(n *node) { f.recursiveDelete(parent) } } + +func (f *Factory) AddOperationError(op Operation, pathPattern string, err error) { + f.mu.Lock() + defer f.mu.Unlock() + + f.operationErrors[op] = append(f.operationErrors[op], errorSpec{ + op: op, + pathPattern: regexp.MustCompile(pathPattern), + err: err, + }) +} + +func (f *Factory) getOperationError(op Operation, path string) error { + specs := f.operationErrors[op] + for _, spec := range specs { + if spec.pathPattern.MatchString(path) { + return spec.err + } + } + return nil +} diff --git a/go/vt/topo/memorytopo/version.go b/go/vt/topo/memorytopo/version.go index 468ef6f2110..0cf8ab098d1 100644 --- a/go/vt/topo/memorytopo/version.go +++ b/go/vt/topo/memorytopo/version.go @@ -18,8 +18,6 @@ package memorytopo import ( "fmt" - - "vitess.io/vitess/go/vt/topo" ) // NodeVersion is the local topo.Version implementation @@ -28,13 +26,3 @@ type NodeVersion uint64 func (v NodeVersion) String() string { return fmt.Sprintf("%v", uint64(v)) } - -// VersionFromInt is used by old-style functions to create a proper -// Version: if version is -1, returns nil. Otherwise returns the -// NodeVersion object. 
-func VersionFromInt(version int64) topo.Version { - if version == -1 { - return nil - } - return NodeVersion(version) -} diff --git a/go/vt/topo/memorytopo/watch.go b/go/vt/topo/memorytopo/watch.go index 73b2d248434..dcb90a8f0ef 100644 --- a/go/vt/topo/memorytopo/watch.go +++ b/go/vt/topo/memorytopo/watch.go @@ -25,7 +25,9 @@ import ( // Watch is part of the topo.Conn interface. func (c *Conn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-chan *topo.WatchData, error) { - if c.closed { + c.factory.callstats.Add([]string{"Watch"}, 1) + + if c.closed.Load() { return nil, nil, ErrConnectionClosed } @@ -35,6 +37,9 @@ func (c *Conn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-c if c.factory.err != nil { return nil, nil, c.factory.err } + if err := c.factory.getOperationError(Watch, filePath); err != nil { + return nil, nil, err + } n := c.factory.nodeByPath(c.cell, filePath) if n == nil { @@ -75,7 +80,9 @@ func (c *Conn) Watch(ctx context.Context, filePath string) (*topo.WatchData, <-c // WatchRecursive is part of the topo.Conn interface. func (c *Conn) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.WatchDataRecursive, <-chan *topo.WatchDataRecursive, error) { - if c.closed { + c.factory.callstats.Add([]string{"WatchRecursive"}, 1) + + if c.closed.Load() { return nil, nil, ErrConnectionClosed } @@ -85,6 +92,9 @@ func (c *Conn) WatchRecursive(ctx context.Context, dirpath string) ([]*topo.Watc if c.factory.err != nil { return nil, nil, c.factory.err } + if err := c.factory.getOperationError(WatchRecursive, dirpath); err != nil { + return nil, nil, err + } n := c.factory.getOrCreatePath(c.cell, dirpath) if n == nil { diff --git a/go/vt/topo/routing_rules_lock.go b/go/vt/topo/routing_rules_lock.go new file mode 100644 index 00000000000..db4fa63bc9b --- /dev/null +++ b/go/vt/topo/routing_rules_lock.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topo + +import ( + "context" + "fmt" +) + +// RoutingRulesLock is a wrapper over TopoLock, to serialize updates to routing rules. +type RoutingRulesLock struct { + *TopoLock +} + +func NewRoutingRulesLock(ctx context.Context, ts *Server, name string) (*RoutingRulesLock, error) { + return &RoutingRulesLock{ + TopoLock: &TopoLock{ + Path: RoutingRulesPath, + Name: fmt.Sprintf("RoutingRules::%s", name), + ts: ts, + }, + }, nil +} diff --git a/go/vt/topo/routing_rules_lock_test.go b/go/vt/topo/routing_rules_lock_test.go new file mode 100644 index 00000000000..23027517019 --- /dev/null +++ b/go/vt/topo/routing_rules_lock_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package topo_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +// TestKeyspaceRoutingRulesLock tests that the lock is acquired and released correctly. +func TestKeyspaceRoutingRulesLock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + currentTopoLockTimeout := topo.LockTimeout + topo.LockTimeout = testLockTimeout + defer func() { + topo.LockTimeout = currentTopoLockTimeout + }() + + err := ts.CreateKeyspaceRoutingRules(ctx, &vschemapb.KeyspaceRoutingRules{}) + require.NoError(t, err) + + lock, err := topo.NewRoutingRulesLock(ctx, ts, "ks1") + require.NoError(t, err) + _, unlock, err := lock.Lock(ctx) + require.NoError(t, err) + + // re-acquiring the lock should fail + _, _, err = lock.Lock(ctx) + require.Error(t, err) + + unlock(&err) + + // re-acquiring the lock should succeed + _, _, err = lock.Lock(ctx) + require.NoError(t, err) +} diff --git a/go/vt/topo/server.go b/go/vt/topo/server.go index 1995e8b6ec4..6e5fadd8b97 100644 --- a/go/vt/topo/server.go +++ b/go/vt/topo/server.go @@ -45,6 +45,7 @@ package topo import ( "context" "fmt" + "path" "sync" "github.com/spf13/pflag" @@ -67,29 +68,32 @@ const ( // Filenames for all object types. 
const ( - CellInfoFile = "CellInfo" - CellsAliasFile = "CellsAlias" - KeyspaceFile = "Keyspace" - ShardFile = "Shard" - VSchemaFile = "VSchema" - ShardReplicationFile = "ShardReplication" - TabletFile = "Tablet" - SrvVSchemaFile = "SrvVSchema" - SrvKeyspaceFile = "SrvKeyspace" - RoutingRulesFile = "RoutingRules" - ExternalClustersFile = "ExternalClusters" - ShardRoutingRulesFile = "ShardRoutingRules" + CellInfoFile = "CellInfo" + CellsAliasFile = "CellsAlias" + KeyspaceFile = "Keyspace" + ShardFile = "Shard" + VSchemaFile = "VSchema" + ShardReplicationFile = "ShardReplication" + TabletFile = "Tablet" + SrvVSchemaFile = "SrvVSchema" + SrvKeyspaceFile = "SrvKeyspace" + RoutingRulesFile = "RoutingRules" + ExternalClustersFile = "ExternalClusters" + ShardRoutingRulesFile = "ShardRoutingRules" + CommonRoutingRulesFile = "Rules" ) // Path for all object types. const ( - CellsPath = "cells" - CellsAliasesPath = "cells_aliases" - KeyspacesPath = "keyspaces" - ShardsPath = "shards" - TabletsPath = "tablets" - MetadataPath = "metadata" - ExternalClusterVitess = "vitess" + CellsPath = "cells" + CellsAliasesPath = "cells_aliases" + KeyspacesPath = "keyspaces" + ShardsPath = "shards" + TabletsPath = "tablets" + MetadataPath = "metadata" + ExternalClusterVitess = "vitess" + RoutingRulesPath = "routing_rules" + KeyspaceRoutingRulesPath = "keyspace" ) // Factory is a factory interface to create Conn objects. @@ -418,3 +422,8 @@ func (ts *Server) IsReadOnly() (bool, error) { return true, nil } + +// GetKeyspaceRoutingRulesPath returns the path to the keyspace routing rules file in the topo. 
+func (ts *Server) GetKeyspaceRoutingRulesPath() string { + return path.Join(RoutingRulesPath, KeyspaceRoutingRulesPath, CommonRoutingRulesFile) +} diff --git a/go/vt/topo/shard.go b/go/vt/topo/shard.go index 183ed409bbb..b9554bf789f 100644 --- a/go/vt/topo/shard.go +++ b/go/vt/topo/shard.go @@ -77,39 +77,6 @@ func removeCellsFromList(toRemove, fullList []string) []string { return leftoverCells } -// removeCells will remove the cells from the provided list. It returns -// the new list, and a boolean that indicates the returned list is empty. -func removeCells(cells, toRemove, fullList []string) ([]string, bool) { - // The assumption here is we already migrated something, - // and we're reverting that part. So we're gonna remove - // records only. - leftoverCells := make([]string, 0, len(cells)) - if len(cells) == 0 { - // we migrated all the cells already, take the full list - // and remove all the ones we're not reverting - for _, cell := range fullList { - if !InCellList(cell, toRemove) { - leftoverCells = append(leftoverCells, cell) - } - } - } else { - // we migrated a subset of the cells, - // remove the ones we're reverting - for _, cell := range cells { - if !InCellList(cell, toRemove) { - leftoverCells = append(leftoverCells, cell) - } - } - } - - if len(leftoverCells) == 0 { - // we don't have any cell left, we need to clear this record - return nil, true - } - - return leftoverCells, false -} - // IsShardUsingRangeBasedSharding returns true if the shard name // implies it is using range based sharding. 
func IsShardUsingRangeBasedSharding(shard string) bool { @@ -223,12 +190,7 @@ func (ts *Server) GetShard(ctx context.Context, keyspace, shard string) (*ShardI if err = value.UnmarshalVT(data); err != nil { return nil, vterrors.Wrapf(err, "GetShard(%v,%v): bad shard data", keyspace, shard) } - return &ShardInfo{ - keyspace: keyspace, - shardName: shard, - version: version, - Shard: value, - }, nil + return NewShardInfo(keyspace, shard, value, version), nil } // updateShard updates the shard data, with the right version. @@ -314,7 +276,14 @@ func (ts *Server) CreateShard(ctx context.Context, keyspace, shard string) (err // Set primary as serving only if its keyrange doesn't overlap // with other shards. This applies to unsharded keyspaces also value.IsPrimaryServing = true - sis, err := ts.FindAllShardsInKeyspace(ctx, keyspace) + sis, err := ts.FindAllShardsInKeyspace(ctx, keyspace, &FindAllShardsInKeyspaceOptions{ + // Assume that CreateShard may be called by many vttablets concurrently + // in a large, sharded keyspace. Do not apply concurrency to avoid + // overwhelming the toposerver. + // + // See: https://github.com/vitessio/vitess/pull/5436. 
+ Concurrency: 1, + }) if err != nil && !IsErrType(err, NoNode) { return err } @@ -659,7 +628,7 @@ func (ts *Server) GetTabletMapForShardByCell(ctx context.Context, keyspace, shar // get the tablets for the cells we were able to reach, forward // ErrPartialResult from FindAllTabletAliasesInShard - result, gerr := ts.GetTabletMap(ctx, aliases) + result, gerr := ts.GetTabletMap(ctx, aliases, nil) if gerr == nil && err != nil { gerr = err } diff --git a/go/vt/topo/shard_test.go b/go/vt/topo/shard_test.go index 2c0b9082816..9afc8d0ea78 100644 --- a/go/vt/topo/shard_test.go +++ b/go/vt/topo/shard_test.go @@ -77,24 +77,6 @@ func TestRemoveCellsFromList(t *testing.T) { } } -func TestRemoveCells(t *testing.T) { - var cells []string - allCells := []string{"first", "second", "third"} - - // remove from empty list should return allCells - what we remove - var emptyResult bool - cells, emptyResult = removeCells(cells, []string{"second"}, allCells) - if emptyResult || !reflect.DeepEqual(cells, []string{"first", "third"}) { - t.Fatalf("removeCells(full)-second failed: got %v", cells) - } - - // removethe next two cells, should return empty list - cells, emptyResult = removeCells(cells, []string{"first", "third"}, allCells) - if !emptyResult { - t.Fatalf("removeCells(full)-first-third is not empty: %v", cells) - } -} - func lockedKeyspaceContext(keyspace string) context.Context { ctx := context.Background() return context.WithValue(ctx, locksKey, &locksInfo{ @@ -158,69 +140,148 @@ func TestUpdateSourcePrimaryDeniedTables(t *testing.T) { func TestUpdateSourceDeniedTables(t *testing.T) { si := NewShardInfo("ks", "sh", &topodatapb.Shard{}, nil) - - // check we enforce the keyspace lock ctx := context.Background() - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, nil, false, nil); err == nil || err.Error() != "keyspace ks is not locked (no locksInfo)" { - t.Fatalf("unlocked keyspace produced wrong error: %v", err) + ctxWithLock := lockedKeyspaceContext("ks") + + 
type testCase struct { + name string + ctx context.Context + tabletType topodatapb.TabletType + cells []string + remove bool + tables []string + + wantError string + wantTabletControl *topodatapb.Shard_TabletControl } - ctx = lockedKeyspaceContext("ks") - // add one cell - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + // These tests update the state of the shard tablet controls, so subsequent tests + // depend on the cumulative state from the previous tests. + testCases := []testCase{ { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"first"}, - DeniedTables: []string{"t1", "t2"}, - }, - }) { - t.Fatalf("one cell add failed: %v", si) - } - - // remove that cell, going back - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, true, nil); err != nil || len(si.TabletControls) != 0 { - t.Fatalf("going back should have remove the record: %v", si) - } + name: "enforce keyspace lock", + ctx: ctx, + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first"}, - // re-add a cell, then another with different table list to - // make sure it fails - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first"}, false, []string{"t1", "t2"}); err != nil { - t.Fatalf("one cell add failed: %v", si) - } - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t2", "t3"}); err == nil || err.Error() != "trying to use two different sets of denied tables for shard ks/sh: [t1 t2] and [t2 t3]" { - t.Fatalf("different table list should fail: %v", err) - } - // add another cell, see the list grow - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ + 
wantError: "keyspace ks is not locked (no locksInfo)", + }, { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"first", "second"}, - DeniedTables: []string{"t1", "t2"}, + name: "add one cell", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first"}, + tables: []string{"t1", "t2"}, + wantTabletControl: &topodatapb.Shard_TabletControl{ + TabletType: topodatapb.TabletType_RDONLY, + Cells: []string{"first"}, + DeniedTables: []string{"t1", "t2"}, + }, }, - }) { - t.Fatalf("second cell add failed: %v", si) - } - - // add all cells, see the list grow to all - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"first", "second", "third"}, false, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"first", "second", "third"}, - DeniedTables: []string{"t1", "t2"}, + name: "remove the only cell", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first"}, + remove: true, + }, + { + name: "re-add cell", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first"}, + tables: []string{"t1", "t2"}, + wantTabletControl: &topodatapb.Shard_TabletControl{ + TabletType: topodatapb.TabletType_RDONLY, + Cells: []string{"first"}, + DeniedTables: []string{"t1", "t2"}, + }, + }, + { + name: "re-add existing cell, different tables, should fail", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first"}, + tables: []string{"t3"}, + wantError: "trying to use two different sets of denied tables for shard", + }, + { + name: "add all cells, see cell list grow to all", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first", "second", "third"}, + tables: []string{"t1", "t2"}, + wantTabletControl: &topodatapb.Shard_TabletControl{ + TabletType: topodatapb.TabletType_RDONLY, + Cells: []string{"first", "second", "third"}, + DeniedTables: []string{"t1", "t2"}, + }, + }, + { + name: 
"remove one cell", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"second"}, + remove: true, + tables: []string{"t1", "t2"}, + wantTabletControl: &topodatapb.Shard_TabletControl{ + TabletType: topodatapb.TabletType_RDONLY, + Cells: []string{"first", "third"}, + DeniedTables: []string{"t1", "t2"}, + }, + }, + { + name: "add replica tablet type", + tabletType: topodatapb.TabletType_REPLICA, + cells: []string{"first"}, + tables: []string{"t1", "t2"}, + wantTabletControl: &topodatapb.Shard_TabletControl{ + TabletType: topodatapb.TabletType_REPLICA, + Cells: []string{"first"}, + DeniedTables: []string{"t1", "t2"}, + }, + }, + { + name: "confirm rdonly still stays the same, after replica was added", + tabletType: topodatapb.TabletType_RDONLY, + wantTabletControl: &topodatapb.Shard_TabletControl{ + TabletType: topodatapb.TabletType_RDONLY, + Cells: []string{"first", "third"}, + DeniedTables: []string{"t1", "t2"}, + }, + }, + { + name: "remove rdonly entry", + tabletType: topodatapb.TabletType_RDONLY, + cells: []string{"first", "third"}, + remove: true, + tables: []string{"t1", "t2"}, }, - }) { - t.Fatalf("all cells add failed: %v", si) - } - - // remove one cell from the full list - if err := si.UpdateDeniedTables(ctx, topodatapb.TabletType_RDONLY, []string{"second"}, true, []string{"t1", "t2"}); err != nil || !reflect.DeepEqual(si.TabletControls, []*topodatapb.Shard_TabletControl{ { - TabletType: topodatapb.TabletType_RDONLY, - Cells: []string{"first", "third"}, - DeniedTables: []string{"t1", "t2"}, + name: "remove replica entry", + tabletType: topodatapb.TabletType_REPLICA, + cells: []string{"first", "third"}, + remove: true, + tables: []string{"t1", "t2"}, }, - }) { - t.Fatalf("one cell removal from all failed: %v", si) + } + + for _, tcase := range testCases { + t.Run(tcase.name, func(t *testing.T) { + if tcase.ctx == nil { + tcase.ctx = ctxWithLock + } + var err error + if tcase.tables != nil || tcase.cells != nil { + err = 
si.UpdateDeniedTables(tcase.ctx, tcase.tabletType, tcase.cells, tcase.remove, tcase.tables) + } + if tcase.wantError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tcase.wantError) + return + } + require.NoError(t, err) + if tcase.wantTabletControl == nil { + require.Nil(t, si.GetTabletControl(tcase.tabletType)) + } else { + require.EqualValuesf(t, tcase.wantTabletControl, si.GetTabletControl(tcase.tabletType), + "want: %v, got: %v", tcase.wantTabletControl, si.GetTabletControl(tcase.tabletType)) + } + }) } } diff --git a/go/vt/topo/srv_keyspace.go b/go/vt/topo/srv_keyspace.go index 8054764eeff..11e49f3c569 100644 --- a/go/vt/topo/srv_keyspace.go +++ b/go/vt/topo/srv_keyspace.go @@ -137,7 +137,7 @@ func (ts *Server) GetShardServingCells(ctx context.Context, si *ShardInfo) (serv var mu sync.Mutex for _, cell := range cells { wg.Add(1) - go func(cell, keyspace string) { + go func(cell string) { defer wg.Done() srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, si.keyspace) switch { @@ -166,7 +166,7 @@ func (ts *Server) GetShardServingCells(ctx context.Context, si *ShardInfo) (serv rec.RecordError(err) return } - }(cell, si.Keyspace()) + }(cell) } wg.Wait() if rec.HasErrors() { @@ -188,7 +188,7 @@ func (ts *Server) GetShardServingTypes(ctx context.Context, si *ShardInfo) (serv var mu sync.Mutex for _, cell := range cells { wg.Add(1) - go func(cell, keyspace string) { + go func(cell string) { defer wg.Done() srvKeyspace, err := ts.GetSrvKeyspace(ctx, cell, si.keyspace) switch { @@ -223,7 +223,7 @@ func (ts *Server) GetShardServingTypes(ctx context.Context, si *ShardInfo) (serv rec.RecordError(err) return } - }(cell, si.Keyspace()) + }(cell) } wg.Wait() if rec.HasErrors() { @@ -613,10 +613,8 @@ func (ts *Server) MigrateServedType(ctx context.Context, keyspace string, shards case IsErrType(err, NoNode): // Assuming this cell is not active, nothing to do. 
default: - if err != nil { - rec.RecordError(err) - return - } + rec.RecordError(err) + return } }(cell, keyspace) } diff --git a/go/vt/topo/srv_vschema.go b/go/vt/topo/srv_vschema.go index 453e8c5f8d0..c118253e8a8 100644 --- a/go/vt/topo/srv_vschema.go +++ b/go/vt/topo/srv_vschema.go @@ -204,6 +204,12 @@ func (ts *Server) RebuildSrvVSchema(ctx context.Context, cells []string) error { } srvVSchema.ShardRoutingRules = srr + krr, err := ts.GetKeyspaceRoutingRules(ctx) + if err != nil { + return fmt.Errorf("GetKeyspaceRoutingRules failed: %v", err) + } + srvVSchema.KeyspaceRoutingRules = krr + // now save the SrvVSchema in all cells in parallel for _, cell := range cells { wg.Add(1) diff --git a/go/vt/topo/tablet.go b/go/vt/topo/tablet.go index d17235f6948..671a0f43905 100644 --- a/go/vt/topo/tablet.go +++ b/go/vt/topo/tablet.go @@ -24,6 +24,8 @@ import ( "sync" "time" + "golang.org/x/sync/semaphore" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/key" @@ -75,29 +77,6 @@ func IsRunningQueryService(tt topodatapb.TabletType) bool { return false } -// IsSubjectToLameduck returns if a tablet is subject to being -// lameduck. Lameduck is a transition period where we are still -// allowed to serve, but we tell the clients we are going away -// soon. Typically, a vttablet will still serve, but broadcast a -// non-serving state through its health check. then vtgate will catch -// that non-serving state, and stop sending queries. -// -// Primaries are not subject to lameduck, as we usually want to transition -// them as fast as possible. -// -// Replica and rdonly will use lameduck when going from healthy to -// unhealthy (either because health check fails, or they're shutting down). -// -// Other types are probably not serving user visible traffic, so they -// need to transition as fast as possible too. 
-func IsSubjectToLameduck(tt topodatapb.TabletType) bool { - switch tt { - case topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY: - return true - } - return false -} - // IsRunningUpdateStream returns if a tablet is running the update stream // RPC service. func IsRunningUpdateStream(tt topodatapb.TabletType) bool { @@ -132,36 +111,6 @@ func NewTablet(uid uint32, cell, host string) *topodatapb.Tablet { } } -// TabletEquality returns true iff two Tablet are representing the same tablet -// process: same uid/cell, running on the same host / ports. -func TabletEquality(left, right *topodatapb.Tablet) bool { - if !topoproto.TabletAliasEqual(left.Alias, right.Alias) { - return false - } - if left.Hostname != right.Hostname { - return false - } - if left.MysqlHostname != right.MysqlHostname { - return false - } - if left.MysqlPort != right.MysqlPort { - return false - } - if len(left.PortMap) != len(right.PortMap) { - return false - } - for key, lvalue := range left.PortMap { - rvalue, ok := right.PortMap[key] - if !ok { - return false - } - if lvalue != rvalue { - return false - } - } - return true -} - // TabletInfo is the container for a Tablet, read from the topology server. 
type TabletInfo struct { version Version // node version - used to prevent stomping concurrent writes @@ -227,7 +176,7 @@ func NewTabletInfo(tablet *topodatapb.Tablet, version Version) *TabletInfo { func (ts *Server) GetTablet(ctx context.Context, alias *topodatapb.TabletAlias) (*TabletInfo, error) { conn, err := ts.ConnForCell(ctx, alias.Cell) if err != nil { - log.Errorf("Unable to get connection for cell %s", alias.Cell) + log.Errorf("unable to get connection for cell %q: %v", alias.Cell, err) return nil, err } @@ -238,7 +187,7 @@ func (ts *Server) GetTablet(ctx context.Context, alias *topodatapb.TabletAlias) tabletPath := path.Join(TabletsPath, topoproto.TabletAliasString(alias), TabletFile) data, version, err := conn.Get(ctx, tabletPath) if err != nil { - log.Errorf("unable to connect to tablet %s: %s", alias, err) + log.Errorf("unable to connect to tablet %q: %v", alias, err) return nil, err } tablet := &topodatapb.Tablet{} @@ -282,10 +231,18 @@ func (ts *Server) GetTabletAliasesByCell(ctx context.Context, cell string) ([]*t return result, nil } +// GetTabletsByCellOptions controls the behavior of +// Server.FindAllShardsInKeyspace. +type GetTabletsByCellOptions struct { + // Concurrency controls the maximum number of concurrent calls to GetTablet. + Concurrency int +} + // GetTabletsByCell returns all the tablets in the cell. // It returns ErrNoNode if the cell doesn't exist. +// It returns ErrPartialResult if some tablets couldn't be read. The results in the slice are incomplete. // It returns (nil, nil) if the cell exists, but there are no tablets in it. -func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string) ([]*TabletInfo, error) { +func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string, opt *GetTabletsByCellOptions) ([]*TabletInfo, error) { // If the cell doesn't exist, this will return ErrNoNode. 
cellConn, err := ts.ConnForCell(ctx, cellAlias) if err != nil { @@ -293,10 +250,12 @@ func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string) ([]*Ta } listResults, err := cellConn.List(ctx, TabletsPath) if err != nil || len(listResults) == 0 { - // Currently the ZooKeeper and Memory topo implementations do not support scans + // Currently the ZooKeeper implementation does not support scans // so we fall back to the more costly method of fetching the tablets one by one. - if IsErrType(err, NoImplementation) { - return ts.GetTabletsIndividuallyByCell(ctx, cellAlias) + // In the etcd case, it is possible that the response is too large. We also fall + // back to fetching the tablets one by one in that case. + if IsErrType(err, NoImplementation) || IsErrType(err, ResourceExhausted) { + return ts.GetTabletsIndividuallyByCell(ctx, cellAlias, opt) } if IsErrType(err, NoNode) { return nil, nil @@ -319,8 +278,9 @@ func (ts *Server) GetTabletsByCell(ctx context.Context, cellAlias string) ([]*Ta // GetTabletsIndividuallyByCell returns a sorted list of tablets for topo servers that do not // directly support the topoConn.List() functionality. // It returns ErrNoNode if the cell doesn't exist. +// It returns ErrPartialResult if some tablets couldn't be read. The results in the slice are incomplete. // It returns (nil, nil) if the cell exists, but there are no tablets in it. -func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string) ([]*TabletInfo, error) { +func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string, opt *GetTabletsByCellOptions) ([]*TabletInfo, error) { // If the cell doesn't exist, this will return ErrNoNode. 
aliases, err := ts.GetTabletAliasesByCell(ctx, cell) if err != nil { @@ -328,10 +288,14 @@ func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string) } sort.Sort(topoproto.TabletAliasList(aliases)) - tabletMap, err := ts.GetTabletMap(ctx, aliases) + var partialResultErr error + tabletMap, err := ts.GetTabletMap(ctx, aliases, opt) if err != nil { - // we got another error than topo.ErrNoNode - return nil, err + if IsErrType(err, PartialResult) { + partialResultErr = err + } else { + return nil, err + } } tablets := make([]*TabletInfo, 0, len(aliases)) for _, tabletAlias := range aliases { @@ -345,7 +309,7 @@ func (ts *Server) GetTabletsIndividuallyByCell(ctx context.Context, cell string) } } - return tablets, nil + return tablets, partialResultErr } // UpdateTablet updates the tablet data only - not associated replication paths. @@ -443,21 +407,20 @@ func (ts *Server) CreateTablet(ctx context.Context, tablet *topodatapb.Tablet) e return err } tabletPath := path.Join(TabletsPath, topoproto.TabletAliasString(tablet.Alias), TabletFile) - if _, err = conn.Create(ctx, tabletPath, data); err != nil { + if _, err := conn.Create(ctx, tabletPath, data); err != nil { return err } - if updateErr := UpdateTabletReplicationData(ctx, ts, tablet); updateErr != nil { - return updateErr + if err := UpdateTabletReplicationData(ctx, ts, tablet); err != nil { + return err } - if err == nil { - event.Dispatch(&events.TabletChange{ - Tablet: tablet, - Status: "created", - }) - } - return err + event.Dispatch(&events.TabletChange{ + Tablet: tablet, + Status: "created", + }) + + return nil } // DeleteTablet wraps the underlying conn.Delete @@ -503,41 +466,61 @@ func DeleteTabletReplicationData(ctx context.Context, ts *Server, tablet *topoda } // GetTabletMap tries to read all the tablets in the provided list, -// and returns them all in a map. -// If error is ErrPartialResult, the results in the dictionary are +// and returns them in a map. 
+// If error is ErrPartialResult, the results in the map are // incomplete, meaning some tablets couldn't be read. // The map is indexed by topoproto.TabletAliasString(tablet alias). -func (ts *Server) GetTabletMap(ctx context.Context, tabletAliases []*topodatapb.TabletAlias) (map[string]*TabletInfo, error) { +func (ts *Server) GetTabletMap(ctx context.Context, tabletAliases []*topodatapb.TabletAlias, opt *GetTabletsByCellOptions) (map[string]*TabletInfo, error) { span, ctx := trace.NewSpan(ctx, "topo.GetTabletMap") span.Annotate("num_tablets", len(tabletAliases)) defer span.Finish() - wg := sync.WaitGroup{} - mutex := sync.Mutex{} + var ( + mu sync.Mutex + wg sync.WaitGroup + tabletMap = make(map[string]*TabletInfo) + returnErr error + ) - tabletMap := make(map[string]*TabletInfo) - var someError error + concurrency := DefaultConcurrency + if opt != nil && opt.Concurrency > 0 { + concurrency = opt.Concurrency + } + var sem = semaphore.NewWeighted(int64(concurrency)) for _, tabletAlias := range tabletAliases { wg.Add(1) go func(tabletAlias *topodatapb.TabletAlias) { defer wg.Done() + if err := sem.Acquire(ctx, 1); err != nil { + // Only happens if context is cancelled. + mu.Lock() + defer mu.Unlock() + log.Warningf("%v: %v", tabletAlias, err) + // We only need to set this on the first error. + if returnErr == nil { + returnErr = NewError(PartialResult, tabletAlias.GetCell()) + } + return + } tabletInfo, err := ts.GetTablet(ctx, tabletAlias) - mutex.Lock() + sem.Release(1) + mu.Lock() + defer mu.Unlock() if err != nil { log.Warningf("%v: %v", tabletAlias, err) // There can be data races removing nodes - ignore them for now. - if !IsErrType(err, NoNode) { - someError = NewError(PartialResult, "") + // We only need to set this on first error. 
+ if returnErr == nil && !IsErrType(err, NoNode) { + returnErr = NewError(PartialResult, tabletAlias.GetCell()) } } else { tabletMap[topoproto.TabletAliasString(tabletAlias)] = tabletInfo } - mutex.Unlock() }(tabletAlias) } wg.Wait() - return tabletMap, someError + return tabletMap, returnErr } // InitTablet creates or updates a tablet. If no parent is specified diff --git a/go/vt/topo/tablet_test.go b/go/vt/topo/tablet_test.go new file mode 100644 index 00000000000..3a0153a11b5 --- /dev/null +++ b/go/vt/topo/tablet_test.go @@ -0,0 +1,173 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topo_test + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" +) + +// Test various cases of calls to GetTabletsByCell. +// GetTabletsByCell first tries to get all the tablets using List. +// If the response is too large, we will get an error, and fall back to one tablet at a time. +func TestServerGetTabletsByCell(t *testing.T) { + tests := []struct { + name string + tablets int + opt *topo.GetTabletsByCellOptions + listError error + }{ + { + name: "negative concurrency", + tablets: 1, + // Ensure this doesn't panic. 
+ opt: &topo.GetTabletsByCellOptions{Concurrency: -1}, + }, + { + name: "single", + tablets: 1, + // Make sure the defaults apply as expected. + opt: nil, + }, + { + name: "multiple", + // should work with more than 1 tablet + tablets: 32, + opt: &topo.GetTabletsByCellOptions{Concurrency: 8}, + }, + { + name: "multiple with list error", + // should work with more than 1 tablet when List returns an error + tablets: 32, + opt: &topo.GetTabletsByCellOptions{Concurrency: 8}, + listError: topo.NewError(topo.ResourceExhausted, ""), + }, + } + + const cell = "zone1" + const keyspace = "keyspace" + const shard = "shard" + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx, cell) + defer ts.Close() + if tt.listError != nil { + factory.AddOperationError(memorytopo.List, ".*", tt.listError) + } + + // Create an ephemeral keyspace and generate shard records within + // the keyspace to fetch later. + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablets := make([]*topo.TabletInfo, tt.tablets) + + for i := 0; i < tt.tablets; i++ { + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cell, + Uid: uint32(i), + }, + Hostname: "host1", + PortMap: map[string]int32{ + "vt": int32(i), + }, + Keyspace: keyspace, + Shard: shard, + } + tInfo := &topo.TabletInfo{Tablet: tablet} + tablets[i] = tInfo + require.NoError(t, ts.CreateTablet(ctx, tablet)) + } + + // Verify that we return a complete list of tablets and that each + // tablet matches what we expect. 
+ out, err := ts.GetTabletsByCell(ctx, cell, tt.opt) + require.NoError(t, err) + require.Len(t, out, tt.tablets) + + for i, tab := range tablets { + require.Equal(t, tab.Tablet, tablets[i].Tablet) + } + }) + } +} + +func TestServerGetTabletsByCellPartialResults(t *testing.T) { + const cell = "zone1" + const keyspace = "keyspace" + const shard = "shard" + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts, factory := memorytopo.NewServerAndFactory(ctx, cell) + defer ts.Close() + + // Create an ephemeral keyspace and generate shard records within + // the keyspace to fetch later. + require.NoError(t, ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{})) + require.NoError(t, ts.CreateShard(ctx, keyspace, shard)) + + tablets := make([]*topo.TabletInfo, 3) + + for i := 0; i < len(tablets); i++ { + tablet := &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: cell, + Uid: uint32(i), + }, + Hostname: "host1", + PortMap: map[string]int32{ + "vt": int32(i), + }, + Keyspace: keyspace, + Shard: shard, + } + tInfo := &topo.TabletInfo{Tablet: tablet} + tablets[i] = tInfo + require.NoError(t, ts.CreateTablet(ctx, tablet)) + } + + // Force fallback to getting tablets individually. + factory.AddOperationError(memorytopo.List, ".*", topo.NewError(topo.NoImplementation, "List not supported")) + + // Cause the Get for the second tablet to fail. + factory.AddOperationError(memorytopo.Get, "tablets/zone1-0000000001/Tablet", errors.New("fake error")) + + // Verify that we return a partial list of tablets and that each + // tablet matches what we expect. 
+ out, err := ts.GetTabletsByCell(ctx, cell, nil) + assert.Error(t, err) + assert.True(t, topo.IsErrType(err, topo.PartialResult), "Not a partial result: %v", err) + assert.Len(t, out, 2) + assert.True(t, proto.Equal(tablets[0].Tablet, out[0].Tablet), "Got: %v, want %v", tablets[0].Tablet, out[0].Tablet) + assert.True(t, proto.Equal(tablets[2].Tablet, out[1].Tablet), "Got: %v, want %v", tablets[2].Tablet, out[1].Tablet) +} diff --git a/go/vt/topo/test/keyspace.go b/go/vt/topo/test/keyspace.go index 0458e7fd2d7..9c7b99d016e 100644 --- a/go/vt/topo/test/keyspace.go +++ b/go/vt/topo/test/keyspace.go @@ -61,20 +61,7 @@ func checkKeyspace(t *testing.T, ctx context.Context, ts *topo.Server) { t.Errorf("GetKeyspaces: want %v, got %v", []string{"test_keyspace"}, keyspaces) } - k := &topodatapb.Keyspace{ - ServedFroms: []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_REPLICA, - Cells: []string{"c1", "c2"}, - Keyspace: "test_keyspace3", - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Cells: nil, - Keyspace: "test_keyspace3", - }, - }, - } + k := &topodatapb.Keyspace{} if err := ts.CreateKeyspace(ctx, "test_keyspace2", k); err != nil { t.Errorf("CreateKeyspace: %v", err) } diff --git a/go/vt/topo/test/serving.go b/go/vt/topo/test/serving.go index dd00f3da370..62cbca99a99 100644 --- a/go/vt/topo/test/serving.go +++ b/go/vt/topo/test/serving.go @@ -50,12 +50,6 @@ func checkSrvKeyspace(t *testing.T, ctx context.Context, ts *topo.Server) { }, }, }, - ServedFrom: []*topodatapb.SrvKeyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_REPLICA, - Keyspace: "other_keyspace", - }, - }, } if err := ts.UpdateSrvKeyspace(ctx, LocalCellName, "test_keyspace", srvKeyspace); err != nil { t.Errorf("UpdateSrvKeyspace(1): %v", err) diff --git a/go/vt/topo/test/shard.go b/go/vt/topo/test/shard.go index b5c92c4a3ec..270b236b98d 100644 --- a/go/vt/topo/test/shard.go +++ b/go/vt/topo/test/shard.go @@ -22,7 +22,6 @@ import ( "time" 
"github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/topo" @@ -82,13 +81,23 @@ func checkShard(t *testing.T, ctx context.Context, ts *topo.Server) { t.Fatalf("shard.PrimaryAlias = %v, want %v", si.Shard.PrimaryAlias, other) } + // Test FindAllShardsInKeyspace. + require.NoError(t, err) + _, err = ts.FindAllShardsInKeyspace(ctx, "test_keyspace", nil) + require.NoError(t, err) + + // Test GetServingShards. + require.NoError(t, err) + _, err = ts.GetServingShards(ctx, "test_keyspace") + require.NoError(t, err) + // test GetShardNames - shards, err := ts.GetShardNames(ctx, "test_keyspace") + shardNames, err := ts.GetShardNames(ctx, "test_keyspace") if err != nil { t.Errorf("GetShardNames: %v", err) } - if len(shards) != 1 || shards[0] != "b0-c0" { - t.Errorf(`GetShardNames: want [ "b0-c0" ], got %v`, shards) + if len(shardNames) != 1 || shardNames[0] != "b0-c0" { + t.Errorf(`GetShardNames: want [ "b0-c0" ], got %v`, shardNames) } if _, err := ts.GetShardNames(ctx, "test_keyspace666"); !topo.IsErrType(err, topo.NoNode) { diff --git a/go/vt/topo/test/trylock.go b/go/vt/topo/test/trylock.go index 4519d1bcaab..c553e74bb61 100644 --- a/go/vt/topo/test/trylock.go +++ b/go/vt/topo/test/trylock.go @@ -138,7 +138,7 @@ func checkTryLockTimeout(ctx context.Context, t *testing.T, conn topo.Conn) { // test we can't unlock again if err := lockDescriptor.Unlock(ctx); err == nil { - require.Fail(t, "Unlock failed", err.Error()) + require.Fail(t, "Unlock succeeded but should not have") } } diff --git a/go/vt/topo/topo_lock.go b/go/vt/topo/topo_lock.go new file mode 100644 index 00000000000..ffd732fff36 --- /dev/null +++ b/go/vt/topo/topo_lock.go @@ -0,0 +1,169 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topo + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// ITopoLock is the interface for a lock that can be used to lock a key in the topology server. +// The lock is associated with a context and can be unlocked by calling the returned function. +// Note that we don't need an Unlock method on the interface, as the Lock() function +// returns a function that can be used to unlock the lock. +type ITopoLock interface { + Lock(ctx context.Context) (context.Context, func(*error), error) +} + +type TopoLock struct { + Path string // topo path to lock + Name string // name, for logging purposes + + ts *Server +} + +var _ ITopoLock = (*TopoLock)(nil) + +func (ts *Server) NewTopoLock(path, name string) *TopoLock { + return &TopoLock{ + ts: ts, + Path: path, + Name: name, + } +} + +func (tl *TopoLock) String() string { + return fmt.Sprintf("TopoLock{Path: %v, Name: %v}", tl.Path, tl.Name) +} + +// perform the topo lock operation +func (l *Lock) lock(ctx context.Context, ts *Server, path string) (LockDescriptor, error) { + ctx, cancel := context.WithTimeout(ctx, LockTimeout) + defer cancel() + span, ctx := trace.NewSpan(ctx, "TopoServer.Lock") + span.Annotate("action", l.Action) + span.Annotate("path", path) + defer span.Finish() + + j, err := l.ToJSON() + if err != nil { + return nil, err + } + return ts.globalCell.Lock(ctx, path, j) +} + +// unlock unlocks a previously locked key. 
+func (l *Lock) unlock(ctx context.Context, path string, lockDescriptor LockDescriptor, actionError error) error { + // Detach from the parent timeout, but copy the trace span. + // We need to still release the lock even if the parent + // context timed out. + ctx = trace.CopySpan(context.TODO(), ctx) + ctx, cancel := context.WithTimeout(ctx, RemoteOperationTimeout) + defer cancel() + + span, ctx := trace.NewSpan(ctx, "TopoServer.Unlock") + span.Annotate("action", l.Action) + span.Annotate("path", path) + defer span.Finish() + + // first update the actionNode + if actionError != nil { + l.Status = "Error: " + actionError.Error() + } else { + l.Status = "Done" + } + return lockDescriptor.Unlock(ctx) +} + +// Lock adds lock information to the context, checks that the lock is not already held, and locks it. +// It returns a new context with the lock information and a function to unlock the lock. +func (tl TopoLock) Lock(ctx context.Context) (context.Context, func(*error), error) { + i, ok := ctx.Value(locksKey).(*locksInfo) + if !ok { + i = &locksInfo{ + info: make(map[string]*lockInfo), + } + ctx = context.WithValue(ctx, locksKey, i) + } + i.mu.Lock() + defer i.mu.Unlock() + // check that we are not already locked + if _, ok := i.info[tl.Path]; ok { + return nil, nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "lock for %v is already held", tl.Path) + } + + // lock it + l := newLock(fmt.Sprintf("lock for %s", tl.Name)) + lockDescriptor, err := l.lock(ctx, tl.ts, tl.Path) + if err != nil { + return nil, nil, err + } + // and update our structure + i.info[tl.Path] = &lockInfo{ + lockDescriptor: lockDescriptor, + actionNode: l, + } + return ctx, func(finalErr *error) { + i.mu.Lock() + defer i.mu.Unlock() + + if _, ok := i.info[tl.Path]; !ok { + if *finalErr != nil { + log.Errorf("trying to unlock %v multiple times", tl.Path) + } else { + *finalErr = vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "trying to unlock %v multiple times", tl.Path) + } + return + } + + err := 
l.unlock(ctx, tl.Path, lockDescriptor, *finalErr) + // if we have an error, we log it, but we still want to delete the lock + if *finalErr != nil { + if err != nil { + // both error are set, just log the unlock error + log.Errorf("unlock(%v) failed: %v", tl.Path, err) + } + } else { + *finalErr = err + } + delete(i.info, tl.Path) + }, nil +} + +func CheckLocked(ctx context.Context, keyPath string) error { + // extract the locksInfo pointer + i, ok := ctx.Value(locksKey).(*locksInfo) + if !ok { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "%s is not locked (no locksInfo)", keyPath) + } + i.mu.Lock() + defer i.mu.Unlock() + + // find the individual entry + _, ok = i.info[keyPath] + if !ok { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "%s is not locked (no lockInfo in map)", keyPath) + } + + // and we're good for now. + return nil +} diff --git a/go/vt/topo/topo_lock_test.go b/go/vt/topo/topo_lock_test.go new file mode 100644 index 00000000000..c378c05a9ff --- /dev/null +++ b/go/vt/topo/topo_lock_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package topo_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" +) + +// lower the lock timeout for testing +const testLockTimeout = 3 * time.Second + +// TestTopoLockTimeout tests that the lock times out after the specified duration. +func TestTopoLockTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + err := ts.CreateKeyspaceRoutingRules(ctx, &vschemapb.KeyspaceRoutingRules{}) + require.NoError(t, err) + lock, err := topo.NewRoutingRulesLock(ctx, ts, "ks1") + require.NoError(t, err) + + currentTopoLockTimeout := topo.LockTimeout + topo.LockTimeout = testLockTimeout + defer func() { + topo.LockTimeout = currentTopoLockTimeout + }() + + // acquire the lock + origCtx := ctx + _, unlock, err := lock.Lock(origCtx) + require.NoError(t, err) + defer unlock(&err) + + // re-acquiring the lock should fail + _, _, err2 := lock.Lock(origCtx) + require.Errorf(t, err2, "deadline exceeded") +} + +// TestTopoLockBasic tests basic lock operations. 
+func TestTopoLockBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + err := ts.CreateKeyspaceRoutingRules(ctx, &vschemapb.KeyspaceRoutingRules{}) + require.NoError(t, err) + lock, err := topo.NewRoutingRulesLock(ctx, ts, "ks1") + require.NoError(t, err) + + origCtx := ctx + ctx, unlock, err := lock.Lock(origCtx) + require.NoError(t, err) + + // locking the same key again, without unlocking, should return an error + _, _, err2 := lock.Lock(ctx) + require.ErrorContains(t, err2, "already held") + + // confirm that the lock can be re-acquired after unlocking + unlock(&err) + _, unlock, err = lock.Lock(origCtx) + require.NoError(t, err) + defer unlock(&err) +} diff --git a/go/vt/topo/topoproto/tablet.go b/go/vt/topo/topoproto/tablet.go index 63e71807119..31ac41e14a0 100644 --- a/go/vt/topo/topoproto/tablet.go +++ b/go/vt/topo/topoproto/tablet.go @@ -80,11 +80,6 @@ func TabletAliasString(ta *topodatapb.TabletAlias) string { return fmt.Sprintf("%v-%010d", ta.Cell, ta.Uid) } -// TabletAliasUIDStr returns a string version of the uid -func TabletAliasUIDStr(ta *topodatapb.TabletAlias) string { - return fmt.Sprintf("%010d", ta.Uid) -} - const tabletAliasFormat = "^(?P[-_.a-zA-Z0-9]+)-(?P[0-9]+)$" var tabletAliasRegexp = regexp.MustCompile(tabletAliasFormat) @@ -290,13 +285,6 @@ func TabletDbName(tablet *topodatapb.Tablet) string { return VtDbPrefix + tablet.Keyspace } -// TabletIsAssigned returns if this tablet is assigned to a keyspace and shard. -// A "scrap" node will show up as assigned even though its data cannot be used -// for serving. -func TabletIsAssigned(tablet *topodatapb.Tablet) bool { - return tablet != nil && tablet.Keyspace != "" && tablet.Shard != "" -} - // IsServingType returns true if the tablet type is one that should be serving to be healthy, or false if the tablet type // should not be serving in it's healthy state. 
func IsServingType(tabletType topodatapb.TabletType) bool { diff --git a/go/vt/topo/topotests/cell_info_test.go b/go/vt/topo/topotests/cell_info_test.go index becdbd8d14a..5d039ffbe6f 100644 --- a/go/vt/topo/topotests/cell_info_test.go +++ b/go/vt/topo/topotests/cell_info_test.go @@ -217,7 +217,6 @@ func TestExpandCells(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { expanded, err := ts.ExpandCells(ctx, tt.in) if tt.shouldErr { diff --git a/go/vt/topo/topotests/shard_watch_test.go b/go/vt/topo/topotests/shard_watch_test.go index 80b696c106d..f4ac83c627a 100644 --- a/go/vt/topo/topotests/shard_watch_test.go +++ b/go/vt/topo/topotests/shard_watch_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/vt/topo/vschema.go b/go/vt/topo/vschema.go index 0f63a26c2ae..c6845691b25 100644 --- a/go/vt/topo/vschema.go +++ b/go/vt/topo/vschema.go @@ -158,3 +158,57 @@ func (ts *Server) GetShardRoutingRules(ctx context.Context) (*vschemapb.ShardRou } return srr, nil } + +// CreateKeyspaceRoutingRules wraps the underlying Conn.Create. +func (ts *Server) CreateKeyspaceRoutingRules(ctx context.Context, value *vschemapb.KeyspaceRoutingRules) error { + data, err := value.MarshalVT() + if err != nil { + return err + } + if _, err := ts.globalCell.Create(ctx, ts.GetKeyspaceRoutingRulesPath(), data); err != nil { + return err + } + return nil +} + +// SaveKeyspaceRoutingRules saves the given routing rules proto in the topo at +// the defined path. 
+// It does NOT delete the file if you have requested to save empty routing rules +// (effectively deleting all routing rules in the file). This makes it different +// from the other routing rules (table and shard) save functions today. This is +// done as it simplifies the interactions with this key/file so that the typical +// access pattern is: +// - If the file exists, we can lock it, read it, modify it, and save it back. +// - If the file does not exist, we can create it and save the new rules. +// - If multiple callers are racing to create the file, only one will succeed +// and all other callers can simply retry once as the file will now exist. +// +// We can revisit this in the future and align things as we add locking and other +// topo server features to the other types of routing rules. We may then apply +// this new model used for keyspace routing rules to the other routing rules, or +// we may come up with a better model and apply it to the keyspace routing rules +// as well. 
+func (ts *Server) SaveKeyspaceRoutingRules(ctx context.Context, rules *vschemapb.KeyspaceRoutingRules) error { + data, err := rules.MarshalVT() + if err != nil { + return err + } + _, err = ts.globalCell.Update(ctx, ts.GetKeyspaceRoutingRulesPath(), data, nil) + return err +} + +func (ts *Server) GetKeyspaceRoutingRules(ctx context.Context) (*vschemapb.KeyspaceRoutingRules, error) { + rules := &vschemapb.KeyspaceRoutingRules{} + data, _, err := ts.globalCell.Get(ctx, ts.GetKeyspaceRoutingRulesPath()) + if err != nil { + if IsErrType(err, NoNode) { + return nil, nil + } + return nil, err + } + err = rules.UnmarshalVT(data) + if err != nil { + return nil, vterrors.Wrapf(err, "bad keyspace routing rules data: %q", data) + } + return rules, nil +} diff --git a/go/vt/topo/zk2topo/error.go b/go/vt/topo/zk2topo/error.go index 1ebc3896f40..1149ad60bf3 100644 --- a/go/vt/topo/zk2topo/error.go +++ b/go/vt/topo/zk2topo/error.go @@ -18,6 +18,7 @@ package zk2topo import ( "context" + "errors" "github.com/z-division/go-zookeeper/zk" @@ -26,20 +27,20 @@ import ( // Error codes returned by the zookeeper Go client: func convertError(err error, node string) error { - switch err { - case zk.ErrBadVersion: + switch { + case errors.Is(err, zk.ErrBadVersion): return topo.NewError(topo.BadVersion, node) - case zk.ErrNoNode: + case errors.Is(err, zk.ErrNoNode): return topo.NewError(topo.NoNode, node) - case zk.ErrNodeExists: + case errors.Is(err, zk.ErrNodeExists): return topo.NewError(topo.NodeExists, node) - case zk.ErrNotEmpty: + case errors.Is(err, zk.ErrNotEmpty): return topo.NewError(topo.NodeNotEmpty, node) - case zk.ErrSessionExpired: + case errors.Is(err, zk.ErrSessionExpired): return topo.NewError(topo.Timeout, node) - case context.Canceled: + case errors.Is(err, context.Canceled): return topo.NewError(topo.Interrupted, node) - case context.DeadlineExceeded: + case errors.Is(err, context.DeadlineExceeded): return topo.NewError(topo.Timeout, node) } return err diff --git 
a/go/vt/topo/zk2topo/lock.go b/go/vt/topo/zk2topo/lock.go index 974361544a5..5baf1f7f33f 100644 --- a/go/vt/topo/zk2topo/lock.go +++ b/go/vt/topo/zk2topo/lock.go @@ -91,19 +91,22 @@ func (zs *Server) lock(ctx context.Context, dirPath, contents string) (topo.Lock case context.Canceled: errToReturn = topo.NewError(topo.Interrupted, nodePath) default: - errToReturn = vterrors.Wrapf(err, "failed to obtain action lock: %v", nodePath) + errToReturn = vterrors.Wrapf(err, "failed to obtain lock: %v", nodePath) } // Regardless of the reason, try to cleanup. - log.Warningf("Failed to obtain action lock: %v", err) + log.Warningf("Failed to obtain lock: %v", err) - if err := zs.conn.Delete(ctx, nodePath, -1); err != nil { - log.Warningf("Failed to close connection :%v", err) + cleanupCtx, cancel := context.WithTimeout(context.Background(), baseTimeout) + defer cancel() + + if err := zs.conn.Delete(cleanupCtx, nodePath, -1); err != nil { + log.Warningf("Failed to cleanup unsuccessful lock path %s: %v", nodePath, err) } // Show the other locks in the directory dir := path.Dir(nodePath) - children, _, err := zs.conn.Children(ctx, dir) + children, _, err := zs.conn.Children(cleanupCtx, dir) if err != nil { log.Warningf("Failed to get children of %v: %v", dir, err) return nil, errToReturn @@ -115,7 +118,7 @@ func (zs *Server) lock(ctx context.Context, dirPath, contents string) (topo.Lock } childPath := path.Join(dir, children[0]) - data, _, err := zs.conn.Get(ctx, childPath) + data, _, err := zs.conn.Get(cleanupCtx, childPath) if err != nil { log.Warningf("Failed to get first locks node %v (may have just ended): %v", childPath, err) return nil, errToReturn diff --git a/go/vt/topo/zk2topo/zk_conn.go b/go/vt/topo/zk2topo/zk_conn.go index a0eec8b4340..22bd2046b60 100644 --- a/go/vt/topo/zk2topo/zk_conn.go +++ b/go/vt/topo/zk2topo/zk_conn.go @@ -21,7 +21,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "math/rand" + "math/rand/v2" "net" "os" "strings" @@ -76,11 +76,6 @@ func Time(i 
int64) time.Time { return time.Unix(i/1000, i%1000*1000000) } -// ZkTime returns a ZK time (int64) from a time.Time -func ZkTime(t time.Time) int64 { - return t.Unix()*1000 + int64(t.Nanosecond()/1000000) -} - // ZkConn is a wrapper class on top of a zk.Conn. // It will do a few things for us: // - add the context parameter. However, we do not enforce its deadlines @@ -251,7 +246,7 @@ func (c *ZkConn) withRetry(ctx context.Context, action func(conn *zk.Conn) error if i > 0 { // Add a bit of backoff time before retrying: // 1 second base + up to 5 seconds. - time.Sleep(1*time.Second + time.Duration(rand.Int63n(5e9))) + time.Sleep(1*time.Second + time.Duration(rand.Int64N(5e9))) } // Get the current connection, or connect. diff --git a/go/vt/topotools/events/reparent_syslog.go b/go/vt/topotools/events/reparent_syslog.go index dae22467d1f..dd995f34d73 100644 --- a/go/vt/topotools/events/reparent_syslog.go +++ b/go/vt/topotools/events/reparent_syslog.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topotools/events/reparent_syslog_test.go b/go/vt/topotools/events/reparent_syslog_test.go index 93a9b860fe2..f4ba39f602b 100644 --- a/go/vt/topotools/events/reparent_syslog_test.go +++ b/go/vt/topotools/events/reparent_syslog_test.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. diff --git a/go/vt/topotools/keyspace.go b/go/vt/topotools/keyspace.go index d8a5740f3ae..cab326397ff 100644 --- a/go/vt/topotools/keyspace.go +++ b/go/vt/topotools/keyspace.go @@ -141,22 +141,12 @@ func UpdateShardRecords( return nil } -// KeyspaceEquality returns true iff two KeyspaceInformations are identical for testing purposes +// KeyspaceEquality returns true iff two Keyspace fields are identical for testing purposes. 
func KeyspaceEquality(left, right *topodatapb.Keyspace) bool { if left.KeyspaceType != right.KeyspaceType { return false } - if len(left.ServedFroms) != len(right.ServedFroms) { - return false - } - for i := range left.ServedFroms { - if left.ServedFroms[i] != right.ServedFroms[i] { - return false - } - } - if left.KeyspaceType != right.KeyspaceType { - return false - } + if left.BaseKeyspace != right.BaseKeyspace { return false } @@ -165,5 +155,9 @@ func KeyspaceEquality(left, right *topodatapb.Keyspace) bool { return false } + if left.SidecarDbName != right.SidecarDbName { + return false + } + return left.DurabilityPolicy == right.DurabilityPolicy } diff --git a/go/vt/topotools/rebuild_keyspace.go b/go/vt/topotools/rebuild_keyspace.go index d58ce0b7160..72e060b79e2 100644 --- a/go/vt/topotools/rebuild_keyspace.go +++ b/go/vt/topotools/rebuild_keyspace.go @@ -64,7 +64,12 @@ func RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Ser } } - shards, err := ts.FindAllShardsInKeyspace(ctx, keyspace) + shards, err := ts.FindAllShardsInKeyspace(ctx, keyspace, &topo.FindAllShardsInKeyspaceOptions{ + // Fetch shard records concurrently to speed up the rebuild process. + // This call is invoked by the first tablet in a given keyspace or + // manually via vtctld, so there is little risk of a thundering herd. 
+ Concurrency: 8, + }) if err != nil { return err } @@ -94,9 +99,8 @@ func RebuildKeyspaceLocked(ctx context.Context, log logutil.Logger, ts *topo.Ser return err } srvKeyspaceMap[cell] = &topodatapb.SrvKeyspace{ - ServedFrom: ki.ComputeCellServedFrom(cell), + ThrottlerConfig: ki.ThrottlerConfig, } - srvKeyspaceMap[cell].ThrottlerConfig = ki.ThrottlerConfig } servedTypes := []topodatapb.TabletType{topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} diff --git a/go/vt/topotools/routing_rules.go b/go/vt/topotools/routing_rules.go index 9eb64c936d7..a3bc5a8a957 100644 --- a/go/vt/topotools/routing_rules.go +++ b/go/vt/topotools/routing_rules.go @@ -27,7 +27,7 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) -//region routing rules +// region routing rules func GetRoutingRulesMap(rules *vschemapb.RoutingRules) map[string][]string { if rules == nil { @@ -69,9 +69,9 @@ func SaveRoutingRules(ctx context.Context, ts *topo.Server, rules map[string][]s return ts.SaveRoutingRules(ctx, rrs) } -//endregion +// endregion -//region shard routing rules +// region shard routing rules func GetShardRoutingRuleKey(fromKeyspace, shard string) string { return fmt.Sprintf("%s.%s", fromKeyspace, shard) @@ -122,3 +122,99 @@ func SaveShardRoutingRules(ctx context.Context, ts *topo.Server, srr map[string] return ts.SaveShardRoutingRules(ctx, srs) } + +// endregion + +// region keyspace routing rules + +// GetKeyspaceRoutingRulesMap returns a map of fromKeyspace=>toKeyspace from a vschemapb.KeyspaceRoutingRules +func GetKeyspaceRoutingRulesMap(rules *vschemapb.KeyspaceRoutingRules) map[string]string { + if rules == nil { + return make(map[string]string) + } + rulesMap := make(map[string]string, len(rules.Rules)) + for _, rr := range rules.Rules { + rulesMap[rr.FromKeyspace] = rr.ToKeyspace + } + return rulesMap +} + +// GetKeyspaceRoutingRules fetches keyspace routing rules from the topology server and returns a +// map of 
fromKeyspace=>toKeyspace. +func GetKeyspaceRoutingRules(ctx context.Context, ts *topo.Server) (map[string]string, error) { + keyspaceRoutingRules, err := ts.GetKeyspaceRoutingRules(ctx) + if err != nil { + return nil, err + } + rules := GetKeyspaceRoutingRulesMap(keyspaceRoutingRules) + return rules, nil +} + +// buildKeyspaceRoutingRules builds a vschemapb.KeyspaceRoutingRules struct from a map of +// fromKeyspace=>toKeyspace values. +func buildKeyspaceRoutingRules(rules *map[string]string) *vschemapb.KeyspaceRoutingRules { + keyspaceRoutingRules := &vschemapb.KeyspaceRoutingRules{Rules: make([]*vschemapb.KeyspaceRoutingRule, 0, len(*rules))} + for from, to := range *rules { + keyspaceRoutingRules.Rules = append(keyspaceRoutingRules.Rules, &vschemapb.KeyspaceRoutingRule{ + FromKeyspace: from, + ToKeyspace: to, + }) + } + return keyspaceRoutingRules +} + +// saveKeyspaceRoutingRulesLocked saves the keyspace routing rules in the topo server. It expects the caller to +// have acquired a RoutingRulesLock. +func saveKeyspaceRoutingRulesLocked(ctx context.Context, ts *topo.Server, rules map[string]string) error { + if err := topo.CheckLocked(ctx, topo.RoutingRulesPath); err != nil { + return err + } + return ts.SaveKeyspaceRoutingRules(ctx, buildKeyspaceRoutingRules(&rules)) +} + +// UpdateKeyspaceRoutingRules updates the keyspace routing rules in the topo server. +// If the keyspace routing rules do not yet exist, it will create them. If multiple callers +// are racing to create the initial keyspace routing rules then the first writer will win +// and the other callers can immediately retry when getting the resulting topo.NodeExists +// error. When the routing rules already exist, it will acquire a RoutingRulesLock and +// then modify the keyspace routing rules in-place. 
+func UpdateKeyspaceRoutingRules(ctx context.Context, ts *topo.Server, reason string, + update func(ctx context.Context, rules *map[string]string) error) (err error) { + var lock *topo.RoutingRulesLock + lock, err = topo.NewRoutingRulesLock(ctx, ts, reason) + if err != nil { + return err + } + lockCtx, unlock, lockErr := lock.Lock(ctx) + if lockErr != nil { + // If the key does not yet exist then let's create it. + if !topo.IsErrType(lockErr, topo.NoNode) { + return lockErr + } + rules := make(map[string]string) + if err := update(ctx, &rules); err != nil { + return err + } + // This will fail if the key already exists and thus avoids any races here. The first + // writer will win and the others will have to retry. This situation should be very + // rare as we are typically only updating the rules from here on out. + if err := ts.CreateKeyspaceRoutingRules(ctx, buildKeyspaceRoutingRules(&rules)); err != nil { + return err + } + return nil + } + defer unlock(&err) + rules, err := GetKeyspaceRoutingRules(lockCtx, ts) + if err != nil { + return err + } + if err := update(lockCtx, &rules); err != nil { + return err + } + if err := saveKeyspaceRoutingRulesLocked(lockCtx, ts, rules); err != nil { + return err + } + return nil +} + +// endregion diff --git a/go/vt/topotools/routing_rules_test.go b/go/vt/topotools/routing_rules_test.go index 0b4f265a77b..2d4d9feacd1 100644 --- a/go/vt/topotools/routing_rules_test.go +++ b/go/vt/topotools/routing_rules_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" ) @@ -90,3 +91,77 @@ func TestShardRoutingRulesRoundTrip(t *testing.T) { assert.Equal(t, srr, roundtripRules) } + +func TestKeyspaceRoutingRulesRoundTrip(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + rulesMap := map[string]string{ + 
"ks1": "ks2", + "ks4": "ks5", + } + + err := UpdateKeyspaceRoutingRules(ctx, ts, "test", func(ctx context.Context, rules *map[string]string) error { + for k, v := range rulesMap { + (*rules)[k] = v + } + return nil + }) + require.NoError(t, err, "could not save keyspace routing rules to topo %v", rulesMap) + + roundtripRulesMap, err := GetKeyspaceRoutingRules(ctx, ts) + require.NoError(t, err, "could not fetch keyspace routing rules from topo") + assert.EqualValues(t, rulesMap, roundtripRulesMap) +} + +// TestSaveKeyspaceRoutingRulesLocked confirms that saveKeyspaceRoutingRulesLocked() can only be called +// with a locked routing_rules lock. +func TestSaveKeyspaceRoutingRulesLocked(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + + rulesMap := map[string]string{ + "ks1": "ks2", + "ks4": "ks5", + } + + t.Run("unlocked, doesn't exist", func(t *testing.T) { + err := saveKeyspaceRoutingRulesLocked(ctx, ts, rulesMap) + require.Errorf(t, err, "node doesn't exist: routing_rules") + }) + + t.Run("create", func(t *testing.T) { + err := ts.CreateKeyspaceRoutingRules(ctx, buildKeyspaceRoutingRules(&rulesMap)) + require.NoError(t, err) + }) + + t.Run("create again", func(t *testing.T) { + err := ts.CreateKeyspaceRoutingRules(ctx, buildKeyspaceRoutingRules(&rulesMap)) + require.True(t, topo.IsErrType(err, topo.NodeExists)) + }) + + t.Run("unlocked", func(t *testing.T) { + err := saveKeyspaceRoutingRulesLocked(ctx, ts, rulesMap) + require.Errorf(t, err, "routing_rules is not locked (no locksInfo)") + }) + + // declare and acquire lock + lock, err := topo.NewRoutingRulesLock(ctx, ts, "test") + require.NoError(t, err) + lockCtx, unlock, err := lock.Lock(ctx) + require.NoError(t, err) + defer unlock(&err) + + t.Run("locked, locked ctx", func(t *testing.T) { + err = saveKeyspaceRoutingRulesLocked(lockCtx, ts, rulesMap) + require.NoError(t, err) + }) + t.Run("locked, unlocked 
ctx", func(t *testing.T) { + err = saveKeyspaceRoutingRulesLocked(ctx, ts, rulesMap) + require.Errorf(t, err, "routing_rules is not locked (no locksInfo)") + }) +} diff --git a/go/vt/topotools/shard_test.go b/go/vt/topotools/shard_test.go index f2fb5f50340..9fa9cd057b2 100644 --- a/go/vt/topotools/shard_test.go +++ b/go/vt/topotools/shard_test.go @@ -19,7 +19,7 @@ package topotools import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "testing" @@ -114,7 +114,7 @@ func TestGetOrCreateShard(t *testing.T) { defer wg.Done() for j := 0; j < 100; j++ { - index := rand.Intn(10) + index := rand.IntN(10) shard := fmt.Sprintf("%v", index) si, err := ts.GetOrCreateShard(ctx, keyspace, shard) if err != nil { diff --git a/go/vt/topotools/split.go b/go/vt/topotools/split.go index ace3dda94a7..9da6b99878f 100644 --- a/go/vt/topotools/split.go +++ b/go/vt/topotools/split.go @@ -19,9 +19,6 @@ package topotools import ( "errors" "fmt" - "sort" - - "context" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -77,185 +74,3 @@ func combineKeyRanges(shards []*topo.ShardInfo) (*topodatapb.KeyRange, error) { } return result, nil } - -// OverlappingShards contains sets of shards that overlap which each-other. -// With this library, there is no guarantee of which set will be left or right. -type OverlappingShards struct { - Left []*topo.ShardInfo - Right []*topo.ShardInfo -} - -// ContainsShard returns true if either Left or Right lists contain -// the provided Shard. 
-func (os *OverlappingShards) ContainsShard(shardName string) bool { - for _, l := range os.Left { - if l.ShardName() == shardName { - return true - } - } - for _, r := range os.Right { - if r.ShardName() == shardName { - return true - } - } - return false -} - -// OverlappingShardsForShard returns the OverlappingShards object -// from the list that has he provided shard, or nil -func OverlappingShardsForShard(os []*OverlappingShards, shardName string) *OverlappingShards { - for _, o := range os { - if o.ContainsShard(shardName) { - return o - } - } - return nil -} - -// FindOverlappingShards will return an array of OverlappingShards -// for the provided keyspace. -// We do not support more than two overlapping shards (for instance, -// having 40-80, 40-60 and 40-50 in the same keyspace is not supported and -// will return an error). -// If shards don't perfectly overlap, they are not returned. -func FindOverlappingShards(ctx context.Context, ts *topo.Server, keyspace string) ([]*OverlappingShards, error) { - shardMap, err := ts.FindAllShardsInKeyspace(ctx, keyspace) - if err != nil { - return nil, err - } - - return findOverlappingShards(shardMap) -} - -// findOverlappingShards does the work for FindOverlappingShards but -// can be called on test data too. 
-func findOverlappingShards(shardMap map[string]*topo.ShardInfo) ([]*OverlappingShards, error) { - - var result []*OverlappingShards - - for len(shardMap) > 0 { - var left []*topo.ShardInfo - var right []*topo.ShardInfo - - // get the first value from the map, seed our left array with it - var name string - var si *topo.ShardInfo - for name, si = range shardMap { - break - } - left = append(left, si) - delete(shardMap, name) - - // keep adding entries until we have no more to add - for { - foundOne := false - - // try left to right - si := findIntersectingShard(shardMap, left) - if si != nil { - if intersect(si, right) { - return nil, fmt.Errorf("shard %v intersects with more than one shard, this is not supported", si.ShardName()) - } - foundOne = true - right = append(right, si) - } - - // try right to left - si = findIntersectingShard(shardMap, right) - if si != nil { - if intersect(si, left) { - return nil, fmt.Errorf("shard %v intersects with more than one shard, this is not supported", si.ShardName()) - } - foundOne = true - left = append(left, si) - } - - // we haven't found anything new, we're done - if !foundOne { - break - } - } - - // save what we found if it's good - if len(right) > 0 { - // sort both lists - sort.Sort(shardInfoList(left)) - sort.Sort(shardInfoList(right)) - - // we should not have holes on either side - hasHoles := false - for i := 0; i < len(left)-1; i++ { - if string(left[i].KeyRange.End) != string(left[i+1].KeyRange.Start) { - hasHoles = true - } - } - for i := 0; i < len(right)-1; i++ { - if string(right[i].KeyRange.End) != string(right[i+1].KeyRange.Start) { - hasHoles = true - } - } - if hasHoles { - continue - } - - // the two sides should match - if !key.KeyRangeStartEqual(left[0].KeyRange, right[0].KeyRange) { - continue - } - if !key.KeyRangeEndEqual(left[len(left)-1].KeyRange, right[len(right)-1].KeyRange) { - continue - } - - // all good, we have a valid overlap - result = append(result, &OverlappingShards{ - Left: left, - 
Right: right, - }) - } - } - return result, nil -} - -// findIntersectingShard will go through the map and take the first -// entry in there that intersect with the source array, remove it from -// the map, and return it -func findIntersectingShard(shardMap map[string]*topo.ShardInfo, sourceArray []*topo.ShardInfo) *topo.ShardInfo { - for name, si := range shardMap { - for _, sourceShardInfo := range sourceArray { - if si.KeyRange == nil || sourceShardInfo.KeyRange == nil || key.KeyRangeIntersect(si.KeyRange, sourceShardInfo.KeyRange) { - delete(shardMap, name) - return si - } - } - } - return nil -} - -// intersect returns true if the provided shard intersect with any shard -// in the destination array -func intersect(si *topo.ShardInfo, allShards []*topo.ShardInfo) bool { - for _, shard := range allShards { - if key.KeyRangeIntersect(si.KeyRange, shard.KeyRange) { - return true - } - } - return false -} - -// shardInfoList is a helper type to sort ShardInfo array by keyrange -type shardInfoList []*topo.ShardInfo - -// Len is part of sort.Interface -func (sil shardInfoList) Len() int { - return len(sil) -} - -// Less is part of sort.Interface -func (sil shardInfoList) Less(i, j int) bool { - return string(sil[i].KeyRange.Start) < string(sil[j].KeyRange.Start) -} - -// Swap is part of sort.Interface -func (sil shardInfoList) Swap(i, j int) { - sil[i], sil[j] = sil[j], sil[i] -} diff --git a/go/vt/topotools/split_test.go b/go/vt/topotools/split_test.go index 003dc767317..6e93ee345d3 100644 --- a/go/vt/topotools/split_test.go +++ b/go/vt/topotools/split_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package topotools import ( - "encoding/hex" "testing" "github.com/stretchr/testify/assert" @@ -27,75 +26,6 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// helper methods for tests to be shorter - -func hki(hexValue string) []byte { - k, err := hex.DecodeString(hexValue) - if err != nil { - panic(err) - } - return k -} - -func si(start, end string) *topo.ShardInfo { - s := hki(start) - e := hki(end) - return topo.NewShardInfo("keyspace", start+"-"+end, &topodatapb.Shard{ - KeyRange: &topodatapb.KeyRange{ - Start: s, - End: e, - }, - }, nil) -} - -type expectedOverlappingShard struct { - left []string - right []string -} - -func overlappingShardMatch(ol []*topo.ShardInfo, or []*topo.ShardInfo, e expectedOverlappingShard) bool { - if len(ol)+1 != len(e.left) { - return false - } - if len(or)+1 != len(e.right) { - return false - } - for i, l := range ol { - if l.ShardName() != e.left[i]+"-"+e.left[i+1] { - return false - } - } - for i, r := range or { - if r.ShardName() != e.right[i]+"-"+e.right[i+1] { - return false - } - } - return true -} - -func compareResultLists(t *testing.T, os []*OverlappingShards, expected []expectedOverlappingShard) { - if len(os) != len(expected) { - t.Errorf("Unexpected result length, got %v, want %v", len(os), len(expected)) - return - } - - for _, o := range os { - found := false - for _, e := range expected { - if overlappingShardMatch(o.Left, o.Right, e) { - found = true - } - if overlappingShardMatch(o.Right, o.Left, e) { - found = true - } - } - if !found { - t.Errorf("OverlappingShard %v not found in expected %v", o, expected) - return - } - } -} - func TestValidateForReshard(t *testing.T) { testcases := []struct { sources []string @@ -169,191 +99,3 @@ func TestValidateForReshard(t *testing.T) { } } } - -func TestFindOverlappingShardsNoOverlap(t *testing.T) { - var shardMap map[string]*topo.ShardInfo - var os []*OverlappingShards - var err error - - // no shards - shardMap = map[string]*topo.ShardInfo{} - os, err 
= findOverlappingShards(shardMap) - if len(os) != 0 || err != nil { - t.Errorf("empty shard map: %v %v", os, err) - } - - // just one shard, full keyrange - shardMap = map[string]*topo.ShardInfo{ - "0": {}, - } - os, err = findOverlappingShards(shardMap) - if len(os) != 0 || err != nil { - t.Errorf("just one shard, full keyrange: %v %v", os, err) - } - - // just one shard, partial keyrange - shardMap = map[string]*topo.ShardInfo{ - "-80": si("", "80"), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 0 || err != nil { - t.Errorf("just one shard, partial keyrange: %v %v", os, err) - } - - // two non-overlapping shards - shardMap = map[string]*topo.ShardInfo{ - "-80": si("", "80"), - "80": si("80", ""), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 0 || err != nil { - t.Errorf("two non-overlapping shards: %v %v", os, err) - } - - // shards with holes - shardMap = map[string]*topo.ShardInfo{ - "-80": si("", "80"), - "80": si("80", ""), - "-20": si("", "20"), - // HOLE: "20-40": si("20", "40"), - "40-60": si("40", "60"), - "60-80": si("60", "80"), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 0 || err != nil { - t.Errorf("shards with holes: %v %v", os, err) - } - - // shards not overlapping - shardMap = map[string]*topo.ShardInfo{ - "-80": si("", "80"), - "80": si("80", ""), - // MISSING: "-20": si("", "20"), - "20-40": si("20", "40"), - "40-60": si("40", "60"), - "60-80": si("60", "80"), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 0 || err != nil { - t.Errorf("shards not overlapping: %v %v", os, err) - } -} - -func TestFindOverlappingShardsOverlap(t *testing.T) { - var shardMap map[string]*topo.ShardInfo - var os []*OverlappingShards - var err error - - // split in progress - shardMap = map[string]*topo.ShardInfo{ - "-80": si("", "80"), - "80": si("80", ""), - "-40": si("", "40"), - "40-80": si("40", "80"), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 1 || err != nil { - 
t.Errorf("split in progress: %v %v", os, err) - } - compareResultLists(t, os, []expectedOverlappingShard{ - { - left: []string{"", "80"}, - right: []string{"", "40", "80"}, - }, - }) - - // 1 to 4 split - shardMap = map[string]*topo.ShardInfo{ - "-": si("", ""), - "-40": si("", "40"), - "40-80": si("40", "80"), - "80-c0": si("80", "c0"), - "c0-": si("c0", ""), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 1 || err != nil { - t.Errorf("1 to 4 split: %v %v", os, err) - } - compareResultLists(t, os, []expectedOverlappingShard{ - { - left: []string{"", ""}, - right: []string{"", "40", "80", "c0", ""}, - }, - }) - - // 2 to 3 split - shardMap = map[string]*topo.ShardInfo{ - "-40": si("", "40"), - "40-80": si("40", "80"), - "80-": si("80", ""), - "-30": si("", "30"), - "30-60": si("30", "60"), - "60-80": si("60", "80"), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 1 || err != nil { - t.Errorf("2 to 3 split: %v %v", os, err) - } - compareResultLists(t, os, []expectedOverlappingShard{ - { - left: []string{"", "40", "80"}, - right: []string{"", "30", "60", "80"}, - }, - }) - - // multiple concurrent splits - shardMap = map[string]*topo.ShardInfo{ - "-80": si("", "80"), - "80-": si("80", ""), - "-40": si("", "40"), - "40-80": si("40", "80"), - "80-c0": si("80", "c0"), - "c0-": si("c0", ""), - } - os, err = findOverlappingShards(shardMap) - if len(os) != 2 || err != nil { - t.Errorf("2 to 3 split: %v %v", os, err) - } - compareResultLists(t, os, []expectedOverlappingShard{ - { - left: []string{"", "80"}, - right: []string{"", "40", "80"}, - }, - { - left: []string{"80", ""}, - right: []string{"80", "c0", ""}, - }, - }) - - // find a shard in there - if o := OverlappingShardsForShard(os, "-60"); o != nil { - t.Errorf("Found a shard where I shouldn't have!") - } - if o := OverlappingShardsForShard(os, "-40"); o == nil { - t.Errorf("Found no shard where I should have!") - } else { - compareResultLists(t, []*OverlappingShards{o}, - 
[]expectedOverlappingShard{ - { - left: []string{"", "80"}, - right: []string{"", "40", "80"}, - }, - }) - } -} - -func TestFindOverlappingShardsErrors(t *testing.T) { - var shardMap map[string]*topo.ShardInfo - var err error - - // 3 overlapping shards - shardMap = map[string]*topo.ShardInfo{ - "-20": si("", "20"), - "-40": si("", "40"), - "-80": si("", "80"), - } - _, err = findOverlappingShards(shardMap) - if err == nil { - t.Errorf("3 overlapping shards with no error") - } -} diff --git a/go/vt/topotools/tablet.go b/go/vt/topotools/tablet.go index 8bbca4b8c03..397af9ddf7c 100644 --- a/go/vt/topotools/tablet.go +++ b/go/vt/topotools/tablet.go @@ -45,7 +45,6 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/proto/vttime" @@ -127,7 +126,7 @@ func DoCellsHaveRdonlyTablets(ctx context.Context, ts *topo.Server, cells []stri } for _, cell := range cells { - tablets, err := ts.GetTabletsByCell(ctx, cell) + tablets, err := ts.GetTabletsByCell(ctx, cell, nil) if err != nil { return false, err } @@ -241,11 +240,6 @@ func TabletIdent(tablet *topodatapb.Tablet) string { return fmt.Sprintf("%s-%d (%s%s)", tablet.Alias.Cell, tablet.Alias.Uid, tablet.Hostname, tagStr) } -// TargetIdent returns a concise string representation of a query target -func TargetIdent(target *querypb.Target) string { - return fmt.Sprintf("%s/%s (%s)", target.Keyspace, target.Shard, target.TabletType) -} - // TabletEquality returns true iff two Tablets are identical for testing purposes func TabletEquality(left, right *topodatapb.Tablet) bool { if left.Keyspace != right.Keyspace { diff --git a/go/vt/topotools/utils.go b/go/vt/topotools/utils.go index 6b618383a1e..ae70b299bdd 100644 --- a/go/vt/topotools/utils.go +++ b/go/vt/topotools/utils.go @@ -17,10 +17,8 @@ limitations under the License. 
package topotools import ( - "reflect" - "sync" - "context" + "sync" "vitess.io/vitess/go/vt/topo" @@ -43,7 +41,7 @@ func GetTabletMapForCell(ctx context.Context, ts *topo.Server, cell string) (map if err != nil { return nil, err } - tabletMap, err := ts.GetTabletMap(ctx, aliases) + tabletMap, err := ts.GetTabletMap(ctx, aliases, nil) if err != nil { // we got another error than topo.ErrNoNode return nil, err @@ -65,7 +63,7 @@ func GetAllTabletsAcrossCells(ctx context.Context, ts *topo.Server) ([]*topo.Tab wg.Add(len(cells)) for i, cell := range cells { go func(i int, cell string) { - results[i], errors[i] = ts.GetTabletsByCell(ctx, cell) + results[i], errors[i] = ts.GetTabletsByCell(ctx, cell, nil) wg.Done() }(i, cell) } @@ -101,37 +99,3 @@ func SortedTabletMap(tabletMap map[string]*topo.TabletInfo) (map[string]*topo.Ta } return replicaMap, primaryMap } - -// CopyMapKeys copies keys from map m into a new slice with the -// type specified by typeHint. Reflection can't make a new slice type -// just based on the key type AFAICT. -func CopyMapKeys(m any, typeHint any) any { - mapVal := reflect.ValueOf(m) - keys := reflect.MakeSlice(reflect.TypeOf(typeHint), 0, mapVal.Len()) - for _, k := range mapVal.MapKeys() { - keys = reflect.Append(keys, k) - } - return keys.Interface() -} - -// CopyMapValues copies values from map m into a new slice with the -// type specified by typeHint. Reflection can't make a new slice type -// just based on the key type AFAICT. -func CopyMapValues(m any, typeHint any) any { - mapVal := reflect.ValueOf(m) - vals := reflect.MakeSlice(reflect.TypeOf(typeHint), 0, mapVal.Len()) - for _, k := range mapVal.MapKeys() { - vals = reflect.Append(vals, mapVal.MapIndex(k)) - } - return vals.Interface() -} - -// MapKeys returns an array with th provided map keys. 
-func MapKeys(m any) []any { - keys := make([]any, 0, 16) - mapVal := reflect.ValueOf(m) - for _, kv := range mapVal.MapKeys() { - keys = append(keys, kv.Interface()) - } - return keys -} diff --git a/go/vt/topotools/vschema_ddl.go b/go/vt/topotools/vschema_ddl.go index ff4d9f4ad04..3c6f5bced3c 100644 --- a/go/vt/topotools/vschema_ddl.go +++ b/go/vt/topotools/vschema_ddl.go @@ -124,7 +124,7 @@ func ApplyVSchemaDDL(ksName string, ks *vschemapb.Keyspace, alterVschema *sqlpar // already exists. spec := alterVschema.VindexSpec name := spec.Name.String() - if !spec.Type.IsEmpty() { + if spec.Type.NotEmpty() { owner, params := spec.ParseParams() if vindex, ok := ks.Vindexes[name]; ok { if vindex.Type != spec.Type.String() { diff --git a/go/vt/vitessdriver/rows.go b/go/vt/vitessdriver/rows.go index a2438bb891c..1af88e64ec3 100644 --- a/go/vt/vitessdriver/rows.go +++ b/go/vt/vitessdriver/rows.go @@ -119,3 +119,80 @@ func (ri *rows) ColumnTypeScanType(index int) reflect.Type { return typeUnknown } } + +func (ri *rows) ColumnTypeDatabaseTypeName(index int) string { + field := ri.qr.Fields[index] + switch field.GetType() { + case query.Type_INT8: + return "TINYINT" + case query.Type_UINT8: + return "UNSIGNED TINYINT" + case query.Type_INT16: + return "SMALLINT" + case query.Type_UINT16: + return "UNSIGNED SMALLINT" + case query.Type_YEAR: + return "YEAR" + case query.Type_INT24: + return "MEDIUMINT" + case query.Type_UINT24: + return "UNSIGNED MEDIUMINT" + case query.Type_INT32: + return "INT" + case query.Type_UINT32: + return "UNSIGNED INT" + case query.Type_INT64: + return "BIGINT" + case query.Type_UINT64: + return "UNSIGNED BIGINT" + case query.Type_FLOAT32: + return "FLOAT" + case query.Type_FLOAT64: + return "DOUBLE" + case query.Type_DECIMAL: + return "DECIMAL" + case query.Type_VARCHAR: + return "VARCHAR" + case query.Type_TEXT: + return "TEXT" + case query.Type_BLOB: + return "BLOB" + case query.Type_VARBINARY: + return "VARBINARY" + case query.Type_CHAR: + return 
"CHAR" + case query.Type_BINARY: + return "BINARY" + case query.Type_BIT: + return "BIT" + case query.Type_ENUM: + return "ENUM" + case query.Type_SET: + return "SET" + case query.Type_HEXVAL: + return "VARBINARY" + case query.Type_HEXNUM: + return "VARBINARY" + case query.Type_BITNUM: + return "VARBINARY" + case query.Type_GEOMETRY: + return "GEOMETRY" + case query.Type_JSON: + return "JSON" + case query.Type_TIMESTAMP: + return "TIMESTAMP" + case query.Type_DATE: + return "DATE" + case query.Type_TIME: + return "TIME" + case query.Type_DATETIME: + return "DATETIME" + default: + return "" + } +} + +func (ri *rows) ColumnTypeNullable(index int) (nullable, ok bool) { + field := ri.qr.Fields[index] + return field.GetFlags()&uint32(query.MySqlFlag_NOT_NULL_FLAG) == 0, true +} diff --git a/go/vt/vitessdriver/rows_test.go b/go/vt/vitessdriver/rows_test.go index 13584e70dd8..bb196da30c3 100644 --- a/go/vt/vitessdriver/rows_test.go +++ b/go/vt/vitessdriver/rows_test.go @@ -226,3 +226,123 @@ func TestColumnTypeScanType(t *testing.T) { assert.Equal(t, ri.ColumnTypeScanType(i), wantTypes[i], fmt.Sprintf("unexpected type %v, wanted %v", ri.ColumnTypeScanType(i), wantTypes[i])) } } + +// Test that the ColumnTypeScanType function returns the correct reflection type for each +// sql type. The sql type in turn comes from a table column's type. 
+func TestColumnTypeDatabaseTypeName(t *testing.T) { + var r = sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "field1", + Type: sqltypes.Int8, + }, + { + Name: "field2", + Type: sqltypes.Uint8, + }, + { + Name: "field3", + Type: sqltypes.Int16, + }, + { + Name: "field4", + Type: sqltypes.Uint16, + }, + { + Name: "field5", + Type: sqltypes.Int24, + }, + { + Name: "field6", + Type: sqltypes.Uint24, + }, + { + Name: "field7", + Type: sqltypes.Int32, + }, + { + Name: "field8", + Type: sqltypes.Uint32, + }, + { + Name: "field9", + Type: sqltypes.Int64, + }, + { + Name: "field10", + Type: sqltypes.Uint64, + }, + { + Name: "field11", + Type: sqltypes.Float32, + }, + { + Name: "field12", + Type: sqltypes.Float64, + }, + { + Name: "field13", + Type: sqltypes.VarBinary, + }, + { + Name: "field14", + Type: sqltypes.Datetime, + }, + }, + } + + ri := newRows(&r, &converter{}).(driver.RowsColumnTypeDatabaseTypeName) + defer ri.Close() + + wantTypes := []string{ + "TINYINT", + "UNSIGNED TINYINT", + "SMALLINT", + "UNSIGNED SMALLINT", + "MEDIUMINT", + "UNSIGNED MEDIUMINT", + "INT", + "UNSIGNED INT", + "BIGINT", + "UNSIGNED BIGINT", + "FLOAT", + "DOUBLE", + "VARBINARY", + "DATETIME", + } + + for i := 0; i < len(wantTypes); i++ { + assert.Equal(t, ri.ColumnTypeDatabaseTypeName(i), wantTypes[i], fmt.Sprintf("unexpected type %v, wanted %v", ri.ColumnTypeDatabaseTypeName(i), wantTypes[i])) + } +} + +// Test that the ColumnTypeScanType function returns the correct reflection type for each +// sql type. The sql type in turn comes from a table column's type. 
+func TestColumnTypeNullable(t *testing.T) { + var r = sqltypes.Result{ + Fields: []*querypb.Field{ + { + Name: "field1", + Type: sqltypes.Int64, + Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG), + }, + { + Name: "field2", + Type: sqltypes.Int64, + }, + }, + } + + ri := newRows(&r, &converter{}).(driver.RowsColumnTypeNullable) + defer ri.Close() + + nullable := []bool{ + false, + true, + } + + for i := 0; i < len(nullable); i++ { + null, _ := ri.ColumnTypeNullable(i) + assert.Equal(t, null, nullable[i], fmt.Sprintf("unexpected type %v, wanted %v", null, nullable[i])) + } +} diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 92d11ba18ea..1e83875de35 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -32,11 +32,15 @@ import ( "github.com/gorilla/mux" "github.com/patrickmn/go-cache" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/sets" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin/cluster" "vitess.io/vitess/go/vt/vtadmin/cluster/dynamic" @@ -77,6 +81,8 @@ type API struct { // vtexplain is now global again due to stat exporters in the tablet layer // we're not super concerned because we will be deleting vtexplain Soon(TM). vtexplainLock sync.Mutex + + env *vtenv.Environment } // Options wraps the configuration options for different components of the @@ -92,7 +98,7 @@ type Options struct { // NewAPI returns a new API, configured to service the given set of clusters, // and configured with the given options. 
-func NewAPI(clusters []*cluster.Cluster, opts Options) *API { +func NewAPI(env *vtenv.Environment, clusters []*cluster.Cluster, opts Options) *API { clusterMap := make(map[string]*cluster.Cluster, len(clusters)) for _, cluster := range clusters { clusterMap[cluster.ID] = cluster @@ -138,6 +144,7 @@ func NewAPI(clusters []*cluster.Cluster, opts Options) *API { clusters: clusters, clusterMap: clusterMap, authz: authz, + env: env, } if opts.EnableDynamicClusters { @@ -296,6 +303,7 @@ func (api *API) WithCluster(c *cluster.Cluster, id string) dynamic.API { serv: api.serv, authz: api.authz, options: api.options, + env: api.env, } if c != nil { @@ -366,6 +374,13 @@ func (api *API) Handler() http.Handler { router.HandleFunc("/keyspace/{cluster_id}/{name}/validate/schema", httpAPI.Adapt(vtadminhttp.ValidateSchemaKeyspace)).Name("API.ValidateSchemaKeyspace").Methods("PUT", "OPTIONS") router.HandleFunc("/keyspace/{cluster_id}/{name}/validate/version", httpAPI.Adapt(vtadminhttp.ValidateVersionKeyspace)).Name("API.ValidateVersionKeyspace").Methods("PUT", "OPTIONS") router.HandleFunc("/keyspaces", httpAPI.Adapt(vtadminhttp.GetKeyspaces)).Name("API.GetKeyspaces") + router.HandleFunc("/migration/{cluster_id}/{keyspace}", httpAPI.Adapt(vtadminhttp.ApplySchema)).Name("API.ApplySchema").Methods("POST") + router.HandleFunc("/migration/{cluster_id}/{keyspace}/cancel", httpAPI.Adapt(vtadminhttp.CancelSchemaMigration)).Name("API.CancelSchemaMigration").Methods("PUT", "OPTIONS") + router.HandleFunc("/migration/{cluster_id}/{keyspace}/cleanup", httpAPI.Adapt(vtadminhttp.CleanupSchemaMigration)).Name("API.CleanupSchemaMigration").Methods("PUT", "OPTIONS") + router.HandleFunc("/migration/{cluster_id}/{keyspace}/complete", httpAPI.Adapt(vtadminhttp.CompleteSchemaMigration)).Name("API.CompleteSchemaMigration").Methods("PUT", "OPTIONS") + router.HandleFunc("/migration/{cluster_id}/{keyspace}/launch", 
httpAPI.Adapt(vtadminhttp.LaunchSchemaMigration)).Name("API.LaunchSchemaMigration").Methods("PUT", "OPTIONS") + router.HandleFunc("/migration/{cluster_id}/{keyspace}/retry", httpAPI.Adapt(vtadminhttp.RetrySchemaMigration)).Name("API.RetrySchemaMigration").Methods("PUT", "OPTIONS") + router.HandleFunc("/migrations/", httpAPI.Adapt(vtadminhttp.GetSchemaMigrations)).Name("API.GetSchemaMigrations") router.HandleFunc("/schema/{table}", httpAPI.Adapt(vtadminhttp.FindSchema)).Name("API.FindSchema") router.HandleFunc("/schema/{cluster_id}/{keyspace}/{table}", httpAPI.Adapt(vtadminhttp.GetSchema)).Name("API.GetSchema") router.HandleFunc("/schemas", httpAPI.Adapt(vtadminhttp.GetSchemas)).Name("API.GetSchemas") @@ -432,6 +447,82 @@ func (api *API) EjectDynamicCluster(key string, value any) { api.clusters = append(api.clusters[:clusterIndex], api.clusters[clusterIndex+1:]...) } +// ApplySchema is part of the vtadminpb.VTAdminServer interface. +func (api *API) ApplySchema(ctx context.Context, req *vtadminpb.ApplySchemaRequest) (*vtctldatapb.ApplySchemaResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.ApplySchema") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + + if !api.authz.IsAuthorized(ctx, req.ClusterId, rbac.SchemaMigrationResource, rbac.CreateAction) { + return nil, fmt.Errorf("%w: cannot create schema migration in %s", errors.ErrUnauthorized, req.ClusterId) + } + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + return c.ApplySchema(ctx, req.Request) +} + +// CancelSchemaMigration is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) CancelSchemaMigration(ctx context.Context, req *vtadminpb.CancelSchemaMigrationRequest) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.CancelSchemaMigration") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + + if !api.authz.IsAuthorized(ctx, req.ClusterId, rbac.SchemaMigrationResource, rbac.CancelAction) { + return nil, fmt.Errorf("%w: cannot cancel schema migration in %s", errors.ErrUnauthorized, req.ClusterId) + } + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + return c.CancelSchemaMigration(ctx, req.Request) +} + +// CleanupSchemaMigration is part of the vtadminpb.VTAdminServer interface. +func (api *API) CleanupSchemaMigration(ctx context.Context, req *vtadminpb.CleanupSchemaMigrationRequest) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.CleanupSchemaMigration") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + + if !api.authz.IsAuthorized(ctx, req.ClusterId, rbac.SchemaMigrationResource, rbac.CleanupSchemaMigrationAction) { + return nil, fmt.Errorf("%w: cannot cleanup schema migration in %s", errors.ErrUnauthorized, req.ClusterId) + } + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + return c.CleanupSchemaMigration(ctx, req.Request) +} + +// CompleteSchemaMigration is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) CompleteSchemaMigration(ctx context.Context, req *vtadminpb.CompleteSchemaMigrationRequest) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.CompleteSchemaMigration") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + + if !api.authz.IsAuthorized(ctx, req.ClusterId, rbac.SchemaMigrationResource, rbac.CompleteSchemaMigrationAction) { + return nil, fmt.Errorf("%w: cannot complete schema migration in %s", errors.ErrUnauthorized, req.ClusterId) + } + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + return c.CompleteSchemaMigration(ctx, req.Request) +} + // CreateKeyspace is part of the vtadminpb.VTAdminServer interface. func (api *API) CreateKeyspace(ctx context.Context, req *vtadminpb.CreateKeyspaceRequest) (*vtadminpb.CreateKeyspaceResponse, error) { span, ctx := trace.NewSpan(ctx, "API.CreateKeyspace") @@ -1015,6 +1106,113 @@ func (api *API) GetSchemas(ctx context.Context, req *vtadminpb.GetSchemasRequest }, nil } +// GetSchemaMigrations is part of the vtadminpb.VTAdminServer interface. 
+func (api *API) GetSchemaMigrations(ctx context.Context, req *vtadminpb.GetSchemaMigrationsRequest) (*vtadminpb.GetSchemaMigrationsResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.GetSchemaMigrations") + defer span.Finish() + + clusterIDs := make([]string, 0, len(req.ClusterRequests)) + requestsByCluster := make(map[string][]*vtctldatapb.GetSchemaMigrationsRequest, len(req.ClusterRequests)) + + for _, r := range req.ClusterRequests { + clusterIDs = append(clusterIDs, r.ClusterId) + requestsByCluster[r.ClusterId] = append(requestsByCluster[r.ClusterId], r.Request) + } + + clusters, _ := api.getClustersForRequest(clusterIDs) + + var ( + m sync.Mutex + wg sync.WaitGroup + rec concurrency.AllErrorRecorder + results = make([]*vtadminpb.SchemaMigration, 0, len(req.ClusterRequests)) + ) + + m.Lock() + for _, c := range clusters { + if len(requestsByCluster[c.ID]) == 0 { + wg.Add(1) + go func(ctx context.Context, c *cluster.Cluster) { + defer wg.Done() + + span, ctx := trace.NewSpan(ctx, "API.getClusterKeyspaces") + defer span.Finish() + + span.Annotate("cluster_id", c.ID) + + if !api.authz.IsAuthorized(ctx, c.ID, rbac.SchemaMigrationResource, rbac.GetAction) { + return + } + + keyspaces, err := c.GetKeyspaces(ctx) + if err != nil { + rec.RecordError(err) + return + } + + m.Lock() + defer m.Unlock() + + for _, ks := range keyspaces { + requestsByCluster[c.ID] = append(requestsByCluster[c.ID], &vtctldatapb.GetSchemaMigrationsRequest{ + Keyspace: ks.Keyspace.Name, + }) + } + }(ctx, c) + } + } + m.Unlock() + + wg.Wait() + if rec.HasErrors() { + return nil, rec.Error() + } + + for _, c := range clusters { + if requestsByCluster[c.ID] == nil { + continue + } + + for _, r := range requestsByCluster[c.ID] { + wg.Add(1) + + go func(ctx context.Context, c *cluster.Cluster, r *vtctldatapb.GetSchemaMigrationsRequest) { + defer wg.Done() + + span, ctx := trace.NewSpan(ctx, "API.getClusterSchemaMigrations") + defer span.Finish() + + span.Annotate("cluster_id", c.ID) + + if 
!api.authz.IsAuthorized(ctx, c.ID, rbac.SchemaMigrationResource, rbac.GetAction) { + return + } + + migrations, err := c.GetSchemaMigrations(ctx, r) + if err != nil { + rec.RecordError(err) + return + } + + m.Lock() + defer m.Unlock() + + results = append(results, migrations...) + }(ctx, c, r) + } + } + + wg.Wait() + + if rec.HasErrors() { + return nil, rec.Error() + } + + return &vtadminpb.GetSchemaMigrationsResponse{ + SchemaMigrations: results, + }, nil +} + // GetShardReplicationPositions is part of the vtadminpb.VTAdminServer interface. func (api *API) GetShardReplicationPositions(ctx context.Context, req *vtadminpb.GetShardReplicationPositionsRequest) (*vtadminpb.GetShardReplicationPositionsResponse, error) { span, ctx := trace.NewSpan(ctx, "API.GetShardReplicationPositions") @@ -1515,6 +1713,25 @@ func (api *API) GetWorkflows(ctx context.Context, req *vtadminpb.GetWorkflowsReq }, nil } +// LaunchSchemaMigration is part of the vtadminpb.VTAdminServer interface. +func (api *API) LaunchSchemaMigration(ctx context.Context, req *vtadminpb.LaunchSchemaMigrationRequest) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.LaunchSchemaMigration") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + + if !api.authz.IsAuthorized(ctx, req.ClusterId, rbac.SchemaMigrationResource, rbac.LaunchSchemaMigrationAction) { + return nil, fmt.Errorf("%w: cannot launch schema migration in %s", errors.ErrUnauthorized, req.ClusterId) + } + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + return c.LaunchSchemaMigration(ctx, req.Request) +} + // PingTablet is part of the vtadminpb.VTAdminServer interface. 
func (api *API) PingTablet(ctx context.Context, req *vtadminpb.PingTabletRequest) (*vtadminpb.PingTabletResponse, error) { span, ctx := trace.NewSpan(ctx, "API.PingTablet") @@ -1722,6 +1939,25 @@ func (api *API) ReloadSchemaShard(ctx context.Context, req *vtadminpb.ReloadSche }, nil } +// RetrySchemaMigration is part of the vtadminpb.VTAdminServer interface. +func (api *API) RetrySchemaMigration(ctx context.Context, req *vtadminpb.RetrySchemaMigrationRequest) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "API.RetrySchemaMigration") + defer span.Finish() + + span.Annotate("cluster_id", req.ClusterId) + + if !api.authz.IsAuthorized(ctx, req.ClusterId, rbac.SchemaMigrationResource, rbac.RetryAction) { + return nil, fmt.Errorf("%w: cannot retry schema migration in %s", errors.ErrUnauthorized, req.ClusterId) + } + + c, err := api.getClusterForRequest(req.ClusterId) + if err != nil { + return nil, err + } + + return c.RetrySchemaMigration(ctx, req.Request) +} + // RunHealthCheck is part of the vtadminpb.VTAdminServer interface. 
func (api *API) RunHealthCheck(ctx context.Context, req *vtadminpb.RunHealthCheckRequest) (*vtadminpb.RunHealthCheckResponse, error) { span, ctx := trace.NewSpan(ctx, "API.RunHealthCheck") @@ -2148,7 +2384,9 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) return nil, er.Error() } - vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}) + ts := memorytopo.NewServer(ctx, vtexplain.Cell) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + vte, err := vtexplain.Init(ctx, api.env, ts, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}, srvTopoCounts) if err != nil { return nil, fmt.Errorf("error initilaizing vtexplain: %w", err) } diff --git a/go/vt/vtadmin/api_authz_test.go b/go/vt/vtadmin/api_authz_test.go index eb67757a1c1..94a8befd473 100644 --- a/go/vt/vtadmin/api_authz_test.go +++ b/go/vt/vtadmin/api_authz_test.go @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/vt/vtadmin/rbac" "vitess.io/vitess/go/vt/vtadmin/testutil" "vitess.io/vitess/go/vt/vtadmin/vtctldclient/fakevtctldclient" + "vitess.io/vitess/go/vt/vtenv" logutilpb "vitess.io/vitess/go/vt/proto/logutil" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" @@ -43,6 +44,266 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) +func TestApplySchema(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "SchemaMigration", + Actions: []string{"create"}, + Subjects: []string{"user:allowed"}, + Clusters: []string{"*"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not 
close cleanly: %s", err.Error()) + } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.ApplySchema(ctx, &vtadminpb.ApplySchemaRequest{ + ClusterId: "test", + Request: &vtctldatapb.ApplySchemaRequest{ + Keyspace: "test", + }, + }) + assert.Error(t, err, "actor %+v should not be permitted to ApplySchema", actor) + assert.Nil(t, resp, "actor %+v should not be permitted to ApplySchema", actor) + }) + + t.Run("authorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.ApplySchema(ctx, &vtadminpb.ApplySchemaRequest{ + ClusterId: "test", + Request: &vtctldatapb.ApplySchemaRequest{ + Keyspace: "test", + }, + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to ApplySchema", actor) + }) +} + +func TestCancelSchemaMigration(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "SchemaMigration", + Actions: []string{"cancel"}, + Subjects: []string{"user:allowed"}, + Clusters: []string{"*"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.CancelSchemaMigration(ctx, &vtadminpb.CancelSchemaMigrationRequest{ + ClusterId: "test", + Request: 
&vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + assert.Error(t, err, "actor %+v should not be permitted to CancelSchemaMigration", actor) + assert.Nil(t, resp, "actor %+v should not be permitted to CancelSchemaMigration", actor) + }) + + t.Run("authorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.CancelSchemaMigration(ctx, &vtadminpb.CancelSchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to CancelSchemaMigration", actor) + }) +} + +func TestCleanupSchemaMigration(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "SchemaMigration", + Actions: []string{"cleanup_schema_migration"}, + Subjects: []string{"user:allowed"}, + Clusters: []string{"*"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.CleanupSchemaMigration(ctx, &vtadminpb.CleanupSchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + assert.Error(t, err, "actor %+v should not be permitted to CleanupSchemaMigration", actor) + assert.Nil(t, resp, "actor %+v should not be permitted to CleanupSchemaMigration", 
actor) + }) + + t.Run("authorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.CleanupSchemaMigration(ctx, &vtadminpb.CleanupSchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to CleanupSchemaMigration", actor) + }) +} + +func TestCompleteSchemaMigration(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "SchemaMigration", + Actions: []string{"complete_schema_migration"}, + Subjects: []string{"user:allowed"}, + Clusters: []string{"*"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.CompleteSchemaMigration(ctx, &vtadminpb.CompleteSchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + assert.Error(t, err, "actor %+v should not be permitted to CompleteSchemaMigration", actor) + assert.Nil(t, resp, "actor %+v should not be permitted to CompleteSchemaMigration", actor) + }) + + t.Run("authorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.CompleteSchemaMigration(ctx, 
&vtadminpb.CompleteSchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to CompleteSchemaMigration", actor) + }) +} + func TestCreateKeyspace(t *testing.T) { t.Parallel() @@ -66,7 +327,7 @@ func TestCreateKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -78,9 +339,7 @@ func TestCreateKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.CreateKeyspace(ctx, &vtadminpb.CreateKeyspaceRequest{ ClusterId: "test", @@ -97,9 +356,7 @@ func TestCreateKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.CreateKeyspace(ctx, &vtadminpb.CreateKeyspaceRequest{ ClusterId: "test", @@ -135,7 +392,7 @@ func TestCreateShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -147,9 +404,7 @@ func TestCreateShard(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.CreateShard(ctx, 
&vtadminpb.CreateShardRequest{ ClusterId: "test", @@ -167,9 +422,7 @@ func TestCreateShard(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.CreateShard(ctx, &vtadminpb.CreateShardRequest{ ClusterId: "test", @@ -206,7 +459,7 @@ func TestDeleteKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -218,9 +471,7 @@ func TestDeleteKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.DeleteKeyspace(ctx, &vtadminpb.DeleteKeyspaceRequest{ ClusterId: "test", @@ -237,9 +488,7 @@ func TestDeleteKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.DeleteKeyspace(ctx, &vtadminpb.DeleteKeyspaceRequest{ ClusterId: "test", @@ -275,7 +524,7 @@ func TestDeleteShards(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -287,9 +536,7 @@ func TestDeleteShards(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := 
api.DeleteShards(ctx, &vtadminpb.DeleteShardsRequest{ ClusterId: "test", @@ -311,9 +558,7 @@ func TestDeleteShards(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.DeleteShards(ctx, &vtadminpb.DeleteShardsRequest{ ClusterId: "test", @@ -354,7 +599,7 @@ func TestDeleteTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -366,9 +611,7 @@ func TestDeleteTablet(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.DeleteTablet(ctx, &vtadminpb.DeleteTabletRequest{ ClusterIds: []string{"test"}, @@ -386,9 +629,7 @@ func TestDeleteTablet(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.DeleteTablet(ctx, &vtadminpb.DeleteTabletRequest{ ClusterIds: []string{"test"}, @@ -425,7 +666,7 @@ func TestEmergencyFailoverShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -437,9 +678,7 @@ func TestEmergencyFailoverShard(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx 
= rbac.NewContext(ctx, actor) resp, err := api.EmergencyFailoverShard(ctx, &vtadminpb.EmergencyFailoverShardRequest{ ClusterId: "test", @@ -457,9 +696,7 @@ func TestEmergencyFailoverShard(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.EmergencyFailoverShard(ctx, &vtadminpb.EmergencyFailoverShardRequest{ ClusterId: "test", @@ -505,7 +742,7 @@ func TestFindSchema(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -514,9 +751,7 @@ func TestFindSchema(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ Table: "t1", @@ -528,7 +763,7 @@ func TestFindSchema(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -537,9 +772,7 @@ func TestFindSchema(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ Table: "t1", @@ -550,7 +783,7 @@ func TestFindSchema(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := 
api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -559,9 +792,7 @@ func TestFindSchema(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ Table: "t1", @@ -601,7 +832,7 @@ func TestGetBackups(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -613,9 +844,7 @@ func TestGetBackups(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetBackups(ctx, &vtadminpb.GetBackupsRequest{}) assert.NoError(t, err) @@ -627,9 +856,7 @@ func TestGetBackups(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetBackups(ctx, &vtadminpb.GetBackupsRequest{}) assert.NotEmpty(t, resp.Backups, "actor %+v should be permitted to GetBackups", actor) @@ -641,9 +868,7 @@ func TestGetBackups(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetBackups(ctx, &vtadminpb.GetBackupsRequest{}) assert.NotEmpty(t, resp.Backups, "actor %+v should be permitted to GetBackups", actor) @@ -680,7 +905,7 @@ func TestGetCellInfos(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := 
vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -692,9 +917,7 @@ func TestGetCellInfos(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetCellInfos(ctx, &vtadminpb.GetCellInfosRequest{ NamesOnly: true, @@ -708,9 +931,7 @@ func TestGetCellInfos(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetCellInfos(ctx, &vtadminpb.GetCellInfosRequest{ NamesOnly: true, @@ -724,9 +945,7 @@ func TestGetCellInfos(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetCellInfos(ctx, &vtadminpb.GetCellInfosRequest{ NamesOnly: true, @@ -765,7 +984,7 @@ func TestGetCellsAliases(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -777,9 +996,7 @@ func TestGetCellsAliases(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetCellsAliases(ctx, &vtadminpb.GetCellsAliasesRequest{}) assert.NoError(t, err) @@ -791,9 +1008,7 @@ func TestGetCellsAliases(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor 
!= nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetCellsAliases(ctx, &vtadminpb.GetCellsAliasesRequest{}) assert.NotEmpty(t, resp.Aliases, "actor %+v should be permitted to GetCellsAliases", actor) @@ -805,9 +1020,7 @@ func TestGetCellsAliases(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetCellsAliases(ctx, &vtadminpb.GetCellsAliasesRequest{}) assert.NotEmpty(t, resp.Aliases, "actor %+v should be permitted to GetCellsAliases", actor) @@ -838,7 +1051,7 @@ func TestGetClusters(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -850,9 +1063,7 @@ func TestGetClusters(t *testing.T) { var actor *rbac.Actor ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) assert.Empty(t, resp.Clusters, "actor %+v should not be permitted to GetClusters", actor) @@ -863,9 +1074,7 @@ func TestGetClusters(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) assert.Empty(t, resp.Clusters, "actor %+v should not be permitted to GetClusters", actor) @@ -876,9 +1085,7 @@ func TestGetClusters(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := 
api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) require.NoError(t, err) @@ -918,7 +1125,7 @@ func TestGetGates(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -927,9 +1134,7 @@ func TestGetGates(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) assert.NoError(t, err) @@ -939,7 +1144,7 @@ func TestGetGates(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -948,9 +1153,7 @@ func TestGetGates(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) assert.NotEmpty(t, resp.Gates, "actor %+v should be permitted to GetGates", actor) @@ -961,7 +1164,7 @@ func TestGetGates(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -970,9 +1173,7 @@ func TestGetGates(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := 
api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) assert.NotEmpty(t, resp.Gates, "actor %+v should be permitted to GetGates", actor) @@ -1004,7 +1205,7 @@ func TestGetKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1016,9 +1217,7 @@ func TestGetKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetKeyspace(ctx, &vtadminpb.GetKeyspaceRequest{ ClusterId: "test", @@ -1033,20 +1232,105 @@ func TestGetKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.GetKeyspace(ctx, &vtadminpb.GetKeyspaceRequest{ + ClusterId: "test", + Keyspace: "test", + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to GetKeyspace", actor) + }) +} + +func TestGetKeyspaces(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "Keyspace", + Actions: []string{"get"}, + Subjects: []string{"user:allowed-all"}, + Clusters: []string{"*"}, + }, + { + Resource: "Keyspace", + Actions: []string{"get"}, + Subjects: []string{"user:allowed-other"}, + Clusters: []string{"other"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != 
nil { + t.Logf("api did not close cleanly: %s", err.Error()) } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "unauthorized"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{}) + assert.NoError(t, err) + assert.Empty(t, resp.Keyspaces, "actor %+v should not be permitted to GetKeyspaces", actor) + }) + + t.Run("partial access", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed-other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, _ := api.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{}) + assert.NotEmpty(t, resp.Keyspaces, "actor %+v should be permitted to GetKeyspaces", actor) + ksMap := map[string][]string{} + for _, ks := range resp.Keyspaces { + if _, ok := ksMap[ks.Cluster.Id]; !ok { + ksMap[ks.Cluster.Id] = []string{} + } + ksMap[ks.Cluster.Id] = append(ksMap[ks.Cluster.Id], ks.Keyspace.Name) + } + assert.Equal(t, ksMap, map[string][]string{"other": {"otherks"}}, "actor %+v should be permitted to GetKeyspaces", actor) + }) + + t.Run("full access", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed-all"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) - resp, err := api.GetKeyspace(ctx, &vtadminpb.GetKeyspaceRequest{ - ClusterId: "test", - Keyspace: "test", - }) - require.NoError(t, err) - assert.NotNil(t, resp, "actor %+v should be permitted to GetKeyspace", actor) + resp, _ := api.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{}) + assert.NotEmpty(t, resp.Keyspaces, "actor %+v should be permitted to GetKeyspaces", actor) + ksMap := map[string][]string{} + for _, ks := range resp.Keyspaces { + if _, ok := ksMap[ks.Cluster.Id]; !ok { + ksMap[ks.Cluster.Id] = []string{} + } + ksMap[ks.Cluster.Id] = append(ksMap[ks.Cluster.Id], ks.Keyspace.Name) + } + assert.Equal(t, ksMap, map[string][]string{"test": {"test"}, 
"other": {"otherks"}}, "actor %+v should be permitted to GetKeyspaces", actor) }) } -func TestGetKeyspaces(t *testing.T) { +func TestGetSchemaMigrations(t *testing.T) { t.Parallel() opts := vtadmin.Options{ @@ -1058,13 +1342,13 @@ func TestGetKeyspaces(t *testing.T) { Clusters []string }{ { - Resource: "Keyspace", + Resource: "SchemaMigration", Actions: []string{"get"}, Subjects: []string{"user:allowed-all"}, Clusters: []string{"*"}, }, { - Resource: "Keyspace", + Resource: "SchemaMigration", Actions: []string{"get"}, Subjects: []string{"user:allowed-other"}, Clusters: []string{"other"}, @@ -1075,7 +1359,7 @@ func TestGetKeyspaces(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1087,13 +1371,11 @@ func TestGetKeyspaces(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) - resp, err := api.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{}) + resp, err := api.GetSchemaMigrations(ctx, &vtadminpb.GetSchemaMigrationsRequest{}) assert.NoError(t, err) - assert.Empty(t, resp.Keyspaces, "actor %+v should not be permitted to GetKeyspaces", actor) + assert.Empty(t, resp.SchemaMigrations, "actor %+v should not be permitted to GetSchemaMigrations", actor) }) t.Run("partial access", func(t *testing.T) { @@ -1101,20 +1383,11 @@ func TestGetKeyspaces(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) - resp, _ := api.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{}) - assert.NotEmpty(t, resp.Keyspaces, "actor %+v should 
be permitted to GetKeyspaces", actor) - ksMap := map[string][]string{} - for _, ks := range resp.Keyspaces { - if _, ok := ksMap[ks.Cluster.Id]; !ok { - ksMap[ks.Cluster.Id] = []string{} - } - ksMap[ks.Cluster.Id] = append(ksMap[ks.Cluster.Id], ks.Keyspace.Name) - } - assert.Equal(t, ksMap, map[string][]string{"other": {"otherks"}}, "actor %+v should be permitted to GetKeyspaces", actor) + resp, _ := api.GetSchemaMigrations(ctx, &vtadminpb.GetSchemaMigrationsRequest{}) + assert.NotEmpty(t, resp.SchemaMigrations, "actor %+v should be permitted to GetSchemaMigrations", actor) + assert.Len(t, resp.SchemaMigrations, 3, "'other' actor should be able to see the 3 migrations in cluster 'other'") }) t.Run("full access", func(t *testing.T) { @@ -1122,20 +1395,11 @@ func TestGetKeyspaces(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) - resp, _ := api.GetKeyspaces(ctx, &vtadminpb.GetKeyspacesRequest{}) - assert.NotEmpty(t, resp.Keyspaces, "actor %+v should be permitted to GetKeyspaces", actor) - ksMap := map[string][]string{} - for _, ks := range resp.Keyspaces { - if _, ok := ksMap[ks.Cluster.Id]; !ok { - ksMap[ks.Cluster.Id] = []string{} - } - ksMap[ks.Cluster.Id] = append(ksMap[ks.Cluster.Id], ks.Keyspace.Name) - } - assert.Equal(t, ksMap, map[string][]string{"test": {"test"}, "other": {"otherks"}}, "actor %+v should be permitted to GetKeyspaces", actor) + resp, _ := api.GetSchemaMigrations(ctx, &vtadminpb.GetSchemaMigrationsRequest{}) + assert.NotEmpty(t, resp.SchemaMigrations, "actor %+v should be permitted to GetSchemaMigrations", actor) + assert.Len(t, resp.SchemaMigrations, 4, "'all' actor should be able to see migrations in all clusters") }) } @@ -1165,7 +1429,7 @@ func TestGetSchema(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := 
vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1174,9 +1438,7 @@ func TestGetSchema(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ ClusterId: "test", @@ -1190,7 +1452,7 @@ func TestGetSchema(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1199,9 +1461,7 @@ func TestGetSchema(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ ClusterId: "test", @@ -1245,7 +1505,7 @@ func TestGetSchemas(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1254,9 +1514,7 @@ func TestGetSchemas(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{}) assert.NoError(t, err) @@ -1266,7 +1524,7 @@ func TestGetSchemas(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) 
t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1275,9 +1533,7 @@ func TestGetSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{}) assert.NotEmpty(t, resp.Schemas, "actor %+v should be permitted to GetSchemas", actor) @@ -1294,7 +1550,7 @@ func TestGetSchemas(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1303,9 +1559,7 @@ func TestGetSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetSchemas(ctx, &vtadminpb.GetSchemasRequest{}) assert.NotEmpty(t, resp.Schemas, "actor %+v should be permitted to GetSchemas", actor) @@ -1349,7 +1603,7 @@ func TestGetShardReplicationPositions(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1361,9 +1615,7 @@ func TestGetShardReplicationPositions(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetShardReplicationPositions(ctx, &vtadminpb.GetShardReplicationPositionsRequest{}) assert.NoError(t, err) @@ -1375,9 +1627,7 @@ func 
TestGetShardReplicationPositions(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetShardReplicationPositions(ctx, &vtadminpb.GetShardReplicationPositionsRequest{}) assert.NotEmpty(t, resp.ReplicationPositions, "actor %+v should be permitted to GetShardReplicationPositions", actor) @@ -1396,9 +1646,7 @@ func TestGetShardReplicationPositions(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetShardReplicationPositions(ctx, &vtadminpb.GetShardReplicationPositionsRequest{}) assert.NotEmpty(t, resp.ReplicationPositions, "actor %+v should be permitted to GetShardReplicationPositions", actor) @@ -1436,7 +1684,7 @@ func TestGetSrvVSchema(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1448,9 +1696,7 @@ func TestGetSrvVSchema(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetSrvVSchema(ctx, &vtadminpb.GetSrvVSchemaRequest{ ClusterId: "test", @@ -1465,9 +1711,7 @@ func TestGetSrvVSchema(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetSrvVSchema(ctx, &vtadminpb.GetSrvVSchemaRequest{ ClusterId: "test", @@ -1507,7 +1751,7 @@ func TestGetSrvVSchemas(t *testing.T) { err := opts.RBAC.Reify() 
require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1519,9 +1763,7 @@ func TestGetSrvVSchemas(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetSrvVSchemas(ctx, &vtadminpb.GetSrvVSchemasRequest{}) require.NoError(t, err) @@ -1533,9 +1775,7 @@ func TestGetSrvVSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetSrvVSchemas(ctx, &vtadminpb.GetSrvVSchemasRequest{}) assert.NotEmpty(t, resp.SrvVSchemas, "actor %+v should be permitted to GetSrvVSchemas", actor) @@ -1554,9 +1794,7 @@ func TestGetSrvVSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetSrvVSchemas(ctx, &vtadminpb.GetSrvVSchemasRequest{}) assert.NotEmpty(t, resp.SrvVSchemas, "actor %+v should be permitted to GetSrvVSchemas", actor) @@ -1594,7 +1832,7 @@ func TestGetTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1606,9 +1844,7 @@ func TestGetTablet(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx 
= rbac.NewContext(ctx, actor) resp, err := api.GetTablet(ctx, &vtadminpb.GetTabletRequest{ Alias: &topodatapb.TabletAlias{ @@ -1625,9 +1861,7 @@ func TestGetTablet(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetTablet(ctx, &vtadminpb.GetTabletRequest{ Alias: &topodatapb.TabletAlias{ @@ -1669,7 +1903,7 @@ func TestGetTablets(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1681,9 +1915,7 @@ func TestGetTablets(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetTablets(ctx, &vtadminpb.GetTabletsRequest{}) require.NoError(t, err) @@ -1695,9 +1927,7 @@ func TestGetTablets(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetTablets(ctx, &vtadminpb.GetTabletsRequest{}) assert.NotEmpty(t, resp.Tablets, "actor %+v should be permitted to GetTablets", actor) @@ -1716,9 +1946,7 @@ func TestGetTablets(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetTablets(ctx, &vtadminpb.GetTabletsRequest{}) assert.NotEmpty(t, resp.Tablets, "actor %+v should be permitted to GetTablets", actor) @@ -1756,7 +1984,7 @@ func TestGetVSchema(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, 
"failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1768,9 +1996,7 @@ func TestGetVSchema(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetVSchema(ctx, &vtadminpb.GetVSchemaRequest{ ClusterId: "test", @@ -1785,9 +2011,7 @@ func TestGetVSchema(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetVSchema(ctx, &vtadminpb.GetVSchemaRequest{ ClusterId: "test", @@ -1827,7 +2051,7 @@ func TestGetVSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1839,9 +2063,7 @@ func TestGetVSchemas(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetVSchemas(ctx, &vtadminpb.GetVSchemasRequest{}) require.NoError(t, err) @@ -1853,9 +2075,7 @@ func TestGetVSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetVSchemas(ctx, &vtadminpb.GetVSchemasRequest{}) assert.NotEmpty(t, resp.VSchemas, "actor %+v should be permitted to GetVSchemas", actor) @@ -1874,9 +2094,7 @@ 
func TestGetVSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetVSchemas(ctx, &vtadminpb.GetVSchemasRequest{}) assert.NotEmpty(t, resp.VSchemas, "actor %+v should be permitted to GetVSchemas", actor) @@ -1923,7 +2141,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1932,9 +2150,7 @@ func TestGetVtctlds(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{}) assert.NoError(t, err) @@ -1944,7 +2160,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1953,9 +2169,7 @@ func TestGetVtctlds(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{}) assert.NotEmpty(t, resp.Vtctlds, "actor %+v should be permitted to GetVtctlds", actor) @@ -1966,7 +2180,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err 
!= nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1975,9 +2189,7 @@ func TestGetVtctlds(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{}) assert.NotEmpty(t, resp.Vtctlds, "actor %+v should be permitted to GetVtctlds", actor) @@ -2009,7 +2221,7 @@ func TestGetWorkflow(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2021,9 +2233,7 @@ func TestGetWorkflow(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetWorkflow(ctx, &vtadminpb.GetWorkflowRequest{ ClusterId: "test", @@ -2039,9 +2249,7 @@ func TestGetWorkflow(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetWorkflow(ctx, &vtadminpb.GetWorkflowRequest{ ClusterId: "test", @@ -2082,7 +2290,7 @@ func TestGetWorkflows(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2094,9 +2302,7 @@ func TestGetWorkflows(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = 
rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.GetWorkflows(ctx, &vtadminpb.GetWorkflowsRequest{}) require.NoError(t, err) @@ -2108,9 +2314,7 @@ func TestGetWorkflows(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetWorkflows(ctx, &vtadminpb.GetWorkflowsRequest{}) assert.NotEmpty(t, resp.WorkflowsByCluster, "actor %+v should be permitted to GetWorkflows", actor) @@ -2122,9 +2326,7 @@ func TestGetWorkflows(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.GetWorkflows(ctx, &vtadminpb.GetWorkflowsRequest{}) assert.NotEmpty(t, resp.WorkflowsByCluster, "actor %+v should be permitted to GetWorkflows", actor) @@ -2132,6 +2334,71 @@ func TestGetWorkflows(t *testing.T) { }) } +func TestLaunchSchemaMigration(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "SchemaMigration", + Actions: []string{"launch_schema_migration"}, + Subjects: []string{"user:allowed"}, + Clusters: []string{"*"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.LaunchSchemaMigration(ctx, &vtadminpb.LaunchSchemaMigrationRequest{ + ClusterId: "test", + Request: 
&vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + assert.Error(t, err, "actor %+v should not be permitted to LaunchSchemaMigration", actor) + assert.Nil(t, resp, "actor %+v should not be permitted to LaunchSchemaMigration", actor) + }) + + t.Run("authorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.LaunchSchemaMigration(ctx, &vtadminpb.LaunchSchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: "test", + }, + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to LaunchSchemaMigration", actor) + }) +} + func TestPingTablet(t *testing.T) { t.Parallel() @@ -2155,7 +2422,7 @@ func TestPingTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2167,9 +2434,7 @@ func TestPingTablet(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.PingTablet(ctx, &vtadminpb.PingTabletRequest{ Alias: &topodatapb.TabletAlias{ @@ -2186,9 +2451,7 @@ func TestPingTablet(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.PingTablet(ctx, &vtadminpb.PingTabletRequest{ Alias: &topodatapb.TabletAlias{ @@ -2224,7 +2487,7 @@ func TestPlannedFailoverShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) 
- api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2236,9 +2499,7 @@ func TestPlannedFailoverShard(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.PlannedFailoverShard(ctx, &vtadminpb.PlannedFailoverShardRequest{ ClusterId: "test", @@ -2256,9 +2517,7 @@ func TestPlannedFailoverShard(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.PlannedFailoverShard(ctx, &vtadminpb.PlannedFailoverShardRequest{ ClusterId: "test", @@ -2295,7 +2554,7 @@ func TestRefreshState(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2307,9 +2566,7 @@ func TestRefreshState(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.RefreshState(ctx, &vtadminpb.RefreshStateRequest{ Alias: &topodatapb.TabletAlias{ @@ -2326,9 +2583,7 @@ func TestRefreshState(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.RefreshState(ctx, &vtadminpb.RefreshStateRequest{ Alias: &topodatapb.TabletAlias{ @@ -2364,7 +2619,7 @@ func TestRefreshTabletReplicationSource(t *testing.T) { err 
:= opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2376,9 +2631,7 @@ func TestRefreshTabletReplicationSource(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.RefreshTabletReplicationSource(ctx, &vtadminpb.RefreshTabletReplicationSourceRequest{ Alias: &topodatapb.TabletAlias{ @@ -2395,9 +2648,7 @@ func TestRefreshTabletReplicationSource(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.RefreshTabletReplicationSource(ctx, &vtadminpb.RefreshTabletReplicationSourceRequest{ Alias: &topodatapb.TabletAlias{ @@ -2439,7 +2690,7 @@ func TestReloadSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2451,9 +2702,7 @@ func TestReloadSchemas(t *testing.T) { actor := &rbac.Actor{Name: "unauthorized"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ReloadSchemas(ctx, &vtadminpb.ReloadSchemasRequest{ Keyspaces: []string{ @@ -2470,9 +2719,7 @@ func TestReloadSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = 
rbac.NewContext(ctx, actor) resp, _ := api.ReloadSchemas(ctx, &vtadminpb.ReloadSchemasRequest{ Keyspaces: []string{ @@ -2488,9 +2735,7 @@ func TestReloadSchemas(t *testing.T) { actor := &rbac.Actor{Name: "allowed-all"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, _ := api.ReloadSchemas(ctx, &vtadminpb.ReloadSchemasRequest{ Keyspaces: []string{ @@ -2502,6 +2747,71 @@ func TestReloadSchemas(t *testing.T) { }) } +func TestRetrySchemaMigration(t *testing.T) { + t.Parallel() + + opts := vtadmin.Options{ + RBAC: &rbac.Config{ + Rules: []*struct { + Resource string + Actions []string + Subjects []string + Clusters []string + }{ + { + Resource: "SchemaMigration", + Actions: []string{"retry"}, + Subjects: []string{"user:allowed"}, + Clusters: []string{"*"}, + }, + }, + }, + } + err := opts.RBAC.Reify() + require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) + + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) + t.Cleanup(func() { + if err := api.Close(); err != nil { + t.Logf("api did not close cleanly: %s", err.Error()) + } + }) + + t.Run("unauthorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "other"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.RetrySchemaMigration(ctx, &vtadminpb.RetrySchemaMigrationRequest{ + ClusterId: "test", + Request: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "test", + }, + }) + assert.Error(t, err, "actor %+v should not be permitted to RetrySchemaMigration", actor) + assert.Nil(t, resp, "actor %+v should not be permitted to RetrySchemaMigration", actor) + }) + + t.Run("authorized actor", func(t *testing.T) { + t.Parallel() + + actor := &rbac.Actor{Name: "allowed"} + ctx := context.Background() + ctx = rbac.NewContext(ctx, actor) + + resp, err := api.RetrySchemaMigration(ctx, &vtadminpb.RetrySchemaMigrationRequest{ + ClusterId: 
"test", + Request: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: "test", + }, + }) + require.NoError(t, err) + assert.NotNil(t, resp, "actor %+v should be permitted to RetrySchemaMigration", actor) + }) +} + func TestRunHealthCheck(t *testing.T) { t.Parallel() @@ -2525,7 +2835,7 @@ func TestRunHealthCheck(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2537,9 +2847,7 @@ func TestRunHealthCheck(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.RunHealthCheck(ctx, &vtadminpb.RunHealthCheckRequest{ Alias: &topodatapb.TabletAlias{ @@ -2556,9 +2864,7 @@ func TestRunHealthCheck(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.RunHealthCheck(ctx, &vtadminpb.RunHealthCheckRequest{ Alias: &topodatapb.TabletAlias{ @@ -2594,7 +2900,7 @@ func TestSetReadOnly(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2606,9 +2912,7 @@ func TestSetReadOnly(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.SetReadOnly(ctx, &vtadminpb.SetReadOnlyRequest{ Alias: 
&topodatapb.TabletAlias{ @@ -2625,9 +2929,7 @@ func TestSetReadOnly(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.SetReadOnly(ctx, &vtadminpb.SetReadOnlyRequest{ Alias: &topodatapb.TabletAlias{ @@ -2663,7 +2965,7 @@ func TestSetReadWrite(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2675,9 +2977,7 @@ func TestSetReadWrite(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.SetReadWrite(ctx, &vtadminpb.SetReadWriteRequest{ Alias: &topodatapb.TabletAlias{ @@ -2694,9 +2994,7 @@ func TestSetReadWrite(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.SetReadWrite(ctx, &vtadminpb.SetReadWriteRequest{ Alias: &topodatapb.TabletAlias{ @@ -2732,7 +3030,7 @@ func TestStartReplication(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2744,9 +3042,7 @@ func TestStartReplication(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) 
resp, err := api.StartReplication(ctx, &vtadminpb.StartReplicationRequest{ Alias: &topodatapb.TabletAlias{ @@ -2763,9 +3059,7 @@ func TestStartReplication(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.StartReplication(ctx, &vtadminpb.StartReplicationRequest{ Alias: &topodatapb.TabletAlias{ @@ -2801,7 +3095,7 @@ func TestStopReplication(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2813,9 +3107,7 @@ func TestStopReplication(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.StopReplication(ctx, &vtadminpb.StopReplicationRequest{ Alias: &topodatapb.TabletAlias{ @@ -2832,9 +3124,7 @@ func TestStopReplication(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.StopReplication(ctx, &vtadminpb.StopReplicationRequest{ Alias: &topodatapb.TabletAlias{ @@ -2870,7 +3160,7 @@ func TestTabletExternallyPromoted(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2882,9 +3172,7 @@ func TestTabletExternallyPromoted(t *testing.T) { actor := &rbac.Actor{Name: 
"other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.TabletExternallyPromoted(ctx, &vtadminpb.TabletExternallyPromotedRequest{ Alias: &topodatapb.TabletAlias{ @@ -2901,9 +3189,7 @@ func TestTabletExternallyPromoted(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.TabletExternallyPromoted(ctx, &vtadminpb.TabletExternallyPromotedRequest{ Alias: &topodatapb.TabletAlias{ @@ -2942,7 +3228,7 @@ func TestVTExplain(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2951,9 +3237,7 @@ func TestVTExplain(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.VTExplain(ctx, &vtadminpb.VTExplainRequest{ Cluster: "test", @@ -2966,7 +3250,7 @@ func TestVTExplain(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2975,9 +3259,7 @@ func TestVTExplain(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.VTExplain(ctx, &vtadminpb.VTExplainRequest{ Cluster: "test", @@ -3011,7 +3293,7 @@ func TestValidateKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, 
"failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -3023,9 +3305,7 @@ func TestValidateKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ValidateKeyspace(ctx, &vtadminpb.ValidateKeyspaceRequest{ ClusterId: "test", @@ -3040,9 +3320,7 @@ func TestValidateKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ValidateKeyspace(ctx, &vtadminpb.ValidateKeyspaceRequest{ ClusterId: "test", @@ -3076,7 +3354,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -3088,9 +3366,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ValidateSchemaKeyspace(ctx, &vtadminpb.ValidateSchemaKeyspaceRequest{ ClusterId: "test", @@ -3105,9 +3381,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ValidateSchemaKeyspace(ctx, &vtadminpb.ValidateSchemaKeyspaceRequest{ ClusterId: "test", @@ 
-3141,7 +3415,7 @@ func TestValidateVersionKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -3153,9 +3427,7 @@ func TestValidateVersionKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "other"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ValidateVersionKeyspace(ctx, &vtadminpb.ValidateVersionKeyspaceRequest{ ClusterId: "test", @@ -3170,9 +3442,7 @@ func TestValidateVersionKeyspace(t *testing.T) { actor := &rbac.Actor{Name: "allowed"} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) resp, err := api.ValidateVersionKeyspace(ctx, &vtadminpb.ValidateVersionKeyspaceRequest{ ClusterId: "test", @@ -3191,6 +3461,38 @@ func testClusters(t testing.TB) []*cluster.Cluster { Name: "test", }, VtctldClient: &fakevtctldclient.VtctldClient{ + ApplySchemaResults: map[string]struct { + Response *vtctldatapb.ApplySchemaResponse + Error error + }{ + "test": { + Response: &vtctldatapb.ApplySchemaResponse{}, + }, + }, + CancelSchemaMigrationResults: map[string]struct { + Response *vtctldatapb.CancelSchemaMigrationResponse + Error error + }{ + "test": { + Response: &vtctldatapb.CancelSchemaMigrationResponse{}, + }, + }, + CleanupSchemaMigrationResults: map[string]struct { + Response *vtctldatapb.CleanupSchemaMigrationResponse + Error error + }{ + "test": { + Response: &vtctldatapb.CleanupSchemaMigrationResponse{}, + }, + }, + CompleteSchemaMigrationResults: map[string]struct { + Response *vtctldatapb.CompleteSchemaMigrationResponse + Error error + }{ + "test": { + Response: 
&vtctldatapb.CompleteSchemaMigrationResponse{}, + }, + }, DeleteShardsResults: map[string]error{ "test/-": nil, }, @@ -3279,6 +3581,18 @@ func testClusters(t testing.TB) []*cluster.Cluster { }, }, }, + GetSchemaMigrationsResults: map[string]struct { + Response *vtctldatapb.GetSchemaMigrationsResponse + Error error + }{ + "test": { + Response: &vtctldatapb.GetSchemaMigrationsResponse{ + Migrations: []*vtctldatapb.SchemaMigration{ + {}, + }, + }, + }, + }, GetSchemaResults: map[string]struct { Response *vtctldatapb.GetSchemaResponse Error error @@ -3348,6 +3662,14 @@ func testClusters(t testing.TB) []*cluster.Cluster { }, }}, }, + LaunchSchemaMigrationResults: map[string]struct { + Response *vtctldatapb.LaunchSchemaMigrationResponse + Error error + }{ + "test": { + Response: &vtctldatapb.LaunchSchemaMigrationResponse{}, + }, + }, PingTabletResults: map[string]error{ "zone1-0000000100": nil, }, @@ -3379,6 +3701,14 @@ func testClusters(t testing.TB) []*cluster.Cluster { Response: &vtctldatapb.ReparentTabletResponse{}, }, }, + RetrySchemaMigrationResults: map[string]struct { + Response *vtctldatapb.RetrySchemaMigrationResponse + Error error + }{ + "test": { + Response: &vtctldatapb.RetrySchemaMigrationResponse{}, + }, + }, RunHealthCheckResults: map[string]error{ "zone1-0000000100": nil, }, @@ -3449,7 +3779,7 @@ func testClusters(t testing.TB) []*cluster.Cluster { Config: &cluster.Config{ TopoReadPoolConfig: &cluster.RPCPoolConfig{ Size: 100, - WaitTimeout: time.Millisecond * 50, + WaitTimeout: time.Millisecond * 500, }, }, }, { @@ -3516,6 +3846,18 @@ func testClusters(t testing.TB) []*cluster.Cluster { }, }, }, + GetSchemaMigrationsResults: map[string]struct { + Response *vtctldatapb.GetSchemaMigrationsResponse + Error error + }{ + "otherks": { + Response: &vtctldatapb.GetSchemaMigrationsResponse{ + Migrations: []*vtctldatapb.SchemaMigration{ + {}, {}, {}, + }, + }, + }, + }, GetSchemaResults: map[string]struct { Response *vtctldatapb.GetSchemaResponse Error error @@ 
-3598,7 +3940,7 @@ func testClusters(t testing.TB) []*cluster.Cluster { Config: &cluster.Config{ TopoReadPoolConfig: &cluster.RPCPoolConfig{ Size: 100, - WaitTimeout: time.Millisecond * 50, + WaitTimeout: time.Millisecond * 500, }, }, }, diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go index 4a68abd6b73..82c744b95db 100644 --- a/go/vt/vtadmin/api_test.go +++ b/go/vt/vtadmin/api_test.go @@ -32,6 +32,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/vtenv" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo" @@ -545,8 +547,6 @@ func TestFindSchema(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -555,7 +555,7 @@ func TestFindSchema(t *testing.T) { clusters[i] = vtadmintestutil.BuildCluster(t, cfg) } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) defer api.Close() resp, err := api.FindSchema(ctx, tt.req) @@ -765,7 +765,7 @@ func TestFindSchema(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) + api := NewAPI(vtenv.NewTestEnv(), []*cluster.Cluster{c1, c2}, Options{}) defer api.Close() schema, err := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ @@ -860,12 +860,10 @@ func TestGetClusters(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(tt.clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), tt.clusters, Options{}) resp, err := api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) assert.NoError(t, err) @@ -943,7 +941,7 @@ func TestGetGates(t *testing.T) { }, } - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}) + api := NewAPI(vtenv.NewTestEnv(), []*cluster.Cluster{cluster1, cluster2}, Options{}) ctx := context.Background() resp, err := api.GetGates(ctx, 
&vtadminpb.GetGatesRequest{}) @@ -1050,8 +1048,6 @@ func TestGetKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1065,11 +1061,11 @@ func TestGetKeyspace(t *testing.T) { testutil.AddShards(ctx, t, ts, shards...) topos[i] = ts vtctlds[i] = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) } - testutil.WithTestServers(t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { + testutil.WithTestServers(ctx, t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { clusters := make([]*cluster.Cluster, len(clients)) for i, client := range clients { clusters[i] = vtadmintestutil.BuildCluster(t, vtadmintestutil.TestClusterConfig{ @@ -1081,7 +1077,7 @@ func TestGetKeyspace(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) ks, err := api.GetKeyspace(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -1282,8 +1278,6 @@ func TestGetKeyspaces(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1309,14 +1303,14 @@ func TestGetKeyspaces(t *testing.T) { servers := []vtctlservicepb.VtctldServer{ testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }), testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }), } - testutil.WithTestServers(t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { + testutil.WithTestServers(ctx, t, func(t *testing.T, clients 
...vtctldclient.VtctldClient) { clusters := []*cluster.Cluster{ vtadmintestutil.BuildCluster(t, vtadmintestutil.TestClusterConfig{ Cluster: &vtadminpb.Cluster{ @@ -1334,7 +1328,7 @@ func TestGetKeyspaces(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetKeyspaces(ctx, tt.req) require.NoError(t, err) @@ -1536,20 +1530,18 @@ func TestGetSchema(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddTablets(ctx, t, tt.ts, nil, vtadmintestutil.TopodataTabletsFromVTAdminTablets(tt.tablets)...) - testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { c := vtadmintestutil.BuildCluster(t, vtadmintestutil.TestClusterConfig{ Cluster: &vtadminpb.Cluster{ Id: fmt.Sprintf("c%d", tt.clusterID), @@ -1558,7 +1550,7 @@ func TestGetSchema(t *testing.T) { VtctldClient: client, Tablets: tt.tablets, }) - api := NewAPI([]*cluster.Cluster{c}, Options{}) + api := NewAPI(vtenv.NewTestEnv(), []*cluster.Cluster{c}, Options{}) defer api.Close() resp, err := api.GetSchema(ctx, tt.req) @@ -1688,7 +1680,7 @@ func TestGetSchema(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) + api := NewAPI(vtenv.NewTestEnv(), []*cluster.Cluster{c1, c2}, Options{}) defer api.Close() schema, err := api.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ @@ -2177,8 +2169,6 @@ func TestGetSchemas(t *testing.T) { // Note that these test cases were written prior to the existence of // WithTestServers, so they are 
all written with the assumption that // there are exactly 2 clusters. - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) @@ -2198,14 +2188,14 @@ func TestGetSchemas(t *testing.T) { vtctlds := []vtctlservicepb.VtctldServer{ testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }), testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }), } - testutil.WithTestServers(t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { + testutil.WithTestServers(ctx, t, func(t *testing.T, clients ...vtctldclient.VtctldClient) { clusters := make([]*cluster.Cluster, len(topos)) for cdx, toposerver := range topos { // Handle when a test doesn't define any tablets for a given cluster. @@ -2242,7 +2232,7 @@ func TestGetSchemas(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) defer api.Close() resp, err := api.GetSchemas(ctx, tt.req) @@ -2463,7 +2453,7 @@ func TestGetSchemas(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) + api := NewAPI(vtenv.NewTestEnv(), []*cluster.Cluster{c1, c2}, Options{}) defer api.Close() resp, err := api.GetSchemas(context.Background(), &vtadminpb.GetSchemasRequest{ @@ -2624,8 +2614,6 @@ func TestGetSrvKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -2637,10 +2625,10 @@ func TestGetSrvKeyspace(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) 
vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { for cell, sks := range tt.cellSrvKeyspaces { err := toposerver.UpdateSrvKeyspace(ctx, cell, tt.keyspace, sks) require.NoError(t, err) @@ -2656,7 +2644,7 @@ func TestGetSrvKeyspace(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetSrvKeyspace(ctx, tt.req) if tt.shouldErr { @@ -2785,8 +2773,6 @@ func TestGetSrvKeyspaces(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) @@ -2801,10 +2787,10 @@ func TestGetSrvKeyspaces(t *testing.T) { } vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { for keyspace, sks := range tt.cellSrvKeyspaces { for cell, sk := range sks { err := toposerver.UpdateSrvKeyspace(ctx, cell, keyspace, sk) @@ -2822,7 +2808,7 @@ func TestGetSrvKeyspaces(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetSrvKeyspaces(ctx, tt.req) if tt.shouldErr { @@ -2954,8 +2940,6 @@ func TestGetSrvVSchema(t *testing.T) { } for 
_, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) @@ -2966,10 +2950,10 @@ func TestGetSrvVSchema(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { for cell, svs := range tt.cellSrvVSchemas { err := toposerver.UpdateSrvVSchema(ctx, cell, svs) require.NoError(t, err) @@ -2985,7 +2969,7 @@ func TestGetSrvVSchema(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetSrvVSchema(ctx, tt.req) if tt.shouldErr { @@ -3248,8 +3232,6 @@ func TestGetSrvVSchemas(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) @@ -3260,10 +3242,10 @@ func TestGetSrvVSchemas(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) 
vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { for cell, svs := range tt.cellSrvVSchemas { err := toposerver.UpdateSrvVSchema(ctx, cell, svs) require.NoError(t, err) @@ -3279,7 +3261,7 @@ func TestGetSrvVSchemas(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetSrvVSchemas(ctx, tt.req) if tt.shouldErr { @@ -3529,8 +3511,6 @@ func TestGetTablet(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3550,7 +3530,7 @@ func TestGetTablet(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetTablet(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -3724,8 +3704,6 @@ func TestGetTablets(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3745,7 +3723,7 @@ func TestGetTablets(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetTablets(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -3870,13 +3848,11 @@ func TestGetVSchema(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() clusters := []*cluster.Cluster{vtadmintestutil.BuildCluster(t, tt.clusterCfg)} - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := 
api.GetVSchema(ctx, tt.req) if tt.shouldErr { @@ -4196,8 +4172,6 @@ func TestGetVSchemas(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4206,7 +4180,7 @@ func TestGetVSchemas(t *testing.T) { } clusters := vtadmintestutil.BuildClusters(t, tt.clusterCfgs...) - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.GetVSchemas(ctx, tt.req) if tt.shouldErr { @@ -4290,7 +4264,7 @@ func TestGetVtctlds(t *testing.T) { }, } - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}) + api := NewAPI(vtenv.NewTestEnv(), []*cluster.Cluster{cluster1, cluster2}, Options{}) ctx := context.Background() resp, err := api.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{}) @@ -4417,12 +4391,10 @@ func TestGetWorkflow(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}) + api := NewAPI(vtenv.NewTestEnv(), vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}) resp, err := api.GetWorkflow(ctx, tt.req) if tt.shouldErr { @@ -4856,12 +4828,10 @@ func TestGetWorkflows(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}) + api := NewAPI(vtenv.NewTestEnv(), vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}) resp, err := api.GetWorkflows(ctx, tt.req) if tt.shouldErr { @@ -5097,8 +5067,6 @@ func TestVTExplain(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -5112,10 +5080,10 @@ func TestVTExplain(t *testing.T) { } vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) 
vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { if tt.srvVSchema != nil { err := toposerver.UpdateSrvVSchema(ctx, "c0_cell1", tt.srvVSchema) require.NoError(t, err) @@ -5151,7 +5119,7 @@ func TestVTExplain(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(vtenv.NewTestEnv(), clusters, Options{}) resp, err := api.VTExplain(ctx, tt.req) if tt.expectedError != nil { @@ -5348,12 +5316,10 @@ func TestServeHTTP(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(tt.clusters, Options{EnableDynamicClusters: tt.enableDynamicClusters}) + api := NewAPI(vtenv.NewTestEnv(), tt.clusters, Options{EnableDynamicClusters: tt.enableDynamicClusters}) // Copy the Cookie over to a new Request req := httptest.NewRequest(http.MethodGet, "/api/clusters", nil) diff --git a/go/vt/vtadmin/cache/cache.go b/go/vt/vtadmin/cache/cache.go index 1768ce1f924..bc53efb80db 100644 --- a/go/vt/vtadmin/cache/cache.go +++ b/go/vt/vtadmin/cache/cache.go @@ -54,6 +54,9 @@ const ( // backfill requests to still process, if a config is passed with a // non-positive BackfillRequestTTL. DefaultBackfillRequestTTL = time.Millisecond * 100 + // DefaultBackfillQueueSize is the default value used for the size of the + // backfill queue, if a config is passed with a non-positive BackfillQueueSize. + DefaultBackfillQueueSize = 0 ) // Config is the configuration for a cache. 
@@ -125,6 +128,11 @@ func New[Key Keyer, Value any](fillFunc func(ctx context.Context, req Key) (Valu cfg.BackfillRequestTTL = DefaultBackfillRequestTTL } + if cfg.BackfillQueueSize < 0 { + log.Warningf("BackfillQueueSize (%v) must be positive, defaulting to %v", cfg.BackfillQueueSize, DefaultBackfillQueueSize) + cfg.BackfillQueueSize = DefaultBackfillQueueSize + } + c := &Cache[Key, Value]{ cache: cache.New(cfg.DefaultExpiration, cfg.CleanupInterval), lastFill: map[string]time.Time{}, diff --git a/go/vt/vtadmin/cache/cache_test.go b/go/vt/vtadmin/cache/cache_test.go index 93a6898db5d..a86022a8f9d 100644 --- a/go/vt/vtadmin/cache/cache_test.go +++ b/go/vt/vtadmin/cache/cache_test.go @@ -67,7 +67,6 @@ func TestBackfillDuplicates(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -93,6 +92,43 @@ func TestBackfillDuplicates(t *testing.T) { } } +func TestBackfillQueueSize(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + configuredBackfillQueueSize int + expectedBackfillQueueSize int + }{ + { + name: "configured negative backfill queue size", + configuredBackfillQueueSize: -1, + expectedBackfillQueueSize: 0, + }, { + name: "configured 0 backfill queue size", + configuredBackfillQueueSize: 0, + expectedBackfillQueueSize: 0, + }, { + name: "configured positive backfill queue size", + configuredBackfillQueueSize: 1, + expectedBackfillQueueSize: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + c := cache.New(func(ctx context.Context, req intkey) (any, error) { + return nil, nil + }, cache.Config{ + BackfillQueueSize: tt.configuredBackfillQueueSize, + }) + var config cache.Config = c.Debug()["config"].(cache.Config) + assert.Equal(t, tt.expectedBackfillQueueSize, config.BackfillQueueSize) + }) + } +} + func TestBackfillTTL(t *testing.T) { t.Parallel() @@ -131,7 +167,6 @@ func TestBackfillTTL(t *testing.T) { } for _, tt := range tests { - tt := tt 
t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cache/refresh_test.go b/go/vt/vtadmin/cache/refresh_test.go index c12bb63ad6a..b16a10f34d5 100644 --- a/go/vt/vtadmin/cache/refresh_test.go +++ b/go/vt/vtadmin/cache/refresh_test.go @@ -65,7 +65,6 @@ func TestShouldRefreshFromIncomingContext(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -114,7 +113,6 @@ func TestShouldRefreshFromRequest(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/cluster.go b/go/vt/vtadmin/cluster/cluster.go index 6f8e355c326..bdc9272a92f 100644 --- a/go/vt/vtadmin/cluster/cluster.go +++ b/go/vt/vtadmin/cluster/cluster.go @@ -23,7 +23,7 @@ import ( stderrors "errors" "fmt" "io" - "math/rand" + "math/rand/v2" "sort" "strings" "sync" @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/sets" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo/topoproto" @@ -46,6 +47,7 @@ import ( "vitess.io/vitess/go/vt/vtadmin/vtadminproto" "vitess.io/vitess/go/vt/vtadmin/vtctldclient" "vitess.io/vitess/go/vt/vtadmin/vtsql" + "vitess.io/vitess/go/vt/vtctl/schematools" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" @@ -351,6 +353,70 @@ func (c *Cluster) parseTablet(rows *sql.Rows) (*vtadminpb.Tablet, error) { return tablet, nil } +// ApplySchema applies a schema to the given keyspace in this cluster. 
+func (c *Cluster) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySchemaRequest) (*vtctldatapb.ApplySchemaResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.ApplySchema") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("sql", strings.Join(req.Sql, "; ")) + span.Annotate("ddl_strategy", req.DdlStrategy) + span.Annotate("uuid_list", strings.Join(req.UuidList, ", ")) + span.Annotate("migration_context", req.MigrationContext) + + if d, ok, err := protoutil.DurationFromProto(req.WaitReplicasTimeout); ok && err != nil { + span.Annotate("wait_replicas_timeout", d.String()) + } + + span.Annotate("caller_id", strings.Join( + []string{callerid.GetPrincipal(req.CallerId), callerid.GetComponent(req.CallerId), callerid.GetSubcomponent(req.CallerId)}, + ":", + )) + span.Annotate("batch_size", req.BatchSize) + + return c.Vtctld.ApplySchema(ctx, req) +} + +// CancelSchemaMigration cancels one or all migrations in a keyspace in this +// cluster, terminating any running ones as needed. +func (c *Cluster) CancelSchemaMigration(ctx context.Context, req *vtctldatapb.CancelSchemaMigrationRequest) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.CancelSchemaMigration") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + return c.Vtctld.CancelSchemaMigration(ctx, req) +} + +// CleanupSchemaMigration marks a schema migration in this cluster as ready for +// artifact cleanup. 
+func (c *Cluster) CleanupSchemaMigration(ctx context.Context, req *vtctldatapb.CleanupSchemaMigrationRequest) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.CleanupSchemaMigration") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + return c.Vtctld.CleanupSchemaMigration(ctx, req) +} + +// CompleteSchemaMigration completes one or all migrations in a keyspace +// executed with --postpone-completion in this cluster. +func (c *Cluster) CompleteSchemaMigration(ctx context.Context, req *vtctldatapb.CompleteSchemaMigrationRequest) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.CompleteSchemaMigration") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + return c.Vtctld.CompleteSchemaMigration(ctx, req) +} + // CreateKeyspace creates a keyspace in the given cluster, proxying a // CreateKeyspaceRequest to a vtctld in that cluster. func (c *Cluster) CreateKeyspace(ctx context.Context, req *vtctldatapb.CreateKeyspaceRequest) (*vtadminpb.Keyspace, error) { @@ -1424,7 +1490,10 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta span.Annotate("cache_hit", ok) if ok { + log.Infof("GetSchemas(cluster = %s) fetching schemas from schema cache", c.ID) return schemas, err + } else { + log.Infof("GetSchemas(cluster = %s) bypassing schema cache", c.ID) } } @@ -1543,6 +1612,51 @@ func (c *Cluster) GetSchemas(ctx context.Context, opts GetSchemaOptions) ([]*vta return schemas, nil } +// GetSchemaMigrations returns one or more schema migrations for a keyspace in +// this cluster. 
+func (c *Cluster) GetSchemaMigrations(ctx context.Context, req *vtctldatapb.GetSchemaMigrationsRequest) ([]*vtadminpb.SchemaMigration, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.GetSchemaMigrations") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + span.Annotate("migration_context", req.MigrationContext) + + if req.Status != vtctldatapb.SchemaMigration_UNKNOWN { + span.Annotate("status", schematools.SchemaMigrationStatusName(req.Status)) + } + + if d, ok, err := protoutil.DurationFromProto(req.Recent); ok && err == nil { + span.Annotate("recent", d.String()) + } + + switch req.Order { + case vtctldatapb.QueryOrdering_ASCENDING: + span.Annotate("order", "asc") + default: + span.Annotate("order", "desc") + } + + span.Annotate("skip", req.Skip) + span.Annotate("limit", req.Limit) + + resp, err := c.Vtctld.GetSchemaMigrations(ctx, req) + if err != nil { + return nil, err + } + + migrations := make([]*vtadminpb.SchemaMigration, len(resp.Migrations)) + for i, m := range resp.Migrations { + migrations[i] = &vtadminpb.SchemaMigration{ + Cluster: c.ToProto(), + SchemaMigration: m, + } + } + + return migrations, nil +} + // Note that for this function we use the tablets parameter, ignoring the // opts.Tablets value completely. 
func (c *Cluster) getSchemaFromTablets(ctx context.Context, keyspace string, tablets []*vtadminpb.Tablet, opts GetSchemaOptions) (*vtadminpb.Schema, error) { @@ -1684,7 +1798,7 @@ func (c *Cluster) getTabletsToQueryForSchemas(ctx context.Context, keyspace stri return nil, fmt.Errorf("%w for shard %s/%s", errors.ErrNoServingTablet, shard.Keyspace, shard.Name) } - randomServingTablet := shardTablets[rand.Intn(len(shardTablets))] + randomServingTablet := shardTablets[rand.IntN(len(shardTablets))] tabletsToQuery = append(tabletsToQuery, randomServingTablet) } @@ -1701,7 +1815,7 @@ func (c *Cluster) getTabletsToQueryForSchemas(ctx context.Context, keyspace stri return nil, err } - randomServingTablet := keyspaceTablets[rand.Intn(len(keyspaceTablets))] + randomServingTablet := keyspaceTablets[rand.IntN(len(keyspaceTablets))] return []*vtadminpb.Tablet{randomServingTablet}, nil } @@ -1956,6 +2070,19 @@ func (c *Cluster) GetWorkflows(ctx context.Context, keyspaces []string, opts Get }) } +// LaunchSchemaMigration starts a schema migration in the given keyspace in +// this cluster that was started with --postpone-launch. +func (c *Cluster) LaunchSchemaMigration(ctx context.Context, req *vtctldatapb.LaunchSchemaMigrationRequest) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.LaunchSchemaMigration") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + return c.Vtctld.LaunchSchemaMigration(ctx, req) +} + // PlannedFailoverShard fails over the shard either to a new primary or away // from an old primary. Both the current and candidate primaries must be // reachable and running. @@ -2297,6 +2424,19 @@ func (c *Cluster) reloadTabletSchemas(ctx context.Context, req *vtadminpb.Reload return results, nil } +// RetrySchemaMigration retries a schema migration in the given keyspace in +// this cluster. 
+func (c *Cluster) RetrySchemaMigration(ctx context.Context, req *vtctldatapb.RetrySchemaMigrationRequest) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + span, ctx := trace.NewSpan(ctx, "Cluster.RetrySchemaMigration") + defer span.Finish() + + AnnotateSpan(c, span) + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + return c.Vtctld.RetrySchemaMigration(ctx, req) +} + // SetWritable toggles the writability of a tablet, setting it to either // read-write or read-only. func (c *Cluster) SetWritable(ctx context.Context, req *vtctldatapb.SetWritableRequest) error { diff --git a/go/vt/vtadmin/cluster/cluster_internal_test.go b/go/vt/vtadmin/cluster/cluster_internal_test.go index 696d7783d15..d39b81329ae 100644 --- a/go/vt/vtadmin/cluster/cluster_internal_test.go +++ b/go/vt/vtadmin/cluster/cluster_internal_test.go @@ -142,8 +142,6 @@ func TestDeleteTablets(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -331,8 +329,6 @@ func TestEmergencyFailoverShard(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -493,8 +489,6 @@ func Test_getShardSets(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -662,8 +656,6 @@ func TestPlannedFailoverShard(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -775,8 +767,6 @@ func TestRefreshState(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -899,8 +889,6 @@ func TestRefreshTabletReplicationSource(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1189,8 +1177,6 @@ func Test_reloadKeyspaceSchemas(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1516,8 +1502,6 @@ func 
Test_reloadShardSchemas(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1700,8 +1684,6 @@ func Test_reloadTabletSchemas(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1903,8 +1885,6 @@ func TestTabletExternallyPromoted(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/cluster_test.go b/go/vt/vtadmin/cluster/cluster_test.go index 53c3b4f71cd..4deba4ff05b 100644 --- a/go/vt/vtadmin/cluster/cluster_test.go +++ b/go/vt/vtadmin/cluster/cluster_test.go @@ -255,7 +255,6 @@ func TestCreateShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { defer tt.tc.Cluster.Close() _, err := tt.tc.Cluster.CreateShard(ctx, tt.req) @@ -595,8 +594,6 @@ func TestFindTablet(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -807,8 +804,6 @@ func TestFindTablets(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1195,8 +1190,6 @@ func TestFindWorkflows(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1474,7 +1467,6 @@ func TestGetCellInfos(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1557,7 +1549,6 @@ func TestGetCellsAliases(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1704,8 +1695,6 @@ func TestGetSchema(t *testing.T) { for i, tt := range tests { i := i - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -2683,8 +2672,6 @@ func TestGetSchema(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { 
t.Parallel() @@ -2942,8 +2929,6 @@ func TestGetShardReplicationPositions(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3032,8 +3017,6 @@ func TestGetVSchema(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3191,8 +3174,6 @@ func TestGetWorkflow(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3358,8 +3339,6 @@ func TestGetWorkflows(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3436,8 +3415,6 @@ func TestSetWritable(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3583,8 +3560,6 @@ func TestToggleTabletReplication(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/config_test.go b/go/vt/vtadmin/cluster/config_test.go index a6246429585..252fa217bdc 100644 --- a/go/vt/vtadmin/cluster/config_test.go +++ b/go/vt/vtadmin/cluster/config_test.go @@ -131,8 +131,6 @@ func TestMergeConfig(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/discovery/discovery_consul.go b/go/vt/vtadmin/cluster/discovery/discovery_consul.go index 3f405c6aa40..76ee229edd5 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_consul.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_consul.go @@ -19,7 +19,7 @@ package discovery import ( "context" "fmt" - "math/rand" + "math/rand/v2" "strings" "text/template" "time" @@ -205,7 +205,7 @@ func (c *ConsulDiscovery) discoverVTGate(ctx context.Context, tags []string, exe return nil, ErrNoVTGates } - return vtgates[rand.Intn(len(vtgates))], nil + return vtgates[rand.IntN(len(vtgates))], 
nil } // DiscoverVTGateAddr is part of the Discovery interface. @@ -350,7 +350,7 @@ func (c *ConsulDiscovery) discoverVtctld(ctx context.Context, tags []string, exe return nil, ErrNoVtctlds } - return vtctlds[rand.Intn(len(vtctlds))], nil + return vtctlds[rand.IntN(len(vtctlds))], nil } // DiscoverVtctldAddr is part of the Discovery interface. diff --git a/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go b/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go index a298e5906c9..701251a7910 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_consul_test.go @@ -233,8 +233,6 @@ func TestConsulDiscoverVTGates(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -344,8 +342,6 @@ func TestConsulDiscoverVTGate(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -442,8 +438,6 @@ func TestConsulDiscoverVTGateAddr(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/discovery/discovery_dynamic_test.go b/go/vt/vtadmin/cluster/discovery/discovery_dynamic_test.go index f2c2874fc6f..8e9c8c9a043 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_dynamic_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_dynamic_test.go @@ -93,8 +93,6 @@ func TestDynamicDiscoverVTGate(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() disco := &DynamicDiscovery{} @@ -237,8 +235,6 @@ func TestDynamicDiscoverVTGates(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -330,8 +326,6 @@ func TestDynamicDiscoverVtctld(t *testing.T) { ctx := context.Background() for _, tt := range 
tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -475,8 +469,6 @@ func TestDynamicDiscoverVtctlds(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/discovery/discovery_json.go b/go/vt/vtadmin/cluster/discovery/discovery_json.go index 3433fb4aece..981937e3e3e 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_json.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_json.go @@ -20,7 +20,7 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + "math/rand/v2" "vitess.io/vitess/go/trace" @@ -130,7 +130,7 @@ func (d *JSONDiscovery) discoverVTGate(ctx context.Context, tags []string) (*vta return nil, ErrNoVTGates } - gate := gates[rand.Intn(len(gates))] + gate := gates[rand.IntN(len(gates))] return gate, nil } @@ -230,7 +230,7 @@ func (d *JSONDiscovery) discoverVtctld(ctx context.Context, tags []string) (*vta return nil, ErrNoVtctlds } - vtctld := vtctlds[rand.Intn(len(vtctlds))] + vtctld := vtctlds[rand.IntN(len(vtctlds))] return vtctld, nil } diff --git a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go index 344ee32863d..46ae2aa2577 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_static_file_test.go @@ -93,8 +93,6 @@ func TestDiscoverVTGate(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -238,8 +236,6 @@ func TestDiscoverVTGates(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -331,8 +327,6 @@ func TestDiscoverVtctld(t *testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -476,8 +470,6 @@ func TestDiscoverVtctlds(t 
*testing.T) { ctx := context.Background() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/discovery/discovery_test.go b/go/vt/vtadmin/cluster/discovery/discovery_test.go index 3ee67cbc9df..76616d6514c 100644 --- a/go/vt/vtadmin/cluster/discovery/discovery_test.go +++ b/go/vt/vtadmin/cluster/discovery/discovery_test.go @@ -50,8 +50,6 @@ func TestNew(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go b/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go index cf1f7ec7702..f711692d6fd 100644 --- a/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go +++ b/go/vt/vtadmin/cluster/discovery/fakediscovery/discovery.go @@ -19,7 +19,7 @@ package fakediscovery import ( "context" - "math/rand" + "math/rand/v2" "sync" "github.com/stretchr/testify/assert" @@ -189,7 +189,7 @@ func (d *Fake) DiscoverVTGate(ctx context.Context, tags []string) (*vtadminpb.VT return nil, assert.AnError } - return gates[rand.Intn(len(gates))], nil + return gates[rand.IntN(len(gates))], nil } // DiscoverVTGateAddr is part of the discovery.Discovery interface. 
@@ -299,5 +299,5 @@ func (d *Fake) DiscoverVtctld(ctx context.Context, tags []string) (*vtadminpb.Vt return nil, assert.AnError } - return vtctlds[rand.Intn(len(vtctlds))], nil + return vtctlds[rand.IntN(len(vtctlds))], nil } diff --git a/go/vt/vtadmin/cluster/dynamic/cluster_test.go b/go/vt/vtadmin/cluster/dynamic/cluster_test.go index 54b2b46d44b..4de01a48dda 100644 --- a/go/vt/vtadmin/cluster/dynamic/cluster_test.go +++ b/go/vt/vtadmin/cluster/dynamic/cluster_test.go @@ -53,7 +53,6 @@ func TestClusterFromString(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/file_config_test.go b/go/vt/vtadmin/cluster/file_config_test.go index e7fd2393cfb..2536a7aa5b9 100644 --- a/go/vt/vtadmin/cluster/file_config_test.go +++ b/go/vt/vtadmin/cluster/file_config_test.go @@ -215,7 +215,6 @@ name="devcluster"`, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -380,8 +379,6 @@ func TestCombine(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/flags_test.go b/go/vt/vtadmin/cluster/flags_test.go index 676c0243029..33a3cdf416b 100644 --- a/go/vt/vtadmin/cluster/flags_test.go +++ b/go/vt/vtadmin/cluster/flags_test.go @@ -98,8 +98,6 @@ func TestMergeFlagsByImpl(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache_test.go b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache_test.go index 1c7d76edeb0..2b22396d827 100644 --- a/go/vt/vtadmin/cluster/internal/caches/schemacache/cache_test.go +++ b/go/vt/vtadmin/cluster/internal/caches/schemacache/cache_test.go @@ -131,7 +131,6 @@ func TestLoadOptions(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git 
a/go/vt/vtadmin/cluster/resolver/resolver_test.go b/go/vt/vtadmin/cluster/resolver/resolver_test.go index fd1dbab5f13..f1f13f8200c 100644 --- a/go/vt/vtadmin/cluster/resolver/resolver_test.go +++ b/go/vt/vtadmin/cluster/resolver/resolver_test.go @@ -291,7 +291,6 @@ func TestBuild(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/credentials/credentials_test.go b/go/vt/vtadmin/credentials/credentials_test.go index bc85c4884a2..e8cd2c0d197 100644 --- a/go/vt/vtadmin/credentials/credentials_test.go +++ b/go/vt/vtadmin/credentials/credentials_test.go @@ -56,8 +56,6 @@ func Test_loadCredentials(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/debug/debug_test.go b/go/vt/vtadmin/debug/debug_test.go index 4f87dbcc33c..dd1c42bdbd8 100644 --- a/go/vt/vtadmin/debug/debug_test.go +++ b/go/vt/vtadmin/debug/debug_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package debug import ( - "math/rand" + "math/rand/v2" "testing" "time" @@ -44,10 +44,10 @@ func TestSanitizeString(t *testing.T) { t.Run("", func(t *testing.T) { t.Parallel() - length := rand.Intn(20) + 1 // [1, 21) + length := rand.IntN(20) + 1 // [1, 21) word := "" for j := 0; j < length; j++ { - k := rand.Intn(len(letters)) + k := rand.IntN(len(letters)) word += letters[k : k+1] } @@ -66,8 +66,8 @@ func TestTimeToString(t *testing.T) { t.Parallel() start := time.Now() - secondsoff := rand.Intn(60) - minutesoff := rand.Intn(60) + secondsoff := rand.IntN(60) + minutesoff := rand.IntN(60) in := start.Add(time.Second*time.Duration(secondsoff) + time.Minute*time.Duration(minutesoff)) out, err := time.Parse(time.RFC3339, TimeToString(in)) diff --git a/go/vt/vtadmin/http/request.go b/go/vt/vtadmin/http/request.go index a9cc7a16965..9d38b88ed91 100644 --- a/go/vt/vtadmin/http/request.go +++ b/go/vt/vtadmin/http/request.go @@ -77,6 +77,25 @@ func (r Request) ParseQueryParamAsUint32(name string, defaultVal uint32) (uint32 return defaultVal, nil } +// ParseQueryParamAsInt32 attempts to parse the query parameter of the given +// name into an int32 value. If the parameter is not set, the provided default +// value is returned. +func (r Request) ParseQueryParamAsInt32(name string, defaultVal int32) (int32, error) { + if param := r.URL.Query().Get(name); param != "" { + val, err := strconv.ParseInt(param, 10, 32) + if err != nil { + return defaultVal, &errors.BadRequest{ + Err: err, + ErrDetails: fmt.Sprintf("could not parse query parameter %s (= %v) into int32 value", name, param), + } + } + + return int32(val), nil + } + + return defaultVal, nil +} + // Vars is a mapping of the route variable values in a given request. // // See (gorilla/mux).Vars for details. 
We define a type here to add some diff --git a/go/vt/vtadmin/http/request_test.go b/go/vt/vtadmin/http/request_test.go index ba235a4877f..0a7e602c163 100644 --- a/go/vt/vtadmin/http/request_test.go +++ b/go/vt/vtadmin/http/request_test.go @@ -71,8 +71,6 @@ func TestParseQueryParamAsBool(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/http/schema_migrations.go b/go/vt/vtadmin/http/schema_migrations.go new file mode 100644 index 00000000000..e0207989648 --- /dev/null +++ b/go/vt/vtadmin/http/schema_migrations.go @@ -0,0 +1,144 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package http + +import ( + "context" + "encoding/json" + "io" + + "github.com/gorilla/mux" + + "vitess.io/vitess/go/vt/vtadmin/errors" + + vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +// ApplySchema implements the http wrapper for POST /migration/{cluster_id}/{keyspace}/. 
+func ApplySchema(ctx context.Context, r Request, api *API) *JSONResponse { + decoder := json.NewDecoder(r.Body) + defer r.Body.Close() + + var req vtctldatapb.ApplySchemaRequest + if err := decoder.Decode(&req); err != nil { + return NewJSONResponse(nil, &errors.BadRequest{ + Err: err, + }) + } + + vars := mux.Vars(r.Request) + req.Keyspace = vars["keyspace"] + + resp, err := api.server.ApplySchema(ctx, &vtadminpb.ApplySchemaRequest{ + ClusterId: vars["cluster_id"], + Request: &req, + }) + + return NewJSONResponse(resp, err) +} + +// CancelSchemaMigration implements the http wrapper for /migration/{cluster_id}/{keyspace}/cancel[?uuid]. +func CancelSchemaMigration(ctx context.Context, r Request, api *API) *JSONResponse { + vars := mux.Vars(r.Request) + + resp, err := api.server.CancelSchemaMigration(ctx, &vtadminpb.CancelSchemaMigrationRequest{ + ClusterId: vars["cluster_id"], + Request: &vtctldatapb.CancelSchemaMigrationRequest{ + Keyspace: vars["keyspace"], + Uuid: r.URL.Query().Get("uuid"), + }, + }) + + return NewJSONResponse(resp, err) +} + +// CleanupSchemaMigration implements the http wrapper for /migration/{cluster_id}/{keyspace}/cleanup[?uuid]. +func CleanupSchemaMigration(ctx context.Context, r Request, api *API) *JSONResponse { + vars := mux.Vars(r.Request) + + resp, err := api.server.CleanupSchemaMigration(ctx, &vtadminpb.CleanupSchemaMigrationRequest{ + ClusterId: vars["cluster_id"], + Request: &vtctldatapb.CleanupSchemaMigrationRequest{ + Keyspace: vars["keyspace"], + Uuid: r.URL.Query().Get("uuid"), + }, + }) + + return NewJSONResponse(resp, err) +} + +// CompleteSchemaMigration implements the http wrapper for /migration/{cluster_id}/{keyspace}/complete[?uuid]. 
+func CompleteSchemaMigration(ctx context.Context, r Request, api *API) *JSONResponse { + vars := mux.Vars(r.Request) + + resp, err := api.server.CompleteSchemaMigration(ctx, &vtadminpb.CompleteSchemaMigrationRequest{ + ClusterId: vars["cluster_id"], + Request: &vtctldatapb.CompleteSchemaMigrationRequest{ + Keyspace: vars["keyspace"], + Uuid: r.URL.Query().Get("uuid"), + }, + }) + + return NewJSONResponse(resp, err) +} + +// GetSchemaMigrations implements the http wrapper for /migrations/. +func GetSchemaMigrations(ctx context.Context, r Request, api *API) *JSONResponse { + decoder := json.NewDecoder(r.Body) + defer r.Body.Close() + + var req vtadminpb.GetSchemaMigrationsRequest + if err := decoder.Decode(&req); err != nil && err != io.EOF { + return NewJSONResponse(nil, &errors.BadRequest{ + Err: err, + }) + } + + resp, err := api.server.GetSchemaMigrations(ctx, &req) + return NewJSONResponse(resp, err) +} + +// LaunchSchemaMigration implements the http wrapper for /migration/{cluster_id}/{keyspace}/launch[?uuid]. +func LaunchSchemaMigration(ctx context.Context, r Request, api *API) *JSONResponse { + vars := mux.Vars(r.Request) + + resp, err := api.server.LaunchSchemaMigration(ctx, &vtadminpb.LaunchSchemaMigrationRequest{ + ClusterId: vars["cluster_id"], + Request: &vtctldatapb.LaunchSchemaMigrationRequest{ + Keyspace: vars["keyspace"], + Uuid: r.URL.Query().Get("uuid"), + }, + }) + + return NewJSONResponse(resp, err) +} + +// RetrySchemaMigration implements the http wrapper for /migration/{cluster_id}/{keyspace}/retry[?uuid]. 
+func RetrySchemaMigration(ctx context.Context, r Request, api *API) *JSONResponse { + vars := mux.Vars(r.Request) + + resp, err := api.server.RetrySchemaMigration(ctx, &vtadminpb.RetrySchemaMigrationRequest{ + ClusterId: vars["cluster_id"], + Request: &vtctldatapb.RetrySchemaMigrationRequest{ + Keyspace: vars["keyspace"], + Uuid: r.URL.Query().Get("uuid"), + }, + }) + + return NewJSONResponse(resp, err) +} diff --git a/go/vt/vtadmin/http/schemas.go b/go/vt/vtadmin/http/schemas.go index 4b157720cb7..c97ff45ac5c 100644 --- a/go/vt/vtadmin/http/schemas.go +++ b/go/vt/vtadmin/http/schemas.go @@ -101,7 +101,7 @@ func getTableSizeOpts(r Request) (*vtadminpb.GetSchemaTableSizeOptions, error) { // ReloadSchemas implements the http wrapper for /schemas/reload func ReloadSchemas(ctx context.Context, r Request, api *API) *JSONResponse { - concurrency, err := r.ParseQueryParamAsUint32("concurrency", 0) + concurrency, err := r.ParseQueryParamAsInt32("concurrency", 0) if err != nil { return NewJSONResponse(nil, err) } diff --git a/go/vt/vtadmin/http/shards.go b/go/vt/vtadmin/http/shards.go index 56d22742be6..776944d0a16 100644 --- a/go/vt/vtadmin/http/shards.go +++ b/go/vt/vtadmin/http/shards.go @@ -178,7 +178,7 @@ func ReloadSchemaShard(ctx context.Context, r Request, api *API) *JSONResponse { var params struct { WaitPosition string `json:"wait_position"` IncludePrimary bool `json:"include_primary"` - Concurrency uint32 `json:"concurrency"` + Concurrency int32 `json:"concurrency"` } if err := decoder.Decode(¶ms); err != nil { diff --git a/go/vt/vtadmin/http/tablets.go b/go/vt/vtadmin/http/tablets.go index 5e819ce945b..d6c8ba788b5 100644 --- a/go/vt/vtadmin/http/tablets.go +++ b/go/vt/vtadmin/http/tablets.go @@ -212,7 +212,7 @@ func StartReplication(ctx context.Context, r Request, api *API) *JSONResponse { return NewJSONResponse(result, err) } -// StartReplication stops replication on the specified tablet. +// StopReplication stops replication on the specified tablet. 
func StopReplication(ctx context.Context, r Request, api *API) *JSONResponse { vars := r.Vars() diff --git a/go/vt/vtadmin/internal/backoff/backoff.go b/go/vt/vtadmin/internal/backoff/backoff.go index 025991d28d6..c3c4ec4ae86 100644 --- a/go/vt/vtadmin/internal/backoff/backoff.go +++ b/go/vt/vtadmin/internal/backoff/backoff.go @@ -27,13 +27,13 @@ package backoff import ( "fmt" + "math/rand/v2" "strings" "time" grpcbackoff "google.golang.org/grpc/backoff" "vitess.io/vitess/go/vt/log" - vtrand "vitess.io/vitess/go/vt/vtadmin/internal/rand" ) // Strategy defines the interface for different backoff strategies. @@ -83,7 +83,7 @@ func backoffCommon(retries int, cfg grpcbackoff.Config, adjust func(cur float64) } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + cfg.Jitter*(vtrand.Float64()*2-1) + backoff *= 1 + cfg.Jitter*(rand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/go/vt/vtadmin/internal/backoff/backoff_test.go b/go/vt/vtadmin/internal/backoff/backoff_test.go index 687ab936cd7..daa2f8078ec 100644 --- a/go/vt/vtadmin/internal/backoff/backoff_test.go +++ b/go/vt/vtadmin/internal/backoff/backoff_test.go @@ -51,7 +51,6 @@ func TestExponentialBackoff(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/internal/rand/rand.go b/go/vt/vtadmin/internal/rand/rand.go deleted file mode 100644 index aa48f10da7e..00000000000 --- a/go/vt/vtadmin/internal/rand/rand.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package rand provides functions mirroring math/rand that are safe for -// concurrent use, seeded independently of math/rand's global source. -package rand - -/* -- TODO: (ajm188) implement the rest of the global functions vtadmin uses. -- TODO: (ajm188) consider moving to go/internal at the top-level for use in - more places. -*/ - -import ( - "math/rand" - "sync" - "time" -) - -var ( - r = rand.New(rand.NewSource(time.Now().UnixNano())) - m sync.Mutex -) - -// Float64 implements rand.Float64 on the vtrand global source. -func Float64() float64 { - m.Lock() - defer m.Unlock() - - return r.Float64() -} diff --git a/go/vt/vtadmin/rbac/authentication.go b/go/vt/vtadmin/rbac/authentication.go index 374f95b636d..9e5e3aebb96 100644 --- a/go/vt/vtadmin/rbac/authentication.go +++ b/go/vt/vtadmin/rbac/authentication.go @@ -99,7 +99,13 @@ type actorkey struct{} // NewContext returns a context with the given actor stored in it. This is used // to pass actor information from the authentication middleware and interceptors // to the actual vtadmin api methods. +// +// If actor is nil, the context is returned with no actor attached. 
func NewContext(ctx context.Context, actor *Actor) context.Context { + if actor == nil { + return ctx + } + return context.WithValue(ctx, actorkey{}, actor) } diff --git a/go/vt/vtadmin/rbac/authorizer_test.go b/go/vt/vtadmin/rbac/authorizer_test.go index c61f4c7fc59..62abd76cc99 100644 --- a/go/vt/vtadmin/rbac/authorizer_test.go +++ b/go/vt/vtadmin/rbac/authorizer_test.go @@ -104,8 +104,6 @@ func TestIsAuthorized(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtadmin/rbac/rbac.go b/go/vt/vtadmin/rbac/rbac.go index 12d23b3ac20..3f15a0d22d2 100644 --- a/go/vt/vtadmin/rbac/rbac.go +++ b/go/vt/vtadmin/rbac/rbac.go @@ -65,12 +65,22 @@ type Action string const ( /* generic actions */ + CancelAction Action = "cancel" CreateAction Action = "create" DeleteAction Action = "delete" GetAction Action = "get" PingAction Action = "ping" PutAction Action = "put" ReloadAction Action = "reload" + RetryAction Action = "retry" + + // cancel, complete, cleanup, launch, retry + + /* schema-migration-specific actions */ + + CleanupSchemaMigrationAction Action = "cleanup_schema_migration" + CompleteSchemaMigrationAction Action = "complete_schema_migration" + LaunchSchemaMigrationAction Action = "launch_schema_migration" /* shard-specific actions */ @@ -112,10 +122,14 @@ const ( SrvKeyspaceResource Resource = "SrvKeyspace" SrvVSchemaResource Resource = "SrvVSchema" + /* schema resources */ + + SchemaResource Resource = "Schema" + SchemaMigrationResource Resource = "SchemaMigration" + /* misc resources */ BackupResource Resource = "Backup" - SchemaResource Resource = "Schema" ShardReplicationPositionResource Resource = "ShardReplicationPosition" WorkflowResource Resource = "Workflow" diff --git a/go/vt/vtadmin/testutil/authztestgen/config.json b/go/vt/vtadmin/testutil/authztestgen/config.json index 01b0da66465..eb229625d3b 100644 --- a/go/vt/vtadmin/testutil/authztestgen/config.json +++ 
b/go/vt/vtadmin/testutil/authztestgen/config.json @@ -5,6 +5,26 @@ "id": "test", "name": "test", "vtctldclient_mock_data": [ + { + "field": "ApplySchemaResults", + "type": "map[string]struct{\nResponse *vtctldatapb.ApplySchemaResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.ApplySchemaResponse{},\n}," + }, + { + "field": "CancelSchemaMigrationResults", + "type": "map[string]struct{\nResponse *vtctldatapb.CancelSchemaMigrationResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.CancelSchemaMigrationResponse{},\n}," + }, + { + "field": "CleanupSchemaMigrationResults", + "type": "map[string]struct{\nResponse *vtctldatapb.CleanupSchemaMigrationResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.CleanupSchemaMigrationResponse{},\n}," + }, + { + "field": "CompleteSchemaMigrationResults", + "type": "map[string]struct{\nResponse *vtctldatapb.CompleteSchemaMigrationResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.CompleteSchemaMigrationResponse{},\n}," + }, { "field": "DeleteShardsResults", "type": "map[string]error", @@ -50,6 +70,11 @@ "type": "&struct{\nKeyspaces []*vtctldatapb.Keyspace\nError error}", "value": "Keyspaces: []*vtctldatapb.Keyspace{\n{\nName: \"test\",\nKeyspace: &topodatapb.Keyspace{},\n},\n}," }, + { + "field": "GetSchemaMigrationsResults", + "type": "map[string]struct{\nResponse *vtctldatapb.GetSchemaMigrationsResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.GetSchemaMigrationsResponse{\nMigrations: []*vtctldatapb.SchemaMigration{\n{},\n},\n},\n}," + }, { "field": "GetSchemaResults", "type": "map[string]struct{\nResponse *vtctldatapb.GetSchemaResponse\nError error}", @@ -71,6 +96,11 @@ "type": "map[string]struct{\nResponse *vtctldatapb.GetWorkflowsResponse\nError error}", "value": "\"test\": {\nResponse: &vtctldatapb.GetWorkflowsResponse{\nWorkflows: []*vtctldatapb.Workflow{\n{\nName: \"testworkflow\",\n},\n},\n}}," }, + { + "field": 
"LaunchSchemaMigrationResults", + "type": "map[string]struct{\nResponse *vtctldatapb.LaunchSchemaMigrationResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.LaunchSchemaMigrationResponse{},\n}," + }, { "field": "PingTabletResults", "type": "map[string]error", @@ -96,6 +126,11 @@ "type": "map[string]struct{\nResponse *vtctldatapb.ReparentTabletResponse\nError error\n}", "value": "\"zone1-0000000100\": {\nResponse: &vtctldatapb.ReparentTabletResponse{},\n}," }, + { + "field": "RetrySchemaMigrationResults", + "type": "map[string]struct{\nResponse *vtctldatapb.RetrySchemaMigrationResponse\nError error}", + "value": "\"test\": {\nResponse: &vtctldatapb.RetrySchemaMigrationResponse{},\n}," + }, { "field": "RunHealthCheckResults", "type": "map[string]error", @@ -182,6 +217,11 @@ "type": "&struct{\nKeyspaces []*vtctldatapb.Keyspace\nError error}", "value": "Keyspaces: []*vtctldatapb.Keyspace{\n{\nName: \"otherks\",\nKeyspace: &topodatapb.Keyspace{},\n},\n}," }, + { + "field": "GetSchemaMigrationsResults", + "type": "map[string]struct{\nResponse *vtctldatapb.GetSchemaMigrationsResponse\nError error}", + "value": "\"otherks\": {\nResponse: &vtctldatapb.GetSchemaMigrationsResponse{\nMigrations: []*vtctldatapb.SchemaMigration{\n{}, {}, {},\n},\n},\n}," + }, { "field": "GetSchemaResults", "type": "map[string]struct{\nResponse *vtctldatapb.GetSchemaResponse\nError error}", @@ -225,6 +265,138 @@ } ], "tests": [ + { + "method": "ApplySchema", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["create"], + "subjects": ["user:allowed"], + "clusters": ["*"] + } + ], + "request": "&vtadminpb.ApplySchemaRequest{\nClusterId: \"test\",\nRequest: &vtctldatapb.ApplySchemaRequest{\nKeyspace: \"test\",\n},\n}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "other"}, + "include_error_var": true, + "assertions": [ + "assert.Error(t, err, $$)", + "assert.Nil(t, resp, $$)" + ] + }, + { + "name": "authorized actor", + "actor": {"name": 
"allowed"}, + "include_error_var": true, + "is_permitted": true, + "assertions": [ + "require.NoError(t, err)", + "assert.NotNil(t, resp, $$)" + ] + } + ] + }, + { + "method": "CancelSchemaMigration", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["cancel"], + "subjects": ["user:allowed"], + "clusters": ["*"] + } + ], + "request": "&vtadminpb.CancelSchemaMigrationRequest{\nClusterId: \"test\",\nRequest: &vtctldatapb.CancelSchemaMigrationRequest{\nKeyspace: \"test\",\n},\n}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "other"}, + "include_error_var": true, + "assertions": [ + "assert.Error(t, err, $$)", + "assert.Nil(t, resp, $$)" + ] + }, + { + "name": "authorized actor", + "actor": {"name": "allowed"}, + "include_error_var": true, + "is_permitted": true, + "assertions": [ + "require.NoError(t, err)", + "assert.NotNil(t, resp, $$)" + ] + } + ] + }, + { + "method": "CleanupSchemaMigration", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["cleanup_schema_migration"], + "subjects": ["user:allowed"], + "clusters": ["*"] + } + ], + "request": "&vtadminpb.CleanupSchemaMigrationRequest{\nClusterId: \"test\",\nRequest: &vtctldatapb.CleanupSchemaMigrationRequest{\nKeyspace: \"test\",\n},\n}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "other"}, + "include_error_var": true, + "assertions": [ + "assert.Error(t, err, $$)", + "assert.Nil(t, resp, $$)" + ] + }, + { + "name": "authorized actor", + "actor": {"name": "allowed"}, + "include_error_var": true, + "is_permitted": true, + "assertions": [ + "require.NoError(t, err)", + "assert.NotNil(t, resp, $$)" + ] + } + ] + }, + { + "method": "CompleteSchemaMigration", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["complete_schema_migration"], + "subjects": ["user:allowed"], + "clusters": ["*"] + } + ], + "request": "&vtadminpb.CompleteSchemaMigrationRequest{\nClusterId: \"test\",\nRequest: 
&vtctldatapb.CompleteSchemaMigrationRequest{\nKeyspace: \"test\",\n},\n}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "other"}, + "include_error_var": true, + "assertions": [ + "assert.Error(t, err, $$)", + "assert.Nil(t, resp, $$)" + ] + }, + { + "name": "authorized actor", + "actor": {"name": "allowed"}, + "include_error_var": true, + "is_permitted": true, + "assertions": [ + "require.NoError(t, err)", + "assert.NotNil(t, resp, $$)" + ] + } + ] + }, { "method": "CreateKeyspace", "rules": [ @@ -798,6 +970,54 @@ } ] }, + { + "method": "GetSchemaMigrations", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["get"], + "subjects": ["user:allowed-all"], + "clusters": ["*"] + }, + { + "resource": "SchemaMigration", + "actions": ["get"], + "subjects": ["user:allowed-other"], + "clusters": ["other"] + } + ], + "request": "&vtadminpb.GetSchemaMigrationsRequest{}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "unauthorized"}, + "is_permitted": false, + "include_error_var": true, + "assertions": [ + "assert.NoError(t, err)", + "assert.Empty(t, resp.SchemaMigrations, $$)" + ] + }, + { + "name": "partial access", + "actor": {"name": "allowed-other"}, + "is_permitted": true, + "assertions": [ + "assert.NotEmpty(t, resp.SchemaMigrations, $$)", + "assert.Len(t, resp.SchemaMigrations, 3, \"'other' actor should be able to see the 3 migrations in cluster 'other'\")" + ] + }, + { + "name": "full access", + "actor": {"name": "allowed-all"}, + "is_permitted": true, + "assertions": [ + "assert.NotEmpty(t, resp.SchemaMigrations, $$)", + "assert.Len(t, resp.SchemaMigrations, 4, \"'all' actor should be able to see migrations in all clusters\")" + ] + } + ] + }, { "method": "GetSchema", "rules": [ @@ -1350,6 +1570,39 @@ } ] }, + { + "method": "LaunchSchemaMigration", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["launch_schema_migration"], + "subjects": ["user:allowed"], + "clusters": ["*"] + } + ], + 
"request": "&vtadminpb.LaunchSchemaMigrationRequest{\nClusterId: \"test\",\nRequest: &vtctldatapb.LaunchSchemaMigrationRequest{\nKeyspace: \"test\",\n},\n}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "other"}, + "include_error_var": true, + "assertions": [ + "assert.Error(t, err, $$)", + "assert.Nil(t, resp, $$)" + ] + }, + { + "name": "authorized actor", + "actor": {"name": "allowed"}, + "include_error_var": true, + "is_permitted": true, + "assertions": [ + "require.NoError(t, err)", + "assert.NotNil(t, resp, $$)" + ] + } + ] + }, { "method": "PingTablet", "rules": [ @@ -1527,6 +1780,39 @@ } ] }, + { + "method": "RetrySchemaMigration", + "rules": [ + { + "resource": "SchemaMigration", + "actions": ["retry"], + "subjects": ["user:allowed"], + "clusters": ["*"] + } + ], + "request": "&vtadminpb.RetrySchemaMigrationRequest{\nClusterId: \"test\",\nRequest: &vtctldatapb.RetrySchemaMigrationRequest{\nKeyspace: \"test\",\n},\n}", + "cases": [ + { + "name": "unauthorized actor", + "actor": {"name": "other"}, + "include_error_var": true, + "assertions": [ + "assert.Error(t, err, $$)", + "assert.Nil(t, resp, $$)" + ] + }, + { + "name": "authorized actor", + "actor": {"name": "allowed"}, + "include_error_var": true, + "is_permitted": true, + "assertions": [ + "require.NoError(t, err)", + "assert.NotNil(t, resp, $$)" + ] + } + ] + }, { "method": "RunHealthCheck", "rules": [ diff --git a/go/vt/vtadmin/testutil/authztestgen/template.go b/go/vt/vtadmin/testutil/authztestgen/template.go index 518d710fb3f..1c96d7aeede 100644 --- a/go/vt/vtadmin/testutil/authztestgen/template.go +++ b/go/vt/vtadmin/testutil/authztestgen/template.go @@ -51,6 +51,7 @@ import ( "vitess.io/vitess/go/vt/vtadmin/rbac" "vitess.io/vitess/go/vt/vtadmin/testutil" "vitess.io/vitess/go/vt/vtadmin/vtctldclient/fakevtctldclient" + "vitess.io/vitess/go/vt/vtenv" logutilpb "vitess.io/vitess/go/vt/proto/logutil" mysqlctlpb "vitess.io/vitess/go/vt/proto/mysqlctl" @@ -88,7 +89,7 @@ func 
Test{{ .Method }}(t *testing.T) { require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) {{ if not .SerializeCases }} - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -101,7 +102,7 @@ func Test{{ .Method }}(t *testing.T) { t.Run("{{ .Name }}", func(t *testing.T) { t.Parallel() {{ if $test.SerializeCases }} - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(vtenv.NewTestEnv(), testClusters(t), opts) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -111,9 +112,8 @@ func Test{{ .Method }}(t *testing.T) { {{ getActor .Actor }} ctx := context.Background() - if actor != nil { - ctx = rbac.NewContext(ctx, actor) - } + ctx = rbac.NewContext(ctx, actor) + {{ if .IncludeErrorVar }} resp, err := api.{{ $test.Method }}(ctx, {{ $test.Request }}) {{ else }} @@ -165,7 +165,7 @@ func testClusters(t testing.TB) []*cluster.Cluster { Config: &cluster.Config{ TopoReadPoolConfig: &cluster.RPCPoolConfig{ Size: 100, - WaitTimeout: time.Millisecond * 50, + WaitTimeout: time.Millisecond * 500, }, }, }, diff --git a/go/vt/vtadmin/testutil/cluster.go b/go/vt/vtadmin/testutil/cluster.go index 9141d6b0c22..bd238b388a8 100644 --- a/go/vt/vtadmin/testutil/cluster.go +++ b/go/vt/vtadmin/testutil/cluster.go @@ -41,6 +41,7 @@ import ( grpcvtctldtestutil "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vtenv" vtadminpb "vitess.io/vitess/go/vt/proto/vtadmin" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" @@ -119,7 +120,7 @@ func BuildCluster(t testing.TB, cfg TestClusterConfig) *cluster.Cluster { clusterConf.Name = cfg.Cluster.Name clusterConf.DiscoveryImpl = discoveryTestImplName 
- clusterConf = clusterConf.WithVtctldTestConfigOptions(vtadminvtctldclient.WithDialFunc(func(addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) { + clusterConf = clusterConf.WithVtctldTestConfigOptions(vtadminvtctldclient.WithDialFunc(func(ctx context.Context, addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) { return cfg.VtctldClient, nil })).WithVtSQLTestConfigOptions(vtsql.WithDialFunc(func(c vitessdriver.Configuration) (*sql.DB, error) { return sql.OpenDB(&fakevtsql.Connector{Tablets: tablets, ShouldErr: cfg.DBConfig.ShouldErr}), nil @@ -169,7 +170,7 @@ func BuildIntegrationTestCluster(t testing.TB, ctx context.Context, c *vtadminpb ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := grpcvtctldtestutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) localclient := localvtctldclient.New(vtctld) diff --git a/go/vt/vtadmin/vtctldclient/config.go b/go/vt/vtadmin/vtctldclient/config.go index 53b6fd83a5c..4a11001c3e6 100644 --- a/go/vt/vtadmin/vtctldclient/config.go +++ b/go/vt/vtadmin/vtctldclient/config.go @@ -17,6 +17,7 @@ limitations under the License. package vtctldclient import ( + "context" "fmt" "github.com/spf13/pflag" @@ -40,7 +41,7 @@ type Config struct { ResolverOptions *resolver.Options - dialFunc func(addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) + dialFunc func(ctx context.Context, addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) } // ConfigOption is a function that mutates a Config. 
It should return the same @@ -52,7 +53,7 @@ type ConfigOption func(cfg *Config) *Config // // It is used to support dependency injection in tests, and needs to be exported // for higher-level tests (via vtadmin/testutil). -func WithDialFunc(f func(addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error)) ConfigOption { +func WithDialFunc(f func(ctx context.Context, addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error)) ConfigOption { return func(cfg *Config) *Config { cfg.dialFunc = f return cfg diff --git a/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go b/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go index e2701a1f594..428d38fc8e0 100644 --- a/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go +++ b/go/vt/vtadmin/vtctldclient/fakevtctldclient/vtctldclient.go @@ -38,6 +38,22 @@ import ( type VtctldClient struct { vtctldclient.VtctldClient + ApplySchemaResults map[string]struct { + Response *vtctldatapb.ApplySchemaResponse + Error error + } + CancelSchemaMigrationResults map[string]struct { + Response *vtctldatapb.CancelSchemaMigrationResponse + Error error + } + CleanupSchemaMigrationResults map[string]struct { + Response *vtctldatapb.CleanupSchemaMigrationResponse + Error error + } + CompleteSchemaMigrationResults map[string]struct { + Response *vtctldatapb.CompleteSchemaMigrationResponse + Error error + } CreateKeyspaceShouldErr bool CreateShardShouldErr bool DeleteKeyspaceShouldErr bool @@ -77,6 +93,10 @@ type VtctldClient struct { Keyspaces []*vtctldatapb.Keyspace Error error } + GetSchemaMigrationsResults map[string]struct { + Response *vtctldatapb.GetSchemaMigrationsResponse + Error error + } GetSchemaResults map[string]struct { Response *vtctldatapb.GetSchemaResponse Error error @@ -93,6 +113,10 @@ type VtctldClient struct { Response *vtctldatapb.GetWorkflowsResponse Error error } + LaunchSchemaMigrationResults map[string]struct { + Response 
*vtctldatapb.LaunchSchemaMigrationResponse + Error error + } PingTabletResults map[string]error PlannedReparentShardResults map[string]struct { Response *vtctldatapb.PlannedReparentShardResponse @@ -115,6 +139,10 @@ type VtctldClient struct { Response *vtctldatapb.ReparentTabletResponse Error error } + RetrySchemaMigrationResults map[string]struct { + Response *vtctldatapb.RetrySchemaMigrationResponse + Error error + } RunHealthCheckResults map[string]error SetWritableResults map[string]error ShardReplicationPositionsResults map[string]struct { @@ -152,6 +180,66 @@ var _ vtctldclient.VtctldClient = (*VtctldClient)(nil) // Close is part of the vtctldclient.VtctldClient interface. func (fake *VtctldClient) Close() error { return nil } +// ApplySchema is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplySchemaResponse, error) { + if fake.ApplySchemaResults == nil { + return nil, fmt.Errorf("%w: ApplySchemaResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + + if resp, ok := fake.ApplySchemaResults[key]; ok { + return resp.Response, resp.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + +// CancelSchemaMigration is part of the vtctldclient.VtctldClient interface. 
+func (fake *VtctldClient) CancelSchemaMigration(ctx context.Context, req *vtctldatapb.CancelSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CancelSchemaMigrationResponse, error) { + if fake.CancelSchemaMigrationResults == nil { + return nil, fmt.Errorf("%w: CancelSchemaMigrationResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + + if resp, ok := fake.CancelSchemaMigrationResults[key]; ok { + return resp.Response, resp.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + +// CleanupSchemaMigration is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) CleanupSchemaMigration(ctx context.Context, req *vtctldatapb.CleanupSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CleanupSchemaMigrationResponse, error) { + if fake.CleanupSchemaMigrationResults == nil { + return nil, fmt.Errorf("%w: CleanupSchemaMigrationResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + + if resp, ok := fake.CleanupSchemaMigrationResults[key]; ok { + return resp.Response, resp.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + +// CompleteSchemaMigration is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) CompleteSchemaMigration(ctx context.Context, req *vtctldatapb.CompleteSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.CompleteSchemaMigrationResponse, error) { + if fake.CompleteSchemaMigrationResults == nil { + return nil, fmt.Errorf("%w: CompleteSchemaMigrationResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + + if resp, ok := fake.CompleteSchemaMigrationResults[key]; ok { + return resp.Response, resp.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + // CreateKeyspace is part of the vtctldclient.VtctldClient interface. 
func (fake *VtctldClient) CreateKeyspace(ctx context.Context, req *vtctldatapb.CreateKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.CreateKeyspaceResponse, error) { if fake.CreateKeyspaceShouldErr { @@ -159,7 +247,6 @@ func (fake *VtctldClient) CreateKeyspace(ctx context.Context, req *vtctldatapb.C } ks := &topodatapb.Keyspace{ - ServedFroms: req.ServedFroms, KeyspaceType: req.Type, BaseKeyspace: req.BaseKeyspace, SnapshotTime: req.SnapshotTime, @@ -346,6 +433,20 @@ func (fake *VtctldClient) GetKeyspaces(ctx context.Context, req *vtctldatapb.Get }, nil } +// GetSchemaMigrations is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) GetSchemaMigrations(ctx context.Context, req *vtctldatapb.GetSchemaMigrationsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaMigrationsResponse, error) { + if fake.GetSchemaMigrationsResults == nil { + return nil, fmt.Errorf("%w: GetSchemaMigrationsResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + if result, ok := fake.GetSchemaMigrationsResults[key]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + // GetSchema is part of the vtctldclient.VtctldClient interface. func (fake *VtctldClient) GetSchema(ctx context.Context, req *vtctldatapb.GetSchemaRequest, opts ...grpc.CallOption) (*vtctldatapb.GetSchemaResponse, error) { if fake.GetSchemaResults == nil { @@ -429,6 +530,21 @@ func (fake *VtctldClient) GetWorkflows(ctx context.Context, req *vtctldatapb.Get return nil, fmt.Errorf("%w: no result set for keyspace %s", assert.AnError, req.Keyspace) } +// LaunchSchemaMigration is part of the vtctldclient.VtctldClient interface. 
+func (fake *VtctldClient) LaunchSchemaMigration(ctx context.Context, req *vtctldatapb.LaunchSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.LaunchSchemaMigrationResponse, error) { + if fake.LaunchSchemaMigrationResults == nil { + return nil, fmt.Errorf("%w: LaunchSchemaMigrationResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + + if resp, ok := fake.LaunchSchemaMigrationResults[key]; ok { + return resp.Response, resp.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + // PingTablet is part of the vtctldclient.VtctldClient interface. func (fake *VtctldClient) PingTablet(ctx context.Context, req *vtctldatapb.PingTabletRequest, opts ...grpc.CallOption) (*vtctldatapb.PingTabletResponse, error) { if fake.PingTabletResults == nil { @@ -534,6 +650,21 @@ func (fake *VtctldClient) ReparentTablet(ctx context.Context, req *vtctldatapb.R return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) } +// RetrySchemaMigration is part of the vtctldclient.VtctldClient interface. +func (fake *VtctldClient) RetrySchemaMigration(ctx context.Context, req *vtctldatapb.RetrySchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.RetrySchemaMigrationResponse, error) { + if fake.RetrySchemaMigrationResults == nil { + return nil, fmt.Errorf("%w: RetrySchemaMigrationResults not set on fake vtctldclient", assert.AnError) + } + + key := req.Keyspace + + if resp, ok := fake.RetrySchemaMigrationResults[key]; ok { + return resp.Response, resp.Error + } + + return nil, fmt.Errorf("%w: no result set for %s", assert.AnError, key) +} + // RunHealthCheck is part of the vtctldclient.VtctldClient interface. 
func (fake *VtctldClient) RunHealthCheck(ctx context.Context, req *vtctldatapb.RunHealthCheckRequest, opts ...grpc.CallOption) (*vtctldatapb.RunHealthCheckResponse, error) { if fake.RunHealthCheckResults == nil { diff --git a/go/vt/vtadmin/vtctldclient/proxy.go b/go/vt/vtadmin/vtctldclient/proxy.go index abb16ac556d..c0a9773ea2c 100644 --- a/go/vt/vtadmin/vtctldclient/proxy.go +++ b/go/vt/vtadmin/vtctldclient/proxy.go @@ -63,7 +63,7 @@ type ClientProxy struct { // DialFunc is called to open a new vtctdclient connection. In production, // this should always be grpcvtctldclient.NewWithDialOpts, but it is // exported for testing purposes. - dialFunc func(addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) + dialFunc func(ctx context.Context, addr string, ff grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) resolver grpcresolver.Builder m sync.Mutex @@ -124,8 +124,7 @@ func (vtctld *ClientProxy) dial(ctx context.Context) error { opts = append(opts, grpc.WithResolvers(vtctld.resolver)) - // TODO: update dialFunc to take ctx as first arg. - client, err := vtctld.dialFunc(resolver.DialAddr(vtctld.resolver, "vtctld"), grpcclient.FailFast(false), opts...) + client, err := vtctld.dialFunc(ctx, resolver.DialAddr(vtctld.resolver, "vtctld"), grpcclient.FailFast(false), opts...) 
if err != nil { return err } diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index 77b7f267a30..a4b6a1a61f3 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -24,6 +24,7 @@ import ( "time" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/hook" @@ -34,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -75,6 +77,7 @@ var tabletMap map[uint32]*comboTablet // it to the map. If it's a primary tablet, it also issues a TER. func CreateTablet( ctx context.Context, + env *vtenv.Environment, ts *topo.Server, cell string, uid uint32, @@ -82,6 +85,7 @@ func CreateTablet( tabletType topodatapb.TabletType, mysqld mysqlctl.MysqlDaemon, dbcfgs *dbconfigs.DBConfigs, + srvTopoCounts *stats.CountersWithSingleLabel, ) error { alias := &topodatapb.TabletAlias{ Cell: cell, @@ -89,7 +93,7 @@ func CreateTablet( } log.Infof("Creating %v tablet %v for %v/%v", tabletType, topoproto.TabletAliasString(alias), keyspace, shard) - controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias) + controller := tabletserver.NewServer(ctx, env, topoproto.TabletAliasString(alias), ts, alias, srvTopoCounts) initTabletType := tabletType if tabletType == topodatapb.TabletType_PRIMARY { initTabletType = topodatapb.TabletType_REPLICA @@ -100,6 +104,7 @@ func CreateTablet( } tm := &tabletmanager.TabletManager{ BatchCtx: context.Background(), + Env: env, TopoServer: ts, MysqlDaemon: mysqld, DBConfigs: dbcfgs, @@ -117,7 +122,7 @@ func CreateTablet( Type: initTabletType, DbNameOverride: dbname, } - if err := tm.Start(tablet, 0); err != nil { + if err := tm.Start(tablet, nil); err != nil { return err } 
@@ -163,12 +168,14 @@ func InitRoutingRules( // InitTabletMap creates the action tms and associated data structures // for all tablets, based on the vttest proto parameter. func InitTabletMap( + env *vtenv.Environment, ts *topo.Server, tpb *vttestpb.VTTestTopology, mysqld mysqlctl.MysqlDaemon, dbcfgs *dbconfigs.DBConfigs, schemaDir string, ensureDatabase bool, + srvTopoCounts *stats.CountersWithSingleLabel, ) (uint32, error) { tabletMap = make(map[uint32]*comboTablet) @@ -184,11 +191,11 @@ func InitTabletMap( }) // iterate through the keyspaces - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) + wr := wrangler.New(env, logutil.NewConsoleLogger(), ts, nil) var uid uint32 = 1 for _, kpb := range tpb.Keyspaces { var err error - uid, err = CreateKs(ctx, ts, tpb, mysqld, dbcfgs, schemaDir, kpb, ensureDatabase, uid, wr) + uid, err = CreateKs(ctx, env, ts, tpb, mysqld, dbcfgs, schemaDir, kpb, ensureDatabase, uid, wr, srvTopoCounts) if err != nil { return 0, err } @@ -279,6 +286,7 @@ func DeleteKs( // CreateKs creates keyspace, shards and tablets with mysql database func CreateKs( ctx context.Context, + env *vtenv.Environment, ts *topo.Server, tpb *vttestpb.VTTestTopology, mysqld mysqlctl.MysqlDaemon, @@ -288,98 +296,76 @@ func CreateKs( ensureDatabase bool, uid uint32, wr *wrangler.Wrangler, + srvTopoCounts *stats.CountersWithSingleLabel, ) (uint32, error) { keyspace := kpb.Name - if kpb.ServedFrom != "" { - // if we have a redirect, create a completely redirected - // keyspace and no tablet - if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{ - ServedFroms: []*topodatapb.Keyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_PRIMARY, - Keyspace: kpb.ServedFrom, - }, - { - TabletType: topodatapb.TabletType_REPLICA, - Keyspace: kpb.ServedFrom, - }, - { - TabletType: topodatapb.TabletType_RDONLY, - Keyspace: kpb.ServedFrom, - }, - }, - }); err != nil { - return 0, fmt.Errorf("CreateKeyspace(%v) failed: %v", keyspace, err) - } - } else { - // 
create a regular keyspace - if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil { - return 0, fmt.Errorf("CreateKeyspace(%v) failed: %v", keyspace, err) + // create a regular keyspace + if err := ts.CreateKeyspace(ctx, keyspace, &topodatapb.Keyspace{}); err != nil { + return 0, fmt.Errorf("CreateKeyspace(%v) failed: %v", keyspace, err) + } + + // iterate through the shards + for _, spb := range kpb.Shards { + shard := spb.Name + if err := ts.CreateShard(ctx, keyspace, shard); err != nil { + return 0, fmt.Errorf("CreateShard(%v:%v) failed: %v", keyspace, shard, err) } - // iterate through the shards - for _, spb := range kpb.Shards { - shard := spb.Name - if err := ts.CreateShard(ctx, keyspace, shard); err != nil { - return 0, fmt.Errorf("CreateShard(%v:%v) failed: %v", keyspace, shard, err) + for _, cell := range tpb.Cells { + dbname := spb.DbNameOverride + if dbname == "" { + dbname = fmt.Sprintf("vt_%v_%v", keyspace, shard) } - for _, cell := range tpb.Cells { - dbname := spb.DbNameOverride - if dbname == "" { - dbname = fmt.Sprintf("vt_%v_%v", keyspace, shard) - } + replicas := int(kpb.ReplicaCount) + if replicas == 0 { + // 2 replicas in order to ensure the primary cell has a primary and a replica + replicas = 2 + } + rdonlys := int(kpb.RdonlyCount) + if rdonlys == 0 { + rdonlys = 1 + } - replicas := int(kpb.ReplicaCount) - if replicas == 0 { - // 2 replicas in order to ensure the primary cell has a primary and a replica - replicas = 2 - } - rdonlys := int(kpb.RdonlyCount) - if rdonlys == 0 { - rdonlys = 1 + if ensureDatabase { + // Create Database if not exist + conn, err := mysqld.GetDbaConnection(context.TODO()) + if err != nil { + return 0, fmt.Errorf("GetConnection failed: %v", err) } + defer conn.Close() - if ensureDatabase { - // Create Database if not exist - conn, err := mysqld.GetDbaConnection(context.TODO()) - if err != nil { - return 0, fmt.Errorf("GetConnection failed: %v", err) - } - defer conn.Close() + _, err = 
conn.ExecuteFetch("CREATE DATABASE IF NOT EXISTS `"+dbname+"`", 1, false) + if err != nil { + return 0, fmt.Errorf("error ensuring database exists: %v", err) + } - _, err = conn.ExecuteFetch("CREATE DATABASE IF NOT EXISTS `"+dbname+"`", 1, false) - if err != nil { - return 0, fmt.Errorf("error ensuring database exists: %v", err) - } + } + if cell == tpb.Cells[0] { + replicas-- + // create the primary + if err := CreateTablet(ctx, env, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone(), srvTopoCounts); err != nil { + return 0, err } - if cell == tpb.Cells[0] { - replicas-- - - // create the primary - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone()); err != nil { - return 0, err - } - uid++ - } + uid++ + } - for i := 0; i < replicas; i++ { - // create a replica tablet - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_REPLICA, mysqld, dbcfgs.Clone()); err != nil { - return 0, err - } - uid++ + for i := 0; i < replicas; i++ { + // create a replica tablet + if err := CreateTablet(ctx, env, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_REPLICA, mysqld, dbcfgs.Clone(), srvTopoCounts); err != nil { + return 0, err } + uid++ + } - for i := 0; i < rdonlys; i++ { - // create a rdonly tablet - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_RDONLY, mysqld, dbcfgs.Clone()); err != nil { - return 0, err - } - uid++ + for i := 0; i < rdonlys; i++ { + // create a rdonly tablet + if err := CreateTablet(ctx, env, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_RDONLY, mysqld, dbcfgs.Clone(), srvTopoCounts); err != nil { + return 0, err } + uid++ } } } @@ -394,7 +380,7 @@ func CreateKs( return 0, fmt.Errorf("cannot load vschema file %v for keyspace %v: %v", f, keyspace, err) } - _, err = vindexes.BuildKeyspace(formal) + _, err = 
vindexes.BuildKeyspace(formal, wr.SQLParser()) if err != nil { return 0, fmt.Errorf("BuildKeyspace(%v) failed: %v", keyspace, err) } @@ -419,7 +405,7 @@ func CreateKs( // // dialer is our tabletconn.Dialer -func dialer(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { +func dialer(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { t, ok := tabletMap[tablet.Alias.Uid] if !ok { return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "connection refused") @@ -855,6 +841,10 @@ func (itmc *internalTabletManagerClient) ExecuteFetchAsDba(context.Context, *top return nil, fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) ExecuteMultiFetchAsDba(context.Context, *topodatapb.Tablet, bool, *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) ExecuteFetchAsAllPrivs(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { return nil, fmt.Errorf("not implemented in vtcombo") } @@ -887,6 +877,14 @@ func (itmc *internalTabletManagerClient) DeleteVReplicationWorkflow(context.Cont return nil, fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) HasVReplicationWorkflows(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + +func (itmc *internalTabletManagerClient) ReadVReplicationWorkflows(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) 
ReadVReplicationWorkflow(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { return nil, fmt.Errorf("not implemented in vtcombo") } @@ -903,6 +901,10 @@ func (itmc *internalTabletManagerClient) UpdateVReplicationWorkflow(context.Cont return nil, fmt.Errorf("not implemented in vtcombo") } +func (itmc *internalTabletManagerClient) UpdateVReplicationWorkflows(context.Context, *topodatapb.Tablet, *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) { + return nil, fmt.Errorf("not implemented in vtcombo") +} + func (itmc *internalTabletManagerClient) ResetReplication(context.Context, *topodatapb.Tablet) error { return fmt.Errorf("not implemented in vtcombo") } @@ -923,7 +925,7 @@ func (itmc *internalTabletManagerClient) UndoDemotePrimary(context.Context, *top return fmt.Errorf("not implemented in vtcombo") } -func (itmc *internalTabletManagerClient) SetReplicationSource(context.Context, *topodatapb.Tablet, *topodatapb.TabletAlias, int64, string, bool, bool) error { +func (itmc *internalTabletManagerClient) SetReplicationSource(context.Context, *topodatapb.Tablet, *topodatapb.TabletAlias, int64, string, bool, bool, float64) error { return fmt.Errorf("not implemented in vtcombo") } diff --git a/go/vt/vtctl/backup.go b/go/vt/vtctl/backup.go index c2f90ec4b14..e832b1f79d1 100644 --- a/go/vt/vtctl/backup.go +++ b/go/vt/vtctl/backup.go @@ -70,9 +70,9 @@ func init() { } func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") + concurrency := subFlags.Int32("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") allowPrimary := subFlags.Bool("allow_primary", false, "Allows backups to be taken on 
primary. Warning!! If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.") - incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position, or name of backup from which to create an incremental backup. Default: empty. If given, then this backup becomes an incremental backup from given position or given backup. If value is 'auto', this backup will be taken from the last successful backup position.") upgradeSafe := subFlags.Bool("upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") if err := subFlags.Parse(args); err != nil { @@ -89,7 +89,7 @@ func commandBackup(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F return wr.VtctldServer().Backup(&vtctldatapb.BackupRequest{ TabletAlias: tabletAlias, - Concurrency: uint64(*concurrency), + Concurrency: *concurrency, AllowPrimary: *allowPrimary, IncrementalFromPos: *incrementalFromPos, UpgradeSafe: *upgradeSafe, @@ -112,9 +112,9 @@ func (b *backupEventStreamLogger) Send(resp *vtctldatapb.BackupResponse) error { } func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") + concurrency := subFlags.Int32("concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously") allowPrimary := subFlags.Bool("allow_primary", false, "Whether to use primary tablet for backup. Warning!! 
If you are using the builtin backup engine, this will shutdown your primary mysql for as long as it takes to create a backup.") - incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position") + incrementalFromPos := subFlags.String("incremental_from_pos", "", "Position, or name of backup from which to create an incremental backup. Default: empty. If given, then this backup becomes an incremental backup from given position or given backup. If value is 'auto', this backup will be taken from the last successful backup position.") upgradeSafe := subFlags.Bool("upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.") if err := subFlags.Parse(args); err != nil { @@ -132,7 +132,7 @@ func commandBackupShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf return wr.VtctldServer().BackupShard(&vtctldatapb.BackupShardRequest{ Keyspace: keyspace, Shard: shard, - Concurrency: uint64(*concurrency), + Concurrency: *concurrency, AllowPrimary: *allowPrimary, IncrementalFromPos: *incrementalFromPos, UpgradeSafe: *upgradeSafe, diff --git a/go/vt/vtctl/endtoend/get_schema_test.go b/go/vt/vtctl/endtoend/get_schema_test.go index 2373fb6e3a5..a9829a193f3 100644 --- a/go/vt/vtctl/endtoend/get_schema_test.go +++ b/go/vt/vtctl/endtoend/get_schema_test.go @@ -5,6 +5,7 @@ import ( "testing" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vtenv" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -161,7 +162,7 @@ func TestGetSchema(t *testing.T) { logger := logutil.NewMemoryLogger() - err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc), []string{ + err := vtctl.RunCommand(ctx, wrangler.New(vtenv.NewTestEnv(), logger, topo, &tmc), []string{ "GetSchema", topoproto.TabletAliasString(tablet.Alias), 
}) @@ -201,7 +202,7 @@ func TestGetSchema(t *testing.T) { }, } - err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc), []string{ + err = vtctl.RunCommand(ctx, wrangler.New(vtenv.NewTestEnv(), logger, topo, &tmc), []string{ "GetSchema", "--table_sizes_only", topoproto.TabletAliasString(tablet.Alias), diff --git a/go/vt/vtctl/endtoend/onlineddl_show_test.go b/go/vt/vtctl/endtoend/onlineddl_show_test.go index fe795af752d..ed848c14be8 100644 --- a/go/vt/vtctl/endtoend/onlineddl_show_test.go +++ b/go/vt/vtctl/endtoend/onlineddl_show_test.go @@ -13,6 +13,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" "vitess.io/vitess/go/vt/wrangler" @@ -119,14 +120,14 @@ func onlineDDLTest(t *testing.T, args []string, expectedQuery string) { tmclienttest.SetProtocol("go.vt.vtctl.endtoend", t.Name()) logger := logutil.NewMemoryLogger() - wr := wrangler.New(logger, fakeTopo, &tmc) + wr := wrangler.New(vtenv.NewTestEnv(), logger, fakeTopo, &tmc) err := vtctl.RunCommand(ctx, wr, args) assert.Error(t, err) assert.NotEmpty(t, err.Error()) containsExpectedError := false expectedErrors := []string{ - "unable to get shard names for keyspace", + "unable to get shards for keyspace", "no ExecuteFetchAsDba results on fake TabletManagerClient", } for _, expect := range expectedErrors { diff --git a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go deleted file mode 100644 index 14147316508..00000000000 --- a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fakevtctlclient - -import ( - "fmt" - "io" - "strings" - "sync" - "time" - - "vitess.io/vitess/go/protoutil" - "vitess.io/vitess/go/vt/logutil" - - logutilpb "vitess.io/vitess/go/vt/proto/logutil" -) - -// FakeLoggerEventStreamingClient is the base for the fakes for vtctlclient. -// It allows to register a (multi-)line string for a given command and return the result as channel which streams it back. -type FakeLoggerEventStreamingClient struct { - results map[string]*result - // mu guards all fields of the structs. - mu sync.Mutex -} - -// NewFakeLoggerEventStreamingClient creates a new fake. -func NewFakeLoggerEventStreamingClient() *FakeLoggerEventStreamingClient { - return &FakeLoggerEventStreamingClient{results: make(map[string]*result)} -} - -// generateKey returns a map key for a []string. -// ([]string is not supported as map key.) -func generateKey(args []string) string { - return strings.Join(args, " ") -} - -// result contains the result the fake should respond for a given command. -type result struct { - output string - err error - // count is the number of times this result is registered for the same - // command. With each stream of this result, count will be decreased by one. - count int - // addr optionally specifies which server address is expected from the client. 
- addr string -} - -func (r1 result) Equals(r2 result) bool { - return r1.output == r2.output && - ((r1.err == nil && r2.err == nil) || - (r1.err != nil && r2.err != nil && r1.err.Error() == r2.err.Error())) -} - -// RegisterResult registers for a given command (args) the result which the fake should return. -// Once the result was returned, it will be automatically deregistered. -func (f *FakeLoggerEventStreamingClient) RegisterResult(args []string, output string, err error) error { - return f.RegisterResultForAddr("" /* addr */, args, output, err) -} - -// RegisterResultForAddr is identical to RegisterResult but also expects that -// the client did dial "addr" as server address. -func (f *FakeLoggerEventStreamingClient) RegisterResultForAddr(addr string, args []string, output string, err error) error { - f.mu.Lock() - defer f.mu.Unlock() - - k := generateKey(args) - v := result{output, err, 1, addr} - if result, ok := f.results[k]; ok { - if result.Equals(v) { - result.count++ - return nil - } - return fmt.Errorf("a different result (%v) is already registered for command: %v", result, args) - } - f.results[k] = &v - return nil -} - -// RegisteredCommands returns a list of commands which are currently registered. -// This is useful to check that all registered results have been consumed. 
-func (f *FakeLoggerEventStreamingClient) RegisteredCommands() []string { - f.mu.Lock() - defer f.mu.Unlock() - - var commands []string - for k := range f.results { - commands = append(commands, k) - } - return commands -} - -type streamResultAdapter struct { - lines []string - index int - err error -} - -func (s *streamResultAdapter) Recv() (*logutilpb.Event, error) { - if s.index < len(s.lines) { - result := &logutilpb.Event{ - Time: protoutil.TimeToProto(time.Now()), - Level: logutilpb.Level_CONSOLE, - File: "fakevtctlclient", - Line: -1, - Value: s.lines[s.index], - } - s.index++ - return result, nil - } - if s.err == nil { - return nil, io.EOF - } - return nil, s.err -} - -// StreamResult returns an EventStream which streams back a registered result as logging events. -// "addr" is the server address which the client dialed and may be empty. -func (f *FakeLoggerEventStreamingClient) StreamResult(addr string, args []string) (logutil.EventStream, error) { - f.mu.Lock() - defer f.mu.Unlock() - - k := generateKey(args) - result, ok := f.results[k] - if !ok { - return nil, fmt.Errorf("no response was registered for args: %v", args) - } - if result.addr != "" && addr != result.addr { - return nil, fmt.Errorf("client sent request to wrong server address. got: %v want: %v", addr, result.addr) - } - result.count-- - if result.count == 0 { - delete(f.results, k) - } - - return &streamResultAdapter{ - lines: strings.Split(result.output, "\n"), - index: 0, - err: result.err, - }, nil -} diff --git a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient_test.go b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient_test.go deleted file mode 100644 index 04a0ad5e03d..00000000000 --- a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient_test.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fakevtctlclient - -import ( - "errors" - "io" - "reflect" - "strings" - "testing" - - logutilpb "vitess.io/vitess/go/vt/proto/logutil" -) - -func TestStreamOutputAndError(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - wantErr := errors.New("something went wrong") - - err := fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) -} - -func TestStreamOutput(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - var wantErr error - - err := fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) -} - -// TestStreamOutputForAddr is similar to TestStreamOutput but also tests that -// the correct server address was used by the client. -func TestStreamOutputForAddr(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - addr := "localhost:12345" - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - var wantErr error - - // Used address matches. 
- err := fake.RegisterResultForAddr(addr, args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - verifyStreamOutputAndError(t, fake, addr, args, output, wantErr) - - // Used address does not match. - err = fake.RegisterResultForAddr(addr, args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - _, err = fake.StreamResult("different-addr", args) - if err == nil || !strings.Contains(err.Error(), "client sent request to wrong server address") { - t.Fatalf("fake should have failed because the client used the wrong address: %v", err) - } -} - -func verifyStreamOutputAndError(t *testing.T, fake *FakeLoggerEventStreamingClient, addr string, args, output []string, wantErr error) { - stream, err := fake.StreamResult(addr, args) - if err != nil { - t.Fatalf("Failed to stream result: %v", err) - } - - // Verify output and error. - i := 0 - for { - var event *logutilpb.Event - event, err = stream.Recv() - if err != nil { - break - } - if i > len(output) { - t.Fatalf("Received more events than expected. got: %v want: %v", i, len(output)) - } - if event.Value != output[i] { - t.Errorf("Received event is not identical to the received one. got: %v want: %v", event.Value, output[i]) - } - t.Logf("Received event: %v", event) - i++ - } - if i != len(output) { - t.Errorf("Number of received events mismatches. got: %v want: %v", i, len(output)) - } - if err == io.EOF { - err = nil - } - if err != wantErr { - t.Errorf("Wrong error received. 
got: %v want: %v", err, wantErr) - } -} - -func TestNoResultRegistered(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - stream, err := fake.StreamResult("" /* addr */, []string{"ListShardTablets", "test_keyspace/0"}) - if stream != nil { - t.Fatalf("No stream should have been returned because no matching result is registered.") - } - wantErr := "no response was registered for args: [ListShardTablets test_keyspace/0]" - if err.Error() != wantErr { - t.Errorf("Wrong error for missing result was returned. got: '%v' want: '%v'", err, wantErr) - } -} - -func TestResultAlreadyRegistered(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - errFirst := fake.RegisterResult([]string{"ListShardTablets", "test_keyspace/0"}, "output1", nil) - if errFirst != nil { - t.Fatalf("Registering the result should have been successful. Error: %v", errFirst) - } - - errSecond := fake.RegisterResult([]string{"ListShardTablets", "test_keyspace/0"}, "output2", nil) - if errSecond == nil { - t.Fatal("Registering a duplicate, different result should not have been successful.") - } - want := ") is already registered for command: " - if !strings.Contains(errSecond.Error(), want) { - t.Fatalf("Wrong error message: got: '%v' want: '%v'", errSecond, want) - } -} - -func TestRegisterMultipleResultsForSameCommand(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - var wantErr error - - // Register first result. - err := fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - registeredCommands := []string{strings.Join(args, " ")} - verifyListOfRegisteredCommands(t, fake, registeredCommands) - - // Register second result. 
- err = fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - verifyListOfRegisteredCommands(t, fake, registeredCommands) - - // Consume first result. - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) - verifyListOfRegisteredCommands(t, fake, registeredCommands) - - // Consume second result. - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) - verifyListOfRegisteredCommands(t, fake, []string{}) -} - -func verifyListOfRegisteredCommands(t *testing.T, fake *FakeLoggerEventStreamingClient, want []string) { - got := fake.RegisteredCommands() - if len(got) == 0 && len(want) == 0 { - return - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("fake.RegisteredCommands() = %v, want: %v", got, want) - } -} diff --git a/go/vt/vtctl/fakevtctlclient/fakevtctlclient.go b/go/vt/vtctl/fakevtctlclient/fakevtctlclient.go deleted file mode 100644 index 11224b745e9..00000000000 --- a/go/vt/vtctl/fakevtctlclient/fakevtctlclient.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fakevtctlclient contains a fake for the vtctlclient interface. -package fakevtctlclient - -import ( - "time" - - "context" - - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/vtctl/vtctlclient" -) - -// FakeVtctlClient is a fake which implements the vtctlclient interface. 
-// The fake can be used to return a specific result for a given command. -// If the command is not registered, an error will be thrown. -type FakeVtctlClient struct { - *FakeLoggerEventStreamingClient -} - -// NewFakeVtctlClient creates a FakeVtctlClient struct. -func NewFakeVtctlClient() *FakeVtctlClient { - return &FakeVtctlClient{NewFakeLoggerEventStreamingClient()} -} - -// FakeVtctlClientFactory always returns the current instance. -func (f *FakeVtctlClient) FakeVtctlClientFactory(addr string) (vtctlclient.VtctlClient, error) { - return f, nil -} - -// ExecuteVtctlCommand is part of the vtctlclient interface. -func (f *FakeVtctlClient) ExecuteVtctlCommand(ctx context.Context, args []string, actionTimeout time.Duration) (logutil.EventStream, error) { - return f.FakeLoggerEventStreamingClient.StreamResult("" /* addr */, args) -} - -// Close is part of the vtctlclient interface. -func (f *FakeVtctlClient) Close() {} diff --git a/go/vt/vtctl/grpcvtctlclient/client.go b/go/vt/vtctl/grpcvtctlclient/client.go index f0fe94ca330..c09ead0687c 100644 --- a/go/vt/vtctl/grpcvtctlclient/client.go +++ b/go/vt/vtctl/grpcvtctlclient/client.go @@ -18,9 +18,8 @@ limitations under the License. 
package grpcvtctlclient import ( - "time" - "context" + "time" "google.golang.org/grpc" @@ -39,13 +38,13 @@ type gRPCVtctlClient struct { c vtctlservicepb.VtctlClient } -func gRPCVtctlClientFactory(addr string) (vtctlclient.VtctlClient, error) { +func gRPCVtctlClientFactory(ctx context.Context, addr string) (vtctlclient.VtctlClient, error) { opt, err := grpcclientcommon.SecureDialOption() if err != nil { return nil, err } // create the RPC client - cc, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) + cc, err := grpcclient.DialContext(ctx, addr, grpcclient.FailFast(false), opt) if err != nil { return nil, err } diff --git a/go/vt/vtctl/grpcvtctlclient/client_test.go b/go/vt/vtctl/grpcvtctlclient/client_test.go index 50e1968533e..00ec4888e76 100644 --- a/go/vt/vtctl/grpcvtctlclient/client_test.go +++ b/go/vt/vtctl/grpcvtctlclient/client_test.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtctl/grpcvtctlserver" "vitess.io/vitess/go/vt/vtctl/vtctlclienttest" + "vitess.io/vitess/go/vt/vtenv" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" ) @@ -52,11 +53,11 @@ func TestVtctlServer(t *testing.T) { // Create a gRPC server and listen on the port server := grpc.NewServer() - vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts)) + vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(vtenv.NewTestEnv(), ts)) go server.Serve(listener) // Create a VtctlClient gRPC client to talk to the fake server - client, err := gRPCVtctlClientFactory(fmt.Sprintf("localhost:%v", port)) + client, err := gRPCVtctlClientFactory(ctx, fmt.Sprintf("localhost:%v", port)) if err != nil { t.Fatalf("Cannot create client: %v", err) } @@ -86,7 +87,7 @@ func TestVtctlAuthClient(t *testing.T) { opts = append(opts, grpc.UnaryInterceptor(servenv.FakeAuthUnaryInterceptor)) server := grpc.NewServer(opts...) 
- vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts)) + vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(vtenv.NewTestEnv(), ts)) go server.Serve(listener) authJSON := `{ @@ -116,7 +117,7 @@ func TestVtctlAuthClient(t *testing.T) { require.NoError(t, err, "failed to set `--grpc_auth_static_client_creds=%s`", f.Name()) // Create a VtctlClient gRPC client to talk to the fake server - client, err := gRPCVtctlClientFactory(fmt.Sprintf("localhost:%v", port)) + client, err := gRPCVtctlClientFactory(ctx, fmt.Sprintf("localhost:%v", port)) if err != nil { t.Fatalf("Cannot create client: %v", err) } diff --git a/go/vt/vtctl/grpcvtctldclient/client.go b/go/vt/vtctl/grpcvtctldclient/client.go index 497867aebb0..9015fee8009 100644 --- a/go/vt/vtctl/grpcvtctldclient/client.go +++ b/go/vt/vtctl/grpcvtctldclient/client.go @@ -48,13 +48,13 @@ type gRPCVtctldClient struct { //go:generate -command grpcvtctldclient go run ../vtctldclient/codegen //go:generate grpcvtctldclient --out client_gen.go -func gRPCVtctldClientFactory(addr string) (vtctldclient.VtctldClient, error) { +func gRPCVtctldClientFactory(ctx context.Context, addr string) (vtctldclient.VtctldClient, error) { opt, err := grpcclientcommon.SecureDialOption() if err != nil { return nil, err } - conn, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) + conn, err := grpcclient.DialContext(ctx, addr, grpcclient.FailFast(false), opt) if err != nil { return nil, err } @@ -67,8 +67,8 @@ func gRPCVtctldClientFactory(addr string) (vtctldclient.VtctldClient, error) { // NewWithDialOpts returns a vtctldclient.VtctldClient configured with the given // DialOptions. It is exported for use in vtadmin. -func NewWithDialOpts(addr string, failFast grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) { - conn, err := grpcclient.Dial(addr, failFast, opts...) 
+func NewWithDialOpts(ctx context.Context, addr string, failFast grpcclient.FailFast, opts ...grpc.DialOption) (vtctldclient.VtctldClient, error) { + conn, err := grpcclient.DialContext(ctx, addr, failFast, opts...) if err != nil { return nil, err } diff --git a/go/vt/vtctl/grpcvtctldclient/client_gen.go b/go/vt/vtctl/grpcvtctldclient/client_gen.go index 087b566fe5d..ff753111020 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_gen.go +++ b/go/vt/vtctl/grpcvtctldclient/client_gen.go @@ -47,6 +47,15 @@ func (client *gRPCVtctldClient) AddCellsAlias(ctx context.Context, in *vtctldata return client.c.AddCellsAlias(ctx, in, opts...) } +// ApplyKeyspaceRoutingRules is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ApplyKeyspaceRoutingRules(ctx context.Context, in *vtctldatapb.ApplyKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyKeyspaceRoutingRulesResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ApplyKeyspaceRoutingRules(ctx, in, opts...) +} + // ApplyRoutingRules is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtctldatapb.ApplyRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyRoutingRulesResponse, error) { if client.c == nil { @@ -245,6 +254,15 @@ func (client *gRPCVtctldClient) ExecuteHook(ctx context.Context, in *vtctldatapb return client.c.ExecuteHook(ctx, in, opts...) } +// ExecuteMultiFetchAsDBA is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ExecuteMultiFetchAsDBA(ctx context.Context, in *vtctldatapb.ExecuteMultiFetchAsDBARequest, opts ...grpc.CallOption) (*vtctldatapb.ExecuteMultiFetchAsDBAResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ExecuteMultiFetchAsDBA(ctx, in, opts...) 
+} + // FindAllShardsInKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldatapb.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.FindAllShardsInKeyspaceResponse, error) { if client.c == nil { @@ -254,6 +272,15 @@ func (client *gRPCVtctldClient) FindAllShardsInKeyspace(ctx context.Context, in return client.c.FindAllShardsInKeyspace(ctx, in, opts...) } +// ForceCutOverSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) ForceCutOverSchemaMigration(ctx context.Context, in *vtctldatapb.ForceCutOverSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.ForceCutOverSchemaMigrationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.ForceCutOverSchemaMigration(ctx, in, opts...) +} + // GetBackups is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) GetBackups(ctx context.Context, in *vtctldatapb.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetBackupsResponse, error) { if client.c == nil { @@ -308,6 +335,15 @@ func (client *gRPCVtctldClient) GetKeyspace(ctx context.Context, in *vtctldatapb return client.c.GetKeyspace(ctx, in, opts...) } +// GetKeyspaceRoutingRules is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetKeyspaceRoutingRules(ctx context.Context, in *vtctldatapb.GetKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspaceRoutingRulesResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetKeyspaceRoutingRules(ctx, in, opts...) +} + // GetKeyspaces is part of the vtctlservicepb.VtctldClient interface. 
func (client *gRPCVtctldClient) GetKeyspaces(ctx context.Context, in *vtctldatapb.GetKeyspacesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspacesResponse, error) { if client.c == nil { @@ -362,6 +398,15 @@ func (client *gRPCVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.Ge return client.c.GetShard(ctx, in, opts...) } +// GetShardReplication is part of the vtctlservicepb.VtctldClient interface. +func (client *gRPCVtctldClient) GetShardReplication(ctx context.Context, in *vtctldatapb.GetShardReplicationRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardReplicationResponse, error) { + if client.c == nil { + return nil, status.Error(codes.Unavailable, connClosedMsg) + } + + return client.c.GetShardReplication(ctx, in, opts...) +} + // GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface. func (client *gRPCVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) { if client.c == nil { diff --git a/go/vt/vtctl/grpcvtctldclient/client_test.go b/go/vt/vtctl/grpcvtctldclient/client_test.go index 93c95ffa607..cb0e1477cf7 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_test.go +++ b/go/vt/vtctl/grpcvtctldclient/client_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtctl/vtctldclient" + "vitess.io/vitess/go/vt/vtenv" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" @@ -41,10 +42,10 @@ func TestFindAllShardsInKeyspace(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - 
testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { ks := &vtctldatapb.Keyspace{ Name: "testkeyspace", Keyspace: &topodatapb.Keyspace{}, @@ -88,10 +89,10 @@ func TestGetKeyspace(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { expected := &vtctldatapb.GetKeyspaceResponse{ Keyspace: &vtctldatapb.Keyspace{ Name: "testkeyspace", @@ -117,10 +118,10 @@ func TestGetKeyspaces(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) }) - testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { + testutil.WithTestServer(ctx, t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { resp, err := client.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) assert.NoError(t, err) assert.Empty(t, resp.Keyspaces) diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index f5f7847b499..1f17782402f 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/test/utils" 
"vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -46,7 +47,7 @@ func TestInitShardPrimary(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() defer tmc.Close() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmc) primaryDb := fakesqldb.New(t) defer primaryDb.Close() @@ -64,25 +65,27 @@ func TestInitShardPrimary(t *testing.T) { tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // These come from InitShardPrimary "FAKE RESET ALL REPLICATION", - "FAKE SET SLAVE POSITION", - "FAKE SET MASTER", - "START SLAVE", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "FAKE SET SOURCE", + "START REPLICA", } tablet2.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet2.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "FAKE RESET ALL REPLICATION", - "FAKE SET SLAVE POSITION", - "FAKE SET MASTER", - "START SLAVE", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "FAKE SET SOURCE", + "START REPLICA", } tablet3.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet3.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) @@ -93,7 +96,7 @@ func TestInitShardPrimary(t *testing.T) { tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) } - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := 
grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) resp, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ Keyspace: tablet1.Tablet.Keyspace, Shard: tablet1.Tablet.Shard, @@ -109,7 +112,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmc) primaryDb := fakesqldb.New(t) defer primaryDb.Close() @@ -127,17 +130,19 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { tablet2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", - "FAKE SET SLAVE POSITION", - "FAKE SET MASTER", - "START SLAVE", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "FAKE SET SOURCE", + "START REPLICA", } tablet2.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet2.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) tablet3.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ "FAKE RESET ALL REPLICATION", - "FAKE SET SLAVE POSITION", - "FAKE SET MASTER", - "START SLAVE", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "FAKE SET SOURCE", + "START REPLICA", } tablet3.FakeMysqlDaemon.SetReplicationSourceInputs = append(tablet3.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", tablet1.Tablet.Hostname, tablet1.Tablet.MysqlPort)) @@ -148,7 +153,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) } - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(vtenv.NewTestEnv(), ts) _, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ Keyspace: 
tablet1.Tablet.Keyspace, Shard: tablet1.Tablet.Shard, diff --git a/go/vt/vtctl/grpcvtctldserver/query_test.go b/go/vt/vtctl/grpcvtctldserver/query_test.go index 6073d3bc395..b9299592c15 100644 --- a/go/vt/vtctl/grpcvtctldserver/query_test.go +++ b/go/vt/vtctl/grpcvtctldserver/query_test.go @@ -25,12 +25,11 @@ import ( "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/vtctl/schematools" - "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vtctl/schematools" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" - "vitess.io/vitess/go/vt/proto/vttime" + vttimepb "vitess.io/vitess/go/vt/proto/vttime" ) var now = time.Now() @@ -86,7 +85,6 @@ func TestRowToSchemaMigration(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { out, err := rowToSchemaMigration(test.row) if test.shouldErr { @@ -110,7 +108,7 @@ func TestValueToVTTime(t *testing.T) { tests := []struct { name string value string - expected *vttime.Time + expected *vttimepb.Time shouldErr bool }{ { @@ -130,7 +128,6 @@ func TestValueToVTTime(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() @@ -153,7 +150,7 @@ func TestValueToVTDuration(t *testing.T) { name string value string defaultUnit string - expected *vttime.Duration + expected *vttimepb.Duration shouldErr bool }{ { @@ -182,7 +179,6 @@ func TestValueToVTDuration(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { out, err := valueToVTDuration(test.value, test.defaultUnit) if test.shouldErr { @@ -197,6 +193,8 @@ func TestValueToVTDuration(t *testing.T) { } func TestAlterSchemaMigrationQuery(t *testing.T) { + t.Parallel() + uuid := "4e5dcf80_354b_11eb_82cd_f875a4d24e90" tcases := []struct { @@ -228,6 +226,8 @@ func TestAlterSchemaMigrationQuery(t *testing.T) { for _, tcase := range tcases { testName := fmt.Sprintf("%s %s", tcase.command, tcase.uuid) 
t.Run(testName, func(t *testing.T) { + t.Parallel() + query, err := alterSchemaMigrationQuery(tcase.command, tcase.uuid) assert.NoError(t, err) assert.Equal(t, tcase.expect, query) diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index 25b711e1019..e2d7edcf2f5 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -60,6 +60,7 @@ import ( "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -92,13 +93,13 @@ type VtctldServer struct { } // NewVtctldServer returns a new VtctldServer for the given topo server. -func NewVtctldServer(ts *topo.Server) *VtctldServer { +func NewVtctldServer(env *vtenv.Environment, ts *topo.Server) *VtctldServer { tmc := tmclient.NewTabletManagerClient() return &VtctldServer{ ts: ts, tmc: tmc, - ws: workflow.NewServer(ts, tmc), + ws: workflow.NewServer(env, ts, tmc), } } @@ -108,7 +109,7 @@ func NewTestVtctldServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Vtc return &VtctldServer{ ts: ts, tmc: tmc, - ws: workflow.NewServer(ts, tmc), + ws: workflow.NewServer(vtenv.NewTestEnv(), ts, tmc), } } @@ -221,6 +222,8 @@ func (s *VtctldServer) ApplyShardRoutingRules(ctx context.Context, req *vtctldat // ApplySchema is part of the vtctlservicepb.VtctldServer interface. 
func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySchemaRequest) (resp *vtctldatapb.ApplySchemaResponse, err error) { + log.Infof("VtctldServer.ApplySchema: keyspace=%s, migrationContext=%v, ddlStrategy=%v, batchSize=%v", req.Keyspace, req.MigrationContext, req.DdlStrategy, req.BatchSize) + span, ctx := trace.NewSpan(ctx, "VtctldServer.ApplySchema") defer span.Finish() @@ -268,7 +271,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc logstream = append(logstream, e) }) - executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize) + executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize, s.ws.SQLParser()) if err = executor.SetDDLStrategy(req.DdlStrategy); err != nil { err = vterrors.Wrapf(err, "invalid DdlStrategy: %s", req.DdlStrategy) @@ -337,7 +340,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV span.Annotate("sql_mode", true) var stmt sqlparser.Statement - stmt, err = sqlparser.Parse(req.Sql) + stmt, err = s.ws.SQLParser().Parse(req.Sql) if err != nil { err = vterrors.Wrapf(err, "Parse(%s)", req.Sql) return nil, err @@ -364,15 +367,43 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV vs = req.VSchema } - if req.DryRun { // we return what was passed in and parsed, rather than current - return &vtctldatapb.ApplyVSchemaResponse{VSchema: vs}, nil - } - - _, err = vindexes.BuildKeyspace(vs) + ksVs, err := vindexes.BuildKeyspace(vs, s.ws.SQLParser()) if err != nil { err = vterrors.Wrapf(err, "BuildKeyspace(%s)", req.Keyspace) return nil, err } + response := &vtctldatapb.ApplyVSchemaResponse{ + VSchema: vs, + UnknownVindexParams: make(map[string]*vtctldatapb.ApplyVSchemaResponse_ParamList), + } + + // Attach unknown Vindex params to the response. 
+ var vdxNames []string + var unknownVindexParams []string + for name := range ksVs.Vindexes { + vdxNames = append(vdxNames, name) + } + sort.Strings(vdxNames) + for _, name := range vdxNames { + vdx := ksVs.Vindexes[name] + if val, ok := vdx.(vindexes.ParamValidating); ok { + ups := val.UnknownParams() + if len(ups) == 0 { + continue + } + response.UnknownVindexParams[name] = &vtctldatapb.ApplyVSchemaResponse_ParamList{Params: ups} + unknownVindexParams = append(unknownVindexParams, fmt.Sprintf("%s (%s)", name, strings.Join(ups, ", "))) + } + } + + if req.Strict && len(unknownVindexParams) > 0 { // return early if unknown params found in strict mode + err = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongArguments, "unknown vindex params: %s", strings.Join(unknownVindexParams, "; ")) + return response, err + } + + if req.DryRun { // return early if dry run + return response, err + } if err = s.ts.SaveVSchema(ctx, req.Keyspace, vs); err != nil { err = vterrors.Wrapf(err, "SaveVSchema(%s, %v)", req.Keyspace, req.VSchema) @@ -390,7 +421,8 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV err = vterrors.Wrapf(err, "GetVSchema(%s)", req.Keyspace) return nil, err } - return &vtctldatapb.ApplyVSchemaResponse{VSchema: updatedVS}, nil + response.VSchema = updatedVS + return response, nil } // Backup is part of the vtctlservicepb.VtctldServer interface. 
@@ -431,8 +463,14 @@ func (s *VtctldServer) BackupShard(req *vtctldatapb.BackupShardRequest, stream v span.Annotate("incremental_from_pos", req.IncrementalFromPos) tablets, stats, err := reparentutil.ShardReplicationStatuses(ctx, s.ts, s.tmc, req.Keyspace, req.Shard) + + // Instead of return on err directly, only return err when no tablets for backup at all if err != nil { - return err + tablets = reparentutil.GetBackupCandidates(tablets, stats) + // Only return err when no usable tablet + if len(tablets) == 0 { + return err + } } var ( @@ -480,7 +518,7 @@ func (s *VtctldServer) backupTablet(ctx context.Context, tablet *topodatapb.Tabl Send(resp *vtctldatapb.BackupResponse) error }) error { r := &tabletmanagerdatapb.BackupRequest{ - Concurrency: int64(req.Concurrency), + Concurrency: req.Concurrency, AllowPrimary: req.AllowPrimary, IncrementalFromPos: req.IncrementalFromPos, UpgradeSafe: req.UpgradeSafe, @@ -673,6 +711,37 @@ func (s *VtctldServer) CleanupSchemaMigration(ctx context.Context, req *vtctldat return resp, nil } +// ForceCutOverSchemaMigration is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) ForceCutOverSchemaMigration(ctx context.Context, req *vtctldatapb.ForceCutOverSchemaMigrationRequest) (resp *vtctldatapb.ForceCutOverSchemaMigrationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.ForceCutOverSchemaMigration") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("uuid", req.Uuid) + + query, err := alterSchemaMigrationQuery("force_cutover", req.Uuid) + if err != nil { + return nil, err + } + + log.Infof("Calling ApplySchema to force cut-over migration %s", req.Uuid) + qr, err := s.ApplySchema(ctx, &vtctldatapb.ApplySchemaRequest{ + Keyspace: req.Keyspace, + Sql: []string{query}, + WaitReplicasTimeout: protoutil.DurationToProto(DefaultWaitReplicasTimeout), + }) + if err != nil { + return nil, err + } + + resp = &vtctldatapb.ForceCutOverSchemaMigrationResponse{ + RowsAffectedByShard: qr.RowsAffectedByShard, + } + return resp, nil +} + // CompleteSchemaMigration is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) CompleteSchemaMigration(ctx context.Context, req *vtctldatapb.CompleteSchemaMigrationRequest) (resp *vtctldatapb.CompleteSchemaMigrationResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.CompleteSchemaMigration") @@ -738,7 +807,6 @@ func (s *VtctldServer) CreateKeyspace(ctx context.Context, req *vtctldatapb.Crea ki := &topodatapb.Keyspace{ KeyspaceType: req.Type, - ServedFroms: req.ServedFroms, BaseKeyspace: req.BaseKeyspace, SnapshotTime: req.SnapshotTime, DurabilityPolicy: req.DurabilityPolicy, @@ -1189,6 +1257,36 @@ func (s *VtctldServer) ExecuteFetchAsDBA(ctx context.Context, req *vtctldatapb.E return &vtctldatapb.ExecuteFetchAsDBAResponse{Result: qr}, nil } +// ExecuteMultiFetchAsDBA is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) ExecuteMultiFetchAsDBA(ctx context.Context, req *vtctldatapb.ExecuteMultiFetchAsDBARequest) (resp *vtctldatapb.ExecuteMultiFetchAsDBAResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.ExecuteMultiFetchAsDBA") + defer span.Finish() + + defer panicHandler(&err) + + span.Annotate("tablet_alias", topoproto.TabletAliasString(req.TabletAlias)) + span.Annotate("max_rows", req.MaxRows) + span.Annotate("disable_binlogs", req.DisableBinlogs) + span.Annotate("reload_schema", req.ReloadSchema) + + ti, err := s.ts.GetTablet(ctx, req.TabletAlias) + if err != nil { + return nil, err + } + + qrs, err := s.tmc.ExecuteMultiFetchAsDba(ctx, ti.Tablet, false, &tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest{ + Sql: []byte(req.Sql), + MaxRows: uint64(req.MaxRows), + DisableBinlogs: req.DisableBinlogs, + ReloadSchema: req.ReloadSchema, + }) + if err != nil { + return nil, err + } + + return &vtctldatapb.ExecuteMultiFetchAsDBAResponse{Results: qrs}, nil +} + // ExecuteHook is part of the vtctlservicepb.VtctldServer interface. func (s *VtctldServer) ExecuteHook(ctx context.Context, req *vtctldatapb.ExecuteHookRequest) (resp *vtctldatapb.ExecuteHookResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.ExecuteHook") @@ -1237,7 +1335,7 @@ func (s *VtctldServer) FindAllShardsInKeyspace(ctx context.Context, req *vtctlda span.Annotate("keyspace", req.Keyspace) - result, err := s.ts.FindAllShardsInKeyspace(ctx, req.Keyspace) + result, err := s.ts.FindAllShardsInKeyspace(ctx, req.Keyspace, nil) if err != nil { return nil, err } @@ -1699,6 +1797,43 @@ func (s *VtctldServer) GetShard(ctx context.Context, req *vtctldatapb.GetShardRe }, nil } +// GetShardReplication is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) GetShardReplication(ctx context.Context, req *vtctldatapb.GetShardReplicationRequest) (resp *vtctldatapb.GetShardReplicationResponse, err error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.GetShardReplication") + defer span.Finish() + + defer panicHandler(&err) + + cells := req.Cells + if len(cells) == 0 { + ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer cancel() + + cells, err = s.ts.GetCellInfoNames(ctx) + if err != nil { + return nil, err + } + } + + span.Annotate("keyspace", req.Keyspace) + span.Annotate("shard", req.Shard) + span.Annotate("cells", strings.Join(cells, ",")) + + replicationByCell := make(map[string]*topodatapb.ShardReplication, len(cells)) + for _, cell := range cells { + data, err := s.ts.GetShardReplication(ctx, cell, req.Keyspace, req.Shard) + if err != nil { + return nil, err + } + + replicationByCell[cell] = data.ShardReplication + } + + return &vtctldatapb.GetShardReplicationResponse{ + ShardReplicationByCell: replicationByCell, + }, nil +} + // GetSrvKeyspaceNames is part of the vtctlservicepb.VtctldServer interface. 
func (s *VtctldServer) GetSrvKeyspaceNames(ctx context.Context, req *vtctldatapb.GetSrvKeyspaceNamesRequest) (resp *vtctldatapb.GetSrvKeyspaceNamesResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.GetSrvKeyspaceNames") @@ -1976,7 +2111,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable case len(req.TabletAliases) > 0: span.Annotate("tablet_aliases", strings.Join(topoproto.TabletAliasList(req.TabletAliases).ToStringSlice(), ",")) - tabletMap, err = s.ts.GetTabletMap(ctx, req.TabletAliases) + tabletMap, err = s.ts.GetTabletMap(ctx, req.TabletAliases, nil) if err != nil { err = fmt.Errorf("GetTabletMap(%v) failed: %w", req.TabletAliases, err) } @@ -1998,7 +2133,6 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable if req.Strict { return nil, err } - log.Warningf("GetTablets encountered non-fatal error %s; continuing because Strict=false", err) default: return nil, err @@ -2018,7 +2152,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable tablets := make([]*topodatapb.Tablet, 0, len(tabletMap)) for _, ti := range tabletMap { - if req.TabletType != 0 && ti.Type != req.TabletType { + if req.TabletType != topodatapb.TabletType_UNKNOWN && ti.Type != req.TabletType { continue } adjustTypeForStalePrimary(ti, truePrimaryTimestamp) @@ -2052,7 +2186,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable go func(cell string) { defer wg.Done() - tablets, err := s.ts.GetTabletsByCell(ctx, cell) + tablets, err := s.ts.GetTabletsByCell(ctx, cell, nil) if err != nil { if req.Strict { log.Infof("GetTablets got an error from cell %s: %s. 
Running in strict mode, so canceling other cell RPCs", cell, err) @@ -2086,7 +2220,7 @@ func (s *VtctldServer) GetTablets(ctx context.Context, req *vtctldatapb.GetTable if req.Keyspace != "" && tablet.Keyspace != req.Keyspace { continue } - if req.TabletType != 0 && tablet.Type != req.TabletType { + if req.TabletType != topodatapb.TabletType_UNKNOWN && tablet.Type != req.TabletType { continue } @@ -2705,6 +2839,10 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap } else if !ok { waitReplicasTimeout = time.Second * 30 } + tolerableReplLag, _, err := protoutil.DurationFromProto(req.TolerableReplicationLag) + if err != nil { + return nil, err + } span.Annotate("keyspace", req.Keyspace) span.Annotate("shard", req.Shard) @@ -2734,6 +2872,7 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap AvoidPrimaryAlias: req.AvoidPrimary, NewPrimaryAlias: req.NewPrimary, WaitReplicasTimeout: waitReplicasTimeout, + TolerableReplLag: tolerableReplLag, }, ) @@ -2746,7 +2885,7 @@ func (s *VtctldServer) PlannedReparentShard(ctx context.Context, req *vtctldatap resp.Keyspace = ev.ShardInfo.Keyspace() resp.Shard = ev.ShardInfo.ShardName() - if !topoproto.TabletAliasIsZero(ev.NewPrimary.Alias) { + if ev.NewPrimary != nil && !topoproto.TabletAliasIsZero(ev.NewPrimary.Alias) { resp.PromotedPrimary = ev.NewPrimary.Alias } } @@ -3118,7 +3257,7 @@ func (s *VtctldServer) ReparentTablet(ctx context.Context, req *vtctldatapb.Repa return nil, err } - if err = s.tmc.SetReplicationSource(ctx, tablet.Tablet, shard.PrimaryAlias, 0, "", false, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet)); err != nil { + if err = s.tmc.SetReplicationSource(ctx, tablet.Tablet, shard.PrimaryAlias, 0, "", false, reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet.Tablet), 0); err != nil { return nil, err } @@ -3318,47 +3457,6 @@ func (s *VtctldServer) SetKeyspaceDurabilityPolicy(ctx context.Context, req 
*vtc }, nil } -// SetKeyspaceServedFrom is part of the vtctlservicepb.VtctldServer interface. -func (s *VtctldServer) SetKeyspaceServedFrom(ctx context.Context, req *vtctldatapb.SetKeyspaceServedFromRequest) (resp *vtctldatapb.SetKeyspaceServedFromResponse, err error) { - span, ctx := trace.NewSpan(ctx, "VtctldServer.SetKeyspaceServedFrom") - defer span.Finish() - - defer panicHandler(&err) - - span.Annotate("keyspace", req.Keyspace) - span.Annotate("tablet_type", topoproto.TabletTypeLString(req.TabletType)) - span.Annotate("cells", strings.Join(req.Cells, ",")) - span.Annotate("remove", req.Remove) - span.Annotate("source_keyspace", req.SourceKeyspace) - - ctx, unlock, lockErr := s.ts.LockKeyspace(ctx, req.Keyspace, "SetKeyspaceServedFrom") - if lockErr != nil { - err = lockErr - return nil, err - } - - defer unlock(&err) - - ki, err := s.ts.GetKeyspace(ctx, req.Keyspace) - if err != nil { - return nil, err - } - - err = ki.UpdateServedFromMap(req.TabletType, req.Cells, req.SourceKeyspace, req.Remove, nil) - if err != nil { - return nil, err - } - - err = s.ts.UpdateKeyspace(ctx, ki) - if err != nil { - return nil, err - } - - return &vtctldatapb.SetKeyspaceServedFromResponse{ - Keyspace: ki.Keyspace, - }, nil -} - // SetShardIsPrimaryServing is part of the vtctlservicepb.VtctldServer interface. 
func (s *VtctldServer) SetShardIsPrimaryServing(ctx context.Context, req *vtctldatapb.SetShardIsPrimaryServingRequest) (resp *vtctldatapb.SetShardIsPrimaryServingResponse, err error) { span, ctx := trace.NewSpan(ctx, "VtctldServer.SetShardIsPrimaryServing") @@ -4421,7 +4519,7 @@ func (s *VtctldServer) ValidateShard(ctx context.Context, req *vtctldatapb.Valid getTabletMapCtx, getTabletMapCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer getTabletMapCancel() - tabletMap, _ := s.ts.GetTabletMap(getTabletMapCtx, aliases) + tabletMap, _ := s.ts.GetTabletMap(getTabletMapCtx, aliases, nil) var primaryAlias *topodatapb.TabletAlias for _, alias := range aliases { @@ -4830,6 +4928,7 @@ func (s *VtctldServer) VDiffCreate(ctx context.Context, req *vtctldatapb.VDiffCr span.Annotate("tablet_types", req.TabletTypes) span.Annotate("tables", req.Tables) span.Annotate("auto_retry", req.AutoRetry) + span.Annotate("max_diff_duration", req.MaxDiffDuration) resp, err = s.ws.VDiffCreate(ctx, req) return resp, err @@ -4959,8 +5058,8 @@ func (s *VtctldServer) WorkflowUpdate(ctx context.Context, req *vtctldatapb.Work } // StartServer registers a VtctldServer for RPCs on the given gRPC server. -func StartServer(s *grpc.Server, ts *topo.Server) { - vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts)) +func StartServer(s *grpc.Server, env *vtenv.Environment, ts *topo.Server) { + vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(env, ts)) } // getTopologyCell is a helper method that returns a topology cell given its path. 
@@ -4981,9 +5080,7 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v return nil, err } - data, _, dataErr := conn.Get(ctx, relativePath) - - if dataErr == nil { + if data, _, err := conn.Get(ctx, relativePath); err == nil { result, err := topo.DecodeContent(relativePath, data, false) if err != nil { err := vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error decoding file content for cell %s: %v", cellPath, err) @@ -4995,15 +5092,13 @@ func (s *VtctldServer) getTopologyCell(ctx context.Context, cellPath string) (*v return &topoCell, nil } - children, childrenErr := conn.ListDir(ctx, relativePath, false /*full*/) - - if childrenErr != nil && dataErr != nil { + children, err := conn.ListDir(ctx, relativePath, false /*full*/) + if err != nil { err := vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cell %s with path %s has no file contents and no children: %v", cell, cellPath, err) return nil, err } topoCell.Children = make([]string, len(children)) - for i, c := range children { topoCell.Children[i] = c.Name } @@ -5069,3 +5164,67 @@ func (s *VtctldServer) diffVersion(ctx context.Context, primaryVersion string, p er.RecordError(fmt.Errorf("primary %v version %v is different than replica %v version %v", topoproto.TabletAliasString(primaryAlias), primaryVersion, topoproto.TabletAliasString(alias), replicaVersion)) } } + +// ApplyKeyspaceRoutingRules is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) ApplyKeyspaceRoutingRules(ctx context.Context, req *vtctldatapb.ApplyKeyspaceRoutingRulesRequest) (*vtctldatapb.ApplyKeyspaceRoutingRulesResponse, error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.ApplyKeyspaceRoutingRules") + defer span.Finish() + + span.Annotate("skip_rebuild", req.SkipRebuild) + span.Annotate("rebuild_cells", strings.Join(req.RebuildCells, ",")) + + resp := &vtctldatapb.ApplyKeyspaceRoutingRulesResponse{} + + update := func() error { + return topotools.UpdateKeyspaceRoutingRules(ctx, s.ts, "ApplyKeyspaceRoutingRules", + func(ctx context.Context, rules *map[string]string) error { + clear(*rules) + for _, rule := range req.GetKeyspaceRoutingRules().Rules { + (*rules)[rule.FromKeyspace] = rule.ToKeyspace + } + return nil + }) + } + err := update() + if err != nil { + // If we were racing with another caller to create the initial routing rules, then + // we can immediately retry the operation. + if !topo.IsErrType(err, topo.NodeExists) { + return nil, err + } + if err = update(); err != nil { + return nil, err + } + } + + newRules, err := s.ts.GetKeyspaceRoutingRules(ctx) + if err != nil { + return nil, err + } + resp.KeyspaceRoutingRules = newRules + + if req.SkipRebuild { + return resp, nil + } + + if err := s.ts.RebuildSrvVSchema(ctx, req.RebuildCells); err != nil { + return nil, vterrors.Wrapf(err, "RebuildSrvVSchema(%v) failed: %v", req.RebuildCells, err) + } + + return resp, nil +} + +// GetKeyspaceRoutingRules is part of the vtctlservicepb.VtctldServer interface. 
+func (s *VtctldServer) GetKeyspaceRoutingRules(ctx context.Context, req *vtctldatapb.GetKeyspaceRoutingRulesRequest) (*vtctldatapb.GetKeyspaceRoutingRulesResponse, error) { + span, ctx := trace.NewSpan(ctx, "VtctldServer.GetKeyspaceRoutingRules") + defer span.Finish() + + rules, err := s.ts.GetKeyspaceRoutingRules(ctx) + if err != nil { + return nil, err + } + + return &vtctldatapb.GetKeyspaceRoutingRulesResponse{ + KeyspaceRoutingRules: rules, + }, nil +} diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go index 3100855e370..4d7c5aa1943 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" @@ -106,8 +107,9 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, }, }, - PopulateReparentJournalDelays: map[string]time.Duration{ - "zone1-0000000200": time.Second * 29, + SetReplicationSourceDelays: map[string]time.Duration{ + "zone1-0000000100": time.Second * 29, + "zone1-0000000101": time.Second * 29, }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, @@ -223,8 +225,9 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, }, }, - PopulateReparentJournalDelays: map[string]time.Duration{ - "zone1-0000000200": time.Second * 31, + SetReplicationSourceDelays: map[string]time.Duration{ + "zone1-0000000100": time.Second * 31, + "zone1-0000000101": time.Second * 31, }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000200": nil, @@ -290,8 +293,6 @@ func TestEmergencyReparentShardSlow(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -310,7 +311,7 @@ func 
TestEmergencyReparentShardSlow(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -592,8 +593,6 @@ func TestPlannedReparentShardSlow(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -608,7 +607,7 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -738,7 +737,7 @@ func TestSleepTablet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) start := time.Now() diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 38029f0e799..426f54c074b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "os" + "slices" "sort" "strings" "testing" @@ -46,6 +47,7 @@ import ( "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" "vitess.io/vitess/go/vt/vtctl/schematools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" @@ -85,7 +87,7 @@ func TestPanicHandler(t *testing.T) { }() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, nil, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return 
NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.AddCellInfo(context.Background(), nil) @@ -141,7 +143,7 @@ func TestAddCellInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.AddCellInfo(ctx, tt.req) if tt.shouldErr { @@ -214,7 +216,7 @@ func TestAddCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.AddCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -326,7 +328,7 @@ func TestApplyRoutingRules(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.ApplyRoutingRules(ctx, tt.req) if tt.shouldErr { @@ -351,6 +353,7 @@ func TestApplyVSchema(t *testing.T) { req *vtctldatapb.ApplyVSchemaRequest exp *vtctldatapb.ApplyVSchemaResponse shouldErr bool + err string }{ { name: "normal", @@ -397,6 +400,84 @@ func TestApplyVSchema(t *testing.T) { Keyspace: "testkeyspace", }, shouldErr: true, + }, { + name: "unknown params", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspacesharded", + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + SkipRebuild: true, + }, + exp: &vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: 
map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + UnknownVindexParams: map[string]*vtctldatapb.ApplyVSchemaResponse_ParamList{ + "lookup1": { + Params: []string{"goodbye", "hello"}, + }, + }, + }, + shouldErr: false, + }, { + name: "strict unknown params", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspacesharded", + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + SkipRebuild: true, + Strict: true, + }, + exp: &vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + UnknownVindexParams: map[string]*vtctldatapb.ApplyVSchemaResponse_ParamList{ + "lookup1": { + Params: []string{"goodbye", "hello"}, + }, + }, + }, + shouldErr: true, + err: "unknown vindex params: lookup1 (goodbye, hello)", }, { name: "dry run", req: &vtctldatapb.ApplyVSchemaRequest{ @@ -412,6 +493,100 @@ func TestApplyVSchema(t *testing.T) { }, }, shouldErr: false, + }, { + name: "dry run with invalid params", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspacesharded", + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup_invalid", + }, + }, + }, + DryRun: true, + }, + exp: &vtctldatapb.ApplyVSchemaResponse{}, + shouldErr: true, + }, { + name: "dry run with unknown params", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspacesharded", + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + DryRun: true, + }, + exp: 
&vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + UnknownVindexParams: map[string]*vtctldatapb.ApplyVSchemaResponse_ParamList{ + "lookup1": { + Params: []string{"goodbye", "hello"}, + }, + }, + }, + shouldErr: false, + }, { + name: "strict dry run with unknown params", + req: &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: "testkeyspacesharded", + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + DryRun: true, + Strict: true, + }, + exp: &vtctldatapb.ApplyVSchemaResponse{ + VSchema: &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup1": { + Type: "lookup", + Params: map[string]string{ + "hello": "world", + "goodbye": "world", + }, + }, + }, + }, + UnknownVindexParams: map[string]*vtctldatapb.ApplyVSchemaResponse_ParamList{ + "lookup1": { + Params: []string{"goodbye", "hello"}, + }, + }, + }, + shouldErr: true, + err: "unknown vindex params: lookup1 (goodbye, hello)", }, } @@ -421,7 +596,7 @@ func TestApplyVSchema(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ @@ -463,6 +638,9 @@ func TestApplyVSchema(t *testing.T) { res, err := vtctld.ApplyVSchema(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) + if tt.err != "" { + assert.ErrorContains(t, err, tt.err) + } return } @@ -701,7 +879,7 @@ func TestBackup(t *testing.T) { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) } vtctld := 
testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) client := localvtctldclient.New(vtctld) stream, err := client.Backup(ctx, tt.req) @@ -1041,7 +1219,7 @@ func TestBackupShard(t *testing.T) { }, tt.tablets..., ) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) client := localvtctldclient.New(vtctld) stream, err := client.BackupShard(ctx, tt.req) @@ -1249,8 +1427,8 @@ func TestCancelSchemaMigration(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { + t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1261,7 +1439,7 @@ func TestCancelSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.CancelSchemaMigration(ctx, test.req) @@ -1483,8 +1661,6 @@ func TestChangeTabletType(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1493,7 +1669,9 @@ func TestChangeTabletType(t *testing.T) { ts := memorytopo.NewServer(ctx, tt.cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: ts, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -1539,7 +1717,9 @@ func TestChangeTabletType(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: nil, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -1751,6 +1931,8 @@ func TestCleanupSchemaMigration(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "zone1") @@ -1760,7 +1942,7 @@ func TestCleanupSchemaMigration(t *testing.T) { }, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.CleanupSchemaMigration(ctx, test.req) @@ -1775,6 +1957,210 @@ func TestCleanupSchemaMigration(t *testing.T) { } } +func TestForceCutOverSchemaMigration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablets []*topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.ForceCutOverSchemaMigrationRequest + expected *vtctldatapb.ForceCutOverSchemaMigrationResponse + shouldErr bool + }{ + { + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.ForceCutOverSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + expected: &vtctldatapb.ForceCutOverSchemaMigrationResponse{ + RowsAffectedByShard: map[string]uint64{ + "-80": 1, + "80-": 0, + }, + }, + }, + { + name: "no shard primary", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: 
"ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: &querypb.QueryResult{}, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.ForceCutOverSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + { + name: "executeQuery failure", + tablets: []*topodatapb.Tablet{ + { + Keyspace: "ks", + Shard: "-80", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + { + Keyspace: "ks", + Shard: "80-", + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteQueryResults: map[string]struct { + Response *querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + "zone1-0000000200": { + Response: &querypb.QueryResult{}, + }, + }, + PrimaryPositionResults: map[string]struct { + Position string + Error error + }{ + "zone1-0000000100": {}, + "zone1-0000000200": {}, + }, + ReloadSchemaResults: map[string]error{ + "zone1-0000000100": nil, + "zone1-0000000200": nil, + }, + }, + req: &vtctldatapb.ForceCutOverSchemaMigrationRequest{ + Keyspace: "ks", + Uuid: "abc", + }, + shouldErr: true, + }, + // execute query failure + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, 
"zone1") + + testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ + AlsoSetShardPrimary: true, + }, test.tablets...) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) + + resp, err := vtctld.ForceCutOverSchemaMigration(ctx, test.req) + if test.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, test.expected, resp) + }) + } +} + func TestCompleteSchemaMigration(t *testing.T) { t.Parallel() @@ -1952,11 +2338,11 @@ func TestCompleteSchemaMigration(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { + t.Parallel() - ctx, Complete := context.WithCancel(context.Background()) - defer Complete() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() ts := memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -1964,7 +2350,7 @@ func TestCompleteSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.CompleteSchemaMigration(ctx, test.req) @@ -2207,8 +2593,6 @@ func TestCreateKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -2220,7 +2604,7 @@ func TestCreateKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) for name, ks := range tt.topo { @@ -2486,8 +2870,6 @@ func TestCreateShard(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() if tt.req == nil { @@ -2498,7 +2880,7 @@ func TestCreateShard(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) for _, ks := range tt.keyspaces { @@ -2553,7 +2935,7 @@ func TestDeleteCellInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.DeleteCellInfo(ctx, tt.req) if tt.shouldErr { @@ -2614,7 +2996,7 @@ func TestDeleteCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.DeleteCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -2846,7 +3228,7 @@ func TestDeleteKeyspace(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) 
@@ -3348,19 +3730,17 @@ func TestDeleteShards(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() cells := []string{"zone1", "zone2", "zone3"} - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -3477,8 +3857,6 @@ func TestDeleteSrvKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3503,7 +3881,7 @@ func TestDeleteSrvKeyspace(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.DeleteSrvVSchema(ctx, tt.req) if tt.shouldErr { @@ -3951,8 +4329,6 @@ func TestDeleteTablets(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3964,7 +4340,7 @@ func TestDeleteTablets(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) // Setup tablets and shards @@ -3986,7 +4362,7 @@ func TestDeleteTablets(t *testing.T) { // value anymore defer unlock(&lerr) - // we do, however, care that the lock context gets propogated + // we do, however, care that the lock context gets propagated // both to additional calls to lock, and to the actual RPC call. 
ctx = lctx } @@ -4193,7 +4569,7 @@ func TestEmergencyReparentShard(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -4325,7 +4701,6 @@ func TestExecuteFetchAsApp(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4335,7 +4710,7 @@ func TestExecuteFetchAsApp(t *testing.T) { testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.ExecuteFetchAsApp(ctx, tt.req) if tt.shouldErr { @@ -4452,7 +4827,6 @@ func TestExecuteFetchAsDBA(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4462,7 +4836,7 @@ func TestExecuteFetchAsDBA(t *testing.T) { testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.ExecuteFetchAsDBA(ctx, tt.req) if tt.shouldErr { @@ -4476,6 +4850,135 @@ func TestExecuteFetchAsDBA(t *testing.T) { } } +func TestExecuteMultiFetchAsDBA(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + tablet *topodatapb.Tablet + tmc *testutil.TabletManagerClient + req *vtctldatapb.ExecuteMultiFetchAsDBARequest + expected *vtctldatapb.ExecuteMultiFetchAsDBAResponse + shouldErr bool + }{ + { + name: "ok", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tmc: &testutil.TabletManagerClient{ + 
ExecuteMultiFetchAsDbaResults: map[string]struct { + Response []*querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: []*querypb.QueryResult{ + {InsertId: 100}, + {InsertId: 101}, + }, + }, + }, + }, + req: &vtctldatapb.ExecuteMultiFetchAsDBARequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Sql: "select 1; select 2", + }, + expected: &vtctldatapb.ExecuteMultiFetchAsDBAResponse{ + Results: []*querypb.QueryResult{ + {InsertId: 100}, + {InsertId: 101}, + }, + }, + }, + { + name: "tablet not found", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteMultiFetchAsDbaResults: map[string]struct { + Response []*querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Response: []*querypb.QueryResult{ + {InsertId: 100}, + {InsertId: 101}, + }, + }, + }, + }, + req: &vtctldatapb.ExecuteMultiFetchAsDBARequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Sql: "select 1; select 2;", + }, + shouldErr: true, + }, + { + name: "query error", + tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + tmc: &testutil.TabletManagerClient{ + ExecuteMultiFetchAsDbaResults: map[string]struct { + Response []*querypb.QueryResult + Error error + }{ + "zone1-0000000100": { + Error: assert.AnError, + }, + }, + }, + req: &vtctldatapb.ExecuteMultiFetchAsDBARequest{ + TabletAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Sql: "select 1; select 2", + }, + shouldErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + testutil.AddTablet(ctx, t, ts, tt.tablet, nil) + + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) 
vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) + resp, err := vtctld.ExecuteMultiFetchAsDBA(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + utils.MustMatch(t, tt.expected, resp) + }) + } +} + func TestExecuteHook(t *testing.T) { t.Parallel() @@ -4647,7 +5150,7 @@ func TestExecuteHook(t *testing.T) { t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.ExecuteHook(ctx, tt.req) @@ -4668,7 +5171,7 @@ func TestFindAllShardsInKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) ks := &vtctldatapb.Keyspace{ @@ -4710,7 +5213,7 @@ func TestGetBackups(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.BackupStorage.Backups = map[string][]string{ @@ -4818,7 +5321,7 @@ func TestGetKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) expected := &vtctldatapb.GetKeyspaceResponse{ @@ -4844,7 +5347,7 @@ func TestGetCellInfoNames(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := 
testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) @@ -4853,7 +5356,7 @@ func TestGetCellInfoNames(t *testing.T) { ts = memorytopo.NewServer(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err = vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) @@ -4862,7 +5365,7 @@ func TestGetCellInfoNames(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) topofactory.SetError(assert.AnError) @@ -4877,7 +5380,7 @@ func TestGetCellInfo(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) expected := &topodatapb.CellInfo{ @@ -4905,7 +5408,7 @@ func TestGetCellsAliases(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "c11", "c12", "c13", "c21", "c22") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) alias1 := &topodatapb.CellsAlias{ @@ -4932,7 +5435,7 @@ func TestGetCellsAliases(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return 
NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) topofactory.SetError(assert.AnError) @@ -4999,8 +5502,6 @@ func TestGetFullStatus(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -5012,7 +5513,9 @@ func TestGetFullStatus(t *testing.T) { FullStatusResult: &replicationdatapb.FullStatus{ ServerUuid: tt.serverUUID, }, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -5037,7 +5540,7 @@ func TestGetKeyspaces(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) @@ -5194,7 +5697,6 @@ func TestGetPermissions(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -5205,7 +5707,7 @@ func TestGetPermissions(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.GetPermissions(ctx, tt.req) if tt.shouldErr { @@ -5263,7 +5765,6 @@ func TestGetRoutingRules(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -5281,7 +5782,7 @@ func TestGetRoutingRules(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.GetRoutingRules(ctx, &vtctldatapb.GetRoutingRulesRequest{}) if tt.shouldErr { @@ -5306,7 +5807,7 @@ func TestGetSchema(t *testing.T) { }{}, } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) validAlias := &topodatapb.TabletAlias{ @@ -5631,7 +6132,6 @@ func TestGetSchemaMigrations(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { t.Parallel() @@ -5671,7 +6171,7 @@ func TestGetSchemaMigrations(t *testing.T) { ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{AlsoSetShardPrimary: true}, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) if test.failTopo { @@ -5702,81 +6202,284 @@ func TestGetShard(t *testing.T) { shouldErr bool }{ { - name: "success", - topo: []*vtctldatapb.Shard{ - { - Keyspace: "testkeyspace", - Name: "-", + name: "success", + topo: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: nil, + req: &vtctldatapb.GetShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + expected: &vtctldatapb.GetShardResponse{ + Shard: &vtctldatapb.Shard{ + Keyspace: "testkeyspace", + Name: "-", + Shard: &topodatapb.Shard{ + KeyRange: &topodatapb.KeyRange{}, + IsPrimaryServing: true, + }, + }, + }, + shouldErr: false, + }, + { + name: "shard not found", + topo: nil, + topoError: nil, + req: &vtctldatapb.GetShardRequest{ + Keyspace: "testkeyspace", + ShardName: "-", + }, + shouldErr: true, + }, + { + name: "unavailable topo server", + topo: []*vtctldatapb.Shard{ + { + Keyspace: "testkeyspace", + Name: "-", + }, + }, + topoError: assert.AnError, + req: &vtctldatapb.GetShardRequest{}, + shouldErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cells := []string{"zone1", "zone2", "zone3"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) + + testutil.AddShards(ctx, t, ts, tt.topo...) 
+ + if tt.topoError != nil { + topofactory.SetError(tt.topoError) + } + + resp, err := vtctld.GetShard(ctx, tt.req) + if tt.shouldErr { + assert.Error(t, err) + return + } + + utils.MustMatch(t, tt.expected, resp) + }) + } +} + +func TestGetShardReplication(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + shardReplicationByCell map[string]map[string]map[string]*topodatapb.ShardReplication + topoError error + req *vtctldatapb.GetShardReplicationRequest + expected *vtctldatapb.GetShardReplicationResponse + shouldErr bool + }{ + { + name: "success", + shardReplicationByCell: map[string]map[string]map[string]*topodatapb.ShardReplication{ + "zone1": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 100}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 101}}, + }, + }, + }, + }, + "zone2": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 200}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 201}}, + }, + }, + }, + }, + }, + req: &vtctldatapb.GetShardReplicationRequest{ + Keyspace: "ks1", + Shard: "0", + }, + expected: &vtctldatapb.GetShardReplicationResponse{ + ShardReplicationByCell: map[string]*topodatapb.ShardReplication{ + "zone1": { + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 100}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 101}}, + }, + }, + "zone2": { + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 200}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 201}}, + }, + }, + }, + }, + }, + { + name: "cell filtering", + shardReplicationByCell: map[string]map[string]map[string]*topodatapb.ShardReplication{ + "zone1": { + "ks1": { + "0": &topodatapb.ShardReplication{ 
+ Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 100}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 101}}, + }, + }, + }, + }, + "zone2": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 200}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 201}}, + }, + }, + }, }, }, - topoError: nil, - req: &vtctldatapb.GetShardRequest{ - Keyspace: "testkeyspace", - ShardName: "-", + req: &vtctldatapb.GetShardReplicationRequest{ + Keyspace: "ks1", + Shard: "0", + Cells: []string{"zone2"}, }, - expected: &vtctldatapb.GetShardResponse{ - Shard: &vtctldatapb.Shard{ - Keyspace: "testkeyspace", - Name: "-", - Shard: &topodatapb.Shard{ - KeyRange: &topodatapb.KeyRange{}, - IsPrimaryServing: true, + expected: &vtctldatapb.GetShardReplicationResponse{ + ShardReplicationByCell: map[string]*topodatapb.ShardReplication{ + "zone2": { + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 200}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 201}}, + }, }, }, }, - shouldErr: false, }, { - name: "shard not found", - topo: nil, - topoError: nil, - req: &vtctldatapb.GetShardRequest{ - Keyspace: "testkeyspace", - ShardName: "-", + name: "all cells topo down", + shardReplicationByCell: map[string]map[string]map[string]*topodatapb.ShardReplication{ + "zone1": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 100}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 101}}, + }, + }, + }, + }, + "zone2": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 200}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 201}}, + }, 
+ }, + }, + }, }, + req: &vtctldatapb.GetShardReplicationRequest{ + Keyspace: "ks1", + Shard: "0", + }, + topoError: errors.New("topo down for testing"), shouldErr: true, }, { - name: "unavailable topo server", - topo: []*vtctldatapb.Shard{ - { - Keyspace: "testkeyspace", - Name: "-", + name: "cell filtering topo down", + shardReplicationByCell: map[string]map[string]map[string]*topodatapb.ShardReplication{ + "zone1": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 100}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone1", Uid: 101}}, + }, + }, + }, + }, + "zone2": { + "ks1": { + "0": &topodatapb.ShardReplication{ + Nodes: []*topodatapb.ShardReplication_Node{ + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 200}}, + {TabletAlias: &topodatapb.TabletAlias{Cell: "zone2", Uid: 201}}, + }, + }, + }, }, }, - topoError: assert.AnError, - req: &vtctldatapb.GetShardRequest{}, + req: &vtctldatapb.GetShardReplicationRequest{ + Keyspace: "ks1", + Shard: "0", + Cells: []string{"zone2"}, + }, + topoError: errors.New("topo down for testing"), shouldErr: true, }, } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - cells := []string{"zone1", "zone2", "zone3"} + cells := make([]string, 0, len(tt.shardReplicationByCell)) + for cell := range tt.shardReplicationByCell { + cells = append(cells, cell) + } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) - vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) - }) + ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) - testutil.AddShards(ctx, t, ts, tt.topo...) 
+ for cell, shardReplication := range tt.shardReplicationByCell { + for ks, shardRepl := range shardReplication { + for shard, repl := range shardRepl { + err := ts.UpdateShardReplicationFields(ctx, cell, ks, shard, func(sr *topodatapb.ShardReplication) error { + sr.Nodes = repl.Nodes + return nil + }) + require.NoError(t, err, "UpdateShardReplicationFields(%s, %s, %s, %+v) failed", cell, ks, shard, repl) + } + } + } if tt.topoError != nil { - topofactory.SetError(tt.topoError) + factory.SetError(tt.topoError) } - resp, err := vtctld.GetShard(ctx, tt.req) + vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(vtenv.NewTestEnv(), ts) + }) + resp, err := vtctld.GetShardReplication(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) return } + require.NoError(t, err) utils.MustMatch(t, tt.expected, resp) }) } @@ -5873,8 +6576,6 @@ func TestGetSrvKeyspaceNames(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -5899,7 +6600,7 @@ func TestGetSrvKeyspaceNames(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.GetSrvKeyspaceNames(ctx, tt.req) if tt.shouldErr { @@ -6040,8 +6741,6 @@ func TestGetSrvKeyspaces(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -6056,7 +6755,7 @@ func TestGetSrvKeyspaces(t *testing.T) { testutil.AddSrvKeyspaces(t, ts, tt.srvKeyspaces...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) if tt.topoErr != nil { @@ -6083,7 +6782,7 @@ func TestGetSrvVSchema(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) zone1SrvVSchema := &vschemapb.SrvVSchema{ @@ -6285,8 +6984,6 @@ func TestGetSrvVSchemas(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -6294,7 +6991,7 @@ func TestGetSrvVSchemas(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2", "zone3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) zone1SrvVSchema := &vschemapb.SrvVSchema{ @@ -6354,7 +7051,7 @@ func TestGetTablet(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) tablet := &topodatapb.Tablet{ @@ -6393,12 +7090,13 @@ func TestGetTablets(t *testing.T) { t.Parallel() tests := []struct { - name string - cells []string - tablets []*topodatapb.Tablet - req *vtctldatapb.GetTabletsRequest - expected []*topodatapb.Tablet - shouldErr bool + name string + cells []string + unreachableCells []string // Cells that will return a ctx timeout error when trying to get tablets + tablets []*topodatapb.Tablet + req *vtctldatapb.GetTabletsRequest + expected []*topodatapb.Tablet + 
shouldErr bool }{ { name: "no tablets", @@ -6747,6 +7445,72 @@ func TestGetTablets(t *testing.T) { }, shouldErr: true, }, + { + name: "multiple cells with one timing out and strict false", + cells: []string{"cell1", "cell2"}, + unreachableCells: []string{"cell2"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + Uid: 200, + }, + Keyspace: "ks1", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Cells: []string{"cell1", "cell2"}, + Strict: false, + }, + shouldErr: false, + expected: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + }, + }, + }, + { + name: "multiple cells with one timing out and strict true", + cells: []string{"cell1", "cell2"}, + unreachableCells: []string{"cell2"}, + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell1", + Uid: 100, + }, + Keyspace: "ks1", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "cell2", + Uid: 200, + }, + Keyspace: "ks1", + Shard: "-", + }, + }, + req: &vtctldatapb.GetTabletsRequest{ + Cells: []string{"cell1", "cell2"}, + Strict: true, + }, + shouldErr: true, + }, { name: "in nonstrict mode if all cells fail the request fails", cells: []string{"cell1"}, @@ -6968,8 +7732,6 @@ func TestGetTablets(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -6977,12 +7739,32 @@ func TestGetTablets(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
- resp, err := vtctld.GetTablets(ctx, tt.req) + for _, cell := range tt.cells { + if slices.Contains(tt.unreachableCells, cell) { + err := ts.UpdateCellInfoFields(ctx, cell, func(ci *topodatapb.CellInfo) error { + ci.ServerAddress = memorytopo.UnreachableServerAddr + return nil + }) + require.NoError(t, err, "failed to update %s cell to point at unreachable address", cell) + } + } + + var ( + resp *vtctldatapb.GetTabletsResponse + err error + ) + if len(tt.unreachableCells) > 0 { + gtCtx, gtCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer gtCancel() + resp, err = vtctld.GetTablets(gtCtx, tt.req) + } else { + resp, err = vtctld.GetTablets(ctx, tt.req) + } if tt.shouldErr { assert.Error(t, err) return @@ -7001,7 +7783,7 @@ func TestGetTopologyPath(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) err := ts.CreateKeyspace(ctx, "keyspace1", &topodatapb.Keyspace{}) @@ -7062,8 +7844,6 @@ func TestGetTopologyPath(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -7090,7 +7870,7 @@ func TestGetVSchema(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) t.Run("found", func(t *testing.T) { @@ -7309,11 +8089,11 @@ func TestLaunchSchemaMigration(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.name, func(t *testing.T) { + t.Parallel() - ctx, Launch := context.WithCancel(context.Background()) - defer Launch() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() ts := 
memorytopo.NewServer(ctx, "zone1") testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -7321,7 +8101,7 @@ func TestLaunchSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.LaunchSchemaMigration(ctx, test.req) @@ -7408,7 +8188,7 @@ func TestPingTablet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.PingTablet(ctx, tt.req) @@ -7439,7 +8219,7 @@ func TestPlannedReparentShard(t *testing.T) { req *vtctldatapb.PlannedReparentShardRequest expected *vtctldatapb.PlannedReparentShardResponse expectEventsToOccur bool - shouldErr bool + expectedErr string }{ { name: "successful reparent", @@ -7554,7 +8334,6 @@ func TestPlannedReparentShard(t *testing.T) { }, }, expectEventsToOccur: true, - shouldErr: false, }, { // Note: this is testing the error-handling done in @@ -7570,7 +8349,7 @@ func TestPlannedReparentShard(t *testing.T) { Shard: "-", }, expectEventsToOccur: false, - shouldErr: true, + expectedErr: "node doesn't exist: keyspaces/testkeyspace/shards/-", }, { name: "invalid WaitReplicasTimeout", @@ -7580,7 +8359,71 @@ func TestPlannedReparentShard(t *testing.T) { Nanos: 1, }, }, - shouldErr: true, + expectedErr: "duration: seconds:-1 nanos:1 is out of range for time.Duration", + }, + { + name: "tablet unreachable", + ts: memorytopo.NewServer(ctx, "zone1"), + tablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + PrimaryTermStartTime: &vttime.Time{ + Seconds: 100, + }, + Keyspace: "testkeyspace", 
+ Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + Type: topodatapb.TabletType_REPLICA, + Keyspace: "testkeyspace", + Shard: "-", + }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_RDONLY, + Keyspace: "testkeyspace", + Shard: "-", + }, + }, + tmc: &testutil.TabletManagerClient{ + // This is only needed to verify reachability, so empty results are fine. + PrimaryStatusResults: map[string]struct { + Status *replicationdatapb.PrimaryStatus + Error error + }{ + "zone1-0000000200": { + Error: fmt.Errorf("primary status failed"), + }, + "zone1-0000000101": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + "zone1-0000000100": { + Status: &replicationdatapb.PrimaryStatus{}, + }, + }, + }, + req: &vtctldatapb.PlannedReparentShardRequest{ + Keyspace: "testkeyspace", + Shard: "-", + NewPrimary: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 200, + }, + WaitReplicasTimeout: protoutil.DurationToProto(time.Millisecond * 10), + }, + expectEventsToOccur: true, + expectedErr: "primary status failed", }, } @@ -7593,7 +8436,7 @@ func TestPlannedReparentShard(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -7610,8 +8453,8 @@ func TestPlannedReparentShard(t *testing.T) { testutil.AssertLogutilEventsOccurred(t, resp, "expected events to occur during ERS") }() - if tt.shouldErr { - assert.Error(t, err) + if tt.expectedErr != "" { + assert.EqualError(t, err, tt.expectedErr) return } @@ -7636,7 +8479,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.RebuildKeyspaceGraph(ctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ @@ -7653,7 +8496,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.RebuildKeyspaceGraph(context.Background(), &vtctldatapb.RebuildKeyspaceGraphRequest{ @@ -7673,7 +8516,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) factory.SetError(assert.AnError) @@ -7694,7 +8537,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) lctx, unlock, lerr := 
ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") @@ -7730,7 +8573,6 @@ func TestRebuildVSchemaGraph(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -7743,7 +8585,7 @@ func TestRebuildVSchemaGraph(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.RebuildVSchemaGraph(ctx, req) if tt.shouldErr { @@ -7842,7 +8684,7 @@ func TestRefreshState(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.RefreshState(ctx, tt.req) if tt.shouldErr { @@ -8027,7 +8869,7 @@ func TestRefreshStateByShard(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.RefreshStateByShard(ctx, tt.req) if tt.shouldErr { @@ -8122,7 +8964,6 @@ func TestReloadSchema(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -8131,7 +8972,7 @@ func TestReloadSchema(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.ReloadSchema(ctx, tt.req) if tt.shouldErr { @@ -8216,7 +9057,6 @@ func TestReloadSchemaKeyspace(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -8229,7 +9069,7 @@ func TestReloadSchemaKeyspace(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.ReloadSchemaKeyspace(ctx, tt.req) if tt.shouldErr { @@ -8374,7 +9214,6 @@ func TestReloadSchemaShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -8387,7 +9226,7 @@ func TestReloadSchemaShard(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.ReloadSchemaShard(ctx, tt.req) if tt.shouldErr { @@ -8406,7 +9245,7 @@ func TestRemoveBackup(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) setup := func() { @@ -8586,8 +9425,6 @@ func TestRemoveKeyspaceCell(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -8597,7 +9434,7 @@ func TestRemoveKeyspaceCell(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) // Setup topo @@ -8875,8 +9712,6 @@ func TestRemoveShardCell(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -8886,7 +9721,7 @@ func TestRemoveShardCell(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) // Setup shard topos and replication graphs. @@ -9481,8 +10316,6 @@ func TestReparentTablet(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -9496,7 +10329,7 @@ func TestReparentTablet(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -9629,7 +10462,7 @@ func TestRestoreFromBackup(t *testing.T) { }, tt.tablets..., ) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) client := localvtctldclient.New(vtctld) stream, err := client.RestoreFromBackup(ctx, tt.req) @@ -9847,7 +10680,7 @@ func TestRetrySchemaMigration(t *testing.T) { }, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.RetrySchemaMigration(ctx, test.req) @@ -9943,7 +10776,6 @@ func TestRunHealthCheck(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -9954,7 +10786,7 @@ func TestRunHealthCheck(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.RunHealthCheck(ctx, tt.req) if tt.shouldErr { @@ -10023,7 +10855,6 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -10034,7 +10865,7 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.SetKeyspaceDurabilityPolicy(ctx, tt.req) if tt.expectedErr != "" { @@ -10093,7 +10924,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { name: "lock error", setup: func(t *testing.T, tt *testcase) context.Context { var cancel func() - tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ctx, cancel = context.WithCancel(ctx) tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -10131,7 +10962,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.SetShardIsPrimaryServing(tt.ctx, tt.req) if tt.shouldErr { @@ -10345,7 +11176,7 @@ func TestSetShardTabletControl(t *testing.T) { name: "keyspace lock error", setup: func(t *testing.T, tt *testcase) { var cancel func() - tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ctx, cancel = context.WithCancel(ctx) tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -10381,7 +11212,7 @@ func TestSetShardTabletControl(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.SetShardTabletControl(tt.ctx, tt.req) if tt.shouldErr { @@ -10573,7 +11404,6 @@ func TestSetWritable(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -10585,7 +11415,7 @@ func 
TestSetWritable(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.SetWritable(ctx, tt.req) @@ -10606,7 +11436,7 @@ func TestShardReplicationAdd(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) tablets := []*topodatapb.Tablet{ @@ -10774,7 +11604,7 @@ func TestShardReplicationPositions(t *testing.T) { }, tmc: &testutil.TabletManagerClient{ PrimaryPositionDelays: map[string]time.Duration{ - "zone1-0000000100": time.Millisecond * 100, + "zone1-0000000100": time.Second * 2, }, PrimaryPositionResults: map[string]struct { Position string @@ -10785,7 +11615,7 @@ func TestShardReplicationPositions(t *testing.T) { }, }, ReplicationStatusDelays: map[string]time.Duration{ - "zone1-0000000101": time.Millisecond * 100, + "zone1-0000000101": time.Second * 2, }, ReplicationStatusResults: map[string]struct { Position *replicationdatapb.Status @@ -10798,7 +11628,7 @@ func TestShardReplicationPositions(t *testing.T) { }, }, }, - ctxTimeout: time.Millisecond * 10, + ctxTimeout: time.Second, req: &vtctldatapb.ShardReplicationPositionsRequest{ Keyspace: "testkeyspace", Shard: "-", @@ -10901,7 +11731,7 @@ func TestShardReplicationPositions(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) requestCtx := ctx @@ -10932,7 +11762,7 @@ func TestShardReplicationRemove(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) tablets := []*topodatapb.Tablet{ @@ -11084,7 +11914,6 @@ func TestSourceShardAdd(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -11092,7 +11921,7 @@ func TestSourceShardAdd(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -11219,7 +12048,6 @@ func TestSourceShardDelete(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -11227,7 +12055,7 @@ func TestSourceShardDelete(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -11405,7 +12233,6 @@ func TestStartReplication(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -11419,7 +12246,7 @@ func TestStartReplication(t *testing.T) { AlsoSetShardPrimary: true, }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.StartReplication(ctx, tt.req) @@ -11544,7 +12371,6 @@ func TestStopReplication(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -11556,7 +12382,7 @@ func TestStopReplication(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) _, err := vtctld.StopReplication(ctx, tt.req) @@ -11929,8 +12755,6 @@ func TestTabletExternallyReparented(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -11943,7 +12767,7 @@ func TestTabletExternallyReparented(t *testing.T) { TopoServer: ts, } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) if tt.tmcHasNoTopo { @@ -12110,7 +12934,6 @@ func TestUpdateCellInfo(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -12128,7 +12951,7 @@ func TestUpdateCellInfo(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.UpdateCellInfo(ctx, tt.req) if tt.shouldErr { @@ -12251,7 +13074,6 @@ func TestUpdateCellsAlias(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -12278,7 +13100,7 @@ func TestUpdateCellsAlias(t *testing.T) { } vtctld := 
testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.UpdateCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -12386,7 +13208,7 @@ func TestValidate(t *testing.T) { SkipShardCreation: false, }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.Validate(ctx, &vtctldatapb.ValidateRequest{ @@ -12503,7 +13325,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) schema1 := &tabletmanagerdatapb.SchemaDefinition{ @@ -12689,7 +13511,7 @@ func TestValidateVersionKeyspace(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) tests := []*struct { @@ -12804,7 +13626,7 @@ func TestValidateVersionShard(t *testing.T) { }, tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) tests := []*struct { @@ -13396,7 +14218,7 @@ func TestValidateShard(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(vtenv.NewTestEnv(), ts) }) resp, err := vtctld.ValidateShard(ctx, tt.req) if tt.shouldErr { diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go index dc273dcb962..1097d2994cc 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_backupstorage.go @@ -104,7 +104,7 @@ func (a handlesByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } // *backupstorage.BackupStorageImplementation to this value before use. const BackupStorageImplementation = "grpcvtctldserver.testutil" -// BackupStorage is the singleton test backupstorage.BackupStorage intastnce. It +// BackupStorage is the singleton test backupstorage.BackupStorage instance. It // is public and singleton to allow tests to both mutate and assert against its // state. var BackupStorage = &backupStorage{ diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index ba7c8477d22..9f10ab6c04c 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -186,6 +186,7 @@ type TabletManagerClient struct { } // keyed by tablet alias. ChangeTabletTypeResult map[string]error + ChangeTabletTypeDelays map[string]time.Duration // keyed by tablet alias. DemotePrimaryDelays map[string]time.Duration // keyed by tablet alias. 
@@ -208,6 +209,13 @@ type TabletManagerClient struct { Error error } // keyed by tablet alias. + ExecuteMultiFetchAsDbaDelays map[string]time.Duration + // keyed by tablet alias. + ExecuteMultiFetchAsDbaResults map[string]struct { + Response []*querypb.QueryResult + Error error + } + // keyed by tablet alias. ExecuteHookDelays map[string]time.Duration // keyed by tablet alias. ExecuteHookResults map[string]struct { @@ -461,7 +469,20 @@ func (fake *TabletManagerClient) Backup(ctx context.Context, tablet *topodatapb. // ChangeType is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) ChangeType(ctx context.Context, tablet *topodatapb.Tablet, newType topodatapb.TabletType, semiSync bool) error { - if result, ok := fake.ChangeTabletTypeResult[topoproto.TabletAliasString(tablet.Alias)]; ok { + key := topoproto.TabletAliasString(tablet.Alias) + + if fake.ChangeTabletTypeDelays != nil { + if delay, ok := fake.ChangeTabletTypeDelays[key]; ok { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + + if result, ok := fake.ChangeTabletTypeResult[key]; ok { return result } @@ -551,6 +572,30 @@ func (fake *TabletManagerClient) ExecuteFetchAsDba(ctx context.Context, tablet * return nil, fmt.Errorf("%w: no ExecuteFetchAsDba result set for tablet %s", assert.AnError, key) } +// ExecuteMultiFetchAsDba is part of the tmclient.TabletManagerClient interface. 
+func (fake *TabletManagerClient) ExecuteMultiFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) { + if fake.ExecuteMultiFetchAsDbaResults == nil { + return nil, fmt.Errorf("%w: no ExecuteMultiFetchAsDba results on fake TabletManagerClient", assert.AnError) + } + + key := topoproto.TabletAliasString(tablet.Alias) + if fake.ExecuteMultiFetchAsDbaDelays != nil { + if delay, ok := fake.ExecuteMultiFetchAsDbaDelays[key]; ok { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(delay): + // proceed to results + } + } + } + if result, ok := fake.ExecuteMultiFetchAsDbaResults[key]; ok { + return result.Response, result.Error + } + + return nil, fmt.Errorf("%w: no ExecuteMultiFetchAsDba result set for tablet %s", assert.AnError, key) +} + // ExecuteHook is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) ExecuteHook(ctx context.Context, tablet *topodatapb.Tablet, hook *hk.Hook) (*hk.HookResult, error) { if fake.ExecuteHookResults == nil { @@ -1061,7 +1106,7 @@ func (fake *TabletManagerClient) RunHealthCheck(ctx context.Context, tablet *top } // SetReplicationSource is part of the tmclient.TabletManagerClient interface. 
-func (fake *TabletManagerClient) SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool) error { +func (fake *TabletManagerClient) SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error { if fake.SetReplicationSourceResults == nil { return assert.AnError } @@ -1348,7 +1393,7 @@ func (fake *TabletManagerClient) UndoDemotePrimary(ctx context.Context, tablet * return assert.AnError } -// VReplicationExec is part of the tmclient.TabletManagerCLient interface. +// VReplicationExec is part of the tmclient.TabletManagerClient interface. func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) { if fake.VReplicationExecResults == nil { return nil, assert.AnError @@ -1374,7 +1419,7 @@ func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *t if resultsForTablet, ok := fake.VReplicationExecResults[key]; ok { // Round trip the expected query both to ensure it's valid and to // standardize on capitalization and formatting. - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { return nil, err } @@ -1393,7 +1438,7 @@ func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *t return nil, assert.AnError } -// CheckThrottler is part of the tmclient.TabletManagerCLient interface. +// CheckThrottler is part of the tmclient.TabletManagerClient interface. 
func (fake *TabletManagerClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { if fake.CheckThrottlerResults == nil { return nil, assert.AnError diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/util.go b/go/vt/vtctl/grpcvtctldserver/testutil/util.go index 97638e9c41e..b685d22840b 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/util.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/util.go @@ -41,7 +41,7 @@ import ( // implementation, then runs the test func with a client created to point at // that server. func WithTestServer( - t *testing.T, + ctx context.Context, t *testing.T, server vtctlservicepb.VtctldServer, test func(t *testing.T, client vtctldclient.VtctldClient), ) { @@ -56,7 +56,7 @@ func WithTestServer( go s.Serve(lis) defer s.Stop() - client, err := vtctldclient.New("grpc", lis.Addr().String()) + client, err := vtctldclient.New(ctx, "grpc", lis.Addr().String()) require.NoError(t, err, "cannot create vtctld client") defer client.Close() @@ -67,7 +67,7 @@ func WithTestServer( // implementations, and then runs the test func with N clients created, where // clients[i] points at servers[i]. func WithTestServers( - t *testing.T, + ctx context.Context, t *testing.T, test func(t *testing.T, clients ...vtctldclient.VtctldClient), servers ...vtctlservicepb.VtctldServer, ) { @@ -91,7 +91,7 @@ func WithTestServers( // Start up a test server for the head of our server slice, accumulate // the resulting client, and recurse on the tail of our server slice. - WithTestServer(t, servers[0], func(t *testing.T, client vtctldclient.VtctldClient) { + WithTestServer(ctx, t, servers[0], func(t *testing.T, client vtctldclient.VtctldClient) { clients = append(clients, client) withTestServers(t, servers[1:]...) 
}) diff --git a/go/vt/vtctl/grpcvtctldserver/topo.go b/go/vt/vtctl/grpcvtctldserver/topo.go index 70fae6613aa..5ec369ca17f 100644 --- a/go/vt/vtctl/grpcvtctldserver/topo.go +++ b/go/vt/vtctl/grpcvtctldserver/topo.go @@ -161,7 +161,7 @@ func deleteShardCell(ctx context.Context, ts *topo.Server, keyspace string, shar // Get all the tablet records for the aliases we've collected. Note that // GetTabletMap ignores ErrNoNode, which is convenient for our purpose; it // means a tablet was deleted but is still referenced. - tabletMap, err := ts.GetTabletMap(ctx, aliases) + tabletMap, err := ts.GetTabletMap(ctx, aliases, nil) if err != nil { return fmt.Errorf("GetTabletMap() failed: %w", err) } diff --git a/go/vt/vtctl/grpcvtctlserver/server.go b/go/vt/vtctl/grpcvtctlserver/server.go index afd7b9df1c9..d89f91b2d29 100644 --- a/go/vt/vtctl/grpcvtctlserver/server.go +++ b/go/vt/vtctl/grpcvtctlserver/server.go @@ -25,6 +25,8 @@ import ( "google.golang.org/grpc" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -40,12 +42,13 @@ import ( // VtctlServer is our RPC server type VtctlServer struct { vtctlservicepb.UnimplementedVtctlServer - ts *topo.Server + ts *topo.Server + env *vtenv.Environment } // NewVtctlServer returns a new Vtctl Server for the topo server. 
-func NewVtctlServer(ts *topo.Server) *VtctlServer { - return &VtctlServer{ts: ts} +func NewVtctlServer(env *vtenv.Environment, ts *topo.Server) *VtctlServer { + return &VtctlServer{env: env, ts: ts} } // ExecuteVtctlCommand is part of the vtctldatapb.VtctlServer interface @@ -72,13 +75,13 @@ func (s *VtctlServer) ExecuteVtctlCommand(args *vtctldatapb.ExecuteVtctlCommandR // create the wrangler tmc := tmclient.NewTabletManagerClient() defer tmc.Close() - wr := wrangler.New(logger, s.ts, tmc) + wr := wrangler.New(s.env, logger, s.ts, tmc) // execute the command return vtctl.RunCommand(stream.Context(), wr, args.Args) } // StartServer registers the VtctlServer for RPCs -func StartServer(s *grpc.Server, ts *topo.Server) { - vtctlservicepb.RegisterVtctlServer(s, NewVtctlServer(ts)) +func StartServer(s *grpc.Server, env *vtenv.Environment, ts *topo.Server) { + vtctlservicepb.RegisterVtctlServer(s, NewVtctlServer(env, ts)) } diff --git a/go/vt/vtctl/internal/grpcshim/bidi_stream.go b/go/vt/vtctl/internal/grpcshim/bidi_stream.go index a620cb929aa..92e7c24068b 100644 --- a/go/vt/vtctl/internal/grpcshim/bidi_stream.go +++ b/go/vt/vtctl/internal/grpcshim/bidi_stream.go @@ -101,7 +101,7 @@ type BidiStream struct { // NewBidiStream returns a BidiStream ready for embedded use. The provided ctx // will be used for the stream context, and types embedding BidiStream should -// check context cancellation/expiriation in their respective Recv and Send +// check context cancellation/expiration in their respective Recv and Send // methods. // // See the documentation on BidiStream for example usage. @@ -123,7 +123,7 @@ func (bs *BidiStream) Closed() <-chan struct{} { // IsClosed returns true if the stream has been closed for sending. // -// It is a conveince function for attempting to select on the channel returned +// It is a convenience function for attempting to select on the channel returned // by bs.Closed(). 
func (bs *BidiStream) IsClosed() bool { select { diff --git a/go/vt/vtctl/localvtctldclient/client.go b/go/vt/vtctl/localvtctldclient/client.go index f94f1124037..abd02b7e28a 100644 --- a/go/vt/vtctl/localvtctldclient/client.go +++ b/go/vt/vtctl/localvtctldclient/client.go @@ -17,6 +17,7 @@ limitations under the License. package localvtctldclient import ( + "context" "errors" "sync" @@ -58,7 +59,7 @@ func SetServer(s vtctlservicepb.VtctldServer) { server = s } -func localVtctldClientFactory(addr string) (vtctldclient.VtctldClient, error) { +func localVtctldClientFactory(ctx context.Context, addr string) (vtctldclient.VtctldClient, error) { m.Lock() defer m.Unlock() diff --git a/go/vt/vtctl/localvtctldclient/client_gen.go b/go/vt/vtctl/localvtctldclient/client_gen.go index 198fc12908f..e854514dcfa 100644 --- a/go/vt/vtctl/localvtctldclient/client_gen.go +++ b/go/vt/vtctl/localvtctldclient/client_gen.go @@ -39,6 +39,11 @@ func (client *localVtctldClient) AddCellsAlias(ctx context.Context, in *vtctldat return client.s.AddCellsAlias(ctx, in) } +// ApplyKeyspaceRoutingRules is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) ApplyKeyspaceRoutingRules(ctx context.Context, in *vtctldatapb.ApplyKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyKeyspaceRoutingRulesResponse, error) { + return client.s.ApplyKeyspaceRoutingRules(ctx, in) +} + // ApplyRoutingRules is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) ApplyRoutingRules(ctx context.Context, in *vtctldatapb.ApplyRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.ApplyRoutingRulesResponse, error) { return client.s.ApplyRoutingRules(ctx, in) @@ -241,11 +246,21 @@ func (client *localVtctldClient) ExecuteHook(ctx context.Context, in *vtctldatap return client.s.ExecuteHook(ctx, in) } +// ExecuteMultiFetchAsDBA is part of the vtctlservicepb.VtctldClient interface. 
+func (client *localVtctldClient) ExecuteMultiFetchAsDBA(ctx context.Context, in *vtctldatapb.ExecuteMultiFetchAsDBARequest, opts ...grpc.CallOption) (*vtctldatapb.ExecuteMultiFetchAsDBAResponse, error) { + return client.s.ExecuteMultiFetchAsDBA(ctx, in) +} + // FindAllShardsInKeyspace is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) FindAllShardsInKeyspace(ctx context.Context, in *vtctldatapb.FindAllShardsInKeyspaceRequest, opts ...grpc.CallOption) (*vtctldatapb.FindAllShardsInKeyspaceResponse, error) { return client.s.FindAllShardsInKeyspace(ctx, in) } +// ForceCutOverSchemaMigration is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) ForceCutOverSchemaMigration(ctx context.Context, in *vtctldatapb.ForceCutOverSchemaMigrationRequest, opts ...grpc.CallOption) (*vtctldatapb.ForceCutOverSchemaMigrationResponse, error) { + return client.s.ForceCutOverSchemaMigration(ctx, in) +} + // GetBackups is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) GetBackups(ctx context.Context, in *vtctldatapb.GetBackupsRequest, opts ...grpc.CallOption) (*vtctldatapb.GetBackupsResponse, error) { return client.s.GetBackups(ctx, in) @@ -276,6 +291,11 @@ func (client *localVtctldClient) GetKeyspace(ctx context.Context, in *vtctldatap return client.s.GetKeyspace(ctx, in) } +// GetKeyspaceRoutingRules is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) GetKeyspaceRoutingRules(ctx context.Context, in *vtctldatapb.GetKeyspaceRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspaceRoutingRulesResponse, error) { + return client.s.GetKeyspaceRoutingRules(ctx, in) +} + // GetKeyspaces is part of the vtctlservicepb.VtctldClient interface. 
func (client *localVtctldClient) GetKeyspaces(ctx context.Context, in *vtctldatapb.GetKeyspacesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetKeyspacesResponse, error) { return client.s.GetKeyspaces(ctx, in) @@ -306,6 +326,11 @@ func (client *localVtctldClient) GetShard(ctx context.Context, in *vtctldatapb.G return client.s.GetShard(ctx, in) } +// GetShardReplication is part of the vtctlservicepb.VtctldClient interface. +func (client *localVtctldClient) GetShardReplication(ctx context.Context, in *vtctldatapb.GetShardReplicationRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardReplicationResponse, error) { + return client.s.GetShardReplication(ctx, in) +} + // GetShardRoutingRules is part of the vtctlservicepb.VtctldClient interface. func (client *localVtctldClient) GetShardRoutingRules(ctx context.Context, in *vtctldatapb.GetShardRoutingRulesRequest, opts ...grpc.CallOption) (*vtctldatapb.GetShardRoutingRulesResponse, error) { return client.s.GetShardRoutingRules(ctx, in) diff --git a/go/vt/vtctl/reparent.go b/go/vt/vtctl/reparent.go index 7ed0f6582b9..4498228d9c7 100644 --- a/go/vt/vtctl/reparent.go +++ b/go/vt/vtctl/reparent.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -114,6 +115,7 @@ func commandPlannedReparentShard(ctx context.Context, wr *wrangler.Wrangler, sub } waitReplicasTimeout := subFlags.Duration("wait_replicas_timeout", topo.RemoteOperationTimeout, "time to wait for replicas to catch up on replication before and after reparenting") + tolerableReplicationLag := subFlags.Duration("tolerable-replication-lag", 0, "amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary") keyspaceShard := subFlags.String("keyspace_shard", "", 
"keyspace/shard of the shard that needs to be reparented") newPrimary := subFlags.String("new_primary", "", "alias of a tablet that should be the new primary") avoidTablet := subFlags.String("avoid_tablet", "", "alias of a tablet that should not be the primary, i.e. reparent to any other tablet if this one is the primary") @@ -149,7 +151,13 @@ func commandPlannedReparentShard(ctx context.Context, wr *wrangler.Wrangler, sub return err } } - return wr.PlannedReparentShard(ctx, keyspace, shard, newPrimaryAlias, avoidTabletAlias, *waitReplicasTimeout) + + return wr.PlannedReparentShard(ctx, keyspace, shard, reparentutil.PlannedReparentOptions{ + NewPrimaryAlias: newPrimaryAlias, + AvoidPrimaryAlias: avoidTabletAlias, + WaitReplicasTimeout: *waitReplicasTimeout, + TolerableReplLag: *tolerableReplicationLag, + }) } func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { @@ -189,8 +197,14 @@ func commandEmergencyReparentShard(ctx context.Context, wr *wrangler.Wrangler, s return err } } - unreachableReplicas := topoproto.ParseTabletSet(*ignoreReplicasList) - return wr.EmergencyReparentShard(ctx, keyspace, shard, tabletAlias, *waitReplicasTimeout, unreachableReplicas, *preventCrossCellPromotion, *waitForAllTablets) + + return wr.EmergencyReparentShard(ctx, keyspace, shard, reparentutil.EmergencyReparentOptions{ + NewPrimaryAlias: tabletAlias, + WaitAllTablets: *waitForAllTablets, + WaitReplicasTimeout: *waitReplicasTimeout, + IgnoreReplicas: topoproto.ParseTabletSet(*ignoreReplicasList), + PreventCrossCellPromotion: *preventCrossCellPromotion, + }) } func commandTabletExternallyReparented(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { diff --git a/go/vt/vtctl/reparentutil/durability.go b/go/vt/vtctl/reparentutil/durability.go index e68485a395c..29a5b2e712a 100644 --- a/go/vt/vtctl/reparentutil/durability.go +++ b/go/vt/vtctl/reparentutil/durability.go @@ 
-69,13 +69,13 @@ func init() { // Durabler is the interface which is used to get the promotion rules for candidates and the semi sync setup type Durabler interface { - // promotionRule represents the precedence in which we want to tablets to be promoted. + // PromotionRule represents the precedence in which we want to tablets to be promoted. // The higher the promotion rule of a tablet, the more we want it to be promoted in case of a failover - promotionRule(*topodatapb.Tablet) promotionrule.CandidatePromotionRule - // semiSyncAckers represents the number of semi-sync ackers required for a given tablet if it were to become the PRIMARY instance - semiSyncAckers(*topodatapb.Tablet) int - // isReplicaSemiSync returns whether the "replica" should send semi-sync acks if "primary" were to become the PRIMARY instance - isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool + PromotionRule(*topodatapb.Tablet) promotionrule.CandidatePromotionRule + // SemiSyncAckers represents the number of semi-sync ackers required for a given tablet if it were to become the PRIMARY instance + SemiSyncAckers(*topodatapb.Tablet) int + // IsReplicaSemiSync returns whether the "replica" should send semi-sync acks if "primary" were to become the PRIMARY instance + IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool } func RegisterDurability(name string, newDurablerFunc NewDurabler) { @@ -108,13 +108,13 @@ func PromotionRule(durability Durabler, tablet *topodatapb.Tablet) promotionrule if tablet == nil || tablet.Alias == nil { return promotionrule.MustNot } - return durability.promotionRule(tablet) + return durability.PromotionRule(tablet) } // SemiSyncAckers returns the primary semi-sync setting for the instance. // 0 means none. Non-zero specifies the number of required ackers. 
func SemiSyncAckers(durability Durabler, tablet *topodatapb.Tablet) int { - return durability.semiSyncAckers(tablet) + return durability.SemiSyncAckers(tablet) } // IsReplicaSemiSync returns the replica semi-sync setting from the tablet record. @@ -124,7 +124,7 @@ func IsReplicaSemiSync(durability Durabler, primary, replica *topodatapb.Tablet) if primary == nil || primary.Alias == nil || replica == nil || replica.Alias == nil { return false } - return durability.isReplicaSemiSync(primary, replica) + return durability.IsReplicaSemiSync(primary, replica) } //======================================================================= @@ -132,8 +132,8 @@ func IsReplicaSemiSync(durability Durabler, primary, replica *topodatapb.Tablet) // durabilityNone has no semi-sync and returns NeutralPromoteRule for Primary and Replica tablet types, MustNotPromoteRule for everything else type durabilityNone struct{} -// promotionRule implements the Durabler interface -func (d *durabilityNone) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { +// PromotionRule implements the Durabler interface +func (d *durabilityNone) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { switch tablet.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return promotionrule.Neutral @@ -141,13 +141,13 @@ func (d *durabilityNone) promotionRule(tablet *topodatapb.Tablet) promotionrule. 
return promotionrule.MustNot } -// semiSyncAckers implements the Durabler interface -func (d *durabilityNone) semiSyncAckers(tablet *topodatapb.Tablet) int { +// SemiSyncAckers implements the Durabler interface +func (d *durabilityNone) SemiSyncAckers(tablet *topodatapb.Tablet) int { return 0 } -// isReplicaSemiSync implements the Durabler interface -func (d *durabilityNone) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { +// IsReplicaSemiSync implements the Durabler interface +func (d *durabilityNone) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { return false } @@ -159,8 +159,8 @@ type durabilitySemiSync struct { rdonlySemiSync bool } -// promotionRule implements the Durabler interface -func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { +// PromotionRule implements the Durabler interface +func (d *durabilitySemiSync) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { switch tablet.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return promotionrule.Neutral @@ -168,13 +168,13 @@ func (d *durabilitySemiSync) promotionRule(tablet *topodatapb.Tablet) promotionr return promotionrule.MustNot } -// semiSyncAckers implements the Durabler interface -func (d *durabilitySemiSync) semiSyncAckers(tablet *topodatapb.Tablet) int { +// SemiSyncAckers implements the Durabler interface +func (d *durabilitySemiSync) SemiSyncAckers(tablet *topodatapb.Tablet) int { return 1 } -// isReplicaSemiSync implements the Durabler interface -func (d *durabilitySemiSync) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { +// IsReplicaSemiSync implements the Durabler interface +func (d *durabilitySemiSync) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { switch replica.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return true @@ -193,8 +193,8 @@ type durabilityCrossCell struct { rdonlySemiSync bool } -// 
promotionRule implements the Durabler interface -func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { +// PromotionRule implements the Durabler interface +func (d *durabilityCrossCell) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { switch tablet.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return promotionrule.Neutral @@ -202,13 +202,13 @@ func (d *durabilityCrossCell) promotionRule(tablet *topodatapb.Tablet) promotion return promotionrule.MustNot } -// semiSyncAckers implements the Durabler interface -func (d *durabilityCrossCell) semiSyncAckers(tablet *topodatapb.Tablet) int { +// SemiSyncAckers implements the Durabler interface +func (d *durabilityCrossCell) SemiSyncAckers(tablet *topodatapb.Tablet) int { return 1 } -// isReplicaSemiSync implements the Durabler interface -func (d *durabilityCrossCell) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { +// IsReplicaSemiSync implements the Durabler interface +func (d *durabilityCrossCell) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { switch replica.Type { case topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA: return primary.Alias.Cell != replica.Alias.Cell @@ -223,8 +223,8 @@ func (d *durabilityCrossCell) isReplicaSemiSync(primary, replica *topodatapb.Tab // durabilityTest is like durabilityNone. It overrides the type for a specific tablet to prefer. It is only meant to be used for testing purposes! 
type durabilityTest struct{} -// promotionRule implements the Durabler interface -func (d *durabilityTest) promotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { +// PromotionRule implements the Durabler interface +func (d *durabilityTest) PromotionRule(tablet *topodatapb.Tablet) promotionrule.CandidatePromotionRule { if topoproto.TabletAliasString(tablet.Alias) == "zone2-0000000200" { return promotionrule.Prefer } @@ -236,12 +236,12 @@ func (d *durabilityTest) promotionRule(tablet *topodatapb.Tablet) promotionrule. return promotionrule.MustNot } -// semiSyncAckers implements the Durabler interface -func (d *durabilityTest) semiSyncAckers(tablet *topodatapb.Tablet) int { +// SemiSyncAckers implements the Durabler interface +func (d *durabilityTest) SemiSyncAckers(tablet *topodatapb.Tablet) int { return 0 } -// isReplicaSemiSync implements the Durabler interface -func (d *durabilityTest) isReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { +// IsReplicaSemiSync implements the Durabler interface +func (d *durabilityTest) IsReplicaSemiSync(primary, replica *topodatapb.Tablet) bool { return false } diff --git a/go/vt/vtctl/reparentutil/durability_test.go b/go/vt/vtctl/reparentutil/durability_test.go index f1429b29621..5745da64f7e 100644 --- a/go/vt/vtctl/reparentutil/durability_test.go +++ b/go/vt/vtctl/reparentutil/durability_test.go @@ -326,7 +326,7 @@ func TestDurabilityTest(t *testing.T) { for _, testcase := range testcases { t.Run(topoproto.TabletAliasString(testcase.tablet.Alias), func(t *testing.T) { - rule := durabilityRules.promotionRule(testcase.tablet) + rule := durabilityRules.PromotionRule(testcase.tablet) assert.Equal(t, testcase.promotionRule, rule) }) } diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter.go b/go/vt/vtctl/reparentutil/emergency_reparenter.go index 7f190a4d994..c3542850bee 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter.go @@ -68,15 
+68,8 @@ type EmergencyReparentOptions struct { } // counters for Emergency Reparent Shard -var ( - // TODO(timvaillancourt): remove legacyERS* gauges in v19+. - legacyERSCounter = stats.NewGauge("ers_counter", "Number of times Emergency Reparent Shard has been run") - legacyERSSuccessCounter = stats.NewGauge("ers_success_counter", "Number of times Emergency Reparent Shard has succeeded") - legacyERSFailureCounter = stats.NewGauge("ers_failure_counter", "Number of times Emergency Reparent Shard has failed") - - ersCounter = stats.NewCountersWithMultiLabels("emergency_reparent_counts", "Number of times Emergency Reparent Shard has been run", - []string{"Keyspace", "Shard", "Result"}, - ) +var ersCounter = stats.NewCountersWithMultiLabels("emergency_reparent_counts", "Number of times Emergency Reparent Shard has been run", + []string{"Keyspace", "Shard", "Result"}, ) // NewEmergencyReparenter returns a new EmergencyReparenter object, ready to @@ -125,11 +118,9 @@ func (erp *EmergencyReparenter) ReparentShard(ctx context.Context, keyspace stri reparentShardOpTimings.Add("EmergencyReparentShard", time.Since(startTime)) switch err { case nil: - legacyERSSuccessCounter.Add(1) ersCounter.Add(append(statsLabels, successResult), 1) event.DispatchUpdate(ev, "finished EmergencyReparentShard") default: - legacyERSFailureCounter.Add(1) ersCounter.Add(append(statsLabels, failureResult), 1) event.DispatchUpdate(ev, "failed EmergencyReparentShard: "+err.Error()) } @@ -154,7 +145,6 @@ func (erp *EmergencyReparenter) getLockAction(newPrimaryAlias *topodatapb.Tablet func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *events.Reparent, keyspace, shard string, opts EmergencyReparentOptions) (err error) { // log the starting of the operation and increment the counter erp.logger.Infof("will initiate emergency reparent shard in keyspace - %s, shard - %s", keyspace, shard) - legacyERSCounter.Add(1) var ( stoppedReplicationSnapshot *replicationSnapshot @@ -308,7 
+298,7 @@ func (erp *EmergencyReparenter) reparentShardLocked(ctx context.Context, ev *eve // Since the new primary tablet belongs to the validCandidateTablets list, we no longer need any additional constraint checks // Final step is to promote our primary candidate - err = erp.promoteNewPrimary(ctx, ev, newPrimary, opts, tabletMap, stoppedReplicationSnapshot.statusMap) + _, err = erp.reparentReplicas(ctx, ev, newPrimary, tabletMap, stoppedReplicationSnapshot.statusMap, opts, false /* intermediateReparent */) if err != nil { return err } @@ -449,9 +439,16 @@ func (erp *EmergencyReparenter) promoteIntermediateSource( validCandidateTablets []*topodatapb.Tablet, opts EmergencyReparentOptions, ) ([]*topodatapb.Tablet, error) { - // we reparent all the other tablets to start replication from our new source + // Create a tablet map from all the valid replicas + validTabletMap := map[string]*topo.TabletInfo{} + for _, candidate := range validCandidateTablets { + alias := topoproto.TabletAliasString(candidate.Alias) + validTabletMap[alias] = tabletMap[alias] + } + + // we reparent all the other valid tablets to start replication from our new source // we wait for all the replicas so that we can choose a better candidate from the ones that started replication later - reachableTablets, err := erp.reparentReplicas(ctx, ev, source, tabletMap, statusMap, opts, true /* waitForAllReplicas */, false /* populateReparentJournal */) + reachableTablets, err := erp.reparentReplicas(ctx, ev, source, validTabletMap, statusMap, opts, true /* intermediateReparent */) if err != nil { return nil, err } @@ -480,8 +477,10 @@ func (erp *EmergencyReparenter) reparentReplicas( tabletMap map[string]*topo.TabletInfo, statusMap map[string]*replicationdatapb.StopReplicationStatus, opts EmergencyReparentOptions, - waitForAllReplicas bool, - populateReparentJournal bool, + intermediateReparent bool, // intermediateReparent represents whether the reparenting of the replicas is the final reparent or not. 
+ // Since ERS can sometimes promote a tablet, which isn't a candidate for promotion, if it is the most advanced, we don't want to + // call PromoteReplica on it. We just want to get all replicas to replicate from it to get caught up, after which we'll promote the primary + // candidate separately. During the final promotion, we call `PromoteReplica` and `PopulateReparentJournal`. ) ([]*topodatapb.Tablet, error) { var ( @@ -490,6 +489,8 @@ func (erp *EmergencyReparenter) reparentReplicas( ) replCtx, replCancel := context.WithTimeout(context.Background(), opts.WaitReplicasTimeout) + primaryCtx, primaryCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer primaryCancel() event.DispatchUpdate(ev, "reparenting all tablets") @@ -511,13 +512,26 @@ func (erp *EmergencyReparenter) reparentReplicas( rec := concurrency.AllErrorRecorder{} handlePrimary := func(alias string, tablet *topodatapb.Tablet) error { - position, err := erp.tmc.PrimaryPosition(replCtx, tablet) - if err != nil { - return err - } - if populateReparentJournal { + if !intermediateReparent { + var position string + var err error + if ev.ShardInfo.PrimaryAlias == nil { + erp.logger.Infof("setting up %v as new primary for an uninitialized cluster", alias) + // we call InitPrimary when the PrimaryAlias in the ShardInfo is empty. This happens when we have an uninitialized cluster. 
+ position, err = erp.tmc.InitPrimary(primaryCtx, tablet, SemiSyncAckers(opts.durability, tablet) > 0) + } else { + erp.logger.Infof("starting promotion for the new primary - %v", alias) + // we call PromoteReplica which changes the tablet type, fixes the semi-sync, set the primary to read-write and flushes the binlogs + position, err = erp.tmc.PromoteReplica(primaryCtx, tablet, SemiSyncAckers(opts.durability, tablet) > 0) + } + if err != nil { + return vterrors.Wrapf(err, "primary-elect tablet %v failed to be upgraded to primary: %v", alias, err) + } erp.logger.Infof("populating reparent journal on new primary %v", alias) - return erp.tmc.PopulateReparentJournal(replCtx, tablet, now, opts.lockAction, newPrimaryTablet.Alias, position) + err = erp.tmc.PopulateReparentJournal(primaryCtx, tablet, now, opts.lockAction, tablet.Alias, position) + if err != nil { + return vterrors.Wrapf(err, "failed to PopulateReparentJournal on primary: %v", err) + } } return nil } @@ -539,7 +553,7 @@ func (erp *EmergencyReparenter) reparentReplicas( forceStart = fs } - err := erp.tmc.SetReplicationSource(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart, IsReplicaSemiSync(opts.durability, newPrimaryTablet, ti.Tablet)) + err := erp.tmc.SetReplicationSource(replCtx, ti.Tablet, newPrimaryTablet.Alias, 0, "", forceStart, IsReplicaSemiSync(opts.durability, newPrimaryTablet, ti.Tablet), 0) if err != nil { err = vterrors.Wrapf(err, "tablet %v SetReplicationSource failed: %v", alias, err) rec.RecordError(err) @@ -552,8 +566,8 @@ func (erp *EmergencyReparenter) reparentReplicas( replicaMutex.Unlock() // Signal that at least one goroutine succeeded to SetReplicationSource. - // We do this only when we do not want to wait for all the replicas - if !waitForAllReplicas { + // We do this only when we do not want to wait for all the replicas. 
+ if !intermediateReparent { replSuccessCancel() } } @@ -587,10 +601,10 @@ func (erp *EmergencyReparenter) reparentReplicas( primaryErr := handlePrimary(topoproto.TabletAliasString(newPrimaryTablet.Alias), newPrimaryTablet) if primaryErr != nil { - erp.logger.Warningf("primary failed to PopulateReparentJournal") + erp.logger.Errorf("failed to promote %s to primary", topoproto.TabletAliasString(newPrimaryTablet.Alias)) replCancel() - return nil, vterrors.Wrapf(primaryErr, "failed to PopulateReparentJournal on primary: %v", primaryErr) + return nil, vterrors.Wrapf(primaryErr, "failed to promote %v to primary", topoproto.TabletAliasString(newPrimaryTablet.Alias)) } // We should only cancel the context that all the replicas are using when they are done. @@ -697,41 +711,11 @@ func (erp *EmergencyReparenter) identifyPrimaryCandidate( } } // Unreachable code. - // We should have found atleast 1 tablet in the valid list. + // We should have found at least 1 tablet in the valid list. // If the list is empty, then we should have errored out much sooner. return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unreachable - did not find a valid primary candidate even though the valid candidate list was non-empty") } -func (erp *EmergencyReparenter) promoteNewPrimary( - ctx context.Context, - ev *events.Reparent, - newPrimary *topodatapb.Tablet, - opts EmergencyReparentOptions, - tabletMap map[string]*topo.TabletInfo, - statusMap map[string]*replicationdatapb.StopReplicationStatus, -) error { - var err error - if ev.ShardInfo.PrimaryAlias == nil { - erp.logger.Infof("setting up %v as new primary for an uninitialized cluster", newPrimary.Alias) - // we call InitPrimary when the PrimaryAlias in the ShardInfo is empty. This happens when we have an uninitialized cluster. 
- _, err = erp.tmc.InitPrimary(ctx, newPrimary, SemiSyncAckers(opts.durability, newPrimary) > 0) - } else { - erp.logger.Infof("starting promotion for the new primary - %v", newPrimary.Alias) - // we call PromoteReplica which changes the tablet type, fixes the semi-sync, set the primary to read-write and flushes the binlogs - _, err = erp.tmc.PromoteReplica(ctx, newPrimary, SemiSyncAckers(opts.durability, newPrimary) > 0) - } - if err != nil { - return vterrors.Wrapf(err, "primary-elect tablet %v failed to be upgraded to primary: %v", newPrimary.Alias, err) - } - // we now reparent all the replicas to the new primary we have promoted. - // Here we do not need to wait for all the replicas, We can finish early when even 1 succeeds. - _, err = erp.reparentReplicas(ctx, ev, newPrimary, tabletMap, statusMap, opts, false /* waitForAllReplicas */, true /* populateReparentJournal */) - if err != nil { - return err - } - return nil -} - // filterValidCandidates filters valid tablets, keeping only the ones which can successfully be promoted without any constraint failures and can make forward progress on being promoted func (erp *EmergencyReparenter) filterValidCandidates(validTablets []*topodatapb.Tablet, tabletsReachable []*topodatapb.Tablet, prevPrimary *topodatapb.Tablet, opts EmergencyReparentOptions) ([]*topodatapb.Tablet, error) { var restrictedValidTablets []*topodatapb.Tablet diff --git a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go index d7f8bb6a1db..ffd2c4a3926 100644 --- a/go/vt/vtctl/reparentutil/emergency_reparenter_test.go +++ b/go/vt/vtctl/reparentutil/emergency_reparenter_test.go @@ -60,8 +60,6 @@ func TestNewEmergencyReparenter(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -100,8 +98,6 @@ func TestEmergencyReparenter_getLockAction(t *testing.T) { erp := &EmergencyReparenter{} for _, tt := range tests { - tt := tt - t.Run(tt.name, 
func(t *testing.T) { t.Parallel() @@ -145,14 +141,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -262,14 +250,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -398,14 +378,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000101": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000101": { - Error: nil, - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -521,14 +493,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { }, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, PopulateReparentJournalResults: map[string]error{ "zone1-0000000102": nil, }, @@ -1068,9 +1032,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-21", Error: nil, }, - "zone1-0000000102": { - Error: nil, - }, }, StopReplicationAndGetStatusResults: map[string]struct { StopStatus *replicationdatapb.StopReplicationStatus @@ -1174,14 +1135,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: assert.AnError, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, PopulateReparentJournalResults: map[string]error{ 
"zone1-0000000102": nil, }, @@ -1296,14 +1249,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -1416,14 +1361,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -1531,14 +1468,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -1664,14 +1593,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -1796,14 +1717,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -1900,8 +1813,6 @@ func TestEmergencyReparenter_reparentShardLocked(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) @@ -1949,7 +1860,7 @@ func TestEmergencyReparenter_reparentShardLocked(t 
*testing.T) { } } -func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { +func TestEmergencyReparenter_promotionOfNewPrimary(t *testing.T) { t.Parallel() tests := []struct { @@ -1974,14 +1885,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -2053,17 +1956,9 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { shouldErr: false, }, { - name: "PrimaryPosition error", + name: "PromoteReplica error", emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: fmt.Errorf("primary position error"), - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -2105,14 +2000,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": assert.AnError, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -2154,14 +2041,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -2217,14 +2096,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position 
string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -2284,14 +2155,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, PromoteReplicaResults: map[string]struct { Result string Error error @@ -2345,14 +2208,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, InitPrimaryResults: map[string]struct { Result string Error error @@ -2428,8 +2283,6 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { durability, _ := GetDurabilityPolicy("none") for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -2475,7 +2328,7 @@ func TestEmergencyReparenter_promoteNewPrimary(t *testing.T) { tt.emergencyReparentOps.durability = durability erp := NewEmergencyReparenter(ts, tt.tmc, logger) - err := erp.promoteNewPrimary(ctx, ev, tabletInfo.Tablet, tt.emergencyReparentOps, tt.tabletMap, tt.statusMap) + _, err := erp.reparentReplicas(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps, false) if tt.shouldErr { assert.Error(t, err) assert.Contains(t, err.Error(), tt.errShouldContain) @@ -2716,8 +2569,6 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -2735,9 +2586,6 @@ func TestEmergencyReparenter_waitForAllRelayLogsToApply(t *testing.T) { func TestEmergencyReparenterStats(t *testing.T) { ersCounter.ResetAll() - legacyERSCounter.Reset() - 
legacyERSSuccessCounter.Reset() - legacyERSFailureCounter.Reset() reparentShardOpTimings.Reset() emergencyReparentOps := EmergencyReparentOptions{} @@ -2754,14 +2602,6 @@ func TestEmergencyReparenterStats(t *testing.T) { Error: nil, }, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000102": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000100": nil, "zone1-0000000101": nil, @@ -2870,11 +2710,6 @@ func TestEmergencyReparenterStats(t *testing.T) { require.EqualValues(t, map[string]int64{"testkeyspace.-.success": 1}, ersCounter.Counts()) require.EqualValues(t, map[string]int64{"All": 1, "EmergencyReparentShard": 1}, reparentShardOpTimings.Counts()) - // check the legacy counter values - require.EqualValues(t, 1, legacyERSCounter.Get()) - require.EqualValues(t, 1, legacyERSSuccessCounter.Get()) - require.EqualValues(t, 0, legacyERSFailureCounter.Get()) - // set emergencyReparentOps to request a non existent tablet emergencyReparentOps.NewPrimaryAlias = &topodatapb.TabletAlias{ Cell: "bogus", @@ -2888,11 +2723,6 @@ func TestEmergencyReparenterStats(t *testing.T) { // check the counter values require.EqualValues(t, map[string]int64{"testkeyspace.-.success": 1, "testkeyspace.-.failure": 1}, ersCounter.Counts()) require.EqualValues(t, map[string]int64{"All": 2, "EmergencyReparentShard": 2}, reparentShardOpTimings.Counts()) - - // check the legacy counter values - require.EqualValues(t, 2, legacyERSCounter.Get()) - require.EqualValues(t, 1, legacyERSSuccessCounter.Get()) - require.EqualValues(t, 1, legacyERSFailureCounter.Get()) } func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { @@ -3159,8 +2989,6 @@ func TestEmergencyReparenter_findMostAdvanced(t *testing.T) { } func TestEmergencyReparenter_reparentReplicas(t *testing.T) { - t.Parallel() - tests := []struct { name string emergencyReparentOps EmergencyReparentOptions @@ -3174,6 +3002,7 @@ func 
TestEmergencyReparenter_reparentReplicas(t *testing.T) { statusMap map[string]*replicationdatapb.StopReplicationStatus shouldErr bool errShouldContain string + remoteOpTimeout time.Duration }{ { name: "success", @@ -3182,9 +3011,9 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: nil, @@ -3253,12 +3082,12 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { shouldErr: false, }, { - name: "PrimaryPosition error", + name: "PromoteReplica error", emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ - PrimaryPositionResults: map[string]struct { - Position string - Error error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: fmt.Errorf("primary position error"), @@ -3297,9 +3126,9 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": assert.AnError, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: nil, @@ -3338,9 +3167,9 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: nil, @@ -3393,9 +3222,9 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error 
error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: nil, @@ -3451,9 +3280,9 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: nil, @@ -3502,9 +3331,9 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error + PromoteReplicaResults: map[string]struct { + Result string + Error error }{ "zone1-0000000100": { Error: nil, @@ -3538,17 +3367,111 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { shard: "-", shouldErr: false, }, + { + name: "primary promotion gets infinitely stuck", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + PromoteReplicaResults: map[string]struct { + Result string + Error error + }{ + "zone1-0000000100": { + Error: nil, + }, + }, + PromoteReplicaDelays: map[string]time.Duration{ + "zone1-0000000100": 500 * time.Hour, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000101": nil, + "zone1-0000000102": nil, + }, + }, + remoteOpTimeout: 100 * time.Millisecond, + newPrimaryTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: 
&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored tablet", + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { // forceStart = false + Before: &replicationdatapb.Status{ + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), + }, + }, + "zone1-0000000102": { // forceStart = true + Before: &replicationdatapb.Status{ + IoState: int32(replication.ReplicationStateRunning), + SqlState: int32(replication.ReplicationStateRunning), + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + shouldErr: true, + errShouldContain: "failed to promote zone1-0000000100 to primary: primary-elect tablet zone1-0000000100 failed to be upgraded to primary: context deadline exceeded", + }, } durability, _ := GetDurabilityPolicy("none") for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() + if tt.remoteOpTimeout != 0 { + oldTimeout := topo.RemoteOperationTimeout + topo.RemoteOperationTimeout = tt.remoteOpTimeout + defer func() { + topo.RemoteOperationTimeout = oldTimeout + }() + } logger := logutil.NewMemoryLogger() - ev := &events.Reparent{} + ev := &events.Reparent{ + ShardInfo: topo.ShardInfo{ + Shard: &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 000, + }, + }, + }, + } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -3579,7 +3502,7 @@ func TestEmergencyReparenter_reparentReplicas(t *testing.T) { tt.emergencyReparentOps.durability = durability erp := NewEmergencyReparenter(ts, tt.tmc, logger) - _, err := erp.reparentReplicas(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps, false /* waitForAllReplicas */, true /* populateReparentJournal */) + _, err := 
erp.reparentReplicas(ctx, ev, tabletInfo.Tablet, tt.tabletMap, tt.statusMap, tt.emergencyReparentOps, false /* intermediateReparent */) if tt.shouldErr { assert.Error(t, err) assert.Contains(t, err.Error(), tt.errShouldContain) @@ -3617,14 +3540,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000101": nil, "zone1-0000000102": nil, @@ -3727,24 +3642,177 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { }, Hostname: "requires force start", }, + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + }, }, }, { - name: "all replicas failed", + name: "success - filter with valid tablets before", emergencyReparentOps: EmergencyReparentOptions{}, tmc: &testutil.TabletManagerClient{ PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000101": nil, + }, + }, + newSourceTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", }, }, - + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + "zone1-0000000404": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 404, + }, + Hostname: "ignored 
tablet", + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{ + "zone1-0000000101": { // forceStart = false + Before: &replicationdatapb.Status{ + IoState: int32(replication.ReplicationStateStopped), + SqlState: int32(replication.ReplicationStateStopped), + }, + }, + }, + keyspace: "testkeyspace", + shard: "-", + shouldErr: false, + result: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + validCandidateTablets: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, { + name: "success - only 2 tablets and they error", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, + SetReplicationSourceResults: map[string]error{ + "zone1-0000000101": fmt.Errorf("An error"), + }, + }, + newSourceTabletAlias: "zone1-0000000100", + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + "zone1-0000000101": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + "zone1-0000000102": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Hostname: "requires force start", + }, + }, + }, + statusMap: map[string]*replicationdatapb.StopReplicationStatus{}, + keyspace: "testkeyspace", + shard: "-", + shouldErr: false, + result: []*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, + }, + validCandidateTablets: 
[]*topodatapb.Tablet{ + { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Hostname: "primary-elect", + }, { + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + }, + }, + }, + { + name: "all replicas failed", + emergencyReparentOps: EmergencyReparentOptions{}, + tmc: &testutil.TabletManagerClient{ + PopulateReparentJournalResults: map[string]error{ + "zone1-0000000100": nil, + }, SetReplicationSourceResults: map[string]error{ // everyone fails, we all fail "zone1-0000000101": assert.AnError, @@ -3769,7 +3837,7 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { }, }, }, - "zone1-00000000102": { + "zone1-0000000102": { Tablet: &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -3812,14 +3880,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000101": nil, // this one succeeds, so we're good "zone1-0000000102": assert.AnError, @@ -3898,14 +3958,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { PopulateReparentJournalResults: map[string]error{ "zone1-0000000100": nil, }, - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000101": nil, "zone1-0000000102": nil, @@ -3990,8 +4042,6 @@ func TestEmergencyReparenter_promoteIntermediateSource(t *testing.T) { durability, _ := GetDurabilityPolicy("none") for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -4259,14 +4309,6 @@ func TestParentContextCancelled(t *testing.T) { emergencyReparentOps := EmergencyReparentOptions{IgnoreReplicas: 
sets.New[string]("zone1-0000000404"), WaitReplicasTimeout: time.Minute, durability: durability} // Make the replica tablet return its results after 3 seconds tmc := &testutil.TabletManagerClient{ - PrimaryPositionResults: map[string]struct { - Position string - Error error - }{ - "zone1-0000000100": { - Error: nil, - }, - }, SetReplicationSourceResults: map[string]error{ "zone1-0000000101": nil, }, @@ -4326,7 +4368,7 @@ func TestParentContextCancelled(t *testing.T) { time.Sleep(time.Second) cancel() }() - _, err = erp.reparentReplicas(ctx, ev, tabletMap[newPrimaryTabletAlias].Tablet, tabletMap, statusMap, emergencyReparentOps, false, false) + _, err = erp.reparentReplicas(ctx, ev, tabletMap[newPrimaryTabletAlias].Tablet, tabletMap, statusMap, emergencyReparentOps, true) require.NoError(t, err) } diff --git a/go/vt/vtctl/reparentutil/planned_reparenter.go b/go/vt/vtctl/reparentutil/planned_reparenter.go index 9fc933a8e35..7221508b702 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter.go @@ -60,6 +60,7 @@ type PlannedReparentOptions struct { NewPrimaryAlias *topodatapb.TabletAlias AvoidPrimaryAlias *topodatapb.TabletAlias WaitReplicasTimeout time.Duration + TolerableReplLag time.Duration // Private options managed internally. We use value-passing semantics to // set these options inside a PlannedReparent without leaking these details @@ -151,7 +152,7 @@ func (pr *PlannedReparenter) getLockAction(opts PlannedReparentOptions) string { // primary), as well as an error. // // It will also set the NewPrimaryAlias option if the caller did not specify -// one, provided it can choose a new primary candidate. See ChooseNewPrimary() +// one, provided it can choose a new primary candidate. See ElectNewPrimary() // for details on primary candidate selection. 
func (pr *PlannedReparenter) preflightChecks( ctx context.Context, @@ -176,22 +177,17 @@ func (pr *PlannedReparenter) preflightChecks( event.DispatchUpdate(ev, "current primary is different than tablet to avoid, nothing to do") return true, nil } + } - event.DispatchUpdate(ev, "searching for primary candidate") - - opts.NewPrimaryAlias, err = ChooseNewPrimary(ctx, pr.tmc, &ev.ShardInfo, tabletMap, opts.AvoidPrimaryAlias, opts.WaitReplicasTimeout, opts.durability, pr.logger) - if err != nil { - return true, err - } - - if opts.NewPrimaryAlias == nil { - return true, vterrors.Errorf(vtrpc.Code_INTERNAL, "cannot find a tablet to reparent to in the same cell as the current primary") - } - - pr.logger.Infof("elected new primary candidate %v", topoproto.TabletAliasString(opts.NewPrimaryAlias)) - event.DispatchUpdate(ev, "elected new primary candidate") + event.DispatchUpdate(ev, "electing a primary candidate") + opts.NewPrimaryAlias, err = ElectNewPrimary(ctx, pr.tmc, &ev.ShardInfo, tabletMap, opts.NewPrimaryAlias, opts.AvoidPrimaryAlias, opts.WaitReplicasTimeout, opts.TolerableReplLag, opts.durability, pr.logger) + if err != nil { + return true, err } + pr.logger.Infof("elected new primary candidate %v", topoproto.TabletAliasString(opts.NewPrimaryAlias)) + event.DispatchUpdate(ev, "elected new primary candidate") + primaryElectAliasStr := topoproto.TabletAliasString(opts.NewPrimaryAlias) newPrimaryTabletInfo, ok := tabletMap[primaryElectAliasStr] @@ -258,7 +254,7 @@ func (pr *PlannedReparenter) performGracefulPromotion( setSourceCtx, setSourceCancel := context.WithTimeout(ctx, opts.WaitReplicasTimeout) defer setSourceCancel() - if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, IsReplicaSemiSync(opts.durability, currentPrimary.Tablet, primaryElect)); err != nil { + if err := pr.tmc.SetReplicationSource(setSourceCtx, primaryElect, currentPrimary.Alias, 0, snapshotPos, true, IsReplicaSemiSync(opts.durability, 
currentPrimary.Tablet, primaryElect), 0); err != nil { return vterrors.Wrapf(err, "replication on primary-elect %v did not catch up in time; replication must be healthy to perform PlannedReparent", primaryElectAliasStr) } @@ -684,7 +680,7 @@ func (pr *PlannedReparenter) reparentTablets( // that it needs to start replication after transitioning from // PRIMARY => REPLICA. forceStartReplication := false - if err := pr.tmc.SetReplicationSource(replCtx, tablet, ev.NewPrimary.Alias, reparentJournalTimestamp, "", forceStartReplication, IsReplicaSemiSync(opts.durability, ev.NewPrimary, tablet)); err != nil { + if err := pr.tmc.SetReplicationSource(replCtx, tablet, ev.NewPrimary.Alias, reparentJournalTimestamp, "", forceStartReplication, IsReplicaSemiSync(opts.durability, ev.NewPrimary, tablet), 0); err != nil { rec.RecordError(vterrors.Wrapf(err, "tablet %v failed to SetReplicationSource(%v): %v", alias, primaryElectAliasStr, err)) } }(alias, tabletInfo.Tablet) diff --git a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go index c564a95167e..25e4c86f7c5 100644 --- a/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go +++ b/go/vt/vtctl/reparentutil/planned_reparenter_flaky_test.go @@ -62,8 +62,6 @@ func TestNewPlannedReparenter(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -444,8 +442,6 @@ func TestPlannedReparenter_ReparentShard(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -542,8 +538,6 @@ func TestPlannedReparenter_getLockAction(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -616,6 +610,114 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { }, shouldErr: false, }, + { + name: "new primary provided - replication lag is tolerable", + ev: &events.Reparent{ + ShardInfo: 
*topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + }, + tmc: &testutil.TabletManagerClient{ + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000100": { + Position: &replicationdatapb.Status{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-2", + ReplicationLagSeconds: 2, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + opts: &PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + TolerableReplLag: 10 * time.Second, + }, + expectedIsNoop: false, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + NewPrimary: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + shouldErr: false, + }, + { + name: "new primary provided - replication lag is not tolerable", + ev: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + }, + tmc: &testutil.TabletManagerClient{ + ReplicationStatusResults: map[string]struct { + Position *replicationdatapb.Status + Error error + }{ + "zone1-0000000100": { + Position: &replicationdatapb.Status{ + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-2", + ReplicationLagSeconds: 25, + }, + }, + }, + }, + tabletMap: map[string]*topo.TabletInfo{ + "zone1-0000000100": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, + }, + }, + opts: &PlannedReparentOptions{ + NewPrimaryAlias: &topodatapb.TabletAlias{ + Cell: 
"zone1", + Uid: 100, + }, + TolerableReplLag: 10 * time.Second, + }, + expectedIsNoop: true, + expectedEvent: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 500, + }, + }, nil), + }, + shouldErr: true, + }, { name: "invariants hold with primary selection", tmc: &testutil.TabletManagerClient{ @@ -745,10 +847,10 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { shouldErr: false, }, { - // this doesn't cause an actual error from ChooseNewPrimary, because + // this doesn't cause an actual error from ElectNewPrimary, because // there is no way to do that other than something going horribly wrong // in go runtime, however we do check that we - // get a non-nil result from ChooseNewPrimary in preflightChecks and + // get a non-nil result from ElectNewPrimary in preflightChecks and // bail out if we don't, so we're forcing that case here. name: "cannot choose new primary-elect", ev: &events.Reparent{ @@ -779,9 +881,12 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { shouldErr: true, }, { - name: "primary-elect is not in tablet map", - ev: &events.Reparent{}, - tabletMap: map[string]*topo.TabletInfo{}, + name: "primary-elect is not in tablet map", + ev: &events.Reparent{ + ShardInfo: *topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: nil, + }, nil), + }, tabletMap: map[string]*topo.TabletInfo{}, opts: &PlannedReparentOptions{ NewPrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -952,8 +1057,6 @@ func TestPlannedReparenter_preflightChecks(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1565,8 +1668,6 @@ func TestPlannedReparenter_performGracefulPromotion(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1727,8 +1828,6 @@ func 
TestPlannedReparenter_performInitialPromotion(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1898,8 +1997,6 @@ func TestPlannedReparenter_performPartialPromotionRecovery(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -2264,8 +2361,6 @@ func TestPlannedReparenter_performPotentialPromotion(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3085,8 +3180,6 @@ func TestPlannedReparenter_reparentShardLocked(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -3702,8 +3795,6 @@ func TestPlannedReparenter_reparentTablets(t *testing.T) { logger := logutil.NewMemoryLogger() for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go b/go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go index c92805b8955..261c783a228 100644 --- a/go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go +++ b/go/vt/vtctl/reparentutil/promotionrule/promotion_rule.go @@ -21,7 +21,6 @@ import ( ) // CandidatePromotionRule describe the promotion preference/rule for an instance. 
-// It maps to promotion_rule column in candidate_database_instance type CandidatePromotionRule string const ( diff --git a/go/vt/vtctl/reparentutil/replication.go b/go/vt/vtctl/reparentutil/replication.go index 9b33a5b0536..da67e735882 100644 --- a/go/vt/vtctl/reparentutil/replication.go +++ b/go/vt/vtctl/reparentutil/replication.go @@ -194,7 +194,7 @@ func SetReplicationSource(ctx context.Context, ts *topo.Server, tmc tmclient.Tab } isSemiSync := IsReplicaSemiSync(durability, shardPrimary.Tablet, tablet) - return tmc.SetReplicationSource(ctx, tablet, shardPrimary.Alias, 0, "", false, isSemiSync) + return tmc.SetReplicationSource(ctx, tablet, shardPrimary.Alias, 0, "", false, isSemiSync, 0) } // replicationSnapshot stores the status maps and the tablets that were reachable diff --git a/go/vt/vtctl/reparentutil/replication_test.go b/go/vt/vtctl/reparentutil/replication_test.go index ed7bd152e9c..922dc6bc2da 100644 --- a/go/vt/vtctl/reparentutil/replication_test.go +++ b/go/vt/vtctl/reparentutil/replication_test.go @@ -54,7 +54,7 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) { statusMap map[string]*replicationdatapb.StopReplicationStatus primaryStatusMap map[string]*replicationdatapb.PrimaryStatus // Note: for these tests, it's simpler to compare keys than actual - // mysql.Postion structs, which are just thin wrappers around the + // mysql.Position structs, which are just thin wrappers around the // mysql.GTIDSet interface. 
If a tablet alias makes it into the map, we // know it was chosen by the method, and that either // mysql.DecodePosition was successful (in the primary case) or @@ -205,8 +205,6 @@ func TestFindValidEmergencyReparentCandidates(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1280,8 +1278,6 @@ func Test_stopReplicationAndBuildStatusMaps(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { durability, err := GetDurabilityPolicy(tt.durability) require.NoError(t, err) @@ -1377,8 +1373,6 @@ func TestReplicaWasRunning(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1465,8 +1459,6 @@ func TestSQLThreadWasRunning(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1563,8 +1555,6 @@ func TestWaitForRelayLogsToApply(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtctl/reparentutil/util.go b/go/vt/vtctl/reparentutil/util.go index cfde8f34508..b3c4061ab70 100644 --- a/go/vt/vtctl/reparentutil/util.go +++ b/go/vt/vtctl/reparentutil/util.go @@ -19,9 +19,11 @@ package reparentutil import ( "context" "fmt" + "strings" "sync" "time" + "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "vitess.io/vitess/go/mysql/replication" @@ -32,7 +34,6 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/reparentutil/promotionrule" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -48,7 +49,7 @@ var ( successResult = "success" ) -// ChooseNewPrimary finds a tablet that should become a primary after reparent. +// ElectNewPrimary finds a tablet that should become a primary after reparent. 
// The criteria for the new primary-elect are (preferably) to be in the same // cell as the current primary, and to be different from avoidPrimaryAlias. The // tablet with the most advanced replication position is chosen to minimize the @@ -58,13 +59,15 @@ var ( // with transactions being executed on the current primary, so when all tablets // are at roughly the same position, then the choice of new primary-elect will // be somewhat unpredictable. -func ChooseNewPrimary( +func ElectNewPrimary( ctx context.Context, tmc tmclient.TabletManagerClient, shardInfo *topo.ShardInfo, tabletMap map[string]*topo.TabletInfo, + newPrimaryAlias *topodatapb.TabletAlias, avoidPrimaryAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, + tolerableReplLag time.Duration, durability Durabler, // (TODO:@ajm188) it's a little gross we need to pass this, maybe embed in the context? logger logutil.Logger, @@ -84,25 +87,51 @@ func ChooseNewPrimary( errorGroup, groupCtx = errgroup.WithContext(ctx) ) + // candidates are the list of tablets that can be potentially promoted after filtering out based on preliminary checks. + candidates := []*topodatapb.Tablet{} + reasonsToInvalidate := strings.Builder{} for _, tablet := range tabletMap { switch { + case newPrimaryAlias != nil: + // If newPrimaryAlias is provided, then that is the only valid tablet, even if it is not of type replica or in a different cell. 
+ if !topoproto.TabletAliasEqual(tablet.Alias, newPrimaryAlias) { + reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v does not match the new primary alias provided", topoproto.TabletAliasString(tablet.Alias))) + continue + } case primaryCell != "" && tablet.Alias.Cell != primaryCell: + reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v is not in the same cell as the previous primary", topoproto.TabletAliasString(tablet.Alias))) continue case avoidPrimaryAlias != nil && topoproto.TabletAliasEqual(tablet.Alias, avoidPrimaryAlias): + reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v matches the primary alias to avoid", topoproto.TabletAliasString(tablet.Alias))) continue case tablet.Tablet.Type != topodatapb.TabletType_REPLICA: + reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v is not a replica", topoproto.TabletAliasString(tablet.Alias))) continue } - tb := tablet.Tablet + candidates = append(candidates, tablet.Tablet) + } + + // There is only one tablet and tolerable replication lag is unspecified, + // then we don't need to find the position of the said tablet for sorting. + // We can just return the tablet quickly. + // This check isn't required, but it saves us an RPC call that is otherwise unnecessary. 
+ if len(candidates) == 1 && tolerableReplLag == 0 { + return candidates[0].Alias, nil + } + + for _, tablet := range candidates { + tb := tablet errorGroup.Go(func() error { // find and store the positions for the tablet - pos, err := findPositionForTablet(groupCtx, tb, logger, tmc, waitReplicasTimeout) + pos, replLag, err := findPositionAndLagForTablet(groupCtx, tb, logger, tmc, waitReplicasTimeout) mu.Lock() defer mu.Unlock() - if err == nil { + if err == nil && (tolerableReplLag == 0 || tolerableReplLag >= replLag) { validTablets = append(validTablets, tb) tabletPositions = append(tabletPositions, pos) + } else { + reasonsToInvalidate.WriteString(fmt.Sprintf("\n%v has %v replication lag which is more than the tolerable amount", topoproto.TabletAliasString(tablet.Alias), replLag)) } return err }) @@ -113,9 +142,9 @@ func ChooseNewPrimary( return nil, err } - // return nothing if there are no valid tablets available + // return an error if there are no valid tablets available if len(validTablets) == 0 { - return nil, nil + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "cannot find a tablet to reparent to%v", reasonsToInvalidate.String()) } // sort the tablets for finding the best primary @@ -127,9 +156,9 @@ func ChooseNewPrimary( return validTablets[0].Alias, nil } -// findPositionForTablet processes the replication position for a single tablet and +// findPositionAndLagForTablet processes the replication position and lag for a single tablet and // returns it. It is safe to call from multiple goroutines. 
-func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, error) { +func findPositionAndLagForTablet(ctx context.Context, tablet *topodatapb.Tablet, logger logutil.Logger, tmc tmclient.TabletManagerClient, waitTimeout time.Duration) (replication.Position, time.Duration, error) { logger.Infof("getting replication position from %v", topoproto.TabletAliasString(tablet.Alias)) ctx, cancel := context.WithTimeout(ctx, waitTimeout) @@ -140,10 +169,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge sqlErr, isSQLErr := sqlerror.NewSQLErrorFromError(err).(*sqlerror.SQLError) if isSQLErr && sqlErr != nil && sqlErr.Number() == sqlerror.ERNotReplica { logger.Warningf("no replication statue from %v, using empty gtid set", topoproto.TabletAliasString(tablet.Alias)) - return replication.Position{}, nil + return replication.Position{}, 0, nil } logger.Warningf("failed to get replication status from %v, ignoring tablet: %v", topoproto.TabletAliasString(tablet.Alias), err) - return replication.Position{}, err + return replication.Position{}, 0, err } // Use the relay log position if available, otherwise use the executed GTID set (binary log position). @@ -154,10 +183,10 @@ func findPositionForTablet(ctx context.Context, tablet *topodatapb.Tablet, logge pos, err := replication.DecodePosition(positionString) if err != nil { logger.Warningf("cannot decode replica position %v for tablet %v, ignoring tablet: %v", positionString, topoproto.TabletAliasString(tablet.Alias), err) - return replication.Position{}, err + return replication.Position{}, 0, err } - return pos, nil + return pos, time.Second * time.Duration(status.ReplicationLagSeconds), nil } // FindCurrentPrimary returns the current primary tablet of a shard, if any. 
The @@ -218,7 +247,7 @@ func ShardReplicationStatuses(ctx context.Context, ts *topo.Server, tmc tmclient if err != nil { return nil, nil, err } - tablets := topotools.CopyMapValues(tabletMap, []*topo.TabletInfo{}).([]*topo.TabletInfo) + tablets := maps.Values(tabletMap) log.Infof("Gathering tablet replication status for: %v", tablets) wg := sync.WaitGroup{} @@ -343,3 +372,15 @@ func waitForCatchUp( } return nil } + +// GetBackupCandidates is used to get a list of healthy tablets for backup +func GetBackupCandidates(tablets []*topo.TabletInfo, stats []*replicationdatapb.Status) (res []*topo.TabletInfo) { + for i, stat := range stats { + // shardTablets[i] and stats[i] is 1:1 mapping + // Always include TabletType_PRIMARY. Healthy shardTablets[i] will be added to tablets + if tablets[i].Type == topodatapb.TabletType_PRIMARY || stat != nil { + res = append(res, tablets[i]) + } + } + return res +} diff --git a/go/vt/vtctl/reparentutil/util_test.go b/go/vt/vtctl/reparentutil/util_test.go index a9e6274d490..dd13e48f7b7 100644 --- a/go/vt/vtctl/reparentutil/util_test.go +++ b/go/vt/vtctl/reparentutil/util_test.go @@ -61,7 +61,7 @@ func (fake *chooseNewPrimaryTestTMClient) ReplicationStatus(ctx context.Context, return nil, assert.AnError } -func TestChooseNewPrimary(t *testing.T) { +func TestElectNewPrimary(t *testing.T) { t.Parallel() ctx := context.Background() @@ -71,23 +71,242 @@ func TestChooseNewPrimary(t *testing.T) { tmc *chooseNewPrimaryTestTMClient shardInfo *topo.ShardInfo tabletMap map[string]*topo.TabletInfo + newPrimaryAlias *topodatapb.TabletAlias avoidPrimaryAlias *topodatapb.TabletAlias + tolerableReplLag time.Duration expected *topodatapb.TabletAlias - shouldErr bool + errContains []string }{ { name: "found a replica", tmc: &chooseNewPrimaryTestTMClient{ - // zone1-101 is behind zone1-102 + // zone1-101 is behind zone1-102 and zone1-102 has a tolerable replication lag replicationStatuses: map[string]*replicationdatapb.Status{ "zone1-0000000101": { 
Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", }, "zone1-0000000102": { - Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 20, + }, + }, + }, + tolerableReplLag: 50 * time.Second, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + errContains: nil, + }, + { + name: "new primary alias provided - no tolerable replication lag", + tolerableReplLag: 0, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + newPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + errContains: nil, + }, + { + name: "new primary alias provided 
- with tolerable replication lag", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-102 has a tolerable replication lag + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 20, + }, + }, + }, + tolerableReplLag: 50 * time.Second, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + newPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + errContains: nil, + }, + { + name: "new primary alias provided - with intolerable replication lag", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-102 has an intolerable replication lag + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 100, + }, + }, + }, + tolerableReplLag: 50 * time.Second, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + 
Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, }, }, }, + newPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + expected: nil, + errContains: []string{ + `cannot find a tablet to reparent to`, + `zone1-0000000100 does not match the new primary alias provided`, + `zone1-0000000101 does not match the new primary alias provided`, + `zone1-0000000102 has 1m40s replication lag which is more than the tolerable amount`, + }, + }, + { + name: "found a replica ignoring replica lag", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 is behind zone1-102 and we don't care about the replication lag + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 230, + }, + }, + }, + tolerableReplLag: 0, shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ PrimaryAlias: &topodatapb.TabletAlias{ Cell: "zone1", @@ -131,7 +350,67 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 102, }, - shouldErr: false, + errContains: nil, + }, + { + name: "found a replica - ignore one with replication lag", + tmc: &chooseNewPrimaryTestTMClient{ + // zone1-101 is behind zone1-102 + replicationStatuses: map[string]*replicationdatapb.Status{ + "zone1-0000000101": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1", + }, + "zone1-0000000102": { + Position: "MySQL56/3E11FA47-71CA-11E1-9E33-C80AA9429562:1-5", + ReplicationLagSeconds: 232, + }, + }, + }, + tolerableReplLag: 50 * time.Second, + shardInfo: topo.NewShardInfo("testkeyspace", "-", &topodatapb.Shard{ + PrimaryAlias: 
&topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + }, nil), + tabletMap: map[string]*topo.TabletInfo{ + "primary": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 100, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + }, + "replica1": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + "replica2": { + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 102, + }, + Type: topodatapb.TabletType_REPLICA, + }, + }, + }, + avoidPrimaryAlias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 0, + }, + expected: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 101, + }, + errContains: nil, }, { name: "found a replica - more advanced relay log position", @@ -191,7 +470,7 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 102, }, - shouldErr: false, + errContains: nil, }, { name: "no active primary in shard", @@ -231,7 +510,7 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 101, }, - shouldErr: false, + errContains: nil, }, { name: "avoid primary alias is nil", @@ -273,7 +552,7 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 101, }, - shouldErr: false, + errContains: nil, }, { name: "avoid primary alias and shard primary are nil", tmc: &chooseNewPrimaryTestTMClient{ @@ -312,7 +591,7 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 101, }, - shouldErr: false, + errContains: nil, }, { name: "no replicas in primary cell", @@ -366,8 +645,13 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 0, }, - expected: nil, - shouldErr: false, + expected: nil, + errContains: []string{ + `cannot find a tablet to reparent to`, + `zone2-0000000200 is not a replica`, + `zone1-0000000101 is not in the same cell as the previous primary`, + `zone1-0000000102 is not in the same cell as the previous primary`, + }, }, { name: "only available 
tablet is AvoidPrimary", @@ -403,8 +687,11 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 101, }, - expected: nil, - shouldErr: false, + expected: nil, + errContains: []string{ + `cannot find a tablet to reparent to +zone1-0000000101 matches the primary alias to avoid`, + }, }, { name: "no replicas in shard", @@ -430,22 +717,25 @@ func TestChooseNewPrimary(t *testing.T) { Cell: "zone1", Uid: 0, }, - expected: nil, - shouldErr: false, + expected: nil, + errContains: []string{ + `cannot find a tablet to reparent to +zone1-0000000100 is not a replica`, + }, }, } durability, err := GetDurabilityPolicy("none") require.NoError(t, err) for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() - actual, err := ChooseNewPrimary(ctx, tt.tmc, tt.shardInfo, tt.tabletMap, tt.avoidPrimaryAlias, time.Millisecond*50, durability, logger) - if tt.shouldErr { - assert.Error(t, err) + actual, err := ElectNewPrimary(ctx, tt.tmc, tt.shardInfo, tt.tabletMap, tt.newPrimaryAlias, tt.avoidPrimaryAlias, time.Millisecond*50, tt.tolerableReplLag, durability, logger) + if len(tt.errContains) > 0 { + for _, errC := range tt.errContains { + assert.ErrorContains(t, err, errC) + } return } @@ -465,6 +755,7 @@ func TestFindPositionForTablet(t *testing.T) { tmc *testutil.TabletManagerClient tablet *topodatapb.Tablet expectedPosition string + expectedLag time.Duration expectedErr string }{ { @@ -476,7 +767,8 @@ func TestFindPositionForTablet(t *testing.T) { }{ "zone1-0000000100": { Position: &replicationdatapb.Status{ - Position: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + Position: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + ReplicationLagSeconds: 201, }, }, }, @@ -487,6 +779,7 @@ func TestFindPositionForTablet(t *testing.T) { Uid: 100, }, }, + expectedLag: 201 * time.Second, expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", }, { name: "no replication status", @@ -506,6 +799,7 @@ func TestFindPositionForTablet(t 
*testing.T) { Uid: 100, }, }, + expectedLag: 0, expectedPosition: "", }, { name: "relay log", @@ -516,8 +810,9 @@ func TestFindPositionForTablet(t *testing.T) { }{ "zone1-0000000100": { Position: &replicationdatapb.Status{ - Position: "unused", - RelayLogPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + Position: "unused", + RelayLogPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", + ReplicationLagSeconds: 291, }, }, }, @@ -528,6 +823,7 @@ func TestFindPositionForTablet(t *testing.T) { Uid: 100, }, }, + expectedLag: 291 * time.Second, expectedPosition: "MySQL56/3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5", }, { name: "error in parsing position", @@ -555,7 +851,7 @@ func TestFindPositionForTablet(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - pos, err := findPositionForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second) + pos, lag, err := findPositionAndLagForTablet(ctx, test.tablet, logger, test.tmc, 10*time.Second) if test.expectedErr != "" { require.EqualError(t, err, test.expectedErr) return @@ -563,6 +859,7 @@ func TestFindPositionForTablet(t *testing.T) { require.NoError(t, err) posString := replication.EncodePosition(pos) require.Equal(t, test.expectedPosition, posString) + require.Equal(t, test.expectedLag, lag) }) } } @@ -726,8 +1023,6 @@ func TestFindCurrentPrimary(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -1218,3 +1513,105 @@ func Test_getTabletsWithPromotionRules(t *testing.T) { }) } } + +func TestGetBackupCandidates(t *testing.T) { + var ( + primaryTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 1, + }, + Type: topodatapb.TabletType_PRIMARY, + }, + } + replicaTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 2, + }, + Type: topodatapb.TabletType_REPLICA, + }, + } + rdonlyTablet = 
&topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 3, + }, + Type: topodatapb.TabletType_RDONLY, + }, + } + spareTablet = &topo.TabletInfo{ + Tablet: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{ + Cell: "zone1", + Uid: 4, + }, + Type: topodatapb.TabletType_SPARE, + }, + } + ) + tests := []struct { + name string + in []*topo.TabletInfo + expected []*topo.TabletInfo + status []*replicationdatapb.Status + }{ + { + name: "one primary tablet with status", + in: []*topo.TabletInfo{primaryTablet}, + expected: []*topo.TabletInfo{primaryTablet}, + status: []*replicationdatapb.Status{{}}, + }, + { + name: "one primary tablet with no status", + in: []*topo.TabletInfo{primaryTablet}, + expected: []*topo.TabletInfo{primaryTablet}, + status: []*replicationdatapb.Status{nil}, + }, + { + name: "4 tablets with no status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet}, + status: []*replicationdatapb.Status{nil, nil, nil, nil}, + }, + { + name: "4 tablets with full status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + status: []*replicationdatapb.Status{{}, {}, {}, {}}, + }, + { + name: "4 tablets with no primaryTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + status: []*replicationdatapb.Status{nil, {}, {}, {}}, + }, + { + name: "4 tablets with no replicaTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, rdonlyTablet, spareTablet}, + status: []*replicationdatapb.Status{{}, nil, {}, {}}, + }, + { + name: "4 tablets with no rdonlyTablet status", + in: 
[]*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, spareTablet}, + status: []*replicationdatapb.Status{{}, {}, nil, {}}, + }, + { + name: "4 tablets with no spareTablet status", + in: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet, spareTablet}, + expected: []*topo.TabletInfo{primaryTablet, replicaTablet, rdonlyTablet}, + status: []*replicationdatapb.Status{{}, {}, {}, nil}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res := GetBackupCandidates(tt.in, tt.status) + require.EqualValues(t, tt.expected, res) + }) + } +} diff --git a/go/vt/vtctl/schematools/reload_test.go b/go/vt/vtctl/schematools/reload_test.go index 4f00e300d13..58a579b3821 100644 --- a/go/vt/vtctl/schematools/reload_test.go +++ b/go/vt/vtctl/schematools/reload_test.go @@ -325,7 +325,6 @@ func TestReloadShard(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtctl/vdiff2.go b/go/vt/vtctl/vdiff2.go index 7cd1c7e00ca..9f53bb3590d 100644 --- a/go/vt/vtctl/vdiff2.go +++ b/go/vt/vtctl/vdiff2.go @@ -121,9 +121,10 @@ func commandVDiff2(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.F UpdateTableStats: *updateTableStats, }, ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ - OnlyPks: *onlyPks, - DebugQuery: *debugQuery, - Format: format, + OnlyPks: *onlyPks, + DebugQuery: *debugQuery, + Format: format, + MaxSampleRows: 10, }, } diff --git a/go/vt/vtctl/vdiff2_test.go b/go/vt/vtctl/vdiff2_test.go index 1348cd06448..0e5b2f41c60 100644 --- a/go/vt/vtctl/vdiff2_test.go +++ b/go/vt/vtctl/vdiff2_test.go @@ -8,8 +8,8 @@ import ( "time" "github.com/google/uuid" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gotest.tools/assert" "vitess.io/vitess/go/sqltypes" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" diff --git 
a/go/vt/vtctl/vdiff_env_test.go b/go/vt/vtctl/vdiff_env_test.go index 955d2673d20..fdcf29367cc 100644 --- a/go/vt/vtctl/vdiff_env_test.go +++ b/go/vt/vtctl/vdiff_env_test.go @@ -19,7 +19,7 @@ package vtctl import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "testing" @@ -43,7 +43,7 @@ import ( const ( // vdiffStopPosition is the default stop position for the target vreplication. - // It can be overridden with the positons argument to newTestVDiffEnv. + // It can be overridden with the positions argument to newTestVDiffEnv. vdiffStopPosition = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-892" // vdiffSourceGtid should be the position reported by the source side VStreamResults. // It's expected to be higher the vdiffStopPosition. @@ -82,8 +82,8 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar env.wr = wrangler.NewTestWrangler(env.cmdlog, env.topoServ, env.tmc) // Generate a unique dialer name. - dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { env.mu.Lock() defer env.mu.Unlock() if qs, ok := env.tablets[int(tablet.Alias.Uid)]; ok { diff --git a/go/vt/vtctl/vtctl.go b/go/vt/vtctl/vtctl.go index 11d3cf85e68..324cdda0a76 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -1814,8 +1814,6 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags force := subFlags.Bool("force", false, "Proceeds even if the keyspace already exists") allowEmptyVSchema := subFlags.Bool("allow_empty_vschema", false, "If set this will allow a new keyspace to have no vschema") - var servedFrom 
flagutil.StringMapValue - subFlags.Var(&servedFrom, "served_from", "Specifies a comma-separated list of tablet_type:keyspace pairs used to serve traffic") keyspaceType := subFlags.String("keyspace_type", "", "Specifies the type of the keyspace") baseKeyspace := subFlags.String("base_keyspace", "", "Specifies the base keyspace for a snapshot keyspace") timestampStr := subFlags.String("snapshot_time", "", "Specifies the snapshot time for this keyspace") @@ -1870,18 +1868,6 @@ func commandCreateKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags DurabilityPolicy: *durabilityPolicy, SidecarDbName: *sidecarDBName, } - if len(servedFrom) > 0 { - for name, value := range servedFrom { - tt, err := topo.ParseServingTabletType(name) - if err != nil { - return err - } - ki.ServedFroms = append(ki.ServedFroms, &topodatapb.Keyspace_ServedFrom{ - TabletType: tt, - Keyspace: value, - }) - } - } err := wr.TopoServer().CreateKeyspace(ctx, keyspace, ki) if *force && topo.IsErrType(err, topo.NodeExists) { wr.Logger().Infof("keyspace %v already exists (ignoring error with --force)", keyspace) @@ -2089,7 +2075,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub const defaultMaxReplicationLagAllowed = defaultWaitTime cells := subFlags.String("cells", "", "Cell(s) or CellAlias(es) (comma-separated) to replicate from.") - tabletTypesStr := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). Defaults to --vreplication_tablet_type parameter value for the tablet, which has the default value of in_order:REPLICA,PRIMARY. Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.") + tabletTypesStr := subFlags.String("tablet_types", "in_order:REPLICA,PRIMARY", "Source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). 
Note: SwitchTraffic overrides this default and uses in_order:RDONLY,REPLICA,PRIMARY to switch all traffic by default.") dryRun := subFlags.Bool("dry_run", false, "Does a dry run of SwitchTraffic and only reports the actions to be taken. --dry_run is only supported for SwitchTraffic, ReverseTraffic and Complete.") timeout := subFlags.Duration("timeout", defaultWaitTime, "Specifies the maximum time to wait, in seconds, for vreplication to catch up on primary migrations. The migration will be cancelled on a timeout. --timeout is only supported for SwitchTraffic and ReverseTraffic.") reverseReplication := subFlags.Bool("reverse_replication", true, "Also reverse the replication (default true). --reverse_replication is only supported for SwitchTraffic.") @@ -2100,6 +2086,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub dropForeignKeys := subFlags.Bool("drop_foreign_keys", false, "If true, tables in the target keyspace will be created without foreign keys.") maxReplicationLagAllowed := subFlags.Duration("max_replication_lag_allowed", defaultMaxReplicationLagAllowed, "Allow traffic to be switched only if vreplication lag is below this (in seconds)") atomicCopy := subFlags.Bool("atomic-copy", false, "(EXPERIMENTAL) Use this if your source keyspace has tables which use foreign key constraints. All tables from the source will be moved.") + shards := subFlags.StringSlice("shards", nil, "(Optional) Specifies a comma-separated list of shards to operate on.") onDDL := "IGNORE" subFlags.StringVar(&onDDL, "on-ddl", onDDL, "What to do when DDL is encountered in the VReplication stream. 
Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.") @@ -2162,10 +2149,11 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub StopAfterCopy: *stopAfterCopy, AtomicCopy: *atomicCopy, } + var shardsWithStreams []string printDetails := func() error { s := "" - res, err := wr.ShowWorkflow(ctx, workflowName, target) + res, err := wr.ShowWorkflow(ctx, workflowName, target, shardsWithStreams) if err != nil { return err } @@ -2329,6 +2317,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub vrwp.KeepRoutingRules = *keepRoutingRules } vrwp.WorkflowType = workflowType + vrwp.ShardSubset = *shards wf, err := wr.NewVReplicationWorkflow(ctx, workflowType, vrwp) if err != nil { log.Warningf("NewVReplicationWorkflow returned error %+v", wf) @@ -2338,6 +2327,15 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub return fmt.Errorf("workflow %s does not exist", ksWorkflow) } + if len(vrwp.ShardSubset) > 0 { + if workflowType == wrangler.MoveTablesWorkflow && action != vReplicationWorkflowActionCreate && wf.IsPartialMigration() { + log.Infof("Subset of shards: %s have been specified for keyspace %s, workflow %s, for action %s", + vrwp.ShardSubset, target, workflowName, action) + } else { + return fmt.Errorf("The --shards option can only be specified for existing Partial MoveTables workflows") + } + } + printCopyProgress := func() error { copyProgress, err := wf.GetCopyProgress() if err != nil { @@ -2352,7 +2350,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub sort.Strings(tables) s := "" var progress wrangler.TableCopyProgress - for table := range *copyProgress { + for _, table := range tables { var rowCountPct, tableSizePct int64 progress = *(*copyProgress)[table] if progress.SourceRowCount > 0 { @@ -2378,6 +2376,18 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub } } + wr.WorkflowParams = vrwp + + switch 
vrwp.WorkflowType { + case wrangler.MoveTablesWorkflow: + // If this is not a partial MoveTables, SourceShards is nil and all shards will be polled. + shardsWithStreams = vrwp.SourceShards + case wrangler.ReshardWorkflow: + shardsWithStreams = vrwp.TargetShards + default: + } + + wr.WorkflowParams = vrwp var dryRunResults *[]string startState := wf.CachedState() switch action { @@ -2413,7 +2423,7 @@ func commandVReplicationWorkflow(ctx context.Context, wr *wrangler.Wrangler, sub case <-ctx.Done(): return case <-ticker.C: - totalStreams, startedStreams, workflowErrors, err := wf.GetStreamCount() + totalStreams, startedStreams, workflowErrors, err := wf.GetStreamCount(shardsWithStreams) if err != nil { errCh <- err close(errCh) @@ -2783,7 +2793,7 @@ func commandReloadSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p } func commandReloadSchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 10, "How many tablets to reload in parallel") + concurrency := subFlags.Int32("concurrency", 10, "How many tablets to reload in parallel") includePrimary := subFlags.Bool("include_primary", true, "Include the primary tablet") if err := subFlags.Parse(args); err != nil { @@ -2801,7 +2811,7 @@ func commandReloadSchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFla Shard: shard, WaitPosition: "", IncludePrimary: *includePrimary, - Concurrency: uint32(*concurrency), + Concurrency: *concurrency, }) if resp != nil { for _, e := range resp.Events { @@ -2812,7 +2822,7 @@ func commandReloadSchemaShard(ctx context.Context, wr *wrangler.Wrangler, subFla } func commandReloadSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - concurrency := subFlags.Int("concurrency", 10, "How many tablets to reload in parallel") + concurrency := subFlags.Int32("concurrency", 10, "How many tablets to reload in parallel") includePrimary := 
subFlags.Bool("include_primary", true, "Include the primary tablet(s)") if err := subFlags.Parse(args); err != nil { @@ -2825,7 +2835,7 @@ func commandReloadSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, sub Keyspace: subFlags.Arg(0), WaitPosition: "", IncludePrimary: *includePrimary, - Concurrency: uint32(*concurrency), + Concurrency: *concurrency, }) if resp != nil { for _, e := range resp.Events { @@ -2900,7 +2910,6 @@ func commandValidateSchemaKeyspace(ctx context.Context, wr *wrangler.Wrangler, s } func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - subFlags.MarkDeprecated("allow_long_unavailability", "") sql := subFlags.String("sql", "", "A list of semicolon-delimited SQL commands") sqlFile := subFlags.String("sql-file", "", "Identifies the file that contains the SQL commands") ddlStrategy := subFlags.String("ddl_strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'") @@ -2936,7 +2945,7 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf *migrationContext = *requestContext } - parts, err := sqlparser.SplitStatementToPieces(change) + parts, err := wr.SQLParser().SplitStatementToPieces(change) if err != nil { return err } @@ -3356,7 +3365,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p *sql = string(sqlBytes) } - stmt, err := sqlparser.Parse(*sql) + stmt, err := wr.SQLParser().Parse(*sql) if err != nil { return fmt.Errorf("error parsing vschema statement `%s`: %v", *sql, err) } @@ -3407,7 +3416,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p } // Validate the VSchema. 
- ksVs, err := vindexes.BuildKeyspace(vs) + ksVs, err := vindexes.BuildKeyspace(vs, wr.SQLParser()) if err != nil { return err } @@ -3422,7 +3431,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p vdx := ksVs.Vindexes[name] if val, ok := vdx.(vindexes.ParamValidating); ok { for _, param := range val.UnknownParams() { - wr.Logger().Warningf("Unknown param in vindex %s: %s", name, param) + wr.Logger().Warningf("Unknown parameter in vindex %s: %s", name, param) } } } @@ -3439,7 +3448,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p return err } - if _, err := vindexes.BuildKeyspace(vs); err != nil { + if _, err := vindexes.BuildKeyspace(vs, wr.SQLParser()); err != nil { return err } @@ -3721,8 +3730,9 @@ func commandHelp(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.Fla } func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag.FlagSet, args []string) error { - usage := "usage: Workflow [--dry-run] [--cells] [--tablet-types] [.] start/stop/update/delete/show/listall/tags []" + usage := "usage: Workflow [--shards ] [--dry-run] [--cells] [--tablet-types] [.] start/stop/update/delete/show/listall/tags []" dryRun := subFlags.Bool("dry-run", false, "Does a dry run of the Workflow action and reports the query and list of tablets on which the operation will be applied") + shards := subFlags.StringSlice("shards", nil, "(Optional) Specifies a comma-separated list of shards to operate on.") cells := subFlags.StringSlice("cells", []string{}, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from. (Update only)") tabletTypesStrs := subFlags.StringSlice("tablet-types", []string{}, "New source tablet types to replicate from (e.g. PRIMARY, REPLICA, RDONLY). (Update only)") onDDL := subFlags.String("on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE. 
(Update only)") @@ -3732,6 +3742,9 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag if subFlags.NArg() < 2 { return fmt.Errorf(usage) } + if len(*shards) > 0 { + log.Infof("Subset of shards specified: %d, %v", len(*shards), strings.Join(*shards, ",")) + } keyspace := subFlags.Arg(0) action := strings.ToLower(subFlags.Arg(1)) var workflow string @@ -3816,9 +3829,10 @@ func commandWorkflow(ctx context.Context, wr *wrangler.Wrangler, subFlags *pflag TabletTypes: tabletTypes, TabletSelectionPreference: tsp, OnDdl: binlogdatapb.OnDDLAction(onddl), + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), // We don't allow changing this in the client command } } - results, err = wr.WorkflowAction(ctx, workflow, keyspace, action, *dryRun, rpcReq) // Only update currently uses the new RPC path + results, err = wr.WorkflowAction(ctx, workflow, keyspace, action, *dryRun, rpcReq, *shards) // Only update currently uses the new RPC path if err != nil { return err } diff --git a/go/vt/vtctl/vtctl_env_test.go b/go/vt/vtctl/vtctl_env_test.go index e502fbdf86a..7537eae9e8b 100644 --- a/go/vt/vtctl/vtctl_env_test.go +++ b/go/vt/vtctl/vtctl_env_test.go @@ -55,7 +55,7 @@ type testVTCtlEnv struct { var vtctlEnv *testVTCtlEnv func init() { - tabletconn.RegisterDialer("VTCtlTest", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tabletconn.RegisterDialer("VTCtlTest", func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { vtctlEnv.mu.Lock() defer vtctlEnv.mu.Unlock() if qs, ok := vtctlEnv.tablets[int(tablet.Alias.Uid)]; ok { diff --git a/go/vt/vtctl/vtctl_test.go b/go/vt/vtctl/vtctl_test.go index eb6a5f5941f..fe3564421b8 100644 --- a/go/vt/vtctl/vtctl_test.go +++ b/go/vt/vtctl/vtctl_test.go @@ -43,7 +43,7 @@ var ( unknownParamsLoggedDryRunVSchema string ) -// TestApplyVSchema tests the the MoveTables client command +// 
TestApplyVSchema tests the MoveTables client command // via the commandVRApplyVSchema() cmd handler. func TestApplyVSchema(t *testing.T) { shard := "0" @@ -89,9 +89,9 @@ func TestApplyVSchema(t *testing.T) { } If this is not what you expected, check the input data \(as JSON parsing will skip unexpected fields\)\. -.*W.* .* vtctl.go:.* Unknown param in vindex binary_vdx: hello -W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: foo -W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello`, +.*W.* .* vtctl.go:.* Unknown parameter in vindex binary_vdx: hello +W.* .* vtctl.go:.* Unknown parameter in vindex hash_vdx: foo +W.* .* vtctl.go:.* Unknown parameter in vindex hash_vdx: hello`, }, { name: "UnknownParamsLoggedWithDryRun", @@ -117,9 +117,9 @@ W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello`, } If this is not what you expected, check the input data \(as JSON parsing will skip unexpected fields\)\. -.*W.* .* vtctl.go:.* Unknown param in vindex binary_vdx: hello -W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: foo -W.* .* vtctl.go:.* Unknown param in vindex hash_vdx: hello +.*W.* .* vtctl.go:.* Unknown parameter in vindex binary_vdx: hello +W.* .* vtctl.go:.* Unknown parameter in vindex hash_vdx: foo +W.* .* vtctl.go:.* Unknown parameter in vindex hash_vdx: hello Dry run: Skipping update of VSchema`, }, } @@ -139,7 +139,7 @@ Dry run: Skipping update of VSchema`, } } -// TestMoveTables tests the the MoveTables client command +// TestMoveTables tests the MoveTables client command // via the commandVReplicationWorkflow() cmd handler. 
// This currently only tests the Progress action (which is // a parent of the Show action) but it can be used to test @@ -149,7 +149,8 @@ func TestMoveTables(t *testing.T) { shard := "0" sourceKs := "sourceks" targetKs := "targetks" - table := "customer" + table1 := "customer" + table2 := "customer_order" wf := "testwf" ksWf := fmt.Sprintf("%s.%s", targetKs, wf) minTableSize := 16384 // a single 16KiB InnoDB page @@ -159,16 +160,22 @@ func TestMoveTables(t *testing.T) { defer env.close() source := env.addTablet(100, sourceKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) target := env.addTablet(200, targetKs, shard, &topodatapb.KeyRange{}, topodatapb.TabletType_PRIMARY) - sourceCol := fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"%s" filter:"select * from %s"}}`, - sourceKs, shard, table, table) + sourceCol := fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"%s" filter:"select * from %s"} rules:{match:"%s" filter:"select * from %s"}}`, + sourceKs, shard, table1, table1, table2, table2) bls := &binlogdatapb.BinlogSource{ Keyspace: sourceKs, Shard: shard, Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: table, - Filter: fmt.Sprintf("select * from %s", table), - }}, + Rules: []*binlogdatapb.Rule{ + { + Match: table1, + Filter: fmt.Sprintf("select * from %s", table1), + }, + { + Match: table2, + Filter: fmt.Sprintf("select * from %s", table2), + }, + }, }, } now := time.Now().UTC().Unix() @@ -200,12 +207,13 @@ func TestMoveTables(t *testing.T) { expectResults: func() { env.tmc.setVRResults( target.tablet, - fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", + fmt.Sprintf("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)", vrID, vrID), 
sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "table_name|lastpk", - "varchar|varbinary"), - fmt.Sprintf("%s|", table), + "vrepl_id|table_name|lastpk", + "int64|varchar|varbinary"), + fmt.Sprintf("%d|%s|", vrID, table1), + fmt.Sprintf("%d|%s|", vrID, table2), ), ) env.tmc.setDBAResults( @@ -215,7 +223,8 @@ func TestMoveTables(t *testing.T) { sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name", "varchar"), - table, + table1, + table2, ), ) env.tmc.setVRResults( @@ -231,26 +240,28 @@ func TestMoveTables(t *testing.T) { ) env.tmc.setDBAResults( target.tablet, - fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')", - targetKs, table), + fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s','%s')", + targetKs, table1, table2), sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name|table_rows|data_length", "varchar|int64|int64"), - fmt.Sprintf("%s|0|%d", table, minTableSize), + fmt.Sprintf("%s|0|%d", table1, minTableSize), + fmt.Sprintf("%s|0|%d", table2, minTableSize), ), ) env.tmc.setDBAResults( source.tablet, - fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')", - sourceKs, table), + fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s','%s')", + sourceKs, table1, table2), sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name|table_rows|data_length", "varchar|int64|int64"), - fmt.Sprintf("%s|10|%d", table, minTableSize), + fmt.Sprintf("%s|10|%d", table1, minTableSize), + fmt.Sprintf("%s|10|%d", table2, minTableSize), ), ) }, - want: fmt.Sprintf("\nCopy Progress (approx):\n\n\ncustomer: rows copied 0/10 (0%%), size copied 16384/16384 (100%%)\n\n\n\nThe following vreplication streams exist for 
workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Copying. VStream has not started.\n\n\n", + want: fmt.Sprintf("\nCopy Progress (approx):\n\n\ncustomer: rows copied 0/10 (0%%), size copied 16384/16384 (100%%)\ncustomer_order: rows copied 0/10 (0%%), size copied 16384/16384 (100%%)\n\n\n\nThe following vreplication streams exist for workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Copying. VStream has not started.\n\n\n", ksWf, vrID, shard, env.cell, target.tablet.Alias.Uid), }, { @@ -260,12 +271,13 @@ func TestMoveTables(t *testing.T) { expectResults: func() { env.tmc.setVRResults( target.tablet, - fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", + fmt.Sprintf("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)", vrID, vrID), sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "table_name|lastpk", - "varchar|varbinary"), - fmt.Sprintf("%s|", table), + "vrepl_id|table_name|lastpk", + "int64|varchar|varbinary"), + fmt.Sprintf("%d|%s|", vrID, table1), + fmt.Sprintf("%d|%s|", vrID, table2), ), ) env.tmc.setDBAResults( @@ -275,7 +287,8 @@ func TestMoveTables(t *testing.T) { sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name", "varchar"), - table, + table1, + table2, ), ) env.tmc.setVRResults( @@ -291,26 +304,28 @@ func TestMoveTables(t *testing.T) { ) env.tmc.setDBAResults( target.tablet, - fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')", - targetKs, table), + fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s','%s')", + targetKs, table1, table2), sqltypes.MakeTestResult(sqltypes.MakeTestFields( 
"table_name|table_rows|data_length", "varchar|int64|int64"), - fmt.Sprintf("%s|5|%d", table, minTableSize), + fmt.Sprintf("%s|5|%d", table1, minTableSize), + fmt.Sprintf("%s|5|%d", table2, minTableSize), ), ) env.tmc.setDBAResults( source.tablet, - fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s')", - sourceKs, table), + fmt.Sprintf("select table_name, table_rows, data_length from information_schema.tables where table_schema = 'vt_%s' and table_name in ('%s','%s')", + sourceKs, table1, table2), sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name|table_rows|data_length", "varchar|int64|int64"), - fmt.Sprintf("%s|10|%d", table, minTableSize), + fmt.Sprintf("%s|10|%d", table1, minTableSize), + fmt.Sprintf("%s|10|%d", table2, minTableSize), ), ) }, - want: fmt.Sprintf("\nCopy Progress (approx):\n\n\ncustomer: rows copied 5/10 (50%%), size copied 16384/16384 (100%%)\n\n\n\nThe following vreplication streams exist for workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Error: Duplicate entry '6' for key 'customer.PRIMARY' (errno 1062) (sqlstate 23000) during query: insert into customer(customer_id,email) values (6,'mlord@planetscale.com').\n\n\n", + want: fmt.Sprintf("\nCopy Progress (approx):\n\n\ncustomer: rows copied 5/10 (50%%), size copied 16384/16384 (100%%)\ncustomer_order: rows copied 5/10 (50%%), size copied 16384/16384 (100%%)\n\n\n\nThe following vreplication streams exist for workflow %s:\n\nid=%d on %s/%s-0000000%d: Status: Error: Duplicate entry '6' for key 'customer.PRIMARY' (errno 1062) (sqlstate 23000) during query: insert into customer(customer_id,email) values (6,'mlord@planetscale.com').\n\n\n", ksWf, vrID, shard, env.cell, target.tablet.Alias.Uid), }, { @@ -320,7 +335,7 @@ func TestMoveTables(t *testing.T) { expectResults: func() { env.tmc.setVRResults( target.tablet, - fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id 
in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", + fmt.Sprintf("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)", vrID, vrID), &sqltypes.Result{}, ) diff --git a/go/vt/vtctl/vtctlclient/interface.go b/go/vt/vtctl/vtctlclient/interface.go index e9bf0cdc257..8de7f48097b 100644 --- a/go/vt/vtctl/vtctlclient/interface.go +++ b/go/vt/vtctl/vtctlclient/interface.go @@ -56,7 +56,7 @@ type VtctlClient interface { } // Factory functions are registered by client implementations -type Factory func(addr string) (VtctlClient, error) +type Factory func(ctx context.Context, addr string) (VtctlClient, error) var factories = make(map[string]Factory) @@ -68,22 +68,11 @@ func RegisterFactory(name string, factory Factory) { factories[name] = factory } -// UnregisterFactoryForTest allows to unregister a client implementation from the static map. -// This function is used by unit tests to cleanly unregister any fake implementations. -// This way, a test package can use the same name for different fakes and no dangling fakes are -// left behind in the static factories map after the test. -func UnregisterFactoryForTest(name string) { - if _, ok := factories[name]; !ok { - log.Fatalf("UnregisterFactoryForTest: %s is not registered", name) - } - delete(factories, name) -} - // New allows a user of the client library to get its implementation. 
-func New(addr string) (VtctlClient, error) { +func New(ctx context.Context, addr string) (VtctlClient, error) { factory, ok := factories[vtctlClientProtocol] if !ok { return nil, fmt.Errorf("unknown vtctl client protocol: %v", vtctlClientProtocol) } - return factory(addr) + return factory(ctx, addr) } diff --git a/go/vt/vtctl/vtctlclient/wrapper.go b/go/vt/vtctl/vtctlclient/wrapper.go index a30dde3e8dd..d33aad5b4e3 100644 --- a/go/vt/vtctl/vtctlclient/wrapper.go +++ b/go/vt/vtctl/vtctlclient/wrapper.go @@ -17,13 +17,12 @@ limitations under the License. package vtctlclient import ( + "context" "errors" "fmt" "io" "time" - "context" - logutilpb "vitess.io/vitess/go/vt/proto/logutil" ) @@ -39,7 +38,7 @@ func RunCommandAndWait(ctx context.Context, server string, args []string, recv f return errors.New("no function closure for Event stream specified") } // create the client - client, err := New(server) + client, err := New(ctx, server) if err != nil { return fmt.Errorf("cannot dial to server %v: %v", server, err) } diff --git a/go/vt/vtctl/vtctldclient/client.go b/go/vt/vtctl/vtctldclient/client.go index 5b90a08ecdd..4b6def326db 100644 --- a/go/vt/vtctl/vtctldclient/client.go +++ b/go/vt/vtctl/vtctldclient/client.go @@ -3,6 +3,7 @@ package vtctldclient import ( + "context" "fmt" "log" @@ -17,12 +18,12 @@ type VtctldClient interface { } // Factory is a function that creates new VtctldClients. -type Factory func(addr string) (VtctldClient, error) +type Factory func(ctx context.Context, addr string) (VtctldClient, error) var registry = map[string]Factory{} // Register adds a VtctldClient factory for the given name (protocol). -// Attempting to register mulitple factories for the same protocol is a fatal +// Attempting to register multiple factories for the same protocol is a fatal // error. 
func Register(name string, factory Factory) { if _, ok := registry[name]; ok { @@ -40,11 +41,11 @@ func Register(name string, factory Factory) { // global namespace to determine the protocol to use. Instead, we require // users to specify their own flag in their own (hopefully not global) namespace // to determine the protocol to pass into here. -func New(protocol string, addr string) (VtctldClient, error) { +func New(ctx context.Context, protocol string, addr string) (VtctldClient, error) { factory, ok := registry[protocol] if !ok { return nil, fmt.Errorf("unknown vtctld client protocol: %s", protocol) } - return factory(addr) + return factory(ctx, addr) } diff --git a/go/vt/vtctl/workflow/common/utils.go b/go/vt/vtctl/workflow/common/utils.go new file mode 100644 index 00000000000..86fa33bc191 --- /dev/null +++ b/go/vt/vtctl/workflow/common/utils.go @@ -0,0 +1,55 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" +) + +// GetShards returns a subset of shards in a keyspace. If no subset is provided, all shards are returned. 
+func GetShards(ctx context.Context, ts *topo.Server, keyspace string, shardSubset []string) ([]string, error) { + var err error + allShards, err := ts.GetShardNames(ctx, keyspace) + if err != nil { + return nil, err + } + if len(allShards) == 0 { + return nil, fmt.Errorf("no shards found in keyspace %s", keyspace) + } + + if len(shardSubset) == 0 { + return allShards, nil + } + existingShards := make(map[string]bool, len(allShards)) + for _, shard := range allShards { + existingShards[shard] = true + } + // Validate that the provided shards are part of the keyspace. + for _, shard := range shardSubset { + _, found := existingShards[shard] + if !found { + return nil, fmt.Errorf("shard %s not found in keyspace %s", shard, keyspace) + } + } + log.Infof("Selecting subset of shards in keyspace %s: %d from %d :: %+v", + keyspace, len(shardSubset), len(allShards), shardSubset) + return shardSubset, nil +} diff --git a/go/vt/vtctl/workflow/log_recorder_test.go b/go/vt/vtctl/workflow/log_recorder_test.go index b58d1d42a79..0234e811771 100644 --- a/go/vt/vtctl/workflow/log_recorder_test.go +++ b/go/vt/vtctl/workflow/log_recorder_test.go @@ -20,7 +20,7 @@ import ( "testing" "time" - "github.com/magiconair/properties/assert" + "github.com/stretchr/testify/assert" ) func TestLogRecorder(t *testing.T) { diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go index 6be5ac7f445..f0171f31cab 100644 --- a/go/vt/vtctl/workflow/materializer.go +++ b/go/vt/vtctl/workflow/materializer.go @@ -18,20 +18,23 @@ package workflow import ( "context" + "encoding/json" "fmt" "strings" "sync" - "text/template" "time" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/schematools" + 
"vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -39,6 +42,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -62,6 +66,21 @@ type materializer struct { isPartial bool primaryVindexesDiffer bool workflowType binlogdatapb.VReplicationWorkflowType + + env *vtenv.Environment +} + +func (mz *materializer) getWorkflowType() binlogdatapb.VReplicationWorkflowType { + var workflowType binlogdatapb.VReplicationWorkflowType + switch mz.ms.MaterializationIntent { + case vtctldatapb.MaterializationIntent_CUSTOM: + workflowType = binlogdatapb.VReplicationWorkflowType_Materialize + case vtctldatapb.MaterializationIntent_MOVETABLES: + workflowType = binlogdatapb.VReplicationWorkflowType_MoveTables + case vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX: + workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex + } + return workflowType } func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowSubType, error) { @@ -78,7 +97,19 @@ func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowS } } -func (mz *materializer) createMoveTablesStreams(req *vtctldatapb.MoveTablesCreateRequest) error { +func (mz *materializer) getOptionsJSON() (string, error) { + defaultJSON := "{}" + if mz.ms.WorkflowOptions == nil { + return defaultJSON, nil + } + optionsJSON, err := json.Marshal(mz.ms.WorkflowOptions) + if err != nil || optionsJSON == nil { + return defaultJSON, err + } + return string(optionsJSON), nil +} + +func (mz *materializer) createWorkflowStreams(req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) error { if err := validateNewWorkflow(mz.ctx, mz.ts, 
mz.tmc, mz.ms.TargetKeyspace, mz.ms.Workflow); err != nil { return err } @@ -95,6 +126,12 @@ func (mz *materializer) createMoveTablesStreams(req *vtctldatapb.MoveTablesCreat if err != nil { return err } + req.WorkflowSubType = workflowSubType + optionsJSON, err := mz.getOptionsJSON() + if err != nil { + return err + } + req.Options = optionsJSON return mz.forAllTargets(func(target *topo.ShardInfo) error { targetPrimary, err := mz.ts.GetTablet(mz.ctx, target.PrimaryAlias) @@ -113,67 +150,26 @@ func (mz *materializer) createMoveTablesStreams(req *vtctldatapb.MoveTablesCreat if len(sourceShards) == 1 && key.KeyRangeEqual(sourceShards[0].KeyRange, target.KeyRange) { streamKeyRangesEqual = true } - blses, err := mz.generateBinlogSources(mz.ctx, target, sourceShards, streamKeyRangesEqual) + + // Each tablet needs its own copy of the request as it will have a unique + // BinlogSource. + tabletReq := req.CloneVT() + tabletReq.BinlogSource, err = mz.generateBinlogSources(mz.ctx, target, sourceShards, streamKeyRangesEqual) if err != nil { return err } - _, err = mz.tmc.CreateVReplicationWorkflow(mz.ctx, targetPrimary.Tablet, &tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ - Workflow: req.Workflow, - BinlogSource: blses, - Cells: req.Cells, - TabletTypes: req.TabletTypes, - TabletSelectionPreference: req.TabletSelectionPreference, - WorkflowType: mz.workflowType, - WorkflowSubType: workflowSubType, - DeferSecondaryKeys: req.DeferSecondaryKeys, - AutoStart: req.AutoStart, - StopAfterCopy: req.StopAfterCopy, - }) + + _, err = mz.tmc.CreateVReplicationWorkflow(mz.ctx, targetPrimary.Tablet, tabletReq) return err }) } -// createMaterializerStreams creates the vreplication streams for Materialize -// and LookupVindex workflows. 
-func (mz *materializer) createMaterializerStreams() error { - if err := validateNewWorkflow(mz.ctx, mz.ts, mz.tmc, mz.ms.TargetKeyspace, mz.ms.Workflow); err != nil { - return err - } - err := mz.buildMaterializer() - if err != nil { - return err - } - if err := mz.deploySchema(); err != nil { - return err - } - insertMap := make(map[string]string, len(mz.targetShards)) - for _, targetShard := range mz.targetShards { - sourceShards := mz.filterSourceShards(targetShard) - // streamKeyRangesEqual allows us to optimize the stream for the cases - // where while the target keyspace may be sharded, the target shard has - // a single source shard to stream data from and the target and source - // shard have equal key ranges. This can be done, for example, when doing - // shard by shard migrations -- migrating a single shard at a time between - // sharded source and sharded target keyspaces. - streamKeyRangesEqual := false - if len(sourceShards) == 1 && key.KeyRangeEqual(sourceShards[0].KeyRange, targetShard.KeyRange) { - streamKeyRangesEqual = true - } - inserts, err := mz.generateInserts(mz.ctx, sourceShards, streamKeyRangesEqual) - if err != nil { - return err - } - insertMap[key.KeyRangeString(targetShard.KeyRange)] = inserts - } - if err := mz.createStreams(mz.ctx, insertMap); err != nil { - return err - } - return nil +func (mz *materializer) getTenantClause() (*sqlparser.Expr, error) { + return getTenantClause(mz.ms.WorkflowOptions, mz.targetVSchema, mz.env.Parser()) } -func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*topo.ShardInfo, keyRangesEqual bool) (string, error) { - ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "{{.dbname}}") - +func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard *topo.ShardInfo, sourceShards []*topo.ShardInfo, keyRangesEqual bool) ([]*binlogdatapb.BinlogSource, error) { + blses := make([]*binlogdatapb.BinlogSource, 0, len(mz.sourceShards)) for _, 
sourceShard := range sourceShards { bls := &binlogdatapb.BinlogSource{ Keyspace: mz.ms.SourceKeyspace, @@ -185,105 +181,16 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top TargetTimeZone: mz.ms.TargetTimeZone, OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[mz.ms.OnDdl]), } - for _, ts := range mz.ms.TableSettings { - rule := &binlogdatapb.Rule{ - Match: ts.TargetTable, - } - if ts.SourceExpression == "" { - bls.Filter.Rules = append(bls.Filter.Rules, rule) - continue - } - - // Validate non-empty query. - stmt, err := sqlparser.Parse(ts.SourceExpression) + var tenantClause *sqlparser.Expr + var err error + if mz.IsMultiTenantMigration() { + tenantClause, err = mz.getTenantClause() if err != nil { - return "", err - } - sel, ok := stmt.(*sqlparser.Select) - if !ok { - return "", fmt.Errorf("unrecognized statement: %s", ts.SourceExpression) - } - filter := ts.SourceExpression - if !keyRangesEqual && mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference { - cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable]) - if err != nil { - return "", err - } - mappedCols := make([]*sqlparser.ColName, 0, len(cv.Columns)) - for _, col := range cv.Columns { - colName, err := matchColInSelect(col, sel) - if err != nil { - return "", err - } - mappedCols = append(mappedCols, colName) - } - subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) - for _, mappedCol := range mappedCols { - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) - } - vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")}) - inKeyRange := &sqlparser.FuncExpr{ - Name: sqlparser.NewIdentifierCI("in_keyrange"), - Exprs: subExprs, - } - if sel.Where != nil 
{ - sel.Where = &sqlparser.Where{ - Type: sqlparser.WhereClause, - Expr: &sqlparser.AndExpr{ - Left: inKeyRange, - Right: sel.Where.Expr, - }, - } - } else { - sel.Where = &sqlparser.Where{ - Type: sqlparser.WhereClause, - Expr: inKeyRange, - } - } - - filter = sqlparser.String(sel) + return nil, err } - - rule.Filter = filter - - bls.Filter.Rules = append(bls.Filter.Rules, rule) } - workflowSubType := binlogdatapb.VReplicationWorkflowSubType_None - if mz.isPartial { - workflowSubType = binlogdatapb.VReplicationWorkflowSubType_Partial - } - var workflowType binlogdatapb.VReplicationWorkflowType - switch mz.ms.MaterializationIntent { - case vtctldatapb.MaterializationIntent_CUSTOM: - workflowType = binlogdatapb.VReplicationWorkflowType_Materialize - case vtctldatapb.MaterializationIntent_MOVETABLES: - workflowType = binlogdatapb.VReplicationWorkflowType_MoveTables - case vtctldatapb.MaterializationIntent_CREATELOOKUPINDEX: - workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex - } - ig.AddRow(mz.ms.Workflow, bls, "", mz.ms.Cell, mz.ms.TabletTypes, - workflowType, - workflowSubType, mz.ms.DeferSecondaryKeys) - } - return ig.String(), nil -} -func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard *topo.ShardInfo, sourceShards []*topo.ShardInfo, keyRangesEqual bool) ([]*binlogdatapb.BinlogSource, error) { - blses := make([]*binlogdatapb.BinlogSource, 0, len(mz.sourceShards)) - for _, sourceShard := range sourceShards { - bls := &binlogdatapb.BinlogSource{ - Keyspace: mz.ms.SourceKeyspace, - Shard: sourceShard.ShardName(), - Filter: &binlogdatapb.Filter{}, - StopAfterCopy: mz.ms.StopAfterCopy, - ExternalCluster: mz.ms.ExternalCluster, - SourceTimeZone: mz.ms.SourceTimeZone, - TargetTimeZone: mz.ms.TargetTimeZone, - OnDdl: binlogdatapb.OnDDLAction(binlogdatapb.OnDDLAction_value[mz.ms.OnDdl]), - } for _, ts := range mz.ms.TableSettings { rule := &binlogdatapb.Rule{ Match: ts.TargetTable, @@ -295,7 +202,7 @@ func (mz *materializer) 
generateBinlogSources(ctx context.Context, targetShard * } // Validate non-empty query. - stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.env.Parser().Parse(ts.SourceExpression) if err != nil { return nil, err } @@ -303,7 +210,6 @@ func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard * if !ok { return nil, fmt.Errorf("unrecognized statement: %s", ts.SourceExpression) } - filter := ts.SourceExpression if !keyRangesEqual && mz.targetVSchema.Keyspace.Sharded && mz.targetVSchema.Tables[ts.TargetTable].Type != vindexes.TypeReference { cv, err := vindexes.FindBestColVindex(mz.targetVSchema.Tables[ts.TargetTable]) if err != nil { @@ -317,36 +223,23 @@ func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard * } mappedCols = append(mappedCols, colName) } - subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) + subExprs := make(sqlparser.Exprs, 0, len(mappedCols)+2) for _, mappedCol := range mappedCols { - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) + subExprs = append(subExprs, mappedCol) } vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(key.KeyRangeString(targetShard.KeyRange))}) + subExprs = append(subExprs, sqlparser.NewStrLiteral(vindexName)) + subExprs = append(subExprs, sqlparser.NewStrLiteral(key.KeyRangeString(targetShard.KeyRange))) inKeyRange := &sqlparser.FuncExpr{ Name: sqlparser.NewIdentifierCI("in_keyrange"), Exprs: subExprs, } - if sel.Where != nil { - sel.Where = &sqlparser.Where{ - Type: sqlparser.WhereClause, - Expr: &sqlparser.AndExpr{ - Left: inKeyRange, - Right: sel.Where.Expr, - }, - } - } else { - sel.Where = &sqlparser.Where{ - Type: sqlparser.WhereClause, - Expr: inKeyRange, - } - } - - filter = sqlparser.String(sel) + addFilter(sel, inKeyRange) } - - 
rule.Filter = filter + if tenantClause != nil { + addFilter(sel, *tenantClause) + } + rule.Filter = sqlparser.String(sel) bls.Filter.Rules = append(bls.Filter.Rules, rule) } blses = append(blses, bls) @@ -358,6 +251,20 @@ func (mz *materializer) deploySchema() error { var sourceDDLs map[string]string var mu sync.Mutex + // Auto-increment columns are typically used with unsharded MySQL tables + // but should not generally be used with sharded ones. Because it's common + // to use MoveTables to move table(s) from an unsharded keyspace to a + // sharded one we automatically remove the clauses by default to prevent + // accidents and avoid having to later do a costly ALTER TABLE operation + // to remove them. + // We do, however, allow the user to override this behavior and retain them. + removeAutoInc := false + if mz.workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && + (mz.targetVSchema != nil && mz.targetVSchema.Keyspace != nil && mz.targetVSchema.Keyspace.Sharded) && + (mz.ms != nil && mz.ms.GetWorkflowOptions().GetStripShardedAutoIncrement()) { + removeAutoInc = true + } + return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { allTables := []string{"/.*/"} @@ -402,10 +309,11 @@ func (mz *materializer) deploySchema() error { } createDDL := ts.CreateDdl - if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { + // Make any necessary adjustments to the create DDL. + if removeAutoInc || createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { if ts.SourceExpression != "" { // Check for table if non-empty SourceExpression. 
- sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + sourceTableName, err := mz.env.Parser().TableFromStatement(ts.SourceExpression) if err != nil { return err } @@ -421,7 +329,7 @@ func (mz *materializer) deploySchema() error { } if createDDL == createDDLAsCopyDropConstraint { - strippedDDL, err := stripTableConstraints(ddl) + strippedDDL, err := stripTableConstraints(ddl, mz.env.Parser()) if err != nil { return err } @@ -430,13 +338,21 @@ func (mz *materializer) deploySchema() error { if createDDL == createDDLAsCopyDropForeignKeys { - strippedDDL, err := stripTableForeignKeys(ddl) + strippedDDL, err := stripTableForeignKeys(ddl, mz.env.Parser()) if err != nil { return err } ddl = strippedDDL } + + if removeAutoInc { + ddl, err = stripAutoIncrement(ddl, mz.env.Parser()) + if err != nil { + return err + } + } + createDDL = ddl } @@ -444,13 +360,30 @@ func (mz *materializer) deploySchema() error { } if len(applyDDLs) > 0 { + if mz.ms.AtomicCopy { + // AtomicCopy suggests we may be interested in Foreign Key support. As such, we want to + // normalize the source schema: ensure the order of table definitions is compatible with + // the constraints graph. We want to first create the parents, then the children. + // We use schemadiff to normalize the schema. + // For now, and because this could have wider implications, we ignore any errors in + // reading the source schema. + env := schemadiff.NewEnv(mz.env, mz.env.CollationEnv().DefaultConnectionCharset()) + schema, err := schemadiff.NewSchemaFromQueries(env, applyDDLs) + if err != nil { + log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) + } else { + applyDDLs = schema.ToQueries() + log.Infof("AtomicCopy used, and schema was normalized via schemadiff. 
%v queries normalized", len(applyDDLs)) + } + } sql := strings.Join(applyDDLs, ";\n") _, err = mz.tmc.ApplySchema(mz.ctx, targetTablet.Tablet, &tmutils.SchemaChange{ - SQL: sql, - Force: false, - AllowReplication: true, - SQLMode: vreplication.SQLMode, + SQL: sql, + Force: false, + AllowReplication: true, + SQLMode: vreplication.SQLMode, + DisableForeignKeyChecks: true, }) if err != nil { return err @@ -468,7 +401,7 @@ func (mz *materializer) buildMaterializer() error { if err != nil { return err } - targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace, mz.env.Parser()) if err != nil { return err } @@ -505,10 +438,22 @@ func (mz *materializer) buildMaterializer() error { if err != nil { return err } - if len(ms.SourceShards) > 0 { + + // For a multi-tenant migration, user can specify a subset of target shards to stream to, based + // on the vindex they have chosen. This is to optimize the number of streams: for example, if we + // have 256 shards and a tenant maps to a single shard we can avoid creating 255 unnecessary streams + // that would be filtered out by the vindex anyway. 
+ var specifiedTargetShards []string + switch { + case mz.IsMultiTenantMigration(): + specifiedTargetShards = ms.WorkflowOptions.Shards + case len(ms.SourceShards) > 0: // shard-by-shard migration + specifiedTargetShards = ms.SourceShards + } + if len(specifiedTargetShards) > 0 { var targetShards2 []*topo.ShardInfo for _, shard := range targetShards { - for _, shard2 := range ms.SourceShards { + for _, shard2 := range specifiedTargetShards { if shard.ShardName() == shard2 { targetShards2 = append(targetShards2, shard) break @@ -544,60 +489,28 @@ func (mz *materializer) buildMaterializer() error { return nil } -func (mz *materializer) createStreams(ctx context.Context, insertsMap map[string]string) error { - return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { - keyRange := key.KeyRangeString(target.KeyRange) - inserts := insertsMap[keyRange] - targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) - if err != nil { - return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) - } - buf := &strings.Builder{} - t := template.Must(template.New("").Parse(inserts)) - input := map[string]string{ - "keyrange": keyRange, - "dbname": targetPrimary.DbName(), - } - if err := t.Execute(buf, input); err != nil { - return err - } - if _, err := mz.tmc.VReplicationExec(ctx, targetPrimary.Tablet, buf.String()); err != nil { - return err - } - return nil - }) -} - func (mz *materializer) startStreams(ctx context.Context) error { return forAllShards(mz.targetShards, func(target *topo.ShardInfo) error { targetPrimary, err := mz.ts.GetTablet(ctx, target.PrimaryAlias) if err != nil { return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) } - query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s and workflow=%s", encodeString(targetPrimary.DbName()), encodeString(mz.ms.Workflow)) - if _, err := mz.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { - return vterrors.Wrapf(err, 
"VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + if _, err := mz.tmc.UpdateVReplicationWorkflow(ctx, targetPrimary.Tablet, &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{ + Workflow: mz.ms.Workflow, + State: binlogdatapb.VReplicationWorkflowState_Running, + // Don't change anything else, so pass simulated NULLs. + Cells: textutil.SimulatedNullStringSlice, + TabletTypes: []topodatapb.TabletType{ + topodatapb.TabletType(textutil.SimulatedNullInt), + }, + OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), + }); err != nil { + return vterrors.Wrap(err, "failed to update workflow") } return nil }) } -func Materialize(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, ms *vtctldatapb.MaterializeSettings) error { - mz := &materializer{ - ctx: ctx, - ts: ts, - sourceTs: ts, - tmc: tmc, - ms: ms, - } - - err := mz.createMaterializerStreams() - if err != nil { - return err - } - return mz.startStreams(ctx) -} - func (mz *materializer) forAllTargets(f func(*topo.ShardInfo) error) error { var wg sync.WaitGroup allErrors := &concurrency.AllErrorRecorder{} @@ -757,3 +670,10 @@ func primaryVindexesDiffer(ms *vtctldatapb.MaterializeSettings, source, target * } return false } + +func (mz *materializer) IsMultiTenantMigration() bool { + if mz.ms.WorkflowOptions != nil && mz.ms.WorkflowOptions.TenantId != "" { + return true + } + return false +} diff --git a/go/vt/vtctl/workflow/materializer_env_test.go b/go/vt/vtctl/workflow/materializer_env_test.go index 1026628405e..587712f620c 100644 --- a/go/vt/vtctl/workflow/materializer_env_test.go +++ b/go/vt/vtctl/workflow/materializer_env_test.go @@ -21,18 +21,19 @@ import ( "fmt" "os" "regexp" - "strconv" "strings" "sync" "testing" + "time" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/mysqlctl/tmutils" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + 
"vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -51,13 +52,11 @@ type queryResult struct { } type testMaterializerEnv struct { - ws *Server - ms *vtctldatapb.MaterializeSettings - sources []string - targets []string - tablets map[int]*topodatapb.Tablet - // Importing the tabletmanager package causes a circular dependency. :-( - //tms map[int]*tabletmanager.TabletManager + ws *Server + ms *vtctldatapb.MaterializeSettings + sources []string + targets []string + tablets map[int]*topodatapb.Tablet topoServ *topo.Server cell string tmc *testMaterializerTMClient @@ -82,7 +81,8 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M cell: "cell", tmc: newTestMaterializerTMClient(), } - env.ws = NewServer(env.topoServ, env.tmc) + venv := vtenv.NewTestEnv() + env.ws = NewServer(venv, env.topoServ, env.tmc) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) @@ -98,7 +98,7 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M for _, ts := range ms.TableSettings { tableName := ts.TargetTable - table, err := sqlparser.TableFromStatement(ts.SourceExpression) + table, err := venv.Parser().TableFromStatement(ts.SourceExpression) if err == nil { tableName = table.Name.String() } @@ -115,22 +115,9 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M }}, } } - if ms.Workflow != "" { - env.expectValidation() - } return env } -func (env *testMaterializerEnv) expectValidation() { - for _, tablet := range env.tablets { - tabletID := int(tablet.Alias.Uid) - if tabletID < 200 { - continue - } - env.tmc.expectVRQuery(tabletID, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'", env.ms.TargetKeyspace, env.ms.Workflow), &sqltypes.Result{}) - } -} - func (env *testMaterializerEnv) close() { for _, t := range env.tablets { 
env.deleteTablet(t) @@ -183,8 +170,6 @@ type testMaterializerTMClient struct { mu sync.Mutex vrQueries map[int][]*queryResult createVReplicationWorkflowRequests map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest - getSchemaCounts map[string]int - muSchemaCount sync.Mutex // Used to confirm the number of times WorkflowDelete was called. workflowDeleteCalls int @@ -195,19 +180,6 @@ func newTestMaterializerTMClient() *testMaterializerTMClient { schema: make(map[string]*tabletmanagerdatapb.SchemaDefinition), vrQueries: make(map[int][]*queryResult), createVReplicationWorkflowRequests: make(map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest), - getSchemaCounts: make(map[string]int), - } -} - -func (tmc *testMaterializerTMClient) schemaRequested(uid uint32) { - tmc.muSchemaCount.Lock() - defer tmc.muSchemaCount.Unlock() - key := strconv.Itoa(int(uid)) - n, ok := tmc.getSchemaCounts[key] - if !ok { - tmc.getSchemaCounts[key] = 1 - } else { - tmc.getSchemaCounts[key] = n + 1 } } @@ -259,15 +231,7 @@ func (tmc *testMaterializerTMClient) DeleteVReplicationWorkflow(ctx context.Cont }, nil } -func (tmc *testMaterializerTMClient) getSchemaRequestCount(uid uint32) int { - tmc.muSchemaCount.Lock() - defer tmc.muSchemaCount.Unlock() - key := strconv.Itoa(int(uid)) - return tmc.getSchemaCounts[key] -} - func (tmc *testMaterializerTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { - tmc.schemaRequested(tablet.Alias.Uid) schemaDefn := &tabletmanagerdatapb.SchemaDefinition{} for _, table := range request.Tables { if table == "/.*/" { @@ -380,3 +344,58 @@ func (tmc *testMaterializerTMClient) VDiff(ctx context.Context, tablet *topodata }, }, nil } + +func (tmc *testMaterializerTMClient) HasVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) 
(*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + return &tabletmanagerdatapb.HasVReplicationWorkflowsResponse{ + Has: false, + }, nil +} + +func (tmc *testMaterializerTMClient) ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + workflowType := binlogdatapb.VReplicationWorkflowType_MoveTables + if len(req.IncludeWorkflows) > 0 { + for _, wf := range req.IncludeWorkflows { + if strings.Contains(wf, "lookup") { + workflowType = binlogdatapb.VReplicationWorkflowType_CreateLookupIndex + } + } + return &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{ + Workflows: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ + { + Workflow: req.IncludeWorkflows[0], + WorkflowType: workflowType, + Streams: []*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ + { + Id: 1, + State: binlogdatapb.VReplicationWorkflowState_Running, + Bls: &binlogdatapb.BinlogSource{ + Keyspace: "sourceks", + Shard: "0", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: ".*", + }, + }, + }, + }, + Pos: "MySQL56/" + position, + TimeUpdated: protoutil.TimeToProto(time.Now()), + TimeHeartbeat: protoutil.TimeToProto(time.Now()), + }, + }, + }, + }, + }, nil + } else { + return &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{}, nil + } +} + +func (tmc *testMaterializerTMClient) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{ + Result: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, nil +} diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go index fc39bb4d30b..9a43ea5ed7e 100644 --- a/go/vt/vtctl/workflow/materializer_test.go 
+++ b/go/vt/vtctl/workflow/materializer_test.go @@ -30,8 +30,9 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -42,42 +43,17 @@ import ( vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) -const getWorkflowQuery = "select id from _vt.vreplication where db_name='vt_targetks' and workflow='workflow'" -const mzUpdateQuery = "update _vt.vreplication set state='Running' where db_name='vt_targetks' and workflow='workflow'" -const mzSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='vt_targetks' and message='FROZEN' and workflow_sub_type != 1" -const mzCheckJournal = "/select val from _vt.resharding_journal where id=" -const mzGetWorkflowStatusQuery = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied from _vt.vreplication where workflow = 'workflow' and db_name = 'vt_targetks'" -const mzGetCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1" -const mzGetLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)" -const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys\) values ` -const eol = "$" +const ( + position = "9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97" + mzSelectFrozenQuery = "select 1 from 
_vt.vreplication where db_name='vt_targetks' and message='FROZEN' and workflow_sub_type != 1" + mzCheckJournal = "/select val from _vt.resharding_journal where id=" + mzGetCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1" + mzGetLatestCopyState = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)" + insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options\) values ` +) var ( defaultOnDDL = binlogdatapb.OnDDLAction_IGNORE.String() - binlogSource = &binlogdatapb.BinlogSource{ - Keyspace: "sourceks", - Shard: "0", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1", - }}, - }, - } - getWorkflowRes = sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", - "int64|blob|varchar|varchar|varchar|int64|int64|int64", - ), - fmt.Sprintf("1|%s||zone1|replica|1|0|1", binlogSource), - ) - getWorkflowStatusRes = sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied", - "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64", - ), - fmt.Sprintf("1|wf1|%s|MySQL56/9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97|NULL|0|running|vt_ks|1686577659|0|||1|0|0|0||0|10", binlogSource), - ) ) func TestStripForeignKeys(t *testing.T) { @@ -134,7 +110,7 @@ func TestStripForeignKeys(t 
*testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableForeignKeys(tc.ddl) + newDDL, err := stripTableForeignKeys(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -208,7 +184,7 @@ func TestStripConstraints(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableConstraints(tc.ddl) + newDDL, err := stripTableConstraints(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -219,6 +195,94 @@ func TestStripConstraints(t *testing.T) { } } +func TestStripAutoIncrement(t *testing.T) { + parser := sqlparser.NewTestParser() + + tcs := []struct { + desc string + ddl string + want string + expectErr bool + }{ + { + desc: "invalid DDL", + ddl: "CREATE TABLE `table1` (\n" + + "`id` massiveint NOT NULL,\n" + + "PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + expectErr: true, + }, + { + desc: "has auto increment", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int NOT NULL AUTO_INCREMENT,\n" + + "`c1` varchar(128),\n" + + "PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + want: "create table table1 (\n" + + "\tid int not null,\n" + + "\tc1 varchar(128),\n" + + "\tprimary key (id)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + }, + { + desc: "has no auto increment", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int NOT NULL,\n" + + "`c1` varchar(128),\n" + + "PRIMARY KEY (`id`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + want: "create table table1 (\n" + + "\tid int not null,\n" + + "\tc1 varchar(128),\n" + + "\tprimary key (id)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + }, + { + desc: "has auto increment with secondary key", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int NOT NULL auto_increment,\n" + + "`c1` varchar(128),\n" + + "`c2` varchar(128),\n" + + "UNIQUE KEY `c1` (`c1`),\n" + + "PRIMARY KEY (`id`)\n" + + ") 
ENGINE=InnoDB DEFAULT CHARSET=latin1;", + want: "create table table1 (\n" + + "\tid int not null,\n" + + "\tc1 varchar(128),\n" + + "\tc2 varchar(128),\n" + + "\tunique key c1 (c1),\n" + + "\tprimary key (id)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + }, + { + desc: "has auto increment with multi-col PK", + ddl: "CREATE TABLE `table1` (\n" + + "`id` int NOT NULL auto_increment,\n" + + "`c1` varchar(128) NOT NULL,\n" + + "`c2` varchar(128),\n" + + "PRIMARY KEY (`id`, `c2`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=latin1;", + want: "create table table1 (\n" + + "\tid int not null,\n" + + "\tc1 varchar(128) not null,\n" + + "\tc2 varchar(128),\n" + + "\tprimary key (id, c2)\n" + + ") ENGINE InnoDB,\n" + + " CHARSET latin1", + }, + } + + for _, tc := range tcs { + strippedDDL, err := stripAutoIncrement(tc.ddl, parser) + require.Equal(t, tc.expectErr, (err != nil), "unexpected error result", "expected error %t, got: %v", tc.expectErr, err) + require.Equal(t, tc.want, strippedDDL, fmt.Sprintf("stripped DDL %q does not match our expected result: %q", strippedDDL, tc.want)) + } +} + func TestAddTablesToVSchema(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -447,11 +511,7 @@ func TestMigrateVSchema(t *testing.T) { defer env.close() env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) _, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ @@ -501,26 +561,23 @@ func TestMoveTablesDDLFlag(t *testing.T) { defer env.close() // This is the default and go does not marshal defaults // for prototext 
fields so we use the default insert stmt. - //insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) - //env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) + // insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) + // env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) // TODO: we cannot test the actual query generated w/o having a // TabletManager. Importing the tabletmanager package, however, causes // a circular dependency. // The TabletManager portion is tested in rpc_vreplication_test.go. env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) targetShard, err := env.topoServ.GetShardNames(ctx, ms.TargetKeyspace) require.NoError(t, err) sourceShard, err := env.topoServ.GetShardNames(ctx, ms.SourceKeyspace) require.NoError(t, err) - want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97\" status:\"running\" info:\"VStream Lag: 0s\"}}} traffic_state:\"Reads Not Switched. Writes Not Switched\"", - ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0]) + want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"%s\" status:\"Running\" info:\"VStream Lag: 0s\"}}} traffic_state:\"Reads Not Switched. 
Writes Not Switched\"", + ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0], position) res, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ Workflow: ms.Workflow, @@ -553,26 +610,41 @@ func TestMoveTablesNoRoutingRules(t *testing.T) { defer env.close() // This is the default and go does not marshal defaults // for prototext fields so we use the default insert stmt. - //insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) - //env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) + // insert = fmt.Sprintf(`/insert into .vreplication\(.*on_ddl:%s.*`, onDDLAction) + // env.tmc.expectVRQuery(100, "/.*", &sqltypes.Result{}) // TODO: we cannot test the actual query generated w/o having a // TabletManager. Importing the tabletmanager package, however, causes // a circular dependency. // The TabletManager portion is tested in rpc_vreplication_test.go. env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, getWorkflowQuery, getWorkflowRes) env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzGetWorkflowStatusQuery, getWorkflowStatusRes) env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) targetShard, err := env.topoServ.GetShardNames(ctx, ms.TargetKeyspace) require.NoError(t, err) sourceShard, err := env.topoServ.GetShardNames(ctx, ms.SourceKeyspace) require.NoError(t, err) - want := fmt.Sprintf("shard_streams:{key:\"%s/%s\" value:{streams:{id:1 tablet:{cell:\"%s\" uid:200} source_shard:\"%s/%s\" position:\"9d10e6ec-07a0-11ee-ae73-8e53f4cf3083:1-97\" status:\"running\" info:\"VStream Lag: 0s\"}}} traffic_state:\"Reads Not Switched. 
Writes Not Switched\"", - ms.TargetKeyspace, targetShard[0], env.cell, ms.SourceKeyspace, sourceShard[0]) + want := &vtctldatapb.WorkflowStatusResponse{ + ShardStreams: map[string]*vtctldatapb.WorkflowStatusResponse_ShardStreams{ + fmt.Sprintf("%s/%s", ms.TargetKeyspace, targetShard[0]): { + Streams: []*vtctldatapb.WorkflowStatusResponse_ShardStreamState{ + { + Id: 1, + Tablet: &topodatapb.TabletAlias{ + Cell: env.cell, + Uid: 200, + }, + SourceShard: fmt.Sprintf("%s/%s", ms.SourceKeyspace, sourceShard[0]), + Position: position, + Status: binlogdatapb.VReplicationWorkflowState_Running.String(), + Info: "VStream Lag: 0s", + }, + }, + }, + }, + TrafficState: "Reads Not Switched. Writes Not Switched", + } res, err := env.ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ Workflow: ms.Workflow, @@ -582,7 +654,7 @@ func TestMoveTablesNoRoutingRules(t *testing.T) { NoRoutingRules: true, }) require.NoError(t, err) - require.Equal(t, want, fmt.Sprintf("%+v", res)) + require.EqualValues(t, want, res, "got: %+v, want: %+v", res, want) rr, err := env.ws.ts.GetRoutingRules(ctx) require.NoError(t, err) require.Zerof(t, len(rr.Rules), "routing rules should be empty, found %+v", rr.Rules) @@ -664,8 +736,9 @@ func TestCreateLookupVindexFull(t *testing.T) { } env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, "/CREATE TABLE `lookup`", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetCopyState, &sqltypes.Result{}) + env.tmc.expectVRQuery(200, mzGetLatestCopyState, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_targetks' and workflow='lookup'", &sqltypes.Result{}) @@ -2181,1288 +2254,185 @@ func TestCreateLookupVindexFailures(t *testing.T) { } } -func TestExternalizeLookupVindex(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - 
// Keyspace where the vindex is created. - SourceKeyspace: "sourceks", - // Keyspace where the lookup table and VReplication workflow is created. - TargetKeyspace: "targetks", - } - ctx, cancel := context.WithCancel(context.Background()) +// TestKeyRangesEqualOptimization tests that we optimize the source +// filtering when there's only one source shard for the stream and +// its keyrange is equal to the target shard for the stream. This +// means that even if the target keyspace is sharded, the source +// does not need to perform the in_keyrange filtering. +func TestKeyRangesEqualOptimization(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - sourceVschema := &vschemapb.Keyspace{ - Sharded: false, + workflow := "testwf" + cells := []string{"cell"} + sourceKs := "sourceks" + targetKs := "targetks" + table := "t1" + tableSettings := []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: table, + SourceExpression: fmt.Sprintf("select * from %s", table), + }} + targetVSchema := &vschemapb.Keyspace{ + Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "xxhash": { Type: "xxhash", }, - "owned_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.owned_lookup", - "from": "c1", - "to": "c2", - "write_only": "true", - }, - Owner: "t1", - }, - "unowned_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.unowned_lookup", - "from": "c1", - "to": "c2", - "write_only": "true", - }, - }, - "unqualified_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "unqualified", - "from": "c1", - "to": "c2", - }, - }, }, Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Name: "xxhash", - Column: "col1", - }, { - Name: "owned_lookup", - Column: "col2", - }}, + table: { + ColumnVindexes: 
[]*vschemapb.ColumnVindex{ + { + Column: "id", + Name: "xxhash", + }, + }, }, }, } - fields := sqltypes.MakeTestFields( - "id|state|message|source", - "int64|varbinary|varbinary|blob", - ) - ownedSourceStopAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"owned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}} stop_after_copy:true`, - ms.SourceKeyspace, ms.SourceKeyspace) - ownedSourceKeepRunningAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"owned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}}`, - ms.SourceKeyspace, ms.SourceKeyspace) - ownedRunning := sqltypes.MakeTestResult(fields, "1|Running|msg|"+ownedSourceKeepRunningAfterCopy) - ownedStopped := sqltypes.MakeTestResult(fields, "1|Stopped|Stopped after copy|"+ownedSourceStopAfterCopy) - unownedSourceStopAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"unowned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}} stop_after_copy:true`, - ms.SourceKeyspace, ms.SourceKeyspace) - unownedSourceKeepRunningAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"unowned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}}`, - ms.SourceKeyspace, ms.SourceKeyspace) - unownedRunning := sqltypes.MakeTestResult(fields, "2|Running|msg|"+unownedSourceKeepRunningAfterCopy) - unownedStopped := sqltypes.MakeTestResult(fields, "2|Stopped|Stopped after copy|"+unownedSourceStopAfterCopy) - testcases := []struct { - request *vtctldatapb.LookupVindexExternalizeRequest - vrResponse *sqltypes.Result - err string - expectedVschema *vschemapb.Keyspace - expectDelete bool + testCases := []struct { + name string + sourceShards []string + targetShards []string + moveTablesReq *vtctldatapb.MoveTablesCreateRequest + // Target Shards are in the order specifed in the targetShards slice + // with the UIDs starting at 200 and increasing by 10 for 
each tablet + // and shard since there's only a primary tablet per shard. + wantReqs map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest }{ { - request: &vtctldatapb.LookupVindexExternalizeRequest{ - Name: "owned_lookup", - Keyspace: ms.SourceKeyspace, - TableKeyspace: ms.TargetKeyspace, + name: "no in_keyrange filter -- partial, one equal shard", + moveTablesReq: &vtctldatapb.MoveTablesCreateRequest{ + Workflow: workflow, + TargetKeyspace: targetKs, + SourceKeyspace: sourceKs, + Cells: []string{"cell"}, + SourceShards: []string{"-80"}, // Partial MoveTables just for this shard + IncludeTables: []string{table}, }, - vrResponse: ownedStopped, - expectedVschema: &vschemapb.Keyspace{ - Vindexes: map[string]*vschemapb.Vindex{ - "owned_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.owned_lookup", - "from": "c1", - "to": "c2", + sourceShards: []string{"-80", "80-"}, + targetShards: []string{"-80", "80-"}, + wantReqs: map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + 200: { + Workflow: workflow, + WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType_Partial, + Cells: cells, + BinlogSource: []*binlogdatapb.BinlogSource{ + { + Keyspace: sourceKs, + Shard: "-80", // Keyranges are equal between the source and target + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: table, + Filter: fmt.Sprintf("select * from %s", table), + }, + }, + }, }, - Owner: "t1", }, + Options: "{}", }, }, - expectDelete: true, }, { - request: &vtctldatapb.LookupVindexExternalizeRequest{ - Name: "unowned_lookup", - Keyspace: ms.SourceKeyspace, - TableKeyspace: ms.TargetKeyspace, + name: "in_keyrange filter -- unequal shards", + moveTablesReq: &vtctldatapb.MoveTablesCreateRequest{ + Workflow: workflow, + TargetKeyspace: targetKs, + SourceKeyspace: sourceKs, + Cells: []string{"cell"}, + IncludeTables: []string{table}, }, - vrResponse: 
unownedStopped, - expectedVschema: &vschemapb.Keyspace{ - Vindexes: map[string]*vschemapb.Vindex{ - "unowned_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.unowned_lookup", - "from": "c1", - "to": "c2", + sourceShards: []string{"-"}, + targetShards: []string{"-80", "80-"}, + wantReqs: map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + 200: { + Workflow: workflow, + WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + Cells: cells, + BinlogSource: []*binlogdatapb.BinlogSource{ + { + Keyspace: sourceKs, + Shard: "-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: table, + Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '-80')", table, targetKs), + }, + }, + }, + }, + }, + Options: "{}", + }, + 210: { + Workflow: workflow, + WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + Cells: cells, + BinlogSource: []*binlogdatapb.BinlogSource{ + { + Keyspace: sourceKs, + Shard: "-", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: table, + Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '80-')", table, targetKs), + }, + }, + }, }, }, + Options: "{}", }, }, - err: "is not in Running state", }, { - request: &vtctldatapb.LookupVindexExternalizeRequest{ - Name: "owned_lookup", - Keyspace: ms.SourceKeyspace, - TableKeyspace: ms.TargetKeyspace, + name: "in_keyrange filter -- unequal shards on merge", + moveTablesReq: &vtctldatapb.MoveTablesCreateRequest{ + Workflow: workflow, + TargetKeyspace: targetKs, + SourceKeyspace: sourceKs, + Cells: []string{"cell"}, + IncludeTables: []string{table}, }, - vrResponse: ownedRunning, - expectedVschema: &vschemapb.Keyspace{ - Vindexes: map[string]*vschemapb.Vindex{ - "owned_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.owned_lookup", - "from": "c1", - "to": "c2", - }, - Owner: "t1", - }, - }, - }, - expectDelete: true, - 
}, - { - request: &vtctldatapb.LookupVindexExternalizeRequest{ - Name: "unowned_lookup", - Keyspace: ms.SourceKeyspace, - TableKeyspace: ms.TargetKeyspace, - }, - vrResponse: unownedRunning, - expectedVschema: &vschemapb.Keyspace{ - Vindexes: map[string]*vschemapb.Vindex{ - "unowned_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.unowned_lookup", - "from": "c1", - "to": "c2", - }, - }, - }, - }, - }, - { - request: &vtctldatapb.LookupVindexExternalizeRequest{ - Name: "absent_lookup", - Keyspace: ms.SourceKeyspace, - TableKeyspace: ms.TargetKeyspace, - }, - expectedVschema: &vschemapb.Keyspace{ - Vindexes: map[string]*vschemapb.Vindex{ - "absent_lookup": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "targetks.absent_lookup", - "from": "c1", - "to": "c2", - }, - }, - }, - }, - err: "vindex absent_lookup not found in the sourceks keyspace", - }, - } - for _, tcase := range testcases { - t.Run(tcase.request.Name, func(t *testing.T) { - // Resave the source schema for every iteration. 
- err := env.topoServ.SaveVSchema(ctx, tcase.request.Keyspace, sourceVschema) - require.NoError(t, err) - err = env.topoServ.RebuildSrvVSchema(ctx, []string{env.cell}) - require.NoError(t, err) - - validationQuery := fmt.Sprintf("select id, state, message, source from _vt.vreplication where workflow='%s' and db_name='vt_%s'", - tcase.request.Name, ms.TargetKeyspace) - env.tmc.expectVRQuery(200, validationQuery, tcase.vrResponse) - env.tmc.expectVRQuery(210, validationQuery, tcase.vrResponse) - - preWorkflowDeleteCalls := env.tmc.workflowDeleteCalls - _, err = env.ws.LookupVindexExternalize(ctx, tcase.request) - if tcase.err != "" { - if err == nil || !strings.Contains(err.Error(), tcase.err) { - require.FailNow(t, "LookupVindexExternalize error", "ExternalizeVindex(%v) err: %v, must contain %v", tcase.request, err, tcase.err) - } - return - } - require.NoError(t, err) - expectedWorkflowDeleteCalls := preWorkflowDeleteCalls - if tcase.expectDelete { - // We expect the RPC to be called on each target shard. - expectedWorkflowDeleteCalls = preWorkflowDeleteCalls + (len(env.targets)) - } - require.Equal(t, expectedWorkflowDeleteCalls, env.tmc.workflowDeleteCalls) - - aftervschema, err := env.topoServ.GetVSchema(ctx, ms.SourceKeyspace) - require.NoError(t, err) - vindex := aftervschema.Vindexes[tcase.request.Name] - expectedVindex := tcase.expectedVschema.Vindexes[tcase.request.Name] - require.NotNil(t, vindex, "vindex %s not found in vschema", tcase.request.Name) - require.NotContains(t, vindex.Params, "write_only", tcase.request) - require.Equal(t, expectedVindex, vindex, "vindex mismatch. 
expected: %+v, got: %+v", expectedVindex, vindex) - }) - } -} - -func TestMaterializerOneToOne(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{ - { - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }, - { - TargetTable: "t2", - SourceExpression: "select * from t3", - CreateDdl: "t2ddl", - }, - { - TargetTable: "t4", - SourceExpression: "", // empty - CreateDdl: "t4ddl", - }, - }, - Cell: "zone1", - TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ - topodatapb.TabletType_PRIMARY, - topodatapb.TabletType_RDONLY, - }), - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `\(`+ - `'workflow', `+ - (`'keyspace:\\"sourceks\\" shard:\\"0\\" `+ - `filter:{`+ - `rules:{match:\\"t1\\" filter:\\"select.*t1\\"} `+ - `rules:{match:\\"t2\\" filter:\\"select.*t3\\"} `+ - `rules:{match:\\"t4\\"}`+ - `}', `)+ - `'', [0-9]*, [0-9]*, 'zone1', 'primary,rdonly', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false`+ - `\)`+eol, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerManyToOne(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }, { - TargetTable: "t2", - SourceExpression: "select * from t3", - CreateDdl: "t2ddl", - }}, - } - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"-80", "80-"}, []string{"0"}) - defer env.close() - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"-80\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ - `, `+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"80-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ - eol, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerOneToMany(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "xxhash": { - Type: "xxhash", - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "xxhash", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, 
&sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery( - 210, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerManyToMany(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"-40", "40-"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "xxhash": { - Type: "xxhash", - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "xxhash", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`+ - `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*-80.*`, - &sqltypes.Result{}, - ) - 
env.tmc.expectVRQuery( - 210, - insertPrefix+ - `.*shard:\\"-40\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`+ - `.*shard:\\"40-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1.*targetks\.xxhash.*80-.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerMulticolumnVindex(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "region": { - Type: "region_experimental", - Params: map[string]string{ - "region_bytes": "1", - }, - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Columns: []string{"c1", "c2"}, - Name: "region", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1, c2.*targetks\.region.*-80.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery( - 210, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where 
in_keyrange\(c1, c2.*targetks\.region.*80-.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerDeploySchema(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }, { - TargetTable: "t2", - SourceExpression: "select * from t3", - CreateDdl: "t2ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - delete(env.tmc.schema, "targetks.t2") - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, `t2ddl`, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ - eol, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) - require.Equal(t, env.tmc.getSchemaRequestCount(100), 1) - require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) -} - -func TestMaterializerCopySchema(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "copy", - }, { - TargetTable: 
"t2", - SourceExpression: "select * from t3", - CreateDdl: "t2ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - delete(env.tmc.schema, "targetks.t1") - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, `t1_schema`, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ - eol, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) - require.Equal(t, env.tmc.getSchemaRequestCount(100), 1) - require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) - -} - -func TestMaterializerExplicitColumns(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select c1, c1+c2, c2 from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "region": { - Type: "region_experimental", - Params: map[string]string{ - "region_bytes": "1", - }, - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Columns: []string{"c1", "c2"}, - Name: "region", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); 
err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1, c2.*targetks\.region.*-80.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery( - 210, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c1, c2.*targetks\.region.*80-.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerRenamedColumns(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select c3 as c1, c1+c2, c4 as c2 from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "region": { - Type: "region_experimental", - Params: map[string]string{ - "region_bytes": "1", - }, - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Columns: []string{"c1", "c2"}, - Name: "region", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery( - 200, - insertPrefix+ - 
`.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c3, c4.*targetks\.region.*-80.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery( - 210, - insertPrefix+ - `.*shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1 where in_keyrange\(c3, c4.*targetks\.region.*80-.*`, - &sqltypes.Result{}, - ) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerStopAfterCopy(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - StopAfterCopy: true, - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }, { - TargetTable: "t2", - SourceExpression: "select * from t3", - CreateDdl: "t2ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, insertPrefix+`.*stop_after_copy:true`, &sqltypes.Result{}) - env.tmc.expectVRQuery(200, mzUpdateQuery, &sqltypes.Result{}) - - err := env.ws.Materialize(ctx, ms) - require.NoError(t, err) - env.tmc.verifyQueries(t) -} - -func TestMaterializerNoTargetVSchema(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - 
defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "table t1 not found in vschema for keyspace targetks") -} - -func TestMaterializerNoDDL(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - delete(env.tmc.schema, "targetks.t1") - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") - require.Equal(t, env.tmc.getSchemaRequestCount(100), 0) - require.Equal(t, env.tmc.getSchemaRequestCount(200), 1) - -} - -func TestMaterializerNoSourcePrimary(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "copy", - }}, - } - sources := []string{"0"} - targets := []string{"0"} - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Copied from newTestMaterializerEnv - env := &testMaterializerEnv{ - ms: ms, - sources: sources, - targets: targets, - tablets: make(map[int]*topodatapb.Tablet), - topoServ: memorytopo.NewServer(ctx, "cell"), - cell: "cell", 
- tmc: newTestMaterializerTMClient(), - } - env.ws = NewServer(env.topoServ, env.tmc) - defer env.close() - - tabletID := 100 - for _, shard := range sources { - _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_REPLICA) - tabletID += 10 - } - tabletID = 200 - for _, shard := range targets { - _ = env.addTablet(tabletID, env.ms.TargetKeyspace, shard, topodatapb.TabletType_PRIMARY) - tabletID += 10 - } - - // Skip the schema creation part. - - env.expectValidation() - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "shard must have a primary for copying schema: 0") -} - -func TestMaterializerTableMismatchNonCopy(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t2", - CreateDdl: "", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - delete(env.tmc.schema, "targetks.t1") - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") -} - -func TestMaterializerTableMismatchCopy(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t2", - CreateDdl: "copy", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - delete(env.tmc.schema, "targetks.t1") - 
- env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "source and target table names must match for copying schema: t2 vs t1") -} - -func TestMaterializerNoSourceTable(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "copy", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - delete(env.tmc.schema, "targetks.t1") - delete(env.tmc.schema, "sourceks.t1") - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "source table t1 does not exist") -} - -func TestMaterializerSyntaxError(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "bad query", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "syntax error at position 4 near 'bad'") -} - -func TestMaterializerNotASelect(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "update t1 set val=1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "unrecognized statement: update t1 set val=1") -} - -func TestMaterializerNoGoodVindex(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select * from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "lookup_unique": { - Type: "lookup_unique", - Params: map[string]string{ - "table": "t1", - "from": "c1", - "to": "c2", - }, - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "lookup_unique", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "could not find a vindex to compute keyspace id for table t1") -} - -func TestMaterializerComplexVindexExpression(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select a+b as c1 from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "xxhash": { - Type: "xxhash", - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "xxhash", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "vindex column cannot be a complex expression: a + b as c1") -} - -func TestMaterializerNoVindexInExpression(t *testing.T) { - ms := &vtctldatapb.MaterializeSettings{ - Workflow: "workflow", - SourceKeyspace: "sourceks", - TargetKeyspace: "targetks", - TableSettings: []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: "t1", - SourceExpression: "select c2 from t1", - CreateDdl: "t1ddl", - }}, - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() - - vs := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "xxhash": { - Type: "xxhash", - }, - }, - Tables: map[string]*vschemapb.Table{ - "t1": { - ColumnVindexes: []*vschemapb.ColumnVindex{{ - Column: "c1", - Name: "xxhash", - }}, - }, - }, - } - - if err := env.topoServ.SaveVSchema(context.Background(), "targetks", vs); err != nil { - t.Fatal(err) - } - - env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) - env.tmc.expectVRQuery(210, mzSelectFrozenQuery, &sqltypes.Result{}) - err := env.ws.Materialize(ctx, ms) - require.EqualError(t, err, "could not find vindex column c1") -} - -// 
TestKeyRangesEqualOptimization tests that we optimize the source -// filtering when there's only one source shard for the stream and -// its keyrange is equal to the target shard for the stream. This -// means that even if the target keyspace is sharded, the source -// does not need to perform the in_keyrange filtering. -func TestKeyRangesEqualOptimization(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - workflow := "testwf" - cells := []string{"cell"} - sourceKs := "sourceks" - targetKs := "targetks" - table := "t1" - tableSettings := []*vtctldatapb.TableMaterializeSettings{{ - TargetTable: table, - SourceExpression: fmt.Sprintf("select * from %s", table), - }} - targetVSchema := &vschemapb.Keyspace{ - Sharded: true, - Vindexes: map[string]*vschemapb.Vindex{ - "xxhash": { - Type: "xxhash", - }, - }, - Tables: map[string]*vschemapb.Table{ - table: { - ColumnVindexes: []*vschemapb.ColumnVindex{ - { - Column: "id", - Name: "xxhash", - }, - }, - }, - }, - } - - testCases := []struct { - name string - sourceShards []string - targetShards []string - moveTablesReq *vtctldatapb.MoveTablesCreateRequest - // Target Shards are in the order specifed in the targetShards slice - // with the UIDs starting at 200 and increasing by 10 for each tablet - // and shard since there's only a primary tablet per shard. 
- wantReqs map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest - }{ - { - name: "no in_keyrange filter -- partial, one equal shard", - moveTablesReq: &vtctldatapb.MoveTablesCreateRequest{ - Workflow: workflow, - TargetKeyspace: targetKs, - SourceKeyspace: sourceKs, - Cells: []string{"cell"}, - SourceShards: []string{"-80"}, // Partial MoveTables just for this shard - IncludeTables: []string{table}, - }, - sourceShards: []string{"-80", "80-"}, - targetShards: []string{"-80", "80-"}, - wantReqs: map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ - 200: { - Workflow: workflow, - WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, - WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType_Partial, - Cells: cells, - BinlogSource: []*binlogdatapb.BinlogSource{ - { - Keyspace: sourceKs, - Shard: "-80", // Keyranges are equal between the source and target - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{ - { - Match: table, - Filter: fmt.Sprintf("select * from %s", table), - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "in_keyrange filter -- unequal shards", - moveTablesReq: &vtctldatapb.MoveTablesCreateRequest{ - Workflow: workflow, - TargetKeyspace: targetKs, - SourceKeyspace: sourceKs, - Cells: []string{"cell"}, - IncludeTables: []string{table}, - }, - sourceShards: []string{"-"}, - targetShards: []string{"-80", "80-"}, - wantReqs: map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ - 200: { - Workflow: workflow, - WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, - Cells: cells, - BinlogSource: []*binlogdatapb.BinlogSource{ - { - Keyspace: sourceKs, - Shard: "-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{ - { - Match: table, - Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '-80')", table, targetKs), - }, - }, - }, - }, - }, - }, - 210: { - Workflow: workflow, - WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, - Cells: 
cells, - BinlogSource: []*binlogdatapb.BinlogSource{ - { - Keyspace: sourceKs, - Shard: "-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{ - { - Match: table, - Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '80-')", table, targetKs), - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "in_keyrange filter -- unequal shards on merge", - moveTablesReq: &vtctldatapb.MoveTablesCreateRequest{ - Workflow: workflow, - TargetKeyspace: targetKs, - SourceKeyspace: sourceKs, - Cells: []string{"cell"}, - IncludeTables: []string{table}, - }, - sourceShards: []string{"-80", "80-"}, - targetShards: []string{"-"}, - wantReqs: map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ - 200: { - Workflow: workflow, - WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, - Cells: cells, - BinlogSource: []*binlogdatapb.BinlogSource{ - { - Keyspace: sourceKs, - Shard: "-80", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{ - { - Match: table, - Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '-')", table, targetKs), - }, - }, - }, - }, - { - Keyspace: sourceKs, - Shard: "80-", - Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{ - { - Match: table, - Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '-')", table, targetKs), - }, - }, - }, + sourceShards: []string{"-80", "80-"}, + targetShards: []string{"-"}, + wantReqs: map[uint32]*tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + 200: { + Workflow: workflow, + WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + Cells: cells, + BinlogSource: []*binlogdatapb.BinlogSource{ + { + Keyspace: sourceKs, + Shard: "-80", + Filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{ + { + Match: table, + Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '-')", table, targetKs), + }, + }, + }, + }, + { + Keyspace: sourceKs, + Shard: "80-", + Filter: &binlogdatapb.Filter{ 
+ Rules: []*binlogdatapb.Rule{ + { + Match: table, + Filter: fmt.Sprintf("select * from %s where in_keyrange(id, '%s.xxhash', '-')", table, targetKs), + }, + }, + }, }, }, + Options: "{}", }, }, }, @@ -3496,6 +2466,7 @@ func TestKeyRangesEqualOptimization(t *testing.T) { }, }, }, + Options: "{}", }, 210: { Workflow: workflow, @@ -3515,6 +2486,7 @@ func TestKeyRangesEqualOptimization(t *testing.T) { }, }, }, + Options: "{}", }, }, }, @@ -3547,7 +2519,6 @@ func TestKeyRangesEqualOptimization(t *testing.T) { if tablet.Keyspace != targetKs || tablet.Type != topodatapb.TabletType_PRIMARY { continue } - env.tmc.expectVRQuery(int(tablet.Alias.Uid), mzSelectFrozenQuery, &sqltypes.Result{}) // If we are doing a partial MoveTables, we will only perform the workflow // stream creation / INSERT statment on the shard(s) we're migrating. if len(tc.moveTablesReq.SourceShards) > 0 && !slices.Contains(tc.moveTablesReq.SourceShards, tablet.Shard) { @@ -3563,9 +2534,20 @@ func TestKeyRangesEqualOptimization(t *testing.T) { tmc: env.tmc, ms: ms, workflowType: workflowType, + env: vtenv.NewTestEnv(), } - err = mz.createMoveTablesStreams(tc.moveTablesReq) - require.NoError(t, err, "createMoveTablesStreams failed: %v", err) + err = mz.createWorkflowStreams(&tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + Workflow: tc.moveTablesReq.Workflow, + Cells: tc.moveTablesReq.Cells, + TabletTypes: tc.moveTablesReq.TabletTypes, + TabletSelectionPreference: tc.moveTablesReq.TabletSelectionPreference, + WorkflowType: workflowType, + DeferSecondaryKeys: tc.moveTablesReq.DeferSecondaryKeys, + AutoStart: tc.moveTablesReq.AutoStart, + StopAfterCopy: tc.moveTablesReq.StopAfterCopy, + Options: "{}", + }) + require.NoError(t, err, "createWorkflowStreams failed: %v", err) }) } } diff --git a/go/vt/vtctl/workflow/resharder.go b/go/vt/vtctl/workflow/resharder.go index 161b1c4567d..95fcea3a2a9 100644 --- a/go/vt/vtctl/workflow/resharder.go +++ b/go/vt/vtctl/workflow/resharder.go @@ -19,15 +19,14 @@ 
package workflow import ( "context" - "errors" "fmt" + "slices" "sync" "time" - "google.golang.org/protobuf/encoding/prototext" - - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/concurrency" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/topo" @@ -37,7 +36,9 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) type resharder struct { @@ -60,14 +61,15 @@ type resharder struct { } type refStream struct { - workflow string - bls *binlogdatapb.BinlogSource - cell string - tabletTypes string + workflow string + bls *binlogdatapb.BinlogSource + cell string + tabletTypes string + workflowType binlogdatapb.VReplicationWorkflowType + workflowSubType binlogdatapb.VReplicationWorkflowSubType } func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, sources, targets []string, cell, tabletTypes string) (*resharder, error) { - ts := s.ts rs := &resharder{ s: s, keyspace: keyspace, @@ -78,7 +80,7 @@ func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, tabletTypes: tabletTypes, } for _, shard := range sources { - si, err := ts.GetShard(ctx, keyspace, shard) + si, err := s.ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) } @@ -86,14 +88,14 @@ func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, return nil, fmt.Errorf("source shard %v is not in serving state", shard) } rs.sourceShards = append(rs.sourceShards, si) - primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + primary, err := s.ts.GetTablet(ctx, si.PrimaryAlias) if err != nil { return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", 
si.PrimaryAlias) } rs.sourcePrimaries[si.ShardName()] = primary } for _, shard := range targets { - si, err := ts.GetShard(ctx, keyspace, shard) + si, err := s.ts.GetShard(ctx, keyspace, shard) if err != nil { return nil, vterrors.Wrapf(err, "GetShard(%s) failed", shard) } @@ -101,7 +103,7 @@ func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, return nil, fmt.Errorf("target shard %v is in serving state", shard) } rs.targetShards = append(rs.targetShards, si) - primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + primary, err := s.ts.GetTablet(ctx, si.PrimaryAlias) if err != nil { return nil, vterrors.Wrapf(err, "GetTablet(%s) failed", si.PrimaryAlias) } @@ -114,7 +116,7 @@ func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, return nil, vterrors.Wrap(err, "validateTargets") } - vschema, err := ts.GetVSchema(ctx, keyspace) + vschema, err := s.ts.GetVSchema(ctx, keyspace) if err != nil { return nil, vterrors.Wrap(err, "GetVSchema") } @@ -126,16 +128,18 @@ func (s *Server) buildResharder(ctx context.Context, keyspace, workflow string, return rs, nil } +// validateTargets ensures that the target shards have no existing +// VReplication workflow streams as that is an invalid starting +// state for the non-serving shards involved in a Reshard. 
func (rs *resharder) validateTargets(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetPrimary := rs.targetPrimaries[target.ShardName()] - query := fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s", encodeString(targetPrimary.DbName())) - p3qr, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query) + res, err := rs.s.tmc.HasVReplicationWorkflows(ctx, targetPrimary.Tablet, &tabletmanagerdatapb.HasVReplicationWorkflowsRequest{}) if err != nil { - return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + return vterrors.Wrapf(err, "HasVReplicationWorkflows(%v)", targetPrimary.Tablet) } - if len(p3qr.Rows) != 0 { - return errors.New("some streams already exist in the target shards, please clean them up and retry the command") + if res.Has { + return vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, "some streams already exist in the target shards, please clean them up and retry the command") } return nil }) @@ -147,12 +151,13 @@ func (rs *resharder) readRefStreams(ctx context.Context) error { err := rs.forAll(rs.sourceShards, func(source *topo.ShardInfo) error { sourcePrimary := rs.sourcePrimaries[source.ShardName()] - query := fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name=%s and message != 'FROZEN'", encodeString(sourcePrimary.DbName())) - p3qr, err := rs.s.tmc.VReplicationExec(ctx, sourcePrimary.Tablet, query) + req := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + ExcludeFrozen: true, + } + res, err := rs.s.tmc.ReadVReplicationWorkflows(ctx, sourcePrimary.Tablet, req) if err != nil { - return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", sourcePrimary.Tablet, query) + return vterrors.Wrapf(err, "ReadVReplicationWorkflows(%v, %+v)", sourcePrimary.Tablet, req) } - qr := sqltypes.Proto3ToResult(p3qr) mu.Lock() defer mu.Unlock() @@ -169,44 +174,39 @@ func (rs *resharder) readRefStreams(ctx context.Context) 
error { ref[k] = true } } - for _, row := range qr.Rows { - - workflow := row[0].ToString() - if workflow == "" { + for _, workflow := range res.Workflows { + if workflow.Workflow == "" { return fmt.Errorf("VReplication streams must have named workflows for migration: shard: %s:%s", source.Keyspace(), source.ShardName()) } - var bls binlogdatapb.BinlogSource - rowBytes, err := row[1].ToBytes() - if err != nil { - return err - } - if err := prototext.Unmarshal(rowBytes, &bls); err != nil { - return vterrors.Wrapf(err, "prototext.Unmarshal: %v", row) - } - isReference, err := rs.blsIsReference(&bls) - if err != nil { - return vterrors.Wrap(err, "blsIsReference") - } - if !isReference { - continue - } - refKey := fmt.Sprintf("%s:%s:%s", workflow, bls.Keyspace, bls.Shard) - if mustCreate { - rs.refStreams[refKey] = &refStream{ - workflow: workflow, - bls: &bls, - cell: row[2].ToString(), - tabletTypes: row[3].ToString(), + for _, stream := range workflow.Streams { + bls := stream.Bls + isReference, err := rs.blsIsReference(bls) + if err != nil { + return vterrors.Wrap(err, "blsIsReference") + } + if !isReference { + continue } - } else { - if !ref[refKey] { - return fmt.Errorf("streams are mismatched across source shards for workflow: %s", workflow) + refKey := fmt.Sprintf("%s:%s:%s", workflow.Workflow, bls.Keyspace, bls.Shard) + if mustCreate { + rs.refStreams[refKey] = &refStream{ + workflow: workflow.Workflow, + bls: bls, + cell: workflow.Cells, + tabletTypes: discovery.BuildTabletTypesString(workflow.TabletTypes, workflow.TabletSelectionPreference), + workflowType: workflow.WorkflowType, + workflowSubType: workflow.WorkflowSubType, + } + } else { + if !ref[refKey] { + return fmt.Errorf("streams are mismatched across source shards for workflow: %s", workflow) + } + delete(ref, refKey) } - delete(ref, refKey) } - } - if len(ref) != 0 { - return fmt.Errorf("streams are mismatched across source shards: %v", ref) + if len(ref) != 0 { + return fmt.Errorf("streams are 
mismatched across source shards: %v", ref) + } } return nil }) @@ -260,6 +260,8 @@ func (rs *resharder) copySchema(ctx context.Context) error { return err } +// createStreams creates all of the VReplication streams that +// need to now exist on the new shards. func (rs *resharder) createStreams(ctx context.Context) error { var excludeRules []*binlogdatapb.Rule for tableName, table := range rs.vschema.Tables { @@ -276,8 +278,8 @@ func (rs *resharder) createStreams(ctx context.Context) error { ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, targetPrimary.DbName()) - // copy excludeRules to prevent data race. - copyExcludeRules := append([]*binlogdatapb.Rule(nil), excludeRules...) + // Clone excludeRules to prevent data races. + copyExcludeRules := slices.Clone(excludeRules) for _, source := range rs.sourceShards { if !key.KeyRangeIntersect(target.KeyRange, source.KeyRange) { continue @@ -303,9 +305,8 @@ func (rs *resharder) createStreams(ctx context.Context) error { for _, rstream := range rs.refStreams { ig.AddRow(rstream.workflow, rstream.bls, "", rstream.cell, rstream.tabletTypes, - // TODO: fix based on original stream. 
- binlogdatapb.VReplicationWorkflowType_Reshard, - binlogdatapb.VReplicationWorkflowSubType_None, + rstream.workflowType, + rstream.workflowSubType, rs.deferSecondaryKeys) } query := ig.String() @@ -321,9 +322,20 @@ func (rs *resharder) createStreams(ctx context.Context) error { func (rs *resharder) startStreams(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetPrimary := rs.targetPrimaries[target.ShardName()] - query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s", encodeString(targetPrimary.DbName())) - if _, err := rs.s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { - return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + // This is the rare case where we truly want to update every stream/record + // because we've already confirmed that there were no existing workflows + // on the shards when we started, and we want to start all of the ones + // that we've created on the new shards as we're migrating them. + req := &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + AllWorkflows: true, + State: binlogdatapb.VReplicationWorkflowState_Running, + // We don't want to update anything else so use simulated NULLs. + Message: textutil.SimulatedNullString, + StopPosition: textutil.SimulatedNullString, + } + if _, err := rs.s.tmc.UpdateVReplicationWorkflows(ctx, targetPrimary.Tablet, req); err != nil { + return vterrors.Wrapf(err, "UpdateVReplicationWorkflows(%v, 'state='%s')", + targetPrimary.Tablet, binlogdatapb.VReplicationWorkflowState_Running.String()) } return nil }) diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index 6927b56b89d..17b01736a77 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -17,8 +17,8 @@ limitations under the License. 
package workflow import ( - "bytes" "context" + "encoding/json" "errors" "fmt" "math" @@ -31,6 +31,7 @@ import ( "time" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" @@ -54,7 +55,9 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/schematools" + "vitess.io/vitess/go/vt/vtctl/workflow/common" "vitess.io/vitess/go/vt/vtctl/workflow/vexec" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" @@ -71,6 +74,17 @@ import ( vttimepb "vitess.io/vitess/go/vt/proto/vttime" ) +const ( + // We don't use a suffix for the primary tablet types in routing rules. + primaryTabletSuffix = "" + replicaTabletSuffix = "@replica" + rdonlyTabletSuffix = "@rdonly" + // Globally routable tables don't have a keyspace prefix. + globalTableQualifier = "" +) + +var tabletTypeSuffixes = []string{primaryTabletSuffix, replicaTabletSuffix, rdonlyTabletSuffix} + // tableCopyProgress stores the row counts and disk sizes of the source and target tables type tableCopyProgress struct { TargetRowCount, TargetTableSize int64 @@ -142,19 +156,25 @@ var ( type Server struct { ts *topo.Server tmc tmclient.TabletManagerClient - // Limt the number of concurrent background goroutines if needed. + // Limit the number of concurrent background goroutines if needed. sem *semaphore.Weighted + env *vtenv.Environment } // NewServer returns a new server instance with the given topo.Server and // TabletManagerClient. 
-func NewServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Server { +func NewServer(env *vtenv.Environment, ts *topo.Server, tmc tmclient.TabletManagerClient) *Server { return &Server{ ts: ts, tmc: tmc, + env: env, } } +func (s *Server) SQLParser() *sqlparser.Parser { + return s.env.Parser() +} + // CheckReshardingJournalExistsOnTablet returns the journal (or an empty // journal) and a boolean to indicate if the resharding_journal table exists on // the given tablet. @@ -338,15 +358,19 @@ func (s *Server) GetCellsWithTableReadsSwitched( return cellsSwitched, cellsNotSwitched, nil } -func (s *Server) GetWorkflow(ctx context.Context, keyspace, workflow string, includeLogs bool) (*vtctldatapb.Workflow, error) { +func (s *Server) GetWorkflow(ctx context.Context, keyspace, workflow string, includeLogs bool, shards []string) (*vtctldatapb.Workflow, error) { res, err := s.GetWorkflows(ctx, &vtctldatapb.GetWorkflowsRequest{ Keyspace: keyspace, Workflow: workflow, IncludeLogs: includeLogs, + Shards: shards, }) if err != nil { return nil, err } + if res == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "%s workflow not found in the %s keyspace", workflow, keyspace) + } if len(res.Workflows) != 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected number of workflows returned for %s.%s; expected 1, got %d", keyspace, workflow, len(res.Workflows)) @@ -366,299 +390,295 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows defer span.Finish() span.Annotate("keyspace", req.Keyspace) + span.Annotate("workflow", req.Workflow) span.Annotate("active_only", req.ActiveOnly) span.Annotate("include_logs", req.IncludeLogs) + span.Annotate("shards", req.Shards) - where := "" - predicates := []string{} + readReq := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{} + if req.Workflow != "" { + readReq.IncludeWorkflows = []string{req.Workflow} + } if req.ActiveOnly { - predicates = append(predicates, "state <> 'Stopped'") + 
readReq.ExcludeStates = []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped} } - if req.Workflow != "" { - predicates = append(predicates, fmt.Sprintf("workflow = '%s'", req.Workflow)) - } - if len(predicates) > 0 { - where = fmt.Sprintf("WHERE %s", strings.Join(predicates, " AND ")) - } - - query := fmt.Sprintf(` - SELECT - id, - workflow, - source, - pos, - stop_pos, - max_replication_lag, - state, - db_name, - time_updated, - transaction_timestamp, - message, - tags, - workflow_type, - workflow_sub_type, - time_heartbeat, - defer_secondary_keys, - component_throttled, - time_throttled, - rows_copied - FROM - _vt.vreplication - %s`, - where, - ) - vx := vexec.NewVExec(req.Keyspace, "", s.ts, s.tmc) - results, err := vx.QueryContext(ctx, query) + // Guards access to the maps used throughout. + m := sync.Mutex{} + + shards, err := common.GetShards(ctx, s.ts, req.Keyspace, req.Shards) if err != nil { return nil, err } + results := make(map[*topo.TabletInfo]*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, len(shards)) + readWorkflowsEg, readWorkflowsCtx := errgroup.WithContext(ctx) + for _, shard := range shards { + readWorkflowsEg.Go(func() error { + si, err := s.ts.GetShard(readWorkflowsCtx, req.Keyspace, shard) + if err != nil { + return err + } + if si.PrimaryAlias == nil { + return fmt.Errorf("%w %s/%s", vexec.ErrNoShardPrimary, req.Keyspace, shard) + } + primary, err := s.ts.GetTablet(readWorkflowsCtx, si.PrimaryAlias) + if err != nil { + return err + } + if primary == nil { + return fmt.Errorf("%w %s/%s: tablet %v not found", vexec.ErrNoShardPrimary, req.Keyspace, shard, topoproto.TabletAliasString(si.PrimaryAlias)) + } + // Clone the request so that we can set the correct DB name for tablet. 
+ req := readReq.CloneVT() + wres, err := s.tmc.ReadVReplicationWorkflows(readWorkflowsCtx, primary.Tablet, req) + if err != nil { + return err + } + m.Lock() + defer m.Unlock() + results[primary] = wres + return nil + }) + } + if readWorkflowsEg.Wait() != nil { + return nil, err + } - m := sync.Mutex{} // guards access to the following maps during concurrent calls to scanWorkflow - workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results)) - sourceKeyspaceByWorkflow := make(map[string]string, len(results)) - sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results)) - targetKeyspaceByWorkflow := make(map[string]string, len(results)) - targetShardsByWorkflow := make(map[string]sets.Set[string], len(results)) - maxVReplicationLagByWorkflow := make(map[string]float64, len(results)) - maxVReplicationTransactionLagByWorkflow := make(map[string]float64, len(results)) + copyStatesByShardStreamId := make(map[string][]*vtctldatapb.Workflow_Stream_CopyState, len(results)) - // We guarantee the following invariants when this function is called for a - // given workflow: - // - workflow.Name != "" (more precisely, ".Name is set 'properly'") - // - workflowsMap[workflow.Name] == workflow - // - sourceShardsByWorkflow[workflow.Name] != nil - // - targetShardsByWorkflow[workflow.Name] != nil - // - workflow.ShardStatuses != nil - scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, row sqltypes.RowNamedValues, tablet *topo.TabletInfo) error { - span, ctx := trace.NewSpan(ctx, "workflow.Server.scanWorkflow") + fetchCopyStates := func(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) error { + span, ctx := trace.NewSpan(ctx, "workflow.Server.fetchCopyStates") defer span.Finish() span.Annotate("keyspace", req.Keyspace) span.Annotate("shard", tablet.Shard) - span.Annotate("active_only", req.ActiveOnly) - span.Annotate("workflow", workflow.Name) span.Annotate("tablet_alias", tablet.AliasString()) - id, err := 
row["id"].ToCastInt64() + copyStates, err := s.getWorkflowCopyStates(ctx, tablet, streamIds) if err != nil { return err } - var bls binlogdatapb.BinlogSource - rowBytes, err := row["source"].ToBytes() - if err != nil { - return err - } - if err := prototext.Unmarshal(rowBytes, &bls); err != nil { - return err - } + m.Lock() + defer m.Unlock() - // The value in the pos column can be compressed and thus not - // have a valid GTID consisting of valid UTF-8 characters so we - // have to decode it so that it's properly decompressed first - // when needed. - pos, err := row.ToString("pos") - if err != nil { - return err - } - if pos != "" { - mpos, err := binlogplayer.DecodePosition(pos) - if err != nil { - return err - } - pos = mpos.String() + for _, copyState := range copyStates { + shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, copyState.StreamId) + copyStatesByShardStreamId[shardStreamId] = append( + copyStatesByShardStreamId[shardStreamId], + copyState, + ) } - stopPos := row["stop_pos"].ToString() - state := row["state"].ToString() - dbName := row["db_name"].ToString() + return nil + } - timeUpdatedSeconds, err := row["time_updated"].ToCastInt64() - if err != nil { - return err - } + fetchCopyStatesEg, fetchCopyStatesCtx := errgroup.WithContext(ctx) + for tablet, result := range results { + tablet := tablet // loop closure - transactionTimeSeconds, err := row["transaction_timestamp"].ToCastInt64() - if err != nil { - return err + streamIds := make([]int32, 0, len(result.Workflows)) + for _, wf := range result.Workflows { + for _, stream := range wf.Streams { + streamIds = append(streamIds, stream.Id) + } } - message := row["message"].ToString() - - tags := row["tags"].ToString() - var tagArray []string - if tags != "" { - tagArray = strings.Split(tags, ",") + if len(streamIds) == 0 { + continue } - workflowType, _ := row["workflow_type"].ToInt32() - workflowSubType, _ := row["workflow_sub_type"].ToInt32() - - timeHeartbeat, err := 
row["time_heartbeat"].ToCastInt64() - if err != nil { - return err - } + fetchCopyStatesEg.Go(func() error { + return fetchCopyStates(fetchCopyStatesCtx, tablet, streamIds) + }) + } - componentThrottled := row["component_throttled"].ToString() - timeThrottled, err := row["time_throttled"].ToCastInt64() - if err != nil { - return err - } + if err := fetchCopyStatesEg.Wait(); err != nil { + return nil, err + } - deferSecondaryKeys, err := row["defer_secondary_keys"].ToBool() - if err != nil { - return err - } + workflowsMap := make(map[string]*vtctldatapb.Workflow, len(results)) + sourceKeyspaceByWorkflow := make(map[string]string, len(results)) + sourceShardsByWorkflow := make(map[string]sets.Set[string], len(results)) + targetKeyspaceByWorkflow := make(map[string]string, len(results)) + targetShardsByWorkflow := make(map[string]sets.Set[string], len(results)) + maxVReplicationLagByWorkflow := make(map[string]float64, len(results)) + maxVReplicationTransactionLagByWorkflow := make(map[string]float64, len(results)) - rowsCopied, err := row["rows_copied"].ToCastInt64() - if err != nil { - return err - } + // We guarantee the following invariants when this function is called for a + // given workflow: + // - workflow.Name != "" (more precisely, ".Name is set 'properly'") + // - workflowsMap[workflow.Name] == workflow + // - sourceShardsByWorkflow[workflow.Name] != nil + // - targetShardsByWorkflow[workflow.Name] != nil + // - workflow.ShardStatuses != nil + scanWorkflow := func(ctx context.Context, workflow *vtctldatapb.Workflow, res *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, tablet *topo.TabletInfo) error { + // This is not called concurrently, but we still protect the maps to ensure + // that we're concurrency-safe in the face of future changes (e.g. where other + // things are running concurrently with this which also access these maps). 
+ m.Lock() + defer m.Unlock() + for _, rstream := range res.Streams { + // The value in the pos column can be compressed and thus not + // have a valid GTID consisting of valid UTF-8 characters so we + // have to decode it so that it's properly decompressed first + // when needed. + pos := rstream.Pos + if pos != "" { + mpos, err := binlogplayer.DecodePosition(pos) + if err != nil { + return err + } + pos = mpos.String() + } - stream := &vtctldatapb.Workflow_Stream{ - Id: id, - Shard: tablet.Shard, - Tablet: tablet.Alias, - BinlogSource: &bls, - Position: pos, - StopPosition: stopPos, - State: state, - DbName: dbName, - TransactionTimestamp: &vttimepb.Time{ - Seconds: transactionTimeSeconds, - }, - TimeUpdated: &vttimepb.Time{ - Seconds: timeUpdatedSeconds, - }, - Message: message, - Tags: tagArray, - RowsCopied: rowsCopied, - ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{ - ComponentThrottled: componentThrottled, - TimeThrottled: &vttimepb.Time{ - Seconds: timeThrottled, + cells := strings.Split(res.Cells, ",") + for i := range cells { + cells[i] = strings.TrimSpace(cells[i]) + } + options := res.Options + if options != "" { + if err := json.Unmarshal([]byte(options), &workflow.Options); err != nil { + return err + } + } + stream := &vtctldatapb.Workflow_Stream{ + Id: int64(rstream.Id), + Shard: tablet.Shard, + Tablet: tablet.Alias, + BinlogSource: rstream.Bls, + Position: pos, + StopPosition: rstream.StopPos, + State: rstream.State.String(), + DbName: tablet.DbName(), + TabletTypes: res.TabletTypes, + TabletSelectionPreference: res.TabletSelectionPreference, + Cells: cells, + TransactionTimestamp: rstream.TransactionTimestamp, + TimeUpdated: rstream.TimeUpdated, + Message: rstream.Message, + Tags: strings.Split(res.Tags, ","), + RowsCopied: rstream.RowsCopied, + ThrottlerStatus: &vtctldatapb.Workflow_Stream_ThrottlerStatus{ + ComponentThrottled: rstream.ComponentThrottled, + TimeThrottled: rstream.TimeThrottled, }, - }, - } - - stream.CopyStates, 
err = s.getWorkflowCopyStates(ctx, tablet, id) - if err != nil { - return err - } + } - span.Annotate("num_copy_states", len(stream.CopyStates)) + // Merge in copy states, which we've already fetched. + shardStreamId := fmt.Sprintf("%s/%d", tablet.Shard, stream.Id) + if copyState, ok := copyStatesByShardStreamId[shardStreamId]; ok { + stream.CopyStates = copyState + } - // At this point, we're going to start modifying the maps defined - // outside this function, as well as fields on the passed-in Workflow - // pointer. Since we're running concurrently, take the lock. - // - // We've already made the remote call to getCopyStates, so synchronizing - // here shouldn't hurt too badly, performance-wise. - m.Lock() - defer m.Unlock() + if rstream.TimeUpdated == nil { + rstream.TimeUpdated = &vttimepb.Time{} + } - workflow.WorkflowType = binlogdatapb.VReplicationWorkflowType_name[workflowType] - workflow.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType_name[workflowSubType] - workflow.DeferSecondaryKeys = deferSecondaryKeys + switch { + case strings.Contains(strings.ToLower(stream.Message), "error"): + stream.State = binlogdatapb.VReplicationWorkflowState_Error.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0: + stream.State = binlogdatapb.VReplicationWorkflowState_Copying.String() + case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-rstream.TimeUpdated.Seconds > 10: + stream.State = binlogdatapb.VReplicationWorkflowState_Lagging.String() + } - switch { - case strings.Contains(strings.ToLower(stream.Message), "error"): - stream.State = binlogdatapb.VReplicationWorkflowState_Error.String() - case stream.State == binlogdatapb.VReplicationWorkflowState_Running.String() && len(stream.CopyStates) > 0: - stream.State = binlogdatapb.VReplicationWorkflowState_Copying.String() - case stream.State == 
binlogdatapb.VReplicationWorkflowState_Running.String() && int64(time.Now().Second())-timeUpdatedSeconds > 10: - stream.State = binlogdatapb.VReplicationWorkflowState_Lagging.String() - } + shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) + shardStream, ok := workflow.ShardStreams[shardStreamKey] + if !ok { + ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer cancel() - shardStreamKey := fmt.Sprintf("%s/%s", tablet.Shard, tablet.AliasString()) - shardStream, ok := workflow.ShardStreams[shardStreamKey] - if !ok { - ctx, cancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) - defer cancel() + si, err := s.ts.GetShard(ctx, req.Keyspace, tablet.Shard) + if err != nil { + return err + } - si, err := s.ts.GetShard(ctx, req.Keyspace, tablet.Shard) - if err != nil { - return err - } + shardStream = &vtctldatapb.Workflow_ShardStream{ + Streams: nil, + TabletControls: si.TabletControls, + IsPrimaryServing: si.IsPrimaryServing, + } - shardStream = &vtctldatapb.Workflow_ShardStream{ - Streams: nil, - TabletControls: si.TabletControls, - IsPrimaryServing: si.IsPrimaryServing, + workflow.ShardStreams[shardStreamKey] = shardStream } - workflow.ShardStreams[shardStreamKey] = shardStream - } - - shardStream.Streams = append(shardStream.Streams, stream) - sourceShardsByWorkflow[workflow.Name].Insert(stream.BinlogSource.Shard) - targetShardsByWorkflow[workflow.Name].Insert(tablet.Shard) + shardStream.Streams = append(shardStream.Streams, stream) + sourceShardsByWorkflow[workflow.Name].Insert(stream.BinlogSource.Shard) + targetShardsByWorkflow[workflow.Name].Insert(tablet.Shard) - if ks, ok := sourceKeyspaceByWorkflow[workflow.Name]; ok && ks != stream.BinlogSource.Keyspace { - return vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, stream.BinlogSource.Keyspace) - } + if ks, ok := sourceKeyspaceByWorkflow[workflow.Name]; ok && ks != stream.BinlogSource.Keyspace { + return 
vterrors.Wrapf(ErrMultipleSourceKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, stream.BinlogSource.Keyspace) + } - sourceKeyspaceByWorkflow[workflow.Name] = stream.BinlogSource.Keyspace + sourceKeyspaceByWorkflow[workflow.Name] = stream.BinlogSource.Keyspace - if ks, ok := targetKeyspaceByWorkflow[workflow.Name]; ok && ks != tablet.Keyspace { - return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, tablet.Keyspace) - } + if ks, ok := targetKeyspaceByWorkflow[workflow.Name]; ok && ks != tablet.Keyspace { + return vterrors.Wrapf(ErrMultipleTargetKeyspaces, "workflow = %v, ks1 = %v, ks2 = %v", workflow.Name, ks, tablet.Keyspace) + } - targetKeyspaceByWorkflow[workflow.Name] = tablet.Keyspace + targetKeyspaceByWorkflow[workflow.Name] = tablet.Keyspace - timeUpdated := time.Unix(timeUpdatedSeconds, 0) - vreplicationLag := time.Since(timeUpdated) + if stream.TimeUpdated == nil { + stream.TimeUpdated = &vttimepb.Time{} + } + timeUpdated := time.Unix(stream.TimeUpdated.Seconds, 0) + vreplicationLag := time.Since(timeUpdated) - // MaxVReplicationLag represents the time since we last processed any event - // in the workflow. - if currentMaxLag, ok := maxVReplicationLagByWorkflow[workflow.Name]; ok { - if vreplicationLag.Seconds() > currentMaxLag { + // MaxVReplicationLag represents the time since we last processed any event + // in the workflow. + if currentMaxLag, ok := maxVReplicationLagByWorkflow[workflow.Name]; ok { + if vreplicationLag.Seconds() > currentMaxLag { + maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() + } + } else { maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() } - } else { - maxVReplicationLagByWorkflow[workflow.Name] = vreplicationLag.Seconds() - } - - // MaxVReplicationTransactionLag estimates the actual statement processing lag - // between the source and the target. 
If we are still processing source events it - // is the difference b/w current time and the timestamp of the last event. If - // heartbeats are more recent than the last event, then the lag is the time since - // the last heartbeat as there can be an actual event immediately after the - // heartbeat, but which has not yet been processed on the target. - // We don't allow switching during the copy phase, so in that case we just return - // a large lag. All timestamps are in seconds since epoch. - if _, ok := maxVReplicationTransactionLagByWorkflow[workflow.Name]; !ok { - maxVReplicationTransactionLagByWorkflow[workflow.Name] = 0 - } - lastTransactionTime := transactionTimeSeconds - lastHeartbeatTime := timeHeartbeat - if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() { - maxVReplicationTransactionLagByWorkflow[workflow.Name] = math.MaxInt64 - } else { - if lastTransactionTime == 0 /* no new events after copy */ || - lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ { - lastTransactionTime = lastHeartbeatTime + workflow.WorkflowType = res.WorkflowType.String() + workflow.WorkflowSubType = res.WorkflowSubType.String() + workflow.DeferSecondaryKeys = res.DeferSecondaryKeys + + // MaxVReplicationTransactionLag estimates the actual statement processing lag + // between the source and the target. If we are still processing source events it + // is the difference b/w current time and the timestamp of the last event. If + // heartbeats are more recent than the last event, then the lag is the time since + // the last heartbeat as there can be an actual event immediately after the + // heartbeat, but which has not yet been processed on the target. + // We don't allow switching during the copy phase, so in that case we just return + // a large lag. All timestamps are in seconds since epoch. 
+ if _, ok := maxVReplicationTransactionLagByWorkflow[workflow.Name]; !ok { + maxVReplicationTransactionLagByWorkflow[workflow.Name] = 0 } - now := time.Now().Unix() /* seconds since epoch */ - transactionReplicationLag := float64(now - lastTransactionTime) - if transactionReplicationLag > maxVReplicationTransactionLagByWorkflow[workflow.Name] { - maxVReplicationTransactionLagByWorkflow[workflow.Name] = transactionReplicationLag + if rstream.TransactionTimestamp == nil { + rstream.TransactionTimestamp = &vttimepb.Time{} + } + lastTransactionTime := rstream.TransactionTimestamp.Seconds + if rstream.TimeHeartbeat == nil { + rstream.TimeHeartbeat = &vttimepb.Time{} + } + lastHeartbeatTime := rstream.TimeHeartbeat.Seconds + if stream.State == binlogdatapb.VReplicationWorkflowState_Copying.String() { + maxVReplicationTransactionLagByWorkflow[workflow.Name] = math.MaxInt64 + } else { + if lastTransactionTime == 0 /* no new events after copy */ || + lastHeartbeatTime > lastTransactionTime /* no recent transactions, so all caught up */ { + + lastTransactionTime = lastHeartbeatTime + } + now := time.Now().Unix() /* seconds since epoch */ + transactionReplicationLag := float64(now - lastTransactionTime) + if transactionReplicationLag > maxVReplicationTransactionLagByWorkflow[workflow.Name] { + maxVReplicationTransactionLagByWorkflow[workflow.Name] = transactionReplicationLag + } } } return nil } - var ( - scanWorkflowWg sync.WaitGroup - scanWorkflowErrors concurrency.FirstErrorRecorder - ) - for tablet, result := range results { - qr := sqltypes.Proto3ToResult(result) - // In the old implementation, we knew we had at most one (0 <= N <= 1) // workflow for each shard primary we queried. 
There might be multiple // rows (streams) comprising that workflow, so we would aggregate the @@ -670,8 +690,8 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows // to a workflow we're already aggregating, or if it's a workflow we // haven't seen yet for that shard primary. We use the workflow name to // dedupe for this. - for _, row := range qr.Named().Rows { - workflowName := row["workflow"].ToString() + for _, wfres := range result.Workflows { + workflowName := wfres.Workflow workflow, ok := workflowsMap[workflowName] if !ok { workflow = &vtctldatapb.Workflow{ @@ -684,21 +704,12 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows targetShardsByWorkflow[workflowName] = sets.New[string]() } - scanWorkflowWg.Add(1) - go func(ctx context.Context, workflow *vtctldatapb.Workflow, row sqltypes.RowNamedValues, tablet *topo.TabletInfo) { - defer scanWorkflowWg.Done() - if err := scanWorkflow(ctx, workflow, row, tablet); err != nil { - scanWorkflowErrors.RecordError(err) - } - }(ctx, workflow, row, tablet) + if err := scanWorkflow(ctx, workflow, wfres, tablet); err != nil { + return nil, err + } } } - scanWorkflowWg.Wait() - if scanWorkflowErrors.HasErrors() { - return nil, scanWorkflowErrors.Error() - } - var ( fetchLogsWG sync.WaitGroup vrepLogQuery = strings.TrimSpace(` @@ -743,7 +754,8 @@ ORDER BY return } - results, err := vx.WithWorkflow(workflow.Name).QueryContext(ctx, query) + vx := vexec.NewVExec(req.Keyspace, workflow.Name, s.ts, s.tmc, s.SQLParser()) + results, err := vx.QueryContext(ctx, query) if err != nil { // Note that we do not return here. If there are any query results // in the map (i.e. some tablets returned successfully), we will @@ -842,7 +854,8 @@ ORDER BY if stream.Id > streamLog.StreamId { log.Warningf("Found stream log for nonexistent stream: %+v", streamLog) - break + // This can happen on manual/failed workflow cleanup so keep going. 
+ continue } // stream.Id == streamLog.StreamId @@ -929,7 +942,6 @@ ORDER BY func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, *State, error) { ts, err := s.buildTrafficSwitcher(ctx, targetKeyspace, workflowName) - if err != nil { log.Errorf("buildTrafficSwitcher failed: %v", err) return nil, nil, err @@ -973,11 +985,16 @@ func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowN // We assume a consistent state, so only choose routing rule for one table. if len(ts.Tables()) == 0 { return nil, nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no tables in workflow %s.%s", targetKeyspace, workflowName) - } table := ts.Tables()[0] - if ts.isPartialMigration { // shard level traffic switching is all or nothing + if ts.IsMultiTenantMigration() { + // Deduce which traffic has been switched by looking at the current keyspace routing rules. + err := updateKeyspaceRoutingState(ctx, ts.TopoServer(), sourceKeyspace, targetKeyspace, state) + if err != nil { + return nil, nil, err + } + } else if ts.isPartialMigration { // shard level traffic switching is all or nothing shardRoutingRules, err := s.ts.GetShardRoutingRules(ctx) if err != nil { return nil, nil, err @@ -1050,16 +1067,24 @@ func (s *Server) getWorkflowState(ctx context.Context, targetKeyspace, workflowN return ts, state, nil } -func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, id int64) ([]*vtctldatapb.Workflow_Stream_CopyState, error) { +func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletInfo, streamIds []int32) ([]*vtctldatapb.Workflow_Stream_CopyState, error) { span, ctx := trace.NewSpan(ctx, "workflow.Server.getWorkflowCopyStates") defer span.Finish() span.Annotate("keyspace", tablet.Keyspace) span.Annotate("shard", tablet.Shard) span.Annotate("tablet_alias", tablet.AliasString()) - span.Annotate("vrepl_id", id) + span.Annotate("stream_ids", fmt.Sprintf("%#v", 
streamIds)) - query := fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", id, id) + idsBV, err := sqltypes.BuildBindVariable(streamIds) + if err != nil { + return nil, err + } + query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)", + idsBV, idsBV) + if err != nil { + return nil, err + } qr, err := s.tmc.VReplicationExec(ctx, tablet.Tablet, query) if err != nil { return nil, err @@ -1072,10 +1097,15 @@ func (s *Server) getWorkflowCopyStates(ctx context.Context, tablet *topo.TabletI copyStates := make([]*vtctldatapb.Workflow_Stream_CopyState, len(result.Rows)) for i, row := range result.Rows { - // These fields are technically varbinary, but this is close enough. + streamId, err := row[0].ToInt64() + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to cast vrepl_id to int64: %v", err) + } + // These string fields are technically varbinary, but this is close enough. 
copyStates[i] = &vtctldatapb.Workflow_Stream_CopyState{ - Table: row[0].ToString(), - LastPk: row[1].ToString(), + StreamId: streamId, + Table: row[1].ToString(), + LastPk: row[2].ToString(), } } @@ -1167,42 +1197,29 @@ func (s *Server) LookupVindexExternalize(ctx context.Context, req *vtctldatapb.L if err != nil { return err } - p3qr, err := s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, fmt.Sprintf("select id, state, message, source from _vt.vreplication where workflow=%s and db_name=%s", encodeString(req.Name), encodeString(targetPrimary.DbName()))) + res, err := s.tmc.ReadVReplicationWorkflow(ctx, targetPrimary.Tablet, &tabletmanagerdatapb.ReadVReplicationWorkflowRequest{ + Workflow: req.Name, + }) if err != nil { return err } - qr := sqltypes.Proto3ToResult(p3qr) - if qr == nil || len(qr.Rows) == 0 { + if res == nil { return vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "workflow %s not found on %v", req.Name, targetPrimary.Alias) } - for _, row := range qr.Rows { - id, err := row[0].ToCastInt64() - if err != nil { - return err - } - state := binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row[1].ToString()]) - message := row[2].ToString() - var bls binlogdatapb.BinlogSource - sourceBytes, err := row[3].ToBytes() - if err != nil { - return err - } - if err := prototext.Unmarshal(sourceBytes, &bls); err != nil { - return err - } - if bls.Filter == nil || len(bls.Filter.Rules) != 1 { + for _, stream := range res.Streams { + if stream.Bls.Filter == nil || len(stream.Bls.Filter.Rules) != 1 { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "invalid binlog source") } - if vindex.Owner == "" || !bls.StopAfterCopy { + if vindex.Owner == "" || !stream.Bls.StopAfterCopy { // If there's no owner or we've requested that the workflow NOT be stopped // after the copy phase completes, then all streams need to be running. 
- if state != binlogdatapb.VReplicationWorkflowState_Running { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream %d for %v.%v is not in Running state: %v", id, targetShard.Keyspace(), targetShard.ShardName(), state) + if stream.State != binlogdatapb.VReplicationWorkflowState_Running { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream %d for %v.%v is not in Running state: %v", stream.Id, targetShard.Keyspace(), targetShard.ShardName(), stream.State) } } else { // If there is an owner, all streams need to be stopped after copy. - if state != binlogdatapb.VReplicationWorkflowState_Stopped || !strings.Contains(message, "Stopped after copy") { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream %d for %v.%v is not in Stopped after copy state: %v, %v", id, targetShard.Keyspace(), targetShard.ShardName(), state, message) + if stream.State != binlogdatapb.VReplicationWorkflowState_Stopped || !strings.Contains(stream.Message, "Stopped after copy") { + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "stream %d for %v.%v is not in Stopped after copy state: %v, %v", stream.Id, targetShard.Keyspace(), targetShard.ShardName(), stream.State, stream.Message) } } } @@ -1246,9 +1263,29 @@ func (s *Server) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSet sourceTs: s.ts, tmc: s.tmc, ms: ms, + env: s.env, + } + + tt, err := topoproto.ParseTabletTypes(ms.TabletTypes) + if err != nil { + return err } - err := mz.createMaterializerStreams() + cells := strings.Split(ms.Cell, ",") + for i := range cells { + cells[i] = strings.TrimSpace(cells[i]) + } + + err = mz.createWorkflowStreams(&tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + Workflow: ms.Workflow, + Cells: strings.Split(ms.Cell, ","), + TabletTypes: tt, + TabletSelectionPreference: ms.TabletSelectionPreference, + WorkflowType: mz.getWorkflowType(), + DeferSecondaryKeys: ms.DeferSecondaryKeys, + AutoStart: true, + StopAfterCopy: ms.StopAfterCopy, + }) if 
err != nil { return err } @@ -1263,20 +1300,21 @@ func (s *Server) MoveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl } func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest, - workflowType binlogdatapb.VReplicationWorkflowType) (res *vtctldatapb.WorkflowStatusResponse, err error) { - - span, ctx := trace.NewSpan(ctx, "workflow.Server.MoveTablesCreate") + workflowType binlogdatapb.VReplicationWorkflowType, +) (res *vtctldatapb.WorkflowStatusResponse, err error) { + span, ctx := trace.NewSpan(ctx, "workflow.Server.moveTablesCreate") defer span.Finish() span.Annotate("keyspace", req.TargetKeyspace) span.Annotate("workflow", req.Workflow) + span.Annotate("workflow_type", workflowType) span.Annotate("cells", req.Cells) span.Annotate("tablet_types", req.TabletTypes) span.Annotate("on_ddl", req.OnDdl) sourceKeyspace := req.SourceKeyspace targetKeyspace := req.TargetKeyspace - //FIXME validate tableSpecs, allTables, excludeTables + // FIXME validate tableSpecs, allTables, excludeTables var ( tables = req.IncludeTables externalTopo *topo.Server @@ -1302,6 +1340,19 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl if vschema == nil { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "no vschema found for target keyspace %s", targetKeyspace) } + + if workflowType == binlogdatapb.VReplicationWorkflowType_MoveTables && + req.GetWorkflowOptions().GetTenantId() != "" { + multiTenantSpec := vschema.MultiTenantSpec + if multiTenantSpec == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "multi-tenant spec not found for target keyspace %s", targetKeyspace) + } + // Validate that the tenant id matches the data type of the column provided in the multi-tenant spec of the vschema. 
+ if err := validateTenantId(multiTenantSpec.TenantIdColumnType, req.WorkflowOptions.TenantId); err != nil { + return nil, err + } + } + ksTables, err := getTablesInKeyspace(ctx, sourceTopo, s.tmc, sourceKeyspace) if err != nil { return nil, err @@ -1344,7 +1395,6 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl return nil, err } } - ms := &vtctldatapb.MaterializeSettings{ Workflow: req.Workflow, MaterializationIntent: vtctldatapb.MaterializationIntent_MOVETABLES, @@ -1359,6 +1409,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl OnDdl: req.OnDdl, DeferSecondaryKeys: req.DeferSecondaryKeys, AtomicCopy: req.AtomicCopy, + WorkflowOptions: req.WorkflowOptions, } if req.SourceTimeZone != "" { ms.SourceTimeZone = req.SourceTimeZone @@ -1385,8 +1436,18 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl tmc: s.tmc, ms: ms, workflowType: workflowType, + env: s.env, } - err = mz.createMoveTablesStreams(req) + err = mz.createWorkflowStreams(&tabletmanagerdatapb.CreateVReplicationWorkflowRequest{ + Workflow: req.Workflow, + Cells: req.Cells, + TabletTypes: req.TabletTypes, + TabletSelectionPreference: req.TabletSelectionPreference, + WorkflowType: mz.workflowType, + DeferSecondaryKeys: req.DeferSecondaryKeys, + AutoStart: req.AutoStart, + StopAfterCopy: req.StopAfterCopy, + }) if err != nil { return nil, err } @@ -1414,44 +1475,14 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl // Now that the streams have been successfully created, let's put the associated // routing rules in place. if externalTopo == nil { - if req.NoRoutingRules { - log.Warningf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, req.Workflow) - } else { - // Save routing rules before vschema. If we save vschema first, and routing - // rules fails to save, we may generate duplicate table errors. 
- if mz.isPartial { - if err := createDefaultShardRoutingRules(mz.ctx, mz.ms, mz.ts); err != nil { - return nil, err - } - } - - rules, err := topotools.GetRoutingRules(ctx, s.ts) - if err != nil { - return nil, err - } - for _, table := range tables { - toSource := []string{sourceKeyspace + "." + table} - rules[table] = toSource - rules[table+"@replica"] = toSource - rules[table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[targetKeyspace+"."+table+"@replica"] = toSource - rules[targetKeyspace+"."+table+"@rdonly"] = toSource - rules[targetKeyspace+"."+table] = toSource - rules[sourceKeyspace+"."+table+"@replica"] = toSource - rules[sourceKeyspace+"."+table+"@rdonly"] = toSource - } - if err := topotools.SaveRoutingRules(ctx, s.ts, rules); err != nil { - return nil, err - } - } - if vschema != nil { - // We added to the vschema. - if err := s.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { - return nil, err - } + if err := s.setupInitialRoutingRules(ctx, req, mz, tables, vschema); err != nil { + return nil, err } + // We added to the vschema. 
+ if err := s.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return nil, err + } } if err := s.ts.RebuildSrvVSchema(ctx, nil); err != nil { return nil, err @@ -1493,13 +1524,91 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl return nil, err } } - + var targetShards []string + for _, shard := range mz.targetShards { + targetShards = append(targetShards, shard.ShardName()) + } return s.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{ Keyspace: targetKeyspace, Workflow: req.Workflow, + Shards: targetShards, }) } +func (s *Server) validateRoutingRuleFlags(req *vtctldatapb.MoveTablesCreateRequest, mz *materializer) error { + if mz.IsMultiTenantMigration() { + switch { + case req.NoRoutingRules: + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot use --no-routing-rules in a multi-tenant migration") + case mz.isPartial: + return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "cannot run partial shard migration along with multi-tenant migration") + } + } + return nil +} + +func (s *Server) setupInitialRoutingRules(ctx context.Context, req *vtctldatapb.MoveTablesCreateRequest, mz *materializer, tables []string, vschema *vschemapb.Keyspace) error { + if err := s.validateRoutingRuleFlags(req, mz); err != nil { + return err + } + + sourceKeyspace := req.SourceKeyspace + targetKeyspace := req.TargetKeyspace + + if req.NoRoutingRules { + log.Warningf("Found --no-routing-rules flag, not creating routing rules for workflow %s.%s", targetKeyspace, req.Workflow) + return nil + } + + // Save routing rules before vschema. If we save vschema first, and routing + // rules fails to save, we may generate duplicate table errors. 
+ if mz.isPartial { + if err := createDefaultShardRoutingRules(mz.ctx, mz.ms, mz.ts); err != nil { + return err + } + } + + if mz.IsMultiTenantMigration() { + log.Infof("Setting up keyspace routing rules for workflow %s.%s", targetKeyspace, req.Workflow) + // Note that you can never point the target keyspace to the source keyspace in a multi-tenant migration + // since the target takes write traffic for all tenants! + routes := make(map[string]string) + for _, tt := range tabletTypeSuffixes { + routes[sourceKeyspace+tt] = sourceKeyspace + } + + if err := updateKeyspaceRoutingRules(ctx, s.ts, "Create", routes); err != nil { + return err + } + return nil + } + + // Setup table routing rules. + rules, err := topotools.GetRoutingRules(ctx, s.ts) + if err != nil { + return err + } + routeTableToSource := func(keyspace, table string) { + key := table + route := fmt.Sprintf("%s.%s", sourceKeyspace, table) + if keyspace != "" { + key = fmt.Sprintf("%s.%s", keyspace, table) + } + for _, typ := range tabletTypeSuffixes { + rules[key+typ] = []string{route} + } + } + for _, table := range tables { + for _, ks := range []string{globalTableQualifier, targetKeyspace, sourceKeyspace} { + routeTableToSource(ks, table) + } + } + if err := topotools.SaveRoutingRules(ctx, s.ts, rules); err != nil { + return err + } + return nil +} + // MoveTablesComplete is part of the vtctlservicepb.VtctldServer interface. // It cleans up a successful MoveTables workflow and its related artifacts. // Note: this is currently re-used for Reshard as well. 
@@ -1581,7 +1690,8 @@ func (s *Server) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCrea log.Errorf("%w", err2) return nil, err } - rs, err := s.buildResharder(ctx, keyspace, req.Workflow, req.SourceShards, req.TargetShards, strings.Join(cells, ","), "") + tabletTypesStr := discovery.BuildTabletTypesString(req.TabletTypes, req.TabletSelectionPreference) + rs, err := s.buildResharder(ctx, keyspace, req.Workflow, req.SourceShards, req.TargetShards, strings.Join(cells, ","), tabletTypesStr) if err != nil { return nil, vterrors.Wrap(err, "buildResharder") } @@ -1604,7 +1714,10 @@ func (s *Server) ReshardCreate(ctx context.Context, req *vtctldatapb.ReshardCrea } else { log.Warningf("Streams will not be started since --auto-start is set to false") } - return nil, nil + return s.WorkflowStatus(ctx, &vtctldatapb.WorkflowStatusRequest{ + Keyspace: req.Keyspace, + Workflow: req.Workflow, + }) } // VDiffCreate is part of the vtctlservicepb.VtctldServer interface. @@ -1622,10 +1735,23 @@ func (s *Server) VDiffCreate(ctx context.Context, req *vtctldatapb.VDiffCreateRe span.Annotate("tablet_types", req.TabletTypes) span.Annotate("tables", req.Tables) span.Annotate("auto_retry", req.AutoRetry) + span.Annotate("max_diff_duration", req.MaxDiffDuration) - tabletTypesStr := topoproto.MakeStringTypeCSV(req.TabletTypes) - if req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { - tabletTypesStr = discovery.InOrderHint + tabletTypesStr + tabletTypesStr := discovery.BuildTabletTypesString(req.TabletTypes, req.TabletSelectionPreference) + + // This is a pointer so there's no ZeroValue in the message + // and an older v18 client will not provide it. + if req.MaxDiffDuration == nil { + req.MaxDiffDuration = &vttimepb.Duration{} + } + // The other vttime.Duration vars should not be nil as the + // client should always provide them, but we check anyway to + // be safe. 
+ if req.FilteredReplicationWaitTime == nil { + req.FilteredReplicationWaitTime = &vttimepb.Duration{} + } + if req.WaitUpdateInterval == nil { + req.WaitUpdateInterval = &vttimepb.Duration{} } options := &tabletmanagerdatapb.VDiffOptions{ @@ -1637,14 +1763,16 @@ func (s *Server) VDiffCreate(ctx context.Context, req *vtctldatapb.VDiffCreateRe CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{ Tables: strings.Join(req.Tables, ","), AutoRetry: req.AutoRetry, - MaxRows: req.MaxExtraRowsToCompare, + MaxRows: req.Limit, TimeoutSeconds: req.FilteredReplicationWaitTime.Seconds, MaxExtraRowsToCompare: req.MaxExtraRowsToCompare, UpdateTableStats: req.UpdateTableStats, + MaxDiffSeconds: req.MaxDiffDuration.Seconds, }, ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{ - OnlyPks: req.OnlyPKs, - DebugQuery: req.DebugQuery, + OnlyPks: req.OnlyPKs, + DebugQuery: req.DebugQuery, + MaxSampleRows: req.MaxReportSampleRows, }, } @@ -1665,6 +1793,16 @@ func (s *Server) VDiffCreate(ctx context.Context, req *vtctldatapb.VDiffCreateRe req.TargetKeyspace, req.Workflow) } + workflowStatus, err := s.getWorkflowStatus(ctx, req.TargetKeyspace, req.Workflow) + if err != nil { + return nil, err + } + if workflowStatus != binlogdatapb.VReplicationWorkflowState_Running { + log.Infof("Workflow %s.%s is not running, cannot start VDiff in state %s", req.TargetKeyspace, req.Workflow, workflowStatus) + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, + "not all streams are running in workflow %s.%s", req.TargetKeyspace, req.Workflow) + } + err = ts.ForAllTargets(func(target *MigrationTarget) error { _, err := s.tmc.VDiff(ctx, target.GetPrimary().Tablet, tabletreq) return err @@ -1828,6 +1966,9 @@ func (s *Server) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDe span.Annotate("keyspace", req.Keyspace) span.Annotate("workflow", req.Workflow) + span.Annotate("keep_data", req.KeepData) + span.Annotate("keep_routing_rules", req.KeepRoutingRules) + span.Annotate("shards", 
req.Shards) // Cleanup related data and artifacts. if _, err := s.DropTargets(ctx, req.Keyspace, req.Workflow, req.KeepData, req.KeepRoutingRules, false); err != nil { @@ -1840,7 +1981,8 @@ func (s *Server) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDe deleteReq := &tabletmanagerdatapb.DeleteVReplicationWorkflowRequest{ Workflow: req.Workflow, } - vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc, s.env.Parser()) + vx.SetShardSubset(req.Shards) callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { res, err := s.tmc.DeleteVReplicationWorkflow(ctx, tablet.Tablet, deleteReq) if err != nil { @@ -1914,7 +2056,7 @@ func (s *Server) WorkflowStatus(ctx context.Context, req *vtctldatapb.WorkflowSt } } - workflow, err := s.GetWorkflow(ctx, req.Keyspace, req.Workflow, false) + workflow, err := s.GetWorkflow(ctx, req.Keyspace, req.Workflow, false, req.Shards) if err != nil { return nil, err } @@ -1952,6 +2094,9 @@ func (s *Server) WorkflowStatus(ctx context.Context, req *vtctldatapb.WorkflowSt if updateLag > 0*1e9 { info = append(info, "VStream may not be running") } + if st.TransactionTimestamp == nil { + st.TransactionTimestamp = &vttimepb.Time{} + } txLag := int64(now) - st.TransactionTimestamp.Seconds info = append(info, fmt.Sprintf("VStream Lag: %ds", txLag/1e9)) if st.TransactionTimestamp.Seconds > 0 { // if no events occur after copy phase, TransactionTimeStamp can be 0 @@ -2028,7 +2173,7 @@ func (s *Server) GetCopyProgress(ctx context.Context, ts *trafficSwitcher, state sourceTableSizes[table] = 0 } - var getTableMetrics = func(tablet *topodatapb.Tablet, query string, rowCounts *map[string]int64, tableSizes *map[string]int64) error { + getTableMetrics := func(tablet *topodatapb.Tablet, query string, rowCounts *map[string]int64, tableSizes *map[string]int64) error { p3qr, err := s.tmc.ExecuteFetchAsDba(ctx, tablet, true, 
&tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ Query: []byte(query), MaxRows: uint64(len(tables)), @@ -2116,7 +2261,7 @@ func (s *Server) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUp span.Annotate("on_ddl", req.TabletRequest.OnDdl) span.Annotate("state", req.TabletRequest.State) - vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc, s.env.Parser()) callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { res, err := s.tmc.UpdateVReplicationWorkflow(ctx, tablet.Tablet, req.TabletRequest) if err != nil { @@ -2215,25 +2360,20 @@ func (s *Server) collectTargetStreams(ctx context.Context, mz *materializer) ([] var shardTablets []string var mu sync.Mutex err := mz.forAllTargets(func(target *topo.ShardInfo) error { - var qrproto *querypb.QueryResult - var id int64 var err error targetPrimary, err := s.ts.GetTablet(ctx, target.PrimaryAlias) if err != nil { return vterrors.Wrapf(err, "GetTablet(%v) failed", target.PrimaryAlias) } - query := fmt.Sprintf("select id from _vt.vreplication where db_name=%s and workflow=%s", encodeString(targetPrimary.DbName()), encodeString(mz.ms.Workflow)) - if qrproto, err = s.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { - return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) + res, err := s.tmc.ReadVReplicationWorkflow(ctx, targetPrimary.Tablet, &tabletmanagerdatapb.ReadVReplicationWorkflowRequest{ + Workflow: mz.ms.Workflow, + }) + if err != nil { + return vterrors.Wrapf(err, "failed to read vreplication workflow on %+v", targetPrimary.Tablet) } - qr := sqltypes.Proto3ToResult(qrproto) - for i := 0; i < len(qr.Rows); i++ { - id, err = qr.Rows[i][0].ToCastInt64() - if err != nil { - return err - } + for _, stream := range res.Streams { mu.Lock() - shardTablets = append(shardTablets, fmt.Sprintf("%s:%d", target.ShardName(), id)) + 
shardTablets = append(shardTablets, fmt.Sprintf("%s:%d", target.ShardName(), stream.Id)) mu.Unlock() } return nil @@ -2453,6 +2593,7 @@ func (s *Server) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workf optTabletTypes: optTabletTypes, workflowType: tgtInfo.WorkflowType, workflowSubType: tgtInfo.WorkflowSubType, + options: tgtInfo.Options, } log.Infof("Migration ID for workflow %s: %d", workflowName, ts.id) sourceTopo := s.ts @@ -2529,7 +2670,7 @@ func (s *Server) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workf if err != nil { return nil, err } - ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace, s.env.Parser()) if err != nil { return nil, err } @@ -2557,6 +2698,9 @@ func (s *Server) dropRelatedArtifacts(ctx context.Context, keepRoutingRules bool if err := sw.deleteShardRoutingRules(ctx); err != nil { return err } + if err := sw.deleteKeyspaceRoutingRules(ctx); err != nil { + return err + } } return nil @@ -2642,6 +2786,10 @@ func (s *Server) dropArtifacts(ctx context.Context, keepRoutingRules bool, sw is if err := sw.deleteShardRoutingRules(ctx); err != nil { return err } + if err := sw.deleteKeyspaceRoutingRules(ctx); err != nil { + return err + } + } return nil @@ -2709,7 +2857,7 @@ func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string, recurs // GetTabletMap ignores ErrNoNode, and it's good for // our purpose, it means a tablet was deleted but is // still referenced. - tabletMap, err := s.ts.GetTabletMap(ctx, aliases) + tabletMap, err := s.ts.GetTabletMap(ctx, aliases, nil) if err != nil { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "GetTabletMap() failed: %v", err) } @@ -2762,7 +2910,8 @@ func (s *Server) DeleteShard(ctx context.Context, keyspace, shard string, recurs // updateShardRecords updates the shard records based on 'from' or 'to' direction. 
func (s *Server) updateShardRecords(ctx context.Context, keyspace string, shards []*topo.ShardInfo, cells []string, - servedType topodatapb.TabletType, isFrom bool, clearSourceShards bool, logger logutil.Logger) (err error) { + servedType topodatapb.TabletType, isFrom bool, clearSourceShards bool, logger logutil.Logger, +) (err error) { return topotools.UpdateShardRecords(ctx, s.ts, s.tmc, keyspace, shards, cells, servedType, isFrom, clearSourceShards, logger) } @@ -2871,8 +3020,14 @@ func (s *Server) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.Wor if err != nil { return nil, err } + if ts.IsMultiTenantMigration() { + // In a multi-tenant migration, multiple migrations would be writing to the same table, so we can't stop writes like + // we do with MoveTables, using denied tables, since it would block all other migrations as well as traffic for + // tenants which have already been migrated. + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot reverse traffic for multi-tenant migrations") + } } - reason, err := s.canSwitch(ctx, ts, startState, direction, int64(maxReplicationLagAllowed.Seconds())) + reason, err := s.canSwitch(ctx, ts, startState, direction, int64(maxReplicationLagAllowed.Seconds()), req.Shards) if err != nil { return nil, err } @@ -2884,7 +3039,9 @@ func (s *Server) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.Wor return nil, err } if hasReplica || hasRdonly { - if rdDryRunResults, err = s.switchReads(ctx, req, ts, startState, timeout, false, direction); err != nil { + // If we're going to switch writes immediately after then we don't need to + // rebuild the SrvVSchema here as we will do it after switching writes. 
+ if rdDryRunResults, err = s.switchReads(ctx, req, ts, startState, !hasPrimary /* rebuildSrvVSchema */, direction); err != nil { return nil, err } log.Infof("Switch Reads done for workflow %s.%s", req.Keyspace, req.Workflow) @@ -2933,12 +3090,13 @@ func (s *Server) WorkflowSwitchTraffic(ctx context.Context, req *vtctldatapb.Wor } else { resp.CurrentState = currentState.String() } + log.Infof("SwitchTraffic done for workflow %s.%s, returning response %v", req.Keyspace, req.Workflow, resp) } return resp, nil } // switchReads is a generic way of switching read traffic for a workflow. -func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, state *State, timeout time.Duration, cancel bool, direction TrafficSwitchDirection) (*[]string, error) { +func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, state *State, rebuildSrvVSchema bool, direction TrafficSwitchDirection) (*[]string, error) { var roTabletTypes []topodatapb.TabletType // When we are switching all traffic we also get the primary tablet type, which we need to // filter out for switching reads. @@ -2959,17 +3117,42 @@ func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitc } } + cellsStr := strings.Join(req.Cells, ",") + // Consistently handle errors by logging and returning them. 
handleError := func(message string, err error) (*[]string, error) { - ts.Logger().Error(err) - return nil, err + werr := vterrors.Wrapf(err, message) + ts.Logger().Error(werr) + return nil, werr } - log.Infof("Switching reads: %s.%s tablet types: %s, cells: %s, workflow state: %s", ts.targetKeyspace, ts.workflow, roTypesToSwitchStr, ts.optCells, state.String()) + log.Infof("Switching reads: %s.%s tablet types: %s, cells: %s, workflow state: %s", ts.targetKeyspace, ts.workflow, roTypesToSwitchStr, cellsStr, state.String()) if !switchReplica && !switchRdonly { return handleError("invalid tablet types", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "tablet types must be REPLICA or RDONLY: %s", roTypesToSwitchStr)) } - if !ts.isPartialMigration { // shard level traffic switching is all or nothing + // For partial (shard-by-shard migrations) or multi-tenant migrations, traffic for all tablet types + // is expected to be switched at once. For other MoveTables migrations where we use table routing rules + // replica/rdonly traffic can be switched first and then primary traffic can be switched later. 
+ trafficSwitchingIsAllOrNothing := false + switch { + case ts.isPartialMigration: + // shard level traffic switching is all or nothing + trafficSwitchingIsAllOrNothing = true + case ts.MigrationType() == binlogdatapb.MigrationType_TABLES && ts.IsMultiTenantMigration(): + if direction == DirectionBackward { + return handleError("invalid request", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "requesting reversal of read traffic for multi-tenant migrations is not supported")) + } + // For multi-tenant migrations, we only support switching traffic to all cells at once + allCells, err := ts.TopoServer().GetCellInfoNames(ctx) + if err != nil { + return nil, err + } + if len(req.GetCells()) != 0 && len(req.GetCells()) != len(allCells) { + return handleError("invalid request", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "requesting read traffic for multi-tenant migrations must include all cells")) + } + } + + if !trafficSwitchingIsAllOrNothing { if direction == DirectionBackward && switchReplica && len(state.ReplicaCellsSwitched) == 0 { return handleError("invalid request", vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "requesting reversal of read traffic for REPLICAs but REPLICA reads have not been switched")) } @@ -2977,11 +3160,6 @@ func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitc return handleError("invalid request", vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "requesting reversal of SwitchReads for RDONLYs but RDONLY reads have not been switched")) } } - var cells = req.Cells - // If no cells were provided in the command then use the value from the workflow. - if len(cells) == 0 && ts.optCells != "" { - cells = strings.Split(strings.TrimSpace(ts.optCells), ",") - } // If there are no rdonly tablets in the cells ask to switch rdonly tablets as well so that routing rules // are updated for rdonly as well. 
Otherwise vitess will not know that the workflow has completed and will @@ -2989,7 +3167,7 @@ func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitc // rdonly tablets. if switchReplica && !switchRdonly { var err error - rdonlyTabletsExist, err := topotools.DoCellsHaveRdonlyTablets(ctx, s.ts, cells) + rdonlyTabletsExist, err := topotools.DoCellsHaveRdonlyTablets(ctx, s.ts, req.Cells) if err != nil { return nil, err } @@ -3025,22 +3203,32 @@ func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitc defer unlock(&err) if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { - if ts.isPartialMigration { + switch { + case ts.IsMultiTenantMigration(): + err := sw.switchKeyspaceReads(ctx, roTabletTypes) + if err != nil { + return handleError(fmt.Sprintf("failed to switch read traffic, from source keyspace %s to target keyspace %s, workflow %s", + ts.SourceKeyspaceName(), ts.TargetKeyspaceName(), ts.WorkflowName()), err) + } + case ts.isPartialMigration: ts.Logger().Infof("Partial migration, skipping switchTableReads as traffic is all or nothing per shard and overridden for reads AND writes in the ShardRoutingRule created when switching writes.") - } else if err := sw.switchTableReads(ctx, cells, roTabletTypes, direction); err != nil { - return handleError("failed to switch read traffic for the tables", err) + default: + err := sw.switchTableReads(ctx, req.Cells, roTabletTypes, rebuildSrvVSchema, direction) + if err != nil { + return handleError("failed to switch read traffic for the tables", err) + } } return sw.logs(), nil } - ts.Logger().Infof("About to switchShardReads: %+v, %+s, %+v", cells, roTypesToSwitchStr, direction) - if err := sw.switchShardReads(ctx, cells, roTabletTypes, direction); err != nil { + ts.Logger().Infof("About to switchShardReads: cells: %s, tablet types: %s, direction: %d", cellsStr, roTypesToSwitchStr, direction) + if err := sw.switchShardReads(ctx, req.Cells, roTabletTypes, direction); err != 
nil { return handleError("failed to switch read traffic for the shards", err) } - ts.Logger().Infof("switchShardReads Completed: %+v, %+s, %+v", cells, roTypesToSwitchStr, direction) - if err := s.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, strings.Join(cells, ",")); err != nil { + ts.Logger().Infof("switchShardReads Completed: cells: %s, tablet types: %s, direction: %d", cellsStr, roTypesToSwitchStr, direction) + if err := s.ts.ValidateSrvKeyspace(ctx, ts.targetKeyspace, cellsStr); err != nil { err2 := vterrors.Wrapf(err, "after switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", - ts.targetKeyspace, strings.Join(cells, ",")) + ts.targetKeyspace, cellsStr) return handleError("failed to validate SrvKeyspace record", err2) } return sw.logs(), nil @@ -3048,8 +3236,8 @@ func (s *Server) switchReads(ctx context.Context, req *vtctldatapb.WorkflowSwitc // switchWrites is a generic way of migrating write traffic for a workflow. func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwitchTrafficRequest, ts *trafficSwitcher, timeout time.Duration, - cancel bool) (journalID int64, dryRunResults *[]string, err error) { - + cancel bool, +) (journalID int64, dryRunResults *[]string, err error) { var sw iswitcher if req.DryRun { sw = &switcherDryRun{ts: ts, drLog: NewLogRecorder()} @@ -3059,7 +3247,7 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit // Consistently handle errors by logging and returning them. handleError := func(message string, err error) (int64, *[]string, error) { - werr := vterrors.Errorf(vtrpcpb.Code_INTERNAL, fmt.Sprintf("%s: %v", message, err)) + werr := vterrors.Wrapf(err, message) ts.Logger().Error(werr) return 0, nil, werr } @@ -3117,7 +3305,7 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit } if !journalsExist { ts.Logger().Infof("No previous journals were found. 
Proceeding normally.") - sm, err := BuildStreamMigrator(ctx, ts, cancel) + sm, err := BuildStreamMigrator(ctx, ts, cancel, s.env.Parser()) if err != nil { return handleError("failed to migrate the workflow streams", err) } @@ -3126,8 +3314,27 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit return 0, sw.logs(), nil } + // We stop writes on the source before stopping the source streams so that the catchup time + // is lessened and other workflows that we have to migrate such as intra-keyspace materialize + // workflows also have a chance to catch up as well because those are internally generated + // GTIDs within the shards we're switching traffic away from. + // For intra-keyspace materialization streams that we migrate where the source and target are + // the keyspace being resharded, we wait for those to catchup in the stopStreams path before + // we actually stop them. + ts.Logger().Infof("Stopping source writes") + if err := sw.stopSourceWrites(ctx); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to stop writes in the %s keyspace", ts.SourceKeyspaceName()), err) + } + ts.Logger().Infof("Stopping streams") - sourceWorkflows, err = sw.stopStreams(ctx, sm) + // Use a shorter context for this since since when doing a Reshard, if there are intra-keyspace + // materializations then we have to wait for them to catchup before switching traffic for the + // Reshard workflow. We use the the same timeout value here that is used for VReplication catchup + // with the inter-keyspace workflows. 
+ stopCtx, stopCancel := context.WithTimeout(ctx, timeout) + defer stopCancel() + sourceWorkflows, err = sw.stopStreams(stopCtx, sm) if err != nil { for key, streams := range sm.Streams() { for _, stream := range streams { @@ -3135,13 +3342,7 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit } } sw.cancelMigration(ctx, sm) - return handleError("failed to stop the workflow streams", err) - } - - ts.Logger().Infof("Stopping source writes") - if err := sw.stopSourceWrites(ctx); err != nil { - sw.cancelMigration(ctx, sm) - return handleError(fmt.Sprintf("failed to stop writes in the %s keyspace", ts.SourceKeyspaceName()), err) + return handleError(fmt.Sprintf("failed to stop the workflow streams in the %s keyspace", ts.SourceKeyspaceName()), err) } if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { @@ -3182,6 +3383,20 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit sw.cancelMigration(ctx, sm) return handleError("failed to create the reverse vreplication streams", err) } + + // Initialize any target sequences, if there are any, before allowing new writes. + if req.InitializeTargetSequences && len(sequenceMetadata) > 0 { + ts.Logger().Infof("Initializing target sequences") + // Writes are blocked so we can safely initialize the sequence tables but + // we also want to use a shorter timeout than the parent context. + // We use at most half of the overall timeout. 
+ initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) + defer cancel() + if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } } else { if cancel { return handleError("invalid cancel", vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "traffic switching has reached the point of no return, cannot cancel")) @@ -3198,17 +3413,6 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit if err := sw.createJournals(ctx, sourceWorkflows); err != nil { return handleError("failed to create the journal", err) } - // Initialize any target sequences, if there are any, before allowing new writes. - if req.InitializeTargetSequences && len(sequenceMetadata) > 0 { - // Writes are blocked so we can safely initialize the sequence tables but - // we also want to use a shorter timeout than the parent context. - // We use up at most half of the overall timeout. 
- initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) - defer cancel() - if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { - return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) - } - } if err := sw.allowTargetWrites(ctx); err != nil { return handleError(fmt.Sprintf("failed to allow writes in the %s keyspace", ts.TargetKeyspaceName()), err) } @@ -3231,13 +3435,14 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit return ts.id, sw.logs(), nil } -func (s *Server) canSwitch(ctx context.Context, ts *trafficSwitcher, state *State, direction TrafficSwitchDirection, maxAllowedReplLagSecs int64) (reason string, err error) { +func (s *Server) canSwitch(ctx context.Context, ts *trafficSwitcher, state *State, direction TrafficSwitchDirection, + maxAllowedReplLagSecs int64, shards []string) (reason string, err error) { if direction == DirectionForward && state.WritesSwitched || direction == DirectionBackward && !state.WritesSwitched { log.Infof("writes already switched no need to check lag") return "", nil } - wf, err := s.GetWorkflow(ctx, state.TargetKeyspace, state.Workflow, false) + wf, err := s.GetWorkflow(ctx, state.TargetKeyspace, state.Workflow, false, shards) if err != nil { return "", err } @@ -3402,8 +3607,8 @@ func (s *Server) applySQLShard(ctx context.Context, tabletInfo *topo.TabletInfo, // fillStringTemplate returns the string template filled. 
func fillStringTemplate(tmpl string, vars any) (string, error) { myTemplate := template.Must(template.New("").Parse(tmpl)) - data := new(bytes.Buffer) - if err := myTemplate.Execute(data, vars); err != nil { + var data strings.Builder + if err := myTemplate.Execute(&data, vars); err != nil { return "", err } return data.String(), nil @@ -3447,11 +3652,14 @@ func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace str if !strings.Contains(vindex.Type, "lookup") { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex %s is not a lookup type", vindex.Type) } - targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + targetKeyspace, targetTableName, err = s.env.Parser().ParseTable(vindex.Params["table"]) if err != nil || targetKeyspace == "" { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex table name (%s) must be in the form .", vindex.Params["table"]) } vindexFromCols = strings.Split(vindex.Params["from"], ",") + for i, col := range vindexFromCols { + vindexFromCols[i] = strings.TrimSpace(col) + } if strings.Contains(vindex.Type, "unique") { if len(vindexFromCols) != 1 { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unique vindex 'from' should have only one column") @@ -3776,3 +3984,27 @@ func (s *Server) MigrateCreate(ctx context.Context, req *vtctldatapb.MigrateCrea } return s.moveTablesCreate(ctx, moveTablesCreateRequest, binlogdatapb.VReplicationWorkflowType_Migrate) } + +// getWorkflowStatus gets the overall status of the workflow by checking the status of all the streams. If all streams are not +// in the same state, it returns the unknown state. 
+func (s *Server) getWorkflowStatus(ctx context.Context, keyspace string, workflow string) (binlogdatapb.VReplicationWorkflowState, error) { + workflowStatus := binlogdatapb.VReplicationWorkflowState_Unknown + wf, err := s.GetWorkflow(ctx, keyspace, workflow, false, nil) + if err != nil { + return workflowStatus, err + } + for _, shardStream := range wf.GetShardStreams() { + for _, stream := range shardStream.GetStreams() { + state, ok := binlogdatapb.VReplicationWorkflowState_value[stream.State] + if !ok { + return workflowStatus, fmt.Errorf("invalid state for stream %s of workflow %s.%s", stream.State, keyspace, workflow) + } + currentStatus := binlogdatapb.VReplicationWorkflowState(state) + if workflowStatus != binlogdatapb.VReplicationWorkflowState_Unknown && currentStatus != workflowStatus { + return binlogdatapb.VReplicationWorkflowState_Unknown, nil + } + workflowStatus = currentStatus + } + } + return workflowStatus, nil +} diff --git a/go/vt/vtctl/workflow/server_test.go b/go/vt/vtctl/workflow/server_test.go index 85c60336351..174cc2aaf6a 100644 --- a/go/vt/vtctl/workflow/server_test.go +++ b/go/vt/vtctl/workflow/server_test.go @@ -27,12 +27,15 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) type fakeTMC struct { @@ -130,7 +133,6 @@ func TestCheckReshardingJournalExistsOnTablet(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -142,7 +144,7 @@ func TestCheckReshardingJournalExistsOnTablet(t *testing.T) { }, } - ws := NewServer(nil, tmc) + ws := NewServer(vtenv.NewTestEnv(), nil, tmc) journal, exists, err := 
ws.CheckReshardingJournalExistsOnTablet(ctx, tt.tablet, 1) if tt.shouldErr { assert.Error(t, err) @@ -163,3 +165,37 @@ func TestCheckReshardingJournalExistsOnTablet(t *testing.T) { }) } } + +// TestVDiffCreate performs some basic tests of the VDiffCreate function +// to ensure that it behaves as expected given a specific request. +func TestVDiffCreate(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer(ctx, "cell") + tmc := &fakeTMC{} + s := NewServer(vtenv.NewTestEnv(), ts, tmc) + + tests := []struct { + name string + req *vtctldatapb.VDiffCreateRequest + wantErr string + }{ + { + name: "no values", + req: &vtctldatapb.VDiffCreateRequest{}, + // We did not provide any keyspace or shard. + wantErr: "FindAllShardsInKeyspace() invalid keyspace name: UnescapeID err: invalid input identifier ''", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := s.VDiffCreate(ctx, tt.req) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + return + } + require.NoError(t, err) + require.NotNil(t, got) + require.NotEmpty(t, got.UUID) + }) + } +} diff --git a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go index 75d509614b7..b294ba1fcd0 100644 --- a/go/vt/vtctl/workflow/stream_migrator.go +++ b/go/vt/vtctl/workflow/stream_migrator.go @@ -18,29 +18,44 @@ package workflow import ( "context" + "errors" "fmt" + "sort" "strings" "sync" "text/template" + "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" 
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +/* + This file contains code that is specific to VReplication Reshard + workflows -- which require migrating the *other* VReplication + workflows (aside from the Reshard workflow itself) that exist in + the keyspace from one set of shards to another when switching traffic. +*/ + // StreamType is an enum representing the kind of stream. // // (TODO:@ajm188) This should be made package-private once the last references @@ -54,21 +69,25 @@ const ( StreamTypeReference ) -// StreamMigrator contains information needed to migrate a stream +// StreamMigrator contains information needed to migrate VReplication +// streams during Reshard workflows when the keyspace's VReplication +// workflows need to be migrated from one set of shards to another. type StreamMigrator struct { streams map[string][]*VReplicationStream workflows []string templates []*VReplicationStream ts ITrafficSwitcher logger logutil.Logger + parser *sqlparser.Parser } // BuildStreamMigrator creates a new StreamMigrator based on the given // TrafficSwitcher. -func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool) (*StreamMigrator, error) { +func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool, parser *sqlparser.Parser) (*StreamMigrator, error) { sm := &StreamMigrator{ ts: ts, logger: ts.Logger(), + parser: parser, } if sm.ts.MigrationType() == binlogdatapb.MigrationType_TABLES { @@ -97,6 +116,42 @@ func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate return sm, nil } +// BuildLegacyStreamMigrator creates a new StreamMigrator based on the given +// TrafficSwitcher using the legacy VReplicationExec method. 
+// Note: this should be removed along with the vtctl client code / wrangler. +func BuildLegacyStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool, parser *sqlparser.Parser) (*StreamMigrator, error) { + sm := &StreamMigrator{ + ts: ts, + logger: ts.Logger(), + parser: parser, + } + + if sm.ts.MigrationType() == binlogdatapb.MigrationType_TABLES { + // Source streams should be stopped only for shard migrations. + return sm, nil + } + + var err error + + sm.streams, err = sm.legacyReadSourceStreams(ctx, cancelMigrate) + if err != nil { + return nil, err + } + + // Loop executes only once. + for _, tabletStreams := range sm.streams { + tmpl, err := sm.templatize(ctx, tabletStreams) + if err != nil { + return nil, err + } + + sm.workflows = VReplicationStreams(tmpl).Workflows() + break + } + + return sm, nil +} + // StreamMigratorFinalize finalizes the stream migration. // // (TODO:@ajm88) in the original implementation, "it's a standalone function @@ -147,21 +202,26 @@ func (sm *StreamMigrator) Templates() []*VReplicationStream { return VReplicationStreams(sm.templates).Copy().ToSlice() } -// CancelMigration cancels a migration -func (sm *StreamMigrator) CancelMigration(ctx context.Context) { +// CancelStreamMigrations cancels the stream migrations. +func (sm *StreamMigrator) CancelStreamMigrations(ctx context.Context) { if sm.streams == nil { return } _ = sm.deleteTargetStreams(ctx) + // Restart the source streams, but leave the Reshard workflow's reverse + // variant stopped. err := sm.ts.ForAllSources(func(source *MigrationSource) error { - query := fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos=null, message='' where db_name=%s and workflow != %s", encodeString(source.GetPrimary().DbName()), encodeString(sm.ts.ReverseWorkflowName())) + // We intend to update all but our workflow's reverse streams, so we + // indicate that it's safe in this case using the comment diretive. 
+ query := fmt.Sprintf("update /*vt+ %s */ _vt.vreplication set state='Running', stop_pos=null, message='' where db_name=%s and workflow != %s", + vreplication.AllowUnsafeWriteCommentDirective, encodeString(source.GetPrimary().DbName()), encodeString(sm.ts.ReverseWorkflowName())) _, err := sm.ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) return err }) if err != nil { - sm.logger.Errorf("Cancel migration failed: could not restart source streams: %v", err) + sm.logger.Errorf("Cancel stream migrations failed: could not restart source streams: %v", err) } } @@ -178,7 +238,26 @@ func (sm *StreamMigrator) MigrateStreams(ctx context.Context) error { return sm.createTargetStreams(ctx, sm.templates) } -// StopStreams stops streams +// LegacyStopStreams stops streams using the legacy VReplicationExec method. +// Note: this should be removed along with the vtctl client code / wrangler. +func (sm *StreamMigrator) LegacyStopStreams(ctx context.Context) ([]string, error) { + if sm.streams == nil { + return nil, nil + } + + if err := sm.legacyStopSourceStreams(ctx); err != nil { + return nil, err + } + + positions, err := sm.syncSourceStreams(ctx) + if err != nil { + return nil, err + } + + return sm.legacyVerifyStreamPositions(ctx, positions) +} + +// StopStreams stops streams. func (sm *StreamMigrator) StopStreams(ctx context.Context) ([]string, error) { if sm.streams == nil { return nil, nil @@ -198,7 +277,11 @@ func (sm *StreamMigrator) StopStreams(ctx context.Context) ([]string, error) { /* tablet streams */ -func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.TabletInfo, constraint string) ([]*VReplicationStream, error) { +// readTabletStreams reads all of the VReplication workflow streams *except* +// the Reshard workflow's reverse variant using the legacy VReplicationExec +// method. +// Note: this should be removed along with the vtctl client code / wrangler. 
+func (sm *StreamMigrator) legacyReadTabletStreams(ctx context.Context, ti *topo.TabletInfo, constraint string) ([]*VReplicationStream, error) { query := fmt.Sprintf("select id, workflow, source, pos, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where db_name=%s and workflow != %s", encodeString(ti.DbName()), encodeString(sm.ts.ReverseWorkflowName())) if constraint != "" { @@ -280,8 +363,174 @@ func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.Tablet return tabletStreams, nil } +// readTabletStreams reads all of the VReplication workflow streams *except* +// the Reshard workflow's reverse variant. +func (sm *StreamMigrator) readTabletStreams(ctx context.Context, ti *topo.TabletInfo, ids []int32, states []binlogdatapb.VReplicationWorkflowState, excludeFrozen bool) ([]*VReplicationStream, error) { + req := &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + ExcludeWorkflows: []string{sm.ts.ReverseWorkflowName()}, + IncludeIds: ids, + IncludeStates: states, + ExcludeFrozen: excludeFrozen, + } + + res, err := sm.ts.TabletManagerClient().ReadVReplicationWorkflows(ctx, ti.Tablet, req) + if err != nil { + return nil, err + } + + tabletStreams := make([]*VReplicationStream, 0, len(res.Workflows)) + + for _, workflow := range res.Workflows { + switch workflow.Workflow { + case "": + return nil, fmt.Errorf("VReplication streams must have named workflows for migration: shard: %s:%s", + ti.Keyspace, ti.Shard) + case sm.ts.WorkflowName(): + return nil, fmt.Errorf("VReplication stream has the same workflow name as the resharding workflow: shard: %s:%s", + ti.Keyspace, ti.Shard) + } + + for _, stream := range workflow.Streams { + isReference, err := sm.blsIsReference(stream.Bls) + if err != nil { + return nil, vterrors.Wrap(err, "blsIsReference") + } + + if isReference { + sm.ts.Logger().Infof("readTabletStreams: ignoring reference table %+v", stream.Bls) + continue + } + + pos, err := 
replication.DecodePosition(stream.Pos) + if err != nil { + return nil, err + } + + tabletStreams = append(tabletStreams, &VReplicationStream{ + ID: stream.Id, + Workflow: workflow.Workflow, + BinlogSource: stream.Bls, + Position: pos, + WorkflowType: workflow.WorkflowType, + WorkflowSubType: workflow.WorkflowSubType, + DeferSecondaryKeys: workflow.DeferSecondaryKeys, + }) + } + } + return tabletStreams, nil +} + /* source streams */ +// legacyReadSourceStreams reads all of the VReplication workflow source streams using +// the legacy VReplicationExec method. +// Note: this should be removed along with the vtctl client code / wrangler. +func (sm *StreamMigrator) legacyReadSourceStreams(ctx context.Context, cancelMigrate bool) (map[string][]*VReplicationStream, error) { + var ( + mu sync.Mutex + streams = make(map[string][]*VReplicationStream) + ) + + err := sm.ts.ForAllSources(func(source *MigrationSource) error { + if !cancelMigrate { + // This flow protects us from the following scenario: When we create streams, + // we always do it in two phases. We start them off as Stopped, and then + // update them to Running. If such an operation fails, we may be left with + // lingering Stopped streams. They should actually be cleaned up by the user. + // In the current workflow, we stop streams and restart them. + // Once existing streams are stopped, there will be confusion about which of + // them can be restarted because they will be no different from the lingering streams. + // To prevent this confusion, we first check if there are any stopped streams. + // If so, we request the operator to clean them up, or restart them before going ahead. + // This allows us to assume that all stopped streams can be safely restarted + // if we cancel the operation. 
+ stoppedStreams, err := sm.legacyReadTabletStreams(ctx, source.GetPrimary(), "state = 'Stopped' and message != 'FROZEN'") + if err != nil { + return err + } + + if len(stoppedStreams) != 0 { + return fmt.Errorf("cannot migrate until all streams are running: %s: %d", source.GetShard().ShardName(), source.GetPrimary().Alias.Uid) + } + } + + tabletStreams, err := sm.legacyReadTabletStreams(ctx, source.GetPrimary(), "") + if err != nil { + return err + } + + if len(tabletStreams) == 0 { + // No VReplication is running. So, we have no work to do. + return nil + } + + query := fmt.Sprintf("select distinct vrepl_id from _vt.copy_state where vrepl_id in %s", VReplicationStreams(tabletStreams).Values()) + p3qr, err := sm.ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, query) + switch { + case err != nil: + return err + case len(p3qr.Rows) != 0: + return fmt.Errorf("cannot migrate while vreplication streams in source shards are still copying: %s", source.GetShard().ShardName()) + } + + mu.Lock() + defer mu.Unlock() + streams[source.GetShard().ShardName()] = tabletStreams + return nil + }) + + if err != nil { + return nil, err + } + + // Validate that streams match across source shards. + var ( + reference []*VReplicationStream + refshard string + streams2 = make(map[string][]*VReplicationStream) + ) + + for k, v := range streams { + if reference == nil { + refshard = k + reference = v + continue + } + + streams2[k] = append([]*VReplicationStream(nil), v...) + } + + for shard, tabletStreams := range streams2 { + for _, refStream := range reference { + err := func() error { + for i := 0; i < len(tabletStreams); i++ { + vrs := tabletStreams[i] + + if refStream.Workflow == vrs.Workflow && + refStream.BinlogSource.Keyspace == vrs.BinlogSource.Keyspace && + refStream.BinlogSource.Shard == vrs.BinlogSource.Shard { + // Delete the matched item and scan for the next stream. + tabletStreams = append(tabletStreams[:i], tabletStreams[i+1:]...) 
+ return nil + } + } + + return fmt.Errorf("streams are mismatched across source shards: %s vs %s", refshard, shard) + }() + + if err != nil { + return nil, err + } + } + + if len(tabletStreams) != 0 { + return nil, fmt.Errorf("streams are mismatched across source shards: %s vs %s", refshard, shard) + } + } + + return streams, nil +} + func (sm *StreamMigrator) readSourceStreams(ctx context.Context, cancelMigrate bool) (map[string][]*VReplicationStream, error) { var ( mu sync.Mutex @@ -301,7 +550,8 @@ func (sm *StreamMigrator) readSourceStreams(ctx context.Context, cancelMigrate b // If so, we request the operator to clean them up, or restart them before going ahead. // This allows us to assume that all stopped streams can be safely restarted // if we cancel the operation. - stoppedStreams, err := sm.readTabletStreams(ctx, source.GetPrimary(), "state = 'Stopped' and message != 'FROZEN'") + stoppedStreams, err := sm.readTabletStreams(ctx, source.GetPrimary(), nil, + []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped}, true) if err != nil { return err } @@ -311,7 +561,7 @@ func (sm *StreamMigrator) readSourceStreams(ctx context.Context, cancelMigrate b } } - tabletStreams, err := sm.readTabletStreams(ctx, source.GetPrimary(), "") + tabletStreams, err := sm.readTabletStreams(ctx, source.GetPrimary(), nil, nil, false) if err != nil { return err } @@ -388,7 +638,10 @@ func (sm *StreamMigrator) readSourceStreams(ctx context.Context, cancelMigrate b return streams, nil } -func (sm *StreamMigrator) stopSourceStreams(ctx context.Context) error { +// legacyStopSourceStreams stops the source streams using the legacy VReplicationExec +// method. +// Note: this should be removed along with the vtctl client code / wrangler. 
+func (sm *StreamMigrator) legacyStopSourceStreams(ctx context.Context) error { var ( mu sync.Mutex stoppedStreams = make(map[string][]*VReplicationStream) @@ -406,7 +659,97 @@ func (sm *StreamMigrator) stopSourceStreams(ctx context.Context) error { return err } - tabletStreams, err = sm.readTabletStreams(ctx, source.GetPrimary(), fmt.Sprintf("id in %s", VReplicationStreams(tabletStreams).Values())) + tabletStreams, err = sm.legacyReadTabletStreams(ctx, source.GetPrimary(), fmt.Sprintf("id in %s", VReplicationStreams(tabletStreams).Values())) + if err != nil { + return err + } + + mu.Lock() + defer mu.Unlock() + stoppedStreams[source.GetShard().ShardName()] = tabletStreams + + return nil + }) + + if err != nil { + return err + } + + sm.streams = stoppedStreams + return nil +} + +func (sm *StreamMigrator) stopSourceStreams(ctx context.Context) error { + var ( + mu sync.Mutex + stoppedStreams = make(map[string][]*VReplicationStream) + ) + + err := sm.ts.ForAllSources(func(source *MigrationSource) error { + shard := source.GetShard().ShardName() + tabletStreams := sm.streams[shard] + if len(tabletStreams) == 0 { + return nil + } + + // For intra-keyspace materialize workflows where the source and target are both + // the keyspace that is being resharded, we need to wait for those to catchup as + // well. New writes have already been blocked on the source, but the materialization + // workflow(s) still need to catchup with writes that happened just before writes + // were stopped on the source. 
+ eg, egCtx := errgroup.WithContext(ctx) + for _, vrs := range tabletStreams { + if vrs.WorkflowType == binlogdatapb.VReplicationWorkflowType_Materialize && vrs.BinlogSource.Keyspace == sm.ts.TargetKeyspaceName() { + if vrs.BinlogSource == nil { // Should never happen + return fmt.Errorf("no binlog source is defined for materialization workflow %s", vrs.Workflow) + } + eg.Go(func() error { + sourceTablet := source.primary.Tablet.CloneVT() + if sourceTablet.Shard != vrs.BinlogSource.Shard { + si, err := sm.ts.TopoServer().GetTabletMapForShard(egCtx, vrs.BinlogSource.GetKeyspace(), vrs.BinlogSource.GetShard()) + if err != nil { + return err + } + for _, tablet := range si { + if tablet.GetType() == topodatapb.TabletType_PRIMARY { + sourceTablet = tablet.CloneVT() + break + } + } + } + if sourceTablet == nil { + return fmt.Errorf("no primary tablet found for materialization workflow %s and its stream from the binary log source %s/%s", + vrs.Workflow, vrs.BinlogSource.GetKeyspace(), vrs.BinlogSource.GetShard()) + } + pos, err := sm.ts.TabletManagerClient().PrimaryPosition(egCtx, sourceTablet) + if err != nil { + return err + } + sm.ts.Logger().Infof("Waiting for intra-keyspace materialization workflow %s on %v/%v to reach position %v for stream source from %s/%s, starting from position %s on tablet %s", + vrs.Workflow, source.primary.Keyspace, source.primary.Shard, pos, vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard, vrs.Position, topoproto.TabletAliasString(source.primary.Tablet.Alias)) + if err := sm.ts.TabletManagerClient().VReplicationWaitForPos(egCtx, source.primary.Tablet, vrs.ID, pos); err != nil { + return err + } + return nil + }) + } + } + if err := eg.Wait(); err != nil { + var xtra string + if errors.Is(err, context.DeadlineExceeded) { + xtra = " (increase the --timeout value if needed)" + } + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "error waiting for intra-keyspace materialization workflow %s to catch up%s: %v", + tabletStreams[0].Workflow, xtra, 
err) + } + + query := fmt.Sprintf("update _vt.vreplication set state='Stopped', message='for cutover' where id in %s", VReplicationStreams(tabletStreams).Values()) + _, err := sm.ts.TabletManagerClient().VReplicationExec(ctx, source.GetPrimary().Tablet, query) + if err != nil { + return err + } + + tabletStreams, err = sm.readTabletStreams(ctx, source.GetPrimary(), VReplicationStreams(tabletStreams).IDs(), nil, false) if err != nil { return err } @@ -470,8 +813,19 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]rep allErrors.RecordError(err) return } - - query := fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for cutover' where id=%d", replication.EncodePosition(pos), vrs.ID) + comment := "" + if vrs.WorkflowType == binlogdatapb.VReplicationWorkflowType_Materialize && vrs.BinlogSource.Keyspace == sm.ts.TargetKeyspaceName() { + // For intra-keyspace materializations in a keyspace that's being + // resharded, we don't have serving tablets on the workflow's current + // target side. So we instruct the VReplication engine and controller + // on the target tablets to include non-serving tablets in their + // search for source tablets to stream from as we migrate and setup + // these intra-keyspace materializations on the current target side + // that we're preparing to switch traffic to. 
+ comment = fmt.Sprintf("/*vt+ %s=1 */ ", vreplication.IncludeNonServingTabletsCommentDirective) + } + query := fmt.Sprintf("update %s_vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for cutover' where id=%d", + comment, replication.EncodePosition(pos), vrs.ID) if _, err := sm.ts.TabletManagerClient().VReplicationExec(ctx, primary.Tablet, query); err != nil { allErrors.RecordError(err) return @@ -493,6 +847,72 @@ func (sm *StreamMigrator) syncSourceStreams(ctx context.Context) (map[string]rep return stopPositions, allErrors.AggrError(vterrors.Aggregate) } +// legacyVerifyStreamPositions verifies the stream positions using the legacy +// VReplicationExec method. +// Note: this should be removed along with the vtctl client code / wrangler. +func (sm *StreamMigrator) legacyVerifyStreamPositions(ctx context.Context, stopPositions map[string]replication.Position) ([]string, error) { + var ( + mu sync.Mutex + stoppedStreams = make(map[string][]*VReplicationStream) + ) + + err := sm.ts.ForAllSources(func(source *MigrationSource) error { + tabletStreams := sm.streams[source.GetShard().ShardName()] + if len(tabletStreams) == 0 { + return nil + } + + tabletStreams, err := sm.legacyReadTabletStreams(ctx, source.GetPrimary(), fmt.Sprintf("id in %s", VReplicationStreams(tabletStreams).Values())) + if err != nil { + return err + } + + mu.Lock() + defer mu.Unlock() + stoppedStreams[source.GetShard().ShardName()] = tabletStreams + + return nil + }) + + if err != nil { + return nil, err + } + + // This is not really required because it's not used later. + // But we keep it up-to-date for good measure. 
+ sm.streams = stoppedStreams + + var ( + oneSet []*VReplicationStream + allErrors concurrency.AllErrorRecorder + ) + + for _, tabletStreams := range stoppedStreams { + if oneSet == nil { + oneSet = tabletStreams + } + + for _, vrs := range tabletStreams { + key := fmt.Sprintf("%s:%s", vrs.BinlogSource.Keyspace, vrs.BinlogSource.Shard) + if pos := stopPositions[key]; !vrs.Position.Equal(pos) { + allErrors.RecordError(fmt.Errorf("%s: stream %d position: %s does not match %s", key, vrs.ID, replication.EncodePosition(vrs.Position), replication.EncodePosition(pos))) + } + } + } + + if allErrors.HasErrors() { + return nil, allErrors.AggrError(vterrors.Aggregate) + } + + sm.templates, err = sm.templatize(ctx, oneSet) + if err != nil { + // Unreachable: we've already templatized this before. + return nil, err + } + + return VReplicationStreams(sm.templates).Workflows(), allErrors.AggrError(vterrors.Aggregate) +} + func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositions map[string]replication.Position) ([]string, error) { var ( mu sync.Mutex @@ -505,7 +925,7 @@ func (sm *StreamMigrator) verifyStreamPositions(ctx context.Context, stopPositio return nil } - tabletStreams, err := sm.readTabletStreams(ctx, source.GetPrimary(), fmt.Sprintf("id in %s", VReplicationStreams(tabletStreams).Values())) + tabletStreams, err := sm.readTabletStreams(ctx, source.GetPrimary(), VReplicationStreams(tabletStreams).IDs(), nil, false) if err != nil { return err } @@ -566,13 +986,14 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl return sm.ts.ForAllTargets(func(target *MigrationTarget) error { ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, target.GetPrimary().DbName()) tabletStreams := VReplicationStreams(tmpl).Copy().ToSlice() + var err error - for _, vrs := range tabletStreams { + addStreamRow := func(vrs *VReplicationStream) error { for _, rule := range vrs.BinlogSource.Filter.Rules { buf := 
&strings.Builder{} t := template.Must(template.New("").Parse(rule.Filter)) - if err := t.Execute(buf, key.KeyRangeString(target.GetShard().KeyRange)); err != nil { + if err := t.Execute(buf, key.KeyRangeString(target.GetShard().GetKeyRange())); err != nil { return err } @@ -581,9 +1002,53 @@ func (sm *StreamMigrator) createTargetStreams(ctx context.Context, tmpl []*VRepl ig.AddRow(vrs.Workflow, vrs.BinlogSource, replication.EncodePosition(vrs.Position), "", "", vrs.WorkflowType, vrs.WorkflowSubType, vrs.DeferSecondaryKeys) + return nil } - _, err := sm.ts.VReplicationExec(ctx, target.GetPrimary().GetAlias(), ig.String()) + var intraKeyspaceStreams map[string]bool + + for _, vrs := range tabletStreams { + // If we have an intra-keyspace materialization workflow, we need to + // create the streams from each target shard to each target shard + // rather than simply copying the streams from the source shards. + if vrs.WorkflowType == binlogdatapb.VReplicationWorkflowType_Materialize && vrs.BinlogSource.Keyspace == sm.ts.TargetKeyspaceName() { + if intraKeyspaceStreams == nil { + intraKeyspaceStreams = make(map[string]bool) + } + targets := maps.Values(sm.ts.Targets()) + sort.Slice(targets, func(i, j int) bool { + return key.KeyRangeLess(targets[i].GetShard().GetKeyRange(), targets[j].GetShard().GetKeyRange()) + }) + for _, st := range targets { + stream := *vrs // Copy + stream.BinlogSource.Shard = st.GetShard().ShardName() + key := fmt.Sprintf("%s:%s/%s:%s/%s", stream.Workflow, target.si.Keyspace(), target.GetShard().ShardName(), st.GetShard().Keyspace(), st.GetShard().ShardName()) + if intraKeyspaceStreams[key] { + continue // We've already created the stream. 
+ } + pos, err := sm.ts.TabletManagerClient().PrimaryPosition(ctx, st.primary.Tablet) + if err != nil { + return err + } + sm.ts.Logger().Infof("Setting position for intra-keyspace materialization workflow %s on %v/%v to %v on tablet %s", + stream.Workflow, st.primary.Keyspace, st.primary.Shard, pos, topoproto.TabletAliasString(st.primary.Tablet.Alias)) + stream.Position, err = binlogplayer.DecodePosition(pos) + if err != nil { + return err + } + intraKeyspaceStreams[key] = true + if err := addStreamRow(&stream); err != nil { + return err + } + } + continue + } + if err := addStreamRow(vrs); err != nil { + return err + } + } + + _, err = sm.ts.VReplicationExec(ctx, target.GetPrimary().GetAlias(), ig.String()) return err }) } @@ -674,7 +1139,7 @@ func (sm *StreamMigrator) templatizeRule(ctx context.Context, rule *binlogdatapb } func (sm *StreamMigrator) templatizeKeyRange(ctx context.Context, rule *binlogdatapb.Rule) error { - statement, err := sqlparser.Parse(rule.Filter) + statement, err := sm.parser.Parse(rule.Filter) if err != nil { return err } @@ -696,7 +1161,7 @@ func (sm *StreamMigrator) templatizeKeyRange(ctx context.Context, rule *binlogda continue } - var krExpr sqlparser.SelectExpr + var krExpr sqlparser.Expr switch len(funcExpr.Exprs) { case 1: krExpr = funcExpr.Exprs[0] @@ -706,12 +1171,7 @@ func (sm *StreamMigrator) templatizeKeyRange(ctx context.Context, rule *binlogda return fmt.Errorf("unexpected in_keyrange parameters: %v", sqlparser.String(funcExpr)) } - aliased, ok := krExpr.(*sqlparser.AliasedExpr) - if !ok { - return fmt.Errorf("unexpected in_keyrange parameters: %v", sqlparser.String(funcExpr)) - } - - val, ok := aliased.Expr.(*sqlparser.Literal) + val, ok := krExpr.(*sqlparser.Literal) if !ok { return fmt.Errorf("unexpected in_keyrange parameters: %v", sqlparser.String(funcExpr)) } @@ -729,10 +1189,10 @@ func (sm *StreamMigrator) templatizeKeyRange(ctx context.Context, rule *binlogda vtable := sm.ts.SourceKeyspaceSchema().Tables[rule.Match] 
inkr := &sqlparser.FuncExpr{ Name: sqlparser.NewIdentifierCI("in_keyrange"), - Exprs: sqlparser.SelectExprs{ - &sqlparser.AliasedExpr{Expr: &sqlparser.ColName{Name: vtable.ColumnVindexes[0].Columns[0]}}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vtable.ColumnVindexes[0].Type)}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.}}")}, + Exprs: sqlparser.Exprs{ + &sqlparser.ColName{Name: vtable.ColumnVindexes[0].Columns[0]}, + sqlparser.NewStrLiteral(vtable.ColumnVindexes[0].Type), + sqlparser.NewStrLiteral("{{.}}"), }, } sel.AddWhere(inkr) diff --git a/go/vt/vtctl/workflow/stream_migrator_test.go b/go/vt/vtctl/workflow/stream_migrator_test.go index 04f787eb4d4..38ae10280f7 100644 --- a/go/vt/vtctl/workflow/stream_migrator_test.go +++ b/go/vt/vtctl/workflow/stream_migrator_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -304,7 +306,7 @@ func TestTemplatize(t *testing.T) { }, }, } - ksschema, err := vindexes.BuildKeyspaceSchema(vs, "ks") + ksschema, err := vindexes.BuildKeyspaceSchema(vs, "ks", sqlparser.NewTestParser()) require.NoError(t, err, "could not create test keyspace %+v", vs) ts := &testTrafficSwitcher{ diff --git a/go/vt/vtctl/workflow/switcher.go b/go/vt/vtctl/workflow/switcher.go index 0cbdce164dc..aa41655aab8 100644 --- a/go/vt/vtctl/workflow/switcher.go +++ b/go/vt/vtctl/workflow/switcher.go @@ -42,6 +42,10 @@ func (r *switcher) deleteShardRoutingRules(ctx context.Context) error { return r.ts.deleteShardRoutingRules(ctx) } +func (r *switcher) deleteKeyspaceRoutingRules(ctx context.Context) error { + return r.ts.deleteKeyspaceRoutingRules(ctx) +} + func (r *switcher) dropSourceDeniedTables(ctx context.Context) error { return r.ts.dropSourceDeniedTables(ctx) } @@ -62,12 +66,20 @@ func (r *switcher) dropSourceShards(ctx 
context.Context) error { return r.ts.dropSourceShards(ctx) } +func (r *switcher) switchKeyspaceReads(ctx context.Context, servedTypes []topodatapb.TabletType) error { + if err := changeKeyspaceRouting(ctx, r.ts.TopoServer(), servedTypes, + r.ts.SourceKeyspaceName() /* from */, r.ts.TargetKeyspaceName() /* to */, "SwitchReads"); err != nil { + return err + } + return nil +} + func (r *switcher) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { return r.ts.switchShardReads(ctx, cells, servedTypes, direction) } -func (r *switcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { - return r.ts.switchTableReads(ctx, cells, servedTypes, direction) +func (r *switcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, rebuildSrvVSchema bool, direction TrafficSwitchDirection) error { + return r.ts.switchTableReads(ctx, cells, servedTypes, rebuildSrvVSchema, direction) } func (r *switcher) startReverseVReplication(ctx context.Context) error { diff --git a/go/vt/vtctl/workflow/switcher_dry_run.go b/go/vt/vtctl/workflow/switcher_dry_run.go index 1c8a05e00c2..03faa4c4ca2 100644 --- a/go/vt/vtctl/workflow/switcher_dry_run.go +++ b/go/vt/vtctl/workflow/switcher_dry_run.go @@ -24,7 +24,8 @@ import ( "strings" "time" - "vitess.io/vitess/go/maps2" + "golang.org/x/exp/maps" + "vitess.io/vitess/go/mysql/replication" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -55,6 +56,23 @@ func (dr *switcherDryRun) deleteShardRoutingRules(ctx context.Context) error { return nil } +func (dr *switcherDryRun) deleteKeyspaceRoutingRules(ctx context.Context) error { + if dr.ts.IsMultiTenantMigration() { + dr.drLog.Log("Keyspace routing rules will be deleted") + } + return nil +} + +func (dr *switcherDryRun) switchKeyspaceReads(ctx context.Context, types []topodatapb.TabletType) error { 
+ var tabletTypes []string + for _, servedType := range types { + tabletTypes = append(tabletTypes, servedType.String()) + } + dr.drLog.Logf("Switch reads from keyspace %s to keyspace %s for tablet types [%s]", + dr.ts.SourceKeyspaceName(), dr.ts.TargetKeyspaceName(), strings.Join(tabletTypes, ",")) + return nil +} + func (dr *switcherDryRun) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { sourceShards := make([]string, 0) targetShards := make([]string, 0) @@ -76,7 +94,7 @@ func (dr *switcherDryRun) switchShardReads(ctx context.Context, cells []string, return nil } -func (dr *switcherDryRun) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { +func (dr *switcherDryRun) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, rebuildSrvVSchema bool, direction TrafficSwitchDirection) error { ks := dr.ts.TargetKeyspaceName() if direction == DirectionBackward { ks = dr.ts.SourceKeyspaceName() @@ -88,6 +106,9 @@ func (dr *switcherDryRun) switchTableReads(ctx context.Context, cells []string, tables := strings.Join(dr.ts.Tables(), ",") dr.drLog.Logf("Switch reads for tables [%s] to keyspace %s for tablet types [%s]", tables, ks, strings.Join(tabletTypes, ",")) dr.drLog.Logf("Routing rules for tables [%s] will be updated", tables) + if rebuildSrvVSchema { + dr.drLog.Logf("Serving VSchema will be rebuilt for the %s keyspace", ks) + } return nil } @@ -214,7 +235,7 @@ func (dr *switcherDryRun) stopStreams(ctx context.Context, sm *StreamMigrator) ( } func (dr *switcherDryRun) cancelMigration(ctx context.Context, sm *StreamMigrator) { - dr.drLog.Log("Cancel stream migrations as requested") + dr.drLog.Log("Cancel migration as requested") } func (dr *switcherDryRun) lockKeyspace(ctx context.Context, keyspace, _ string) (context.Context, func(*error), error) { @@ -380,7 +401,7 @@ func 
(dr *switcherDryRun) resetSequences(ctx context.Context) error { } func (dr *switcherDryRun) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { - sortedBackingTableNames := maps2.Keys(sequencesByBackingTable) + sortedBackingTableNames := maps.Keys(sequencesByBackingTable) slices.Sort(sortedBackingTableNames) dr.drLog.Log(fmt.Sprintf("The following sequence backing tables used by tables being moved will be initialized: %s", strings.Join(sortedBackingTableNames, ","))) diff --git a/go/vt/vtctl/workflow/switcher_interface.go b/go/vt/vtctl/workflow/switcher_interface.go index 8d0f9e847be..0780aaf484c 100644 --- a/go/vt/vtctl/workflow/switcher_interface.go +++ b/go/vt/vtctl/workflow/switcher_interface.go @@ -36,7 +36,8 @@ type iswitcher interface { changeRouting(ctx context.Context) error streamMigraterfinalize(ctx context.Context, ts *trafficSwitcher, workflows []string) error startReverseVReplication(ctx context.Context) error - switchTableReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, direction TrafficSwitchDirection) error + switchKeyspaceReads(ctx context.Context, types []topodatapb.TabletType) error + switchTableReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, rebuildSrvVSchema bool, direction TrafficSwitchDirection) error switchShardReads(ctx context.Context, cells []string, servedType []topodatapb.TabletType, direction TrafficSwitchDirection) error validateWorkflowHasCompleted(ctx context.Context) error removeSourceTables(ctx context.Context, removalType TableRemovalType) error @@ -50,6 +51,7 @@ type iswitcher interface { dropTargetShards(ctx context.Context) error deleteRoutingRules(ctx context.Context) error deleteShardRoutingRules(ctx context.Context) error + deleteKeyspaceRoutingRules(ctx context.Context) error addParticipatingTablesToKeyspace(ctx context.Context, keyspace, tableSpecs string) error resetSequences(ctx context.Context) error 
initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error diff --git a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 35f1d1b966b..9ea1c8b609b 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -18,6 +18,7 @@ package workflow import ( "context" + "encoding/json" "errors" "fmt" "sort" @@ -25,10 +26,12 @@ import ( "sync" "time" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" + + "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/maps2" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -38,6 +41,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -173,6 +177,7 @@ type TargetInfo struct { OptTabletTypes string WorkflowType binlogdatapb.VReplicationWorkflowType WorkflowSubType binlogdatapb.VReplicationWorkflowSubType + Options *vtctldatapb.WorkflowOptions } // MigrationSource contains the metadata for each migration source. 
@@ -233,6 +238,7 @@ type trafficSwitcher struct { targetTimeZone string workflowType binlogdatapb.VReplicationWorkflowType workflowSubType binlogdatapb.VReplicationWorkflowSubType + options *vtctldatapb.WorkflowOptions } func (ts *trafficSwitcher) TopoServer() *topo.Server { return ts.ws.ts } @@ -380,9 +386,6 @@ func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, if err := json2.Unmarshal([]byte(wrap), ks); err != nil { return err } - if err != nil { - return err - } for table, vtab := range ks.Tables { vschema.Tables[table] = vtab } @@ -436,6 +439,21 @@ func (ts *trafficSwitcher) deleteShardRoutingRules(ctx context.Context) error { return nil } +func (ts *trafficSwitcher) deleteKeyspaceRoutingRules(ctx context.Context) error { + if !ts.IsMultiTenantMigration() { + return nil + } + log.Infof("deleteKeyspaceRoutingRules: workflow %s.%s", ts.targetKeyspace, ts.workflow) + reason := fmt.Sprintf("Deleting rules for %s", ts.SourceKeyspaceName()) + return topotools.UpdateKeyspaceRoutingRules(ctx, ts.TopoServer(), reason, + func(ctx context.Context, rules *map[string]string) error { + for _, suffix := range tabletTypeSuffixes { + delete(*rules, ts.SourceKeyspaceName()+suffix) + } + return nil + }) +} + func (ts *trafficSwitcher) dropSourceDeniedTables(ctx context.Context) error { return ts.ForAllSources(func(source *MigrationSource) error { if _, err := ts.TopoServer().UpdateShardFields(ctx, ts.SourceKeyspaceName(), source.GetShard().ShardName(), func(si *topo.ShardInfo) error { @@ -490,26 +508,33 @@ func (ts *trafficSwitcher) dropParticipatingTablesFromKeyspace(ctx context.Conte func (ts *trafficSwitcher) removeSourceTables(ctx context.Context, removalType TableRemovalType) error { err := ts.ForAllSources(func(source *MigrationSource) error { for _, tableName := range ts.Tables() { - query := fmt.Sprintf("drop table %s.%s", - sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), - 
sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + primaryDbName, err := sqlescape.EnsureEscaped(source.GetPrimary().DbName()) + if err != nil { + return err + } + tableNameEscaped, err := sqlescape.EnsureEscaped(tableName) + if err != nil { + return err + } + + query := fmt.Sprintf("drop table %s.%s", primaryDbName, tableNameEscaped) if removalType == DropTable { ts.Logger().Infof("%s: Dropping table %s.%s\n", source.GetPrimary().String(), source.GetPrimary().DbName(), tableName) } else { - renameName := getRenameFileName(tableName) + renameName, err := sqlescape.EnsureEscaped(getRenameFileName(tableName)) + if err != nil { + return err + } ts.Logger().Infof("%s: Renaming table %s.%s to %s.%s\n", source.GetPrimary().String(), source.GetPrimary().DbName(), tableName, source.GetPrimary().DbName(), renameName) - query = fmt.Sprintf("rename table %s.%s TO %s.%s", - sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(tableName)), - sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(renameName))) + query = fmt.Sprintf("rename table %s.%s TO %s.%s", primaryDbName, tableNameEscaped, primaryDbName, renameName) } - _, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, source.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ - Query: []byte(query), - MaxRows: 1, - ReloadSchema: true, + _, err = ts.ws.tmc.ExecuteFetchAsDba(ctx, source.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + DisableForeignKeyChecks: true, }) if err != nil { ts.Logger().Errorf("%s: Error removing table %s: %v", source.GetPrimary().String(), tableName, err) @@ -542,15 +567,12 @@ func (ts *trafficSwitcher) dropSourceShards(ctx context.Context) error { } func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, 
direction TrafficSwitchDirection) error { - var fromShards, toShards []*topo.ShardInfo - if direction == DirectionForward { - fromShards, toShards = ts.SourceShards(), ts.TargetShards() - } else { - fromShards, toShards = ts.TargetShards(), ts.SourceShards() - } - if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { + cellsStr := strings.Join(cells, ",") + log.Infof("switchShardReads: cells: %s, tablet types: %+v, direction %d", cellsStr, servedTypes, direction) + fromShards, toShards := ts.SourceShards(), ts.TargetShards() + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), cellsStr); err != nil { err2 := vterrors.Wrapf(err, "Before switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", - ts.TargetKeyspaceName(), strings.Join(cells, ",")) + ts.TargetKeyspaceName(), cellsStr) log.Errorf("%w", err2) return err2 } @@ -566,17 +588,17 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, return err } } - if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), strings.Join(cells, ",")); err != nil { + if err := ts.TopoServer().ValidateSrvKeyspace(ctx, ts.TargetKeyspaceName(), cellsStr); err != nil { err2 := vterrors.Wrapf(err, "after switching shard reads, found SrvKeyspace for %s is corrupt in cell %s", - ts.TargetKeyspaceName(), strings.Join(cells, ",")) + ts.TargetKeyspaceName(), cellsStr) log.Errorf("%w", err2) return err2 } return nil } -func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, direction TrafficSwitchDirection) error { - log.Infof("switchTableReads: servedTypes: %+v, direction %t", servedTypes, direction) +func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, servedTypes []topodatapb.TabletType, rebuildSrvVSchema bool, direction TrafficSwitchDirection) error { + log.Infof("switchTableReads: cells: %s, tablet 
types: %+v, direction %d", strings.Join(cells, ","), servedTypes, direction) rules, err := topotools.GetRoutingRules(ctx, ts.TopoServer()) if err != nil { return err @@ -607,12 +629,16 @@ func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, if err := topotools.SaveRoutingRules(ctx, ts.TopoServer(), rules); err != nil { return err } - return ts.TopoServer().RebuildSrvVSchema(ctx, cells) + if rebuildSrvVSchema { + return ts.TopoServer().RebuildSrvVSchema(ctx, cells) + } + return nil } func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error { return ts.ForAllSources(func(source *MigrationSource) error { - query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName())) + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", + encodeString(source.GetPrimary().DbName()), encodeString(ts.ReverseWorkflowName())) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) return err }) @@ -705,7 +731,14 @@ func (ts *trafficSwitcher) changeRouting(ctx context.Context) error { } func (ts *trafficSwitcher) changeWriteRoute(ctx context.Context) error { - if ts.isPartialMigration { + if ts.IsMultiTenantMigration() { + // For multi-tenant migrations, we can only move forward and not backwards. 
+ ts.Logger().Infof("Pointing keyspace routing rules for primary to %s for workflow %s", ts.TargetKeyspaceName(), ts.workflow) + if err := changeKeyspaceRouting(ctx, ts.TopoServer(), []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, + ts.SourceKeyspaceName() /* from */, ts.TargetKeyspaceName() /* to */, "SwitchWrites"); err != nil { + return err + } + } else if ts.isPartialMigration { srr, err := topotools.GetShardRoutingRules(ctx, ts.TopoServer()) if err != nil { return err @@ -779,7 +812,7 @@ func (ts *trafficSwitcher) changeShardRouting(ctx context.Context) error { return nil } -func (ts *trafficSwitcher) getReverseVReplicationUpdateQuery(targetCell string, sourceCell string, dbname string) string { +func (ts *trafficSwitcher) getReverseVReplicationUpdateQuery(targetCell string, sourceCell string, dbname string, options string) string { // we try to be clever to understand what user intends: // if target's cell is present in cells but not source's cell we replace it // with the source's cell. 
@@ -789,8 +822,8 @@ func (ts *trafficSwitcher) getReverseVReplicationUpdateQuery(targetCell string, } if ts.optCells != "" || ts.optTabletTypes != "" { - query := fmt.Sprintf("update _vt.vreplication set cell = '%s', tablet_types = '%s' where workflow = '%s' and db_name = '%s'", - ts.optCells, ts.optTabletTypes, ts.ReverseWorkflowName(), dbname) + query := fmt.Sprintf("update _vt.vreplication set cell = '%s', tablet_types = '%s', options = '%s' where workflow = '%s' and db_name = '%s'", + ts.optCells, ts.optTabletTypes, options, ts.ReverseWorkflowName(), dbname) return query } return "" @@ -843,7 +876,7 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error SourceTimeZone: bls.TargetTimeZone, TargetTimeZone: bls.SourceTimeZone, } - + var err error for _, rule := range bls.Filter.Rules { if rule.Filter == "exclude" { reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, rule) @@ -880,6 +913,12 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error } } filter = fmt.Sprintf("select * from %s%s", sqlescape.EscapeID(rule.Match), inKeyrange) + if ts.IsMultiTenantMigration() { + filter, err = ts.addTenantFilter(ctx, filter) + if err != nil { + return err + } + } } reverseBls.Filter.Rules = append(reverseBls.Filter.Rules, &binlogdatapb.Rule{ Match: rule.Match, @@ -888,7 +927,7 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error } log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s", source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position) - _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, + _, err = ts.VReplicationExec(ctx, source.GetPrimary().Alias, binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) if err != nil { @@ -896,7 +935,12 @@ func (ts 
*trafficSwitcher) createReverseVReplication(ctx context.Context) error } // if user has defined the cell/tablet_types parameters in the forward workflow, update the reverse workflow as well - updateQuery := ts.getReverseVReplicationUpdateQuery(target.GetPrimary().Alias.Cell, source.GetPrimary().Alias.Cell, source.GetPrimary().DbName()) + optionsJSON, err := json.Marshal(ts.options) + if err != nil { + return err + } + updateQuery := ts.getReverseVReplicationUpdateQuery(target.GetPrimary().Alias.Cell, + source.GetPrimary().Alias.Cell, source.GetPrimary().DbName(), string(optionsJSON)) if updateQuery != "" { log.Infof("Updating vreplication stream entry on %s with: %s", source.GetPrimary().Alias, updateQuery) _, err = ts.VReplicationExec(ctx, source.GetPrimary().Alias, updateQuery) @@ -907,6 +951,33 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error return err } +func (ts *trafficSwitcher) addTenantFilter(ctx context.Context, filter string) (string, error) { + parser := ts.ws.env.Parser() + vschema, err := ts.TopoServer().GetVSchema(ctx, ts.targetKeyspace) + if err != nil { + return "", err + } + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ts.targetKeyspace, parser) + if err != nil { + return "", err + } + tenantClause, err := getTenantClause(ts.options, targetVSchema, parser) + if err != nil { + return "", err + } + stmt, err := parser.Parse(filter) + if err != nil { + return "", err + } + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return "", fmt.Errorf("unrecognized statement: %s", filter) + } + addFilter(sel, *tenantClause) + filter = sqlparser.String(sel) + return filter, nil +} + func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, filteredReplicationWaitTime time.Duration) error { ctx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) defer cancel() @@ -933,7 +1004,7 @@ func (ts *trafficSwitcher) waitForCatchup(ctx context.Context, filteredReplicati }); err != nil { return err } - // 
all targets have caught up, record their positions for setting up reverse workflows + // All targets have caught up, record their positions for setting up reverse workflows. return ts.ForAllTargets(func(target *MigrationTarget) error { var err error target.Position, err = ts.TabletManagerClient().PrimaryPosition(ctx, target.GetPrimary().Tablet) @@ -999,10 +1070,10 @@ func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *StreamMigrat err = ts.changeShardsAccess(ctx, ts.SourceKeyspaceName(), ts.SourceShards(), allowWrites) } if err != nil { - ts.Logger().Errorf("Cancel migration failed:", err) + ts.Logger().Errorf("Cancel migration failed: %v", err) } - sm.CancelMigration(ctx) + sm.CancelStreamMigrations(ctx) err = ts.ForAllTargets(func(target *MigrationTarget) error { query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", @@ -1063,22 +1134,27 @@ func (ts *trafficSwitcher) dropSourceReverseVReplicationStreams(ctx context.Cont } func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { - log.Flush() err := ts.ForAllTargets(func(target *MigrationTarget) error { log.Infof("ForAllTargets: %+v", target) for _, tableName := range ts.Tables() { - query := fmt.Sprintf("drop table %s.%s", - sqlescape.EscapeID(sqlescape.UnescapeID(target.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + primaryDbName, err := sqlescape.EnsureEscaped(target.GetPrimary().DbName()) + if err != nil { + return err + } + tableName, err := sqlescape.EnsureEscaped(tableName) + if err != nil { + return err + } + query := fmt.Sprintf("drop table %s.%s", primaryDbName, tableName) ts.Logger().Infof("%s: Dropping table %s.%s\n", target.GetPrimary().String(), target.GetPrimary().DbName(), tableName) res, err := ts.ws.tmc.ExecuteFetchAsDba(ctx, target.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ - Query: []byte(query), - MaxRows: 1, - ReloadSchema: true, + 
Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + DisableForeignKeyChecks: true, }) log.Infof("Removed target table with result: %+v", res) - log.Flush() if err != nil { ts.Logger().Errorf("%s: Error removing table %s: %v", target.GetPrimary().String(), tableName, err) @@ -1112,7 +1188,8 @@ func (ts *trafficSwitcher) dropTargetShards(ctx context.Context) error { func (ts *trafficSwitcher) validate(ctx context.Context) error { if ts.MigrationType() == binlogdatapb.MigrationType_TABLES { - if ts.isPartialMigration { + if ts.isPartialMigration || + (ts.IsMultiTenantMigration() && len(ts.options.GetShards()) > 0) { return nil } sourceTopo := ts.ws.ts @@ -1349,7 +1426,7 @@ func (ts *trafficSwitcher) getTargetSequenceMetadata(ctx context.Context) (map[s // error if any is seen. func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspace) (map[string]*sequenceMetadata, bool, error) { allFullyQualified := true - targets := maps2.Values(ts.Targets()) + targets := maps.Values(ts.Targets()) if len(targets) == 0 || targets[0].GetPrimary() == nil { // This should never happen return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target keyspace %s", ts.targetKeyspace) } @@ -1358,7 +1435,7 @@ func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspa for _, table := range ts.Tables() { vs, ok := vschema.Tables[table] - if !ok || vs == nil || vs.AutoIncrement == nil || vs.AutoIncrement.Sequence == "" { + if !ok || vs.GetAutoIncrement().GetSequence() == "" { continue } sm := &sequenceMetadata{ @@ -1400,7 +1477,7 @@ func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspa // the primary tablet serving the sequence to refresh/reset its cache to // be sure that it does not provide a value that is less than the current max. 
func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { - initSequenceTable := func(ictx context.Context, sequenceTableName string, sequenceMetadata *sequenceMetadata) error { + initSequenceTable := func(ictx context.Context, sequenceMetadata *sequenceMetadata) error { // Now we need to run this query on the target shards in order // to get the max value and set the next id for the sequence to // a higher value. @@ -1422,13 +1499,17 @@ func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequen MaxRows: 1, }) if terr != nil || len(qr.Rows) != 1 { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", - ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s on tablet %s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, topoproto.TabletAliasString(primary.Alias), terr) } - maxID, terr := sqltypes.Proto3ToResult(qr).Rows[0][0].ToInt64() - if terr != nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", - ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + rawVal := sqltypes.Proto3ToResult(qr).Rows[0][0] + maxID := int64(0) + if !rawVal.IsNull() { // If it's NULL then there are no rows and 0 remains the max + maxID, terr = rawVal.ToInt64() + if terr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s on tablet %s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, topoproto.TabletAliasString(primary.Alias), terr) + } } 
srMu.Lock() defer srMu.Unlock() @@ -1443,6 +1524,10 @@ func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequen return ictx.Err() default: } + if len(shardResults) == 0 { // This should never happen + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "did not get any results for the max used sequence value for target table %s.%s in order to initialize the backing sequence table", + ts.targetKeyspace, sequenceMetadata.usingTableName) + } // Sort the values to find the max value across all shards. sort.Slice(shardResults, func(i, j int) bool { return shardResults[i] < shardResults[j] @@ -1477,12 +1562,7 @@ func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequen ) // Now execute this on the primary tablet of the unsharded keyspace // housing the backing table. - primaryTablet, ierr := ts.TopoServer().GetTablet(ictx, sequenceShard.PrimaryAlias) - if ierr != nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the primary tablet for %s.%s using alias %s: %v", - sequenceShard.Keyspace(), sequenceShard.ShardName(), sequenceShard.PrimaryAlias, ierr) - } - qr, ierr := ts.ws.tmc.ExecuteFetchAsApp(ictx, primaryTablet.Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ + qr, ierr := ts.ws.tmc.ExecuteFetchAsApp(ictx, sequenceTablet.Tablet, true, &tabletmanagerdatapb.ExecuteFetchAsAppRequest{ Query: []byte(query.Query), MaxRows: 1, }) @@ -1517,10 +1597,9 @@ func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequen } initGroup, gctx := errgroup.WithContext(ctx) - for sequenceTableName, sequenceMetadata := range sequencesByBackingTable { - sequenceTableName, sequenceMetadata := sequenceTableName, sequenceMetadata // https://golang.org/doc/faq#closures_and_goroutines + for _, sequenceMetadata := range sequencesByBackingTable { initGroup.Go(func() error { - return initSequenceTable(gctx, sequenceTableName, sequenceMetadata) + return initSequenceTable(gctx, sequenceMetadata) }) } return 
initGroup.Wait() @@ -1551,3 +1630,10 @@ func (ts *trafficSwitcher) resetSequences(ctx context.Context) error { return ts.TabletManagerClient().ResetSequences(ctx, source.GetPrimary().Tablet, ts.Tables()) }) } + +func (ts *trafficSwitcher) IsMultiTenantMigration() bool { + if ts.options != nil && ts.options.TenantId != "" { + return true + } + return false +} diff --git a/go/vt/vtctl/workflow/utils.go b/go/vt/vtctl/workflow/utils.go index 1a723c6192c..d4e8d7b4ec0 100644 --- a/go/vt/vtctl/workflow/utils.go +++ b/go/vt/vtctl/workflow/utils.go @@ -19,13 +19,19 @@ package workflow import ( "bytes" "context" + "encoding/json" "fmt" "hash/fnv" "math" "sort" + "strconv" "strings" "sync" + querypb "vitess.io/vitess/go/vt/proto/query" + + "vitess.io/vitess/go/vt/vtgate/vindexes" + "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/sets" @@ -86,7 +92,7 @@ func getTablesInKeyspace(ctx context.Context, ts *topo.Server, tmc tmclient.Tabl // validateNewWorkflow ensures that the specified workflow doesn't already exist // in the keyspace. 
func validateNewWorkflow(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, keyspace, workflow string) error { - allshards, err := ts.FindAllShardsInKeyspace(ctx, keyspace) + allshards, err := ts.FindAllShardsInKeyspace(ctx, keyspace, nil) if err != nil { return err } @@ -106,25 +112,18 @@ func validateNewWorkflow(ctx context.Context, ts *topo.Server, tmc tmclient.Tabl allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.GetTablet")) return } - validations := []struct { - query string - msg string - }{{ - fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and workflow=%s", encodeString(primary.DbName()), encodeString(workflow)), - fmt.Sprintf("workflow %s already exists in keyspace %s on tablet %v", workflow, keyspace, primary.Alias), - }, { - fmt.Sprintf("select 1 from _vt.vreplication where db_name=%s and message='FROZEN' and workflow_sub_type != %d", encodeString(primary.DbName()), binlogdatapb.VReplicationWorkflowSubType_Partial), - fmt.Sprintf("found previous frozen workflow on tablet %v, please review and delete it first before creating a new workflow", - primary.Alias), - }} - for _, validation := range validations { - p3qr, err := tmc.VReplicationExec(ctx, primary.Tablet, validation.query) - if err != nil { - allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.VReplicationExec")) - return - } - if p3qr != nil && len(p3qr.Rows) != 0 { - allErrors.RecordError(vterrors.Wrap(fmt.Errorf(validation.msg), "validateWorkflowName.VReplicationExec")) + res, err := tmc.ReadVReplicationWorkflows(ctx, primary.Tablet, &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{}) + if err != nil { + allErrors.RecordError(vterrors.Wrap(err, "validateWorkflowName.ReadVReplicationWorkflows")) + return + } + if res == nil { + // There are no workflows on this tablet. 
+ return + } + for _, wf := range res.Workflows { + if wf.Workflow == workflow { + allErrors.RecordError(fmt.Errorf("workflow %s already exists in keyspace %s on tablet %v", workflow, keyspace, primary.Alias)) return } } @@ -167,8 +166,8 @@ func createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.Materia return nil } -func stripTableConstraints(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableConstraints(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -189,8 +188,8 @@ func stripTableConstraints(ddl string) (string, error) { return newDDL, nil } -func stripTableForeignKeys(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableForeignKeys(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -218,6 +217,25 @@ func stripTableForeignKeys(ddl string) (string, error) { return newDDL, nil } +func stripAutoIncrement(ddl string, parser *sqlparser.Parser) (string, error) { + newDDL, err := parser.ParseStrictDDL(ddl) + if err != nil { + return "", err + } + + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ColumnDefinition: + if node.Type.Options.Autoincrement { + node.Type.Options.Autoincrement = false + } + } + return true, nil + }, newDDL) + + return sqlparser.String(newDDL), nil +} + func getSourceTableDDLs(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, shards []*topo.ShardInfo) (map[string]string, error) { sourceDDLs := make(map[string]string) allTables := []string{"/.*/"} @@ -326,7 +344,7 @@ func getMigrationID(targetKeyspace string, shardTablets []string) (int64, error) // // It returns ErrNoStreams if there are no targets found for the workflow. 
func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { - targetShards, err := ts.GetShardNames(ctx, targetKeyspace) + targetShards, err := ts.FindAllShardsInKeyspace(ctx, targetKeyspace, nil) if err != nil { return nil, err } @@ -338,24 +356,20 @@ func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManag targets = make(map[string]*MigrationTarget, len(targetShards)) workflowType binlogdatapb.VReplicationWorkflowType workflowSubType binlogdatapb.VReplicationWorkflowSubType + options vtctldatapb.WorkflowOptions ) // We check all shards in the target keyspace. Not all of them may have a // stream. For example, if we're splitting -80 to [-40,40-80], only those // two target shards will have vreplication streams, and the other shards in // the target keyspace will not. - for _, targetShard := range targetShards { - si, err := ts.GetShard(ctx, targetKeyspace, targetShard) - if err != nil { - return nil, err - } - - if si.PrimaryAlias == nil { + for targetShardName, targetShard := range targetShards { + if targetShard.PrimaryAlias == nil { // This can happen if bad inputs are given. 
return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "shard %v/%v doesn't have a primary set", targetKeyspace, targetShard) } - primary, err := ts.GetTablet(ctx, si.PrimaryAlias) + primary, err := ts.GetTablet(ctx, targetShard.PrimaryAlias) if err != nil { return nil, err } @@ -367,12 +381,12 @@ func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManag return nil, err } - if len(wf.Streams) < 1 { + if wf == nil || len(wf.Streams) < 1 { continue } target := &MigrationTarget{ - si: si, + si: targetShard, primary: primary, Sources: make(map[int32]*binlogdatapb.BinlogSource), } @@ -381,6 +395,13 @@ func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManag optTabletTypes = topoproto.MakeStringTypeCSV(wf.TabletTypes) workflowType = wf.WorkflowType workflowSubType = wf.WorkflowSubType + optionsJSON := wf.GetOptions() + if optionsJSON != "" { + if err := json.Unmarshal([]byte(optionsJSON), &options); err != nil { + log.Errorf("failed to unmarshal options: %v %s", err, optionsJSON) + return nil, err + } + } for _, stream := range wf.Streams { if stream.Message == Frozen { @@ -389,7 +410,7 @@ func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManag target.Sources[stream.Id] = stream.Bls } - targets[targetShard] = target + targets[targetShardName] = target } if len(targets) == 0 { @@ -403,6 +424,7 @@ func BuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManag OptTabletTypes: optTabletTypes, WorkflowType: workflowType, WorkflowSubType: workflowSubType, + Options: &options, }, nil } @@ -531,12 +553,20 @@ func doValidateWorkflowHasCompleted(ctx context.Context, ts *trafficSwitcher) er } else { _ = ts.ForAllTargets(func(target *MigrationTarget) error { wg.Add(1) - query := fmt.Sprintf("select 1 from _vt.vreplication where db_name='%s' and workflow='%s' and message!='FROZEN'", target.GetPrimary().DbName(), ts.WorkflowName()) - rs, _ := ts.VReplicationExec(ctx, target.GetPrimary().Alias, query) 
- if len(rs.Rows) > 0 { - rec.RecordError(fmt.Errorf("vreplication streams are not frozen on tablet %d", target.GetPrimary().Alias.Uid)) + defer wg.Done() + res, err := ts.ws.tmc.ReadVReplicationWorkflow(ctx, target.GetPrimary().Tablet, &tabletmanagerdatapb.ReadVReplicationWorkflowRequest{ + Workflow: ts.WorkflowName(), + }) + if err != nil { + rec.RecordError(err) + return nil + } + for _, stream := range res.Streams { + if stream.Message != Frozen { + rec.RecordError(fmt.Errorf("vreplication streams are not frozen on tablet %d", target.GetPrimary().Alias.Uid)) + return nil + } } - wg.Done() return nil }) } @@ -657,11 +687,8 @@ func areTabletsAvailableToStreamFrom(ctx context.Context, req *vtctldatapb.Workf // New callers should instead use the new BuildTargets function. // // It returns ErrNoStreams if there are no targets found for the workflow. -func LegacyBuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string) (*TargetInfo, error) { - targetShards, err := ts.GetShardNames(ctx, targetKeyspace) - if err != nil { - return nil, err - } +func LegacyBuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, targetKeyspace string, workflow string, + targetShards []string) (*TargetInfo, error) { var ( frozen bool @@ -766,3 +793,159 @@ func LegacyBuildTargets(ctx context.Context, ts *topo.Server, tmc tmclient.Table WorkflowSubType: workflowSubType, }, nil } + +func addFilter(sel *sqlparser.Select, filter sqlparser.Expr) { + if sel.Where != nil { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: &sqlparser.AndExpr{ + Left: filter, + Right: sel.Where.Expr, + }, + } + } else { + sel.Where = &sqlparser.Where{ + Type: sqlparser.WhereClause, + Expr: filter, + } + } +} + +func getTenantClause(vrOptions *vtctldatapb.WorkflowOptions, + targetVSchema *vindexes.KeyspaceSchema, parser *sqlparser.Parser) (*sqlparser.Expr, error) { + if vrOptions.TenantId == "" { + 
return nil, nil + } + if targetVSchema == nil || targetVSchema.MultiTenantSpec == nil { + return nil, fmt.Errorf("target keyspace not defined, or it does not have multi-tenant spec") + } + tenantColumnName := targetVSchema.MultiTenantSpec.TenantIdColumnName + tenantColumnType := targetVSchema.MultiTenantSpec.TenantIdColumnType + if tenantColumnName == "" { + return nil, fmt.Errorf("tenant column name not defined in multi-tenant spec") + } + + var tenantId string + switch tenantColumnType { + case querypb.Type_INT64: + _, err := strconv.Atoi(vrOptions.TenantId) + if err != nil { + return nil, fmt.Errorf("tenant id is not a valid int: %s", vrOptions.TenantId) + } + tenantId = vrOptions.TenantId + case querypb.Type_VARCHAR: + tenantId = fmt.Sprintf("'%s'", vrOptions.TenantId) + default: + return nil, fmt.Errorf("unsupported tenant column type: %s", tenantColumnType) + } + + stmt, err := parser.Parse(fmt.Sprintf("select * from t where %s = %s", tenantColumnName, tenantId)) + if err != nil { + return nil, err + } + sel, ok := stmt.(*sqlparser.Select) + if !ok { + return nil, fmt.Errorf("error getting select: %s", tenantId) + } + return &sel.Where.Expr, nil +} + +func changeKeyspaceRouting(ctx context.Context, ts *topo.Server, tabletTypes []topodatapb.TabletType, + sourceKeyspace, targetKeyspace, reason string) error { + routes := make(map[string]string) + for _, tabletType := range tabletTypes { + suffix := getTabletTypeSuffix(tabletType) + routes[sourceKeyspace+suffix] = targetKeyspace + } + if err := updateKeyspaceRoutingRules(ctx, ts, reason, routes); err != nil { + return err + } + return ts.RebuildSrvVSchema(ctx, nil) +} + +// updateKeyspaceRoutingRules updates the keyspace routing rules for the (effective) source +// keyspace to the target keyspace. 
+func updateKeyspaceRoutingRules(ctx context.Context, ts *topo.Server, reason string, routes map[string]string) error { + update := func() error { + return topotools.UpdateKeyspaceRoutingRules(ctx, ts, reason, + func(ctx context.Context, rules *map[string]string) error { + for fromKeyspace, toKeyspace := range routes { + (*rules)[fromKeyspace] = toKeyspace + } + return nil + }) + } + err := update() + if err == nil { + return nil + } + // If we were racing with another caller to create the initial routing rules, then + // we can immediately retry the operation. + if !topo.IsErrType(err, topo.NodeExists) { + return err + } + return update() +} + +func validateTenantId(dataType querypb.Type, value string) error { + switch dataType { + case querypb.Type_INT64: + _, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("value %s is not a valid int", value) + } + case querypb.Type_VARCHAR: + // no validation needed + default: + return fmt.Errorf("unsupported data type: %s", dataType) + } + return nil +} + +func updateKeyspaceRoutingState(ctx context.Context, ts *topo.Server, sourceKeyspace, targetKeyspace string, state *State) error { + // For multi-tenant migrations, we only support switching traffic to all cells at once + cells, err := ts.GetCellInfoNames(ctx) + if err != nil { + return err + } + + rules, err := topotools.GetKeyspaceRoutingRules(ctx, ts) + if err != nil { + return err + } + hasSwitched := func(tabletTypePrefix string) bool { + ks, ok := rules[sourceKeyspace+tabletTypePrefix] + return ok && ks == targetKeyspace + } + rdonlySwitched := hasSwitched(rdonlyTabletSuffix) + replicaSwitched := hasSwitched(replicaTabletSuffix) + primarySwitched := hasSwitched(primaryTabletSuffix) + if rdonlySwitched { + state.RdonlyCellsSwitched = cells + state.RdonlyCellsNotSwitched = nil + } else { + state.RdonlyCellsNotSwitched = cells + state.RdonlyCellsSwitched = nil + } + if replicaSwitched { + state.ReplicaCellsSwitched = cells + state.ReplicaCellsNotSwitched 
= nil + } else { + state.ReplicaCellsNotSwitched = cells + state.ReplicaCellsSwitched = nil + } + state.WritesSwitched = primarySwitched + return nil +} + +func getTabletTypeSuffix(tabletType topodatapb.TabletType) string { + switch tabletType { + case topodatapb.TabletType_REPLICA: + return replicaTabletSuffix + case topodatapb.TabletType_RDONLY: + return rdonlyTabletSuffix + case topodatapb.TabletType_PRIMARY: + return primaryTabletSuffix + } + return "" +} diff --git a/go/vt/vtctl/workflow/utils_test.go b/go/vt/vtctl/workflow/utils_test.go new file mode 100644 index 00000000000..e63ae00aa19 --- /dev/null +++ b/go/vt/vtctl/workflow/utils_test.go @@ -0,0 +1,172 @@ +package workflow + +import ( + "context" + "fmt" + "math" + "math/rand" + "os" + "os/exec" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" + + "vitess.io/vitess/go/testfiles" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/etcd2topo" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/topotools" +) + +// TestUpdateKeyspaceRoutingRule confirms that the keyspace routing rules are updated correctly. +func TestUpdateKeyspaceRoutingRule(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + routes := make(map[string]string) + for _, tabletType := range tabletTypeSuffixes { + routes["from"+tabletType] = "to" + } + err := updateKeyspaceRoutingRules(ctx, ts, "test", routes) + require.NoError(t, err) + rules, err := topotools.GetKeyspaceRoutingRules(ctx, ts) + require.NoError(t, err) + require.EqualValues(t, routes, rules) +} + +// TestConcurrentKeyspaceRoutingRulesUpdates runs multiple keyspace routing rules updates concurrently to test +// the locking mechanism. 
+func TestConcurrentKeyspaceRoutingRulesUpdates(t *testing.T) { + if os.Getenv("GOCOVERDIR") != "" { + // While running this test in CI along with all other tests in for code coverage this test hangs very often. + // Possibly due to some resource constraints, since this test is one of the last. + // However just running this package by itself with code coverage works fine in CI. + t.Logf("Skipping TestConcurrentKeyspaceRoutingRulesUpdates test in code coverage mode") + t.Skip() + } + + ctx := context.Background() + + ts := memorytopo.NewServer(ctx, "zone1") + defer ts.Close() + t.Run("memtopo", func(t *testing.T) { + testConcurrentKeyspaceRoutingRulesUpdates(t, ctx, ts) + }) + + etcdServerAddress := startEtcd(t) + log.Infof("Successfully started etcd server at %s", etcdServerAddress) + topoName := "etcd2_test" // "etcd2" is already registered on init(), so using a different name + topo.RegisterFactory(topoName, etcd2topo.Factory{}) + ts, err := topo.OpenServer(topoName, etcdServerAddress, "/vitess") + require.NoError(t, err) + t.Run("etcd", func(t *testing.T) { + testConcurrentKeyspaceRoutingRulesUpdates(t, ctx, ts) + ts.Close() + }) +} + +func testConcurrentKeyspaceRoutingRulesUpdates(t *testing.T, ctx context.Context, ts *topo.Server) { + concurrency := 100 + duration := 10 * time.Second + + var wg sync.WaitGroup + wg.Add(concurrency) + + shortCtx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + log.Infof("Starting %d concurrent updates", concurrency) + for i := 0; i < concurrency; i++ { + go func(id int) { + defer wg.Done() + for { + select { + case <-shortCtx.Done(): + return + default: + update(t, ts, id) + } + } + }(i) + } + wg.Wait() + log.Infof("All updates completed") + rules, err := ts.GetKeyspaceRoutingRules(ctx) + require.NoError(t, err) + require.LessOrEqual(t, concurrency, len(rules.Rules)) +} + +func update(t *testing.T, ts *topo.Server, id int) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + s := 
fmt.Sprintf("%d_%d", id, rand.Intn(math.MaxInt)) + routes := make(map[string]string) + for _, tabletType := range tabletTypeSuffixes { + from := fmt.Sprintf("from%s%s", s, tabletType) + routes[from] = s + tabletType + } + err := updateKeyspaceRoutingRules(ctx, ts, "test", routes) + require.NoError(t, err) + got, err := topotools.GetKeyspaceRoutingRules(ctx, ts) + require.NoError(t, err) + for _, tabletType := range tabletTypeSuffixes { + from := fmt.Sprintf("from%s%s", s, tabletType) + require.Equal(t, s+tabletType, got[from]) + } +} + +// startEtcd starts an etcd subprocess, and waits for it to be ready. +func startEtcd(t *testing.T) string { + // Create a temporary directory. + dataDir := t.TempDir() + + // Get our two ports to listen to. + port := testfiles.GoVtTopoEtcd2topoPort + name := "vitess_unit_test" + clientAddr := fmt.Sprintf("http://localhost:%v", port) + peerAddr := fmt.Sprintf("http://localhost:%v", port+1) + initialCluster := fmt.Sprintf("%v=%v", name, peerAddr) + cmd := exec.Command("etcd", + "-name", name, + "-advertise-client-urls", clientAddr, + "-initial-advertise-peer-urls", peerAddr, + "-listen-client-urls", clientAddr, + "-listen-peer-urls", peerAddr, + "-initial-cluster", initialCluster, + "-data-dir", dataDir) + err := cmd.Start() + require.NoError(t, err, "failed to start etcd") + + // Create a client to connect to the created etcd. + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{clientAddr}, + DialTimeout: 5 * time.Second, + }) + require.NoError(t, err, "newCellClient(%v) failed", clientAddr) + defer cli.Close() + + // Wait until we can list "/", or timeout. 
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + start := time.Now() + for { + if _, err := cli.Get(ctx, "/"); err == nil { + break + } + if time.Since(start) > 10*time.Second { + require.FailNow(t, "Failed to start etcd daemon in time") + } + time.Sleep(10 * time.Millisecond) + } + t.Cleanup(func() { + if cmd.Process.Kill() != nil { + log.Infof("cmd.Process.Kill() failed : %v", err) + } + }) + + return clientAddr +} diff --git a/go/vt/vtctl/workflow/vexec/query_plan.go b/go/vt/vtctl/workflow/vexec/query_plan.go index e6a9cb3a54d..52e7ee00b61 100644 --- a/go/vt/vtctl/workflow/vexec/query_plan.go +++ b/go/vt/vtctl/workflow/vexec/query_plan.go @@ -31,7 +31,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) -// QueryPlan defines the interface to executing a preprared vexec query on one +// QueryPlan defines the interface to executing a prepared vexec query on one // or more tablets. Implementations should ensure that it is safe to call the // various Execute* methods repeatedly and in multiple goroutines. 
type QueryPlan interface { diff --git a/go/vt/vtctl/workflow/vexec/query_plan_test.go b/go/vt/vtctl/workflow/vexec/query_plan_test.go index 2899b9a3107..382e5213b5b 100644 --- a/go/vt/vtctl/workflow/vexec/query_plan_test.go +++ b/go/vt/vtctl/workflow/vexec/query_plan_test.go @@ -153,8 +153,6 @@ func TestQueryPlanExecute(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -303,8 +301,6 @@ func TestQueryPlanExecuteScatter(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() diff --git a/go/vt/vtctl/workflow/vexec/query_planner_test.go b/go/vt/vtctl/workflow/vexec/query_planner_test.go index 9199c8a0947..ec162ebc4c7 100644 --- a/go/vt/vtctl/workflow/vexec/query_planner_test.go +++ b/go/vt/vtctl/workflow/vexec/query_planner_test.go @@ -65,8 +65,6 @@ func TestVReplicationQueryPlanner_PlanQuery(t *testing.T) { planner := NewVReplicationQueryPlanner(nil, "", "") for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -115,8 +113,6 @@ func TestVReplicationQueryPlanner_planSelect(t *testing.T) { planner := NewVReplicationQueryPlanner(nil, "testworkflow", "vt_testkeyspace") for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -169,8 +165,6 @@ func TestVReplicationQueryPlanner_planUpdate(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -230,8 +224,6 @@ func TestVReplicationQueryPlanner_planDelete(t *testing.T) { planner := NewVReplicationQueryPlanner(nil, "", "vt_testkeyspace") for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() @@ -351,13 +343,11 @@ func TestVReplicationLogQueryPlanner(t *testing.T) { } for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { t.Parallel() planner := NewVReplicationLogQueryPlanner(nil, tt.targetStreamIDs) - stmt, err := 
sqlparser.Parse(tt.query) + stmt, err := sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err, "could not parse query %q", tt.query) qp, err := planner.planSelect(stmt.(*sqlparser.Select)) if tt.shouldErr { diff --git a/go/vt/vtctl/workflow/vexec/testutil/query.go b/go/vt/vtctl/workflow/vexec/testutil/query.go index 3988f7a112f..1add74e5b02 100644 --- a/go/vt/vtctl/workflow/vexec/testutil/query.go +++ b/go/vt/vtctl/workflow/vexec/testutil/query.go @@ -41,7 +41,7 @@ func ParsedQueryFromString(t *testing.T, query string) *sqlparser.ParsedQuery { func StatementFromString(t *testing.T, query string) sqlparser.Statement { t.Helper() - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err, "could not parse query %v", query) return stmt diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go index 477b81a1a03..9e6fbe96940 100644 --- a/go/vt/vtctl/workflow/vexec/vexec.go +++ b/go/vt/vtctl/workflow/vexec/vexec.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/workflow/common" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -95,6 +96,9 @@ type VExec struct { // to support running in modes like: // - Execute serially rather than concurrently. // - Only return error if greater than some percentage of the targets fail. + + parser *sqlparser.Parser + shardSubset []string } // NewVExec returns a new instance suitable for making vexec queries to a given @@ -102,15 +106,24 @@ type VExec struct { // string). The provided topo server is used to look up target tablets for // queries. A given instance will discover targets exactly once for its // lifetime, so to force a refresh, create another instance. 
-func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient) *VExec { +func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient, parser *sqlparser.Parser) *VExec { return &VExec{ ts: ts, tmc: tmc, keyspace: keyspace, workflow: workflow, + parser: parser, } } +func (vx *VExec) SetShardSubset(shardSubset []string) { + vx.shardSubset = shardSubset +} + +func (vx *VExec) GetShardSubset() []string { + return vx.shardSubset +} + // QueryContext executes the given vexec query, returning a mapping of tablet // to querypb.QueryResult. // @@ -127,7 +140,7 @@ func (vx *VExec) QueryContext(ctx context.Context, query string) (map[*topo.Tabl } } - stmt, err := sqlparser.Parse(query) + stmt, err := vx.parser.Parse(query) if err != nil { return nil, err } @@ -205,15 +218,11 @@ func (vx *VExec) initialize(ctx context.Context) error { getShardsCtx, getShardsCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) defer getShardsCancel() - shards, err := vx.ts.GetShardNames(getShardsCtx, vx.keyspace) + shards, err := common.GetShards(getShardsCtx, vx.ts, vx.keyspace, vx.shardSubset) if err != nil { return err } - if len(shards) == 0 { - return fmt.Errorf("%w %s", ErrNoShardsForKeyspace, vx.keyspace) - } - primaries := make([]*topo.TabletInfo, 0, len(shards)) for _, shard := range shards { @@ -299,6 +308,7 @@ func (vx *VExec) WithWorkflow(workflow string) *VExec { ts: vx.ts, tmc: vx.tmc, primaries: vx.primaries, + parser: vx.parser, workflow: workflow, } } @@ -306,9 +316,9 @@ func (vx *VExec) WithWorkflow(workflow string) *VExec { func extractTableName(stmt sqlparser.Statement) (string, error) { switch stmt := stmt.(type) { case *sqlparser.Update: - return sqlparser.String(stmt.TableExprs), nil + return sqlparser.ToString(stmt.TableExprs), nil case *sqlparser.Delete: - return sqlparser.String(stmt.TableExprs), nil + return sqlparser.ToString(stmt.TableExprs), nil case *sqlparser.Insert: return 
sqlparser.String(stmt.Table), nil case *sqlparser.Select: diff --git a/go/vt/vtctl/workflow/vreplication_stream.go b/go/vt/vtctl/workflow/vreplication_stream.go index 980d686bae9..db294333de4 100644 --- a/go/vt/vtctl/workflow/vreplication_stream.go +++ b/go/vt/vtctl/workflow/vreplication_stream.go @@ -41,6 +41,15 @@ type VReplicationStream struct { // some aggregate functionality. type VReplicationStreams []*VReplicationStream +// IDs returns the IDs of the VReplicationStreams. +func (streams VReplicationStreams) IDs() []int32 { + ids := make([]int32, len(streams)) + for i := range streams { + ids[i] = streams[i].ID + } + return ids +} + // Values returns a string representing the IDs of the VReplicationStreams for // use in an IN clause. // diff --git a/go/vt/vtctld/action_repository.go b/go/vt/vtctld/action_repository.go index 0076ee65ba6..e0f6c45535a 100644 --- a/go/vt/vtctld/action_repository.go +++ b/go/vt/vtctld/action_repository.go @@ -23,6 +23,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" @@ -79,6 +81,7 @@ type actionTabletRecord struct { // ActionRepository is a repository of actions that can be performed // on a {Keyspace,Shard,Tablet}. type ActionRepository struct { + env *vtenv.Environment keyspaceActions map[string]actionKeyspaceMethod shardActions map[string]actionShardMethod tabletActions map[string]actionTabletRecord @@ -87,8 +90,9 @@ type ActionRepository struct { // NewActionRepository creates and returns a new ActionRepository, // with no actions. 
-func NewActionRepository(ts *topo.Server) *ActionRepository { +func NewActionRepository(env *vtenv.Environment, ts *topo.Server) *ActionRepository { return &ActionRepository{ + env: env, keyspaceActions: make(map[string]actionKeyspaceMethod), shardActions: make(map[string]actionShardMethod), tabletActions: make(map[string]actionTabletRecord), @@ -125,7 +129,7 @@ func (ar *ActionRepository) ApplyKeyspaceAction(ctx context.Context, actionName, } ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(ar.env, logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) output, err := action(ctx, wr, keyspace) cancel() if err != nil { @@ -152,7 +156,7 @@ func (ar *ActionRepository) ApplyShardAction(ctx context.Context, actionName, ke } ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(ar.env, logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) output, err := action(ctx, wr, keyspace, shard) cancel() if err != nil { @@ -186,7 +190,7 @@ func (ar *ActionRepository) ApplyTabletAction(ctx context.Context, actionName st // run the action ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(ar.env, logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) output, err := action.method(ctx, wr, tabletAlias) cancel() if err != nil { diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index 43afcb29452..0452fce3c3d 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -487,7 +487,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { logstream := logutil.NewMemoryLogger() - wr := wrangler.New(logstream, ts, tmClient) + wr := wrangler.New(actions.env, logstream, ts, 
tmClient) err := vtctl.RunCommand(r.Context(), wr, args) if err != nil { resp.Error = err.Error() @@ -523,7 +523,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { logger := logutil.NewCallbackLogger(func(ev *logutilpb.Event) { w.Write([]byte(logutil.EventString(ev))) }) - wr := wrangler.New(logger, ts, tmClient) + wr := wrangler.New(actions.env, logger, ts, tmClient) apiCallUUID, err := schema.CreateUUID() if err != nil { @@ -531,7 +531,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { } requestContext := fmt.Sprintf("vtctld/api:%s", apiCallUUID) - executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0) + executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0, actions.env.Parser()) if err := executor.SetDDLStrategy(req.DDLStrategy); err != nil { return fmt.Errorf("error setting DDL strategy: %v", err) } diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index 6443d89a56b..d8ac8beccc1 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/servenv/testutils" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -47,7 +48,7 @@ func TestAPI(t *testing.T) { cells := []string{"cell1", "cell2"} ts := memorytopo.NewServer(ctx, cells...) 
defer ts.Close() - actionRepo := NewActionRepository(ts) + actionRepo := NewActionRepository(vtenv.NewTestEnv(), ts) server := testutils.HTTPTestServer() defer server.Close() @@ -237,7 +238,6 @@ func TestAPI(t *testing.T) { // Keyspaces {"GET", "keyspaces", "", `["ks1", "ks3"]`, http.StatusOK}, {"GET", "keyspaces/ks1", "", `{ - "served_froms": [], "keyspace_type":0, "base_keyspace":"", "snapshot_time":null, @@ -324,11 +324,11 @@ func TestAPI(t *testing.T) { // vtctl RunCommand {"POST", "vtctl/", `["GetKeyspace","ks1"]`, `{ "Error": "", - "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null,\n \"durability_policy\": \"semi_sync\",\n \"throttler_config\": null,\n \"sidecar_db_name\": \"_vt_sidecar_ks1\"\n}\n\n" + "Output": "{\n \"keyspace_type\": 0,\n \"base_keyspace\": \"\",\n \"snapshot_time\": null,\n \"durability_policy\": \"semi_sync\",\n \"throttler_config\": null,\n \"sidecar_db_name\": \"_vt_sidecar_ks1\"\n}\n\n" }`, http.StatusOK}, {"POST", "vtctl/", `["GetKeyspace","ks3"]`, `{ "Error": "", - "Output": "{\n \"served_froms\": [],\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n },\n \"durability_policy\": \"none\",\n \"throttler_config\": null,\n \"sidecar_db_name\": \"_vt\"\n}\n\n" + "Output": "{\n \"keyspace_type\": 1,\n \"base_keyspace\": \"ks1\",\n \"snapshot_time\": {\n \"seconds\": \"1136214245\",\n \"nanoseconds\": 0\n },\n \"durability_policy\": \"none\",\n \"throttler_config\": null,\n \"sidecar_db_name\": \"_vt\"\n}\n\n" }`, http.StatusOK}, {"POST", "vtctl/", `["GetVSchema","ks3"]`, `{ "Error": "", diff --git a/go/vt/vtctld/tablet_data.go b/go/vt/vtctld/tablet_data.go index 66cfed6b4a9..f9482849bee 100644 --- a/go/vt/vtctld/tablet_data.go +++ b/go/vt/vtctld/tablet_data.go @@ -113,7 +113,7 @@ func (th *tabletHealth) stream(ctx context.Context, ts *topo.Server, tabletAlias return err } - conn, err := 
tabletconn.GetDialer()(ti.Tablet, grpcclient.FailFast(true)) + conn, err := tabletconn.GetDialer()(ctx, ti.Tablet, grpcclient.FailFast(true)) if err != nil { return err } diff --git a/go/vt/vtctld/tablet_data_test.go b/go/vt/vtctld/tablet_data_test.go index d40c6647ef3..bbb03e3b878 100644 --- a/go/vt/vtctld/tablet_data_test.go +++ b/go/vt/vtctld/tablet_data_test.go @@ -29,6 +29,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/grpcqueryservice" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" @@ -112,7 +113,7 @@ func TestTabletData(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") defer ts.Close() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index ab9cf24c9a5..5ca3908c053 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -23,6 +23,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/acl" @@ -48,8 +50,8 @@ func registerVtctldFlags(fs *pflag.FlagSet) { } // InitVtctld initializes all the vtctld functionality. 
-func InitVtctld(ts *topo.Server) error { - actionRepo := NewActionRepository(ts) +func InitVtctld(env *vtenv.Environment, ts *topo.Server) error { + actionRepo := NewActionRepository(env, ts) // keyspace actions actionRepo.RegisterKeyspaceAction("ValidateKeyspace", diff --git a/go/vt/vtenv/cached_size.go b/go/vt/vtenv/cached_size.go new file mode 100644 index 00000000000..808cc4cdca3 --- /dev/null +++ b/go/vt/vtenv/cached_size.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by Sizegen. DO NOT EDIT. + +package vtenv + +import hack "vitess.io/vitess/go/hack" + +func (cached *Environment) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field collationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.collationEnv.CachedSize(true) + // field parser *vitess.io/vitess/go/vt/sqlparser.Parser + size += cached.parser.CachedSize(true) + // field mysqlVersion string + size += hack.RuntimeAllocSize(int64(len(cached.mysqlVersion))) + return size +} diff --git a/go/vt/vtenv/vtenv.go b/go/vt/vtenv/vtenv.go new file mode 100644 index 00000000000..1371affff52 --- /dev/null +++ b/go/vt/vtenv/vtenv.go @@ -0,0 +1,97 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtenv + +import ( + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" + "vitess.io/vitess/go/vt/sqlparser" +) + +type Environment struct { + collationEnv *collations.Environment + parser *sqlparser.Parser + mysqlVersion string + truncateUILen int + truncateErrLen int +} + +type Options struct { + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int +} + +func New(cfg Options) (*Environment, error) { + if cfg.MySQLServerVersion == "" { + cfg.MySQLServerVersion = config.DefaultMySQLVersion + } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: cfg.MySQLServerVersion, + TruncateErrLen: cfg.TruncateErrLen, + TruncateUILen: cfg.TruncateUILen, + }) + if err != nil { + return nil, err + } + return &Environment{ + collationEnv: collations.NewEnvironment(cfg.MySQLServerVersion), + parser: parser, + mysqlVersion: cfg.MySQLServerVersion, + truncateUILen: cfg.TruncateUILen, + truncateErrLen: cfg.TruncateErrLen, + }, nil +} + +func NewTestEnv() *Environment { + return &Environment{ + collationEnv: collations.NewEnvironment(config.DefaultMySQLVersion), + parser: sqlparser.NewTestParser(), + mysqlVersion: config.DefaultMySQLVersion, + truncateUILen: 512, + truncateErrLen: 0, + } +} + +func (e *Environment) CollationEnv() *collations.Environment { + return e.collationEnv +} + +func (e *Environment) Parser() *sqlparser.Parser { + return e.parser +} + +func (e *Environment) MySQLVersion() string { + return e.mysqlVersion +} + +// TruncateForUI is used when displaying queries on various Vitess status pages +// to 
keep the pages small enough to load and render properly +func (e *Environment) TruncateForUI(query string) string { + return sqlparser.TruncateQuery(query, e.truncateUILen) +} + +// TruncateForLog is used when displaying queries as part of error logs +// to avoid overwhelming logging systems with potentially long queries and +// bind value data. +func (e *Environment) TruncateForLog(query string) string { + return sqlparser.TruncateQuery(query, e.truncateErrLen) +} + +func (e *Environment) TruncateErrLen() int { + return e.truncateErrLen +} diff --git a/go/vt/vtenv/vtenv_test.go b/go/vt/vtenv/vtenv_test.go new file mode 100644 index 00000000000..f0d15e5156b --- /dev/null +++ b/go/vt/vtenv/vtenv_test.go @@ -0,0 +1,49 @@ +package vtenv + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestNewDefaults(t *testing.T) { + e, err := New(Options{}) + assert.NoError(t, err) + assert.Equal(t, config.DefaultMySQLVersion, e.MySQLVersion()) + assert.Equal(t, collations.MySQL8(), e.CollationEnv()) + assert.Equal(t, 0, e.Parser().GetTruncateErrLen()) + assert.Equal(t, "foo", e.TruncateForLog("foo")) + assert.Equal(t, "foo", e.TruncateForUI("foo")) +} + +func TestNewCustom(t *testing.T) { + e, err := New(Options{ + MySQLServerVersion: "8.0.34", + TruncateErrLen: 15, + TruncateUILen: 16, + }) + assert.NoError(t, err) + assert.Equal(t, "8.0.34", e.MySQLVersion()) + assert.Equal(t, collations.MySQL8(), e.CollationEnv()) + assert.Equal(t, 15, e.Parser().GetTruncateErrLen()) + assert.Equal(t, "sel [TRUNCATED]", e.TruncateForLog("select 11111111111")) + assert.Equal(t, "sele [TRUNCATED]", e.TruncateForUI("select 11111111111")) +} + +func TestNewError(t *testing.T) { + _, err := New(Options{ + MySQLServerVersion: "invalid", + }) + assert.Error(t, err) +} + +func TestNewTestEnv(t *testing.T) { + e := NewTestEnv() + assert.Equal(t, 
config.DefaultMySQLVersion, e.MySQLVersion()) + assert.Equal(t, collations.MySQL8(), e.CollationEnv()) + assert.Equal(t, sqlparser.NewTestParser(), e.Parser()) +} diff --git a/go/vt/vterrors/code.go b/go/vt/vterrors/code.go index 6bc317db4ed..d485c930b77 100644 --- a/go/vt/vterrors/code.go +++ b/go/vt/vterrors/code.go @@ -22,13 +22,16 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +// Errors added to the list of variables below must be added to the Errors slice a little below in this same file. +// This will enable the auto-documentation of error code in the website repository. + var ( VT03001 = errorWithState("VT03001", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "aggregate functions take a single argument '%s'", "This aggregation function only takes a single argument.") VT03002 = errorWithState("VT03002", vtrpcpb.Code_INVALID_ARGUMENT, ForbidSchemaChange, "changing schema from '%s' to '%s' is not allowed", "This schema change is not allowed. You cannot change the keyspace of a table.") VT03003 = errorWithState("VT03003", vtrpcpb.Code_INVALID_ARGUMENT, UnknownTable, "unknown table '%s' in MULTI DELETE", "The specified table in this DELETE statement is unknown.") VT03004 = errorWithState("VT03004", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the DELETE is not updatable", "You cannot delete something that is not a real MySQL table.") VT03005 = errorWithState("VT03005", vtrpcpb.Code_INVALID_ARGUMENT, WrongGroupField, "cannot group on '%s'", "The planner does not allow grouping on certain field. 
For instance, aggregation function.") - VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count at row 1", "The number of columns you want to insert do not match the number of columns of your SELECT query.") + VT03006 = errorWithState("VT03006", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count with the row", "The number of columns you want to insert do not match the number of columns of your SELECT query.") VT03007 = errorWithoutState("VT03007", vtrpcpb.Code_INVALID_ARGUMENT, "keyspace not specified", "You need to add a keyspace qualifier.") VT03008 = errorWithState("VT03008", vtrpcpb.Code_INVALID_ARGUMENT, CantUseOptionHere, "incorrect usage/placement of '%s'", "The given token is not usable in this situation. Please refer to the MySQL documentation to learn more about your token's syntax.") VT03009 = errorWithState("VT03009", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueForVar, "unexpected value type for '%s': %v", "You cannot assign this type to the given variable.") @@ -36,7 +39,7 @@ var ( VT03011 = errorWithoutState("VT03011", vtrpcpb.Code_INVALID_ARGUMENT, "invalid value type: %v", "The given value type is not accepted.") VT03012 = errorWithoutState("VT03012", vtrpcpb.Code_INVALID_ARGUMENT, "invalid syntax: %s", "The syntax is invalid. Please refer to the MySQL documentation for the proper syntax.") VT03013 = errorWithState("VT03013", vtrpcpb.Code_INVALID_ARGUMENT, NonUniqTable, "not unique table/alias: '%s'", "This table or alias name is already use. 
Please use another one that is unique.") - VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%d' in '%s'", "The given column is unknown.") + VT03014 = errorWithState("VT03014", vtrpcpb.Code_INVALID_ARGUMENT, BadFieldError, "unknown column '%s' in '%s'", "The given column is unknown.") VT03015 = errorWithoutState("VT03015", vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", "Cannot assign multiple values to a column in an update statement.") VT03016 = errorWithoutState("VT03016", vtrpcpb.Code_INVALID_ARGUMENT, "unknown vindex column: '%s'", "The given column is unknown in the vindex table.") VT03017 = errorWithState("VT03017", vtrpcpb.Code_INVALID_ARGUMENT, SyntaxError, "where clause can only be of the type 'pos > '", "This vstream where clause can only be a greater than filter.") @@ -49,6 +52,13 @@ var ( VT03024 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' user defined variable does not exists", "The query cannot be prepared using the user defined variable as it does not exists for this session.") VT03025 = errorWithState("VT03025", vtrpcpb.Code_INVALID_ARGUMENT, WrongArguments, "Incorrect arguments to %s", "The execute statement have wrong number of arguments") VT03026 = errorWithoutState("VT03024", vtrpcpb.Code_INVALID_ARGUMENT, "'%s' bind variable does not exists", "The query cannot be executed as missing the bind variable.") + VT03027 = errorWithState("VT03027", vtrpcpb.Code_INVALID_ARGUMENT, BadNullError, "Column '%s' cannot be null", "The column cannot have null value.") + VT03028 = errorWithState("VT03028", vtrpcpb.Code_INVALID_ARGUMENT, BadNullError, "Column '%s' cannot be null on row %d, col %d", "The column cannot have null value.") + VT03029 = errorWithState("VT03029", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "column count does not match value count with the row for vindex '%s'", "The number of columns you want to insert do not match the number of 
columns of your SELECT query.") + VT03030 = errorWithState("VT03030", vtrpcpb.Code_INVALID_ARGUMENT, WrongValueCountOnRow, "lookup column count does not match value count with the row (columns, count): (%v, %d)", "The number of columns you want to insert do not match the number of columns of your SELECT query.") + VT03031 = errorWithoutState("VT03031", vtrpcpb.Code_INVALID_ARGUMENT, "EXPLAIN is only supported for single keyspace", "EXPLAIN has to be sent down as a single query to the underlying MySQL, and this is not possible if it uses tables from multiple keyspaces") + VT03032 = errorWithState("VT03032", vtrpcpb.Code_INVALID_ARGUMENT, NonUpdateableTable, "the target table %s of the UPDATE is not updatable", "You cannot update a table that is not a real MySQL table.") + VT03033 = errorWithState("VT03033", vtrpcpb.Code_INVALID_ARGUMENT, ViewWrongList, "In definition of view, derived table or common table expression, SELECT list and column names list have different column counts", "The table column list and derived column list have different column counts.") VT05001 = errorWithState("VT05001", vtrpcpb.Code_NOT_FOUND, DbDropExists, "cannot drop database '%s'; database does not exists", "The given database does not exist; Vitess cannot drop it.") VT05002 = errorWithState("VT05002", vtrpcpb.Code_NOT_FOUND, BadDb, "cannot alter database '%s'; unknown database", "The given database does not exist; Vitess cannot alter it.") @@ -80,7 +90,12 @@ var ( VT09016 = errorWithState("VT09016", vtrpcpb.Code_FAILED_PRECONDITION, RowIsReferenced2, "Cannot delete or update a parent row: a foreign key constraint fails", "SET DEFAULT is not supported by InnoDB") VT09017 = errorWithoutState("VT09017", vtrpcpb.Code_FAILED_PRECONDITION, "%s", "Invalid syntax for the statement type.") VT09018 = errorWithoutState("VT09018", vtrpcpb.Code_FAILED_PRECONDITION, "%s", "Invalid syntax for the vindex function statement.") - VT09019 = errorWithoutState("VT09019", vtrpcpb.Code_FAILED_PRECONDITION, "%s 
has cyclic foreign keys", "Vitess doesn't support cyclic foreign keys.") + VT09019 = errorWithoutState("VT09019", vtrpcpb.Code_FAILED_PRECONDITION, "keyspace '%s' has cyclic foreign keys. Cycle exists between %v", "Vitess doesn't support cyclic foreign keys.") + VT09020 = errorWithoutState("VT09020", vtrpcpb.Code_FAILED_PRECONDITION, "can not use multiple vindex hints for table %s", "Vitess does not allow using multiple vindex hints on the same table.") + VT09021 = errorWithState("VT09021", vtrpcpb.Code_FAILED_PRECONDITION, KeyDoesNotExist, "Vindex '%s' does not exist in table '%s'", "Vindex hints have to reference an existing vindex, and no such vindex could be found for the given table.") + VT09022 = errorWithoutState("VT09022", vtrpcpb.Code_FAILED_PRECONDITION, "Destination does not have exactly one shard: %v", "Cannot send query to multiple shards.") + VT09023 = errorWithoutState("VT09023", vtrpcpb.Code_FAILED_PRECONDITION, "could not map %v to a keyspace id", "Unable to determine the shard for the given row.") + VT09024 = errorWithoutState("VT09024", vtrpcpb.Code_FAILED_PRECONDITION, "could not map %v to a unique keyspace id: %v", "Unable to determine the shard for the given row.") VT10001 = errorWithoutState("VT10001", vtrpcpb.Code_ABORTED, "foreign key constraints are not allowed", "Foreign key constraints are not allowed, see https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/.") @@ -97,6 +112,8 @@ var ( VT14004 = errorWithoutState("VT14004", vtrpcpb.Code_UNAVAILABLE, "cannot find keyspace for: %s", "The specified keyspace could not be found.") VT14005 = errorWithoutState("VT14005", vtrpcpb.Code_UNAVAILABLE, "cannot lookup sidecar database for keyspace: %s", "Failed to read sidecar database identifier.") + // Errors is a list of errors that must match all the variables + // defined above to enable auto-documentation of error codes. 
Errors = []func(args ...any) *VitessError{ VT03001, VT03002, @@ -124,6 +141,13 @@ var ( VT03024, VT03025, VT03026, + VT03027, + VT03028, + VT03029, + VT03030, + VT03031, + VT03032, + VT03033, VT05001, VT05002, VT05003, @@ -151,6 +175,12 @@ var ( VT09016, VT09017, VT09018, + VT09019, + VT09020, + VT09021, + VT09022, + VT09023, + VT09024, VT10001, VT12001, VT12002, diff --git a/go/vt/vterrors/errors_test.go b/go/vt/vterrors/errors_test.go index 8c039e5874f..49b77ee0385 100644 --- a/go/vt/vterrors/errors_test.go +++ b/go/vt/vterrors/errors_test.go @@ -21,7 +21,7 @@ import ( "errors" "fmt" "io" - "math/rand" + "math/rand/v2" "reflect" "strings" "testing" diff --git a/go/vt/vterrors/last_error.go b/go/vt/vterrors/last_error.go index 314a54aae00..1f051825041 100644 --- a/go/vt/vterrors/last_error.go +++ b/go/vt/vterrors/last_error.go @@ -38,7 +38,6 @@ type LastError struct { } func NewLastError(name string, maxTimeInError time.Duration) *LastError { - log.Infof("Created last error: %s, with maxTimeInError: %s", name, maxTimeInError) return &LastError{ name: name, maxTimeInError: maxTimeInError, @@ -49,20 +48,17 @@ func (le *LastError) Record(err error) { le.mu.Lock() defer le.mu.Unlock() if err == nil { - log.Infof("Resetting last error: %s", le.name) le.err = nil le.firstSeen = time.Time{} le.lastSeen = time.Time{} return } if !Equals(err, le.err) { - log.Infof("Got new last error %+v for %s, was %+v", err, le.name, le.err) le.firstSeen = time.Now() le.lastSeen = time.Now() le.err = err } else { // same error seen - log.Infof("Got the same last error for %q: %+v ; first seen at %s and last seen %dms ago", le.name, le.err, le.firstSeen, int(time.Since(le.lastSeen).Milliseconds())) if time.Since(le.lastSeen) > le.maxTimeInError { // reset firstSeen, since it has been long enough since the last time we saw this error log.Infof("Resetting firstSeen for %s, since it is too long since the last one", le.name) diff --git a/go/vt/vterrors/state.go b/go/vt/vterrors/state.go index 
5e3dcf22dfb..8223405fc92 100644 --- a/go/vt/vterrors/state.go +++ b/go/vt/vterrors/state.go @@ -47,6 +47,9 @@ const ( WrongValueCountOnRow WrongValue WrongArguments + BadNullError + InvalidGroupFuncUse + ViewWrongList // failed precondition NoDB @@ -58,6 +61,7 @@ const ( RowIsReferenced2 NoReferencedRow2 UnknownStmtHandler + KeyDoesNotExist // not found BadDb diff --git a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt index aab1ab0234f..b63683ca274 100644 --- a/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt +++ b/go/vt/vtexplain/testdata/multi-output/unsharded-output.txt @@ -24,7 +24,7 @@ update t1 set intval = 10 update t1 set floatval = 9.99 1 ks_unsharded/-: begin -1 ks_unsharded/-: update t1 set floatval = 9.99 limit 10001 /* DECIMAL */ +1 ks_unsharded/-: update t1 set floatval = 9.99 limit 10001 /* DECIMAL(3,2) */ 1 ks_unsharded/-: commit ---------------------------------------------------------------------- @@ -37,7 +37,7 @@ delete from t1 where id = 100 ---------------------------------------------------------------------- insert into t1 (id,intval,floatval) values (1,2,3.14) on duplicate key update intval=3, floatval=3.14 -1 ks_unsharded/-: insert into t1(id, intval, floatval) values (1, 2, 3.14) on duplicate key update intval = 3, floatval = 3.14 /* DECIMAL */ +1 ks_unsharded/-: insert into t1(id, intval, floatval) values (1, 2, 3.14) on duplicate key update intval = 3, floatval = 3.14 /* DECIMAL(3,2) */ ---------------------------------------------------------------------- select ID from t1 diff --git a/go/vt/vtexplain/vtexplain.go b/go/vt/vtexplain/vtexplain.go index 55e76606e08..64f1ca3cea1 100644 --- a/go/vt/vtexplain/vtexplain.go +++ b/go/vt/vtexplain/vtexplain.go @@ -20,7 +20,6 @@ limitations under the License. 
package vtexplain import ( - "bytes" "context" "fmt" "sort" @@ -29,6 +28,10 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate" @@ -43,9 +46,7 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) -var ( - batchInterval = 10 * time.Millisecond -) +var batchInterval = 10 * time.Millisecond func init() { servenv.OnParseFor("vtexplain", func(fs *pflag.FlagSet) { @@ -54,7 +55,7 @@ func init() { } const ( - vtexplainCell = "explainCell" + Cell = "explainCell" // ModeMulti is the default mode with autocommit implemented at vtgate ModeMulti = "multi" @@ -147,6 +148,8 @@ type ( // time simulator batchTime *sync2.Batcher globalTabletEnv *tabletEnv + + env *vtenv.Environment } ) @@ -154,10 +157,11 @@ type ( func (tq *TabletQuery) MarshalJSON() ([]byte, error) { // Convert Bindvars to strings for nicer output bindVars := make(map[string]string) + var buf strings.Builder for k, v := range tq.BindVars { - var b strings.Builder - sqlparser.EncodeValue(&b, v) - bindVars[k] = b.String() + buf.Reset() + sqlparser.EncodeValue(&buf, v) + bindVars[k] = buf.String() } return jsonutil.MarshalNoEscape(&struct { @@ -181,27 +185,30 @@ type TabletActions struct { } // Init sets up the fake execution environment -func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplain, error) { +func Init(ctx context.Context, env *vtenv.Environment, ts *topo.Server, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options, srvTopoCounts *stats.CountersWithSingleLabel) (*VTExplain, error) { // Verify options if opts.ReplicationMode != "ROW" && opts.ReplicationMode != "STATEMENT" { return nil, fmt.Errorf("invalid replication mode \"%s\"", opts.ReplicationMode) } - parsedDDLs, err := parseSchema(sqlSchema, opts) + parsedDDLs, err := parseSchema(sqlSchema, opts, env.Parser()) if 
err != nil { return nil, fmt.Errorf("parseSchema: %v", err) } - tabletEnv, err := newTabletEnvironment(parsedDDLs, opts) + tabletEnv, err := newTabletEnvironment(parsedDDLs, opts, env.CollationEnv()) if err != nil { return nil, fmt.Errorf("initTabletEnvironment: %v", err) } - vte := &VTExplain{vtgateSession: &vtgatepb.Session{ - TargetString: "", - Autocommit: true, - }} + vte := &VTExplain{ + vtgateSession: &vtgatepb.Session{ + TargetString: "", + Autocommit: true, + }, + env: env, + } vte.setGlobalTabletEnv(tabletEnv) - err = vte.initVtgateExecutor(ctx, vSchemaStr, ksShardMapStr, opts) + err = vte.initVtgateExecutor(ctx, ts, vSchemaStr, ksShardMapStr, opts, srvTopoCounts) if err != nil { return nil, fmt.Errorf("initVtgateExecutor: %v", err.Error()) } @@ -227,10 +234,10 @@ func (vte *VTExplain) Stop() { } } -func parseSchema(sqlSchema string, opts *Options) ([]sqlparser.DDLStatement, error) { +func parseSchema(sqlSchema string, opts *Options, parser *sqlparser.Parser) ([]sqlparser.DDLStatement, error) { parsedDDLs := make([]sqlparser.DDLStatement, 0, 16) for { - sql, rem, err := sqlparser.SplitStatement(sqlSchema) + sql, rem, err := parser.SplitStatement(sqlSchema) sqlSchema = rem if err != nil { return nil, err @@ -245,12 +252,12 @@ func parseSchema(sqlSchema string, opts *Options) ([]sqlparser.DDLStatement, err var stmt sqlparser.Statement if opts.StrictDDL { - stmt, err = sqlparser.ParseStrictDDL(sql) + stmt, err = parser.ParseStrictDDL(sql) if err != nil { return nil, err } } else { - stmt, err = sqlparser.Parse(sql) + stmt, err = parser.Parse(sql) if err != nil { log.Errorf("ERROR: failed to parse sql: %s, got error: %v", sql, err) continue @@ -294,7 +301,7 @@ func (vte *VTExplain) Run(sql string) ([]*Explain, error) { sql = s } - sql, rem, err = sqlparser.SplitStatement(sql) + sql, rem, err = vte.env.Parser().SplitStatement(sql) if err != nil { return nil, err } @@ -337,7 +344,7 @@ func (vte *VTExplain) explain(sql string) (*Explain, error) { // 
ExplainsAsText returns a text representation of the explains in logical time // order func (vte *VTExplain) ExplainsAsText(explains []*Explain) (string, error) { - var b bytes.Buffer + var b strings.Builder for _, explain := range explains { fmt.Fprintf(&b, "----------------------------------------------------------------------\n") fmt.Fprintf(&b, "%s\n\n", explain.SQL) @@ -381,7 +388,7 @@ func (vte *VTExplain) specialHandlingOfSavepoints(q *MysqlQuery) error { return nil } - stmt, err := sqlparser.Parse(q.SQL) + stmt, err := vte.env.Parser().Parse(q.SQL) if err != nil { return err } diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go index 54f1efbc522..49bb94fedb1 100644 --- a/go/vt/vtexplain/vtexplain_test.go +++ b/go/vt/vtexplain/vtexplain_test.go @@ -28,6 +28,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/key" @@ -49,7 +53,7 @@ type testopts struct { shardmap map[string]map[string]*topo.ShardInfo } -func initTest(ctx context.Context, mode string, opts *Options, topts *testopts, t *testing.T) *VTExplain { +func initTest(ctx context.Context, ts *topo.Server, mode string, opts *Options, topts *testopts, t *testing.T) *VTExplain { schema, err := os.ReadFile("testdata/test-schema.sql") require.NoError(t, err) @@ -65,7 +69,8 @@ func initTest(ctx context.Context, mode string, opts *Options, topts *testopts, } opts.ExecutionMode = mode - vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + vte, err := Init(ctx, vtenv.NewTestEnv(), ts, string(vSchema), string(schema), shardmap, opts, srvTopoCounts) require.NoError(t, err, "vtexplain Init error\n%s", string(schema)) return vte } @@ -88,7 +93,8 @@ func 
runTestCase(testcase, mode string, opts *Options, topts *testopts, t *testi t.Run(testcase, func(t *testing.T) { ctx := utils.LeakCheckContext(t) - vte := initTest(ctx, mode, opts, topts, t) + ts := memorytopo.NewServer(ctx, Cell) + vte := initTest(ctx, ts, mode, opts, topts, t) defer vte.Stop() sqlFile := fmt.Sprintf("testdata/%s-queries.sql", testcase) @@ -154,8 +160,8 @@ func TestExplain(t *testing.T) { func TestErrors(t *testing.T) { ctx := utils.LeakCheckContext(t) - - vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + ts := memorytopo.NewServer(ctx, Cell) + vte := initTest(ctx, ts, ModeMulti, defaultTestOpts(), &testopts{}, t) defer vte.Stop() tests := []struct { @@ -194,8 +200,8 @@ func TestErrors(t *testing.T) { func TestJSONOutput(t *testing.T) { ctx := utils.LeakCheckContext(t) - - vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + ts := memorytopo.NewServer(ctx, Cell) + vte := initTest(ctx, ts, ModeMulti, defaultTestOpts(), &testopts{}, t) defer vte.Stop() sql := "select 1 from user where id = 1" explains, err := vte.Run(sql) @@ -344,7 +350,9 @@ func TestInit(t *testing.T) { } }` schema := "create table table_missing_primary_vindex (id int primary key)" - _, err := Init(ctx, vschema, schema, "", defaultTestOpts()) + ts := memorytopo.NewServer(ctx, Cell) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + _, err := Init(ctx, vtenv.NewTestEnv(), ts, vschema, schema, "", defaultTestOpts(), srvTopoCounts) require.Error(t, err) require.Contains(t, err.Error(), "missing primary col vindex") } diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index 8167c510b01..22939efde20 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -22,26 +22,24 @@ package vtexplain import ( "context" "fmt" + "path" "sort" "strings" "vitess.io/vitess/go/cache/theine" - "vitess.io/vitess/go/vt/vtgate/logstats" - 
"vitess.io/vitess/go/vt/vtgate/vindexes" - - "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/topo/memorytopo" - - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/queryservice" querypb "vitess.io/vitess/go/vt/proto/query" @@ -50,14 +48,14 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" ) -func (vte *VTExplain) initVtgateExecutor(ctx context.Context, vSchemaStr, ksShardMapStr string, opts *Options) error { +func (vte *VTExplain) initVtgateExecutor(ctx context.Context, ts *topo.Server, vSchemaStr, ksShardMapStr string, opts *Options, srvTopoCounts *stats.CountersWithSingleLabel) error { vte.explainTopo = &ExplainTopo{NumShards: opts.NumShards} - vte.explainTopo.TopoServer = memorytopo.NewServer(ctx, vtexplainCell) + vte.explainTopo.TopoServer = ts vte.healthCheck = discovery.NewFakeHealthCheck(nil) - resolver := vte.newFakeResolver(ctx, opts, vte.explainTopo, vtexplainCell) + resolver := vte.newFakeResolver(ctx, opts, vte.explainTopo, Cell) - err := vte.buildTopology(ctx, opts, vSchemaStr, ksShardMapStr, opts.NumShards) + err := vte.buildTopology(ctx, ts, opts, vSchemaStr, ksShardMapStr, opts.NumShards, srvTopoCounts) if err != nil { return err } @@ -75,7 +73,7 @@ func (vte *VTExplain) initVtgateExecutor(ctx context.Context, vSchemaStr, ksShar var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests queryLogBufferSize := 10 plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false) - vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, 
resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0) + vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.env, vte.explainTopo, Cell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0) vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) return nil @@ -95,7 +93,7 @@ func (vte *VTExplain) newFakeResolver(ctx context.Context, opts *Options, serv s return vtgate.NewResolver(srvResolver, serv, cell, sc) } -func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaStr string, ksShardMapStr string, numShardsPerKeyspace int) error { +func (vte *VTExplain) buildTopology(ctx context.Context, ts *topo.Server, opts *Options, vschemaStr string, ksShardMapStr string, numShardsPerKeyspace int, srvTopoCounts *stats.CountersWithSingleLabel) error { vte.explainTopo.Lock.Lock() defer vte.explainTopo.Lock.Unlock() @@ -107,7 +105,7 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS if err != nil { return err } - schema := vindexes.BuildVSchema(&srvVSchema) + schema := vindexes.BuildVSchema(&srvVSchema, vte.env.Parser()) for ks, ksSchema := range schema.Keyspaces { if ksSchema.Error != nil { return vterrors.Wrapf(ksSchema.Error, "vschema failed to load on keyspace [%s]", ks) @@ -120,6 +118,10 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS return err } + conn, err := ts.ConnForCell(ctx, Cell) + if err != nil { + return err + } vte.explainTopo.TabletConns = make(map[string]*explainTablet) vte.explainTopo.KeyspaceShards = make(map[string]map[string]*topodatapb.ShardReference) for ks, vschema := range vte.explainTopo.Keyspaces { @@ -130,6 +132,32 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS vte.explainTopo.KeyspaceShards[ks] = make(map[string]*topodatapb.ShardReference) + srvPath := path.Join(topo.KeyspacesPath, ks, 
topo.SrvKeyspaceFile) + srvKeyspace := &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_PRIMARY, + ShardReferences: shards, + }, + { + ServedType: topodatapb.TabletType_REPLICA, + ShardReferences: shards, + }, + { + ServedType: topodatapb.TabletType_RDONLY, + ShardReferences: shards, + }, + }, + } + data, err := srvKeyspace.MarshalVT() + if err != nil { + return err + } + _, err = conn.Update(ctx, srvPath, data, nil) + if err != nil { + return err + } + for _, shard := range shards { // If the topology is in the middle of a reshard, there can be two shards covering the same key range (e.g. // both source shard 80- and target shard 80-c0 cover the keyrange 80-c0). For the purposes of explain, we @@ -142,14 +170,13 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS hostname := fmt.Sprintf("%s/%s", ks, shard.Name) log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name) - tablet := vte.healthCheck.AddFakeTablet(vtexplainCell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { - return vte.newTablet(ctx, opts, t) + tablet := vte.healthCheck.AddFakeTablet(Cell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { + return vte.newTablet(ctx, vte.env, opts, t, ts, srvTopoCounts) }) vte.explainTopo.TabletConns[hostname] = tablet.(*explainTablet) vte.explainTopo.KeyspaceShards[ks][shard.Name] = shard } } - return err } diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index 85aa64037a7..6f28cd99ec0 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -23,7 +23,10 @@ import ( "strings" "sync" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/topo" + 
"vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" @@ -34,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -98,13 +100,15 @@ type explainTablet struct { mysqlQueries []*MysqlQuery currentTime int vte *VTExplain + + collationEnv *collations.Environment } var _ queryservice.QueryService = (*explainTablet)(nil) -func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet) *explainTablet { +func (vte *VTExplain) newTablet(ctx context.Context, env *vtenv.Environment, opts *Options, t *topodatapb.Tablet, ts *topo.Server, srvTopoCounts *stats.CountersWithSingleLabel) *explainTablet { db := fakesqldb.New(nil) - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, env.Parser()) config := tabletenv.NewCurrentConfig() config.TrackSchemaVersions = false @@ -117,9 +121,9 @@ func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatap config.EnableTableGC = false // XXX much of this is cloned from the tabletserver tests - tsv := tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias) + tsv := tabletserver.NewTabletServer(ctx, env, topoproto.TabletAliasString(t.Alias), config, ts, t.Alias, srvTopoCounts) - tablet := explainTablet{db: db, tsv: tsv, vte: vte} + tablet := explainTablet{db: db, tsv: tsv, vte: vte, collationEnv: env.CollationEnv()} db.Handler = &tablet tablet.QueryService = queryservice.Wrap( @@ -129,7 +133,7 @@ func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatap }, ) - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params dbcfgs := dbconfigs.NewTestDBConfigs(cp, cp, "") cnf := mysqlctl.NewMycnf(22222, 6802) @@ -280,7 +284,7 @@ 
func (t *explainTablet) Close(ctx context.Context) error { return t.tsv.Close(ctx) } -func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tabletEnv, error) { +func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options, collationEnv *collations.Environment) (*tabletEnv, error) { tEnv := newTabletEnv() schemaQueries := map[string]*sqltypes.Result{ "select unix_timestamp()": { @@ -302,6 +306,15 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet {sqltypes.NewVarChar("STRICT_TRANS_TABLES")}, }, }, + "select @@global.collation_server": { + Fields: []*querypb.Field{{ + Type: sqltypes.VarChar, + Charset: uint32(collations.SystemCollation.Collation), + }}, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarChar("utf8mb4_0900_ai_ci")}, + }, + }, "select @@session.sql_mode as sql_mode": { Fields: []*querypb.Field{{ Name: "sql_mode", @@ -442,10 +455,18 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet indexRows := make([][]sqltypes.Value, 0, 4) for _, ddl := range ddls { table := sqlparser.String(ddl.GetTable().Name) - backtickedTable := sqlescape.EscapeID(sqlescape.UnescapeID(table)) + sanitizedTable, err := sqlescape.UnescapeID(table) + if err != nil { + return nil, err + } + backtickedTable := sqlescape.EscapeID(sanitizedTable) if ddl.GetOptLike() != nil { likeTable := ddl.GetOptLike().LikeTable.Name.String() - backtickedLikeTable := sqlescape.EscapeID(sqlescape.UnescapeID(likeTable)) + sanitizedLikeTable, err := sqlescape.UnescapeID(likeTable) + if err != nil { + return nil, err + } + backtickedLikeTable := sqlescape.EscapeID(sanitizedLikeTable) likeQuery := "SELECT * FROM " + backtickedLikeTable + " WHERE 1 != 1" query := "SELECT * FROM " + backtickedTable + " WHERE 1 != 1" @@ -454,8 +475,8 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet } tEnv.addResult(query, tEnv.getResult(likeQuery)) - likeQuery = fmt.Sprintf(mysqlctl.GetColumnNamesQuery, 
"database()", sqlescape.UnescapeID(likeTable)) - query = fmt.Sprintf(mysqlctl.GetColumnNamesQuery, "database()", sqlescape.UnescapeID(table)) + likeQuery = fmt.Sprintf(mysqlctl.GetColumnNamesQuery, "database()", sqltypes.EncodeStringSQL(sanitizedLikeTable)) + query = fmt.Sprintf(mysqlctl.GetColumnNamesQuery, "database()", sqltypes.EncodeStringSQL(sanitizedTable)) if tEnv.getResult(likeQuery) == nil { return nil, fmt.Errorf("check your schema, table[%s] doesn't exist", likeTable) } @@ -479,7 +500,7 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet colType := &querypb.Field{ Name: "column_type", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collationEnv.DefaultConnectionCharset()), } colTypes = append(colTypes, colType) for _, col := range ddl.GetTableSpec().Columns { @@ -496,7 +517,7 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet tEnv.addResult("SELECT * FROM "+backtickedTable+" WHERE 1 != 1", &sqltypes.Result{ Fields: rowTypes, }) - query := fmt.Sprintf(mysqlctl.GetColumnNamesQuery, "database()", sqlescape.UnescapeID(table)) + query := fmt.Sprintf(mysqlctl.GetColumnNamesQuery, "database()", sqltypes.EncodeStringSQL(sanitizedTable)) tEnv.addResult(query, &sqltypes.Result{ Fields: colTypes, Rows: colValues, @@ -581,7 +602,7 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { // Parse the select statement to figure out the table and columns // that were referenced so that the synthetic response has the // expected field names and types. 
- stmt, err := sqlparser.Parse(query) + stmt, err := t.vte.env.Parser().Parse(query) if err != nil { return nil, err } @@ -598,7 +619,7 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { // Gen4 supports more complex queries so we now need to // handle multiple FROM clauses - tables := make([]*sqlparser.AliasedTableExpr, len(selStmt.From)) + tables := make([]*sqlparser.AliasedTableExpr, 0, len(selStmt.From)) for _, from := range selStmt.From { tables = append(tables, getTables(from)...) } @@ -646,7 +667,7 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { rows := make([][]sqltypes.Value, 0, rowCount) for i, col := range colNames { colType := colTypes[i] - cs := collations.DefaultCollationForType(colType) + cs := collations.CollationForType(colType, t.collationEnv.DefaultConnectionCharset()) fields[i] = &querypb.Field{ Name: col, Type: colType, @@ -734,7 +755,7 @@ func (t *explainTablet) analyzeWhere(selStmt *sqlparser.Select, tableColumnMap m // Check if we have a duplicate value isNewValue := true for _, v := range inVal { - result, err := evalengine.NullsafeCompare(v, value, collations.Default()) + result, err := evalengine.NullsafeCompare(v, value, t.collationEnv, t.collationEnv.DefaultConnectionCharset(), nil) if err != nil { return "", nil, 0, nil, err } diff --git a/go/vt/vtexplain/vtexplain_vttablet_test.go b/go/vt/vtexplain/vtexplain_vttablet_test.go index 614ad186224..ddf24b90afa 100644 --- a/go/vt/vtexplain/vtexplain_vttablet_test.go +++ b/go/vt/vtexplain/vtexplain_vttablet_test.go @@ -20,10 +20,16 @@ import ( "context" "encoding/json" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" topodatapb 
"vitess.io/vitess/go/vt/proto/topodata" @@ -70,10 +76,16 @@ create table t2 ( ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vte, err := Init(ctx, testVSchema, testSchema, "", opts) + ts := memorytopo.NewServer(ctx, Cell) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + vte, err := Init(ctx, vtenv.NewTestEnv(), ts, testVSchema, testSchema, "", opts, srvTopoCounts) require.NoError(t, err) defer vte.Stop() + // Check if the correct schema query is registered. + _, found := vte.globalTabletEnv.schemaQueries["SELECT COLUMN_NAME as column_name\n\t\tFROM INFORMATION_SCHEMA.COLUMNS\n\t\tWHERE TABLE_SCHEMA = database() AND TABLE_NAME = 't1'\n\t\tORDER BY ORDINAL_POSITION"] + assert.True(t, found) + sql := "SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id" _, err = vte.Run(sql) @@ -117,25 +129,30 @@ create table test_partitioned ( PARTITION p2018_06_16 VALUES LESS THAN (1529132400) ENGINE = InnoDB, PARTITION p2018_06_17 VALUES LESS THAN (1529218800) ENGINE = InnoDB)*/; ` - - ddls, err := parseSchema(testSchema, &Options{StrictDDL: false}) + env := vtenv.NewTestEnv() + ddls, err := parseSchema(testSchema, &Options{StrictDDL: false}, env.Parser()) if err != nil { t.Fatalf("parseSchema: %v", err) } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) + ts := memorytopo.NewServer(ctx, Cell) + vte := initTest(ctx, ts, ModeMulti, defaultTestOpts(), &testopts{}, t) defer vte.Stop() - tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts()) + tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts(), env.CollationEnv()) vte.setGlobalTabletEnv(tabletEnv) - - tablet := vte.newTablet(ctx, defaultTestOpts(), &topodatapb.Tablet{ - Keyspace: "test_keyspace", + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tablet := vte.newTablet(ctx, env, defaultTestOpts(), 
&topodatapb.Tablet{ + Keyspace: "ks_sharded", Shard: "-80", - Alias: &topodatapb.TabletAlias{}, - }) + Alias: &topodatapb.TabletAlias{ + Cell: Cell, + }, + }, ts, srvTopoCounts) + + time.Sleep(10 * time.Millisecond) se := tablet.tsv.SchemaEngine() tables := se.GetSchema() @@ -181,9 +198,9 @@ create table test_partitioned ( func TestErrParseSchema(t *testing.T) { testSchema := `create table t1 like t2` - ddl, err := parseSchema(testSchema, &Options{StrictDDL: true}) + ddl, err := parseSchema(testSchema, &Options{StrictDDL: true}, sqlparser.NewTestParser()) require.NoError(t, err) - _, err = newTabletEnvironment(ddl, defaultTestOpts()) + _, err = newTabletEnvironment(ddl, defaultTestOpts(), collations.MySQL8()) require.Error(t, err, "check your schema, table[t2] doesn't exist") } diff --git a/go/vt/vtgate/autocommit_test.go b/go/vt/vtgate/autocommit_test.go index fa63695bfbd..1ba99c01ef2 100644 --- a/go/vt/vtgate/autocommit_test.go +++ b/go/vt/vtgate/autocommit_test.go @@ -185,8 +185,10 @@ func TestAutocommitDeleteIn(t *testing.T) { require.NoError(t, err) assertQueries(t, sbc1, []*querypb.BoundQuery{{ - Sql: "delete from user_extra where user_id in (1, 2)", - BindVariables: map[string]*querypb.BindVariable{}, + Sql: "delete from user_extra where user_id in ::__vals", + BindVariables: map[string]*querypb.BindVariable{ + "__vals": sqltypes.TestBindVariable([]any{int64(1), int64(2)}), + }, }}) testCommitCount(t, "sbc1", sbc1, 0) @@ -391,11 +393,12 @@ func TestAutocommitTransactionStarted(t *testing.T) { // multi shard query - savepoint needed sql = "update `user` set a = 2 where id in (1, 4)" + expectedSql := "update `user` set a = 2 where id in ::__vals" _, err = executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(session), sql, map[string]*querypb.BindVariable{}) require.NoError(t, err) require.Len(t, sbc1.Queries, 2) require.Contains(t, sbc1.Queries[0].Sql, "savepoint") - require.Equal(t, sql, sbc1.Queries[1].Sql) + require.Equal(t, 
expectedSql, sbc1.Queries[1].Sql) testCommitCount(t, "sbc1", sbc1, 0) } diff --git a/go/vt/vtgate/buffer/buffer.go b/go/vt/vtgate/buffer/buffer.go index 622bb03b082..260fb272544 100644 --- a/go/vt/vtgate/buffer/buffer.go +++ b/go/vt/vtgate/buffer/buffer.go @@ -164,6 +164,10 @@ func New(cfg *Config) *Buffer { } } +func (b *Buffer) GetConfig() *Config { + return b.config +} + // WaitForFailoverEnd blocks until a pending buffering due to a failover for // keyspace/shard is over. // If there is no ongoing failover, "err" is checked. If it's caused by a diff --git a/go/vt/vtgate/buffer/buffer_test.go b/go/vt/vtgate/buffer/buffer_test.go index 7f32364d57f..c730a8336d1 100644 --- a/go/vt/vtgate/buffer/buffer_test.go +++ b/go/vt/vtgate/buffer/buffer_test.go @@ -20,9 +20,13 @@ import ( "context" "fmt" "strings" + "sync" + "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -69,10 +73,18 @@ var ( ) func TestBuffering(t *testing.T) { - testAllImplementations(t, testBuffering1) + testAllImplementations(t, func(t *testing.T, fail failover) { + testBuffering1WithOptions(t, fail, 1) + }) +} + +func TestBufferingConcurrent(t *testing.T) { + testAllImplementations(t, func(t *testing.T, fail failover) { + testBuffering1WithOptions(t, fail, 2) + }) } -func testBuffering1(t *testing.T, fail failover) { +func testBuffering1WithOptions(t *testing.T, fail failover, concurrency int) { resetVariables() defer checkVariables(t) @@ -86,6 +98,7 @@ func testBuffering1(t *testing.T, fail failover) { topoproto.KeyspaceShardString(keyspace, shard): true, } cfg.now = func() time.Time { return now } + cfg.DrainConcurrency = concurrency b := New(cfg) @@ -782,3 +795,66 @@ func testShutdown1(t *testing.T, fail failover) { t.Fatal(err) } } + +func TestParallelRangeIndex(t *testing.T) { + suite := []struct { + max int + concurrency int + calls []int + }{ + { + max: 0, + concurrency: 0, + calls: []int{}, + }, 
+ { + max: 100, + concurrency: 0, + calls: []int{}, + }, + { + max: 9, + concurrency: 3, + calls: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + { + max: 0, + concurrency: 10, + calls: []int{0}, + }, + { + max: 9, + concurrency: 9, + calls: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + } + + for idx, tc := range suite { + name := fmt.Sprintf("%d_max%d_concurrency%d", idx, tc.max, tc.concurrency) + t.Run(name, func(t *testing.T) { + var mu sync.Mutex + var wg sync.WaitGroup + var counter atomic.Int64 + + wg.Add(tc.concurrency) + var got []int + for i := 0; i < tc.concurrency; i++ { + go func() { + defer wg.Done() + for { + idx, ok := parallelRangeIndex(&counter, tc.max) + if !ok { + break + } + + mu.Lock() + got = append(got, idx) + mu.Unlock() + } + }() + } + wg.Wait() + assert.ElementsMatch(t, got, tc.calls, "must call passed function with matching indexes") + }) + } +} diff --git a/go/vt/vtgate/buffer/flags.go b/go/vt/vtgate/buffer/flags.go index a17cc09ccc3..01a3c33e869 100644 --- a/go/vt/vtgate/buffer/flags.go +++ b/go/vt/vtgate/buffer/flags.go @@ -70,6 +70,9 @@ func verifyFlags() error { if bufferSize < 1 { return fmt.Errorf("--buffer_size must be >= 1 (specified value: %d)", bufferSize) } + if bufferMinTimeBetweenFailovers < 1*time.Second { + return fmt.Errorf("--buffer_min_time_between_failovers must be >= 1s (specified value: %v)", bufferMinTimeBetweenFailovers) + } if bufferDrainConcurrency < 1 { return fmt.Errorf("--buffer_drain_concurrency must be >= 1 (specified value: %d)", bufferDrainConcurrency) @@ -162,16 +165,6 @@ func NewDefaultConfig() *Config { } } -// EnableBuffering is used in tests where we require the keyspace event watcher to be created -func EnableBuffering() { - bufferEnabled = true -} - -// DisableBuffering is the counterpart of EnableBuffering -func DisableBuffering() { - bufferEnabled = false -} - func NewConfigFromFlags() *Config { if err := verifyFlags(); err != nil { log.Fatalf("Invalid buffer configuration: %v", err) diff --git 
a/go/vt/vtgate/buffer/shard_buffer.go b/go/vt/vtgate/buffer/shard_buffer.go index ae33aabb399..b0764c2ad91 100644 --- a/go/vt/vtgate/buffer/shard_buffer.go +++ b/go/vt/vtgate/buffer/shard_buffer.go @@ -21,17 +21,17 @@ import ( "fmt" "runtime/debug" "sync" + "sync/atomic" "time" "vitess.io/vitess/go/vt/discovery" - - "vitess.io/vitess/go/vt/vtgate/errorsanitizer" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/errorsanitizer" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // bufferState represents the different states a shardBuffer object can be in. @@ -561,6 +561,18 @@ func (sb *shardBuffer) stopBufferingLocked(reason stopReason, details string) { go sb.drain(q, clientEntryError) } +// parallelRangeIndex uses counter to return a unique idx value up to the +// passed max and ok will be set to false if the counter exceeds the max +func parallelRangeIndex(counter *atomic.Int64, max int) (idx int, ok bool) { + next := counter.Add(1) + if next-1 > int64(max) { + return -1, false + } + // if this is a 32-bit platform, max won't exceed the 32-bit integer limit + // so a cast from a too-large 64-bit int to a 32-bit int will never happen + return int(next) - 1, true +} + func (sb *shardBuffer) drain(q []*entry, err error) { defer sb.wg.Done() @@ -569,10 +581,32 @@ func (sb *shardBuffer) drain(q []*entry, err error) { sb.timeoutThread.stop() start := sb.timeNow() - // TODO(mberlin): Parallelize the drain by pumping the data through a channel. 
- for _, e := range q { - sb.unblockAndWait(e, err, true /* releaseSlot */, true /* blockingWait */) + + entryCount := len(q) + parallelism := min(sb.buf.config.DrainConcurrency, entryCount) + + var wg sync.WaitGroup + var rangeCounter atomic.Int64 + + wg.Add(parallelism) + for i := 0; i < parallelism; i++ { + go func() { + defer wg.Done() + for { + idx, ok := parallelRangeIndex(&rangeCounter, entryCount-1) + if !ok { + break + } + // Shared access to the q slice is concurrency-safe because each goroutine receives + // a unique set of slice indices from parallelRangeIndex above and the slice remains + // immutable for the lifetime of this operation. + sb.unblockAndWait(q[idx], err, true /* releaseSlot */, true /* blockingWait */) + } + }() } + + wg.Wait() + d := sb.timeNow().Sub(start) log.Infof("Draining finished for shard: %s Took: %v for: %d requests.", topoproto.KeyspaceShardString(sb.keyspace, sb.shard), d, len(q)) requestsDrained.Add(sb.statsKey, int64(len(q))) diff --git a/go/vt/vtgate/endtoend/oltp_test.go b/go/vt/vtgate/endtoend/oltp_test.go index f8ca646f8c7..02e9e2c92a7 100644 --- a/go/vt/vtgate/endtoend/oltp_test.go +++ b/go/vt/vtgate/endtoend/oltp_test.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "testing" @@ -21,12 +21,12 @@ const cValueTemplate = "###########-###########-###########-" + const padValueTemplate = "###########-###########-###########-" + "###########-###########" -func sysbenchRandom(rng *rand.Rand, template string) []byte { +func sysbenchRandom(template string) []byte { out := make([]byte, 0, len(template)) for i := range template { switch template[i] { case '#': - out = append(out, '0'+byte(rng.Intn(10))) + out = append(out, '0'+byte(rand.IntN(10))) default: out = append(out, template[i]) } @@ -40,8 +40,6 @@ func BenchmarkOLTP(b *testing.B) { const MaxRows = 10000 const RangeSize = 100 - rng := rand.New(rand.NewSource(1234)) - ctx := context.Background() conn, err := mysql.Connect(ctx, 
&vtParams) if err != nil { @@ -62,7 +60,7 @@ func BenchmarkOLTP(b *testing.B) { if j > 0 { query.WriteString(", ") } - _, _ = fmt.Fprintf(&query, "(%d, %d, '%s', '%s')", rows, rng.Int31n(0xFFFF), sysbenchRandom(rng, cValueTemplate), sysbenchRandom(rng, padValueTemplate)) + _, _ = fmt.Fprintf(&query, "(%d, %d, '%s', '%s')", rows, rand.Int32N(0xFFFF), sysbenchRandom(cValueTemplate), sysbenchRandom(padValueTemplate)) rows++ } @@ -77,10 +75,10 @@ func BenchmarkOLTP(b *testing.B) { b.Run("SimpleRanges", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - id := rng.Intn(MaxRows) + id := rand.IntN(MaxRows) query.Reset() - _, _ = fmt.Fprintf(&query, "SELECT c FROM oltp_test WHERE id BETWEEN %d AND %d", id, id+rng.Intn(RangeSize)-1) + _, _ = fmt.Fprintf(&query, "SELECT c FROM oltp_test WHERE id BETWEEN %d AND %d", id, id+rand.IntN(RangeSize)-1) _, err := conn.ExecuteFetch(query.String(), 1000, false) if err != nil { b.Error(err) @@ -91,10 +89,10 @@ func BenchmarkOLTP(b *testing.B) { b.Run("SumRanges", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - id := rng.Intn(MaxRows) + id := rand.IntN(MaxRows) query.Reset() - _, _ = fmt.Fprintf(&query, "SELECT SUM(k) FROM oltp_test WHERE id BETWEEN %d AND %d", id, id+rng.Intn(RangeSize)-1) + _, _ = fmt.Fprintf(&query, "SELECT SUM(k) FROM oltp_test WHERE id BETWEEN %d AND %d", id, id+rand.IntN(RangeSize)-1) _, err := conn.ExecuteFetch(query.String(), 1000, false) if err != nil { b.Error(err) @@ -105,10 +103,10 @@ func BenchmarkOLTP(b *testing.B) { b.Run("OrderRanges", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - id := rng.Intn(MaxRows) + id := rand.IntN(MaxRows) query.Reset() - _, _ = fmt.Fprintf(&query, "SELECT c FROM oltp_test WHERE id BETWEEN %d AND %d ORDER BY c", id, id+rng.Intn(RangeSize)-1) + _, _ = fmt.Fprintf(&query, "SELECT c FROM oltp_test WHERE id BETWEEN %d AND %d ORDER BY c", id, id+rand.IntN(RangeSize)-1) _, err := conn.ExecuteFetch(query.String(), 1000, false) if err != 
nil { b.Error(err) @@ -119,10 +117,10 @@ func BenchmarkOLTP(b *testing.B) { b.Run("DistinctRanges", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - id := rng.Intn(MaxRows) + id := rand.IntN(MaxRows) query.Reset() - _, _ = fmt.Fprintf(&query, "SELECT DISTINCT c FROM oltp_test WHERE id BETWEEN %d AND %d ORDER BY c", id, id+rng.Intn(RangeSize)-1) + _, _ = fmt.Fprintf(&query, "SELECT DISTINCT c FROM oltp_test WHERE id BETWEEN %d AND %d ORDER BY c", id, id+rand.IntN(RangeSize)-1) _, err := conn.ExecuteFetch(query.String(), 1000, false) if err != nil { b.Error(err) diff --git a/go/vt/vtgate/endtoend/vstream_test.go b/go/vt/vtgate/endtoend/vstream_test.go index 42dd6e3d2a3..246d17f88b5 100644 --- a/go/vt/vtgate/endtoend/vstream_test.go +++ b/go/vt/vtgate/endtoend/vstream_test.go @@ -493,7 +493,7 @@ func TestVStreamCopyResume(t *testing.T) { // Also, to ensure that the client can resume properly, make sure that // the Fields value is present in the sqltypes.Result field and not missing. // It's not guaranteed that BOTH shards have streamed a row yet as the order - // of events in the stream is non-determinstic. So we check to be sure that + // of events in the stream is non-deterministic. So we check to be sure that // at least one shard has copied rows and thus has a full TableLastPK proto // message. 
eventStr := ev.String() @@ -617,9 +617,9 @@ func TestVStreamSharded(t *testing.T) { received bool } expectedEvents := []*expectedEvent{ - {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"-80"}`, false}, + {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_-80" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"-80" enum_set_string_values:true}`, false}, {`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"11"}} keyspace:"ks" shard:"-80"}`, false}, - {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id2" column_length:20 charset:63 flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"80-"}`, false}, + {`type:FIELD field_event:{table_name:"ks.t1_sharded" fields:{name:"id1" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id1" column_length:20 charset:63 flags:53251 column_type:"bigint(20)"} fields:{name:"id2" type:INT64 table:"t1_sharded" org_table:"t1_sharded" database:"vt_ks_80-" org_name:"id2" column_length:20 charset:63 
flags:32768 column_type:"bigint(20)"} keyspace:"ks" shard:"80-" enum_set_string_values:true}`, false}, {`type:ROW row_event:{table_name:"ks.t1_sharded" row_changes:{after:{lengths:1 lengths:1 values:"44"}} keyspace:"ks" shard:"80-"}`, false}, } for { diff --git a/go/vt/vtgate/engine/aggregations.go b/go/vt/vtgate/engine/aggregations.go index dd7a259d1b6..4673a2717e5 100644 --- a/go/vt/vtgate/engine/aggregations.go +++ b/go/vt/vtgate/engine/aggregations.go @@ -49,15 +49,17 @@ type AggregateParams struct { // This is based on the function passed in the select expression and // not what we use to aggregate at the engine primitive level. OrigOpcode AggregateOpcode + + CollationEnv *collations.Environment } -func NewAggregateParam(opcode AggregateOpcode, col int, alias string) *AggregateParams { +func NewAggregateParam(opcode AggregateOpcode, col int, alias string, collationEnv *collations.Environment) *AggregateParams { out := &AggregateParams{ - Opcode: opcode, - Col: col, - Alias: alias, - WCol: -1, - Type: evalengine.UnknownType(), + Opcode: opcode, + Col: col, + Alias: alias, + WCol: -1, + CollationEnv: collationEnv, } if opcode.NeedsComparableValues() { out.KeyCol = col @@ -74,8 +76,8 @@ func (ap *AggregateParams) String() string { if ap.WAssigned() { keyCol = fmt.Sprintf("%s|%d", keyCol, ap.WCol) } - if sqltypes.IsText(ap.Type.Type) && ap.Type.Coll != collations.Unknown { - keyCol += " COLLATE " + collations.Local().LookupName(ap.Type.Coll) + if sqltypes.IsText(ap.Type.Type()) && ap.CollationEnv.IsSupported(ap.Type.Collation()) { + keyCol += " COLLATE " + ap.CollationEnv.LookupName(ap.Type.Collation()) } dispOrigOp := "" if ap.OrigOpcode != AggregateUnassigned && ap.OrigOpcode != ap.Opcode { @@ -89,9 +91,9 @@ func (ap *AggregateParams) String() string { func (ap *AggregateParams) typ(inputType querypb.Type) querypb.Type { if ap.OrigOpcode != AggregateUnassigned { - return ap.OrigOpcode.Type(inputType) + return ap.OrigOpcode.SQLType(inputType) } - return 
ap.Opcode.Type(inputType) + return ap.Opcode.SQLType(inputType) } type aggregator interface { @@ -101,9 +103,11 @@ type aggregator interface { } type aggregatorDistinct struct { - column int - last sqltypes.Value - coll collations.ID + column int + last sqltypes.Value + coll collations.ID + collationEnv *collations.Environment + values *evalengine.EnumSetValues } func (a *aggregatorDistinct) shouldReturn(row []sqltypes.Value) (bool, error) { @@ -112,7 +116,7 @@ func (a *aggregatorDistinct) shouldReturn(row []sqltypes.Value) (bool, error) { next := row[a.column] if !last.IsNull() { if last.TinyWeightCmp(next) == 0 { - cmp, err := evalengine.NullsafeCompare(last, next, a.coll) + cmp, err := evalengine.NullsafeCompare(last, next, a.collationEnv, a.coll, a.values) if err != nil { return true, err } @@ -380,8 +384,10 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg ag = &aggregatorCount{ from: aggr.Col, distinct: aggregatorDistinct{ - column: distinct, - coll: aggr.Type.Coll, + column: distinct, + coll: aggr.Type.Collation(), + collationEnv: aggr.CollationEnv, + values: aggr.Type.Values(), }, } @@ -398,8 +404,10 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg from: aggr.Col, sum: sum, distinct: aggregatorDistinct{ - column: distinct, - coll: aggr.Type.Coll, + column: distinct, + coll: aggr.Type.Collation(), + collationEnv: aggr.CollationEnv, + values: aggr.Type.Values(), }, } @@ -407,7 +415,7 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg ag = &aggregatorMin{ aggregatorMinMax{ from: aggr.Col, - minmax: evalengine.NewAggregationMinMax(sourceType, aggr.Type.Coll), + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationEnv, aggr.Type.Collation(), aggr.Type.Values()), }, } @@ -415,7 +423,7 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg ag = &aggregatorMax{ aggregatorMinMax{ from: aggr.Col, - minmax: 
evalengine.NewAggregationMinMax(sourceType, aggr.Type.Coll), + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationEnv, aggr.Type.Collation(), aggr.Type.Values()), }, } diff --git a/go/vt/vtgate/engine/aggregations_test.go b/go/vt/vtgate/engine/aggregations_test.go index 55ec59f73e1..9facf3d4f6f 100644 --- a/go/vt/vtgate/engine/aggregations_test.go +++ b/go/vt/vtgate/engine/aggregations_test.go @@ -19,7 +19,7 @@ package engine import ( "context" "fmt" - "math/rand" + "math/rand/v2" "strings" "testing" @@ -58,7 +58,7 @@ func benchmarkName(fields []*querypb.Field) string { func BenchmarkScalarAggregate(b *testing.B) { var rand_i64 = sqltypes.RandomGenerators[sqltypes.Int64] var rand_i64small = func() sqltypes.Value { - return sqltypes.NewInt64(rand.Int63n(1024)) + return sqltypes.NewInt64(rand.Int64N(1024)) } var rand_f64 = sqltypes.RandomGenerators[sqltypes.Float64] var rand_dec = sqltypes.RandomGenerators[sqltypes.Decimal] @@ -142,7 +142,7 @@ func BenchmarkScalarAggregate(b *testing.B) { return sqltypes.NewVarChar(uuid.New().String()) }, func() sqltypes.Value { - return sqltypes.NewVarChar(fmt.Sprintf("%x-%x", rand.Intn(256), rand.Intn(256))) + return sqltypes.NewVarChar(fmt.Sprintf("%x-%x", rand.IntN(256), rand.IntN(256))) }, }, params: []*AggregateParams{ diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index b70f83b192d..e65ff61a9f6 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ -35,8 +35,10 @@ func (cached *AggregateParams) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(96) + size += int64(112) } + // field Type vitess.io/vitess/go/vt/vtgate/evalengine.Type + size += cached.Type.CachedSize(false) // field Alias string size += hack.RuntimeAllocSize(int64(len(cached.Alias))) // field Expr vitess.io/vitess/go/vt/sqlparser.Expr @@ -45,6 +47,8 @@ func (cached *AggregateParams) CachedSize(alloc bool) int64 { } // field Original 
*vitess.io/vitess/go/vt/sqlparser.AliasedExpr size += cached.Original.CachedSize(true) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *AlterVSchema) CachedSize(alloc bool) int64 { @@ -67,10 +71,14 @@ func (cached *CheckCol) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(48) } // field WsCol *int size += hack.RuntimeAllocSize(int64(8)) + // field Type vitess.io/vitess/go/vt/vtgate/evalengine.Type + size += cached.Type.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } @@ -145,7 +153,7 @@ func (cached *DML) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(128) + size += int64(144) } // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) @@ -173,13 +181,66 @@ func (cached *DML) CachedSize(alloc bool) int64 { size += cached.RoutingParameters.CachedSize(true) return size } + +//go:nocheckptr +func (cached *DMLWithInput) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(96) + } + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field DMLs []vitess.io/vitess/go/vt/vtgate/engine.Primitive + { + size += hack.RuntimeAllocSize(int64(cap(cached.DMLs)) * int64(16)) + for _, elem := range cached.DMLs { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + // field OutputCols [][]int + { + size += hack.RuntimeAllocSize(int64(cap(cached.OutputCols)) * int64(24)) + for _, elem := range cached.OutputCols { + { + size += hack.RuntimeAllocSize(int64(cap(elem)) * int64(8)) + } + } + } + // field BVList []map[string]int + { + size += hack.RuntimeAllocSize(int64(cap(cached.BVList)) * 
int64(8)) + for _, elem := range cached.BVList { + if elem != nil { + size += int64(48) + hmap := reflect.ValueOf(elem) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) + if len(elem) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 208)) + } + for k := range elem { + size += hack.RuntimeAllocSize(int64(len(k))) + } + } + } + } + return size +} func (cached *Delete) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) } size := int64(0) if alloc { - size += int64(8) + size += int64(16) } // field DML *vitess.io/vitess/go/vt/vtgate/engine.DML size += cached.DML.CachedSize(true) @@ -199,7 +260,7 @@ func (cached *Distinct) CachedSize(alloc bool) int64 { } // field CheckCols []vitess.io/vitess/go/vt/vtgate/engine.CheckCol { - size += hack.RuntimeAllocSize(int64(cap(cached.CheckCols)) * int64(23)) + size += hack.RuntimeAllocSize(int64(cap(cached.CheckCols)) * int64(48)) for _, elem := range cached.CheckCols { size += elem.CachedSize(false) } @@ -280,7 +341,7 @@ func (cached *FkChild) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(64) + size += int64(80) } // field BVName string size += hack.RuntimeAllocSize(int64(len(cached.BVName))) @@ -288,6 +349,13 @@ func (cached *FkChild) CachedSize(alloc bool) int64 { { size += hack.RuntimeAllocSize(int64(cap(cached.Cols)) * int64(8)) } + // field NonLiteralInfo []vitess.io/vitess/go/vt/vtgate/engine.NonLiteralUpdateInfo + { + size += hack.RuntimeAllocSize(int64(cap(cached.NonLiteralInfo)) * int64(32)) + for _, elem := range cached.NonLiteralInfo { + size += elem.CachedSize(false) + } + } // field Exec vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Exec.(cachedObject); ok { size += cc.CachedSize(true) @@ -339,12 +407,16 @@ func (cached *GroupByParams) 
CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(80) } // field Expr vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Expr.(cachedObject); ok { size += cc.CachedSize(true) } + // field Type vitess.io/vitess/go/vt/vtgate/evalengine.Type + size += cached.Type.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *HashJoin) CachedSize(alloc bool) int64 { @@ -353,7 +425,7 @@ func (cached *HashJoin) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(112) + size += int64(128) } // field Left vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Left.(cachedObject); ok { @@ -371,6 +443,16 @@ func (cached *HashJoin) CachedSize(alloc bool) int64 { if cc, ok := cached.ASTPred.(cachedObject); ok { size += cc.CachedSize(true) } + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) + // field Values *vitess.io/vitess/go/vt/vtgate/evalengine.EnumSetValues + if cached.Values != nil { + size += int64(24) + size += hack.RuntimeAllocSize(int64(cap(*cached.Values)) * int64(16)) + for _, elem := range *cached.Values { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *Insert) CachedSize(alloc bool) int64 { @@ -379,10 +461,10 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(240) + size += int64(224) } - // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace - size += cached.Keyspace.CachedSize(true) + // field InsertCommon vitess.io/vitess/go/vt/vtgate/engine.InsertCommon + size += cached.InsertCommon.CachedSize(false) // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) // field VindexValues [][][]vitess.io/vitess/go/vt/vtgate/evalengine.Expr @@ -404,19 +486,6 @@ func (cached *Insert) 
CachedSize(alloc bool) int64 { } } } - // field ColVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex - { - size += hack.RuntimeAllocSize(int64(cap(cached.ColVindexes)) * int64(8)) - for _, elem := range cached.ColVindexes { - size += elem.CachedSize(true) - } - } - // field TableName string - size += hack.RuntimeAllocSize(int64(len(cached.TableName))) - // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate - size += cached.Generate.CachedSize(true) - // field Prefix string - size += hack.RuntimeAllocSize(int64(len(cached.Prefix))) // field Mid vitess.io/vitess/go/vt/sqlparser.Values { size += hack.RuntimeAllocSize(int64(cap(cached.Mid)) * int64(24)) @@ -431,8 +500,56 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } } } - // field Suffix string - size += hack.RuntimeAllocSize(int64(len(cached.Suffix))) + // field Alias string + size += hack.RuntimeAllocSize(int64(len(cached.Alias))) + return size +} +func (cached *InsertCommon) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(144) + } + // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace + size += cached.Keyspace.CachedSize(true) + // field TableName string + size += hack.RuntimeAllocSize(int64(len(cached.TableName))) + // field Generate *vitess.io/vitess/go/vt/vtgate/engine.Generate + size += cached.Generate.CachedSize(true) + // field ColVindexes []*vitess.io/vitess/go/vt/vtgate/vindexes.ColumnVindex + { + size += hack.RuntimeAllocSize(int64(cap(cached.ColVindexes)) * int64(8)) + for _, elem := range cached.ColVindexes { + size += elem.CachedSize(true) + } + } + // field Prefix string + size += hack.RuntimeAllocSize(int64(len(cached.Prefix))) + // field Suffix vitess.io/vitess/go/vt/sqlparser.OnDup + { + size += hack.RuntimeAllocSize(int64(cap(cached.Suffix)) * int64(8)) + for _, elem := range cached.Suffix { + size += elem.CachedSize(true) + } + } + return size +} +func (cached *InsertSelect) 
CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(176) + } + // field InsertCommon vitess.io/vitess/go/vt/vtgate/engine.InsertCommon + size += cached.InsertCommon.CachedSize(false) + // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Input.(cachedObject); ok { + size += cc.CachedSize(true) + } // field VindexValueOffset [][]int { size += hack.RuntimeAllocSize(int64(cap(cached.VindexValueOffset)) * int64(24)) @@ -442,10 +559,6 @@ func (cached *Insert) CachedSize(alloc bool) int64 { } } } - // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive - if cc, ok := cached.Input.(cachedObject); ok { - size += cc.CachedSize(true) - } return size } @@ -581,7 +694,10 @@ func (cached *MemorySort) CachedSize(alloc bool) int64 { } // field OrderBy vitess.io/vitess/go/vt/vtgate/evalengine.Comparison { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(27)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(56)) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(false) + } } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { @@ -608,8 +724,23 @@ func (cached *MergeSort) CachedSize(alloc bool) int64 { } // field OrderBy vitess.io/vitess/go/vt/vtgate/evalengine.Comparison { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(27)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(56)) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(false) + } + } + return size +} +func (cached *NonLiteralUpdateInfo) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) } + size := int64(0) + if alloc { + size += int64(32) + } + // field UpdateExprBvName string + size += hack.RuntimeAllocSize(int64(len(cached.UpdateExprBvName))) return size } func (cached *OnlineDDL) CachedSize(alloc bool) int64 { @@ -801,7 +932,10 @@ func 
(cached *Route) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.FieldQuery))) // field OrderBy vitess.io/vitess/go/vt/vtgate/evalengine.Comparison { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(27)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(56)) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(false) + } } // field RoutingParameters *vitess.io/vitess/go/vt/vtgate/engine.RoutingParameters size += cached.RoutingParameters.CachedSize(true) @@ -940,7 +1074,7 @@ func (cached *SemiJoin) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(64) + size += int64(48) } // field Left vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Left.(cachedObject); ok { @@ -950,10 +1084,6 @@ func (cached *SemiJoin) CachedSize(alloc bool) int64 { if cc, ok := cached.Right.(cachedObject); ok { size += cc.CachedSize(true) } - // field Cols []int - { - size += hack.RuntimeAllocSize(int64(cap(cached.Cols)) * int64(8)) - } // field Vars map[string]int if cached.Vars != nil { size += int64(48) @@ -976,7 +1106,7 @@ func (cached *Send) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(64) } // field Keyspace *vitess.io/vitess/go/vt/vtgate/vindexes.Keyspace size += cached.Keyspace.CachedSize(true) @@ -988,6 +1118,25 @@ func (cached *Send) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Query))) return size } +func (cached *Sequential) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Sources []vitess.io/vitess/go/vt/vtgate/engine.Primitive + { + size += hack.RuntimeAllocSize(int64(cap(cached.Sources)) * int64(16)) + for _, elem := range cached.Sources { + if cc, ok := elem.(cachedObject); ok { + size += cc.CachedSize(true) + } + } + } + return size +} func (cached *SessionPrimitive) CachedSize(alloc 
bool) int64 { if cached == nil { return int64(0) @@ -1041,12 +1190,19 @@ func (cached *SimpleProjection) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(64) } // field Cols []int { size += hack.RuntimeAllocSize(int64(cap(cached.Cols)) * int64(8)) } + // field ColNames []string + { + size += hack.RuntimeAllocSize(int64(cap(cached.ColNames)) * int64(16)) + for _, elem := range cached.ColNames { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { size += cc.CachedSize(true) @@ -1200,6 +1356,23 @@ func (cached *UpdateTarget) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.Target))) return size } +func (cached *Upsert) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field Upserts []vitess.io/vitess/go/vt/vtgate/engine.upsert + { + size += hack.RuntimeAllocSize(int64(cap(cached.Upserts)) * int64(32)) + for _, elem := range cached.Upserts { + size += elem.CachedSize(false) + } + } + return size +} func (cached *UserDefinedVariable) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1411,3 +1584,21 @@ func (cached *shardRoute) CachedSize(alloc bool) int64 { } return size } +func (cached *upsert) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Insert vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Insert.(cachedObject); ok { + size += cc.CachedSize(true) + } + // field Update vitess.io/vitess/go/vt/vtgate/engine.Primitive + if cc, ok := cached.Update.(cachedObject); ok { + size += cc.CachedSize(true) + } + return size +} diff --git a/go/vt/vtgate/engine/concatenate.go b/go/vt/vtgate/engine/concatenate.go index 1e8cb655547..13727124e78 100644 --- 
a/go/vt/vtgate/engine/concatenate.go +++ b/go/vt/vtgate/engine/concatenate.go @@ -96,16 +96,16 @@ func (c *Concatenate) TryExecute(ctx context.Context, vcursor VCursor, bindVars return nil, err } - fields, err := c.getFields(res) + fields, fieldTypes, err := c.getFieldTypes(vcursor, res) if err != nil { return nil, err } var rows [][]sqltypes.Value - err = c.coerceAndVisitResults(res, fields, func(result *sqltypes.Result) error { + err = c.coerceAndVisitResults(res, fieldTypes, func(result *sqltypes.Result) error { rows = append(rows, result.Rows...) return nil - }) + }, evalengine.ParseSQLMode(vcursor.SQLMode())) if err != nil { return nil, err } @@ -116,8 +116,8 @@ func (c *Concatenate) TryExecute(ctx context.Context, vcursor VCursor, bindVars }, nil } -func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fields []*querypb.Field) error { - if len(row) != len(fields) { +func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fieldTypes []evalengine.Type, sqlmode evalengine.SQLMode) error { + if len(row) != len(fieldTypes) { return errWrongNumberOfColumnsInSelect } @@ -125,8 +125,8 @@ func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fields []*querypb.Field) if _, found := c.NoNeedToTypeCheck[i]; found { continue } - if fields[i].Type != value.Type() { - newValue, err := evalengine.CoerceTo(value, fields[i].Type) + if fieldTypes[i].Type() != value.Type() { + newValue, err := evalengine.CoerceTo(value, fieldTypes[i], sqlmode) if err != nil { return err } @@ -136,44 +136,44 @@ func (c *Concatenate) coerceValuesTo(row sqltypes.Row, fields []*querypb.Field) return nil } -func (c *Concatenate) getFields(res []*sqltypes.Result) (resultFields []*querypb.Field, err error) { +func (c *Concatenate) getFieldTypes(vcursor VCursor, res []*sqltypes.Result) ([]*querypb.Field, []evalengine.Type, error) { if len(res) == 0 { - return nil, nil + return nil, nil, nil } - resultFields = res[0].Fields - columns := make([][]sqltypes.Type, len(resultFields)) - - addFields := 
func(fields []*querypb.Field) error { - if len(fields) != len(columns) { - return errWrongNumberOfColumnsInSelect - } - for idx, field := range fields { - columns[idx] = append(columns[idx], field.Type) - } - return nil - } + typers := make([]evalengine.TypeAggregator, len(res[0].Fields)) + collations := vcursor.Environment().CollationEnv() for _, r := range res { if r == nil || r.Fields == nil { continue } - err := addFields(r.Fields) - if err != nil { - return nil, err + if len(r.Fields) != len(typers) { + return nil, nil, errWrongNumberOfColumnsInSelect + } + for idx, field := range r.Fields { + if err := typers[idx].AddField(field, collations); err != nil { + return nil, nil, err + } } } - // The resulting column types need to be the coercion of all the input columns - for colIdx, t := range columns { + fields := make([]*querypb.Field, 0, len(typers)) + types := make([]evalengine.Type, 0, len(typers)) + for colIdx, typer := range typers { + f := res[0].Fields[colIdx] + if _, found := c.NoNeedToTypeCheck[colIdx]; found { + fields = append(fields, f) + types = append(types, evalengine.NewTypeFromField(f)) continue } - resultFields[colIdx].Type = evalengine.AggregateTypes(t) + t := typer.Type() + fields = append(fields, t.ToField(f.Name)) + types = append(types, t) } - - return resultFields, nil + return fields, types, nil } func (c *Concatenate) execSources(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) ([]*sqltypes.Result, error) { @@ -228,16 +228,17 @@ func (c *Concatenate) sequentialExec(ctx context.Context, vcursor VCursor, bindV // TryStreamExecute performs a streaming exec. 
func (c *Concatenate) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool, callback func(*sqltypes.Result) error) error { + sqlmode := evalengine.ParseSQLMode(vcursor.SQLMode()) if vcursor.Session().InTransaction() { // as we are in a transaction, we need to execute all queries inside a single connection, // which holds the single transaction we have - return c.sequentialStreamExec(ctx, vcursor, bindVars, callback) + return c.sequentialStreamExec(ctx, vcursor, bindVars, callback, sqlmode) } // not in transaction, so execute in parallel. - return c.parallelStreamExec(ctx, vcursor, bindVars, callback) + return c.parallelStreamExec(ctx, vcursor, bindVars, callback, sqlmode) } -func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, in func(*sqltypes.Result) error) error { +func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, in func(*sqltypes.Result) error, sqlmode evalengine.SQLMode) error { // Scoped context; any early exit triggers cancel() to clean up ongoing work. ctx, cancel := context.WithCancel(inCtx) defer cancel() @@ -249,7 +250,7 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, condFields = sync.NewCond(&muFields) // Condition var for field arrival wg errgroup.Group // Wait group for all streaming goroutines rest = make([]*sqltypes.Result, len(c.Sources)) // Collects first result from each source to derive fields - fields []*querypb.Field // Cached final field types + fieldTypes []evalengine.Type // Cached final field types ) // Process each result chunk, considering type coercion. 
@@ -262,7 +263,7 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, needsCoercion := false for idx, field := range rest[srcIdx].Fields { _, skip := c.NoNeedToTypeCheck[idx] - if !skip && fields[idx].Type != field.Type { + if !skip && fieldTypes[idx].Type() != field.Type { needsCoercion = true break } @@ -271,7 +272,7 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, // Apply type coercion if needed. if needsCoercion { for _, row := range res.Rows { - if err := c.coerceValuesTo(row, fields); err != nil { + if err := c.coerceValuesTo(row, fieldTypes, sqlmode); err != nil { return err } } @@ -284,37 +285,35 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, currIndex, currSource := i, source wg.Go(func() error { err := vcursor.StreamExecutePrimitive(ctx, currSource, bindVars, true, func(resultChunk *sqltypes.Result) error { - // Process fields when they arrive; coordinate field agreement across sources. - if resultChunk.Fields != nil { - muFields.Lock() + muFields.Lock() + // Process fields when they arrive; coordinate field agreement across sources. + if resultChunk.Fields != nil && rest[currIndex] == nil { // Capture the initial result chunk to determine field types later. - if rest[currIndex] == nil { - rest[currIndex] = resultChunk - - // If this was the last source to report its fields, derive the final output fields. - if !slices.Contains(rest, nil) { + rest[currIndex] = resultChunk + + // If this was the last source to report its fields, derive the final output fields. + if !slices.Contains(rest, nil) { + // We have received fields from all sources. We can now calculate the output types + var err error + resultChunk.Fields, fieldTypes, err = c.getFieldTypes(vcursor, rest) + if err != nil { muFields.Unlock() - - // We have received fields from all sources. 
We can now calculate the output types - var err error - fields, err = c.getFields(rest) - if err != nil { - return err - } - resultChunk.Fields = fields - - defer condFields.Broadcast() - return callback(resultChunk, currIndex) + return err } + + muFields.Unlock() + defer condFields.Broadcast() + return callback(resultChunk, currIndex) } - // Wait for fields from all sources. - for slices.Contains(rest, nil) { - condFields.Wait() - } - muFields.Unlock() } + // Wait for fields from all sources. + for slices.Contains(rest, nil) { + condFields.Wait() + } + muFields.Unlock() + // Context check to avoid extra work. if ctx.Err() != nil { return nil @@ -340,7 +339,7 @@ func (c *Concatenate) parallelStreamExec(inCtx context.Context, vcursor VCursor, return wg.Wait() } -func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) error { +func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error, sqlmode evalengine.SQLMode) error { // all the below fields ensure that the fields are sent only once. 
results := make([][]*sqltypes.Result, len(c.Sources)) @@ -369,12 +368,12 @@ func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, firsts[i] = result[0] } - fields, err := c.getFields(firsts) + _, fieldTypes, err := c.getFieldTypes(vcursor, firsts) if err != nil { return err } for _, res := range results { - if err = c.coerceAndVisitResults(res, fields, callback); err != nil { + if err = c.coerceAndVisitResults(res, fieldTypes, callback, sqlmode); err != nil { return err } } @@ -384,25 +383,26 @@ func (c *Concatenate) sequentialStreamExec(ctx context.Context, vcursor VCursor, func (c *Concatenate) coerceAndVisitResults( res []*sqltypes.Result, - fields []*querypb.Field, + fieldTypes []evalengine.Type, callback func(*sqltypes.Result) error, + sqlmode evalengine.SQLMode, ) error { for _, r := range res { if len(r.Rows) > 0 && - len(fields) != len(r.Rows[0]) { + len(fieldTypes) != len(r.Rows[0]) { return errWrongNumberOfColumnsInSelect } needsCoercion := false for idx, field := range r.Fields { - if fields[idx].Type != field.Type { + if fieldTypes[idx].Type() != field.Type { needsCoercion = true break } } if needsCoercion { for _, row := range r.Rows { - err := c.coerceValuesTo(row, fields) + err := c.coerceValuesTo(row, fieldTypes, sqlmode) if err != nil { return err } @@ -418,35 +418,29 @@ func (c *Concatenate) coerceAndVisitResults( // GetFields fetches the field info. 
func (c *Concatenate) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - res, err := c.Sources[0].GetFields(ctx, vcursor, bindVars) - if err != nil { - return nil, err - } - - columns := make([][]sqltypes.Type, len(res.Fields)) - - addFields := func(fields []*querypb.Field) { - for idx, field := range fields { - columns[idx] = append(columns[idx], field.Type) - } - } - - addFields(res.Fields) - - for i := 1; i < len(c.Sources); i++ { - result, err := c.Sources[i].GetFields(ctx, vcursor, bindVars) + sourceFields := make([][]*querypb.Field, 0, len(c.Sources)) + for _, src := range c.Sources { + f, err := src.GetFields(ctx, vcursor, bindVars) if err != nil { return nil, err } - addFields(result.Fields) + sourceFields = append(sourceFields, f.Fields) } - // The resulting column types need to be the coercion of all the input columns - for colIdx, t := range columns { - res.Fields[colIdx].Type = evalengine.AggregateTypes(t) - } + fields := make([]*querypb.Field, 0, len(sourceFields[0])) + collations := vcursor.Environment().CollationEnv() - return res, nil + for colIdx := 0; colIdx < len(sourceFields[0]); colIdx++ { + var typer evalengine.TypeAggregator + for _, src := range sourceFields { + if err := typer.AddField(src[colIdx], collations); err != nil { + return nil, err + } + } + name := sourceFields[0][colIdx].Name + fields = append(fields, typer.Field(name)) + } + return &sqltypes.Result{Fields: fields}, nil } // NeedsTransaction returns whether a transaction is needed for this primitive diff --git a/go/vt/vtgate/engine/concatenate_test.go b/go/vt/vtgate/engine/concatenate_test.go index b886d1312af..dd2b1300e9b 100644 --- a/go/vt/vtgate/engine/concatenate_test.go +++ b/go/vt/vtgate/engine/concatenate_test.go @@ -23,6 +23,7 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" "github.com/stretchr/testify/assert" @@ -32,7 +33,17 @@ import ( ) func 
r(names, types string, rows ...string) *sqltypes.Result { - return sqltypes.MakeTestResult(sqltypes.MakeTestFields(names, types), rows...) + fields := sqltypes.MakeTestFields(names, types) + for _, f := range fields { + if sqltypes.IsText(f.Type) { + f.Charset = collations.CollationUtf8mb4ID + } else { + f.Charset = collations.CollationBinaryID + } + _, flags := sqltypes.TypeToMySQL(f.Type) + f.Flags = uint32(flags) + } + return sqltypes.MakeTestResult(fields, rows...) } func TestConcatenate_NoErrors(t *testing.T) { @@ -173,12 +184,12 @@ func TestConcatenateTypes(t *testing.T) { tests := []struct { t1, t2, expected string }{ - {t1: "int32", t2: "int64", expected: "int64"}, - {t1: "int32", t2: "int32", expected: "int32"}, - {t1: "int32", t2: "varchar", expected: "varchar"}, - {t1: "int32", t2: "decimal", expected: "decimal"}, - {t1: "hexval", t2: "uint64", expected: "varchar"}, - {t1: "varchar", t2: "varbinary", expected: "varbinary"}, + {t1: "int32", t2: "int64", expected: `[name:"id" type:int64 charset:63]`}, + {t1: "int32", t2: "int32", expected: `[name:"id" type:int32 charset:63]`}, + {t1: "int32", t2: "varchar", expected: `[name:"id" type:varchar charset:255]`}, + {t1: "int32", t2: "decimal", expected: `[name:"id" type:decimal charset:63]`}, + {t1: "hexval", t2: "uint64", expected: `[name:"id" type:varchar charset:255]`}, + {t1: "varchar", t2: "varbinary", expected: `[name:"id" type:varbinary charset:63 flags:128]`}, } for _, test := range tests { @@ -196,8 +207,7 @@ func TestConcatenateTypes(t *testing.T) { res, err := concatenate.GetFields(context.Background(), &noopVCursor{}, nil) require.NoError(t, err) - expected := fmt.Sprintf(`[name:"id" type:%s]`, test.expected) - assert.Equal(t, expected, strings.ToLower(fmt.Sprintf("%v", res.Fields))) + assert.Equal(t, test.expected, strings.ToLower(fmt.Sprintf("%v", res.Fields))) }) } } diff --git a/go/vt/vtgate/engine/dbddl.go b/go/vt/vtgate/engine/dbddl.go index be0c5b049b7..60bb4a7202b 100644 --- 
a/go/vt/vtgate/engine/dbddl.go +++ b/go/vt/vtgate/engine/dbddl.go @@ -58,12 +58,12 @@ type DBDDLPlugin interface { // DBDDL is just a container around custom database provisioning plugins // The default behaviour is to just return an error type DBDDL struct { + noInputs + noTxNeeded + name string create bool queryTimeout int - - noInputs - noTxNeeded } // NewDBDDL creates the engine primitive diff --git a/go/vt/vtgate/engine/ddl.go b/go/vt/vtgate/engine/ddl.go index 17aa7945537..cfdaa5866dc 100644 --- a/go/vt/vtgate/engine/ddl.go +++ b/go/vt/vtgate/engine/ddl.go @@ -32,6 +32,9 @@ var _ Primitive = (*DDL)(nil) // DDL represents a DDL statement, either normal or online DDL type DDL struct { + noTxNeeded + noInputs + Keyspace *vindexes.Keyspace SQL string DDL sqlparser.DDLStatement @@ -43,10 +46,6 @@ type DDL struct { OnlineDDLEnabled bool CreateTempTable bool - - noTxNeeded - - noInputs } func (ddl *DDL) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/delete.go b/go/vt/vtgate/engine/delete.go index 5f0f2408993..6e354aae5f5 100644 --- a/go/vt/vtgate/engine/delete.go +++ b/go/vt/vtgate/engine/delete.go @@ -45,7 +45,7 @@ func (del *Delete) TryExecute(ctx context.Context, vcursor VCursor, bindVars map ctx, cancelFunc := addQueryTimeout(ctx, vcursor, del.QueryTimeout) defer cancelFunc() - rss, _, err := del.findRoute(ctx, vcursor, bindVars) + rss, bvs, err := del.findRoute(ctx, vcursor, bindVars) if err != nil { return nil, err } @@ -58,7 +58,7 @@ func (del *Delete) TryExecute(ctx context.Context, vcursor VCursor, bindVars map case Unsharded: return del.execUnsharded(ctx, del, vcursor, bindVars, rss) case Equal, IN, Scatter, ByDestination, SubShard, EqualUnique, MultiEqual: - return del.execMultiDestination(ctx, del, vcursor, bindVars, rss, del.deleteVindexEntries) + return del.execMultiDestination(ctx, del, vcursor, bindVars, rss, del.deleteVindexEntries, bvs) default: // Unreachable. 
return nil, fmt.Errorf("unsupported opcode: %v", del.Opcode) @@ -130,6 +130,7 @@ func (del *Delete) description() PrimitiveDescription { "OwnedVindexQuery": del.OwnedVindexQuery, "MultiShardAutocommit": del.MultiShardAutocommit, "QueryTimeout": del.QueryTimeout, + "NoAutoCommit": del.PreventAutoCommit, } addFieldsIfNotEmpty(del.DML, other) diff --git a/go/vt/vtgate/engine/delete_test.go b/go/vt/vtgate/engine/delete_test.go index be67c7fc9e6..18dcef5cbe4 100644 --- a/go/vt/vtgate/engine/delete_test.go +++ b/go/vt/vtgate/engine/delete_test.go @@ -89,7 +89,7 @@ func TestDeleteEqual(t *testing.T) { }) // Failure case - expr := evalengine.NewBindVar("aa", evalengine.UnknownType()) + expr := evalengine.NewBindVar("aa", evalengine.Type{}) del.Values = []evalengine.Expr{expr} _, err = del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.EqualError(t, err, "query arguments missing for aa") @@ -121,7 +121,7 @@ func TestDeleteEqualMultiCol(t *testing.T) { }) // Failure case - expr := evalengine.NewBindVar("aa", evalengine.UnknownType()) + expr := evalengine.NewBindVar("aa", evalengine.Type{}) del.Values = []evalengine.Expr{expr} _, err = del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.EqualError(t, err, "query arguments missing for aa") @@ -544,7 +544,7 @@ func TestDeleteInChangedVindexMultiCol(t *testing.T) { `Execute delete from lkp_rg_tbl where from = :from and toc = :toc from: type:INT64 value:"6" toc: type:VARBINARY value:"\x01N\xb1\x90ɢ\xfa\x16\x9c" true`, `Execute delete from lkp_rg_tbl where from = :from and toc = :toc from: type:INT64 value:"7" toc: type:VARBINARY value:"\x02N\xb1\x90ɢ\xfa\x16\x9c" true`, // Finally, the actual delete, which is also sent to -20, same route as the subquery. 
- `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals0: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) } @@ -611,3 +611,36 @@ func TestDeleteMultiEqual(t *testing.T) { `ExecuteMultiShard sharded.-20: dummy_delete {} sharded.20-: dummy_delete {} true false`, }) } + +// TestDeleteInUnique is a test function for delete statement using an IN clause with the Vindexes, +// the query is correctly split according to the corresponding values in the IN list. +func TestDeleteInUnique(t *testing.T) { + ks := buildTestVSchema().Keyspaces["sharded"] + upd := &Delete{ + DML: &DML{ + RoutingParameters: &RoutingParameters{ + Opcode: IN, + Keyspace: ks.Keyspace, + Vindex: ks.Vindexes["hash"], + Values: []evalengine.Expr{evalengine.TupleExpr{ + evalengine.NewLiteralInt(1), + evalengine.NewLiteralInt(2), + evalengine.NewLiteralInt(4), + }}}, + Query: "delete t where id in ::__vals", + }, + } + + tupleBV := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: append([]*querypb.Value{sqltypes.ValueToProto(sqltypes.NewInt64(1))}, sqltypes.ValueToProto(sqltypes.NewInt64(2)), sqltypes.ValueToProto(sqltypes.NewInt64(4))), + } + vc := newDMLTestVCursor("-20", "20-") + vc.shardForKsid = []string{"-20", "20-"} + _, err := upd.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{"__vals": tupleBV}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations sharded [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, + `ExecuteMultiShard sharded.-20: delete t where id in ::__vals {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"4"}} sharded.20-: delete t where id in ::__vals {__vals: type:TUPLE values:{type:INT64 value:"2"}} true false`, + }) +} diff --git 
a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go index cd6b93a9f32..189440611c3 100644 --- a/go/vt/vtgate/engine/distinct.go +++ b/go/vt/vtgate/engine/distinct.go @@ -19,12 +19,14 @@ package engine import ( "context" "fmt" + "sync" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vthash" ) // Distinct Primitive is used to uniqueify results @@ -38,131 +40,63 @@ type ( Truncate int } CheckCol struct { - Col int - WsCol *int - Type evalengine.Type + Col int + WsCol *int + Type evalengine.Type + CollationEnv *collations.Environment } probeTable struct { - seenRows map[evalengine.HashCode][]sqltypes.Row - checkCols []CheckCol + seenRows map[vthash.Hash]struct{} + checkCols []CheckCol + sqlmode evalengine.SQLMode + collationEnv *collations.Environment } ) -func (pt *probeTable) exists(inputRow sqltypes.Row) (bool, error) { - // the two prime numbers used here (17 and 31) are used to - // calculate hashcode from all column values in the input sqltypes.Row +func (pt *probeTable) exists(inputRow sqltypes.Row) (sqltypes.Row, error) { code, err := pt.hashCodeForRow(inputRow) if err != nil { - return false, err - } - - existingRows, found := pt.seenRows[code] - if !found { - // nothing with this hash code found, we can be sure it's a not seen sqltypes.Row - pt.seenRows[code] = []sqltypes.Row{inputRow} - return false, nil + return nil, err } - // we found something in the map - still need to check all individual values - // so we don't just fall for a hash collision - for _, existingRow := range existingRows { - exists, err := pt.equal(existingRow, inputRow) - if err != nil { - return false, err - } - if exists { - return true, nil - } + if _, found := pt.seenRows[code]; found { + return nil, nil } - pt.seenRows[code] = append(existingRows, inputRow) - - return false, nil + pt.seenRows[code] = 
struct{}{} + return inputRow, nil } -func (pt *probeTable) hashCodeForRow(inputRow sqltypes.Row) (evalengine.HashCode, error) { - // Why use 17 and 31 in this method? - // Copied from an old usenet discussion on the topic: - // https://groups.google.com/g/comp.programming/c/HSurZEyrZ1E?pli=1#d887b5bdb2dac99d - // > It's a mixture of superstition and good sense. - // > Suppose the multiplier were 26, and consider - // > hashing a hundred-character string. How much influence does - // > the string's first character have on the final value of `h', - // > just before the mod operation? The first character's value - // > will have been multiplied by MULT 99 times, so if the arithmetic - // > were done in infinite precision the value would consist of some - // > jumble of bits followed by 99 low-order zero bits -- each time - // > you multiply by MULT you introduce another low-order zero, right? - // > The computer's finite arithmetic just chops away all the excess - // > high-order bits, so the first character's actual contribution to - // > `h' is ... precisely zero! The `h' value depends only on the - // > rightmost 32 string characters (assuming a 32-bit int), and even - // > then things are not wonderful: the first of those final 32 bytes - // > influences only the leftmost bit of `h' and has no effect on - // > the remaining 31. Clearly, an even-valued MULT is a poor idea. - // > - // > Need MULT be prime? Not as far as I know (I don't know - // > everything); any odd value ought to suffice. 31 may be attractive - // > because it is close to a power of two, and it may be easier for - // > the compiler to replace a possibly slow multiply instruction with - // > a shift and subtract (31*x == (x << 5) - x) on machines where it - // > makes a difference. 
Setting MULT one greater than a power of two - // > (e.g., 33) would also be easy to optimize, but might produce too - // > "simple" an arrangement: mostly a juxtaposition of two copies - // > of the original set of bits, with a little mixing in the middle. - // > So you want an odd MULT that has plenty of one-bits. - - code := evalengine.HashCode(17) +func (pt *probeTable) hashCodeForRow(inputRow sqltypes.Row) (vthash.Hash, error) { + hasher := vthash.New() for i, checkCol := range pt.checkCols { if i >= len(inputRow) { - return 0, vterrors.VT13001("index out of range in row when creating the DISTINCT hash code") + return vthash.Hash{}, vterrors.VT13001("index out of range in row when creating the DISTINCT hash code") } col := inputRow[checkCol.Col] - hashcode, err := evalengine.NullsafeHashcode(col, checkCol.Type.Coll, col.Type()) + err := evalengine.NullsafeHashcode128(&hasher, col, checkCol.Type.Collation(), checkCol.Type.Type(), pt.sqlmode, checkCol.Type.Values()) if err != nil { if err != evalengine.UnsupportedCollationHashError || checkCol.WsCol == nil { - return 0, err + return vthash.Hash{}, err } checkCol = checkCol.SwitchToWeightString() pt.checkCols[i] = checkCol - hashcode, err = evalengine.NullsafeHashcode(inputRow[checkCol.Col], checkCol.Type.Coll, col.Type()) + err = evalengine.NullsafeHashcode128(&hasher, inputRow[checkCol.Col], checkCol.Type.Collation(), checkCol.Type.Type(), pt.sqlmode, checkCol.Type.Values()) if err != nil { - return 0, err + return vthash.Hash{}, err } } - code = code*31 + hashcode - } - return code, nil -} - -func (pt *probeTable) equal(a, b sqltypes.Row) (bool, error) { - for i, checkCol := range pt.checkCols { - cmp, err := evalengine.NullsafeCompare(a[i], b[i], checkCol.Type.Coll) - if err != nil { - _, isCollErr := err.(evalengine.UnsupportedCollationError) - if !isCollErr || checkCol.WsCol == nil { - return false, err - } - checkCol = checkCol.SwitchToWeightString() - pt.checkCols[i] = checkCol - cmp, err = 
evalengine.NullsafeCompare(a[i], b[i], checkCol.Type.Coll) - if err != nil { - return false, err - } - } - if cmp != 0 { - return false, nil - } } - return true, nil + return hasher.Sum128(), nil } -func newProbeTable(checkCols []CheckCol) *probeTable { +func newProbeTable(checkCols []CheckCol, collationEnv *collations.Environment) *probeTable { cols := make([]CheckCol, len(checkCols)) copy(cols, checkCols) return &probeTable{ - seenRows: map[evalengine.HashCode][]sqltypes.Row{}, - checkCols: cols, + seenRows: make(map[vthash.Hash]struct{}), + checkCols: cols, + collationEnv: collationEnv, } } @@ -178,15 +112,15 @@ func (d *Distinct) TryExecute(ctx context.Context, vcursor VCursor, bindVars map InsertID: input.InsertID, } - pt := newProbeTable(d.CheckCols) + pt := newProbeTable(d.CheckCols, vcursor.Environment().CollationEnv()) for _, row := range input.Rows { - exists, err := pt.exists(row) + appendRow, err := pt.exists(row) if err != nil { return nil, err } - if !exists { - result.Rows = append(result.Rows, row) + if appendRow != nil { + result.Rows = append(result.Rows, appendRow) } } if d.Truncate > 0 { @@ -197,20 +131,23 @@ func (d *Distinct) TryExecute(ctx context.Context, vcursor VCursor, bindVars map // TryStreamExecute implements the Primitive interface func (d *Distinct) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - pt := newProbeTable(d.CheckCols) + var mu sync.Mutex + pt := newProbeTable(d.CheckCols, vcursor.Environment().CollationEnv()) err := vcursor.StreamExecutePrimitive(ctx, d.Source, bindVars, wantfields, func(input *sqltypes.Result) error { result := &sqltypes.Result{ Fields: input.Fields, InsertID: input.InsertID, } + mu.Lock() + defer mu.Unlock() for _, row := range input.Rows { - exists, err := pt.exists(row) + appendRow, err := pt.exists(row) if err != nil { return err } - if !exists { - result.Rows = append(result.Rows, row) 
+ if appendRow != nil { + result.Rows = append(result.Rows, appendRow) } } return callback(result.Truncate(len(d.CheckCols))) @@ -272,16 +209,17 @@ func (d *Distinct) description() PrimitiveDescription { // SwitchToWeightString returns a new CheckCol that works on the weight string column instead func (cc CheckCol) SwitchToWeightString() CheckCol { return CheckCol{ - Col: *cc.WsCol, - WsCol: nil, - Type: evalengine.Type{Type: sqltypes.VarBinary, Coll: collations.CollationBinaryID}, + Col: *cc.WsCol, + WsCol: nil, + Type: evalengine.NewType(sqltypes.VarBinary, collations.CollationBinaryID), + CollationEnv: cc.CollationEnv, } } func (cc CheckCol) String() string { var collation string - if sqltypes.IsText(cc.Type.Type) && cc.Type.Coll != collations.Unknown { - collation = ": " + collations.Local().LookupName(cc.Type.Coll) + if cc.Type.Valid() && sqltypes.IsText(cc.Type.Type()) && cc.Type.Collation() != collations.Unknown { + collation = ": " + cc.CollationEnv.LookupName(cc.Type.Collation()) } var column string diff --git a/go/vt/vtgate/engine/distinct_test.go b/go/vt/vtgate/engine/distinct_test.go index 65f8e5d430c..d7fe8786158 100644 --- a/go/vt/vtgate/engine/distinct_test.go +++ b/go/vt/vtgate/engine/distinct_test.go @@ -88,14 +88,10 @@ func TestDistinct(t *testing.T) { if sqltypes.IsNumber(tc.inputs.Fields[i].Type) { collID = collations.CollationBinaryID } - t := evalengine.Type{ - Type: tc.inputs.Fields[i].Type, - Coll: collID, - Nullable: false, - } checkCols = append(checkCols, CheckCol{ - Col: i, - Type: t, + Col: i, + Type: evalengine.NewTypeEx(tc.inputs.Fields[i].Type, collID, false, 0, 0, nil), + CollationEnv: collations.MySQL8(), }) } } @@ -135,12 +131,65 @@ func TestDistinct(t *testing.T) { } } +func TestDistinctStreamAsync(t *testing.T) { + distinct := &Distinct{ + Source: &fakePrimitive{ + results: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("myid|id|num|name", "varchar|int64|int64|varchar"), + "a|1|1|a", + "a|1|1|a", + "a|1|1|a", + 
"a|1|1|a", + "---", + "c|1|1|a", + "a|1|1|a", + "z|1|1|a", + "a|1|1|t", + "a|1|1|a", + "a|1|1|a", + "a|1|1|a", + "---", + "c|1|1|a", + "a|1|1|a", + "---", + "c|1|1|a", + "a|1|1|a", + "a|1|1|a", + "c|1|1|a", + "a|1|1|a", + "a|1|1|a", + "---", + "c|1|1|a", + "a|1|1|a", + ), + async: true, + }, + CheckCols: []CheckCol{ + {Col: 0, Type: evalengine.NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)}, + {Col: 1, Type: evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)}, + {Col: 2, Type: evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)}, + {Col: 3, Type: evalengine.NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)}, + }, + } + + qr := &sqltypes.Result{} + err := distinct.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(result *sqltypes.Result) error { + qr.Rows = append(qr.Rows, result.Rows...) + return nil + }) + require.NoError(t, err) + require.NoError(t, sqltypes.RowsEqualsStr(` +[[VARCHAR("c") INT64(1) INT64(1) VARCHAR("a")] +[VARCHAR("a") INT64(1) INT64(1) VARCHAR("a")] +[VARCHAR("z") INT64(1) INT64(1) VARCHAR("a")] +[VARCHAR("a") INT64(1) INT64(1) VARCHAR("t")]]`, qr.Rows)) +} + func TestWeightStringFallBack(t *testing.T) { offsetOne := 1 checkCols := []CheckCol{{ Col: 0, WsCol: &offsetOne, - Type: evalengine.UnknownType(), + Type: evalengine.NewType(sqltypes.VarBinary, collations.CollationBinaryID), }} input := r("myid|weightstring(myid)", "varchar|varbinary", @@ -165,6 +214,6 @@ func TestWeightStringFallBack(t *testing.T) { utils.MustMatch(t, []CheckCol{{ Col: 0, WsCol: &offsetOne, - Type: evalengine.UnknownType(), + Type: evalengine.NewType(sqltypes.VarBinary, collations.CollationBinaryID), }}, distinct.CheckCols, "checkCols should not be updated") } diff --git a/go/vt/vtgate/engine/dml.go b/go/vt/vtgate/engine/dml.go index 51177f41e08..db777c36698 100644 --- a/go/vt/vtgate/engine/dml.go +++ b/go/vt/vtgate/engine/dml.go @@ -36,6 +36,8 @@ import ( // DML contains the common elements between Update 
and Delete plans type DML struct { + txNeeded + // Query specifies the query to be executed. Query string @@ -61,10 +63,10 @@ type DML struct { // QueryTimeout contains the optional timeout (in milliseconds) to apply to this query QueryTimeout int + PreventAutoCommit bool + // RoutingParameters parameters required for query routing. *RoutingParameters - - txNeeded } // NewDML returns and empty initialized DML struct. @@ -73,10 +75,11 @@ func NewDML() *DML { } func (dml *DML) execUnsharded(ctx context.Context, primitive Primitive, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard) (*sqltypes.Result, error) { - return execShard(ctx, primitive, vcursor, dml.Query, bindVars, rss[0], true /* rollbackOnError */, true /* canAutocommit */) + return execShard(ctx, primitive, vcursor, dml.Query, bindVars, rss[0], true /* rollbackOnError */, !dml.PreventAutoCommit /* canAutocommit */) } -func (dml *DML) execMultiDestination(ctx context.Context, primitive Primitive, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard, dmlSpecialFunc func(context.Context, VCursor, map[string]*querypb.BindVariable, []*srvtopo.ResolvedShard) error) (*sqltypes.Result, error) { +func (dml *DML) execMultiDestination(ctx context.Context, primitive Primitive, vcursor VCursor, bindVars map[string]*querypb.BindVariable, rss []*srvtopo.ResolvedShard, dmlSpecialFunc func(context.Context, VCursor, + map[string]*querypb.BindVariable, []*srvtopo.ResolvedShard) error, bvs []map[string]*querypb.BindVariable) (*sqltypes.Result, error) { if len(rss) == 0 { return &sqltypes.Result{}, nil } @@ -88,7 +91,7 @@ func (dml *DML) execMultiDestination(ctx context.Context, primitive Primitive, v for i := range rss { queries[i] = &querypb.BoundQuery{ Sql: dml.Query, - BindVariables: bindVars, + BindVariables: bvs[i], } } return execMultiShard(ctx, primitive, vcursor, rss, queries, dml.MultiShardAutocommit) diff --git 
a/go/vt/vtgate/engine/dml_with_input.go b/go/vt/vtgate/engine/dml_with_input.go new file mode 100644 index 00000000000..0974f753cef --- /dev/null +++ b/go/vt/vtgate/engine/dml_with_input.go @@ -0,0 +1,192 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vterrors" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +var _ Primitive = (*DMLWithInput)(nil) + +const DmlVals = "dml_vals" + +// DMLWithInput represents the instructions to perform a DML operation based on the input result. +type DMLWithInput struct { + txNeeded + + Input Primitive + + DMLs []Primitive + OutputCols [][]int + BVList []map[string]int +} + +func (dml *DMLWithInput) RouteType() string { + return "DMLWithInput" +} + +func (dml *DMLWithInput) GetKeyspaceName() string { + return dml.Input.GetKeyspaceName() +} + +func (dml *DMLWithInput) GetTableName() string { + return dml.Input.GetTableName() +} + +func (dml *DMLWithInput) Inputs() ([]Primitive, []map[string]any) { + return append([]Primitive{dml.Input}, dml.DMLs...), nil +} + +// TryExecute performs a non-streaming exec. 
+func (dml *DMLWithInput) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { + inputRes, err := vcursor.ExecutePrimitive(ctx, dml.Input, bindVars, false) + if err != nil { + return nil, err + } + if inputRes == nil || len(inputRes.Rows) == 0 { + return &sqltypes.Result{}, nil + } + + var res *sqltypes.Result + for idx, prim := range dml.DMLs { + var qr *sqltypes.Result + if len(dml.BVList) == 0 || len(dml.BVList[idx]) == 0 { + qr, err = executeLiteralUpdate(ctx, vcursor, bindVars, prim, inputRes, dml.OutputCols[idx]) + } else { + qr, err = executeNonLiteralUpdate(ctx, vcursor, bindVars, prim, inputRes, dml.OutputCols[idx], dml.BVList[idx]) + } + if err != nil { + return nil, err + } + + if res == nil { + res = qr + } else { + res.RowsAffected += qr.RowsAffected + } + } + return res, nil +} + +// executeLiteralUpdate executes the primitive that can be executed with a single bind variable from the input result. +// The column updated have same value for all rows in the input result. 
+func executeLiteralUpdate(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, prim Primitive, inputRes *sqltypes.Result, outputCols []int) (*sqltypes.Result, error) { + var bv *querypb.BindVariable + if len(outputCols) == 1 { + bv = getBVSingle(inputRes.Rows, outputCols[0]) + } else { + bv = getBVMulti(inputRes.Rows, outputCols) + } + + bindVars[DmlVals] = bv + return vcursor.ExecutePrimitive(ctx, prim, bindVars, false) +} + +func getBVSingle(rows []sqltypes.Row, offset int) *querypb.BindVariable { + bv := &querypb.BindVariable{Type: querypb.Type_TUPLE} + for _, row := range rows { + bv.Values = append(bv.Values, sqltypes.ValueToProto(row[offset])) + } + return bv +} + +func getBVMulti(rows []sqltypes.Row, offsets []int) *querypb.BindVariable { + bv := &querypb.BindVariable{Type: querypb.Type_TUPLE} + outputVals := make([]sqltypes.Value, 0, len(offsets)) + for _, row := range rows { + for _, offset := range offsets { + outputVals = append(outputVals, row[offset]) + } + bv.Values = append(bv.Values, sqltypes.TupleToProto(outputVals)) + outputVals = outputVals[:0] + } + return bv +} + +// executeNonLiteralUpdate executes the primitive that needs to be executed per row from the input result. +// The column updated might have different value for each row in the input result. 
+func executeNonLiteralUpdate(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, prim Primitive, inputRes *sqltypes.Result, outputCols []int, vars map[string]int) (qr *sqltypes.Result, err error) { + var res *sqltypes.Result + for _, row := range inputRes.Rows { + var bv *querypb.BindVariable + if len(outputCols) == 1 { + bv = getBVSingle([]sqltypes.Row{row}, outputCols[0]) + } else { + bv = getBVMulti([]sqltypes.Row{row}, outputCols) + } + bindVars[DmlVals] = bv + for k, v := range vars { + bindVars[k] = sqltypes.ValueBindVariable(row[v]) + } + qr, err = vcursor.ExecutePrimitive(ctx, prim, bindVars, false) + if err != nil { + return nil, err + } + if res == nil { + res = qr + } else { + res.RowsAffected += res.RowsAffected + } + } + return res, nil +} + +// TryStreamExecute performs a streaming exec. +func (dml *DMLWithInput) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + res, err := dml.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(res) +} + +// GetFields fetches the field info. 
+func (dml *DMLWithInput) GetFields(context.Context, VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.VT13001("unreachable code for DMLs") +} + +func (dml *DMLWithInput) description() PrimitiveDescription { + var offsets []string + for idx, offset := range dml.OutputCols { + offsets = append(offsets, fmt.Sprintf("%d:%v", idx, offset)) + } + other := map[string]any{ + "Offset": offsets, + } + var bvList []string + for idx, vars := range dml.BVList { + if len(vars) == 0 { + continue + } + bvList = append(bvList, fmt.Sprintf("%d:[%s]", idx, orderedStringIntMap(vars))) + } + if len(bvList) > 0 { + other["BindVars"] = bvList + } + return PrimitiveDescription{ + OperatorType: "DMLWithInput", + TargetTabletType: topodatapb.TabletType_PRIMARY, + Other: other, + } +} diff --git a/go/vt/vtgate/engine/dml_with_input_test.go b/go/vt/vtgate/engine/dml_with_input_test.go new file mode 100644 index 00000000000..b41dc9e148c --- /dev/null +++ b/go/vt/vtgate/engine/dml_with_input_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestDeleteWithInputSingleOffset(t *testing.T) { + input := &fakePrimitive{results: []*sqltypes.Result{ + sqltypes.MakeTestResult(sqltypes.MakeTestFields("id", "int64"), "1", "2", "3"), + }} + + del := &DMLWithInput{ + Input: input, + DMLs: []Primitive{&Delete{ + DML: &DML{ + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: &vindexes.Keyspace{ + Name: "ks", + Sharded: true, + }, + }, + Query: "dummy_delete", + }, + }}, + OutputCols: [][]int{{0}}, + } + + vc := newDMLTestVCursor("-20", "20-") + _, err := del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ` + + `ks.-20: dummy_delete {dml_vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"}} ` + + `ks.20-: dummy_delete {dml_vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"}} true false`, + }) + + vc.Rewind() + input.rewind() + err = del.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ` + + `ks.-20: dummy_delete {dml_vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"}} ` + + `ks.20-: dummy_delete {dml_vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"}} true false`, + }) +} + +func 
TestDeleteWithInputMultiOffset(t *testing.T) { + input := &fakePrimitive{results: []*sqltypes.Result{ + sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col", "int64|varchar"), "1|a", "2|b", "3|c"), + }} + + del := &DMLWithInput{ + Input: input, + DMLs: []Primitive{&Delete{ + DML: &DML{ + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: &vindexes.Keyspace{ + Name: "ks", + Sharded: true, + }, + }, + Query: "dummy_delete", + }, + }}, + OutputCols: [][]int{{1, 0}}, + } + + vc := newDMLTestVCursor("-20", "20-") + _, err := del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ` + + `ks.-20: dummy_delete {dml_vals: type:TUPLE values:{type:TUPLE value:"\x950\x01a\x89\x02\x011"} values:{type:TUPLE value:"\x950\x01b\x89\x02\x012"} values:{type:TUPLE value:"\x950\x01c\x89\x02\x013"}} ` + + `ks.20-: dummy_delete {dml_vals: type:TUPLE values:{type:TUPLE value:"\x950\x01a\x89\x02\x011"} values:{type:TUPLE value:"\x950\x01b\x89\x02\x012"} values:{type:TUPLE value:"\x950\x01c\x89\x02\x013"}} true false`, + }) + + vc.Rewind() + input.rewind() + err = del.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ` + + `ks.-20: dummy_delete {dml_vals: type:TUPLE values:{type:TUPLE value:"\x950\x01a\x89\x02\x011"} values:{type:TUPLE value:"\x950\x01b\x89\x02\x012"} values:{type:TUPLE value:"\x950\x01c\x89\x02\x013"}} ` + + `ks.20-: dummy_delete {dml_vals: type:TUPLE values:{type:TUPLE value:"\x950\x01a\x89\x02\x011"} values:{type:TUPLE value:"\x950\x01b\x89\x02\x012"} values:{type:TUPLE value:"\x950\x01c\x89\x02\x013"}} true false`, + }) +} + +func 
TestDeleteWithMultiTarget(t *testing.T) { + input := &fakePrimitive{results: []*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields("id|id|user_id", "int64|int64|int64"), + "1|100|1", "2|100|2", "3|200|3"), + }} + + vindex, _ := vindexes.CreateVindex("hash", "", nil) + + del1 := &Delete{ + DML: &DML{ + RoutingParameters: &RoutingParameters{ + Opcode: IN, + Keyspace: &vindexes.Keyspace{Name: "ks", Sharded: true}, + Vindex: vindex, + Values: []evalengine.Expr{ + &evalengine.BindVariable{Key: "dml_vals", Type: sqltypes.Tuple}, + }, + }, + Query: "dummy_delete_1", + }, + } + + del2 := &Delete{ + DML: &DML{ + RoutingParameters: &RoutingParameters{ + Opcode: MultiEqual, + Keyspace: &vindexes.Keyspace{Name: "ks", Sharded: true}, + Vindex: vindex, + Values: []evalengine.Expr{ + &evalengine.TupleBindVariable{Key: "dml_vals", Index: 1}, + }, + }, + Query: "dummy_delete_2", + }, + } + + del := &DMLWithInput{ + Input: input, + DMLs: []Primitive{del1, del2}, + OutputCols: [][]int{{0}, {1, 2}}, + } + + vc := newDMLTestVCursor("-20", "20-") + _, err := del.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"3"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, + `ExecuteMultiShard ks.-20: dummy_delete_1 {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"} dml_vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"}} true true`, + `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"3"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, + `ExecuteMultiShard ks.-20: dummy_delete_2 {dml_vals: 
type:TUPLE values:{type:TUPLE value:"\x89\x02\x03100\x89\x02\x011"} values:{type:TUPLE value:"\x89\x02\x03100\x89\x02\x012"} values:{type:TUPLE value:"\x89\x02\x03200\x89\x02\x013"}} true true`, + }) + + vc.Rewind() + input.rewind() + err = del.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"3"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, + `ExecuteMultiShard ks.-20: dummy_delete_1 {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"} dml_vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"}} true true`, + `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"3"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, + `ExecuteMultiShard ks.-20: dummy_delete_2 {dml_vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x03100\x89\x02\x011"} values:{type:TUPLE value:"\x89\x02\x03100\x89\x02\x012"} values:{type:TUPLE value:"\x89\x02\x03200\x89\x02\x013"}} true true`, + }) +} diff --git a/go/vt/vtgate/engine/fake_primitive_test.go b/go/vt/vtgate/engine/fake_primitive_test.go index dcec32f1ffd..e992c2a4623 100644 --- a/go/vt/vtgate/engine/fake_primitive_test.go +++ b/go/vt/vtgate/engine/fake_primitive_test.go @@ -23,8 +23,9 @@ import ( "strings" "testing" - "vitess.io/vitess/go/sqltypes" + "golang.org/x/sync/errgroup" + "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -41,6 +42,8 @@ type fakePrimitive struct { log []string allResultsInOneCall bool + + async bool } func (f *fakePrimitive) Inputs() 
([]Primitive, []map[string]any) { @@ -86,6 +89,13 @@ func (f *fakePrimitive) TryStreamExecute(ctx context.Context, vcursor VCursor, b return f.sendErr } + if f.async { + return f.asyncCall(callback) + } + return f.syncCall(wantfields, callback) +} + +func (f *fakePrimitive) syncCall(wantfields bool, callback func(*sqltypes.Result) error) error { readMoreResults := true for readMoreResults && f.curResult < len(f.results) { readMoreResults = f.allResultsInOneCall @@ -116,9 +126,46 @@ func (f *fakePrimitive) TryStreamExecute(ctx context.Context, vcursor VCursor, b } } } - return nil } + +func (f *fakePrimitive) asyncCall(callback func(*sqltypes.Result) error) error { + var g errgroup.Group + var fields []*querypb.Field + if len(f.results) > 0 { + fields = f.results[0].Fields + } + for _, res := range f.results { + qr := res + g.Go(func() error { + if qr == nil { + return f.sendErr + } + if err := callback(&sqltypes.Result{Fields: fields}); err != nil { + return err + } + result := &sqltypes.Result{} + for i := 0; i < len(qr.Rows); i++ { + result.Rows = append(result.Rows, qr.Rows[i]) + // Send only two rows at a time. 
+ if i%2 == 1 { + if err := callback(result); err != nil { + return err + } + result = &sqltypes.Result{} + } + } + if len(result.Rows) != 0 { + if err := callback(result); err != nil { + return err + } + } + return nil + }) + } + return g.Wait() +} + func (f *fakePrimitive) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { f.log = append(f.log, fmt.Sprintf("GetFields %v", printBindVars(bindVars))) return f.TryExecute(ctx, vcursor, bindVars, true /* wantfields */) diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index 6c99af33313..08a40c0d835 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -21,18 +21,23 @@ import ( "context" "fmt" "reflect" + "slices" "sort" "strings" "sync" "testing" "time" + "github.com/google/go-cmp/cmp" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -52,6 +57,7 @@ type noopVCursor struct { inTx bool } +// MySQLVersion implements VCursor. 
func (t *noopVCursor) Commit(ctx context.Context) error { return nil } @@ -125,13 +131,22 @@ func (t *noopVCursor) SetContextWithValue(key, value interface{}) func() { // ConnCollation implements VCursor func (t *noopVCursor) ConnCollation() collations.ID { - return collations.Default() + return collations.MySQL8().DefaultConnectionCharset() +} + +// CollationEnv implements VCursor +func (t *noopVCursor) Environment() *vtenv.Environment { + return vtenv.NewTestEnv() } func (t *noopVCursor) TimeZone() *time.Location { return nil } +func (t *noopVCursor) SQLMode() string { + return config.DefaultSQLMode +} + func (t *noopVCursor) ExecutePrimitive(ctx context.Context, primitive Primitive, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { return primitive.TryExecute(ctx, t, bindVars, wantfields) } @@ -404,6 +419,8 @@ type loggingVCursor struct { ksShardMap map[string][]string shardSession []*srvtopo.ResolvedShard + + parser *sqlparser.Parser } func (f *loggingVCursor) HasCreatedTempTable() { @@ -790,13 +807,21 @@ func (f *loggingVCursor) nextResult() (*sqltypes.Result, error) { } func (f *loggingVCursor) CanUseSetVar() bool { - useSetVar := sqlparser.IsMySQL80AndAbove() && !f.disableSetVar + useSetVar := f.SQLParser().IsMySQL80AndAbove() && !f.disableSetVar if useSetVar { f.log = append(f.log, "SET_VAR can be used") } return useSetVar } +// SQLParser implements VCursor +func (t *loggingVCursor) SQLParser() *sqlparser.Parser { + if t.parser == nil { + return sqlparser.NewTestParser() + } + return t.parser +} + func (t *noopVCursor) VExplainLogging() {} func (t *noopVCursor) DisableLogging() {} func (t *noopVCursor) GetVExplainLogs() []ExecuteEntry { @@ -806,18 +831,39 @@ func (t *noopVCursor) GetLogs() ([]ExecuteEntry, error) { return nil, nil } -func expectResult(t *testing.T, msg string, result, want *sqltypes.Result) { +func expectResult(t *testing.T, result, want *sqltypes.Result) { t.Helper() fieldsResult := fmt.Sprintf("%v", 
result.Fields) fieldsWant := fmt.Sprintf("%v", want.Fields) if fieldsResult != fieldsWant { - t.Errorf("%s (mismatch in Fields):\n%s\nwant:\n%s", msg, fieldsResult, fieldsWant) + t.Errorf("mismatch in Fields\n%s\nwant:\n%s", fieldsResult, fieldsWant) } rowsResult := fmt.Sprintf("%v", result.Rows) rowsWant := fmt.Sprintf("%v", want.Rows) if rowsResult != rowsWant { - t.Errorf("%s (mismatch in Rows):\n%s\nwant:\n%s", msg, rowsResult, rowsWant) + t.Errorf("mismatch in Rows:\n%s\nwant:\n%s", rowsResult, rowsWant) + } +} + +func expectResultAnyOrder(t *testing.T, result, want *sqltypes.Result) { + t.Helper() + f := func(a, b sqltypes.Row) int { + for i := range a { + l := a[i].RawStr() + r := b[i].RawStr() + x := strings.Compare(l, r) + if x == 0 { + continue + } + return x + } + return 0 + } + slices.SortFunc(result.Rows, f) + slices.SortFunc(want.Rows, f) + if diff := cmp.Diff(want, result); diff != "" { + t.Errorf("result: %+v, want %+v\ndiff: %s", result, want, diff) } } diff --git a/go/vt/vtgate/engine/filter.go b/go/vt/vtgate/engine/filter.go index c0a54f2b6ac..dc7af1acfeb 100644 --- a/go/vt/vtgate/engine/filter.go +++ b/go/vt/vtgate/engine/filter.go @@ -18,6 +18,7 @@ package engine import ( "context" + "sync" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -29,13 +30,13 @@ var _ Primitive = (*Filter)(nil) // Filter is a primitive that performs the FILTER operation. type Filter struct { + noTxNeeded + Predicate evalengine.Expr ASTPredicate sqlparser.Expr Input Primitive Truncate int - - noTxNeeded } // RouteType returns a description of the query routing type used by the primitive @@ -78,9 +79,14 @@ func (f *Filter) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[s // TryStreamExecute satisfies the Primitive interface. 
func (f *Filter) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + var mu sync.Mutex + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) filter := func(results *sqltypes.Result) error { var rows [][]sqltypes.Value + + mu.Lock() + defer mu.Unlock() for _, row := range results.Rows { env.Row = row evalResult, err := env.Evaluate(f.Predicate) diff --git a/go/vt/vtgate/engine/filter_test.go b/go/vt/vtgate/engine/filter_test.go index 9a8335e4d7e..fc888019dfe 100644 --- a/go/vt/vtgate/engine/filter_test.go +++ b/go/vt/vtgate/engine/filter_test.go @@ -23,12 +23,15 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" ) func TestFilterPass(t *testing.T) { + collationEnv := collations.MySQL8() utf8mb4Bin := collationEnv.LookupByName("utf8mb4_bin") predicate := &sqlparser.ComparisonExpr{ Operator: sqlparser.GreaterThanOp, @@ -70,6 +73,7 @@ func TestFilterPass(t *testing.T) { pred, err := evalengine.Translate(predicate, &evalengine.Config{ Collation: utf8mb4Bin, ResolveColumn: evalengine.FieldResolver(tc.res.Fields).Column, + Environment: vtenv.NewTestEnv(), }) require.NoError(t, err) @@ -83,3 +87,65 @@ func TestFilterPass(t *testing.T) { }) } } + +func TestFilterStreaming(t *testing.T) { + collationEnv := collations.MySQL8() + utf8mb4Bin := collationEnv.LookupByName("utf8mb4_bin") + predicate := &sqlparser.ComparisonExpr{ + Operator: sqlparser.GreaterThanOp, + Left: sqlparser.NewColName("left"), + Right: sqlparser.NewColName("right"), + } + + tcases := []struct { + name string + res []*sqltypes.Result + expRes string + }{{ + name: "int32", + res: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("left|right", "int32|int32"), "0|1", "---", "1|0", "2|3"), + expRes: 
`[[INT32(1) INT32(0)]]`, + }, { + name: "uint16", + res: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("left|right", "uint16|uint16"), "0|1", "1|0", "---", "2|3"), + expRes: `[[UINT16(1) UINT16(0)]]`, + }, { + name: "uint64_int64", + res: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("left|right", "uint64|int64"), "0|1", "---", "1|0", "2|3"), + expRes: `[[UINT64(1) INT64(0)]]`, + }, { + name: "int32_uint32", + res: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("left|right", "int32|uint32"), "0|1", "---", "1|0", "---", "2|3"), + expRes: `[[INT32(1) UINT32(0)]]`, + }, { + name: "uint16_int8", + res: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("left|right", "uint16|int8"), "0|1", "1|0", "2|3", "---"), + expRes: `[[UINT16(1) INT8(0)]]`, + }, { + name: "uint64_int32", + res: sqltypes.MakeTestStreamingResults(sqltypes.MakeTestFields("left|right", "uint64|int32"), "0|1", "1|0", "2|3", "---", "0|1", "1|3", "5|3"), + expRes: `[[UINT64(1) INT32(0)] [UINT64(5) INT32(3)]]`, + }} + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { + pred, err := evalengine.Translate(predicate, &evalengine.Config{ + Collation: utf8mb4Bin, + ResolveColumn: evalengine.FieldResolver(tc.res[0].Fields).Column, + Environment: vtenv.NewTestEnv(), + }) + require.NoError(t, err) + + filter := &Filter{ + Predicate: pred, + Input: &fakePrimitive{results: tc.res, async: true}, + } + qr := &sqltypes.Result{} + err = filter.TryStreamExecute(context.Background(), &noopVCursor{}, nil, false, func(result *sqltypes.Result) error { + qr.Rows = append(qr.Rows, result.Rows...) 
+ return nil + }) + require.NoError(t, err) + require.NoError(t, sqltypes.RowsEqualsStr(tc.expRes, qr.Rows)) + }) + } +} diff --git a/go/vt/vtgate/engine/fk_cascade.go b/go/vt/vtgate/engine/fk_cascade.go index d0bddbea8f9..35122ac9563 100644 --- a/go/vt/vtgate/engine/fk_cascade.go +++ b/go/vt/vtgate/engine/fk_cascade.go @@ -19,6 +19,7 @@ package engine import ( "context" "fmt" + "maps" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -29,22 +30,37 @@ import ( // FkChild contains the Child Primitive to be executed collecting the values from the Selection Primitive using the column indexes. // BVName is used to pass the value as bind variable to the Child Primitive. type FkChild struct { + // BVName is the bind variable name for the tuple bind variable used in the primitive. BVName string - Cols []int // indexes - Exec Primitive + // Cols are the indexes of the column that need to be selected from the SELECT query to create the tuple bind variable. + Cols []int + // NonLiteralInfo stores the information that is needed to run an update query with non-literal values. + NonLiteralInfo []NonLiteralUpdateInfo + Exec Primitive +} + +// NonLiteralUpdateInfo stores the information required to process non-literal update queries. +// It stores 4 information- +// 1. CompExprCol- The index of the comparison expression in the select query to know if the row value is actually being changed or not. +// 2. UpdateExprCol- The index of the updated expression in the select query. +// 3. UpdateExprBvName- The bind variable name to store the updated expression into. +type NonLiteralUpdateInfo struct { + CompExprCol int + UpdateExprCol int + UpdateExprBvName string } // FkCascade is a primitive that implements foreign key cascading using Selection as values required to execute the FkChild Primitives. // On success, it executes the Parent Primitive. 
type FkCascade struct { + txNeeded + // Selection is the Primitive that is used to find the rows that are going to be modified in the child tables. Selection Primitive // Children is a list of child foreign key Primitives that are executed using rows from the Selection Primitive. Children []*FkChild // Parent is the Primitive that is executed after the children are modified. Parent Primitive - - txNeeded } // RouteType implements the Primitive interface. @@ -82,81 +98,110 @@ func (fkc *FkCascade) TryExecute(ctx context.Context, vcursor VCursor, bindVars } for _, child := range fkc.Children { - // We create a bindVariable for each Child - // that stores the tuple of columns involved in the fk constraint. - bv := &querypb.BindVariable{ - Type: querypb.Type_TUPLE, - } - for _, row := range selectionRes.Rows { - var tupleValues []sqltypes.Value - for _, colIdx := range child.Cols { - tupleValues = append(tupleValues, row[colIdx]) - } - bv.Values = append(bv.Values, sqltypes.TupleToProto(tupleValues)) + // Having non-empty UpdateExprBvNames is an indication that we have an update query with non-literal expressions in it. + // We need to run this query differently because we need to run an update for each row we get back from the SELECT. + if len(child.NonLiteralInfo) > 0 { + err = fkc.executeNonLiteralExprFkChild(ctx, vcursor, bindVars, wantfields, selectionRes, child) + } else { + err = fkc.executeLiteralExprFkChild(ctx, vcursor, bindVars, wantfields, selectionRes, child, false) } - // Execute the child primitive, and bail out incase of failure. - // Since this Primitive is always executed in a transaction, the changes should - // be rolled back incase of an error. - bindVars[child.BVName] = bv - _, err = vcursor.ExecutePrimitive(ctx, child.Exec, bindVars, wantfields) if err != nil { return nil, err } - delete(bindVars, child.BVName) } // All the children are modified successfully, we can now execute the Parent Primitive. 
return vcursor.ExecutePrimitive(ctx, fkc.Parent, bindVars, wantfields) } -// TryStreamExecute implements the Primitive interface. -func (fkc *FkCascade) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - // We create a bindVariable for each Child - // that stores the tuple of columns involved in the fk constraint. - var bindVariables []*querypb.BindVariable - for range fkc.Children { - bindVariables = append(bindVariables, &querypb.BindVariable{ - Type: querypb.Type_TUPLE, - }) +func (fkc *FkCascade) executeLiteralExprFkChild(ctx context.Context, vcursor VCursor, in map[string]*querypb.BindVariable, wantfields bool, selectionRes *sqltypes.Result, child *FkChild, isStreaming bool) error { + bindVars := maps.Clone(in) + // We create a bindVariable that stores the tuple of columns involved in the fk constraint. + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, } + for _, row := range selectionRes.Rows { + var tupleValues []sqltypes.Value - // Execute the Selection primitive to find the rows that are going to modified. - // This will be used to find the rows that need modification on the children. - err := vcursor.StreamExecutePrimitive(ctx, fkc.Selection, bindVars, wantfields, func(result *sqltypes.Result) error { - if len(result.Rows) == 0 { - return nil - } - for idx, child := range fkc.Children { - for _, row := range result.Rows { - var tupleValues []sqltypes.Value - for _, colIdx := range child.Cols { - tupleValues = append(tupleValues, row[colIdx]) - } - bindVariables[idx].Values = append(bindVariables[idx].Values, sqltypes.TupleToProto(tupleValues)) - } + for _, colIdx := range child.Cols { + tupleValues = append(tupleValues, row[colIdx]) } - return nil - }) - if err != nil { - return err + bv.Values = append(bv.Values, sqltypes.TupleToProto(tupleValues)) } - // Execute the child primitive, and bail out incase of failure. 
// Since this Primitive is always executed in a transaction, the changes should // be rolled back incase of an error. - for idx, child := range fkc.Children { - bindVars[child.BVName] = bindVariables[idx] - err = vcursor.StreamExecutePrimitive(ctx, child.Exec, bindVars, wantfields, func(result *sqltypes.Result) error { - return nil - }) + bindVars[child.BVName] = bv + var err error + if isStreaming { + err = vcursor.StreamExecutePrimitive(ctx, child.Exec, bindVars, wantfields, func(result *sqltypes.Result) error { return nil }) + } else { + _, err = vcursor.ExecutePrimitive(ctx, child.Exec, bindVars, wantfields) + } + if err != nil { + return err + } + return nil +} + +func (fkc *FkCascade) executeNonLiteralExprFkChild(ctx context.Context, vcursor VCursor, in map[string]*querypb.BindVariable, wantfields bool, selectionRes *sqltypes.Result, child *FkChild) error { + // For each row in the SELECT we need to run the child primitive. + for _, row := range selectionRes.Rows { + bindVars := maps.Clone(in) + // First we check if any of the columns is being updated at all. + skipRow := true + for _, info := range child.NonLiteralInfo { + // We use a null-safe comparison, so the value is guaranteed to be not null. + // We check if the column has updated or not. + isUnchanged, err := row[info.CompExprCol].ToBool() + if err != nil { + return err + } + if !isUnchanged { + // If any column has changed, then we can't skip this row. + // We need to execute the child primitive. + skipRow = false + break + } + } + // If none of the columns have changed, then there is no update to cascade, we can move on. + if skipRow { + continue + } + // We create a bindVariable that stores the tuple of columns involved in the fk constraint. + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + } + // Create a tuple from the Row. 
+ var tupleValues []sqltypes.Value + for _, colIdx := range child.Cols { + tupleValues = append(tupleValues, row[colIdx]) + } + bv.Values = append(bv.Values, sqltypes.TupleToProto(tupleValues)) + // Execute the child primitive, and bail out incase of failure. + // Since this Primitive is always executed in a transaction, the changes should + // be rolled back in case of an error. + bindVars[child.BVName] = bv + + // Next, we need to copy the updated expressions value into the bind variables map. + for _, info := range child.NonLiteralInfo { + bindVars[info.UpdateExprBvName] = sqltypes.ValueBindVariable(row[info.UpdateExprCol]) + } + _, err := vcursor.ExecutePrimitive(ctx, child.Exec, bindVars, wantfields) if err != nil { return err } - delete(bindVars, child.BVName) } + return nil +} - // All the children are modified successfully, we can now execute the Parent Primitive. - return vcursor.StreamExecutePrimitive(ctx, fkc.Parent, bindVars, wantfields, callback) +// TryStreamExecute implements the Primitive interface. +func (fkc *FkCascade) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + res, err := fkc.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(res) } // Inputs implements the Primitive interface. 
@@ -168,11 +213,15 @@ func (fkc *FkCascade) Inputs() ([]Primitive, []map[string]any) { inputName: "Selection", }) for idx, child := range fkc.Children { - inputsMap = append(inputsMap, map[string]any{ + childInfoMap := map[string]any{ inputName: fmt.Sprintf("CascadeChild-%d", idx+1), "BvName": child.BVName, "Cols": child.Cols, - }) + } + if len(child.NonLiteralInfo) > 0 { + childInfoMap["NonLiteralUpdateInfo"] = child.NonLiteralInfo + } + inputsMap = append(inputsMap, childInfoMap) inputs = append(inputs, child.Exec) } inputs = append(inputs, fkc.Parent) diff --git a/go/vt/vtgate/engine/fk_cascade_test.go b/go/vt/vtgate/engine/fk_cascade_test.go index ddd381003b1..942fe44a709 100644 --- a/go/vt/vtgate/engine/fk_cascade_test.go +++ b/go/vt/vtgate/engine/fk_cascade_test.go @@ -80,7 +80,7 @@ func TestDeleteCascade(t *testing.T) { require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, - `StreamExecuteMulti select cola, colb from parent where foo = 48 ks.0: {} `, + `ExecuteMultiShard ks.0: select cola, colb from parent where foo = 48 {} false false`, `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.0: delete from child where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x012\x950\x01b"}} true true`, `ResolveDestinations ks [] Destinations:DestinationAllShards()`, @@ -141,7 +141,7 @@ func TestUpdateCascade(t *testing.T) { require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, - `StreamExecuteMulti select cola, colb from parent where foo = 48 ks.0: {} `, + `ExecuteMultiShard ks.0: select cola, colb from parent where foo = 48 {} false false`, `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.0: update child set ca = :vtg1 where (ca, cb) in ::__vals {__vals: type:TUPLE values:{type:TUPLE 
value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x012\x950\x01b"}} true true`, `ResolveDestinations ks [] Destinations:DestinationAllShards()`, @@ -149,6 +149,82 @@ func TestUpdateCascade(t *testing.T) { }) } +// TestNonLiteralUpdateCascade tests that FkCascade executes the child and parent primitives for a non-literal update cascade. +func TestNonLiteralUpdateCascade(t *testing.T) { + fakeRes := sqltypes.MakeTestResult(sqltypes.MakeTestFields("cola|cola <=> colb + 2|colb + 2", "int64|int64|int64"), "1|1|3", "2|0|5", "3|0|7") + + inputP := &Route{ + Query: "select cola, cola <=> colb + 2, colb + 2, from parent where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + } + childP := &Update{ + DML: &DML{ + Query: "update child set ca = :fkc_upd where (ca) in ::__vals", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + parentP := &Update{ + DML: &DML{ + Query: "update parent set cola = colb + 2 where foo = 48", + RoutingParameters: &RoutingParameters{ + Opcode: Unsharded, + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + } + fkc := &FkCascade{ + Selection: inputP, + Children: []*FkChild{{ + BVName: "__vals", + Cols: []int{0}, + NonLiteralInfo: []NonLiteralUpdateInfo{ + { + UpdateExprBvName: "fkc_upd", + UpdateExprCol: 2, + CompExprCol: 1, + }, + }, + Exec: childP, + }}, + Parent: parentP, + } + + vc := newDMLTestVCursor("0") + vc.results = []*sqltypes.Result{fakeRes} + _, err := fkc.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select cola, cola <=> colb + 2, colb + 2, from parent where foo = 48 {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca 
= :fkc_upd where (ca) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x012"} fkc_upd: type:INT64 value:"5"} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :fkc_upd where (ca) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x013"} fkc_upd: type:INT64 value:"7"} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update parent set cola = colb + 2 where foo = 48 {} true true`, + }) + + vc.Rewind() + err = fkc.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, true, func(result *sqltypes.Result) error { return nil }) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: select cola, cola <=> colb + 2, colb + 2, from parent where foo = 48 {} false false`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :fkc_upd where (ca) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x012"} fkc_upd: type:INT64 value:"5"} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update child set ca = :fkc_upd where (ca) in ::__vals {__vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x013"} fkc_upd: type:INT64 value:"7"} true true`, + `ResolveDestinations ks [] Destinations:DestinationAllShards()`, + `ExecuteMultiShard ks.0: update parent set cola = colb + 2 where foo = 48 {} true true`, + }) +} + // TestNeedsTransactionInExecPrepared tests that if we have a foreign key cascade inside an ExecStmt plan, then we do mark the plan to require a transaction. func TestNeedsTransactionInExecPrepared(t *testing.T) { // Even if FkCascade is wrapped in ExecStmt, the plan should be marked such that it requires a transaction. 
diff --git a/go/vt/vtgate/engine/fk_verify.go b/go/vt/vtgate/engine/fk_verify.go index 350aeec59e0..7184e5d8381 100644 --- a/go/vt/vtgate/engine/fk_verify.go +++ b/go/vt/vtgate/engine/fk_verify.go @@ -35,10 +35,10 @@ type Verify struct { // FkVerify is a primitive that verifies that the foreign key constraints in parent tables are satisfied. // It does this by executing a select distinct query on the parent table with the values that are being inserted/updated. type FkVerify struct { + txNeeded + Verify []*Verify Exec Primitive - - txNeeded } // constants for verification type. @@ -83,18 +83,11 @@ func (f *FkVerify) TryExecute(ctx context.Context, vcursor VCursor, bindVars map // TryStreamExecute implements the Primitive interface func (f *FkVerify) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - for _, v := range f.Verify { - err := vcursor.StreamExecutePrimitive(ctx, v.Exec, bindVars, wantfields, func(qr *sqltypes.Result) error { - if len(qr.Rows) > 0 { - return getError(v.Typ) - } - return nil - }) - if err != nil { - return err - } + res, err := f.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err } - return vcursor.StreamExecutePrimitive(ctx, f.Exec, bindVars, wantfields, callback) + return callback(res) } // Inputs implements the Primitive interface diff --git a/go/vt/vtgate/engine/fk_verify_test.go b/go/vt/vtgate/engine/fk_verify_test.go index 5635a32bc2c..5c9ff83c2ec 100644 --- a/go/vt/vtgate/engine/fk_verify_test.go +++ b/go/vt/vtgate/engine/fk_verify_test.go @@ -74,7 +74,7 @@ func TestFKVerifyUpdate(t *testing.T) { require.NoError(t, err) vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, - `StreamExecuteMulti select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null ks.0: {} `, + `ExecuteMultiShard ks.0: select 1 from 
child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null {} false false`, `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.0: update child set cola = 1, colb = 'a' where foo = 48 {} true true`, }) @@ -97,7 +97,7 @@ func TestFKVerifyUpdate(t *testing.T) { require.ErrorContains(t, err, "Cannot add or update a child row: a foreign key constraint fails") vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, - `StreamExecuteMulti select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null ks.0: {} `, + `ExecuteMultiShard ks.0: select 1 from child c left join parent p on p.cola = 1 and p.colb = 'a' where p.cola is null and p.colb is null {} false false`, }) }) @@ -119,7 +119,7 @@ func TestFKVerifyUpdate(t *testing.T) { require.ErrorContains(t, err, "Cannot delete or update a parent row: a foreign key constraint fails") vc.ExpectLog(t, []string{ `ResolveDestinations ks [] Destinations:DestinationAllShards()`, - `StreamExecuteMulti select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48 ks.0: {} `, + `ExecuteMultiShard ks.0: select 1 from grandchild g join child c on g.cola = c.cola and g.colb = c.colb where c.foo = 48 {} false false`, }) }) } diff --git a/go/vt/vtgate/engine/fuzz.go b/go/vt/vtgate/engine/fuzz.go index 13dac3a99d4..5d00ed2db23 100644 --- a/go/vt/vtgate/engine/fuzz.go +++ b/go/vt/vtgate/engine/fuzz.go @@ -80,7 +80,7 @@ func execUpdate(f *fuzz.ConsumeFuzzer) { _, _ = upd.TryExecute(ctx, vc, map[string]*querypb.BindVariable{}, false) } -// execUpdate implements a wrapper to fuzz Insert.Tryexecute() +// execInsert implements a wrapper to fuzz Insert.Tryexecute() func execInsert(f *fuzz.ConsumeFuzzer) { ins := &Insert{} err := f.GenerateStruct(ins) @@ -91,7 +91,7 @@ func execInsert(f *fuzz.ConsumeFuzzer) { _, _ = ins.TryExecute(ctx, vc, 
map[string]*querypb.BindVariable{}, false) } -// execUpdate implements a wrapper to fuzz Route.Tryexecute() +// execRoute implements a wrapper to fuzz Route.Tryexecute() func execRoute(f *fuzz.ConsumeFuzzer) { sel := &Route{} err := f.GenerateStruct(sel) diff --git a/go/vt/vtgate/engine/hash_join.go b/go/vt/vtgate/engine/hash_join.go index a38fc21bf97..6ac34e1ab79 100644 --- a/go/vt/vtgate/engine/hash_join.go +++ b/go/vt/vtgate/engine/hash_join.go @@ -20,48 +20,76 @@ import ( "context" "fmt" "strings" + "sync" + "sync/atomic" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vthash" ) var _ Primitive = (*HashJoin)(nil) -// HashJoin specifies the parameters for a join primitive -// Hash joins work by fetch all the input from the LHS, and building a hash map, known as the probe table, for this input. -// The key to the map is the hashcode of the value for column that we are joining by. -// Then the RHS is fetched, and we can check if the rows from the RHS matches any from the LHS. -// When they match by hash code, we double-check that we are not working with a false positive by comparing the values. -type HashJoin struct { - Opcode JoinOpcode - - // Left and Right are the LHS and RHS primitives - // of the Join. They can be any primitive. - Left, Right Primitive `json:",omitempty"` - - // Cols defines which columns from the left - // or right results should be used to build the - // return result. For results coming from the - // left query, the index values go as -1, -2, etc. - // For the right query, they're 1, 2, etc. - // If Cols is {-1, -2, 1, 2}, it means that - // the returned result will be {Left0, Left1, Right0, Right1}. 
- Cols []int `json:",omitempty"` - - // The keys correspond to the column offset in the inputs where - // the join columns can be found - LHSKey, RHSKey int - - // The join condition. Used for plan descriptions - ASTPred sqlparser.Expr - - // collation and type are used to hash the incoming values correctly - Collation collations.ID - ComparisonType querypb.Type -} +type ( + // HashJoin specifies the parameters for a join primitive + // Hash joins work by fetch all the input from the LHS, and building a hash map, known as the probe table, for this input. + // The key to the map is the hashcode of the value for column that we are joining by. + // Then the RHS is fetched, and we can check if the rows from the RHS matches any from the LHS. + // When they match by hash code, we double-check that we are not working with a false positive by comparing the values. + HashJoin struct { + Opcode JoinOpcode + + // Left and Right are the LHS and RHS primitives + // of the Join. They can be any primitive. + Left, Right Primitive `json:",omitempty"` + + // Cols defines which columns from the left + // or right results should be used to build the + // return result. For results coming from the + // left query, the index values go as -1, -2, etc. + // For the right query, they're 1, 2, etc. + // If Cols is {-1, -2, 1, 2}, it means that + // the returned result will be {Left0, Left1, Right0, Right1}. + Cols []int `json:",omitempty"` + + // The keys correspond to the column offset in the inputs where + // the join columns can be found + LHSKey, RHSKey int + + // The join condition. 
Used for plan descriptions + ASTPred sqlparser.Expr + + // collation and type are used to hash the incoming values correctly + Collation collations.ID + ComparisonType querypb.Type + + CollationEnv *collations.Environment + + // Values for enum and set types + Values *evalengine.EnumSetValues + } + + hashJoinProbeTable struct { + innerMap map[vthash.Hash]*probeTableEntry + + coll collations.ID + typ querypb.Type + lhsKey, rhsKey int + cols []int + hasher vthash.Hasher + sqlmode evalengine.SQLMode + values *evalengine.EnumSetValues + } + + probeTableEntry struct { + row sqltypes.Row + next *probeTableEntry + seen bool + } +) // TryExecute implements the Primitive interface func (hj *HashJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { @@ -70,10 +98,13 @@ func (hj *HashJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma return nil, err } + pt := newHashJoinProbeTable(hj.Collation, hj.ComparisonType, hj.LHSKey, hj.RHSKey, hj.Cols, hj.Values) // build the probe table from the LHS result - probeTable, err := hj.buildProbeTable(lresult) - if err != nil { - return nil, err + for _, row := range lresult.Rows { + err := pt.addLeftRow(row) + if err != nil { + return nil, err + } } rresult, err := vcursor.ExecutePrimitive(ctx, hj.Right, bindVars, wantfields) @@ -86,68 +117,37 @@ func (hj *HashJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma } for _, currentRHSRow := range rresult.Rows { - joinVal := currentRHSRow[hj.RHSKey] - if joinVal.IsNull() { - continue - } - hashcode, err := evalengine.NullsafeHashcode(joinVal, hj.Collation, hj.ComparisonType) + matches, err := pt.get(currentRHSRow) if err != nil { return nil, err } - lftRows := probeTable[hashcode] - for _, currentLHSRow := range lftRows { - lhsVal := currentLHSRow[hj.LHSKey] - // hash codes can give false positives, so we need to check with a real comparison as well - cmp, err := 
evalengine.NullsafeCompare(joinVal, lhsVal, hj.Collation) - if err != nil { - return nil, err - } + result.Rows = append(result.Rows, matches...) + } - if cmp == 0 { - // we have a match! - result.Rows = append(result.Rows, joinRows(currentLHSRow, currentRHSRow, hj.Cols)) - } - } + if hj.Opcode == LeftJoin { + result.Rows = append(result.Rows, pt.notFetched()...) } return result, nil } -func (hj *HashJoin) buildProbeTable(lresult *sqltypes.Result) (map[evalengine.HashCode][]sqltypes.Row, error) { - probeTable := map[evalengine.HashCode][]sqltypes.Row{} - for _, current := range lresult.Rows { - joinVal := current[hj.LHSKey] - if joinVal.IsNull() { - continue - } - hashcode, err := evalengine.NullsafeHashcode(joinVal, hj.Collation, hj.ComparisonType) - if err != nil { - return nil, err - } - probeTable[hashcode] = append(probeTable[hashcode], current) - } - return probeTable, nil -} - // TryStreamExecute implements the Primitive interface func (hj *HashJoin) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { // build the probe table from the LHS result - probeTable := map[evalengine.HashCode][]sqltypes.Row{} + pt := newHashJoinProbeTable(hj.Collation, hj.ComparisonType, hj.LHSKey, hj.RHSKey, hj.Cols, hj.Values) var lfields []*querypb.Field + var mu sync.Mutex err := vcursor.StreamExecutePrimitive(ctx, hj.Left, bindVars, wantfields, func(result *sqltypes.Result) error { + mu.Lock() + defer mu.Unlock() if len(lfields) == 0 && len(result.Fields) != 0 { lfields = result.Fields } for _, current := range result.Rows { - joinVal := current[hj.LHSKey] - if joinVal.IsNull() { - continue - } - hashcode, err := evalengine.NullsafeHashcode(joinVal, hj.Collation, hj.ComparisonType) + err := pt.addLeftRow(current) if err != nil { return err } - probeTable[hashcode] = append(probeTable[hashcode], current) } return nil }) @@ -155,43 +155,50 @@ func (hj *HashJoin) 
TryStreamExecute(ctx context.Context, vcursor VCursor, bindV return err } - return vcursor.StreamExecutePrimitive(ctx, hj.Right, bindVars, wantfields, func(result *sqltypes.Result) error { + var sendFields atomic.Bool + sendFields.Store(wantfields) + + err = vcursor.StreamExecutePrimitive(ctx, hj.Right, bindVars, sendFields.Load(), func(result *sqltypes.Result) error { + mu.Lock() + defer mu.Unlock() // compare the results coming from the RHS with the probe-table res := &sqltypes.Result{} - if len(result.Fields) != 0 { - res = &sqltypes.Result{ - Fields: joinFields(lfields, result.Fields, hj.Cols), - } + if len(result.Fields) != 0 && sendFields.CompareAndSwap(true, false) { + res.Fields = joinFields(lfields, result.Fields, hj.Cols) } for _, currentRHSRow := range result.Rows { - joinVal := currentRHSRow[hj.RHSKey] - if joinVal.IsNull() { - continue - } - hashcode, err := evalengine.NullsafeHashcode(joinVal, hj.Collation, hj.ComparisonType) + results, err := pt.get(currentRHSRow) if err != nil { return err } - lftRows := probeTable[hashcode] - for _, currentLHSRow := range lftRows { - lhsVal := currentLHSRow[hj.LHSKey] - // hash codes can give false positives, so we need to check with a real comparison as well - cmp, err := evalengine.NullsafeCompare(joinVal, lhsVal, hj.Collation) - if err != nil { - return err - } - - if cmp == 0 { - // we have a match! - res.Rows = append(res.Rows, joinRows(currentLHSRow, currentRHSRow, hj.Cols)) - } - } + res.Rows = append(res.Rows, results...) 
} if len(res.Rows) != 0 || len(res.Fields) != 0 { return callback(res) } return nil }) + if err != nil { + return err + } + + if hj.Opcode == LeftJoin { + res := &sqltypes.Result{} + if sendFields.CompareAndSwap(true, false) { + // If we still have not sent the fields, we need to fetch + // the fields from the RHS to be able to build the result fields + rres, err := hj.Right.GetFields(ctx, vcursor, bindVars) + if err != nil { + return err + } + res.Fields = joinFields(lfields, rres.Fields, hj.Cols) + } + // this will only be called when all the concurrent access to the pt has + // ceased, so we don't need to lock it here + res.Rows = pt.notFetched() + return callback(res) + } + return nil } // RouteType implements the Primitive interface @@ -248,7 +255,7 @@ func (hj *HashJoin) description() PrimitiveDescription { } coll := hj.Collation if coll != collations.Unknown { - other["Collation"] = collations.Local().LookupName(coll) + other["Collation"] = hj.CollationEnv.LookupName(coll) } return PrimitiveDescription{ OperatorType: "Join", @@ -256,3 +263,70 @@ func (hj *HashJoin) description() PrimitiveDescription { Other: other, } } + +func newHashJoinProbeTable(coll collations.ID, typ querypb.Type, lhsKey, rhsKey int, cols []int, values *evalengine.EnumSetValues) *hashJoinProbeTable { + return &hashJoinProbeTable{ + innerMap: map[vthash.Hash]*probeTableEntry{}, + coll: coll, + typ: typ, + lhsKey: lhsKey, + rhsKey: rhsKey, + cols: cols, + hasher: vthash.New(), + values: values, + } +} + +func (pt *hashJoinProbeTable) addLeftRow(r sqltypes.Row) error { + hash, err := pt.hash(r[pt.lhsKey]) + if err != nil { + return err + } + pt.innerMap[hash] = &probeTableEntry{ + row: r, + next: pt.innerMap[hash], + } + + return nil +} + +func (pt *hashJoinProbeTable) hash(val sqltypes.Value) (vthash.Hash, error) { + err := evalengine.NullsafeHashcode128(&pt.hasher, val, pt.coll, pt.typ, pt.sqlmode, pt.values) + if err != nil { + return vthash.Hash{}, err + } + + res := pt.hasher.Sum128() 
+ pt.hasher.Reset() + return res, nil +} + +func (pt *hashJoinProbeTable) get(rrow sqltypes.Row) (result []sqltypes.Row, err error) { + val := rrow[pt.rhsKey] + if val.IsNull() { + return + } + + hash, err := pt.hash(val) + if err != nil { + return nil, err + } + + for e := pt.innerMap[hash]; e != nil; e = e.next { + e.seen = true + result = append(result, joinRows(e.row, rrow, pt.cols)) + } + + return +} + +func (pt *hashJoinProbeTable) notFetched() (rows []sqltypes.Row) { + for _, e := range pt.innerMap { + for ; e != nil; e = e.next { + if !e.seen { + rows = append(rows, joinRows(e.row, nil, pt.cols)) + } + } + } + return +} diff --git a/go/vt/vtgate/engine/hash_join_test.go b/go/vt/vtgate/engine/hash_join_test.go index 8add0b78fa2..d3271c643be 100644 --- a/go/vt/vtgate/engine/hash_join_test.go +++ b/go/vt/vtgate/engine/hash_join_test.go @@ -22,126 +22,153 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) -func TestHashJoinExecuteSameType(t *testing.T) { - leftPrim := &fakePrimitive{ - results: []*sqltypes.Result{ - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col1|col2|col3", - "int64|varchar|varchar", +func TestHashJoinVariations(t *testing.T) { + // This test tries the different variations of hash-joins: + // comparing values of same type and different types, and both left and right outer joins + lhs := func() Primitive { + return &fakePrimitive{ + results: []*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col1|col2", + "int64|varchar", + ), + "1|1", + "2|2", + "3|b", + "null|b", ), - "1|a|aa", - "2|b|bb", - "3|c|cc", - ), - }, + }, + } } - rightPrim := &fakePrimitive{ - results: []*sqltypes.Result{ - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col4|col5|col6", - "int64|varchar|varchar", + rhs := func() Primitive { + return &fakePrimitive{ + results: 
[]*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "col4|col5", + "int64|varchar", + ), + "1|1", + "3|2", + "5|null", + "4|b", ), - "1|d|dd", - "3|e|ee", - "4|f|ff", - "3|g|gg", - ), - }, + }, + } } - // Normal join - jn := &HashJoin{ - Opcode: InnerJoin, - Left: leftPrim, - Right: rightPrim, - Cols: []int{-1, -2, 1, 2}, - LHSKey: 0, - RHSKey: 0, - } - r, err := jn.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{}, true) - require.NoError(t, err) - leftPrim.ExpectLog(t, []string{ - `Execute true`, - }) - rightPrim.ExpectLog(t, []string{ - `Execute true`, - }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col1|col2|col4|col5", - "int64|varchar|int64|varchar", - ), - "1|a|1|d", - "3|c|3|e", - "3|c|3|g", - )) -} + rows := func(r ...string) []string { return r } -func TestHashJoinExecuteDifferentType(t *testing.T) { - leftPrim := &fakePrimitive{ - results: []*sqltypes.Result{ - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col1|col2|col3", - "int64|varchar|varchar", - ), - "1|a|aa", - "2|b|bb", - "3|c|cc", - "5|c|cc", - ), - }, - } - rightPrim := &fakePrimitive{ - results: []*sqltypes.Result{ - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col4|col5|col6", - "varchar|varchar|varchar", - ), - "1.00|d|dd", - "3|e|ee", - "2.89|z|zz", - "4|f|ff", - "3|g|gg", - " 5.0toto|g|gg", - "w|ww|www", - ), - }, + tests := []struct { + name string + typ JoinOpcode + lhs, rhs int + expected []string + reverse bool + }{{ + name: "inner join, same type", + typ: InnerJoin, + lhs: 0, + rhs: 0, + expected: rows("1|1|1|1", "3|b|3|2"), + }, { + name: "inner join, coercion", + typ: InnerJoin, + lhs: 0, + rhs: 1, + expected: rows("1|1|1|1", "2|2|3|2"), + }, { + name: "left join, same type", + typ: LeftJoin, + lhs: 0, + rhs: 0, + expected: rows("1|1|1|1", "3|b|3|2", "2|2|null|null", "null|b|null|null"), + }, { + name: "left join, coercion", + typ: LeftJoin, + lhs: 0, + rhs: 1, 
+ expected: rows("1|1|1|1", "2|2|3|2", "3|b|null|null", "null|b|null|null"), + }, { + name: "right join, same type", + typ: LeftJoin, + lhs: 0, + rhs: 0, + expected: rows("1|1|1|1", "3|2|3|b", "4|b|null|null", "5|null|null|null"), + reverse: true, + }, { + name: "right join, coercion", + typ: LeftJoin, + lhs: 0, + rhs: 1, + reverse: true, + expected: rows("1|1|1|1", "3|2|null|null", "4|b|null|null", "5|null|null|null"), + }} + + for _, tc := range tests { + + var fields []*querypb.Field + var first, last func() Primitive + if tc.reverse { + first, last = rhs, lhs + fields = sqltypes.MakeTestFields( + "col4|col5|col1|col2", + "int64|varchar|int64|varchar", + ) + } else { + first, last = lhs, rhs + fields = sqltypes.MakeTestFields( + "col1|col2|col4|col5", + "int64|varchar|int64|varchar", + ) + } + + expected := sqltypes.MakeTestResult(fields, tc.expected...) + + typ, err := evalengine.CoerceTypes(typeForOffset(tc.lhs), typeForOffset(tc.rhs), collations.MySQL8()) + require.NoError(t, err) + + jn := &HashJoin{ + Opcode: tc.typ, + Cols: []int{-1, -2, 1, 2}, + LHSKey: tc.lhs, + RHSKey: tc.rhs, + Collation: typ.Collation(), + ComparisonType: typ.Type(), + CollationEnv: collations.MySQL8(), + } + + t.Run(tc.name, func(t *testing.T) { + jn.Left = first() + jn.Right = last() + r, err := jn.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + expectResultAnyOrder(t, r, expected) + }) + t.Run("Streaming "+tc.name, func(t *testing.T) { + jn.Left = first() + jn.Right = last() + r, err := wrapStreamExecute(jn, &noopVCursor{}, map[string]*querypb.BindVariable{}, true) + require.NoError(t, err) + expectResultAnyOrder(t, r, expected) + }) } +} - // Normal join - jn := &HashJoin{ - Opcode: InnerJoin, - Left: leftPrim, - Right: rightPrim, - Cols: []int{-1, -2, 1, 2}, - LHSKey: 0, - RHSKey: 0, - ComparisonType: querypb.Type_FLOAT64, +func typeForOffset(i int) evalengine.Type { + switch i { + case 0: + return 
evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID) + case 1: + return evalengine.NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset()) + default: + panic(i) } - r, err := jn.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{}, true) - require.NoError(t, err) - leftPrim.ExpectLog(t, []string{ - `Execute true`, - }) - rightPrim.ExpectLog(t, []string{ - `Execute true`, - }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "col1|col2|col4|col5", - "int64|varchar|varchar|varchar", - ), - "1|a|1.00|d", - "3|c|3|e", - "3|c|3|g", - "5|c| 5.0toto|g", - )) } diff --git a/go/vt/vtgate/engine/insert.go b/go/vt/vtgate/engine/insert.go index fc65fbfbff9..af2d290d957 100644 --- a/go/vt/vtgate/engine/insert.go +++ b/go/vt/vtgate/engine/insert.go @@ -18,14 +18,10 @@ package engine import ( "context" - "encoding/json" "fmt" "strconv" "strings" - "sync" - "time" - "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -40,93 +36,43 @@ import ( var _ Primitive = (*Insert)(nil) -type ( - // Insert represents the instructions to perform an insert operation. - Insert struct { - // Opcode is the execution opcode. - Opcode InsertOpcode +// Insert represents the instructions to perform an insert operation. +type Insert struct { + noInputs + InsertCommon - // Ignore is for INSERT IGNORE and INSERT...ON DUPLICATE KEY constructs - // for sharded cases. - Ignore bool + // Query specifies the query to be executed. + // For InsertSharded plans, this value is unused, + // and Prefix, Mid and Suffix are used instead. + Query string - // Keyspace specifies the keyspace to send the query to. - Keyspace *vindexes.Keyspace + // VindexValues specifies values for all the vindex columns. 
+ // This is a three-dimensional data structure: + // Insert.Values[i] represents the values to be inserted for the i'th colvindex (i < len(Insert.Table.ColumnVindexes)) + // Insert.Values[i].Values[j] represents values for the j'th column of the given colVindex (j < len(colVindex[i].Columns) + // Insert.Values[i].Values[j].Values[k] represents the value pulled from row k for that column: (k < len(ins.rows)) + VindexValues [][][]evalengine.Expr - // Query specifies the query to be executed. - // For InsertSharded plans, this value is unused, - // and Prefix, Mid and Suffix are used instead. - Query string + // Mid is the row values for the sharded insert plans. + Mid sqlparser.Values - // VindexValues specifies values for all the vindex columns. - // This is a three-dimensional data structure: - // Insert.Values[i] represents the values to be inserted for the i'th colvindex (i < len(Insert.Table.ColumnVindexes)) - // Insert.Values[i].Values[j] represents values for the j'th column of the given colVindex (j < len(colVindex[i].Columns) - // Insert.Values[i].Values[j].Values[k] represents the value pulled from row k for that column: (k < len(ins.rows)) - VindexValues [][][]evalengine.Expr - - // ColVindexes are the vindexes that will use the VindexValues - ColVindexes []*vindexes.ColumnVindex - - // TableName is the name of the table on which row will be inserted. - TableName string - - // Generate is only set for inserts where a sequence must be generated. - Generate *Generate - - // Prefix, Mid and Suffix are for sharded insert plans. - Prefix string - Mid sqlparser.Values - Suffix string - - // Option to override the standard behavior and allow a multi-shard insert - // to use single round trip autocommit. - // - // This is a clear violation of the SQL semantics since it means the statement - // is not atomic in the presence of PK conflicts on one shard and not another. 
- // However some application use cases would prefer that the statement partially - // succeed in order to get the performance benefits of autocommit. - MultiShardAutocommit bool - - // QueryTimeout contains the optional timeout (in milliseconds) to apply to this query - QueryTimeout int - - // VindexValueOffset stores the offset for each column in the ColumnVindex - // that will appear in the result set of the select query. - VindexValueOffset [][]int - - // Input is a select query plan to retrieve results for inserting data. - Input Primitive `json:",omitempty"` - - // ForceNonStreaming is true when the insert table and select table are same. - // This will avoid locking by the select table. - ForceNonStreaming bool - - // Insert needs tx handling - txNeeded - } - - ksID = []byte -) - -func (ins *Insert) Inputs() ([]Primitive, []map[string]any) { - if ins.Input == nil { - return nil, nil - } - return []Primitive{ins.Input}, nil + // Alias represents the row alias with columns if specified in the query. + Alias string } -// NewQueryInsert creates an Insert with a query string. -func NewQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query string) *Insert { +// newQueryInsert creates an Insert with a query string. +func newQueryInsert(opcode InsertOpcode, keyspace *vindexes.Keyspace, query string) *Insert { return &Insert{ - Opcode: opcode, - Keyspace: keyspace, - Query: query, + InsertCommon: InsertCommon{ + Opcode: opcode, + Keyspace: keyspace, + }, + Query: query, } } -// NewInsert creates a new Insert. -func NewInsert( +// newInsert creates a new Insert. 
+func newInsert( opcode InsertOpcode, ignore bool, keyspace *vindexes.Keyspace, @@ -134,16 +80,18 @@ func NewInsert( table *vindexes.Table, prefix string, mid sqlparser.Values, - suffix string, + suffix sqlparser.OnDup, ) *Insert { ins := &Insert{ - Opcode: opcode, - Ignore: ignore, - Keyspace: keyspace, + InsertCommon: InsertCommon{ + Opcode: opcode, + Keyspace: keyspace, + Ignore: ignore, + Prefix: prefix, + Suffix: suffix, + }, VindexValues: vindexValues, - Prefix: prefix, Mid: mid, - Suffix: suffix, } if table != nil { ins.TableName = table.Name.String() @@ -157,208 +105,59 @@ func NewInsert( return ins } -// Generate represents the instruction to generate -// a value from a sequence. -type Generate struct { - Keyspace *vindexes.Keyspace - Query string - // Values are the supplied values for the column, which - // will be stored as a list within the expression. New - // values will be generated based on how many were not - // supplied (NULL). - Values evalengine.Expr - // Insert using Select, offset for auto increment column - Offset int -} - -// InsertOpcode is a number representing the opcode -// for the Insert primitive. -type InsertOpcode int - -const ( - // InsertUnsharded is for routing an insert statement - // to an unsharded keyspace. - InsertUnsharded = InsertOpcode(iota) - // InsertSharded is for routing an insert statement - // to individual shards. Requires: A list of Values, one - // for each ColVindex. If the table has an Autoinc column, - // A Generate subplan must be created. - InsertSharded - // InsertSelect is for routing an insert statement - // based on rows returned from the select statement. 
- InsertSelect -) - -var insName = map[InsertOpcode]string{ - InsertUnsharded: "InsertUnsharded", - InsertSharded: "InsertSharded", - InsertSelect: "InsertSelect", -} - -// String returns the opcode -func (code InsertOpcode) String() string { - return strings.ReplaceAll(insName[code], "Insert", "") -} - -// MarshalJSON serializes the InsertOpcode as a JSON string. -// It's used for testing and diagnostics. -func (code InsertOpcode) MarshalJSON() ([]byte, error) { - return json.Marshal(insName[code]) -} - // RouteType returns a description of the query routing type used by the primitive func (ins *Insert) RouteType() string { return insName[ins.Opcode] } -// GetKeyspaceName specifies the Keyspace that this primitive routes to. -func (ins *Insert) GetKeyspaceName() string { - return ins.Keyspace.Name -} - -// GetTableName specifies the table that this primitive routes to. -func (ins *Insert) GetTableName() string { - return ins.TableName -} - // TryExecute performs a non-streaming exec. -func (ins *Insert) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { +func (ins *Insert) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout) defer cancelFunc() switch ins.Opcode { case InsertUnsharded: - return ins.execInsertUnsharded(ctx, vcursor, bindVars) + return ins.insertIntoUnshardedTable(ctx, vcursor, bindVars) case InsertSharded: - return ins.execInsertSharded(ctx, vcursor, bindVars) - case InsertSelect: - return ins.execInsertFromSelect(ctx, vcursor, bindVars) + return ins.insertIntoShardedTable(ctx, vcursor, bindVars) default: - // Unreachable. - return nil, fmt.Errorf("unsupported query route: %v", ins) + return nil, vterrors.VT13001("unexpected query route: %v", ins.Opcode) } } // TryStreamExecute performs a streaming exec. 
func (ins *Insert) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - if ins.Input == nil || ins.ForceNonStreaming { - res, err := ins.TryExecute(ctx, vcursor, bindVars, wantfields) - if err != nil { - return err - } - return callback(res) - } - if ins.QueryTimeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(ins.QueryTimeout)*time.Millisecond) - defer cancel() - } - - unsharded := ins.Opcode == InsertUnsharded - var mu sync.Mutex - output := &sqltypes.Result{} - - err := vcursor.StreamExecutePrimitiveStandalone(ctx, ins.Input, bindVars, false, func(result *sqltypes.Result) error { - if len(result.Rows) == 0 { - return nil - } - - // should process only one chunk at a time. - // as parallel chunk insert will try to use the same transaction in the vttablet - // this will cause transaction in use error. - mu.Lock() - defer mu.Unlock() - - var insertID int64 - var qr *sqltypes.Result - var err error - if unsharded { - insertID, qr, err = ins.insertIntoUnshardedTable(ctx, vcursor, bindVars, result) - } else { - insertID, qr, err = ins.insertIntoShardedTable(ctx, vcursor, bindVars, result) - } - if err != nil { - return err - } - - output.RowsAffected += qr.RowsAffected - // InsertID needs to be updated to the least insertID value in sqltypes.Result - if output.InsertID == 0 || output.InsertID > uint64(insertID) { - output.InsertID = uint64(insertID) - } - return nil - }) + res, err := ins.TryExecute(ctx, vcursor, bindVars, wantfields) if err != nil { return err } - return callback(output) + return callback(res) } -func (ins *Insert) insertIntoShardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, result *sqltypes.Result) (int64, *sqltypes.Result, error) { - insertID, err := ins.processGenerateFromRows(ctx, vcursor, result.Rows) +func (ins *Insert) 
insertIntoUnshardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + insertID, err := ins.processGenerateFromValues(ctx, vcursor, ins, bindVars) if err != nil { - return 0, nil, err - } - - rss, queries, err := ins.getInsertSelectQueries(ctx, vcursor, bindVars, result.Rows) - if err != nil { - return 0, nil, err - } - - qr, err := ins.executeInsertQueries(ctx, vcursor, rss, queries, insertID) - if err != nil { - return 0, nil, err - } - return insertID, qr, nil -} - -// GetFields fetches the field info. -func (ins *Insert) GetFields(context.Context, VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable code for %q", ins.Query) -} - -func (ins *Insert) execInsertUnsharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - query := ins.Query - if ins.Input != nil { - result, err := vcursor.ExecutePrimitive(ctx, ins.Input, bindVars, false) - if err != nil { - return nil, err - } - if len(result.Rows) == 0 { - return &sqltypes.Result{}, nil - } - query = ins.getInsertQueryForUnsharded(result, bindVars) + return nil, err } - _, qr, err := ins.executeUnshardedTableQuery(ctx, vcursor, bindVars, query) - return qr, err -} - -func (ins *Insert) getInsertQueryForUnsharded(result *sqltypes.Result, bindVars map[string]*querypb.BindVariable) string { - var mids sqlparser.Values - for r, inputRow := range result.Rows { - row := sqlparser.ValTuple{} - for c, value := range inputRow { - bvName := insertVarOffset(r, c) - bindVars[bvName] = sqltypes.ValueBindVariable(value) - row = append(row, sqlparser.NewArgument(bvName)) - } - mids = append(mids, row) - } - return ins.Prefix + sqlparser.String(mids) + ins.Suffix + return ins.executeUnshardedTableQuery(ctx, vcursor, ins, bindVars, ins.Query, uint64(insertID)) } -func (ins *Insert) execInsertSharded(ctx context.Context, 
vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - insertID, err := ins.processGenerateFromValues(ctx, vcursor, bindVars) +func (ins *Insert) insertIntoShardedTable( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, +) (*sqltypes.Result, error) { + insertID, err := ins.processGenerateFromValues(ctx, vcursor, ins, bindVars) if err != nil { return nil, err } - rss, queries, err := ins.getInsertShardedRoute(ctx, vcursor, bindVars) + rss, queries, err := ins.getInsertShardedQueries(ctx, vcursor, bindVars) if err != nil { return nil, err } - return ins.executeInsertQueries(ctx, vcursor, rss, queries, insertID) + return ins.executeInsertQueries(ctx, vcursor, rss, queries, uint64(insertID)) } func (ins *Insert) executeInsertQueries( @@ -366,7 +165,7 @@ func (ins *Insert) executeInsertQueries( vcursor VCursor, rss []*srvtopo.ResolvedShard, queries []*querypb.BoundQuery, - insertID int64, + insertID uint64, ) (*sqltypes.Result, error) { autocommit := (len(rss) == 1 || ins.MultiShardAutocommit) && vcursor.AutocommitApproval() err := allowOnlyPrimary(rss...) 
@@ -379,347 +178,51 @@ func (ins *Insert) executeInsertQueries( } if insertID != 0 { - result.InsertID = uint64(insertID) + result.InsertID = insertID } return result, nil } -func (ins *Insert) getInsertSelectQueries( - ctx context.Context, - vcursor VCursor, - bindVars map[string]*querypb.BindVariable, - rows []sqltypes.Row, -) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { - colVindexes := ins.ColVindexes - if len(colVindexes) != len(ins.VindexValueOffset) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex value offsets and vindex info do not match") - } - - // Here we go over the incoming rows and extract values for the vindexes we need to update - shardingCols := make([][]sqltypes.Row, len(colVindexes)) - for _, inputRow := range rows { - for colIdx := range colVindexes { - offsets := ins.VindexValueOffset[colIdx] - row := make(sqltypes.Row, 0, len(offsets)) - for _, offset := range offsets { - if offset == -1 { // value not provided from select query - row = append(row, sqltypes.NULL) - continue - } - row = append(row, inputRow[offset]) - } - shardingCols[colIdx] = append(shardingCols[colIdx], row) - } - } - - keyspaceIDs, err := ins.processPrimary(ctx, vcursor, shardingCols[0], colVindexes[0]) - if err != nil { - return nil, nil, err - } - - for vIdx := 1; vIdx < len(colVindexes); vIdx++ { - colVindex := colVindexes[vIdx] - var err error - if colVindex.Owned { - err = ins.processOwned(ctx, vcursor, shardingCols[vIdx], colVindex, keyspaceIDs) - } else { - err = ins.processUnowned(ctx, vcursor, shardingCols[vIdx], colVindex, keyspaceIDs) - } - if err != nil { - return nil, nil, err - } - } - - var indexes []*querypb.Value - var destinations []key.Destination - for i, ksid := range keyspaceIDs { - if ksid != nil { - indexes = append(indexes, &querypb.Value{ - Value: strconv.AppendInt(nil, int64(i), 10), - }) - destinations = append(destinations, key.DestinationKeyspaceID(ksid)) - } - } - if len(destinations) == 0 { - // In this case, 
all we have is nil KeyspaceIds, we don't do - // anything at all. - return nil, nil, nil - } - - rss, indexesPerRss, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, indexes, destinations) - if err != nil { - return nil, nil, err - } - - queries := make([]*querypb.BoundQuery, len(rss)) - for i := range rss { - bvs := sqltypes.CopyBindVariables(bindVars) // we don't want to create one huge bindvars for all values - var mids sqlparser.Values - for _, indexValue := range indexesPerRss[i] { - index, _ := strconv.Atoi(string(indexValue.Value)) - if keyspaceIDs[index] != nil { - row := sqlparser.ValTuple{} - for colOffset, value := range rows[index] { - bvName := insertVarOffset(index, colOffset) - bvs[bvName] = sqltypes.ValueBindVariable(value) - row = append(row, sqlparser.NewArgument(bvName)) - } - mids = append(mids, row) - } - } - rewritten := ins.Prefix + sqlparser.String(mids) + ins.Suffix - queries[i] = &querypb.BoundQuery{ - Sql: rewritten, - BindVariables: bvs, - } - } - - return rss, queries, nil -} - -func (ins *Insert) execInsertFromSelect(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { - // run the SELECT query - if ins.Input == nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "something went wrong planning INSERT SELECT") - } - - result, err := vcursor.ExecutePrimitive(ctx, ins.Input, bindVars, false) - if err != nil { - return nil, err - } - if len(result.Rows) == 0 { - return &sqltypes.Result{}, nil - } - - _, qr, err := ins.insertIntoShardedTable(ctx, vcursor, bindVars, result) - return qr, err -} - -// shouldGenerate determines if a sequence value should be generated for a given value -func shouldGenerate(v sqltypes.Value) bool { - if v.IsNull() { - return true - } - - // Unless the NO_AUTO_VALUE_ON_ZERO sql mode is active in mysql, it also - // treats 0 as a value that should generate a new sequence. 
- n, err := v.ToCastUint64() - if err == nil && n == 0 { - return true - } - - return false -} - -// processGenerateFromValues generates new values using a sequence if necessary. -// If no value was generated, it returns 0. Values are generated only -// for cases where none are supplied. -func (ins *Insert) processGenerateFromValues( - ctx context.Context, - vcursor VCursor, - bindVars map[string]*querypb.BindVariable, -) (insertID int64, err error) { - if ins.Generate == nil { - return 0, nil - } - - // Scan input values to compute the number of values to generate, and - // keep track of where they should be filled. - env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) - resolved, err := env.Evaluate(ins.Generate.Values) - if err != nil { - return 0, err - } - count := int64(0) - values := resolved.TupleValues() - for _, val := range values { - if shouldGenerate(val) { - count++ - } - } - - // If generation is needed, generate the requested number of values (as one call). - if count != 0 { - rss, _, err := vcursor.ResolveDestinations(ctx, ins.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) - if err != nil { - return 0, err - } - if len(rss) != 1 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) - } - bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)} - qr, err := vcursor.ExecuteStandalone(ctx, ins, ins.Generate.Query, bindVars, rss[0]) - if err != nil { - return 0, err - } - // If no rows are returned, it's an internal error, and the code - // must panic, which will be caught and reported. - insertID, err = qr.Rows[0][0].ToCastInt64() - if err != nil { - return 0, err - } - } - - // Fill the holes where no value was supplied. 
- cur := insertID - for i, v := range values { - if shouldGenerate(v) { - bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.Int64BindVariable(cur) - cur++ - } else { - bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.ValueBindVariable(v) - } - } - return insertID, nil -} - -// processGenerateFromRows generates new values using a sequence if necessary. -// If no value was generated, it returns 0. Values are generated only -// for cases where none are supplied. -func (ins *Insert) processGenerateFromRows( - ctx context.Context, - vcursor VCursor, - rows []sqltypes.Row, -) (insertID int64, err error) { - if ins.Generate == nil { - return 0, nil - } - var count int64 - offset := ins.Generate.Offset - genColPresent := offset < len(rows[0]) - if genColPresent { - for _, val := range rows { - if val[offset].IsNull() { - count++ - } - } - } else { - count = int64(len(rows)) - } - - if count == 0 { - return 0, nil - } - - // If generation is needed, generate the requested number of values (as one call). - rss, _, err := vcursor.ResolveDestinations(ctx, ins.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) - if err != nil { - return 0, err - } - if len(rss) != 1 { - return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) - } - bindVars := map[string]*querypb.BindVariable{"n": sqltypes.Int64BindVariable(count)} - qr, err := vcursor.ExecuteStandalone(ctx, ins, ins.Generate.Query, bindVars, rss[0]) - if err != nil { - return 0, err - } - // If no rows are returned, it's an internal error, and the code - // must panic, which will be caught and reported. 
- insertID, err = qr.Rows[0][0].ToCastInt64() - if err != nil { - return 0, err - } - - used := insertID - for idx, val := range rows { - if genColPresent { - if val[offset].IsNull() { - val[offset] = sqltypes.NewInt64(used) - used++ - } - } else { - rows[idx] = append(val, sqltypes.NewInt64(used)) - used++ - } - } - - return insertID, nil -} - -// getInsertShardedRoute performs all the vindex related work +// getInsertShardedQueries performs all the vindex related work // and returns a map of shard to queries. // Using the primary vindex, it computes the target keyspace ids. // For owned vindexes, it creates entries. // For unowned vindexes with no input values, it reverse maps. // For unowned vindexes with values, it validates. // If it's an IGNORE or ON DUPLICATE key insert, it drops unroutable rows. -func (ins *Insert) getInsertShardedRoute( +func (ins *Insert) getInsertShardedQueries( ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, ) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { + // vindexRowsValues builds the values of all vindex columns. // the 3-d structure indexes are colVindex, row, col. Note that // ins.Values indexes are colVindex, col, row. So, the conversion // involves a transpose. // The reason we need to transpose is that all the Vindex APIs // require inputs in that format. 
- vindexRowsValues := make([][]sqltypes.Row, len(ins.VindexValues)) - rowCount := 0 - env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) - colVindexes := ins.ColVindexes - for vIdx, vColValues := range ins.VindexValues { - if len(vColValues) != len(colVindexes[vIdx].Columns) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, colVindexes[vIdx].Columns) - } - for colIdx, colValues := range vColValues { - rowsResolvedValues := make(sqltypes.Row, 0, len(colValues)) - for _, colValue := range colValues { - result, err := env.Evaluate(colValue) - if err != nil { - return nil, nil, err - } - rowsResolvedValues = append(rowsResolvedValues, result.Value(vcursor.ConnCollation())) - } - // This is the first iteration: allocate for transpose. - if colIdx == 0 { - if len(rowsResolvedValues) == 0 { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] rowcount is zero for inserts: %v", rowsResolvedValues) - } - if rowCount == 0 { - rowCount = len(rowsResolvedValues) - } - if rowCount != len(rowsResolvedValues) { - return nil, nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] uneven row values for inserts: %d %d", rowCount, len(rowsResolvedValues)) - } - vindexRowsValues[vIdx] = make([]sqltypes.Row, rowCount) - } - // Perform the transpose. - for rowNum, colVal := range rowsResolvedValues { - vindexRowsValues[vIdx][rowNum] = append(vindexRowsValues[vIdx][rowNum], colVal) - } - } + vindexRowsValues, err := ins.buildVindexRowsValues(ctx, vcursor, bindVars) + if err != nil { + return nil, nil, err } // The output from the following 'process' functions is a list of // keyspace ids. For regular inserts, a failure to find a route // results in an error. For 'ignore' type inserts, the keyspace // id is returned as nil, which is used later to drop the corresponding rows. 
- if len(vindexRowsValues) == 0 || len(colVindexes) == 0 { + if len(vindexRowsValues) == 0 || len(ins.ColVindexes) == 0 { return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_FAILED_PRECONDITION, vterrors.RequiresPrimaryKey, vterrors.PrimaryVindexNotSet, ins.TableName) } - keyspaceIDs, err := ins.processPrimary(ctx, vcursor, vindexRowsValues[0], colVindexes[0]) + + keyspaceIDs, err := ins.processVindexes(ctx, vcursor, vindexRowsValues) if err != nil { return nil, nil, err } - for vIdx := 1; vIdx < len(colVindexes); vIdx++ { - colVindex := colVindexes[vIdx] - var err error - if colVindex.Owned { - err = ins.processOwned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) - } else { - err = ins.processUnowned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) - } - if err != nil { - return nil, nil, err - } - } - // Build 3-d bindvars. Skip rows with nil keyspace ids in case // we're executing an insert ignore. - for vIdx, colVindex := range colVindexes { + for vIdx, colVindex := range ins.ColVindexes { for rowNum, rowColumnKeys := range vindexRowsValues[vIdx] { if keyspaceIDs[rowNum] == nil { // InsertIgnore: skip the row. 
@@ -764,25 +267,37 @@ func (ins *Insert) getInsertShardedRoute( for _, indexValue := range indexesPerRss[i] { index, _ := strconv.ParseInt(string(indexValue.Value), 0, 64) if keyspaceIDs[index] != nil { + walkFunc := func(node sqlparser.SQLNode) (kontinue bool, err error) { + var arg string + switch argType := node.(type) { + case *sqlparser.Argument: + arg = argType.Name + case sqlparser.ListArg: + arg = string(argType) + default: + return true, nil + } + bv, exists := bindVars[arg] + if !exists { + return false, vterrors.VT03026(arg) + } + shardBindVars[arg] = bv + return true, nil + } mids = append(mids, sqlparser.String(ins.Mid[index])) for _, expr := range ins.Mid[index] { - err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if arg, ok := node.(*sqlparser.Argument); ok { - bv, exists := bindVars[arg.Name] - if !exists { - return false, vterrors.VT03026(arg.Name) - } - shardBindVars[arg.Name] = bv - } - return true, nil - }, expr, nil) + err = sqlparser.Walk(walkFunc, expr, nil) if err != nil { return nil, nil, err } } + err = sqlparser.Walk(walkFunc, ins.Suffix, nil) + if err != nil { + return nil, nil, err + } } } - rewritten := ins.Prefix + strings.Join(mids, ",") + ins.Suffix + rewritten := ins.Prefix + strings.Join(mids, ",") + ins.Alias + sqlparser.String(ins.Suffix) queries[i] = &querypb.BoundQuery{ Sql: rewritten, BindVariables: shardBindVars, @@ -792,173 +307,50 @@ func (ins *Insert) getInsertShardedRoute( return rss, queries, nil } -// processPrimary maps the primary vindex values to the keyspace ids. 
-func (ins *Insert) processPrimary(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex) ([]ksID, error) { - destinations, err := vindexes.Map(ctx, colVindex.Vindex, vcursor, vindexColumnsKeys) - if err != nil { - return nil, err - } - - keyspaceIDs := make([]ksID, len(destinations)) - for i, destination := range destinations { - switch d := destination.(type) { - case key.DestinationKeyspaceID: - // This is a single keyspace id, we're good. - keyspaceIDs[i] = d - case key.DestinationNone: - // No valid keyspace id, we may return an error. - if !ins.Ignore { - return nil, fmt.Errorf("could not map %v to a keyspace id", vindexColumnsKeys[i]) - } - default: - return nil, fmt.Errorf("could not map %v to a unique keyspace id: %v", vindexColumnsKeys[i], destination) - } - } - - return keyspaceIDs, nil -} - -// processOwned creates vindex entries for the values of an owned column. -func (ins *Insert) processOwned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { - if !ins.Ignore { - return colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, vindexColumnsKeys, ksids, false /* ignoreMode */) - } - - // InsertIgnore - var createIndexes []int - var createKeys []sqltypes.Row - var createKsids []ksID - - for rowNum, rowColumnKeys := range vindexColumnsKeys { - if ksids[rowNum] == nil { - continue - } - createIndexes = append(createIndexes, rowNum) - createKeys = append(createKeys, rowColumnKeys) - createKsids = append(createKsids, ksids[rowNum]) - } - if createKeys == nil { - return nil - } - - err := colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, createKeys, createKsids, true) - if err != nil { - return err - } - // After creation, verify that the keys map to the keyspace ids. If not, remove - // those that don't map. 
- verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, createKeys, createKsids) - if err != nil { - return err - } - for i, v := range verified { - if !v { - ksids[createIndexes[i]] = nil - } - } - return nil -} - -// processUnowned either reverse maps or validates the values for an unowned column. -func (ins *Insert) processUnowned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { - var reverseIndexes []int - var reverseKsids []ksID - - var verifyIndexes []int - var verifyKeys []sqltypes.Row - var verifyKsids []ksID - - // Check if this VIndex is reversible or not. - reversibleVindex, isReversible := colVindex.Vindex.(vindexes.Reversible) - - for rowNum, rowColumnKeys := range vindexColumnsKeys { - // If we weren't able to determine a keyspace id from the primary VIndex, skip this row - if ksids[rowNum] == nil { - continue +func (ins *Insert) buildVindexRowsValues(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) ([][]sqltypes.Row, error) { + vindexRowsValues := make([][]sqltypes.Row, len(ins.VindexValues)) + rowCount := 0 + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) + colVindexes := ins.ColVindexes + for vIdx, vColValues := range ins.VindexValues { + if len(vColValues) != len(colVindexes[vIdx].Columns) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] supplied vindex column values don't match vschema: %v %v", vColValues, colVindexes[vIdx].Columns) } - - if rowColumnKeys[0].IsNull() { - // If the value of the column is `NULL`, but this is a reversible VIndex, - // we will try to generate the value from the keyspace id generated by the primary VIndex. 
- if isReversible { - reverseIndexes = append(reverseIndexes, rowNum) - reverseKsids = append(reverseKsids, ksids[rowNum]) + for colIdx, colValues := range vColValues { + rowsResolvedValues := make(sqltypes.Row, 0, len(colValues)) + for _, colValue := range colValues { + result, err := env.Evaluate(colValue) + if err != nil { + return nil, err + } + rowsResolvedValues = append(rowsResolvedValues, result.Value(vcursor.ConnCollation())) } - - // Otherwise, don't do anything. Whether `NULL` is a valid value for this column will be - // handled by MySQL. - } else { - // If a value for this column was specified, the keyspace id values from the - // secondary VIndex need to be verified against the keyspace id from the primary VIndex - verifyIndexes = append(verifyIndexes, rowNum) - verifyKeys = append(verifyKeys, rowColumnKeys) - verifyKsids = append(verifyKsids, ksids[rowNum]) - } - } - - // Reverse map values for secondary VIndex columns from the primary VIndex's keyspace id. - if reverseKsids != nil { - reverseKeys, err := reversibleVindex.ReverseMap(vcursor, reverseKsids) - if err != nil { - return err - } - - for i, reverseKey := range reverseKeys { - // Fill the first column with the reverse-mapped value. - vindexColumnsKeys[reverseIndexes[i]][0] = reverseKey - } - } - - // Verify that the keyspace ids generated by the primary and secondary VIndexes match - if verifyIndexes != nil { - // If values were supplied, we validate against keyspace id. - verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, verifyKeys, verifyKsids) - if err != nil { - return err - } - - var mismatchVindexKeys []sqltypes.Row - for i, v := range verified { - rowNum := verifyIndexes[i] - if !v { - if !ins.Ignore { - mismatchVindexKeys = append(mismatchVindexKeys, vindexColumnsKeys[rowNum]) - continue + // This is the first iteration: allocate for transpose. 
+ if colIdx == 0 { + if len(rowsResolvedValues) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] rowcount is zero for inserts: %v", rowsResolvedValues) } - - // Skip the whole row if this is a `INSERT IGNORE` or `INSERT ... ON DUPLICATE KEY ...` statement - // but the keyspace ids didn't match. - ksids[verifyIndexes[i]] = nil + if rowCount == 0 { + rowCount = len(rowsResolvedValues) + } + if rowCount != len(rowsResolvedValues) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] uneven row values for inserts: %d %d", rowCount, len(rowsResolvedValues)) + } + vindexRowsValues[vIdx] = make([]sqltypes.Row, rowCount) + } + // Perform the transpose. + for rowNum, colVal := range rowsResolvedValues { + vindexRowsValues[vIdx][rowNum] = append(vindexRowsValues[vIdx][rowNum], colVal) } - } - - if mismatchVindexKeys != nil { - return fmt.Errorf("values %v for column %v does not map to keyspace ids", mismatchVindexKeys, colVindex.Columns) } } - - return nil -} - -// InsertVarName returns a name for the bind var for this column. 
This method is used by the planner and engine, -// to make sure they both produce the same names -func InsertVarName(col sqlparser.IdentifierCI, rowNum int) string { - return fmt.Sprintf("_%s_%d", col.CompliantName(), rowNum) -} - -func insertVarOffset(rowNum, colOffset int) string { - return fmt.Sprintf("_c%d_%d", rowNum, colOffset) + return vindexRowsValues, nil } func (ins *Insert) description() PrimitiveDescription { - other := map[string]any{ - "Query": ins.Query, - "TableName": ins.GetTableName(), - "MultiShardAutocommit": ins.MultiShardAutocommit, - "QueryTimeout": ins.QueryTimeout, - "InsertIgnore": ins.Ignore, - "InputAsNonStreaming": ins.ForceNonStreaming, - } + other := ins.commonDesc() + other["Query"] = ins.Query + other["TableName"] = ins.GetTableName() if len(ins.VindexValues) > 0 { valuesOffsets := map[string]string{} @@ -981,35 +373,19 @@ func (ins *Insert) description() PrimitiveDescription { other["VindexValues"] = valuesOffsets } - if ins.Generate != nil { - if ins.Generate.Values == nil { - other["AutoIncrement"] = fmt.Sprintf("%s:Offset(%d)", ins.Generate.Query, ins.Generate.Offset) - } else { - other["AutoIncrement"] = fmt.Sprintf("%s:Values::%s", ins.Generate.Query, sqlparser.String(ins.Generate.Values)) - } - } - - if len(ins.VindexValueOffset) > 0 { - valuesOffsets := map[string]string{} - for idx, ints := range ins.VindexValueOffset { - if len(ins.ColVindexes) < idx { - panic("ins.ColVindexes and ins.VindexValueOffset do not line up") - } - vindex := ins.ColVindexes[idx] - marshal, _ := json.Marshal(ints) - valuesOffsets[vindex.Name] = string(marshal) + // This is a check to ensure we send the correct query to the database. + // "ActualQuery" should not be part of the plan output; if it is, it means the query was not rewritten correctly. 
+ if ins.Mid != nil { + var mids []string + for _, n := range ins.Mid { + mids = append(mids, sqlparser.String(n)) } - other["VindexOffsetFromSelect"] = valuesOffsets - } - if len(ins.Mid) > 0 { - mids := slice.Map(ins.Mid, func(from sqlparser.ValTuple) string { - return sqlparser.String(from) - }) - shardQuery := fmt.Sprintf("%s%s%s", ins.Prefix, strings.Join(mids, ", "), ins.Suffix) - if shardQuery != ins.Query { - other["ShardedQuery"] = shardQuery + shardedQuery := ins.Prefix + strings.Join(mids, ", ") + ins.Alias + sqlparser.String(ins.Suffix) + if shardedQuery != ins.Query { + other["ActualQuery"] = shardedQuery } } + return PrimitiveDescription{ OperatorType: "Insert", Keyspace: ins.Keyspace, @@ -1019,41 +395,8 @@ func (ins *Insert) description() PrimitiveDescription { } } -func (ins *Insert) insertIntoUnshardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, result *sqltypes.Result) (int64, *sqltypes.Result, error) { - query := ins.getInsertQueryForUnsharded(result, bindVars) - return ins.executeUnshardedTableQuery(ctx, vcursor, bindVars, query) -} - -func (ins *Insert) executeUnshardedTableQuery(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, query string) (int64, *sqltypes.Result, error) { - insertID, err := ins.processGenerateFromValues(ctx, vcursor, bindVars) - if err != nil { - return 0, nil, err - } - - rss, _, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) - if err != nil { - return 0, nil, err - } - if len(rss) != 1 { - return 0, nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) - } - err = allowOnlyPrimary(rss...) 
- if err != nil { - return 0, nil, err - } - qr, err := execShard(ctx, ins, vcursor, query, bindVars, rss[0], true, true /* canAutocommit */) - if err != nil { - return 0, nil, err - } - - // If processGenerateFromValues generated new values, it supercedes - // any ids that MySQL might have generated. If both generated - // values, we don't return an error because this behavior - // is required to support migration. - if insertID != 0 { - qr.InsertID = uint64(insertID) - } else { - insertID = int64(qr.InsertID) - } - return insertID, qr, nil +// InsertVarName returns a name for the bind var for this column. This method is used by the planner and engine, +// to make sure they both produce the same names +func InsertVarName(col sqlparser.IdentifierCI, rowNum int) string { + return fmt.Sprintf("_%s_%d", col.CompliantName(), rowNum) } diff --git a/go/vt/vtgate/engine/insert_common.go b/go/vt/vtgate/engine/insert_common.go new file mode 100644 index 00000000000..e29aa7fd792 --- /dev/null +++ b/go/vt/vtgate/engine/insert_common.go @@ -0,0 +1,482 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package engine + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +type ( + InsertCommon struct { + // Insert needs tx handling + txNeeded + + // Opcode is the execution opcode. + Opcode InsertOpcode + + // Keyspace specifies the keyspace to send the query to. + Keyspace *vindexes.Keyspace + + // Ignore is for INSERT IGNORE and INSERT...ON DUPLICATE KEY constructs + // for sharded cases. + Ignore bool + + // TableName is the name of the table on which row will be inserted. + TableName string + + // Option to override the standard behavior and allow a multi-shard insert + // to use single round trip autocommit. + // + // This is a clear violation of the SQL semantics since it means the statement + // is not atomic in the presence of PK conflicts on one shard and not another. + // However, some application use cases would prefer that the statement partially + // succeed in order to get the performance benefits of autocommit. + MultiShardAutocommit bool + + // QueryTimeout contains the optional timeout (in milliseconds) to apply to this query + QueryTimeout int + + // ForceNonStreaming is true when the insert table and select table are same. + // This will avoid locking by the select table. + ForceNonStreaming bool + + PreventAutoCommit bool + + // Generate is only set for inserts where a sequence must be generated. + Generate *Generate + + // ColVindexes are the vindexes that will use the VindexValues + ColVindexes []*vindexes.ColumnVindex + + // Prefix, Suffix are for sharded insert plans. 
+ Prefix string + Suffix sqlparser.OnDup + } + + ksID = []byte + + // Generate represents the instruction to generate + // a value from a sequence. + Generate struct { + Keyspace *vindexes.Keyspace + Query string + // Values are the supplied values for the column, which + // will be stored as a list within the expression. New + // values will be generated based on how many were not + // supplied (NULL). + Values evalengine.Expr + // Insert using Select, offset for auto increment column + Offset int + } + + // InsertOpcode is a number representing the opcode + // for the Insert primitive. + InsertOpcode int +) + +const nextValBV = "n" + +const ( + // InsertUnsharded is for routing an insert statement + // to an unsharded keyspace. + InsertUnsharded = InsertOpcode(iota) + // InsertSharded is for routing an insert statement + // to individual shards. Requires: A list of Values, one + // for each ColVindex. If the table has an Autoinc column, + // A Generate subplan must be created. + InsertSharded +) + +var insName = map[InsertOpcode]string{ + InsertUnsharded: "InsertUnsharded", + InsertSharded: "InsertSharded", +} + +// String returns the opcode +func (code InsertOpcode) String() string { + return strings.ReplaceAll(insName[code], "Insert", "") +} + +// MarshalJSON serializes the InsertOpcode as a JSON string. +// It's used for testing and diagnostics. +func (code InsertOpcode) MarshalJSON() ([]byte, error) { + return json.Marshal(insName[code]) +} + +// GetKeyspaceName specifies the Keyspace that this primitive routes to. +func (ic *InsertCommon) GetKeyspaceName() string { + return ic.Keyspace.Name +} + +// GetTableName specifies the table that this primitive routes to. +func (ic *InsertCommon) GetTableName() string { + return ic.TableName +} + +// GetFields fetches the field info. 
+func (ic *InsertCommon) GetFields(context.Context, VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.VT13001("unexpected fields call for insert query") +} + +func (ins *InsertCommon) executeUnshardedTableQuery(ctx context.Context, vcursor VCursor, loggingPrimitive Primitive, bindVars map[string]*querypb.BindVariable, query string, insertID uint64) (*sqltypes.Result, error) { + rss, _, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, nil, []key.Destination{key.DestinationAllShards{}}) + if err != nil { + return nil, err + } + if len(rss) != 1 { + return nil, vterrors.VT09022(rss) + } + err = allowOnlyPrimary(rss...) + if err != nil { + return nil, err + } + qr, err := execShard(ctx, loggingPrimitive, vcursor, query, bindVars, rss[0], true, !ins.PreventAutoCommit /* canAutocommit */) + if err != nil { + return nil, err + } + + // If processGenerateFromValues generated new values, it supersedes + // any ids that MySQL might have generated. If both generated + // values, we don't return an error because this behavior + // is required to support migration. + if insertID != 0 { + qr.InsertID = insertID + } + return qr, nil +} + +func (ins *InsertCommon) processVindexes(ctx context.Context, vcursor VCursor, vindexRowsValues [][]sqltypes.Row) ([]ksID, error) { + colVindexes := ins.ColVindexes + keyspaceIDs, err := ins.processPrimary(ctx, vcursor, vindexRowsValues[0], colVindexes[0]) + if err != nil { + return nil, err + } + + for vIdx := 1; vIdx < len(colVindexes); vIdx++ { + colVindex := colVindexes[vIdx] + if colVindex.Owned { + err = ins.processOwned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) + } else { + err = ins.processUnowned(ctx, vcursor, vindexRowsValues[vIdx], colVindex, keyspaceIDs) + } + if err != nil { + return nil, err + } + } + return keyspaceIDs, nil +} + +// processPrimary maps the primary vindex values to the keyspace ids. 
+func (ic *InsertCommon) processPrimary(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex) ([]ksID, error) { + destinations, err := vindexes.Map(ctx, colVindex.Vindex, vcursor, vindexColumnsKeys) + if err != nil { + return nil, err + } + + keyspaceIDs := make([]ksID, len(destinations)) + for i, destination := range destinations { + switch d := destination.(type) { + case key.DestinationKeyspaceID: + // This is a single keyspace id, we're good. + keyspaceIDs[i] = d + case key.DestinationNone: + // Not a valid keyspace id, so we cannot determine which shard this row belongs to. + // We have to return an error. + return nil, vterrors.VT09023(vindexColumnsKeys[i]) + default: + return nil, vterrors.VT09024(vindexColumnsKeys[i], destination) + } + } + + return keyspaceIDs, nil +} + +// processOwned creates vindex entries for the values of an owned column. +func (ic *InsertCommon) processOwned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { + if !ic.Ignore { + return colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, vindexColumnsKeys, ksids, false /* ignoreMode */) + } + + // InsertIgnore + var createIndexes []int + var createKeys []sqltypes.Row + var createKsids []ksID + + for rowNum, rowColumnKeys := range vindexColumnsKeys { + if ksids[rowNum] == nil { + continue + } + createIndexes = append(createIndexes, rowNum) + createKeys = append(createKeys, rowColumnKeys) + createKsids = append(createKsids, ksids[rowNum]) + } + if createKeys == nil { + return nil + } + + err := colVindex.Vindex.(vindexes.Lookup).Create(ctx, vcursor, createKeys, createKsids, true) + if err != nil { + return err + } + // After creation, verify that the keys map to the keyspace ids. If not, remove + // those that don't map. 
+ verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, createKeys, createKsids) + if err != nil { + return err + } + for i, v := range verified { + if !v { + ksids[createIndexes[i]] = nil + } + } + return nil +} + +// processUnowned either reverse maps or validates the values for an unowned column. +func (ic *InsertCommon) processUnowned(ctx context.Context, vcursor VCursor, vindexColumnsKeys []sqltypes.Row, colVindex *vindexes.ColumnVindex, ksids []ksID) error { + var reverseIndexes []int + var reverseKsids []ksID + + var verifyIndexes []int + var verifyKeys []sqltypes.Row + var verifyKsids []ksID + + // Check if this VIndex is reversible or not. + reversibleVindex, isReversible := colVindex.Vindex.(vindexes.Reversible) + + for rowNum, rowColumnKeys := range vindexColumnsKeys { + // If we weren't able to determine a keyspace id from the primary VIndex, skip this row + if ksids[rowNum] == nil { + continue + } + + if rowColumnKeys[0].IsNull() { + // If the value of the column is `NULL`, but this is a reversible VIndex, + // we will try to generate the value from the keyspace id generated by the primary VIndex. + if isReversible { + reverseIndexes = append(reverseIndexes, rowNum) + reverseKsids = append(reverseKsids, ksids[rowNum]) + } + + // Otherwise, don't do anything. Whether `NULL` is a valid value for this column will be + // handled by MySQL. + } else { + // If a value for this column was specified, the keyspace id values from the + // secondary VIndex need to be verified against the keyspace id from the primary VIndex + verifyIndexes = append(verifyIndexes, rowNum) + verifyKeys = append(verifyKeys, rowColumnKeys) + verifyKsids = append(verifyKsids, ksids[rowNum]) + } + } + + // Reverse map values for secondary VIndex columns from the primary VIndex's keyspace id. 
+ if reverseKsids != nil { + reverseKeys, err := reversibleVindex.ReverseMap(vcursor, reverseKsids) + if err != nil { + return err + } + + for i, reverseKey := range reverseKeys { + // Fill the first column with the reverse-mapped value. + vindexColumnsKeys[reverseIndexes[i]][0] = reverseKey + } + } + + // Verify that the keyspace ids generated by the primary and secondary VIndexes match + if verifyIndexes != nil { + // If values were supplied, we validate against keyspace id. + verified, err := vindexes.Verify(ctx, colVindex.Vindex, vcursor, verifyKeys, verifyKsids) + if err != nil { + return err + } + + var mismatchVindexKeys []sqltypes.Row + for i, v := range verified { + rowNum := verifyIndexes[i] + if !v { + if !ic.Ignore { + mismatchVindexKeys = append(mismatchVindexKeys, vindexColumnsKeys[rowNum]) + continue + } + + // Skip the whole row if this is a `INSERT IGNORE` or `INSERT ... ON DUPLICATE KEY ...` statement + // but the keyspace ids didn't match. + ksids[verifyIndexes[i]] = nil + } + } + + if mismatchVindexKeys != nil { + return fmt.Errorf("values %v for column %v does not map to keyspace ids", mismatchVindexKeys, colVindex.Columns) + } + } + + return nil +} + +// processGenerateFromSelect generates new values using a sequence if necessary. +// If no value was generated, it returns 0. Values are generated only +// for cases where none are supplied. 
+func (ic *InsertCommon) processGenerateFromSelect( + ctx context.Context, + vcursor VCursor, + loggingPrimitive Primitive, + rows []sqltypes.Row, +) (insertID int64, err error) { + if ic.Generate == nil { + return 0, nil + } + var count int64 + offset := ic.Generate.Offset + genColPresent := offset < len(rows[0]) + if genColPresent { + for _, row := range rows { + if shouldGenerate(row[offset], evalengine.ParseSQLMode(vcursor.SQLMode())) { + count++ + } + } + } else { + count = int64(len(rows)) + } + + if count == 0 { + return 0, nil + } + + insertID, err = ic.execGenerate(ctx, vcursor, loggingPrimitive, count) + if err != nil { + return 0, err + } + + used := insertID + for idx, val := range rows { + if genColPresent { + if shouldGenerate(val[offset], evalengine.ParseSQLMode(vcursor.SQLMode())) { + val[offset] = sqltypes.NewInt64(used) + used++ + } + } else { + rows[idx] = append(val, sqltypes.NewInt64(used)) + used++ + } + } + + return insertID, nil +} + +// processGenerateFromValues generates new values using a sequence if necessary. +// If no value was generated, it returns 0. Values are generated only +// for cases where none are supplied. +func (ic *InsertCommon) processGenerateFromValues( + ctx context.Context, + vcursor VCursor, + loggingPrimitive Primitive, + bindVars map[string]*querypb.BindVariable, +) (insertID int64, err error) { + if ic.Generate == nil { + return 0, nil + } + + // Scan input values to compute the number of values to generate, and + // keep track of where they should be filled. + env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) + resolved, err := env.Evaluate(ic.Generate.Values) + if err != nil { + return 0, err + } + count := int64(0) + values := resolved.TupleValues() + for _, val := range values { + if shouldGenerate(val, evalengine.ParseSQLMode(vcursor.SQLMode())) { + count++ + } + } + + // If generation is needed, generate the requested number of values (as one call). 
+ if count != 0 { + insertID, err = ic.execGenerate(ctx, vcursor, loggingPrimitive, count) + if err != nil { + return 0, err + } + } + + // Fill the holes where no value was supplied. + cur := insertID + for i, v := range values { + if shouldGenerate(v, evalengine.ParseSQLMode(vcursor.SQLMode())) { + bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.Int64BindVariable(cur) + cur++ + } else { + bindVars[SeqVarName+strconv.Itoa(i)] = sqltypes.ValueBindVariable(v) + } + } + return insertID, nil +} + +func (ic *InsertCommon) execGenerate(ctx context.Context, vcursor VCursor, loggingPrimitive Primitive, count int64) (int64, error) { + // If generation is needed, generate the requested number of values (as one call). + rss, _, err := vcursor.ResolveDestinations(ctx, ic.Generate.Keyspace.Name, nil, []key.Destination{key.DestinationAnyShard{}}) + if err != nil { + return 0, err + } + if len(rss) != 1 { + return 0, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "auto sequence generation can happen through single shard only, it is getting routed to %d shards", len(rss)) + } + bindVars := map[string]*querypb.BindVariable{nextValBV: sqltypes.Int64BindVariable(count)} + qr, err := vcursor.ExecuteStandalone(ctx, loggingPrimitive, ic.Generate.Query, bindVars, rss[0]) + if err != nil { + return 0, err + } + // If no rows are returned, it's an internal error, and the code + // must panic, which will be caught and reported. + return qr.Rows[0][0].ToCastInt64() +} + +// shouldGenerate determines if a sequence value should be generated for a given value +func shouldGenerate(v sqltypes.Value, sqlmode evalengine.SQLMode) bool { + if v.IsNull() { + return true + } + + // Unless the NO_AUTO_VALUE_ON_ZERO sql mode is active in mysql, it also + // treats 0 as a value that should generate a new sequence. 
+ value, err := evalengine.CoerceTo(v, evalengine.NewType(sqltypes.Uint64, collations.CollationBinaryID), sqlmode) + if err != nil { + return false + } + + id, err := value.ToCastUint64() + if err != nil { + return false + } + + return id == 0 +} diff --git a/go/vt/vtgate/engine/insert_select.go b/go/vt/vtgate/engine/insert_select.go new file mode 100644 index 00000000000..88767420508 --- /dev/null +++ b/go/vt/vtgate/engine/insert_select.go @@ -0,0 +1,423 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "sync" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/key" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Primitive = (*InsertSelect)(nil) + +type ( + // InsertSelect represents the instructions to perform an insert operation with input rows from a select. + InsertSelect struct { + InsertCommon + + // Input is a select query plan to retrieve results for inserting data. + Input Primitive + + // VindexValueOffset stores the offset for each column in the ColumnVindex + // that will appear in the result set of the select query. 
+ VindexValueOffset [][]int + } +) + +// newInsertSelect creates a new InsertSelect. +func newInsertSelect( + ignore bool, + keyspace *vindexes.Keyspace, + table *vindexes.Table, + prefix string, + suffix sqlparser.OnDup, + vv [][]int, + input Primitive, +) *InsertSelect { + ins := &InsertSelect{ + InsertCommon: InsertCommon{ + Ignore: ignore, + Keyspace: keyspace, + Prefix: prefix, + Suffix: suffix, + }, + Input: input, + VindexValueOffset: vv, + } + if table != nil { + ins.TableName = table.Name.String() + for _, colVindex := range table.ColumnVindexes { + if colVindex.IsPartialVindex() { + continue + } + ins.ColVindexes = append(ins.ColVindexes, colVindex) + } + } + return ins +} + +func (ins *InsertSelect) Inputs() ([]Primitive, []map[string]any) { + return []Primitive{ins.Input}, nil +} + +// RouteType returns a description of the query routing type used by the primitive +func (ins *InsertSelect) RouteType() string { + return "InsertSelect" +} + +// TryExecute performs a non-streaming exec. +func (ins *InsertSelect) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { + ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout) + defer cancelFunc() + + if ins.Keyspace.Sharded { + return ins.execInsertSharded(ctx, vcursor, bindVars) + } + return ins.execInsertUnsharded(ctx, vcursor, bindVars) +} + +// TryStreamExecute performs a streaming exec. 
+func (ins *InsertSelect) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + if ins.ForceNonStreaming { + res, err := ins.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(res) + } + ctx, cancelFunc := addQueryTimeout(ctx, vcursor, ins.QueryTimeout) + defer cancelFunc() + + sharded := ins.Keyspace.Sharded + output := &sqltypes.Result{} + err := ins.execSelectStreaming(ctx, vcursor, bindVars, func(irr insertRowsResult) error { + if len(irr.rows) == 0 { + return nil + } + + var qr *sqltypes.Result + var err error + if sharded { + qr, err = ins.insertIntoShardedTable(ctx, vcursor, bindVars, irr) + } else { + qr, err = ins.insertIntoUnshardedTable(ctx, vcursor, bindVars, irr) + } + if err != nil { + return err + } + + output.RowsAffected += qr.RowsAffected + // InsertID needs to be updated to the least insertID value in sqltypes.Result + if output.InsertID == 0 || output.InsertID > qr.InsertID { + output.InsertID = qr.InsertID + } + return nil + }) + if err != nil { + return err + } + return callback(output) +} + +func (ins *InsertSelect) execInsertUnsharded(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + irr, err := ins.execSelect(ctx, vcursor, bindVars) + if err != nil { + return nil, err + } + if len(irr.rows) == 0 { + return &sqltypes.Result{}, nil + } + return ins.insertIntoUnshardedTable(ctx, vcursor, bindVars, irr) +} + +func (ins *InsertSelect) insertIntoUnshardedTable(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, irr insertRowsResult) (*sqltypes.Result, error) { + query := ins.getInsertUnshardedQuery(irr.rows, bindVars) + return ins.executeUnshardedTableQuery(ctx, vcursor, ins, bindVars, query, irr.insertID) +} + +func (ins *InsertSelect) getInsertUnshardedQuery(rows []sqltypes.Row, bindVars 
map[string]*querypb.BindVariable) string { + var mids sqlparser.Values + for r, inputRow := range rows { + row := sqlparser.ValTuple{} + for c, value := range inputRow { + bvName := insertVarOffset(r, c) + bindVars[bvName] = sqltypes.ValueBindVariable(value) + row = append(row, sqlparser.NewArgument(bvName)) + } + mids = append(mids, row) + } + return ins.Prefix + sqlparser.String(mids) + sqlparser.String(ins.Suffix) +} + +func (ins *InsertSelect) insertIntoShardedTable( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, + irr insertRowsResult, +) (*sqltypes.Result, error) { + rss, queries, err := ins.getInsertShardedQueries(ctx, vcursor, bindVars, irr.rows) + if err != nil { + return nil, err + } + + qr, err := ins.executeInsertQueries(ctx, vcursor, rss, queries, irr.insertID) + if err != nil { + return nil, err + } + qr.InsertID = uint64(irr.insertID) + return qr, nil +} + +func (ins *InsertSelect) executeInsertQueries( + ctx context.Context, + vcursor VCursor, + rss []*srvtopo.ResolvedShard, + queries []*querypb.BoundQuery, + insertID uint64, +) (*sqltypes.Result, error) { + autocommit := (len(rss) == 1 || ins.MultiShardAutocommit) && vcursor.AutocommitApproval() + err := allowOnlyPrimary(rss...) 
+ if err != nil { + return nil, err + } + result, errs := vcursor.ExecuteMultiShard(ctx, ins, rss, queries, true /* rollbackOnError */, autocommit) + if errs != nil { + return nil, vterrors.Aggregate(errs) + } + + if insertID != 0 { + result.InsertID = insertID + } + return result, nil +} + +func (ins *InsertSelect) getInsertShardedQueries( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, + rows []sqltypes.Row, +) ([]*srvtopo.ResolvedShard, []*querypb.BoundQuery, error) { + vindexRowsValues, err := ins.buildVindexRowsValues(rows) + if err != nil { + return nil, nil, err + } + + keyspaceIDs, err := ins.processVindexes(ctx, vcursor, vindexRowsValues) + if err != nil { + return nil, nil, err + } + + var indexes []*querypb.Value + var destinations []key.Destination + for i, ksid := range keyspaceIDs { + if ksid != nil { + indexes = append(indexes, &querypb.Value{ + Value: strconv.AppendInt(nil, int64(i), 10), + }) + destinations = append(destinations, key.DestinationKeyspaceID(ksid)) + } + } + if len(destinations) == 0 { + // In this case, all we have is nil KeyspaceIds, we don't do + // anything at all. 
+ return nil, nil, nil + } + + rss, indexesPerRss, err := vcursor.ResolveDestinations(ctx, ins.Keyspace.Name, indexes, destinations) + if err != nil { + return nil, nil, err + } + + queries := make([]*querypb.BoundQuery, len(rss)) + for i := range rss { + bvs := sqltypes.CopyBindVariables(bindVars) // we don't want to create one huge bindvars for all values + var mids sqlparser.Values + for _, indexValue := range indexesPerRss[i] { + index, _ := strconv.Atoi(string(indexValue.Value)) + if keyspaceIDs[index] != nil { + row := sqlparser.ValTuple{} + for colOffset, value := range rows[index] { + bvName := insertVarOffset(index, colOffset) + bvs[bvName] = sqltypes.ValueBindVariable(value) + row = append(row, sqlparser.NewArgument(bvName)) + } + mids = append(mids, row) + } + } + rewritten := ins.Prefix + sqlparser.String(mids) + sqlparser.String(ins.Suffix) + queries[i] = &querypb.BoundQuery{ + Sql: rewritten, + BindVariables: bvs, + } + } + + return rss, queries, nil +} + +func (ins *InsertSelect) buildVindexRowsValues(rows []sqltypes.Row) ([][]sqltypes.Row, error) { + colVindexes := ins.ColVindexes + if len(colVindexes) != len(ins.VindexValueOffset) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "vindex value offsets and vindex info do not match") + } + + // Here we go over the incoming rows and extract values for the vindexes we need to update + vindexRowsValues := make([][]sqltypes.Row, len(colVindexes)) + for _, inputRow := range rows { + for colIdx := range colVindexes { + offsets := ins.VindexValueOffset[colIdx] + row := make(sqltypes.Row, 0, len(offsets)) + for _, offset := range offsets { + if offset == -1 { // value not provided from select query + row = append(row, sqltypes.NULL) + continue + } + row = append(row, inputRow[offset]) + } + vindexRowsValues[colIdx] = append(vindexRowsValues[colIdx], row) + } + } + return vindexRowsValues, nil +} + +func (ins *InsertSelect) execInsertSharded(ctx context.Context, vcursor VCursor, bindVars 
map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + result, err := ins.execSelect(ctx, vcursor, bindVars) + if err != nil { + return nil, err + } + if len(result.rows) == 0 { + return &sqltypes.Result{}, nil + } + + return ins.insertIntoShardedTable(ctx, vcursor, bindVars, result) +} + +func (ins *InsertSelect) description() PrimitiveDescription { + other := ins.commonDesc() + other["TableName"] = ins.GetTableName() + + if len(ins.VindexValueOffset) > 0 { + valuesOffsets := map[string]string{} + for idx, ints := range ins.VindexValueOffset { + if len(ins.ColVindexes) < idx { + panic("ins.ColVindexes and ins.VindexValueOffset do not line up") + } + vindex := ins.ColVindexes[idx] + marshal, _ := json.Marshal(ints) + valuesOffsets[vindex.Name] = string(marshal) + } + other["VindexOffsetFromSelect"] = valuesOffsets + } + + return PrimitiveDescription{ + OperatorType: "Insert", + Keyspace: ins.Keyspace, + Variant: "Select", + TargetTabletType: topodatapb.TabletType_PRIMARY, + Other: other, + } +} + +func (ic *InsertCommon) commonDesc() map[string]any { + other := map[string]any{ + "MultiShardAutocommit": ic.MultiShardAutocommit, + "QueryTimeout": ic.QueryTimeout, + "InsertIgnore": ic.Ignore, + "InputAsNonStreaming": ic.ForceNonStreaming, + "NoAutoCommit": ic.PreventAutoCommit, + } + + if ic.Generate != nil { + if ic.Generate.Values == nil { + other["AutoIncrement"] = fmt.Sprintf("%s:Offset(%d)", ic.Generate.Query, ic.Generate.Offset) + } else { + other["AutoIncrement"] = fmt.Sprintf("%s:Values::%s", ic.Generate.Query, sqlparser.String(ic.Generate.Values)) + } + } + return other +} + +func insertVarOffset(rowNum, colOffset int) string { + return fmt.Sprintf("_c%d_%d", rowNum, colOffset) +} + +type insertRowsResult struct { + rows []sqltypes.Row + insertID uint64 +} + +func (ins *InsertSelect) execSelect( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, +) (insertRowsResult, error) { + res, err := 
vcursor.ExecutePrimitive(ctx, ins.Input, bindVars, false) + if err != nil || len(res.Rows) == 0 { + return insertRowsResult{}, err + } + + insertID, err := ins.processGenerateFromSelect(ctx, vcursor, ins, res.Rows) + if err != nil { + return insertRowsResult{}, err + } + + return insertRowsResult{ + rows: res.Rows, + insertID: uint64(insertID), + }, nil +} + +func (ins *InsertSelect) execSelectStreaming( + ctx context.Context, + vcursor VCursor, + bindVars map[string]*querypb.BindVariable, + callback func(irr insertRowsResult) error, +) error { + var mu sync.Mutex + return vcursor.StreamExecutePrimitiveStandalone(ctx, ins.Input, bindVars, false, func(result *sqltypes.Result) error { + if len(result.Rows) == 0 { + return nil + } + + // should process only one chunk at a time. + // as parallel chunk insert will try to use the same transaction in the vttablet + // this will cause transaction in use error out with "transaction in use" error. + mu.Lock() + defer mu.Unlock() + + insertID, err := ins.processGenerateFromSelect(ctx, vcursor, ins, result.Rows) + if err != nil { + return err + } + + return callback(insertRowsResult{ + rows: result.Rows, + insertID: uint64(insertID), + }) + }) +} diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 014654f37d6..2de95e5d186 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -33,7 +33,7 @@ import ( ) func TestInsertUnsharded(t *testing.T) { - ins := NewQueryInsert( + ins := newQueryInsert( InsertUnsharded, &vindexes.Keyspace{ Name: "ks", @@ -55,7 +55,7 @@ func TestInsertUnsharded(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.0: dummy_insert {} true true`, }) - expectResult(t, "Execute", result, &sqltypes.Result{InsertID: 4}) + expectResult(t, result, &sqltypes.Result{InsertID: 4}) // Failure cases vc = &loggingVCursor{shardErr: errors.New("shard_error")} @@ -64,11 +64,11 @@ func 
TestInsertUnsharded(t *testing.T) { vc = &loggingVCursor{} _, err = ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) - require.EqualError(t, err, `Keyspace does not have exactly one shard: []`) + require.EqualError(t, err, `VT09022: Destination does not have exactly one shard: []`) } func TestInsertUnshardedGenerate(t *testing.T) { - ins := NewQueryInsert( + ins := newQueryInsert( InsertUnsharded, &vindexes.Keyspace{ Name: "ks", @@ -117,11 +117,11 @@ func TestInsertUnshardedGenerate(t *testing.T) { }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. - expectResult(t, "Execute", result, &sqltypes.Result{InsertID: 4}) + expectResult(t, result, &sqltypes.Result{InsertID: 4}) } func TestInsertUnshardedGenerate_Zeros(t *testing.T) { - ins := NewQueryInsert( + ins := newQueryInsert( InsertUnsharded, &vindexes.Keyspace{ Name: "ks", @@ -170,7 +170,7 @@ func TestInsertUnshardedGenerate_Zeros(t *testing.T) { }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. 
- expectResult(t, "Execute", result, &sqltypes.Result{InsertID: 4}) + expectResult(t, result, &sqltypes.Result{InsertID: 4}) } func TestInsertShardedSimple(t *testing.T) { @@ -194,11 +194,11 @@ func TestInsertShardedSimple(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -213,7 +213,7 @@ func TestInsertShardedSimple(t *testing.T) { sqlparser.Values{ {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -227,12 +227,168 @@ func TestInsertShardedSimple(t *testing.T) { `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix(:_id_0 /* INT64 */) suffix {_id_0: type:INT64 value:"1"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */) {_id_0: type:INT64 value:"1"} ` + + `true true`, + }) + + // Multiple rows are not autocommitted by default + ins = newInsert( + InsertSharded, + false, + ks.Keyspace, + [][][]evalengine.Expr{{ + // colVindex columns: id + // 3 rows. 
+ { + evalengine.NewLiteralInt(1), + evalengine.NewLiteralInt(2), + evalengine.NewLiteralInt(3), + }, + }}, + ks.Tables["t1"], + "prefix", + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}}, + }, + nil, + ) + vc = newDMLTestVCursor("-20", "20-") + vc.shardForKsid = []string{"20-", "-20", "20-"} + + _, err = ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + if err != nil { + t.Fatal(err) + } + vc.ExpectLog(t, []string{ + // Based on shardForKsid, values returned will be 20-, -20, 20-. + `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, + // Row 2 will go to -20, rows 1 & 3 will go to 20- + `ExecuteMultiShard ` + + `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */) {_id_1: type:INT64 value:"2"} ` + + `true false`, + }) + + // Optional flag overrides autocommit + ins = newInsert( + InsertSharded, + false, + ks.Keyspace, + [][][]evalengine.Expr{{ + // colVindex columns: id + // 3 rows. 
+ { + evalengine.NewLiteralInt(1), + evalengine.NewLiteralInt(2), + evalengine.NewLiteralInt(3), + }, + }}, + + ks.Tables["t1"], + "prefix", + sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, + {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}}, + }, + nil, + ) + ins.MultiShardAutocommit = true + + vc = newDMLTestVCursor("-20", "20-") + vc.shardForKsid = []string{"20-", "-20", "20-"} + + _, err = ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + if err != nil { + t.Fatal(err) + } + vc.ExpectLog(t, []string{ + // Based on shardForKsid, values returned will be 20-, -20, 20-. + `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, + // Row 2 will go to -20, rows 1 & 3 will go to 20- + `ExecuteMultiShard ` + + `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */) {_id_1: type:INT64 value:"2"} ` + + `true true`, + }) +} + +func TestInsertShardWithONDuplicateKey(t *testing.T) { + invschema := &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "sharded": { + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "hash": { + Type: "hash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "hash", + Columns: []string{"id"}, + }}, + }, + }, + }, + }, + } + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) + ks := vs.Keyspaces["sharded"] + + // A single row insert should be autocommitted + ins := newInsert( + InsertSharded, + false, + ks.Keyspace, + [][][]evalengine.Expr{{ + // colVindex columns: id + { + evalengine.NewLiteralInt(1), + }, + }}, + ks.Tables["t1"], + "prefix", + 
sqlparser.Values{ + {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + }, + sqlparser.OnDup{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix1"), Expr: &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix2"), Expr: &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("if"), + Exprs: sqlparser.Exprs{ + sqlparser.NewComparisonExpr(sqlparser.InOp, &sqlparser.ValuesFuncExpr{Name: sqlparser.NewColName("col")}, sqlparser.ListArg("_id_1"), nil), + sqlparser.NewColName("col"), + &sqlparser.ValuesFuncExpr{Name: sqlparser.NewColName("col")}, + }, + }}}, + ) + vc := newDMLTestVCursor("-20", "20-") + vc.shardForKsid = []string{"20-", "-20", "20-"} + + _, err := ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{ + "_id_1": sqltypes.TestBindVariable([]int{1, 2}), + }, false) + if err != nil { + t.Fatal(err) + } + vc.ExpectLog(t, []string{ + // Based on shardForKsid, values returned will be 20-. 
+ `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, + // Row 2 will go to -20, rows 1 & 3 will go to 20- + `ExecuteMultiShard ` + + `sharded.20-: prefix(:_id_0 /* INT64 */) on duplicate key update ` + + `suffix1 = :_id_0 /* INT64 */, suffix2 = if(values(col) in ::_id_1, col, values(col)) ` + + `{_id_0: type:INT64 value:"1" ` + + `_id_1: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} ` + `true true`, }) // Multiple rows are not autocommitted by default - ins = NewInsert( + ins = newInsert( InsertSharded, false, ks.Keyspace, @@ -252,7 +408,9 @@ func TestInsertShardedSimple(t *testing.T) { {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}}, }, - " suffix", + sqlparser.OnDup{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix"), Expr: &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + }, ) vc = newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -266,13 +424,13 @@ func TestInsertShardedSimple(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) suffix {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix(:_id_1 /* INT64 */) suffix {_id_1: type:INT64 value:"2"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) on duplicate key update suffix = :_id_0 /* INT64 */ {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */) on duplicate key update suffix = :_id_0 /* INT64 */ {_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2"} ` + `true false`, }) // Optional flag overrides autocommit - ins = 
NewInsert( + ins = newInsert( InsertSharded, false, ks.Keyspace, @@ -293,7 +451,9 @@ func TestInsertShardedSimple(t *testing.T) { {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}}, }, - " suffix", + sqlparser.OnDup{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("suffix"), Expr: &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, + }, ) ins.MultiShardAutocommit = true @@ -309,8 +469,8 @@ func TestInsertShardedSimple(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) suffix {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix(:_id_1 /* INT64 */) suffix {_id_1: type:INT64 value:"2"} ` + + `sharded.20-: prefix(:_id_0 /* INT64 */),(:_id_2 /* INT64 */) on duplicate key update suffix = :_id_0 /* INT64 */ {_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + + `sharded.-20: prefix(:_id_1 /* INT64 */) on duplicate key update suffix = :_id_0 /* INT64 */ {_id_0: type:INT64 value:"1" _id_1: type:INT64 value:"2"} ` + `true true`, }) } @@ -341,10 +501,10 @@ func TestInsertShardedFail(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -360,14 +520,14 @@ func TestInsertShardedFail(t *testing.T) { sqlparser.Values{ {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) vc := &loggingVCursor{} // The lookup will fail to map to a keyspace id. 
_, err := ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) - require.EqualError(t, err, `could not map [INT64(1)] to a keyspace id`) + require.EqualError(t, err, `VT09023: could not map [INT64(1)] to a keyspace id`) } func TestInsertShardedGenerate(t *testing.T) { @@ -391,10 +551,10 @@ func TestInsertShardedGenerate(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -414,7 +574,7 @@ func TestInsertShardedGenerate(t *testing.T) { {&sqlparser.Argument{Name: "__seq1", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "__seq2", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) ins.Generate = &Generate{ @@ -454,15 +614,15 @@ func TestInsertShardedGenerate(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, // Row 2 will go to -20, rows 1 & 3 will go to 20- `ExecuteMultiShard ` + - `sharded.20-: prefix(:__seq0 /* INT64 */),(:__seq2 /* INT64 */) suffix ` + + `sharded.20-: prefix(:__seq0 /* INT64 */),(:__seq2 /* INT64 */) ` + `{__seq0: type:INT64 value:"1" __seq2: type:INT64 value:"3"} ` + - `sharded.-20: prefix(:__seq1 /* INT64 */) suffix ` + + `sharded.-20: prefix(:__seq1 /* INT64 */) ` + `{__seq1: type:INT64 value:"2"} ` + `true false`, }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. 
- expectResult(t, "Execute", result, &sqltypes.Result{InsertID: 2}) + expectResult(t, result, &sqltypes.Result{InsertID: 2}) } func TestInsertShardedOwned(t *testing.T) { @@ -510,10 +670,10 @@ func TestInsertShardedOwned(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -552,7 +712,7 @@ func TestInsertShardedOwned(t *testing.T) { {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) vc := newDMLTestVCursor("-20", "20-") @@ -574,12 +734,12 @@ func TestInsertShardedOwned(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */)` + - `,(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) suffix ` + + `,(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) ` + `{_c1_0: type:INT64 value:"4" _c1_2: type:INT64 value:"6" ` + `_c2_0: type:INT64 value:"7" _c2_2: type:INT64 value:"9" ` + `_c3_0: type:INT64 value:"10" _c3_2: type:INT64 value:"12" ` + `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix(:_id_1 /* INT64 */, :_c1_1 /* INT64 */, :_c2_1 /* INT64 */, :_c3_1 /* INT64 */) suffix ` + + 
`sharded.-20: prefix(:_id_1 /* INT64 */, :_c1_1 /* INT64 */, :_c2_1 /* INT64 */, :_c3_1 /* INT64 */) ` + `{_c1_1: type:INT64 value:"5" _c2_1: type:INT64 value:"8" _c3_1: type:INT64 value:"11" ` + `_id_1: type:INT64 value:"2"} ` + `true false`, @@ -620,10 +780,10 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -644,7 +804,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { sqlparser.Values{ {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Null}}, }, - " suffix", + nil, ) vc := newDMLTestVCursor("-20", "20-") @@ -656,7 +816,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { } vc.ExpectLog(t, []string{ `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sharded.20-: prefix(:_id_0 /* INT64 */, :_c3_0 /* NULL_TYPE */) suffix ` + + `ExecuteMultiShard sharded.20-: prefix(:_id_0 /* INT64 */, :_c3_0 /* NULL_TYPE */) ` + `{_c3_0: _id_0: type:INT64 value:"1"} true true`, }) } @@ -697,10 +857,10 @@ func TestInsertShardedGeo(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -730,7 +890,7 @@ func TestInsertShardedGeo(t *testing.T) { {&sqlparser.Argument{Name: "_region_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_region_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) vc := newDMLTestVCursor("-20", "20-") @@ -745,9 +905,9 @@ func TestInsertShardedGeo(t *testing.T) { `id_0: type:INT64 value:"1" id_1: 
type:INT64 value:"1" ` + `keyspace_id_0: type:VARBINARY value:"\x01\x16k@\xb4J\xbaK\xd6" keyspace_id_1: type:VARBINARY value:"\xff\x16k@\xb4J\xbaK\xd6" true`, `ResolveDestinations sharded [value:"0" value:"1"] Destinations:DestinationKeyspaceID(01166b40b44aba4bd6),DestinationKeyspaceID(ff166b40b44aba4bd6)`, - `ExecuteMultiShard sharded.20-: prefix(:_region_0 /* INT64 */, :_id_0 /* INT64 */) suffix ` + + `ExecuteMultiShard sharded.20-: prefix(:_region_0 /* INT64 */, :_id_0 /* INT64 */) ` + `{_id_0: type:INT64 value:"1" _region_0: type:INT64 value:"1"} ` + - `sharded.-20: prefix(:_region_1 /* INT64 */, :_id_1 /* INT64 */) suffix ` + + `sharded.-20: prefix(:_region_1 /* INT64 */, :_id_1 /* INT64 */) ` + `{_id_1: type:INT64 value:"1" _region_1: type:INT64 value:"255"} ` + `true false`, }) @@ -803,10 +963,10 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, true, ks.Keyspace, @@ -816,7 +976,6 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { // rows for id evalengine.NewLiteralInt(1), - evalengine.NewLiteralInt(2), evalengine.NewLiteralInt(3), evalengine.NewLiteralInt(4), }, @@ -825,14 +984,12 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { { // rows for c1 evalengine.NewLiteralInt(5), - evalengine.NewLiteralInt(6), evalengine.NewLiteralInt(7), evalengine.NewLiteralInt(8), }, { // rows for c2 evalengine.NewLiteralInt(9), - evalengine.NewLiteralInt(10), evalengine.NewLiteralInt(11), evalengine.NewLiteralInt(12), }, @@ -841,7 +998,6 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { { // rows for c3 evalengine.NewLiteralInt(13), - evalengine.NewLiteralInt(14), evalengine.NewLiteralInt(15), evalengine.NewLiteralInt(16), }, @@ -852,9 +1008,8 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, 
&sqlparser.Argument{Name: "_c1_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}}, - {&sqlparser.Argument{Name: "_id_3", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_3", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_3", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_3", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) ksid0Lookup := sqltypes.MakeTestResult( @@ -897,31 +1052,29 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { t.Fatal(err) } vc.ExpectLog(t, []string{ - `Execute select from1, toc from prim where from1 in ::from1 from1: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} values:{type:INT64 value:"3"} values:{type:INT64 value:"4"} false`, - `Execute insert ignore into lkp2(from1, from2, toc) values(:from1_0, :from2_0, :toc_0), (:from1_1, :from2_1, :toc_1), (:from1_2, :from2_2, :toc_2) ` + + `Execute select from1, toc from prim where from1 in ::from1 ` + + `from1: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"3"} values:{type:INT64 value:"4"} false`, + `Execute insert ignore into lkp2(from1, from2, toc) values` + + `(:from1_0, :from2_0, :toc_0), (:from1_1, :from2_1, :toc_1), (:from1_2, :from2_2, :toc_2) ` + `from1_0: type:INT64 value:"5" from1_1: type:INT64 value:"7" from1_2: type:INT64 value:"8" ` + `from2_0: type:INT64 value:"9" from2_1: type:INT64 value:"11" from2_2: type:INT64 value:"12" ` + - `toc_0: type:VARBINARY value:"\x00" 
toc_1: type:VARBINARY value:"\x00" toc_2: type:VARBINARY value:"\x00" ` + - `true`, - // row 2 is out because it didn't map to a ksid. + `toc_0: type:VARBINARY value:"\x00" toc_1: type:VARBINARY value:"\x00" toc_2: type:VARBINARY value:"\x00" true`, `Execute select from1 from lkp2 where from1 = :from1 and toc = :toc from1: type:INT64 value:"5" toc: type:VARBINARY value:"\x00" false`, `Execute select from1 from lkp2 where from1 = :from1 and toc = :toc from1: type:INT64 value:"7" toc: type:VARBINARY value:"\x00" false`, `Execute select from1 from lkp2 where from1 = :from1 and toc = :toc from1: type:INT64 value:"8" toc: type:VARBINARY value:"\x00" false`, `Execute insert ignore into lkp1(from, toc) values(:from_0, :toc_0), (:from_1, :toc_1) ` + `from_0: type:INT64 value:"13" from_1: type:INT64 value:"16" ` + - `toc_0: type:VARBINARY value:"\x00" toc_1: type:VARBINARY value:"\x00" ` + - `true`, - // row 3 is out because it failed Verify. Only two verifications from lkp1. + `toc_0: type:VARBINARY value:"\x00" toc_1: type:VARBINARY value:"\x00" true`, + // row 2 is out because it failed Verify. Only two verifications from lkp1. `Execute select from from lkp1 where from = :from and toc = :toc from: type:INT64 value:"13" toc: type:VARBINARY value:"\x00" false`, `Execute select from from lkp1 where from = :from and toc = :toc from: type:INT64 value:"16" toc: type:VARBINARY value:"\x00" false`, - `ResolveDestinations sharded [value:"0" value:"3"] Destinations:DestinationKeyspaceID(00),DestinationKeyspaceID(00)`, - // Bind vars for rows 2 & 3 may be missing because they were not sent. + `ResolveDestinations sharded [value:"0" value:"2"] Destinations:DestinationKeyspaceID(00),DestinationKeyspaceID(00)`, + // Bind vars for rows 2 may be missing because they were not sent. 
`ExecuteMultiShard ` + - `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */) suffix ` + + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */) ` + `{_c1_0: type:INT64 value:"5" _c2_0: type:INT64 value:"9" _c3_0: type:INT64 value:"13" _id_0: type:INT64 value:"1"} ` + - `sharded.-20: prefix(:_id_3 /* INT64 */, :_c1_3 /* INT64 */, :_c2_3 /* INT64 */, :_c3_3 /* INT64 */) suffix ` + - `{_c1_3: type:INT64 value:"8" _c2_3: type:INT64 value:"12" _c3_3: type:INT64 value:"16" _id_3: type:INT64 value:"4"} ` + - `true false`, + `sharded.-20: prefix(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) ` + + `{_c1_2: type:INT64 value:"8" _c2_2: type:INT64 value:"12" _c3_2: type:INT64 value:"16" _id_2: type:INT64 value:"4"} true false`, }) } @@ -959,10 +1112,10 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, true, ks.Keyspace, @@ -984,7 +1137,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { sqlparser.Values{ {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) ksid0 := sqltypes.MakeTestResult( @@ -1009,7 +1162,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { vc.ExpectLog(t, []string{ `Execute select from from lkp1 where from = :from and toc = :toc from: toc: type:VARBINARY value:"\x16k@\xb4J\xbaK\xd6" false`, `ResolveDestinations sharded [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sharded.-20: prefix(:_id_0 /* INT64 */, :_c3_0 /* INT64 */) suffix ` + + `ExecuteMultiShard sharded.-20: prefix(:_id_0 /* INT64 */, :_c3_0 /* INT64 */) ` + `{_c3_0: _id_0: type:INT64 value:"1"} true true`, }) } 
@@ -1057,10 +1210,10 @@ func TestInsertShardedUnownedVerify(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1102,7 +1255,7 @@ func TestInsertShardedUnownedVerify(t *testing.T) { {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) // nonemptyResult will cause the lookup verify queries to succeed. @@ -1141,12 +1294,12 @@ func TestInsertShardedUnownedVerify(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c1_0 /* INT64 */, :_c2_0 /* INT64 */, :_c3_0 /* INT64 */),` + - `(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) suffix ` + + `(:_id_2 /* INT64 */, :_c1_2 /* INT64 */, :_c2_2 /* INT64 */, :_c3_2 /* INT64 */) ` + `{_c1_0: type:INT64 value:"4" _c1_2: type:INT64 value:"6" ` + `_c2_0: type:INT64 value:"7" _c2_2: type:INT64 value:"9" ` + `_c3_0: type:INT64 value:"10" _c3_2: type:INT64 value:"12" ` + `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + - `sharded.-20: prefix(:_id_1 /* INT64 */, :_c1_1 /* INT64 */, :_c2_1 /* INT64 */, :_c3_1 /* INT64 */) suffix ` + + `sharded.-20: prefix(:_id_1 /* INT64 */, :_c1_1 /* INT64 */, :_c2_1 /* INT64 */, :_c3_1 /* INT64 */) ` + `{_c1_1: 
type:INT64 value:"5" _c2_1: type:INT64 value:"8" ` + `_c3_1: type:INT64 value:"11" _id_1: type:INT64 value:"2"} ` + `true false`, @@ -1185,10 +1338,10 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, true, ks.Keyspace, @@ -1216,7 +1369,7 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "v2", Type: sqltypes.VarChar}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "v3", Type: sqltypes.VarChar}}, }, - " suffix", + nil, ) // nonemptyResult will cause the lookup verify queries to succeed. @@ -1251,9 +1404,9 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { // Based on shardForKsid, values returned will be 20-, -20. 
`ResolveDestinations sharded [value:"0" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + - `sharded.20-: prefix(:_id_0 /* INT64 */, :_c3_0 /* INT64 */, :v1 /* VARCHAR */) suffix ` + + `sharded.20-: prefix(:_id_0 /* INT64 */, :_c3_0 /* INT64 */, :v1 /* VARCHAR */) ` + `{_c3_0: type:INT64 value:"10" _id_0: type:INT64 value:"1" v1: type:VARCHAR value:"a"} ` + - `sharded.-20: prefix(:_id_2 /* INT64 */, :_c3_2 /* INT64 */, :v3 /* VARCHAR */) suffix ` + + `sharded.-20: prefix(:_id_2 /* INT64 */, :_c3_2 /* INT64 */, :v3 /* VARCHAR */) ` + `{_c3_2: type:INT64 value:"12" _id_2: type:INT64 value:"3" v3: type:VARCHAR value:"c"} ` + `true false`, }) @@ -1291,10 +1444,10 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1316,7 +1469,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { sqlparser.Values{ {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Int64}}, }, - " suffix", + nil, ) vc := newDMLTestVCursor("-20", "20-") @@ -1368,10 +1521,10 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1413,7 +1566,7 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { {&sqlparser.Argument{Name: "_id_1", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c1_1", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c2_1", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c3_1", Type: sqltypes.Null}}, {&sqlparser.Argument{Name: "_id_2", Type: sqltypes.Int64}, 
&sqlparser.Argument{Name: "_c1_2", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c2_2", Type: sqltypes.Null}, &sqlparser.Argument{Name: "_c3_2", Type: sqltypes.Null}}, }, - " suffix", + nil, ) // nonemptyResult will cause the lookup verify queries to succeed. @@ -1439,13 +1592,13 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard sharded.20-: ` + `prefix(:_id_0 /* INT64 */, :_c1_0 /* NULL_TYPE */, :_c2_0 /* NULL_TYPE */, :_c3_0 /* NULL_TYPE */),` + - `(:_id_2 /* INT64 */, :_c1_2 /* NULL_TYPE */, :_c2_2 /* NULL_TYPE */, :_c3_2 /* NULL_TYPE */) suffix ` + + `(:_id_2 /* INT64 */, :_c1_2 /* NULL_TYPE */, :_c2_2 /* NULL_TYPE */, :_c3_2 /* NULL_TYPE */) ` + `{_c1_0: type:UINT64 value:"1" _c1_2: type:UINT64 value:"3" ` + `_c2_0: _c2_2: ` + `_c3_0: type:UINT64 value:"1" _c3_2: type:UINT64 value:"3" ` + `_id_0: type:INT64 value:"1" _id_2: type:INT64 value:"3"} ` + `sharded.-20: ` + - `prefix(:_id_1 /* INT64 */, :_c1_1 /* NULL_TYPE */, :_c2_1 /* NULL_TYPE */, :_c3_1 /* NULL_TYPE */) suffix ` + + `prefix(:_id_1 /* INT64 */, :_c1_1 /* NULL_TYPE */, :_c2_1 /* NULL_TYPE */, :_c3_1 /* NULL_TYPE */) ` + `{_c1_1: type:UINT64 value:"2" _c2_1: _c3_1: type:UINT64 value:"2" _id_1: type:INT64 value:"2"} true false`, }) } @@ -1482,10 +1635,10 @@ func TestInsertShardedUnownedReverseMapSuccess(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( + ins := newInsert( InsertSharded, false, ks.Keyspace, @@ -1507,7 +1660,7 @@ func TestInsertShardedUnownedReverseMapSuccess(t *testing.T) { sqlparser.Values{ {&sqlparser.Argument{Name: "_id_0", Type: sqltypes.Int64}, &sqlparser.Argument{Name: "_c3_0", Type: sqltypes.Null}}, }, - " suffix", + nil, ) 
vc := newDMLTestVCursor("-20", "20-") @@ -1529,25 +1682,17 @@ func TestInsertSelectSimple(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{{1}}, - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = append(ins.ColVindexes, ks.Tables["t1"].ColumnVindexes...) - ins.Prefix = "prefix " - ins.Suffix = " suffix" + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect(false, ks.Keyspace, ks.Tables["t1"], "prefix ", nil, [][]int{{1}}, rb) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1571,10 +1716,10 @@ func TestInsertSelectSimple(t *testing.T) { // two rows go to the 20- shard, and one row go to the -20 shard `ExecuteMultiShard ` + - `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) ` + `{_c0_0: type:VARCHAR value:"a" _c0_1: type:INT64 value:"1"` + ` _c2_0: type:VARCHAR value:"b" _c2_1: type:INT64 value:"2"} ` + - `sharded.-20: prefix values (:_c1_0, :_c1_1) suffix` + + `sharded.-20: prefix values (:_c1_0, :_c1_1)` + ` {_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"3"} true false`}) vc.Rewind() @@ -1591,10 +1736,10 @@ func TestInsertSelectSimple(t *testing.T) { // two rows go to the 20- shard, and one row go to the -20 shard `ExecuteMultiShard ` + - `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) suffix ` + + `sharded.20-: prefix values (:_c0_0, 
:_c0_1), (:_c2_0, :_c2_1) ` + `{_c0_0: type:VARCHAR value:"a" _c0_1: type:INT64 value:"1"` + ` _c2_0: type:VARCHAR value:"b" _c2_1: type:INT64 value:"2"} ` + - `sharded.-20: prefix values (:_c1_0, :_c1_1) suffix` + + `sharded.-20: prefix values (:_c1_0, :_c1_1)` + ` {_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"3"} true false`}) } @@ -1620,26 +1765,27 @@ func TestInsertSelectOwned(t *testing.T) { Name: "onecol", Columns: []string{"c3"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + nil, + [][]int{ {1}, // The primary vindex has a single column as sharding key {0}}, // the onecol vindex uses the 'name' column - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = append(ins.ColVindexes, ks.Tables["t1"].ColumnVindexes...) 
- ins.Prefix = "prefix " - ins.Suffix = " suffix" + rb, + ) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1669,11 +1815,11 @@ func TestInsertSelectOwned(t *testing.T) { // insert values into the main table `ExecuteMultiShard ` + // first we insert two rows on the 20- shard - `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) ` + `{_c0_0: type:VARCHAR value:"a" _c0_1: type:INT64 value:"1" _c2_0: type:VARCHAR value:"b" _c2_1: type:INT64 value:"2"} ` + // next we insert one row on the -20 shard - `sharded.-20: prefix values (:_c1_0, :_c1_1) suffix ` + + `sharded.-20: prefix values (:_c1_0, :_c1_1) ` + `{_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"3"} ` + `true false`}) @@ -1697,11 +1843,11 @@ func TestInsertSelectOwned(t *testing.T) { // insert values into the main table `ExecuteMultiShard ` + // first we insert two rows on the 20- shard - `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) ` + `{_c0_0: type:VARCHAR value:"a" _c0_1: type:INT64 value:"1" _c2_0: type:VARCHAR value:"b" _c2_1: type:INT64 value:"2"} ` + // next we insert one row on the -20 shard - `sharded.-20: prefix values (:_c1_0, :_c1_1) suffix ` + + `sharded.-20: prefix values (:_c1_0, :_c1_1) ` + `{_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"3"} ` + `true false`}) } @@ -1720,27 +1866,25 @@ func TestInsertSelectGenerate(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := NewInsert( - InsertSelect, - false, - ks.Keyspace, - nil, - ks.Tables["t1"], - "prefix ", - nil, - " suffix") - ins.Query = "dummy_insert" - ins.VindexValueOffset = [][]int{{1}} // The primary vindex has a single column as sharding key - 
ins.Input = &Route{ + rb := &Route{ Query: "dummy_select", FieldQuery: "dummy_field_query", RoutingParameters: &RoutingParameters{ Opcode: Scatter, Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + nil, + [][]int{{1}}, // The primary vindex has a single column as sharding key + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1760,7 +1904,7 @@ func TestInsertSelectGenerate(t *testing.T) { "varchar|int64"), "a|1", "a|null", - "b|null"), + "b|0"), // This is the result for the sequence query sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -1785,19 +1929,19 @@ func TestInsertSelectGenerate(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + // first we send the insert to the 20- shard - `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) ` + `{_c0_0: type:VARCHAR value:"a" ` + `_c0_1: type:INT64 value:"1" ` + `_c2_0: type:VARCHAR value:"b" ` + `_c2_1: type:INT64 value:"3"} ` + // next we send the insert to the -20 shard - `sharded.-20: prefix values (:_c1_0, :_c1_1) suffix ` + + `sharded.-20: prefix values (:_c1_0, :_c1_1) ` + `{_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"2"} ` + `true false`, }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. 
- expectResult(t, "Execute", result, &sqltypes.Result{InsertID: 2}) + expectResult(t, result, &sqltypes.Result{InsertID: 2}) } func TestStreamingInsertSelectGenerate(t *testing.T) { @@ -1814,23 +1958,26 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - ins.ColVindexes = ks.Tables["t1"].ColumnVindexes + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + nil, + [][]int{ + {1}}, // The primary vindex has a single column as sharding key + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1839,8 +1986,6 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { Query: "dummy_generate", Offset: 1, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1881,19 +2026,19 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + // first we send the insert to the 20- shard - `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1), (:_c2_0, :_c2_1) ` + 
`{_c0_0: type:VARCHAR value:"a" ` + `_c0_1: type:INT64 value:"1" ` + `_c2_0: type:VARCHAR value:"b" ` + `_c2_1: type:INT64 value:"3"} ` + // next we send the insert to the -20 shard - `sharded.-20: prefix values (:_c1_0, :_c1_1) suffix ` + + `sharded.-20: prefix values (:_c1_0, :_c1_1) ` + `{_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"2"} ` + `true false`, }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. - expectResult(t, "Execute", output, &sqltypes.Result{InsertID: 2}) + expectResult(t, output, &sqltypes.Result{InsertID: 2}) } func TestInsertSelectGenerateNotProvided(t *testing.T) { @@ -1910,23 +2055,24 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = ks.Tables["t1"].ColumnVindexes + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + nil, + [][]int{{1}}, // The primary vindex has a single column as sharding key, + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -1935,8 +2081,6 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Query: "dummy_generate", Offset: 2, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -1972,16 
+2116,16 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { `ExecuteStandalone dummy_generate n: type:INT64 value:"3" ks2 -20`, `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + - `sharded.20-: prefix values (:_c0_0, :_c0_1, :_c0_2), (:_c2_0, :_c2_1, :_c2_2) suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1, :_c0_2), (:_c2_0, :_c2_1, :_c2_2) ` + `{_c0_0: type:VARCHAR value:"a" _c0_1: type:INT64 value:"1" _c0_2: type:INT64 value:"10" ` + `_c2_0: type:VARCHAR value:"b" _c2_1: type:INT64 value:"3" _c2_2: type:INT64 value:"12"} ` + - `sharded.-20: prefix values (:_c1_0, :_c1_1, :_c1_2) suffix ` + + `sharded.-20: prefix values (:_c1_0, :_c1_1, :_c1_2) ` + `{_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"2" _c1_2: type:INT64 value:"11"} ` + `true false`, }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. 
- expectResult(t, "Execute", result, &sqltypes.Result{InsertID: 10}) + expectResult(t, result, &sqltypes.Result{InsertID: 10}) } func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { @@ -1998,23 +2142,24 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {1}}, // The primary vindex has a single column as sharding key - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = ks.Tables["t1"].ColumnVindexes + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t1"], + "prefix ", + nil, + [][]int{{1}}, // The primary vindex has a single column as sharding key, + rb, + ) ins.Generate = &Generate{ Keyspace: &vindexes.Keyspace{ Name: "ks2", @@ -2023,8 +2168,6 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Query: "dummy_generate", Offset: 2, } - ins.Prefix = "prefix " - ins.Suffix = " suffix" vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -2064,16 +2207,16 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { `ExecuteStandalone dummy_generate n: type:INT64 value:"3" ks2 -20`, `ResolveDestinations sharded [value:"0" value:"1" value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(4eb190c9a2fa169c)`, `ExecuteMultiShard ` + - `sharded.20-: prefix values (:_c0_0, :_c0_1, :_c0_2), (:_c2_0, :_c2_1, :_c2_2) 
suffix ` + + `sharded.20-: prefix values (:_c0_0, :_c0_1, :_c0_2), (:_c2_0, :_c2_1, :_c2_2) ` + `{_c0_0: type:VARCHAR value:"a" _c0_1: type:INT64 value:"1" _c0_2: type:INT64 value:"10" ` + `_c2_0: type:VARCHAR value:"b" _c2_1: type:INT64 value:"3" _c2_2: type:INT64 value:"12"} ` + - `sharded.-20: prefix values (:_c1_0, :_c1_1, :_c1_2) suffix ` + + `sharded.-20: prefix values (:_c1_0, :_c1_1, :_c1_2) ` + `{_c1_0: type:VARCHAR value:"a" _c1_1: type:INT64 value:"2" _c1_2: type:INT64 value:"11"} ` + `true false`, }) // The insert id returned by ExecuteMultiShard should be overwritten by processGenerateFromValues. - expectResult(t, "Execute", output, &sqltypes.Result{InsertID: 10}) + expectResult(t, output, &sqltypes.Result{InsertID: 10}) } func TestInsertSelectUnowned(t *testing.T) { @@ -2096,25 +2239,24 @@ func TestInsertSelectUnowned(t *testing.T) { Name: "onecol", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: ks.Keyspace, - Query: "dummy_insert", - VindexValueOffset: [][]int{ - {0}}, // the onecol vindex as unowned lookup sharding column - Input: &Route{ - Query: "dummy_select", - FieldQuery: "dummy_field_query", - RoutingParameters: &RoutingParameters{ - Opcode: Scatter, - Keyspace: ks.Keyspace}}} - - ins.ColVindexes = append(ins.ColVindexes, ks.Tables["t2"].ColumnVindexes...) 
- ins.Prefix = "prefix " - ins.Suffix = " suffix" + rb := &Route{ + Query: "dummy_select", + FieldQuery: "dummy_field_query", + RoutingParameters: &RoutingParameters{ + Opcode: Scatter, + Keyspace: ks.Keyspace}} + ins := newInsertSelect( + false, + ks.Keyspace, + ks.Tables["t2"], + "prefix ", + nil, + [][]int{{0}}, // // the onecol vindex as unowned lookup sharding column + rb, + ) vc := newDMLTestVCursor("-20", "20-") vc.shardForKsid = []string{"20-", "-20", "20-"} @@ -2141,11 +2283,11 @@ func TestInsertSelectUnowned(t *testing.T) { // insert values into the main table `ExecuteMultiShard ` + // first we insert two rows on the 20- shard - `sharded.20-: prefix values (:_c0_0), (:_c2_0) suffix ` + + `sharded.20-: prefix values (:_c0_0), (:_c2_0) ` + `{_c0_0: type:INT64 value:"1" _c2_0: type:INT64 value:"2"} ` + // next we insert one row on the -20 shard - `sharded.-20: prefix values (:_c1_0) suffix ` + + `sharded.-20: prefix values (:_c1_0) ` + `{_c1_0: type:INT64 value:"3"} ` + `true false`}) @@ -2169,11 +2311,11 @@ func TestInsertSelectUnowned(t *testing.T) { // insert values into the main table `ExecuteMultiShard ` + // first we insert two rows on the 20- shard - `sharded.20-: prefix values (:_c0_0), (:_c2_0) suffix ` + + `sharded.20-: prefix values (:_c0_0), (:_c2_0) ` + `{_c0_0: type:INT64 value:"1" _c2_0: type:INT64 value:"2"} ` + // next we insert one row on the -20 shard - `sharded.-20: prefix values (:_c1_0) suffix ` + + `sharded.-20: prefix values (:_c1_0) ` + `{_c1_0: type:INT64 value:"3"} ` + `true false`}) } @@ -2201,7 +2343,7 @@ func TestInsertSelectShardingCases(t *testing.T) { "uks2": {Tables: map[string]*vschemapb.Table{"u2": {}}}, }} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) sks1 := vs.Keyspaces["sks1"] sks2 := vs.Keyspaces["sks2"] uks1 := vs.Keyspaces["uks1"] @@ -2220,16 +2362,15 @@ func TestInsertSelectShardingCases(t *testing.T) { RoutingParameters: &RoutingParameters{Opcode: 
Unsharded, Keyspace: uks2.Keyspace}} // sks1 and sks2 - ins := &Insert{ - Opcode: InsertSelect, - Keyspace: sks1.Keyspace, - Query: "dummy_insert", - Prefix: "prefix ", - Suffix: " suffix", - ColVindexes: sks1.Tables["s1"].ColumnVindexes, - VindexValueOffset: [][]int{{0}}, - Input: sRoute, - } + ins := newInsertSelect( + false, + sks1.Keyspace, + sks1.Tables["s1"], + "prefix ", + nil, + [][]int{{0}}, + sRoute, + ) vc := &loggingVCursor{ resolvedTargetTabletType: topodatapb.TabletType_PRIMARY, @@ -2252,7 +2393,7 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations sks1 [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) vc.Rewind() err = ins.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { @@ -2266,7 +2407,7 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations sks1 [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) // sks1 and uks2 ins.Input = uRoute @@ -2281,7 +2422,7 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations sks1 [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) vc.Rewind() err = ins.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { @@ -2295,17 
+2436,18 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations sks1 [value:"0"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, - `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard sks1.-20: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) // uks1 and sks2 - ins = &Insert{ - Opcode: InsertUnsharded, - Keyspace: uks1.Keyspace, - Query: "dummy_insert", - Prefix: "prefix ", - Suffix: " suffix", - Input: sRoute, - } + ins = newInsertSelect( + false, + uks1.Keyspace, + nil, + "prefix ", + nil, + nil, + sRoute, + ) vc.Rewind() _, err = ins.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) @@ -2317,7 +2459,7 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations uks1 [] Destinations:DestinationAllShards()`, - `ExecuteMultiShard uks1.0: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard uks1.0: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) vc.Rewind() err = ins.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { @@ -2331,7 +2473,7 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations uks1 [] Destinations:DestinationAllShards()`, - `ExecuteMultiShard uks1.0: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard uks1.0: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) // uks1 and uks2 ins.Input = uRoute @@ -2346,7 +2488,7 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations uks1 [] Destinations:DestinationAllShards()`, - `ExecuteMultiShard uks1.0: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard uks1.0: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true 
true`}) vc.Rewind() err = ins.TryStreamExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false, func(result *sqltypes.Result) error { @@ -2360,5 +2502,5 @@ func TestInsertSelectShardingCases(t *testing.T) { // the query exec `ResolveDestinations uks1 [] Destinations:DestinationAllShards()`, - `ExecuteMultiShard uks1.0: prefix values (:_c0_0) suffix {_c0_0: type:INT64 value:"1"} true true`}) + `ExecuteMultiShard uks1.0: prefix values (:_c0_0) {_c0_0: type:INT64 value:"1"} true true`}) } diff --git a/go/vt/vtgate/engine/join.go b/go/vt/vtgate/engine/join.go index ef50389c989..dc952673cfe 100644 --- a/go/vt/vtgate/engine/join.go +++ b/go/vt/vtgate/engine/join.go @@ -17,6 +17,7 @@ limitations under the License. package engine import ( + "bytes" "context" "fmt" "strings" @@ -61,7 +62,7 @@ func (jn *Join) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st result := &sqltypes.Result{} if len(lresult.Rows) == 0 && wantfields { for k, col := range jn.Vars { - joinVars[k] = bindvarForType(lresult.Fields[col].Type) + joinVars[k] = bindvarForType(lresult.Fields[col]) } rresult, err := jn.Right.GetFields(ctx, vcursor, combineVars(bindVars, joinVars)) if err != nil { @@ -95,19 +96,21 @@ func (jn *Join) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st return result, nil } -func bindvarForType(t querypb.Type) *querypb.BindVariable { +func bindvarForType(field *querypb.Field) *querypb.BindVariable { bv := &querypb.BindVariable{ - Type: t, + Type: field.Type, Value: nil, } - switch t { + switch field.Type { case querypb.Type_INT8, querypb.Type_UINT8, querypb.Type_INT16, querypb.Type_UINT16, querypb.Type_INT32, querypb.Type_UINT32, querypb.Type_INT64, querypb.Type_UINT64: bv.Value = []byte("0") case querypb.Type_FLOAT32, querypb.Type_FLOAT64: bv.Value = []byte("0e0") case querypb.Type_DECIMAL: - bv.Value = []byte("0.0") + size := max(1, int(field.ColumnLength-field.Decimals)) + scale := max(1, int(field.Decimals)) + bv.Value = 
append(append(bytes.Repeat([]byte{'0'}, size), byte('.')), bytes.Repeat([]byte{'0'}, scale)...) default: return sqltypes.NullBindVariable } @@ -225,7 +228,7 @@ func joinFields(lfields, rfields []*querypb.Field, cols []int) []*querypb.Field return fields } -func joinRows(lrow, rrow []sqltypes.Value, cols []int) []sqltypes.Value { +func joinRows(lrow, rrow sqltypes.Row, cols []int) sqltypes.Row { row := make([]sqltypes.Value, len(cols)) for i, index := range cols { if index < 0 { diff --git a/go/vt/vtgate/engine/join_test.go b/go/vt/vtgate/engine/join_test.go index 2df507f9512..eef5810ce69 100644 --- a/go/vt/vtgate/engine/join_test.go +++ b/go/vt/vtgate/engine/join_test.go @@ -89,7 +89,7 @@ func TestJoinExecute(t *testing.T) { `Execute a: type:INT64 value:"10" bv: type:VARCHAR value:"b" false`, `Execute a: type:INT64 value:"10" bv: type:VARCHAR value:"c" false`, }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col2|col4|col5", "int64|varchar|int64|varchar", @@ -116,7 +116,7 @@ func TestJoinExecute(t *testing.T) { `Execute a: type:INT64 value:"10" bv: type:VARCHAR value:"b" false`, `Execute a: type:INT64 value:"10" bv: type:VARCHAR value:"c" false`, }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col2|col4|col5", "int64|varchar|int64|varchar", @@ -251,7 +251,7 @@ func TestJoinExecuteNoResult(t *testing.T) { "int64|varchar|int64|varchar", ), ) - expectResult(t, "jn.Execute", r, wantResult) + expectResult(t, r, wantResult) } func TestJoinExecuteErrors(t *testing.T) { @@ -389,7 +389,7 @@ func TestJoinStreamExecute(t *testing.T) { `StreamExecute bv: type:VARCHAR value:"b" false`, `StreamExecute bv: type:VARCHAR value:"c" false`, }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col2|col4|col5", 
"int64|varchar|int64|varchar", @@ -418,7 +418,7 @@ func TestJoinStreamExecute(t *testing.T) { `StreamExecute bv: type:VARCHAR value:"b" false`, `StreamExecute bv: type:VARCHAR value:"c" false`, }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col2|col4|col5", "int64|varchar|int64|varchar", @@ -475,7 +475,7 @@ func TestGetFields(t *testing.T) { `GetFields bv: `, `Execute bv: true`, }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col2|col4|col5", "int64|varchar|int64|varchar", diff --git a/go/vt/vtgate/engine/limit.go b/go/vt/vtgate/engine/limit.go index 4ef809ad1fa..01fcde6bd82 100644 --- a/go/vt/vtgate/engine/limit.go +++ b/go/vt/vtgate/engine/limit.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "strconv" + "sync" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -39,6 +40,8 @@ type Limit struct { Input Primitive } +var UpperLimitStr = "__upper_limit" + // RouteType returns a description of the query routing type used by the primitive func (l *Limit) RouteType() string { return l.Input.RouteType() @@ -62,7 +65,8 @@ func (l *Limit) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[st } // When offset is present, we hijack the limit value so we can calculate // the offset in memory from the result of the scatter query with count + offset. - bindVars["__upper_limit"] = sqltypes.Int64BindVariable(int64(count + offset)) + + bindVars[UpperLimitStr] = sqltypes.Int64BindVariable(int64(count + offset)) result, err := vcursor.ExecutePrimitive(ctx, l.Input, bindVars, wantfields) if err != nil { @@ -95,10 +99,13 @@ func (l *Limit) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars // When offset is present, we hijack the limit value so we can calculate // the offset in memory from the result of the scatter query with count + offset. 
- bindVars["__upper_limit"] = sqltypes.Int64BindVariable(int64(count + offset)) + bindVars[UpperLimitStr] = sqltypes.Int64BindVariable(int64(count + offset)) + var mu sync.Mutex err = vcursor.StreamExecutePrimitive(ctx, l.Input, bindVars, wantfields, func(qr *sqltypes.Result) error { - if len(qr.Fields) != 0 { + mu.Lock() + defer mu.Unlock() + if wantfields && len(qr.Fields) != 0 { if err := callback(&sqltypes.Result{Fields: qr.Fields}); err != nil { return err } diff --git a/go/vt/vtgate/engine/limit_test.go b/go/vt/vtgate/engine/limit_test.go index d5c6602f820..8b91dadecb5 100644 --- a/go/vt/vtgate/engine/limit_test.go +++ b/go/vt/vtgate/engine/limit_test.go @@ -130,7 +130,7 @@ func TestLimitExecute(t *testing.T) { results: []*sqltypes.Result{inputResult}, } l = &Limit{ - Count: evalengine.NewBindVar("l", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + Count: evalengine.NewBindVar("l", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)), Input: fp, } @@ -343,8 +343,8 @@ func TestLimitOffsetExecute(t *testing.T) { } l = &Limit{ - Count: evalengine.NewBindVar("l", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), - Offset: evalengine.NewBindVar("o", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + Count: evalengine.NewBindVar("l", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)), + Offset: evalengine.NewBindVar("o", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)), Input: fp, } result, err = l.TryExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{"l": sqltypes.Int64BindVariable(1), "o": sqltypes.Int64BindVariable(1)}, false) @@ -396,7 +396,7 @@ func TestLimitStreamExecute(t *testing.T) { // Test with bind vars. 
fp.rewind() - l.Count = evalengine.NewBindVar("l", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + l.Count = evalengine.NewBindVar("l", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) results = nil err = l.TryStreamExecute(context.Background(), &noopVCursor{}, map[string]*querypb.BindVariable{"l": sqltypes.Int64BindVariable(2)}, true, func(qr *sqltypes.Result) error { results = append(results, qr) @@ -451,6 +451,73 @@ func TestLimitStreamExecute(t *testing.T) { } } +func TestLimitStreamExecuteAsync(t *testing.T) { + bindVars := make(map[string]*querypb.BindVariable) + fields := sqltypes.MakeTestFields( + "col1|col2", + "int64|varchar", + ) + inputResults := sqltypes.MakeTestStreamingResults( + fields, + "a|1", + "b|2", + "d|3", + "e|4", + "a|1", + "b|2", + "d|3", + "e|4", + "---", + "c|7", + "x|8", + "y|9", + "c|7", + "x|8", + "y|9", + "c|7", + "x|8", + "y|9", + "---", + "l|4", + "m|5", + "n|6", + "l|4", + "m|5", + "n|6", + "l|4", + "m|5", + "n|6", + ) + fp := &fakePrimitive{ + results: inputResults, + async: true, + } + + const maxCount = 26 + for i := 0; i <= maxCount*20; i++ { + expRows := i + l := &Limit{ + Count: evalengine.NewLiteralInt(int64(expRows)), + Input: fp, + } + // Test with limit smaller than input. + results := &sqltypes.Result{} + + err := l.TryStreamExecute(context.Background(), &noopVCursor{}, bindVars, true, func(qr *sqltypes.Result) error { + if qr != nil { + results.Rows = append(results.Rows, qr.Rows...) 
+ } + return nil + }) + require.NoError(t, err) + if expRows > maxCount { + expRows = maxCount + } + require.Len(t, results.Rows, expRows) + } + +} + func TestOffsetStreamExecute(t *testing.T) { bindVars := make(map[string]*querypb.BindVariable) fields := sqltypes.MakeTestFields( @@ -540,7 +607,7 @@ func TestLimitInputFail(t *testing.T) { func TestLimitInvalidCount(t *testing.T) { l := &Limit{ - Count: evalengine.NewBindVar("l", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + Count: evalengine.NewBindVar("l", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)), } _, _, err := l.getCountAndOffset(context.Background(), &noopVCursor{}, nil) assert.EqualError(t, err, "query arguments missing for l") diff --git a/go/vt/vtgate/engine/lock.go b/go/vt/vtgate/engine/lock.go index c1701f6c166..7739cbcd0cc 100644 --- a/go/vt/vtgate/engine/lock.go +++ b/go/vt/vtgate/engine/lock.go @@ -38,6 +38,9 @@ var _ Primitive = (*Lock)(nil) // Lock primitive will execute sql containing lock functions type Lock struct { + noInputs + noTxNeeded + // Keyspace specifies the keyspace to send the query to. Keyspace *vindexes.Keyspace @@ -47,10 +50,6 @@ type Lock struct { FieldQuery string LockFunctions []*LockFunc - - noInputs - - noTxNeeded } type LockFunc struct { diff --git a/go/vt/vtgate/engine/memory_sort.go b/go/vt/vtgate/engine/memory_sort.go index b896b303923..d9919045eaf 100644 --- a/go/vt/vtgate/engine/memory_sort.go +++ b/go/vt/vtgate/engine/memory_sort.go @@ -23,6 +23,7 @@ import ( "reflect" "strconv" "strings" + "sync" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -58,11 +59,6 @@ func (ms *MemorySort) GetTableName() string { return ms.Input.GetTableName() } -// SetTruncateColumnCount sets the truncate column count. -func (ms *MemorySort) SetTruncateColumnCount(count int) { - ms.TruncateColumnCount = count -} - // TryExecute satisfies the Primitive interface. 
func (ms *MemorySort) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { count, err := ms.fetchCount(ctx, vcursor, bindVars) @@ -101,7 +97,11 @@ func (ms *MemorySort) TryStreamExecute(ctx context.Context, vcursor VCursor, bin Compare: ms.OrderBy, Limit: count, } + + var mu sync.Mutex err = vcursor.StreamExecutePrimitive(ctx, ms.Input, bindVars, wantfields, func(qr *sqltypes.Result) error { + mu.Lock() + defer mu.Unlock() if len(qr.Fields) != 0 { if err := cb(&sqltypes.Result{Fields: qr.Fields}); err != nil { return err @@ -138,7 +138,7 @@ func (ms *MemorySort) NeedsTransaction() bool { func (ms *MemorySort) fetchCount(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (int, error) { if ms.UpperLimit == nil { - return math.MaxInt64, nil + return math.MaxInt, nil } env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) resolved, err := env.Evaluate(ms.UpperLimit) diff --git a/go/vt/vtgate/engine/memory_sort_test.go b/go/vt/vtgate/engine/memory_sort_test.go index bc9369c57af..21d73613158 100644 --- a/go/vt/vtgate/engine/memory_sort_test.go +++ b/go/vt/vtgate/engine/memory_sort_test.go @@ -26,17 +26,9 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate/evalengine" ) -func init() { - // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) -} - func TestMemorySortExecute(t *testing.T) { fields := sqltypes.MakeTestFields( "c1|c2", @@ -75,7 +67,7 @@ func TestMemorySortExecute(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + 
ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -136,7 +128,7 @@ func TestMemorySortStreamExecuteWeightString(t *testing.T) { t.Run("Limit test", func(t *testing.T) { fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil @@ -194,7 +186,7 @@ func TestMemorySortExecuteWeightString(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -225,11 +217,11 @@ func TestMemorySortStreamExecuteCollation(t *testing.T) { )}, } - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") ms := &MemorySort{ OrderBy: []evalengine.OrderByParams{{ Col: 0, - Type: evalengine.Type{Type: sqltypes.VarChar, Coll: collationID}, + Type: evalengine.NewType(sqltypes.VarChar, collationID), }}, Input: fp, } @@ -277,7 +269,7 @@ func TestMemorySortStreamExecuteCollation(t *testing.T) { t.Run("Limit test", func(t *testing.T) { fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: 
collations.CollationBinaryID}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil @@ -313,11 +305,11 @@ func TestMemorySortExecuteCollation(t *testing.T) { )}, } - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") ms := &MemorySort{ OrderBy: []evalengine.OrderByParams{{ Col: 0, - Type: evalengine.Type{Type: sqltypes.VarChar, Coll: collationID}, + Type: evalengine.NewType(sqltypes.VarChar, collationID), }}, Input: fp, } @@ -336,7 +328,7 @@ func TestMemorySortExecuteCollation(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -393,7 +385,7 @@ func TestMemorySortStreamExecute(t *testing.T) { utils.MustMatch(t, wantResults, results) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} results = nil @@ -552,7 +544,7 @@ func TestMemorySortMultiColumn(t *testing.T) { utils.MustMatch(t, wantResult, result) fp.rewind() - ms.UpperLimit = evalengine.NewBindVar("__upper_limit", evalengine.Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}) + ms.UpperLimit = 
evalengine.NewBindVar("__upper_limit", evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID)) bv := map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)} result, err = ms.TryExecute(context.Background(), &noopVCursor{}, bv, false) @@ -657,3 +649,57 @@ func TestMemorySortExecuteNoVarChar(t *testing.T) { t.Errorf("StreamExecute err: %v, want %v", err, want) } } + +func TestMemorySortStreamAsync(t *testing.T) { + fields := sqltypes.MakeTestFields( + "c1|c2", + "varbinary|decimal", + ) + fp := &fakePrimitive{ + results: sqltypes.MakeTestStreamingResults( + fields, + "a|1", + "g|2", + "a|1", + "---", + "c|3", + "g|2", + "a|1", + "---", + "c|4", + "c|3", + "g|2", + "a|1", + "---", + "c|4", + "c|3", + "g|2", + "a|1", + "---", + "c|4", + "c|3", + ), + async: true, + } + + ms := &MemorySort{ + OrderBy: []evalengine.OrderByParams{{ + WeightStringCol: -1, + Col: 1, + }}, + Input: fp, + } + + qr := &sqltypes.Result{} + err := ms.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(res *sqltypes.Result) error { + qr.Rows = append(qr.Rows, res.Rows...) + return nil + }) + require.NoError(t, err) + require.NoError(t, sqltypes.RowsEqualsStr( + `[[VARBINARY("a") DECIMAL(1)] [VARBINARY("a") DECIMAL(1)] [VARBINARY("a") DECIMAL(1)] [VARBINARY("a") DECIMAL(1)] [VARBINARY("a") DECIMAL(1)] +[VARBINARY("g") DECIMAL(2)] [VARBINARY("g") DECIMAL(2)] [VARBINARY("g") DECIMAL(2)] [VARBINARY("g") DECIMAL(2)] +[VARBINARY("c") DECIMAL(3)] [VARBINARY("c") DECIMAL(3)] [VARBINARY("c") DECIMAL(3)] [VARBINARY("c") DECIMAL(3)] +[VARBINARY("c") DECIMAL(4)] [VARBINARY("c") DECIMAL(4)] [VARBINARY("c") DECIMAL(4)]]`, + qr.Rows)) +} diff --git a/go/vt/vtgate/engine/merge_sort.go b/go/vt/vtgate/engine/merge_sort.go index 3c26a383594..fac57c37ccb 100644 --- a/go/vt/vtgate/engine/merge_sort.go +++ b/go/vt/vtgate/engine/merge_sort.go @@ -48,11 +48,12 @@ var _ Primitive = (*MergeSort)(nil) // be used like other Primitives in VTGate. 
However, it satisfies the Primitive API // so that vdiff can use it. In that situation, only StreamExecute is used. type MergeSort struct { + noInputs + noTxNeeded + Primitives []StreamExecutor OrderBy evalengine.Comparison ScatterErrorsAsWarnings bool - noInputs - noTxNeeded } // RouteType satisfies Primitive. diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index 803c70ca463..6b383e12572 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -179,10 +179,10 @@ func TestMergeSortCollation(t *testing.T) { ), }} - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") orderBy := []evalengine.OrderByParams{{ Col: 0, - Type: evalengine.Type{Type: sqltypes.VarChar, Coll: collationID}, + Type: evalengine.NewType(sqltypes.VarChar, collationID), }} var results []*sqltypes.Result diff --git a/go/vt/vtgate/engine/mstream.go b/go/vt/vtgate/engine/mstream.go index 033196ef576..af24199026b 100644 --- a/go/vt/vtgate/engine/mstream.go +++ b/go/vt/vtgate/engine/mstream.go @@ -30,6 +30,9 @@ var _ Primitive = (*MStream)(nil) // MStream is an operator for message streaming from specific keyspace, destination type MStream struct { + noTxNeeded + noInputs + // Keyspace specifies the keyspace to stream messages from Keyspace *vindexes.Keyspace @@ -38,10 +41,6 @@ type MStream struct { // TableName specifies the table on which stream will be executed. 
TableName string - - noTxNeeded - - noInputs } // RouteType implements the Primitive interface diff --git a/go/vt/vtgate/engine/online_ddl.go b/go/vt/vtgate/engine/online_ddl.go index c972fee66e9..9acf55869bc 100644 --- a/go/vt/vtgate/engine/online_ddl.go +++ b/go/vt/vtgate/engine/online_ddl.go @@ -20,7 +20,6 @@ import ( "context" "fmt" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -35,16 +34,15 @@ var _ Primitive = (*OnlineDDL)(nil) // OnlineDDL represents the instructions to perform an online schema change via vtctld type OnlineDDL struct { + noTxNeeded + noInputs + Keyspace *vindexes.Keyspace DDL sqlparser.DDLStatement SQL string DDLStrategySetting *schema.DDLStrategySetting // TargetDestination specifies an explicit target destination to send the query to. TargetDestination key.Destination - - noTxNeeded - - noInputs } func (v *OnlineDDL) description() PrimitiveDescription { @@ -79,7 +77,7 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma { Name: "uuid", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(vcursor.ConnCollation()), }, }, Rows: [][]sqltypes.Value{}, @@ -90,7 +88,7 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma migrationContext = fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()) } onlineDDLs, err := schema.NewOnlineDDLs(v.GetKeyspaceName(), v.SQL, v.DDL, - v.DDLStrategySetting, migrationContext, "", + v.DDLStrategySetting, migrationContext, "", vcursor.Environment().Parser(), ) if err != nil { return result, err diff --git a/go/vt/vtgate/engine/opcode/constants.go b/go/vt/vtgate/engine/opcode/constants.go index dd73a78974d..28c09de0fd6 100644 --- a/go/vt/vtgate/engine/opcode/constants.go +++ b/go/vt/vtgate/engine/opcode/constants.go @@ -19,8 +19,10 @@ package opcode import ( "fmt" + "vitess.io/vitess/go/mysql/collations" 
"vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) // PulloutOpcode is a number representing the opcode @@ -74,21 +76,11 @@ const ( AggregateAnyValue AggregateCountStar AggregateGroupConcat + AggregateAvg + AggregateUDF // This is an opcode used to represent UDFs _NumOfOpCodes // This line must be last of the opcodes! ) -var ( - // OpcodeType keeps track of the known output types for different aggregate functions - OpcodeType = map[AggregateOpcode]querypb.Type{ - AggregateCountDistinct: sqltypes.Int64, - AggregateCount: sqltypes.Int64, - AggregateCountStar: sqltypes.Int64, - AggregateSumDistinct: sqltypes.Decimal, - AggregateSum: sqltypes.Decimal, - AggregateGtid: sqltypes.VarChar, - } -) - // SupportedAggregates maps the list of supported aggregate // functions to their opcodes. var SupportedAggregates = map[string]AggregateOpcode{ @@ -96,6 +88,7 @@ var SupportedAggregates = map[string]AggregateOpcode{ "sum": AggregateSum, "min": AggregateMin, "max": AggregateMax, + "avg": AggregateAvg, // These functions don't exist in mysql, but are used // to display the plan. 
"count_distinct": AggregateCountDistinct, @@ -117,6 +110,7 @@ var AggregateName = map[AggregateOpcode]string{ AggregateCountStar: "count_star", AggregateGroupConcat: "group_concat", AggregateAnyValue: "any_value", + AggregateAvg: "avg", } func (code AggregateOpcode) String() string { @@ -134,7 +128,7 @@ func (code AggregateOpcode) MarshalJSON() ([]byte, error) { } // Type returns the opcode return sql type, and a bool telling is we are sure about this type or not -func (code AggregateOpcode) Type(typ querypb.Type) querypb.Type { +func (code AggregateOpcode) SQLType(typ querypb.Type) querypb.Type { switch code { case AggregateUnassigned: return sqltypes.Null @@ -148,7 +142,7 @@ func (code AggregateOpcode) Type(typ querypb.Type) querypb.Type { return sqltypes.Text case AggregateMax, AggregateMin, AggregateAnyValue: return typ - case AggregateSumDistinct, AggregateSum: + case AggregateSumDistinct, AggregateSum, AggregateAvg: if typ == sqltypes.Unknown { return sqltypes.Unknown } @@ -160,11 +154,35 @@ func (code AggregateOpcode) Type(typ querypb.Type) querypb.Type { return sqltypes.Int64 case AggregateGtid: return sqltypes.VarChar + case AggregateUDF: + return sqltypes.Unknown default: panic(code.String()) // we have a unit test checking we never reach here } } +func (code AggregateOpcode) Nullable() bool { + switch code { + case AggregateCount, AggregateCountStar: + return false + default: + return true + } +} + +func (code AggregateOpcode) ResolveType(t evalengine.Type, env *collations.Environment) evalengine.Type { + sqltype := code.SQLType(t.Type()) + collation := collations.CollationForType(sqltype, env.DefaultConnectionCharset()) + nullable := code.Nullable() + size := t.Size() + + scale := t.Scale() + if code == AggregateAvg { + scale += 4 + } + return evalengine.NewTypeEx(sqltype, collation, nullable, size, scale, t.Values()) +} + func (code AggregateOpcode) NeedsComparableValues() bool { switch code { case AggregateCountDistinct, AggregateSumDistinct, 
AggregateMin, AggregateMax: diff --git a/go/vt/vtgate/engine/opcode/constants_test.go b/go/vt/vtgate/engine/opcode/constants_test.go index 50cfc49a71c..5687a42433d 100644 --- a/go/vt/vtgate/engine/opcode/constants_test.go +++ b/go/vt/vtgate/engine/opcode/constants_test.go @@ -17,14 +17,147 @@ limitations under the License. package opcode import ( + "encoding/json" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" ) func TestCheckAllAggrOpCodes(t *testing.T) { // This test is just checking that we never reach the panic when using Type() on valid opcodes for i := AggregateOpcode(0); i < _NumOfOpCodes; i++ { - i.Type(sqltypes.Null) + i.SQLType(sqltypes.Null) + } +} + +func TestType(t *testing.T) { + tt := []struct { + opcode AggregateOpcode + typ querypb.Type + out querypb.Type + }{ + {AggregateUnassigned, sqltypes.VarChar, sqltypes.Null}, + {AggregateGroupConcat, sqltypes.VarChar, sqltypes.Text}, + {AggregateGroupConcat, sqltypes.Blob, sqltypes.Blob}, + {AggregateGroupConcat, sqltypes.Unknown, sqltypes.Unknown}, + {AggregateMax, sqltypes.Int64, sqltypes.Int64}, + {AggregateMax, sqltypes.Float64, sqltypes.Float64}, + {AggregateSumDistinct, sqltypes.Unknown, sqltypes.Unknown}, + {AggregateSumDistinct, sqltypes.Int64, sqltypes.Decimal}, + {AggregateSumDistinct, sqltypes.Decimal, sqltypes.Decimal}, + {AggregateCount, sqltypes.Int32, sqltypes.Int64}, + {AggregateCountStar, sqltypes.Int64, sqltypes.Int64}, + {AggregateGtid, sqltypes.VarChar, sqltypes.VarChar}, + } + + for _, tc := range tt { + t.Run(tc.opcode.String()+"_"+tc.typ.String(), func(t *testing.T) { + out := tc.opcode.SQLType(tc.typ) + assert.Equal(t, tc.out, out) + }) + } +} + +func TestType_Panic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + errMsg, ok := r.(string) + assert.True(t, ok, "Expected a string panic message") + assert.Contains(t, errMsg, "ERROR", "Expected panic 
message containing 'ERROR'") + } + }() + AggregateOpcode(999).SQLType(sqltypes.VarChar) +} + +func TestNeedsListArg(t *testing.T) { + tt := []struct { + opcode PulloutOpcode + out bool + }{ + {PulloutValue, false}, + {PulloutIn, true}, + {PulloutNotIn, true}, + {PulloutExists, false}, + {PulloutNotExists, false}, + } + + for _, tc := range tt { + t.Run(tc.opcode.String(), func(t *testing.T) { + out := tc.opcode.NeedsListArg() + assert.Equal(t, tc.out, out) + }) + } +} + +func TestPulloutOpcode_MarshalJSON(t *testing.T) { + tt := []struct { + opcode PulloutOpcode + out string + }{ + {PulloutValue, "\"PulloutValue\""}, + {PulloutIn, "\"PulloutIn\""}, + {PulloutNotIn, "\"PulloutNotIn\""}, + {PulloutExists, "\"PulloutExists\""}, + {PulloutNotExists, "\"PulloutNotExists\""}, + } + + for _, tc := range tt { + t.Run(tc.opcode.String(), func(t *testing.T) { + out, err := json.Marshal(tc.opcode) + require.NoError(t, err, "Unexpected error") + assert.Equal(t, tc.out, string(out)) + }) + } +} + +func TestAggregateOpcode_MarshalJSON(t *testing.T) { + tt := []struct { + opcode AggregateOpcode + out string + }{ + {AggregateCount, "\"count\""}, + {AggregateSum, "\"sum\""}, + {AggregateMin, "\"min\""}, + {AggregateMax, "\"max\""}, + {AggregateCountDistinct, "\"count_distinct\""}, + {AggregateSumDistinct, "\"sum_distinct\""}, + {AggregateGtid, "\"vgtid\""}, + {AggregateCountStar, "\"count_star\""}, + {AggregateGroupConcat, "\"group_concat\""}, + {AggregateAnyValue, "\"any_value\""}, + {AggregateAvg, "\"avg\""}, + {999, "\"ERROR\""}, + } + + for _, tc := range tt { + t.Run(tc.opcode.String(), func(t *testing.T) { + out, err := json.Marshal(tc.opcode) + require.NoError(t, err, "Unexpected error") + assert.Equal(t, tc.out, string(out)) + }) + } +} + +func TestNeedsComparableValues(t *testing.T) { + for i := AggregateOpcode(0); i < _NumOfOpCodes; i++ { + if i == AggregateCountDistinct || i == AggregateSumDistinct || i == AggregateMin || i == AggregateMax { + assert.True(t, 
i.NeedsComparableValues()) + } else { + assert.False(t, i.NeedsComparableValues()) + } + } +} + +func TestIsDistinct(t *testing.T) { + for i := AggregateOpcode(0); i < _NumOfOpCodes; i++ { + if i == AggregateCountDistinct || i == AggregateSumDistinct { + assert.True(t, i.IsDistinct()) + } else { + assert.False(t, i.IsDistinct()) + } } } diff --git a/go/vt/vtgate/engine/ordered_aggregate.go b/go/vt/vtgate/engine/ordered_aggregate.go index 07ea06fa5fd..b67483216cf 100644 --- a/go/vt/vtgate/engine/ordered_aggregate.go +++ b/go/vt/vtgate/engine/ordered_aggregate.go @@ -28,13 +28,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/evalengine" ) -var ( - // Some predefined values - countZero = sqltypes.MakeTrusted(sqltypes.Int64, []byte("0")) - countOne = sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) - sumZero = sqltypes.MakeTrusted(sqltypes.Decimal, []byte("0")) -) - var _ Primitive = (*OrderedAggregate)(nil) // OrderedAggregate is a primitive that expects the underlying primitive @@ -67,6 +60,7 @@ type GroupByParams struct { Expr sqlparser.Expr FromGroupBy bool Type evalengine.Type + CollationEnv *collations.Environment } // String returns a string. Used for plan descriptions @@ -78,8 +72,8 @@ func (gbp GroupByParams) String() string { out = fmt.Sprintf("(%d|%d)", gbp.KeyCol, gbp.WeightStringCol) } - if sqltypes.IsText(gbp.Type.Type) && gbp.Type.Coll != collations.Unknown { - out += " COLLATE " + collations.Local().LookupName(gbp.Type.Coll) + if sqltypes.IsText(gbp.Type.Type()) && gbp.Type.Collation() != collations.Unknown { + out += " COLLATE " + gbp.CollationEnv.LookupName(gbp.Type.Collation()) } return out @@ -100,11 +94,6 @@ func (oa *OrderedAggregate) GetTableName() string { return oa.Input.GetTableName() } -// SetTruncateColumnCount sets the truncate column count. -func (oa *OrderedAggregate) SetTruncateColumnCount(count int) { - oa.TruncateColumnCount = count -} - // TryExecute is a Primitive function. 
func (oa *OrderedAggregate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, _ bool) (*sqltypes.Result, error) { qr, err := oa.execute(ctx, vcursor, bindVars) @@ -348,14 +337,14 @@ func (oa *OrderedAggregate) nextGroupBy(currentKey, nextRow []sqltypes.Value) (n return nextRow, true, nil } - cmp, err := evalengine.NullsafeCompare(v1, v2, gb.Type.Coll) + cmp, err := evalengine.NullsafeCompare(v1, v2, gb.CollationEnv, gb.Type.Collation(), gb.Type.Values()) if err != nil { _, isCollationErr := err.(evalengine.UnsupportedCollationError) if !isCollationErr || gb.WeightStringCol == -1 { return nil, false, err } gb.KeyCol = gb.WeightStringCol - cmp, err = evalengine.NullsafeCompare(currentKey[gb.WeightStringCol], nextRow[gb.WeightStringCol], gb.Type.Coll) + cmp, err = evalengine.NullsafeCompare(currentKey[gb.WeightStringCol], nextRow[gb.WeightStringCol], gb.CollationEnv, gb.Type.Collation(), gb.Type.Values()) if err != nil { return nil, false, err } diff --git a/go/vt/vtgate/engine/ordered_aggregate_test.go b/go/vt/vtgate/engine/ordered_aggregate_test.go index 2eca4fc7ba9..3eaa63819e4 100644 --- a/go/vt/vtgate/engine/ordered_aggregate_test.go +++ b/go/vt/vtgate/engine/ordered_aggregate_test.go @@ -32,19 +32,9 @@ import ( "vitess.io/vitess/go/test/utils" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/servenv" . 
"vitess.io/vitess/go/vt/vtgate/engine/opcode" ) -var collationEnv *collations.Environment - -func init() { - // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) -} - func TestOrderedAggregateExecute(t *testing.T) { fields := sqltypes.MakeTestFields( "col|count(*)", @@ -62,7 +52,7 @@ func TestOrderedAggregateExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -94,7 +84,7 @@ func TestOrderedAggregateExecuteTruncate(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateSum, 1, "") + aggr := NewAggregateParam(AggregateSum, 1, "", collations.MySQL8()) aggr.OrigOpcode = AggregateCountStar oa := &OrderedAggregate{ @@ -134,7 +124,7 @@ func TestMinMaxFailsCorrectly(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateMax, 0, "") + aggr := NewAggregateParam(AggregateMax, 0, "", collations.MySQL8()) aggr.WCol = 1 oa := &ScalarAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -163,7 +153,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -202,7 +192,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 2}}, TruncateColumnCount: 2, Input: fp, @@ -305,8 +295,8 @@ func 
TestOrderedAggregateExecuteCountDistinct(t *testing.T) { )}, } - aggr1 := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)") - aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr1 := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)", collations.MySQL8()) + aggr2 := NewAggregateParam(AggregateSum, 2, "", collations.MySQL8()) aggr2.OrigOpcode = AggregateCountStar oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr1, aggr2}, @@ -374,12 +364,12 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { )}, } - aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr2 := NewAggregateParam(AggregateSum, 2, "", collations.MySQL8()) aggr2.OrigOpcode = AggregateCountDistinct oa := &OrderedAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)"), + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)", collations.MySQL8()), aggr2}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, @@ -460,8 +450,8 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { oa := &OrderedAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)"), - NewAggregateParam(AggregateSum, 2, ""), + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)", collations.MySQL8()), + NewAggregateParam(AggregateSum, 2, "", collations.MySQL8()), }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, @@ -504,7 +494,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -536,7 +526,7 @@ func TestOrderedAggregateKeysFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: 
[]*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -566,7 +556,7 @@ func TestOrderedAggregateMergeFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -627,7 +617,7 @@ func TestOrderedAggregateExecuteGtid(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateGtid, 1, "vgtid")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGtid, 1, "vgtid", collations.MySQL8())}, TruncateColumnCount: 2, Input: fp, } @@ -660,7 +650,7 @@ func TestCountDistinctOnVarchar(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)", collations.MySQL8()) aggr.WCol = 2 oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -720,7 +710,7 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)", collations.MySQL8()) aggr.WCol = 2 oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -782,7 +772,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct c2)") + aggr := NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct c2)", collations.MySQL8()) aggr.WCol = 2 oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -848,8 +838,8 @@ func TestMultiDistinct(t *testing.T) { oa := &OrderedAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateCountDistinct, 
1, "count(distinct c2)"), - NewAggregateParam(AggregateSumDistinct, 2, "sum(distinct c3)"), + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)", collations.MySQL8()), + NewAggregateParam(AggregateSumDistinct, 2, "sum(distinct c3)", collations.MySQL8()), }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, @@ -904,10 +894,11 @@ func TestOrderedAggregateCollate(t *testing.T) { )}, } + collationEnv := collations.MySQL8() collationID, _ := collationEnv.LookupID("utf8mb4_0900_ai_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, - GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.Type{Coll: collationID}}}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collationEnv)}, + GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.NewType(sqltypes.Unknown, collationID)}}, Input: fp, } @@ -942,10 +933,11 @@ func TestOrderedAggregateCollateAS(t *testing.T) { )}, } + collationEnv := collations.MySQL8() collationID, _ := collationEnv.LookupID("utf8mb4_0900_as_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, - GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.Type{Coll: collationID}}}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collationEnv)}, + GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.NewType(sqltypes.Unknown, collationID)}}, Input: fp, } @@ -982,10 +974,11 @@ func TestOrderedAggregateCollateKS(t *testing.T) { )}, } + collationEnv := collations.MySQL8() collationID, _ := collationEnv.LookupID("utf8mb4_ja_0900_as_cs_ks") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, - GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.Type{Coll: collationID}}}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collationEnv)}, + GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: 
evalengine.NewType(sqltypes.Unknown, collationID)}}, Input: fp, } @@ -1066,7 +1059,7 @@ func TestGroupConcatWithAggrOnEngine(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "group_concat(c2)")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "group_concat(c2)", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -1145,7 +1138,7 @@ func TestGroupConcat(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } diff --git a/go/vt/vtgate/engine/plan_description.go b/go/vt/vtgate/engine/plan_description.go index 72220fda460..a8daa25ecd0 100644 --- a/go/vt/vtgate/engine/plan_description.go +++ b/go/vt/vtgate/engine/plan_description.go @@ -21,6 +21,7 @@ import ( "encoding/json" "fmt" "sort" + "strings" "vitess.io/vitess/go/tools/graphviz" "vitess.io/vitess/go/vt/key" @@ -266,3 +267,11 @@ func (m orderedMap) MarshalJSON() ([]byte, error) { buf.WriteString("}") return buf.Bytes(), nil } + +func (m orderedMap) String() string { + var output []string + for _, val := range m { + output = append(output, fmt.Sprintf("%s:%v", val.key, val.val)) + } + return strings.Join(output, " ") +} diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index a3a37f97fe4..1c0e7de7a19 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" + 
"vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -87,7 +88,9 @@ type ( Session() SessionActions ConnCollation() collations.ID + Environment() *vtenv.Environment TimeZone() *time.Location + SQLMode() string ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedShard, query *querypb.BoundQuery, lockFuncType sqlparser.LockingFuncType) (*sqltypes.Result, error) diff --git a/go/vt/vtgate/engine/projection.go b/go/vt/vtgate/engine/projection.go index e0055baa757..6fb75bdf800 100644 --- a/go/vt/vtgate/engine/projection.go +++ b/go/vt/vtgate/engine/projection.go @@ -21,6 +21,7 @@ import ( "sync" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" @@ -31,10 +32,11 @@ var _ Primitive = (*Projection)(nil) // Projection can evaluate expressions and project the results type Projection struct { + noTxNeeded + Cols []string Exprs []evalengine.Expr Input Primitive - noTxNeeded } // RouteType implements the Primitive interface @@ -74,7 +76,7 @@ func (p *Projection) TryExecute(ctx context.Context, vcursor VCursor, bindVars m resultRows = append(resultRows, resultRow) } if wantfields { - result.Fields, err = p.evalFields(env, result.Fields) + result.Fields, err = p.evalFields(env, result.Fields, vcursor.ConnCollation()) if err != nil { return nil, err } @@ -88,11 +90,14 @@ func (p *Projection) TryStreamExecute(ctx context.Context, vcursor VCursor, bind env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var once sync.Once var fields []*querypb.Field + var mu sync.Mutex return vcursor.StreamExecutePrimitive(ctx, p.Input, bindVars, wantfields, func(qr *sqltypes.Result) error { var err error + mu.Lock() + defer mu.Unlock() if wantfields { once.Do(func() { - fields, err = p.evalFields(env, qr.Fields) + fields, err = p.evalFields(env, qr.Fields, vcursor.ConnCollation()) if err 
!= nil { return } @@ -131,14 +136,14 @@ func (p *Projection) GetFields(ctx context.Context, vcursor VCursor, bindVars ma return nil, err } env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) - qr.Fields, err = p.evalFields(env, qr.Fields) + qr.Fields, err = p.evalFields(env, qr.Fields, vcursor.ConnCollation()) if err != nil { return nil, err } return qr, nil } -func (p *Projection) evalFields(env *evalengine.ExpressionEnv, infields []*querypb.Field) ([]*querypb.Field, error) { +func (p *Projection) evalFields(env *evalengine.ExpressionEnv, infields []*querypb.Field, coll collations.ID) ([]*querypb.Field, error) { // TODO: once the evalengine becomes smart enough, we should be able to remove the // dependency on these fields altogether env.Fields = infields @@ -149,15 +154,22 @@ func (p *Projection) evalFields(env *evalengine.ExpressionEnv, infields []*query if err != nil { return nil, err } - fl := mysql.FlagsForColumn(typ.Type, typ.Coll) - if !sqltypes.IsNull(typ.Type) && !typ.Nullable { + fl := mysql.FlagsForColumn(typ.Type(), typ.Collation()) + if !sqltypes.IsNull(typ.Type()) && !typ.Nullable() { fl |= uint32(querypb.MySqlFlag_NOT_NULL_FLAG) } + typCol := typ.Collation() + if sqltypes.IsTextOrBinary(typ.Type()) && typCol != collations.CollationBinaryID { + typCol = coll + } + fields = append(fields, &querypb.Field{ - Name: col, - Type: typ.Type, - Charset: uint32(typ.Coll), - Flags: fl, + Name: col, + Type: typ.Type(), + Charset: uint32(typCol), + ColumnLength: uint32(typ.Size()), + Decimals: uint32(typ.Scale()), + Flags: fl, }) } return fields, nil diff --git a/go/vt/vtgate/engine/projection_test.go b/go/vt/vtgate/engine/projection_test.go index 37d1730e2e1..51b1ffb558c 100644 --- a/go/vt/vtgate/engine/projection_test.go +++ b/go/vt/vtgate/engine/projection_test.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" 
"vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -38,7 +39,10 @@ func TestMultiply(t *testing.T) { Left: &sqlparser.Offset{V: 0}, Right: &sqlparser.Offset{V: 1}, } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + Environment: vtenv.NewTestEnv(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( @@ -72,13 +76,62 @@ func TestMultiply(t *testing.T) { assert.Equal(t, "[[UINT64(6)] [UINT64(0)] [UINT64(2)]]", fmt.Sprintf("%v", qr.Rows)) } +func TestProjectionStreaming(t *testing.T) { + expr := &sqlparser.BinaryExpr{ + Operator: sqlparser.MultOp, + Left: &sqlparser.Offset{V: 0}, + Right: &sqlparser.Offset{V: 1}, + } + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + Environment: vtenv.NewTestEnv(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) + require.NoError(t, err) + fp := &fakePrimitive{ + results: sqltypes.MakeTestStreamingResults( + sqltypes.MakeTestFields("a|b", "uint64|uint64"), + "3|2", + "1|0", + "6|2", + "---", + "3|2", + "---", + "1|0", + "---", + "1|2", + "4|2", + "---", + "5|5", + "4|10", + ), + async: true, + } + proj := &Projection{ + Cols: []string{"apa"}, + Exprs: []evalengine.Expr{evalExpr}, + Input: fp, + } + + qr := &sqltypes.Result{} + err = proj.TryStreamExecute(context.Background(), &noopVCursor{}, nil, true, func(result *sqltypes.Result) error { + qr.Rows = append(qr.Rows, result.Rows...) 
+ return nil + }) + require.NoError(t, err) + require.NoError(t, sqltypes.RowsEqualsStr(`[[UINT64(25)] [UINT64(40)] [UINT64(6)] [UINT64(2)] [UINT64(8)] [UINT64(0)] [UINT64(6)] [UINT64(0)] [UINT64(12)]]`, + qr.Rows)) +} + func TestEmptyInput(t *testing.T) { expr := &sqlparser.BinaryExpr{ Operator: sqlparser.MultOp, Left: &sqlparser.Offset{V: 0}, Right: &sqlparser.Offset{V: 1}, } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + Environment: vtenv.NewTestEnv(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "uint64|uint64"))}, @@ -93,22 +146,25 @@ func TestEmptyInput(t *testing.T) { require.NoError(t, err) assert.Equal(t, "[]", fmt.Sprintf("%v", qr.Rows)) - //fp = &fakePrimitive{ + // fp = &fakePrimitive{ // results: []*sqltypes.Result{sqltypes.MakeTestResult( // sqltypes.MakeTestFields("a|b", "uint64|uint64"), // "3|2", // "1|0", // "1|2", // )}, - //} - //proj.Input = fp - //qr, err = wrapStreamExecute(proj, newNoopVCursor(context.Background()), nil, true) - //require.NoError(t, err) - //assert.Equal(t, "[[UINT64(6)] [UINT64(0)] [UINT64(2)]]", fmt.Sprintf("%v", qr.Rows)) + // } + // proj.Input = fp + // qr, err = wrapStreamExecute(proj, newNoopVCursor(context.Background()), nil, true) + // require.NoError(t, err) + // assert.Equal(t, "[[UINT64(6)] [UINT64(0)] [UINT64(2)]]", fmt.Sprintf("%v", qr.Rows)) } func TestHexAndBinaryArgument(t *testing.T) { - hexExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), nil) + hexExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), &evalengine.Config{ + Environment: vtenv.NewTestEnv(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) proj := &Projection{ Cols: []string{"hex"}, @@ -140,7 +196,7 @@ func TestFields(t *testing.T) { name: `string`, bindVar: 
sqltypes.StringBindVariable("test"), typ: querypb.Type_VARCHAR, - collation: collations.Default(), + collation: collations.MySQL8().DefaultConnectionCharset(), }, { name: `binary`, @@ -152,7 +208,10 @@ func TestFields(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - bindExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), nil) + bindExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), &evalengine.Config{ + Environment: vtenv.NewTestEnv(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) proj := &Projection{ Cols: []string{"col"}, @@ -169,3 +228,41 @@ func TestFields(t *testing.T) { }) } } + +func TestFieldConversion(t *testing.T) { + var testCases = []struct { + name string + expr string + typ querypb.Type + collation collations.ID + }{ + { + name: `convert different charset`, + expr: `_latin1 0xFF`, + typ: sqltypes.VarChar, + collation: collations.MySQL8().DefaultConnectionCharset(), + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + arg, err := sqlparser.NewTestParser().ParseExpr(testCase.expr) + require.NoError(t, err) + bindExpr, err := evalengine.Translate(arg, &evalengine.Config{ + Environment: vtenv.NewTestEnv(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) + require.NoError(t, err) + proj := &Projection{ + Cols: []string{"col"}, + Exprs: []evalengine.Expr{bindExpr}, + Input: &SingleRow{}, + noTxNeeded: noTxNeeded{}, + } + qr, err := proj.TryExecute(context.Background(), &noopVCursor{}, nil, true) + require.NoError(t, err) + assert.Equal(t, testCase.typ, qr.Fields[0].Type) + assert.Equal(t, testCase.collation, collations.ID(qr.Fields[0].Charset)) + }) + } +} diff --git a/go/vt/vtgate/engine/rename_fields.go b/go/vt/vtgate/engine/rename_fields.go index e1dc7cbbb43..3fdab364468 100644 --- a/go/vt/vtgate/engine/rename_fields.go +++ b/go/vt/vtgate/engine/rename_fields.go @@ -28,10 
+28,11 @@ var _ Primitive = (*RenameFields)(nil) // RenameFields is a primitive that renames the fields type RenameFields struct { + noTxNeeded + Cols []string Indices []int Input Primitive - noTxNeeded } // NewRenameField creates a new rename field diff --git a/go/vt/vtgate/engine/replace_variables.go b/go/vt/vtgate/engine/replace_variables.go index 66375266427..d3184d8756b 100644 --- a/go/vt/vtgate/engine/replace_variables.go +++ b/go/vt/vtgate/engine/replace_variables.go @@ -27,8 +27,8 @@ var _ Primitive = (*ReplaceVariables)(nil) // ReplaceVariables is used in SHOW VARIABLES statements so that it replaces the values for vitess-aware variables type ReplaceVariables struct { - Input Primitive noTxNeeded + Input Primitive } // NewReplaceVariables is used to create a new ReplaceVariables primitive diff --git a/go/vt/vtgate/engine/revert_migration.go b/go/vt/vtgate/engine/revert_migration.go index e7237d01da4..a7690d07f42 100644 --- a/go/vt/vtgate/engine/revert_migration.go +++ b/go/vt/vtgate/engine/revert_migration.go @@ -34,14 +34,13 @@ var _ Primitive = (*RevertMigration)(nil) // RevertMigration represents the instructions to perform an online schema change via vtctld type RevertMigration struct { + noTxNeeded + noInputs + Keyspace *vindexes.Keyspace Stmt *sqlparser.RevertMigration Query string TargetDestination key.Destination - - noTxNeeded - - noInputs } func (v *RevertMigration) description() PrimitiveDescription { @@ -88,7 +87,7 @@ func (v *RevertMigration) TryExecute(ctx context.Context, vcursor VCursor, bindV return nil, err } ddlStrategySetting.Strategy = schema.DDLStrategyOnline // and we keep the options as they were - onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, ddlStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "") + onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, ddlStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "", 
vcursor.Environment().Parser()) if err != nil { return result, err } diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index 30713f45f91..f28dda01a52 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -19,7 +19,7 @@ package engine import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sort" "strings" "time" @@ -52,6 +52,12 @@ var ( // Route represents the instructions to route a read query to // one or many vttablets. type Route struct { + // Route does not take inputs + noInputs + + // Route does not need transaction handling + noTxNeeded + // TargetTabletType specifies an explicit target destination tablet type // this is only used in conjunction with TargetDestination TargetTabletType topodatapb.TabletType @@ -89,12 +95,6 @@ type Route struct { // select count(*) from tbl where lookupColumn = 'not there' // select exists() NoRoutesSpecialHandling bool - - // Route does not take inputs - noInputs - - // Route does not need transaction handling - noTxNeeded } // NewRoute creates a Route. @@ -128,11 +128,6 @@ func (route *Route) GetTableName() string { return route.TableName } -// SetTruncateColumnCount sets the truncate column count. -func (route *Route) SetTruncateColumnCount(count int) { - route.TruncateColumnCount = count -} - // TryExecute performs a non-streaming exec. 
func (route *Route) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { ctx, cancelFunc := addQueryTimeout(ctx, vcursor, route.QueryTimeout) @@ -542,7 +537,7 @@ func (route *Route) executeWarmingReplicaRead(ctx context.Context, vcursor VCurs return } - if vcursor.GetWarmingReadsPercent() == 0 || rand.Intn(100) > vcursor.GetWarmingReadsPercent() { + if vcursor.GetWarmingReadsPercent() == 0 || rand.IntN(100) > vcursor.GetWarmingReadsPercent() { return } diff --git a/go/vt/vtgate/engine/route_test.go b/go/vt/vtgate/engine/route_test.go index 274ac58c7d4..b2f4020cb59 100644 --- a/go/vt/vtgate/engine/route_test.go +++ b/go/vt/vtgate/engine/route_test.go @@ -31,7 +31,6 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -46,13 +45,6 @@ var defaultSelectResult = sqltypes.MakeTestResult( "1", ) -func init() { - // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) -} - func TestSelectUnsharded(t *testing.T) { sel := NewRoute( Unsharded, @@ -74,7 +66,7 @@ func TestSelectUnsharded(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.0: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -83,7 +75,7 @@ func TestSelectUnsharded(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `StreamExecuteMulti dummy_select ks.0: {} `, }) - expectResult(t, 
"sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { @@ -138,7 +130,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { expectedLog: []string{ "FindTable(tableName)", "ResolveDestinations routedKeyspace [] Destinations:DestinationAnyShard()", - "ExecuteMultiShard routedKeyspace.1: dummy_select {table_name: type:VARCHAR value:\"routedTable\"} false false"}, + "ExecuteMultiShard routedKeyspace.1: dummy_select {__vtschemaname: type:VARCHAR table_name: type:VARCHAR value:\"routedTable\"} false false"}, }, { testName: "table name predicate - not routed", tableName: map[string]evalengine.Expr{"table_name": evalengine.NewLiteralString([]byte("tableName"), collations.SystemCollation)}, @@ -146,7 +138,7 @@ func TestInformationSchemaWithTableAndSchemaWithRoutedTables(t *testing.T) { expectedLog: []string{ "FindTable(tableName)", "ResolveDestinations ks [] Destinations:DestinationAnyShard()", - "ExecuteMultiShard ks.1: dummy_select {table_name: type:VARCHAR value:\"tableName\"} false false"}, + "ExecuteMultiShard ks.1: dummy_select {__vtschemaname: type:VARCHAR table_name: type:VARCHAR value:\"tableName\"} false false"}, }, { testName: "schema predicate", tableSchema: []string{"myKeyspace"}, @@ -219,7 +211,7 @@ func TestSelectScatter(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -228,7 +220,7 @@ func TestSelectScatter(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, }) - expectResult(t, "sel.StreamExecute", result, 
defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectEqualUnique(t *testing.T) { @@ -257,7 +249,7 @@ func TestSelectEqualUnique(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -266,7 +258,7 @@ func TestSelectEqualUnique(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6)`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectNone(t *testing.T) { @@ -290,7 +282,7 @@ func TestSelectNone(t *testing.T) { result, err := sel.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.NoError(t, err) require.Empty(t, vc.log) - expectResult(t, "sel.Execute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -308,7 +300,7 @@ func TestSelectNone(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -317,7 +309,7 @@ func TestSelectNone(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) } func 
TestSelectEqualUniqueScatter(t *testing.T) { @@ -351,7 +343,7 @@ func TestSelectEqualUniqueScatter(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyRange(-)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -360,7 +352,7 @@ func TestSelectEqualUniqueScatter(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyRange(-)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectEqual(t *testing.T) { @@ -403,7 +395,7 @@ func TestSelectEqual(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceIDs(00,80)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -413,7 +405,7 @@ func TestSelectEqual(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationKeyspaceIDs(00,80)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectEqualNoRoute(t *testing.T) { @@ -443,7 +435,7 @@ func TestSelectEqualNoRoute(t *testing.T) { `Execute select from, toc from lkp where from in ::from from: type:TUPLE values:{type:INT64 value:"1"} false`, `ResolveDestinations ks [type:INT64 value:"1"] Destinations:DestinationNone()`, }) - expectResult(t, "sel.Execute", result, &sqltypes.Result{}) + 
expectResult(t, result, &sqltypes.Result{}) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -466,7 +458,7 @@ func TestSelectEqualNoRoute(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -477,7 +469,7 @@ func TestSelectEqualNoRoute(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) } func TestINUnique(t *testing.T) { @@ -513,7 +505,7 @@ func TestINUnique(t *testing.T) { `ks.20-: dummy_select {__vals: type:TUPLE values:{type:INT64 value:"4"}} ` + `false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -522,7 +514,7 @@ func TestINUnique(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} ks.20-: {__vals: type:TUPLE values:{type:INT64 value:"4"}} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestINNonUnique(t *testing.T) { @@ -579,7 +571,7 @@ func TestINNonUnique(t *testing.T) { `ks.20-: dummy_select {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"4"}} ` + 
`false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -589,7 +581,7 @@ func TestINNonUnique(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4"] Destinations:DestinationKeyspaceIDs(00,80),DestinationKeyspaceIDs(00),DestinationKeyspaceIDs(80)`, `StreamExecuteMulti dummy_select ks.-20: {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} ks.20-: {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"4"}} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestMultiEqual(t *testing.T) { @@ -623,7 +615,7 @@ func TestMultiEqual(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -632,7 +624,7 @@ func TestMultiEqual(t *testing.T) { `ResolveDestinations ks [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectLike(t *testing.T) { @@ -670,7 +662,7 @@ func TestSelectLike(t *testing.T) { `ResolveDestinations ks [type:VARCHAR 
value:"a%"] Destinations:DestinationKeyRange(0c-0d)`, `ExecuteMultiShard ks.-0c80: dummy_select {} ks.0c80-0d: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() @@ -681,7 +673,7 @@ func TestSelectLike(t *testing.T) { `ResolveDestinations ks [type:VARCHAR value:"a%"] Destinations:DestinationKeyRange(0c-0d)`, `StreamExecuteMulti dummy_select ks.-0c80: {} ks.0c80-0d: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() @@ -700,7 +692,7 @@ func TestSelectLike(t *testing.T) { `ResolveDestinations ks [type:VARCHAR value:"ab%"] Destinations:DestinationKeyRange(0c92-0c93)`, `ExecuteMultiShard ks.0c80-0d: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() @@ -711,7 +703,7 @@ func TestSelectLike(t *testing.T) { `ResolveDestinations ks [type:VARCHAR value:"ab%"] Destinations:DestinationKeyRange(0c92-0c93)`, `StreamExecuteMulti dummy_select ks.0c80-0d: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } @@ -736,7 +728,7 @@ func TestSelectNext(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.-: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, _ = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -744,7 +736,7 @@ func TestSelectNext(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `StreamExecuteMulti dummy_select ks.-: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectDBA(t *testing.T) { @@ -768,7 +760,7 @@ func 
TestSelectDBA(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, _ = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -776,7 +768,7 @@ func TestSelectDBA(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectReference(t *testing.T) { @@ -800,7 +792,7 @@ func TestSelectReference(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, _ = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -808,7 +800,7 @@ func TestSelectReference(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestRouteGetFields(t *testing.T) { @@ -840,7 +832,7 @@ func TestRouteGetFields(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select_field {} false false`, }) - expectResult(t, "sel.Execute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, true) @@ -851,7 +843,7 @@ func TestRouteGetFields(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select_field {} false false`, }) - 
expectResult(t, "sel.StreamExecute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) vc.Rewind() // test with special no-routes handling @@ -864,7 +856,7 @@ func TestRouteGetFields(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, true) @@ -875,7 +867,7 @@ func TestRouteGetFields(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAnyShard()`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, &sqltypes.Result{}) + expectResult(t, result, &sqltypes.Result{}) } func TestRouteSort(t *testing.T) { @@ -924,7 +916,7 @@ func TestRouteSort(t *testing.T) { "2", "3", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) sel.OrderBy[0].Desc = true vc.Rewind() @@ -940,7 +932,7 @@ func TestRouteSort(t *testing.T) { "1", "1", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) vc = &loggingVCursor{ shards: []string{"0"}, @@ -1013,7 +1005,7 @@ func TestRouteSortWeightStrings(t *testing.T) { "g|d", "v|x", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) }) t.Run("Descending ordering using weighted strings", func(t *testing.T) { @@ -1032,7 +1024,7 @@ func TestRouteSortWeightStrings(t *testing.T) { "c|t", "a|a", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) }) t.Run("Error when no weight string set", func(t *testing.T) { @@ -1073,11 +1065,11 @@ func TestRouteSortCollation(t *testing.T) { "dummy_select_field", ) - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := 
collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") sel.OrderBy = []evalengine.OrderByParams{{ Col: 0, - Type: evalengine.Type{Type: sqltypes.VarChar, Coll: collationID}, + Type: evalengine.NewType(sqltypes.VarChar, collationID), }} vc := &loggingVCursor{ @@ -1118,7 +1110,7 @@ func TestRouteSortCollation(t *testing.T) { "cs", "d", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) }) t.Run("Descending ordering using Collation", func(t *testing.T) { @@ -1137,13 +1129,12 @@ func TestRouteSortCollation(t *testing.T) { "c", "c", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) }) t.Run("Error when Unknown Collation", func(t *testing.T) { sel.OrderBy = []evalengine.OrderByParams{{ - Col: 0, - Type: evalengine.UnknownType(), + Col: 0, }} vc := &loggingVCursor{ @@ -1169,7 +1160,7 @@ func TestRouteSortCollation(t *testing.T) { t.Run("Error when Unsupported Collation", func(t *testing.T) { sel.OrderBy = []evalengine.OrderByParams{{ Col: 0, - Type: evalengine.Type{Coll: 1111}, + Type: evalengine.NewType(sqltypes.Unknown, 1111), }} vc := &loggingVCursor{ @@ -1239,7 +1230,7 @@ func TestRouteSortTruncate(t *testing.T) { "2", "3", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) } func TestRouteStreamTruncate(t *testing.T) { @@ -1281,7 +1272,7 @@ func TestRouteStreamTruncate(t *testing.T) { "1", "2", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) } func TestRouteStreamSortTruncate(t *testing.T) { @@ -1330,7 +1321,7 @@ func TestRouteStreamSortTruncate(t *testing.T) { "1", "2", ) - expectResult(t, "sel.Execute", result, wantResult) + expectResult(t, result, wantResult) } func TestParamsFail(t *testing.T) { @@ -1432,7 +1423,7 @@ func TestExecFail(t *testing.T) { `ResolveDestinations ks [] Destinations:DestinationAllShards()`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, 
}) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() vc.resultErr = sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", "query timeout -20") @@ -1474,7 +1465,7 @@ func TestSelectEqualUniqueMultiColumnVindex(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(2)]] Destinations:DestinationKeyspaceID(0106e7ea22ce92708f)`, `ExecuteMultiShard ks.-20: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -1483,7 +1474,7 @@ func TestSelectEqualUniqueMultiColumnVindex(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(2)]] Destinations:DestinationKeyspaceID(0106e7ea22ce92708f)`, `StreamExecuteMulti dummy_select ks.-20: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestSelectEqualMultiColumnVindex(t *testing.T) { @@ -1511,7 +1502,7 @@ func TestSelectEqualMultiColumnVindex(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(32)]] Destinations:DestinationKeyRange(20-21)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.20-: dummy_select {} false false`, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -1520,7 +1511,7 @@ func TestSelectEqualMultiColumnVindex(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(32)]] Destinations:DestinationKeyRange(20-21)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.20-: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestINMultiColumnVindex(t *testing.T) { @@ -1557,7 +1548,7 @@ func TestINMultiColumnVindex(t *testing.T) { 
`ResolveDestinationsMultiCol ks [[INT64(1) INT64(3)] [INT64(1) INT64(4)] [INT64(2) INT64(3)] [INT64(2) INT64(4)]] Destinations:DestinationKeyspaceID(014eb190c9a2fa169c),DestinationKeyspaceID(01d2fd8867d50d2dfe),DestinationKeyspaceID(024eb190c9a2fa169c),DestinationKeyspaceID(02d2fd8867d50d2dfe)`, `ExecuteMultiShard ks.-20: dummy_select {__vals0: type:TUPLE values:{type:INT64 value:"1"} __vals1: type:TUPLE values:{type:INT64 value:"3"}} ks.20-: dummy_select {__vals0: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} __vals1: type:TUPLE values:{type:INT64 value:"4"} values:{type:INT64 value:"3"}} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -1566,7 +1557,7 @@ func TestINMultiColumnVindex(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(3)] [INT64(1) INT64(4)] [INT64(2) INT64(3)] [INT64(2) INT64(4)]] Destinations:DestinationKeyspaceID(014eb190c9a2fa169c),DestinationKeyspaceID(01d2fd8867d50d2dfe),DestinationKeyspaceID(024eb190c9a2fa169c),DestinationKeyspaceID(02d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {__vals0: type:TUPLE values:{type:INT64 value:"1"} __vals1: type:TUPLE values:{type:INT64 value:"3"}} ks.20-: {__vals0: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} __vals1: type:TUPLE values:{type:INT64 value:"4"} values:{type:INT64 value:"3"}} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestINMixedMultiColumnComparision(t *testing.T) { @@ -1600,7 +1591,7 @@ func TestINMixedMultiColumnComparision(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(3)] [INT64(1) INT64(4)]] Destinations:DestinationKeyspaceID(014eb190c9a2fa169c),DestinationKeyspaceID(01d2fd8867d50d2dfe)`, `ExecuteMultiShard ks.-20: dummy_select {__vals1: 
type:TUPLE values:{type:INT64 value:"3"}} ks.20-: dummy_select {__vals1: type:TUPLE values:{type:INT64 value:"4"}} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -1609,7 +1600,7 @@ func TestINMixedMultiColumnComparision(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(3)] [INT64(1) INT64(4)]] Destinations:DestinationKeyspaceID(014eb190c9a2fa169c),DestinationKeyspaceID(01d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {__vals1: type:TUPLE values:{type:INT64 value:"3"}} ks.20-: {__vals1: type:TUPLE values:{type:INT64 value:"4"}} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestMultiEqualMultiCol(t *testing.T) { @@ -1643,7 +1634,7 @@ func TestMultiEqualMultiCol(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(2)] [INT64(3) INT64(4)]] Destinations:DestinationKeyspaceID(0106e7ea22ce92708f),DestinationKeyspaceID(03d2fd8867d50d2dfe)`, `ExecuteMultiShard ks.-20: dummy_select {} ks.40-: dummy_select {} false false`, }) - expectResult(t, "sel.Execute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) vc.Rewind() result, err = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{}, false) @@ -1652,7 +1643,7 @@ func TestMultiEqualMultiCol(t *testing.T) { `ResolveDestinationsMultiCol ks [[INT64(1) INT64(2)] [INT64(3) INT64(4)]] Destinations:DestinationKeyspaceID(0106e7ea22ce92708f),DestinationKeyspaceID(03d2fd8867d50d2dfe)`, `StreamExecuteMulti dummy_select ks.-20: {} ks.40-: {} `, }) - expectResult(t, "sel.StreamExecute", result, defaultSelectResult) + expectResult(t, result, defaultSelectResult) } func TestBuildRowColValues(t *testing.T) { @@ -1717,3 +1708,46 @@ func TestBuildMultiColumnVindexValues(t *testing.T) { }) } } + +// 
TestSelectTupleMultiCol tests route execution having bind variable with multi column tuple. +func TestSelectTupleMultiCol(t *testing.T) { + vindex, _ := vindexes.CreateVindex("multicol", "", map[string]string{ + "column_count": "2", + "column_vindex": "hash,binary", + }) + + sel := NewRoute( + MultiEqual, + &vindexes.Keyspace{Name: "user", Sharded: true}, + "select 1 from multicol_tbl where (colb, colx, cola) in ::vals", + "select 1 from multicol_tbl where 1 != 1", + ) + sel.Vindex = vindex + sel.Values = []evalengine.Expr{ + &evalengine.TupleBindVariable{Key: "vals", Index: 0}, + &evalengine.TupleBindVariable{Key: "vals", Index: 1}, + } + + v1 := sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewVarChar("a")) + v2 := sqltypes.TestTuple(sqltypes.NewInt64(4), sqltypes.NewVarChar("b")) + tupleBV := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: append([]*querypb.Value{sqltypes.ValueToProto(v1)}, sqltypes.ValueToProto(v2)), + } + vc := &loggingVCursor{ + shards: []string{"-20", "20-"}, + } + _, err := sel.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{"vals": tupleBV}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinationsMultiCol user [[INT64(1) VARCHAR("a")] [INT64(4) VARCHAR("b")]] Destinations:DestinationKeyspaceID(166b40b461),DestinationKeyspaceID(d2fd886762)`, + `ExecuteMultiShard user.-20: select 1 from multicol_tbl where (colb, colx, cola) in ::vals {vals: type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x014\x950\x01b"}} false false`, + }) + + vc.Rewind() + _, _ = wrapStreamExecute(sel, vc, map[string]*querypb.BindVariable{"vals": tupleBV}, false) + vc.ExpectLog(t, []string{ + `ResolveDestinationsMultiCol user [[INT64(1) VARCHAR("a")] [INT64(4) VARCHAR("b")]] Destinations:DestinationKeyspaceID(166b40b461),DestinationKeyspaceID(d2fd886762)`, + `StreamExecuteMulti select 1 from multicol_tbl where (colb, colx, cola) in ::vals user.-20: {vals: 
type:TUPLE values:{type:TUPLE value:"\x89\x02\x011\x950\x01a"} values:{type:TUPLE value:"\x89\x02\x014\x950\x01b"}} `, + }) +} diff --git a/go/vt/vtgate/engine/routing.go b/go/vt/vtgate/engine/routing.go index a4f6dabde20..e05366c4aeb 100644 --- a/go/vt/vtgate/engine/routing.go +++ b/go/vt/vtgate/engine/routing.go @@ -192,22 +192,21 @@ func (rp *RoutingParameters) routeInfoSchemaQuery(ctx context.Context, vcursor V env := evalengine.NewExpressionEnv(ctx, bindVars, vcursor) var specifiedKS string - for _, tableSchema := range rp.SysTableTableSchema { + for idx, tableSchema := range rp.SysTableTableSchema { result, err := env.Evaluate(tableSchema) if err != nil { return nil, err } ks := result.Value(vcursor.ConnCollation()).ToString() - if specifiedKS == "" { + switch { + case idx == 0: specifiedKS = ks - } - if specifiedKS != ks { + case specifiedKS != ks: return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "specifying two different database in the query is not supported") } } - if specifiedKS != "" { - bindVars[sqltypes.BvSchemaName] = sqltypes.StringBindVariable(specifiedKS) - } + + bindVars[sqltypes.BvSchemaName] = sqltypes.StringBindVariable(specifiedKS) tableNames := map[string]string{} for tblBvName, sysTableName := range rp.SysTableTableName { diff --git a/go/vt/vtgate/engine/rows.go b/go/vt/vtgate/engine/rows.go index 2b81c85145f..424d5585a36 100644 --- a/go/vt/vtgate/engine/rows.go +++ b/go/vt/vtgate/engine/rows.go @@ -27,14 +27,14 @@ var _ Primitive = (*Rows)(nil) // Rows simply returns a number or rows type Rows struct { - rows [][]sqltypes.Value - fields []*querypb.Field - noInputs noTxNeeded + + rows [][]sqltypes.Value + fields []*querypb.Field } -// NewRowsPrimitive returns a new Rows primitie +// NewRowsPrimitive returns a new Rows primitive func NewRowsPrimitive(rows [][]sqltypes.Value, fields []*querypb.Field) Primitive { return &Rows{rows: rows, fields: fields} } diff --git a/go/vt/vtgate/engine/scalar_aggregation.go 
b/go/vt/vtgate/engine/scalar_aggregation.go index 85e90420ff9..929536b9cdf 100644 --- a/go/vt/vtgate/engine/scalar_aggregation.go +++ b/go/vt/vtgate/engine/scalar_aggregation.go @@ -80,7 +80,7 @@ func (sa *ScalarAggregate) NeedsTransaction() bool { // TryExecute implements the Primitive interface func (sa *ScalarAggregate) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - result, err := vcursor.ExecutePrimitive(ctx, sa.Input, bindVars, wantfields) + result, err := vcursor.ExecutePrimitive(ctx, sa.Input, bindVars, true) if err != nil { return nil, err } @@ -114,7 +114,7 @@ func (sa *ScalarAggregate) TryStreamExecute(ctx context.Context, vcursor VCursor var fields []*querypb.Field fieldsSent := !wantfields - err := vcursor.StreamExecutePrimitive(ctx, sa.Input, bindVars, wantfields, func(result *sqltypes.Result) error { + err := vcursor.StreamExecutePrimitive(ctx, sa.Input, bindVars, true, func(result *sqltypes.Result) error { // as the underlying primitive call is not sync // and here scalar aggregate is using shared variables we have to sync the callback // for correct aggregation. diff --git a/go/vt/vtgate/engine/scalar_aggregation_test.go b/go/vt/vtgate/engine/scalar_aggregation_test.go index 3329fc72d39..99031c95f34 100644 --- a/go/vt/vtgate/engine/scalar_aggregation_test.go +++ b/go/vt/vtgate/engine/scalar_aggregation_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" . 
"vitess.io/vitess/go/vt/vtgate/engine/opcode" @@ -273,8 +274,8 @@ func TestScalarDistinctAggrOnEngine(t *testing.T) { oa := &ScalarAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateCountDistinct, 0, "count(distinct value)"), - NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct value)"), + NewAggregateParam(AggregateCountDistinct, 0, "count(distinct value)", collations.MySQL8()), + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct value)", collations.MySQL8()), }, Input: fp, } @@ -311,9 +312,9 @@ func TestScalarDistinctPushedDown(t *testing.T) { "8|90", )}} - countAggr := NewAggregateParam(AggregateSum, 0, "count(distinct value)") + countAggr := NewAggregateParam(AggregateSum, 0, "count(distinct value)", collations.MySQL8()) countAggr.OrigOpcode = AggregateCountDistinct - sumAggr := NewAggregateParam(AggregateSum, 1, "sum(distinct value)") + sumAggr := NewAggregateParam(AggregateSum, 1, "sum(distinct value)", collations.MySQL8()) sumAggr.OrigOpcode = AggregateSumDistinct oa := &ScalarAggregate{ Aggregates: []*AggregateParams{ diff --git a/go/vt/vtgate/engine/semi_join.go b/go/vt/vtgate/engine/semi_join.go index d291b348da9..de8eeef5a32 100644 --- a/go/vt/vtgate/engine/semi_join.go +++ b/go/vt/vtgate/engine/semi_join.go @@ -18,8 +18,6 @@ package engine import ( "context" - "fmt" - "strings" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -33,17 +31,9 @@ type SemiJoin struct { // of the SemiJoin. They can be any primitive. Left, Right Primitive `json:",omitempty"` - // Cols defines which columns from the left - // results should be used to build the - // return result. For results coming from the - // left query, the index values go as -1, -2, etc. - // If Cols is {-1, -2}, it means that - // the returned result will be {Left0, Left1}. - Cols []int `json:",omitempty"` - // Vars defines the list of SemiJoinVars that need to // be built from the LHS result before invoking - // the RHS subqquery. 
+ // the RHS subquery. Vars map[string]int `json:",omitempty"` } @@ -54,7 +44,7 @@ func (jn *SemiJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma if err != nil { return nil, err } - result := &sqltypes.Result{Fields: projectFields(lresult.Fields, jn.Cols)} + result := &sqltypes.Result{Fields: lresult.Fields} for _, lrow := range lresult.Rows { for k, col := range jn.Vars { joinVars[k] = sqltypes.ValueBindVariable(lrow[col]) @@ -64,7 +54,7 @@ func (jn *SemiJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma return nil, err } if len(rresult.Rows) > 0 { - result.Rows = append(result.Rows, projectRows(lrow, jn.Cols)) + result.Rows = append(result.Rows, lrow) } } return result, nil @@ -74,7 +64,7 @@ func (jn *SemiJoin) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma func (jn *SemiJoin) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { joinVars := make(map[string]*querypb.BindVariable) err := vcursor.StreamExecutePrimitive(ctx, jn.Left, bindVars, wantfields, func(lresult *sqltypes.Result) error { - result := &sqltypes.Result{Fields: projectFields(lresult.Fields, jn.Cols)} + result := &sqltypes.Result{Fields: lresult.Fields} for _, lrow := range lresult.Rows { for k, col := range jn.Vars { joinVars[k] = sqltypes.ValueBindVariable(lrow[col]) @@ -82,7 +72,7 @@ func (jn *SemiJoin) TryStreamExecute(ctx context.Context, vcursor VCursor, bindV rowAdded := false err := vcursor.StreamExecutePrimitive(ctx, jn.Right, combineVars(bindVars, joinVars), false, func(rresult *sqltypes.Result) error { if len(rresult.Rows) > 0 && !rowAdded { - result.Rows = append(result.Rows, projectRows(lrow, jn.Cols)) + result.Rows = append(result.Rows, lrow) rowAdded = true } return nil @@ -135,8 +125,7 @@ func (jn *SemiJoin) NeedsTransaction() bool { func (jn *SemiJoin) description() PrimitiveDescription { other := map[string]any{ - "TableName": 
jn.GetTableName(), - "ProjectedIndexes": strings.Trim(strings.Join(strings.Fields(fmt.Sprint(jn.Cols)), ","), "[]"), + "TableName": jn.GetTableName(), } if len(jn.Vars) > 0 { other["JoinVars"] = orderedStringIntMap(jn.Vars) @@ -146,30 +135,3 @@ func (jn *SemiJoin) description() PrimitiveDescription { Other: other, } } - -func projectFields(lfields []*querypb.Field, cols []int) []*querypb.Field { - if lfields == nil { - return nil - } - if len(cols) == 0 { - return lfields - } - fields := make([]*querypb.Field, len(cols)) - for i, index := range cols { - fields[i] = lfields[-index-1] - } - return fields -} - -func projectRows(lrow []sqltypes.Value, cols []int) []sqltypes.Value { - if len(cols) == 0 { - return lrow - } - row := make([]sqltypes.Value, len(cols)) - for i, index := range cols { - if index < 0 { - row[i] = lrow[-index-1] - } - } - return row -} diff --git a/go/vt/vtgate/engine/semi_join_test.go b/go/vt/vtgate/engine/semi_join_test.go index ca89882ab8a..8fee0490415 100644 --- a/go/vt/vtgate/engine/semi_join_test.go +++ b/go/vt/vtgate/engine/semi_join_test.go @@ -74,7 +74,6 @@ func TestSemiJoinExecute(t *testing.T) { Vars: map[string]int{ "bv": 1, }, - Cols: []int{-1, -2, -3}, } r, err := jn.TryExecute(context.Background(), &noopVCursor{}, bv, true) require.NoError(t, err) @@ -139,7 +138,6 @@ func TestSemiJoinStreamExecute(t *testing.T) { Vars: map[string]int{ "bv": 1, }, - Cols: []int{-1, -2, -3}, } r, err := wrapStreamExecute(jn, &noopVCursor{}, map[string]*querypb.BindVariable{}, true) require.NoError(t, err) @@ -152,7 +150,7 @@ func TestSemiJoinStreamExecute(t *testing.T) { `StreamExecute bv: type:VARCHAR value:"c" false`, `StreamExecute bv: type:VARCHAR value:"d" false`, }) - expectResult(t, "jn.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col2|col3", "int64|varchar|varchar", diff --git a/go/vt/vtgate/engine/send.go b/go/vt/vtgate/engine/send.go index 1a95d8f93fa..31c9e9e0eb0 100644 
--- a/go/vt/vtgate/engine/send.go +++ b/go/vt/vtgate/engine/send.go @@ -33,6 +33,8 @@ var _ Primitive = (*Send)(nil) // Send is an operator to send query to the specific keyspace, tabletType and destination type Send struct { + noInputs + // Keyspace specifies the keyspace to send the query to. Keyspace *vindexes.Keyspace @@ -54,7 +56,10 @@ type Send struct { // MultishardAutocommit specifies that a multishard transaction query can autocommit MultishardAutocommit bool - noInputs + ReservedConnectionNeeded bool + + // QueryTimeout contains the optional timeout (in milliseconds) to apply to this query + QueryTimeout int } // ShardName as key for setting shard name in bind variables map @@ -86,21 +91,14 @@ func (s *Send) GetTableName() string { // TryExecute implements Primitive interface func (s *Send) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { - ctx, cancelFunc := addQueryTimeout(ctx, vcursor, 0) + ctx, cancelFunc := addQueryTimeout(ctx, vcursor, s.QueryTimeout) defer cancelFunc() - rss, _, err := vcursor.ResolveDestinations(ctx, s.Keyspace.Name, nil, []key.Destination{s.TargetDestination}) + + rss, err := s.checkAndReturnShards(ctx, vcursor) if err != nil { return nil, err } - if !s.Keyspace.Sharded && len(rss) != 1 { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) - } - - if s.SingleShardOnly && len(rss) != 1 { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unexpected error, DestinationKeyspaceID mapping to multiple shards: %s, got: %v", s.Query, s.TargetDestination) - } - queries := make([]*querypb.BoundQuery, len(rss)) for i, rs := range rss { bv := bindVars @@ -123,6 +121,26 @@ func (s *Send) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[str return result, nil } +func (s *Send) checkAndReturnShards(ctx context.Context, vcursor VCursor) ([]*srvtopo.ResolvedShard, error) { 
+ rss, _, err := vcursor.ResolveDestinations(ctx, s.Keyspace.Name, nil, []key.Destination{s.TargetDestination}) + if err != nil { + return nil, err + } + + if !s.Keyspace.Sharded && len(rss) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) + } + + if s.SingleShardOnly && len(rss) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unexpected error, DestinationKeyspaceID mapping to multiple shards: %s, got: %v", s.Query, s.TargetDestination) + } + + if s.ReservedConnectionNeeded { + vcursor.Session().NeedsReservedConn() + } + return rss, nil +} + func (s *Send) canAutoCommit(vcursor VCursor, rss []*srvtopo.ResolvedShard) bool { if s.IsDML { return (len(rss) == 1 || s.MultishardAutocommit) && vcursor.AutocommitApproval() @@ -140,19 +158,11 @@ func copyBindVars(in map[string]*querypb.BindVariable) map[string]*querypb.BindV // TryStreamExecute implements Primitive interface func (s *Send) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { - rss, _, err := vcursor.ResolveDestinations(ctx, s.Keyspace.Name, nil, []key.Destination{s.TargetDestination}) + rss, err := s.checkAndReturnShards(ctx, vcursor) if err != nil { return err } - if !s.Keyspace.Sharded && len(rss) != 1 { - return vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "Keyspace does not have exactly one shard: %v", rss) - } - - if s.SingleShardOnly && len(rss) != 1 { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unexpected error, DestinationKeyspaceID mapping to multiple shards: %s, got: %v", s.Query, s.TargetDestination) - } - multiBindVars := make([]map[string]*querypb.BindVariable, len(rss)) for i, rs := range rss { bv := bindVars @@ -178,20 +188,14 @@ func (s *Send) GetFields(ctx context.Context, vcursor VCursor, bindVars map[stri func (s *Send) description() PrimitiveDescription { other := 
map[string]any{ - "Query": s.Query, - "Table": s.GetTableName(), - } - if s.IsDML { - other["IsDML"] = true - } - if s.SingleShardOnly { - other["SingleShardOnly"] = true - } - if s.ShardNameNeeded { - other["ShardNameNeeded"] = true - } - if s.MultishardAutocommit { - other["MultishardAutocommit"] = true + "Query": s.Query, + "Table": s.GetTableName(), + "IsDML": s.IsDML, + "SingleShardOnly": s.SingleShardOnly, + "ShardNameNeeded": s.ShardNameNeeded, + "MultishardAutocommit": s.MultishardAutocommit, + "ReservedConnectionNeeded": s.ReservedConnectionNeeded, + "QueryTimeout": s.QueryTimeout, } return PrimitiveDescription{ OperatorType: "Send", diff --git a/go/vt/vtgate/engine/sequential.go b/go/vt/vtgate/engine/sequential.go new file mode 100644 index 00000000000..ecf74d663a2 --- /dev/null +++ b/go/vt/vtgate/engine/sequential.go @@ -0,0 +1,107 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +// Sequential Primitive is used to execute DML statements in a fixed order. +// Any failure, stops the execution and returns. +type Sequential struct { + txNeeded + Sources []Primitive +} + +var _ Primitive = (*Sequential)(nil) + +// NewSequential creates a Sequential primitive. 
+func NewSequential(Sources []Primitive) *Sequential { + return &Sequential{ + Sources: Sources, + } +} + +// RouteType returns a description of the query routing type used by the primitive +func (s *Sequential) RouteType() string { + return "Sequential" +} + +// GetKeyspaceName specifies the Keyspace that this primitive routes to +func (s *Sequential) GetKeyspaceName() string { + res := s.Sources[0].GetKeyspaceName() + for i := 1; i < len(s.Sources); i++ { + res = formatTwoOptionsNicely(res, s.Sources[i].GetKeyspaceName()) + } + return res +} + +// GetTableName specifies the table that this primitive routes to. +func (s *Sequential) GetTableName() string { + res := s.Sources[0].GetTableName() + for i := 1; i < len(s.Sources); i++ { + res = formatTwoOptionsNicely(res, s.Sources[i].GetTableName()) + } + return res +} + +// TryExecute performs a non-streaming exec. +func (s *Sequential) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantFields bool) (*sqltypes.Result, error) { + finalRes := &sqltypes.Result{} + for _, source := range s.Sources { + res, err := vcursor.ExecutePrimitive(ctx, source, bindVars, wantFields) + if err != nil { + return nil, err + } + finalRes.RowsAffected += res.RowsAffected + if finalRes.InsertID == 0 { + finalRes.InsertID = res.InsertID + } + if res.Info != "" { + finalRes.Info = res.Info + } + } + return finalRes, nil +} + +// TryStreamExecute performs a streaming exec. +func (s *Sequential) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantFields bool, callback func(*sqltypes.Result) error) error { + qr, err := s.TryExecute(ctx, vcursor, bindVars, wantFields) + if err != nil { + return err + } + return callback(qr) +} + +// GetFields fetches the field info. 
+func (s *Sequential) GetFields(context.Context, VCursor, map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unreachable code for Sequential engine") +} + +// Inputs returns the input primitives for this +func (s *Sequential) Inputs() ([]Primitive, []map[string]any) { + return s.Sources, nil +} + +func (s *Sequential) description() PrimitiveDescription { + return PrimitiveDescription{OperatorType: s.RouteType()} +} diff --git a/go/vt/vtgate/engine/session_primitive.go b/go/vt/vtgate/engine/session_primitive.go index 834f335dd6f..c9c39c39e2b 100644 --- a/go/vt/vtgate/engine/session_primitive.go +++ b/go/vt/vtgate/engine/session_primitive.go @@ -27,11 +27,11 @@ import ( // SessionPrimitive the session primitive is a very small primitive used // when we have simple engine code that needs to interact with the Session type SessionPrimitive struct { - action func(sa SessionActions) (*sqltypes.Result, error) - name string - noInputs noTxNeeded + + action func(sa SessionActions) (*sqltypes.Result, error) + name string } var _ Primitive = (*SessionPrimitive)(nil) diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index df56fc04ed2..9d370b6ec36 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -17,7 +17,6 @@ limitations under the License. package engine import ( - "bytes" "context" "encoding/json" "fmt" @@ -46,10 +45,10 @@ import ( type ( // Set contains the instructions to perform set. Set struct { + noTxNeeded + Ops []SetOp Input Primitive - - noTxNeeded } // SetOp is an interface that different type of set operations implements. @@ -181,7 +180,6 @@ func (u *UserDefinedVariable) MarshalJSON() ([]byte, error) { Name: u.Name, Expr: sqlparser.String(u.Expr), }) - } // VariableName implements the SetOp interface method. 
@@ -209,7 +207,6 @@ func (svi *SysVarIgnore) MarshalJSON() ([]byte, error) { Type: "SysVarIgnore", SysVarIgnore: *svi, }) - } // VariableName implements the SetOp interface method. @@ -219,7 +216,6 @@ func (svi *SysVarIgnore) VariableName() string { // Execute implements the SetOp interface method. func (svi *SysVarIgnore) Execute(context.Context, VCursor, *evalengine.ExpressionEnv) error { - log.Infof("Ignored inapplicable SET %v = %v", svi.Name, svi.Expr) return nil } @@ -234,7 +230,6 @@ func (svci *SysVarCheckAndIgnore) MarshalJSON() ([]byte, error) { Type: "SysVarCheckAndIgnore", SysVarCheckAndIgnore: *svci, }) - } // VariableName implements the SetOp interface method @@ -253,7 +248,7 @@ func (svci *SysVarCheckAndIgnore) Execute(ctx context.Context, vcursor VCursor, return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unexpected error, DestinationKeyspaceID mapping to multiple shards: %v", svci.TargetDestination) } checkSysVarQuery := fmt.Sprintf("select 1 from dual where @@%s = %s", svci.Name, svci.Expr) - result, err := execShard(ctx, nil, vcursor, checkSysVarQuery, env.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */) + _, err = execShard(ctx, nil, vcursor, checkSysVarQuery, env.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */) if err != nil { // Rather than returning the error, we will just log the error // as the intention for executing the query it to validate the current setting and eventually ignore it anyways. 
@@ -261,9 +256,6 @@ func (svci *SysVarCheckAndIgnore) Execute(ctx context.Context, vcursor VCursor, log.Warningf("unable to validate the current settings for '%s': %s", svci.Name, err.Error()) return nil } - if len(result.Rows) == 0 { - log.Infof("Ignored inapplicable SET %v = %v", svci.Name, svci.Expr) - } return nil } @@ -278,7 +270,6 @@ func (svs *SysVarReservedConn) MarshalJSON() ([]byte, error) { Type: "SysVarSet", SysVarReservedConn: *svs, }) - } // VariableName implements the SetOp interface method @@ -363,8 +354,8 @@ func (svs *SysVarReservedConn) checkAndUpdateSysVar(ctx context.Context, vcursor } else { value = qr.Rows[0][0] } - buf := new(bytes.Buffer) - value.EncodeSQL(buf) + var buf strings.Builder + value.EncodeSQL(&buf) s := buf.String() vcursor.Session().SetSysVar(svs.Name, s) diff --git a/go/vt/vtgate/engine/set_test.go b/go/vt/vtgate/engine/set_test.go index 9245e9a618d..0677ee40bd8 100644 --- a/go/vt/vtgate/engine/set_test.go +++ b/go/vt/vtgate/engine/set_test.go @@ -107,7 +107,7 @@ func TestSetTable(t *testing.T) { setOps: []SetOp{ &UserDefinedVariable{ Name: "x", - Expr: evalengine.NewColumn(0, evalengine.UnknownType(), nil), + Expr: evalengine.NewColumn(0, evalengine.Type{}, nil), }, }, qr: []*sqltypes.Result{sqltypes.MakeTestResult( @@ -363,7 +363,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - changed additional - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -383,7 +383,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - changed less - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -420,7 +420,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -459,7 +459,7 @@ func TestSetTable(t 
*testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL80", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -479,7 +479,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change to empty - non empty orig - MySQL80 - should use reserved conn", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -499,7 +499,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL80 - SET_VAR disabled", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -520,7 +520,7 @@ func TestSetTable(t *testing.T) { disableSetVar: true, }, { testName: "sql_mode set an unsupported mode", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -540,7 +540,7 @@ func TestSetTable(t *testing.T) { disableSetVar: true, }, { testName: "default_week_format change - empty orig - MySQL80", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "default_week_format", @@ -565,23 +565,22 @@ func TestSetTable(t *testing.T) { tc.input = &SingleRow{} } - oldMySQLVersion := sqlparser.GetParserVersion() - defer func() { sqlparser.SetParserVersion(oldMySQLVersion) }() - if tc.mysqlVersion != "" { - sqlparser.SetParserVersion(tc.mysqlVersion) - } - set := &Set{ Ops: tc.setOps, Input: tc.input, } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: tc.mysqlVersion, + }) + require.NoError(t, err) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, results: tc.qr, multiShardErrs: []error{tc.execErr}, disableSetVar: tc.disableSetVar, + parser: parser, } - _, err := set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + _, err = set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) if tc.expectedError == "" { 
require.NoError(t, err) } else { diff --git a/go/vt/vtgate/engine/show_exec.go b/go/vt/vtgate/engine/show_exec.go index f0c251aa376..d09733d0ec1 100644 --- a/go/vt/vtgate/engine/show_exec.go +++ b/go/vt/vtgate/engine/show_exec.go @@ -28,11 +28,11 @@ var _ Primitive = (*ShowExec)(nil) // ShowExec is a primitive to call into executor via vcursor. type ShowExec struct { - Command sqlparser.ShowCommandType - ShowFilter *sqlparser.ShowFilter - noInputs noTxNeeded + + Command sqlparser.ShowCommandType + ShowFilter *sqlparser.ShowFilter } func (s *ShowExec) RouteType() string { diff --git a/go/vt/vtgate/engine/simple_projection.go b/go/vt/vtgate/engine/simple_projection.go index 1a4f4ce92c4..6edc5883be1 100644 --- a/go/vt/vtgate/engine/simple_projection.go +++ b/go/vt/vtgate/engine/simple_projection.go @@ -18,7 +18,13 @@ package engine import ( "context" + "fmt" + "strconv" + "strings" + "google.golang.org/protobuf/proto" + + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -29,8 +35,10 @@ var _ Primitive = (*SimpleProjection)(nil) type SimpleProjection struct { // Cols defines the column numbers from the underlying primitive // to be returned. - Cols []int - Input Primitive + Cols []int + // ColNames are the column names to use for the columns. + ColNames []string + Input Primitive } // NeedsTransaction implements the Primitive interface @@ -86,6 +94,10 @@ func (sc *SimpleProjection) Inputs() ([]Primitive, []map[string]any) { // buildResult builds a new result by pulling the necessary columns from // the input in the requested order. 
func (sc *SimpleProjection) buildResult(inner *sqltypes.Result) *sqltypes.Result { + if sc.namesOnly() { + sc.renameFields(inner.Fields) + return inner + } result := &sqltypes.Result{Fields: sc.buildFields(inner)} result.Rows = make([][]sqltypes.Value, 0, len(inner.Rows)) for _, innerRow := range inner.Rows { @@ -99,21 +111,53 @@ func (sc *SimpleProjection) buildResult(inner *sqltypes.Result) *sqltypes.Result return result } +func (sc *SimpleProjection) namesOnly() bool { + return sc.Cols == nil +} + func (sc *SimpleProjection) buildFields(inner *sqltypes.Result) []*querypb.Field { if len(inner.Fields) == 0 { return nil } fields := make([]*querypb.Field, 0, len(sc.Cols)) - for _, col := range sc.Cols { - fields = append(fields, inner.Fields[col]) + for idx, col := range sc.Cols { + field := inner.Fields[col] + if sc.ColNames[idx] != "" { + field = proto.Clone(field).(*querypb.Field) + field.Name = sc.ColNames[idx] + } + fields = append(fields, field) } return fields } +func (sc *SimpleProjection) renameFields(fields []*querypb.Field) { + if len(fields) == 0 { + return + } + for idx, name := range sc.ColNames { + if sc.ColNames[idx] != "" { + fields[idx].Name = name + } + } +} + func (sc *SimpleProjection) description() PrimitiveDescription { - other := map[string]any{ - "Columns": sc.Cols, + other := map[string]any{} + if !sc.namesOnly() { + other["Columns"] = strings.Join(slice.Map(sc.Cols, strconv.Itoa), ",") } + + var colNames []string + for idx, cName := range sc.ColNames { + if cName != "" { + colNames = append(colNames, fmt.Sprintf("%d:%s", idx, cName)) + } + } + if colNames != nil { + other["ColumnNames"] = colNames + } + return PrimitiveDescription{ OperatorType: "SimpleProjection", Other: other, diff --git a/go/vt/vtgate/engine/simple_projection_test.go b/go/vt/vtgate/engine/simple_projection_test.go index 99d644c93af..37c5a4d1dc0 100644 --- a/go/vt/vtgate/engine/simple_projection_test.go +++ b/go/vt/vtgate/engine/simple_projection_test.go @@ -44,8 +44,9 
@@ func TestSubqueryExecute(t *testing.T) { } sq := &SimpleProjection{ - Cols: []int{0, 2}, - Input: prim, + Cols: []int{0, 2}, + ColNames: []string{"", ""}, + Input: prim, } bv := map[string]*querypb.BindVariable{ @@ -59,7 +60,7 @@ func TestSubqueryExecute(t *testing.T) { prim.ExpectLog(t, []string{ `Execute a: type:INT64 value:"1" true`, }) - expectResult(t, "sq.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col3", "int64|varchar", @@ -93,8 +94,9 @@ func TestSubqueryStreamExecute(t *testing.T) { } sq := &SimpleProjection{ - Cols: []int{0, 2}, - Input: prim, + Cols: []int{0, 2}, + ColNames: []string{"", ""}, + Input: prim, } bv := map[string]*querypb.BindVariable{ @@ -108,7 +110,7 @@ func TestSubqueryStreamExecute(t *testing.T) { prim.ExpectLog(t, []string{ `StreamExecute a: type:INT64 value:"1" true`, }) - expectResult(t, "sq.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col3", "int64|varchar", @@ -142,8 +144,9 @@ func TestSubqueryGetFields(t *testing.T) { } sq := &SimpleProjection{ - Cols: []int{0, 2}, - Input: prim, + Cols: []int{0, 2}, + ColNames: []string{"", ""}, + Input: prim, } bv := map[string]*querypb.BindVariable{ @@ -158,7 +161,7 @@ func TestSubqueryGetFields(t *testing.T) { `GetFields a: type:INT64 value:"1"`, `Execute a: type:INT64 value:"1" true`, }) - expectResult(t, "sq.Execute", r, sqltypes.MakeTestResult( + expectResult(t, r, sqltypes.MakeTestResult( sqltypes.MakeTestFields( "col1|col3", "int64|varchar", diff --git a/go/vt/vtgate/engine/sql_calc_found_rows.go b/go/vt/vtgate/engine/sql_calc_found_rows.go index 2472bfd1d14..64ec80f99c7 100644 --- a/go/vt/vtgate/engine/sql_calc_found_rows.go +++ b/go/vt/vtgate/engine/sql_calc_found_rows.go @@ -34,22 +34,22 @@ type SQLCalcFoundRows struct { } // RouteType implements the Primitive interface -func (s SQLCalcFoundRows) RouteType() string { +func (s *SQLCalcFoundRows) 
RouteType() string { return "SQLCalcFoundRows" } // GetKeyspaceName implements the Primitive interface -func (s SQLCalcFoundRows) GetKeyspaceName() string { +func (s *SQLCalcFoundRows) GetKeyspaceName() string { return s.LimitPrimitive.GetKeyspaceName() } // GetTableName implements the Primitive interface -func (s SQLCalcFoundRows) GetTableName() string { +func (s *SQLCalcFoundRows) GetTableName() string { return s.LimitPrimitive.GetTableName() } // TryExecute implements the Primitive interface -func (s SQLCalcFoundRows) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { +func (s *SQLCalcFoundRows) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { limitQr, err := vcursor.ExecutePrimitive(ctx, s.LimitPrimitive, bindVars, wantfields) if err != nil { return nil, err @@ -70,7 +70,7 @@ func (s SQLCalcFoundRows) TryExecute(ctx context.Context, vcursor VCursor, bindV } // TryStreamExecute implements the Primitive interface -func (s SQLCalcFoundRows) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { +func (s *SQLCalcFoundRows) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { err := vcursor.StreamExecutePrimitive(ctx, s.LimitPrimitive, bindVars, wantfields, callback) if err != nil { return err @@ -104,21 +104,21 @@ func (s SQLCalcFoundRows) TryStreamExecute(ctx context.Context, vcursor VCursor, } // GetFields implements the Primitive interface -func (s SQLCalcFoundRows) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { +func (s *SQLCalcFoundRows) GetFields(ctx context.Context, vcursor VCursor, bindVars 
map[string]*querypb.BindVariable) (*sqltypes.Result, error) { return s.LimitPrimitive.GetFields(ctx, vcursor, bindVars) } // NeedsTransaction implements the Primitive interface -func (s SQLCalcFoundRows) NeedsTransaction() bool { +func (s *SQLCalcFoundRows) NeedsTransaction() bool { return s.LimitPrimitive.NeedsTransaction() } // Inputs implements the Primitive interface -func (s SQLCalcFoundRows) Inputs() ([]Primitive, []map[string]any) { +func (s *SQLCalcFoundRows) Inputs() ([]Primitive, []map[string]any) { return []Primitive{s.LimitPrimitive, s.CountPrimitive}, nil } -func (s SQLCalcFoundRows) description() PrimitiveDescription { +func (s *SQLCalcFoundRows) description() PrimitiveDescription { return PrimitiveDescription{ OperatorType: "SQL_CALC_FOUND_ROWS", } diff --git a/go/vt/vtgate/engine/throttle_app.go b/go/vt/vtgate/engine/throttle_app.go index db485e6bec3..8b2370699cf 100644 --- a/go/vt/vtgate/engine/throttle_app.go +++ b/go/vt/vtgate/engine/throttle_app.go @@ -31,12 +31,11 @@ var _ Primitive = (*ThrottleApp)(nil) // ThrottleApp represents the instructions to perform an online schema change via vtctld type ThrottleApp struct { - Keyspace *vindexes.Keyspace - ThrottledAppRule *topodatapb.ThrottledAppRule - noTxNeeded - noInputs + + Keyspace *vindexes.Keyspace + ThrottledAppRule *topodatapb.ThrottledAppRule } func (v *ThrottleApp) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/uncorrelated_subquery_test.go b/go/vt/vtgate/engine/uncorrelated_subquery_test.go index 3e80c6369a7..085fe09238f 100644 --- a/go/vt/vtgate/engine/uncorrelated_subquery_test.go +++ b/go/vt/vtgate/engine/uncorrelated_subquery_test.go @@ -65,7 +65,7 @@ func TestPulloutSubqueryValueGood(t *testing.T) { require.NoError(t, err) sfp.ExpectLog(t, []string{`Execute aa: type:INT64 value:"1" false`}) ufp.ExpectLog(t, []string{`Execute aa: type:INT64 value:"1" sq: type:INT64 value:"1" false`}) - expectResult(t, "ps.Execute", result, underlyingResult) + expectResult(t, 
result, underlyingResult) } func TestPulloutSubqueryValueNone(t *testing.T) { @@ -279,7 +279,7 @@ func TestPulloutSubqueryStream(t *testing.T) { require.NoError(t, err) sfp.ExpectLog(t, []string{`Execute aa: type:INT64 value:"1" false`}) ufp.ExpectLog(t, []string{`StreamExecute aa: type:INT64 value:"1" sq: type:INT64 value:"1" true`}) - expectResult(t, "ps.StreamExecute", result, underlyingResult) + expectResult(t, result, underlyingResult) } func TestPulloutSubqueryGetFields(t *testing.T) { diff --git a/go/vt/vtgate/engine/unlock.go b/go/vt/vtgate/engine/unlock.go new file mode 100644 index 00000000000..5addbb957fa --- /dev/null +++ b/go/vt/vtgate/engine/unlock.go @@ -0,0 +1,79 @@ +/* +Copyright 2020 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vterrors" +) + +var _ Primitive = (*Unlock)(nil) + +// Unlock primitive will execute unlock tables to all connections in the session. 
+type Unlock struct { + noTxNeeded + noInputs +} + +const unlockTables = "unlock tables" + +func (u *Unlock) RouteType() string { + return "UNLOCK" +} + +func (u *Unlock) GetKeyspaceName() string { + return "" +} + +func (u *Unlock) GetTableName() string { + return "" +} + +func (u *Unlock) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.VT13001("GetFields should not be called for unlock tables") +} + +func (u *Unlock) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + rss := vcursor.Session().ShardSession() + + if len(rss) == 0 { + return &sqltypes.Result{}, nil + } + bqs := make([]*querypb.BoundQuery, len(rss)) + for i := 0; i < len(rss); i++ { + bqs[i] = &querypb.BoundQuery{Sql: unlockTables} + } + qr, errs := vcursor.ExecuteMultiShard(ctx, u, rss, bqs, true, false) + return qr, vterrors.Aggregate(errs) +} + +func (u *Unlock) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + qr, err := u.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(qr) +} + +func (u *Unlock) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "UnlockTables", + } +} diff --git a/go/vt/vtgate/engine/update.go b/go/vt/vtgate/engine/update.go index 3db7972fba5..13c590bbb63 100644 --- a/go/vt/vtgate/engine/update.go +++ b/go/vt/vtgate/engine/update.go @@ -42,13 +42,13 @@ type VindexValues struct { // Update represents the instructions to perform an update. type Update struct { + // Update does not take inputs + noInputs + *DML // ChangedVindexValues contains values for updated Vindexes during an update statement. 
ChangedVindexValues map[string]*VindexValues - - // Update does not take inputs - noInputs } // TryExecute performs a non-streaming exec. @@ -56,7 +56,7 @@ func (upd *Update) TryExecute(ctx context.Context, vcursor VCursor, bindVars map ctx, cancelFunc := addQueryTimeout(ctx, vcursor, upd.QueryTimeout) defer cancelFunc() - rss, _, err := upd.findRoute(ctx, vcursor, bindVars) + rss, bvs, err := upd.findRoute(ctx, vcursor, bindVars) if err != nil { return nil, err } @@ -69,7 +69,7 @@ func (upd *Update) TryExecute(ctx context.Context, vcursor VCursor, bindVars map case Unsharded: return upd.execUnsharded(ctx, upd, vcursor, bindVars, rss) case Equal, EqualUnique, IN, Scatter, ByDestination, SubShard, MultiEqual: - return upd.execMultiDestination(ctx, upd, vcursor, bindVars, rss, upd.updateVindexEntries) + return upd.execMultiDestination(ctx, upd, vcursor, bindVars, rss, upd.updateVindexEntries, bvs) default: // Unreachable. return nil, fmt.Errorf("unsupported opcode: %v", upd.Opcode) @@ -204,6 +204,7 @@ func (upd *Update) description() PrimitiveDescription { "OwnedVindexQuery": upd.OwnedVindexQuery, "MultiShardAutocommit": upd.MultiShardAutocommit, "QueryTimeout": upd.QueryTimeout, + "NoAutoCommit": upd.PreventAutoCommit, } addFieldsIfNotEmpty(upd.DML, other) diff --git a/go/vt/vtgate/engine/update_target.go b/go/vt/vtgate/engine/update_target.go index a15d47b9915..9f47199079a 100644 --- a/go/vt/vtgate/engine/update_target.go +++ b/go/vt/vtgate/engine/update_target.go @@ -30,12 +30,11 @@ var _ Primitive = (*UpdateTarget)(nil) // UpdateTarget is an operator to update target string. 
type UpdateTarget struct { - // Target string to be updated - Target string - noInputs - noTxNeeded + + // Target string to be updated + Target string } func (updTarget *UpdateTarget) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/update_test.go b/go/vt/vtgate/engine/update_test.go index 22c2b90d60e..eb6af5a5299 100644 --- a/go/vt/vtgate/engine/update_test.go +++ b/go/vt/vtgate/engine/update_test.go @@ -21,6 +21,7 @@ import ( "errors" "testing" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -93,7 +94,7 @@ func TestUpdateEqual(t *testing.T) { }) // Failure case - upd.Values = []evalengine.Expr{evalengine.NewBindVar("aa", evalengine.UnknownType())} + upd.Values = []evalengine.Expr{evalengine.NewBindVar("aa", evalengine.Type{})} _, err = upd.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) require.EqualError(t, err, `query arguments missing for aa`) } @@ -208,34 +209,6 @@ func TestUpdateEqualNoRoute(t *testing.T) { }) } -func TestUpdateEqualNoScatter(t *testing.T) { - t.Skip("planner does not produces this plan anymore") - vindex, _ := vindexes.CreateVindex("lookup_unique", "", map[string]string{ - "table": "lkp", - "from": "from", - "to": "toc", - "write_only": "true", - }) - upd := &Update{ - DML: &DML{ - RoutingParameters: &RoutingParameters{ - Opcode: Equal, - Keyspace: &vindexes.Keyspace{ - Name: "ks", - Sharded: true, - }, - Vindex: vindex, - Values: []evalengine.Expr{evalengine.NewLiteralInt(1)}, - }, - Query: "dummy_update", - }, - } - - vc := newDMLTestVCursor("0") - _, err := upd.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) - require.EqualError(t, err, `cannot map vindex to unique keyspace id: DestinationKeyRange(-)`) -} - func TestUpdateEqualChangedVindex(t *testing.T) { ks := buildTestVSchema().Keyspaces["sharded"] upd := &Update{ @@ -637,7 +610,7 @@ func TestUpdateIn(t *testing.T) { 
vc.ExpectLog(t, []string{ `ResolveDestinations sharded [type:INT64 value:"1" type:INT64 value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f)`, // ResolveDestinations is hard-coded to return -20. - `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) } @@ -663,7 +636,7 @@ func TestUpdateInStreamExecute(t *testing.T) { vc.ExpectLog(t, []string{ `ResolveDestinations sharded [type:INT64 value:"1" type:INT64 value:"2"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f)`, // ResolveDestinations is hard-coded to return -20. - `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) } @@ -688,7 +661,7 @@ func TestUpdateInMultiCol(t *testing.T) { vc.ExpectLog(t, []string{ `ResolveDestinationsMultiCol sharded [[INT64(1) INT64(3)] [INT64(1) INT64(4)] [INT64(2) INT64(3)] [INT64(2) INT64(4)]] Destinations:DestinationKeyspaceID(014eb190c9a2fa169c),DestinationKeyspaceID(01d2fd8867d50d2dfe),DestinationKeyspaceID(024eb190c9a2fa169c),DestinationKeyspaceID(02d2fd8867d50d2dfe)`, // ResolveDestinations is hard-coded to return -20. 
- `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals0: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"} __vals1: type:TUPLE values:{type:INT64 value:"3"} values:{type:INT64 value:"4"}} true true`, }) } @@ -761,7 +734,7 @@ func TestUpdateInChangedVindex(t *testing.T) { `Execute delete from lkp1 where from = :from and toc = :toc from: type:INT64 value:"23" toc: type:VARBINARY value:"\x06\xe7\xea\"Βp\x8f" true`, `Execute insert into lkp1(from, toc) values(:from_0, :toc_0) from_0: type:INT64 value:"3" toc_0: type:VARBINARY value:"\x06\xe7\xea\"Βp\x8f" true`, // Finally, the actual update, which is also sent to -20, same route as the subquery. - `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) // No rows changing @@ -775,7 +748,7 @@ func TestUpdateInChangedVindex(t *testing.T) { // It gets used to perform the subquery to fetch the changing column values. `ExecuteMultiShard sharded.-20: dummy_subquery {} false false`, // Subquery returns no rows. So, no vindexes are updated. We still pass-through the original update. - `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) // multiple rows changing. @@ -818,7 +791,7 @@ func TestUpdateInChangedVindex(t *testing.T) { `Execute delete from lkp1 where from = :from and toc = :toc from: type:INT64 value:"23" toc: type:VARBINARY value:"\x06\xe7\xea\"Βp\x8f" true`, `Execute insert into lkp1(from, toc) values(:from_0, :toc_0) from_0: type:INT64 value:"3" toc_0: type:VARBINARY value:"\x06\xe7\xea\"Βp\x8f" true`, // Finally, the actual update, which is also sent to -20, same route as the subquery. 
- `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) } @@ -880,7 +853,7 @@ func TestUpdateInChangedVindexMultiCol(t *testing.T) { `Execute delete from lkp_rg_tbl where from = :from and toc = :toc from: type:INT64 value:"7" toc: type:VARBINARY value:"\x02N\xb1\x90ɢ\xfa\x16\x9c" true`, `Execute insert into lkp_rg_tbl(from, toc) values(:from_0, :toc_0) from_0: type:INT64 value:"5" toc_0: type:VARBINARY value:"\x02N\xb1\x90ɢ\xfa\x16\x9c" true`, // Finally, the actual update, which is also sent to -20, same route as the subquery. - `ExecuteMultiShard sharded.-20: dummy_update {} true true`, + `ExecuteMultiShard sharded.-20: dummy_update {__vals0: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"2"}} true true`, }) } @@ -948,6 +921,39 @@ func TestUpdateMultiEqual(t *testing.T) { }) } +// TestUpdateInUnique is a test function for update statement using an IN clause with the Vindexes, +// the query is correctly split according to the corresponding values in the IN list. 
+func TestUpdateInUnique(t *testing.T) { + ks := buildTestVSchema().Keyspaces["sharded"] + upd := &Update{ + DML: &DML{ + RoutingParameters: &RoutingParameters{ + Opcode: IN, + Keyspace: ks.Keyspace, + Vindex: ks.Vindexes["hash"], + Values: []evalengine.Expr{evalengine.TupleExpr{ + evalengine.NewLiteralInt(1), + evalengine.NewLiteralInt(2), + evalengine.NewLiteralInt(4), + }}}, + Query: "update t set n = 'b' where id in ::__vals", + }, + } + + tupleBV := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: append([]*querypb.Value{sqltypes.ValueToProto(sqltypes.NewInt64(1))}, sqltypes.ValueToProto(sqltypes.NewInt64(2)), sqltypes.ValueToProto(sqltypes.NewInt64(4))), + } + vc := newDMLTestVCursor("-20", "20-") + vc.shardForKsid = []string{"-20", "20-"} + _, err := upd.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{"__vals": tupleBV}, false) + require.NoError(t, err) + vc.ExpectLog(t, []string{ + `ResolveDestinations sharded [type:INT64 value:"1" type:INT64 value:"2" type:INT64 value:"4"] Destinations:DestinationKeyspaceID(166b40b44aba4bd6),DestinationKeyspaceID(06e7ea22ce92708f),DestinationKeyspaceID(d2fd8867d50d2dfe)`, + `ExecuteMultiShard sharded.-20: update t set n = 'b' where id in ::__vals {__vals: type:TUPLE values:{type:INT64 value:"1"} values:{type:INT64 value:"4"}} sharded.20-: update t set n = 'b' where id in ::__vals {__vals: type:TUPLE values:{type:INT64 value:"2"}} true false`, + }) +} + func buildTestVSchema() *vindexes.VSchema { invschema := &vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ @@ -1023,7 +1029,7 @@ func buildTestVSchema() *vindexes.VSchema { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) return vs } diff --git a/go/vt/vtgate/engine/upsert.go b/go/vt/vtgate/engine/upsert.go new file mode 100644 index 00000000000..2e42452a7a4 --- /dev/null +++ b/go/vt/vtgate/engine/upsert.go @@ -0,0 +1,137 @@ +/* +Copyright 2023 The Vitess 
Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package engine + +import ( + "context" + "fmt" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +var _ Primitive = (*Upsert)(nil) + +// Upsert Primitive will execute the insert primitive first and +// if there is `Duplicate Key` error, it executes the update primitive. +type Upsert struct { + txNeeded + Upserts []upsert +} + +type upsert struct { + Insert Primitive + Update Primitive +} + +// AddUpsert appends to the Upsert Primitive. +func (u *Upsert) AddUpsert(ins, upd Primitive) { + u.Upserts = append(u.Upserts, upsert{ + Insert: ins, + Update: upd, + }) +} + +// RouteType implements Primitive interface type. +func (u *Upsert) RouteType() string { + return "UPSERT" +} + +// GetKeyspaceName implements Primitive interface type. +func (u *Upsert) GetKeyspaceName() string { + if len(u.Upserts) > 0 { + return u.Upserts[0].Insert.GetKeyspaceName() + } + return "" +} + +// GetTableName implements Primitive interface type. +func (u *Upsert) GetTableName() string { + if len(u.Upserts) > 0 { + return u.Upserts[0].Insert.GetTableName() + } + return "" +} + +// GetFields implements Primitive interface type. 
+func (u *Upsert) GetFields(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + return nil, vterrors.VT13001("unexpected to receive GetFields call for insert on duplicate key update query") +} + +// TryExecute implements Primitive interface type. +func (u *Upsert) TryExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool) (*sqltypes.Result, error) { + result := &sqltypes.Result{} + for _, up := range u.Upserts { + qr, err := execOne(ctx, vcursor, bindVars, wantfields, up) + if err != nil { + return nil, err + } + result.RowsAffected += qr.RowsAffected + } + return result, nil +} + +func execOne(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, up upsert) (*sqltypes.Result, error) { + insQr, err := vcursor.ExecutePrimitive(ctx, up.Insert, bindVars, wantfields) + if err == nil { + return insQr, nil + } + if vterrors.Code(err) != vtrpcpb.Code_ALREADY_EXISTS { + return nil, err + } + updQr, err := vcursor.ExecutePrimitive(ctx, up.Update, bindVars, wantfields) + if err != nil { + return nil, err + } + // To match mysql, need to report +1 on rows affected if there is any change. + if updQr.RowsAffected > 0 { + updQr.RowsAffected += 1 + } + return updQr, nil +} + +// TryStreamExecute implements Primitive interface type. +func (u *Upsert) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { + qr, err := u.TryExecute(ctx, vcursor, bindVars, wantfields) + if err != nil { + return err + } + return callback(qr) +} + +// Inputs implements Primitive interface type. 
+func (u *Upsert) Inputs() ([]Primitive, []map[string]any) { + var inputs []Primitive + var inputsMap []map[string]any + for i, up := range u.Upserts { + inputs = append(inputs, up.Insert, up.Update) + inputsMap = append(inputsMap, + map[string]any{inputName: fmt.Sprintf("Insert-%d", i+1)}, + map[string]any{inputName: fmt.Sprintf("Update-%d", i+1)}) + } + return inputs, inputsMap +} + +func (u *Upsert) description() PrimitiveDescription { + return PrimitiveDescription{ + OperatorType: "Upsert", + TargetTabletType: topodatapb.TabletType_PRIMARY, + } +} diff --git a/go/vt/vtgate/engine/vexplain.go b/go/vt/vtgate/engine/vexplain.go index ad540f96c9c..010901021fa 100644 --- a/go/vt/vtgate/engine/vexplain.go +++ b/go/vt/vtgate/engine/vexplain.go @@ -122,7 +122,7 @@ func (v *VExplain) convertToVExplainAllResult(ctx context.Context, vcursor VCurs explainQuery := fmt.Sprintf("explain format = json %v", entry.Query) // We rely on the parser to see if the query we have is explainable or not // If we get an error in parsing then we can't execute explain on the given query, and we skip it - _, err := sqlparser.Parse(explainQuery) + _, err := vcursor.Environment().Parser().Parse(explainQuery) if err != nil { continue } diff --git a/go/vt/vtgate/engine/vindex_func.go b/go/vt/vtgate/engine/vindex_func.go index ccc3d366c20..ecd83baeaad 100644 --- a/go/vt/vtgate/engine/vindex_func.go +++ b/go/vt/vtgate/engine/vindex_func.go @@ -38,6 +38,12 @@ var _ Primitive = (*VindexFunc)(nil) // VindexFunc is a primitive that performs vindex functions. type VindexFunc struct { + // VindexFunc does not take inputs + noInputs + + // VindexFunc does not need to work inside a tx + noTxNeeded + Opcode VindexOpcode // Fields is the field info for the result. Fields []*querypb.Field @@ -46,12 +52,6 @@ type VindexFunc struct { // TODO(sougou): add support for MultiColumn. 
Vindex vindexes.SingleColumn Value evalengine.Expr - - // VindexFunc does not take inputs - noInputs - - // VindexFunc does not need to work inside a tx - noTxNeeded } // VindexOpcode is the opcode for a VindexFunc. diff --git a/go/vt/vtgate/engine/vindex_lookup.go b/go/vt/vtgate/engine/vindex_lookup.go index aaf49feea95..8bf8755c40e 100644 --- a/go/vt/vtgate/engine/vindex_lookup.go +++ b/go/vt/vtgate/engine/vindex_lookup.go @@ -227,10 +227,6 @@ func (vr *VindexLookup) executeBatch(ctx context.Context, vcursor VCursor, ids [ } else { result, err = vcursor.ExecutePrimitive(ctx, vr.Lookup, bindVars, false) } - if err != nil { - return nil, err - } - if err != nil { return nil, vterrors.Wrapf(err, "failed while running the lookup query") } diff --git a/go/vt/vtgate/engine/vschema_ddl.go b/go/vt/vtgate/engine/vschema_ddl.go index 1e385269c1d..cb107eecb58 100644 --- a/go/vt/vtgate/engine/vschema_ddl.go +++ b/go/vt/vtgate/engine/vschema_ddl.go @@ -30,13 +30,11 @@ var _ Primitive = (*AlterVSchema)(nil) // AlterVSchema operator applies changes to VSchema type AlterVSchema struct { - Keyspace *vindexes.Keyspace - - AlterVschemaDDL *sqlparser.AlterVschema - noTxNeeded - noInputs + + Keyspace *vindexes.Keyspace + AlterVschemaDDL *sqlparser.AlterVschema } func (v *AlterVSchema) description() PrimitiveDescription { diff --git a/go/vt/vtgate/engine/vstream.go b/go/vt/vtgate/engine/vstream.go index 2ad8286dfcc..5d3c92c4d98 100644 --- a/go/vt/vtgate/engine/vstream.go +++ b/go/vt/vtgate/engine/vstream.go @@ -35,14 +35,14 @@ var _ Primitive = (*VStream)(nil) // VStream is an operator for streaming specific keyspace, destination type VStream struct { + noTxNeeded + noInputs + Keyspace *vindexes.Keyspace TargetDestination key.Destination TableName string Position string Limit int - - noTxNeeded - noInputs } // RouteType implements the Primitive interface diff --git a/go/vt/vtgate/evalengine/api_aggregation.go b/go/vt/vtgate/evalengine/api_aggregation.go index c0d490ced22..78ab8335d6d 
100644 --- a/go/vt/vtgate/evalengine/api_aggregation.go +++ b/go/vt/vtgate/evalengine/api_aggregation.go @@ -389,6 +389,7 @@ func (s *aggregationDecimal) Min(value sqltypes.Value) error { } if !s.dec.IsInitialized() || dec.Cmp(s.dec) < 0 { s.dec = dec + s.prec = -dec.Exponent() } return nil } @@ -403,6 +404,7 @@ func (s *aggregationDecimal) Max(value sqltypes.Value) error { } if !s.dec.IsInitialized() || dec.Cmp(s.dec) > 0 { s.dec = dec + s.prec = -dec.Exponent() } return nil } @@ -443,8 +445,10 @@ func NewAggregationSum(type_ sqltypes.Type) Sum { // The aggregation is performed using the slow NullSafeComparison path of the // evaluation engine. type aggregationMinMax struct { - current sqltypes.Value - collation collations.ID + current sqltypes.Value + collation collations.ID + collationEnv *collations.Environment + values *EnumSetValues } func (a *aggregationMinMax) minmax(value sqltypes.Value, max bool) (err error) { @@ -455,7 +459,7 @@ func (a *aggregationMinMax) minmax(value sqltypes.Value, max bool) (err error) { a.current = value return nil } - n, err := compare(a.current, value, a.collation) + n, err := compare(a.current, value, a.collationEnv, a.collation, a.values) if err != nil { return err } @@ -481,17 +485,17 @@ func (a *aggregationMinMax) Reset() { a.current = sqltypes.NULL } -func NewAggregationMinMax(type_ sqltypes.Type, collation collations.ID) MinMax { +func NewAggregationMinMax(typ sqltypes.Type, collationEnv *collations.Environment, collation collations.ID, values *EnumSetValues) MinMax { switch { - case sqltypes.IsSigned(type_): - return &aggregationInt{t: type_} - case sqltypes.IsUnsigned(type_): - return &aggregationUint{t: type_} - case sqltypes.IsFloat(type_): - return &aggregationFloat{t: type_} - case sqltypes.IsDecimal(type_): + case sqltypes.IsSigned(typ): + return &aggregationInt{t: typ} + case sqltypes.IsUnsigned(typ): + return &aggregationUint{t: typ} + case sqltypes.IsFloat(typ): + return &aggregationFloat{t: typ} + case 
sqltypes.IsDecimal(typ): return &aggregationDecimal{} default: - return &aggregationMinMax{collation: collation} + return &aggregationMinMax{collation: collation, collationEnv: collationEnv, values: values} } } diff --git a/go/vt/vtgate/evalengine/api_aggregation_test.go b/go/vt/vtgate/evalengine/api_aggregation_test.go index aab49541e71..05884b4bb4b 100644 --- a/go/vt/vtgate/evalengine/api_aggregation_test.go +++ b/go/vt/vtgate/evalengine/api_aggregation_test.go @@ -72,6 +72,12 @@ func TestMinMax(t *testing.T) { min: sqltypes.NewVarBinary("a"), max: sqltypes.NewVarBinary("b"), }, + { + type_: sqltypes.Decimal, + values: []sqltypes.Value{sqltypes.NewDecimal("1.001"), sqltypes.NewDecimal("2.1")}, + min: sqltypes.NewDecimal("1.001"), + max: sqltypes.NewDecimal("2.1"), + }, { // accent insensitive type_: sqltypes.VarChar, @@ -131,7 +137,7 @@ func TestMinMax(t *testing.T) { for i, tcase := range tcases { t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run("Min", func(t *testing.T) { - agg := NewAggregationMinMax(tcase.type_, tcase.coll) + agg := NewAggregationMinMax(tcase.type_, collations.MySQL8(), tcase.coll, nil) for _, v := range tcase.values { err := agg.Min(v) @@ -147,7 +153,7 @@ func TestMinMax(t *testing.T) { }) t.Run("Max", func(t *testing.T) { - agg := NewAggregationMinMax(tcase.type_, tcase.coll) + agg := NewAggregationMinMax(tcase.type_, collations.MySQL8(), tcase.coll, nil) for _, v := range tcase.values { err := agg.Max(v) diff --git a/go/vt/vtgate/evalengine/api_arithmetic.go b/go/vt/vtgate/evalengine/api_arithmetic.go deleted file mode 100644 index 4da7e3450a2..00000000000 --- a/go/vt/vtgate/evalengine/api_arithmetic.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package evalengine - -import ( - "vitess.io/vitess/go/sqltypes" -) - -// evalengine represents a numeric value extracted from -// a Value, used for arithmetic operations. -var zeroBytes = []byte("0") - -// Add adds two values together -// if v1 or v2 is null, then it returns null -func Add(v1, v2 sqltypes.Value) (sqltypes.Value, error) { - if v1.IsNull() || v2.IsNull() { - return sqltypes.NULL, nil - } - e1, err := valueToEval(v1, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - e2, err := valueToEval(v2, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - r, err := addNumericWithError(e1, e2) - if err != nil { - return sqltypes.NULL, err - } - return evalToSQLValue(r), nil -} - -// Subtract takes two values and subtracts them -func Subtract(v1, v2 sqltypes.Value) (sqltypes.Value, error) { - if v1.IsNull() || v2.IsNull() { - return sqltypes.NULL, nil - } - e1, err := valueToEval(v1, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - e2, err := valueToEval(v2, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - r, err := subtractNumericWithError(e1, e2) - if err != nil { - return sqltypes.NULL, err - } - return evalToSQLValue(r), nil -} - -// Multiply takes two values and multiplies it together -func Multiply(v1, v2 sqltypes.Value) (sqltypes.Value, error) { - if v1.IsNull() || v2.IsNull() { - return sqltypes.NULL, nil - } - e1, err := valueToEval(v1, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - e2, err := valueToEval(v2, collationNumeric) - if err != nil { - return 
sqltypes.NULL, err - } - r, err := multiplyNumericWithError(e1, e2) - if err != nil { - return sqltypes.NULL, err - } - return evalToSQLValue(r), nil -} - -// Divide (Float) for MySQL. Replicates behavior of "/" operator -func Divide(v1, v2 sqltypes.Value) (sqltypes.Value, error) { - if v1.IsNull() || v2.IsNull() { - return sqltypes.NULL, nil - } - e1, err := valueToEval(v1, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - e2, err := valueToEval(v2, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - r, err := divideNumericWithError(e1, e2, true) - if err != nil { - return sqltypes.NULL, err - } - return evalToSQLValue(r), nil -} - -// NullSafeAdd adds two Values in a null-safe manner. A null value -// is treated as 0. If both values are null, then a null is returned. -// If both values are not null, a numeric value is built -// from each input: Signed->int64, Unsigned->uint64, Float->float64. -// Otherwise the 'best type fit' is chosen for the number: int64 or float64. -// opArithAdd is performed by upgrading types as needed, or in case -// of overflow: int64->uint64, int64->float64, uint64->float64. -// Unsigned ints can only be added to positive ints. After the -// addition, if one of the input types was Decimal, then -// a Decimal is built. Otherwise, the final type of the -// result is preserved. 
-func NullSafeAdd(v1, v2 sqltypes.Value, resultType sqltypes.Type) (sqltypes.Value, error) { - if v1.IsNull() { - v1 = sqltypes.MakeTrusted(resultType, zeroBytes) - } - if v2.IsNull() { - v2 = sqltypes.MakeTrusted(resultType, zeroBytes) - } - - e1, err := valueToEval(v1, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - e2, err := valueToEval(v2, collationNumeric) - if err != nil { - return sqltypes.NULL, err - } - r, err := addNumericWithError(e1, e2) - if err != nil { - return sqltypes.NULL, err - } - return evalToSQLValueWithType(r, resultType), nil -} diff --git a/go/vt/vtgate/evalengine/api_arithmetic_test.go b/go/vt/vtgate/evalengine/api_arithmetic_test.go index 40373423aa5..c0a68de8f83 100644 --- a/go/vt/vtgate/evalengine/api_arithmetic_test.go +++ b/go/vt/vtgate/evalengine/api_arithmetic_test.go @@ -17,550 +17,27 @@ limitations under the License. package evalengine import ( - "encoding/binary" "fmt" - "math" "reflect" - "strconv" "testing" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/vthash" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" - - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vthash" ) var ( NULL = sqltypes.NULL - NewInt32 = sqltypes.NewInt32 NewInt64 = sqltypes.NewInt64 NewUint64 = sqltypes.NewUint64 NewFloat64 = sqltypes.NewFloat64 TestValue = sqltypes.TestValue - NewDecimal = sqltypes.NewDecimal - - maxUint64 uint64 = math.MaxUint64 ) -func TestArithmetics(t *testing.T) { - type tcase struct { - v1, v2, out sqltypes.Value - err string - } - - tests := []struct { - operator string - f func(a, b sqltypes.Value) (sqltypes.Value, error) - cases []tcase - }{{ - operator: "-", - f: Subtract, - cases: []tcase{{ - // All Nulls - v1: NULL, - v2: NULL, - out: NULL, - }, { - // First value null. 
- v1: NewInt32(1), - v2: NULL, - out: NULL, - }, { - // Second value null. - v1: NULL, - v2: NewInt32(1), - out: NULL, - }, { - // case with negative value - v1: NewInt64(-1), - v2: NewInt64(-2), - out: NewInt64(1), - }, { - // testing for int64 overflow with min negative value - v1: NewInt64(math.MinInt64), - v2: NewInt64(1), - err: dataOutOfRangeError(math.MinInt64, 1, "BIGINT", "-").Error(), - }, { - v1: NewUint64(4), - v2: NewInt64(5), - err: dataOutOfRangeError(4, 5, "BIGINT UNSIGNED", "-").Error(), - }, { - // testing uint - int - v1: NewUint64(7), - v2: NewInt64(5), - out: NewUint64(2), - }, { - v1: NewUint64(math.MaxUint64), - v2: NewInt64(0), - out: NewUint64(math.MaxUint64), - }, { - // testing for int64 overflow - v1: NewInt64(math.MinInt64), - v2: NewUint64(0), - err: dataOutOfRangeError(math.MinInt64, 0, "BIGINT UNSIGNED", "-").Error(), - }, { - v1: TestValue(sqltypes.VarChar, "c"), - v2: NewInt64(1), - out: NewFloat64(-1), - }, { - v1: NewUint64(1), - v2: TestValue(sqltypes.VarChar, "c"), - out: NewFloat64(1), - }, { - // testing for error for parsing float value to uint64 - v1: TestValue(sqltypes.Uint64, "1.2"), - v2: NewInt64(2), - err: "unparsed tail left after parsing uint64 from \"1.2\": \".2\"", - }, { - // testing for error for parsing float value to uint64 - v1: NewUint64(2), - v2: TestValue(sqltypes.Uint64, "1.2"), - err: "unparsed tail left after parsing uint64 from \"1.2\": \".2\"", - }, { - // uint64 - uint64 - v1: NewUint64(8), - v2: NewUint64(4), - out: NewUint64(4), - }, { - // testing for float subtraction: float - int - v1: NewFloat64(1.2), - v2: NewInt64(2), - out: NewFloat64(-0.8), - }, { - // testing for float subtraction: float - uint - v1: NewFloat64(1.2), - v2: NewUint64(2), - out: NewFloat64(-0.8), - }, { - v1: NewInt64(-1), - v2: NewUint64(2), - err: dataOutOfRangeError(-1, 2, "BIGINT UNSIGNED", "-").Error(), - }, { - v1: NewInt64(2), - v2: NewUint64(1), - out: NewUint64(1), - }, { - // testing int64 - float64 method - v1: 
NewInt64(-2), - v2: NewFloat64(1.0), - out: NewFloat64(-3.0), - }, { - // testing uint64 - float64 method - v1: NewUint64(1), - v2: NewFloat64(-2.0), - out: NewFloat64(3.0), - }, { - // testing uint - int to return uintplusint - v1: NewUint64(1), - v2: NewInt64(-2), - out: NewUint64(3), - }, { - // testing for float - float - v1: NewFloat64(1.2), - v2: NewFloat64(3.2), - out: NewFloat64(-2), - }, { - // testing uint - uint if v2 > v1 - v1: NewUint64(2), - v2: NewUint64(4), - err: dataOutOfRangeError(2, 4, "BIGINT UNSIGNED", "-").Error(), - }, { - // testing uint - (- int) - v1: NewUint64(1), - v2: NewInt64(-2), - out: NewUint64(3), - }}, - }, { - operator: "+", - f: Add, - cases: []tcase{{ - // All Nulls - v1: NULL, - v2: NULL, - out: NULL, - }, { - // First value null. - v1: NewInt32(1), - v2: NULL, - out: NULL, - }, { - // Second value null. - v1: NULL, - v2: NewInt32(1), - out: NULL, - }, { - // case with negatives - v1: NewInt64(-1), - v2: NewInt64(-2), - out: NewInt64(-3), - }, { - // testing for overflow int64, result will be unsigned int - v1: NewInt64(math.MaxInt64), - v2: NewUint64(2), - out: NewUint64(9223372036854775809), - }, { - v1: NewInt64(-2), - v2: NewUint64(1), - err: dataOutOfRangeError(1, -2, "BIGINT UNSIGNED", "+").Error(), - }, { - v1: NewInt64(math.MaxInt64), - v2: NewInt64(-2), - out: NewInt64(9223372036854775805), - }, { - // Normal case - v1: NewUint64(1), - v2: NewUint64(2), - out: NewUint64(3), - }, { - // testing for overflow uint64 - v1: NewUint64(maxUint64), - v2: NewUint64(2), - err: dataOutOfRangeError(maxUint64, 2, "BIGINT UNSIGNED", "+").Error(), - }, { - // int64 underflow - v1: NewInt64(math.MinInt64), - v2: NewInt64(-2), - err: dataOutOfRangeError(math.MinInt64, -2, "BIGINT", "+").Error(), - }, { - // checking int64 max value can be returned - v1: NewInt64(math.MaxInt64), - v2: NewUint64(0), - out: NewUint64(9223372036854775807), - }, { - // testing whether uint64 max value can be returned - v1: NewUint64(math.MaxUint64), - v2: 
NewInt64(0), - out: NewUint64(math.MaxUint64), - }, { - v1: NewUint64(math.MaxInt64), - v2: NewInt64(1), - out: NewUint64(9223372036854775808), - }, { - v1: NewUint64(1), - v2: TestValue(sqltypes.VarChar, "c"), - out: NewFloat64(1), - }, { - v1: NewUint64(1), - v2: TestValue(sqltypes.VarChar, "1.2"), - out: NewFloat64(2.2), - }, { - v1: TestValue(sqltypes.Int64, "1.2"), - v2: NewInt64(2), - err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", - }, { - v1: NewInt64(2), - v2: TestValue(sqltypes.Int64, "1.2"), - err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", - }, { - // testing for uint64 overflow with max uint64 + int value - v1: NewUint64(maxUint64), - v2: NewInt64(2), - err: dataOutOfRangeError(maxUint64, 2, "BIGINT UNSIGNED", "+").Error(), - }, { - v1: sqltypes.NewHexNum([]byte("0x9")), - v2: NewInt64(1), - out: NewUint64(10), - }}, - }, { - operator: "/", - f: Divide, - cases: []tcase{{ - // All Nulls - v1: NULL, - v2: NULL, - out: NULL, - }, { - // First value null. - v1: NULL, - v2: NewInt32(1), - out: NULL, - }, { - // Second value null. 
- v1: NewInt32(1), - v2: NULL, - out: NULL, - }, { - // Second arg 0 - v1: NewInt32(5), - v2: NewInt32(0), - out: NULL, - }, { - // Both arguments zero - v1: NewInt32(0), - v2: NewInt32(0), - out: NULL, - }, { - // case with negative value - v1: NewInt64(-1), - v2: NewInt64(-2), - out: NewDecimal("0.5000"), - }, { - // float64 division by zero - v1: NewFloat64(2), - v2: NewFloat64(0), - out: NULL, - }, { - // Lower bound for int64 - v1: NewInt64(math.MinInt64), - v2: NewInt64(1), - out: NewDecimal(strconv.Itoa(math.MinInt64) + ".0000"), - }, { - // upper bound for uint64 - v1: NewUint64(math.MaxUint64), - v2: NewUint64(1), - out: NewDecimal(strconv.FormatUint(math.MaxUint64, 10) + ".0000"), - }, { - // testing for error in types - v1: TestValue(sqltypes.Int64, "1.2"), - v2: NewInt64(2), - err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", - }, { - // testing for error in types - v1: NewInt64(2), - v2: TestValue(sqltypes.Int64, "1.2"), - err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", - }, { - // testing for uint/int - v1: NewUint64(4), - v2: NewInt64(5), - out: NewDecimal("0.8000"), - }, { - // testing for uint/uint - v1: NewUint64(1), - v2: NewUint64(2), - out: NewDecimal("0.5000"), - }, { - // testing for float64/int64 - v1: TestValue(sqltypes.Float64, "1.2"), - v2: NewInt64(-2), - out: NewFloat64(-0.6), - }, { - // testing for float64/uint64 - v1: TestValue(sqltypes.Float64, "1.2"), - v2: NewUint64(2), - out: NewFloat64(0.6), - }, { - // testing for overflow of float64 - v1: NewFloat64(math.MaxFloat64), - v2: NewFloat64(0.5), - err: dataOutOfRangeError(math.MaxFloat64, 0.5, "DOUBLE", "/").Error(), - }}, - }, { - operator: "*", - f: Multiply, - cases: []tcase{{ - // All Nulls - v1: NULL, - v2: NULL, - out: NULL, - }, { - // First value null. - v1: NewInt32(1), - v2: NULL, - out: NULL, - }, { - // Second value null. 
- v1: NULL, - v2: NewInt32(1), - out: NULL, - }, { - // case with negative value - v1: NewInt64(-1), - v2: NewInt64(-2), - out: NewInt64(2), - }, { - // testing for int64 overflow with min negative value - v1: NewInt64(math.MinInt64), - v2: NewInt64(1), - out: NewInt64(math.MinInt64), - }, { - // testing for error in types - v1: TestValue(sqltypes.Int64, "1.2"), - v2: NewInt64(2), - err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", - }, { - // testing for error in types - v1: NewInt64(2), - v2: TestValue(sqltypes.Int64, "1.2"), - err: "unparsed tail left after parsing int64 from \"1.2\": \".2\"", - }, { - // testing for uint*int - v1: NewUint64(4), - v2: NewInt64(5), - out: NewUint64(20), - }, { - // testing for uint*uint - v1: NewUint64(1), - v2: NewUint64(2), - out: NewUint64(2), - }, { - // testing for float64*int64 - v1: TestValue(sqltypes.Float64, "1.2"), - v2: NewInt64(-2), - out: NewFloat64(-2.4), - }, { - // testing for float64*uint64 - v1: TestValue(sqltypes.Float64, "1.2"), - v2: NewUint64(2), - out: NewFloat64(2.4), - }, { - // testing for overflow of int64 - v1: NewInt64(math.MaxInt64), - v2: NewInt64(2), - err: dataOutOfRangeError(math.MaxInt64, 2, "BIGINT", "*").Error(), - }, { - // testing for underflow of uint64*max.uint64 - v1: NewInt64(2), - v2: NewUint64(maxUint64), - err: dataOutOfRangeError(maxUint64, 2, "BIGINT UNSIGNED", "*").Error(), - }, { - v1: NewUint64(math.MaxUint64), - v2: NewUint64(1), - out: NewUint64(math.MaxUint64), - }, { - // Checking whether maxInt value can be passed as uint value - v1: NewUint64(math.MaxInt64), - v2: NewInt64(3), - err: dataOutOfRangeError(math.MaxInt64, 3, "BIGINT UNSIGNED", "*").Error(), - }}, - }} - - for _, test := range tests { - t.Run(test.operator, func(t *testing.T) { - for _, tcase := range test.cases { - name := fmt.Sprintf("%s%s%s", tcase.v1.String(), test.operator, tcase.v2.String()) - t.Run(name, func(t *testing.T) { - got, err := test.f(tcase.v1, tcase.v2) - if tcase.err == "" { 
- require.NoError(t, err) - require.Equal(t, tcase.out, got) - } else { - require.EqualError(t, err, tcase.err) - } - }) - } - }) - } -} - -func TestNullSafeAdd(t *testing.T) { - tcases := []struct { - v1, v2 sqltypes.Value - out sqltypes.Value - err error - }{{ - // All nulls. - v1: NULL, - v2: NULL, - out: NewInt64(0), - }, { - // First value null. - v1: NewInt32(1), - v2: NULL, - out: NewInt64(1), - }, { - // Second value null. - v1: NULL, - v2: NewInt32(1), - out: NewInt64(1), - }, { - // Normal case. - v1: NewInt64(1), - v2: NewInt64(2), - out: NewInt64(3), - }, { - // Make sure underlying error is returned for LHS. - v1: TestValue(sqltypes.Int64, "1.2"), - v2: NewInt64(2), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), - }, { - // Make sure underlying error is returned for RHS. - v1: NewInt64(2), - v2: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), - }, { - // Make sure underlying error is returned while adding. - v1: NewInt64(-1), - v2: NewUint64(2), - out: NewInt64(1), - }, { - v1: NewInt64(-100), - v2: NewUint64(10), - err: dataOutOfRangeError(10, -100, "BIGINT UNSIGNED", "+"), - }, { - // Make sure underlying error is returned while converting. 
- v1: NewFloat64(1), - v2: NewFloat64(2), - out: NewInt64(3), - }} - for _, tcase := range tcases { - got, err := NullSafeAdd(tcase.v1, tcase.v2, sqltypes.Int64) - - if tcase.err == nil { - require.NoError(t, err) - } else { - require.EqualError(t, err, tcase.err.Error()) - } - - if !reflect.DeepEqual(got, tcase.out) { - t.Errorf("NullSafeAdd(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), printValue(got), printValue(tcase.out)) - } - } -} - -func TestNewIntegralNumeric(t *testing.T) { - tcases := []struct { - v sqltypes.Value - out eval - err error - }{{ - v: NewInt64(1), - out: newEvalInt64(1), - }, { - v: NewUint64(1), - out: newEvalUint64(1), - }, { - v: NewFloat64(1), - out: newEvalInt64(1), - }, { - // For non-number type, Int64 is the default. - v: TestValue(sqltypes.VarChar, "1"), - out: newEvalInt64(1), - }, { - // If Int64 can't work, we use Uint64. - v: TestValue(sqltypes.VarChar, "18446744073709551615"), - out: newEvalUint64(18446744073709551615), - }, { - // Only valid Int64 allowed if type is Int64. - v: TestValue(sqltypes.Int64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing int64 from \"1.2\": \".2\""), - }, { - // Only valid Uint64 allowed if type is Uint64. 
- v: TestValue(sqltypes.Uint64, "1.2"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "unparsed tail left after parsing uint64 from \"1.2\": \".2\""), - }, { - v: TestValue(sqltypes.VarChar, "abcd"), - err: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: 'abcd'"), - }} - for _, tcase := range tcases { - got, err := valueToEvalNumeric(tcase.v) - if err != nil && !vterrors.Equals(err, tcase.err) { - t.Errorf("newIntegralNumeric(%s) error: %v, want %v", printValue(tcase.v), vterrors.Print(err), vterrors.Print(tcase.err)) - } - if tcase.err == nil { - continue - } - - utils.MustMatch(t, tcase.out, got, "newIntegralNumeric") - } -} - func TestAddNumeric(t *testing.T) { tcases := []struct { v1, v2 eval @@ -594,12 +71,12 @@ func TestAddNumeric(t *testing.T) { // Int64 overflow. v1: newEvalInt64(9223372036854775807), v2: newEvalInt64(2), - err: dataOutOfRangeError(9223372036854775807, 2, "BIGINT", "+"), + err: dataOutOfRangeError(int64(9223372036854775807), int64(2), "BIGINT", "+"), }, { // Int64 underflow. v1: newEvalInt64(-9223372036854775807), v2: newEvalInt64(-2), - err: dataOutOfRangeError(-9223372036854775807, -2, "BIGINT", "+"), + err: dataOutOfRangeError(int64(-9223372036854775807), int64(-2), "BIGINT", "+"), }, { v1: newEvalInt64(-1), v2: newEvalUint64(2), @@ -608,7 +85,7 @@ func TestAddNumeric(t *testing.T) { // Uint64 overflow. 
v1: newEvalUint64(18446744073709551615), v2: newEvalUint64(2), - err: dataOutOfRangeError(uint64(18446744073709551615), 2, "BIGINT UNSIGNED", "+"), + err: dataOutOfRangeError(uint64(18446744073709551615), uint64(2), "BIGINT UNSIGNED", "+"), }} for _, tcase := range tcases { got, err := addNumericWithError(tcase.v1, tcase.v2) @@ -686,60 +163,81 @@ func TestPrioritize(t *testing.T) { } func TestToSqlValue(t *testing.T) { + nt := func(t sqltypes.Type) Type { + return NewType(t, collations.CollationBinaryID) + } + tcases := []struct { - typ sqltypes.Type + typ Type v eval out sqltypes.Value err error }{{ - typ: sqltypes.Int64, + typ: nt(sqltypes.Int64), v: newEvalInt64(1), out: NewInt64(1), }, { - typ: sqltypes.Int64, + typ: nt(sqltypes.Int64), v: newEvalUint64(1), out: NewInt64(1), }, { - typ: sqltypes.Int64, + typ: nt(sqltypes.Int64), v: newEvalFloat(1.2e-16), out: NewInt64(0), }, { - typ: sqltypes.Uint64, + typ: nt(sqltypes.Uint64), v: newEvalInt64(1), out: NewUint64(1), }, { - typ: sqltypes.Uint64, + typ: nt(sqltypes.Uint64), v: newEvalUint64(1), out: NewUint64(1), }, { - typ: sqltypes.Uint64, + typ: nt(sqltypes.Uint64), v: newEvalFloat(1.2e-16), out: NewUint64(0), }, { - typ: sqltypes.Float64, + typ: nt(sqltypes.Float64), v: newEvalInt64(1), out: TestValue(sqltypes.Float64, "1"), }, { - typ: sqltypes.Float64, + typ: nt(sqltypes.Float64), v: newEvalUint64(1), out: TestValue(sqltypes.Float64, "1"), }, { - typ: sqltypes.Float64, + typ: nt(sqltypes.Float64), v: newEvalFloat(1.2e-16), out: TestValue(sqltypes.Float64, "1.2e-16"), }, { - typ: sqltypes.Decimal, + typ: nt(sqltypes.Decimal), v: newEvalInt64(1), out: TestValue(sqltypes.Decimal, "1"), }, { - typ: sqltypes.Decimal, + typ: nt(sqltypes.Decimal), v: newEvalUint64(1), out: TestValue(sqltypes.Decimal, "1"), }, { // For float, we should not use scientific notation. 
- typ: sqltypes.Decimal, + typ: nt(sqltypes.Decimal), v: newEvalFloat(1.2e-16), out: TestValue(sqltypes.Decimal, "0.00000000000000012"), + }, { + // null in should return null out no matter what type + typ: nt(sqltypes.Int64), + v: nil, + out: sqltypes.NULL, + }, { + typ: nt(sqltypes.Uint64), + v: nil, + out: sqltypes.NULL, + }, { + typ: nt(sqltypes.Float64), + v: nil, + out: sqltypes.NULL, + }, { + typ: nt(sqltypes.VarChar), + v: nil, + out: sqltypes.NULL, }} for _, tcase := range tcases { got := evalToSQLValueWithType(tcase.v, tcase.typ) @@ -808,71 +306,3 @@ func printValue(v sqltypes.Value) string { vBytes, _ := v.ToBytes() return fmt.Sprintf("%v:%q", v.Type(), vBytes) } - -// These benchmarks show that using existing ASCII representations -// for numbers is about 6x slower than using native representations. -// However, 229ns is still a negligible time compared to the cost of -// other operations. The additional complexity of introducing native -// types is currently not worth it. So, we'll stay with the existing -// ASCII representation for now. Using interfaces is more expensive -// than native representation of values. This is probably because -// interfaces also allocate memory, and also perform type assertions. -// Actual benchmark is based on NoNative. So, the numbers are similar. 
-// Date: 6/4/17 -// Version: go1.8 -// BenchmarkAddActual-8 10000000 263 ns/op -// BenchmarkAddNoNative-8 10000000 228 ns/op -// BenchmarkAddNative-8 50000000 40.0 ns/op -// BenchmarkAddGoInterface-8 30000000 52.4 ns/op -// BenchmarkAddGoNonInterface-8 2000000000 1.00 ns/op -// BenchmarkAddGo-8 2000000000 1.00 ns/op -func BenchmarkAddActual(b *testing.B) { - v1 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) - v2 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("12")) - for i := 0; i < b.N; i++ { - v1, _ = NullSafeAdd(v1, v2, sqltypes.Int64) - } -} - -func BenchmarkAddNoNative(b *testing.B) { - v1 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) - v2 := sqltypes.MakeTrusted(sqltypes.Int64, []byte("12")) - for i := 0; i < b.N; i++ { - iv1, _ := v1.ToInt64() - iv2, _ := v2.ToInt64() - v1 = sqltypes.MakeTrusted(sqltypes.Int64, strconv.AppendInt(nil, iv1+iv2, 10)) - } -} - -func BenchmarkAddNative(b *testing.B) { - v1 := makeNativeInt64(1) - v2 := makeNativeInt64(12) - for i := 0; i < b.N; i++ { - iv1 := int64(binary.BigEndian.Uint64(v1.Raw())) - iv2 := int64(binary.BigEndian.Uint64(v2.Raw())) - v1 = makeNativeInt64(iv1 + iv2) - } -} - -func makeNativeInt64(v int64) sqltypes.Value { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, uint64(v)) - return sqltypes.MakeTrusted(sqltypes.Int64, buf) -} - -func BenchmarkAddGoInterface(b *testing.B) { - var v1, v2 any - v1 = int64(1) - v2 = int64(2) - for i := 0; i < b.N; i++ { - v1 = v1.(int64) + v2.(int64) - } -} - -func BenchmarkAddGo(b *testing.B) { - v1 := int64(1) - v2 := int64(2) - for i := 0; i < b.N; i++ { - v1 += v2 - } -} diff --git a/go/vt/vtgate/evalengine/api_coerce.go b/go/vt/vtgate/evalengine/api_coerce.go index 130727d8f31..eef83c58422 100644 --- a/go/vt/vtgate/evalengine/api_coerce.go +++ b/go/vt/vtgate/evalengine/api_coerce.go @@ -19,12 +19,80 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + 
"vitess.io/vitess/go/vt/vterrors" ) -func CoerceTo(value sqltypes.Value, typ sqltypes.Type) (sqltypes.Value, error) { - cast, err := valueToEvalCast(value, value.Type(), collations.Unknown) +func CoerceTo(value sqltypes.Value, typ Type, sqlmode SQLMode) (sqltypes.Value, error) { + cast, err := valueToEvalCast(value, value.Type(), collations.Unknown, typ.values, sqlmode) if err != nil { return sqltypes.Value{}, err } return evalToSQLValueWithType(cast, typ), nil } + +// CoerceTypes takes two input types, and decides how they should be coerced before compared +func CoerceTypes(v1, v2 Type, collationEnv *collations.Environment) (out Type, err error) { + if v1.Equal(&v2) { + return v1, nil + } + if sqltypes.IsNull(v1.Type()) || sqltypes.IsNull(v2.Type()) { + return NewType(sqltypes.Null, collations.CollationBinaryID), nil + } + + out = Type{ + init: true, + nullable: v1.Nullable() || v2.Nullable(), + } + + switch { + case sqltypes.IsTextOrBinary(v1.Type()) && sqltypes.IsTextOrBinary(v2.Type()): + mergedCollation, _, _, ferr := mergeCollations(typedCoercionCollation(v1.Type(), v1.Collation()), typedCoercionCollation(v2.Type(), v2.Collation()), v1.Type(), v2.Type(), collationEnv) + if ferr != nil { + return Type{}, ferr + } + out.collation = mergedCollation.Collation + out.typ = sqltypes.VarChar + return + + case sqltypes.IsDateOrTime(v1.Type()): + out.collation = collations.CollationBinaryID + out.typ = v1.Type() + return + + case sqltypes.IsDateOrTime(v2.Type()): + out.collation = collations.CollationBinaryID + out.typ = v2.Type() + return + + case sqltypes.IsNumber(v1.Type()) || sqltypes.IsNumber(v2.Type()): + out.collation = collations.CollationBinaryID + switch { + case sqltypes.IsTextOrBinary(v1.Type()) || sqltypes.IsFloat(v1.Type()) || sqltypes.IsDecimal(v1.Type()) || + sqltypes.IsTextOrBinary(v2.Type()) || sqltypes.IsFloat(v2.Type()) || sqltypes.IsDecimal(v2.Type()): + out.typ = sqltypes.Float64 + return + case sqltypes.IsSigned(v1.Type()): + switch { + case 
sqltypes.IsUnsigned(v2.Type()): + out.typ = sqltypes.Uint64 + return + case sqltypes.IsSigned(v2.Type()): + out.typ = sqltypes.Int64 + return + default: + return Type{}, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot coerce SIGNED into %v", v2.Type()) + } + case sqltypes.IsUnsigned(v1.Type()): + switch { + case sqltypes.IsSigned(v2.Type()) || sqltypes.IsUnsigned(v2.Type()): + out.typ = sqltypes.Uint64 + return + default: + return Type{}, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot coerce UNSIGNED into %v", v2.Type()) + } + } + } + + return Type{}, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot coerce %v into %v", v1.Type(), v2.Type()) +} diff --git a/go/vt/vtgate/evalengine/api_compare.go b/go/vt/vtgate/evalengine/api_compare.go index 1f30a17b9d5..6873ad40143 100644 --- a/go/vt/vtgate/evalengine/api_compare.go +++ b/go/vt/vtgate/evalengine/api_compare.go @@ -43,7 +43,7 @@ func (err UnsupportedCollationError) Error() string { // UnsupportedCollationHashError is returned when we try to get the hash value and are missing the collation to use var UnsupportedCollationHashError = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "text type with an unknown/unsupported collation cannot be hashed") -func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { +func compare(v1, v2 sqltypes.Value, collationEnv *collations.Environment, collationID collations.ID, values *EnumSetValues) (int, error) { v1t := v1.Type() // We have a fast path here for the case where both values are @@ -115,7 +115,7 @@ func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { Collation: collationID, Coercibility: collations.CoerceImplicit, Repertoire: collations.RepertoireUnicode, - }) + }, values) if err != nil { return 0, err } @@ -124,12 +124,12 @@ func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { Collation: collationID, Coercibility: collations.CoerceImplicit, Repertoire: collations.RepertoireUnicode, - }) + }, values) if 
err != nil { return 0, err } - out, err := evalCompare(v1eval, v2eval) + out, err := evalCompare(v1eval, v2eval, collationEnv) if err != nil { return 0, err } @@ -147,7 +147,7 @@ func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { // numeric, then a numeric comparison is performed after // necessary conversions. If none are numeric, then it's // a simple binary comparison. Uncomparable values return an error. -func NullsafeCompare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { +func NullsafeCompare(v1, v2 sqltypes.Value, collationEnv *collations.Environment, collationID collations.ID, values *EnumSetValues) (int, error) { // Based on the categorization defined for the types, // we're going to allow comparison of the following: // Null, isNumber, IsBinary. This will exclude IsQuoted @@ -161,7 +161,7 @@ func NullsafeCompare(v1, v2 sqltypes.Value, collationID collations.ID) (int, err if v2.IsNull() { return 1, nil } - return compare(v1, v2, collationID) + return compare(v1, v2, collationEnv, collationID, values) } // OrderByParams specifies the parameters for ordering. 
@@ -176,6 +176,8 @@ type ( // Type for knowing if the collation is relevant Type Type + + CollationEnv *collations.Environment } Comparison []OrderByParams @@ -198,8 +200,8 @@ func (obp *OrderByParams) String() string { val += " ASC" } - if sqltypes.IsText(obp.Type.Type) && obp.Type.Coll != collations.Unknown { - val += " COLLATE " + collations.Local().LookupName(obp.Type.Coll) + if sqltypes.IsText(obp.Type.Type()) && obp.Type.Collation() != collations.Unknown { + val += " COLLATE " + obp.CollationEnv.LookupName(obp.Type.Collation()) } return val } @@ -211,7 +213,7 @@ func (obp *OrderByParams) Compare(r1, r2 []sqltypes.Value) int { if cmp == 0 { var err error - cmp, err = NullsafeCompare(v1, v2, obp.Type.Coll) + cmp, err = NullsafeCompare(v1, v2, obp.CollationEnv, obp.Type.Collation(), obp.Type.values) if err != nil { _, isCollationErr := err.(UnsupportedCollationError) if !isCollationErr || obp.WeightStringCol == -1 { @@ -220,7 +222,7 @@ func (obp *OrderByParams) Compare(r1, r2 []sqltypes.Value) int { // in case of a comparison or collation error switch to using the weight string column for ordering obp.Col = obp.WeightStringCol obp.WeightStringCol = -1 - cmp, err = NullsafeCompare(r1[obp.Col], r2[obp.Col], obp.Type.Coll) + cmp, err = NullsafeCompare(r1[obp.Col], r2[obp.Col], obp.CollationEnv, obp.Type.Collation(), obp.Type.values) if err != nil { panic(err) } @@ -236,7 +238,7 @@ func (obp *OrderByParams) Compare(r1, r2 []sqltypes.Value) int { func (cmp Comparison) tinyWeighters(fields []*querypb.Field) []tinyWeighter { weights := make([]tinyWeighter, 0, len(cmp)) for _, c := range cmp { - if apply := TinyWeighter(fields[c.Col], c.Type.Coll); apply != nil { + if apply := TinyWeighter(fields[c.Col], c.Type.Collation()); apply != nil { weights = append(weights, tinyWeighter{c.Col, apply}) } } diff --git a/go/vt/vtgate/evalengine/api_compare_test.go b/go/vt/vtgate/evalengine/api_compare_test.go index 3f97d9d18e9..aa05f5d2787 100644 --- 
a/go/vt/vtgate/evalengine/api_compare_test.go +++ b/go/vt/vtgate/evalengine/api_compare_test.go @@ -20,7 +20,7 @@ import ( "context" "fmt" "math" - "math/rand" + "math/rand/v2" "slices" "strings" "testing" @@ -30,14 +30,13 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) type testCase struct { @@ -51,18 +50,11 @@ type testCase struct { } var ( - T = true - F = false - collationEnv *collations.Environment + T = true + F = false + collationEnv *collations.Environment = collations.MySQL8() ) -func init() { - // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) -} - func defaultCollation() collations.TypedCollation { return collations.TypedCollation{ Collation: collationEnv.LookupByName("utf8mb4_bin"), @@ -78,11 +70,13 @@ func (tc testCase) run(t *testing.T) { for i, value := range tc.row { fields[i] = &querypb.Field{Type: value.Type()} } - env := NewExpressionEnv(context.Background(), tc.bv, nil) + venv := vtenv.NewTestEnv() + env := NewExpressionEnv(context.Background(), tc.bv, NewEmptyVCursor(venv, time.UTC)) env.Row = tc.row ast := &astCompiler{ cfg: &Config{ - Collation: collations.CollationUtf8mb4ID, + Collation: collations.CollationUtf8mb4ID, + Environment: venv, }, } cmp, err := ast.translateComparisonExpr2(tc.op, tc.v1, tc.v2) @@ -110,7 +104,7 @@ func TestCompareIntegers(t *testing.T) { tests := []testCase{ { name: "integers are equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), v2: 
newColumn(0, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Int64, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt64(18)}, }, @@ -131,25 +125,25 @@ func TestCompareIntegers(t *testing.T) { }, { name: "integers are not equal (3)", - v1: newColumn(0, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Int64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt64(18), sqltypes.NewInt64(98)}, }, { name: "unsigned integers are equal", - v1: newColumn(0, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Uint64, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Uint64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint64(18)}, }, { name: "unsigned integer and integer are equal", - v1: newColumn(0, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Uint64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint64(18), sqltypes.NewInt64(18)}, }, { name: "unsigned integer and integer are not equal", - v1: newColumn(0, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, 
NewType(sqltypes.Uint64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewUint64(18), sqltypes.NewInt64(42)}, }, @@ -207,7 +201,7 @@ func TestCompareFloats(t *testing.T) { tests := []testCase{ { name: "floats are equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(18)}, }, @@ -228,7 +222,7 @@ func TestCompareFloats(t *testing.T) { }, { name: "floats are not equal (3)", - v1: newColumn(0, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(16516.84), sqltypes.NewFloat64(219541.01)}, }, @@ -286,37 +280,37 @@ func TestCompareDecimals(t *testing.T) { tests := []testCase{ { name: "decimals are equal", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("12.9019")}, }, { name: "decimals are not equal", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: 
collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("12.9019"), sqltypes.NewDecimal("489.156849")}, }, { name: "decimal is greater than decimal", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewDecimal("192.128")}, }, { name: "decimal is not greater than decimal", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.128"), sqltypes.NewDecimal("192.129")}, }, { name: "decimal is less than decimal", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.128"), sqltypes.NewDecimal("192.129")}, }, { name: "decimal is not less than decimal", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: 
collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewDecimal("192.128")}, }, @@ -334,151 +328,151 @@ func TestCompareNumerics(t *testing.T) { tests := []testCase{ { name: "decimal and float are equal", - v1: newColumn(0, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(189.6), sqltypes.NewDecimal("189.6")}, }, { name: "decimal and float with negative values are equal", - v1: newColumn(0, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(-98.1839), sqltypes.NewDecimal("-98.1839")}, }, { name: "decimal and float with negative values are not equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal("-98.1839")}, }, { name: "decimal and float with negative values are not equal (2)", - v1: newColumn(0, Type{Type: 
sqltypes.Float64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Float64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewFloat64(-98.9381), sqltypes.NewDecimal("-98.1839")}, }, { name: "decimal and integer are equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Int64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt64(8979), sqltypes.NewDecimal("8979")}, }, { name: "decimal and integer are equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("8979.0000"), sqltypes.NewInt64(8979)}, }, { name: "decimal and unsigned integer are equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Uint64, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Decimal, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint64(901), sqltypes.NewDecimal("901")}, }, { name: "decimal and unsigned integer are equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), 
v2: newColumn(1, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Uint64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("901.00"), sqltypes.NewUint64(901)}, }, { name: "decimal and unsigned integer are not equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Uint64, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewUint64(192)}, }, { name: "decimal and unsigned integer are not equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Uint64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Uint64, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("192.129"), sqltypes.NewUint64(192)}, }, { name: "decimal is greater than integer", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.01"), sqltypes.NewInt64(1)}, }, { name: "decimal is greater-equal to integer", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: 
collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.00"), sqltypes.NewInt64(1)}, }, { name: "decimal is less than integer", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal(".99"), sqltypes.NewInt64(1)}, }, { name: "decimal is less-equal to integer", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.00"), sqltypes.NewInt64(1)}, }, { name: "decimal is greater than float", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("849.896"), sqltypes.NewFloat64(86.568)}, }, { name: "decimal is not greater than float", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, 
collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("15.23"), sqltypes.NewFloat64(8689.5)}, }, { name: "decimal is greater-equal to float (1)", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("65"), sqltypes.NewFloat64(65)}, }, { name: "decimal is greater-equal to float (2)", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("65"), sqltypes.NewFloat64(60)}, }, { name: "decimal is less than float", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDecimal("0.998"), sqltypes.NewFloat64(0.999)}, }, { name: "decimal is less-equal to float", - v1: newColumn(0, Type{Type: sqltypes.Decimal, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Float64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Decimal, collations.CollationBinaryID)), v2: newColumn(1, 
NewType(sqltypes.Float64, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDecimal("1.000101"), sqltypes.NewFloat64(1.00101)}, }, { name: "different int types are equal for 8 bit", - v1: newColumn(0, Type{Type: sqltypes.Int8, Coll: collations.CollationBinaryID}), v2: NewLiteralInt(0), + v1: newColumn(0, NewType(sqltypes.Int8, collations.CollationBinaryID)), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt8(0)}, }, { name: "different int types are equal for 32 bit", - v1: newColumn(0, Type{Type: sqltypes.Int32, Coll: collations.CollationBinaryID}), v2: NewLiteralInt(0), + v1: newColumn(0, NewType(sqltypes.Int32, collations.CollationBinaryID)), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewInt32(0)}, }, { name: "different int types are equal for float32 bit", - v1: newColumn(0, Type{Type: sqltypes.Float32, Coll: collations.CollationBinaryID}), v2: NewLiteralFloat(1.0), + v1: newColumn(0, NewType(sqltypes.Float32, collations.CollationBinaryID)), v2: NewLiteralFloat(1.0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Float32, []byte("1.0"))}, }, { name: "different unsigned int types are equal for 8 bit", - v1: newColumn(0, Type{Type: sqltypes.Uint8, Coll: collations.CollationBinaryID}), v2: NewLiteralInt(0), + v1: newColumn(0, NewType(sqltypes.Uint8, collations.CollationBinaryID)), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.MakeTrusted(sqltypes.Uint8, []byte("0"))}, }, { name: "different unsigned int types are equal for 32 bit", - v1: newColumn(0, Type{Type: sqltypes.Uint32, Coll: collations.CollationBinaryID}), v2: NewLiteralInt(0), + v1: newColumn(0, NewType(sqltypes.Uint32, collations.CollationBinaryID)), v2: NewLiteralInt(0), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewUint32(0)}, }, @@ -496,73 +490,73 @@ func 
TestCompareDatetime(t *testing.T) { tests := []testCase{ { name: "datetimes are equal", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-22 12:00:00")}, }, { name: "datetimes are not equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-22 12:00:00"), sqltypes.NewDatetime("2020-10-22 12:00:00")}, }, { name: "datetimes are not equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-22 12:00:00"), sqltypes.NewDatetime("2021-10-22 10:23:56")}, }, { name: "datetimes are not equal (3)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 
00:00:00"), sqltypes.NewDatetime("2021-02-01 00:00:00")}, }, { name: "datetime is greater than datetime", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-01 13:10:02")}, }, { name: "datetime is not greater than datetime", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 13:10:02"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is less than datetime", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 13:10:02"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is not less than datetime", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, 
collations.CollationBinaryID)), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-01 13:10:02")}, }, { name: "datetime is greater-equal to datetime (1)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is greater-equal to datetime (2)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-01 13:10:02")}, }, { name: "datetime is less-equal to datetime (1)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-30 10:42:50"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, { name: "datetime is less-equal to datetime (2)", - v1: newColumn(0, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: 
collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Datetime, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDatetime("2021-10-01 13:10:02"), sqltypes.NewDatetime("2021-10-30 10:42:50")}, }, @@ -580,73 +574,73 @@ func TestCompareTimestamp(t *testing.T) { tests := []testCase{ { name: "timestamps are equal", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-22 12:00:00")}, }, { name: "timestamps are not equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-22 12:00:00"), sqltypes.NewTimestamp("2020-10-22 12:00:00")}, }, { name: "timestamps are not equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-22 12:00:00"), sqltypes.NewTimestamp("2021-10-22 10:23:56")}, }, { name: "timestamps are not equal (3)", - v1: newColumn(0, Type{Type: 
sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 00:00:00"), sqltypes.NewTimestamp("2021-02-01 00:00:00")}, }, { name: "timestamp is greater than timestamp", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-01 13:10:02")}, }, { name: "timestamp is not greater than timestamp", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 13:10:02"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is less than timestamp", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 13:10:02"), 
sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is not less than timestamp", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-01 13:10:02")}, }, { name: "timestamp is greater-equal to timestamp (1)", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is greater-equal to timestamp (2)", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-01 13:10:02")}, }, { name: "timestamp is less-equal to timestamp (1)", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, 
NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-30 10:42:50"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, { name: "timestamp is less-equal to timestamp (2)", - v1: newColumn(0, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTimestamp("2021-10-01 13:10:02"), sqltypes.NewTimestamp("2021-10-30 10:42:50")}, }, @@ -668,67 +662,67 @@ func TestCompareDate(t *testing.T) { tests := []testCase{ { name: "dates are equal", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22")}, }, { name: "dates are not equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewDate("2020-10-21")}, }, { name: "dates are not equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), 
v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-02-01")}, }, { name: "date is greater than date", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-01")}, }, { name: "date is not greater than date", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is less than date", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is not less than date", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &F, op: 
sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-01")}, }, { name: "date is greater-equal to date (1)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is greater-equal to date (2)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-01")}, }, { name: "date is less-equal to date (1)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-30"), sqltypes.NewDate("2021-10-30")}, }, { name: "date is less-equal to date (2)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-01"), 
sqltypes.NewDate("2021-10-30")}, }, @@ -746,79 +740,79 @@ func TestCompareTime(t *testing.T) { tests := []testCase{ { name: "times are equal", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTime("12:00:00")}, }, { name: "times are not equal (1)", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewTime("12:00:00"), sqltypes.NewTime("10:23:56")}, }, { name: "times are not equal (2)", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewTime("00:00:00"), sqltypes.NewTime("10:15:00")}, }, { name: "time is greater than time", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("18:14:35"), sqltypes.NewTime("13:01:38")}, }, { name: "time is not greater than time", - v1: 
newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("02:46:02"), sqltypes.NewTime("10:42:50")}, }, { name: "time is greater than time", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("101:14:35"), sqltypes.NewTime("13:01:38")}, }, { name: "time is not greater than time", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &F, op: sqlparser.GreaterThanOp, row: []sqltypes.Value{sqltypes.NewTime("24:46:02"), sqltypes.NewTime("101:42:50")}, }, { name: "time is less than time", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTime("04:30:00"), sqltypes.NewTime("09:23:48")}, }, { name: "time is not less than time", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: 
sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &F, op: sqlparser.LessThanOp, row: []sqltypes.Value{sqltypes.NewTime("15:21:00"), sqltypes.NewTime("10:00:00")}, }, { name: "time is greater-equal to time (1)", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTime("10:42:50"), sqltypes.NewTime("10:42:50")}, }, { name: "time is greater-equal to time (2)", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.GreaterEqualOp, row: []sqltypes.Value{sqltypes.NewTime("19:42:50"), sqltypes.NewTime("13:10:02")}, }, { name: "time is less-equal to time (1)", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTime("10:42:50"), sqltypes.NewTime("10:42:50")}, }, { name: "time is less-equal to time (2)", - v1: newColumn(0, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, 
NewType(sqltypes.Time, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.LessEqualOp, row: []sqltypes.Value{sqltypes.NewTime("10:10:02"), sqltypes.NewTime("10:42:50")}, }, @@ -836,13 +830,13 @@ func TestCompareDates(t *testing.T) { tests := []testCase{ { name: "date equal datetime", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewDatetime("2021-10-22 00:00:00")}, }, { name: "date equal datetime through bind variables", - v1: NewBindVar("k1", Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: NewBindVar("k2", Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: NewBindVar("k1", NewType(sqltypes.Date, collations.CollationBinaryID)), v2: NewBindVar("k2", NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, bv: map[string]*querypb.BindVariable{ "k1": {Type: sqltypes.Date, Value: []byte("2021-10-22")}, @@ -851,7 +845,7 @@ func TestCompareDates(t *testing.T) { }, { name: "date not equal datetime through bind variables", - v1: NewBindVar("k1", Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: NewBindVar("k2", Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: NewBindVar("k1", NewType(sqltypes.Date, collations.CollationBinaryID)), v2: NewBindVar("k2", NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, bv: map[string]*querypb.BindVariable{ "k1": {Type: sqltypes.Date, Value: []byte("2021-02-20")}, @@ -860,73 +854,73 @@ func TestCompareDates(t *testing.T) { }, { name: "date not 
equal datetime", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewDatetime("2021-10-20 00:06:00")}, }, { name: "date equal timestamp", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewTimestamp("2021-10-22 00:00:00")}, }, { name: "date not equal timestamp", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-10-22"), sqltypes.NewTimestamp("2021-10-22 16:00:00")}, }, { name: "date equal time", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &F, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewDate(time.Now().Format("2006-01-02")), sqltypes.NewTime("00:00:00")}, }, { name: "date not equal time", - v1: newColumn(0, Type{Type: 
sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate(time.Now().Format("2006-01-02")), sqltypes.NewTime("12:00:00")}, }, { name: "string equal datetime", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), v2: newColumn(1, Type{Type: sqltypes.Datetime, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), v2: newColumn(1, NewType(sqltypes.Datetime, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-10-22"), sqltypes.NewDatetime("2021-10-22 00:00:00")}, }, { name: "string equal timestamp", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-10-22 00:00:00"), sqltypes.NewTimestamp("2021-10-22 00:00:00")}, }, { name: "string not equal timestamp", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), v2: newColumn(1, Type{Type: sqltypes.Timestamp, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), v2: newColumn(1, NewType(sqltypes.Timestamp, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-10-22 06:00:30"), sqltypes.NewTimestamp("2021-10-20 15:02:10")}, }, { name: "string equal time", - v1: newColumn(0, 
Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), v2: newColumn(1, Type{Type: sqltypes.Time, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), v2: newColumn(1, NewType(sqltypes.Time, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("00:05:12"), sqltypes.NewTime("00:05:12")}, }, { name: "string equal date", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-02-22"), sqltypes.NewDate("2021-02-22")}, }, { name: "string not equal date (1, date on the RHS)", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), v2: newColumn(1, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), v2: newColumn(1, NewType(sqltypes.Date, collations.CollationBinaryID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("2021-02-20"), sqltypes.NewDate("2021-03-30")}, }, { name: "string not equal date (2, date on the LHS)", - v1: newColumn(0, Type{Type: sqltypes.Date, Coll: collations.CollationBinaryID}), v2: newColumn(1, Type{Type: sqltypes.VarChar, Coll: collations.CollationUtf8mb4ID}), + v1: newColumn(0, NewType(sqltypes.Date, collations.CollationBinaryID)), v2: newColumn(1, NewType(sqltypes.VarChar, collations.CollationUtf8mb4ID)), out: &T, op: sqlparser.NotEqualOp, row: []sqltypes.Value{sqltypes.NewDate("2021-03-30"), sqltypes.NewVarChar("2021-02-20")}, }, @@ -944,13 +938,13 @@ func TestCompareStrings(t *testing.T) { tests := []testCase{ { name: "string equal 
string", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.Default()}), v2: newColumn(1, Type{Type: sqltypes.VarChar, Coll: collations.Default()}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset())), v2: newColumn(1, NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset())), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("toto"), sqltypes.NewVarChar("toto")}, }, { name: "string equal number", - v1: newColumn(0, Type{Type: sqltypes.VarChar, Coll: collations.Default()}), v2: newColumn(1, Type{Type: sqltypes.Int64, Coll: collations.CollationBinaryID}), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset())), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("1"), sqltypes.NewInt64(1)}, }, @@ -1114,11 +1108,12 @@ func TestNullComparisons(t *testing.T) { } func TestNullsafeCompare(t *testing.T) { - collation := collationEnv.LookupByName("utf8mb4_general_ci") + collation := collations.ID(collations.CollationUtf8mb4ID) tcases := []struct { v1, v2 sqltypes.Value out int err error + values *EnumSetValues }{ { v1: NULL, @@ -1145,23 +1140,60 @@ func TestNullsafeCompare(t *testing.T) { v2: TestValue(sqltypes.VarChar, " 6736380880502626304.000000 aa"), out: -1, }, + { + v1: TestValue(sqltypes.Enum, "foo"), + v2: TestValue(sqltypes.Enum, "bar"), + out: -1, + values: &EnumSetValues{"'foo'", "'bar'"}, + }, { v1: TestValue(sqltypes.Enum, "foo"), v2: TestValue(sqltypes.Enum, "bar"), out: 1, }, + { + v1: TestValue(sqltypes.Enum, "foo"), + v2: TestValue(sqltypes.VarChar, "bar"), + out: 1, + values: &EnumSetValues{"'foo'", "'bar'"}, + }, + { + v1: TestValue(sqltypes.VarChar, "foo"), + v2: TestValue(sqltypes.Enum, "bar"), + out: 1, + }, + { + v1: TestValue(sqltypes.Set, "bar"), + v2: TestValue(sqltypes.Set, "foo,bar"), + out: -1, + values: 
&EnumSetValues{"'foo'", "'bar'"}, + }, + { + v1: TestValue(sqltypes.Set, "bar"), + v2: TestValue(sqltypes.Set, "foo,bar"), + out: -1, + }, + { + v1: TestValue(sqltypes.VarChar, "bar"), + v2: TestValue(sqltypes.Set, "foo,bar"), + out: -1, + values: &EnumSetValues{"'foo'", "'bar'"}, + }, + { + v1: TestValue(sqltypes.Set, "bar"), + v2: TestValue(sqltypes.VarChar, "foo,bar"), + out: -1, + }, } for _, tcase := range tcases { t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { - got, err := NullsafeCompare(tcase.v1, tcase.v2, collation) + got, err := NullsafeCompare(tcase.v1, tcase.v2, collations.MySQL8(), collation, tcase.values) if tcase.err != nil { require.EqualError(t, err, tcase.err.Error()) return } require.NoError(t, err) - if got != tcase.out { - t.Errorf("NullsafeCompare(%v, %v): %v, want %v", printValue(tcase.v1), printValue(tcase.v2), got, tcase.out) - } + assert.Equal(t, tcase.out, got) }) } } @@ -1242,7 +1274,7 @@ func TestNullsafeCompareCollate(t *testing.T) { } for _, tcase := range tcases { t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { - got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) + got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), collations.MySQL8(), tcase.collation, nil) if tcase.err == nil { require.NoError(t, err) } else { @@ -1293,7 +1325,7 @@ func BenchmarkNullSafeComparison(b *testing.B) { for i := 0; i < b.N; i++ { for _, lhs := range inputs { for _, rhs := range inputs { - _, _ = NullsafeCompare(lhs, rhs, collid) + _, _ = NullsafeCompare(lhs, rhs, collations.MySQL8(), collid, nil) } } } @@ -1323,7 +1355,7 @@ func BenchmarkNullSafeComparison(b *testing.B) { for i := 0; i < b.N; i++ { for _, lhs := range inputs { for _, rhs := range inputs { - _, _ = NullsafeCompare(lhs, rhs, collations.CollationUtf8mb4ID) + _, _ = NullsafeCompare(lhs, rhs, collations.MySQL8(), 
collations.CollationUtf8mb4ID, nil) } } } @@ -1341,30 +1373,30 @@ func TestCompareSorter(t *testing.T) { Count: 100, Limit: 10, Random: sqltypes.RandomGenerators[sqltypes.Int64], - Cmp: Comparison{{Col: 0, Desc: false, Type: Type{Type: sqltypes.Int64}}}, + Cmp: Comparison{{Col: 0, Desc: false, Type: NewType(sqltypes.Int64, collations.Unknown)}}, }, { Count: 100, Limit: 10, Random: sqltypes.RandomGenerators[sqltypes.Int64], - Cmp: Comparison{{Col: 0, Desc: true, Type: Type{Type: sqltypes.Int64}}}, + Cmp: Comparison{{Col: 0, Desc: true, Type: NewType(sqltypes.Int64, collations.Unknown)}}, }, { Count: 100, Limit: math.MaxInt, Random: sqltypes.RandomGenerators[sqltypes.Int64], - Cmp: Comparison{{Col: 0, Desc: false, Type: Type{Type: sqltypes.Int64}}}, + Cmp: Comparison{{Col: 0, Desc: false, Type: NewType(sqltypes.Int64, collations.Unknown)}}, }, { Count: 100, Limit: math.MaxInt, Random: sqltypes.RandomGenerators[sqltypes.Int64], - Cmp: Comparison{{Col: 0, Desc: true, Type: Type{Type: sqltypes.Int64}}}, + Cmp: Comparison{{Col: 0, Desc: true, Type: NewType(sqltypes.Int64, collations.Unknown)}}, }, } for _, tc := range cases { - t.Run(fmt.Sprintf("%s-%d-%d", tc.Cmp[0].Type.Type, tc.Count, tc.Limit), func(t *testing.T) { + t.Run(fmt.Sprintf("%s-%d-%d", tc.Cmp[0].Type.Type(), tc.Count, tc.Limit), func(t *testing.T) { unsorted := make([]sqltypes.Row, 0, tc.Count) for i := 0; i < tc.Count; i++ { unsorted = append(unsorted, []sqltypes.Value{tc.Random()}) diff --git a/go/vt/vtgate/evalengine/api_hash.go b/go/vt/vtgate/evalengine/api_hash.go index 209f766840d..a5e5d1778dd 100644 --- a/go/vt/vtgate/evalengine/api_hash.go +++ b/go/vt/vtgate/evalengine/api_hash.go @@ -34,8 +34,8 @@ type HashCode = uint64 // NullsafeHashcode returns an int64 hashcode that is guaranteed to be the same // for two values that are considered equal by `NullsafeCompare`. 
-func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqltypes.Type) (HashCode, error) { - e, err := valueToEvalCast(v, coerceType, collation) +func NullsafeHashcode(v sqltypes.Value, collation collations.ID, coerceType sqltypes.Type, sqlmode SQLMode, values *EnumSetValues) (HashCode, error) { + e, err := valueToEvalCast(v, coerceType, collation, values, sqlmode) if err != nil { return 0, err } @@ -75,7 +75,7 @@ var ErrHashCoercionIsNotExact = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, " // for two values that are considered equal by `NullsafeCompare`. // This can be used to avoid having to do comparison checks after a hash, // since we consider the 128 bits of entropy enough to guarantee uniqueness. -func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type) error { +func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type, sqlmode SQLMode, values *EnumSetValues) error { switch { case v.IsNull(), sqltypes.IsNull(coerceTo): hash.Write16(hashPrefixNil) @@ -97,7 +97,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case v.IsText(), v.IsBinary(): f, _ = fastparse.ParseFloat64(v.RawStr()) default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode, values) } if err != nil { return err @@ -137,7 +137,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat } neg = i < 0 default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode, values) } if err != nil { return err @@ -180,7 +180,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat u, err = uint64(fval), nil } default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return 
nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode, values) } if err != nil { return err @@ -199,7 +199,7 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat case sqltypes.IsText(coerceTo): coll := colldata.Lookup(collation) if coll == nil { - panic("cannot hash unsupported collation") + return UnsupportedCollationHashError } hash.Write16(hashPrefixBytes) coll.Hash(hash, v.Raw(), 0) @@ -223,20 +223,20 @@ func NullsafeHashcode128(hash *vthash.Hasher, v sqltypes.Value, collation collat fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode, values) } hash.Write16(hashPrefixDecimal) dec.Hash(hash) default: - return nullsafeHashcode128Default(hash, v, collation, coerceTo) + return nullsafeHashcode128Default(hash, v, collation, coerceTo, sqlmode, values) } return nil } -func nullsafeHashcode128Default(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type) error { +func nullsafeHashcode128Default(hash *vthash.Hasher, v sqltypes.Value, collation collations.ID, coerceTo sqltypes.Type, sqlmode SQLMode, values *EnumSetValues) error { // Slow path to handle all other types. This uses the generic // logic for value casting to ensure we match MySQL here. 
- e, err := valueToEvalCast(v, coerceTo, collation) + e, err := valueToEvalCast(v, coerceTo, collation, values, sqlmode) if err != nil { return err } diff --git a/go/vt/vtgate/evalengine/api_hash_test.go b/go/vt/vtgate/evalengine/api_hash_test.go index 832a1ed3b88..bb2652ec6f2 100644 --- a/go/vt/vtgate/evalengine/api_hash_test.go +++ b/go/vt/vtgate/evalengine/api_hash_test.go @@ -24,13 +24,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vthash" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" ) func TestHashCodes(t *testing.T) { @@ -54,14 +52,14 @@ func TestHashCodes(t *testing.T) { for _, tc := range cases { t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.MySQL8(), collations.CollationUtf8mb4ID, nil) require.NoError(t, err) require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) - h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + h1, err := NullsafeHashcode(tc.static, collations.CollationUtf8mb4ID, tc.static.Type(), 0, nil) require.NoError(t, err) - h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + h2, err := NullsafeHashcode(tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type(), 0, nil) require.ErrorIs(t, err, tc.err) assert.Equalf(t, tc.equal, h1 == h2, "HASH(%v) %s HASH(%v) (expected %s)", tc.static, equality(h1 == h2).Operator(), tc.dynamic, equality(tc.equal)) @@ -74,19 +72,19 @@ func TestHashCodes(t *testing.T) { func TestHashCodesRandom(t 
*testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci") + collation := collations.MySQL8().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ v1, v2 := sqltypes.TestRandomValues() - cmp, err := NullsafeCompare(v1, v2, collation) + cmp, err := NullsafeCompare(v1, v2, collations.MySQL8(), collation, nil) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) - hash1, err := NullsafeHashcode(v1, collation, typ) + hash1, err := NullsafeHashcode(v1, collation, typ, 0, nil) require.NoError(t, err) - hash2, err := NullsafeHashcode(v2, collation, typ) + hash2, err := NullsafeHashcode(v2, collation, typ, 0, nil) require.NoError(t, err) if cmp == 0 { equal++ @@ -139,16 +137,16 @@ func TestHashCodes128(t *testing.T) { for _, tc := range cases { t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.MySQL8(), collations.CollationUtf8mb4ID, nil) require.NoError(t, err) require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) hasher1 := vthash.New() - err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type()) + err = NullsafeHashcode128(&hasher1, tc.static, collations.CollationUtf8mb4ID, tc.static.Type(), 0, nil) require.NoError(t, err) hasher2 := vthash.New() - err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type()) + err = NullsafeHashcode128(&hasher2, tc.dynamic, collations.CollationUtf8mb4ID, tc.static.Type(), 0, nil) require.ErrorIs(t, err, tc.err) h1 := hasher1.Sum128() @@ -163,21 +161,21 @@ func TestHashCodes128(t 
*testing.T) { func TestHashCodesRandom128(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci") + collation := collations.MySQL8().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ v1, v2 := sqltypes.TestRandomValues() - cmp, err := NullsafeCompare(v1, v2, collation) + cmp, err := NullsafeCompare(v1, v2, collations.MySQL8(), collation, nil) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) hasher1 := vthash.New() - err = NullsafeHashcode128(&hasher1, v1, collation, typ) + err = NullsafeHashcode128(&hasher1, v1, collation, typ, 0, nil) require.NoError(t, err) hasher2 := vthash.New() - err = NullsafeHashcode128(&hasher2, v2, collation, typ) + err = NullsafeHashcode128(&hasher2, v2, collation, typ, 0, nil) require.NoError(t, err) if cmp == 0 { equal++ @@ -197,7 +195,7 @@ func coerceTo(v1, v2 sqltypes.Type) (sqltypes.Type, error) { if sqltypes.IsNull(v1) || sqltypes.IsNull(v2) { return sqltypes.Null, nil } - if (sqltypes.IsText(v1) || sqltypes.IsBinary(v1)) && (sqltypes.IsText(v2) || sqltypes.IsBinary(v2)) { + if (sqltypes.IsTextOrBinary(v1)) && (sqltypes.IsTextOrBinary(v2)) { return sqltypes.VarChar, nil } if sqltypes.IsDateOrTime(v1) { @@ -209,7 +207,7 @@ func coerceTo(v1, v2 sqltypes.Type) (sqltypes.Type, error) { if sqltypes.IsNumber(v1) || sqltypes.IsNumber(v2) { switch { - case sqltypes.IsText(v1) || sqltypes.IsBinary(v1) || sqltypes.IsText(v2) || sqltypes.IsBinary(v2): + case sqltypes.IsTextOrBinary(v1) || sqltypes.IsTextOrBinary(v2): return sqltypes.Float64, nil case sqltypes.IsFloat(v2) || v2 == sqltypes.Decimal || sqltypes.IsFloat(v1) || v1 == sqltypes.Decimal: return sqltypes.Float64, nil diff --git a/go/vt/vtgate/evalengine/api_literal.go b/go/vt/vtgate/evalengine/api_literal.go index f12988233e8..16897650362 100644 --- 
a/go/vt/vtgate/evalengine/api_literal.go +++ b/go/vt/vtgate/evalengine/api_literal.go @@ -203,8 +203,8 @@ func NewLiteralBinaryFromBit(val []byte) (*Literal, error) { func NewBindVar(key string, typ Type) *BindVariable { return &BindVariable{ Key: key, - Type: typ.Type, - Collation: typ.Coll, + Type: typ.Type(), + Collation: typ.Collation(), dynamicTypeOffset: -1, } } @@ -222,9 +222,13 @@ func NewBindVarTuple(key string, coll collations.ID) *BindVariable { func NewColumn(offset int, typ Type, original sqlparser.Expr) *Column { return &Column{ Offset: offset, - Type: typ.Type, - Collation: typedCoercionCollation(typ.Type, typ.Coll), + Type: typ.Type(), + Size: typ.size, + Scale: typ.scale, + Collation: typedCoercionCollation(typ.Type(), typ.Collation()), Original: original, + Nullable: typ.nullable, + Values: typ.values, dynamicTypeOffset: -1, } } diff --git a/go/vt/vtgate/evalengine/api_type_aggregation.go b/go/vt/vtgate/evalengine/api_type_aggregation.go index 5ab1d2e5338..45c0377bca4 100644 --- a/go/vt/vtgate/evalengine/api_type_aggregation.go +++ b/go/vt/vtgate/evalengine/api_type_aggregation.go @@ -16,7 +16,11 @@ limitations under the License. 
package evalengine -import "vitess.io/vitess/go/sqltypes" +import ( + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/proto/query" +) type typeAggregation struct { double uint16 @@ -42,37 +46,92 @@ type typeAggregation struct { geometry uint16 blob uint16 total uint16 + + nullable bool + scale, size int32 } -func AggregateTypes(types []sqltypes.Type) sqltypes.Type { - var typeAgg typeAggregation - for _, typ := range types { - var flag typeFlag - if typ == sqltypes.HexVal || typ == sqltypes.HexNum { - typ = sqltypes.Binary - flag = flagHex - } - typeAgg.add(typ, flag) +type TypeAggregator struct { + types typeAggregation + collations collationAggregation + size, scale int32 + invalid int32 +} + +func (ta *TypeAggregator) Add(typ Type, env *collations.Environment) error { + if !typ.Valid() { + ta.invalid++ + return nil } - return typeAgg.result() + + ta.types.addNullable(typ.typ, typ.nullable, typ.size, typ.scale) + if err := ta.collations.add(typedCoercionCollation(typ.typ, typ.collation), env); err != nil { + return err + } + ta.size = max(typ.size, ta.size) + ta.scale = max(typ.scale, ta.scale) + return nil +} + +func (ta *TypeAggregator) AddField(f *query.Field, env *collations.Environment) error { + return ta.Add(NewTypeFromField(f), env) +} + +func (ta *TypeAggregator) Type() Type { + if ta.invalid > 0 || ta.types.empty() { + return Type{} + } + return NewTypeEx(ta.types.result(), ta.collations.result().Collation, ta.types.nullable, ta.size, ta.scale, nil) +} + +func (ta *TypeAggregator) Field(name string) *query.Field { + typ := ta.Type() + return typ.ToField(name) +} + +func (ta *typeAggregation) empty() bool { + return ta.total == 0 } func (ta *typeAggregation) addEval(e eval) { var t sqltypes.Type var f typeFlag + var size, scale int32 switch e := e.(type) { case nil: t = sqltypes.Null + ta.nullable = true case *evalBytes: t = sqltypes.Type(e.tt) f = e.flag + size = e.Size() + scale = e.Scale() default: t = 
e.SQLType() + size = e.Size() + scale = e.Scale() } - ta.add(t, f) + ta.add(t, f, size, scale) } -func (ta *typeAggregation) add(tt sqltypes.Type, f typeFlag) { +func (ta *typeAggregation) addNullable(typ sqltypes.Type, nullable bool, size, scale int32) { + var flag typeFlag + if typ == sqltypes.HexVal || typ == sqltypes.HexNum { + typ = sqltypes.Binary + flag |= flagHex + } + if nullable { + flag |= flagNullable + } + ta.add(typ, flag, size, scale) +} + +func (ta *typeAggregation) add(tt sqltypes.Type, f typeFlag, size, scale int32) { + if f&flagNullable != 0 { + ta.nullable = true + } + ta.size = max(ta.size, size) + ta.scale = max(ta.scale, scale) switch tt { case sqltypes.Float32, sqltypes.Float64: ta.double++ @@ -114,7 +173,7 @@ func (ta *typeAggregation) add(tt sqltypes.Type, f typeFlag) { ta.timestamp++ case sqltypes.Geometry: ta.geometry++ - case sqltypes.Blob: + case sqltypes.Blob, sqltypes.Text: ta.blob++ default: return @@ -122,6 +181,23 @@ func (ta *typeAggregation) add(tt sqltypes.Type, f typeFlag) { ta.total++ } +func nextSignedTypeForUnsigned(t sqltypes.Type) sqltypes.Type { + switch t { + case sqltypes.Uint8: + return sqltypes.Int16 + case sqltypes.Uint16: + return sqltypes.Int24 + case sqltypes.Uint24: + return sqltypes.Int32 + case sqltypes.Uint32: + return sqltypes.Int64 + case sqltypes.Uint64: + return sqltypes.Decimal + default: + panic("bad unsigned integer type") + } +} + func (ta *typeAggregation) result() sqltypes.Type { /* If all types are numeric, the aggregated type is also numeric: @@ -141,7 +217,7 @@ func (ta *typeAggregation) result() sqltypes.Type { If all temporal types are DATE, TIME, or TIMESTAMP, the result is DATE, TIME, or TIMESTAMP, respectively. Otherwise, for a mix of temporal types, the result is DATETIME. If all types are GEOMETRY, the result is GEOMETRY. - If any type is BLOB, the result is BLOB. + If any type is BLOB, the result is BLOB. This also applies to TEXT. For all other type combinations, the result is VARCHAR. 
Literal NULL operands are ignored for type aggregation. */ @@ -175,11 +251,14 @@ func (ta *typeAggregation) result() sqltypes.Type { if ta.unsigned == ta.total { return ta.unsignedMax } - if ta.unsignedMax == sqltypes.Uint64 && ta.signed > 0 { - return sqltypes.Decimal + if ta.signed == 0 { + panic("bad type aggregation for signed/unsigned types") + } + agtype := nextSignedTypeForUnsigned(ta.unsignedMax) + if sqltypes.IsSigned(agtype) { + return max(agtype, ta.signedMax) } - // TODO - return sqltypes.Uint64 + return agtype } if ta.char == ta.total { diff --git a/go/vt/vtgate/evalengine/api_type_aggregation_test.go b/go/vt/vtgate/evalengine/api_type_aggregation_test.go new file mode 100644 index 00000000000..dee24c18e06 --- /dev/null +++ b/go/vt/vtgate/evalengine/api_type_aggregation_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package evalengine + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" +) + +func TestEvalengineTypeAggregations(t *testing.T) { + aggregationCases := []struct { + types []sqltypes.Type + result sqltypes.Type + }{ + {[]sqltypes.Type{sqltypes.Int64, sqltypes.Int32, sqltypes.Float64}, sqltypes.Float64}, + {[]sqltypes.Type{sqltypes.Int64, sqltypes.Decimal, sqltypes.Float64}, sqltypes.Float64}, + {[]sqltypes.Type{sqltypes.Int64, sqltypes.Int32, sqltypes.Decimal}, sqltypes.Decimal}, + {[]sqltypes.Type{sqltypes.Int64, sqltypes.Int32, sqltypes.Int64}, sqltypes.Int64}, + {[]sqltypes.Type{sqltypes.Int32, sqltypes.Int16, sqltypes.Int8}, sqltypes.Int32}, + {[]sqltypes.Type{sqltypes.Int32, sqltypes.Uint16, sqltypes.Uint8}, sqltypes.Int32}, + {[]sqltypes.Type{sqltypes.Int32, sqltypes.Uint16, sqltypes.Uint32}, sqltypes.Int64}, + {[]sqltypes.Type{sqltypes.Int32, sqltypes.Uint16, sqltypes.Uint64}, sqltypes.Decimal}, + {[]sqltypes.Type{sqltypes.Bit, sqltypes.Bit, sqltypes.Bit}, sqltypes.Bit}, + {[]sqltypes.Type{sqltypes.Bit, sqltypes.Int32, sqltypes.Float64}, sqltypes.Float64}, + {[]sqltypes.Type{sqltypes.Bit, sqltypes.Decimal, sqltypes.Float64}, sqltypes.Float64}, + {[]sqltypes.Type{sqltypes.Bit, sqltypes.Int32, sqltypes.Decimal}, sqltypes.Decimal}, + {[]sqltypes.Type{sqltypes.Bit, sqltypes.Int32, sqltypes.Int64}, sqltypes.Int64}, + {[]sqltypes.Type{sqltypes.Char, sqltypes.VarChar}, sqltypes.VarChar}, + {[]sqltypes.Type{sqltypes.Char, sqltypes.Char}, sqltypes.VarChar}, + {[]sqltypes.Type{sqltypes.Char, sqltypes.VarChar, sqltypes.VarBinary}, sqltypes.VarBinary}, + {[]sqltypes.Type{sqltypes.Char, sqltypes.Char, sqltypes.Set, sqltypes.Enum}, sqltypes.VarChar}, + {[]sqltypes.Type{sqltypes.TypeJSON, sqltypes.TypeJSON}, sqltypes.TypeJSON}, + {[]sqltypes.Type{sqltypes.Geometry, sqltypes.Geometry}, sqltypes.Geometry}, + {[]sqltypes.Type{sqltypes.Text, sqltypes.Text}, sqltypes.Blob}, + 
{[]sqltypes.Type{sqltypes.Blob, sqltypes.Blob}, sqltypes.Blob}, + } + collEnv := collations.MySQL8() + for i, tc := range aggregationCases { + t.Run(fmt.Sprintf("%d.%v", i, tc.result), func(t *testing.T) { + var typer TypeAggregator + + for _, tt := range tc.types { + // this test only aggregates binary collations because textual collation + // aggregation is tested in the `mysql/collations` package + + err := typer.Add(NewType(tt, collations.CollationBinaryID), collEnv) + require.NoError(t, err) + } + + res := typer.Type() + require.Equalf(t, tc.result, res.Type(), "expected aggregate(%v) = %v, got %v", tc.types, tc.result, res.Type()) + }) + } +} diff --git a/go/vt/vtgate/evalengine/arena.go b/go/vt/vtgate/evalengine/arena.go index 590dc3b02c7..ccfe63f514f 100644 --- a/go/vt/vtgate/evalengine/arena.go +++ b/go/vt/vtgate/evalengine/arena.go @@ -32,6 +32,8 @@ type Arena struct { aFloat64 []evalFloat aDecimal []evalDecimal aBytes []evalBytes + aEnum []evalEnum + aSet []evalSet } func (a *Arena) reset() { @@ -40,6 +42,8 @@ func (a *Arena) reset() { a.aFloat64 = a.aFloat64[:0] a.aDecimal = a.aDecimal[:0] a.aBytes = a.aBytes[:0] + a.aEnum = a.aEnum[:0] + a.aSet = a.aSet[:0] } func (a *Arena) newEvalDecimalWithPrec(dec decimal.Decimal, prec int32) *evalDecimal { @@ -61,6 +65,32 @@ func (a *Arena) newEvalDecimal(dec decimal.Decimal, m, d int32) *evalDecimal { return a.newEvalDecimalWithPrec(dec.Clamp(m-d, d), d) } +func (a *Arena) newEvalEnum(raw []byte, values *EnumSetValues) *evalEnum { + if cap(a.aEnum) > len(a.aEnum) { + a.aEnum = a.aEnum[:len(a.aEnum)+1] + } else { + a.aEnum = append(a.aEnum, evalEnum{}) + } + val := &a.aEnum[len(a.aInt64)-1] + s := string(raw) + val.string = s + val.value = valueIdx(values, s) + return val +} + +func (a *Arena) newEvalSet(raw []byte, values *EnumSetValues) *evalSet { + if cap(a.aSet) > len(a.aSet) { + a.aSet = a.aSet[:len(a.aSet)+1] + } else { + a.aSet = append(a.aSet, evalSet{}) + } + val := &a.aSet[len(a.aInt64)-1] + s := 
string(raw) + val.string = s + val.set = evalSetBits(values, s) + return val +} + func (a *Arena) newEvalBool(b bool) *evalInt64 { if b { return a.newEvalInt64(1) diff --git a/go/vt/vtgate/evalengine/arithmetic.go b/go/vt/vtgate/evalengine/arithmetic.go index c258dab1672..031b387d275 100644 --- a/go/vt/vtgate/evalengine/arithmetic.go +++ b/go/vt/vtgate/evalengine/arithmetic.go @@ -25,7 +25,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" ) -func dataOutOfRangeError[N1, N2 int | int64 | uint64 | float64](v1 N1, v2 N2, typ, sign string) error { +func dataOutOfRangeError[N1, N2 int64 | uint64 | float64](v1 N1, v2 N2, typ, sign string) error { return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "%s value is out of range in '(%v %s %v)'", typ, v1, sign, v2) } diff --git a/go/vt/vtgate/evalengine/cached_size.go b/go/vt/vtgate/evalengine/cached_size.go index d227af1a237..65f0bd37d12 100644 --- a/go/vt/vtgate/evalengine/cached_size.go +++ b/go/vt/vtgate/evalengine/cached_size.go @@ -145,10 +145,12 @@ func (cached *CollateExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr size += cached.UnaryExpr.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *Column) CachedSize(alloc bool) int64 { @@ -157,12 +159,20 @@ func (cached *Column) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(48) + size += int64(64) } // field Original vitess.io/vitess/go/vt/sqlparser.Expr if cc, ok := cached.Original.(cachedObject); ok { size += cc.CachedSize(true) } + // field Values *vitess.io/vitess/go/vt/vtgate/evalengine.EnumSetValues + if cached.Values != nil { + size += int64(24) + size += hack.RuntimeAllocSize(int64(cap(*cached.Values)) * int64(16)) + for _, elem := range *cached.Values { + size += 
hack.RuntimeAllocSize(int64(len(elem))) + } + } return size } func (cached *ComparisonExpr) CachedSize(alloc bool) int64 { @@ -187,12 +197,14 @@ func (cached *CompiledExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(64) + size += int64(80) } // field code []vitess.io/vitess/go/vt/vtgate/evalengine.frame { size += hack.RuntimeAllocSize(int64(cap(cached.code)) * int64(8)) } + // field typed vitess.io/vitess/go/vt/vtgate/evalengine.ctype + size += cached.typed.CachedSize(false) // field ir vitess.io/vitess/go/vt/vtgate/evalengine.IR if cc, ok := cached.ir.(cachedObject); ok { size += cc.CachedSize(true) @@ -211,6 +223,12 @@ func (cached *ConvertExpr) CachedSize(alloc bool) int64 { size += cached.UnaryExpr.CachedSize(false) // field Type string size += hack.RuntimeAllocSize(int64(len(cached.Type))) + // field Length *int + size += hack.RuntimeAllocSize(int64(8)) + // field Scale *int + size += hack.RuntimeAllocSize(int64(8)) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *ConvertUsingExpr) CachedSize(alloc bool) int64 { @@ -219,10 +237,12 @@ func (cached *ConvertUsingExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr size += cached.UnaryExpr.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *InExpr) CachedSize(alloc bool) int64 { @@ -255,10 +275,12 @@ func (cached *IntroducerExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr size += cached.UnaryExpr.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += 
cached.CollationEnv.CachedSize(true) return size } func (cached *IsExpr) CachedSize(alloc bool) int64 { @@ -343,6 +365,50 @@ func (cached *NotExpr) CachedSize(alloc bool) int64 { size += cached.UnaryExpr.CachedSize(false) return size } +func (cached *OrderByParams) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(64) + } + // field Type vitess.io/vitess/go/vt/vtgate/evalengine.Type + size += cached.Type.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) + return size +} +func (cached *TupleBindVariable) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(32) + } + // field Key string + size += hack.RuntimeAllocSize(int64(len(cached.Key))) + return size +} +func (cached *Type) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field values *vitess.io/vitess/go/vt/vtgate/evalengine.EnumSetValues + if cached.values != nil { + size += int64(24) + size += hack.RuntimeAllocSize(int64(cap(*cached.values)) * int64(16)) + for _, elem := range *cached.values { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } + return size +} func (cached *UnaryExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -363,8 +429,10 @@ func (cached *UntypedExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(96) } + // field env *vitess.io/vitess/go/vt/vtenv.Environment + size += cached.env.CachedSize(true) // field ir vitess.io/vitess/go/vt/vtgate/evalengine.IR if cc, ok := cached.ir.(cachedObject); ok { size += cc.CachedSize(true) @@ -537,6 +605,18 @@ func (cached *builtinChangeCase) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinChar) 
CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinCharLength) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -765,6 +845,18 @@ func (cached *builtinDegrees) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinElt) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinExp) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -777,6 +869,18 @@ func (cached *builtinExp) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinField) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinFloor) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -801,6 +905,18 @@ func (cached *builtinFromBase64) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinFromDays) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinFromUnixtime) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -885,6 +1001,18 @@ func (cached *builtinInetNtoa) 
CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinInsert) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinIsIPV4) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1041,6 +1169,18 @@ func (cached *builtinJSONUnquote) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinLastDay) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinLeftRight) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1077,6 +1217,18 @@ func (cached *builtinLn) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinLocate) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinLog) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1365,6 +1517,30 @@ func (cached *builtinRepeat) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinReplace) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinReverse) 
CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinRound) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1401,6 +1577,18 @@ func (cached *builtinSHA2) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinSecToTime) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinSecond) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1437,6 +1625,18 @@ func (cached *builtinSin) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinSpace) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinSqrt) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1461,6 +1661,18 @@ func (cached *builtinStrcmp) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinSubstring) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinSysdate) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1497,6 +1709,18 @@ func (cached *builtinTime) CachedSize(alloc 
bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinTimeToSec) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinToBase64) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1509,6 +1733,30 @@ func (cached *builtinToBase64) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *builtinToDays) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} +func (cached *builtinToSeconds) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr + size += cached.CallExpr.CachedSize(false) + return size +} func (cached *builtinTrim) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1659,12 +1907,14 @@ func (cached *builtinWeightString) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(64) } // field CallExpr vitess.io/vitess/go/vt/vtgate/evalengine.CallExpr size += cached.CallExpr.CachedSize(false) // field Cast string size += hack.RuntimeAllocSize(int64(len(cached.Cast))) + // field Len *int + size += hack.RuntimeAllocSize(int64(8)) return size } func (cached *builtinYear) CachedSize(alloc bool) int64 { @@ -1691,6 +1941,24 @@ func (cached *builtinYearWeek) CachedSize(alloc bool) int64 { size += cached.CallExpr.CachedSize(false) return size } +func (cached *ctype) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } 
+ size := int64(0) + if alloc { + size += int64(32) + } + // field Values *vitess.io/vitess/go/vt/vtgate/evalengine.EnumSetValues + if cached.Values != nil { + size += int64(24) + size += hack.RuntimeAllocSize(int64(cap(*cached.Values)) * int64(16)) + for _, elem := range *cached.Values { + size += hack.RuntimeAllocSize(int64(len(elem))) + } + } + return size +} func (cached *evalBytes) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1711,12 +1979,24 @@ func (cached *evalDecimal) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(16) + size += int64(24) } // field dec vitess.io/vitess/go/mysql/decimal.Decimal size += cached.dec.CachedSize(false) return size } +func (cached *evalEnum) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field string string + size += hack.RuntimeAllocSize(int64(len(cached.string))) + return size +} func (cached *evalFloat) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1737,6 +2017,18 @@ func (cached *evalInt64) CachedSize(alloc bool) int64 { } return size } +func (cached *evalSet) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(24) + } + // field string string + size += hack.RuntimeAllocSize(int64(len(cached.string))) + return size +} func (cached *evalTemporal) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -1786,7 +2078,10 @@ func (cached *typedExpr) CachedSize(alloc bool) int64 { } // field types []vitess.io/vitess/go/vt/vtgate/evalengine.ctype { - size += hack.RuntimeAllocSize(int64(cap(cached.types)) * int64(10)) + size += hack.RuntimeAllocSize(int64(cap(cached.types)) * int64(32)) + for _, elem := range cached.types { + size += elem.CachedSize(false) + } } // field compiled *vitess.io/vitess/go/vt/vtgate/evalengine.CompiledExpr size += cached.compiled.CachedSize(true) diff --git 
a/go/vt/vtgate/evalengine/casting_test.go b/go/vt/vtgate/evalengine/casting_test.go index 93c04d74539..1d75a9b24ab 100644 --- a/go/vt/vtgate/evalengine/casting_test.go +++ b/go/vt/vtgate/evalengine/casting_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" ) func TestEvalResultToBooleanStrict(t *testing.T) { @@ -43,7 +45,7 @@ func TestEvalResultToBooleanStrict(t *testing.T) { for _, res := range trueValues { name := evalToSQLValue(res).String() t.Run(fmt.Sprintf("ToBooleanStrict() %s expected true (success)", name), func(t *testing.T) { - result, err := (&EvalResult{res}).ToBooleanStrict() + result, err := (&EvalResult{v: res, collationEnv: collations.MySQL8()}).ToBooleanStrict() require.NoError(t, err, name) require.Equal(t, true, result, name) }) @@ -51,7 +53,7 @@ func TestEvalResultToBooleanStrict(t *testing.T) { for _, res := range falseValues { name := evalToSQLValue(res).String() t.Run(fmt.Sprintf("ToBooleanStrict() %s expected false (success)", name), func(t *testing.T) { - result, err := (&EvalResult{res}).ToBooleanStrict() + result, err := (&EvalResult{v: res, collationEnv: collations.MySQL8()}).ToBooleanStrict() require.NoError(t, err, name) require.Equal(t, false, result, name) }) @@ -59,7 +61,7 @@ func TestEvalResultToBooleanStrict(t *testing.T) { for _, res := range invalid { name := evalToSQLValue(res).String() t.Run(fmt.Sprintf("ToBooleanStrict() %s expected fail", name), func(t *testing.T) { - _, err := (&EvalResult{res}).ToBooleanStrict() + _, err := (&EvalResult{v: res, collationEnv: collations.MySQL8()}).ToBooleanStrict() require.Error(t, err) }) } diff --git a/go/vt/vtgate/evalengine/collation.go b/go/vt/vtgate/evalengine/collation.go index 7cb341f52b0..c0feca87556 100644 --- a/go/vt/vtgate/evalengine/collation.go +++ b/go/vt/vtgate/evalengine/collation.go @@ -54,13 +54,13 @@ func evalCollation(e eval) collations.TypedCollation { } } -func mergeCollations(c1, c2 
collations.TypedCollation, t1, t2 sqltypes.Type) (collations.TypedCollation, colldata.Coercion, colldata.Coercion, error) { +func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type, env *collations.Environment) (collations.TypedCollation, colldata.Coercion, colldata.Coercion, error) { if c1.Collation == c2.Collation { return c1, nil, nil, nil } - lt := sqltypes.IsText(t1) || sqltypes.IsBinary(t1) - rt := sqltypes.IsText(t2) || sqltypes.IsBinary(t2) + lt := sqltypes.IsTextOrBinary(t1) + rt := sqltypes.IsTextOrBinary(t2) if !lt || !rt { if lt { return c1, nil, nil, nil @@ -71,18 +71,17 @@ func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (co return collationBinary, nil, nil, nil } - env := collations.Local() return colldata.Merge(env, c1, c2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) } -func mergeAndCoerceCollations(left, right eval) (eval, eval, collations.TypedCollation, error) { +func mergeAndCoerceCollations(left, right eval, env *collations.Environment) (eval, eval, collations.TypedCollation, error) { lt := left.SQLType() rt := right.SQLType() - mc, coerceLeft, coerceRight, err := mergeCollations(evalCollation(left), evalCollation(right), lt, rt) + mc, coerceLeft, coerceRight, err := mergeCollations(evalCollation(left), evalCollation(right), lt, rt, env) if err != nil { return nil, nil, collations.TypedCollation{}, err } @@ -112,7 +111,7 @@ type collationAggregation struct { cur collations.TypedCollation } -func (ca *collationAggregation) add(env *collations.Environment, tc collations.TypedCollation) error { +func (ca *collationAggregation) add(tc collations.TypedCollation, env *collations.Environment) error { if ca.cur.Collation == collations.Unknown { ca.cur = tc } else { diff --git a/go/vt/vtgate/evalengine/compare.go b/go/vt/vtgate/evalengine/compare.go index aa452c61729..836ca7c5043 100644 --- a/go/vt/vtgate/evalengine/compare.go +++ b/go/vt/vtgate/evalengine/compare.go @@ 
-18,7 +18,9 @@ package evalengine import ( "bytes" + "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/json" @@ -121,6 +123,42 @@ func compareDates(l, r *evalTemporal) int { return l.dt.Compare(r.dt) } +func compareEnums(l, r *evalEnum) int { + if l.value == -1 || r.value == -1 { + // If the values are equal normally the strings + // are equal too. In case we didn't find the proper + // value in the enum we return the string comparison. + // This is not always correct, but a best effort and still + // works for the cases where we only care about + // equality. + return strings.Compare(l.string, r.string) + } + if l.value == r.value { + return 0 + } + if l.value < r.value { + return -1 + } + return 1 +} + +func compareSets(l, r *evalSet) int { + if l.set == r.set { + if l.set == 0 && (len(l.string) != 0 || len(r.string) != 0) { + // In this case we didn't have the proper values passed + // in when creating the evalSet. We can't compare the set + // values then, but fall back to string comparison to at + // least compare something and to handle equality checks. 
+ return strings.Compare(l.string, r.string) + } + return 0 + } + if l.set < r.set { + return -1 + } + return 1 +} + func compareDateAndString(l, r eval) int { if tt, ok := l.(*evalTemporal); ok { return tt.dt.Compare(r.(*evalBytes).toDateBestEffort()) @@ -133,8 +171,8 @@ func compareDateAndString(l, r eval) int { // More on string collations coercibility on MySQL documentation: // - https://dev.mysql.com/doc/refman/8.0/en/charset-collation-coercibility.html -func compareStrings(l, r eval) (int, error) { - l, r, col, err := mergeAndCoerceCollations(l, r) +func compareStrings(l, r eval, env *collations.Environment) (int, error) { + l, r, col, err := mergeAndCoerceCollations(l, r, env) if err != nil { return 0, err } diff --git a/go/vt/vtgate/evalengine/compiler.go b/go/vt/vtgate/evalengine/compiler.go index 21a25ad3163..d9de15aa571 100644 --- a/go/vt/vtgate/evalengine/compiler.go +++ b/go/vt/vtgate/evalengine/compiler.go @@ -17,13 +17,17 @@ limitations under the License. package evalengine import ( + "slices" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -33,6 +37,8 @@ type compiler struct { collation collations.ID dynamicTypes []ctype asm assembler + sqlmode SQLMode + env *vtenv.Environment } type CompilerLog interface { @@ -46,31 +52,145 @@ type compiledCoercion struct { right colldata.Coercion } +type EnumSetValues []string + type ctype struct { - Type sqltypes.Type - Flag typeFlag - Col collations.TypedCollation + Type sqltypes.Type + Flag typeFlag + Size, Scale int32 + Col collations.TypedCollation + Values *EnumSetValues } type Type struct { - Type sqltypes.Type - Coll collations.ID - Nullable bool + typ sqltypes.Type + 
collation collations.ID + nullable bool + init bool + size, scale int32 + values *EnumSetValues +} + +func (v *EnumSetValues) Equal(other *EnumSetValues) bool { + if v == nil && other == nil { + return true + } + if v == nil || other == nil { + return false + } + return slices.Equal(*v, *other) +} + +func NewType(t sqltypes.Type, collation collations.ID) Type { + // New types default to being nullable + return NewTypeEx(t, collation, true, 0, 0, nil) } -func UnknownType() Type { - return Type{Type: sqltypes.Unknown, Coll: collations.Unknown} +func NewTypeEx(t sqltypes.Type, collation collations.ID, nullable bool, size, scale int32, values *EnumSetValues) Type { + return Type{ + typ: t, + collation: collation, + nullable: nullable, + init: true, + size: size, + scale: scale, + values: values, + } } -func (ct ctype) nullable() bool { +func NewTypeFromField(f *querypb.Field) Type { + return Type{ + typ: f.Type, + collation: collations.ID(f.Charset), + nullable: f.Flags&uint32(querypb.MySqlFlag_NOT_NULL_FLAG) == 0, + init: true, + size: int32(f.ColumnLength), + scale: int32(f.Decimals), + } +} + +func (t *Type) ToField(name string) *querypb.Field { + // need to get the proper flags for the type; usually leaving flags + // to 0 is OK, because Vitess' MySQL client will generate the right + // ones for the column's type, but here we're also setting the NotNull + // flag, so it needs to be set with the full flags for the column + _, flags := sqltypes.TypeToMySQL(t.typ) + if !t.nullable { + flags |= int64(querypb.MySqlFlag_NOT_NULL_FLAG) + } + + f := &querypb.Field{ + Name: name, + Type: t.typ, + Charset: uint32(t.collation), + ColumnLength: uint32(t.size), + Decimals: uint32(t.scale), + Flags: uint32(flags), + } + return f +} + +func (t *Type) Type() sqltypes.Type { + if t.init { + return t.typ + } + return sqltypes.Unknown +} + +func (t *Type) Collation() collations.ID { + return t.collation +} + +func (t *Type) Size() int32 { + return t.size +} + +func (t *Type) Scale() 
int32 { + return t.scale +} + +func (t *Type) Nullable() bool { + if t.init { + return t.nullable + } + return true // nullable by default for unknown types +} + +func (t *Type) Values() *EnumSetValues { + return t.values +} + +func (t *Type) Valid() bool { + return t.init +} + +func (t *Type) Equal(other *Type) bool { + return t.typ == other.typ && + t.collation == other.collation && + t.nullable == other.nullable && + t.size == other.size && + t.scale == other.scale && + t.values.Equal(other.values) +} + +func (ct *ctype) equal(other ctype) bool { + return ct.Type == other.Type && + ct.Flag == other.Flag && + ct.Size == other.Size && + ct.Scale == other.Scale && + ct.Col == other.Col && + ct.Values.Equal(other.Values) +} + +func (ct *ctype) nullable() bool { return ct.Flag&flagNullable != 0 } -func (ct ctype) isTextual() bool { - return sqltypes.IsText(ct.Type) || sqltypes.IsBinary(ct.Type) +func (ct *ctype) isTextual() bool { + return sqltypes.IsTextOrBinary(ct.Type) } -func (ct ctype) isHexOrBitLiteral() bool { +func (ct *ctype) isHexOrBitLiteral() bool { return ct.Flag&flagBit != 0 || ct.Flag&flagHex != 0 } @@ -102,36 +222,40 @@ func (c *compiler) compileToNumeric(ct ctype, offset int, fallback sqltypes.Type if ct.Type == sqltypes.VarBinary { if (ct.Flag & flagHex) != 0 { c.asm.Convert_hex(offset) - return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Uint64, Flag: ct.Flag, Col: collationNumeric} } if (ct.Flag & flagBit) != 0 { c.asm.Convert_bit(offset) - return ctype{sqltypes.Int64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Int64, Flag: ct.Flag, Col: collationNumeric} } } if sqltypes.IsDateOrTime(ct.Type) { if preciseDatetime { - c.asm.Convert_Ti(offset) - return ctype{sqltypes.Int64, ct.Flag, collationNumeric} + if ct.Size == 0 { + c.asm.Convert_Ti(offset) + return ctype{Type: sqltypes.Int64, Flag: ct.Flag, Col: collationNumeric} + } + c.asm.Convert_Td(offset) + return ctype{Type: sqltypes.Decimal, Flag: 
ct.Flag, Col: collationNumeric, Size: ct.Size + decimalSizeBase, Scale: ct.Size} } c.asm.Convert_Tf(offset) - return ctype{sqltypes.Float64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Float64, Flag: ct.Flag, Col: collationNumeric} } switch fallback { case sqltypes.Int64: c.asm.Convert_xi(offset) - return ctype{sqltypes.Int64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Int64, Flag: ct.Flag, Col: collationNumeric} case sqltypes.Uint64: c.asm.Convert_xu(offset) - return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Uint64, Flag: ct.Flag, Col: collationNumeric} case sqltypes.Decimal: c.asm.Convert_xd(offset, 0, 0) - return ctype{sqltypes.Decimal, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Decimal, Flag: ct.Flag, Col: collationNumeric} } c.asm.Convert_xf(offset) - return ctype{sqltypes.Float64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Float64, Flag: ct.Flag, Col: collationNumeric} } func (c *compiler) compileToInt64(ct ctype, offset int) ctype { @@ -144,7 +268,7 @@ func (c *compiler) compileToInt64(ct ctype, offset int) ctype { default: c.asm.Convert_xi(offset) } - return ctype{sqltypes.Int64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Int64, Flag: ct.Flag, Col: collationNumeric} } func (c *compiler) compileToUint64(ct ctype, offset int) ctype { @@ -157,7 +281,7 @@ func (c *compiler) compileToUint64(ct ctype, offset int) ctype { default: c.asm.Convert_xu(offset) } - return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Uint64, Flag: ct.Flag, Col: collationNumeric} } func (c *compiler) compileToBitwiseUint64(ct ctype, offset int) ctype { @@ -172,7 +296,7 @@ func (c *compiler) compileToBitwiseUint64(ct ctype, offset int) ctype { default: c.asm.Convert_xu(offset) } - return ctype{sqltypes.Uint64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Uint64, Flag: ct.Flag, Col: collationNumeric} } func (c *compiler) compileToFloat(ct 
ctype, offset int) ctype { @@ -189,22 +313,28 @@ func (c *compiler) compileToFloat(ct ctype, offset int) ctype { default: c.asm.Convert_xf(offset) } - return ctype{sqltypes.Float64, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Float64, Flag: ct.Flag, Col: collationNumeric} } func (c *compiler) compileToDecimal(ct ctype, offset int) ctype { if sqltypes.IsDecimal(ct.Type) { return ct } + var scale int32 + var size int32 switch ct.Type { case sqltypes.Int64: c.asm.Convert_id(offset) case sqltypes.Uint64: c.asm.Convert_ud(offset) + case sqltypes.Datetime, sqltypes.Time: + scale = ct.Size + size = ct.Size + decimalSizeBase + fallthrough default: c.asm.Convert_xd(offset, 0, 0) } - return ctype{sqltypes.Decimal, ct.Flag, collationNumeric} + return ctype{Type: sqltypes.Decimal, Flag: ct.Flag, Col: collationNumeric, Scale: scale, Size: size} } func (c *compiler) compileToDate(doct ctype, offset int) ctype { @@ -212,7 +342,7 @@ func (c *compiler) compileToDate(doct ctype, offset int) ctype { case sqltypes.Date: return doct default: - c.asm.Convert_xD(offset) + c.asm.Convert_xD(offset, c.sqlmode.AllowZeroDate()) } return ctype{Type: sqltypes.Date, Col: collationBinary, Flag: flagNullable} } @@ -223,9 +353,9 @@ func (c *compiler) compileToDateTime(doct ctype, offset, prec int) ctype { c.asm.Convert_tp(offset, prec) return doct default: - c.asm.Convert_xDT(offset, prec) + c.asm.Convert_xDT(offset, prec, c.sqlmode.AllowZeroDate()) } - return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: flagNullable} + return ctype{Type: sqltypes.Datetime, Size: int32(prec), Col: collationBinary, Flag: flagNullable} } func (c *compiler) compileToTime(doct ctype, offset, prec int) ctype { @@ -236,7 +366,7 @@ func (c *compiler) compileToTime(doct ctype, offset, prec int) ctype { default: c.asm.Convert_xT(offset, prec) } - return ctype{Type: sqltypes.Time, Col: collationBinary, Flag: flagNullable} + return ctype{Type: sqltypes.Time, Size: int32(prec), Col: collationBinary, 
Flag: flagNullable} } func (c *compiler) compileNullCheck1(ct ctype) *jump { @@ -275,6 +405,15 @@ func (c *compiler) compileNullCheck3(arg1, arg2, arg3 ctype) *jump { return nil } +func (c *compiler) compileNullCheck4(arg1, arg2, arg3, arg4 ctype) *jump { + if arg1.nullable() || arg2.nullable() || arg3.nullable() || arg4.nullable() { + j := c.asm.jumpFrom() + c.asm.NullCheck4(j) + return j + } + return nil +} + func (c *compiler) compileNullCheckArg(ct ctype, offset int) *jump { if ct.nullable() { j := c.asm.jumpFrom() @@ -369,7 +508,7 @@ func (c *compiler) compareNumericTypes(lt ctype, rt ctype) (swapped bool) { } func (c *compiler) compareAsStrings(lt ctype, rt ctype) error { - merged, coerceLeft, coerceRight, err := mergeCollations(lt.Col, rt.Col, lt.Type, rt.Type) + merged, coerceLeft, coerceRight, err := mergeCollations(lt.Col, rt.Col, lt.Type, rt.Type, c.env.CollationEnv()) if err != nil { return err } @@ -473,7 +612,7 @@ func (c *compiler) compileToJSONKey(key ctype) error { if key.Type == sqltypes.VarBinary { return nil } - c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, nil) return nil } diff --git a/go/vt/vtgate/evalengine/compiler_asm.go b/go/vt/vtgate/evalengine/compiler_asm.go index 6230627c26a..6c8896bb1f4 100644 --- a/go/vt/vtgate/evalengine/compiler_asm.go +++ b/go/vt/vtgate/evalengine/compiler_asm.go @@ -35,6 +35,7 @@ import ( "github.com/google/uuid" + "vitess.io/vitess/go/mysql/collations/charset/types" "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/hack" @@ -50,7 +51,6 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vthash" ) @@ -288,13 +288,13 @@ func (asm *assembler) BitShiftLeft_bu() { r := env.vm.stack[env.vm.sp-1].(*evalUint64) var ( - bits = int(r.u & 7) - bytes = int(r.u >> 3) - length 
= len(l.bytes) + bits = int64(r.u & 7) + bytes = int64(r.u >> 3) + length = int64(len(l.bytes)) out = make([]byte, length) ) - for i := 0; i < length; i++ { + for i := int64(0); i < length; i++ { pos := i + bytes + 1 switch { case pos < length: @@ -332,9 +332,9 @@ func (asm *assembler) BitShiftRight_bu() { r := env.vm.stack[env.vm.sp-1].(*evalUint64) var ( - bits = int(r.u & 7) - bytes = int(r.u >> 3) - length = len(l.bytes) + bits = int64(r.u & 7) + bytes = int64(r.u >> 3) + length = int64(len(l.bytes)) out = make([]byte, length) ) @@ -516,7 +516,7 @@ func (asm *assembler) Cmp_ne_n() { }, "CMPFLAG NE [NULL]") } -func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, cc collations.TypedCollation) { +func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, size, scale int32, cc collations.TypedCollation, allowZeroDate bool) { elseOffset := 0 if hasElse { elseOffset = 1 @@ -528,13 +528,13 @@ func (asm *assembler) CmpCase(cases int, hasElse bool, tt sqltypes.Type, cc coll asm.emit(func(env *ExpressionEnv) int { end := env.vm.sp - elseOffset for sp := env.vm.sp - stackDepth; sp < end; sp += 2 { - if env.vm.stack[sp].(*evalInt64).i != 0 { - env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[sp+1], tt, cc.Collation, env.now) + if env.vm.stack[sp] != nil && env.vm.stack[sp].(*evalInt64).i != 0 { + env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[sp+1], tt, size, scale, cc.Collation, env.now, allowZeroDate) goto done } } if elseOffset != 0 { - env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[env.vm.sp-1], tt, cc.Collation, env.now) + env.vm.stack[env.vm.sp-stackDepth], env.vm.err = evalCoerce(env.vm.stack[env.vm.sp-1], tt, size, scale, cc.Collation, env.now, allowZeroDate) } else { env.vm.stack[env.vm.sp-stackDepth] = nil } @@ -717,25 +717,25 @@ func (asm *assembler) CmpJSON() { }, "CMP JSON(SP-2), JSON(SP-1)") } -func (asm *assembler) CmpTuple(fullEquality bool) { +func 
(asm *assembler) CmpTuple(collationEnv *collations.Environment, fullEquality bool) { asm.adjustStack(-2) asm.emit(func(env *ExpressionEnv) int { l := env.vm.stack[env.vm.sp-2].(*evalTuple) r := env.vm.stack[env.vm.sp-1].(*evalTuple) env.vm.sp -= 2 - env.vm.flags.cmp, env.vm.flags.null, env.vm.err = evalCompareMany(l.t, r.t, fullEquality) + env.vm.flags.cmp, env.vm.flags.null, env.vm.err = evalCompareMany(l.t, r.t, fullEquality, collationEnv) return 1 }, "CMP TUPLE(SP-2), TUPLE(SP-1)") } -func (asm *assembler) CmpTupleNullsafe() { +func (asm *assembler) CmpTupleNullsafe(collationsEnv *collations.Environment) { asm.adjustStack(-1) asm.emit(func(env *ExpressionEnv) int { l := env.vm.stack[env.vm.sp-2].(*evalTuple) r := env.vm.stack[env.vm.sp-1].(*evalTuple) var equals int - equals, env.vm.err = evalCompareTuplesNullSafe(l.t, r.t) + equals, env.vm.err = evalCompareTuplesNullSafe(l.t, r.t, collationsEnv) env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(equals == 0) env.vm.sp -= 1 @@ -782,8 +782,8 @@ func (asm *assembler) Convert_bB(offset int) { var f float64 if arg != nil { f, _ = fastparse.ParseFloat64(arg.(*evalBytes).string()) + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(f != 0.0) } - env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(f != 0.0) return 1 }, "CONV VARBINARY(SP-%d), BOOL", offset) } @@ -791,7 +791,9 @@ func (asm *assembler) Convert_bB(offset int) { func (asm *assembler) Convert_TB(offset int) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-offset] - env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && !arg.(*evalTemporal).isZero()) + if arg != nil { + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(!arg.(*evalTemporal).isZero()) + } return 1 }, "CONV SQLTYPES(SP-%d), BOOL", offset) } @@ -839,7 +841,9 @@ func (asm *assembler) Convert_Tj(offset int) { func (asm *assembler) Convert_dB(offset int) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-offset] - 
env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && !arg.(*evalDecimal).dec.IsZero()) + if arg != nil { + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(!arg.(*evalDecimal).dec.IsZero()) + } return 1 }, "CONV DECIMAL(SP-%d), BOOL", offset) } @@ -859,7 +863,9 @@ func (asm *assembler) Convert_dbit(offset int) { func (asm *assembler) Convert_fB(offset int) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-offset] - env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && arg.(*evalFloat).f != 0.0) + if arg != nil { + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg.(*evalFloat).f != 0.0) + } return 1 }, "CONV FLOAT64(SP-%d), BOOL", offset) } @@ -898,7 +904,7 @@ func (asm *assembler) Convert_Ti(offset int) { asm.emit(func(env *ExpressionEnv) int { v := env.vm.stack[env.vm.sp-offset].(*evalTemporal) if v.prec != 0 { - env.vm.err = errDeoptimize + env.vm.err = vterrors.NewErrorf(vtrpc.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "temporal type with non-zero precision") return 1 } env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalInt64(v.toInt64()) @@ -914,10 +920,24 @@ func (asm *assembler) Convert_Tf(offset int) { }, "CONV SQLTIME(SP-%d), FLOAT64", offset) } +func (asm *assembler) Convert_Td(offset int) { + asm.emit(func(env *ExpressionEnv) int { + v := env.vm.stack[env.vm.sp-offset].(*evalTemporal) + if v.prec == 0 { + env.vm.err = vterrors.NewErrorf(vtrpc.Code_INVALID_ARGUMENT, vterrors.DataOutOfRange, "temporal type with zero precision") + return 1 + } + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalDecimalWithPrec(v.toDecimal(), int32(v.prec)) + return 1 + }, "CONV SQLTIME(SP-%d), DECIMAL", offset) +} + func (asm *assembler) Convert_iB(offset int) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-offset] - env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && arg.(*evalInt64).i != 0) + if arg != nil { + env.vm.stack[env.vm.sp-offset] 
= env.vm.arena.newEvalBool(arg.(*evalInt64).i != 0) + } return 1 }, "CONV INT64(SP-%d), BOOL", offset) } @@ -997,7 +1017,9 @@ func (asm *assembler) Convert_Nj(offset int) { func (asm *assembler) Convert_uB(offset int) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-offset] - env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg != nil && arg.(*evalUint64).u != 0) + if arg != nil { + env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalBool(arg.(*evalUint64).u != 0) + } return 1 }, "CONV UINT64(SP-%d), BOOL", offset) } @@ -1026,15 +1048,16 @@ func (asm *assembler) Convert_ui(offset int) { }, "CONV UINT64(SP-%d), INT64", offset) } -func (asm *assembler) Convert_xb(offset int, t sqltypes.Type, length int, hasLength bool) { - if hasLength { +func (asm *assembler) Convert_xb(offset int, t sqltypes.Type, length *int) { + if length != nil { + l := *length asm.emit(func(env *ExpressionEnv) int { arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) - arg.truncateInPlace(length) + arg.truncateInPlace(l) arg.tt = int16(t) env.vm.stack[env.vm.sp-offset] = arg return 1 - }, "CONV (SP-%d), VARBINARY[%d]", offset, length) + }, "CONV (SP-%d), VARBINARY[%d]", offset, l) } else { asm.emit(func(env *ExpressionEnv) int { arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) @@ -1045,19 +1068,20 @@ func (asm *assembler) Convert_xb(offset int, t sqltypes.Type, length int, hasLen } } -func (asm *assembler) Convert_xc(offset int, t sqltypes.Type, collation collations.ID, length int, hasLength bool) { - if hasLength { +func (asm *assembler) Convert_xc(offset int, t sqltypes.Type, collation collations.ID, length *int) { + if length != nil { + l := *length asm.emit(func(env *ExpressionEnv) int { arg, err := evalToVarchar(env.vm.stack[env.vm.sp-offset], collation, true) if err != nil { env.vm.stack[env.vm.sp-offset] = nil } else { - arg.truncateInPlace(length) + arg.truncateInPlace(l) arg.tt = int16(t) env.vm.stack[env.vm.sp-offset] = arg } return 1 - }, "CONV 
(SP-%d), VARCHAR[%d]", offset, length) + }, "CONV (SP-%d), VARCHAR[%d]", offset, l) } else { asm.emit(func(env *ExpressionEnv) int { arg, err := evalToVarchar(env.vm.stack[env.vm.sp-offset], collation, true) @@ -1116,12 +1140,12 @@ func (asm *assembler) Convert_xu(offset int) { }, "CONV (SP-%d), UINT64", offset) } -func (asm *assembler) Convert_xD(offset int) { +func (asm *assembler) Convert_xD(offset int, allowZero bool) { asm.emit(func(env *ExpressionEnv) int { // Need to explicitly check here or we otherwise // store a nil wrapper in an interface vs. a direct // nil. - d := evalToDate(env.vm.stack[env.vm.sp-offset], env.now) + d := evalToDate(env.vm.stack[env.vm.sp-offset], env.now, allowZero) if d == nil { env.vm.stack[env.vm.sp-offset] = nil } else { @@ -1131,27 +1155,12 @@ func (asm *assembler) Convert_xD(offset int) { }, "CONV (SP-%d), DATE", offset) } -func (asm *assembler) Convert_xD_nz(offset int) { +func (asm *assembler) Convert_xDT(offset, prec int, allowZero bool) { asm.emit(func(env *ExpressionEnv) int { // Need to explicitly check here or we otherwise // store a nil wrapper in an interface vs. a direct // nil. - d := evalToDate(env.vm.stack[env.vm.sp-offset], env.now) - if d == nil || d.isZero() { - env.vm.stack[env.vm.sp-offset] = nil - } else { - env.vm.stack[env.vm.sp-offset] = d - } - return 1 - }, "CONV (SP-%d), DATE(NOZERO)", offset) -} - -func (asm *assembler) Convert_xDT(offset, prec int) { - asm.emit(func(env *ExpressionEnv) int { - // Need to explicitly check here or we otherwise - // store a nil wrapper in an interface vs. a direct - // nil. 
- dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec, env.now) + dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec, env.now, allowZero) if dt == nil { env.vm.stack[env.vm.sp-offset] = nil } else { @@ -1161,21 +1170,6 @@ func (asm *assembler) Convert_xDT(offset, prec int) { }, "CONV (SP-%d), DATETIME", offset) } -func (asm *assembler) Convert_xDT_nz(offset, prec int) { - asm.emit(func(env *ExpressionEnv) int { - // Need to explicitly check here or we otherwise - // store a nil wrapper in an interface vs. a direct - // nil. - dt := evalToDateTime(env.vm.stack[env.vm.sp-offset], prec, env.now) - if dt == nil || dt.isZero() { - env.vm.stack[env.vm.sp-offset] = nil - } else { - env.vm.stack[env.vm.sp-offset] = dt - } - return 1 - }, "CONV (SP-%d), DATETIME(NOZERO)", offset) -} - func (asm *assembler) Convert_xT(offset, prec int) { asm.emit(func(env *ExpressionEnv) int { t := evalToTime(env.vm.stack[env.vm.sp-offset], prec) @@ -1438,6 +1432,29 @@ func (asm *assembler) Fn_ASCII() { }, "FN ASCII VARCHAR(SP-1)") } +func (asm *assembler) Fn_REVERSE() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalBytes) + + arg.tt = int16(sqltypes.VarChar) + arg.bytes = reverse(arg) + return 1 + }, "FN REVERSE VARCHAR(SP-1)") +} + +func (asm *assembler) Fn_SPACE(col collations.TypedCollation) { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalInt64).i + + if !validMaxLength(1, arg) { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText(space(arg), col) + return 1 + }, "FN SPACE INT64(SP-1)") +} + func (asm *assembler) Fn_ORD(col collations.ID) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-1].(*evalBytes) @@ -1993,7 +2010,7 @@ func (asm *assembler) Fn_CONV_bu(offset int, baseOffset int) { i, err := fastparse.ParseInt64(arg.string(), int(base.i)) u = uint64(i) if errors.Is(err, fastparse.ErrOverflow) { - u, _ = 
fastparse.ParseUint64(arg.string(), int(base.i)) + u, _ = fastparse.ParseUint64WithNeg(arg.string(), int(base.i)) } env.vm.stack[env.vm.sp-offset] = env.vm.arena.newEvalUint64(u) return 1 @@ -2034,10 +2051,10 @@ func (asm *assembler) Fn_CONV_uc(t sqltypes.Type, col collations.TypedCollation) }, "FN CONV VARCHAR(SP-3) INT64(SP-2) INT64(SP-1)") } -func (asm *assembler) Fn_COLLATION(col collations.TypedCollation) { +func (asm *assembler) Fn_COLLATION(collationEnv *collations.Environment, col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { v := evalCollation(env.vm.stack[env.vm.sp-1]) - env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText([]byte(collations.Local().LookupName(v.Collation)), col) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText([]byte(collationEnv.LookupName(v.Collation)), col) return 1 }, "FN COLLATION (SP-1)") } @@ -2330,6 +2347,189 @@ func (asm *assembler) Fn_BIT_LENGTH() { }, "FN BIT_LENGTH VARCHAR(SP-1)") } +func (asm *assembler) Fn_FIELD_i(args int) { + asm.adjustStack(-args + 1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-args] == nil { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + } + + tar := env.vm.stack[env.vm.sp-args].(*evalInt64) + + for i := range args - 1 { + if env.vm.stack[env.vm.sp-args+i+1] == nil { + continue + } + + arg := env.vm.stack[env.vm.sp-args+i+1].(*evalInt64) + + if tar.i == arg.i { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(int64(i + 1)) + env.vm.sp -= args - 1 + return 1 + } + } + + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + }, "FN FIELD INT64(SP-%d)...INT64(SP-1)", args) +} + +func (asm *assembler) Fn_FIELD_b(args int, col colldata.Collation) { + asm.adjustStack(-args + 1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-args] == nil { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 
1 + } + + tar := env.vm.stack[env.vm.sp-args].(*evalBytes) + + for i := range args - 1 { + if env.vm.stack[env.vm.sp-args+i+1] == nil { + continue + } + + str := env.vm.stack[env.vm.sp-args+i+1].(*evalBytes) + + // We cannot do these comparison earlier in the compilation, + // because if we convert everything first, we error on cases + // where there is a match. MySQL will do an element for element + // comparison where if there's a match already, it doesn't matter + // if there was an invalid conversion later on. + // + // This means we also must convert here in this compiler function + // and can't eagerly do the conversion. + toCharset := col.Charset() + fromCharset := colldata.Lookup(str.col.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + str, env.vm.err = evalToVarchar(str, col.ID(), true) + if env.vm.err != nil { + env.vm.stack[env.vm.sp-args] = nil + env.vm.sp -= args - 1 + return 1 + } + } + + // Compare target and current string + if col.Collate(tar.bytes, str.bytes, false) == 0 { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(int64(i + 1)) + env.vm.sp -= args - 1 + return 1 + } + } + + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + }, "FN FIELD VARCHAR(SP-%d)...VARCHAR(SP-1)", args) +} + +func (asm *assembler) Fn_FIELD_d(args int) { + asm.adjustStack(-args + 1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-args] == nil { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + } + + tar := env.vm.stack[env.vm.sp-args].(*evalDecimal) + + for i := range args - 1 { + if env.vm.stack[env.vm.sp-args+i+1] == nil { + continue + } + + arg := env.vm.stack[env.vm.sp-args+i+1].(*evalDecimal) + + if tar.dec.Equal(arg.dec) { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(int64(i + 1)) + env.vm.sp -= args - 1 + return 1 + } + } + + env.vm.stack[env.vm.sp-args] = 
env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + }, "FN FIELD DECIMAL(SP-%d)...DECIMAL(SP-1)", args) +} + +func (asm *assembler) Fn_FIELD_f(args int) { + asm.adjustStack(-args + 1) + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-args] == nil { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + } + + tar := env.vm.stack[env.vm.sp-args].(*evalFloat) + + for i := range args - 1 { + if env.vm.stack[env.vm.sp-args+i+1] == nil { + continue + } + + arg := env.vm.stack[env.vm.sp-args+i+1].(*evalFloat) + + if tar.f == arg.f { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(int64(i + 1)) + env.vm.sp -= args - 1 + return 1 + } + } + + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalInt64(0) + env.vm.sp -= args - 1 + return 1 + }, "FN FIELD FLOAT64(SP-%d)...FLOAT64(SP-1)", args) +} + +func (asm *assembler) Fn_ELT(args int, tt sqltypes.Type, tc collations.TypedCollation) { + asm.adjustStack(-args + 1) + asm.emit(func(env *ExpressionEnv) int { + i := env.vm.stack[env.vm.sp-args].(*evalInt64) + + if i.i < 1 || int(i.i) >= args || env.vm.stack[env.vm.sp-args+int(i.i)] == nil { + env.vm.stack[env.vm.sp-args] = nil + } else { + b := env.vm.stack[env.vm.sp-args+int(i.i)].(*evalBytes) + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalRaw(b.bytes, tt, tc) + } + + env.vm.sp -= args - 1 + return 1 + }, "FN ELT INT64(SP-%d) VARCHAR(SP-%d)...VARCHAR(SP-1)", args, args-1) +} + +func (asm *assembler) Fn_INSERT(col collations.TypedCollation) { + asm.adjustStack(-3) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-4].(*evalBytes) + pos := env.vm.stack[env.vm.sp-3].(*evalInt64).i + l := env.vm.stack[env.vm.sp-2].(*evalInt64).i + newstr := env.vm.stack[env.vm.sp-1].(*evalBytes) + + res := insert(str, newstr, int(pos), int(l)) + if !validMaxLength(int64(len(res)), 1) { + env.vm.stack[env.vm.sp-4] = nil + env.vm.sp -= 3 + return 1 + } + + env.vm.stack[env.vm.sp-4] = 
env.vm.arena.newEvalText(res, col) + env.vm.sp -= 3 + return 1 + }, "FN INSERT VARCHAR(SP-4) INT64(SP-3) INT64(SP-2) VARCHAR(SP-1)") +} + func (asm *assembler) Fn_LUCASE(upcase bool) { if upcase { asm.emit(func(env *ExpressionEnv) int { @@ -2733,6 +2933,65 @@ func (asm *assembler) Fn_TRIM2(col collations.TypedCollation) { }, "FN TRIM VARCHAR(SP-2) VARCHAR(SP-1)") } +func (asm *assembler) Fn_SUBSTRING2(tt sqltypes.Type, cs types.Charset, col collations.TypedCollation) { + asm.adjustStack(-1) + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + pos := env.vm.stack[env.vm.sp-1].(*evalInt64) + + end := int64(charset.Length(cs, str.bytes)) + if pos.i < 0 { + pos.i += end + 1 + } + str.tt = int16(tt) + if pos.i < 1 || pos.i > end { + str.bytes = nil + str.col = col + env.vm.sp-- + return 1 + } + + res := charset.Slice(cs, str.bytes, int(pos.i-1), int(end)) + str.bytes = res + str.col = col + env.vm.sp-- + return 1 + }, "FN SUBSTRING VARCHAR(SP-2) INT64(SP-1)") +} + +func (asm *assembler) Fn_SUBSTRING3(tt sqltypes.Type, cs types.Charset, col collations.TypedCollation) { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-3].(*evalBytes) + pos := env.vm.stack[env.vm.sp-2].(*evalInt64) + ll := env.vm.stack[env.vm.sp-1].(*evalInt64) + + end := int64(charset.Length(cs, str.bytes)) + if pos.i < 0 { + pos.i += end + 1 + } + str.tt = int16(tt) + + if pos.i < 1 || pos.i > end || ll.i < 1 { + str.bytes = nil + str.col = col + env.vm.sp -= 2 + return 1 + } + + if ll.i > end-pos.i+1 { + ll.i = end - pos.i + 1 + } + end = pos.i + ll.i - 1 + res := charset.Slice(cs, str.bytes, int(pos.i-1), int(end)) + str.tt = int16(tt) + str.bytes = res + str.col = col + env.vm.sp -= 2 + return 1 + }, "FN SUBSTRING VARCHAR(SP-3) INT64(SP-2) INT64(SP-1)") +} + func (asm *assembler) Fn_TO_BASE64(t sqltypes.Type, col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { str := 
env.vm.stack[env.vm.sp-1].(*evalBytes) @@ -2783,7 +3042,7 @@ func (asm *assembler) In_table(not bool, table map[vthash.Hash]struct{}) { } } -func (asm *assembler) In_slow(not bool) { +func (asm *assembler) In_slow(collationsEnv *collations.Environment, not bool) { asm.adjustStack(-1) if not { @@ -2792,7 +3051,7 @@ func (asm *assembler) In_slow(not bool) { rhs := env.vm.stack[env.vm.sp-1].(*evalTuple) var in boolean - in, env.vm.err = evalInExpr(lhs, rhs) + in, env.vm.err = evalInExpr(collationsEnv, lhs, rhs) env.vm.stack[env.vm.sp-2] = in.not().eval() env.vm.sp -= 1 @@ -2804,7 +3063,7 @@ func (asm *assembler) In_slow(not bool) { rhs := env.vm.stack[env.vm.sp-1].(*evalTuple) var in boolean - in, env.vm.err = evalInExpr(lhs, rhs) + in, env.vm.err = evalInExpr(collationsEnv, lhs, rhs) env.vm.stack[env.vm.sp-2] = in.eval() env.vm.sp -= 1 @@ -2890,6 +3149,53 @@ func (asm *assembler) Like_collate(expr *LikeExpr, collation colldata.Collation) }, "LIKE VARCHAR(SP-2), VARCHAR(SP-1) COLLATE '%s'", collation.Name()) } +func (asm *assembler) Locate3(collation colldata.Collation) { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + substr := env.vm.stack[env.vm.sp-3].(*evalBytes) + str := env.vm.stack[env.vm.sp-2].(*evalBytes) + pos := env.vm.stack[env.vm.sp-1].(*evalInt64) + env.vm.sp -= 2 + + if pos.i < 1 || pos.i > math.MaxInt { + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(0) + return 1 + } + + found := colldata.Index(collation, str.bytes, substr.bytes, int(pos.i)-1) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(found) + 1) + return 1 + }, "LOCATE VARCHAR(SP-3), VARCHAR(SP-2) INT64(SP-1) COLLATE '%s'", collation.Name()) +} + +func (asm *assembler) Locate2(collation colldata.Collation) { + asm.adjustStack(-1) + + asm.emit(func(env *ExpressionEnv) int { + substr := env.vm.stack[env.vm.sp-2].(*evalBytes) + str := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.sp-- + + found := colldata.Index(collation, str.bytes, substr.bytes, 0) + 
env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(found) + 1) + return 1 + }, "LOCATE VARCHAR(SP-2), VARCHAR(SP-1) COLLATE '%s'", collation.Name()) +} + +func (asm *assembler) Replace() { + asm.adjustStack(-2) + + asm.emit(func(env *ExpressionEnv) int { + str := env.vm.stack[env.vm.sp-3].(*evalBytes) + from := env.vm.stack[env.vm.sp-2].(*evalBytes) + to := env.vm.stack[env.vm.sp-1].(*evalBytes) + env.vm.sp -= 2 + str.bytes = replace(str.bytes, from.bytes, to.bytes) + return 1 + }, "REPLACE VARCHAR(SP-3), VARCHAR(SP-2) VARCHAR(SP-1)") +} + func (asm *assembler) Strcmp(collation collations.TypedCollation) { asm.adjustStack(-1) @@ -3073,6 +3379,17 @@ func (asm *assembler) NullCheck3(j *jump) { }, "NULLCHECK SP-1, SP-2, SP-3") } +func (asm *assembler) NullCheck4(j *jump) { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-4] == nil || env.vm.stack[env.vm.sp-3] == nil || env.vm.stack[env.vm.sp-2] == nil || env.vm.stack[env.vm.sp-1] == nil { + env.vm.stack[env.vm.sp-4] = nil + env.vm.sp -= 3 + return j.offset() + } + return 1 + }, "NULLCHECK SP-1, SP-2, SP-3, SP-4") +} + func (asm *assembler) NullCheckArg(j *jump, offset int) { asm.emit(func(env *ExpressionEnv) int { if env.vm.stack[env.vm.sp-1] == nil { @@ -3235,31 +3552,32 @@ func cmpnum[N interface{ int64 | uint64 | float64 }](a, b N) int { } } -func (asm *assembler) Fn_Now(t querypb.Type, format *datetime.Strftime, prec uint8, utc bool) { +func (asm *assembler) Fn_Now(prec uint8, utc bool) { + asm.adjustStack(1) + asm.emit(func(env *ExpressionEnv) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalDateTime(env.time(utc), int(prec)) + env.vm.sp++ + return 1 + }, "FN NOW(DATETIME)") +} + +func (asm *assembler) Fn_NowTime(prec uint8, utc bool) { asm.adjustStack(1) asm.emit(func(env *ExpressionEnv) int { - val := env.vm.arena.newEvalBytesEmpty() - val.tt = int16(t) - val.bytes = format.Format(env.time(utc), prec) - val.col = collationBinary - env.vm.stack[env.vm.sp] = val + 
env.vm.stack[env.vm.sp] = env.vm.arena.newEvalTime(env.time(utc).Time, int(prec)) env.vm.sp++ return 1 - }, "FN NOW") + }, "FN NOW(TIME)") } func (asm *assembler) Fn_Sysdate(prec uint8) { asm.adjustStack(1) asm.emit(func(env *ExpressionEnv) int { - val := env.vm.arena.newEvalBytesEmpty() - val.tt = int16(sqltypes.Datetime) now := SystemTime() if tz := env.currentTimezone(); tz != nil { now = now.In(tz) } - val.bytes = datetime.NewDateTimeFromStd(now).Format(prec) - val.col = collationBinary - env.vm.stack[env.vm.sp] = val + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalDateTime(datetime.NewDateTimeFromStd(now), int(prec)) env.vm.sp++ return 1 }, "FN SYSDATE") @@ -3268,11 +3586,7 @@ func (asm *assembler) Fn_Sysdate(prec uint8) { func (asm *assembler) Fn_Curdate() { asm.adjustStack(1) asm.emit(func(env *ExpressionEnv) int { - val := env.vm.arena.newEvalBytesEmpty() - val.tt = int16(sqltypes.Date) - val.bytes = datetime.Date_YYYY_MM_DD.Format(env.time(false), 0) - val.col = collationBinary - env.vm.stack[env.vm.sp] = val + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalDate(env.time(false).Date) env.vm.sp++ return 1 }, "FN CURDATE") @@ -3281,11 +3595,7 @@ func (asm *assembler) Fn_Curdate() { func (asm *assembler) Fn_UtcDate() { asm.adjustStack(1) asm.emit(func(env *ExpressionEnv) int { - val := env.vm.arena.newEvalBytesEmpty() - val.tt = int16(sqltypes.Date) - val.bytes = datetime.Date_YYYY_MM_DD.Format(env.time(true), 0) - val.col = collationBinary - env.vm.stack[env.vm.sp] = val + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalDate(env.time(true).Date) env.vm.sp++ return 1 }, "FN UTC_DATE") @@ -3317,7 +3627,7 @@ func (asm *assembler) Fn_Database() { func (asm *assembler) Fn_Version() { asm.adjustStack(1) asm.emit(func(env *ExpressionEnv) int { - env.vm.stack[env.vm.sp] = env.vm.arena.newEvalText([]byte(servenv.MySQLServerVersion()), collationUtf8mb3) + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalText([]byte(env.currentVersion()), collationUtf8mb3) env.vm.sp++ return 
1 }, "FN VERSION") @@ -3587,7 +3897,7 @@ func (asm *assembler) Fn_MAKEDATE() { y := env.vm.stack[env.vm.sp-1].(*evalInt64) yd := env.vm.stack[env.vm.sp-2].(*evalInt64) - t := yearDayToTime(y.i, yd.i) + t := yearDayToTime(env.currentTimezone(), y.i, yd.i) if t.IsZero() { env.vm.stack[env.vm.sp-2] = nil } else { @@ -3624,6 +3934,34 @@ func (asm *assembler) Fn_MAKETIME_i() { }, "FN MAKETIME INT64(SP-3) INT64(SP-2) INT64(SP-1)") } +func (asm *assembler) Fn_MAKETIME_D() { + asm.adjustStack(-2) + asm.emit(func(env *ExpressionEnv) int { + h := env.vm.stack[env.vm.sp-3].(*evalInt64) + m := env.vm.stack[env.vm.sp-2].(*evalInt64) + sec := env.vm.stack[env.vm.sp-1].(*evalTemporal) + + s := newEvalDecimalWithPrec(sec.toDecimal(), int32(sec.prec)) + + d, ok := makeTime_d(h.i, m.i, s.dec) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + t, l, ok := datetime.ParseTimeDecimal(d, s.length, -1) + if !ok { + env.vm.stack[env.vm.sp-3] = nil + env.vm.sp -= 2 + return 1 + } + + env.vm.stack[env.vm.sp-3] = env.vm.arena.newEvalTime(t, l) + env.vm.sp -= 2 + return 1 + }, "FN MAKETIME INT64(SP-3) INT64(SP-2) TEMPORAL(SP-1)") +} + func (asm *assembler) Fn_MAKETIME_d() { asm.adjustStack(-2) asm.emit(func(env *ExpressionEnv) int { @@ -3727,6 +4065,89 @@ func (asm *assembler) Fn_MONTHNAME(col collations.TypedCollation) { }, "FN MONTHNAME DATE(SP-1)") } +func (asm *assembler) Fn_LAST_DAY() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + d := lastDay(env.currentTimezone(), arg.dt) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDate(d) + return 1 + }, "FN LAST_DAY DATETIME(SP-1)") +} + +func (asm *assembler) Fn_TO_DAYS() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + numDays := datetime.MysqlDayNumber(arg.dt.Date.Year(), arg.dt.Date.Month(), arg.dt.Date.Day()) 
+ env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(int64(numDays)) + return 1 + }, "FN TO_DAYS DATE(SP-1)") +} + +func (asm *assembler) Fn_FROM_DAYS() { + asm.emit(func(env *ExpressionEnv) int { + arg := env.vm.stack[env.vm.sp-1].(*evalInt64) + d := datetime.DateFromDayNumber(int(arg.i)) + if d.Year() > 9999 { + env.vm.stack[env.vm.sp-1] = nil + return 1 + } + + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalDate(d) + return 1 + }, "FN FROM_DAYS INT64(SP-1)") +} + +func (asm *assembler) Fn_SEC_TO_TIME_D() { + asm.emit(func(env *ExpressionEnv) int { + e := env.vm.stack[env.vm.sp-1].(*evalTemporal) + prec := int(e.prec) + + sec := newEvalDecimalWithPrec(e.toDecimal(), int32(prec)) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalTime(datetime.NewTimeFromSeconds(sec.dec), prec) + return 1 + }, "FN SEC_TO_TIME TEMPORAL(SP-1)") +} + +func (asm *assembler) Fn_SEC_TO_TIME_d() { + asm.emit(func(env *ExpressionEnv) int { + e := env.vm.stack[env.vm.sp-1].(*evalDecimal) + prec := min(evalDecimalPrecision(e), datetime.DefaultPrecision) + + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalTime(datetime.NewTimeFromSeconds(e.dec), int(prec)) + return 1 + }, "FN SEC_TO_TIME DECIMAL(SP-1)") +} + +func (asm *assembler) Fn_TIME_TO_SEC() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + d := env.vm.stack[env.vm.sp-1].(*evalTemporal) + + sec := d.dt.Time.ToSeconds() + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(sec) + return 1 + }, "FN TIME_TO_SEC TIME(SP-1)") +} + +func (asm *assembler) Fn_TO_SECONDS() { + asm.emit(func(env *ExpressionEnv) int { + if env.vm.stack[env.vm.sp-1] == nil { + return 1 + } + arg := env.vm.stack[env.vm.sp-1].(*evalTemporal) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalInt64(arg.dt.ToSeconds()) + return 1 + }, "FN TO_SECONDS DATETIME(SP-1)") +} + func (asm *assembler) Fn_QUARTER() { asm.emit(func(env *ExpressionEnv) int { if env.vm.stack[env.vm.sp-1] == nil { @@ -3761,9 +4182,6 @@ func 
(asm *assembler) Fn_UNIX_TIMESTAMP0() { func (asm *assembler) Fn_UNIX_TIMESTAMP1() { asm.emit(func(env *ExpressionEnv) int { res := dateTimeUnixTimestamp(env, env.vm.stack[env.vm.sp-1]) - if _, ok := res.(*evalInt64); !ok { - env.vm.err = errDeoptimize - } env.vm.stack[env.vm.sp-1] = res return 1 }, "FN UNIX_TIMESTAMP (SP-1)") @@ -3859,20 +4277,6 @@ func (asm *assembler) Fn_YEARWEEK() { }, "FN YEARWEEK DATE(SP-1)") } -func (asm *assembler) Interval_i(l int) { - asm.adjustStack(-l) - asm.emit(func(env *ExpressionEnv) int { - if env.vm.stack[env.vm.sp-l] == nil { - env.vm.stack[env.vm.sp-l] = env.vm.arena.newEvalInt64(-1) - env.vm.sp -= l - return 1 - } - - env.vm.sp -= l - return 1 - }, "INTERVAL INT64(SP-1)...INT64(SP-%d)", l) -} - func (asm *assembler) Interval(l int) { asm.adjustStack(-l) asm.emit(func(env *ExpressionEnv) int { @@ -4038,6 +4442,29 @@ func (asm *assembler) Fn_CONCAT_WS(tt querypb.Type, tc collations.TypedCollation }, "FN CONCAT_WS VARCHAR(SP-1) VARCHAR(SP-2)...VARCHAR(SP-N)") } +func (asm *assembler) Fn_CHAR(tt querypb.Type, tc collations.TypedCollation, args int) { + cs := colldata.Lookup(tc.Collation).Charset() + asm.adjustStack(-(args - 1)) + asm.emit(func(env *ExpressionEnv) int { + buf := make([]byte, 0, args) + for i := 0; i < args; i++ { + if env.vm.stack[env.vm.sp-args+i] == nil { + continue + } + arg := env.vm.stack[env.vm.sp-args+i].(*evalInt64) + buf = encodeChar(buf, uint32(arg.i)) + } + + if charset.Validate(cs, buf) { + env.vm.stack[env.vm.sp-args] = env.vm.arena.newEvalRaw(buf, tt, tc) + } else { + env.vm.stack[env.vm.sp-args] = nil + } + env.vm.sp -= args - 1 + return 1 + }, "FN CHAR INT64(SP-1) INT64(SP-2)...INT64(SP-N)") +} + func (asm *assembler) Fn_BIN_TO_UUID0(col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { arg := env.vm.stack[env.vm.sp-1].(*evalBytes) @@ -4182,7 +4609,7 @@ func (asm *assembler) Fn_DATEADD_s(unit datetime.IntervalType, sub bool, col col goto baddate } - tmp = 
evalToTemporal(env.vm.stack[env.vm.sp-2]) + tmp = evalToTemporal(env.vm.stack[env.vm.sp-2], true) if tmp == nil { goto baddate } @@ -4641,7 +5068,8 @@ func (asm *assembler) Fn_REGEXP_REPLACE_slow(merged collations.TypedCollation, f func (asm *assembler) Introduce(offset int, t sqltypes.Type, col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { - arg := evalToBinary(env.vm.stack[env.vm.sp-offset]) + var arg *evalBytes + arg, env.vm.err = introducerCast(env.vm.stack[env.vm.sp-offset], col.Collation) arg.tt = int16(t) arg.col = col env.vm.stack[env.vm.sp-offset] = arg diff --git a/go/vt/vtgate/evalengine/compiler_asm_push.go b/go/vt/vtgate/evalengine/compiler_asm_push.go index ab1371f1e11..87d2ee9af9b 100644 --- a/go/vt/vtgate/evalengine/compiler_asm_push.go +++ b/go/vt/vtgate/evalengine/compiler_asm_push.go @@ -105,6 +105,18 @@ func push_d(env *ExpressionEnv, raw []byte) int { return 1 } +func push_enum(env *ExpressionEnv, raw []byte, values *EnumSetValues) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalEnum(raw, values) + env.vm.sp++ + return 1 +} + +func push_set(env *ExpressionEnv, raw []byte, values *EnumSetValues) int { + env.vm.stack[env.vm.sp] = env.vm.arena.newEvalSet(raw, values) + env.vm.sp++ + return 1 +} + func (asm *assembler) PushColumn_d(offset int) { asm.adjustStack(1) @@ -117,6 +129,30 @@ func (asm *assembler) PushColumn_d(offset int) { }, "PUSH DECIMAL(:%d)", offset) } +func (asm *assembler) PushColumn_enum(offset int, values *EnumSetValues) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + col := env.Row[offset] + if col.IsNull() { + return push_null(env) + } + return push_enum(env, col.Raw(), values) + }, "PUSH ENUM(:%d)", offset) +} + +func (asm *assembler) PushColumn_set(offset int, values *EnumSetValues) { + asm.adjustStack(1) + + asm.emit(func(env *ExpressionEnv) int { + col := env.Row[offset] + if col.IsNull() { + return push_null(env) + } + return push_set(env, col.Raw(), values) + }, "PUSH 
SET(:%d)", offset) +} + func (asm *assembler) PushBVar_d(key string) { asm.adjustStack(1) diff --git a/go/vt/vtgate/evalengine/compiler_fn.go b/go/vt/vtgate/evalengine/compiler_fn.go index d4157929546..72197f6a492 100644 --- a/go/vt/vtgate/evalengine/compiler_fn.go +++ b/go/vt/vtgate/evalengine/compiler_fn.go @@ -76,7 +76,7 @@ func (c *compiler) compileFn_length(arg IR, asm_ins func()) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, nil) } asm_ins() diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go index 4fba65aeb75..04eb72ad4f2 100644 --- a/go/vt/vtgate/evalengine/compiler_test.go +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -17,6 +17,7 @@ limitations under the License. package evalengine_test import ( + "context" "fmt" "strconv" "strings" @@ -24,11 +25,13 @@ import ( "time" "github.com/olekukonko/tablewriter" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" ) @@ -97,16 +100,16 @@ func TestCompilerReference(t *testing.T) { defer func() { evalengine.SystemTime = time.Now }() track := NewTracker() - + venv := vtenv.NewTestEnv() for _, tc := range testcases.Cases { t.Run(tc.Name(), func(t *testing.T) { var supported, total int - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(venv) tc.Run(func(query string, row []sqltypes.Value) { env.Row = row - stmt, err := sqlparser.ParseExpr(query) + stmt, err := venv.Parser().ParseExpr(query) if err != nil { // no need to test un-parseable queries return @@ -117,6 +120,7 @@ func TestCompilerReference(t *testing.T) { ResolveColumn: fields.Column, ResolveType: 
fields.Type, Collation: collations.CollationUtf8mb4ID, + Environment: venv, NoConstantFolding: true, } @@ -165,6 +169,7 @@ func TestCompilerSingle(t *testing.T) { values []sqltypes.Value result string collation collations.ID + typeWanted evalengine.Type }{ { expression: "1 + column0", @@ -496,13 +501,235 @@ func TestCompilerSingle(t *testing.T) { result: `VARCHAR("0")`, collation: collations.CollationUtf8mb4ID, }, + { + expression: `UNIX_TIMESTAMP(0.0) + 1`, + result: `DECIMAL(1.0)`, + }, + { + expression: `UNIX_TIMESTAMP(0.000) + 1`, + result: `DECIMAL(1.000)`, + }, + { + expression: `UNIX_TIMESTAMP(-1.5) + 1`, + result: `DECIMAL(1.0)`, + }, + { + expression: `UNIX_TIMESTAMP(-1.500) + 1`, + result: `DECIMAL(1.000)`, + }, + { + expression: `UNIX_TIMESTAMP(0x0) + 1`, + result: `INT64(1)`, + }, + { + expression: `UNIX_TIMESTAMP(timestamp '2000-01-01 10:34:58.123456') + 1`, + result: `DECIMAL(946719299.123456)`, + }, + { + expression: `UNIX_TIMESTAMP('200001011034581111111') + 1`, + result: `INT64(946719299)`, + }, + { + expression: `CONV(-0x1, 13e0, 13e0)`, + result: `VARCHAR("219505A9511A867B72")`, + }, + { + expression: `UNIX_TIMESTAMP('20000101103458.111111') + 1`, + result: `DECIMAL(946719299.111111)`, + }, + { + expression: `cast(null * 1 as CHAR)`, + result: `NULL`, + }, + { + expression: `cast(null + 1 as CHAR)`, + result: `NULL`, + }, + { + expression: `cast(null - 1 as CHAR)`, + result: `NULL`, + }, + { + expression: `cast(null / 1 as CHAR)`, + result: `NULL`, + }, + { + expression: `cast(null % 1 as CHAR)`, + result: `NULL`, + }, + { + expression: `1 AND NULL * 1`, + result: `NULL`, + }, + { + expression: `case 0 when NULL then 1 else 0 end`, + result: `INT64(0)`, + }, + { + expression: `case when null is null then 23 else null end`, + result: `INT64(23)`, + }, + { + expression: `CAST(0 AS DATE)`, + result: `NULL`, + }, + { + expression: `DAYOFMONTH(0)`, + result: `INT64(0)`, + }, + { + expression: `week('2023-12-31', 4)`, + result: `INT64(53)`, + }, + { + 
expression: `week('2023-12-31', 2)`, + result: `INT64(53)`, + }, + { + expression: `week('2024-12-31', 1)`, + result: `INT64(53)`, + }, + { + expression: `week('2024-12-31', 5)`, + result: `INT64(53)`, + }, + { + expression: `FROM_UNIXTIME(time '10:04:58.5')`, + result: `DATETIME("1970-01-02 04:54:18.5")`, + }, + { + expression: `0 = time '10:04:58.1'`, + result: `INT64(0)`, + }, + { + expression: `CAST(time '32:34:58.5' AS TIME)`, + result: `TIME("32:34:59")`, + }, + { + expression: `now(6) + interval 1 day`, + result: `DATETIME("2023-10-25 12:00:00.123456")`, + }, + { + expression: `now() + interval 654321 microsecond`, + result: `DATETIME("2023-10-24 12:00:00.654321")`, + }, + { + expression: `time('1111:66:56')`, + result: `NULL`, + }, + { + expression: `locate('Å', 'a')`, + result: `INT64(1)`, + }, + { + expression: `locate('a', 'Å')`, + result: `INT64(1)`, + }, + { + expression: `locate("", "😊😂🤢", 3)`, + result: `INT64(3)`, + }, + { + expression: `REPLACE('www.mysql.com', '', 'Ww')`, + result: `VARCHAR("www.mysql.com")`, + }, + { + expression: `1 * unix_timestamp(utc_timestamp(1))`, + result: `DECIMAL(1698134400.1)`, + }, + { + expression: `1 * unix_timestamp(CONVERT_TZ(20040101120000.10e0,'+00:00','+10:00'))`, + result: `DECIMAL(1072990800.101563)`, + }, + { + expression: `1 * unix_timestamp(CONVERT_TZ(20040101120000.10,'+00:00','+10:00'))`, + result: `DECIMAL(1072990800.10)`, + }, + { + expression: `1 * unix_timestamp(CONVERT_TZ(timestamp'2004-01-01 12:00:00.10','+00:00','+10:00'))`, + result: `DECIMAL(1072990800.10)`, + }, + { + expression: `1 * unix_timestamp(CONVERT_TZ('2004-01-01 12:00:00.10','+00:00','+10:00'))`, + result: `DECIMAL(1072990800.10)`, + }, + { + expression: `1 * unix_timestamp('2004-01-01 12:00:00.10')`, + result: `DECIMAL(1072954800.10)`, + }, + { + expression: `1 * unix_timestamp(from_unixtime(1447430881.123))`, + result: `DECIMAL(1447430881.123)`, + }, + { + expression: `1 * unix_timestamp(from_unixtime('1447430881.123'))`, + result: 
`DECIMAL(1447430881.123000)`, + }, + { + expression: `1 * unix_timestamp(from_unixtime(time '31:34:58'))`, + result: `INT64(313458)`, + }, + { + expression: `1 * unix_timestamp(from_unixtime(time '31:34:58.123'))`, + result: `DECIMAL(313458.123)`, + }, + { + expression: `1 * unix_timestamp(time('1.0000'))`, + result: `DECIMAL(1698098401.0000)`, + }, + { + expression: `(case + when 'PROMOTION' like 'PROMO%' + then 0.01 + else 0 + end) * 0.01`, + result: `DECIMAL(0.0001)`, + typeWanted: evalengine.NewTypeEx(sqltypes.Decimal, collations.CollationBinaryID, false, 4, 4, nil), + }, + { + expression: `case when true then 0.02 else 1.000 end`, + result: `DECIMAL(0.02)`, + }, + { + expression: `case + when false + then timestamp'2023-10-24 12:00:00.123456' + else timestamp'2023-10-24 12:00:00' + end`, + result: `DATETIME("2023-10-24 12:00:00.000000")`, + typeWanted: evalengine.NewTypeEx(sqltypes.Datetime, collations.CollationBinaryID, false, 6, 0, nil), + }, + { + expression: `convert(0xFF using utf16)`, + result: `VARCHAR("ÿ")`, + }, + { + expression: `_utf16 0xFF`, + result: `VARCHAR("ÿ")`, + }, + { + expression: `convert(0xFF using utf32)`, + result: `NULL`, + }, + { + expression: `cast(_utf32 0xFF as binary)`, + result: `VARBINARY("\x00\x00\x00\xff")`, + }, + { + expression: `cast(_utf32 0x00FF as binary)`, + result: `VARBINARY("\x00\x00\x00\xff")`, + }, + { + expression: `cast(_utf32 0x0000FF as binary)`, + result: `VARBINARY("\x00\x00\x00\xff")`, + }, } tz, _ := time.LoadLocation("Europe/Madrid") - + venv := vtenv.NewTestEnv() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := venv.Parser().ParseExpr(tc.expression) if err != nil { t.Fatal(err) } @@ -512,6 +739,7 @@ func TestCompilerSingle(t *testing.T) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + Environment: venv, NoConstantFolding: true, } @@ -520,8 +748,8 @@ func 
TestCompilerSingle(t *testing.T) { t.Fatal(err) } - env := evalengine.EmptyExpressionEnv() - env.SetTime(time.Date(2023, 10, 24, 12, 0, 0, 0, tz)) + env := evalengine.NewExpressionEnv(context.Background(), nil, evalengine.NewEmptyVCursor(venv, tz)) + env.SetTime(time.Date(2023, 10, 24, 12, 0, 0, 123456000, tz)) env.Row = tc.values expected, err := env.EvaluateAST(converted) @@ -535,6 +763,12 @@ func TestCompilerSingle(t *testing.T) { t.Fatalf("bad collation evaluation from eval engine: got %d, want %d", expected.Collation(), tc.collation) } + if tc.typeWanted.Type() != sqltypes.Unknown { + typ, err := env.TypeOf(converted) + require.NoError(t, err) + require.True(t, tc.typeWanted.Equal(&typ)) + } + // re-run the same evaluation multiple times to ensure results are always consistent for i := 0; i < 8; i++ { res, err := env.Evaluate(converted) @@ -578,9 +812,10 @@ func TestBindVarLiteral(t *testing.T) { }, } + venv := vtenv.NewTestEnv() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := venv.Parser().ParseExpr(tc.expression) if err != nil { t.Fatal(err) } @@ -592,6 +827,7 @@ func TestBindVarLiteral(t *testing.T) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + Environment: venv, NoConstantFolding: true, } @@ -602,7 +838,7 @@ func TestBindVarLiteral(t *testing.T) { result := `VARCHAR("ÿ")` - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(venv) env.BindVars = map[string]*querypb.BindVariable{ "vtg1": tc.bindVar, } @@ -642,15 +878,17 @@ func TestCompilerNonConstant(t *testing.T) { }, } + venv := vtenv.NewTestEnv() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := venv.Parser().ParseExpr(tc.expression) if err != nil { t.Fatal(err) } cfg := &evalengine.Config{ Collation: collations.CollationUtf8mb4ID, + Environment: 
venv, NoConstantFolding: true, } @@ -659,7 +897,7 @@ func TestCompilerNonConstant(t *testing.T) { t.Fatal(err) } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(venv) var prev string for i := 0; i < 1000; i++ { expected, err := env.EvaluateAST(converted) diff --git a/go/vt/vtgate/evalengine/eval.go b/go/vt/vtgate/evalengine/eval.go index e327b9d5651..49423979379 100644 --- a/go/vt/vtgate/evalengine/eval.go +++ b/go/vt/vtgate/evalengine/eval.go @@ -21,7 +21,6 @@ import ( "time" "unicode/utf8" - "vitess.io/vitess/go/hack" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/fastparse" @@ -73,6 +72,8 @@ func (f typeFlag) Nullable() bool { type eval interface { ToRawBytes() []byte SQLType() sqltypes.Type + Size() int32 + Scale() int32 } type hashable interface { @@ -87,50 +88,45 @@ func evalToSQLValue(e eval) sqltypes.Value { return sqltypes.MakeTrusted(e.SQLType(), e.ToRawBytes()) } -func evalToSQLValueWithType(e eval, resultType sqltypes.Type) sqltypes.Value { +func evalToSQLValueWithType(e eval, resultType Type) sqltypes.Value { + tt := resultType.Type() switch { - case sqltypes.IsSigned(resultType): + case sqltypes.IsSigned(tt): switch e := e.(type) { case *evalInt64: - return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, e.i, 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendInt(nil, e.i, 10)) case *evalUint64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, e.u, 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendUint(nil, e.u, 10)) case *evalFloat: - return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, int64(e.f), 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendInt(nil, int64(e.f), 10)) } - case sqltypes.IsUnsigned(resultType): + case sqltypes.IsUnsigned(tt): switch e := e.(type) { case *evalInt64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(e.i), 10)) + return sqltypes.MakeTrusted(tt, 
strconv.AppendUint(nil, uint64(e.i), 10)) case *evalUint64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, e.u, 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendUint(nil, e.u, 10)) case *evalFloat: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, uint64(e.f), 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendUint(nil, uint64(e.f), 10)) } - case sqltypes.IsFloat(resultType): + case sqltypes.IsFloat(tt): switch e := e.(type) { case *evalInt64: - return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, e.i, 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendInt(nil, e.i, 10)) case *evalUint64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, e.u, 10)) + return sqltypes.MakeTrusted(tt, strconv.AppendUint(nil, e.u, 10)) case *evalFloat: - return sqltypes.MakeTrusted(resultType, format.FormatFloat(e.f)) + return sqltypes.MakeTrusted(tt, format.FormatFloat(e.f)) case *evalDecimal: - return sqltypes.MakeTrusted(resultType, e.dec.FormatMySQL(e.length)) + return sqltypes.MakeTrusted(tt, e.dec.FormatMySQL(e.length)) } - case sqltypes.IsDecimal(resultType): - switch e := e.(type) { - case *evalInt64: - return sqltypes.MakeTrusted(resultType, strconv.AppendInt(nil, e.i, 10)) - case *evalUint64: - return sqltypes.MakeTrusted(resultType, strconv.AppendUint(nil, e.u, 10)) - case *evalFloat: - return sqltypes.MakeTrusted(resultType, hack.StringBytes(strconv.FormatFloat(e.f, 'f', -1, 64))) - case *evalDecimal: - return sqltypes.MakeTrusted(resultType, e.dec.FormatMySQL(e.length)) + case sqltypes.IsDecimal(tt): + if numeric, ok := e.(evalNumeric); ok { + dec := numeric.toDecimal(resultType.size, resultType.scale) + return sqltypes.MakeTrusted(tt, dec.dec.FormatMySQL(dec.length)) } - default: - return sqltypes.MakeTrusted(resultType, e.ToRawBytes()) + case e != nil: + return sqltypes.MakeTrusted(tt, e.ToRawBytes()) } return sqltypes.NULL } @@ -176,7 +172,7 @@ func evalIsTruthy(e eval) boolean { } } -func evalCoerce(e 
eval, typ sqltypes.Type, col collations.ID, now time.Time) (eval, error) { +func evalCoerce(e eval, typ sqltypes.Type, size, scale int32, col collations.ID, now time.Time, allowZero bool) (eval, error) { if e == nil { return nil, nil } @@ -187,7 +183,7 @@ func evalCoerce(e eval, typ sqltypes.Type, col collations.ID, now time.Time) (ev // if we have an explicit VARCHAR coercion, always force it so the collation is replaced in the target return evalToVarchar(e, col, false) } - if e.SQLType() == typ { + if e.SQLType() == typ && e.Size() == size && e.Scale() == scale { // nothing to be done here return e, nil } @@ -208,17 +204,17 @@ func evalCoerce(e eval, typ sqltypes.Type, col collations.ID, now time.Time) (ev case sqltypes.Uint8, sqltypes.Uint16, sqltypes.Uint32, sqltypes.Uint64: return evalToInt64(e).toUint64(), nil case sqltypes.Date: - return evalToDate(e, now), nil + return evalToDate(e, now, allowZero), nil case sqltypes.Datetime, sqltypes.Timestamp: - return evalToDateTime(e, -1, now), nil + return evalToDateTime(e, int(size), now, allowZero), nil case sqltypes.Time: - return evalToTime(e, -1), nil + return evalToTime(e, int(size)), nil default: return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", typ.String()) } } -func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.ID) (eval, error) { +func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.ID, values *EnumSetValues, sqlmode SQLMode) (eval, error) { switch { case typ == sqltypes.Null: return nil, nil @@ -238,7 +234,7 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I fval, _ := fastparse.ParseFloat64(v.RawStr()) return newEvalFloat(fval), nil default: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } @@ -265,7 +261,7 @@ func valueToEvalCast(v 
sqltypes.Value, typ sqltypes.Type, collation collations.I fval, _ := fastparse.ParseFloat64(v.RawStr()) dec = decimal.NewFromFloat(fval) default: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } @@ -285,7 +281,7 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I i, err := fastparse.ParseInt64(v.RawStr(), 10) return newEvalInt64(i), err default: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } @@ -304,7 +300,7 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I u, err := fastparse.ParseUint64(v.RawStr(), 10) return newEvalUint64(u), err default: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } @@ -312,18 +308,18 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I return newEvalUint64(uint64(i.i)), nil } - case sqltypes.IsText(typ) || sqltypes.IsBinary(typ): + case sqltypes.IsTextOrBinary(typ): switch { case v.IsText() || v.IsBinary(): return newEvalRaw(v.Type(), v.Raw(), typedCoercionCollation(v.Type(), collation)), nil case sqltypes.IsText(typ): - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } return evalToVarchar(e, collation, true) default: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } @@ -333,29 +329,29 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I case 
typ == sqltypes.TypeJSON: return json.NewFromSQL(v) case typ == sqltypes.Date: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } // Separate return here to avoid nil wrapped in interface type - d := evalToDate(e, time.Now()) + d := evalToDate(e, time.Now(), sqlmode.AllowZeroDate()) if d == nil { return nil, nil } return d, nil case typ == sqltypes.Datetime || typ == sqltypes.Timestamp: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } // Separate return here to avoid nil wrapped in interface type - dt := evalToDateTime(e, -1, time.Now()) + dt := evalToDateTime(e, -1, time.Now(), sqlmode.AllowZeroDate()) if dt == nil { return nil, nil } return dt, nil case typ == sqltypes.Time: - e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation)) + e, err := valueToEval(v, typedCoercionCollation(v.Type(), collation), values) if err != nil { return nil, err } @@ -365,39 +361,15 @@ func valueToEvalCast(v sqltypes.Value, typ sqltypes.Type, collation collations.I return nil, nil } return t, nil + case typ == sqltypes.Enum: + return newEvalEnum(v.Raw(), values), nil + case typ == sqltypes.Set: + return newEvalSet(v.Raw(), values), nil } return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "coercion should not try to coerce this value: %v", v) } -func valueToEvalNumeric(v sqltypes.Value) (eval, error) { - switch { - case v.IsSigned(): - ival, err := v.ToInt64() - if err != nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) - } - return &evalInt64{i: ival}, nil - case v.IsUnsigned(): - var uval uint64 - uval, err := v.ToUint64() - if err != nil { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%v", err) - } - return newEvalUint64(uval), nil - default: - uval, err := 
strconv.ParseUint(v.RawStr(), 10, 64) - if err == nil { - return newEvalUint64(uval), nil - } - ival, err := strconv.ParseInt(v.RawStr(), 10, 64) - if err == nil { - return &evalInt64{i: ival}, nil - } - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse value: '%s'", v.RawStr()) - } -} - -func valueToEval(value sqltypes.Value, collation collations.TypedCollation) (eval, error) { +func valueToEval(value sqltypes.Value, collation collations.TypedCollation, values *EnumSetValues) (eval, error) { wrap := func(err error) error { if err == nil { return nil @@ -418,6 +390,10 @@ func valueToEval(value sqltypes.Value, collation collations.TypedCollation) (eva case tt == sqltypes.Decimal: dec, err := decimal.NewFromMySQL(value.Raw()) return newEvalDecimal(dec, 0, 0), wrap(err) + case tt == sqltypes.Enum: + return newEvalEnum(value.Raw(), values), nil + case tt == sqltypes.Set: + return newEvalSet(value.Raw(), values), nil case sqltypes.IsText(tt): if tt == sqltypes.HexNum { raw, err := parseHexNumber(value.Raw()) diff --git a/go/vt/vtgate/evalengine/eval_bytes.go b/go/vt/vtgate/evalengine/eval_bytes.go index caa516acbe4..027c4bb652d 100644 --- a/go/vt/vtgate/evalengine/eval_bytes.go +++ b/go/vt/vtgate/evalengine/eval_bytes.go @@ -138,6 +138,14 @@ func (e *evalBytes) SQLType() sqltypes.Type { return sqltypes.Type(e.tt) } +func (e *evalBytes) Size() int32 { + return 0 +} + +func (e *evalBytes) Scale() int32 { + return 0 +} + func (e *evalBytes) ToRawBytes() []byte { return e.bytes } diff --git a/go/vt/vtgate/evalengine/eval_enum.go b/go/vt/vtgate/evalengine/eval_enum.go new file mode 100644 index 00000000000..fa9675d7c0e --- /dev/null +++ b/go/vt/vtgate/evalengine/eval_enum.go @@ -0,0 +1,48 @@ +package evalengine + +import ( + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/sqltypes" +) + +type evalEnum struct { + value int + string string +} + +func newEvalEnum(val []byte, values *EnumSetValues) *evalEnum { + s := string(val) + return &evalEnum{ + 
value: valueIdx(values, s), + string: s, + } +} + +func (e *evalEnum) ToRawBytes() []byte { + return hack.StringBytes(e.string) +} + +func (e *evalEnum) SQLType() sqltypes.Type { + return sqltypes.Enum +} + +func (e *evalEnum) Size() int32 { + return 0 +} + +func (e *evalEnum) Scale() int32 { + return 0 +} + +func valueIdx(values *EnumSetValues, value string) int { + if values == nil { + return -1 + } + for i, v := range *values { + v, _ = sqltypes.DecodeStringSQL(v) + if v == value { + return i + } + } + return -1 +} diff --git a/go/vt/vtgate/evalengine/eval_numeric.go b/go/vt/vtgate/evalengine/eval_numeric.go index 8584fa4a714..04f844566b1 100644 --- a/go/vt/vtgate/evalengine/eval_numeric.go +++ b/go/vt/vtgate/evalengine/eval_numeric.go @@ -81,10 +81,14 @@ func newEvalFloat(f float64) *evalFloat { } func newEvalDecimal(dec decimal.Decimal, m, d int32) *evalDecimal { - if m == 0 && d == 0 { + switch { + case m == 0 && d == 0: return newEvalDecimalWithPrec(dec, -dec.Exponent()) + case m == 0: + return newEvalDecimalWithPrec(dec, d) + default: + return newEvalDecimalWithPrec(dec.Clamp(m-d, d), d) } - return newEvalDecimalWithPrec(dec.Clamp(m-d, d), d) } func newEvalDecimalWithPrec(dec decimal.Decimal, prec int32) *evalDecimal { @@ -145,6 +149,10 @@ func evalToNumeric(e eval, preciseDatetime bool) evalNumeric { return newEvalDecimalWithPrec(e.toDecimal(), int32(e.prec)) } return &evalFloat{f: e.toFloat()} + case *evalEnum: + return &evalFloat{f: float64(e.value)} + case *evalSet: + return &evalFloat{f: float64(e.set)} default: panic("unsupported") } @@ -201,6 +209,10 @@ func evalToFloat(e eval) (*evalFloat, bool) { } case *evalTemporal: return &evalFloat{f: e.toFloat()}, true + case *evalEnum: + return &evalFloat{f: float64(e.value)}, e.value != -1 + case *evalSet: + return &evalFloat{f: float64(e.set)}, true default: panic(fmt.Sprintf("unsupported type %T", e)) } @@ -265,6 +277,10 @@ func evalToDecimal(e eval, m, d int32) *evalDecimal { } case *evalTemporal: return 
newEvalDecimal(e.toDecimal(), m, d) + case *evalEnum: + return newEvalDecimal(decimal.NewFromInt(int64(e.value)), m, d) + case *evalSet: + return newEvalDecimal(decimal.NewFromUint(e.set), m, d) default: panic("unsupported") } @@ -328,6 +344,10 @@ func evalToInt64(e eval) *evalInt64 { } case *evalTemporal: return newEvalInt64(e.toInt64()) + case *evalEnum: + return newEvalInt64(int64(e.value)) + case *evalSet: + return newEvalInt64(int64(e.set)) default: panic(fmt.Sprintf("unsupported type: %T", e)) } @@ -346,6 +366,14 @@ func (e *evalInt64) SQLType() sqltypes.Type { return sqltypes.Int64 } +func (e *evalInt64) Size() int32 { + return 0 +} + +func (e *evalInt64) Scale() int32 { + return 0 +} + func (e *evalInt64) ToRawBytes() []byte { return strconv.AppendInt(nil, e.i, 10) } @@ -389,6 +417,14 @@ func (e *evalUint64) SQLType() sqltypes.Type { return sqltypes.Uint64 } +func (e *evalUint64) Size() int32 { + return 0 +} + +func (e *evalUint64) Scale() int32 { + return 0 +} + func (e *evalUint64) ToRawBytes() []byte { return strconv.AppendUint(nil, e.u, 10) } @@ -432,6 +468,14 @@ func (e *evalFloat) SQLType() sqltypes.Type { return sqltypes.Float64 } +func (e *evalFloat) Size() int32 { + return 0 +} + +func (e *evalFloat) Scale() int32 { + return 0 +} + func (e *evalFloat) ToRawBytes() []byte { return format.FormatFloat(e.f) } @@ -508,6 +552,14 @@ func (e *evalDecimal) SQLType() sqltypes.Type { return sqltypes.Decimal } +func (e *evalDecimal) Size() int32 { + return e.length +} + +func (e *evalDecimal) Scale() int32 { + return -e.dec.Exponent() +} + func (e *evalDecimal) ToRawBytes() []byte { return e.dec.FormatMySQL(e.length) } diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go index 19a6ea59220..d9916af03be 100644 --- a/go/vt/vtgate/evalengine/eval_result.go +++ b/go/vt/vtgate/evalengine/eval_result.go @@ -28,7 +28,8 @@ import ( ) type EvalResult struct { - v eval + v eval + collationEnv *collations.Environment } // Value 
allows for retrieval of the value we expose for public consumption. @@ -56,7 +57,7 @@ func (er EvalResult) Collation() collations.ID { } func (er EvalResult) String() string { - return er.Value(collations.Default()).String() + return er.Value(er.collationEnv.DefaultConnectionCharset()).String() } // TupleValues allows for retrieval of the value we expose for public consumption diff --git a/go/vt/vtgate/evalengine/eval_set.go b/go/vt/vtgate/evalengine/eval_set.go new file mode 100644 index 00000000000..bc75a527edc --- /dev/null +++ b/go/vt/vtgate/evalengine/eval_set.go @@ -0,0 +1,57 @@ +package evalengine + +import ( + "strings" + + "vitess.io/vitess/go/hack" + "vitess.io/vitess/go/sqltypes" +) + +type evalSet struct { + set uint64 + string string +} + +func newEvalSet(val []byte, values *EnumSetValues) *evalSet { + value := string(val) + + return &evalSet{ + set: evalSetBits(values, value), + string: value, + } +} + +func (e *evalSet) ToRawBytes() []byte { + return hack.StringBytes(e.string) +} + +func (e *evalSet) SQLType() sqltypes.Type { + return sqltypes.Set +} + +func (e *evalSet) Size() int32 { + return 0 +} + +func (e *evalSet) Scale() int32 { + return 0 +} + +func evalSetBits(values *EnumSetValues, value string) uint64 { + if values != nil && len(*values) > 64 { + // This never would happen as MySQL limits SET + // to 64 elements. Safeguard here just in case though. 
+ panic("too many values for set") + } + + set := uint64(0) + for _, val := range strings.Split(value, ",") { + idx := valueIdx(values, val) + if idx == -1 { + continue + } + set |= 1 << idx + } + + return set +} diff --git a/go/vt/vtgate/evalengine/eval_temporal.go b/go/vt/vtgate/evalengine/eval_temporal.go index d44839a6853..d73485441c3 100644 --- a/go/vt/vtgate/evalengine/eval_temporal.go +++ b/go/vt/vtgate/evalengine/eval_temporal.go @@ -42,6 +42,14 @@ func (e *evalTemporal) SQLType() sqltypes.Type { return e.t } +func (e *evalTemporal) Size() int32 { + return int32(e.prec) +} + +func (e *evalTemporal) Scale() int32 { + return 0 +} + func (e *evalTemporal) toInt64() int64 { switch e.SQLType() { case sqltypes.Date: @@ -153,7 +161,7 @@ func (e *evalTemporal) addInterval(interval *datetime.Interval, coll collations. tmp.dt.Time, tmp.prec, ok = e.dt.Time.AddInterval(interval, coll != collations.Unknown) case tt == sqltypes.Datetime || tt == sqltypes.Timestamp || (tt == sqltypes.Date && interval.Unit().HasTimeParts()) || (tt == sqltypes.Time && interval.Unit().HasDateParts()): tmp = e.toDateTime(int(e.prec), now) - tmp.dt, tmp.prec, ok = e.dt.AddInterval(interval, coll != collations.Unknown) + tmp.dt, tmp.prec, ok = e.dt.AddInterval(interval, tmp.prec, coll != collations.Unknown) } if !ok { return nil @@ -164,11 +172,17 @@ func (e *evalTemporal) addInterval(interval *datetime.Interval, coll collations. 
return tmp } -func newEvalDateTime(dt datetime.DateTime, l int) *evalTemporal { +func newEvalDateTime(dt datetime.DateTime, l int, allowZero bool) *evalTemporal { + if !allowZero && dt.IsZero() { + return nil + } return &evalTemporal{t: sqltypes.Datetime, dt: dt.Round(l), prec: uint8(l)} } -func newEvalDate(d datetime.Date) *evalTemporal { +func newEvalDate(d datetime.Date, allowZero bool) *evalTemporal { + if !allowZero && d.IsZero() { + return nil + } return &evalTemporal{t: sqltypes.Date, dt: datetime.DateTime{Date: d}} } @@ -185,7 +199,7 @@ func parseDate(s []byte) (*evalTemporal, error) { if !ok { return nil, errIncorrectTemporal("DATE", s) } - return newEvalDate(t), nil + return newEvalDate(t, true), nil } func parseDateTime(s []byte) (*evalTemporal, error) { @@ -193,12 +207,12 @@ func parseDateTime(s []byte) (*evalTemporal, error) { if !ok { return nil, errIncorrectTemporal("DATETIME", s) } - return newEvalDateTime(t, l), nil + return newEvalDateTime(t, l, true), nil } func parseTime(s []byte) (*evalTemporal, error) { - t, l, ok := datetime.ParseTime(hack.String(s), -1) - if !ok { + t, l, state := datetime.ParseTime(hack.String(s), -1) + if state != datetime.TimeOK { return nil, errIncorrectTemporal("TIME", s) } return newEvalTime(t, l), nil @@ -211,56 +225,56 @@ func precision(req, got int) int { return req } -func evalToTemporal(e eval) *evalTemporal { +func evalToTemporal(e eval, allowZero bool) *evalTemporal { switch e := e.(type) { case *evalTemporal: return e case *evalBytes: if t, l, ok := datetime.ParseDateTime(e.string(), -1); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDate(e.string()); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } - if t, l, ok := datetime.ParseTime(e.string(), -1); ok { + if t, l, state := datetime.ParseTime(e.string(), -1); state == datetime.TimeOK { return newEvalTime(t, l) } case *evalInt64: if t, ok := datetime.ParseDateTimeInt64(e.i); ok { - 
return newEvalDateTime(t, 0) + return newEvalDateTime(t, 0, allowZero) } if d, ok := datetime.ParseDateInt64(e.i); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, ok := datetime.ParseTimeInt64(e.i); ok { return newEvalTime(t, 0) } case *evalUint64: if t, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { - return newEvalDateTime(t, 0) + return newEvalDateTime(t, 0, allowZero) } if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, ok := datetime.ParseTimeInt64(int64(e.u)); ok { return newEvalTime(t, 0) } case *evalFloat: if t, l, ok := datetime.ParseDateTimeFloat(e.f, -1); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateFloat(e.f); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if t, l, ok := datetime.ParseTimeFloat(e.f, -1); ok { return newEvalTime(t, l) } case *evalDecimal: if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, -1); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateDecimal(e.dec); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } if d, l, ok := datetime.ParseTimeDecimal(e.dec, e.length, -1); ok { return newEvalTime(d, l) @@ -271,9 +285,9 @@ func evalToTemporal(e eval) *evalTemporal { return newEvalTime(dt.Time, datetime.DefaultPrecision) } if dt.Time.IsZero() { - return newEvalDate(dt.Date) + return newEvalDate(dt.Date, allowZero) } - return newEvalDateTime(dt, datetime.DefaultPrecision) + return newEvalDateTime(dt, datetime.DefaultPrecision, allowZero) } } return nil @@ -287,7 +301,7 @@ func evalToTime(e eval, l int) *evalTemporal { if dt, l, _ := datetime.ParseDateTime(e.string(), l); !dt.IsZero() { return newEvalTime(dt.Time, l) } - if t, l, ok := datetime.ParseTime(e.string(), l); ok || !t.IsZero() { + if t, l, state := datetime.ParseTime(e.string(), l); state != datetime.TimeInvalid { 
return newEvalTime(t, l) } case *evalInt64: @@ -326,74 +340,74 @@ func evalToTime(e eval, l int) *evalTemporal { return nil } -func evalToDateTime(e eval, l int, now time.Time) *evalTemporal { +func evalToDateTime(e eval, l int, now time.Time, allowZero bool) *evalTemporal { switch e := e.(type) { case *evalTemporal: return e.toDateTime(precision(l, int(e.prec)), now) case *evalBytes: if t, l, _ := datetime.ParseDateTime(e.string(), l); !t.IsZero() { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, _ := datetime.ParseDate(e.string()); !d.IsZero() { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalInt64: if t, ok := datetime.ParseDateTimeInt64(e.i); ok { - return newEvalDateTime(t, precision(l, 0)) + return newEvalDateTime(t, precision(l, 0), allowZero) } if d, ok := datetime.ParseDateInt64(e.i); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalUint64: if t, ok := datetime.ParseDateTimeInt64(int64(e.u)); ok { - return newEvalDateTime(t, precision(l, 0)) + return newEvalDateTime(t, precision(l, 0), allowZero) } if d, ok := datetime.ParseDateInt64(int64(e.u)); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalFloat: if t, l, ok := datetime.ParseDateTimeFloat(e.f, l); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, allowZero) } if d, ok := datetime.ParseDateFloat(e.f); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalDecimal: if t, l, ok := datetime.ParseDateTimeDecimal(e.dec, e.length, l); ok { - return newEvalDateTime(t, l) + return newEvalDateTime(t, l, 
allowZero) } if d, ok := datetime.ParseDateDecimal(e.dec); ok { - return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0)) + return newEvalDateTime(datetime.DateTime{Date: d}, precision(l, 0), allowZero) } case *evalJSON: if dt, ok := e.DateTime(); ok { - return newEvalDateTime(dt, precision(l, datetime.DefaultPrecision)) + return newEvalDateTime(dt, precision(l, datetime.DefaultPrecision), allowZero) } } return nil } -func evalToDate(e eval, now time.Time) *evalTemporal { +func evalToDate(e eval, now time.Time, allowZero bool) *evalTemporal { switch e := e.(type) { case *evalTemporal: return e.toDate(now) case *evalBytes: if t, _ := datetime.ParseDate(e.string()); !t.IsZero() { - return newEvalDate(t) + return newEvalDate(t, allowZero) } if dt, _, _ := datetime.ParseDateTime(e.string(), -1); !dt.IsZero() { - return newEvalDate(dt.Date) + return newEvalDate(dt.Date, allowZero) } case evalNumeric: if t, ok := datetime.ParseDateInt64(e.toInt64().i); ok { - return newEvalDate(t) + return newEvalDate(t, allowZero) } if dt, ok := datetime.ParseDateTimeInt64(e.toInt64().i); ok { - return newEvalDate(dt.Date) + return newEvalDate(dt.Date, allowZero) } case *evalJSON: if d, ok := e.Date(); ok { - return newEvalDate(d) + return newEvalDate(d, allowZero) } } return nil diff --git a/go/vt/vtgate/evalengine/eval_tuple.go b/go/vt/vtgate/evalengine/eval_tuple.go index 73e7fcc2051..81fa3317977 100644 --- a/go/vt/vtgate/evalengine/eval_tuple.go +++ b/go/vt/vtgate/evalengine/eval_tuple.go @@ -33,3 +33,11 @@ func (e *evalTuple) ToRawBytes() []byte { func (e *evalTuple) SQLType() sqltypes.Type { return sqltypes.Tuple } + +func (e *evalTuple) Size() int32 { + return 0 +} + +func (e *evalTuple) Scale() int32 { + return 0 +} diff --git a/go/vt/vtgate/evalengine/expr_arithmetic.go b/go/vt/vtgate/evalengine/expr_arithmetic.go index cb80e57c365..026892fc0ac 100644 --- a/go/vt/vtgate/evalengine/expr_arithmetic.go +++ b/go/vt/vtgate/evalengine/expr_arithmetic.go @@ -66,19 +66,6 @@ 
func (b *ArithmeticExpr) eval(env *ExpressionEnv) (eval, error) { return b.Op.eval(left, right) } -func makeNumericalType(t sqltypes.Type, f typeFlag) (sqltypes.Type, typeFlag) { - if sqltypes.IsNumber(t) { - return t, f - } - if t == sqltypes.VarBinary && (f&flagHex) != 0 { - return sqltypes.Uint64, f - } - if sqltypes.IsDateOrTime(t) { - return sqltypes.Int64, f | flagAmbiguousType - } - return sqltypes.Float64, f -} - func (b *ArithmeticExpr) compile(c *compiler) (ctype, error) { return b.Op.compile(c, b.Left, b.Right) } @@ -107,12 +94,12 @@ func (op *opArithAdd) compile(c *compiler, left, right IR) (ctype, error) { rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) lt, rt, swap = c.compileNumericPriority(lt, rt) - var sumtype sqltypes.Type + ct := ctype{Flag: nullableFlags(lt.Flag | rt.Flag), Col: collationNumeric} switch lt.Type { case sqltypes.Int64: c.asm.Add_ii() - sumtype = sqltypes.Int64 + ct.Type = sqltypes.Int64 case sqltypes.Uint64: switch rt.Type { case sqltypes.Int64: @@ -120,7 +107,7 @@ func (op *opArithAdd) compile(c *compiler, left, right IR) (ctype, error) { case sqltypes.Uint64: c.asm.Add_uu() } - sumtype = sqltypes.Uint64 + ct.Type = sqltypes.Uint64 case sqltypes.Decimal: if swap { c.compileToDecimal(rt, 2) @@ -128,7 +115,9 @@ func (op *opArithAdd) compile(c *compiler, left, right IR) (ctype, error) { c.compileToDecimal(rt, 1) } c.asm.Add_dd() - sumtype = sqltypes.Decimal + ct.Type = sqltypes.Decimal + ct.Size = max(lt.Size, rt.Size) + ct.Scale = max(lt.Scale, rt.Scale) case sqltypes.Float64: if swap { c.compileToFloat(rt, 2) @@ -136,11 +125,11 @@ func (op *opArithAdd) compile(c *compiler, left, right IR) (ctype, error) { c.compileToFloat(rt, 1) } c.asm.Add_ff() - sumtype = sqltypes.Float64 + ct.Type = sqltypes.Float64 } c.asm.jumpDestination(skip1, skip2) - return ctype{Type: sumtype, Col: collationNumeric}, nil + return ct, nil } func (op *opArithSub) eval(left, right eval) (eval, error) { @@ -164,66 +153,71 @@ func (op *opArithSub) 
compile(c *compiler, left, right IR) (ctype, error) { lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) - var subtype sqltypes.Type - + ct := ctype{Flag: nullableFlags(lt.Flag | rt.Flag), Col: collationNumeric} switch lt.Type { case sqltypes.Int64: switch rt.Type { case sqltypes.Int64: c.asm.Sub_ii() - subtype = sqltypes.Int64 + ct.Type = sqltypes.Int64 case sqltypes.Uint64: c.asm.Sub_iu() - subtype = sqltypes.Uint64 + ct.Type = sqltypes.Uint64 case sqltypes.Float64: c.compileToFloat(lt, 2) c.asm.Sub_ff() - subtype = sqltypes.Float64 + ct.Type = sqltypes.Float64 case sqltypes.Decimal: c.compileToDecimal(lt, 2) c.asm.Sub_dd() - subtype = sqltypes.Decimal + ct.Type = sqltypes.Decimal + ct.Size = max(lt.Size, rt.Size) + ct.Scale = max(lt.Scale, rt.Scale) } case sqltypes.Uint64: switch rt.Type { case sqltypes.Int64: c.asm.Sub_ui() - subtype = sqltypes.Uint64 + ct.Type = sqltypes.Uint64 case sqltypes.Uint64: c.asm.Sub_uu() - subtype = sqltypes.Uint64 + ct.Type = sqltypes.Uint64 case sqltypes.Float64: c.compileToFloat(lt, 2) c.asm.Sub_ff() - subtype = sqltypes.Float64 + ct.Type = sqltypes.Float64 case sqltypes.Decimal: c.compileToDecimal(lt, 2) c.asm.Sub_dd() - subtype = sqltypes.Decimal + ct.Type = sqltypes.Decimal + ct.Size = max(lt.Size, rt.Size) + ct.Scale = max(lt.Scale, rt.Scale) } case sqltypes.Float64: c.compileToFloat(rt, 1) c.asm.Sub_ff() - subtype = sqltypes.Float64 + ct.Type = sqltypes.Float64 case sqltypes.Decimal: switch rt.Type { case sqltypes.Float64: c.compileToFloat(lt, 2) c.asm.Sub_ff() - subtype = sqltypes.Float64 + ct.Type = sqltypes.Float64 default: c.compileToDecimal(rt, 1) c.asm.Sub_dd() - subtype = sqltypes.Decimal + ct.Type = sqltypes.Decimal + ct.Size = max(lt.Size, rt.Size) + ct.Scale = max(lt.Scale, rt.Scale) } } - if subtype == 0 { + if ct.Type == 0 { panic("did not compile?") } c.asm.jumpDestination(skip1, skip2) - return ctype{Type: subtype, Col: collationNumeric}, nil + return 
ct, nil } func (op *opArithMul) eval(left, right eval) (eval, error) { @@ -250,12 +244,11 @@ func (op *opArithMul) compile(c *compiler, left, right IR) (ctype, error) { rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) lt, rt, swap = c.compileNumericPriority(lt, rt) - var multype sqltypes.Type - + ct := ctype{Flag: nullableFlags(lt.Flag | rt.Flag), Col: collationNumeric} switch lt.Type { case sqltypes.Int64: c.asm.Mul_ii() - multype = sqltypes.Int64 + ct.Type = sqltypes.Int64 case sqltypes.Uint64: switch rt.Type { case sqltypes.Int64: @@ -263,7 +256,7 @@ func (op *opArithMul) compile(c *compiler, left, right IR) (ctype, error) { case sqltypes.Uint64: c.asm.Mul_uu() } - multype = sqltypes.Uint64 + ct.Type = sqltypes.Uint64 case sqltypes.Float64: if swap { c.compileToFloat(rt, 2) @@ -271,7 +264,7 @@ func (op *opArithMul) compile(c *compiler, left, right IR) (ctype, error) { c.compileToFloat(rt, 1) } c.asm.Mul_ff() - multype = sqltypes.Float64 + ct.Type = sqltypes.Float64 case sqltypes.Decimal: if swap { c.compileToDecimal(rt, 2) @@ -279,11 +272,13 @@ func (op *opArithMul) compile(c *compiler, left, right IR) (ctype, error) { c.compileToDecimal(rt, 1) } c.asm.Mul_dd() - multype = sqltypes.Decimal + ct.Type = sqltypes.Decimal + ct.Size = lt.Size + rt.Size + ct.Scale = lt.Scale + rt.Scale } c.asm.jumpDestination(skip1, skip2) - return ctype{Type: multype, Col: collationNumeric}, nil + return ct, nil } func (op *opArithDiv) eval(left, right eval) (eval, error) { @@ -319,6 +314,8 @@ func (op *opArithDiv) compile(c *compiler, left, right IR) (ctype, error) { c.compileToDecimal(lt, 2) c.compileToDecimal(rt, 1) c.asm.Div_dd() + ct.Size = lt.Size + divPrecisionIncrement + ct.Scale = lt.Scale + divPrecisionIncrement } c.asm.jumpDestination(skip1, skip2) return ct, nil @@ -432,7 +429,7 @@ func (op *opArithMod) compile(c *compiler, left, right IR) (ctype, error) { lt = c.compileToNumeric(lt, 2, sqltypes.Float64, true) rt = c.compileToNumeric(rt, 1, sqltypes.Float64, true) - 
ct := ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagNullable} + ct := ctype{Col: collationNumeric, Flag: flagNullable} switch lt.Type { case sqltypes.Int64: ct.Type = sqltypes.Int64 @@ -447,6 +444,8 @@ func (op *opArithMod) compile(c *compiler, left, right IR) (ctype, error) { c.asm.Mod_ff() case sqltypes.Decimal: ct.Type = sqltypes.Decimal + ct.Size = max(lt.Size, rt.Size) + ct.Scale = max(lt.Scale, rt.Scale) c.asm.Convert_xd(2, 0, 0) c.asm.Mod_dd() } @@ -463,6 +462,8 @@ func (op *opArithMod) compile(c *compiler, left, right IR) (ctype, error) { c.asm.Mod_ff() case sqltypes.Decimal: ct.Type = sqltypes.Decimal + ct.Size = max(lt.Size, rt.Size) + ct.Scale = max(lt.Scale, rt.Scale) c.asm.Convert_xd(2, 0, 0) c.asm.Mod_dd() } @@ -536,5 +537,15 @@ func (expr *NegateExpr) compile(c *compiler) (ctype, error) { } c.asm.jumpDestination(skip) - return ctype{Type: neg, Col: collationNumeric}, nil + return ctype{ + Type: neg, + Flag: nullableFlags(arg.Flag), + Size: arg.Size, + Scale: arg.Scale, + Col: collationNumeric, + }, nil +} + +func nullableFlags(flag typeFlag) typeFlag { + return flag & (flagNull | flagNullable) } diff --git a/go/vt/vtgate/evalengine/expr_bit.go b/go/vt/vtgate/evalengine/expr_bit.go index e95d54c5b6c..6200875d1fc 100644 --- a/go/vt/vtgate/evalengine/expr_bit.go +++ b/go/vt/vtgate/evalengine/expr_bit.go @@ -104,9 +104,9 @@ func (o opBitShr) numeric(num, shift uint64) uint64 { return num >> shift } func (o opBitShr) binary(num []byte, shift uint64) []byte { var ( - bits = int(shift % 8) - bytes = int(shift / 8) - length = len(num) + bits = int64(shift % 8) + bytes = int64(shift / 8) + length = int64(len(num)) out = make([]byte, length) ) @@ -127,13 +127,13 @@ func (o opBitShl) numeric(num, shift uint64) uint64 { return num << shift } func (o opBitShl) binary(num []byte, shift uint64) []byte { var ( - bits = int(shift % 8) - bytes = int(shift / 8) - length = len(num) + bits = int64(shift % 8) + bytes = int64(shift / 8) + length = 
int64(len(num)) out = make([]byte, length) ) - for i := 0; i < length; i++ { + for i := int64(0); i < length; i++ { pos := i + bytes + 1 switch { case pos < length: @@ -270,7 +270,7 @@ func (expr *BitwiseExpr) compileBinary(c *compiler, asm_ins_bb, asm_ins_uu func( asm_ins_uu() c.asm.jumpDestination(skip1, skip2) - return ctype{Type: sqltypes.Uint64, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Uint64, Flag: nullableFlags(lt.Flag | rt.Flag), Col: collationNumeric}, nil } func (expr *BitwiseExpr) compileShift(c *compiler, i int) (ctype, error) { @@ -299,8 +299,8 @@ func (expr *BitwiseExpr) compileShift(c *compiler, i int) (ctype, error) { return ctype{Type: sqltypes.VarBinary, Col: collationBinary}, nil } - _ = c.compileToBitwiseUint64(lt, 2) - _ = c.compileToUint64(rt, 1) + lt = c.compileToBitwiseUint64(lt, 2) + rt = c.compileToUint64(rt, 1) if i < 0 { c.asm.BitShiftLeft_uu() @@ -309,7 +309,7 @@ func (expr *BitwiseExpr) compileShift(c *compiler, i int) (ctype, error) { } c.asm.jumpDestination(skip1, skip2) - return ctype{Type: sqltypes.Uint64, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Uint64, Flag: nullableFlags(lt.Flag | rt.Flag), Col: collationNumeric}, nil } func (expr *BitwiseExpr) compile(c *compiler) (ctype, error) { diff --git a/go/vt/vtgate/evalengine/expr_bvar.go b/go/vt/vtgate/evalengine/expr_bvar.go index 6bc49caf660..0fffe3140a2 100644 --- a/go/vt/vtgate/evalengine/expr_bvar.go +++ b/go/vt/vtgate/evalengine/expr_bvar.go @@ -65,12 +65,12 @@ func (bv *BindVariable) eval(env *ExpressionEnv) (eval, error) { switch bvar.Type { case sqltypes.Tuple: if bv.Type != sqltypes.Tuple { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' cannot be a tuple", bv.Key) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' must be a tuple (is %s)", bv.Key, bvar.Type) } tuple := make([]eval, 0, len(bvar.Values)) for _, value := range bvar.Values { - e, err := 
valueToEval(sqltypes.MakeTrusted(value.Type, value.Value), typedCoercionCollation(value.Type, collations.CollationForType(value.Type, bv.Collation))) + e, err := valueToEval(sqltypes.MakeTrusted(value.Type, value.Value), typedCoercionCollation(value.Type, collations.CollationForType(value.Type, bv.Collation)), nil) if err != nil { return nil, err } @@ -80,13 +80,13 @@ func (bv *BindVariable) eval(env *ExpressionEnv) (eval, error) { default: if bv.Type == sqltypes.Tuple { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' must be a tuple (is %s)", bv.Key, bvar.Type) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' cannot be a tuple", bv.Key) } typ := bvar.Type if bv.typed() { typ = bv.Type } - return valueToEval(sqltypes.MakeTrusted(typ, bvar.Value), typedCoercionCollation(typ, collations.CollationForType(typ, bv.Collation))) + return valueToEval(sqltypes.MakeTrusted(typ, bvar.Value), typedCoercionCollation(typ, collations.CollationForType(typ, bv.Collation)), nil) } } @@ -106,11 +106,11 @@ func (bv *BindVariable) typeof(env *ExpressionEnv) (ctype, error) { case sqltypes.Null: return ctype{Type: sqltypes.Null, Flag: flagNull | flagNullable, Col: collationNull}, nil case sqltypes.HexNum, sqltypes.HexVal: - return ctype{Type: sqltypes.VarBinary, Flag: flagHex, Col: collationNumeric}, nil + return ctype{Type: sqltypes.VarBinary, Flag: flagHex | flagNullable, Col: collationNumeric}, nil case sqltypes.BitNum: - return ctype{Type: sqltypes.VarBinary, Flag: flagBit, Col: collationNumeric}, nil + return ctype{Type: sqltypes.VarBinary, Flag: flagBit | flagNullable, Col: collationNumeric}, nil default: - return ctype{Type: tt, Flag: 0, Col: typedCoercionCollation(tt, collations.CollationForType(tt, bv.Collation))}, nil + return ctype{Type: tt, Flag: flagNullable, Col: typedCoercionCollation(tt, collations.CollationForType(tt, bv.Collation))}, nil } } diff --git a/go/vt/vtgate/evalengine/expr_collate.go 
b/go/vt/vtgate/evalengine/expr_collate.go index 47e65a0dcc7..be0eb78882b 100644 --- a/go/vt/vtgate/evalengine/expr_collate.go +++ b/go/vt/vtgate/evalengine/expr_collate.go @@ -18,6 +18,8 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/charset" + "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -63,11 +65,13 @@ type ( CollateExpr struct { UnaryExpr TypedCollation collations.TypedCollation + CollationEnv *collations.Environment } IntroducerExpr struct { UnaryExpr TypedCollation collations.TypedCollation + CollationEnv *collations.Environment } ) @@ -84,7 +88,7 @@ func (c *CollateExpr) eval(env *ExpressionEnv) (eval, error) { case nil: return nil, nil case *evalBytes: - if err := collations.Local().EnsureCollate(e.col.Collation, c.TypedCollation.Collation); err != nil { + if err := env.collationEnv.EnsureCollate(e.col.Collation, c.TypedCollation.Collation); err != nil { return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } b = e.withCollation(c.TypedCollation) @@ -109,18 +113,19 @@ func (expr *CollateExpr) compile(c *compiler) (ctype, error) { switch ct.Type { case sqltypes.VarChar: - if err := collations.Local().EnsureCollate(ct.Col.Collation, expr.TypedCollation.Collation); err != nil { + if err := c.env.CollationEnv().EnsureCollate(ct.Col.Collation, expr.TypedCollation.Collation); err != nil { return ctype{}, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } fallthrough case sqltypes.VarBinary: c.asm.Collate(expr.TypedCollation.Collation) default: - return ctype{}, c.unsupported(expr) + c.asm.Convert_xc(1, sqltypes.VarChar, expr.TypedCollation.Collation, nil) } c.asm.jumpDestination(skip) + ct.Type = sqltypes.VarChar ct.Col = expr.TypedCollation ct.Flag |= flagExplicitCollation | flagNullable return ct, nil @@ -128,20 +133,48 @@ func (expr *CollateExpr) compile(c 
*compiler) (ctype, error) { var _ IR = (*IntroducerExpr)(nil) +func introducerCast(e eval, col collations.ID) (*evalBytes, error) { + if col == collations.CollationBinaryID { + return evalToBinary(e), nil + } + + var bytes []byte + if b, ok := e.(*evalBytes); !ok { + bytes = b.ToRawBytes() + } else { + cs := colldata.Lookup(col).Charset() + bytes = b.bytes + // We only need to pad here for encodings that have a minimum + // character byte width larger than 1, which is all UTF-16 + // variations and UTF-32. + switch cs.(type) { + case charset.Charset_utf16, charset.Charset_utf16le, charset.Charset_ucs2: + if len(bytes)%2 != 0 { + bytes = append([]byte{0}, bytes...) + } + case charset.Charset_utf32: + if mod := len(bytes) % 4; mod != 0 { + bytes = append(make([]byte, 4-mod), bytes...) + } + } + } + typedcol := collations.TypedCollation{ + Collation: col, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + return newEvalText(bytes, typedcol), nil +} + func (expr *IntroducerExpr) eval(env *ExpressionEnv) (eval, error) { e, err := expr.Inner.eval(env) if err != nil { return nil, err } - var b *evalBytes - if expr.TypedCollation.Collation == collations.CollationBinaryID { - b = evalToBinary(e) - } else { - b, err = evalToVarchar(e, expr.TypedCollation.Collation, false) - if err != nil { - return nil, err - } + b, err := introducerCast(e, expr.TypedCollation.Collation) + if err != nil { + return nil, err } b.flag |= flagExplicitCollation return b, nil diff --git a/go/vt/vtgate/evalengine/expr_column.go b/go/vt/vtgate/evalengine/expr_column.go index 741d04c6a06..d53585ceb8b 100644 --- a/go/vt/vtgate/evalengine/expr_column.go +++ b/go/vt/vtgate/evalengine/expr_column.go @@ -29,8 +29,12 @@ type ( Column struct { Offset int Type sqltypes.Type + Size int32 + Scale int32 Collation collations.TypedCollation Original sqlparser.Expr + Nullable bool + Values *EnumSetValues // For ENUM and SET types // dynamicTypeOffset is set when the type of 
this column cannot be calculated // at translation time. Since expressions with dynamic types cannot be compiled ahead of time, @@ -51,12 +55,16 @@ func (c *Column) IsExpr() {} // eval implements the expression interface func (c *Column) eval(env *ExpressionEnv) (eval, error) { - return valueToEval(env.Row[c.Offset], c.Collation) + return valueToEval(env.Row[c.Offset], c.Collation, c.Values) } func (c *Column) typeof(env *ExpressionEnv) (ctype, error) { if c.typed() { - return ctype{Type: c.Type, Flag: flagNullable, Col: c.Collation}, nil + var nullable typeFlag + if c.Nullable { + nullable = flagNullable + } + return ctype{Type: c.Type, Size: c.Size, Scale: c.Scale, Flag: nullable, Col: c.Collation, Values: c.Values}, nil } if c.Offset < len(env.Fields) { field := env.Fields[c.Offset] @@ -67,14 +75,16 @@ func (c *Column) typeof(env *ExpressionEnv) (ctype, error) { } return ctype{ - Type: field.Type, - Col: typedCoercionCollation(field.Type, collations.ID(field.Charset)), - Flag: f, + Type: field.Type, + Col: typedCoercionCollation(field.Type, collations.ID(field.Charset)), + Flag: f, + Size: int32(field.ColumnLength), + Scale: int32(field.Decimals), }, nil } if c.Offset < len(env.Row) { value := env.Row[c.Offset] - return ctype{Type: value.Type(), Flag: 0, Col: c.Collation}, nil + return ctype{Type: value.Type(), Flag: 0, Col: c.Collation, Values: c.Values}, nil } return ctype{}, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "no column at offset %d", c.Offset) } @@ -85,7 +95,12 @@ func (column *Column) compile(c *compiler) (ctype, error) { if column.typed() { typ.Type = column.Type typ.Col = column.Collation - typ.Flag = flagNullable + if column.Nullable { + typ.Flag = flagNullable + } + typ.Size = column.Size + typ.Scale = column.Scale + typ.Values = column.Values } else if c.dynamicTypes != nil { typ = c.dynamicTypes[column.dynamicTypeOffset] } else { @@ -108,6 +123,10 @@ func (column *Column) compile(c *compiler) (ctype, error) { typ.Type = sqltypes.Float64 case 
sqltypes.IsDecimal(tt): c.asm.PushColumn_d(column.Offset) + case tt == sqltypes.Enum: + c.asm.PushColumn_enum(column.Offset, column.Values) + case tt == sqltypes.Set: + c.asm.PushColumn_set(column.Offset, column.Values) case sqltypes.IsText(tt): if tt == sqltypes.HexNum { c.asm.PushColumn_hexnum(column.Offset) diff --git a/go/vt/vtgate/evalengine/expr_column_test.go b/go/vt/vtgate/evalengine/expr_column_test.go index b8bc5b9c640..bd7fd4250fd 100644 --- a/go/vt/vtgate/evalengine/expr_column_test.go +++ b/go/vt/vtgate/evalengine/expr_column_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -28,8 +29,9 @@ func TestTypeOf(t *testing.T) { t.Skipf("TODO: these tests are not green") env := &ExpressionEnv{ - BindVars: make(map[string]*querypb.BindVariable), - now: time.Now(), + BindVars: make(map[string]*querypb.BindVariable), + now: time.Now(), + collationEnv: collations.MySQL8(), } c := &Column{ Type: sqltypes.Unknown, diff --git a/go/vt/vtgate/evalengine/expr_compare.go b/go/vt/vtgate/evalengine/expr_compare.go index 6639bb7f7e2..f3bd44588ee 100644 --- a/go/vt/vtgate/evalengine/expr_compare.go +++ b/go/vt/vtgate/evalengine/expr_compare.go @@ -52,7 +52,7 @@ type ( ComparisonOp interface { String() string - compare(left, right eval) (boolean, error) + compare(collationEnv *collations.Environment, left, right eval) (boolean, error) } compareEQ struct{} @@ -72,49 +72,49 @@ func (*ComparisonExpr) filterExpr() {} func (*InExpr) filterExpr() {} func (compareEQ) String() string { return "=" } -func (compareEQ) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, true) +func (compareEQ) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, true, collationEnv) return makeboolean2(cmp == 0, isNull), err } func (compareNE) String() string { 
return "!=" } -func (compareNE) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, true) +func (compareNE) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, true, collationEnv) return makeboolean2(cmp != 0, isNull), err } func (compareLT) String() string { return "<" } -func (compareLT) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareLT) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp < 0, isNull), err } func (compareLE) String() string { return "<=" } -func (compareLE) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareLE) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp <= 0, isNull), err } func (compareGT) String() string { return ">" } -func (compareGT) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareGT) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp > 0, isNull), err } func (compareGE) String() string { return ">=" } -func (compareGE) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareGE) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp >= 0, isNull), err } func (compareNullSafeEQ) String() string { return "<=>" } -func (compareNullSafeEQ) compare(left, right eval) 
(boolean, error) { - cmp, err := evalCompareNullSafe(left, right) +func (compareNullSafeEQ) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, err := evalCompareNullSafe(left, right, collationEnv) return makeboolean(cmp == 0), err } func typeIsTextual(tt sqltypes.Type) bool { - return sqltypes.IsText(tt) || sqltypes.IsBinary(tt) || tt == sqltypes.Time + return sqltypes.IsTextOrBinary(tt) || tt == sqltypes.Time || tt == sqltypes.Enum || tt == sqltypes.Set } func compareAsStrings(l, r sqltypes.Type) bool { @@ -143,6 +143,14 @@ func compareAsDates(l, r sqltypes.Type) bool { return sqltypes.IsDateOrTime(l) && sqltypes.IsDateOrTime(r) } +func compareAsEnums(l, r sqltypes.Type) bool { + return sqltypes.IsEnum(l) && sqltypes.IsEnum(r) +} + +func compareAsSets(l, r sqltypes.Type) bool { + return sqltypes.IsSet(l) && sqltypes.IsSet(r) +} + func compareAsDateAndString(l, r sqltypes.Type) bool { return (sqltypes.IsDate(l) && typeIsTextual(r)) || (typeIsTextual(l) && sqltypes.IsDate(r)) } @@ -164,7 +172,7 @@ func compareAsJSON(l, r sqltypes.Type) bool { return l == sqltypes.TypeJSON || r == sqltypes.TypeJSON } -func evalCompareNullSafe(lVal, rVal eval) (int, error) { +func evalCompareNullSafe(lVal, rVal eval, collationEnv *collations.Environment) (int, error) { if lVal == nil { if rVal == nil { return 0, nil @@ -175,18 +183,18 @@ func evalCompareNullSafe(lVal, rVal eval) (int, error) { return 1, nil } if left, right, ok := compareAsTuples(lVal, rVal); ok { - return evalCompareTuplesNullSafe(left.t, right.t) + return evalCompareTuplesNullSafe(left.t, right.t, collationEnv) } - n, err := evalCompare(lVal, rVal) + n, err := evalCompare(lVal, rVal, collationEnv) return n, err } -func evalCompareMany(left, right []eval, fulleq bool) (int, bool, error) { +func evalCompareMany(left, right []eval, fulleq bool, collationEnv *collations.Environment) (int, bool, error) { // For row comparisons, (a, b) = (x, y) is equivalent to: (a = x) AND (b = y) 
var seenNull bool for idx, lResult := range left { rResult := right[idx] - n, isNull, err := evalCompareAll(lResult, rResult, fulleq) + n, isNull, err := evalCompareAll(lResult, rResult, fulleq, collationEnv) if err != nil { return 0, false, err } @@ -203,28 +211,32 @@ func evalCompareMany(left, right []eval, fulleq bool) (int, bool, error) { return 0, seenNull, nil } -func evalCompareAll(lVal, rVal eval, fulleq bool) (int, bool, error) { +func evalCompareAll(lVal, rVal eval, fulleq bool, collationEnv *collations.Environment) (int, bool, error) { if lVal == nil || rVal == nil { return 0, true, nil } if left, right, ok := compareAsTuples(lVal, rVal); ok { - return evalCompareMany(left.t, right.t, fulleq) + return evalCompareMany(left.t, right.t, fulleq, collationEnv) } - n, err := evalCompare(lVal, rVal) + n, err := evalCompare(lVal, rVal, collationEnv) return n, false, err } // For more details on comparison expression evaluation and type conversion: // - https://dev.mysql.com/doc/refman/8.0/en/type-conversion.html -func evalCompare(left, right eval) (comp int, err error) { +func evalCompare(left, right eval, collationEnv *collations.Environment) (comp int, err error) { lt := left.SQLType() rt := right.SQLType() switch { case compareAsDates(lt, rt): return compareDates(left.(*evalTemporal), right.(*evalTemporal)), nil + case compareAsEnums(lt, rt): + return compareEnums(left.(*evalEnum), right.(*evalEnum)), nil + case compareAsSets(lt, rt): + return compareSets(left.(*evalSet), right.(*evalSet)), nil case compareAsStrings(lt, rt): - return compareStrings(left, right) + return compareStrings(left, right, collationEnv) case compareAsSameNumericType(lt, rt) || compareAsDecimal(lt, rt): return compareNumeric(left, right) case compareAsDateAndString(lt, rt): @@ -269,12 +281,12 @@ func fallbackBinary(t sqltypes.Type) bool { return false } -func evalCompareTuplesNullSafe(left, right []eval) (int, error) { +func evalCompareTuplesNullSafe(left, right []eval, collationEnv 
*collations.Environment) (int, error) { if len(left) != len(right) { panic("did not typecheck cardinality") } for idx, lResult := range left { - res, err := evalCompareNullSafe(lResult, right[idx]) + res, err := evalCompareNullSafe(lResult, right[idx], collationEnv) if err != nil { return 0, err } @@ -302,7 +314,7 @@ func (c *ComparisonExpr) eval(env *ExpressionEnv) (eval, error) { if _, ok := c.Op.(compareNullSafeEQ); !ok && right == nil { return nil, nil } - cmp, err := c.Op.compare(left, right) + cmp, err := c.Op.compare(env.collationEnv, left, right) if err != nil { return nil, err } @@ -312,25 +324,25 @@ func (c *ComparisonExpr) eval(env *ExpressionEnv) (eval, error) { func (expr *ComparisonExpr) compileAsTuple(c *compiler) (ctype, error) { switch expr.Op.(type) { case compareNullSafeEQ: - c.asm.CmpTupleNullsafe() + c.asm.CmpTupleNullsafe(c.env.CollationEnv()) return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil case compareEQ: - c.asm.CmpTuple(true) + c.asm.CmpTuple(c.env.CollationEnv(), true) c.asm.Cmp_eq_n() case compareNE: - c.asm.CmpTuple(true) + c.asm.CmpTuple(c.env.CollationEnv(), true) c.asm.Cmp_ne_n() case compareLT: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.env.CollationEnv(), false) c.asm.Cmp_lt_n() case compareLE: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.env.CollationEnv(), false) c.asm.Cmp_le_n() case compareGT: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.env.CollationEnv(), false) c.asm.Cmp_gt_n() case compareGE: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.env.CollationEnv(), false) c.asm.Cmp_ge_n() default: panic("invalid comparison operator") @@ -365,11 +377,13 @@ func (expr *ComparisonExpr) compile(c *compiler) (ctype, error) { swapped := false var skip2 *jump + nullable := true switch expr.Op.(type) { case compareNullSafeEQ: skip2 = c.asm.jumpFrom() c.asm.Cmp_nullsafe(skip2) + nullable = false default: skip2 = c.compileNullCheck1r(rt) } @@ -387,12 +401,22 @@ func (expr *ComparisonExpr) compile(c *compiler) 
(ctype, error) { c.asm.CmpDateString() case compareAsDateAndNumeric(lt.Type, rt.Type): if sqltypes.IsDateOrTime(lt.Type) { - c.asm.Convert_Ti(2) - lt.Type = sqltypes.Int64 + if lt.Size == 0 { + c.asm.Convert_Ti(2) + lt.Type = sqltypes.Int64 + } else { + c.asm.Convert_Tf(2) + lt.Type = sqltypes.Float64 + } } if sqltypes.IsDateOrTime(rt.Type) { - c.asm.Convert_Ti(1) - rt.Type = sqltypes.Int64 + if rt.Size == 0 { + c.asm.Convert_Ti(1) + rt.Type = sqltypes.Int64 + } else { + c.asm.Convert_Tf(1) + rt.Type = sqltypes.Float64 + } } swapped = c.compareNumericTypes(lt, rt) case compareAsJSON(lt.Type, rt.Type): @@ -407,6 +431,9 @@ func (expr *ComparisonExpr) compile(c *compiler) (ctype, error) { } cmptype := ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean} + if nullable { + cmptype.Flag |= nullableFlags(lt.Flag | rt.Flag) + } switch expr.Op.(type) { case compareEQ: @@ -450,14 +477,14 @@ func (expr *ComparisonExpr) compile(c *compiler) (ctype, error) { return cmptype, nil } -func evalInExpr(lhs eval, rhs *evalTuple) (boolean, error) { +func evalInExpr(collationEnv *collations.Environment, lhs eval, rhs *evalTuple) (boolean, error) { if lhs == nil { return boolNULL, nil } var foundNull, found bool for _, rtuple := range rhs.t { - numeric, isNull, err := evalCompareAll(lhs, rtuple, true) + numeric, isNull, err := evalCompareAll(lhs, rtuple, true, collationEnv) if err != nil { return boolNULL, err } @@ -491,7 +518,7 @@ func (i *InExpr) eval(env *ExpressionEnv) (eval, error) { if !ok { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rhs of an In operation should be a tuple") } - in, err := evalInExpr(left, rtuple) + in, err := evalInExpr(env.collationEnv, left, rtuple) if err != nil { return nil, err } @@ -540,18 +567,20 @@ func (expr *InExpr) compile(c *compiler) (ctype, error) { switch rhs := expr.Right.(type) { case TupleExpr: + var rt ctype if table := expr.compileTable(lhs, rhs); table != nil { c.asm.In_table(expr.Negate, table) } else { - _, err 
:= rhs.compile(c) + rt, err = rhs.compile(c) if err != nil { return ctype{}, err } - c.asm.In_slow(expr.Negate) + c.asm.In_slow(c.env.CollationEnv(), expr.Negate) } - return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean | (nullableFlags(lhs.Flag) | (rt.Flag & flagNullable))}, nil case *BindVariable: - return ctype{}, c.unsupported(expr) + return ctype{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rhs of an In operation should be a tuple") default: panic("unreachable") } @@ -573,7 +602,7 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { } var col collations.TypedCollation - left, right, col, err = mergeAndCoerceCollations(left, right) + left, right, col, err = mergeAndCoerceCollations(left, right, env.collationEnv) if err != nil { return nil, err } @@ -606,7 +635,7 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { skip := c.compileNullCheck2(lt, rt) if !lt.isTextual() { - c.asm.Convert_xc(2, sqltypes.VarChar, c.collation, 0, false) + c.asm.Convert_xc(2, sqltypes.VarChar, c.collation, nil) lt.Col = collations.TypedCollation{ Collation: c.collation, Coercibility: collations.CoerceCoercible, @@ -615,7 +644,7 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { } if !rt.isTextual() { - c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, nil) rt.Col = collations.TypedCollation{ Collation: c.collation, Coercibility: collations.CoerceCoercible, @@ -626,10 +655,9 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { var merged collations.TypedCollation var coerceLeft colldata.Coercion var coerceRight colldata.Coercion - var env = collations.Local() if lt.Col.Collation != rt.Col.Collation { - merged, coerceLeft, coerceRight, err = colldata.Merge(env, lt.Col, rt.Col, colldata.CoercionOptions{ + merged, coerceLeft, coerceRight, err = 
colldata.Merge(c.env.CollationEnv(), lt.Col, rt.Col, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) diff --git a/go/vt/vtgate/evalengine/expr_convert.go b/go/vt/vtgate/evalengine/expr_convert.go index 900d4e37f8f..a63b8197d77 100644 --- a/go/vt/vtgate/evalengine/expr_convert.go +++ b/go/vt/vtgate/evalengine/expr_convert.go @@ -19,6 +19,7 @@ package evalengine import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/ptr" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -27,15 +28,16 @@ import ( type ( ConvertExpr struct { UnaryExpr - Type string - Length, Scale int - HasLength, HasScale bool - Collation collations.ID + Type string + Length, Scale *int + Collation collations.ID + CollationEnv *collations.Environment } ConvertUsingExpr struct { UnaryExpr - Collation collations.ID + Collation collations.ID + CollationEnv *collations.Environment } ) @@ -45,10 +47,10 @@ var _ IR = (*ConvertUsingExpr)(nil) func (c *ConvertExpr) returnUnsupportedError() error { var err error switch { - case c.HasLength && c.HasScale: - err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s(%d,%d)", c.Type, c.Length, c.Scale) - case c.HasLength: - err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s(%d)", c.Type, c.Length) + case c.Length != nil && c.Scale != nil: + err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s(%d,%d)", c.Type, *c.Length, *c.Scale) + case c.Length != nil: + err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s(%d)", c.Type, *c.Length) default: err = vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "Unsupported type conversion: %s", c.Type) } @@ -58,11 +60,11 @@ func (c *ConvertExpr) returnUnsupportedError() error { func (c *ConvertExpr) decimalPrecision() (int32, int32) { m := 10 d := 0 - if 
c.HasLength { - m = c.Length + if c.Length != nil { + m = *c.Length } - if c.HasScale { - d = c.Scale + if c.Scale != nil { + d = *c.Scale } if m == 0 && d == 0 { m = 10 @@ -82,8 +84,8 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { switch c.Type { case "BINARY": b := evalToBinary(e) - if c.HasLength { - b.truncateInPlace(c.Length) + if c.Length != nil { + b.truncateInPlace(*c.Length) } b.tt = int16(c.convertToBinaryType(e.SQLType())) return b, nil @@ -94,8 +96,8 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { // return NULL on error return nil, nil } - if c.HasLength { - t.truncateInPlace(c.Length) + if c.Length != nil { + t.truncateInPlace(*c.Length) } t.tt = int16(c.convertToCharType(e.SQLType())) return t, nil @@ -106,8 +108,8 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { f, _ := evalToFloat(e) return f, nil case "FLOAT": - if c.HasLength { - switch p := c.Length; { + if c.Length != nil { + switch p := *c.Length; { case p > 53: return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 53.", p) } @@ -120,25 +122,25 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { case "JSON": return evalToJSON(e) case "DATETIME": - switch p := c.Length; { - case p > 6: + p := ptr.Unwrap(c.Length, 0) + if p > 6 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. 
Maximum is 6.", p) } - if dt := evalToDateTime(e, c.Length, env.now); dt != nil { + if dt := evalToDateTime(e, p, env.now, env.sqlmode.AllowZeroDate()); dt != nil { return dt, nil } return nil, nil case "DATE": - if d := evalToDate(e, env.now); d != nil { + if d := evalToDate(e, env.now, env.sqlmode.AllowZeroDate()); d != nil { return d, nil } return nil, nil case "TIME": - switch p := c.Length; { - case p > 6: + p := ptr.Unwrap(c.Length, 0) + if p > 6 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for 'CONVERT'. Maximum is 6.", p) } - if t := evalToTime(e, c.Length); t != nil { + if t := evalToTime(e, p); t != nil { return t, nil } return nil, nil @@ -150,8 +152,8 @@ func (c *ConvertExpr) eval(env *ExpressionEnv) (eval, error) { } func (c *ConvertExpr) convertToBinaryType(tt sqltypes.Type) sqltypes.Type { - if c.HasLength { - if c.Length > 64*1024 { + if c.Length != nil { + if *c.Length > 64*1024 { return sqltypes.Blob } } else if tt == sqltypes.Blob || tt == sqltypes.TypeJSON { @@ -161,9 +163,9 @@ func (c *ConvertExpr) convertToBinaryType(tt sqltypes.Type) sqltypes.Type { } func (c *ConvertExpr) convertToCharType(tt sqltypes.Type) sqltypes.Type { - if c.HasLength { + if c.Length != nil { col := colldata.Lookup(c.Collation) - length := c.Length * col.Charset().MaxWidth() + length := *c.Length * col.Charset().MaxWidth() if length > 64*1024 { return sqltypes.Text } @@ -185,25 +187,25 @@ func (conv *ConvertExpr) compile(c *compiler) (ctype, error) { switch conv.Type { case "BINARY": convt = ctype{Type: conv.convertToBinaryType(arg.Type), Col: collationBinary} - c.asm.Convert_xb(1, convt.Type, conv.Length, conv.HasLength) + c.asm.Convert_xb(1, convt.Type, conv.Length) case "CHAR", "NCHAR": convt = ctype{ Type: conv.convertToCharType(arg.Type), Col: collations.TypedCollation{Collation: conv.Collation}, } - c.asm.Convert_xc(1, convt.Type, convt.Col.Collation, conv.Length, conv.HasLength) + c.asm.Convert_xc(1, convt.Type, 
convt.Col.Collation, conv.Length) case "DECIMAL": - convt = ctype{Type: sqltypes.Decimal, Col: collationNumeric} m, d := conv.decimalPrecision() + convt = ctype{Type: sqltypes.Decimal, Col: collationNumeric, Size: m, Scale: d} c.asm.Convert_xd(1, m, d) case "DOUBLE", "REAL": convt = c.compileToFloat(arg, 1) case "FLOAT": - return ctype{}, c.unsupported(conv) + return ctype{}, conv.returnUnsupportedError() case "SIGNED", "SIGNED INTEGER": convt = c.compileToInt64(arg, 1) @@ -222,18 +224,18 @@ func (conv *ConvertExpr) compile(c *compiler) (ctype, error) { convt = c.compileToDate(arg, 1) case "DATETIME": - switch p := conv.Length; { - case p > 6: + p := ptr.Unwrap(conv.Length, 0) + if p > 6 { return ctype{}, c.unsupported(conv) } - convt = c.compileToDateTime(arg, 1, conv.Length) + convt = c.compileToDateTime(arg, 1, p) case "TIME": - switch p := conv.Length; { - case p > 6: + p := ptr.Unwrap(conv.Length, 0) + if p > 6 { return ctype{}, c.unsupported(conv) } - convt = c.compileToTime(arg, 1, conv.Length) + convt = c.compileToTime(arg, 1, p) default: return ctype{}, c.unsupported(conv) @@ -267,7 +269,7 @@ func (conv *ConvertUsingExpr) compile(c *compiler) (ctype, error) { } skip := c.compileNullCheck1(ct) - c.asm.Convert_xc(1, sqltypes.VarChar, conv.Collation, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, conv.Collation, nil) c.asm.jumpDestination(skip) col := collations.TypedCollation{ diff --git a/go/vt/vtgate/evalengine/expr_env.go b/go/vt/vtgate/evalengine/expr_env.go index ffcde05d2a0..38a65f9b4e0 100644 --- a/go/vt/vtgate/evalengine/expr_env.go +++ b/go/vt/vtgate/evalengine/expr_env.go @@ -21,15 +21,20 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/vtenv" ) type VCursor interface { TimeZone() *time.Location GetKeyspace() string + 
SQLMode() string + Environment() *vtenv.Environment } type ( @@ -43,9 +48,11 @@ type ( Fields []*querypb.Field // internal state - now time.Time - vc VCursor - user *querypb.VTGateCallerID + now time.Time + vc VCursor + user *querypb.VTGateCallerID + sqlmode SQLMode + collationEnv *collations.Environment } ) @@ -68,16 +75,14 @@ func (env *ExpressionEnv) currentUser() string { } func (env *ExpressionEnv) currentDatabase() string { - if env.vc == nil { - return "" - } return env.vc.GetKeyspace() } +func (env *ExpressionEnv) currentVersion() string { + return env.vc.Environment().MySQLVersion() +} + func (env *ExpressionEnv) currentTimezone() *time.Location { - if env.vc == nil { - return nil - } return env.vc.TimeZone() } @@ -86,12 +91,12 @@ func (env *ExpressionEnv) Evaluate(expr Expr) (EvalResult, error) { return env.EvaluateVM(p) } e, err := expr.eval(env) - return EvalResult{e}, err + return EvalResult{v: e, collationEnv: env.collationEnv}, err } func (env *ExpressionEnv) EvaluateAST(expr Expr) (EvalResult, error) { e, err := expr.eval(env) - return EvalResult{e}, err + return EvalResult{v: e, collationEnv: env.collationEnv}, err } func (env *ExpressionEnv) TypeOf(expr Expr) (Type, error) { @@ -99,11 +104,7 @@ func (env *ExpressionEnv) TypeOf(expr Expr) (Type, error) { if err != nil { return Type{}, err } - return Type{ - Type: ty.Type, - Coll: ty.Col.Collation, - Nullable: ty.Flag&flagNullable != 0, - }, nil + return NewTypeEx(ty.Type, ty.Col.Collation, ty.Flag&flagNullable != 0, ty.Size, ty.Scale, ty.Values), nil } func (env *ExpressionEnv) SetTime(now time.Time) { @@ -115,9 +116,38 @@ func (env *ExpressionEnv) SetTime(now time.Time) { } } +func (env *ExpressionEnv) VCursor() VCursor { + return env.vc +} + +type emptyVCursor struct { + env *vtenv.Environment + tz *time.Location +} + +func (e *emptyVCursor) Environment() *vtenv.Environment { + return e.env +} + +func (e *emptyVCursor) TimeZone() *time.Location { + return e.tz +} + +func (e *emptyVCursor) 
GetKeyspace() string { + return "" +} + +func (e *emptyVCursor) SQLMode() string { + return config.DefaultSQLMode +} + +func NewEmptyVCursor(env *vtenv.Environment, tz *time.Location) VCursor { + return &emptyVCursor{env: env, tz: tz} +} + // EmptyExpressionEnv returns a new ExpressionEnv with no bind vars or row -func EmptyExpressionEnv() *ExpressionEnv { - return NewExpressionEnv(context.Background(), nil, nil) +func EmptyExpressionEnv(env *vtenv.Environment) *ExpressionEnv { + return NewExpressionEnv(context.Background(), nil, NewEmptyVCursor(env, time.Local)) } // NewExpressionEnv returns an expression environment with no current row, but with bindvars @@ -125,5 +155,31 @@ func NewExpressionEnv(ctx context.Context, bindVars map[string]*querypb.BindVari env := &ExpressionEnv{BindVars: bindVars, vc: vc} env.user = callerid.ImmediateCallerIDFromContext(ctx) env.SetTime(time.Now()) + env.sqlmode = ParseSQLMode(vc.SQLMode()) + env.collationEnv = vc.Environment().CollationEnv() return env } + +const ( + sqlModeParsed = 1 << iota + sqlModeNoZeroDate +) + +type SQLMode uint32 + +func (mode SQLMode) AllowZeroDate() bool { + if mode == 0 { + // default: do not allow zero-date if the sqlmode is not set + return false + } + return (mode & sqlModeNoZeroDate) == 0 +} + +func ParseSQLMode(sqlmode string) SQLMode { + var mode SQLMode + if strings.Contains(sqlmode, "NO_ZERO_DATE") { + mode |= sqlModeNoZeroDate + } + mode |= sqlModeParsed + return mode +} diff --git a/go/vt/vtgate/evalengine/expr_env_test.go b/go/vt/vtgate/evalengine/expr_env_test.go new file mode 100644 index 00000000000..f75cc6f1376 --- /dev/null +++ b/go/vt/vtgate/evalengine/expr_env_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" +) + +// TestExpressionEnvTypeOf tests the functionality of the TypeOf method on ExpressionEnv +func TestExpressionEnvTypeOf(t *testing.T) { + sumCol := &Column{ + Type: sqltypes.Unknown, + Offset: 0, + Original: &sqlparser.Sum{ + Arg: sqlparser.NewColName("l_discount"), + }, + dynamicTypeOffset: 0, + } + countCol := &Column{ + Type: sqltypes.Unknown, + Offset: 1, + Original: &sqlparser.Count{ + Args: sqlparser.Exprs{ + sqlparser.NewColName("l_discount"), + }, + }, + dynamicTypeOffset: 1, + } + + tests := []struct { + name string + env *ExpressionEnv + expr Expr + wantedScale int32 + wantedType sqltypes.Type + }{ + { + name: "Decimal divided by integer", + env: &ExpressionEnv{ + Fields: []*querypb.Field{ + { + Name: "avg_disc", + Type: querypb.Type_DECIMAL, + ColumnLength: 39, + Decimals: 2, + }, + { + Name: "count(l_discount)", + Type: querypb.Type_INT64, + ColumnLength: 21, + }, + }, + sqlmode: 3, + }, + expr: &UntypedExpr{ + env: vtenv.NewTestEnv(), + mu: sync.Mutex{}, + collation: 255, + typed: nil, + needTypes: []typedIR{sumCol, countCol}, + ir: &ArithmeticExpr{ + Op: &opArithDiv{}, + BinaryExpr: BinaryExpr{ + Left: sumCol, + Right: countCol, + }, + }, + }, + wantedScale: 6, + wantedType: sqltypes.Decimal, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.env.TypeOf(tt.expr) + 
require.NoError(t, err) + require.EqualValues(t, tt.wantedType, got.Type()) + require.EqualValues(t, tt.wantedScale, got.Scale()) + }) + } +} diff --git a/go/vt/vtgate/evalengine/expr_literal.go b/go/vt/vtgate/evalengine/expr_literal.go index 5058c157229..2291356fcc7 100644 --- a/go/vt/vtgate/evalengine/expr_literal.go +++ b/go/vt/vtgate/evalengine/expr_literal.go @@ -70,6 +70,10 @@ func (l *Literal) typeof(*ExpressionEnv) (ctype, error) { if e.u > math.MaxInt64+1 { f |= flagIntegerOvf } + case *evalTemporal: + return ctype{Type: e.t, Col: collationNumeric, Size: int32(e.prec)}, nil + case *evalDecimal: + return ctype{Type: sqltypes.Decimal, Col: collationNumeric, Size: e.length, Scale: -e.dec.Exponent()}, nil } return ctype{Type: l.inner.SQLType(), Flag: f, Col: evalCollation(l.inner)}, nil } diff --git a/go/vt/vtgate/evalengine/expr_logical.go b/go/vt/vtgate/evalengine/expr_logical.go index 9d2f17becec..561915f600c 100644 --- a/go/vt/vtgate/evalengine/expr_logical.go +++ b/go/vt/vtgate/evalengine/expr_logical.go @@ -17,7 +17,6 @@ limitations under the License. 
package evalengine import ( - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" ) @@ -379,7 +378,7 @@ func (expr *NotExpr) compile(c *compiler) (ctype, error) { c.asm.Not_i() } c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Flag: flagNullable | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(arg.Flag) | flagIsBoolean, Col: collationNumeric}, nil } func (l *LogicalExpr) eval(env *ExpressionEnv) (eval, error) { @@ -450,7 +449,7 @@ func (expr *LogicalExpr) compile(c *compiler) (ctype, error) { expr.op.compileRight(c) c.asm.jumpDestination(jump) - return ctype{Type: sqltypes.Int64, Flag: flagNullable | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: ((lt.Flag | rt.Flag) & flagNullable) | flagIsBoolean, Col: collationNumeric}, nil } func intervalCompare(n, val eval) (int, bool, error) { @@ -586,7 +585,6 @@ func (is *IsExpr) compile(c *compiler) (ctype, error) { func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { var ta typeAggregation var ca collationAggregation - var local = collations.Local() var result eval var matched = false @@ -606,7 +604,7 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { return nil, err } ta.addEval(then) - if err := ca.add(local, evalCollation(then)); err != nil { + if err := ca.add(evalCollation(then), env.collationEnv); err != nil { return nil, err } @@ -621,7 +619,7 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { return nil, err } ta.addEval(e) - if err := ca.add(local, evalCollation(e)); err != nil { + if err := ca.add(evalCollation(e), env.collationEnv); err != nil { return nil, err } if !matched { @@ -633,7 +631,7 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { if !matched { return nil, nil } - return evalCoerce(result, ta.result(), ca.result().Collation, env.now) + return evalCoerce(result, ta.result(), ta.size, ta.scale, 
ca.result().Collation, env.now, env.sqlmode.AllowZeroDate()) } func (c *CaseExpr) constant() bool { @@ -676,7 +674,6 @@ func (c *CaseExpr) simplify(env *ExpressionEnv) error { func (cs *CaseExpr) compile(c *compiler) (ctype, error) { var ca collationAggregation var ta typeAggregation - var local = collations.Local() for _, wt := range cs.cases { when, err := wt.when.compile(c) @@ -693,8 +690,8 @@ func (cs *CaseExpr) compile(c *compiler) (ctype, error) { return ctype{}, err } - ta.add(then.Type, then.Flag) - if err := ca.add(local, then.Col); err != nil { + ta.add(then.Type, then.Flag, then.Size, then.Scale) + if err := ca.add(then.Col, c.env.CollationEnv()); err != nil { return ctype{}, err } } @@ -705,14 +702,18 @@ func (cs *CaseExpr) compile(c *compiler) (ctype, error) { return ctype{}, err } - ta.add(els.Type, els.Flag) - if err := ca.add(local, els.Col); err != nil { + ta.add(els.Type, els.Flag, els.Size, els.Scale) + if err := ca.add(els.Col, c.env.CollationEnv()); err != nil { return ctype{}, err } } - ct := ctype{Type: ta.result(), Col: ca.result()} - c.asm.CmpCase(len(cs.cases), cs.Else != nil, ct.Type, ct.Col) + var f typeFlag + if ta.nullable { + f |= flagNullable + } + ct := ctype{Type: ta.result(), Flag: f, Col: ca.result(), Scale: ta.scale, Size: ta.size} + c.asm.CmpCase(len(cs.cases), cs.Else != nil, ct.Type, ct.Size, ct.Scale, ct.Col, c.sqlmode.AllowZeroDate()) return ct, nil } diff --git a/go/vt/vtgate/evalengine/expr_tuple.go b/go/vt/vtgate/evalengine/expr_tuple.go index 132d38108a0..d4943271ccb 100644 --- a/go/vt/vtgate/evalengine/expr_tuple.go +++ b/go/vt/vtgate/evalengine/expr_tuple.go @@ -66,5 +66,5 @@ func (tuple TupleExpr) FormatFast(buf *sqlparser.TrackedBuffer) { } func (tuple TupleExpr) typeof(*ExpressionEnv) (ctype, error) { - return ctype{Type: sqltypes.Tuple}, nil + return ctype{Type: sqltypes.Tuple, Col: collationBinary}, nil } diff --git a/go/vt/vtgate/evalengine/expr_tuple_bvar.go b/go/vt/vtgate/evalengine/expr_tuple_bvar.go new file 
mode 100644 index 00000000000..14cfbd95a8b --- /dev/null +++ b/go/vt/vtgate/evalengine/expr_tuple_bvar.go @@ -0,0 +1,104 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evalengine + +import ( + "errors" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" +) + +type ( + TupleBindVariable struct { + Key string + + Index int + Type sqltypes.Type + Collation collations.ID + } +) + +var _ IR = (*TupleBindVariable)(nil) +var _ Expr = (*TupleBindVariable)(nil) + +func (bv *TupleBindVariable) IR() IR { + return bv +} + +func (bv *TupleBindVariable) IsExpr() {} + +// eval implements the expression interface +func (bv *TupleBindVariable) eval(env *ExpressionEnv) (eval, error) { + bvar, err := env.lookupBindVar(bv.Key) + if err != nil { + return nil, err + } + + if bvar.Type != sqltypes.Tuple { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "query argument '%s' must be a tuple (is %s)", bv.Key, bvar.Type.String()) + } + + tuple := make([]eval, 0, len(bvar.Values)) + for _, value := range bvar.Values { + if value.Type != sqltypes.Tuple { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "result value must be a tuple (is %s)", value.Type.String()) + } + sValue := sqltypes.ProtoToValue(value) + var evalErr error + idx := 0 + found := false + // looking for a single index on each Tuple Value. 
+ loopErr := sValue.ForEachValue(func(val sqltypes.Value) { + if found || idx != bv.Index { + idx++ + return + } + found = true + e, err := valueToEval(val, typedCoercionCollation(val.Type(), collations.CollationForType(val.Type(), bv.Collation)), nil) + if err != nil { + evalErr = err + return + } + tuple = append(tuple, e) + + }) + if err = errors.Join(loopErr, evalErr); err != nil { + return nil, err + } + if !found { + return nil, vterrors.VT13001("value not found in the bind variable") + } + } + return &evalTuple{t: tuple}, nil +} + +// typeof implements the expression interface +func (bv *TupleBindVariable) typeof(env *ExpressionEnv) (ctype, error) { + _, err := env.lookupBindVar(bv.Key) + if err != nil { + return ctype{}, err + } + + return ctype{Type: sqltypes.Tuple}, nil +} + +func (bv *TupleBindVariable) compile(c *compiler) (ctype, error) { + return ctype{}, c.unsupported(bv) +} diff --git a/go/vt/vtgate/evalengine/expr_tuple_bvar_test.go b/go/vt/vtgate/evalengine/expr_tuple_bvar_test.go new file mode 100644 index 00000000000..cad4d030d1a --- /dev/null +++ b/go/vt/vtgate/evalengine/expr_tuple_bvar_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package evalengine + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +// TestTupleBindVarEval tests TupleBindVariable eval function. +func TestTupleBindVarEval(t *testing.T) { + key := "vals" + c := &TupleBindVariable{ + Key: key, + Index: 1, + } + collation := collations.TypedCollation{ + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireUnicode, + } + + tcases := []struct { + tName string + bv *querypb.BindVariable + + expEval []eval + expErr string + }{{ + tName: "bind variable not provided", + expErr: "query arguments missing for vals", + }, { + tName: "bind variable provided - wrong type", + bv: sqltypes.Int64BindVariable(1), + expErr: "query argument 'vals' must be a tuple (is INT64)", + }, { + tName: "bind variable provided", + bv: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewVarChar("a")))}, + }, + expEval: []eval{newEvalText([]byte("a"), collation)}, + }, { + tName: "bind variable provided - multi values", + bv: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewVarChar("a"))), + sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(2), sqltypes.NewVarChar("b"))), + sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(3), sqltypes.NewVarChar("c"))), + }, + }, + expEval: []eval{ + newEvalText([]byte("a"), collation), + newEvalText([]byte("b"), collation), + newEvalText([]byte("c"), collation)}, + }} + + for _, tcase := range tcases { + t.Run(tcase.tName, func(t *testing.T) { + env := &ExpressionEnv{ + BindVars: make(map[string]*querypb.BindVariable), + } + if tcase.bv != nil { + env.BindVars[key] = tcase.bv + } + + res, err := c.eval(env) + if tcase.expErr != "" 
{ + require.ErrorContains(t, err, tcase.expErr) + return + } + require.Equal(t, sqltypes.Tuple, res.SQLType()) + resTuple := res.(*evalTuple) + require.Len(t, resTuple.t, len(tcase.expEval)) + for idx, e := range tcase.expEval { + require.Equal(t, e, resTuple.t[idx]) + } + }) + } +} + +// TestTupleBindVarTypeOf tests TupleBindVariable typeOf function. +func TestTupleBindVarTypeOf(t *testing.T) { + key := "vals" + c := &TupleBindVariable{ + Key: key, + Index: 1, + } + + tcases := []struct { + tName string + bv *querypb.BindVariable + + expErr string + }{{ + tName: "bind variable not provided", + expErr: "query arguments missing for vals", + }, { + // typeOf does not evaluate the bind variable value + tName: "bind variable provided - wrong type", + bv: sqltypes.Int64BindVariable(1), + }, { + tName: "bind variable provided", + bv: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewVarChar("a")))}, + }, + }, { + tName: "bind variable provided - multi values", + bv: &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{ + sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewVarChar("a"))), + sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(2), sqltypes.NewVarChar("b"))), + sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(3), sqltypes.NewVarChar("c"))), + }, + }, + }} + + for _, tcase := range tcases { + t.Run(tcase.tName, func(t *testing.T) { + env := &ExpressionEnv{ + BindVars: make(map[string]*querypb.BindVariable), + } + if tcase.bv != nil { + env.BindVars[key] = tcase.bv + } + + res, err := c.typeof(env) + if tcase.expErr != "" { + require.ErrorContains(t, err, tcase.expErr) + return + } + require.Equal(t, sqltypes.Tuple, res.Type) + }) + } +} diff --git a/go/vt/vtgate/evalengine/fn_base64.go b/go/vt/vtgate/evalengine/fn_base64.go index d404d391dd6..77baf060eb9 100644 --- 
a/go/vt/vtgate/evalengine/fn_base64.go +++ b/go/vt/vtgate/evalengine/fn_base64.go @@ -103,14 +103,14 @@ func (call *builtinToBase64) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(1, t, 0, false) + c.asm.Convert_xb(1, t, nil) } col := typedCoercionCollation(t, c.collation) c.asm.Fn_TO_BASE64(t, col) c.asm.jumpDestination(skip) - return ctype{Type: t, Col: col}, nil + return ctype{Type: t, Flag: nullableFlags(str.Flag), Col: col}, nil } func (call *builtinFromBase64) eval(env *ExpressionEnv) (eval, error) { @@ -149,11 +149,11 @@ func (call *builtinFromBase64) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(1, t, 0, false) + c.asm.Convert_xb(1, t, nil) } c.asm.Fn_FROM_BASE64(t) c.asm.jumpDestination(skip) - return ctype{Type: t, Col: collationBinary}, nil + return ctype{Type: t, Flag: nullableFlags(str.Flag), Col: collationBinary}, nil } diff --git a/go/vt/vtgate/evalengine/fn_bit.go b/go/vt/vtgate/evalengine/fn_bit.go index 66edffe268f..9444ee086af 100644 --- a/go/vt/vtgate/evalengine/fn_bit.go +++ b/go/vt/vtgate/evalengine/fn_bit.go @@ -61,11 +61,11 @@ func (expr *builtinBitCount) compile(c *compiler) (ctype, error) { if ct.Type == sqltypes.VarBinary && !ct.isHexOrBitLiteral() { c.asm.BitCount_b() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Col: collationBinary}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(ct.Flag), Col: collationBinary}, nil } _ = c.compileToBitwiseUint64(ct, 1) c.asm.BitCount_u() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Col: collationBinary}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(ct.Flag), Col: collationBinary}, nil } diff --git a/go/vt/vtgate/evalengine/fn_compare.go b/go/vt/vtgate/evalengine/fn_compare.go index cf40deae94b..1deec6752ef 100644 --- a/go/vt/vtgate/evalengine/fn_compare.go +++ b/go/vt/vtgate/evalengine/fn_compare.go @@ -32,7 +32,7 @@ type ( CallExpr } 
- multiComparisonFunc func(args []eval, cmp int) (eval, error) + multiComparisonFunc func(collationEnv *collations.Environment, args []eval, cmp int) (eval, error) builtinMultiComparison struct { CallExpr @@ -58,18 +58,21 @@ func (b *builtinCoalesce) eval(env *ExpressionEnv) (eval, error) { func (b *builtinCoalesce) compile(c *compiler) (ctype, error) { var ( - ta typeAggregation - ca collationAggregation - local = collations.Local() + ta typeAggregation + ca collationAggregation ) + f := flagNullable for _, arg := range b.Arguments { tt, err := arg.compile(c) if err != nil { return ctype{}, err } - ta.add(tt.Type, tt.Flag) - if err := ca.add(local, tt.Col); err != nil { + if !tt.nullable() { + f = 0 + } + ta.add(tt.Type, tt.Flag, tt.Size, tt.Scale) + if err := ca.add(tt.Col, c.env.CollationEnv()); err != nil { return ctype{}, err } } @@ -87,7 +90,7 @@ func (b *builtinCoalesce) compile(c *compiler) (ctype, error) { return 1 }, "COALESCE (SP-%d) ... (SP-1)", args) - return ctype{Type: ta.result(), Flag: flagNullable, Col: ca.result()}, nil + return ctype{Type: ta.result(), Flag: f, Col: ca.result()}, nil } func getMultiComparisonFunc(args []eval) multiComparisonFunc { @@ -111,7 +114,7 @@ func getMultiComparisonFunc(args []eval) multiComparisonFunc { for _, arg := range args { if arg == nil { - return func(args []eval, cmp int) (eval, error) { + return func(collationEnv *collations.Environment, args []eval, cmp int) (eval, error) { return nil, nil } } @@ -162,7 +165,7 @@ func getMultiComparisonFunc(args []eval) multiComparisonFunc { panic("unexpected argument type") } -func compareAllInteger_u(args []eval, cmp int) (eval, error) { +func compareAllInteger_u(_ *collations.Environment, args []eval, cmp int) (eval, error) { x := args[0].(*evalUint64) for _, arg := range args[1:] { y := arg.(*evalUint64) @@ -173,7 +176,7 @@ func compareAllInteger_u(args []eval, cmp int) (eval, error) { return x, nil } -func compareAllInteger_i(args []eval, cmp int) (eval, error) { +func 
compareAllInteger_i(_ *collations.Environment, args []eval, cmp int) (eval, error) { x := args[0].(*evalInt64) for _, arg := range args[1:] { y := arg.(*evalInt64) @@ -184,7 +187,7 @@ func compareAllInteger_i(args []eval, cmp int) (eval, error) { return x, nil } -func compareAllFloat(args []eval, cmp int) (eval, error) { +func compareAllFloat(_ *collations.Environment, args []eval, cmp int) (eval, error) { candidateF, ok := evalToFloat(args[0]) if !ok { return nil, errDecimalOutOfRange @@ -209,7 +212,7 @@ func evalDecimalPrecision(e eval) int32 { return 0 } -func compareAllDecimal(args []eval, cmp int) (eval, error) { +func compareAllDecimal(_ *collations.Environment, args []eval, cmp int) (eval, error) { decExtreme := evalToDecimal(args[0], 0, 0).dec precExtreme := evalDecimalPrecision(args[0]) @@ -226,14 +229,12 @@ func compareAllDecimal(args []eval, cmp int) (eval, error) { return newEvalDecimalWithPrec(decExtreme, precExtreme), nil } -func compareAllText(args []eval, cmp int) (eval, error) { - env := collations.Local() - +func compareAllText(collationEnv *collations.Environment, args []eval, cmp int) (eval, error) { var charsets = make([]charset.Charset, 0, len(args)) var ca collationAggregation for _, arg := range args { col := evalCollation(arg) - if err := ca.add(env, col); err != nil { + if err := ca.add(col, collationEnv); err != nil { return nil, err } charsets = append(charsets, colldata.Lookup(col.Collation).Charset()) @@ -261,7 +262,7 @@ func compareAllText(args []eval, cmp int) (eval, error) { return newEvalText(b1, tc), nil } -func compareAllBinary(args []eval, cmp int) (eval, error) { +func compareAllBinary(_ *collations.Environment, args []eval, cmp int) (eval, error) { candidateB := args[0].ToRawBytes() for _, arg := range args[1:] { @@ -279,30 +280,36 @@ func (call *builtinMultiComparison) eval(env *ExpressionEnv) (eval, error) { if err != nil { return nil, err } - return getMultiComparisonFunc(args)(args, call.cmp) + return 
getMultiComparisonFunc(args)(env.collationEnv, args, call.cmp) } func (call *builtinMultiComparison) compile_c(c *compiler, args []ctype) (ctype, error) { - env := collations.Local() - var ca collationAggregation + var f typeFlag for _, arg := range args { - if err := ca.add(env, arg.Col); err != nil { + f |= nullableFlags(arg.Flag) + if err := ca.add(arg.Col, c.env.CollationEnv()); err != nil { return ctype{}, err } } tc := ca.result() c.asm.Fn_MULTICMP_c(len(args), call.cmp < 0, tc) - return ctype{Type: sqltypes.VarChar, Col: tc}, nil + return ctype{Type: sqltypes.VarChar, Flag: f, Col: tc}, nil } func (call *builtinMultiComparison) compile_d(c *compiler, args []ctype) (ctype, error) { + var f typeFlag + var size int32 + var scale int32 for i, tt := range args { + f |= nullableFlags(tt.Flag) + size = max(size, tt.Size) + scale = max(scale, tt.Scale) c.compileToDecimal(tt, len(args)-i) } c.asm.Fn_MULTICMP_d(len(args), call.cmp < 0) - return ctype{Type: sqltypes.Decimal, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Decimal, Flag: f, Col: collationNumeric, Size: size, Scale: scale}, nil } func (call *builtinMultiComparison) compile(c *compiler) (ctype, error) { @@ -314,6 +321,7 @@ func (call *builtinMultiComparison) compile(c *compiler) (ctype, error) { text int binary int args []ctype + nullable bool ) /* @@ -333,6 +341,7 @@ func (call *builtinMultiComparison) compile(c *compiler) (ctype, error) { args = append(args, tt) + nullable = nullable || tt.nullable() switch tt.Type { case sqltypes.Int64: signed++ @@ -346,19 +355,25 @@ func (call *builtinMultiComparison) compile(c *compiler) (ctype, error) { text++ case sqltypes.Blob, sqltypes.Binary, sqltypes.VarBinary: binary++ + case sqltypes.Null: + nullable = true default: - return ctype{}, c.unsupported(call) + panic("unexpected argument type") } } + var f typeFlag + if nullable { + f |= flagNullable + } if signed+unsigned == len(args) { if signed == len(args) { c.asm.Fn_MULTICMP_i(len(args), call.cmp < 
0) - return ctype{Type: sqltypes.Int64, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: f, Col: collationNumeric}, nil } if unsigned == len(args) { c.asm.Fn_MULTICMP_u(len(args), call.cmp < 0) - return ctype{Type: sqltypes.Uint64, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Uint64, Flag: f, Col: collationNumeric}, nil } return call.compile_d(c, args) } @@ -367,14 +382,14 @@ func (call *builtinMultiComparison) compile(c *compiler) (ctype, error) { return call.compile_c(c, args) } c.asm.Fn_MULTICMP_b(len(args), call.cmp < 0) - return ctype{Type: sqltypes.VarBinary, Col: collationBinary}, nil + return ctype{Type: sqltypes.VarBinary, Flag: f, Col: collationBinary}, nil } else { if floats > 0 { for i, tt := range args { c.compileToFloat(tt, len(args)-i) } c.asm.Fn_MULTICMP_f(len(args), call.cmp < 0) - return ctype{Type: sqltypes.Float64, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Float64, Flag: f, Col: collationNumeric}, nil } if decimals > 0 { return call.compile_d(c, args) diff --git a/go/vt/vtgate/evalengine/fn_crypto.go b/go/vt/vtgate/evalengine/fn_crypto.go index 31783291ce7..8a3765028d9 100644 --- a/go/vt/vtgate/evalengine/fn_crypto.go +++ b/go/vt/vtgate/evalengine/fn_crypto.go @@ -62,13 +62,13 @@ func (call *builtinMD5) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.Binary, 0, false) + c.asm.Convert_xb(1, sqltypes.Binary, nil) } col := typedCoercionCollation(sqltypes.VarChar, c.collation) c.asm.Fn_MD5(col) c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.VarChar, Col: col, Flag: str.Flag}, nil + return ctype{Type: sqltypes.VarChar, Col: col, Flag: nullableFlags(str.Flag)}, nil } type builtinSHA1 struct { @@ -105,12 +105,12 @@ func (call *builtinSHA1) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.Binary, 0, false) + c.asm.Convert_xb(1, sqltypes.Binary, nil) } col := 
typedCoercionCollation(sqltypes.VarChar, c.collation) c.asm.Fn_SHA1(col) c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.VarChar, Col: col, Flag: str.Flag}, nil + return ctype{Type: sqltypes.VarChar, Col: col, Flag: nullableFlags(str.Flag)}, nil } type builtinSHA2 struct { @@ -174,7 +174,7 @@ func (call *builtinSHA2) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(2, sqltypes.Binary, 0, false) + c.asm.Convert_xb(2, sqltypes.Binary, nil) } switch bits.Type { @@ -189,7 +189,7 @@ func (call *builtinSHA2) compile(c *compiler) (ctype, error) { col := typedCoercionCollation(sqltypes.VarChar, c.collation) c.asm.Fn_SHA2(col) c.asm.jumpDestination(skip1, skip2) - return ctype{Type: sqltypes.VarChar, Col: col, Flag: str.Flag | flagNullable}, nil + return ctype{Type: sqltypes.VarChar, Col: col, Flag: nullableFlags(str.Flag)}, nil } type builtinRandomBytes struct { @@ -244,5 +244,5 @@ func (call *builtinRandomBytes) compile(c *compiler) (ctype, error) { c.asm.Fn_RandomBytes() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.VarBinary, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil + return ctype{Type: sqltypes.VarBinary, Col: collationBinary, Flag: nullableFlags(arg.Flag) | flagNullable}, nil } diff --git a/go/vt/vtgate/evalengine/fn_hex.go b/go/vt/vtgate/evalengine/fn_hex.go index 8552ab888ae..90d3bd1a208 100644 --- a/go/vt/vtgate/evalengine/fn_hex.go +++ b/go/vt/vtgate/evalengine/fn_hex.go @@ -73,13 +73,13 @@ func (call *builtinHex) compile(c *compiler) (ctype, error) { case str.isTextual(): c.asm.Fn_HEX_c(t, col) default: - c.asm.Convert_xc(1, t, c.collation, 0, false) + c.asm.Convert_xc(1, t, c.collation, nil) c.asm.Fn_HEX_c(t, col) } c.asm.jumpDestination(skip) - return ctype{Type: t, Col: col}, nil + return ctype{Type: t, Flag: nullableFlags(str.Flag), Col: col}, nil } type builtinUnhex struct { @@ -191,7 +191,7 @@ func (call *builtinUnhex) compile(c *compiler) (ctype, error) { case str.Type 
== sqltypes.TypeJSON: c.asm.Fn_UNHEX_j(t) default: - c.asm.Convert_xb(1, t, 0, false) + c.asm.Convert_xb(1, t, nil) c.asm.Fn_UNHEX_b(t) } diff --git a/go/vt/vtgate/evalengine/fn_info.go b/go/vt/vtgate/evalengine/fn_info.go index d8a8aa41947..53d2a1a8892 100644 --- a/go/vt/vtgate/evalengine/fn_info.go +++ b/go/vt/vtgate/evalengine/fn_info.go @@ -18,7 +18,6 @@ package evalengine import ( "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/servenv" ) type builtinUser struct { @@ -47,12 +46,12 @@ type builtinVersion struct { var _ IR = (*builtinVersion)(nil) func (call *builtinVersion) eval(env *ExpressionEnv) (eval, error) { - return newEvalText([]byte(servenv.MySQLServerVersion()), collationUtf8mb3), nil + return newEvalText([]byte(env.currentVersion()), collationUtf8mb3), nil } func (*builtinVersion) compile(c *compiler) (ctype, error) { c.asm.Fn_Version() - return ctype{Type: sqltypes.Datetime, Col: collationUtf8mb3}, nil + return ctype{Type: sqltypes.VarChar, Col: collationUtf8mb3}, nil } type builtinDatabase struct { @@ -71,7 +70,7 @@ func (call *builtinDatabase) eval(env *ExpressionEnv) (eval, error) { func (*builtinDatabase) compile(c *compiler) (ctype, error) { c.asm.Fn_Database() - return ctype{Type: sqltypes.Datetime, Col: collationUtf8mb3}, nil + return ctype{Type: sqltypes.VarChar, Col: collationUtf8mb3}, nil } func (call *builtinDatabase) constant() bool { diff --git a/go/vt/vtgate/evalengine/fn_json.go b/go/vt/vtgate/evalengine/fn_json.go index 53930b4678b..54038e28339 100644 --- a/go/vt/vtgate/evalengine/fn_json.go +++ b/go/vt/vtgate/evalengine/fn_json.go @@ -402,7 +402,7 @@ func (call *builtinJSONContainsPath) compile(c *compiler) (ctype, error) { } c.asm.Fn_JSON_CONTAINS_PATH(match, paths) - return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean | flagNullable}, nil } type jsonMatch int8 diff --git a/go/vt/vtgate/evalengine/fn_misc.go 
b/go/vt/vtgate/evalengine/fn_misc.go index 2f228ff55fa..8813b62f823 100644 --- a/go/vt/vtgate/evalengine/fn_misc.go +++ b/go/vt/vtgate/evalengine/fn_misc.go @@ -120,7 +120,7 @@ func (call *builtinInetAton) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } c.asm.Fn_INET_ATON() @@ -185,7 +185,7 @@ func (call *builtinInet6Aton) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } c.asm.Fn_INET6_ATON() @@ -291,13 +291,13 @@ func (call *builtinIsIPV4) compile(c *compiler) (ctype, error) { switch { case arg.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } c.asm.Fn_IS_IPV4() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(arg.Flag) | flagIsBoolean, Col: collationNumeric}, nil } func (call *builtinIsIPV4Compat) eval(env *ExpressionEnv) (eval, error) { @@ -328,7 +328,7 @@ func (call *builtinIsIPV4Compat) compile(c *compiler) (ctype, error) { c.asm.SetBool(1, false) } c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(arg.Flag) | flagIsBoolean, Col: collationNumeric}, nil } func (call *builtinIsIPV4Mapped) eval(env *ExpressionEnv) (eval, error) { @@ -359,7 +359,7 @@ func (call *builtinIsIPV4Mapped) compile(c *compiler) (ctype, error) { c.asm.SetBool(1, false) } c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(arg.Flag) | flagIsBoolean, Col: collationNumeric}, 
nil } func (call *builtinIsIPV6) eval(env *ExpressionEnv) (eval, error) { @@ -385,13 +385,13 @@ func (call *builtinIsIPV6) compile(c *compiler) (ctype, error) { switch { case arg.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } c.asm.Fn_IS_IPV6() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(arg.Flag) | flagIsBoolean, Col: collationNumeric}, nil } func errIncorrectUUID(in []byte, f string) error { @@ -459,11 +459,11 @@ func (call *builtinBinToUUID) compile(c *compiler) (ctype, error) { switch { case arg.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } col := typedCoercionCollation(sqltypes.VarChar, call.collate) - ct := ctype{Type: sqltypes.VarChar, Flag: arg.Flag, Col: col} + ct := ctype{Type: sqltypes.VarChar, Flag: nullableFlags(arg.Flag), Col: col} if len(call.Arguments) == 1 { c.asm.Fn_BIN_TO_UUID0(col) @@ -512,12 +512,12 @@ func (call *builtinIsUUID) compile(c *compiler) (ctype, error) { switch { case arg.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } c.asm.Fn_IS_UUID() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Flag: arg.Flag | flagIsBoolean, Col: collationNumeric}, nil + return ctype{Type: sqltypes.Int64, Flag: nullableFlags(arg.Flag) | flagIsBoolean, Col: collationNumeric}, nil } func (call *builtinUUID) eval(env *ExpressionEnv) (eval, error) { @@ -580,10 +580,10 @@ func (call *builtinUUIDToBin) compile(c *compiler) (ctype, error) { switch { case arg.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } - ct := ctype{Type: sqltypes.VarBinary, Flag: arg.Flag, Col: collationBinary} + ct := ctype{Type: 
sqltypes.VarBinary, Flag: nullableFlags(arg.Flag), Col: collationBinary} if len(call.Arguments) == 1 { c.asm.Fn_UUID_TO_BIN0() diff --git a/go/vt/vtgate/evalengine/fn_numeric.go b/go/vt/vtgate/evalengine/fn_numeric.go index 7bdd8d8b92e..bd835d88d78 100644 --- a/go/vt/vtgate/evalengine/fn_numeric.go +++ b/go/vt/vtgate/evalengine/fn_numeric.go @@ -149,7 +149,7 @@ func (expr *builtinAbs) compile(c *compiler) (ctype, error) { skip := c.compileNullCheck1(arg) - convt := ctype{Type: arg.Type, Col: collationNumeric, Flag: arg.Flag} + convt := ctype{Type: arg.Type, Col: collationNumeric, Flag: nullableFlags(arg.Flag)} switch arg.Type { case sqltypes.Int64: c.asm.Fn_ABS_i() @@ -302,7 +302,7 @@ func (expr *builtinAtan2) compile(c *compiler) (ctype, error) { c.compileToFloat(arg2, 1) c.asm.Fn_ATAN2() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg1.Flag | arg2.Flag}, nil + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: nullableFlags(arg1.Flag | arg2.Flag)}, nil } type builtinCos struct { @@ -538,7 +538,7 @@ func (expr *builtinLog) compile(c *compiler) (ctype, error) { c.compileToFloat(arg2, 1) c.asm.Fn_LOG() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg1.Flag | arg2.Flag}, nil + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: nullableFlags(arg1.Flag | arg2.Flag)}, nil } type builtinLog10 struct { @@ -638,7 +638,7 @@ func (expr *builtinPow) compile(c *compiler) (ctype, error) { c.compileToFloat(arg2, 1) c.asm.Fn_POW() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: arg1.Flag | arg2.Flag | flagNullable}, nil + return ctype{Type: sqltypes.Float64, Col: collationNumeric, Flag: nullableFlags(arg1.Flag | arg2.Flag)}, nil } type builtinSign struct { @@ -718,7 +718,7 @@ func (expr *builtinSign) compile(c *compiler) (ctype, error) { } c.asm.jumpDestination(skip) - return ctype{Type: 
sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: nullableFlags(arg.Flag)}, nil } type builtinSqrt struct { @@ -1267,7 +1267,7 @@ func (expr *builtinCrc32) compile(c *compiler) (ctype, error) { switch { case arg.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.Binary, 0, false) + c.asm.Convert_xb(1, sqltypes.Binary, nil) } c.asm.Fn_CRC32() @@ -1332,7 +1332,7 @@ func (call *builtinConv) eval(env *ExpressionEnv) (eval, error) { i, err := fastparse.ParseInt64(nStr.string(), int(fromBase)) u = uint64(i) if errors.Is(err, fastparse.ErrOverflow) { - u, _ = fastparse.ParseUint64(nStr.string(), int(fromBase)) + u, _ = fastparse.ParseUint64WithNeg(nStr.string(), int(fromBase)) } } @@ -1374,7 +1374,7 @@ func (expr *builtinConv) compile(c *compiler) (ctype, error) { switch { case n.isTextual(): default: - c.asm.Convert_xb(3, t, 0, false) + c.asm.Convert_xb(3, t, nil) } if n.isHexOrBitLiteral() { diff --git a/go/vt/vtgate/evalengine/fn_regexp.go b/go/vt/vtgate/evalengine/fn_regexp.go index 4897ba63f6a..a94b9a83aee 100644 --- a/go/vt/vtgate/evalengine/fn_regexp.go +++ b/go/vt/vtgate/evalengine/fn_regexp.go @@ -91,7 +91,7 @@ func position(val *evalInt64, limit int64, f string) (int64, error) { return pos, nil } -func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.TypedCollation, icuregex.RegexpFlag, error) { +func evalRegexpCollation(env *collations.Environment, input, pat eval, f string) (eval, eval, collations.TypedCollation, icuregex.RegexpFlag, error) { var typedCol collations.TypedCollation var err error @@ -101,7 +101,6 @@ func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.Type patCol := patBytes.col.Collation if (inputCol == collations.CollationBinaryID && patCol != collations.CollationBinaryID) || (inputCol != collations.CollationBinaryID && patCol == collations.CollationBinaryID) { - env := collations.Local() inputColName := 
env.LookupName(inputCol) patColName := env.LookupName(patCol) return nil, nil, typedCol, 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CharacterSetMismatch, "Character set '%s' cannot be used in conjunction with '%s' in call to %s.", inputColName, patColName, f) @@ -109,13 +108,13 @@ func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.Type } } - input, pat, typedCol, err = mergeAndCoerceCollations(input, pat) + input, pat, typedCol, err = mergeAndCoerceCollations(input, pat, env) if err != nil { return nil, nil, collations.TypedCollation{}, 0, err } var flags icuregex.RegexpFlag - var collation = collations.Local().LookupName(typedCol.Collation) + collation := env.LookupName(typedCol.Collation) if strings.Contains(collation, "_ci") { flags |= icuregex.CaseInsensitive } @@ -123,11 +122,10 @@ func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.Type return input, pat, typedCol, flags, nil } -func compileRegexpCollation(input, pat ctype, f string) (collations.TypedCollation, icuregex.RegexpFlag, error) { +func compileRegexpCollation(env *collations.Environment, input, pat ctype, f string) (collations.TypedCollation, icuregex.RegexpFlag, error) { var merged collations.TypedCollation var err error - env := collations.Local() if input.isTextual() && pat.isTextual() { inputCol := input.Col.Collation patCol := pat.Col.Collation @@ -140,7 +138,7 @@ func compileRegexpCollation(input, pat ctype, f string) (collations.TypedCollati } if input.Col.Collation != pat.Col.Collation { - merged, _, _, err = mergeCollations(input.Col, pat.Col, input.Type, pat.Type) + merged, _, _, err = mergeCollations(input.Col, pat.Col, input.Type, pat.Type, env) } else { merged = input.Col } @@ -212,13 +210,15 @@ func compileRegex(pat eval, c colldata.Charset, flags icuregex.RegexpFlag) (*icu return nil, err } +var errNonConstantRegexp = errors.New("non-constant regexp") + func compileConstantRegex(c *compiler, args TupleExpr, pat, mt 
int, cs collations.TypedCollation, flags icuregex.RegexpFlag, f string) (*icuregex.Pattern, error) { pattern := args[pat] if !pattern.constant() { - return nil, c.unsupported(pattern) + return nil, errNonConstantRegexp } var err error - staticEnv := EmptyExpressionEnv() + staticEnv := EmptyExpressionEnv(c.env) pattern, err = simplifyExpr(staticEnv, pattern) if err != nil { return nil, err @@ -227,7 +227,7 @@ func compileConstantRegex(c *compiler, args TupleExpr, pat, mt int, cs collation if len(args) > mt { fl := args[mt] if !fl.constant() { - return nil, c.unsupported(fl) + return nil, errNonConstantRegexp } fl, err = simplifyExpr(staticEnv, fl) if err != nil { @@ -240,7 +240,7 @@ func compileConstantRegex(c *compiler, args TupleExpr, pat, mt int, cs collation } if pattern.(*Literal).inner == nil { - return nil, c.unsupported(pattern) + return nil, errNonConstantRegexp } innerPat, err := evalToVarchar(pattern.(*Literal).inner, cs.Collation, true) @@ -278,7 +278,7 @@ func (r *builtinRegexpLike) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_like") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_like") if err != nil { return nil, err } @@ -348,7 +348,7 @@ func (r *builtinRegexpLike) compile(c *compiler) (ctype, error) { skips = append(skips, c.compileNullCheckArg(f, 2)) } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_like") + merged, flags, err := compileRegexpCollation(c.env.CollationEnv(), input, pat, "regexp_like") if err != nil { return ctype{}, err } @@ -387,7 +387,7 @@ func (r *builtinRegexpInstr) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_instr") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_instr") if err != nil { return nil, err } @@ -551,11 +551,11 @@ func (r 
*builtinRegexpInstr) compile(c *compiler) (ctype, error) { switch { case matchType.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_instr") + merged, flags, err := compileRegexpCollation(c.env.CollationEnv(), input, pat, "regexp_instr") if err != nil { return ctype{}, err } @@ -594,7 +594,7 @@ func (r *builtinRegexpSubstr) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_substr") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_substr") if err != nil { return nil, err } @@ -728,11 +728,11 @@ func (r *builtinRegexpSubstr) compile(c *compiler) (ctype, error) { switch { case matchType.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_substr") + merged, flags, err := compileRegexpCollation(c.env.CollationEnv(), input, pat, "regexp_substr") if err != nil { return ctype{}, err } @@ -828,7 +828,7 @@ func (r *builtinRegexpReplace) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_replace") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_replace") if err != nil { return nil, err } @@ -968,11 +968,11 @@ func (r *builtinRegexpReplace) compile(c *compiler) (ctype, error) { switch { case matchType.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_replace") + merged, flags, err := compileRegexpCollation(c.env.CollationEnv(), input, pat, "regexp_replace") if err != nil { return ctype{}, err } 
diff --git a/go/vt/vtgate/evalengine/fn_string.go b/go/vt/vtgate/evalengine/fn_string.go index 8d61905d237..663475327e5 100644 --- a/go/vt/vtgate/evalengine/fn_string.go +++ b/go/vt/vtgate/evalengine/fn_string.go @@ -18,6 +18,7 @@ package evalengine import ( "bytes" + "math" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/charset" @@ -29,6 +30,21 @@ import ( ) type ( + builtinField struct { + CallExpr + collate collations.ID + } + + builtinElt struct { + CallExpr + collate collations.ID + } + + builtinInsert struct { + CallExpr + collate collations.ID + } + builtinChangeCase struct { CallExpr upcase bool @@ -47,6 +63,16 @@ type ( CallExpr } + builtinReverse struct { + CallExpr + collate collations.ID + } + + builtinSpace struct { + CallExpr + collate collations.ID + } + builtinOrd struct { CallExpr collate collations.ID @@ -62,9 +88,8 @@ type ( builtinWeightString struct { CallExpr - Cast string - Len int - HasLen bool + Cast string + Len *int } builtinLeftRight struct { @@ -89,19 +114,473 @@ type ( collate collations.ID trim sqlparser.TrimType } + + builtinSubstring struct { + CallExpr + collate collations.ID + } + + builtinLocate struct { + CallExpr + collate collations.ID + } + + builtinChar struct { + CallExpr + collate collations.ID + } + + builtinRepeat struct { + CallExpr + collate collations.ID + } + + builtinConcat struct { + CallExpr + collate collations.ID + } + + builtinConcatWs struct { + CallExpr + collate collations.ID + } + + builtinReplace struct { + CallExpr + collate collations.ID + } ) +var _ IR = (*builtinField)(nil) +var _ IR = (*builtinElt)(nil) +var _ IR = (*builtinInsert)(nil) var _ IR = (*builtinChangeCase)(nil) var _ IR = (*builtinCharLength)(nil) var _ IR = (*builtinLength)(nil) var _ IR = (*builtinASCII)(nil) +var _ IR = (*builtinReverse)(nil) +var _ IR = (*builtinSpace)(nil) var _ IR = (*builtinOrd)(nil) var _ IR = (*builtinBitLength)(nil) var _ IR = (*builtinCollation)(nil) var _ IR = 
(*builtinWeightString)(nil) var _ IR = (*builtinLeftRight)(nil) var _ IR = (*builtinPad)(nil) +var _ IR = (*builtinStrcmp)(nil) var _ IR = (*builtinTrim)(nil) +var _ IR = (*builtinSubstring)(nil) +var _ IR = (*builtinLocate)(nil) +var _ IR = (*builtinChar)(nil) +var _ IR = (*builtinRepeat)(nil) +var _ IR = (*builtinConcat)(nil) +var _ IR = (*builtinConcatWs)(nil) +var _ IR = (*builtinReplace)(nil) + +func fieldSQLType(arg sqltypes.Type, tt sqltypes.Type) sqltypes.Type { + if sqltypes.IsNull(arg) { + // If we have a NULL combined with only so far numerical types, + // we have to convert it all to DOUBLE. + if sqltypes.IsIntegral(tt) || sqltypes.IsDecimal(tt) { + return sqltypes.Float64 + } + return tt + } + + if typeIsTextual(arg) && typeIsTextual(tt) { + return sqltypes.VarChar + } else if sqltypes.IsIntegral(arg) && sqltypes.IsIntegral(tt) { + return sqltypes.Int64 + } + + if (sqltypes.IsIntegral(arg) || sqltypes.IsDecimal(arg)) && (sqltypes.IsIntegral(tt) || sqltypes.IsDecimal(tt)) { + return sqltypes.Decimal + } + + return sqltypes.Float64 +} + +func (call *builtinField) eval(env *ExpressionEnv) (eval, error) { + args, err := call.args(env) + if err != nil { + return nil, err + } + if args[0] == nil { + return newEvalInt64(0), nil + } + + // If the arguments contain both integral and string values + // MySQL converts all the arguments to DOUBLE + tt := args[0].SQLType() + + for _, arg := range args[1:] { + var at sqltypes.Type + if arg == nil { + at = sqltypes.Null + } else { + at = arg.SQLType() + } + + tt = fieldSQLType(at, tt) + } + + if tt == sqltypes.Int64 { + tar := evalToInt64(args[0]) + + for i, arg := range args[1:] { + if arg == nil { + continue + } + + e := evalToInt64(arg) + if tar.i == e.i { + return newEvalInt64(int64(i + 1)), nil + } + } + } else if tt == sqltypes.VarChar { + col := evalCollation(args[0]) + collation := colldata.Lookup(col.Collation) + tar := args[0].(*evalBytes) + + for i, arg := range args[1:] { + if arg == nil { + continue + } 
+ + e, err := evalToVarchar(arg, col.Collation, true) + if err != nil { + return nil, err + } + + // Compare target and current string + if collation.Collate(tar.bytes, e.bytes, false) == 0 { + return newEvalInt64(int64(i + 1)), nil + } + } + } else if tt == sqltypes.Decimal { + tar := evalToDecimal(args[0], 0, 0) + + for i, arg := range args[1:] { + if arg == nil { + continue + } + + e := evalToDecimal(arg, 0, 0) + if tar.dec.Equal(e.dec) { + return newEvalInt64(int64(i + 1)), nil + } + } + } else { + tar, _ := evalToFloat(args[0]) + + for i, arg := range args[1:] { + if arg == nil { + continue + } + + e, _ := evalToFloat(arg) + if tar.f == e.f { + return newEvalInt64(int64(i + 1)), nil + } + } + } + + return newEvalInt64(0), nil +} + +func (call *builtinField) compile(c *compiler) (ctype, error) { + strs := make([]ctype, len(call.Arguments)) + + for i, arg := range call.Arguments { + var err error + strs[i], err = arg.compile(c) + if err != nil { + return ctype{}, err + } + } + + // If the arguments contain both integral and string values + // MySQL converts all the arguments to DOUBLE + tt := strs[0].Type + col := strs[0].Col + + for _, str := range strs { + tt = fieldSQLType(str.Type, tt) + } + + if tt == sqltypes.Int64 { + for i, str := range strs { + offset := len(strs) - i + skip := c.compileNullCheckOffset(str, offset) + + switch str.Type { + case sqltypes.Int64: + default: + c.asm.Convert_xi(offset) + } + c.asm.jumpDestination(skip) + } + + c.asm.Fn_FIELD_i(len(call.Arguments)) + } else if tt == sqltypes.VarChar { + collation := colldata.Lookup(col.Collation) + c.asm.Fn_FIELD_b(len(call.Arguments), collation) + } else if tt == sqltypes.Decimal { + for i, str := range strs { + offset := len(strs) - i + skip := c.compileNullCheckOffset(str, offset) + + switch str.Type { + case sqltypes.Decimal: + default: + c.asm.Convert_xd(offset, 0, 0) + } + c.asm.jumpDestination(skip) + } + + c.asm.Fn_FIELD_d(len(call.Arguments)) + } else { + for i, str := range strs { + 
offset := len(strs) - i + skip := c.compileNullCheckOffset(str, offset) + + switch str.Type { + case sqltypes.Float64: + default: + c.asm.Convert_xf(offset) + } + + c.asm.jumpDestination(skip) + } + + c.asm.Fn_FIELD_f(len(call.Arguments)) + } + + return ctype{Type: sqltypes.Int64, Col: collationNumeric}, nil +} + +func (call *builtinElt) eval(env *ExpressionEnv) (eval, error) { + var ca collationAggregation + tt := sqltypes.VarChar + + args, err := call.args(env) + if err != nil { + return nil, err + } + + if args[0] == nil { + return nil, nil + } + + i := evalToInt64(args[0]).i + if i < 1 || i >= int64(len(args)) || args[i] == nil { + return nil, nil + } + + for _, arg := range args[1:] { + if arg == nil { + continue + } + + tt = concatSQLType(arg.SQLType(), tt) + err = ca.add(evalCollation(arg), env.collationEnv) + if err != nil { + return nil, err + } + } + + tc := ca.result() + // If we only had numbers, we instead fall back to the default + // collation instead of using the numeric collation. + if tc.Coercibility == collations.CoerceNumeric { + tc = typedCoercionCollation(tt, call.collate) + } + + b, err := evalToVarchar(args[i], tc.Collation, true) + if err != nil { + return nil, err + } + + return newEvalRaw(tt, b.bytes, b.col), nil +} + +func (call *builtinElt) compile(c *compiler) (ctype, error) { + args := make([]ctype, len(call.Arguments)) + + var ca collationAggregation + tt := sqltypes.VarChar + + var skip *jump + for i, arg := range call.Arguments { + var err error + args[i], err = arg.compile(c) + if err != nil { + return ctype{}, nil + } + + if i == 0 { + skip = c.compileNullCheck1(args[i]) + continue + } + + tt = concatSQLType(args[i].Type, tt) + err = ca.add(args[i].Col, c.env.CollationEnv()) + if err != nil { + return ctype{}, err + } + } + + tc := ca.result() + // If we only had numbers, we instead fall back to the default + // collation instead of using the numeric collation. 
+ if tc.Coercibility == collations.CoerceNumeric { + tc = typedCoercionCollation(tt, call.collate) + } + + _ = c.compileToInt64(args[0], len(args)) + + for i, arg := range args[1:] { + offset := len(args) - (i + 1) + skip := c.compileNullCheckOffset(arg, offset) + + switch arg.Type { + case sqltypes.VarBinary, sqltypes.Binary, sqltypes.Blob: + if tc.Collation != collations.CollationBinaryID { + c.asm.Convert_xce(offset, arg.Type, tc.Collation) + } + case sqltypes.VarChar, sqltypes.Char, sqltypes.Text: + fromCharset := colldata.Lookup(arg.Col.Collation).Charset() + toCharset := colldata.Lookup(tc.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + c.asm.Convert_xce(offset, arg.Type, tc.Collation) + } + default: + c.asm.Convert_xce(offset, arg.Type, tc.Collation) + } + + c.asm.jumpDestination(skip) + } + + c.asm.Fn_ELT(len(args), tt, tc) + c.asm.jumpDestination(skip) + + return ctype{Type: tt, Col: tc, Flag: flagNullable}, nil +} + +func insert(str, newstr *evalBytes, pos, l int) []byte { + pos-- + + cs := colldata.Lookup(str.col.Collation).Charset() + strLen := charset.Length(cs, str.bytes) + + if pos < 0 || strLen <= pos { + return str.bytes + } + if l < 0 { + l = strLen + } + + front := charset.Slice(cs, str.bytes, 0, pos) + var back []byte + if pos <= math.MaxInt-l && pos+l < strLen { + back = charset.Slice(cs, str.bytes, pos+l, strLen) + } + + res := make([]byte, len(front)+len(newstr.bytes)+len(back)) + + copy(res[:len(front)], front) + copy(res[len(front):], newstr.bytes) + copy(res[len(front)+len(newstr.bytes):], back) + + return res +} + +func (call *builtinInsert) eval(env *ExpressionEnv) (eval, error) { + args, err := call.args(env) + if err != nil { + return nil, err + } + if args[0] == nil || args[1] == nil || args[2] == nil || args[3] == nil { + return nil, nil + } + + str, ok := args[0].(*evalBytes) + if !ok { + str, err = evalToVarchar(args[0], call.collate, true) + if err != nil { + return nil, err + } + } + + 
pos := evalToInt64(args[1]).i + l := evalToInt64(args[2]).i + + newstr, err := evalToVarchar(args[3], str.col.Collation, true) + if err != nil { + return nil, err + } + + res := insert(str, newstr, int(pos), int(l)) + if !validMaxLength(int64(len(res)), 1) { + return nil, nil + } + return newEvalText(res, str.col), nil +} + +func (call *builtinInsert) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + pos, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + l, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + + newstr, err := call.Arguments[3].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck4(str, pos, l, newstr) + + _ = c.compileToInt64(pos, 3) + _ = c.compileToInt64(l, 2) + + if err != nil { + return ctype{}, nil + } + + col := str.Col + + switch { + case str.isTextual(): + default: + c.asm.Convert_xce(4, sqltypes.VarChar, c.collation) + col = typedCoercionCollation(sqltypes.VarChar, c.collation) + } + + switch { + case newstr.isTextual(): + fromCharset := colldata.Lookup(newstr.Col.Collation).Charset() + toCharset := colldata.Lookup(col.Collation).Charset() + if fromCharset != toCharset && !toCharset.IsSuperset(fromCharset) { + c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) + } + default: + c.asm.Convert_xce(1, sqltypes.VarChar, col.Collation) + } + + c.asm.Fn_INSERT(col) + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.VarChar, Col: col, Flag: flagNullable}, nil +} func (call *builtinChangeCase) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) @@ -146,7 +625,7 @@ func (call *builtinChangeCase) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, nil) } c.asm.Fn_LUCASE(call.upcase) @@ -175,11 +654,94 @@ func 
(call *builtinCharLength) eval(env *ExpressionEnv) (eval, error) { } } -func (call *builtinCharLength) compile(c *compiler) (ctype, error) { - return c.compileFn_length(call.Arguments[0], c.asm.Fn_CHAR_LENGTH) +func (call *builtinCharLength) compile(c *compiler) (ctype, error) { + return c.compileFn_length(call.Arguments[0], c.asm.Fn_CHAR_LENGTH) +} + +func (call *builtinLength) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + return newEvalInt64(int64(len(arg.ToRawBytes()))), nil +} + +func (call *builtinLength) compile(c *compiler) (ctype, error) { + return c.compileFn_length(call.Arguments[0], c.asm.Fn_LENGTH) +} + +func (call *builtinBitLength) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + return newEvalInt64(int64(len(arg.ToRawBytes())) * 8), nil +} + +func (call *builtinBitLength) compile(c *compiler) (ctype, error) { + return c.compileFn_length(call.Arguments[0], c.asm.Fn_BIT_LENGTH) +} + +func (call *builtinASCII) eval(env *ExpressionEnv) (eval, error) { + arg, err := call.arg1(env) + if err != nil { + return nil, err + } + if arg == nil { + return nil, nil + } + + b, ok := arg.(*evalBytes) + if !ok { + b = evalToBinary(arg) + } + if len(b.bytes) == 0 { + return newEvalInt64(0), nil + } + return newEvalInt64(int64(b.bytes[0])), nil +} + +func (call *builtinASCII) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(str) + + switch { + case str.isTextual(): + default: + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) + } + + c.asm.Fn_ASCII() + c.asm.jumpDestination(skip) + + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: nullableFlags(str.Flag)}, nil +} + +func reverse(in *evalBytes) []byte { + cs := 
colldata.Lookup(in.col.Collation).Charset() + b := in.bytes + + out, end := make([]byte, len(b)), len(b) + for len(b) > 0 { + _, size := cs.DecodeRune(b) + copy(out[end-size:end], b[:size]) + b = b[size:] + end -= size + } + return out } -func (call *builtinLength) eval(env *ExpressionEnv) (eval, error) { +func (call *builtinReverse) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if err != nil { return nil, err @@ -187,29 +749,45 @@ func (call *builtinLength) eval(env *ExpressionEnv) (eval, error) { if arg == nil { return nil, nil } - return newEvalInt64(int64(len(arg.ToRawBytes()))), nil -} -func (call *builtinLength) compile(c *compiler) (ctype, error) { - return c.compileFn_length(call.Arguments[0], c.asm.Fn_LENGTH) + b, ok := arg.(*evalBytes) + if !ok { + b, err = evalToVarchar(arg, call.collate, true) + if err != nil { + return nil, err + } + } + + return newEvalText(reverse(b), b.col), nil } -func (call *builtinBitLength) eval(env *ExpressionEnv) (eval, error) { - arg, err := call.arg1(env) +func (call *builtinReverse) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) if err != nil { - return nil, err + return ctype{}, err } - if arg == nil { - return nil, nil + + skip := c.compileNullCheck1(arg) + + switch { + case arg.isTextual(): + default: + c.asm.Convert_xc(1, sqltypes.VarChar, c.collation, nil) } - return newEvalInt64(int64(len(arg.ToRawBytes())) * 8), nil + + c.asm.Fn_REVERSE() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: arg.Col, Flag: flagNullable}, nil } -func (call *builtinBitLength) compile(c *compiler) (ctype, error) { - return c.compileFn_length(call.Arguments[0], c.asm.Fn_BIT_LENGTH) +func space(num int64) []byte { + num = max(num, 0) + + spaces := bytes.Repeat([]byte{0x20}, int(num)) + return spaces } -func (call *builtinASCII) eval(env *ExpressionEnv) (eval, error) { +func (call *builtinSpace) eval(env *ExpressionEnv) (eval, error) { arg, err := call.arg1(env) if 
err != nil { return nil, err @@ -218,34 +796,29 @@ func (call *builtinASCII) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - b, ok := arg.(*evalBytes) - if !ok { - b = evalToBinary(arg) - } - if len(b.bytes) == 0 { - return newEvalInt64(0), nil + num := evalToInt64(arg).i + + if !validMaxLength(1, num) { + return nil, nil } - return newEvalInt64(int64(b.bytes[0])), nil + col := typedCoercionCollation(sqltypes.VarChar, call.collate) + return newEvalText(space(num), col), nil } -func (call *builtinASCII) compile(c *compiler) (ctype, error) { - str, err := call.Arguments[0].compile(c) +func (call *builtinSpace) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) if err != nil { return ctype{}, err } - skip := c.compileNullCheck1(str) + skip := c.compileNullCheck1(arg) - switch { - case str.isTextual(): - default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) - } + _ = c.compileToInt64(arg, 1) - c.asm.Fn_ASCII() + col := typedCoercionCollation(sqltypes.VarChar, call.collate) + c.asm.Fn_SPACE(col) c.asm.jumpDestination(skip) - - return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: str.Flag}, nil + return ctype{Type: sqltypes.VarChar, Col: col, Flag: flagNullable}, nil } func charOrd(b []byte, coll collations.ID) int64 { @@ -294,13 +867,13 @@ func (call *builtinOrd) compile(c *compiler) (ctype, error) { case str.isTextual(): col = str.Col.Collation default: - c.asm.Convert_xc(1, sqltypes.VarChar, call.collate, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, call.collate, nil) } c.asm.Fn_ORD(col) c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: str.Flag}, nil + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: nullableFlags(str.Flag)}, nil } // maxRepeatLength is the maximum number of times a string can be repeated. @@ -317,11 +890,6 @@ func (call *builtinOrd) compile(c *compiler) (ctype, error) { // - `> max_allowed_packet`, no error and returns `NULL`. 
const maxRepeatLength = 1073741824 -type builtinRepeat struct { - CallExpr - collate collations.ID -} - func (call *builtinRepeat) eval(env *ExpressionEnv) (eval, error) { arg1, arg2, err := call.arg2(env) if err != nil { @@ -377,7 +945,7 @@ func (expr *builtinRepeat) compile(c *compiler) (ctype, error) { switch { case str.isTextual(): default: - c.asm.Convert_xc(2, sqltypes.VarChar, c.collation, 0, false) + c.asm.Convert_xc(2, sqltypes.VarChar, c.collation, nil) } _ = c.compileToInt64(repeat, 1) @@ -396,7 +964,7 @@ func (c *builtinCollation) eval(env *ExpressionEnv) (eval, error) { // the collation of a `COLLATION` expr is hardcoded to `utf8mb3_general_ci`, // not to the default collation of our connection. this is probably a bug in MySQL, but we match it - return newEvalText([]byte(collations.Local().LookupName(col.Collation)), collationUtf8mb3), nil + return newEvalText([]byte(env.collationEnv.LookupName(col.Collation)), collationUtf8mb3), nil } func (expr *builtinCollation) compile(c *compiler) (ctype, error) { @@ -407,7 +975,7 @@ func (expr *builtinCollation) compile(c *compiler) (ctype, error) { skip := c.asm.jumpFrom() - c.asm.Fn_COLLATION(collationUtf8mb3) + c.asm.Fn_COLLATION(c.env.CollationEnv(), collationUtf8mb3) c.asm.jumpDestination(skip) return ctype{Type: sqltypes.VarChar, Col: collationUtf8mb3}, nil @@ -429,7 +997,7 @@ func (c *builtinWeightString) eval(env *ExpressionEnv) (eval, error) { typ = sqltypes.Blob } - weights, _, err = evalWeightString(weights, evalToBinary(input), c.Len, 0) + weights, _, err = evalWeightString(weights, evalToBinary(input), *c.Len, 0) if err != nil { return nil, err } @@ -464,7 +1032,7 @@ func (c *builtinWeightString) eval(env *ExpressionEnv) (eval, error) { } else { var strLen int if c.Cast == "char" { - strLen = c.Len + strLen = *c.Len } weights, _, err = evalWeightString(weights, val, strLen, 0) } @@ -494,14 +1062,14 @@ func (call *builtinWeightString) compile(c *compiler) (ctype, error) { skip := 
c.compileNullCheck1(str) if call.Cast == "binary" { if !sqltypes.IsBinary(str.Type) { - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } switch str.Type { case sqltypes.Blob, sqltypes.Text, sqltypes.TypeJSON: typ = sqltypes.Blob } - c.asm.Fn_WEIGHT_STRING(typ, call.Len) + c.asm.Fn_WEIGHT_STRING(typ, *call.Len) c.asm.jumpDestination(skip) return ctype{Type: sqltypes.VarBinary, Flag: flagNullable | flagNull, Col: collationBinary}, nil } @@ -522,7 +1090,7 @@ func (call *builtinWeightString) compile(c *compiler) (ctype, error) { } var strLen int if call.Cast == "char" { - strLen = call.Len + strLen = *call.Len } c.asm.Fn_WEIGHT_STRING(typ, strLen) @@ -592,7 +1160,7 @@ func (call *builtinLeftRight) compile(c *compiler) (ctype, error) { case str.isTextual(): col = str.Col default: - c.asm.Convert_xc(2, sqltypes.VarChar, col.Collation, 0, false) + c.asm.Convert_xc(2, sqltypes.VarChar, col.Collation, nil) } _ = c.compileToInt64(l, 1) @@ -719,7 +1287,7 @@ func (call *builtinPad) compile(c *compiler) (ctype, error) { c.asm.Fn_RPAD(col) } c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.VarChar, Col: col}, nil + return ctype{Type: sqltypes.VarChar, Flag: flagNullable, Col: col}, nil } func strcmpCollate(left, right []byte, col collations.ID) int64 { @@ -755,7 +1323,7 @@ func (l *builtinStrcmp) eval(env *ExpressionEnv) (eval, error) { col1 := evalCollation(left) col2 := evalCollation(right) - mcol, _, _, err := colldata.Merge(collations.Local(), col1, col2, colldata.CoercionOptions{ + mcol, _, _, err := colldata.Merge(env.collationEnv, col1, col2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -795,7 +1363,7 @@ func (expr *builtinStrcmp) compile(c *compiler) (ctype, error) { if sqltypes.IsNumber(lt.Type) || sqltypes.IsNumber(rt.Type) { mcol = collationNumeric } else { - mcol, _, _, err = colldata.Merge(collations.Local(), lt.Col, rt.Col, colldata.CoercionOptions{ + mcol, _, _, err = 
colldata.Merge(c.env.CollationEnv(), lt.Col, rt.Col, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -814,10 +1382,10 @@ func (expr *builtinStrcmp) compile(c *compiler) (ctype, error) { c.asm.Strcmp(mcol) c.asm.jumpDestination(skip1, skip2) - return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagNullable}, nil + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: nullableFlags(lt.Flag | rt.Flag)}, nil } -func (call builtinTrim) eval(env *ExpressionEnv) (eval, error) { +func (call *builtinTrim) eval(env *ExpressionEnv) (eval, error) { str, err := call.arg1(env) if err != nil { return nil, err @@ -872,7 +1440,7 @@ func (call builtinTrim) eval(env *ExpressionEnv) (eval, error) { } } -func (call builtinTrim) compile(c *compiler) (ctype, error) { +func (call *builtinTrim) compile(c *compiler) (ctype, error) { str, err := call.Arguments[0].compile(c) if err != nil { return ctype{}, err @@ -885,7 +1453,7 @@ func (call builtinTrim) compile(c *compiler) (ctype, error) { case str.isTextual(): col = str.Col default: - c.asm.Convert_xc(1, sqltypes.VarChar, col.Collation, 0, false) + c.asm.Convert_xc(1, sqltypes.VarChar, col.Collation, nil) } if len(call.Arguments) == 1 { @@ -898,7 +1466,7 @@ func (call builtinTrim) compile(c *compiler) (ctype, error) { c.asm.Fn_TRIM1(col) } c.asm.jumpDestination(skip1) - return ctype{Type: sqltypes.VarChar, Col: col}, nil + return ctype{Type: sqltypes.VarChar, Flag: nullableFlags(str.Flag), Col: col}, nil } pat, err := call.Arguments[1].compile(c) @@ -929,12 +1497,211 @@ func (call builtinTrim) compile(c *compiler) (ctype, error) { } c.asm.jumpDestination(skip1, skip2) - return ctype{Type: sqltypes.VarChar, Col: col}, nil + return ctype{Type: sqltypes.VarChar, Flag: flagNullable, Col: col}, nil +} + +func (call *builtinSubstring) eval(env *ExpressionEnv) (eval, error) { + str, err := call.Arguments[0].eval(env) + if err != nil || str == nil { + return nil, err + } + + tt := 
str.SQLType() + text, ok := str.(*evalBytes) + if !ok { + text, err = evalToVarchar(str, call.collate, true) + if err != nil { + return nil, err + } + tt = sqltypes.VarChar + } + + p, err := call.Arguments[1].eval(env) + if err != nil || p == nil { + return nil, err + } + + var l eval + if len(call.Arguments) > 2 { + l, err = call.Arguments[2].eval(env) + if err != nil || l == nil { + return nil, err + } + } + + pos := evalToInt64(p).i + if pos == 0 { + return newEvalRaw(tt, nil, text.col), nil + } + cs := colldata.Lookup(text.col.Collation).Charset() + end := int64(charset.Length(cs, text.bytes)) + + if pos < 0 { + pos += end + 1 + } + if pos < 1 || pos > end { + return newEvalRaw(tt, nil, text.col), nil + } + + if len(call.Arguments) > 2 { + ll := evalToInt64(l).i + if ll < 1 { + return newEvalRaw(tt, nil, text.col), nil + } + if ll > end-pos+1 { + ll = end - pos + 1 + } + end = pos + ll - 1 + } + res := charset.Slice(cs, text.bytes, int(pos-1), int(end)) + return newEvalRaw(tt, res, text.col), nil +} + +func (call *builtinSubstring) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + p, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + tt := str.Type + skip1 := c.compileNullCheck2(str, p) + + col := typedCoercionCollation(sqltypes.VarChar, c.collation) + switch { + case str.isTextual(): + col = str.Col + default: + tt = sqltypes.VarChar + c.asm.Convert_xc(2, tt, col.Collation, nil) + } + _ = c.compileToInt64(p, 1) + + cs := colldata.Lookup(str.Col.Collation).Charset() + var skip2 *jump + if len(call.Arguments) > 2 { + l, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skip2 = c.compileNullCheck2(str, l) + _ = c.compileToInt64(l, 1) + c.asm.Fn_SUBSTRING3(tt, cs, col) + } else { + c.asm.Fn_SUBSTRING2(tt, cs, col) + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: tt, Col: col, Flag: flagNullable}, nil +} + 
+func (call *builtinLocate) eval(env *ExpressionEnv) (eval, error) { + substr, err := call.Arguments[0].eval(env) + if err != nil || substr == nil { + return nil, err + } + + str, err := call.Arguments[1].eval(env) + if err != nil || str == nil { + return nil, err + } + + if _, ok := str.(*evalBytes); !ok { + str, err = evalToVarchar(str, call.collate, true) + if err != nil { + return nil, err + } + } + + col := str.(*evalBytes).col.Collation + substr, err = evalToVarchar(substr, col, true) + if err != nil { + return nil, err + } + + pos := int64(1) + if len(call.Arguments) > 2 { + p, err := call.Arguments[2].eval(env) + if err != nil || p == nil { + return nil, err + } + pos = evalToInt64(p).i + if pos < 1 || pos > math.MaxInt { + return newEvalInt64(0), nil + } + } + + var coll colldata.Collation + if typeIsTextual(substr.SQLType()) && typeIsTextual(str.SQLType()) { + coll = colldata.Lookup(col) + } else { + coll = colldata.Lookup(collations.CollationBinaryID) + } + found := colldata.Index(coll, str.ToRawBytes(), substr.ToRawBytes(), int(pos)-1) + return newEvalInt64(int64(found) + 1), nil } -type builtinConcat struct { - CallExpr - collate collations.ID +func (call *builtinLocate) compile(c *compiler) (ctype, error) { + substr, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + str, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + skip1 := c.compileNullCheck2(substr, str) + var skip2 *jump + if len(call.Arguments) > 2 { + l, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + skip2 = c.compileNullCheck2(str, l) + _ = c.compileToInt64(l, 1) + } + + if !str.isTextual() { + c.asm.Convert_xce(len(call.Arguments)-1, sqltypes.VarChar, c.collation) + str.Col = collations.TypedCollation{ + Collation: c.collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + fromCharset := colldata.Lookup(substr.Col.Collation).Charset() + 
toCharset := colldata.Lookup(str.Col.Collation).Charset() + if !substr.isTextual() || (fromCharset != toCharset && !toCharset.IsSuperset(fromCharset)) { + c.asm.Convert_xce(len(call.Arguments), sqltypes.VarChar, str.Col.Collation) + substr.Col = collations.TypedCollation{ + Collation: str.Col.Collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + var coll colldata.Collation + if typeIsTextual(substr.Type) && typeIsTextual(str.Type) { + coll = colldata.Lookup(str.Col.Collation) + } else { + coll = colldata.Lookup(collations.CollationBinaryID) + } + + if len(call.Arguments) > 2 { + c.asm.Locate3(coll) + } else { + c.asm.Locate2(coll) + } + + c.asm.jumpDestination(skip1, skip2) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagNullable}, nil } func concatSQLType(arg sqltypes.Type, tt sqltypes.Type) sqltypes.Type { @@ -966,7 +1733,6 @@ func concatConvert(buf []byte, str *evalBytes, tc collations.TypedCollation) ([] } func (call *builtinConcat) eval(env *ExpressionEnv) (eval, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar @@ -979,7 +1745,7 @@ func (call *builtinConcat) eval(env *ExpressionEnv) (eval, error) { args = append(args, a) tt = concatSQLType(a.SQLType(), tt) - err = ca.add(local, evalCollation(a)) + err = ca.add(evalCollation(a), env.collationEnv) if err != nil { return nil, err } @@ -1014,7 +1780,6 @@ func (call *builtinConcat) eval(env *ExpressionEnv) (eval, error) { } func (call *builtinConcat) compile(c *compiler) (ctype, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar var f typeFlag @@ -1031,7 +1796,7 @@ func (call *builtinConcat) compile(c *compiler) (ctype, error) { args = append(args, a) tt = concatSQLType(a.Type, tt) - err = ca.add(local, a.Col) + err = ca.add(a.Col, c.env.CollationEnv()) if err != nil { return ctype{}, err } @@ -1067,13 +1832,7 @@ func (call *builtinConcat) compile(c *compiler) 
(ctype, error) { return ctype{Type: tt, Flag: f, Col: tc}, nil } -type builtinConcatWs struct { - CallExpr - collate collations.ID -} - func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar @@ -1093,7 +1852,7 @@ func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { args = append(args, a) tt = concatSQLType(a.SQLType(), tt) - err = ca.add(local, evalCollation(a)) + err = ca.add(evalCollation(a), env.collationEnv) if err != nil { return nil, err } @@ -1143,7 +1902,6 @@ func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { } func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar @@ -1156,7 +1914,7 @@ func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { } tt = concatSQLType(a.Type, tt) - err = ca.add(local, a.Col) + err = ca.add(a.Col, c.env.CollationEnv()) if err != nil { return ctype{}, err } @@ -1204,3 +1962,196 @@ func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { return ctype{Type: tt, Flag: args[0].Flag, Col: tc}, nil } + +func (call *builtinChar) eval(env *ExpressionEnv) (eval, error) { + vals := make([]eval, 0, len(call.Arguments)) + for _, arg := range call.Arguments { + a, err := arg.eval(env) + if err != nil { + return nil, err + } + if a == nil { + continue + } + vals = append(vals, a) + } + + buf := make([]byte, 0, len(vals)) + for _, v := range vals { + buf = encodeChar(buf, uint32(evalToInt64(v).i)) + } + if call.collate == collations.CollationBinaryID { + return newEvalBinary(buf), nil + } + + cs := colldata.Lookup(call.collate).Charset() + if !charset.Validate(cs, buf) { + return nil, nil + } + + return newEvalText(buf, collations.TypedCollation{ + Collation: call.collate, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + }), nil +} + +func (call *builtinChar) 
compile(c *compiler) (ctype, error) { + for _, arg := range call.Arguments { + a, err := arg.compile(c) + if err != nil { + return ctype{}, err + } + j := c.compileNullCheck1(a) + switch a.Type { + case sqltypes.Int64: + // No-op, already correct type + case sqltypes.Uint64: + c.asm.Convert_ui(1) + default: + c.asm.Convert_xi(1) + } + c.asm.jumpDestination(j) + } + tt := sqltypes.VarBinary + if call.collate != collations.CollationBinaryID { + tt = sqltypes.VarChar + } + col := collations.TypedCollation{ + Collation: call.collate, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + c.asm.Fn_CHAR(tt, col, len(call.Arguments)) + return ctype{Type: tt, Flag: flagNullable, Col: col}, nil +} + +func encodeChar(buf []byte, i uint32) []byte { + switch { + case i < 0x100: + buf = append(buf, byte(i)) + case i < 0x10000: + buf = append(buf, byte(i>>8), byte(i)) + case i < 0x1000000: + buf = append(buf, byte(i>>16), byte(i>>8), byte(i)) + default: + buf = append(buf, byte(i>>24), byte(i>>16), byte(i>>8), byte(i)) + } + return buf +} + +func (call *builtinReplace) eval(env *ExpressionEnv) (eval, error) { + str, err := call.Arguments[0].eval(env) + if err != nil || str == nil { + return nil, err + } + + fromStr, err := call.Arguments[1].eval(env) + if err != nil || fromStr == nil { + return nil, err + } + + toStr, err := call.Arguments[2].eval(env) + if err != nil || toStr == nil { + return nil, err + } + + if _, ok := str.(*evalBytes); !ok { + str, err = evalToVarchar(str, call.collate, true) + if err != nil { + return nil, err + } + } + + col := str.(*evalBytes).col + fromStr, err = evalToVarchar(fromStr, col.Collation, true) + if err != nil { + return nil, err + } + + toStr, err = evalToVarchar(toStr, col.Collation, true) + if err != nil { + return nil, err + } + + strBytes := str.(*evalBytes).bytes + fromBytes := fromStr.(*evalBytes).bytes + toBytes := toStr.(*evalBytes).bytes + + out := replace(strBytes, fromBytes, toBytes) + return 
newEvalRaw(str.SQLType(), out, col), nil +} + +func (call *builtinReplace) compile(c *compiler) (ctype, error) { + str, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + fromStr, err := call.Arguments[1].compile(c) + if err != nil { + return ctype{}, err + } + + toStr, err := call.Arguments[2].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck3(str, fromStr, toStr) + if !str.isTextual() { + c.asm.Convert_xce(3, sqltypes.VarChar, c.collation) + str.Col = collations.TypedCollation{ + Collation: c.collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + fromCharset := colldata.Lookup(fromStr.Col.Collation).Charset() + toCharset := colldata.Lookup(toStr.Col.Collation).Charset() + strCharset := colldata.Lookup(str.Col.Collation).Charset() + if !fromStr.isTextual() || (fromCharset != strCharset && !strCharset.IsSuperset(fromCharset)) { + c.asm.Convert_xce(2, sqltypes.VarChar, str.Col.Collation) + fromStr.Col = collations.TypedCollation{ + Collation: str.Col.Collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + if !toStr.isTextual() || (toCharset != strCharset && !strCharset.IsSuperset(toCharset)) { + c.asm.Convert_xce(1, sqltypes.VarChar, str.Col.Collation) + toStr.Col = collations.TypedCollation{ + Collation: str.Col.Collation, + Coercibility: collations.CoerceCoercible, + Repertoire: collations.RepertoireASCII, + } + } + + c.asm.Replace() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.VarChar, Col: str.Col, Flag: flagNullable}, nil +} + +func replace(str, from, to []byte) []byte { + if len(from) == 0 { + return str + } + n := bytes.Count(str, from) + if n == 0 { + return str + } + + out := make([]byte, len(str)+n*(len(to)-len(from))) + end := 0 + start := 0 + for i := 0; i < n; i++ { + pos := start + bytes.Index(str[start:], from) + end += copy(out[end:], str[start:pos]) + end += 
copy(out[end:], to) + start = pos + len(from) + } + end += copy(out[end:], str[start:]) + return out[0:end] +} diff --git a/go/vt/vtgate/evalengine/fn_time.go b/go/vt/vtgate/evalengine/fn_time.go index 430b975974b..5a253799b7f 100644 --- a/go/vt/vtgate/evalengine/fn_time.go +++ b/go/vt/vtgate/evalengine/fn_time.go @@ -29,6 +29,14 @@ import ( var SystemTime = time.Now +const maxTimePrec = datetime.DefaultPrecision + +// The length of a datetime converted to a numerical value is always 14 characters, +// see for example "20240404102732". We also have a `.` since we know it's a decimal +// and then additionally the number of decimals behind the dot. So total is always +// the input datetime size + 15. +const decimalSizeBase = 15 + type ( builtinNow struct { CallExpr @@ -105,6 +113,30 @@ type ( collate collations.ID } + builtinLastDay struct { + CallExpr + } + + builtinToDays struct { + CallExpr + } + + builtinFromDays struct { + CallExpr + } + + builtinSecToTime struct { + CallExpr + } + + builtinTimeToSec struct { + CallExpr + } + + builtinToSeconds struct { + CallExpr + } + builtinQuarter struct { CallExpr } @@ -166,6 +198,12 @@ var _ IR = (*builtinMicrosecond)(nil) var _ IR = (*builtinMinute)(nil) var _ IR = (*builtinMonth)(nil) var _ IR = (*builtinMonthName)(nil) +var _ IR = (*builtinLastDay)(nil) +var _ IR = (*builtinToDays)(nil) +var _ IR = (*builtinFromDays)(nil) +var _ IR = (*builtinSecToTime)(nil) +var _ IR = (*builtinTimeToSec)(nil) +var _ IR = (*builtinToSeconds)(nil) var _ IR = (*builtinQuarter)(nil) var _ IR = (*builtinSecond)(nil) var _ IR = (*builtinTime)(nil) @@ -179,27 +217,23 @@ var _ IR = (*builtinYearWeek)(nil) func (call *builtinNow) eval(env *ExpressionEnv) (eval, error) { now := env.time(call.utc) if call.onlyTime { - buf := datetime.Time_hh_mm_ss.Format(now, call.prec) - return newEvalRaw(sqltypes.Time, buf, collationBinary), nil + return newEvalTime(now.Time, int(call.prec)), nil } else { - buf := 
datetime.DateTime_YYYY_MM_DD_hh_mm_ss.Format(now, call.prec) - return newEvalRaw(sqltypes.Datetime, buf, collationBinary), nil + return newEvalDateTime(now, int(call.prec), false), nil } } func (call *builtinNow) compile(c *compiler) (ctype, error) { - var format *datetime.Strftime var t sqltypes.Type if call.onlyTime { - format = datetime.Time_hh_mm_ss t = sqltypes.Time + c.asm.Fn_NowTime(call.prec, call.utc) } else { - format = datetime.DateTime_YYYY_MM_DD_hh_mm_ss t = sqltypes.Datetime + c.asm.Fn_Now(call.prec, call.utc) } - c.asm.Fn_Now(t, format, call.prec, call.utc) - return ctype{Type: t, Col: collationBinary}, nil + return ctype{Type: t, Col: collationBinary, Size: int32(call.prec)}, nil } func (call *builtinNow) constant() bool { @@ -211,12 +245,12 @@ func (call *builtinSysdate) eval(env *ExpressionEnv) (eval, error) { if tz := env.currentTimezone(); tz != nil { now = now.In(tz) } - return newEvalRaw(sqltypes.Datetime, datetime.NewDateTimeFromStd(now).Format(call.prec), collationBinary), nil + return newEvalDateTime(datetime.NewDateTimeFromStd(now), int(call.prec), false), nil } func (call *builtinSysdate) compile(c *compiler) (ctype, error) { c.asm.Fn_Sysdate(call.prec) - return ctype{Type: sqltypes.Datetime, Col: collationBinary}, nil + return ctype{Type: sqltypes.Datetime, Col: collationBinary, Size: int32(call.prec)}, nil } func (call *builtinSysdate) constant() bool { @@ -225,7 +259,7 @@ func (call *builtinSysdate) constant() bool { func (call *builtinCurdate) eval(env *ExpressionEnv) (eval, error) { now := env.time(false) - return newEvalRaw(sqltypes.Date, datetime.Date_YYYY_MM_DD.Format(now, 0), collationBinary), nil + return newEvalDate(now.Date, false), nil } func (*builtinCurdate) compile(c *compiler) (ctype, error) { @@ -239,7 +273,7 @@ func (call *builtinCurdate) constant() bool { func (call *builtinUtcDate) eval(env *ExpressionEnv) (eval, error) { now := env.time(true) - return newEvalRaw(sqltypes.Date, datetime.Date_YYYY_MM_DD.Format(now, 0), 
collationBinary), nil + return newEvalDate(now.Date, false), nil } func (*builtinUtcDate) compile(c *compiler) (ctype, error) { @@ -264,8 +298,8 @@ func (b *builtinDateFormat) eval(env *ExpressionEnv) (eval, error) { case *evalTemporal: t = e.toDateTime(datetime.DefaultPrecision, env.now) default: - t = evalToDateTime(date, datetime.DefaultPrecision, env.now) - if t == nil || t.isZero() { + t = evalToDateTime(date, datetime.DefaultPrecision, env.now, false) + if t == nil { return nil, nil } } @@ -289,7 +323,7 @@ func (call *builtinDateFormat) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Datetime, sqltypes.Date: default: - c.asm.Convert_xDT_nz(1, datetime.DefaultPrecision) + c.asm.Convert_xDT(1, datetime.DefaultPrecision, false) } format, err := call.Arguments[1].compile(c) @@ -302,7 +336,7 @@ func (call *builtinDateFormat) compile(c *compiler) (ctype, error) { switch format.Type { case sqltypes.VarChar, sqltypes.VarBinary: default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } col := typedCoercionCollation(sqltypes.VarChar, c.collation) @@ -323,6 +357,10 @@ func convertTz(dt datetime.DateTime, from, to *time.Location) (datetime.DateTime if err != nil { return datetime.DateTime{}, false } + + if ts.Unix() < 0 || ts.Unix() >= maxUnixtime { + return dt, true + } return datetime.NewDateTimeFromStd(ts.In(to)), true } @@ -357,8 +395,8 @@ func (call *builtinConvertTz) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - dt := evalToDateTime(n, -1, env.now) - if dt == nil || dt.isZero() { + dt := evalToDateTime(n, -1, env.now, false) + if dt == nil { return nil, nil } @@ -366,7 +404,7 @@ func (call *builtinConvertTz) eval(env *ExpressionEnv) (eval, error) { if !ok { return nil, nil } - return newEvalDateTime(out, int(dt.prec)), nil + return newEvalDateTime(out, int(dt.prec), false), nil } func (call *builtinConvertTz) compile(c *compiler) (ctype, error) { @@ -388,24 +426,40 @@ func 
(call *builtinConvertTz) compile(c *compiler) (ctype, error) { switch { case from.isTextual(): default: - c.asm.Convert_xb(2, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(2, sqltypes.VarBinary, nil) } switch { case to.isTextual(): default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } + var prec int32 switch n.Type { case sqltypes.Datetime, sqltypes.Date: + prec = n.Size + case sqltypes.Decimal: + prec = n.Scale + c.asm.Convert_xDT(3, -1, false) + case sqltypes.Time: + prec = n.Size + c.asm.Convert_xDT(3, -1, false) + case sqltypes.VarChar, sqltypes.VarBinary: + if lit, ok := call.Arguments[0].(*Literal); ok && !n.isHexOrBitLiteral() { + if dt := evalToDateTime(lit.inner, -1, time.Now(), c.sqlmode.AllowZeroDate()); dt != nil { + prec = int32(dt.prec) + } + } + c.asm.Convert_xDT(3, -1, false) default: - c.asm.Convert_xDT_nz(3, -1) + prec = maxTimePrec + c.asm.Convert_xDT(3, -1, false) } c.asm.Fn_CONVERT_TZ() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: n.Flag | flagNullable}, nil + return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: n.Flag | flagNullable, Size: prec}, nil } func (b *builtinDate) eval(env *ExpressionEnv) (eval, error) { @@ -416,7 +470,7 @@ func (b *builtinDate) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, env.sqlmode.AllowZeroDate()) if d == nil { return nil, nil } @@ -434,7 +488,7 @@ func (call *builtinDate) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, c.sqlmode.AllowZeroDate()) } c.asm.jumpDestination(skip) @@ -449,7 +503,7 @@ func (b *builtinDayOfMonth) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -467,7 +521,7 
@@ func (call *builtinDayOfMonth) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_DAYOFMONTH() c.asm.jumpDestination(skip) @@ -482,11 +536,11 @@ func (b *builtinDayOfWeek) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) - if d == nil || d.isZero() { + d := evalToDate(date, env.now, false) + if d == nil { return nil, nil } - return newEvalInt64(int64(d.dt.Date.ToStdTime(time.Local).Weekday() + 1)), nil + return newEvalInt64(int64(d.dt.Date.Weekday() + 1)), nil } func (call *builtinDayOfWeek) compile(c *compiler) (ctype, error) { @@ -500,7 +554,7 @@ func (call *builtinDayOfWeek) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_DAYOFWEEK() c.asm.jumpDestination(skip) @@ -515,11 +569,11 @@ func (b *builtinDayOfYear) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) - if d == nil || d.isZero() { + d := evalToDate(date, env.now, false) + if d == nil { return nil, nil } - return newEvalInt64(int64(d.dt.Date.ToStdTime(time.Local).YearDay())), nil + return newEvalInt64(int64(d.dt.Date.ToStdTime(env.currentTimezone()).YearDay())), nil } func (call *builtinDayOfYear) compile(c *compiler) (ctype, error) { @@ -533,7 +587,7 @@ func (call *builtinDayOfYear) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_DAYOFYEAR() c.asm.jumpDestination(skip) @@ -557,50 +611,79 @@ func (b *builtinFromUnixtime) eval(env *ExpressionEnv) (eval, error) { switch ts := ts.(type) { case *evalInt64: + if ts.i < 0 || ts.i >= maxUnixtime { + return nil, nil + } sec = ts.i case *evalUint64: + if ts.u >= maxUnixtime { + return nil, nil + 
} sec = int64(ts.u) case *evalFloat: + if ts.f < 0 || ts.f >= maxUnixtime { + return nil, nil + } sf, ff := math.Modf(ts.f) sec = int64(sf) frac = int64(ff * 1e9) - prec = 6 + prec = maxTimePrec case *evalDecimal: + if ts.dec.Sign() < 0 { + return nil, nil + } sd, fd := ts.dec.QuoRem(decimal.New(1, 0), 0) sec, _ = sd.Int64() + if sec >= maxUnixtime { + return nil, nil + } frac, _ = fd.Mul(decimal.New(1, 9)).Int64() prec = int(ts.length) case *evalTemporal: if ts.prec == 0 { sec = ts.toInt64() + if sec < 0 || sec >= maxUnixtime { + return nil, nil + } } else { dec := ts.toDecimal() + if dec.Sign() < 0 { + return nil, nil + } sd, fd := dec.QuoRem(decimal.New(1, 0), 0) sec, _ = sd.Int64() + if sec >= maxUnixtime { + return nil, nil + } frac, _ = fd.Mul(decimal.New(1, 9)).Int64() prec = int(ts.prec) } case *evalBytes: if ts.isHexOrBitLiteral() { u, _ := ts.toNumericHex() + if u.u >= maxUnixtime { + return nil, nil + } sec = int64(u.u) } else { f, _ := evalToFloat(ts) + if f.f < 0 || f.f >= maxUnixtime { + return nil, nil + } sf, ff := math.Modf(f.f) sec = int64(sf) frac = int64(ff * 1e9) - prec = 6 + prec = maxTimePrec } default: f, _ := evalToFloat(ts) + if f.f < 0 || f.f >= maxUnixtime { + return nil, nil + } sf, ff := math.Modf(f.f) sec = int64(sf) frac = int64(ff * 1e9) - prec = 6 - } - - if sec < 0 || sec >= maxUnixtime { - return nil, nil + prec = maxTimePrec } t := time.Unix(sec, frac) @@ -608,7 +691,7 @@ func (b *builtinFromUnixtime) eval(env *ExpressionEnv) (eval, error) { t = t.In(tz) } - dt := newEvalDateTime(datetime.NewDateTimeFromStd(t), prec) + dt := newEvalDateTime(datetime.NewDateTimeFromStd(t), prec, env.sqlmode.AllowZeroDate()) if len(b.Arguments) == 1 { return dt, nil @@ -633,34 +716,45 @@ func (call *builtinFromUnixtime) compile(c *compiler) (ctype, error) { } skip1 := c.compileNullCheck1(arg) + var prec int32 switch arg.Type { case sqltypes.Int64: c.asm.Fn_FROM_UNIXTIME_i() case sqltypes.Uint64: c.asm.Fn_FROM_UNIXTIME_u() case sqltypes.Float64: + 
prec = maxTimePrec c.asm.Fn_FROM_UNIXTIME_f() case sqltypes.Decimal: + prec = arg.Size c.asm.Fn_FROM_UNIXTIME_d() case sqltypes.Datetime, sqltypes.Date, sqltypes.Time: - c.asm.Convert_Ti(1) - c.asm.Fn_FROM_UNIXTIME_i() + prec = arg.Size + if prec == 0 { + c.asm.Convert_Ti(1) + c.asm.Fn_FROM_UNIXTIME_i() + } else { + c.asm.Convert_Td(1) + c.asm.Fn_FROM_UNIXTIME_d() + } case sqltypes.VarChar, sqltypes.VarBinary: if arg.isHexOrBitLiteral() { c.asm.Convert_xu(1) c.asm.Fn_FROM_UNIXTIME_u() } else { + prec = maxTimePrec c.asm.Convert_xf(1) c.asm.Fn_FROM_UNIXTIME_f() } default: + prec = maxTimePrec c.asm.Convert_xf(1) c.asm.Fn_FROM_UNIXTIME_f() } if len(call.Arguments) == 1 { c.asm.jumpDestination(skip1) - return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil + return ctype{Type: sqltypes.Datetime, Col: collationBinary, Flag: arg.Flag | flagNullable, Size: prec}, nil } format, err := call.Arguments[1].compile(c) @@ -673,7 +767,7 @@ func (call *builtinFromUnixtime) compile(c *compiler) (ctype, error) { switch format.Type { case sqltypes.VarChar, sqltypes.VarBinary: default: - c.asm.Convert_xb(1, sqltypes.VarBinary, 0, false) + c.asm.Convert_xb(1, sqltypes.VarBinary, nil) } col := typedCoercionCollation(sqltypes.VarChar, c.collation) @@ -715,7 +809,7 @@ func (call *builtinHour) compile(c *compiler) (ctype, error) { return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil } -func yearDayToTime(y, yd int64) time.Time { +func yearDayToTime(loc *time.Location, y, yd int64) time.Time { if y >= 0 && y < 100 { if y < 70 { y += 2000 @@ -727,7 +821,7 @@ func yearDayToTime(y, yd int64) time.Time { if y < 0 || y > 9999 || yd < 1 || yd > math.MaxInt32 { return time.Time{} } - t := time.Date(int(y), time.January, 1, 0, 0, 0, 0, time.Local).AddDate(0, 0, int(yd-1)) + t := time.Date(int(y), time.January, 1, 0, 0, 0, 0, loc).AddDate(0, 0, int(yd-1)) if t.Year() > 9999 { return time.Time{} } @@ -755,11 +849,11 @@ 
func (b *builtinMakedate) eval(env *ExpressionEnv) (eval, error) { y := evalToInt64(year).i yd := evalToInt64(yearDay).i - t := yearDayToTime(y, yd) + t := yearDayToTime(env.currentTimezone(), y, yd) if t.IsZero() { return nil, nil } - return newEvalDate(datetime.NewDateTimeFromStd(t).Date), nil + return newEvalDate(datetime.NewDateTimeFromStd(t).Date, false), nil } func (call *builtinMakedate) compile(c *compiler) (ctype, error) { @@ -797,12 +891,12 @@ func (call *builtinMakedate) compile(c *compiler) (ctype, error) { func clampHourMinute(h, m int64) (int64, int64, bool, bool) { var clamped bool - if h > 838 || h < -838 { + if h > datetime.MaxHours || h < -datetime.MaxHours { clamped = true if h > 0 { - h = 838 + h = datetime.MaxHours } else { - h = -838 + h = -datetime.MaxHours } m = 59 } @@ -915,7 +1009,7 @@ func (b *builtinMaketime) eval(env *ExpressionEnv) (eval, error) { } m := evalToInt64(min).i - s := evalToNumeric(sec, false) + s := evalToNumeric(sec, true) var ok bool var t datetime.Time @@ -1017,6 +1111,8 @@ func (call *builtinMaketime) compile(c *compiler) (ctype, error) { c.asm.Convert_xf(1) c.asm.Fn_MAKETIME_f() } + case sqltypes.Datetime, sqltypes.Date, sqltypes.Timestamp, sqltypes.Time: + c.asm.Fn_MAKETIME_D() default: c.asm.Convert_xf(1) c.asm.Fn_MAKETIME_f() @@ -1100,7 +1196,7 @@ func (b *builtinMonth) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -1118,7 +1214,7 @@ func (call *builtinMonth) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_MONTH() c.asm.jumpDestination(skip) @@ -1133,7 +1229,7 @@ func (b *builtinMonthName) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, false) if d == nil { return nil, nil 
} @@ -1156,7 +1252,7 @@ func (call *builtinMonthName) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, false) } col := typedCoercionCollation(sqltypes.VarChar, call.collate) c.asm.Fn_MONTHNAME(col) @@ -1164,6 +1260,257 @@ func (call *builtinMonthName) compile(c *compiler) (ctype, error) { return ctype{Type: sqltypes.VarChar, Col: col, Flag: arg.Flag | flagNullable}, nil } +func lastDay(loc *time.Location, dt datetime.DateTime) datetime.Date { + ts := dt.Date.ToStdTime(loc) + firstDayOfMonth := time.Date(ts.Year(), ts.Month(), 1, 0, 0, 0, 0, loc) + lastDayOfMonth := firstDayOfMonth.AddDate(0, 1, -1) + + date := datetime.NewDateFromStd(lastDayOfMonth) + return date +} + +func (b *builtinLastDay) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + dt := evalToDateTime(date, -1, env.now, env.sqlmode.AllowZeroDate()) + if dt == nil || dt.isZero() { + return nil, nil + } + + d := lastDay(env.currentTimezone(), dt.dt) + return newEvalDate(d, env.sqlmode.AllowZeroDate()), nil +} + +func (call *builtinLastDay) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1, c.sqlmode.AllowZeroDate()) + } + c.asm.Fn_LAST_DAY() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Date, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinToDays) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + dt := evalToDate(date, env.now, false) + if dt == nil { + return nil, nil + } + + numDays := datetime.MysqlDayNumber(dt.dt.Date.Year(), dt.dt.Date.Month(), dt.dt.Date.Day()) + return 
newEvalInt64(int64(numDays)), nil +} + +func (call *builtinToDays) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xD(1, false) + } + c.asm.Fn_TO_DAYS() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinFromDays) eval(env *ExpressionEnv) (eval, error) { + arg, err := b.arg1(env) + if arg == nil { + return nil, nil + } + if err != nil { + return nil, err + } + + d := datetime.DateFromDayNumber(int(evalToInt64(arg).i)) + + // mysql returns NULL if year is greater than 9999 + if d.Year() > 9999 { + return nil, nil + } + return newEvalDate(d, true), nil +} + +func (call *builtinFromDays) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Int64: + default: + c.asm.Convert_xi(1) + } + + c.asm.Fn_FROM_DAYS() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Date, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinSecToTime) eval(env *ExpressionEnv) (eval, error) { + arg, err := b.arg1(env) + if arg == nil { + return nil, nil + } + if err != nil { + return nil, err + } + + var e *evalDecimal + prec := datetime.DefaultPrecision + + switch { + case sqltypes.IsDecimal(arg.SQLType()): + e = arg.(*evalDecimal) + case sqltypes.IsIntegral(arg.SQLType()): + e = evalToDecimal(arg, 0, 0) + case sqltypes.IsTextOrBinary(arg.SQLType()): + b := arg.(*evalBytes) + if b.isHexOrBitLiteral() { + e = evalToDecimal(arg, 0, 0) + } else { + e = evalToDecimal(arg, 0, datetime.DefaultPrecision) + } + case sqltypes.IsDateOrTime(arg.SQLType()): + d := arg.(*evalTemporal) + e = evalToDecimal(d, 0, int32(d.prec)) + prec = int(d.prec) + 
default: + e = evalToDecimal(arg, 0, datetime.DefaultPrecision) + } + + prec = min(int(evalDecimalPrecision(e)), prec) + return newEvalTime(datetime.NewTimeFromSeconds(e.dec), prec), nil +} + +func (call *builtinSecToTime) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch { + case sqltypes.IsDecimal(arg.Type): + c.asm.Fn_SEC_TO_TIME_d() + case sqltypes.IsIntegral(arg.Type): + c.asm.Convert_xd(1, 0, 0) + c.asm.Fn_SEC_TO_TIME_d() + case sqltypes.IsTextOrBinary(arg.Type) && arg.isHexOrBitLiteral(): + c.asm.Convert_xd(1, 0, 0) + c.asm.Fn_SEC_TO_TIME_d() + case sqltypes.IsDateOrTime(arg.Type): + c.asm.Fn_SEC_TO_TIME_D() + default: + c.asm.Convert_xd(1, 0, datetime.DefaultPrecision) + c.asm.Fn_SEC_TO_TIME_d() + } + + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Time, Flag: arg.Flag}, nil +} + +func (b *builtinTimeToSec) eval(env *ExpressionEnv) (eval, error) { + arg, err := b.arg1(env) + if arg == nil { + return nil, nil + } + if err != nil { + return nil, err + } + + d := evalToTime(arg, -1) + if d == nil { + return nil, nil + } + + sec := d.dt.Time.ToSeconds() + return newEvalInt64(sec), nil +} + +func (call *builtinTimeToSec) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime, sqltypes.Time: + default: + c.asm.Convert_xT(1, -1) + } + + c.asm.Fn_TIME_TO_SEC() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + +func (b *builtinToSeconds) eval(env *ExpressionEnv) (eval, error) { + date, err := b.arg1(env) + if err != nil { + return nil, err + } + if date == nil { + return nil, nil + } + dt := evalToDateTime(date, -1, env.now, false) + if dt == nil { + return nil, nil + } + + return 
newEvalInt64(dt.dt.ToSeconds()), nil +} + +func (call *builtinToSeconds) compile(c *compiler) (ctype, error) { + arg, err := call.Arguments[0].compile(c) + if err != nil { + return ctype{}, err + } + + skip := c.compileNullCheck1(arg) + + switch arg.Type { + case sqltypes.Date, sqltypes.Datetime: + default: + c.asm.Convert_xDT(1, -1, false) + } + c.asm.Fn_TO_SECONDS() + c.asm.jumpDestination(skip) + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag | flagNullable}, nil +} + func (b *builtinQuarter) eval(env *ExpressionEnv) (eval, error) { date, err := b.arg1(env) if err != nil { @@ -1172,7 +1519,7 @@ func (b *builtinQuarter) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -1190,7 +1537,7 @@ func (call *builtinQuarter) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_QUARTER() c.asm.jumpDestination(skip) @@ -1253,13 +1600,28 @@ func (call *builtinTime) compile(c *compiler) (ctype, error) { skip := c.compileNullCheck1(arg) + var prec int32 switch arg.Type { case sqltypes.Time: + case sqltypes.Datetime, sqltypes.Date: + prec = arg.Size + c.asm.Convert_xT(1, -1) + case sqltypes.Decimal: + prec = arg.Scale + c.asm.Convert_xT(1, -1) + case sqltypes.VarChar, sqltypes.VarBinary: + if lit, ok := call.Arguments[0].(*Literal); ok && !arg.isHexOrBitLiteral() { + if t := evalToTime(lit.inner, -1); t != nil { + prec = int32(t.prec) + } + } + c.asm.Convert_xT(1, -1) default: + prec = maxTimePrec c.asm.Convert_xT(1, -1) } c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Time, Col: collationBinary, Flag: arg.Flag | flagNullable}, nil + return ctype{Type: sqltypes.Time, Col: collationBinary, Flag: arg.Flag | flagNullable, Size: prec}, nil } func dateTimeUnixTimestamp(env *ExpressionEnv, date eval) 
evalNumeric { @@ -1268,8 +1630,8 @@ func dateTimeUnixTimestamp(env *ExpressionEnv, date eval) evalNumeric { case *evalTemporal: dt = e.toDateTime(int(e.prec), env.now) default: - dt = evalToDateTime(date, -1, env.now) - if dt == nil || dt.isZero() { + dt = evalToDateTime(date, -1, env.now, false) + if dt == nil { var prec int32 switch d := date.(type) { case *evalInt64, *evalUint64: @@ -1280,15 +1642,19 @@ func dateTimeUnixTimestamp(env *ExpressionEnv, date eval) evalNumeric { if d.isHexOrBitLiteral() { return newEvalInt64(0) } - prec = 6 + prec = maxTimePrec default: - prec = 6 + prec = maxTimePrec } return newEvalDecimalWithPrec(decimal.Zero, prec) } } ts := dt.dt.ToStdTime(env.now) + if ts.Unix() < 0 || ts.Unix() >= maxUnixtime { + return newEvalInt64(0) + } + if dt.prec == 0 { return newEvalInt64(ts.Unix()) } @@ -1336,7 +1702,30 @@ func (call *builtinUnixTimestamp) compile(c *compiler) (ctype, error) { c.asm.Fn_UNIX_TIMESTAMP1() c.asm.jumpDestination(skip) - return ctype{Type: sqltypes.Int64, Col: collationBinary, Flag: arg.Flag | flagAmbiguousType}, nil + switch arg.Type { + case sqltypes.Datetime, sqltypes.Time, sqltypes.Decimal: + if arg.Size == 0 { + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil + } + return ctype{Type: sqltypes.Decimal, Size: decimalSizeBase + arg.Size, Scale: arg.Size, Col: collationNumeric, Flag: arg.Flag}, nil + case sqltypes.Date, sqltypes.Int64, sqltypes.Uint64: + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil + case sqltypes.VarChar, sqltypes.VarBinary: + if arg.isHexOrBitLiteral() { + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil + } + if lit, ok := call.Arguments[0].(*Literal); ok { + if dt := evalToDateTime(lit.inner, -1, time.Now(), c.sqlmode.AllowZeroDate()); dt != nil { + if dt.prec == 0 { + return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: arg.Flag}, nil + } + return ctype{Type: sqltypes.Decimal, Size: 
decimalSizeBase + int32(dt.prec), Scale: int32(dt.prec), Col: collationNumeric, Flag: arg.Flag}, nil + } + } + fallthrough + default: + return ctype{Type: sqltypes.Decimal, Size: decimalSizeBase + maxTimePrec, Scale: maxTimePrec, Col: collationNumeric, Flag: arg.Flag}, nil + } } func (b *builtinWeek) eval(env *ExpressionEnv) (eval, error) { @@ -1348,8 +1737,8 @@ func (b *builtinWeek) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - d := evalToDate(date, env.now) - if d == nil || d.isZero() { + d := evalToDate(date, env.now, false) + if d == nil { return nil, nil } @@ -1381,7 +1770,7 @@ func (call *builtinWeek) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } if len(call.Arguments) == 1 { @@ -1408,8 +1797,8 @@ func (b *builtinWeekDay) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) - if d == nil || d.isZero() { + d := evalToDate(date, env.now, false) + if d == nil { return nil, nil } return newEvalInt64(int64(d.dt.Date.Weekday()+6) % 7), nil @@ -1426,7 +1815,7 @@ func (call *builtinWeekDay) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_WEEKDAY() @@ -1442,8 +1831,8 @@ func (b *builtinWeekOfYear) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := evalToDate(date, env.now) - if d == nil || d.isZero() { + d := evalToDate(date, env.now, false) + if d == nil { return nil, nil } @@ -1462,7 +1851,7 @@ func (call *builtinWeekOfYear) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } c.asm.Fn_WEEKOFYEAR() @@ -1478,7 +1867,7 @@ func (b *builtinYear) eval(env *ExpressionEnv) (eval, error) { if date == nil { return nil, nil } - d := 
evalToDate(date, env.now) + d := evalToDate(date, env.now, true) if d == nil { return nil, nil } @@ -1497,7 +1886,7 @@ func (call *builtinYear) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD(1) + c.asm.Convert_xD(1, true) } c.asm.Fn_YEAR() @@ -1514,8 +1903,8 @@ func (b *builtinYearWeek) eval(env *ExpressionEnv) (eval, error) { return nil, nil } - d := evalToDate(date, env.now) - if d == nil || d.isZero() { + d := evalToDate(date, env.now, false) + if d == nil { return nil, nil } @@ -1547,7 +1936,7 @@ func (call *builtinYearWeek) compile(c *compiler) (ctype, error) { switch arg.Type { case sqltypes.Date, sqltypes.Datetime: default: - c.asm.Convert_xD_nz(1) + c.asm.Convert_xD(1, false) } if len(call.Arguments) == 1 { @@ -1599,7 +1988,7 @@ func (call *builtinDateMath) eval(env *ExpressionEnv) (eval, error) { return tmp.addInterval(interval, collations.Unknown, env.now), nil } - if tmp := evalToTemporal(date); tmp != nil { + if tmp := evalToTemporal(date, env.sqlmode.AllowZeroDate()); tmp != nil { return tmp.addInterval(interval, call.collate, env.now), nil } diff --git a/go/vt/vtgate/evalengine/format.go b/go/vt/vtgate/evalengine/format.go index db473fd418e..7576a8b0cda 100644 --- a/go/vt/vtgate/evalengine/format.go +++ b/go/vt/vtgate/evalengine/format.go @@ -152,6 +152,18 @@ func (bv *BindVariable) format(buf *sqlparser.TrackedBuffer) { } } +func (bv *TupleBindVariable) Format(buf *sqlparser.TrackedBuffer) { + bv.format(buf) +} + +func (bv *TupleBindVariable) FormatFast(buf *sqlparser.TrackedBuffer) { + bv.format(buf) +} + +func (bv *TupleBindVariable) format(buf *sqlparser.TrackedBuffer) { + buf.WriteString(fmt.Sprintf("%s:%d", bv.Key, bv.Index)) +} + func (c *Column) Format(buf *sqlparser.TrackedBuffer) { c.format(buf) } @@ -206,12 +218,12 @@ func (tuple TupleExpr) format(buf *sqlparser.TrackedBuffer) { func (c *CollateExpr) format(buf *sqlparser.TrackedBuffer) { formatExpr(buf, c, c.Inner, 
true) buf.WriteLiteral(" COLLATE ") - buf.WriteString(collations.Local().LookupName(c.TypedCollation.Collation)) + buf.WriteString(c.CollationEnv.LookupName(c.TypedCollation.Collation)) } func (i *IntroducerExpr) format(buf *sqlparser.TrackedBuffer) { buf.WriteString("_") - buf.WriteString(collations.Local().LookupName(i.TypedCollation.Collation)) + buf.WriteString(i.CollationEnv.LookupName(i.TypedCollation.Collation)) formatExpr(buf, i, i.Inner, true) } @@ -261,7 +273,7 @@ func (c *builtinWeightString) format(buf *sqlparser.TrackedBuffer) { if c.Cast != "" { buf.WriteLiteral(" as ") buf.WriteLiteral(c.Cast) - _, _ = fmt.Fprintf(buf, "(%d)", c.Len) + _, _ = fmt.Fprintf(buf, "(%d)", *c.Len) } buf.WriteByte(')') } @@ -285,16 +297,16 @@ func (c *ConvertExpr) format(buf *sqlparser.TrackedBuffer) { formatExpr(buf, c, c.Inner, true) switch { - case c.HasLength && c.HasScale: - _, _ = fmt.Fprintf(buf, ", %s(%d,%d)", c.Type, c.Length, c.Scale) - case c.HasLength: - _, _ = fmt.Fprintf(buf, ", %s(%d)", c.Type, c.Length) + case c.Length != nil && c.Scale != nil: + _, _ = fmt.Fprintf(buf, ", %s(%d,%d)", c.Type, *c.Length, *c.Scale) + case c.Length != nil: + _, _ = fmt.Fprintf(buf, ", %s(%d)", c.Type, *c.Length) default: _, _ = fmt.Fprintf(buf, ", %s", c.Type) } if c.Collation != collations.Unknown { buf.WriteLiteral(" character set ") - buf.WriteString(collations.Local().LookupName(c.Collation)) + buf.WriteString(c.CollationEnv.LookupName(c.Collation)) } buf.WriteByte(')') } @@ -303,7 +315,7 @@ func (c *ConvertUsingExpr) format(buf *sqlparser.TrackedBuffer) { buf.WriteLiteral("convert(") formatExpr(buf, c, c.Inner, true) buf.WriteLiteral(" using ") - buf.WriteString(collations.Local().LookupName(c.Collation)) + buf.WriteString(c.CollationEnv.LookupName(c.Collation)) buf.WriteByte(')') } diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go index 649dc7b5583..ea327601975 100644 --- 
a/go/vt/vtgate/evalengine/integration/comparison_test.go +++ b/go/vt/vtgate/evalengine/integration/comparison_test.go @@ -28,9 +28,11 @@ import ( "time" "github.com/spf13/pflag" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/format" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" @@ -38,13 +40,12 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" ) var ( - collationEnv *collations.Environment - debugGolden = false debugNormalize = true debugSimplify = time.Now().UnixNano()&1 != 0 @@ -81,7 +82,7 @@ func normalizeValue(v sqltypes.Value, coll collations.ID) sqltypes.Value { return v } -func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mysql.Conn, expr string, fields []*querypb.Field, cmp *testcases.Comparison) { +func compareRemoteExprEnv(t *testing.T, collationEnv *collations.Environment, env *evalengine.ExpressionEnv, conn *mysql.Conn, expr string, fields []*querypb.Field, cmp *testcases.Comparison) { t.Helper() localQuery := "SELECT " + expr @@ -144,7 +145,7 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys var localVal, remoteVal sqltypes.Value var localCollation, remoteCollation collations.ID if localErr == nil { - v := local.Value(collations.Default()) + v := local.Value(collations.MySQL8().DefaultConnectionCharset()) if debugCheckCollations { if v.IsNull() { localCollation = collations.CollationBinaryID @@ -205,6 +206,7 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys var seenGoldenTests []GoldenTest type vcursor struct { + env *vtenv.Environment } func (vc *vcursor) GetKeyspace() string { @@ -215,6 +217,14 @@ func (vc *vcursor) 
TimeZone() *time.Location { return time.Local } +func (vc *vcursor) SQLMode() string { + return config.DefaultSQLMode +} + +func (vc *vcursor) Environment() *vtenv.Environment { + return vc.env +} + func initTimezoneData(t *testing.T, conn *mysql.Conn) { // We load the timezone information into MySQL. The evalengine assumes // our backend MySQL is configured with the timezone information as well @@ -247,20 +257,23 @@ func TestMySQL(t *testing.T) { // We require MySQL 8.0 collations for the comparisons in the tests - servenv.SetMySQLServerVersionForTest(conn.ServerVersion) - collationEnv = collations.NewEnvironment(conn.ServerVersion) + collationEnv := collations.NewEnvironment(conn.ServerVersion) servenv.OnParse(registerFlags) initTimezoneData(t, conn) + venv, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: conn.ServerVersion, + }) + require.NoError(t, err) for _, tc := range testcases.Cases { t.Run(tc.Name(), func(t *testing.T) { ctx := callerid.NewContext(context.Background(), &vtrpc.CallerID{Principal: "testuser"}, &querypb.VTGateCallerID{ Username: "vt_dba", }) - env := evalengine.NewExpressionEnv(ctx, nil, &vcursor{}) + env := evalengine.NewExpressionEnv(ctx, nil, &vcursor{env: venv}) tc.Run(func(query string, row []sqltypes.Value) { env.Row = row - compareRemoteExprEnv(t, env, conn, query, tc.Schema, tc.Compare) + compareRemoteExprEnv(t, collationEnv, env, conn, query, tc.Schema, tc.Compare) }) }) } diff --git a/go/vt/vtgate/evalengine/integration/fuzz_test.go b/go/vt/vtgate/evalengine/integration/fuzz_test.go index 657fbcb7c68..17a721edde9 100644 --- a/go/vt/vtgate/evalengine/integration/fuzz_test.go +++ b/go/vt/vtgate/evalengine/integration/fuzz_test.go @@ -19,10 +19,9 @@ limitations under the License. 
package integration import ( - "context" "encoding/json" "fmt" - "math/rand" + "math/rand/v2" "os" "regexp" "strings" @@ -35,6 +34,7 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/evalengine/testcases" "vitess.io/vitess/go/vt/vtgate/simplifier" @@ -42,7 +42,6 @@ import ( type ( gencase struct { - rand *rand.Rand ratioTuple int ratioSubexpr int tupleLen int @@ -62,24 +61,24 @@ var rhsOfIs = []string{ } func (g *gencase) arg(tuple bool) string { - if tuple || g.rand.Intn(g.ratioTuple) == 0 { + if tuple || rand.IntN(g.ratioTuple) == 0 { var exprs []string for i := 0; i < g.tupleLen; i++ { exprs = append(exprs, g.arg(false)) } return fmt.Sprintf("(%s)", strings.Join(exprs, ", ")) } - if g.rand.Intn(g.ratioSubexpr) == 0 { + if rand.IntN(g.ratioSubexpr) == 0 { return fmt.Sprintf("(%s)", g.expr()) } - return g.primitives[g.rand.Intn(len(g.primitives))] + return g.primitives[rand.IntN(len(g.primitives))] } func (g *gencase) expr() string { - op := g.operators[g.rand.Intn(len(g.operators))] + op := g.operators[rand.IntN(len(g.operators))] rhs := g.arg(op == "IN" || op == "NOT IN") if op == "IS" { - rhs = rhsOfIs[g.rand.Intn(len(rhsOfIs))] + rhs = rhsOfIs[rand.IntN(len(rhsOfIs))] } return fmt.Sprintf("%s %s %s", g.arg(false), op, rhs) } @@ -132,7 +131,7 @@ func errorsMatch(remote, local error) bool { } func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields []*querypb.Field) (evalengine.EvalResult, error) { - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { return evalengine.EvalResult{}, err } @@ -147,6 +146,7 @@ func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields cfg := &evalengine.Config{ ResolveColumn: evalengine.FieldResolver(fields).Column, Collation: 
collations.CollationUtf8mb4ID, + Environment: env.VCursor().Environment(), NoConstantFolding: !debugSimplify, } expr, err = evalengine.Translate(astExpr, cfg) @@ -182,7 +182,6 @@ func TestGenerateFuzzCases(t *testing.T) { t.Skipf("skipping fuzz test generation") } var gen = gencase{ - rand: rand.New(rand.NewSource(fuzzSeed)), ratioTuple: 8, ratioSubexpr: 8, tupleLen: 4, @@ -197,10 +196,11 @@ func TestGenerateFuzzCases(t *testing.T) { var conn = mysqlconn(t) defer conn.Close() + venv := vtenv.NewTestEnv() compareWithMySQL := func(expr sqlparser.Expr) *mismatch { query := "SELECT " + sqlparser.String(expr) - env := evalengine.NewExpressionEnv(context.Background(), nil, nil) + env := evalengine.EmptyExpressionEnv(venv) eval, localErr := evaluateLocalEvalengine(env, query, nil) remote, remoteErr := conn.ExecuteFetch(query, 1, false) @@ -218,7 +218,7 @@ func TestGenerateFuzzCases(t *testing.T) { remoteErr: remoteErr, } if localErr == nil { - res.localVal = eval.Value(collations.Default()) + res.localVal = eval.Value(collations.MySQL8().DefaultConnectionCharset()) } if remoteErr == nil { res.remoteVal = remote.Rows[0][0] @@ -233,7 +233,7 @@ func TestGenerateFuzzCases(t *testing.T) { var start = time.Now() for len(failures) < fuzzMaxFailures { query := "SELECT " + gen.expr() - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Fatal(err) } @@ -333,7 +333,7 @@ func compareResult(local, remote Result, cmp *testcases.Comparison) error { var localCollationName string var remoteCollationName string - env := collations.Local() + env := collations.MySQL8() if coll := local.Collation; coll != collations.Unknown { localCollationName = env.LookupName(coll) } diff --git a/go/vt/vtgate/evalengine/mysql_test.go b/go/vt/vtgate/evalengine/mysql_test.go index bfa503d82dd..6434d7dcfbf 100644 --- a/go/vt/vtgate/evalengine/mysql_test.go +++ b/go/vt/vtgate/evalengine/mysql_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package evalengine import ( - "context" "encoding/json" "errors" "os" @@ -28,6 +27,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" ) func internalExpression(e Expr) IR { @@ -63,13 +63,14 @@ func knownBadQuery(e Expr) bool { var errKnownBadQuery = errors.New("this query is known to give bad results in MySQL") func convert(t *testing.T, query string, simplify bool) (Expr, error) { - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Fatalf("failed to parse '%s': %v", query, err) } cfg := &Config{ Collation: collations.CollationUtf8mb4ID, + Environment: vtenv.NewTestEnv(), NoConstantFolding: !simplify, } @@ -89,7 +90,7 @@ func testSingle(t *testing.T, query string) (EvalResult, error) { if err != nil { return EvalResult{}, err } - return NewExpressionEnv(context.Background(), nil, nil).Evaluate(converted) + return EmptyExpressionEnv(vtenv.NewTestEnv()).Evaluate(converted) } func TestMySQLGolden(t *testing.T) { @@ -141,11 +142,11 @@ func TestMySQLGolden(t *testing.T) { continue } if tc.Error != "" { - t.Errorf("query %d: %s\nmysql err: %s\nvitess val: %s", testcount, tc.Query, tc.Error, eval.Value(collations.Default())) + t.Errorf("query %d: %s\nmysql err: %s\nvitess val: %s", testcount, tc.Query, tc.Error, eval.Value(collations.MySQL8().DefaultConnectionCharset())) continue } if eval.String() != tc.Value { - t.Errorf("query %d: %s\nmysql val: %s\nvitess val: %s", testcount, tc.Query, tc.Value, eval.Value(collations.Default())) + t.Errorf("query %d: %s\nmysql val: %s\nvitess val: %s", testcount, tc.Query, tc.Value, eval.Value(collations.MySQL8().DefaultConnectionCharset())) continue } ok++ @@ -159,5 +160,5 @@ func TestMySQLGolden(t *testing.T) { func TestDebug1(t *testing.T) { // Debug eval, err := testSingle(t, `SELECT _latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin`) - t.Logf("eval=%s err=%v coll=%s", eval.String(), err, 
collations.Local().LookupName(eval.Collation())) + t.Logf("eval=%s err=%v coll=%s", eval.String(), err, collations.MySQL8().LookupName(eval.Collation())) } diff --git a/go/vt/vtgate/evalengine/perf_test.go b/go/vt/vtgate/evalengine/perf_test.go index 10974cd313d..f3e0b32de08 100644 --- a/go/vt/vtgate/evalengine/perf_test.go +++ b/go/vt/vtgate/evalengine/perf_test.go @@ -5,7 +5,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -22,8 +22,9 @@ func BenchmarkCompilerExpressions(b *testing.B) { {"comparison_f", "column0 = 12", []sqltypes.Value{sqltypes.NewFloat64(420.0)}}, } + venv := vtenv.NewTestEnv() for _, tc := range testCases { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := venv.Parser().ParseExpr(tc.expression) if err != nil { b.Fatal(err) } @@ -33,6 +34,7 @@ func BenchmarkCompilerExpressions(b *testing.B) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + Environment: venv, } translated, err := evalengine.Translate(expr, cfg) diff --git a/go/vt/vtgate/evalengine/testcases/cases.go b/go/vt/vtgate/evalengine/testcases/cases.go index cd52631c00c..c937e7dbbc0 100644 --- a/go/vt/vtgate/evalengine/testcases/cases.go +++ b/go/vt/vtgate/evalengine/testcases/cases.go @@ -63,12 +63,17 @@ var Cases = []TestCase{ {Run: TupleComparisons}, {Run: Comparisons}, {Run: InStatement}, + {Run: FnField}, + {Run: FnElt}, + {Run: FnInsert}, {Run: FnLower}, {Run: FnUpper}, {Run: FnCharLength}, {Run: FnLength}, {Run: FnBitLength}, {Run: FnAscii}, + {Run: FnReverse}, + {Run: FnSpace}, {Run: FnOrd}, {Run: FnRepeat}, {Run: FnLeft}, @@ -78,8 +83,12 @@ var Cases = []TestCase{ {Run: FnLTrim}, {Run: FnRTrim}, {Run: FnTrim}, + {Run: FnSubstr}, + {Run: FnLocate}, + {Run: FnReplace}, {Run: FnConcat}, {Run: FnConcatWs}, + {Run: FnChar}, {Run: FnHex}, {Run: FnUnhex}, {Run: FnCeil}, 
@@ -111,6 +120,8 @@ var Cases = []TestCase{ {Run: FnTruncate}, {Run: FnCrc32}, {Run: FnConv}, + {Run: FnBin}, + {Run: FnOct}, {Run: FnMD5}, {Run: FnSHA1}, {Run: FnSHA2}, @@ -129,6 +140,12 @@ var Cases = []TestCase{ {Run: FnMinute}, {Run: FnMonth}, {Run: FnMonthName}, + {Run: FnLastDay}, + {Run: FnToDays}, + {Run: FnFromDays}, + {Run: FnSecToTime}, + {Run: FnTimeToSec}, + {Run: FnToSeconds}, {Run: FnQuarter}, {Run: FnSecond}, {Run: FnTime}, @@ -655,6 +672,24 @@ func FnConv(yield Query) { } } +func FnBin(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("BIN(%s)", num), nil) + } + for _, num := range inputBitwise { + yield(fmt.Sprintf("BIN(%s)", num), nil) + } +} + +func FnOct(yield Query) { + for _, num := range radianInputs { + yield(fmt.Sprintf("OCT(%s)", num), nil) + } + for _, num := range inputBitwise { + yield(fmt.Sprintf("OCT(%s)", num), nil) + } +} + func FnMD5(yield Query) { for _, num := range radianInputs { yield(fmt.Sprintf("MD5(%s)", num), nil) @@ -1307,6 +1342,112 @@ var JSONExtract_Schema = []*querypb.Field{ }, } +func FnField(yield Query) { + for _, s1 := range inputStrings { + for _, s2 := range inputStrings { + for _, s3 := range inputStrings { + yield(fmt.Sprintf("FIELD(%s, %s, %s)", s1, s2, s3), nil) + } + } + } + + for _, s1 := range radianInputs { + for _, s2 := range radianInputs { + for _, s3 := range radianInputs { + yield(fmt.Sprintf("FIELD(%s, %s, %s)", s1, s2, s3), nil) + } + } + } + + // Contains failing testcases + for _, s1 := range inputStrings { + for _, s2 := range radianInputs { + for _, s3 := range inputStrings { + yield(fmt.Sprintf("FIELD(%s, %s, %s)", s1, s2, s3), nil) + } + } + } + + // Contains failing testcases + for _, s1 := range inputBitwise { + for _, s2 := range inputBitwise { + for _, s3 := range inputBitwise { + yield(fmt.Sprintf("FIELD(%s, %s, %s)", s1, s2, s3), nil) + } + } + } + + mysqlDocSamples := []string{ + "FIELD('Bb', 'Aa', 'Bb', 'Cc', 'Dd', 'Ff')", + "FIELD('Gg', 'Aa', 'Bb', 'Cc', 'Dd', 
'Ff')", + } + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func FnElt(yield Query) { + for _, s1 := range inputStrings { + for _, n := range inputBitwise { + yield(fmt.Sprintf("ELT(%s, %s)", n, s1), nil) + } + } + + for _, s1 := range inputStrings { + for _, s2 := range inputStrings { + for _, n := range inputBitwise { + yield(fmt.Sprintf("ELT(%s, %s, %s)", n, s1, s2), nil) + } + } + } + + validIndex := []string{ + "1", + "2", + "3", + } + for _, s1 := range inputStrings { + for _, s2 := range inputStrings { + for _, s3 := range inputStrings { + for _, n := range validIndex { + yield(fmt.Sprintf("ELT(%s, %s, %s, %s)", n, s1, s2, s3), nil) + } + } + } + } + + mysqlDocSamples := []string{ + "ELT(1, 'Aa', 'Bb', 'Cc', 'Dd')", + "ELT(4, 'Aa', 'Bb', 'Cc', 'Dd')", + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func FnInsert(yield Query) { + for _, s := range insertStrings { + for _, ns := range insertStrings { + for _, l := range inputBitwise { + for _, p := range inputBitwise { + yield(fmt.Sprintf("INSERT(%s, %s, %s, %s)", s, p, l, ns), nil) + } + } + } + } + + mysqlDocSamples := []string{ + "INSERT('Quadratic', 3, 4, 'What')", + "INSERT('Quadratic', -1, 4, 'What')", + "INSERT('Quadratic', 3, 100, 'What')", + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + func FnLower(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("LOWER(%s)", str), nil) @@ -1347,6 +1488,34 @@ func FnAscii(yield Query) { } } +func FnReverse(yield Query) { + for _, str := range inputStrings { + yield(fmt.Sprintf("REVERSE(%s)", str), nil) + } +} + +func FnSpace(yield Query) { + counts := []string{ + "0", + "12", + "23", + "-1", + "-12393128120", + "-432766734237843674326423876243876234786", + "'-432766734237843674326423876243876234786'", + "432766734237843674326423876243876234786", + "1073741825", + "1.5", + "-3.2", + "'jhgjhg'", + "6", + } + + for _, c := range counts { + yield(fmt.Sprintf("SPACE(%s)", c), nil) + } +} + func 
FnOrd(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("ORD(%s)", str), nil) @@ -1436,6 +1605,94 @@ func FnTrim(yield Query) { } } +func FnSubstr(yield Query) { + mysqlDocSamples := []string{ + `SUBSTRING('Quadratically',5)`, + `SUBSTRING('foobarbar' FROM 4)`, + `SUBSTRING('Quadratically',5,6)`, + `SUBSTRING('Sakila', -3)`, + `SUBSTRING('Sakila', -5, 3)`, + `SUBSTRING('Sakila' FROM -4 FOR 2)`, + `SUBSTR('Quadratically',5)`, + `SUBSTR('foobarbar' FROM 4)`, + `SUBSTR('Quadratically',5,6)`, + `SUBSTR('Sakila', -3)`, + `SUBSTR('Sakila', -5, 3)`, + `SUBSTR('Sakila' FROM -4 FOR 2)`, + `MID('Quadratically',5,6)`, + `MID('Sakila', -5, 3)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, str := range inputStrings { + for _, i := range radianInputs { + yield(fmt.Sprintf("SUBSTRING(%s, %s)", str, i), nil) + + for _, j := range radianInputs { + yield(fmt.Sprintf("SUBSTRING(%s, %s, %s)", str, i, j), nil) + } + } + } +} + +func FnLocate(yield Query) { + mysqlDocSamples := []string{ + `LOCATE('bar', 'foobarbar')`, + `LOCATE('xbar', 'foobar')`, + `LOCATE('bar', 'foobarbar', 5)`, + `INSTR('foobarbar', 'bar')`, + `INSTR('xbar', 'foobar')`, + `POSITION('bar' IN 'foobarbar')`, + `POSITION('xbar' IN 'foobar')`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, substr := range locateStrings { + for _, str := range locateStrings { + yield(fmt.Sprintf("LOCATE(%s, %s)", substr, str), nil) + yield(fmt.Sprintf("INSTR(%s, %s)", str, substr), nil) + yield(fmt.Sprintf("POSITION(%s IN %s)", str, substr), nil) + + for _, i := range radianInputs { + yield(fmt.Sprintf("LOCATE(%s, %s, %s)", substr, str, i), nil) + } + } + } +} + +func FnReplace(yield Query) { + cases := []string{ + `REPLACE('www.mysql.com', 'w', 'Ww')`, + // MySQL doesn't do collation matching for replace, only + // byte equivalence, but make sure to check. 
+ `REPLACE('straße', 'ss', 'b')`, + `REPLACE('straße', 'ß', 'b')`, + // From / to strings are converted into the collation of + // the input string. + `REPLACE('fooÿbar', _latin1 0xFF, _latin1 0xFE)`, + // First occurence is replaced + `replace('fff', 'ff', 'gg')`, + } + + for _, q := range cases { + yield(q, nil) + } + + for _, substr := range inputStrings { + for _, str := range inputStrings { + for _, i := range inputStrings { + yield(fmt.Sprintf("REPLACE(%s, %s, %s)", substr, str, i), nil) + } + } + } +} + func FnConcat(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("CONCAT(%s)", str), nil) @@ -1486,6 +1743,31 @@ func FnConcatWs(yield Query) { } } +func FnChar(yield Query) { + mysqlDocSamples := []string{ + `CHAR(77,121,83,81,'76')`, + `CHAR(77,77.3,'77.3')`, + `CHAR(77,121,83,81,'76' USING utf8mb4)`, + `CHAR(77,77.3,'77.3' USING utf8mb4)`, + `HEX(CHAR(1,0))`, + `HEX(CHAR(256))`, + `HEX(CHAR(1,0,0))`, + `HEX(CHAR(256*256)`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } + + for _, i1 := range radianInputs { + for _, i2 := range inputBitwise { + for _, i3 := range inputConversions { + yield(fmt.Sprintf("CHAR(%s, %s, %s)", i1, i2, i3), nil) + } + } + } +} + func FnHex(yield Query) { for _, str := range inputStrings { yield(fmt.Sprintf("hex(%s)", str), nil) @@ -1710,6 +1992,148 @@ func FnMonthName(yield Query) { } } +func FnLastDay(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("LAST_DAY(%s)", d), nil) + } + + dates := []string{ + `DATE'2024-02-18'`, + `DATE'2023-02-01'`, + `DATE'2100-02-01'`, + `TIMESTAMP'2020-12-31 23:59:59'`, + `TIMESTAMP'2025-01-01 00:00:00'`, + `'2000-02-01'`, + `'2020-12-31 23:59:59'`, + `'2025-01-01 00:00:00'`, + `20250101`, + `'20250101'`, + } + + for _, d := range dates { + yield(fmt.Sprintf("LAST_DAY(%s)", d), nil) + } +} + +func FnToDays(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("TO_DAYS(%s)", d), nil) + } + + dates := []string{ + 
`DATE'0000-00-00'`, + `0`, + `'0000-00-00'`, + `DATE'2023-09-03 00:00:00'`, + `DATE'2023-09-03 07:00:00'`, + `DATE'0000-00-00 00:00:00'`, + `950501`, + `'2007-10-07'`, + `'2008-10-07'`, + `'08-10-07'`, + `'0000-01-01'`, + } + + for _, d := range dates { + yield(fmt.Sprintf("TO_DAYS(%s)", d), nil) + } +} + +func FnFromDays(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("FROM_DAYS(%s)", d), nil) + } + + days := []string{ + "0", + "1", + "366", + "365242", + "3652424", + "3652425", + "3652500", + "3652499", + "730669", + } + + for _, d := range days { + yield(fmt.Sprintf("FROM_DAYS(%s)", d), nil) + } +} + +func FnSecToTime(yield Query) { + for _, s := range inputConversions { + yield(fmt.Sprintf("SEC_TO_TIME(%s)", s), nil) + } + + mysqlDocSamples := []string{ + `SEC_TO_TIME(2378)`, + `SEC_TO_TIME(2378) + 0`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + +func FnTimeToSec(yield Query) { + for _, d := range inputConversions { + yield(fmt.Sprintf("TIME_TO_SEC(%s)", d), nil) + } + + time := []string{ + `0`, + `'00:00:00'`, + `'22:23:00'`, + `'00:39:38'`, + `TIME'00:39:38'`, + `TIME'102:39:38'`, + `TIME'838:59:59'`, + `TIME'-838:59:59'`, + `'000220`, + `'2003-09-03 00:39:38'`, + `'2003-09-03'`, + } + + for _, t := range time { + yield(fmt.Sprintf("TIME_TO_SEC(%s)", t), nil) + } +} + +func FnToSeconds(yield Query) { + for _, t := range inputConversions { + yield(fmt.Sprintf("TO_SECONDS(%s)", t), nil) + } + + timeInputs := []string{ + `DATE'0000-00-00'`, + `0`, + `'0000-00-00'`, + `'00:00:00'`, + `DATE'2023-09-03 00:00:00'`, + `DATE'0000-00-00 00:00:00'`, + `950501`, + `'2007-10-07'`, + `'0000-01-01'`, + `TIME'00:00:00'`, + `TIME'120:01:12'`, + } + + for _, t := range timeInputs { + yield(fmt.Sprintf("TO_SECONDS(%s)", t), nil) + } + + mysqlDocSamples := []string{ + `TO_SECONDS(950501)`, + `TO_SECONDS('2009-11-29')`, + `TO_SECONDS('2009-11-29 13:43:32')`, + } + + for _, q := range mysqlDocSamples { + yield(q, nil) + } +} + 
func FnQuarter(yield Query) { for _, d := range inputConversions { yield(fmt.Sprintf("QUARTER(%s)", d), nil) @@ -1726,6 +2150,25 @@ func FnTime(yield Query) { for _, d := range inputConversions { yield(fmt.Sprintf("TIME(%s)", d), nil) } + times := []string{ + "'00:00:00'", + "'asdadsasd'", + "'312sadd'", + "'11-12-23'", + "'0000-11-23'", + "'0-0-0'", + "00:00", + "00:00-00", + "00:00:0:0:0:0", + "00::00", + "12::00", + "'00000001'", + "'11116656'", + } + + for _, d := range times { + yield(fmt.Sprintf("TIME(%s)", d), nil) + } } func FnUnixTimestamp(yield Query) { @@ -1767,7 +2210,7 @@ func FnYear(yield Query) { } func FnYearWeek(yield Query) { - for i := 0; i < 4; i++ { + for i := 0; i < 8; i++ { for _, d := range inputConversions { yield(fmt.Sprintf("YEARWEEK(%s, %d)", d, i), nil) } diff --git a/go/vt/vtgate/evalengine/testcases/helpers.go b/go/vt/vtgate/evalengine/testcases/helpers.go index 245d59992aa..a908b8196c8 100644 --- a/go/vt/vtgate/evalengine/testcases/helpers.go +++ b/go/vt/vtgate/evalengine/testcases/helpers.go @@ -187,12 +187,12 @@ func (cmp *Comparison) Equals(local, remote sqltypes.Value, now time.Time) (bool } return cmp.closeDatetime(localDatetime.ToStdTime(now), remoteDatetime.ToStdTime(now), 1*time.Second), nil case cmp.LooseTime && local.IsTime() && remote.IsTime(): - localTime, _, ok := datetime.ParseTime(local.ToString(), -1) - if !ok { + localTime, _, state := datetime.ParseTime(local.ToString(), -1) + if state != datetime.TimeOK { return false, fmt.Errorf("error converting local value '%s' to time", local) } - remoteTime, _, ok := datetime.ParseTime(remote.ToString(), -1) - if !ok { + remoteTime, _, state := datetime.ParseTime(remote.ToString(), -1) + if state != datetime.TimeOK { return false, fmt.Errorf("error converting remote value '%s' to time", remote) } return cmp.closeDatetime(localTime.ToStdTime(now), remoteTime.ToStdTime(now), 1*time.Second), nil diff --git a/go/vt/vtgate/evalengine/testcases/inputs.go 
b/go/vt/vtgate/evalengine/testcases/inputs.go index c6796ba5d32..eb94235d9b4 100644 --- a/go/vt/vtgate/evalengine/testcases/inputs.go +++ b/go/vt/vtgate/evalengine/testcases/inputs.go @@ -93,19 +93,22 @@ var inputConversions = []string{ `0x0`, `0x1`, `0xff`, `X'00'`, `X'01'`, `X'ff'`, `0b1001`, `b'1001'`, `0x9`, `x'09'`, "NULL", "true", "false", + "NULL * 1", "1 * NULL", "NULL * NULL", "NULL / 1", "1 / NULL", "NULL / NULL", + "NULL + 1", "1 + NULL", "NULL + NULL", "NULL - 1", "1 - NULL", "NULL - NULL", "0xFF666F6F626172FF", "0x666F6F626172FF", "0xFF666F6F626172", "9223372036854775807", "-9223372036854775808", "18446744073709551615", "18446744073709540000e0", "-18446744073709540000e0", "JSON_OBJECT()", "JSON_ARRAY()", - "time '10:04:58'", "time '31:34:58'", "time '32:34:58'", "time '130:34:58'", "time '5 10:34:58'", "date '2000-01-01'", + "time '10:04:58'", "time '31:34:58'", "time '32:34:58'", "time '130:34:58'", "time '5 10:34:58'", + "time '10:04:58.1'", "time '31:34:58.4'", "time '32:34:58.5'", "time '130:34:58.6'", "time '5 10:34:58.9'", "date '2000-01-01'", "timestamp '2000-01-01 10:34:58'", "timestamp '2000-01-01 10:34:58.123456'", "timestamp '2000-01-01 10:34:58.978654'", "20000101103458", "20000101103458.1234", "20000101103458.123456", "20000101", "103458", "103458.123456", "'20000101103458'", "'20000101103458.1234'", "'20000101103458.123456'", "'20000101'", "'103458'", "'103458.123456'", "'20000101103458foo'", "'20000101103458.1234foo'", "'20000101103458.123456foo'", "'20000101foo'", "'103458foo'", "'103458.123456foo'", - "time '-10:04:58'", "time '-31:34:58'", "time '-32:34:58'", + "time '-10:04:58'", "time '-31:34:58'", "time '-32:34:58'", "time'00:00:01.0000'", "time'00:00:59.01011'", "time'00:00:59.2132234'", "time '-101:34:58'", "time '-5 10:34:58'", - "'10:04:58'", "'101:34:58'", "'5 10:34:58'", "'2000-01-01'", "'2000-01-01 12:34:58'", + "'10:04:58'", "'101:34:58'", "'5 10:34:58'", "'2000-01-01'", "'2000-01-01 12:34:58'", "'0000-02-29'", 
"'0000-01-03'", "'1969-02-18'", "'1970-01-01 00:00:01'", "'3001-02-19 00:00:00'", "'3001-02-18 23:59:59'", "cast(0 as json)", "cast(1 as json)", "cast(true as json)", "cast(false as json)", "cast('{}' as json)", "cast('[]' as json)", @@ -196,6 +199,77 @@ var inputStrings = []string{ // "_ucs2 'AabcÅå'", } +var insertStrings = []string{ + "NULL", + "\"\"", + "\"a\"", + "\"abc\"", + "1", + "-1", + "0123", + "0xAACC", + "3.1415926", + // MySQL has broken behavior for these inputs, + // see https://github.com/mysql/mysql-server/pull/517 + // "\"Å å\"", + // "\"中文测试\"", + // "\"日本語テスト\"", + // "\"한국어 시험\"", + // "\"😊😂🤢\"", + // "_utf8mb4 'abcABCÅå'", + "DATE '2022-10-11'", + "TIME '11:02:23'", + "'123'", + "9223372036854775807", + "-9223372036854775808", + "999999999999999999999999", + "-999999999999999999999999", + "_binary 'Müller' ", + "_latin1 0xFF", + // TODO: support other multibyte encodings + // "_dec8 'ÒòÅå'", + // "_utf8mb3 'abcABCÅå'", + // "_utf16 'AabcÅå'", + // "_utf32 'AabcÅå'", + // "_ucs2 'AabcÅå'", +} + +var locateStrings = []string{ + "NULL", + "\"\"", + "\"a\"", + "\"abc\"", + "1", + "-1", + "0123", + "0xAACC", + "3.1415926", + // MySQL has broken behavior for these inputs, + // see https://bugs.mysql.com/bug.php?id=113933 + // "\"Å å\"", + // "\"中文测试\"", + // "\"日本語テスト\"", + // "\"한국어 시험\"", + // "\"😊😂🤢\"", + // "_utf8mb4 'abcABCÅå'", + "DATE '2022-10-11'", + "TIME '11:02:23'", + "'123'", + "9223372036854775807", + "-9223372036854775808", + "999999999999999999999999", + "-999999999999999999999999", + "_binary 'Müller' ", + "_utf8mb4 'abcABCÅå'", + "_latin1 0xFF", + // TODO: support other multibyte encodings + // "_dec8 'ÒòÅå'", + // "_utf8mb3 'abcABCÅå'", + // "_utf16 'AabcÅå'", + // "_utf32 'AabcÅå'", + // "_ucs2 'AabcÅå'", +} + var inputConversionTypes = []string{ "BINARY", "BINARY(1)", "BINARY(0)", "BINARY(16)", "BINARY(-1)", "CHAR", "CHAR(1)", "CHAR(0)", "CHAR(16)", "CHAR(-1)", diff --git a/go/vt/vtgate/evalengine/translate.go 
b/go/vt/vtgate/evalengine/translate.go index c8f6f7d1337..0091f06a633 100644 --- a/go/vt/vtgate/evalengine/translate.go +++ b/go/vt/vtgate/evalengine/translate.go @@ -27,6 +27,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -193,7 +194,7 @@ func (ast *astCompiler) translateIsExpr(left sqlparser.Expr, op sqlparser.IsExpr } func (ast *astCompiler) translateBindVar(arg *sqlparser.Argument) (IR, error) { - bvar := NewBindVar(arg.Name, Type{Type: arg.Type, Coll: ast.cfg.Collation}) + bvar := NewBindVar(arg.Name, NewType(arg.Type, ast.cfg.Collation)) if !bvar.typed() { bvar.dynamicTypeOffset = len(ast.untyped) @@ -203,12 +204,12 @@ func (ast *astCompiler) translateBindVar(arg *sqlparser.Argument) (IR, error) { } func (ast *astCompiler) translateColOffset(col *sqlparser.Offset) (IR, error) { - typ := UnknownType() + var typ Type if ast.cfg.ResolveType != nil { typ, _ = ast.cfg.ResolveType(col.Original) } - if typ.Coll == collations.Unknown { - typ.Coll = ast.cfg.Collation + if typ.Valid() && typ.collation == collations.Unknown { + typ.collation = ast.cfg.Collation } column := NewColumn(col.V, typ, col.Original) @@ -227,12 +228,12 @@ func (ast *astCompiler) translateColName(colname *sqlparser.ColName) (IR, error) if err != nil { return nil, err } - typ := UnknownType() + var typ Type if ast.cfg.ResolveType != nil { typ, _ = ast.cfg.ResolveType(colname) } - if typ.Coll == collations.Unknown { - typ.Coll = ast.cfg.Collation + if typ.Valid() && typ.collation == collations.Unknown { + typ.collation = ast.cfg.Collation } column := NewColumn(idx, typ, colname) @@ -334,7 +335,7 @@ func (ast *astCompiler) translateCollateExpr(collate *sqlparser.CollateExpr) (IR if err != nil { return nil, err } - coll := collations.Local().LookupByName(collate.Collation) + coll := 
ast.cfg.Environment.CollationEnv().LookupByName(collate.Collation) if coll == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown collation: '%s'", collate.Collation) } @@ -345,6 +346,7 @@ func (ast *astCompiler) translateCollateExpr(collate *sqlparser.CollateExpr) (IR Coercibility: collations.CoerceExplicit, Repertoire: collations.RepertoireUnicode, }, + CollationEnv: ast.cfg.Environment.CollationEnv(), }, nil } @@ -358,7 +360,7 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer if strings.ToLower(introduced.CharacterSet) == "_binary" { collation = collations.CollationBinaryID } else { - defaultCollation := collations.Local().DefaultCollationForCharset(introduced.CharacterSet[1:]) + defaultCollation := ast.cfg.Environment.CollationEnv().DefaultCollationForCharset(introduced.CharacterSet[1:]) if defaultCollation == collations.Unknown { panic(fmt.Sprintf("unknown character set: %s", introduced.CharacterSet)) } @@ -371,7 +373,7 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer case collations.CollationBinaryID: lit.inner = evalToBinary(lit.inner) default: - lit.inner, err = evalToVarchar(lit.inner, collation, false) + lit.inner, err = introducerCast(lit.inner, collation) if err != nil { return nil, err } @@ -389,6 +391,7 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer Coercibility: collations.CoerceExplicit, Repertoire: collations.RepertoireUnicode, }, + CollationEnv: ast.cfg.Environment.CollationEnv(), }, nil default: panic("character set introducers are only supported for literals and arguments") @@ -420,7 +423,7 @@ func (ast *astCompiler) translateUnaryExpr(unary *sqlparser.UnaryExpr) (IR, erro case sqlparser.TildaOp: return &BitwiseNotExpr{UnaryExpr: UnaryExpr{expr}}, nil case sqlparser.NStringOp: - return &ConvertExpr{UnaryExpr: UnaryExpr{expr}, Type: "NCHAR", Collation: collations.CollationUtf8mb3ID}, nil + return 
&ConvertExpr{UnaryExpr: UnaryExpr{expr}, Type: "NCHAR", Collation: collations.CollationUtf8mb3ID, CollationEnv: ast.cfg.Environment.CollationEnv()}, nil default: return nil, translateExprNotSupported(unary) } @@ -569,16 +572,11 @@ type Config struct { Collation collations.ID NoConstantFolding bool NoCompilation bool + SQLMode SQLMode + Environment *vtenv.Environment } func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { - if cfg == nil { - cfg = &Config{} - } - if cfg.Collation == collations.Unknown { - cfg.Collation = collations.Default() - } - ast := astCompiler{cfg: cfg} expr, err := ast.translateExpr(e) @@ -591,7 +589,7 @@ func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { } if !cfg.NoConstantFolding { - staticEnv := EmptyExpressionEnv() + staticEnv := EmptyExpressionEnv(cfg.Environment) expr, err = simplifyExpr(staticEnv, expr) if err != nil { return nil, err @@ -603,11 +601,12 @@ func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { } if len(ast.untyped) == 0 && !cfg.NoCompilation { - comp := compiler{collation: cfg.Collation} + comp := compiler{collation: cfg.Collation, env: cfg.Environment, sqlmode: cfg.SQLMode} return comp.compile(expr) } return &UntypedExpr{ + env: cfg.Environment, ir: expr, collation: cfg.Collation, needTypes: ast.untyped, @@ -626,9 +625,14 @@ type typedExpr struct { err error } -func (typed *typedExpr) compile(expr IR, collation collations.ID) (*CompiledExpr, error) { +func (typed *typedExpr) compile(env *vtenv.Environment, expr IR, collation collations.ID, sqlmode SQLMode) (*CompiledExpr, error) { typed.once.Do(func() { - comp := compiler{collation: collation, dynamicTypes: typed.types} + comp := compiler{ + env: env, + collation: collation, + dynamicTypes: typed.types, + sqlmode: sqlmode, + } typed.compiled, typed.err = comp.compile(expr) }) return typed.compiled, typed.err @@ -642,6 +646,7 @@ type typedIR interface { // UntypedExpr is a translated expression that cannot be compiled ahead of time because it 
// contains dynamic types. type UntypedExpr struct { + env *vtenv.Environment // ir is the translated IR for the expression ir IR // collation is the default collation for the translated expression @@ -681,7 +686,9 @@ func (u *UntypedExpr) loadTypedExpression(env *ExpressionEnv) (*typedExpr, error defer u.mu.Unlock() for _, typed := range u.typed { - if slices.Equal(typed.types, dynamicTypes) { + if slices.EqualFunc(typed.types, dynamicTypes, func(a, b ctype) bool { + return a.equal(b) + }) { return typed, nil } } @@ -695,7 +702,7 @@ func (u *UntypedExpr) Compile(env *ExpressionEnv) (*CompiledExpr, error) { if err != nil { return nil, err } - return typed.compile(u.ir, u.collation) + return typed.compile(u.env, u.ir, u.collation, env.sqlmode) } func (u *UntypedExpr) typeof(env *ExpressionEnv) (ctype, error) { @@ -734,9 +741,9 @@ func (fields FieldResolver) Type(expr sqlparser.Expr) (Type, bool) { name := expr.CompliantName() for _, f := range fields { if f.Name == name { - return Type{Type: f.Type, Coll: collations.ID(f.Charset)}, true + return NewType(f.Type, collations.ID(f.Charset)), true } } } - return UnknownType(), false + return Type{}, false } diff --git a/go/vt/vtgate/evalengine/translate_builtin.go b/go/vt/vtgate/evalengine/translate_builtin.go index 04bd2ca3428..d4c6bcdae5a 100644 --- a/go/vt/vtgate/evalengine/translate_builtin.go +++ b/go/vt/vtgate/evalengine/translate_builtin.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + "vitess.io/vitess/go/mysql/collations" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -46,11 +47,7 @@ func (ast *astCompiler) translateFuncArgs(fnargs []sqlparser.Expr) ([]IR, error) func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { var args TupleExpr for _, expr := range fn.Exprs { - aliased, ok := expr.(*sqlparser.AliasedExpr) - if !ok { - return nil, translateExprNotSupported(fn) - } - convertedExpr, err := 
ast.translateExpr(aliased.Expr) + convertedExpr, err := ast.translateExpr(expr) if err != nil { return nil, err } @@ -255,6 +252,22 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { return nil, argError(method) } return &builtinConv{CallExpr: call, collate: ast.cfg.Collation}, nil + case "bin": + if len(args) != 1 { + return nil, argError(method) + } + args = append(args, NewLiteralInt(10)) + args = append(args, NewLiteralInt(2)) + var cexpr = CallExpr{Arguments: args, Method: "BIN"} + return &builtinConv{CallExpr: cexpr, collate: ast.cfg.Collation}, nil + case "oct": + if len(args) != 1 { + return nil, argError(method) + } + args = append(args, NewLiteralInt(10)) + args = append(args, NewLiteralInt(8)) + var cexpr = CallExpr{Arguments: args, Method: "OCT"} + return &builtinConv{CallExpr: cexpr, collate: ast.cfg.Collation}, nil case "left", "right": if len(args) != 2 { return nil, argError(method) @@ -265,6 +278,16 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { return nil, argError(method) } return &builtinPad{CallExpr: call, collate: ast.cfg.Collation, left: method == "lpad"}, nil + case "field": + if len(args) < 2 { + return nil, argError(method) + } + return &builtinField{CallExpr: call, collate: ast.cfg.Collation}, nil + case "elt": + if len(args) < 2 { + return nil, argError(method) + } + return &builtinElt{CallExpr: call, collate: ast.cfg.Collation}, nil case "lower", "lcase": if len(args) != 1 { return nil, argError(method) @@ -295,6 +318,16 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { return nil, argError(method) } return &builtinASCII{CallExpr: call}, nil + case "reverse": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinReverse{CallExpr: call, collate: ast.cfg.Collation}, nil + case "space": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSpace{CallExpr: call, collate: ast.cfg.Collation}, nil case 
"ord": if len(args) != 1 { return nil, argError(method) @@ -414,6 +447,36 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { return nil, argError(method) } return &builtinMonthName{CallExpr: call, collate: ast.cfg.Collation}, nil + case "last_day": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinLastDay{CallExpr: call}, nil + case "to_days": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinToDays{CallExpr: call}, nil + case "from_days": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinFromDays{CallExpr: call}, nil + case "sec_to_time": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinSecToTime{CallExpr: call}, nil + case "time_to_sec": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinTimeToSec{CallExpr: call}, nil + case "to_seconds": + if len(args) != 1 { + return nil, argError(method) + } + return &builtinToSeconds{CallExpr: call}, nil case "quarter": if len(args) != 1 { return nil, argError(method) @@ -574,6 +637,17 @@ func (ast *astCompiler) translateFuncExpr(fn *sqlparser.FuncExpr) (IR, error) { return nil, argError(method) } return &builtinStrcmp{CallExpr: call, collate: ast.cfg.Collation}, nil + case "instr": + if len(args) != 2 { + return nil, argError(method) + } + call = CallExpr{Arguments: []IR{call.Arguments[1], call.Arguments[0]}, Method: method} + return &builtinLocate{CallExpr: call, collate: ast.cfg.Collation}, nil + case "replace": + if len(args) != 3 { + return nil, argError(method) + } + return &builtinReplace{CallExpr: call, collate: ast.cfg.Collation}, nil default: return nil, translateExprNotSupported(fn) } @@ -603,10 +677,7 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (IR, error) { } if call.As != nil { ws.Cast = strings.ToLower(call.As.Type) - ws.Len, ws.HasLen, err = ast.translateIntegral(call.As.Length) - if err != nil { - return nil, err - } + ws.Len = 
call.As.Length } return ws, nil @@ -699,7 +770,7 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (IR, error) { case *sqlparser.CurTimeFuncExpr: if call.Fsp > 6 { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision 12 specified for '%s'. Maximum is 6.", call.Name.String()) + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Too-big precision %d specified for '%s'. Maximum is 6.", call.Fsp, call.Name.String()) } var cexpr = CallExpr{Arguments: nil, Method: call.Name.String()} @@ -747,6 +818,56 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (IR, error) { trim: call.Type, }, nil + case *sqlparser.SubstrExpr: + var args []IR + str, err := ast.translateExpr(call.Name) + if err != nil { + return nil, err + } + args = append(args, str) + pos, err := ast.translateExpr(call.From) + if err != nil { + return nil, err + } + args = append(args, pos) + + if call.To != nil { + to, err := ast.translateExpr(call.To) + if err != nil { + return nil, err + } + args = append(args, to) + } + var cexpr = CallExpr{Arguments: args, Method: "SUBSTRING"} + return &builtinSubstring{ + CallExpr: cexpr, + collate: ast.cfg.Collation, + }, nil + case *sqlparser.LocateExpr: + var args []IR + substr, err := ast.translateExpr(call.SubStr) + if err != nil { + return nil, err + } + args = append(args, substr) + str, err := ast.translateExpr(call.Str) + if err != nil { + return nil, err + } + args = append(args, str) + + if call.Pos != nil { + to, err := ast.translateExpr(call.Pos) + if err != nil { + return nil, err + } + args = append(args, to) + } + var cexpr = CallExpr{Arguments: args, Method: "LOCATE"} + return &builtinLocate{ + CallExpr: cexpr, + collate: ast.cfg.Collation, + }, nil case *sqlparser.IntervalDateExpr: var err error args := make([]IR, 2) @@ -929,6 +1050,61 @@ func (ast *astCompiler) translateCallable(call sqlparser.Callable) (IR, error) { return &builtinRegexpReplace{ CallExpr: CallExpr{Arguments: 
args, Method: "REGEXP_REPLACE"}, }, nil + + case *sqlparser.InsertExpr: + str, err := ast.translateExpr(call.Str) + if err != nil { + return nil, err + } + + pos, err := ast.translateExpr(call.Pos) + if err != nil { + return nil, err + } + + len, err := ast.translateExpr(call.Len) + if err != nil { + return nil, err + } + + newstr, err := ast.translateExpr(call.NewStr) + if err != nil { + return nil, err + } + + args := []IR{str, pos, len, newstr} + + var cexpr = CallExpr{Arguments: args, Method: "INSERT"} + return &builtinInsert{ + CallExpr: cexpr, + collate: ast.cfg.Collation, + }, nil + case *sqlparser.CharExpr: + args := make([]IR, 0, len(call.Exprs)) + for _, expr := range call.Exprs { + arg, err := ast.translateExpr(expr) + if err != nil { + return nil, err + } + args = append(args, arg) + } + + var cexpr = CallExpr{Arguments: args, Method: "CHAR"} + var coll collations.ID + if call.Charset == "" { + coll = collations.CollationBinaryID + } else { + var err error + coll, err = ast.translateConvertCharset(call.Charset, false) + if err != nil { + return nil, err + } + } + + return &builtinChar{ + CallExpr: cexpr, + collate: coll, + }, nil default: return nil, translateExprNotSupported(call) } diff --git a/go/vt/vtgate/evalengine/translate_convert.go b/go/vt/vtgate/evalengine/translate_convert.go index 29216716b2b..b47aa6f1fd9 100644 --- a/go/vt/vtgate/evalengine/translate_convert.go +++ b/go/vt/vtgate/evalengine/translate_convert.go @@ -32,7 +32,7 @@ func (ast *astCompiler) binaryCollationForCollation(collation collations.ID) col if binary == nil { return collations.Unknown } - return collations.Local().BinaryCollationForCharset(binary.Charset().Name()) + return ast.cfg.Environment.CollationEnv().BinaryCollationForCharset(binary.Charset().Name()) } func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (collations.ID, error) { @@ -47,7 +47,7 @@ func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (co return 
collation, nil } charset = strings.ToLower(charset) - collationID := collations.Local().DefaultCollationForCharset(charset) + collationID := ast.cfg.Environment.CollationEnv().DefaultCollationForCharset(charset) if collationID == collations.Unknown { return collations.Unknown, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown character set: '%s'", charset) } @@ -66,39 +66,36 @@ func (ast *astCompiler) translateConvertExpr(expr sqlparser.Expr, convertType *s err error ) + convert.CollationEnv = ast.cfg.Environment.CollationEnv() convert.Inner, err = ast.translateExpr(expr) if err != nil { return nil, err } - convert.Length, convert.HasLength, err = ast.translateIntegral(convertType.Length) - if err != nil { - return nil, err - } - - convert.Scale, convert.HasScale, err = ast.translateIntegral(convertType.Scale) - if err != nil { - return nil, err - } - + convert.Length = convertType.Length + convert.Scale = convertType.Scale convert.Type = strings.ToUpper(convertType.Type) switch convert.Type { case "DECIMAL": - if convert.Length < convert.Scale { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, - "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%s').", - "", // TODO: column name - ) - } - if convert.Length > decimal.MyMaxPrecision { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, - "Too-big precision %d specified for '%s'. Maximum is %d.", - convert.Length, sqlparser.String(expr), decimal.MyMaxPrecision) - } - if convert.Scale > decimal.MyMaxScale { - return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, - "Too big scale %d specified for column '%s'. Maximum is %d.", - convert.Scale, sqlparser.String(expr), decimal.MyMaxScale) + if convert.Length != nil { + if *convert.Length > decimal.MyMaxPrecision { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "Too-big precision %d specified for '%s'. 
Maximum is %d.", + *convert.Length, sqlparser.String(expr), decimal.MyMaxPrecision) + } + if convert.Scale != nil { + if *convert.Scale > decimal.MyMaxScale { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "Too big scale %d specified for column '%s'. Maximum is %d.", + *convert.Scale, sqlparser.String(expr), decimal.MyMaxScale) + } + if *convert.Length < *convert.Scale { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%s').", + "", // TODO: column name + ) + } + } } case "NCHAR": convert.Collation = collations.CollationUtf8mb3ID @@ -123,6 +120,7 @@ func (ast *astCompiler) translateConvertUsingExpr(expr *sqlparser.ConvertUsingEx err error ) + using.CollationEnv = ast.cfg.Environment.CollationEnv() using.Inner, err = ast.translateExpr(expr.Expr) if err != nil { return nil, err diff --git a/go/vt/vtgate/evalengine/translate_simplify.go b/go/vt/vtgate/evalengine/translate_simplify.go index 2537ad20b2b..6af7f7646a0 100644 --- a/go/vt/vtgate/evalengine/translate_simplify.go +++ b/go/vt/vtgate/evalengine/translate_simplify.go @@ -26,6 +26,10 @@ func (expr *BindVariable) constant() bool { return false } +func (expr *TupleBindVariable) constant() bool { + return false +} + func (expr *Column) constant() bool { return false } @@ -55,6 +59,10 @@ func (expr *BindVariable) simplify(_ *ExpressionEnv) error { return nil } +func (expr *TupleBindVariable) simplify(_ *ExpressionEnv) error { + return nil +} + func (expr *Column) simplify(_ *ExpressionEnv) error { return nil } diff --git a/go/vt/vtgate/evalengine/translate_test.go b/go/vt/vtgate/evalengine/translate_test.go index 377f34db8f2..3702230e22e 100644 --- a/go/vt/vtgate/evalengine/translate_test.go +++ b/go/vt/vtgate/evalengine/translate_test.go @@ -20,10 +20,12 @@ import ( "context" "strings" "testing" + "time" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" + 
"vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -112,9 +114,10 @@ func TestTranslateSimplification(t *testing.T) { {"json->>\"$.c\"", ok("JSON_UNQUOTE(JSON_EXTRACT(`json`, '$.c'))"), ok("JSON_UNQUOTE(JSON_EXTRACT(`json`, '$.c'))")}, } + venv := vtenv.NewTestEnv() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - stmt, err := sqlparser.Parse("select " + tc.expression) + stmt, err := venv.Parser().Parse("select " + tc.expression) if err != nil { t.Fatal(err) } @@ -125,7 +128,8 @@ func TestTranslateSimplification(t *testing.T) { cfg := &Config{ ResolveColumn: fields.Column, - Collation: collations.Default(), + Collation: venv.CollationEnv().DefaultConnectionCharset(), + Environment: venv, NoConstantFolding: true, NoCompilation: true, } @@ -295,13 +299,17 @@ func TestEvaluate(t *testing.T) { expected: False, }} + venv := vtenv.NewTestEnv() for _, test := range tests { t.Run(test.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + test.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - sqltypesExpr, err := Translate(astExpr, &Config{Collation: collations.Default()}) + sqltypesExpr, err := Translate(astExpr, &Config{ + Collation: venv.CollationEnv().DefaultConnectionCharset(), + Environment: venv, + }) require.Nil(t, err) require.NotNil(t, sqltypesExpr) env := NewExpressionEnv(context.Background(), map[string]*querypb.BindVariable{ @@ -311,14 +319,14 @@ func TestEvaluate(t *testing.T) { "uint32_bind_variable": sqltypes.Uint32BindVariable(21), "uint64_bind_variable": sqltypes.Uint64BindVariable(22), "float_bind_variable": sqltypes.Float64BindVariable(2.2), - }, nil) + }, NewEmptyVCursor(venv, time.Local)) // When r, err := env.Evaluate(sqltypesExpr) // Then require.NoError(t, err) - assert.Equal(t, test.expected, 
r.Value(collations.Default()), "expected %s", test.expected.String()) + assert.Equal(t, test.expected, r.Value(collations.MySQL8().DefaultConnectionCharset()), "expected %s", test.expected.String()) }) } } @@ -340,18 +348,22 @@ func TestEvaluateTuple(t *testing.T) { expected: []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewVarChar("2"), sqltypes.NewDecimal("4.0")}, }} + venv := vtenv.NewTestEnv() for _, test := range tests { t.Run(test.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + test.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - sqltypesExpr, err := Translate(astExpr, &Config{Collation: collations.Default()}) + sqltypesExpr, err := Translate(astExpr, &Config{ + Collation: venv.CollationEnv().DefaultConnectionCharset(), + Environment: venv, + }) require.Nil(t, err) require.NotNil(t, sqltypesExpr) // When - r, err := EmptyExpressionEnv().Evaluate(sqltypesExpr) + r, err := EmptyExpressionEnv(venv).Evaluate(sqltypesExpr) // Then require.NoError(t, err) @@ -377,13 +389,17 @@ func TestTranslationFailures(t *testing.T) { }, } + venv := vtenv.NewTestEnv() for _, testcase := range testcases { t.Run(testcase.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + testcase.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + testcase.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - _, err = Translate(astExpr, &Config{Collation: collations.Default()}) + _, err = Translate(astExpr, &Config{ + Collation: venv.CollationEnv().DefaultConnectionCharset(), + Environment: venv, + }) require.EqualError(t, err, testcase.expectedErr) }) } @@ -410,16 +426,21 @@ func TestCardinalityWithBindVariables(t *testing.T) { {expr: `1 IN ::bar`}, } + venv := vtenv.NewTestEnv() for _, testcase := 
range testcases { t.Run(testcase.expr, func(t *testing.T) { err := func() error { - stmt, err := sqlparser.Parse("select " + testcase.expr) + stmt, err := sqlparser.NewTestParser().Parse("select " + testcase.expr) if err != nil { return err } astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - _, err = Translate(astExpr, &Config{Collation: collations.Default(), NoCompilation: true}) + _, err = Translate(astExpr, &Config{ + Collation: venv.CollationEnv().DefaultConnectionCharset(), + Environment: venv, + NoCompilation: true, + }) return err }() diff --git a/go/vt/vtgate/evalengine/vm.go b/go/vt/vtgate/evalengine/vm.go index df1d6aa6405..28c3af70e0e 100644 --- a/go/vt/vtgate/evalengine/vm.go +++ b/go/vt/vtgate/evalengine/vm.go @@ -87,12 +87,12 @@ func (env *ExpressionEnv) EvaluateVM(p *CompiledExpr) (EvalResult, error) { goto err } } - return EvalResult{env.vm.stack[env.vm.sp-1]}, nil + return EvalResult{v: env.vm.stack[env.vm.sp-1], collationEnv: env.collationEnv}, nil err: if env.vm.err == errDeoptimize { e, err := p.ir.eval(env) - return EvalResult{e}, err + return EvalResult{v: e, collationEnv: env.collationEnv}, err } - return EvalResult{}, env.vm.err + return EvalResult{collationEnv: env.collationEnv}, env.vm.err } diff --git a/go/vt/vtgate/evalengine/weights.go b/go/vt/vtgate/evalengine/weights.go index fa7fa7e11a6..3eb9aa290c5 100644 --- a/go/vt/vtgate/evalengine/weights.go +++ b/go/vt/vtgate/evalengine/weights.go @@ -41,11 +41,11 @@ import ( // externally communicates with the `WEIGHT_STRING` function, so that we // can also use this to order / sort other types like Float and Decimal // as well. 
-func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { +func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int, values *EnumSetValues, sqlmode SQLMode) ([]byte, bool, error) { // We optimize here for the case where we already have the desired type. // Otherwise, we fall back to the general evalengine conversion logic. if v.Type() != coerceTo { - return fallbackWeightString(dst, v, coerceTo, col, length, precision) + return fallbackWeightString(dst, v, coerceTo, col, length, precision, values, sqlmode) } switch { @@ -116,13 +116,17 @@ func WeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col coll return dst, false, err } return j.WeightString(dst), false, nil + case coerceTo == sqltypes.Enum: + return evalWeightString(dst, newEvalEnum(v.Raw(), values), length, precision) + case coerceTo == sqltypes.Set: + return evalWeightString(dst, newEvalSet(v.Raw(), values), length, precision) default: - return fallbackWeightString(dst, v, coerceTo, col, length, precision) + return fallbackWeightString(dst, v, coerceTo, col, length, precision, values, sqlmode) } } -func fallbackWeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int) ([]byte, bool, error) { - e, err := valueToEvalCast(v, coerceTo, col) +func fallbackWeightString(dst []byte, v sqltypes.Value, coerceTo sqltypes.Type, col collations.ID, length, precision int, values *EnumSetValues, sqlmode SQLMode) ([]byte, bool, error) { + e, err := valueToEvalCast(v, coerceTo, col, values, sqlmode) if err != nil { return dst, false, err } @@ -174,6 +178,14 @@ func evalWeightString(dst []byte, e eval, length, precision int) ([]byte, bool, return e.dt.WeightString(dst), true, nil case *evalJSON: return e.WeightString(dst), false, nil + case *evalEnum: + raw := uint64(e.value) + raw = raw ^ (1 << 63) + return 
binary.BigEndian.AppendUint64(dst, raw), true, nil + case *evalSet: + raw := e.set + raw = raw ^ (1 << 63) + return binary.BigEndian.AppendUint64(dst, raw), true, nil } return dst, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected type %v", e.SQLType()) @@ -192,7 +204,7 @@ func TinyWeighter(f *querypb.Field, collation collations.ID) func(v *sqltypes.Va case sqltypes.IsNull(f.Type): return nil - case sqltypes.IsSigned(f.Type): + case sqltypes.IsSigned(f.Type), f.Type == sqltypes.Enum, f.Type == sqltypes.Set: return func(v *sqltypes.Value) { i, err := v.ToInt64() if err != nil { @@ -301,7 +313,6 @@ func TinyWeighter(f *querypb.Field, collation collations.ID) func(v *sqltypes.Va copy(w32[:4], j.WeightString(nil)) v.SetTinyWeight(binary.BigEndian.Uint32(w32[:4])) } - default: return nil } diff --git a/go/vt/vtgate/evalengine/weights_test.go b/go/vt/vtgate/evalengine/weights_test.go index 0dee4c72d03..95764d3c3a4 100644 --- a/go/vt/vtgate/evalengine/weights_test.go +++ b/go/vt/vtgate/evalengine/weights_test.go @@ -32,11 +32,12 @@ func TestTinyWeightStrings(t *testing.T) { const Length = 10000 var cases = []struct { - typ sqltypes.Type - gen func() sqltypes.Value - col collations.ID - len int - prec int + typ sqltypes.Type + gen func() sqltypes.Value + col collations.ID + len int + prec int + values *EnumSetValues }{ {typ: sqltypes.Int32, gen: sqltypes.RandomGenerators[sqltypes.Int32], col: collations.CollationBinaryID}, {typ: sqltypes.Int64, gen: sqltypes.RandomGenerators[sqltypes.Int64], col: collations.CollationBinaryID}, @@ -47,6 +48,8 @@ func TestTinyWeightStrings(t *testing.T) { {typ: sqltypes.VarBinary, gen: sqltypes.RandomGenerators[sqltypes.VarBinary], col: collations.CollationBinaryID}, {typ: sqltypes.Decimal, gen: sqltypes.RandomGenerators[sqltypes.Decimal], col: collations.CollationBinaryID, len: 20, prec: 10}, {typ: sqltypes.TypeJSON, gen: sqltypes.RandomGenerators[sqltypes.TypeJSON], col: collations.CollationBinaryID}, + {typ: sqltypes.Enum, gen: 
sqltypes.RandomGenerators[sqltypes.Enum], col: collations.CollationBinaryID, values: &EnumSetValues{"'xxsmall'", "'xsmall'", "'small'", "'medium'", "'large'", "'xlarge'", "'xxlarge'"}}, + {typ: sqltypes.Set, gen: sqltypes.RandomGenerators[sqltypes.Set], col: collations.CollationBinaryID, values: &EnumSetValues{"'a'", "'b'", "'c'", "'d'", "'e'", "'f'", "'g'"}}, } for _, tc := range cases { @@ -77,7 +80,7 @@ func TestTinyWeightStrings(t *testing.T) { return cmp } - cmp, err := NullsafeCompare(a, b, tc.col) + cmp, err := NullsafeCompare(a, b, collations.MySQL8(), tc.col, tc.values) require.NoError(t, err) fullComparisons++ @@ -88,7 +91,7 @@ func TestTinyWeightStrings(t *testing.T) { a := items[i] b := items[i+1] - cmp, err := NullsafeCompare(a, b, tc.col) + cmp, err := NullsafeCompare(a, b, collations.MySQL8(), tc.col, tc.values) require.NoError(t, err) if cmp > 0 { @@ -110,12 +113,13 @@ func TestWeightStrings(t *testing.T) { } var cases = []struct { - name string - gen func() sqltypes.Value - types []sqltypes.Type - col collations.ID - len int - prec int + name string + gen func() sqltypes.Value + types []sqltypes.Type + col collations.ID + len int + prec int + values *EnumSetValues }{ {name: "int64", gen: sqltypes.RandomGenerators[sqltypes.Int64], types: []sqltypes.Type{sqltypes.Int64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, {name: "uint64", gen: sqltypes.RandomGenerators[sqltypes.Uint64], types: []sqltypes.Type{sqltypes.Uint64, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, @@ -128,6 +132,8 @@ func TestWeightStrings(t *testing.T) { {name: "datetime", gen: sqltypes.RandomGenerators[sqltypes.Datetime], types: []sqltypes.Type{sqltypes.Datetime, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, {name: "timestamp", gen: sqltypes.RandomGenerators[sqltypes.Timestamp], types: []sqltypes.Type{sqltypes.Timestamp, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, 
{name: "time", gen: sqltypes.RandomGenerators[sqltypes.Time], types: []sqltypes.Type{sqltypes.Time, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID}, + {name: "enum", gen: sqltypes.RandomGenerators[sqltypes.Enum], types: []sqltypes.Type{sqltypes.Enum, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID, values: &EnumSetValues{"'xxsmall'", "'xsmall'", "'small'", "'medium'", "'large'", "'xlarge'", "'xxlarge'"}}, + {name: "set", gen: sqltypes.RandomGenerators[sqltypes.Set], types: []sqltypes.Type{sqltypes.Set, sqltypes.VarChar, sqltypes.TypeJSON}, col: collations.CollationBinaryID, values: &EnumSetValues{"'a'", "'b'", "'c'", "'d'", "'e'", "'f'", "'g'"}}, } for _, tc := range cases { @@ -136,7 +142,7 @@ func TestWeightStrings(t *testing.T) { items := make([]item, 0, Length) for i := 0; i < Length; i++ { v := tc.gen() - w, _, err := WeightString(nil, v, typ, tc.col, tc.len, tc.prec) + w, _, err := WeightString(nil, v, typ, tc.col, tc.len, tc.prec, tc.values, 0) require.NoError(t, err) items = append(items, item{value: v, weight: string(w)}) @@ -156,12 +162,12 @@ func TestWeightStrings(t *testing.T) { a := items[i] b := items[i+1] - v1, err := valueToEvalCast(a.value, typ, tc.col) + v1, err := valueToEvalCast(a.value, typ, tc.col, tc.values, 0) require.NoError(t, err) - v2, err := valueToEvalCast(b.value, typ, tc.col) + v2, err := valueToEvalCast(b.value, typ, tc.col, tc.values, 0) require.NoError(t, err) - cmp, err := evalCompareNullSafe(v1, v2) + cmp, err := evalCompareNullSafe(v1, v2, collations.MySQL8()) require.NoError(t, err) if cmp > 0 { diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 152533f2c0d..2679cf5a2fd 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -30,18 +30,19 @@ import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/cache/theine" - "vitess.io/vitess/go/streamlog" - "vitess.io/vitess/go/vt/vthash" - "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/cache/theine" + 
"vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -52,6 +53,7 @@ import ( "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -61,6 +63,7 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" "vitess.io/vitess/go/vt/vtgate/vtgateservice" + "vitess.io/vitess/go/vt/vthash" ) var ( @@ -73,6 +76,10 @@ var ( queriesProcessedByTable = stats.NewCountersWithMultiLabels("QueriesProcessedByTable", "Queries processed at vtgate by plan type, keyspace and table", []string{"Plan", "Keyspace", "Table"}) queriesRoutedByTable = stats.NewCountersWithMultiLabels("QueriesRoutedByTable", "Queries routed from vtgate to vttablet by plan type, keyspace and table", []string{"Plan", "Keyspace", "Table"}) + + exceedMemoryRowsLogger = logutil.NewThrottledLogger("ExceedMemoryRows", 1*time.Minute) + + errorTransform errorTransformer = nullErrorTransformer{} ) const ( @@ -93,6 +100,7 @@ func init() { // Executor is the engine that executes queries by utilizing // the abilities of the underlying vttablets. type Executor struct { + env *vtenv.Environment serv srvtopo.Server cell string resolver *Resolver @@ -142,6 +150,7 @@ func DefaultPlanCache() *PlanCache { // NewExecutor creates a new Executor. 
func NewExecutor( ctx context.Context, + env *vtenv.Environment, serv srvtopo.Server, cell string, resolver *Resolver, @@ -154,6 +163,7 @@ func NewExecutor( warmingReadsPercent int, ) *Executor { e := &Executor{ + env: env, serv: serv, cell: cell, resolver: resolver, @@ -177,6 +187,7 @@ func NewExecutor( serv: serv, cell: cell, schema: e.schemaTracker, + parser: env.Parser(), } serv.WatchSrvVSchema(ctx, cell, e.vm.VSchemaUpdate) @@ -223,16 +234,24 @@ func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn } if result != nil && len(result.Rows) > warnMemoryRows { warnings.Add("ResultsExceeded", 1) - piiSafeSQL, err := sqlparser.RedactSQLQuery(sql) + piiSafeSQL, err := e.env.Parser().RedactSQLQuery(sql) if err != nil { piiSafeSQL = logStats.StmtType } - log.Warningf("%q exceeds warning threshold of max memory rows: %v. Actual memory rows: %v", piiSafeSQL, warnMemoryRows, len(result.Rows)) + warningMsg := fmt.Sprintf("%q exceeds warning threshold of max memory rows: %v. Actual memory rows: %v", piiSafeSQL, warnMemoryRows, len(result.Rows)) + exceedMemoryRowsLogger.Warningf(warningMsg) + safeSession.RecordWarning(&querypb.QueryWarning{ + Code: uint32(sqlerror.EROutOfMemory), + Message: warningMsg, + }) } logStats.SaveEndTime() e.queryLogger.Send(logStats) + + err = errorTransform.TransformError(err) err = vterrors.TruncateError(err, truncateErrorLen) + return result, err } @@ -357,17 +376,25 @@ func (e *Executor) StreamExecute( saveSessionStats(safeSession, srr.stmtType, srr.rowsAffected, srr.insertID, srr.rowsReturned, err) if srr.rowsReturned > warnMemoryRows { warnings.Add("ResultsExceeded", 1) - piiSafeSQL, err := sqlparser.RedactSQLQuery(sql) + piiSafeSQL, err := e.env.Parser().RedactSQLQuery(sql) if err != nil { piiSafeSQL = logStats.StmtType } - log.Warningf("%q exceeds warning threshold of max memory rows: %v. 
Actual memory rows: %v", piiSafeSQL, warnMemoryRows, srr.rowsReturned) + warningMsg := fmt.Sprintf("%q exceeds warning threshold of max memory rows: %v. Actual memory rows: %v", piiSafeSQL, warnMemoryRows, srr.rowsReturned) + exceedMemoryRowsLogger.Warningf(warningMsg) + safeSession.RecordWarning(&querypb.QueryWarning{ + Code: uint32(sqlerror.EROutOfMemory), + Message: warningMsg, + }) } logStats.SaveEndTime() e.queryLogger.Send(logStats) - return vterrors.TruncateError(err, truncateErrorLen) + err = errorTransform.TransformError(err) + err = vterrors.TruncateError(err, truncateErrorLen) + + return err } func canReturnRows(stmtType sqlparser.StatementType) bool { @@ -456,7 +483,11 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlpars }) bindVars[key] = sqltypes.Int64BindVariable(v) case sysvars.TransactionMode.Name: - bindVars[key] = sqltypes.StringBindVariable(session.TransactionMode.String()) + txMode := session.TransactionMode + if txMode == vtgatepb.TransactionMode_UNSPECIFIED { + txMode = getTxMode() + } + bindVars[key] = sqltypes.StringBindVariable(txMode.String()) case sysvars.Workload.Name: var v string ifOptionsExist(session, func(options *querypb.ExecuteOptions) { @@ -499,16 +530,20 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds *sqlpars bindVars[key] = sqltypes.StringBindVariable(mysqlSocketPath()) default: if value, hasSysVar := session.SystemVariables[sysVar]; hasSysVar { - expr, err := sqlparser.ParseExpr(value) + expr, err := e.env.Parser().ParseExpr(value) if err != nil { return err } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + Collation: vcursor.collation, + Environment: e.env, + SQLMode: evalengine.ParseSQLMode(vcursor.SQLMode()), + }) if err != nil { return err } - evaluated, err := evalengine.EmptyExpressionEnv().Evaluate(evalExpr) + evaluated, err := evalengine.NewExpressionEnv(context.Background(), nil, 
vcursor).Evaluate(evalExpr) if err != nil { return err } @@ -917,7 +952,7 @@ func (e *Executor) showVitessReplicationStatus(ctx context.Context, filter *sqlp tabletHostPort := ts.GetTabletHostPort() throttlerStatus, err := getTabletThrottlerStatus(tabletHostPort) if err != nil { - log.Warningf("Could not get throttler status from %s: %v", tabletHostPort, err) + log.Warningf("Could not get throttler status from %s: %v", topoproto.TabletAliasString(ts.Tablet.Alias), err) } replSourceHost := "" @@ -925,19 +960,50 @@ func (e *Executor) showVitessReplicationStatus(ctx context.Context, filter *sqlp replIOThreadHealth := "" replSQLThreadHealth := "" replLastError := "" - replLag := int64(-1) - sql := "show slave status" + replLag := "-1" // A string to support NULL as a value + replicaQueries, _ := capabilities.MySQLVersionHasCapability(e.env.MySQLVersion(), capabilities.ReplicaTerminologyCapability) + sql := "show replica status" + sourceHostField := "Source_Host" + sourcePortField := "Source_Port" + replicaIORunningField := "Replica_IO_Running" + replicaSQLRunningField := "Replica_SQL_Running" + secondsBehindSourceField := "Seconds_Behind_Source" + if !replicaQueries { + sql = "show slave status" + sourceHostField = "Master_Host" + sourcePortField = "Master_Port" + replicaIORunningField = "Slave_IO_Running" + replicaSQLRunningField = "Slave_SQL_Running" + secondsBehindSourceField = "Seconds_Behind_Master" + } results, err := e.txConn.tabletGateway.Execute(ctx, ts.Target, sql, nil, 0, 0, nil) if err != nil || results == nil { log.Warningf("Could not get replication status from %s: %v", tabletHostPort, err) } else if row := results.Named().Row(); row != nil { - replSourceHost = row["Master_Host"].ToString() - replSourcePort, _ = row["Master_Port"].ToInt64() - replIOThreadHealth = row["Slave_IO_Running"].ToString() - replSQLThreadHealth = row["Slave_SQL_Running"].ToString() + replSourceHost = row[sourceHostField].ToString() + replSourcePort, _ = 
row[sourcePortField].ToInt64() + replIOThreadHealth = row[replicaIORunningField].ToString() + replSQLThreadHealth = row[replicaSQLRunningField].ToString() replLastError = row["Last_Error"].ToString() - if ts.Stats != nil { - replLag = int64(ts.Stats.ReplicationLagSeconds) + // We cannot check the tablet's tabletenv config from here so + // we only use the tablet's stat -- which is managed by the + // ReplicationTracker -- if we can tell that it's enabled, + // meaning that it has a non-zero value. If it's actually + // enabled AND zero (rather than the zeroval), then mysqld + // should also return 0 so in this case the value is correct + // and equivalent either way. The only reason that we would + // want to use the ReplicationTracker based value, when we + // can, is because the polling method allows us to get the + // estimated lag value when replication is not running (based + // on how long we've seen that it's not been running). + if ts.Stats != nil && ts.Stats.ReplicationLagSeconds > 0 { // Use the value we get from the ReplicationTracker + replLag = fmt.Sprintf("%d", ts.Stats.ReplicationLagSeconds) + } else { // Use the value from mysqld + if row[secondsBehindSourceField].IsNull() { + replLag = strings.ToUpper(sqltypes.NullStr) // Uppercase to match mysqld's output in SHOW REPLICA STATUS + } else { + replLag = row[secondsBehindSourceField].ToString() + } } } replicationHealth := fmt.Sprintf("{\"EventStreamRunning\":\"%s\",\"EventApplierRunning\":\"%s\",\"LastError\":\"%s\"}", replIOThreadHealth, replSQLThreadHealth, replLastError) @@ -950,7 +1016,7 @@ func (e *Executor) showVitessReplicationStatus(ctx context.Context, filter *sqlp ts.Tablet.Hostname, fmt.Sprintf("%s:%d", replSourceHost, replSourcePort), replicationHealth, - fmt.Sprintf("%d", replLag), + replLag, throttlerStatus, )) } @@ -1042,6 +1108,7 @@ func (e *Executor) getPlan( vcursor.SetIgnoreMaxMemoryRows(sqlparser.IgnoreMaxMaxMemoryRowsDirective(stmt)) 
vcursor.SetConsolidator(sqlparser.Consolidator(stmt)) vcursor.SetWorkloadName(sqlparser.GetWorkloadNameFromStatement(stmt)) + vcursor.UpdateForeignKeyChecksState(sqlparser.ForeignKeyChecksState(stmt)) priority, err := sqlparser.GetPriorityFromStatement(stmt) if err != nil { return nil, err @@ -1066,6 +1133,7 @@ func (e *Executor) getPlan( vcursor.safeSession.getSelectLimit(), setVarComment, vcursor.safeSession.SystemVariables, + vcursor.GetForeignKeyChecksState(), vcursor, ) if err != nil { @@ -1292,7 +1360,11 @@ func (e *Executor) Prepare(ctx context.Context, method string, safeSession *Safe logStats.SaveEndTime() e.queryLogger.Send(logStats) } - return fld, vterrors.TruncateError(err, truncateErrorLen) + + err = errorTransform.TransformError(err) + err = vterrors.TruncateError(err, truncateErrorLen) + + return fld, err } func (e *Executor) prepare(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, logStats *logstats.LogStats) ([]*querypb.Field, error) { @@ -1335,7 +1407,7 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, query, comments := sqlparser.SplitMarginComments(sql) vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.env.Parser()) if err != nil { return nil, err } @@ -1370,8 +1442,8 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, return qr.Fields, err } -func parseAndValidateQuery(query string) (sqlparser.Statement, *sqlparser.ReservedVars, error) { - stmt, reserved, err := sqlparser.Parse2(query) +func parseAndValidateQuery(query string, parser *sqlparser.Parser) (sqlparser.Statement, *sqlparser.ReservedVars, error) { + stmt, reserved, err := parser.Parse2(query) if err != nil { return nil, nil, err } @@ -1463,11 +1535,14 @@ func (e 
*Executor) checkThatPlanIsValid(stmt sqlparser.Statement, plan *engine.P return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "plan includes scatter, which is disallowed using the `no_scatter` command line argument") } +// getTabletThrottlerStatus uses HTTP to get the throttler status +// on a tablet. It uses HTTP because the CheckThrottler RPC is a +// tmclient RPC and you cannot use tmclient outside of a tablet. func getTabletThrottlerStatus(tabletHostPort string) (string, error) { client := http.Client{ Timeout: 100 * time.Millisecond, } - resp, err := client.Get(fmt.Sprintf("http://%s/throttler/check?app=vtgate", tabletHostPort)) + resp, err := client.Get(fmt.Sprintf("http://%s/throttler/check-self", tabletHostPort)) if err != nil { return "", err } @@ -1506,7 +1581,7 @@ func (e *Executor) ReleaseLock(ctx context.Context, session *SafeSession) error // planPrepareStmt implements the IExecutor interface func (e *Executor) planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) { - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.env.Parser()) if err != nil { return nil, nil, err } @@ -1539,3 +1614,18 @@ func (e *Executor) Close() { topo.Close() e.plans.Close() } + +func (e *Executor) environment() *vtenv.Environment { + return e.env +} + +type ( + errorTransformer interface { + TransformError(err error) error + } + nullErrorTransformer struct{} +) + +func (nullErrorTransformer) TransformError(err error) error { + return err +} diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 961e6e32eca..3dce4e212ef 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" 
"vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" @@ -532,7 +532,7 @@ func TestUpdateMultiOwned(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult( @@ -876,8 +876,10 @@ func TestUpdateUseHigherCostVindexIfBackfilling(t *testing.T) { Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col, lu_col = 5 from t2_lookup where wo_lu_col = 2 and lu_col in (1, 2) for update", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in (1, 2)", - BindVariables: map[string]*querypb.BindVariable{}, + Sql: "update t2_lookup set lu_col = 5 where wo_lu_col = 2 and lu_col in ::__vals", + BindVariables: map[string]*querypb.BindVariable{ + "__vals": sqltypes.TestBindVariable([]any{int64(1), int64(2)}), + }, }} vars, _ := sqltypes.BuildBindVariable([]any{ @@ -1109,8 +1111,10 @@ func TestDeleteUseHigherCostVindexIfBackfilling(t *testing.T) { Sql: "select id, wo_lu_col, erl_lu_col, srl_lu_col, nrl_lu_col, nv_lu_col, lu_col from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2) for update", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "delete from t2_lookup where wo_lu_col = 1 and lu_col in (1, 2)", - BindVariables: map[string]*querypb.BindVariable{}, + Sql: "delete from t2_lookup where wo_lu_col = 1 and lu_col in ::__vals", + BindVariables: map[string]*querypb.BindVariable{ + "__vals": sqltypes.TestBindVariable([]any{int64(1), int64(2)}), + }, }} vars, _ := sqltypes.BuildBindVariable([]any{ @@ -1406,7 +1410,7 @@ func TestInsertShardedKeyrange(t *testing.T) { TargetString: "@primary", } _, err := executorExec(ctx, executor, session, "insert into keyrange_table(krcol_unique, krcol) values(1, 1)", nil) - require.EqualError(t, err, "could not 
map [INT64(1)] to a unique keyspace id: DestinationKeyRange(-10)") + require.EqualError(t, err, "VT09024: could not map [INT64(1)] to a unique keyspace id: DestinationKeyRange(-10)") } func TestInsertShardedAutocommitLookup(t *testing.T) { @@ -1469,7 +1473,7 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) _, err := executorExecSession(ctx, executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) require.NoError(t, err) @@ -1503,145 +1507,210 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { func TestInsertShardedIgnore(t *testing.T) { executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - // Build the sequence of responses for sbclookup. This should - // match the sequence of queries we validate below. + int1 := sqltypes.Int64BindVariable(1) + int2 := sqltypes.Int64BindVariable(2) + int3 := sqltypes.Int64BindVariable(3) + int4 := sqltypes.Int64BindVariable(4) + int5 := sqltypes.Int64BindVariable(5) + int6 := sqltypes.Int64BindVariable(6) + uint1 := sqltypes.Uint64BindVariable(1) + uint3 := sqltypes.Uint64BindVariable(3) + + var1 := &querypb.BindVariable{Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{Type: int1.Type, Value: int1.Value}}, + } + var2 := &querypb.BindVariable{Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{Type: int2.Type, Value: int2.Value}}, + } + var3 := &querypb.BindVariable{Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{Type: int3.Type, Value: int3.Value}}, + } + var4 := &querypb.BindVariable{Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{Type: int4.Type, Value: int4.Value}}, + } + var5 := &querypb.BindVariable{Type: querypb.Type_TUPLE, + Values: []*querypb.Value{{Type: int5.Type, Value: int5.Value}}, + } + var6 := &querypb.BindVariable{Type: querypb.Type_TUPLE, + 
Values: []*querypb.Value{{Type: int6.Type, Value: int6.Value}}, + } fields := sqltypes.MakeTestFields("b|a", "int64|int64") field := sqltypes.MakeTestFields("a", "int64") - sbclookup.SetResults([]*sqltypes.Result{ - // select music_id - sqltypes.MakeTestResult(fields, "1|1", "3|1", "4|1", "5|1", "6|3"), - // insert ins_lookup - {}, - // select ins_lookup 1 - sqltypes.MakeTestResult(field, "1"), - // select ins_lookup 3 - {}, - // select ins_lookup 4 - sqltypes.MakeTestResult(field, "4"), - // select ins_lookup 5 - sqltypes.MakeTestResult(field, "5"), - // select ins_lookup 6 - sqltypes.MakeTestResult(field, "6"), - }) - // First row: first shard. - // Second row: will fail because primary vindex will fail to map. - // Third row: will fail because verification will fail on owned vindex after Create. - // Fourth row: will fail because verification will fail on unowned hash vindex. - // Fifth row: first shard. - // Sixth row: second shard (because 3 hash maps to 40-60). - query := "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1), (2, 2, 2), (3, 3, 1), (4, 4, 4), (5, 5, 1), (6, 6, 3)" - session := &vtgatepb.Session{ - TargetString: "@primary", - } - _, err := executorExec(ctx, executor, session, query, nil) - require.NoError(t, err) - wantQueries := []*querypb.BoundQuery{{ - Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0),(:_pv_4, :_owned_4, :_verify_4)", - BindVariables: map[string]*querypb.BindVariable{ - "_pv_0": sqltypes.Int64BindVariable(1), - "_pv_4": sqltypes.Int64BindVariable(5), - "_owned_0": sqltypes.Int64BindVariable(1), - "_owned_4": sqltypes.Int64BindVariable(5), - "_verify_0": sqltypes.Int64BindVariable(1), - "_verify_4": sqltypes.Int64BindVariable(1), - }, - }} - assertQueries(t, sbc1, wantQueries) - wantQueries = []*querypb.BoundQuery{{ - Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_5, :_owned_5, :_verify_5)", - BindVariables: 
map[string]*querypb.BindVariable{ - "_pv_5": sqltypes.Int64BindVariable(6), - "_owned_5": sqltypes.Int64BindVariable(6), - "_verify_5": sqltypes.Int64BindVariable(3), - }, - }} - assertQueries(t, sbc2, wantQueries) + tcases := []struct { + query string + input []*sqltypes.Result - vars, err := sqltypes.BuildBindVariable([]any{ - sqltypes.NewInt64(1), - sqltypes.NewInt64(2), - sqltypes.NewInt64(3), - sqltypes.NewInt64(4), - sqltypes.NewInt64(5), - sqltypes.NewInt64(6), - }) - require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{ - Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", - BindVariables: map[string]*querypb.BindVariable{ - "music_id": vars, - }, - }, { - Sql: "insert ignore into ins_lookup(fromcol, tocol) values (:fromcol_0, :tocol_0), (:fromcol_1, :tocol_1), (:fromcol_2, :tocol_2), (:fromcol_3, :tocol_3), (:fromcol_4, :tocol_4)", - BindVariables: map[string]*querypb.BindVariable{ - "fromcol_0": sqltypes.Int64BindVariable(1), - "tocol_0": sqltypes.Uint64BindVariable(1), - "fromcol_1": sqltypes.Int64BindVariable(3), - "tocol_1": sqltypes.Uint64BindVariable(1), - "fromcol_2": sqltypes.Int64BindVariable(4), - "tocol_2": sqltypes.Uint64BindVariable(1), - "fromcol_3": sqltypes.Int64BindVariable(5), - "tocol_3": sqltypes.Uint64BindVariable(1), - "fromcol_4": sqltypes.Int64BindVariable(6), - "tocol_4": sqltypes.Uint64BindVariable(3), + expectedQueries [3][]*querypb.BoundQuery + errString string + }{{ + // First row: first shard. 
+ query: "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1)", + input: []*sqltypes.Result{ + // select music_id + sqltypes.MakeTestResult(fields, "1|1"), + // insert ins_lookup 1 + sqltypes.MakeTestResult(nil), + // select ins_lookup 1 + sqltypes.MakeTestResult(field, "1"), + }, + expectedQueries: [3][]*querypb.BoundQuery{ + {{ + Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0)", + BindVariables: map[string]*querypb.BindVariable{"_pv_0": int1, "_owned_0": int1, "_verify_0": int1}, + }}, + nil, + {{ + Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", + BindVariables: map[string]*querypb.BindVariable{"music_id": var1}, + }, { + Sql: "insert ignore into ins_lookup(fromcol, tocol) values (:fromcol_0, :tocol_0)", + BindVariables: map[string]*querypb.BindVariable{"fromcol_0": int1, "tocol_0": uint1}, + }, { + Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", + BindVariables: map[string]*querypb.BindVariable{"fromcol": int1, "tocol": uint1}, + }}, }, }, { - Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", - BindVariables: map[string]*querypb.BindVariable{ - "fromcol": sqltypes.Int64BindVariable(1), - "tocol": sqltypes.Uint64BindVariable(1), - }, + // Second row: will fail because primary vindex will fail to map. 
+ query: "insert ignore into insert_ignore_test(pv, owned, verify) values (2, 2, 2)", + input: []*sqltypes.Result{ + // select music_id + sqltypes.MakeTestResult(fields), + }, + expectedQueries: [3][]*querypb.BoundQuery{ + nil, + nil, + {{ + Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", + BindVariables: map[string]*querypb.BindVariable{"music_id": var2}, + }}, + }, + errString: "could not map [INT64(2)] to a keyspace id", }, { - Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", - BindVariables: map[string]*querypb.BindVariable{ - "fromcol": sqltypes.Int64BindVariable(3), - "tocol": sqltypes.Uint64BindVariable(1), + // Third row: will fail because verification will fail on owned vindex after Create. + query: "insert ignore into insert_ignore_test(pv, owned, verify) values (3, 3, 1)", + input: []*sqltypes.Result{ + // select music_id + sqltypes.MakeTestResult(fields, "3|1"), + // insert ins_lookup 3 + sqltypes.MakeTestResult(nil), + // select ins_lookup 3 + sqltypes.MakeTestResult(field), + }, + expectedQueries: [3][]*querypb.BoundQuery{ + nil, + nil, + {{ + Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", + BindVariables: map[string]*querypb.BindVariable{"music_id": var3}, + }, { + Sql: "insert ignore into ins_lookup(fromcol, tocol) values (:fromcol_0, :tocol_0)", + BindVariables: map[string]*querypb.BindVariable{"fromcol_0": int3, "tocol_0": uint1}, + }, { + Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", + BindVariables: map[string]*querypb.BindVariable{"fromcol": int3, "tocol": uint1}, + }}, }, }, { - Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", - BindVariables: map[string]*querypb.BindVariable{ - "fromcol": sqltypes.Int64BindVariable(4), - "tocol": sqltypes.Uint64BindVariable(1), + // Fourth row: will fail because verification will fail on unowned hash vindex. 
+ query: "insert ignore into insert_ignore_test(pv, owned, verify) values (4, 4, 4)", + input: []*sqltypes.Result{ + // select music_id + sqltypes.MakeTestResult(fields, "4|1"), + // insert ins_lookup 4 + sqltypes.MakeTestResult(nil), + // select ins_lookup 4 + sqltypes.MakeTestResult(field, "4"), + sqltypes.MakeTestResult(nil), + }, + expectedQueries: [3][]*querypb.BoundQuery{ + nil, + nil, + {{ + Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", + BindVariables: map[string]*querypb.BindVariable{"music_id": var4}, + }, { + Sql: "insert ignore into ins_lookup(fromcol, tocol) values (:fromcol_0, :tocol_0)", + BindVariables: map[string]*querypb.BindVariable{"fromcol_0": int4, "tocol_0": uint1}, + }, { + Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", + BindVariables: map[string]*querypb.BindVariable{"fromcol": int4, "tocol": uint1}, + }}, }, }, { - Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", - BindVariables: map[string]*querypb.BindVariable{ - "fromcol": sqltypes.Int64BindVariable(5), - "tocol": sqltypes.Uint64BindVariable(1), + // Fifth row: first shard. 
+ query: "insert ignore into insert_ignore_test(pv, owned, verify) values (5, 5, 1)", + input: []*sqltypes.Result{ + // select music_id + sqltypes.MakeTestResult(fields, "5|1"), + // select ins_lookup 5 + sqltypes.MakeTestResult(field, "5"), + }, + expectedQueries: [3][]*querypb.BoundQuery{ + {{ + Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0)", + BindVariables: map[string]*querypb.BindVariable{"_pv_0": int5, "_owned_0": int5, "_verify_0": int1}, + }}, + nil, + {{ + Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", + BindVariables: map[string]*querypb.BindVariable{"music_id": var5}, + }, { + Sql: "insert ignore into ins_lookup(fromcol, tocol) values (:fromcol_0, :tocol_0)", + BindVariables: map[string]*querypb.BindVariable{"fromcol_0": int5, "tocol_0": uint1}, + }, { + Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", + BindVariables: map[string]*querypb.BindVariable{"fromcol": int5, "tocol": uint1}, + }}, }, }, { - Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", - BindVariables: map[string]*querypb.BindVariable{ - "fromcol": sqltypes.Int64BindVariable(6), - "tocol": sqltypes.Uint64BindVariable(3), - }, - }} - assertQueries(t, sbclookup, wantQueries) + // Sixth row: second shard (because 3 hash maps to 40-60). 
+ query: "insert ignore into insert_ignore_test(pv, owned, verify) values (6, 6, 3)", + input: []*sqltypes.Result{ + // select music_id + sqltypes.MakeTestResult(fields, "6|3"), + // select ins_lookup 6 + sqltypes.MakeTestResult(field, "6"), + }, + expectedQueries: [3][]*querypb.BoundQuery{ + nil, + {{ + Sql: "insert ignore into insert_ignore_test(pv, owned, verify) values (:_pv_0, :_owned_0, :_verify_0)", + BindVariables: map[string]*querypb.BindVariable{"_pv_0": int6, "_owned_0": int6, "_verify_0": int3}, + }}, + {{ + Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", + BindVariables: map[string]*querypb.BindVariable{"music_id": var6}, + }, { + Sql: "insert ignore into ins_lookup(fromcol, tocol) values (:fromcol_0, :tocol_0)", + BindVariables: map[string]*querypb.BindVariable{"fromcol_0": int6, "tocol_0": uint3}, + }, { + Sql: "select fromcol from ins_lookup where fromcol = :fromcol and tocol = :tocol", + BindVariables: map[string]*querypb.BindVariable{"fromcol": int6, "tocol": uint3}, + }}, + }, + }} + + session := &vtgatepb.Session{Autocommit: true} + for _, tcase := range tcases { + t.Run(tcase.query, func(t *testing.T) { + // reset + sbc1.Queries = nil + sbc2.Queries = nil + sbclookup.Queries = nil - // Test the 0 rows case, - sbc1.Queries = nil - sbc2.Queries = nil - sbclookup.Queries = nil - sbclookup.SetResults([]*sqltypes.Result{ - {}, - }) - query = "insert ignore into insert_ignore_test(pv, owned, verify) values (1, 1, 1)" - qr, err := executorExec(ctx, executor, session, query, nil) - require.NoError(t, err) - if !qr.Equal(&sqltypes.Result{}) { - t.Errorf("qr: %v, want empty result", qr) + // Build the sequence of responses for sbclookup. This should + // match the sequence of queries we validate below. 
+ sbclookup.SetResults(tcase.input) + _, err := executorExec(ctx, executor, session, tcase.query, nil) + if tcase.errString != "" { + require.ErrorContains(t, err, tcase.errString) + } + utils.MustMatch(t, tcase.expectedQueries[0], sbc1.Queries, "sbc1 queries do not match") + utils.MustMatch(t, tcase.expectedQueries[1], sbc2.Queries, "sbc2 queries do not match") + utils.MustMatch(t, tcase.expectedQueries[2], sbclookup.Queries, "sbclookup queries do not match") + }) } - assertQueries(t, sbc1, nil) - assertQueries(t, sbc2, nil) - vars, err = sqltypes.BuildBindVariable([]any{sqltypes.NewInt64(1)}) - require.NoError(t, err) - wantQueries = []*querypb.BoundQuery{{ - Sql: "select music_id, user_id from music_user_map where music_id in ::music_id for update", - BindVariables: map[string]*querypb.BindVariable{ - "music_id": vars, - }, - }} - assertQueries(t, sbclookup, wantQueries) } func TestInsertOnDupKey(t *testing.T) { @@ -2268,7 +2337,7 @@ func TestInsertBadAutoInc(t *testing.T) { } } ` - executor, _, _, _, ctx := createCustomExecutor(t, vschema) + executor, _, _, _, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) // If auto inc table cannot be found, the table should not be added to vschema. session := &vtgatepb.Session{ @@ -3017,3 +3086,57 @@ func TestInsertReference(t *testing.T) { _, err = executorExec(ctx, executor, session, "insert into TestExecutor.zip_detail(id, status) values (1, 'CLOSED')", nil) require.NoError(t, err) // Gen4 planner can redirect the query to correct source for update when reference table is involved. 
} + +func TestDeleteMultiTable(t *testing.T) { + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) + executor.vschema.Keyspaces["TestExecutor"].Tables["user"].PrimaryKey = sqlparser.Columns{sqlparser.NewIdentifierCI("id")} + + logChan := executor.queryLogger.Subscribe("TestDeleteMultiTable") + defer executor.queryLogger.Unsubscribe(logChan) + + session := &vtgatepb.Session{TargetString: "@primary"} + _, err := executorExec(ctx, executor, session, "delete user from user join music on user.col = music.col where music.user_id = 1", nil) + require.NoError(t, err) + + var dmlVals []*querypb.Value + for i := 0; i < 8; i++ { + dmlVals = append(dmlVals, sqltypes.ValueToProto(sqltypes.NewInt32(1))) + } + + bq := &querypb.BoundQuery{ + Sql: "select 1 from music where music.user_id = 1 and music.col = :user_col", + BindVariables: map[string]*querypb.BindVariable{"user_col": sqltypes.StringBindVariable("foo")}, + } + wantQueries := []*querypb.BoundQuery{ + {Sql: "select `user`.id, `user`.col from `user`", BindVariables: map[string]*querypb.BindVariable{}}, + bq, bq, bq, bq, bq, bq, bq, bq, + {Sql: "select `user`.Id, `user`.`name` from `user` where `user`.id in ::dml_vals for update", BindVariables: map[string]*querypb.BindVariable{"dml_vals": {Type: querypb.Type_TUPLE, Values: dmlVals}}}, + {Sql: "delete from `user` where `user`.id in ::dml_vals", BindVariables: map[string]*querypb.BindVariable{"__vals": sqltypes.TestBindVariable([]any{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}), "dml_vals": {Type: querypb.Type_TUPLE, Values: dmlVals}}}} + assertQueries(t, sbc1, wantQueries) + + wantQueries = []*querypb.BoundQuery{ + {Sql: "select `user`.id, `user`.col from `user`", BindVariables: map[string]*querypb.BindVariable{}}, + {Sql: "select `user`.Id, `user`.`name` from `user` where `user`.id in ::dml_vals for update", BindVariables: map[string]*querypb.BindVariable{"dml_vals": {Type: querypb.Type_TUPLE, Values: dmlVals}}}, + {Sql: 
"delete from `user` where `user`.id in ::dml_vals", BindVariables: map[string]*querypb.BindVariable{"dml_vals": {Type: querypb.Type_TUPLE, Values: dmlVals}}}, + } + assertQueries(t, sbc2, wantQueries) + + bq = &querypb.BoundQuery{ + Sql: "delete from name_user_map where `name` = :name and user_id = :user_id", + BindVariables: map[string]*querypb.BindVariable{ + "name": sqltypes.StringBindVariable("foo"), + "user_id": sqltypes.Uint64BindVariable(1), + }} + wantQueries = []*querypb.BoundQuery{ + bq, bq, bq, bq, bq, bq, bq, bq, + } + assertQueries(t, sbclookup, wantQueries) + + testQueryLog(t, executor, logChan, "MarkSavepoint", "SAVEPOINT", "savepoint s1", 8) + testQueryLog(t, executor, logChan, "VindexDelete", "DELETE", "delete from name_user_map where `name` = :name and user_id = :user_id", 1) + // select `user`.id, `user`.col from `user` - 8 shard + // select 1 from music where music.user_id = 1 and music.col = :user_col - 8 shards + // select Id, `name` from `user` where (`user`.id) in ::dml_vals for update - 1 shard + // delete from `user` where (`user`.id) in ::dml_vals - 1 shard + testQueryLog(t, executor, logChan, "TestExecute", "DELETE", "delete `user` from `user` join music on `user`.col = music.col where music.user_id = 1", 18) +} diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 8baffdfde09..332139c4a78 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -26,27 +26,24 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/logstats" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/cache/theine" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" + 
"vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) @@ -184,7 +181,7 @@ func createExecutorEnvCallback(t testing.TB, eachShard func(shard, ks string, ta // one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness. plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false) - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) executor.SetQueryLogger(queryLogger) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} @@ -212,7 +209,7 @@ func createExecutorEnv(t testing.TB) (executor *Executor, sbc1, sbc2, sbclookup return } -func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { +func createCustomExecutor(t testing.TB, vschema string, mysqlVersion string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { var cancel context.CancelFunc ctx, cancel = context.WithCancel(context.Background()) cell := "aa" @@ -231,7 +228,9 @@ func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, serv, cell, 
resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + env, err := vtenv.New(vtenv.Options{MySQLServerVersion: mysqlVersion}) + require.NoError(t, err) + executor = NewExecutor(ctx, env, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -268,7 +267,7 @@ func createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqlty sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -293,7 +292,7 @@ func createExecutorEnvWithPrimaryReplicaConn(t testing.TB, ctx context.Context, replica = hc.AddTestTablet(cell, "0-replica", 1, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent) + executor = NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -367,8 +366,8 @@ func assertQueries(t *testing.T, sbc *sandboxconn.SandboxConn, wantQueries []*qu } got := query.Sql expected := wantQueries[idx].Sql - utils.MustMatch(t, expected, got) - utils.MustMatch(t, wantQueries[idx].BindVariables, 
query.BindVariables) + utils.MustMatch(t, expected, got, fmt.Sprintf("query did not match on index: %d", idx)) + utils.MustMatch(t, wantQueries[idx].BindVariables, query.BindVariables, fmt.Sprintf("bind variables did not match on index: %d", idx)) idx++ } } diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index f3544c1362e..bd24907af9b 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -26,29 +26,26 @@ import ( "testing" "time" - _flag "vitess.io/vitess/go/internal/flag" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/streamlog" - "vitess.io/vitess/go/vt/topo/topoproto" - "vitess.io/vitess/go/vt/vtgate/logstats" - - "vitess.io/vitess/go/vt/sqlparser" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/vterrors" - _ "vitess.io/vitess/go/vt/vtgate/vindexes" - "vitess.io/vitess/go/vt/vttablet/sandboxconn" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/logstats" + _ "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) func TestSelectNext(t *testing.T) { @@ -160,18 +157,16 @@ func TestSelectDBA(t *testing.T) { } func TestSystemVariablesMySQLBelow80(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "5.7.0") executor.normalize = true - - sqlparser.SetParserVersion("57000") setVarEnabled 
= true session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -196,10 +191,9 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { } func TestSystemVariablesWithSetVarDisabled(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - sqlparser.SetParserVersion("80000") setVarEnabled = false defer func() { setVarEnabled = true @@ -208,8 +202,8 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -234,11 +228,9 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { } func TestSetSystemVariablesTx(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.1") executor.normalize = true - sqlparser.SetParserVersion("80001") - session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) _, err := 
executor.Execute(context.Background(), nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) @@ -250,8 +242,8 @@ func TestSetSystemVariablesTx(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -285,16 +277,14 @@ func TestSetSystemVariables(t *testing.T) { executor, _, _, lookup, _ := createExecutorEnv(t) executor.normalize = true - sqlparser.SetParserVersion("80001") - session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded, SystemVariables: map[string]string{}}) // Set @@sql_mode and execute a select statement. 
We should have SET_VAR in the select statement lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -327,7 +317,7 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "sql_safe_updates", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "sql_safe_updates", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("0"), @@ -350,7 +340,7 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("4"), @@ -373,7 +363,7 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("1"), @@ -402,8 +392,8 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, 
Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("only_full_group_by"), @@ -614,7 +604,7 @@ func TestStreamBuffering(t *testing.T) { sbclookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), @@ -646,7 +636,7 @@ func TestStreamBuffering(t *testing.T) { wantResults := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, }, { Rows: [][]sqltypes.Value{{ @@ -690,7 +680,7 @@ func TestStreamLimitOffset(t *testing.T) { conn.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "weight_string(id)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, Rows: returnRows[shard], @@ -719,7 +709,7 @@ func TestStreamLimitOffset(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ 
{Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ @@ -755,7 +745,7 @@ func TestSelectLastInsertId(t *testing.T) { result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "last_insert_id()", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "last_insert_id()", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewUint64(52), @@ -787,20 +777,20 @@ func TestSelectSystemVariables(t *testing.T) { result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@client_found_rows", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@skip_query_plan_cache", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@sql_select_limit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: 
uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@transaction_mode", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@workload", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@read_after_write_gtid", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@read_after_write_timeout", Type: sqltypes.Float64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@session_track_gtids", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@ddl_strategy", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@migration_context", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@socket", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, - {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@client_found_rows", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@skip_query_plan_cache", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@sql_select_limit", Type: 
sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@transaction_mode", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@workload", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@read_after_write_gtid", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@read_after_write_timeout", Type: sqltypes.Float64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@session_track_gtids", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@ddl_strategy", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@migration_context", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@socket", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ // the following are the uninitialised session values @@ -809,7 +799,7 @@ func TestSelectSystemVariables(t *testing.T) { sqltypes.NewInt64(0), sqltypes.NewInt64(0), sqltypes.NewInt64(0), - sqltypes.NewVarChar("UNSPECIFIED"), + sqltypes.NewVarChar("MULTI"), sqltypes.NewVarChar(""), // these have been set at the beginning of the test sqltypes.NewVarChar("a fine gtid"), @@ -843,9 +833,9 @@ func TestSelectInitializedVitessAwareVariable(t *testing.T) { result, err := executorExec(ctx, executor, session, sql, nil) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: 
"@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@autocommit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(1), @@ -884,7 +874,7 @@ func TestSelectUserDefinedVariable(t *testing.T) { require.NoError(t, err) wantResult = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@foo", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "@foo", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("bar"), @@ -910,7 +900,7 @@ func TestFoundRows(t *testing.T) { result, err := executorExec(ctx, executor, session, sql, map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "found_rows()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "found_rows()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(1), @@ -943,7 +933,7 @@ func testRowCount(t *testing.T, ctx context.Context, executor *Executor, session result, err := executorExec(ctx, executor, session, "select row_count()", map[string]*querypb.BindVariable{}) 
wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "row_count()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "row_count()", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt64(wantRowCount), @@ -964,7 +954,7 @@ func TestSelectLastInsertIdInUnion(t *testing.T) { result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -978,7 +968,7 @@ func TestSelectLastInsertIdInUnion(t *testing.T) { require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(52), @@ -1046,7 +1036,7 @@ func TestLastInsertIDInSubQueryExpression(t *testing.T) { require.NoError(t, err) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "x", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG | querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, + {Name: "x", Type: sqltypes.Uint64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG | querypb.MySqlFlag_UNSIGNED_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewUint64(12345), @@ -1077,7 +1067,7 @@ func TestSelectDatabase(t *testing.T) { map[string]*querypb.BindVariable{}) 
wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "database()", Type: sqltypes.VarChar, Charset: uint32(collations.Default()), Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG)}, + {Name: "database()", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("TestExecutor@primary"), @@ -1275,7 +1265,7 @@ func TestSelectINFromOR(t *testing.T) { _, err := executorExec(ctx, executor, session, "select 1 from user where id = 1 and name = 'apa' or id = 2 and name = 'toto'", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select 1 from `user` where id = 1 and `name` = 'apa' or id = 2 and `name` = 'toto'", + Sql: "select 1 from `user` where id in ::__vals and (id = 1 or `name` = 'toto') and (`name` = 'apa' or id = 2) and `name` in ('apa', 'toto')", BindVariables: map[string]*querypb.BindVariable{ "__vals": sqltypes.TestBindVariable([]any{int64(1), int64(2)}), }, @@ -1560,10 +1550,79 @@ func TestStreamSelectIN(t *testing.T) { utils.MustMatch(t, wantQueries, sbclookup.Queries) } +// TestSelectListArg tests list arg filter with select query +func TestSelectListArg(t *testing.T) { + executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) + session := &vtgatepb.Session{ + TargetString: "@primary", + } + + tupleBV := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: []*querypb.Value{sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewVarChar("a")))}, + } + bvMap := map[string]*querypb.BindVariable{"vals": tupleBV} + _, err := executorExec(ctx, executor, session, "select id from user where (id, col) in ::vals", bvMap) + require.NoError(t, err) + wantQueries := []*querypb.BoundQuery{{ + Sql: "select id from `user` where (id, col) in ::vals", + BindVariables: bvMap, + }} + utils.MustMatch(t, wantQueries, sbc1.Queries) + assert.Nil(t, sbc2.Queries, "sbc2.Queries: %+v, want nil", sbc2.Queries) + + sbc1.Queries = nil + // get c0-e0 
sandbox connection. + tbh, err := executor.scatterConn.gateway.hc.GetTabletHealthByAlias(&topodatapb.TabletAlias{ + Cell: "aa", + Uid: 7, + }) + require.NoError(t, err) + sbc := tbh.Conn.(*sandboxconn.SandboxConn) + sbc.Queries = nil + + _, err = executorExec(ctx, executor, session, "select id from multicol_tbl where (cola, colb) in ::vals", bvMap) + require.NoError(t, err) + + wantQueries = []*querypb.BoundQuery{{ + Sql: "select id from multicol_tbl where (cola, colb) in ::vals", + BindVariables: bvMap, + }} + utils.MustMatch(t, wantQueries, sbc.Queries) + assert.Nil(t, sbc1.Queries, "sbc1.Queries: %+v, want nil", sbc2.Queries) + assert.Nil(t, sbc2.Queries, "sbc2.Queries: %+v, want nil", sbc2.Queries) + + tupleBV.Values[0] = sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewInt64(1), sqltypes.NewInt64(42), sqltypes.NewVarChar("a"))) + sbc.Queries = nil + _, err = executorExec(ctx, executor, session, "select id from multicol_tbl where (cola, colx, colb) in ::vals", bvMap) + require.NoError(t, err) + + wantQueries = []*querypb.BoundQuery{{ + Sql: "select id from multicol_tbl where (cola, colx, colb) in ::vals", + BindVariables: bvMap, + }} + utils.MustMatch(t, wantQueries, sbc.Queries) + assert.Nil(t, sbc1.Queries, "sbc1.Queries: %+v, want nil", sbc2.Queries) + assert.Nil(t, sbc2.Queries, "sbc2.Queries: %+v, want nil", sbc2.Queries) + + tupleBV.Values[0] = sqltypes.ValueToProto(sqltypes.TestTuple(sqltypes.NewVarChar("a"), sqltypes.NewInt64(42), sqltypes.NewInt64(1))) + sbc.Queries = nil + _, err = executorExec(ctx, executor, session, "select id from multicol_tbl where (colb, colx, cola) in ::vals", bvMap) + require.NoError(t, err) + + wantQueries = []*querypb.BoundQuery{{ + Sql: "select id from multicol_tbl where (colb, colx, cola) in ::vals", + BindVariables: bvMap, + }} + utils.MustMatch(t, wantQueries, sbc.Queries) + assert.Nil(t, sbc1.Queries, "sbc1.Queries: %+v, want nil", sbc2.Queries) + assert.Nil(t, sbc2.Queries, "sbc2.Queries: %+v, want nil", 
sbc2.Queries) +} + func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + ex := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) ex.SetQueryLogger(queryLogger) return ex } @@ -1873,7 +1932,7 @@ func TestSelectScatterOrderBy(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, col2, weight_string(col2) from `user` order by col2 desc", + Sql: "select col1, col2, weight_string(col2) from `user` order by `user`.col2 desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1920,7 +1979,8 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "weight_string(textcol)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -1929,6 +1989,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { // This will allow us to test that cross-shard ordering // still works correctly. 
sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), + sqltypes.NewVarBinary(fmt.Sprintf("%d", i%4)), }}, }}) conns = append(conns, sbc) @@ -1944,7 +2005,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, textcol from `user` order by textcol desc", + Sql: "select col1, textcol, weight_string(textcol) from `user` order by `user`.textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -1954,7 +2015,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, InsertID: 0, } @@ -2010,7 +2071,7 @@ func TestStreamSelectScatterOrderBy(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id, col, weight_string(col) from `user` order by col desc", + Sql: "select id, col, weight_string(col) from `user` order by `user`.col desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -2052,12 +2113,14 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "weight_string(textcol)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), 
sqltypes.NewVarChar(fmt.Sprintf("%d", i%4)), + sqltypes.NewVarBinary(fmt.Sprintf("%d", i%4)), }}, }}) conns = append(conns, sbc) @@ -2070,7 +2133,7 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id, textcol from `user` order by textcol desc", + Sql: "select id, textcol, weight_string(textcol) from `user` order by `user`.textcol desc", BindVariables: map[string]*querypb.BindVariable{}, }} for _, conn := range conns { @@ -2080,7 +2143,7 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, } for i := 0; i < 4; i++ { @@ -2266,7 +2329,7 @@ func TestSelectScatterLimit(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, col2, weight_string(col2) from `user` order by col2 desc limit :__upper_limit", + Sql: "select col1, col2, weight_string(col2) from `user` order by `user`.col2 desc limit 3", BindVariables: map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)}, }} for _, conn := range conns { @@ -2338,7 +2401,7 @@ func TestStreamSelectScatterLimit(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select col1, col2, weight_string(col2) from `user` order by col2 desc limit :__upper_limit", + Sql: "select col1, col2, weight_string(col2) from `user` order by `user`.col2 desc limit 3", BindVariables: map[string]*querypb.BindVariable{"__upper_limit": sqltypes.Int64BindVariable(3)}, }} for _, conn := range conns { @@ -2800,11 +2863,11 @@ func TestEmptyJoinRecursiveStream(t *testing.T) { } } -func 
TestCrossShardSubquery(t *testing.T) { +func TestCrossShardDerivedTable(t *testing.T) { executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32}, + {Name: "id1", Type: sqltypes.Int32}, {Name: "col", Type: sqltypes.Int32}, }, InsertID: 0, @@ -2820,7 +2883,7 @@ func TestCrossShardSubquery(t *testing.T) { result, err := executorExec(ctx, executor, session, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t", nil) require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", + Sql: "select t.id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -2831,10 +2894,8 @@ func TestCrossShardSubquery(t *testing.T) { }} utils.MustMatch(t, wantQueries, sbc2.Queries) - wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id", "int32"), "1") - if !result.Equal(wantResult) { - t.Errorf("result: %+v, want %+v", result, wantResult) - } + wantResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields("id1", "int32"), "1") + assert.Equal(t, wantResult, result) } func TestSubQueryAndQueryWithLimit(t *testing.T) { @@ -2883,7 +2944,7 @@ func TestCrossShardSubqueryStream(t *testing.T) { executor, sbc1, sbc2, _, ctx := createExecutorEnv(t) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, InsertID: 0, @@ 
-2896,7 +2957,7 @@ func TestCrossShardSubqueryStream(t *testing.T) { result, err := executorStream(ctx, executor, "select id1 from (select u1.id id1, u2.id from user u1 join user u2 on u2.id = u1.col where u1.id = 1) as t") require.NoError(t, err) wantQueries := []*querypb.BoundQuery{{ - Sql: "select id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", + Sql: "select t.id1, t.`u1.col` from (select u1.id as id1, u1.col as `u1.col` from `user` as u1 where u1.id = 1) as t", BindVariables: map[string]*querypb.BindVariable{}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) @@ -2908,18 +2969,16 @@ func TestCrossShardSubqueryStream(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), }}, } - if !result.Equal(wantResult) { - t.Errorf("result: %+v, want %+v", result, wantResult) - } + assert.Equal(t, wantResult, result) } -func TestCrossShardSubqueryGetFields(t *testing.T) { +func TestCrossShardDerivedTableGetFields(t *testing.T) { executor, sbc1, _, sbclookup, ctx := createExecutorEnv(t) sbclookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ @@ -2928,7 +2987,7 @@ func TestCrossShardSubqueryGetFields(t *testing.T) { }}) result1 := []*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, }} @@ -2952,12 +3011,10 @@ func TestCrossShardSubqueryGetFields(t 
*testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "col", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, + {Name: "id1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, } - if !result.Equal(wantResult) { - t.Errorf("result: %+v, want %+v", result, wantResult) - } + assert.Equal(t, wantResult, result) } func TestSelectBindvarswithPrepare(t *testing.T) { @@ -2979,9 +3036,7 @@ func TestSelectBindvarswithPrepare(t *testing.T) { BindVariables: map[string]*querypb.BindVariable{"id": sqltypes.Int64BindVariable(1)}, }} utils.MustMatch(t, wantQueries, sbc1.Queries) - if sbc2.Queries != nil { - t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) - } + assert.Empty(t, sbc2.Queries) } func TestSelectDatabasePrepare(t *testing.T) { @@ -3189,7 +3244,7 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { } queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) executor.SetQueryLogger(queryLogger) defer executor.Close() // some sleep for all goroutines to start @@ -3280,18 +3335,12 @@ func TestGen4SelectStraightJoin(t *testing.T) { require.NoError(t, err) wantQueries := []*querypb.BoundQuery{ { - Sql: "select u.id from `user` as u, user2 as u2 where u.id = u2.id", + Sql: "select u.id from `user` as u straight_join user2 as u2 on u.id = u2.id", BindVariables: map[string]*querypb.BindVariable{}, }, } - wantWarnings := []*querypb.QueryWarning{ - { - Code: 1235, - Message: 
"straight join is converted to normal join", - }, - } utils.MustMatch(t, wantQueries, sbc1.Queries) - utils.MustMatch(t, wantWarnings, session.Warnings) + require.Empty(t, session.Warnings) } func TestGen4MultiColumnVindexEqual(t *testing.T) { @@ -3851,14 +3900,14 @@ func TestSelectAggregationNoData(t *testing.T) { { sql: `select count(*) from (select col1, col2 from user limit 2) x`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1", "int64|int64|int64")), - expSandboxQ: "select col1, col2, 1 from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, 1 from (select col1, col2 from `user`) as x limit 2", expField: `[name:"count(*)" type:INT64]`, expRow: `[[INT64(0)]]`, }, { sql: `select col2, count(*) from (select col1, col2 from user limit 2) x group by col2`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1|weight_string(col2)", "int64|int64|int64|varbinary")), - expSandboxQ: "select col1, col2, 1, weight_string(col2) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, 1, weight_string(x.col2) from (select col1, col2 from `user`) as x limit 2", expField: `[name:"col2" type:INT64 name:"count(*)" type:INT64]`, expRow: `[]`, }, @@ -3943,70 +3992,70 @@ func TestSelectAggregationData(t *testing.T) { { sql: `select count(*) from (select col1, col2 from user limit 2) x`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1", "int64|int64|int64"), "100|200|1", "200|300|1"), - expSandboxQ: "select col1, col2, 1 from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, 1 from (select col1, col2 from `user`) as x limit 2", expField: `[name:"count(*)" type:INT64]`, expRow: `[[INT64(2)]]`, }, { sql: `select col2, count(*) from (select col1, col2 from user limit 9) x group by col2`, sandboxRes: 
sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|1|weight_string(col2)", "int64|int64|int64|varbinary"), "100|3|1|NULL", "200|2|1|NULL"), - expSandboxQ: "select col1, col2, 1, weight_string(col2) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, 1, weight_string(x.col2) from (select col1, col2 from `user`) as x limit 9", expField: `[name:"col2" type:INT64 name:"count(*)" type:INT64]`, expRow: `[[INT64(2) INT64(4)] [INT64(3) INT64(5)]]`, }, { sql: `select count(col1) from (select id, col1 from user limit 2) x`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("id|col1", "int64|varchar"), "1|a", "2|b"), - expSandboxQ: "select id, col1 from (select id, col1 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.id, x.col1 from (select id, col1 from `user`) as x limit 2", expField: `[name:"count(col1)" type:INT64]`, expRow: `[[INT64(2)]]`, }, { sql: `select count(col1), col2 from (select col2, col1 from user limit 9) x group by col2`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col2|col1|weight_string(col2)", "int64|varchar|varbinary"), "3|a|NULL", "2|b|NULL"), - expSandboxQ: "select col2, col1, weight_string(col2) from (select col2, col1 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col2, x.col1, weight_string(x.col2) from (select col2, col1 from `user`) as x limit 9", expField: `[name:"count(col1)" type:INT64 name:"col2" type:INT64]`, expRow: `[[INT64(4) INT64(2)] [INT64(5) INT64(3)]]`, }, { sql: `select col1, count(col2) from (select col1, col2 from user limit 9) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "a|1|a", "b|null|b"), - expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, weight_string(x.col1) from (select col1, col2 from `user`) as x limit 
9", expField: `[name:"col1" type:VARCHAR name:"count(col2)" type:INT64]`, expRow: `[[VARCHAR("a") INT64(5)] [VARCHAR("b") INT64(0)]]`, }, { sql: `select col1, count(col2) from (select col1, col2 from user limit 32) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "null|1|null", "null|null|null", "a|1|a", "b|null|b"), - expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, weight_string(x.col1) from (select col1, col2 from `user`) as x limit 32", expField: `[name:"col1" type:VARCHAR name:"count(col2)" type:INT64]`, expRow: `[[NULL INT64(8)] [VARCHAR("a") INT64(8)] [VARCHAR("b") INT64(0)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|int64|varbinary"), "a|3|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, weight_string(x.col1) from (select col1, col2 from `user`) as x limit 4", expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:DECIMAL]`, expRow: `[[VARCHAR("a") DECIMAL(12)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|2|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, weight_string(x.col1) from (select col1, col2 from `user`) as x limit 4", expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, expRow: `[[VARCHAR("a") FLOAT64(8)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user 
limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|x|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, weight_string(x.col1) from (select col1, col2 from `user`) as x limit 4", expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, expRow: `[[VARCHAR("a") FLOAT64(0)]]`, }, { sql: `select col1, sum(col2) from (select col1, col2 from user limit 4) x group by col1`, sandboxRes: sqltypes.MakeTestResult(sqltypes.MakeTestFields("col1|col2|weight_string(col1)", "varchar|varchar|varbinary"), "a|null|a"), - expSandboxQ: "select col1, col2, weight_string(col1) from (select col1, col2 from `user`) as x limit :__upper_limit", + expSandboxQ: "select x.col1, x.col2, weight_string(x.col1) from (select col1, col2 from `user`) as x limit 4", expField: `[name:"col1" type:VARCHAR name:"sum(col2)" type:FLOAT64]`, expRow: `[[VARCHAR("a") NULL]]`, }, @@ -4124,7 +4173,7 @@ func TestSelectCFC(t *testing.T) { func TestSelectView(t *testing.T) { executor, sbc, _, _, _ := createExecutorEnv(t) // add the view to local vschema - err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id") + err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id", executor.vm.parser) require.NoError(t, err) executor.normalize = true @@ -4170,7 +4219,7 @@ func TestWarmingReads(t *testing.T) { executor.normalize = true session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) - // Since queries on the replica will run in a separate go-routine, we need sycnronization for the Queries field in the sandboxconn. 
+ // Since queries on the replica will run in a separate go-routine, we need synchronization for the Queries field in the sandboxconn. replica.RequireQueriesLocking() _, err := executor.Execute(ctx, nil, "TestWarmingReads", session, "select age, city from user", map[string]*querypb.BindVariable{}) diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index e71a41eeb7f..5e66899db44 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -21,8 +21,6 @@ import ( "testing" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sqlparser" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/test/utils" @@ -321,6 +319,10 @@ func TestExecutorSetOp(t *testing.T) { }, { in: "set foreign_key_checks = 1", result: returnNoResult("foreign_key_checks", "int64"), + }, { + in: "set foreign_key_checks = 0", + sysVars: map[string]string{"foreign_key_checks": "0"}, + result: returnResult("foreign_key_checks", "int64", "0"), }, { in: "set unique_checks = 0", sysVars: map[string]string{"unique_checks": "0"}, @@ -503,14 +505,9 @@ func createMap(keys []string, values []any) map[string]*querypb.BindVariable { } func TestSetVar(t *testing.T) { - executor, _, _, sbc, ctx := createExecutorEnv(t) + executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - oldVersion := sqlparser.GetParserVersion() - sqlparser.SetParserVersion("80000") - defer func() { - sqlparser.SetParserVersion(oldVersion) - }() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded}) sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( @@ -547,14 +544,9 @@ func TestSetVar(t *testing.T) { } func TestSetVarShowVariables(t *testing.T) { - executor, _, _, sbc, ctx := createExecutorEnv(t) + executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - oldVersion := sqlparser.GetParserVersion() - 
sqlparser.SetParserVersion("80000") - defer func() { - sqlparser.SetParserVersion(oldVersion) - }() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded}) sbc.SetResults([]*sqltypes.Result{ diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index 5ef00fd0691..b8cfeaf3cd5 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -21,18 +21,17 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/logstats" - - "vitess.io/vitess/go/vt/discovery" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/sqltypes" _ "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) @@ -68,7 +67,7 @@ func TestStreamSQLSharded(t *testing.T) { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, vtenv.NewTestEnv(), serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) executor.SetQueryLogger(queryLogger) defer executor.Close() diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 2a44d0a8b00..cce717674d6 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -42,11 +42,6 @@ import ( "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/discovery" - querypb 
"vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtgate/buffer" @@ -55,6 +50,12 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vtgate/vschemaacl" "vitess.io/vitess/go/vt/vtgate/vtgateservice" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) func TestExecutorResultsExceeded(t *testing.T) { @@ -103,7 +104,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { for _, test := range testCases { sbclookup.SetResults([]*sqltypes.Result{result}) - stmt, err := sqlparser.Parse(test.query) + stmt, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) _, err = executor.Execute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) @@ -587,14 +588,14 @@ func TestExecutorShow(t *testing.T) { _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show create table %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show create table unknown" + wantQuery = "show create table `unknown`" assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) // SHOW KEYS with two different syntax _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show keys from %v.unknown", KsTestUnsharded), nil) require.NoError(t, err) lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql - wantQuery = "show indexes from unknown" + wantQuery = "show indexes from `unknown`" assert.Equal(t, wantQuery, lastQuery, "Got: %v. 
Want: %v", lastQuery, wantQuery) _, err = executor.Execute(ctx, nil, "TestExecute", session, fmt.Sprintf("show keys from unknown from %v", KsTestUnsharded), nil) @@ -640,7 +641,7 @@ func TestExecutorShow(t *testing.T) { lastQuery = sbclookup.Queries[len(sbclookup.Queries)-1].Sql assert.Equal(t, wantQuery, lastQuery, "Got: %v. Want: %v", lastQuery, wantQuery) - // Set desitation keyspace in session + // Set destination keyspace in session session.TargetString = KsTestUnsharded _, err = executor.Execute(ctx, nil, "TestExecute", session, "show create table unknown", nil) require.NoError(t, err) @@ -667,7 +668,7 @@ func TestExecutorShow(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), }, } @@ -712,7 +713,7 @@ func TestExecutorShow(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), }, } @@ -763,7 +764,7 @@ func TestExecutorShow(t *testing.T) { wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "value", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "value", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1), sqltypes.NewVarChar("foo")}, @@ -889,6 +890,7 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("TestExecutor", "keyspace_id", "numeric", "", ""), buildVarCharRow("TestExecutor", "krcol_unique_vdx", "keyrange_lookuper_unique", "", ""), buildVarCharRow("TestExecutor", "krcol_vdx", "keyrange_lookuper", "", ""), + buildVarCharRow("TestExecutor", "multicol_vdx", 
"multicol", "column_count=2; column_vindex=xxhash,binary", ""), buildVarCharRow("TestExecutor", "music_user_map", "lookup_hash_unique", "from=music_id; table=music_user_map; to=user_id", "music"), buildVarCharRow("TestExecutor", "name_lastname_keyspace_id_map", "lookup", "from=name,lastname; table=name_lastname_keyspace_id_map; to=keyspace_id", "user2"), buildVarCharRow("TestExecutor", "name_user_map", "lookup_hash", "from=name; table=name_user_map; to=user_id", "user"), @@ -1155,97 +1157,6 @@ func TestExecutorComment(t *testing.T) { } } -func TestExecutorOther(t *testing.T) { - executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) - - type cnts struct { - Sbc1Cnt int64 - Sbc2Cnt int64 - SbcLookupCnt int64 - } - - tcs := []struct { - targetStr string - - hasNoKeyspaceErr bool - hasDestinationShardErr bool - wantCnts cnts - }{ - { - targetStr: "", - hasNoKeyspaceErr: true, - }, - { - targetStr: "TestExecutor[-]", - hasDestinationShardErr: true, - }, - { - targetStr: KsTestUnsharded, - wantCnts: cnts{ - Sbc1Cnt: 0, - Sbc2Cnt: 0, - SbcLookupCnt: 1, - }, - }, - { - targetStr: "TestExecutor", - wantCnts: cnts{ - Sbc1Cnt: 1, - Sbc2Cnt: 0, - SbcLookupCnt: 0, - }, - }, - { - targetStr: "TestExecutor/-20", - wantCnts: cnts{ - Sbc1Cnt: 1, - Sbc2Cnt: 0, - SbcLookupCnt: 0, - }, - }, - { - targetStr: "TestExecutor[00]", - wantCnts: cnts{ - Sbc1Cnt: 1, - Sbc2Cnt: 0, - SbcLookupCnt: 0, - }, - }, - } - - stmts := []string{ - "describe select * from t1", - "explain select * from t1", - "repair table t1", - "optimize table t1", - } - - for _, stmt := range stmts { - for _, tc := range tcs { - t.Run(fmt.Sprintf("%s-%s", stmt, tc.targetStr), func(t *testing.T) { - sbc1.ExecCount.Store(0) - sbc2.ExecCount.Store(0) - sbclookup.ExecCount.Store(0) - - _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) - if tc.hasNoKeyspaceErr { - assert.Error(t, err, errNoKeyspace) - } else if tc.hasDestinationShardErr { - 
assert.Errorf(t, err, "Destination can only be a single shard for statement: %s", stmt) - } else { - assert.NoError(t, err) - } - - utils.MustMatch(t, tc.wantCnts, cnts{ - Sbc1Cnt: sbc1.ExecCount.Load(), - Sbc2Cnt: sbc2.ExecCount.Load(), - SbcLookupCnt: sbclookup.ExecCount.Load(), - }) - }) - } - } -} - func TestExecutorDDL(t *testing.T) { executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) @@ -1306,7 +1217,7 @@ func TestExecutorDDL(t *testing.T) { "drop table t2", `create table test_partitioned ( id bigint, - date_create int, + date_create int, primary key(id) ) Engine=InnoDB /*!50100 PARTITION BY RANGE (date_create) (PARTITION p2018_06_14 VALUES LESS THAN (1528959600) ENGINE = InnoDB, @@ -1406,7 +1317,7 @@ func TestExecutorAlterVSchemaKeyspace(t *testing.T) { session := NewSafeSession(&vtgatepb.Session{TargetString: "@primary", Autocommit: true}) vschemaUpdates := make(chan *vschemapb.SrvVSchema, 2) - executor.serv.WatchSrvVSchema(ctx, "aa", func(vschema *vschemapb.SrvVSchema, err error) bool { + executor.serv.WatchSrvVSchema(ctx, executor.cell, func(vschema *vschemapb.SrvVSchema, err error) bool { vschemaUpdates <- vschema return true }) @@ -1697,7 +1608,7 @@ func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *vcur Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}}, } - stmt, reservedVars, err := parseAndValidateQuery(sql) + stmt, reservedVars, err := parseAndValidateQuery(sql, sqlparser.NewTestParser()) require.NoError(t, err) plan, err := e.getPlan(context.Background(), vcursor, sql, stmt, comments, bindVars, reservedVars /* normalize */, e.normalize, logStats) require.NoError(t, err) @@ -1865,7 +1776,7 @@ func TestGetPlanPriority(t *testing.T) { vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) assert.NoError(t, err) - stmt, err := sqlparser.Parse(testCase.sql) + stmt, err := sqlparser.NewTestParser().Parse(testCase.sql) 
assert.NoError(t, err) crticalityFromStatement, _ := sqlparser.GetPriorityFromStatement(stmt) @@ -2145,8 +2056,8 @@ func TestServingKeyspaces(t *testing.T) { require.Equal(t, `[[VARCHAR("TestUnsharded")]]`, fmt.Sprintf("%v", result.Rows)) } -func TestExecutorOtherRead(t *testing.T) { - executor, sbc1, sbc2, sbclookup, _ := createExecutorEnv(t) +func TestExecutorOther(t *testing.T) { + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) type cnts struct { Sbc1Cnt int64 @@ -2185,26 +2096,42 @@ func TestExecutorOtherRead(t *testing.T) { SbcLookupCnt: 0, }, }, + { + targetStr: "TestExecutor/-20", + wantCnts: cnts{ + Sbc1Cnt: 1, + Sbc2Cnt: 0, + SbcLookupCnt: 0, + }, + }, + { + targetStr: "TestExecutor[00]", + wantCnts: cnts{ + Sbc1Cnt: 1, + Sbc2Cnt: 0, + SbcLookupCnt: 0, + }, + }, } stmts := []string{ - "describe select * from t1", - "explain select * from t1", + "repair table t1", + "optimize table t1", "do 1", } for _, stmt := range stmts { for _, tc := range tcs { - t.Run(stmt+tc.targetStr, func(t *testing.T) { + t.Run(fmt.Sprintf("%s-%s", stmt, tc.targetStr), func(t *testing.T) { sbc1.ExecCount.Store(0) sbc2.ExecCount.Store(0) sbclookup.ExecCount.Store(0) - _, err := executor.Execute(context.Background(), nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) if tc.hasNoKeyspaceErr { - assert.EqualError(t, err, errNoKeyspace.Error()) + assert.Error(t, err, errNoKeyspace) } else if tc.hasDestinationShardErr { - assert.Errorf(t, err, "Destination can only be a single shard for statement: %s, got: DestinationExactKeyRange(-)", stmt) + assert.Errorf(t, err, "Destination can only be a single shard for statement: %s", stmt) } else { assert.NoError(t, err) } @@ -2213,7 +2140,7 @@ func TestExecutorOtherRead(t *testing.T) { Sbc1Cnt: sbc1.ExecCount.Load(), Sbc2Cnt: sbc2.ExecCount.Load(), SbcLookupCnt: 
sbclookup.ExecCount.Load(), - }, "count did not match") + }) }) } } @@ -2268,6 +2195,71 @@ func TestExecutorAnalyze(t *testing.T) { } } +func TestExecutorExplainStmt(t *testing.T) { + executor, sbc1, sbc2, sbclookup, ctx := createExecutorEnv(t) + + type cnts struct { + Sbc1Cnt int64 + Sbc2Cnt int64 + SbcLookupCnt int64 + } + + tcs := []struct { + targetStr string + + wantCnts cnts + }{ + { + targetStr: "", + wantCnts: cnts{Sbc1Cnt: 1}, + }, + { + targetStr: "TestExecutor[-]", + wantCnts: cnts{Sbc1Cnt: 1, Sbc2Cnt: 1}, + }, + { + targetStr: KsTestUnsharded, + wantCnts: cnts{SbcLookupCnt: 1}, + }, + { + targetStr: "TestExecutor", + wantCnts: cnts{Sbc1Cnt: 1}, + }, + { + targetStr: "TestExecutor/-20", + wantCnts: cnts{Sbc1Cnt: 1}, + }, + { + targetStr: "TestExecutor[00]", + wantCnts: cnts{Sbc1Cnt: 1}, + }, + } + + stmts := []string{ + "describe select * from t1", + "explain select * from t1", + } + + for _, stmt := range stmts { + for _, tc := range tcs { + t.Run(fmt.Sprintf("%s-%s", stmt, tc.targetStr), func(t *testing.T) { + sbc1.ExecCount.Store(0) + sbc2.ExecCount.Store(0) + sbclookup.ExecCount.Store(0) + + _, err := executor.Execute(ctx, nil, "TestExecute", NewSafeSession(&vtgatepb.Session{TargetString: tc.targetStr}), stmt, nil) + assert.NoError(t, err) + + utils.MustMatch(t, tc.wantCnts, cnts{ + Sbc1Cnt: sbc1.ExecCount.Load(), + Sbc2Cnt: sbc2.ExecCount.Load(), + SbcLookupCnt: sbclookup.ExecCount.Load(), + }) + }) + } + } +} + func TestExecutorVExplain(t *testing.T) { executor, _, _, _, ctx := createExecutorEnv(t) @@ -2628,6 +2620,7 @@ func TestExecutorCallProc(t *testing.T) { func TestExecutorTempTable(t *testing.T) { executor, _, _, sbcUnsharded, ctx := createExecutorEnv(t) + initialWarningsCount := warnings.Counts()["WarnUnshardedOnly"] executor.warnShardedOnly = true creatQuery := "create temporary table temp_t(id bigint primary key)" session := NewSafeSession(&vtgatepb.Session{TargetString: KsTestUnsharded}) @@ -2635,6 +2628,7 @@ func TestExecutorTempTable(t 
*testing.T) { require.NoError(t, err) assert.EqualValues(t, 1, sbcUnsharded.ExecCount.Load()) assert.NotEmpty(t, session.Warnings) + assert.Equal(t, initialWarningsCount+1, warnings.Counts()["WarnUnshardedOnly"], "warnings count") before := executor.plans.Len() diff --git a/go/vt/vtgate/executor_vschema_ddl_test.go b/go/vt/vtgate/executor_vschema_ddl_test.go index 1c2813a33c4..1c912ed0d62 100644 --- a/go/vt/vtgate/executor_vschema_ddl_test.go +++ b/go/vt/vtgate/executor_vschema_ddl_test.go @@ -17,26 +17,23 @@ limitations under the License. package vtgate import ( - "context" "reflect" "slices" "testing" "time" - "vitess.io/vitess/go/test/utils" - - "vitess.io/vitess/go/vt/callerid" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/vtgate/vschemaacl" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - + querypb "vitess.io/vitess/go/vt/proto/query" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) func waitForVindex(t *testing.T, ks, name string, watch chan *vschemapb.SrvVSchema, executor *Executor) (*vschemapb.SrvVSchema, *vschemapb.Vindex) { @@ -426,9 +423,7 @@ func TestExecutorDropSequenceDDL(t *testing.T) { _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) - ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - if !waitForNewerVSchema(ctxWithTimeout, executor, ts) { + if !waitForNewerVSchema(ctx, executor, ts, 5*time.Second) { t.Fatalf("vschema did not drop the sequene 'test_seq'") } @@ -464,9 +459,7 @@ func TestExecutorDropAutoIncDDL(t *testing.T) { stmt = "alter vschema on test_table add auto_increment id using 
`db-name`.`test_seq`" _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) - ctxWithTimeout, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - if !waitForNewerVSchema(ctxWithTimeout, executor, ts) { + if !waitForNewerVSchema(ctx, executor, ts, 5*time.Second) { t.Fatalf("vschema did not update with auto_increment for 'test_table'") } ts = executor.VSchema().GetCreated() @@ -480,9 +473,7 @@ func TestExecutorDropAutoIncDDL(t *testing.T) { _, err = executor.Execute(ctx, nil, "TestExecute", session, stmt, nil) require.NoError(t, err) - ctxWithTimeout, cancel2 := context.WithTimeout(ctx, 5*time.Second) - defer cancel2() - if !waitForNewerVSchema(ctxWithTimeout, executor, ts) { + if !waitForNewerVSchema(ctx, executor, ts, 5*time.Second) { t.Fatalf("vschema did not drop the auto_increment for 'test_table'") } if executor.vm.GetCurrentSrvVschema().Keyspaces[ks].Tables["test_table"].AutoIncrement != nil { diff --git a/go/vt/vtgate/fakerpcvtgateconn/conn.go b/go/vt/vtgate/fakerpcvtgateconn/conn.go index 442c8997979..3f6236ea9ec 100644 --- a/go/vt/vtgate/fakerpcvtgateconn/conn.go +++ b/go/vt/vtgate/fakerpcvtgateconn/conn.go @@ -23,7 +23,7 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "reflect" "vitess.io/vitess/go/sqltypes" @@ -211,7 +211,7 @@ func newSession( Shard: shard, TabletType: tabletType, }, - TransactionId: rand.Int63(), + TransactionId: rand.Int64(), }) } return &vtgatepb.Session{ diff --git a/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go b/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go index cf272fe3606..55a067807bd 100644 --- a/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go +++ b/go/vt/vtgate/grpcvtgateconn/conn_rpc_test.go @@ -108,6 +108,7 @@ func TestGRPCVTGateConnAuth(t *testing.T) { fs := pflag.NewFlagSet("", pflag.ContinueOnError) grpcclient.RegisterFlags(fs) + grpcclient.ResetStaticAuth() err = fs.Parse([]string{ "--grpc_auth_static_client_creds", f.Name(), @@ -148,6 +149,7 @@ func 
TestGRPCVTGateConnAuth(t *testing.T) { fs = pflag.NewFlagSet("", pflag.ContinueOnError) grpcclient.RegisterFlags(fs) + grpcclient.ResetStaticAuth() err = fs.Parse([]string{ "--grpc_auth_static_client_creds", f.Name(), diff --git a/go/vt/vtgate/logstats/logstats.go b/go/vt/vtgate/logstats/logstats.go index 5ea7820a72e..8f8ba41e3cd 100644 --- a/go/vt/vtgate/logstats/logstats.go +++ b/go/vt/vtgate/logstats/logstats.go @@ -18,21 +18,16 @@ package logstats import ( "context" - "encoding/json" - "fmt" "io" "net/url" "time" "github.com/google/safehtml" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/logstats" "vitess.io/vitess/go/streamlog" - "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/callinfo" - "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -128,69 +123,60 @@ func (stats *LogStats) Logf(w io.Writer, params url.Values) error { return nil } - // FormatBindVariables call might panic so we're going to catch it here - // and print out the stack trace for debugging. 
- defer func() { - if x := recover(); x != nil { - log.Errorf("Uncaught panic:\n%v\n%s", x, tb.Stack(4)) - } - }() - - formattedBindVars := "\"[REDACTED]\"" - if !streamlog.GetRedactDebugUIQueries() { - _, fullBindParams := params["full"] - formattedBindVars = sqltypes.FormatBindVariables( - stats.BindVariables, - fullBindParams, - streamlog.GetQueryLogFormat() == streamlog.QueryLogFormatJSON, - ) - } - - // TODO: remove username here we fully enforce immediate caller id + redacted := streamlog.GetRedactDebugUIQueries() + _, fullBindParams := params["full"] remoteAddr, username := stats.RemoteAddrUsername() - var fmtString string - switch streamlog.GetQueryLogFormat() { - case streamlog.QueryLogFormatText: - fmtString = "%v\t%v\t%v\t'%v'\t'%v'\t%v\t%v\t%.6f\t%.6f\t%.6f\t%.6f\t%v\t%q\t%v\t%v\t%v\t%q\t%q\t%q\t%v\t%v\t%q\n" - case streamlog.QueryLogFormatJSON: - fmtString = "{\"Method\": %q, \"RemoteAddr\": %q, \"Username\": %q, \"ImmediateCaller\": %q, \"Effective Caller\": %q, \"Start\": \"%v\", \"End\": \"%v\", \"TotalTime\": %.6f, \"PlanTime\": %v, \"ExecuteTime\": %v, \"CommitTime\": %v, \"StmtType\": %q, \"SQL\": %q, \"BindVars\": %v, \"ShardQueries\": %v, \"RowsAffected\": %v, \"Error\": %q, \"TabletType\": %q, \"SessionUUID\": %q, \"Cached Plan\": %v, \"TablesUsed\": %v, \"ActiveKeyspace\": %q}\n" - } - - tables := stats.TablesUsed - if tables == nil { - tables = []string{} - } - tablesUsed, marshalErr := json.Marshal(tables) - if marshalErr != nil { - return marshalErr + log := logstats.NewLogger() + log.Init(streamlog.GetQueryLogFormat() == streamlog.QueryLogFormatJSON) + log.Key("Method") + log.StringUnquoted(stats.Method) + log.Key("RemoteAddr") + log.StringUnquoted(remoteAddr) + log.Key("Username") + log.StringUnquoted(username) + log.Key("ImmediateCaller") + log.StringSingleQuoted(stats.ImmediateCaller()) + log.Key("Effective Caller") + log.StringSingleQuoted(stats.EffectiveCaller()) + log.Key("Start") + log.Time(stats.StartTime) + log.Key("End") + 
log.Time(stats.EndTime) + log.Key("TotalTime") + log.Duration(stats.TotalTime()) + log.Key("PlanTime") + log.Duration(stats.PlanTime) + log.Key("ExecuteTime") + log.Duration(stats.ExecuteTime) + log.Key("CommitTime") + log.Duration(stats.CommitTime) + log.Key("StmtType") + log.StringUnquoted(stats.StmtType) + log.Key("SQL") + log.String(stats.SQL) + log.Key("BindVars") + if redacted { + log.Redacted() + } else { + log.BindVariables(stats.BindVariables, fullBindParams) } - _, err := fmt.Fprintf( - w, - fmtString, - stats.Method, - remoteAddr, - username, - stats.ImmediateCaller(), - stats.EffectiveCaller(), - stats.StartTime.Format("2006-01-02 15:04:05.000000"), - stats.EndTime.Format("2006-01-02 15:04:05.000000"), - stats.TotalTime().Seconds(), - stats.PlanTime.Seconds(), - stats.ExecuteTime.Seconds(), - stats.CommitTime.Seconds(), - stats.StmtType, - stats.SQL, - formattedBindVars, - stats.ShardQueries, - stats.RowsAffected, - stats.ErrorStr(), - stats.TabletType, - stats.SessionUUID, - stats.CachedPlan, - string(tablesUsed), - stats.ActiveKeyspace, - ) - - return err + log.Key("ShardQueries") + log.Uint(stats.ShardQueries) + log.Key("RowsAffected") + log.Uint(stats.RowsAffected) + log.Key("Error") + log.String(stats.ErrorStr()) + log.Key("TabletType") + log.String(stats.TabletType) + log.Key("SessionUUID") + log.String(stats.SessionUUID) + log.Key("Cached Plan") + log.Bool(stats.CachedPlan) + log.Key("TablesUsed") + log.Strings(stats.TablesUsed) + log.Key("ActiveKeyspace") + log.String(stats.ActiveKeyspace) + + return log.Flush(w) } diff --git a/go/vt/vtgate/logstats/logstats_test.go b/go/vt/vtgate/logstats/logstats_test.go index dbe49b200b8..ae3c01e0f0b 100644 --- a/go/vt/vtgate/logstats/logstats_test.go +++ b/go/vt/vtgate/logstats/logstats_test.go @@ -79,7 +79,7 @@ func TestLogStatsFormat(t *testing.T) { { // 0 redact: false, format: "text", - expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 
01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n", + expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n", bindVars: intBindVar, }, { // 1 redact: true, @@ -99,7 +99,7 @@ func TestLogStatsFormat(t *testing.T) { }, { // 4 redact: false, format: "text", - expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\tmap[strVal:type:VARCHAR value:\"abc\"]\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n", + expected: "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1\"\t{\"strVal\": {\"type\": \"VARCHAR\", \"value\": \"abc\"}}\t0\t0\t\"\"\t\"PRIMARY\"\t\"suuid\"\tfalse\t[\"ks1.tbl1\",\"ks2.tbl2\"]\t\"db\"\n", bindVars: stringBindVar, }, { // 5 redact: true, @@ -129,14 +129,13 @@ func TestLogStatsFormat(t *testing.T) { streamlog.SetQueryLogFormat(test.format) if test.format == "text" { got := testFormat(t, logStats, params) + t.Logf("got: %s", got) assert.Equal(t, test.expected, got) - for _, variable := range logStats.BindVariables { - fmt.Println("->" + fmt.Sprintf("%v", variable)) - } return } got := testFormat(t, logStats, params) + t.Logf("got: %s", got) var parsed map[string]any err := json.Unmarshal([]byte(got), &parsed) assert.NoError(t, err) @@ -157,12 +156,12 @@ func TestLogStatsFilter(t *testing.T) { params := map[string][]string{"full": {}} got := testFormat(t, logStats, params) - want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY 
*/\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" + want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" assert.Equal(t, want, got) streamlog.SetQueryLogFilterTag("LOG_THIS_QUERY") got = testFormat(t, logStats, params) - want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" + want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" assert.Equal(t, want, got) streamlog.SetQueryLogFilterTag("NOT_THIS_QUERY") @@ -180,12 +179,12 @@ func TestLogStatsRowThreshold(t *testing.T) { params := map[string][]string{"full": {}} got := testFormat(t, logStats, params) - want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" + want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" assert.Equal(t, want, got) streamlog.SetQueryLogRowThreshold(0) got = testFormat(t, logStats, params) - want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" + want = 
"test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t0.000000\t0.000000\t0.000000\t\t\"sql1 /* LOG_THIS_QUERY */\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t0\t0\t\"\"\t\"\"\t\"\"\tfalse\t[]\t\"\"\n" assert.Equal(t, want, got) streamlog.SetQueryLogRowThreshold(1) got = testFormat(t, logStats, params) diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index 5d2414ac275..199892842ee 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -24,20 +24,20 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vtgateservice" + + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) type planExec func(ctx context.Context, plan *engine.Plan, vc *vcursorImpl, bindVars map[string]*querypb.BindVariable, startTime time.Time) error type txResult func(sqlparser.StatementType, *sqltypes.Result) error -func waitForNewerVSchema(ctx context.Context, e *Executor, lastVSchemaCreated time.Time) bool { - timeout := 30 * time.Second +func waitForNewerVSchema(ctx context.Context, e *Executor, lastVSchemaCreated time.Time, timeout time.Duration) bool { pollingInterval := 10 * time.Millisecond waitCtx, cancel := context.WithTimeout(ctx, timeout) ticker := time.NewTicker(pollingInterval) @@ -48,7 +48,7 @@ func waitForNewerVSchema(ctx context.Context, e *Executor, lastVSchemaCreated ti case <-waitCtx.Done(): return false case <-ticker.C: - if e.VSchema().GetCreated().After(lastVSchemaCreated) { + if e.VSchema() != nil && e.VSchema().GetCreated().After(lastVSchemaCreated) { return true } } @@ -64,11 +64,11 @@ func (e *Executor) newExecute( logStats *logstats.LogStats, execPlan 
planExec, // used when there is a plan to execute recResult txResult, // used when it's something simple like begin/commit/rollback/savepoint -) error { - // 1: Prepare before planning and execution +) (err error) { + // 1: Prepare before planning and execution. // Start an implicit transaction if necessary. - err := e.startTxIfNecessary(ctx, safeSession) + err = e.startTxIfNecessary(ctx, safeSession) if err != nil { return err } @@ -79,21 +79,35 @@ func (e *Executor) newExecute( query, comments := sqlparser.SplitMarginComments(sql) - // 2: Parse and Validate query - stmt, reservedVars, err := parseAndValidateQuery(query) + // 2: Parse and Validate query. + stmt, reservedVars, err := parseAndValidateQuery(query, e.env.Parser()) if err != nil { return err } - var lastVSchemaCreated time.Time - vs := e.VSchema() - lastVSchemaCreated = vs.GetCreated() + var ( + vs = e.VSchema() + lastVSchemaCreated = vs.GetCreated() + result *sqltypes.Result + plan *engine.Plan + ) + for try := 0; try < MaxBufferingRetries; try++ { - if try > 0 && !vs.GetCreated().After(lastVSchemaCreated) { - // There is a race due to which the executor's vschema may not have been updated yet. - // Without a wait we fail non-deterministically since the previous vschema will not have the updated routing rules - if waitForNewerVSchema(ctx, e, lastVSchemaCreated) { + if try > 0 && !vs.GetCreated().After(lastVSchemaCreated) { // We need to wait for a vschema update + // Without a wait we fail non-deterministically since the previous vschema will not have + // the updated routing rules. + // We retry MaxBufferingRetries-1 (2) times before giving up. How long we wait before each retry + // -- IF we don't see a newer vschema come in -- affects how long we retry in total and how quickly + // we retry the query and (should) succeed when the traffic switch fails or we otherwise hit the + // max buffer failover time without resolving the keyspace event and marking it as consistent. 
+ // This calculation attemps to ensure that we retry at a sensible interval and number of times + // based on the buffering configuration. This way we should be able to perform the max retries + // within the given window of time for most queries and we should not end up waiting too long + // after the traffic switch fails or the buffer window has ended, retrying old queries. + timeout := e.resolver.scatterConn.gateway.buffer.GetConfig().MaxFailoverDuration / (MaxBufferingRetries - 1) + if waitForNewerVSchema(ctx, e, lastVSchemaCreated, timeout) { vs = e.VSchema() + lastVSchemaCreated = vs.GetCreated() } } @@ -102,16 +116,13 @@ func (e *Executor) newExecute( return err } - // 3: Create a plan for the query + // 3: Create a plan for the query. // If we are retrying, it is likely that the routing rules have changed and hence we need to // replan the query since the target keyspace of the resolved shards may have changed as a - // result of MoveTables. So we cannot reuse the plan from the first try. - // When buffering ends, many queries might be getting planned at the same time. Ideally we - // should be able to reuse plans once the first drained query has been planned. For now, we - // punt on this and choose not to prematurely optimize since it is not clear how much caching - // will help and if it will result in hard-to-track edge cases. - - var plan *engine.Plan + // result of MoveTables SwitchTraffic which does a RebuildSrvVSchema which in turn causes + // the vtgate to clear the cached plans when processing the new serving vschema. + // When buffering ends, many queries might be getting planned at the same time and we then + // take full advatange of the cached plan. 
plan, err = e.getPlan(ctx, vcursor, query, stmt, comments, bindVars, reservedVars, e.normalize, logStats) execStart := e.logPlanningFinished(logStats, plan) @@ -124,12 +135,12 @@ func (e *Executor) newExecute( safeSession.ClearWarnings() } - // add any warnings that the planner wants to add + // Add any warnings that the planner wants to add. for _, warning := range plan.Warnings { safeSession.RecordWarning(warning) } - result, err := e.handleTransactions(ctx, mysqlCtx, safeSession, plan, logStats, vcursor, stmt) + result, err = e.handleTransactions(ctx, mysqlCtx, safeSession, plan, logStats, vcursor, stmt) if err != nil { return err } @@ -137,14 +148,14 @@ func (e *Executor) newExecute( return recResult(plan.Type, result) } - // 4: Prepare for execution + // 4: Prepare for execution. err = e.addNeededBindVars(vcursor, plan.BindVarNeeds, bindVars, safeSession) if err != nil { logStats.Error = err return err } - // 5: Execute the plan and retry if needed + // 5: Execute the plan. if plan.Instructions.NeedsTransaction() { err = e.insideTransaction(ctx, safeSession, logStats, func() error { @@ -158,10 +169,39 @@ func (e *Executor) newExecute( return err } + // 6: Retry if needed. rootCause := vterrors.RootCause(err) if rootCause != nil && strings.Contains(rootCause.Error(), "enforce denied tables") { log.V(2).Infof("Retry: %d, will retry query %s due to %v", try, query, err) - lastVSchemaCreated = vs.GetCreated() + if try == 0 { // We are going to retry at least once + defer func() { + // Prevent any plan cache pollution from queries planned against the wrong keyspace during a MoveTables + // traffic switching operation. 
+ if err != nil { // The error we're checking here is the return value from the newExecute function + cause := vterrors.RootCause(err) + if cause != nil && strings.Contains(cause.Error(), "enforce denied tables") { + // The executor's VSchemaManager clears the plan cache when it receives a new vschema via its + // SrvVSchema watcher (it calls executor.SaveVSchema() in its watch's subscriber callback). This + // happens concurrently with the KeyspaceEventWatcher also receiving the new vschema in its + // SrvVSchema watcher and in its subscriber callback processing it (which includes getting info + // on all shards from the topo), and eventually determining that the keyspace is consistent and + // ending the buffering window. So there's a race with query retries such that a query could be + // planned against the wrong side just as the keyspace event is getting resolved and the buffers + // drained. Then that bad plan is the cached plan for the query until you do another + // topo.RebuildSrvVSchema/vtctldclient RebuildVSchemaGraph which then causes the VSchemaManager + // to clear the plan cache. It's essentially a race between the two SrvVSchema watchers and the + // work they do when a new one is received. If we DID a retry AND the last time we retried + // still encountered the error, we know that the plan used was 1) not valid/correct and going to + // the wrong side of the traffic switch as it failed with the denied tables error and 2) it will + // remain the plan in the cache if we do not clear the plans after it was added to the cache. + // So here we clear the plan cache in order to prevent this scenario where the bad plan is + // cached indefinitely and re-used after the buffering window ends and the keyspace event is + // resolved. 
+ e.ClearPlans() + } + } + }() + } continue } diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go index a67878d7119..5d1d4ecd622 100644 --- a/go/vt/vtgate/planbuilder/builder.go +++ b/go/vt/vtgate/planbuilder/builder.go @@ -22,8 +22,8 @@ import ( "sort" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/vschemawrapper" "vitess.io/vitess/go/vt/key" - "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" @@ -47,10 +47,6 @@ var ( ) type ( - truncater interface { - SetTruncateColumnCount(int) - } - planResult struct { primitive engine.Primitive tables []string @@ -70,11 +66,24 @@ func singleTable(ks, tbl string) string { // TestBuilder builds a plan for a query based on the specified vschema. // This method is only used from tests func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) { - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := vschema.Environment().Parser().Parse2(query) if err != nil { return nil, err } - result, err := sqlparser.RewriteAST(stmt, keyspace, sqlparser.SQLSelectLimitUnset, "", nil, vschema) + // Store the foreign key mode like we do for vcursor. + vw, isVw := vschema.(*vschemawrapper.VSchemaWrapper) + if isVw { + fkState := sqlparser.ForeignKeyChecksState(stmt) + if fkState != nil { + // Restore the old value of ForeignKeyChecksState to not interfere with the next test cases. 
+ oldVal := vw.ForeignKeyChecksState + vw.ForeignKeyChecksState = fkState + defer func() { + vw.ForeignKeyChecksState = oldVal + }() + } + } + result, err := sqlparser.RewriteAST(stmt, keyspace, sqlparser.SQLSelectLimitUnset, "", nil, vschema.GetForeignKeyChecksState(), vschema) if err != nil { return nil, err } @@ -116,7 +125,6 @@ func getConfiguredPlanner(vschema plancontext.VSchema, stmt sqlparser.Statement, case Gen4Left2Right, Gen4GreedyOnly, Gen4: default: // default is gen4 plan - log.Infof("Using Gen4 planner instead of %s", planner.String()) planner = Gen4 } return gen4Planner(query, planner), nil @@ -145,25 +153,7 @@ func buildRoutePlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVa func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { switch stmt := stmt.(type) { - case *sqlparser.Select: - configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) - if err != nil { - return nil, err - } - return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) - case *sqlparser.Insert: - configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) - if err != nil { - return nil, err - } - return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) - case *sqlparser.Update: - configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) - if err != nil { - return nil, err - } - return buildRoutePlan(stmt, reservedVars, vschema, configuredPlanner) - case *sqlparser.Delete: + case *sqlparser.Select, *sqlparser.Insert, *sqlparser.Update, *sqlparser.Delete: configuredPlanner, err := getConfiguredPlanner(vschema, stmt, query) if err != nil { return nil, err @@ -191,8 +181,10 @@ func createInstructionFor(ctx context.Context, query string, stmt sqlparser.Stat return buildVSchemaDDLPlan(stmt, vschema) case *sqlparser.Use: return buildUsePlan(stmt) - case 
sqlparser.Explain: - return buildExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + case *sqlparser.ExplainTab: + return explainTabPlan(stmt, vschema) + case *sqlparser.ExplainStmt: + return buildRoutePlan(stmt, reservedVars, vschema, buildExplainStmtPlan) case *sqlparser.VExplainStmt: return buildVExplainPlan(ctx, stmt, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) case *sqlparser.OtherAdmin: @@ -246,7 +238,7 @@ func buildAnalyzePlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vsche var err error dest := key.Destination(key.DestinationAllShards{}) - if !analyzeStmt.Table.Qualifier.IsEmpty() && sqlparser.SystemSchema(analyzeStmt.Table.Qualifier.String()) { + if analyzeStmt.Table.Qualifier.NotEmpty() && sqlparser.SystemSchema(analyzeStmt.Table.Qualifier.String()) { ks, err = vschema.AnyKeyspace() if err != nil { return nil, err @@ -370,18 +362,12 @@ func buildFlushOptions(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*pla dest = key.DestinationAllShards{} } - tc := &tableCollector{} - for _, tbl := range stmt.TableNames { - tc.addASTTable(keyspace.Name, tbl) - } - return newPlanResult(&engine.Send{ - Keyspace: keyspace, - TargetDestination: dest, - Query: sqlparser.String(stmt), - IsDML: false, - SingleShardOnly: false, - }, tc.getTables()...), nil + Keyspace: keyspace, + TargetDestination: dest, + Query: sqlparser.String(stmt), + ReservedConnectionNeeded: stmt.WithLock, + }), nil } func buildFlushTables(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*planResult, error) { @@ -429,9 +415,10 @@ func buildFlushTables(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*plan if len(tablesMap) == 1 { for sendDest, tables := range tablesMap { return newPlanResult(&engine.Send{ - Keyspace: sendDest.ks, - TargetDestination: sendDest.dest, - Query: sqlparser.String(newFlushStmt(stmt, tables)), + Keyspace: sendDest.ks, + TargetDestination: sendDest.dest, + Query: sqlparser.String(newFlushStmt(stmt, tables)), + 
ReservedConnectionNeeded: stmt.WithLock, }, tc.getTables()...), nil } } @@ -443,9 +430,10 @@ func buildFlushTables(stmt *sqlparser.Flush, vschema plancontext.VSchema) (*plan var sources []engine.Primitive for _, sendDest := range keys { plan := &engine.Send{ - Keyspace: sendDest.ks, - TargetDestination: sendDest.dest, - Query: sqlparser.String(newFlushStmt(stmt, tablesMap[sendDest])), + Keyspace: sendDest.ks, + TargetDestination: sendDest.dest, + Query: sqlparser.String(newFlushStmt(stmt, tablesMap[sendDest])), + ReservedConnectionNeeded: stmt.WithLock, } sources = append(sources, plan) } diff --git a/go/vt/vtgate/planbuilder/bypass.go b/go/vt/vtgate/planbuilder/bypass.go index 52286816a11..62cae9655b1 100644 --- a/go/vt/vtgate/planbuilder/bypass.go +++ b/go/vt/vtgate/planbuilder/bypass.go @@ -49,13 +49,21 @@ func buildPlanForBypass(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, vsc } } + hints := &queryHints{} + if comments, ok := stmt.(sqlparser.Commented); ok { + if qh := getHints(comments.GetParsedComments()); qh != nil { + hints = qh + } + } + send := &engine.Send{ Keyspace: keyspace, TargetDestination: vschema.Destination(), Query: sqlparser.String(stmt), IsDML: sqlparser.IsDMLStatement(stmt), SingleShardOnly: false, - MultishardAutocommit: sqlparser.MultiShardAutocommitDirective(stmt), + MultishardAutocommit: hints.multiShardAutocommit, + QueryTimeout: hints.queryTimeout, } return newPlanResult(send), nil } diff --git a/go/vt/vtgate/planbuilder/call_proc.go b/go/vt/vtgate/planbuilder/call_proc.go index 13fe5cc60e4..34f475689aa 100644 --- a/go/vt/vtgate/planbuilder/call_proc.go +++ b/go/vt/vtgate/planbuilder/call_proc.go @@ -25,7 +25,7 @@ import ( func buildCallProcPlan(stmt *sqlparser.CallProc, vschema plancontext.VSchema) (*planResult, error) { var ks string - if !stmt.Name.Qualifier.IsEmpty() { + if stmt.Name.Qualifier.NotEmpty() { ks = stmt.Name.Qualifier.String() } diff --git a/go/vt/vtgate/planbuilder/collations_test.go 
b/go/vt/vtgate/planbuilder/collations_test.go index 7eaf3968f74..b393e186679 100644 --- a/go/vt/vtgate/planbuilder/collations_test.go +++ b/go/vt/vtgate/planbuilder/collations_test.go @@ -20,11 +20,11 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/test/vschemawrapper" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/test/vschemawrapper" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -45,6 +45,7 @@ func (tc *collationTestCase) run(t *testing.T) { V: loadSchema(t, "vschemas/schema.json", false), SysVarEnabled: true, Version: Gen4, + Env: vtenv.NewTestEnv(), } tc.addCollationsToSchema(vschemaWrapper) @@ -59,7 +60,6 @@ func (tc *collationTestCase) addCollationsToSchema(vschema *vschemawrapper.VSche for i, c := range tbl.Columns { if c.Name.EqualString(collation.colName) { tbl.Columns[i].CollationName = collation.collationName - break } } } @@ -67,7 +67,7 @@ func (tc *collationTestCase) addCollationsToSchema(vschema *vschemawrapper.VSche func TestOrderedAggregateCollations(t *testing.T) { collid := func(collname string) collations.ID { - return collations.Local().LookupByName(collname) + return collations.MySQL8().LookupByName(collname) } testCases := []collationTestCase{ { @@ -76,7 +76,7 @@ func TestOrderedAggregateCollations(t *testing.T) { check: func(t *testing.T, colls []collationInTable, primitive engine.Primitive) { oa, isOA := primitive.(*engine.OrderedAggregate) require.True(t, isOA, "should be an OrderedAggregate") - require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Coll) + require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Collation()) }, }, { @@ -85,7 +85,7 @@ func TestOrderedAggregateCollations(t *testing.T) { check: func(t *testing.T, colls []collationInTable, primitive engine.Primitive) { distinct, isDistinct := primitive.(*engine.Distinct) require.True(t, isDistinct, "should be a distinct") - require.Equal(t, 
collid(colls[0].collationName), distinct.CheckCols[0].Type.Coll) + require.Equal(t, collid(colls[0].collationName), distinct.CheckCols[0].Type.Collation()) }, }, { @@ -97,8 +97,8 @@ func TestOrderedAggregateCollations(t *testing.T) { check: func(t *testing.T, colls []collationInTable, primitive engine.Primitive) { oa, isOA := primitive.(*engine.OrderedAggregate) require.True(t, isOA, "should be an OrderedAggregate") - require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Coll) - require.Equal(t, collid(colls[1].collationName), oa.GroupByKeys[1].Type.Coll) + require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Collation()) + require.Equal(t, collid(colls[1].collationName), oa.GroupByKeys[1].Type.Collation()) }, }, { @@ -109,7 +109,7 @@ func TestOrderedAggregateCollations(t *testing.T) { check: func(t *testing.T, colls []collationInTable, primitive engine.Primitive) { oa, isOA := primitive.(*engine.OrderedAggregate) require.True(t, isOA, "should be an OrderedAggregate") - require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Coll) + require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Collation()) }, }, { @@ -122,7 +122,7 @@ func TestOrderedAggregateCollations(t *testing.T) { require.True(t, isMemSort, "should be a MemorySort") oa, isOA := memSort.Input.(*engine.OrderedAggregate) require.True(t, isOA, "should be an OrderedAggregate") - require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Coll) + require.Equal(t, collid(colls[0].collationName), oa.GroupByKeys[0].Type.Collation()) }, }, } diff --git a/go/vt/vtgate/planbuilder/concatenate.go b/go/vt/vtgate/planbuilder/concatenate.go deleted file mode 100644 index 81cbe3d5b65..00000000000 --- a/go/vt/vtgate/planbuilder/concatenate.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -type concatenate struct { - sources []logicalPlan - - // These column offsets do not need to be typed checked - they usually contain weight_string() - // columns that are not going to be returned to the user - noNeedToTypeCheck []int -} - -var _ logicalPlan = (*concatenate)(nil) - -// Primitive implements the logicalPlan interface -func (c *concatenate) Primitive() engine.Primitive { - var sources []engine.Primitive - for _, source := range c.sources { - sources = append(sources, source.Primitive()) - } - - return engine.NewConcatenate(sources, c.noNeedToTypeCheck) -} diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index 41e5d64346e..351bf42672c 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -2,6 +2,7 @@ package planbuilder import ( "context" + "errors" "fmt" "vitess.io/vitess/go/vt/key" @@ -111,9 +112,9 @@ func buildDDLPlans(ctx context.Context, sql string, ddlStatement sqlparser.DDLSt } err = checkFKError(vschema, ddlStatement, keyspace) case *sqlparser.CreateView: - destination, keyspace, err = buildCreateView(ctx, vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) + destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, enableOnlineDDL, enableDirectDDL, ddl.Select, ddl) case *sqlparser.AlterView: - destination, keyspace, err = buildAlterView(ctx, 
vschema, ddl, reservedVars, enableOnlineDDL, enableDirectDDL) + destination, keyspace, err = buildCreateViewCommon(ctx, vschema, reservedVars, enableOnlineDDL, enableDirectDDL, ddl.Select, ddl) case *sqlparser.DropView: destination, keyspace, err = buildDropView(vschema, ddlStatement) case *sqlparser.DropTable: @@ -172,7 +173,8 @@ func findTableDestinationAndKeyspace(vschema plancontext.VSchema, ddlStatement s var err error table, _, _, _, destination, err = vschema.FindTableOrVindex(ddlStatement.GetTable()) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } @@ -190,15 +192,28 @@ func findTableDestinationAndKeyspace(vschema plancontext.VSchema, ddlStatement s return destination, keyspace, nil } -func buildAlterView(ctx context.Context, vschema plancontext.VSchema, ddl *sqlparser.AlterView, reservedVars *sqlparser.ReservedVars, enableOnlineDDL, enableDirectDDL bool) (key.Destination, *vindexes.Keyspace, error) { - // For Alter View, we require that the view exist and the select query can be satisfied within the keyspace itself +func buildCreateViewCommon( + ctx context.Context, + vschema plancontext.VSchema, + reservedVars *sqlparser.ReservedVars, + enableOnlineDDL, enableDirectDDL bool, + ddlSelect sqlparser.SelectStatement, + ddl sqlparser.DDLStatement, +) (key.Destination, *vindexes.Keyspace, error) { + // For Create View, we require that the keyspace exist and the select query can be satisfied within the keyspace itself // We should remove the keyspace name from the table name, as the database name in MySQL might be different than the keyspace name destination, keyspace, err := findTableDestinationAndKeyspace(vschema, ddl) if err != nil { return nil, nil, err } - selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) + // because we 
don't trust the schema tracker to have up-to-date info, we don't want to expand any SELECT * here + var expressions []sqlparser.SelectExprs + _ = sqlparser.VisitAllSelects(ddlSelect, func(p *sqlparser.Select, idx int) error { + expressions = append(expressions, sqlparser.CloneSelectExprs(p.SelectExprs)) + return nil + }) + selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddlSelect), ddlSelect, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) if err != nil { return nil, nil, err } @@ -206,52 +221,15 @@ func buildAlterView(ctx context.Context, vschema plancontext.VSchema, ddl *sqlpa if keyspace.Name != selPlanKs { return nil, nil, vterrors.VT12001(ViewDifferentKeyspace) } - if vschema.IsViewsEnabled() { - if keyspace == nil { - return nil, nil, vterrors.VT09005() - } - return destination, keyspace, nil - } - isRoutePlan, opCode := tryToGetRoutePlan(selectPlan.primitive) - if !isRoutePlan { - return nil, nil, vterrors.VT12001(ViewComplex) - } - if opCode != engine.Unsharded && opCode != engine.EqualUnique && opCode != engine.Scatter { - return nil, nil, vterrors.VT12001(ViewComplex) - } - _ = sqlparser.SafeRewrite(ddl.Select, nil, func(cursor *sqlparser.Cursor) bool { - switch tableName := cursor.Node().(type) { - case sqlparser.TableName: - cursor.Replace(sqlparser.TableName{ - Name: tableName.Name, - }) - } - return true + + _ = sqlparser.VisitAllSelects(ddlSelect, func(p *sqlparser.Select, idx int) error { + p.SelectExprs = expressions[idx] + return nil }) - return destination, keyspace, nil -} -func buildCreateView(ctx context.Context, vschema plancontext.VSchema, ddl *sqlparser.CreateView, reservedVars *sqlparser.ReservedVars, enableOnlineDDL, enableDirectDDL bool) (key.Destination, *vindexes.Keyspace, error) { - // For Create View, we require that the keyspace exist and the select query can be satisfied within the keyspace itself - // We should remove the keyspace name from the table name, as the database name in MySQL might be different 
than the keyspace name - destination, keyspace, _, err := vschema.TargetDestination(ddl.ViewName.Qualifier.String()) - if err != nil { - return nil, nil, err - } - ddl.ViewName.Qualifier = sqlparser.NewIdentifierCS("") + sqlparser.RemoveKeyspace(ddl) - selectPlan, err := createInstructionFor(ctx, sqlparser.String(ddl.Select), ddl.Select, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) - if err != nil { - return nil, nil, err - } - selPlanKs := selectPlan.primitive.GetKeyspaceName() - if keyspace.Name != selPlanKs { - return nil, nil, vterrors.VT12001(ViewDifferentKeyspace) - } if vschema.IsViewsEnabled() { - if keyspace == nil { - return nil, nil, vterrors.VT09005() - } return destination, keyspace, nil } isRoutePlan, opCode := tryToGetRoutePlan(selectPlan.primitive) @@ -261,15 +239,6 @@ func buildCreateView(ctx context.Context, vschema plancontext.VSchema, ddl *sqlp if opCode != engine.Unsharded && opCode != engine.EqualUnique && opCode != engine.Scatter { return nil, nil, vterrors.VT12001(ViewComplex) } - _ = sqlparser.SafeRewrite(ddl.Select, nil, func(cursor *sqlparser.Cursor) bool { - switch tableName := cursor.Node().(type) { - case sqlparser.TableName: - cursor.Replace(sqlparser.TableName{ - Name: tableName.Name, - }) - } - return true - }) return destination, keyspace, nil } @@ -312,7 +281,8 @@ func buildDropTable(vschema plancontext.VSchema, ddlStatement sqlparser.DDLState table, _, _, _, destinationTab, err = vschema.FindTableOrVindex(tab) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } @@ -355,7 +325,8 @@ func buildRenameTable(vschema plancontext.VSchema, renameTable *sqlparser.Rename table, _, _, _, destinationFrom, err = vschema.FindTableOrVindex(tabPair.FromTable) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, 
¬FoundError) if !isNotFound { return nil, nil, err } diff --git a/go/vt/vtgate/planbuilder/delete.go b/go/vt/vtgate/planbuilder/delete.go index 188c1485d1d..239e7bae0ae 100644 --- a/go/vt/vtgate/planbuilder/delete.go +++ b/go/vt/vtgate/planbuilder/delete.go @@ -23,7 +23,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -50,7 +49,7 @@ func gen4DeleteStmtPlanner( return nil, err } - err = rewriteRoutedTables(deleteStmt, vschema) + err = queryRewrite(ctx, deleteStmt) if err != nil { return nil, err } @@ -64,17 +63,14 @@ func gen4DeleteStmtPlanner( if ks, tables := ctx.SemTable.SingleUnshardedKeyspace(); ks != nil { if !ctx.SemTable.ForeignKeysPresent() { plan := deleteUnshardedShortcut(deleteStmt, ks, tables) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil + return newPlanResult(plan, operators.QualifiedTables(ks, tables)...), nil } } - if err := checkIfDeleteSupported(deleteStmt, ctx.SemTable); err != nil { - return nil, err - } - - err = queryRewrite(ctx.SemTable, reservedVars, deleteStmt) - if err != nil { - return nil, err + // error out here if delete query cannot bypass the planner and + // planner cannot plan such query due to different reason like missing full information, etc. 
+ if ctx.SemTable.NotUnshardedErr != nil { + return nil, ctx.SemTable.NotUnshardedErr } op, err := operators.PlanQuery(ctx, deleteStmt) @@ -82,12 +78,12 @@ func gen4DeleteStmtPlanner( return nil, err } - plan, err := transformToLogicalPlan(ctx, op) + plan, err := transformToPrimitive(ctx, op) if err != nil { return nil, err } - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil + return newPlanResult(plan, operators.TablesUsed(op)...), nil } func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { @@ -95,7 +91,7 @@ func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { if !ok { return del, nil } - if !atExpr.As.IsEmpty() && !sqlparser.Equals.IdentifierCS(del.Targets[0].Name, atExpr.As) { + if atExpr.As.NotEmpty() && !sqlparser.Equals.IdentifierCS(del.Targets[0].Name, atExpr.As) { // Unknown table in MULTI DELETE return nil, vterrors.VT03003(del.Targets[0].Name.String()) } @@ -127,7 +123,7 @@ func rewriteSingleTbl(del *sqlparser.Delete) (*sqlparser.Delete, error) { return del, nil } -func deleteUnshardedShortcut(stmt *sqlparser.Delete, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { +func deleteUnshardedShortcut(stmt *sqlparser.Delete, ks *vindexes.Keyspace, tables []*vindexes.Table) engine.Primitive { edml := engine.NewDML() edml.Keyspace = ks edml.Opcode = engine.Unsharded @@ -135,42 +131,5 @@ func deleteUnshardedShortcut(stmt *sqlparser.Delete, ks *vindexes.Keyspace, tabl for _, tbl := range tables { edml.TableNames = append(edml.TableNames, tbl.Name.String()) } - return &primitiveWrapper{prim: &engine.Delete{DML: edml}} -} - -// checkIfDeleteSupported checks if the delete query is supported or we must return an error. 
-func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable) error { - if semTable.NotUnshardedErr != nil { - return semTable.NotUnshardedErr - } - - // Delete is only supported for a single TableExpr which is supposed to be an aliased expression - multiShardErr := vterrors.VT12001("multi-shard or vindex write statement") - if len(del.TableExprs) != 1 { - return multiShardErr - } - _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !isAliasedExpr { - return multiShardErr - } - - if len(del.Targets) > 1 { - return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") - } - - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.Subquery, *sqlparser.DerivedTable: - // We have a subquery, so we must fail the planning. - // If this subquery and the table expression were all belonging to the same unsharded keyspace, - // we would have already created a plan for them before doing these checks. - return false, vterrors.VT12001("subqueries in DML") - } - return true, nil - }, del) - if err != nil { - return err - } - - return nil + return &engine.Delete{DML: edml} } diff --git a/go/vt/vtgate/planbuilder/distinct.go b/go/vt/vtgate/planbuilder/distinct.go deleted file mode 100644 index 2a9f58a9942..00000000000 --- a/go/vt/vtgate/planbuilder/distinct.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*distinct)(nil) - -// distinct is the logicalPlan for engine.Distinct. -type distinct struct { - logicalPlanCommon - checkCols []engine.CheckCol - truncateColumn int - - // needToTruncate is the old way to check weight_string column and set truncation. - needToTruncate bool -} - -func newDistinct(source logicalPlan, checkCols []engine.CheckCol, truncateColumn int) logicalPlan { - return &distinct{ - logicalPlanCommon: newBuilderCommon(source), - checkCols: checkCols, - truncateColumn: truncateColumn, - } -} - -func (d *distinct) Primitive() engine.Primitive { - truncate := d.truncateColumn - if d.needToTruncate { - wsColFound := false - for _, col := range d.checkCols { - if col.WsCol != nil { - wsColFound = true - break - } - } - if wsColFound { - truncate = len(d.checkCols) - } - } - return &engine.Distinct{ - Source: d.input.Primitive(), - CheckCols: d.checkCols, - Truncate: truncate, - } -} diff --git a/go/vt/vtgate/planbuilder/dml_planner.go b/go/vt/vtgate/planbuilder/dml_planner.go deleted file mode 100644 index 7ec616f7f36..00000000000 --- a/go/vt/vtgate/planbuilder/dml_planner.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -func rewriteRoutedTables(stmt sqlparser.Statement, vschema plancontext.VSchema) error { - // Rewrite routed tables - return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - aliasTbl, isAlias := node.(*sqlparser.AliasedTableExpr) - if !isAlias { - return true, nil - } - tableName, ok := aliasTbl.Expr.(sqlparser.TableName) - if !ok { - return true, nil - } - vschemaTable, vindexTbl, _, _, _, err := vschema.FindTableOrVindex(tableName) - if err != nil { - return false, err - } - if vindexTbl != nil { - // vindex cannot be present in a dml statement. - return false, vterrors.VT09014() - } - - if vschemaTable.Name.String() != tableName.Name.String() { - name := tableName.Name - if aliasTbl.As.IsEmpty() { - // if the user hasn't specified an alias, we'll insert one here so the old table name still works - aliasTbl.As = sqlparser.NewIdentifierCS(name.String()) - } - tableName.Name = sqlparser.NewIdentifierCS(vschemaTable.Name.String()) - aliasTbl.Expr = tableName - } - - return true, nil - }, stmt) -} - -func generateQuery(statement sqlparser.Statement) string { - buf := sqlparser.NewTrackedBuffer(dmlFormatter) - statement.Format(buf) - return buf.String() -} diff --git a/go/vt/vtgate/planbuilder/expression_converter.go b/go/vt/vtgate/planbuilder/expression_converter.go index 61eebbe7f99..961c4f1fb4b 100644 --- a/go/vt/vtgate/planbuilder/expression_converter.go +++ b/go/vt/vtgate/planbuilder/expression_converter.go @@ -20,16 +20,18 @@ import ( "fmt" "strings" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type expressionConverter 
struct { tabletExpressions []sqlparser.Expr + env *vtenv.Environment + collation collations.ID } func booleanValues(astExpr sqlparser.Expr) evalengine.Expr { @@ -81,12 +83,15 @@ func (ec *expressionConverter) convert(astExpr sqlparser.Expr, boolean, identifi return evalExpr, nil } } - evalExpr, err := evalengine.Translate(astExpr, nil) + evalExpr, err := evalengine.Translate(astExpr, &evalengine.Config{ + Collation: ec.collation, + Environment: ec.env, + }) if err != nil { if !strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) { return nil, err } - evalExpr = evalengine.NewColumn(len(ec.tabletExpressions), evalengine.UnknownType(), nil) + evalExpr = evalengine.NewColumn(len(ec.tabletExpressions), evalengine.Type{}, nil) ec.tabletExpressions = append(ec.tabletExpressions, astExpr) } return evalExpr, nil diff --git a/go/vt/vtgate/planbuilder/expression_converter_test.go b/go/vt/vtgate/planbuilder/expression_converter_test.go index 798ed1e1635..c92cc184262 100644 --- a/go/vt/vtgate/planbuilder/expression_converter_test.go +++ b/go/vt/vtgate/planbuilder/expression_converter_test.go @@ -22,6 +22,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -40,16 +41,20 @@ func TestConversion(t *testing.T) { expressionsOut: e(evalengine.NewLiteralInt(1)), }, { expressionsIn: "@@foo", - expressionsOut: e(evalengine.NewColumn(0, evalengine.UnknownType(), nil)), + expressionsOut: e(evalengine.NewColumn(0, evalengine.Type{}, nil)), }} + venv := vtenv.NewTestEnv() for _, tc := range queries { t.Run(tc.expressionsIn, func(t *testing.T) { - statement, err := sqlparser.Parse("select " + tc.expressionsIn) + statement, err := sqlparser.NewTestParser().Parse("select " + tc.expressionsIn) require.NoError(t, err) slct := statement.(*sqlparser.Select) exprs := extract(slct.SelectExprs) - ec := &expressionConverter{} + ec := &expressionConverter{ + env: venv, + 
collation: venv.CollationEnv().DefaultConnectionCharset(), + } var result []evalengine.Expr for _, expr := range exprs { evalExpr, err := ec.convert(expr, false, false) diff --git a/go/vt/vtgate/planbuilder/fk_cascade.go b/go/vt/vtgate/planbuilder/fk_cascade.go deleted file mode 100644 index f2ca67ef5d0..00000000000 --- a/go/vt/vtgate/planbuilder/fk_cascade.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*fkCascade)(nil) - -// fkCascade is the logicalPlan for engine.FkCascade. -type fkCascade struct { - parent logicalPlan - selection logicalPlan - children []*engine.FkChild -} - -// newFkCascade builds a new fkCascade. -func newFkCascade(parent, selection logicalPlan, children []*engine.FkChild) *fkCascade { - return &fkCascade{ - parent: parent, - selection: selection, - children: children, - } -} - -// Primitive implements the logicalPlan interface -func (fkc *fkCascade) Primitive() engine.Primitive { - return &engine.FkCascade{ - Parent: fkc.parent.Primitive(), - Selection: fkc.selection.Primitive(), - Children: fkc.children, - } -} diff --git a/go/vt/vtgate/planbuilder/fk_verify.go b/go/vt/vtgate/planbuilder/fk_verify.go deleted file mode 100644 index 206bad90fea..00000000000 --- a/go/vt/vtgate/planbuilder/fk_verify.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*fkVerify)(nil) - -type verifyLP struct { - verify logicalPlan - typ string -} - -// fkVerify is the logicalPlan for engine.FkVerify. -type fkVerify struct { - input logicalPlan - verify []*verifyLP -} - -// newFkVerify builds a new fkVerify. -func newFkVerify(input logicalPlan, verify []*verifyLP) *fkVerify { - return &fkVerify{ - input: input, - verify: verify, - } -} - -// Primitive implements the logicalPlan interface -func (fkc *fkVerify) Primitive() engine.Primitive { - var verify []*engine.Verify - for _, v := range fkc.verify { - verify = append(verify, &engine.Verify{ - Exec: v.verify.Primitive(), - Typ: v.typ, - }) - } - return &engine.FkVerify{ - Exec: fkc.input.Primitive(), - Verify: verify, - } -} diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index c187cd7efdc..80516871623 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -33,7 +33,7 @@ func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStm return nil, err } - err = rewriteRoutedTables(insStmt, vschema) + err = queryRewrite(ctx, insStmt) if err != nil { return nil, err } @@ -53,7 +53,7 @@ func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStm if tables[0].AutoIncrement == nil && !ctx.SemTable.ForeignKeysPresent() { plan := 
insertUnshardedShortcut(insStmt, ks, tables) setCommentDirectivesOnPlan(plan, insStmt) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil + return newPlanResult(plan, operators.QualifiedTables(ks, tables)...), nil } } @@ -62,12 +62,11 @@ func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStm return nil, err } - if err = errOutIfPlanCannotBeConstructed(ctx, tblInfo.GetVindexTable(), insStmt, ctx.SemTable.ForeignKeysPresent()); err != nil { - return nil, err + if _, isVindex := tblInfo.(*semantics.VindexTable); isVindex { + return nil, vterrors.VT09014() } - err = queryRewrite(ctx.SemTable, reservedVars, insStmt) - if err != nil { + if err = errOutIfPlanCannotBeConstructed(ctx, tblInfo.GetVindexTable()); err != nil { return nil, err } @@ -76,50 +75,29 @@ func gen4InsertStmtPlanner(version querypb.ExecuteOptions_PlannerVersion, insStm return nil, err } - plan, err := transformToLogicalPlan(ctx, op) + plan, err := transformToPrimitive(ctx, op) if err != nil { return nil, err } - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil + return newPlanResult(plan, operators.TablesUsed(op)...), nil } -func errOutIfPlanCannotBeConstructed(ctx *plancontext.PlanningContext, vTbl *vindexes.Table, insStmt *sqlparser.Insert, fkPlanNeeded bool) error { - if vTbl.Keyspace.Sharded && ctx.SemTable.NotUnshardedErr != nil { - return ctx.SemTable.NotUnshardedErr - } - if insStmt.Action != sqlparser.ReplaceAct { +func errOutIfPlanCannotBeConstructed(ctx *plancontext.PlanningContext, vTbl *vindexes.Table) error { + if !vTbl.Keyspace.Sharded { return nil } - if fkPlanNeeded { - return vterrors.VT12001("REPLACE INTO with foreign keys") - } - return nil -} - -func insertUnshardedShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { - eIns := &engine.Insert{} - eIns.Keyspace = ks - eIns.TableName = tables[0].Name.String() - eIns.Opcode = engine.InsertUnsharded - 
eIns.Query = generateQuery(stmt) - return &insert{eInsert: eIns} + return ctx.SemTable.NotUnshardedErr } -type insert struct { - eInsert *engine.Insert - source logicalPlan -} - -var _ logicalPlan = (*insert)(nil) - -func (i *insert) Primitive() engine.Primitive { - if i.source != nil { - i.eInsert.Input = i.source.Primitive() +func insertUnshardedShortcut(stmt *sqlparser.Insert, ks *vindexes.Keyspace, tables []*vindexes.Table) engine.Primitive { + eIns := &engine.Insert{ + InsertCommon: engine.InsertCommon{ + Opcode: engine.InsertUnsharded, + Keyspace: ks, + TableName: tables[0].Name.String(), + }, } - return i.eInsert -} - -func (i *insert) ContainsTables() semantics.TableSet { - panic("does not expect insert to get contains tables call") + eIns.Query = generateQuery(stmt) + return eIns } diff --git a/go/vt/vtgate/planbuilder/join.go b/go/vt/vtgate/planbuilder/join.go deleted file mode 100644 index 02027a8b49e..00000000000 --- a/go/vt/vtgate/planbuilder/join.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*join)(nil) - -// join is used to build a Join primitive. -// It's used to build an inner join and only used by the Gen4 planner -type join struct { - // Left and Right are the nodes for the join. 
- Left, Right logicalPlan - - // The Opcode tells us if this is an inner or outer join - Opcode engine.JoinOpcode - - // These are the columns that will be produced by this plan. - // Negative offsets come from the LHS, and positive from the RHS - Cols []int - - // Vars are the columns that will be sent from the LHS to the RHS - // the number is the offset on the LHS result, and the string is the bind variable name used in the RHS - Vars map[string]int - - // LHSColumns are the columns from the LHS used for the join. - // These are the same columns pushed on the LHS that are now used in the Vars field - LHSColumns []*sqlparser.ColName -} - -// Primitive implements the logicalPlan interface -func (j *join) Primitive() engine.Primitive { - return &engine.Join{ - Left: j.Left.Primitive(), - Right: j.Right.Primitive(), - Cols: j.Cols, - Vars: j.Vars, - Opcode: j.Opcode, - } -} diff --git a/go/vt/vtgate/planbuilder/limit.go b/go/vt/vtgate/planbuilder/limit.go deleted file mode 100644 index 5cfd27dfe06..00000000000 --- a/go/vt/vtgate/planbuilder/limit.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*limit)(nil) - -// limit is the logicalPlan for engine.Limit. -// This gets built if a limit needs to be applied -// after rows are returned from an underlying -// operation. 
Since a limit is the final operation -// of a SELECT, most pushes are not applicable. -type limit struct { - logicalPlanCommon - elimit *engine.Limit -} - -// newLimit builds a new limit. -func newLimit(plan logicalPlan) *limit { - return &limit{ - logicalPlanCommon: newBuilderCommon(plan), - elimit: &engine.Limit{}, - } -} - -// Primitive implements the logicalPlan interface -func (l *limit) Primitive() engine.Primitive { - l.elimit.Input = l.input.Primitive() - return l.elimit -} diff --git a/go/vt/vtgate/planbuilder/locktables.go b/go/vt/vtgate/planbuilder/locktables.go index 9c3a5fa44e9..e8776d13e65 100644 --- a/go/vt/vtgate/planbuilder/locktables.go +++ b/go/vt/vtgate/planbuilder/locktables.go @@ -33,6 +33,5 @@ func buildLockPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, _ planco // buildUnlockPlan plans lock tables statement. func buildUnlockPlan(stmt sqlparser.Statement, _ *sqlparser.ReservedVars, _ plancontext.VSchema) (*planResult, error) { - log.Warningf("Unlock Tables statement is ignored: %v", stmt) - return newPlanResult(engine.NewRowsPrimitive(make([][]sqltypes.Value, 0), make([]*querypb.Field, 0))), nil + return newPlanResult(&engine.Unlock{}), nil } diff --git a/go/vt/vtgate/planbuilder/logical_plan.go b/go/vt/vtgate/planbuilder/logical_plan.go deleted file mode 100644 index fac0bb59b5f..00000000000 --- a/go/vt/vtgate/planbuilder/logical_plan.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -// logicalPlan defines the interface that a primitive must -// satisfy. -type logicalPlan interface { - // Primitive returns the underlying primitive. - Primitive() engine.Primitive -} - -// ------------------------------------------------------------------------- - -// logicalPlanCommon implements some common functionality of builders. -// Make sure to override in case behavior needs to be changed. -type logicalPlanCommon struct { - order int - input logicalPlan -} - -func newBuilderCommon(input logicalPlan) logicalPlanCommon { - return logicalPlanCommon{input: input} -} - -func (bc *logicalPlanCommon) Order() int { - return bc.order -} - -// ------------------------------------------------------------------------- - -// resultsBuilder is a superset of logicalPlanCommon. It also handles -// resultsColumn functionality. -type resultsBuilder struct { - logicalPlanCommon - truncater truncater -} - -func newResultsBuilder(input logicalPlan, truncater truncater) resultsBuilder { - return resultsBuilder{ - logicalPlanCommon: newBuilderCommon(input), - truncater: truncater, - } -} diff --git a/go/vt/vtgate/planbuilder/memory_sort.go b/go/vt/vtgate/planbuilder/memory_sort.go deleted file mode 100644 index e1d3cf311dc..00000000000 --- a/go/vt/vtgate/planbuilder/memory_sort.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*memorySort)(nil) - -// memorySort is the logicalPlan for engine.Limit. -// This gets built if a limit needs to be applied -// after rows are returned from an underlying -// operation. Since a limit is the final operation -// of a SELECT, most pushes are not applicable. -type memorySort struct { - resultsBuilder - eMemorySort *engine.MemorySort -} - -// Primitive implements the logicalPlan interface -func (ms *memorySort) Primitive() engine.Primitive { - ms.eMemorySort.Input = ms.input.Primitive() - return ms.eMemorySort -} - -// SetLimit implements the logicalPlan interface -func (ms *memorySort) SetLimit(limit *sqlparser.Limit) error { - return vterrors.VT13001("memorySort.Limit: unreachable") -} diff --git a/go/vt/vtgate/planbuilder/merge_sort.go b/go/vt/vtgate/planbuilder/merge_sort.go deleted file mode 100644 index edca9194ccf..00000000000 --- a/go/vt/vtgate/planbuilder/merge_sort.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*mergeSort)(nil) - -// mergeSort is a pseudo-primitive. It amends the -// the underlying Route to perform a merge sort. 
-// It's differentiated as a separate primitive -// because some operations cannot be pushed down, -// which would otherwise be possible with a simple route. -// Since ORDER BY happens near the end of the SQL processing, -// most functions of this primitive are unreachable. -type mergeSort struct { - resultsBuilder - truncateColumnCount int -} - -// SetTruncateColumnCount satisfies the truncater interface. -// This function records the truncate column count and sets -// it later on the eroute during wire-up phase. -func (ms *mergeSort) SetTruncateColumnCount(count int) { - ms.truncateColumnCount = count -} - -// Primitive implements the logicalPlan interface -func (ms *mergeSort) Primitive() engine.Primitive { - return ms.input.Primitive() -} diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index a04c4b00c2c..76c4ddd476c 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -22,21 +22,22 @@ import ( "strconv" "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/sysvars" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator) (logicalPlan, error) { +func transformToPrimitive(ctx *plancontext.PlanningContext, op operators.Operator) (engine.Primitive, error) { switch op := op.(type) { case *operators.Route: return 
transformRoutePlan(ctx, op) @@ -68,12 +69,86 @@ func transformToLogicalPlan(ctx *plancontext.PlanningContext, op ops.Operator) ( return transformFkVerify(ctx, op) case *operators.InsertSelection: return transformInsertionSelection(ctx, op) + case *operators.Upsert: + return transformUpsert(ctx, op) + case *operators.HashJoin: + return transformHashJoin(ctx, op) + case *operators.Sequential: + return transformSequential(ctx, op) + case *operators.DMLWithInput: + return transformDMLWithInput(ctx, op) } - return nil, vterrors.VT13001(fmt.Sprintf("unknown type encountered: %T (transformToLogicalPlan)", op)) + return nil, vterrors.VT13001(fmt.Sprintf("unknown type encountered: %T (transformToPrimitive)", op)) } -func transformInsertionSelection(ctx *plancontext.PlanningContext, op *operators.InsertSelection) (logicalPlan, error) { +func transformDMLWithInput(ctx *plancontext.PlanningContext, op *operators.DMLWithInput) (engine.Primitive, error) { + input, err := transformToPrimitive(ctx, op.Source) + if err != nil { + return nil, err + } + + var dmls []engine.Primitive + for _, dml := range op.DML { + del, err := transformToPrimitive(ctx, dml) + if err != nil { + return nil, err + } + dmls = append(dmls, del) + } + + return &engine.DMLWithInput{ + DMLs: dmls, + Input: input, + OutputCols: op.Offsets, + BVList: op.BvList, + }, nil +} + +func transformUpsert(ctx *plancontext.PlanningContext, op *operators.Upsert) (engine.Primitive, error) { + upsert := &engine.Upsert{} + for _, source := range op.Sources { + iLp, uLp, err := transformOneUpsert(ctx, source) + if err != nil { + return nil, err + } + upsert.AddUpsert(iLp, uLp) + } + return upsert, nil +} + +func transformOneUpsert(ctx *plancontext.PlanningContext, source operators.UpsertSource) (iLp, uLp engine.Primitive, err error) { + iLp, err = transformToPrimitive(ctx, source.Insert) + if err != nil { + return + } + ins, ok := iLp.(*engine.Insert) + if ok { + ins.PreventAutoCommit = true + } + uLp, err = 
transformToPrimitive(ctx, source.Update) + return +} + +func transformSequential(ctx *plancontext.PlanningContext, op *operators.Sequential) (engine.Primitive, error) { + var prims []engine.Primitive + for _, source := range op.Sources { + prim, err := transformToPrimitive(ctx, source) + if err != nil { + return nil, err + } + ins, ok := prim.(*engine.Insert) + if ok { + ins.PreventAutoCommit = true + } + + prims = append(prims, prim) + } + + return engine.NewSequential(prims), nil +} + +func transformInsertionSelection(ctx *plancontext.PlanningContext, op *operators.InsertSelection) (engine.Primitive, error) { rb, isRoute := op.Insert.(*operators.Route) if !isRoute { return nil, vterrors.VT13001(fmt.Sprintf("Incorrect type encountered: %T (transformInsertionSelection)", op.Insert)) @@ -89,42 +164,41 @@ func transformInsertionSelection(ctx *plancontext.PlanningContext, op *operators } ins := dmlOp.(*operators.Insert) - eins := &engine.Insert{ - Opcode: mapToInsertOpCode(rb.Routing.OpCode(), true), - Keyspace: rb.Routing.Keyspace(), - TableName: ins.VTable.Name.String(), - Ignore: ins.Ignore, - ForceNonStreaming: op.ForceNonStreaming, - Generate: autoIncGenerate(ins.AutoIncrement), - ColVindexes: ins.ColVindexes, - VindexValues: ins.VindexValues, + eins := &engine.InsertSelect{ + InsertCommon: engine.InsertCommon{ + Keyspace: rb.Routing.Keyspace(), + TableName: ins.VTable.Name.String(), + Ignore: ins.Ignore, + ForceNonStreaming: op.ForceNonStreaming, + Generate: autoIncGenerate(ins.AutoIncrement), + ColVindexes: ins.ColVindexes, + }, VindexValueOffset: ins.VindexValueOffset, } - lp := &insert{eInsert: eins} - eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins.AST) + eins.Prefix, _, eins.Suffix = generateInsertShardedQuery(ins.AST) - selectionPlan, err := transformToLogicalPlan(ctx, op.Select) + selectionPlan, err := transformToPrimitive(ctx, op.Select) if err != nil { return nil, err } - lp.source = selectionPlan - return lp, nil + eins.Input = 
selectionPlan + return eins, nil } -// transformFkCascade transforms a FkCascade operator into a logical plan. -func transformFkCascade(ctx *plancontext.PlanningContext, fkc *operators.FkCascade) (logicalPlan, error) { - // We convert the parent operator to a logical plan. - parentLP, err := transformToLogicalPlan(ctx, fkc.Parent) +// transformFkCascade transforms a FkCascade operator into an engine primitive +func transformFkCascade(ctx *plancontext.PlanningContext, fkc *operators.FkCascade) (engine.Primitive, error) { + // We convert the parent operator to a primitive + parentLP, err := transformToPrimitive(ctx, fkc.Parent) if err != nil { return nil, nil } - // Once we have the parent logical plan, we can create the selection logical plan and the primitives for the children operators. + // Once we have the parent primitive, we can create the selection primitive and the primitives for the children operators. // For all of these, we don't need the semTable anymore. We set it to nil, to avoid using an incorrect one. ctx.SemTable = nil - selLP, err := transformToLogicalPlan(ctx, fkc.Selection) + selLP, err := transformToPrimitive(ctx, fkc.Selection) if err != nil { return nil, err } @@ -132,29 +206,34 @@ func transformFkCascade(ctx *plancontext.PlanningContext, fkc *operators.FkCasca // Go over the children and convert them to Primitives too. 
var children []*engine.FkChild for _, child := range fkc.Children { - childLP, err := transformToLogicalPlan(ctx, child.Op) + childLP, err := transformToPrimitive(ctx, child.Op) if err != nil { return nil, err } - childEngine := childLP.Primitive() + childEngine := childLP children = append(children, &engine.FkChild{ - BVName: child.BVName, - Cols: child.Cols, - Exec: childEngine, + BVName: child.BVName, + Cols: child.Cols, + NonLiteralInfo: child.NonLiteralInfo, + Exec: childEngine, }) } - return newFkCascade(parentLP, selLP, children), nil + return &engine.FkCascade{ + Selection: selLP, + Children: children, + Parent: parentLP, + }, nil } -func transformSubQuery(ctx *plancontext.PlanningContext, op *operators.SubQuery) (logicalPlan, error) { - outer, err := transformToLogicalPlan(ctx, op.Outer) +func transformSubQuery(ctx *plancontext.PlanningContext, op *operators.SubQuery) (engine.Primitive, error) { + outer, err := transformToPrimitive(ctx, op.Outer) if err != nil { return nil, err } - inner, err := transformToLogicalPlan(ctx, op.Subquery) + inner, err := transformToPrimitive(ctx, op.Subquery) if err != nil { return nil, err } @@ -165,92 +244,120 @@ func transformSubQuery(ctx *plancontext.PlanningContext, op *operators.SubQuery) } if len(cols) == 0 { // no correlation, so uncorrelated it is - return newUncorrelatedSubquery(op.FilterType, op.SubqueryValueName, op.HasValuesName, inner, outer), nil - } - - lhsCols, err := op.OuterExpressionsNeeded(ctx, op.Outer) - if err != nil { - return nil, err - } - return newSemiJoin(outer, inner, op.Vars, lhsCols), nil + return &engine.UncorrelatedSubquery{ + Opcode: op.FilterType, + SubqueryResult: op.SubqueryValueName, + HasValues: op.HasValuesName, + Subquery: inner, + Outer: outer, + }, nil + } + + return &engine.SemiJoin{ + Left: outer, + Right: inner, + Vars: op.Vars, + }, nil } -// transformFkVerify transforms a FkVerify operator into a logical plan. 
-func transformFkVerify(ctx *plancontext.PlanningContext, fkv *operators.FkVerify) (logicalPlan, error) { - inputLP, err := transformToLogicalPlan(ctx, fkv.Input) +// transformFkVerify transforms a FkVerify operator into a engine primitive +func transformFkVerify(ctx *plancontext.PlanningContext, fkv *operators.FkVerify) (engine.Primitive, error) { + inputLP, err := transformToPrimitive(ctx, fkv.Input) if err != nil { return nil, err } - // Once we have the input logical plan, we can create the primitives for the verification operators. + // Once we have the input primitive, we can create the primitives for the verification operators. // For all of these, we don't need the semTable anymore. We set it to nil, to avoid using an incorrect one. ctx.SemTable = nil // Go over the children and convert them to Primitives too. - var verify []*verifyLP + var verify []*engine.Verify for _, v := range fkv.Verify { - lp, err := transformToLogicalPlan(ctx, v.Op) + lp, err := transformToPrimitive(ctx, v.Op) if err != nil { return nil, err } - verify = append(verify, &verifyLP{ - verify: lp, - typ: v.Typ, + verify = append(verify, &engine.Verify{ + Exec: lp, + Typ: v.Typ, }) } - return newFkVerify(inputLP, verify), nil + return &engine.FkVerify{ + Verify: verify, + Exec: inputLP, + }, nil + } -func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggregator) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source) +func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggregator) (engine.Primitive, error) { + if op.WithRollup { + return nil, vterrors.VT12001("GROUP BY WITH ROLLUP not supported for sharded queries") + } + src, err := transformToPrimitive(ctx, op.Source) if err != nil { return nil, err } - oa := &orderedAggregate{ - resultsBuilder: newResultsBuilder(plan, nil), - } + var aggregates []*engine.AggregateParams + var groupByKeys []*engine.GroupByParams for _, aggr := range op.Aggregations { if aggr.OpCode == 
opcode.AggregateUnassigned { return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) } - aggrParam := engine.NewAggregateParam(aggr.OpCode, aggr.ColOffset, aggr.Alias) + aggrParam := engine.NewAggregateParam(aggr.OpCode, aggr.ColOffset, aggr.Alias, ctx.VSchema.Environment().CollationEnv()) aggrParam.Expr = aggr.Func aggrParam.Original = aggr.Original aggrParam.OrigOpcode = aggr.OriginalOpCode aggrParam.WCol = aggr.WSOffset aggrParam.Type = aggr.GetTypeCollation(ctx) - oa.aggregates = append(oa.aggregates, aggrParam) + aggregates = append(aggregates, aggrParam) } + for _, groupBy := range op.Grouping { - typ, _ := ctx.SemTable.TypeForExpr(groupBy.SimplifiedExpr) - oa.groupByKeys = append(oa.groupByKeys, &engine.GroupByParams{ + typ, _ := ctx.SemTable.TypeForExpr(groupBy.Inner) + groupByKeys = append(groupByKeys, &engine.GroupByParams{ KeyCol: groupBy.ColOffset, WeightStringCol: groupBy.WSOffset, - Expr: groupBy.AsAliasedExpr().Expr, + Expr: groupBy.Inner, Type: typ, + CollationEnv: ctx.VSchema.Environment().CollationEnv(), }) } - if err != nil { - return nil, err + if len(groupByKeys) == 0 { + return &engine.ScalarAggregate{ + Aggregates: aggregates, + TruncateColumnCount: op.ResultColumns, + Input: src, + }, nil } - oa.truncateColumnCount = op.ResultColumns - return oa, nil + + return &engine.OrderedAggregate{ + Aggregates: aggregates, + GroupByKeys: groupByKeys, + TruncateColumnCount: op.ResultColumns, + Input: src, + }, nil } -func transformDistinct(ctx *plancontext.PlanningContext, op *operators.Distinct) (logicalPlan, error) { - src, err := transformToLogicalPlan(ctx, op.Source) +func transformDistinct(ctx *plancontext.PlanningContext, op *operators.Distinct) (engine.Primitive, error) { + src, err := transformToPrimitive(ctx, op.Source) if err != nil { return nil, err } - return newDistinct(src, op.Columns, op.Truncate), nil + + return &engine.Distinct{ + Source: src, + CheckCols: op.Columns, + 
Truncate: op.Truncate, + }, nil } -func transformOrdering(ctx *plancontext.PlanningContext, op *operators.Ordering) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source) +func transformOrdering(ctx *plancontext.PlanningContext, op *operators.Ordering) (engine.Primitive, error) { + plan, err := transformToPrimitive(ctx, op.Source) if err != nil { return nil, err } @@ -258,38 +365,39 @@ func transformOrdering(ctx *plancontext.PlanningContext, op *operators.Ordering) return createMemorySort(ctx, plan, op) } -func createMemorySort(ctx *plancontext.PlanningContext, src logicalPlan, ordering *operators.Ordering) (logicalPlan, error) { - primitive := &engine.MemorySort{ +func createMemorySort(ctx *plancontext.PlanningContext, src engine.Primitive, ordering *operators.Ordering) (engine.Primitive, error) { + prim := &engine.MemorySort{ + Input: src, TruncateColumnCount: ordering.ResultColumns, } - ms := &memorySort{ - resultsBuilder: newResultsBuilder(src, primitive), - eMemorySort: primitive, - } for idx, order := range ordering.Order { typ, _ := ctx.SemTable.TypeForExpr(order.SimplifiedExpr) - ms.eMemorySort.OrderBy = append(ms.eMemorySort.OrderBy, evalengine.OrderByParams{ + prim.OrderBy = append(prim.OrderBy, evalengine.OrderByParams{ Col: ordering.Offset[idx], WeightStringCol: ordering.WOffset[idx], Desc: order.Inner.Direction == sqlparser.DescOrder, Type: typ, + CollationEnv: ctx.VSchema.Environment().CollationEnv(), }) } - return ms, nil + return prim, nil } -func transformProjection(ctx *plancontext.PlanningContext, op *operators.Projection) (logicalPlan, error) { - src, err := transformToLogicalPlan(ctx, op.Source) +func transformProjection(ctx *plancontext.PlanningContext, op *operators.Projection) (engine.Primitive, error) { + src, err := transformToPrimitive(ctx, op.Source) if err != nil { return nil, err } - if cols := op.AllOffsets(); cols != nil { + if cols, colNames := op.AllOffsets(); cols != nil { // if all this op is doing is 
passing through columns from the input, we // can use the faster SimpleProjection - return useSimpleProjection(ctx, op, cols, src) + if len(op.Source.GetColumns(ctx)) == len(cols) && offsetInInputOrder(cols) { + cols = nil + } + return newSimpleProjection(cols, colNames, src), nil } ap, err := op.GetAliasedProjections() @@ -300,7 +408,7 @@ func transformProjection(ctx *plancontext.PlanningContext, op *operators.Project var evalengineExprs []evalengine.Expr var columnNames []string for _, pe := range ap { - ee, err := getEvalEngingeExpr(ctx, pe) + ee, err := getEvalEngineExpr(ctx, pe) if err != nil { return nil, err } @@ -308,18 +416,24 @@ func transformProjection(ctx *plancontext.PlanningContext, op *operators.Project columnNames = append(columnNames, pe.Original.ColumnName()) } - primitive := &engine.Projection{ + return &engine.Projection{ + Input: src, Cols: columnNames, Exprs: evalengineExprs, - } - - return &projection{ - source: src, - primitive: primitive, }, nil } -func getEvalEngingeExpr(ctx *plancontext.PlanningContext, pe *operators.ProjExpr) (evalengine.Expr, error) { +// offsetInInputOrder returns true if the columns are in the same order as the input +func offsetInInputOrder(cols []int) bool { + for i, c := range cols { + if c != i { + return false + } + } + return true +} + +func getEvalEngineExpr(ctx *plancontext.PlanningContext, pe *operators.ProjExpr) (evalengine.Expr, error) { switch e := pe.Info.(type) { case *operators.EvalEngine: return e.EExpr, nil @@ -332,77 +446,54 @@ func getEvalEngingeExpr(ctx *plancontext.PlanningContext, pe *operators.ProjExpr } -// useSimpleProjection uses nothing at all if the output is already correct, -// or SimpleProjection when we have to reorder or truncate the columns -func useSimpleProjection(ctx *plancontext.PlanningContext, op *operators.Projection, cols []int, src logicalPlan) (logicalPlan, error) { - columns := op.Source.GetColumns(ctx) - if len(columns) == len(cols) && elementsMatchIndices(cols) { - // the 
columns are already in the right order. we don't need anything at all here - return src, nil - } - return &simpleProjection{ - logicalPlanCommon: newBuilderCommon(src), - eSimpleProj: &engine.SimpleProjection{ - Cols: cols, - }, - }, nil -} - -// elementsMatchIndices checks if the elements of the input slice match -// their corresponding index values. It returns true if all elements match -// their indices, and false otherwise. -func elementsMatchIndices(in []int) bool { - for idx, val := range in { - if val != idx { - return false - } +// newSimpleProjection creates a simple projections +func newSimpleProjection(cols []int, colNames []string, src engine.Primitive) engine.Primitive { + return &engine.SimpleProjection{ + Input: src, + Cols: cols, + ColNames: colNames, } - return true } -func transformFilter(ctx *plancontext.PlanningContext, op *operators.Filter) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source) +func transformFilter(ctx *plancontext.PlanningContext, op *operators.Filter) (engine.Primitive, error) { + src, err := transformToPrimitive(ctx, op.Source) if err != nil { return nil, err } predicate := op.PredicateWithOffsets - ast := ctx.SemTable.AndExpressions(op.Predicates...) 
- if predicate == nil { panic("this should have already been done") } - return &filter{ - logicalPlanCommon: newBuilderCommon(plan), - efilter: &engine.Filter{ - Predicate: predicate, - ASTPredicate: ast, - Truncate: op.Truncate, - }, + return &engine.Filter{ + Input: src, + Predicate: predicate, + ASTPredicate: ctx.SemTable.AndExpressions(op.Predicates...), + Truncate: op.Truncate, }, nil } -func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.ApplyJoin) (logicalPlan, error) { - lhs, err := transformToLogicalPlan(ctx, n.LHS) +func transformApplyJoinPlan(ctx *plancontext.PlanningContext, n *operators.ApplyJoin) (engine.Primitive, error) { + lhs, err := transformToPrimitive(ctx, n.LHS) if err != nil { return nil, err } - rhs, err := transformToLogicalPlan(ctx, n.RHS) + rhs, err := transformToPrimitive(ctx, n.RHS) if err != nil { return nil, err } opCode := engine.InnerJoin - if n.LeftJoin { + if !n.JoinType.IsInner() { opCode = engine.LeftJoin } - return &join{ + return &engine.Join{ + Opcode: opCode, Left: lhs, Right: rhs, Cols: n.Columns, Vars: n.Vars, - Opcode: opCode, }, nil } @@ -413,10 +504,7 @@ func routeToEngineRoute(ctx *plancontext.PlanningContext, op *operators.Route, h } rp := newRoutingParams(ctx, op.Routing.OpCode()) - err = op.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err - } + op.Routing.UpdateRoutingParams(ctx, rp) e := &engine.Route{ TableName: strings.Join(tableNames, ", "), @@ -464,7 +552,7 @@ func getHints(cmt *sqlparser.ParsedComments) *queryHints { } } -func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) (logicalPlan, error) { +func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) (engine.Primitive, error) { stmt, dmlOp, err := operators.ToSQL(ctx, op.Source) if err != nil { return nil, err @@ -481,22 +569,26 @@ func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) ( if op.Lock != sqlparser.NoLock { 
stmt.SetLock(op.Lock) } - return buildRouteLogicalPlan(ctx, op, stmt, hints) + return buildRoutePrimitive(ctx, op, stmt, hints) case *sqlparser.Update: - return buildUpdateLogicalPlan(ctx, op, dmlOp, stmt, hints) + return buildUpdatePrimitive(ctx, op, dmlOp, stmt, hints) case *sqlparser.Delete: - return buildDeleteLogicalPlan(ctx, op, dmlOp, hints) + return buildDeletePrimitive(ctx, op, dmlOp, stmt, hints) case *sqlparser.Insert: - return buildInsertLogicalPlan(op, dmlOp, stmt, hints) + return buildInsertPrimitive(op, dmlOp, stmt, hints) default: return nil, vterrors.VT13001(fmt.Sprintf("dont know how to %T", stmt)) } } -func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route, stmt sqlparser.SelectStatement, hints *queryHints) (logicalPlan, error) { - _ = updateSelectedVindexPredicate(op) +func buildRoutePrimitive(ctx *plancontext.PlanningContext, op *operators.Route, stmt sqlparser.SelectStatement, hints *queryHints) (engine.Primitive, error) { + _ = updateSelectedVindexPredicate(op.Routing) eroute, err := routeToEngineRoute(ctx, op, hints) + if err != nil { + return nil, err + } + for _, order := range op.Ordering { typ, _ := ctx.SemTable.TypeForExpr(order.AST) eroute.OrderBy = append(eroute.OrderBy, evalengine.OrderByParams{ @@ -504,62 +596,61 @@ func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route WeightStringCol: order.WOffset, Desc: order.Direction == sqlparser.DescOrder, Type: typ, + CollationEnv: ctx.VSchema.Environment().CollationEnv(), }) } + + prepareTheAST(stmt) + + res, err := WireupRoute(ctx, eroute, stmt) if err != nil { return nil, err } - r := &route{ - eroute: eroute, - Select: stmt, - tables: operators.TableID(op), - } - if err = r.Wireup(ctx); err != nil { - return nil, err - } - return r, nil + return res, nil } -func buildInsertLogicalPlan( - rb *operators.Route, op ops.Operator, stmt *sqlparser.Insert, +func buildInsertPrimitive( + rb *operators.Route, op operators.Operator, stmt 
*sqlparser.Insert, hints *queryHints, -) (logicalPlan, error) { +) (engine.Primitive, error) { ins := op.(*operators.Insert) + + ic := engine.InsertCommon{ + Opcode: mapToInsertOpCode(rb.Routing.OpCode()), + Keyspace: rb.Routing.Keyspace(), + TableName: ins.VTable.Name.String(), + Ignore: ins.Ignore, + Generate: autoIncGenerate(ins.AutoIncrement), + ColVindexes: ins.ColVindexes, + } + if hints != nil { + ic.MultiShardAutocommit = hints.multiShardAutocommit + ic.QueryTimeout = hints.queryTimeout + } + eins := &engine.Insert{ - Opcode: mapToInsertOpCode(rb.Routing.OpCode(), false), - Keyspace: rb.Routing.Keyspace(), - TableName: ins.VTable.Name.String(), - Ignore: ins.Ignore, - Generate: autoIncGenerate(ins.AutoIncrement), - ColVindexes: ins.ColVindexes, - VindexValues: ins.VindexValues, - VindexValueOffset: ins.VindexValueOffset, + InsertCommon: ic, + VindexValues: ins.VindexValues, } - lp := &insert{eInsert: eins} // we would need to generate the query on the fly. The only exception here is // when unsharded query with autoincrement for that there is no input operator. 
if eins.Opcode != engine.InsertUnsharded { eins.Prefix, eins.Mid, eins.Suffix = generateInsertShardedQuery(ins.AST) - } - - if hints != nil { - eins.MultiShardAutocommit = hints.multiShardAutocommit - eins.QueryTimeout = hints.queryTimeout + if ins.AST.RowAlias != nil { + eins.Alias = sqlparser.String(ins.AST.RowAlias) + } } eins.Query = generateQuery(stmt) - return lp, nil + return eins, nil } -func mapToInsertOpCode(code engine.Opcode, insertSelect bool) engine.InsertOpcode { +func mapToInsertOpCode(code engine.Opcode) engine.InsertOpcode { if code == engine.Unsharded { return engine.InsertUnsharded } - if insertSelect { - return engine.InsertSelect - } return engine.InsertSharded } @@ -579,7 +670,7 @@ func autoIncGenerate(gen *operators.Generate) *engine.Generate { } } -func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mids sqlparser.Values, suffix string) { +func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mids sqlparser.Values, suffix sqlparser.OnDup) { mids, isValues := ins.Rows.(sqlparser.Values) prefixFormat := "insert %v%sinto %v%v " if isValues { @@ -591,12 +682,16 @@ func generateInsertShardedQuery(ins *sqlparser.Insert) (prefix string, mids sqlp prefixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) prefixBuf.Myprintf(prefixFormat, ins.Comments, ins.Ignore.ToString(), - ins.Table, ins.Columns) + ins.Table, ins.Columns, ins.RowAlias) prefix = prefixBuf.String() - suffixBuf := sqlparser.NewTrackedBuffer(dmlFormatter) - suffixBuf.Myprintf("%v", ins.OnDup) - suffix = suffixBuf.String() + suffix = sqlparser.CopyOnRewrite(ins.OnDup, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + if tblName, ok := cursor.Node().(sqlparser.TableName); ok { + if tblName.Qualifier != sqlparser.NewIdentifierCS("") { + cursor.Replace(sqlparser.NewTableName(tblName.Name.String())) + } + } + }, nil).(sqlparser.OnDup) return } @@ -610,84 +705,80 @@ func dmlFormatter(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { node.Format(buf) } 
-func buildUpdateLogicalPlan( +func buildUpdatePrimitive( ctx *plancontext.PlanningContext, rb *operators.Route, - dmlOp ops.Operator, + dmlOp operators.Operator, stmt *sqlparser.Update, hints *queryHints, -) (logicalPlan, error) { +) (engine.Primitive, error) { upd := dmlOp.(*operators.Update) - rp := newRoutingParams(ctx, rb.Routing.OpCode()) - err := rb.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err + var vindexes []*vindexes.ColumnVindex + vQuery := "" + if len(upd.ChangedVindexValues) > 0 { + upd.OwnedVindexQuery.From = stmt.GetFrom() + upd.OwnedVindexQuery.Where = stmt.Where + vQuery = sqlparser.String(upd.OwnedVindexQuery) + vindexes = upd.Target.VTable.ColumnVindexes + if upd.OwnedVindexQuery.Limit != nil && len(upd.OwnedVindexQuery.OrderBy) == 0 { + return nil, vterrors.VT12001("Vindex update should have ORDER BY clause when using LIMIT") + } } - edml := &engine.DML{ - Query: generateQuery(stmt), - TableNames: []string{upd.VTable.Name.String()}, - Vindexes: upd.VTable.ColumnVindexes, - OwnedVindexQuery: upd.OwnedVindexQuery, - RoutingParameters: rp, + if upd.VerifyAll { + stmt.SetComments(stmt.GetParsedComments().SetMySQLSetVarValue(sysvars.ForeignKeyChecks, "OFF")) } + _ = updateSelectedVindexPredicate(rb.Routing) + edml := createDMLPrimitive(ctx, rb, hints, upd.Target.VTable, generateQuery(stmt), vindexes, vQuery) - transformDMLPlan(upd.VTable, edml, rb.Routing, len(upd.ChangedVindexValues) > 0) - - e := &engine.Update{ - ChangedVindexValues: upd.ChangedVindexValues, + return &engine.Update{ DML: edml, + ChangedVindexValues: upd.ChangedVindexValues, + }, nil +} + +func buildDeletePrimitive(ctx *plancontext.PlanningContext, rb *operators.Route, dmlOp operators.Operator, stmt *sqlparser.Delete, hints *queryHints) (engine.Primitive, error) { + del := dmlOp.(*operators.Delete) + + var vindexes []*vindexes.ColumnVindex + vQuery := "" + if del.OwnedVindexQuery != nil { + del.OwnedVindexQuery.From = stmt.GetFrom() + 
del.OwnedVindexQuery.Where = stmt.Where + vQuery = sqlparser.String(del.OwnedVindexQuery) + vindexes = del.Target.VTable.Owned } - if hints != nil { - e.MultiShardAutocommit = hints.multiShardAutocommit - e.QueryTimeout = hints.queryTimeout - } + _ = updateSelectedVindexPredicate(rb.Routing) + edml := createDMLPrimitive(ctx, rb, hints, del.Target.VTable, generateQuery(stmt), vindexes, vQuery) - return &primitiveWrapper{prim: e}, nil + return &engine.Delete{DML: edml}, nil } -func buildDeleteLogicalPlan( - ctx *plancontext.PlanningContext, - rb *operators.Route, - dmlOp ops.Operator, - hints *queryHints, -) (logicalPlan, error) { - del := dmlOp.(*operators.Delete) +func createDMLPrimitive(ctx *plancontext.PlanningContext, rb *operators.Route, hints *queryHints, vTbl *vindexes.Table, query string, colVindexes []*vindexes.ColumnVindex, vindexQuery string) *engine.DML { rp := newRoutingParams(ctx, rb.Routing.OpCode()) - err := rb.Routing.UpdateRoutingParams(ctx, rp) - if err != nil { - return nil, err - } + rb.Routing.UpdateRoutingParams(ctx, rp) edml := &engine.DML{ - Query: generateQuery(del.AST), - TableNames: []string{del.VTable.Name.String()}, - Vindexes: del.VTable.Owned, - OwnedVindexQuery: del.OwnedVindexQuery, + Query: query, + TableNames: []string{vTbl.Name.String()}, + Vindexes: colVindexes, + OwnedVindexQuery: vindexQuery, RoutingParameters: rp, } - transformDMLPlan(del.VTable, edml, rb.Routing, del.OwnedVindexQuery != "") - - e := &engine.Delete{ - DML: edml, - } - if hints != nil { - e.MultiShardAutocommit = hints.multiShardAutocommit - e.QueryTimeout = hints.queryTimeout - } - - return &primitiveWrapper{prim: e}, nil -} - -func transformDMLPlan(vtable *vindexes.Table, edml *engine.DML, routing operators.Routing, setVindex bool) { - if routing.OpCode() != engine.Unsharded && setVindex { - primary := vtable.ColumnVindexes[0] + if rb.Routing.OpCode() != engine.Unsharded && vindexQuery != "" { + primary := vTbl.ColumnVindexes[0] edml.KsidVindex = 
primary.Vindex edml.KsidLength = len(primary.Columns) } + + if hints != nil { + edml.MultiShardAutocommit = hints.multiShardAutocommit + edml.QueryTimeout = hints.queryTimeout + } + return edml } -func updateSelectedVindexPredicate(op *operators.Route) sqlparser.Expr { - tr, ok := op.Routing.(*operators.ShardedRouting) +func updateSelectedVindexPredicate(routing operators.Routing) sqlparser.Expr { + tr, ok := routing.(*operators.ShardedRouting) if !ok || tr.Selected == nil { return nil } @@ -702,7 +793,9 @@ func updateSelectedVindexPredicate(op *operators.Route) sqlparser.Expr { if !ok { continue } - + if sqlparser.Equals.Expr(cmp.Right, sqlparser.ListArg(engine.DmlVals)) { + continue + } var argName string if isMultiColumn { argName = engine.ListVarName + strconv.Itoa(idx) @@ -717,7 +810,7 @@ func updateSelectedVindexPredicate(op *operators.Route) sqlparser.Expr { func getAllTableNames(op *operators.Route) ([]string, error) { tableNameMap := map[string]any{} - err := rewrite.Visit(op, func(op ops.Operator) error { + err := operators.Visit(op, func(op operators.Operator) error { tbl, isTbl := op.(*operators.Table) var name string if isTbl { @@ -741,13 +834,13 @@ func getAllTableNames(op *operators.Route) ([]string, error) { return tableNames, nil } -func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) (logicalPlan, error) { - sources, err := slice.MapWithError(op.Sources, func(src ops.Operator) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, src) +func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) (engine.Primitive, error) { + sources, err := slice.MapWithError(op.Sources, func(src operators.Operator) (engine.Primitive, error) { + primitive, err := transformToPrimitive(ctx, src) if err != nil { return nil, err } - return plan, nil + return primitive, nil }) if err != nil { return nil, err @@ -756,37 +849,132 @@ func transformUnionPlan(ctx *plancontext.PlanningContext, op *operators.Union) ( if 
len(sources) == 1 { return sources[0], nil } - return &concatenate{ - sources: sources, - noNeedToTypeCheck: nil, - }, nil + return engine.NewConcatenate(sources, nil), nil } -func transformLimit(ctx *plancontext.PlanningContext, op *operators.Limit) (logicalPlan, error) { - plan, err := transformToLogicalPlan(ctx, op.Source) +func transformLimit(ctx *plancontext.PlanningContext, op *operators.Limit) (engine.Primitive, error) { + plan, err := transformToPrimitive(ctx, op.Source) if err != nil { return nil, err } - return createLimit(plan, op.AST) + return createLimit(plan, op.AST, ctx.VSchema.Environment(), ctx.VSchema.ConnCollation()) } -func createLimit(input logicalPlan, limit *sqlparser.Limit) (logicalPlan, error) { - plan := newLimit(input) - pv, err := evalengine.Translate(limit.Rowcount, nil) +func createLimit(input engine.Primitive, limit *sqlparser.Limit, env *vtenv.Environment, coll collations.ID) (engine.Primitive, error) { + cfg := &evalengine.Config{ + Collation: coll, + Environment: env, + } + count, err := evalengine.Translate(limit.Rowcount, cfg) if err != nil { return nil, vterrors.Wrap(err, "unexpected expression in LIMIT") } - plan.elimit.Count = pv - + var offset evalengine.Expr if limit.Offset != nil { - pv, err = evalengine.Translate(limit.Offset, nil) + offset, err = evalengine.Translate(limit.Offset, cfg) if err != nil { return nil, vterrors.Wrap(err, "unexpected expression in OFFSET") } - plan.elimit.Offset = pv } - return plan, nil + return &engine.Limit{ + Input: input, + Count: count, + Offset: offset, + }, nil +} + +func transformHashJoin(ctx *plancontext.PlanningContext, op *operators.HashJoin) (engine.Primitive, error) { + lhs, err := transformToPrimitive(ctx, op.LHS) + if err != nil { + return nil, err + } + rhs, err := transformToPrimitive(ctx, op.RHS) + if err != nil { + return nil, err + } + + if len(op.LHSKeys) != 1 { + return nil, vterrors.VT12001("hash joins must have exactly one join predicate") + } + + joinOp := 
engine.InnerJoin + if op.LeftJoin { + joinOp = engine.LeftJoin + } + + var missingTypes []string + + ltyp, found := ctx.SemTable.TypeForExpr(op.JoinComparisons[0].LHS) + if !found { + missingTypes = append(missingTypes, sqlparser.String(op.JoinComparisons[0].LHS)) + } + rtyp, found := ctx.SemTable.TypeForExpr(op.JoinComparisons[0].RHS) + if !found { + missingTypes = append(missingTypes, sqlparser.String(op.JoinComparisons[0].RHS)) + } + + if len(missingTypes) > 0 { + return nil, vterrors.VT12001( + fmt.Sprintf("missing type information for [%s]", strings.Join(missingTypes, ", "))) + } + + comparisonType, err := evalengine.CoerceTypes(ltyp, rtyp, ctx.VSchema.Environment().CollationEnv()) + if err != nil { + return nil, err + } + + return &engine.HashJoin{ + Left: lhs, + Right: rhs, + Opcode: joinOp, + Cols: op.ColumnOffsets, + LHSKey: op.LHSKeys[0], + RHSKey: op.RHSKeys[0], + ASTPred: op.JoinPredicate(), + Collation: comparisonType.Collation(), + ComparisonType: comparisonType.Type(), + CollationEnv: ctx.VSchema.Environment().CollationEnv(), + Values: comparisonType.Values(), + }, nil +} + +func transformVindexPlan(ctx *plancontext.PlanningContext, op *operators.Vindex) (engine.Primitive, error) { + single, ok := op.Vindex.(vindexes.SingleColumn) + if !ok { + return nil, vterrors.VT12001("multi-column vindexes not supported") + } + + expr, err := evalengine.Translate(op.Value, &evalengine.Config{ + Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Environment: ctx.VSchema.Environment(), + }) + if err != nil { + return nil, err + } + prim := &engine.VindexFunc{ + Opcode: op.OpCode, + Vindex: single, + Value: expr, + } + + for _, col := range op.Columns { + err := SupplyProjection(prim, &sqlparser.AliasedExpr{ + Expr: col, + As: sqlparser.IdentifierCI{}, + }, false) + if err != nil { + return nil, err + } + } + return prim, nil +} + +func generateQuery(statement sqlparser.Statement) string { + buf := sqlparser.NewTrackedBuffer(dmlFormatter) 
+ statement.Format(buf) + return buf.String() } diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go index 5201818951d..0a2e545ea48 100644 --- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go +++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go @@ -21,9 +21,9 @@ import ( "slices" "sort" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -33,15 +33,18 @@ type ( ctx *plancontext.PlanningContext stmt sqlparser.Statement tableNames []string - dmlOperator ops.Operator + dmlOperator Operator } ) func (qb *queryBuilder) asSelectStatement() sqlparser.SelectStatement { return qb.stmt.(sqlparser.SelectStatement) } +func (qb *queryBuilder) asOrderAndLimit() sqlparser.OrderAndLimit { + return qb.stmt.(sqlparser.OrderAndLimit) +} -func ToSQL(ctx *plancontext.PlanningContext, op ops.Operator) (_ sqlparser.Statement, _ ops.Operator, err error) { +func ToSQL(ctx *plancontext.PlanningContext, op Operator) (_ sqlparser.Statement, _ Operator, err error) { defer PanicHandler(&err) q := &queryBuilder{ctx: ctx} @@ -70,22 +73,20 @@ func (qb *queryBuilder) addTableExpr( if qb.stmt == nil { qb.stmt = &sqlparser.Select{} } - sel := qb.stmt.(*sqlparser.Select) - elems := &sqlparser.AliasedTableExpr{ + tbl := &sqlparser.AliasedTableExpr{ Expr: tblExpr, Partitions: nil, As: sqlparser.NewIdentifierCS(alias), Hints: hints, Columns: columnAliases, } - qb.ctx.SemTable.ReplaceTableSetFor(tableID, elems) - sel.From = append(sel.From, elems) - qb.stmt = sel + qb.ctx.SemTable.ReplaceTableSetFor(tableID, tbl) + qb.stmt.(FromStatement).SetFrom(append(qb.stmt.(FromStatement).GetFrom(), tbl)) qb.tableNames = append(qb.tableNames, tableName) } func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) { - if _, toBeSkipped := 
qb.ctx.SkipPredicates[expr]; toBeSkipped { + if qb.ctx.ShouldSkip(expr) { // This is a predicate that was added to the RHS of an ApplyJoin. // The original predicate will be added, so we don't have to add this here return @@ -95,7 +96,7 @@ func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) { switch stmt := qb.stmt.(type) { case *sqlparser.Select: - if containsAggr(expr) { + if ContainsAggr(qb.ctx, expr) { addPred = stmt.AddHaving } else { addPred = stmt.AddWhere @@ -115,7 +116,12 @@ func (qb *queryBuilder) addPredicate(expr sqlparser.Expr) { func (qb *queryBuilder) addGroupBy(original sqlparser.Expr) { sel := qb.stmt.(*sqlparser.Select) - sel.GroupBy = append(sel.GroupBy, original) + sel.AddGroupBy(original) +} + +func (qb *queryBuilder) setWithRollup() { + sel := qb.stmt.(*sqlparser.Select) + sel.GroupBy.WithRollup = true } func (qb *queryBuilder) addProjection(projection sqlparser.SelectExpr) { @@ -201,62 +207,75 @@ func (qb *queryBuilder) unionWith(other *queryBuilder, distinct bool) { } } -func (qb *queryBuilder) joinInnerWith(other *queryBuilder, onCondition sqlparser.Expr) { - sel := qb.stmt.(*sqlparser.Select) - otherSel := other.stmt.(*sqlparser.Select) - sel.From = append(sel.From, otherSel.From...) - sel.SelectExprs = append(sel.SelectExprs, otherSel.SelectExprs...) 
+type FromStatement interface { + GetFrom() []sqlparser.TableExpr + SetFrom([]sqlparser.TableExpr) + GetWherePredicate() sqlparser.Expr + SetWherePredicate(sqlparser.Expr) +} - var predicate sqlparser.Expr - if sel.Where != nil { - predicate = sel.Where.Expr +var _ FromStatement = (*sqlparser.Select)(nil) +var _ FromStatement = (*sqlparser.Update)(nil) +var _ FromStatement = (*sqlparser.Delete)(nil) + +func (qb *queryBuilder) joinWith(other *queryBuilder, onCondition sqlparser.Expr, joinType sqlparser.JoinType) { + stmt := qb.stmt.(FromStatement) + otherStmt := other.stmt.(FromStatement) + + if sel, isSel := stmt.(*sqlparser.Select); isSel { + otherSel := otherStmt.(*sqlparser.Select) + sel.SelectExprs = append(sel.SelectExprs, otherSel.SelectExprs...) } - if otherSel.Where != nil { + + qb.mergeWhereClauses(stmt, otherStmt) + + var newFromClause []sqlparser.TableExpr + switch joinType { + case sqlparser.NormalJoinType: + newFromClause = append(stmt.GetFrom(), otherStmt.GetFrom()...) + qb.addPredicate(onCondition) + default: + newFromClause = []sqlparser.TableExpr{buildJoin(stmt, otherStmt, onCondition, joinType)} + } + + stmt.SetFrom(newFromClause) +} + +func (qb *queryBuilder) mergeWhereClauses(stmt, otherStmt FromStatement) { + predicate := stmt.GetWherePredicate() + if otherPredicate := otherStmt.GetWherePredicate(); otherPredicate != nil { predExprs := sqlparser.SplitAndExpression(nil, predicate) - otherExprs := sqlparser.SplitAndExpression(nil, otherSel.Where.Expr) + otherExprs := sqlparser.SplitAndExpression(nil, otherPredicate) predicate = qb.ctx.SemTable.AndExpressions(append(predExprs, otherExprs...)...) 
} if predicate != nil { - sel.Where = &sqlparser.Where{Type: sqlparser.WhereClause, Expr: predicate} + stmt.SetWherePredicate(predicate) } - - qb.addPredicate(onCondition) } -func (qb *queryBuilder) joinOuterWith(other *queryBuilder, onCondition sqlparser.Expr) { - sel := qb.stmt.(*sqlparser.Select) - otherSel := other.stmt.(*sqlparser.Select) +func buildJoin(stmt FromStatement, otherStmt FromStatement, onCondition sqlparser.Expr, joinType sqlparser.JoinType) *sqlparser.JoinTableExpr { var lhs sqlparser.TableExpr - if len(sel.From) == 1 { - lhs = sel.From[0] + fromClause := stmt.GetFrom() + if len(fromClause) == 1 { + lhs = fromClause[0] } else { - lhs = &sqlparser.ParenTableExpr{Exprs: sel.From} + lhs = &sqlparser.ParenTableExpr{Exprs: fromClause} } var rhs sqlparser.TableExpr - if len(otherSel.From) == 1 { - rhs = otherSel.From[0] + otherFromClause := otherStmt.GetFrom() + if len(otherFromClause) == 1 { + rhs = otherFromClause[0] } else { - rhs = &sqlparser.ParenTableExpr{Exprs: otherSel.From} + rhs = &sqlparser.ParenTableExpr{Exprs: otherFromClause} } - sel.From = []sqlparser.TableExpr{&sqlparser.JoinTableExpr{ + + return &sqlparser.JoinTableExpr{ LeftExpr: lhs, RightExpr: rhs, - Join: sqlparser.LeftJoinType, + Join: joinType, Condition: &sqlparser.JoinCondition{ On: onCondition, }, - }} - - sel.SelectExprs = append(sel.SelectExprs, otherSel.SelectExprs...) 
- var predicate sqlparser.Expr - if sel.Where != nil { - predicate = sel.Where.Expr - } - if otherSel.Where != nil { - predicate = qb.ctx.SemTable.AndExpressions(predicate, otherSel.Where.Expr) - } - if predicate != nil { - sel.Where = &sqlparser.Where{Type: sqlparser.WhereClause, Expr: predicate} } } @@ -310,7 +329,7 @@ func (ts *tableSorter) Swap(i, j int) { func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { switch expr := expr.(type) { case *sqlparser.AliasedExpr: - sqlparser.RemoveKeyspaceFromColName(expr.Expr) + sqlparser.RemoveKeyspaceInCol(expr.Expr) case *sqlparser.StarExpr: expr.TableName.Qualifier = sqlparser.NewIdentifierCS("") } @@ -347,7 +366,7 @@ func stripDownQuery(from, to sqlparser.SelectStatement) { } // buildQuery recursively builds the query into an AST, from an operator tree -func buildQuery(op ops.Operator, qb *queryBuilder) { +func buildQuery(op Operator, qb *queryBuilder) { switch op := op.(type) { case *Table: buildTable(op, qb) @@ -377,7 +396,7 @@ func buildQuery(op ops.Operator, qb *queryBuilder) { case *Update: buildUpdate(op, qb) case *Delete: - buildDML(op, qb) + buildDelete(op, qb) case *Insert: buildDML(op, qb) default: @@ -385,12 +404,28 @@ func buildQuery(op ops.Operator, qb *queryBuilder) { } } +func buildDelete(op *Delete, qb *queryBuilder) { + qb.stmt = &sqlparser.Delete{ + Ignore: op.Ignore, + Targets: sqlparser.TableNames{op.Target.Name}, + } + buildQuery(op.Source, qb) + + qb.dmlOperator = op +} + func buildUpdate(op *Update, qb *queryBuilder) { - tblName := sqlparser.NewTableName(op.QTable.Table.Name.String()) - aTblExpr := &sqlparser.AliasedTableExpr{ - Expr: tblName, - As: op.QTable.Alias.As, + updExprs := getUpdateExprs(op) + upd := &sqlparser.Update{ + Ignore: op.Ignore, + Exprs: updExprs, } + qb.stmt = upd + qb.dmlOperator = op + buildQuery(op.Source, qb) +} + +func getUpdateExprs(op *Update) sqlparser.UpdateExprs { updExprs := make(sqlparser.UpdateExprs, 0, len(op.Assignments)) for _, se := range 
op.Assignments { updExprs = append(updExprs, &sqlparser.UpdateExpr{ @@ -398,24 +433,11 @@ func buildUpdate(op *Update, qb *queryBuilder) { Expr: se.Expr.EvalExpr, }) } - - qb.stmt = &sqlparser.Update{ - Ignore: op.Ignore, - TableExprs: sqlparser.TableExprs{aTblExpr}, - Exprs: updExprs, - OrderBy: op.OrderBy, - Limit: op.Limit, - } - - for _, pred := range op.QTable.Predicates { - qb.addPredicate(pred) - } - - qb.dmlOperator = op + return updExprs } type OpWithAST interface { - ops.Operator + Operator Statement() sqlparser.Statement } @@ -436,24 +458,27 @@ func buildAggregation(op *Aggregator, qb *queryBuilder) { for _, by := range op.Grouping { qb.addGroupBy(by.Inner) - simplified := by.SimplifiedExpr + simplified := by.Inner if by.WSOffset != -1 { qb.addGroupBy(weightStringFor(simplified)) } } + if op.WithRollup { + qb.setWithRollup() + } } func buildOrdering(op *Ordering, qb *queryBuilder) { buildQuery(op.Source, qb) for _, order := range op.Order { - qb.asSelectStatement().AddOrder(order.Inner) + qb.asOrderAndLimit().AddOrder(order.Inner) } } func buildLimit(op *Limit, qb *queryBuilder) { buildQuery(op.Source, qb) - qb.asSelectStatement().SetLimit(op.AST) + qb.asOrderAndLimit().SetLimit(op.AST) } func buildTable(op *Table, qb *queryBuilder) { @@ -502,21 +527,21 @@ func buildProjection(op *Projection, qb *queryBuilder) { } func buildApplyJoin(op *ApplyJoin, qb *queryBuilder) { + predicates := slice.Map(op.JoinPredicates.columns, func(jc applyJoinColumn) sqlparser.Expr { + // since we are adding these join predicates, we need to mark to broken up version (RHSExpr) of it as done + err := qb.ctx.SkipJoinPredicates(jc.Original) + if err != nil { + panic(err) + } + return jc.Original + }) + pred := sqlparser.AndExpressions(predicates...) + buildQuery(op.LHS, qb) - // If we are going to add the predicate used in join here - // We should not add the predicate's copy of when it was split into - // two parts. To avoid this, we use the SkipPredicates map. 
- for _, expr := range qb.ctx.JoinPredicates[op.Predicate] { - qb.ctx.SkipPredicates[expr] = nil - } + qbR := &queryBuilder{ctx: qb.ctx} buildQuery(op.RHS, qbR) - - if op.LeftJoin { - qb.joinOuterWith(qbR, op.Predicate) - } else { - qb.joinInnerWith(qbR, op.Predicate) - } + qb.joinWith(qbR, pred, op.JoinType) } func buildUnion(op *Union, qb *queryBuilder) { @@ -546,7 +571,7 @@ func buildFilter(op *Filter, qb *queryBuilder) { func buildDerived(op *Horizon, qb *queryBuilder) { buildQuery(op.Source, qb) - sqlparser.RemoveKeyspace(op.Query) + sqlparser.RemoveKeyspaceInCol(op.Query) stmt := qb.stmt qb.stmt = nil @@ -586,6 +611,7 @@ func buildDerivedSelect(op *Horizon, qb *queryBuilder, sel *sqlparser.Select) { sel.GroupBy = opQuery.GroupBy sel.Having = mergeHaving(sel.Having, opQuery.Having) sel.SelectExprs = opQuery.SelectExprs + sel.Distinct = opQuery.Distinct qb.addTableExpr(op.Alias, op.Alias, TableID(op), &sqlparser.DerivedTable{ Select: sel, }, nil, op.ColumnAliases) @@ -596,15 +622,8 @@ func buildDerivedSelect(op *Horizon, qb *queryBuilder, sel *sqlparser.Select) { func buildHorizon(op *Horizon, qb *queryBuilder) { buildQuery(op.Source, qb) - stripDownQuery(op.Query, qb.asSelectStatement()) - - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if aliasedExpr, ok := node.(sqlparser.SelectExpr); ok { - removeKeyspaceFromSelectExpr(aliasedExpr) - } - return true, nil - }, qb.stmt) + sqlparser.RemoveKeyspaceInCol(qb.stmt) } func mergeHaving(h1, h2 *sqlparser.Where) *sqlparser.Where { diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go index 85c50427364..43a88d82871 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go +++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing.go @@ -24,42 +24,55 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" ) -func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output ops.Operator, applyResult *rewrite.ApplyResult, err error) { +func errDistinctAggrWithMultiExpr(f sqlparser.AggrFunc) { + if f == nil { + panic(vterrors.VT12001("distinct aggregation function with multiple expressions")) + } + panic(vterrors.VT12001(fmt.Sprintf("distinct aggregation function with multiple expressions '%s'", sqlparser.String(f)))) +} + +func tryPushAggregator(ctx *plancontext.PlanningContext, aggregator *Aggregator) (output Operator, applyResult *ApplyResult) { if aggregator.Pushed { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite + } + + // this rewrite is always valid, and we should do it whenever possible + if route, ok := aggregator.Source.(*Route); ok && (route.IsSingleShard() || overlappingUniqueVindex(ctx, aggregator.Grouping)) { + return Swap(aggregator, route, "push down aggregation under route - remove original") + } + + // other rewrites require us to have reached this phase before we can consider them + if !reachedPhase(ctx, delegateAggregation) { + return aggregator, NoRewrite + } + + // if we have not yet been able to push this aggregation down, + // we need to turn AVG into SUM/COUNT to support this over a sharded keyspace + if needAvgBreaking(aggregator.Aggregations) { + return splitAvgAggregations(ctx, aggregator) } + switch src := aggregator.Source.(type) { case *Route: // if we have a single sharded route, we can push it down - output, applyResult, err = pushAggregationThroughRoute(ctx, aggregator, src) + output, applyResult = pushAggregationThroughRoute(ctx, aggregator, src) case *ApplyJoin: - if reachedPhase(ctx, delegateAggregation) { - output, applyResult, err = pushAggregationThroughJoin(ctx, 
aggregator, src) - } + output, applyResult = pushAggregationThroughApplyJoin(ctx, aggregator, src) + case *HashJoin: + output, applyResult = pushAggregationThroughHashJoin(ctx, aggregator, src) case *Filter: - if reachedPhase(ctx, delegateAggregation) { - output, applyResult, err = pushAggregationThroughFilter(ctx, aggregator, src) - } + output, applyResult = pushAggregationThroughFilter(ctx, aggregator, src) case *SubQueryContainer: - if reachedPhase(ctx, delegateAggregation) { - output, applyResult, err = pushAggregationThroughSubquery(ctx, aggregator, src) - } + output, applyResult = pushAggregationThroughSubquery(ctx, aggregator, src) default: - return aggregator, rewrite.SameTree, nil - } - - if err != nil { - return nil, nil, err + return aggregator, NoRewrite } if output == nil { - return aggregator, rewrite.SameTree, nil + return aggregator, NoRewrite } aggregator.Pushed = true @@ -81,16 +94,10 @@ func pushAggregationThroughSubquery( ctx *plancontext.PlanningContext, rootAggr *Aggregator, src *SubQueryContainer, -) (ops.Operator, *rewrite.ApplyResult, error) { - pushedAggr := rootAggr.Clone([]ops.Operator{src.Outer}).(*Aggregator) - pushedAggr.Original = false - pushedAggr.Pushed = false - +) (Operator, *ApplyResult) { + pushedAggr := rootAggr.SplitAggregatorBelowOperators([]Operator{src.Outer}) for _, subQuery := range src.Inner { - lhsCols, err := subQuery.OuterExpressionsNeeded(ctx, src.Outer) - if err != nil { - return nil, nil, err - } + lhsCols := subQuery.OuterExpressionsNeeded(ctx, src.Outer) for _, colName := range lhsCols { idx := slices.IndexFunc(pushedAggr.Columns, func(ae *sqlparser.AliasedExpr) bool { return ctx.SemTable.EqualsExpr(ae.Expr, colName) @@ -104,13 +111,17 @@ func pushAggregationThroughSubquery( src.Outer = pushedAggr + for _, aggregation := range pushedAggr.Aggregations { + aggregation.Original.Expr = rewriteColNameToArgument(ctx, aggregation.Original.Expr, aggregation.SubQueryExpression, src.Inner...) 
+ } + if !rootAggr.Original { - return src, rewrite.NewTree("push Aggregation under subquery - keep original", rootAggr), nil + return src, Rewrote("push Aggregation under subquery - keep original") } rootAggr.aggregateTheAggregates() - return rootAggr, rewrite.NewTree("push Aggregation under subquery", rootAggr), nil + return rootAggr, Rewrote("push Aggregation under subquery") } func (a *Aggregator) aggregateTheAggregates() { @@ -134,24 +145,12 @@ func pushAggregationThroughRoute( ctx *plancontext.PlanningContext, aggregator *Aggregator, route *Route, -) (ops.Operator, *rewrite.ApplyResult, error) { - // If the route is single-shard, or we are grouping by sharding keys, we can just push down the aggregation - if route.IsSingleShard() || overlappingUniqueVindex(ctx, aggregator.Grouping) { - return rewrite.Swap(aggregator, route, "push down aggregation under route - remove original") - } - - if !reachedPhase(ctx, delegateAggregation) { - return nil, nil, nil - } - +) (Operator, *ApplyResult) { // Create a new aggregator to be placed below the route. - aggrBelowRoute := aggregator.SplitAggregatorBelowRoute(route.Inputs()) + aggrBelowRoute := aggregator.SplitAggregatorBelowOperators(route.Inputs()) aggrBelowRoute.Aggregations = nil - err := pushAggregations(ctx, aggregator, aggrBelowRoute) - if err != nil { - return nil, nil, err - } + pushAggregations(ctx, aggregator, aggrBelowRoute) // Set the source of the route to the new aggregator placed below the route. 
route.Source = aggrBelowRoute @@ -159,18 +158,15 @@ func pushAggregationThroughRoute( if !aggregator.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return aggregator.Source, rewrite.NewTree("push aggregation under route - remove original", aggregator), nil + return aggregator.Source, Rewrote("push aggregation under route - remove original") } - return aggregator, rewrite.NewTree("push aggregation under route - keep original", aggregator), nil + return aggregator, Rewrote("push aggregation under route - keep original") } // pushAggregations splits aggregations between the original aggregator and the one we are pushing down -func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) error { - canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) - if err != nil { - return err - } +func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, aggrBelowRoute *Aggregator) { + canPushDistinctAggr, distinctExprs := checkIfWeCanPush(ctx, aggregator) distinctAggrGroupByAdded := false @@ -181,16 +177,20 @@ func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, continue } + if len(distinctExprs) != 1 { + errDistinctAggrWithMultiExpr(aggr.Func) + } + // We handle a distinct aggregation by turning it into a group by and // doing the aggregating on the vtgate level instead - aeDistinctExpr := aeWrap(distinctExpr) + aeDistinctExpr := aeWrap(distinctExprs[0]) aggrBelowRoute.Columns[aggr.ColOffset] = aeDistinctExpr // We handle a distinct aggregation by turning it into a group by and // doing the aggregating on the vtgate level instead // Adding to group by can be done only once even though there are multiple distinct aggregation with same expression. 
if !distinctAggrGroupByAdded { - groupBy := NewGroupBy(distinctExpr, distinctExpr, aeDistinctExpr) + groupBy := NewGroupBy(distinctExprs[0]) groupBy.ColOffset = aggr.ColOffset aggrBelowRoute.Grouping = append(aggrBelowRoute.Grouping, groupBy) distinctAggrGroupByAdded = true @@ -198,15 +198,13 @@ func pushAggregations(ctx *plancontext.PlanningContext, aggregator *Aggregator, } if !canPushDistinctAggr { - aggregator.DistinctExpr = distinctExpr + aggregator.DistinctExpr = distinctExprs[0] } - - return nil } -func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Expr, error) { +func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) (bool, sqlparser.Exprs) { canPush := true - var distinctExpr sqlparser.Expr + var distinctExprs sqlparser.Exprs var differentExpr *sqlparser.AliasedExpr for _, aggr := range aggregator.Aggregations { @@ -214,42 +212,47 @@ func checkIfWeCanPush(ctx *plancontext.PlanningContext, aggregator *Aggregator) continue } - innerExpr := aggr.Func.GetArg() - if !exprHasUniqueVindex(ctx, innerExpr) { + args := aggr.Func.GetArgs() + hasUniqVindex := false + for _, arg := range args { + if exprHasUniqueVindex(ctx, arg) { + hasUniqVindex = true + break + } + } + if !hasUniqVindex { canPush = false } - if distinctExpr == nil { - distinctExpr = innerExpr + if len(distinctExprs) == 0 { + distinctExprs = args } - if !ctx.SemTable.EqualsExpr(distinctExpr, innerExpr) { - differentExpr = aggr.Original + for idx, expr := range distinctExprs { + if !ctx.SemTable.EqualsExpr(expr, args[idx]) { + differentExpr = aggr.Original + break + } } } if !canPush && differentExpr != nil { - return false, nil, vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr))) + panic(vterrors.VT12001(fmt.Sprintf("only one DISTINCT aggregation is allowed in a SELECT: %s", sqlparser.String(differentExpr)))) } - return canPush, distinctExpr, nil + return 
canPush, distinctExprs } func pushAggregationThroughFilter( ctx *plancontext.PlanningContext, aggregator *Aggregator, filter *Filter, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { columnsNeeded := collectColNamesNeeded(ctx, filter) - - // Create a new aggregator to be placed below the route. - pushedAggr := aggregator.Clone([]ops.Operator{filter.Source}).(*Aggregator) - pushedAggr.Pushed = false - pushedAggr.Original = false - + pushedAggr := aggregator.SplitAggregatorBelowOperators([]Operator{filter.Source}) withNextColumn: for _, col := range columnsNeeded { for _, gb := range pushedAggr.Grouping { - if ctx.SemTable.EqualsExpr(col, gb.SimplifiedExpr) { + if ctx.SemTable.EqualsExpr(col, gb.Inner) { continue withNextColumn } } @@ -262,10 +265,10 @@ withNextColumn: if !aggregator.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return aggregator.Source, rewrite.NewTree("push aggregation under filter - remove original", aggregator), nil + return aggregator.Source, Rewrote("push aggregation under filter - remove original") } aggregator.aggregateTheAggregates() - return aggregator, rewrite.NewTree("push aggregation under filter - keep original", aggregator), nil + return aggregator, Rewrote("push aggregation under filter - keep original") } func collectColNamesNeeded(ctx *plancontext.PlanningContext, f *Filter) (columnsNeeded []*sqlparser.ColName) { @@ -289,7 +292,7 @@ func collectColNamesNeeded(ctx *plancontext.PlanningContext, f *Filter) (columns func overlappingUniqueVindex(ctx *plancontext.PlanningContext, groupByExprs []GroupBy) bool { for _, groupByExpr := range groupByExprs { - if exprHasUniqueVindex(ctx, groupByExpr.SimplifiedExpr) { + if exprHasUniqueVindex(ctx, groupByExpr.Inner) { return true } } @@ -324,116 +327,153 @@ func exprHasVindex(ctx *plancontext.PlanningContext, expr sqlparser.Expr, hasToB } /* -We push down 
aggregations using the logic from the paper Orthogonal Optimization of Subqueries and Aggregation, by -Cesar A. Galindo-Legaria and Milind M. Joshi from Microsoft Corp. - -It explains how one can split an aggregation into local aggregates that depend on only one side of the join. -The local aggregates can then be gathered together to produce the global -group by/aggregate query that the user asked for. +Using techniques from "Orthogonal Optimization of Subqueries and Aggregation" by Cesar A. Galindo-Legaria +and Milind M. Joshi (Microsoft Corp), we push down aggregations. It splits an aggregation +into local aggregates depending on one side of a join, and pushes these into the inputs of the join. +These then combine to form the final group by/aggregate query. -In Vitess, this is particularly useful because it allows us to push aggregation down to the routes, even when -we have to join the results at the vtgate level. Instead of doing all the grouping and aggregation at the -vtgate level, we can offload most of the work to MySQL, and at the vtgate just summarize the results. +In Vitess, this technique is extremely useful. It enables pushing aggregations to routes, +even with joins at the vtgate level. Thus, rather than handling all grouping and +aggregation at vtgate, most work is offloaded to MySQL, with vtgate summarizing results. 
-# For a query, such as +# For a query like: -select count(*) from R1 JOIN R2 on R1.id = R2.id +select count(*) from L JOIN R on L.id = R.id Original: - GB <- This is the original grouping, doing count(*) - | - JOIN + Aggr <- Original grouping, doing count(*) + | + Join / \ - R1 R2 + L R Transformed: - rootAggr <- This grouping is now SUMing together the distributed `count(*)` we got back + rootAggr <- New grouping SUMs distributed `count(*)` | - Proj <- This projection makes sure that the columns are lined up as expected + Proj <- Projection multiplying `count(*)` from each side of the join | - Sort <- Here we are sorting the input so that the OrderedAggregate can do its thing - | - JOIN + Join / \ - lAggr rAggr + lhsAggr rhsAggr <- `count(*)` aggregation can now be pushed under a route / \ - R1 R2 + L R */ -func pushAggregationThroughJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { - lhs := &joinPusher{ - orig: rootAggr, - pushed: &Aggregator{ - Source: join.LHS, - QP: rootAggr.QP, - }, - columns: initColReUse(len(rootAggr.Columns)), - tableID: TableID(join.LHS), - } - rhs := &joinPusher{ - orig: rootAggr, - pushed: &Aggregator{ - Source: join.RHS, - QP: rootAggr.QP, - }, - columns: initColReUse(len(rootAggr.Columns)), - tableID: TableID(join.RHS), - } +func pushAggregationThroughApplyJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin) (Operator, *ApplyResult) { + lhs := createJoinPusher(rootAggr, join.LHS) + rhs := createJoinPusher(rootAggr, join.RHS) - joinColumns, output, err := splitAggrColumnsToLeftAndRight(ctx, rootAggr, join, lhs, rhs) + columns := &applyJoinColumns{} + output, err := splitAggrColumnsToLeftAndRight(ctx, rootAggr, join, !join.JoinType.IsInner(), columns, lhs, rhs) + join.JoinColumns = columns if err != nil { // if we get this error, we just abort the splitting and fall back on simpler ways of solving the same query if errors.Is(err, 
errAbortAggrPushing) { - return nil, nil, nil + return nil, nil } - return nil, nil, err + panic(err) } - groupingJCs, err := splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs) - if err != nil { - return nil, nil, err - } - joinColumns = append(joinColumns, groupingJCs...) + splitGroupingToLeftAndRight(ctx, rootAggr, lhs, rhs, join.JoinColumns) // We need to add any columns coming from the lhs of the join to the group by on that side // If we don't, the LHS will not be able to return the column, and it can't be used to send down to the RHS - err = addColumnsFromLHSInJoinPredicates(ctx, rootAggr, join, lhs) + addColumnsFromLHSInJoinPredicates(ctx, join, lhs) + + join.LHS, join.RHS = lhs.pushed, rhs.pushed + + if !rootAggr.Original { + // we only keep the root aggregation, if this aggregator was created + // by splitting one and pushing under a join, we can get rid of this one + return output, Rewrote("push Aggregation under join - keep original") + } + + rootAggr.aggregateTheAggregates() + rootAggr.Source = output + return rootAggr, Rewrote("push Aggregation under join") +} + +// pushAggregationThroughHashJoin pushes aggregation through a hash-join in a similar way to pushAggregationThroughApplyJoin +func pushAggregationThroughHashJoin(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *HashJoin) (Operator, *ApplyResult) { + lhs := createJoinPusher(rootAggr, join.LHS) + rhs := createJoinPusher(rootAggr, join.RHS) + + columns := &hashJoinColumns{} + output, err := splitAggrColumnsToLeftAndRight(ctx, rootAggr, join, join.LeftJoin, columns, lhs, rhs) if err != nil { - return nil, nil, err + // if we get this error, we just abort the splitting and fall back on simpler ways of solving the same query + if errors.Is(err, errAbortAggrPushing) { + return nil, nil + } + panic(err) + } + + // The two sides of the hash comparisons are added as grouping expressions + for _, cmp := range join.JoinComparisons { + lhs.addGrouping(ctx, NewGroupBy(cmp.LHS)) + 
columns.addLeft(cmp.LHS) + + rhs.addGrouping(ctx, NewGroupBy(cmp.RHS)) + columns.addRight(cmp.RHS) + } + + // The grouping columns need to be pushed down as grouping columns on the respective sides + for _, groupBy := range rootAggr.Grouping { + deps := ctx.SemTable.RecursiveDeps(groupBy.Inner) + switch { + case deps.IsSolvedBy(lhs.tableID): + lhs.addGrouping(ctx, groupBy) + columns.addLeft(groupBy.Inner) + case deps.IsSolvedBy(rhs.tableID): + rhs.addGrouping(ctx, groupBy) + columns.addRight(groupBy.Inner) + case deps.IsSolvedBy(lhs.tableID.Merge(rhs.tableID)): + // TODO: Support this as well + return nil, nil + default: + panic(vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.Inner))) + } } join.LHS, join.RHS = lhs.pushed, rhs.pushed - join.JoinColumns = joinColumns + join.columns = columns if !rootAggr.Original { // we only keep the root aggregation, if this aggregator was created // by splitting one and pushing under a join, we can get rid of this one - return output, rewrite.NewTree("push Aggregation under join - keep original", rootAggr), nil + return output, Rewrote("push Aggregation under hash join - keep original") } rootAggr.aggregateTheAggregates() rootAggr.Source = output - return rootAggr, rewrite.NewTree("push Aggregation under join", rootAggr), nil + return rootAggr, Rewrote("push Aggregation under hash join") } var errAbortAggrPushing = fmt.Errorf("abort aggregation pushing") -func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAggr *Aggregator, join *ApplyJoin, lhs *joinPusher) error { - for _, pred := range join.JoinPredicates { +func createJoinPusher(rootAggr *Aggregator, operator Operator) *joinPusher { + return &joinPusher{ + orig: rootAggr, + pushed: &Aggregator{ + Source: operator, + QP: rootAggr.QP, + }, + columns: initColReUse(len(rootAggr.Columns)), + tableID: TableID(operator), + } +} + +func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, join *ApplyJoin, lhs 
*joinPusher) { + for _, pred := range join.JoinPredicates.columns { for _, bve := range pred.LHSExprs { - expr := bve.Expr - wexpr, err := rootAggr.QP.GetSimplifiedExpr(ctx, expr) - if err != nil { - return err - } - idx, found := canReuseColumn(ctx, lhs.pushed.Columns, expr, extractExpr) + idx, found := canReuseColumn(ctx, lhs.pushed.Columns, bve.Expr, extractExpr) if !found { idx = len(lhs.pushed.Columns) - lhs.pushed.Columns = append(lhs.pushed.Columns, aeWrap(expr)) + lhs.pushed.Columns = append(lhs.pushed.Columns, aeWrap(bve.Expr)) } - _, found = canReuseColumn(ctx, lhs.pushed.Grouping, wexpr, func(by GroupBy) sqlparser.Expr { - return by.SimplifiedExpr + _, found = canReuseColumn(ctx, lhs.pushed.Grouping, bve.Expr, func(by GroupBy) sqlparser.Expr { + return by.Inner }) if found { @@ -441,50 +481,40 @@ func addColumnsFromLHSInJoinPredicates(ctx *plancontext.PlanningContext, rootAgg } lhs.pushed.Grouping = append(lhs.pushed.Grouping, GroupBy{ - Inner: expr, - SimplifiedExpr: wexpr, - ColOffset: idx, - WSOffset: -1, + Inner: bve.Expr, + ColOffset: idx, + WSOffset: -1, }) } } - return nil } -func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Aggregator, lhs, rhs *joinPusher) ([]JoinColumn, error) { - var groupingJCs []JoinColumn - +func splitGroupingToLeftAndRight( + ctx *plancontext.PlanningContext, + rootAggr *Aggregator, + lhs, rhs *joinPusher, + columns joinColumns, +) { for _, groupBy := range rootAggr.Grouping { deps := ctx.SemTable.RecursiveDeps(groupBy.Inner) - expr := groupBy.Inner switch { case deps.IsSolvedBy(lhs.tableID): lhs.addGrouping(ctx, groupBy) - groupingJCs = append(groupingJCs, JoinColumn{ - Original: aeWrap(groupBy.Inner), - LHSExprs: []BindVarExpr{{Expr: expr}}, - }) + columns.addLeft(groupBy.Inner) case deps.IsSolvedBy(rhs.tableID): rhs.addGrouping(ctx, groupBy) - groupingJCs = append(groupingJCs, JoinColumn{ - Original: aeWrap(groupBy.Inner), - RHSExpr: expr, - }) + columns.addRight(groupBy.Inner) case 
deps.IsSolvedBy(lhs.tableID.Merge(rhs.tableID)): - jc, err := BreakExpressionInLHSandRHS(ctx, groupBy.SimplifiedExpr, lhs.tableID) - if err != nil { - return nil, err - } + jc := breakExpressionInLHSandRHS(ctx, groupBy.Inner, lhs.tableID) for _, lhsExpr := range jc.LHSExprs { e := lhsExpr.Expr - lhs.addGrouping(ctx, NewGroupBy(e, e, aeWrap(e))) + lhs.addGrouping(ctx, NewGroupBy(e)) } - rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr, jc.RHSExpr, aeWrap(jc.RHSExpr))) + rhs.addGrouping(ctx, NewGroupBy(jc.RHSExpr)) default: - return nil, vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.SimplifiedExpr)) + panic(vterrors.VT13001(fmt.Sprintf("grouping with bad dependencies %s", groupBy.Inner))) } } - return groupingJCs, nil } // splitAggrColumnsToLeftAndRight pushes all aggregations on the aggregator above a join and @@ -493,28 +523,31 @@ func splitGroupingToLeftAndRight(ctx *plancontext.PlanningContext, rootAggr *Agg func splitAggrColumnsToLeftAndRight( ctx *plancontext.PlanningContext, aggregator *Aggregator, - join *ApplyJoin, + join Operator, + leftJoin bool, + columns joinColumns, lhs, rhs *joinPusher, -) ([]JoinColumn, ops.Operator, error) { +) (Operator, error) { proj := newAliasedProjection(join) proj.FromAggr = true builder := &aggBuilder{ - lhs: lhs, - rhs: rhs, - proj: proj, - outerJoin: join.LeftJoin, + lhs: lhs, + rhs: rhs, + joinColumns: columns, + proj: proj, + outerJoin: leftJoin, } - canPushDistinctAggr, distinctExpr, err := checkIfWeCanPush(ctx, aggregator) - if err != nil { - return nil, nil, err - } + canPushDistinctAggr, distinctExprs := checkIfWeCanPush(ctx, aggregator) // Distinct aggregation cannot be pushed down in the join. // We keep node of the distinct aggregation expression to be used later for ordering. 
if !canPushDistinctAggr { - aggregator.DistinctExpr = distinctExpr - return nil, nil, errAbortAggrPushing + if len(distinctExprs) != 1 { + errDistinctAggrWithMultiExpr(nil) + } + aggregator.DistinctExpr = distinctExprs[0] + return nil, errAbortAggrPushing } outer: @@ -524,285 +557,102 @@ outer: if aggr.ColOffset == colIdx { err := builder.handleAggr(ctx, aggr) if err != nil { - return nil, nil, err + return nil, err } continue outer } } - _, err := builder.proj.addUnexploredExpr(col, col.Expr) - if err != nil { - return nil, nil, err - } + builder.proj.addUnexploredExpr(col, col.Expr) } - return builder.joinColumns, builder.proj, nil -} -type ( - // aggBuilder is a helper struct that aids in pushing down an Aggregator through a join - // it accumulates the projections (if any) that need to be evaluated on top of the join - aggBuilder struct { - lhs, rhs *joinPusher - joinColumns []JoinColumn - proj *Projection - outerJoin bool - } - // joinPusher is a helper struct that aids in pushing down an Aggregator into one side of a Join. - // It creates a new Aggregator that is pushed down and keeps track of the column dependencies that the new Aggregator has. - joinPusher struct { - orig *Aggregator // The original Aggregator before pushing. - pushed *Aggregator // The new Aggregator created for push-down. - columns []int // List of column offsets used in the new Aggregator. - tableID semantics.TableSet // The TableSet denoting the side of the Join where the new Aggregator is pushed. - - // csAE keeps the copy of the countStar expression that has already been added to split an aggregation. 
- // No need to have multiple countStars, so we cache it here - csAE *sqlparser.AliasedExpr - } -) - -func (ab *aggBuilder) leftCountStar(ctx *plancontext.PlanningContext) *sqlparser.AliasedExpr { - ae, created := ab.lhs.countStar(ctx) - if created { - ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: ae, - LHSExprs: []BindVarExpr{{Expr: ae.Expr}}, - }) - } - return ae + return builder.proj, nil } -func (ab *aggBuilder) rightCountStar(ctx *plancontext.PlanningContext) *sqlparser.AliasedExpr { - ae, created := ab.rhs.countStar(ctx) - if created { - ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: ae, - RHSExpr: ae.Expr, - }) - } - return ae -} - -func (p *joinPusher) countStar(ctx *plancontext.PlanningContext) (*sqlparser.AliasedExpr, bool) { - if p.csAE != nil { - return p.csAE, false - } - cs := &sqlparser.CountStar{} - ae := aeWrap(cs) - csAggr := NewAggr(opcode.AggregateCountStar, cs, ae, "") - expr := p.addAggr(ctx, csAggr) - p.csAE = aeWrap(expr) - return p.csAE, true -} - -func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) error { - switch aggr.OpCode { - case opcode.AggregateCountStar: - return ab.handleCountStar(ctx, aggr) - case opcode.AggregateCount, opcode.AggregateSum: - return ab.handleAggrWithCountStarMultiplier(ctx, aggr) - case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateAnyValue: - return ab.handlePushThroughAggregation(ctx, aggr) - case opcode.AggregateGroupConcat: - f := aggr.Func.(*sqlparser.GroupConcatExpr) - if f.Distinct || len(f.OrderBy) > 0 || f.Separator != "" { - panic("fail here") - } - // this needs special handling, currently aborting the push of function - // and later will try pushing the column instead. - // TODO: this should be handled better by pushing the function down. 
- return errAbortAggrPushing - case opcode.AggregateUnassigned: - return vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) - case opcode.AggregateGtid: - // this is only used for SHOW GTID queries that will never contain joins - return vterrors.VT13001("cannot do join with vgtid") - case opcode.AggregateSumDistinct, opcode.AggregateCountDistinct: - // we are not going to see values multiple times, so we don't need to multiply with the count(*) from the other side - return ab.handlePushThroughAggregation(ctx, aggr) - default: - return vterrors.VT12001(fmt.Sprintf("aggregation not planned: %s", aggr.OpCode.String())) +func coalesceFunc(e sqlparser.Expr) sqlparser.Expr { + // `coalesce(e,1)` will return `e` if `e` is not `NULL`, otherwise it will return `1` + return &sqlparser.FuncExpr{ + Name: sqlparser.NewIdentifierCI("coalesce"), + Exprs: sqlparser.Exprs{ + e, + sqlparser.NewIntLiteral("1"), + }, } } -// pushThroughLeft and Right are used for extremums and random, -// which are not split and then arithmetics is used to aggregate the per-shard aggregations. 
-// For these, we just copy the aggregation to one side of the join and then pick the max of the max:es returned -func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { - ab.lhs.pushThroughAggr(aggr) - ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: aggr.Original, - LHSExprs: []BindVarExpr{{Expr: aggr.Original.Expr}}, - }) -} - -func (ab *aggBuilder) pushThroughRight(aggr Aggr) { - ab.rhs.pushThroughAggr(aggr) - ab.joinColumns = append(ab.joinColumns, JoinColumn{ - Original: aggr.Original, - RHSExpr: aggr.Original.Expr, - }) -} - -func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningContext, aggr Aggr) error { - _, err := ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) - if err != nil { - return err - } - - deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) - switch { - case deps.IsSolvedBy(ab.lhs.tableID): - ab.pushThroughLeft(aggr) - case deps.IsSolvedBy(ab.rhs.tableID): - ab.pushThroughRight(aggr) - default: - return errAbortAggrPushing +func initColReUse(size int) []int { + cols := make([]int, size) + for i := 0; i < size; i++ { + cols[i] = -1 } - return nil + return cols } -func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) error { - // Add the aggregate to both sides of the join. 
- lhsAE := ab.leftCountStar(ctx) - rhsAE := ab.rightCountStar(ctx) - - return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) -} +func extractExpr(expr *sqlparser.AliasedExpr) sqlparser.Expr { return expr.Expr } -func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.PlanningContext, aggr Aggr) error { - var lhsAE, rhsAE *sqlparser.AliasedExpr - var addCoalesce bool - - deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) - switch { - case deps.IsSolvedBy(ab.lhs.tableID): - ab.pushThroughLeft(aggr) - lhsAE = aggr.Original - rhsAE = ab.rightCountStar(ctx) - if ab.outerJoin { - addCoalesce = true +func needAvgBreaking(aggrs []Aggr) bool { + for _, aggr := range aggrs { + if aggr.OpCode == opcode.AggregateAvg { + return true } - - case deps.IsSolvedBy(ab.rhs.tableID): - ab.pushThroughRight(aggr) - lhsAE = ab.leftCountStar(ctx) - rhsAE = aggr.Original - - default: - return errAbortAggrPushing } - - return ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + return false } -func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) error { - // We expect the expressions to be different on each side of the join, otherwise it's an error. - if lhsAE.Expr == rhsAE.Expr { - panic(fmt.Sprintf("Need the two produced expressions to be different. %T %T", lhsAE, rhsAE)) - } +// splitAvgAggregations takes an aggregator that has AVG aggregations in it and splits +// these into sum/count expressions that can be spread out to shards +func splitAvgAggregations(ctx *plancontext.PlanningContext, aggr *Aggregator) (Operator, *ApplyResult) { + proj := newAliasedProjection(aggr) - rhsExpr := rhsAE.Expr + var columns []*sqlparser.AliasedExpr + var aggregations []Aggr - // When dealing with outer joins, we don't want null values from the RHS to ruin the calculations we are doing, - // so we use the MySQL `coalesce` after the join is applied to multiply the count from LHS with 1. 
- if ab.outerJoin && coalesce { - rhsExpr = coalesceFunc(rhsExpr) - } + for offset, col := range aggr.Columns { + avg, ok := col.Expr.(*sqlparser.Avg) + if !ok { + proj.addColumnWithoutPushing(ctx, col, false /* addToGroupBy */) + continue + } - // The final COUNT is obtained by multiplying the counts from both sides. - // This is equivalent to transforming a "select count(*) from t1 join t2" into - // "select count_t1*count_t2 from - // (select count(*) as count_t1 from t1) as x, - // (select count(*) as count_t2 from t2) as y". - projExpr := &sqlparser.BinaryExpr{ - Operator: sqlparser.MultOp, - Left: lhsAE.Expr, - Right: rhsExpr, - } - projAE := &sqlparser.AliasedExpr{ - Expr: aggr.Original.Expr, - As: sqlparser.NewIdentifierCI(aggr.Original.ColumnName()), - } + if avg.Distinct { + panic(vterrors.VT12001("AVG(distinct <>)")) + } - _, err := ab.proj.addUnexploredExpr(projAE, projExpr) - return err -} + // We have an AVG that we need to split + sumExpr := &sqlparser.Sum{Arg: avg.Arg} + countExpr := &sqlparser.Count{Args: []sqlparser.Expr{avg.Arg}} + calcExpr := &sqlparser.BinaryExpr{ + Operator: sqlparser.DivOp, + Left: sumExpr, + Right: countExpr, + } -func coalesceFunc(e sqlparser.Expr) sqlparser.Expr { - // `coalesce(e,1)` will return `e` if `e` is not `NULL`, otherwise it will return `1` - return &sqlparser.FuncExpr{ - Name: sqlparser.NewIdentifierCI("coalesce"), - Exprs: sqlparser.SelectExprs{ - aeWrap(e), - aeWrap(sqlparser.NewIntLiteral("1")), - }, + outputColumn := aeWrap(col.Expr) + outputColumn.As = sqlparser.NewIdentifierCI(col.ColumnName()) + proj.addUnexploredExpr(sqlparser.CloneRefOfAliasedExpr(col), calcExpr) + col.Expr = sumExpr + found := false + for aggrOffset, aggregation := range aggr.Aggregations { + if offset == aggregation.ColOffset { + // We have found the AVG column. 
We'll change it to SUM, and then we add a COUNT as well + aggr.Aggregations[aggrOffset].OpCode = opcode.AggregateSum + + countExprAlias := aeWrap(countExpr) + countAggr := NewAggr(opcode.AggregateCount, countExpr, countExprAlias, sqlparser.String(countExpr)) + countAggr.ColOffset = len(aggr.Columns) + len(columns) + aggregations = append(aggregations, countAggr) + columns = append(columns, countExprAlias) + found = true + break // no need to search the remaining aggregations + } + } + if !found { + // if we get here, it's because we didn't find the aggregation. Something is wrong + panic(vterrors.VT13001("no aggregation pointing to this column was found")) + } } -} - -// addAggr creates a copy of the given aggregation, updates its column offset to point to the correct location in the new Aggregator, -// and adds it to the list of Aggregations of the new Aggregator. It also updates the semantic analysis information to reflect the new structure. -// It returns the expression of the aggregation as it should be used in the parent Aggregator. -func (p *joinPusher) addAggr(ctx *plancontext.PlanningContext, aggr Aggr) sqlparser.Expr { - copyAggr := aggr - expr := sqlparser.CloneExpr(aggr.Original.Expr) - copyAggr.Original = aeWrap(expr) - // copy dependencies so we can keep track of which side expressions need to be pushed to - ctx.SemTable.Direct[expr] = p.tableID - ctx.SemTable.Recursive[expr] = p.tableID - copyAggr.ColOffset = len(p.pushed.Columns) - p.pushed.Columns = append(p.pushed.Columns, copyAggr.Original) - p.pushed.Aggregations = append(p.pushed.Aggregations, copyAggr) - return expr -} - -// pushThroughAggr pushes through an aggregation without changing dependencies. 
-// Can be used for aggregations we can push in one piece -func (p *joinPusher) pushThroughAggr(aggr Aggr) { - newAggr := NewAggr(aggr.OpCode, aggr.Func, aggr.Original, aggr.Alias) - newAggr.ColOffset = len(p.pushed.Columns) - p.pushed.Columns = append(p.pushed.Columns, newAggr.Original) - p.pushed.Aggregations = append(p.pushed.Aggregations, newAggr) -} -// addGrouping creates a copy of the given GroupBy, updates its column offset to point to the correct location in the new Aggregator, -// and adds it to the list of GroupBy expressions of the new Aggregator. It also updates the semantic analysis information to reflect the new structure. -// It returns the expression of the GroupBy as it should be used in the parent Aggregator. -func (p *joinPusher) addGrouping(ctx *plancontext.PlanningContext, gb GroupBy) sqlparser.Expr { - copyGB := gb - expr := sqlparser.CloneExpr(gb.Inner) - // copy dependencies so we can keep track of which side expressions need to be pushed to - ctx.SemTable.CopyDependencies(gb.Inner, expr) - // if the column exists in the selection then copy it down to the pushed aggregator operator. - if copyGB.ColOffset != -1 { - offset := p.useColumn(copyGB.ColOffset) - copyGB.ColOffset = offset - } else { - copyGB.ColOffset = len(p.pushed.Columns) - p.pushed.Columns = append(p.pushed.Columns, aeWrap(copyGB.Inner)) - } - p.pushed.Grouping = append(p.pushed.Grouping, copyGB) - return expr -} + aggr.Columns = append(aggr.Columns, columns...) + aggr.Aggregations = append(aggr.Aggregations, aggregations...) -// useColumn checks whether the column corresponding to the given offset has been used in the new Aggregator. -// If it has not been used before, it adds the column to the new Aggregator -// and updates the columns mapping to reflect the new location of the column. -// It returns the offset of the column in the new Aggregator. 
-func (p *joinPusher) useColumn(offset int) int { - if p.columns[offset] == -1 { - p.columns[offset] = len(p.pushed.Columns) - // still haven't used this expression on this side - p.pushed.Columns = append(p.pushed.Columns, p.orig.Columns[offset]) - } - return p.columns[offset] + return proj, Rewrote("split avg aggregation") } - -func initColReUse(size int) []int { - cols := make([]int, size) - for i := 0; i < size; i++ { - cols[i] = -1 - } - return cols -} - -func extractExpr(expr *sqlparser.AliasedExpr) sqlparser.Expr { return expr.Expr } diff --git a/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go b/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go new file mode 100644 index 00000000000..eb14f83b7df --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/aggregation_pushing_helper.go @@ -0,0 +1,320 @@ +/* +Copyright 2023 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "fmt" + "slices" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + // aggBuilder is a helper struct that aids in pushing down an Aggregator through a join + // it accumulates the projections (if any) that need to be evaluated on top of the join + aggBuilder struct { + lhs, rhs *joinPusher + joinColumns joinColumns + proj *Projection + outerJoin bool + } + + // joinPusher is a helper struct that aids in pushing down an Aggregator into one side of a Join. + // It creates a new Aggregator that is pushed down and keeps track of the column dependencies that the new Aggregator has. + joinPusher struct { + orig *Aggregator // The original Aggregator before pushing. + pushed *Aggregator // The new Aggregator created for push-down. + columns []int // List of column offsets used in the new Aggregator. + tableID semantics.TableSet // The TableSet denoting the side of the Join where the new Aggregator is pushed. + + // csAE keeps the copy of the countStar expression that has already been added to split an aggregation. 
+ // No need to have multiple countStars, so we cache it here + csAE *sqlparser.AliasedExpr + } + + joinColumns interface { + addLeft(expr sqlparser.Expr) + addRight(expr sqlparser.Expr) + } + + applyJoinColumns struct { + columns []applyJoinColumn + } + + hashJoinColumns struct { + columns []hashJoinColumn + } +) + +func (jc *applyJoinColumns) addLeft(expr sqlparser.Expr) { + jc.columns = append(jc.columns, applyJoinColumn{ + Original: expr, + LHSExprs: []BindVarExpr{{Expr: expr}}, + }) +} + +func (jc *applyJoinColumns) addRight(expr sqlparser.Expr) { + jc.columns = append(jc.columns, applyJoinColumn{ + Original: expr, + RHSExpr: expr, + }) +} + +func (jc *applyJoinColumns) clone() *applyJoinColumns { + return &applyJoinColumns{columns: slices.Clone(jc.columns)} +} + +func (jc *applyJoinColumns) add(col applyJoinColumn) { + jc.columns = append(jc.columns, col) +} + +func (hj *hashJoinColumns) addLeft(expr sqlparser.Expr) { + hj.columns = append(hj.columns, hashJoinColumn{ + expr: expr, + side: Left, + }) +} + +func (hj *hashJoinColumns) add(expr sqlparser.Expr) { + hj.columns = append(hj.columns, hashJoinColumn{ + expr: expr, + }) +} + +func (hj *hashJoinColumns) addRight(expr sqlparser.Expr) { + hj.columns = append(hj.columns, hashJoinColumn{ + expr: expr, + side: Right, + }) +} + +func (hj *hashJoinColumns) clone() *hashJoinColumns { + return &hashJoinColumns{columns: slices.Clone(hj.columns)} +} + +func (ab *aggBuilder) leftCountStar(ctx *plancontext.PlanningContext) *sqlparser.AliasedExpr { + ae, created := ab.lhs.countStar(ctx) + if created { + ab.joinColumns.addLeft(ae.Expr) + } + return ae +} + +func (ab *aggBuilder) rightCountStar(ctx *plancontext.PlanningContext) *sqlparser.AliasedExpr { + ae, created := ab.rhs.countStar(ctx) + if created { + ab.joinColumns.addRight(ae.Expr) + } + return ae +} + +func (ab *aggBuilder) handleAggr(ctx *plancontext.PlanningContext, aggr Aggr) error { + switch aggr.OpCode { + case opcode.AggregateCountStar: + 
ab.handleCountStar(ctx, aggr) + return nil + case opcode.AggregateCount, opcode.AggregateSum: + return ab.handleAggrWithCountStarMultiplier(ctx, aggr) + case opcode.AggregateMax, opcode.AggregateMin, opcode.AggregateAnyValue: + return ab.handlePushThroughAggregation(ctx, aggr) + case opcode.AggregateGroupConcat: + f := aggr.Func.(*sqlparser.GroupConcatExpr) + if f.Distinct || len(f.OrderBy) > 0 || f.Separator != "" { + panic("fail here") + } + // this needs special handling, currently aborting the push of function + // and later will try pushing the column instead. + // TODO: this should be handled better by pushing the function down. + return errAbortAggrPushing + case opcode.AggregateUnassigned: + panic(vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original)))) + case opcode.AggregateGtid: + // this is only used for SHOW GTID queries that will never contain joins + panic(vterrors.VT13001("cannot do join with vgtid")) + case opcode.AggregateSumDistinct, opcode.AggregateCountDistinct: + // we are not going to see values multiple times, so we don't need to multiply with the count(*) from the other side + return ab.handlePushThroughAggregation(ctx, aggr) + default: + panic(vterrors.VT12001(fmt.Sprintf("aggregation not planned: %s", aggr.OpCode.String()))) + } +} + +// pushThroughLeft and Right are used for extremums and random, +// which are not split and then arithmetics is used to aggregate the per-shard aggregations. 
+// For these, we just copy the aggregation to one side of the join and then pick the max of the max:es returned +func (ab *aggBuilder) pushThroughLeft(aggr Aggr) { + ab.lhs.pushThroughAggr(aggr) + ab.joinColumns.addLeft(aggr.Original.Expr) +} + +func (ab *aggBuilder) pushThroughRight(aggr Aggr) { + ab.rhs.pushThroughAggr(aggr) + ab.joinColumns.addRight(aggr.Original.Expr) +} + +func (ab *aggBuilder) handlePushThroughAggregation(ctx *plancontext.PlanningContext, aggr Aggr) error { + ab.proj.addUnexploredExpr(aggr.Original, aggr.Original.Expr) + + deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) + switch { + case deps.IsSolvedBy(ab.lhs.tableID): + ab.pushThroughLeft(aggr) + case deps.IsSolvedBy(ab.rhs.tableID): + ab.pushThroughRight(aggr) + default: + return errAbortAggrPushing + } + return nil +} + +func (ab *aggBuilder) handleCountStar(ctx *plancontext.PlanningContext, aggr Aggr) { + // Add the aggregate to both sides of the join. + lhsAE := ab.leftCountStar(ctx) + rhsAE := ab.rightCountStar(ctx) + + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, true) +} + +func (ab *aggBuilder) handleAggrWithCountStarMultiplier(ctx *plancontext.PlanningContext, aggr Aggr) error { + var lhsAE, rhsAE *sqlparser.AliasedExpr + var addCoalesce bool + + deps := ctx.SemTable.RecursiveDeps(aggr.Original.Expr) + switch { + case deps.IsSolvedBy(ab.lhs.tableID): + ab.pushThroughLeft(aggr) + lhsAE = aggr.Original + rhsAE = ab.rightCountStar(ctx) + if ab.outerJoin { + addCoalesce = true + } + + case deps.IsSolvedBy(ab.rhs.tableID): + ab.pushThroughRight(aggr) + lhsAE = ab.leftCountStar(ctx) + rhsAE = aggr.Original + + default: + return errAbortAggrPushing + } + + ab.buildProjectionForAggr(lhsAE, rhsAE, aggr, addCoalesce) + return nil +} + +func (ab *aggBuilder) buildProjectionForAggr(lhsAE *sqlparser.AliasedExpr, rhsAE *sqlparser.AliasedExpr, aggr Aggr, coalesce bool) { + // We expect the expressions to be different on each side of the join, otherwise it's an error. 
+ if lhsAE.Expr == rhsAE.Expr { + panic(fmt.Sprintf("Need the two produced expressions to be different. %T %T", lhsAE, rhsAE)) + } + + rhsExpr := rhsAE.Expr + + // When dealing with outer joins, we don't want null values from the RHS to ruin the calculations we are doing, + // so we use the MySQL `coalesce` after the join is applied to multiply the count from LHS with 1. + if ab.outerJoin && coalesce { + rhsExpr = coalesceFunc(rhsExpr) + } + + // The final COUNT is obtained by multiplying the counts from both sides. + // This is equivalent to transforming a "select count(*) from t1 join t2" into + // "select count_t1*count_t2 from + // (select count(*) as count_t1 from t1) as x, + // (select count(*) as count_t2 from t2) as y". + projExpr := &sqlparser.BinaryExpr{ + Operator: sqlparser.MultOp, + Left: lhsAE.Expr, + Right: rhsExpr, + } + projAE := &sqlparser.AliasedExpr{ + Expr: aggr.Original.Expr, + As: sqlparser.NewIdentifierCI(aggr.Original.ColumnName()), + } + + ab.proj.addUnexploredExpr(projAE, projExpr) +} + +func (p *joinPusher) countStar(ctx *plancontext.PlanningContext) (*sqlparser.AliasedExpr, bool) { + if p.csAE != nil { + return p.csAE, false + } + cs := &sqlparser.CountStar{} + ae := aeWrap(cs) + csAggr := NewAggr(opcode.AggregateCountStar, cs, ae, "") + expr := p.addAggr(ctx, csAggr) + p.csAE = aeWrap(expr) + return p.csAE, true +} + +// addAggr creates a copy of the given aggregation, updates its column offset to point to the correct location in the new Aggregator, +// and adds it to the list of Aggregations of the new Aggregator. It also updates the semantic analysis information to reflect the new structure. +// It returns the expression of the aggregation as it should be used in the parent Aggregator. 
+func (p *joinPusher) addAggr(ctx *plancontext.PlanningContext, aggr Aggr) sqlparser.Expr { + copyAggr := aggr + expr := sqlparser.CloneExpr(aggr.Original.Expr) + copyAggr.Original = aeWrap(expr) + // copy dependencies so we can keep track of which side expressions need to be pushed to + ctx.SemTable.Direct[expr] = p.tableID + ctx.SemTable.Recursive[expr] = p.tableID + copyAggr.ColOffset = len(p.pushed.Columns) + p.pushed.Columns = append(p.pushed.Columns, copyAggr.Original) + p.pushed.Aggregations = append(p.pushed.Aggregations, copyAggr) + return expr +} + +// pushThroughAggr pushes through an aggregation without changing dependencies. +// Can be used for aggregations we can push in one piece +func (p *joinPusher) pushThroughAggr(aggr Aggr) { + newAggr := NewAggr(aggr.OpCode, aggr.Func, aggr.Original, aggr.Alias) + newAggr.ColOffset = len(p.pushed.Columns) + p.pushed.Columns = append(p.pushed.Columns, newAggr.Original) + p.pushed.Aggregations = append(p.pushed.Aggregations, newAggr) +} + +// addGrouping creates a copy of the given GroupBy, updates its column offset to point to the correct location in the new Aggregator, +// and adds it to the list of GroupBy expressions of the new Aggregator. It also updates the semantic analysis information to reflect the new structure. +// It returns the expression of the GroupBy as it should be used in the parent Aggregator. +func (p *joinPusher) addGrouping(ctx *plancontext.PlanningContext, gb GroupBy) sqlparser.Expr { + copyGB := gb + expr := sqlparser.CloneExpr(gb.Inner) + // copy dependencies so we can keep track of which side expressions need to be pushed to + ctx.SemTable.CopyDependencies(gb.Inner, expr) + // if the column exists in the selection then copy it down to the pushed aggregator operator. 
+ if copyGB.ColOffset != -1 { + offset := p.useColumn(copyGB.ColOffset) + copyGB.ColOffset = offset + } else { + copyGB.ColOffset = len(p.pushed.Columns) + p.pushed.Columns = append(p.pushed.Columns, aeWrap(copyGB.Inner)) + } + p.pushed.Grouping = append(p.pushed.Grouping, copyGB) + return expr +} + +// useColumn checks whether the column corresponding to the given offset has been used in the new Aggregator. +// If it has not been used before, it adds the column to the new Aggregator +// and updates the columns mapping to reflect the new location of the column. +// It returns the offset of the column in the new Aggregator. +func (p *joinPusher) useColumn(offset int) int { + if p.columns[offset] == -1 { + p.columns[offset] = len(p.pushed.Columns) + // still haven't used this expression on this side + p.pushed.Columns = append(p.pushed.Columns, p.orig.Columns[offset]) + } + return p.columns[offset] +} diff --git a/go/vt/vtgate/planbuilder/operators/aggregator.go b/go/vt/vtgate/planbuilder/operators/aggregator.go index 45ccb041ddd..49081eb6a10 100644 --- a/go/vt/vtgate/planbuilder/operators/aggregator.go +++ b/go/vt/vtgate/planbuilder/operators/aggregator.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,9 +34,10 @@ type ( // Both all aggregations and no grouping, and the inverse // of all grouping and no aggregations are valid configurations of this operator Aggregator struct { - Source ops.Operator + Source Operator Columns []*sqlparser.AliasedExpr + WithRollup bool Grouping []GroupBy Aggregations []Aggr @@ -60,7 +60,7 @@ type ( } ) -func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { +func (a *Aggregator) Clone(inputs []Operator) Operator { kopy := *a kopy.Source = inputs[0] kopy.Columns = 
slices.Clone(a.Columns) @@ -69,22 +69,19 @@ func (a *Aggregator) Clone(inputs []ops.Operator) ops.Operator { return &kopy } -func (a *Aggregator) Inputs() []ops.Operator { - return []ops.Operator{a.Source} +func (a *Aggregator) Inputs() []Operator { + return []Operator{a.Source} } -func (a *Aggregator) SetInputs(operators []ops.Operator) { +func (a *Aggregator) SetInputs(operators []Operator) { if len(operators) != 1 { panic(fmt.Sprintf("unexpected number of operators as input in aggregator: %d", len(operators))) } a.Source = operators[0] } -func (a *Aggregator) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { - return &Filter{ - Source: a, - Predicates: []sqlparser.Expr{expr}, - } +func (a *Aggregator) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) Operator { + return newFilter(a, expr) } func (a *Aggregator) addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) int { @@ -92,7 +89,7 @@ func (a *Aggregator) addColumnWithoutPushing(ctx *plancontext.PlanningContext, e a.Columns = append(a.Columns, expr) if addToGroupBy { - groupBy := NewGroupBy(expr.Expr, expr.Expr, expr) + groupBy := NewGroupBy(expr.Expr) groupBy.ColOffset = offset a.Grouping = append(a.Grouping, groupBy) } else { @@ -100,6 +97,12 @@ func (a *Aggregator) addColumnWithoutPushing(ctx *plancontext.PlanningContext, e switch e := expr.Expr.(type) { case sqlparser.AggrFunc: aggr = createAggrFromAggrFunc(e, expr) + case *sqlparser.FuncExpr: + if IsAggr(ctx, e) { + aggr = NewAggr(opcode.AggregateUDF, nil, expr, expr.As.String()) + } else { + aggr = NewAggr(opcode.AggregateAnyValue, nil, expr, expr.As.String()) + } default: aggr = NewAggr(opcode.AggregateAnyValue, nil, expr, expr.As.String()) } @@ -121,7 +124,21 @@ func (a *Aggregator) isDerived() bool { return a.DT != nil } -func (a *Aggregator) FindCol(ctx *plancontext.PlanningContext, in sqlparser.Expr, _ bool) int { +func (a *Aggregator) derivedName() 
string { + if a.DT == nil { + return "" + } + + return a.DT.Alias +} + +func (a *Aggregator) FindCol(ctx *plancontext.PlanningContext, in sqlparser.Expr, underRoute bool) int { + if underRoute && a.isDerived() { + // We don't want to use columns on this operator if it's a derived table under a route. + // In this case, we need to add a Projection on top of this operator to make the column available + return -1 + } + expr := a.DT.RewriteExpression(ctx, in) if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { return offset @@ -149,7 +166,7 @@ func (a *Aggregator) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gro // This process also sets the weight string column offset, eliminating the need for a later addition in the aggregator operator's planOffset. if wsExpr, isWS := rewritten.(*sqlparser.WeightStringFuncExpr); isWS { idx := slices.IndexFunc(a.Grouping, func(by GroupBy) bool { - return ctx.SemTable.EqualsExprWithDeps(wsExpr.Expr, by.SimplifiedExpr) + return ctx.SemTable.EqualsExprWithDeps(wsExpr.Expr, by.Inner) }) if idx >= 0 { a.Grouping[idx].WSOffset = len(a.Columns) @@ -174,13 +191,70 @@ func (a *Aggregator) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gro return offset } +func (a *Aggregator) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + if len(a.Columns) <= offset { + panic(vterrors.VT13001("offset out of range")) + } + + var expr sqlparser.Expr + // first search for the offset among the groupings + for i, by := range a.Grouping { + if by.ColOffset != offset { + continue + } + if by.WSOffset >= 0 { + // ah, we already have a weigh_string for this column. 
let's return it as is + return by.WSOffset + } + + // we need to add a WS column + a.Grouping[i].WSOffset = len(a.Columns) + expr = a.Columns[offset].Expr + break + } + + if expr == nil { + for _, aggr := range a.Aggregations { + if aggr.ColOffset != offset { + continue + } + if aggr.WSOffset >= 0 { + // ah, we already have a weigh_string for this column. let's return it as is + return aggr.WSOffset + } + + panic(vterrors.VT13001("expected to find a weight string for aggregation")) + } + + panic(vterrors.VT13001("could not find expression at offset")) + } + + wsExpr := weightStringFor(expr) + wsAe := aeWrap(wsExpr) + + wsOffset := len(a.Columns) + a.Columns = append(a.Columns, wsAe) + if underRoute { + // if we are under a route, we are done here. + // the column will be use when creating the query to send to the tablet, and that is all we need + return wsOffset + } + + incomingOffset := a.Source.AddWSColumn(ctx, offset, false) + + if wsOffset != incomingOffset { + // TODO: we could handle this case by adding a projection on under the aggregator to make the columns line up + panic(errFailedToPlan(wsAe)) + } + return wsOffset +} + func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlparser.AliasedExpr, addToGroupBy bool) int { expr := ae.Expr offset := a.FindCol(ctx, expr, false) if offset >= 0 { return offset } - expr = a.DT.RewriteExpression(ctx, expr) // Aggregator is little special and cannot work if the input offset are not matched with the aggregation columns. // So, before pushing anything from above the aggregator offset planning needs to be completed. 
@@ -188,22 +262,27 @@ func (a *Aggregator) findColInternal(ctx *plancontext.PlanningContext, ae *sqlpa if offset, found := canReuseColumn(ctx, a.Columns, expr, extractExpr); found { return offset } - colName, isColName := expr.(*sqlparser.ColName) - for i, col := range a.Columns { - if isColName && colName.Name.EqualString(col.As.String()) { - return i - } - } if addToGroupBy { - panic(vterrors.VT13001("did not expect to add group by here")) + panic(vterrors.VT13001(fmt.Sprintf("did not expect to add group by here: %s", sqlparser.String(expr)))) } return -1 } +func isDerived(op Operator) bool { + switch op := op.(type) { + case *Horizon: + return op.IsDerived() + case selectExpressions: + return op.derivedName() != "" + default: + return false + } +} + func (a *Aggregator) GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr { - if _, isSourceDerived := a.Source.(*Horizon); isSourceDerived { + if isDerived(a.Source) { return a.Columns } @@ -243,38 +322,49 @@ func (a *Aggregator) ShortDescription() string { var grouping []string for _, gb := range a.Grouping { - grouping = append(grouping, sqlparser.String(gb.SimplifiedExpr)) + grouping = append(grouping, sqlparser.String(gb.Inner)) + } + var rollUp string + if a.WithRollup { + rollUp = " with rollup" } - return fmt.Sprintf("%s%s group by %s", org, strings.Join(columns, ", "), strings.Join(grouping, ",")) + return fmt.Sprintf( + "%s%s group by %s%s", + org, + strings.Join(columns, ", "), + strings.Join(grouping, ","), + rollUp, + ) } -func (a *Aggregator) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (a *Aggregator) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return a.Source.GetOrdering(ctx) } -func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) { +func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) Operator { if a.offsetPlanned { - return + return nil } defer func() { a.offsetPlanned = true }() if !a.Pushed { 
a.planOffsetsNotPushed(ctx) - return + return nil } for idx, gb := range a.Grouping { if gb.ColOffset == -1 { offset := a.internalAddColumn(ctx, aeWrap(gb.Inner), false) a.Grouping[idx].ColOffset = offset + gb.ColOffset = offset } - if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.SimplifiedExpr) { + if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.Inner) { continue } - offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.SimplifiedExpr)), true) + offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.Inner)), true) a.Grouping[idx].WSOffset = offset } @@ -282,9 +372,11 @@ func (a *Aggregator) planOffsets(ctx *plancontext.PlanningContext) { if !aggr.NeedsWeightString(ctx) { continue } - offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(aggr.Func.GetArg())), true) + arg := aggr.getPushColumn() + offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(arg)), true) a.Aggregations[idx].WSOffset = offset } + return nil } func (aggr Aggr) getPushColumn() sqlparser.Expr { @@ -295,10 +387,13 @@ func (aggr Aggr) getPushColumn() sqlparser.Expr { return sqlparser.NewIntLiteral("1") case opcode.AggregateGroupConcat: if len(aggr.Func.GetArgs()) > 1 { - panic("more than 1 column") + panic(vterrors.VT12001("group_concat with more than 1 column")) } - fallthrough + return aggr.Func.GetArg() default: + if len(aggr.Func.GetArgs()) > 1 { + panic(vterrors.VT03001(sqlparser.String(aggr.Func))) + } return aggr.Func.GetArg() } } @@ -368,11 +463,11 @@ func (a *Aggregator) pushRemainingGroupingColumnsAndWeightStrings(ctx *planconte a.Grouping[idx].ColOffset = offset } - if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.SimplifiedExpr) { + if gb.WSOffset != -1 || !ctx.SemTable.NeedsWeightString(gb.Inner) { continue } - offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.SimplifiedExpr)), false) + offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(gb.Inner)), false) a.Grouping[idx].WSOffset = offset } for idx, aggr := range 
a.Aggregations { @@ -380,7 +475,8 @@ func (a *Aggregator) pushRemainingGroupingColumnsAndWeightStrings(ctx *planconte continue } - offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(aggr.Func.GetArg())), false) + arg := aggr.getPushColumn() + offset := a.internalAddColumn(ctx, aeWrap(weightStringFor(arg)), false) a.Aggregations[idx].WSOffset = offset } } @@ -399,10 +495,10 @@ func (a *Aggregator) internalAddColumn(ctx *plancontext.PlanningContext, aliased return offset } -// SplitAggregatorBelowRoute returns the aggregator that will live under the Route. +// SplitAggregatorBelowOperators returns the aggregator that will live under the Route. // This is used when we are splitting the aggregation so one part is done // at the mysql level and one part at the vtgate level -func (a *Aggregator) SplitAggregatorBelowRoute(input []ops.Operator) *Aggregator { +func (a *Aggregator) SplitAggregatorBelowOperators(input []Operator) *Aggregator { newOp := a.Clone(input).(*Aggregator) newOp.Pushed = false newOp.Original = false @@ -414,4 +510,21 @@ func (a *Aggregator) introducesTableID() semantics.TableSet { return a.DT.introducesTableID() } -var _ ops.Operator = (*Aggregator)(nil) +func (a *Aggregator) checkForInvalidAggregations() { + for _, aggr := range a.Aggregations { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + aggrFunc, isAggregate := node.(sqlparser.AggrFunc) + if !isAggregate { + return true, nil + } + args := aggrFunc.GetArgs() + if args != nil && len(args) != 1 { + panic(vterrors.VT03001(sqlparser.String(node))) + } + return true, nil + + }, aggr.Original.Expr) + } +} + +var _ Operator = (*Aggregator)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/apply_join.go b/go/vt/vtgate/planbuilder/operators/apply_join.go index 138c17f2da7..0d214c545d1 100644 --- a/go/vt/vtgate/planbuilder/operators/apply_join.go +++ b/go/vt/vtgate/planbuilder/operators/apply_join.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/slice" 
"vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -33,19 +32,19 @@ type ( // ApplyJoin is a nested loop join - for each row on the LHS, // we'll execute the plan on the RHS, feeding data from left to right ApplyJoin struct { - LHS, RHS ops.Operator + LHS, RHS Operator + // JoinType is permitted to store only 3 of the possible values + // NormalJoinType, StraightJoinType and LeftJoinType. + JoinType sqlparser.JoinType // LeftJoin will be true in the case of an outer join LeftJoin bool - // Before offset planning - Predicate sqlparser.Expr - // JoinColumns keeps track of what AST expression is represented in the Columns array - JoinColumns []JoinColumn + JoinColumns *applyJoinColumns // JoinPredicates are join predicates that have been broken up into left hand side and right hand side parts. - JoinPredicates []JoinColumn + JoinPredicates *applyJoinColumns // ExtraVars are columns we need to copy from left to right not needed by any predicates or projections, // these are needed by other operators further down the right hand side of the join @@ -61,7 +60,7 @@ type ( Vars map[string]int } - // JoinColumn is where we store information about columns passing through the join operator + // applyJoinColumn is where we store information about columns passing through the join operator // It can be in one of three possible configurations: // - Pure left // We are projecting a column that comes from the left. The RHSExpr will be nil for these @@ -71,11 +70,12 @@ type ( // Here we need to transmit columns from the LHS to the RHS, // so they can be used for the result of this expression that is using data from both sides. 
// All fields will be used for these - JoinColumn struct { - Original *sqlparser.AliasedExpr // this is the original expression being passed through - LHSExprs []BindVarExpr - RHSExpr sqlparser.Expr - GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true + applyJoinColumn struct { + Original sqlparser.Expr // this is the original expression being passed through + LHSExprs []BindVarExpr // These are the expressions we are pushing to the left hand side which we'll receive as bind variables + RHSExpr sqlparser.Expr // This the expression that we'll evaluate on the right hand side. This is nil, if the right hand side has nothing. + DTColName *sqlparser.ColName // This is the output column name that the parent of JOIN will be seeing. If this is unset, then the colname is the String(Original). We set this when we push Projections with derived tables underneath a Join. + GroupBy bool // if this is true, we need to push this down to our inputs with addToGroupBy set to true } // BindVarExpr is an expression needed from one side of a join/subquery, and the argument name for it. 
@@ -86,110 +86,129 @@ type ( } ) -func NewApplyJoin(lhs, rhs ops.Operator, predicate sqlparser.Expr, leftOuterJoin bool) *ApplyJoin { - return &ApplyJoin{ - LHS: lhs, - RHS: rhs, - Vars: map[string]int{}, - Predicate: predicate, - LeftJoin: leftOuterJoin, +func NewApplyJoin(ctx *plancontext.PlanningContext, lhs, rhs Operator, predicate sqlparser.Expr, joinType sqlparser.JoinType) *ApplyJoin { + aj := &ApplyJoin{ + LHS: lhs, + RHS: rhs, + Vars: map[string]int{}, + JoinType: joinType, + JoinColumns: &applyJoinColumns{}, + JoinPredicates: &applyJoinColumns{}, } + aj.AddJoinPredicate(ctx, predicate) + return aj } // Clone implements the Operator interface -func (aj *ApplyJoin) Clone(inputs []ops.Operator) ops.Operator { +func (aj *ApplyJoin) Clone(inputs []Operator) Operator { kopy := *aj kopy.LHS = inputs[0] kopy.RHS = inputs[1] kopy.Columns = slices.Clone(aj.Columns) - kopy.JoinColumns = slices.Clone(aj.JoinColumns) - kopy.JoinPredicates = slices.Clone(aj.JoinPredicates) + kopy.JoinColumns = aj.JoinColumns.clone() + kopy.JoinPredicates = aj.JoinPredicates.clone() kopy.Vars = maps.Clone(aj.Vars) - kopy.Predicate = sqlparser.CloneExpr(aj.Predicate) kopy.ExtraLHSVars = slices.Clone(aj.ExtraLHSVars) return &kopy } -func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { - return AddPredicate(ctx, aj, expr, false, newFilter) +func (aj *ApplyJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { + return AddPredicate(ctx, aj, expr, false, newFilterSinglePredicate) } // Inputs implements the Operator interface -func (aj *ApplyJoin) Inputs() []ops.Operator { - return []ops.Operator{aj.LHS, aj.RHS} +func (aj *ApplyJoin) Inputs() []Operator { + return []Operator{aj.LHS, aj.RHS} } // SetInputs implements the Operator interface -func (aj *ApplyJoin) SetInputs(inputs []ops.Operator) { +func (aj *ApplyJoin) SetInputs(inputs []Operator) { aj.LHS, aj.RHS = inputs[0], inputs[1] } -func (aj *ApplyJoin) 
GetLHS() ops.Operator { +func (aj *ApplyJoin) GetLHS() Operator { return aj.LHS } -func (aj *ApplyJoin) GetRHS() ops.Operator { +func (aj *ApplyJoin) GetRHS() Operator { return aj.RHS } -func (aj *ApplyJoin) SetLHS(operator ops.Operator) { +func (aj *ApplyJoin) SetLHS(operator Operator) { aj.LHS = operator } -func (aj *ApplyJoin) SetRHS(operator ops.Operator) { +func (aj *ApplyJoin) SetRHS(operator Operator) { aj.RHS = operator } func (aj *ApplyJoin) MakeInner() { - aj.LeftJoin = false + if aj.IsInner() { + return + } + aj.JoinType = sqlparser.NormalJoinType } func (aj *ApplyJoin) IsInner() bool { - return !aj.LeftJoin + return aj.JoinType.IsInner() } -func (aj *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error { - aj.Predicate = ctx.SemTable.AndExpressions(expr, aj.Predicate) - - col, err := BreakExpressionInLHSandRHS(ctx, expr, TableID(aj.LHS)) - if err != nil { - return err +func (aj *ApplyJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { + if expr == nil { + return + } + rhs := aj.RHS + predicates := sqlparser.SplitAndExpression(nil, expr) + for _, pred := range predicates { + col := breakExpressionInLHSandRHS(ctx, pred, TableID(aj.LHS)) + aj.JoinPredicates.add(col) + ctx.AddJoinPredicates(pred, col.RHSExpr) + rhs = rhs.AddPredicate(ctx, col.RHSExpr) } - aj.JoinPredicates = append(aj.JoinPredicates, col) - rhs := aj.RHS.AddPredicate(ctx, col.RHSExpr) aj.RHS = rhs - - return nil -} - -func (aj *ApplyJoin) pushColRight(ctx *plancontext.PlanningContext, e *sqlparser.AliasedExpr, addToGroupBy bool) (int, error) { - offset := aj.RHS.AddColumn(ctx, true, addToGroupBy, e) - return offset, nil } -func (aj *ApplyJoin) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { - return slice.Map(aj.JoinColumns, joinColumnToAliasedExpr) +func (aj *ApplyJoin) GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr { + colSize := len(aj.Columns) + if colSize == 0 { + // we've yet to 
do offset planning - let's return what we have for now + return slice.Map(aj.JoinColumns.columns, func(from applyJoinColumn) *sqlparser.AliasedExpr { + return aeWrap(from.Original) + }) + } + cols := make([]*sqlparser.AliasedExpr, colSize) + var lhsCols, rhsCols []*sqlparser.AliasedExpr + for idx, column := range aj.Columns { + if column < 0 { + if lhsCols == nil { + lhsCols = aj.LHS.GetColumns(ctx) + } + cols[idx] = lhsCols[FromLeftOffset(column)] + } else { + if rhsCols == nil { + rhsCols = aj.RHS.GetColumns(ctx) + } + cols[idx] = rhsCols[FromRightOffset(column)] + } + } + return cols } func (aj *ApplyJoin) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs { return transformColumnsToSelectExprs(ctx, aj) } -func (aj *ApplyJoin) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (aj *ApplyJoin) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return aj.LHS.GetOrdering(ctx) } -func joinColumnToAliasedExpr(c JoinColumn) *sqlparser.AliasedExpr { - return c.Original -} - -func joinColumnToExpr(column JoinColumn) sqlparser.Expr { - return column.Original.Expr +func joinColumnToExpr(column applyJoinColumn) sqlparser.Expr { + return column.Original } -func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col JoinColumn, err error) { +func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sqlparser.AliasedExpr, e sqlparser.Expr, addToGroupBy bool) (col applyJoinColumn) { defer func() { - col.Original = orig + col.Original = orig.Expr }() lhs := TableID(aj.LHS) rhs := TableID(aj.RHS) @@ -203,23 +222,23 @@ func (aj *ApplyJoin) getJoinColumnFor(ctx *plancontext.PlanningContext, orig *sq case deps.IsSolvedBy(rhs): col.RHSExpr = e case deps.IsSolvedBy(both): - col, err = BreakExpressionInLHSandRHS(ctx, e, TableID(aj.LHS)) - if err != nil { - return JoinColumn{}, err - } + col = breakExpressionInLHSandRHS(ctx, e, 
TableID(aj.LHS)) default: - return JoinColumn{}, vterrors.VT13002(sqlparser.String(e)) + panic(vterrors.VT13002(sqlparser.String(e))) } return } -func (aj *ApplyJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { - offset, found := canReuseColumn(ctx, aj.JoinColumns, expr, joinColumnToExpr) - if !found { - return -1 +func applyJoinCompare(ctx *plancontext.PlanningContext, expr sqlparser.Expr) func(e applyJoinColumn) bool { + return func(e applyJoinColumn) bool { + // e.DTColName is how the outside world will be using this expression. So we should check for an equality with that too. + return ctx.SemTable.EqualsExprWithDeps(e.Original, expr) || ctx.SemTable.EqualsExprWithDeps(e.DTColName, expr) } - return offset +} + +func (aj *ApplyJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { + return slices.IndexFunc(aj.JoinColumns.columns, applyJoinCompare(ctx, expr)) } func (aj *ApplyJoin) AddColumn( @@ -234,34 +253,61 @@ func (aj *ApplyJoin) AddColumn( return offset } } - col, err := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) - if err != nil { - panic(err) - } - offset := len(aj.JoinColumns) - aj.JoinColumns = append(aj.JoinColumns, col) + col := aj.getJoinColumnFor(ctx, expr, expr.Expr, groupBy) + offset := len(aj.JoinColumns.columns) + aj.JoinColumns.add(col) return offset } -func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) { - for _, col := range aj.JoinColumns { - // Read the type description for JoinColumn to understand the following code - for _, lhsExpr := range col.LHSExprs { - offset := aj.LHS.AddColumn(ctx, true, col.GroupBy, aeWrap(lhsExpr.Expr)) - if col.RHSExpr == nil { - // if we don't have an RHS expr, it means that this is a pure LHS expression - aj.addOffset(-offset - 1) - } else { - aj.Vars[lhsExpr.Name] = offset - } - } - if col.RHSExpr != nil { - offset := aj.RHS.AddColumn(ctx, true, col.GroupBy, aeWrap(col.RHSExpr)) - aj.addOffset(offset + 1) - } +func (aj 
*ApplyJoin) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + if len(aj.Columns) == 0 { + aj.planOffsets(ctx) + } + + if len(aj.Columns) <= offset { + panic(vterrors.VT13001("offset out of range")) } - for _, col := range aj.JoinPredicates { + wsExpr := weightStringFor(aj.JoinColumns.columns[offset].Original) + if index := aj.FindCol(ctx, wsExpr, false); index != -1 { + // nice, we already have this column. no need to add anything + return index + } + + i := aj.Columns[offset] + out := 0 + if i < 0 { + out = aj.LHS.AddWSColumn(ctx, FromLeftOffset(i), underRoute) + out = ToLeftOffset(out) + aj.JoinColumns.addLeft(wsExpr) + } else { + out = aj.RHS.AddWSColumn(ctx, FromRightOffset(i), underRoute) + out = ToRightOffset(out) + aj.JoinColumns.addRight(wsExpr) + } + + if out >= 0 { + aj.addOffset(out) + } else { + col := aj.getJoinColumnFor(ctx, aeWrap(wsExpr), wsExpr, !ContainsAggr(ctx, wsExpr)) + aj.JoinColumns.add(col) + aj.planOffsetFor(ctx, col) + } + + return len(aj.Columns) - 1 +} + +func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) Operator { + if len(aj.Columns) > 0 { + // we've already done offset planning + return aj + } + for _, col := range aj.JoinColumns.columns { + // Read the type description for applyJoinColumn to understand the following code + aj.planOffsetFor(ctx, col) + } + + for _, col := range aj.JoinPredicates.columns { for _, lhsExpr := range col.LHSExprs { offset := aj.LHS.AddColumn(ctx, true, false, aeWrap(lhsExpr.Expr)) aj.Vars[lhsExpr.Name] = offset @@ -272,6 +318,40 @@ func (aj *ApplyJoin) planOffsets(ctx *plancontext.PlanningContext) { offset := aj.LHS.AddColumn(ctx, true, false, aeWrap(lhsExpr.Expr)) aj.Vars[lhsExpr.Name] = offset } + + return nil +} + +func (aj *ApplyJoin) planOffsetFor(ctx *plancontext.PlanningContext, col applyJoinColumn) { + if col.DTColName != nil { + // If DTColName is set, then we already pushed the parts of the expression down while planning. 
+ // We need to use this name and ask the correct side of the join for it. Nothing else is required. + if col.IsPureLeft() { + offset := aj.LHS.AddColumn(ctx, true, col.GroupBy, aeWrap(col.DTColName)) + aj.addOffset(ToLeftOffset(offset)) + } else { + for _, lhsExpr := range col.LHSExprs { + offset := aj.LHS.AddColumn(ctx, true, col.GroupBy, aeWrap(lhsExpr.Expr)) + aj.Vars[lhsExpr.Name] = offset + } + offset := aj.RHS.AddColumn(ctx, true, col.GroupBy, aeWrap(col.DTColName)) + aj.addOffset(ToRightOffset(offset)) + } + return + } + for _, lhsExpr := range col.LHSExprs { + offset := aj.LHS.AddColumn(ctx, true, col.GroupBy, aeWrap(lhsExpr.Expr)) + if col.RHSExpr == nil { + // if we don't have an RHS expr, it means that this is a pure LHS expression + aj.addOffset(ToLeftOffset(offset)) + } else { + aj.Vars[lhsExpr.Name] = offset + } + } + if col.RHSExpr != nil { + offset := aj.RHS.AddColumn(ctx, true, col.GroupBy, aeWrap(col.RHSExpr)) + aj.addOffset(ToRightOffset(offset)) + } } func (aj *ApplyJoin) addOffset(offset int) { @@ -279,11 +359,14 @@ func (aj *ApplyJoin) addOffset(offset int) { } func (aj *ApplyJoin) ShortDescription() string { - pred := sqlparser.String(aj.Predicate) - columns := slice.Map(aj.JoinColumns, func(from JoinColumn) string { - return sqlparser.String(from.Original) - }) - firstPart := fmt.Sprintf("on %s columns: %s", pred, strings.Join(columns, ", ")) + fn := func(cols *applyJoinColumns) string { + out := slice.Map(cols.columns, func(jc applyJoinColumn) string { + return jc.String() + }) + return strings.Join(out, ", ") + } + + firstPart := fmt.Sprintf("on %s columns: %s", fn(aj.JoinPredicates), fn(aj.JoinColumns)) if len(aj.ExtraLHSVars) == 0 { return firstPart } @@ -293,14 +376,14 @@ func (aj *ApplyJoin) ShortDescription() string { } func (aj *ApplyJoin) isColNameMovedFromL2R(bindVarName string) bool { - for _, jc := range aj.JoinColumns { + for _, jc := range aj.JoinColumns.columns { for _, bve := range jc.LHSExprs { if bve.Name == bindVarName { 
return true } } } - for _, jp := range aj.JoinPredicates { + for _, jp := range aj.JoinPredicates.columns { for _, bve := range jp.LHSExprs { if bve.Name == bindVarName { return true @@ -316,9 +399,9 @@ func (aj *ApplyJoin) isColNameMovedFromL2R(bindVarName string) bool { } // findOrAddColNameBindVarName goes through the JoinColumns and looks for the given colName coming from the LHS of the join -// and returns the argument name if found. if it's not found, a new JoinColumn passing this through will be added -func (aj *ApplyJoin) findOrAddColNameBindVarName(ctx *plancontext.PlanningContext, col *sqlparser.ColName) (string, error) { - for i, thisCol := range aj.JoinColumns { +// and returns the argument name if found. if it's not found, a new applyJoinColumn passing this through will be added +func (aj *ApplyJoin) findOrAddColNameBindVarName(ctx *plancontext.PlanningContext, col *sqlparser.ColName) string { + for i, thisCol := range aj.JoinColumns.columns { idx := slices.IndexFunc(thisCol.LHSExprs, func(e BindVarExpr) bool { return ctx.SemTable.EqualsExpr(e.Expr, col) }) @@ -330,17 +413,17 @@ func (aj *ApplyJoin) findOrAddColNameBindVarName(ctx *plancontext.PlanningContex expr := thisCol.LHSExprs[idx] bvname := ctx.GetReservedArgumentFor(expr.Expr) expr.Name = bvname - aj.JoinColumns[i].LHSExprs[idx] = expr + aj.JoinColumns.columns[i].LHSExprs[idx] = expr } - return thisCol.LHSExprs[idx].Name, nil + return thisCol.LHSExprs[idx].Name } } - for _, thisCol := range aj.JoinPredicates { + for _, thisCol := range aj.JoinPredicates.columns { idx := slices.IndexFunc(thisCol.LHSExprs, func(e BindVarExpr) bool { return ctx.SemTable.EqualsExpr(e.Expr, col) }) if idx != -1 { - return thisCol.LHSExprs[idx].Name, nil + return thisCol.LHSExprs[idx].Name } } @@ -348,7 +431,7 @@ func (aj *ApplyJoin) findOrAddColNameBindVarName(ctx *plancontext.PlanningContex return ctx.SemTable.EqualsExpr(e.Expr, col) }) if idx != -1 { - return aj.ExtraLHSVars[idx].Name, nil + return 
aj.ExtraLHSVars[idx].Name } // we didn't find it, so we need to add it @@ -357,32 +440,40 @@ func (aj *ApplyJoin) findOrAddColNameBindVarName(ctx *plancontext.PlanningContex Name: bvName, Expr: col, }) - return bvName, nil + return bvName } func (a *ApplyJoin) LHSColumnsNeeded(ctx *plancontext.PlanningContext) (needed sqlparser.Exprs) { f := func(from BindVarExpr) sqlparser.Expr { return from.Expr } - for _, jc := range a.JoinColumns { + for _, jc := range a.JoinColumns.columns { needed = append(needed, slice.Map(jc.LHSExprs, f)...) } - for _, jc := range a.JoinPredicates { + for _, jc := range a.JoinPredicates.columns { needed = append(needed, slice.Map(jc.LHSExprs, f)...) } needed = append(needed, slice.Map(a.ExtraLHSVars, f)...) return ctx.SemTable.Uniquify(needed) } -func (jc JoinColumn) IsPureLeft() bool { +func (jc applyJoinColumn) String() string { + rhs := sqlparser.String(jc.RHSExpr) + lhs := slice.Map(jc.LHSExprs, func(e BindVarExpr) string { + return sqlparser.String(e.Expr) + }) + return fmt.Sprintf("[%s | %s | %s]", strings.Join(lhs, ", "), rhs, sqlparser.String(jc.Original)) +} + +func (jc applyJoinColumn) IsPureLeft() bool { return jc.RHSExpr == nil } -func (jc JoinColumn) IsPureRight() bool { +func (jc applyJoinColumn) IsPureRight() bool { return len(jc.LHSExprs) == 0 } -func (jc JoinColumn) IsMixedLeftAndRight() bool { +func (jc applyJoinColumn) IsMixedLeftAndRight() bool { return len(jc.LHSExprs) > 0 && jc.RHSExpr != nil } diff --git a/go/vt/vtgate/planbuilder/operators/ast_to_op.go b/go/vt/vtgate/planbuilder/operators/ast_to_op.go index bc4aaf8a4e6..0d838610866 100644 --- a/go/vt/vtgate/planbuilder/operators/ast_to_op.go +++ b/go/vt/vtgate/planbuilder/operators/ast_to_op.go @@ -21,48 +21,37 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" 
"vitess.io/vitess/go/vt/vtgate/vindexes" ) const foreignKeyConstraintValues = "fkc_vals" +const foreignKeyUpdateExpr = "fkc_upd" // translateQueryToOp creates an operator tree that represents the input SELECT or UNION query -func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) (op ops.Operator, err error) { +func translateQueryToOp(ctx *plancontext.PlanningContext, selStmt sqlparser.Statement) Operator { switch node := selStmt.(type) { case *sqlparser.Select: - op, err = createOperatorFromSelect(ctx, node) + return createOperatorFromSelect(ctx, node) case *sqlparser.Union: - op, err = createOperatorFromUnion(ctx, node) + return createOperatorFromUnion(ctx, node) case *sqlparser.Update: - op, err = createOperatorFromUpdate(ctx, node) + return createOperatorFromUpdate(ctx, node) case *sqlparser.Delete: - op, err = createOperatorFromDelete(ctx, node) + return createOperatorFromDelete(ctx, node) case *sqlparser.Insert: - op, err = createOperatorFromInsert(ctx, node) + return createOperatorFromInsert(ctx, node) default: - err = vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt)) + panic(vterrors.VT12001(fmt.Sprintf("operator: %T", selStmt))) } - if err != nil { - return nil, err - } - - return op, nil } -func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (ops.Operator, error) { - op, err := crossJoin(ctx, sel.From) - if err != nil { - return nil, err - } +func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) Operator { + op := crossJoin(ctx, sel.From) if sel.Where != nil { - op, err = addWherePredicates(ctx, sel.Where.Expr, op) - if err != nil { - return nil, err - } + op = addWherePredicates(ctx, sel.Where.Expr, op) } if sel.Comments != nil || sel.Lock != sqlparser.NoLock { @@ -75,26 +64,28 @@ func createOperatorFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.S op = newHorizon(op, sel) - return op, nil + return op } -func addWherePredicates(ctx 
*plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { +func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) Operator { sqc := &SubQueryBuilder{} + op = addWherePredsToSubQueryBuilder(ctx, expr, op, sqc) + return sqc.getRootOperator(op, nil) +} + +func addWherePredsToSubQueryBuilder(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator, sqc *SubQueryBuilder) Operator { outerID := TableID(op) exprs := sqlparser.SplitAndExpression(nil, expr) for _, expr := range exprs { - sqlparser.RemoveKeyspaceFromColName(expr) - subq, err := sqc.handleSubquery(ctx, expr, outerID) - if err != nil { - return nil, err - } + sqlparser.RemoveKeyspaceInCol(expr) + subq := sqc.handleSubquery(ctx, expr, outerID) if subq != nil { continue } op = op.AddPredicate(ctx, expr) addColumnEquality(ctx, expr) } - return sqc.getRootOperator(op, nil), nil + return op } // cloneASTAndSemState clones the AST and the semantic state of the input node. 
@@ -110,7 +101,7 @@ func cloneASTAndSemState[T sqlparser.SQLNode](ctx *plancontext.PlanningContext, // findTablesContained returns the TableSet of all the contained func findTablesContained(ctx *plancontext.PlanningContext, node sqlparser.SQLNode) (result semantics.TableSet) { - _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { t, ok := node.(*sqlparser.AliasedTableExpr) if !ok { return true, nil @@ -122,32 +113,13 @@ func findTablesContained(ctx *plancontext.PlanningContext, node sqlparser.SQLNod return } -func rewriteRemainingColumns( - ctx *plancontext.PlanningContext, - stmt sqlparser.SelectStatement, - subqID semantics.TableSet, -) sqlparser.SelectStatement { - return sqlparser.CopyOnRewrite(stmt, nil, func(cursor *sqlparser.CopyOnWriteCursor) { - colname, isColname := cursor.Node().(*sqlparser.ColName) - if !isColname { - return - } - deps := ctx.SemTable.RecursiveDeps(colname) - if deps.IsSolvedBy(subqID) { - return - } - rsv := ctx.GetReservedArgumentFor(colname) - cursor.Replace(sqlparser.NewArgument(rsv)) - }, nil).(sqlparser.SelectStatement) -} - // joinPredicateCollector is used to inspect the predicates inside the subquery, looking for any // comparisons between the inner and the outer side. 
// They can be used for merging the two parts of the query together type joinPredicateCollector struct { predicates sqlparser.Exprs remainingPredicates sqlparser.Exprs - joinColumns []JoinColumn + joinColumns []applyJoinColumn totalID, subqID, @@ -157,73 +129,63 @@ type joinPredicateCollector struct { func (jpc *joinPredicateCollector) inspectPredicate( ctx *plancontext.PlanningContext, predicate sqlparser.Expr, -) error { +) { pred := predicate deps := ctx.SemTable.RecursiveDeps(predicate) // if the subquery is not enough, but together we have all we need, // then we can use this predicate to connect the subquery to the outer query if !deps.IsSolvedBy(jpc.subqID) && deps.IsSolvedBy(jpc.totalID) { jpc.predicates = append(jpc.predicates, predicate) - jc, err := BreakExpressionInLHSandRHS(ctx, predicate, jpc.outerID) - if err != nil { - return err - } + jc := breakExpressionInLHSandRHS(ctx, predicate, jpc.outerID) jpc.joinColumns = append(jpc.joinColumns, jc) pred = jc.RHSExpr } jpc.remainingPredicates = append(jpc.remainingPredicates, pred) - return nil } -func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) (ops.Operator, error) { - opLHS, err := translateQueryToOp(ctx, node.Left) - if err != nil { - return nil, err - } - +func createOperatorFromUnion(ctx *plancontext.PlanningContext, node *sqlparser.Union) Operator { _, isRHSUnion := node.Right.(*sqlparser.Union) if isRHSUnion { - return nil, vterrors.VT12001("nesting of UNIONs on the right-hand side") + panic(vterrors.VT12001("nesting of UNIONs on the right-hand side")) } - opRHS, err := translateQueryToOp(ctx, node.Right) - if err != nil { - return nil, err - } - + opLHS := translateQueryToOp(ctx, node.Left) + opRHS := translateQueryToOp(ctx, node.Right) lexprs := ctx.SemTable.SelectExprs(node.Left) rexprs := ctx.SemTable.SelectExprs(node.Right) unionCols := ctx.SemTable.SelectExprs(node) - union := newUnion([]ops.Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, 
unionCols, node.Distinct) - return newHorizon(union, node), nil + union := newUnion([]Operator{opLHS, opRHS}, []sqlparser.SelectExprs{lexprs, rexprs}, unionCols, node.Distinct) + return newHorizon(union, node) } // createOpFromStmt creates an operator from the given statement. It takes in two additional arguments— // 1. verifyAllFKs: For this given statement, do we need to verify validity of all the foreign keys on the vtgate level. // 2. fkToIgnore: The foreign key constraint to specifically ignore while planning the statement. This field is used in UPDATE CASCADE planning, wherein while planning the child update // query, we need to ignore the parent foreign key constraint that caused the cascade in question. -func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) (ops.Operator, error) { - var err error - ctx, err = plancontext.CreatePlanningContext(stmt, ctx.ReservedVars, ctx.VSchema, ctx.PlannerVersion) +func createOpFromStmt(inCtx *plancontext.PlanningContext, stmt sqlparser.Statement, verifyAllFKs bool, fkToIgnore string) Operator { + ctx, err := plancontext.CreatePlanningContext(stmt, inCtx.ReservedVars, inCtx.VSchema, inCtx.PlannerVersion) if err != nil { - return nil, err + panic(err) } // TODO (@GuptaManan100, @harshit-gangal): When we add cross-shard foreign keys support, // we should augment the semantic analysis to also tell us whether the given query has any cross shard parent foreign keys to validate. // If there are, then we have to run the query with FOREIGN_KEY_CHECKS off because we can't be sure if the DML will succeed on MySQL with the checks on. // So, we should set VerifyAllFKs to true. i.e. we should add `|| ctx.SemTable.RequireForeignKeyChecksOff()` to the below condition. - ctx.VerifyAllFKs = verifyAllFKs + if verifyAllFKs { + // If ctx.VerifyAllFKs is already true we don't want to turn it off. 
+ ctx.VerifyAllFKs = verifyAllFKs + } // From all the parent foreign keys involved, we should remove the one that we need to ignore. err = ctx.SemTable.RemoveParentForeignKey(fkToIgnore) if err != nil { - return nil, err + panic(err) } // Now, we can filter the foreign keys further based on the planning context, specifically whether we are running // this query with FOREIGN_KEY_CHECKS off or not. If the foreign key checks are enabled, then we don't need to verify - // the validity of shard-scoped RESTRICT foreign keys, since MySQL will do that for us. Similarily, we don't need to verify + // the validity of shard-scoped RESTRICT foreign keys, since MySQL will do that for us. Similarly, we don't need to verify // if the shard-scoped parent foreign key constraints are valid. switch stmt.(type) { case *sqlparser.Update, *sqlparser.Insert: @@ -232,13 +194,21 @@ func createOpFromStmt(ctx *plancontext.PlanningContext, stmt sqlparser.Statement err = ctx.SemTable.RemoveNonRequiredForeignKeys(ctx.VerifyAllFKs, vindexes.DeleteAction) } if err != nil { - return nil, err + panic(err) } - return PlanQuery(ctx, stmt) + op, err := PlanQuery(ctx, stmt) + if err != nil { + panic(err) + } + + // need to remember which predicates have been broken up during join planning + inCtx.KeepPredicateInfo(ctx) + + return op } -func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) (ops.Operator, error) { +func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, onlyTable bool) Operator { switch tableExpr := tableExpr.(type) { case *sqlparser.AliasedTableExpr: return getOperatorFromAliasedTableExpr(ctx, tableExpr, onlyTable) @@ -247,37 +217,33 @@ func getOperatorFromTableExpr(ctx *plancontext.PlanningContext, tableExpr sqlpar case *sqlparser.ParenTableExpr: return crossJoin(ctx, tableExpr.Exprs) default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr)) + 
panic(vterrors.VT13001(fmt.Sprintf("unable to use: %T table type", tableExpr))) } } -func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) (ops.Operator, error) { - lhs, err := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) - if err != nil { - return nil, err - } - rhs, err := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) - if err != nil { - return nil, err - } +func getOperatorFromJoinTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr) Operator { + lhs := getOperatorFromTableExpr(ctx, tableExpr.LeftExpr, false) + rhs := getOperatorFromTableExpr(ctx, tableExpr.RightExpr, false) switch tableExpr.Join { case sqlparser.NormalJoinType: return createInnerJoin(ctx, tableExpr, lhs, rhs) case sqlparser.LeftJoinType, sqlparser.RightJoinType: - return createOuterJoin(tableExpr, lhs, rhs) + return createLeftOuterJoin(ctx, tableExpr, lhs, rhs) + case sqlparser.StraightJoinType: + return createStraightJoin(ctx, tableExpr, lhs, rhs) default: - return nil, vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString()) + panic(vterrors.VT13001("unsupported: %s", tableExpr.Join.ToString())) } } -func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) (ops.Operator, error) { +func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr *sqlparser.AliasedTableExpr, onlyTable bool) Operator { tableID := ctx.SemTable.TableSetFor(tableExpr) switch tbl := tableExpr.Expr.(type) { case sqlparser.TableName: tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { - return nil, err + panic(err) } if vt, isVindex := tableInfo.(*semantics.VindexTable); isVindex { @@ -291,73 +257,68 @@ func getOperatorFromAliasedTableExpr(ctx *plancontext.PlanningContext, tableExpr }, Vindex: vt.Vindex, Solved: solves, - }, nil + } } qg := newQueryGraph() isInfSchema := tableInfo.IsInfSchema() qt := 
&QueryTable{Alias: tableExpr, Table: tbl, ID: tableID, IsInfSchema: isInfSchema} qg.Tables = append(qg.Tables, qt) - return qg, nil + return qg case *sqlparser.DerivedTable: if onlyTable && tbl.Select.GetLimit() == nil { tbl.Select.SetOrderBy(nil) } - inner, err := translateQueryToOp(ctx, tbl.Select) - if err != nil { - return nil, err - } + inner := translateQueryToOp(ctx, tbl.Select) if horizon, ok := inner.(*Horizon); ok { horizon.TableId = &tableID horizon.Alias = tableExpr.As.String() horizon.ColumnAliases = tableExpr.Columns - qp, err := CreateQPFromSelectStatement(ctx, tbl.Select) - if err != nil { - return nil, err - } + qp := CreateQPFromSelectStatement(ctx, tbl.Select) horizon.QP = qp } - return inner, nil + return inner default: - return nil, vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl)) + panic(vterrors.VT13001(fmt.Sprintf("unable to use: %T", tbl))) } } -func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) (ops.Operator, error) { - var output ops.Operator +func crossJoin(ctx *plancontext.PlanningContext, exprs sqlparser.TableExprs) Operator { + var output Operator for _, tableExpr := range exprs { - op, err := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) - if err != nil { - return nil, err - } + op := getOperatorFromTableExpr(ctx, tableExpr, len(exprs) == 1) if output == nil { output = op } else { output = createJoin(ctx, output, op) } } - return output, nil + return output } -func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparser.TableExpr, whereClause *sqlparser.Where) (semantics.TableInfo, *QueryTable, error) { +func createQueryTableForDML( + ctx *plancontext.PlanningContext, + tableExpr sqlparser.TableExpr, + whereClause *sqlparser.Where, +) (semantics.TableInfo, *QueryTable) { alTbl, ok := tableExpr.(*sqlparser.AliasedTableExpr) if !ok { - return nil, nil, vterrors.VT13001("expected AliasedTableExpr") + panic(vterrors.VT13001("expected AliasedTableExpr")) } tblName, ok := 
alTbl.Expr.(sqlparser.TableName) if !ok { - return nil, nil, vterrors.VT13001("expected TableName") + panic(vterrors.VT13001("expected TableName")) } tableID := ctx.SemTable.TableSetFor(alTbl) tableInfo, err := ctx.SemTable.TableInfoFor(tableID) if err != nil { - return nil, nil, err + panic(err) } if tableInfo.IsInfSchema() { - return nil, nil, vterrors.VT12001("update information schema tables") + panic(vterrors.VT12001("update information schema tables")) } var predicates []sqlparser.Expr @@ -370,7 +331,7 @@ func createQueryTableForDML(ctx *plancontext.PlanningContext, tableExpr sqlparse Table: tblName, Predicates: predicates, } - return tableInfo, qt, nil + return tableInfo, qt } func addColumnEquality(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { @@ -397,27 +358,18 @@ func createSelectionOp( selectExprs []sqlparser.SelectExpr, tableExprs sqlparser.TableExprs, where *sqlparser.Where, + orderBy sqlparser.OrderBy, limit *sqlparser.Limit, lock sqlparser.Lock, -) (ops.Operator, error) { +) Operator { selectionStmt := &sqlparser.Select{ SelectExprs: selectExprs, From: tableExprs, Where: where, + OrderBy: orderBy, Limit: limit, Lock: lock, } // There are no foreign keys to check for a select query, so we can pass anything for verifyAllFKs and fkToIgnore. 
return createOpFromStmt(ctx, selectionStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) } - -func selectParentColumns(fk vindexes.ChildFKInfo, lastOffset int) ([]int, []sqlparser.SelectExpr) { - var cols []int - var exprs []sqlparser.SelectExpr - for _, column := range fk.ParentColumns { - cols = append(cols, lastOffset) - exprs = append(exprs, aeWrap(sqlparser.NewColName(column.String()))) - lastOffset++ - } - return cols, exprs -} diff --git a/go/vt/vtgate/planbuilder/operators/comments.go b/go/vt/vtgate/planbuilder/operators/comments.go index 9ede4b9e0da..7e7749a61b5 100644 --- a/go/vt/vtgate/planbuilder/operators/comments.go +++ b/go/vt/vtgate/planbuilder/operators/comments.go @@ -21,32 +21,31 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // LockAndComment contains any comments or locking directives we want on all queries down from this operator type LockAndComment struct { - Source ops.Operator + Source Operator Comments *sqlparser.ParsedComments Lock sqlparser.Lock } -func (l *LockAndComment) Clone(inputs []ops.Operator) ops.Operator { +func (l *LockAndComment) Clone(inputs []Operator) Operator { klon := *l klon.Source = inputs[0] return &klon } -func (l *LockAndComment) Inputs() []ops.Operator { - return []ops.Operator{l.Source} +func (l *LockAndComment) Inputs() []Operator { + return []Operator{l.Source} } -func (l *LockAndComment) SetInputs(operators []ops.Operator) { +func (l *LockAndComment) SetInputs(operators []Operator) { l.Source = operators[0] } -func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (l *LockAndComment) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { l.Source = l.Source.AddPredicate(ctx, expr) return l } @@ -55,6 +54,10 @@ func (l *LockAndComment) AddColumn(ctx *plancontext.PlanningContext, reuseExisti return 
l.Source.AddColumn(ctx, reuseExisting, addToGroupBy, expr) } +func (l *LockAndComment) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return l.Source.AddWSColumn(ctx, offset, underRoute) +} + func (l *LockAndComment) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { return l.Source.FindCol(ctx, expr, underRoute) } @@ -76,6 +79,6 @@ func (l *LockAndComment) ShortDescription() string { return strings.Join(s, " ") } -func (l *LockAndComment) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (l *LockAndComment) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return l.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index f02444671c1..a3c45e79135 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -17,194 +17,372 @@ limitations under the License. package operators import ( - "fmt" - + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) type Delete struct { - QTable *QueryTable - VTable *vindexes.Table - OwnedVindexQuery string - AST *sqlparser.Delete + *DMLCommon - noInputs noColumns noPredicates } -// Introduces implements the PhysicalOperator interface -func (d *Delete) introducesTableID() semantics.TableSet { - return d.QTable.ID +// Clone implements the Operator interface +func (d *Delete) Clone(inputs []Operator) Operator { + newD := *d + newD.SetInputs(inputs) + return &newD } -// Clone implements the Operator interface -func (d *Delete) Clone([]ops.Operator) ops.Operator { - return &Delete{ - QTable: d.QTable, - VTable: d.VTable, - 
OwnedVindexQuery: d.OwnedVindexQuery, - AST: d.AST, - } +func (d *Delete) Inputs() []Operator { + return []Operator{d.Source} } -func (d *Delete) TablesUsed() []string { - if d.VTable != nil { - return SingleQualifiedIdentifier(d.VTable.Keyspace, d.VTable.Name) +func (d *Delete) SetInputs(inputs []Operator) { + if len(inputs) != 1 { + panic(vterrors.VT13001("unexpected number of inputs for Delete operator")) } - return nil + d.Source = inputs[0] } -func (d *Delete) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (d *Delete) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -func (d *Delete) ShortDescription() string { - return fmt.Sprintf("%s.%s %s", d.VTable.Keyspace.Name, d.VTable.Name.String(), sqlparser.String(d.AST.Where)) +func (d *Delete) TablesUsed() []string { + return SingleQualifiedIdentifier(d.Target.VTable.Keyspace, d.Target.VTable.Name) } -func (d *Delete) Statement() sqlparser.Statement { - return d.AST +func (d *Delete) ShortDescription() string { + return shortDesc(d.Target, d.OwnedVindexQuery) } -func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) - if err != nil { - return nil, err - } +func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (op Operator) { + childFks := ctx.SemTable.GetChildForeignKeysForTargets() - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "delete") - if err != nil { - return nil, err + // We check if delete with input plan is required. DML with input planning is generally + // slower, because it does a selection and then creates a delete statement wherein we have to + // list all the primary key values. 
+ if deleteWithInputPlanningRequired(childFks, deleteStmt) { + return createDeleteWithInputOp(ctx, deleteStmt) } delClone := sqlparser.CloneRefOfDelete(deleteStmt) - // Create the delete operator first. - delOp, err := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) - if err != nil { - return nil, err - } + var vTbl *vindexes.Table + op, vTbl = createDeleteOperator(ctx, deleteStmt) if deleteStmt.Comments != nil { - delOp = &LockAndComment{ - Source: delOp, + op = &LockAndComment{ + Source: op, Comments: deleteStmt.Comments, } } - childFks := ctx.SemTable.GetChildForeignKeysList() - // If there are no foreign key constraints, then we don't need to do anything. + var err error + childFks, err = ctx.SemTable.GetChildForeignKeysForTable(deleteStmt.Targets[0]) + if err != nil { + panic(err) + } + // If there are no foreign key constraints, then we don't need to do anything special. if len(childFks) == 0 { - return delOp, nil + return op } - // If the delete statement has a limit, we don't support it yet. + + return createFkCascadeOpForDelete(ctx, op, delClone, childFks, vTbl) +} + +func deleteWithInputPlanningRequired(childFks []vindexes.ChildFKInfo, deleteStmt *sqlparser.Delete) bool { + if len(deleteStmt.Targets) > 1 { + return true + } + // If there are no foreign keys, we don't need to use delete with input. + if len(childFks) == 0 { + return false + } + // Limit requires delete with input. if deleteStmt.Limit != nil { - return nil, vterrors.VT12001("foreign keys management at vitess with limit") + return true } - - return createFkCascadeOpForDelete(ctx, delOp, delClone, childFks) + // If there are no limit clauses, and it is not a multi-delete, we don't need delete with input. + // TODO: In the future, we can check if the tables involved in the multi-table delete are related by foreign keys or not. + // If they aren't then we don't need the multi-table delete. But this check isn't so straight-forward. 
We need to check if the two + // tables are connected in the undirected graph built from the tables related by foreign keys. + return !deleteStmt.IsSingleAliasExpr() } -func createDeleteOperator( - ctx *plancontext.PlanningContext, - deleteStmt *sqlparser.Delete, - qt *QueryTable, - vindexTable *vindexes.Table, - routing Routing) (ops.Operator, error) { - del := &Delete{ - QTable: qt, - VTable: vindexTable, - AST: deleteStmt, +func createDeleteWithInputOp(ctx *plancontext.PlanningContext, del *sqlparser.Delete) (op Operator) { + delClone := ctx.SemTable.Clone(del).(*sqlparser.Delete) + del.Limit = nil + del.OrderBy = nil + + selectStmt := &sqlparser.Select{ + From: delClone.TableExprs, + Where: delClone.Where, + OrderBy: delClone.OrderBy, + Limit: delClone.Limit, + Lock: sqlparser.ForUpdateLock, } - route := &Route{ - Source: del, - Routing: routing, + + var delOps []dmlOp + for _, target := range ctx.SemTable.Targets.Constituents() { + op := createDeleteOpWithTarget(ctx, target, del.Ignore) + delOps = append(delOps, op) + } + + delOps = sortDmlOps(delOps) + + // now map the operator and column list. 
+ var colsList [][]*sqlparser.ColName + dmls := slice.Map(delOps, func(from dmlOp) Operator { + colsList = append(colsList, from.cols) + for _, col := range from.cols { + selectStmt.SelectExprs = append(selectStmt.SelectExprs, aeWrap(col)) + } + return from.op + }) + + op = &DMLWithInput{ + DML: dmls, + Source: createOperatorFromSelect(ctx, selectStmt), + cols: colsList, } - if !vindexTable.Keyspace.Sharded { - return route, nil + if del.Comments != nil { + op = &LockAndComment{ + Source: op, + Comments: del.Comments, + } + } + return op +} + +// getFirstVindex returns the first Vindex, if available +func getFirstVindex(vTbl *vindexes.Table) vindexes.Vindex { + if len(vTbl.ColumnVindexes) > 0 { + return vTbl.ColumnVindexes[0].Vindex } + return nil +} - primaryVindex, vindexAndPredicates, err := getVindexInformation(qt.ID, vindexTable) +func createDeleteOpWithTarget(ctx *plancontext.PlanningContext, target semantics.TableSet, ignore sqlparser.Ignore) dmlOp { + ti, err := ctx.SemTable.TableInfoFor(target) if err != nil { - return nil, err + panic(vterrors.VT13001(err.Error())) } - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vindexAndPredicates + vTbl := ti.GetVindexTable() + if len(vTbl.PrimaryKey) == 0 { + panic(vterrors.VT09015()) + } + tblName, err := ti.Name() + if err != nil { + panic(err) } - var ovq string - if len(vindexTable.Owned) > 0 { - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As} - ovq = generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns) + var leftComp sqlparser.ValTuple + cols := make([]*sqlparser.ColName, 0, len(vTbl.PrimaryKey)) + for _, col := range vTbl.PrimaryKey { + colName := sqlparser.NewColNameWithQualifier(col.String(), tblName) + cols = append(cols, colName) + leftComp = append(leftComp, colName) + ctx.SemTable.Recursive[colName] = target + } + // optimize for case when there is only single column on left hand side. 
+ var lhs sqlparser.Expr = leftComp + if len(leftComp) == 1 { + lhs = leftComp[0] } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, lhs, sqlparser.ListArg(engine.DmlVals), nil) - del.OwnedVindexQuery = ovq + del := &sqlparser.Delete{ + Ignore: ignore, + TableExprs: sqlparser.TableExprs{ti.GetAliasedTableExpr()}, + Targets: sqlparser.TableNames{tblName}, + Where: sqlparser.NewWhere(sqlparser.WhereClause, compExpr), + } + return dmlOp{ + op: createOperatorFromDelete(ctx, del), + vTbl: vTbl, + cols: cols, + } +} + +func createDeleteOperator(ctx *plancontext.PlanningContext, del *sqlparser.Delete) (Operator, *vindexes.Table) { + op := crossJoin(ctx, del.TableExprs) sqc := &SubQueryBuilder{} - for _, predicate := range qt.Predicates { - if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { - return nil, err - } else if subq != nil { - continue + if del.Where != nil { + op = addWherePredsToSubQueryBuilder(ctx, del.Where.Expr, op, sqc) + } + + tblID, err := ctx.SemTable.GetTargetTableSetForTableName(del.Targets[0]) + if err != nil { + panic(err) + } + tblInfo, err := ctx.SemTable.TableInfoFor(tblID) + if err != nil { + panic(err) + } + + vTbl := tblInfo.GetVindexTable() + // Reference table should delete from the source table. 
+ if vTbl.Type == vindexes.TypeReference && vTbl.Source != nil { + vTbl = updateQueryGraphWithSource(ctx, op, tblID, vTbl) + } + + name, err := tblInfo.Name() + if err != nil { + panic(err) + } + + targetTbl := TargetTable{ + ID: tblID, + VTable: vTbl, + Name: name, + } + + var ovq *sqlparser.Select + if vTbl.Keyspace.Sharded && vTbl.Type == vindexes.TypeTable { + primaryVindex := getVindexInformation(tblID, vTbl) + if len(vTbl.Owned) > 0 { + ovq = generateOwnedVindexQuery(del, targetTbl, primaryVindex.Columns) } - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err + } + + delOp := &Delete{ + DMLCommon: &DMLCommon{ + Ignore: del.Ignore, + Target: targetTbl, + OwnedVindexQuery: ovq, + Source: op, + }, + } + + if del.Limit != nil { + addOrdering(ctx, del.OrderBy, delOp) + delOp.Source = &Limit{ + Source: delOp.Source, + AST: del.Limit, } } - if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard DELETE with LIMIT") + return sqc.getRootOperator(delOp, nil), vTbl +} + +func generateOwnedVindexQuery(del *sqlparser.Delete, table TargetTable, ksidCols []sqlparser.IdentifierCI) *sqlparser.Select { + var selExprs sqlparser.SelectExprs + for _, col := range ksidCols { + colName := makeColName(col, table, sqlparser.MultiTable(del.TableExprs)) + selExprs = append(selExprs, aeWrap(colName)) } + for _, cv := range table.VTable.Owned { + for _, col := range cv.Columns { + colName := makeColName(col, table, sqlparser.MultiTable(del.TableExprs)) + selExprs = append(selExprs, aeWrap(colName)) + } + } + return &sqlparser.Select{ + SelectExprs: selExprs, + OrderBy: del.OrderBy, + Limit: del.Limit, + Lock: sqlparser.ForUpdateLock, + } +} - return sqc.getRootOperator(route, nil), nil +func makeColName(col sqlparser.IdentifierCI, table TargetTable, isMultiTbl bool) 
*sqlparser.ColName { + if isMultiTbl { + return sqlparser.NewColNameWithQualifier(col.String(), table.Name) + } + return sqlparser.NewColName(col.String()) } -func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp ops.Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) (ops.Operator, error) { +func addOrdering(ctx *plancontext.PlanningContext, orderBy sqlparser.OrderBy, op Operator) { + es := &expressionSet{} + ordering := &Ordering{} + ordering.SetInputs(op.Inputs()) + for _, order := range orderBy { + if sqlparser.IsNull(order.Expr) { + // ORDER BY null can safely be ignored + continue + } + if !es.add(ctx, order.Expr) { + continue + } + ordering.Order = append(ordering.Order, OrderBy{ + Inner: sqlparser.CloneRefOfOrder(order), + SimplifiedExpr: order.Expr, + }) + } + if len(ordering.Order) > 0 { + op.SetInputs([]Operator{ordering}) + } +} + +func updateQueryGraphWithSource(ctx *plancontext.PlanningContext, input Operator, tblID semantics.TableSet, vTbl *vindexes.Table) *vindexes.Table { + sourceTable, _, _, _, _, err := ctx.VSchema.FindTableOrVindex(vTbl.Source.TableName) + if err != nil { + panic(err) + } + vTbl = sourceTable + TopDown(input, TableID, func(op Operator, lhsTables semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { + qg, ok := op.(*QueryGraph) + if !ok { + return op, NoRewrite + } + if len(qg.Tables) > 1 { + panic(vterrors.VT12001("DELETE on reference table with join")) + } + for _, tbl := range qg.Tables { + if tbl.ID != tblID { + continue + } + tbl.Alias = sqlparser.NewAliasedTableExpr(sqlparser.NewTableName(vTbl.Name.String()), tbl.Alias.As.String()) + tbl.Table, _ = tbl.Alias.TableName() + } + return op, Rewrote("change query table point to source table") + }, func(operator Operator) VisitRule { + _, ok := operator.(*QueryGraph) + return VisitRule(ok) + }) + return vTbl +} + +func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp Operator, delStmt *sqlparser.Delete, childFks 
[]vindexes.ChildFKInfo, deletedTbl *vindexes.Table) Operator { var fkChildren []*FkChild var selectExprs []sqlparser.SelectExpr + tblName := delStmt.Targets[0] for _, fk := range childFks { // Any RESTRICT type foreign keys that arrive here, // are cross-shard/cross-keyspace RESTRICT cases, which we don't currently support. if fk.OnDelete.IsRestrict() { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. - cols, exprs := selectParentColumns(fk, len(selectExprs)) - selectExprs = append(selectExprs, exprs...) + var offsets []int + offsets, selectExprs = addColumns(ctx, fk.ParentColumns, selectExprs, tblName) - fkChild, err := createFkChildForDelete(ctx, fk, cols) - if err != nil { - return nil, err - } - fkChildren = append(fkChildren, fkChild) - } - selectionOp, err := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, sqlparser.ForUpdateLock) - if err != nil { - return nil, err + fkChildren = append(fkChildren, + createFkChildForDelete(ctx, fk, offsets)) } + selectionOp := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, getUpdateLock(deletedTbl)) return &FkCascade{ Selection: selectionOp, Children: fkChildren, Parent: parentOp, - }, nil + } } -func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) (*FkChild, error) { +func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, cols []int) *FkChild { bvName := ctx.ReservedVars.ReserveVariable(foreignKeyConstraintValues) - + parsedComments := getParsedCommentsForFkChecks(ctx) var childStmt sqlparser.Statement switch fk.OnDelete { case sqlparser.Cascade: @@ -216,6 +394,7 @@ func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildF } compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) childStmt = 
&sqlparser.Delete{ + Comments: parsedComments, TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, } @@ -234,22 +413,20 @@ func createFkChildForDelete(ctx *plancontext.PlanningContext, fk vindexes.ChildF compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) childStmt = &sqlparser.Update{ Exprs: updExprs, + Comments: parsedComments, TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: compExpr}, } case sqlparser.SetDefault: - return nil, vterrors.VT09016() + panic(vterrors.VT09016()) } // For the child statement of a DELETE query, we don't need to verify all the FKs on VTgate or ignore any foreign key explicitly. - childOp, err := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) - if err != nil { - return nil, err - } + childOp := createOpFromStmt(ctx, childStmt, false /* verifyAllFKs */, "" /* fkToIgnore */) return &FkChild{ BVName: bvName, Cols: cols, Op: childOp, - }, nil + } } diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go index 88503514615..9c893a878cd 100644 --- a/go/vt/vtgate/planbuilder/operators/distinct.go +++ b/go/vt/vtgate/planbuilder/operators/distinct.go @@ -21,13 +21,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( Distinct struct { - Source ops.Operator + Source Operator QP *QueryProjection // When we go from AST to operator, we place DISTINCT ops in the required places in the op tree @@ -46,31 +45,27 @@ type ( } ) -func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) { +func (d *Distinct) planOffsets(ctx 
*plancontext.PlanningContext) Operator { columns := d.GetColumns(ctx) for idx, col := range columns { - e, err := d.QP.GetSimplifiedExpr(ctx, col.Expr) - if err != nil { - // ambiguous columns are not a problem for DISTINCT - e = col.Expr - } + e := col.Expr var wsCol *int - typ, _ := ctx.SemTable.TypeForExpr(e) - if ctx.SemTable.NeedsWeightString(e) { - offset := d.Source.AddColumn(ctx, true, false, aeWrap(weightStringFor(e))) + offset := d.Source.AddWSColumn(ctx, idx, false) wsCol = &offset } - + typ, _ := ctx.SemTable.TypeForExpr(e) d.Columns = append(d.Columns, engine.CheckCol{ - Col: idx, - WsCol: wsCol, - Type: typ, + Col: idx, + WsCol: wsCol, + Type: typ, + CollationEnv: ctx.VSchema.Environment().CollationEnv(), }) } + return nil } -func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { +func (d *Distinct) Clone(inputs []Operator) Operator { return &Distinct{ Required: d.Required, Source: inputs[0], @@ -81,15 +76,15 @@ func (d *Distinct) Clone(inputs []ops.Operator) ops.Operator { } } -func (d *Distinct) Inputs() []ops.Operator { - return []ops.Operator{d.Source} +func (d *Distinct) Inputs() []Operator { + return []Operator{d.Source} } -func (d *Distinct) SetInputs(operators []ops.Operator) { +func (d *Distinct) SetInputs(operators []Operator) { d.Source = operators[0] } -func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { d.Source = d.Source.AddPredicate(ctx, expr) return d } @@ -97,6 +92,9 @@ func (d *Distinct) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser func (d *Distinct) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) int { return d.Source.AddColumn(ctx, reuse, gb, expr) } +func (d *Distinct) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return d.Source.AddWSColumn(ctx, offset, underRoute) +} func (d 
*Distinct) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { return d.Source.FindCol(ctx, expr, underRoute) @@ -117,7 +115,7 @@ func (d *Distinct) ShortDescription() string { return "Performance" } -func (d *Distinct) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (d *Distinct) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return d.Source.GetOrdering(ctx) } diff --git a/go/vt/vtgate/planbuilder/operators/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go index 8f87a71c95f..866c308956c 100644 --- a/go/vt/vtgate/planbuilder/operators/dml_planning.go +++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go @@ -18,142 +18,79 @@ package operators import ( "fmt" - - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "sort" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -// getVindexInformation returns the vindex and VindexPlusPredicates for the DML, -// If it cannot find a unique vindex match, it returns an error. -func getVindexInformation(id semantics.TableSet, table *vindexes.Table) ( - *vindexes.ColumnVindex, - []*VindexPlusPredicates, - error) { - - // Check that we have a primary vindex which is valid - if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { - return nil, nil, vterrors.VT09001(table.Name) - } - primaryVindex := table.ColumnVindexes[0] - - var vindexesAndPredicates []*VindexPlusPredicates - for _, colVindex := range table.Ordered { - if lu, isLu := colVindex.Vindex.(vindexes.LookupBackfill); isLu && lu.IsBackfilling() { - // Checking if the Vindex is currently backfilling or not, if it isn't we can read from the vindex table, - // and we will be able to do a delete equal. Otherwise, we continue to look for next best vindex. 
- continue - } - - vindexesAndPredicates = append(vindexesAndPredicates, &VindexPlusPredicates{ - ColVindex: colVindex, - TableID: id, - }) - } - return primaryVindex, vindexesAndPredicates, nil +type DMLCommon struct { + Ignore sqlparser.Ignore + Target TargetTable + OwnedVindexQuery *sqlparser.Select + Source Operator } -func buildChangedVindexesValues(ctx *plancontext.PlanningContext, update *sqlparser.Update, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI, assignments []SetExpr) (vv map[string]*engine.VindexValues, ownedVindexQuery string, subQueriesArgOnChangedVindex []string, err error) { - changedVindexes := make(map[string]*engine.VindexValues) - buf, offset := initialQuery(ksidCols, table) - for i, vindex := range table.ColumnVindexes { - vindexValueMap := make(map[string]evalengine.Expr) - first := true - for _, vcol := range vindex.Columns { - // Searching in order of columns in colvindex. - found := false - for _, assignment := range assignments { - if !vcol.Equal(assignment.Name.Name) { - continue - } - if found { - return nil, "", nil, vterrors.VT03015(assignment.Name.Name) - } - found = true - pv, err := evalengine.Translate(assignment.Expr.EvalExpr, &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, - }) - if err != nil { - return nil, "", nil, invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr) - } +type TargetTable struct { + ID semantics.TableSet + VTable *vindexes.Table + Name sqlparser.TableName +} - if assignment.Expr.Info != nil { - sqe, ok := assignment.Expr.Info.(SubQueryExpression) - if ok { - for _, sq := range sqe { - subQueriesArgOnChangedVindex = append(subQueriesArgOnChangedVindex, sq.ArgName) - } - } - } +// dmlOp stores intermediary value for Update/Delete Operator with the vindexes. Table for ordering. 
+type dmlOp struct { + op Operator + vTbl *vindexes.Table + cols []*sqlparser.ColName + updList updList +} - vindexValueMap[vcol.String()] = pv - if first { - buf.Myprintf(", %s", assignment.String()) - first = false - } else { - buf.Myprintf(" and %s", assignment.String()) - } - } - } - if len(vindexValueMap) == 0 { - // Vindex not changing, continue - continue +// sortDmlOps sort the operator based on sharding vindex type. +// Unsharded < Lookup Vindex < Any +// This is needed to ensure all the rows are deleted from unowned sharding tables first. +// Otherwise, those table rows will be missed from getting deleted as +// the owned table row won't have matching values. +func sortDmlOps(dmlOps []dmlOp) []dmlOp { + sort.Slice(dmlOps, func(i, j int) bool { + a, b := dmlOps[i], dmlOps[j] + // Get the first Vindex of a and b, if available + aVdx, bVdx := getFirstVindex(a.vTbl), getFirstVindex(b.vTbl) + + // Sort nil Vindexes to the start + if aVdx == nil || bVdx == nil { + return aVdx != nil // true if bVdx is nil and aVdx is not nil } - if update.Limit != nil && len(update.OrderBy) == 0 { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: %v", vindex.Name)) - } - if i == 0 { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name)) - } - if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return nil, "", nil, vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name)) - } - changedVindexes[vindex.Name] = &engine.VindexValues{ - EvalExprMap: vindexValueMap, - Offset: offset, - } - offset++ - } - if len(changedVindexes) == 0 { - return nil, "", nil, nil - } - // generate rest of the owned vindex query. 
- aTblExpr, ok := update.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !ok { - return nil, "", nil, vterrors.VT12001("UPDATE on complex table expression") - } - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: table.Name}, As: aTblExpr.As} - buf.Myprintf(" from %v%v%v%v for update", tblExpr, update.Where, update.OrderBy, update.Limit) - return changedVindexes, buf.String(), subQueriesArgOnChangedVindex, nil + // Among non-nil Vindexes, those that need VCursor come first + return aVdx.NeedsVCursor() && !bVdx.NeedsVCursor() + }) + return dmlOps } -func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (*sqlparser.TrackedBuffer, int) { - buf := sqlparser.NewTrackedBuffer(nil) - offset := 0 - for _, col := range ksidCols { - if offset == 0 { - buf.Myprintf("select %v", col) - } else { - buf.Myprintf(", %v", col) +func shortDesc(target TargetTable, ovq *sqlparser.Select) string { + ovqString := "" + if ovq != nil { + var cols, orderby, limit string + cols = fmt.Sprintf("COLUMNS: [%s]", sqlparser.String(ovq.SelectExprs)) + if len(ovq.OrderBy) > 0 { + orderby = fmt.Sprintf(" ORDERBY: [%s]", sqlparser.String(ovq.OrderBy)) } - offset++ - } - for _, cv := range table.Owned { - for _, column := range cv.Columns { - buf.Myprintf(", %v", column) - offset++ + if ovq.Limit != nil { + limit = fmt.Sprintf(" LIMIT: [%s]", sqlparser.String(ovq.Limit)) } + ovqString = fmt.Sprintf(" vindexQuery(%s%s%s)", cols, orderby, limit) } - return buf, offset + return fmt.Sprintf("%s.%s%s", target.VTable.Keyspace.Name, target.VTable.Name.String(), ovqString) } -func invalidUpdateExpr(upd string, expr sqlparser.Expr) error { - return vterrors.VT12001(fmt.Sprintf("only values are supported; invalid update on column: `%s` with expr: [%s]", upd, sqlparser.String(expr))) +// getVindexInformation returns the vindex and VindexPlusPredicates for the DML, +// If it cannot find a unique vindex match, it returns an error. 
+func getVindexInformation(id semantics.TableSet, table *vindexes.Table) *vindexes.ColumnVindex { + // Check that we have a primary vindex which is valid + if len(table.ColumnVindexes) == 0 || !table.ColumnVindexes[0].IsUnique() { + panic(vterrors.VT09001(table.Name)) + } + return table.ColumnVindexes[0] } diff --git a/go/vt/vtgate/planbuilder/operators/dml_with_input.go b/go/vt/vtgate/planbuilder/operators/dml_with_input.go new file mode 100644 index 00000000000..09859b90bac --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/dml_with_input.go @@ -0,0 +1,120 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +// DMLWithInput is used to represent a DML Operator taking input from a Source Operator +type DMLWithInput struct { + Source Operator + + DML []Operator + cols [][]*sqlparser.ColName + Offsets [][]int + + updList []updList + BvList []map[string]int + + noColumns + noPredicates +} + +func (d *DMLWithInput) Clone(inputs []Operator) Operator { + newD := *d + newD.SetInputs(inputs) + return &newD +} + +func (d *DMLWithInput) Inputs() []Operator { + return append([]Operator{d.Source}, d.DML...) 
+} + +func (d *DMLWithInput) SetInputs(inputs []Operator) { + if len(inputs) < 2 { + panic("unexpected number of inputs for DMLWithInput operator") + } + d.Source = inputs[0] + d.DML = inputs[1:] +} + +func (d *DMLWithInput) ShortDescription() string { + colStrings := "" + for idx, columns := range d.cols { + var offsets []int + if len(d.Offsets) > idx { + offsets = d.Offsets[idx] + } + colStrings += fmt.Sprintf("[%s]", getShortDesc(columns, offsets)) + } + return colStrings +} + +func getShortDesc(cols []*sqlparser.ColName, offsets []int) string { + colStrings := slice.Map(cols, func(from *sqlparser.ColName) string { + return sqlparser.String(from) + }) + out := "" + for idx, colString := range colStrings { + out += colString + if len(offsets) > idx { + out += fmt.Sprintf(":%d", offsets[idx]) + } + out += " " + } + return out +} + +func (d *DMLWithInput) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { + return nil +} + +func (d *DMLWithInput) planOffsets(ctx *plancontext.PlanningContext) Operator { + // go through the primary key columns to get offset from the input + offsets := make([][]int, len(d.cols)) + for idx, columns := range d.cols { + for _, col := range columns { + offset := d.Source.AddColumn(ctx, true, false, aeWrap(col)) + offsets[idx] = append(offsets[idx], offset) + } + } + d.Offsets = offsets + + // go through the update list and get offset for input columns + bvList := make([]map[string]int, len(d.updList)) + for idx, ul := range d.updList { + vars := make(map[string]int) + for _, updCol := range ul { + for _, bvExpr := range updCol.jc.LHSExprs { + offset := d.Source.AddColumn(ctx, true, false, aeWrap(bvExpr.Expr)) + vars[bvExpr.Name] = offset + } + } + if len(vars) > 0 { + bvList[idx] = vars + } + } + d.BvList = bvList + return d +} + +var _ Operator = (*DMLWithInput)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/expressions.go b/go/vt/vtgate/planbuilder/operators/expressions.go index 7ab27e787e8..a39ae96fa88 100644 --- 
a/go/vt/vtgate/planbuilder/operators/expressions.go +++ b/go/vt/vtgate/planbuilder/operators/expressions.go @@ -22,16 +22,16 @@ import ( "vitess.io/vitess/go/vt/vtgate/semantics" ) -// BreakExpressionInLHSandRHS takes an expression and +// breakExpressionInLHSandRHS takes an expression and // extracts the parts that are coming from one of the sides into `ColName`s that are needed -func BreakExpressionInLHSandRHS( +func breakExpressionInLHSandRHS( ctx *plancontext.PlanningContext, expr sqlparser.Expr, lhs semantics.TableSet, -) (col JoinColumn, err error) { +) (col applyJoinColumn) { rewrittenExpr := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { nodeExpr, ok := cursor.Node().(sqlparser.Expr) - if !ok || !fetchByOffset(nodeExpr) { + if !ok || !mustFetchFromInput(ctx, nodeExpr) { return } deps := ctx.SemTable.RecursiveDeps(nodeExpr) @@ -51,10 +51,7 @@ func BreakExpressionInLHSandRHS( cursor.Replace(arg) }, nil).(sqlparser.Expr) - if err != nil { - return JoinColumn{}, err - } - ctx.JoinPredicates[expr] = append(ctx.JoinPredicates[expr], rewrittenExpr) col.RHSExpr = rewrittenExpr + col.Original = expr return } diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go index ed43910b75d..babc309db72 100644 --- a/go/vt/vtgate/planbuilder/operators/filter.go +++ b/go/vt/vtgate/planbuilder/operators/filter.go @@ -24,14 +24,12 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type Filter struct { - Source ops.Operator + Source Operator Predicates []sqlparser.Expr // PredicateWithOffsets is the evalengine expression that will finally be used. 
@@ -41,14 +39,18 @@ type Filter struct { Truncate int } -func newFilter(op ops.Operator, expr sqlparser.Expr) ops.Operator { +func newFilterSinglePredicate(op Operator, expr sqlparser.Expr) Operator { + return newFilter(op, expr) +} + +func newFilter(op Operator, expr ...sqlparser.Expr) Operator { return &Filter{ - Source: op, Predicates: []sqlparser.Expr{expr}, + Source: op, Predicates: expr, } } // Clone implements the Operator interface -func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { +func (f *Filter) Clone(inputs []Operator) Operator { return &Filter{ Source: inputs[0], Predicates: slices.Clone(f.Predicates), @@ -58,12 +60,12 @@ func (f *Filter) Clone(inputs []ops.Operator) ops.Operator { } // Inputs implements the Operator interface -func (f *Filter) Inputs() []ops.Operator { - return []ops.Operator{f.Source} +func (f *Filter) Inputs() []Operator { + return []Operator{f.Source} } // SetInputs implements the Operator interface -func (f *Filter) SetInputs(ops []ops.Operator) { +func (f *Filter) SetInputs(ops []Operator) { f.Source = ops[0] } @@ -80,7 +82,7 @@ func (f *Filter) UnsolvedPredicates(st *semantics.SemTable) []sqlparser.Expr { return result } -func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (f *Filter) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { f.Source = f.Source.AddPredicate(ctx, expr) return f } @@ -93,6 +95,10 @@ func (f *Filter) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, return f.Source.FindCol(ctx, expr, underRoute) } +func (f *Filter) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return f.Source.AddWSColumn(ctx, offset, underRoute) +} + func (f *Filter) GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr { return f.Source.GetColumns(ctx) } @@ -101,28 +107,29 @@ func (f *Filter) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return 
f.Source.GetSelectExprs(ctx) } -func (f *Filter) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (f *Filter) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return f.Source.GetOrdering(ctx) } -func (f *Filter) Compact(*plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (f *Filter) Compact(*plancontext.PlanningContext) (Operator, *ApplyResult) { if len(f.Predicates) == 0 { - return f.Source, rewrite.NewTree("filter with no predicates removed", f), nil + return f.Source, Rewrote("filter with no predicates removed") } other, isFilter := f.Source.(*Filter) if !isFilter { - return f, rewrite.SameTree, nil + return f, NoRewrite } f.Source = other.Source f.Predicates = append(f.Predicates, other.Predicates...) - return f, rewrite.NewTree("two filters merged into one", f), nil + return f, Rewrote("two filters merged into one") } -func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) { +func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) Operator { cfg := &evalengine.Config{ ResolveType: ctx.SemTable.TypeForExpr, Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), } predicate := sqlparser.AndExpressions(f.Predicates...) 
@@ -136,6 +143,7 @@ func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) { } f.PredicateWithOffsets = eexpr + return nil } func (f *Filter) ShortDescription() string { diff --git a/go/vt/vtgate/planbuilder/operators/fk_cascade.go b/go/vt/vtgate/planbuilder/operators/fk_cascade.go index 90c797d55e8..f24b59ca5ab 100644 --- a/go/vt/vtgate/planbuilder/operators/fk_cascade.go +++ b/go/vt/vtgate/planbuilder/operators/fk_cascade.go @@ -19,15 +19,16 @@ package operators import ( "slices" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // FkChild is used to represent a foreign key child table operation type FkChild struct { - BVName string - Cols []int // indexes - Op ops.Operator + BVName string + Cols []int // indexes + NonLiteralInfo []engine.NonLiteralUpdateInfo + Op Operator noColumns noPredicates @@ -37,19 +38,19 @@ type FkChild struct { // as an operator. This operator is created for DML queries that require // cascades (for example, ON DELETE CASCADE). 
type FkCascade struct { - Selection ops.Operator + Selection Operator Children []*FkChild - Parent ops.Operator + Parent Operator noColumns noPredicates } -var _ ops.Operator = (*FkCascade)(nil) +var _ Operator = (*FkCascade)(nil) // Inputs implements the Operator interface -func (fkc *FkCascade) Inputs() []ops.Operator { - var inputs []ops.Operator +func (fkc *FkCascade) Inputs() []Operator { + var inputs []Operator inputs = append(inputs, fkc.Parent) inputs = append(inputs, fkc.Selection) for _, child := range fkc.Children { @@ -59,7 +60,7 @@ func (fkc *FkCascade) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (fkc *FkCascade) SetInputs(operators []ops.Operator) { +func (fkc *FkCascade) SetInputs(operators []Operator) { if len(operators) < 2 { panic("incorrect count of inputs for FkCascade") } @@ -74,7 +75,7 @@ func (fkc *FkCascade) SetInputs(operators []ops.Operator) { } // Clone implements the Operator interface -func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { +func (fkc *FkCascade) Clone(inputs []Operator) Operator { if len(inputs) < 2 { panic("incorrect count of inputs for FkCascade") } @@ -88,16 +89,17 @@ func (fkc *FkCascade) Clone(inputs []ops.Operator) ops.Operator { } newFkc.Children = append(newFkc.Children, &FkChild{ - BVName: fkc.Children[idx-2].BVName, - Cols: slices.Clone(fkc.Children[idx-2].Cols), - Op: operator, + BVName: fkc.Children[idx-2].BVName, + Cols: slices.Clone(fkc.Children[idx-2].Cols), + NonLiteralInfo: slices.Clone(fkc.Children[idx-2].NonLiteralInfo), + Op: operator, }) } return newFkc } // GetOrdering implements the Operator interface -func (fkc *FkCascade) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (fkc *FkCascade) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/fk_verify.go b/go/vt/vtgate/planbuilder/operators/fk_verify.go index 39e1092c8d9..8275a8d462f 100644 --- 
a/go/vt/vtgate/planbuilder/operators/fk_verify.go +++ b/go/vt/vtgate/planbuilder/operators/fk_verify.go @@ -17,14 +17,13 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // VerifyOp keeps the information about the foreign key verification operation. // It is a Parent verification or a Child verification. type VerifyOp struct { - Op ops.Operator + Op Operator Typ string } @@ -33,17 +32,17 @@ type VerifyOp struct { // verifications on the existence of the rows in the parent table (for example, INSERT and UPDATE). type FkVerify struct { Verify []*VerifyOp - Input ops.Operator + Input Operator noColumns noPredicates } -var _ ops.Operator = (*FkVerify)(nil) +var _ Operator = (*FkVerify)(nil) // Inputs implements the Operator interface -func (fkv *FkVerify) Inputs() []ops.Operator { - inputs := []ops.Operator{fkv.Input} +func (fkv *FkVerify) Inputs() []Operator { + inputs := []Operator{fkv.Input} for _, v := range fkv.Verify { inputs = append(inputs, v.Op) } @@ -51,7 +50,7 @@ func (fkv *FkVerify) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (fkv *FkVerify) SetInputs(operators []ops.Operator) { +func (fkv *FkVerify) SetInputs(operators []Operator) { fkv.Input = operators[0] if len(fkv.Verify) != len(operators)-1 { panic("mismatched number of verify inputs") @@ -62,7 +61,7 @@ func (fkv *FkVerify) SetInputs(operators []ops.Operator) { } // Clone implements the Operator interface -func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { +func (fkv *FkVerify) Clone(inputs []Operator) Operator { newFkv := &FkVerify{ Verify: fkv.Verify, } @@ -71,7 +70,7 @@ func (fkv *FkVerify) Clone(inputs []ops.Operator) ops.Operator { } // GetOrdering implements the Operator interface -func (fkv *FkVerify) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (fkv *FkVerify) 
GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } diff --git a/go/vt/vtgate/planbuilder/operators/fuzz.go b/go/vt/vtgate/planbuilder/operators/fuzz.go index 6ee6b0bab83..c92810e3ae8 100644 --- a/go/vt/vtgate/planbuilder/operators/fuzz.go +++ b/go/vt/vtgate/planbuilder/operators/fuzz.go @@ -30,7 +30,7 @@ func FuzzAnalyse(data []byte) int { if err != nil { return 0 } - tree, err := sqlparser.Parse(query) + tree, err := sqlparser.NewTestParser().Parse(query) if err != nil { return -1 } diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go new file mode 100644 index 00000000000..d2ba6522691 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/hash_join.go @@ -0,0 +1,494 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + "slices" + "strings" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + HashJoin struct { + LHS, RHS Operator + + // LeftJoin will be true in the case of an outer join + LeftJoin bool + + // Before offset planning + JoinComparisons []Comparison + + // These columns are the output columns of the hash join. 
While in operator mode we keep track of complex expression, + // but once we move to the engine primitives, the hash join only passes through column from either left or right. + // anything more complex will be solved by a projection on top of the hash join + columns *hashJoinColumns + + // After offset planning + + // Columns stores the column indexes of the columns coming from the left and right side + // negative value comes from LHS and positive from RHS + ColumnOffsets []int + + // These are the values that will be hashed together + LHSKeys, RHSKeys []int + + offset bool + } + + Comparison struct { + LHS, RHS sqlparser.Expr + } + + hashJoinColumn struct { + side joinSide + expr sqlparser.Expr + } + + joinSide int +) + +const ( + Unknown joinSide = iota + Left + Right +) + +var _ Operator = (*HashJoin)(nil) +var _ JoinOp = (*HashJoin)(nil) + +func NewHashJoin(lhs, rhs Operator, outerJoin bool) *HashJoin { + hj := &HashJoin{ + LHS: lhs, + RHS: rhs, + LeftJoin: outerJoin, + columns: &hashJoinColumns{}, + } + return hj +} + +func (hj *HashJoin) Clone(inputs []Operator) Operator { + kopy := *hj + kopy.LHS, kopy.RHS = inputs[0], inputs[1] + kopy.columns = hj.columns.clone() + kopy.LHSKeys = slices.Clone(hj.LHSKeys) + kopy.RHSKeys = slices.Clone(hj.RHSKeys) + kopy.JoinComparisons = slices.Clone(hj.JoinComparisons) + return &kopy +} + +func (hj *HashJoin) Inputs() []Operator { + return []Operator{hj.LHS, hj.RHS} +} + +func (hj *HashJoin) SetInputs(operators []Operator) { + hj.LHS, hj.RHS = operators[0], operators[1] +} + +func (hj *HashJoin) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { + return AddPredicate(ctx, hj, expr, false, newFilterSinglePredicate) +} + +func (hj *HashJoin) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int { + if reuseExisting { + offset := hj.FindCol(ctx, expr.Expr, false) + if offset >= 0 { + return offset + } + } + + hj.columns.add(expr.Expr) + 
return len(hj.columns.columns) - 1 +} + +func (hj *HashJoin) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + hj.planOffsets(ctx) + + if len(hj.ColumnOffsets) <= offset { + panic(vterrors.VT13001("offset out of range")) + } + + // check if it already exists + wsExpr := weightStringFor(hj.columns.columns[offset].expr) + if index := hj.FindCol(ctx, wsExpr, false); index != -1 { + return index + } + + i := hj.ColumnOffsets[offset] + out := 0 + if i < 0 { + out = hj.LHS.AddWSColumn(ctx, FromLeftOffset(i), underRoute) + out = ToLeftOffset(out) + } else { + out = hj.RHS.AddWSColumn(ctx, FromRightOffset(i), underRoute) + out = ToRightOffset(out) + } + hj.ColumnOffsets = append(hj.ColumnOffsets, out) + return len(hj.ColumnOffsets) - 1 +} + +func (hj *HashJoin) planOffsets(ctx *plancontext.PlanningContext) Operator { + if hj.offset { + return nil + } + hj.offset = true + for _, cmp := range hj.JoinComparisons { + lOffset := hj.LHS.AddColumn(ctx, true, false, aeWrap(cmp.LHS)) + hj.LHSKeys = append(hj.LHSKeys, lOffset) + rOffset := hj.RHS.AddColumn(ctx, true, false, aeWrap(cmp.RHS)) + hj.RHSKeys = append(hj.RHSKeys, rOffset) + } + + needsProj := false + lID := TableID(hj.LHS) + rID := TableID(hj.RHS) + eexprs := slice.Map(hj.columns.columns, func(in hashJoinColumn) *ProjExpr { + var column *ProjExpr + var pureOffset bool + + switch in.side { + case Unknown: + column, pureOffset = hj.addColumn(ctx, in.expr) + case Left: + column, pureOffset = hj.addSingleSidedColumn(ctx, in.expr, lID, hj.LHS, lhsOffset) + case Right: + column, pureOffset = hj.addSingleSidedColumn(ctx, in.expr, rID, hj.RHS, rhsOffset) + default: + panic("not expected") + } + if !pureOffset { + needsProj = true + } + return column + }) + + if !needsProj { + return nil + } + proj := newAliasedProjection(hj) + proj.addProjExpr(eexprs...) 
+ return proj +} + +func (hj *HashJoin) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { + for offset, col := range hj.columns.columns { + if ctx.SemTable.EqualsExprWithDeps(expr, col.expr) { + return offset + } + } + return -1 +} + +func (hj *HashJoin) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { + return slice.Map(hj.columns.columns, func(from hashJoinColumn) *sqlparser.AliasedExpr { + return aeWrap(from.expr) + }) +} + +func (hj *HashJoin) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs { + return transformColumnsToSelectExprs(ctx, hj) +} + +func (hj *HashJoin) ShortDescription() string { + comparisons := slice.Map(hj.JoinComparisons, func(from Comparison) string { + return from.String() + }) + cmp := strings.Join(comparisons, " AND ") + + if len(hj.columns.columns) > 0 { + cols := slice.Map(hj.columns.columns, func(from hashJoinColumn) (result string) { + switch from.side { + case Unknown: + result = "U" + case Left: + result = "L" + case Right: + result = "R" + } + result += fmt.Sprintf("(%s)", sqlparser.String(from.expr)) + return + }) + return fmt.Sprintf("%s columns [%v]", cmp, strings.Join(cols, ", ")) + } + + return cmp +} + +func (hj *HashJoin) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { + return nil // hash joins will never promise an output order +} + +func (hj *HashJoin) GetLHS() Operator { + return hj.LHS +} + +func (hj *HashJoin) GetRHS() Operator { + return hj.RHS +} + +func (hj *HashJoin) SetLHS(op Operator) { + hj.LHS = op +} + +func (hj *HashJoin) SetRHS(op Operator) { + hj.RHS = op +} + +func (hj *HashJoin) MakeInner() { + hj.LeftJoin = false +} + +func (hj *HashJoin) IsInner() bool { + return !hj.LeftJoin +} + +func (hj *HashJoin) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { + cmp, ok := expr.(*sqlparser.ComparisonExpr) + if !ok || !canBeSolvedWithHashJoin(cmp.Operator) { + panic(vterrors.VT12001(fmt.Sprintf("can't use [%s] with 
hash joins", sqlparser.String(expr)))) + } + lExpr := cmp.Left + lDeps := ctx.SemTable.RecursiveDeps(lExpr) + rExpr := cmp.Right + rDeps := ctx.SemTable.RecursiveDeps(rExpr) + lID := TableID(hj.LHS) + rID := TableID(hj.RHS) + if !lDeps.IsSolvedBy(lID) || !rDeps.IsSolvedBy(rID) { + // we'll switch and see if things work out then + lExpr, rExpr = rExpr, lExpr + lDeps, rDeps = rDeps, lDeps + } + + if !lDeps.IsSolvedBy(lID) || !rDeps.IsSolvedBy(rID) { + panic(vterrors.VT12001(fmt.Sprintf("can't use [%s] with hash joins", sqlparser.String(expr)))) + } + + hj.JoinComparisons = append(hj.JoinComparisons, Comparison{ + LHS: lExpr, + RHS: rExpr, + }) +} + +func canBeSolvedWithHashJoin(op sqlparser.ComparisonExprOperator) bool { + switch op { + case sqlparser.EqualOp, sqlparser.NullSafeEqualOp: + return true + default: + return false + } +} + +func (c Comparison) String() string { + return sqlparser.String(c.LHS) + " = " + sqlparser.String(c.RHS) +} +func lhsOffset(i int) int { return (i * -1) - 1 } +func rhsOffset(i int) int { return i + 1 } +func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Expr) (*ProjExpr, bool) { + lId, rId := TableID(hj.LHS), TableID(hj.RHS) + r := new(replacer) // this is the expression we will put in instead of whatever we find there + pre := func(node, parent sqlparser.SQLNode) bool { + expr, ok := node.(sqlparser.Expr) + if !ok { + return true + } + deps := ctx.SemTable.RecursiveDeps(expr) + check := func(id semantics.TableSet, op Operator, offsetter func(int) int) int { + if !deps.IsSolvedBy(id) { + return -1 + } + inOffset := op.FindCol(ctx, expr, false) + if inOffset == -1 { + if !mustFetchFromInput(ctx, expr) { + return -1 + } + + // aha! this is an expression that we have to get from the input. let's force it in there + inOffset = op.AddColumn(ctx, false, false, aeWrap(expr)) + } + + // we turn the + internalOffset := offsetter(inOffset) + + // ok, we have an offset from the input operator. 
Let's check if we already have it + // in our list of incoming columns + + for idx, offset := range hj.ColumnOffsets { + if internalOffset == offset { + return idx + } + } + + hj.ColumnOffsets = append(hj.ColumnOffsets, internalOffset) + + return len(hj.ColumnOffsets) - 1 + } + + if lOffset := check(lId, hj.LHS, lhsOffset); lOffset >= 0 { + r.replaceExpr = sqlparser.NewOffset(lOffset, expr) + return false // we want to stop going down the expression tree and start coming back up again + } + + if rOffset := check(rId, hj.RHS, rhsOffset); rOffset >= 0 { + r.replaceExpr = sqlparser.NewOffset(rOffset, expr) + return false + } + + return true + } + + rewrittenExpr := sqlparser.CopyOnRewrite(in, pre, r.post, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) + cfg := &evalengine.Config{ + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), + } + eexpr, err := evalengine.Translate(rewrittenExpr, cfg) + if err != nil { + panic(err) + } + + _, isPureOffset := rewrittenExpr.(*sqlparser.Offset) + + return &ProjExpr{ + Original: aeWrap(in), + EvalExpr: rewrittenExpr, + ColExpr: rewrittenExpr, + Info: &EvalEngine{EExpr: eexpr}, + }, isPureOffset +} + +// JoinPredicate produces an AST representation of the join condition this join has +func (hj *HashJoin) JoinPredicate() sqlparser.Expr { + exprs := slice.Map(hj.JoinComparisons, func(from Comparison) sqlparser.Expr { + return &sqlparser.ComparisonExpr{ + Left: from.LHS, + Right: from.RHS, + } + }) + return sqlparser.AndExpressions(exprs...) 
+} + +type replacer struct { + replaceExpr sqlparser.Expr +} + +func (r *replacer) post(cursor *sqlparser.CopyOnWriteCursor) { + if r.replaceExpr != nil { + node := cursor.Node() + _, ok := node.(sqlparser.Expr) + if !ok { + panic(fmt.Sprintf("can't replace this node with an expression: %s", sqlparser.String(node))) + } + cursor.Replace(r.replaceExpr) + r.replaceExpr = nil + } +} + +func (hj *HashJoin) addSingleSidedColumn( + ctx *plancontext.PlanningContext, + in sqlparser.Expr, + tableID semantics.TableSet, + op Operator, + offsetter func(int) int, +) (*ProjExpr, bool) { + r := new(replacer) + pre := func(node, parent sqlparser.SQLNode) bool { + expr, ok := node.(sqlparser.Expr) + if !ok { + return true + } + deps := ctx.SemTable.RecursiveDeps(expr) + check := func(op Operator) int { + if !deps.IsSolvedBy(tableID) { + return -1 + } + inOffset := op.FindCol(ctx, expr, false) + if inOffset == -1 { + if !mustFetchFromInput(ctx, expr) { + return -1 + } + + // aha! this is an expression that we have to get from the input. let's force it in there + inOffset = op.AddColumn(ctx, false, false, aeWrap(expr)) + } + + // we have to turn the incoming offset to an outgoing offset of the columns this operator is exposing + internalOffset := offsetter(inOffset) + + // ok, we have an offset from the input operator. 
Let's check if we already have it + // in our list of incoming columns + for idx, offset := range hj.ColumnOffsets { + if internalOffset == offset { + return idx + } + } + + hj.ColumnOffsets = append(hj.ColumnOffsets, internalOffset) + + return len(hj.ColumnOffsets) - 1 + } + + if offset := check(op); offset >= 0 { + r.replaceExpr = sqlparser.NewOffset(offset, expr) + return false // we want to stop going down the expression tree and start coming back up again + } + + return true + } + + rewrittenExpr := sqlparser.CopyOnRewrite(in, pre, r.post, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) + cfg := &evalengine.Config{ + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), + } + eexpr, err := evalengine.Translate(rewrittenExpr, cfg) + if err != nil { + panic(err) + } + + _, isPureOffset := rewrittenExpr.(*sqlparser.Offset) + + return &ProjExpr{ + Original: aeWrap(in), + EvalExpr: rewrittenExpr, + ColExpr: rewrittenExpr, + Info: &EvalEngine{EExpr: eexpr}, + }, isPureOffset +} + +func FromLeftOffset(i int) int { + return -i - 1 +} + +func ToLeftOffset(i int) int { + return -i - 1 +} + +func FromRightOffset(i int) int { + return i - 1 +} + +func ToRightOffset(i int) int { + return i + 1 +} diff --git a/go/vt/vtgate/planbuilder/operators/hash_join_test.go b/go/vt/vtgate/planbuilder/operators/hash_join_test.go new file mode 100644 index 00000000000..2bf1d08d2b6 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/hash_join_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/vschemawrapper" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestJoinPredicates(t *testing.T) { + lcol := sqlparser.NewColName("lhs") + rcol := sqlparser.NewColName("rhs") + ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} + lid := semantics.SingleTableSet(0) + rid := semantics.SingleTableSet(1) + ctx.SemTable.Recursive[lcol] = lid + ctx.SemTable.Recursive[rcol] = rid + lhs := &fakeOp{id: lid} + rhs := &fakeOp{id: rid} + hj := &HashJoin{ + LHS: lhs, + RHS: rhs, + LeftJoin: false, + columns: &hashJoinColumns{}, + } + + cmp := &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: lcol, + Right: rcol, + } + hj.AddJoinPredicate(ctx, cmp) + require.Len(t, hj.JoinComparisons, 1) + hj.planOffsets(ctx) + require.Len(t, hj.LHSKeys, 1) + require.Len(t, hj.RHSKeys, 1) +} + +func TestOffsetPlanning(t *testing.T) { + lcol1, lcol2 := sqlparser.NewColName("lhs1"), sqlparser.NewColName("lhs2") + rcol1, rcol2 := sqlparser.NewColName("rhs1"), sqlparser.NewColName("rhs2") + ctx := &plancontext.PlanningContext{ + SemTable: semantics.EmptySemTable(), + VSchema: &vschemawrapper.VSchemaWrapper{ + V: &vindexes.VSchema{}, + SysVarEnabled: true, + Env: vtenv.NewTestEnv(), + }, + } + lid := semantics.SingleTableSet(0) + rid := 
semantics.SingleTableSet(1) + ctx.SemTable.Recursive[lcol1] = lid + ctx.SemTable.Recursive[lcol2] = lid + ctx.SemTable.Recursive[rcol1] = rid + ctx.SemTable.Recursive[rcol2] = rid + lhs := &fakeOp{id: lid} + rhs := &fakeOp{id: rid} + + tests := []struct { + expr sqlparser.Expr + expectedColOffsets []int + }{{ + expr: lcol1, + expectedColOffsets: []int{-1}, + }, { + expr: rcol1, + expectedColOffsets: []int{1}, + }, { + expr: sqlparser.AndExpressions(lcol1, lcol2), + expectedColOffsets: []int{-1, -2}, + }, { + expr: sqlparser.AndExpressions(lcol1, rcol1, lcol2, rcol2), + expectedColOffsets: []int{-1, 1, -2, 2}, + }} + + for _, test := range tests { + t.Run(sqlparser.String(test.expr), func(t *testing.T) { + hj := &HashJoin{ + LHS: lhs, + RHS: rhs, + LeftJoin: false, + columns: &hashJoinColumns{}, + } + hj.AddColumn(ctx, true, false, aeWrap(test.expr)) + hj.planOffsets(ctx) + assert.Equal(t, test.expectedColOffsets, hj.ColumnOffsets) + }) + } +} diff --git a/go/vt/vtgate/planbuilder/operators/helpers.go b/go/vt/vtgate/planbuilder/operators/helpers.go index 21be634d7d8..0049a919e2a 100644 --- a/go/vt/vtgate/planbuilder/operators/helpers.go +++ b/go/vt/vtgate/planbuilder/operators/helpers.go @@ -21,46 +21,44 @@ import ( "sort" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) // compact will optimise the operator tree into a smaller but equivalent version -func compact(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func compact(ctx *plancontext.PlanningContext, op Operator) Operator { type compactable interface { // Compact implement this interface for operators that have easy to see optimisations - Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) + 
Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) } - newOp, err := rewrite.BottomUp(op, TableID, func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { + newOp := BottomUp(op, TableID, func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { newOp, ok := op.(compactable) if !ok { - return op, rewrite.SameTree, nil + return op, NoRewrite } return newOp.Compact(ctx) }, stopAtRoute) - return newOp, err + return newOp } -func checkValid(op ops.Operator) error { +func checkValid(op Operator) { type checkable interface { - CheckValid() error + CheckValid() } - return rewrite.Visit(op, func(this ops.Operator) error { + _ = Visit(op, func(this Operator) error { if chk, ok := this.(checkable); ok { - return chk.CheckValid() + chk.CheckValid() } return nil }) } -func Clone(op ops.Operator) ops.Operator { +func Clone(op Operator) Operator { inputs := op.Inputs() - clones := make([]ops.Operator, len(inputs)) + clones := make([]Operator, len(inputs)) for i, input := range inputs { clones[i] = Clone(input) } @@ -72,8 +70,8 @@ type tableIDIntroducer interface { introducesTableID() semantics.TableSet } -func TableID(op ops.Operator) (result semantics.TableSet) { - _ = rewrite.Visit(op, func(this ops.Operator) error { +func TableID(op Operator) (result semantics.TableSet) { + _ = Visit(op, func(this Operator) error { if tbl, ok := this.(tableIDIntroducer); ok { result = result.Merge(tbl.introducesTableID()) } @@ -87,9 +85,9 @@ type TableUser interface { TablesUsed() []string } -func TablesUsed(op ops.Operator) []string { +func TablesUsed(op Operator) []string { addString, collect := collectSortedUniqueStrings() - _ = rewrite.Visit(op, func(this ops.Operator) error { + _ = Visit(op, func(this Operator) error { if tbl, ok := this.(TableUser); ok { for _, u := range tbl.TablesUsed() { addString(u) @@ -100,29 +98,7 @@ func TablesUsed(op ops.Operator) []string { return collect() } -func 
UnresolvedPredicates(op ops.Operator, st *semantics.SemTable) (result []sqlparser.Expr) { - type unresolved interface { - // UnsolvedPredicates returns any predicates that have dependencies on the given Operator and - // on the outside of it (a parent Select expression, any other table not used by Operator, etc.). - // This is used for sub-queries. An example query could be: - // SELECT * FROM tbl WHERE EXISTS (SELECT 1 FROM otherTbl WHERE tbl.col = otherTbl.col) - // The subquery would have one unsolved predicate: `tbl.col = otherTbl.col` - // It's a predicate that belongs to the inner query, but it needs data from the outer query - // These predicates dictate which data we have to send from the outer side to the inner - UnsolvedPredicates(semTable *semantics.SemTable) []sqlparser.Expr - } - - _ = rewrite.Visit(op, func(this ops.Operator) error { - if tbl, ok := this.(unresolved); ok { - result = append(result, tbl.UnsolvedPredicates(st)...) - } - - return nil - }) - return -} - -func CostOf(op ops.Operator) (cost int) { +func CostOf(op Operator) (cost int) { type costly interface { // Cost returns the cost for this operator. All the costly operators in the tree are summed together to get the // total cost of the operator tree. 
@@ -131,7 +107,7 @@ func CostOf(op ops.Operator) (cost int) { Cost() int } - _ = rewrite.Visit(op, func(op ops.Operator) error { + _ = Visit(op, func(op Operator) error { if costlyOp, ok := op.(costly); ok { cost += costlyOp.Cost() } diff --git a/go/vt/vtgate/planbuilder/operators/horizon.go b/go/vt/vtgate/planbuilder/operators/horizon.go index 919767d550f..532441d6a34 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon.go +++ b/go/vt/vtgate/planbuilder/operators/horizon.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -35,7 +34,7 @@ import ( // Project/Aggregate/Sort/Limit operations, some which can be pushed down, // and some that have to be evaluated at the vtgate level. type Horizon struct { - Source ops.Operator + Source Operator // If this is a derived table, the two following fields will contain the tableID and name of it TableId *semantics.TableSet @@ -52,12 +51,12 @@ type Horizon struct { ColumnsOffset []int } -func newHorizon(src ops.Operator, query sqlparser.SelectStatement) *Horizon { +func newHorizon(src Operator, query sqlparser.SelectStatement) *Horizon { return &Horizon{Source: src, Query: query} } // Clone implements the Operator interface -func (h *Horizon) Clone(inputs []ops.Operator) ops.Operator { +func (h *Horizon) Clone(inputs []Operator) Operator { klone := *h klone.Source = inputs[0] klone.ColumnAliases = sqlparser.CloneColumns(h.ColumnAliases) @@ -77,16 +76,16 @@ func (h *Horizon) IsMergeable(ctx *plancontext.PlanningContext) bool { } // Inputs implements the Operator interface -func (h *Horizon) Inputs() []ops.Operator { - return []ops.Operator{h.Source} +func (h *Horizon) Inputs() []Operator { + return []Operator{h.Source} } // SetInputs implements the Operator interface -func (h *Horizon) SetInputs(ops []ops.Operator) { 
+func (h *Horizon) SetInputs(ops []Operator) { h.Source = ops[0] } -func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { if _, isUNion := h.Source.(*Union); isUNion { // If we have a derived table on top of a UNION, we can let the UNION do the expression rewriting h.Source = h.Source.AddPredicate(ctx, expr) @@ -95,17 +94,14 @@ func (h *Horizon) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser. tableInfo, err := ctx.SemTable.TableInfoForExpr(expr) if err != nil { if errors.Is(err, semantics.ErrNotSingleTable) { - return &Filter{ - Source: h, - Predicates: []sqlparser.Expr{expr}, - } + return newFilter(h, expr) } panic(err) } - newExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo) - if sqlparser.ContainsAggregation(newExpr) { - return &Filter{Source: h, Predicates: []sqlparser.Expr{expr}} + newExpr := ctx.RewriteDerivedTableExpression(expr, tableInfo) + if ContainsAggr(ctx, newExpr) { + return newFilter(h, expr) } h.Source = h.Source.AddPredicate(ctx, newExpr) return h @@ -126,6 +122,10 @@ func (h *Horizon) AddColumn(ctx *plancontext.PlanningContext, reuse bool, _ bool return offset } +func (h *Horizon) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + panic(errNoNewColumns) +} + var errNoNewColumns = vterrors.VT13001("can't add new columns to Horizon") // canReuseColumn is generic, so it can be used with slices of different types. @@ -145,7 +145,13 @@ func canReuseColumn[T any]( return } -func (h *Horizon) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { +func (h *Horizon) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { + if underRoute && h.IsDerived() { + // We don't want to use columns on this operator if it's a derived table under a route. 
+ // In this case, we need to add a Projection on top of this operator to make the column available + return -1 + } + for idx, se := range sqlparser.GetFirstSelect(h.Query).SelectExprs { ae, ok := se.(*sqlparser.AliasedExpr) if !ok { @@ -175,12 +181,9 @@ func (h *Horizon) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectE return sqlparser.GetFirstSelect(h.Query).SelectExprs } -func (h *Horizon) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (h *Horizon) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { if h.QP == nil { - _, err := h.getQP(ctx) - if err != nil { - panic(err) - } + h.getQP(ctx) } return h.QP.OrderExprs } @@ -190,20 +193,15 @@ func (h *Horizon) selectStatement() sqlparser.SelectStatement { return h.Query } -func (h *Horizon) src() ops.Operator { +func (h *Horizon) src() Operator { return h.Source } -func (h *Horizon) getQP(ctx *plancontext.PlanningContext) (*QueryProjection, error) { - if h.QP != nil { - return h.QP, nil - } - qp, err := CreateQPFromSelectStatement(ctx, h.Query) - if err != nil { - return nil, err +func (h *Horizon) getQP(ctx *plancontext.PlanningContext) *QueryProjection { + if h.QP == nil { + h.QP = CreateQPFromSelectStatement(ctx, h.Query) } - h.QP = qp - return h.QP, nil + return h.QP } func (h *Horizon) ShortDescription() string { diff --git a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go index 7ec141a1b8b..fc980038f7f 100644 --- a/go/vt/vtgate/planbuilder/operators/horizon_expanding.go +++ b/go/vt/vtgate/planbuilder/operators/horizon_expanding.go @@ -23,12 +23,10 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) 
(ops.Operator, *rewrite.ApplyResult, error) { +func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (Operator, *ApplyResult) { statement := horizon.selectStatement() switch sel := statement.(type) { case *sqlparser.Select: @@ -36,16 +34,13 @@ func expandHorizon(ctx *plancontext.PlanningContext, horizon *Horizon) (ops.Oper case *sqlparser.Union: return expandUnionHorizon(ctx, horizon, sel) } - return nil, nil, vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement)) + panic(vterrors.VT13001(fmt.Sprintf("unexpected statement type %T", statement))) } -func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (ops.Operator, *rewrite.ApplyResult, error) { +func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, union *sqlparser.Union) (Operator, *ApplyResult) { op := horizon.Source - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } + qp := horizon.getQP(ctx) if len(qp.OrderExprs) > 0 { op = &Ordering{ @@ -72,20 +67,15 @@ func expandUnionHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, unio } if op == horizon.Source { - return op, rewrite.NewTree("removed UNION horizon not used", op), nil + return op, Rewrote("removed UNION horizon not used") } - return op, rewrite.NewTree("expand UNION horizon into smaller components", op), nil + return op, Rewrote("expand UNION horizon into smaller components") } -func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (ops.Operator, *rewrite.ApplyResult, error) { +func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel *sqlparser.Select) (Operator, *ApplyResult) { op := createProjectionFromSelect(ctx, horizon) - - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, nil, err - } - + qp := horizon.getQP(ctx) var extracted []string if qp.HasAggr { extracted = append(extracted, "Aggregation") @@ -103,18 +93,12 @@ func 
expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel } if sel.Having != nil { - op, err = addWherePredicates(ctx, sel.Having.Expr, op) - if err != nil { - return nil, nil, err - } + op = addWherePredicates(ctx, sel.Having.Expr, op) extracted = append(extracted, "Filter") } if len(qp.OrderExprs) > 0 { - op = &Ordering{ - Source: op, - Order: qp.OrderExprs, - } + op = expandOrderBy(ctx, op, qp) extracted = append(extracted, "Ordering") } @@ -122,19 +106,51 @@ func expandSelectHorizon(ctx *plancontext.PlanningContext, horizon *Horizon, sel op = &Limit{ Source: op, AST: sel.Limit, + Top: true, } extracted = append(extracted, "Limit") } - return op, rewrite.NewTree(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", ")), op), nil + return op, Rewrote(fmt.Sprintf("expand SELECT horizon into (%s)", strings.Join(extracted, ", "))) } -func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) (out ops.Operator) { - qp, err := horizon.getQP(ctx) - if err != nil { - panic(err) +func expandOrderBy(ctx *plancontext.PlanningContext, op Operator, qp *QueryProjection) Operator { + proj := newAliasedProjection(op) + var newOrder []OrderBy + sqc := &SubQueryBuilder{} + for _, expr := range qp.OrderExprs { + newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr.SimplifiedExpr, TableID(op), false) + if newExpr == nil { + // no subqueries found, let's move on + newOrder = append(newOrder, expr) + continue + } + proj.addSubqueryExpr(aeWrap(newExpr), newExpr, subqs...) 
+ newOrder = append(newOrder, OrderBy{ + Inner: &sqlparser.Order{ + Expr: newExpr, + Direction: expr.Inner.Direction, + }, + SimplifiedExpr: newExpr, + }) + } + if len(proj.Columns.GetColumns()) > 0 { + // if we had to project columns for the ordering, + // we need the projection as source + op = proj + } + + return &Ordering{ + Source: op, + Order: newOrder, + } +} + +func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horizon) Operator { + qp := horizon.getQP(ctx) + var dt *DerivedTable if horizon.TableId != nil { dt = &DerivedTable{ @@ -147,32 +163,44 @@ func createProjectionFromSelect(ctx *plancontext.PlanningContext, horizon *Horiz if !qp.NeedsAggregation() { projX := createProjectionWithoutAggr(ctx, qp, horizon.src()) projX.DT = dt - out = projX - - return out + return projX } - aggregations, complexAggr, err := qp.AggregationExpressions(ctx, true) - if err != nil { - panic(err) - } + return createProjectionWithAggr(ctx, qp, dt, horizon.src()) +} - a := &Aggregator{ - Source: horizon.src(), +func createProjectionWithAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, dt *DerivedTable, src Operator) Operator { + aggregations, complexAggr := qp.AggregationExpressions(ctx, true) + aggrOp := &Aggregator{ + Source: src, Original: true, QP: qp, Grouping: qp.GetGrouping(), + WithRollup: qp.WithRollup, Aggregations: aggregations, DT: dt, } + // Go through all aggregations and check for any subquery. + sqc := &SubQueryBuilder{} + outerID := TableID(src) + for idx, aggr := range aggregations { + expr := aggr.Original.Expr + newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) + if newExpr != nil { + aggregations[idx].SubQueryExpression = subqs + } + } + aggrOp.Source = sqc.getRootOperator(src, nil) + + // create the projection columns from aggregator. 
if complexAggr { - return createProjectionForComplexAggregation(a, qp) + return createProjectionForComplexAggregation(aggrOp, qp) } - return createProjectionForSimpleAggregation(ctx, a, qp) + return createProjectionForSimpleAggregation(ctx, aggrOp, qp) } -func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) ops.Operator { +func createProjectionForSimpleAggregation(ctx *plancontext.PlanningContext, a *Aggregator, qp *QueryProjection) Operator { outer: for colIdx, expr := range qp.SelectExprs { ae, err := expr.GetAliasedExpr() @@ -181,7 +209,7 @@ outer: } addedToCol := false for idx, groupBy := range a.Grouping { - if ctx.SemTable.EqualsExprWithDeps(groupBy.SimplifiedExpr, ae.Expr) { + if ctx.SemTable.EqualsExprWithDeps(groupBy.Inner, ae.Expr) { if !addedToCol { a.Columns = append(a.Columns, ae) addedToCol = true @@ -206,23 +234,23 @@ outer: return a } -func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) ops.Operator { +func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) Operator { p := newAliasedProjection(a) p.DT = a.DT + // We don't want to keep the derived table in both Aggregator and Projection. + // If we do, then we end up re-writing the same column twice. 
+ a.DT = nil for _, expr := range qp.SelectExprs { ae, err := expr.GetAliasedExpr() if err != nil { panic(err) } - _, err = p.addProjExpr(newProjExpr(ae)) - if err != nil { - panic(err) - } + p.addProjExpr(newProjExpr(ae)) } for i, by := range a.Grouping { a.Grouping[i].ColOffset = len(a.Columns) - a.Columns = append(a.Columns, aeWrap(by.SimplifiedExpr)) + a.Columns = append(a.Columns, aeWrap(by.Inner)) } for i, aggregation := range a.Aggregations { a.Aggregations[i].ColOffset = len(a.Columns) @@ -231,7 +259,7 @@ func createProjectionForComplexAggregation(a *Aggregator, qp *QueryProjection) o return p } -func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) *Projection { +func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProjection, src Operator) *Projection { // first we need to check if we have all columns or there are still unexpanded stars aes, err := slice.MapWithError(qp.SelectExprs, func(from SelectExpr) (*sqlparser.AliasedExpr, error) { ae, ok := from.Col.(*sqlparser.AliasedExpr) @@ -250,43 +278,31 @@ func createProjectionWithoutAggr(ctx *plancontext.PlanningContext, qp *QueryProj sqc := &SubQueryBuilder{} outerID := TableID(src) for _, ae := range aes { - org := sqlparser.CloneRefOfAliasedExpr(ae) + org := ctx.SemTable.Clone(ae).(*sqlparser.AliasedExpr) expr := ae.Expr - newExpr, subqs, err := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) - if err != nil { - panic(err) - } + newExpr, subqs := sqc.pullOutValueSubqueries(ctx, expr, outerID, false) if newExpr == nil { // there was no subquery in this expression - _, err := proj.addUnexploredExpr(org, expr) - if err != nil { - panic(err) - } + proj.addUnexploredExpr(org, expr) } else { - err := proj.addSubqueryExpr(org, newExpr, subqs...) - if err != nil { - panic(err) - } + proj.addSubqueryExpr(org, newExpr, subqs...) 
} } proj.Source = sqc.getRootOperator(src, nil) return proj } -func newStarProjection(src ops.Operator, qp *QueryProjection) *Projection { +func newStarProjection(src Operator, qp *QueryProjection) *Projection { cols := sqlparser.SelectExprs{} for _, expr := range qp.SelectExprs { - err := sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { _, isSubQ := node.(*sqlparser.Subquery) if !isSubQ { return true, nil } - return false, vterrors.VT09015() + panic(vterrors.VT09015()) }, expr.Col) - if err != nil { - panic(err) - } cols = append(cols, expr.Col) } diff --git a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go index 4f096e1ac65..f8dc9b9d281 100644 --- a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go +++ b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go @@ -23,7 +23,6 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -41,15 +40,16 @@ type InfoSchemaRouting struct { Table *QueryTable } -func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (isr *InfoSchemaRouting) UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.SysTableTableSchema = nil for _, expr := range isr.SysTableTableSchema { eexpr, err := evalengine.Translate(expr, &evalengine.Config{ Collation: collations.SystemCollation.Collation, ResolveColumn: NotImplementedSchemaInfoResolver, + Environment: ctx.VSchema.Environment(), }) if err != nil { - return err + panic(err) } rp.SysTableTableSchema = append(rp.SysTableTableSchema, eexpr) } @@ -59,14 +59,14 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext 
eexpr, err := evalengine.Translate(expr, &evalengine.Config{ Collation: collations.SystemCollation.Collation, ResolveColumn: NotImplementedSchemaInfoResolver, + Environment: ctx.VSchema.Environment(), }) if err != nil { - return err + panic(err) } rp.SysTableTableName[k] = eexpr } - return nil } func (isr *InfoSchemaRouting) Clone() Routing { @@ -77,10 +77,10 @@ func (isr *InfoSchemaRouting) Clone() Routing { } } -func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { +func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing { isTableSchema, bvName, out := extractInfoSchemaRoutingPredicate(ctx, expr) if out == nil { - return isr, nil + return isr } if isr.SysTableTableName == nil { @@ -92,14 +92,14 @@ func (isr *InfoSchemaRouting) updateRoutingLogic(ctx *plancontext.PlanningContex if sqlparser.Equals.Expr(out, s) { // we already have this expression in the list // stating it again does not add value - return isr, nil + return isr } } isr.SysTableTableSchema = append(isr.SysTableTableSchema, out) } else { isr.SysTableTableName[bvName] = out } - return isr, nil + return isr } func (isr *InfoSchemaRouting) Cost() int { @@ -122,7 +122,7 @@ func extractInfoSchemaRoutingPredicate(ctx *plancontext.PlanningContext, in sqlp return false, "", nil } - isSchemaName, col := isTableOrSchemaRoutable(cmp) + isSchemaName, col := isTableOrSchemaRoutable(cmp, ctx.VSchema.Environment().MySQLVersion()) rhs := cmp.Right if col == nil || !shouldRewrite(rhs) { return false, "", nil @@ -133,6 +133,7 @@ func extractInfoSchemaRoutingPredicate(ctx *plancontext.PlanningContext, in sqlp _, err := evalengine.Translate(rhs, &evalengine.Config{ Collation: collations.SystemCollation.Collation, ResolveColumn: NotImplementedSchemaInfoResolver, + Environment: ctx.VSchema.Environment(), }) if err != nil { // if we can't translate this to an evalengine expression, @@ -153,14 +154,14 
@@ func extractInfoSchemaRoutingPredicate(ctx *plancontext.PlanningContext, in sqlp // isTableOrSchemaRoutable searches for a comparison where one side is a table or schema name column. // if it finds the correct column name being used, // it also makes sure that the LHS of the comparison contains the column, and the RHS the value sought after -func isTableOrSchemaRoutable(cmp *sqlparser.ComparisonExpr) ( +func isTableOrSchemaRoutable(cmp *sqlparser.ComparisonExpr, version string) ( isSchema bool, // tells if we are dealing with a table or a schema name comparator col *sqlparser.ColName, // which is the colName we are comparing against ) { - if col, schema, table := IsTableSchemaOrName(cmp.Left); schema || table { + if col, schema, table := IsTableSchemaOrName(cmp.Left, version); schema || table { return schema, col } - if col, schema, table := IsTableSchemaOrName(cmp.Right); schema || table { + if col, schema, table := IsTableSchemaOrName(cmp.Right, version); schema || table { // to make the rest of the code easier, we shuffle these around so the ColName is always on the LHS cmp.Right, cmp.Left = cmp.Left, cmp.Right return schema, col @@ -286,16 +287,15 @@ func shouldRewrite(e sqlparser.Expr) bool { return true } -func IsTableSchemaOrName(e sqlparser.Expr) (col *sqlparser.ColName, isTableSchema bool, isTableName bool) { +func IsTableSchemaOrName(e sqlparser.Expr, version string) (col *sqlparser.ColName, isTableSchema bool, isTableName bool) { col, ok := e.(*sqlparser.ColName) if !ok { return nil, false, false } - return col, isDbNameCol(col), isTableNameCol(col) + return col, isDbNameCol(col, version), isTableNameCol(col) } -func isDbNameCol(col *sqlparser.ColName) bool { - version := servenv.MySQLServerVersion() +func isDbNameCol(col *sqlparser.ColName, version string) bool { var schemaColumns map[string]any if strings.HasPrefix(version, "5.7") { schemaColumns = schemaColumns57 diff --git a/go/vt/vtgate/planbuilder/operators/insert.go 
b/go/vt/vtgate/planbuilder/operators/insert.go index a48e53c18b1..7c6e242ae9c 100644 --- a/go/vt/vtgate/planbuilder/operators/insert.go +++ b/go/vt/vtgate/planbuilder/operators/insert.go @@ -24,7 +24,6 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -79,13 +78,13 @@ func (i *Insert) ShortDescription() string { return i.VTable.String() } -func (i *Insert) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (i *Insert) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -var _ ops.Operator = (*Insert)(nil) +var _ Operator = (*Insert)(nil) -func (i *Insert) Clone([]ops.Operator) ops.Operator { +func (i *Insert) Clone([]Operator) Operator { return &Insert{ VTable: i.VTable, AST: i.AST, @@ -105,52 +104,258 @@ func (i *Insert) Statement() sqlparser.Statement { return i.AST } -func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, ins.Table, nil) - if err != nil { - return nil, err +func createOperatorFromInsert(ctx *plancontext.PlanningContext, ins *sqlparser.Insert) Operator { + tableInfo, qt := createQueryTableForDML(ctx, ins.Table, nil) + + vTbl, routing := buildVindexTableForDML(ctx, tableInfo, qt, ins, "insert") + + deleteBeforeInsert := false + if ins.Action == sqlparser.ReplaceAct && + (ctx.SemTable.ForeignKeysPresent() || vTbl.Keyspace.Sharded) && + (len(vTbl.PrimaryKey) > 0 || len(vTbl.UniqueKeys) > 0) { + // this needs a delete before insert as there can be row clash which needs to be deleted first. 
+ ins.Action = sqlparser.InsertAct + deleteBeforeInsert = true } - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "insert") - if err != nil { - return nil, err + insOp := checkAndCreateInsertOperator(ctx, ins, vTbl, routing) + + if !deleteBeforeInsert { + return insOp } - insOp, err := createInsertOperator(ctx, ins, vindexTable, routing) - if err != nil { - return nil, err + rows, isRows := ins.Rows.(sqlparser.Values) + if !isRows { + panic(vterrors.VT12001("REPLACE INTO using select statement")) } - if ins.Comments != nil { - insOp = &LockAndComment{ - Source: insOp, - Comments: ins.Comments, - } + pkCompExpr := pkCompExpression(vTbl, ins, rows) + uniqKeyCompExprs := uniqKeyCompExpressions(vTbl, ins, rows) + whereExpr := getWhereCondExpr(append(uniqKeyCompExprs, pkCompExpr)) + + delStmt := &sqlparser.Delete{ + Comments: ins.Comments, + TableExprs: sqlparser.TableExprs{sqlparser.CloneRefOfAliasedTableExpr(ins.Table)}, + Where: sqlparser.NewWhere(sqlparser.WhereClause, whereExpr), } + delOp := createOpFromStmt(ctx, delStmt, false, "") + return &Sequential{Sources: []Operator{delOp, insOp}} +} + +func checkAndCreateInsertOperator(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) Operator { + insOp := createInsertOperator(ctx, ins, vTbl, routing) // Find the foreign key mode and for unmanaged foreign-key-mode, we don't need to do anything. 
- ksMode, err := ctx.VSchema.ForeignKeyMode(vindexTable.Keyspace.Name) + ksMode, err := ctx.VSchema.ForeignKeyMode(vTbl.Keyspace.Name) if err != nil { - return nil, err + panic(err) } if ksMode != vschemapb.Keyspace_managed { - return insOp, nil + return insOp } parentFKs := ctx.SemTable.GetParentForeignKeysList() childFks := ctx.SemTable.GetChildForeignKeysList() - if len(childFks) == 0 && len(parentFKs) == 0 { - return insOp, nil - } if len(parentFKs) > 0 { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) + } + if len(childFks) > 0 { + if ins.Action == sqlparser.ReplaceAct { + panic(vterrors.VT12001("REPLACE INTO with foreign keys")) + } + if len(ins.OnDup) > 0 { + rows := getRowsOrError(ins) + return createUpsertOperator(ctx, ins, insOp, rows, vTbl) + } + } + return insOp +} + +func getRowsOrError(ins *sqlparser.Insert) sqlparser.Values { + if rows, ok := ins.Rows.(sqlparser.Values); ok { + return rows + } + panic(vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys with select statement")) +} + +func getWhereCondExpr(compExprs []*sqlparser.ComparisonExpr) sqlparser.Expr { + var outputExpr sqlparser.Expr + for _, expr := range compExprs { + if expr == nil { + continue + } + if outputExpr == nil { + outputExpr = expr + continue + } + outputExpr = &sqlparser.OrExpr{ + Left: outputExpr, + Right: expr, + } + } + return outputExpr +} + +func pkCompExpression(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sqlparser.Values) *sqlparser.ComparisonExpr { + if len(vTbl.PrimaryKey) == 0 { + return nil + } + pIndexes, pColTuple := findPKIndexes(vTbl, ins) + + var pValTuple sqlparser.ValTuple + for _, row := range rows { + var rowTuple sqlparser.ValTuple + for _, pIdx := range pIndexes { + if pIdx.idx == -1 { + rowTuple = append(rowTuple, pIdx.def) + } else { + rowTuple = append(rowTuple, row[pIdx.idx]) + } + } + pValTuple = append(pValTuple, rowTuple) + } + return sqlparser.NewComparisonExpr(sqlparser.InOp, pColTuple, pValTuple, nil) +} + +type pComp 
struct { + idx int + def sqlparser.Expr + col sqlparser.IdentifierCI +} + +func findPKIndexes(vTbl *vindexes.Table, ins *sqlparser.Insert) (pIndexes []pComp, pColTuple sqlparser.ValTuple) { + for _, pCol := range vTbl.PrimaryKey { + var def sqlparser.Expr + idx := ins.Columns.FindColumn(pCol) + if idx == -1 { + def = findDefault(vTbl, pCol) + if def == nil { + // If default value is empty, nothing to compare as it will always be false. + return nil, nil + } + } + pIndexes = append(pIndexes, pComp{idx, def, pCol}) + pColTuple = append(pColTuple, sqlparser.NewColName(pCol.String())) + } + return +} + +func findDefault(vTbl *vindexes.Table, pCol sqlparser.IdentifierCI) sqlparser.Expr { + for _, column := range vTbl.Columns { + if column.Name.Equal(pCol) { + return column.Default + } } - return nil, vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys") + panic(vterrors.VT03014(pCol.String(), vTbl.Name.String())) } -func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (ops.Operator, error) { +type uComp struct { + idx int + def sqlparser.Expr +} + +func uniqKeyCompExpressions(vTbl *vindexes.Table, ins *sqlparser.Insert, rows sqlparser.Values) (comps []*sqlparser.ComparisonExpr) { + noOfUniqKeys := len(vTbl.UniqueKeys) + if noOfUniqKeys == 0 { + return nil + } + + type uIdx struct { + Indexes [][]uComp + uniqKey sqlparser.Exprs + } + + allIndexes := make([]uIdx, 0, noOfUniqKeys) + allColTuples := make([]sqlparser.ValTuple, 0, noOfUniqKeys) + for _, uniqKey := range vTbl.UniqueKeys { + var uIndexes [][]uComp + var uColTuple sqlparser.ValTuple + skipKey := false + for _, expr := range uniqKey { + var offsets []uComp + offsets, skipKey = createUniqueKeyComp(ins, expr, vTbl) + if skipKey { + break + } + uIndexes = append(uIndexes, offsets) + uColTuple = append(uColTuple, expr) + } + if skipKey { + continue + } + allIndexes = append(allIndexes, uIdx{uIndexes, uniqKey}) + allColTuples = 
append(allColTuples, uColTuple) + } + + allValTuples := make([]sqlparser.ValTuple, len(allColTuples)) + for _, row := range rows { + for i, uk := range allIndexes { + var rowTuple sqlparser.ValTuple + for j, offsets := range uk.Indexes { + colIdx := 0 + valExpr := sqlparser.CopyOnRewrite(uk.uniqKey[j], nil, func(cursor *sqlparser.CopyOnWriteCursor) { + _, isCol := cursor.Node().(*sqlparser.ColName) + if !isCol { + return + } + if offsets[colIdx].idx == -1 { + cursor.Replace(offsets[colIdx].def) + } else { + cursor.Replace(row[offsets[colIdx].idx]) + } + colIdx++ + }, nil).(sqlparser.Expr) + rowTuple = append(rowTuple, valExpr) + } + allValTuples[i] = append(allValTuples[i], rowTuple) + } + } + + compExprs := make([]*sqlparser.ComparisonExpr, 0, noOfUniqKeys) + for i, valTuple := range allValTuples { + compExprs = append(compExprs, sqlparser.NewComparisonExpr(sqlparser.InOp, allColTuples[i], valTuple, nil)) + } + return compExprs +} + +func createUniqueKeyComp(ins *sqlparser.Insert, expr sqlparser.Expr, vTbl *vindexes.Table) ([]uComp, bool) { + col, isCol := expr.(*sqlparser.ColName) + if isCol { + var def sqlparser.Expr + idx := ins.Columns.FindColumn(col.Name) + if idx == -1 { + def = findDefault(vTbl, col.Name) + if def == nil { + // default value is empty, nothing to compare as it will always be false. + return nil, true + } + } + return []uComp{{idx, def}}, false + } + var offsets []uComp + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + col, ok := node.(*sqlparser.ColName) + if !ok { + return true, nil + } + var def sqlparser.Expr + idx := ins.Columns.FindColumn(col.Name) + if idx == -1 { + def = findDefault(vTbl, col.Name) + // no default, replace it with null value. 
+ if def == nil { + def = &sqlparser.NullVal{} + } + } + offsets = append(offsets, uComp{idx, def}) + return false, nil + }, expr) + return offsets, false +} + +func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.Insert, vTbl *vindexes.Table, routing Routing) (op Operator) { if _, target := routing.(*TargetedRouting); target { - return nil, vterrors.VT09017("INSERT with a target destination is not allowed") + panic(vterrors.VT09017("INSERT with a target destination is not allowed")) } insOp := &Insert{ @@ -169,15 +374,12 @@ func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.I if vTbl.ColumnListAuthoritative { insStmt = populateInsertColumnlist(insStmt, vTbl) } else { - return nil, vterrors.VT09004() + panic(vterrors.VT09004()) } } // modify column list or values for autoincrement column. - autoIncGen, err := modifyForAutoinc(ctx, insStmt, vTbl) - if err != nil { - return nil, err - } + autoIncGen := modifyForAutoinc(ctx, insStmt, vTbl) insOp.AutoIncrement = autoIncGen // set insert ignore. 
@@ -186,24 +388,34 @@ func createInsertOperator(ctx *plancontext.PlanningContext, insStmt *sqlparser.I insOp.ColVindexes = getColVindexes(insOp) switch rows := insStmt.Rows.(type) { case sqlparser.Values: - route.Source, err = insertRowsPlan(ctx, insOp, insStmt, rows) - if err != nil { - return nil, err - } + op = route + route.Source = insertRowsPlan(ctx, insOp, insStmt, rows) case sqlparser.SelectStatement: - return insertSelectPlan(ctx, insOp, route, insStmt, rows) + op = insertSelectPlan(ctx, insOp, route, insStmt, rows) + } + if insStmt.Comments != nil { + op = &LockAndComment{ + Source: op, + Comments: insStmt.Comments, + } } - return route, nil + return op } -func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, routeOp *Route, ins *sqlparser.Insert, sel sqlparser.SelectStatement) (*InsertSelection, error) { +func insertSelectPlan( + ctx *plancontext.PlanningContext, + insOp *Insert, + routeOp *Route, + ins *sqlparser.Insert, + sel sqlparser.SelectStatement, +) *InsertSelection { if columnMismatch(insOp.AutoIncrement, ins, sel) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } selOp, err := PlanQuery(ctx, sel) if err != nil { - return nil, err + panic(err) } // output of the select plan will be used to insert rows into the table. @@ -228,28 +440,24 @@ func insertSelectPlan(ctx *plancontext.PlanningContext, insOp *Insert, routeOp * } if len(insOp.ColVindexes) == 0 { - return insertSelect, nil + return insertSelect } colVindexes := insOp.ColVindexes vv := make([][]int, len(colVindexes)) for idx, colVindex := range colVindexes { for _, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } - + checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) colNum := findColumn(ins, col) // sharding column values should be provided in the insert. 
if colNum == -1 && idx == 0 { - return nil, vterrors.VT09003(col) + panic(vterrors.VT09003(col)) } vv[idx] = append(vv[idx], colNum) } } insOp.VindexValueOffset = vv - return insertSelect, nil + return insertSelect } func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectStatement) bool { @@ -277,15 +485,15 @@ func columnMismatch(gen *Generate, ins *sqlparser.Insert, sel sqlparser.SelectSt return false } -func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) (*Insert, error) { +func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlparser.Insert, rows sqlparser.Values) *Insert { for _, row := range rows { if len(ins.Columns) != len(row) { - return nil, vterrors.VT03006() + panic(vterrors.VT03006()) } } if len(insOp.ColVindexes) == 0 { - return insOp, nil + return insOp } colVindexes := insOp.ColVindexes @@ -293,19 +501,17 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar for vIdx, colVindex := range colVindexes { routeValues[vIdx] = make([][]evalengine.Expr, len(colVindex.Columns)) for colIdx, col := range colVindex.Columns { - err := checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) - if err != nil { - return nil, err - } + checkAndErrIfVindexChanging(sqlparser.UpdateExprs(ins.OnDup), col) routeValues[vIdx][colIdx] = make([]evalengine.Expr, len(rows)) colNum, _ := findOrAddColumn(ins, col) for rowNum, row := range rows { innerpv, err := evalengine.Translate(row[colNum], &evalengine.Config{ ResolveType: ctx.SemTable.TypeForExpr, Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), }) if err != nil { - return nil, err + panic(err) } routeValues[vIdx][colIdx][rowNum] = innerpv } @@ -322,7 +528,7 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar } } insOp.VindexValues = routeValues - return insOp, nil + return insOp } func valuesProvided(rows 
sqlparser.InsertRows) bool { @@ -350,18 +556,17 @@ func getColVindexes(insOp *Insert) (colVindexes []*vindexes.ColumnVindex) { return } -func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) error { +func checkAndErrIfVindexChanging(setClauses sqlparser.UpdateExprs, col sqlparser.IdentifierCI) { for _, assignment := range setClauses { if col.Equal(assignment.Name.Name) { valueExpr, isValuesFuncExpr := assignment.Expr.(*sqlparser.ValuesFuncExpr) // update on duplicate key is changing the vindex column, not supported. if !isValuesFuncExpr || !valueExpr.Name.Name.Equal(assignment.Name.Name) { - return vterrors.VT12001("DML cannot update vindex column") + panic(vterrors.VT12001("DML cannot update vindex column")) } - return nil + return } } - return nil } // findOrAddColumn finds the position of a column in the insert. If it's @@ -404,9 +609,9 @@ func populateInsertColumnlist(ins *sqlparser.Insert, table *vindexes.Table) *sql // modifyForAutoinc modifies the AST and the plan to generate necessary autoinc values. // For row values cases, bind variable names are generated using baseName. 
-func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTable *vindexes.Table) (*Generate, error) { +func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, vTable *vindexes.Table) *Generate { if vTable.AutoIncrement == nil { - return nil, nil + return nil } gen := &Generate{ Keyspace: vTable.AutoIncrement.Sequence.Keyspace, @@ -420,6 +625,9 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v case sqlparser.Values: autoIncValues := make(sqlparser.ValTuple, 0, len(rows)) for rowNum, row := range rows { + if len(ins.Columns) != len(row) { + panic(vterrors.VT03006()) + } // Support the DEFAULT keyword by treating it as null if _, ok := row[colNum].(*sqlparser.Default); ok { row[colNum] = &sqlparser.NullVal{} @@ -431,10 +639,11 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v gen.Values, err = evalengine.Translate(autoIncValues, &evalengine.Config{ ResolveType: ctx.SemTable.TypeForExpr, Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), }) if err != nil { - return nil, err + panic(err) } } - return gen, nil + return gen } diff --git a/go/vt/vtgate/planbuilder/operators/insert_selection.go b/go/vt/vtgate/planbuilder/operators/insert_selection.go index 5ae49ee2c55..70bda0a990a 100644 --- a/go/vt/vtgate/planbuilder/operators/insert_selection.go +++ b/go/vt/vtgate/planbuilder/operators/insert_selection.go @@ -17,15 +17,14 @@ limitations under the License. package operators import ( - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // InsertSelection operator represents an INSERT into SELECT FROM query. // It holds the operators for running the selection and insertion. 
type InsertSelection struct { - Select ops.Operator - Insert ops.Operator + Select Operator + Insert Operator // ForceNonStreaming when true, select first then insert, this is to avoid locking rows by select for insert. ForceNonStreaming bool @@ -34,7 +33,7 @@ type InsertSelection struct { noPredicates } -func (is *InsertSelection) Clone(inputs []ops.Operator) ops.Operator { +func (is *InsertSelection) Clone(inputs []Operator) Operator { return &InsertSelection{ Select: inputs[0], Insert: inputs[1], @@ -42,11 +41,11 @@ func (is *InsertSelection) Clone(inputs []ops.Operator) ops.Operator { } } -func (is *InsertSelection) Inputs() []ops.Operator { - return []ops.Operator{is.Select, is.Insert} +func (is *InsertSelection) Inputs() []Operator { + return []Operator{is.Select, is.Insert} } -func (is *InsertSelection) SetInputs(inputs []ops.Operator) { +func (is *InsertSelection) SetInputs(inputs []Operator) { is.Select = inputs[0] is.Insert = inputs[1] } @@ -58,8 +57,8 @@ func (is *InsertSelection) ShortDescription() string { return "" } -func (is *InsertSelection) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (is *InsertSelection) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -var _ ops.Operator = (*InsertSelection)(nil) +var _ Operator = (*InsertSelection)(nil) diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go index 828b15f5b79..d13f79e010f 100644 --- a/go/vt/vtgate/planbuilder/operators/join.go +++ b/go/vt/vtgate/planbuilder/operators/join.go @@ -19,24 +19,24 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // Join represents a join. If we have a predicate, this is an inner join. 
If no predicate exists, it is a cross join type Join struct { - LHS, RHS ops.Operator + LHS, RHS Operator Predicate sqlparser.Expr - LeftJoin bool + // JoinType is permitted to store only 3 of the possible values + // NormalJoinType, StraightJoinType and LeftJoinType. + JoinType sqlparser.JoinType noColumns } -var _ ops.Operator = (*Join)(nil) +var _ Operator = (*Join)(nil) // Clone implements the Operator interface -func (j *Join) Clone(inputs []ops.Operator) ops.Operator { +func (j *Join) Clone(inputs []Operator) Operator { clone := *j clone.LHS = inputs[0] clone.RHS = inputs[1] @@ -44,34 +44,34 @@ func (j *Join) Clone(inputs []ops.Operator) ops.Operator { LHS: inputs[0], RHS: inputs[1], Predicate: j.Predicate, - LeftJoin: j.LeftJoin, + JoinType: j.JoinType, } } -func (j *Join) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (j *Join) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (j *Join) Inputs() []ops.Operator { - return []ops.Operator{j.LHS, j.RHS} +func (j *Join) Inputs() []Operator { + return []Operator{j.LHS, j.RHS} } // SetInputs implements the Operator interface -func (j *Join) SetInputs(ops []ops.Operator) { +func (j *Join) SetInputs(ops []Operator) { j.LHS, j.RHS = ops[0], ops[1] } -func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { - if j.LeftJoin { - // we can't merge outer joins into a single QG - return j, rewrite.SameTree, nil +func (j *Join) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) { + if !j.JoinType.IsCommutative() { + // if we can't move tables around, we can't merge these inputs + return j, NoRewrite } lqg, lok := j.LHS.(*QueryGraph) rqg, rok := j.RHS.(*QueryGraph) if !lok || !rok { - return j, rewrite.SameTree, nil + return j, NoRewrite } newOp := &QueryGraph{ @@ -82,89 +82,116 @@ func (j *Join) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite if j.Predicate != 
nil { newOp.collectPredicate(ctx, j.Predicate) } - return newOp, rewrite.NewTree("merge querygraphs into a single one", newOp), nil + return newOp, Rewrote("merge querygraphs into a single one") } -func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { - if tableExpr.Join == sqlparser.RightJoinType { +func createStraightJoin(ctx *plancontext.PlanningContext, join *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator { + // for inner joins we can treat the predicates as filters on top of the join + joinOp := &Join{LHS: lhs, RHS: rhs, JoinType: join.Join} + + return addJoinPredicates(ctx, join.Condition.On, joinOp) +} + +func createLeftOuterJoin(ctx *plancontext.PlanningContext, join *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator { + // first we switch sides, so we always deal with left outer joins + switch join.Join { + case sqlparser.RightJoinType: lhs, rhs = rhs, lhs + join.Join = sqlparser.LeftJoinType + case sqlparser.NaturalRightJoinType: + lhs, rhs = rhs, lhs + join.Join = sqlparser.NaturalLeftJoinType } - subq, _ := getSubQuery(tableExpr.Condition.On) + + joinOp := &Join{LHS: lhs, RHS: rhs, JoinType: join.Join} + + // for outer joins we have to be careful with the predicates we use + var op Operator + subq, _ := getSubQuery(join.Condition.On) if subq != nil { - return nil, vterrors.VT12001("subquery in outer join predicate") + panic(vterrors.VT12001("subquery in outer join predicate")) } - predicate := tableExpr.Condition.On - sqlparser.RemoveKeyspaceFromColName(predicate) - return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate}, nil -} + predicate := join.Condition.On + sqlparser.RemoveKeyspaceInCol(predicate) + joinOp.Predicate = predicate + op = joinOp -func createJoin(ctx *plancontext.PlanningContext, LHS, RHS ops.Operator) ops.Operator { - lqg, lok := LHS.(*QueryGraph) - rqg, rok := RHS.(*QueryGraph) - if lok && rok { - op := &QueryGraph{ - Tables: append(lqg.Tables, rqg.Tables...), 
- innerJoins: append(lqg.innerJoins, rqg.innerJoins...), - NoDeps: ctx.SemTable.AndExpressions(lqg.NoDeps, rqg.NoDeps), - } - return op - } - return &Join{LHS: LHS, RHS: RHS} + return op } -func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs ops.Operator) (ops.Operator, error) { +func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Operator { op := createJoin(ctx, lhs, rhs) + return addJoinPredicates(ctx, tableExpr.Condition.On, op) +} + +func addJoinPredicates( + ctx *plancontext.PlanningContext, + joinPredicate sqlparser.Expr, + op Operator, +) Operator { sqc := &SubQueryBuilder{} outerID := TableID(op) - joinPredicate := tableExpr.Condition.On - sqlparser.RemoveKeyspaceFromColName(joinPredicate) + sqlparser.RemoveKeyspaceInCol(joinPredicate) exprs := sqlparser.SplitAndExpression(nil, joinPredicate) for _, pred := range exprs { - subq, err := sqc.handleSubquery(ctx, pred, outerID) - if err != nil { - return nil, err - } + subq := sqc.handleSubquery(ctx, pred, outerID) if subq != nil { continue } op = op.AddPredicate(ctx, pred) } - return sqc.getRootOperator(op, nil), nil + return sqc.getRootOperator(op, nil) +} + +func createJoin(ctx *plancontext.PlanningContext, LHS, RHS Operator) Operator { + lqg, lok := LHS.(*QueryGraph) + rqg, rok := RHS.(*QueryGraph) + if lok && rok { + op := &QueryGraph{ + Tables: append(lqg.Tables, rqg.Tables...), + innerJoins: append(lqg.innerJoins, rqg.innerJoins...), + NoDeps: ctx.SemTable.AndExpressions(lqg.NoDeps, rqg.NoDeps), + } + return op + } + return &Join{LHS: LHS, RHS: RHS} } -func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { - return AddPredicate(ctx, j, expr, false, newFilter) +func (j *Join) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { + return AddPredicate(ctx, j, expr, false, newFilterSinglePredicate) } var _ JoinOp = (*Join)(nil) -func (j 
*Join) GetLHS() ops.Operator { +func (j *Join) GetLHS() Operator { return j.LHS } -func (j *Join) GetRHS() ops.Operator { +func (j *Join) GetRHS() Operator { return j.RHS } -func (j *Join) SetLHS(operator ops.Operator) { +func (j *Join) SetLHS(operator Operator) { j.LHS = operator } -func (j *Join) SetRHS(operator ops.Operator) { +func (j *Join) SetRHS(operator Operator) { j.RHS = operator } func (j *Join) MakeInner() { - j.LeftJoin = false + if j.IsInner() { + return + } + j.JoinType = sqlparser.NormalJoinType } func (j *Join) IsInner() bool { - return !j.LeftJoin + return j.JoinType.IsInner() } -func (j *Join) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error { +func (j *Join) AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { j.Predicate = ctx.SemTable.AndExpressions(j.Predicate, expr) - return nil } func (j *Join) ShortDescription() string { diff --git a/go/vt/vtgate/planbuilder/operators/join_merging.go b/go/vt/vtgate/planbuilder/operators/join_merging.go index 52c9c4e5837..5edc812b1b7 100644 --- a/go/vt/vtgate/planbuilder/operators/join_merging.go +++ b/go/vt/vtgate/planbuilder/operators/join_merging.go @@ -21,14 +21,13 @@ import ( "reflect" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // mergeJoinInputs checks whether two operators can be merged into a single one. // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. 
-func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, m merger) *Route { +func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, m merger) *Route { lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) if lhsRoute == nil { return nil @@ -66,7 +65,7 @@ func mergeJoinInputs(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, jo } } -func prepareInputRoutes(lhs ops.Operator, rhs ops.Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { +func prepareInputRoutes(lhs Operator, rhs Operator) (*Route, *Route, Routing, Routing, routingType, routingType, bool) { lhsRoute, rhsRoute := operatorsToRoutes(lhs, rhs) if lhsRoute == nil || rhsRoute == nil { return nil, nil, nil, nil, 0, 0, false @@ -93,7 +92,9 @@ type ( joinMerger struct { predicates []sqlparser.Expr - innerJoin bool + // joinType is permitted to store only 3 of the possible values + // NormalJoinType, StraightJoinType and LeftJoinType. 
+ joinType sqlparser.JoinType } routingType int @@ -177,10 +178,10 @@ func getRoutingType(r Routing) routingType { panic(fmt.Sprintf("switch should be exhaustive, got %T", r)) } -func newJoinMerge(predicates []sqlparser.Expr, innerJoin bool) merger { +func newJoinMerge(predicates []sqlparser.Expr, joinType sqlparser.JoinType) merger { return &joinMerger{ predicates: predicates, - innerJoin: innerJoin, + joinType: joinType, } } @@ -204,7 +205,7 @@ func mergeShardedRouting(r1 *ShardedRouting, r2 *ShardedRouting) *ShardedRouting } func (jm *joinMerger) getApplyJoin(ctx *plancontext.PlanningContext, op1, op2 *Route) *ApplyJoin { - return NewApplyJoin(op1.Source, op2.Source, ctx.SemTable.AndExpressions(jm.predicates...), !jm.innerJoin) + return NewApplyJoin(ctx, op1.Source, op2.Source, ctx.SemTable.AndExpressions(jm.predicates...), jm.joinType) } func (jm *joinMerger) merge(ctx *plancontext.PlanningContext, op1, op2 *Route, r Routing) *Route { diff --git a/go/vt/vtgate/planbuilder/operators/joins.go b/go/vt/vtgate/planbuilder/operators/joins.go index 3b5c31c5dce..d0d0fa770c8 100644 --- a/go/vt/vtgate/planbuilder/operators/joins.go +++ b/go/vt/vtgate/planbuilder/operators/joins.go @@ -18,20 +18,23 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) type JoinOp interface { - ops.Operator - GetLHS() ops.Operator - GetRHS() ops.Operator - SetLHS(ops.Operator) - SetRHS(ops.Operator) + Operator + GetLHS() Operator + GetRHS() Operator + SetLHS(Operator) + SetRHS(Operator) MakeInner() IsInner() bool - AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) error + AddJoinPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) +} + +func IsOuter(outer JoinOp) bool { + return !outer.IsInner() } func AddPredicate( @@ -39,8 +42,8 @@ func AddPredicate( join JoinOp, expr sqlparser.Expr, 
joinPredicates bool, - newFilter func(ops.Operator, sqlparser.Expr) ops.Operator, -) ops.Operator { + newFilter func(Operator, sqlparser.Expr) Operator, +) Operator { deps := ctx.SemTable.RecursiveDeps(expr) switch { case deps.IsSolvedBy(TableID(join.GetLHS())): @@ -51,11 +54,11 @@ func AddPredicate( case deps.IsSolvedBy(TableID(join.GetRHS())): // if we are dealing with an outer join, always start by checking if this predicate can turn // the join into an inner join - if !joinPredicates && !join.IsInner() && canConvertToInner(ctx, expr, TableID(join.GetRHS())) { + if !joinPredicates && IsOuter(join) && canConvertToInner(ctx, expr, TableID(join.GetRHS())) { join.MakeInner() } - if !joinPredicates && !join.IsInner() { + if !joinPredicates && IsOuter(join) { // if we still are dealing with an outer join // we need to filter after the join has been evaluated return newFilter(join, expr) @@ -69,20 +72,17 @@ func AddPredicate( case deps.IsSolvedBy(TableID(join)): // if we are dealing with an outer join, always start by checking if this predicate can turn // the join into an inner join - if !joinPredicates && !join.IsInner() && canConvertToInner(ctx, expr, TableID(join.GetRHS())) { + if !joinPredicates && IsOuter(join) && canConvertToInner(ctx, expr, TableID(join.GetRHS())) { join.MakeInner() } - if !joinPredicates && !join.IsInner() { + if !joinPredicates && IsOuter(join) { // if we still are dealing with an outer join // we need to filter after the join has been evaluated return newFilter(join, expr) } - err := join.AddJoinPredicate(ctx, expr) - if err != nil { - panic(err) - } + join.AddJoinPredicate(ctx, expr) return join } diff --git a/go/vt/vtgate/planbuilder/operators/limit.go b/go/vt/vtgate/planbuilder/operators/limit.go index a6ea925b135..0d4857d5aaa 100644 --- a/go/vt/vtgate/planbuilder/operators/limit.go +++ b/go/vt/vtgate/planbuilder/operators/limit.go @@ -18,36 +18,39 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Limit struct { - Source ops.Operator + Source Operator AST *sqlparser.Limit - // Pushed marks whether the limit has been pushed down to the inputs but still need to keep the operator around. - // For example, `select * from user order by id limit 10`. Even after we push the limit to the route, we need a limit on top - // since it is a scatter. + // Top is true if the limit is a top level limit. To optimise, we push LIMIT to the RHS of joins, + // but we need to still LIMIT the total result set to the top level limit. + Top bool + + // Once we have pushed the top level Limit down, we mark it as pushed so that we don't push it down again. Pushed bool } -func (l *Limit) Clone(inputs []ops.Operator) ops.Operator { +func (l *Limit) Clone(inputs []Operator) Operator { return &Limit{ Source: inputs[0], AST: sqlparser.CloneRefOfLimit(l.AST), + Top: l.Top, + Pushed: l.Pushed, } } -func (l *Limit) Inputs() []ops.Operator { - return []ops.Operator{l.Source} +func (l *Limit) Inputs() []Operator { + return []Operator{l.Source} } -func (l *Limit) SetInputs(operators []ops.Operator) { +func (l *Limit) SetInputs(operators []Operator) { l.Source = operators[0] } -func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (l *Limit) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { l.Source = l.Source.AddPredicate(ctx, expr) return l } @@ -56,6 +59,10 @@ func (l *Limit) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, return l.Source.AddColumn(ctx, reuse, gb, expr) } +func (l *Limit) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return l.Source.AddWSColumn(ctx, offset, underRoute) +} + func (l *Limit) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { return l.Source.FindCol(ctx, expr, underRoute) } @@ 
-68,10 +75,17 @@ func (l *Limit) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Selec return l.Source.GetSelectExprs(ctx) } -func (l *Limit) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (l *Limit) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return l.Source.GetOrdering(ctx) } func (l *Limit) ShortDescription() string { - return sqlparser.String(l.AST) + r := sqlparser.String(l.AST) + if l.Top { + r += " Top" + } + if l.Pushed { + r += " Pushed" + } + return r } diff --git a/go/vt/vtgate/planbuilder/operators/misc_routing.go b/go/vt/vtgate/planbuilder/operators/misc_routing.go index 81301f975b4..575aa7b4e9a 100644 --- a/go/vt/vtgate/planbuilder/operators/misc_routing.go +++ b/go/vt/vtgate/planbuilder/operators/misc_routing.go @@ -64,10 +64,9 @@ var ( _ Routing = (*SequenceRouting)(nil) ) -func (tr *TargetedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (tr *TargetedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = tr.keyspace rp.TargetDestination = tr.TargetDestination - return nil } func (tr *TargetedRouting) Clone() Routing { @@ -75,8 +74,8 @@ func (tr *TargetedRouting) Clone() Routing { return &newTr } -func (tr *TargetedRouting) updateRoutingLogic(_ *plancontext.PlanningContext, _ sqlparser.Expr) (Routing, error) { - return tr, nil +func (tr *TargetedRouting) updateRoutingLogic(_ *plancontext.PlanningContext, _ sqlparser.Expr) Routing { + return tr } func (tr *TargetedRouting) Cost() int { @@ -91,17 +90,16 @@ func (tr *TargetedRouting) Keyspace() *vindexes.Keyspace { return tr.keyspace } -func (n *NoneRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (n *NoneRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = n.keyspace - return nil } func (n *NoneRouting) Clone() Routing { return n } -func (n 
*NoneRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return n, nil +func (n *NoneRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return n } func (n *NoneRouting) Cost() int { @@ -116,9 +114,8 @@ func (n *NoneRouting) Keyspace() *vindexes.Keyspace { return n.keyspace } -func (rr *AnyShardRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (rr *AnyShardRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = rr.keyspace - return nil } func (rr *AnyShardRouting) Clone() Routing { @@ -128,8 +125,8 @@ func (rr *AnyShardRouting) Clone() Routing { } } -func (rr *AnyShardRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return rr, nil +func (rr *AnyShardRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return rr } func (rr *AnyShardRouting) Cost() int { @@ -159,16 +156,14 @@ func (rr *AnyShardRouting) AlternateInKeyspace(keyspace *vindexes.Keyspace) *Rou return nil } -func (dr *DualRouting) UpdateRoutingParams(*plancontext.PlanningContext, *engine.RoutingParameters) error { - return nil -} +func (dr *DualRouting) UpdateRoutingParams(*plancontext.PlanningContext, *engine.RoutingParameters) {} func (dr *DualRouting) Clone() Routing { return &DualRouting{} } -func (dr *DualRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return dr, nil +func (dr *DualRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return dr } func (dr *DualRouting) Cost() int { @@ -183,18 +178,17 @@ func (dr *DualRouting) Keyspace() *vindexes.Keyspace { return nil } -func (sr *SequenceRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (sr *SequenceRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp 
*engine.RoutingParameters) { rp.Opcode = engine.Next rp.Keyspace = sr.keyspace - return nil } func (sr *SequenceRouting) Clone() Routing { return &SequenceRouting{keyspace: sr.keyspace} } -func (sr *SequenceRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) (Routing, error) { - return sr, nil +func (sr *SequenceRouting) updateRoutingLogic(*plancontext.PlanningContext, sqlparser.Expr) Routing { + return sr } func (sr *SequenceRouting) Cost() int { diff --git a/go/vt/vtgate/planbuilder/operators/offset_planning.go b/go/vt/vtgate/planbuilder/operators/offset_planning.go index 7e7be49874a..eb92cdf0920 100644 --- a/go/vt/vtgate/planbuilder/operators/offset_planning.go +++ b/go/vt/vtgate/planbuilder/operators/offset_planning.go @@ -21,56 +21,64 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) // planOffsets will walk the tree top down, adding offset information to columns in the tree for use in further optimization, -func planOffsets(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { +func planOffsets(ctx *plancontext.PlanningContext, root Operator) Operator { type offsettable interface { - planOffsets(ctx *plancontext.PlanningContext) + Operator + planOffsets(ctx *plancontext.PlanningContext) Operator } - visitor := func(in ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - var err error + visitor := func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { switch op := in.(type) { case *Horizon: - return nil, nil, vterrors.VT13001(fmt.Sprintf("should not see %T here", in)) + panic(vterrors.VT13001(fmt.Sprintf("should not see %T here", in))) case offsettable: - 
op.planOffsets(ctx) - } - if err != nil { - return nil, nil, err + newOp := op.planOffsets(ctx) + + if newOp == nil { + newOp = op + } + + if DebugOperatorTree { + fmt.Println("Planned offsets for:") + fmt.Println(ToTree(newOp)) + } + return newOp, nil } - return in, rewrite.SameTree, nil + return in, NoRewrite } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } -func fetchByOffset(e sqlparser.SQLNode) bool { - switch e.(type) { +// mustFetchFromInput returns true for expressions that have to be fetched from the input and cannot be evaluated +func mustFetchFromInput(ctx *plancontext.PlanningContext, e sqlparser.SQLNode) bool { + switch fun := e.(type) { case *sqlparser.ColName, sqlparser.AggrFunc: return true + case *sqlparser.FuncExpr: + return fun.Name.EqualsAnyString(ctx.VSchema.GetAggregateUDFs()) default: return false } } // useOffsets rewrites an expression to use values from the input -func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) sqlparser.Expr { +func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op Operator) sqlparser.Expr { var exprOffset *sqlparser.Offset in := op.Inputs()[0] found := func(e sqlparser.Expr, offset int) { exprOffset = sqlparser.NewOffset(offset, e) } - notFound := func(e sqlparser.Expr) error { + notFound := func(e sqlparser.Expr) { _, addToGroupBy := e.(*sqlparser.ColName) offset := in.AddColumn(ctx, true, addToGroupBy, aeWrap(e)) exprOffset = sqlparser.NewOffset(offset, e) - return nil } visitor := getOffsetRewritingVisitor(ctx, in.FindCol, found, notFound) @@ -88,27 +96,26 @@ func useOffsets(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Op return rewritten.(sqlparser.Expr) } -// addColumnsToInput adds columns needed by an operator to its input. -// This happens only when the filter expression can be retrieved as an offset from the underlying mysql. 
-func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addColumnsToInput(ctx *plancontext.PlanningContext, root Operator) Operator { + // addColumnsToInput adds columns needed by an operator to its input. + // This happens only when the filter expression can be retrieved as an offset from the underlying mysql. + addColumnsNeededByFilter := func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { filter, ok := in.(*Filter) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } proj, areOnTopOfProj := filter.Source.(selectExpressions) if !areOnTopOfProj { // not much we can do here - return in, rewrite.SameTree, nil + return in, NoRewrite } addedColumns := false found := func(expr sqlparser.Expr, i int) {} - notFound := func(e sqlparser.Expr) error { + notFound := func(e sqlparser.Expr) { _, addToGroupBy := e.(*sqlparser.ColName) proj.addColumnWithoutPushing(ctx, aeWrap(e), addToGroupBy) addedColumns = true - return nil } visitor := getOffsetRewritingVisitor(ctx, proj.FindCol, found, notFound) @@ -116,22 +123,43 @@ func addColumnsToInput(ctx *plancontext.PlanningContext, root ops.Operator) (ops _ = sqlparser.CopyOnRewrite(expr, visitor, nil, ctx.SemTable.CopySemanticInfo) } if addedColumns { - return in, rewrite.NewTree("added columns because filter needs it", in), nil + return in, Rewrote("added columns because filter needs it") + } + + return in, NoRewrite + } + + // while we are out here walking the operator tree, if we find a UDF in an aggregation, we should fail + failUDFAggregation := func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { + aggrOp, ok := in.(*Aggregator) + if !ok { + return in, NoRewrite + } + for _, aggr := range aggrOp.Aggregations { + if aggr.OpCode == opcode.AggregateUDF { + // we don't support UDFs in aggregation if it's 
still above a route + message := fmt.Sprintf("Aggregate UDF '%s' must be pushed down to MySQL", sqlparser.String(aggr.Original.Expr)) + panic(vterrors.VT12001(message)) + } } + return in, NoRewrite + } - return in, rewrite.SameTree, nil + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { + out, res := addColumnsNeededByFilter(in, semantics.EmptyTableSet(), isRoot) + failUDFAggregation(in, semantics.EmptyTableSet(), isRoot) + return out, res } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } -// addColumnsToInput adds columns needed by an operator to its input. -// This happens only when the filter expression can be retrieved as an offset from the underlying mysql. -func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +// isolateDistinctFromUnion will pull out the distinct from a union operator +func isolateDistinctFromUnion(_ *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { union, ok := in.(*Union) if !ok || !union.distinct { - return in, rewrite.SameTree, nil + return in, NoRewrite } union.distinct = false @@ -140,10 +168,10 @@ func pullDistinctFromUNION(_ *plancontext.PlanningContext, root ops.Operator) (o Required: true, Source: union, } - return distinct, rewrite.NewTree("pulled out DISTINCT from union", union), nil + return distinct, Rewrote("pulled out DISTINCT from union") } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } func getOffsetRewritingVisitor( @@ -153,13 +181,9 @@ func getOffsetRewritingVisitor( // this function will be called when an expression has been found on the input found func(sqlparser.Expr, int), // if 
we have an expression that mush be fetched, this method will be called - notFound func(sqlparser.Expr) error, + notFound func(sqlparser.Expr), ) func(node, parent sqlparser.SQLNode) bool { - var err error return func(node, parent sqlparser.SQLNode) bool { - if err != nil { - return false - } e, ok := node.(sqlparser.Expr) if !ok { return true @@ -170,8 +194,8 @@ func getOffsetRewritingVisitor( return false } - if fetchByOffset(e) { - err = notFound(e) + if mustFetchFromInput(ctx, e) { + notFound(e) return false } diff --git a/go/vt/vtgate/planbuilder/operators/operator.go b/go/vt/vtgate/planbuilder/operators/operator.go index 4e58fca9214..f1a38974c93 100644 --- a/go/vt/vtgate/planbuilder/operators/operator.go +++ b/go/vt/vtgate/planbuilder/operators/operator.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Vitess Authors. +Copyright 2022 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ The operators go through a few phases while planning: All the post-processing - aggregations, sorting, limit etc. are at this stage contained in Horizon structs. We try to push these down under routes, and expand the ones that can't be pushed down into individual operators such as Projection, - Agreggation, Limit, etc. + Aggregation, Limit, etc. 2. Planning Once the initial plan has been fully built, we go through a number of phases. 
recursively running rewriters on the tree in a fixed point fashion, until we've gone @@ -36,138 +36,64 @@ The operators go through a few phases while planning: package operators import ( - "fmt" - - "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type ( - // helper type that implements Inputs() returning nil - noInputs struct{} - - // helper type that implements AddColumn() returning an error - noColumns struct{} - - // helper type that implements AddPredicate() returning an error - noPredicates struct{} -) - -// PlanQuery creates a query plan for a given SQL statement -func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (result ops.Operator, err error) { - defer PanicHandler(&err) - - op, err := translateQueryToOp(ctx, stmt) - if err != nil { - return nil, err - } - - if rewrite.DebugOperatorTree { - fmt.Println("Initial tree:") - fmt.Println(ops.ToTree(op)) - } + // Operator forms the tree of operators, representing the declarative query provided. + // The operator tree is no actually runnable, it's an intermediate representation used + // while query planning + // The mental model are operators that pull data from each other, the root being the + // full query output, and the leaves are most often `Route`s, representing communication + // with one or more shards. 
We want to push down as much work as possible under these Routes + Operator interface { + // Clone will return a copy of this operator, protected so changed to the original will not impact the clone + Clone(inputs []Operator) Operator - if op, err = compact(ctx, op); err != nil { - return nil, err - } + // Inputs returns the inputs for this operator + Inputs() []Operator - if err = checkValid(op); err != nil { - return nil, err - } + // SetInputs changes the inputs for this op + SetInputs([]Operator) - if op, err = planQuery(ctx, op); err != nil { - return nil, err - } + // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. + // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, + // where data is fetched from the LHS of the join to be used in the evaluation on the RHS + // TODO: we should remove this and replace it with rewriters + AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator - _, isRoute := op.(*Route) - if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { - // If we got here, we don't have a single shard plan - return nil, ctx.SemTable.NotSingleRouteErr - } + AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int - return op, err -} + // AddWSColumn is used to add a weight_string column to the operator + AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int -func PanicHandler(err *error) { - if r := recover(); r != nil { - badness, ok := r.(error) - if !ok { - panic(r) - } + FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int - *err = badness - } -} + GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr + GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs -// Inputs implements the Operator interface -func (noInputs) Inputs() []ops.Operator { - return nil -} + 
ShortDescription() string -// SetInputs implements the Operator interface -func (noInputs) SetInputs(ops []ops.Operator) { - if len(ops) > 0 { - panic("the noInputs operator does not have inputs") + GetOrdering(ctx *plancontext.PlanningContext) []OrderBy } -} - -// AddColumn implements the Operator interface -func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) int { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) int { - panic(vterrors.VT13001("noColumns operators have no column")) -} -func (noColumns) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectExprs { - panic(vterrors.VT13001("noColumns operators have no column")) -} - -// AddPredicate implements the Operator interface -func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) ops.Operator { - panic(vterrors.VT13001("the noColumns operator cannot accept predicates")) -} - -// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us -func tryTruncateColumnsAt(op ops.Operator, truncateAt int) bool { - type columnTruncator interface { - setTruncateColumnCount(offset int) - } + // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. 
+ OrderBy struct { + Inner *sqlparser.Order - truncator, ok := op.(columnTruncator) - if ok { - truncator.setTruncateColumnCount(truncateAt) - return true + // See GroupBy#SimplifiedExpr for more details about this + SimplifiedExpr sqlparser.Expr } +) - switch op := op.(type) { - case *Limit: - return tryTruncateColumnsAt(op.Source, truncateAt) - case *SubQuery: - for _, offset := range op.Vars { - if offset >= truncateAt { - return false - } - } - return tryTruncateColumnsAt(op.Outer, truncateAt) - default: - return false +// Map takes in a mapping function and applies it to both the expression in OrderBy. +func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { + return OrderBy{ + Inner: &sqlparser.Order{ + Expr: mappingFunc(ob.Inner.Expr), + Direction: ob.Inner.Direction, + }, + SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), } } - -func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op ops.Operator) sqlparser.SelectExprs { - columns := op.GetColumns(ctx) - selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { - return from - }) - return selExprs -} diff --git a/go/vt/vtgate/planbuilder/operators/operator_funcs.go b/go/vt/vtgate/planbuilder/operators/operator_funcs.go deleted file mode 100644 index 7f7aaff29c5..00000000000 --- a/go/vt/vtgate/planbuilder/operators/operator_funcs.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package operators - -import ( - "fmt" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -// RemovePredicate is used when we turn a predicate into a plan operator, -// and the predicate needs to be removed as an AST construct -func RemovePredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr, op ops.Operator) (ops.Operator, error) { - switch op := op.(type) { - case *Route: - newSrc, err := RemovePredicate(ctx, expr, op.Source) - if err != nil { - return nil, err - } - op.Source = newSrc - return op, err - case *ApplyJoin: - isRemoved := false - deps := ctx.SemTable.RecursiveDeps(expr) - if deps.IsSolvedBy(TableID(op.LHS)) { - newSrc, err := RemovePredicate(ctx, expr, op.LHS) - if err != nil { - return nil, err - } - op.LHS = newSrc - isRemoved = true - } - - if deps.IsSolvedBy(TableID(op.RHS)) { - newSrc, err := RemovePredicate(ctx, expr, op.RHS) - if err != nil { - return nil, err - } - op.RHS = newSrc - isRemoved = true - } - - var keep []sqlparser.Expr - for _, e := range sqlparser.SplitAndExpression(nil, op.Predicate) { - if ctx.SemTable.EqualsExprWithDeps(expr, e) { - isRemoved = true - } else { - keep = append(keep, e) - } - } - - if !isRemoved { - return nil, vterrors.VT12001(fmt.Sprintf("remove '%s' predicate on cross-shard join query", sqlparser.String(expr))) - } - - op.Predicate = ctx.SemTable.AndExpressions(keep...) - return op, nil - case *Filter: - idx := -1 - for i, predicate := range op.Predicates { - if ctx.SemTable.EqualsExprWithDeps(predicate, expr) { - idx = i - } - } - if idx == -1 { - // the predicate is not here. 
let's remove it from our source - newSrc, err := RemovePredicate(ctx, expr, op.Source) - if err != nil { - return nil, err - } - op.Source = newSrc - return op, nil - } - if len(op.Predicates) == 1 { - // no predicates left on this operator, so we just remove it - return op.Source, nil - } - - // remove the predicate from this filter - op.Predicates = append(op.Predicates[:idx], op.Predicates[idx+1:]...) - return op, nil - - default: - return nil, vterrors.VT13001("this should not happen - tried to remove predicate from the operator table") - } -} diff --git a/go/vt/vtgate/planbuilder/operators/ops/op.go b/go/vt/vtgate/planbuilder/operators/ops/op.go deleted file mode 100644 index 1117b947814..00000000000 --- a/go/vt/vtgate/planbuilder/operators/ops/op.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ops - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" -) - -type ( - // Operator forms the tree of operators, representing the declarative query provided. - // The operator tree is no actually runnable, it's an intermediate representation used - // while query planning - // The mental model are operators that pull data from each other, the root being the - // full query output, and the leaves are most often `Route`s, representing communication - // with one or more shards. 
We want to push down as much work as possible under these Routes - Operator interface { - // Clone will return a copy of this operator, protected so changed to the original will not impact the clone - Clone(inputs []Operator) Operator - - // Inputs returns the inputs for this operator - Inputs() []Operator - - // SetInputs changes the inputs for this op - SetInputs([]Operator) - - // AddPredicate is used to push predicates. It pushed it as far down as is possible in the tree. - // If we encounter a join and the predicate depends on both sides of the join, the predicate will be split into two parts, - // where data is fetched from the LHS of the join to be used in the evaluation on the RHS - // TODO: we should remove this and replace it with rewriters - AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator - - AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, addToGroupBy bool, expr *sqlparser.AliasedExpr) int - - FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int - - GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr - GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs - - ShortDescription() string - - GetOrdering(ctx *plancontext.PlanningContext) []OrderBy - } - - // OrderBy contains the expression to used in order by and also if ordering is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. - OrderBy struct { - Inner *sqlparser.Order - - // See GroupBy#SimplifiedExpr for more details about this - SimplifiedExpr sqlparser.Expr - } -) - -// Map takes in a mapping function and applies it to both the expression in OrderBy. 
-func (ob OrderBy) Map(mappingFunc func(sqlparser.Expr) sqlparser.Expr) OrderBy { - return OrderBy{ - Inner: &sqlparser.Order{ - Expr: mappingFunc(ob.Inner.Expr), - Direction: ob.Inner.Direction, - }, - SimplifiedExpr: mappingFunc(ob.SimplifiedExpr), - } -} diff --git a/go/vt/vtgate/planbuilder/operators/ordering.go b/go/vt/vtgate/planbuilder/operators/ordering.go index b3d0310eadb..f8008022511 100644 --- a/go/vt/vtgate/planbuilder/operators/ordering.go +++ b/go/vt/vtgate/planbuilder/operators/ordering.go @@ -22,20 +22,19 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Ordering struct { - Source ops.Operator + Source Operator Offset []int WOffset []int - Order []ops.OrderBy + Order []OrderBy ResultColumns int } -func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { +func (o *Ordering) Clone(inputs []Operator) Operator { return &Ordering{ Source: inputs[0], Offset: slices.Clone(o.Offset), @@ -45,15 +44,15 @@ func (o *Ordering) Clone(inputs []ops.Operator) ops.Operator { } } -func (o *Ordering) Inputs() []ops.Operator { - return []ops.Operator{o.Source} +func (o *Ordering) Inputs() []Operator { + return []Operator{o.Source} } -func (o *Ordering) SetInputs(operators []ops.Operator) { +func (o *Ordering) SetInputs(operators []Operator) { o.Source = operators[0] } -func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (o *Ordering) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { o.Source = o.Source.AddPredicate(ctx, expr) return o } @@ -62,6 +61,10 @@ func (o *Ordering) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bo return o.Source.AddColumn(ctx, reuse, gb, expr) } +func (o *Ordering) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return o.Source.AddWSColumn(ctx, offset, 
underRoute) +} + func (o *Ordering) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { return o.Source.FindCol(ctx, expr, underRoute) } @@ -74,11 +77,11 @@ func (o *Ordering) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Se return o.Source.GetSelectExprs(ctx) } -func (o *Ordering) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (o *Ordering) GetOrdering(*plancontext.PlanningContext) []OrderBy { return o.Order } -func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) { +func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) Operator { for _, order := range o.Order { offset := o.Source.AddColumn(ctx, true, false, aeWrap(order.SimplifiedExpr)) o.Offset = append(o.Offset, offset) @@ -92,10 +95,11 @@ func (o *Ordering) planOffsets(ctx *plancontext.PlanningContext) { offset = o.Source.AddColumn(ctx, true, false, aeWrap(wsExpr)) o.WOffset = append(o.WOffset, offset) } + return nil } func (o *Ordering) ShortDescription() string { - ordering := slice.Map(o.Order, func(o ops.OrderBy) string { + ordering := slice.Map(o.Order, func(o OrderBy) string { return sqlparser.String(o.SimplifiedExpr) }) return strings.Join(ordering, ", ") diff --git a/go/vt/vtgate/planbuilder/operators/phases.go b/go/vt/vtgate/planbuilder/operators/phases.go index ba13a828d0b..3864b514aa9 100644 --- a/go/vt/vtgate/planbuilder/operators/phases.go +++ b/go/vt/vtgate/planbuilder/operators/phases.go @@ -17,10 +17,12 @@ limitations under the License. 
package operators import ( + "io" + "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -36,6 +38,7 @@ const ( delegateAggregation addAggrOrdering cleanOutPerfDistinct + dmlWithInput subquerySettling DONE ) @@ -47,7 +50,7 @@ func (p Phase) String() string { case initialPlanning: return "initial horizon planning optimization" case pullDistinctFromUnion: - return "pull distinct from UNION1" + return "pull distinct from UNION" case delegateAggregation: return "split aggregation between vtgate and mysql" case addAggrOrdering: @@ -56,9 +59,11 @@ func (p Phase) String() string { return "optimize Distinct operations" case subquerySettling: return "settle subqueries" + case dmlWithInput: + return "expand update/delete to dml with input" + default: + panic(vterrors.VT13001("unhandled default case")) } - - return "unknown" } func (p Phase) shouldRun(s semantics.QuerySignature) bool { @@ -73,134 +78,222 @@ func (p Phase) shouldRun(s semantics.QuerySignature) bool { return s.Distinct case subquerySettling: return s.SubQueries + case dmlWithInput: + return s.DML + default: + return true } - return true } -func (p Phase) act(ctx *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { +func (p Phase) act(ctx *plancontext.PlanningContext, op Operator) Operator { switch p { case pullDistinctFromUnion: - return pullDistinctFromUNION(ctx, op) + return isolateDistinctFromUnion(ctx, op) case delegateAggregation: return enableDelegateAggregation(ctx, op) case addAggrOrdering: - return addOrderBysForAggregations(ctx, op) + return addOrderingForAllAggregations(ctx, op) case cleanOutPerfDistinct: return removePerformanceDistinctAboveRoute(ctx, op) case 
subquerySettling: - return settleSubqueries(ctx, op), nil + return settleSubqueries(ctx, op) + case dmlWithInput: + return findDMLAboveRoute(ctx, op) + default: + return op } +} + +type phaser struct { + current Phase +} - return op, nil +func (p *phaser) next(ctx *plancontext.PlanningContext) Phase { + for { + curr := p.current + if curr == DONE { + return DONE + } + + p.current++ + + if curr.shouldRun(ctx.SemTable.QuerySignature) { + return curr + } + } } -// getPhases returns the ordered phases that the planner will undergo. -// These phases ensure the appropriate collaboration between rewriters. -func getPhases(ctx *plancontext.PlanningContext) (phases []Phase) { - for p := Phase(0); p < DONE; p++ { - if p.shouldRun(ctx.SemTable.QuerySignature) { - phases = append(phases, p) +func findDMLAboveRoute(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { + switch op := in.(type) { + case *Delete: + return createDMLWithInput(ctx, op, op.Source, op.DMLCommon) + case *Update: + return createDMLWithInput(ctx, op, op.Source, op.DMLCommon) + } + return in, NoRewrite + } + + return BottomUp(root, TableID, visitor, stopAtRoute) +} + +func createDMLWithInput(ctx *plancontext.PlanningContext, op, src Operator, in *DMLCommon) (Operator, *ApplyResult) { + if len(in.Target.VTable.PrimaryKey) == 0 { + panic(vterrors.VT09015()) + } + dm := &DMLWithInput{} + var leftComp sqlparser.ValTuple + proj := newAliasedProjection(src) + dm.cols = make([][]*sqlparser.ColName, 1) + for _, col := range in.Target.VTable.PrimaryKey { + colName := sqlparser.NewColNameWithQualifier(col.String(), in.Target.Name) + ctx.SemTable.Recursive[colName] = in.Target.ID + proj.AddColumn(ctx, true, false, aeWrap(colName)) + dm.cols[0] = append(dm.cols[0], colName) + leftComp = append(leftComp, colName) + } + + dm.Source = proj + + var targetTable *Table + _ = Visit(src, func(operator Operator) error { + if tbl, 
ok := operator.(*Table); ok && tbl.QTable.ID == in.Target.ID { + targetTable = tbl + return io.EOF } + return nil + }) + if targetTable == nil { + panic(vterrors.VT13001("target DELETE table not found")) + } + + // optimize for case when there is only single column on left hand side. + var lhs sqlparser.Expr = leftComp + if len(leftComp) == 1 { + lhs = leftComp[0] + } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, lhs, sqlparser.ListArg(engine.DmlVals), nil) + targetQT := targetTable.QTable + qt := &QueryTable{ + ID: targetQT.ID, + Alias: sqlparser.CloneRefOfAliasedTableExpr(targetQT.Alias), + Table: sqlparser.CloneTableName(targetQT.Table), + Predicates: []sqlparser.Expr{compExpr}, } - return + + qg := &QueryGraph{Tables: []*QueryTable{qt}} + in.Source = qg + + if in.OwnedVindexQuery != nil { + in.OwnedVindexQuery.From = sqlparser.TableExprs{targetQT.Alias} + in.OwnedVindexQuery.Where = sqlparser.NewWhere(sqlparser.WhereClause, compExpr) + in.OwnedVindexQuery.OrderBy = nil + in.OwnedVindexQuery.Limit = nil + } + dm.DML = append(dm.DML, op) + + return dm, Rewrote("changed Delete to DMLWithInput") } -func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op ops.Operator) (ops.Operator, error) { - return rewrite.BottomUp(op, TableID, func(innerOp ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { +func removePerformanceDistinctAboveRoute(_ *plancontext.PlanningContext, op Operator) Operator { + return BottomUp(op, TableID, func(innerOp Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { d, ok := innerOp.(*Distinct) if !ok || d.Required { - return innerOp, rewrite.SameTree, nil + return innerOp, NoRewrite } - return d.Source, rewrite.NewTree("removed distinct not required that was not pushed under route", d), nil + return d.Source, Rewrote("removed distinct not required that was not pushed under route") }, stopAtRoute) } -func enableDelegateAggregation(ctx *plancontext.PlanningContext, 
op ops.Operator) (ops.Operator, error) { +func enableDelegateAggregation(ctx *plancontext.PlanningContext, op Operator) Operator { return addColumnsToInput(ctx, op) } -func addOrderBysForAggregations(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +// addOrderingForAllAggregations is run we have pushed down Aggregators as far down as possible. +func addOrderingForAllAggregations(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { aggrOp, ok := in.(*Aggregator) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } - requireOrdering, err := needsOrdering(ctx, aggrOp) - if err != nil { - return nil, nil, err - } - if !requireOrdering { - return in, rewrite.SameTree, nil + requireOrdering := needsOrdering(ctx, aggrOp) + var res *ApplyResult + if requireOrdering { + addOrderingFor(aggrOp) + res = Rewrote("added ordering before aggregation") } - orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) ops.OrderBy { - return from.AsOrderBy() - }) - if aggrOp.DistinctExpr != nil { - orderBys = append(orderBys, ops.OrderBy{ - Inner: &sqlparser.Order{ - Expr: aggrOp.DistinctExpr, - }, - SimplifiedExpr: aggrOp.DistinctExpr, - }) - } - aggrOp.Source = &Ordering{ - Source: aggrOp.Source, - Order: orderBys, - } - return in, rewrite.NewTree("added ordering before aggregation", in), nil + return in, res } - return rewrite.BottomUp(root, TableID, visitor, stopAtRoute) + return BottomUp(root, TableID, visitor, stopAtRoute) +} + +func addOrderingFor(aggrOp *Aggregator) { + orderBys := slice.Map(aggrOp.Grouping, func(from GroupBy) OrderBy { + return from.AsOrderBy() + }) + if aggrOp.DistinctExpr != nil { + orderBys = append(orderBys, OrderBy{ + Inner: &sqlparser.Order{ + Expr: aggrOp.DistinctExpr, + }, + SimplifiedExpr: 
aggrOp.DistinctExpr, + }) + } + aggrOp.Source = &Ordering{ + Source: aggrOp.Source, + Order: orderBys, + } } -func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) (bool, error) { +func needsOrdering(ctx *plancontext.PlanningContext, in *Aggregator) bool { requiredOrder := slice.Map(in.Grouping, func(from GroupBy) sqlparser.Expr { - return from.SimplifiedExpr + return from.Inner }) if in.DistinctExpr != nil { requiredOrder = append(requiredOrder, in.DistinctExpr) } if len(requiredOrder) == 0 { - return false, nil + return false } srcOrdering := in.Source.GetOrdering(ctx) if len(srcOrdering) < len(requiredOrder) { - return true, nil + return true } for idx, gb := range requiredOrder { if !ctx.SemTable.EqualsExprWithDeps(srcOrdering[idx].SimplifiedExpr, gb) { - return true, nil + return true } } - return false, nil + return false } -func addGroupByOnRHSOfJoin(root ops.Operator) (ops.Operator, error) { - visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func addGroupByOnRHSOfJoin(root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { join, ok := in.(*ApplyJoin) if !ok { - return in, rewrite.SameTree, nil + return in, NoRewrite } return addLiteralGroupingToRHS(join) } - return rewrite.TopDown(root, TableID, visitor, stopAtRoute) + return TopDown(root, TableID, visitor, stopAtRoute) } -func addLiteralGroupingToRHS(in *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { - _ = rewrite.Visit(in.RHS, func(op ops.Operator) error { +func addLiteralGroupingToRHS(in *ApplyJoin) (Operator, *ApplyResult) { + _ = Visit(in.RHS, func(op Operator) error { aggr, isAggr := op.(*Aggregator) if !isAggr { return nil } if len(aggr.Grouping) == 0 { gb := sqlparser.NewIntLiteral(".0") - aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb, gb, aeWrap(gb))) + aggr.Grouping = append(aggr.Grouping, NewGroupBy(gb)) } return nil }) - return in, 
rewrite.SameTree, nil + return in, NoRewrite } diff --git a/go/vt/vtgate/planbuilder/operators/plan_query.go b/go/vt/vtgate/planbuilder/operators/plan_query.go new file mode 100644 index 00000000000..ea6b88f752d --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/plan_query.go @@ -0,0 +1,167 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package operators contains the operators used to plan queries. +/* +The operators go through a few phases while planning: +1. Initial plan + In this first pass, we build an operator tree from the incoming parsed query. + At the leaves, it will contain QueryGraphs - these are the tables in the FROM clause + that we can easily do join ordering on because they are all inner joins. + All the post-processing - aggregations, sorting, limit etc. are at this stage + contained in Horizon structs. We try to push these down under routes, and expand + the ones that can't be pushed down into individual operators such as Projection, + Agreggation, Limit, etc. +2. Planning + Once the initial plan has been fully built, we go through a number of phases. + recursively running rewriters on the tree in a fixed point fashion, until we've gone + over all phases and the tree has stop changing. +3. Offset planning + Now is the time to stop working with AST objects and transform remaining expressions being + used on top of vtgate to either offsets on inputs or evalengine expressions. 
+*/ +package operators + +import ( + "fmt" + "runtime" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type ( + // helper type that implements Inputs() returning nil + noInputs struct{} + + // helper type that implements AddColumn() returning an error + noColumns struct{} + + // helper type that implements AddPredicate() returning an error + noPredicates struct{} +) + +// PlanQuery creates a query plan for a given SQL statement +func PlanQuery(ctx *plancontext.PlanningContext, stmt sqlparser.Statement) (result Operator, err error) { + defer PanicHandler(&err) + + op := translateQueryToOp(ctx, stmt) + + if DebugOperatorTree { + fmt.Println("Initial tree:") + fmt.Println(ToTree(op)) + } + + op = compact(ctx, op) + checkValid(op) + op = planQuery(ctx, op) + + _, isRoute := op.(*Route) + if !isRoute && ctx.SemTable.NotSingleRouteErr != nil { + // If we got here, we don't have a single shard plan + return nil, ctx.SemTable.NotSingleRouteErr + } + + return op, err +} + +func PanicHandler(err *error) { + if r := recover(); r != nil { + switch badness := r.(type) { + case runtime.Error: + panic(r) + case error: + *err = badness + default: + panic(r) + } + } +} + +// Inputs implements the Operator interface +func (noInputs) Inputs() []Operator { + return nil +} + +// SetInputs implements the Operator interface +func (noInputs) SetInputs(ops []Operator) { + if len(ops) > 0 { + panic("the noInputs operator does not have inputs") + } +} + +// AddColumn implements the Operator interface +func (noColumns) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser.AliasedExpr) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) 
GetColumns(*plancontext.PlanningContext) []*sqlparser.AliasedExpr { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) FindCol(*plancontext.PlanningContext, sqlparser.Expr, bool) int { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +func (noColumns) GetSelectExprs(*plancontext.PlanningContext) sqlparser.SelectExprs { + panic(vterrors.VT13001("noColumns operators have no column")) +} + +// AddPredicate implements the Operator interface +func (noPredicates) AddPredicate(*plancontext.PlanningContext, sqlparser.Expr) Operator { + panic(vterrors.VT13001("the noColumns operator cannot accept predicates")) +} + +// tryTruncateColumnsAt will see if we can truncate the columns by just asking the operator to do it for us +func tryTruncateColumnsAt(op Operator, truncateAt int) bool { + type columnTruncator interface { + setTruncateColumnCount(offset int) + } + + truncator, ok := op.(columnTruncator) + if ok { + truncator.setTruncateColumnCount(truncateAt) + return true + } + + switch op := op.(type) { + case *Limit: + return tryTruncateColumnsAt(op.Source, truncateAt) + case *SubQuery: + for _, offset := range op.Vars { + if offset >= truncateAt { + return false + } + } + return tryTruncateColumnsAt(op.Outer, truncateAt) + default: + return false + } +} + +func transformColumnsToSelectExprs(ctx *plancontext.PlanningContext, op Operator) sqlparser.SelectExprs { + columns := op.GetColumns(ctx) + selExprs := slice.Map(columns, func(from *sqlparser.AliasedExpr) sqlparser.SelectExpr { + return from + }) + return selExprs +} diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go index 1c751467890..41b83c8f7fe 100644 --- a/go/vt/vtgate/planbuilder/operators/projection.go +++ b/go/vt/vtgate/planbuilder/operators/projection.go @@ -25,8 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" - 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -34,7 +32,7 @@ import ( // Projection is used when we need to evaluate expressions on the vtgate // It uses the evalengine to accomplish its goal type Projection struct { - Source ops.Operator + Source Operator // Columns contain the expressions as viewed from the outside of this operator Columns ProjCols @@ -79,7 +77,6 @@ type ( ProjCols interface { GetColumns() []*sqlparser.AliasedExpr GetSelectExprs() sqlparser.SelectExprs - AddColumn(*sqlparser.AliasedExpr) (ProjCols, int, error) } // Used when there are stars in the expressions that we were unable to expand @@ -128,7 +125,7 @@ func newProjExprWithInner(ae *sqlparser.AliasedExpr, in sqlparser.Expr) *ProjExp } } -func newAliasedProjection(src ops.Operator) *Projection { +func newAliasedProjection(src Operator) *Projection { return &Projection{ Source: src, Columns: AliasedProjections{}, @@ -139,17 +136,13 @@ func (sp StarProjections) GetColumns() []*sqlparser.AliasedExpr { panic(vterrors.VT09015()) } -func (sp StarProjections) AddColumn(*sqlparser.AliasedExpr) (ProjCols, int, error) { - return nil, 0, vterrors.VT09015() -} - func (sp StarProjections) GetSelectExprs() sqlparser.SelectExprs { return sqlparser.SelectExprs(sp) } func (ap AliasedProjections) GetColumns() []*sqlparser.AliasedExpr { return slice.Map(ap, func(from *ProjExpr) *sqlparser.AliasedExpr { - return aeWrap(from.ColExpr) + return from.Original }) } @@ -159,14 +152,9 @@ func (ap AliasedProjections) GetSelectExprs() sqlparser.SelectExprs { }) } -func (ap AliasedProjections) AddColumn(col *sqlparser.AliasedExpr) (ProjCols, int, error) { - offset := len(ap) - return append(ap, newProjExpr(col)), offset, nil -} - func (pe *ProjExpr) String() string { var alias, expr, info string - if !pe.Original.As.IsEmpty() { + if 
pe.Original.As.NotEmpty() { alias = " AS " + pe.Original.As.String() } if sqlparser.Equals.Expr(pe.EvalExpr, pe.ColExpr) { @@ -194,22 +182,30 @@ var _ selectExpressions = (*Projection)(nil) // createSimpleProjection returns a projection where all columns are offsets. // used to change the name and order of the columns in the final output -func createSimpleProjection(ctx *plancontext.PlanningContext, qp *QueryProjection, src ops.Operator) (*Projection, error) { +func createSimpleProjection(ctx *plancontext.PlanningContext, selExprs []sqlparser.SelectExpr, src Operator) *Projection { p := newAliasedProjection(src) - for _, e := range qp.SelectExprs { - ae, err := e.GetAliasedExpr() - if err != nil { - return nil, err + for _, e := range selExprs { + ae, isAe := e.(*sqlparser.AliasedExpr) + if !isAe { + panic(vterrors.VT09015()) } + + if ae.As.IsEmpty() { + // if we don't have an alias, we can use the column name as the alias + // the expectation is that when users use columns without aliases, they want the column name as the alias + // for more complex expressions, we just assume they'll use column offsets instead of column names + col, ok := ae.Expr.(*sqlparser.ColName) + if ok { + ae.As = col.Name + } + } + offset := p.Source.AddColumn(ctx, true, false, ae) expr := newProjExpr(ae) expr.Info = Offset(offset) - _, err = p.addProjExpr(expr) - if err != nil { - return nil, err - } + p.addProjExpr(expr) } - return p, nil + return p } // canPush returns false if the projection has subquery expressions in it and the subqueries have not yet @@ -244,6 +240,14 @@ func (p *Projection) isDerived() bool { return p.DT != nil } +func (p *Projection) derivedName() string { + if p.DT == nil { + return "" + } + + return p.DT.Alias +} + func (p *Projection) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { ap, err := p.GetAliasedProjections() if err != nil { @@ -263,57 +267,79 @@ func (p *Projection) FindCol(ctx *plancontext.PlanningContext, expr 
sqlparser.Ex return -1 } -func (p *Projection) addProjExpr(pe *ProjExpr) (int, error) { +func (p *Projection) addProjExpr(pe ...*ProjExpr) int { ap, err := p.GetAliasedProjections() if err != nil { - return 0, err + panic(err) } offset := len(ap) - ap = append(ap, pe) + ap = append(ap, pe...) p.Columns = ap - return offset, nil + return offset } -func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) (int, error) { +func (p *Projection) addUnexploredExpr(ae *sqlparser.AliasedExpr, e sqlparser.Expr) int { return p.addProjExpr(newProjExprWithInner(ae, e)) } -func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) error { +func (p *Projection) addSubqueryExpr(ae *sqlparser.AliasedExpr, expr sqlparser.Expr, sqs ...*SubQuery) { pe := newProjExprWithInner(ae, expr) pe.Info = SubQueryExpression(sqs) - _, err := p.addProjExpr(pe) - return err + _ = p.addProjExpr(pe) } func (p *Projection) addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, _ bool) int { - column, err := p.addColumn(ctx, true, false, expr, false) - if err != nil { - panic(err) - } - return column + return p.addColumn(ctx, true, false, expr, false) } func (p *Projection) addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, _ []bool, exprs []*sqlparser.AliasedExpr) []int { offsets := make([]int, len(exprs)) for idx, expr := range exprs { - offset, err := p.addColumn(ctx, reuse, false, expr, false) - if err != nil { - panic(err) - } + offset := p.addColumn(ctx, reuse, false, expr, false) offsets[idx] = offset } return offsets } -func (p *Projection) AddColumn(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy bool, ae *sqlparser.AliasedExpr) int { - column, err := p.addColumn(ctx, reuse, addToGroupBy, ae, true) - if err != nil { - panic(err) +func (p *Projection) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + cols, aliased := 
p.Columns.(AliasedProjections) + if !aliased { + panic(vterrors.VT09015()) + } + + if offset >= len(cols) || offset < 0 { + panic(vterrors.VT13001(fmt.Sprintf("offset [%d] out of range [%d]", offset, len(cols)))) + } + + expr := cols[offset].EvalExpr + ws := weightStringFor(expr) + if offset := p.FindCol(ctx, ws, underRoute); offset >= 0 { + // if we already have this column, we can just return the offset + return offset + } + + aeWs := aeWrap(ws) + pe := newProjExprWithInner(aeWs, ws) + if underRoute { + return p.addProjExpr(pe) + } + + // we need to push down this column to our input + offsetOnInput := p.Source.FindCol(ctx, expr, false) + if offsetOnInput >= 0 { + // if we are not getting this from the source, we can solve this at offset planning time + inputOffset := p.Source.AddWSColumn(ctx, offsetOnInput, false) + pe.Info = Offset(inputOffset) } - return column + + return p.addProjExpr(pe) +} + +func (p *Projection) AddColumn(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy bool, ae *sqlparser.AliasedExpr) int { + return p.addColumn(ctx, reuse, addToGroupBy, ae, true) } func (p *Projection) addColumn( @@ -322,13 +348,13 @@ func (p *Projection) addColumn( addToGroupBy bool, ae *sqlparser.AliasedExpr, push bool, -) (int, error) { +) int { expr := p.DT.RewriteExpression(ctx, ae.Expr) if reuse { offset := p.FindCol(ctx, expr, false) if offset >= 0 { - return offset, nil + return offset } } @@ -337,7 +363,7 @@ func (p *Projection) addColumn( if ok { cols, ok := p.Columns.(AliasedProjections) if !ok { - return 0, vterrors.VT09015() + panic(vterrors.VT09015()) } for _, projExpr := range cols { if ctx.SemTable.EqualsExprWithDeps(ws.Expr, projExpr.ColExpr) { @@ -364,7 +390,7 @@ func (po Offset) expr() {} func (po *EvalEngine) expr() {} func (po SubQueryExpression) expr() {} -func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { +func (p *Projection) Clone(inputs []Operator) Operator { return &Projection{ Source: inputs[0], Columns: p.Columns, // 
TODO don't think we need to deep clone here @@ -373,15 +399,15 @@ func (p *Projection) Clone(inputs []ops.Operator) ops.Operator { } } -func (p *Projection) Inputs() []ops.Operator { - return []ops.Operator{p.Source} +func (p *Projection) Inputs() []Operator { + return []Operator{p.Source} } -func (p *Projection) SetInputs(operators []ops.Operator) { +func (p *Projection) SetInputs(operators []Operator) { p.Source = operators[0] } -func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (p *Projection) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // we just pass through the predicate to our source p.Source = p.Source.AddPredicate(ctx, expr) return p @@ -399,7 +425,7 @@ func (p *Projection) GetSelectExprs(*plancontext.PlanningContext) sqlparser.Sele var output sqlparser.SelectExprs for _, pe := range cols { ae := &sqlparser.AliasedExpr{Expr: pe.EvalExpr} - if !pe.Original.As.IsEmpty() { + if pe.Original.As.NotEmpty() { ae.As = pe.Original.As } else if !sqlparser.Equals.Expr(ae.Expr, pe.Original.Expr) { ae.As = sqlparser.NewIdentifierCI(pe.Original.ColumnName()) @@ -412,24 +438,29 @@ func (p *Projection) GetSelectExprs(*plancontext.PlanningContext) sqlparser.Sele } } -func (p *Projection) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (p *Projection) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return p.Source.GetOrdering(ctx) } // AllOffsets returns a slice of integer offsets for all columns in the Projection // if all columns are of type Offset. If any column is not of type Offset, it returns nil. 
-func (p *Projection) AllOffsets() (cols []int) { +func (p *Projection) AllOffsets() (cols []int, colNames []string) { ap, err := p.GetAliasedProjections() if err != nil { - return nil + return nil, nil } for _, c := range ap { offset, ok := c.Info.(Offset) if !ok { - return nil + return nil, nil + } + colName := "" + if c.Original.As.NotEmpty() { + colName = c.Original.As.String() } cols = append(cols, int(offset)) + colNames = append(colNames, colName) } return } @@ -454,24 +485,24 @@ func (p *Projection) ShortDescription() string { return strings.Join(result, ", ") } -func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) Compact(ctx *plancontext.PlanningContext) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } // for projections that are not derived tables, we can check if it is safe to remove or not needed := false for i, projection := range ap { e, ok := projection.Info.(Offset) - if !ok || int(e) != i { + if !ok || int(e) != i || projection.Original.As.NotEmpty() { needed = true break } } if !needed { - return p.Source, rewrite.NewTree("removed projection only passing through the input", p), nil + return p.Source, Rewrote("removed projection only passing through the input") } switch src := p.Source.(type) { @@ -480,68 +511,66 @@ func (p *Projection) Compact(ctx *plancontext.PlanningContext) (ops.Operator, *r case *ApplyJoin: return p.compactWithJoin(ctx, src) } - return p, rewrite.SameTree, nil + return p, NoRewrite } -func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithJoin(ctx *plancontext.PlanningContext, join *ApplyJoin) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } var newColumns []int - 
var newColumnsAST []JoinColumn + newColumnsAST := &applyJoinColumns{} for _, col := range ap { switch colInfo := col.Info.(type) { case Offset: + if col.Original.As.NotEmpty() { + return p, NoRewrite + } newColumns = append(newColumns, join.Columns[colInfo]) - newColumnsAST = append(newColumnsAST, join.JoinColumns[colInfo]) + newColumnsAST.add(join.JoinColumns.columns[colInfo]) case nil: if !ctx.SemTable.EqualsExprWithDeps(col.EvalExpr, col.ColExpr) { // the inner expression is different from what we are presenting to the outside - this means we need to evaluate - return p, rewrite.SameTree, nil + return p, NoRewrite } - offset := slices.IndexFunc(join.JoinColumns, func(jc JoinColumn) bool { - return ctx.SemTable.EqualsExprWithDeps(jc.Original.Expr, col.ColExpr) - }) + offset := slices.IndexFunc(join.JoinColumns.columns, applyJoinCompare(ctx, col.ColExpr)) if offset < 0 { - return p, rewrite.SameTree, nil + return p, NoRewrite } if len(join.Columns) > 0 { newColumns = append(newColumns, join.Columns[offset]) } - newColumnsAST = append(newColumnsAST, join.JoinColumns[offset]) + newColumnsAST.add(join.JoinColumns.columns[offset]) default: - return p, rewrite.SameTree, nil + return p, NoRewrite } } join.Columns = newColumns join.JoinColumns = newColumnsAST - return join, rewrite.NewTree("remove projection from before join", join), nil + return join, Rewrote("remove projection from before join") } -func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (ops.Operator, *rewrite.ApplyResult, error) { +func (p *Projection) compactWithRoute(ctx *plancontext.PlanningContext, rb *Route) (Operator, *ApplyResult) { ap, err := p.GetAliasedProjections() if err != nil { - return p, rewrite.SameTree, nil + return p, NoRewrite } for i, col := range ap { offset, ok := col.Info.(Offset) if !ok || int(offset) != i { - return p, rewrite.SameTree, nil + return p, NoRewrite } } columns := rb.GetColumns(ctx) - if err != nil { - return nil, nil, err - } if 
len(columns) == len(ap) { - return rb, rewrite.NewTree("remove projection from before route", rb), nil + return rb, Rewrote("remove projection from before route") } rb.ResultColumns = len(columns) - return rb, rewrite.SameTree, nil + return rb, NoRewrite } // needsEvaluation finds the expression given by this argument and checks if the inside and outside expressions match @@ -561,7 +590,7 @@ func (p *Projection) needsEvaluation(ctx *plancontext.PlanningContext, e sqlpars return false } -func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) { +func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) Operator { ap, err := p.GetAliasedProjections() if err != nil { panic(err) @@ -569,7 +598,10 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) { for _, pe := range ap { switch pe.Info.(type) { - case *Offset, *EvalEngine: + case Offset: + pe.EvalExpr = useOffsets(ctx, pe.EvalExpr, p) + continue + case *EvalEngine: continue } @@ -588,6 +620,7 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) { eexpr, err := evalengine.Translate(rewritten, &evalengine.Config{ ResolveType: ctx.SemTable.TypeForExpr, Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), }) if err != nil { panic(err) @@ -597,6 +630,7 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) { EExpr: eexpr, } } + return nil } func (p *Projection) introducesTableID() semantics.TableSet { diff --git a/go/vt/vtgate/planbuilder/operators/projection_pushing.go b/go/vt/vtgate/planbuilder/operators/projection_pushing.go new file mode 100644 index 00000000000..56f829fc7f5 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/projection_pushing.go @@ -0,0 +1,497 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "fmt" + "slices" + + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type ( + projector struct { + columns []*ProjExpr + columnAliases []string + explicitColumnAliases bool + tableName sqlparser.TableName + } +) + +// add introduces a new projection with the specified alias to the projector. +func (p *projector) add(pe *ProjExpr, alias string) { + p.columns = append(p.columns, pe) + if alias != "" && slices.Index(p.columnAliases, alias) > -1 { + panic("alias already used") + } + p.columnAliases = append(p.columnAliases, alias) +} + +// get finds or adds an expression in the projector, returning its SQL representation with the appropriate alias +func (p *projector) get(ctx *plancontext.PlanningContext, expr sqlparser.Expr) sqlparser.Expr { + for _, column := range p.columns { + if ctx.SemTable.EqualsExprWithDeps(expr, column.ColExpr) { + out := sqlparser.NewColName(column.Original.ColumnName()) + out.Qualifier = p.tableName + + ctx.SemTable.CopySemanticInfo(expr, out) + return out + } + } + + // we could not find the expression, so we add it + alias := sqlparser.UnescapedString(expr) + pe := newProjExpr(sqlparser.NewAliasedExpr(expr, alias)) + p.columns = append(p.columns, pe) + p.columnAliases = append(p.columnAliases, alias) + + out := sqlparser.NewColName(alias) + out.Qualifier = p.tableName + + ctx.SemTable.CopySemanticInfo(expr, out) + + return out +} + +// 
tryPushProjection attempts to optimize a projection by pushing it down in the query plan +func tryPushProjection( + ctx *plancontext.PlanningContext, + p *Projection, +) (Operator, *ApplyResult) { + switch src := p.Source.(type) { + case *Route: + return Swap(p, src, "push projection under route") + case *Limit: + return Swap(p, src, "push projection under limit") + case *ApplyJoin: + if p.FromAggr || !p.canPush(ctx) { + return p, NoRewrite + } + return pushProjectionInApplyJoin(ctx, p, src) + case *HashJoin: + if !p.canPush(ctx) { + return p, NoRewrite + } + return pushProjectionThroughHashJoin(ctx, p, src) + case *Vindex: + if !p.canPush(ctx) { + return p, NoRewrite + } + return pushProjectionInVindex(ctx, p, src) + case *SubQueryContainer: + if !p.canPush(ctx) { + return p, NoRewrite + } + return pushProjectionToOuterContainer(ctx, p, src) + case *SubQuery: + return pushProjectionToOuter(ctx, p, src) + default: + return p, NoRewrite + } +} + +// pushProjectionThroughHashJoin optimizes projection operations within a hash join +func pushProjectionThroughHashJoin(ctx *plancontext.PlanningContext, p *Projection, hj *HashJoin) (Operator, *ApplyResult) { + cols := p.Columns.(AliasedProjections) + for _, col := range cols { + if !col.isSameInAndOut(ctx) { + return p, NoRewrite + } + hj.columns.add(col.ColExpr) + } + return hj, Rewrote("merged projection into hash join") +} + +func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (Operator, *ApplyResult) { + ap, err := p.GetAliasedProjections() + if err != nil { + return p, NoRewrite + } + + if !reachedPhase(ctx, subquerySettling) { + return p, NoRewrite + } + + outer := TableID(sq.Outer) + for _, pe := range ap { + _, isOffset := pe.Info.(Offset) + if isOffset { + continue + } + + if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { + return p, NoRewrite + } + + se, ok := pe.Info.(SubQueryExpression) + if ok { + pe.EvalExpr = rewriteColNameToArgument(ctx, pe.EvalExpr, se, 
sq) + } + } + // all projections can be pushed to the outer + sq.Outer, p.Source = p, sq.Outer + return sq, Rewrote("push projection into outer side of subquery") +} + +func pushProjectionInVindex( + ctx *plancontext.PlanningContext, + p *Projection, + src *Vindex, +) (Operator, *ApplyResult) { + ap, err := p.GetAliasedProjections() + if err != nil { + panic(err) + } + for _, pe := range ap { + src.AddColumn(ctx, true, false, aeWrap(pe.EvalExpr)) + } + return src, Rewrote("push projection into vindex") +} + +func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (Operator, *ApplyResult) { + ap, err := p.GetAliasedProjections() + if err != nil { + return p, NoRewrite + } + + outer := TableID(src.Outer) + for _, pe := range ap { + _, isOffset := pe.Info.(Offset) + if isOffset { + continue + } + + if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { + return p, NoRewrite + } + + if se, ok := pe.Info.(SubQueryExpression); ok { + pe.EvalExpr = rewriteColNameToArgument(ctx, pe.EvalExpr, se, src.Inner...) + } + } + // all projections can be pushed to the outer + src.Outer, p.Source = p, src.Outer + return src, Rewrote("push projection into outer side of subquery container") +} + +// nullInNullOutExpr returns true if the expression will return NULL if any of its inputs are NULL +// When we are evaluating an ApplyJoin, the expressions that have any dependency on the outer side of the join +// will be sent to the outer side of the join. If the expression is null intolerant, then we can push it down, +// and the result would be NULL for missing matches from the outer side. If the expression is something that can +// return values other than NULL, like `COALESCE(tbl.foo, 'bar')`, then we can't push it down, because we would +// get a different result if the outer side is missing. +func nullInNullOutExpr(expr sqlparser.Expr) bool { + // TODO: This is a very basic implementation. 
We should expand this to handle more cases. + switch expr.(type) { + case *sqlparser.ColName: + return true + default: + return false + } +} + +// pushProjectionInApplyJoin optimizes the ApplyJoin operation by pushing down the projection operation into it. This function works as follows: +// +// 1. It traverses each input column of the projection operation. +// 2. For each column, it generates new JoinPredicates for the ApplyJoin operation. These predicates are derived from the column's expression. +/* +Here's an ASCII representation of the transformation: + Before: + Projection[L.colX, R.colY] + | + ApplyJoin + / \ + LHS RHS + After: + ApplyJoin + / \ + Projection[L.colX] Projection[R.colY] + | | + LHS RHS +*/ +// In the transformed state, if necessary, new Projection operators are created for the left and right children of the ApplyJoin operation. +// These Projections can then hopefully be pushed down under a Route or Limit operation. +func pushProjectionInApplyJoin( + ctx *plancontext.PlanningContext, + p *Projection, + src *ApplyJoin, +) (Operator, *ApplyResult) { + ap, err := p.GetAliasedProjections() + if err != nil { + // we can't push down expression evaluation to the rhs if we are not sure if it will even be executed + return p, NoRewrite + } + if IsOuter(src) { + // for outer joins, we have to check that we can send down the projection to the rhs + for _, expr := range ap.GetColumns() { + if !nullInNullOutExpr(expr.Expr) { + return p, NoRewrite + } + } + } + + lhs, rhs := &projector{}, &projector{} + if p.DT != nil && len(p.DT.Columns) > 0 { + lhs.explicitColumnAliases = true + rhs.explicitColumnAliases = true + } + + // We store the original join columns to reuse them. + originalJoinColumns := src.JoinColumns + src.JoinColumns = &applyJoinColumns{} + for idx, pe := range ap { + // First we check if we have already done the work to find how to push this expression. + // If we find it then we can directly use it. 
This is not just a performance improvement, but + // is also required for pushing a projection that is just an alias. + foundIdx := slices.IndexFunc(originalJoinColumns.columns, applyJoinCompare(ctx, pe.ColExpr)) + if foundIdx != -1 { + src.JoinColumns.add(originalJoinColumns.columns[foundIdx]) + continue + } + var alias string + if p.DT != nil && len(p.DT.Columns) > 0 { + if len(p.DT.Columns) <= idx { + panic(vterrors.VT13001("no such alias found for derived table")) + } + alias = p.DT.Columns[idx].String() + } + splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, alias, p.DT) + } + + if p.isDerived() { + exposeColumnsThroughDerivedTable(ctx, p, src, lhs, rhs) + } + + // Create and update the Projection operators for the left and right children, if needed. + src.LHS = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) + src.RHS = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) + + return src, Rewrote("split projection to either side of join") +} + +// splitProjectionAcrossJoin creates JoinPredicates for all projections, +// and pushes down columns as needed between the LHS and RHS of a join +func splitProjectionAcrossJoin( + ctx *plancontext.PlanningContext, + join *ApplyJoin, + lhs, rhs *projector, + pe *ProjExpr, + colAlias string, + dt *DerivedTable, +) { + switch pe.Info.(type) { + case Offset, nil: + // for offsets, we'll just treat the expression as unexplored, and later stages will handle the new offset + join.JoinColumns.add(splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias, dt)) + case SubQueryExpression: + join.JoinColumns.add(splitSubqueryExpression(ctx, join, lhs, rhs, pe, colAlias)) + default: + panic(vterrors.VT13001(fmt.Sprintf("unknown projection type %T", pe.Info))) + } +} + +func splitSubqueryExpression( + ctx *plancontext.PlanningContext, + join *ApplyJoin, + lhs, rhs *projector, + pe *ProjExpr, + alias string, +) applyJoinColumn { + col := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) + return 
pushDownSplitJoinCol(col, lhs, pe, alias, rhs) +} + +func splitUnexploredExpression( + ctx *plancontext.PlanningContext, + join *ApplyJoin, + lhs, rhs *projector, + pe *ProjExpr, + alias string, + dt *DerivedTable, +) applyJoinColumn { + original := sqlparser.CloneRefOfAliasedExpr(pe.Original) + expr := pe.ColExpr + + var colName *sqlparser.ColName + if dt != nil { + if !pe.isSameInAndOut(ctx) { + panic(vterrors.VT13001("derived table columns must be the same in and out")) + } + colName = sqlparser.NewColNameWithQualifier(pe.Original.ColumnName(), sqlparser.NewTableName(dt.Alias)) + ctx.SemTable.CopySemanticInfo(expr, colName) + } + + // Get a applyJoinColumn for the current expression. + col := join.getJoinColumnFor(ctx, original, expr, false) + col.DTColName = colName + + return pushDownSplitJoinCol(col, lhs, pe, alias, rhs) +} + +func pushDownSplitJoinCol(col applyJoinColumn, lhs *projector, pe *ProjExpr, alias string, rhs *projector) applyJoinColumn { + // Update the left and right child columns and names based on the applyJoinColumn type. + switch { + case col.IsPureLeft(): + lhs.add(pe, alias) + case col.IsPureRight(): + rhs.add(pe, alias) + case col.IsMixedLeftAndRight(): + for _, lhsExpr := range col.LHSExprs { + var lhsAlias string + if alias != "" { + // we need to add an explicit column alias here. let's try just the ColName as is first + lhsAlias = sqlparser.String(lhsExpr.Expr) + } + lhs.add(newProjExpr(aeWrap(lhsExpr.Expr)), lhsAlias) + } + innerPE := newProjExprWithInner(pe.Original, col.RHSExpr) + innerPE.ColExpr = col.RHSExpr + innerPE.Info = pe.Info + rhs.add(innerPE, alias) + } + return col +} + +// exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table +// in order to make them accessible outside the derived table. This is necessary when swapping the +// positions of the derived table and join operation. +// +// For example, consider the input query: +// select ... 
from (select T1.foo from T1 join T2 on T1.id = T2.id) as t +// If we push the derived table under the join, with T1 on the LHS of the join, we need to expose +// the values of T1.id through the derived table, or they will not be accessible on the RHS. +// +// The function iterates through each join predicate, rewriting the expressions in the predicate's +// LHS expressions to include the derived table. This allows the expressions to be accessed outside +// the derived table. +func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs, rhs *projector) { + derivedTbl, err := ctx.SemTable.TableInfoFor(p.DT.TableID) + if err != nil { + panic(err) + } + derivedTblName, err := derivedTbl.Name() + if err != nil { + panic(err) + } + lhs.tableName = derivedTblName + rhs.tableName = derivedTblName + + lhsIDs := TableID(src.LHS) + rhsIDs := TableID(src.RHS) + rewriteColumnsForJoin(ctx, src.JoinPredicates.columns, lhsIDs, rhsIDs, lhs, rhs) +} + +func rewriteColumnsForJoin( + ctx *plancontext.PlanningContext, + columns []applyJoinColumn, + lhsIDs, rhsIDs semantics.TableSet, + lhs, rhs *projector, +) { + for colIdx, column := range columns { + for lhsIdx, bve := range column.LHSExprs { + // since this is on the LHSExprs, we know that dependencies are from that side of the join + column.LHSExprs[lhsIdx].Expr = lhs.get(ctx, bve.Expr) + } + if column.IsPureLeft() { + continue + } + + // The RHSExprs are the expressions on the RHS of the join, and these have already been pushed down on the RHS + // of the ApplyJoin. These expressions don't need to be exposed through the derived table, they are just + // receiving the expressions from the LHS of the join using parameters. 
+ + var rewriteTo sqlparser.Expr + + pre := func(node, _ sqlparser.SQLNode) bool { + // We are looking for ColNames that belong to either the RHS or LHS of the join + // We'll replace these with columns being passed through the derived table + var col *sqlparser.ColName + switch node := node.(type) { + case *sqlparser.ColName: + col = node + case *sqlparser.Subquery: + return false + default: + return true + } + + deps := ctx.SemTable.RecursiveDeps(col) + + switch { + case deps.IsSolvedBy(lhsIDs): + rewriteTo = lhs.get(ctx, col) + return false + case deps.IsSolvedBy(rhsIDs): + return false + default: + return true + } + } + + post := func(cursor *sqlparser.CopyOnWriteCursor) { + if rewriteTo != nil { + cursor.Replace(rewriteTo) + rewriteTo = nil + return + } + } + newOriginal := sqlparser.CopyOnRewrite(column.Original, pre, post, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) + column.Original = newOriginal + + columns[colIdx] = column + } +} + +// prefixColNames adds qualifier prefixes to all ColName:s. 
+// We want to be more explicit than the user was to make sure we never produce invalid SQL +func prefixColNames(ctx *plancontext.PlanningContext, tblName sqlparser.TableName, e sqlparser.Expr) sqlparser.Expr { + return sqlparser.CopyOnRewrite(e, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok { + return + } + cursor.Replace(sqlparser.NewColNameWithQualifier(col.Name.String(), tblName)) + }, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) +} + +func createProjectionWithTheseColumns( + ctx *plancontext.PlanningContext, + src Operator, + p *projector, + dt *DerivedTable, +) Operator { + if len(p.columns) == 0 { + return src + } + proj := createProjection(ctx, src, "") + proj.Columns = AliasedProjections(p.columns) + if dt != nil { + kopy := *dt + if p.explicitColumnAliases { + kopy.Columns = slice.Map(p.columnAliases, func(s string) sqlparser.IdentifierCI { + return sqlparser.NewIdentifierCI(s) + }) + } + proj.DT = &kopy + } + + return proj +} diff --git a/go/vt/vtgate/planbuilder/operators/query_planning.go b/go/vt/vtgate/planbuilder/operators/query_planning.go index e66abd02feb..f2625bcb90b 100644 --- a/go/vt/vtgate/planbuilder/operators/query_planning.go +++ b/go/vt/vtgate/planbuilder/operators/query_planning.go @@ -20,78 +20,60 @@ import ( "fmt" "io" + "vitess.io/vitess/go/vt/vtgate/engine" + + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" + "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -type ( - projector struct { - columns []*ProjExpr - columnAliases sqlparser.Columns - explicitColumnAliases bool +func planQuery(ctx *plancontext.PlanningContext, root Operator) Operator { + var selExpr sqlparser.SelectExprs + if horizon, isHorizon := root.(*Horizon); isHorizon { + 
sel := sqlparser.GetFirstSelect(horizon.Query) + selExpr = sqlparser.CloneSelectExprs(sel.SelectExprs) } -) -func planQuery(ctx *plancontext.PlanningContext, root ops.Operator) (output ops.Operator, err error) { - output, err = runPhases(ctx, root) - if err != nil { - return nil, err - } + output := runPhases(ctx, root) + output = planOffsets(ctx, output) - output, err = planOffsets(ctx, output) - if err != nil { - return nil, err - } - - if rewrite.DebugOperatorTree { + if DebugOperatorTree { fmt.Println("After offset planning:") - fmt.Println(ops.ToTree(output)) + fmt.Println(ToTree(output)) } - output, err = compact(ctx, output) - if err != nil { - return nil, err - } + output = compact(ctx, output) - return addTruncationOrProjectionToReturnOutput(ctx, root, output) + return addTruncationOrProjectionToReturnOutput(ctx, selExpr, output) } // runPhases is the process of figuring out how to perform the operations in the Horizon // If we can push it under a route - done. // If we can't, we will instead expand the Horizon into // smaller operators and try to push these down as far as possible -func runPhases(ctx *plancontext.PlanningContext, root ops.Operator) (op ops.Operator, err error) { - op = root - for _, phase := range getPhases(ctx) { +func runPhases(ctx *plancontext.PlanningContext, root Operator) Operator { + op := root + + p := phaser{} + for phase := p.next(ctx); phase != DONE; phase = p.next(ctx) { ctx.CurrentPhase = int(phase) - if rewrite.DebugOperatorTree { + if DebugOperatorTree { fmt.Printf("PHASE: %s\n", phase.String()) } - op, err = phase.act(ctx, op) - if err != nil { - return nil, err - } - - op, err = runRewriters(ctx, op) - if err != nil { - return nil, err - } - - op, err = compact(ctx, op) - if err != nil { - return nil, err - } + op = phase.act(ctx, op) + op = runRewriters(ctx, op) + op = compact(ctx, op) } return addGroupByOnRHSOfJoin(op) } -func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Operator, error) { - 
visitor := func(in ops.Operator, _ semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func runRewriters(ctx *plancontext.PlanningContext, root Operator) Operator { + visitor := func(in Operator, _ semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { switch in := in.(type) { case *Horizon: return pushOrExpandHorizon(ctx, in) @@ -100,7 +82,7 @@ func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Oper case *Projection: return tryPushProjection(ctx, in) case *Limit: - return tryPushLimit(in) + return tryPushLimit(ctx, in) case *Ordering: return tryPushOrdering(ctx, in) case *Aggregator: @@ -117,24 +99,71 @@ func runRewriters(ctx *plancontext.PlanningContext, root ops.Operator) (ops.Oper return optimizeQueryGraph(ctx, in) case *LockAndComment: return pushLockAndComment(in) + case *Delete: + return tryPushDelete(in) + case *Update: + return tryPushUpdate(in) default: - return in, rewrite.SameTree, nil + return in, NoRewrite } } - return rewrite.FixedPointBottomUp(root, TableID, visitor, stopAtRoute) + return FixedPointBottomUp(root, TableID, visitor, stopAtRoute) } -func pushLockAndComment(l *LockAndComment) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushDelete(in *Delete) (Operator, *ApplyResult) { + if src, ok := in.Source.(*Route); ok { + return pushDMLUnderRoute(in, src, "pushed delete under route") + } + return in, NoRewrite +} + +func tryPushUpdate(in *Update) (Operator, *ApplyResult) { + if src, ok := in.Source.(*Route); ok { + return pushDMLUnderRoute(in, src, "pushed update under route") + } + return in, NoRewrite +} + +func pushDMLUnderRoute(in Operator, src *Route, msg string) (Operator, *ApplyResult) { + switch r := src.Routing.(type) { + case *SequenceRouting: + // Sequences are just unsharded routes + src.Routing = &AnyShardRouting{ + keyspace: r.keyspace, + } + case *AnyShardRouting: + // References would have an unsharded source + // Alternates are not required. 
+ r.Alternates = nil + } + return Swap(in, src, msg) +} + +func pushLockAndComment(l *LockAndComment) (Operator, *ApplyResult) { switch src := l.Source.(type) { case *Horizon, *QueryGraph: // we want to wait until the horizons have been pushed under a route or expanded // that way we know that we've replaced the QueryGraphs with Routes - return l, rewrite.SameTree, nil + return l, NoRewrite case *Route: src.Comments = l.Comments - src.Lock = l.Lock - return src, rewrite.NewTree("put lock and comment into route", l), nil + src.Lock = l.Lock.GetHighestOrderLock(src.Lock) + return src, Rewrote("put lock and comment into route") + case *SubQueryContainer: + src.Outer = &LockAndComment{ + Source: src.Outer, + Comments: l.Comments, + Lock: l.Lock, + } + for _, sq := range src.Inner { + sq.Subquery = &LockAndComment{ + Source: sq.Subquery, + Comments: l.Comments, + Lock: l.Lock, + } + } + return src, Rewrote("push lock and comment into subquery container") default: inputs := src.Inputs() for i, op := range inputs { @@ -145,23 +174,20 @@ func pushLockAndComment(l *LockAndComment) (ops.Operator, *rewrite.ApplyResult, } } src.SetInputs(inputs) - return src, rewrite.NewTree("pushed down lock and comments", l), nil + return src, Rewrote("pushed down lock and comments") } } -func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (Operator, *ApplyResult) { if in.IsDerived() { - newOp, result, err := pushDerived(ctx, in) - if err != nil { - return nil, nil, err - } - if result != rewrite.SameTree { - return newOp, result, nil + newOp, result := pushDerived(ctx, in) + if result != NoRewrite { + return newOp, result } } if !reachedPhase(ctx, initialPlanning) { - return in, rewrite.SameTree, nil + return in, NoRewrite } if ctx.SemTable.QuerySignature.SubQueries { @@ -170,15 +196,12 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in 
*Horizon) (ops.Ope rb, isRoute := in.src().(*Route) if isRoute && rb.IsSingleShard() { - return rewrite.Swap(in, rb, "push horizon into route") + return Swap(in, rb, "push horizon into route") } sel, isSel := in.selectStatement().(*sqlparser.Select) - qp, err := in.getQP(ctx) - if err != nil { - return nil, nil, err - } + qp := in.getQP(ctx) needsOrdering := len(qp.OrderExprs) > 0 hasHaving := isSel && sel.Having != nil @@ -191,376 +214,167 @@ func pushOrExpandHorizon(ctx *plancontext.PlanningContext, in *Horizon) (ops.Ope in.selectStatement().GetLimit() == nil if canPush { - return rewrite.Swap(in, rb, "push horizon into route") + return Swap(in, rb, "push horizon into route") } return expandHorizon(ctx, in) } -func tryPushProjection( - ctx *plancontext.PlanningContext, - p *Projection, -) (ops.Operator, *rewrite.ApplyResult, error) { - switch src := p.Source.(type) { +func tryPushLimit(ctx *plancontext.PlanningContext, in *Limit) (Operator, *ApplyResult) { + switch src := in.Source.(type) { case *Route: - return rewrite.Swap(p, src, "push projection under route") + return tryPushingDownLimitInRoute(ctx, in, src) + case *Aggregator: + return in, NoRewrite case *ApplyJoin: - if p.FromAggr || !p.canPush(ctx) { - return p, rewrite.SameTree, nil - } - return pushProjectionInApplyJoin(ctx, p, src) - case *Vindex: - if !p.canPush(ctx) { - return p, rewrite.SameTree, nil - } - return pushProjectionInVindex(ctx, p, src) - case *SubQueryContainer: - if !p.canPush(ctx) { - return p, rewrite.SameTree, nil - } - return pushProjectionToOuterContainer(ctx, p, src) - case *SubQuery: - return pushProjectionToOuter(ctx, p, src) - case *Limit: - return rewrite.Swap(p, src, "push projection under limit") - default: - return p, rewrite.SameTree, nil - } -} - -func pushProjectionToOuter(ctx *plancontext.PlanningContext, p *Projection, sq *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { - ap, err := p.GetAliasedProjections() - if err != nil { - return p, rewrite.SameTree, nil - 
} - - if !reachedPhase(ctx, subquerySettling) || err != nil { - return p, rewrite.SameTree, nil - } - - outer := TableID(sq.Outer) - for _, pe := range ap { - _, isOffset := pe.Info.(*Offset) - if isOffset { - continue + if in.Pushed { + // This is the Top limit, and it's already pushed down + return in, NoRewrite } - - if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { - return p, rewrite.SameTree, nil + side := "RHS" + src.RHS = createPushedLimit(ctx, src.RHS, in) + if IsOuter(src) { + // for outer joins, we are guaranteed that all rows from the LHS will be returned, + // so we can push down the LIMIT to the LHS + src.LHS = createPushedLimit(ctx, src.LHS, in) + side = "RHS and LHS" } - se, ok := pe.Info.(SubQueryExpression) - if ok { - pe.EvalExpr = rewriteColNameToArgument(ctx, pe.EvalExpr, se, sq) + if in.Top { + in.Pushed = true + return in, Rewrote(fmt.Sprintf("add limit to %s of apply join", side)) } - } - // all projections can be pushed to the outer - sq.Outer, p.Source = p, sq.Outer - return sq, rewrite.NewTree("push projection into outer side of subquery", p), nil -} - -func pushProjectionInVindex( - ctx *plancontext.PlanningContext, - p *Projection, - src *Vindex, -) (ops.Operator, *rewrite.ApplyResult, error) { - ap, err := p.GetAliasedProjections() - if err != nil { - return nil, nil, err - } - for _, pe := range ap { - src.AddColumn(ctx, true, false, aeWrap(pe.EvalExpr)) - } - return src, rewrite.NewTree("push projection into vindex", p), nil -} -func (p *projector) add(pe *ProjExpr, col *sqlparser.IdentifierCI) { - p.columns = append(p.columns, pe) - if col != nil { - p.columnAliases = append(p.columnAliases, *col) + return src, Rewrote(fmt.Sprintf("push limit to %s of apply join", side)) + default: + return setUpperLimit(in) } } -// pushProjectionInApplyJoin pushes down a projection operation into an ApplyJoin operation. 
-// It processes each input column and creates new JoinPredicates for the ApplyJoin operation based on -// the input column's expression. It also creates new Projection operators for the left and right -// children of the ApplyJoin operation, if needed. -func pushProjectionInApplyJoin( - ctx *plancontext.PlanningContext, - p *Projection, - src *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { - ap, err := p.GetAliasedProjections() - if src.LeftJoin || err != nil { - // we can't push down expression evaluation to the rhs if we are not sure if it will even be executed - return p, rewrite.SameTree, nil - } - lhs, rhs := &projector{}, &projector{} - if p.DT != nil && len(p.DT.Columns) > 0 { - lhs.explicitColumnAliases = true - rhs.explicitColumnAliases = true - } - - src.JoinColumns = nil - for idx, pe := range ap { - var col *sqlparser.IdentifierCI - if p.DT != nil && idx < len(p.DT.Columns) { - col = &p.DT.Columns[idx] - } - err := splitProjectionAcrossJoin(ctx, src, lhs, rhs, pe, col) - if err != nil { - return nil, nil, err +func createPushedLimit(ctx *plancontext.PlanningContext, src Operator, orig *Limit) Operator { + pushedLimit := sqlparser.CloneRefOfLimit(orig.AST) + if pushedLimit.Offset != nil { + // we can't push down an offset, so we need to convert it to a rowcount + // by adding it to the already existing rowcount, and then let the LIMIT running on the vtgate do the rest + // this way we can still limit the number of rows that are returned + plus := &sqlparser.BinaryExpr{ + Operator: sqlparser.PlusOp, + Left: pushedLimit.Rowcount, + Right: pushedLimit.Offset, } + pushedLimit.Rowcount = getLimitExpression(ctx, plus) + pushedLimit.Offset = nil } - - if p.isDerived() { - err := exposeColumnsThroughDerivedTable(ctx, p, src, lhs) - if err != nil { - return nil, nil, err - } - } - - // Create and update the Projection operators for the left and right children, if needed. 
- src.LHS, err = createProjectionWithTheseColumns(ctx, src.LHS, lhs, p.DT) - if err != nil { - return nil, nil, err - } - - src.RHS, err = createProjectionWithTheseColumns(ctx, src.RHS, rhs, p.DT) - if err != nil { - return nil, nil, err - } - - return src, rewrite.NewTree("split projection to either side of join", src), nil -} - -// splitProjectionAcrossJoin creates JoinPredicates for all projections, -// and pushes down columns as needed between the LHS and RHS of a join -func splitProjectionAcrossJoin( - ctx *plancontext.PlanningContext, - join *ApplyJoin, - lhs, rhs *projector, - pe *ProjExpr, - colAlias *sqlparser.IdentifierCI, -) error { - - // Check if the current expression can reuse an existing column in the ApplyJoin. - if _, found := canReuseColumn(ctx, join.JoinColumns, pe.EvalExpr, joinColumnToExpr); found { - return nil - } - - col, err := splitUnexploredExpression(ctx, join, lhs, rhs, pe, colAlias) - if err != nil { - return err + return &Limit{ + Source: src, + AST: pushedLimit, } - - // Add the new JoinColumn to the ApplyJoin's JoinPredicates. - join.JoinColumns = append(join.JoinColumns, col) - return nil -} - -func splitUnexploredExpression( - ctx *plancontext.PlanningContext, - join *ApplyJoin, - lhs, rhs *projector, - pe *ProjExpr, - colAlias *sqlparser.IdentifierCI, -) (JoinColumn, error) { - // Get a JoinColumn for the current expression. - col, err := join.getJoinColumnFor(ctx, pe.Original, pe.ColExpr, false) - if err != nil { - return JoinColumn{}, err - } - - // Update the left and right child columns and names based on the JoinColumn type. - switch { - case col.IsPureLeft(): - lhs.add(pe, colAlias) - case col.IsPureRight(): - rhs.add(pe, colAlias) - case col.IsMixedLeftAndRight(): - for _, lhsExpr := range col.LHSExprs { - var lhsAlias *sqlparser.IdentifierCI - if colAlias != nil { - // we need to add an explicit column alias here. 
let's try just the ColName as is first - ci := sqlparser.NewIdentifierCI(sqlparser.String(lhsExpr.Expr)) - lhsAlias = &ci - } - lhs.add(newProjExpr(aeWrap(lhsExpr.Expr)), lhsAlias) - } - innerPE := newProjExprWithInner(pe.Original, col.RHSExpr) - innerPE.ColExpr = col.RHSExpr - innerPE.Info = pe.Info - rhs.add(innerPE, colAlias) - } - return col, nil } -// exposeColumnsThroughDerivedTable rewrites expressions within a join that is inside a derived table -// in order to make them accessible outside the derived table. This is necessary when swapping the -// positions of the derived table and join operation. -// -// For example, consider the input query: -// select ... from (select T1.foo from T1 join T2 on T1.id = T2.id) as t -// If we push the derived table under the join, with T1 on the LHS of the join, we need to expose -// the values of T1.id through the derived table, or they will not be accessible on the RHS. -// -// The function iterates through each join predicate, rewriting the expressions in the predicate's -// LHS expressions to include the derived table. This allows the expressions to be accessed outside -// the derived table. 
-func exposeColumnsThroughDerivedTable(ctx *plancontext.PlanningContext, p *Projection, src *ApplyJoin, lhs *projector) error { - derivedTbl, err := ctx.SemTable.TableInfoFor(p.DT.TableID) - if err != nil { - return err +// getLimitExpression is a helper function to simplify an expression using the evalengine +// if it's not able to simplify the expression to a literal, it will return an argument expression for :__upper_limit +func getLimitExpression(ctx *plancontext.PlanningContext, expr sqlparser.Expr) sqlparser.Expr { + cfg := evalengine.Config{ + Environment: ctx.VSchema.Environment(), } - derivedTblName, err := derivedTbl.Name() + translated, err := evalengine.Translate(expr, &cfg) if err != nil { - return err + panic(vterrors.VT13001("failed to translate expression: " + err.Error())) } - for _, predicate := range src.JoinPredicates { - for idx, bve := range predicate.LHSExprs { - expr := bve.Expr - tbl, err := ctx.SemTable.TableInfoForExpr(expr) - if err != nil { - return err - } - tblName, err := tbl.Name() - if err != nil { - return err - } - - expr = semantics.RewriteDerivedTableExpression(expr, derivedTbl) - out := prefixColNames(tblName, expr) - - alias := sqlparser.UnescapedString(out) - predicate.LHSExprs[idx].Expr = sqlparser.NewColNameWithQualifier(alias, derivedTblName) - identifierCI := sqlparser.NewIdentifierCI(alias) - projExpr := newProjExprWithInner(&sqlparser.AliasedExpr{Expr: out, As: identifierCI}, out) - var colAlias *sqlparser.IdentifierCI - if lhs.explicitColumnAliases { - colAlias = &identifierCI - } - lhs.add(projExpr, colAlias) - } + _, isLit := translated.(*evalengine.Literal) + if isLit { + return translated } - return nil -} -// prefixColNames adds qualifier prefixes to all ColName:s. 
-// We want to be more explicit than the user was to make sure we never produce invalid SQL -func prefixColNames(tblName sqlparser.TableName, e sqlparser.Expr) sqlparser.Expr { - return sqlparser.CopyOnRewrite(e, nil, func(cursor *sqlparser.CopyOnWriteCursor) { - col, ok := cursor.Node().(*sqlparser.ColName) - if !ok { - return - } - col.Qualifier = tblName - }, nil).(sqlparser.Expr) + // we were not able to calculate the expression, so we can't push it down + // the LIMIT above us will set an argument for us that we can use here + return sqlparser.NewArgument(engine.UpperLimitStr) } -func createProjectionWithTheseColumns( - ctx *plancontext.PlanningContext, - src ops.Operator, - p *projector, - dt *DerivedTable, -) (ops.Operator, error) { - if len(p.columns) == 0 { - return src, nil - } - proj, err := createProjection(ctx, src) - if err != nil { - return nil, err +func tryPushingDownLimitInRoute(ctx *plancontext.PlanningContext, in *Limit, src *Route) (Operator, *ApplyResult) { + if src.IsSingleShardOrByDestination() { + return Swap(in, src, "push limit under route") } - proj.Columns = AliasedProjections(p.columns) - if dt != nil { - kopy := *dt - kopy.Columns = p.columnAliases - proj.DT = &kopy - } - - return proj, nil -} -func tryPushLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { - switch src := in.Source.(type) { - case *Route: - return tryPushingDownLimitInRoute(in, src) - case *Aggregator: - return in, rewrite.SameTree, nil - default: + if sqlparser.IsDMLStatement(ctx.Statement) { return setUpperLimit(in) } -} -func tryPushingDownLimitInRoute(in *Limit, src *Route) (ops.Operator, *rewrite.ApplyResult, error) { - if src.IsSingleShard() { - return rewrite.Swap(in, src, "push limit under route") + // this limit has already been pushed down, nothing to do here + if in.Pushed { + return in, NoRewrite } - return setUpperLimit(in) + // when pushing a LIMIT into a Route that is not single sharded, + // we leave a LIMIT on top of the Route, and push a 
LIMIT under the Route + // This way we can still limit the number of rows that are returned + // from the Route and that way minimize unneeded processing + src.Source = createPushedLimit(ctx, src.Source, in) + in.Pushed = true + + return in, Rewrote("pushed limit under route") } -func setUpperLimit(in *Limit) (ops.Operator, *rewrite.ApplyResult, error) { +func setUpperLimit(in *Limit) (Operator, *ApplyResult) { if in.Pushed { - return in, rewrite.SameTree, nil + return in, NoRewrite } in.Pushed = true - visitor := func(op ops.Operator, _ semantics.TableSet, _ bool) (ops.Operator, *rewrite.ApplyResult, error) { - return op, rewrite.SameTree, nil + visitor := func(op Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { + return op, NoRewrite } - var result *rewrite.ApplyResult - shouldVisit := func(op ops.Operator) rewrite.VisitRule { + var result *ApplyResult + shouldVisit := func(op Operator) VisitRule { switch op := op.(type) { case *Join, *ApplyJoin, *SubQueryContainer, *SubQuery: // we can't push limits down on either side - return rewrite.SkipChildren + return SkipChildren case *Route: newSrc := &Limit{ Source: op.Source, - AST: &sqlparser.Limit{Rowcount: sqlparser.NewArgument("__upper_limit")}, - Pushed: false, + AST: &sqlparser.Limit{Rowcount: sqlparser.NewArgument(engine.UpperLimitStr)}, } op.Source = newSrc - result = result.Merge(rewrite.NewTree("push limit under route", newSrc)) - return rewrite.SkipChildren + result = result.Merge(Rewrote("push upper limit under route")) + return SkipChildren default: - return rewrite.VisitChildren + return VisitChildren } } - _, err := rewrite.TopDown(in.Source, TableID, visitor, shouldVisit) - if err != nil { - return nil, nil, err - } - return in, result, nil + TopDown(in.Source, TableID, visitor, shouldVisit) + + return in, result } -func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushOrdering(ctx *plancontext.PlanningContext, 
in *Ordering) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Route: - return rewrite.Swap(in, src, "push ordering under route") + return Swap(in, src, "push ordering under route") + case *Filter: + return Swap(in, src, "push ordering under filter") case *ApplyJoin: if canPushLeft(ctx, src, in.Order) { // ApplyJoin is stable in regard to the columns coming from the LHS, // so if all the ordering columns come from the LHS, we can push down the Ordering there src.LHS, in.Source = in, src.LHS - return src, rewrite.NewTree("push down ordering on the LHS of a join", in), nil + return src, Rewrote("push down ordering on the LHS of a join") } case *Ordering: // we'll just remove the order underneath. The top order replaces whatever was incoming in.Source = src.Source - return in, rewrite.NewTree("remove double ordering", src), nil + return in, Rewrote("remove double ordering") case *Projection: // we can move ordering under a projection if it's not introducing a column we're sorting by for _, by := range in.Order { - if !fetchByOffset(by.SimplifiedExpr) { - return in, rewrite.SameTree, nil + if !mustFetchFromInput(ctx, by.SimplifiedExpr) { + return in, NoRewrite } } - return rewrite.Swap(in, src, "push ordering under projection") + return Swap(in, src, "push ordering under projection") case *Aggregator: if !src.QP.AlignGroupByAndOrderBy(ctx) && !overlaps(ctx, in.Order, src.Grouping) { - return in, rewrite.SameTree, nil + return in, NoRewrite } return pushOrderingUnderAggr(ctx, in, src) @@ -569,30 +383,30 @@ func tryPushOrdering(ctx *plancontext.PlanningContext, in *Ordering) (ops.Operat for _, order := range in.Order { deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push ordering into outer side of subquery", in), nil + return src, Rewrote("push ordering into outer side of subquery") 
case *SubQuery: outerTableID := TableID(src.Outer) for _, order := range in.Order { deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push ordering into outer side of subquery", in), nil + return src, Rewrote("push ordering into outer side of subquery") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func overlaps(ctx *plancontext.PlanningContext, order []ops.OrderBy, grouping []GroupBy) bool { +func overlaps(ctx *plancontext.PlanningContext, order []OrderBy, grouping []GroupBy) bool { ordering: for _, orderBy := range order { for _, groupBy := range grouping { - if ctx.SemTable.EqualsExprWithDeps(orderBy.SimplifiedExpr, groupBy.SimplifiedExpr) { + if ctx.SemTable.EqualsExprWithDeps(orderBy.SimplifiedExpr, groupBy.Inner) { continue ordering } } @@ -602,13 +416,13 @@ ordering: return true } -func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, aggregator *Aggregator) (Operator, *ApplyResult) { // If Aggregator is a derived table, then we should rewrite the ordering before pushing. 
if aggregator.isDerived() { for idx, orderExpr := range order.Order { ti, err := ctx.SemTable.TableInfoFor(aggregator.DT.TableID) if err != nil { - return nil, nil, err + panic(err) } newOrderExpr := orderExpr.Map(func(expr sqlparser.Expr) sqlparser.Expr { return semantics.RewriteDerivedTableExpression(expr, ti) @@ -624,7 +438,7 @@ func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, ag used := make([]bool, len(aggregator.Grouping)) for _, orderExpr := range order.Order { for grpIdx, by := range aggregator.Grouping { - if !used[grpIdx] && ctx.SemTable.EqualsExprWithDeps(by.SimplifiedExpr, orderExpr.SimplifiedExpr) { + if !used[grpIdx] && ctx.SemTable.EqualsExprWithDeps(by.Inner, orderExpr.SimplifiedExpr) { newGrouping = append(newGrouping, by) used[grpIdx] = true } @@ -662,15 +476,15 @@ func pushOrderingUnderAggr(ctx *plancontext.PlanningContext, order *Ordering, ag order.Source = aggrSource.Source aggrSource.Source = nil // removing from plan tree aggregator.Source = order - return aggregator, rewrite.NewTree("push ordering under aggregation, removing extra ordering", aggregator), nil + return aggregator, Rewrote("push ordering under aggregation, removing extra ordering") } - return rewrite.Swap(order, aggregator, "push ordering under aggregation") + return Swap(order, aggregator, "push ordering under aggregation") } -func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.OrderBy) bool { +func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []OrderBy) bool { lhs := TableID(aj.LHS) for _, order := range order { - deps := ctx.SemTable.DirectDeps(order.Inner.Expr) + deps := ctx.SemTable.RecursiveDeps(order.Inner.Expr) if !deps.IsSolvedBy(lhs) { return false } @@ -678,9 +492,9 @@ func canPushLeft(ctx *plancontext.PlanningContext, aj *ApplyJoin, order []ops.Or return true } -func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { +func isOuterTable(op Operator, ts semantics.TableSet) bool { aj, 
ok := op.(*ApplyJoin) - if ok && aj.LeftJoin && TableID(aj.RHS).IsOverlapping(ts) { + if ok && !aj.IsInner() && TableID(aj.RHS).IsOverlapping(ts) { return true } @@ -693,43 +507,39 @@ func isOuterTable(op ops.Operator, ts semantics.TableSet) bool { return false } -func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushFilter(ctx *plancontext.PlanningContext, in *Filter) (Operator, *ApplyResult) { switch src := in.Source.(type) { case *Projection: return pushFilterUnderProjection(ctx, in, src) case *Route: for _, pred := range in.Predicates { - var err error deps := ctx.SemTable.RecursiveDeps(pred) if !isOuterTable(src, deps) { // we can only update based on predicates on inner tables - src.Routing, err = src.Routing.updateRoutingLogic(ctx, pred) - if err != nil { - return nil, nil, err - } + src.Routing = src.Routing.updateRoutingLogic(ctx, pred) } } - return rewrite.Swap(in, src, "push filter into Route") + return Swap(in, src, "push filter into Route") case *SubQuery: outerTableID := TableID(src.Outer) for _, pred := range in.Predicates { deps := ctx.SemTable.RecursiveDeps(pred) if !deps.IsSolvedBy(outerTableID) { - return in, rewrite.SameTree, nil + return in, NoRewrite } } src.Outer, in.Source = in, src.Outer - return src, rewrite.NewTree("push filter to outer query in subquery container", in), nil + return src, Rewrote("push filter to outer query in subquery container") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (ops.Operator, *rewrite.ApplyResult, error) { +func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, projection *Projection) (Operator, *ApplyResult) { for _, p := range filter.Predicates { cantPush := false _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - if !fetchByOffset(node) { + if 
!mustFetchFromInput(ctx, node) { return true, nil } @@ -742,64 +552,64 @@ func pushFilterUnderProjection(ctx *plancontext.PlanningContext, filter *Filter, }, p) if cantPush { - return filter, rewrite.SameTree, nil + return filter, NoRewrite } } - return rewrite.Swap(filter, projection, "push filter under projection") + return Swap(filter, projection, "push filter under projection") } -func tryPushDistinct(in *Distinct) (ops.Operator, *rewrite.ApplyResult, error) { +func tryPushDistinct(in *Distinct) (Operator, *ApplyResult) { if in.Required && in.PushedPerformance { - return in, rewrite.SameTree, nil + return in, NoRewrite } switch src := in.Source.(type) { case *Route: if isDistinct(src.Source) && src.IsSingleShard() { - return src, rewrite.NewTree("distinct not needed", in), nil + return src, Rewrote("distinct not needed") } if src.IsSingleShard() || !in.Required { - return rewrite.Swap(in, src, "push distinct under route") + return Swap(in, src, "push distinct under route") } if isDistinct(src.Source) { - return in, rewrite.SameTree, nil + return in, NoRewrite } src.Source = &Distinct{Source: src.Source} in.PushedPerformance = true - return in, rewrite.NewTree("added distinct under route - kept original", src), nil + return in, Rewrote("added distinct under route - kept original") case *Distinct: src.Required = false src.PushedPerformance = false - return src, rewrite.NewTree("remove double distinct", src), nil + return src, Rewrote("remove double distinct") case *Union: for i := range src.Sources { src.Sources[i] = &Distinct{Source: src.Sources[i]} } in.PushedPerformance = true - return in, rewrite.NewTree("push down distinct under union", src), nil - case *ApplyJoin: - src.LHS = &Distinct{Source: src.LHS} - src.RHS = &Distinct{Source: src.RHS} + return in, Rewrote("push down distinct under union") + case JoinOp: + src.SetLHS(&Distinct{Source: src.GetLHS()}) + src.SetRHS(&Distinct{Source: src.GetRHS()}) in.PushedPerformance = true if in.Required { - return in, 
rewrite.NewTree("push distinct under join - kept original", in.Source), nil + return in, Rewrote("push distinct under join - kept original") } - return in.Source, rewrite.NewTree("push distinct under join", in.Source), nil + return in.Source, Rewrote("push distinct under join") case *Ordering: in.Source = src.Source - return in, rewrite.NewTree("remove ordering under distinct", in), nil + return in, Rewrote("remove ordering under distinct") } - return in, rewrite.SameTree, nil + return in, NoRewrite } -func isDistinct(op ops.Operator) bool { +func isDistinct(op Operator) bool { switch op := op.(type) { case *Distinct: return true @@ -814,73 +624,89 @@ func isDistinct(op ops.Operator) bool { } } -func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (ops.Operator, *rewrite.ApplyResult, error) { - if res := compactUnion(op); res != rewrite.SameTree { - return op, res, nil +func tryPushUnion(ctx *plancontext.PlanningContext, op *Union) (Operator, *ApplyResult) { + if res := compactUnion(op); res != NoRewrite { + return op, res } - var sources []ops.Operator + var sources []Operator var selects []sqlparser.SelectExprs - var err error if op.distinct { - sources, selects, err = mergeUnionInputInAnyOrder(ctx, op) + sources, selects = mergeUnionInputInAnyOrder(ctx, op) } else { - sources, selects, err = mergeUnionInputsInOrder(ctx, op) - } - if err != nil { - return nil, nil, err + sources, selects = mergeUnionInputsInOrder(ctx, op) } if len(sources) == 1 { result := sources[0].(*Route) if result.IsSingleShard() || !op.distinct { - return result, rewrite.NewTree("push union under route", op), nil + return result, Rewrote("push union under route") } return &Distinct{ Source: result, Required: true, - }, rewrite.NewTree("push union under route", op), nil + }, Rewrote("push union under route") } if len(sources) == len(op.Sources) { - return op, rewrite.SameTree, nil + return op, NoRewrite } - return newUnion(sources, selects, op.unionColumns, op.distinct), 
rewrite.NewTree("merge union inputs", op), nil + return newUnion(sources, selects, op.unionColumns, op.distinct), Rewrote("merge union inputs") } // addTruncationOrProjectionToReturnOutput uses the original Horizon to make sure that the output columns line up with what the user asked for -func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, oldHorizon ops.Operator, output ops.Operator) (ops.Operator, error) { - horizon, ok := oldHorizon.(*Horizon) - if !ok { - return output, nil +func addTruncationOrProjectionToReturnOutput(ctx *plancontext.PlanningContext, selExprs sqlparser.SelectExprs, output Operator) Operator { + if len(selExprs) == 0 { + return output } cols := output.GetSelectExprs(ctx) - sel := sqlparser.GetFirstSelect(horizon.Query) - if len(sel.SelectExprs) == len(cols) { - return output, nil + sizeCorrect := len(selExprs) == len(cols) || tryTruncateColumnsAt(output, len(selExprs)) + if sizeCorrect && colNamesAlign(selExprs, cols) { + return output } - if tryTruncateColumnsAt(output, len(sel.SelectExprs)) { - return output, nil + return createSimpleProjection(ctx, selExprs, output) +} + +func colNamesAlign(expected, actual sqlparser.SelectExprs) bool { + for i, seE := range expected { + switch se := seE.(type) { + case *sqlparser.AliasedExpr: + if !areColumnNamesAligned(se, actual[i]) { + return false + } + case *sqlparser.StarExpr: + actualStar, isStar := actual[i].(*sqlparser.StarExpr) + if !isStar { + panic("I DONT THINK THIS CAN HAPPEN") + } + if !sqlparser.Equals.RefOfStarExpr(se, actualStar) { + return false + } + } } + return true +} - qp, err := horizon.getQP(ctx) - if err != nil { - return nil, err +func areColumnNamesAligned(expectation *sqlparser.AliasedExpr, actual sqlparser.SelectExpr) bool { + _, isCol := expectation.Expr.(*sqlparser.ColName) + if expectation.As.IsEmpty() && !isCol { + // is the user didn't specify a name, we don't care + return true } - proj, err := createSimpleProjection(ctx, qp, output) - if err 
!= nil { - return nil, err + actualAE, isAe := actual.(*sqlparser.AliasedExpr) + if !isAe { + panic(vterrors.VT13001("used star expression when user did not")) } - return proj, nil + return expectation.ColumnName() == actualAE.ColumnName() } -func stopAtRoute(operator ops.Operator) rewrite.VisitRule { +func stopAtRoute(operator Operator) VisitRule { _, isRoute := operator.(*Route) - return rewrite.VisitRule(!isRoute) + return VisitRule(!isRoute) } func aeWrap(e sqlparser.Expr) *sqlparser.AliasedExpr { diff --git a/go/vt/vtgate/planbuilder/operators/querygraph.go b/go/vt/vtgate/planbuilder/operators/querygraph.go index b0e6b4440be..bc731f29df6 100644 --- a/go/vt/vtgate/planbuilder/operators/querygraph.go +++ b/go/vt/vtgate/planbuilder/operators/querygraph.go @@ -20,7 +20,6 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -63,7 +62,7 @@ type ( } ) -var _ ops.Operator = (*QueryGraph)(nil) +var _ Operator = (*QueryGraph)(nil) // Introduces implements the tableIDIntroducer interface func (qg *QueryGraph) introducesTableID() semantics.TableSet { @@ -163,7 +162,7 @@ func (qg *QueryGraph) UnsolvedPredicates(_ *semantics.SemTable) []sqlparser.Expr } // Clone implements the Operator interface -func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { +func (qg *QueryGraph) Clone([]Operator) Operator { result := &QueryGraph{ Tables: nil, innerJoins: nil, @@ -176,11 +175,11 @@ func (qg *QueryGraph) Clone([]ops.Operator) ops.Operator { return result } -func (qg *QueryGraph) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (qg *QueryGraph) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } -func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (qg *QueryGraph) AddPredicate(ctx *plancontext.PlanningContext, expr 
sqlparser.Expr) Operator { for _, e := range sqlparser.SplitAndExpression(nil, expr) { qg.collectPredicate(ctx, e) } diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection.go b/go/vt/vtgate/planbuilder/operators/queryprojection.go index b7040807300..cbacdd25e90 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection.go @@ -28,9 +28,7 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" ) type ( @@ -46,35 +44,23 @@ type ( SelectExprs []SelectExpr HasAggr bool Distinct bool + WithRollup bool groupByExprs []GroupBy - OrderExprs []ops.OrderBy - HasStar bool + OrderExprs []OrderBy // AddedColumn keeps a counter for expressions added to solve HAVING expressions the user is not selecting AddedColumn int hasCheckedAlignment bool - - // TODO Remove once all horizon planning is done on the operators - CanPushSorting bool } // GroupBy contains the expression to used in group by and also if grouping is needed at VTGate level then what the weight_string function expression to be sent down for evaluation. GroupBy struct { Inner sqlparser.Expr - // The simplified expressions is the "unaliased expression". - // In the following query, the group by has the inner expression - // `x` and the `SimplifiedExpr` is `table.col + 10`: - // select table.col + 10 as x, count(*) from tbl group by x - SimplifiedExpr sqlparser.Expr - // The index at which the user expects to see this column. 
Set to nil, if the user does not ask for it InnerIndex *int - // The original aliased expression that this group by is referring - aliasedExpr *sqlparser.AliasedExpr - // points to the column on the same aggregator ColOffset int WSOffset int @@ -100,12 +86,8 @@ type ( // the offsets point to columns on the same aggregator ColOffset int WSOffset int - } - AggrRewriter struct { - qp *QueryProjection - st *semantics.SemTable - Err error + SubQueryExpression []*SubQuery } ) @@ -115,7 +97,7 @@ func (aggr Aggr) NeedsWeightString(ctx *plancontext.PlanningContext) bool { func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) evalengine.Type { if aggr.Func == nil { - return evalengine.UnknownType() + return evalengine.Type{} } switch aggr.OpCode { case opcode.AggregateMin, opcode.AggregateMax, opcode.AggregateSumDistinct, opcode.AggregateCountDistinct: @@ -123,17 +105,15 @@ func (aggr Aggr) GetTypeCollation(ctx *plancontext.PlanningContext) evalengine.T return typ } - return evalengine.UnknownType() + return evalengine.Type{} } // NewGroupBy creates a new group by from the given fields. 
-func NewGroupBy(inner, simplified sqlparser.Expr, aliasedExpr *sqlparser.AliasedExpr) GroupBy { +func NewGroupBy(inner sqlparser.Expr) GroupBy { return GroupBy{ - Inner: inner, - SimplifiedExpr: simplified, - aliasedExpr: aliasedExpr, - ColOffset: -1, - WSOffset: -1, + Inner: inner, + ColOffset: -1, + WSOffset: -1, } } @@ -148,33 +128,13 @@ func NewAggr(opCode opcode.AggregateOpcode, f sqlparser.AggrFunc, original *sqlp } } -func (b GroupBy) AsOrderBy() ops.OrderBy { - return ops.OrderBy{ +func (b GroupBy) AsOrderBy() OrderBy { + return OrderBy{ Inner: &sqlparser.Order{ Expr: b.Inner, Direction: sqlparser.AscOrder, }, - SimplifiedExpr: b.SimplifiedExpr, - } -} - -func (b GroupBy) AsAliasedExpr() *sqlparser.AliasedExpr { - if b.aliasedExpr != nil { - return b.aliasedExpr - } - col, isColName := b.Inner.(*sqlparser.ColName) - if isColName && b.SimplifiedExpr != b.Inner { - return &sqlparser.AliasedExpr{ - Expr: b.SimplifiedExpr, - As: col.Name, - } - } - if !isColName && b.SimplifiedExpr != b.Inner { - panic("this should not happen - different inner and weighStringExpr and not a column alias") - } - - return &sqlparser.AliasedExpr{ - Expr: b.SimplifiedExpr, + SimplifiedExpr: b.Inner, } } @@ -202,121 +162,62 @@ func (s SelectExpr) GetAliasedExpr() (*sqlparser.AliasedExpr, error) { } // createQPFromSelect creates the QueryProjection for the input *sqlparser.Select -func createQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) (*QueryProjection, error) { +func createQPFromSelect(ctx *plancontext.PlanningContext, sel *sqlparser.Select) *QueryProjection { qp := &QueryProjection{ Distinct: sel.Distinct, } - if err := qp.addSelectExpressions(sel); err != nil { - return nil, err - } - if err := qp.addGroupBy(ctx, sel.GroupBy); err != nil { - return nil, err - } - if err := qp.addOrderBy(ctx, sel.OrderBy); err != nil { - return nil, err - } + qp.addSelectExpressions(ctx, sel) + qp.addGroupBy(ctx, sel.GroupBy) + qp.addOrderBy(ctx, sel.OrderBy) if 
!qp.HasAggr && sel.Having != nil { - qp.HasAggr = containsAggr(sel.Having.Expr) - } - - if err := qp.calculateDistinct(ctx); err != nil { - return nil, err - } - - return qp, nil -} - -// RewriteDown stops the walker from entering inside aggregation functions -func (ar *AggrRewriter) RewriteDown() func(sqlparser.SQLNode, sqlparser.SQLNode) bool { - return func(node, _ sqlparser.SQLNode) bool { - if ar.Err != nil { - return true - } - _, ok := node.(sqlparser.AggrFunc) - return !ok + qp.HasAggr = ContainsAggr(ctx, sel.Having.Expr) } -} + qp.calculateDistinct(ctx) -// RewriteUp will go through an expression, add aggregations to the QP, and rewrite them to use column offset -func (ar *AggrRewriter) RewriteUp() func(*sqlparser.Cursor) bool { - return func(cursor *sqlparser.Cursor) bool { - if ar.Err != nil { - return false - } - sqlNode := cursor.Node() - fExp, ok := sqlNode.(sqlparser.AggrFunc) - if !ok { - return true - } - for offset, expr := range ar.qp.SelectExprs { - ae, err := expr.GetAliasedExpr() - if err != nil { - ar.Err = err - return false - } - if ar.st.EqualsExprWithDeps(ae.Expr, fExp) { - cursor.Replace(sqlparser.NewOffset(offset, fExp)) - return true - } - } - - col := SelectExpr{ - Aggr: true, - Col: &sqlparser.AliasedExpr{Expr: fExp}, - } - ar.qp.HasAggr = true - cursor.Replace(sqlparser.NewOffset(len(ar.qp.SelectExprs), fExp)) - ar.qp.SelectExprs = append(ar.qp.SelectExprs, col) - ar.qp.AddedColumn++ - - return true - } -} - -// AggrRewriter extracts -func (qp *QueryProjection) AggrRewriter(ctx *plancontext.PlanningContext) *AggrRewriter { - return &AggrRewriter{ - qp: qp, - st: ctx.SemTable, - } + return qp } -func (qp *QueryProjection) addSelectExpressions(sel *sqlparser.Select) error { +func (qp *QueryProjection) addSelectExpressions(ctx *plancontext.PlanningContext, sel *sqlparser.Select) { for _, selExp := range sel.SelectExprs { switch selExp := selExp.(type) { case *sqlparser.AliasedExpr: - err := checkForInvalidAggregations(selExp) - if err 
!= nil { - return err - } col := SelectExpr{ Col: selExp, } - if containsAggr(selExp.Expr) { + if ContainsAggr(ctx, selExp.Expr) { col.Aggr = true qp.HasAggr = true } qp.SelectExprs = append(qp.SelectExprs, col) case *sqlparser.StarExpr: - qp.HasStar = true col := SelectExpr{ Col: selExp, } qp.SelectExprs = append(qp.SelectExprs, col) default: - return vterrors.VT13001(fmt.Sprintf("%T in select list", selExp)) + panic(vterrors.VT13001(fmt.Sprintf("%T in select list", selExp))) } } - return nil } -func containsAggr(e sqlparser.SQLNode) (hasAggr bool) { +func IsAggr(ctx *plancontext.PlanningContext, e sqlparser.SQLNode) bool { + switch node := e.(type) { + case sqlparser.AggrFunc: + return true + case *sqlparser.FuncExpr: + return node.Name.EqualsAnyString(ctx.VSchema.GetAggregateUDFs()) + } + + return false +} + +func ContainsAggr(ctx *plancontext.PlanningContext, e sqlparser.SQLNode) (hasAggr bool) { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node.(type) { case *sqlparser.Offset: - // offsets here indicate that a possible aggregation has already been handled by an input + // offsets here indicate that a possible aggregation has already been handled by an input, // so we don't need to worry about aggregation in the original return false, nil case sqlparser.AggrFunc: @@ -324,6 +225,11 @@ func containsAggr(e sqlparser.SQLNode) (hasAggr bool) { return false, io.EOF case *sqlparser.Subquery: return false, nil + case *sqlparser.FuncExpr: + if IsAggr(ctx, node) { + hasAggr = true + return false, io.EOF + } } return true, nil @@ -332,21 +238,14 @@ func containsAggr(e sqlparser.SQLNode) (hasAggr bool) { } // createQPFromUnion creates the QueryProjection for the input *sqlparser.Union -func createQPFromUnion(ctx *plancontext.PlanningContext, union *sqlparser.Union) (*QueryProjection, error) { +func createQPFromUnion(ctx *plancontext.PlanningContext, union *sqlparser.Union) *QueryProjection { qp := &QueryProjection{} sel := 
sqlparser.GetFirstSelect(union) - err := qp.addSelectExpressions(sel) - if err != nil { - return nil, err - } + qp.addSelectExpressions(ctx, sel) + qp.addOrderBy(ctx, union.OrderBy) - err = qp.addOrderBy(ctx, union.OrderBy) - if err != nil { - return nil, err - } - - return qp, nil + return qp } type expressionSet struct { @@ -366,37 +265,28 @@ func (es *expressionSet) add(ctx *plancontext.PlanningContext, e sqlparser.Expr) return true } -func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy sqlparser.OrderBy) error { +func (qp *QueryProjection) addOrderBy(ctx *plancontext.PlanningContext, orderBy sqlparser.OrderBy) { canPushSorting := true es := &expressionSet{} for _, order := range orderBy { - simpleExpr, err := qp.GetSimplifiedExpr(ctx, order.Expr) - if err != nil { - return err - } - if sqlparser.IsNull(simpleExpr) { + if sqlparser.IsNull(order.Expr) { // ORDER BY null can safely be ignored continue } - if !es.add(ctx, simpleExpr) { + if !es.add(ctx, order.Expr) { continue } - qp.OrderExprs = append(qp.OrderExprs, ops.OrderBy{ - Inner: sqlparser.CloneRefOfOrder(order), - SimplifiedExpr: simpleExpr, + qp.OrderExprs = append(qp.OrderExprs, OrderBy{ + Inner: ctx.SemTable.Clone(order).(*sqlparser.Order), + SimplifiedExpr: order.Expr, }) - canPushSorting = canPushSorting && !containsAggr(simpleExpr) + canPushSorting = canPushSorting && !ContainsAggr(ctx, order.Expr) } - qp.CanPushSorting = canPushSorting - return nil } -func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) error { +func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) { if qp.Distinct && !qp.HasAggr { - distinct, err := qp.useGroupingOverDistinct(ctx) - if err != nil { - return err - } + distinct := qp.useGroupingOverDistinct(ctx) if distinct { // if order by exists with overlap with select expressions, we can use the aggregation with ordering over distinct. 
qp.Distinct = false @@ -412,11 +302,11 @@ func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) e } if !qp.Distinct || len(qp.groupByExprs) == 0 { - return nil + return } for _, gb := range qp.groupByExprs { - _, found := canReuseColumn(ctx, qp.SelectExprs, gb.SimplifiedExpr, func(expr SelectExpr) sqlparser.Expr { + _, found := canReuseColumn(ctx, qp.SelectExprs, gb.Inner, func(expr SelectExpr) sqlparser.Expr { getExpr, err := expr.GetExpr() if err != nil { panic(err) @@ -424,38 +314,33 @@ func (qp *QueryProjection) calculateDistinct(ctx *plancontext.PlanningContext) e return getExpr }) if !found { - return nil + return } } // since we are returning all grouping expressions, we know the results are guaranteed to be unique qp.Distinct = false - return nil } -func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy sqlparser.GroupBy) error { +func (qp *QueryProjection) addGroupBy(ctx *plancontext.PlanningContext, groupBy *sqlparser.GroupBy) { + if groupBy == nil { + return + } + qp.WithRollup = groupBy.WithRollup es := &expressionSet{} - for _, group := range groupBy { - selectExprIdx, aliasExpr := qp.FindSelectExprIndexForExpr(ctx, group) - simpleExpr, err := qp.GetSimplifiedExpr(ctx, group) - if err != nil { - return err - } - - if err = checkForInvalidGroupingExpressions(simpleExpr); err != nil { - return err - } + for _, grouping := range groupBy.Exprs { + selectExprIdx := qp.FindSelectExprIndexForExpr(ctx, grouping) + checkForInvalidGroupingExpressions(ctx, grouping) - if !es.add(ctx, simpleExpr) { + if !es.add(ctx, grouping) { continue } - groupBy := NewGroupBy(group, simpleExpr, aliasExpr) + groupBy := NewGroupBy(grouping) groupBy.InnerIndex = selectExprIdx qp.groupByExprs = append(qp.groupByExprs, groupBy) } - return nil } // GetGrouping returns a copy of the grouping parameters of the QP @@ -463,88 +348,15 @@ func (qp *QueryProjection) GetGrouping() []GroupBy { return slices.Clone(qp.groupByExprs) } -func 
checkForInvalidAggregations(exp *sqlparser.AliasedExpr) error { - return sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - aggrFunc, isAggregate := node.(sqlparser.AggrFunc) - if !isAggregate { - return true, nil - } - args := aggrFunc.GetArgs() - if args != nil && len(args) != 1 { - return false, vterrors.VT03001(sqlparser.String(node)) - } - return true, nil - - }, exp.Expr) -} - func (qp *QueryProjection) isExprInGroupByExprs(ctx *plancontext.PlanningContext, expr sqlparser.Expr) bool { for _, groupByExpr := range qp.groupByExprs { - if ctx.SemTable.EqualsExprWithDeps(groupByExpr.SimplifiedExpr, expr) { + if ctx.SemTable.EqualsExprWithDeps(groupByExpr.Inner, expr) { return true } } return false } -// GetSimplifiedExpr takes an expression used in ORDER BY or GROUP BY, and returns an expression that is simpler to evaluate -func (qp *QueryProjection) GetSimplifiedExpr(ctx *plancontext.PlanningContext, e sqlparser.Expr) (found sqlparser.Expr, err error) { - if qp == nil { - return e, nil - } - // If the ORDER BY is against a column alias, we need to remember the expression - // behind the alias. The weightstring(.) calls needs to be done against that expression and not the alias. - // Eg - select music.foo as bar, weightstring(music.foo) from music order by bar - - in, isColName := e.(*sqlparser.ColName) - if !(isColName && in.Qualifier.IsEmpty()) { - // we are only interested in unqualified column names. 
if it's not a column name and not unqualified, we're done - return e, nil - } - - check := func(e sqlparser.Expr) error { - if found != nil && !ctx.SemTable.EqualsExprWithDeps(found, e) { - return &semantics.AmbiguousColumnError{Column: sqlparser.String(in)} - } - found = e - return nil - } - - for _, selectExpr := range qp.SelectExprs { - ae, ok := selectExpr.Col.(*sqlparser.AliasedExpr) - if !ok { - continue - } - aliased := !ae.As.IsEmpty() - if aliased { - if in.Name.Equal(ae.As) { - err = check(ae.Expr) - if err != nil { - return nil, err - } - } - } else { - seCol, ok := ae.Expr.(*sqlparser.ColName) - if !ok { - continue - } - if seCol.Name.Equal(in.Name) { - // If the column name matches, we have a match, even if the table name is not listed - err = check(ae.Expr) - if err != nil { - return nil, err - } - } - } - } - - if found == nil { - found = e - } - - return found, nil -} - // toString should only be used for tests func (qp *QueryProjection) toString() string { type output struct { @@ -585,83 +397,6 @@ func (qp *QueryProjection) NeedsAggregation() bool { return qp.HasAggr || len(qp.groupByExprs) > 0 } -// NeedsProjecting returns true if we have projections that need to be evaluated at the vtgate level -// and can't be pushed down to MySQL -func (qp *QueryProjection) NeedsProjecting( - ctx *plancontext.PlanningContext, - pusher func(expr *sqlparser.AliasedExpr) (int, error), -) (needsVtGateEval bool, expressions []sqlparser.Expr, colNames []string, err error) { - for _, se := range qp.SelectExprs { - var ae *sqlparser.AliasedExpr - ae, err = se.GetAliasedExpr() - if err != nil { - return false, nil, nil, err - } - - expr := ae.Expr - colNames = append(colNames, ae.ColumnName()) - - if _, isCol := expr.(*sqlparser.ColName); isCol { - offset, err := pusher(ae) - if err != nil { - return false, nil, nil, err - } - expressions = append(expressions, sqlparser.NewOffset(offset, expr)) - continue - } - - stopOnError := func(sqlparser.SQLNode, sqlparser.SQLNode) 
bool { - return err == nil - } - rewriter := func(cursor *sqlparser.CopyOnWriteCursor) { - col, isCol := cursor.Node().(*sqlparser.ColName) - if !isCol { - return - } - var tableInfo semantics.TableInfo - tableInfo, err = ctx.SemTable.TableInfoForExpr(col) - if err != nil { - return - } - dt, isDT := tableInfo.(*semantics.DerivedTable) - if !isDT { - return - } - - rewritten := semantics.RewriteDerivedTableExpression(col, dt) - if containsAggr(rewritten) { - offset, tErr := pusher(&sqlparser.AliasedExpr{Expr: col}) - if tErr != nil { - err = tErr - return - } - cursor.Replace(sqlparser.NewOffset(offset, col)) - } - } - newExpr := sqlparser.CopyOnRewrite(expr, stopOnError, rewriter, nil) - - if err != nil { - return - } - - if newExpr != expr { - // if we changed the expression, it means that we have to evaluate the rest at the vtgate level - expressions = append(expressions, newExpr.(sqlparser.Expr)) - needsVtGateEval = true - continue - } - - // we did not need to push any parts of this expression down. Let's check if we can push all of it - offset, err := pusher(ae) - if err != nil { - return false, nil, nil, err - } - expressions = append(expressions, sqlparser.NewOffset(offset, expr)) - } - - return -} - func (qp *QueryProjection) onlyAggr() bool { if !qp.HasAggr { return false @@ -685,41 +420,29 @@ func (qp *QueryProjection) NeedsDistinct() bool { return true } -func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext, allowComplexExpression bool) (out []Aggr, complex bool, err error) { -orderBy: - for _, orderExpr := range qp.OrderExprs { - orderExpr := orderExpr.SimplifiedExpr - for _, expr := range qp.SelectExprs { - col, ok := expr.Col.(*sqlparser.AliasedExpr) - if !ok { - continue - } - if ctx.SemTable.EqualsExprWithDeps(col.Expr, orderExpr) { - continue orderBy // we found the expression we were looking for! 
- } - } - qp.SelectExprs = append(qp.SelectExprs, SelectExpr{ - Col: &sqlparser.AliasedExpr{Expr: orderExpr}, - Aggr: containsAggr(orderExpr), - }) - qp.AddedColumn++ +func (qp *QueryProjection) AggregationExpressions(ctx *plancontext.PlanningContext, allowComplexExpression bool) (out []Aggr, complex bool) { + qp.addOrderByToSelect(ctx) + addAggr := func(a Aggr) { + out = append(out, a) + } + makeComplex := func() { + complex = true } - // Here we go over the expressions we are returning. Since we know we are aggregating, // all expressions have to be either grouping expressions or aggregate expressions. // If we find an expression that is neither, we treat is as a special aggregation function AggrRandom for idx, expr := range qp.SelectExprs { aliasedExpr, err := expr.GetAliasedExpr() if err != nil { - return nil, false, err + panic(err) } idxCopy := idx - if !containsAggr(expr.Col) { + if !ContainsAggr(ctx, expr.Col) { getExpr, err := expr.GetExpr() if err != nil { - return nil, false, err + panic(err) } if !qp.isExprInGroupByExprs(ctx, getExpr) { aggr := NewAggr(opcode.AggregateAnyValue, nil, aliasedExpr, aliasedExpr.ColumnName()) @@ -728,39 +451,78 @@ orderBy: } continue } - _, isAggregate := aliasedExpr.Expr.(sqlparser.AggrFunc) - if !isAggregate && !allowComplexExpression { - return nil, false, vterrors.VT12001("in scatter query: complex aggregate expression") + if !IsAggr(ctx, aliasedExpr.Expr) && !allowComplexExpression { + panic(vterrors.VT12001("in scatter query: complex aggregate expression")) } - sqlparser.CopyOnRewrite(aliasedExpr.Expr, func(node, parent sqlparser.SQLNode) bool { - ex, isExpr := node.(sqlparser.Expr) - if !isExpr { - return true - } - if aggr, isAggr := node.(sqlparser.AggrFunc); isAggr { - ae := aeWrap(aggr) - if aggr == aliasedExpr.Expr { - ae = aliasedExpr - } - aggrFunc := createAggrFromAggrFunc(aggr, ae) - aggrFunc.Index = &idxCopy - out = append(out, aggrFunc) - return false + sqlparser.CopyOnRewrite(aliasedExpr.Expr, 
qp.extractAggr(ctx, idx, aliasedExpr, addAggr, makeComplex), nil, nil) + } + return +} + +func (qp *QueryProjection) extractAggr( + ctx *plancontext.PlanningContext, + idx int, + aliasedExpr *sqlparser.AliasedExpr, + addAggr func(a Aggr), + makeComplex func(), +) func(node sqlparser.SQLNode, parent sqlparser.SQLNode) bool { + return func(node, parent sqlparser.SQLNode) bool { + ex, isExpr := node.(sqlparser.Expr) + if !isExpr { + return true + } + if aggr, isAggr := node.(sqlparser.AggrFunc); isAggr { + ae := aeWrap(aggr) + if aggr == aliasedExpr.Expr { + ae = aliasedExpr } - if containsAggr(node) { - complex = true - return true + aggrFunc := createAggrFromAggrFunc(aggr, ae) + aggrFunc.Index = &idx + addAggr(aggrFunc) + return false + } + if IsAggr(ctx, node) { + // If we are here, we have a function that is an aggregation but not parsed into an AggrFunc. + // This is the case for UDFs - we have to be careful with these because we can't evaluate them in VTGate. + aggr := NewAggr(opcode.AggregateUDF, nil, aeWrap(ex), "") + aggr.Index = &idx + addAggr(aggr) + return false + } + if ContainsAggr(ctx, node) { + makeComplex() + return true + } + if !qp.isExprInGroupByExprs(ctx, ex) { + aggr := NewAggr(opcode.AggregateAnyValue, nil, aeWrap(ex), "") + aggr.Index = &idx + addAggr(aggr) + } + return false + } +} + +func (qp *QueryProjection) addOrderByToSelect(ctx *plancontext.PlanningContext) { +orderBy: + // We need to return all columns that are being used for ordering + for _, orderExpr := range qp.OrderExprs { + orderExpr := orderExpr.SimplifiedExpr + for _, expr := range qp.SelectExprs { + col, ok := expr.Col.(*sqlparser.AliasedExpr) + if !ok { + continue } - if !qp.isExprInGroupByExprs(ctx, ex) { - aggr := NewAggr(opcode.AggregateAnyValue, nil, aeWrap(ex), "") - aggr.Index = &idxCopy - out = append(out, aggr) + if ctx.SemTable.EqualsExprWithDeps(col.Expr, orderExpr) { + continue orderBy // we found the expression we were looking for! 
} - return false - }, nil, nil) + } + qp.SelectExprs = append(qp.SelectExprs, SelectExpr{ + Col: &sqlparser.AliasedExpr{Expr: orderExpr}, + Aggr: ContainsAggr(ctx, orderExpr), + }) + qp.AddedColumn++ } - return } func createAggrFromAggrFunc(fnc sqlparser.AggrFunc, aliasedExpr *sqlparser.AliasedExpr) Aggr { @@ -789,7 +551,7 @@ func createAggrFromAggrFunc(fnc sqlparser.AggrFunc, aliasedExpr *sqlparser.Alias // FindSelectExprIndexForExpr returns the index of the given expression in the select expressions, if it is part of it // returns -1 otherwise. -func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (*int, *sqlparser.AliasedExpr) { +func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningContext, expr sqlparser.Expr) *int { colExpr, isCol := expr.(*sqlparser.ColName) for idx, selectExpr := range qp.SelectExprs { @@ -798,16 +560,16 @@ func (qp *QueryProjection) FindSelectExprIndexForExpr(ctx *plancontext.PlanningC continue } if isCol { - isAliasExpr := !aliasedExpr.As.IsEmpty() + isAliasExpr := aliasedExpr.As.NotEmpty() if isAliasExpr && colExpr.Name.Equal(aliasedExpr.As) { - return &idx, aliasedExpr + return &idx } } if ctx.SemTable.EqualsExprWithDeps(aliasedExpr.Expr, expr) { - return &idx, aliasedExpr + return &idx } } - return nil, nil + return nil } // OldAlignGroupByAndOrderBy TODO Remove once all of horizon planning is done on the operators @@ -829,7 +591,7 @@ func (qp *QueryProjection) OldAlignGroupByAndOrderBy(ctx *plancontext.PlanningCo used := make([]bool, len(qp.groupByExprs)) for _, orderExpr := range qp.OrderExprs { for i, groupingExpr := range qp.groupByExprs { - if !used[i] && ctx.SemTable.EqualsExpr(groupingExpr.SimplifiedExpr, orderExpr.SimplifiedExpr) { + if !used[i] && ctx.SemTable.EqualsExpr(groupingExpr.Inner, orderExpr.SimplifiedExpr) { newGrouping = append(newGrouping, groupingExpr) used[i] = true } @@ -869,7 +631,7 @@ func (qp *QueryProjection) 
AlignGroupByAndOrderBy(ctx *plancontext.PlanningConte outer: for _, orderBy := range qp.OrderExprs { for gidx, groupBy := range qp.groupByExprs { - if ctx.SemTable.EqualsExprWithDeps(groupBy.SimplifiedExpr, orderBy.SimplifiedExpr) { + if ctx.SemTable.EqualsExprWithDeps(groupBy.Inner, orderBy.SimplifiedExpr) { newGrouping = append(newGrouping, groupBy) used[gidx] = true continue outer @@ -900,7 +662,7 @@ func (qp *QueryProjection) GetColumnCount() int { func (qp *QueryProjection) orderByOverlapWithSelectExpr(ctx *plancontext.PlanningContext) bool { for _, expr := range qp.OrderExprs { - idx, _ := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) + idx := qp.FindSelectExprIndexForExpr(ctx, expr.SimplifiedExpr) if idx != nil { return true } @@ -908,47 +670,43 @@ func (qp *QueryProjection) orderByOverlapWithSelectExpr(ctx *plancontext.Plannin return false } -func (qp *QueryProjection) useGroupingOverDistinct(ctx *plancontext.PlanningContext) (bool, error) { +func (qp *QueryProjection) useGroupingOverDistinct(ctx *plancontext.PlanningContext) bool { if !qp.orderByOverlapWithSelectExpr(ctx) { - return false, nil + return false } var gbs []GroupBy for idx, selExpr := range qp.SelectExprs { ae, err := selExpr.GetAliasedExpr() if err != nil { // not an alias Expr, cannot continue forward. - return false, nil - } - sExpr, err := qp.GetSimplifiedExpr(ctx, ae.Expr) - if err != nil { - return false, err + return false } // check if the grouping already exists on that column. found := slices.IndexFunc(qp.groupByExprs, func(gb GroupBy) bool { - return ctx.SemTable.EqualsExprWithDeps(gb.SimplifiedExpr, sExpr) + return ctx.SemTable.EqualsExprWithDeps(gb.Inner, ae.Expr) }) if found != -1 { continue } - groupBy := NewGroupBy(ae.Expr, sExpr, ae) + groupBy := NewGroupBy(ae.Expr) selectExprIdx := idx groupBy.InnerIndex = &selectExprIdx gbs = append(gbs, groupBy) } qp.groupByExprs = append(qp.groupByExprs, gbs...) 
- return true, nil + return true } -func checkForInvalidGroupingExpressions(expr sqlparser.Expr) error { - return sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - if _, isAggregate := node.(sqlparser.AggrFunc); isAggregate { - return false, vterrors.VT03005(sqlparser.String(expr)) +func checkForInvalidGroupingExpressions(ctx *plancontext.PlanningContext, expr sqlparser.Expr) { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + if IsAggr(ctx, node) { + panic(vterrors.VT03005(sqlparser.String(expr))) } _, isSubQ := node.(*sqlparser.Subquery) arg, isArg := node.(*sqlparser.Argument) if isSubQ || (isArg && strings.HasPrefix(arg.Name, "__sq")) { - return false, vterrors.VT12001("subqueries in GROUP BY") + panic(vterrors.VT12001("subqueries in GROUP BY")) } return true, nil }, expr) @@ -972,12 +730,12 @@ func CompareRefInt(a *int, b *int) bool { return *a < *b } -func CreateQPFromSelectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement) (*QueryProjection, error) { +func CreateQPFromSelectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement) *QueryProjection { switch sel := stmt.(type) { case *sqlparser.Select: return createQPFromSelect(ctx, sel) case *sqlparser.Union: return createQPFromUnion(ctx, sel) } - return nil, vterrors.VT13001("can only create query projection from Union and Select statements") + panic(vterrors.VT13001("can only create query projection from Union and Select statements")) } diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go deleted file mode 100644 index 7c92b716d7c..00000000000 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ /dev/null @@ -1,245 +0,0 @@ -/* -Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package operators - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" -) - -func TestQP(t *testing.T) { - tcases := []struct { - sql string - - expErr string - expOrder []ops.OrderBy - }{ - { - sql: "select * from user", - }, - { - sql: "select 1, count(1) from user", - }, - { - sql: "select max(id) from user", - }, - { - sql: "select 1, count(1) from user order by 1", - expOrder: []ops.OrderBy{ - {Inner: &sqlparser.Order{Expr: sqlparser.NewIntLiteral("1")}, SimplifiedExpr: sqlparser.NewIntLiteral("1")}, - }, - }, - { - sql: "select id from user order by col, id, 1", - expOrder: []ops.OrderBy{ - {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("col")}, SimplifiedExpr: sqlparser.NewColName("col")}, - {Inner: &sqlparser.Order{Expr: sqlparser.NewColName("id")}, SimplifiedExpr: sqlparser.NewColName("id")}, - }, - }, - { - sql: "SELECT CONCAT(last_name,', ',first_name) AS full_name FROM mytable ORDER BY full_name", // alias in order not supported - expOrder: []ops.OrderBy{ - { - Inner: &sqlparser.Order{Expr: sqlparser.NewColName("full_name")}, - SimplifiedExpr: &sqlparser.FuncExpr{ - Name: sqlparser.NewIdentifierCI("CONCAT"), - Exprs: sqlparser.SelectExprs{ - &sqlparser.AliasedExpr{Expr: sqlparser.NewColName("last_name")}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(", ")}, - &sqlparser.AliasedExpr{Expr: 
sqlparser.NewColName("first_name")}, - }, - }, - }, - }, - }, { - sql: "select count(*) b from user group by b", - expErr: "cannot group on 'count(*)'", - }, - } - ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} - for _, tcase := range tcases { - t.Run(tcase.sql, func(t *testing.T) { - stmt, err := sqlparser.Parse(tcase.sql) - require.NoError(t, err) - - sel := stmt.(*sqlparser.Select) - _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) - require.NoError(t, err) - - qp, err := createQPFromSelect(ctx, sel) - if tcase.expErr != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tcase.expErr) - } else { - require.NoError(t, err) - assert.Equal(t, len(sel.SelectExprs), len(qp.SelectExprs)) - require.Equal(t, len(tcase.expOrder), len(qp.OrderExprs), "not enough order expressions in QP") - for index, expOrder := range tcase.expOrder { - assert.True(t, sqlparser.Equals.SQLNode(expOrder.Inner, qp.OrderExprs[index].Inner), "want: %+v, got %+v", sqlparser.String(expOrder.Inner), sqlparser.String(qp.OrderExprs[index].Inner)) - assert.True(t, sqlparser.Equals.SQLNode(expOrder.SimplifiedExpr, qp.OrderExprs[index].SimplifiedExpr), "want: %v, got %v", sqlparser.String(expOrder.SimplifiedExpr), sqlparser.String(qp.OrderExprs[index].SimplifiedExpr)) - } - } - }) - } -} - -func TestQPSimplifiedExpr(t *testing.T) { - testCases := []struct { - query, expected string - }{ - { - query: "select intcol, count(*) from user group by 1", - expected: ` -{ - "Select": [ - "intcol", - "aggr: count(*)" - ], - "Grouping": [ - "intcol" - ], - "OrderBy": [], - "Distinct": false -}`, - }, - { - query: "select intcol, textcol from user order by 1, textcol", - expected: ` -{ - "Select": [ - "intcol", - "textcol" - ], - "Grouping": [], - "OrderBy": [ - "intcol asc", - "textcol asc" - ], - "Distinct": false -}`, - }, - { - query: "select intcol, textcol, count(id) from user group by intcol, textcol, extracol order by 2 desc", - expected: ` -{ - "Select": [ - 
"intcol", - "textcol", - "aggr: count(id)" - ], - "Grouping": [ - "intcol", - "textcol", - "extracol" - ], - "OrderBy": [ - "textcol desc" - ], - "Distinct": false -}`, - }, - { - query: "select distinct col1, col2 from user group by col1, col2", - expected: ` -{ - "Select": [ - "col1", - "col2" - ], - "Grouping": [], - "OrderBy": [], - "Distinct": true -}`, - }, - { - query: "select distinct count(*) from user", - expected: ` -{ - "Select": [ - "aggr: count(*)" - ], - "Grouping": [], - "OrderBy": [], - "Distinct": false -}`, - }, - } - - for _, tc := range testCases { - t.Run(tc.query, func(t *testing.T) { - ast, err := sqlparser.Parse(tc.query) - require.NoError(t, err) - sel := ast.(*sqlparser.Select) - _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) - require.NoError(t, err) - ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} - qp, err := createQPFromSelect(ctx, sel) - require.NoError(t, err) - require.Equal(t, tc.expected[1:], qp.toString()) - }) - } -} - -func TestCompareRefInt(t *testing.T) { - one := 1 - two := 2 - tests := []struct { - name string - a *int - b *int - want bool - }{ - { - name: "1<2", - a: &one, - b: &two, - want: true, - }, { - name: "2<1", - a: &two, - b: &one, - want: false, - }, { - name: "2>>>>>>> " + message) } - return &ApplyResult{Transformations: []Rewrite{{Message: message, Op: op}}} + return &ApplyResult{Transformations: []Rewrite{{Message: message}}} } func (ar *ApplyResult) Merge(other *ApplyResult) *ApplyResult { @@ -83,13 +81,13 @@ func (ar *ApplyResult) Changed() bool { } // Visit allows for the walking of the operator tree. 
If any error is returned, the walk is aborted -func Visit(root ops.Operator, visitor func(ops.Operator) error) error { - _, _, err := breakableTopDown(root, func(op ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error) { +func Visit(root Operator, visitor func(Operator) error) error { + _, _, err := breakableTopDown(root, func(op Operator) (Operator, *ApplyResult, VisitRule, error) { err := visitor(op) if err != nil { - return nil, SameTree, SkipChildren, err + return nil, NoRewrite, SkipChildren, err } - return op, SameTree, VisitChildren, nil + return op, NoRewrite, VisitChildren, nil }) return err } @@ -98,16 +96,13 @@ func Visit(root ops.Operator, visitor func(ops.Operator) error) error { // the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated // into a final output indicating whether the operator tree was changed. func BottomUp( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (ops.Operator, error) { - op, _, err := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } - return op, nil +) Operator { + op, _ := bottomUp(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) + return op } var DebugOperatorTree = false @@ -123,39 +118,24 @@ func EnableDebugPrinting() (reset func()) { // FixedPointBottomUp rewrites an operator tree much like BottomUp does, // but does the rewriting repeatedly, until a tree walk is done with no changes to the tree. 
func FixedPointBottomUp( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (op ops.Operator, err error) { +) (op Operator) { var id *ApplyResult op = root // will loop while the rewriting changes anything - for ok := true; ok; ok = id != SameTree { + for ok := true; ok; ok = id != NoRewrite { if DebugOperatorTree { - fmt.Println(ops.ToTree(op)) + fmt.Println("Full tree:") + fmt.Println(ToTree(op)) } // Continue the top-down rewriting process as long as changes were made during the last traversal - op, id, err = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } + op, id = bottomUp(op, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) } - return op, nil -} - -// BottomUpAll rewrites an operator tree from the bottom up. BottomUp applies a transformation function to -// the given operator tree from the bottom up. Each callback [f] returns a ApplyResult that is aggregated -// into a final output indicating whether the operator tree was changed. -func BottomUpAll( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, - visit VisitF, -) (ops.Operator, error) { - return BottomUp(root, resolveID, visit, func(ops.Operator) VisitRule { - return VisitChildren - }) + return op } // TopDown rewrites an operator tree from the bottom up. BottomUp applies a transformation function to @@ -169,31 +149,28 @@ func BottomUpAll( // - shouldVisit: The ShouldVisit function to control which nodes and ancestors to visit and which to skip. // // Returns: -// - ops.Operator: The root of the (potentially) transformed operator tree. +// - Operator: The root of the (potentially) transformed operator tree. // - error: An error if any occurred during the traversal. 
func TopDown( - root ops.Operator, - resolveID func(ops.Operator) semantics.TableSet, + root Operator, + resolveID func(Operator) semantics.TableSet, visit VisitF, shouldVisit ShouldVisit, -) (op ops.Operator, err error) { - op, _, err = topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - if err != nil { - return nil, err - } +) Operator { + op, _ := topDown(root, semantics.EmptyTableSet(), resolveID, visit, shouldVisit, true) - return op, nil + return op } // Swap takes a tree like a->b->c and swaps `a` and `b`, so we end up with b->a->c -func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResult, error) { +func Swap(parent, child Operator, message string) (Operator, *ApplyResult) { c := child.Inputs() if len(c) != 1 { - return nil, nil, vterrors.VT13001("Swap can only be used on single input operators") + panic(vterrors.VT13001("Swap can only be used on single input operators")) } aInputs := slices.Clone(parent.Inputs()) - var tmp ops.Operator + var tmp Operator for i, in := range aInputs { if in == child { tmp = aInputs[i] @@ -202,30 +179,30 @@ func Swap(parent, child ops.Operator, message string) (ops.Operator, *ApplyResul } } if tmp == nil { - return nil, nil, vterrors.VT13001("Swap can only be used when the second argument is an input to the first") + panic(vterrors.VT13001("Swap can only be used when the second argument is an input to the first")) } - child.SetInputs([]ops.Operator{parent}) + child.SetInputs([]Operator{parent}) parent.SetInputs(aInputs) - return child, NewTree(message, parent), nil + return child, Rewrote(message) } func bottomUp( - root ops.Operator, + root Operator, rootID semantics.TableSet, - resolveID func(ops.Operator) semantics.TableSet, + resolveID func(Operator) semantics.TableSet, rewriter VisitF, shouldVisit ShouldVisit, isRoot bool, -) (ops.Operator, *ApplyResult, error) { +) (Operator, *ApplyResult) { if shouldVisit != nil && !shouldVisit(root) { - return root, SameTree, nil + 
return root, NoRewrite } oldInputs := root.Inputs() var anythingChanged *ApplyResult - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) childID := rootID // noLHSTableSet is used to mark which operators that do not send data from the LHS to the RHS @@ -241,9 +218,9 @@ func bottomUp( if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) - if err != nil { - return nil, nil, err + in, changed := bottomUp(operator, childID, resolveID, rewriter, shouldVisit, false) + if DebugOperatorTree && changed.Changed() { + fmt.Println(ToTree(in)) } anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in @@ -253,18 +230,15 @@ func bottomUp( root = root.Clone(newInputs) } - newOp, treeIdentity, err := rewriter(root, rootID, isRoot) - if err != nil { - return nil, nil, err - } + newOp, treeIdentity := rewriter(root, rootID, isRoot) anythingChanged = anythingChanged.Merge(treeIdentity) - return newOp, anythingChanged, nil + return newOp, anythingChanged } func breakableTopDown( - in ops.Operator, - rewriter func(ops.Operator) (ops.Operator, *ApplyResult, VisitRule, error), -) (ops.Operator, *ApplyResult, error) { + in Operator, + rewriter func(Operator) (Operator, *ApplyResult, VisitRule, error), +) (Operator, *ApplyResult, error) { newOp, identity, visit, err := rewriter(in) if err != nil || visit == SkipChildren { return newOp, identity, err @@ -273,17 +247,17 @@ func breakableTopDown( var anythingChanged *ApplyResult oldInputs := newOp.Inputs() - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) for i, oldInput := range oldInputs { newInputs[i], identity, err = breakableTopDown(oldInput, rewriter) anythingChanged = anythingChanged.Merge(identity) if err != nil { - return nil, SameTree, err + return nil, NoRewrite, err } } if 
anythingChanged.Changed() { - return newOp, SameTree, nil + return newOp, NoRewrite, nil } return newOp.Clone(newInputs), anythingChanged, nil @@ -293,20 +267,17 @@ func breakableTopDown( // top down and applies the given transformation function. It also returns the ApplyResult // indicating whether the tree was changed func topDown( - root ops.Operator, + root Operator, rootID semantics.TableSet, - resolveID func(ops.Operator) semantics.TableSet, + resolveID func(Operator) semantics.TableSet, rewriter VisitF, shouldVisit ShouldVisit, isRoot bool, -) (ops.Operator, *ApplyResult, error) { - newOp, anythingChanged, err := rewriter(root, rootID, isRoot) - if err != nil { - return nil, nil, err - } +) (Operator, *ApplyResult) { + newOp, anythingChanged := rewriter(root, rootID, isRoot) if !shouldVisit(root) { - return newOp, anythingChanged, nil + return newOp, anythingChanged } if anythingChanged.Changed() { @@ -314,7 +285,7 @@ func topDown( } oldInputs := root.Inputs() - newInputs := make([]ops.Operator, len(oldInputs)) + newInputs := make([]Operator, len(oldInputs)) childID := rootID type noLHSTableSet interface{ NoLHSTableSet() } @@ -323,17 +294,14 @@ func topDown( if _, isUnion := root.(noLHSTableSet); !isUnion && i > 0 { childID = childID.Merge(resolveID(oldInputs[0])) } - in, changed, err := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) - if err != nil { - return nil, nil, err - } + in, changed := topDown(operator, childID, resolveID, rewriter, shouldVisit, false) anythingChanged = anythingChanged.Merge(changed) newInputs[i] = in } - if anythingChanged != SameTree { - return root.Clone(newInputs), anythingChanged, nil + if anythingChanged != NoRewrite { + return root.Clone(newInputs), anythingChanged } - return root, SameTree, nil + return root, NoRewrite } diff --git a/go/vt/vtgate/planbuilder/operators/route.go b/go/vt/vtgate/planbuilder/operators/route.go index d4b2c43ecff..feeb091a725 100644 --- 
a/go/vt/vtgate/planbuilder/operators/route.go +++ b/go/vt/vtgate/planbuilder/operators/route.go @@ -21,11 +21,13 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/key" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,7 +35,7 @@ import ( type ( Route struct { - Source ops.Operator + Source Operator // Routes that have been merged into this one. MergedWith []*Route @@ -89,7 +91,7 @@ type ( Routing interface { // UpdateRoutingParams allows a Routing to control the routing params that will be used by the engine Route // OpCode is already set, and the default keyspace is set for read queries - UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) error + UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) // Clone returns a copy of the routing. Since we are trying different variation of merging, // one Routing can be used in different constellations. @@ -103,27 +105,27 @@ type ( // updateRoutingLogic updates the routing to take predicates into account. This can be used for routing // using vindexes or for figuring out which keyspace an information_schema query should be sent to. 
- updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) + updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing } ) // UpdateRoutingLogic first checks if we are dealing with a predicate that -func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) (Routing, error) { +func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r Routing) Routing { ks := r.Keyspace() if ks == nil { var err error ks, err = ctx.VSchema.AnyKeyspace() if err != nil { - return nil, err + panic(err) } } nr := &NoneRouting{keyspace: ks} - if isConstantFalse(expr) { - return nr, nil + if isConstantFalse(ctx.VSchema.Environment(), expr, ctx.VSchema.ConnCollation()) { + return nr } - exit := func() (Routing, error) { + exit := func() Routing { return r.updateRoutingLogic(ctx, expr) } @@ -135,7 +137,7 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r if cmp.Operator != sqlparser.NullSafeEqualOp && (sqlparser.IsNull(cmp.Left) || sqlparser.IsNull(cmp.Right)) { // any comparison against a literal null, except a null safe equality (<=>), will return null - return nr, nil + return nr } tuples, ok := cmp.Right.(sqlparser.ValTuple) @@ -148,13 +150,13 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r for _, n := range tuples { // If any of the values in the tuple is a literal null, we know that this comparison will always return NULL if sqlparser.IsNull(n) { - return nr, nil + return nr } } case sqlparser.InOp: // WHERE col IN (null) if len(tuples) == 1 && sqlparser.IsNull(tuples[0]) { - return nr, nil + return nr } } @@ -163,9 +165,12 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r // isConstantFalse checks whether this predicate can be evaluated at plan-time. 
If it returns `false` or `null`, // we know that the query will not return anything, and this can be used to produce better plans -func isConstantFalse(expr sqlparser.Expr) bool { - eenv := evalengine.EmptyExpressionEnv() - eexpr, err := evalengine.Translate(expr, nil) +func isConstantFalse(env *vtenv.Environment, expr sqlparser.Expr, collation collations.ID) bool { + eenv := evalengine.EmptyExpressionEnv(env) + eexpr, err := evalengine.Translate(expr, &evalengine.Config{ + Collation: collation, + Environment: env, + }) if err != nil { return false } @@ -173,7 +178,7 @@ func isConstantFalse(expr sqlparser.Expr) bool { if err != nil { return false } - if eres.Value(collations.Default()).IsNull() { + if eres.Value(collation).IsNull() { return false } b, err := eres.ToBooleanStrict() @@ -189,7 +194,7 @@ func (r *Route) Cost() int { } // Clone implements the Operator interface -func (r *Route) Clone(inputs []ops.Operator) ops.Operator { +func (r *Route) Clone(inputs []Operator) Operator { cloneRoute := *r cloneRoute.Source = inputs[0] cloneRoute.Routing = r.Routing.Clone() @@ -197,12 +202,12 @@ func (r *Route) Clone(inputs []ops.Operator) ops.Operator { } // Inputs implements the Operator interface -func (r *Route) Inputs() []ops.Operator { - return []ops.Operator{r.Source} +func (r *Route) Inputs() []Operator { + return []Operator{r.Source} } // SetInputs implements the Operator interface -func (r *Route) SetInputs(ops []ops.Operator) { +func (r *Route) SetInputs(ops []Operator) { r.Source = ops[0] } @@ -256,7 +261,9 @@ func (option *VindexOption) updateWithNewColumn( opcode func(*vindexes.ColumnVindex) engine.Opcode, ) bool { option.ColsSeen[colLoweredName] = true - option.ValueExprs = append(option.ValueExprs, valueExpr) + if valueExpr != nil { + option.ValueExprs = append(option.ValueExprs, valueExpr) + } option.Values[indexOfCol] = value option.Predicates[indexOfCol] = node option.Ready = len(option.ColsSeen) == len(colVindex.Columns) @@ -276,6 +283,14 @@ func (r 
*Route) IsSingleShard() bool { return false } +func (r *Route) IsSingleShardOrByDestination() bool { + switch r.Routing.OpCode() { + case engine.Unsharded, engine.DBA, engine.Next, engine.EqualUnique, engine.Reference, engine.ByDestination: + return true + } + return false +} + func tupleAccess(expr sqlparser.Expr, coordinates []int) sqlparser.Expr { tuple, _ := expr.(sqlparser.ValTuple) for _, idx := range coordinates { @@ -356,12 +371,11 @@ func (vpp *VindexPlusPredicates) bestOption() *VindexOption { func createRoute( ctx *plancontext.PlanningContext, queryTable *QueryTable, - solves semantics.TableSet, -) (ops.Operator, error) { +) Operator { if queryTable.IsInfSchema { return createInfSchemaRoute(ctx, queryTable) } - return findVSchemaTableAndCreateRoute(ctx, queryTable, queryTable.Table, solves, true /*planAlternates*/) + return findVSchemaTableAndCreateRoute(ctx, queryTable, queryTable.Table, true /*planAlternates*/) } // findVSchemaTableAndCreateRoute consults the VSchema to find a suitable @@ -370,34 +384,64 @@ func findVSchemaTableAndCreateRoute( ctx *plancontext.PlanningContext, queryTable *QueryTable, tableName sqlparser.TableName, - solves semantics.TableSet, planAlternates bool, -) (*Route, error) { - vschemaTable, _, _, _, target, err := ctx.VSchema.FindTableOrVindex(tableName) - if target != nil { - return nil, vterrors.VT09017("SELECT with a target destination is not allowed") - } +) *Route { + vschemaTable, _, _, tabletType, target, err := ctx.VSchema.FindTableOrVindex(tableName) if err != nil { - return nil, err + panic(err) } + targeted := createTargetedRouting(ctx, target, tabletType, vschemaTable) + return createRouteFromVSchemaTable( ctx, queryTable, vschemaTable, - solves, planAlternates, + targeted, ) } +func createTargetedRouting(ctx *plancontext.PlanningContext, target key.Destination, tabletType topodatapb.TabletType, vschemaTable *vindexes.Table) Routing { + switch ctx.Statement.(type) { + case *sqlparser.Update: + if tabletType != 
topodatapb.TabletType_PRIMARY { + panic(vterrors.VT09002("update")) + } + case *sqlparser.Delete: + if tabletType != topodatapb.TabletType_PRIMARY { + panic(vterrors.VT09002("delete")) + } + case *sqlparser.Insert: + if tabletType != topodatapb.TabletType_PRIMARY { + panic(vterrors.VT09002("insert")) + } + if target != nil { + panic(vterrors.VT09017("INSERT with a target destination is not allowed")) + } + case sqlparser.SelectStatement: + if target != nil { + panic(vterrors.VT09017("SELECT with a target destination is not allowed")) + } + } + + if target != nil { + return &TargetedRouting{ + keyspace: vschemaTable.Keyspace, + TargetDestination: target, + } + } + return nil +} + // createRouteFromTable creates a route from the given VSchema table. func createRouteFromVSchemaTable( ctx *plancontext.PlanningContext, queryTable *QueryTable, vschemaTable *vindexes.Table, - solves semantics.TableSet, planAlternates bool, -) (*Route, error) { + targeted Routing, +) *Route { if vschemaTable.Name.String() != queryTable.Table.Name.String() { // we are dealing with a routed table queryTable = queryTable.Clone() @@ -405,7 +449,7 @@ func createRouteFromVSchemaTable( queryTable.Table.Name = vschemaTable.Name astTable, ok := queryTable.Alias.Expr.(sqlparser.TableName) if !ok { - return nil, vterrors.VT13001("a derived table should never be a routed table") + panic(vterrors.VT13001("a derived table should never be a routed table")) } realTableName := sqlparser.NewIdentifierCS(vschemaTable.Name.String()) astTable.Name = realTableName @@ -421,14 +465,16 @@ func createRouteFromVSchemaTable( }, } - // We create the appropiate Routing struct here, depending on the type of table we are dealing with. - routing := createRoutingForVTable(vschemaTable, solves) + // We create the appropriate Routing struct here, depending on the type of table we are dealing with. 
+ var routing Routing + if targeted != nil { + routing = targeted + } else { + routing = createRoutingForVTable(ctx, vschemaTable, queryTable.ID) + } + for _, predicate := range queryTable.Predicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } plan.Routing = routing @@ -436,27 +482,19 @@ func createRouteFromVSchemaTable( switch routing := routing.(type) { case *ShardedRouting: if routing.isScatter() && len(queryTable.Predicates) > 0 { - var err error // If we have a scatter query, it's worth spending a little extra time seeing if we can't improve it - plan.Routing, err = routing.tryImprove(ctx, queryTable) - if err != nil { - return nil, err - } + plan.Routing = routing.tryImprove(ctx, queryTable) } case *AnyShardRouting: if planAlternates { - alternates, err := createAlternateRoutesFromVSchemaTable(ctx, queryTable, vschemaTable, solves) - if err != nil { - return nil, err - } - routing.Alternates = alternates + routing.Alternates = createAlternateRoutesFromVSchemaTable(ctx, queryTable, vschemaTable) } } - return plan, nil + return plan } -func createRoutingForVTable(vschemaTable *vindexes.Table, id semantics.TableSet) Routing { +func createRoutingForVTable(ctx *plancontext.PlanningContext, vschemaTable *vindexes.Table, id semantics.TableSet) Routing { switch { case vschemaTable.Type == vindexes.TypeSequence: return &SequenceRouting{keyspace: vschemaTable.Keyspace} @@ -465,7 +503,7 @@ func createRoutingForVTable(vschemaTable *vindexes.Table, id semantics.TableSet) case vschemaTable.Type == vindexes.TypeReference || !vschemaTable.Keyspace.Sharded: return &AnyShardRouting{keyspace: vschemaTable.Keyspace} default: - return newShardedRouting(vschemaTable, id) + return newShardedRouting(ctx, vschemaTable, id) } } @@ -473,40 +511,31 @@ func createAlternateRoutesFromVSchemaTable( ctx *plancontext.PlanningContext, queryTable *QueryTable, 
vschemaTable *vindexes.Table, - solves semantics.TableSet, -) (map[*vindexes.Keyspace]*Route, error) { +) map[*vindexes.Keyspace]*Route { routes := make(map[*vindexes.Keyspace]*Route) switch vschemaTable.Type { case "", vindexes.TypeReference: for ksName, referenceTable := range vschemaTable.ReferencedBy { - route, err := findVSchemaTableAndCreateRoute( + route := findVSchemaTableAndCreateRoute( ctx, queryTable, sqlparser.TableName{ Name: referenceTable.Name, Qualifier: sqlparser.NewIdentifierCS(ksName), }, - solves, false, /*planAlternates*/ ) - if err != nil { - return nil, err - } routes[referenceTable.Keyspace] = route } if vschemaTable.Source != nil { - route, err := findVSchemaTableAndCreateRoute( + route := findVSchemaTableAndCreateRoute( ctx, queryTable, vschemaTable.Source.TableName, - solves, false, /*planAlternates*/ ) - if err != nil { - return nil, err - } keyspace := route.Routing.Keyspace() if keyspace != nil { routes[keyspace] = route @@ -514,15 +543,12 @@ func createAlternateRoutesFromVSchemaTable( } } - return routes, nil + return routes } -func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { // first we see if the predicate changes how we route - newRouting, err := UpdateRoutingLogic(ctx, expr, r.Routing) - if err != nil { - panic(err) - } + newRouting := UpdateRoutingLogic(ctx, expr, r.Routing) r.Routing = newRouting // we also need to push the predicate down into the query @@ -530,16 +556,23 @@ func (r *Route) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return r } -func createProjection(ctx *plancontext.PlanningContext, src ops.Operator) (*Projection, error) { +func createProjection(ctx *plancontext.PlanningContext, src Operator, derivedName string) *Projection { proj := newAliasedProjection(src) cols := src.GetColumns(ctx) for _, col := range cols { - _, err := 
proj.addUnexploredExpr(col, col.Expr) - if err != nil { - return nil, err + if derivedName == "" { + proj.addUnexploredExpr(col, col.Expr) + continue } + + // for derived tables, we want to use the exposed colname + tableName := sqlparser.NewTableName(derivedName) + columnName := col.ColumnName() + colName := sqlparser.NewColNameWithQualifier(columnName, tableName) + ctx.SemTable.CopySemanticInfo(col.Expr, colName) + proj.addUnexploredExpr(aeWrap(colName), colName) } - return proj, nil + return proj } func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) int { @@ -554,17 +587,14 @@ func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, // if at least one column is not already present, we check if we can easily find a projection // or aggregation in our source that we can add to - op, ok, offsets := addMultipleColumnsToInput(ctx, r.Source, reuse, []bool{gb}, []*sqlparser.AliasedExpr{expr}) + derived, op, ok, offsets := addMultipleColumnsToInput(ctx, r.Source, reuse, []bool{gb}, []*sqlparser.AliasedExpr{expr}) r.Source = op if ok { return offsets[0] } // If no-one could be found, we probably don't have one yet, so we add one here - src, err := createProjection(ctx, r.Source) - if err != nil { - panic(err) - } + src := createProjection(ctx, r.Source, derived) r.Source = src offsets = src.addColumnsWithoutPushing(ctx, reuse, []bool{gb}, []*sqlparser.AliasedExpr{expr}) @@ -572,77 +602,146 @@ func (r *Route) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, } type selectExpressions interface { - ops.Operator + Operator addColumnWithoutPushing(ctx *plancontext.PlanningContext, expr *sqlparser.AliasedExpr, addToGroupBy bool) int addColumnsWithoutPushing(ctx *plancontext.PlanningContext, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) []int - isDerived() bool + derivedName() string } -// addColumnToInput adds a column to an operator without pushing it down. 
-// It will return a bool indicating whether the addition was successful or not, -// and an offset to where the column can be found -func addMultipleColumnsToInput(ctx *plancontext.PlanningContext, operator ops.Operator, reuse bool, addToGroupBy []bool, exprs []*sqlparser.AliasedExpr) (ops.Operator, bool, []int) { +// addColumnToInput adds columns to an operator without pushing them down +func addMultipleColumnsToInput( + ctx *plancontext.PlanningContext, + operator Operator, + reuse bool, + addToGroupBy []bool, + exprs []*sqlparser.AliasedExpr, +) (derivedName string, // if we found a derived table, this will contain its name + projection Operator, // if an operator needed to be built, it will be returned here + found bool, // whether a matching op was found or not + offsets []int, // the offsets the expressions received +) { switch op := operator.(type) { case *SubQuery: - src, added, offset := addMultipleColumnsToInput(ctx, op.Outer, reuse, addToGroupBy, exprs) + derivedName, src, added, offset := addMultipleColumnsToInput(ctx, op.Outer, reuse, addToGroupBy, exprs) if added { op.Outer = src } - return op, added, offset + return derivedName, op, added, offset case *Distinct: - src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + derivedName, src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) if added { op.Source = src } - return op, added, offset + return derivedName, op, added, offset case *Limit: - src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + derivedName, src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) if added { op.Source = src } - return op, added, offset + return derivedName, op, added, offset case *Ordering: - src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + derivedName, src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, 
addToGroupBy, exprs) if added { op.Source = src } - return op, added, offset + return derivedName, op, added, offset case *LockAndComment: - src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) + derivedName, src, added, offset := addMultipleColumnsToInput(ctx, op.Source, reuse, addToGroupBy, exprs) if added { op.Source = src } - return op, added, offset + return derivedName, op, added, offset + + case *Horizon: + // if the horizon has an alias, then it is a derived table, + // we have to add a new projection and can't build on this one + return op.Alias, op, false, nil case selectExpressions: - if op.isDerived() { + name := op.derivedName() + if name != "" { // if the only thing we can push to is a derived table, // we have to add a new projection and can't build on this one - return op, false, nil + return name, op, false, nil } offset := op.addColumnsWithoutPushing(ctx, reuse, addToGroupBy, exprs) - return op, true, offset + return "", op, true, offset case *Union: - tableID := semantics.SingleTableSet(len(ctx.SemTable.Tables)) - ctx.SemTable.Tables = append(ctx.SemTable.Tables, nil) - unionColumns := op.GetColumns(ctx) - proj := &Projection{ - Source: op, - Columns: AliasedProjections(slice.Map(unionColumns, newProjExpr)), - DT: &DerivedTable{ - TableID: tableID, - Alias: "dt", - }, - } + proj := addDerivedProj(ctx, op) return addMultipleColumnsToInput(ctx, proj, reuse, addToGroupBy, exprs) default: - return op, false, nil + return "", op, false, nil + } +} + +func (r *Route) AddWSColumn(ctx *plancontext.PlanningContext, offset int, _ bool) int { + columns := r.GetColumns(ctx) + if offset > len(columns) { + panic(vterrors.VT13001(fmt.Sprintf("column %d not found", offset))) + } + col := columns[offset] + if offset := r.FindCol(ctx, weightStringFor(col.Expr), true); offset >= 0 { + return offset } + + ok, foundOffset := addWSColumnToInput(ctx, r.Source, offset) + if !ok { + src := addDerivedProj(ctx, r.Source) + r.Source = 
src + return src.AddWSColumn(ctx, offset, true) + } + return foundOffset +} + +func addWSColumnToInput(ctx *plancontext.PlanningContext, source Operator, offset int) (bool, int) { + switch op := source.(type) { + case *SubQuery: + return addWSColumnToInput(ctx, op.Outer, offset) + case *Distinct: + return addWSColumnToInput(ctx, op.Source, offset) + case *Filter: + return addWSColumnToInput(ctx, op.Source, offset) + case *Projection: + return true, op.AddWSColumn(ctx, offset, true) + case *Aggregator: + return true, op.AddWSColumn(ctx, offset, true) + } + return false, -1 +} + +func addDerivedProj( + ctx *plancontext.PlanningContext, + op Operator, +) (projection *Projection) { + unionColumns := op.GetColumns(ctx) + columns := make(sqlparser.Columns, 0, len(unionColumns)) + for i := range unionColumns { + columns = append(columns, sqlparser.NewIdentifierCI(fmt.Sprintf("c%d", i))) + } + derivedProj := &Projection{ + Source: op, + Columns: AliasedProjections(slice.Map(unionColumns, newProjExpr)), + DT: &DerivedTable{ + TableID: ctx.SemTable.NewTableId(), + Alias: "dt", + Columns: columns, + }, + } + + proj := newAliasedProjection(derivedProj) + tbl := sqlparser.NewTableName("dt") + for i, col := range unionColumns { + projExpr := newProjExpr(col) + projExpr.EvalExpr = sqlparser.NewColNameWithQualifier(fmt.Sprintf("c%d", i), tbl) + proj.addProjExpr(projExpr) + } + + return proj } func (r *Route) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, _ bool) int { @@ -657,7 +756,7 @@ func (r *Route) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Selec return r.Source.GetSelectExprs(ctx) } -func (r *Route) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (r *Route) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return r.Source.GetOrdering(ctx) } @@ -673,7 +772,7 @@ func (r *Route) TablesUsed() []string { return collect() } -func isSpecialOrderBy(o ops.OrderBy) bool { +func isSpecialOrderBy(o OrderBy) bool { if 
sqlparser.IsNull(o.Inner.Expr) { return true } @@ -681,17 +780,17 @@ func isSpecialOrderBy(o ops.OrderBy) bool { return isFunction && f.Name.Lowered() == "rand" } -func (r *Route) planOffsets(ctx *plancontext.PlanningContext) { +func (r *Route) planOffsets(ctx *plancontext.PlanningContext) Operator { // if operator is returning data from a single shard, we don't need to do anything more if r.IsSingleShard() { - return + return nil } // if we are getting results from multiple shards, we need to do a merge-sort // between them to get the final output correctly sorted ordering := r.Source.GetOrdering(ctx) if len(ordering) == 0 { - return + return nil } for _, order := range ordering { @@ -713,6 +812,7 @@ func (r *Route) planOffsets(ctx *plancontext.PlanningContext) { } r.Ordering = append(r.Ordering, o) } + return nil } func weightStringFor(expr sqlparser.Expr) sqlparser.Expr { diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index 079813388b3..47405e3b935 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -20,14 +20,11 @@ import ( "bytes" "io" - "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -38,34 +35,34 @@ type ( left, right semantics.TableSet } - opCacheMap map[tableSetPair]ops.Operator + opCacheMap map[tableSetPair]Operator ) -func pushDerived(ctx *plancontext.PlanningContext, op *Horizon) (ops.Operator, *rewrite.ApplyResult, error) { +func pushDerived(ctx 
*plancontext.PlanningContext, op *Horizon) (Operator, *ApplyResult) { innerRoute, ok := op.Source.(*Route) if !ok { - return op, rewrite.SameTree, nil + return op, NoRewrite } if !(innerRoute.Routing.OpCode() == engine.EqualUnique) && !op.IsMergeable(ctx) { // no need to check anything if we are sure that we will only hit a single shard - return op, rewrite.SameTree, nil + return op, NoRewrite } - return rewrite.Swap(op, op.Source, "push derived under route") + return Swap(op, op.Source, "push derived under route") } -func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (ops.Operator, *rewrite.ApplyResult, error) { - return mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), !op.LeftJoin) +func optimizeJoin(ctx *plancontext.PlanningContext, op *Join) (Operator, *ApplyResult) { + return mergeOrJoin(ctx, op.LHS, op.RHS, sqlparser.SplitAndExpression(nil, op.Predicate), op.JoinType) } -func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result ops.Operator, changed *rewrite.ApplyResult, err error) { +func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (result Operator, changed *ApplyResult) { switch { case ctx.PlannerVersion == querypb.ExecuteOptions_Gen4Left2Right: - result, err = leftToRightSolve(ctx, op) + result = leftToRightSolve(ctx, op) default: - result, err = greedySolve(ctx, op) + result = greedySolve(ctx, op) } unresolved := op.UnsolvedPredicates(ctx.SemTable) @@ -75,7 +72,7 @@ func optimizeQueryGraph(ctx *plancontext.PlanningContext, op *QueryGraph) (resul result = newFilter(result, ctx.SemTable.AndExpressions(unresolved...)) } - changed = rewrite.NewTree("solved query graph", result) + changed = Rewrote("solved query graph") return } @@ -83,43 +80,44 @@ func buildVindexTableForDML( ctx *plancontext.PlanningContext, tableInfo semantics.TableInfo, table *QueryTable, + ins *sqlparser.Insert, dmlType string, -) (*vindexes.Table, Routing, error) { +) (*vindexes.Table, Routing) { 
vindexTable := tableInfo.GetVindexTable() - if vindexTable.Source != nil { + if tableInfo.GetVindexTable().Type == vindexes.TypeReference && vindexTable.Source != nil { sourceTable, _, _, _, _, err := ctx.VSchema.FindTableOrVindex(vindexTable.Source.TableName) if err != nil { - return nil, nil, err + panic(err) } vindexTable = sourceTable + refTbl := sqlparser.NewAliasedTableExpr(vindexTable.GetTableName(), "") + ins.Table.Expr = refTbl.Expr + // We don't need to process the alias because you cannot define aliases for inserts. } if !vindexTable.Keyspace.Sharded { - return vindexTable, &AnyShardRouting{keyspace: vindexTable.Keyspace}, nil + return vindexTable, &AnyShardRouting{keyspace: vindexTable.Keyspace} } - var dest key.Destination - var typ topodatapb.TabletType - var err error tblName, ok := table.Alias.Expr.(sqlparser.TableName) if !ok { - return nil, nil, vterrors.VT12001("multi shard UPDATE with LIMIT") + panic(vterrors.VT12001("multi shard UPDATE with LIMIT")) } - _, _, _, typ, dest, err = ctx.VSchema.FindTableOrVindex(tblName) + _, _, _, typ, dest, err := ctx.VSchema.FindTableOrVindex(tblName) if err != nil { - return nil, nil, err + panic(err) } if dest == nil { routing := &ShardedRouting{ keyspace: vindexTable.Keyspace, RouteOpCode: engine.Scatter, } - return vindexTable, routing, nil + return vindexTable, routing } if typ != topodatapb.TabletType_PRIMARY { - return nil, nil, vterrors.VT09002(dmlType) + panic(vterrors.VT09002(dmlType)) } // we are dealing with an explicitly targeted DML @@ -127,48 +125,7 @@ func buildVindexTableForDML( keyspace: vindexTable.Keyspace, TargetDestination: dest, } - return vindexTable, routing, nil -} - -func generateOwnedVindexQuery(tblExpr sqlparser.TableExpr, del *sqlparser.Delete, table *vindexes.Table, ksidCols []sqlparser.IdentifierCI) string { - buf := sqlparser.NewTrackedBuffer(nil) - for idx, col := range ksidCols { - if idx == 0 { - buf.Myprintf("select %v", col) - } else { - buf.Myprintf(", %v", col) - } - } - 
for _, cv := range table.Owned { - for _, column := range cv.Columns { - buf.Myprintf(", %v", column) - } - } - buf.Myprintf(" from %v%v%v%v for update", tblExpr, del.Where, del.OrderBy, del.Limit) - return buf.String() -} - -func getUpdateVindexInformation( - ctx *plancontext.PlanningContext, - updStmt *sqlparser.Update, - vindexTable *vindexes.Table, - tableID semantics.TableSet, - assignments []SetExpr, -) ([]*VindexPlusPredicates, map[string]*engine.VindexValues, string, []string, error) { - if !vindexTable.Keyspace.Sharded { - return nil, nil, "", nil, nil - } - - primaryVindex, vindexAndPredicates, err := getVindexInformation(tableID, vindexTable) - if err != nil { - return nil, nil, "", nil, err - } - - changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, err := buildChangedVindexesValues(ctx, updStmt, vindexTable, primaryVindex.Columns, assignments) - if err != nil { - return nil, nil, "", nil, err - } - return vindexAndPredicates, changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex, nil + return vindexTable, routing } /* @@ -177,67 +134,50 @@ func getUpdateVindexInformation( and removes the two inputs to this cheapest plan and instead adds the join. 
As an optimization, it first only considers joining tables that have predicates defined between them */ -func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) { - routeOps, err := seedOperatorList(ctx, qg) +func greedySolve(ctx *plancontext.PlanningContext, qg *QueryGraph) Operator { + routeOps := seedOperatorList(ctx, qg) planCache := opCacheMap{} - if err != nil { - return nil, err - } - op, err := mergeRoutes(ctx, qg, routeOps, planCache, false) - if err != nil { - return nil, err - } - return op, nil + return mergeRoutes(ctx, qg, routeOps, planCache, false) } -func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) (ops.Operator, error) { - plans, err := seedOperatorList(ctx, qg) - if err != nil { - return nil, err - } +func leftToRightSolve(ctx *plancontext.PlanningContext, qg *QueryGraph) Operator { + plans := seedOperatorList(ctx, qg) - var acc ops.Operator + var acc Operator for _, plan := range plans { if acc == nil { acc = plan continue } joinPredicates := qg.GetPredicates(TableID(acc), TableID(plan)) - acc, _, err = mergeOrJoin(ctx, acc, plan, joinPredicates, true) - if err != nil { - return nil, err - } + acc, _ = mergeOrJoin(ctx, acc, plan, joinPredicates, sqlparser.NormalJoinType) } - return acc, nil + return acc } // seedOperatorList returns a route for each table in the qg -func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) ([]ops.Operator, error) { - plans := make([]ops.Operator, len(qg.Tables)) +func seedOperatorList(ctx *plancontext.PlanningContext, qg *QueryGraph) []Operator { + plans := make([]Operator, len(qg.Tables)) // we start by seeding the table with the single routes for i, table := range qg.Tables { - solves := ctx.SemTable.TableSetFor(table.Alias) - plan, err := createRoute(ctx, table, solves) - if err != nil { - return nil, err - } + plan := createRoute(ctx, table) if qg.NoDeps != nil { plan = plan.AddPredicate(ctx, qg.NoDeps) } plans[i] = plan } - return plans, 
nil + return plans } -func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) (ops.Operator, error) { +func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) Operator { ks, err := ctx.VSchema.AnyKeyspace() if err != nil { - return nil, err + panic(err) } - var src ops.Operator = &Table{ + var src Operator = &Table{ QTable: table, VTable: &vindexes.Table{ Name: table.Table.Name, @@ -246,26 +186,20 @@ func createInfSchemaRoute(ctx *plancontext.PlanningContext, table *QueryTable) ( } var routing Routing = &InfoSchemaRouting{} for _, pred := range table.Predicates { - routing, err = UpdateRoutingLogic(ctx, pred, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, pred, routing) } return &Route{ Source: src, Routing: routing, - }, nil + } } -func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []ops.Operator, planCache opCacheMap, crossJoinsOK bool) (ops.Operator, error) { +func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps []Operator, planCache opCacheMap, crossJoinsOK bool) Operator { if len(physicalOps) == 0 { - return nil, nil + return nil } for len(physicalOps) > 1 { - bestTree, lIdx, rIdx, err := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK) - if err != nil { - return nil, err - } + bestTree, lIdx, rIdx := findBestJoin(ctx, qg, physicalOps, planCache, crossJoinsOK) // if we found a plan, we'll replace the two plans that were joined with the join plan created if bestTree != nil { // we remove one plan, and replace the other @@ -279,7 +213,7 @@ func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps [ physicalOps = append(physicalOps, bestTree) } else { if crossJoinsOK { - return nil, vterrors.VT13001("should not happen: we should be able to merge cross joins") + panic(vterrors.VT13001("should not happen: we should be able to merge cross joins")) } // we will only fail to find a join plan when 
there are only cross joins left // when that happens, we switch over to allow cross joins as well. @@ -287,20 +221,20 @@ func mergeRoutes(ctx *plancontext.PlanningContext, qg *QueryGraph, physicalOps [ crossJoinsOK = true } } - return physicalOps[0], nil + return physicalOps[0] } -func removeAt(plans []ops.Operator, idx int) []ops.Operator { +func removeAt(plans []Operator, idx int) []Operator { return append(plans[:idx], plans[idx+1:]...) } func findBestJoin( ctx *plancontext.PlanningContext, qg *QueryGraph, - plans []ops.Operator, + plans []Operator, planCache opCacheMap, crossJoinsOK bool, -) (bestPlan ops.Operator, lIdx int, rIdx int, err error) { +) (bestPlan Operator, lIdx int, rIdx int) { for i, lhs := range plans { for j, rhs := range plans { if i == j { @@ -313,10 +247,7 @@ func findBestJoin( // cartesian product, which is almost always a bad idea continue } - plan, err := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) - if err != nil { - return nil, 0, 0, err - } + plan := getJoinFor(ctx, planCache, lhs, rhs, joinPredicates) if bestPlan == nil || CostOf(plan) < CostOf(bestPlan) { bestPlan = plan // remember which plans we based on, so we can remove them later @@ -325,30 +256,25 @@ func findBestJoin( } } } - return bestPlan, lIdx, rIdx, nil + return bestPlan, lIdx, rIdx } -func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr) (ops.Operator, error) { +func getJoinFor(ctx *plancontext.PlanningContext, cm opCacheMap, lhs, rhs Operator, joinPredicates []sqlparser.Expr) Operator { solves := tableSetPair{left: TableID(lhs), right: TableID(rhs)} cachedPlan := cm[solves] if cachedPlan != nil { - return cachedPlan, nil + return cachedPlan } - join, _, err := mergeOrJoin(ctx, lhs, rhs, joinPredicates, true) - if err != nil { - return nil, err - } + join, _ := mergeOrJoin(ctx, lhs, rhs, joinPredicates, sqlparser.NormalJoinType) cm[solves] = join - return join, nil + return join } // 
requiresSwitchingSides will return true if any of the operators with the root from the given operator tree // is of the type that should not be on the RHS of a join -func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) bool { - required := false - - _ = rewrite.Visit(op, func(current ops.Operator) error { +func requiresSwitchingSides(ctx *plancontext.PlanningContext, op Operator) (required bool) { + _ = Visit(op, func(current Operator) error { horizon, isHorizon := current.(*Horizon) if isHorizon && !horizon.IsMergeable(ctx) { @@ -358,42 +284,37 @@ func requiresSwitchingSides(ctx *plancontext.PlanningContext, op ops.Operator) b return nil }) - - return required + return } -func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs ops.Operator, joinPredicates []sqlparser.Expr, inner bool) (ops.Operator, *rewrite.ApplyResult, error) { - newPlan := mergeJoinInputs(ctx, lhs, rhs, joinPredicates, newJoinMerge(joinPredicates, inner)) +func mergeOrJoin(ctx *plancontext.PlanningContext, lhs, rhs Operator, joinPredicates []sqlparser.Expr, joinType sqlparser.JoinType) (Operator, *ApplyResult) { + newPlan := mergeJoinInputs(ctx, lhs, rhs, joinPredicates, newJoinMerge(joinPredicates, joinType)) if newPlan != nil { - return newPlan, rewrite.NewTree("merge routes into single operator", newPlan), nil + return newPlan, Rewrote("merge routes into single operator") } if len(joinPredicates) > 0 && requiresSwitchingSides(ctx, rhs) { - if !inner { - return nil, nil, vterrors.VT12001("LEFT JOIN with derived tables") - } - - if requiresSwitchingSides(ctx, lhs) { - return nil, nil, vterrors.VT12001("JOIN between derived tables") + if !joinType.IsCommutative() || requiresSwitchingSides(ctx, lhs) { + // we can't switch sides, so let's see if we can use a HashJoin to solve it + join := NewHashJoin(lhs, rhs, !joinType.IsInner()) + for _, pred := range joinPredicates { + join.AddJoinPredicate(ctx, pred) + } + ctx.SemTable.QuerySignature.HashJoin = true + return 
join, Rewrote("use a hash join because we have LIMIT on the LHS") } - join := NewApplyJoin(Clone(rhs), Clone(lhs), nil, !inner) - newOp, err := pushJoinPredicates(ctx, joinPredicates, join) - if err != nil { - return nil, nil, err - } - return newOp, rewrite.NewTree("logical join to applyJoin, switching side because derived table", newOp), nil + join := NewApplyJoin(ctx, Clone(rhs), Clone(lhs), nil, joinType) + newOp := pushJoinPredicates(ctx, joinPredicates, join) + return newOp, Rewrote("logical join to applyJoin, switching side because LIMIT") } - join := NewApplyJoin(Clone(lhs), Clone(rhs), nil, !inner) - newOp, err := pushJoinPredicates(ctx, joinPredicates, join) - if err != nil { - return nil, nil, err - } - return newOp, rewrite.NewTree("logical join to applyJoin ", newOp), nil + join := NewApplyJoin(ctx, Clone(lhs), Clone(rhs), nil, joinType) + newOp := pushJoinPredicates(ctx, joinPredicates, join) + return newOp, Rewrote("logical join to applyJoin ") } -func operatorsToRoutes(a, b ops.Operator) (*Route, *Route) { +func operatorsToRoutes(a, b Operator) (*Route, *Route) { aRoute, ok := a.(*Route) if !ok { return nil, nil @@ -431,7 +352,7 @@ func canMergeOnFilter(ctx *plancontext.PlanningContext, a, b *Route, predicate s return rVindex == lVindex } -func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlparser.Expr) vindexes.SingleColumn { +func findColumnVindex(ctx *plancontext.PlanningContext, a Operator, exp sqlparser.Expr) vindexes.SingleColumn { _, isCol := exp.(*sqlparser.ColName) if !isCol { return nil @@ -456,7 +377,7 @@ func findColumnVindex(ctx *plancontext.PlanningContext, a ops.Operator, exp sqlp deps := ctx.SemTable.RecursiveDeps(expr) - _ = rewrite.Visit(a, func(rel ops.Operator) error { + _ = Visit(a, func(rel Operator) error { to, isTableOp := rel.(tableIDIntroducer) if !isTableOp { return nil @@ -610,14 +531,14 @@ func hexEqual(a, b *sqlparser.Literal) bool { return false } -func pushJoinPredicates(ctx 
*plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) (ops.Operator, error) { +func pushJoinPredicates(ctx *plancontext.PlanningContext, exprs []sqlparser.Expr, op *ApplyJoin) Operator { if len(exprs) == 0 { - return op, nil + return op } for _, expr := range exprs { - AddPredicate(ctx, op, expr, true, newFilter) + AddPredicate(ctx, op, expr, true, newFilterSinglePredicate) } - return op, nil + return op } diff --git a/go/vt/vtgate/planbuilder/operators/sequential.go b/go/vt/vtgate/planbuilder/operators/sequential.go new file mode 100644 index 00000000000..2db376a97bb --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/sequential.go @@ -0,0 +1,53 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" +) + +type Sequential struct { + Sources []Operator + + noPredicates + noColumns +} + +// Clone implements the Operator interface +func (s *Sequential) Clone(inputs []Operator) Operator { + newOp := *s + newOp.Sources = inputs + return &newOp +} + +func (s *Sequential) GetOrdering(*plancontext.PlanningContext) []OrderBy { + return nil +} + +// Inputs implements the Operator interface +func (s *Sequential) Inputs() []Operator { + return s.Sources +} + +// SetInputs implements the Operator interface +func (s *Sequential) SetInputs(ops []Operator) { + s.Sources = ops +} + +func (s *Sequential) ShortDescription() string { + return "" +} diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go index d54db071d46..29ee88787b5 100644 --- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go +++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go @@ -52,7 +52,7 @@ type ShardedRouting struct { var _ Routing = (*ShardedRouting)(nil) -func newShardedRouting(vtable *vindexes.Table, id semantics.TableSet) Routing { +func newShardedRouting(ctx *plancontext.PlanningContext, vtable *vindexes.Table, id semantics.TableSet) Routing { routing := &ShardedRouting{ RouteOpCode: engine.Scatter, keyspace: vtable.Keyspace, @@ -77,7 +77,33 @@ func newShardedRouting(vtable *vindexes.Table, id semantics.TableSet) Routing { } } + // Find the tableInfo for the given id + ti, err := ctx.SemTable.TableInfoFor(id) + if err != nil { + panic(err) + } + + // If the tableInfo is a realTable, then get the vindexHint from it. 
+ var vindexHint *sqlparser.IndexHint + rt, isRt := ti.(*semantics.RealTable) + if isRt { + vindexHint = rt.GetVindexHint() + } for _, columnVindex := range vtable.ColumnVindexes { + if vindexHint != nil { + switch vindexHint.Type { + case sqlparser.UseVindexOp: + // For a USE VINDEX type vindex hint, we want to skip any vindex that isn't in the indexes list. + if !indexesContains(vindexHint.Indexes, columnVindex.Name) { + continue + } + case sqlparser.IgnoreVindexOp: + // For a IGNORE VINDEX type vindex hint, we want to skip any vindex that is in the indexes list. + if indexesContains(vindexHint.Indexes, columnVindex.Name) { + continue + } + } + } // ignore any backfilling vindexes from vindex selection. if columnVindex.IsBackfilling() { continue @@ -87,6 +113,13 @@ func newShardedRouting(vtable *vindexes.Table, id semantics.TableSet) Routing { return routing } +// indexesContains is a helper function that returns whether a given string is part of the IdentifierCI list. +func indexesContains(indexes []sqlparser.IdentifierCI, name string) bool { + return slices.ContainsFunc(indexes, func(ci sqlparser.IdentifierCI) bool { + return ci.EqualString(name) + }) +} + func (tr *ShardedRouting) isScatter() bool { return tr.RouteOpCode == engine.Scatter } @@ -97,28 +130,23 @@ func (tr *ShardedRouting) isScatter() bool { // This can sometimes push a predicate to the top, so it's not hiding inside an OR // 2. 
If that is not enough, an additional rewrite pass is performed where we try to // turn ORs into IN, which is easier for the planner to plan -func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) (Routing, error) { +func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTable *QueryTable) Routing { oldPredicates := queryTable.Predicates queryTable.Predicates = nil tr.SeenPredicates = nil var routing Routing = tr - var err error for _, pred := range oldPredicates { rewritten := sqlparser.RewritePredicate(pred) predicates := sqlparser.SplitAndExpression(nil, rewritten.(sqlparser.Expr)) for _, predicate := range predicates { queryTable.Predicates = append(queryTable.Predicates, predicate) - - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } } // If we have something other than a sharded routing with scatter, we are done if sr, ok := routing.(*ShardedRouting); !ok || !sr.isScatter() { - return routing, nil + return routing } // if we _still_ haven't found a better route, we can run this additional rewrite on any ORs we have @@ -128,23 +156,19 @@ func (tr *ShardedRouting) tryImprove(ctx *plancontext.PlanningContext, queryTabl continue } for _, predicate := range sqlparser.ExtractINFromOR(or) { - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } } - return routing, nil + return routing } -func (tr *ShardedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) error { +func (tr *ShardedRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.Keyspace = tr.keyspace if tr.Selected != nil { rp.Vindex = tr.Selected.FoundVindex rp.Values = tr.Selected.Values } - return nil } func (tr *ShardedRouting) Clone() Routing { @@ -166,17 
+190,13 @@ func (tr *ShardedRouting) Clone() Routing { } } -func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) (Routing, error) { +func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Routing { tr.SeenPredicates = append(tr.SeenPredicates, expr) - newRouting, newVindexFound, err := tr.searchForNewVindexes(ctx, expr) - if err != nil { - return nil, err - } - + newRouting, newVindexFound := tr.searchForNewVindexes(ctx, expr) if newRouting != nil { // we found something that we can route with something other than ShardedRouting - return newRouting, nil + return newRouting } // if we didn't open up any new vindex Options, no need to enter here @@ -184,10 +204,10 @@ func (tr *ShardedRouting) updateRoutingLogic(ctx *plancontext.PlanningContext, e tr.PickBestAvailableVindex() } - return tr, nil + return tr } -func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (Routing, error) { +func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) Routing { tr.RouteOpCode = engine.Scatter tr.Selected = nil for i, vp := range tr.VindexPreds { @@ -196,16 +216,12 @@ func (tr *ShardedRouting) resetRoutingLogic(ctx *plancontext.PlanningContext) (R var routing Routing = tr for _, predicate := range tr.SeenPredicates { - var err error - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + routing = UpdateRoutingLogic(ctx, predicate, routing) } - return routing, nil + return routing } -func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool, error) { +func (tr *ShardedRouting) searchForNewVindexes(ctx *plancontext.PlanningContext, predicate sqlparser.Expr) (Routing, bool) { newVindexFound := false switch node := predicate.(type) { case *sqlparser.ComparisonExpr: @@ -216,23 +232,23 @@ func (tr *ShardedRouting) searchForNewVindexes(ctx 
*plancontext.PlanningContext, newVindexFound = newVindexFound || found } - return nil, newVindexFound, nil + return nil, newVindexFound } -func (tr *ShardedRouting) planComparison(ctx *plancontext.PlanningContext, cmp *sqlparser.ComparisonExpr) (routing Routing, foundNew bool, err error) { +func (tr *ShardedRouting) planComparison(ctx *plancontext.PlanningContext, cmp *sqlparser.ComparisonExpr) (routing Routing, foundNew bool) { switch cmp.Operator { case sqlparser.EqualOp: found := tr.planEqualOp(ctx, cmp) - return nil, found, nil + return nil, found case sqlparser.InOp: found := tr.planInOp(ctx, cmp) - return nil, found, nil + return nil, found case sqlparser.LikeOp: found := tr.planLikeOp(ctx, cmp) - return nil, found, nil + return nil, found } - return nil, false, nil + return nil, false } func (tr *ShardedRouting) planIsExpr(ctx *plancontext.PlanningContext, node *sqlparser.IsExpr) bool { @@ -276,13 +292,13 @@ func (tr *ShardedRouting) planInOp(ctx *plancontext.PlanningContext, cmp *sqlpar opcode := func(*vindexes.ColumnVindex) engine.Opcode { return engine.IN } return tr.haveMatchingVindex(ctx, cmp, vdValue, left, value, opcode, justTheVindex) case sqlparser.ValTuple: - right, rightIsValTuple := cmp.Right.(sqlparser.ValTuple) - if !rightIsValTuple { - return false + switch right := cmp.Right.(type) { + case sqlparser.ValTuple: + return tr.planCompositeInOpRecursive(ctx, cmp, left, right, nil) + case sqlparser.ListArg: + return tr.planCompositeInOpArg(ctx, cmp, left, right) } - return tr.planCompositeInOpRecursive(ctx, cmp, left, right, nil) } - return false } @@ -365,7 +381,6 @@ func (tr *ShardedRouting) haveMatchingVindex( switch v.ColVindex.Vindex.(type) { case vindexes.SingleColumn: newVindexFound = tr.processSingleColumnVindex(node, valueExpr, column, value, opcode, vfunc, v, newVindexFound) - case vindexes.MultiColumn: newVindexFound = tr.processMultiColumnVindex(node, valueExpr, column, value, opcode, vfunc, v, newVindexFound) } @@ -395,15 +410,19 @@ 
func (tr *ShardedRouting) processSingleColumnVindex( return newVindexFound } - vindexPlusPredicates.Options = append(vindexPlusPredicates.Options, &VindexOption{ + vo := &VindexOption{ Values: []evalengine.Expr{value}, - ValueExprs: []sqlparser.Expr{valueExpr}, Predicates: []sqlparser.Expr{node}, OpCode: routeOpcode, FoundVindex: vindex, Cost: costFor(vindexPlusPredicates.ColVindex, routeOpcode), Ready: true, - }) + } + if valueExpr != nil { + vo.ValueExprs = []sqlparser.Expr{valueExpr} + } + vindexPlusPredicates.Options = append(vindexPlusPredicates.Options, vo) + return true } @@ -532,6 +551,40 @@ func (tr *ShardedRouting) planCompositeInOpRecursive( return foundVindex } +func (tr *ShardedRouting) planCompositeInOpArg( + ctx *plancontext.PlanningContext, + cmp *sqlparser.ComparisonExpr, + left sqlparser.ValTuple, + right sqlparser.ListArg, +) bool { + foundVindex := false + for idx, expr := range left { + col, ok := expr.(*sqlparser.ColName) + if !ok { + continue + } + + // check if left col is a vindex + if !tr.hasVindex(col) { + continue + } + + value := &evalengine.TupleBindVariable{ + Key: right.String(), + Index: idx, + } + if typ, found := ctx.SemTable.TypeForExpr(col); found { + value.Type = typ.Type() + value.Collation = typ.Collation() + } + + opcode := func(*vindexes.ColumnVindex) engine.Opcode { return engine.MultiEqual } + newVindex := tr.haveMatchingVindex(ctx, cmp, nil, col, value, opcode, justTheVindex) + foundVindex = newVindex || foundVindex + } + return foundVindex +} + func (tr *ShardedRouting) hasVindex(column *sqlparser.ColName) bool { for _, v := range tr.VindexPreds { for _, col := range v.ColVindex.Columns { @@ -565,6 +618,14 @@ func (tr *ShardedRouting) extraInfo() string { ) } + if len(tr.Selected.ValueExprs) == 0 { + return fmt.Sprintf( + "Vindex[%s] Seen:[%s]", + tr.Selected.FoundVindex.String(), + sqlparser.String(sqlparser.AndExpressions(tr.SeenPredicates...)), + ) + } + return fmt.Sprintf( "Vindex[%s] Values[%s] Seen:[%s]", 
tr.Selected.FoundVindex.String(), @@ -627,6 +688,7 @@ func makeEvalEngineExpr(ctx *plancontext.PlanningContext, n sqlparser.Expr) eval ee, _ := evalengine.Translate(expr, &evalengine.Config{ Collation: ctx.SemTable.Collation, ResolveType: ctx.SemTable.TypeForExpr, + Environment: ctx.VSchema.Environment(), }) if ee != nil { return ee diff --git a/go/vt/vtgate/planbuilder/operators/subquery.go b/go/vt/vtgate/planbuilder/operators/subquery.go index e06e595f689..03a482185d8 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery.go +++ b/go/vt/vtgate/planbuilder/operators/subquery.go @@ -25,7 +25,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -34,8 +33,8 @@ import ( // outer query through a join. type SubQuery struct { // Fields filled in at the time of construction: - Outer ops.Operator // Outer query operator. - Subquery ops.Operator // Subquery operator. + Outer Operator // Outer query operator. + Subquery Operator // Subquery operator. FilterType opcode.PulloutOpcode // Type of subquery filter. Original sqlparser.Expr // This is the expression we should use if we can merge the inner to the outer originalSubquery *sqlparser.Subquery // Subquery representation, e.g., (SELECT foo from user LIMIT 1). @@ -43,18 +42,21 @@ type SubQuery struct { OuterPredicate sqlparser.Expr // This is the predicate that is using the subquery expression. It will not be empty for projections ArgName string // This is the name of the ColName or Argument used to replace the subquery TopLevel bool // will be false if the subquery is deeply nested - JoinColumns []JoinColumn // Broken up join predicates. + JoinColumns []applyJoinColumn // Broken up join predicates. SubqueryValueName string // Value name returned by the subquery (uncorrelated queries). 
HasValuesName string // Argument name passed to the subquery (uncorrelated queries). // Fields related to correlated subqueries: Vars map[string]int // Arguments copied from outer to inner, set during offset planning. outerID semantics.TableSet + // correlated stores whether this subquery is correlated or not. + // We use this information to fail the planning if we are unable to merge the subquery with a route. + correlated bool IsProjection bool } -func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) { +func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) Operator { sq.Vars = make(map[string]int) columns, err := sq.GetJoinColumns(ctx, sq.Outer) if err != nil { @@ -66,26 +68,27 @@ func (sq *SubQuery) planOffsets(ctx *plancontext.PlanningContext) { sq.Vars[lhsExpr.Name] = offset } } + return nil } -func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer ops.Operator) (result []*sqlparser.ColName, err error) { +func (sq *SubQuery) OuterExpressionsNeeded(ctx *plancontext.PlanningContext, outer Operator) (result []*sqlparser.ColName) { joinColumns, err := sq.GetJoinColumns(ctx, outer) if err != nil { - return nil, err + return nil } for _, jc := range joinColumns { for _, lhsExpr := range jc.LHSExprs { col, ok := lhsExpr.Expr.(*sqlparser.ColName) if !ok { - return nil, vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr)) + panic(vterrors.VT13001("joins can only compare columns: %s", sqlparser.String(lhsExpr.Expr))) } result = append(result, col) } } - return result, nil + return result } -func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.Operator) ([]JoinColumn, error) { +func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer Operator) ([]applyJoinColumn, error) { if outer == nil { return nil, vterrors.VT13001("outer operator cannot be nil") } @@ -96,8 +99,8 @@ func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer 
ops.O } } sq.outerID = outerID - mapper := func(in sqlparser.Expr) (JoinColumn, error) { - return BreakExpressionInLHSandRHS(ctx, in, outerID) + mapper := func(in sqlparser.Expr) (applyJoinColumn, error) { + return breakExpressionInLHSandRHS(ctx, in, outerID), nil } joinPredicates, err := slice.MapWithError(sq.Predicates, mapper) if err != nil { @@ -108,7 +111,7 @@ func (sq *SubQuery) GetJoinColumns(ctx *plancontext.PlanningContext, outer ops.O } // Clone implements the Operator interface -func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { +func (sq *SubQuery) Clone(inputs []Operator) Operator { klone := *sq switch len(inputs) { case 1: @@ -125,21 +128,21 @@ func (sq *SubQuery) Clone(inputs []ops.Operator) ops.Operator { return &klone } -func (sq *SubQuery) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (sq *SubQuery) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return sq.Outer.GetOrdering(ctx) } // Inputs implements the Operator interface -func (sq *SubQuery) Inputs() []ops.Operator { +func (sq *SubQuery) Inputs() []Operator { if sq.Outer == nil { - return []ops.Operator{sq.Subquery} + return []Operator{sq.Subquery} } - return []ops.Operator{sq.Outer, sq.Subquery} + return []Operator{sq.Outer, sq.Subquery} } // SetInputs implements the Operator interface -func (sq *SubQuery) SetInputs(inputs []ops.Operator) { +func (sq *SubQuery) SetInputs(inputs []Operator) { switch len(inputs) { case 1: sq.Subquery = inputs[0] @@ -164,10 +167,10 @@ func (sq *SubQuery) ShortDescription() string { preds := append(sq.Predicates, sq.OuterPredicate) pred = " MERGE ON " + sqlparser.String(sqlparser.AndExpressions(preds...)) } - return fmt.Sprintf("%s %v%s", typ, sq.FilterType.String(), pred) + return fmt.Sprintf(":%s %s %v%s", sq.ArgName, typ, sq.FilterType.String(), pred) } -func (sq *SubQuery) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (sq *SubQuery) AddPredicate(ctx 
*plancontext.PlanningContext, expr sqlparser.Expr) Operator { sq.Outer = sq.Outer.AddPredicate(ctx, expr) return sq } @@ -176,6 +179,10 @@ func (sq *SubQuery) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bo return sq.Outer.AddColumn(ctx, reuseExisting, addToGroupBy, exprs) } +func (sq *SubQuery) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return sq.Outer.AddWSColumn(ctx, offset, underRoute) +} + func (sq *SubQuery) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { return sq.Outer.FindCol(ctx, expr, underRoute) } @@ -196,17 +203,20 @@ func (sq *SubQuery) GetMergePredicates() []sqlparser.Expr { return sq.Predicates } -func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { +func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer Operator) Operator { if !sq.TopLevel { - return nil, subqueryNotAtTopErr + panic(subqueryNotAtTopErr) + } + if sq.correlated && sq.FilterType != opcode.PulloutExists { + panic(correlatedSubqueryErr) } if sq.IsProjection { if len(sq.GetMergePredicates()) > 0 { // this means that we have a correlated subquery on our hands - return nil, correlatedSubqueryErr + panic(correlatedSubqueryErr) } sq.SubqueryValueName = sq.ArgName - return outer, nil + return outer } return sq.settleFilter(ctx, outer) } @@ -214,12 +224,12 @@ func (sq *SubQuery) settle(ctx *plancontext.PlanningContext, outer ops.Operator) var correlatedSubqueryErr = vterrors.VT12001("correlated subquery is only supported for EXISTS") var subqueryNotAtTopErr = vterrors.VT12001("unmergable subquery can not be inside complex expression") -func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer ops.Operator) (ops.Operator, error) { +func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer Operator) Operator { if len(sq.Predicates) > 0 { if sq.FilterType != opcode.PulloutExists { - return nil, 
correlatedSubqueryErr + panic(correlatedSubqueryErr) } - return outer, nil + return outer } hasValuesArg := func() string { @@ -254,16 +264,16 @@ func (sq *SubQuery) settleFilter(ctx *plancontext.PlanningContext, outer ops.Ope predicates = append(predicates, sqlparser.NewArgument(hasValuesArg()), rhsPred) sq.SubqueryValueName = sq.ArgName case opcode.PulloutNotIn: - predicates = append(predicates, sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg())), rhsPred) + predicates = append(predicates, &sqlparser.OrExpr{ + Left: sqlparser.NewNotExpr(sqlparser.NewArgument(hasValuesArg())), + Right: rhsPred, + }) sq.SubqueryValueName = sq.ArgName case opcode.PulloutValue: predicates = append(predicates, rhsPred) sq.SubqueryValueName = sq.ArgName } - return &Filter{ - Source: outer, - Predicates: predicates, - }, nil + return newFilter(outer, predicates...) } func dontEnterSubqueries(node, _ sqlparser.SQLNode) bool { @@ -278,22 +288,8 @@ func (sq *SubQuery) isMerged(ctx *plancontext.PlanningContext) bool { } // mapExpr rewrites all expressions according to the provided function -func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) (sqlparser.Expr, error)) error { - newPredicates, err := slice.MapWithError(sq.Predicates, f) - if err != nil { - return err - } - sq.Predicates = newPredicates - - sq.Original, err = f(sq.Original) - if err != nil { - return err - } - - originalSubquery, err := f(sq.originalSubquery) - if err != nil { - return err - } - sq.originalSubquery = originalSubquery.(*sqlparser.Subquery) - return nil +func (sq *SubQuery) mapExpr(f func(expr sqlparser.Expr) sqlparser.Expr) { + sq.Predicates = slice.Map(sq.Predicates, f) + sq.Original = f(sq.Original) + sq.originalSubquery = f(sq.originalSubquery).(*sqlparser.Subquery) } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_builder.go b/go/vt/vtgate/planbuilder/operators/subquery_builder.go index 1d1d12bbfe3..4caf3530075 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_builder.go +++ 
b/go/vt/vtgate/planbuilder/operators/subquery_builder.go @@ -19,7 +19,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -32,7 +31,7 @@ type SubQueryBuilder struct { outerID semantics.TableSet } -func (sqb *SubQueryBuilder) getRootOperator(op ops.Operator, decorator func(operator ops.Operator) ops.Operator) ops.Operator { +func (sqb *SubQueryBuilder) getRootOperator(op Operator, decorator func(operator Operator) Operator) Operator { if len(sqb.Inner) == 0 { return op } @@ -53,19 +52,16 @@ func (sqb *SubQueryBuilder) handleSubquery( ctx *plancontext.PlanningContext, expr sqlparser.Expr, outerID semantics.TableSet, -) (*SubQuery, error) { +) *SubQuery { subq, parentExpr := getSubQuery(expr) if subq == nil { - return nil, nil + return nil } argName := ctx.GetReservedArgumentFor(subq) - sqInner, err := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) - if err != nil { - return nil, err - } + sqInner := createSubqueryOp(ctx, parentExpr, expr, subq, outerID, argName) sqb.Inner = append(sqb.Inner, sqInner) - return sqInner, nil + return sqInner } func getSubQuery(expr sqlparser.Expr) (subqueryExprExists *sqlparser.Subquery, parentExpr sqlparser.Expr) { @@ -99,7 +95,7 @@ func createSubqueryOp( subq *sqlparser.Subquery, outerID semantics.TableSet, name string, -) (*SubQuery, error) { +) *SubQuery { switch parent := parent.(type) { case *sqlparser.NotExpr: switch parent.Expr.(type) { @@ -120,20 +116,14 @@ func createSubqueryOp( // and extracts subqueries into operators func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, -) (sqlparser.Exprs, []JoinColumn, error) { +) (sqlparser.Exprs, []applyJoinColumn) { switch stmt := stmt.(type) { case *sqlparser.Select: return 
sqb.inspectSelect(ctx, stmt) case *sqlparser.Union: - exprs1, cols1, err := sqb.inspectStatement(ctx, stmt.Left) - if err != nil { - return nil, nil, err - } - exprs2, cols2, err := sqb.inspectStatement(ctx, stmt.Right) - if err != nil { - return nil, nil, err - } - return append(exprs1, exprs2...), append(cols1, cols2...), nil + exprs1, cols1 := sqb.inspectStatement(ctx, stmt.Left) + exprs2, cols2 := sqb.inspectStatement(ctx, stmt.Right) + return append(exprs1, exprs2...), append(cols1, cols2...) } panic("unknown type") } @@ -144,22 +134,12 @@ func (sqb *SubQueryBuilder) inspectStatement(ctx *plancontext.PlanningContext, func (sqb *SubQueryBuilder) inspectSelect( ctx *plancontext.PlanningContext, sel *sqlparser.Select, -) (sqlparser.Exprs, []JoinColumn, error) { +) (sqlparser.Exprs, []applyJoinColumn) { // first we need to go through all the places where one can find predicates // and search for subqueries - newWhere, wherePreds, whereJoinCols, err := sqb.inspectWhere(ctx, sel.Where) - if err != nil { - return nil, nil, err - } - newHaving, havingPreds, havingJoinCols, err := sqb.inspectWhere(ctx, sel.Having) - if err != nil { - return nil, nil, err - } - - newFrom, onPreds, onJoinCols, err := sqb.inspectOnExpr(ctx, sel.From) - if err != nil { - return nil, nil, err - } + newWhere, wherePreds, whereJoinCols := sqb.inspectWhere(ctx, sel.Where) + newHaving, havingPreds, havingJoinCols := sqb.inspectWhere(ctx, sel.Having) + newFrom, onPreds, onJoinCols := sqb.inspectOnExpr(ctx, sel.From) // then we use the updated AST structs to build the operator // these AST elements have any subqueries replace by arguments @@ -168,8 +148,7 @@ func (sqb *SubQueryBuilder) inspectSelect( sel.From = newFrom return append(append(wherePreds, havingPreds...), onPreds...), - append(append(whereJoinCols, havingJoinCols...), onJoinCols...), - nil + append(append(whereJoinCols, havingJoinCols...), onJoinCols...) 
} func createSubquery( @@ -181,7 +160,7 @@ func createSubquery( argName string, filterType opcode.PulloutOpcode, isProjection bool, -) (*SubQuery, error) { +) *SubQuery { topLevel := ctx.SemTable.EqualsExpr(original, parent) original = cloneASTAndSemState(ctx, original) originalSq := cloneASTAndSemState(ctx, subq) @@ -189,20 +168,10 @@ func createSubquery( totalID := subqID.Merge(outerID) sqc := &SubQueryBuilder{totalID: totalID, subqID: subqID, outerID: outerID} - predicates, joinCols, err := sqc.inspectStatement(ctx, subq.Select) - if err != nil { - return nil, err - } - - stmt := rewriteRemainingColumns(ctx, subq.Select, subqID) - - // TODO: this should not be needed. We are using CopyOnRewrite above, but somehow this is not getting copied - ctx.SemTable.CopySemanticInfo(subq.Select, stmt) + predicates, joinCols := sqc.inspectStatement(ctx, subq.Select) + correlated := !ctx.SemTable.RecursiveDeps(subq).IsEmpty() - opInner, err := translateQueryToOp(ctx, stmt) - if err != nil { - return nil, err - } + opInner := translateQueryToOp(ctx, subq.Select) opInner = sqc.getRootOperator(opInner, nil) return &SubQuery{ @@ -215,15 +184,16 @@ func createSubquery( IsProjection: isProjection, TopLevel: topLevel, JoinColumns: joinCols, - }, nil + correlated: correlated, + } } func (sqb *SubQueryBuilder) inspectWhere( ctx *plancontext.PlanningContext, in *sqlparser.Where, -) (*sqlparser.Where, sqlparser.Exprs, []JoinColumn, error) { +) (*sqlparser.Where, sqlparser.Exprs, []applyJoinColumn) { if in == nil { - return nil, nil, nil, nil + return nil, nil, nil } jpc := &joinPredicateCollector{ totalID: sqb.totalID, @@ -231,17 +201,12 @@ func (sqb *SubQueryBuilder) inspectWhere( outerID: sqb.outerID, } for _, predicate := range sqlparser.SplitAndExpression(nil, in.Expr) { - sqlparser.RemoveKeyspaceFromColName(predicate) - subq, err := sqb.handleSubquery(ctx, predicate, sqb.totalID) - if err != nil { - return nil, nil, nil, err - } + sqlparser.RemoveKeyspaceInCol(predicate) + subq := 
sqb.handleSubquery(ctx, predicate, sqb.totalID) if subq != nil { continue } - if err = jpc.inspectPredicate(ctx, predicate); err != nil { - return nil, nil, nil, err - } + jpc.inspectPredicate(ctx, predicate) } if len(jpc.remainingPredicates) == 0 { @@ -250,13 +215,13 @@ func (sqb *SubQueryBuilder) inspectWhere( in.Expr = sqlparser.AndExpressions(jpc.remainingPredicates...) } - return in, jpc.predicates, jpc.joinColumns, nil + return in, jpc.predicates, jpc.joinColumns } func (sqb *SubQueryBuilder) inspectOnExpr( ctx *plancontext.PlanningContext, from []sqlparser.TableExpr, -) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []JoinColumn, err error) { +) (newFrom []sqlparser.TableExpr, onPreds sqlparser.Exprs, onJoinCols []applyJoinColumn) { for _, tbl := range from { tbl := sqlparser.CopyOnRewrite(tbl, dontEnterSubqueries, func(cursor *sqlparser.CopyOnWriteCursor) { cond, ok := cursor.Node().(*sqlparser.JoinCondition) @@ -271,20 +236,11 @@ func (sqb *SubQueryBuilder) inspectOnExpr( } for _, pred := range sqlparser.SplitAndExpression(nil, cond.On) { - subq, innerErr := sqb.handleSubquery(ctx, pred, sqb.totalID) - if err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + subq := sqb.handleSubquery(ctx, pred, sqb.totalID) if subq != nil { continue } - if err = jpc.inspectPredicate(ctx, pred); err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + jpc.inspectPredicate(ctx, pred) } if len(jpc.remainingPredicates) == 0 { cond.On = nil @@ -294,9 +250,6 @@ func (sqb *SubQueryBuilder) inspectOnExpr( onPreds = append(onPreds, jpc.predicates...) onJoinCols = append(onJoinCols, jpc.joinColumns...) 
}, ctx.SemTable.CopySemanticInfo) - if err != nil { - return - } newFrom = append(newFrom, tbl.(sqlparser.TableExpr)) } return @@ -309,7 +262,7 @@ func createComparisonSubQuery( subFromOutside *sqlparser.Subquery, outerID semantics.TableSet, name string, -) (*SubQuery, error) { +) *SubQuery { subq, outside := semantics.GetSubqueryAndOtherSide(parent) if outside == nil || subq != subFromOutside { panic("uh oh") @@ -323,10 +276,7 @@ func createComparisonSubQuery( filterType = opcode.PulloutNotIn } - subquery, err := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) - if err != nil { - return nil, err - } + subquery := createSubquery(ctx, original, subq, outerID, parent, name, filterType, false) // if we are comparing with a column from the inner subquery, // we add this extra predicate to check if the two sides are mergable or not @@ -338,7 +288,7 @@ func createComparisonSubQuery( } } - return subquery, err + return subquery } func (sqb *SubQueryBuilder) pullOutValueSubqueries( @@ -346,25 +296,22 @@ func (sqb *SubQueryBuilder) pullOutValueSubqueries( expr sqlparser.Expr, outerID semantics.TableSet, isDML bool, -) (sqlparser.Expr, []*SubQuery, error) { +) (sqlparser.Expr, []*SubQuery) { original := sqlparser.CloneExpr(expr) sqe := extractSubQueries(ctx, expr, isDML) if sqe == nil { - return nil, nil, nil + return nil, nil } var newSubqs []*SubQuery for idx, subq := range sqe.subq { - sqInner, err := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) - if err != nil { - return nil, nil, err - } + sqInner := createSubquery(ctx, original, subq, outerID, original, sqe.cols[idx], sqe.pullOutCode[idx], true) newSubqs = append(newSubqs, sqInner) } sqb.Inner = append(sqb.Inner, newSubqs...) 
- return sqe.new, newSubqs, nil + return sqe.new, newSubqs } type subqueryExtraction struct { diff --git a/go/vt/vtgate/planbuilder/operators/subquery_container.go b/go/vt/vtgate/planbuilder/operators/subquery_container.go index ab8d1104623..edbbec1125e 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_container.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_container.go @@ -18,7 +18,6 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) @@ -27,15 +26,15 @@ type ( // The inner subqueries can be executed in any order, so we store them like this so we can see more opportunities // for merging SubQueryContainer struct { - Outer ops.Operator + Outer Operator Inner []*SubQuery } ) -var _ ops.Operator = (*SubQueryContainer)(nil) +var _ Operator = (*SubQueryContainer)(nil) // Clone implements the Operator interface -func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { +func (sqc *SubQueryContainer) Clone(inputs []Operator) Operator { result := &SubQueryContainer{ Outer: inputs[0], } @@ -49,13 +48,13 @@ func (sqc *SubQueryContainer) Clone(inputs []ops.Operator) ops.Operator { return result } -func (sqc *SubQueryContainer) GetOrdering(ctx *plancontext.PlanningContext) []ops.OrderBy { +func (sqc *SubQueryContainer) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { return sqc.Outer.GetOrdering(ctx) } // Inputs implements the Operator interface -func (sqc *SubQueryContainer) Inputs() []ops.Operator { - operators := []ops.Operator{sqc.Outer} +func (sqc *SubQueryContainer) Inputs() []Operator { + operators := []Operator{sqc.Outer} for _, inner := range sqc.Inner { operators = append(operators, inner) } @@ -63,7 +62,7 @@ func (sqc *SubQueryContainer) Inputs() []ops.Operator { } // SetInputs implements the Operator interface -func (sqc *SubQueryContainer) SetInputs(ops []ops.Operator) { +func (sqc 
*SubQueryContainer) SetInputs(ops []Operator) { sqc.Outer = ops[0] } @@ -71,7 +70,7 @@ func (sqc *SubQueryContainer) ShortDescription() string { return "" } -func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (sqc *SubQueryContainer) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { sqc.Outer = sqc.Outer.AddPredicate(ctx, expr) return sqc } @@ -80,6 +79,10 @@ func (sqc *SubQueryContainer) AddColumn(ctx *plancontext.PlanningContext, reuseE return sqc.Outer.AddColumn(ctx, reuseExisting, addToGroupBy, exprs) } +func (sqc *SubQueryContainer) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return sqc.Outer.AddWSColumn(ctx, offset, underRoute) +} + func (sqc *SubQueryContainer) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { return sqc.Outer.FindCol(ctx, expr, underRoute) } diff --git a/go/vt/vtgate/planbuilder/operators/subquery_planning.go b/go/vt/vtgate/planbuilder/operators/subquery_planning.go index 7740ca3d46d..145a0707347 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_planning.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_planning.go @@ -26,13 +26,11 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine/opcode" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) -func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op ops.Operator) bool { +func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStatement, op Operator) bool { validVindex := func(expr sqlparser.Expr) bool { sc := findColumnVindex(ctx, op, expr) return sc != nil && sc.IsUnique() @@ -44,11 +42,11 @@ func isMergeable(ctx 
*plancontext.PlanningContext, query sqlparser.SelectStateme switch node := query.(type) { case *sqlparser.Select: - if len(node.GroupBy) > 0 { + if node.GroupBy != nil && len(node.GroupBy.Exprs) > 0 { // iff we are grouping, we need to check that we can perform the grouping inside a single shard, and we check that // by checking that one of the grouping expressions used is a unique single column vindex. // TODO: we could also support the case where all the columns of a multi-column vindex are used in the grouping - for _, gb := range node.GroupBy { + for _, gb := range node.GroupBy.Exprs { if validVindex(gb) { return true } @@ -58,11 +56,11 @@ func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStateme // if we have grouping, we have already checked that it's safe, and don't need to check for aggregations // but if we don't have groupings, we need to check if there are aggregations that will mess with us - if sqlparser.ContainsAggregation(node.SelectExprs) { + if ContainsAggr(ctx, node.SelectExprs) { return false } - if sqlparser.ContainsAggregation(node.Having) { + if ContainsAggr(ctx, node.Having) { return false } @@ -74,24 +72,20 @@ func isMergeable(ctx *plancontext.PlanningContext, query sqlparser.SelectStateme } } -func settleSubqueries(ctx *plancontext.PlanningContext, op ops.Operator) ops.Operator { - visit := func(op ops.Operator, lhsTables semantics.TableSet, isRoot bool) (ops.Operator, *rewrite.ApplyResult, error) { +func settleSubqueries(ctx *plancontext.PlanningContext, op Operator) Operator { + visit := func(op Operator, lhsTables semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { switch op := op.(type) { case *SubQueryContainer: outer := op.Outer for _, subq := range op.Inner { - newOuter, err := subq.settle(ctx, outer) - if err != nil { - return nil, nil, err - } - subq.Outer = newOuter + subq.Outer = subq.settle(ctx, outer) outer = subq } - return outer, rewrite.NewTree("extracted subqueries from subquery container", 
outer), nil + return outer, Rewrote("extracted subqueries from subquery container") case *Projection: ap, err := op.GetAliasedProjections() if err != nil { - return nil, nil, err + panic(err) } for _, pe := range ap { @@ -101,14 +95,18 @@ func settleSubqueries(ctx *plancontext.PlanningContext, op ops.Operator) ops.Ope for _, setExpr := range op.Assignments { mergeSubqueryExpr(ctx, setExpr.Expr) } + case *Aggregator: + for _, aggr := range op.Aggregations { + newExpr, rewritten := rewriteMergedSubqueryExpr(ctx, aggr.SubQueryExpression, aggr.Original.Expr) + if rewritten { + aggr.Original.Expr = newExpr + } + } } - return op, rewrite.SameTree, nil - } - op, err := rewrite.BottomUp(op, TableID, visit, nil) - if err != nil { - panic(err) + return op, NoRewrite } - return op + + return BottomUp(op, TableID, visit, nil) } func mergeSubqueryExpr(ctx *plancontext.PlanningContext, pe *ProjExpr) { @@ -194,7 +192,7 @@ func tryPushSubQueryInJoin( ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { lhs := TableID(outer.LHS) rhs := TableID(outer.RHS) joinID := TableID(outer) @@ -213,12 +211,9 @@ func tryPushSubQueryInJoin( // in general, we don't want to push down uncorrelated subqueries into the RHS of a join, // since this side is executed once per row from the LHS, so we would unnecessarily execute // the subquery multiple times. The exception is if we can merge the subquery with the RHS of the join. - merged, result, err := tryMergeWithRHS(ctx, inner, outer) - if err != nil { - return nil, nil, err - } + merged, result := tryMergeWithRHS(ctx, inner, outer) if merged != nil { - return merged, result, nil + return merged, result } _, ok := inner.Subquery.(*Projection) @@ -227,41 +222,37 @@ func tryPushSubQueryInJoin( // Projections are easy to push down, so if this is still at the top, // it means we have not tried pushing it yet. 
// Let's give it a chance to push down before we push it on the left - return nil, rewrite.SameTree, nil + return nil, NoRewrite } if deps.IsSolvedBy(lhs) { // we can safely push down the subquery on the LHS outer.LHS = addSubQuery(outer.LHS, inner) - return outer, rewrite.NewTree("push subquery into LHS of join", inner), nil + return outer, Rewrote("push subquery into LHS of join") } - if outer.LeftJoin || len(inner.Predicates) == 0 { + if !outer.IsInner() || len(inner.Predicates) == 0 { // we can't push any filters on the RHS of an outer join, and // we don't want to push uncorrelated subqueries to the RHS of a join - return nil, rewrite.SameTree, nil + return nil, NoRewrite } if deps.IsSolvedBy(rhs) { // we can push down the subquery filter on RHS of the join outer.RHS = addSubQuery(outer.RHS, inner) - return outer, rewrite.NewTree("push subquery into RHS of join", inner), nil + return outer, Rewrote("push subquery into RHS of join") } if deps.IsSolvedBy(joinID) { // we can rewrite the predicate to not use the values from the lhs, // and instead use arguments for these dependencies. 
// this way we can push the subquery into the RHS of this join - err := inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) - if err != nil { - return nil, nil, err - } - + inner.mapExpr(extractLHSExpr(ctx, outer, lhs)) outer.RHS = addSubQuery(outer.RHS, inner) - return outer, rewrite.NewTree("push subquery into RHS of join rewriting predicates", inner), nil + return outer, Rewrote("push subquery into RHS of join rewriting predicates") } - return nil, rewrite.SameTree, nil + return nil, NoRewrite } // extractLHSExpr will return a function that extracts any ColName coming from the LHS table, @@ -270,37 +261,34 @@ func extractLHSExpr( ctx *plancontext.PlanningContext, outer *ApplyJoin, lhs semantics.TableSet, -) func(expr sqlparser.Expr) (sqlparser.Expr, error) { - return func(expr sqlparser.Expr) (sqlparser.Expr, error) { - col, err := BreakExpressionInLHSandRHS(ctx, expr, lhs) - if err != nil { - return nil, err - } +) func(expr sqlparser.Expr) sqlparser.Expr { + return func(expr sqlparser.Expr) sqlparser.Expr { + col := breakExpressionInLHSandRHS(ctx, expr, lhs) if col.IsPureLeft() { - return nil, vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here") + panic(vterrors.VT13001("did not expect to find any predicates that do not need data from the inner here")) } for _, bve := range col.LHSExprs { if !outer.isColNameMovedFromL2R(bve.Name) { outer.ExtraLHSVars = append(outer.ExtraLHSVars, bve) } } - return col.RHSExpr, nil + return col.RHSExpr } } // tryMergeWithRHS attempts to merge a subquery with the RHS of a join -func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin) (ops.Operator, *rewrite.ApplyResult, error) { - if outer.LeftJoin { - return nil, nil, nil +func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *ApplyJoin) (Operator, *ApplyResult) { + if !outer.IsInner() { + return nil, nil } // both sides need to be routes outerRoute, ok := outer.RHS.(*Route) if !ok 
{ - return nil, nil, nil + return nil, nil } innerRoute, ok := inner.Subquery.(*Route) if !ok { - return nil, nil, nil + return nil, nil } newExpr := rewriteOriginalPushedToRHS(ctx, inner.Original, outer) @@ -311,18 +299,18 @@ func tryMergeWithRHS(ctx *plancontext.PlanningContext, inner *SubQuery, outer *A } newOp := mergeSubqueryInputs(ctx, innerRoute, outerRoute, inner.GetMergePredicates(), sqm) if newOp == nil { - return nil, nil, nil + return nil, nil } outer.RHS = newOp ctx.MergedSubqueries = append(ctx.MergedSubqueries, inner.originalSubquery) - return outer, rewrite.NewTree("merged subquery with rhs of join", inner), nil + return outer, Rewrote("merged subquery with rhs of join") } // addSubQuery adds a SubQuery to the given operator. If the operator is a SubQueryContainer, // it will add the SubQuery to the SubQueryContainer. If the operator is something else, it will // create a new SubQueryContainer with the given operator as the outer and the SubQuery as the inner. -func addSubQuery(in ops.Operator, inner *SubQuery) ops.Operator { +func addSubQuery(in Operator, inner *SubQuery) Operator { sql, ok := in.(*SubQueryContainer) if !ok { return &SubQueryContainer{ @@ -339,7 +327,6 @@ func addSubQuery(in ops.Operator, inner *SubQuery) ops.Operator { // this is necessary because we are pushing the subquery into the RHS of the join, and we need to use the argument names // instead of the column names func rewriteOriginalPushedToRHS(ctx *plancontext.PlanningContext, expression sqlparser.Expr, outer *ApplyJoin) sqlparser.Expr { - var err error outerID := TableID(outer.LHS) result := sqlparser.CopyOnRewrite(expression, nil, func(cursor *sqlparser.CopyOnWriteCursor) { col, ok := cursor.Node().(*sqlparser.ColName) @@ -350,46 +337,12 @@ func rewriteOriginalPushedToRHS(ctx *plancontext.PlanningContext, expression sql // this is a dependency we are being fed from the LHS of the join, so we // need to find the argument name for it and use that instead // we can't use the 
column name directly, because we're in the RHS of the join - name, innerErr := outer.findOrAddColNameBindVarName(ctx, col) - if err != nil { - err = innerErr - cursor.StopTreeWalk() - return - } + name := outer.findOrAddColNameBindVarName(ctx, col) cursor.Replace(sqlparser.NewArgument(name)) }, nil) - if err != nil { - panic(err) - } return result.(sqlparser.Expr) } -func pushProjectionToOuterContainer(ctx *plancontext.PlanningContext, p *Projection, src *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { - ap, err := p.GetAliasedProjections() - if err != nil { - return p, rewrite.SameTree, nil - } - - outer := TableID(src.Outer) - for _, pe := range ap { - _, isOffset := pe.Info.(*Offset) - if isOffset { - continue - } - - if !ctx.SemTable.RecursiveDeps(pe.EvalExpr).IsSolvedBy(outer) { - return p, rewrite.SameTree, nil - } - - if se, ok := pe.Info.(SubQueryExpression); ok { - pe.EvalExpr = rewriteColNameToArgument(ctx, pe.EvalExpr, se, src.Inner...) - } - } - // all projections can be pushed to the outer - src.Outer, p.Source = p, src.Outer - return src, rewrite.NewTree("push projection into outer side of subquery container", p), nil -} - func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Expr, se SubQueryExpression, subqueries ...*SubQuery) sqlparser.Expr { rewriteIt := func(s string) sqlparser.SQLNode { for _, sq1 := range se { @@ -433,19 +386,16 @@ func rewriteColNameToArgument(ctx *plancontext.PlanningContext, in sqlparser.Exp return result.(sqlparser.Expr) } -func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQueryContainer) (Operator, *ApplyResult) { if !reachedPhase(ctx, initialPlanning) { - return in, rewrite.SameTree, nil + return in, NoRewrite } var remaining []*SubQuery - var result *rewrite.ApplyResult + var result *ApplyResult for _, inner := range 
in.Inner { - newOuter, _result, err := pushOrMerge(ctx, in.Outer, inner) - if err != nil { - return nil, nil, err - } - if _result == rewrite.SameTree { + newOuter, _result := pushOrMerge(ctx, in.Outer, inner) + if _result == NoRewrite { remaining = append(remaining, inner) continue } @@ -455,26 +405,26 @@ func pushOrMergeSubQueryContainer(ctx *plancontext.PlanningContext, in *SubQuery } if len(remaining) == 0 { - return in.Outer, result, nil + return in.Outer, result } in.Inner = remaining - return in, result, nil + return in, result } func tryMergeSubQuery( ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, -) (newOuter ops.Operator, result *rewrite.ApplyResult, err error) { +) (newOuter Operator, result *ApplyResult) { switch inner := subQuery.Subquery.(type) { case *Route: return tryMergeSubqueryWithOuter(ctx, subQuery, outer, inner) case *SubQueryContainer: return tryMergeSubqueriesRecursively(ctx, subQuery, outer, inner) } - return outer, rewrite.SameTree, nil + return outer, NoRewrite } // tryMergeSubqueriesRecursively attempts to merge a SubQueryContainer with the outer Route. 
@@ -483,7 +433,7 @@ func tryMergeSubqueriesRecursively( subQuery *SubQuery, outer *Route, inner *SubQueryContainer, -) (ops.Operator, *rewrite.ApplyResult, error) { +) (Operator, *ApplyResult) { exprs := subQuery.GetMergePredicates() merger := &subqueryRouteMerger{ outer: outer, @@ -492,34 +442,32 @@ func tryMergeSubqueriesRecursively( } op := mergeSubqueryInputs(ctx, inner.Outer, outer, exprs, merger) if op == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } op = Clone(op).(*Route) op.Source = outer.Source - var finalResult *rewrite.ApplyResult + var finalResult *ApplyResult for _, subq := range inner.Inner { - newOuter, res, err := tryMergeSubQuery(ctx, subq, op) - if err != nil { - return nil, nil, err - } - if res == rewrite.SameTree { + newOuter, res := tryMergeSubQuery(ctx, subq, op) + if res == NoRewrite { // we failed to merge one of the inners - we need to abort - return nil, rewrite.SameTree, nil + return nil, NoRewrite } op = newOuter.(*Route) finalResult = finalResult.Merge(res) } - op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} - return op, finalResult.Merge(rewrite.NewTree("merge outer of two subqueries", subQuery)), nil + op.Source = newFilter(outer.Source, subQuery.Original) + return op, finalResult.Merge(Rewrote("merge outer of two subqueries")) } -func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner ops.Operator) (ops.Operator, *rewrite.ApplyResult, error) { +func tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQuery, outer *Route, inner Operator) (Operator, *ApplyResult) { if updOp, ok := outer.Source.(*Update); ok && mergingIsBlocked(subQuery, updOp) { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } exprs := subQuery.GetMergePredicates() + sqlparser.RemoveKeyspace(subQuery.Original) merger := &subqueryRouteMerger{ outer: outer, original: subQuery.Original, @@ -527,13 +475,13 @@ func 
tryMergeSubqueryWithOuter(ctx *plancontext.PlanningContext, subQuery *SubQu } op := mergeSubqueryInputs(ctx, inner, outer, exprs, merger) if op == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } if !subQuery.IsProjection { - op.Source = &Filter{Source: outer.Source, Predicates: []sqlparser.Expr{subQuery.Original}} + op.Source = newFilter(outer.Source, subQuery.Original) } ctx.MergedSubqueries = append(ctx.MergedSubqueries, subQuery.originalSubquery) - return op, rewrite.NewTree("merged subquery with outer", subQuery), nil + return op, Rewrote("merged subquery with outer") } // This checked if subquery is part of the changed vindex values. Subquery cannot be merged with the outer route. @@ -546,21 +494,18 @@ func mergingIsBlocked(subQuery *SubQuery, updOp *Update) bool { return false } -func pushOrMerge(ctx *plancontext.PlanningContext, outer ops.Operator, inner *SubQuery) (ops.Operator, *rewrite.ApplyResult, error) { +func pushOrMerge(ctx *plancontext.PlanningContext, outer Operator, inner *SubQuery) (Operator, *ApplyResult) { switch o := outer.(type) { case *Route: return tryMergeSubQuery(ctx, inner, o) case *ApplyJoin: - join, applyResult, err := tryPushSubQueryInJoin(ctx, inner, o) - if err != nil { - return nil, nil, err - } + join, applyResult := tryPushSubQueryInJoin(ctx, inner, o) if join == nil { - return outer, rewrite.SameTree, nil + return outer, NoRewrite } - return join, applyResult, nil + return join, applyResult default: - return outer, rewrite.SameTree, nil + return outer, NoRewrite } } @@ -618,10 +563,7 @@ func (s *subqueryRouteMerger) mergeShardedRouting(ctx *plancontext.PlanningConte }) } - routing, err := tr.resetRoutingLogic(ctx) - if err != nil { - panic(err) - } + routing := tr.resetRoutingLogic(ctx) return s.merge(ctx, old1, old2, routing) } @@ -637,14 +579,11 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out } } _, isSharded := r.(*ShardedRouting) - var src ops.Operator + var src 
Operator if isSharded { src = s.outer.Source if !s.subq.IsProjection { - src = &Filter{ - Source: s.outer.Source, - Predicates: []sqlparser.Expr{s.original}, - } + src = newFilter(s.outer.Source, s.original) } } else { src = s.rewriteASTExpression(ctx, inner) @@ -665,7 +604,7 @@ func (s *subqueryRouteMerger) merge(ctx *plancontext.PlanningContext, inner, out // we should be able to use this method for all plan types, // but using this method for sharded queries introduces bugs // We really need to figure out why this is not working as expected -func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) ops.Operator { +func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningContext, inner *Route) Operator { src := s.outer.Source stmt, _, err := ToSQL(ctx, inner.Source) if err != nil { @@ -714,10 +653,7 @@ func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningCont cursor.Replace(subq) } }, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) - src = &Filter{ - Source: s.outer.Source, - Predicates: []sqlparser.Expr{sQuery}, - } + src = newFilter(s.outer.Source, sQuery) } return src } @@ -726,7 +662,7 @@ func (s *subqueryRouteMerger) rewriteASTExpression(ctx *plancontext.PlanningCont // If they can be merged, a new operator with the merged routing is returned // If they cannot be merged, nil is returned. 
// These rules are similar but different from join merging -func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out ops.Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) *Route { +func mergeSubqueryInputs(ctx *plancontext.PlanningContext, in, out Operator, joinPredicates []sqlparser.Expr, m *subqueryRouteMerger) *Route { inRoute, outRoute := operatorsToRoutes(in, out) if inRoute == nil || outRoute == nil { return nil diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go index 09a99170932..14207fe3b3e 100644 --- a/go/vt/vtgate/planbuilder/operators/table.go +++ b/go/vt/vtgate/planbuilder/operators/table.go @@ -22,7 +22,6 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -43,7 +42,7 @@ type ( ) // Clone implements the Operator interface -func (to *Table) Clone([]ops.Operator) ops.Operator { +func (to *Table) Clone([]Operator) Operator { var columns []*sqlparser.ColName for _, name := range to.Columns { columns = append(columns, sqlparser.CloneRefOfColName(name)) @@ -61,7 +60,7 @@ func (to *Table) introducesTableID() semantics.TableSet { } // AddPredicate implements the PhysicalOperator interface -func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (to *Table) AddPredicate(_ *plancontext.PlanningContext, expr sqlparser.Expr) Operator { return newFilter(to, expr) } @@ -69,6 +68,10 @@ func (to *Table) AddColumn(*plancontext.PlanningContext, bool, bool, *sqlparser. 
panic(vterrors.VT13001("did not expect this method to be called")) } +func (*Table) AddWSColumn(*plancontext.PlanningContext, int, bool) int { + panic(vterrors.VT13001("did not expect this method to be called")) +} + func (to *Table) FindCol(ctx *plancontext.PlanningContext, expr sqlparser.Expr, underRoute bool) int { colToFind, ok := expr.(*sqlparser.ColName) if !ok { @@ -92,7 +95,7 @@ func (to *Table) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return transformColumnsToSelectExprs(ctx, to) } -func (to *Table) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (to *Table) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -116,7 +119,7 @@ func addColumn(ctx *plancontext.PlanningContext, op ColNameColumns, e sqlparser. if !ok { panic(vterrors.VT09018(fmt.Sprintf("cannot add '%s' expression to a table/vindex", sqlparser.String(e)))) } - sqlparser.RemoveKeyspaceFromColName(col) + sqlparser.RemoveKeyspaceInCol(col) cols := op.GetColNames() colAsExpr := func(c *sqlparser.ColName) sqlparser.Expr { return c } if offset, found := canReuseColumn(ctx, cols, e, colAsExpr); found { @@ -130,7 +133,7 @@ func addColumn(ctx *plancontext.PlanningContext, op ColNameColumns, e sqlparser. func (to *Table) ShortDescription() string { tbl := to.VTable.String() var alias, where string - if !to.QTable.Alias.As.IsEmpty() { + if to.QTable.Alias.As.NotEmpty() { alias = " AS " + to.QTable.Alias.As.String() } diff --git a/go/vt/vtgate/planbuilder/operators/ops/to_json.go b/go/vt/vtgate/planbuilder/operators/to_json.go similarity index 98% rename from go/vt/vtgate/planbuilder/operators/ops/to_json.go rename to go/vt/vtgate/planbuilder/operators/to_json.go index 2b8b747f433..48b7fa9a247 100644 --- a/go/vt/vtgate/planbuilder/operators/ops/to_json.go +++ b/go/vt/vtgate/planbuilder/operators/to_json.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package ops +package operators import ( "fmt" diff --git a/go/vt/vtgate/planbuilder/operators/union.go b/go/vt/vtgate/planbuilder/operators/union.go index b3d866a00a3..fedfc362017 100644 --- a/go/vt/vtgate/planbuilder/operators/union.go +++ b/go/vt/vtgate/planbuilder/operators/union.go @@ -23,12 +23,11 @@ import ( "vitess.io/vitess/go/slice" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type Union struct { - Sources []ops.Operator + Sources []Operator // These are the select expressions coming from each source Selects []sqlparser.SelectExprs @@ -38,7 +37,7 @@ type Union struct { unionColumnsAsAlisedExprs []*sqlparser.AliasedExpr } -func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { +func newUnion(srcs []Operator, sourceSelects []sqlparser.SelectExprs, columns sqlparser.SelectExprs, distinct bool) *Union { if columns == nil { panic("rt") } @@ -51,24 +50,24 @@ func newUnion(srcs []ops.Operator, sourceSelects []sqlparser.SelectExprs, column } // Clone implements the Operator interface -func (u *Union) Clone(inputs []ops.Operator) ops.Operator { +func (u *Union) Clone(inputs []Operator) Operator { newOp := *u newOp.Sources = inputs newOp.Selects = slices.Clone(u.Selects) return &newOp } -func (u *Union) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (u *Union) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } // Inputs implements the Operator interface -func (u *Union) Inputs() []ops.Operator { +func (u *Union) Inputs() []Operator { return u.Sources } // SetInputs implements the Operator interface -func (u *Union) SetInputs(ops []ops.Operator) { +func (u *Union) SetInputs(ops []Operator) { u.Sources = ops } @@ -93,12 +92,9 @@ Notice how `X.col = 42` has been translated to `foo = 42` and `id = 42` on respe The first 
SELECT of the union dictates the column names, and the second is whatever expression can be found on the same offset. The names of the RHS are discarded. */ -func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { offsets := make(map[string]int) - sel, err := u.GetSelectFor(0) - if err != nil { - panic(err) - } + sel := u.GetSelectFor(0) for i, selectExpr := range sel.SelectExprs { ae, ok := selectExpr.(*sqlparser.AliasedExpr) if !ok { @@ -107,15 +103,9 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex offsets[ae.ColumnName()] = i } - needsFilter, exprPerSource, err := u.predicatePerSource(expr, offsets) - if err != nil { - panic(err) - } + needsFilter, exprPerSource := u.predicatePerSource(expr, offsets) if needsFilter { - return &Filter{ - Source: u, - Predicates: []sqlparser.Expr{expr}, - } + return newFilter(u, expr) } for i, src := range u.Sources { @@ -125,11 +115,10 @@ func (u *Union) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Ex return u } -func (u *Union) predicatePerSource(expr sqlparser.Expr, offsets map[string]int) (bool, []sqlparser.Expr, error) { +func (u *Union) predicatePerSource(expr sqlparser.Expr, offsets map[string]int) (bool, []sqlparser.Expr) { needsFilter := false exprPerSource := make([]sqlparser.Expr, len(u.Sources)) for i := range u.Sources { - var err error predicate := sqlparser.CopyOnRewrite(expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { col, ok := cursor.Node().(*sqlparser.ColName) if !ok { @@ -143,43 +132,37 @@ func (u *Union) predicatePerSource(expr sqlparser.Expr, offsets map[string]int) return } - var sel *sqlparser.Select - sel, err = u.GetSelectFor(i) - if err != nil { - cursor.StopTreeWalk() - return - } - + sel := u.GetSelectFor(i) ae, ok := sel.SelectExprs[idx].(*sqlparser.AliasedExpr) if !ok { - err = vterrors.VT09015() - 
cursor.StopTreeWalk() - return + panic(vterrors.VT09015()) } cursor.Replace(ae.Expr) }, nil).(sqlparser.Expr) - if err != nil || needsFilter { - return needsFilter, nil, err - } + exprPerSource[i] = predicate } - return needsFilter, exprPerSource, nil + return needsFilter, exprPerSource } -func (u *Union) GetSelectFor(source int) (*sqlparser.Select, error) { +func (u *Union) GetSelectFor(source int) *sqlparser.Select { src := u.Sources[source] for { switch op := src.(type) { case *Horizon: - return sqlparser.GetFirstSelect(op.Query), nil + return sqlparser.GetFirstSelect(op.Query) case *Route: src = op.Source default: - return nil, vterrors.VT13001("expected all sources of the UNION to be horizons") + panic(vterrors.VT13001("expected all sources of the UNION to be horizons")) } } } +func (u *Union) AddWSColumn(ctx *plancontext.PlanningContext, offset int, underRoute bool) int { + return u.addWeightStringToOffset(ctx, offset) +} + func (u *Union) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, expr *sqlparser.AliasedExpr) int { if reuse { offset := u.FindCol(ctx, expr.Expr, false) @@ -209,33 +192,39 @@ func (u *Union) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool, panic(vterrors.VT13001(fmt.Sprintf("could not find the argument to the weight_string function: %s", sqlparser.String(wsArg)))) } - outputOffset, err := u.addWeightStringToOffset(ctx, argIdx, gb) - if err != nil { - panic(err) - } - - return outputOffset + return u.addWeightStringToOffset(ctx, argIdx) + case *sqlparser.Literal, *sqlparser.Argument: + return u.addConstantToUnion(ctx, expr) default: panic(vterrors.VT13001(fmt.Sprintf("only weight_string function is expected - got %s", sqlparser.String(expr)))) } } -func (u *Union) addWeightStringToOffset(ctx *plancontext.PlanningContext, argIdx int, addToGroupBy bool) (outputOffset int, err error) { +func (u *Union) addConstantToUnion(ctx *plancontext.PlanningContext, aexpr *sqlparser.AliasedExpr) (outputOffset int) { for i, 
src := range u.Sources { - exprs := u.Selects[i] - selectExpr := exprs[argIdx] - ae, ok := selectExpr.(*sqlparser.AliasedExpr) - if !ok { - return 0, vterrors.VT09015() + thisOffset := src.AddColumn(ctx, true, false, aexpr) + // all offsets for the newly added ws need to line up + if i == 0 { + outputOffset = thisOffset + } else { + if thisOffset != outputOffset { + panic(vterrors.VT12001("argument offsets did not line up for UNION")) + } } - thisOffset := src.AddColumn(ctx, false, addToGroupBy, aeWrap(weightStringFor(ae.Expr))) + } + return +} + +func (u *Union) addWeightStringToOffset(ctx *plancontext.PlanningContext, argIdx int) (outputOffset int) { + for i, src := range u.Sources { + thisOffset := src.AddWSColumn(ctx, argIdx, false) // all offsets for the newly added ws need to line up if i == 0 { outputOffset = thisOffset } else { if thisOffset != outputOffset { - return 0, vterrors.VT12001("weight_string offsets did not line up for UNION") + panic(vterrors.VT12001("weight_string offsets did not line up for UNION")) } } } diff --git a/go/vt/vtgate/planbuilder/operators/union_merging.go b/go/vt/vtgate/planbuilder/operators/union_merging.go index 4c8b02f76d8..81ca2f5623e 100644 --- a/go/vt/vtgate/planbuilder/operators/union_merging.go +++ b/go/vt/vtgate/planbuilder/operators/union_merging.go @@ -19,14 +19,13 @@ package operators import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) // mergeUnionInputInAnyOrder merges sources the sources of the union in any order // can be used for UNION DISTINCT -func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { +func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]Operator, 
[]sqlparser.SelectExprs) { sources := op.Sources selects := op.Selects @@ -43,10 +42,7 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o } selA := selects[idx] selB := selects[j] - newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) - if err != nil { - return nil, nil, err - } + newPlan, sel := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) if newPlan != nil { sources[idx] = newPlan selects[idx] = sel @@ -57,10 +53,10 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o } } if !merged { - return sources, selects, nil + return sources, selects } - var newSources []ops.Operator + var newSources []Operator var newSelects []sqlparser.SelectExprs for i, source := range sources { if keep[i] || i <= idx { @@ -73,10 +69,10 @@ func mergeUnionInputInAnyOrder(ctx *plancontext.PlanningContext, op *Union) ([]o selects = newSelects } - return sources, selects, nil + return sources, selects } -func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops.Operator, []sqlparser.SelectExprs, error) { +func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]Operator, []sqlparser.SelectExprs) { sources := op.Sources selects := op.Selects for { @@ -85,10 +81,7 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops j := i + 1 srcA, selA := sources[i], selects[i] srcB, selB := sources[j], selects[j] - newPlan, sel, err := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) - if err != nil { - return nil, nil, err - } + newPlan, sel := mergeUnionInputs(ctx, srcA, srcB, selA, selB, op.distinct) if newPlan != nil { sources[i] = newPlan selects[i] = sel @@ -102,7 +95,7 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops } } - return sources, selects, nil + return sources, selects } // mergeUnionInputs checks whether two operators can be merged into a single one. 
@@ -111,13 +104,13 @@ func mergeUnionInputsInOrder(ctx *plancontext.PlanningContext, op *Union) ([]ops // this function is very similar to mergeJoinInputs func mergeUnionInputs( ctx *plancontext.PlanningContext, - lhs, rhs ops.Operator, + lhs, rhs Operator, lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, -) (ops.Operator, sqlparser.SelectExprs, error) { +) (Operator, sqlparser.SelectExprs) { lhsRoute, rhsRoute, routingA, routingB, a, b, sameKeyspace := prepareInputRoutes(lhs, rhs) if lhsRoute == nil { - return nil, nil, nil + return nil, nil } switch { @@ -134,12 +127,12 @@ func mergeUnionInputs( return createMergedUnion(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct, routingA) case a == sharded && b == sharded && sameKeyspace: - res, exprs, err := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) - if err != nil || res != nil { - return res, exprs, err + res, exprs := tryMergeUnionShardedRouting(ctx, lhsRoute, rhsRoute, lhsExprs, rhsExprs, distinct) + if res != nil { + return res, exprs } } - return nil, nil, nil + return nil, nil } func tryMergeUnionShardedRouting( @@ -147,7 +140,7 @@ func tryMergeUnionShardedRouting( routeA, routeB *Route, exprsA, exprsB sqlparser.SelectExprs, distinct bool, -) (ops.Operator, sqlparser.SelectExprs, error) { +) (Operator, sqlparser.SelectExprs) { tblA := routeA.Routing.(*ShardedRouting) tblB := routeB.Routing.(*ShardedRouting) @@ -173,7 +166,7 @@ func tryMergeUnionShardedRouting( } } - return nil, nil, nil + return nil, nil } func createMergedUnion( @@ -181,45 +174,61 @@ func createMergedUnion( lhsRoute, rhsRoute *Route, lhsExprs, rhsExprs sqlparser.SelectExprs, distinct bool, - routing Routing) (ops.Operator, sqlparser.SelectExprs, error) { + routing Routing) (Operator, sqlparser.SelectExprs) { // if there are `*` on either side, or a different number of SelectExpr items, // we give up aligning the expressions and trust that we can push everything down cols := 
make(sqlparser.SelectExprs, len(lhsExprs)) noDeps := len(lhsExprs) != len(rhsExprs) for idx, col := range lhsExprs { - ae, ok := col.(*sqlparser.AliasedExpr) + lae, ok := col.(*sqlparser.AliasedExpr) if !ok { cols[idx] = col noDeps = true continue } - col := sqlparser.NewColName(ae.ColumnName()) + col := sqlparser.NewColName(lae.ColumnName()) cols[idx] = aeWrap(col) if noDeps { continue } - deps := ctx.SemTable.RecursiveDeps(ae.Expr) - ae, ok = rhsExprs[idx].(*sqlparser.AliasedExpr) + deps := ctx.SemTable.RecursiveDeps(lae.Expr) + rae, ok := rhsExprs[idx].(*sqlparser.AliasedExpr) if !ok { noDeps = true continue } - deps = deps.Merge(ctx.SemTable.RecursiveDeps(ae.Expr)) + deps = deps.Merge(ctx.SemTable.RecursiveDeps(rae.Expr)) + rt, foundR := ctx.SemTable.TypeForExpr(rae.Expr) + lt, foundL := ctx.SemTable.TypeForExpr(lae.Expr) + if foundR && foundL { + collations := ctx.VSchema.Environment().CollationEnv() + var typer evalengine.TypeAggregator + + if err := typer.Add(rt, collations); err != nil { + panic(err) + } + if err := typer.Add(lt, collations); err != nil { + panic(err) + } + + ctx.SemTable.ExprTypes[col] = typer.Type() + } + ctx.SemTable.Recursive[col] = deps } - union := newUnion([]ops.Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) + union := newUnion([]Operator{lhsRoute.Source, rhsRoute.Source}, []sqlparser.SelectExprs{lhsExprs, rhsExprs}, cols, distinct) selectExprs := unionSelects(lhsExprs) return &Route{ Source: union, MergedWith: []*Route{rhsRoute}, Routing: routing, - }, selectExprs, nil + }, selectExprs } -func compactUnion(u *Union) *rewrite.ApplyResult { +func compactUnion(u *Union) *ApplyResult { if u.distinct { // first we remove unnecessary DISTINCTs for idx, source := range u.Sources { @@ -231,7 +240,7 @@ func compactUnion(u *Union) *rewrite.ApplyResult { } } - var newSources []ops.Operator + var newSources []Operator var newSelects []sqlparser.SelectExprs merged := false @@ -250,10 
+259,10 @@ func compactUnion(u *Union) *rewrite.ApplyResult { } if !merged { - return rewrite.SameTree + return NoRewrite } u.Sources = newSources u.Selects = newSelects - return rewrite.NewTree("merged UNIONs", u) + return Rewrote("merged UNIONs") } diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index 743812f9dd7..4abf319ad08 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -20,12 +20,15 @@ import ( "fmt" "maps" "slices" - "strings" + "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,21 +36,18 @@ import ( type ( Update struct { - QTable *QueryTable - VTable *vindexes.Table + *DMLCommon + Assignments []SetExpr ChangedVindexValues map[string]*engine.VindexValues - OwnedVindexQuery string - Ignore sqlparser.Ignore - OrderBy sqlparser.OrderBy - Limit *sqlparser.Limit // these subqueries cannot be merged as they are part of the changed vindex values // these values are needed to be sent over to lookup vindex for update. // On merging this information will be lost, so subquery merge is blocked. 
SubQueriesArgOnChangedVindex []string - noInputs + VerifyAll bool + noColumns noPredicates } @@ -58,86 +58,296 @@ type ( } ) -func (se SetExpr) String() string { - return fmt.Sprintf("%s = %s", sqlparser.String(se.Name), sqlparser.String(se.Expr.EvalExpr)) +func (u *Update) Inputs() []Operator { + if u.Source == nil { + return nil + } + return []Operator{u.Source} +} + +func (u *Update) SetInputs(inputs []Operator) { + if len(inputs) != 1 { + panic(vterrors.VT13001("unexpected number of inputs for Update operator")) + } + u.Source = inputs[0] } -// Introduces implements the PhysicalOperator interface -func (u *Update) introducesTableID() semantics.TableSet { - return u.QTable.ID +func (se SetExpr) String() string { + return fmt.Sprintf("%s = %s", sqlparser.String(se.Name), sqlparser.String(se.Expr.EvalExpr)) } // Clone implements the Operator interface -func (u *Update) Clone([]ops.Operator) ops.Operator { +func (u *Update) Clone(inputs []Operator) Operator { upd := *u upd.Assignments = slices.Clone(u.Assignments) upd.ChangedVindexValues = maps.Clone(u.ChangedVindexValues) + upd.SetInputs(inputs) return &upd } -func (u *Update) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (u *Update) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } func (u *Update) TablesUsed() []string { - if u.VTable != nil { - return SingleQualifiedIdentifier(u.VTable.Keyspace, u.VTable.Name) - } - return nil + return SingleQualifiedIdentifier(u.Target.VTable.Keyspace, u.Target.VTable.Name) } func (u *Update) ShortDescription() string { - s := []string{u.VTable.String()} - if u.Limit != nil { - s = append(s, sqlparser.String(u.Limit)) + return shortDesc(u.Target, u.OwnedVindexQuery) +} + +func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (op Operator) { + errIfUpdateNotSupported(ctx, updStmt) + parentFks := ctx.SemTable.GetParentForeignKeysForTargets() + childFks := ctx.SemTable.GetChildForeignKeysForTargets() + + // 
We check if dml with input plan is required. DML with input planning is generally + // slower, because it does a selection and then creates an update statement wherein we have to + // list all the primary key values. + if updateWithInputPlanningRequired(ctx, childFks, parentFks, updStmt) { + return createUpdateWithInputOp(ctx, updStmt) } - if len(u.OrderBy) > 0 { - s = append(s, sqlparser.String(u.OrderBy)) + + var updClone *sqlparser.Update + var targetTbl TargetTable + op, targetTbl, updClone = createUpdateOperator(ctx, updStmt) + + op = &LockAndComment{ + Source: op, + Comments: updStmt.Comments, + Lock: sqlparser.ShareModeLock, + } + + parentFks = ctx.SemTable.GetParentForeignKeysForTableSet(targetTbl.ID) + childFks = ctx.SemTable.GetChildForeignKeysForTableSet(targetTbl.ID) + if len(childFks) == 0 && len(parentFks) == 0 { + return op } - return strings.Join(s, " ") + return buildFkOperator(ctx, op, updClone, parentFks, childFks, targetTbl) } -func createOperatorFromUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (ops.Operator, error) { - tableInfo, qt, err := createQueryTableForDML(ctx, updStmt.TableExprs[0], updStmt.Where) - if err != nil { - return nil, err +func updateWithInputPlanningRequired( + ctx *plancontext.PlanningContext, + childFks []vindexes.ChildFKInfo, + parentFks []vindexes.ParentFKInfo, + updateStmt *sqlparser.Update, +) bool { + if isMultiTargetUpdate(ctx, updateStmt) { + return true + } + // If there are no foreign keys, we don't need to use delete with input. + if len(childFks) == 0 && len(parentFks) == 0 { + return false + } + // Limit requires dml with input. 
+ if updateStmt.Limit != nil { + return true } + return false +} - vindexTable, routing, err := buildVindexTableForDML(ctx, tableInfo, qt, "update") - if err != nil { - return nil, err +func isMultiTargetUpdate(ctx *plancontext.PlanningContext, updateStmt *sqlparser.Update) bool { + var targetTS semantics.TableSet + for _, ue := range updateStmt.Exprs { + targetTS = targetTS.Merge(ctx.SemTable.DirectDeps(ue.Name)) + targetTS = targetTS.Merge(ctx.SemTable.RecursiveDeps(ue.Expr)) } + return targetTS.NumberOfTables() > 1 +} - updClone := sqlparser.CloneRefOfUpdate(updStmt) - updOp, err := createUpdateOperator(ctx, updStmt, vindexTable, qt, routing) +type updColumn struct { + updCol *sqlparser.ColName + jc applyJoinColumn +} + +type updList []updColumn + +func createUpdateWithInputOp(ctx *plancontext.PlanningContext, upd *sqlparser.Update) (op Operator) { + updClone := ctx.SemTable.Clone(upd).(*sqlparser.Update) + upd.Limit = nil + + // Prepare the update expressions list + ueMap := prepareUpdateExpressionList(ctx, upd) + + var updOps []dmlOp + for _, target := range ctx.SemTable.Targets.Constituents() { + op := createUpdateOpWithTarget(ctx, upd, target, ueMap[target]) + updOps = append(updOps, op) + } + + updOps = sortDmlOps(updOps) + + selectStmt := &sqlparser.Select{ + From: updClone.TableExprs, + Where: updClone.Where, + OrderBy: updClone.OrderBy, + Limit: updClone.Limit, + Lock: sqlparser.ForUpdateLock, + } + + // now map the operator, column list and update list + var colsList [][]*sqlparser.ColName + var uList []updList + dmls := slice.Map(updOps, func(from dmlOp) Operator { + colsList = append(colsList, from.cols) + uList = append(uList, from.updList) + for _, col := range from.cols { + selectStmt.SelectExprs = append(selectStmt.SelectExprs, aeWrap(col)) + } + return from.op + }) + + op = &DMLWithInput{ + DML: dmls, + Source: createOperatorFromSelect(ctx, selectStmt), + cols: colsList, + updList: uList, + } + + if upd.Comments != nil { + op = &LockAndComment{ + 
Source: op, + Comments: upd.Comments, + } + } + return op +} + +func prepareUpdateExpressionList(ctx *plancontext.PlanningContext, upd *sqlparser.Update) map[semantics.TableSet]updList { + // Any update expression requiring column value from any other table is rewritten to take it as bindvar column. + // E.g. UPDATE t1 join t2 on t1.col = t2.col SET t1.col = t2.col + 1 where t2.col = 10; + // SET t1.col = t2.col + 1 -> SET t1.col = :t2_col + 1 (t2_col is the bindvar column which will be provided from the input) + ueMap := make(map[semantics.TableSet]updList) + for _, ue := range upd.Exprs { + target := ctx.SemTable.DirectDeps(ue.Name) + exprDeps := ctx.SemTable.RecursiveDeps(ue.Expr) + jc := breakExpressionInLHSandRHS(ctx, ue.Expr, exprDeps.Remove(target)) + ueMap[target] = append(ueMap[target], updColumn{ue.Name, jc}) + } + + // Check if any of the dependent columns are updated in the same query. + // This can result in a mismatch of rows on how MySQL interprets it and how Vitess would have updated those rows. + // It is safe to fail for those cases. 
+ errIfDependentColumnUpdated(ctx, upd, ueMap) + + return ueMap +} + +func errIfDependentColumnUpdated(ctx *plancontext.PlanningContext, upd *sqlparser.Update, ueMap map[semantics.TableSet]updList) { + for _, ue := range upd.Exprs { + for _, list := range ueMap { + for _, dc := range list { + for _, bvExpr := range dc.jc.LHSExprs { + if ctx.SemTable.EqualsExprWithDeps(ue.Name, bvExpr.Expr) { + panic(vterrors.VT12001( + fmt.Sprintf("'%s' column referenced in update expression '%s' is itself updated", sqlparser.String(ue.Name), sqlparser.String(dc.jc.Original)))) + } + } + } + } + } +} + +func createUpdateOpWithTarget(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, target semantics.TableSet, uList updList) dmlOp { + if len(uList) == 0 { + panic(vterrors.VT13001("no update expression for the target")) + } + + ti, err := ctx.SemTable.TableInfoFor(target) if err != nil { - return nil, err + panic(vterrors.VT13001(err.Error())) + } + vTbl := ti.GetVindexTable() + tblName, err := ti.Name() + if err != nil { + panic(err) } - parentFks := ctx.SemTable.GetParentForeignKeysList() - childFks := ctx.SemTable.GetChildForeignKeysList() - if len(childFks) == 0 && len(parentFks) == 0 { - return updOp, nil + var leftComp sqlparser.ValTuple + cols := make([]*sqlparser.ColName, 0, len(vTbl.PrimaryKey)) + for _, col := range vTbl.PrimaryKey { + colName := sqlparser.NewColNameWithQualifier(col.String(), tblName) + cols = append(cols, colName) + leftComp = append(leftComp, colName) + ctx.SemTable.Recursive[colName] = target } + // optimize for case when there is only single column on left hand side. + var lhs sqlparser.Expr = leftComp + if len(leftComp) == 1 { + lhs = leftComp[0] + } + compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, lhs, sqlparser.ListArg(engine.DmlVals), nil) - // If the delete statement has a limit, we don't support it yet. 
- if updStmt.Limit != nil { - return nil, vterrors.VT12001("update with limit with foreign key constraints") + var updExprs sqlparser.UpdateExprs + for _, expr := range uList { + ue := &sqlparser.UpdateExpr{ + Name: expr.updCol, + Expr: expr.jc.RHSExpr, + } + updExprs = append(updExprs, ue) + } + upd := &sqlparser.Update{ + Ignore: updStmt.Ignore, + TableExprs: sqlparser.TableExprs{ti.GetAliasedTableExpr()}, + Exprs: updExprs, + Where: sqlparser.NewWhere(sqlparser.WhereClause, compExpr), + OrderBy: updStmt.OrderBy, + } + return dmlOp{ + op: createOperatorFromUpdate(ctx, upd), + vTbl: vTbl, + cols: cols, + updList: uList, + } +} + +func errIfUpdateNotSupported(ctx *plancontext.PlanningContext, stmt *sqlparser.Update) { + for _, ue := range stmt.Exprs { + tblInfo, err := ctx.SemTable.TableInfoForExpr(ue.Name) + if err != nil { + panic(err) + } + if _, isATable := tblInfo.(*semantics.RealTable); !isATable { + var tblName string + ate := tblInfo.GetAliasedTableExpr() + if ate != nil { + tblName = sqlparser.String(ate) + } + panic(vterrors.VT03032(tblName)) + } } - return buildFkOperator(ctx, updOp, updClone, parentFks, childFks, vindexTable) + // Now we check if any of the foreign key columns that are being updated have dependencies on other updated columns. + // This is unsafe, and we currently don't support this in Vitess. 
+ if err := ctx.SemTable.ErrIfFkDependentColumnUpdated(stmt.Exprs); err != nil { + panic(err) + } } -func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, vindexTable *vindexes.Table, qt *QueryTable, routing Routing) (ops.Operator, error) { +func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update) (Operator, TargetTable, *sqlparser.Update) { + op := crossJoin(ctx, updStmt.TableExprs) + sqc := &SubQueryBuilder{} + if updStmt.Where != nil { + op = addWherePredsToSubQueryBuilder(ctx, updStmt.Where.Expr, op, sqc) + } + + outerID := TableID(op) assignments := make([]SetExpr, len(updStmt.Exprs)) + // updClone is used in foreign key planning to create the selection statements to be used for verification and selection. + // If we encounter subqueries, we want to fix the updClone to use the replaced expression, so that the pulled out subquery's + // result is used everywhere instead of running the subquery multiple times, which is wasteful. 
+ updClone := sqlparser.CloneRefOfUpdate(updStmt) + var tblInfo semantics.TableInfo + var err error for idx, updExpr := range updStmt.Exprs { - expr, subqs, err := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, qt.ID, true) - if err != nil { - return nil, err - } + expr, subqs := sqc.pullOutValueSubqueries(ctx, updExpr.Expr, outerID, true) if len(subqs) == 0 { expr = updExpr.Expr + } else { + updClone.Exprs[idx].Expr = sqlparser.CloneExpr(expr) + ctx.SemTable.UpdateChildFKExpr(updExpr, expr) } proj := newProjExpr(aeWrap(expr)) if len(subqs) != 0 { @@ -147,94 +357,99 @@ func createUpdateOperator(ctx *plancontext.PlanningContext, updStmt *sqlparser.U Name: updExpr.Name, Expr: proj, } + tblInfo, err = ctx.SemTable.TableInfoForExpr(updExpr.Name) + if err != nil { + panic(err) + } } - vp, cvv, ovq, subQueriesArgOnChangedVindex, err := getUpdateVindexInformation(ctx, updStmt, vindexTable, qt.ID, assignments) - if err != nil { - return nil, err + tblID := ctx.SemTable.TableSetFor(tblInfo.GetAliasedTableExpr()) + vTbl := tblInfo.GetVindexTable() + // Reference table should update the source table. 
+ if vTbl.Type == vindexes.TypeReference && vTbl.Source != nil { + vTbl = updateQueryGraphWithSource(ctx, op, tblID, vTbl) } - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vp + name, err := tblInfo.Name() + if err != nil { + panic(err) } - for _, predicate := range qt.Predicates { - if subq, err := sqc.handleSubquery(ctx, predicate, qt.ID); err != nil { - return nil, err - } else if subq != nil { - continue - } - routing, err = UpdateRoutingLogic(ctx, predicate, routing) - if err != nil { - return nil, err - } + targetTbl := TargetTable{ + ID: tblID, + VTable: vTbl, + Name: name, } - if routing.OpCode() == engine.Scatter && updStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - return nil, vterrors.VT12001("multi shard UPDATE with LIMIT") - } + cvv, ovq, subQueriesArgOnChangedVindex := getUpdateVindexInformation(ctx, updStmt, targetTbl, assignments) - route := &Route{ - Source: &Update{ - QTable: qt, - VTable: vindexTable, - Assignments: assignments, - ChangedVindexValues: cvv, - OwnedVindexQuery: ovq, - Ignore: updStmt.Ignore, - Limit: updStmt.Limit, - OrderBy: updStmt.OrderBy, - SubQueriesArgOnChangedVindex: subQueriesArgOnChangedVindex, + updOp := &Update{ + DMLCommon: &DMLCommon{ + Ignore: updStmt.Ignore, + Target: targetTbl, + OwnedVindexQuery: ovq, + Source: op, }, - Routing: routing, - Comments: updStmt.Comments, + Assignments: assignments, + ChangedVindexValues: cvv, + SubQueriesArgOnChangedVindex: subQueriesArgOnChangedVindex, + VerifyAll: ctx.VerifyAllFKs, } - decorator := func(op ops.Operator) ops.Operator { - return &LockAndComment{ - Source: op, - Lock: sqlparser.ShareModeLock, + if len(updStmt.OrderBy) > 0 { + addOrdering(ctx, updStmt.OrderBy, updOp) + } + + if updStmt.Limit != nil { + updOp.Source = &Limit{ + Source: updOp.Source, + AST: updStmt.Limit, } } - return sqc.getRootOperator(route, decorator), nil + return sqc.getRootOperator(updOp, 
nil), targetTbl, updClone } -func buildFkOperator(ctx *plancontext.PlanningContext, updOp ops.Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { - // We only support simple expressions in update queries for foreign key handling. - if isNonLiteral(updClone.Exprs, parentFks, childFks) { - return nil, vterrors.VT12001("update expression with non-literal values with foreign key constraints") +func getUpdateVindexInformation( + ctx *plancontext.PlanningContext, + updStmt *sqlparser.Update, + table TargetTable, + assignments []SetExpr, +) (map[string]*engine.VindexValues, *sqlparser.Select, []string) { + if !table.VTable.Keyspace.Sharded { + return nil, nil, nil } - restrictChildFks, cascadeChildFks := splitChildFks(childFks) + primaryVindex := getVindexInformation(table.ID, table.VTable) + changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex := buildChangedVindexesValues(ctx, updStmt, table.VTable, primaryVindex.Columns, assignments) + return changedVindexValues, ownedVindexQuery, subQueriesArgOnChangedVindex +} - op, err := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, updatedTable) - if err != nil { - return nil, err +func buildFkOperator(ctx *plancontext.PlanningContext, updOp Operator, updClone *sqlparser.Update, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo, targetTbl TargetTable) Operator { + // If there is a subquery container above update operator, we want to do the foreign key planning inside it, + // because we want the Inner of the subquery to execute first and its result be used for the entire foreign key update planning. 
+ foundSubqc := false + TopDown(updOp, TableID, func(in Operator, _ semantics.TableSet, _ bool) (Operator, *ApplyResult) { + if op, isSubqc := in.(*SubQueryContainer); isSubqc { + foundSubqc = true + op.Outer = buildFkOperator(ctx, op.Outer, updClone, parentFks, childFks, targetTbl) + } + return in, NoRewrite + }, stopAtUpdateOp) + if foundSubqc { + return updOp } - return createFKVerifyOp(ctx, op, updClone, parentFks, restrictChildFks) + restrictChildFks, cascadeChildFks := splitChildFks(childFks) + + op := createFKCascadeOp(ctx, updOp, updClone, cascadeChildFks, targetTbl) + + return createFKVerifyOp(ctx, op, updClone, parentFks, restrictChildFks, targetTbl.VTable) } -func isNonLiteral(updExprs sqlparser.UpdateExprs, parentFks []vindexes.ParentFKInfo, childFks []vindexes.ChildFKInfo) bool { - for _, updateExpr := range updExprs { - if sqlparser.IsLiteral(updateExpr.Expr) { - continue - } - for _, parentFk := range parentFks { - if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { - return true - } - } - for _, childFk := range childFks { - if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { - return true - } - } - } - return false +func stopAtUpdateOp(operator Operator) VisitRule { + _, isUpdate := operator.(*Update) + return VisitRule(!isUpdate) } // splitChildFks splits the child foreign keys into restrict and cascade list as restrict is handled through Verify operator and cascade is handled through Cascade operator. 
@@ -256,9 +471,9 @@ func splitChildFks(fks []vindexes.ChildFKInfo) (restrictChildFks, cascadeChildFk return } -func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, updatedTable *vindexes.Table) (ops.Operator, error) { +func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp Operator, updStmt *sqlparser.Update, childFks []vindexes.ChildFKInfo, targetTbl TargetTable) Operator { if len(childFks) == 0 { - return parentOp, nil + return parentOp } var fkChildren []*FkChild @@ -267,34 +482,153 @@ func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp ops.Operator, for _, fk := range childFks { // We should have already filtered out update restrict foreign keys. if fk.OnUpdate.IsRestrict() { - return nil, vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered") + panic(vterrors.VT13001("ON UPDATE RESTRICT foreign keys should already be filtered")) } // We need to select all the parent columns for the foreign key constraint, to use in the update of the child table. - cols, exprs := selectParentColumns(fk, len(selectExprs)) - selectExprs = append(selectExprs, exprs...) + var selectOffsets []int + selectOffsets, selectExprs = addColumns(ctx, fk.ParentColumns, selectExprs, targetTbl.Name) - fkChild, err := createFkChildForUpdate(ctx, fk, updStmt, cols, updatedTable) - if err != nil { - return nil, err + // If we are updating a foreign key column to a non-literal value then, need information about + // 1. whether the new value is different from the old value + // 2. the new value itself. + // 3. the bind variable to assign to this value. + var nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo + ue := ctx.SemTable.GetUpdateExpressionsForFk(fk.String(targetTbl.VTable)) + // We only need to store these offsets and add these expressions to SELECT when there are non-literal updates present. 
+ if hasNonLiteralUpdate(ue) { + for _, updExpr := range ue { + // We add the expression and a comparison expression to the SELECT expression while storing their offsets. + var info engine.NonLiteralUpdateInfo + info, selectExprs = addNonLiteralUpdExprToSelect(ctx, targetTbl.VTable, updExpr, selectExprs) + nonLiteralUpdateInfo = append(nonLiteralUpdateInfo, info) + } } + + fkChild := createFkChildForUpdate(ctx, fk, selectOffsets, nonLiteralUpdateInfo, targetTbl.VTable) fkChildren = append(fkChildren, fkChild) } - selectionOp, err := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, nil, sqlparser.ForUpdateLock) - if err != nil { - return nil, err - } + selectionOp := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, getUpdateLock(targetTbl.VTable)) return &FkCascade{ Selection: selectionOp, Children: fkChildren, Parent: parentOp, - }, nil + } +} + +// hasNonLiteralUpdate checks if any of the update expressions have a non-literal update. +func hasNonLiteralUpdate(exprs sqlparser.UpdateExprs) bool { + for _, expr := range exprs { + if !sqlparser.IsLiteral(expr.Expr) { + return true + } + } + return false +} + +// addColumns adds the given set of columns to the select expressions provided. It tries to reuse the columns if already present in it. +// It returns the list of offsets for the columns and the updated select expressions. 
+func addColumns(ctx *plancontext.PlanningContext, columns sqlparser.Columns, exprs []sqlparser.SelectExpr, tableName sqlparser.TableName) ([]int, []sqlparser.SelectExpr) { + var offsets []int + selectExprs := exprs + for _, column := range columns { + ae := aeWrap(sqlparser.NewColNameWithQualifier(column.String(), tableName)) + exists := false + for idx, expr := range exprs { + if ctx.SemTable.EqualsExpr(expr.(*sqlparser.AliasedExpr).Expr, ae.Expr) { + offsets = append(offsets, idx) + exists = true + break + } + } + if !exists { + offsets = append(offsets, len(selectExprs)) + selectExprs = append(selectExprs, ae) + + } + } + return offsets, selectExprs +} + +// For an update query having non-literal updates, we add the updated expression and a comparison expression to the select query. +// For example, for a query like `update fk_table set col = id * 100 + 1` +// We would add the expression `id * 100 + 1` and the comparison expression `col <=> id * 100 + 1` to the select query. +func addNonLiteralUpdExprToSelect(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updExpr *sqlparser.UpdateExpr, exprs []sqlparser.SelectExpr) (engine.NonLiteralUpdateInfo, []sqlparser.SelectExpr) { + // Create the comparison expression. + castedExpr := getCastedUpdateExpression(updatedTable, updExpr) + compExpr := sqlparser.NewComparisonExpr(sqlparser.NullSafeEqualOp, updExpr.Name, castedExpr, nil) + info := engine.NonLiteralUpdateInfo{ + CompExprCol: -1, + UpdateExprCol: -1, + } + // Add the expressions to the select expressions. We make sure to reuse the offset if it has already been added once. + for idx, selectExpr := range exprs { + if ctx.SemTable.EqualsExpr(selectExpr.(*sqlparser.AliasedExpr).Expr, compExpr) { + info.CompExprCol = idx + } + if ctx.SemTable.EqualsExpr(selectExpr.(*sqlparser.AliasedExpr).Expr, castedExpr) { + info.UpdateExprCol = idx + } + } + // If the expression doesn't exist, then we add the expression and store the offset. 
+ if info.CompExprCol == -1 { + info.CompExprCol = len(exprs) + exprs = append(exprs, aeWrap(compExpr)) + } + if info.UpdateExprCol == -1 { + info.UpdateExprCol = len(exprs) + exprs = append(exprs, aeWrap(castedExpr)) + } + return info, exprs +} + +func getCastedUpdateExpression(updatedTable *vindexes.Table, updExpr *sqlparser.UpdateExpr) sqlparser.Expr { + castTypeStr := getCastTypeForColumn(updatedTable, updExpr) + if castTypeStr == "" { + return updExpr.Expr + } + return &sqlparser.CastExpr{ + Expr: updExpr.Expr, + Type: &sqlparser.ConvertType{ + Type: castTypeStr, + }, + } +} + +func getCastTypeForColumn(updatedTable *vindexes.Table, updExpr *sqlparser.UpdateExpr) string { + var ty querypb.Type + for _, column := range updatedTable.Columns { + if updExpr.Name.Name.Equal(column.Name) { + ty = column.Type + break + } + } + switch { + case sqltypes.IsNull(ty): + return "" + case sqltypes.IsSigned(ty): + return "SIGNED" + case sqltypes.IsUnsigned(ty): + return "UNSIGNED" + case sqltypes.IsFloat(ty): + return "FLOAT" + case sqltypes.IsDecimal(ty): + return "DECIMAL" + case sqltypes.IsDateOrTime(ty): + return "DATETIME" + case sqltypes.IsBinary(ty): + return "BINARY" + case sqltypes.IsText(ty): + return "CHAR" + default: + return "" + } } // createFkChildForUpdate creates the update query operator for the child table based on the foreign key constraints. 
-func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, cols []int, updatedTable *vindexes.Table) (*FkChild, error) { +func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, selectOffsets []int, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) *FkChild { // Create a ValTuple of child column names var valTuple sqlparser.ValTuple for _, column := range fk.ChildColumns { @@ -307,36 +641,42 @@ func createFkChildForUpdate(ctx *plancontext.PlanningContext, fk vindexes.ChildF compExpr := sqlparser.NewComparisonExpr(sqlparser.InOp, valTuple, sqlparser.NewListArg(bvName), nil) var childWhereExpr sqlparser.Expr = compExpr - var childOp ops.Operator - var err error + // In the case of non-literal updates, we need to assign bindvariables for storing the updated value of the columns + // coming from the SELECT query. + if len(nonLiteralUpdateInfo) > 0 { + for idx, info := range nonLiteralUpdateInfo { + info.UpdateExprBvName = ctx.ReservedVars.ReserveVariable(foreignKeyUpdateExpr) + nonLiteralUpdateInfo[idx] = info + } + } + + var childOp Operator switch fk.OnUpdate { case sqlparser.Cascade: - childOp, err = buildChildUpdOpForCascade(ctx, fk, updStmt, childWhereExpr, updatedTable) + childOp = buildChildUpdOpForCascade(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable) case sqlparser.SetNull: - childOp, err = buildChildUpdOpForSetNull(ctx, fk, updStmt, childWhereExpr) + childOp = buildChildUpdOpForSetNull(ctx, fk, childWhereExpr, nonLiteralUpdateInfo, updatedTable) case sqlparser.SetDefault: - return nil, vterrors.VT09016() - } - if err != nil { - return nil, err + panic(vterrors.VT09016()) } return &FkChild{ - BVName: bvName, - Cols: cols, - Op: childOp, - }, nil + BVName: bvName, + Cols: selectOffsets, + Op: childOp, + NonLiteralInfo: nonLiteralUpdateInfo, + } } // buildChildUpdOpForCascade builds the child update statement operator for the 
CASCADE type foreign key constraint. // The query looks like this - // // `UPDATE SET WHERE IN ()` -func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, childWhereExpr sqlparser.Expr, updatedTable *vindexes.Table) (ops.Operator, error) { +func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, childWhereExpr sqlparser.Expr, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, updatedTable *vindexes.Table) Operator { // The update expressions are the same as the update expressions in the parent update query // with the column names replaced with the child column names. var childUpdateExprs sqlparser.UpdateExprs - for _, updateExpr := range updStmt.Exprs { + for idx, updateExpr := range ctx.SemTable.GetUpdateExpressionsForFk(fk.String(updatedTable)) { colIdx := fk.ParentColumns.FindColumn(updateExpr.Name.Name) if colIdx == -1 { continue @@ -344,17 +684,19 @@ func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.Chi // The where condition is the same as the comparison expression above // with the column names replaced with the child column names. + childUpdateExpr := updateExpr.Expr + if len(nonLiteralUpdateInfo) > 0 && nonLiteralUpdateInfo[idx].UpdateExprBvName != "" { + childUpdateExpr = sqlparser.NewArgument(nonLiteralUpdateInfo[idx].UpdateExprBvName) + } childUpdateExprs = append(childUpdateExprs, &sqlparser.UpdateExpr{ Name: sqlparser.NewColName(fk.ChildColumns[colIdx].String()), - Expr: updateExpr.Expr, + Expr: childUpdateExpr, }) } // Because we could be updating the child to a non-null value, // We have to run with foreign key checks OFF because the parent isn't guaranteed to have // the data being updated to. 
- parsedComments := sqlparser.Comments{ - "/*+ SET_VAR(foreign_key_checks=OFF) */", - }.Parsed() + parsedComments := (&sqlparser.ParsedComments{}).SetMySQLSetVarValue(sysvars.ForeignKeyChecks, "OFF").Parsed() childUpdStmt := &sqlparser.Update{ Comments: parsedComments, Exprs: childUpdateExprs, @@ -373,7 +715,13 @@ func buildChildUpdOpForCascade(ctx *plancontext.PlanningContext, fk vindexes.Chi // `UPDATE SET // WHERE IN () // [AND ({ IS NULL OR}... NOT IN ())]` -func buildChildUpdOpForSetNull(ctx *plancontext.PlanningContext, fk vindexes.ChildFKInfo, updStmt *sqlparser.Update, childWhereExpr sqlparser.Expr) (ops.Operator, error) { +func buildChildUpdOpForSetNull( + ctx *plancontext.PlanningContext, + fk vindexes.ChildFKInfo, + childWhereExpr sqlparser.Expr, + nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, + updatedTable *vindexes.Table, +) Operator { // For the SET NULL type constraint, we need to set all the child columns to NULL. var childUpdateExprs sqlparser.UpdateExprs for _, column := range fk.ChildColumns { @@ -392,34 +740,57 @@ func buildChildUpdOpForSetNull(ctx *plancontext.PlanningContext, fk vindexes.Chi // For example, if we are setting `update parent cola = :v1 and colb = :v2`, then on the child, the where condition would look something like this - // `:v1 IS NULL OR :v2 IS NULL OR (child_cola, child_colb) NOT IN ((:v1,:v2))` // So, if either of :v1 or :v2 is NULL, then the entire condition is true (which is the same as not having the condition when :v1 or :v2 is NULL). 
- compExpr := nullSafeNotInComparison(updStmt.Exprs, fk) + updateExprs := ctx.SemTable.GetUpdateExpressionsForFk(fk.String(updatedTable)) + compExpr := nullSafeNotInComparison(ctx, + updatedTable, + updateExprs, fk, updatedTable.GetTableName(), nonLiteralUpdateInfo, false /* appendQualifier */) if compExpr != nil { childWhereExpr = &sqlparser.AndExpr{ Left: childWhereExpr, Right: compExpr, } } + parsedComments := getParsedCommentsForFkChecks(ctx) childUpdStmt := &sqlparser.Update{ Exprs: childUpdateExprs, + Comments: parsedComments, TableExprs: []sqlparser.TableExpr{sqlparser.NewAliasedTableExpr(fk.Table.GetTableName(), "")}, Where: &sqlparser.Where{Type: sqlparser.WhereClause, Expr: childWhereExpr}, } return createOpFromStmt(ctx, childUpdStmt, false, "") } +// getParsedCommentsForFkChecks gets the parsed comments to be set on a child query related to foreign_key_checks session variable. +// We only use this function if foreign key checks are either unspecified or on. +// If foreign key checks are explicitly turned on, then we should add the set_var parsed comment too +// since underlying MySQL might have foreign_key_checks as off. +// We run with foreign key checks on because the DML might still fail on MySQL due to a child table +// with RESTRICT constraints. +func getParsedCommentsForFkChecks(ctx *plancontext.PlanningContext) (parsedComments *sqlparser.ParsedComments) { + fkState := ctx.VSchema.GetForeignKeyChecksState() + if fkState != nil && *fkState { + parsedComments = parsedComments.SetMySQLSetVarValue(sysvars.ForeignKeyChecks, "ON").Parsed() + } + return parsedComments +} + // createFKVerifyOp creates the verify operator for the parent foreign key constraints. 
-func createFKVerifyOp(ctx *plancontext.PlanningContext, childOp ops.Operator, updStmt *sqlparser.Update, parentFks []vindexes.ParentFKInfo, restrictChildFks []vindexes.ChildFKInfo) (ops.Operator, error) { +func createFKVerifyOp( + ctx *plancontext.PlanningContext, + childOp Operator, + updStmt *sqlparser.Update, + parentFks []vindexes.ParentFKInfo, + restrictChildFks []vindexes.ChildFKInfo, + updatedTable *vindexes.Table, +) Operator { if len(parentFks) == 0 && len(restrictChildFks) == 0 { - return childOp, nil + return childOp } var Verify []*VerifyOp // This validates that new values exists on the parent table. for _, fk := range parentFks { - op, err := createFkVerifyOpForParentFKForUpdate(ctx, updStmt, fk) - if err != nil { - return nil, err - } + op := createFkVerifyOpForParentFKForUpdate(ctx, updatedTable, updStmt, fk) Verify = append(Verify, &VerifyOp{ Op: op, Typ: engine.ParentVerify, @@ -427,10 +798,8 @@ func createFKVerifyOp(ctx *plancontext.PlanningContext, childOp ops.Operator, up } // This validates that the old values don't exist on the child table. 
for _, fk := range restrictChildFks { - op, err := createFkVerifyOpForChildFKForUpdate(ctx, updStmt, fk) - if err != nil { - return nil, err - } + op := createFkVerifyOpForChildFKForUpdate(ctx, updatedTable, updStmt, fk) + Verify = append(Verify, &VerifyOp{ Op: op, Typ: engine.ChildVerify, @@ -440,29 +809,31 @@ func createFKVerifyOp(ctx *plancontext.PlanningContext, childOp ops.Operator, up return &FkVerify{ Verify: Verify, Input: childOp, - }, nil + } } // Each parent foreign key constraint is verified by an anti join query of the form: // select 1 from child_tbl left join parent_tbl on -// where and and limit 1 +// where and and and and limit 1 // E.g: // Child (c1, c2) references Parent (p1, p2) -// update Child set c1 = 1 where id = 1 +// update Child set c1 = c2 + 1 where id = 1 // verify query: -// select 1 from Child left join Parent on Parent.p1 = 1 and Parent.p2 = Child.c2 -// where Parent.p1 is null and Parent.p2 is null and Child.id = 1 -// and Child.c2 is not null +// select 1 from Child left join Parent on Parent.p1 = Child.c2 + 1 and Parent.p2 = Child.c2 +// where Parent.p1 is null and Parent.p2 is null and Child.id = 1 and Child.c2 + 1 is not null +// and Child.c2 is not null and not ((Child.c1) <=> (Child.c2 + 1)) // limit 1 -func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) (ops.Operator, error) { +func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, pFK vindexes.ParentFKInfo) Operator { childTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) childTbl, err := childTblExpr.TableName() if err != nil { - return nil, err + panic(err) } parentTbl := pFK.Table.GetTableName() var whereCond sqlparser.Expr var joinCond sqlparser.Expr + var notEqualColNames sqlparser.ValTuple + var notEqualExprs sqlparser.ValTuple for idx, column := range pFK.ChildColumns { var matchedExpr 
*sqlparser.UpdateExpr for _, updateExpr := range updStmt.Exprs { @@ -479,7 +850,7 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updS var joinExpr sqlparser.Expr if matchedExpr == nil { predicate = &sqlparser.AndExpr{ - Left: parentIsNullExpr, + Left: predicate, Right: &sqlparser.IsExpr{ Left: sqlparser.NewColNameWithQualifier(pFK.ChildColumns[idx].String(), childTbl), Right: sqlparser.IsNotNullOp, @@ -491,10 +862,20 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updS Right: sqlparser.NewColNameWithQualifier(pFK.ChildColumns[idx].String(), childTbl), } } else { + notEqualColNames = append(notEqualColNames, prefixColNames(ctx, childTbl, matchedExpr.Name)) + prefixedMatchExpr := prefixColNames(ctx, childTbl, getCastedUpdateExpression(updatedTable, matchedExpr)) + notEqualExprs = append(notEqualExprs, prefixedMatchExpr) joinExpr = &sqlparser.ComparisonExpr{ Operator: sqlparser.EqualOp, Left: sqlparser.NewColNameWithQualifier(pFK.ParentColumns[idx].String(), parentTbl), - Right: prefixColNames(childTbl, matchedExpr.Expr), + Right: prefixedMatchExpr, + } + predicate = &sqlparser.AndExpr{ + Left: predicate, + Right: &sqlparser.IsExpr{ + Left: prefixedMatchExpr, + Right: sqlparser.IsNotNullOp, + }, } } @@ -505,9 +886,19 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updS joinCond = &sqlparser.AndExpr{Left: joinCond, Right: joinExpr} whereCond = &sqlparser.AndExpr{Left: whereCond, Right: predicate} } + whereCond = &sqlparser.AndExpr{ + Left: whereCond, + Right: &sqlparser.NotExpr{ + Expr: &sqlparser.ComparisonExpr{ + Operator: sqlparser.NullSafeEqualOp, + Left: notEqualColNames, + Right: notEqualExprs, + }, + }, + } // add existing where condition on the update statement if updStmt.Where != nil { - whereCond = &sqlparser.AndExpr{Left: whereCond, Right: prefixColNames(childTbl, updStmt.Where.Expr)} + whereCond = &sqlparser.AndExpr{Left: whereCond, Right: prefixColNames(ctx, 
childTbl, updStmt.Where.Expr)} } return createSelectionOp(ctx, sqlparser.SelectExprs{sqlparser.NewAliasedExpr(sqlparser.NewIntLiteral("1"), "")}, @@ -519,28 +910,43 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, updS sqlparser.NewJoinCondition(joinCond, nil)), }, sqlparser.NewWhere(sqlparser.WhereClause, whereCond), + nil, sqlparser.NewLimitWithoutOffset(1), - sqlparser.ShareModeLock) + getVerifyLock(updatedTable)) +} + +func getVerifyLock(vTbl *vindexes.Table) sqlparser.Lock { + if len(vTbl.UniqueKeys) > 0 { + return sqlparser.ForShareLockNoWait + } + return sqlparser.ForShareLock +} + +func getUpdateLock(vTbl *vindexes.Table) sqlparser.Lock { + if len(vTbl.UniqueKeys) > 0 { + return sqlparser.ForUpdateLockNoWait + } + return sqlparser.ForUpdateLock } // Each child foreign key constraint is verified by a join query of the form: // select 1 from child_tbl join parent_tbl on where [AND ({ IS NULL OR}... NOT IN ())] limit 1 // E.g: // Child (c1, c2) references Parent (p1, p2) -// update Parent set p1 = 1 where id = 1 +// update Parent set p1 = col + 1 where id = 1 // verify query: // select 1 from Child join Parent on Parent.p1 = Child.c1 and Parent.p2 = Child.c2 -// where Parent.id = 1 and (1 IS NULL OR (child.c1) NOT IN ((1))) limit 1 -func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) (ops.Operator, error) { +// where Parent.id = 1 and ((Parent.col + 1) IS NULL OR (child.c1) NOT IN ((Parent.col + 1))) limit 1 +func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updStmt *sqlparser.Update, cFk vindexes.ChildFKInfo) Operator { // ON UPDATE RESTRICT foreign keys that require validation, should only be allowed in the case where we // are verifying all the FKs on vtgate level. 
if !ctx.VerifyAllFKs { - return nil, vterrors.VT12002() + panic(vterrors.VT12002()) } parentTblExpr := updStmt.TableExprs[0].(*sqlparser.AliasedTableExpr) parentTbl, err := parentTblExpr.TableName() if err != nil { - return nil, err + panic(err) } childTbl := cFk.Table.GetTableName() var joinCond sqlparser.Expr @@ -561,7 +967,7 @@ func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updSt var whereCond sqlparser.Expr // add existing where condition on the update statement if updStmt.Where != nil { - whereCond = prefixColNames(parentTbl, updStmt.Where.Expr) + whereCond = prefixColNames(ctx, parentTbl, updStmt.Where.Expr) } // We don't want to fail the RESTRICT for the case where the parent columns remains unchanged on the update. @@ -573,7 +979,7 @@ func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updSt // For example, if we are setting `update child cola = :v1 and colb = :v2`, then on the parent, the where condition would look something like this - // `:v1 IS NULL OR :v2 IS NULL OR (cola, colb) NOT IN ((:v1,:v2))` // So, if either of :v1 or :v2 is NULL, then the entire condition is true (which is the same as not having the condition when :v1 or :v2 is NULL). - compExpr := nullSafeNotInComparison(updStmt.Exprs, cFk) + compExpr := nullSafeNotInComparison(ctx, updatedTable, updStmt.Exprs, cFk, parentTbl, nil /* nonLiteralUpdateInfo */, true /* appendQualifier */) if compExpr != nil { whereCond = sqlparser.AndExpressions(whereCond, compExpr) } @@ -588,31 +994,38 @@ func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updSt sqlparser.NewJoinCondition(joinCond, nil)), }, sqlparser.NewWhere(sqlparser.WhereClause, whereCond), + nil, sqlparser.NewLimitWithoutOffset(1), - sqlparser.ShareModeLock) + getVerifyLock(updatedTable)) } // nullSafeNotInComparison is used to compare the child columns in the foreign key constraint aren't the same as the updateExpressions exactly. 
-// This comparison has to be null safe so we create an expression which looks like the following for a query like `update child cola = :v1 and colb = :v2` - +// This comparison has to be null safe, so we create an expression which looks like the following for a query like `update child cola = :v1 and colb = :v2` - // `:v1 IS NULL OR :v2 IS NULL OR (cola, colb) NOT IN ((:v1,:v2))` // So, if either of :v1 or :v2 is NULL, then the entire condition is true (which is the same as not having the condition when :v1 or :v2 is NULL) // This expression is used in cascading SET NULLs and in verifying whether an update should be restricted. -func nullSafeNotInComparison(updateExprs sqlparser.UpdateExprs, cFk vindexes.ChildFKInfo) sqlparser.Expr { +func nullSafeNotInComparison(ctx *plancontext.PlanningContext, updatedTable *vindexes.Table, updateExprs sqlparser.UpdateExprs, cFk vindexes.ChildFKInfo, parentTbl sqlparser.TableName, nonLiteralUpdateInfo []engine.NonLiteralUpdateInfo, appendQualifier bool) sqlparser.Expr { + var valTuple sqlparser.ValTuple var updateValues sqlparser.ValTuple - for _, updateExpr := range updateExprs { + for idx, updateExpr := range updateExprs { colIdx := cFk.ParentColumns.FindColumn(updateExpr.Name.Name) if colIdx >= 0 { if sqlparser.IsNull(updateExpr.Expr) { return nil } - updateValues = append(updateValues, updateExpr.Expr) + childUpdateExpr := prefixColNames(ctx, parentTbl, getCastedUpdateExpression(updatedTable, updateExpr)) + if len(nonLiteralUpdateInfo) > 0 && nonLiteralUpdateInfo[idx].UpdateExprBvName != "" { + childUpdateExpr = sqlparser.NewArgument(nonLiteralUpdateInfo[idx].UpdateExprBvName) + } + updateValues = append(updateValues, childUpdateExpr) + if appendQualifier { + valTuple = append(valTuple, sqlparser.NewColNameWithQualifier(cFk.ChildColumns[colIdx].String(), cFk.Table.GetTableName())) + } else { + valTuple = append(valTuple, sqlparser.NewColName(cFk.ChildColumns[colIdx].String())) + } } } - // Create a ValTuple of child column 
names - var valTuple sqlparser.ValTuple - for _, column := range cFk.ChildColumns { - valTuple = append(valTuple, sqlparser.NewColNameWithQualifier(column.String(), cFk.Table.GetTableName())) - } + var finalExpr sqlparser.Expr = sqlparser.NewComparisonExpr(sqlparser.NotInOp, valTuple, sqlparser.ValTuple{updateValues}, nil) for _, value := range updateValues { finalExpr = &sqlparser.OrExpr{ @@ -626,3 +1039,113 @@ func nullSafeNotInComparison(updateExprs sqlparser.UpdateExprs, cFk vindexes.Chi return finalExpr } + +func buildChangedVindexesValues( + ctx *plancontext.PlanningContext, + update *sqlparser.Update, + table *vindexes.Table, + ksidCols []sqlparser.IdentifierCI, + assignments []SetExpr, +) (changedVindexes map[string]*engine.VindexValues, ovq *sqlparser.Select, subQueriesArgOnChangedVindex []string) { + changedVindexes = make(map[string]*engine.VindexValues) + selExprs, offset := initialQuery(ksidCols, table) + for i, vindex := range table.ColumnVindexes { + vindexValueMap := make(map[string]evalengine.Expr) + var compExprs []sqlparser.Expr + for _, vcol := range vindex.Columns { + subQueriesArgOnChangedVindex, compExprs = + createAssignmentExpressions(ctx, assignments, vcol, subQueriesArgOnChangedVindex, vindexValueMap, compExprs) + } + if len(vindexValueMap) == 0 { + // Vindex not changing, continue + continue + } + if i == 0 { + panic(vterrors.VT12001(fmt.Sprintf("you cannot UPDATE primary vindex columns; invalid update on vindex: %v", vindex.Name))) + } + if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { + panic(vterrors.VT12001(fmt.Sprintf("you can only UPDATE lookup vindexes; invalid update on vindex: %v", vindex.Name))) + } + + // Checks done, let's actually add the expressions and the vindex map + selExprs = append(selExprs, aeWrap(sqlparser.AndExpressions(compExprs...))) + changedVindexes[vindex.Name] = &engine.VindexValues{ + EvalExprMap: vindexValueMap, + Offset: offset, + } + offset++ + } + if len(changedVindexes) == 0 { + return nil, nil, nil + 
} + // generate rest of the owned vindex query. + ovq = &sqlparser.Select{ + SelectExprs: selExprs, + OrderBy: update.OrderBy, + Limit: update.Limit, + Lock: sqlparser.ForUpdateLock, + } + return changedVindexes, ovq, subQueriesArgOnChangedVindex +} + +func initialQuery(ksidCols []sqlparser.IdentifierCI, table *vindexes.Table) (sqlparser.SelectExprs, int) { + var selExprs sqlparser.SelectExprs + offset := 0 + for _, col := range ksidCols { + selExprs = append(selExprs, aeWrap(sqlparser.NewColName(col.String()))) + offset++ + } + for _, cv := range table.Owned { + for _, column := range cv.Columns { + selExprs = append(selExprs, aeWrap(sqlparser.NewColName(column.String()))) + offset++ + } + } + return selExprs, offset +} + +func createAssignmentExpressions( + ctx *plancontext.PlanningContext, + assignments []SetExpr, + vcol sqlparser.IdentifierCI, + subQueriesArgOnChangedVindex []string, + vindexValueMap map[string]evalengine.Expr, + compExprs []sqlparser.Expr, +) ([]string, []sqlparser.Expr) { + // Searching in order of columns in colvindex. 
+ found := false + for _, assignment := range assignments { + if !vcol.Equal(assignment.Name.Name) { + continue + } + if found { + panic(vterrors.VT03015(assignment.Name.Name)) + } + found = true + pv, err := evalengine.Translate(assignment.Expr.EvalExpr, &evalengine.Config{ + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + Environment: ctx.VSchema.Environment(), + }) + if err != nil { + panic(invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr)) + } + + if assignment.Expr.Info != nil { + sqe, ok := assignment.Expr.Info.(SubQueryExpression) + if ok { + for _, sq := range sqe { + subQueriesArgOnChangedVindex = append(subQueriesArgOnChangedVindex, sq.ArgName) + } + } + } + + vindexValueMap[vcol.String()] = pv + compExprs = append(compExprs, sqlparser.NewComparisonExpr(sqlparser.EqualOp, assignment.Name, assignment.Expr.EvalExpr, nil)) + } + return subQueriesArgOnChangedVindex, compExprs +} + +func invalidUpdateExpr(upd string, expr sqlparser.Expr) error { + return vterrors.VT12001(fmt.Sprintf("only values are supported; invalid update on column: `%s` with expr: [%s]", upd, sqlparser.String(expr))) +} diff --git a/go/vt/vtgate/planbuilder/operators/update_test.go b/go/vt/vtgate/planbuilder/operators/update_test.go new file mode 100644 index 00000000000..6cdf7be3f7d --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/update_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "testing" + + "github.com/stretchr/testify/require" + + querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +// TestGetCastTypeForColumn tests that we get the correct string value to use in a CAST function based on the type of the column. +func TestGetCastTypeForColumn(t *testing.T) { + tests := []struct { + name string + typ querypb.Type + want string + }{ + { + name: "VARCHAR column", + typ: querypb.Type_VARCHAR, + want: "CHAR", + }, + { + name: "CHAR column", + typ: querypb.Type_CHAR, + want: "CHAR", + }, + { + name: "VARBINARY column", + typ: querypb.Type_VARBINARY, + want: "BINARY", + }, + { + name: "BINARY column", + typ: querypb.Type_BINARY, + want: "BINARY", + }, + { + name: "UINT16 column", + typ: querypb.Type_UINT16, + want: "UNSIGNED", + }, + { + name: "UINT24 column", + typ: querypb.Type_UINT24, + want: "UNSIGNED", + }, + { + name: "UINT32 column", + typ: querypb.Type_UINT32, + want: "UNSIGNED", + }, + { + name: "UINT64 column", + typ: querypb.Type_UINT64, + want: "UNSIGNED", + }, + { + name: "INT16 column", + typ: querypb.Type_INT16, + want: "SIGNED", + }, + { + name: "INT24 column", + typ: querypb.Type_INT24, + want: "SIGNED", + }, + { + name: "INT32 column", + typ: querypb.Type_INT32, + want: "SIGNED", + }, + { + name: "INT64 column", + typ: querypb.Type_INT64, + want: "SIGNED", + }, + { + name: "FLOAT32 column", + typ: querypb.Type_FLOAT32, + want: "FLOAT", + }, + { + name: "FLOAT64 column", + typ: querypb.Type_FLOAT64, + want: "FLOAT", + }, + { + name: "DECIMAL column", + typ: querypb.Type_DECIMAL, + want: "DECIMAL", + }, + { + name: "DATETIME column", + typ: querypb.Type_DATETIME, + want: "DATETIME", + }, + { + name: "NULL column", + typ: querypb.Type_NULL_TYPE, + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + updExpr := &sqlparser.UpdateExpr{ + Name: sqlparser.NewColName("col"), + } + 
updatedTable := &vindexes.Table{ + Columns: []vindexes.Column{ + { + Name: sqlparser.NewIdentifierCI("col"), + Type: tt.typ, + }, + }, + } + tyStr := getCastTypeForColumn(updatedTable, updExpr) + require.EqualValues(t, tt.want, tyStr) + }) + } +} diff --git a/go/vt/vtgate/planbuilder/operators/upsert.go b/go/vt/vtgate/planbuilder/operators/upsert.go new file mode 100644 index 00000000000..8f028a790b2 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/upsert.go @@ -0,0 +1,143 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operators + +import ( + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var _ Operator = (*Upsert)(nil) + +// Upsert represents an insert on duplicate key operation on a table. 
+type Upsert struct { + Sources []UpsertSource + + noColumns + noPredicates +} + +type UpsertSource struct { + Insert Operator + Update Operator +} + +func (u *Upsert) Clone(inputs []Operator) Operator { + up := &Upsert{} + up.setInputs(inputs) + return up +} + +func (u *Upsert) setInputs(inputs []Operator) { + for i := 0; i < len(inputs); i += 2 { + u.Sources = append(u.Sources, UpsertSource{ + Insert: inputs[i], + Update: inputs[i+1], + }) + } +} + +func (u *Upsert) Inputs() []Operator { + var inputs []Operator + for _, source := range u.Sources { + inputs = append(inputs, source.Insert, source.Update) + } + return inputs +} + +func (u *Upsert) SetInputs(inputs []Operator) { + u.Sources = nil + u.setInputs(inputs) +} + +func (u *Upsert) ShortDescription() string { + return "" +} + +func (u *Upsert) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { + return nil +} + +func createUpsertOperator(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, insOp Operator, rows sqlparser.Values, vTbl *vindexes.Table) Operator { + if len(vTbl.UniqueKeys) != 0 { + panic(vterrors.VT12001("ON DUPLICATE KEY UPDATE with foreign keys with unique keys")) + } + + pIndexes, _ := findPKIndexes(vTbl, ins) + if len(pIndexes) == 0 { + // nothing to compare for update. + // Hence, only perform insert. + return insOp + } + + upsert := &Upsert{} + for _, row := range rows { + var comparisons []sqlparser.Expr + for _, pIdx := range pIndexes { + var expr sqlparser.Expr + if pIdx.idx == -1 { + expr = pIdx.def + } else { + expr = row[pIdx.idx] + } + comparisons = append(comparisons, + sqlparser.NewComparisonExpr(sqlparser.EqualOp, sqlparser.NewColName(pIdx.col.String()), expr, nil)) + } + whereExpr := sqlparser.AndExpressions(comparisons...) 
+ + var updExprs sqlparser.UpdateExprs + for _, ue := range ins.OnDup { + expr := sqlparser.CopyOnRewrite(ue.Expr, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + vfExpr, ok := cursor.Node().(*sqlparser.ValuesFuncExpr) + if !ok { + return + } + idx := ins.Columns.FindColumn(vfExpr.Name.Name) + if idx == -1 { + panic(vterrors.VT03014(sqlparser.String(vfExpr.Name), "field list")) + } + cursor.Replace(row[idx]) + }, nil).(sqlparser.Expr) + updExprs = append(updExprs, &sqlparser.UpdateExpr{ + Name: ue.Name, + Expr: expr, + }) + } + + upd := &sqlparser.Update{ + Comments: ins.Comments, + TableExprs: sqlparser.TableExprs{ins.Table}, + Exprs: updExprs, + Where: sqlparser.NewWhere(sqlparser.WhereClause, whereExpr), + } + updOp := createOpFromStmt(ctx, upd, false, "") + + // replan insert statement without on duplicate key update. + newInsert := sqlparser.CloneRefOfInsert(ins) + newInsert.OnDup = nil + newInsert.Rows = sqlparser.Values{row} + insOp = createOpFromStmt(ctx, newInsert, false, "") + upsert.Sources = append(upsert.Sources, UpsertSource{ + Insert: insOp, + Update: updOp, + }) + } + + return upsert +} diff --git a/go/vt/vtgate/planbuilder/operators/utils_test.go b/go/vt/vtgate/planbuilder/operators/utils_test.go new file mode 100644 index 00000000000..035a273e964 --- /dev/null +++ b/go/vt/vtgate/planbuilder/operators/utils_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package operators + +import ( + "slices" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/semantics" +) + +type fakeOp struct { + id semantics.TableSet + inputs []Operator + cols []*sqlparser.AliasedExpr +} + +var _ Operator = (*fakeOp)(nil) + +func (f *fakeOp) Clone(inputs []Operator) Operator { + return f +} + +func (f *fakeOp) Inputs() []Operator { + return f.inputs +} + +func (f *fakeOp) SetInputs(operators []Operator) { + // TODO implement me + panic("implement me") +} + +func (f *fakeOp) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) Operator { + // TODO implement me + panic("implement me") +} + +func (f *fakeOp) AddColumn(ctx *plancontext.PlanningContext, reuseExisting bool, _ bool, expr *sqlparser.AliasedExpr) int { + if offset := f.FindCol(ctx, expr.Expr, false); reuseExisting && offset >= 0 { + return offset + } + f.cols = append(f.cols, expr) + return len(f.cols) - 1 +} + +func (*fakeOp) AddWSColumn(*plancontext.PlanningContext, int, bool) int { + panic("implement me") +} + +func (f *fakeOp) FindCol(ctx *plancontext.PlanningContext, a sqlparser.Expr, underRoute bool) int { + return slices.IndexFunc(f.cols, func(b *sqlparser.AliasedExpr) bool { + return a == b.Expr + }) +} + +func (f *fakeOp) GetColumns(ctx *plancontext.PlanningContext) []*sqlparser.AliasedExpr { + // TODO implement me + panic("implement me") +} + +func (f *fakeOp) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.SelectExprs { + // TODO implement me + panic("implement me") +} + +func (f *fakeOp) ShortDescription() string { + // TODO implement me + panic("implement me") +} + +func (f *fakeOp) GetOrdering(ctx *plancontext.PlanningContext) []OrderBy { + // TODO implement me + panic("implement me") +} + +func (f *fakeOp) introducesTableID() semantics.TableSet { + return f.id +} diff --git a/go/vt/vtgate/planbuilder/operators/vindex.go 
b/go/vt/vtgate/planbuilder/operators/vindex.go index 2fe2bf4d3e5..fd907fdad27 100644 --- a/go/vt/vtgate/planbuilder/operators/vindex.go +++ b/go/vt/vtgate/planbuilder/operators/vindex.go @@ -21,7 +21,6 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -57,7 +56,7 @@ func (v *Vindex) introducesTableID() semantics.TableSet { } // Clone implements the Operator interface -func (v *Vindex) Clone([]ops.Operator) ops.Operator { +func (v *Vindex) Clone([]Operator) Operator { clone := *v return &clone } @@ -76,6 +75,10 @@ func (v *Vindex) AddColumn(ctx *plancontext.PlanningContext, reuse bool, gb bool return addColumn(ctx, v, ae.Expr) } +func (*Vindex) AddWSColumn(*plancontext.PlanningContext, int, bool) int { + panic(vterrors.VT13001("did not expect this method to be called")) +} + func colNameToExpr(c *sqlparser.ColName) *sqlparser.AliasedExpr { return &sqlparser.AliasedExpr{ Expr: c, @@ -101,7 +104,7 @@ func (v *Vindex) GetSelectExprs(ctx *plancontext.PlanningContext) sqlparser.Sele return transformColumnsToSelectExprs(ctx, v) } -func (v *Vindex) GetOrdering(*plancontext.PlanningContext) []ops.OrderBy { +func (v *Vindex) GetOrdering(*plancontext.PlanningContext) []OrderBy { return nil } @@ -113,15 +116,13 @@ func (v *Vindex) AddCol(col *sqlparser.ColName) { v.Columns = append(v.Columns, col) } -func (v *Vindex) CheckValid() error { +func (v *Vindex) CheckValid() { if len(v.Table.Predicates) == 0 { - return vterrors.VT09018(wrongWhereCond + " (where clause missing)") + panic(vterrors.VT09018(wrongWhereCond + " (where clause missing)")) } - - return nil } -func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.Expr) ops.Operator { +func (v *Vindex) AddPredicate(ctx 
*plancontext.PlanningContext, expr sqlparser.Expr) Operator { for _, e := range sqlparser.SplitAndExpression(nil, expr) { deps := ctx.SemTable.RecursiveDeps(e) if deps.NumberOfTables() > 1 { @@ -149,15 +150,12 @@ func (v *Vindex) AddPredicate(ctx *plancontext.PlanningContext, expr sqlparser.E } // check RHS - var err error if sqlparser.IsValue(comparison.Right) || sqlparser.IsSimpleTuple(comparison.Right) { v.Value = comparison.Right } else { panic(vterrors.VT09018(wrongWhereCond + " (rhs is not a value)")) } - if err != nil { - panic(vterrors.VT09018(wrongWhereCond+": %v", err)) - } + v.OpCode = engine.VindexMap v.Table.Predicates = append(v.Table.Predicates, e) } diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go b/go/vt/vtgate/planbuilder/ordered_aggregate.go deleted file mode 100644 index 34646fa3dea..00000000000 --- a/go/vt/vtgate/planbuilder/ordered_aggregate.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*orderedAggregate)(nil) - -// orderedAggregate is the logicalPlan for engine.OrderedAggregate. -// This gets built if there are aggregations on a SelectScatter -// route. The primitive requests the underlying route to order -// the results by the grouping columns. This will allow the -// engine code to aggregate the results as they come. 
-// For example: 'select col1, col2, count(*) from t group by col1, col2' -// will be sent to the scatter route as: -// 'select col1, col2, count(*) from t group by col1, col2 order by col1, col2` -// The orderAggregate primitive built for this will be: -// -// &engine.OrderedAggregate { -// // Aggregates has one column. It computes the count -// // using column 2 of the underlying route. -// Aggregates: []AggregateParams{{ -// Opcode: AggregateCount, -// Col: 2, -// }}, -// -// // Keys has the two group by values for col1 and col2. -// // The column numbers are from the underlying route. -// // These values will be used to perform the grouping -// // of the ordered results as they come from the underlying -// // route. -// Keys: []int{0, 1}, -// Input: (Scatter Route with the order by request), -// } -type orderedAggregate struct { - resultsBuilder - - // aggregates specifies the aggregation parameters for each - // aggregation function: function opcode and input column number. - aggregates []*engine.AggregateParams - - // groupByKeys specifies the input values that must be used for - // the aggregation key. - groupByKeys []*engine.GroupByParams - - truncateColumnCount int -} - -// Primitive implements the logicalPlan interface -func (oa *orderedAggregate) Primitive() engine.Primitive { - input := oa.input.Primitive() - if len(oa.groupByKeys) == 0 { - return &engine.ScalarAggregate{ - Aggregates: oa.aggregates, - TruncateColumnCount: oa.truncateColumnCount, - Input: input, - } - } - - return &engine.OrderedAggregate{ - Aggregates: oa.aggregates, - GroupByKeys: oa.groupByKeys, - TruncateColumnCount: oa.truncateColumnCount, - Input: input, - } -} - -// SetTruncateColumnCount sets the truncate column count. 
-func (oa *orderedAggregate) SetTruncateColumnCount(count int) { - oa.truncateColumnCount = count -} diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index 472648828ef..387684149bf 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -21,13 +21,15 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + "math/rand/v2" "os" "path/filepath" "runtime/debug" "strings" "testing" + "github.com/stretchr/testify/suite" + "github.com/nsf/jsondiff" "github.com/stretchr/testify/require" @@ -36,32 +38,54 @@ import ( "vitess.io/vitess/go/test/vschemawrapper" "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/engine" - oprewriters "vitess.io/vitess/go/vt/vtgate/planbuilder/operators/rewrite" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func makeTestOutput(t *testing.T) string { - testOutputTempDir := utils.MakeTestOutput(t, "testdata", "plan_test") +var expectedDir = "testdata/expected" + +func getTestExpectationDir() string { + return filepath.Clean(expectedDir) +} + +type planTestSuite struct { + suite.Suite + outputDir string +} + +func (s *planTestSuite) SetupSuite() { + dir := getTestExpectationDir() + err := os.RemoveAll(dir) + require.NoError(s.T(), err) + err = os.Mkdir(dir, 0755) + require.NoError(s.T(), err) + s.outputDir = dir +} - return testOutputTempDir +func TestPlanTestSuite(t *testing.T) { + suite.Run(t, new(planTestSuite)) } -func TestPlan(t *testing.T) { - defer utils.EnsureNoLeaks(t) +func (s *planTestSuite) TestPlan() { + defer utils.EnsureNoLeaks(s.T()) vschemaWrapper := 
&vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), TabletType_: topodatapb.TabletType_PRIMARY, SysVarEnabled: true, TestBuilder: TestBuilder, + Env: vtenv.NewTestEnv(), } - testOutputTempDir := makeTestOutput(t) + s.addPKs(vschemaWrapper.V, "user", []string{"user", "music"}) + s.addPKsProvided(vschemaWrapper.V, "user", []string{"user_extra"}, []string{"id", "user_id"}) + s.addPKsProvided(vschemaWrapper.V, "ordering", []string{"order"}, []string{"oid", "region_id"}) + s.addPKsProvided(vschemaWrapper.V, "ordering", []string{"order_event"}, []string{"oid", "ename"}) // You will notice that some tests expect user.Id instead of user.id. // This is because we now pre-create vindex columns in the symbol @@ -69,51 +93,80 @@ func TestPlan(t *testing.T) { // the column is named as Id. This is to make sure that // column names are case-preserved, but treated as // case-insensitive even if they come from the vschema. 
- testFile(t, "aggr_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "dml_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "from_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "filter_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "postprocess_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "select_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "symtab_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "unsupported_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "unknown_schema_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "vindex_func_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "wireup_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "memory_sort_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "use_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "set_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "union_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "large_union_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "transaction_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "lock_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "large_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "ddl_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "flush_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "show_cases_no_default_keyspace.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "stream_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "info_schema80_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "reference_cases.json", testOutputTempDir, 
vschemaWrapper, false) - testFile(t, "vexplain_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "misc_cases.json", testOutputTempDir, vschemaWrapper, false) - testFile(t, "cte_cases.json", testOutputTempDir, vschemaWrapper, false) + s.testFile("aggr_cases.json", vschemaWrapper, false) + s.testFile("dml_cases.json", vschemaWrapper, false) + s.testFile("from_cases.json", vschemaWrapper, false) + s.testFile("filter_cases.json", vschemaWrapper, false) + s.testFile("postprocess_cases.json", vschemaWrapper, false) + s.testFile("select_cases.json", vschemaWrapper, false) + s.testFile("symtab_cases.json", vschemaWrapper, false) + s.testFile("unsupported_cases.json", vschemaWrapper, false) + s.testFile("unknown_schema_cases.json", vschemaWrapper, false) + s.testFile("vindex_func_cases.json", vschemaWrapper, false) + s.testFile("wireup_cases.json", vschemaWrapper, false) + s.testFile("memory_sort_cases.json", vschemaWrapper, false) + s.testFile("use_cases.json", vschemaWrapper, false) + s.testFile("set_cases.json", vschemaWrapper, false) + s.testFile("union_cases.json", vschemaWrapper, false) + s.testFile("large_union_cases.json", vschemaWrapper, false) + s.testFile("transaction_cases.json", vschemaWrapper, false) + s.testFile("lock_cases.json", vschemaWrapper, false) + s.testFile("large_cases.json", vschemaWrapper, false) + s.testFile("ddl_cases_no_default_keyspace.json", vschemaWrapper, false) + s.testFile("flush_cases_no_default_keyspace.json", vschemaWrapper, false) + s.testFile("show_cases_no_default_keyspace.json", vschemaWrapper, false) + s.testFile("stream_cases.json", vschemaWrapper, false) + s.testFile("info_schema80_cases.json", vschemaWrapper, false) + s.testFile("reference_cases.json", vschemaWrapper, false) + s.testFile("vexplain_cases.json", vschemaWrapper, false) + s.testFile("misc_cases.json", vschemaWrapper, false) + s.testFile("cte_cases.json", vschemaWrapper, false) } // TestForeignKeyPlanning tests the planning of foreign keys in a 
managed mode by Vitess. -func TestForeignKeyPlanning(t *testing.T) { - vschema := loadSchema(t, "vschemas/schema.json", true) - setFks(t, vschema) +func (s *planTestSuite) TestForeignKeyPlanning() { + vschema := loadSchema(s.T(), "vschemas/schema.json", true) + s.setFks(vschema) vschemaWrapper := &vschemawrapper.VSchemaWrapper{ V: vschema, TestBuilder: TestBuilder, + Env: vtenv.NewTestEnv(), } - testOutputTempDir := makeTestOutput(t) + s.testFile("foreignkey_cases.json", vschemaWrapper, false) +} + +// TestForeignKeyChecksOn tests the planning when the session variable for foreign_key_checks is set to ON. +func (s *planTestSuite) TestForeignKeyChecksOn() { + vschema := loadSchema(s.T(), "vschemas/schema.json", true) + s.setFks(vschema) + fkChecksState := true + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: vschema, + TestBuilder: TestBuilder, + ForeignKeyChecksState: &fkChecksState, + Env: vtenv.NewTestEnv(), + } + + s.testFile("foreignkey_checks_on_cases.json", vschemaWrapper, false) +} + +// TestForeignKeyChecksOff tests the planning when the session variable for foreign_key_checks is set to OFF. +func (s *planTestSuite) TestForeignKeyChecksOff() { + vschema := loadSchema(s.T(), "vschemas/schema.json", true) + s.setFks(vschema) + fkChecksState := false + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: vschema, + TestBuilder: TestBuilder, + ForeignKeyChecksState: &fkChecksState, + Env: vtenv.NewTestEnv(), + } - testFile(t, "foreignkey_cases.json", testOutputTempDir, vschemaWrapper, false) + s.testFile("foreignkey_checks_off_cases.json", vschemaWrapper, false) } -func setFks(t *testing.T, vschema *vindexes.VSchema) { +func (s *planTestSuite) setFks(vschema *vindexes.VSchema) { if vschema.Keyspaces["sharded_fk_allow"] != nil { // FK from multicol_tbl2 referencing multicol_tbl1 that is shard scoped. 
_ = vschema.AddForeignKey("sharded_fk_allow", "multicol_tbl2", createFkDefinition([]string{"colb", "cola", "x", "colc", "y"}, "multicol_tbl1", []string{"colb", "cola", "y", "colc", "x"}, sqlparser.Cascade, sqlparser.Cascade)) @@ -146,6 +199,10 @@ func setFks(t *testing.T, vschema *vindexes.VSchema) { // FK from tblrefDef referencing tbl20 that is shard scoped of SET-Default types. _ = vschema.AddForeignKey("sharded_fk_allow", "tblrefDef", createFkDefinition([]string{"ref"}, "tbl20", []string{"col2"}, sqlparser.SetDefault, sqlparser.SetDefault)) + // FK from tbl_auth referencing tbl20 that is shard scoped of CASCADE types. + _ = vschema.AddForeignKey("sharded_fk_allow", "tbl_auth", createFkDefinition([]string{"id"}, "tbl20", []string{"col2"}, sqlparser.Cascade, sqlparser.Cascade)) + s.addPKs(vschema, "sharded_fk_allow", []string{"tbl1", "tbl2", "tbl3", "tbl4", "tbl5", "tbl6", "tbl7", "tbl9", "tbl10", + "multicol_tbl1", "multicol_tbl2", "tbl_auth", "tblrefDef", "tbl20"}) } if vschema.Keyspaces["unsharded_fk_allow"] != nil { // u_tbl2(col2) -> u_tbl1(col1) Cascade. 
@@ -168,161 +225,218 @@ func setFks(t *testing.T, vschema *vindexes.VSchema) { _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl4", createFkDefinition([]string{"col4"}, "u_tbl3", []string{"col3"}, sqlparser.Restrict, sqlparser.Restrict)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl6", createFkDefinition([]string{"col6"}, "u_tbl5", []string{"col5"}, sqlparser.DefaultAction, sqlparser.DefaultAction)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl8", createFkDefinition([]string{"col8"}, "u_tbl9", []string{"col9"}, sqlparser.SetNull, sqlparser.SetNull)) - _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl8", createFkDefinition([]string{"col8"}, "u_tbl6", []string{"col6"}, sqlparser.Cascade, sqlparser.CASCADE)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl8", createFkDefinition([]string{"col8"}, "u_tbl6", []string{"col6"}, sqlparser.Cascade, sqlparser.Cascade)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl4", createFkDefinition([]string{"col4"}, "u_tbl7", []string{"col7"}, sqlparser.Cascade, sqlparser.Cascade)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl9", createFkDefinition([]string{"col9"}, "u_tbl4", []string{"col4"}, sqlparser.Restrict, sqlparser.Restrict)) + _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl11", createFkDefinition([]string{"col"}, "u_tbl10", []string{"col"}, sqlparser.Cascade, sqlparser.Cascade)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_tbl", createFkDefinition([]string{"col"}, "sharded_fk_allow.s_tbl", []string{"col"}, sqlparser.Restrict, sqlparser.Restrict)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_multicol_tbl2", createFkDefinition([]string{"cola", "colb"}, "u_multicol_tbl1", []string{"cola", "colb"}, sqlparser.SetNull, sqlparser.SetNull)) _ = vschema.AddForeignKey("unsharded_fk_allow", "u_multicol_tbl3", createFkDefinition([]string{"cola", "colb"}, "u_multicol_tbl2", []string{"cola", "colb"}, sqlparser.Cascade, sqlparser.Cascade)) + + _ = 
vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl9", sqlparser.Exprs{sqlparser.NewColName("col9")}) + _ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl9", sqlparser.Exprs{&sqlparser.BinaryExpr{Operator: sqlparser.MultOp, Left: sqlparser.NewColName("col9"), Right: sqlparser.NewColName("foo")}}) + _ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl9", sqlparser.Exprs{sqlparser.NewColName("col9"), sqlparser.NewColName("foo")}) + _ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl9", sqlparser.Exprs{sqlparser.NewColName("foo"), sqlparser.NewColName("bar")}) + _ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl9", sqlparser.Exprs{sqlparser.NewColName("bar"), sqlparser.NewColName("col9")}) + _ = vschema.AddUniqueKey("unsharded_fk_allow", "u_tbl8", sqlparser.Exprs{sqlparser.NewColName("col8")}) + + s.addPKs(vschema, "unsharded_fk_allow", []string{"u_tbl1", "u_tbl2", "u_tbl3", "u_tbl4", "u_tbl5", "u_tbl6", "u_tbl7", "u_tbl8", "u_tbl9", "u_tbl10", "u_tbl11", + "u_multicol_tbl1", "u_multicol_tbl2", "u_multicol_tbl3"}) } + } -func TestSystemTables57(t *testing.T) { +func (s *planTestSuite) addPKs(vschema *vindexes.VSchema, ks string, tbls []string) { + for _, tbl := range tbls { + require.NoError(s.T(), + vschema.AddPrimaryKey(ks, tbl, []string{"id"})) + } +} + +func (s *planTestSuite) addPKsProvided(vschema *vindexes.VSchema, ks string, tbls []string, pks []string) { + for _, tbl := range tbls { + require.NoError(s.T(), + vschema.AddPrimaryKey(ks, tbl, pks)) + } +} + +func (s *planTestSuite) TestSystemTables57() { // first we move everything to use 5.7 logic - oldVer := servenv.MySQLServerVersion() - servenv.SetMySQLServerVersionForTest("5.7") - defer func() { - servenv.SetMySQLServerVersionForTest(oldVer) - }() - vschemaWrapper := &vschemawrapper.VSchemaWrapper{V: loadSchema(t, "vschemas/schema.json", true)} - testOutputTempDir := makeTestOutput(t) - testFile(t, "info_schema57_cases.json", testOutputTempDir, vschemaWrapper, false) + env, err := 
vtenv.New(vtenv.Options{ + MySQLServerVersion: "5.7.9", + }) + require.NoError(s.T(), err) + vschemaWrapper := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(s.T(), "vschemas/schema.json", true), + Env: env, + } + s.testFile("info_schema57_cases.json", vschemaWrapper, false) } -func TestSysVarSetDisabled(t *testing.T) { +func (s *planTestSuite) TestSysVarSetDisabled() { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), SysVarEnabled: false, + Env: vtenv.NewTestEnv(), } - testFile(t, "set_sysvar_disabled_cases.json", makeTestOutput(t), vschemaWrapper, false) + s.testFile("set_sysvar_disabled_cases.json", vschemaWrapper, false) } -func TestViews(t *testing.T) { +func (s *planTestSuite) TestViews() { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), EnableViews: true, + Env: vtenv.NewTestEnv(), } - testFile(t, "view_cases.json", makeTestOutput(t), vschemaWrapper, false) + s.testFile("view_cases.json", vschemaWrapper, false) } -func TestOne(t *testing.T) { - reset := oprewriters.EnableDebugPrinting() +func (s *planTestSuite) TestOne() { + reset := operators.EnableDebugPrinting() defer reset() - lv := loadSchema(t, "vschemas/schema.json", true) - setFks(t, lv) + lv := loadSchema(s.T(), "vschemas/schema.json", true) + s.setFks(lv) + s.addPKs(lv, "user", []string{"user", "music"}) + s.addPKs(lv, "main", []string{"unsharded"}) + s.addPKsProvided(lv, "user", []string{"user_extra"}, []string{"id", "user_id"}) + s.addPKsProvided(lv, "ordering", []string{"order"}, []string{"oid", "region_id"}) + s.addPKsProvided(lv, "ordering", []string{"order_event"}, []string{"oid", "ename"}) vschema := &vschemawrapper.VSchemaWrapper{ V: lv, TestBuilder: TestBuilder, + Env: vtenv.NewTestEnv(), } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, 
false) } -func TestOneTPCC(t *testing.T) { +func (s *planTestSuite) TestOneTPCC() { + reset := operators.EnableDebugPrinting() + defer reset() + vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/tpcc_schema.json", true), + V: loadSchema(s.T(), "vschemas/tpcc_schema.json", true), + Env: vtenv.NewTestEnv(), } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, false) } -func TestOneWithMainAsDefault(t *testing.T) { +func (s *planTestSuite) TestOneWithMainAsDefault() { + reset := operators.EnableDebugPrinting() + defer reset() vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, + Env: vtenv.NewTestEnv(), } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, false) } -func TestOneWithSecondUserAsDefault(t *testing.T) { +func (s *planTestSuite) TestOneWithSecondUserAsDefault() { + reset := operators.EnableDebugPrinting() + defer reset() vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "second_user", Sharded: true, }, + Env: vtenv.NewTestEnv(), } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, false) } -func TestOneWithUserAsDefault(t *testing.T) { +func (s *planTestSuite) TestOneWithUserAsDefault() { + reset := operators.EnableDebugPrinting() + defer reset() vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "user", Sharded: true, }, + Env: vtenv.NewTestEnv(), } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, false) } -func TestOneWithTPCHVSchema(t *testing.T) { - reset := 
oprewriters.EnableDebugPrinting() +func (s *planTestSuite) TestOneWithTPCHVSchema() { + reset := operators.EnableDebugPrinting() defer reset() vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/tpch_schema.json", true), + V: loadSchema(s.T(), "vschemas/tpch_schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, false) } -func TestOneWith57Version(t *testing.T) { +func (s *planTestSuite) TestOneWith57Version() { + reset := operators.EnableDebugPrinting() + defer reset() // first we move everything to use 5.7 logic - oldVer := servenv.MySQLServerVersion() - servenv.SetMySQLServerVersionForTest("5.7") - defer func() { - servenv.SetMySQLServerVersionForTest(oldVer) - }() - vschema := &vschemawrapper.VSchemaWrapper{V: loadSchema(t, "vschemas/schema.json", true)} + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: "5.7.9", + }) + require.NoError(s.T(), err) + vschema := &vschemawrapper.VSchemaWrapper{ + V: loadSchema(s.T(), "vschemas/schema.json", true), + Env: env, + } - testFile(t, "onecase.json", "", vschema, false) + s.testFile("onecase.json", vschema, false) } -func TestRubyOnRailsQueries(t *testing.T) { +func (s *planTestSuite) TestRubyOnRailsQueries() { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/rails_schema.json", true), + V: loadSchema(s.T(), "vschemas/rails_schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } - testFile(t, "rails_cases.json", makeTestOutput(t), vschemaWrapper, false) + s.testFile("rails_cases.json", vschemaWrapper, false) } -func TestOLTP(t *testing.T) { +func (s *planTestSuite) TestOLTP() { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/oltp_schema.json", true), + V: loadSchema(s.T(), "vschemas/oltp_schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } - testFile(t, "oltp_cases.json", makeTestOutput(t), 
vschemaWrapper, false) + s.testFile("oltp_cases.json", vschemaWrapper, false) } -func TestTPCC(t *testing.T) { +func (s *planTestSuite) TestTPCC() { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/tpcc_schema.json", true), + V: loadSchema(s.T(), "vschemas/tpcc_schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } - testFile(t, "tpcc_cases.json", makeTestOutput(t), vschemaWrapper, false) + s.testFile("tpcc_cases.json", vschemaWrapper, false) } -func TestTPCH(t *testing.T) { +func (s *planTestSuite) TestTPCH() { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/tpch_schema.json", true), + V: loadSchema(s.T(), "vschemas/tpch_schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } - testFile(t, "tpch_cases.json", makeTestOutput(t), vschemaWrapper, false) + s.testFile("tpch_cases.json", vschemaWrapper, false) } func BenchmarkOLTP(b *testing.B) { @@ -341,6 +455,7 @@ func benchmarkWorkload(b *testing.B, name string) { vschemaWrapper := &vschemawrapper.VSchemaWrapper{ V: loadSchema(b, "vschemas/"+name+"_schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } testCases := readJSONTests(name + "_cases.json") @@ -352,46 +467,50 @@ func benchmarkWorkload(b *testing.B, name string) { } } -func TestBypassPlanningShardTargetFromFile(t *testing.T) { +func (s *planTestSuite) TestBypassPlanningShardTargetFromFile() { vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, TabletType_: topodatapb.TabletType_PRIMARY, - Dest: key.DestinationShard("-80")} + Dest: key.DestinationShard("-80"), + Env: vtenv.NewTestEnv(), + } - testFile(t, "bypass_shard_cases.json", makeTestOutput(t), vschema, false) + s.testFile("bypass_shard_cases.json", vschema, false) } -func TestBypassPlanningKeyrangeTargetFromFile(t *testing.T) { +func (s 
*planTestSuite) TestBypassPlanningKeyrangeTargetFromFile() { keyRange, _ := key.ParseShardingSpec("-") vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, TabletType_: topodatapb.TabletType_PRIMARY, Dest: key.DestinationExactKeyRange{KeyRange: keyRange[0]}, + Env: vtenv.NewTestEnv(), } - testFile(t, "bypass_keyrange_cases.json", makeTestOutput(t), vschema, false) + s.testFile("bypass_keyrange_cases.json", vschema, false) } -func TestWithDefaultKeyspaceFromFile(t *testing.T) { +func (s *planTestSuite) TestWithDefaultKeyspaceFromFile() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // We are testing this separately so we can set a default keyspace vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, TabletType_: topodatapb.TabletType_PRIMARY, + Env: vtenv.NewTestEnv(), } ts := memorytopo.NewServer(ctx, "cell1") ts.CreateKeyspace(ctx, "main", &topodatapb.Keyspace{}) @@ -405,95 +524,88 @@ func TestWithDefaultKeyspaceFromFile(t *testing.T) { } return ki.SidecarDbName, nil }) - require.True(t, created) + require.True(s.T(), created) - testOutputTempDir := makeTestOutput(t) - testFile(t, "alterVschema_cases.json", testOutputTempDir, vschema, false) - testFile(t, "ddl_cases.json", testOutputTempDir, vschema, false) - testFile(t, "migration_cases.json", testOutputTempDir, vschema, false) - testFile(t, "flush_cases.json", testOutputTempDir, vschema, false) - testFile(t, "show_cases.json", testOutputTempDir, vschema, false) - testFile(t, "call_cases.json", testOutputTempDir, vschema, false) + s.testFile("alterVschema_cases.json", vschema, false) + s.testFile("ddl_cases.json", vschema, false) + s.testFile("migration_cases.json", vschema, 
false) + s.testFile("flush_cases.json", vschema, false) + s.testFile("show_cases.json", vschema, false) + s.testFile("call_cases.json", vschema, false) } -func TestWithDefaultKeyspaceFromFileSharded(t *testing.T) { +func (s *planTestSuite) TestWithDefaultKeyspaceFromFileSharded() { // We are testing this separately so we can set a default keyspace vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "second_user", Sharded: true, }, TabletType_: topodatapb.TabletType_PRIMARY, + Env: vtenv.NewTestEnv(), } - testOutputTempDir := makeTestOutput(t) - testFile(t, "select_cases_with_default.json", testOutputTempDir, vschema, false) + s.testFile("select_cases_with_default.json", vschema, false) } -func TestWithUserDefaultKeyspaceFromFileSharded(t *testing.T) { +func (s *planTestSuite) TestWithUserDefaultKeyspaceFromFileSharded() { // We are testing this separately so we can set a default keyspace vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "user", Sharded: true, }, TabletType_: topodatapb.TabletType_PRIMARY, + Env: vtenv.NewTestEnv(), } - testOutputTempDir := makeTestOutput(t) - testFile(t, "select_cases_with_user_as_default.json", testOutputTempDir, vschema, false) + s.testFile("select_cases_with_user_as_default.json", vschema, false) } -func TestWithSystemSchemaAsDefaultKeyspace(t *testing.T) { +func (s *planTestSuite) TestWithSystemSchemaAsDefaultKeyspace() { // We are testing this separately so we can set a default keyspace vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{Name: "information_schema"}, TabletType_: topodatapb.TabletType_PRIMARY, + Env: vtenv.NewTestEnv(), } - 
testFile(t, "sysschema_default.json", makeTestOutput(t), vschema, false) + s.testFile("sysschema_default.json", vschema, false) } -func TestOtherPlanningFromFile(t *testing.T) { +func (s *planTestSuite) TestOtherPlanningFromFile() { // We are testing this separately so we can set a default keyspace vschema := &vschemawrapper.VSchemaWrapper{ - V: loadSchema(t, "vschemas/schema.json", true), + V: loadSchema(s.T(), "vschemas/schema.json", true), Keyspace: &vindexes.Keyspace{ Name: "main", Sharded: false, }, TabletType_: topodatapb.TabletType_PRIMARY, + Env: vtenv.NewTestEnv(), } - testOutputTempDir := makeTestOutput(t) - testFile(t, "other_read_cases.json", testOutputTempDir, vschema, false) - testFile(t, "other_admin_cases.json", testOutputTempDir, vschema, false) + s.testFile("other_read_cases.json", vschema, false) + s.testFile("other_admin_cases.json", vschema, false) } func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSchema { formal, err := vindexes.LoadFormal(locateFile(filename)) - if err != nil { - t.Fatal(err) - } - vschema := vindexes.BuildVSchema(formal) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + vschema := vindexes.BuildVSchema(formal, sqlparser.NewTestParser()) + require.NoError(t, err) for _, ks := range vschema.Keyspaces { - if ks.Error != nil { - t.Fatal(ks.Error) - } + require.NoError(t, ks.Error) // adding view in user keyspace if ks.Keyspace.Name == "user" { - if err = vschema.AddView(ks.Keyspace.Name, - "user_details_view", - "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id"); err != nil { - t.Fatal(err) - } + err = vschema.AddView(ks.Keyspace.Name, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id", sqlparser.NewTestParser()) + require.NoError(t, err) + err = vschema.AddUDF(ks.Keyspace.Name, "udf_aggr") + require.NoError(t, err) } // setting a default value to all the text columns in the tables of 
this keyspace @@ -502,7 +614,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch if setCollation { for _, table := range ks.Tables { for i, col := range table.Columns { - if sqltypes.IsText(col.Type) { + if sqltypes.IsText(col.Type) && col.CollationName == "" { table.Columns[i].CollationName = "latin1_swedish_ci" } } @@ -514,7 +626,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch // createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { - pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + pKs, pTbl, _ := sqlparser.NewTestParser().ParseTable(parentTableName) return &sqlparser.ForeignKeyDefinition{ Source: sqlparser.MakeColumns(childCols...), ReferenceDefinition: &sqlparser.ReferenceDefinition{ @@ -535,10 +647,11 @@ type ( } ) -func testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VSchemaWrapper, render bool) { +func (s *planTestSuite) testFile(filename string, vschema *vschemawrapper.VSchemaWrapper, render bool) { opts := jsondiff.DefaultConsoleOptions() - t.Run(filename, func(t *testing.T) { + s.T().Run(filename, func(t *testing.T) { + failed := false var expected []planTest for _, tcase := range readJSONTests(filename) { testName := tcase.Comment @@ -560,6 +673,11 @@ func testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VS // - produces a different plan than expected // - fails to produce a plan t.Run(testName, func(t *testing.T) { + defer func() { + if t.Failed() { + failed = true + } + }() compare, s := jsondiff.Compare(tcase.Plan, []byte(out), &opts) if compare != jsondiff.FullMatch { message := fmt.Sprintf("%s\nDiff:\n%s\n[%s] \n[%s]", filename, s, tcase.Plan, out) @@ -575,9 +693,9 @@ func 
testFile(t *testing.T, filename, tempDir string, vschema *vschemawrapper.VS }) expected = append(expected, current) } - if tempDir != "" { + if s.outputDir != "" && failed { name := strings.TrimSuffix(filename, filepath.Ext(filename)) - name = filepath.Join(tempDir, name+".json") + name = filepath.Join(s.outputDir, name+".json") file, err := os.Create(name) require.NoError(t, err) enc := json.NewEncoder(file) @@ -598,6 +716,7 @@ func readJSONTests(filename string) []planTest { panic(err) } dec := json.NewDecoder(file) + dec.DisallowUnknownFields() err = dec.Decode(&output) if err != nil { panic(err) @@ -646,6 +765,7 @@ func BenchmarkPlanner(b *testing.B) { vschema := &vschemawrapper.VSchemaWrapper{ V: loadSchema(b, "vschemas/schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } for _, filename := range benchMarkFiles { testCases := readJSONTests(filename) @@ -662,6 +782,7 @@ func BenchmarkSemAnalysis(b *testing.B) { vschema := &vschemawrapper.VSchemaWrapper{ V: loadSchema(b, "vschemas/schema.json", true), SysVarEnabled: true, + Env: vtenv.NewTestEnv(), } for i := 0; i < b.N; i++ { @@ -679,7 +800,7 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) { recover() }() - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) if err != nil { return } @@ -696,6 +817,7 @@ func BenchmarkSelectVsDML(b *testing.B) { V: loadSchema(b, "vschemas/schema.json", true), SysVarEnabled: true, Version: Gen4, + Env: vtenv.NewTestEnv(), } dmlCases := readJSONTests("dml_cases.json") diff --git a/go/vt/vtgate/planbuilder/plan_test_vindex.go b/go/vt/vtgate/planbuilder/plan_test_vindex.go index 432ef7b8479..30d72f8c03a 100644 --- a/go/vt/vtgate/planbuilder/plan_test_vindex.go +++ b/go/vt/vtgate/planbuilder/plan_test_vindex.go @@ -72,7 +72,10 @@ func newLookupIndex(name string, _ map[string]string) (vindexes.Vindex, error) { var _ vindexes.Lookup = (*lookupIndex)(nil) // nameLkpIndex satisfies Lookup, NonUnique. 
-type nameLkpIndex struct{ name string } +type nameLkpIndex struct { + name string + inBackfill bool +} func (v *nameLkpIndex) String() string { return v.name } func (*nameLkpIndex) Cost() int { return 3 } @@ -102,13 +105,21 @@ func (*nameLkpIndex) Query() (string, []string) { func (*nameLkpIndex) MapResult([]sqltypes.Value, []*sqltypes.Result) ([]key.Destination, error) { return nil, nil } -func newNameLkpIndex(name string, _ map[string]string) (vindexes.Vindex, error) { - return &nameLkpIndex{name: name}, nil + +func (v *nameLkpIndex) IsBackfilling() bool { return v.inBackfill } + +func newNameLkpIndex(name string, m map[string]string) (vindexes.Vindex, error) { + vdx := &nameLkpIndex{name: name} + if val, ok := m["write_only"]; ok { + vdx.inBackfill = val == "true" + } + return vdx, nil } var _ vindexes.Vindex = (*nameLkpIndex)(nil) var _ vindexes.Lookup = (*nameLkpIndex)(nil) var _ vindexes.LookupPlanable = (*nameLkpIndex)(nil) +var _ vindexes.LookupBackfill = (*nameLkpIndex)(nil) // costlyIndex satisfies Lookup, NonUnique. type costlyIndex struct{ name string } diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go index 68ccc95b9fd..3c2a1c97434 100644 --- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go +++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go @@ -19,6 +19,7 @@ package plancontext import ( querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -27,12 +28,16 @@ type PlanningContext struct { SemTable *semantics.SemTable VSchema VSchema - // here we add all predicates that were created because of a join condition - // e.g. [FROM tblA JOIN tblB ON a.colA = b.colB] will be rewritten to [FROM tblB WHERE :a_colA = b.colB], - // if we assume that tblB is on the RHS of the join. 
This last predicate in the WHERE clause is added to the - // map below - JoinPredicates map[sqlparser.Expr][]sqlparser.Expr - SkipPredicates map[sqlparser.Expr]any + // joinPredicates maps each original join predicate (key) to a slice of + // variations of the RHS predicates (value). This map is used to handle + // different scenarios in join planning, where the RHS predicates are + // modified to accommodate dependencies from the LHS, represented as Arguments. + joinPredicates map[sqlparser.Expr][]sqlparser.Expr + + // skipPredicates tracks predicates that should be skipped, typically when + // a join predicate is reverted to its original form during planning. + skipPredicates map[sqlparser.Expr]any + PlannerVersion querypb.ExecuteOptions_PlannerVersion // If we during planning have turned this expression into an argument name, @@ -49,11 +54,17 @@ type PlanningContext struct { // CurrentPhase keeps track of how far we've gone in the planning process // The type should be operators.Phase, but depending on that would lead to circular dependencies CurrentPhase int + + // Statement contains the originally parsed statement + Statement sqlparser.Statement } +// CreatePlanningContext initializes a new PlanningContext with the given parameters. +// It analyzes the SQL statement within the given virtual schema context, +// handling default keyspace settings and semantic analysis. +// Returns an error if semantic analysis fails. 
func CreatePlanningContext(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, - vschema VSchema, version querypb.ExecuteOptions_PlannerVersion, ) (*PlanningContext, error) { @@ -74,13 +85,17 @@ func CreatePlanningContext(stmt sqlparser.Statement, ReservedVars: reservedVars, SemTable: semTable, VSchema: vschema, - JoinPredicates: map[sqlparser.Expr][]sqlparser.Expr{}, - SkipPredicates: map[sqlparser.Expr]any{}, + joinPredicates: map[sqlparser.Expr][]sqlparser.Expr{}, + skipPredicates: map[sqlparser.Expr]any{}, PlannerVersion: version, ReservedArguments: map[sqlparser.Expr]string{}, + Statement: stmt, }, nil } +// GetReservedArgumentFor retrieves a reserved argument name for a given expression. +// If the expression already has a reserved argument, it returns that name; +// otherwise, it reserves a new name based on the expression type. func (ctx *PlanningContext) GetReservedArgumentFor(expr sqlparser.Expr) string { for key, name := range ctx.ReservedArguments { if ctx.SemTable.EqualsExpr(key, expr) { @@ -101,13 +116,88 @@ func (ctx *PlanningContext) GetReservedArgumentFor(expr sqlparser.Expr) string { return bvName } -func (ctx *PlanningContext) GetArgumentFor(expr sqlparser.Expr, f func() string) string { - for key, name := range ctx.ReservedArguments { - if ctx.SemTable.EqualsExpr(key, expr) { - return name +// ShouldSkip determines if a given expression should be ignored in the SQL output building. +// It checks against expressions that have been marked to be excluded from further processing. +func (ctx *PlanningContext) ShouldSkip(expr sqlparser.Expr) bool { + for k := range ctx.skipPredicates { + if ctx.SemTable.EqualsExpr(expr, k) { + return true } } - bvName := f() - ctx.ReservedArguments[expr] = bvName - return bvName + return false +} + +// AddJoinPredicates associates additional RHS predicates with an existing join predicate. +// This is used to dynamically adjust the RHS predicates based on evolving join conditions. 
+func (ctx *PlanningContext) AddJoinPredicates(joinPred sqlparser.Expr, predicates ...sqlparser.Expr) { + fn := func(original sqlparser.Expr, rhsExprs []sqlparser.Expr) { + ctx.joinPredicates[original] = append(rhsExprs, predicates...) + } + if ctx.execOnJoinPredicateEqual(joinPred, fn) { + return + } + + // we didn't find an existing entry + ctx.joinPredicates[joinPred] = predicates +} + +// SkipJoinPredicates marks the predicates related to a specific join predicate as irrelevant +// for the current planning stage. This is used when a join has been pushed under a route and +// the original predicate will be used. +func (ctx *PlanningContext) SkipJoinPredicates(joinPred sqlparser.Expr) error { + fn := func(_ sqlparser.Expr, rhsExprs []sqlparser.Expr) { + ctx.skipThesePredicates(rhsExprs...) + } + if ctx.execOnJoinPredicateEqual(joinPred, fn) { + return nil + } + return vterrors.VT13001("predicate does not exist: " + sqlparser.String(joinPred)) +} + +// KeepPredicateInfo transfers join predicate information from another context. +// This is useful when nesting queries, ensuring consistent predicate handling across contexts. +func (ctx *PlanningContext) KeepPredicateInfo(other *PlanningContext) { + for k, v := range other.joinPredicates { + ctx.AddJoinPredicates(k, v...) 
+ } + for expr := range other.skipPredicates { + ctx.skipThesePredicates(expr) + } +} + +// skipThesePredicates is a utility function to exclude certain predicates from SQL building +func (ctx *PlanningContext) skipThesePredicates(preds ...sqlparser.Expr) { +outer: + for _, expr := range preds { + for k := range ctx.skipPredicates { + if ctx.SemTable.EqualsExpr(expr, k) { + // already skipped + continue outer + } + } + ctx.skipPredicates[expr] = nil + } +} + +func (ctx *PlanningContext) execOnJoinPredicateEqual(joinPred sqlparser.Expr, fn func(original sqlparser.Expr, rhsExprs []sqlparser.Expr)) bool { + for key, values := range ctx.joinPredicates { + if ctx.SemTable.EqualsExpr(joinPred, key) { + fn(key, values) + return true + } + } + return false +} + +func (ctx *PlanningContext) RewriteDerivedTableExpression(expr sqlparser.Expr, tableInfo semantics.TableInfo) sqlparser.Expr { + modifiedExpr := semantics.RewriteDerivedTableExpression(expr, tableInfo) + for key, exprs := range ctx.joinPredicates { + for _, rhsExpr := range exprs { + if ctx.SemTable.EqualsExpr(expr, rhsExpr) { + ctx.joinPredicates[key] = append(ctx.joinPredicates[key], modifiedExpr) + return modifiedExpr + } + } + } + return modifiedExpr } diff --git a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go index 9dde6dee31c..8ac4c57bfd7 100644 --- a/go/vt/vtgate/planbuilder/plancontext/vschema.go +++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go @@ -6,6 +6,7 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/mysql/collations" @@ -41,6 +42,7 @@ type VSchema interface { Planner() PlannerVersion SetPlannerVersion(pv PlannerVersion) ConnCollation() collations.ID + Environment() *vtenv.Environment // ErrorIfShardedF will return an error if the keyspace is sharded, // and produce a warning if the vtgate if 
configured to do so @@ -60,6 +62,8 @@ type VSchema interface { // KeyspaceError returns any error in the keyspace vschema. KeyspaceError(keyspace string) error + GetForeignKeyChecksState() *bool + // GetVSchema returns the latest cached vindexes.VSchema GetVSchema() *vindexes.VSchema @@ -89,6 +93,9 @@ type VSchema interface { // StorePrepareData stores the prepared data in the session. StorePrepareData(name string, v *vtgatepb.PrepareData) + + // GetAggregateUDFs returns the list of aggregate UDFs. + GetAggregateUDFs() []string } // PlannerNameToVersion returns the numerical representation of the planner diff --git a/go/vt/vtgate/planbuilder/planner.go b/go/vt/vtgate/planbuilder/planner.go index b7a918260b7..cf4e110913d 100644 --- a/go/vt/vtgate/planbuilder/planner.go +++ b/go/vt/vtgate/planbuilder/planner.go @@ -45,7 +45,7 @@ func gen4Planner(query string, plannerVersion querypb.ExecuteOptions_PlannerVers } // setCommentDirectivesOnPlan adds comments to queries -func setCommentDirectivesOnPlan(plan logicalPlan, stmt sqlparser.Statement) { +func setCommentDirectivesOnPlan(plan engine.Primitive, stmt sqlparser.Statement) { var directives *sqlparser.CommentDirectives cmt, ok := stmt.(sqlparser.Commented) if !ok { @@ -57,28 +57,23 @@ func setCommentDirectivesOnPlan(plan logicalPlan, stmt sqlparser.Statement) { timeout := queryTimeout(directives) multiShardAutoCommit := directives.IsSet(sqlparser.DirectiveMultiShardAutocommit) - switch plan := plan.(type) { - case *route: - plan.eroute.ScatterErrorsAsWarnings = scatterAsWarns - plan.eroute.QueryTimeout = timeout - case *primitiveWrapper: - setDirective(plan.prim, multiShardAutoCommit, timeout) - case *insert: - setDirective(plan.eInsert, multiShardAutoCommit, timeout) - } + setDirective(plan, multiShardAutoCommit, timeout, scatterAsWarns) } -func setDirective(prim engine.Primitive, msac bool, timeout int) { - switch edml := prim.(type) { +func setDirective(prim engine.Primitive, msac bool, timeout int, scatterAsWarns 
bool) { + switch prim := prim.(type) { case *engine.Insert: - edml.MultiShardAutocommit = msac - edml.QueryTimeout = timeout + prim.MultiShardAutocommit = msac + prim.QueryTimeout = timeout case *engine.Update: - edml.MultiShardAutocommit = msac - edml.QueryTimeout = timeout + prim.MultiShardAutocommit = msac + prim.QueryTimeout = timeout case *engine.Delete: - edml.MultiShardAutocommit = msac - edml.QueryTimeout = timeout + prim.MultiShardAutocommit = msac + prim.QueryTimeout = timeout + case *engine.Route: + prim.ScatterErrorsAsWarnings = scatterAsWarns + prim.QueryTimeout = timeout } } diff --git a/go/vt/vtgate/planbuilder/planner_test.go b/go/vt/vtgate/planbuilder/planner_test.go index 38c579502fe..6ad1bb4116c 100644 --- a/go/vt/vtgate/planbuilder/planner_test.go +++ b/go/vt/vtgate/planbuilder/planner_test.go @@ -19,6 +19,8 @@ package planbuilder import ( "testing" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/sqlparser" @@ -58,7 +60,7 @@ func TestBindingSubquery(t *testing.T) { } for _, testcase := range testcases { t.Run(testcase.query, func(t *testing.T) { - parse, err := sqlparser.Parse(testcase.query) + parse, err := sqlparser.NewTestParser().Parse(testcase.query) require.NoError(t, err) selStmt := parse.(*sqlparser.Select) semTable, err := semantics.Analyze(selStmt, "d", &semantics.FakeSI{ @@ -67,9 +69,13 @@ func TestBindingSubquery(t *testing.T) { "foo": {Name: sqlparser.NewIdentifierCS("foo")}, }, }) + ctx := &plancontext.PlanningContext{ + ReservedVars: sqlparser.NewReservedVars("vt", make(sqlparser.BindVars)), + SemTable: semTable, + } require.NoError(t, err) if testcase.rewrite { - err = queryRewrite(semTable, sqlparser.NewReservedVars("vt", make(sqlparser.BindVars)), selStmt) + err = queryRewrite(ctx, selStmt) require.NoError(t, err) } expr := testcase.extractor(selStmt) diff --git a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go 
b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go index 369a99bf5d3..f103709d9e3 100644 --- a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go +++ b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go @@ -18,7 +18,7 @@ package planbuilder import ( "fmt" - "math/rand" + "math/rand/v2" "strconv" "testing" "time" @@ -30,6 +30,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -52,12 +53,12 @@ const ( func (tc testCase) createPredicate(lvl int) sqlparser.Expr { if lvl >= tc.depth { // we're at max depth, so we just return one of the nodes - n := rand.Intn(tc.nodes) + n := rand.IntN(tc.nodes) return sqlparser.NewColName(fmt.Sprintf("n%d", n)) } - switch nodeType(rand.Intn(int(SIZE))) { + switch nodeType(rand.IntN(int(SIZE))) { case NODE: - n := rand.Intn(tc.nodes) + n := rand.IntN(tc.nodes) return sqlparser.NewColName(fmt.Sprintf("n%d", n)) case NOT: return &sqlparser.NotExpr{ @@ -90,11 +91,12 @@ func TestFuzzRewriting(t *testing.T) { // values - trying TRUE, FALSE and NULL. If the two expressions do not return the same value, // this is considered a test failure. 
+ venv := vtenv.NewTestEnv() start := time.Now() for time.Since(start) < 1*time.Second { tc := testCase{ - nodes: rand.Intn(4) + 1, - depth: rand.Intn(4) + 1, + nodes: rand.IntN(4) + 1, + depth: rand.IntN(4) + 1, } predicate := tc.createPredicate(0) @@ -103,17 +105,19 @@ func TestFuzzRewriting(t *testing.T) { simplified := sqlparser.RewritePredicate(predicate) original, err := evalengine.Translate(predicate, &evalengine.Config{ - Collation: collations.Default(), + Environment: venv, + Collation: collations.MySQL8().DefaultConnectionCharset(), ResolveColumn: resolveForFuzz, }) require.NoError(t, err) simpler, err := evalengine.Translate(simplified.(sqlparser.Expr), &evalengine.Config{ - Collation: collations.Default(), + Environment: venv, + Collation: collations.MySQL8().DefaultConnectionCharset(), ResolveColumn: resolveForFuzz, }) require.NoError(t, err) - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(venv) env.Row = make([]sqltypes.Value, tc.nodes) for i := range env.Row { env.Row[i] = sqltypes.NewInt32(1) @@ -139,7 +143,7 @@ func testValues(t *testing.T, env *evalengine.ExpressionEnv, i int, original, si require.NoError(t, err) v2, err := env.Evaluate(simpler) require.NoError(t, err) - assert.Equal(t, v1.Value(collations.Default()), v2.Value(collations.Default())) + assert.Equal(t, v1.Value(collations.MySQL8().DefaultConnectionCharset()), v2.Value(collations.MySQL8().DefaultConnectionCharset())) if len(env.Row) > i+1 { testValues(t, env, i+1, original, simpler) } diff --git a/go/vt/vtgate/planbuilder/rewrite.go b/go/vt/vtgate/planbuilder/rewrite.go index f59441c77ac..30423229038 100644 --- a/go/vt/vtgate/planbuilder/rewrite.go +++ b/go/vt/vtgate/planbuilder/rewrite.go @@ -18,48 +18,35 @@ package planbuilder import ( "vitess.io/vitess/go/vt/sqlparser" - - "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) type rewriter struct 
{ - semTable *semantics.SemTable - reservedVars *sqlparser.ReservedVars - inSubquery int - err error + err error + ctx *plancontext.PlanningContext } -func queryRewrite(semTable *semantics.SemTable, reservedVars *sqlparser.ReservedVars, statement sqlparser.Statement) error { +func queryRewrite(ctx *plancontext.PlanningContext, statement sqlparser.Statement) error { r := rewriter{ - semTable: semTable, - reservedVars: reservedVars, + ctx: ctx, } - sqlparser.Rewrite(statement, r.rewriteDown, r.rewriteUp) + sqlparser.Rewrite(statement, r.rewriteDown, nil) return nil } -func (r *rewriter) rewriteUp(cursor *sqlparser.Cursor) bool { - _, ok := cursor.Node().(*sqlparser.Subquery) - if ok { - r.inSubquery-- - } - return true -} - func (r *rewriter) rewriteDown(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { case *sqlparser.Select: - rewriteHavingClause(node) + r.rewriteHavingClause(node) case *sqlparser.AliasedTableExpr: - // rewrite names of the routed tables for the subquery - // We only need to do this for non-derived tables and if they are in a subquery - if _, isDerived := node.Expr.(*sqlparser.DerivedTable); isDerived || r.inSubquery == 0 { + if _, isDerived := node.Expr.(*sqlparser.DerivedTable); isDerived { break } // find the tableSet and tableInfo that this table points to // tableInfo should contain the information for the original table that the routed table points to - tableSet := r.semTable.TableSetFor(node) - tableInfo, err := r.semTable.TableInfoFor(tableSet) + tableSet := r.ctx.SemTable.TableSetFor(node) + tableInfo, err := r.ctx.SemTable.TableInfoFor(tableSet) if err != nil { // Fail-safe code, should never happen break @@ -82,28 +69,18 @@ func (r *rewriter) rewriteDown(cursor *sqlparser.Cursor) bool { node.As = tableName.Name } // replace the table name with the original table + tableName.Qualifier = sqlparser.IdentifierCS{} tableName.Name = vindexTable.Name node.Expr = tableName - case *sqlparser.Subquery: - r.inSubquery++ } 
return true } -func rewriteHavingClause(node *sqlparser.Select) { +func (r *rewriter) rewriteHavingClause(node *sqlparser.Select) { if node.Having == nil { return } - selectExprMap := map[string]sqlparser.Expr{} - for _, selectExpr := range node.SelectExprs { - aliasedExpr, isAliased := selectExpr.(*sqlparser.AliasedExpr) - if !isAliased || aliasedExpr.As.IsEmpty() { - continue - } - selectExprMap[aliasedExpr.As.Lowered()] = aliasedExpr.Expr - } - // for each expression in the having clause, we check if it contains aggregation. // if it does, we keep the expression in the having clause ; and if it does not // and the expression is in the select list, we replace the expression by the one @@ -111,40 +88,10 @@ func rewriteHavingClause(node *sqlparser.Select) { exprs := sqlparser.SplitAndExpression(nil, node.Having.Expr) node.Having = nil for _, expr := range exprs { - hasAggr := sqlparser.ContainsAggregation(expr) - if !hasAggr { - sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool { - visitColName(cursor.Node(), selectExprMap, func(original sqlparser.Expr) { - if sqlparser.ContainsAggregation(original) { - hasAggr = true - } - }) - return true - }, nil) - } - if hasAggr { + if operators.ContainsAggr(r.ctx, expr) { node.AddHaving(expr) } else { - sqlparser.Rewrite(expr, func(cursor *sqlparser.Cursor) bool { - visitColName(cursor.Node(), selectExprMap, func(original sqlparser.Expr) { - cursor.Replace(original) - }) - return true - }, nil) node.AddWhere(expr) } } } -func visitColName(cursor sqlparser.SQLNode, selectExprMap map[string]sqlparser.Expr, f func(original sqlparser.Expr)) { - switch x := cursor.(type) { - case *sqlparser.ColName: - if !x.Qualifier.IsEmpty() { - return - } - originalExpr, isInMap := selectExprMap[x.Name.Lowered()] - if isInMap { - f(originalExpr) - } - return - } -} diff --git a/go/vt/vtgate/planbuilder/rewrite_test.go b/go/vt/vtgate/planbuilder/rewrite_test.go index 292c94f448a..7902b69e8f9 100644 --- 
a/go/vt/vtgate/planbuilder/rewrite_test.go +++ b/go/vt/vtgate/planbuilder/rewrite_test.go @@ -19,6 +19,8 @@ package planbuilder import ( "testing" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -74,7 +76,11 @@ func TestHavingRewrite(t *testing.T) { for _, tcase := range tcases { t.Run(tcase.input, func(t *testing.T) { semTable, reservedVars, sel := prepTest(t, tcase.input) - err := queryRewrite(semTable, reservedVars, sel) + ctx := &plancontext.PlanningContext{ + ReservedVars: reservedVars, + SemTable: semTable, + } + err := queryRewrite(ctx, sel) require.NoError(t, err) assert.Equal(t, tcase.output, sqlparser.String(sel)) }) @@ -82,7 +88,7 @@ func TestHavingRewrite(t *testing.T) { } func prepTest(t *testing.T, sql string) (*semantics.SemTable, *sqlparser.ReservedVars, *sqlparser.Select) { - ast, vars, err := sqlparser.Parse2(sql) + ast, vars, err := sqlparser.NewTestParser().Parse2(sql) require.NoError(t, err) sel, isSelectStatement := ast.(*sqlparser.Select) diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go index 63f6d0ea612..e320df14416 100644 --- a/go/vt/vtgate/planbuilder/route.go +++ b/go/vt/vtgate/planbuilder/route.go @@ -22,87 +22,54 @@ import ( "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -var _ logicalPlan = (*route)(nil) - -// route is used to build a Route primitive. -// It's used to build one of the Select routes like -// SelectScatter, etc. Portions of the original Select AST -// are moved into this node, which will be used to build -// the final SQL for this route. -type route struct { - - // Select is the AST for the query fragment that will be - // executed by this route. - Select sqlparser.SelectStatement - - // eroute is the primitive being built. 
- eroute *engine.Route - - // is the engine primitive we will return from the Primitive() method. Note that it could be different than eroute - enginePrimitive engine.Primitive - - // tables keeps track of which tables this route is covering - tables semantics.TableSet -} - -// Primitive implements the logicalPlan interface -func (rb *route) Primitive() engine.Primitive { - return rb.enginePrimitive -} - -// Wireup implements the logicalPlan interface -func (rb *route) Wireup(ctx *plancontext.PlanningContext) error { - rb.prepareTheAST() - +// WireupRoute returns an engine primitive for the given route. +func WireupRoute(ctx *plancontext.PlanningContext, eroute *engine.Route, sel sqlparser.SelectStatement) (engine.Primitive, error) { // prepare the queries we will pass down - rb.eroute.Query = sqlparser.String(rb.Select) + eroute.Query = sqlparser.String(sel) buffer := sqlparser.NewTrackedBuffer(sqlparser.FormatImpossibleQuery) - node := buffer.WriteNode(rb.Select) - parsedQuery := node.ParsedQuery() - rb.eroute.FieldQuery = parsedQuery.Query + node := buffer.WriteNode(sel) + eroute.FieldQuery = node.ParsedQuery().Query // if we have a planable vindex lookup, let's extract it into its own primitive - planableVindex, ok := rb.eroute.RoutingParameters.Vindex.(vindexes.LookupPlanable) + planableVindex, ok := eroute.RoutingParameters.Vindex.(vindexes.LookupPlanable) if !ok { - rb.enginePrimitive = rb.eroute - return nil + return eroute, nil } query, args := planableVindex.Query() - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := ctx.VSchema.Environment().Parser().Parse2(query) if err != nil { - return err + return nil, err } reservedVars := sqlparser.NewReservedVars("vtg", reserved) lookupPrimitive, err := gen4SelectStmtPlanner(query, querypb.ExecuteOptions_Gen4, stmt.(sqlparser.SelectStatement), reservedVars, ctx.VSchema) if err != nil { - return vterrors.Wrapf(err, "failed to plan the lookup query: [%s]", query) + return nil, 
vterrors.Wrapf(err, "failed to plan the lookup query: [%s]", query) } - rb.enginePrimitive = &engine.VindexLookup{ - Opcode: rb.eroute.Opcode, + vdxLookup := &engine.VindexLookup{ + Opcode: eroute.Opcode, Vindex: planableVindex, - Keyspace: rb.eroute.Keyspace, - Values: rb.eroute.Values, - SendTo: rb.eroute, + Keyspace: eroute.Keyspace, + Values: eroute.Values, + SendTo: eroute, Arguments: args, Lookup: lookupPrimitive.primitive, } - rb.eroute.RoutingParameters.Opcode = engine.ByDestination - rb.eroute.RoutingParameters.Values = nil - rb.eroute.RoutingParameters.Vindex = nil + eroute.RoutingParameters.Opcode = engine.ByDestination + eroute.RoutingParameters.Values = nil + eroute.RoutingParameters.Vindex = nil - return nil + return vdxLookup, nil } // prepareTheAST does minor fixups of the SELECT struct before producing the query string -func (rb *route) prepareTheAST() { +func prepareTheAST(sel sqlparser.SelectStatement) { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { switch node := node.(type) { case *sqlparser.Select: @@ -115,19 +82,12 @@ func (rb *route) prepareTheAST() { } case *sqlparser.ComparisonExpr: // 42 = colName -> colName = 42 - b := node.Operator == sqlparser.EqualOp - value := sqlparser.IsValue(node.Left) - name := sqlparser.IsColName(node.Right) - if b && - value && - name { + if node.Operator.IsCommutative() && + !sqlparser.IsColName(node.Left) && + sqlparser.IsColName(node.Right) { node.Left, node.Right = node.Right, node.Left } } return true, nil - }, rb.Select) -} - -func (rb *route) isSingleShard() bool { - return rb.eroute.Opcode.IsSingleShard() + }, sel) } diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 44976815bd2..01dfd8aa387 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -26,7 +26,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - 
"vitess.io/vitess/go/vt/vtgate/planbuilder/operators/ops" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -63,7 +62,7 @@ func gen4SelectStmtPlanner( sel.SQLCalcFoundRows = false } - getPlan := func(selStatement sqlparser.SelectStatement) (logicalPlan, []string, error) { + getPlan := func(selStatement sqlparser.SelectStatement) (engine.Primitive, []string, error) { return newBuildSelectPlan(selStatement, reservedVars, vschema, plannerVersion) } @@ -75,30 +74,29 @@ func gen4SelectStmtPlanner( if shouldRetryAfterPredicateRewriting(plan) { // by transforming the predicates to CNF, the planner will sometimes find better plans // TODO: this should move to the operator side of planning - plan2, tablesUsed := gen4PredicateRewrite(stmt, getPlan) - if plan2 != nil { - return newPlanResult(plan2.Primitive(), tablesUsed...), nil + prim2, tablesUsed := gen4PredicateRewrite(stmt, getPlan) + if prim2 != nil { + return newPlanResult(prim2, tablesUsed...), nil } } - primitive := plan.Primitive() if !isSel { - return newPlanResult(primitive, tablesUsed...), nil + return newPlanResult(plan, tablesUsed...), nil } // this is done because engine.Route doesn't handle the empty result well // if it doesn't find a shard to send the query to. 
// All other engine primitives can handle this, so we only need it when // Route is the last (and only) instruction before the user sees a result - if isOnlyDual(sel) || (len(sel.GroupBy) == 0 && sel.SelectExprs.AllAggregation()) { - switch prim := primitive.(type) { + if isOnlyDual(sel) || (sel.GroupBy == nil && sel.SelectExprs.AllAggregation()) { + switch prim := plan.(type) { case *engine.Route: prim.NoRoutesSpecialHandling = true case *engine.VindexLookup: prim.SendTo.NoRoutesSpecialHandling = true } } - return newPlanResult(primitive, tablesUsed...), nil + return newPlanResult(plan, tablesUsed...), nil } func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select, query string, reservedVars *sqlparser.ReservedVars) (*planResult, error) { @@ -117,7 +115,7 @@ func gen4planSQLCalcFoundRows(vschema plancontext.VSchema, sel *sqlparser.Select if err != nil { return nil, err } - return newPlanResult(plan.Primitive(), tablesUsed...), nil + return newPlanResult(plan, tablesUsed...), nil } func buildSQLCalcFoundRowsPlan( @@ -125,13 +123,13 @@ func buildSQLCalcFoundRowsPlan( sel *sqlparser.Select, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, -) (logicalPlan, []string, error) { +) (engine.Primitive, []string, error) { limitPlan, _, err := newBuildSelectPlan(sel, reservedVars, vschema, Gen4) if err != nil { return nil, nil, err } - statement2, reserved2, err := sqlparser.Parse2(originalQuery) + statement2, reserved2, err := vschema.Environment().Parser().Parse2(originalQuery) if err != nil { return nil, nil, err } @@ -170,10 +168,19 @@ func buildSQLCalcFoundRowsPlan( if err != nil { return nil, nil, err } - return &sqlCalcFoundRows{LimitQuery: limitPlan, CountQuery: countPlan}, tablesUsed, nil + + rb, ok := countPlan.(*engine.Route) + if ok { + // if our count query is an aggregation, we want the no-match result to still return a zero + rb.NoRoutesSpecialHandling = true + } + return &engine.SQLCalcFoundRows{ + LimitPrimitive: 
limitPlan, + CountPrimitive: countPlan, + }, tablesUsed, nil } -func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (logicalPlan, []string, error)) (logicalPlan, []string) { +func gen4PredicateRewrite(stmt sqlparser.Statement, getPlan func(selStatement sqlparser.SelectStatement) (engine.Primitive, []string, error)) (engine.Primitive, []string) { rewritten, isSel := sqlparser.RewritePredicate(stmt).(sqlparser.SelectStatement) if !isSel { // Fail-safe code, should never happen @@ -192,7 +199,7 @@ func newBuildSelectPlan( reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, version querypb.ExecuteOptions_PlannerVersion, -) (plan logicalPlan, tablesUsed []string, err error) { +) (plan engine.Primitive, tablesUsed []string, err error) { ctx, err := plancontext.CreatePlanningContext(selStmt, reservedVars, vschema, version) if err != nil { return nil, nil, err @@ -217,7 +224,7 @@ func newBuildSelectPlan( return nil, nil, err } - plan, err = transformToLogicalPlan(ctx, op) + plan, err = transformToPrimitive(ctx, op) if err != nil { return nil, nil, err } @@ -225,8 +232,8 @@ func newBuildSelectPlan( return plan, operators.TablesUsed(op), nil } -func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (ops.Operator, error) { - err := queryRewrite(ctx.SemTable, reservedVars, selStmt) +func createSelectOperator(ctx *plancontext.PlanningContext, selStmt sqlparser.SelectStatement, reservedVars *sqlparser.ReservedVars) (operators.Operator, error) { + err := queryRewrite(ctx, selStmt) if err != nil { return nil, err } @@ -252,24 +259,16 @@ func isOnlyDual(sel *sqlparser.Select) bool { return ok && tableName.Name.String() == "dual" && tableName.Qualifier.IsEmpty() } -func shouldRetryAfterPredicateRewriting(plan logicalPlan) bool { +func shouldRetryAfterPredicateRewriting(plan engine.Primitive) bool { // if we have a I_S query, but have not 
found table_schema or table_name, let's try CNF - var opcode engine.Opcode - var sysTableTableName map[string]evalengine.Expr - var sysTableTableSchema []evalengine.Expr - - switch routePlan := plan.(type) { - case *route: - opcode = routePlan.eroute.Opcode - sysTableTableName = routePlan.eroute.SysTableTableName - sysTableTableSchema = routePlan.eroute.SysTableTableSchema + switch eroute := plan.(type) { + case *engine.Route: + return eroute.Opcode == engine.DBA && + len(eroute.SysTableTableName) == 0 && + len(eroute.SysTableTableSchema) == 0 default: return false } - - return opcode == engine.DBA && - len(sysTableTableName) == 0 && - len(sysTableTableSchema) == 0 } func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engine.Primitive, error) { @@ -290,7 +289,10 @@ func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engi if isLFunc { elem := &engine.LockFunc{Typ: expr.Expr.(*sqlparser.LockingFunc)} if lFunc.Name != nil { - n, err := evalengine.Translate(lFunc.Name, nil) + n, err := evalengine.Translate(lFunc.Name, &evalengine.Config{ + Collation: vschema.ConnCollation(), + Environment: vschema.Environment(), + }) if err != nil { return nil, err } @@ -302,7 +304,10 @@ func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engi if len(lockFunctions) > 0 { return nil, vterrors.VT12001(fmt.Sprintf("LOCK function and other expression: [%s] in same select query", sqlparser.String(expr))) } - exprs[i], err = evalengine.Translate(expr.Expr, &evalengine.Config{Collation: vschema.ConnCollation()}) + exprs[i], err = evalengine.Translate(expr.Expr, &evalengine.Config{ + Collation: vschema.ConnCollation(), + Environment: vschema.Environment(), + }) if err != nil { return nil, nil } diff --git a/go/vt/vtgate/planbuilder/semi_join.go b/go/vt/vtgate/planbuilder/semi_join.go deleted file mode 100644 index b12b04a1ed5..00000000000 --- a/go/vt/vtgate/planbuilder/semi_join.go +++ /dev/null @@ -1,59 +0,0 @@ -/* 
-Copyright 2021 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*semiJoin)(nil) - -// semiJoin is the logicalPlan for engine.SemiJoin. -// This gets built if a rhs is correlated and can -// be pulled out but requires some variables to be supplied from outside. -type semiJoin struct { - rhs logicalPlan - lhs logicalPlan - cols []int - - vars map[string]int - - // LHSColumns are the columns from the LHS used for the join. - // These are the same columns pushed on the LHS that are now used in the vars field - LHSColumns []*sqlparser.ColName -} - -// newSemiJoin builds a new semiJoin. 
-func newSemiJoin(lhs, rhs logicalPlan, vars map[string]int, lhsCols []*sqlparser.ColName) *semiJoin { - return &semiJoin{ - rhs: rhs, - lhs: lhs, - vars: vars, - LHSColumns: lhsCols, - } -} - -// Primitive implements the logicalPlan interface -func (ps *semiJoin) Primitive() engine.Primitive { - return &engine.SemiJoin{ - Left: ps.lhs.Primitive(), - Right: ps.rhs.Primitive(), - Vars: ps.vars, - Cols: ps.cols, - } -} diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go index 7b1e584132d..bf6820b7489 100644 --- a/go/vt/vtgate/planbuilder/set.go +++ b/go/vt/vtgate/planbuilder/set.go @@ -55,12 +55,15 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult var setOps []engine.SetOp var err error - ec := new(expressionConverter) + ec := &expressionConverter{ + env: vschema.Environment(), + collation: vschema.ConnCollation(), + } for _, expr := range stmt.Exprs { // AST struct has been prepared before getting here, so no scope here means that // we have a UDV. 
If the original query didn't explicitly specify the scope, it - // would have been explictly set to sqlparser.SessionStr before reaching this + // would have been explicitly set to sqlparser.SessionStr before reaching this // phase of planning switch expr.Var.Scope { case sqlparser.GlobalScope: @@ -80,7 +83,7 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult } setOps = append(setOps, setOp) case sqlparser.NextTxScope, sqlparser.SessionScope: - planFunc, err := sysvarPlanningFuncs.Get(expr) + planFunc, err := sysvarPlanningFuncs.Get(vschema.Environment(), expr) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index aba5b1a9016..734885c9dd9 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -20,6 +20,7 @@ import ( "fmt" "regexp" "sort" + "strconv" "strings" "sync" @@ -116,6 +117,8 @@ func buildShowBasicPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) return buildShowTargetPlan(vschema) case sqlparser.VschemaTables: return buildVschemaTablesPlan(vschema) + case sqlparser.VschemaKeyspaces: + return buildVschemaKeyspacesPlan(vschema) case sqlparser.VschemaVindexes: return buildVschemaVindexesPlan(show, vschema) } @@ -164,7 +167,7 @@ func buildVariablePlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) ( } func buildShowTblPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { - if !show.DbName.IsEmpty() { + if show.DbName.NotEmpty() { show.Tbl.Qualifier = sqlparser.NewIdentifierCS(show.DbName.String()) // Remove Database Name from the query. 
show.DbName = sqlparser.NewIdentifierCS("") @@ -174,7 +177,7 @@ func buildShowTblPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (e var ks *vindexes.Keyspace var err error - if !show.Tbl.Qualifier.IsEmpty() && sqlparser.SystemSchema(show.Tbl.Qualifier.String()) { + if show.Tbl.Qualifier.NotEmpty() && sqlparser.SystemSchema(show.Tbl.Qualifier.String()) { ks, err = vschema.AnyKeyspace() if err != nil { return nil, err @@ -486,7 +489,7 @@ func buildCreateTblPlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema) var ks *vindexes.Keyspace var err error - if !show.Op.Qualifier.IsEmpty() && sqlparser.SystemSchema(show.Op.Qualifier.String()) { + if show.Op.Qualifier.NotEmpty() && sqlparser.SystemSchema(show.Op.Qualifier.String()) { ks, err = vschema.AnyKeyspace() if err != nil { return nil, err @@ -519,7 +522,7 @@ func buildCreateTblPlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema) func buildCreatePlan(show *sqlparser.ShowCreate, vschema plancontext.VSchema) (engine.Primitive, error) { dbName := "" - if !show.Op.Qualifier.IsEmpty() { + if show.Op.Qualifier.NotEmpty() { dbName = show.Op.Qualifier.String() } @@ -558,7 +561,7 @@ func buildShowVGtidPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) } return &engine.OrderedAggregate{ Aggregates: []*engine.AggregateParams{ - engine.NewAggregateParam(popcode.AggregateGtid, 1, "global vgtid_executed"), + engine.NewAggregateParam(popcode.AggregateGtid, 1, "global vgtid_executed", vschema.Environment().CollationEnv()), }, TruncateColumnCount: 2, Input: send, @@ -567,7 +570,7 @@ func buildShowVGtidPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) func buildShowGtidPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) (engine.Primitive, error) { dbName := "" - if !show.DbName.IsEmpty() { + if show.DbName.NotEmpty() { dbName = show.DbName.String() } dest, ks, _, err := vschema.TargetDestination(dbName) @@ -641,6 +644,26 @@ func buildEnginesPlan() (engine.Primitive, error) { 
buildVarCharFields("Engine", "Support", "Comment", "Transactions", "XA", "Savepoints")), nil } +func buildVschemaKeyspacesPlan(vschema plancontext.VSchema) (engine.Primitive, error) { + vs := vschema.GetVSchema() + var rows [][]sqltypes.Value + for ksName, ks := range vs.Keyspaces { + var row []sqltypes.Value + row = append(row, sqltypes.NewVarChar(ksName)) + row = append(row, sqltypes.NewVarChar(strconv.FormatBool(ks.Keyspace.Sharded))) + fkMode, _ := vschema.ForeignKeyMode(ksName) + row = append(row, sqltypes.NewVarChar(fkMode.String())) + ksError := "" + if ks.Error != nil { + ksError = ks.Error.Error() + } + row = append(row, sqltypes.NewVarChar(ksError)) + rows = append(rows, row) + } + + return engine.NewRowsPrimitive(rows, buildVarCharFields("Keyspace", "Sharded", "Foreign Key", "Comment")), nil +} + func buildVschemaTablesPlan(vschema plancontext.VSchema) (engine.Primitive, error) { vs := vschema.GetVSchema() ks, err := vschema.DefaultKeyspace() diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go index b36133bb1c7..931c914149d 100644 --- a/go/vt/vtgate/planbuilder/show_test.go +++ b/go/vt/vtgate/planbuilder/show_test.go @@ -22,6 +22,7 @@ import ( "testing" "vitess.io/vitess/go/test/vschemawrapper" + "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/require" @@ -35,6 +36,7 @@ import ( func TestBuildDBPlan(t *testing.T) { vschema := &vschemawrapper.VSchemaWrapper{ Keyspace: &vindexes.Keyspace{Name: "main"}, + Env: vtenv.NewTestEnv(), } testCases := []struct { @@ -50,7 +52,7 @@ func TestBuildDBPlan(t *testing.T) { for _, s := range testCases { t.Run(s.query, func(t *testing.T) { - parserOut, err := sqlparser.Parse(s.query) + parserOut, err := sqlparser.NewTestParser().Parse(s.query) require.NoError(t, err) show := parserOut.(*sqlparser.Show) @@ -76,7 +78,7 @@ func TestGenerateCharsetRows(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + 
collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), } rows2 := [][]sqltypes.Value{ @@ -88,7 +90,7 @@ func TestGenerateCharsetRows(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), } @@ -110,7 +112,7 @@ func TestGenerateCharsetRows(t *testing.T) { for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { - stmt, err := sqlparser.Parse(tc.input) + stmt, err := sqlparser.NewTestParser().Parse(tc.input) require.NoError(t, err) match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic) filter := match.Filter diff --git a/go/vt/vtgate/planbuilder/simple_projection.go b/go/vt/vtgate/planbuilder/simple_projection.go deleted file mode 100644 index 4c29ef0ae9a..00000000000 --- a/go/vt/vtgate/planbuilder/simple_projection.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*simpleProjection)(nil) - -// simpleProjection is used for wrapping a derived table. -// This primitive wraps any derived table that results -// in something that's not a route. It builds a -// 'table' for the derived table allowing higher level -// constructs to reference its columns. 
If a derived table -// results in a route primitive, we instead build -// a new route that keeps the subquery in the FROM -// clause, because a route is more versatile than -// a simpleProjection. -type simpleProjection struct { - logicalPlanCommon - eSimpleProj *engine.SimpleProjection -} - -// Primitive implements the logicalPlan interface -func (sq *simpleProjection) Primitive() engine.Primitive { - sq.eSimpleProj.Input = sq.input.Primitive() - return sq.eSimpleProj -} diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 1e106adacc0..61ed220bd44 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -21,19 +21,15 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/test/vschemawrapper" - - "vitess.io/vitess/go/vt/vterrors" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/vtgate/simplifier" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/vschemawrapper" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/simplifier" ) // TestSimplifyBuggyQuery should be used to whenever we get a planner bug reported @@ -45,10 +41,11 @@ func TestSimplifyBuggyQuery(t *testing.T) { vschema := &vschemawrapper.VSchemaWrapper{ V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, + Env: vtenv.NewTestEnv(), } - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(query) require.NoError(t, err) - rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) simplified := simplifier.SimplifyStatement( 
@@ -67,10 +64,11 @@ func TestSimplifyPanic(t *testing.T) { vschema := &vschemawrapper.VSchemaWrapper{ V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, + Env: vtenv.NewTestEnv(), } - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(query) require.NoError(t, err) - rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) simplified := simplifier.SimplifyStatement( @@ -88,19 +86,20 @@ func TestUnsupportedFile(t *testing.T) { vschema := &vschemawrapper.VSchemaWrapper{ V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, + Env: vtenv.NewTestEnv(), } fmt.Println(vschema) for _, tcase := range readJSONTests("unsupported_cases.txt") { t.Run(tcase.Query, func(t *testing.T) { log.Errorf("unsupported_cases.txt - %s", tcase.Query) - stmt, reserved, err := sqlparser.Parse2(tcase.Query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(tcase.Query) require.NoError(t, err) _, ok := stmt.(sqlparser.SelectStatement) if !ok { t.Skip() return } - rewritten, err := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, err := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) if err != nil { t.Skip() } @@ -109,7 +108,7 @@ func TestUnsupportedFile(t *testing.T) { reservedVars := sqlparser.NewReservedVars("vtg", reserved) ast := rewritten.AST origQuery := sqlparser.String(ast) - stmt, _, _ = sqlparser.Parse2(tcase.Query) + stmt, _, _ = sqlparser.NewTestParser().Parse2(tcase.Query) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), vschema.CurrentDb(), @@ -130,11 +129,11 @@ func TestUnsupportedFile(t 
*testing.T) { } func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := sqlparser.NewTestParser().Parse2(query) if err != nil { panic(err) } - rewritten, _ := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil) + rewritten, _ := sqlparser.RewriteAST(stmt, vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) ast := rewritten.AST _, expected := BuildFromStmt(context.Background(), query, ast, reservedVars, vschema, rewritten.BindVarNeeds, true, true) if expected == nil { @@ -169,7 +168,7 @@ func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema * return false } - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := sqlparser.NewTestParser().Parse2(query) if err != nil { panic(err.Error()) } diff --git a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go index daf19ced859..5d877cd341d 100644 --- a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go +++ b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go @@ -20,16 +20,15 @@ import ( "sort" "strings" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func selectUnshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, ks *vindexes.Keyspace) (logicalPlan, []string, error) { +func selectUnshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.SelectStatement, ks *vindexes.Keyspace) (engine.Primitive, []string, error) { // 
this method is used when the query we are handling has all tables in the same unsharded keyspace sqlparser.SafeRewrite(stmt, nil, func(cursor *sqlparser.Cursor) bool { switch node := cursor.Node().(type) { @@ -47,21 +46,18 @@ func selectUnshardedShortcut(ctx *plancontext.PlanningContext, stmt sqlparser.Se if err != nil { return nil, nil, err } - plan := &route{ - eroute: &engine.Route{ - RoutingParameters: &engine.RoutingParameters{ - Opcode: engine.Unsharded, - Keyspace: ks, - }, - TableName: strings.Join(escapedTableNames(tableNames), ", "), + eroute := &engine.Route{ + RoutingParameters: &engine.RoutingParameters{ + Opcode: engine.Unsharded, + Keyspace: ks, }, - Select: stmt, + TableName: strings.Join(escapedTableNames(tableNames), ", "), } - - if err := plan.Wireup(ctx); err != nil { + prim, err := WireupRoute(ctx, eroute, stmt) + if err != nil { return nil, nil, err } - return plan, operators.QualifiedTableNames(ks, tableNames), nil + return prim, operators.QualifiedTableNames(ks, tableNames), nil } func escapedTableNames(tableNames []sqlparser.TableName) []string { @@ -106,7 +102,7 @@ func getTableNames(semTable *semantics.SemTable) ([]sqlparser.TableName, error) func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { switch expr := expr.(type) { case *sqlparser.AliasedExpr: - sqlparser.RemoveKeyspaceFromColName(expr.Expr) + sqlparser.RemoveKeyspaceInCol(expr.Expr) case *sqlparser.StarExpr: expr.TableName.Qualifier = sqlparser.NewIdentifierCS("") } diff --git a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go b/go/vt/vtgate/planbuilder/sql_calc_found_rows.go deleted file mode 100644 index 62823a8c10e..00000000000 --- a/go/vt/vtgate/planbuilder/sql_calc_found_rows.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2020 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" -) - -var _ logicalPlan = (*sqlCalcFoundRows)(nil) - -type sqlCalcFoundRows struct { - LimitQuery, CountQuery logicalPlan -} - -// Primitive implements the logicalPlan interface -func (s *sqlCalcFoundRows) Primitive() engine.Primitive { - countPrim := s.CountQuery.Primitive() - rb, ok := countPrim.(*engine.Route) - if ok { - // if our count query is an aggregation, we want the no-match result to still return a zero - rb.NoRoutesSpecialHandling = true - } - return engine.SQLCalcFoundRows{ - LimitPrimitive: s.LimitQuery.Primitive(), - CountPrimitive: countPrim, - } -} diff --git a/go/vt/vtgate/planbuilder/system_variables.go b/go/vt/vtgate/planbuilder/system_variables.go index eccb263c65a..8ef968a2ac8 100644 --- a/go/vt/vtgate/planbuilder/system_variables.go +++ b/go/vt/vtgate/planbuilder/system_variables.go @@ -22,6 +22,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/sysvars" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -29,6 +30,7 @@ import ( type sysvarPlanCache struct { funcs map[string]planFunc once sync.Once + env *vtenv.Environment } func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVariable, f func(setting) planFunc) { @@ -53,21 +55,25 @@ func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVaria } func (pc *sysvarPlanCache) parseAndBuildDefaultValue(sysvar sysvars.SystemVariable) evalengine.Expr { - stmt, err := sqlparser.Parse(fmt.Sprintf("select 
%s", sysvar.Default)) + stmt, err := pc.env.Parser().Parse(fmt.Sprintf("select %s", sysvar.Default)) if err != nil { panic(fmt.Sprintf("bug in set plan init - default value for %s not parsable: %s", sysvar.Name, sysvar.Default)) } sel := stmt.(*sqlparser.Select) aliasedExpr := sel.SelectExprs[0].(*sqlparser.AliasedExpr) - def, err := evalengine.Translate(aliasedExpr.Expr, nil) + def, err := evalengine.Translate(aliasedExpr.Expr, &evalengine.Config{ + Collation: pc.env.CollationEnv().DefaultConnectionCharset(), + Environment: pc.env, + }) if err != nil { panic(fmt.Sprintf("bug in set plan init - default value for %s not able to convert to evalengine.Expr: %s", sysvar.Name, sysvar.Default)) } return def } -func (pc *sysvarPlanCache) init() { +func (pc *sysvarPlanCache) init(env *vtenv.Environment) { pc.once.Do(func() { + pc.env = env pc.funcs = make(map[string]planFunc) pc.initForSettings(sysvars.ReadOnly, buildSetOpReadOnly) pc.initForSettings(sysvars.IgnoreThese, buildSetOpIgnore) @@ -80,8 +86,8 @@ func (pc *sysvarPlanCache) init() { var sysvarPlanningFuncs sysvarPlanCache -func (pc *sysvarPlanCache) Get(expr *sqlparser.SetExpr) (planFunc, error) { - pc.init() +func (pc *sysvarPlanCache) Get(env *vtenv.Environment, expr *sqlparser.SetExpr) (planFunc, error) { + pc.init(env) pf, ok := pc.funcs[expr.Var.Name.Lowered()] if !ok { return nil, vterrors.VT05006(sqlparser.String(expr)) diff --git a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json index e47b17cd2ae..b7328dc5c0d 100644 --- a/go/vt/vtgate/planbuilder/testdata/aggr_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/aggr_cases.json @@ -539,6 +539,50 @@ ] } }, + { + "comment": "WITH ROLLUP on unsharded keyspaces", + "query": "select a, b, count(*) from unsharded group by a, b with rollup", + "plan": { + "QueryType": "SELECT", + "Original": "select a, b, count(*) from unsharded group by a, b with rollup", + "Instructions": { + "OperatorType": "Route", + 
"Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select a, b, count(*) from unsharded where 1 != 1 group by a, b with rollup", + "Query": "select a, b, count(*) from unsharded group by a, b with rollup", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "WITH ROLLUP that is pushed to single shard", + "query": "select id, user_id, count(*) from music group by id, user_id with rollup", + "plan": { + "QueryType": "SELECT", + "Original": "select id, user_id, count(*) from music group by id, user_id with rollup", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, user_id, count(*) from music where 1 != 1 group by id, user_id with rollup", + "Query": "select id, user_id, count(*) from music group by id, user_id with rollup", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, { "comment": "count with distinct no unique vindex, count expression aliased", "query": "select col1, count(distinct col2) c2 from user group by col1", @@ -675,10 +719,15 @@ } }, { - "comment": "scatter aggregate group by aggregate function", + "comment": "scatter aggregate group by aggregate function - since we don't have authoratative columns for user, we can't be sure that the user isn't referring a column named b", "query": "select count(*) b from user group by b", "plan": "VT03005: cannot group on 'count(*)'" }, + { + "comment": "scatter aggregate group by aggregate function with column information", + "query": "select count(*) b from authoritative group by b", + "plan": "VT03005: cannot group on 'b'" + }, { "comment": "scatter aggregate multiple group by (columns)", "query": "select a, b, count(*) from user group by a, b", @@ -808,7 +857,7 @@ { "comment": "scatter aggregate group by invalid column number", "query": "select col from user group by 2", - "plan": "Unknown column '2' 
in 'group statement'" + "plan": "Unknown column '2' in 'group clause'" }, { "comment": "scatter aggregate order by null", @@ -893,7 +942,7 @@ }, "FieldQuery": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` where 1 != 1 group by a, b, c, weight_string(a), weight_string(b), weight_string(c)", "OrderBy": "(0|5) ASC, (1|6) ASC, (2|7) ASC", - "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, b, c, weight_string(a), weight_string(b), weight_string(c) order by a asc, b asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(a), weight_string(b), weight_string(c) from `user` group by a, b, c, weight_string(a), weight_string(b), weight_string(c) order by `user`.a asc, `user`.b asc, `user`.c asc", "Table": "`user`" } ] @@ -925,7 +974,7 @@ }, "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", "OrderBy": "(3|5) ASC, (1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by `user`.d asc, `user`.b asc, `user`.a asc, `user`.c asc", "Table": "`user`" } ] @@ -957,7 +1006,7 @@ }, "FieldQuery": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` where 1 != 1 group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c)", "OrderBy": "(3|5) ASC, 
(1|6) ASC, (0|7) ASC, (2|8) ASC", - "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by d asc, b asc, a asc, c asc", + "Query": "select a, b, c, d, count(*), weight_string(d), weight_string(b), weight_string(a), weight_string(c) from `user` group by d, b, a, c, weight_string(d), weight_string(b), weight_string(a), weight_string(c) order by `user`.d asc, `user`.b asc, `user`.a asc, `user`.c asc", "Table": "`user`" } ] @@ -989,7 +1038,7 @@ }, "FieldQuery": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` where 1 != 1 group by a, c, b, weight_string(a), weight_string(c), weight_string(b)", "OrderBy": "(0|4) DESC, (2|5) DESC, (1|6) ASC", - "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by a, c, b, weight_string(a), weight_string(c), weight_string(b) order by a desc, c desc, b asc", + "Query": "select a, b, c, count(*), weight_string(a), weight_string(c), weight_string(b) from `user` group by a, c, b, weight_string(a), weight_string(c), weight_string(b) order by a desc, c desc, `user`.b asc", "Table": "`user`" } ] @@ -1041,32 +1090,6 @@ ] } }, - { - "comment": "Group by with collate operator", - "query": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "plan": { - "QueryType": "SELECT", - "Original": "select user.col1 as a from user where user.id = 5 group by a collate utf8_general_ci", - "Instructions": { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col1 as a from `user` where 1 != 1 group by a collate utf8_general_ci", - "Query": "select `user`.col1 as a from `user` where `user`.id = 5 group by a collate utf8_general_ci", - "Table": "`user`", - "Values": [ - "5" 
- ], - "Vindex": "user_index" - }, - "TablesUsed": [ - "user.user" - ] - } - }, { "comment": "routing rules for aggregates", "query": "select id, count(*) from route2 group by id", @@ -1103,7 +1126,7 @@ "Sharded": true }, "FieldQuery": "select col from ref where 1 != 1", - "Query": "select col from ref order by col asc", + "Query": "select col from ref order by ref.col asc", "Table": "ref" }, "TablesUsed": [ @@ -1207,7 +1230,7 @@ { "comment": "Group by out of range column number (code is duplicated from symab).", "query": "select id from user group by 2", - "plan": "Unknown column '2' in 'group statement'" + "plan": "Unknown column '2' in 'group clause'" }, { "comment": "here it is safe to remove the order by on the derived table since it will not influence the output of the count(*)", @@ -1584,10 +1607,10 @@ }, { "comment": "weight_string addition to group by", - "query": "select lower(textcol1) as v, count(*) from user group by v", + "query": "select lower(col1) as v, count(*) from authoritative group by v", "plan": { "QueryType": "SELECT", - "Original": "select lower(textcol1) as v, count(*) from user group by v", + "Original": "select lower(col1) as v, count(*) from authoritative group by v", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", @@ -1602,24 +1625,24 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` where 1 != 1 group by v, weight_string(lower(textcol1))", + "FieldQuery": "select lower(col1) as v, count(*), weight_string(lower(col1)) from authoritative where 1 != 1 group by lower(col1), weight_string(lower(col1))", "OrderBy": "(0|2) ASC", - "Query": "select lower(textcol1) as v, count(*), weight_string(lower(textcol1)) from `user` group by v, weight_string(lower(textcol1)) order by v asc", - "Table": "`user`" + "Query": "select lower(col1) as v, count(*), weight_string(lower(col1)) from authoritative group by lower(col1), 
weight_string(lower(col1)) order by lower(col1) asc", + "Table": "authoritative" } ] }, "TablesUsed": [ - "user.user" + "user.authoritative" ] } }, { "comment": "weight_string addition to group by when also there in order by", - "query": "select char_length(texcol1) as a, count(*) from user group by a order by a", + "query": "select char_length(col1) as a, count(*) from authoritative group by a order by a", "plan": { "QueryType": "SELECT", - "Original": "select char_length(texcol1) as a, count(*) from user group by a order by a", + "Original": "select char_length(col1) as a, count(*) from authoritative group by a order by a", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", @@ -1634,15 +1657,15 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` where 1 != 1 group by a, weight_string(char_length(texcol1))", + "FieldQuery": "select char_length(col1) as a, count(*), weight_string(char_length(col1)) from authoritative where 1 != 1 group by char_length(col1), weight_string(char_length(col1))", "OrderBy": "(0|2) ASC", - "Query": "select char_length(texcol1) as a, count(*), weight_string(char_length(texcol1)) from `user` group by a, weight_string(char_length(texcol1)) order by a asc", - "Table": "`user`" + "Query": "select char_length(col1) as a, count(*), weight_string(char_length(col1)) from authoritative group by char_length(col1), weight_string(char_length(col1)) order by char_length(authoritative.col1) asc", + "Table": "authoritative" } ] }, "TablesUsed": [ - "user.user" + "user.authoritative" ] } }, @@ -1665,7 +1688,7 @@ }, "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Query": "select id, weight_string(id) from `user` order by id asc limit 2", "ResultColumns": 1, "Table": "`user`" } @@ -1699,7 +1722,7 @@ }, 
"FieldQuery": "select col, id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(1|2) ASC", - "Query": "select col, id, weight_string(id) from `user` order by id asc", + "Query": "select col, id, weight_string(id) from `user` order by `user`.id asc", "ResultColumns": 2, "Table": "`user`" }, @@ -2009,19 +2032,20 @@ }, { "comment": "Less Equal filter on scatter with grouping", - "query": "select col, count(*) a from user group by col having a <= 10", + "query": "select col1, count(*) a from user group by col1 having a <= 10", "plan": { "QueryType": "SELECT", - "Original": "select col, count(*) a from user group by col having a <= 10", + "Original": "select col1, count(*) a from user group by col1 having a <= 10", "Instructions": { "OperatorType": "Filter", "Predicate": "count(*) <= 10", + "ResultColumns": 2, "Inputs": [ { "OperatorType": "Aggregate", "Variant": "Ordered", "Aggregates": "sum_count_star(1) AS a", - "GroupBy": "0", + "GroupBy": "(0|2)", "Inputs": [ { "OperatorType": "Route", @@ -2030,9 +2054,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, count(*) as a from `user` where 1 != 1 group by col", - "OrderBy": "0 ASC", - "Query": "select col, count(*) as a from `user` group by col order by col asc", + "FieldQuery": "select col1, count(*) as a, weight_string(col1) from `user` where 1 != 1 group by col1, weight_string(col1)", + "OrderBy": "(0|2) ASC", + "Query": "select col1, count(*) as a, weight_string(col1) from `user` group by col1, weight_string(col1) order by col1 asc", "Table": "`user`" } ] @@ -2046,10 +2070,10 @@ }, { "comment": "We should be able to find grouping keys on ordered aggregates", - "query": "select count(*) as a, val1 from user group by val1 having a = 1.00", + "query": "select count(*) as a, col2 from user group by col2 having a = 1.00", "plan": { "QueryType": "SELECT", - "Original": "select count(*) as a, val1 from user group by val1 having a = 1.00", + "Original": "select count(*) as a, col2 from user group 
by col2 having a = 1.00", "Instructions": { "OperatorType": "Filter", "Predicate": "count(*) = 1.00", @@ -2068,9 +2092,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select count(*) as a, val1, weight_string(val1) from `user` where 1 != 1 group by val1, weight_string(val1)", + "FieldQuery": "select count(*) as a, col2, weight_string(col2) from `user` where 1 != 1 group by col2, weight_string(col2)", "OrderBy": "(1|2) ASC", - "Query": "select count(*) as a, val1, weight_string(val1) from `user` group by val1, weight_string(val1) order by val1 asc", + "Query": "select count(*) as a, col2, weight_string(col2) from `user` group by col2, weight_string(col2) order by col2 asc", "Table": "`user`" } ] @@ -2620,10 +2644,10 @@ }, { "comment": "group by column alias", - "query": "select ascii(val1) as a, count(*) from user group by a", + "query": "select ascii(col2) as a, count(*) from user group by a", "plan": { "QueryType": "SELECT", - "Original": "select ascii(val1) as a, count(*) from user group by a", + "Original": "select ascii(col2) as a, count(*) from user group by a", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", @@ -2638,9 +2662,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` where 1 != 1 group by a, weight_string(ascii(val1))", + "FieldQuery": "select ascii(col2) as a, count(*), weight_string(ascii(col2)) from `user` where 1 != 1 group by ascii(col2), weight_string(ascii(col2))", "OrderBy": "(0|2) ASC", - "Query": "select ascii(val1) as a, count(*), weight_string(ascii(val1)) from `user` group by a, weight_string(ascii(val1)) order by a asc", + "Query": "select ascii(col2) as a, count(*), weight_string(ascii(col2)) from `user` group by ascii(col2), weight_string(ascii(col2)) order by ascii(col2) asc", "Table": "`user`" } ] @@ -2843,11 +2867,6 @@ ] } }, - { - "comment": "select count(distinct user_id, name) from user", - "query": "select count(distinct 
user_id, name) from user", - "plan": "VT03001: aggregate functions take a single argument 'count(distinct user_id, `name`)'" - }, { "comment": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", "query": "select sum(col) from (select user.col as col, 32 from user join user_extra) t", @@ -2878,8 +2897,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select sum(col), 32 from (select `user`.col as col, 32 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select sum(col), 32 from (select `user`.col as col, 32 from `user`) as t", + "FieldQuery": "select sum(col) from (select `user`.col as col, 32 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select sum(col) from (select `user`.col as col, 32 from `user`) as t", "Table": "`user`" }, { @@ -2989,7 +3008,7 @@ "Original": "select foo, sum(foo) as fooSum, sum(bar) as barSum from user group by foo having fooSum+sum(bar) = 42", "Instructions": { "OperatorType": "Filter", - "Predicate": "sum(foo) + sum(bar) = 42", + "Predicate": "sum(`user`.foo) + sum(bar) = 42", "ResultColumns": 3, "Inputs": [ { @@ -3333,10 +3352,10 @@ }, { "comment": "group by and ',' joins", - "query": "select user.id from user, user_extra group by id", + "query": "select user.id from user, user_extra group by user.id", "plan": { "QueryType": "SELECT", - "Original": "select user.id from user, user_extra group by id", + "Original": "select user.id from user, user_extra group by user.id", "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", @@ -3356,9 +3375,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by id, weight_string(`user`.id)", + "FieldQuery": "select `user`.id, weight_string(`user`.id) from `user` where 1 != 1 group by `user`.id, weight_string(`user`.id)", "OrderBy": "(0|1) ASC", - "Query": "select `user`.id, weight_string(`user`.id) from `user` group by id, weight_string(`user`.id) order by id 
asc", + "Query": "select `user`.id, weight_string(`user`.id) from `user` group by `user`.id, weight_string(`user`.id) order by `user`.id asc", "Table": "`user`" }, { @@ -3395,9 +3414,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 2 - ], + "Columns": "2", "Inputs": [ { "OperatorType": "Limit", @@ -3410,8 +3427,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", - "Query": "select phone, id, city from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", + "FieldQuery": "select x.phone, x.id, x.city from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.phone, x.id, x.city from (select phone, id, city from `user` where id > 12) as x limit 10", "Table": "`user`" } ] @@ -3438,9 +3455,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 3 - ], + "Columns": "3", "Inputs": [ { "OperatorType": "Limit", @@ -3453,8 +3468,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city, 1 from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", - "Query": "select phone, id, city, 1 from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", + "FieldQuery": "select x.phone, x.id, x.city, 1 from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.phone, x.id, x.city, 1 from (select phone, id, city from `user` where id > 12) as x limit 10", "Table": "`user`" } ] @@ -3493,26 +3508,38 @@ "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + 
"Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select x.`user.id` from (select `user`.id as `user.id` from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.`user.id` from (select `user`.id as `user.id` from `user`) as x limit 10", + "Table": "`user`" + } + ] }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.id = :user_id", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select x.col from (select user_extra.col as col from user_extra where 1 != 1) as x where 1 != 1", + "Query": "select x.col from (select user_extra.col as col from user_extra where user_extra.id = :user_id) as x limit 10", + "Table": "user_extra" + } + ] } ] } @@ -3541,11 +3568,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 2, - 3 - ], + "Columns": "1,2,3", "Inputs": [ { "OperatorType": "Limit", @@ -3558,9 +3581,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1", + "FieldQuery": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1", "OrderBy": "(1|3) ASC", - "Query": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where val2 < 4) as x order by val1 asc limit :__upper_limit", + "Query": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where val2 < 4) as x order by `user`.val1 asc limit 2", "Table": "`user`" } ] @@ -4845,7 +4868,8 @@ "Instructions": { "OperatorType": "Aggregate", "Variant": "Scalar", - "Aggregates": "min(0 COLLATE latin1_swedish_ci) AS 
min(textcol1), max(1 COLLATE latin1_swedish_ci) AS max(textcol2), sum_distinct(2 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(3 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "Aggregates": "min(0 COLLATE latin1_swedish_ci) AS min(textcol1), max(1|4) AS max(textcol2), sum_distinct(2 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(3 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "ResultColumns": 4, "Inputs": [ { "OperatorType": "Route", @@ -4854,9 +4878,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select min(textcol1), max(textcol2), textcol1, textcol1 from `user` where 1 != 1 group by textcol1", + "FieldQuery": "select min(textcol1), max(textcol2), textcol1, textcol1, weight_string(textcol2) from `user` where 1 != 1 group by textcol1, weight_string(textcol2)", "OrderBy": "2 ASC COLLATE latin1_swedish_ci", - "Query": "select min(textcol1), max(textcol2), textcol1, textcol1 from `user` group by textcol1 order by textcol1 asc", + "Query": "select min(textcol1), max(textcol2), textcol1, textcol1, weight_string(textcol2) from `user` group by textcol1, weight_string(textcol2) order by textcol1 asc", "Table": "`user`" } ] @@ -4875,8 +4899,9 @@ "Instructions": { "OperatorType": "Aggregate", "Variant": "Ordered", - "Aggregates": "min(1 COLLATE latin1_swedish_ci) AS min(textcol1), max(2 COLLATE latin1_swedish_ci) AS max(textcol2), sum_distinct(3 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(4 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", + "Aggregates": "min(1 COLLATE latin1_swedish_ci) AS min(textcol1), max(2|5) AS max(textcol2), sum_distinct(3 COLLATE latin1_swedish_ci) AS sum(distinct textcol1), count_distinct(4 COLLATE latin1_swedish_ci) AS count(distinct textcol1)", "GroupBy": "0", + "ResultColumns": 5, "Inputs": [ { "OperatorType": "Route", @@ -4885,9 +4910,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select col, min(textcol1), max(textcol2), 
textcol1, textcol1 from `user` where 1 != 1 group by col, textcol1", + "FieldQuery": "select col, min(textcol1), max(textcol2), textcol1, textcol1, weight_string(textcol2) from `user` where 1 != 1 group by col, textcol1, weight_string(textcol2)", "OrderBy": "0 ASC, 3 ASC COLLATE latin1_swedish_ci", - "Query": "select col, min(textcol1), max(textcol2), textcol1, textcol1 from `user` group by col, textcol1 order by col asc, textcol1 asc", + "Query": "select col, min(textcol1), max(textcol2), textcol1, textcol1, weight_string(textcol2) from `user` group by col, textcol1, weight_string(textcol2) order by col asc, textcol1 asc", "Table": "`user`" } ] @@ -5490,11 +5515,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0, - 2 - ], + "Columns": "1,0,2", "Inputs": [ { "OperatorType": "Aggregate", @@ -5845,10 +5866,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 1 - ], + "Columns": "1,1", "Inputs": [ { "OperatorType": "Limit", @@ -5861,8 +5879,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, val2 from (select id, val2 from `user` where 1 != 1) as x where 1 != 1", - "Query": "select id, val2 from (select id, val2 from `user` where val2 is null) as x limit :__upper_limit", + "FieldQuery": "select x.id, x.val2 from (select id, val2 from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.id, x.val2 from (select id, val2 from `user` where val2 is null) as x limit 2", "Table": "`user`" } ] @@ -5918,10 +5936,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 2, - 1 - ], + "Columns": "2,1", "Inputs": [ { "OperatorType": "Aggregate", @@ -6038,5 +6053,1018 @@ "user.user_extra" ] } + }, + { + "comment": "avg function on scatter query", + "query": "select avg(id) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select avg(id) from user", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "sum(id) / count(id) as avg(id)" + ], + "Inputs": [ + { + 
"OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS avg(id), sum_count(1) AS count(id)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(id), count(id) from `user` where 1 != 1", + "Query": "select sum(id), count(id) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "avg function on scatter query deep inside the output expression", + "query": "select avg(id)+count(foo)+bar from user group by bar", + "plan": { + "QueryType": "SELECT", + "Original": "select avg(id)+count(foo)+bar from user group by bar", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "avg(id) + count(foo) + bar as avg(id) + count(foo) + bar" + ], + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + ":0 as bar", + "sum(id) / count(id) as avg(id)", + ":2 as count(foo)" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS avg(id), sum_count(2) AS count(foo), sum_count(3) AS count(id)", + "GroupBy": "(0|4)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, sum(id), count(foo), count(id), weight_string(bar) from `user` where 1 != 1 group by bar, weight_string(bar)", + "OrderBy": "(0|4) ASC", + "Query": "select bar, sum(id), count(foo), count(id), weight_string(bar) from `user` group by bar, weight_string(bar) order by bar asc", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "avg function on scatter query deep inside the output expression", + "query": "select avg(id)+count(foo)+bar from user group by bar", + "plan": { + "QueryType": "SELECT", + "Original": "select avg(id)+count(foo)+bar from user group by bar", + "Instructions": { + "OperatorType": 
"Projection", + "Expressions": [ + "avg(id) + count(foo) + bar as avg(id) + count(foo) + bar" + ], + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + ":0 as bar", + "sum(id) / count(id) as avg(id)", + ":2 as count(foo)" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS avg(id), sum_count(2) AS count(foo), sum_count(3) AS count(id)", + "GroupBy": "(0|4)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, sum(id), count(foo), count(id), weight_string(bar) from `user` where 1 != 1 group by bar, weight_string(bar)", + "OrderBy": "(0|4) ASC", + "Query": "select bar, sum(id), count(foo), count(id), weight_string(bar) from `user` group by bar, weight_string(bar) order by bar asc", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "two avg aggregations", + "query": "select avg(foo), avg(bar) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select avg(foo), avg(bar) from user", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "sum(foo) / count(foo) as avg(foo)", + "sum(bar) / count(bar) as avg(bar)" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS avg(foo), sum(1) AS avg(bar), sum_count(2) AS count(foo), sum_count(3) AS count(bar)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(foo), sum(bar), count(foo), count(bar) from `user` where 1 != 1", + "Query": "select sum(foo), sum(bar), count(foo), count(bar) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "avg and count on the same argument", + "query": "select avg(foo), count(foo) from user", + "plan": { + "QueryType": 
"SELECT", + "Original": "select avg(foo), count(foo) from user", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + "sum(foo) / count(foo) as avg(foo)", + ":1 as count(foo)" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS avg(foo), sum_count(1) AS count(foo), sum_count(2) AS count(foo)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(foo), count(foo), count(foo) from `user` where 1 != 1", + "Query": "select sum(foo), count(foo), count(foo) from `user`", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "GROUP BY inside derived table on the RHS should not be a problem", + "query": "SELECT c.column_name FROM user c JOIN (SELECT table_name FROM user WHERE id = 143 GROUP BY 1) AS tables ON tables.table_name = c.table_name", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT c.column_name FROM user c JOIN (SELECT table_name FROM user WHERE id = 143 GROUP BY 1) AS tables ON tables.table_name = c.table_name", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "tables_table_name": 0 + }, + "TableName": "`user`_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `tables`.table_name from (select table_name from `user` where 1 != 1 group by table_name) as `tables` where 1 != 1", + "Query": "select `tables`.table_name from (select table_name from `user` where id = 143 group by table_name) as `tables`", + "Table": "`user`", + "Values": [ + "143" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select c.column_name from `user` as c where 1 != 1", + 
"Query": "select c.column_name from `user` as c where c.table_name = :tables_table_name", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Group by aggregated column should not be a problem", + "query": "SELECT b.col FROM music AS b JOIN (SELECT MIN(bb.id) AS min_id, MAX(bb.id) AS max_id FROM user AS bb) AS foobars WHERE b.id > foobars.min_id GROUP BY b.col", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT b.col FROM music AS b JOIN (SELECT MIN(bb.id) AS min_id, MAX(bb.id) AS max_id FROM user AS bb) AS foobars WHERE b.id > foobars.min_id GROUP BY b.col", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "(0|1)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|1) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "foobars_min_id": 0 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "GroupBy": "0 COLLATE utf8mb4_0900_ai_ci", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "min(0|2) AS min_id, max(1|2) AS max_id", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(bb.id) as min_id, max(bb.id) as max_id, weight_string(bb.id) from `user` as bb where 1 != 1 group by weight_string(bb.id)", + "OrderBy": "0 ASC COLLATE utf8mb4_0900_ai_ci", + "Query": "select min(bb.id) as min_id, max(bb.id) as max_id, weight_string(bb.id) from `user` as bb group by weight_string(bb.id) order by min(bb.id) asc", + "Table": "`user`" + } + ] + } + ] + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select b.col, weight_string(b.col) from music as b where 1 != 1 group by b.col, 
weight_string(b.col)", + "Query": "select b.col, weight_string(b.col) from music as b where b.id > :foobars_min_id group by b.col, weight_string(b.col)", + "Table": "music" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "Group by aliases on both sides of a join", + "query": "select count(*), cast(user.foo as datetime) as f1, cast(music.foo as datetime) as f2 from user join music group by f1, f2", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*), cast(user.foo as datetime) as f1, cast(music.foo as datetime) as f2 from user join music group by f1, f2", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "(1|3), (2|4)", + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|3) ASC, (2|4) ASC", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "count(*) * count(*) as count(*)", + ":2 as f1", + ":3 as f2", + ":4 as weight_string(cast(`user`.foo as datetime))", + ":5 as weight_string(cast(music.foo as datetime))" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2,R:2", + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), cast(`user`.foo as datetime) as f1, weight_string(cast(`user`.foo as datetime)) from `user` where 1 != 1 group by cast(`user`.foo as datetime), weight_string(cast(`user`.foo as datetime))", + "Query": "select count(*), cast(`user`.foo as datetime) as f1, weight_string(cast(`user`.foo as datetime)) from `user` group by cast(`user`.foo as datetime), weight_string(cast(`user`.foo as datetime))", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + 
}, + "FieldQuery": "select count(*), cast(music.foo as datetime) as f2, weight_string(cast(music.foo as datetime)) from music where 1 != 1 group by cast(music.foo as datetime), weight_string(cast(music.foo as datetime))", + "Query": "select count(*), cast(music.foo as datetime) as f2, weight_string(cast(music.foo as datetime)) from music group by cast(music.foo as datetime), weight_string(cast(music.foo as datetime))", + "Table": "music" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "count(*) push down through left hash join", + "query": "select count(*) from user left join (select col, bar from user_extra limit 10) ue on user.col = ue.col group by user.foo, ue.bar", + "plan": { + "QueryType": "SELECT", + "Original": "select count(*) from user left join (select col, bar from user_extra limit 10) ue on user.col = ue.col group by user.foo, ue.bar", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum_count_star(0) AS count(*)", + "GroupBy": "(1|2), (3|4)", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "count(*) * coalesce(count(*), 1) as count(*)", + ":4 as foo", + ":6 as weight_string(`user`.foo)", + ":5 as bar", + ":7 as weight_string(ue.bar)" + ], + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(4|6) ASC, (5|7) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "HashLeftJoin", + "Collation": "binary", + "ComparisonType": "INT16", + "JoinColumnIndexes": "-1,1,-2,2,-3,3", + "Predicate": "`user`.col = ue.col", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*), `user`.col, `user`.foo from `user` where 1 != 1 group by `user`.col, `user`.foo", + "Query": "select count(*), `user`.col, `user`.foo from `user` group by `user`.col, 
`user`.foo", + "Table": "`user`" + }, + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "count_star(0)", + "GroupBy": "1, (2|3)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": "2,0,1,3", + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "0 ASC, (1|3) ASC", + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select ue.col, ue.bar, 1, weight_string(ue.bar) from (select col, bar from user_extra) as ue limit 10", + "Table": "user_extra" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "sharded subquery inside aggregation function on a dual table", + "query": "select max((select min(col) from user where id = 1))", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select min(col) from user where id = 1))", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max((select min(col) from `user` where 1 != 1)) from dual where 1 != 1", + "Query": "select max((select min(col) from `user` where id = 1)) from dual", + "Table": "dual", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "main.dual", + "user.user" + ] + } + }, + { + "comment": "unsharded subquery inside aggregation function on a sharded table", + "query": "select max((select min(col) from unsharded)) from user where id = 1", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select min(col) from unsharded)) from user where id = 1", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + 
"Aggregates": "max(0|1) AS max((select min(col) from unsharded))", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select min(col) from unsharded where 1 != 1", + "Query": "select min(col) from unsharded", + "Table": "unsharded" + }, + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max(:__sq1), weight_string(:__sq1) from `user` where 1 != 1 group by weight_string(:__sq1)", + "Query": "select max(:__sq1), weight_string(:__sq1) from `user` where id = 1 group by weight_string(:__sq1)", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": "user_index" + } + ] + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "sharded subquery inside aggregation function on a sharded table on different vindex value", + "query": "select max((select min(col) from user where id = 1)) from user where id = 2", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select min(col) from user where id = 1)) from user where id = 2", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0|1) AS max((select min(col) from `user` where id = 1))", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select min(col) from `user` where 1 != 1", + "Query": "select min(col) from `user` where id = 1", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": 
"user_index" + }, + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max(:__sq1), weight_string(:__sq1) from `user` where 1 != 1 group by weight_string(:__sq1)", + "Query": "select max(:__sq1), weight_string(:__sq1) from `user` where id = 2 group by weight_string(:__sq1)", + "Table": "`user`", + "Values": [ + "2" + ], + "Vindex": "user_index" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "sharded subquery inside group_concat multi-column aggregation function on a dual table", + "query": "select max((select group_concat(col1, col2) from user where id = 1))", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select group_concat(col1, col2) from user where id = 1))", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max((select group_concat(col1, col2) from `user` where 1 != 1)) from dual where 1 != 1", + "Query": "select max((select group_concat(col1, col2) from `user` where id = 1)) from dual", + "Table": "dual", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "main.dual", + "user.user" + ] + } + }, + { + "comment": "sharded subquery inside group_concat multi-column aggregation function on a sharded table on same vindex value", + "query": "select max((select group_concat(col1, col2) from user where id = 1)) from user where id = 1", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select group_concat(col1, col2) from user where id = 1)) from user where id = 1", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max((select group_concat(col1, col2) from `user` where 1 != 1)) from `user` where 1 != 1", + "Query": "select max((select group_concat(col1, col2) 
from `user` where id = 1)) from `user` where id = 1", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "sharded subquery inside group_concat multi-column aggregation function on a sharded table", + "query": "select max((select group_concat(col1, col2) from user where id = 1)) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select group_concat(col1, col2) from user where id = 1)) from user", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0|1) AS max((select group_concat(col1, col2) from `user` where id = 1))", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select group_concat(col1, col2) from `user` where 1 != 1", + "Query": "select group_concat(col1, col2) from `user` where id = 1", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max(:__sq1), weight_string(:__sq1) from `user` where 1 != 1 group by weight_string(:__sq1)", + "Query": "select max(:__sq1), weight_string(:__sq1) from `user` group by weight_string(:__sq1)", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "sharded correlated subquery inside aggregation function on a sharded table on same vindex", + "query": "select max((select max(col2) from user u1 where u1.id = u2.id)) from user u2", + "plan": { + "QueryType": "SELECT", + "Original": "select max((select max(col2) from user u1 where u1.id = u2.id)) from user u2", + "Instructions": { + 
"OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "max(0|1) AS max((select max(col2) from `user` as u1 where u1.id = u2.id))", + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select max((select max(col2) from `user` as u1 where 1 != 1)), weight_string((select max(col2) from `user` as u1 where 1 != 1)) from `user` as u2 where 1 != 1 group by weight_string((select max(col2) from `user` as u1 where 1 != 1))", + "Query": "select max((select max(col2) from `user` as u1 where u1.id = u2.id)), weight_string((select max(col2) from `user` as u1 where u1.id = u2.id)) from `user` as u2 group by weight_string((select max(col2) from `user` as u1 where u1.id = u2.id))", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Multi-value aggregates pushed as function without splitting", + "query": "select count(a,b) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select count(a,b) from user", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count(0) AS count(a, b)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(a, b) from `user` where 1 != 1", + "Query": "select count(a, b) from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "group_concat with multi column - pushed without splitting", + "query": "select group_concat(col1, col2) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select group_concat(col1, col2) from user", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "group_concat(0) AS group_concat(col1, col2)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": 
true + }, + "FieldQuery": "select group_concat(col1, col2) from `user` where 1 != 1", + "Query": "select group_concat(col1, col2) from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "select count(distinct name, id) from user", + "query": "select count(distinct name, id) from user", + "plan": { + "QueryType": "SELECT", + "Original": "select count(distinct name, id) from user", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_distinct(0) AS count(distinct `name`, id)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(distinct `name`, id) from `user` where 1 != 1", + "Query": "select count(distinct `name`, id) from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "valid but slightly confusing query should work - col in the order by should not get expanded to the column alias col", + "query": "select id, from_unixtime(min(col)) as col from user group by id order by min(col)", + "plan": { + "QueryType": "SELECT", + "Original": "select id, from_unixtime(min(col)) as col from user group by id order by min(col)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, from_unixtime(min(col)) as col, min(col) from `user` where 1 != 1 group by id", + "OrderBy": "2 ASC COLLATE utf8mb4_0900_ai_ci", + "Query": "select id, from_unixtime(min(col)) as col, min(col) from `user` group by id order by min(col) asc", + "ResultColumns": 2, + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "col is a column on user, but the HAVING is referring to an alias", + "query": "select sum(x) col from user where x > 0 having col = 2", + "plan": { + "QueryType": "SELECT", + "Original": "select 
sum(x) col from user where x > 0 having col = 2", + "Instructions": { + "OperatorType": "Filter", + "Predicate": "sum(`user`.x) = 2", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum(0) AS col", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(x) as col from `user` where 1 != 1", + "Query": "select sum(x) as col from `user` where x > 0", + "Table": "`user`" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "baz in the HAVING clause can't be accessed because of the GROUP BY", + "query": "select foo, count(bar) as x from user group by foo having baz > avg(baz) order by x", + "plan": "Unknown column 'baz' in 'having clause'" + }, + { + "comment": "Aggregate UDFs can't be handled by vtgate", + "query": "select id from t1 group by id having udf_aggr(foo) > 1 and sum(foo) = 10", + "plan": "VT12001: unsupported: Aggregate UDF 'udf_aggr(foo)' must be pushed down to MySQL" + }, + { + "comment": "Valid to run since we can push down the aggregate function because of the grouping", + "query": "select id from user group by id having udf_aggr(foo) > 1", + "plan": { + "QueryType": "SELECT", + "Original": "select id from user group by id having udf_aggr(foo) > 1", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1 group by id", + "Query": "select id from `user` group by id having udf_aggr(foo) > 1", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Valid to run since we can push down the aggregate function because it's unsharded", + "query": "select bar, udf_aggr(foo) from unsharded group by bar", + "plan": { + "QueryType": "SELECT", + "Original": "select bar, udf_aggr(foo) from unsharded group by bar", + "Instructions": { + 
"OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select bar, udf_aggr(foo) from unsharded where 1 != 1 group by bar", + "Query": "select bar, udf_aggr(foo) from unsharded group by bar", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "Valid to run since we can push down the aggregate function because the where clause using the sharding key", + "query": "select bar, udf_aggr(foo) from user where id = 17 group by bar", + "plan": { + "QueryType": "SELECT", + "Original": "select bar, udf_aggr(foo) from user where id = 17 group by bar", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select bar, udf_aggr(foo) from `user` where 1 != 1 group by bar", + "Query": "select bar, udf_aggr(foo) from `user` where id = 17 group by bar", + "Table": "`user`", + "Values": [ + "17" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json index 6f2be325b6b..02a00444724 100644 --- a/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/bypass_shard_cases.json @@ -123,6 +123,7 @@ } }, { + "comment": "load data from s3 'x.txt' into table x", "query": "load data from s3 'x.txt' into table x", "plan": { "QueryType": "OTHER", @@ -141,6 +142,7 @@ } }, { + "comment": "load data from s3 'x.txt'", "query": "load data from s3 'x.txt'", "plan": { "QueryType": "OTHER", @@ -174,5 +176,80 @@ "Query": "create /* test */ table t1(id bigint, primary key(id)) /* comments */" } } + }, + { + "comment": "select bypass with query timeout hint", + "query": "select /*vt+ QUERY_TIMEOUT_MS=100 */ count(*), col from user", + "plan": { + "QueryType": "SELECT", + "Original": "select /*vt+ 
QUERY_TIMEOUT_MS=100 */ count(*), col from user", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "Shard(-80)", + "Query": "select /*vt+ QUERY_TIMEOUT_MS=100 */ count(*), col from `user`", + "QueryTimeout": 100 + } + } + }, + { + "comment": "update bypass with query timeout hint", + "query": "update /*vt+ QUERY_TIMEOUT_MS=100 */ user set val = 1 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update /*vt+ QUERY_TIMEOUT_MS=100 */ user set val = 1 where id = 1", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "Shard(-80)", + "IsDML": true, + "Query": "update /*vt+ QUERY_TIMEOUT_MS=100 */ `user` set val = 1 where id = 1", + "QueryTimeout": 100 + } + } + }, + { + "comment": "delete bypass with query timeout hint", + "query": "DELETE /*vt+ QUERY_TIMEOUT_MS=100 */ FROM USER WHERE ID = 42", + "plan": { + "QueryType": "DELETE", + "Original": "DELETE /*vt+ QUERY_TIMEOUT_MS=100 */ FROM USER WHERE ID = 42", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "Shard(-80)", + "IsDML": true, + "Query": "delete /*vt+ QUERY_TIMEOUT_MS=100 */ from `USER` where ID = 42", + "QueryTimeout": 100 + } + } + }, + { + "comment": "insert bypass with query timeout hint", + "query": "INSERT /*vt+ QUERY_TIMEOUT_MS=100 */ INTO USER (ID, NAME) VALUES (42, 'ms X')", + "plan": { + "QueryType": "INSERT", + "Original": "INSERT /*vt+ QUERY_TIMEOUT_MS=100 */ INTO USER (ID, NAME) VALUES (42, 'ms X')", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "Shard(-80)", + "IsDML": true, + "Query": "insert /*vt+ QUERY_TIMEOUT_MS=100 */ into `USER`(ID, `NAME`) values (42, 'ms X')", + "QueryTimeout": 100 + } + } } -] \ No newline at end of file +] diff --git 
a/go/vt/vtgate/planbuilder/testdata/cte_cases.json b/go/vt/vtgate/planbuilder/testdata/cte_cases.json index e41aa27ce1b..8181c13cd0b 100644 --- a/go/vt/vtgate/planbuilder/testdata/cte_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/cte_cases.json @@ -143,8 +143,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select sum(col), 32 from (select `user`.col as col, 32 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select sum(col), 32 from (select `user`.col as col, 32 from `user`) as t", + "FieldQuery": "select sum(col) from (select `user`.col as col, 32 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select sum(col) from (select `user`.col as col, 32 from `user`) as t", "Table": "`user`" }, { @@ -183,9 +183,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 2 - ], + "Columns": "2", "Inputs": [ { "OperatorType": "Limit", @@ -198,8 +196,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", - "Query": "select phone, id, city from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", + "FieldQuery": "select x.phone, x.id, x.city from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.phone, x.id, x.city from (select phone, id, city from `user` where id > 12) as x limit 10", "Table": "`user`" } ] @@ -226,9 +224,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 3 - ], + "Columns": "3", "Inputs": [ { "OperatorType": "Limit", @@ -241,8 +237,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select phone, id, city, 1 from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", - "Query": "select phone, id, city, 1 from (select phone, id, city from `user` where id > 12) as x limit :__upper_limit", + "FieldQuery": "select x.phone, x.id, x.city, 1 from (select phone, id, city from `user` where 1 != 1) as x where 1 != 1", + 
"Query": "select x.phone, x.id, x.city, 1 from (select phone, id, city from `user` where id > 12) as x limit 10", "Table": "`user`" } ] @@ -281,26 +277,38 @@ "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id from `user` where 1 != 1", - "Query": "select `user`.id from `user`", - "Table": "`user`" + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select x.`user.id` from (select `user`.id as `user.id` from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.`user.id` from (select `user`.id as `user.id` from `user`) as x limit 10", + "Table": "`user`" + } + ] }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select user_extra.col from user_extra where user_extra.id = :user_id", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select x.col from (select user_extra.col as col from user_extra where 1 != 1) as x where 1 != 1", + "Query": "select x.col from (select user_extra.col as col from user_extra where user_extra.id = :user_id) as x limit 10", + "Table": "user_extra" + } + ] } ] } @@ -329,11 +337,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 2, - 3 - ], + "Columns": "1,2,3", "Inputs": [ { "OperatorType": "Limit", @@ -346,9 +350,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where 1 != 1) as x where 1 != 1", + "FieldQuery": "select x.id, x.val1, 1, weight_string(x.val1) 
from (select id, val1 from `user` where 1 != 1) as x where 1 != 1", "OrderBy": "(1|3) ASC", - "Query": "select id, val1, 1, weight_string(val1) from (select id, val1 from `user` where val2 < 4) as x order by val1 asc limit :__upper_limit", + "Query": "select x.id, x.val1, 1, weight_string(x.val1) from (select id, val1 from `user` where val2 < 4) as x order by `user`.val1 asc limit 2", "Table": "`user`" } ] @@ -562,11 +566,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0, - 2 - ], + "Columns": "1,0,2", "Inputs": [ { "OperatorType": "Aggregate", @@ -675,10 +675,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 1 - ], + "Columns": "1,1", "Inputs": [ { "OperatorType": "Limit", @@ -691,8 +688,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, val2 from (select id, val2 from `user` where 1 != 1) as x where 1 != 1", - "Query": "select id, val2 from (select id, val2 from `user` where val2 is null) as x limit :__upper_limit", + "FieldQuery": "select x.id, x.val2 from (select id, val2 from `user` where 1 != 1) as x where 1 != 1", + "Query": "select x.id, x.val2 from (select id, val2 from `user` where val2 is null) as x limit 2", "Table": "`user`" } ] @@ -719,9 +716,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1 - ], + "Columns": "1", "Inputs": [ { "OperatorType": "Aggregate", @@ -1053,7 +1048,7 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", + "JoinColumnIndexes": "L:1,L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1063,8 +1058,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.col1, t.id from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.col1, t.id from (select `user`.id, `user`.col1 from `user`) as t", + "FieldQuery": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1 from (select 
`user`.id, `user`.col1 from `user`) as t", "Table": "`user`" }, { @@ -1111,7 +1106,7 @@ "Variant": "Join", "JoinColumnIndexes": "L:0", "JoinVars": { - "user_col": 1 + "user_col": 2 }, "TableName": "`user`_user_extra", "Inputs": [ @@ -1122,8 +1117,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.id, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user`) as t", + "FieldQuery": "select t.id, t.col1, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user`) as t", "Table": "`user`" }, { @@ -1171,7 +1166,7 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0", + "JoinColumnIndexes": "L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1181,8 +1176,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.col1 from (select `user`.id, `user`.col1 from `user`) as t", + "FieldQuery": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user`) as t", "Table": "`user`" }, { @@ -1236,7 +1231,7 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0", + "JoinColumnIndexes": "L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1246,8 +1241,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.col1 from (select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id) as t", + "FieldQuery": 
"select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id) as t", "Table": "`user`", "Values": [ ":ua_id" @@ -1283,41 +1278,32 @@ "QueryType": "SELECT", "Original": "with t as (select user.id from user join user_extra) select id, t.id from t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select `user`.id from `user`) as t", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id from (select `user`.id from `user`) as t", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -1334,20 +1320,28 @@ "QueryType": "SELECT", "Original": "with t as (select count(*) as a from user) select a as k from t", "Instructions": { - "OperatorType": "Aggregate", - "Variant": 
"Scalar", - "Aggregates": "sum_count_star(0) AS a", + "OperatorType": "SimpleProjection", + "ColumnNames": [ + "0:k" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS a", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", + "Table": "`user`" + } + ] } ] }, @@ -1369,8 +1363,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select u.* from (select * from unsharded where 1 != 1) as u where 1 != 1", - "Query": "select u.* from (select * from unsharded) as u", + "FieldQuery": "with u as (select * from unsharded where 1 != 1) select u.* from u where 1 != 1", + "Query": "with u as (select * from unsharded) select u.* from u", "Table": "unsharded" }, "TablesUsed": [ @@ -1397,8 +1391,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select `user`.id, `user`.col from `user` where `user`.id = 5) as t", + "FieldQuery": "select t.id, t.col from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col from (select `user`.id, `user`.col from `user` where `user`.id = 5) as t", "Table": "`user`", "Values": [ "5" @@ -1709,8 +1703,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select col from (select col from unsharded join unsharded_b where 1 != 1) as u join unsharded_a as ua where 1 != 1", - "Query": "select col from (select col from unsharded join unsharded_b) as u join unsharded_a as ua limit 1", + 
"FieldQuery": "with u as (select col from unsharded join unsharded_b where 1 != 1) select col from u join unsharded_a as ua where 1 != 1", + "Query": "with u as (select col from unsharded join unsharded_b) select col from u join unsharded_a as ua limit 1", "Table": "unsharded, unsharded_a, unsharded_b" }, "TablesUsed": [ @@ -1767,15 +1761,21 @@ ] }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra as ue where 1 != 1", + "Query": "select 1 from user_extra as ue limit 1", + "Table": "user_extra" + } + ] } ] } @@ -1830,8 +1830,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, foo, weight_string(id), weight_string(foo) from (select id, foo from (select id, foo from `user` where 1 != 1) as x where 1 != 1 union select id, foo from (select id, foo from `user` where 1 != 1) as x where 1 != 1) as dt where 1 != 1", - "Query": "select id, foo, weight_string(id), weight_string(foo) from (select id, foo from (select id, foo from `user`) as x union select id, foo from (select id, foo from `user`) as x) as dt", + "FieldQuery": "select dt.c0 as id, dt.c1 as foo, weight_string(dt.c0), weight_string(dt.c1) from (select id, foo from (select id, foo from `user` where 1 != 1) as x where 1 != 1 union select id, foo from (select id, foo from `user` where 1 != 1) as x where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as id, dt.c1 as foo, weight_string(dt.c0), weight_string(dt.c1) from (select id, foo from (select id, foo from `user`) as x union select id, foo from (select id, foo from `user`) as x) as dt(c0, c1)", "Table": "`user`" } ] @@ -1840,5 +1840,255 
@@ "user.user" ] } + }, + { + "comment": "recursive WITH against an unsharded database", + "query": "WITH RECURSIVE cte (n) AS ( SELECT 1 UNION ALL SELECT n + 1 FROM cte WHERE n < 5 ) SELECT cte.n FROM unsharded join cte on unsharded.id = cte.n ", + "plan": { + "QueryType": "SELECT", + "Original": "WITH RECURSIVE cte (n) AS ( SELECT 1 UNION ALL SELECT n + 1 FROM cte WHERE n < 5 ) SELECT cte.n FROM unsharded join cte on unsharded.id = cte.n ", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "with recursive cte(n) as (select 1 from dual where 1 != 1 union all select n + 1 from cte where 1 != 1) select cte.n from unsharded join cte on unsharded.id = cte.n where 1 != 1", + "Query": "with recursive cte(n) as (select 1 from dual union all select n + 1 from cte where n < 5) select cte.n from unsharded join cte on unsharded.id = cte.n", + "Table": "dual, unsharded" + }, + "TablesUsed": [ + "main.dual", + "main.unsharded" + ] + } + }, + { + "comment": "WITH two common expressions against an unsharded database and a SELECT UNION against those expressions", + "query": "WITH `count_a` AS (SELECT COUNT(`id`) AS `num` FROM `unsharded_a`), `count_b` AS (SELECT COUNT(`id`) AS `num` FROM `unsharded_b`) SELECT 'count_a' AS `tab`, `num` FROM `count_a` UNION SELECT 'count_b' AS `tab`, `num` FROM `count_b`", + "plan": { + "QueryType": "SELECT", + "Original": "WITH `count_a` AS (SELECT COUNT(`id`) AS `num` FROM `unsharded_a`), `count_b` AS (SELECT COUNT(`id`) AS `num` FROM `unsharded_b`) SELECT 'count_a' AS `tab`, `num` FROM `count_a` UNION SELECT 'count_b' AS `tab`, `num` FROM `count_b`", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select 'count_a' as tab, num from count_a where 1 != 1 union select 'count_b' as tab, num from count_b where 1 != 1", + "Query": "with count_a as (select
count(id) as num from unsharded_a) , count_b as (select count(id) as num from unsharded_b) select 'count_a' as tab, num from count_a union select 'count_b' as tab, num from count_b", + "Table": "unsharded_a, unsharded_b" + }, + "TablesUsed": [ + "main.unsharded_a", + "main.unsharded_b" + ] + } + }, + { + "comment": "WITH two common expressions against a sharded database and a SELECT UNION against those expressions", + "query": "WITH `count_a` AS (SELECT COUNT(`user_id`) AS `num` FROM `user_metadata`), `count_b` AS (SELECT COUNT(`user_id`) AS `num` FROM `user_extra`) SELECT 'count_a' AS `tab`, `num` FROM `count_a` UNION SELECT 'count_b' AS `tab`, `num` FROM `count_b`", + "plan": { + "QueryType": "SELECT", + "Original": "WITH `count_a` AS (SELECT COUNT(`user_id`) AS `num` FROM `user_metadata`), `count_b` AS (SELECT COUNT(`user_id`) AS `num` FROM `user_extra`) SELECT 'count_a' AS `tab`, `num` FROM `count_a` UNION SELECT 'count_b' AS `tab`, `num` FROM `count_b`", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "0: utf8mb4_0900_ai_ci", + "1" + ], + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "'count_a' as tab", + ":0 as num" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count(0) AS num", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(user_id) as num from user_metadata where 1 != 1", + "Query": "select count(user_id) as num from user_metadata", + "Table": "user_metadata" + } + ] + } + ] + }, + { + "OperatorType": "Projection", + "Expressions": [ + "'count_b' as tab", + ":0 as num" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count(0) AS num", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + },
+ "FieldQuery": "select count(user_id) as num from user_extra where 1 != 1", + "Query": "select count(user_id) as num from user_extra", + "Table": "user_extra" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user_extra", + "user.user_metadata" + ] + } + }, + { + "comment": "CTE expression using unions and complex aggregation with literal", + "query": "WITH `open` AS (SELECT COUNT(*) as `num` FROM (SELECT `user`.`id` FROM `user` WHERE `user`.`textcol1` = 'open' AND `user`.`intcol` = 1 LIMIT 1000) `t` LIMIT 1 ), `closed` AS (SELECT COUNT(*) as `num` FROM ( SELECT `user`.`id` FROM `user` WHERE `user`.`textcol1` = 'closed' AND `user`.`intcol` = 1 LIMIT 1000) `t` LIMIT 1 ), `all` AS (SELECT LEAST(1000, SUM(`num`)) AS `num` FROM ( SELECT `num` FROM `open` UNION ALL SELECT `num` FROM `closed` ) `t` LIMIT 1 )SELECT 'all' AS `tab`, `num`FROM `all`", + "plan": { + "QueryType": "SELECT", + "Original": "WITH `open` AS (SELECT COUNT(*) as `num` FROM (SELECT `user`.`id` FROM `user` WHERE `user`.`textcol1` = 'open' AND `user`.`intcol` = 1 LIMIT 1000) `t` LIMIT 1 ), `closed` AS (SELECT COUNT(*) as `num` FROM ( SELECT `user`.`id` FROM `user` WHERE `user`.`textcol1` = 'closed' AND `user`.`intcol` = 1 LIMIT 1000) `t` LIMIT 1 ), `all` AS (SELECT LEAST(1000, SUM(`num`)) AS `num` FROM ( SELECT `num` FROM `open` UNION ALL SELECT `num` FROM `closed` ) `t` LIMIT 1 )SELECT 'all' AS `tab`, `num`FROM `all`", + "Instructions": { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "'all' as tab", + ":0 as num" + ], + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "least(1000, sum(num)) as num" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "any_value(0), sum(1) AS sum(num)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": "1,0", + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Limit", + 
"Count": "1", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count_star(0) AS num, any_value(1)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": "1,2", + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "1000", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.id, 1, 1000 from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, 1, 1000 from (select `user`.id from `user` where `user`.textcol1 = 'open' and `user`.intcol = 1) as t limit :__upper_limit", + "Table": "`user`" + } + ] + } + ] + } + ] + } + ] + }, + { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count_star(0) AS num, any_value(1)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": "1,2", + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "1000", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.id, 1, 1000 from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, 1, 1000 from (select `user`.id from `user` where `user`.textcol1 = 'closed' and `user`.intcol = 1) as t limit :__upper_limit", + "Table": "`user`" + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json index e31cc3e29e1..1ae7578854b 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases.json @@ -241,7 +241,7 @@ "Name": "main", "Sharded": false }, - "Query": "alter table unknown add key a (id)" + "Query": "alter table `unknown` add key a (id)" }, "TablesUsed": 
[ "main.unknown" @@ -260,7 +260,7 @@ "Name": "main", "Sharded": false }, - "Query": "create view view_a as select col1, col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a" + "Query": "create view view_a as select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a" }, "TablesUsed": [ "main.view_a" @@ -374,7 +374,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view tmp_view as select user_id, col1, col2 from authoritative" + "Query": "create view tmp_view as select * from authoritative" }, "TablesUsed": [ "user.tmp_view" diff --git a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json index c2e1b1ed94e..b62988b2e38 100644 --- a/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json +++ b/go/vt/vtgate/planbuilder/testdata/ddl_cases_no_default_keyspace.json @@ -125,7 +125,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user_id, col1, col2 from authoritative" + "Query": "create view view_a as select * from authoritative" }, "TablesUsed": [ "user.view_a" @@ -144,7 +144,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a join authoritative as b on a.user_id = b.user_id" + "Query": "create view view_a as select * from authoritative as a join authoritative as b on a.user_id = b.user_id" }, "TablesUsed": [ "user.view_a" @@ -163,7 +163,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user_id, col1, col2 from authoritative as a" + "Query": "create view view_a as select a.* from authoritative as a" }, "TablesUsed": [ "user.view_a" @@ -201,7 +201,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as 
select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id" + "Query": "create view view_a as select `user`.id, a.*, `user`.col1 from authoritative as a join `user` on a.user_id = `user`.id" }, "TablesUsed": [ "user.view_a" @@ -472,7 +472,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc" + "Query": "create view view_a as select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc" }, "TablesUsed": [ "user.view_a" diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json index eebcf63edf3..9c2ed1920ee 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json @@ -1207,7 +1207,7 @@ { "comment": "insert with mimatched column list", "query": "insert into user(id) values (1, 2)", - "plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count does not match value count with the row" }, { "comment": "insert no column list for sharded authoritative table", @@ -2157,7 +2157,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "update user_extra set val = 1 where user_id in (1, 2)", + "Query": "update user_extra set val = 1 where user_id in ::__vals", "Table": "user_extra", "Values": [ "(1, 2)" @@ -2383,7 +2383,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "delete from user_extra where user_id in (1, 2)", + "Query": "delete from user_extra where user_id in ::__vals", "Table": "user_extra", "Values": [ "(1, 2)" @@ -2516,7 +2516,7 @@ "KsidLength": 1, "KsidVindex": "user_index", "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = null from `user` where id in (1, 2, 3) for update", - "Query": "update `user` set `name` = null where id in (1, 2, 3)", + "Query": "update `user` 
set `name` = null where id in ::__vals", "Table": "user", "Values": [ "(1, 2, 3)" @@ -2601,7 +2601,7 @@ "KsidLength": 1, "KsidVindex": "user_index", "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id in (1, 2, 3) for update", - "Query": "delete from `user` where id in (1, 2, 3)", + "Query": "delete from `user` where id in ::__vals", "Table": "user", "Values": [ "(1, 2, 3)" @@ -3101,7 +3101,7 @@ "KsidLength": 1, "KsidVindex": "xxhash", "OwnedVindexQuery": "select c1, c2, c3 from t1 where c2 = 10 and c3 in (20, 21) for update", - "Query": "delete from t1 where c2 = 10 and c3 in (20, 21)", + "Query": "delete from t1 where c2 = 10 and c3 in ::__vals", "Table": "t1", "Values": [ "(20, 21)" @@ -3133,7 +3133,7 @@ "KsidLength": 1, "KsidVindex": "xxhash", "OwnedVindexQuery": "select c1, c2, c3, c2 = 1 from t1 where c2 = 10 and c3 in (20, 21) for update", - "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in (20, 21)", + "Query": "update t1 set c2 = 1 where c2 = 10 and c3 in ::__vals", "Table": "t1", "Values": [ "(20, 21)" @@ -3266,7 +3266,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola = 1", + "Query": "update multicol_tbl set x = 1 where colb in ::__vals1 and cola = 1", "Table": "multicol_tbl", "Values": [ "1", @@ -3293,7 +3293,7 @@ "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "update multicol_tbl set x = 1 where colb in (1, 2) and cola in (3, 4)", + "Query": "update multicol_tbl set x = 1 where colb in ::__vals1 and cola in ::__vals0", "Table": "multicol_tbl", "Values": [ "(3, 4)", @@ -3383,7 +3383,7 @@ "KsidLength": 2, "KsidVindex": "multicolIdx", "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola = 1 for update", - "Query": "delete from multicol_tbl where colb in (1, 2) and cola = 1", + "Query": "delete from multicol_tbl where colb in ::__vals1 and cola = 1", "Table": "multicol_tbl", "Values": [ "1", @@ 
-3413,7 +3413,7 @@ "KsidLength": 2, "KsidVindex": "multicolIdx", "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where colb in (1, 2) and cola in (3, 4) for update", - "Query": "delete from multicol_tbl where colb in (1, 2) and cola in (3, 4)", + "Query": "delete from multicol_tbl where colb in ::__vals1 and cola in ::__vals0", "Table": "multicol_tbl", "Values": [ "(3, 4)", @@ -3563,7 +3563,7 @@ "KsidLength": 2, "KsidVindex": "multicolIdx", "OwnedVindexQuery": "select cola, colb, colc, `name`, `name` = 'bar' from multicol_tbl where cola in (1, 2) for update", - "Query": "update multicol_tbl set `name` = 'bar' where cola in (1, 2)", + "Query": "update multicol_tbl set `name` = 'bar' where cola in ::__vals0", "Table": "multicol_tbl", "Values": [ "(1, 2)" @@ -3676,7 +3676,7 @@ "KsidLength": 2, "KsidVindex": "multicolIdx", "OwnedVindexQuery": "select cola, colb, colc, `name` from multicol_tbl where cola in (1, 2) for update", - "Query": "delete from multicol_tbl where cola in (1, 2)", + "Query": "delete from multicol_tbl where cola in ::__vals0", "Table": "multicol_tbl", "Values": [ "(1, 2)" @@ -3759,17 +3759,17 @@ { "comment": "insert using select with more columns in insert", "query": "insert into music(id, user_id) select 1", - "plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count does not match value count with the row" }, { "comment": "insert using select with more columns in select", "query": "insert into music(id, user_id) select id, count(user_id), sum(user_id) from user group by id", - "plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count does not match value count with the row" }, { "comment": "insert using select with more columns in select after accounting for star column", "query": "insert into music(id, user_id) select id, *, 2 from user", - "plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count 
does not match value count with the row" }, { "comment": "insert using select with auto-inc column using vitess sequence, sequence column not present", @@ -4084,7 +4084,7 @@ "Original": "insert into unsharded(col) select col from unsharded_tab", "Instructions": { "OperatorType": "Insert", - "Variant": "Unsharded", + "Variant": "Select", "Keyspace": { "Name": "main", "Sharded": false @@ -4119,7 +4119,7 @@ "Original": "insert into unsharded(col) select col from t1", "Instructions": { "OperatorType": "Insert", - "Variant": "Unsharded", + "Variant": "Select", "Keyspace": { "Name": "main", "Sharded": false @@ -4410,8 +4410,8 @@ } }, { - "comment": "explain dml without any directive should fail", - "query": "explain format=vtexplain delete from user", + "comment": "vexplain all dml without any directive should fail", + "query": "vexplain all delete from user", "plan": "VT09008: vexplain queries/all will actually run queries" }, { @@ -4889,5 +4889,2223 @@ "comment": "Unsupported update statement with a replica target destination", "query": "update `user[-]@replica`.user_metadata set id=2", "plan": "VT09002: update statement with a replica target" + }, + { + "comment": "insert row values smaller than number of columns", + "query": "insert into user(one, two, three, four) values (1, 2, 3)", + "plan": "VT03006: column count does not match value count with the row" + }, + { + "comment": "insert row values greater than number of columns", + "query": "insert into user(one, two, three) values (1, 2, 3, 4)", + "plan": "VT03006: column count does not match value count with the row" + }, + { + "comment": "insert on duplicate key update with database qualifier", + "query": "insert into user.music(id, user_id, col) values (1, 2, 3) on duplicate key update user.music.col = 5", + "plan": { + "QueryType": "INSERT", + "Original": "insert into user.music(id, user_id, col) values (1, 2, 3) on duplicate key update user.music.col = 5", + "Instructions": { + "OperatorType": "Insert", + 
"Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "InsertIgnore": true, + "Query": "insert into music(id, user_id, col) values (:_id_0, :_user_id_0, 3) on duplicate key update music.col = 5", + "TableName": "music", + "VindexValues": { + "music_user_map": "1", + "user_index": "2" + } + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "delete from reference table - query send to source table", + "query": "delete from user.ref_with_source where col = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user.ref_with_source where col = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from source_of_ref where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "delete from reference table - no source", + "query": "delete from user.ref", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user.ref", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Reference", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from ref", + "Table": "ref" + }, + "TablesUsed": [ + "user.ref" + ] + } + }, + { + "comment": "delete by target destination with limit", + "query": "delete from `user[-]`.`user` limit 20", + "plan": { + "QueryType": "DELETE", + "Original": "delete from `user[-]`.`user` limit 20", + "Instructions": { + "OperatorType": "Delete", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` limit 20 for update", + "Query": "delete from `user` limit 20", + "Table": "user" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + 
"comment": "delete sharded table with join with reference table", + "query": "delete u from user u join ref_with_source r on u.col = r.col", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join ref_with_source r on u.col = r.col", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select u.Id, u.`Name`, u.Costly from `user` as u, ref_with_source as r where u.col = r.col for update", + "Query": "delete u from `user` as u, ref_with_source as r where u.col = r.col", + "Table": "user" + }, + "TablesUsed": [ + "user.ref_with_source", + "user.user" + ] + } + }, + { + "comment": "delete sharded table with join with another sharded table on vindex column", + "query": "delete u from user u join music m on u.id = m.user_id", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join music m on u.id = m.user_id", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select u.Id, u.`Name`, u.Costly from `user` as u, music as m where u.id = m.user_id for update", + "Query": "delete u from `user` as u, music as m where u.id = m.user_id", + "Table": "user" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "multi delete multi table", + "query": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'", + "plan": { + "QueryType": "DELETE", + "Original": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": 
"Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_extra_id": 0 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.id from user_extra", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` where `user`.`name` = 'foo' and `user`.id = :user_extra_id", + "Table": "`user`", + "Values": [ + ":user_extra_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select `user`.Id, `user`.`Name`, `user`.Costly from `user` where `user`.id in ::dml_vals for update", + "Query": "delete from `user` where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "multi delete multi table with alias", + "query": "delete u from user u join music m on u.col = m.col", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join music m on u.col = m.col", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + 
"Query": "select u.id, u.col from `user` as u", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.col = :u_col", + "Table": "music" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select u.Id, u.`Name`, u.Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "reverse the join order for delete", + "query": "delete u from music m join user u where u.col = m.col and m.foo = 42", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from music m join user u where u.col = m.col and m.foo = 42", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "m_col": 0 + }, + "TableName": "music_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.col from music as m where 1 != 1", + "Query": "select m.col from music as m where m.foo = 42", + "Table": "music" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id from `user` as u where 1 != 1", + "Query": "select u.id from `user` as u where u.col = :m_col", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + 
"Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select u.Id, u.`Name`, u.Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "multi table delete with join on vindex column", + "query": "delete u from user u join music m where u.id = m.user_id and m.foo = 42", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join music m where u.id = m.user_id and m.foo = 42", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select u.Id, u.`Name`, u.Costly from `user` as u, music as m where m.foo = 42 and u.id = m.user_id for update", + "Query": "delete u from `user` as u, music as m where m.foo = 42 and u.id = m.user_id", + "Table": "user" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "delete 3 way join with sharding key and primary key same", + "query": "delete u from user u join music m on u.col = m.col join user_extra ue on m.user_id = ue.user_id where ue.foo = 20 and u.col = 30 and m.bar = 40", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join music m on u.col = m.col join user_extra ue on m.user_id = ue.user_id where ue.foo = 20 and u.col = 30 and m.bar = 40", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_music, user_extra", + "Inputs": [ + { + 
"OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + "Query": "select u.id, u.col from `user` as u where u.col = 30", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m, user_extra as ue where 1 != 1", + "Query": "select 1 from music as m, user_extra as ue where m.bar = 40 and m.col = :u_col and ue.foo = 20 and m.user_id = ue.user_id", + "Table": "music, user_extra" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select u.Id, u.`Name`, u.Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "delete 3 way join with sharding key and primary key different", + "query": "delete m from user u join music m on u.col = m.col join user_extra ue on m.user_id = ue.user_id where ue.foo = 20 and u.col = 30 and m.bar = 40", + "plan": { + "QueryType": "DELETE", + "Original": "delete m from user u join music m on u.col = m.col join user_extra ue on m.user_id = ue.user_id where ue.foo = 20 and u.col = 30 and m.bar = 40", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "u_col": 0 + }, + "TableName": "`user`_music, user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + 
"Sharded": true + }, + "FieldQuery": "select u.col from `user` as u where 1 != 1", + "Query": "select u.col from `user` as u where u.col = 30", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.id from music as m, user_extra as ue where 1 != 1", + "Query": "select m.id from music as m, user_extra as ue where m.bar = 40 and m.col = :u_col and ue.foo = 20 and m.user_id = ue.user_id", + "Table": "music, user_extra" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select m.user_id, m.id from music as m where m.id in ::dml_vals for update", + "Query": "delete from music as m where m.id in ::dml_vals", + "Table": "music", + "Values": [ + "::dml_vals" + ], + "Vindex": "music_user_map" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "sharded delete with limit clause", + "query": "delete from user limit 10", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user limit 10", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` limit :__upper_limit", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id in ::dml_vals for 
update", + "Query": "delete from `user` where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "sharded delete with order by and limit clause", + "query": "delete from user order by name, col limit 5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user order by name, col limit 5", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "5", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, `name`, weight_string(`name`), col from `user` where 1 != 1", + "OrderBy": "(1|2) ASC, 3 ASC", + "Query": "select `user`.id, `name`, weight_string(`name`), col from `user` order by `name` asc, col asc limit :__upper_limit", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id in ::dml_vals for update", + "Query": "delete from `user` where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "update with limit clause", + "query": "update user set val = 1 where (name = 'foo' or id = 1) limit 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update user set val = 1 where (name = 'foo' or id = 1) limit 1", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": 
"Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` where `name` = 'foo' or id = 1 limit :__upper_limit lock in share mode", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update `user` set val = 1 where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "update a vindex column with limit", + "query": "update user set name = 'abc' where id > 10 limit 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update user set name = 'abc' where id > 10 limit 1", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` where id > 10 limit :__upper_limit lock in share mode", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "ChangedVindexValues": [ + "name_user_map:3" + ], + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly, `name` = 'abc' from `user` where `user`.id in ::dml_vals for update", + "Query": "update `user` set `name` = 'abc' where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "update with multi table join with single target", + "query": 
"update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id", + "plan": { + "QueryType": "UPDATE", + "Original": "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "ue_id": 0 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.id from user_extra as ue where 1 != 1", + "Query": "select ue.id from user_extra as ue lock in share mode", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id from `user` as u where 1 != 1", + "Query": "select u.id from `user` as u where u.id = :ue_id lock in share mode", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "ChangedVindexValues": [ + "name_user_map:3" + ], + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly, u.`name` = 'foo' from `user` as u where u.id in ::dml_vals for update", + "Query": "update `user` as u set u.`name` = 'foo' where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "update with multi table join with single target modifying lookup vindex", + "query": "update user join user_extra on user.id = user_extra.id set user.name = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update user join user_extra on user.id = 
user_extra.id set user.name = 'foo'", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_extra_id": 0 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.id from user_extra lock in share mode", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` where `user`.id = :user_extra_id lock in share mode", + "Table": "`user`", + "Values": [ + ":user_extra_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "ChangedVindexValues": [ + "name_user_map:3" + ], + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly, `user`.`name` = 'foo' from `user` where `user`.id in ::dml_vals for update", + "Query": "update `user` set `user`.`name` = 'foo' where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "update with multi table join with single target having dependent column update", + "query": "update user as u, user_extra as ue set u.col = ue.col where u.id = ue.id", + "plan": { + "QueryType": "UPDATE", + "Original": "update user as u, user_extra as ue set u.col = ue.col where u.id = ue.id", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", 
+ "BindVars": [ + "0:[ue_col:1]" + ], + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0", + "JoinVars": { + "ue_id": 1 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.col, ue.id from user_extra as ue where 1 != 1", + "Query": "select ue.col, ue.id from user_extra as ue for update", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id from `user` as u where 1 != 1", + "Query": "select u.id from `user` as u where u.id = :ue_id for update", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update `user` as u set u.col = :ue_col where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "update with multi table join with single target having multiple dependent column update", + "query": "update user as u, user_extra as ue set u.col = ue.foo + ue.bar + u.baz where u.id = ue.id", + "plan": { + "QueryType": "UPDATE", + "Original": "update user as u, user_extra as ue set u.col = ue.foo + ue.bar + u.baz where u.id = ue.id", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "BindVars": [ + "0:[ue_bar:2 ue_foo:1]" + ], + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1", + "JoinVars": { + "ue_id": 2 + }, + "TableName": "user_extra_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": 
"Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.foo, ue.bar, ue.id from user_extra as ue where 1 != 1", + "Query": "select ue.foo, ue.bar, ue.id from user_extra as ue for update", + "Table": "user_extra" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id from `user` as u where 1 != 1", + "Query": "select u.id from `user` as u where u.id = :ue_id for update", + "Table": "`user`", + "Values": [ + ":ue_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update `user` as u set u.col = :ue_foo + :ue_bar + u.baz where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "update with multi table join with multi target having dependent column update", + "query": "update user, user_extra ue set user.name = ue.id + 'foo', ue.bar = user.baz where user.id = ue.id and user.id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update user, user_extra ue set user.name = ue.id + 'foo', ue.bar = user.baz where user.id = ue.id and user.id = 1", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "BindVars": [ + "0:[ue_id:1]", + "1:[user_baz:3]" + ], + "Offset": [ + "0:[0]", + "1:[1 2]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, `user`.baz from `user` where 1 != 1", + "Query": "select `user`.id, `user`.baz from 
`user` where `user`.id = 1 for update", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.id, ue.user_id from user_extra as ue where 1 != 1", + "Query": "select ue.id, ue.user_id from user_extra as ue where ue.id = :user_id for update", + "Table": "user_extra" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "ChangedVindexValues": [ + "name_user_map:3" + ], + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly, `user`.`name` = :ue_id + 'foo' from `user` where `user`.id in ::dml_vals for update", + "Query": "update `user` set `user`.`name` = :ue_id + 'foo' where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Update", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update user_extra as ue set ue.bar = :user_baz where (ue.id, ue.user_id) in ::dml_vals", + "Table": "user_extra", + "Values": [ + "dml_vals:1" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "update with multi table reference with multi target update on a derived table", + "query": "update ignore (select foo, col, bar from user) u, music m set u.foo = 21, u.bar = 'abc' where u.col = m.col", + "plan": "VT03032: the target table (select foo, col, bar from `user`) as u of the UPDATE is not updatable" + }, + { + "comment": "update with derived table", + "query": "update (select id from user) as u set id = 4", + "plan": "VT03032: the target table (select id from `user`) as u of the UPDATE is not updatable" + }, + { + "comment": "Delete with routed table on 
music", + "query": "delete from second_user.bar", + "plan": { + "QueryType": "DELETE", + "Original": "delete from second_user.bar", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select user_id, id from music as bar for update", + "Query": "delete from music as bar", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Update with routed table on music", + "query": "update second_user.bar set col = 23", + "plan": { + "QueryType": "UPDATE", + "Original": "update second_user.bar set col = 23", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update music as bar set col = 23", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "Insert with routed table on music", + "query": "insert into second_user.bar(id) values (2)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into second_user.bar(id) values (2)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into music(id, user_id) values (:_id_0, :_user_id_0)", + "TableName": "music", + "VindexValues": { + "music_user_map": "2", + "user_index": "null" + } + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "sharded subquery in sharded delete", + "query": "delete from user where id = (select id from music where user_id = 1)", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user where id = (select id from music where user_id = 1)", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + 
"InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from music where 1 != 1", + "Query": "select id from music where user_id = 1", + "Table": "music", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + { + "InputName": "Outer", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where id = :__sq1 for update", + "Query": "delete from `user` where id = :__sq1", + "Table": "user", + "Values": [ + ":__sq1" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "unsharded subquery in sharded delete", + "query": "delete from user where col = (select id from unsharded)", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user where col = (select id from unsharded)", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select id from unsharded where 1 != 1", + "Query": "select id from unsharded", + "Table": "unsharded" + }, + { + "InputName": "Outer", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` where col = :__sq1 for update", + "Query": "delete from `user` where col = :__sq1", + "Table": "user" + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "sharded subquery in unsharded delete", + 
"query": "delete from unsharded where col = (select id from user)", + "plan": { + "QueryType": "DELETE", + "Original": "delete from unsharded where col = (select id from user)", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" + }, + { + "InputName": "Outer", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from unsharded where col = :__sq1", + "Table": "unsharded" + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "sharded subquery in unsharded subquery in unsharded delete", + "query": "delete from unsharded where col = (select id from unsharded where id = (select id from user))", + "plan": { + "QueryType": "DELETE", + "Original": "delete from unsharded where col = (select id from unsharded where id = (select id from user))", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq2" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user`", + "Table": "`user`" + }, + { + "InputName": "Outer", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select id from unsharded where 1 != 1", + "Query": "select id 
from unsharded where id = :__sq2", + "Table": "unsharded" + } + ] + }, + { + "InputName": "Outer", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from unsharded where col = :__sq1", + "Table": "unsharded" + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "sharded join unsharded subquery in unsharded delete", + "query": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)", + "plan": { + "QueryType": "DELETE", + "Original": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "unsharded_id": 0 + }, + "TableName": "unsharded_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select unsharded.id from unsharded where 1 != 1", + "Query": "select unsharded.id from unsharded", + "Table": "unsharded" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id from `user` where 1 != 1", + "Query": "select id from `user` where `user`.id = :unsharded_id", + "Table": "`user`", + "Values": [ + ":unsharded_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "InputName": "Outer", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from unsharded where col = :__sq1", + "Table": "unsharded" + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + 
"comment": "multi target delete on sharded table", + "query": "delete u, m from user u, music m where u.col = m.col and u.foo = m.bar and u.baz = 12 and m.baz = 21", + "plan": { + "QueryType": "DELETE", + "Original": "delete u, m from user u, music m where u.col = m.col and u.foo = m.bar and u.baz = 12 and m.baz = 21", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "u_col": 1, + "u_foo": 2 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col, u.foo from `user` as u where 1 != 1", + "Query": "select u.id, u.col, u.foo from `user` as u where u.baz = 12 for update", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.id from music as m where 1 != 1", + "Query": "select m.id from music as m where m.baz = 21 and m.bar = :u_foo and m.col = :u_col for update", + "Table": "music" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select user_id, id from music as m where m.id in ::dml_vals for update", + "Query": "delete from 
music as m where m.id in ::dml_vals", + "Table": "music", + "Values": [ + "::dml_vals" + ], + "Vindex": "music_user_map" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "delete with multi-table targets", + "query": "delete music,user from music inner join user where music.id = user.id", + "plan": { + "QueryType": "DELETE", + "Original": "delete music,user from music inner join user where music.id = user.id", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "music_id": 0 + }, + "TableName": "music_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select music.id from music where 1 != 1", + "Query": "select music.id from music for update", + "Table": "music" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user` where `user`.id = :music_id for update", + "Table": "`user`", + "Values": [ + ":music_id" + ], + "Vindex": "user_index" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select user_id, id from music where music.id in ::dml_vals for update", + "Query": "delete from music where music.id in ::dml_vals", + "Table": "music", + "Values": [ + "::dml_vals" + ], + "Vindex": "music_user_map" + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + 
"OwnedVindexQuery": "select Id, `Name`, Costly from `user` where `user`.id in ::dml_vals for update", + "Query": "delete from `user` where `user`.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "multi table delete with 2 sharded tables join on vindex column", + "query": "delete u, m from user u join music m on u.id = m.user_id", + "plan": { + "QueryType": "DELETE", + "Original": "delete u, m from user u join music m on u.id = m.user_id", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, m.id from `user` as u, music as m where 1 != 1", + "Query": "select u.id, m.id from `user` as u, music as m where u.id = m.user_id for update", + "Table": "`user`, music" + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select user_id, id from music as m where m.id in ::dml_vals for update", + "Query": "delete from music as m where m.id in ::dml_vals", + "Table": "music", + "Values": [ + "::dml_vals" + ], + "Vindex": "music_user_map" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + 
"comment": "multi table delete with 2 sharded tables join on non-vindex column", + "query": "delete u, m from user u join music m on u.col = m.col", + "plan": { + "QueryType": "DELETE", + "Original": "delete u, m from user u join music m on u.col = m.col", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + "Query": "select u.id, u.col from `user` as u for update", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.id from music as m where 1 != 1", + "Query": "select m.id from music as m where m.col = :u_col for update", + "Table": "music" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select user_id, id from music as m where m.id in ::dml_vals for update", + "Query": "delete from music as m where m.id in ::dml_vals", + "Table": "music", + "Values": [ + "::dml_vals" + ], + "Vindex": "music_user_map" + } + ] + }, + 
"TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "multi target delete with composite primary key having single column vindex", + "query": "delete u, ue from user u join user_extra ue on u.id = ue.user_id", + "plan": { + "QueryType": "DELETE", + "Original": "delete u, ue from user u join user_extra ue on u.id = ue.user_id", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1 2]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, ue.id, ue.user_id from `user` as u, user_extra as ue where 1 != 1", + "Query": "select u.id, ue.id, ue.user_id from `user` as u, user_extra as ue where u.id = ue.user_id for update", + "Table": "`user`, user_extra" + }, + { + "OperatorType": "Delete", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u where u.id in ::dml_vals for update", + "Query": "delete from `user` as u where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from user_extra as ue where (ue.id, ue.user_id) in ::dml_vals", + "Table": "user_extra", + "Values": [ + "dml_vals:1" + ], + "Vindex": "user_index" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "multi target delete with composite primary key with lookup vindex as sharding column", + "query": "delete o, ev from `order` o join order_event ev where o.oid = ev.oid and ev.ename = 'a'", + "plan": { + "QueryType": "DELETE", + "Original": "delete o, ev from `order` o join order_event ev 
where o.oid = ev.oid and ev.ename = 'a'", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0 1]", + "1:[2 3]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "ordering", + "Sharded": true + }, + "FieldQuery": "select ev.oid, ev.ename, o.oid, o.region_id from `order` as o, order_event as ev where 1 != 1", + "Query": "select ev.oid, ev.ename, o.oid, o.region_id from `order` as o, order_event as ev where ev.ename = 'a' and o.oid = ev.oid for update", + "Table": "`order`, order_event" + }, + { + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "ordering", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from order_event as ev where (ev.oid, ev.ename) in ::dml_vals", + "Table": "order_event", + "Values": [ + "dml_vals:0" + ], + "Vindex": "oid_vdx" + }, + { + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "ordering", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "xxhash", + "OwnedVindexQuery": "select region_id, oid from `order` as o where (o.oid, o.region_id) in ::dml_vals for update", + "Query": "delete from `order` as o where (o.oid, o.region_id) in ::dml_vals", + "Table": "order", + "Values": [ + "dml_vals:1" + ], + "Vindex": "xxhash" + } + ] + }, + "TablesUsed": [ + "ordering.order", + "ordering.order_event" + ] + } + }, + { + "comment": "update with multi table reference with multi target update", + "query": "update ignore user u, music m set u.foo = 21, m.bar = 'abc' where u.col = m.col", + "plan": { + "QueryType": "UPDATE", + "Original": "update ignore user u, music m set u.foo = 21, m.bar = 'abc' where u.col = m.col", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + 
"JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "u_col": 1 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col from `user` as u where 1 != 1", + "Query": "select u.id, u.col from `user` as u for update", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select m.id from music as m where 1 != 1", + "Query": "select m.id from music as m where m.col = :u_col for update", + "Table": "music" + } + ] + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update ignore `user` as u set u.foo = 21 where u.id in ::dml_vals", + "Table": "user", + "Values": [ + "::dml_vals" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Update", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update ignore music as m set m.bar = 'abc' where m.id in ::dml_vals", + "Table": "music", + "Values": [ + "::dml_vals" + ], + "Vindex": "music_user_map" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "RowAlias in INSERT", + "query": "INSERT INTO authoritative (user_id,col1,col2) VALUES (1,'2',3),(4,'5',6) AS new ON DUPLICATE KEY UPDATE col2 = new.user_id+new.col1", + "plan": { + "QueryType": "INSERT", + "Original": "INSERT INTO authoritative (user_id,col1,col2) VALUES (1,'2',3),(4,'5',6) AS new ON DUPLICATE KEY UPDATE col2 = new.user_id+new.col1", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "InsertIgnore": true, + "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, '2', 3), (:_user_id_1, 
'5', 6) as new on duplicate key update col2 = new.user_id + new.col1", + "TableName": "authoritative", + "VindexValues": { + "user_index": "1, 4" + } + }, + "TablesUsed": [ + "user.authoritative" + ] + } + }, + { + "comment": "RowAlias with explicit columns in INSERT", + "query": "INSERT INTO authoritative (user_id,col1,col2) VALUES (1,'2',3),(4,'5',6) AS new(a,b,c) ON DUPLICATE KEY UPDATE col1 = a+c", + "plan": { + "QueryType": "INSERT", + "Original": "INSERT INTO authoritative (user_id,col1,col2) VALUES (1,'2',3),(4,'5',6) AS new(a,b,c) ON DUPLICATE KEY UPDATE col1 = a+c", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "InsertIgnore": true, + "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, '2', 3), (:_user_id_1, '5', 6) as new (a, b, c) on duplicate key update col1 = a + c", + "TableName": "authoritative", + "VindexValues": { + "user_index": "1, 4" + } + }, + "TablesUsed": [ + "user.authoritative" + ] + } + }, + { + "comment": "RowAlias in INSERT (no column list)", + "query": "INSERT INTO authoritative VALUES (1,'2',3),(4,'5',6) AS new ON DUPLICATE KEY UPDATE col2 = new.user_id+new.col1", + "plan": { + "QueryType": "INSERT", + "Original": "INSERT INTO authoritative VALUES (1,'2',3),(4,'5',6) AS new ON DUPLICATE KEY UPDATE col2 = new.user_id+new.col1", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "InsertIgnore": true, + "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, '2', 3), (:_user_id_1, '5', 6) as new on duplicate key update col2 = new.user_id + new.col1", + "TableName": "authoritative", + "VindexValues": { + "user_index": "1, 4" + } + }, + "TablesUsed": [ + "user.authoritative" + ] + } + }, + { + "comment": "RowAlias with explicit columns in INSERT (no column list)", + 
"query": "INSERT INTO authoritative VALUES (1,'2',3),(4,'5',6) AS new(a,b,c) ON DUPLICATE KEY UPDATE col1 = a+c", + "plan": { + "QueryType": "INSERT", + "Original": "INSERT INTO authoritative VALUES (1,'2',3),(4,'5',6) AS new(a,b,c) ON DUPLICATE KEY UPDATE col1 = a+c", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "InsertIgnore": true, + "Query": "insert into authoritative(user_id, col1, col2) values (:_user_id_0, '2', 3), (:_user_id_1, '5', 6) as new (a, b, c) on duplicate key update col1 = a + c", + "TableName": "authoritative", + "VindexValues": { + "user_index": "1, 4" + } + }, + "TablesUsed": [ + "user.authoritative" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/filter_cases.json b/go/vt/vtgate/planbuilder/testdata/filter_cases.json index 83e675d89f6..6eae5c603b3 100644 --- a/go/vt/vtgate/planbuilder/testdata/filter_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/filter_cases.json @@ -870,6 +870,29 @@ ] } }, + { + "comment": "Merging subqueries should remove keyspace from query", + "query": "select u.id from user.user as u where not exists (select 1 from user.user_extra as ue where u.id = ue.user_id)", + "plan": { + "QueryType": "SELECT", + "Original": "select u.id from user.user as u where not exists (select 1 from user.user_extra as ue where u.id = ue.user_id)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id from `user` as u where 1 != 1", + "Query": "select u.id from `user` as u where not exists (select 1 from user_extra as ue where u.id = ue.user_id)", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, { "comment": "Single table equality route with unsigned value", "query": "select id from user where name = 18446744073709551615", @@ -1966,7 +1989,7 @@ "Sharded": true }, 
"FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where not :__sq_has_values and id not in ::__sq1", + "Query": "select id from `user` where not :__sq_has_values or id not in ::__sq1", "Table": "`user`" } ] @@ -2503,7 +2526,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where not :__sq_has_values and id not in ::__sq1 and :__sq_has_values1 and id in ::__vals", + "Query": "select id from `user` where (not :__sq_has_values or id not in ::__sq1) and :__sq_has_values1 and id in ::__vals", "Table": "`user`", "Values": [ "::__sq2" @@ -2950,7 +2973,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and not :__sq_has_values and id not in ::__sq1", + "Query": "select id from `user` where id = 5 and id in (select user_extra.col from user_extra where user_extra.user_id = 5) and (not :__sq_has_values or id not in ::__sq1)", "Table": "`user`", "Values": [ "5" @@ -4018,7 +4041,31 @@ "Sharded": true }, "FieldQuery": "select a + 2 as a from `user` where 1 != 1", - "Query": "select a + 2 as a from `user` where a + 2 = 42", + "Query": "select a + 2 as a from `user` where `user`.a + 2 = 42", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Order by aliases are expanded", + "query": "select a+2 as a from user order by a", + "plan": { + "QueryType": "SELECT", + "Original": "select a+2 as a from user order by a", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a + 2 as a, weight_string(a + 2) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select a + 2 as a, weight_string(a + 2) from `user` order by `user`.a + 2 asc", + "ResultColumns": 1, "Table": "`user`" }, "TablesUsed": [ @@ 
-4110,7 +4157,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where id = 5 and `name` = 'foo' or id = 12 and `name` = 'bar'", + "Query": "select id from `user` where id in ::__vals and (id = 5 or `name` = 'bar') and (`name` = 'foo' or id = 12) and `name` in ('foo', 'bar')", "Table": "`user`", "Values": [ "(5, 12)" @@ -4364,7 +4411,7 @@ }, "FieldQuery": "select 1, ts, weight_string(ts) from `user` where 1 != 1", "OrderBy": "(1|2) ASC", - "Query": "select 1, ts, weight_string(ts) from `user` where shard_key = 1 and is_removed = 1 and cmd in ('A', 'B', 'C') and (not user_id = 1 or not user_id is not null or not ts >= 1 or not ts <= 2) and (not user_id = 1 or not user_id is not null or not ts >= 12 or not ts <= 13) and (not user_id = 1 or not user_id is not null or not ts >= 14 or not ts <= 15) and (not user_id = 1 or not user_id is not null or not ts >= 16 or not ts <= 17) and (not user_id = 1 or not user_id is not null or not ts >= 18 or not ts <= 19) and (not user_id = 1 or not user_id is not null or not ts >= 110 or not ts <= 111) and (not user_id = 1 or not user_id is not null or not ts >= 112 or not ts <= 113) and (not user_id = 1 or not user_id is not null or not ts >= 114 or not ts <= 115) and (not user_id = 1 or not user_id is not null or not ts >= 116 or not ts <= 117) and (not user_id = 1 or not user_id is not null or not ts >= 118 or not ts <= 119) and (not user_id = 1 or not user_id is not null or not ts >= 120 or not ts <= 121) and (not user_id = 1 or not user_id is not null or not ts >= 122 or not ts <= 123) and (not user_id = 1 or not user_id is not null or not ts >= 124 or not ts <= 125) and (not user_id = 1 or not user_id is not null or not ts >= 126 or not ts <= 127) and (not user_id = 1 or not user_id is not null or not ts >= 128 or not ts <= 129) and (not user_id = 1 or not user_id is not null or not ts >= 130 or not ts <= 131) and (not user_id = 1 or not user_id is not null or not ts >= 
132 or not ts <= 133) and (not user_id = 1 or not user_id is not null or not ts >= 134 or not ts <= 135) and (not user_id = 1 or not user_id is not null or not ts >= 136 or not ts <= 137) and (not user_id = 1 or not user_id is not null or not ts >= 138 or not ts <= 139) and (not user_id = 1 or not user_id is not null or not ts >= 140 or not ts <= 141) and (not user_id = 1 or not user_id is not null or not ts >= 142 or not ts <= 143) and (not user_id = 1 or not user_id is not null or not ts >= 144 or not ts <= 145) and (not user_id = 1 or not user_id is not null or not ts >= 146 or not ts <= 147) and (not user_id = 1 or not user_id is not null or not ts >= 148 or not ts <= 149) and (not user_id = 1 or not user_id is not null or not ts >= 150 or not ts <= 151) and (not user_id = 1 or not user_id is not null or not ts >= 152 or not ts <= 153) and (not user_id = 1 or not user_id is not null or not ts >= 154 or not ts <= 155) and (not user_id = 1 or not user_id is not null or not ts >= 156 or not ts <= 157) and (not user_id = 1 or not user_id is not null or not ts >= 158 or not ts <= 159) and (not user_id = 1 or not user_id is not null or not ts >= 160 or not ts <= 161) and (not user_id = 1 or not user_id is not null or not ts >= 162 or not ts <= 163) and (not user_id = 1 or not user_id is not null or not ts >= 164 or not ts <= 165) and (not user_id = 1 or not user_id is not null or not ts >= 166 or not ts <= 167) and (not user_id = 1 or not user_id is not null or not ts >= 168 or not ts <= 169) and (not user_id = 1 or not user_id is not null or not ts >= 170 or not ts <= 171) and (not user_id = 1 or not user_id is not null or not ts >= 172 or not ts <= 173) and (not user_id = 1 or not user_id is not null or not ts >= 174 or not ts <= 175) and (not user_id = 1 or not user_id is not null or not ts >= 176 or not ts <= 177) and (not user_id = 1 or not user_id is not null or not ts >= 178 or not ts <= 179) and (not user_id = 1 or not user_id is not null or not ts >= 180 or 
not ts <= 181) and (not user_id = 1 or not user_id is not null or not ts >= 182 or not ts <= 183) and (not user_id = 1 or not user_id is not null or not ts >= 184 or not ts <= 185) and (not user_id = 1 or not user_id is not null or not ts >= 186 or not ts <= 187) and (not user_id = 1 or not user_id is not null or not ts >= 188 or not ts <= 189) and (not user_id = 1 or not user_id is not null or not ts >= 190 or not ts <= 191) and (not user_id = 1 or not user_id is not null or not ts >= 192 or not ts <= 193) and (not user_id = 1 or not user_id is not null or not ts >= 194 or not ts <= 195) and (not user_id = 1 or not user_id is not null or not ts >= 196 or not ts <= 197) and (not user_id = 1 or not user_id is not null or not ts >= 198 or not ts <= 199) and (not user_id = 1 or not user_id is not null or not ts >= 1100 or not ts <= 1101) and (not user_id = 1 or not user_id is not null or not ts >= 1102 or not ts <= 1103) and (not user_id = 1 or not user_id is not null or not ts >= 1104 or not ts <= 1105) and (not user_id = 1 or not user_id is not null or not ts >= 1106 or not ts <= 1107) and (not user_id = 1 or not user_id is not null or not ts >= 1108 or not ts <= 1109) and (not user_id = 1 or not user_id is not null or not ts >= 1110 or not ts <= 1111) and (not user_id = 1 or not user_id is not null or not ts >= 1112 or not ts <= 1113) and (not user_id = 1 or not user_id is not null or not ts >= 1114 or not ts <= 1115) and (not user_id = 1 or not user_id is not null or not ts >= 1116 or not ts <= 1117) and (not user_id = 1 or not user_id is not null or not ts >= 1118 or not ts <= 1119) and (not user_id = 1 or not user_id is not null or not ts >= 1120 or not ts <= 1121) and (not user_id = 1 or not user_id is not null or not ts >= 1122 or not ts <= 1123) and (not user_id = 1 or not user_id is not null or not ts >= 1124 or not ts <= 1125) and (not user_id = 1 or not user_id is not null or not ts >= 1126 or not ts <= 1127) and (not user_id = 1 or not user_id is not null 
or not ts >= 1128 or not ts <= 1129) and (not user_id = 1 or not user_id is not null or not ts >= 1130 or not ts <= 1131) and (not user_id = 1 or not user_id is not null or not ts >= 1132 or not ts <= 1133) and (not user_id = 1 or not user_id is not null or not ts >= 1134 or not ts <= 1135) and (not user_id = 1 or not user_id is not null or not ts >= 1136 or not ts <= 1137) and (not user_id = 1 or not user_id is not null or not ts >= 1138 or not ts <= 1139) and (not user_id = 1 or not user_id is not null or not ts >= 1140 or not ts <= 1141) and (not user_id = 1 or not user_id is not null or not ts >= 1142 or not ts <= 1143) and (not user_id = 1 or not user_id is not null or not ts >= 1144 or not ts <= 1145) and (not user_id = 1 or not user_id is not null or not ts >= 1146 or not ts <= 1147) and (not user_id = 1 or not user_id is not null or not ts >= 1148 or not ts <= 1149) and (not user_id = 1 or not user_id is not null or not ts >= 1150 or not ts <= 1151) and (not user_id = 1 or not user_id is not null or not ts >= 1152 or not ts <= 1153) and (not user_id = 1 or not user_id is not null or not ts >= 1154 or not ts <= 1155) and (not user_id = 1 or not user_id is not null or not ts >= 1156 or not ts <= 1157) and (not user_id = 1 or not user_id is not null or not ts >= 1158 or not ts <= 1159) and (not user_id = 1 or not user_id is not null or not ts >= 1160 or not ts <= 1161) and (not user_id = 1 or not user_id is not null or not ts >= 1162 or not ts <= 1163) and (not user_id = 1 or not user_id is not null or not ts >= 1164 or not ts <= 1165) and (not user_id = 1 or not user_id is not null or not ts >= 1166 or not ts <= 1167) and (not user_id = 1 or not user_id is not null or not ts >= 1168 or not ts <= 1169) and (not user_id = 1 or not user_id is not null or not ts >= 1170 or not ts <= 1171) and (not user_id = 1 or not user_id is not null or not ts >= 1172 or not ts <= 1173) and (not user_id = 1 or not user_id is not null or not ts >= 1174 or not ts <= 1175) and 
(not user_id = 1 or not user_id is not null or not ts >= 1176 or not ts <= 1177) and (not user_id = 1 or not user_id is not null or not ts >= 1178 or not ts <= 1179) and (not user_id = 1 or not user_id is not null or not ts >= 1180 or not ts <= 1181) and (not user_id = 1 or not user_id is not null or not ts >= 1182 or not ts <= 1183) and (not user_id = 1 or not user_id is not null or not ts >= 1184 or not ts <= 1185) and (not user_id = 1 or not user_id is not null or not ts >= 1186 or not ts <= 1187) and (not user_id = 1 or not user_id is not null or not ts >= 1188 or not ts <= 1189) and (not user_id = 1 or not user_id is not null or not ts >= 1190 or not ts <= 1191) and (not user_id = 1 or not user_id is not null or not ts >= 1192 or not ts <= 1193) and (not user_id = 1 or not user_id is not null or not ts >= 1194 or not ts <= 1195) and (not user_id = 1 or not user_id is not null or not ts >= 1196 or not ts <= 1197) and (not user_id = 1 or not user_id is not null or not ts >= 1198 or not ts <= 1199) and (not user_id = 1 or not user_id is not null or not ts >= 1200 or not ts <= 1201) and (not user_id = 1 or not user_id is not null or not ts >= 1202 or not ts <= 1203) and (not user_id = 1 or not user_id is not null or not ts >= 1204 or not ts <= 1205) and (not user_id = 1 or not user_id is not null or not ts >= 1206 or not ts <= 1207) and (not user_id = 1 or not user_id is not null or not ts >= 1208 or not ts <= 1209) and (not user_id = 1 or not user_id is not null or not ts >= 1210 or not ts <= 1211) and (not user_id = 1 or not user_id is not null or not ts >= 1212 or not ts <= 1213) and (not user_id = 1 or not user_id is not null or not ts >= 1214 or not ts <= 1215) and (not user_id = 1 or not user_id is not null or not ts >= 1216 or not ts <= 1217) and (not user_id = 1 or not user_id is not null or not ts >= 1218 or not ts <= 1219) and (not user_id = 1 or not user_id is not null or not ts >= 1220 or not ts <= 1221) and (not user_id = 1 or not user_id is not null 
or not ts >= 1222 or not ts <= 1223) and (not user_id = 1 or not user_id is not null or not ts >= 1224 or not ts <= 1225) and (not user_id = 1 or not user_id is not null or not ts >= 1226 or not ts <= 1227) and (not user_id = 1 or not user_id is not null or not ts >= 1228 or not ts <= 1229) and (not user_id = 1 or not user_id is not null or not ts >= 1230 or not ts <= 1231) and (not user_id = 1 or not user_id is not null or not ts >= 1232 or not ts <= 1233) and (not user_id = 1 or not user_id is not null or not ts >= 1234 or not ts <= 1235) and (not user_id = 1 or not user_id is not null or not ts >= 1236 or not ts <= 1237) and (not user_id = 1 or not user_id is not null or not ts >= 1238 or not ts <= 1239) and (not user_id = 1 or not user_id is not null or not ts >= 1240 or not ts <= 1241) and (not user_id = 1 or not user_id is not null or not ts >= 1242 or not ts <= 1243) and (not user_id = 1 or not user_id is not null or not ts >= 1244 or not ts <= 1245) and (not user_id = 1 or not user_id is not null or not ts >= 1246 or not ts <= 1247) and (not user_id = 1 or not user_id is not null or not ts >= 1248 or not ts <= 1249) and (not user_id = 1 or not user_id is not null or not ts >= 1250 or not ts <= 1251) and (not user_id = 1 or not user_id is not null or not ts >= 1252 or not ts <= 1253) and (not user_id = 1 or not user_id is not null or not ts >= 1254 or not ts <= 1255) and (not user_id = 1 or not user_id is not null or not ts >= 1256 or not ts <= 1257) and (not user_id = 1 or not user_id is not null or not ts >= 1258 or not ts <= 1259) and (not user_id = 1 or not user_id is not null or not ts >= 1260 or not ts <= 1261) and (not user_id = 1 or not user_id is not null or not ts >= 1262 or not ts <= 1263) and (not user_id = 1 or not user_id is not null or not ts >= 1264 or not ts <= 1265) and (not user_id = 1 or not user_id is not null or not ts >= 1266 or not ts <= 1267) and (not user_id = 1 or not user_id is not null or not ts >= 1268 or not ts <= 1269) and 
(not user_id = 1 or not user_id is not null or not ts >= 1270 or not ts <= 1271) and (not user_id = 1 or not user_id is not null or not ts >= 1272 or not ts <= 1273) and (not user_id = 1 or not user_id is not null or not ts >= 1274 or not ts <= 1275) and (not user_id = 1 or not user_id is not null or not ts >= 1276 or not ts <= 1277) and (not user_id = 1 or not user_id is not null or not ts >= 1278 or not ts <= 1279) and (not user_id = 1 or not user_id is not null or not ts >= 1280 or not ts <= 1281) and (not user_id = 1 or not user_id is not null or not ts >= 1282 or not ts <= 1283) and (not user_id = 1 or not user_id is not null or not ts >= 1284 or not ts <= 1285) and (not user_id = 1 or not user_id is not null or not ts >= 1286 or not ts <= 1287) and (not user_id = 1 or not user_id is not null or not ts >= 1288 or not ts <= 1289) and (not user_id = 1 or not user_id is not null or not ts >= 1290 or not ts <= 1291) and (not user_id = 1 or not user_id is not null or not ts >= 1292 or not ts <= 1293) and (not user_id = 1 or not user_id is not null or not ts >= 1294 or not ts <= 1295) and (not user_id = 1 or not user_id is not null or not ts >= 1296 or not ts <= 1297) and (not user_id = 1 or not user_id is not null or not ts >= 1298 or not ts <= 1299) and (not user_id = 1 or not user_id is not null or not ts >= 1300 or not ts <= 1301) and (not user_id = 1 or not user_id is not null or not ts >= 1302 or not ts <= 1303) and (not user_id = 1 or not user_id is not null or not ts >= 1304 or not ts <= 1305) and (not user_id = 1 or not user_id is not null or not ts >= 1306 or not ts <= 1307) and (not user_id = 1 or not user_id is not null or not ts >= 1308 or not ts <= 1309) and (not user_id = 1 or not user_id is not null or not ts >= 1310 or not ts <= 1311) and (not user_id = 1 or not user_id is not null or not ts >= 1312 or not ts <= 1313) and (not user_id = 1 or not user_id is not null or not ts >= 1314 or not ts <= 1315) and (not user_id = 1 or not user_id is not null 
or not ts >= 1316 or not ts <= 1317) and (not user_id = 1 or not user_id is not null or not ts >= 1318 or not ts <= 1319) and (not user_id = 1 or not user_id is not null or not ts >= 1320 or not ts <= 1321) and (not user_id = 1 or not user_id is not null or not ts >= 1322 or not ts <= 1323) and (not user_id = 1 or not user_id is not null or not ts >= 1324 or not ts <= 1325) and (not user_id = 1 or not user_id is not null or not ts >= 1326 or not ts <= 1327) and (not user_id = 1 or not user_id is not null or not ts >= 1328 or not ts <= 1329) and (not user_id = 1 or not user_id is not null or not ts >= 1330 or not ts <= 1331) and (not user_id = 1 or not user_id is not null or not ts >= 1332 or not ts <= 1333) and (not user_id = 1 or not user_id is not null or not ts >= 1334 or not ts <= 1335) and (not user_id = 1 or not user_id is not null or not ts >= 1336 or not ts <= 1337) and (not user_id = 1 or not user_id is not null or not ts >= 1338 or not ts <= 1339) and (not user_id = 1 or not user_id is not null or not ts >= 1340 or not ts <= 1341) and (not user_id = 1 or not user_id is not null or not ts >= 1342 or not ts <= 1343) and (not user_id = 1 or not user_id is not null or not ts >= 1344 or not ts <= 1345) and (not user_id = 1 or not user_id is not null or not ts >= 1346 or not ts <= 1347) and (not user_id = 1 or not user_id is not null or not ts >= 1348 or not ts <= 1349) and (not user_id = 1 or not user_id is not null or not ts >= 1350 or not ts <= 1351) and (not user_id = 1 or not user_id is not null or not ts >= 1352 or not ts <= 1353) and (not user_id = 1 or not user_id is not null or not ts >= 1354 or not ts <= 1355) and (not user_id = 1 or not user_id is not null or not ts >= 1356 or not ts <= 1357) and (not user_id = 1 or not user_id is not null or not ts >= 1358 or not ts <= 1359) and (not user_id = 1 or not user_id is not null or not ts >= 1360 or not ts <= 1361) and (not user_id = 1 or not user_id is not null or not ts >= 1362 or not ts <= 1363) and 
(not user_id = 1 or not user_id is not null or not ts >= 1364 or not ts <= 1365) and (not user_id = 1 or not user_id is not null or not ts >= 1366 or not ts <= 1367) and (not user_id = 1 or not user_id is not null or not ts >= 1368 or not ts <= 1369) and (not user_id = 1 or not user_id is not null or not ts >= 1370 or not ts <= 1371) and (not user_id = 1 or not user_id is not null or not ts >= 1372 or not ts <= 1373) and (not user_id = 1 or not user_id is not null or not ts >= 1374 or not ts <= 1375) and (not user_id = 1 or not user_id is not null or not ts >= 1376 or not ts <= 1377) and (not user_id = 1 or not user_id is not null or not ts >= 1378 or not ts <= 1379) and (not user_id = 1 or not user_id is not null or not ts >= 1380 or not ts <= 1381) and (not user_id = 1 or not user_id is not null or not ts >= 1382 or not ts <= 1383) and (not user_id = 1 or not user_id is not null or not ts >= 1384 or not ts <= 1385) and (not user_id = 1 or not user_id is not null or not ts >= 1386 or not ts <= 1387) and (not user_id = 1 or not user_id is not null or not ts >= 1388 or not ts <= 1389) and (not user_id = 1 or not user_id is not null or not ts >= 1390 or not ts <= 1391) and (not user_id = 1 or not user_id is not null or not ts >= 1392 or not ts <= 1393) and (not user_id = 1 or not user_id is not null or not ts >= 1394 or not ts <= 1395) and (not user_id = 1 or not user_id is not null or not ts >= 1396 or not ts <= 1397) and (not user_id = 1 or not user_id is not null or not ts >= 1398 or not ts <= 1399) and (not user_id = 1 or not user_id is not null or not ts >= 1400 or not ts <= 1401) and (not user_id = 1 or not user_id is not null or not ts >= 1402 or not ts <= 1403) and (not user_id = 1 or not user_id is not null or not ts >= 1404 or not ts <= 1405) and (not user_id = 1 or not user_id is not null or not ts >= 1406 or not ts <= 1407) and (not user_id = 1 or not user_id is not null or not ts >= 1408 or not ts <= 1409) and (not user_id = 1 or not user_id is not null 
or not ts >= 1410 or not ts <= 1411) and (not user_id = 1 or not user_id is not null or not ts >= 1412 or not ts <= 1413) and (not user_id = 1 or not user_id is not null or not ts >= 1414 or not ts <= 1415) and (not user_id = 1 or not user_id is not null or not ts >= 1416 or not ts <= 1417) and (not user_id = 1 or not user_id is not null or not ts >= 1418 or not ts <= 1419) and (not user_id = 1 or not user_id is not null or not ts >= 1420 or not ts <= 1421) and (not user_id = 1 or not user_id is not null or not ts >= 1422 or not ts <= 1423) and (not user_id = 1 or not user_id is not null or not ts >= 1424 or not ts <= 1425) and (not user_id = 1 or not user_id is not null or not ts >= 1426 or not ts <= 1427) and (not user_id = 1 or not user_id is not null or not ts >= 1428 or not ts <= 1429) and (not user_id = 1 or not user_id is not null or not ts >= 1430 or not ts <= 1431) and (not user_id = 1 or not user_id is not null or not ts >= 1432 or not ts <= 1433) and (not user_id = 1 or not user_id is not null or not ts >= 1434 or not ts <= 1435) and (not user_id = 1 or not user_id is not null or not ts >= 1436 or not ts <= 1437) and (not user_id = 1 or not user_id is not null or not ts >= 1438 or not ts <= 1439) and (not user_id = 1 or not user_id is not null or not ts >= 1440 or not ts <= 1441) and (not user_id = 1 or not user_id is not null or not ts >= 1442 or not ts <= 1443) and (not user_id = 1 or not user_id is not null or not ts >= 1444 or not ts <= 1445) and (not user_id = 1 or not user_id is not null or not ts >= 1446 or not ts <= 1447) and (not user_id = 1 or not user_id is not null or not ts >= 1448 or not ts <= 1449) and (not user_id = 1 or not user_id is not null or not ts >= 1450 or not ts <= 1451) and (not user_id = 1 or not user_id is not null or not ts >= 1452 or not ts <= 1453) and (not user_id = 1 or not user_id is not null or not ts >= 1454 or not ts <= 1455) and (not user_id = 1 or not user_id is not null or not ts >= 1456 or not ts <= 1457) and 
(not user_id = 1 or not user_id is not null or not ts >= 1458 or not ts <= 1459) and (not user_id = 1 or not user_id is not null or not ts >= 1460 or not ts <= 1461) and (not user_id = 1 or not user_id is not null or not ts >= 1462 or not ts <= 1463) and (not user_id = 1 or not user_id is not null or not ts >= 1464 or not ts <= 1465) and (not user_id = 1 or not user_id is not null or not ts >= 1466 or not ts <= 1467) and (not user_id = 1 or not user_id is not null or not ts >= 1468 or not ts <= 1469) and (not user_id = 1 or not user_id is not null or not ts >= 1470 or not ts <= 1471) and (not user_id = 1 or not user_id is not null or not ts >= 1472 or not ts <= 1473) and (not user_id = 1 or not user_id is not null or not ts >= 1474 or not ts <= 1475) and (not user_id = 1 or not user_id is not null or not ts >= 1476 or not ts <= 1477) and (not user_id = 1 or not user_id is not null or not ts >= 1478 or not ts <= 1479) and (not user_id = 1 or not user_id is not null or not ts >= 1480 or not ts <= 1481) and (not user_id = 1 or not user_id is not null or not ts >= 1482 or not ts <= 1483) and (not user_id = 1 or not user_id is not null or not ts >= 1484 or not ts <= 1485) and (not user_id = 1 or not user_id is not null or not ts >= 1486 or not ts <= 1487) and (not user_id = 1 or not user_id is not null or not ts >= 1488 or not ts <= 1489) and (not user_id = 1 or not user_id is not null or not ts >= 1490 or not ts <= 1491) and (not user_id = 1 or not user_id is not null or not ts >= 1492 or not ts <= 1493) and (not user_id = 1 or not user_id is not null or not ts >= 1494 or not ts <= 1495) and (not user_id = 1 or not user_id is not null or not ts >= 1496 or not ts <= 1497) and (not user_id = 1 or not user_id is not null or not ts >= 1498 or not ts <= 1499) and (not user_id = 1 or not user_id is not null or not ts >= 1500 or not ts <= 1501) and (not user_id = 1 or not user_id is not null or not ts >= 1502 or not ts <= 1503) and (not user_id = 1 or not user_id is not null 
or not ts >= 1504 or not ts <= 1505) and (not user_id = 1 or not user_id is not null or not ts >= 1506 or not ts <= 1507) and (not user_id = 1 or not user_id is not null or not ts >= 1508 or not ts <= 1509) and (not user_id = 1 or not user_id is not null or not ts >= 1510 or not ts <= 1511) and (not user_id = 1 or not user_id is not null or not ts >= 1512 or not ts <= 1513) and (not user_id = 1 or not user_id is not null or not ts >= 1514 or not ts <= 1515) and (not user_id = 1 or not user_id is not null or not ts >= 1516 or not ts <= 1517) and (not user_id = 1 or not user_id is not null or not ts >= 1518 or not ts <= 1519) and (not user_id = 1 or not user_id is not null or not ts >= 1520 or not ts <= 1521) and (not user_id = 1 or not user_id is not null or not ts >= 1522 or not ts <= 1523) and (not user_id = 1 or not user_id is not null or not ts >= 1524 or not ts <= 1525) and (not user_id = 1 or not user_id is not null or not ts >= 1526 or not ts <= 1527) and (not user_id = 1 or not user_id is not null or not ts >= 1528 or not ts <= 1529) and (not user_id = 1 or not user_id is not null or not ts >= 1530 or not ts <= 1531) and (not user_id = 1 or not user_id is not null or not ts >= 1532 or not ts <= 1533) and (not user_id = 1 or not user_id is not null or not ts >= 1534 or not ts <= 1535) and (not user_id = 1 or not user_id is not null or not ts >= 1536 or not ts <= 1537) and (not user_id = 1 or not user_id is not null or not ts >= 1538 or not ts <= 1539) and (not user_id = 1 or not user_id is not null or not ts >= 1540 or not ts <= 1541) and (not user_id = 1 or not user_id is not null or not ts >= 1542 or not ts <= 1543) and (not user_id = 1 or not user_id is not null or not ts >= 1544 or not ts <= 1545) and (not user_id = 1 or not user_id is not null or not ts >= 1546 or not ts <= 1547) and (not user_id = 1 or not user_id is not null or not ts >= 1548 or not ts <= 1549) and (not user_id = 1 or not user_id is not null or not ts >= 1550 or not ts <= 1551) and 
(not user_id = 1 or not user_id is not null or not ts >= 1552 or not ts <= 1553) and (not user_id = 1 or not user_id is not null or not ts >= 1554 or not ts <= 1555) and (not user_id = 1 or not user_id is not null or not ts >= 1556 or not ts <= 1557) and (not user_id = 1 or not user_id is not null or not ts >= 1558 or not ts <= 1559) and (not user_id = 1 or not user_id is not null or not ts >= 1560 or not ts <= 1561) and (not user_id = 1 or not user_id is not null or not ts >= 1562 or not ts <= 1563) and (not user_id = 1 or not user_id is not null or not ts >= 1564 or not ts <= 1565) and (not user_id = 1 or not user_id is not null or not ts >= 1566 or not ts <= 1567) and (not user_id = 1 or not user_id is not null or not ts >= 1568 or not ts <= 1569) and (not user_id = 1 or not user_id is not null or not ts >= 1570 or not ts <= 1571) and (not user_id = 1 or not user_id is not null or not ts >= 1572 or not ts <= 1573) and (not user_id = 1 or not user_id is not null or not ts >= 1574 or not ts <= 1575) and (not user_id = 1 or not user_id is not null or not ts >= 1576 or not ts <= 1577) and (not user_id = 1 or not user_id is not null or not ts >= 1578 or not ts <= 1579) and (not user_id = 1 or not user_id is not null or not ts >= 1580 or not ts <= 1581) and (not user_id = 1 or not user_id is not null or not ts >= 1582 or not ts <= 1583) and (not user_id = 1 or not user_id is not null or not ts >= 1584 or not ts <= 1585) and (not user_id = 1 or not user_id is not null or not ts >= 1586 or not ts <= 1587) and (not user_id = 1 or not user_id is not null or not ts >= 1588 or not ts <= 1589) and (not user_id = 1 or not user_id is not null or not ts >= 1590 or not ts <= 1591) and (not user_id = 1 or not user_id is not null or not ts >= 1592 or not ts <= 1593) and (not user_id = 1 or not user_id is not null or not ts >= 1594 or not ts <= 1595) and (not user_id = 1 or not user_id is not null or not ts >= 1596 or not ts <= 1597) and (not user_id = 1 or not user_id is not null 
or not ts >= 1598 or not ts <= 1599) and (not user_id = 1 or not user_id is not null or not ts >= 1600 or not ts <= 1601) and (not user_id = 1 or not user_id is not null or not ts >= 1602 or not ts <= 1603) and (not user_id = 1 or not user_id is not null or not ts >= 1604 or not ts <= 1605) and (not user_id = 1 or not user_id is not null or not ts >= 1606 or not ts <= 1607) and (not user_id = 1 or not user_id is not null or not ts >= 1608 or not ts <= 1609) and (not user_id = 1 or not user_id is not null or not ts >= 1610 or not ts <= 1611) and (not user_id = 1 or not user_id is not null or not ts >= 1612 or not ts <= 1613) and (not user_id = 1 or not user_id is not null or not ts >= 1614 or not ts <= 1615) and (not user_id = 1 or not user_id is not null or not ts >= 1616 or not ts <= 1617) and (not user_id = 1 or not user_id is not null or not ts >= 1618 or not ts <= 1619) and (not user_id = 1 or not user_id is not null or not ts >= 1620 or not ts <= 1621) and (not user_id = 1 or not user_id is not null or not ts >= 1622 or not ts <= 1623) and (not user_id = 1 or not user_id is not null or not ts >= 1624 or not ts <= 1625) and (not user_id = 1 or not user_id is not null or not ts >= 1626 or not ts <= 1627) and (not user_id = 1 or not user_id is not null or not ts >= 1628 or not ts <= 1629) and (not user_id = 1 or not user_id is not null or not ts >= 1630 or not ts <= 1631) and (not user_id = 1 or not user_id is not null or not ts >= 1632 or not ts <= 1633) and (not user_id = 1 or not user_id is not null or not ts >= 1634 or not ts <= 1635) and (not user_id = 1 or not user_id is not null or not ts >= 1636 or not ts <= 1637) and (not user_id = 1 or not user_id is not null or not ts >= 1638 or not ts <= 1639) and (not user_id = 1 or not user_id is not null or not ts >= 1640 or not ts <= 1641) and (not user_id = 1 or not user_id is not null or not ts >= 1642 or not ts <= 1643) and (not user_id = 1 or not user_id is not null or not ts >= 1644 or not ts <= 1645) and 
(not user_id = 1 or not user_id is not null or not ts >= 1646 or not ts <= 1647) and (not user_id = 1 or not user_id is not null or not ts >= 1648 or not ts <= 1649) and (not user_id = 1 or not user_id is not null or not ts >= 1650 or not ts <= 1651) and (not user_id = 1 or not user_id is not null or not ts >= 1652 or not ts <= 1653) and (not user_id = 1 or not user_id is not null or not ts >= 1654 or not ts <= 1655) and (not user_id = 1 or not user_id is not null or not ts >= 1656 or not ts <= 1657) and (not user_id = 1 or not user_id is not null or not ts >= 1658 or not ts <= 1659) and (not user_id = 1 or not user_id is not null or not ts >= 1660 or not ts <= 1661) and (not user_id = 1 or not user_id is not null or not ts >= 1662 or not ts <= 1663) and (not user_id = 1 or not user_id is not null or not ts >= 1664 or not ts <= 1665) and (not user_id = 1 or not user_id is not null or not ts >= 1666 or not ts <= 1667) and (not user_id = 1 or not user_id is not null or not ts >= 1668 or not ts <= 1669) and (not user_id = 1 or not user_id is not null or not ts >= 1670 or not ts <= 1671) and (not user_id = 1 or not user_id is not null or not ts >= 1672 or not ts <= 1673) and (not user_id = 1 or not user_id is not null or not ts >= 1674 or not ts <= 1675) and (not user_id = 1 or not user_id is not null or not ts >= 1676 or not ts <= 1677) and (not user_id = 1 or not user_id is not null or not ts >= 1678 or not ts <= 1679) and (not user_id = 1 or not user_id is not null or not ts >= 1680 or not ts <= 1681) and (not user_id = 1 or not user_id is not null or not ts >= 1682 or not ts <= 1683) and (not user_id = 1 or not user_id is not null or not ts >= 1684 or not ts <= 1685) and (not user_id = 1 or not user_id is not null or not ts >= 1686 or not ts <= 1687) and (not user_id = 1 or not user_id is not null or not ts >= 1688 or not ts <= 1689) and (not user_id = 1 or not user_id is not null or not ts >= 1690 or not ts <= 1691) and (not user_id = 1 or not user_id is not null 
or not ts >= 1692 or not ts <= 1693) and (not user_id = 1 or not user_id is not null or not ts >= 1694 or not ts <= 1695) and (not user_id = 1 or not user_id is not null or not ts >= 1696 or not ts <= 1697) and (not user_id = 1 or not user_id is not null or not ts >= 1698 or not ts <= 1699) and (not user_id = 1 or not user_id is not null or not ts >= 1700 or not ts <= 1701) and (not user_id = 1 or not user_id is not null or not ts >= 1702 or not ts <= 1703) and (not user_id = 1 or not user_id is not null or not ts >= 1704 or not ts <= 1705) and (not user_id = 1 or not user_id is not null or not ts >= 1706 or not ts <= 1707) and (not user_id = 1 or not user_id is not null or not ts >= 1708 or not ts <= 1709) and (not user_id = 1 or not user_id is not null or not ts >= 1710 or not ts <= 1711) and (not user_id = 1 or not user_id is not null or not ts >= 1712 or not ts <= 1713) and (not user_id = 1 or not user_id is not null or not ts >= 1714 or not ts <= 1715) and (not user_id = 1 or not user_id is not null or not ts >= 1716 or not ts <= 1717) and (not user_id = 1 or not user_id is not null or not ts >= 1718 or not ts <= 1719) and (not user_id = 1 or not user_id is not null or not ts >= 1720 or not ts <= 1721) and (not user_id = 1 or not user_id is not null or not ts >= 1722 or not ts <= 1723) and (not user_id = 1 or not user_id is not null or not ts >= 1724 or not ts <= 1725) and (not user_id = 1 or not user_id is not null or not ts >= 1726 or not ts <= 1727) and (not user_id = 1 or not user_id is not null or not ts >= 1728 or not ts <= 1729) and (not user_id = 1 or not user_id is not null or not ts >= 1730 or not ts <= 1731) and (not user_id = 1 or not user_id is not null or not ts >= 1732 or not ts <= 1733) and (not user_id = 1 or not user_id is not null or not ts >= 1734 or not ts <= 1735) and (not user_id = 1 or not user_id is not null or not ts >= 1736 or not ts <= 1737) and (not user_id = 1 or not user_id is not null or not ts >= 1738 or not ts <= 1739) and 
(not user_id = 1 or not user_id is not null or not ts >= 1740 or not ts <= 1741) and (not user_id = 1 or not user_id is not null or not ts >= 1742 or not ts <= 1743) and (not user_id = 1 or not user_id is not null or not ts >= 1744 or not ts <= 1745) and (not user_id = 1 or not user_id is not null or not ts >= 1746 or not ts <= 1747) and (not user_id = 1 or not user_id is not null or not ts >= 1748 or not ts <= 1749) and (not user_id = 1 or not user_id is not null or not ts >= 1750 or not ts <= 1751) and (not user_id = 1 or not user_id is not null or not ts >= 1752 or not ts <= 1753) and (not user_id = 1 or not user_id is not null or not ts >= 1754 or not ts <= 1755) and (not user_id = 1 or not user_id is not null or not ts >= 1756 or not ts <= 1757) and (not user_id = 1 or not user_id is not null or not ts >= 1758 or not ts <= 1759) and (not user_id = 1 or not user_id is not null or not ts >= 1760 or not ts <= 1761) and (not user_id = 1 or not user_id is not null or not ts >= 1762 or not ts <= 1763) and (not user_id = 1 or not user_id is not null or not ts >= 1764 or not ts <= 1765) and (not user_id = 1 or not user_id is not null or not ts >= 1766 or not ts <= 1767) and (not user_id = 1 or not user_id is not null or not ts >= 1768 or not ts <= 1769) and (not user_id = 1 or not user_id is not null or not ts >= 1770 or not ts <= 1771) and (not user_id = 1 or not user_id is not null or not ts >= 1772 or not ts <= 1773) and (not user_id = 1 or not user_id is not null or not ts >= 1774 or not ts <= 1775) and (not user_id = 1 or not user_id is not null or not ts >= 1776 or not ts <= 1777) and (not user_id = 1 or not user_id is not null or not ts >= 1778 or not ts <= 1779) and (not user_id = 1 or not user_id is not null or not ts >= 1780 or not ts <= 1781) and (not user_id = 1 or not user_id is not null or not ts >= 1782 or not ts <= 1783) and (not user_id = 1 or not user_id is not null or not ts >= 1784 or not ts <= 1785) and (not user_id = 1 or not user_id is not null 
or not ts >= 1786 or not ts <= 1787) and (not user_id = 1 or not user_id is not null or not ts >= 1788 or not ts <= 1789) and (not user_id = 1 or not user_id is not null or not ts >= 1790 or not ts <= 1791) and (not user_id = 1 or not user_id is not null or not ts >= 1792 or not ts <= 1793) and (not user_id = 1 or not user_id is not null or not ts >= 1794 or not ts <= 1795) and (not user_id = 1 or not user_id is not null or not ts >= 1796 or not ts <= 1797) and (not user_id = 1 or not user_id is not null or not ts >= 1798 or not ts <= 1799) and (not user_id = 1 or not user_id is not null or not ts >= 1800 or not ts <= 1801) and (not user_id = 1 or not user_id is not null or not ts >= 1802 or not ts <= 1803) and (not user_id = 1 or not user_id is not null or not ts >= 1804 or not ts <= 1805) and (not user_id = 1 or not user_id is not null or not ts >= 1806 or not ts <= 1807) and (not user_id = 1 or not user_id is not null or not ts >= 1808 or not ts <= 1809) and (not user_id = 1 or not user_id is not null or not ts >= 1810 or not ts <= 1811) and (not user_id = 1 or not user_id is not null or not ts >= 1812 or not ts <= 1813) and (not user_id = 1 or not user_id is not null or not ts >= 1814 or not ts <= 1815) and (not user_id = 1 or not user_id is not null or not ts >= 1816 or not ts <= 1817) and (not user_id = 1 or not user_id is not null or not ts >= 1818 or not ts <= 1819) and (not user_id = 1 or not user_id is not null or not ts >= 1820 or not ts <= 1821) and (not user_id = 1 or not user_id is not null or not ts >= 1822 or not ts <= 1823) and (not user_id = 1 or not user_id is not null or not ts >= 1824 or not ts <= 1825) and (not user_id = 1 or not user_id is not null or not ts >= 1826 or not ts <= 1827) and (not user_id = 1 or not user_id is not null or not ts >= 1828 or not ts <= 1829) and (not user_id = 1 or not user_id is not null or not ts >= 1830 or not ts <= 1831) and (not user_id = 1 or not user_id is not null or not ts >= 1832 or not ts <= 1833) and 
(not user_id = 1 or not user_id is not null or not ts >= 1834 or not ts <= 1835) and (not user_id = 1 or not user_id is not null or not ts >= 1836 or not ts <= 1837) and (not user_id = 1 or not user_id is not null or not ts >= 1838 or not ts <= 1839) and (not user_id = 1 or not user_id is not null or not ts >= 1840 or not ts <= 1841) and (not user_id = 1 or not user_id is not null or not ts >= 1842 or not ts <= 1843) and (not user_id = 1 or not user_id is not null or not ts >= 1844 or not ts <= 1845) and (not user_id = 1 or not user_id is not null or not ts >= 1846 or not ts <= 1847) and (not user_id = 1 or not user_id is not null or not ts >= 1848 or not ts <= 1849) and (not user_id = 1 or not user_id is not null or not ts >= 1850 or not ts <= 1851) and (not user_id = 1 or not user_id is not null or not ts >= 1852 or not ts <= 1853) and (not user_id = 1 or not user_id is not null or not ts >= 1854 or not ts <= 1855) and (not user_id = 1 or not user_id is not null or not ts >= 1856 or not ts <= 1857) and (not user_id = 1 or not user_id is not null or not ts >= 1858 or not ts <= 1859) and (not user_id = 1 or not user_id is not null or not ts >= 1860 or not ts <= 1861) and (not user_id = 1 or not user_id is not null or not ts >= 1862 or not ts <= 1863) and (not user_id = 1 or not user_id is not null or not ts >= 1864 or not ts <= 1865) and (not user_id = 1 or not user_id is not null or not ts >= 1866 or not ts <= 1867) and (not user_id = 1 or not user_id is not null or not ts >= 1868 or not ts <= 1869) and (not user_id = 1 or not user_id is not null or not ts >= 1870 or not ts <= 1871) and (not user_id = 1 or not user_id is not null or not ts >= 1872 or not ts <= 1873) and (not user_id = 1 or not user_id is not null or not ts >= 1874 or not ts <= 1875) and (not user_id = 1 or not user_id is not null or not ts >= 1876 or not ts <= 1877) and (not user_id = 1 or not user_id is not null or not ts >= 1878 or not ts <= 1879) and (not user_id = 1 or not user_id is not null 
or not ts >= 1880 or not ts <= 1881) and (not user_id = 1 or not user_id is not null or not ts >= 1882 or not ts <= 1883) and (not user_id = 1 or not user_id is not null or not ts >= 1884 or not ts <= 1885) and (not user_id = 1 or not user_id is not null or not ts >= 1886 or not ts <= 1887) and (not user_id = 1 or not user_id is not null or not ts >= 1888 or not ts <= 1889) and (not user_id = 1 or not user_id is not null or not ts >= 1890 or not ts <= 1891) and (not user_id = 1 or not user_id is not null or not ts >= 1892 or not ts <= 1893) and (not user_id = 1 or not user_id is not null or not ts >= 1894 or not ts <= 1895) and (not user_id = 1 or not user_id is not null or not ts >= 1896 or not ts <= 1897) and (not user_id = 1 or not user_id is not null or not ts >= 1898 or not ts <= 1899) and (not user_id = 1 or not user_id is not null or not ts >= 1900 or not ts <= 1901) and (not user_id = 1 or not user_id is not null or not ts >= 1902 or not ts <= 1903) and (not user_id = 1 or not user_id is not null or not ts >= 1904 or not ts <= 1905) and (not user_id = 1 or not user_id is not null or not ts >= 1906 or not ts <= 1907) and (not user_id = 1 or not user_id is not null or not ts >= 1908 or not ts <= 1909) and (not user_id = 1 or not user_id is not null or not ts >= 1910 or not ts <= 1911) and (not user_id = 1 or not user_id is not null or not ts >= 1912 or not ts <= 1913) and (not user_id = 1 or not user_id is not null or not ts >= 1914 or not ts <= 1915) and (not user_id = 1 or not user_id is not null or not ts >= 1916 or not ts <= 1917) and (not user_id = 1 or not user_id is not null or not ts >= 1918 or not ts <= 1919) and (not user_id = 1 or not user_id is not null or not ts >= 1920 or not ts <= 1921) and (not user_id = 1 or not user_id is not null or not ts >= 1922 or not ts <= 1923) and (not user_id = 1 or not user_id is not null or not ts >= 1924 or not ts <= 1925) and (not user_id = 1 or not user_id is not null or not ts >= 1926 or not ts <= 1927) and 
(not user_id = 1 or not user_id is not null or not ts >= 1928 or not ts <= 1929) and (not user_id = 1 or not user_id is not null or not ts >= 1930 or not ts <= 1931) and (not user_id = 1 or not user_id is not null or not ts >= 1932 or not ts <= 1933) and (not user_id = 1 or not user_id is not null or not ts >= 1934 or not ts <= 1935) and (not user_id = 1 or not user_id is not null or not ts >= 1936 or not ts <= 1937) and (not user_id = 1 or not user_id is not null or not ts >= 1938 or not ts <= 1939) and (not user_id = 1 or not user_id is not null or not ts >= 1940 or not ts <= 1941) and (not user_id = 1 or not user_id is not null or not ts >= 1942 or not ts <= 1943) and (not user_id = 1 or not user_id is not null or not ts >= 1944 or not ts <= 1945) and (not user_id = 1 or not user_id is not null or not ts >= 1946 or not ts <= 1947) and (not user_id = 1 or not user_id is not null or not ts >= 1948 or not ts <= 1949) and (not user_id = 1 or not user_id is not null or not ts >= 1950 or not ts <= 1951) and (not user_id = 1 or not user_id is not null or not ts >= 1952 or not ts <= 1953) and (not user_id = 1 or not user_id is not null or not ts >= 1954 or not ts <= 1955) and (not user_id = 1 or not user_id is not null or not ts >= 1956 or not ts <= 1957) and (not user_id = 1 or not user_id is not null or not ts >= 1958 or not ts <= 1959) and (not user_id = 1 or not user_id is not null or not ts >= 1960 or not ts <= 1961) and (not user_id = 1 or not user_id is not null or not ts >= 1962 or not ts <= 1963) and (not user_id = 1 or not user_id is not null or not ts >= 1964 or not ts <= 1965) and (not user_id = 1 or not user_id is not null or not ts >= 1966 or not ts <= 1967) and (not user_id = 1 or not user_id is not null or not ts >= 1968 or not ts <= 1969) and (not user_id = 1 or not user_id is not null or not ts >= 1970 or not ts <= 1971) and (not user_id = 1 or not user_id is not null or not ts >= 1972 or not ts <= 1973) and (not user_id = 1 or not user_id is not null 
or not ts >= 1974 or not ts <= 1975) and (not user_id = 1 or not user_id is not null or not ts >= 1976 or not ts <= 1977) and (not user_id = 1 or not user_id is not null or not ts >= 1978 or not ts <= 1979) and (not user_id = 1 or not user_id is not null or not ts >= 1980 or not ts <= 1981) and (not user_id = 1 or not user_id is not null or not ts >= 1982 or not ts <= 1983) and (not user_id = 1 or not user_id is not null or not ts >= 1984 or not ts <= 1985) and (not user_id = 1 or not user_id is not null or not ts >= 1986 or not ts <= 1987) and (not user_id = 1 or not user_id is not null or not ts >= 1988 or not ts <= 1989) and (not user_id = 1 or not user_id is not null or not ts >= 1990 or not ts <= 1991) and (not user_id = 1 or not user_id is not null or not ts >= 1992 or not ts <= 1993) and (not user_id = 1 or not user_id is not null or not ts >= 1994 or not ts <= 1995) and (not user_id = 1 or not user_id is not null or not ts >= 1996 or not ts <= 1997) and (not user_id = 1 or not user_id is not null or not ts >= 1998 or not ts <= 1999) and (not user_id = 1 or not user_id is not null or not ts >= 11000 or not ts <= 11001) and (not user_id = 1 or not user_id is not null or not ts >= 11002 or not ts <= 11003) and (not user_id = 1 or not user_id is not null or not ts >= 11004 or not ts <= 11005) and (not user_id = 1 or not user_id is not null or not ts >= 11006 or not ts <= 11007) and (not user_id = 1 or not user_id is not null or not ts >= 11008 or not ts <= 11009) and (not user_id = 1 or not user_id is not null or not ts >= 11010 or not ts <= 11011) and (not user_id = 1 or not user_id is not null or not ts >= 11012 or not ts <= 11013) and (not user_id = 1 or not user_id is not null or not ts >= 11014 or not ts <= 11015) and (not user_id = 1 or not user_id is not null or not ts >= 11016 or not ts <= 11017) and (not user_id = 1 or not user_id is not null or not ts >= 11018 or not ts <= 11019) and (not user_id = 1 or not user_id is not null or not ts >= 11020 or 
not ts <= 11021) and (not user_id = 1 or not user_id is not null or not ts >= 11022 or not ts <= 11023) and (not user_id = 1 or not user_id is not null or not ts >= 11024 or not ts <= 11025) and (not user_id = 1 or not user_id is not null or not ts >= 11026 or not ts <= 11027) and (not user_id = 1 or not user_id is not null or not ts >= 11028 or not ts <= 11029) and (not user_id = 1 or not user_id is not null or not ts >= 11030 or not ts <= 11031) and (not user_id = 1 or not user_id is not null or not ts >= 11032 or not ts <= 11033) and (not user_id = 1 or not user_id is not null or not ts >= 11034 or not ts <= 11035) and (not user_id = 1 or not user_id is not null or not ts >= 11036 or not ts <= 11037) and (not user_id = 1 or not user_id is not null or not ts >= 11038 or not ts <= 11039) and (not user_id = 1 or not user_id is not null or not ts >= 11040 or not ts <= 11041) and (not user_id = 1 or not user_id is not null or not ts >= 11042 or not ts <= 11043) and (not user_id = 1 or not user_id is not null or not ts >= 11044 or not ts <= 11045) and (not user_id = 1 or not user_id is not null or not ts >= 11046 or not ts <= 11047) and (not user_id = 1 or not user_id is not null or not ts >= 11048 or not ts <= 11049) and (not user_id = 1 or not user_id is not null or not ts >= 11050 or not ts <= 11051) and (not user_id = 1 or not user_id is not null or not ts >= 11052 or not ts <= 11053) and (not user_id = 1 or not user_id is not null or not ts >= 11054 or not ts <= 11055) and (not user_id = 1 or not user_id is not null or not ts >= 11056 or not ts <= 11057) and (not user_id = 1 or not user_id is not null or not ts >= 11058 or not ts <= 11059) and (not user_id = 1 or not user_id is not null or not ts >= 11060 or not ts <= 11061) and (not user_id = 1 or not user_id is not null or not ts >= 11062 or not ts <= 11063) and (not user_id = 1 or not user_id is not null or not ts >= 11064 or not ts <= 11065) and (not user_id = 1 or not user_id is not null or not ts >= 11066 
or not ts <= 11067) and (not user_id = 1 or not user_id is not null or not ts >= 11068 or not ts <= 11069) and (not user_id = 1 or not user_id is not null or not ts >= 11070 or not ts <= 11071) and (not user_id = 1 or not user_id is not null or not ts >= 11072 or not ts <= 11073) and (not user_id = 1 or not user_id is not null or not ts >= 11074 or not ts <= 11075) and (not user_id = 1 or not user_id is not null or not ts >= 11076 or not ts <= 11077) and (not user_id = 1 or not user_id is not null or not ts >= 11078 or not ts <= 11079) and (not user_id = 1 or not user_id is not null or not ts >= 11080 or not ts <= 11081) and (not user_id = 1 or not user_id is not null or not ts >= 11082 or not ts <= 11083) and (not user_id = 1 or not user_id is not null or not ts >= 11084 or not ts <= 11085) and (not user_id = 1 or not user_id is not null or not ts >= 11086 or not ts <= 11087) and (not user_id = 1 or not user_id is not null or not ts >= 11088 or not ts <= 11089) and (not user_id = 1 or not user_id is not null or not ts >= 11090 or not ts <= 11091) and (not user_id = 1 or not user_id is not null or not ts >= 11092 or not ts <= 11093) and (not user_id = 1 or not user_id is not null or not ts >= 11094 or not ts <= 11095) and (not user_id = 1 or not user_id is not null or not ts >= 11096 or not ts <= 11097) and (not user_id = 1 or not user_id is not null or not ts >= 11098 or not ts <= 11099) and (not user_id = 1 or not user_id is not null or not ts >= 11100 or not ts <= 11101) and (not user_id = 1 or not user_id is not null or not ts >= 11102 or not ts <= 11103) and (not user_id = 1 or not user_id is not null or not ts >= 11104 or not ts <= 11105) and (not user_id = 1 or not user_id is not null or not ts >= 11106 or not ts <= 11107) and (not user_id = 1 or not user_id is not null or not ts >= 11108 or not ts <= 11109) and (not user_id = 1 or not user_id is not null or not ts >= 11110 or not ts <= 11111) and (not user_id = 1 or not user_id is not null or not ts >= 
11112 or not ts <= 11113) and (not user_id = 1 or not user_id is not null or not ts >= 11114 or not ts <= 11115) and (not user_id = 1 or not user_id is not null or not ts >= 11116 or not ts <= 11117) and (not user_id = 1 or not user_id is not null or not ts >= 11118 or not ts <= 11119) and (not user_id = 1 or not user_id is not null or not ts >= 11120 or not ts <= 11121) and (not user_id = 1 or not user_id is not null or not ts >= 11122 or not ts <= 11123) and (not user_id = 1 or not user_id is not null or not ts >= 11124 or not ts <= 11125) and (not user_id = 1 or not user_id is not null or not ts >= 11126 or not ts <= 11127) and (not user_id = 1 or not user_id is not null or not ts >= 11128 or not ts <= 11129) and (not user_id = 1 or not user_id is not null or not ts >= 11130 or not ts <= 11131) and (not user_id = 1 or not user_id is not null or not ts >= 11132 or not ts <= 11133) and (not user_id = 1 or not user_id is not null or not ts >= 11134 or not ts <= 11135) and (not user_id = 1 or not user_id is not null or not ts >= 11136 or not ts <= 11137) and (not user_id = 1 or not user_id is not null or not ts >= 11138 or not ts <= 11139) and (not user_id = 1 or not user_id is not null or not ts >= 11140 or not ts <= 11141) and (not user_id = 1 or not user_id is not null or not ts >= 11142 or not ts <= 11143) and (not user_id = 1 or not user_id is not null or not ts >= 11144 or not ts <= 11145) and (not user_id = 1 or not user_id is not null or not ts >= 11146 or not ts <= 11147) and (not user_id = 1 or not user_id is not null or not ts >= 11148 or not ts <= 11149) and (not user_id = 1 or not user_id is not null or not ts >= 11150 or not ts <= 11151) and (not user_id = 1 or not user_id is not null or not ts >= 11152 or not ts <= 11153) and (not user_id = 1 or not user_id is not null or not ts >= 11154 or not ts <= 11155) and (not user_id = 1 or not user_id is not null or not ts >= 11156 or not ts <= 11157) and (not user_id = 1 or not user_id is not null or not ts 
>= 11158 or not ts <= 11159) and (not user_id = 1 or not user_id is not null or not ts >= 11160 or not ts <= 11161) and (not user_id = 1 or not user_id is not null or not ts >= 11162 or not ts <= 11163) and (not user_id = 1 or not user_id is not null or not ts >= 11164 or not ts <= 11165) and (not user_id = 1 or not user_id is not null or not ts >= 11166 or not ts <= 11167) and (not user_id = 1 or not user_id is not null or not ts >= 11168 or not ts <= 11169) and (not user_id = 1 or not user_id is not null or not ts >= 11170 or not ts <= 11171) and (not user_id = 1 or not user_id is not null or not ts >= 11172 or not ts <= 11173) and (not user_id = 1 or not user_id is not null or not ts >= 11174 or not ts <= 11175) and (not user_id = 1 or not user_id is not null or not ts >= 11176 or not ts <= 11177) and (not user_id = 1 or not user_id is not null or not ts >= 11178 or not ts <= 11179) and (not user_id = 1 or not user_id is not null or not ts >= 11180 or not ts <= 11181) and (not user_id = 1 or not user_id is not null or not ts >= 11182 or not ts <= 11183) and (not user_id = 1 or not user_id is not null or not ts >= 11184 or not ts <= 11185) and (not user_id = 1 or not user_id is not null or not ts >= 11186 or not ts <= 11187) and (not user_id = 1 or not user_id is not null or not ts >= 11188 or not ts <= 11189) and (not user_id = 1 or not user_id is not null or not ts >= 11190 or not ts <= 11191) and (not user_id = 1 or not user_id is not null or not ts >= 11192 or not ts <= 11193) and (not user_id = 1 or not user_id is not null or not ts >= 11194 or not ts <= 11195) and (not user_id = 1 or not user_id is not null or not ts >= 11196 or not ts <= 11197) and (not user_id = 1 or not user_id is not null or not ts >= 11198 or not ts <= 11199) and (not user_id = 1 or not user_id is not null or not ts >= 11200 or not ts <= 11201) and (not user_id = 1 or not user_id is not null or not ts >= 11202 or not ts <= 11203) and (not user_id = 1 or not user_id is not null or not 
ts >= 11204 or not ts <= 11205) and (not user_id = 1 or not user_id is not null or not ts >= 11206 or not ts <= 11207) and (not user_id = 1 or not user_id is not null or not ts >= 11208 or not ts <= 11209) and (not user_id = 1 or not user_id is not null or not ts >= 11210 or not ts <= 11211) and (not user_id = 1 or not user_id is not null or not ts >= 11212 or not ts <= 11213) and (not user_id = 1 or not user_id is not null or not ts >= 11214 or not ts <= 11215) and (not user_id = 1 or not user_id is not null or not ts >= 11216 or not ts <= 11217) and (not user_id = 1 or not user_id is not null or not ts >= 11218 or not ts <= 11219) and (not user_id = 1 or not user_id is not null or not ts >= 11220 or not ts <= 11221) and (not user_id = 1 or not user_id is not null or not ts >= 11222 or not ts <= 11223) and (not user_id = 1 or not user_id is not null or not ts >= 11224 or not ts <= 11225) and (not user_id = 1 or not user_id is not null or not ts >= 11226 or not ts <= 11227) and (not user_id = 1 or not user_id is not null or not ts >= 11228 or not ts <= 11229) and (not user_id = 1 or not user_id is not null or not ts >= 11230 or not ts <= 11231) and (not user_id = 1 or not user_id is not null or not ts >= 11232 or not ts <= 11233) and (not user_id = 1 or not user_id is not null or not ts >= 11234 or not ts <= 11235) and (not user_id = 1 or not user_id is not null or not ts >= 11236 or not ts <= 11237) and (not user_id = 1 or not user_id is not null or not ts >= 11238 or not ts <= 11239) and (not user_id = 1 or not user_id is not null or not ts >= 11240 or not ts <= 11241) and (not user_id = 1 or not user_id is not null or not ts >= 11242 or not ts <= 11243) and (not user_id = 1 or not user_id is not null or not ts >= 11244 or not ts <= 11245) and (not user_id = 1 or not user_id is not null or not ts >= 11246 or not ts <= 11247) and (not user_id = 1 or not user_id is not null or not ts >= 11248 or not ts <= 11249) and (not user_id = 1 or not user_id is not null or 
not ts >= 11250 or not ts <= 11251) and (not user_id = 1 or not user_id is not null or not ts >= 11252 or not ts <= 11253) and (not user_id = 1 or not user_id is not null or not ts >= 11254 or not ts <= 11255) and (not user_id = 1 or not user_id is not null or not ts >= 11256 or not ts <= 11257) and (not user_id = 1 or not user_id is not null or not ts >= 11258 or not ts <= 11259) and (not user_id = 1 or not user_id is not null or not ts >= 11260 or not ts <= 11261) and (not user_id = 1 or not user_id is not null or not ts >= 11262 or not ts <= 11263) and (not user_id = 1 or not user_id is not null or not ts >= 11264 or not ts <= 11265) and (not user_id = 1 or not user_id is not null or not ts >= 11266 or not ts <= 11267) and (not user_id = 1 or not user_id is not null or not ts >= 11268 or not ts <= 11269) and (not user_id = 1 or not user_id is not null or not ts >= 11270 or not ts <= 11271) and (not user_id = 1 or not user_id is not null or not ts >= 11272 or not ts <= 11273) and (not user_id = 1 or not user_id is not null or not ts >= 11274 or not ts <= 11275) and (not user_id = 1 or not user_id is not null or not ts >= 11276 or not ts <= 11277) and (not user_id = 1 or not user_id is not null or not ts >= 11278 or not ts <= 11279) and (not user_id = 1 or not user_id is not null or not ts >= 11280 or not ts <= 11281) and (not user_id = 1 or not user_id is not null or not ts >= 11282 or not ts <= 11283) and (not user_id = 1 or not user_id is not null or not ts >= 11284 or not ts <= 11285) and (not user_id = 1 or not user_id is not null or not ts >= 11286 or not ts <= 11287) and (not user_id = 1 or not user_id is not null or not ts >= 11288 or not ts <= 11289) and (not user_id = 1 or not user_id is not null or not ts >= 11290 or not ts <= 11291) and (not user_id = 1 or not user_id is not null or not ts >= 11292 or not ts <= 11293) and (not user_id = 1 or not user_id is not null or not ts >= 11294 or not ts <= 11295) and (not user_id = 1 or not user_id is not null 
or not ts >= 11296 or not ts <= 11297) and (not user_id = 1 or not user_id is not null or not ts >= 11298 or not ts <= 11299) and (not user_id = 1 or not user_id is not null or not ts >= 11300 or not ts <= 11301) and (not user_id = 1 or not user_id is not null or not ts >= 11302 or not ts <= 11303) and (not user_id = 1 or not user_id is not null or not ts >= 11304 or not ts <= 11305) and (not user_id = 1 or not user_id is not null or not ts >= 11306 or not ts <= 11307) and (not user_id = 1 or not user_id is not null or not ts >= 11308 or not ts <= 11309) and (not user_id = 1 or not user_id is not null or not ts >= 11310 or not ts <= 11311) and (not user_id = 1 or not user_id is not null or not ts >= 11312 or not ts <= 11313) and (not user_id = 1 or not user_id is not null or not ts >= 11314 or not ts <= 11315) and (not user_id = 1 or not user_id is not null or not ts >= 11316 or not ts <= 11317) and (not user_id = 1 or not user_id is not null or not ts >= 11318 or not ts <= 11319) and (not user_id = 1 or not user_id is not null or not ts >= 11320 or not ts <= 11321) and (not user_id = 1 or not user_id is not null or not ts >= 11322 or not ts <= 11323) and (not user_id = 1 or not user_id is not null or not ts >= 11324 or not ts <= 11325) and (not user_id = 1 or not user_id is not null or not ts >= 11326 or not ts <= 11327) and (not user_id = 1 or not user_id is not null or not ts >= 11328 or not ts <= 11329) and (not user_id = 1 or not user_id is not null or not ts >= 11330 or not ts <= 11331) and (not user_id = 1 or not user_id is not null or not ts >= 11332 or not ts <= 11333) and (not user_id = 1 or not user_id is not null or not ts >= 11334 or not ts <= 11335) and (not user_id = 1 or not user_id is not null or not ts >= 11336 or not ts <= 11337) and (not user_id = 1 or not user_id is not null or not ts >= 11338 or not ts <= 11339) and (not user_id = 1 or not user_id is not null or not ts >= 11340 or not ts <= 11341) and (not user_id = 1 or not user_id is not 
null or not ts >= 11342 or not ts <= 11343) and (not user_id = 1 or not user_id is not null or not ts >= 11344 or not ts <= 11345) and (not user_id = 1 or not user_id is not null or not ts >= 11346 or not ts <= 11347) and (not user_id = 1 or not user_id is not null or not ts >= 11348 or not ts <= 11349) and (not user_id = 1 or not user_id is not null or not ts >= 11350 or not ts <= 11351) and (not user_id = 1 or not user_id is not null or not ts >= 11352 or not ts <= 11353) and (not user_id = 1 or not user_id is not null or not ts >= 11354 or not ts <= 11355) and (not user_id = 1 or not user_id is not null or not ts >= 11356 or not ts <= 11357) and (not user_id = 1 or not user_id is not null or not ts >= 11358 or not ts <= 11359) and (not user_id = 1 or not user_id is not null or not ts >= 11360 or not ts <= 11361) and (not user_id = 1 or not user_id is not null or not ts >= 11362 or not ts <= 11363) and (not user_id = 1 or not user_id is not null or not ts >= 11364 or not ts <= 11365) and (not user_id = 1 or not user_id is not null or not ts >= 11366 or not ts <= 11367) and (not user_id = 1 or not user_id is not null or not ts >= 11368 or not ts <= 11369) and (not user_id = 1 or not user_id is not null or not ts >= 11370 or not ts <= 11371) and (not user_id = 1 or not user_id is not null or not ts >= 11372 or not ts <= 11373) and (not user_id = 1 or not user_id is not null or not ts >= 11374 or not ts <= 11375) and (not user_id = 1 or not user_id is not null or not ts >= 11376 or not ts <= 11377) and (not user_id = 1 or not user_id is not null or not ts >= 11378 or not ts <= 11379) and (not user_id = 1 or not user_id is not null or not ts >= 11380 or not ts <= 11381) and (not user_id = 1 or not user_id is not null or not ts >= 11382 or not ts <= 11383) and (not user_id = 1 or not user_id is not null or not ts >= 11384 or not ts <= 11385) and (not user_id = 1 or not user_id is not null or not ts >= 11386 or not ts <= 11387) and (not user_id = 1 or not user_id is 
not null or not ts >= 11388 or not ts <= 11389) and (not user_id = 1 or not user_id is not null or not ts >= 11390 or not ts <= 11391) and (not user_id = 1 or not user_id is not null or not ts >= 11392 or not ts <= 11393) and (not user_id = 1 or not user_id is not null or not ts >= 11394 or not ts <= 11395) and (not user_id = 1 or not user_id is not null or not ts >= 11396 or not ts <= 11397) and (not user_id = 1 or not user_id is not null or not ts >= 11398 or not ts <= 11399) and (not user_id = 1 or not user_id is not null or not ts >= 11400 or not ts <= 11401) and (not user_id = 1 or not user_id is not null or not ts >= 11402 or not ts <= 11403) and (not user_id = 1 or not user_id is not null or not ts >= 11404 or not ts <= 11405) and (not user_id = 1 or not user_id is not null or not ts >= 11406 or not ts <= 11407) and (not user_id = 1 or not user_id is not null or not ts >= 11408 or not ts <= 11409) and (not user_id = 1 or not user_id is not null or not ts >= 11410 or not ts <= 11411) and (not user_id = 1 or not user_id is not null or not ts >= 11412 or not ts <= 11413) and (not user_id = 1 or not user_id is not null or not ts >= 11414 or not ts <= 11415) and (not user_id = 1 or not user_id is not null or not ts >= 11416 or not ts <= 11417) and (not user_id = 1 or not user_id is not null or not ts >= 11418 or not ts <= 11419) and (not user_id = 1 or not user_id is not null or not ts >= 11420 or not ts <= 11421) and (not user_id = 1 or not user_id is not null or not ts >= 11422 or not ts <= 11423) and (not user_id = 1 or not user_id is not null or not ts >= 11424 or not ts <= 11425) and (not user_id = 1 or not user_id is not null or not ts >= 11426 or not ts <= 11427) and (not user_id = 1 or not user_id is not null or not ts >= 11428 or not ts <= 11429) and (not user_id = 1 or not user_id is not null or not ts >= 11430 or not ts <= 11431) and (not user_id = 1 or not user_id is not null or not ts >= 11432 or not ts <= 11433) and (not user_id = 1 or not user_id 
is not null or not ts >= 11434 or not ts <= 11435) and (not user_id = 1 or not user_id is not null or not ts >= 11436 or not ts <= 11437) and (not user_id = 1 or not user_id is not null or not ts >= 11438 or not ts <= 11439) and (not user_id = 1 or not user_id is not null or not ts >= 11440 or not ts <= 11441) and (not user_id = 1 or not user_id is not null or not ts >= 11442 or not ts <= 11443) and (not user_id = 1 or not user_id is not null or not ts >= 11444 or not ts <= 11445) and (not user_id = 1 or not user_id is not null or not ts >= 11446 or not ts <= 11447) and (not user_id = 1 or not user_id is not null or not ts >= 11448 or not ts <= 11449) and (not user_id = 1 or not user_id is not null or not ts >= 11450 or not ts <= 11451) and (not user_id = 1 or not user_id is not null or not ts >= 11452 or not ts <= 11453) and (not user_id = 1 or not user_id is not null or not ts >= 11454 or not ts <= 11455) and (not user_id = 1 or not user_id is not null or not ts >= 11456 or not ts <= 11457) and (not user_id = 1 or not user_id is not null or not ts >= 11458 or not ts <= 11459) and (not user_id = 1 or not user_id is not null or not ts >= 11460 or not ts <= 11461) and (not user_id = 1 or not user_id is not null or not ts >= 11462 or not ts <= 11463) and (not user_id = 1 or not user_id is not null or not ts >= 11464 or not ts <= 11465) and (not user_id = 1 or not user_id is not null or not ts >= 11466 or not ts <= 11467) and (not user_id = 1 or not user_id is not null or not ts >= 11468 or not ts <= 11469) and (not user_id = 1 or not user_id is not null or not ts >= 11470 or not ts <= 11471) and (not user_id = 1 or not user_id is not null or not ts >= 11472 or not ts <= 11473) and (not user_id = 1 or not user_id is not null or not ts >= 11474 or not ts <= 11475) and (not user_id = 1 or not user_id is not null or not ts >= 11476 or not ts <= 11477) and (not user_id = 1 or not user_id is not null or not ts >= 11478 or not ts <= 11479) and (not user_id = 1 or not 
user_id is not null or not ts >= 11480 or not ts <= 11481) and (not user_id = 1 or not user_id is not null or not ts >= 11482 or not ts <= 11483) and (not user_id = 1 or not user_id is not null or not ts >= 11484 or not ts <= 11485) and (not user_id = 1 or not user_id is not null or not ts >= 11486 or not ts <= 11487) and (not user_id = 1 or not user_id is not null or not ts >= 11488 or not ts <= 11489) and (not user_id = 1 or not user_id is not null or not ts >= 11490 or not ts <= 11491) and (not user_id = 1 or not user_id is not null or not ts >= 11492 or not ts <= 11493) and (not user_id = 1 or not user_id is not null or not ts >= 11494 or not ts <= 11495) and (not user_id = 1 or not user_id is not null or not ts >= 11496 or not ts <= 11497) and (not user_id = 1 or not user_id is not null or not ts >= 11498 or not ts <= 11499) and (not user_id = 1 or not user_id is not null or not ts >= 11500 or not ts <= 11501) and (not user_id = 1 or not user_id is not null or not ts >= 11502 or not ts <= 11503) and (not user_id = 1 or not user_id is not null or not ts >= 11504 or not ts <= 11505) and (not user_id = 1 or not user_id is not null or not ts >= 11506 or not ts <= 11507) and (not user_id = 1 or not user_id is not null or not ts >= 11508 or not ts <= 11509) and (not user_id = 1 or not user_id is not null or not ts >= 11510 or not ts <= 11511) and (not user_id = 1 or not user_id is not null or not ts >= 11512 or not ts <= 11513) and (not user_id = 1 or not user_id is not null or not ts >= 11514 or not ts <= 11515) and (not user_id = 1 or not user_id is not null or not ts >= 11516 or not ts <= 11517) and (not user_id = 1 or not user_id is not null or not ts >= 11518 or not ts <= 11519) and (not user_id = 1 or not user_id is not null or not ts >= 11520 or not ts <= 11521) and (not user_id = 1 or not user_id is not null or not ts >= 11522 or not ts <= 11523) and (not user_id = 1 or not user_id is not null or not ts >= 11524 or not ts <= 11525) and (not user_id = 1 or 
not user_id is not null or not ts >= 11526 or not ts <= 11527) and (not user_id = 1 or not user_id is not null or not ts >= 11528 or not ts <= 11529) and (not user_id = 1 or not user_id is not null or not ts >= 11530 or not ts <= 11531) and (not user_id = 1 or not user_id is not null or not ts >= 11532 or not ts <= 11533) and (not user_id = 1 or not user_id is not null or not ts >= 11534 or not ts <= 11535) and (not user_id = 1 or not user_id is not null or not ts >= 11536 or not ts <= 11537) and (not user_id = 1 or not user_id is not null or not ts >= 11538 or not ts <= 11539) and (not user_id = 1 or not user_id is not null or not ts >= 11540 or not ts <= 11541) and (not user_id = 1 or not user_id is not null or not ts >= 11542 or not ts <= 11543) and (not user_id = 1 or not user_id is not null or not ts >= 11544 or not ts <= 11545) and (not user_id = 1 or not user_id is not null or not ts >= 11546 or not ts <= 11547) and (not user_id = 1 or not user_id is not null or not ts >= 11548 or not ts <= 11549) and (not user_id = 1 or not user_id is not null or not ts >= 11550 or not ts <= 11551) and (not user_id = 1 or not user_id is not null or not ts >= 11552 or not ts <= 11553) and (not user_id = 1 or not user_id is not null or not ts >= 11554 or not ts <= 11555) and (not user_id = 1 or not user_id is not null or not ts >= 11556 or not ts <= 11557) and (not user_id = 1 or not user_id is not null or not ts >= 11558 or not ts <= 11559) and (not user_id = 1 or not user_id is not null or not ts >= 11560 or not ts <= 11561) and (not user_id = 1 or not user_id is not null or not ts >= 11562 or not ts <= 11563) and (not user_id = 1 or not user_id is not null or not ts >= 11564 or not ts <= 11565) and (not user_id = 1 or not user_id is not null or not ts >= 11566 or not ts <= 11567) and (not user_id = 1 or not user_id is not null or not ts >= 11568 or not ts <= 11569) and (not user_id = 1 or not user_id is not null or not ts >= 11570 or not ts <= 11571) and (not user_id = 1 
or not user_id is not null or not ts >= 11572 or not ts <= 11573) and (not user_id = 1 or not user_id is not null or not ts >= 11574 or not ts <= 11575) and (not user_id = 1 or not user_id is not null or not ts >= 11576 or not ts <= 11577) and (not user_id = 1 or not user_id is not null or not ts >= 11578 or not ts <= 11579) and (not user_id = 1 or not user_id is not null or not ts >= 11580 or not ts <= 11581) and (not user_id = 1 or not user_id is not null or not ts >= 11582 or not ts <= 11583) and (not user_id = 1 or not user_id is not null or not ts >= 11584 or not ts <= 11585) and (not user_id = 1 or not user_id is not null or not ts >= 11586 or not ts <= 11587) and (not user_id = 1 or not user_id is not null or not ts >= 11588 or not ts <= 11589) and (not user_id = 1 or not user_id is not null or not ts >= 11590 or not ts <= 11591) and (not user_id = 1 or not user_id is not null or not ts >= 11592 or not ts <= 11593) and (not user_id = 1 or not user_id is not null or not ts >= 11594 or not ts <= 11595) and (not user_id = 1 or not user_id is not null or not ts >= 11596 or not ts <= 11597) and (not user_id = 1 or not user_id is not null or not ts >= 11598 or not ts <= 11599) and (not user_id = 1 or not user_id is not null or not ts >= 11600 or not ts <= 11601) and (not user_id = 1 or not user_id is not null or not ts >= 11602 or not ts <= 11603) and (not user_id = 1 or not user_id is not null or not ts >= 11604 or not ts <= 11605) and (not user_id = 1 or not user_id is not null or not ts >= 11606 or not ts <= 11607) and (not user_id = 1 or not user_id is not null or not ts >= 11608 or not ts <= 11609) and (not user_id = 1 or not user_id is not null or not ts >= 11610 or not ts <= 11611) and (not user_id = 1 or not user_id is not null or not ts >= 11612 or not ts <= 11613) and (not user_id = 1 or not user_id is not null or not ts >= 11614 or not ts <= 11615) and (not user_id = 1 or not user_id is not null or not ts >= 11616 or not ts <= 11617) and (not user_id = 
1 or not user_id is not null or not ts >= 11618 or not ts <= 11619) and (not user_id = 1 or not user_id is not null or not ts >= 11620 or not ts <= 11621) and (not user_id = 1 or not user_id is not null or not ts >= 11622 or not ts <= 11623) and (not user_id = 1 or not user_id is not null or not ts >= 11624 or not ts <= 11625) and (not user_id = 1 or not user_id is not null or not ts >= 11626 or not ts <= 11627) and (not user_id = 1 or not user_id is not null or not ts >= 11628 or not ts <= 11629) and (not user_id = 1 or not user_id is not null or not ts >= 11630 or not ts <= 11631) and (not user_id = 1 or not user_id is not null or not ts >= 11632 or not ts <= 11633) and (not user_id = 1 or not user_id is not null or not ts >= 11634 or not ts <= 11635) and (not user_id = 1 or not user_id is not null or not ts >= 11636 or not ts <= 11637) and (not user_id = 1 or not user_id is not null or not ts >= 11638 or not ts <= 11639) and (not user_id = 1 or not user_id is not null or not ts >= 11640 or not ts <= 11641) and (not user_id = 1 or not user_id is not null or not ts >= 11642 or not ts <= 11643) and (not user_id = 1 or not user_id is not null or not ts >= 11644 or not ts <= 11645) and (not user_id = 1 or not user_id is not null or not ts >= 11646 or not ts <= 11647) and (not user_id = 1 or not user_id is not null or not ts >= 11648 or not ts <= 11649) and (not user_id = 1 or not user_id is not null or not ts >= 11650 or not ts <= 11651) and (not user_id = 1 or not user_id is not null or not ts >= 11652 or not ts <= 11653) and (not user_id = 1 or not user_id is not null or not ts >= 11654 or not ts <= 11655) and (not user_id = 1 or not user_id is not null or not ts >= 11656 or not ts <= 11657) and (not user_id = 1 or not user_id is not null or not ts >= 11658 or not ts <= 11659) and (not user_id = 1 or not user_id is not null or not ts >= 11660 or not ts <= 11661) and (not user_id = 1 or not user_id is not null or not ts >= 11662 or not ts <= 11663) and (not user_id 
= 1 or not user_id is not null or not ts >= 11664 or not ts <= 11665) and (not user_id = 1 or not user_id is not null or not ts >= 11666 or not ts <= 11667) and (not user_id = 1 or not user_id is not null or not ts >= 11668 or not ts <= 11669) and (not user_id = 1 or not user_id is not null or not ts >= 11670 or not ts <= 11671) and (not user_id = 1 or not user_id is not null or not ts >= 11672 or not ts <= 11673) and (not user_id = 1 or not user_id is not null or not ts >= 11674 or not ts <= 11675) and (not user_id = 1 or not user_id is not null or not ts >= 11676 or not ts <= 11677) and (not user_id = 1 or not user_id is not null or not ts >= 11678 or not ts <= 11679) and (not user_id = 1 or not user_id is not null or not ts >= 11680 or not ts <= 11681) and (not user_id = 1 or not user_id is not null or not ts >= 11682 or not ts <= 11683) and (not user_id = 1 or not user_id is not null or not ts >= 11684 or not ts <= 11685) and (not user_id = 1 or not user_id is not null or not ts >= 11686 or not ts <= 11687) and (not user_id = 1 or not user_id is not null or not ts >= 11688 or not ts <= 11689) and (not user_id = 1 or not user_id is not null or not ts >= 11690 or not ts <= 11691) and (not user_id = 1 or not user_id is not null or not ts >= 11692 or not ts <= 11693) and (not user_id = 1 or not user_id is not null or not ts >= 11694 or not ts <= 11695) and (not user_id = 1 or not user_id is not null or not ts >= 11696 or not ts <= 11697) and (not user_id = 1 or not user_id is not null or not ts >= 11698 or not ts <= 11699) and (not user_id = 1 or not user_id is not null or not ts >= 11700 or not ts <= 11701) and (not user_id = 1 or not user_id is not null or not ts >= 11702 or not ts <= 11703) and (not user_id = 1 or not user_id is not null or not ts >= 11704 or not ts <= 11705) and (not user_id = 1 or not user_id is not null or not ts >= 11706 or not ts <= 11707) and (not user_id = 1 or not user_id is not null or not ts >= 11708 or not ts <= 11709) and (not 
user_id = 1 or not user_id is not null or not ts >= 11710 or not ts <= 11711) and (not user_id = 1 or not user_id is not null or not ts >= 11712 or not ts <= 11713) and (not user_id = 1 or not user_id is not null or not ts >= 11714 or not ts <= 11715) and (not user_id = 1 or not user_id is not null or not ts >= 11716 or not ts <= 11717) and (not user_id = 1 or not user_id is not null or not ts >= 11718 or not ts <= 11719) and (not user_id = 1 or not user_id is not null or not ts >= 11720 or not ts <= 11721) and (not user_id = 1 or not user_id is not null or not ts >= 11722 or not ts <= 11723) and (not user_id = 1 or not user_id is not null or not ts >= 11724 or not ts <= 11725) and (not user_id = 1 or not user_id is not null or not ts >= 11726 or not ts <= 11727) and (not user_id = 1 or not user_id is not null or not ts >= 11728 or not ts <= 11729) and (not user_id = 1 or not user_id is not null or not ts >= 11730 or not ts <= 11731) and (not user_id = 1 or not user_id is not null or not ts >= 11732 or not ts <= 11733) and (not user_id = 1 or not user_id is not null or not ts >= 11734 or not ts <= 11735) and (not user_id = 1 or not user_id is not null or not ts >= 11736 or not ts <= 11737) and (not user_id = 1 or not user_id is not null or not ts >= 11738 or not ts <= 11739) and (not user_id = 1 or not user_id is not null or not ts >= 11740 or not ts <= 11741) and (not user_id = 1 or not user_id is not null or not ts >= 11742 or not ts <= 11743) and (not user_id = 1 or not user_id is not null or not ts >= 11744 or not ts <= 11745) and (not user_id = 1 or not user_id is not null or not ts >= 11746 or not ts <= 11747) and (not user_id = 1 or not user_id is not null or not ts >= 11748 or not ts <= 11749) and (not user_id = 1 or not user_id is not null or not ts >= 11750 or not ts <= 11751) and (not user_id = 1 or not user_id is not null or not ts >= 11752 or not ts <= 11753) and (not user_id = 1 or not user_id is not null or not ts >= 11754 or not ts <= 11755) and 
(not user_id = 1 or not user_id is not null or not ts >= 11756 or not ts <= 11757) and (not user_id = 1 or not user_id is not null or not ts >= 11758 or not ts <= 11759) and (not user_id = 1 or not user_id is not null or not ts >= 11760 or not ts <= 11761) and (not user_id = 1 or not user_id is not null or not ts >= 11762 or not ts <= 11763) and (not user_id = 1 or not user_id is not null or not ts >= 11764 or not ts <= 11765) and (not user_id = 1 or not user_id is not null or not ts >= 11766 or not ts <= 11767) and (not user_id = 1 or not user_id is not null or not ts >= 11768 or not ts <= 11769) and (not user_id = 1 or not user_id is not null or not ts >= 11770 or not ts <= 11771) and (not user_id = 1 or not user_id is not null or not ts >= 11772 or not ts <= 11773) and (not user_id = 1 or not user_id is not null or not ts >= 11774 or not ts <= 11775) and (not user_id = 1 or not user_id is not null or not ts >= 11776 or not ts <= 11777) and (not user_id = 1 or not user_id is not null or not ts >= 11778 or not ts <= 11779) and (not user_id = 1 or not user_id is not null or not ts >= 11780 or not ts <= 11781) and (not user_id = 1 or not user_id is not null or not ts >= 11782 or not ts <= 11783) and (not user_id = 1 or not user_id is not null or not ts >= 11784 or not ts <= 11785) and (not user_id = 1 or not user_id is not null or not ts >= 11786 or not ts <= 11787) and (not user_id = 1 or not user_id is not null or not ts >= 11788 or not ts <= 11789) and (not user_id = 1 or not user_id is not null or not ts >= 11790 or not ts <= 11791) and (not user_id = 1 or not user_id is not null or not ts >= 11792 or not ts <= 11793) and (not user_id = 1 or not user_id is not null or not ts >= 11794 or not ts <= 11795) and (not user_id = 1 or not user_id is not null or not ts >= 11796 or not ts <= 11797) and (not user_id = 1 or not user_id is not null or not ts >= 11798 or not ts <= 11799) and (not user_id = 1 or not user_id is not null or not ts >= 11800 or not ts <= 11801) 
and (not user_id = 1 or not user_id is not null or not ts >= 11802 or not ts <= 11803) and (not user_id = 1 or not user_id is not null or not ts >= 11804 or not ts <= 11805) and (not user_id = 1 or not user_id is not null or not ts >= 11806 or not ts <= 11807) and (not user_id = 1 or not user_id is not null or not ts >= 11808 or not ts <= 11809) and (not user_id = 1 or not user_id is not null or not ts >= 11810 or not ts <= 11811) and (not user_id = 1 or not user_id is not null or not ts >= 11812 or not ts <= 11813) and (not user_id = 1 or not user_id is not null or not ts >= 11814 or not ts <= 11815) and (not user_id = 1 or not user_id is not null or not ts >= 11816 or not ts <= 11817) and (not user_id = 1 or not user_id is not null or not ts >= 11818 or not ts <= 11819) and (not user_id = 1 or not user_id is not null or not ts >= 11820 or not ts <= 11821) and (not user_id = 1 or not user_id is not null or not ts >= 11822 or not ts <= 11823) and (not user_id = 1 or not user_id is not null or not ts >= 11824 or not ts <= 11825) and (not user_id = 1 or not user_id is not null or not ts >= 11826 or not ts <= 11827) and (not user_id = 1 or not user_id is not null or not ts >= 11828 or not ts <= 11829) and (not user_id = 1 or not user_id is not null or not ts >= 11830 or not ts <= 11831) and (not user_id = 1 or not user_id is not null or not ts >= 11832 or not ts <= 11833) and (not user_id = 1 or not user_id is not null or not ts >= 11834 or not ts <= 11835) and (not user_id = 1 or not user_id is not null or not ts >= 11836 or not ts <= 11837) and (not user_id = 1 or not user_id is not null or not ts >= 11838 or not ts <= 11839) and (not user_id = 1 or not user_id is not null or not ts >= 11840 or not ts <= 11841) and (not user_id = 1 or not user_id is not null or not ts >= 11842 or not ts <= 11843) and (not user_id = 1 or not user_id is not null or not ts >= 11844 or not ts <= 11845) and (not user_id = 1 or not user_id is not null or not ts >= 11846 or not ts <= 
11847) and (not user_id = 1 or not user_id is not null or not ts >= 11848 or not ts <= 11849) and (not user_id = 1 or not user_id is not null or not ts >= 11850 or not ts <= 11851) and (not user_id = 1 or not user_id is not null or not ts >= 11852 or not ts <= 11853) and (not user_id = 1 or not user_id is not null or not ts >= 11854 or not ts <= 11855) and (not user_id = 1 or not user_id is not null or not ts >= 11856 or not ts <= 11857) and (not user_id = 1 or not user_id is not null or not ts >= 11858 or not ts <= 11859) and (not user_id = 1 or not user_id is not null or not ts >= 11860 or not ts <= 11861) and (not user_id = 1 or not user_id is not null or not ts >= 11862 or not ts <= 11863) and (not user_id = 1 or not user_id is not null or not ts >= 11864 or not ts <= 11865) and (not user_id = 1 or not user_id is not null or not ts >= 11866 or not ts <= 11867) and (not user_id = 1 or not user_id is not null or not ts >= 11868 or not ts <= 11869) and (not user_id = 1 or not user_id is not null or not ts >= 11870 or not ts <= 11871) and (not user_id = 1 or not user_id is not null or not ts >= 11872 or not ts <= 11873) and (not user_id = 1 or not user_id is not null or not ts >= 11874 or not ts <= 11875) and (not user_id = 1 or not user_id is not null or not ts >= 11876 or not ts <= 11877) and (not user_id = 1 or not user_id is not null or not ts >= 11878 or not ts <= 11879) and (not user_id = 1 or not user_id is not null or not ts >= 11880 or not ts <= 11881) and (not user_id = 1 or not user_id is not null or not ts >= 11882 or not ts <= 11883) and (not user_id = 1 or not user_id is not null or not ts >= 11884 or not ts <= 11885) and (not user_id = 1 or not user_id is not null or not ts >= 11886 or not ts <= 11887) and (not user_id = 1 or not user_id is not null or not ts >= 11888 or not ts <= 11889) and (not user_id = 1 or not user_id is not null or not ts >= 11890 or not ts <= 11891) and (not user_id = 1 or not user_id is not null or not ts >= 11892 or not ts 
<= 11893) and (not user_id = 1 or not user_id is not null or not ts >= 11894 or not ts <= 11895) and (not user_id = 1 or not user_id is not null or not ts >= 11896 or not ts <= 11897) and (not user_id = 1 or not user_id is not null or not ts >= 11898 or not ts <= 11899) and (not user_id = 1 or not user_id is not null or not ts >= 11900 or not ts <= 11901) and (not user_id = 1 or not user_id is not null or not ts >= 11902 or not ts <= 11903) and (not user_id = 1 or not user_id is not null or not ts >= 11904 or not ts <= 11905) and (not user_id = 1 or not user_id is not null or not ts >= 11906 or not ts <= 11907) and (not user_id = 1 or not user_id is not null or not ts >= 11908 or not ts <= 11909) and (not user_id = 1 or not user_id is not null or not ts >= 11910 or not ts <= 11911) and (not user_id = 1 or not user_id is not null or not ts >= 11912 or not ts <= 11913) and (not user_id = 1 or not user_id is not null or not ts >= 11914 or not ts <= 11915) and (not user_id = 1 or not user_id is not null or not ts >= 11916 or not ts <= 11917) and (not user_id = 1 or not user_id is not null or not ts >= 11918 or not ts <= 11919) and (not user_id = 1 or not user_id is not null or not ts >= 11920 or not ts <= 11921) and (not user_id = 1 or not user_id is not null or not ts >= 11922 or not ts <= 11923) and (not user_id = 1 or not user_id is not null or not ts >= 11924 or not ts <= 11925) and (not user_id = 1 or not user_id is not null or not ts >= 11926 or not ts <= 11927) and (not user_id = 1 or not user_id is not null or not ts >= 11928 or not ts <= 11929) and (not user_id = 1 or not user_id is not null or not ts >= 11930 or not ts <= 11931) and (not user_id = 1 or not user_id is not null or not ts >= 11932 or not ts <= 11933) and (not user_id = 1 or not user_id is not null or not ts >= 11934 or not ts <= 11935) and (not user_id = 1 or not user_id is not null or not ts >= 11936 or not ts <= 11937) and (not user_id = 1 or not user_id is not null or not ts >= 11938 or not 
ts <= 11939) and (not user_id = 1 or not user_id is not null or not ts >= 11940 or not ts <= 11941) and (not user_id = 1 or not user_id is not null or not ts >= 11942 or not ts <= 11943) and (not user_id = 1 or not user_id is not null or not ts >= 11944 or not ts <= 11945) and (not user_id = 1 or not user_id is not null or not ts >= 11946 or not ts <= 11947) and (not user_id = 1 or not user_id is not null or not ts >= 11948 or not ts <= 11949) and (not user_id = 1 or not user_id is not null or not ts >= 11950 or not ts <= 11951) and (not user_id = 1 or not user_id is not null or not ts >= 11952 or not ts <= 11953) and (not user_id = 1 or not user_id is not null or not ts >= 11954 or not ts <= 11955) and (not user_id = 1 or not user_id is not null or not ts >= 11956 or not ts <= 11957) and (not user_id = 1 or not user_id is not null or not ts >= 11958 or not ts <= 11959) and (not user_id = 1 or not user_id is not null or not ts >= 11960 or not ts <= 11961) and (not user_id = 1 or not user_id is not null or not ts >= 11962 or not ts <= 11963) and (not user_id = 1 or not user_id is not null or not ts >= 11964 or not ts <= 11965) and (not user_id = 1 or not user_id is not null or not ts >= 11966 or not ts <= 11967) and (not user_id = 1 or not user_id is not null or not ts >= 11968 or not ts <= 11969) and (not user_id = 1 or not user_id is not null or not ts >= 11970 or not ts <= 11971) and (not user_id = 1 or not user_id is not null or not ts >= 11972 or not ts <= 11973) and (not user_id = 1 or not user_id is not null or not ts >= 11974 or not ts <= 11975) and (not user_id = 1 or not user_id is not null or not ts >= 11976 or not ts <= 11977) and (not user_id = 1 or not user_id is not null or not ts >= 11978 or not ts <= 11979) and (not user_id = 1 or not user_id is not null or not ts >= 11980 or not ts <= 11981) and (not user_id = 1 or not user_id is not null or not ts >= 11982 or not ts <= 11983) and (not user_id = 1 or not user_id is not null or not ts >= 11984 or 
not ts <= 11985) and (not user_id = 1 or not user_id is not null or not ts >= 11986 or not ts <= 11987) and (not user_id = 1 or not user_id is not null or not ts >= 11988 or not ts <= 11989) and (not user_id = 1 or not user_id is not null or not ts >= 11990 or not ts <= 11991) and (not user_id = 1 or not user_id is not null or not ts >= 11992 or not ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit :__upper_limit", + "Query": "select 1, ts, weight_string(ts) from `user` where shard_key = 1 and is_removed = 1 and cmd in ('A', 'B', 'C') and (not user_id = 1 or not user_id is not null or not ts >= 1 or not ts <= 2) and (not user_id = 1 or not user_id is not null or not ts >= 12 or not ts <= 13) and (not user_id = 1 or not user_id is not null or not ts >= 14 or not ts <= 15) and (not user_id = 1 or not user_id is not null or not ts >= 16 or not ts <= 17) and (not user_id = 1 or not user_id is not null or not ts >= 18 or not ts <= 19) and (not user_id = 1 or not user_id is not null or not ts >= 110 or not ts <= 111) and (not user_id = 1 or not user_id is not null or not ts >= 112 or not ts <= 113) and (not user_id = 1 or not user_id is not null or not ts >= 114 or not ts <= 115) and (not user_id = 1 or not user_id is not null or not ts >= 116 or not ts <= 117) and (not user_id = 1 or not user_id is not null or not ts >= 118 or not ts <= 119) and (not user_id = 1 or not user_id is not null or not ts >= 120 or not ts <= 121) and (not user_id = 1 or not user_id is not null or not ts >= 122 or not ts <= 123) and (not user_id = 1 or not user_id is not null or not ts >= 124 or not ts <= 125) and (not user_id = 1 or not user_id is not null or not ts >= 126 or not ts <= 127) and (not user_id = 1 or not user_id is not null or not ts >= 128 or not ts <= 129) and (not user_id = 1 or not user_id is not null or not ts >= 130 or not ts <= 131) and (not user_id = 1 or not user_id is not null or not ts >= 132 or not ts <= 133) and (not user_id = 1 or not user_id is 
not null or not ts >= 134 or not ts <= 135) and (not user_id = 1 or not user_id is not null or not ts >= 136 or not ts <= 137) and (not user_id = 1 or not user_id is not null or not ts >= 138 or not ts <= 139) and (not user_id = 1 or not user_id is not null or not ts >= 140 or not ts <= 141) and (not user_id = 1 or not user_id is not null or not ts >= 142 or not ts <= 143) and (not user_id = 1 or not user_id is not null or not ts >= 144 or not ts <= 145) and (not user_id = 1 or not user_id is not null or not ts >= 146 or not ts <= 147) and (not user_id = 1 or not user_id is not null or not ts >= 148 or not ts <= 149) and (not user_id = 1 or not user_id is not null or not ts >= 150 or not ts <= 151) and (not user_id = 1 or not user_id is not null or not ts >= 152 or not ts <= 153) and (not user_id = 1 or not user_id is not null or not ts >= 154 or not ts <= 155) and (not user_id = 1 or not user_id is not null or not ts >= 156 or not ts <= 157) and (not user_id = 1 or not user_id is not null or not ts >= 158 or not ts <= 159) and (not user_id = 1 or not user_id is not null or not ts >= 160 or not ts <= 161) and (not user_id = 1 or not user_id is not null or not ts >= 162 or not ts <= 163) and (not user_id = 1 or not user_id is not null or not ts >= 164 or not ts <= 165) and (not user_id = 1 or not user_id is not null or not ts >= 166 or not ts <= 167) and (not user_id = 1 or not user_id is not null or not ts >= 168 or not ts <= 169) and (not user_id = 1 or not user_id is not null or not ts >= 170 or not ts <= 171) and (not user_id = 1 or not user_id is not null or not ts >= 172 or not ts <= 173) and (not user_id = 1 or not user_id is not null or not ts >= 174 or not ts <= 175) and (not user_id = 1 or not user_id is not null or not ts >= 176 or not ts <= 177) and (not user_id = 1 or not user_id is not null or not ts >= 178 or not ts <= 179) and (not user_id = 1 or not user_id is not null or not ts >= 180 or not ts <= 181) and (not user_id = 1 or not user_id is not 
null or not ts >= 182 or not ts <= 183) and (not user_id = 1 or not user_id is not null or not ts >= 184 or not ts <= 185) and (not user_id = 1 or not user_id is not null or not ts >= 186 or not ts <= 187) and (not user_id = 1 or not user_id is not null or not ts >= 188 or not ts <= 189) and (not user_id = 1 or not user_id is not null or not ts >= 190 or not ts <= 191) and (not user_id = 1 or not user_id is not null or not ts >= 192 or not ts <= 193) and (not user_id = 1 or not user_id is not null or not ts >= 194 or not ts <= 195) and (not user_id = 1 or not user_id is not null or not ts >= 196 or not ts <= 197) and (not user_id = 1 or not user_id is not null or not ts >= 198 or not ts <= 199) and (not user_id = 1 or not user_id is not null or not ts >= 1100 or not ts <= 1101) and (not user_id = 1 or not user_id is not null or not ts >= 1102 or not ts <= 1103) and (not user_id = 1 or not user_id is not null or not ts >= 1104 or not ts <= 1105) and (not user_id = 1 or not user_id is not null or not ts >= 1106 or not ts <= 1107) and (not user_id = 1 or not user_id is not null or not ts >= 1108 or not ts <= 1109) and (not user_id = 1 or not user_id is not null or not ts >= 1110 or not ts <= 1111) and (not user_id = 1 or not user_id is not null or not ts >= 1112 or not ts <= 1113) and (not user_id = 1 or not user_id is not null or not ts >= 1114 or not ts <= 1115) and (not user_id = 1 or not user_id is not null or not ts >= 1116 or not ts <= 1117) and (not user_id = 1 or not user_id is not null or not ts >= 1118 or not ts <= 1119) and (not user_id = 1 or not user_id is not null or not ts >= 1120 or not ts <= 1121) and (not user_id = 1 or not user_id is not null or not ts >= 1122 or not ts <= 1123) and (not user_id = 1 or not user_id is not null or not ts >= 1124 or not ts <= 1125) and (not user_id = 1 or not user_id is not null or not ts >= 1126 or not ts <= 1127) and (not user_id = 1 or not user_id is not null or not ts >= 1128 or not ts <= 1129) and (not user_id = 1 
or not user_id is not null or not ts >= 1130 or not ts <= 1131) and (not user_id = 1 or not user_id is not null or not ts >= 1132 or not ts <= 1133) and (not user_id = 1 or not user_id is not null or not ts >= 1134 or not ts <= 1135) and (not user_id = 1 or not user_id is not null or not ts >= 1136 or not ts <= 1137) and (not user_id = 1 or not user_id is not null or not ts >= 1138 or not ts <= 1139) and (not user_id = 1 or not user_id is not null or not ts >= 1140 or not ts <= 1141) and (not user_id = 1 or not user_id is not null or not ts >= 1142 or not ts <= 1143) and (not user_id = 1 or not user_id is not null or not ts >= 1144 or not ts <= 1145) and (not user_id = 1 or not user_id is not null or not ts >= 1146 or not ts <= 1147) and (not user_id = 1 or not user_id is not null or not ts >= 1148 or not ts <= 1149) and (not user_id = 1 or not user_id is not null or not ts >= 1150 or not ts <= 1151) and (not user_id = 1 or not user_id is not null or not ts >= 1152 or not ts <= 1153) and (not user_id = 1 or not user_id is not null or not ts >= 1154 or not ts <= 1155) and (not user_id = 1 or not user_id is not null or not ts >= 1156 or not ts <= 1157) and (not user_id = 1 or not user_id is not null or not ts >= 1158 or not ts <= 1159) and (not user_id = 1 or not user_id is not null or not ts >= 1160 or not ts <= 1161) and (not user_id = 1 or not user_id is not null or not ts >= 1162 or not ts <= 1163) and (not user_id = 1 or not user_id is not null or not ts >= 1164 or not ts <= 1165) and (not user_id = 1 or not user_id is not null or not ts >= 1166 or not ts <= 1167) and (not user_id = 1 or not user_id is not null or not ts >= 1168 or not ts <= 1169) and (not user_id = 1 or not user_id is not null or not ts >= 1170 or not ts <= 1171) and (not user_id = 1 or not user_id is not null or not ts >= 1172 or not ts <= 1173) and (not user_id = 1 or not user_id is not null or not ts >= 1174 or not ts <= 1175) and (not user_id = 1 or not user_id is not null or not ts >= 1176 
or not ts <= 1177) and (not user_id = 1 or not user_id is not null or not ts >= 1178 or not ts <= 1179) and (not user_id = 1 or not user_id is not null or not ts >= 1180 or not ts <= 1181) and (not user_id = 1 or not user_id is not null or not ts >= 1182 or not ts <= 1183) and (not user_id = 1 or not user_id is not null or not ts >= 1184 or not ts <= 1185) and (not user_id = 1 or not user_id is not null or not ts >= 1186 or not ts <= 1187) and (not user_id = 1 or not user_id is not null or not ts >= 1188 or not ts <= 1189) and (not user_id = 1 or not user_id is not null or not ts >= 1190 or not ts <= 1191) and (not user_id = 1 or not user_id is not null or not ts >= 1192 or not ts <= 1193) and (not user_id = 1 or not user_id is not null or not ts >= 1194 or not ts <= 1195) and (not user_id = 1 or not user_id is not null or not ts >= 1196 or not ts <= 1197) and (not user_id = 1 or not user_id is not null or not ts >= 1198 or not ts <= 1199) and (not user_id = 1 or not user_id is not null or not ts >= 1200 or not ts <= 1201) and (not user_id = 1 or not user_id is not null or not ts >= 1202 or not ts <= 1203) and (not user_id = 1 or not user_id is not null or not ts >= 1204 or not ts <= 1205) and (not user_id = 1 or not user_id is not null or not ts >= 1206 or not ts <= 1207) and (not user_id = 1 or not user_id is not null or not ts >= 1208 or not ts <= 1209) and (not user_id = 1 or not user_id is not null or not ts >= 1210 or not ts <= 1211) and (not user_id = 1 or not user_id is not null or not ts >= 1212 or not ts <= 1213) and (not user_id = 1 or not user_id is not null or not ts >= 1214 or not ts <= 1215) and (not user_id = 1 or not user_id is not null or not ts >= 1216 or not ts <= 1217) and (not user_id = 1 or not user_id is not null or not ts >= 1218 or not ts <= 1219) and (not user_id = 1 or not user_id is not null or not ts >= 1220 or not ts <= 1221) and (not user_id = 1 or not user_id is not null or not ts >= 1222 or not ts <= 1223) and (not user_id = 1 or 
not user_id is not null or not ts >= 1224 or not ts <= 1225) and (not user_id = 1 or not user_id is not null or not ts >= 1226 or not ts <= 1227) and (not user_id = 1 or not user_id is not null or not ts >= 1228 or not ts <= 1229) and (not user_id = 1 or not user_id is not null or not ts >= 1230 or not ts <= 1231) and (not user_id = 1 or not user_id is not null or not ts >= 1232 or not ts <= 1233) and (not user_id = 1 or not user_id is not null or not ts >= 1234 or not ts <= 1235) and (not user_id = 1 or not user_id is not null or not ts >= 1236 or not ts <= 1237) and (not user_id = 1 or not user_id is not null or not ts >= 1238 or not ts <= 1239) and (not user_id = 1 or not user_id is not null or not ts >= 1240 or not ts <= 1241) and (not user_id = 1 or not user_id is not null or not ts >= 1242 or not ts <= 1243) and (not user_id = 1 or not user_id is not null or not ts >= 1244 or not ts <= 1245) and (not user_id = 1 or not user_id is not null or not ts >= 1246 or not ts <= 1247) and (not user_id = 1 or not user_id is not null or not ts >= 1248 or not ts <= 1249) and (not user_id = 1 or not user_id is not null or not ts >= 1250 or not ts <= 1251) and (not user_id = 1 or not user_id is not null or not ts >= 1252 or not ts <= 1253) and (not user_id = 1 or not user_id is not null or not ts >= 1254 or not ts <= 1255) and (not user_id = 1 or not user_id is not null or not ts >= 1256 or not ts <= 1257) and (not user_id = 1 or not user_id is not null or not ts >= 1258 or not ts <= 1259) and (not user_id = 1 or not user_id is not null or not ts >= 1260 or not ts <= 1261) and (not user_id = 1 or not user_id is not null or not ts >= 1262 or not ts <= 1263) and (not user_id = 1 or not user_id is not null or not ts >= 1264 or not ts <= 1265) and (not user_id = 1 or not user_id is not null or not ts >= 1266 or not ts <= 1267) and (not user_id = 1 or not user_id is not null or not ts >= 1268 or not ts <= 1269) and (not user_id = 1 or not user_id is not null or not ts >= 1270 or 
not ts <= 1271) and (not user_id = 1 or not user_id is not null or not ts >= 1272 or not ts <= 1273) and (not user_id = 1 or not user_id is not null or not ts >= 1274 or not ts <= 1275) and (not user_id = 1 or not user_id is not null or not ts >= 1276 or not ts <= 1277) and (not user_id = 1 or not user_id is not null or not ts >= 1278 or not ts <= 1279) and (not user_id = 1 or not user_id is not null or not ts >= 1280 or not ts <= 1281) and (not user_id = 1 or not user_id is not null or not ts >= 1282 or not ts <= 1283) and (not user_id = 1 or not user_id is not null or not ts >= 1284 or not ts <= 1285) and (not user_id = 1 or not user_id is not null or not ts >= 1286 or not ts <= 1287) and (not user_id = 1 or not user_id is not null or not ts >= 1288 or not ts <= 1289) and (not user_id = 1 or not user_id is not null or not ts >= 1290 or not ts <= 1291) and (not user_id = 1 or not user_id is not null or not ts >= 1292 or not ts <= 1293) and (not user_id = 1 or not user_id is not null or not ts >= 1294 or not ts <= 1295) and (not user_id = 1 or not user_id is not null or not ts >= 1296 or not ts <= 1297) and (not user_id = 1 or not user_id is not null or not ts >= 1298 or not ts <= 1299) and (not user_id = 1 or not user_id is not null or not ts >= 1300 or not ts <= 1301) and (not user_id = 1 or not user_id is not null or not ts >= 1302 or not ts <= 1303) and (not user_id = 1 or not user_id is not null or not ts >= 1304 or not ts <= 1305) and (not user_id = 1 or not user_id is not null or not ts >= 1306 or not ts <= 1307) and (not user_id = 1 or not user_id is not null or not ts >= 1308 or not ts <= 1309) and (not user_id = 1 or not user_id is not null or not ts >= 1310 or not ts <= 1311) and (not user_id = 1 or not user_id is not null or not ts >= 1312 or not ts <= 1313) and (not user_id = 1 or not user_id is not null or not ts >= 1314 or not ts <= 1315) and (not user_id = 1 or not user_id is not null or not ts >= 1316 or not ts <= 1317) and (not user_id = 1 or not 
user_id is not null or not ts >= 1318 or not ts <= 1319) and (not user_id = 1 or not user_id is not null or not ts >= 1320 or not ts <= 1321) and (not user_id = 1 or not user_id is not null or not ts >= 1322 or not ts <= 1323) and (not user_id = 1 or not user_id is not null or not ts >= 1324 or not ts <= 1325) and (not user_id = 1 or not user_id is not null or not ts >= 1326 or not ts <= 1327) and (not user_id = 1 or not user_id is not null or not ts >= 1328 or not ts <= 1329) and (not user_id = 1 or not user_id is not null or not ts >= 1330 or not ts <= 1331) and (not user_id = 1 or not user_id is not null or not ts >= 1332 or not ts <= 1333) and (not user_id = 1 or not user_id is not null or not ts >= 1334 or not ts <= 1335) and (not user_id = 1 or not user_id is not null or not ts >= 1336 or not ts <= 1337) and (not user_id = 1 or not user_id is not null or not ts >= 1338 or not ts <= 1339) and (not user_id = 1 or not user_id is not null or not ts >= 1340 or not ts <= 1341) and (not user_id = 1 or not user_id is not null or not ts >= 1342 or not ts <= 1343) and (not user_id = 1 or not user_id is not null or not ts >= 1344 or not ts <= 1345) and (not user_id = 1 or not user_id is not null or not ts >= 1346 or not ts <= 1347) and (not user_id = 1 or not user_id is not null or not ts >= 1348 or not ts <= 1349) and (not user_id = 1 or not user_id is not null or not ts >= 1350 or not ts <= 1351) and (not user_id = 1 or not user_id is not null or not ts >= 1352 or not ts <= 1353) and (not user_id = 1 or not user_id is not null or not ts >= 1354 or not ts <= 1355) and (not user_id = 1 or not user_id is not null or not ts >= 1356 or not ts <= 1357) and (not user_id = 1 or not user_id is not null or not ts >= 1358 or not ts <= 1359) and (not user_id = 1 or not user_id is not null or not ts >= 1360 or not ts <= 1361) and (not user_id = 1 or not user_id is not null or not ts >= 1362 or not ts <= 1363) and (not user_id = 1 or not user_id is not null or not ts >= 1364 or not 
ts <= 1365) and (not user_id = 1 or not user_id is not null or not ts >= 1366 or not ts <= 1367) and (not user_id = 1 or not user_id is not null or not ts >= 1368 or not ts <= 1369) and (not user_id = 1 or not user_id is not null or not ts >= 1370 or not ts <= 1371) and (not user_id = 1 or not user_id is not null or not ts >= 1372 or not ts <= 1373) and (not user_id = 1 or not user_id is not null or not ts >= 1374 or not ts <= 1375) and (not user_id = 1 or not user_id is not null or not ts >= 1376 or not ts <= 1377) and (not user_id = 1 or not user_id is not null or not ts >= 1378 or not ts <= 1379) and (not user_id = 1 or not user_id is not null or not ts >= 1380 or not ts <= 1381) and (not user_id = 1 or not user_id is not null or not ts >= 1382 or not ts <= 1383) and (not user_id = 1 or not user_id is not null or not ts >= 1384 or not ts <= 1385) and (not user_id = 1 or not user_id is not null or not ts >= 1386 or not ts <= 1387) and (not user_id = 1 or not user_id is not null or not ts >= 1388 or not ts <= 1389) and (not user_id = 1 or not user_id is not null or not ts >= 1390 or not ts <= 1391) and (not user_id = 1 or not user_id is not null or not ts >= 1392 or not ts <= 1393) and (not user_id = 1 or not user_id is not null or not ts >= 1394 or not ts <= 1395) and (not user_id = 1 or not user_id is not null or not ts >= 1396 or not ts <= 1397) and (not user_id = 1 or not user_id is not null or not ts >= 1398 or not ts <= 1399) and (not user_id = 1 or not user_id is not null or not ts >= 1400 or not ts <= 1401) and (not user_id = 1 or not user_id is not null or not ts >= 1402 or not ts <= 1403) and (not user_id = 1 or not user_id is not null or not ts >= 1404 or not ts <= 1405) and (not user_id = 1 or not user_id is not null or not ts >= 1406 or not ts <= 1407) and (not user_id = 1 or not user_id is not null or not ts >= 1408 or not ts <= 1409) and (not user_id = 1 or not user_id is not null or not ts >= 1410 or not ts <= 1411) and (not user_id = 1 or not 
user_id is not null or not ts >= 1412 or not ts <= 1413) and (not user_id = 1 or not user_id is not null or not ts >= 1414 or not ts <= 1415) and (not user_id = 1 or not user_id is not null or not ts >= 1416 or not ts <= 1417) and (not user_id = 1 or not user_id is not null or not ts >= 1418 or not ts <= 1419) and (not user_id = 1 or not user_id is not null or not ts >= 1420 or not ts <= 1421) and (not user_id = 1 or not user_id is not null or not ts >= 1422 or not ts <= 1423) and (not user_id = 1 or not user_id is not null or not ts >= 1424 or not ts <= 1425) and (not user_id = 1 or not user_id is not null or not ts >= 1426 or not ts <= 1427) and (not user_id = 1 or not user_id is not null or not ts >= 1428 or not ts <= 1429) and (not user_id = 1 or not user_id is not null or not ts >= 1430 or not ts <= 1431) and (not user_id = 1 or not user_id is not null or not ts >= 1432 or not ts <= 1433) and (not user_id = 1 or not user_id is not null or not ts >= 1434 or not ts <= 1435) and (not user_id = 1 or not user_id is not null or not ts >= 1436 or not ts <= 1437) and (not user_id = 1 or not user_id is not null or not ts >= 1438 or not ts <= 1439) and (not user_id = 1 or not user_id is not null or not ts >= 1440 or not ts <= 1441) and (not user_id = 1 or not user_id is not null or not ts >= 1442 or not ts <= 1443) and (not user_id = 1 or not user_id is not null or not ts >= 1444 or not ts <= 1445) and (not user_id = 1 or not user_id is not null or not ts >= 1446 or not ts <= 1447) and (not user_id = 1 or not user_id is not null or not ts >= 1448 or not ts <= 1449) and (not user_id = 1 or not user_id is not null or not ts >= 1450 or not ts <= 1451) and (not user_id = 1 or not user_id is not null or not ts >= 1452 or not ts <= 1453) and (not user_id = 1 or not user_id is not null or not ts >= 1454 or not ts <= 1455) and (not user_id = 1 or not user_id is not null or not ts >= 1456 or not ts <= 1457) and (not user_id = 1 or not user_id is not null or not ts >= 1458 or not 
ts <= 1459) and (not user_id = 1 or not user_id is not null or not ts >= 1460 or not ts <= 1461) and (not user_id = 1 or not user_id is not null or not ts >= 1462 or not ts <= 1463) and (not user_id = 1 or not user_id is not null or not ts >= 1464 or not ts <= 1465) and (not user_id = 1 or not user_id is not null or not ts >= 1466 or not ts <= 1467) and (not user_id = 1 or not user_id is not null or not ts >= 1468 or not ts <= 1469) and (not user_id = 1 or not user_id is not null or not ts >= 1470 or not ts <= 1471) and (not user_id = 1 or not user_id is not null or not ts >= 1472 or not ts <= 1473) and (not user_id = 1 or not user_id is not null or not ts >= 1474 or not ts <= 1475) and (not user_id = 1 or not user_id is not null or not ts >= 1476 or not ts <= 1477) and (not user_id = 1 or not user_id is not null or not ts >= 1478 or not ts <= 1479) and (not user_id = 1 or not user_id is not null or not ts >= 1480 or not ts <= 1481) and (not user_id = 1 or not user_id is not null or not ts >= 1482 or not ts <= 1483) and (not user_id = 1 or not user_id is not null or not ts >= 1484 or not ts <= 1485) and (not user_id = 1 or not user_id is not null or not ts >= 1486 or not ts <= 1487) and (not user_id = 1 or not user_id is not null or not ts >= 1488 or not ts <= 1489) and (not user_id = 1 or not user_id is not null or not ts >= 1490 or not ts <= 1491) and (not user_id = 1 or not user_id is not null or not ts >= 1492 or not ts <= 1493) and (not user_id = 1 or not user_id is not null or not ts >= 1494 or not ts <= 1495) and (not user_id = 1 or not user_id is not null or not ts >= 1496 or not ts <= 1497) and (not user_id = 1 or not user_id is not null or not ts >= 1498 or not ts <= 1499) and (not user_id = 1 or not user_id is not null or not ts >= 1500 or not ts <= 1501) and (not user_id = 1 or not user_id is not null or not ts >= 1502 or not ts <= 1503) and (not user_id = 1 or not user_id is not null or not ts >= 1504 or not ts <= 1505) and (not user_id = 1 or not 
user_id is not null or not ts >= 1506 or not ts <= 1507) and (not user_id = 1 or not user_id is not null or not ts >= 1508 or not ts <= 1509) and (not user_id = 1 or not user_id is not null or not ts >= 1510 or not ts <= 1511) and (not user_id = 1 or not user_id is not null or not ts >= 1512 or not ts <= 1513) and (not user_id = 1 or not user_id is not null or not ts >= 1514 or not ts <= 1515) and (not user_id = 1 or not user_id is not null or not ts >= 1516 or not ts <= 1517) and (not user_id = 1 or not user_id is not null or not ts >= 1518 or not ts <= 1519) and (not user_id = 1 or not user_id is not null or not ts >= 1520 or not ts <= 1521) and (not user_id = 1 or not user_id is not null or not ts >= 1522 or not ts <= 1523) and (not user_id = 1 or not user_id is not null or not ts >= 1524 or not ts <= 1525) and (not user_id = 1 or not user_id is not null or not ts >= 1526 or not ts <= 1527) and (not user_id = 1 or not user_id is not null or not ts >= 1528 or not ts <= 1529) and (not user_id = 1 or not user_id is not null or not ts >= 1530 or not ts <= 1531) and (not user_id = 1 or not user_id is not null or not ts >= 1532 or not ts <= 1533) and (not user_id = 1 or not user_id is not null or not ts >= 1534 or not ts <= 1535) and (not user_id = 1 or not user_id is not null or not ts >= 1536 or not ts <= 1537) and (not user_id = 1 or not user_id is not null or not ts >= 1538 or not ts <= 1539) and (not user_id = 1 or not user_id is not null or not ts >= 1540 or not ts <= 1541) and (not user_id = 1 or not user_id is not null or not ts >= 1542 or not ts <= 1543) and (not user_id = 1 or not user_id is not null or not ts >= 1544 or not ts <= 1545) and (not user_id = 1 or not user_id is not null or not ts >= 1546 or not ts <= 1547) and (not user_id = 1 or not user_id is not null or not ts >= 1548 or not ts <= 1549) and (not user_id = 1 or not user_id is not null or not ts >= 1550 or not ts <= 1551) and (not user_id = 1 or not user_id is not null or not ts >= 1552 or not 
ts <= 1553) and (not user_id = 1 or not user_id is not null or not ts >= 1554 or not ts <= 1555) and (not user_id = 1 or not user_id is not null or not ts >= 1556 or not ts <= 1557) and (not user_id = 1 or not user_id is not null or not ts >= 1558 or not ts <= 1559) and (not user_id = 1 or not user_id is not null or not ts >= 1560 or not ts <= 1561) and (not user_id = 1 or not user_id is not null or not ts >= 1562 or not ts <= 1563) and (not user_id = 1 or not user_id is not null or not ts >= 1564 or not ts <= 1565) and (not user_id = 1 or not user_id is not null or not ts >= 1566 or not ts <= 1567) and (not user_id = 1 or not user_id is not null or not ts >= 1568 or not ts <= 1569) and (not user_id = 1 or not user_id is not null or not ts >= 1570 or not ts <= 1571) and (not user_id = 1 or not user_id is not null or not ts >= 1572 or not ts <= 1573) and (not user_id = 1 or not user_id is not null or not ts >= 1574 or not ts <= 1575) and (not user_id = 1 or not user_id is not null or not ts >= 1576 or not ts <= 1577) and (not user_id = 1 or not user_id is not null or not ts >= 1578 or not ts <= 1579) and (not user_id = 1 or not user_id is not null or not ts >= 1580 or not ts <= 1581) and (not user_id = 1 or not user_id is not null or not ts >= 1582 or not ts <= 1583) and (not user_id = 1 or not user_id is not null or not ts >= 1584 or not ts <= 1585) and (not user_id = 1 or not user_id is not null or not ts >= 1586 or not ts <= 1587) and (not user_id = 1 or not user_id is not null or not ts >= 1588 or not ts <= 1589) and (not user_id = 1 or not user_id is not null or not ts >= 1590 or not ts <= 1591) and (not user_id = 1 or not user_id is not null or not ts >= 1592 or not ts <= 1593) and (not user_id = 1 or not user_id is not null or not ts >= 1594 or not ts <= 1595) and (not user_id = 1 or not user_id is not null or not ts >= 1596 or not ts <= 1597) and (not user_id = 1 or not user_id is not null or not ts >= 1598 or not ts <= 1599) and (not user_id = 1 or not 
user_id is not null or not ts >= 1600 or not ts <= 1601) and (not user_id = 1 or not user_id is not null or not ts >= 1602 or not ts <= 1603) and (not user_id = 1 or not user_id is not null or not ts >= 1604 or not ts <= 1605) and (not user_id = 1 or not user_id is not null or not ts >= 1606 or not ts <= 1607) and (not user_id = 1 or not user_id is not null or not ts >= 1608 or not ts <= 1609) and (not user_id = 1 or not user_id is not null or not ts >= 1610 or not ts <= 1611) and (not user_id = 1 or not user_id is not null or not ts >= 1612 or not ts <= 1613) and (not user_id = 1 or not user_id is not null or not ts >= 1614 or not ts <= 1615) and (not user_id = 1 or not user_id is not null or not ts >= 1616 or not ts <= 1617) and (not user_id = 1 or not user_id is not null or not ts >= 1618 or not ts <= 1619) and (not user_id = 1 or not user_id is not null or not ts >= 1620 or not ts <= 1621) and (not user_id = 1 or not user_id is not null or not ts >= 1622 or not ts <= 1623) and (not user_id = 1 or not user_id is not null or not ts >= 1624 or not ts <= 1625) and (not user_id = 1 or not user_id is not null or not ts >= 1626 or not ts <= 1627) and (not user_id = 1 or not user_id is not null or not ts >= 1628 or not ts <= 1629) and (not user_id = 1 or not user_id is not null or not ts >= 1630 or not ts <= 1631) and (not user_id = 1 or not user_id is not null or not ts >= 1632 or not ts <= 1633) and (not user_id = 1 or not user_id is not null or not ts >= 1634 or not ts <= 1635) and (not user_id = 1 or not user_id is not null or not ts >= 1636 or not ts <= 1637) and (not user_id = 1 or not user_id is not null or not ts >= 1638 or not ts <= 1639) and (not user_id = 1 or not user_id is not null or not ts >= 1640 or not ts <= 1641) and (not user_id = 1 or not user_id is not null or not ts >= 1642 or not ts <= 1643) and (not user_id = 1 or not user_id is not null or not ts >= 1644 or not ts <= 1645) and (not user_id = 1 or not user_id is not null or not ts >= 1646 or not 
ts <= 1647) and (not user_id = 1 or not user_id is not null or not ts >= 1648 or not ts <= 1649) and (not user_id = 1 or not user_id is not null or not ts >= 1650 or not ts <= 1651) and (not user_id = 1 or not user_id is not null or not ts >= 1652 or not ts <= 1653) and (not user_id = 1 or not user_id is not null or not ts >= 1654 or not ts <= 1655) and (not user_id = 1 or not user_id is not null or not ts >= 1656 or not ts <= 1657) and (not user_id = 1 or not user_id is not null or not ts >= 1658 or not ts <= 1659) and (not user_id = 1 or not user_id is not null or not ts >= 1660 or not ts <= 1661) and (not user_id = 1 or not user_id is not null or not ts >= 1662 or not ts <= 1663) and (not user_id = 1 or not user_id is not null or not ts >= 1664 or not ts <= 1665) and (not user_id = 1 or not user_id is not null or not ts >= 1666 or not ts <= 1667) and (not user_id = 1 or not user_id is not null or not ts >= 1668 or not ts <= 1669) and (not user_id = 1 or not user_id is not null or not ts >= 1670 or not ts <= 1671) and (not user_id = 1 or not user_id is not null or not ts >= 1672 or not ts <= 1673) and (not user_id = 1 or not user_id is not null or not ts >= 1674 or not ts <= 1675) and (not user_id = 1 or not user_id is not null or not ts >= 1676 or not ts <= 1677) and (not user_id = 1 or not user_id is not null or not ts >= 1678 or not ts <= 1679) and (not user_id = 1 or not user_id is not null or not ts >= 1680 or not ts <= 1681) and (not user_id = 1 or not user_id is not null or not ts >= 1682 or not ts <= 1683) and (not user_id = 1 or not user_id is not null or not ts >= 1684 or not ts <= 1685) and (not user_id = 1 or not user_id is not null or not ts >= 1686 or not ts <= 1687) and (not user_id = 1 or not user_id is not null or not ts >= 1688 or not ts <= 1689) and (not user_id = 1 or not user_id is not null or not ts >= 1690 or not ts <= 1691) and (not user_id = 1 or not user_id is not null or not ts >= 1692 or not ts <= 1693) and (not user_id = 1 or not 
user_id is not null or not ts >= 1694 or not ts <= 1695) and (not user_id = 1 or not user_id is not null or not ts >= 1696 or not ts <= 1697) and (not user_id = 1 or not user_id is not null or not ts >= 1698 or not ts <= 1699) and (not user_id = 1 or not user_id is not null or not ts >= 1700 or not ts <= 1701) and (not user_id = 1 or not user_id is not null or not ts >= 1702 or not ts <= 1703) and (not user_id = 1 or not user_id is not null or not ts >= 1704 or not ts <= 1705) and (not user_id = 1 or not user_id is not null or not ts >= 1706 or not ts <= 1707) and (not user_id = 1 or not user_id is not null or not ts >= 1708 or not ts <= 1709) and (not user_id = 1 or not user_id is not null or not ts >= 1710 or not ts <= 1711) and (not user_id = 1 or not user_id is not null or not ts >= 1712 or not ts <= 1713) and (not user_id = 1 or not user_id is not null or not ts >= 1714 or not ts <= 1715) and (not user_id = 1 or not user_id is not null or not ts >= 1716 or not ts <= 1717) and (not user_id = 1 or not user_id is not null or not ts >= 1718 or not ts <= 1719) and (not user_id = 1 or not user_id is not null or not ts >= 1720 or not ts <= 1721) and (not user_id = 1 or not user_id is not null or not ts >= 1722 or not ts <= 1723) and (not user_id = 1 or not user_id is not null or not ts >= 1724 or not ts <= 1725) and (not user_id = 1 or not user_id is not null or not ts >= 1726 or not ts <= 1727) and (not user_id = 1 or not user_id is not null or not ts >= 1728 or not ts <= 1729) and (not user_id = 1 or not user_id is not null or not ts >= 1730 or not ts <= 1731) and (not user_id = 1 or not user_id is not null or not ts >= 1732 or not ts <= 1733) and (not user_id = 1 or not user_id is not null or not ts >= 1734 or not ts <= 1735) and (not user_id = 1 or not user_id is not null or not ts >= 1736 or not ts <= 1737) and (not user_id = 1 or not user_id is not null or not ts >= 1738 or not ts <= 1739) and (not user_id = 1 or not user_id is not null or not ts >= 1740 or not 
ts <= 1741) and (not user_id = 1 or not user_id is not null or not ts >= 1742 or not ts <= 1743) and (not user_id = 1 or not user_id is not null or not ts >= 1744 or not ts <= 1745) and (not user_id = 1 or not user_id is not null or not ts >= 1746 or not ts <= 1747) and (not user_id = 1 or not user_id is not null or not ts >= 1748 or not ts <= 1749) and (not user_id = 1 or not user_id is not null or not ts >= 1750 or not ts <= 1751) and (not user_id = 1 or not user_id is not null or not ts >= 1752 or not ts <= 1753) and (not user_id = 1 or not user_id is not null or not ts >= 1754 or not ts <= 1755) and (not user_id = 1 or not user_id is not null or not ts >= 1756 or not ts <= 1757) and (not user_id = 1 or not user_id is not null or not ts >= 1758 or not ts <= 1759) and (not user_id = 1 or not user_id is not null or not ts >= 1760 or not ts <= 1761) and (not user_id = 1 or not user_id is not null or not ts >= 1762 or not ts <= 1763) and (not user_id = 1 or not user_id is not null or not ts >= 1764 or not ts <= 1765) and (not user_id = 1 or not user_id is not null or not ts >= 1766 or not ts <= 1767) and (not user_id = 1 or not user_id is not null or not ts >= 1768 or not ts <= 1769) and (not user_id = 1 or not user_id is not null or not ts >= 1770 or not ts <= 1771) and (not user_id = 1 or not user_id is not null or not ts >= 1772 or not ts <= 1773) and (not user_id = 1 or not user_id is not null or not ts >= 1774 or not ts <= 1775) and (not user_id = 1 or not user_id is not null or not ts >= 1776 or not ts <= 1777) and (not user_id = 1 or not user_id is not null or not ts >= 1778 or not ts <= 1779) and (not user_id = 1 or not user_id is not null or not ts >= 1780 or not ts <= 1781) and (not user_id = 1 or not user_id is not null or not ts >= 1782 or not ts <= 1783) and (not user_id = 1 or not user_id is not null or not ts >= 1784 or not ts <= 1785) and (not user_id = 1 or not user_id is not null or not ts >= 1786 or not ts <= 1787) and (not user_id = 1 or not 
user_id is not null or not ts >= 1788 or not ts <= 1789) and (not user_id = 1 or not user_id is not null or not ts >= 1790 or not ts <= 1791) and (not user_id = 1 or not user_id is not null or not ts >= 1792 or not ts <= 1793) and (not user_id = 1 or not user_id is not null or not ts >= 1794 or not ts <= 1795) and (not user_id = 1 or not user_id is not null or not ts >= 1796 or not ts <= 1797) and (not user_id = 1 or not user_id is not null or not ts >= 1798 or not ts <= 1799) and (not user_id = 1 or not user_id is not null or not ts >= 1800 or not ts <= 1801) and (not user_id = 1 or not user_id is not null or not ts >= 1802 or not ts <= 1803) and (not user_id = 1 or not user_id is not null or not ts >= 1804 or not ts <= 1805) and (not user_id = 1 or not user_id is not null or not ts >= 1806 or not ts <= 1807) and (not user_id = 1 or not user_id is not null or not ts >= 1808 or not ts <= 1809) and (not user_id = 1 or not user_id is not null or not ts >= 1810 or not ts <= 1811) and (not user_id = 1 or not user_id is not null or not ts >= 1812 or not ts <= 1813) and (not user_id = 1 or not user_id is not null or not ts >= 1814 or not ts <= 1815) and (not user_id = 1 or not user_id is not null or not ts >= 1816 or not ts <= 1817) and (not user_id = 1 or not user_id is not null or not ts >= 1818 or not ts <= 1819) and (not user_id = 1 or not user_id is not null or not ts >= 1820 or not ts <= 1821) and (not user_id = 1 or not user_id is not null or not ts >= 1822 or not ts <= 1823) and (not user_id = 1 or not user_id is not null or not ts >= 1824 or not ts <= 1825) and (not user_id = 1 or not user_id is not null or not ts >= 1826 or not ts <= 1827) and (not user_id = 1 or not user_id is not null or not ts >= 1828 or not ts <= 1829) and (not user_id = 1 or not user_id is not null or not ts >= 1830 or not ts <= 1831) and (not user_id = 1 or not user_id is not null or not ts >= 1832 or not ts <= 1833) and (not user_id = 1 or not user_id is not null or not ts >= 1834 or not 
ts <= 1835) and (not user_id = 1 or not user_id is not null or not ts >= 1836 or not ts <= 1837) and (not user_id = 1 or not user_id is not null or not ts >= 1838 or not ts <= 1839) and (not user_id = 1 or not user_id is not null or not ts >= 1840 or not ts <= 1841) and (not user_id = 1 or not user_id is not null or not ts >= 1842 or not ts <= 1843) and (not user_id = 1 or not user_id is not null or not ts >= 1844 or not ts <= 1845) and (not user_id = 1 or not user_id is not null or not ts >= 1846 or not ts <= 1847) and (not user_id = 1 or not user_id is not null or not ts >= 1848 or not ts <= 1849) and (not user_id = 1 or not user_id is not null or not ts >= 1850 or not ts <= 1851) and (not user_id = 1 or not user_id is not null or not ts >= 1852 or not ts <= 1853) and (not user_id = 1 or not user_id is not null or not ts >= 1854 or not ts <= 1855) and (not user_id = 1 or not user_id is not null or not ts >= 1856 or not ts <= 1857) and (not user_id = 1 or not user_id is not null or not ts >= 1858 or not ts <= 1859) and (not user_id = 1 or not user_id is not null or not ts >= 1860 or not ts <= 1861) and (not user_id = 1 or not user_id is not null or not ts >= 1862 or not ts <= 1863) and (not user_id = 1 or not user_id is not null or not ts >= 1864 or not ts <= 1865) and (not user_id = 1 or not user_id is not null or not ts >= 1866 or not ts <= 1867) and (not user_id = 1 or not user_id is not null or not ts >= 1868 or not ts <= 1869) and (not user_id = 1 or not user_id is not null or not ts >= 1870 or not ts <= 1871) and (not user_id = 1 or not user_id is not null or not ts >= 1872 or not ts <= 1873) and (not user_id = 1 or not user_id is not null or not ts >= 1874 or not ts <= 1875) and (not user_id = 1 or not user_id is not null or not ts >= 1876 or not ts <= 1877) and (not user_id = 1 or not user_id is not null or not ts >= 1878 or not ts <= 1879) and (not user_id = 1 or not user_id is not null or not ts >= 1880 or not ts <= 1881) and (not user_id = 1 or not 
user_id is not null or not ts >= 1882 or not ts <= 1883) and (not user_id = 1 or not user_id is not null or not ts >= 1884 or not ts <= 1885) and (not user_id = 1 or not user_id is not null or not ts >= 1886 or not ts <= 1887) and (not user_id = 1 or not user_id is not null or not ts >= 1888 or not ts <= 1889) and (not user_id = 1 or not user_id is not null or not ts >= 1890 or not ts <= 1891) and (not user_id = 1 or not user_id is not null or not ts >= 1892 or not ts <= 1893) and (not user_id = 1 or not user_id is not null or not ts >= 1894 or not ts <= 1895) and (not user_id = 1 or not user_id is not null or not ts >= 1896 or not ts <= 1897) and (not user_id = 1 or not user_id is not null or not ts >= 1898 or not ts <= 1899) and (not user_id = 1 or not user_id is not null or not ts >= 1900 or not ts <= 1901) and (not user_id = 1 or not user_id is not null or not ts >= 1902 or not ts <= 1903) and (not user_id = 1 or not user_id is not null or not ts >= 1904 or not ts <= 1905) and (not user_id = 1 or not user_id is not null or not ts >= 1906 or not ts <= 1907) and (not user_id = 1 or not user_id is not null or not ts >= 1908 or not ts <= 1909) and (not user_id = 1 or not user_id is not null or not ts >= 1910 or not ts <= 1911) and (not user_id = 1 or not user_id is not null or not ts >= 1912 or not ts <= 1913) and (not user_id = 1 or not user_id is not null or not ts >= 1914 or not ts <= 1915) and (not user_id = 1 or not user_id is not null or not ts >= 1916 or not ts <= 1917) and (not user_id = 1 or not user_id is not null or not ts >= 1918 or not ts <= 1919) and (not user_id = 1 or not user_id is not null or not ts >= 1920 or not ts <= 1921) and (not user_id = 1 or not user_id is not null or not ts >= 1922 or not ts <= 1923) and (not user_id = 1 or not user_id is not null or not ts >= 1924 or not ts <= 1925) and (not user_id = 1 or not user_id is not null or not ts >= 1926 or not ts <= 1927) and (not user_id = 1 or not user_id is not null or not ts >= 1928 or not 
ts <= 1929) and (not user_id = 1 or not user_id is not null or not ts >= 1930 or not ts <= 1931) and (not user_id = 1 or not user_id is not null or not ts >= 1932 or not ts <= 1933) and (not user_id = 1 or not user_id is not null or not ts >= 1934 or not ts <= 1935) and (not user_id = 1 or not user_id is not null or not ts >= 1936 or not ts <= 1937) and (not user_id = 1 or not user_id is not null or not ts >= 1938 or not ts <= 1939) and (not user_id = 1 or not user_id is not null or not ts >= 1940 or not ts <= 1941) and (not user_id = 1 or not user_id is not null or not ts >= 1942 or not ts <= 1943) and (not user_id = 1 or not user_id is not null or not ts >= 1944 or not ts <= 1945) and (not user_id = 1 or not user_id is not null or not ts >= 1946 or not ts <= 1947) and (not user_id = 1 or not user_id is not null or not ts >= 1948 or not ts <= 1949) and (not user_id = 1 or not user_id is not null or not ts >= 1950 or not ts <= 1951) and (not user_id = 1 or not user_id is not null or not ts >= 1952 or not ts <= 1953) and (not user_id = 1 or not user_id is not null or not ts >= 1954 or not ts <= 1955) and (not user_id = 1 or not user_id is not null or not ts >= 1956 or not ts <= 1957) and (not user_id = 1 or not user_id is not null or not ts >= 1958 or not ts <= 1959) and (not user_id = 1 or not user_id is not null or not ts >= 1960 or not ts <= 1961) and (not user_id = 1 or not user_id is not null or not ts >= 1962 or not ts <= 1963) and (not user_id = 1 or not user_id is not null or not ts >= 1964 or not ts <= 1965) and (not user_id = 1 or not user_id is not null or not ts >= 1966 or not ts <= 1967) and (not user_id = 1 or not user_id is not null or not ts >= 1968 or not ts <= 1969) and (not user_id = 1 or not user_id is not null or not ts >= 1970 or not ts <= 1971) and (not user_id = 1 or not user_id is not null or not ts >= 1972 or not ts <= 1973) and (not user_id = 1 or not user_id is not null or not ts >= 1974 or not ts <= 1975) and (not user_id = 1 or not 
user_id is not null or not ts >= 1976 or not ts <= 1977) and (not user_id = 1 or not user_id is not null or not ts >= 1978 or not ts <= 1979) and (not user_id = 1 or not user_id is not null or not ts >= 1980 or not ts <= 1981) and (not user_id = 1 or not user_id is not null or not ts >= 1982 or not ts <= 1983) and (not user_id = 1 or not user_id is not null or not ts >= 1984 or not ts <= 1985) and (not user_id = 1 or not user_id is not null or not ts >= 1986 or not ts <= 1987) and (not user_id = 1 or not user_id is not null or not ts >= 1988 or not ts <= 1989) and (not user_id = 1 or not user_id is not null or not ts >= 1990 or not ts <= 1991) and (not user_id = 1 or not user_id is not null or not ts >= 1992 or not ts <= 1993) and (not user_id = 1 or not user_id is not null or not ts >= 1994 or not ts <= 1995) and (not user_id = 1 or not user_id is not null or not ts >= 1996 or not ts <= 1997) and (not user_id = 1 or not user_id is not null or not ts >= 1998 or not ts <= 1999) and (not user_id = 1 or not user_id is not null or not ts >= 11000 or not ts <= 11001) and (not user_id = 1 or not user_id is not null or not ts >= 11002 or not ts <= 11003) and (not user_id = 1 or not user_id is not null or not ts >= 11004 or not ts <= 11005) and (not user_id = 1 or not user_id is not null or not ts >= 11006 or not ts <= 11007) and (not user_id = 1 or not user_id is not null or not ts >= 11008 or not ts <= 11009) and (not user_id = 1 or not user_id is not null or not ts >= 11010 or not ts <= 11011) and (not user_id = 1 or not user_id is not null or not ts >= 11012 or not ts <= 11013) and (not user_id = 1 or not user_id is not null or not ts >= 11014 or not ts <= 11015) and (not user_id = 1 or not user_id is not null or not ts >= 11016 or not ts <= 11017) and (not user_id = 1 or not user_id is not null or not ts >= 11018 or not ts <= 11019) and (not user_id = 1 or not user_id is not null or not ts >= 11020 or not ts <= 11021) and (not user_id = 1 or not user_id is not null or 
not ts >= 11022 or not ts <= 11023) and (not user_id = 1 or not user_id is not null or not ts >= 11024 or not ts <= 11025) and (not user_id = 1 or not user_id is not null or not ts >= 11026 or not ts <= 11027) and (not user_id = 1 or not user_id is not null or not ts >= 11028 or not ts <= 11029) and (not user_id = 1 or not user_id is not null or not ts >= 11030 or not ts <= 11031) and (not user_id = 1 or not user_id is not null or not ts >= 11032 or not ts <= 11033) and (not user_id = 1 or not user_id is not null or not ts >= 11034 or not ts <= 11035) and (not user_id = 1 or not user_id is not null or not ts >= 11036 or not ts <= 11037) and (not user_id = 1 or not user_id is not null or not ts >= 11038 or not ts <= 11039) and (not user_id = 1 or not user_id is not null or not ts >= 11040 or not ts <= 11041) and (not user_id = 1 or not user_id is not null or not ts >= 11042 or not ts <= 11043) and (not user_id = 1 or not user_id is not null or not ts >= 11044 or not ts <= 11045) and (not user_id = 1 or not user_id is not null or not ts >= 11046 or not ts <= 11047) and (not user_id = 1 or not user_id is not null or not ts >= 11048 or not ts <= 11049) and (not user_id = 1 or not user_id is not null or not ts >= 11050 or not ts <= 11051) and (not user_id = 1 or not user_id is not null or not ts >= 11052 or not ts <= 11053) and (not user_id = 1 or not user_id is not null or not ts >= 11054 or not ts <= 11055) and (not user_id = 1 or not user_id is not null or not ts >= 11056 or not ts <= 11057) and (not user_id = 1 or not user_id is not null or not ts >= 11058 or not ts <= 11059) and (not user_id = 1 or not user_id is not null or not ts >= 11060 or not ts <= 11061) and (not user_id = 1 or not user_id is not null or not ts >= 11062 or not ts <= 11063) and (not user_id = 1 or not user_id is not null or not ts >= 11064 or not ts <= 11065) and (not user_id = 1 or not user_id is not null or not ts >= 11066 or not ts <= 11067) and (not user_id = 1 or not user_id is not null 
or not ts >= 11068 or not ts <= 11069) and (not user_id = 1 or not user_id is not null or not ts >= 11070 or not ts <= 11071) and (not user_id = 1 or not user_id is not null or not ts >= 11072 or not ts <= 11073) and (not user_id = 1 or not user_id is not null or not ts >= 11074 or not ts <= 11075) and (not user_id = 1 or not user_id is not null or not ts >= 11076 or not ts <= 11077) and (not user_id = 1 or not user_id is not null or not ts >= 11078 or not ts <= 11079) and (not user_id = 1 or not user_id is not null or not ts >= 11080 or not ts <= 11081) and (not user_id = 1 or not user_id is not null or not ts >= 11082 or not ts <= 11083) and (not user_id = 1 or not user_id is not null or not ts >= 11084 or not ts <= 11085) and (not user_id = 1 or not user_id is not null or not ts >= 11086 or not ts <= 11087) and (not user_id = 1 or not user_id is not null or not ts >= 11088 or not ts <= 11089) and (not user_id = 1 or not user_id is not null or not ts >= 11090 or not ts <= 11091) and (not user_id = 1 or not user_id is not null or not ts >= 11092 or not ts <= 11093) and (not user_id = 1 or not user_id is not null or not ts >= 11094 or not ts <= 11095) and (not user_id = 1 or not user_id is not null or not ts >= 11096 or not ts <= 11097) and (not user_id = 1 or not user_id is not null or not ts >= 11098 or not ts <= 11099) and (not user_id = 1 or not user_id is not null or not ts >= 11100 or not ts <= 11101) and (not user_id = 1 or not user_id is not null or not ts >= 11102 or not ts <= 11103) and (not user_id = 1 or not user_id is not null or not ts >= 11104 or not ts <= 11105) and (not user_id = 1 or not user_id is not null or not ts >= 11106 or not ts <= 11107) and (not user_id = 1 or not user_id is not null or not ts >= 11108 or not ts <= 11109) and (not user_id = 1 or not user_id is not null or not ts >= 11110 or not ts <= 11111) and (not user_id = 1 or not user_id is not null or not ts >= 11112 or not ts <= 11113) and (not user_id = 1 or not user_id is not 
null or not ts >= 11114 or not ts <= 11115) and (not user_id = 1 or not user_id is not null or not ts >= 11116 or not ts <= 11117) and (not user_id = 1 or not user_id is not null or not ts >= 11118 or not ts <= 11119) and (not user_id = 1 or not user_id is not null or not ts >= 11120 or not ts <= 11121) and (not user_id = 1 or not user_id is not null or not ts >= 11122 or not ts <= 11123) and (not user_id = 1 or not user_id is not null or not ts >= 11124 or not ts <= 11125) and (not user_id = 1 or not user_id is not null or not ts >= 11126 or not ts <= 11127) and (not user_id = 1 or not user_id is not null or not ts >= 11128 or not ts <= 11129) and (not user_id = 1 or not user_id is not null or not ts >= 11130 or not ts <= 11131) and (not user_id = 1 or not user_id is not null or not ts >= 11132 or not ts <= 11133) and (not user_id = 1 or not user_id is not null or not ts >= 11134 or not ts <= 11135) and (not user_id = 1 or not user_id is not null or not ts >= 11136 or not ts <= 11137) and (not user_id = 1 or not user_id is not null or not ts >= 11138 or not ts <= 11139) and (not user_id = 1 or not user_id is not null or not ts >= 11140 or not ts <= 11141) and (not user_id = 1 or not user_id is not null or not ts >= 11142 or not ts <= 11143) and (not user_id = 1 or not user_id is not null or not ts >= 11144 or not ts <= 11145) and (not user_id = 1 or not user_id is not null or not ts >= 11146 or not ts <= 11147) and (not user_id = 1 or not user_id is not null or not ts >= 11148 or not ts <= 11149) and (not user_id = 1 or not user_id is not null or not ts >= 11150 or not ts <= 11151) and (not user_id = 1 or not user_id is not null or not ts >= 11152 or not ts <= 11153) and (not user_id = 1 or not user_id is not null or not ts >= 11154 or not ts <= 11155) and (not user_id = 1 or not user_id is not null or not ts >= 11156 or not ts <= 11157) and (not user_id = 1 or not user_id is not null or not ts >= 11158 or not ts <= 11159) and (not user_id = 1 or not user_id is 
not null or not ts >= 11160 or not ts <= 11161) and (not user_id = 1 or not user_id is not null or not ts >= 11162 or not ts <= 11163) and (not user_id = 1 or not user_id is not null or not ts >= 11164 or not ts <= 11165) and (not user_id = 1 or not user_id is not null or not ts >= 11166 or not ts <= 11167) and (not user_id = 1 or not user_id is not null or not ts >= 11168 or not ts <= 11169) and (not user_id = 1 or not user_id is not null or not ts >= 11170 or not ts <= 11171) and (not user_id = 1 or not user_id is not null or not ts >= 11172 or not ts <= 11173) and (not user_id = 1 or not user_id is not null or not ts >= 11174 or not ts <= 11175) and (not user_id = 1 or not user_id is not null or not ts >= 11176 or not ts <= 11177) and (not user_id = 1 or not user_id is not null or not ts >= 11178 or not ts <= 11179) and (not user_id = 1 or not user_id is not null or not ts >= 11180 or not ts <= 11181) and (not user_id = 1 or not user_id is not null or not ts >= 11182 or not ts <= 11183) and (not user_id = 1 or not user_id is not null or not ts >= 11184 or not ts <= 11185) and (not user_id = 1 or not user_id is not null or not ts >= 11186 or not ts <= 11187) and (not user_id = 1 or not user_id is not null or not ts >= 11188 or not ts <= 11189) and (not user_id = 1 or not user_id is not null or not ts >= 11190 or not ts <= 11191) and (not user_id = 1 or not user_id is not null or not ts >= 11192 or not ts <= 11193) and (not user_id = 1 or not user_id is not null or not ts >= 11194 or not ts <= 11195) and (not user_id = 1 or not user_id is not null or not ts >= 11196 or not ts <= 11197) and (not user_id = 1 or not user_id is not null or not ts >= 11198 or not ts <= 11199) and (not user_id = 1 or not user_id is not null or not ts >= 11200 or not ts <= 11201) and (not user_id = 1 or not user_id is not null or not ts >= 11202 or not ts <= 11203) and (not user_id = 1 or not user_id is not null or not ts >= 11204 or not ts <= 11205) and (not user_id = 1 or not user_id 
is not null or not ts >= 11206 or not ts <= 11207) and (not user_id = 1 or not user_id is not null or not ts >= 11208 or not ts <= 11209) and (not user_id = 1 or not user_id is not null or not ts >= 11210 or not ts <= 11211) and (not user_id = 1 or not user_id is not null or not ts >= 11212 or not ts <= 11213) and (not user_id = 1 or not user_id is not null or not ts >= 11214 or not ts <= 11215) and (not user_id = 1 or not user_id is not null or not ts >= 11216 or not ts <= 11217) and (not user_id = 1 or not user_id is not null or not ts >= 11218 or not ts <= 11219) and (not user_id = 1 or not user_id is not null or not ts >= 11220 or not ts <= 11221) and (not user_id = 1 or not user_id is not null or not ts >= 11222 or not ts <= 11223) and (not user_id = 1 or not user_id is not null or not ts >= 11224 or not ts <= 11225) and (not user_id = 1 or not user_id is not null or not ts >= 11226 or not ts <= 11227) and (not user_id = 1 or not user_id is not null or not ts >= 11228 or not ts <= 11229) and (not user_id = 1 or not user_id is not null or not ts >= 11230 or not ts <= 11231) and (not user_id = 1 or not user_id is not null or not ts >= 11232 or not ts <= 11233) and (not user_id = 1 or not user_id is not null or not ts >= 11234 or not ts <= 11235) and (not user_id = 1 or not user_id is not null or not ts >= 11236 or not ts <= 11237) and (not user_id = 1 or not user_id is not null or not ts >= 11238 or not ts <= 11239) and (not user_id = 1 or not user_id is not null or not ts >= 11240 or not ts <= 11241) and (not user_id = 1 or not user_id is not null or not ts >= 11242 or not ts <= 11243) and (not user_id = 1 or not user_id is not null or not ts >= 11244 or not ts <= 11245) and (not user_id = 1 or not user_id is not null or not ts >= 11246 or not ts <= 11247) and (not user_id = 1 or not user_id is not null or not ts >= 11248 or not ts <= 11249) and (not user_id = 1 or not user_id is not null or not ts >= 11250 or not ts <= 11251) and (not user_id = 1 or not 
user_id is not null or not ts >= 11252 or not ts <= 11253) and (not user_id = 1 or not user_id is not null or not ts >= 11254 or not ts <= 11255) and (not user_id = 1 or not user_id is not null or not ts >= 11256 or not ts <= 11257) and (not user_id = 1 or not user_id is not null or not ts >= 11258 or not ts <= 11259) and (not user_id = 1 or not user_id is not null or not ts >= 11260 or not ts <= 11261) and (not user_id = 1 or not user_id is not null or not ts >= 11262 or not ts <= 11263) and (not user_id = 1 or not user_id is not null or not ts >= 11264 or not ts <= 11265) and (not user_id = 1 or not user_id is not null or not ts >= 11266 or not ts <= 11267) and (not user_id = 1 or not user_id is not null or not ts >= 11268 or not ts <= 11269) and (not user_id = 1 or not user_id is not null or not ts >= 11270 or not ts <= 11271) and (not user_id = 1 or not user_id is not null or not ts >= 11272 or not ts <= 11273) and (not user_id = 1 or not user_id is not null or not ts >= 11274 or not ts <= 11275) and (not user_id = 1 or not user_id is not null or not ts >= 11276 or not ts <= 11277) and (not user_id = 1 or not user_id is not null or not ts >= 11278 or not ts <= 11279) and (not user_id = 1 or not user_id is not null or not ts >= 11280 or not ts <= 11281) and (not user_id = 1 or not user_id is not null or not ts >= 11282 or not ts <= 11283) and (not user_id = 1 or not user_id is not null or not ts >= 11284 or not ts <= 11285) and (not user_id = 1 or not user_id is not null or not ts >= 11286 or not ts <= 11287) and (not user_id = 1 or not user_id is not null or not ts >= 11288 or not ts <= 11289) and (not user_id = 1 or not user_id is not null or not ts >= 11290 or not ts <= 11291) and (not user_id = 1 or not user_id is not null or not ts >= 11292 or not ts <= 11293) and (not user_id = 1 or not user_id is not null or not ts >= 11294 or not ts <= 11295) and (not user_id = 1 or not user_id is not null or not ts >= 11296 or not ts <= 11297) and (not user_id = 1 or 
not user_id is not null or not ts >= 11298 or not ts <= 11299) and (not user_id = 1 or not user_id is not null or not ts >= 11300 or not ts <= 11301) and (not user_id = 1 or not user_id is not null or not ts >= 11302 or not ts <= 11303) and (not user_id = 1 or not user_id is not null or not ts >= 11304 or not ts <= 11305) and (not user_id = 1 or not user_id is not null or not ts >= 11306 or not ts <= 11307) and (not user_id = 1 or not user_id is not null or not ts >= 11308 or not ts <= 11309) and (not user_id = 1 or not user_id is not null or not ts >= 11310 or not ts <= 11311) and (not user_id = 1 or not user_id is not null or not ts >= 11312 or not ts <= 11313) and (not user_id = 1 or not user_id is not null or not ts >= 11314 or not ts <= 11315) and (not user_id = 1 or not user_id is not null or not ts >= 11316 or not ts <= 11317) and (not user_id = 1 or not user_id is not null or not ts >= 11318 or not ts <= 11319) and (not user_id = 1 or not user_id is not null or not ts >= 11320 or not ts <= 11321) and (not user_id = 1 or not user_id is not null or not ts >= 11322 or not ts <= 11323) and (not user_id = 1 or not user_id is not null or not ts >= 11324 or not ts <= 11325) and (not user_id = 1 or not user_id is not null or not ts >= 11326 or not ts <= 11327) and (not user_id = 1 or not user_id is not null or not ts >= 11328 or not ts <= 11329) and (not user_id = 1 or not user_id is not null or not ts >= 11330 or not ts <= 11331) and (not user_id = 1 or not user_id is not null or not ts >= 11332 or not ts <= 11333) and (not user_id = 1 or not user_id is not null or not ts >= 11334 or not ts <= 11335) and (not user_id = 1 or not user_id is not null or not ts >= 11336 or not ts <= 11337) and (not user_id = 1 or not user_id is not null or not ts >= 11338 or not ts <= 11339) and (not user_id = 1 or not user_id is not null or not ts >= 11340 or not ts <= 11341) and (not user_id = 1 or not user_id is not null or not ts >= 11342 or not ts <= 11343) and (not user_id = 1 
or not user_id is not null or not ts >= 11344 or not ts <= 11345) and (not user_id = 1 or not user_id is not null or not ts >= 11346 or not ts <= 11347) and (not user_id = 1 or not user_id is not null or not ts >= 11348 or not ts <= 11349) and (not user_id = 1 or not user_id is not null or not ts >= 11350 or not ts <= 11351) and (not user_id = 1 or not user_id is not null or not ts >= 11352 or not ts <= 11353) and (not user_id = 1 or not user_id is not null or not ts >= 11354 or not ts <= 11355) and (not user_id = 1 or not user_id is not null or not ts >= 11356 or not ts <= 11357) and (not user_id = 1 or not user_id is not null or not ts >= 11358 or not ts <= 11359) and (not user_id = 1 or not user_id is not null or not ts >= 11360 or not ts <= 11361) and (not user_id = 1 or not user_id is not null or not ts >= 11362 or not ts <= 11363) and (not user_id = 1 or not user_id is not null or not ts >= 11364 or not ts <= 11365) and (not user_id = 1 or not user_id is not null or not ts >= 11366 or not ts <= 11367) and (not user_id = 1 or not user_id is not null or not ts >= 11368 or not ts <= 11369) and (not user_id = 1 or not user_id is not null or not ts >= 11370 or not ts <= 11371) and (not user_id = 1 or not user_id is not null or not ts >= 11372 or not ts <= 11373) and (not user_id = 1 or not user_id is not null or not ts >= 11374 or not ts <= 11375) and (not user_id = 1 or not user_id is not null or not ts >= 11376 or not ts <= 11377) and (not user_id = 1 or not user_id is not null or not ts >= 11378 or not ts <= 11379) and (not user_id = 1 or not user_id is not null or not ts >= 11380 or not ts <= 11381) and (not user_id = 1 or not user_id is not null or not ts >= 11382 or not ts <= 11383) and (not user_id = 1 or not user_id is not null or not ts >= 11384 or not ts <= 11385) and (not user_id = 1 or not user_id is not null or not ts >= 11386 or not ts <= 11387) and (not user_id = 1 or not user_id is not null or not ts >= 11388 or not ts <= 11389) and (not user_id = 
1 or not user_id is not null or not ts >= 11390 or not ts <= 11391) and (not user_id = 1 or not user_id is not null or not ts >= 11392 or not ts <= 11393) and (not user_id = 1 or not user_id is not null or not ts >= 11394 or not ts <= 11395) and (not user_id = 1 or not user_id is not null or not ts >= 11396 or not ts <= 11397) and (not user_id = 1 or not user_id is not null or not ts >= 11398 or not ts <= 11399) and (not user_id = 1 or not user_id is not null or not ts >= 11400 or not ts <= 11401) and (not user_id = 1 or not user_id is not null or not ts >= 11402 or not ts <= 11403) and (not user_id = 1 or not user_id is not null or not ts >= 11404 or not ts <= 11405) and (not user_id = 1 or not user_id is not null or not ts >= 11406 or not ts <= 11407) and (not user_id = 1 or not user_id is not null or not ts >= 11408 or not ts <= 11409) and (not user_id = 1 or not user_id is not null or not ts >= 11410 or not ts <= 11411) and (not user_id = 1 or not user_id is not null or not ts >= 11412 or not ts <= 11413) and (not user_id = 1 or not user_id is not null or not ts >= 11414 or not ts <= 11415) and (not user_id = 1 or not user_id is not null or not ts >= 11416 or not ts <= 11417) and (not user_id = 1 or not user_id is not null or not ts >= 11418 or not ts <= 11419) and (not user_id = 1 or not user_id is not null or not ts >= 11420 or not ts <= 11421) and (not user_id = 1 or not user_id is not null or not ts >= 11422 or not ts <= 11423) and (not user_id = 1 or not user_id is not null or not ts >= 11424 or not ts <= 11425) and (not user_id = 1 or not user_id is not null or not ts >= 11426 or not ts <= 11427) and (not user_id = 1 or not user_id is not null or not ts >= 11428 or not ts <= 11429) and (not user_id = 1 or not user_id is not null or not ts >= 11430 or not ts <= 11431) and (not user_id = 1 or not user_id is not null or not ts >= 11432 or not ts <= 11433) and (not user_id = 1 or not user_id is not null or not ts >= 11434 or not ts <= 11435) and (not user_id 
= 1 or not user_id is not null or not ts >= 11436 or not ts <= 11437) and (not user_id = 1 or not user_id is not null or not ts >= 11438 or not ts <= 11439) and (not user_id = 1 or not user_id is not null or not ts >= 11440 or not ts <= 11441) and (not user_id = 1 or not user_id is not null or not ts >= 11442 or not ts <= 11443) and (not user_id = 1 or not user_id is not null or not ts >= 11444 or not ts <= 11445) and (not user_id = 1 or not user_id is not null or not ts >= 11446 or not ts <= 11447) and (not user_id = 1 or not user_id is not null or not ts >= 11448 or not ts <= 11449) and (not user_id = 1 or not user_id is not null or not ts >= 11450 or not ts <= 11451) and (not user_id = 1 or not user_id is not null or not ts >= 11452 or not ts <= 11453) and (not user_id = 1 or not user_id is not null or not ts >= 11454 or not ts <= 11455) and (not user_id = 1 or not user_id is not null or not ts >= 11456 or not ts <= 11457) and (not user_id = 1 or not user_id is not null or not ts >= 11458 or not ts <= 11459) and (not user_id = 1 or not user_id is not null or not ts >= 11460 or not ts <= 11461) and (not user_id = 1 or not user_id is not null or not ts >= 11462 or not ts <= 11463) and (not user_id = 1 or not user_id is not null or not ts >= 11464 or not ts <= 11465) and (not user_id = 1 or not user_id is not null or not ts >= 11466 or not ts <= 11467) and (not user_id = 1 or not user_id is not null or not ts >= 11468 or not ts <= 11469) and (not user_id = 1 or not user_id is not null or not ts >= 11470 or not ts <= 11471) and (not user_id = 1 or not user_id is not null or not ts >= 11472 or not ts <= 11473) and (not user_id = 1 or not user_id is not null or not ts >= 11474 or not ts <= 11475) and (not user_id = 1 or not user_id is not null or not ts >= 11476 or not ts <= 11477) and (not user_id = 1 or not user_id is not null or not ts >= 11478 or not ts <= 11479) and (not user_id = 1 or not user_id is not null or not ts >= 11480 or not ts <= 11481) and (not 
user_id = 1 or not user_id is not null or not ts >= 11482 or not ts <= 11483) and (not user_id = 1 or not user_id is not null or not ts >= 11484 or not ts <= 11485) and (not user_id = 1 or not user_id is not null or not ts >= 11486 or not ts <= 11487) and (not user_id = 1 or not user_id is not null or not ts >= 11488 or not ts <= 11489) and (not user_id = 1 or not user_id is not null or not ts >= 11490 or not ts <= 11491) and (not user_id = 1 or not user_id is not null or not ts >= 11492 or not ts <= 11493) and (not user_id = 1 or not user_id is not null or not ts >= 11494 or not ts <= 11495) and (not user_id = 1 or not user_id is not null or not ts >= 11496 or not ts <= 11497) and (not user_id = 1 or not user_id is not null or not ts >= 11498 or not ts <= 11499) and (not user_id = 1 or not user_id is not null or not ts >= 11500 or not ts <= 11501) and (not user_id = 1 or not user_id is not null or not ts >= 11502 or not ts <= 11503) and (not user_id = 1 or not user_id is not null or not ts >= 11504 or not ts <= 11505) and (not user_id = 1 or not user_id is not null or not ts >= 11506 or not ts <= 11507) and (not user_id = 1 or not user_id is not null or not ts >= 11508 or not ts <= 11509) and (not user_id = 1 or not user_id is not null or not ts >= 11510 or not ts <= 11511) and (not user_id = 1 or not user_id is not null or not ts >= 11512 or not ts <= 11513) and (not user_id = 1 or not user_id is not null or not ts >= 11514 or not ts <= 11515) and (not user_id = 1 or not user_id is not null or not ts >= 11516 or not ts <= 11517) and (not user_id = 1 or not user_id is not null or not ts >= 11518 or not ts <= 11519) and (not user_id = 1 or not user_id is not null or not ts >= 11520 or not ts <= 11521) and (not user_id = 1 or not user_id is not null or not ts >= 11522 or not ts <= 11523) and (not user_id = 1 or not user_id is not null or not ts >= 11524 or not ts <= 11525) and (not user_id = 1 or not user_id is not null or not ts >= 11526 or not ts <= 11527) and 
(not user_id = 1 or not user_id is not null or not ts >= 11528 or not ts <= 11529) and (not user_id = 1 or not user_id is not null or not ts >= 11530 or not ts <= 11531) and (not user_id = 1 or not user_id is not null or not ts >= 11532 or not ts <= 11533) and (not user_id = 1 or not user_id is not null or not ts >= 11534 or not ts <= 11535) and (not user_id = 1 or not user_id is not null or not ts >= 11536 or not ts <= 11537) and (not user_id = 1 or not user_id is not null or not ts >= 11538 or not ts <= 11539) and (not user_id = 1 or not user_id is not null or not ts >= 11540 or not ts <= 11541) and (not user_id = 1 or not user_id is not null or not ts >= 11542 or not ts <= 11543) and (not user_id = 1 or not user_id is not null or not ts >= 11544 or not ts <= 11545) and (not user_id = 1 or not user_id is not null or not ts >= 11546 or not ts <= 11547) and (not user_id = 1 or not user_id is not null or not ts >= 11548 or not ts <= 11549) and (not user_id = 1 or not user_id is not null or not ts >= 11550 or not ts <= 11551) and (not user_id = 1 or not user_id is not null or not ts >= 11552 or not ts <= 11553) and (not user_id = 1 or not user_id is not null or not ts >= 11554 or not ts <= 11555) and (not user_id = 1 or not user_id is not null or not ts >= 11556 or not ts <= 11557) and (not user_id = 1 or not user_id is not null or not ts >= 11558 or not ts <= 11559) and (not user_id = 1 or not user_id is not null or not ts >= 11560 or not ts <= 11561) and (not user_id = 1 or not user_id is not null or not ts >= 11562 or not ts <= 11563) and (not user_id = 1 or not user_id is not null or not ts >= 11564 or not ts <= 11565) and (not user_id = 1 or not user_id is not null or not ts >= 11566 or not ts <= 11567) and (not user_id = 1 or not user_id is not null or not ts >= 11568 or not ts <= 11569) and (not user_id = 1 or not user_id is not null or not ts >= 11570 or not ts <= 11571) and (not user_id = 1 or not user_id is not null or not ts >= 11572 or not ts <= 11573) 
and (not user_id = 1 or not user_id is not null or not ts >= 11574 or not ts <= 11575) and (not user_id = 1 or not user_id is not null or not ts >= 11576 or not ts <= 11577) and (not user_id = 1 or not user_id is not null or not ts >= 11578 or not ts <= 11579) and (not user_id = 1 or not user_id is not null or not ts >= 11580 or not ts <= 11581) and (not user_id = 1 or not user_id is not null or not ts >= 11582 or not ts <= 11583) and (not user_id = 1 or not user_id is not null or not ts >= 11584 or not ts <= 11585) and (not user_id = 1 or not user_id is not null or not ts >= 11586 or not ts <= 11587) and (not user_id = 1 or not user_id is not null or not ts >= 11588 or not ts <= 11589) and (not user_id = 1 or not user_id is not null or not ts >= 11590 or not ts <= 11591) and (not user_id = 1 or not user_id is not null or not ts >= 11592 or not ts <= 11593) and (not user_id = 1 or not user_id is not null or not ts >= 11594 or not ts <= 11595) and (not user_id = 1 or not user_id is not null or not ts >= 11596 or not ts <= 11597) and (not user_id = 1 or not user_id is not null or not ts >= 11598 or not ts <= 11599) and (not user_id = 1 or not user_id is not null or not ts >= 11600 or not ts <= 11601) and (not user_id = 1 or not user_id is not null or not ts >= 11602 or not ts <= 11603) and (not user_id = 1 or not user_id is not null or not ts >= 11604 or not ts <= 11605) and (not user_id = 1 or not user_id is not null or not ts >= 11606 or not ts <= 11607) and (not user_id = 1 or not user_id is not null or not ts >= 11608 or not ts <= 11609) and (not user_id = 1 or not user_id is not null or not ts >= 11610 or not ts <= 11611) and (not user_id = 1 or not user_id is not null or not ts >= 11612 or not ts <= 11613) and (not user_id = 1 or not user_id is not null or not ts >= 11614 or not ts <= 11615) and (not user_id = 1 or not user_id is not null or not ts >= 11616 or not ts <= 11617) and (not user_id = 1 or not user_id is not null or not ts >= 11618 or not ts <= 
11619) and (not user_id = 1 or not user_id is not null or not ts >= 11620 or not ts <= 11621) and (not user_id = 1 or not user_id is not null or not ts >= 11622 or not ts <= 11623) and (not user_id = 1 or not user_id is not null or not ts >= 11624 or not ts <= 11625) and (not user_id = 1 or not user_id is not null or not ts >= 11626 or not ts <= 11627) and (not user_id = 1 or not user_id is not null or not ts >= 11628 or not ts <= 11629) and (not user_id = 1 or not user_id is not null or not ts >= 11630 or not ts <= 11631) and (not user_id = 1 or not user_id is not null or not ts >= 11632 or not ts <= 11633) and (not user_id = 1 or not user_id is not null or not ts >= 11634 or not ts <= 11635) and (not user_id = 1 or not user_id is not null or not ts >= 11636 or not ts <= 11637) and (not user_id = 1 or not user_id is not null or not ts >= 11638 or not ts <= 11639) and (not user_id = 1 or not user_id is not null or not ts >= 11640 or not ts <= 11641) and (not user_id = 1 or not user_id is not null or not ts >= 11642 or not ts <= 11643) and (not user_id = 1 or not user_id is not null or not ts >= 11644 or not ts <= 11645) and (not user_id = 1 or not user_id is not null or not ts >= 11646 or not ts <= 11647) and (not user_id = 1 or not user_id is not null or not ts >= 11648 or not ts <= 11649) and (not user_id = 1 or not user_id is not null or not ts >= 11650 or not ts <= 11651) and (not user_id = 1 or not user_id is not null or not ts >= 11652 or not ts <= 11653) and (not user_id = 1 or not user_id is not null or not ts >= 11654 or not ts <= 11655) and (not user_id = 1 or not user_id is not null or not ts >= 11656 or not ts <= 11657) and (not user_id = 1 or not user_id is not null or not ts >= 11658 or not ts <= 11659) and (not user_id = 1 or not user_id is not null or not ts >= 11660 or not ts <= 11661) and (not user_id = 1 or not user_id is not null or not ts >= 11662 or not ts <= 11663) and (not user_id = 1 or not user_id is not null or not ts >= 11664 or not ts 
<= 11665) and (not user_id = 1 or not user_id is not null or not ts >= 11666 or not ts <= 11667) and (not user_id = 1 or not user_id is not null or not ts >= 11668 or not ts <= 11669) and (not user_id = 1 or not user_id is not null or not ts >= 11670 or not ts <= 11671) and (not user_id = 1 or not user_id is not null or not ts >= 11672 or not ts <= 11673) and (not user_id = 1 or not user_id is not null or not ts >= 11674 or not ts <= 11675) and (not user_id = 1 or not user_id is not null or not ts >= 11676 or not ts <= 11677) and (not user_id = 1 or not user_id is not null or not ts >= 11678 or not ts <= 11679) and (not user_id = 1 or not user_id is not null or not ts >= 11680 or not ts <= 11681) and (not user_id = 1 or not user_id is not null or not ts >= 11682 or not ts <= 11683) and (not user_id = 1 or not user_id is not null or not ts >= 11684 or not ts <= 11685) and (not user_id = 1 or not user_id is not null or not ts >= 11686 or not ts <= 11687) and (not user_id = 1 or not user_id is not null or not ts >= 11688 or not ts <= 11689) and (not user_id = 1 or not user_id is not null or not ts >= 11690 or not ts <= 11691) and (not user_id = 1 or not user_id is not null or not ts >= 11692 or not ts <= 11693) and (not user_id = 1 or not user_id is not null or not ts >= 11694 or not ts <= 11695) and (not user_id = 1 or not user_id is not null or not ts >= 11696 or not ts <= 11697) and (not user_id = 1 or not user_id is not null or not ts >= 11698 or not ts <= 11699) and (not user_id = 1 or not user_id is not null or not ts >= 11700 or not ts <= 11701) and (not user_id = 1 or not user_id is not null or not ts >= 11702 or not ts <= 11703) and (not user_id = 1 or not user_id is not null or not ts >= 11704 or not ts <= 11705) and (not user_id = 1 or not user_id is not null or not ts >= 11706 or not ts <= 11707) and (not user_id = 1 or not user_id is not null or not ts >= 11708 or not ts <= 11709) and (not user_id = 1 or not user_id is not null or not ts >= 11710 or not 
ts <= 11711) and (not user_id = 1 or not user_id is not null or not ts >= 11712 or not ts <= 11713) and (not user_id = 1 or not user_id is not null or not ts >= 11714 or not ts <= 11715) and (not user_id = 1 or not user_id is not null or not ts >= 11716 or not ts <= 11717) and (not user_id = 1 or not user_id is not null or not ts >= 11718 or not ts <= 11719) and (not user_id = 1 or not user_id is not null or not ts >= 11720 or not ts <= 11721) and (not user_id = 1 or not user_id is not null or not ts >= 11722 or not ts <= 11723) and (not user_id = 1 or not user_id is not null or not ts >= 11724 or not ts <= 11725) and (not user_id = 1 or not user_id is not null or not ts >= 11726 or not ts <= 11727) and (not user_id = 1 or not user_id is not null or not ts >= 11728 or not ts <= 11729) and (not user_id = 1 or not user_id is not null or not ts >= 11730 or not ts <= 11731) and (not user_id = 1 or not user_id is not null or not ts >= 11732 or not ts <= 11733) and (not user_id = 1 or not user_id is not null or not ts >= 11734 or not ts <= 11735) and (not user_id = 1 or not user_id is not null or not ts >= 11736 or not ts <= 11737) and (not user_id = 1 or not user_id is not null or not ts >= 11738 or not ts <= 11739) and (not user_id = 1 or not user_id is not null or not ts >= 11740 or not ts <= 11741) and (not user_id = 1 or not user_id is not null or not ts >= 11742 or not ts <= 11743) and (not user_id = 1 or not user_id is not null or not ts >= 11744 or not ts <= 11745) and (not user_id = 1 or not user_id is not null or not ts >= 11746 or not ts <= 11747) and (not user_id = 1 or not user_id is not null or not ts >= 11748 or not ts <= 11749) and (not user_id = 1 or not user_id is not null or not ts >= 11750 or not ts <= 11751) and (not user_id = 1 or not user_id is not null or not ts >= 11752 or not ts <= 11753) and (not user_id = 1 or not user_id is not null or not ts >= 11754 or not ts <= 11755) and (not user_id = 1 or not user_id is not null or not ts >= 11756 or 
not ts <= 11757) and (not user_id = 1 or not user_id is not null or not ts >= 11758 or not ts <= 11759) and (not user_id = 1 or not user_id is not null or not ts >= 11760 or not ts <= 11761) and (not user_id = 1 or not user_id is not null or not ts >= 11762 or not ts <= 11763) and (not user_id = 1 or not user_id is not null or not ts >= 11764 or not ts <= 11765) and (not user_id = 1 or not user_id is not null or not ts >= 11766 or not ts <= 11767) and (not user_id = 1 or not user_id is not null or not ts >= 11768 or not ts <= 11769) and (not user_id = 1 or not user_id is not null or not ts >= 11770 or not ts <= 11771) and (not user_id = 1 or not user_id is not null or not ts >= 11772 or not ts <= 11773) and (not user_id = 1 or not user_id is not null or not ts >= 11774 or not ts <= 11775) and (not user_id = 1 or not user_id is not null or not ts >= 11776 or not ts <= 11777) and (not user_id = 1 or not user_id is not null or not ts >= 11778 or not ts <= 11779) and (not user_id = 1 or not user_id is not null or not ts >= 11780 or not ts <= 11781) and (not user_id = 1 or not user_id is not null or not ts >= 11782 or not ts <= 11783) and (not user_id = 1 or not user_id is not null or not ts >= 11784 or not ts <= 11785) and (not user_id = 1 or not user_id is not null or not ts >= 11786 or not ts <= 11787) and (not user_id = 1 or not user_id is not null or not ts >= 11788 or not ts <= 11789) and (not user_id = 1 or not user_id is not null or not ts >= 11790 or not ts <= 11791) and (not user_id = 1 or not user_id is not null or not ts >= 11792 or not ts <= 11793) and (not user_id = 1 or not user_id is not null or not ts >= 11794 or not ts <= 11795) and (not user_id = 1 or not user_id is not null or not ts >= 11796 or not ts <= 11797) and (not user_id = 1 or not user_id is not null or not ts >= 11798 or not ts <= 11799) and (not user_id = 1 or not user_id is not null or not ts >= 11800 or not ts <= 11801) and (not user_id = 1 or not user_id is not null or not ts >= 11802 
or not ts <= 11803) and (not user_id = 1 or not user_id is not null or not ts >= 11804 or not ts <= 11805) and (not user_id = 1 or not user_id is not null or not ts >= 11806 or not ts <= 11807) and (not user_id = 1 or not user_id is not null or not ts >= 11808 or not ts <= 11809) and (not user_id = 1 or not user_id is not null or not ts >= 11810 or not ts <= 11811) and (not user_id = 1 or not user_id is not null or not ts >= 11812 or not ts <= 11813) and (not user_id = 1 or not user_id is not null or not ts >= 11814 or not ts <= 11815) and (not user_id = 1 or not user_id is not null or not ts >= 11816 or not ts <= 11817) and (not user_id = 1 or not user_id is not null or not ts >= 11818 or not ts <= 11819) and (not user_id = 1 or not user_id is not null or not ts >= 11820 or not ts <= 11821) and (not user_id = 1 or not user_id is not null or not ts >= 11822 or not ts <= 11823) and (not user_id = 1 or not user_id is not null or not ts >= 11824 or not ts <= 11825) and (not user_id = 1 or not user_id is not null or not ts >= 11826 or not ts <= 11827) and (not user_id = 1 or not user_id is not null or not ts >= 11828 or not ts <= 11829) and (not user_id = 1 or not user_id is not null or not ts >= 11830 or not ts <= 11831) and (not user_id = 1 or not user_id is not null or not ts >= 11832 or not ts <= 11833) and (not user_id = 1 or not user_id is not null or not ts >= 11834 or not ts <= 11835) and (not user_id = 1 or not user_id is not null or not ts >= 11836 or not ts <= 11837) and (not user_id = 1 or not user_id is not null or not ts >= 11838 or not ts <= 11839) and (not user_id = 1 or not user_id is not null or not ts >= 11840 or not ts <= 11841) and (not user_id = 1 or not user_id is not null or not ts >= 11842 or not ts <= 11843) and (not user_id = 1 or not user_id is not null or not ts >= 11844 or not ts <= 11845) and (not user_id = 1 or not user_id is not null or not ts >= 11846 or not ts <= 11847) and (not user_id = 1 or not user_id is not null or not ts >= 
11848 or not ts <= 11849) and (not user_id = 1 or not user_id is not null or not ts >= 11850 or not ts <= 11851) and (not user_id = 1 or not user_id is not null or not ts >= 11852 or not ts <= 11853) and (not user_id = 1 or not user_id is not null or not ts >= 11854 or not ts <= 11855) and (not user_id = 1 or not user_id is not null or not ts >= 11856 or not ts <= 11857) and (not user_id = 1 or not user_id is not null or not ts >= 11858 or not ts <= 11859) and (not user_id = 1 or not user_id is not null or not ts >= 11860 or not ts <= 11861) and (not user_id = 1 or not user_id is not null or not ts >= 11862 or not ts <= 11863) and (not user_id = 1 or not user_id is not null or not ts >= 11864 or not ts <= 11865) and (not user_id = 1 or not user_id is not null or not ts >= 11866 or not ts <= 11867) and (not user_id = 1 or not user_id is not null or not ts >= 11868 or not ts <= 11869) and (not user_id = 1 or not user_id is not null or not ts >= 11870 or not ts <= 11871) and (not user_id = 1 or not user_id is not null or not ts >= 11872 or not ts <= 11873) and (not user_id = 1 or not user_id is not null or not ts >= 11874 or not ts <= 11875) and (not user_id = 1 or not user_id is not null or not ts >= 11876 or not ts <= 11877) and (not user_id = 1 or not user_id is not null or not ts >= 11878 or not ts <= 11879) and (not user_id = 1 or not user_id is not null or not ts >= 11880 or not ts <= 11881) and (not user_id = 1 or not user_id is not null or not ts >= 11882 or not ts <= 11883) and (not user_id = 1 or not user_id is not null or not ts >= 11884 or not ts <= 11885) and (not user_id = 1 or not user_id is not null or not ts >= 11886 or not ts <= 11887) and (not user_id = 1 or not user_id is not null or not ts >= 11888 or not ts <= 11889) and (not user_id = 1 or not user_id is not null or not ts >= 11890 or not ts <= 11891) and (not user_id = 1 or not user_id is not null or not ts >= 11892 or not ts <= 11893) and (not user_id = 1 or not user_id is not null or not ts 
>= 11894 or not ts <= 11895) and (not user_id = 1 or not user_id is not null or not ts >= 11896 or not ts <= 11897) and (not user_id = 1 or not user_id is not null or not ts >= 11898 or not ts <= 11899) and (not user_id = 1 or not user_id is not null or not ts >= 11900 or not ts <= 11901) and (not user_id = 1 or not user_id is not null or not ts >= 11902 or not ts <= 11903) and (not user_id = 1 or not user_id is not null or not ts >= 11904 or not ts <= 11905) and (not user_id = 1 or not user_id is not null or not ts >= 11906 or not ts <= 11907) and (not user_id = 1 or not user_id is not null or not ts >= 11908 or not ts <= 11909) and (not user_id = 1 or not user_id is not null or not ts >= 11910 or not ts <= 11911) and (not user_id = 1 or not user_id is not null or not ts >= 11912 or not ts <= 11913) and (not user_id = 1 or not user_id is not null or not ts >= 11914 or not ts <= 11915) and (not user_id = 1 or not user_id is not null or not ts >= 11916 or not ts <= 11917) and (not user_id = 1 or not user_id is not null or not ts >= 11918 or not ts <= 11919) and (not user_id = 1 or not user_id is not null or not ts >= 11920 or not ts <= 11921) and (not user_id = 1 or not user_id is not null or not ts >= 11922 or not ts <= 11923) and (not user_id = 1 or not user_id is not null or not ts >= 11924 or not ts <= 11925) and (not user_id = 1 or not user_id is not null or not ts >= 11926 or not ts <= 11927) and (not user_id = 1 or not user_id is not null or not ts >= 11928 or not ts <= 11929) and (not user_id = 1 or not user_id is not null or not ts >= 11930 or not ts <= 11931) and (not user_id = 1 or not user_id is not null or not ts >= 11932 or not ts <= 11933) and (not user_id = 1 or not user_id is not null or not ts >= 11934 or not ts <= 11935) and (not user_id = 1 or not user_id is not null or not ts >= 11936 or not ts <= 11937) and (not user_id = 1 or not user_id is not null or not ts >= 11938 or not ts <= 11939) and (not user_id = 1 or not user_id is not null or not 
ts >= 11940 or not ts <= 11941) and (not user_id = 1 or not user_id is not null or not ts >= 11942 or not ts <= 11943) and (not user_id = 1 or not user_id is not null or not ts >= 11944 or not ts <= 11945) and (not user_id = 1 or not user_id is not null or not ts >= 11946 or not ts <= 11947) and (not user_id = 1 or not user_id is not null or not ts >= 11948 or not ts <= 11949) and (not user_id = 1 or not user_id is not null or not ts >= 11950 or not ts <= 11951) and (not user_id = 1 or not user_id is not null or not ts >= 11952 or not ts <= 11953) and (not user_id = 1 or not user_id is not null or not ts >= 11954 or not ts <= 11955) and (not user_id = 1 or not user_id is not null or not ts >= 11956 or not ts <= 11957) and (not user_id = 1 or not user_id is not null or not ts >= 11958 or not ts <= 11959) and (not user_id = 1 or not user_id is not null or not ts >= 11960 or not ts <= 11961) and (not user_id = 1 or not user_id is not null or not ts >= 11962 or not ts <= 11963) and (not user_id = 1 or not user_id is not null or not ts >= 11964 or not ts <= 11965) and (not user_id = 1 or not user_id is not null or not ts >= 11966 or not ts <= 11967) and (not user_id = 1 or not user_id is not null or not ts >= 11968 or not ts <= 11969) and (not user_id = 1 or not user_id is not null or not ts >= 11970 or not ts <= 11971) and (not user_id = 1 or not user_id is not null or not ts >= 11972 or not ts <= 11973) and (not user_id = 1 or not user_id is not null or not ts >= 11974 or not ts <= 11975) and (not user_id = 1 or not user_id is not null or not ts >= 11976 or not ts <= 11977) and (not user_id = 1 or not user_id is not null or not ts >= 11978 or not ts <= 11979) and (not user_id = 1 or not user_id is not null or not ts >= 11980 or not ts <= 11981) and (not user_id = 1 or not user_id is not null or not ts >= 11982 or not ts <= 11983) and (not user_id = 1 or not user_id is not null or not ts >= 11984 or not ts <= 11985) and (not user_id = 1 or not user_id is not null or 
not ts >= 11986 or not ts <= 11987) and (not user_id = 1 or not user_id is not null or not ts >= 11988 or not ts <= 11989) and (not user_id = 1 or not user_id is not null or not ts >= 11990 or not ts <= 11991) and (not user_id = 1 or not user_id is not null or not ts >= 11992 or not ts <= 11993) and ts >= 113898 and parent_id = 1 order by ts asc limit 100", "ResultColumns": 1, "Table": "`user`" } @@ -4402,5 +4449,268 @@ "user.user_extra" ] } + }, + { + "comment": "list args: single column vindex", + "query": "select 1 from user where (id, col) in ::vals", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from user where (id, col) in ::vals", + "Instructions": { + "OperatorType": "Route", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where (id, col) in ::vals", + "Table": "`user`", + "Values": [ + "vals:0" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "list args: single column vindex on non-zero offset", + "query": "select 1 from user where (col, id) in ::vals", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from user where (col, id) in ::vals", + "Instructions": { + "OperatorType": "Route", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1", + "Query": "select 1 from `user` where (col, id) in ::vals", + "Table": "`user`", + "Values": [ + "vals:1" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "list args: multi column vindex", + "query": "select 1 from multicol_tbl where (cola, colb) in ::vals", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from multicol_tbl where (cola, colb) in ::vals", + "Instructions": { + "OperatorType": "Route", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + 
"FieldQuery": "select 1 from multicol_tbl where 1 != 1", + "Query": "select 1 from multicol_tbl where (cola, colb) in ::vals", + "Table": "multicol_tbl", + "Values": [ + "vals:0", + "vals:1" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "user.multicol_tbl" + ] + } + }, + { + "comment": "list args: multi column vindex - subshard", + "query": "select 1 from multicol_tbl where (cola) in ::vals", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from multicol_tbl where (cola) in ::vals", + "Instructions": { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from multicol_tbl where 1 != 1", + "Query": "select 1 from multicol_tbl where cola in ::__vals0", + "Table": "multicol_tbl", + "Values": [ + "::vals" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "user.multicol_tbl" + ] + } + }, + { + "comment": "list args: multi column vindex - more columns", + "query": "select 1 from multicol_tbl where (cola, colx, colb) in ::vals", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from multicol_tbl where (cola, colx, colb) in ::vals", + "Instructions": { + "OperatorType": "Route", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from multicol_tbl where 1 != 1", + "Query": "select 1 from multicol_tbl where (cola, colx, colb) in ::vals", + "Table": "multicol_tbl", + "Values": [ + "vals:0", + "vals:2" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "user.multicol_tbl" + ] + } + }, + { + "comment": "list args: multi column vindex - columns rearranged", + "query": "select 1 from multicol_tbl where (colb, colx, cola) in ::vals", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from multicol_tbl where (colb, colx, cola) in ::vals", + "Instructions": { + "OperatorType": "Route", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 
from multicol_tbl where 1 != 1", + "Query": "select 1 from multicol_tbl where (colb, colx, cola) in ::vals", + "Table": "multicol_tbl", + "Values": [ + "vals:2", + "vals:0" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "user.multicol_tbl" + ] + } + }, + { + "comment": "order by with filter removing the keyspace from order by", + "query": "select col from user.user where id = 1 order by user.user.user_id", + "plan": { + "QueryType": "SELECT", + "Original": "select col from user.user where id = 1 order by user.user.user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` where id = 1 order by `user`.user_id asc", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "group by with filter removing the keyspace from order by", + "query": "select col from user.user where id = 1 group by user.user.user_id", + "plan": { + "QueryType": "SELECT", + "Original": "select col from user.user where id = 1 group by user.user.user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1 group by `user`.user_id", + "Query": "select col from `user` where id = 1 group by `user`.user_id", + "Table": "`user`", + "Values": [ + "1" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "order with authoritative table - removing keyspace from group by", + "query": "select * from user.authoritative where user_id = 5 order by user_id", + "plan": { + "QueryType": "SELECT", + "Original": "select * from user.authoritative where user_id = 5 order by user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": 
"user", + "Sharded": true + }, + "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1", + "Query": "select user_id, col1, col2 from authoritative where user_id = 5 order by authoritative.user_id asc", + "Table": "authoritative", + "Values": [ + "5" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.authoritative" + ] + } + }, + { + "comment": "group by and having with authoritative table - removing keyspace from having", + "query": "select * from user.authoritative where user_id = 5 group by user_id having count(user_id) = 6 order by user_id", + "plan": { + "QueryType": "SELECT", + "Original": "select * from user.authoritative where user_id = 5 group by user_id having count(user_id) = 6 order by user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1 group by user_id", + "Query": "select user_id, col1, col2 from authoritative where user_id = 5 group by user_id having count(user_id) = 6 order by authoritative.user_id asc", + "Table": "authoritative", + "Values": [ + "5" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.authoritative" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases.json b/go/vt/vtgate/planbuilder/testdata/flush_cases.json index 8298c6de649..26a1f218c8d 100644 --- a/go/vt/vtgate/planbuilder/testdata/flush_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/flush_cases.json @@ -33,7 +33,8 @@ "Sharded": false }, "TargetDestination": "AllShards()", - "Query": "flush local tables with read lock" + "Query": "flush local tables with read lock", + "ReservedConnectionNeeded": true } } }, @@ -53,5 +54,42 @@ "Query": "flush local hosts, logs" } } + }, + { + "comment": "Flush statement with multiple tables in different keyspace with read lock", + "query": "flush tables user.music, main.unsharded with read lock", + "plan": { + "QueryType": 
"FLUSH", + "Original": "flush tables user.music, main.unsharded with read lock", + "Instructions": { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AllShards()", + "Query": "flush tables unsharded with read lock", + "ReservedConnectionNeeded": true + }, + { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AllShards()", + "Query": "flush tables music with read lock", + "ReservedConnectionNeeded": true + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.music" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json index a3370a74f5d..7afd090ba21 100644 --- a/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json +++ b/go/vt/vtgate/planbuilder/testdata/flush_cases_no_default_keyspace.json @@ -15,7 +15,8 @@ "Sharded": false }, "TargetDestination": "AllShards()", - "Query": "flush local tables unsharded_a with read lock" + "Query": "flush local tables unsharded_a with read lock", + "ReservedConnectionNeeded": true }, { "OperatorType": "Send", @@ -24,7 +25,8 @@ "Sharded": true }, "TargetDestination": "AllShards()", - "Query": "flush local tables `user`, user_extra with read lock" + "Query": "flush local tables `user`, user_extra with read lock", + "ReservedConnectionNeeded": true } ] }, @@ -105,7 +107,8 @@ "Sharded": false }, "TargetDestination": "AllShards()", - "Query": "flush tables a with read lock" + "Query": "flush tables a with read lock", + "ReservedConnectionNeeded": true }, "TablesUsed": [ "main.a" @@ -128,7 +131,8 @@ "Sharded": false }, "TargetDestination": "AllShards()", - "Query": "flush local tables unsharded_a with read lock" + "Query": "flush local tables unsharded_a with read lock", + "ReservedConnectionNeeded": true }, { "OperatorType": "Send", @@ 
-137,7 +141,8 @@ "Sharded": false }, "TargetDestination": "AllShards()", - "Query": "flush local tables unsharded_tab with read lock" + "Query": "flush local tables unsharded_tab with read lock", + "ReservedConnectionNeeded": true }, { "OperatorType": "Send", @@ -146,7 +151,8 @@ "Sharded": true }, "TargetDestination": "AllShards()", - "Query": "flush local tables `user`, user_extra with read lock" + "Query": "flush local tables `user`, user_extra with read lock", + "ReservedConnectionNeeded": true } ] }, diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json index 065691d2356..47f10cd273b 100644 --- a/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json @@ -81,8 +81,8 @@ "Name": "sharded_fk_allow", "Sharded": true }, - "FieldQuery": "select colb, cola, y, colc, x from multicol_tbl1 where 1 != 1", - "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", + "FieldQuery": "select multicol_tbl1.colb, multicol_tbl1.cola, multicol_tbl1.y, multicol_tbl1.colc, multicol_tbl1.x from multicol_tbl1 where 1 != 1", + "Query": "select multicol_tbl1.colb, multicol_tbl1.cola, multicol_tbl1.y, multicol_tbl1.colc, multicol_tbl1.x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", "Table": "multicol_tbl1", "Values": [ "1", @@ -94,7 +94,7 @@ { "InputName": "CascadeChild-1", "OperatorType": "Delete", - "Variant": "Scatter", + "Variant": "MultiEqual", "Keyspace": { "Name": "sharded_fk_allow", "Sharded": true @@ -109,7 +109,13 @@ 4 ], "Query": "delete from multicol_tbl2 where (colb, cola, x, colc, y) in ::fkc_vals", - "Table": "multicol_tbl2" + "Table": "multicol_tbl2", + "Values": [ + "fkc_vals:1", + "fkc_vals:0", + "fkc_vals:3" + ], + "Vindex": "multicolIdx" }, { "InputName": "Parent", @@ -154,14 +160,14 @@ "Name": "sharded_fk_allow", "Sharded": true }, - "FieldQuery": 
"select col5, t5col5 from tbl5 where 1 != 1", - "Query": "select col5, t5col5 from tbl5 for update", + "FieldQuery": "select tbl5.col5, tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.col5, tbl5.t5col5 from tbl5 for update", "Table": "tbl5" }, { "InputName": "CascadeChild-1", "OperatorType": "Delete", - "Variant": "Scatter", + "Variant": "MultiEqual", "Keyspace": { "Name": "sharded_fk_allow", "Sharded": true @@ -172,7 +178,11 @@ 0 ], "Query": "delete from tbl4 where (col4) in ::fkc_vals", - "Table": "tbl4" + "Table": "tbl4", + "Values": [ + "fkc_vals:0" + ], + "Vindex": "hash_vin" }, { "InputName": "CascadeChild-2", @@ -232,8 +242,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col9 from u_tbl9 where 1 != 1", - "Query": "select col9 from u_tbl9 where col9 = 5 for update", + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where col9 = 5 for update nowait", "Table": "u_tbl9" }, { @@ -311,8 +321,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = 1 for update", + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -328,7 +338,7 @@ "Cols": [ 0 ], - "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (u_tbl3.col3) not in (('bar'))", + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((cast('bar' as CHAR)))", "Table": "u_tbl3" }, { @@ -422,8 +432,8 @@ "Name": "sharded_fk_allow", "Sharded": true }, - "FieldQuery": "select t5col5 from tbl5 where 1 != 1", - "Query": "select t5col5 from tbl5 for update", + "FieldQuery": "select tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.t5col5 from tbl5 for update", "Table": "tbl5" }, { @@ -439,7 +449,7 @@ "Cols": [ 0 ], - "Query": "update tbl4 set t4col4 = null 
where (t4col4) in ::fkc_vals and (tbl4.t4col4) not in (('foo'))", + "Query": "update tbl4 set t4col4 = null where (t4col4) in ::fkc_vals and (t4col4) not in (('foo'))", "Table": "tbl4" }, { @@ -516,7 +526,7 @@ { "OperatorType": "Join", "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0,R:0", + "JoinColumnIndexes": "R:0", "TableName": "tbl10_tbl3", "Inputs": [ { @@ -527,7 +537,7 @@ "Sharded": true }, "FieldQuery": "select 1 from tbl10 where 1 != 1", - "Query": "select 1 from tbl10 lock in share mode", + "Query": "select 1 from tbl10 where not (tbl10.col) <=> ('foo') for share", "Table": "tbl10" }, { @@ -538,7 +548,7 @@ "Sharded": true }, "FieldQuery": "select tbl3.col from tbl3 where 1 != 1", - "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' lock in share mode", + "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' for share", "Table": "tbl3" } ] @@ -591,8 +601,8 @@ "Name": "sharded_fk_allow", "Sharded": true }, - "FieldQuery": "select col9 from tbl9 where 1 != 1", - "Query": "select col9 from tbl9 where col9 = 34 for update", + "FieldQuery": "select tbl9.col9 from tbl9 where 1 != 1", + "Query": "select tbl9.col9 from tbl9 where col9 = 34 for update", "Table": "tbl9", "Values": [ "34" @@ -656,8 +666,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col1, col1 from u_tbl1 where 1 != 1", - "Query": "select col1, col1 from u_tbl1 for update", + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 for update", "Table": "u_tbl1" }, { @@ -676,8 +686,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -693,7 +703,7 @@ "Cols": [ 0 ], - "Query": "update u_tbl3 set col3 = null 
where (col3) in ::fkc_vals1 and (u_tbl3.col3) not in (('foo'))", + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast('foo' as CHAR)))", "Table": "u_tbl3" }, { @@ -715,7 +725,7 @@ "OperatorType": "FkCascade", "BvName": "fkc_vals2", "Cols": [ - 1 + 0 ], "Inputs": [ { @@ -726,8 +736,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col9 from u_tbl9 where 1 != 1", - "Query": "select col9 from u_tbl9 where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in (('foo')) for update", + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast('foo' as CHAR))) for update nowait", "Table": "u_tbl9" }, { @@ -755,7 +765,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in (('foo'))", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast('foo' as CHAR)))", "Table": "u_tbl9" } ] @@ -784,83 +794,94 @@ } }, { - "comment": "update in a table with limit - disallowed", - "query": "update u_tbl2 set col2 = 'bar' limit 2", - "plan": "VT12001: unsupported: update with limit with foreign key constraints" - }, - { - "comment": "update in a table with non-literal value - set null fail due to child update where condition", + "comment": "update in a table with non-literal value - set null", "query": "update u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", - "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" - }, - { - "comment": "update in a table with non-literal value - with cascade fail as the cascade value is not known", - "query": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", - "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" - }, - { - "comment": "update in a table 
with set null, non-literal value on non-foreign key column - allowed", - "query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", "plan": { "QueryType": "UPDATE", - "Original": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Original": "update u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", "Instructions": { - "OperatorType": "FkCascade", + "OperatorType": "FKVerify", "Inputs": [ { - "InputName": "Selection", + "InputName": "VerifyParent-1", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = 1 for update", - "Table": "u_tbl2" - }, - { - "InputName": "CascadeChild-1", - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "BvName": "fkc_vals", - "Cols": [ - 0 - ], - "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (u_tbl3.col3) not in ((2))", - "Table": "u_tbl3" + "FieldQuery": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where u_tbl1.col1 is null and cast(u_tbl2.col1 + 'bar' as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.col1 + 'bar' as CHAR)) and u_tbl2.id = 1 limit 1 for share", + "Table": "u_tbl1, u_tbl2" }, { - "InputName": "Parent", - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", - "Table": "u_tbl2" + "InputName": "PostVerify", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": 
"unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd" + } + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (:fkc_upd is null or (col3) not in ((:fkc_upd)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", + "Table": "u_tbl2" + } + ] } ] }, "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", "unsharded_fk_allow.u_tbl2", "unsharded_fk_allow.u_tbl3" ] } }, { - "comment": "update in a table with cascade, non-literal value on non-foreign key column - allowed", - "query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "comment": "update in a table with non-literal value - with cascade", + "query": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", "plan": { "QueryType": "UPDATE", - "Original": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Original": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", "Instructions": { "OperatorType": "FkCascade", "Inputs": [ @@ -872,8 +893,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col1, col1 from u_tbl1 where 1 != 1", - "Query": "select col1, col1 from 
u_tbl1 where id = 1 for update", + "FieldQuery": "select u_tbl1.col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -883,6 +904,13 @@ "Cols": [ 0 ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd" + } + ], "Inputs": [ { "InputName": "Selection", @@ -892,8 +920,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -909,7 +937,7 @@ "Cols": [ 0 ], - "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (u_tbl3.col3) not in ((2))", + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (cast(:fkc_upd as CHAR) is null or (col3) not in ((cast(:fkc_upd as CHAR))))", "Table": "u_tbl3" }, { @@ -921,7 +949,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 2 where (col2) in ::fkc_vals", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = :fkc_upd where (col2) in ::fkc_vals", "Table": "u_tbl2" } ] @@ -931,7 +959,14 @@ "OperatorType": "FkCascade", "BvName": "fkc_vals2", "Cols": [ - 1 + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd1" + } ], "Inputs": [ { @@ -942,8 +977,8 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col9 from u_tbl9 where 1 != 1", - "Query": "select col9 from u_tbl9 where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in ((2)) for update", + "FieldQuery": "select u_tbl9.col9 from 
u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (:fkc_upd1 is null or (col9) not in ((:fkc_upd1))) for update nowait", "Table": "u_tbl9" }, { @@ -971,7 +1006,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (u_tbl9.col9) not in ((2))", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (:fkc_upd1 is null or (col9) not in ((:fkc_upd1)))", "Table": "u_tbl9" } ] @@ -985,7 +1020,7 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", "Table": "u_tbl1" } ] @@ -1000,106 +1035,11 @@ } }, { - "comment": "update in a table with a child table having SET DEFAULT constraint - disallowed", - "query": "update tbl20 set col2 = 'bar'", - "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" - }, - { - "comment": "delete in a table with limit - disallowed", - "query": "delete from u_tbl2 limit 2", - "plan": "VT12001: unsupported: foreign keys management at vitess with limit" - }, - { - "comment": "update with fk on cross-shard with a where condition on non-literal value - disallowed", - "query": "update tbl3 set coly = colx + 10 where coly = 10", - "plan": "VT12001: unsupported: update expression with non-literal values with foreign key constraints" - }, - { - "comment": "update with fk on cross-shard with a where condition", - "query": "update tbl3 set coly = 20 where coly = 10", - "plan": { - "QueryType": "UPDATE", - "Original": "update tbl3 set coly = 20 where coly = 10", - "Instructions": { - "OperatorType": "FKVerify", - "Inputs": [ - { - "InputName": "VerifyParent-1", - "OperatorType": "Limit", - "Count": "1", - "Inputs": [ - { - "OperatorType": "Projection", - "Expressions": [ - "1 as 1" - ], - "Inputs": [ - { - 
"OperatorType": "Filter", - "Predicate": "tbl1.t1col1 is null", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "LeftJoin", - "JoinColumnIndexes": "R:0,R:0", - "TableName": "tbl3_tbl1", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true - }, - "FieldQuery": "select 1 from tbl3 where 1 != 1", - "Query": "select 1 from tbl3 where tbl3.coly = 10 lock in share mode", - "Table": "tbl3" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true - }, - "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", - "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 lock in share mode", - "Table": "tbl1" - } - ] - } - ] - } - ] - } - ] - }, - { - "InputName": "PostVerify", - "OperatorType": "Update", - "Variant": "Scatter", - "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", - "Query": "update tbl3 set coly = 20 where tbl3.coly = 10", - "Table": "tbl3" - } - ] - }, - "TablesUsed": [ - "sharded_fk_allow.tbl1", - "sharded_fk_allow.tbl3" - ] - } - }, - { - "comment": "Update in a table with shard-scoped foreign keys with cascade that requires a validation of a different parent foreign key", - "query": "update u_tbl6 set col6 = 'foo'", + "comment": "update in a table with set null, non-literal value on non-foreign key column", + "query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", "plan": { "QueryType": "UPDATE", - "Original": "update u_tbl6 set col6 = 'foo'", + "Original": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", "Instructions": { "OperatorType": "FkCascade", "Inputs": [ @@ -1111,43 +1051,25 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col6 from u_tbl6 where 1 != 1", - "Query": "select col6 from u_tbl6 for update", - "Table": "u_tbl6" + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + 
"Query": "select u_tbl2.col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" }, { "InputName": "CascadeChild-1", - "OperatorType": "FKVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", "BvName": "fkc_vals", "Cols": [ 0 ], - "Inputs": [ - { - "InputName": "VerifyParent-1", - "OperatorType": "Route", - "Variant": "Unsharded", - "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false - }, - "FieldQuery": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = 'foo' where 1 != 1", - "Query": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = 'foo' where (u_tbl8.col8) in ::fkc_vals and u_tbl9.col9 is null limit 1 lock in share mode", - "Table": "u_tbl8, u_tbl9" - }, - { - "InputName": "PostVerify", - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl8 set col8 = 'foo' where (u_tbl8.col8) in ::fkc_vals", - "Table": "u_tbl8" - } - ] + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((cast(2 as CHAR)))", + "Table": "u_tbl3" }, { "InputName": "Parent", @@ -1158,24 +1080,23 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_tbl6 set col6 = 'foo'", - "Table": "u_tbl6" + "Query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Table": "u_tbl2" } ] }, "TablesUsed": [ - "unsharded_fk_allow.u_tbl6", - "unsharded_fk_allow.u_tbl8", - "unsharded_fk_allow.u_tbl9" + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" ] } }, { - "comment": "Update that cascades and requires parent fk and restrict child fk verification", - "query": "update u_tbl7 set col7 = 'foo'", + "comment": "update in a table with cascade, non-literal value on non-foreign key column", + "query": "update u_tbl1 set m = x + 'bar', 
col1 = 2 where id = 1", "plan": { "QueryType": "UPDATE", - "Original": "update u_tbl7 set col7 = 'foo'", + "Original": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", "Instructions": { "OperatorType": "FkCascade", "Inputs": [ @@ -1187,44 +1108,82 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col7 from u_tbl7 where 1 != 1", - "Query": "select col7 from u_tbl7 for update", - "Table": "u_tbl7" + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" }, { "InputName": "CascadeChild-1", - "OperatorType": "FKVerify", + "OperatorType": "FkCascade", "BvName": "fkc_vals", "Cols": [ 0 ], "Inputs": [ { - "InputName": "VerifyParent-1", + "InputName": "Selection", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = 'foo' where 1 != 1", - "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = 'foo' where (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 lock in share mode", - "Table": "u_tbl3, u_tbl4" + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" }, { - "InputName": "VerifyChild-2", + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast(2 as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ 
SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl9.col9) not in (('foo')) and u_tbl4.col4 = u_tbl9.col9 limit 1 lock in share mode", - "Table": "u_tbl4, u_tbl9" + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast(2 as CHAR))) for update nowait", + "Table": "u_tbl9" }, { - "InputName": "PostVerify", + "InputName": "CascadeChild-1", "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { @@ -1232,8 +1191,24 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = 'foo' where (u_tbl4.col4) in ::fkc_vals", - "Table": "u_tbl4" + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast(2 as CHAR)))", + "Table": "u_tbl9" } ] }, @@ -1246,174 +1221,2703 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_tbl7 set col7 = 'foo'", - "Table": "u_tbl7" + "Query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Table": "u_tbl1" } ] }, "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", 
"unsharded_fk_allow.u_tbl3", - "unsharded_fk_allow.u_tbl4", - "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl8", "unsharded_fk_allow.u_tbl9" ] } }, { - "comment": "Update that cascades and requires parent fk and restrict child fk verification - bindVariable", - "query": "update u_tbl7 set col7 = :v1", + "comment": "update in a table with a child table having SET DEFAULT constraint - disallowed", + "query": "update tbl20 set col2 = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "delete in a table with limit", + "query": "delete from u_tbl2 limit 2", "plan": { - "QueryType": "UPDATE", - "Original": "update u_tbl7 set col7 = :v1", + "QueryType": "DELETE", + "Original": "delete from u_tbl2 limit 2", "Instructions": { - "OperatorType": "FkCascade", + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], "Inputs": [ { - "InputName": "Selection", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select col7 from u_tbl7 where 1 != 1", - "Query": "select col7 from u_tbl7 for update", - "Table": "u_tbl7" + "FieldQuery": "select u_tbl2.id from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.id from u_tbl2 limit 2 for update", + "Table": "u_tbl2" }, { - "InputName": "CascadeChild-1", - "OperatorType": "FKVerify", - "BvName": "fkc_vals", - "Cols": [ - 0 - ], + "OperatorType": "FkCascade", "Inputs": [ { - "InputName": "VerifyParent-1", + "InputName": "Selection", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = :v1 where 1 != 1", - "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = :v1 where (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 lock in share mode", - "Table": "u_tbl3, u_tbl4" + "FieldQuery": "select u_tbl2.col2 
from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where u_tbl2.id in ::dml_vals for update", + "Table": "u_tbl2" }, { - "InputName": "VerifyChild-2", - "OperatorType": "Route", + "InputName": "CascadeChild-1", + "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (:v1 is null or (u_tbl9.col9) not in ((:v1))) and u_tbl4.col4 = u_tbl9.col9 limit 1 lock in share mode", - "Table": "u_tbl4, u_tbl9" + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals", + "Table": "u_tbl3" }, { - "InputName": "PostVerify", - "OperatorType": "Update", + "InputName": "Parent", + "OperatorType": "Delete", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = :v1 where (u_tbl4.col4) in ::fkc_vals", - "Table": "u_tbl4" + "Query": "delete from u_tbl2 where u_tbl2.id in ::dml_vals", + "Table": "u_tbl2" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update with fk on cross-shard with a update condition on non-literal value", + "query": "update tbl3 set coly = colx + 10 where coly = 10", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl3 set coly = colx + 10 where coly = 10", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "1 as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl1.t1col1 is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + 
"JoinColumnIndexes": "R:0", + "JoinVars": { + "tbl3_colx": 0 + }, + "TableName": "tbl3_tbl1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl3.colx from tbl3 where 1 != 1", + "Query": "select tbl3.colx from tbl3 where tbl3.colx + 10 is not null and not (tbl3.coly) <=> (tbl3.colx + 10) and tbl3.coly = 10 for share", + "Table": "tbl3" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = :tbl3_colx + 10 for share", + "Table": "tbl1" + } + ] + } + ] + } + ] } ] }, { - "InputName": "Parent", + "InputName": "PostVerify", "OperatorType": "Update", - "Variant": "Unsharded", + "Variant": "Scatter", "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false + "Name": "sharded_fk_allow", + "Sharded": true }, "TargetTabletType": "PRIMARY", - "Query": "update u_tbl7 set col7 = :v1", - "Table": "u_tbl7" + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ tbl3 set coly = colx + 10 where coly = 10", + "Table": "tbl3" } ] }, "TablesUsed": [ - "unsharded_fk_allow.u_tbl3", - "unsharded_fk_allow.u_tbl4", - "unsharded_fk_allow.u_tbl7", - "unsharded_fk_allow.u_tbl9" + "sharded_fk_allow.tbl1", + "sharded_fk_allow.tbl3" ] } }, { - "comment": "Insert with on duplicate key update - foreign keys disallowed", - "query": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", - "plan": "VT12001: unsupported: ON DUPLICATE KEY UPDATE with foreign keys" - }, - { - "comment": "Insert with on duplicate key update - foreign keys not on update column - allowed", - "query": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "comment": "update with fk on cross-shard with a where condition", + "query": "update 
tbl3 set coly = 20 where coly = 10", "plan": { - "QueryType": "INSERT", - "Original": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "QueryType": "UPDATE", + "Original": "update tbl3 set coly = 20 where coly = 10", "Instructions": { - "OperatorType": "Insert", - "Variant": "Unsharded", - "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "Query": "insert into u_tbl1(id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", - "TableName": "u_tbl1" + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "1 as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl1.t1col1 is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "TableName": "tbl3_tbl1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl3 where 1 != 1", + "Query": "select 1 from tbl3 where not (tbl3.coly) <=> (20) and tbl3.coly = 10 for share", + "Table": "tbl3" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 for share", + "Table": "tbl1" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update tbl3 set coly = 20 where coly = 10", + "Table": "tbl3" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "Update in a 
table with shard-scoped foreign keys with cascade that requires a validation of a different parent foreign key", + "query": "update u_tbl6 set col6 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl6 set col6 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl6.col6 from u_tbl6 where 1 != 1", + "Query": "select u_tbl6.col6 from u_tbl6 for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = cast('foo' as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = cast('foo' as CHAR) where u_tbl9.col9 is null and cast('foo' as CHAR) is not null and not (u_tbl8.col8) <=> (cast('foo' as CHAR)) and (u_tbl8.col8) in ::fkc_vals limit 1 for share nowait", + "Table": "u_tbl8, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl8 set col8 = 'foo' where (col8) in ::fkc_vals", + "Table": "u_tbl8" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl6 set col6 = 'foo'", + "Table": "u_tbl6" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8", + 
"unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification", + "query": "update u_tbl7 set col7 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl7.col7 from u_tbl7 where 1 != 1", + "Query": "select u_tbl7.col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where u_tbl3.col3 is null and cast('foo' as CHAR) is not null and not (u_tbl4.col4) <=> (cast('foo' as CHAR)) and (u_tbl4.col4) in ::fkc_vals limit 1 for share", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where u_tbl4.col4 = u_tbl9.col9 and (u_tbl4.col4) in ::fkc_vals and (cast('foo' as CHAR) is null or (u_tbl9.col9) not in ((cast('foo' as CHAR)))) limit 1 for share", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ 
SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = 'foo' where (col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl7 set col7 = 'foo'", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification - bindVariable", + "query": "update u_tbl7 set col7 = :v1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = :v1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl7.col7 from u_tbl7 where 1 != 1", + "Query": "select u_tbl7.col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where u_tbl3.col3 is null and cast(:v1 as CHAR) is not null and not (u_tbl4.col4) <=> (cast(:v1 as CHAR)) and (u_tbl4.col4) in ::fkc_vals limit 1 for share", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": 
"select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where u_tbl4.col4 = u_tbl9.col9 and (u_tbl4.col4) in ::fkc_vals and (cast(:v1 as CHAR) is null or (u_tbl9.col9) not in ((cast(:v1 as CHAR)))) limit 1 for share", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = :v1 where (col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl7 set col7 = :v1", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign key with new value", + "query": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", + "Instructions": { + "OperatorType": "Upsert", + "TargetTabletType": "PRIMARY", + "Inputs": [ + { + "InputName": "Insert-1", + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl1(id, col1) values (1, 3)", + "TableName": "u_tbl1" + }, + { + "InputName": "Update-1", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 
u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast(5 as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 5 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast(5 as CHAR))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + 
"BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast(5 as CHAR)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl1 set col1 = 5 where id = 1", + "Table": "u_tbl1" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign keys not on update column - allowed", + "query": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into u_tbl1(id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "TableName": "u_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1" + ] + } + }, + { + "comment": "Insert with unsharded table having fk reference in sharded table", + "query": "insert into u_tbl (id, col) values (1, 2)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "replace into with table having primary key", + "query": "replace into u_tbl1 (id, col1) values (1, 2)", + 
"plan": { + "QueryType": "INSERT", + "Original": "replace into u_tbl1 (id, col1) values (1, 2)", + "Instructions": { + "OperatorType": "Sequential", + "Inputs": [ + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where (id) in ((1)) for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl1 where (id) in ((1))", + "Table": "u_tbl1" + } + ] + }, + { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + 
"Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl1(id, col1) values (1, 2)", + "TableName": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades", + "query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where 1 != 1", + "Query": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where id = 3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in 
::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades - bindVariables", + "query": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where 1 != 1", + "Query": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where id = :v3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from 
u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2)))) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2))))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "Cascaded delete run from prepared statement", + "query": "execute prep_delete using @foo", + "plan": { + "QueryType": "EXECUTE", + "Original": "execute prep_delete using @foo", + "Instructions": { + "OperatorType": "EXECUTE", + "Parameters": [ + "foo" + ], + "Inputs": [ + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": 
"sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl5.col5, tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.col5, tbl5.t5col5 from tbl5 where id = :v1 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from tbl4 where (col4) in ::fkc_vals", + "Table": "tbl4", + "Values": [ + "fkc_vals:0" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 1 + ], + "Query": "delete from tbl4 where (t4col4) in ::fkc_vals1", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from tbl5 where id = :v1", + "Table": "tbl5" + } + ] + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "foreign key column updated by using a column which is also getting updated", + "query": "update u_tbl1 set foo = 100, col1 = baz + 1 + foo where bar = 42", + "plan": "VT12001: unsupported: foo column referenced in foreign key column col1 is itself updated" + }, + { + "comment": "foreign key column updated by using a column which is also getting updated - self reference column is allowed", + "query": "update u_tbl7 set foo = 100, col7 = baz + 1 + col7 where bar = 42", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set foo = 100, col7 = baz + 1 + col7 where bar = 42", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": 
"Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl7.col7, col7 <=> cast(baz + 1 + col7 as CHAR), cast(baz + 1 + col7 as CHAR) from u_tbl7 where 1 != 1", + "Query": "select u_tbl7.col7, col7 <=> cast(baz + 1 + col7 as CHAR), cast(baz + 1 + col7 as CHAR) from u_tbl7 where bar = 42 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd" + } + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:fkc_upd as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:fkc_upd as CHAR) where u_tbl3.col3 is null and cast(:fkc_upd as CHAR) is not null and not (u_tbl4.col4) <=> (cast(:fkc_upd as CHAR)) and (u_tbl4.col4) in ::fkc_vals limit 1 for share", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where u_tbl4.col4 = u_tbl9.col9 and (u_tbl4.col4) in ::fkc_vals and (cast(:fkc_upd as CHAR) is null or (u_tbl9.col9) not in ((cast(:fkc_upd as CHAR)))) limit 1 for share", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = :fkc_upd where (col4) in ::fkc_vals", + "Table": 
"u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl7 set foo = 100, col7 = baz + 1 + col7 where bar = 42", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Single column updated in a multi-col table", + "query": "update u_multicol_tbl1 set cola = cola + 3 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = cola + 3 where id = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb, cola <=> cola + 3, cola + 3 from u_multicol_tbl1 where 1 != 1", + "Query": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb, cola <=> cola + 3, cola + 3 from u_multicol_tbl1 where id = 3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 2, + "UpdateExprCol": 3, + "UpdateExprBvName": "fkc_upd" + } + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:fkc_upd is null or (cola) not in ((:fkc_upd))) for update", + "Table": 
"u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (:fkc_upd is null or (cola) not in ((:fkc_upd)))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl1 set cola = cola + 3 where id = 3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "updating multiple columns of a fk constraint such that one uses the other", + "query": "update u_multicol_tbl3 set cola = id, colb = 5 * (cola + (1 - (cola))) where id = 2", + "plan": "VT12001: unsupported: cola column referenced in foreign key column colb is itself updated" + }, + { + "comment": "multicol foreign key updates with one literal and one non-literal update", + "query": "update u_multicol_tbl2 set cola = 2, colb = colc - (2) where id = 7", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl2 set cola = 2, colb = colc - (2) where id = 7", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": 
"VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_multicol_tbl2 left join u_multicol_tbl1 on u_multicol_tbl1.cola = 2 and u_multicol_tbl1.colb = u_multicol_tbl2.colc - 2 where 1 != 1", + "Query": "select 1 from u_multicol_tbl2 left join u_multicol_tbl1 on u_multicol_tbl1.cola = 2 and u_multicol_tbl1.colb = u_multicol_tbl2.colc - 2 where u_multicol_tbl1.cola is null and 2 is not null and u_multicol_tbl1.colb is null and u_multicol_tbl2.colc - 2 is not null and not (u_multicol_tbl2.cola, u_multicol_tbl2.colb) <=> (2, u_multicol_tbl2.colc - 2) and u_multicol_tbl2.id = 7 limit 1 for share", + "Table": "u_multicol_tbl1, u_multicol_tbl2" + }, + { + "InputName": "PostVerify", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb, cola <=> 2, 2, colb <=> colc - 2, colc - 2 from u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb, cola <=> 2, 2, colb <=> colc - 2, colc - 2 from u_multicol_tbl2 where id = 7 for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 2, + "UpdateExprCol": 3, + "UpdateExprBvName": "fkc_upd" + }, + { + "CompExprCol": 4, + "UpdateExprCol": 5, + "UpdateExprBvName": "fkc_upd1" + } + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = :fkc_upd, colb = :fkc_upd1 where (cola, colb) in ::fkc_vals", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + 
"OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl2 set cola = 2, colb = colc - 2 where id = 7", + "Table": "u_multicol_tbl2" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "replace into with table having unique key and primary key", + "query": "replace into u_tbl9(id, col9) values (1, 10),(2, 20),(3, 30)", + "plan": { + "QueryType": "INSERT", + "Original": "replace into u_tbl9(id, col9) values (1, 10),(2, 20),(3, 30)", + "Instructions": { + "OperatorType": "Sequential", + "Inputs": [ + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ((10), (20), (30)) or (col9 * foo) in ((10 * null), (20 * null), (30 * null)) or (bar, col9) in ((1, 10), (1, 20), (1, 30)) or (id) in ((1), (2), (3)) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl9 where (col9) in ((10), (20), (30)) or (col9 * foo) in ((10 * null), (20 * null), (30 * null)) or (bar, col9) 
in ((1, 10), (1, 20), (1, 30)) or (id) in ((1), (2), (3))", + "Table": "u_tbl9" + } + ] + }, + { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl9(id, col9) values (1, 10), (2, 20), (3, 30)", + "TableName": "u_tbl9" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Delete with foreign key checks off", + "query": "delete /*+ SET_VAR(foreign_key_checks=off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete /*+ SET_VAR(foreign_key_checks=off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1" + ] + } + }, + { + "comment": "Update with foreign key checks off", + "query": "update /*+ SET_VAR(foreign_key_checks=0) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update /*+ SET_VAR(foreign_key_checks=0) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=Off) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1" + ] + } + }, + { + 
"comment": "Insert with cross shard foreign keys and foreign key checks off", + "query": "insert /*+ SET_VAR(foreign_key_checks=0) */ into tbl3 (col3, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert /*+ SET_VAR(foreign_key_checks=0) */ into tbl3 (col3, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=Off) */ into tbl3(col3, coly) values (:_col3_0, 3)", + "TableName": "tbl3", + "VindexValues": { + "hash_vin": "1" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign key with values function", + "query": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = values(col1)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = values(col1)", + "Instructions": { + "OperatorType": "Upsert", + "TargetTabletType": "PRIMARY", + "Inputs": [ + { + "InputName": "Insert-1", + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl1(id, col1) values (1, 3)", + "TableName": "u_tbl1" + }, + { + "InputName": "Update-1", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + 
"OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast(3 as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 3 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast(3 as CHAR))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast(3 as CHAR)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl1 set col1 = 3 where id = 1", + "Table": "u_tbl1" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "insert with on duplicate key update with multiple rows", + "query": "insert into u_tbl2 (id, col2) values (:v1, :v2),(:v3, :v4), (:v5, :v6) on duplicate key update col2 = values(col2)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl2 (id, col2) values (:v1, :v2),(:v3, :v4), (:v5, :v6) on duplicate key update col2 = values(col2)", + "Instructions": { + "OperatorType": "Upsert", + "TargetTabletType": "PRIMARY", + "Inputs": [ + { + "InputName": "Insert-1", + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl2(id, col2) values (:v1, :v2)", + "TableName": "u_tbl2" + }, + { + "InputName": "Update-1", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where id = :v1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": 
"PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (cast(:v2 as CHAR) is null or (col3) not in ((cast(:v2 as CHAR))))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col2 = :v2 where id = :v1", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "Insert-2", + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl2(id, col2) values (:v3, :v4)", + "TableName": "u_tbl2" + }, + { + "InputName": "Update-2", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where id = :v3 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (cast(:v4 as CHAR) is null or (col3) not in ((cast(:v4 as CHAR))))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col2 = :v4 where id = :v3", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "Insert-3", + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": 
"unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert into u_tbl2(id, col2) values (:v5, :v6)", + "TableName": "u_tbl2" + }, + { + "InputName": "Update-3", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where id = :v5 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals2 and (cast(:v6 as CHAR) is null or (col3) not in ((cast(:v6 as CHAR))))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl2 set col2 = :v6 where id = :v5", + "Table": "u_tbl2" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "Unknown update column in foreign keys", + "query": "update tbl_auth set unknown_col = 'verified' where id = 1", + "plan": "column 'unknown_col' not found in table 'tbl_auth'" + }, + { + "comment": "Unsharded multi-table delete with foreign keys", + "query": "delete u from u_tbl6 u join u_tbl5 m on u.col = m.col where u.col2 = 4 and m.col3 = 6", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from u_tbl6 u join u_tbl5 m on u.col = m.col where u.col2 = 4 and m.col3 = 6", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + 
"Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u.id from u_tbl6 as u, u_tbl5 as m where 1 != 1", + "Query": "select u.id from u_tbl6 as u, u_tbl5 as m where u.col2 = 4 and m.col3 = 6 and u.col = m.col for update", + "Table": "u_tbl5, u_tbl6" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u.col6 from u_tbl6 as u where 1 != 1", + "Query": "select u.col6 from u_tbl6 as u where u.id in ::dml_vals for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from u_tbl8 where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl6 as u where u.id in ::dml_vals", + "Table": "u_tbl6" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl5", + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8" + ] + } + }, + { + "comment": "Multi table delete with using", + "query": "delete u_tbl10 from u_tbl10 join u_tbl11 using (id) where id = 5", + "plan": { + "QueryType": "DELETE", + "Original": "delete u_tbl10 from u_tbl10 join u_tbl11 using (id) where id = 5", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"FieldQuery": "select u_tbl10.id from u_tbl10, u_tbl11 where 1 != 1", + "Query": "select u_tbl10.id from u_tbl10, u_tbl11 where u_tbl10.id = 5 and u_tbl10.id = u_tbl11.id for update", + "Table": "u_tbl10, u_tbl11" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl10.col from u_tbl10 where 1 != 1", + "Query": "select u_tbl10.col from u_tbl10 where u_tbl10.id in ::dml_vals for update", + "Table": "u_tbl10" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from u_tbl11 where (col) in ::fkc_vals", + "Table": "u_tbl11" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl10 where u_tbl10.id in ::dml_vals", + "Table": "u_tbl10" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl10", + "unsharded_fk_allow.u_tbl11" + ] + } + }, + { + "comment": "Multi table delete with unrelated tables", + "query": "delete u_tbl1 from u_tbl10 join u_tbl1 on u_tbl10.col = u_tbl1.col", + "plan": { + "QueryType": "DELETE", + "Original": "delete u_tbl1 from u_tbl10 join u_tbl1 on u_tbl10.col = u_tbl1.col", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.id from u_tbl10, u_tbl1 where 1 != 1", + "Query": "select u_tbl1.id from u_tbl10, u_tbl1 where u_tbl10.col = u_tbl1.col for 
update", + "Table": "u_tbl1, u_tbl10" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where u_tbl1.id in ::dml_vals for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl1 where u_tbl1.id in ::dml_vals", + "Table": "u_tbl1" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl10", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "Delete with limit", + "query": 
"delete from u_tbl1 order by id limit 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from u_tbl1 order by id limit 1", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.id from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.id from u_tbl1 order by id asc limit 1 for update", + "Table": "u_tbl1" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where u_tbl1.id in ::dml_vals for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" 
+ } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl1 where u_tbl1.id in ::dml_vals", + "Table": "u_tbl1" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update query with an uncorrelated subquery", + "query": "update u_tbl4 set col41 = (select col14 from u_tbl1 where x = 2 and y = 4) where col4 = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl4 set col41 = (select col14 from u_tbl1 where x = 2 and y = 4) where col4 = 3", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select col14 from u_tbl1 where 1 != 1", + "Query": "select col14 from u_tbl1 where x = 2 and y = 4 lock in share mode", + "Table": "u_tbl1" + }, + { + "InputName": "Outer", + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl1 on u_tbl1.col14 = cast(:__sq1 as SIGNED) where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl1 on u_tbl1.col14 = cast(:__sq1 as SIGNED) where u_tbl1.col14 is null and cast(:__sq1 as SIGNED) is not null and not (u_tbl4.col41) <=> (cast(:__sq1 as SIGNED)) and u_tbl4.col4 = 3 limit 1 for share", + "Table": "u_tbl1, u_tbl4" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col41 = :__sq1 where col4 = 3", + "Table": "u_tbl4" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl4" + ] + } + }, + { + "comment": "update with a subquery", + "query": "update u_tbl1 set col1 = (select foo from u_tbl1 where id = 1) order by id desc", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set col1 = (select foo from u_tbl1 where id = 1) order by id desc", + "Instructions": { + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], + "Inputs": [ + { + "InputName": "SubQuery", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select foo from u_tbl1 where 1 != 1", + "Query": "select foo from u_tbl1 where id = 1 lock in share mode", + "Table": "u_tbl1" + }, + { + "InputName": "Outer", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 order by id desc for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (cast(:__sq1 as CHAR) is null or (col3) not in ((cast(:__sq1 as CHAR))))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = :__sq1 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (cast(:__sq1 as CHAR) is null or (col9) not in ((cast(:__sq1 as CHAR)))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (cast(:__sq1 as CHAR) is null or (col9) not in ((cast(:__sq1 as CHAR))))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl1 set col1 = :__sq1 order by id desc", + "Table": "u_tbl1" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Multi table delete such that the two tables are foreign key related", + "query": "delete u_tbl6 from u_tbl6 join u_tbl8 on u_tbl6.id = u_tbl8.id where u_tbl6.id = 4", + "plan": { + "QueryType": "DELETE", + "Original": "delete u_tbl6 from u_tbl6 join u_tbl8 on u_tbl6.id = u_tbl8.id where u_tbl6.id = 4", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl6.id from u_tbl6, u_tbl8 where 1 != 1", + "Query": "select u_tbl6.id from u_tbl6, u_tbl8 where u_tbl6.id = 4 and u_tbl6.id = u_tbl8.id for update", + "Table": "u_tbl6, u_tbl8" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl6.col6 from u_tbl6 where 1 != 1", + "Query": "select u_tbl6.col6 from u_tbl6 where u_tbl6.id in ::dml_vals for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from u_tbl8 where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": 
false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl6 where u_tbl6.id in ::dml_vals", + "Table": "u_tbl6" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8" + ] + } + }, + { + "comment": "multi table delete on foreign key enabled tables", + "query": "delete u, m from u_tbl6 u join u_tbl5 m on u.col = m.col where u.col2 = 4 and m.col3 = 6", + "plan": { + "QueryType": "DELETE", + "Original": "delete u, m from u_tbl6 u join u_tbl5 m on u.col = m.col where u.col2 = 4 and m.col3 = 6", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u.id, m.id from u_tbl6 as u, u_tbl5 as m where 1 != 1", + "Query": "select u.id, m.id from u_tbl6 as u, u_tbl5 as m where u.col2 = 4 and m.col3 = 6 and u.col = m.col for update", + "Table": "u_tbl5, u_tbl6" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u.col6 from u_tbl6 as u where 1 != 1", + "Query": "select u.col6 from u_tbl6 as u where u.id in ::dml_vals for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete from u_tbl8 where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl6 as u where u.id in 
::dml_vals", + "Table": "u_tbl6" + } + ] + }, + { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from u_tbl5 as m where m.id in ::dml_vals", + "Table": "u_tbl5" + } + ] }, "TablesUsed": [ - "unsharded_fk_allow.u_tbl1" + "unsharded_fk_allow.u_tbl5", + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8" ] } }, { - "comment": "Insert with unsharded table having fk reference in sharded table", - "query": "insert into u_tbl (id, col) values (1, 2)", - "plan": "VT12002: unsupported: cross-shard foreign keys" - }, - { - "comment": "replace with fk reference unsupported", - "query": "replace into u_tbl1 (id, col1) values (1, 2)", - "plan": "VT12001: unsupported: REPLACE INTO with foreign keys" - }, - { - "comment": "update on a multicol foreign key that set nulls and then cascades", - "query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "comment": "update with limit with foreign keys", + "query": "update u_tbl2 set col2 = 'bar' limit 2", "plan": { "QueryType": "UPDATE", - "Original": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Original": "update u_tbl2 set col2 = 'bar' limit 2", "Instructions": { - "OperatorType": "FkCascade", + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], "Inputs": [ { - "InputName": "Selection", "OperatorType": "Route", "Variant": "Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update", - "Table": "u_multicol_tbl1" + "FieldQuery": "select u_tbl2.id from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.id from u_tbl2 limit 2 for update", + "Table": "u_tbl2" }, { - "InputName": "CascadeChild-1", "OperatorType": "FkCascade", - "BvName": "fkc_vals", - "Cols": [ - 0, - 1 
- ], "Inputs": [ { "InputName": "Selection", @@ -1423,9 +3927,9 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((1, 2)) for update", - "Table": "u_multicol_tbl2" + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where u_tbl2.id in ::dml_vals for update", + "Table": "u_tbl2" }, { "InputName": "CascadeChild-1", @@ -1436,13 +3940,12 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "BvName": "fkc_vals1", + "BvName": "fkc_vals", "Cols": [ - 0, - 1 + 0 ], - "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", - "Table": "u_multicol_tbl3" + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((cast('bar' as CHAR)))", + "Table": "u_tbl3" }, { "InputName": "Parent", @@ -1453,38 +3956,128 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((1, 2))", - "Table": "u_multicol_tbl2" + "Query": "update u_tbl2 set col2 = 'bar' where u_tbl2.id in ::dml_vals", + "Table": "u_tbl2" } ] - }, + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "non literal update with order by and limit", + "query": "update u_tbl2 set col2 = id + 1 order by id limit 2", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col2 = id + 1 order by id limit 2", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ { - "InputName": "Parent", - "OperatorType": "Update", + "OperatorType": "Route", "Variant": 
"Unsharded", "Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "TargetTabletType": "PRIMARY", - "Query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", - "Table": "u_multicol_tbl1" + "FieldQuery": "select u_tbl2.id from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.id from u_tbl2 order by id asc limit 2 for update", + "Table": "u_tbl2" + }, + { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.id + 1 as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.id + 1 as CHAR) where u_tbl1.col1 is null and cast(u_tbl2.id + 1 as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.id + 1 as CHAR)) and u_tbl2.id in ::dml_vals limit 1 for share", + "Table": "u_tbl1, u_tbl2" + }, + { + "InputName": "PostVerify", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2, col2 <=> cast(id + 1 as CHAR), cast(id + 1 as CHAR) from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2, col2 <=> cast(id + 1 as CHAR), cast(id + 1 as CHAR) from u_tbl2 where u_tbl2.id in ::dml_vals order by id asc for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd" + } + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals and (:fkc_upd is null or (col3) not in 
((:fkc_upd)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = id + 1 where u_tbl2.id in ::dml_vals order by id asc", + "Table": "u_tbl2" + } + ] + } + ] } ] }, "TablesUsed": [ - "unsharded_fk_allow.u_multicol_tbl1", - "unsharded_fk_allow.u_multicol_tbl2", - "unsharded_fk_allow.u_multicol_tbl3" + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" ] } }, { - "comment": "update on a multicol foreign key that set nulls and then cascades - bindVariables", - "query": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "comment": "multi table update", + "query": "update u_tbl6 u join u_tbl5 m on u.col = m.col set u.col6 = 'foo' where u.col2 = 4 and m.col3 = 6", "plan": { "QueryType": "UPDATE", - "Original": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Original": "update u_tbl6 u join u_tbl5 m on u.col = m.col set u.col6 = 'foo' where u.col2 = 4 and m.col3 = 6", "Instructions": { "OperatorType": "FkCascade", "Inputs": [ @@ -1496,50 +4089,32 @@ "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl1 where id = :v3 for update", - "Table": "u_multicol_tbl1" + "FieldQuery": "select u.col6 from u_tbl6 as u, u_tbl5 as m where 1 != 1", + "Query": "select u.col6 from u_tbl6 as u, u_tbl5 as m where u.col = m.col and u.col2 = 4 and m.col3 = 6 for update", + "Table": "u_tbl5, u_tbl6" }, { "InputName": "CascadeChild-1", - "OperatorType": "FkCascade", + "OperatorType": "FKVerify", "BvName": "fkc_vals", "Cols": [ - 0, - 1 + 0 ], "Inputs": [ { - "InputName": "Selection", + "InputName": "VerifyParent-1", "OperatorType": "Route", "Variant": "Unsharded", 
"Keyspace": { "Name": "unsharded_fk_allow", "Sharded": false }, - "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((:v1, :v2)))) for update", - "Table": "u_multicol_tbl2" - }, - { - "InputName": "CascadeChild-1", - "OperatorType": "Update", - "Variant": "Unsharded", - "Keyspace": { - "Name": "unsharded_fk_allow", - "Sharded": false - }, - "TargetTabletType": "PRIMARY", - "BvName": "fkc_vals1", - "Cols": [ - 0, - 1 - ], - "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", - "Table": "u_multicol_tbl3" + "FieldQuery": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = cast('foo' as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = cast('foo' as CHAR) where u_tbl9.col9 is null and cast('foo' as CHAR) is not null and not (u_tbl8.col8) <=> (cast('foo' as CHAR)) and (u_tbl8.col8) in ::fkc_vals limit 1 for share nowait", + "Table": "u_tbl8, u_tbl9" }, { - "InputName": "Parent", + "InputName": "PostVerify", "OperatorType": "Update", "Variant": "Unsharded", "Keyspace": { @@ -1547,8 +4122,8 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (u_multicol_tbl2.cola, u_multicol_tbl2.colb) not in ((:v1, :v2))))", - "Table": "u_multicol_tbl2" + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl8 set col8 = 'foo' where (col8) in ::fkc_vals", + "Table": "u_tbl8" } ] }, @@ -1561,96 +4136,265 @@ "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "update u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", - "Table": "u_multicol_tbl1" + "Query": "update u_tbl6 as u, u_tbl5 as m set u.col6 = 'foo' where u.col2 
= 4 and m.col3 = 6 and u.col = m.col", + "Table": "u_tbl6" } ] }, "TablesUsed": [ - "unsharded_fk_allow.u_multicol_tbl1", - "unsharded_fk_allow.u_multicol_tbl2", - "unsharded_fk_allow.u_multicol_tbl3" + "unsharded_fk_allow.u_tbl5", + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" ] } }, { - "comment": "Cascaded delete run from prepared statement", - "query": "execute prep_delete using @foo", + "comment": "multi target update", + "query": "update u_tbl1 u join u_multicol_tbl1 m on u.col = m.col set u.col1 = 'foo', m.cola = 'bar' where u.foo = 4 and m.bar = 6", "plan": { - "QueryType": "EXECUTE", - "Original": "execute prep_delete using @foo", + "QueryType": "UPDATE", + "Original": "update u_tbl1 u join u_multicol_tbl1 m on u.col = m.col set u.col1 = 'foo', m.cola = 'bar' where u.foo = 4 and m.bar = 6", "Instructions": { - "OperatorType": "EXECUTE", - "Parameters": [ - "foo" + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]", + "1:[1]" ], "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u.id, m.id from u_tbl1 as u, u_multicol_tbl1 as m where 1 != 1", + "Query": "select u.id, m.id from u_tbl1 as u, u_multicol_tbl1 as m where u.foo = 4 and m.bar = 6 and u.col = m.col for update", + "Table": "u_multicol_tbl1, u_tbl1" + }, { "OperatorType": "FkCascade", "Inputs": [ { "InputName": "Selection", "OperatorType": "Route", - "Variant": "Scatter", + "Variant": "Unsharded", "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true + "Name": "unsharded_fk_allow", + "Sharded": false }, - "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", - "Query": "select col5, t5col5 from tbl5 where id = :v1 for update", - "Table": "tbl5" + "FieldQuery": "select u.col1 from u_tbl1 as u where 1 != 1", + "Query": "select u.col1 from u_tbl1 as u where u.id in ::dml_vals for update", + 
"Table": "u_tbl1" }, { "InputName": "CascadeChild-1", - "OperatorType": "Delete", - "Variant": "Scatter", - "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true - }, - "TargetTabletType": "PRIMARY", + "OperatorType": "FkCascade", "BvName": "fkc_vals", "Cols": [ 0 ], - "Query": "delete from tbl4 where (col4) in ::fkc_vals", - "Table": "tbl4" + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast('foo' as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 'foo' where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] }, { "InputName": "CascadeChild-2", - "OperatorType": "Delete", - "Variant": "Scatter", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast('foo' as CHAR))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + 
"OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast('foo' as CHAR)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true + "Name": "unsharded_fk_allow", + "Sharded": false }, "TargetTabletType": "PRIMARY", - "BvName": "fkc_vals1", + "Query": "update u_tbl1 as u set u.col1 = 'foo' where u.id in ::dml_vals", + "Table": "u_tbl1" + } + ] + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select m.cola, m.colb from u_multicol_tbl1 as m where 1 != 1", + "Query": "select m.cola, m.colb from u_multicol_tbl1 as m where m.id in ::dml_vals for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals4", "Cols": [ + 0, 1 ], - "Query": "delete from tbl4 where (t4col4) in ::fkc_vals1", - "Table": "tbl4" + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where (cola, colb) in 
::fkc_vals4 and (cola) not in (('bar')) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals5", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals5", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals4 and (cola) not in (('bar'))", + "Table": "u_multicol_tbl2" + } + ] }, { "InputName": "Parent", - "OperatorType": "Delete", - "Variant": "Scatter", + "OperatorType": "Update", + "Variant": "Unsharded", "Keyspace": { - "Name": "sharded_fk_allow", - "Sharded": true + "Name": "unsharded_fk_allow", + "Sharded": false }, "TargetTabletType": "PRIMARY", - "Query": "delete from tbl5 where id = :v1", - "Table": "tbl5" + "Query": "update u_multicol_tbl1 as m set m.cola = 'bar' where m.id in ::dml_vals", + "Table": "u_multicol_tbl1" } ] } ] }, "TablesUsed": [ - "sharded_fk_allow.tbl4", - "sharded_fk_allow.tbl5" + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3", + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" ] } } diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json new file mode 100644 index 00000000000..264311696a3 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json @@ -0,0 +1,497 @@ +[ + { + 
"comment": "Insertion in a table with cross-shard foreign keys works with foreign_key_checks off", + "query": "insert into tbl3 (col3, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into tbl3 (col3, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=Off) */ into tbl3(col3, coly) values (:_col3_0, 3)", + "TableName": "tbl3", + "VindexValues": { + "hash_vin": "1" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "Insertion in a table with shard-scoped multiple column foreign key is allowed", + "query": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=Off) */ into multicol_tbl2(cola, colb, colc) values (:_cola_0, :_colb_0, :_colc_0)", + "TableName": "multicol_tbl2", + "VindexValues": { + "multicolIdx": "1, 2, 3" + } + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with cross-shard foreign key works with foreign_key_checks off ", + "query": "delete from tbl1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from tbl1", + "Table": "tbl1" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1" + ] + } + }, + { + "comment": "Delete in a table with not all column 
shard-scoped foreign keys works with foreign_key_checks off", + "query": "delete from tbl7", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl7", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from tbl7", + "Table": "tbl7" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl7" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped multiple column foreign key with cascade with foreign key checks on", + "query": "delete /*+ SET_VAR(foreign_key_checks=1) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete /*+ SET_VAR(foreign_key_checks=1) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select multicol_tbl1.colb, multicol_tbl1.cola, multicol_tbl1.y, multicol_tbl1.colc, multicol_tbl1.x from multicol_tbl1 where 1 != 1", + "Query": "select multicol_tbl1.colb, multicol_tbl1.cola, multicol_tbl1.y, multicol_tbl1.colc, multicol_tbl1.x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1, + 2, + 3, + 4 + ], + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from multicol_tbl2 where (colb, cola, x, colc, y) in ::fkc_vals", + "Table": "multicol_tbl2", + "Values": [ + "fkc_vals:1", + "fkc_vals:0", 
+ "fkc_vals:3" + ], + "Vindex": "multicolIdx" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1", + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with SET NULL", + "query": "delete from tbl8 where col8 = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl8 where col8 = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from tbl8 where col8 = 1", + "Table": "tbl8", + "Values": [ + "1" + ], + "Vindex": "hash_vin" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl8" + ] + } + }, + { + "comment": "Update in a table with cross-shard foreign keys works with foreign_key_checks off", + "query": "update tbl1 set t1col1 = 'foo' where col1 = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl1 set t1col1 = 'foo' where col1 = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=Off) */ tbl1 set t1col1 = 'foo' where col1 = 1", + "Table": "tbl1", + "Values": [ + "1" + ], + "Vindex": "hash_vin" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1" + ] + } + }, + { + "comment": "Update in a table with column modified not shard-scoped foreign key whereas other column referencing same table is works with 
foreign_key_checks off", + "query": "update tbl7 set t7col7 = 'foo', t7col72 = 42", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl7 set t7col7 = 'foo', t7col72 = 42", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=Off) */ tbl7 set t7col7 = 'foo', t7col72 = 42", + "Table": "tbl7" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl7" + ] + } + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade", + "query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl5 set t5col5 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl5 set t5col5 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ tbl4 set t4col4 = null where (t4col4) in ::fkc_vals and (t4col4) not in (('foo'))", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl5 set t5col5 = 'foo'", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Insertion in a table 
with 2 foreign keys constraint with same table on different columns - both are not shard scoped - works with foreign_key_checks off", + "query": "insert into tbl6 (col6, t6col6) values (100, 'foo')", + "plan": { + "QueryType": "INSERT", + "Original": "insert into tbl6 (col6, t6col6) values (100, 'foo')", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=Off) */ into tbl6(col6, t6col6) values (:_col6_0, 'foo')", + "TableName": "tbl6", + "VindexValues": { + "hash_vin": "100" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl6" + ] + } + }, + { + "comment": "delete table with shard scoped foreign key set default works with foreign_key_checks off", + "query": "delete from tbl20 where col = 'bar'", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl20 where col = 'bar'", + "Instructions": { + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from tbl20 where col = 'bar'", + "Table": "tbl20", + "Values": [ + "'bar'" + ], + "Vindex": "hash_vin" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl20" + ] + } + }, + { + "comment": "Delete table with cross-shard foreign key with set null - should be eventually allowed", + "query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from tbl9 where col9 = 34", + "plan": { + "QueryType": "DELETE", + "Original": "delete /*+ SET_VAR(foreign_key_checks=On) */ from tbl9 where col9 = 34", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl9.col9 from tbl9 where 1 != 1", + "Query": "select tbl9.col9 from 
tbl9 where col9 = 34 for update", + "Table": "tbl9", + "Values": [ + "34" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ tbl4 set col_ref = null where (col_ref) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from tbl9 where col9 = 34", + "Table": "tbl9", + "Values": [ + "34" + ], + "Vindex": "hash_vin" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl9" + ] + } + }, + { + "comment": "Delete with foreign key checks off", + "query": "delete /*+ SET_VAR(foreign_key_checks=off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete /*+ SET_VAR(foreign_key_checks=off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1" + ] + } + }, + { + "comment": "Update with foreign key checks off", + "query": "update /*+ SET_VAR(foreign_key_checks=0) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update /*+ SET_VAR(foreign_key_checks=0) */ u_multicol_tbl1 set cola 
= 1, colb = 2 where id = 3", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=Off) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1" + ] + } + }, + { + "comment": "Insert with cross shard foreign keys and foreign key checks off", + "query": "insert /*+ SET_VAR(foreign_key_checks=0) */ into tbl3 (col3, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert /*+ SET_VAR(foreign_key_checks=0) */ into tbl3 (col3, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=Off) */ into tbl3(col3, coly) values (:_col3_0, 3)", + "TableName": "tbl3", + "VindexValues": { + "hash_vin": "1" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl3" + ] + } + } +] diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json new file mode 100644 index 00000000000..7b525b2dcc9 --- /dev/null +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json @@ -0,0 +1,2462 @@ +[ + { + "comment": "Insertion in a table with cross-shard foreign keys disallowed", + "query": "insert into tbl3 (col3, coly) values (1, 3)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Insertion in a table with shard-scoped foreign keys is allowed", + "query": "insert into tbl2 (col2, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into tbl2 (col2, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": 
"sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=On) */ into tbl2(col2, coly) values (:_col2_0, 3)", + "TableName": "tbl2", + "VindexValues": { + "hash_vin": "1" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl2" + ] + } + }, + { + "comment": "Insertion in a table with shard-scoped multiple column foreign key is allowed", + "query": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into multicol_tbl2 (cola, colb, colc) values (1, 2, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=On) */ into multicol_tbl2(cola, colb, colc) values (:_cola_0, :_colb_0, :_colc_0)", + "TableName": "multicol_tbl2", + "VindexValues": { + "multicolIdx": "1, 2, 3" + } + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with cross-shard foreign keys disallowed", + "query": "delete from tbl1", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Delete in a table with not all column shard-scoped foreign keys - disallowed", + "query": "delete from tbl7", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Delete in a table with shard-scoped multiple column foreign key with cascade", + "query": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select multicol_tbl1.colb, 
multicol_tbl1.cola, multicol_tbl1.y, multicol_tbl1.colc, multicol_tbl1.x from multicol_tbl1 where 1 != 1", + "Query": "select multicol_tbl1.colb, multicol_tbl1.cola, multicol_tbl1.y, multicol_tbl1.colc, multicol_tbl1.x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1, + 2, + 3, + 4 + ], + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from multicol_tbl2 where (colb, cola, x, colc, y) in ::fkc_vals", + "Table": "multicol_tbl2", + "Values": [ + "fkc_vals:1", + "fkc_vals:0", + "fkc_vals:3" + ], + "Vindex": "multicolIdx" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1", + "sharded_fk_allow.multicol_tbl2" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with cascade", + "query": "delete from tbl5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl5", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl5.col5, tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.col5, tbl5.t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": 
"CascadeChild-1", + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from tbl4 where (col4) in ::fkc_vals", + "Table": "tbl4", + "Values": [ + "fkc_vals:0" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 1 + ], + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from tbl4 where (t4col4) in ::fkc_vals1", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from tbl5", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Delete in a table with shard-scoped foreign keys with SET NULL", + "query": "delete from tbl8 where col8 = 1", + "plan": "VT12001: unsupported: you cannot UPDATE primary vindex columns; invalid update on vindex: hash_vin" + }, + { + "comment": "Delete in a table with unsharded foreign key with SET NULL", + "query": "delete from u_tbl9 where col9 = 5", + "plan": { + "QueryType": "DELETE", + "Original": "delete from u_tbl9 where col9 = 5", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where col9 = 5 for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": 
"CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl8 set col8 = null where (col8) in ::fkc_vals", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from u_tbl9 where col9 = 5", + "Table": "u_tbl9" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in unsharded table with restrict", + "query": "update u_tbl5 set col5 = 'foo' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl5 set col5 = 'foo' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl5 set col5 = 'foo' where id = 1", + "Table": "u_tbl5" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl5" + ] + } + }, + { + "comment": "update in unsharded table with cascade", + "query": "update u_tbl2 set col2 = 'bar' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col2 = 'bar' where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + 
"Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((cast('bar' as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2 set col2 = 'bar' where id = 1", + "Table": "u_tbl2" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in unsharded table with cascade - on non-referenced column", + "query": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col_no_ref = 'baz' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2 set col_no_ref = 'baz' where id = 1", + "Table": "u_tbl2" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2" + ] + } + }, + { + "comment": "Update in a table with cross-shard foreign keys disallowed", + "query": "update tbl1 set t1col1 = 'foo' where col1 = 1", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update in a table with cross-shard foreign keys, column not in update expression - allowed", + "query": "update tbl1 set not_ref_col = 'foo' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl1 set not_ref_col = 'foo' where id = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + 
}, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl1 set not_ref_col = 'foo' where id = 1", + "Table": "tbl1" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1" + ] + } + }, + { + "comment": "Update in a table with column modified not shard-scoped foreign key whereas other column referencing same table is - disallowed", + "query": "update tbl7 set t7col7 = 'foo', t7col72 = 42", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade", + "query": "update tbl5 set t5col5 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl5 set t5col5 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.t5col5 from tbl5 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ tbl4 set t4col4 = null where (t4col4) in ::fkc_vals and (t4col4) not in (('foo'))", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl5 set t5col5 = 'foo'", + "Table": "tbl5" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Insertion in a table with 2 foreign keys constraint with same table on different columns - both are not shard scoped - disallowed", + "query": 
"insert into tbl6 (col6, t6col6) values (100, 'foo')", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "Update a table with parent and child foreign keys - shard scoped", + "query": "update tbl2 set col = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl2 set col = 'foo'", + "Instructions": { + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl2 set col = 'foo'", + "Table": "tbl2" + }, + "TablesUsed": [ + "sharded_fk_allow.tbl2" + ] + } + }, + { + "comment": "update table with column's parent foreign key cross shard", + "query": "update tbl10 set col = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl10 set col = 'foo'", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "1 as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl3.col is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "TableName": "tbl10_tbl3", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl10 where 1 != 1", + "Query": "select 1 from tbl10 where not (tbl10.col) <=> ('foo') for share", + "Table": "tbl10" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl3.col from tbl3 where 1 != 1", + "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' for share", + "Table": "tbl3" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + 
"Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl10 set col = 'foo'", + "Table": "tbl10" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl10", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "delete table with shard scoped foreign key set default - disallowed", + "query": "delete from tbl20 where col = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "Delete table with cross-shard foreign key with set null - should be eventually allowed", + "query": "delete from tbl9 where col9 = 34", + "plan": { + "QueryType": "DELETE", + "Original": "delete from tbl9 where col9 = 34", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl9.col9 from tbl9 where 1 != 1", + "Query": "select tbl9.col9 from tbl9 where col9 = 34 for update", + "Table": "tbl9", + "Values": [ + "34" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ tbl4 set col_ref = null where (col_ref) in ::fkc_vals", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from tbl9 where col9 = 34", + "Table": "tbl9", + "Values": [ + "34" + ], + "Vindex": "hash_vin" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl9" + ] + } + 
}, + { + "comment": "update table with same column having reference to different tables, one with on update cascade other with on update set null - child table have further reference", + "query": "update u_tbl1 set col1 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set col1 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast('foo' as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 'foo' where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + 
], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast('foo' as CHAR))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast('foo' as CHAR)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl1 set col1 = 'foo'", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update with limit with foreign keys", + "query": "update u_tbl2 set col2 = 'bar' limit 2", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set col2 = 'bar' limit 2", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Route", + 
"Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.id from u_tbl2 where 1 != 1", + "Query": "select /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2.id from u_tbl2 limit 2 for update", + "Table": "u_tbl2" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2.col2 from u_tbl2 where u_tbl2.id in ::dml_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((cast('bar' as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2 set col2 = 'bar' where u_tbl2.id in ::dml_vals", + "Table": "u_tbl2" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in a table with non-literal value - set null fail due to child update where condition", + "query": "update u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": 
"Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where u_tbl1.col1 is null and cast(u_tbl2.col1 + 'bar' as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.col1 + 'bar' as CHAR)) and u_tbl2.id = 1 limit 1 for share", + "Table": "u_tbl1, u_tbl2" + }, + { + "InputName": "PostVerify", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd" + } + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals and (:fkc_upd is null or (col3) not in ((:fkc_upd)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set m = 2, col2 = col1 + 'bar' where id = 1", + "Table": "u_tbl2" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + 
"unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in a table with non-literal value - with cascade fail as the cascade value is not known", + "query": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd" + } + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (cast(:fkc_upd as CHAR) is null or (col3) not in ((cast(:fkc_upd as CHAR))))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": 
"unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = :fkc_upd where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "NonLiteralUpdateInfo": [ + { + "CompExprCol": 1, + "UpdateExprCol": 2, + "UpdateExprBvName": "fkc_upd1" + } + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (:fkc_upd1 is null or (col9) not in ((:fkc_upd1))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (:fkc_upd1 is null or (col9) not in ((:fkc_upd1)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl1 set m = 2, col1 = x + 'bar' where id = 1", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + 
"unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in a table with set null, non-literal value on non-foreign key column - allowed", + "query": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where id = 1 for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals and (col3) not in ((cast(2 as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2 set m = col1 + 'bar', col2 = 2 where id = 1", + "Table": "u_tbl2" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update in a table with cascade, non-literal value on non-foreign key column - allowed", + "query": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Instructions": { + "OperatorType": 
"FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals1 and (col3) not in ((cast(2 as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast(2 as CHAR))) for 
update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast(2 as CHAR)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl1 set m = x + 'bar', col1 = 2 where id = 1", + "Table": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "update in a table with a child table having SET DEFAULT constraint - disallowed", + "query": "update tbl20 set col2 = 'bar'", + "plan": "VT09016: Cannot delete or update a parent row: a foreign key constraint fails" + }, + { + "comment": "delete in a table with limit", + "query": "delete from u_tbl2 limit 2", + "plan": { + "QueryType": "DELETE", + "Original": "delete from u_tbl2 limit 2", + "Instructions": { + "OperatorType": "DMLWithInput", + "TargetTabletType": "PRIMARY", + "Offset": [ + "0:[0]" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 
u_tbl2.id from u_tbl2 where 1 != 1", + "Query": "select /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2.id from u_tbl2 limit 2 for update", + "Table": "u_tbl2" + }, + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select /*+ SET_VAR(foreign_key_checks=On) */ u_tbl2.col2 from u_tbl2 where u_tbl2.id in ::dml_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from u_tbl2 where u_tbl2.id in ::dml_vals", + "Table": "u_tbl2" + } + ] + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update with fk on cross-shard with a where condition on non-literal value - disallowed", + "query": "update tbl3 set coly = colx + 10 where coly = 10", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl3 set coly = colx + 10 where coly = 10", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "1 as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl1.t1col1 is null", + "Inputs": [ + { + "OperatorType": 
"Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "tbl3_colx": 0 + }, + "TableName": "tbl3_tbl1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl3.colx from tbl3 where 1 != 1", + "Query": "select tbl3.colx from tbl3 where tbl3.colx + 10 is not null and not (tbl3.coly) <=> (tbl3.colx + 10) and tbl3.coly = 10 for share", + "Table": "tbl3" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = :tbl3_colx + 10 for share", + "Table": "tbl1" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ tbl3 set coly = colx + 10 where coly = 10", + "Table": "tbl3" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "update with fk on cross-shard with a where condition", + "query": "update tbl3 set coly = 20 where coly = 10", + "plan": { + "QueryType": "UPDATE", + "Original": "update tbl3 set coly = 20 where coly = 10", + "Instructions": { + "OperatorType": "FKVerify", + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "1 as 1" + ], + "Inputs": [ + { + "OperatorType": "Filter", + "Predicate": "tbl1.t1col1 is null", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "TableName": "tbl3_tbl1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": 
"sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select 1 from tbl3 where 1 != 1", + "Query": "select 1 from tbl3 where not (tbl3.coly) <=> (20) and tbl3.coly = 10 for share", + "Table": "tbl3" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 for share", + "Table": "tbl1" + } + ] + } + ] + } + ] + } + ] + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ tbl3 set coly = 20 where coly = 10", + "Table": "tbl3" + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl1", + "sharded_fk_allow.tbl3" + ] + } + }, + { + "comment": "Update in a table with shard-scoped foreign keys with cascade that requires a validation of a different parent foreign key", + "query": "update u_tbl6 set col6 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl6 set col6 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl6.col6 from u_tbl6 where 1 != 1", + "Query": "select u_tbl6.col6 from u_tbl6 for update", + "Table": "u_tbl6" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl8 left join u_tbl9 on u_tbl9.col9 = cast('foo' as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl8 left join 
u_tbl9 on u_tbl9.col9 = cast('foo' as CHAR) where u_tbl9.col9 is null and cast('foo' as CHAR) is not null and not (u_tbl8.col8) <=> (cast('foo' as CHAR)) and (u_tbl8.col8) in ::fkc_vals limit 1 for share nowait", + "Table": "u_tbl8, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl8 set col8 = 'foo' where (col8) in ::fkc_vals", + "Table": "u_tbl8" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl6 set col6 = 'foo'", + "Table": "u_tbl6" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl6", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification", + "query": "update u_tbl7 set col7 = 'foo'", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = 'foo'", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl7.col7 from u_tbl7 where 1 != 1", + "Query": "select u_tbl7.col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where 1 != 1", 
+ "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where u_tbl3.col3 is null and cast('foo' as CHAR) is not null and not (u_tbl4.col4) <=> (cast('foo' as CHAR)) and (u_tbl4.col4) in ::fkc_vals limit 1 for share", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where u_tbl4.col4 = u_tbl9.col9 and (u_tbl4.col4) in ::fkc_vals and (cast('foo' as CHAR) is null or (u_tbl9.col9) not in ((cast('foo' as CHAR)))) limit 1 for share", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = 'foo' where (col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl7 set col7 = 'foo'", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + "unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Update that cascades and requires parent fk and restrict child fk verification - bindVariable", + "query": "update u_tbl7 set col7 = :v1", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_tbl7 set col7 = :v1", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, 
+ "FieldQuery": "select u_tbl7.col7 from u_tbl7 where 1 != 1", + "Query": "select u_tbl7.col7 from u_tbl7 for update", + "Table": "u_tbl7" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FKVerify", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "VerifyParent-1", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where 1 != 1", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where u_tbl3.col3 is null and cast(:v1 as CHAR) is not null and not (u_tbl4.col4) <=> (cast(:v1 as CHAR)) and (u_tbl4.col4) in ::fkc_vals limit 1 for share", + "Table": "u_tbl3, u_tbl4" + }, + { + "InputName": "VerifyChild-2", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", + "Query": "select 1 from u_tbl4, u_tbl9 where u_tbl4.col4 = u_tbl9.col9 and (u_tbl4.col4) in ::fkc_vals and (cast(:v1 as CHAR) is null or (u_tbl9.col9) not in ((cast(:v1 as CHAR)))) limit 1 for share", + "Table": "u_tbl4, u_tbl9" + }, + { + "InputName": "PostVerify", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl4 set col4 = :v1 where (col4) in ::fkc_vals", + "Table": "u_tbl4" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl7 set col7 = :v1", + "Table": "u_tbl7" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl4", + 
"unsharded_fk_allow.u_tbl7", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign keys disallowed", + "query": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1) values (1, 3) on duplicate key update col1 = 5", + "Instructions": { + "OperatorType": "Upsert", + "TargetTabletType": "PRIMARY", + "Inputs": [ + { + "InputName": "Insert-1", + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert /*+ SET_VAR(foreign_key_checks=On) */ into u_tbl1(id, col1) values (1, 3)", + "TableName": "u_tbl1" + }, + { + "InputName": "Update-1", + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where id = 1 for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in 
::fkc_vals1 and (col3) not in ((cast(5 as CHAR)))", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_tbl2 set col2 = 5 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "FkCascade", + "BvName": "fkc_vals2", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl9.col9 from u_tbl9 where 1 != 1", + "Query": "select u_tbl9.col9 from u_tbl9 where (col9) in ::fkc_vals2 and (col9) not in ((cast(5 as CHAR))) for update nowait", + "Table": "u_tbl9" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals3", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl8 set col8 = null where (col8) in ::fkc_vals3", + "Table": "u_tbl8" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl9 set col9 = null where (col9) in ::fkc_vals2 and (col9) not in ((cast(5 as CHAR)))", + "Table": "u_tbl9" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_tbl1 set col1 = 5 where id = 1", + "Table": "u_tbl1" + } + ] + } + ] + }, + "TablesUsed": [ + 
"unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3", + "unsharded_fk_allow.u_tbl8", + "unsharded_fk_allow.u_tbl9" + ] + } + }, + { + "comment": "Insert with on duplicate key update - foreign keys not on update column - allowed", + "query": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "plan": { + "QueryType": "INSERT", + "Original": "insert into u_tbl1 (id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=On) */ into u_tbl1(id, col1, foo) values (1, 3, 'bar') on duplicate key update foo = 'baz'", + "TableName": "u_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1" + ] + } + }, + { + "comment": "Insert with unsharded table having fk reference in sharded table", + "query": "insert into u_tbl (id, col) values (1, 2)", + "plan": "VT12002: unsupported: cross-shard foreign keys" + }, + { + "comment": "replace with fk reference unsupported", + "query": "replace into u_tbl1 (id, col1) values (1, 2)", + "plan": { + "QueryType": "INSERT", + "Original": "replace into u_tbl1 (id, col1) values (1, 2)", + "Instructions": { + "OperatorType": "Sequential", + "Inputs": [ + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl1.col1 from u_tbl1 where 1 != 1", + "Query": "select u_tbl1.col1 from u_tbl1 where (id) in ((1)) for update", + "Table": "u_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", 
+ "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_tbl2.col2 from u_tbl2 where 1 != 1", + "Query": "select u_tbl2.col2 from u_tbl2 where (col2) in ::fkc_vals for update", + "Table": "u_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_tbl3 set col3 = null where (col3) in ::fkc_vals1", + "Table": "u_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from u_tbl2 where (col2) in ::fkc_vals", + "Table": "u_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from u_tbl1 where (id) in ((1))", + "Table": "u_tbl1" + } + ] + }, + { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "NoAutoCommit": true, + "Query": "insert /*+ SET_VAR(foreign_key_checks=On) */ into u_tbl1(id, col1) values (1, 2)", + "TableName": "u_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_tbl1", + "unsharded_fk_allow.u_tbl2", + "unsharded_fk_allow.u_tbl3" + ] + } + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades", + "query": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { + "QueryType": "UPDATE", + "Original": "update u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Instructions": { + 
"OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where 1 != 1", + "Query": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where id = 3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": 
false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "update on a multicol foreign key that set nulls and then cascades - bindVariables", + "query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "plan": { + "QueryType": "UPDATE", + "Original": "update /*+ SET_VAR(foreign_key_checks=On) */ u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Instructions": { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where 1 != 1", + "Query": "select u_multicol_tbl1.cola, u_multicol_tbl1.colb from u_multicol_tbl1 where id = :v3 for update", + "Table": "u_multicol_tbl1" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "FkCascade", + "BvName": "fkc_vals", + "Cols": [ + 0, + 1 + ], + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "FieldQuery": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where 1 != 1", + "Query": "select u_multicol_tbl2.cola, u_multicol_tbl2.colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2)))) for update", + "Table": "u_multicol_tbl2" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + 
"TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 0, + 1 + ], + "Query": "update /*+ SET_VAR(foreign_key_checks=OFF) */ u_multicol_tbl3 set cola = null, colb = null where (cola, colb) in ::fkc_vals1", + "Table": "u_multicol_tbl3" + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=ON) */ u_multicol_tbl2 set cola = null, colb = null where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2))))", + "Table": "u_multicol_tbl2" + } + ] + }, + { + "InputName": "Parent", + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=On) */ u_multicol_tbl1 set cola = :v1, colb = :v2 where id = :v3", + "Table": "u_multicol_tbl1" + } + ] + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1", + "unsharded_fk_allow.u_multicol_tbl2", + "unsharded_fk_allow.u_multicol_tbl3" + ] + } + }, + { + "comment": "Cascaded delete run from prepared statement", + "query": "execute prep_delete using @foo", + "plan": { + "QueryType": "EXECUTE", + "Original": "execute prep_delete using @foo", + "Instructions": { + "OperatorType": "EXECUTE", + "Parameters": [ + "foo" + ], + "Inputs": [ + { + "OperatorType": "FkCascade", + "Inputs": [ + { + "InputName": "Selection", + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "FieldQuery": "select tbl5.col5, tbl5.t5col5 from tbl5 where 1 != 1", + "Query": "select tbl5.col5, tbl5.t5col5 from tbl5 where id = :v1 for update", + "Table": "tbl5" + }, + { + "InputName": "CascadeChild-1", + "OperatorType": "Delete", + "Variant": "MultiEqual", + "Keyspace": { + "Name": "sharded_fk_allow", + 
"Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals", + "Cols": [ + 0 + ], + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from tbl4 where (col4) in ::fkc_vals", + "Table": "tbl4", + "Values": [ + "fkc_vals:0" + ], + "Vindex": "hash_vin" + }, + { + "InputName": "CascadeChild-2", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "BvName": "fkc_vals1", + "Cols": [ + 1 + ], + "Query": "delete /*+ SET_VAR(foreign_key_checks=ON) */ from tbl4 where (t4col4) in ::fkc_vals1", + "Table": "tbl4" + }, + { + "InputName": "Parent", + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=On) */ from tbl5 where id = :v1", + "Table": "tbl5" + } + ] + } + ] + }, + "TablesUsed": [ + "sharded_fk_allow.tbl4", + "sharded_fk_allow.tbl5" + ] + } + }, + { + "comment": "Delete with foreign key checks off", + "query": "delete /*+ SET_VAR(foreign_key_checks=off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "plan": { + "QueryType": "DELETE", + "Original": "delete /*+ SET_VAR(foreign_key_checks=off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Instructions": { + "OperatorType": "Delete", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete /*+ SET_VAR(foreign_key_checks=Off) */ from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3", + "Table": "multicol_tbl1", + "Values": [ + "1", + "2", + "3" + ], + "Vindex": "multicolIdx" + }, + "TablesUsed": [ + "sharded_fk_allow.multicol_tbl1" + ] + } + }, + { + "comment": "Update with foreign key checks off", + "query": "update /*+ SET_VAR(foreign_key_checks=0) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "plan": { 
+ "QueryType": "UPDATE", + "Original": "update /*+ SET_VAR(foreign_key_checks=0) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "unsharded_fk_allow", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update /*+ SET_VAR(foreign_key_checks=Off) */ u_multicol_tbl1 set cola = 1, colb = 2 where id = 3", + "Table": "u_multicol_tbl1" + }, + "TablesUsed": [ + "unsharded_fk_allow.u_multicol_tbl1" + ] + } + }, + { + "comment": "Insert with cross shard foreign keys and foreign key checks off", + "query": "insert /*+ SET_VAR(foreign_key_checks=0) */ into tbl3 (col3, coly) values (1, 3)", + "plan": { + "QueryType": "INSERT", + "Original": "insert /*+ SET_VAR(foreign_key_checks=0) */ into tbl3 (col3, coly) values (1, 3)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Sharded", + "Keyspace": { + "Name": "sharded_fk_allow", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "insert /*+ SET_VAR(foreign_key_checks=Off) */ into tbl3(col3, coly) values (:_col3_0, 3)", + "TableName": "tbl3", + "VindexValues": { + "hash_vin": "1" + } + }, + "TablesUsed": [ + "sharded_fk_allow.tbl3" + ] + } + } +] diff --git a/go/vt/vtgate/planbuilder/testdata/from_cases.json b/go/vt/vtgate/planbuilder/testdata/from_cases.json index 7a3c13c3635..81381f3d7d7 100644 --- a/go/vt/vtgate/planbuilder/testdata/from_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/from_cases.json @@ -409,6 +409,28 @@ ] } }, + { + "comment": "DISTINCT inside derived table", + "query": "select * from (select distinct name from user) as t", + "plan": { + "QueryType": "SELECT", + "Original": "select * from (select distinct name from user) as t", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name` from (select `name` from `user` where 1 != 1) as t where 1 != 1", + 
"Query": "select `name` from (select distinct `name` from `user`) as t", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, { "comment": "',' join unsharded", "query": "select u1.a, u2.a from unsharded u1, unsharded u2", @@ -711,8 +733,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select m1.col from unsharded as m1 join unsharded as m2 where 1 != 1", - "Query": "select m1.col from unsharded as m1 join unsharded as m2", + "FieldQuery": "select m1.col from unsharded as m1 straight_join unsharded as m2 where 1 != 1", + "Query": "select m1.col from unsharded as m1 straight_join unsharded as m2", "Table": "unsharded" }, "TablesUsed": [ @@ -1821,7 +1843,7 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", + "JoinColumnIndexes": "L:1,L:0", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1831,8 +1853,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.col1, t.id from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.col1, t.id from (select `user`.id, `user`.col1 from `user`) as t", + "FieldQuery": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user`) as t", "Table": "`user`" }, { @@ -1879,7 +1901,7 @@ "Variant": "Join", "JoinColumnIndexes": "L:0", "JoinVars": { - "user_col": 1 + "user_col": 2 }, "TableName": "`user`_user_extra", "Inputs": [ @@ -1890,8 +1912,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.id, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.id, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user`) as t", + "FieldQuery": "select t.id, t.col1, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user` where 1 != 1) as t where 1 != 1", + 
"Query": "select t.id, t.col1, t.`user.col` from (select `user`.id, `user`.col1, `user`.col as `user.col` from `user`) as t", "Table": "`user`" }, { @@ -1939,7 +1961,7 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0", + "JoinColumnIndexes": "L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -1949,8 +1971,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.col1 from (select `user`.id, `user`.col1 from `user`) as t", + "FieldQuery": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user`) as t", "Table": "`user`" }, { @@ -2004,7 +2026,7 @@ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0", + "JoinColumnIndexes": "L:1", "TableName": "`user`_user_extra", "Inputs": [ { @@ -2014,8 +2036,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.col1 from (select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id) as t", + "FieldQuery": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col1 from (select `user`.id, `user`.col1 from `user` where `user`.id = :ua_id) as t", "Table": "`user`", "Values": [ ":ua_id" @@ -2443,41 +2465,32 @@ "QueryType": "SELECT", "Original": "select id, t.id from (select user.id from user join user_extra) as t", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", - 
"Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select `user`.id from `user`) as t", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select t.id from (select `user`.id from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id from (select `user`.id from `user`) as t", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] }, @@ -2835,20 +2848,28 @@ "QueryType": "SELECT", "Original": "select a as k from (select count(*) as a from user) t", "Instructions": { - "OperatorType": "Aggregate", - "Variant": "Scalar", - "Aggregates": "sum_count_star(0) AS a", + "OperatorType": "SimpleProjection", + "ColumnNames": [ + "0:k" + ], "Inputs": [ { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select count(*) as a from `user` where 1 != 1", - "Query": "select count(*) as a from `user`", - "Table": "`user`" + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "sum_count_star(0) AS a", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select count(*) as a from `user` where 1 != 1", + "Query": "select count(*) as a from `user`", + "Table": "`user`" + } 
+ ] } ] }, @@ -2898,8 +2919,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id from (select `user`.id, `user`.col from `user` where `user`.id = 5) as t", + "FieldQuery": "select t.id, t.col from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id, t.col from (select `user`.id, `user`.col from `user` where `user`.id = 5) as t", "Table": "`user`", "Values": [ "5" @@ -3137,7 +3158,7 @@ "Sharded": true }, "FieldQuery": "select user_id from user_extra where 1 != 1", - "Query": "select user_id from user_extra limit :__upper_limit", + "Query": "select user_id from user_extra limit 1", "Table": "user_extra" } ] @@ -3507,15 +3528,21 @@ ] }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra as ue where 1 != 1", - "Query": "select 1 from user_extra as ue", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra as ue where 1 != 1", + "Query": "select 1 from user_extra as ue limit 1", + "Table": "user_extra" + } + ] } ] } @@ -3760,8 +3787,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative where 1 != 1", - "Query": "select authoritative.col1 as col1, authoritative.user_id as user_id, authoritative.col2 as col2 from authoritative", + "FieldQuery": "select authoritative.col1, authoritative.user_id, authoritative.col2 from authoritative where 1 != 1", + "Query": "select authoritative.col1, authoritative.user_id, authoritative.col2 from authoritative", "Table": "authoritative" }, { @@ -3771,8 +3798,8 @@ "Name": "main", 
"Sharded": false }, - "FieldQuery": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where 1 != 1", - "Query": "select unsharded_authoritative.col2 as col2 from unsharded_authoritative where unsharded_authoritative.col1 = :authoritative_col1", + "FieldQuery": "select unsharded_authoritative.col2 from unsharded_authoritative where 1 != 1", + "Query": "select unsharded_authoritative.col2 from unsharded_authoritative where unsharded_authoritative.col1 = :authoritative_col1", "Table": "unsharded_authoritative" } ] @@ -3989,8 +4016,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select A.col1 as col1, A.col2 as col2, B.col2 as col2 from unsharded_authoritative as A left join unsharded_authoritative as B on A.col1 = B.col1 where 1 != 1", - "Query": "select A.col1 as col1, A.col2 as col2, B.col2 as col2 from unsharded_authoritative as A left join unsharded_authoritative as B on A.col1 = B.col1", + "FieldQuery": "select * from unsharded_authoritative as A left join unsharded_authoritative as B using (col1) where 1 != 1", + "Query": "select * from unsharded_authoritative as A left join unsharded_authoritative as B using (col1)", "Table": "unsharded_authoritative" }, "TablesUsed": [ @@ -4050,5 +4077,417 @@ "comment": "select with a target destination", "query": "select * from `user[-]`.user_metadata", "plan": "VT09017: SELECT with a target destination is not allowed" + }, + { + "comment": "query that needs a hash join", + "query": "select id from user left join (select col from user_extra limit 10) ue on user.col = ue.col", + "plan": { + "QueryType": "SELECT", + "Original": "select id from user left join (select col from user_extra limit 10) ue on user.col = ue.col", + "Instructions": { + "OperatorType": "Join", + "Variant": "HashLeftJoin", + "Collation": "binary", + "ComparisonType": "INT16", + "JoinColumnIndexes": "-2", + "Predicate": "`user`.col = ue.col", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + 
"Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.col, id from `user` where 1 != 1", + "Query": "select `user`.col, id from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.col from (select col from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select ue.col from (select col from user_extra) as ue limit 10", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "query that needs a hash join - both sides have limits", + "query": "select id, user_id from (select id, col from user limit 10) u join (select col, user_id from user_extra limit 10) ue on u.col = ue.col", + "plan": { + "QueryType": "SELECT", + "Original": "select id, user_id from (select id, col from user limit 10) u join (select col, user_id from user_extra limit 10) ue on u.col = ue.col", + "Instructions": { + "OperatorType": "Join", + "Variant": "HashJoin", + "Collation": "binary", + "ComparisonType": "INT16", + "JoinColumnIndexes": "-1,2", + "Predicate": "u.col = ue.col", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col from (select id, col from `user` where 1 != 1) as u where 1 != 1", + "Query": "select u.id, u.col from (select id, col from `user`) as u limit 10", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.col, ue.user_id from (select col, user_id from user_extra where 1 != 1) as ue 
where 1 != 1", + "Query": "select ue.col, ue.user_id from (select col, user_id from user_extra) as ue limit 10", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "query that needs a hash join - both sides have limits. check that it can be merged even with the hash join", + "query": "select id, user_id from (select id, col from user where id = 17 limit 10) u join (select col, user_id from user_extra where user_id = 17 limit 10) ue on u.col = ue.col", + "plan": { + "QueryType": "SELECT", + "Original": "select id, user_id from (select id, col from user where id = 17 limit 10) u join (select col, user_id from user_extra where user_id = 17 limit 10) ue on u.col = ue.col", + "Instructions": { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select id, user_id from (select id, col from `user` where 1 != 1) as u, (select col, user_id from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select id, user_id from (select id, col from `user` where id = 17 limit 10) as u, (select col, user_id from user_extra where user_id = 17 limit 10) as ue where u.col = ue.col", + "Table": "`user`, user_extra", + "Values": [ + "17" + ], + "Vindex": "user_index" + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "query that needs a hash join - outer side has LIMIT. 
distinct should be pushed down", + "query": "select distinct id, user_id from (select id, col from user) u left join (select col, user_id from user_extra limit 10) ue on u.col = ue.col", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct id, user_id from (select id, col from user) u left join (select col, user_id from user_extra limit 10) ue on u.col = ue.col", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:3)" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "HashLeftJoin", + "Collation": "binary", + "ComparisonType": "INT16", + "JoinColumnIndexes": "-1,2,-3,3", + "Predicate": "u.col = ue.col", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.col, weight_string(u.id) from (select id, col from `user` where 1 != 1) as u where 1 != 1", + "Query": "select distinct u.id, u.col, weight_string(u.id) from (select id, col from `user`) as u", + "Table": "`user`" + }, + { + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select ue.col, ue.user_id, weight_string(ue.user_id) from (select col, user_id from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select ue.col, ue.user_id, weight_string(ue.user_id) from (select col, user_id from user_extra) as ue limit 10", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "unexpanded columns are fine if we can push down into single route", + "query": "select x from (select t.*, 1 as x from unsharded t union select t.*, 1 as x from unsharded t) as x", + "plan": { + "QueryType": "SELECT", + "Original": "select x from (select t.*, 1 as x from unsharded t union select t.*, 1 as x from unsharded t) as 
x", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select x from (select t.*, 1 as x from unsharded as t where 1 != 1 union select t.*, 1 as x from unsharded as t where 1 != 1) as x where 1 != 1", + "Query": "select x from (select t.*, 1 as x from unsharded as t union select t.*, 1 as x from unsharded as t) as x", + "Table": "unsharded" + }, + "TablesUsed": [ + "main.unsharded" + ] + } + }, + { + "comment": "pushing derived projection under the join should not cause problems", + "query": "SELECT count(*) FROM (SELECT DISTINCT u.user_id FROM user u JOIN user_extra ue ON u.id = ue.user_id JOIN music m ON m.id = u.id) subquery_for_count", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT count(*) FROM (SELECT DISTINCT u.user_id FROM user u JOIN user_extra ue ON u.id = ue.user_id JOIN music m ON m.id = u.id) subquery_for_count", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Scalar", + "Aggregates": "count_star(0) AS count(*)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": "1", + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "1" + ], + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:1,R:1", + "JoinVars": { + "m_id": 0 + }, + "TableName": "music_`user`, user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select subquery_for_count.`m.id`, 1 from (select m.id as `m.id` from music as m where 1 != 1) as subquery_for_count where 1 != 1", + "Query": "select distinct subquery_for_count.`m.id`, 1 from (select m.id as `m.id` from music as m) as subquery_for_count", + "Table": "music" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select subquery_for_count.user_id, 
weight_string(subquery_for_count.user_id) from (select u.user_id from `user` as u, user_extra as ue where 1 != 1) as subquery_for_count where 1 != 1", + "Query": "select distinct subquery_for_count.user_id, weight_string(subquery_for_count.user_id) from (select u.user_id from `user` as u, user_extra as ue where u.id = :m_id and u.id = ue.user_id) as subquery_for_count", + "Table": "`user`, user_extra", + "Values": [ + ":m_id" + ], + "Vindex": "user_index" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "join table influencing vindex selection with ignore and use vindex syntax", + "query": "select u.intcol, u.id from user u use vindex (name_user_map) join music m ignore vindex(user_index) on u.col = m.col where u.name = 'bb' and u.id = 3 and m.user_id = 5 and m.id = 20", + "plan": { + "QueryType": "SELECT", + "Original": "select u.intcol, u.id from user u use vindex (name_user_map) join music m ignore vindex(user_index) on u.col = m.col where u.name = 'bb' and u.id = 3 and m.user_id = 5 and m.id = 20", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "JoinVars": { + "u_col": 2 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Values": [ + "'bb'" + ], + "Vindex": "name_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.intcol, u.id, u.col from 
`user` as u where 1 != 1", + "Query": "select u.intcol, u.id, u.col from `user` as u where u.`name` = 'bb' and u.id = 3", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.user_id = 5 and m.id = 20 and m.col = :u_col", + "Table": "music", + "Values": [ + "20" + ], + "Vindex": "music_user_map" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "Select everything from a derived table having a cross-shard join", + "query": "select * from (select u.foo * ue.bar from user u join user_extra ue) as dt", + "plan": { + "QueryType": "SELECT", + "Original": "select * from (select u.foo * ue.bar from user u join user_extra ue) as dt", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "u_foo": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.foo from (select u.foo from `user` as u where 1 != 1) as dt where 1 != 1", + "Query": "select dt.foo from (select u.foo from `user` as u) as dt", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.`u.foo * ue.bar` from (select :u_foo * ue.bar as `u.foo * ue.bar` from user_extra as ue where 1 != 1) as dt where 1 != 1", + "Query": "select dt.`u.foo * ue.bar` from (select :u_foo * ue.bar as `u.foo * ue.bar` from user_extra as ue) as dt", + "Table": "user_extra" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/hash_joins.txt b/go/vt/vtgate/planbuilder/testdata/hash_joins.txt deleted file mode 100644 index 
afc175a581c..00000000000 --- a/go/vt/vtgate/planbuilder/testdata/hash_joins.txt +++ /dev/null @@ -1,531 +0,0 @@ -# Test cases in this file are currently turned off -# Multi-route unique vindex constraint (with hash join) -"select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5" -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "1", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ `user`.col from `user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } -} -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user join user_extra on user.col = user_extra.col where user.id = 5", - "Instructions": { - "OperatorType": "Join", - "Variant": "HashJoin", - "ComparisonType": "INT16", - "JoinColumnIndexes": "2", - "Predicate": "`user`.col = user_extra.col", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ `user`.col from 
`user` where `user`.id = 5", - "Table": "`user`", - "Values": [ - "INT64(5)" - ], - "Vindex": "user_index" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col, user_extra.id from user_extra where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.col, user_extra.id from user_extra", - "Table": "user_extra" - } - ] - } -} - - -# Multi-route with non-route constraint, should use first route. -"select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1" -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "1", - "JoinVars": { - "user_col": 0 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ `user`.col from `user` where 1 = 1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.id from user_extra where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } -} -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.id from user join user_extra on user.col = user_extra.col where 1 = 1", - "Instructions": { - "OperatorType": "Join", - "Variant": "HashJoin", - "ComparisonType": "INT16", - "JoinColumnIndexes": "2", - "Predicate": "`user`.col = user_extra.col", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col from `user` where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ `user`.col from `user` where 1 = 1", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col, user_extra.id from user_extra where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.col, user_extra.id from user_extra where 1 = 1", - "Table": "user_extra" - } - ] - } -} - -# wire-up on within cross-shard derived table (hash-join version) -"select /*vt+ ALLOW_HASH_JOIN */ t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t" -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ t.id from (select user.id, user.col1 from user join user_extra on user_extra.col = user.col) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1,-2", - "JoinVars": { - "user_col": 2 - }, - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.id, `user`.col1, `user`.col from `user` where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ `user`.id, `user`.col1, `user`.col from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ 1 from user_extra where user_extra.col = :user_col", - "Table": "user_extra" - } - ] - } - ] - } -} -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ t.id from (select user.id, user.col1 from user join user_extra on 
user_extra.col = user.col) as t", - "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "HashJoin", - "ComparisonType": "INT16", - "JoinColumnIndexes": "-2,-3", - "Predicate": "user_extra.col = `user`.col", - "TableName": "`user`_user_extra", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `user`.col, `user`.id, `user`.col1 from `user` where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ `user`.col, `user`.id, `user`.col1 from `user`", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select user_extra.col from user_extra where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ user_extra.col from user_extra", - "Table": "user_extra" - } - ] - } - ] - } -} - -# hash join on int columns -"select /*vt+ ALLOW_HASH_JOIN */ u.id from user as u join user as uu on u.intcol = uu.intcol" -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ u.id from user as u join user as uu on u.intcol = uu.intcol", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1", - "JoinVars": { - "u_intcol": 1 - }, - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.id, u.intcol from `user` as u where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ u.id, u.intcol from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from `user` as uu where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ 1 from `user` as uu where uu.intcol = :u_intcol", - "Table": "`user`" - } - ] - } -} -{ - 
"QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ u.id from user as u join user as uu on u.intcol = uu.intcol", - "Instructions": { - "OperatorType": "Join", - "Variant": "HashJoin", - "ComparisonType": "INT16", - "JoinColumnIndexes": "-2", - "Predicate": "u.intcol = uu.intcol", - "TableName": "`user`_`user`", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select u.intcol, u.id from `user` as u where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ u.intcol, u.id from `user` as u", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select uu.intcol from `user` as uu where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ uu.intcol from `user` as uu", - "Table": "`user`" - } - ] - } -} - -# Author5.joins(books: [{orders: :customer}, :supplier]) (with hash join) -"select /*vt+ ALLOW_HASH_JOIN */ author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id" -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1,-2,-3,-4", - "JoinVars": { - "book6s_supplier5_id": 4 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s_supplier5s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", 
- "JoinColumnIndexes": "-1,-2,-3,-4,-5", - "JoinVars": { - "order2s_customer2_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s_customer2s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1,-2,-3,-4,-5,1", - "JoinVars": { - "book6s_order2s_order2_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s_order2s", - "Inputs": [ - { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "-1,-2,-3,-4,-5,1", - "JoinVars": { - "book6s_id": 5 - }, - "TableName": "author5s, book6s_book6s_order2s", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s join book6s on book6s.author5_id = author5s.id", - "Table": "author5s, book6s" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id", - "Table": "book6s_order2s", - "Values": [ - ":book6s_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select order2s.customer2_id from order2s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ order2s.customer2_id from order2s where order2s.id = :book6s_order2s_order2_id", - "Table": "order2s" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": 
{ - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from customer2s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ 1 from customer2s where customer2s.id = :order2s_customer2_id", - "Table": "customer2s", - "Values": [ - ":order2s_customer2_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from supplier5s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ 1 from supplier5s where supplier5s.id = :book6s_supplier5_id", - "Table": "supplier5s", - "Values": [ - ":book6s_supplier5_id" - ], - "Vindex": "binary_md5" - } - ] - } -} -{ - "QueryType": "SELECT", - "Original": "select /*vt+ ALLOW_HASH_JOIN */ author5s.* from author5s join book6s on book6s.author5_id = author5s.id join book6s_order2s on book6s_order2s.book6_id = book6s.id join order2s on order2s.id = book6s_order2s.order2_id join customer2s on customer2s.id = order2s.customer2_id join supplier5s on supplier5s.id = book6s.supplier5_id", - "Instructions": { - "OperatorType": "Join", - "Variant": "HashJoin", - "ComparisonType": "INT64", - "JoinColumnIndexes": "2,3,4,5", - "Predicate": "order2s.id = book6s_order2s.order2_id", - "TableName": "customer2s, order2s_author5s, book6s_book6s_order2s_supplier5s", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select order2s.id from order2s, customer2s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ order2s.id from order2s, customer2s where customer2s.id = order2s.customer2_id", - "Table": "customer2s, order2s" - }, - { - "OperatorType": "Join", - "Variant": "HashJoin", - "ComparisonType": "INT64", - "JoinColumnIndexes": "-1,-2,-3,-4,-5", - "Predicate": "supplier5s.id = book6s.supplier5_id", - "TableName": "author5s, book6s_book6s_order2s_supplier5s", - "Inputs": [ - { - "OperatorType": "Join", - 
"Variant": "Join", - "JoinColumnIndexes": "1,-3,-4,-5,-6", - "JoinVars": { - "book6s_id": 0 - }, - "Predicate": "book6s_order2s.book6_id = book6s.id", - "TableName": "author5s, book6s_book6s_order2s", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ book6s.id, book6s.supplier5_id, author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at from author5s, book6s where book6s.author5_id = author5s.id", - "Table": "author5s, book6s" - }, - { - "OperatorType": "Route", - "Variant": "EqualUnique", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select book6s_order2s.order2_id from book6s_order2s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ book6s_order2s.order2_id from book6s_order2s where book6s_order2s.book6_id = :book6s_id", - "Table": "book6s_order2s", - "Values": [ - ":book6s_id" - ], - "Vindex": "binary_md5" - } - ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select supplier5s.id from supplier5s where 1 != 1", - "Query": "select /*vt+ ALLOW_HASH_JOIN */ supplier5s.id from supplier5s", - "Table": "supplier5s" - } - ] - } - ] - } -} diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json index 50234d5ed73..09e04b47343 100644 --- a/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/info_schema57_cases.json @@ -103,7 +103,7 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], 
"Inputs": [ { @@ -146,13 +146,13 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci", - "1: utf8mb4_0900_ai_ci", - "2: utf8mb4_0900_ai_ci", - "3: utf8mb4_0900_ai_ci", - "4: utf8mb4_0900_ai_ci", + "0: utf8mb3_general_ci", + "1: utf8mb3_general_ci", + "2: utf8mb3_general_ci", + "3: utf8mb3_general_ci", + "4: utf8mb3_general_ci", "5", - "6: utf8mb4_0900_ai_ci", + "6: utf8mb3_general_ci", "7", "8", "9", @@ -163,10 +163,10 @@ "14", "15", "16", - "17: utf8mb4_0900_ai_ci", + "17: utf8mb3_general_ci", "18", - "19: utf8mb4_0900_ai_ci", - "20: utf8mb4_0900_ai_ci" + "19: utf8mb3_general_ci", + "20: utf8mb3_general_ci" ], "Inputs": [ { @@ -307,7 +307,7 @@ "Sharded": false }, "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by ordinal_position asc", + "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by kcu.ordinal_position asc", "SysTableTableSchema": "[:v1]", "Table": 
"information_schema.key_column_usage" }, @@ -750,8 +750,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select a.VARIABLE_NAME as VARIABLE_NAME, a.VARIABLE_VALUE as VARIABLE_VALUE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b where 1 != 1", - "Query": "select a.VARIABLE_NAME as VARIABLE_NAME, a.VARIABLE_VALUE as VARIABLE_VALUE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b", + "FieldQuery": "select a.VARIABLE_NAME, a.VARIABLE_VALUE, b.CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME, b.DESCRIPTION, b.MAXLEN from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b where 1 != 1", + "Query": "select a.VARIABLE_NAME, a.VARIABLE_VALUE, b.CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME, b.DESCRIPTION, b.MAXLEN from information_schema.GLOBAL_STATUS as a, information_schema.CHARACTER_SETS as b", "Table": "information_schema.CHARACTER_SETS, information_schema.GLOBAL_STATUS" } } @@ -1000,7 +1000,7 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], "Inputs": [ { @@ -1037,10 +1037,10 @@ }, { "comment": "merge even one side have schema name in subquery", - "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", + "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `TABLE_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "plan": { 
"QueryType": "SELECT", - "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", + "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `TABLE_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "Instructions": { "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", @@ -1053,7 +1053,7 @@ "InputName": "SubQuery", "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], "Inputs": [ { @@ -1066,8 +1066,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select :COLUMN_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select distinct :COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "['a']", "Table": "information_schema.`tables`" }, @@ -1139,5 +1139,47 @@ "Table": "information_schema.apa" } } + }, + { + "comment": "LIMIT 1 inside derived table on the RHS should not be a problem", + "query": "SELECT c.column_name FROM information_schema.columns c JOIN ( SELECT table_name FROM information_schema.tables WHERE table_schema != 'information_schema' LIMIT 1 ) AS tables ON tables.table_name = c.table_name", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT c.column_name FROM information_schema.columns c JOIN ( SELECT table_name FROM information_schema.tables WHERE table_schema != 'information_schema' LIMIT 1 ) AS tables ON tables.table_name = c.table_name", + "Instructions": { + "OperatorType": 
"Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "tables_table_name": 0 + }, + "TableName": "information_schema.`tables`_information_schema.`columns`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select `tables`.table_name from (select table_name from information_schema.`tables` where 1 != 1) as `tables` where 1 != 1", + "Query": "select `tables`.table_name from (select table_name from information_schema.`tables` where table_schema != 'information_schema' limit 1) as `tables`", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select c.column_name from information_schema.`columns` as c where 1 != 1", + "Query": "select c.column_name from information_schema.`columns` as c where c.table_name = :c_table_name /* VARCHAR */", + "SysTableTableName": "[c_table_name::tables_table_name]", + "Table": "information_schema.`columns`" + } + ] + } + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json index b37804b9584..3df016e0aa3 100644 --- a/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/info_schema80_cases.json @@ -103,7 +103,7 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], "Inputs": [ { @@ -146,13 +146,13 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci", - "1: utf8mb4_0900_ai_ci", - "2: utf8mb4_0900_ai_ci", - "3", - "4: utf8mb4_0900_ai_ci", + "0: utf8mb3_general_ci", + "1: utf8mb3_general_ci", + "2: utf8mb3_general_ci", + "3: binary", + "4: utf8mb3_general_ci", "5", - "6", + "6: binary", "7", "8", "9", @@ -163,10 +163,10 @@ "14", "15", "16", - "17: utf8mb4_0900_ai_ci", + "17: 
utf8mb3_general_ci", "18", - "19: utf8mb4_0900_ai_ci", - "20: utf8mb4_0900_ai_ci" + "19: utf8mb3_general_ci", + "20" ], "Inputs": [ { @@ -307,7 +307,7 @@ "Sharded": false }, "FieldQuery": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where 1 != 1", - "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by ordinal_position asc", + "Query": "select kcu.constraint_name as constraint_name, kcu.column_name as column_name, kcu.referenced_table_name as referenced_table_name, kcu.referenced_column_name as referenced_column_name, kcu.ordinal_position as ordinal_position, kcu.table_name as table_name from information_schema.key_column_usage as kcu where kcu.table_schema = :__vtschemaname /* VARCHAR */ and kcu.referenced_column_name is not null order by kcu.ordinal_position asc", "SysTableTableSchema": "[:v1]", "Table": "information_schema.key_column_usage" }, @@ -815,8 +815,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select a.CONSTRAINT_CATALOG as CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA as CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME as CONSTRAINT_NAME, a.CHECK_CLAUSE as CHECK_CLAUSE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b where 1 != 1", - "Query": "select 
a.CONSTRAINT_CATALOG as CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA as CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME as CONSTRAINT_NAME, a.CHECK_CLAUSE as CHECK_CLAUSE, b.CHARACTER_SET_NAME as CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME as DEFAULT_COLLATE_NAME, b.DESCRIPTION as DESCRIPTION, b.MAXLEN as MAXLEN from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b", + "FieldQuery": "select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.CHECK_CLAUSE, b.CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME, b.DESCRIPTION, b.MAXLEN from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b where 1 != 1", + "Query": "select a.CONSTRAINT_CATALOG, a.CONSTRAINT_SCHEMA, a.CONSTRAINT_NAME, a.CHECK_CLAUSE, b.CHARACTER_SET_NAME, b.DEFAULT_COLLATE_NAME, b.DESCRIPTION, b.MAXLEN from information_schema.CHECK_CONSTRAINTS as a, information_schema.CHARACTER_SETS as b", "Table": "information_schema.CHARACTER_SETS, information_schema.CHECK_CONSTRAINTS" } } @@ -1065,7 +1065,7 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], "Inputs": [ { @@ -1102,10 +1102,10 @@ }, { "comment": "merge even one side have schema name in subquery", - "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", + "query": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `TABLE_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "plan": { "QueryType": "SELECT", - "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `COLUMN_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", 
+ "Original": "select `COLLATION_NAME` from information_schema.`COLUMNS` t where `COLUMN_NAME` in (select `TABLE_NAME` from information_schema.tables t where t.TABLE_SCHEMA = 'a' union select `COLUMN_NAME` from information_schema.columns)", "Instructions": { "OperatorType": "UncorrelatedSubquery", "Variant": "PulloutIn", @@ -1118,7 +1118,7 @@ "InputName": "SubQuery", "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], "Inputs": [ { @@ -1131,8 +1131,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select :COLUMN_NAME from information_schema.`tables` as t where 1 != 1", - "Query": "select distinct :COLUMN_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", + "FieldQuery": "select TABLE_NAME from information_schema.`tables` as t where 1 != 1", + "Query": "select distinct TABLE_NAME from information_schema.`tables` as t where t.TABLE_SCHEMA = :__vtschemaname /* VARCHAR */", "SysTableTableSchema": "['a']", "Table": "information_schema.`tables`" }, @@ -1261,5 +1261,47 @@ "Table": "information_schema.apa" } } + }, + { + "comment": "LIMIT 1 inside derived table on the RHS should not be a problem", + "query": "SELECT c.column_name FROM information_schema.columns c JOIN ( SELECT table_name FROM information_schema.tables WHERE table_schema != 'information_schema' LIMIT 1 ) AS tables ON tables.table_name = c.table_name", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT c.column_name FROM information_schema.columns c JOIN ( SELECT table_name FROM information_schema.tables WHERE table_schema != 'information_schema' LIMIT 1 ) AS tables ON tables.table_name = c.table_name", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "tables_table_name": 0 + }, + "TableName": "information_schema.`tables`_information_schema.`columns`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + 
"Name": "main", + "Sharded": false + }, + "FieldQuery": "select `tables`.table_name from (select table_name from information_schema.`tables` where 1 != 1) as `tables` where 1 != 1", + "Query": "select `tables`.table_name from (select table_name from information_schema.`tables` where table_schema != 'information_schema' limit 1) as `tables`", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select c.column_name from information_schema.`columns` as c where 1 != 1", + "Query": "select c.column_name from information_schema.`columns` as c where c.table_name = :c_table_name /* VARCHAR */", + "SysTableTableName": "[c_table_name::tables_table_name]", + "Table": "information_schema.`columns`" + } + ] + } + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json index 2d66bc62d42..ac39682be4c 100644 --- a/go/vt/vtgate/planbuilder/testdata/large_union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/large_union_cases.json @@ -23,8 +23,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt where 1 != 1", - "Query": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11)) as dt", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as 
content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from ((select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270698330 order by created_at asc, id asc limit 11)) as dt(c0, c1)", "Table": "music", "Values": [ "1270698330" @@ -38,8 +38,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt where 1 != 1", - "Query": "select content, user_id, weight_string(content), weight_string(user_id) from ((select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11)) as dt", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from ((select content, user_id from music where 1 != 1) union (select content, user_id from music where 1 != 1)) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from ((select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11) union (select content, user_id from music where user_id = 1270699497 order by created_at asc, id asc limit 11)) as dt(c0, c1)", "Table": "music", "Values": [ "1270699497" @@ -53,8 +53,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270703806 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), 
weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270703806 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270703806" @@ -68,8 +68,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270707364 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270707364" @@ -83,8 +83,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270714657 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270714657 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270714657" @@ -98,8 
+98,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270721330 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270721330 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270721330" @@ -113,8 +113,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270812079 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270812079 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270812079" @@ -128,8 +128,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271011532 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as 
user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271011532 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271011532" @@ -143,8 +143,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271034164 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271034164 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271034164" @@ -158,8 +158,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271034177 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271034177 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": 
"music", "Values": [ "1271034177" @@ -173,8 +173,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271066849 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271066849 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271066849" @@ -188,8 +188,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271098740 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271098740 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271098740" @@ -203,8 +203,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271355000 order by created_at asc, id asc limit 11", + 
"FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271355000 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271355000" @@ -218,8 +218,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271639345" @@ -233,8 +233,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271914117 order by created_at asc, 
id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271914117" @@ -248,8 +248,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271924504 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271924504 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271924504" @@ -263,8 +263,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272086055 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272086055 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272086055" @@ -278,8 +278,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272127855 order by 
created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272127855" @@ -293,8 +293,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272191137 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272191137" @@ -308,8 +308,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272468271 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 
1272468271 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272468271" @@ -323,8 +323,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270637436" @@ -338,8 +338,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270644941 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270644941" @@ -353,8 +353,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music 
where user_id = 1270650576 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270650576 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270650576" @@ -368,8 +368,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270652906 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270652906 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270652906" @@ -383,8 +383,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270660650 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, 
user_id from music where user_id = 1270660650 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270660650" @@ -398,8 +398,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270670201 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270670201" @@ -413,8 +413,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270707364 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270707364 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270707364" @@ -428,8 +428,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, 
weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271365691" @@ -443,8 +443,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271799956 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271799956" @@ -458,8 +458,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, 
weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271914117" @@ -473,8 +473,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270637436" @@ -488,8 +488,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271799956 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271799956 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271799956" @@ -503,8 +503,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 
!= 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270637436 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270637436 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270637436" @@ -518,8 +518,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271639345" @@ -533,8 +533,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270644941 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": 
"select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270644941 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270644941" @@ -548,8 +548,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270649256 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270649256 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270649256" @@ -563,8 +563,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270653671 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270653671 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270653671" @@ -578,8 +578,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), 
weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270670201 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270670201 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270670201" @@ -593,8 +593,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270717223 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270717223 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270717223" @@ -608,8 +608,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270720898 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as 
dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270720898 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270720898" @@ -623,8 +623,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270982590" @@ -638,8 +638,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271346411 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271346411 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271346411" @@ -653,8 +653,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 
content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271352121 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271352121 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271352121" @@ -668,8 +668,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271354908 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271354908 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271354908" @@ -683,8 +683,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select 
content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271365691" @@ -698,8 +698,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271367516 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271367516 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271367516" @@ -713,8 +713,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271472522 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271472522 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271472522" @@ -728,8 +728,8 @@ "Name": "user", 
"Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271607757" @@ -743,8 +743,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271639345 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271639345 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271639345" @@ -758,8 +758,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271821733 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, 
weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271821733 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271821733" @@ -773,8 +773,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271914117 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271914117 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271914117" @@ -788,8 +788,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272068709 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272068709 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", 
"Values": [ "1272068709" @@ -803,8 +803,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272127855 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272127855 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272127855" @@ -818,8 +818,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272191137 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272191137 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272191137" @@ -833,8 +833,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272244005 order by created_at asc, id asc limit 11", + "FieldQuery": 
"select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272244005" @@ -848,8 +848,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272468271 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272468271 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272468271" @@ -863,8 +863,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 
11) as dt(c0, c1)", "Table": "music", "Values": [ "1270982590" @@ -878,8 +878,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271365691" @@ -893,8 +893,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271607757 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271607757" @@ -908,8 +908,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1270982590 order by created_at 
asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1270982590 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1270982590" @@ -923,8 +923,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271365691 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271365691 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271365691" @@ -938,8 +938,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1271607757 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1271607757 
order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1271607757" @@ -953,8 +953,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select content, user_id, weight_string(content), weight_string(user_id) from music where 1 != 1", - "Query": "select distinct content, user_id, weight_string(content), weight_string(user_id) from music where user_id = 1272244005 order by created_at asc, id asc limit 11", + "FieldQuery": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select content, user_id from music where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as content, dt.c1 as user_id, weight_string(dt.c0), weight_string(dt.c1) from (select distinct content, user_id from music where user_id = 1272244005 order by created_at asc, id asc limit 11) as dt(c0, c1)", "Table": "music", "Values": [ "1272244005" diff --git a/go/vt/vtgate/planbuilder/testdata/lock_cases.json b/go/vt/vtgate/planbuilder/testdata/lock_cases.json index c14ba026869..2490424a1ec 100644 --- a/go/vt/vtgate/planbuilder/testdata/lock_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/lock_cases.json @@ -97,7 +97,7 @@ "QueryType": "UNLOCK_TABLES", "Original": "unlock tables", "Instructions": { - "OperatorType": "Rows" + "OperatorType": "UnlockTables" } } }, @@ -124,5 +124,95 @@ "main.dual" ] } + }, + { + "comment": "select nowait", + "query": "select u.col, u.bar from user u join music m on u.foo = m.foo for update nowait", + "plan": { + "QueryType": "SELECT", + "Original": "select u.col, u.bar from user u join music m on u.foo = m.foo for update nowait", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "JoinVars": { + "u_foo": 2 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.col, u.bar, u.foo from `user` as u where 1 
!= 1", + "Query": "select u.col, u.bar, u.foo from `user` as u for update nowait", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.foo = :u_foo for update nowait", + "Table": "music" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } + }, + { + "comment": "select skip locked", + "query": "select u.col, u.bar from user u join music m on u.foo = m.foo for share skip locked", + "plan": { + "QueryType": "SELECT", + "Original": "select u.col, u.bar from user u join music m on u.foo = m.foo for share skip locked", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1", + "JoinVars": { + "u_foo": 2 + }, + "TableName": "`user`_music", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.col, u.bar, u.foo from `user` as u where 1 != 1", + "Query": "select u.col, u.bar, u.foo from `user` as u for share skip locked", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as m where 1 != 1", + "Query": "select 1 from music as m where m.foo = :u_foo for share skip locked", + "Table": "music" + } + ] + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json index cc09c95282f..34f198abb96 100644 --- a/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/memory_sort_cases.json @@ -24,9 +24,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, 
weight_string(a)", + "FieldQuery": "select a, b, count(*), weight_string(a), weight_string(`user`.b) from `user` where 1 != 1 group by a, weight_string(a)", "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*), weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by a asc", + "Query": "select a, b, count(*), weight_string(a), weight_string(`user`.b) from `user` group by a, weight_string(a) order by a asc", "Table": "`user`" } ] @@ -102,9 +102,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, b, count(*) as k, weight_string(a), weight_string(b) from `user` where 1 != 1 group by a, weight_string(a)", + "FieldQuery": "select a, b, count(*) as k, weight_string(a), weight_string(`user`.b) from `user` where 1 != 1 group by a, weight_string(a)", "OrderBy": "(0|3) ASC", - "Query": "select a, b, count(*) as k, weight_string(a), weight_string(b) from `user` group by a, weight_string(a) order by a asc", + "Query": "select a, b, count(*) as k, weight_string(a), weight_string(`user`.b) from `user` group by a, weight_string(a) order by a asc", "Table": "`user`" } ] @@ -257,9 +257,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from (select `user`.id, `user`.col from `user`) as t order by id asc", + "FieldQuery": "select t.id, t.col, weight_string(t.id) from (select `user`.id, `user`.col from `user` where 1 != 1) as t where 1 != 1", + "OrderBy": "(0|2) ASC", + "Query": "select t.id, t.col, weight_string(t.id) from (select `user`.id, `user`.col from `user`) as t order by t.id asc", "Table": "`user`" }, { @@ -552,9 +552,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` where 1 != 1", + "FieldQuery": "select a, convert(`user`.a, binary), weight_string(convert(`user`.a, 
binary)) from `user` where 1 != 1", "OrderBy": "(1|2) DESC", - "Query": "select a, convert(a, binary), weight_string(convert(a, binary)) from `user` order by convert(a, binary) desc", + "Query": "select a, convert(`user`.a, binary), weight_string(convert(`user`.a, binary)) from `user` order by convert(`user`.a, binary) desc", "ResultColumns": 1, "Table": "`user`" }, @@ -585,9 +585,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u where 1 != 1", + "FieldQuery": "select u.a, convert(u.a, binary), weight_string(convert(u.a, binary)) from `user` as u where 1 != 1", "OrderBy": "(1|2) DESC", - "Query": "select u.a, convert(a, binary), weight_string(convert(a, binary)) from `user` as u order by convert(a, binary) desc", + "Query": "select u.a, convert(u.a, binary), weight_string(convert(u.a, binary)) from `user` as u order by convert(u.a, binary) desc", "Table": "`user`" }, { @@ -624,7 +624,7 @@ }, "FieldQuery": "select id, intcol from `user` where 1 != 1", "OrderBy": "1 ASC", - "Query": "select id, intcol from `user` order by intcol asc", + "Query": "select id, intcol from `user` order by `user`.intcol asc", "Table": "`user`" }, "TablesUsed": [ @@ -655,5 +655,55 @@ "user.user" ] } + }, + { + "comment": "Derived table split across two shards, and ordered by both", + "query": "select * from (select u.foo, ue.bar from user u, user_extra ue) tbl order by tbl.bar, tbl.foo", + "plan": { + "QueryType": "SELECT", + "Original": "select * from (select u.foo, ue.bar from user u, user_extra ue) tbl order by tbl.bar, tbl.foo", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC, (0|3) ASC", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1,L:1", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + 
"Sharded": true + }, + "FieldQuery": "select tbl.foo, weight_string(tbl.foo) from (select u.foo from `user` as u where 1 != 1) as tbl where 1 != 1", + "Query": "select tbl.foo, weight_string(tbl.foo) from (select u.foo from `user` as u) as tbl", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select tbl.bar, weight_string(tbl.bar) from (select ue.bar from user_extra as ue where 1 != 1) as tbl where 1 != 1", + "Query": "select tbl.bar, weight_string(tbl.bar) from (select ue.bar from user_extra as ue) as tbl", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json index 3af909415f9..45f1ac8c618 100644 --- a/go/vt/vtgate/planbuilder/testdata/oltp_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/oltp_cases.json @@ -91,7 +91,7 @@ }, "FieldQuery": "select c from sbtest1 where 1 != 1", "OrderBy": "0 ASC COLLATE latin1_swedish_ci", - "Query": "select c from sbtest1 where id between 50 and 235 order by c asc", + "Query": "select c from sbtest1 where id between 50 and 235 order by sbtest1.c asc", "Table": "sbtest1" }, "TablesUsed": [ @@ -119,7 +119,7 @@ }, "FieldQuery": "select c from sbtest30 where 1 != 1 group by c", "OrderBy": "0 ASC COLLATE latin1_swedish_ci", - "Query": "select c from sbtest30 where id between 1 and 10 group by c order by c asc", + "Query": "select c from sbtest30 where id between 1 and 10 group by c order by sbtest30.c asc", "Table": "sbtest30" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json index 92c8d132eda..2258bcd768c 100644 --- a/go/vt/vtgate/planbuilder/testdata/other_read_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/other_read_cases.json @@ -14,22 +14,10 @@ "TargetDestination": 
"AnyShard()", "Query": "explain select * from `user`", "SingleShardOnly": true - } - } - }, - { - "comment": "Explain Vitess statement", - "query": "explain format=vitess select * from user", - "plan": { - "QueryType": "EXPLAIN", - "Original": "explain format=vitess select * from user", - "Instructions": { - "OperatorType": "Rows", - "Fields": { - "JSON": "VARCHAR" - }, - "RowCount": 1 - } + }, + "TablesUsed": [ + "main.user" + ] } }, { @@ -67,7 +55,10 @@ "TargetDestination": "AnyShard()", "Query": "explain select * from t", "SingleShardOnly": true - } + }, + "TablesUsed": [ + "main.t" + ] } }, { @@ -85,7 +76,96 @@ "TargetDestination": "AnyShard()", "Query": "explain select * from t", "SingleShardOnly": true - } + }, + "TablesUsed": [ + "main.t" + ] + } + }, + { + "comment": "explain - routed table with same name", + "query": "explain select 1, second_user.user.id from second_user.user", + "plan": { + "QueryType": "EXPLAIN", + "Original": "explain select 1, second_user.user.id from second_user.user", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AnyShard()", + "Query": "explain select 1, `user`.id from `user`", + "SingleShardOnly": true + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "explain - routed table with different name", + "query": "explain select 1, second_user.foo.id, foo.col from second_user.foo", + "plan": { + "QueryType": "EXPLAIN", + "Original": "explain select 1, second_user.foo.id, foo.col from second_user.foo", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AnyShard()", + "Query": "explain select 1, foo.id, foo.col from `user` as foo", + "SingleShardOnly": true + }, + "TablesUsed": [ + "user.foo" + ] + } + }, + { + "comment": "explain - routed table with join on different table on routed keyspace", + "query": "explain select 1, second_user.foo.id, foo.col from 
second_user.foo join user.user join user.music", + "plan": { + "QueryType": "EXPLAIN", + "Original": "explain select 1, second_user.foo.id, foo.col from second_user.foo join user.user join user.music", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetDestination": "AnyShard()", + "Query": "explain select 1, foo.id, foo.col from `user` as foo join `user` join music", + "SingleShardOnly": true + }, + "TablesUsed": [ + "user.foo", + "user.user", + "user.music" + ] + } + }, + { + "comment": "describe info_schema table", + "query": "describe information_schema.administrable_role_authorizations", + "plan": { + "QueryType": "EXPLAIN", + "Original": "describe information_schema.administrable_role_authorizations", + "Instructions": { + "OperatorType": "Send", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetDestination": "AnyShard()", + "Query": "explain information_schema.administrable_role_authorizations", + "SingleShardOnly": true + }, + "TablesUsed": [ + "main.administrable_role_authorizations" + ] } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json index cad8e9be1eb..96a92d5894d 100644 --- a/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/postprocess_cases.json @@ -88,7 +88,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col1 as a, `user`.col2 from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2 from `user` where `user`.col1 = 1 and `user`.col1 = `user`.col2 and 1 = 1", + "Query": "select `user`.col1 as a, `user`.col2 from `user` where `user`.col1 = 1 and `user`.col1 = `user`.col2", "Table": "`user`" }, { @@ -99,7 +99,7 @@ "Sharded": true }, "FieldQuery": "select user_extra.col3 from user_extra where 1 != 1", - "Query": "select user_extra.col3 from user_extra where user_extra.col3 = 1 and 1 = 1", + "Query": "select user_extra.col3 
from user_extra where user_extra.col3 = 1", "Table": "user_extra" } ] @@ -145,7 +145,7 @@ "Sharded": true }, "FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` where :__sq_has_values and id in ::__vals", + "Query": "select id from `user` where :__sq_has_values and `user`.id in ::__vals", "Table": "`user`", "Values": [ "::__sq1" @@ -226,7 +226,7 @@ }, "FieldQuery": "select col from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select col from `user` order by col asc", + "Query": "select col from `user` order by `user`.col asc", "Table": "`user`" }, "TablesUsed": [ @@ -249,7 +249,7 @@ }, "FieldQuery": "select user_id, col1, col2, weight_string(user_id) from authoritative where 1 != 1", "OrderBy": "(0|3) ASC", - "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by user_id asc", + "Query": "select user_id, col1, col2, weight_string(user_id) from authoritative order by authoritative.user_id asc", "ResultColumns": 3, "Table": "authoritative" }, @@ -258,6 +258,69 @@ ] } }, + { + "comment": "Test that LIMIT can be pushed to the route even in the presence of an outer join", + "query": "SELECT user_extra.`id` FROM user LEFT JOIN user_extra ON user_extra.`b` = 2 AND user.`c` = user_extra.`c` WHERE user.`a` = 1 LIMIT 1", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT user_extra.`id` FROM user LEFT JOIN user_extra ON user_extra.`b` = 2 AND user.`c` = user_extra.`c` WHERE user.`a` = 1 LIMIT 1", + "Instructions": { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "user_c": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.c from `user` where 1 != 1", + "Query": "select 
`user`.c from `user` where `user`.a = 1 limit 1", + "Table": "`user`" + } + ] + }, + { + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.id from user_extra where 1 != 1", + "Query": "select user_extra.id from user_extra where user_extra.c = :user_c and user_extra.b = 2 limit 1", + "Table": "user_extra" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, { "comment": "ORDER BY works for select * from authoritative table", "query": "select * from authoritative order by col1", @@ -273,7 +336,7 @@ }, "FieldQuery": "select user_id, col1, col2 from authoritative where 1 != 1", "OrderBy": "1 ASC COLLATE latin1_swedish_ci", - "Query": "select user_id, col1, col2 from authoritative order by col1 asc", + "Query": "select user_id, col1, col2 from authoritative order by authoritative.col1 asc", "Table": "authoritative" }, "TablesUsed": [ @@ -296,7 +359,7 @@ }, "FieldQuery": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1", "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC", - "Query": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", + "Query": "select a, textcol1, b, weight_string(a), weight_string(b) from `user` order by `user`.a asc, `user`.textcol1 asc, `user`.b asc", "ResultColumns": 3, "Table": "`user`" }, @@ -320,7 +383,7 @@ }, "FieldQuery": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` where 1 != 1", "OrderBy": "(0|3) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|4) ASC", - "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc", + "Query": "select a, `user`.textcol1, b, weight_string(a), weight_string(b) from `user` order by `user`.a asc, `user`.textcol1 asc, `user`.b 
asc", "ResultColumns": 3, "Table": "`user`" }, @@ -342,9 +405,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` where 1 != 1", - "OrderBy": "(0|4) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|5) ASC, 3 ASC COLLATE latin1_swedish_ci", - "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b) from `user` order by a asc, textcol1 asc, b asc, textcol2 asc", + "FieldQuery": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b), weight_string(textcol2) from `user` where 1 != 1", + "OrderBy": "(0|4) ASC, 1 ASC COLLATE latin1_swedish_ci, (2|5) ASC, (3|6) ASC COLLATE ", + "Query": "select a, textcol1, b, textcol2, weight_string(a), weight_string(b), weight_string(textcol2) from `user` order by `user`.a asc, `user`.textcol1 asc, `user`.b asc, `user`.textcol2 asc", "ResultColumns": 4, "Table": "`user`" }, @@ -373,7 +436,7 @@ }, "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select id as foo, weight_string(id) from music order by foo asc", + "Query": "select id as foo, weight_string(id) from music order by id asc", "ResultColumns": 1, "Table": "music" }, @@ -440,7 +503,7 @@ }, "FieldQuery": "select col from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select col from `user` where :__sq_has_values and col in ::__sq1 order by col asc", + "Query": "select col from `user` where :__sq_has_values and col in ::__sq1 order by `user`.col asc", "Table": "`user`" } ] @@ -526,7 +589,7 @@ "Sharded": true }, "FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by `user`.col1 asc", "Table": "`user`", "Values": [ "1" @@ -579,7 +642,7 @@ "Sharded": true }, 
"FieldQuery": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where 1 != 1", - "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by a asc", + "Query": "select `user`.col1 as a, `user`.col2, `user`.id from `user` where `user`.id = 1 order by `user`.col1 asc", "Table": "`user`", "Values": [ "1" @@ -1079,7 +1142,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` as route1 where 1 != 1", - "Query": "select col from `user` as route1 where id = 1 order by col asc", + "Query": "select col from `user` as route1 where id = 1 order by route1.col asc", "Table": "`user`", "Values": [ "1" @@ -1145,15 +1208,21 @@ "Table": "`user`" }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra limit 1", + "Table": "user_extra" + } + ] } ] } @@ -1183,7 +1252,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", + "Query": "select col from `user` limit 1", "Table": "`user`" } ] @@ -1211,7 +1280,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", + "Query": "select col from `user` limit :a", "Table": "`user`" } ] @@ -1239,7 +1308,7 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit :__upper_limit", + "Query": "select * from `user` where id1 = 4 and name1 = 'abc' limit 5", "Table": "`user`" } ] @@ -1340,7 +1409,7 @@ "Sharded": true }, 
"FieldQuery": "select id from `user` where 1 != 1", - "Query": "select id from `user` limit :__upper_limit", + "Query": "select id from `user` limit 1 + 1", "Table": "`user`" } ] @@ -1365,7 +1434,7 @@ }, "FieldQuery": "select id as foo, weight_string(id) from music where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select id as foo, weight_string(id) from music order by foo asc", + "Query": "select id as foo, weight_string(id) from music order by music.id asc", "ResultColumns": 1, "Table": "music" }, @@ -1389,7 +1458,7 @@ }, "FieldQuery": "select id as foo, id2 as id, weight_string(id2) from music where 1 != 1", "OrderBy": "(1|2) ASC", - "Query": "select id as foo, id2 as id, weight_string(id2) from music order by id asc", + "Query": "select id as foo, id2 as id, weight_string(id2) from music order by music.id2 asc", "ResultColumns": 2, "Table": "music" }, @@ -1419,7 +1488,7 @@ }, "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", + "Query": "select `name`, weight_string(`name`) from `user` order by `user`.`name` asc", "Table": "`user`" }, { @@ -1585,42 +1654,33 @@ "QueryType": "SELECT", "Original": "select name, name from user, music order by name", "Instructions": { - "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 0 - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:0", + "TableName": "`user`_music", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select `name`, weight_string(`name`) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select `name`, weight_string(`name`) from `user` order by `name` asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - 
"Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music where 1 != 1", - "Query": "select 1 from music", - "Table": "music" - } - ] + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, `name`, weight_string(`name`) from `user` where 1 != 1", + "OrderBy": "(0|2) ASC", + "Query": "select `name`, `name`, weight_string(`name`) from `user` order by `user`.`name` asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music where 1 != 1", + "Query": "select 1 from music", + "Table": "music" } ] }, @@ -1645,7 +1705,7 @@ }, "FieldQuery": "select id, id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|2) ASC", - "Query": "select id, id, weight_string(id) from `user` order by id asc", + "Query": "select id, id, weight_string(id) from `user` order by `user`.id asc", "ResultColumns": 2, "Table": "`user`" }, @@ -1902,9 +1962,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` where 1 != 1", + "FieldQuery": "select `user`.col1 as a, `user`.col1 collate utf8_general_ci, weight_string(`user`.col1 collate utf8_general_ci) from `user` where 1 != 1", "OrderBy": "(1|2) ASC", - "Query": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` order by a collate utf8_general_ci asc", + "Query": "select `user`.col1 as a, `user`.col1 collate utf8_general_ci, weight_string(`user`.col1 collate utf8_general_ci) from `user` order by `user`.col1 collate utf8_general_ci asc", "ResultColumns": 1, "Table": "`user`" }, @@ -1950,9 +2010,9 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate 
utf8_general_ci) from `user` where 1 != 1", + "FieldQuery": "select `user`.col1 as a, `user`.col1 collate utf8_general_ci, weight_string(`user`.col1 collate utf8_general_ci) from `user` where 1 != 1", "OrderBy": "(1|2) ASC", - "Query": "select `user`.col1 as a, a collate utf8_general_ci, weight_string(a collate utf8_general_ci) from `user` order by a collate utf8_general_ci asc", + "Query": "select `user`.col1 as a, `user`.col1 collate utf8_general_ci, weight_string(`user`.col1 collate utf8_general_ci) from `user` order by `user`.col1 collate utf8_general_ci asc", "ResultColumns": 1, "Table": "`user`" }, @@ -2021,54 +2081,47 @@ "QueryType": "SELECT", "Original": "select a.tcol1 from user a join music b where a.tcol1 = b.tcol2 group by a.tcol1 having repeat(a.tcol1,min(a.id)) like \"A\\%B\" order by a.tcol1", "Instructions": { - "OperatorType": "Sort", - "Variant": "Memory", - "OrderBy": "(0|2) ASC", + "OperatorType": "Filter", + "Predicate": "repeat(a.tcol1, min(a.id)) like 'A\\%B'", "ResultColumns": 1, "Inputs": [ { - "OperatorType": "Filter", - "Predicate": "repeat(a.tcol1, min(a.id)) like 'A\\%B'", + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "min(1|3) AS min(a.id)", + "GroupBy": "(0|2)", "Inputs": [ { - "OperatorType": "Aggregate", - "Variant": "Ordered", - "Aggregates": "min(1|3) AS min(a.id)", - "GroupBy": "(0|2)", + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:1,L:0,L:2,L:3", + "JoinVars": { + "a_tcol1": 1 + }, + "TableName": "`user`_music", "Inputs": [ { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:1,L:0,L:2,L:3", - "JoinVars": { - "a_tcol1": 1 + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true }, - "TableName": "`user`_music", - "Inputs": [ - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select min(a.id), a.tcol1, weight_string(a.tcol1), 
weight_string(a.id) from `user` as a where 1 != 1 group by a.tcol1, weight_string(a.tcol1), weight_string(a.id)", - "OrderBy": "(1|2) ASC", - "Query": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a group by a.tcol1, weight_string(a.tcol1), weight_string(a.id) order by a.tcol1 asc", - "Table": "`user`" - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from music as b where 1 != 1 group by .0", - "Query": "select 1 from music as b where b.tcol2 = :a_tcol1 group by .0", - "Table": "music" - } - ] + "FieldQuery": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a where 1 != 1 group by a.tcol1, weight_string(a.tcol1), weight_string(a.id)", + "OrderBy": "(1|2) ASC", + "Query": "select min(a.id), a.tcol1, weight_string(a.tcol1), weight_string(a.id) from `user` as a group by a.tcol1, weight_string(a.tcol1), weight_string(a.id) order by a.tcol1 asc", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from music as b where 1 != 1 group by .0", + "Query": "select 1 from music as b where b.tcol2 = :a_tcol1 group by .0", + "Table": "music" } ] } @@ -2102,7 +2155,7 @@ }, "FieldQuery": "select col from `user` where 1 != 1 group by col", "OrderBy": "0 ASC", - "Query": "select col from `user` where id between :vtg1 and :vtg2 group by col order by col asc", + "Query": "select col from `user` where id between :vtg1 and :vtg2 group by col order by `user`.col asc", "Table": "`user`" } ] @@ -2133,7 +2186,7 @@ }, "FieldQuery": "select foo, col, weight_string(foo) from `user` where 1 != 1 group by col, foo, weight_string(foo)", "OrderBy": "1 ASC, (0|2) ASC", - "Query": "select foo, col, weight_string(foo) from `user` where id between :vtg1 and :vtg2 group by col, foo, weight_string(foo) order by col asc, foo 
asc", + "Query": "select foo, col, weight_string(foo) from `user` where id between :vtg1 and :vtg2 group by col, foo, weight_string(foo) order by `user`.col asc, foo asc", "Table": "`user`" } ] @@ -2181,5 +2234,200 @@ "user.user" ] } + }, + { + "comment": "DISTINCT on an unsupported collation should fall back on weightstrings", + "query": "select distinct textcol2 from user", + "plan": { + "QueryType": "SELECT", + "Original": "select distinct textcol2 from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:1): " + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select textcol2, weight_string(textcol2) from `user` where 1 != 1", + "Query": "select distinct textcol2, weight_string(textcol2) from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "No weightstrings or derived table needed", + "query": "select textcol1 from user union select textcol1 from user", + "plan": { + "QueryType": "SELECT", + "Original": "select textcol1 from user union select textcol1 from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "0: latin1_swedish_ci" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select textcol1 from `user` where 1 != 1 union select textcol1 from `user` where 1 != 1", + "Query": "select textcol1 from `user` union select textcol1 from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "ORDER BY literal works fine even when the columns have the same name", + "query": "select a.id, b.id from user as a, user_extra as b union all select 1, 2 order by 1", + "plan": { + "QueryType": "SELECT", + "Original": "select a.id, b.id from user as a, user_extra as b union all select 1, 2 order by 1", + 
"Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|2) ASC", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1", + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a.id, weight_string(a.id) from `user` as a where 1 != 1", + "Query": "select a.id, weight_string(a.id) from `user` as a", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select b.id from user_extra as b where 1 != 1", + "Query": "select b.id from user_extra as b", + "Table": "user_extra" + } + ] + }, + { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select dt.c0 as `1`, dt.c1 as `2`, weight_string(dt.c0) from (select 1, 2 from dual where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as `1`, dt.c1 as `2`, weight_string(dt.c0) from (select 1, 2 from dual) as dt(c0, c1)", + "Table": "dual" + } + ] + } + ] + }, + "TablesUsed": [ + "main.dual", + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "ORDER BY literal works fine even when the columns have the same name", + "query": "select a.id, b.id from user as a, user_extra as b union all select 1, 2 order by 2", + "plan": { + "QueryType": "SELECT", + "Original": "select a.id, b.id from user as a, user_extra as b union all select 1, 2 order by 2", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(1|2) ASC", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,R:1", + "TableName": "`user`_user_extra", + "Inputs": [ 
+ { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select a.id from `user` as a where 1 != 1", + "Query": "select a.id from `user` as a", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select b.id, weight_string(b.id) from user_extra as b where 1 != 1", + "Query": "select b.id, weight_string(b.id) from user_extra as b", + "Table": "user_extra" + } + ] + }, + { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select dt.c0 as `1`, dt.c1 as `2`, weight_string(dt.c1) from (select 1, 2 from dual where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as `1`, dt.c1 as `2`, weight_string(dt.c1) from (select 1, 2 from dual) as dt(c0, c1)", + "Table": "dual" + } + ] + } + ] + }, + "TablesUsed": [ + "main.dual", + "user.user", + "user.user_extra" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/rails_cases.json b/go/vt/vtgate/planbuilder/testdata/rails_cases.json index ef36b79c855..c8ab8b7b9d8 100644 --- a/go/vt/vtgate/planbuilder/testdata/rails_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/rails_cases.json @@ -50,8 +50,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at, book6s.supplier5_id, book6s.id from author5s, book6s where 1 != 1", - "Query": "select author5s.id as id, author5s.`name` as `name`, author5s.created_at as created_at, author5s.updated_at as updated_at, book6s.supplier5_id, book6s.id from author5s, book6s where book6s.author5_id = author5s.id", + "FieldQuery": "select author5s.id, author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s, book6s where 1 != 1", + "Query": "select author5s.id, 
author5s.`name`, author5s.created_at, author5s.updated_at, book6s.supplier5_id, book6s.id from author5s, book6s where book6s.author5_id = author5s.id", "Table": "author5s, book6s" }, { diff --git a/go/vt/vtgate/planbuilder/testdata/reference_cases.json b/go/vt/vtgate/planbuilder/testdata/reference_cases.json index 42240ce56c7..a89fa103923 100644 --- a/go/vt/vtgate/planbuilder/testdata/reference_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/reference_cases.json @@ -161,6 +161,50 @@ ] } }, + { + "comment": "Reference tables using left join with a derived table having a limit clause", + "query": "SELECT u.id FROM ( SELECT a.id, a.u_id FROM user.ref_with_source AS a WHERE a.id IN (3) ORDER BY a.d_at LIMIT 1) as u LEFT JOIN user.ref_with_source AS u0 ON u.u_id = u0.u_uid ORDER BY u.id", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT u.id FROM ( SELECT a.id, a.u_id FROM user.ref_with_source AS a WHERE a.id IN (3) ORDER BY a.d_at LIMIT 1) as u LEFT JOIN user.ref_with_source AS u0 ON u.u_id = u0.u_uid ORDER BY u.id", + "Instructions": { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0", + "JoinVars": { + "u_u_id": 1 + }, + "TableName": "ref_with_source_ref_with_source", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select u.id, u.u_id from (select a.id, a.u_id from ref_with_source as a where 1 != 1) as u where 1 != 1", + "Query": "select u.id, u.u_id from (select a.id, a.u_id from ref_with_source as a where a.id in (3) order by a.d_at asc limit 1) as u order by u.id asc", + "Table": "ref_with_source" + }, + { + "OperatorType": "Route", + "Variant": "Reference", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from ref_with_source as u0 where 1 != 1", + "Query": "select 1 from ref_with_source as u0 where u0.u_uid = :u_u_id", + "Table": "ref_with_source" + } + ] + }, + "TablesUsed": [ + 
"user.ref_with_source" + ] + } + }, { "comment": "insert into qualified ambiguous reference table routes to source", "query": "insert into user.ambiguous_ref_with_source(col) values(1)", @@ -384,5 +428,323 @@ "main.global_ref" ] } + }, + { + "comment": "delete from reference table with another name - query send to source table", + "query": "delete from user.ref_with_source where col = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user.ref_with_source where col = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from source_of_ref where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "update from reference table with another name - query send to source table", + "query": "update user.ref_with_source set x = 4 where col = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update user.ref_with_source set x = 4 where col = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update source_of_ref set x = 4 where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "insert from reference table with another name - query send to source table", + "query": "insert into user.ref_with_source(x) values(4)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into user.ref_with_source(x) values(4)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into source_of_ref(x) values (4)", + "TableName": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "delete from reference table - query send 
to source table", + "query": "delete from source_of_ref where col = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from source_of_ref where col = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from source_of_ref where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "update from reference table - query send to source table", + "query": "update source_of_ref set x = 4 where col = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update source_of_ref set x = 4 where col = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update source_of_ref set x = 4 where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "insert from reference table - query send to source table", + "query": "insert into source_of_ref(x) values(4)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into source_of_ref(x) values(4)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into source_of_ref(x) values (4)", + "TableName": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "delete from reference table qualified with unsharded - query send to source table", + "query": "delete from main.source_of_ref where col = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from main.source_of_ref where col = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from 
source_of_ref where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "update from reference table qualified with unsharded - query send to source table", + "query": "update main.source_of_ref set x = 4 where col = 1", + "plan": { + "QueryType": "UPDATE", + "Original": "update main.source_of_ref set x = 4 where col = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update source_of_ref set x = 4 where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "insert from reference table qualified with unsharded - query send to source table", + "query": "insert into main.source_of_ref(x) values(4)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into main.source_of_ref(x) values(4)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into source_of_ref(x) values (4)", + "TableName": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "delete from reference table with another name - query send to source table", + "query": "delete from user.ref_with_source where col = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user.ref_with_source where col = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from source_of_ref where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "update from reference table with another name - query send to source table", + "query": "update user.ref_with_source set x = 4 where col = 1", + "plan": { + 
"QueryType": "UPDATE", + "Original": "update user.ref_with_source set x = 4 where col = 1", + "Instructions": { + "OperatorType": "Update", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "update source_of_ref set x = 4 where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "insert from reference table with another name - query send to source table", + "query": "insert into user.ref_with_source(x) values(4)", + "plan": { + "QueryType": "INSERT", + "Original": "insert into user.ref_with_source(x) values(4)", + "Instructions": { + "OperatorType": "Insert", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "insert into source_of_ref(x) values (4)", + "TableName": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "select with join to reference table in sharded keyspace: should route shard-scoped", + "query": "select * from user.ref_with_source ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", + "plan": { + "QueryType": "SELECT", + "Original": "select * from user.ref_with_source ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", + "Instructions": { + "FieldQuery": "select * from ref_with_source as ref, `user` as u where 1 != 1", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Vindex": "user_index", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "select * from ref_with_source as ref, `user` as u where u.id = 2 and ref.id = u.ref_id", + "Table": "`user`, ref_with_source", + "Values": [ + "2" + ] + }, + "TablesUsed": [ + "user.ref_with_source", + "user.user" + ] + } + }, + { + "comment": "select with join to reference table in unsharded keyspace: should route shard-scoped", + "query": "select * from source_of_ref ref, `user`.`user` u where ref.id = u.ref_id and 
u.id = 2", + "plan": { + "QueryType": "SELECT", + "Original": "select * from source_of_ref ref, `user`.`user` u where ref.id = u.ref_id and u.id = 2", + "Instructions": { + "FieldQuery": "select * from ref_with_source as ref, `user` as u where 1 != 1", + "OperatorType": "Route", + "Variant": "EqualUnique", + "Vindex": "user_index", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "select * from ref_with_source as ref, `user` as u where u.id = 2 and ref.id = u.ref_id", + "Table": "`user`, ref_with_source", + "Values": [ + "2" + ] + }, + "TablesUsed": [ + "user.ref_with_source", + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/select_cases.json b/go/vt/vtgate/planbuilder/testdata/select_cases.json index f26cfc4f065..c6a91350d89 100644 --- a/go/vt/vtgate/planbuilder/testdata/select_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/select_cases.json @@ -136,7 +136,7 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit :__upper_limit", + "Query": "select /*vt+ QUERY_TIMEOUT_MS=1000 */ * from `user` limit 10", "QueryTimeout": 1000, "Table": "`user`" } @@ -271,7 +271,7 @@ "Sharded": true }, "FieldQuery": "select * from `user` where 1 != 1", - "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit :__upper_limit", + "Query": "select /*vt+ SCATTER_ERRORS_AS_WARNINGS=1 */ * from `user` limit 10", "ScatterErrorsAsWarnings": true, "Table": "`user`" } @@ -361,8 +361,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where 1 != 1", - "Query": "select a.user_id as user_id, a.col1 as col1, a.col2 as col2, b.user_id as user_id, b.col1 as col1, b.col2 as col2 from authoritative as a, authoritative as b where a.user_id = b.user_id", + "FieldQuery": "select a.user_id, 
a.col1, a.col2, b.user_id, b.col1, b.col2 from authoritative as a, authoritative as b where 1 != 1", + "Query": "select a.user_id, a.col1, a.col2, b.user_id, b.col1, b.col2 from authoritative as a, authoritative as b where a.user_id = b.user_id", "Table": "authoritative" }, "TablesUsed": [ @@ -433,8 +433,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where 1 != 1", - "Query": "select `user`.id, a.user_id as user_id, a.col1 as col1, a.col2 as col2, `user`.col1 from authoritative as a, `user` where a.user_id = `user`.id", + "FieldQuery": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a, `user` where 1 != 1", + "Query": "select `user`.id, a.user_id, a.col1, a.col2, `user`.col1 from authoritative as a, `user` where a.user_id = `user`.id", "Table": "`user`, authoritative" }, "TablesUsed": [ @@ -980,6 +980,84 @@ ] } }, + { + "comment": "Selection but explicitly ignore a vindex", + "query": "select * from user ignore vindex (user_index) where id = 1", + "plan": { + "QueryType": "SELECT", + "Original": "select * from user ignore vindex (user_index) where id = 1", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from `user` where 1 != 1", + "Query": "select * from `user` where id = 1", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Selection but make the planner explicitly use a vindex", + "query": "select intcol, id from user use vindex (name_user_map) where costly = 'aa' and name = 'bb' and id = 3", + "plan": { + "QueryType": "SELECT", + "Original": "select intcol, id from user use vindex (name_user_map) where costly = 'aa' and name = 'bb' and id = 3", + "Instructions": { + "OperatorType": "VindexLookup", + "Variant": "Equal", + "Keyspace": { + "Name": "user", + "Sharded": 
true + }, + "Values": [ + "'bb'" + ], + "Vindex": "name_user_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name`, keyspace_id from name_user_vdx where 1 != 1", + "Query": "select `name`, keyspace_id from name_user_vdx where `name` in ::__vals", + "Table": "name_user_vdx", + "Values": [ + "::name" + ], + "Vindex": "user_index" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select intcol, id from `user` where 1 != 1", + "Query": "select intcol, id from `user` where costly = 'aa' and `name` = 'bb' and id = 3", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Vindex hint on a non-existing vindex", + "query": "select * from user use vindex (does_not_exist) where id = 1", + "plan": "VT09021: Vindex 'does_not_exist' does not exist in table 'user.user'" + }, { "comment": "sharded limit offset", "query": "select user_id from music order by user_id limit 10, 20", @@ -1000,7 +1078,38 @@ }, "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select user_id, weight_string(user_id) from music order by user_id asc limit :__upper_limit", + "Query": "select user_id, weight_string(user_id) from music order by music.user_id asc limit 30", + "ResultColumns": 1, + "Table": "music" + } + ] + }, + "TablesUsed": [ + "user.music" + ] + } + }, + { + "comment": "sharded limit offset with arguments", + "query": "select user_id from music order by user_id limit :limit, :offset", + "plan": { + "QueryType": "SELECT", + "Original": "select user_id from music order by user_id limit :limit, :offset", + "Instructions": { + "OperatorType": "Limit", + "Count": ":offset", + "Offset": ":limit", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + 
"Sharded": true + }, + "FieldQuery": "select user_id, weight_string(user_id) from music where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select user_id, weight_string(user_id) from music order by music.user_id asc limit :__upper_limit", "ResultColumns": 1, "Table": "music" } @@ -1129,7 +1238,7 @@ "Sharded": true }, "FieldQuery": "select user0_.col as col0_ from `user` as user0_ where 1 != 1", - "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by col0_ desc limit 3", + "Query": "select user0_.col as col0_ from `user` as user0_ where id = 1 order by user0_.col desc limit 3", "Table": "`user`", "Values": [ "1" @@ -1328,8 +1437,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id1 from (select `user`.id as id1 from `user` where 1 != 1) as t where 1 != 1", - "Query": "select id1 from (select `user`.id as id1 from `user`) as t", + "FieldQuery": "select t.id1 from (select `user`.id as id1 from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.id1 from (select `user`.id as id1 from `user`) as t", "Table": "`user`" }, { @@ -1339,8 +1448,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id2 from (select user_extra.id as id2 from user_extra where 1 != 1) as t where 1 != 1", - "Query": "select id2 from (select user_extra.id as id2 from user_extra) as t", + "FieldQuery": "select t.id2 from (select user_extra.id as id2 from user_extra where 1 != 1) as t where 1 != 1", + "Query": "select t.id2 from (select user_extra.id as id2 from user_extra) as t", "Table": "user_extra" } ] @@ -1428,8 +1537,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select col1, col2 from (select col1, col2 from unsharded where 1 != 1 union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1", - "Query": "select col1, col2 from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a", + "FieldQuery": "select * from (select col1, col2 from unsharded where 1 != 1 
union select col1, col2 from unsharded where 1 != 1) as a where 1 != 1", + "Query": "select * from (select col1, col2 from unsharded where id = 1 union select col1, col2 from unsharded where id = 3) as a", "Table": "unsharded" }, "TablesUsed": [ @@ -1573,6 +1682,28 @@ ] } }, + { + "comment": "routing table on music", + "query": "select * from second_user.bar where id > 2", + "plan": { + "QueryType": "SELECT", + "Original": "select * from second_user.bar where id > 2", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from music as bar where 1 != 1", + "Query": "select * from music as bar where id > 2", + "Table": "music" + }, + "TablesUsed": [ + "user.music" + ] + } + }, { "comment": "testing SingleRow Projection", "query": "select 42", @@ -1595,6 +1726,56 @@ ] } }, + { + "comment": "avg in sharded keyspace with group by without selecting the group by columns", + "query": "select avg(intcol) as avg_col from user group by textcol1, textcol2 order by textcol1, textcol2;", + "plan": { + "QueryType": "SELECT", + "Original": "select avg(intcol) as avg_col from user group by textcol1, textcol2 order by textcol1, textcol2;", + "Instructions": { + "OperatorType": "SimpleProjection", + "ColumnNames": [ + "0:avg_col" + ], + "Columns": "0", + "Inputs": [ + { + "OperatorType": "Projection", + "Expressions": [ + "sum(intcol) / count(intcol) as avg_col", + ":1 as textcol1", + ":2 as textcol2" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(0) AS avg_col, sum_count(3) AS count(intcol)", + "GroupBy": "1 COLLATE latin1_swedish_ci, (2|4) COLLATE ", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select sum(intcol) as avg_col, textcol1, textcol2, count(intcol), weight_string(textcol2) from `user` where 1 != 1 group by textcol1, textcol2, 
weight_string(textcol2)", + "OrderBy": "1 ASC COLLATE latin1_swedish_ci, (2|4) ASC COLLATE ", + "Query": "select sum(intcol) as avg_col, textcol1, textcol2, count(intcol), weight_string(textcol2) from `user` group by textcol1, textcol2, weight_string(textcol2) order by textcol1 asc, textcol2 asc", + "Table": "`user`" + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, { "comment": "don't filter on the vtgate", "query": "select 42 from dual where false", @@ -1686,7 +1867,7 @@ "Sharded": true }, "FieldQuery": "select * from music where 1 != 1", - "Query": "select * from music limit :__upper_limit", + "Query": "select * from music limit 100", "Table": "music" } ] @@ -1784,7 +1965,7 @@ }, "FieldQuery": "select user_id, count(id), weight_string(user_id) from music where 1 != 1 group by user_id", "OrderBy": "(0|2) ASC", - "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by user_id asc limit :__upper_limit", + "Query": "select user_id, count(id), weight_string(user_id) from music group by user_id having count(user_id) = 1 order by music.user_id asc limit 2", "ResultColumns": 2, "Table": "music" } @@ -2021,11 +2202,11 @@ } }, { - "comment": "select (select col from user limit 1) as a from user join user_extra order by a", - "query": "select (select col from user limit 1) as a from user join user_extra order by a", + "comment": "subquery in select expression of derived table", + "query": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", "plan": { "QueryType": "SELECT", - "Original": "select (select col from user limit 1) as a from user join user_extra order by a", + "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", "Instructions": { "OperatorType": "Join", "Variant": "Join", @@ -2052,7 +2233,7 @@ "Sharded": true }, "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select 
col from `user` limit :__upper_limit", + "Query": "select col from `user` limit 1", "Table": "`user`" } ] @@ -2065,9 +2246,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select :__sq1 as a, weight_string(:__sq1) from `user` where 1 != 1", - "OrderBy": "(0|1) ASC", - "Query": "select :__sq1 as a, weight_string(:__sq1) from `user` order by a asc", + "FieldQuery": "select t.a from (select :__sq1 as a from `user` where 1 != 1) as t where 1 != 1", + "Query": "select t.a from (select :__sq1 as a from `user`) as t", "Table": "`user`" } ] @@ -2092,66 +2272,76 @@ } }, { - "comment": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", - "query": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", + "comment": "select (select col from user limit 1) as a from user join user_extra order by a", + "query": "select (select col from user limit 1) as a from user join user_extra order by a", "plan": { "QueryType": "SELECT", - "Original": "select t.a from (select (select col from user limit 1) as a from user join user_extra) t", + "Original": "select (select col from user limit 1) as a from user join user_extra order by a", "Instructions": { - "OperatorType": "Join", - "Variant": "Join", - "JoinColumnIndexes": "L:0", - "TableName": "`user`_user_extra", + "OperatorType": "SimpleProjection", + "ColumnNames": [ + "0:a" + ], + "Columns": "1", "Inputs": [ { - "OperatorType": "UncorrelatedSubquery", - "Variant": "PulloutValue", - "PulloutVars": [ - "__sq1" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0", + "TableName": "`user`_user_extra", "Inputs": [ { - "InputName": "SubQuery", - "OperatorType": "Limit", - "Count": "1", + "OperatorType": "UncorrelatedSubquery", + "Variant": "PulloutValue", + "PulloutVars": [ + "__sq1" + ], "Inputs": [ { + "InputName": "SubQuery", + "OperatorType": "Limit", + "Count": "1", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": 
"Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select col from `user` where 1 != 1", + "Query": "select col from `user` limit 1", + "Table": "`user`" + } + ] + }, + { + "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select col from `user` where 1 != 1", - "Query": "select col from `user` limit :__upper_limit", + "FieldQuery": "select :__sq1 as __sq1, weight_string(:__sq1) from `user` where 1 != 1", + "OrderBy": "(0|1) ASC", + "Query": "select :__sq1 as __sq1, weight_string(:__sq1) from `user` order by __sq1 asc", "Table": "`user`" } ] }, { - "InputName": "Outer", "OperatorType": "Route", "Variant": "Scatter", "Keyspace": { "Name": "user", "Sharded": true }, - "FieldQuery": "select t.a from (select :__sq1 as a from `user` where 1 != 1) as t where 1 != 1", - "Query": "select t.a from (select :__sq1 as a from `user`) as t", - "Table": "`user`" + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra", + "Table": "user_extra" } ] - }, - { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select 1 from user_extra where 1 != 1", - "Query": "select 1 from user_extra", - "Table": "user_extra" } ] }, @@ -2228,6 +2418,74 @@ ] } }, + { + "comment": "Straight Join ensures specific ordering of joins", + "query": "select user.id, user_extra.user_id from user straight_join user_extra where user.id = user_extra.foo", + "plan": { + "QueryType": "SELECT", + "Original": "select user.id, user_extra.user_id from user straight_join user_extra where user.id = user_extra.foo", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0", + "JoinVars": { + "user_id": 0 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + 
"Sharded": true + }, + "FieldQuery": "select `user`.id from `user` where 1 != 1", + "Query": "select `user`.id from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select user_extra.user_id from user_extra where 1 != 1", + "Query": "select user_extra.user_id from user_extra where user_extra.foo = :user_id", + "Table": "user_extra" + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + "comment": "Straight Join preserved in MySQL query", + "query": "select user.id, user_extra.user_id from user straight_join user_extra where user.id = user_extra.user_id", + "plan": { + "QueryType": "SELECT", + "Original": "select user.id, user_extra.user_id from user straight_join user_extra where user.id = user_extra.user_id", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `user`.id, user_extra.user_id from `user` straight_join user_extra on `user`.id = user_extra.user_id where 1 != 1", + "Query": "select `user`.id, user_extra.user_id from `user` straight_join user_extra on `user`.id = user_extra.user_id", + "Table": "`user`, user_extra" + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, { "comment": "correlated subquery in exists clause", "query": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", @@ -2236,9 +2494,10 @@ "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id)", "Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "ColumnNames": [ + "0:col" ], + "Columns": "0", "Inputs": [ { "OperatorType": "SemiJoin", @@ -2293,9 +2552,10 @@ "Original": "select col from user where exists(select user_id from user_extra where user_id = 3 and user_id < user.id) order by col", 
"Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "ColumnNames": [ + "0:col" ], + "Columns": "0", "Inputs": [ { "OperatorType": "SemiJoin", @@ -2314,7 +2574,7 @@ }, "FieldQuery": "select col, `user`.id from `user` where 1 != 1", "OrderBy": "0 ASC", - "Query": "select col, `user`.id from `user` order by col asc", + "Query": "select col, `user`.id from `user` order by `user`.col asc", "Table": "`user`" }, { @@ -2418,9 +2678,7 @@ "Original": "select 1 from user u where exists (select 1 from user_extra ue where ue.col = u.col and u.col = ue.col2)", "Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0 - ], + "Columns": "0", "Inputs": [ { "OperatorType": "SemiJoin", @@ -2490,6 +2748,30 @@ ] } }, + { + "comment": "Complex join with multiple conditions merged into single route", + "query": "select 0 from user as u join user_extra as s on u.id = s.user_id join music as m on m.user_id = u.id and (s.foo or m.bar)", + "plan": { + "QueryType": "SELECT", + "Original": "select 0 from user as u join user_extra as s on u.id = s.user_id join music as m on m.user_id = u.id and (s.foo or m.bar)", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 0 from `user` as u, user_extra as s, music as m where 1 != 1", + "Query": "select 0 from `user` as u, user_extra as s, music as m where u.id = s.user_id and m.user_id = u.id and (s.foo or m.bar)", + "Table": "`user`, music, user_extra" + }, + "TablesUsed": [ + "user.music", + "user.user", + "user.user_extra" + ] + } + }, { "comment": "union as a derived table", "query": "select found from (select id as found from user union all (select id from unsharded)) as t", @@ -2615,7 +2897,7 @@ "Sharded": false }, "FieldQuery": "select 1 from (select col, count(*) as a from unsharded where 1 != 1 group by col) as f left join unsharded as u on f.col = u.id where 1 != 1", - "Query": "select 1 from (select col, 
count(*) as a from unsharded group by col having count(*) > 0 limit 0, 12) as f left join unsharded as u on f.col = u.id", + "Query": "select 1 from (select col, count(*) as a from unsharded group by col having a > 0 limit 0, 12) as f left join unsharded as u on f.col = u.id", "Table": "unsharded" }, "TablesUsed": [ @@ -2740,7 +3022,7 @@ }, "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Query": "select id, weight_string(id) from `user` order by `user`.id asc limit 1", "Table": "`user`" } ] @@ -2753,8 +3035,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select :__sq1 as `(select id from ``user`` order by id asc limit 1)` from user_extra where 1 != 1", - "Query": "select :__sq1 as `(select id from ``user`` order by id asc limit 1)` from user_extra", + "FieldQuery": "select :__sq1 as `(select id from ``user`` order by ``user``.id asc limit 1)` from user_extra where 1 != 1", + "Query": "select :__sq1 as `(select id from ``user`` order by ``user``.id asc limit 1)` from user_extra", "Table": "user_extra" } ] @@ -3048,15 +3330,15 @@ "QueryType": "SELECT", "Original": "select insert('Quadratic', 3, 4, 'What')", "Instructions": { - "OperatorType": "Route", - "Variant": "Reference", - "Keyspace": { - "Name": "main", - "Sharded": false - }, - "FieldQuery": "select insert('Quadratic', 3, 4, 'What') from dual where 1 != 1", - "Query": "select insert('Quadratic', 3, 4, 'What') from dual", - "Table": "dual" + "OperatorType": "Projection", + "Expressions": [ + "'QuWhattic' as insert('Quadratic', 3, 4, 'What')" + ], + "Inputs": [ + { + "OperatorType": "SingleRow" + } + ] }, "TablesUsed": [ "main.dual" @@ -3230,7 +3512,7 @@ }, "FieldQuery": "select id, `name`, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|2) ASC", - "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by id asc limit 
:__upper_limit", + "Query": "select id, `name`, weight_string(id) from `user` where `name` = 'aa' order by `user`.id asc limit 2", "ResultColumns": 2, "Table": "`user`" } @@ -3640,7 +3922,7 @@ "Sharded": true }, "FieldQuery": "select music.id from music where 1 != 1", - "Query": "select music.id from music where music.genre = 'pop' limit :__upper_limit", + "Query": "select music.id from music where music.genre = 'pop' limit 10", "Table": "music" } ] @@ -3966,7 +4248,7 @@ "Sharded": true }, "FieldQuery": "select id from (select id from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1", - "Query": "select id from (select id from (select music.id from music where music.user_id in ::__vals) as subquery_for_limit limit :__upper_limit) as subquery_for_limit limit :__upper_limit", + "Query": "select id from (select id from (select music.id from music where music.user_id in ::__vals) as subquery_for_limit limit 10) as subquery_for_limit", "Table": "music", "Values": [ "(5, 6)" @@ -4025,7 +4307,7 @@ "Sharded": true }, "FieldQuery": "select id from (select id from (select music.id from music where 1 != 1) as subquery_for_limit where 1 != 1) as subquery_for_limit where 1 != 1", - "Query": "select id from (select id from (select music.id from music) as subquery_for_limit limit :__upper_limit) as subquery_for_limit limit :__upper_limit", + "Query": "select id from (select id from (select music.id from music) as subquery_for_limit limit 10) as subquery_for_limit", "Table": "music" } ] @@ -4223,8 +4505,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select user_id from (select user_id from user_extra where 1 != 1) as ue where 1 != 1", - "Query": "select user_id from (select user_id from user_extra) as ue limit :__upper_limit", + "FieldQuery": "select ue.user_id from (select user_id from user_extra where 1 != 1) as ue where 1 != 1", + "Query": "select ue.user_id from (select user_id from user_extra) as ue 
limit 10", "Table": "user_extra" } ] @@ -4269,10 +4551,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 0 - ], + "Columns": "1,0", "Inputs": [ { "OperatorType": "Aggregate", @@ -4664,6 +4943,28 @@ ] } }, + { + "comment": "name is in backfill vindex - not selected for vindex lookup", + "query": "select * from customer where name = 'x'", + "plan": { + "QueryType": "SELECT", + "Original": "select * from customer where name = 'x'", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select * from customer where 1 != 1", + "Query": "select * from customer where `name` = 'x'", + "Table": "customer" + }, + "TablesUsed": [ + "user.customer" + ] + } + }, { "comment": "email vindex is costly than phone vindex - but phone vindex is backfiling hence ignored", "query": "select * from customer where email = 'a@mail.com' and phone = 123456", @@ -4809,7 +5110,7 @@ }, "FieldQuery": "select u.foo, weight_string(u.foo) from `user` as u where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select u.foo, weight_string(u.foo) from `user` as u order by foo asc", + "Query": "select u.foo, weight_string(u.foo) from `user` as u order by u.foo asc", "Table": "`user`" }, { @@ -4830,5 +5131,149 @@ "user.user_extra" ] } + }, + { + "comment": "Derived tables going to a single shard still need to expand derived table columns", + "query": "SELECT c.column_name FROM user c JOIN (SELECT table_name FROM unsharded LIMIT 1) AS tables ON tables.table_name = c.table_name", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT c.column_name FROM user c JOIN (SELECT table_name FROM unsharded LIMIT 1) AS tables ON tables.table_name = c.table_name", + "Instructions": { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0", + "JoinVars": { + "tables_table_name": 0 + }, + "TableName": "unsharded_`user`", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": 
"Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select `tables`.table_name from (select table_name from unsharded where 1 != 1) as `tables` where 1 != 1", + "Query": "select `tables`.table_name from (select table_name from unsharded limit 1) as `tables`", + "Table": "unsharded" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select c.column_name from `user` as c where 1 != 1", + "Query": "select c.column_name from `user` as c where c.table_name = :tables_table_name", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "main.unsharded", + "user.user" + ] + } + }, + { + "comment": "column name aliases in outer join queries", + "query": "select name as t0, name as t1 from user left outer join user_extra on user.cola = user_extra.cola", + "plan": { + "QueryType": "SELECT", + "Original": "select name as t0, name as t1 from user left outer join user_extra on user.cola = user_extra.cola", + "Instructions": { + "OperatorType": "SimpleProjection", + "ColumnNames": [ + "0:t0", + "1:t1" + ], + "Columns": "0,0", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "LeftJoin", + "JoinColumnIndexes": "L:0,L:0", + "JoinVars": { + "user_cola": 2 + }, + "TableName": "`user`_user_extra", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select `name` as t0, `name` as t1, `user`.cola from `user` where 1 != 1", + "Query": "select `name` as t0, `name` as t1, `user`.cola from `user`", + "Table": "`user`" + }, + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from user_extra where 1 != 1", + "Query": "select 1 from user_extra where user_extra.cola = :user_cola", + "Table": "user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, + { + 
"comment": "Over clause works for unsharded tables", + "query": "SELECT val, CUME_DIST() OVER w, ROW_NUMBER() OVER w, DENSE_RANK() OVER w, PERCENT_RANK() OVER w, RANK() OVER w AS 'cd' FROM unsharded_a", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT val, CUME_DIST() OVER w, ROW_NUMBER() OVER w, DENSE_RANK() OVER w, PERCENT_RANK() OVER w, RANK() OVER w AS 'cd' FROM unsharded_a", + "Instructions": { + "OperatorType": "Route", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select val, cume_dist() over w, row_number() over w, dense_rank() over w, percent_rank() over w, rank() over w as cd from unsharded_a where 1 != 1", + "Query": "select val, cume_dist() over w, row_number() over w, dense_rank() over w, percent_rank() over w, rank() over w as cd from unsharded_a", + "Table": "unsharded_a" + }, + "TablesUsed": [ + "main.unsharded_a" + ] + } + }, + { + "comment": "join with derived table with alias and join condition - merge into route", + "query": "select 1 from user join (select id as uid from user) as t where t.uid = user.id", + "plan": { + "QueryType": "SELECT", + "Original": "select 1 from user join (select id as uid from user) as t where t.uid = user.id", + "Instructions": { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from (select id as uid from `user` where 1 != 1) as t, `user` where 1 != 1", + "Query": "select 1 from (select id as uid from `user`) as t, `user` where t.uid = `user`.id", + "Table": "`user`" + }, + "TablesUsed": [ + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/show_cases.json b/go/vt/vtgate/planbuilder/testdata/show_cases.json index c20a1c79f5a..45ad277677c 100644 --- a/go/vt/vtgate/planbuilder/testdata/show_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/show_cases.json @@ -407,7 +407,7 @@ "Sharded": false }, "TargetDestination": "AnyShard()", - "Query": "show 
create table unknown", + "Query": "show create table `unknown`", "SingleShardOnly": true } } @@ -720,6 +720,24 @@ } } }, + { + "comment": "show vschema keyspaces", + "query": "show vschema keyspaces", + "plan": { + "QueryType": "SHOW", + "Original": "show vschema keyspaces", + "Instructions": { + "OperatorType": "Rows", + "Fields": { + "Comment": "VARCHAR", + "Foreign Key": "VARCHAR", + "Keyspace": "VARCHAR", + "Sharded": "VARCHAR" + }, + "RowCount": 8 + } + } + }, { "comment": "show vschema vindexes", "query": "show vschema vindexes", diff --git a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json index ee38e7d0538..f6072bcd9a5 100644 --- a/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpcc_cases.json @@ -556,7 +556,7 @@ "Sharded": true }, "FieldQuery": "select c_balance, c_first, c_middle, c_id from customer1 where 1 != 1", - "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by c_first asc", + "Query": "select c_balance, c_first, c_middle, c_id from customer1 where c_w_id = 840 and c_d_id = 1 and c_last = 'test' order by customer1.c_first asc", "Table": "customer1", "Values": [ "840" @@ -608,7 +608,7 @@ "Sharded": true }, "FieldQuery": "select o_id, o_carrier_id, o_entry_d from orders1 where 1 != 1", - "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by o_id desc", + "Query": "select o_id, o_carrier_id, o_entry_d from orders1 where o_w_id = 9894 and o_d_id = 3 and o_c_id = 159 order by orders1.o_id desc", "Table": "orders1", "Values": [ "9894" @@ -660,7 +660,7 @@ "Sharded": true }, "FieldQuery": "select no_o_id from new_orders1 where 1 != 1", - "Query": "select no_o_id from new_orders1 where no_d_id = 689 and no_w_id = 15 order by no_o_id asc limit 1 for update", + "Query": "select no_o_id from new_orders1 where no_d_id = 689 
and no_w_id = 15 order by new_orders1.no_o_id asc limit 1 for update", "Table": "new_orders1", "Values": [ "15" diff --git a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json index f40ea961334..e9f7a37a4aa 100644 --- a/go/vt/vtgate/planbuilder/testdata/tpch_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/tpch_cases.json @@ -2,7 +2,50 @@ { "comment": "TPC-H query 1", "query": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus", - "plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(l_quantity) as avg_qty'" + "plan": { + "QueryType": "SELECT", + "Original": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, avg(l_quantity) as avg_qty, avg(l_extendedprice) as avg_price, avg(l_discount) as avg_disc, count(*) as count_order from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus order by l_returnflag, l_linestatus", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + ":0 as l_returnflag", + ":1 as l_linestatus", + ":2 as sum_qty", + ":3 as sum_base_price", + ":4 as sum_disc_price", + ":5 as sum_charge", + "sum(l_quantity) / count(l_quantity) as avg_qty", + "sum(l_extendedprice) / count(l_extendedprice) as avg_price", + "sum(l_discount) / count(l_discount) as avg_disc", + ":9 as count_order" + ], + "Inputs": [ + { + 
"OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(2) AS sum_qty, sum(3) AS sum_base_price, sum(4) AS sum_disc_price, sum(5) AS sum_charge, sum(6) AS avg_qty, sum(7) AS avg_price, sum(8) AS avg_disc, sum_count_star(9) AS count_order, sum_count(10) AS count(l_quantity), sum_count(11) AS count(l_extendedprice), sum_count(12) AS count(l_discount)", + "GroupBy": "(0|13), (1|14)", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, sum(l_quantity) as avg_qty, sum(l_extendedprice) as avg_price, sum(l_discount) as avg_disc, count(*) as count_order, count(l_quantity), count(l_extendedprice), count(l_discount), weight_string(l_returnflag), weight_string(l_linestatus) from lineitem where 1 != 1 group by l_returnflag, l_linestatus, weight_string(l_returnflag), weight_string(l_linestatus)", + "OrderBy": "(0|13) ASC, (1|14) ASC", + "Query": "select l_returnflag, l_linestatus, sum(l_quantity) as sum_qty, sum(l_extendedprice) as sum_base_price, sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, sum(l_quantity) as avg_qty, sum(l_extendedprice) as avg_price, sum(l_discount) as avg_disc, count(*) as count_order, count(l_quantity), count(l_extendedprice), count(l_discount), weight_string(l_returnflag), weight_string(l_linestatus) from lineitem where l_shipdate <= '1998-12-01' - interval '108' day group by l_returnflag, l_linestatus, weight_string(l_returnflag), weight_string(l_linestatus) order by lineitem.l_returnflag asc, lineitem.l_linestatus asc", + "Table": "lineitem" + } + ] + } + ] + }, + "TablesUsed": [ + "main.lineitem" + ] + } }, { "comment": "TPC-H query 2", 
@@ -170,7 +213,7 @@ }, "FieldQuery": "select o_orderpriority, count(*) as order_count, o_orderkey, weight_string(o_orderpriority) from orders where 1 != 1 group by o_orderpriority, o_orderkey, weight_string(o_orderpriority)", "OrderBy": "(0|3) ASC", - "Query": "select o_orderpriority, count(*) as order_count, o_orderkey, weight_string(o_orderpriority) from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month group by o_orderpriority, o_orderkey, weight_string(o_orderpriority) order by o_orderpriority asc", + "Query": "select o_orderpriority, count(*) as order_count, o_orderkey, weight_string(o_orderpriority) from orders where o_orderdate >= date('1993-07-01') and o_orderdate < date('1993-07-01') + interval '3' month group by o_orderpriority, o_orderkey, weight_string(o_orderpriority) order by orders.o_orderpriority asc", "Table": "orders" }, { @@ -521,67 +564,220 @@ "ResultColumns": 4, "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - ":2 as supp_nation", - ":3 as cust_nation", - ":4 as l_year", - "sum(volume) * count(*) as revenue", - ":5 as weight_string(supp_nation)", - ":6 as weight_string(cust_nation)", - ":7 as weight_string(l_year)" - ], + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|4) ASC, (1|5) ASC, (2|6) ASC", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1,R:1,L:2,L:5,R:2,L:6", + "JoinColumnIndexes": "L:0,R:0,L:1,L:2,L:4,R:1,L:5", "JoinVars": { - "n1_n_name": 4, + "n1_n_name": 0, "o_custkey": 3 }, "TableName": "lineitem_orders_supplier_nation_customer_nation", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "sum(volume) * count(*) as revenue", - ":2 as supp_nation", - ":3 as l_year", - ":4 as orders.o_custkey", - ":5 as n1.n_name", - ":6 as weight_string(supp_nation)", - ":7 as weight_string(l_year)" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1,L:2,R:1,L:4", + 
"JoinVars": { + "l_suppkey": 3 + }, + "TableName": "lineitem_orders_supplier_nation", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,R:1,L:1,L:2,L:3,R:2,L:5", + "JoinColumnIndexes": "L:0,L:1,R:0,L:2,L:4", "JoinVars": { - "l_suppkey": 4 + "l_orderkey": 3 }, - "TableName": "lineitem_orders_supplier_nation", + "TableName": "lineitem_orders", "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "sum(volume) * count(*) as revenue", - ":2 as l_year", - ":3 as orders.o_custkey", - ":4 as n1.n_name", - ":5 as lineitem.l_suppkey", - ":6 as weight_string(l_year)" + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select shipping.l_year, shipping.volume, shipping.l_suppkey, shipping.l_orderkey, weight_string(shipping.l_year) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, l_suppkey as l_suppkey, l_orderkey as l_orderkey from lineitem where 1 != 1) as shipping where 1 != 1", + "Query": "select shipping.l_year, shipping.volume, shipping.l_suppkey, shipping.l_orderkey, weight_string(shipping.l_year) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, l_suppkey as l_suppkey, l_orderkey as l_orderkey from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping", + "Table": "lineitem" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select shipping.o_custkey from (select o_custkey as o_custkey from orders where 1 != 1) as shipping where 1 != 1", + "Query": "select shipping.o_custkey from (select o_custkey as o_custkey from orders where o_orderkey = :l_orderkey) as shipping", + "Table": "orders", + "Values": [ + ":l_orderkey" + ], + "Vindex": "hash" + } + ] + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": 
"R:0,R:1", + "JoinVars": { + "s_nationkey": 0 + }, + "TableName": "supplier_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select shipping.s_nationkey from (select s_nationkey as s_nationkey from supplier where 1 != 1) as shipping where 1 != 1", + "Query": "select shipping.s_nationkey from (select s_nationkey as s_nationkey from supplier where s_suppkey = :l_suppkey) as shipping", + "Table": "supplier", + "Values": [ + ":l_suppkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select shipping.supp_nation, weight_string(shipping.supp_nation) from (select n1.n_name as supp_nation from nation as n1 where 1 != 1) as shipping where 1 != 1", + "Query": "select shipping.supp_nation, weight_string(shipping.supp_nation) from (select n1.n_name as supp_nation from nation as n1 where n1.n_nationkey = :s_nationkey) as shipping", + "Table": "nation", + "Values": [ + ":s_nationkey" ], + "Vindex": "hash" + } + ] + } + ] + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "c_nationkey": 0 + }, + "TableName": "customer_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select shipping.c_nationkey from (select c_nationkey as c_nationkey from customer where 1 != 1) as shipping where 1 != 1", + "Query": "select shipping.c_nationkey from (select c_nationkey as c_nationkey from customer where c_custkey = :o_custkey) as shipping", + "Table": "customer", + "Values": [ + ":o_custkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select shipping.cust_nation, weight_string(shipping.cust_nation) from 
(select n2.n_name as cust_nation from nation as n2 where 1 != 1) as shipping where 1 != 1", + "Query": "select shipping.cust_nation, weight_string(shipping.cust_nation) from (select n2.n_name as cust_nation from nation as n2 where (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') and n2.n_nationkey = :c_nationkey) as shipping", + "Table": "nation", + "Values": [ + ":c_nationkey" + ], + "Vindex": "hash" + } + ] + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "main.customer", + "main.lineitem", + "main.nation", + "main.orders", + "main.supplier" + ] + } + }, + { + "comment": "TPC-H query 8", + "query": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year", + "plan": { + "QueryType": "SELECT", + "Original": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type 
= 'ECONOMY ANODIZED STEEL' ) as all_nations group by o_year order by o_year", + "Instructions": { + "OperatorType": "Projection", + "Expressions": [ + ":0 as o_year", + "sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share" + ], + "Inputs": [ + { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(1) AS sum(case when nation = 'BRAZIL' then volume else 0 end), sum(2) AS sum(volume)", + "GroupBy": "(0|3)", + "Inputs": [ + { + "OperatorType": "SimpleProjection", + "Columns": "0,3,1,4", + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|4) ASC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1,L:3,R:1", + "JoinVars": { + "l_orderkey": 2 + }, + "TableName": "lineitem_part_supplier_nation_orders_customer_nation_region", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:1,R:1", + "JoinVars": { + "l_suppkey": 2, + "volume": 0 + }, + "TableName": "lineitem_part_supplier_nation", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,L:1,L:2,L:3,L:4,L:6", + "JoinColumnIndexes": "L:0,L:1,L:2", "JoinVars": { - "l_orderkey": 5 + "l_partkey": 3 }, - "TableName": "lineitem_orders", + "TableName": "lineitem_part", "Inputs": [ { "OperatorType": "Route", @@ -590,9 +786,8 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select sum(volume) as revenue, l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year), supp_nation, weight_string(supp_nation), cust_nation, weight_string(cust_nation) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, orders.o_custkey as `orders.o_custkey`, lineitem.l_suppkey as `lineitem.l_suppkey`, lineitem.l_orderkey as `lineitem.l_orderkey` from lineitem where 1 != 1) as shipping where 1 != 1 group by 
l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year)", - "OrderBy": "(7|8) ASC, (9|10) ASC, (1|6) ASC", - "Query": "select sum(volume) as revenue, l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year), supp_nation, weight_string(supp_nation), cust_nation, weight_string(cust_nation) from (select extract(year from l_shipdate) as l_year, l_extendedprice * (1 - l_discount) as volume, orders.o_custkey as `orders.o_custkey`, lineitem.l_suppkey as `lineitem.l_suppkey`, lineitem.l_orderkey as `lineitem.l_orderkey` from lineitem where l_shipdate between date('1995-01-01') and date('1996-12-31')) as shipping group by l_year, shipping.`orders.o_custkey`, shipping.`n1.n_name`, shipping.`lineitem.l_suppkey`, shipping.`lineitem.l_orderkey`, weight_string(l_year) order by supp_nation asc, cust_nation asc, l_year asc", + "FieldQuery": "select all_nations.volume, all_nations.l_orderkey, all_nations.l_suppkey, all_nations.l_partkey from (select l_extendedprice * (1 - l_discount) as volume, l_orderkey as l_orderkey, l_suppkey as l_suppkey, l_partkey as l_partkey from lineitem where 1 != 1) as all_nations where 1 != 1", + "Query": "select all_nations.volume, all_nations.l_orderkey, all_nations.l_suppkey, all_nations.l_partkey from (select l_extendedprice * (1 - l_discount) as volume, l_orderkey as l_orderkey, l_suppkey as l_suppkey, l_partkey as l_partkey from lineitem) as all_nations", "Table": "lineitem" }, { @@ -602,11 +797,52 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select count(*) from orders where 1 != 1 group by .0", - "Query": "select count(*) from orders where o_orderkey = :l_orderkey group by .0", - "Table": "orders", + "FieldQuery": "select 1 from part where 1 != 1", + "Query": "select 1 from part where p_type = 'ECONOMY ANODIZED STEEL' and p_partkey = :l_partkey", + "Table": 
"part", "Values": [ - ":l_orderkey" + ":l_partkey" + ], + "Vindex": "hash" + } + ] + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "s_nationkey": 0 + }, + "TableName": "supplier_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select all_nations.s_nationkey from (select s_nationkey as s_nationkey from supplier where 1 != 1) as all_nations where 1 != 1", + "Query": "select all_nations.s_nationkey from (select s_nationkey as s_nationkey from supplier where s_suppkey = :l_suppkey) as all_nations", + "Table": "supplier", + "Values": [ + ":l_suppkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select all_nations.nation, case when nation = 'BRAZIL' then :volume else 0 end from (select n2.n_name as nation from nation as n2 where 1 != 1) as all_nations where 1 != 1", + "Query": "select all_nations.nation, case when nation = 'BRAZIL' then :volume else 0 end from (select n2.n_name as nation from nation as n2 where n2.n_nationkey = :s_nationkey) as all_nations", + "Table": "nation", + "Values": [ + ":s_nationkey" ], "Vindex": "hash" } @@ -615,21 +851,22 @@ ] }, { - "OperatorType": "Projection", - "Expressions": [ - "count(*) * count(*) as count(*)", - ":2 as supp_nation", - ":3 as weight_string(supp_nation)" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:2", + "JoinVars": { + "c_nationkey": 1 + }, + "TableName": "orders_customer_nation_region", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinColumnIndexes": "L:0,R:0,L:2", "JoinVars": { - "s_nationkey": 1 + "o_custkey": 1 }, - "TableName": "supplier_nation", + "TableName": "orders_customer", "Inputs": [ { "OperatorType": "Route", @@ -638,11 +875,11 @@ 
"Name": "main", "Sharded": true }, - "FieldQuery": "select count(*), shipping.`supplier.s_nationkey` from (select supplier.s_nationkey as `supplier.s_nationkey` from supplier where 1 != 1) as shipping where 1 != 1 group by shipping.`supplier.s_nationkey`", - "Query": "select count(*), shipping.`supplier.s_nationkey` from (select supplier.s_nationkey as `supplier.s_nationkey` from supplier where s_suppkey = :l_suppkey) as shipping group by shipping.`supplier.s_nationkey`", - "Table": "supplier", + "FieldQuery": "select all_nations.o_year, all_nations.o_custkey, weight_string(all_nations.o_year) from (select extract(year from o_orderdate) as o_year, o_custkey as o_custkey from orders where 1 != 1) as all_nations where 1 != 1", + "Query": "select all_nations.o_year, all_nations.o_custkey, weight_string(all_nations.o_year) from (select extract(year from o_orderdate) as o_year, o_custkey as o_custkey from orders where o_orderdate between date'1995-01-01' and date('1996-12-31') and o_orderkey = :l_orderkey) as all_nations", + "Table": "orders", "Values": [ - ":l_suppkey" + ":l_orderkey" ], "Vindex": "hash" }, @@ -653,11 +890,51 @@ "Name": "main", "Sharded": true }, - "FieldQuery": "select count(*), supp_nation, weight_string(supp_nation) from (select n1.n_name as supp_nation from nation as n1 where 1 != 1) as shipping where 1 != 1 group by supp_nation, weight_string(supp_nation)", - "Query": "select count(*), supp_nation, weight_string(supp_nation) from (select n1.n_name as supp_nation from nation as n1 where n1.n_nationkey = :s_nationkey) as shipping group by supp_nation, weight_string(supp_nation)", + "FieldQuery": "select all_nations.c_nationkey from (select c_nationkey as c_nationkey from customer where 1 != 1) as all_nations where 1 != 1", + "Query": "select all_nations.c_nationkey from (select c_nationkey as c_nationkey from customer where c_custkey = :o_custkey) as all_nations", + "Table": "customer", + "Values": [ + ":o_custkey" + ], + "Vindex": "hash" + } + ] + 
}, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinVars": { + "n1_n_regionkey": 0 + }, + "TableName": "nation_region", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select n1.n_regionkey from nation as n1 where 1 != 1", + "Query": "select n1.n_regionkey from nation as n1 where n1.n_nationkey = :c_nationkey", "Table": "nation", "Values": [ - ":s_nationkey" + ":c_nationkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select 1 from region where 1 != 1", + "Query": "select 1 from region where r_name = 'AMERICA' and r_regionkey = :n1_n_regionkey", + "Table": "region", + "Values": [ + ":n1_n_regionkey" ], "Vindex": "hash" } @@ -668,57 +945,234 @@ ] } ] - }, + } + ] + } + ] + } + ] + }, + "TablesUsed": [ + "main.customer", + "main.lineitem", + "main.nation", + "main.orders", + "main.part", + "main.region", + "main.supplier" + ] + } + }, + { + "comment": "TPC-H query 9", + "query": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc", + "plan": { + "QueryType": "SELECT", + "Original": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and 
ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc", + "Instructions": { + "OperatorType": "Aggregate", + "Variant": "Ordered", + "Aggregates": "sum(2) AS sum_profit", + "GroupBy": "(0|3), (1|4)", + "ResultColumns": 3, + "Inputs": [ + { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|3) ASC, (1|4) DESC", + "Inputs": [ + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,L:0,L:1,R:1,L:3", + "JoinVars": { + "l_suppkey": 2 + }, + "TableName": "orders_lineitem_part_partsupp_supplier_nation", + "Inputs": [ { - "OperatorType": "Projection", - "Expressions": [ - "count(*) * count(*) as count(*)", - ":2 as cust_nation", - ":3 as weight_string(cust_nation)" - ], + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,R:0,L:4,L:6", + "JoinVars": { + "l_discount": 2, + "l_extendedprice": 1, + "l_partkey": 5, + "l_quantity": 3, + "l_suppkey": 4 + }, + "TableName": "orders_lineitem_part_partsupp", "Inputs": [ { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,R:0,R:1,R:2", + "JoinColumnIndexes": "L:0,R:0,R:1,R:2,R:3,R:4,L:2", "JoinVars": { - "c_nationkey": 1 + "o_orderkey": 1 }, - "TableName": "customer_nation", + "TableName": "orders_lineitem_part", "Inputs": [ { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "Scatter", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select count(*), shipping.`customer.c_nationkey` from (select customer.c_nationkey as `customer.c_nationkey` from customer where 1 != 1) as shipping where 1 != 1 group by shipping.`customer.c_nationkey`", - "Query": "select count(*), shipping.`customer.c_nationkey` from (select customer.c_nationkey as `customer.c_nationkey` from customer where c_custkey = :o_custkey) as shipping group by shipping.`customer.c_nationkey`", - "Table": "customer", - 
"Values": [ - ":o_custkey" - ], - "Vindex": "hash" + "FieldQuery": "select profit.o_year, profit.o_orderkey, weight_string(profit.o_year) from (select extract(year from o_orderdate) as o_year, o_orderkey as o_orderkey from orders where 1 != 1) as profit where 1 != 1", + "Query": "select profit.o_year, profit.o_orderkey, weight_string(profit.o_year) from (select extract(year from o_orderdate) as o_year, o_orderkey as o_orderkey from orders) as profit", + "Table": "orders" }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3,L:4", + "JoinVars": { + "l_partkey": 4 + }, + "TableName": "lineitem_part", + "Inputs": [ + { + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "Values": [ + ":o_orderkey" + ], + "Vindex": "lineitem_map", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "IN", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select l_orderkey, l_linenumber from lineitem_map where 1 != 1", + "Query": "select l_orderkey, l_linenumber from lineitem_map where l_orderkey in ::__vals", + "Table": "lineitem_map", + "Values": [ + "::l_orderkey" + ], + "Vindex": "md5" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select profit.l_extendedprice, profit.l_discount, profit.l_quantity, profit.l_suppkey, profit.l_partkey from (select l_extendedprice, l_discount, l_quantity, l_suppkey as l_suppkey, l_partkey as l_partkey from lineitem where 1 != 1) as profit where 1 != 1", + "Query": "select profit.l_extendedprice, profit.l_discount, profit.l_quantity, profit.l_suppkey, profit.l_partkey from (select l_extendedprice, l_discount, l_quantity, l_suppkey as l_suppkey, l_partkey as l_partkey from lineitem where l_orderkey = :o_orderkey) as profit", + "Table": "lineitem" + } + ] + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + 
"Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select 1 from part where 1 != 1", + "Query": "select 1 from part where p_name like '%green%' and p_partkey = :l_partkey", + "Table": "part", + "Values": [ + ":l_partkey" + ], + "Vindex": "hash" + } + ] + } + ] + }, + { + "OperatorType": "VindexLookup", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "Values": [ + ":l_partkey" + ], + "Vindex": "partsupp_map", + "Inputs": [ { "OperatorType": "Route", - "Variant": "EqualUnique", + "Variant": "IN", "Keyspace": { "Name": "main", "Sharded": true }, - "FieldQuery": "select count(*), cust_nation, weight_string(cust_nation) from (select n2.n_name as cust_nation from nation as n2 where 1 != 1) as shipping where 1 != 1 group by cust_nation, weight_string(cust_nation)", - "Query": "select count(*), cust_nation, weight_string(cust_nation) from (select n2.n_name as cust_nation from nation as n2 where (:n1_n_name = 'FRANCE' and n2.n_name = 'GERMANY' or :n1_n_name = 'GERMANY' and n2.n_name = 'FRANCE') and n2.n_nationkey = :c_nationkey) as shipping group by cust_nation, weight_string(cust_nation)", - "Table": "nation", + "FieldQuery": "select ps_partkey, ps_suppkey from partsupp_map where 1 != 1", + "Query": "select ps_partkey, ps_suppkey from partsupp_map where ps_partkey in ::__vals", + "Table": "partsupp_map", "Values": [ - ":c_nationkey" + "::ps_partkey" ], - "Vindex": "hash" + "Vindex": "md5" + }, + { + "OperatorType": "Route", + "Variant": "ByDestination", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select profit.amount from (select :l_extendedprice * (1 - :l_discount) - ps_supplycost * :l_quantity as amount from partsupp where 1 != 1) as profit where 1 != 1", + "Query": "select profit.amount from (select :l_extendedprice * (1 - :l_discount) - ps_supplycost * :l_quantity as amount from partsupp where ps_partkey = :l_partkey and ps_suppkey = :l_suppkey) as profit", + "Table": "partsupp" } 
] } ] + }, + { + "OperatorType": "Join", + "Variant": "Join", + "JoinColumnIndexes": "R:0,R:1", + "JoinVars": { + "s_nationkey": 0 + }, + "TableName": "supplier_nation", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select profit.s_nationkey from (select s_nationkey as s_nationkey from supplier where 1 != 1) as profit where 1 != 1", + "Query": "select profit.s_nationkey from (select s_nationkey as s_nationkey from supplier where s_suppkey = :l_suppkey) as profit", + "Table": "supplier", + "Values": [ + ":l_suppkey" + ], + "Vindex": "hash" + }, + { + "OperatorType": "Route", + "Variant": "EqualUnique", + "Keyspace": { + "Name": "main", + "Sharded": true + }, + "FieldQuery": "select profit.nation, weight_string(profit.nation) from (select n_name as nation from nation where 1 != 1) as profit where 1 != 1", + "Query": "select profit.nation, weight_string(profit.nation) from (select n_name as nation from nation where n_nationkey = :s_nationkey) as profit", + "Table": "nation", + "Values": [ + ":s_nationkey" + ], + "Vindex": "hash" + } + ] } ] } @@ -727,24 +1181,15 @@ ] }, "TablesUsed": [ - "main.customer", "main.lineitem", "main.nation", "main.orders", + "main.part", + "main.partsupp", "main.supplier" ] } }, - { - "comment": "TPC-H query 8", - "query": "select o_year, sum(case when nation = 'BRAZIL' then volume else 0 end) / sum(volume) as mkt_share from ( select extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) as volume, n2.n_name as nation from part, supplier, lineitem, orders, customer, nation n1, nation n2, region where p_partkey = l_partkey and s_suppkey = l_suppkey and l_orderkey = o_orderkey and o_custkey = c_custkey and c_nationkey = n1.n_nationkey and n1.n_regionkey = r_regionkey and r_name = 'AMERICA' and s_nationkey = n2.n_nationkey and o_orderdate between date '1995-01-01' and date('1996-12-31') and p_type = 'ECONOMY ANODIZED 
STEEL' ) as all_nations group by o_year order by o_year", - "plan": "VT13002: unexpected AST struct for query: o_year" - }, - { - "comment": "TPC-H query 9", - "query": "select nation, o_year, sum(amount) as sum_profit from ( select n_name as nation, extract(year from o_orderdate) as o_year, l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount from part, supplier, lineitem, partsupp, orders, nation where s_suppkey = l_suppkey and ps_suppkey = l_suppkey and ps_partkey = l_partkey and p_partkey = l_partkey and o_orderkey = l_orderkey and s_nationkey = n_nationkey and p_name like '%green%' ) as profit group by nation, o_year order by nation, o_year desc", - "plan": "VT13002: unexpected AST struct for query: nation" - }, { "comment": "TPC-H query 10", "query": "select c_custkey, c_name, sum(l_extendedprice * (1 - l_discount)) as revenue, c_acctbal, n_name, c_address, c_phone, c_comment from customer, orders, lineitem, nation where c_custkey = o_custkey and l_orderkey = o_orderkey and o_orderdate >= date('1993-10-01') and o_orderdate < date('1993-10-01') + interval '3' month and l_returnflag = 'R' and c_nationkey = n_nationkey group by c_custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment order by revenue desc limit 20", @@ -1292,10 +1737,7 @@ "Inputs": [ { "OperatorType": "SimpleProjection", - "Columns": [ - 1, - 3 - ], + "Columns": "1,3", "Inputs": [ { "OperatorType": "Aggregate", @@ -1477,7 +1919,7 @@ }, "FieldQuery": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where 1 != 1", "OrderBy": "(0|5) ASC", - "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where s_suppkey = supplier_no and total_revenue = :__sq1 order by s_suppkey asc", + "Query": "select s_suppkey, s_name, s_address, s_phone, total_revenue, weight_string(s_suppkey) from supplier, revenue0 where s_suppkey = supplier_no and total_revenue 
= :__sq1 order by supplier.s_suppkey asc", "ResultColumns": 5, "Table": "revenue0, supplier" } @@ -1551,7 +1993,7 @@ "Sharded": true }, "FieldQuery": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where 1 != 1", - "Query": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where not :__sq_has_values and ps_suppkey not in ::__sq1", + "Query": "select ps_suppkey, weight_string(ps_suppkey), ps_partkey from partsupp where not :__sq_has_values or ps_suppkey not in ::__sq1", "Table": "partsupp" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/union_cases.json b/go/vt/vtgate/planbuilder/testdata/union_cases.json index 9c1f376b652..3cd698342a5 100644 --- a/go/vt/vtgate/planbuilder/testdata/union_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/union_cases.json @@ -42,8 +42,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select id from music) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select id from music) as dt(c0)", "Table": "`user`, music" } ] @@ -108,9 +108,10 @@ "Original": "(SELECT id FROM user ORDER BY id DESC LIMIT 1) UNION ALL (SELECT id FROM music ORDER BY id DESC LIMIT 1)", "Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "ColumnNames": [ + "0:id" ], + "Columns": "0", "Inputs": [ { "OperatorType": "Concatenate", @@ -128,7 +129,7 @@ }, "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id desc limit :__upper_limit", + "Query": "select id, weight_string(id) 
from `user` order by `user`.id desc limit 1", "Table": "`user`" } ] @@ -146,7 +147,7 @@ }, "FieldQuery": "select id, weight_string(id) from music where 1 != 1", "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", + "Query": "select id, weight_string(id) from music order by music.id desc limit 1", "Table": "music" } ] @@ -238,9 +239,10 @@ "Original": "(select id from user order by id limit 5) union all (select id from music order by id desc limit 5)", "Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0 + "ColumnNames": [ + "0:id" ], + "Columns": "0", "Inputs": [ { "OperatorType": "Concatenate", @@ -258,7 +260,7 @@ }, "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Query": "select id, weight_string(id) from `user` order by `user`.id asc limit 5", "Table": "`user`" } ] @@ -276,7 +278,7 @@ }, "FieldQuery": "select id, weight_string(id) from music where 1 != 1", "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from music order by id desc limit :__upper_limit", + "Query": "select id, weight_string(id) from music order by music.id desc limit 5", "Table": "music" } ] @@ -322,8 +324,9 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "(0:1)" ], + "ResultColumns": 1, "Inputs": [ { "OperatorType": "Concatenate", @@ -335,8 +338,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS where 1 != 1", - "Query": "select distinct CHARACTER_SET_NAME from information_schema.CHARACTER_SETS", + "FieldQuery": "select dt.c0 as CHARACTER_SET_NAME, weight_string(dt.c0) from (select CHARACTER_SET_NAME from information_schema.CHARACTER_SETS where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as CHARACTER_SET_NAME, 
weight_string(dt.c0) from (select distinct CHARACTER_SET_NAME from information_schema.CHARACTER_SETS) as dt(c0)", "Table": "information_schema.CHARACTER_SETS" }, { @@ -346,8 +349,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select user_name from unsharded where 1 != 1", - "Query": "select distinct user_name from unsharded", + "FieldQuery": "select dt.c0 as user_name, weight_string(dt.c0) from (select user_name from unsharded where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as user_name, weight_string(dt.c0) from (select distinct user_name from unsharded) as dt(c0)", "Table": "unsharded" } ] @@ -384,8 +387,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1 union select 1 from dual where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select id from music union select 1 from dual) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select id from music where 1 != 1 union select 1 from dual where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select id from music union select 1 from dual) as dt(c0)", "Table": "`user`, dual, music" } ] @@ -503,8 +506,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from ((select id from `user` where 1 != 1) union (select id from `user` where 1 != 1)) as dt where 1 != 1", - "Query": "select id, weight_string(id) from ((select id from `user` order by id desc) union (select id from `user` order by id asc)) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from ((select id from `user` where 1 != 1) union (select id from `user` where 1 != 1)) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from ((select id from `user` order by id 
desc) union (select id from `user` order by id asc)) as dt(c0)", "Table": "`user`" } ] @@ -534,8 +537,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `1`, weight_string(`1`) from (select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1 union select 2.0 from `user` where 1 != 1) as dt where 1 != 1", - "Query": "select `1`, weight_string(`1`) from (select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual union select 2.0 from `user`) as dt", + "FieldQuery": "select dt.c0 as `1`, weight_string(dt.c0) from (select 1 from dual where 1 != 1 union select null from dual where 1 != 1 union select 1.0 from dual where 1 != 1 union select '1' from dual where 1 != 1 union select 2 from dual where 1 != 1 union select 2.0 from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as `1`, weight_string(dt.c0) from (select 1 from dual union select null from dual union select 1.0 from dual union select '1' from dual union select 2 from dual union select 2.0 from `user`) as dt(c0)", "Table": "`user`, dual" } ] @@ -600,8 +603,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 'b', 'c', weight_string('b'), weight_string('c') from `user` where 1 != 1", - "Query": "select distinct 'b', 'c', weight_string('b'), weight_string('c') from `user`", + "FieldQuery": "select dt.c0 as b, dt.c1 as c, weight_string(dt.c0), weight_string(dt.c1) from (select 'b', 'c' from `user` where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as b, dt.c1 as c, weight_string(dt.c0), weight_string(dt.c1) from (select distinct 'b', 'c' from `user`) as dt(c0, c1)", "Table": "`user`" } ] @@ -623,9 +626,10 @@ "Instructions": { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci", - "1: utf8mb4_0900_ai_ci" + "(0:2)", + "(1:3)" ], + 
"ResultColumns": 2, "Inputs": [ { "OperatorType": "Concatenate", @@ -637,14 +641,14 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select 'b', 'c' from `user` where 1 != 1", - "Query": "select distinct 'b', 'c' from `user`", + "FieldQuery": "select dt.c0 as b, dt.c1 as c, weight_string(dt.c0), weight_string(dt.c1) from (select 'b', 'c' from `user` where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as b, dt.c1 as c, weight_string(dt.c0), weight_string(dt.c1) from (select distinct 'b', 'c' from `user`) as dt(c0, c1)", "Table": "`user`" }, { "OperatorType": "Join", "Variant": "Join", - "JoinColumnIndexes": "L:0,L:1", + "JoinColumnIndexes": "L:0,L:1,L:2,L:3", "TableName": "`user`_user_extra", "Inputs": [ { @@ -654,8 +658,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select `user`.id, `user`.`name` from `user` where 1 != 1", - "Query": "select distinct `user`.id, `user`.`name` from `user`", + "FieldQuery": "select `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user` where 1 != 1", + "Query": "select distinct `user`.id, `user`.`name`, weight_string(`user`.id), weight_string(`user`.`name`) from `user`", "Table": "`user`" }, { @@ -762,8 +766,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id + 1 from `user` where 1 != 1 union select user_id from user_extra where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select id + 1 from `user` union select user_id from user_extra) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select id + 1 from `user` where 1 != 1 union select user_id from user_extra where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select id + 1 from `user` union select user_id from user_extra) as 
dt(c0)", "Table": "`user`, user_extra" } ] @@ -797,8 +801,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select id from music) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select id from music where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select id from music) as dt(c0)", "Table": "`user`, music" }, { @@ -808,8 +812,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select 1, weight_string(1) from unsharded where 1 != 1", - "Query": "select distinct 1, weight_string(1) from unsharded", + "FieldQuery": "select dt.c0 as `1`, weight_string(dt.c0) from (select 1 from unsharded where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as `1`, weight_string(dt.c0) from (select distinct 1 from unsharded) as dt(c0)", "Table": "unsharded" } ] @@ -847,8 +851,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select 3 from dual limit :__upper_limit) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select 3 from dual limit :__upper_limit) as dt(c0)", "Table": "`user`, dual" } ] @@ -883,6 +887,49 @@ ] } }, + { + "comment": "test handling of TEXT column type handling", + "query": "select table_comment from information_schema.tables union select table_comment from information_schema.tables", + 
"plan": { + "QueryType": "SELECT", + "Original": "select table_comment from information_schema.tables union select table_comment from information_schema.tables", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "0" + ], + "Inputs": [ + { + "OperatorType": "Concatenate", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1", + "Query": "select distinct table_comment from information_schema.`tables`", + "Table": "information_schema.`tables`" + }, + { + "OperatorType": "Route", + "Variant": "DBA", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "FieldQuery": "select table_comment from information_schema.`tables` where 1 != 1", + "Query": "select distinct table_comment from information_schema.`tables`", + "Table": "information_schema.`tables`" + } + ] + } + ] + } + } + }, { "comment": "UNION that needs to be reordered to be merged more aggressively. 
Gen4 is able to get it down to 2 routes", "query": "select col from unsharded union select id from user union select col2 from unsharded union select col from user_extra", @@ -906,8 +953,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select col, weight_string(col) from (select col from unsharded where 1 != 1 union select col2 from unsharded where 1 != 1) as dt where 1 != 1", - "Query": "select col, weight_string(col) from (select col from unsharded union select col2 from unsharded) as dt", + "FieldQuery": "select dt.c0 as col, weight_string(dt.c0) from (select col from unsharded where 1 != 1 union select col2 from unsharded where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as col, weight_string(dt.c0) from (select col from unsharded union select col2 from unsharded) as dt(c0)", "Table": "unsharded" }, { @@ -917,8 +964,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select col from user_extra where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select col from user_extra) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select col from user_extra where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select col from user_extra) as dt(c0)", "Table": "`user`, user_extra" } ] @@ -963,7 +1010,7 @@ }, "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) ASC", - "Query": "select id, weight_string(id) from `user` order by id asc limit :__upper_limit", + "Query": "select id, weight_string(id) from `user` order by `user`.id asc limit 5", "Table": "`user`" } ] @@ -981,7 +1028,7 @@ }, "FieldQuery": "select id, weight_string(id) from `user` where 1 != 1", "OrderBy": "(0|1) DESC", - "Query": "select id, weight_string(id) from `user` order by id 
desc limit :__upper_limit", + "Query": "select id, weight_string(id) from `user` order by `user`.id desc limit 5", "Table": "`user`" } ] @@ -1056,8 +1103,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id, weight_string(id) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt where 1 != 1", - "Query": "select id, weight_string(id) from (select id from `user` union select 3 from dual) as dt", + "FieldQuery": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` where 1 != 1 union select 3 from dual where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as id, weight_string(dt.c0) from (select id from `user` union select 3 from dual) as dt(c0)", "Table": "`user`, dual" } ] @@ -1070,6 +1117,47 @@ ] } }, + { + "comment": "Conflicting column names in union", + "query": "select id, id from user union select id, bar from user_extra order by 1", + "plan": { + "QueryType": "SELECT", + "Original": "select id, id from user union select id, bar from user_extra order by 1", + "Instructions": { + "OperatorType": "Sort", + "Variant": "Memory", + "OrderBy": "(0|2) ASC", + "ResultColumns": 2, + "Inputs": [ + { + "OperatorType": "Distinct", + "Collations": [ + "(0:2)", + "(1:3)", + "2" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.c0 as id, dt.c1 as id, weight_string(dt.c0), weight_string(dt.c1) from (select id, id from `user` where 1 != 1 union select id, bar from user_extra where 1 != 1) as dt(c0, c1) where 1 != 1", + "Query": "select dt.c0 as id, dt.c1 as id, weight_string(dt.c0), weight_string(dt.c1) from (select id, id from `user` union select id, bar from user_extra) as dt(c0, c1)", + "Table": "`user`, user_extra" + } + ] + } + ] + }, + "TablesUsed": [ + "user.user", + "user.user_extra" + ] + } + }, { "comment": "select 1 from (select id+42 as foo from user union select 1+id as foo from 
unsharded) as t", "query": "select 1 from (select id+42 as foo from user union select 1+id as foo from unsharded) as t", @@ -1098,8 +1186,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select id + 42 as foo, weight_string(id + 42) from `user` where 1 != 1", - "Query": "select distinct id + 42 as foo, weight_string(id + 42) from `user`", + "FieldQuery": "select dt.c0 as foo, weight_string(dt.c0) from (select id + 42 as foo from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as foo, weight_string(dt.c0) from (select distinct id + 42 as foo from `user`) as dt(c0)", "Table": "`user`" }, { @@ -1109,8 +1197,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select 1 + id as foo, weight_string(1 + id) from unsharded where 1 != 1", - "Query": "select distinct 1 + id as foo, weight_string(1 + id) from unsharded", + "FieldQuery": "select dt.c0 as foo, weight_string(dt.c0) from (select 1 + id as foo from unsharded where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as foo, weight_string(dt.c0) from (select distinct 1 + id as foo from unsharded) as dt(c0)", "Table": "unsharded" } ] @@ -1138,7 +1226,7 @@ { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci" + "0: utf8mb3_general_ci" ], "Inputs": [ { @@ -1218,18 +1306,18 @@ { "OperatorType": "Distinct", "Collations": [ - "0: utf8mb4_0900_ai_ci", - "1: utf8mb4_0900_ai_ci", - "2: utf8mb4_0900_ai_ci", - "3: utf8mb4_0900_ai_ci", - "4: utf8mb4_0900_ai_ci", - "5: utf8mb4_0900_ai_ci", - "6: utf8mb4_0900_ai_ci", + "0: utf8mb3_general_ci", + "1: utf8mb3_general_ci", + "2: utf8mb3_general_ci", + "3: utf8mb3_general_ci", + "4: utf8mb3_general_ci", + "5: utf8mb3_general_ci", + "6: utf8mb3_general_ci", "7", "8", - "9: utf8mb4_0900_ai_ci", - "10: utf8mb4_0900_ai_ci", - "11: utf8mb4_0900_ai_ci" + "9: utf8mb3_general_ci", + "10: utf8mb3_general_ci", + "11: utf8mb3_general_ci" ], "Inputs": [ { @@ -1316,8 +1404,8 @@ "Name": "main", "Sharded": false }, - "FieldQuery": "select 
id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded where 1 != 1", - "Query": "select distinct id, foo, bar, weight_string(id), weight_string(foo), weight_string(bar) from unsharded", + "FieldQuery": "select dt.c0 as id, dt.c1 as foo, dt.c2 as bar, weight_string(dt.c0), weight_string(dt.c1), weight_string(dt.c2) from (select id, foo, bar from unsharded where 1 != 1) as dt(c0, c1, c2) where 1 != 1", + "Query": "select dt.c0 as id, dt.c1 as foo, dt.c2 as bar, weight_string(dt.c0), weight_string(dt.c1), weight_string(dt.c2) from (select distinct id, foo, bar from unsharded) as dt(c0, c1, c2)", "Table": "unsharded" }, { @@ -1429,8 +1517,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select bar, baz, toto, weight_string(bar), weight_string(baz), weight_string(toto) from (select bar, baz, toto from music where 1 != 1 union select foo, foo, foo from `user` where 1 != 1) as dt where 1 != 1", - "Query": "select bar, baz, toto, weight_string(bar), weight_string(baz), weight_string(toto) from (select bar, baz, toto from music union select foo, foo, foo from `user`) as dt", + "FieldQuery": "select dt.c0 as bar, dt.c1 as baz, dt.c2 as toto, weight_string(dt.c0), weight_string(dt.c1), weight_string(dt.c2) from (select bar, baz, toto from music where 1 != 1 union select foo, foo, foo from `user` where 1 != 1) as dt(c0, c1, c2) where 1 != 1", + "Query": "select dt.c0 as bar, dt.c1 as baz, dt.c2 as toto, weight_string(dt.c0), weight_string(dt.c1), weight_string(dt.c2) from (select bar, baz, toto from music union select foo, foo, foo from `user`) as dt(c0, c1, c2)", "Table": "`user`, music" } ] @@ -1451,8 +1539,8 @@ "OperatorType": "Distinct", "Collations": [ "(0:3)", - "(1:3)", - "(2:3)" + "(1:4)", + "(2:5)" ], "ResultColumns": 3, "Inputs": [ @@ -1463,8 +1551,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select foo, foo, foo, weight_string(foo) from (select foo, foo, foo from `user` where 1 != 1 union select bar, baz, toto 
from music where 1 != 1) as dt where 1 != 1", - "Query": "select foo, foo, foo, weight_string(foo) from (select foo, foo, foo from `user` union select bar, baz, toto from music) as dt", + "FieldQuery": "select dt.c0 as foo, dt.c1 as foo, dt.c2 as foo, weight_string(dt.c0), weight_string(dt.c1), weight_string(dt.c2) from (select foo, foo, foo from `user` where 1 != 1 union select bar, baz, toto from music where 1 != 1) as dt(c0, c1, c2) where 1 != 1", + "Query": "select dt.c0 as foo, dt.c1 as foo, dt.c2 as foo, weight_string(dt.c0), weight_string(dt.c1), weight_string(dt.c2) from (select foo, foo, foo from `user` union select bar, baz, toto from music) as dt(c0, c1, c2)", "Table": "`user`, music" } ] @@ -1503,8 +1591,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select foo, weight_string(foo) from (select foo from `user` where 1 != 1 union select foo from `user` where 1 != 1) as dt where 1 != 1", - "Query": "select foo, weight_string(foo) from (select foo from `user` where bar = 12 union select foo from `user` where bar = 134) as dt", + "FieldQuery": "select dt.c0 as foo, weight_string(dt.c0) from (select foo from `user` where 1 != 1 union select foo from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as foo, weight_string(dt.c0) from (select foo from `user` where bar = 12 union select foo from `user` where bar = 134) as dt(c0)", "Table": "`user`" } ] @@ -1522,8 +1610,8 @@ "Name": "user", "Sharded": true }, - "FieldQuery": "select bar, weight_string(bar) from (select bar from music where 1 != 1 union select bar from music where 1 != 1) as dt where 1 != 1", - "Query": "select bar, weight_string(bar) from (select bar from music where foo = 12 and bar = :t1_foo union select bar from music where foo = 1234 and bar = :t1_foo) as dt", + "FieldQuery": "select dt.c0 as bar, weight_string(dt.c0) from (select bar from music where 1 != 1 union select bar from music where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as bar, 
weight_string(dt.c0) from (select bar from music where foo = 12 and bar = :t1_foo union select bar from music where foo = 1234 and bar = :t1_foo) as dt(c0)", "Table": "music" } ] @@ -1535,5 +1623,190 @@ "user.user" ] } + }, + { + "comment": "Select literals from table union Select literals from table", + "query": "SELECT 1 from user UNION SELECT 2 from user", + "plan": { + "QueryType": "SELECT", + "Original": "SELECT 1 from user UNION SELECT 2 from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "0" + ], + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select 1 from `user` where 1 != 1 union select 2 from `user` where 1 != 1", + "Query": "select 1 from `user` union select 2 from `user`", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Select column from table union Select literals from table", + "query": "select col1 from user union select 3 from user", + "plan": { + "QueryType": "SELECT", + "Original": "select col1 from user union select 3 from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.c0 as col1, weight_string(dt.c0) from (select col1 from `user` where 1 != 1 union select 3 from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as col1, weight_string(dt.c0) from (select col1 from `user` union select 3 from `user`) as dt(c0)", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Select literals from table union Select column from table", + "query": "select 3 from user union select col1 from user", + "plan": { + "QueryType": "SELECT", + "Original": "select 3 from user union select col1 from user", + "Instructions": 
{ + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.c0 as `3`, weight_string(dt.c0) from (select 3 from `user` where 1 != 1 union select col1 from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as `3`, weight_string(dt.c0) from (select 3 from `user` union select col1 from `user`) as dt(c0)", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Select literals from table union Select now() from table", + "query": "select 3 from user union select now() from user", + "plan": { + "QueryType": "SELECT", + "Original": "select 3 from user union select now() from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.c0 as `3`, weight_string(dt.c0) from (select 3 from `user` where 1 != 1 union select now() from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as `3`, weight_string(dt.c0) from (select 3 from `user` union select now() from `user`) as dt(c0)", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Select now() from table union Select literals from table", + "query": "select now() from user union select 3 from user", + "plan": { + "QueryType": "SELECT", + "Original": "select now() from user union select 3 from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.c0 as `now()`, weight_string(dt.c0) from (select now() from `user` where 
1 != 1 union select 3 from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as `now()`, weight_string(dt.c0) from (select now() from `user` union select 3 from `user`) as dt(c0)", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "Select now() from table union Select column from table", + "query": "select now() from user union select id from user", + "plan": { + "QueryType": "SELECT", + "Original": "select now() from user union select id from user", + "Instructions": { + "OperatorType": "Distinct", + "Collations": [ + "(0:1)" + ], + "ResultColumns": 1, + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select dt.c0 as `now()`, weight_string(dt.c0) from (select now() from `user` where 1 != 1 union select id from `user` where 1 != 1) as dt(c0) where 1 != 1", + "Query": "select dt.c0 as `now()`, weight_string(dt.c0) from (select now() from `user` union select id from `user`) as dt(c0)", + "Table": "`user`" + } + ] + }, + "TablesUsed": [ + "user.user" + ] + } } ] diff --git a/go/vt/vtgate/planbuilder/testdata/unknown_schema_cases.json b/go/vt/vtgate/planbuilder/testdata/unknown_schema_cases.json index 7bbc4b5b509..df4459d9e0f 100644 --- a/go/vt/vtgate/planbuilder/testdata/unknown_schema_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/unknown_schema_cases.json @@ -52,7 +52,7 @@ { "comment": "Group by column number, used with non-aliased expression (duplicated code)", "query": "select * from user group by 1", - "plan": "cannot use column offsets in group statement when using `*`" + "plan": "cannot use column offsets in group clause when using `*`" }, { "comment": "create view with Cannot auto-resolve for cross-shard joins", @@ -68,5 +68,15 @@ "comment": "unsharded insert, no col list with auto-inc", "query": "insert into unsharded_auto values(1,1)", "plan": "VT09004: INSERT should contain column list or the 
table should have authoritative columns in vschema" + }, + { + "comment": "We need schema tracking to allow unexpanded columns inside UNION", + "query": "select x from (select t.*, 0 as x from user t union select t.*, 1 as x from user_extra t) AS t", + "plan": "VT09015: schema tracking required" + }, + { + "comment": "multi table delete with 1 sharded and 1 reference table", + "query": "delete u, r from user u join ref_with_source r on u.col = r.col", + "plan": "VT09015: schema tracking required" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json index 923e7804782..251af436d27 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json @@ -14,51 +14,11 @@ "query": "select * from user natural right join user_extra", "plan": "VT12001: unsupported: natural right join" }, - { - "comment": "Multi-value aggregates not supported", - "query": "select count(a,b) from user", - "plan": "VT03001: aggregate functions take a single argument 'count(a, b)'" - }, { "comment": "subqueries not supported in group by", "query": "select id from user group by id, (select id from user_extra)", "plan": "VT12001: unsupported: subqueries in GROUP BY" }, - { - "comment": "subqueries in delete", - "query": "delete from user where col = (select id from unsharded)", - "plan": "VT12001: unsupported: subqueries in DML" - }, - { - "comment": "sharded subqueries in unsharded delete", - "query": "delete from unsharded where col = (select id from user)", - "plan": "VT12001: unsupported: subqueries in DML" - }, - { - "comment": "sharded delete with limit clasue", - "query": "delete from user_extra limit 10", - "plan": "VT12001: unsupported: multi shard DELETE with LIMIT" - }, - { - "comment": "sharded subquery in unsharded subquery in unsharded delete", - "query": "delete from unsharded where col = (select id from unsharded where id = (select id from 
user))", - "plan": "VT12001: unsupported: subqueries in DML" - }, - { - "comment": "sharded join unsharded subqueries in unsharded delete", - "query": "delete from unsharded where col = (select id from unsharded join user on unsharded.id = user.id)", - "plan": "VT12001: unsupported: subqueries in DML" - }, - { - "comment": "scatter update with limit clause", - "query": "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1", - "plan": "VT12001: unsupported: multi shard UPDATE with LIMIT" - }, - { - "comment": "multi delete multi table", - "query": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'", - "plan": "VT12001: unsupported: multi-shard or vindex write statement" - }, { "comment": "update changes primary vindex column", "query": "update user set id = 1 where id = 1", @@ -82,27 +42,17 @@ { "comment": "update by primary keyspace id, changing one vindex column, limit without order clause", "query": "update user_metadata set email = 'juan@vitess.io' where user_id = 1 limit 10", - "plan": "VT12001: unsupported: you need to provide the ORDER BY clause when using LIMIT; invalid update on vindex: email_user_map" - }, - { - "comment": "update with derived table", - "query": "update (select id from user) as u set id = 4", - "plan": "The target table u of the UPDATE is not updatable" + "plan": "VT12001: unsupported: Vindex update should have ORDER BY clause when using LIMIT" }, { - "comment": "join in update tables", - "query": "update user join user_extra on user.id = user_extra.id set user.name = 'foo'", - "plan": "VT12001: unsupported: unaliased multiple tables in update" - }, - { - "comment": "multiple tables in update", - "query": "update user as u, user_extra as ue set u.name = 'foo' where u.id = ue.id", - "plan": "VT12001: unsupported: multiple (2) tables in update" + "comment": "multi table update with dependent column getting updated", + "query": "update user u, user_extra ue set u.name = 'test' + ue.col, 
ue.col = 5 where u.id = ue.id and u.id = 1;", + "plan": "VT12001: unsupported: 'ue.col' column referenced in update expression ''test' + ue.col' is itself updated" }, { "comment": "unsharded insert, col list does not match values", "query": "insert into unsharded_auto(id, val) values(1)", - "plan": "VT03006: column count does not match value count at row 1" + "plan": "VT03006: column count does not match value count with the row" }, { "comment": "sharded upsert can't change vindex", @@ -159,11 +109,6 @@ "query": "replace into user(id) values (1), (2)", "plan": "VT12001: unsupported: REPLACE INTO with sharded keyspace" }, - { - "comment": "delete with multi-table targets", - "query": "delete music,user from music inner join user where music.id = user.id", - "plan": "VT12001: unsupported: multi-shard or vindex write statement" - }, { "comment": "select get_lock with non-dual table", "query": "select get_lock('xyz', 10) from user", @@ -224,11 +169,6 @@ "query": "create view main.view_a as select * from user.user_extra", "plan": "VT12001: unsupported: Select query does not belong to the same keyspace as the view statement" }, - { - "comment": "avg function on scatter query", - "query": "select avg(id) from user", - "plan": "VT12001: unsupported: in scatter query: aggregation function 'avg(id)'" - }, { "comment": "outer and inner subquery route reference the same \"uu.id\" name\n# but they refer to different things. 
The first reference is to the outermost query,\n# and the second reference is to the innermost 'from' subquery.\n# This query will never work as the inner derived table is only selecting one of the column", "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select id from user_extra where user_id = 5) uu where uu.user_id = uu.id))", @@ -239,11 +179,6 @@ "query": "select id2 from user uu where id in (select id from user where id = uu.id and user.col in (select col from (select col, id, user_id from user_extra where user_id = 5) uu where uu.user_id = uu.id))", "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" }, - { - "comment": "rewrite of 'order by 2' that becomes 'order by id', leading to ambiguous binding.", - "query": "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2", - "plan": "Column 'id' in field list is ambiguous" - }, { "comment": "unsupported with clause in delete statement", "query": "with x as (select * from user) delete from x", @@ -299,6 +234,11 @@ "query": "delete from user where x = (@val := 42)", "plan": "VT12001: unsupported: Assignment expression" }, + { + "comment": "explain - routed table with join on different keyspace table", + "query": "explain select 1, second_user.foo.id, foo.col from second_user.foo join user.user join main.unsharded", + "plan": "VT03031: EXPLAIN is only supported for single keyspace" + }, { "comment": "extremum on input from both sides", "query": "insert into music(user_id, id) select foo, bar from music on duplicate key update id = id+1", @@ -335,14 +275,9 @@ "plan": "VT12001: unsupported: unmergable subquery can not be inside complex expression" }, { - "comment": "cant switch sides for outer joins", - "query": "select id from user left join (select user_id from user_extra limit 10) ue on user.id = ue.user_id", - "plan": "VT12001: unsupported: LEFT JOIN with derived tables" - }, - { - 
"comment": "limit on both sides means that we can't evaluate this at all", + "comment": "this query needs better type information to be able to use the hash join", "query": "select id from (select id from user limit 10) u join (select user_id from user_extra limit 10) ue on u.id = ue.user_id", - "plan": "VT12001: unsupported: JOIN between derived tables" + "plan": "VT12001: unsupported: missing type information for [u.id, ue.user_id]" }, { "comment": "multi-shard union", @@ -388,5 +323,40 @@ "comment": "Alias cannot clash with base tables", "query": "WITH user AS (SELECT col FROM user) SELECT * FROM user", "plan": "VT12001: unsupported: do not support CTE that use the CTE alias inside the CTE query" + }, + { + "comment": "correlated subqueries in select expressions are unsupported", + "query": "SELECT (SELECT sum(user.name) FROM music LIMIT 1) FROM user", + "plan": "VT12001: unsupported: correlated subquery is only supported for EXISTS" + }, + { + "comment": "reference table delete with join", + "query": "delete r from user u join ref_with_source r on u.col = r.col", + "plan": "VT12001: unsupported: DELETE on reference table with join" + }, + { + "comment": "group_concat unsupported when needs full evaluation at vtgate with more than 1 column", + "query": "select group_concat(user.col1, music.col2) x from user join music on user.col = music.col order by x", + "plan": "VT12001: unsupported: group_concat with more than 1 column" + }, + { + "comment": "count aggregation function having multiple column", + "query": "select count(distinct user_id, name) from user", + "plan": "VT12001: unsupported: distinct aggregation function with multiple expressions 'count(distinct user_id, `name`)'" + }, + { + "comment": "count and sum distinct on different columns", + "query": "SELECT COUNT(DISTINCT col), SUM(DISTINCT id) FROM user", + "plan": "VT12001: unsupported: only one DISTINCT aggregation is allowed in a SELECT: sum(distinct id)" + }, + { + "comment": "Over clause isn't 
supported in sharded cases", + "query": "SELECT val, CUME_DIST() OVER w, ROW_NUMBER() OVER w, DENSE_RANK() OVER w, PERCENT_RANK() OVER w, RANK() OVER w AS 'cd' FROM user", + "plan": "VT12001: unsupported: OVER CLAUSE with sharded keyspace" + }, + { + "comment": "WITH ROLLUP not supported on sharded queries", + "query": "select a, b, c, sum(d) from user group by a, b, c with rollup", + "plan": "VT12001: unsupported: GROUP BY WITH ROLLUP not supported for sharded queries" } ] diff --git a/go/vt/vtgate/planbuilder/testdata/view_cases.json b/go/vt/vtgate/planbuilder/testdata/view_cases.json index decc6a117cf..5d5dcd81a33 100644 --- a/go/vt/vtgate/planbuilder/testdata/view_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/view_cases.json @@ -11,7 +11,7 @@ "Name": "user", "Sharded": true }, - "Query": "alter view user_extra as select * from `user`.`user`" + "Query": "alter view user_extra as select * from `user`" }, "TablesUsed": [ "user.user_extra" @@ -35,7 +35,7 @@ "Name": "user", "Sharded": true }, - "Query": "create view view_ac as select user_id, col1, col2 from authoritative" + "Query": "create view view_ac as select * from authoritative" }, "TablesUsed": [ "user.view_ac" diff --git a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json index 4c6256d93cc..de5356346b2 100644 --- a/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/vindex_func_cases.json @@ -73,11 +73,12 @@ "Original": "select id, keyspace_id, id from user_index where id = :id", "Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1, - 0 + "ColumnNames": [ + "0:id", + "1:keyspace_id", + "2:id" ], + "Columns": "0,1,0", "Inputs": [ { "OperatorType": "VindexFunc", @@ -113,11 +114,12 @@ "Original": "select id, keyspace_id, id from second_user.hash_dup where id = :id", "Instructions": { "OperatorType": "SimpleProjection", - "Columns": [ - 0, - 1, - 0 + "ColumnNames": [ + 
"0:id", + "1:keyspace_id", + "2:id" ], + "Columns": "0,1,0", "Inputs": [ { "OperatorType": "VindexFunc", diff --git a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json index a7824126c98..a8fe91e5d49 100644 --- a/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json +++ b/go/vt/vtgate/planbuilder/testdata/vschemas/schema.json @@ -25,6 +25,12 @@ "user.user" ] }, + { + "from_table": "second_user.bar", + "to_tables": [ + "user.music" + ] + }, { "from_table": "primary_redirect@primary", "to_tables": [ @@ -166,6 +172,16 @@ "to": "keyspace_id", "cost": "300" } + }, + "lkp_bf_vdx": { + "type": "name_lkp_test", + "owner": "customer", + "params": { + "table": "lkp_shard_vdx", + "from": " ", + "to": "keyspace_id", + "write_only": "true" + } } }, "tables": { @@ -209,7 +225,8 @@ }, { "name": "textcol2", - "type": "VARCHAR" + "type": "VARCHAR", + "collation_name": "big5_bin" } ] }, @@ -476,6 +493,10 @@ { "column": "phone", "name": "unq_lkp_bf_vdx" + }, + { + "column": "name", + "name": "lkp_bf_vdx" } ] }, @@ -493,8 +514,7 @@ "sharded": true, "vindexes": { "hash_dup": { - "type": "hash_test", - "owner": "user" + "type": "hash_test" } }, "tables": { @@ -723,6 +743,20 @@ } ] }, + "tbl_auth": { + "columns": [ + { + "name": "id" + } + ], + "column_vindexes": [ + { + "column": "id", + "name": "hash_vin" + } + ], + "column_list_authoritative": true + }, "tblrefDef": { "column_vindexes": [ { @@ -752,20 +786,162 @@ "unsharded_fk_allow": { "foreignKeyMode": "managed", "tables": { - "u_tbl1": {}, - "u_tbl2": {}, - "u_tbl3": {}, - "u_tbl4": {}, - "u_tbl5": {}, - "u_tbl6": {}, - "u_tbl7": {}, - "u_tbl8": {}, - "u_tbl9": {}, + "u_tbl1": { + "columns": [ + { + "name": "col1", + "type": "VARCHAR" + }, + { + "name": "col14", + "type": "INT16" + } + ] + }, + "u_tbl2": { + "columns": [ + { + "name": "col2", + "type": "VARCHAR" + } + ] + }, + "u_tbl3": { + "columns": [ + { + "name": "col3", + "type": "VARCHAR" + } + ] + }, + "u_tbl4": { 
+ "columns": [ + { + "name": "col41", + "type": "INT16" + }, + { + "name": "col4", + "type": "VARCHAR" + } + ] + }, + "u_tbl5": { + "columns": [ + { + "name": "col5", + "type": "VARCHAR" + } + ] + }, + "u_tbl6": { + "columns": [ + { + "name": "col6", + "type": "VARCHAR" + } + ] + }, + "u_tbl7": { + "columns": [ + { + "name": "col7", + "type": "VARCHAR" + } + ] + }, + "u_tbl8": { + "columns": [ + { + "name": "col8", + "type": "VARCHAR" + } + ] + }, + "u_tbl9": { + "columns": [ + { + "name": "col9", + "type": "VARCHAR" + }, + {"name": "foo"}, + {"name": "bar", "default": "1"} + ] + }, + "u_tbl10": { + "columns": [ + { + "name": "col10", + "type": "VARCHAR" + }, + {"name": "col"}, + {"name": "id"} + ], + "column_list_authoritative": true + }, + "u_tbl11": { + "columns": [ + { + "name": "col11", + "type": "VARCHAR" + }, + {"name": "col"}, + {"name": "id"} + ], + "column_list_authoritative": true + }, "u_tbl": {}, "u_multicol_tbl1": {}, "u_multicol_tbl2": {}, "u_multicol_tbl3": {} } + }, + "ordering": { + "sharded": true, + "vindexes": { + "xxhash": { + "type": "xxhash" + }, + "oid_vdx": { + "type": "consistent_lookup_unique", + "params": { + "table": "oid_idx", + "from": "oid", + "to": "keyspace_id" + }, + "owner": "order" + } + }, + "tables": { + "order": { + "column_vindexes": [ + { + "column": "region_id", + "name": "xxhash" + }, + { + "column": "oid", + "name": "oid_vdx" + } + ] + }, + "oid_idx": { + "column_vindexes": [ + { + "column": "oid", + "name": "xxhash" + } + ] + }, + "order_event": { + "column_vindexes": [ + { + "column": "oid", + "name": "oid_vdx" + } + ] + } + } } } } diff --git a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json index 8231b087d6c..3aca1f1dc66 100644 --- a/go/vt/vtgate/planbuilder/testdata/wireup_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/wireup_cases.json @@ -580,15 +580,21 @@ "Table": "`user`" }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - 
"Name": "user", - "Sharded": true - }, - "FieldQuery": "select e.id from user_extra as e where 1 != 1", - "Query": "select e.id from user_extra as e where e.id = :u_col", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select e.id from user_extra as e where 1 != 1", + "Query": "select e.id from user_extra as e where e.id = :u_col limit 10", + "Table": "user_extra" + } + ] } ] } @@ -641,15 +647,21 @@ "Table": "`user`" }, { - "OperatorType": "Route", - "Variant": "Scatter", - "Keyspace": { - "Name": "user", - "Sharded": true - }, - "FieldQuery": "select :u_id + e.id as `u.id + e.id` from user_extra as e where 1 != 1", - "Query": "select :u_id + e.id as `u.id + e.id` from user_extra as e where e.id = :u_col", - "Table": "user_extra" + "OperatorType": "Limit", + "Count": "10", + "Inputs": [ + { + "OperatorType": "Route", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "FieldQuery": "select :u_id + e.id as `u.id + e.id` from user_extra as e where 1 != 1", + "Query": "select :u_id + e.id as `u.id + e.id` from user_extra as e where e.id = :u_col limit 10", + "Table": "user_extra" + } + ] } ] } diff --git a/go/vt/vtgate/planbuilder/uncorrelated_subquery.go b/go/vt/vtgate/planbuilder/uncorrelated_subquery.go deleted file mode 100644 index edb46e5b4fe..00000000000 --- a/go/vt/vtgate/planbuilder/uncorrelated_subquery.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/vtgate/engine" - popcode "vitess.io/vitess/go/vt/vtgate/engine/opcode" -) - -var _ logicalPlan = (*uncorrelatedSubquery)(nil) - -// uncorrelatedSubquery is the logicalPlan for engine.UncorrelatedSubquery. -// This gets built if a subquery is not correlated and can -// therefore can be pulled out and executed upfront. -type uncorrelatedSubquery struct { - subquery logicalPlan - outer logicalPlan - eSubquery *engine.UncorrelatedSubquery -} - -// newUncorrelatedSubquery builds a new uncorrelatedSubquery. -func newUncorrelatedSubquery(opcode popcode.PulloutOpcode, sqName, hasValues string, subquery, outer logicalPlan) *uncorrelatedSubquery { - return &uncorrelatedSubquery{ - subquery: subquery, - outer: outer, - eSubquery: &engine.UncorrelatedSubquery{ - Opcode: opcode, - SubqueryResult: sqName, - HasValues: hasValues, - }, - } -} - -// Primitive implements the logicalPlan interface -func (ps *uncorrelatedSubquery) Primitive() engine.Primitive { - ps.eSubquery.Subquery = ps.subquery.Primitive() - ps.eSubquery.Outer = ps.outer.Primitive() - return ps.eSubquery -} diff --git a/go/vt/vtgate/planbuilder/update.go b/go/vt/vtgate/planbuilder/update.go index eced4251ab3..d653df867fb 100644 --- a/go/vt/vtgate/planbuilder/update.go +++ b/go/vt/vtgate/planbuilder/update.go @@ -41,11 +41,17 @@ func gen4UpdateStmtPlanner( return nil, err } - err = rewriteRoutedTables(updStmt, vschema) + err = queryRewrite(ctx, updStmt) if err != nil { return nil, err } + // If there are non-literal foreign key updates, we have to run the 
query with foreign key checks off. + if ctx.SemTable.HasNonLiteralForeignKeyUpdate(updStmt.Exprs) { + // Since we are running the query with foreign key checks off, we have to verify all the foreign keys validity on vtgate. + ctx.VerifyAllFKs = true + } + // Remove all the foreign keys that don't require any handling. err = ctx.SemTable.RemoveNonRequiredForeignKeys(ctx.VerifyAllFKs, vindexes.UpdateAction) if err != nil { @@ -55,7 +61,7 @@ func gen4UpdateStmtPlanner( if !ctx.SemTable.ForeignKeysPresent() { plan := updateUnshardedShortcut(updStmt, ks, tables) setCommentDirectivesOnPlan(plan, updStmt) - return newPlanResult(plan.Primitive(), operators.QualifiedTables(ks, tables)...), nil + return newPlanResult(plan, operators.QualifiedTables(ks, tables)...), nil } } @@ -63,25 +69,20 @@ func gen4UpdateStmtPlanner( return nil, ctx.SemTable.NotUnshardedErr } - err = queryRewrite(ctx.SemTable, reservedVars, updStmt) - if err != nil { - return nil, err - } - op, err := operators.PlanQuery(ctx, updStmt) if err != nil { return nil, err } - plan, err := transformToLogicalPlan(ctx, op) + plan, err := transformToPrimitive(ctx, op) if err != nil { return nil, err } - return newPlanResult(plan.Primitive(), operators.TablesUsed(op)...), nil + return newPlanResult(plan, operators.TablesUsed(op)...), nil } -func updateUnshardedShortcut(stmt *sqlparser.Update, ks *vindexes.Keyspace, tables []*vindexes.Table) logicalPlan { +func updateUnshardedShortcut(stmt *sqlparser.Update, ks *vindexes.Keyspace, tables []*vindexes.Table) engine.Primitive { edml := engine.NewDML() edml.Keyspace = ks edml.Opcode = engine.Unsharded @@ -89,5 +90,5 @@ func updateUnshardedShortcut(stmt *sqlparser.Update, ks *vindexes.Keyspace, tabl for _, tbl := range tables { edml.TableNames = append(edml.TableNames, tbl.Name.String()) } - return &primitiveWrapper{prim: &engine.Update{DML: edml}} + return &engine.Update{DML: edml} } diff --git a/go/vt/vtgate/planbuilder/vexplain.go b/go/vt/vtgate/planbuilder/vexplain.go 
index 5c99ab87a95..ef75dc15a21 100644 --- a/go/vt/vtgate/planbuilder/vexplain.go +++ b/go/vt/vtgate/planbuilder/vexplain.go @@ -19,9 +19,8 @@ package planbuilder import ( "context" "encoding/json" - "fmt" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" @@ -30,28 +29,10 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" ) -// Builds an explain-plan for the given Primitive -func buildExplainPlan(ctx context.Context, stmt sqlparser.Explain, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) (*planResult, error) { - switch explain := stmt.(type) { - case *sqlparser.ExplainTab: - return explainTabPlan(explain, vschema) - case *sqlparser.ExplainStmt: - switch explain.Type { - case sqlparser.VitessType: - vschema.PlannerWarning("EXPLAIN FORMAT = VITESS is deprecated, please use VEXPLAIN PLAN instead.") - return buildVExplainVtgatePlan(ctx, explain.Statement, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) - case sqlparser.VTExplainType: - vschema.PlannerWarning("EXPLAIN FORMAT = VTEXPLAIN is deprecated, please use VEXPLAIN QUERIES instead.") - return buildVExplainLoggingPlan(ctx, &sqlparser.VExplainStmt{Type: sqlparser.QueriesVExplainType, Statement: explain.Statement, Comments: explain.Comments}, reservedVars, vschema, enableOnlineDDL, enableDirectDDL) - default: - return buildOtherReadAndAdmin(sqlparser.String(explain), vschema) - } - } - return nil, vterrors.VT13001(fmt.Sprintf("unexpected explain type: %T", stmt)) -} - func buildVExplainPlan(ctx context.Context, vexplainStmt *sqlparser.VExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema, enableOnlineDDL, enableDirectDDL bool) 
(*planResult, error) { switch vexplainStmt.Type { case sqlparser.QueriesVExplainType, sqlparser.AllVExplainType: @@ -63,24 +44,37 @@ func buildVExplainPlan(ctx context.Context, vexplainStmt *sqlparser.VExplainStmt } func explainTabPlan(explain *sqlparser.ExplainTab, vschema plancontext.VSchema) (*planResult, error) { - _, _, ks, _, destination, err := vschema.FindTableOrVindex(explain.Table) - if err != nil { - return nil, err + var keyspace *vindexes.Keyspace + var destination key.Destination + + if sqlparser.SystemSchema(explain.Table.Qualifier.String()) { + var err error + keyspace, err = vschema.AnyKeyspace() + if err != nil { + return nil, err + } + } else { + var err error + var ks string + _, _, ks, _, destination, err = vschema.FindTableOrVindex(explain.Table) + if err != nil { + return nil, err + } + explain.Table.Qualifier = sqlparser.NewIdentifierCS("") + + keyspace, err = vschema.FindKeyspace(ks) + if err != nil { + return nil, err + } + if keyspace == nil { + return nil, vterrors.VT14004(ks) + } } - explain.Table.Qualifier = sqlparser.NewIdentifierCS("") if destination == nil { destination = key.DestinationAnyShard{} } - keyspace, err := vschema.FindKeyspace(ks) - if err != nil { - return nil, err - } - if keyspace == nil { - return nil, vterrors.VT14004(ks) - } - return newPlanResult(&engine.Send{ Keyspace: keyspace, TargetDestination: destination, @@ -125,3 +119,51 @@ func buildVExplainLoggingPlan(ctx context.Context, explain *sqlparser.VExplainSt return &planResult{primitive: &engine.VExplain{Input: input.primitive, Type: explain.Type}, tables: input.tables}, nil } + +// buildExplainStmtPlan takes an EXPLAIN query and if possible sends the whole query to a single shard +func buildExplainStmtPlan(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + explain := stmt.(*sqlparser.ExplainStmt) + switch explain.Statement.(type) { + case sqlparser.SelectStatement, *sqlparser.Update, 
*sqlparser.Delete, *sqlparser.Insert: + return explainPlan(explain, reservedVars, vschema) + default: + return buildOtherReadAndAdmin(sqlparser.String(explain), vschema) + } +} + +func explainPlan(explain *sqlparser.ExplainStmt, reservedVars *sqlparser.ReservedVars, vschema plancontext.VSchema) (*planResult, error) { + ctx, err := plancontext.CreatePlanningContext(explain.Statement, reservedVars, vschema, Gen4) + if err != nil { + return nil, err + } + + ks := ctx.SemTable.SingleKeyspace() + if ks == nil { + return nil, vterrors.VT03031() + } + + if err = queryRewrite(ctx, explain.Statement); err != nil { + return nil, err + } + + // Remove keyspace qualifier from columns and tables. + sqlparser.RemoveKeyspace(explain.Statement) + + var tables []string + for _, table := range ctx.SemTable.Tables { + name, err := table.Name() + if err != nil { + // this is just for reporting which tables we are touching + // it's OK to ignore errors here + continue + } + tables = append(tables, operators.QualifiedString(ks, name.Name.String())) + } + + return newPlanResult(&engine.Send{ + Keyspace: ks, + TargetDestination: key.DestinationAnyShard{}, + Query: sqlparser.String(explain), + SingleShardOnly: true, + }, tables...), nil +} diff --git a/go/vt/vtgate/planbuilder/vindex_func.go b/go/vt/vtgate/planbuilder/vindex_func.go index abfd2d1d9b3..6db9adab051 100644 --- a/go/vt/vtgate/planbuilder/vindex_func.go +++ b/go/vt/vtgate/planbuilder/vindex_func.go @@ -20,8 +20,6 @@ import ( "fmt" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/semantics" - "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" @@ -29,63 +27,36 @@ import ( "vitess.io/vitess/go/vt/vtgate/engine" ) -var _ logicalPlan = (*vindexFunc)(nil) - -// vindexFunc is used to build a VindexFunc primitive. -type vindexFunc struct { - order int - - // the tableID field is only used by the gen4 planner - tableID semantics.TableSet - - // eVindexFunc is the primitive being built. 
- eVindexFunc *engine.VindexFunc -} - -var colnames = []string{ - "id", - "keyspace_id", - "range_start", - "range_end", - "hex_keyspace_id", - "shard", -} - -// Primitive implements the logicalPlan interface -func (vf *vindexFunc) Primitive() engine.Primitive { - return vf.eVindexFunc -} - // SupplyProjection pushes the given aliased expression into the fields and cols slices of the // vindexFunc engine primitive. The method returns the offset of the new expression in the columns // list. -func (vf *vindexFunc) SupplyProjection(expr *sqlparser.AliasedExpr, reuse bool) (int, error) { +func SupplyProjection(eVindexFunc *engine.VindexFunc, expr *sqlparser.AliasedExpr, reuse bool) error { colName, isColName := expr.Expr.(*sqlparser.ColName) if !isColName { - return 0, vterrors.VT12001("expression on results of a vindex function") + return vterrors.VT12001("expression on results of a vindex function") } enum := vindexColumnToIndex(colName) if enum == -1 { - return 0, vterrors.VT03016(colName.Name.String()) + return vterrors.VT03016(colName.Name.String()) } if reuse { - for i, col := range vf.eVindexFunc.Cols { + for _, col := range eVindexFunc.Cols { if col == enum { - return i, nil + return nil } } } - vf.eVindexFunc.Fields = append(vf.eVindexFunc.Fields, &querypb.Field{ + eVindexFunc.Fields = append(eVindexFunc.Fields, &querypb.Field{ Name: expr.ColumnName(), Type: querypb.Type_VARBINARY, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }) - vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, enum) - return len(vf.eVindexFunc.Cols) - 1, nil + eVindexFunc.Cols = append(eVindexFunc.Cols, enum) + return nil } // UnsupportedSupplyWeightString represents the error where the supplying a weight string is not supported diff --git a/go/vt/vtgate/planbuilder/vindex_op.go b/go/vt/vtgate/planbuilder/vindex_op.go deleted file mode 100644 index c439dec1701..00000000000 --- a/go/vt/vtgate/planbuilder/vindex_op.go +++ /dev/null @@ -1,62 +0,0 @@ -/* 
-Copyright 2022 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package planbuilder - -import ( - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/vtgate/vindexes" -) - -func transformVindexPlan(ctx *plancontext.PlanningContext, op *operators.Vindex) (logicalPlan, error) { - single, ok := op.Vindex.(vindexes.SingleColumn) - if !ok { - return nil, vterrors.VT12001("multi-column vindexes not supported") - } - - expr, err := evalengine.Translate(op.Value, &evalengine.Config{ - Collation: ctx.SemTable.Collation, - ResolveType: ctx.SemTable.TypeForExpr, - }) - if err != nil { - return nil, err - } - plan := &vindexFunc{ - order: 1, - tableID: op.Solved, - eVindexFunc: &engine.VindexFunc{ - Opcode: op.OpCode, - Vindex: single, - Value: expr, - }, - } - - for _, col := range op.Columns { - _, err := plan.SupplyProjection(&sqlparser.AliasedExpr{ - Expr: col, - As: sqlparser.IdentifierCI{}, - }, false) - if err != nil { - return nil, err - } - } - return plan, nil -} diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index 273592b5bf7..175f4b2cc8f 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -45,6 +45,7 @@ import ( vtgatepb 
"vitess.io/vitess/go/vt/proto/vtgate" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttls" ) @@ -74,6 +75,8 @@ var ( mysqlDefaultWorkloadName = "OLTP" mysqlDefaultWorkload int32 + + mysqlServerFlushDelay = 100 * time.Millisecond ) func registerPluginFlags(fs *pflag.FlagSet) { @@ -97,6 +100,7 @@ func registerPluginFlags(fs *pflag.FlagSet) { fs.DurationVar(&mysqlQueryTimeout, "mysql_server_query_timeout", mysqlQueryTimeout, "mysql query timeout") fs.BoolVar(&mysqlConnBufferPooling, "mysql-server-pool-conn-read-buffers", mysqlConnBufferPooling, "If set, the server will pool incoming connection read buffers") fs.DurationVar(&mysqlKeepAlivePeriod, "mysql-server-keepalive-period", mysqlKeepAlivePeriod, "TCP period between keep-alives") + fs.DurationVar(&mysqlServerFlushDelay, "mysql_server_flush_delay", mysqlServerFlushDelay, "Delay after which buffered response will be flushed to the client.") fs.StringVar(&mysqlDefaultWorkloadName, "mysql_default_workload", mysqlDefaultWorkloadName, "Default session workload (OLTP, OLAP, DBA)") } @@ -417,6 +421,10 @@ func (vh *vtgateHandler) KillQuery(connectionID uint32) error { return nil } +func (vh *vtgateHandler) Env() *vtenv.Environment { + return vh.vtg.executor.env +} + func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { session, _ := c.ClientData.(*vtgatepb.Session) if session == nil { @@ -479,7 +487,7 @@ func initTLSConfig(ctx context.Context, srv *mysqlServer, mysqlSslCert, mysqlSsl return nil } -// initiMySQLProtocol starts the mysql protocol. +// initMySQLProtocol starts the mysql protocol. // It should be called only once in a process. func initMySQLProtocol(vtgate *VTGate) *mysqlServer { // Flag is not set, just return. 
@@ -526,11 +534,11 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { mysqlProxyProtocol, mysqlConnBufferPooling, mysqlKeepAlivePeriod, + mysqlServerFlushDelay, ) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) } - srv.tcpListener.ServerVersion = servenv.MySQLServerVersion() if mysqlSslCert != "" && mysqlSslKey != "" { tlsVersion, err := vttls.TLSVersionToNumber(mysqlTLSMinVersion) if err != nil { @@ -550,17 +558,10 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { } if mysqlServerSocketPath != "" { - // Let's create this unix socket with permissions to all users. In this way, - // clients can connect to vtgate mysql server without being vtgate user - oldMask := syscall.Umask(000) - srv.unixListener, err = newMysqlUnixSocket(mysqlServerSocketPath, authServer, srv.vtgateHandle) - _ = syscall.Umask(oldMask) + err = setupUnixSocket(srv, authServer, mysqlServerSocketPath) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) - return nil } - // Listen for unix socket - go srv.unixListener.Accept() } return srv } @@ -578,6 +579,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys false, mysqlConnBufferPooling, mysqlKeepAlivePeriod, + mysqlServerFlushDelay, ) switch err := err.(type) { @@ -610,6 +612,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys false, mysqlConnBufferPooling, mysqlKeepAlivePeriod, + mysqlServerFlushDelay, ) return listener, listenerErr default: diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index 1aa201b5d4c..5da79b9fe17 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -30,15 +30,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/utils" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" 
"vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/trace" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/tlstest" + "vitess.io/vitess/go/vt/vtenv" ) type testHandler struct { @@ -83,6 +82,10 @@ func (th *testHandler) WarningCount(c *mysql.Conn) uint16 { return 0 } +func (th *testHandler) Env() *vtenv.Environment { + return vtenv.NewTestEnv() +} + func TestConnectionUnixSocket(t *testing.T) { th := &testHandler{} @@ -348,7 +351,7 @@ func TestGracefulShutdown(t *testing.T) { vh := newVtgateHandler(&VTGate{executor: executor, timings: timings, rowsReturned: rowsReturned, rowsAffected: rowsAffected}) th := &testHandler{} - listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0) + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer listener.Close() @@ -378,7 +381,7 @@ func TestGracefulShutdownWithTransaction(t *testing.T) { vh := newVtgateHandler(&VTGate{executor: executor, timings: timings, rowsReturned: rowsReturned, rowsAffected: rowsAffected}) th := &testHandler{} - listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0) + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0) require.NoError(t, err) defer listener.Close() diff --git a/go/vt/vtgate/plugin_mysql_server_unix.go b/go/vt/vtgate/plugin_mysql_server_unix.go new file mode 100644 index 00000000000..95c9731fccc --- /dev/null +++ b/go/vt/vtgate/plugin_mysql_server_unix.go @@ -0,0 +1,40 @@ +//go:build !windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import ( + "syscall" + + "vitess.io/vitess/go/mysql" +) + +func setupUnixSocket(srv *mysqlServer, authServer mysql.AuthServer, path string) error { + // Let's create this unix socket with permissions to all users. In this way, + // clients can connect to vtgate mysql server without being vtgate user + var err error + oldMask := syscall.Umask(000) + srv.unixListener, err = newMysqlUnixSocket(path, authServer, srv.vtgateHandle) + _ = syscall.Umask(oldMask) + if err != nil { + return err + } + // Listen for unix socket + go srv.unixListener.Accept() + return nil +} diff --git a/go/vt/vtgate/plugin_mysql_server_windows.go b/go/vt/vtgate/plugin_mysql_server_windows.go new file mode 100644 index 00000000000..0502cadf863 --- /dev/null +++ b/go/vt/vtgate/plugin_mysql_server_windows.go @@ -0,0 +1,29 @@ +//go:build windows + +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vtgate + +import ( + "errors" + + "vitess.io/vitess/go/mysql" +) + +func setupUnixSocket(srv *mysqlServer, authServer mysql.AuthServer, path string) error { + return errors.New("unix sockets are not supported on windows") +} diff --git a/go/vt/vtgate/querylog.go b/go/vt/vtgate/querylog.go index 7425f2feba9..bddc799363d 100644 --- a/go/vt/vtgate/querylog.go +++ b/go/vt/vtgate/querylog.go @@ -42,7 +42,7 @@ func (e *Executor) defaultQueryLogger() error { servenv.HTTPHandleFunc(QueryLogzHandler, func(w http.ResponseWriter, r *http.Request) { ch := queryLogger.Subscribe("querylogz") defer queryLogger.Unsubscribe(ch) - querylogzHandler(ch, w, r) + querylogzHandler(ch, w, r, e.env.Parser()) }) servenv.HTTPHandleFunc(QueryzHandler, func(w http.ResponseWriter, r *http.Request) { diff --git a/go/vt/vtgate/querylogz.go b/go/vt/vtgate/querylogz.go index acfb970df5a..7c72e950d4a 100644 --- a/go/vt/vtgate/querylogz.go +++ b/go/vt/vtgate/querylogz.go @@ -55,10 +55,9 @@ var ( `) querylogzFuncMap = template.FuncMap{ - "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, - "cssWrappable": logz.Wrappable, - "truncateQuery": sqlparser.TruncateForUI, - "unquote": func(s string) string { return strings.Trim(s, "\"") }, + "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, + "cssWrappable": logz.Wrappable, + "unquote": func(s string) string { return strings.Trim(s, "\"") }, } querylogzTmpl = template.Must(template.New("example").Funcs(querylogzFuncMap).Parse(` @@ -74,7 +73,7 @@ var ( - + @@ -84,7 +83,7 @@ var ( // querylogzHandler serves a human readable snapshot of the // current query log. 
-func querylogzHandler(ch chan *logstats.LogStats, w http.ResponseWriter, r *http.Request) { +func querylogzHandler(ch chan *logstats.LogStats, w http.ResponseWriter, r *http.Request, parser *sqlparser.Parser) { if err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil { acl.SendError(w, err) return @@ -115,7 +114,8 @@ func querylogzHandler(ch chan *logstats.LogStats, w http.ResponseWriter, r *http tmplData := struct { *logstats.LogStats ColorLevel string - }{stats, level} + Parser *sqlparser.Parser + }{stats, level, parser} if err := querylogzTmpl.Execute(w, tmplData); err != nil { log.Errorf("querylogz: couldn't execute template: %v", err) } diff --git a/go/vt/vtgate/querylogz_test.go b/go/vt/vtgate/querylogz_test.go index ce0f4d4311b..3cecb983b3f 100644 --- a/go/vt/vtgate/querylogz_test.go +++ b/go/vt/vtgate/querylogz_test.go @@ -26,6 +26,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/streamlog" @@ -73,7 +74,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) { response := httptest.NewRecorder() ch := make(chan *logstats.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ := io.ReadAll(response.Body) checkQuerylogzHasStats(t, fastQueryPattern, logStats, body) @@ -103,7 +104,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) { response = httptest.NewRecorder() ch = make(chan *logstats.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ = io.ReadAll(response.Body) checkQuerylogzHasStats(t, mediumQueryPattern, logStats, body) @@ -132,7 +133,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) { logStats.EndTime = logStats.StartTime.Add(500 * time.Millisecond) ch = make(chan *logstats.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, 
req, sqlparser.NewTestParser()) close(ch) body, _ = io.ReadAll(response.Body) checkQuerylogzHasStats(t, slowQueryPattern, logStats, body) @@ -142,7 +143,7 @@ func TestQuerylogzHandlerFormatting(t *testing.T) { defer func() { streamlog.SetQueryLogFilterTag("") }() ch = make(chan *logstats.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ = io.ReadAll(response.Body) checkQuerylogzHasStats(t, slowQueryPattern, logStats, body) diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index e546fc68c6f..540b014e11c 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -145,7 +144,7 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { e.ForEachPlan(func(plan *engine.Plan) bool { Value := &queryzRow{ - Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), + Query: logz.Wrappable(e.env.Parser().TruncateForUI(plan.Original)), } Value.Count, Value.tm, Value.ShardQueries, Value.RowsAffected, Value.RowsReturned, Value.Errors = plan.Stats() var timepq time.Duration diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index e2f3c235c94..45fff46f629 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -73,6 +73,7 @@ type ( mu sync.Mutex entries []engine.ExecuteEntry lastID int + parser *sqlparser.Parser } // autocommitState keeps track of whether a single round-trip @@ -435,7 +436,7 @@ func (session *SafeSession) AppendOrUpdate(shardSession *vtgatepb.Session_ShardS if session.queryFromVindex { break } - // isSingle is enforced only for normmal commit order operations. + // isSingle is enforced only for normal commit order operations. 
if session.isSingleDB(txMode) && len(session.ShardSessions) > 1 { count := actualNoOfShardSession(session.ShardSessions) if count <= 1 { @@ -572,6 +573,26 @@ func (session *SafeSession) TimeZone() *time.Location { return loc } +// ForeignKeyChecks returns the foreign_key_checks stored in system_variables map in the session. +func (session *SafeSession) ForeignKeyChecks() *bool { + session.mu.Lock() + fkVal, ok := session.SystemVariables[sysvars.ForeignKeyChecks] + session.mu.Unlock() + + if !ok { + return nil + } + switch strings.ToLower(fkVal) { + case "off", "0": + fkCheckBool := false + return &fkCheckBool + case "on", "1": + fkCheckBool := true + return &fkCheckBool + } + return nil +} + // SetOptions sets the options func (session *SafeSession) SetOptions(options *querypb.ExecuteOptions) { session.mu.Lock() @@ -921,11 +942,13 @@ func (session *SafeSession) ClearAdvisoryLock() { session.AdvisoryLock = nil } -func (session *SafeSession) EnableLogging() { +func (session *SafeSession) EnableLogging(parser *sqlparser.Parser) { session.mu.Lock() defer session.mu.Unlock() - session.logging = &executeLogger{} + session.logging = &executeLogger{ + parser: parser, + } } // GetUDV returns the bind variable value for the user defined variable. @@ -978,7 +1001,7 @@ func (l *executeLogger) log(primitive engine.Primitive, target *querypb.Target, FiredFrom: primitive, }) } - ast, err := sqlparser.Parse(query) + ast, err := l.parser.Parse(query) if err != nil { panic("query not able to parse. this should not happen") } diff --git a/go/vt/vtgate/sandbox_test.go b/go/vt/vtgate/sandbox_test.go index 1629e9a4faa..dc3c1f103af 100644 --- a/go/vt/vtgate/sandbox_test.go +++ b/go/vt/vtgate/sandbox_test.go @@ -41,10 +41,9 @@ import ( // sandbox_test.go provides a sandbox for unit testing VTGate. 
const ( - KsTestSharded = "TestExecutor" - KsTestUnsharded = "TestUnsharded" - KsTestUnshardedServedFrom = "TestUnshardedServedFrom" - KsTestBadVSchema = "TestXBadVSchema" + KsTestSharded = "TestExecutor" + KsTestUnsharded = "TestUnsharded" + KsTestBadVSchema = "TestXBadVSchema" ) func init() { @@ -172,18 +171,6 @@ func createShardedSrvKeyspace(shardSpec, servedFromKeyspace string) (*topodatapb }, }, } - if servedFromKeyspace != "" { - shardedSrvKeyspace.ServedFrom = []*topodatapb.SrvKeyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_RDONLY, - Keyspace: servedFromKeyspace, - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Keyspace: servedFromKeyspace, - }, - } - } return shardedSrvKeyspace, nil } @@ -259,27 +246,11 @@ func (sct *sandboxTopo) GetSrvKeyspace(ctx context.Context, cell, keyspace strin return nil, fmt.Errorf("topo error GetSrvKeyspace") } switch keyspace { - case KsTestUnshardedServedFrom: - servedFromKeyspace, err := createUnshardedKeyspace() - if err != nil { - return nil, err - } - servedFromKeyspace.ServedFrom = []*topodatapb.SrvKeyspace_ServedFrom{ - { - TabletType: topodatapb.TabletType_RDONLY, - Keyspace: KsTestUnsharded, - }, - { - TabletType: topodatapb.TabletType_PRIMARY, - Keyspace: KsTestUnsharded, - }, - } - return servedFromKeyspace, nil case KsTestUnsharded: return createUnshardedKeyspace() + default: + return createShardedSrvKeyspace(sand.ShardSpec, sand.KeyspaceServedFrom) } - - return createShardedSrvKeyspace(sand.ShardSpec, sand.KeyspaceServedFrom) } func (sct *sandboxTopo) WatchSrvKeyspace(ctx context.Context, cell, keyspace string, callback func(*topodatapb.SrvKeyspace, error) bool) { @@ -310,7 +281,10 @@ func (sct *sandboxTopo) WatchSrvVSchema(ctx context.Context, cell string, callba } sct.topoServer.UpdateSrvVSchema(ctx, cell, srvVSchema) - current, updateChan, _ := sct.topoServer.WatchSrvVSchema(ctx, cell) + current, updateChan, err := sct.topoServer.WatchSrvVSchema(ctx, cell) + if err != nil { + 
panic(fmt.Sprintf("sandboxTopo WatchSrvVSchema returned an error: %v", err)) + } if !callback(current.Value, nil) { panic("sandboxTopo callback returned false") } @@ -344,7 +318,7 @@ func (sct *sandboxTopo) WatchSrvVSchema(ctx context.Context, cell string, callba }() } -func sandboxDialer(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { +func sandboxDialer(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { sand := getSandbox(tablet.Keyspace) sand.sandmu.Lock() defer sand.sandmu.Unlock() diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index ede88e2d9b8..8b571f7b67d 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -19,7 +19,9 @@ package vtgate import ( "context" "io" + "runtime/debug" "sync" + "sync/atomic" "time" "vitess.io/vitess/go/mysql/sqlerror" @@ -186,7 +188,7 @@ func (stc *ScatterConn) ExecuteMultiShard( } } - qs, err = getQueryService(rs, info, session, false) + qs, err = getQueryService(ctx, rs, info, session, false) if err != nil { return nil, err } @@ -298,11 +300,11 @@ func checkAndResetShardSession(info *shardActionInfo, err error, session *SafeSe return retry } -func getQueryService(rs *srvtopo.ResolvedShard, info *shardActionInfo, session *SafeSession, skipReset bool) (queryservice.QueryService, error) { +func getQueryService(ctx context.Context, rs *srvtopo.ResolvedShard, info *shardActionInfo, session *SafeSession, skipReset bool) (queryservice.QueryService, error) { if info.alias == nil { return rs.Gateway, nil } - qs, err := rs.Gateway.QueryServiceByAlias(info.alias, rs.Target) + qs, err := rs.Gateway.QueryServiceByAlias(ctx, info.alias, rs.Target) if err == nil || skipReset { return qs, err } @@ -384,7 +386,7 @@ func (stc *ScatterConn) StreamExecuteMulti( } } - qs, err = getQueryService(rs, info, session, false) + qs, err = getQueryService(ctx, rs, info, session, false) if err != nil 
{ return nil, err } @@ -603,6 +605,12 @@ func (stc *ScatterConn) multiGo( return allErrors } +// panicData is used to capture panics during parallel execution. +type panicData struct { + p any + trace []byte +} + // multiGoTransaction performs the requested 'action' on the specified // ResolvedShards in parallel. For each shard, if the requested // session is in a transaction, it opens a new transactions on the connection, @@ -660,15 +668,28 @@ func (stc *ScatterConn) multiGoTransaction( oneShard(rs, i) } } else { + var panicRecord atomic.Value var wg sync.WaitGroup for i, rs := range rss { wg.Add(1) go func(rs *srvtopo.ResolvedShard, i int) { defer wg.Done() + defer func() { + if r := recover(); r != nil { + panicRecord.Store(&panicData{ + p: r, + trace: debug.Stack(), + }) + } + }() oneShard(rs, i) }(rs, i) } wg.Wait() + if pr, ok := panicRecord.Load().(*panicData); ok { + log.Errorf("caught a panic during parallel execution:\n%s", string(pr.trace)) + panic(pr.p) // rethrow the captured panic in the main thread + } } if session.MustRollback() { @@ -711,7 +732,7 @@ func (stc *ScatterConn) ExecuteLock(ctx context.Context, rs *srvtopo.ResolvedSha _ = stc.txConn.ReleaseLock(ctx, session) return nil, vterrors.Wrap(err, "Any previous held locks are released") } - qs, err := getQueryService(rs, info, nil, true) + qs, err := getQueryService(ctx, rs, info, nil, true) if err != nil { return nil, err } diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index 6e57c10bbbd..0e863805d9c 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -17,8 +17,11 @@ limitations under the License. 
package vtgate import ( + "fmt" "testing" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/mysql/sqlerror" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -105,6 +108,85 @@ func TestExecuteFailOnAutocommit(t *testing.T) { utils.MustMatch(t, []*querypb.BoundQuery{queries[1]}, sbc1.Queries, "") } +func TestExecutePanic(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + createSandbox("TestExecutePanic") + hc := discovery.NewFakeHealthCheck(nil) + sc := newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") + sbc0 := hc.AddTestTablet("aa", "0", 1, "TestExecutePanic", "0", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc1 := hc.AddTestTablet("aa", "1", 1, "TestExecutePanic", "1", topodatapb.TabletType_PRIMARY, true, 1, nil) + sbc0.SetPanic(42) + sbc1.SetPanic(42) + rss := []*srvtopo.ResolvedShard{ + { + Target: &querypb.Target{ + Keyspace: "TestExecutePanic", + Shard: "0", + TabletType: topodatapb.TabletType_PRIMARY, + }, + Gateway: sbc0, + }, + { + Target: &querypb.Target{ + Keyspace: "TestExecutePanic", + Shard: "1", + TabletType: topodatapb.TabletType_PRIMARY, + }, + Gateway: sbc1, + }, + } + queries := []*querypb.BoundQuery{ + { + // This will fail to go to shard. It will be rejected at vtgate. + Sql: "query1", + BindVariables: map[string]*querypb.BindVariable{ + "bv0": sqltypes.Int64BindVariable(0), + }, + }, + { + // This will go to shard. + Sql: "query2", + BindVariables: map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1), + }, + }, + } + // shard 0 - has transaction + // shard 1 - does not have transaction. 
+ session := &vtgatepb.Session{ + InTransaction: true, + ShardSessions: []*vtgatepb.Session_ShardSession{ + { + Target: &querypb.Target{Keyspace: "TestExecutePanic", Shard: "0", TabletType: topodatapb.TabletType_PRIMARY, Cell: "aa"}, + TransactionId: 123, + TabletAlias: nil, + }, + }, + Autocommit: false, + } + + original := log.Errorf + defer func() { + log.Errorf = original + }() + + var logMessage string + log.Errorf = func(format string, args ...any) { + logMessage = fmt.Sprintf(format, args...) + } + + defer func() { + r := recover() + require.NotNil(t, r, "The code did not panic") + // assert we are seeing the stack trace + require.Contains(t, logMessage, "(*ScatterConn).multiGoTransaction") + }() + + _, _ = sc.ExecuteMultiShard(ctx, nil, rss, queries, NewSafeSession(session), true /*autocommit*/, false) + +} + func TestReservedOnMultiReplica(t *testing.T) { ctx := utils.LeakCheckContext(t) diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go index 369ab178986..a1b2009d0e1 100644 --- a/go/vt/vtgate/schema/tracker.go +++ b/go/vt/vtgate/schema/tracker.go @@ -19,10 +19,12 @@ package schema import ( "context" "maps" + "slices" "strings" "sync" "time" + "vitess.io/vitess/go/ptr" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" @@ -44,12 +46,15 @@ type ( mu sync.Mutex tables *tableMap views *viewMap + udfs map[keyspaceStr][]string ctx context.Context signal func() // a function that we'll call whenever we have new schema data // map of keyspace currently tracked tracked map[keyspaceStr]*updateController consumeDelay time.Duration + + parser *sqlparser.Parser } ) @@ -57,17 +62,21 @@ type ( const defaultConsumeDelay = 1 * time.Second // NewTracker creates the tracker object. 
-func NewTracker(ch chan *discovery.TabletHealth, enableViews bool) *Tracker { +func NewTracker(ch chan *discovery.TabletHealth, enableViews, enableUDFs bool, parser *sqlparser.Parser) *Tracker { t := &Tracker{ ctx: context.Background(), ch: ch, tables: &tableMap{m: make(map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo)}, tracked: map[keyspaceStr]*updateController{}, consumeDelay: defaultConsumeDelay, + parser: parser, } if enableViews { - t.views = &viewMap{m: map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement{}} + t.views = &viewMap{m: map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement{}, parser: parser} + } + if enableUDFs { + t.udfs = map[keyspaceStr][]string{} } return t } @@ -82,6 +91,10 @@ func (t *Tracker) LoadKeyspace(conn queryservice.QueryService, target *querypb.T if err != nil { return err } + err = t.loadUDFs(conn, target) + if err != nil { + return err + } t.tracked[target.Keyspace].setLoaded(true) return nil @@ -142,6 +155,34 @@ func (t *Tracker) loadViews(conn queryservice.QueryService, target *querypb.Targ return nil } +func (t *Tracker) loadUDFs(conn queryservice.QueryService, target *querypb.Target) error { + if t.udfs == nil { + // This happens only when UDFs are not enabled. + return nil + } + + t.mu.Lock() + defer t.mu.Unlock() + + var udfs []string + err := conn.GetSchema(t.ctx, target, querypb.SchemaTableType_UDFS, nil, func(schemaRes *querypb.GetSchemaResponse) error { + for _, udf := range schemaRes.Udfs { + if !udf.Aggregating { + continue + } + udfs = append(udfs, udf.Name) + } + return nil + }) + if err != nil { + log.Errorf("error fetching new UDFs for %v: %v", target.Keyspace, err) + return err + } + t.udfs[target.Keyspace] = udfs + log.Infof("finished loading %d UDFs for keyspace %s", len(udfs), target.Keyspace) + return nil +} + // Start starts the schema tracking.
func (t *Tracker) Start() { log.Info("Starting schema tracking") @@ -204,6 +245,9 @@ func (t *Tracker) GetColumns(ks string, tbl string) []vindexes.Column { defer t.mu.Unlock() tblInfo := t.tables.get(ks, tbl) + if tblInfo == nil { + return nil + } return tblInfo.Columns } @@ -216,6 +260,15 @@ func (t *Tracker) GetForeignKeys(ks string, tbl string) []*sqlparser.ForeignKeyD return tblInfo.ForeignKeys } +// GetIndexes returns the indexes for table in the given keyspace. +func (t *Tracker) GetIndexes(ks string, tbl string) []*sqlparser.IndexDefinition { + t.mu.Lock() + defer t.mu.Unlock() + + tblInfo := t.tables.get(ks, tbl) + return tblInfo.Indexes +} + // Tables returns a map with the columns for all known tables in the keyspace func (t *Tracker) Tables(ks string) map[string]*vindexes.TableInfo { t.mu.Lock() @@ -231,27 +284,47 @@ func (t *Tracker) Tables(ks string) map[string]*vindexes.TableInfo { // Views returns all known views in the keyspace with their definition. func (t *Tracker) Views(ks string) map[string]sqlparser.SelectStatement { - t.mu.Lock() - defer t.mu.Unlock() - if t.views == nil { return nil } + t.mu.Lock() + defer t.mu.Unlock() + m := t.views.m[ks] return maps.Clone(m) } +func (t *Tracker) UDFs(ks string) []string { + if t.udfs == nil { + return nil + } + + t.mu.Lock() + defer t.mu.Unlock() + + return slices.Clone(t.udfs[ks]) +} + func (t *Tracker) updateSchema(th *discovery.TabletHealth) bool { success := true if th.Stats.TableSchemaChanged != nil { success = t.updatedTableSchema(th) } - if !success || th.Stats.ViewSchemaChanged == nil { - return success + if !success { + return false } + // there is view definition change in the tablet - return t.updatedViewSchema(th) + if th.Stats.ViewSchemaChanged != nil { + success = t.updatedViewSchema(th) + } + + if !success || !th.Stats.UdfsChanged { + return success + } + + return t.loadUDFs(th.Conn, th.Target) == nil } func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { @@ -280,7 
+353,7 @@ func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { func (t *Tracker) updateTables(keyspace string, res map[string]string) { for tableName, tableDef := range res { - stmt, err := sqlparser.Parse(tableDef) + stmt, err := t.parser.Parse(tableDef) if err != nil { log.Warningf("error parsing table definition for %s: %v", tableName, err) continue @@ -293,7 +366,7 @@ func (t *Tracker) updateTables(keyspace string, res map[string]string) { cols := getColumns(ddl.TableSpec) fks := getForeignKeys(ddl.TableSpec) - t.tables.set(keyspace, tableName, cols, fks) + t.tables.set(keyspace, tableName, cols, fks, ddl.TableSpec.Indexes) } } @@ -302,12 +375,20 @@ func getColumns(tblSpec *sqlparser.TableSpec) []vindexes.Column { cols := make([]vindexes.Column, 0, len(tblSpec.Columns)) for _, column := range tblSpec.Columns { colCollation := getColumnCollation(tblCollation, column) + size := ptr.Unwrap(column.Type.Length, 0) + scale := ptr.Unwrap(column.Type.Scale, 0) + nullable := ptr.Unwrap(column.Type.Options.Null, true) cols = append(cols, vindexes.Column{ Name: column.Name, Type: column.Type.SQLType(), CollationName: colCollation, + Default: column.Type.Options.Default, Invisible: column.Type.Invisible(), + Size: int32(size), + Scale: int32(scale), + Nullable: nullable, + Values: column.Type.EnumValues, }) } return cols @@ -343,7 +424,13 @@ func getTableCollation(tblSpec *sqlparser.TableSpec) string { func getColumnCollation(defaultCollation string, column *sqlparser.ColumnDefinition) string { if column.Type.Options == nil || column.Type.Options.Collate == "" { - return defaultCollation + switch strings.ToLower(column.Type.Type) { + case "enum", "set", "text", "tinytext", "mediumtext", "longtext", "varchar", "char": + return defaultCollation + case "json": + return "utf8mb4_bin" + } + return "binary" } return column.Type.Options.Collate } @@ -403,13 +490,13 @@ type tableMap struct { m map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo } -func (tm 
*tableMap) set(ks, tbl string, cols []vindexes.Column, fks []*sqlparser.ForeignKeyDefinition) { +func (tm *tableMap) set(ks, tbl string, cols []vindexes.Column, fks []*sqlparser.ForeignKeyDefinition, indexes []*sqlparser.IndexDefinition) { m := tm.m[ks] if m == nil { m = make(map[tableNameStr]*vindexes.TableInfo) tm.m[ks] = m } - m[tbl] = &vindexes.TableInfo{Columns: cols, ForeignKeys: fks} + m[tbl] = &vindexes.TableInfo{Columns: cols, ForeignKeys: fks, Indexes: indexes} } func (tm *tableMap) get(ks, tbl string) *vindexes.TableInfo { @@ -438,7 +525,8 @@ func (t *Tracker) clearKeyspaceTables(ks string) { } type viewMap struct { - m map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement + m map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement + parser *sqlparser.Parser } func (vm *viewMap) set(ks, tbl, sql string) { @@ -447,7 +535,7 @@ func (vm *viewMap) set(ks, tbl, sql string) { m = make(map[tableNameStr]sqlparser.SelectStatement) vm.m[ks] = m } - stmt, err := sqlparser.Parse(sql) + stmt, err := vm.parser.Parse(sql) if err != nil { log.Warningf("ignoring view '%s', parsing error in view definition: '%s'", tbl, sql) return diff --git a/go/vt/vtgate/schema/tracker_test.go b/go/vt/vtgate/schema/tracker_test.go index 4f514fec101..1ee15ba99cb 100644 --- a/go/vt/vtgate/schema/tracker_test.go +++ b/go/vt/vtgate/schema/tracker_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" @@ -81,7 +83,7 @@ func TestTrackingUnHealthyTablet(t *testing.T) { sbc := sandboxconn.NewSandboxConn(tablet) ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, false) + tracker := NewTracker(ch, false, false, sqlparser.NewTestParser()) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -104,7 +106,7 @@ func TestTrackingUnHealthyTablet(t *testing.T) { 
serving: true, }, { - name: "initial load", + name: "first update", serving: true, updatedTbls: []string{"a"}, }, @@ -113,24 +115,26 @@ func TestTrackingUnHealthyTablet(t *testing.T) { serving: false, }, { - name: "now serving tablet", + name: "serving tablet", serving: true, }, } for _, tcase := range tcases { - ch <- &discovery.TabletHealth{ - Conn: sbc, - Tablet: tablet, - Target: target, - Serving: tcase.serving, - Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updatedTbls}, - } - time.Sleep(5 * time.Millisecond) + t.Run(tcase.name, func(t *testing.T) { + ch <- &discovery.TabletHealth{ + Conn: sbc, + Tablet: tablet, + Target: target, + Serving: tcase.serving, + Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updatedTbls}, + } + time.Sleep(5 * time.Millisecond) + }) } require.False(t, waitTimeout(&wg, 5*time.Second), "schema was updated but received no signal") - require.EqualValues(t, 3, sbc.GetSchemaCount.Load()) + assert.EqualValues(t, 3, sbc.GetSchemaCount.Load()) } // TestTrackerGetKeyspaceUpdateController tests table update controller initialization. @@ -166,135 +170,152 @@ func TestTrackerGetKeyspaceUpdateController(t *testing.T) { assert.Nil(t, ks3.reloadKeyspace, "ks3 already initialized") } +type myTable struct { + name, create string +} + +func tbl(name, create string) myTable { + return myTable{name: name, create: create} +} + +func tables(tables ...myTable) sandboxconn.SchemaResult { + m := map[string]string{} + for _, table := range tables { + m[table.name] = table.create + } + return sandboxconn.SchemaResult{TablesAndViews: m} +} + // TestTableTracking tests that the tracker is able to track table schema changes. 
func TestTableTracking(t *testing.T) { - schemaDefResult := []map[string]string{{ - "prior": "create table prior(id int primary key)", - }, { - // initial load of view - kept empty - }, { - "t1": "create table t1(id bigint primary key, name varchar(50))", - "t2": "create table t2(id varchar(50) primary key)", - }, { - "t2": "create table t2(id varchar(50) primary key, name varchar(50))", - "t3": "create table t3(id datetime primary key)", - }, { - "t4": "create table t4(name varchar(50) primary key)", - }} + schemaResponse := []sandboxconn.SchemaResult{ + tables(tbl("prior", "create table prior(id int primary key)")), + empty(), /*initial load of view*/ + tables( + tbl("t1", "create table t1(id bigint primary key, name varchar(50), email varchar(50) not null default 'a@b.com')"), + tbl("T1", "create table T1(id varchar(50) primary key)"), + ), + tables( + tbl("T1", "create table T1(id varchar(50) primary key, name varchar(50))"), + tbl("t3", "create table t3(id datetime primary key)"), + ), + tables( + tbl("t4", "create table t4(name varchar(50) primary key)"), + ), + } testcases := []testCases{{ testName: "initial table load", expTbl: map[string][]vindexes.Column{ - "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, + "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32, CollationName: "binary", Nullable: true}}, }, }, { testName: "new tables", - updTbl: []string{"t1", "t2"}, + updTbl: []string{"t1", "T1"}, expTbl: map[string][]vindexes.Column{ - "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32}}, - "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, - "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}}, + "prior": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT32, CollationName: "binary", Nullable: true}}, + "t1": {{Name: sqlparser.NewIdentifierCI("id"), 
Type: querypb.Type_INT64, CollationName: "binary", Nullable: true}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}, {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: false, Default: &sqlparser.Literal{Val: "a@b.com"}}}, + "T1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}}, }, }, { - testName: "delete prior, updated t2 and new t3", - updTbl: []string{"prior", "t2", "t3"}, + testName: "delete prior, updated T1 and new t3", + updTbl: []string{"prior", "T1", "t3"}, expTbl: map[string][]vindexes.Column{ - "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, - "t2": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, - "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: true}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}, {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: false, Default: &sqlparser.Literal{Val: "a@b.com"}}}, + "T1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}}, + "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME, CollationName: "binary", Size: 0, Nullable: true}}, }, }, { testName: "new t4", updTbl: []string{"t4"}, expTbl: map[string][]vindexes.Column{ - "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, - "t2": {{Name: 
sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, - "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME}}, - "t4": {{Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR}}, + "t1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: true}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}, {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: false, Default: &sqlparser.Literal{Val: "a@b.com"}}}, + "T1": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}, {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}}, + "t3": {{Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_DATETIME, CollationName: "binary", Size: 0, Nullable: true}}, + "t4": {{Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, Nullable: true}}, }, }} - testTracker(t, schemaDefResult, testcases) + testTracker(t, false, schemaResponse, testcases) } // TestViewsTracking tests that the tracker is able to track views. 
func TestViewsTracking(t *testing.T) { - schemaDefResult := []map[string]string{{ - // initial load of table - kept empty - }, { - "prior": "create view prior as select 1 from tbl", - }, { - "t1": "create view t1 as select 1 from tbl1", - "t2": "create view t2 as select 1 from tbl2", - }, { - "t2": "create view t2 as select 1,2 from tbl2", - "t3": "create view t3 as select 1 from tbl3", - }, { - "t4": "create view t4 as select 1 from tbl4", - }} + schemaDefResult := []sandboxconn.SchemaResult{ + empty(), /*initial load of view*/ + tables(tbl("prior", "create view prior as select 1 from tbl")), + tables( + tbl("t1", "create view t1 as select 1 from tbl1"), + tbl("V1", "create view V1 as select 1 from tbl2"), + ), + tables( + tbl("V1", "create view V1 as select 1,2 from tbl2"), + tbl("t3", "create view t3 as select 1 from tbl3"), + ), + tables(tbl("t4", "create view t4 as select 1 from tbl4")), + } testcases := []testCases{{ testName: "initial view load", expView: map[string]string{ "prior": "select 1 from tbl"}, }, { - testName: "new view t1, t2", - updView: []string{"t1", "t2"}, + testName: "new view t1, V1", + updView: []string{"t1", "V1"}, expView: map[string]string{ "t1": "select 1 from tbl1", - "t2": "select 1 from tbl2", + "V1": "select 1 from tbl2", "prior": "select 1 from tbl"}, }, { - testName: "delete prior, updated t2 and new t3", - updView: []string{"prior", "t2", "t3"}, + testName: "delete prior, updated V1 and new t3", + updView: []string{"prior", "V1", "t3"}, expView: map[string]string{ "t1": "select 1 from tbl1", - "t2": "select 1, 2 from tbl2", + "V1": "select 1, 2 from tbl2", "t3": "select 1 from tbl3"}, }, { testName: "new t4", updView: []string{"t4"}, expView: map[string]string{ "t1": "select 1 from tbl1", - "t2": "select 1, 2 from tbl2", + "V1": "select 1, 2 from tbl2", "t3": "select 1 from tbl3", "t4": "select 1 from tbl4"}, }} - testTracker(t, schemaDefResult, testcases) + testTracker(t, false, schemaDefResult, testcases) } -// 
TestTableInfoRetrieval tests that the tracker is able to retrieve required information from ddl statement. -func TestTableInfoRetrieval(t *testing.T) { - schemaDefResult := []map[string]string{{ - "my_tbl": "CREATE TABLE `my_tbl` (" + - "`id` bigint NOT NULL AUTO_INCREMENT," + - "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL," + - "`email` varbinary(100) DEFAULT NULL," + - "PRIMARY KEY (`id`)," + - "KEY `id` (`id`,`name`)) " + - "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", - }, { - // initial load of view - kept empty - }, { - "my_child_tbl": "CREATE TABLE `my_child_tbl` (" + - "`id` bigint NOT NULL AUTO_INCREMENT," + - "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL," + - "`code` varchar(6) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL," + - "`my_id` bigint DEFAULT NULL," + - "PRIMARY KEY (`id`)," + - "KEY `my_id` (`my_id`,`name`)," + - "CONSTRAINT `my_child_tbl_ibfk_1` FOREIGN KEY (`my_id`, `name`) REFERENCES `my_tbl` (`id`, `name`) ON DELETE CASCADE) " + - "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", - }} +// TestFKInfoRetrieval tests that the tracker is able to retrieve required foreign key information from ddl statement. 
+func TestFKInfoRetrieval(t *testing.T) { + schemaDefResult := []sandboxconn.SchemaResult{ + tables(tbl("my_tbl", "CREATE TABLE `my_tbl` ("+ + "`id` bigint NOT NULL AUTO_INCREMENT,"+ + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL,"+ + "`email` varbinary(100) DEFAULT NULL,"+ + "PRIMARY KEY (`id`),"+ + "KEY `id` (`id`,`name`)) "+ + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci")), + empty(), + tables(tbl( + "my_child_tbl", "CREATE TABLE `my_child_tbl` ("+ + "`id` bigint NOT NULL AUTO_INCREMENT,"+ + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL,"+ + "`code` varchar(6) CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci DEFAULT NULL,"+ + "`my_id` bigint DEFAULT NULL,"+ + "PRIMARY KEY (`id`),"+ + "KEY `my_id` (`my_id`,`name`),"+ + "CONSTRAINT `my_child_tbl_ibfk_1` FOREIGN KEY (`my_id`, `name`) REFERENCES `my_tbl` (`id`, `name`) ON DELETE CASCADE) "+ + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci")), + } testcases := []testCases{{ testName: "initial table load", expTbl: map[string][]vindexes.Column{ "my_tbl": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, - {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: false}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, CollationName: "latin1_swedish_ci", Nullable: true, Default: &sqlparser.NullVal{}}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, Size: 100, CollationName: "binary", Nullable: true, Default: &sqlparser.NullVal{}}, }, }, }, { @@ -302,15 +323,15 @@ func TestTableInfoRetrieval(t *testing.T) { updTbl: 
[]string{"my_child_tbl"}, expTbl: map[string][]vindexes.Column{ "my_tbl": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, - {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: false}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, CollationName: "latin1_swedish_ci", Nullable: true, Default: &sqlparser.NullVal{}}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, Size: 100, CollationName: "binary", Nullable: true, Default: &sqlparser.NullVal{}}, }, "my_child_tbl": { - {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, - {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci"}, - {Name: sqlparser.NewIdentifierCI("code"), Type: querypb.Type_VARCHAR, CollationName: "utf8mb4_0900_ai_ci"}, - {Name: sqlparser.NewIdentifierCI("my_id"), Type: querypb.Type_INT64, CollationName: "utf8mb4_0900_ai_ci"}, + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: false}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, Size: 50, CollationName: "latin1_swedish_ci", Nullable: true, Default: &sqlparser.NullVal{}}, + {Name: sqlparser.NewIdentifierCI("code"), Type: querypb.Type_VARCHAR, Size: 6, CollationName: "utf8mb4_0900_ai_ci", Nullable: true, Default: &sqlparser.NullVal{}}, + {Name: sqlparser.NewIdentifierCI("my_id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: true, Default: &sqlparser.NullVal{}}, }, }, expFk: map[string]string{ @@ -319,7 +340,117 @@ func TestTableInfoRetrieval(t *testing.T) { }, }} - 
testTracker(t, schemaDefResult, testcases) + testTracker(t, false, schemaDefResult, testcases) +} + +// TestIndexInfoRetrieval tests that the tracker is able to retrieve required index information from ddl statement. +func TestIndexInfoRetrieval(t *testing.T) { + schemaDefResult := []sandboxconn.SchemaResult{ + tables(tbl( + "my_tbl", "CREATE TABLE `my_tbl` ("+ + "`id` bigint NOT NULL AUTO_INCREMENT,"+ + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL,"+ + "`email` varbinary(100) DEFAULT NULL,"+ + "PRIMARY KEY (`id`),"+ + "KEY `id` (`id`,`name`)) "+ + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci")), + empty(), /*initial load of view*/ + tables(tbl( + "my_tbl", "CREATE TABLE `my_tbl` ("+ + "`id` bigint NOT NULL AUTO_INCREMENT,"+ + "`name` varchar(50) CHARACTER SET latin1 COLLATE latin1_swedish_ci DEFAULT NULL,"+ + "`email` varbinary(100) DEFAULT NULL,"+ + "PRIMARY KEY (`id`),"+ + "KEY `id` (`id`,`name`), "+ + "UNIQUE KEY `email` (`email`)) "+ + "ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci")), + } + + testcases := []testCases{{ + testName: "initial table load", + expTbl: map[string][]vindexes.Column{ + "my_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: false}, + {Name: sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci", Size: 50, Nullable: true, Default: &sqlparser.NullVal{}}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "binary", Size: 100, Nullable: true, Default: &sqlparser.NullVal{}}, + }, + }, + expIdx: map[string][]string{ + "my_tbl": { + "primary key (id)", + "key id (id, `name`)", + }, + }, + }, { + testName: "next load", + updTbl: []string{"my_tbl"}, + expTbl: map[string][]vindexes.Column{ + "my_tbl": { + {Name: sqlparser.NewIdentifierCI("id"), Type: querypb.Type_INT64, CollationName: "binary", Nullable: false}, + {Name: 
sqlparser.NewIdentifierCI("name"), Type: querypb.Type_VARCHAR, CollationName: "latin1_swedish_ci", Size: 50, Nullable: true, Default: &sqlparser.NullVal{}}, + {Name: sqlparser.NewIdentifierCI("email"), Type: querypb.Type_VARBINARY, CollationName: "binary", Size: 100, Nullable: true, Default: &sqlparser.NullVal{}}, + }, + }, + expIdx: map[string][]string{ + "my_tbl": { + "primary key (id)", + "key id (id, `name`)", + "unique key email (email)", + }, + }, + }} + + testTracker(t, false, schemaDefResult, testcases) +} + +func empty() sandboxconn.SchemaResult { + return sandboxconn.SchemaResult{TablesAndViews: map[string]string{}} +} + +// TestUDFRetrieval tests that the tracker is able to retrieve required UDF information. +func TestUDFRetrieval(t *testing.T) { + schemaDefResult := []sandboxconn.SchemaResult{ + empty(), // initial load of table + empty(), + udfs(udf("my_udf", true, sqltypes.Int32)), + udfs( + udf("my_udf2", true, sqltypes.Char), + udf("my_udf3", true, sqltypes.Int32), + ), + udfs( + udf("my_udf2", true, sqltypes.Char), + udf("my_udf4", true, sqltypes.Int32), + )} + + testcases := []testCases{{ + testName: "initial load", + expUDFs: []string{"my_udf"}, + }, { + testName: "next load 1", + updUdfs: true, + expUDFs: []string{"my_udf2", "my_udf3"}, + }, { + testName: "next load 2", + updUdfs: true, + expUDFs: []string{"my_udf2", "my_udf4"}, + }} + + testTracker(t, true, schemaDefResult, testcases) +} + +func udfs(udfs ...*querypb.UDFInfo) sandboxconn.SchemaResult { + return sandboxconn.SchemaResult{ + TablesAndViews: map[string]string{}, + UDFs: udfs, + } +} + +func udf(name string, aggr bool, typ querypb.Type) *querypb.UDFInfo { + return &querypb.UDFInfo{ + Name: name, + Aggregating: aggr, + ReturnType: typ, + } } type testCases struct { @@ -328,14 +459,18 @@ type testCases struct { updTbl []string expTbl map[string][]vindexes.Column expFk map[string]string + expIdx map[string][]string updView []string expView map[string]string + + updUdfs bool + expUDFs 
[]string } -func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []testCases) { +func testTracker(t *testing.T, enableUDFs bool, schemaDefResult []sandboxconn.SchemaResult, tcases []testCases) { ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, true) + tracker := NewTracker(ch, true, enableUDFs, sqlparser.NewTestParser()) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -351,6 +486,10 @@ func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []tes sbc := sandboxconn.NewSandboxConn(tablet) sbc.SetSchemaResult(schemaDefResult) + initialLoadCount := 2 + if enableUDFs { + initialLoadCount = 3 + } for count, tcase := range tcases { t.Run(tcase.testName, func(t *testing.T) { wg.Add(1) @@ -359,28 +498,39 @@ func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []tes Tablet: tablet, Target: target, Serving: true, - Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updTbl, ViewSchemaChanged: tcase.updView}, + Stats: &querypb.RealtimeStats{TableSchemaChanged: tcase.updTbl, ViewSchemaChanged: tcase.updView, UdfsChanged: tcase.updUdfs}, } require.False(t, waitTimeout(&wg, time.Second), "schema was updated but received no signal") - require.EqualValues(t, count+2, sbc.GetSchemaCount.Load()) + require.EqualValues(t, count+initialLoadCount, sbc.GetSchemaCount.Load()) _, keyspacePresent := tracker.tracked[target.Keyspace] require.Equal(t, true, keyspacePresent) - for k, v := range tcase.expTbl { - utils.MustMatch(t, v, tracker.GetColumns(keyspace, k), "mismatch columns for table: ", k) + for k, expectedCols := range tcase.expTbl { + actualCols := tracker.GetColumns(keyspace, k) + utils.MustMatch(t, expectedCols, actualCols, "mismatch columns for table: ", k) if len(tcase.expFk[k]) > 0 { fks := tracker.GetForeignKeys(keyspace, k) for _, fk := range fks { - utils.MustMatch(t, tcase.expFk[k], sqlparser.String(fk), "mismatch foreign keys for table: ", k) + 
assert.Equal(t, tcase.expFk[k], sqlparser.String(fk), "mismatch foreign keys for table: ", k) + } + } + expIndexes := tcase.expIdx[k] + if len(expIndexes) > 0 { + idxs := tracker.GetIndexes(keyspace, k) + require.Equal(t, len(expIndexes), len(idxs)) + for i, idx := range idxs { + assert.Equal(t, expIndexes[i], sqlparser.String(idx), "mismatch index for table: ", k) } } } for k, v := range tcase.expView { - utils.MustMatch(t, v, sqlparser.String(tracker.GetViews(keyspace, k)), "mismatch for view: ", k) + assert.Equal(t, v, sqlparser.String(tracker.GetViews(keyspace, k)), "mismatch for view: ", k) } + + assert.Equal(t, tcase.expUDFs, tracker.UDFs(keyspace), "mismatch for udfs") }) } } diff --git a/go/vt/vtgate/schema/update_controller.go b/go/vt/vtgate/schema/update_controller.go index f68a9448d55..f30b2a679e6 100644 --- a/go/vt/vtgate/schema/update_controller.go +++ b/go/vt/vtgate/schema/update_controller.go @@ -148,11 +148,11 @@ func (u *updateController) add(th *discovery.TabletHealth) { } // If the keyspace schema is loaded and there is no schema change detected. Then there is nothing to process. 
- if len(th.Stats.TableSchemaChanged) == 0 && len(th.Stats.ViewSchemaChanged) == 0 && u.loaded { + if len(th.Stats.TableSchemaChanged) == 0 && len(th.Stats.ViewSchemaChanged) == 0 && !th.Stats.UdfsChanged && u.loaded { return } - if (len(th.Stats.TableSchemaChanged) > 0 || len(th.Stats.ViewSchemaChanged) > 0) && u.ignore { + if (len(th.Stats.TableSchemaChanged) > 0 || len(th.Stats.ViewSchemaChanged) > 0 || th.Stats.UdfsChanged) && u.ignore { // we got an update for this keyspace - we need to stop ignoring it, and reload everything u.ignore = false u.loaded = false diff --git a/go/vt/vtgate/schema/update_controller_flaky_test.go b/go/vt/vtgate/schema/update_controller_flaky_test.go index 971389af822..597705963b8 100644 --- a/go/vt/vtgate/schema/update_controller_flaky_test.go +++ b/go/vt/vtgate/schema/update_controller_flaky_test.go @@ -60,6 +60,16 @@ func TestMultipleUpdatesFromDifferentShards(t *testing.T) { }}, updateTables: []string{"a", "b"}, signalExpected: 1, + }, { + inputs: []input{{ + shard: "0", + tablesUpdates: []string{"a"}, + }, { + shard: "0", + tablesUpdates: []string{"A"}, + }}, + updateTables: []string{"a", "A"}, + signalExpected: 1, }, { inputs: []input{{ shard: "0", @@ -205,6 +215,11 @@ func TestViewsUpdates(t *testing.T) { inputs: []input{{shard: "0", viewUpdates: []string{"a"}}, {shard: "0", viewUpdates: []string{"b"}}}, updateViews: []string{"a", "b"}, signalExpected: 1, + }, { + desc: "received different view updates from shards - case sensitive names", + inputs: []input{{shard: "0", viewUpdates: []string{"a"}}, {shard: "0", viewUpdates: []string{"A"}}}, + updateViews: []string{"a", "A"}, + signalExpected: 1, }, { desc: "delay between inputs - different signals from each input", inputs: []input{{shard: "0", viewUpdates: []string{"a"}}, {shard: "0", viewUpdates: []string{"b"}}}, diff --git a/go/vt/vtgate/semantics/FakeSI.go b/go/vt/vtgate/semantics/FakeSI.go index b7043b42980..1ca6718f1a8 100644 --- a/go/vt/vtgate/semantics/FakeSI.go +++ 
b/go/vt/vtgate/semantics/FakeSI.go @@ -24,6 +24,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -35,6 +36,7 @@ type FakeSI struct { VindexTables map[string]vindexes.Vindex KsForeignKeyMode map[string]vschemapb.Keyspace_ForeignKeyMode KsError map[string]error + UDFs []string } // FindTableOrVindex implements the SchemaInformation interface @@ -47,7 +49,11 @@ func (s *FakeSI) FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Tab } func (*FakeSI) ConnCollation() collations.ID { - return 45 + return collations.CollationUtf8mb4ID +} + +func (s *FakeSI) Environment() *vtenv.Environment { + return vtenv.NewTestEnv() } func (s *FakeSI) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { @@ -61,6 +67,10 @@ func (s *FakeSI) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyM return vschemapb.Keyspace_unmanaged, nil } +func (s *FakeSI) GetForeignKeyChecksState() *bool { + return nil +} + func (s *FakeSI) KeyspaceError(keyspace string) error { if s.KsError != nil { fkErr, isPresent := s.KsError[keyspace] @@ -71,3 +81,7 @@ func (s *FakeSI) KeyspaceError(keyspace string) error { } return nil } + +func (s *FakeSI) GetAggregateUDFs() []string { + return s.UDFs +} diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go index e524b1a33cf..b872a1dde04 100644 --- a/go/vt/vtgate/semantics/analyzer.go +++ b/go/vt/vtgate/semantics/analyzer.go @@ -28,47 +28,73 @@ import ( // analyzer controls the flow of the analysis. 
// It starts the tree walking and controls which part of the analysis sees which parts of the tree type analyzer struct { - scoper *scoper - tables *tableCollector - binder *binder - typer *typer - rewriter *earlyRewriter - sig QuerySignature + scoper *scoper + earlyTables *earlyTableCollector + tables *tableCollector + binder *binder + typer *typer + rewriter *earlyRewriter + fk *fkManager + sig QuerySignature + si SchemaInformation + currentDb string + recheck bool err error inProjection int - projErr error - unshardedErr error - warning string + projErr error + unshardedErr error + warning string + singleUnshardedKeyspace bool + fullAnalysis bool } // newAnalyzer create the semantic analyzer -func newAnalyzer(dbName string, si SchemaInformation) *analyzer { +func newAnalyzer(dbName string, si SchemaInformation, fullAnalysis bool) *analyzer { // TODO dependencies between these components are a little tangled. We should try to clean up - s := newScoper() + s := newScoper(si) a := &analyzer{ - scoper: s, - tables: newTableCollector(s, si, dbName), - typer: newTyper(), + scoper: s, + earlyTables: newEarlyTableCollector(si, dbName), + typer: newTyper(si.Environment().CollationEnv()), + si: si, + currentDb: dbName, + fullAnalysis: fullAnalysis, } s.org = a - a.tables.org = a + return a +} - b := newBinder(s, a, a.tables, a.typer) - a.binder = b +func (a *analyzer) lateInit() { + a.tables = a.earlyTables.newTableCollector(a.scoper, a) + a.binder = newBinder(a.scoper, a, a.tables, a.typer) + a.scoper.binder = a.binder a.rewriter = &earlyRewriter{ - scoper: s, - binder: b, + binder: a.binder, + scoper: a.scoper, expandedColumns: map[sqlparser.TableName][]*sqlparser.ColName{}, + env: a.si.Environment(), + aliasMapCache: map[*sqlparser.Select]map[string]exprContainer{}, + reAnalyze: a.reAnalyze, + tables: a.tables, + aggrUDFs: a.si.GetAggregateUDFs(), + } + a.fk = &fkManager{ + binder: a.binder, + tables: a.tables, + si: a.si, + getError: a.getError, } - s.binder = b - 
return a } // Analyze analyzes the parsed query. func Analyze(statement sqlparser.Statement, currentDb string, si SchemaInformation) (*SemTable, error) { - analyzer := newAnalyzer(currentDb, newSchemaInfo(si)) + return analyseAndGetSemTable(statement, currentDb, si, false) +} + +func analyseAndGetSemTable(statement sqlparser.Statement, currentDb string, si SchemaInformation, fullAnalysis bool) (*SemTable, error) { + analyzer := newAnalyzer(currentDb, newSchemaInfo(si), fullAnalysis) // Analysis for initial scope err := analyzer.analyze(statement) @@ -77,12 +103,12 @@ func Analyze(statement sqlparser.Statement, currentDb string, si SchemaInformati } // Creation of the semantic table - return analyzer.newSemTable(statement, si.ConnCollation()) + return analyzer.newSemTable(statement, si.ConnCollation(), si.GetForeignKeyChecksState(), si.Environment().CollationEnv()) } // AnalyzeStrict analyzes the parsed query, and fails the analysis for any possible errors func AnalyzeStrict(statement sqlparser.Statement, currentDb string, si SchemaInformation) (*SemTable, error) { - st, err := Analyze(statement, currentDb, si) + st, err := analyseAndGetSemTable(statement, currentDb, si, true) if err != nil { return nil, err } @@ -97,18 +123,47 @@ func AnalyzeStrict(statement sqlparser.Statement, currentDb string, si SchemaInf return st, nil } -func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID) (*SemTable, error) { +func (a *analyzer) newSemTable( + statement sqlparser.Statement, + coll collations.ID, + fkChecksState *bool, + env *collations.Environment, +) (*SemTable, error) { var comments *sqlparser.ParsedComments commentedStmt, isCommented := statement.(sqlparser.Commented) if isCommented { comments = commentedStmt.GetParsedComments() } + + if a.singleUnshardedKeyspace { + return &SemTable{ + Tables: a.earlyTables.Tables, + Comments: comments, + Warning: a.warning, + Collation: coll, + ExprTypes: map[sqlparser.Expr]evalengine.Type{}, + 
NotSingleRouteErr: a.projErr, + NotUnshardedErr: a.unshardedErr, + Recursive: ExprDependencies{}, + Direct: ExprDependencies{}, + ColumnEqualities: map[columnName][]sqlparser.Expr{}, + ExpandedColumns: map[sqlparser.TableName][]*sqlparser.ColName{}, + columns: map[*sqlparser.Union]sqlparser.SelectExprs{}, + StatementIDs: a.scoper.statementIDs, + QuerySignature: QuerySignature{}, + childForeignKeysInvolved: map[TableSet][]vindexes.ChildFKInfo{}, + parentForeignKeysInvolved: map[TableSet][]vindexes.ParentFKInfo{}, + childFkToUpdExprs: map[string]sqlparser.UpdateExprs{}, + collEnv: env, + }, nil + } + columns := map[*sqlparser.Union]sqlparser.SelectExprs{} for union, info := range a.tables.unionInfo { columns[union] = info.exprs } - childFks, parentFks, err := a.getInvolvedForeignKeys(statement) + childFks, parentFks, childFkToUpdExprs, err := a.fk.getInvolvedForeignKeys(statement, fkChecksState) if err != nil { return nil, err } @@ -118,6 +173,7 @@ func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID Direct: a.binder.direct, ExprTypes: a.typer.m, Tables: a.tables.Tables, + Targets: a.binder.targets, NotSingleRouteErr: a.projErr, NotUnshardedErr: a.unshardedErr, Warning: a.warning, @@ -130,6 +186,8 @@ func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID QuerySignature: a.sig, childForeignKeysInvolved: childFks, parentForeignKeysInvolved: parentFks, + childFkToUpdExprs: childFkToUpdExprs, + collEnv: env, }, nil } @@ -184,10 +242,6 @@ func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { return false } - if err := a.scoper.up(cursor); err != nil { - a.setError(err) - return false - } if err := a.tables.up(cursor); err != nil { a.setError(err) return false @@ -203,9 +257,17 @@ func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool { return false } - if err := a.rewriter.up(cursor); err != nil { + if !a.recheck { + // no need to run the rewriter on rechecking + if err := a.rewriter.up(cursor); err != nil 
{ + a.setError(err) + return true + } + } + + if err := a.scoper.up(cursor); err != nil { a.setError(err) - return true + return false } a.leaveProjection(cursor) @@ -265,6 +327,12 @@ func isParentSelect(cursor *sqlparser.Cursor) bool { return isSelect } +func isParentDeleteOrUpdate(cursor *sqlparser.Cursor) bool { + _, isDelete := cursor.Parent().(*sqlparser.Delete) + _, isUpdate := cursor.Parent().(*sqlparser.Update) + return isDelete || isUpdate +} + func isParentSelectStatement(cursor *sqlparser.Cursor) bool { _, isSelect := cursor.Parent().(sqlparser.SelectStatement) return isSelect @@ -273,6 +341,7 @@ func isParentSelectStatement(cursor *sqlparser.Cursor) bool { type originable interface { tableSetFor(t *sqlparser.AliasedTableExpr) TableSet depsForExpr(expr sqlparser.Expr) (direct, recursive TableSet, typ evalengine.Type) + collationEnv() *collations.Environment } func (a *analyzer) depsForExpr(expr sqlparser.Expr) (direct, recursive TableSet, typ evalengine.Type) { @@ -282,11 +351,83 @@ func (a *analyzer) depsForExpr(expr sqlparser.Expr) (direct, recursive TableSet, return } +func (a *analyzer) collationEnv() *collations.Environment { + return a.typer.collationEnv +} + func (a *analyzer) analyze(statement sqlparser.Statement) error { + _ = sqlparser.Rewrite(statement, nil, a.earlyUp) + if a.err != nil { + return a.err + } + + if a.canShortCut(statement) { + return nil + } + + a.lateInit() + + return a.lateAnalyze(statement) +} + +func (a *analyzer) lateAnalyze(statement sqlparser.SQLNode) error { _ = sqlparser.Rewrite(statement, a.analyzeDown, a.analyzeUp) return a.err } +func (a *analyzer) reAnalyze(statement sqlparser.SQLNode) error { + a.recheck = true + defer func() { + a.recheck = false + }() + return a.lateAnalyze(statement) +} + +// canShortCut checks if we are dealing with a single unsharded keyspace and no tables that have managed foreign keys +// if so, we can stop the analyzer early +func (a *analyzer) canShortCut(statement sqlparser.Statement) 
(canShortCut bool) { + if a.fullAnalysis { + return false + } + ks, _ := singleUnshardedKeyspace(a.earlyTables.Tables) + if ks == nil { + return false + } + + defer func() { + a.singleUnshardedKeyspace = canShortCut + }() + + if !sqlparser.IsDMLStatement(statement) { + return true + } + + fkMode, err := a.si.ForeignKeyMode(ks.Name) + if err != nil { + a.err = err + return false + } + if fkMode != vschemapb.Keyspace_managed { + return true + } + + for _, table := range a.earlyTables.Tables { + vtbl := table.GetVindexTable() + if len(vtbl.ChildForeignKeys) > 0 || len(vtbl.ParentForeignKeys) > 0 { + return false + } + } + + return true +} + +// earlyUp collects tables in the query, so we can check +// if this a single unsharded query we are dealing with +func (a *analyzer) earlyUp(cursor *sqlparser.Cursor) bool { + a.earlyTables.up(cursor) + return true +} + func (a *analyzer) shouldContinue() bool { return a.err == nil } @@ -313,174 +454,20 @@ func (a *analyzer) noteQuerySignature(node sqlparser.SQLNode) { } case sqlparser.AggrFunc: a.sig.Aggregation = true + case *sqlparser.Delete, *sqlparser.Update, *sqlparser.Insert: + a.sig.DML = true } } -// getInvolvedForeignKeys gets the foreign keys that might require taking care off when executing the given statement. -func (a *analyzer) getInvolvedForeignKeys(statement sqlparser.Statement) (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, error) { - // There are only the DML statements that require any foreign keys handling. - switch stmt := statement.(type) { - case *sqlparser.Delete: - // For DELETE statements, none of the parent foreign keys require handling. - // So we collect all the child foreign keys. - allChildFks, _, err := a.getAllManagedForeignKeys() - return allChildFks, nil, err - case *sqlparser.Insert: - // For INSERT statements, we have 3 different cases: - // 1. REPLACE statement: REPLACE statements are essentially DELETEs and INSERTs rolled into one. 
- // So we need to the parent foreign keys to ensure we are inserting the correct values, and the child foreign keys - // to ensure we don't change a row that breaks the constraint or cascade any operations on the child tables. - // 2. Normal INSERT statement: We don't need to check anything on the child foreign keys, so we just get all the parent foreign keys. - // 3. INSERT with ON DUPLICATE KEY UPDATE: This might trigger an update on the columns specified in the ON DUPLICATE KEY UPDATE clause. - allChildFks, allParentFKs, err := a.getAllManagedForeignKeys() - if err != nil { - return nil, nil, err - } - if stmt.Action == sqlparser.ReplaceAct { - return allChildFks, allParentFKs, nil - } - if len(stmt.OnDup) == 0 { - return nil, allParentFKs, nil - } - // If only a certain set of columns are being updated, then there might be some child foreign keys that don't need any consideration since their columns aren't being updated. - // So, we filter these child foreign keys out. We can't filter any parent foreign keys because the statement will INSERT a row too, which requires validating all the parent foreign keys. - updatedChildFks, _ := a.filterForeignKeysUsingUpdateExpressions(allChildFks, nil, sqlparser.UpdateExprs(stmt.OnDup)) - return updatedChildFks, allParentFKs, nil - case *sqlparser.Update: - // For UPDATE queries we get all the parent and child foreign keys, but we can filter some of them out if the columns that they consist off aren't being updated or are set to NULLs. - allChildFks, allParentFks, err := a.getAllManagedForeignKeys() - if err != nil { - return nil, nil, err - } - childFks, parentFks := a.filterForeignKeysUsingUpdateExpressions(allChildFks, allParentFks, stmt.Exprs) - return childFks, parentFks, nil - default: - return nil, nil, nil - } -} - -// filterForeignKeysUsingUpdateExpressions filters the child and parent foreign key constraints that don't require any validations/cascades given the updated expressions. 
-func (a *analyzer) filterForeignKeysUsingUpdateExpressions(allChildFks map[TableSet][]vindexes.ChildFKInfo, allParentFks map[TableSet][]vindexes.ParentFKInfo, updExprs sqlparser.UpdateExprs) (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo) { - if len(allChildFks) == 0 && len(allParentFks) == 0 { - return nil, nil - } - - pFksRequired := make(map[TableSet][]bool, len(allParentFks)) - cFksRequired := make(map[TableSet][]bool, len(allChildFks)) - for ts, fks := range allParentFks { - pFksRequired[ts] = make([]bool, len(fks)) - } - for ts, fks := range allChildFks { - cFksRequired[ts] = make([]bool, len(fks)) - } - - // updExprToTableSet stores the tables that the updated expressions are from. - updExprToTableSet := make(map[*sqlparser.ColName]TableSet) - - // Go over all the update expressions - for _, updateExpr := range updExprs { - deps := a.binder.direct.dependencies(updateExpr.Name) - if deps.NumberOfTables() != 1 { - panic("expected to have single table dependency") - } - updExprToTableSet[updateExpr.Name] = deps - // Get all the child and parent foreign keys for the given table that the update expression belongs to. - childFks := allChildFks[deps] - parentFKs := allParentFks[deps] - - // Any foreign key to a child table for a column that has been updated - // will require the cascade operations or restrict verification to happen, so we include all such foreign keys. - for idx, childFk := range childFks { - if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { - cFksRequired[deps][idx] = true - } - } - // If we are setting a column to NULL, then we don't need to verify the existance of an - // equivalent row in the parent table, even if this column was part of a foreign key to a parent table. - if sqlparser.IsNull(updateExpr.Expr) { - continue - } - // We add all the possible parent foreign key constraints that need verification that an equivalent row - // exists, given that this column has changed. 
- for idx, parentFk := range parentFKs { - if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { - pFksRequired[deps][idx] = true - } - } - } - // For the parent foreign keys, if any of the columns part of the fk is set to NULL, - // then, we don't care for the existence of an equivalent row in the parent table. - for _, updateExpr := range updExprs { - if !sqlparser.IsNull(updateExpr.Expr) { - continue - } - ts := updExprToTableSet[updateExpr.Name] - parentFKs := allParentFks[ts] - for idx, parentFk := range parentFKs { - if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { - pFksRequired[ts][idx] = false - } - } - } - - // Create new maps with only the required foreign keys. - pFksNeedsHandling := map[TableSet][]vindexes.ParentFKInfo{} - cFksNeedsHandling := map[TableSet][]vindexes.ChildFKInfo{} - for ts, parentFks := range allParentFks { - var pFKNeeded []vindexes.ParentFKInfo - for idx, fk := range parentFks { - if pFksRequired[ts][idx] { - pFKNeeded = append(pFKNeeded, fk) - } - } - pFksNeedsHandling[ts] = pFKNeeded - - } - for ts, childFks := range allChildFks { - var cFKNeeded []vindexes.ChildFKInfo - for idx, fk := range childFks { - if cFksRequired[ts][idx] { - cFKNeeded = append(cFKNeeded, fk) - } - } - cFksNeedsHandling[ts] = cFKNeeded - +// getError gets the error stored in the analyzer during previous phases. +func (a *analyzer) getError() error { + if a.projErr != nil { + return a.projErr } - return cFksNeedsHandling, pFksNeedsHandling -} - -// getAllManagedForeignKeys gets all the foreign keys for the query we are analyzing that Vitess is reposible for managing. -func (a *analyzer) getAllManagedForeignKeys() (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, error) { - allChildFKs := make(map[TableSet][]vindexes.ChildFKInfo) - allParentFKs := make(map[TableSet][]vindexes.ParentFKInfo) - - // Go over all the tables and collect the foreign keys. 
- for idx, table := range a.tables.Tables { - vi := table.GetVindexTable() - if vi == nil || vi.Keyspace == nil { - // If is not a real table, so should be skipped. - continue - } - // Check whether Vitess needs to manage the foreign keys in this keyspace or not. - fkMode, err := a.tables.si.ForeignKeyMode(vi.Keyspace.Name) - if err != nil { - return nil, nil, err - } - if fkMode != vschemapb.Keyspace_managed { - continue - } - // Cyclic foreign key constraints error is stored in the keyspace. - ksErr := a.tables.si.KeyspaceError(vi.Keyspace.Name) - if ksErr != nil { - return nil, nil, ksErr - } - - // Add all the child and parent foreign keys to our map. - ts := SingleTableSet(idx) - allChildFKs[ts] = vi.ChildForeignKeys - allParentFKs[ts] = vi.ParentForeignKeys + if a.unshardedErr != nil { + return a.unshardedErr } - return allChildFKs, allParentFKs, nil + return a.err } // ProjError is used to mark an error as something that should only be returned @@ -499,6 +486,10 @@ type ShardedError struct { Inner error } +func (p ShardedError) Unwrap() error { + return p.Inner +} + func (p ShardedError) Error() string { return p.Inner.Error() } diff --git a/go/vt/vtgate/semantics/analyzer_dml_test.go b/go/vt/vtgate/semantics/analyzer_dml_test.go index c792b2301a0..3e50f98f77a 100644 --- a/go/vt/vtgate/semantics/analyzer_dml_test.go +++ b/go/vt/vtgate/semantics/analyzer_dml_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/sqlparser" ) @@ -87,3 +88,53 @@ func TestUpdBindingExpr(t *testing.T) { func extractFromUpdateSet(in *sqlparser.Update, idx int) *sqlparser.UpdateExpr { return in.Exprs[idx] } + +func TestInsertBindingColName(t *testing.T) { + queries := []string{ + "insert into t2 (uid, name, textcol) values (1,'foo','bar') as new on duplicate key update textcol = new.uid + new.name", + "insert into t2 (uid, name, textcol) values (1,'foo','bar') as new(x, y, z) on duplicate key 
update textcol = x + y", + "insert into t2 values (1,'foo','bar') as new(x, y, z) on duplicate key update textcol = x + y", + "insert into t3(uid, name, invcol) values (1,'foo','bar') as new on duplicate key update textcol = new.invcol", + "insert into t3 values (1,'foo','bar') as new on duplicate key update textcol = new.uid+new.name+new.textcol", + "insert into t3 values (1,'foo','bar') as new on duplicate key update textcol = new.uid+new.name+new.textcol, uid = new.name", + } + for _, query := range queries { + t.Run(query, func(t *testing.T) { + stmt, semTable := parseAndAnalyzeStrict(t, query, "d") + ins, _ := stmt.(*sqlparser.Insert) + for _, ue := range ins.OnDup { + // check deps on the column + ts := semTable.RecursiveDeps(ue.Name) + assert.Equal(t, SingleTableSet(0), ts) + // check deps on the expression + ts = semTable.RecursiveDeps(ue.Expr) + assert.Equal(t, SingleTableSet(0), ts) + } + }) + } +} + +func TestInsertBindingColNameErrorCases(t *testing.T) { + tcases := []struct { + query string + expErr string + }{{ + "insert into t2 values (1,'foo','bar') as new on duplicate key update textcol = new.unknowncol", + "column 'new.unknowncol' not found", + }, { + "insert into t3 values (1,'foo','bar', 'baz') as new on duplicate key update textcol = new.invcol", + "column 'new.invcol' not found", + }, { + "insert into t3(uid, name) values (1,'foo') as new(x, y, z) on duplicate key update textcol = x + y", + "VT03033: In definition of view, derived table or common table expression, SELECT list and column names list have different column counts", + }} + for _, tc := range tcases { + t.Run(tc.query, func(t *testing.T) { + parse, err := sqlparser.NewTestParser().Parse(tc.query) + require.NoError(t, err) + + _, err = AnalyzeStrict(parse, "d", fakeSchemaInfo()) + require.ErrorContains(t, err, tc.expErr) + }) + } +} diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go index c8251dd36c3..0fbf0911f3a 100644 --- 
a/go/vt/vtgate/semantics/analyzer_test.go +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -17,7 +17,6 @@ limitations under the License. package semantics import ( - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -25,12 +24,11 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -var T0 TableSet +var NoTables TableSet var ( // Just here to make outputs more readable @@ -122,7 +120,7 @@ func TestBindingSingleTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "d", &FakeSI{}) require.NoError(t, err) @@ -142,7 +140,7 @@ func TestBindingSingleAliasedTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -240,7 +238,7 @@ func TestBindingMultiTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -264,7 +262,7 @@ func TestBindingMultiAliasedTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -277,6 +275,27 @@ func TestBindingMultiAliasedTableNegative(t *testing.T) { } } +func TestBindingDelete(t *testing.T) { + 
queries := []string{ + "delete tbl from tbl", + "delete from tbl", + "delete t1 from t1, t2", + } + for _, query := range queries { + t.Run(query, func(t *testing.T) { + stmt, semTable := parseAndAnalyze(t, query, "d") + del := stmt.(*sqlparser.Delete) + t1 := del.TableExprs[0].(*sqlparser.AliasedTableExpr) + ts := semTable.TableSetFor(t1) + assert.Equal(t, SingleTableSet(0), ts) + + actualTs, err := semTable.GetTargetTableSetForTableName(del.Targets[0]) + require.NoError(t, err) + assert.Equal(t, ts, actualTs) + }) + } +} + func TestNotUniqueTableName(t *testing.T) { queries := []string{ "select * from t, t", @@ -287,7 +306,7 @@ func TestNotUniqueTableName(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) _, err := Analyze(parse, "test", &FakeSI{}) require.Error(t, err) require.Contains(t, err.Error(), "VT03013: not unique table/alias") @@ -302,7 +321,7 @@ func TestMissingTable(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) st, err := Analyze(parse, "", &FakeSI{}) require.NoError(t, err) require.ErrorContains(t, st.NotUnshardedErr, "column 't.col' not found") @@ -390,7 +409,7 @@ func TestUnknownColumnMap2(t *testing.T) { queries := []string{"select col from a, b", "select col from a as user, b as extra"} for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) expr := extract(parse.(*sqlparser.Select), 0) for _, test := range tests { @@ -404,7 +423,7 @@ func TestUnknownColumnMap2(t *testing.T) { require.NoError(t, tbl.NotSingleRouteErr) typ, found := tbl.TypeForExpr(expr) assert.True(t, found) - assert.Equal(t, test.typ, typ.Type) + assert.Equal(t, test.typ, typ.Type()) } }) } @@ -421,7 +440,7 @@ func TestUnknownPredicate(t 
*testing.T) { Name: sqlparser.NewIdentifierCS("b"), } - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) tests := []struct { name string @@ -459,7 +478,7 @@ func TestScoping(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -539,7 +558,7 @@ func TestSubqueryOrderByBinding(t *testing.T) { for _, tc := range queries { t.Run(tc.query, func(t *testing.T) { - ast, err := sqlparser.Parse(tc.query) + ast, err := sqlparser.NewTestParser().Parse(tc.query) require.NoError(t, err) sel := ast.(*sqlparser.Select) @@ -580,7 +599,7 @@ func TestOrderByBindingTable(t *testing.T) { TS0, }, { "select 1 as c from tabl order by c", - T0, + NoTables, }, { "select name, name from t1, t2 order by name", TS1, @@ -628,6 +647,37 @@ func TestOrderByBindingTable(t *testing.T) { } } +func TestVindexHints(t *testing.T) { + // tests that vindex hints point to existing vindexes, or an error should be returned + tcases := []struct { + sql string + expectedErr string + }{{ + sql: "select col from t1 use vindex (does_not_exist)", + expectedErr: "Vindex 'does_not_exist' does not exist in table 'ks2.t1'", + }, { + sql: "select col from t1 ignore vindex (does_not_exist)", + expectedErr: "Vindex 'does_not_exist' does not exist in table 'ks2.t1'", + }, { + sql: "select id from t1 use vindex (id_vindex)", + }, { + sql: "select id from t1 ignore vindex (id_vindex)", + }} + for _, tc := range tcases { + t.Run(tc.sql, func(t *testing.T) { + parse, err := sqlparser.NewTestParser().Parse(tc.sql) + require.NoError(t, err) + + _, err = AnalyzeStrict(parse, "d", fakeSchemaInfo()) + if tc.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tc.expectedErr) + } + }) + } +} + func 
TestGroupByBinding(t *testing.T) { tcases := []struct { sql string @@ -661,7 +711,7 @@ func TestGroupByBinding(t *testing.T) { TS0, }, { "select 1 as c from tabl group by c", - T0, + NoTables, }, { "select t1.id from t1, t2 group by id", TS0, @@ -673,7 +723,10 @@ func TestGroupByBinding(t *testing.T) { TS1, }, { "select a.id from t as a, t1 group by id", - TS0, + // since we have authoritative info on t1, we know that it does have an `id` column, + // and we are missing column info for `t`, we just assume this is coming from t1. + // we really need schema tracking here + TS1, }, { "select a.id from t, t1 as a group by id", TS1, @@ -682,7 +735,7 @@ func TestGroupByBinding(t *testing.T) { t.Run(tc.sql, func(t *testing.T) { stmt, semTable := parseAndAnalyze(t, tc.sql, "d") sel, _ := stmt.(*sqlparser.Select) - grp := sel.GroupBy[0] + grp := sel.GroupBy.Exprs[0] d := semTable.RecursiveDeps(grp) require.Equal(t, tc.deps, d, tc.sql) }) @@ -691,44 +744,47 @@ func TestGroupByBinding(t *testing.T) { func TestHavingBinding(t *testing.T) { tcases := []struct { - sql string - deps TableSet + sql, err string + deps TableSet }{{ - "select col from tabl having col = 1", - TS0, + sql: "select col from tabl having col = 1", + deps: TS0, }, { - "select col from tabl having tabl.col = 1", - TS0, + sql: "select col from tabl having tabl.col = 1", + deps: TS0, }, { - "select col from tabl having d.tabl.col = 1", - TS0, + sql: "select col from tabl having d.tabl.col = 1", + deps: TS0, }, { - "select tabl.col as x from tabl having x = 1", - TS0, + sql: "select tabl.col as x from tabl having col = 1", + deps: TS0, }, { - "select tabl.col as x from tabl having col", - TS0, + sql: "select tabl.col as x from tabl having x = 1", + deps: TS0, }, { - "select col from tabl having 1 = 1", - T0, + sql: "select tabl.col as x from tabl having col", + deps: TS0, }, { - "select col as c from tabl having c = 1", - TS0, + sql: "select col from tabl having 1 = 1", + deps: NoTables, }, { - "select 1 as c 
from tabl having c = 1", - T0, + sql: "select col as c from tabl having c = 1", + deps: TS0, }, { - "select t1.id from t1, t2 having id = 1", - TS0, + sql: "select 1 as c from tabl having c = 1", + deps: NoTables, }, { - "select t.id from t, t1 having id = 1", - TS0, + sql: "select t1.id from t1, t2 having id = 1", + deps: TS0, }, { - "select t.id, count(*) as a from t, t1 group by t.id having a = 1", - MergeTableSets(TS0, TS1), + sql: "select t.id from t, t1 having id = 1", + deps: TS0, }, { - "select t.id, sum(t2.name) as a from t, t2 group by t.id having a = 1", - TS1, + sql: "select t.id, count(*) as a from t, t1 group by t.id having a = 1", + deps: MergeTableSets(TS0, TS1), + }, { + sql: "select t.id, sum(t2.name) as a from t, t2 group by t.id having a = 1", + deps: TS1, }, { sql: "select u2.a, u1.a from u1, u2 having u2.a = 2", deps: TS1, @@ -844,7 +900,7 @@ func TestInvalidQueries(t *testing.T) { for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { - parse, err := sqlparser.Parse(tc.sql) + parse, err := sqlparser.NewTestParser().Parse(tc.sql) require.NoError(t, err) st, err := Analyze(parse, "dbName", fakeSchemaInfo()) @@ -883,109 +939,6 @@ func TestUnionWithOrderBy(t *testing.T) { assert.Equal(t, TS1, d2) } -func TestScopingWDerivedTables(t *testing.T) { - queries := []struct { - query string - errorMessage string - recursiveExpectation TableSet - expectation TableSet - }{ - { - query: "select id from (select x as id from user) as t", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select id from (select foo as id from user) as t", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select id from (select foo as id from (select x as foo from user) as c) as t", - recursiveExpectation: TS0, - expectation: TS2, - }, { - query: "select t.id from (select foo as id from user) as t", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select t.id2 from (select foo as id from user) as t", - errorMessage: 
"column 't.id2' not found", - }, { - query: "select id from (select 42 as id) as t", - recursiveExpectation: T0, - expectation: TS1, - }, { - query: "select t.id from (select 42 as id) as t", - recursiveExpectation: T0, - expectation: TS1, - }, { - query: "select ks.t.id from (select 42 as id) as t", - errorMessage: "column 'ks.t.id' not found", - }, { - query: "select * from (select id, id from user) as t", - errorMessage: "Duplicate column name 'id'", - }, { - query: "select t.baz = 1 from (select id as baz from user) as t", - expectation: TS1, - recursiveExpectation: TS0, - }, { - query: "select t.id from (select * from user, music) as t", - expectation: TS2, - recursiveExpectation: MergeTableSets(TS0, TS1), - }, { - query: "select t.id from (select * from user, music) as t order by t.id", - expectation: TS2, - recursiveExpectation: MergeTableSets(TS0, TS1), - }, { - query: "select t.id from (select * from user) as t join user as u on t.id = u.id", - expectation: TS1, - recursiveExpectation: TS0, - }, { - query: "select t.col1 from t3 ua join (select t1.id, t1.col1 from t1 join t2) as t", - expectation: TS3, - recursiveExpectation: TS1, - }, { - query: "select uu.test from (select id from t1) uu", - errorMessage: "column 'uu.test' not found", - }, { - query: "select uu.id from (select id as col from t1) uu", - errorMessage: "column 'uu.id' not found", - }, { - query: "select uu.id from (select id as col from t1) uu", - errorMessage: "column 'uu.id' not found", - }, { - query: "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", - expectation: TS1, - recursiveExpectation: TS0, - }, { - query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", - expectation: T0, - recursiveExpectation: T0, - }} - for _, query := range queries { - t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) - 
require.NoError(t, err) - st, err := Analyze(parse, "user", &FakeSI{ - Tables: map[string]*vindexes.Table{ - "t": {Name: sqlparser.NewIdentifierCS("t")}, - }, - }) - - switch { - case query.errorMessage != "" && err != nil: - require.EqualError(t, err, query.errorMessage) - case query.errorMessage != "": - require.EqualError(t, st.NotUnshardedErr, query.errorMessage) - default: - require.NoError(t, err) - sel := parse.(*sqlparser.Select) - assert.Equal(t, query.recursiveExpectation, st.RecursiveDeps(extract(sel, 0)), "RecursiveDeps") - assert.Equal(t, query.expectation, st.DirectDeps(extract(sel, 0)), "DirectDeps") - } - }) - } -} - func TestScopingWithWITH(t *testing.T) { queries := []struct { query string @@ -1003,7 +956,7 @@ func TestScopingWithWITH(t *testing.T) { }, { query: "with c as (select x as foo from user), t as (select foo as id from c) select id from t", recursive: TS0, - direct: TS2, + direct: TS3, }, { query: "with t as (select foo as id from user) select t.id from t", recursive: TS0, @@ -1013,11 +966,11 @@ func TestScopingWithWITH(t *testing.T) { errorMessage: "column 't.id2' not found", }, { query: "with t as (select 42 as id) select id from t", - recursive: T0, + recursive: NoTables, direct: TS1, }, { query: "with t as (select 42 as id) select t.id from t", - recursive: T0, + recursive: NoTables, direct: TS1, }, { query: "with t as (select 42 as id) select ks.t.id from t", @@ -1039,12 +992,12 @@ func TestScopingWithWITH(t *testing.T) { recursive: MergeTableSets(TS0, TS1), }, { query: "with t as (select * from user) select t.id from t join user as u on t.id = u.id", - direct: TS1, + direct: TS2, recursive: TS0, }, { query: "with t as (select t1.id, t1.col1 from t1 join t2) select t.col1 from t3 ua join t", direct: TS3, - recursive: TS1, + recursive: TS0, }, { query: "with uu as (select id from t1) select uu.test from uu", errorMessage: "column 'uu.test' not found", @@ -1056,16 +1009,16 @@ func TestScopingWithWITH(t *testing.T) { errorMessage: 
"column 'uu.id' not found", }, { query: "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", - direct: TS1, + direct: TS2, recursive: TS0, }, { query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", - direct: T0, - recursive: T0, + direct: NoTables, + recursive: NoTables, }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -1103,20 +1056,20 @@ func TestJoinPredicateDependencies(t *testing.T) { directExpect: MergeTableSets(TS0, TS1), }, { query: "select 1 from (select * from t1) x join t2 on x.id = t2.uid", - recursiveExpect: MergeTableSets(TS0, TS2), + recursiveExpect: MergeTableSets(TS0, TS1), directExpect: MergeTableSets(TS1, TS2), }, { query: "select 1 from (select id from t1) x join t2 on x.id = t2.uid", - recursiveExpect: MergeTableSets(TS0, TS2), + recursiveExpect: MergeTableSets(TS0, TS1), directExpect: MergeTableSets(TS1, TS2), }, { query: "select 1 from (select id from t1 union select id from t) x join t2 on x.id = t2.uid", - recursiveExpect: MergeTableSets(TS0, TS1, TS3), + recursiveExpect: MergeTableSets(TS0, TS1, TS2), directExpect: MergeTableSets(TS2, TS3), }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", fakeSchemaInfo()) @@ -1130,107 +1083,6 @@ func TestJoinPredicateDependencies(t *testing.T) { } } -func TestDerivedTablesOrderClause(t *testing.T) { - queries := []struct { - query string - recursiveExpectation TableSet - expectation TableSet - }{{ - query: "select 1 from (select id from user) as t 
order by id", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select id from (select id from user) as t order by id", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select id from (select id from user) as t order by t.id", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select id as foo from (select id from user) as t order by foo", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select bar from (select id as bar from user) as t order by bar", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select bar as foo from (select id as bar from user) as t order by bar", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select bar as foo from (select id as bar from user) as t order by foo", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select bar as foo from (select id as bar, oo from user) as t order by oo", - recursiveExpectation: TS0, - expectation: TS1, - }, { - query: "select bar as foo from (select id, oo from user) as t(bar,oo) order by bar", - recursiveExpectation: TS0, - expectation: TS1, - }} - si := &FakeSI{Tables: map[string]*vindexes.Table{"t": {Name: sqlparser.NewIdentifierCS("t")}}} - for _, query := range queries { - t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) - require.NoError(t, err) - - st, err := Analyze(parse, "user", si) - require.NoError(t, err) - - sel := parse.(*sqlparser.Select) - assert.Equal(t, query.recursiveExpectation, st.RecursiveDeps(sel.OrderBy[0].Expr), "RecursiveDeps") - assert.Equal(t, query.expectation, st.DirectDeps(sel.OrderBy[0].Expr), "DirectDeps") - - }) - } -} - -func TestScopingWComplexDerivedTables(t *testing.T) { - queries := []struct { - query string - errorMessage string - rightExpectation TableSet - leftExpectation TableSet - }{ - { - query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where 
uu.user_id = uu.id))", - rightExpectation: TS0, - leftExpectation: TS0, - }, - { - query: "select 1 from user.user uu where exists (select 1 from user.user as uu where exists (select 1 from (select 1 from user.t1) uu where uu.user_id = uu.id))", - rightExpectation: TS1, - leftExpectation: TS1, - }, - } - for _, query := range queries { - t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) - require.NoError(t, err) - st, err := Analyze(parse, "user", &FakeSI{ - Tables: map[string]*vindexes.Table{ - "t": {Name: sqlparser.NewIdentifierCS("t")}, - }, - }) - if query.errorMessage != "" { - require.EqualError(t, err, query.errorMessage) - } else { - require.NoError(t, err) - sel := parse.(*sqlparser.Select) - comparisonExpr := sel.Where.Expr.(*sqlparser.ExistsExpr).Subquery.Select.(*sqlparser.Select).Where.Expr.(*sqlparser.ExistsExpr).Subquery.Select.(*sqlparser.Select).Where.Expr.(*sqlparser.ComparisonExpr) - left := comparisonExpr.Left - right := comparisonExpr.Right - assert.Equal(t, query.leftExpectation, st.RecursiveDeps(left), "Left RecursiveDeps") - assert.Equal(t, query.rightExpectation, st.RecursiveDeps(right), "Right RecursiveDeps") - } - }) - } -} - func TestScopingWVindexTables(t *testing.T) { queries := []struct { query string @@ -1250,7 +1102,7 @@ func TestScopingWVindexTables(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) hash, _ := vindexes.CreateVindex("hash", "user_index", nil) st, err := Analyze(parse, "user", &FakeSI{ @@ -1292,7 +1144,7 @@ func BenchmarkAnalyzeMultipleDifferentQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1316,7 +1168,7 @@ func 
BenchmarkAnalyzeUnionQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1342,37 +1194,7 @@ func BenchmarkAnalyzeSubQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) - require.NoError(b, err) - - _, _ = Analyze(parse, "d", fakeSchemaInfo()) - } - } -} - -func BenchmarkAnalyzeDerivedTableQueries(b *testing.B) { - queries := []string{ - "select id from (select x as id from user) as t", - "select id from (select foo as id from user) as t", - "select id from (select foo as id from (select x as foo from user) as c) as t", - "select t.id from (select foo as id from user) as t", - "select t.id2 from (select foo as id from user) as t", - "select id from (select 42 as id) as t", - "select t.id from (select 42 as id) as t", - "select ks.t.id from (select 42 as id) as t", - "select * from (select id, id from user) as t", - "select t.baz = 1 from (select id as baz from user) as t", - "select t.id from (select * from user, music) as t", - "select t.id from (select * from user, music) as t order by t.id", - "select t.id from (select * from user) as t join user as u on t.id = u.id", - "select t.col1 from t3 ua join (select t1.id, t1.col1 from t1 join t2) as t", - "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", - "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", - } - - for i := 0; i < b.N; i++ { - for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1398,7 +1220,7 @@ func BenchmarkAnalyzeHavingQueries(b *testing.B) { for i := 0; i 
< b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1427,7 +1249,7 @@ func BenchmarkAnalyzeGroupByQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1450,7 +1272,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1460,7 +1282,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { func parseAndAnalyze(t *testing.T, query, dbName string) (sqlparser.Statement, *SemTable) { t.Helper() - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) semTable, err := Analyze(parse, dbName, fakeSchemaInfo()) @@ -1468,47 +1290,44 @@ func parseAndAnalyze(t *testing.T, query, dbName string) (sqlparser.Statement, * return parse, semTable } +func parseAndAnalyzeStrict(t *testing.T, query, dbName string) (sqlparser.Statement, *SemTable) { + t.Helper() + parse, err := sqlparser.NewTestParser().Parse(query) + require.NoError(t, err) + + semTable, err := AnalyzeStrict(parse, dbName, fakeSchemaInfo()) + require.NoError(t, err) + return parse, semTable +} + func TestSingleUnshardedKeyspace(t *testing.T) { tests := []struct { query string unsharded *vindexes.Keyspace - tables []*vindexes.Table }{ { query: "select 1 from t, t1", unsharded: nil, // both tables are unsharded, but from different keyspaces - tables: nil, }, { query: "select 1 from t2", unsharded: nil, - tables: nil, }, { query: "select 1 from t, t2", unsharded: nil, - tables: nil, }, { 
query: "select 1 from t as A, t as B", - unsharded: ks1, - tables: []*vindexes.Table{ - {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, - {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, - }, + unsharded: unsharded, }, { query: "insert into t select * from t", - unsharded: ks1, - tables: []*vindexes.Table{ - {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, - {Keyspace: ks1, Name: sqlparser.NewIdentifierCS("t")}, - }, + unsharded: unsharded, }, } for _, test := range tests { t.Run(test.query, func(t *testing.T) { _, semTable := parseAndAnalyze(t, test.query, "d") - queryIsUnsharded, tables := semTable.SingleUnshardedKeyspace() + queryIsUnsharded, _ := semTable.SingleUnshardedKeyspace() assert.Equal(t, test.unsharded, queryIsUnsharded) - assert.Equal(t, test.tables, tables) }) } } @@ -1531,7 +1350,7 @@ func TestNextErrors(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) _, err = Analyze(parse, "d", fakeSchemaInfo()) @@ -1540,40 +1359,13 @@ func TestNextErrors(t *testing.T) { } } -func TestUpdateErrors(t *testing.T) { - tests := []struct { - query, expectedError string - }{ - { - query: "update t1, t2 set id = 12", - expectedError: "VT12001: unsupported: multiple (2) tables in update", - }, { - query: "update (select 1 from dual) dt set id = 1", - expectedError: "The target table dt of the UPDATE is not updatable", - }, - } - - for _, test := range tests { - t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) - require.NoError(t, err) - - st, err := Analyze(parse, "d", fakeSchemaInfo()) - if err == nil { - err = st.NotUnshardedErr - } - assert.EqualError(t, err, test.expectedError) - }) - } -} - // TestScopingSubQueryJoinClause tests the scoping behavior of a subquery containing a join clause. 
// The test ensures that the scoping analysis correctly identifies and handles the relationships // between the tables involved in the join operation with the outer query. func TestScopingSubQueryJoinClause(t *testing.T) { query := "select (select 1 from u1 join u2 on u1.id = u2.id and u2.id = u3.id) x from u3" - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ @@ -1589,13 +1381,13 @@ func TestScopingSubQueryJoinClause(t *testing.T) { } -var ks1 = &vindexes.Keyspace{ - Name: "ks1", +var unsharded = &vindexes.Keyspace{ + Name: "unsharded", Sharded: false, } var ks2 = &vindexes.Keyspace{ Name: "ks2", - Sharded: false, + Sharded: true, } var ks3 = &vindexes.Keyspace{ Name: "ks3", @@ -1606,576 +1398,78 @@ var ks3 = &vindexes.Keyspace{ // create table t1(id bigint) // create table t2(uid bigint, name varchar(255)) func fakeSchemaInfo() *FakeSI { - cols1 := []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("id"), - Type: querypb.Type_INT64, - }} - cols2 := []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("uid"), - Type: querypb.Type_INT64, - }, { - Name: sqlparser.NewIdentifierCI("name"), - Type: querypb.Type_VARCHAR, - }} - si := &FakeSI{ Tables: map[string]*vindexes.Table{ - "t": {Name: sqlparser.NewIdentifierCS("t"), Keyspace: ks1}, - "t1": {Name: sqlparser.NewIdentifierCS("t1"), Columns: cols1, ColumnListAuthoritative: true, Keyspace: ks2}, - "t2": {Name: sqlparser.NewIdentifierCS("t2"), Columns: cols2, ColumnListAuthoritative: true, Keyspace: ks3}, + "t": tableT(), + "t1": tableT1(), + "t2": tableT2(), + "t3": tableT3(), }, } return si } -var tbl = map[string]TableInfo{ - "t0": &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "ks"}, - ChildForeignKeys: []vindexes.ChildFKInfo{ - ckInfo(nil, []string{"col"}, []string{"col"}, sqlparser.Restrict), - ckInfo(nil, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, 
sqlparser.SetNull), - }, - ParentForeignKeys: []vindexes.ParentFKInfo{ - pkInfo(nil, []string{"colb"}, []string{"colb"}), - pkInfo(nil, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), - }, - }, - }, - "t1": &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "ks_unmanaged", Sharded: true}, - ChildForeignKeys: []vindexes.ChildFKInfo{ - ckInfo(nil, []string{"cola"}, []string{"cola"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola1", "cola2"}, []string{"ccola1", "ccola2"}, sqlparser.SetNull), - }, - }, - }, - "t2": &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "ks"}, - }, - }, - "t3": &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "undefined_ks", Sharded: true}, - }, - }, -} - -// TestGetAllManagedForeignKeys tests the functionality of getAllManagedForeignKeys. -func TestGetAllManagedForeignKeys(t *testing.T) { - tests := []struct { - name string - analyzer *analyzer - childFkWanted map[TableSet][]vindexes.ChildFKInfo - parentFkWanted map[TableSet][]vindexes.ParentFKInfo - expectedErr string - }{ - { - name: "Collect all foreign key constraints", - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{tbl["t0"], tbl["t1"], - &DerivedTable{}, - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - "ks_unmanaged": vschemapb.Keyspace_unmanaged, - }, - }, - }, - }, - childFkWanted: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"col"}, []string{"col"}, sqlparser.Restrict), - ckInfo(nil, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), - }, - }, - parentFkWanted: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"colb"}, []string{"colb"}), - pkInfo(nil, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), - }, - }, - }, - { - name: "keyspace not found in schema information", - analyzer: &analyzer{ - 
tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t2"], - tbl["t3"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - }, - }, - }, - }, - expectedErr: "undefined_ks keyspace not found", - }, - { - name: "Cyclic fk constraints error", - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t0"], tbl["t1"], - &DerivedTable{}, - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - "ks_unmanaged": vschemapb.Keyspace_unmanaged, - }, - KsError: map[string]error{ - "ks": fmt.Errorf("VT09019: ks has cyclic foreign keys"), - }, - }, - }, - }, - expectedErr: "VT09019: ks has cyclic foreign keys", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - childFk, parentFk, err := tt.analyzer.getAllManagedForeignKeys() - if tt.expectedErr != "" { - require.EqualError(t, err, tt.expectedErr) - return - } - require.EqualValues(t, tt.childFkWanted, childFk) - require.EqualValues(t, tt.parentFkWanted, parentFk) - }) - } -} - -// TestFilterForeignKeysUsingUpdateExpressions tests the functionality of filterForeignKeysUsingUpdateExpressions. 
-func TestFilterForeignKeysUsingUpdateExpressions(t *testing.T) { - cola := sqlparser.NewColName("cola") - colb := sqlparser.NewColName("colb") - colc := sqlparser.NewColName("colc") - cold := sqlparser.NewColName("cold") - a := &analyzer{ - binder: &binder{ - direct: map[sqlparser.Expr]TableSet{ - cola: SingleTableSet(0), - colb: SingleTableSet(0), - colc: SingleTableSet(1), - cold: SingleTableSet(1), - }, - }, - } - updateExprs := sqlparser.UpdateExprs{ - &sqlparser.UpdateExpr{Name: cola, Expr: sqlparser.NewIntLiteral("1")}, - &sqlparser.UpdateExpr{Name: colb, Expr: &sqlparser.NullVal{}}, - &sqlparser.UpdateExpr{Name: colc, Expr: sqlparser.NewIntLiteral("1")}, - &sqlparser.UpdateExpr{Name: cold, Expr: &sqlparser.NullVal{}}, - } - tests := []struct { - name string - analyzer *analyzer - allChildFks map[TableSet][]vindexes.ChildFKInfo - allParentFks map[TableSet][]vindexes.ParentFKInfo - updExprs sqlparser.UpdateExprs - childFksWanted map[TableSet][]vindexes.ChildFKInfo - parentFksWanted map[TableSet][]vindexes.ParentFKInfo - }{ - { - name: "Child Foreign Keys Filtering", - analyzer: a, - allParentFks: nil, - allChildFks: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), - ckInfo(nil, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - }, - SingleTableSet(1): { - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - ckInfo(nil, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, sqlparser.SetNull), - ckInfo(nil, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), - }, - }, - updExprs: updateExprs, - childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"colb"}, 
[]string{"child_colb"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), - }, - SingleTableSet(1): { - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - ckInfo(nil, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, sqlparser.SetNull), - }, - }, - parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{}, - }, { - name: "Parent Foreign Keys Filtering", - analyzer: a, - allParentFks: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), - pkInfo(nil, []string{"pcolc"}, []string{"colc"}), - pkInfo(nil, []string{"pcolb", "pcola"}, []string{"colb", "cola"}), - pkInfo(nil, []string{"pcolb"}, []string{"colb"}), - pkInfo(nil, []string{"pcola"}, []string{"cola"}), - pkInfo(nil, []string{"pcolb", "pcolx"}, []string{"colb", "colx"}), - }, - SingleTableSet(1): { - pkInfo(nil, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), - pkInfo(nil, []string{"pcola"}, []string{"cola"}), - pkInfo(nil, []string{"pcold", "pcolc"}, []string{"cold", "colc"}), - pkInfo(nil, []string{"pcold"}, []string{"cold"}), - pkInfo(nil, []string{"pcold", "pcolx"}, []string{"cold", "colx"}), - }, - }, - allChildFks: nil, - updExprs: updateExprs, - childFksWanted: map[TableSet][]vindexes.ChildFKInfo{}, - parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), - pkInfo(nil, []string{"pcola"}, []string{"cola"}), - }, - SingleTableSet(1): { - pkInfo(nil, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - childFks, parentFks := tt.analyzer.filterForeignKeysUsingUpdateExpressions(tt.allChildFks, tt.allParentFks, tt.updExprs) - require.EqualValues(t, tt.childFksWanted, childFks) - require.EqualValues(t, tt.parentFksWanted, parentFks) - }) +func 
tableT() *vindexes.Table { + return &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t"), + Keyspace: unsharded, } } - -// TestGetInvolvedForeignKeys tests the functionality of getInvolvedForeignKeys. -func TestGetInvolvedForeignKeys(t *testing.T) { - cola := sqlparser.NewColName("cola") - colb := sqlparser.NewColName("colb") - colc := sqlparser.NewColName("colc") - cold := sqlparser.NewColName("cold") - tests := []struct { - name string - stmt sqlparser.Statement - analyzer *analyzer - childFksWanted map[TableSet][]vindexes.ChildFKInfo - parentFksWanted map[TableSet][]vindexes.ParentFKInfo - expectedErr string - }{ - { - name: "Delete Query", - stmt: &sqlparser.Delete{}, - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t0"], - tbl["t1"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - "ks_unmanaged": vschemapb.Keyspace_unmanaged, - }, - }, - }, - }, - childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"col"}, []string{"col"}, sqlparser.Restrict), - ckInfo(nil, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), - }, - }, - }, - { - name: "Update statement", - stmt: &sqlparser.Update{ - Exprs: sqlparser.UpdateExprs{ - &sqlparser.UpdateExpr{ - Name: cola, - Expr: sqlparser.NewIntLiteral("1"), - }, - &sqlparser.UpdateExpr{ - Name: colb, - Expr: &sqlparser.NullVal{}, - }, - &sqlparser.UpdateExpr{ - Name: colc, - Expr: sqlparser.NewIntLiteral("1"), - }, - &sqlparser.UpdateExpr{ - Name: cold, - Expr: &sqlparser.NullVal{}, - }, - }, - }, - analyzer: &analyzer{ - binder: &binder{ - direct: map[sqlparser.Expr]TableSet{ - cola: SingleTableSet(0), - colb: SingleTableSet(0), - colc: SingleTableSet(1), - cold: SingleTableSet(1), - }, - }, - tables: &tableCollector{ - Tables: []TableInfo{ - &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "ks"}, - ChildForeignKeys: 
[]vindexes.ChildFKInfo{ - ckInfo(nil, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), - ckInfo(nil, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - }, - ParentForeignKeys: []vindexes.ParentFKInfo{ - pkInfo(nil, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), - pkInfo(nil, []string{"pcolc"}, []string{"colc"}), - pkInfo(nil, []string{"pcolb", "pcola"}, []string{"colb", "cola"}), - pkInfo(nil, []string{"pcolb"}, []string{"colb"}), - pkInfo(nil, []string{"pcola"}, []string{"cola"}), - pkInfo(nil, []string{"pcolb", "pcolx"}, []string{"colb", "colx"}), - }, - }, - }, - &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "ks"}, - ChildForeignKeys: []vindexes.ChildFKInfo{ - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - ckInfo(nil, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, sqlparser.SetNull), - ckInfo(nil, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), - }, - ParentForeignKeys: []vindexes.ParentFKInfo{ - pkInfo(nil, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), - pkInfo(nil, []string{"pcola"}, []string{"cola"}), - pkInfo(nil, []string{"pcold", "pcolc"}, []string{"cold", "colc"}), - pkInfo(nil, []string{"pcold"}, []string{"cold"}), - pkInfo(nil, []string{"pcold", "pcolx"}, []string{"cold", "colx"}), - }, - }, - }, - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - }, - }, - }, - }, - childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), - }, - 
SingleTableSet(1): { - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - ckInfo(nil, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, sqlparser.SetNull), - }, - }, - parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), - pkInfo(nil, []string{"pcola"}, []string{"cola"}), - }, - SingleTableSet(1): { - pkInfo(nil, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), - }, - }, - }, - { - name: "Replace Query", - stmt: &sqlparser.Insert{ - Action: sqlparser.ReplaceAct, - }, - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t0"], - tbl["t1"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - "ks_unmanaged": vschemapb.Keyspace_unmanaged, - }, - }, - }, - }, - childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"col"}, []string{"col"}, sqlparser.Restrict), - ckInfo(nil, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), - }, - }, - parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"colb"}, []string{"colb"}), - pkInfo(nil, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), - }, - }, - }, - { - name: "Insert Query", - stmt: &sqlparser.Insert{ - Action: sqlparser.InsertAct, - }, - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t0"], - tbl["t1"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - "ks_unmanaged": vschemapb.Keyspace_unmanaged, - }, - }, - }, - }, - childFksWanted: nil, - parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"colb"}, []string{"colb"}), - pkInfo(nil, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), - }, - }, - }, - { - name: 
"Insert Query with On Duplicate", - stmt: &sqlparser.Insert{ - Action: sqlparser.InsertAct, - OnDup: sqlparser.OnDup{ - &sqlparser.UpdateExpr{ - Name: cola, - Expr: sqlparser.NewIntLiteral("1"), - }, - &sqlparser.UpdateExpr{ - Name: colb, - Expr: &sqlparser.NullVal{}, - }, - }, - }, - analyzer: &analyzer{ - binder: &binder{ - direct: map[sqlparser.Expr]TableSet{ - cola: SingleTableSet(0), - colb: SingleTableSet(0), - }, - }, - tables: &tableCollector{ - Tables: []TableInfo{ - &RealTable{ - Table: &vindexes.Table{ - Keyspace: &vindexes.Keyspace{Name: "ks"}, - ChildForeignKeys: []vindexes.ChildFKInfo{ - ckInfo(nil, []string{"col"}, []string{"col"}, sqlparser.Restrict), - ckInfo(nil, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), - ckInfo(nil, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), - ckInfo(nil, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), - ckInfo(nil, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), - }, - ParentForeignKeys: []vindexes.ParentFKInfo{ - pkInfo(nil, []string{"colb"}, []string{"colb"}), - pkInfo(nil, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), - }, - }, - }, - tbl["t1"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - "ks_unmanaged": vschemapb.Keyspace_unmanaged, - }, - }, - }, - }, - childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ - SingleTableSet(0): { - ckInfo(nil, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), - ckInfo(nil, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), - }, - }, - parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ - SingleTableSet(0): { - pkInfo(nil, []string{"colb"}, []string{"colb"}), - pkInfo(nil, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), - }, - }, - }, - { - 
name: "Insert error", - stmt: &sqlparser.Insert{}, - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t2"], - tbl["t3"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - }, - }, - }, - }, - expectedErr: "undefined_ks keyspace not found", - }, - { - name: "Update error", - stmt: &sqlparser.Update{}, - analyzer: &analyzer{ - tables: &tableCollector{ - Tables: []TableInfo{ - tbl["t2"], - tbl["t3"], - }, - si: &FakeSI{ - KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ - "ks": vschemapb.Keyspace_managed, - }, - }, - }, - }, - expectedErr: "undefined_ks keyspace not found", +func tableT1() *vindexes.Table { + return &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("id"), + Type: querypb.Type_INT64, + }}, + ColumnListAuthoritative: true, + ColumnVindexes: []*vindexes.ColumnVindex{ + {Name: "id_vindex"}, }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - childFks, parentFks, err := tt.analyzer.getInvolvedForeignKeys(tt.stmt) - if tt.expectedErr != "" { - require.EqualError(t, err, tt.expectedErr) - return - } - require.EqualValues(t, tt.childFksWanted, childFks) - require.EqualValues(t, tt.parentFksWanted, parentFks) - }) + Keyspace: ks2, } } - -func ckInfo(cTable *vindexes.Table, pCols []string, cCols []string, refAction sqlparser.ReferenceAction) vindexes.ChildFKInfo { - return vindexes.ChildFKInfo{ - Table: cTable, - ParentColumns: sqlparser.MakeColumns(pCols...), - ChildColumns: sqlparser.MakeColumns(cCols...), - OnDelete: refAction, +func tableT2() *vindexes.Table { + return &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("uid"), + Type: querypb.Type_INT64, + }, { + Name: sqlparser.NewIdentifierCI("name"), + Type: querypb.Type_VARCHAR, + CollationName: "utf8_bin", + }, { + Name: 
sqlparser.NewIdentifierCI("textcol"), + Type: querypb.Type_VARCHAR, + CollationName: "big5_bin", + }}, + ColumnListAuthoritative: true, + Keyspace: ks3, } } -func pkInfo(parentTable *vindexes.Table, pCols []string, cCols []string) vindexes.ParentFKInfo { - return vindexes.ParentFKInfo{ - Table: parentTable, - ParentColumns: sqlparser.MakeColumns(pCols...), - ChildColumns: sqlparser.MakeColumns(cCols...), +func tableT3() *vindexes.Table { + return &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t3"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("uid"), + Type: querypb.Type_INT64, + }, { + Name: sqlparser.NewIdentifierCI("name"), + Type: querypb.Type_VARCHAR, + CollationName: "utf8_bin", + }, { + Name: sqlparser.NewIdentifierCI("textcol"), + Type: querypb.Type_VARCHAR, + CollationName: "big5_bin", + }, { + Name: sqlparser.NewIdentifierCI("invcol"), + Type: querypb.Type_VARCHAR, + CollationName: "big5_bin", + Invisible: true, + }}, + ColumnListAuthoritative: true, + Keyspace: ks3, } } diff --git a/go/vt/vtgate/semantics/binder.go b/go/vt/vtgate/semantics/binder.go index f7fc5d64c1a..8b4d6d2163d 100644 --- a/go/vt/vtgate/semantics/binder.go +++ b/go/vt/vtgate/semantics/binder.go @@ -19,8 +19,9 @@ package semantics import ( "strings" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) // binder is responsible for finding all the column references in @@ -30,6 +31,7 @@ import ( type binder struct { recursive ExprDependencies direct ExprDependencies + targets TableSet scoper *scoper tc *tableCollector org originable @@ -56,62 +58,131 @@ func newBinder(scoper *scoper, org originable, tc *tableCollector, typer *typer) func (b *binder) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Subquery: - currScope := b.scoper.currentScope() - b.setSubQueryDependencies(node, currScope) + return 
b.setSubQueryDependencies(node) case *sqlparser.JoinCondition: - currScope := b.scoper.currentScope() - for _, ident := range node.Using { - name := sqlparser.NewColName(ident.String()) - deps, err := b.resolveColumn(name, currScope, true) - if err != nil { - return err - } - currScope.joinUsing[ident.Lowered()] = deps.direct - } + return b.bindJoinCondition(node) case *sqlparser.ColName: - currentScope := b.scoper.currentScope() - deps, err := b.resolveColumn(node, currentScope, false) + return b.bindColName(node) + case *sqlparser.CountStar: + return b.bindCountStar(node) + case *sqlparser.Union: + return b.bindUnion(node) + case sqlparser.TableNames: + return b.bindTableNames(cursor, node) + case *sqlparser.UpdateExpr: + return b.bindUpdateExpr(node) + default: + return nil + } +} + +func (b *binder) bindUpdateExpr(ue *sqlparser.UpdateExpr) error { + ts, ok := b.direct[ue.Name] + if !ok { + return nil + } + b.targets = b.targets.Merge(ts) + return nil +} + +func (b *binder) bindTableNames(cursor *sqlparser.Cursor, tables sqlparser.TableNames) error { + _, isDelete := cursor.Parent().(*sqlparser.Delete) + if !isDelete { + return nil + } + current := b.scoper.currentScope() + for _, target := range tables { + finalDep, err := b.findDependentTableSet(current, target) if err != nil { - if deps.direct.IsEmpty() || - !strings.HasSuffix(err.Error(), "is ambiguous") || - !b.canRewriteUsingJoin(deps, node) { - return err - } + return err + } + b.targets = b.targets.Merge(finalDep.direct) + } + return nil +} - // if we got here it means we are dealing with a ColName that is involved in a JOIN USING. 
- // we do the rewriting of these ColName structs here because it would be difficult to copy all the - // needed state over to the earlyRewriter - deps, err = b.rewriteJoinUsingColName(deps, node, currentScope) - if err != nil { - return err - } +func (b *binder) bindUnion(union *sqlparser.Union) error { + info := b.tc.unionInfo[union] + // TODO: this check can be removed and available type information should be used. + if !info.isAuthoritative { + return nil + } + + for i, expr := range info.exprs { + ae := expr.(*sqlparser.AliasedExpr) + b.recursive[ae.Expr] = info.recursive[i] + if t := info.types[i]; t.Valid() { + b.typer.m[ae.Expr] = t } - b.recursive[node] = deps.recursive - b.direct[node] = deps.direct - if deps.typ.Type != sqltypes.Unknown { - b.typer.setTypeFor(node, deps.typ) + } + return nil +} + +func (b *binder) bindColName(col *sqlparser.ColName) error { + currentScope := b.scoper.currentScope() + deps, err := b.resolveColumn(col, currentScope, false, true) + if err != nil { + s := err.Error() + if deps.direct.IsEmpty() || + !strings.HasSuffix(s, "is ambiguous") || + !b.canRewriteUsingJoin(deps, col) { + return err } - case *sqlparser.CountStar: - b.bindCountStar(node) - case *sqlparser.Union: - info := b.tc.unionInfo[node] - // TODO: this check can be removed and available type information should be used. - if !info.isAuthoritative { - return nil + + // if we got here it means we are dealing with a ColName that is involved in a JOIN USING. 
+ // we do the rewriting of these ColName structs here because it would be difficult to copy all the + // needed state over to the earlyRewriter + deps, err = b.rewriteJoinUsingColName(deps, col, currentScope) + if err != nil { + return err } + } + b.recursive[col] = deps.recursive + b.direct[col] = deps.direct + if deps.typ.Valid() { + b.typer.setTypeFor(col, deps.typ) + } + return nil +} - for i, expr := range info.exprs { - ae := expr.(*sqlparser.AliasedExpr) - b.recursive[ae.Expr] = info.recursive[i] - if t := info.types[i]; t.Type != sqltypes.Unknown { - b.typer.m[ae.Expr] = t - } +func (b *binder) bindJoinCondition(condition *sqlparser.JoinCondition) error { + currScope := b.scoper.currentScope() + for _, ident := range condition.Using { + name := sqlparser.NewColName(ident.String()) + deps, err := b.resolveColumn(name, currScope, true, true) + if err != nil { + return err } + currScope.joinUsing[ident.Lowered()] = deps.direct } return nil } -func (b *binder) bindCountStar(node *sqlparser.CountStar) { +func (b *binder) findDependentTableSet(current *scope, target sqlparser.TableName) (dependency, error) { + var deps dependencies = ¬hing{} + for _, table := range current.tables { + tblName, err := table.Name() + if err != nil { + continue + } + if tblName.Name.String() != target.Name.String() { + continue + } + ts := b.org.tableSetFor(table.GetAliasedTableExpr()) + c := createCertain(ts, ts, evalengine.Type{}) + deps = deps.merge(c, false) + } + finalDep, err := deps.get(nil) + if err != nil { + return dependency{}, err + } + if finalDep.direct != finalDep.recursive { + return dependency{}, vterrors.VT03004(target.Name.String()) + } + return finalDep, nil +} + +func (b *binder) bindCountStar(node *sqlparser.CountStar) error { scope := b.scoper.currentScope() var ts TableSet for _, tbl := range scope.tables { @@ -128,6 +199,7 @@ func (b *binder) bindCountStar(node *sqlparser.CountStar) { } b.recursive[node] = ts b.direct[node] = ts + return nil } func (b 
*binder) rewriteJoinUsingColName(deps dependency, node *sqlparser.ColName, currentScope *scope) (dependency, error) { @@ -145,7 +217,7 @@ func (b *binder) rewriteJoinUsingColName(deps dependency, node *sqlparser.ColNam return dependency{}, err } node.Qualifier = name - deps, err = b.resolveColumn(node, currentScope, false) + deps, err = b.resolveColumn(node, currentScope, false, true) if err != nil { return dependency{}, err } @@ -169,7 +241,8 @@ func (b *binder) canRewriteUsingJoin(deps dependency, node *sqlparser.ColName) b // the binder usually only sets the dependencies of ColNames, but we need to // handle the subquery dependencies differently, so they are set manually here // this method will only keep dependencies to tables outside the subquery -func (b *binder) setSubQueryDependencies(subq *sqlparser.Subquery, currScope *scope) { +func (b *binder) setSubQueryDependencies(subq *sqlparser.Subquery) error { + currScope := b.scoper.currentScope() subqRecursiveDeps := b.recursive.dependencies(subq) subqDirectDeps := b.direct.dependencies(subq) @@ -184,31 +257,35 @@ func (b *binder) setSubQueryDependencies(subq *sqlparser.Subquery, currScope *sc b.recursive[subq] = subqRecursiveDeps.KeepOnly(tablesToKeep) b.direct[subq] = subqDirectDeps.KeepOnly(tablesToKeep) + return nil } -func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allowMulti bool) (dependency, error) { +func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allowMulti, singleTableFallBack bool) (dependency, error) { + if !current.stmtScope && current.inGroupBy { + return b.resolveColInGroupBy(colName, current, allowMulti) + } + if !current.stmtScope && current.inHaving && !current.inHavingAggr { + return b.resolveColumnInHaving(colName, current, allowMulti) + } + var thisDeps dependencies first := true var tableName *sqlparser.TableName + for current != nil { var err error thisDeps, err = b.resolveColumnInScope(current, colName, allowMulti) if err != nil { - 
err = makeAmbiguousError(colName, err) - if thisDeps == nil { - return dependency{}, err - } + return dependency{}, err } if !thisDeps.empty() { - deps, thisErr := thisDeps.get() - if thisErr != nil { - err = makeAmbiguousError(colName, thisErr) - } - return deps, err - } else if err != nil { - return dependency{}, err + return thisDeps.get(colName) } - if current.parent == nil && len(current.tables) == 1 && first && colName.Qualifier.IsEmpty() { + if current.parent == nil && + len(current.tables) == 1 && + first && + colName.Qualifier.IsEmpty() && + singleTableFallBack { // if this is the top scope, and we still haven't been able to find a match, we know we are about to fail // we can check this last scope and see if there is a single table. if there is just one table in the scope // we assume that the column is meant to come from this table. @@ -223,13 +300,147 @@ func (b *binder) resolveColumn(colName *sqlparser.ColName, current *scope, allow first = false current = current.parent } - return dependency{}, ShardedError{&ColumnNotFoundError{Column: colName, Table: tableName}} + return dependency{}, ShardedError{ColumnNotFoundError{Column: colName, Table: tableName}} +} + +func isColumnNotFound(err error) bool { + switch err := err.(type) { + case ColumnNotFoundError: + return true + case ShardedError: + return isColumnNotFound(err.Inner) + default: + return false + } +} + +func (b *binder) resolveColumnInHaving(colName *sqlparser.ColName, current *scope, allowMulti bool) (dependency, error) { + if current.inHavingAggr { + // when inside an aggregation, we'll search the FROM clause before the SELECT expressions + deps, err := b.resolveColumn(colName, current.parent, allowMulti, true) + if deps.direct.NotEmpty() || (err != nil && !isColumnNotFound(err)) { + return deps, err + } + } + + // Here we are searching among the SELECT expressions for a match + thisDeps, err := b.resolveColumnInScope(current, colName, allowMulti) + if err != nil { + return dependency{}, err 
+ } + + if !thisDeps.empty() { + // we found something! let's return it + return thisDeps.get(colName) + } + + notFoundErr := &ColumnNotFoundClauseError{Column: colName.Name.String(), Clause: "having clause"} + if current.inHavingAggr { + // if we are inside an aggregation, we've already looked everywhere. now it's time to give up + return dependency{}, notFoundErr + } + + // Now we'll search the FROM clause, but with a twist. If we find it in the FROM clause, the column must also + // exist as a standalone expression in the SELECT list + deps, err := b.resolveColumn(colName, current.parent, allowMulti, true) + if deps.direct.IsEmpty() { + return dependency{}, notFoundErr + } + + sel := current.stmt.(*sqlparser.Select) // we can be sure of this, since HAVING doesn't exist on UNION + if selDeps := b.searchInSelectExpressions(colName, deps, sel); selDeps.direct.NotEmpty() { + return selDeps, nil + } + + if !current.inHavingAggr && sel.GroupBy == nil { + // if we are not inside an aggregation, and there is no GROUP BY, we consider the FROM clause before failing + if deps.direct.NotEmpty() || (err != nil && !isColumnNotFound(err)) { + return deps, err + } + } + + return dependency{}, notFoundErr +} + +// searchInSelectExpressions searches for the ColName among the SELECT and GROUP BY expressions +// It used dependency information to match the columns +func (b *binder) searchInSelectExpressions(colName *sqlparser.ColName, deps dependency, stmt *sqlparser.Select) dependency { + for _, selectExpr := range stmt.SelectExprs { + ae, ok := selectExpr.(*sqlparser.AliasedExpr) + if !ok { + continue + } + selectCol, ok := ae.Expr.(*sqlparser.ColName) + if !ok || !selectCol.Name.Equal(colName.Name) { + continue + } + + _, direct, _ := b.org.depsForExpr(selectCol) + if deps.direct == direct { + // we have found the ColName in the SELECT expressions, so it's safe to use here + direct, recursive, typ := b.org.depsForExpr(ae.Expr) + return dependency{certain: true, direct: direct, 
recursive: recursive, typ: typ} + } + } + for _, gb := range stmt.GroupByExprs() { + selectCol, ok := gb.(*sqlparser.ColName) + if !ok || !selectCol.Name.Equal(colName.Name) { + continue + } + + _, direct, _ := b.org.depsForExpr(selectCol) + if deps.direct == direct { + // we have found the ColName in the GROUP BY expressions, so it's safe to use here + direct, recursive, typ := b.org.depsForExpr(gb) + return dependency{certain: true, direct: direct, recursive: recursive, typ: typ} + } + } + return dependency{} +} + +// resolveColInGroupBy handles the special rules we have when binding on the GROUP BY column +func (b *binder) resolveColInGroupBy( + colName *sqlparser.ColName, + current *scope, + allowMulti bool, +) (dependency, error) { + if current.parent == nil { + return dependency{}, vterrors.VT13001("did not expect this to be the last scope") + } + // if we are in GROUP BY, we have to search the FROM clause before we search the SELECT expressions + deps, firstErr := b.resolveColumn(colName, current.parent, allowMulti, false) + if firstErr == nil { + return deps, nil + } + + // either we didn't find the column on a table, or it was ambiguous. 
+ // in either case, next step is to search the SELECT expressions + if colName.Qualifier.NonEmpty() { + // if the col name has a qualifier, none of the SELECT expressions are going to match + return dependency{}, nil + } + vtbl, ok := current.tables[0].(*vTableInfo) + if !ok { + return dependency{}, vterrors.VT13001("expected the table info to be a *vTableInfo") + } + + dependencies, err := vtbl.dependenciesInGroupBy(colName.Name.String(), b.org) + if err != nil { + return dependency{}, err + } + if dependencies.empty() { + if isColumnNotFound(firstErr) { + return dependency{}, &ColumnNotFoundClauseError{Column: colName.Name.String(), Clause: "group statement"} + } + return deps, firstErr + } + return dependencies.get(colName) } func (b *binder) resolveColumnInScope(current *scope, expr *sqlparser.ColName, allowMulti bool) (dependencies, error) { var deps dependencies = ¬hing{} for _, table := range current.tables { - if !expr.Qualifier.IsEmpty() && !table.matches(expr.Qualifier) { + if !expr.Qualifier.IsEmpty() && !table.matches(expr.Qualifier) && !current.isUnion { continue } thisDeps, err := table.dependencies(expr.Name.String(), b.org) @@ -240,18 +451,11 @@ func (b *binder) resolveColumnInScope(current *scope, expr *sqlparser.ColName, a } if deps, isUncertain := deps.(*uncertain); isUncertain && deps.fail { // if we have a failure from uncertain, we matched the column to multiple non-authoritative tables - return nil, ProjError{Inner: &AmbiguousColumnError{Column: sqlparser.String(expr)}} + return nil, ProjError{Inner: newAmbiguousColumnError(expr)} } return deps, nil } -func makeAmbiguousError(colName *sqlparser.ColName, err error) error { - if err == ambigousErr { - err = &AmbiguousColumnError{Column: sqlparser.String(colName)} - } - return err -} - // GetSubqueryAndOtherSide returns the subquery and other side of a comparison, iff one of the sides is a SubQuery func GetSubqueryAndOtherSide(node *sqlparser.ComparisonExpr) (*sqlparser.Subquery, 
sqlparser.Expr) { var subq *sqlparser.Subquery diff --git a/go/vt/vtgate/semantics/bitset/bitset_test.go b/go/vt/vtgate/semantics/bitset/bitset_test.go index 87bef299963..a283cbf1f35 100644 --- a/go/vt/vtgate/semantics/bitset/bitset_test.go +++ b/go/vt/vtgate/semantics/bitset/bitset_test.go @@ -19,6 +19,7 @@ package bitset import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -38,3 +39,335 @@ func TestSingletons(t *testing.T) { require.True(t, called) } } + +func TestSingleBitReturnsNegativeOne(t *testing.T) { + bs := Bitset("\x0F") + result := bs.SingleBit() + + assert.Equal(t, -1, result) +} + +func TestToBitsetPanic(t *testing.T) { + defer func() { + if r := recover(); r == nil { + require.NotNil(t, r, "Expected panic, but none occurred") + } + }() + + byteEndsWithZero := []byte{8, 0} + + _ = toBitset(byteEndsWithZero) +} + +func TestBuild(t *testing.T) { + tt := []struct { + name string + bits []int + want Bitset + }{ + {"Empty Bits", []int{}, ""}, + {"Single Bit", []int{3}, "\x08"}, + {"Multiple Bits", []int{1, 3, 5, 7}, "\xAA"}, + {"Large Bits", []int{10, 11, 12}, "\x00\x1C"}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + got := Build(tc.bits...) 
+ assert.Equal(t, tc.want, got) + }) + } +} + +func TestAnd(t *testing.T) { + tt := []struct { + name string + bs1, bs2 Bitset + expected Bitset + }{ + { + name: "Two NonEmpty", + bs1: Build(1, 2, 3, 4, 5), + bs2: Build(3, 4, 5, 6, 7), + expected: Build(3, 4, 5), + }, + { + name: "One Empty", + bs1: Build(1, 2, 3, 4, 5), + bs2: Build(), + expected: "", + }, + { + name: "Both Empty", + bs1: Build(), + bs2: Build(), + expected: "", + }, + { + name: "Different Word Sizes", + bs1: Build(1, 2, 3, 4, 5, 33), + bs2: Build(3, 4, 5, 6, 7), + expected: Build(3, 4, 5), + }, + { + name: "One Empty One NonEmpty", + bs1: Build(), + bs2: Build(3, 4, 5, 6, 7), + expected: "", + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + result := tc.bs1.And(tc.bs2) + require.Equal(t, tc.expected, result) + }) + } +} + +func TestAndNot(t *testing.T) { + tt := []struct { + name string + bs1 Bitset + bs2 Bitset + result Bitset + }{ + { + "Empty AndNot Empty", + "", + Build(1, 2, 3), + "", + }, + { + "NonEmpty And Empty", + Build(1, 2, 3), + "", + Build(1, 2, 3), + }, + { + "NonEmpty And NotEmpty", + Build(1, 2, 3), + Build(2, 3, 4), + Build(1), + }, + { + "Common BitsSet AndNot", + Build(1, 2, 3, 4, 5, 6, 7, 8), + Build(3, 4, 5, 6, 7, 8, 9, 10), + Build(1, 2), + }, + { + "bs1 Greater than bs2", + Build(1, 2, 3, 4, 5, 6, 7, 8), + Build(2, 3, 4), + Build(1, 5, 6, 7, 8), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + got := tc.bs1.AndNot(tc.bs2) + assert.Equal(t, tc.result, got) + }) + } +} + +func TestOr(t *testing.T) { + tt := []struct { + name string + bs1 Bitset + bs2 Bitset + result Bitset + }{ + { + "Empty Or Empty", + "", + "", + "", + }, + { + "Empty Or NonEmpty", + "", + Build(1, 2, 3), + Build(1, 2, 3), + }, + { + "NonEmpty Or Empty", + Build(1, 2, 3), + "", + Build(1, 2, 3), + }, + { + "NonEmpty Or NonEmpty", + Build(1, 2, 3), + Build(4, 5, 6), + Build(1, 2, 3, 4, 5, 6), + }, + { + "Common BitsSet", + Build(1, 2, 3, 4), + Build(3, 
4, 5, 6), + Build(1, 2, 3, 4, 5, 6), + }, + { + "Bs1 Larger Than Bs2", + Build(3, 4, 5, 6, 7, 8, 9, 10), + Build(1, 2), + Build(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + got := tc.bs1.Or(tc.bs2) + assert.Equal(t, tc.result, got) + }) + } +} + +func TestSet(t *testing.T) { + tt := []struct { + name string + bs Bitset + offset int + result Bitset + }{ + { + "Set On Empty Bitset", + "", + 3, + Build(3), + }, + { + "Set On NonEmpty Bitset", + Build(1, 2, 3), + 10, + Build(1, 2, 3, 10), + }, + { + "Set On Existing Bit", + Build(1, 2, 3, 4), + 3, + Build(1, 2, 3, 4), + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + got := tc.bs.Set(tc.offset) + assert.Equal(t, tc.result, got) + }) + } +} + +func TestIsContainedBy(t *testing.T) { + tt := []struct { + name string + bs1 Bitset + bs2 Bitset + expected bool + }{ + { + "Empty Is Contained By Empty", + "", + "", + true, + }, + { + "Empty Is Contained By NonEmpty", + "", + Build(1, 2, 3), + true, + }, + { + "NonEmpty Is Contained By Empty", + Build(1, 2, 3), + "", + false, + }, + { + "Subset Is Contained By Superset", + Build(1, 2, 3), + Build(1, 2, 3, 4, 5, 6), + true, + }, + { + "Not Contained", + Build(1, 2, 3), + Build(4, 5, 6), + false, + }, + { + "Equal Bitsets", + Build(1, 2, 3), + Build(1, 2, 3), + true, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + got := tc.bs1.IsContainedBy(tc.bs2) + assert.Equal(t, tc.expected, got) + }) + } +} + +func TestOverlaps(t *testing.T) { + tt := []struct { + name string + bs1 Bitset + bs2 Bitset + expected bool + }{ + { + "Empty Does Not Overlap Empty", + "", + "", + false, + }, + { + "Empty Does Not Overlap NonEmpty", + "", + Build(1, 2, 3), + false, + }, + { + "NonEmpty Does Not Overlap Empty", + Build(1, 2, 3), + "", + false, + }, + { + "Common Bits Overlap", + Build(1, 2, 3, 4), + Build(3, 4, 5, 6), + true, + }, + { + "No Common Bits Do Not Overlap", + Build(1, 
2, 3, 4), + Build(5, 6, 7, 8), + false, + }, + { + "Partial Overlap", + Build(1, 2, 3, 4, 5), + Build(4, 5, 6), + true, + }, + { + "Equal Bitsets Overlap", + Build(1, 2, 3), + Build(1, 2, 3), + true, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + got := tc.bs1.Overlaps(tc.bs2) + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/go/vt/vtgate/semantics/check_invalid.go b/go/vt/vtgate/semantics/check_invalid.go index 54b2de5c36f..2cf16aa0417 100644 --- a/go/vt/vtgate/semantics/check_invalid.go +++ b/go/vt/vtgate/semantics/check_invalid.go @@ -24,12 +24,12 @@ import ( func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { - case *sqlparser.Update: - return checkUpdate(node) case *sqlparser.Select: return a.checkSelect(cursor, node) case *sqlparser.Nextval: return a.checkNextVal() + case *sqlparser.AliasedTableExpr: + return checkAliasedTableExpr(node) case *sqlparser.JoinTableExpr: return a.checkJoin(node) case *sqlparser.LockingFunc: @@ -52,6 +52,8 @@ func (a *analyzer) checkForInvalidConstructs(cursor *sqlparser.Cursor) error { if node.Action == sqlparser.ReplaceAct { return ShardedError{Inner: &UnsupportedConstruct{errString: "REPLACE INTO with sharded keyspace"}} } + case *sqlparser.OverClause: + return ShardedError{Inner: &UnsupportedConstruct{errString: "OVER CLAUSE with sharded keyspace"}} } return nil @@ -177,17 +179,25 @@ func (a *analyzer) checkSelect(cursor *sqlparser.Cursor, node *sqlparser.Select) return nil } -func checkUpdate(node *sqlparser.Update) error { - if len(node.TableExprs) != 1 { - return ShardedError{Inner: &UnsupportedMultiTablesInUpdateError{ExprCount: len(node.TableExprs)}} - } - alias, isAlias := node.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !isAlias { - return ShardedError{Inner: &UnsupportedMultiTablesInUpdateError{NotAlias: true}} +// checkAliasedTableExpr checks the validity of AliasedTableExpr. 
+func checkAliasedTableExpr(node *sqlparser.AliasedTableExpr) error { + if len(node.Hints) == 0 { + return nil } - _, isDerived := alias.Expr.(*sqlparser.DerivedTable) - if isDerived { - return &TableNotUpdatableError{Table: alias.As.String()} + alreadySeenVindexHint := false + for _, hint := range node.Hints { + if hint.Type.IsVindexHint() { + if alreadySeenVindexHint { + // TableName is safe to call, because only TableExpr can have hints. + // And we already checked for hints being empty. + tableName, err := node.TableName() + if err != nil { + return err + } + return &CantUseMultipleVindexHints{Table: sqlparser.String(tableName)} + } + alreadySeenVindexHint = true + } } return nil } diff --git a/go/vt/vtgate/semantics/check_invalid_test.go b/go/vt/vtgate/semantics/check_invalid_test.go new file mode 100644 index 00000000000..004c37fd854 --- /dev/null +++ b/go/vt/vtgate/semantics/check_invalid_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package semantics + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" +) + +func TestCheckAliasedTableExpr(t *testing.T) { + tests := []struct { + name string + tableString string + wantErr string + }{ + { + name: "Valid AliasedTable - USE VINDEX", + tableString: "payment_pulls use vindex (lookup_vindex_name, x, t)", + }, { + name: "Valid AliasedTable - IGNORE VINDEX", + tableString: "payment_pulls ignore vindex (lookup_vindex_name, x, t)", + }, { + name: "Invalid AliasedTable - multiple USE VINDEX", + tableString: "payment_pulls use vindex (lookup_vindex_name, x, t) use vindex (x)", + wantErr: "VT09020: can not use multiple vindex hints for table payment_pulls", + }, { + name: "Invalid AliasedTable - mixed vindex hints", + tableString: "t.payment_pulls use vindex (lookup_vindex_name, x, t) ignore vindex (x)", + wantErr: "VT09020: can not use multiple vindex hints for table t.payment_pulls", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stmt, err := sqlparser.NewTestParser().Parse("select * from " + tt.tableString) + require.NoError(t, err) + node := stmt.(*sqlparser.Select).From[0].(*sqlparser.AliasedTableExpr) + err = checkAliasedTableExpr(node) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/go/vt/vtgate/semantics/dependencies.go b/go/vt/vtgate/semantics/dependencies.go index 89b6da7045d..70167ff02fc 100644 --- a/go/vt/vtgate/semantics/dependencies.go +++ b/go/vt/vtgate/semantics/dependencies.go @@ -18,8 +18,7 @@ package semantics import ( querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -28,10 +27,11 @@ type ( // tables and figure out bindings and/or errors by merging dependencies together dependencies interface { empty() bool - 
get() (dependency, error) + get(col *sqlparser.ColName) (dependency, error) merge(other dependencies, allowMulti bool) dependencies } dependency struct { + certain bool direct TableSet recursive TableSet typ evalengine.Type @@ -39,7 +39,7 @@ type ( nothing struct{} certain struct { dependency - err error + err bool } uncertain struct { dependency @@ -47,17 +47,15 @@ type ( } ) -var ambigousErr = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "ambiguous") - func createCertain(direct TableSet, recursive TableSet, qt evalengine.Type) *certain { c := &certain{ dependency: dependency{ + certain: true, direct: direct, recursive: recursive, - typ: evalengine.UnknownType(), }, } - if qt.Type != querypb.Type_NULL_TYPE { + if qt.Valid() && qt.Type() != querypb.Type_NULL_TYPE { c.typ = qt } return c @@ -66,9 +64,9 @@ func createCertain(direct TableSet, recursive TableSet, qt evalengine.Type) *cer func createUncertain(direct TableSet, recursive TableSet) *uncertain { return &uncertain{ dependency: dependency{ + certain: false, direct: direct, recursive: recursive, - typ: evalengine.UnknownType(), }, } } @@ -81,9 +79,9 @@ func (u *uncertain) empty() bool { return false } -func (u *uncertain) get() (dependency, error) { +func (u *uncertain) get(col *sqlparser.ColName) (dependency, error) { if u.fail { - return dependency{}, ambigousErr + return dependency{}, newAmbiguousColumnError(col) } return u.dependency, nil } @@ -106,8 +104,11 @@ func (c *certain) empty() bool { return false } -func (c *certain) get() (dependency, error) { - return c.dependency, c.err +func (c *certain) get(col *sqlparser.ColName) (dependency, error) { + if c.err { + return c.dependency, newAmbiguousColumnError(col) + } + return c.dependency, nil } func (c *certain) merge(d dependencies, allowMulti bool) dependencies { @@ -119,7 +120,7 @@ func (c *certain) merge(d dependencies, allowMulti bool) dependencies { c.direct = c.direct.Merge(d.direct) c.recursive = c.recursive.Merge(d.recursive) if !allowMulti { 
- c.err = ambigousErr + c.err = true } return c @@ -132,8 +133,8 @@ func (n *nothing) empty() bool { return true } -func (n *nothing) get() (dependency, error) { - return dependency{}, nil +func (n *nothing) get(*sqlparser.ColName) (dependency, error) { + return dependency{certain: true}, nil } func (n *nothing) merge(d dependencies, _ bool) dependencies { diff --git a/go/vt/vtgate/semantics/derived_table.go b/go/vt/vtgate/semantics/derived_table.go index 9001848f6b4..aabbe9f0b22 100644 --- a/go/vt/vtgate/semantics/derived_table.go +++ b/go/vt/vtgate/semantics/derived_table.go @@ -77,7 +77,7 @@ func handleAliasedExpr(vTbl *DerivedTable, expr *sqlparser.AliasedExpr, cols sql return } - if !expr.As.IsEmpty() { + if expr.As.NotEmpty() { vTbl.columnNames = append(vTbl.columnNames, expr.As.String()) return } @@ -107,12 +107,16 @@ func (dt *DerivedTable) dependencies(colName string, org originable) (dependenci if !strings.EqualFold(name, colName) { continue } + if len(dt.recursive) == 0 { + // we have unexpanded columns and can't figure this out + return nil, ShardedError{Inner: vterrors.VT09015()} + } recursiveDeps, qt := dt.recursive[i], dt.types[i] return createCertain(directDeps, recursiveDeps, qt), nil } - if !dt.hasStar() { + if dt.authoritative() { return ¬hing{}, nil } @@ -137,7 +141,7 @@ func (dt *DerivedTable) Name() (sqlparser.TableName, error) { return dt.ASTNode.TableName() } -func (dt *DerivedTable) getAliasedTableExpr() *sqlparser.AliasedTableExpr { +func (dt *DerivedTable) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { return dt.ASTNode } @@ -150,7 +154,7 @@ func (dt *DerivedTable) GetVindexTable() *vindexes.Table { return nil } -func (dt *DerivedTable) getColumns() []ColumnInfo { +func (dt *DerivedTable) getColumns(bool) []ColumnInfo { cols := make([]ColumnInfo, 0, len(dt.columnNames)) for _, col := range dt.columnNames { cols = append(cols, ColumnInfo{ @@ -160,10 +164,6 @@ func (dt *DerivedTable) getColumns() []ColumnInfo { return cols } -func (dt 
*DerivedTable) hasStar() bool { - return dt.tables.NonEmpty() -} - // GetTables implements the TableInfo interface func (dt *DerivedTable) getTableSet(_ originable) TableSet { return dt.tables diff --git a/go/vt/vtgate/semantics/derived_test.go b/go/vt/vtgate/semantics/derived_test.go new file mode 100644 index 00000000000..8344fd1e261 --- /dev/null +++ b/go/vt/vtgate/semantics/derived_test.go @@ -0,0 +1,265 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +func TestScopingWDerivedTables(t *testing.T) { + queries := []struct { + query string + errorMessage string + recursiveDeps TableSet + directDeps TableSet + }{ + { + query: "select id from (select x as id from user) as t", + recursiveDeps: TS0, + directDeps: TS1, + }, { + query: "select id from (select foo as id from user) as t", + recursiveDeps: TS0, + directDeps: TS1, + }, { + query: "select id from (select foo as id from (select x as foo from user) as c) as t", + recursiveDeps: TS0, + directDeps: TS2, + }, { + query: "select t.id from (select foo as id from user) as t", + recursiveDeps: TS0, + directDeps: TS1, + }, { + query: "select t.id2 from (select foo as id from user) as t", + errorMessage: "column 't.id2' not found", + }, { + query: "select id from (select 42 as id) as t", 
+ recursiveDeps: NoTables, + directDeps: TS1, + }, { + query: "select t.id from (select 42 as id) as t", + recursiveDeps: NoTables, + directDeps: TS1, + }, { + query: "select ks.t.id from (select 42 as id) as t", + errorMessage: "column 'ks.t.id' not found", + }, { + query: "select * from (select id, id from user) as t", + errorMessage: "Duplicate column name 'id'", + }, { + query: "select t.baz = 1 from (select id as baz from user) as t", + directDeps: TS1, + recursiveDeps: TS0, + }, { + query: "select t.id from (select * from user, music) as t", + directDeps: TS2, + recursiveDeps: MergeTableSets(TS0, TS1), + }, { + query: "select t.id from (select * from user, music) as t order by t.id", + directDeps: TS2, + recursiveDeps: MergeTableSets(TS0, TS1), + }, { + query: "select t.id from (select * from user) as t join user as u on t.id = u.id", + directDeps: TS2, + recursiveDeps: TS0, + }, { + query: "select t.col1 from t3 ua join (select t1.id, t1.col1 from t1 join t2) as t", + directDeps: TS3, + recursiveDeps: TS1, + }, { + query: "select uu.test from (select id from t1) uu", + errorMessage: "column 'uu.test' not found", + }, { + query: "select uu.id from (select id as col from t1) uu", + errorMessage: "column 'uu.id' not found", + }, { + query: "select uu.id from (select id as col from t1) uu", + errorMessage: "column 'uu.id' not found", + }, { + query: "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", + directDeps: TS2, + recursiveDeps: TS0, + }, { + query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", + directDeps: NoTables, + recursiveDeps: NoTables, + }, { + query: "select uu.count from (select count(*) as `count` from t1) uu", + directDeps: TS1, + recursiveDeps: TS0, + }} + for _, query := range queries { + t.Run(query.query, func(t *testing.T) { + parse, err := sqlparser.NewTestParser().Parse(query.query) + 
require.NoError(t, err) + st, err := Analyze(parse, "user", &FakeSI{ + Tables: map[string]*vindexes.Table{ + "t": {Name: sqlparser.NewIdentifierCS("t"), Keyspace: ks2}, + }, + }) + + switch { + case query.errorMessage != "" && err != nil: + require.EqualError(t, err, query.errorMessage) + case query.errorMessage != "": + require.EqualError(t, st.NotUnshardedErr, query.errorMessage) + default: + require.NoError(t, err) + sel := parse.(*sqlparser.Select) + assert.Equal(t, query.recursiveDeps, st.RecursiveDeps(extract(sel, 0)), "RecursiveDeps") + assert.Equal(t, query.directDeps, st.DirectDeps(extract(sel, 0)), "DirectDeps") + } + }) + } +} + +func TestDerivedTablesOrderClause(t *testing.T) { + queries := []struct { + query string + recursiveExpectation TableSet + expectation TableSet + }{{ + query: "select 1 from (select id from user) as t order by id", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select id from (select id from user) as t order by id", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select id from (select id from user) as t order by t.id", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select id as foo from (select id from user) as t order by foo", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select bar from (select id as bar from user) as t order by bar", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select bar as foo from (select id as bar from user) as t order by bar", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select bar as foo from (select id as bar from user) as t order by foo", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select bar as foo from (select id as bar, oo from user) as t order by oo", + recursiveExpectation: TS0, + expectation: TS1, + }, { + query: "select bar as foo from (select id, oo from user) as t(bar,oo) order by bar", + recursiveExpectation: TS0, + expectation: TS1, + }} + si := 
&FakeSI{Tables: map[string]*vindexes.Table{"t": {Name: sqlparser.NewIdentifierCS("t")}}} + for _, query := range queries { + t.Run(query.query, func(t *testing.T) { + parse, err := sqlparser.NewTestParser().Parse(query.query) + require.NoError(t, err) + + st, err := Analyze(parse, "user", si) + require.NoError(t, err) + + sel := parse.(*sqlparser.Select) + assert.Equal(t, query.recursiveExpectation, st.RecursiveDeps(sel.OrderBy[0].Expr), "RecursiveDeps") + assert.Equal(t, query.expectation, st.DirectDeps(sel.OrderBy[0].Expr), "DirectDeps") + + }) + } +} + +func TestScopingWComplexDerivedTables(t *testing.T) { + queries := []struct { + query string + errorMessage string + rightExpectation TableSet + leftExpectation TableSet + }{ + { + query: "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", + rightExpectation: TS0, + leftExpectation: TS0, + }, + { + query: "select 1 from user.user uu where exists (select 1 from user.user as uu where exists (select 1 from (select 1 from user.t1) uu where uu.user_id = uu.id))", + rightExpectation: TS1, + leftExpectation: TS1, + }, + } + for _, query := range queries { + t.Run(query.query, func(t *testing.T) { + parse, err := sqlparser.NewTestParser().Parse(query.query) + require.NoError(t, err) + st, err := Analyze(parse, "user", &FakeSI{ + Tables: map[string]*vindexes.Table{ + "t": {Name: sqlparser.NewIdentifierCS("t")}, + }, + }) + if query.errorMessage != "" { + require.EqualError(t, err, query.errorMessage) + } else { + require.NoError(t, err) + sel := parse.(*sqlparser.Select) + comparisonExpr := sel.Where.Expr.(*sqlparser.ExistsExpr).Subquery.Select.(*sqlparser.Select).Where.Expr.(*sqlparser.ExistsExpr).Subquery.Select.(*sqlparser.Select).Where.Expr.(*sqlparser.ComparisonExpr) + left := comparisonExpr.Left + right := comparisonExpr.Right + assert.Equal(t, query.leftExpectation, st.RecursiveDeps(left), "Left RecursiveDeps") + assert.Equal(t, 
query.rightExpectation, st.RecursiveDeps(right), "Right RecursiveDeps") + } + }) + } +} + +func BenchmarkAnalyzeDerivedTableQueries(b *testing.B) { + queries := []string{ + "select id from (select x as id from user) as t", + "select id from (select foo as id from user) as t", + "select id from (select foo as id from (select x as foo from user) as c) as t", + "select t.id from (select foo as id from user) as t", + "select t.id2 from (select foo as id from user) as t", + "select id from (select 42 as id) as t", + "select t.id from (select 42 as id) as t", + "select ks.t.id from (select 42 as id) as t", + "select * from (select id, id from user) as t", + "select t.baz = 1 from (select id as baz from user) as t", + "select t.id from (select * from user, music) as t", + "select t.id from (select * from user, music) as t order by t.id", + "select t.id from (select * from user) as t join user as u on t.id = u.id", + "select t.col1 from t3 ua join (select t1.id, t1.col1 from t1 join t2) as t", + "select uu.id from (select id from t1) as uu where exists (select * from t2 as uu where uu.id = uu.uid)", + "select 1 from user uu where exists (select 1 from user where exists (select 1 from (select 1 from t1) uu where uu.user_id = uu.id))", + } + + for i := 0; i < b.N; i++ { + for _, query := range queries { + parse, err := sqlparser.NewTestParser().Parse(query) + require.NoError(b, err) + + _, _ = Analyze(parse, "d", fakeSchemaInfo()) + } + } +} diff --git a/go/vt/vtgate/semantics/early_rewriter.go b/go/vt/vtgate/semantics/early_rewriter.go index 36060ed8334..51ed110adf9 100644 --- a/go/vt/vtgate/semantics/early_rewriter.go +++ b/go/vt/vtgate/semantics/early_rewriter.go @@ -20,9 +20,9 @@ import ( "fmt" "strconv" - "vitess.io/vitess/go/mysql/collations" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -33,41 +33,92 @@ type earlyRewriter 
struct { clause string warning string expandedColumns map[sqlparser.TableName][]*sqlparser.ColName + env *vtenv.Environment + aliasMapCache map[*sqlparser.Select]map[string]exprContainer + tables *tableCollector + + // reAnalyze is used when we are running in the late stage, after the other parts of semantic analysis + // have happened, and we are introducing or changing the AST. We invoke it so all parts of the query have been + // typed, scoped and bound correctly + reAnalyze func(n sqlparser.SQLNode) error + aggrUDFs []string } func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { - case *sqlparser.Where: - handleWhereClause(node, cursor.Parent()) case sqlparser.SelectExprs: - return handleSelectExprs(r, cursor, node) - case *sqlparser.JoinTableExpr: - handleJoinTableExpr(r, node) - case sqlparser.OrderBy: - handleOrderBy(r, cursor, node) + return r.handleSelectExprs(cursor, node) case *sqlparser.OrExpr: - rewriteOrExpr(cursor, node) + rewriteOrExpr(r.env, cursor, node) + case *sqlparser.AndExpr: + rewriteAndExpr(r.env, cursor, node) case *sqlparser.NotExpr: rewriteNotExpr(cursor, node) - case sqlparser.GroupBy: - r.clause = "group statement" - case *sqlparser.Literal: - return handleLiteral(r, cursor, node) - case *sqlparser.CollateExpr: - return handleCollateExpr(r, node) case *sqlparser.ComparisonExpr: return handleComparisonExpr(cursor, node) case *sqlparser.With: return r.handleWith(node) case *sqlparser.AliasedTableExpr: return r.handleAliasedTable(node) + case *sqlparser.Delete: + return handleDelete(node) + } + return nil +} + +func (r *earlyRewriter) up(cursor *sqlparser.Cursor) error { + switch node := cursor.Node().(type) { + case *sqlparser.JoinTableExpr: + return r.handleJoinTableExprUp(node) + case *sqlparser.AliasedTableExpr: + // this rewriting is done in the `up` phase, because we need the vindex hints to have been + // processed while collecting the tables. 
+ return removeVindexHints(node) + case *sqlparser.GroupBy: + r.clause = "group clause" + iter := &exprIterator{ + node: node.Exprs, + idx: -1, + } + return r.handleGroupBy(cursor.Parent(), iter) + case sqlparser.OrderBy: + r.clause = "order clause" + iter := &orderByIterator{ + node: node, + idx: -1, + r: r, + } + return r.handleOrderBy(cursor.Parent(), iter) + case *sqlparser.Where: + if node.Type == sqlparser.HavingClause { + return r.handleHavingClause(node, cursor.Parent()) + } + + } + return nil +} + +func handleDelete(del *sqlparser.Delete) error { + // When we do not have any target, it is a single table delete. + // In a single table delete, the table references is always a single aliased table expression. + if len(del.Targets) != 0 { + return nil + } + tblExpr, ok := del.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil + } + tblName, err := tblExpr.TableName() + if err != nil { + return err } + del.Targets = append(del.Targets, tblName) return nil } func (r *earlyRewriter) handleAliasedTable(node *sqlparser.AliasedTableExpr) error { tbl, ok := node.Expr.(sqlparser.TableName) - if !ok || !tbl.Qualifier.IsEmpty() { + if !ok || tbl.Qualifier.NotEmpty() { return nil } scope := r.scoper.currentScope() @@ -105,47 +156,65 @@ func rewriteNotExpr(cursor *sqlparser.Cursor, node *sqlparser.NotExpr) { return } - cmp.Operator = sqlparser.Inverse(cmp.Operator) + // There is no inverse operator for NullSafeEqualOp. + // There doesn't exist a null safe non-equality. 
+ if cmp.Operator == sqlparser.NullSafeEqualOp { + return + } + cmp.Operator = cmp.Operator.Inverse() cursor.Replace(cmp) } -func (r *earlyRewriter) up(cursor *sqlparser.Cursor) error { +func (r *earlyRewriter) handleJoinTableExprUp(join *sqlparser.JoinTableExpr) error { // this rewriting is done in the `up` phase, because we need the scope to have been // filled in with the available tables - node, ok := cursor.Node().(*sqlparser.JoinTableExpr) - if !ok || len(node.Condition.Using) == 0 { + if len(join.Condition.Using) == 0 { return nil } - err := rewriteJoinUsing(r.binder, node) + err := rewriteJoinUsing(r.binder, join) if err != nil { return err } // since the binder has already been over the join, we need to invoke it again, so it // can bind columns to the right tables - sqlparser.Rewrite(node.Condition.On, nil, func(cursor *sqlparser.Cursor) bool { - innerErr := r.binder.up(cursor) - if innerErr == nil { - return true - } - err = innerErr - return false - }) - return err + return r.reAnalyze(join.Condition.On) } -// handleWhereClause processes WHERE clauses, specifically the HAVING clause. -func handleWhereClause(node *sqlparser.Where, parent sqlparser.SQLNode) { - if node.Type != sqlparser.HavingClause { - return +// removeVindexHints removes the vindex hints from the aliased table expression provided. 
+func removeVindexHints(node *sqlparser.AliasedTableExpr) error { + if len(node.Hints) == 0 { + return nil + } + var newHints sqlparser.IndexHints + for _, hint := range node.Hints { + if hint.Type.IsVindexHint() { + continue + } + newHints = append(newHints, hint) } - rewriteHavingAndOrderBy(node, parent) + node.Hints = newHints + return nil +} + +// handleHavingClause processes the HAVING clause +func (r *earlyRewriter) handleHavingClause(node *sqlparser.Where, parent sqlparser.SQLNode) error { + sel, ok := parent.(*sqlparser.Select) + if !ok { + return nil + } + expr, err := r.rewriteAliasesInHaving(node.Expr, sel) + if err != nil { + return err + } + node.Expr = expr + return r.reAnalyze(expr) } // handleSelectExprs expands * in SELECT expressions. -func handleSelectExprs(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparser.SelectExprs) error { +func (r *earlyRewriter) handleSelectExprs(cursor *sqlparser.Cursor, node sqlparser.SelectExprs) error { _, isSel := cursor.Parent().(*sqlparser.Select) if !isSel { return nil @@ -153,102 +222,220 @@ func handleSelectExprs(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparse return r.expandStar(cursor, node) } -// handleJoinTableExpr processes JOIN table expressions and handles the Straight Join type. -func handleJoinTableExpr(r *earlyRewriter, node *sqlparser.JoinTableExpr) { - if node.Join != sqlparser.StraightJoinType { - return +type orderByIterator struct { + node sqlparser.OrderBy + idx int + r *earlyRewriter +} + +func (it *orderByIterator) next() sqlparser.Expr { + it.idx++ + + if it.idx >= len(it.node) { + return nil } - node.Join = sqlparser.NormalJoinType - r.warning = "straight join is converted to normal join" + + return it.node[it.idx].Expr } -// handleOrderBy processes the ORDER BY clause. 
-func handleOrderBy(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparser.OrderBy) { - r.clause = "order clause" - rewriteHavingAndOrderBy(node, cursor.Parent()) +func (it *orderByIterator) replace(e sqlparser.Expr) (err error) { + if it.idx >= len(it.node) { + return vterrors.VT13001("went past the last item") + } + it.node[it.idx].Expr = e + return nil } -// rewriteOrExpr rewrites OR expressions when the right side is FALSE. -func rewriteOrExpr(cursor *sqlparser.Cursor, node *sqlparser.OrExpr) { - newNode := rewriteOrFalse(*node) - if newNode != nil { - cursor.ReplaceAndRevisit(newNode) +type exprIterator struct { + node []sqlparser.Expr + idx int +} + +func (it *exprIterator) next() sqlparser.Expr { + it.idx++ + + if it.idx >= len(it.node) { + return nil } + + return it.node[it.idx] +} + +func (it *exprIterator) replace(e sqlparser.Expr) error { + if it.idx >= len(it.node) { + return vterrors.VT13001("went past the last item") + } + it.node[it.idx] = e + return nil +} + +type iterator interface { + next() sqlparser.Expr + replace(e sqlparser.Expr) error } -// handleLiteral processes literals within the context of ORDER BY expressions. -func handleLiteral(r *earlyRewriter, cursor *sqlparser.Cursor, node *sqlparser.Literal) error { - newNode, err := r.rewriteOrderByExpr(node) +func (r *earlyRewriter) replaceLiteralsInOrderBy(e sqlparser.Expr, iter iterator) (bool, error) { + lit := getIntLiteral(e) + if lit == nil { + return false, nil + } + + newExpr, recheck, err := r.rewriteOrderByLiteral(lit) if err != nil { - return err + return false, err } - if newNode != nil { - cursor.Replace(newNode) + + if getIntLiteral(newExpr) == nil { + coll, ok := e.(*sqlparser.CollateExpr) + if ok { + coll.Expr = newExpr + newExpr = coll + } + } else { + // the expression is still a literal int. that means that we don't really need to sort by it. 
+ // we'll just replace the number with a string instead, just like mysql would do in this situation + // mysql> explain select 1 as foo from user group by 1; + // + // mysql> show warnings; + // +-------+------+-----------------------------------------------------------------+ + // | Level | Code | Message | + // +-------+------+-----------------------------------------------------------------+ + // | Note | 1003 | /* select#1 */ select 1 AS `foo` from `test`.`user` group by '' | + // +-------+------+-----------------------------------------------------------------+ + newExpr = sqlparser.NewStrLiteral("") } - return nil + + err = iter.replace(newExpr) + if err != nil { + return false, err + } + if recheck { + err = r.reAnalyze(newExpr) + } + if err != nil { + return false, err + } + return true, nil } -// handleCollateExpr processes COLLATE expressions. -func handleCollateExpr(r *earlyRewriter, node *sqlparser.CollateExpr) error { - lit, ok := node.Expr.(*sqlparser.Literal) - if !ok { - return nil +func (r *earlyRewriter) replaceLiteralsInGroupBy(e sqlparser.Expr) (sqlparser.Expr, error) { + lit := getIntLiteral(e) + if lit == nil { + return nil, nil } - newNode, err := r.rewriteOrderByExpr(lit) + + newExpr, err := r.rewriteGroupByExpr(lit) if err != nil { - return err + return nil, err } - if newNode != nil { - node.Expr = newNode + + if getIntLiteral(newExpr) == nil { + coll, ok := e.(*sqlparser.CollateExpr) + if ok { + coll.Expr = newExpr + newExpr = coll + } + } else { + // the expression is still a literal int. that means that we don't really need to sort by it. 
+ // we'll just replace the number with a string instead, just like mysql would do in this situation + // mysql> explain select 1 as foo from user group by 1; + // + // mysql> show warnings; + // +-------+------+-----------------------------------------------------------------+ + // | Level | Code | Message | + // +-------+------+-----------------------------------------------------------------+ + // | Note | 1003 | /* select#1 */ select 1 AS `foo` from `test`.`user` group by '' | + // +-------+------+-----------------------------------------------------------------+ + newExpr = sqlparser.NewStrLiteral("") } - return nil + + return newExpr, nil } -// handleComparisonExpr processes Comparison expressions, specifically for tuples with equal length and EqualOp operator. -func handleComparisonExpr(cursor *sqlparser.Cursor, node *sqlparser.ComparisonExpr) error { - lft, lftOK := node.Left.(sqlparser.ValTuple) - rgt, rgtOK := node.Right.(sqlparser.ValTuple) - if !lftOK || !rgtOK || len(lft) != len(rgt) || node.Operator != sqlparser.EqualOp { +func getIntLiteral(e sqlparser.Expr) *sqlparser.Literal { + var lit *sqlparser.Literal + switch node := e.(type) { + case *sqlparser.Literal: + lit = node + case *sqlparser.CollateExpr: + expr, ok := node.Expr.(*sqlparser.Literal) + if !ok { + return nil + } + lit = expr + default: return nil } - var predicates []sqlparser.Expr - for i, l := range lft { - r := rgt[i] - predicates = append(predicates, &sqlparser.ComparisonExpr{ - Operator: sqlparser.EqualOp, - Left: l, - Right: r, - Escape: node.Escape, - }) + if lit.Type != sqlparser.IntVal { + return nil } - cursor.Replace(sqlparser.AndExpressions(predicates...)) - return nil + return lit } -func (r *earlyRewriter) expandStar(cursor *sqlparser.Cursor, node sqlparser.SelectExprs) error { - currentScope := r.scoper.currentScope() - var selExprs sqlparser.SelectExprs - changed := false - for _, selectExpr := range node { - starExpr, isStarExpr := selectExpr.(*sqlparser.StarExpr) - if 
!isStarExpr { - selExprs = append(selExprs, selectExpr) +// handleOrderBy processes the ORDER BY clause. +func (r *earlyRewriter) handleOrderBy(parent sqlparser.SQLNode, iter iterator) error { + stmt, ok := parent.(sqlparser.SelectStatement) + if !ok { + return nil + } + + sel := sqlparser.GetFirstSelect(stmt) + for e := iter.next(); e != nil; e = iter.next() { + lit, err := r.replaceLiteralsInOrderBy(e, iter) + if err != nil { + return err + } + if lit { continue } - starExpanded, colNames, err := r.expandTableColumns(starExpr, currentScope.tables, r.binder.usingJoinInfo, r.scoper.org) + + expr, err := r.rewriteAliasesInOrderBy(e, sel) if err != nil { return err } - if !starExpanded || colNames == nil { - selExprs = append(selExprs, selectExpr) - continue + + if err = iter.replace(expr); err != nil { + return err + } + + if err = r.reAnalyze(expr); err != nil { + return err } - selExprs = append(selExprs, colNames...) - changed = true } - if changed { - cursor.ReplaceAndRevisit(selExprs) + + return nil +} + +// handleGroupBy processes the GROUP BY clause. +func (r *earlyRewriter) handleGroupBy(parent sqlparser.SQLNode, iter iterator) error { + stmt, ok := parent.(sqlparser.SelectStatement) + if !ok { + return nil } + + sel := sqlparser.GetFirstSelect(stmt) + for e := iter.next(); e != nil; e = iter.next() { + expr, err := r.replaceLiteralsInGroupBy(e) + if err != nil { + return err + } + if expr == nil { + expr, err = r.rewriteAliasesInGroupBy(e, sel) + if err != nil { + return err + } + + } + err = iter.replace(expr) + if err != nil { + return err + } + + if err = r.reAnalyze(expr); err != nil { + return err + } + } + return nil } @@ -258,88 +445,383 @@ func (r *earlyRewriter) expandStar(cursor *sqlparser.Cursor, node sqlparser.Sele // in SELECT points to that expression, not any table column. 
// - However, if the aliased expression is an aggregation and the column identifier in // the HAVING/ORDER BY clause is inside an aggregation function, the rule does not apply. -func rewriteHavingAndOrderBy(node, parent sqlparser.SQLNode) { - sel, isSel := parent.(*sqlparser.Select) - if !isSel { - return +func (r *earlyRewriter) rewriteAliasesInGroupBy(node sqlparser.Expr, sel *sqlparser.Select) (expr sqlparser.Expr, err error) { + type ExprContainer struct { + expr sqlparser.Expr + ambiguous bool } - sqlparser.SafeRewrite(node, avoidSubqueries, - func(cursor *sqlparser.Cursor) bool { - col, ok := cursor.Node().(*sqlparser.ColName) - if !ok || !col.Qualifier.IsEmpty() { + currentScope := r.scoper.currentScope() + aliases := r.getAliasMap(sel) + aggrTrack := &aggrTracker{} + + output := sqlparser.CopyOnRewrite(node, aggrTrack.down, func(cursor *sqlparser.CopyOnWriteCursor) { + switch col := cursor.Node().(type) { + case sqlparser.AggrFunc: + aggrTrack.popAggr() + case *sqlparser.ColName: + if col.Qualifier.NonEmpty() { // we are only interested in columns not qualified by table names - return true + break } - _, parentIsAggr := cursor.Parent().(sqlparser.AggrFunc) + item, found := aliases[col.Name.Lowered()] + if !found { + break + } - // Iterate through SELECT expressions. 
- for _, e := range sel.SelectExprs { - ae, ok := e.(*sqlparser.AliasedExpr) - if !ok || !ae.As.Equal(col.Name) { - // we are searching for aliased expressions that match the column we have found - continue - } + isColumnOnTable, sure := r.isColumnOnTable(col, currentScope) + if found && isColumnOnTable { + r.warning = fmt.Sprintf("Column '%s' in group statement is ambiguous", sqlparser.String(col)) + } - expr := ae.Expr - if parentIsAggr { - if _, aliasPointsToAggr := expr.(sqlparser.AggrFunc); aliasPointsToAggr { - return false - } - } + if isColumnOnTable && sure { + break + } + + if !sure { + r.warning = "Missing table info, so not binding to anything on the FROM clause" + } + + if item.ambiguous { + err = newAmbiguousColumnError(col) + } else if aggrTrack.insideAggr && sqlparser.ContainsAggregation(item.expr) { + err = &InvalidUseOfGroupFunction{} + } + if err != nil { + cursor.StopTreeWalk() + return + } + + cursor.Replace(sqlparser.CloneExpr(item.expr)) + } + }, nil) + + expr = output.(sqlparser.Expr) + return +} + +func (r *earlyRewriter) rewriteAliasesInHaving(node sqlparser.Expr, sel *sqlparser.Select) (expr sqlparser.Expr, err error) { + currentScope := r.scoper.currentScope() + if currentScope.isUnion { + // It is not safe to rewrite order by clauses in unions. 
+ return node, nil + } + + aliases := r.getAliasMap(sel) + aggrTrack := &aggrTracker{ + insideAggr: false, + aggrUDFs: r.aggrUDFs, + } + output := sqlparser.CopyOnRewrite(node, aggrTrack.down, func(cursor *sqlparser.CopyOnWriteCursor) { + var col *sqlparser.ColName - if isSafeToRewrite(expr) { - cursor.Replace(expr) + switch node := cursor.Node().(type) { + case sqlparser.AggrFunc: + aggrTrack.popAggr() + return + case *sqlparser.FuncExpr: + if node.Name.EqualsAnyString(r.aggrUDFs) { + aggrTrack.popAggr() + } + return + case *sqlparser.ColName: + col = node + default: + return + } + + if col.Qualifier.NonEmpty() { + // we are only interested in columns not qualified by table names + return + } + + item, found := aliases[col.Name.Lowered()] + if aggrTrack.insideAggr { + // inside aggregations, we want to first look for columns in the FROM clause + isColumnOnTable, sure := r.isColumnOnTable(col, currentScope) + if isColumnOnTable { + if found && sure { + r.warning = fmt.Sprintf("Column '%s' in having clause is ambiguous", sqlparser.String(col)) } + return } - return true - }) + } else if !found { + // if outside aggregations, we don't care about FROM columns + // if there is no matching alias, there is no rewriting needed + return + } + + // If we get here, it means we have found an alias and want to use it + if item.ambiguous { + err = newAmbiguousColumnError(col) + } else if aggrTrack.insideAggr && sqlparser.ContainsAggregation(item.expr) { + err = &InvalidUseOfGroupFunction{} + } + if err != nil { + cursor.StopTreeWalk() + return + } + + newColName := sqlparser.CopyOnRewrite(item.expr, nil, r.fillInQualifiers, nil) + + cursor.Replace(newColName) + }, nil) + + expr = output.(sqlparser.Expr) + return } -func avoidSubqueries(node, _ sqlparser.SQLNode) bool { - _, isSubQ := node.(*sqlparser.Subquery) - return !isSubQ +type aggrTracker struct { + insideAggr bool + aggrUDFs []string } -func isSafeToRewrite(e sqlparser.Expr) bool { - safeToRewrite := true - _ = 
sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { - switch node.(type) { - case *sqlparser.ColName: - safeToRewrite = false - return false, nil +func (at *aggrTracker) down(node, _ sqlparser.SQLNode) bool { + switch node := node.(type) { + case *sqlparser.Subquery: + return false + case sqlparser.AggrFunc: + at.insideAggr = true + case *sqlparser.FuncExpr: + if node.Name.EqualsAnyString(at.aggrUDFs) { + at.insideAggr = true + } + } + + return true +} + +func (at *aggrTracker) popAggr() { + at.insideAggr = false +} + +// rewriteAliasesInOrderBy rewrites columns in the ORDER BY to use aliases +// from the SELECT expressions when applicable, following MySQL scoping rules: +// - A column identifier without a table qualifier that matches an alias introduced +// in SELECT points to that expression, not any table column. +// - However, if the aliased expression is an aggregation and the column identifier in +// the HAVING/ORDER BY clause is inside an aggregation function, the rule does not apply. +func (r *earlyRewriter) rewriteAliasesInOrderBy(node sqlparser.Expr, sel *sqlparser.Select) (expr sqlparser.Expr, err error) { + currentScope := r.scoper.currentScope() + if currentScope.isUnion { + // It is not safe to rewrite order by clauses in unions. 
+ return node, nil + } + + aliases := r.getAliasMap(sel) + aggrTrack := &aggrTracker{} + output := sqlparser.CopyOnRewrite(node, aggrTrack.down, func(cursor *sqlparser.CopyOnWriteCursor) { + var col *sqlparser.ColName + + switch node := cursor.Node().(type) { case sqlparser.AggrFunc: - return false, nil + aggrTrack.popAggr() + return + case *sqlparser.ColName: + col = node + default: + return + } + + if col.Qualifier.NonEmpty() { + // we are only interested in columns not qualified by table names + return + } + + var item exprContainer + var found bool + + item, found = aliases[col.Name.Lowered()] + if !found { + // if there is no matching alias, there is no rewriting needed + return + } + isColumnOnTable, sure := r.isColumnOnTable(col, currentScope) + if found && isColumnOnTable && sure { + r.warning = fmt.Sprintf("Column '%s' in order by statement is ambiguous", sqlparser.String(col)) + } + + topLevel := col == node + if isColumnOnTable && sure && !topLevel { + // we only want to replace columns that are not coming from the table + return + } + + if !sure { + r.warning = "Missing table info, so not binding to anything on the FROM clause" + } + + if item.ambiguous { + err = newAmbiguousColumnError(col) + } else if aggrTrack.insideAggr && sqlparser.ContainsAggregation(item.expr) { + err = &InvalidUseOfGroupFunction{} + } + if err != nil { + cursor.StopTreeWalk() + return } - return true, nil - }, e) - return safeToRewrite + + newColName := sqlparser.CopyOnRewrite(item.expr, nil, r.fillInQualifiers, nil) + + cursor.Replace(newColName) + }, nil) + + expr = output.(sqlparser.Expr) + return } -func (r *earlyRewriter) rewriteOrderByExpr(node *sqlparser.Literal) (sqlparser.Expr, error) { - currScope, found := r.scoper.specialExprScopes[node] +// fillInQualifiers adds qualifiers to any columns we have rewritten +func (r *earlyRewriter) fillInQualifiers(cursor *sqlparser.CopyOnWriteCursor) { + col, ok := cursor.Node().(*sqlparser.ColName) + if !ok || 
col.Qualifier.NonEmpty() { + return + } + ts, found := r.binder.direct[col] if !found { - return nil, nil + panic("uh oh") + } + offset := ts.TableOffset() + if offset < 0 { + // this is a column that is not coming from a table - it's an alias introduced in a SELECT expression + // Example: select (1+1) as foo from bar order by foo + // we don't want to add a qualifier to foo here + cursor.Replace(sqlparser.NewColName(col.Name.String())) + return + } + tbl := r.tables.Tables[offset] + tblName, err := tbl.Name() + if err != nil { + panic(err) + } + cursor.Replace(sqlparser.NewColNameWithQualifier(col.Name.String(), tblName)) +} + +func (r *earlyRewriter) isColumnOnTable(col *sqlparser.ColName, currentScope *scope) (isColumn bool, isCertain bool) { + if !currentScope.stmtScope && currentScope.parent != nil { + currentScope = currentScope.parent + } + deps, err := r.binder.resolveColumn(col, currentScope, false, false) + if err != nil { + return false, true + } + return true, deps.certain +} + +func (r *earlyRewriter) getAliasMap(sel *sqlparser.Select) (aliases map[string]exprContainer) { + var found bool + aliases, found = r.aliasMapCache[sel] + if found { + return + } + aliases = map[string]exprContainer{} + for _, e := range sel.SelectExprs { + ae, ok := e.(*sqlparser.AliasedExpr) + if !ok { + continue + } + + var alias string + + item := exprContainer{expr: ae.Expr} + if ae.As.NotEmpty() { + alias = ae.As.Lowered() + } else if col, ok := ae.Expr.(*sqlparser.ColName); ok { + alias = col.Name.Lowered() + } + + if old, alreadyExists := aliases[alias]; alreadyExists && !sqlparser.Equals.Expr(old.expr, item.expr) { + item.ambiguous = true + } + + aliases[alias] = item + } + return aliases +} + +type exprContainer struct { + expr sqlparser.Expr + ambiguous bool +} + +func (r *earlyRewriter) rewriteOrderByLiteral(node *sqlparser.Literal) (expr sqlparser.Expr, needReAnalysis bool, err error) { + scope, found := r.scoper.specialExprScopes[node] + if !found { + return node, 
false, nil + } + num, err := strconv.Atoi(node.Val) + if err != nil { + return nil, false, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error parsing column number: %s", node.Val) + } + + stmt, isSel := scope.stmt.(*sqlparser.Select) + if !isSel { + return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error invalid statement type, expect Select, got: %T", scope.stmt) + } + + if num < 1 || num > len(stmt.SelectExprs) { + return nil, false, &ColumnNotFoundClauseError{ + Column: fmt.Sprintf("%d", num), + Clause: r.clause, + } + } + + // We loop like this instead of directly accessing the offset, to make sure there are no unexpanded `*` before + for i := 0; i < num; i++ { + if _, ok := stmt.SelectExprs[i].(*sqlparser.AliasedExpr); !ok { + return nil, false, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot use column offsets in %s when using `%s`", r.clause, sqlparser.String(stmt.SelectExprs[i])) + } + } + + colOffset := num - 1 + aliasedExpr, ok := stmt.SelectExprs[colOffset].(*sqlparser.AliasedExpr) + if !ok { + return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "don't know how to handle %s", sqlparser.String(node)) + } + + if scope.isUnion { + colName := sqlparser.NewColName(aliasedExpr.ColumnName()) + vtabl, ok := scope.tables[0].(*vTableInfo) + if !ok { + panic("BUG: not expected") + } + + // since column names can be ambiguous here, we want to do the binding by offset and not by column name + allColExprs := vtabl.cols[colOffset] + direct, recursive, typ := r.binder.org.depsForExpr(allColExprs) + r.binder.direct[colName] = direct + r.binder.recursive[colName] = recursive + r.binder.typer.m[colName] = typ + + return colName, false, nil + } + + return realCloneOfColNames(aliasedExpr.Expr, false), true, nil +} + +func (r *earlyRewriter) rewriteGroupByExpr(node *sqlparser.Literal) (sqlparser.Expr, error) { + scope, found := r.scoper.specialExprScopes[node] + if !found { + return node, nil } num, err := strconv.Atoi(node.Val) if err != nil { return 
nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "error parsing column number: %s", node.Val) } - stmt, isSel := currScope.stmt.(*sqlparser.Select) + + stmt, isSel := scope.stmt.(*sqlparser.Select) if !isSel { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error invalid statement type, expect Select, got: %T", currScope.stmt) + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error invalid statement type, expect Select, got: %T", scope.stmt) } if num < 1 || num > len(stmt.SelectExprs) { return nil, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.BadFieldError, "Unknown column '%d' in '%s'", num, r.clause) } + // We loop like this instead of directly accessing the offset, to make sure there are no unexpanded `*` before for i := 0; i < num; i++ { - expr := stmt.SelectExprs[i] - _, ok := expr.(*sqlparser.AliasedExpr) - if !ok { - return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot use column offsets in %s when using `%s`", r.clause, sqlparser.String(expr)) + if _, ok := stmt.SelectExprs[i].(*sqlparser.AliasedExpr); !ok { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "cannot use column offsets in %s when using `%s`", r.clause, sqlparser.String(stmt.SelectExprs[i])) } } @@ -348,12 +830,111 @@ func (r *earlyRewriter) rewriteOrderByExpr(node *sqlparser.Literal) (sqlparser.E return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "don't know how to handle %s", sqlparser.String(node)) } - if !aliasedExpr.As.IsEmpty() { - return sqlparser.NewColName(aliasedExpr.As.String()), nil + if scope.isUnion { + colName := sqlparser.NewColName(aliasedExpr.ColumnName()) + return colName, nil + } + + return realCloneOfColNames(aliasedExpr.Expr, false), nil +} + +// rewriteOrExpr rewrites OR expressions when the right side is FALSE. 
+func rewriteOrExpr(env *vtenv.Environment, cursor *sqlparser.Cursor, node *sqlparser.OrExpr) { + newNode := rewriteOrFalse(env, *node) + if newNode != nil { + cursor.ReplaceAndRevisit(newNode) + } +} + +// rewriteAndExpr rewrites AND expressions when either side is TRUE. +func rewriteAndExpr(env *vtenv.Environment, cursor *sqlparser.Cursor, node *sqlparser.AndExpr) { + newNode := rewriteAndTrue(env, *node) + if newNode != nil { + cursor.ReplaceAndRevisit(newNode) + } +} + +func rewriteAndTrue(env *vtenv.Environment, andExpr sqlparser.AndExpr) sqlparser.Expr { + // we are looking for the pattern `WHERE c = 1 AND 1 = 1` + isTrue := func(subExpr sqlparser.Expr) bool { + coll := env.CollationEnv().DefaultConnectionCharset() + evalEnginePred, err := evalengine.Translate(subExpr, &evalengine.Config{ + Environment: env, + Collation: coll, + }) + if err != nil { + return false + } + + env := evalengine.EmptyExpressionEnv(env) + res, err := env.Evaluate(evalEnginePred) + if err != nil { + return false + } + + boolValue, err := res.Value(coll).ToBool() + if err != nil { + return false + } + + return boolValue + } + + if isTrue(andExpr.Left) { + return andExpr.Right + } else if isTrue(andExpr.Right) { + return andExpr.Left } - expr := realCloneOfColNames(aliasedExpr.Expr, currScope.isUnion) - return expr, nil + return nil +} + +// handleComparisonExpr processes Comparison expressions, specifically for tuples with equal length and EqualOp operator. 
+func handleComparisonExpr(cursor *sqlparser.Cursor, node *sqlparser.ComparisonExpr) error { + lft, lftOK := node.Left.(sqlparser.ValTuple) + rgt, rgtOK := node.Right.(sqlparser.ValTuple) + if !lftOK || !rgtOK || len(lft) != len(rgt) || node.Operator != sqlparser.EqualOp { + return nil + } + var predicates []sqlparser.Expr + for i, l := range lft { + r := rgt[i] + predicates = append(predicates, &sqlparser.ComparisonExpr{ + Operator: sqlparser.EqualOp, + Left: l, + Right: r, + Escape: node.Escape, + }) + } + cursor.Replace(sqlparser.AndExpressions(predicates...)) + return nil +} + +func (r *earlyRewriter) expandStar(cursor *sqlparser.Cursor, node sqlparser.SelectExprs) error { + currentScope := r.scoper.currentScope() + var selExprs sqlparser.SelectExprs + changed := false + for _, selectExpr := range node { + starExpr, isStarExpr := selectExpr.(*sqlparser.StarExpr) + if !isStarExpr { + selExprs = append(selExprs, selectExpr) + continue + } + starExpanded, colNames, err := r.expandTableColumns(starExpr, currentScope.tables, r.binder.usingJoinInfo, r.scoper.org) + if err != nil { + return err + } + if !starExpanded || colNames == nil { + selExprs = append(selExprs, selectExpr) + continue + } + selExprs = append(selExprs, colNames...) + changed = true + } + if changed { + cursor.ReplaceAndRevisit(selExprs) + } + return nil } // realCloneOfColNames clones all the expressions including ColName. 
@@ -373,21 +954,25 @@ func realCloneOfColNames(expr sqlparser.Expr, union bool) sqlparser.Expr { }, nil).(sqlparser.Expr) } -func rewriteOrFalse(orExpr sqlparser.OrExpr) sqlparser.Expr { +func rewriteOrFalse(env *vtenv.Environment, orExpr sqlparser.OrExpr) sqlparser.Expr { // we are looking for the pattern `WHERE c = 1 OR 1 = 0` isFalse := func(subExpr sqlparser.Expr) bool { - evalEnginePred, err := evalengine.Translate(subExpr, nil) + coll := env.CollationEnv().DefaultConnectionCharset() + evalEnginePred, err := evalengine.Translate(subExpr, &evalengine.Config{ + Environment: env, + Collation: coll, + }) if err != nil { return false } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(env) res, err := env.Evaluate(evalEnginePred) if err != nil { return false } - boolValue, err := res.Value(collations.Default()).ToBool() + boolValue, err := res.Value(coll).ToBool() if err != nil { return false } @@ -454,7 +1039,7 @@ func findOnlyOneTableInfoThatHasColumn(b *binder, tbl sqlparser.TableExpr, colum case *sqlparser.AliasedTableExpr: ts := b.tc.tableSetFor(tbl) tblInfo := b.tc.Tables[ts.TableOffset()] - for _, info := range tblInfo.getColumns() { + for _, info := range tblInfo.getColumns(false /* ignoreInvisibleCol */) { if column.EqualString(info.Name) { return []TableInfo{tblInfo}, nil } @@ -611,10 +1196,7 @@ func (e *expanderState) processColumnsFor(tbl TableInfo) error { outer: // in this first loop we just find columns used in any JOIN USING used on this table - for _, col := range tbl.getColumns() { - if col.Invisible { - continue - } + for _, col := range tbl.getColumns(true /* ignoreInvisibleCol */) { ts, found := usingCols[col.Name] if found { for i, ts := range ts.Constituents() { @@ -630,11 +1212,7 @@ outer: } // and this time around we are printing any columns not involved in any JOIN USING - for _, col := range tbl.getColumns() { - if col.Invisible { - continue - } - + for _, col := range tbl.getColumns(true /* 
ignoreInvisibleCol */) { if ts, found := usingCols[col.Name]; found && currTable.IsSolvedBy(ts) { continue } @@ -655,17 +1233,13 @@ type expanderState struct { // addColumn adds columns to the expander state. If we have vschema info about the query, // we also store which columns were expanded func (e *expanderState) addColumn(col ColumnInfo, tbl TableInfo, tblName sqlparser.TableName) { - withQualifier := e.needsQualifier var colName *sqlparser.ColName var alias sqlparser.IdentifierCI - if withQualifier { + if e.needsQualifier { colName = sqlparser.NewColNameWithQualifier(col.Name, tblName) } else { colName = sqlparser.NewColName(col.Name) } - if e.needsQualifier { - alias = sqlparser.NewIdentifierCI(col.Name) - } e.colNames = append(e.colNames, &sqlparser.AliasedExpr{Expr: colName, As: alias}) e.storeExpandInfo(tbl, tblName, colName) } diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go index bf09d2d5cc3..81d3ed8c450 100644 --- a/go/vt/vtgate/semantics/early_rewriter_test.go +++ b/go/vt/vtgate/semantics/early_rewriter_test.go @@ -32,7 +32,7 @@ import ( func TestExpandStar(t *testing.T) { ks := &vindexes.Keyspace{ Name: "main", - Sharded: false, + Sharded: true, } schemaInfo := &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -122,17 +122,17 @@ func TestExpandStar(t *testing.T) { expSQL: "select 42, a, b, c from t1", }, { sql: "select * from t1, t2", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1, t2", + expSQL: "select t1.a, t1.b, t1.c, t2.c1, t2.c2 from t1, t2", expanded: "main.t1.a, main.t1.b, main.t1.c, main.t2.c1, main.t2.c2", }, { sql: "select t1.* from t1, t2", - expSQL: "select t1.a as a, t1.b as b, t1.c as c from t1, t2", + expSQL: "select t1.a, t1.b, t1.c from t1, t2", }, { sql: "select *, t1.* from t1, t2", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2, t1.a as a, t1.b as b, t1.c as c from t1, t2", + expSQL: "select t1.a, t1.b, t1.c, 
t2.c1, t2.c2, t1.a, t1.b, t1.c from t1, t2", }, { // aliased table sql: "select * from t1 a, t2 b", - expSQL: "select a.a as a, a.b as b, a.c as c, b.c1 as c1, b.c2 as c2 from t1 as a, t2 as b", + expSQL: "select a.a, a.b, a.c, b.c1, b.c2 from t1 as a, t2 as b", }, { // t3 is non-authoritative table sql: "select * from t3", expSQL: "select * from t3", @@ -141,39 +141,39 @@ func TestExpandStar(t *testing.T) { expSQL: "select * from t1, t2, t3", }, { // t3 is non-authoritative table sql: "select t1.*, t2.*, t3.* from t1, t2, t3", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2, t3.* from t1, t2, t3", + expSQL: "select t1.a, t1.b, t1.c, t2.c1, t2.c2, t3.* from t1, t2, t3", }, { sql: "select foo.* from t1, t2", expErr: "Unknown table 'foo'", }, { sql: "select * from t1 join t2 on t1.a = t2.c1", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 join t2 on t1.a = t2.c1", + expSQL: "select t1.a, t1.b, t1.c, t2.c1, t2.c2 from t1 join t2 on t1.a = t2.c1", }, { sql: "select * from t1 left join t2 on t1.a = t2.c1", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 left join t2 on t1.a = t2.c1", + expSQL: "select t1.a, t1.b, t1.c, t2.c1, t2.c2 from t1 left join t2 on t1.a = t2.c1", }, { sql: "select * from t1 right join t2 on t1.a = t2.c1", - expSQL: "select t1.a as a, t1.b as b, t1.c as c, t2.c1 as c1, t2.c2 as c2 from t1 right join t2 on t1.a = t2.c1", + expSQL: "select t1.a, t1.b, t1.c, t2.c1, t2.c2 from t1 right join t2 on t1.a = t2.c1", }, { sql: "select * from t2 join t4 using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4 from t2 join t4 on t2.c1 = t4.c1", + expSQL: "select t2.c1, t2.c2, t4.c4 from t2 join t4 on t2.c1 = t4.c1", expanded: "main.t2.c1, main.t2.c2, main.t4.c4", }, { sql: "select * from t2 join t4 using (c1) join t2 as X using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, X.c2 as c2 from t2 join t4 on t2.c1 = t4.c1 join t2 as X 
on t2.c1 = t4.c1 and t2.c1 = X.c1 and t4.c1 = X.c1", + expSQL: "select t2.c1, t2.c2, t4.c4, X.c2 from t2 join t4 on t2.c1 = t4.c1 join t2 as X on t2.c1 = t4.c1 and t2.c1 = X.c1 and t4.c1 = X.c1", }, { sql: "select * from t2 join t4 using (c1), t2 as t2b join t4 as t4b using (c1)", - expSQL: "select t2.c1 as c1, t2.c2 as c2, t4.c4 as c4, t2b.c1 as c1, t2b.c2 as c2, t4b.c4 as c4 from t2 join t4 on t2.c1 = t4.c1, t2 as t2b join t4 as t4b on t2b.c1 = t4b.c1", + expSQL: "select t2.c1, t2.c2, t4.c4, t2b.c1, t2b.c2, t4b.c4 from t2 join t4 on t2.c1 = t4.c1, t2 as t2b join t4 as t4b on t2b.c1 = t4b.c1", }, { sql: "select * from t1 join t5 using (b)", - expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 on t1.b = t5.b", + expSQL: "select t1.b, t1.a, t1.c, t5.a from t1 join t5 on t1.b = t5.b", expanded: "main.t1.a, main.t1.b, main.t1.c, main.t5.a", }, { sql: "select * from t1 join t5 using (b) having b = 12", - expSQL: "select t1.b as b, t1.a as a, t1.c as c, t5.a as a from t1 join t5 on t1.b = t5.b having b = 12", + expSQL: "select t1.b, t1.a, t1.c, t5.a from t1 join t5 on t1.b = t5.b having t1.b = 12", }, { - sql: "select 1 from t1 join t5 using (b) having b = 12", - expSQL: "select 1 from t1 join t5 on t1.b = t5.b having t1.b = 12", + sql: "select 1 from t1 join t5 using (b) where b = 12", + expSQL: "select 1 from t1 join t5 on t1.b = t5.b where t1.b = 12", }, { sql: "select * from (select 12) as t", expSQL: "select `12` from (select 12 from dual) as t", @@ -183,11 +183,11 @@ func TestExpandStar(t *testing.T) { }, { // if we are only star-expanding authoritative tables, we don't need to stop the expansion sql: "SELECT * FROM (SELECT t2.*, 12 AS foo FROM t3, t2) as results", - expSQL: "select c1, c2, foo from (select t2.c1 as c1, t2.c2 as c2, 12 as foo from t3, t2) as results", + expSQL: "select c1, c2, foo from (select t2.c1, t2.c2, 12 as foo from t3, t2) as results", }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err 
:= sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -288,7 +288,7 @@ func TestRewriteJoinUsingColumns(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -304,33 +304,168 @@ func TestRewriteJoinUsingColumns(t *testing.T) { } -func TestOrderByGroupByLiteral(t *testing.T) { +func TestGroupByColumnName(t *testing.T) { schemaInfo := &FakeSI{ - Tables: map[string]*vindexes.Table{}, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("id"), + Type: sqltypes.Int32, + }, { + Name: sqlparser.NewIdentifierCI("col1"), + Type: sqltypes.Int32, + }}, + ColumnListAuthoritative: true, + }, + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("id"), + Type: sqltypes.Int32, + }, { + Name: sqlparser.NewIdentifierCI("col2"), + Type: sqltypes.Int32, + }}, + ColumnListAuthoritative: true, + }, + }, } cDB := "db" tcases := []struct { - sql string - expSQL string - expErr string + sql string + expSQL string + expDeps TableSet + expErr string + warning string }{{ - sql: "select 1 as id from t1 order by 1", - expSQL: "select 1 as id from t1 order by id asc", + sql: "select t3.col from t3 group by kj", + expSQL: "select t3.col from t3 group by kj", + expDeps: TS0, + }, { + sql: "select t2.col2 as xyz from t2 group by xyz", + expSQL: "select t2.col2 as xyz from t2 group by t2.col2", + expDeps: TS0, }, { - sql: "select t1.col from t1 order by 1", 
- expSQL: "select t1.col from t1 order by t1.col asc", + sql: "select id from t1 group by unknown", + expErr: "Unknown column 'unknown' in 'group statement'", }, { - sql: "select t1.col from t1 group by 1", - expSQL: "select t1.col from t1 group by t1.col", + sql: "select t1.c as x, sum(t2.id) as x from t1 join t2 group by x", + expErr: "VT03005: cannot group on 'x'", }, { - sql: "select t1.col as xyz from t1 group by 1", - expSQL: "select t1.col as xyz from t1 group by xyz", + sql: "select t1.col1, sum(t2.id) as col1 from t1 join t2 group by col1", + expSQL: "select t1.col1, sum(t2.id) as col1 from t1 join t2 group by col1", + expDeps: TS0, + warning: "Column 'col1' in group statement is ambiguous", }, { - sql: "select t1.col as xyz, count(*) from t1 group by 1 order by 2", - expSQL: "select t1.col as xyz, count(*) from t1 group by xyz order by count(*) asc", + sql: "select t2.col2 as id, sum(t2.id) as x from t1 join t2 group by id", + expSQL: "select t2.col2 as id, sum(t2.id) as x from t1 join t2 group by t2.col2", + expDeps: TS1, + }, { + sql: "select sum(t2.col2) as id, sum(t2.id) as x from t1 join t2 group by id", + expErr: "VT03005: cannot group on 'id'", + }, { + sql: "select count(*) as x from t1 group by x", + expErr: "VT03005: cannot group on 'x'", + }} + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) + require.NoError(t, err) + selectStatement := ast.(*sqlparser.Select) + st, err := AnalyzeStrict(selectStatement, cDB, schemaInfo) + if tcase.expErr == "" { + require.NoError(t, err) + assert.Equal(t, tcase.expSQL, sqlparser.String(selectStatement)) + gb := selectStatement.GroupBy + deps := st.RecursiveDeps(gb.Exprs[0]) + assert.Equal(t, tcase.expDeps, deps) + assert.Equal(t, tcase.warning, st.Warning) + } else { + require.EqualError(t, err, tcase.expErr) + } + }) + } +} + +func TestGroupByLiteral(t *testing.T) { + schemaInfo := &FakeSI{ + Tables: map[string]*vindexes.Table{}, 
+ } + cDB := "db" + tcases := []struct { + sql string + expSQL string + expDeps TableSet + expErr string + }{{ + sql: "select t1.col from t1 group by 1", + expSQL: "select t1.col from t1 group by t1.col", + expDeps: TS0, + }, { + sql: "select t1.col as xyz from t1 group by 1", + expSQL: "select t1.col as xyz from t1 group by t1.col", + expDeps: TS0, }, { sql: "select id from t1 group by 2", - expErr: "Unknown column '2' in 'group statement'", + expErr: "Unknown column '2' in 'group clause'", + }, { + sql: "select *, id from t1 group by 2", + expErr: "cannot use column offsets in group clause when using `*`", + }} + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) + require.NoError(t, err) + selectStatement := ast.(*sqlparser.Select) + st, err := Analyze(selectStatement, cDB, schemaInfo) + if tcase.expErr == "" { + require.NoError(t, err) + assert.Equal(t, tcase.expSQL, sqlparser.String(selectStatement)) + gb := selectStatement.GroupBy + deps := st.RecursiveDeps(gb.Exprs[0]) + assert.Equal(t, tcase.expDeps, deps) + } else { + require.EqualError(t, err, tcase.expErr) + } + }) + } +} + +func TestOrderByLiteral(t *testing.T) { + schemaInfo := &FakeSI{ + Tables: map[string]*vindexes.Table{}, + } + cDB := "db" + tcases := []struct { + sql string + expSQL string + expDeps TableSet + expErr string + }{{ + sql: "select 1 as id from t1 order by 1", + expSQL: "select 1 as id from t1 order by '' asc", + expDeps: NoTables, + }, { + sql: "select t1.col from t1 order by 1", + expSQL: "select t1.col from t1 order by t1.col asc", + expDeps: TS0, + }, { + sql: "select t1.col from t1 order by 1.0", + expSQL: "select t1.col from t1 order by 1.0 asc", + expDeps: NoTables, + }, { + sql: "select t1.col from t1 order by 'fubick'", + expSQL: "select t1.col from t1 order by 'fubick' asc", + expDeps: NoTables, + }, { + sql: "select t1.col as foo from t1 order by 1", + expSQL: "select t1.col as foo from t1 order by 
t1.col asc", + expDeps: TS0, + }, { + sql: "select t1.col as xyz, count(*) from t1 group by 1 order by 2", + expSQL: "select t1.col as xyz, count(*) from t1 group by t1.col order by count(*) asc", + expDeps: TS0, }, { sql: "select id from t1 order by 2", expErr: "Unknown column '2' in 'order clause'", @@ -338,21 +473,141 @@ func TestOrderByGroupByLiteral(t *testing.T) { sql: "select *, id from t1 order by 2", expErr: "cannot use column offsets in order clause when using `*`", }, { - sql: "select *, id from t1 group by 2", - expErr: "cannot use column offsets in group statement when using `*`", + sql: "select id from t1 order by 1 collate utf8_general_ci", + expSQL: "select id from t1 order by id collate utf8_general_ci asc", + expDeps: TS0, + }, { + sql: "select id from `user` union select 1 from dual order by 1", + expSQL: "select id from `user` union select 1 from dual order by id asc", + expDeps: TS0, + }, { + sql: "select id from t1 order by 2", + expErr: "Unknown column '2' in 'order clause'", + }, { + sql: "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 1", + expSQL: "select a.id, b.id from `user` as a, user_extra as b union select 1, 2 from dual order by id asc", + expDeps: TS0, + }, { + sql: "select a.id, b.id from user as a, user_extra as b union select 1, 2 order by 2", + expSQL: "select a.id, b.id from `user` as a, user_extra as b union select 1, 2 from dual order by id asc", + expDeps: TS1, }, { - sql: "select id from t1 order by 1 collate utf8_general_ci", - expSQL: "select id from t1 order by id collate utf8_general_ci asc", + sql: "select user.id as foo from user union select col from user_extra order by 1", + expSQL: "select `user`.id as foo from `user` union select col from user_extra order by foo asc", + expDeps: MergeTableSets(TS0, TS1), }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) + 
require.NoError(t, err) + selectStatement := ast.(sqlparser.SelectStatement) + st, err := Analyze(selectStatement, cDB, schemaInfo) + if tcase.expErr == "" { + require.NoError(t, err) + assert.Equal(t, tcase.expSQL, sqlparser.String(selectStatement)) + ordering := selectStatement.GetOrderBy() + deps := st.RecursiveDeps(ordering[0].Expr) + assert.Equal(t, tcase.expDeps, deps) + } else { + require.EqualError(t, err, tcase.expErr) + } + }) + } +} + +func TestHavingColumnName(t *testing.T) { + schemaInfo := getSchemaWithKnownColumns() + cDB := "db" + tcases := []struct { + sql string + expSQL string + expDeps TableSet + expErr string + warning string + }{{ + sql: "select id, sum(foo) as sumOfFoo from t1 having sumOfFoo > 1", + expSQL: "select id, sum(foo) as sumOfFoo from t1 having sum(t1.foo) > 1", + expDeps: TS0, + }, { + sql: "select id as X, sum(foo) as X from t1 having X > 1", + expErr: "Column 'X' in field list is ambiguous", + }, { + sql: "select id, sum(t1.foo) as foo from t1 having sum(foo) > 1", + expSQL: "select id, sum(t1.foo) as foo from t1 having sum(foo) > 1", + expDeps: TS0, + warning: "Column 'foo' in having clause is ambiguous", + }, { + sql: "select id, sum(t1.foo) as foo from t1 having custom_udf(foo) > 1", + expSQL: "select id, sum(t1.foo) as foo from t1 having custom_udf(foo) > 1", + expDeps: TS0, + warning: "Column 'foo' in having clause is ambiguous", + }, { + sql: "select id, custom_udf(t1.foo) as foo from t1 having foo > 1", + expSQL: "select id, custom_udf(t1.foo) as foo from t1 having custom_udf(t1.foo) > 1", + expDeps: TS0, + }, { + sql: "select id, sum(t1.foo) as XYZ from t1 having sum(XYZ) > 1", + expErr: "Invalid use of group function", + }, { + sql: "select foo + 2 as foo from t1 having foo = 42", + expSQL: "select foo + 2 as foo from t1 having t1.foo + 2 = 42", + expDeps: TS0, + }, { + sql: "select count(*), ename from emp group by ename having comm > 1000", + expErr: "Unknown column 'comm' in 'having clause'", + }, { + sql: "select 
sal, ename from emp having empno > 1000", + expSQL: "select sal, ename from emp having empno > 1000", + expDeps: TS0, + }, { + sql: "select foo, count(*) foo from t1 group by foo having foo > 1000", + expErr: "Column 'foo' in field list is ambiguous", + }, { + sql: "select foo, count(*) foo from t1, emp group by foo having sum(sal) > 1000", + expSQL: "select foo, count(*) as foo from t1, emp group by foo having sum(sal) > 1000", + expDeps: TS1, + warning: "Column 'foo' in group statement is ambiguous", + }, { + sql: "select foo as X, sal as foo from t1, emp having sum(X) > 1000", + expSQL: "select foo as X, sal as foo from t1, emp having sum(t1.foo) > 1000", + expDeps: TS0, + }, { + sql: "select count(*) a from someTable having a = 10", + expSQL: "select count(*) as a from someTable having count(*) = 10", + expDeps: TS0, + }, { + sql: "select count(*) from emp having ename = 10", + expSQL: "select count(*) from emp having ename = 10", + expDeps: TS0, + }, { + sql: "select sum(sal) empno from emp where ename > 0 having empno = 2", + expSQL: "select sum(sal) as empno from emp where ename > 0 having sum(emp.sal) = 2", + expDeps: TS0, + }, { + // test with missing schema info + sql: "select foo, count(bar) as x from someTable group by foo having id > avg(baz)", + expErr: "Unknown column 'id' in 'having clause'", + }, { + sql: "select t1.foo as alias, count(bar) as x from t1 group by foo having foo+54 = 56", + expSQL: "select t1.foo as alias, count(bar) as x from t1 group by foo having foo + 54 = 56", + expDeps: TS0, + }, { + sql: "select 1 from t1 group by foo having foo = 1 and count(*) > 1", + expSQL: "select 1 from t1 group by foo having foo = 1 and count(*) > 1", + expDeps: TS0, + }} + + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement := ast.(*sqlparser.Select) - _, err = Analyze(selectStatement, cDB, schemaInfo) + semTbl, err := 
AnalyzeStrict(selectStatement, cDB, schemaInfo) if tcase.expErr == "" { require.NoError(t, err) assert.Equal(t, tcase.expSQL, sqlparser.String(selectStatement)) + assert.Equal(t, tcase.expDeps, semTbl.RecursiveDeps(selectStatement.Having.Expr)) + assert.Equal(t, tcase.warning, semTbl.Warning, "warning") } else { require.EqualError(t, err, tcase.expErr) } @@ -360,34 +615,133 @@ func TestOrderByGroupByLiteral(t *testing.T) { } } -func TestHavingAndOrderByColumnName(t *testing.T) { +func getSchemaWithKnownColumns() *FakeSI { schemaInfo := &FakeSI{ - Tables: map[string]*vindexes.Table{}, + Tables: map[string]*vindexes.Table{ + "t1": { + Keyspace: &vindexes.Keyspace{Name: "ks", Sharded: true}, + Name: sqlparser.NewIdentifierCS("t1"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("id"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("foo"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("bar"), + Type: sqltypes.VarChar, + }}, + ColumnListAuthoritative: true, + }, + "emp": { + Keyspace: &vindexes.Keyspace{Name: "ks", Sharded: true}, + Name: sqlparser.NewIdentifierCS("emp"), + Columns: []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("empno"), + Type: sqltypes.Int64, + }, { + Name: sqlparser.NewIdentifierCI("ename"), + Type: sqltypes.VarChar, + }, { + Name: sqlparser.NewIdentifierCI("sal"), + Type: sqltypes.Int64, + }}, + ColumnListAuthoritative: true, + }, + }, + UDFs: []string{"custom_udf"}, } + return schemaInfo +} + +func TestOrderByColumnName(t *testing.T) { + schemaInfo := getSchemaWithKnownColumns() cDB := "db" tcases := []struct { - sql string - expSQL string - expErr string + sql string + expSQL string + expErr string + warning string + deps TableSet }{{ - sql: "select id, sum(foo) as sumOfFoo from t1 having sumOfFoo > 1", - expSQL: "select id, sum(foo) as sumOfFoo from t1 having sum(foo) > 1", - }, { sql: "select id, sum(foo) as sumOfFoo from t1 order by sumOfFoo", - expSQL: "select id, sum(foo) as 
sumOfFoo from t1 order by sum(foo) asc", - }, { - sql: "select id, sum(foo) as foo from t1 having sum(foo) > 1", - expSQL: "select id, sum(foo) as foo from t1 having sum(foo) > 1", + expSQL: "select id, sum(foo) as sumOfFoo from t1 order by sum(t1.foo) asc", + deps: TS0, + }, { + sql: "select id, sum(foo) as sumOfFoo from t1 order by sumOfFoo + 1", + expSQL: "select id, sum(foo) as sumOfFoo from t1 order by sum(t1.foo) + 1 asc", + deps: TS0, + }, { + sql: "select id, sum(foo) as sumOfFoo from t1 order by abs(sumOfFoo)", + expSQL: "select id, sum(foo) as sumOfFoo from t1 order by abs(sum(t1.foo)) asc", + deps: TS0, + }, { + sql: "select id, sum(foo) as sumOfFoo from t1 order by max(sumOfFoo)", + expErr: "Invalid use of group function", + }, { + sql: "select id, sum(foo) as foo from t1 order by foo + 1", + expSQL: "select id, sum(foo) as foo from t1 order by foo + 1 asc", + deps: TS0, + warning: "Column 'foo' in order by statement is ambiguous", + }, { + sql: "select id, sum(foo) as foo from t1 order by foo", + expSQL: "select id, sum(foo) as foo from t1 order by sum(t1.foo) asc", + deps: TS0, + warning: "Column 'foo' in order by statement is ambiguous", + }, { + sql: "select id, lower(min(foo)) as foo from t1 order by min(foo)", + expSQL: "select id, lower(min(foo)) as foo from t1 order by min(foo) asc", + deps: TS0, + warning: "Column 'foo' in order by statement is ambiguous", + }, { + sql: "select id, lower(min(foo)) as foo from t1 order by foo", + expSQL: "select id, lower(min(foo)) as foo from t1 order by lower(min(t1.foo)) asc", + deps: TS0, + warning: "Column 'foo' in order by statement is ambiguous", + }, { + sql: "select id, lower(min(foo)) as foo from t1 order by abs(foo)", + expSQL: "select id, lower(min(foo)) as foo from t1 order by abs(foo) asc", + deps: TS0, + warning: "Column 'foo' in order by statement is ambiguous", + }, { + sql: "select id, t1.bar as foo from t1 group by id order by min(foo)", + expSQL: "select id, t1.bar as foo from t1 group by id 
order by min(foo) asc", + deps: TS0, + warning: "Column 'foo' in order by statement is ambiguous", + }, { + sql: "select id, bar as id, count(*) from t1 order by id", + expErr: "Column 'id' in field list is ambiguous", + }, { + sql: "select id, id, count(*) from t1 order by id", + expSQL: "select id, id, count(*) from t1 order by t1.id asc", + deps: TS0, + warning: "Column 'id' in order by statement is ambiguous", + }, { + sql: "select id, count(distinct foo) k from t1 group by id order by k", + expSQL: "select id, count(distinct foo) as k from t1 group by id order by count(distinct t1.foo) asc", + deps: TS0, + warning: "Column 'id' in group statement is ambiguous", + }, { + sql: "select user.id as foo from user union select col from user_extra order by foo", + expSQL: "select `user`.id as foo from `user` union select col from user_extra order by foo asc", + deps: MergeTableSets(TS0, TS1), + }, { + sql: "select foo as X, sal as foo from t1, emp order by sum(X)", + expSQL: "select foo as X, sal as foo from t1, emp order by sum(t1.foo) asc", + deps: TS0, }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) - selectStatement := ast.(*sqlparser.Select) - _, err = Analyze(selectStatement, cDB, schemaInfo) + selectStatement := ast.(sqlparser.SelectStatement) + semTable, err := AnalyzeStrict(selectStatement, cDB, schemaInfo) if tcase.expErr == "" { require.NoError(t, err) assert.Equal(t, tcase.expSQL, sqlparser.String(selectStatement)) + orderByExpr := selectStatement.GetOrderBy()[0].Expr + assert.Equal(t, tcase.deps, semTable.RecursiveDeps(orderByExpr)) + assert.Equal(t, tcase.warning, semTable.Warning) } else { require.EqualError(t, err, tcase.expErr) } @@ -417,16 +771,16 @@ func TestSemTableDependenciesAfterExpandStar(t *testing.T) { otherTbl: -1, sameTbl: 0, expandedCol: 1, }, { sql: "select t2.a, t1.a, t1.* from t1, t2", - 
expSQL: "select t2.a, t1.a, t1.a as a from t1, t2", + expSQL: "select t2.a, t1.a, t1.a from t1, t2", otherTbl: 0, sameTbl: 1, expandedCol: 2, }, { sql: "select t2.a, t.a, t.* from t1 t, t2", - expSQL: "select t2.a, t.a, t.a as a from t1 as t, t2", + expSQL: "select t2.a, t.a, t.a from t1 as t, t2", otherTbl: 0, sameTbl: 1, expandedCol: 2, }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -452,7 +806,7 @@ func TestSemTableDependenciesAfterExpandStar(t *testing.T) { func TestRewriteNot(t *testing.T) { ks := &vindexes.Keyspace{ Name: "main", - Sharded: false, + Sharded: true, } schemaInfo := &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -486,7 +840,7 @@ func TestRewriteNot(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -504,7 +858,7 @@ func TestRewriteNot(t *testing.T) { func TestConstantFolding(t *testing.T) { ks := &vindexes.Keyspace{ Name: "main", - Sharded: false, + Sharded: true, } schemaInfo := &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -538,7 +892,7 @@ func TestConstantFolding(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) _, err = Analyze(ast, cDB, schemaInfo) require.NoError(t, err) @@ -565,7 +919,7 @@ func TestCTEToDerivedTableRewrite(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t 
*testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) _, err = Analyze(ast, cDB, fakeSchemaInfo()) require.NoError(t, err) @@ -573,3 +927,33 @@ func TestCTEToDerivedTableRewrite(t *testing.T) { }) } } + +// TestDeleteTargetTableRewrite checks that delete target rewrite is done correctly. +func TestDeleteTargetTableRewrite(t *testing.T) { + cDB := "db" + tcases := []struct { + sql string + target string + }{{ + sql: "delete from t1", + target: "t1", + }, { + sql: "delete from t1 XYZ", + target: "XYZ", + }, { + sql: "delete t2 from t1 t1, t t2", + target: "t2", + }, { + sql: "delete t2,t1 from t t1, t t2", + target: "t2, t1", + }} + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) + require.NoError(t, err) + _, err = Analyze(ast, cDB, fakeSchemaInfo()) + require.NoError(t, err) + require.Equal(t, tcase.target, sqlparser.String(ast.(*sqlparser.Delete).Targets)) + }) + } +} diff --git a/go/vt/vtgate/semantics/errors.go b/go/vt/vtgate/semantics/errors.go index 8d0b23d7f82..3a66a7adb24 100644 --- a/go/vt/vtgate/semantics/errors.go +++ b/go/vt/vtgate/semantics/errors.go @@ -51,6 +51,14 @@ type ( AmbiguousColumnError struct{ Column string } SubqueryColumnCountError struct{ Expected int } ColumnsMissingInSchemaError struct{} + CantUseMultipleVindexHints struct{ Table string } + InvalidUseOfGroupFunction struct{} + CantGroupOn struct{ Column string } + + NoSuchVindexFound struct { + Table string + VindexName string + } UnsupportedMultiTablesInUpdateError struct { ExprCount int @@ -64,6 +72,10 @@ type ( Column *sqlparser.ColName Table *sqlparser.TableName } + ColumnNotFoundClauseError struct { + Column string + Clause string + } ) func eprintf(e error, format string, args ...any) string { @@ -76,6 +88,10 @@ func eprintf(e error, format string, args ...any) string { return fmt.Sprintf(format, args...) 
} +func newAmbiguousColumnError(name *sqlparser.ColName) error { + return &AmbiguousColumnError{Column: sqlparser.String(name)} +} + // Specific error implementations follow // UnionColumnsDoNotMatchError @@ -207,18 +223,18 @@ func (e *BuggyError) Error() string { func (e *BuggyError) bug() {} // ColumnNotFoundError -func (e *ColumnNotFoundError) Error() string { +func (e ColumnNotFoundError) Error() string { if e.Table == nil { return eprintf(e, "column '%s' not found", sqlparser.String(e.Column)) } return eprintf(e, "column '%s' not found in table '%s'", sqlparser.String(e.Column), sqlparser.String(e.Table)) } -func (e *ColumnNotFoundError) ErrorCode() vtrpcpb.Code { +func (e ColumnNotFoundError) ErrorCode() vtrpcpb.Code { return vtrpcpb.Code_INVALID_ARGUMENT } -func (e *ColumnNotFoundError) ErrorState() vterrors.State { +func (e ColumnNotFoundError) ErrorState() vterrors.State { return vterrors.BadFieldError } @@ -235,6 +251,7 @@ func (e *AmbiguousColumnError) ErrorCode() vtrpcpb.Code { return vtrpcpb.Code_INVALID_ARGUMENT } +// UnsupportedConstruct func (e *UnsupportedConstruct) unsupported() {} func (e *UnsupportedConstruct) ErrorCode() vtrpcpb.Code { @@ -245,6 +262,7 @@ func (e *UnsupportedConstruct) Error() string { return eprintf(e, e.errString) } +// SubqueryColumnCountError func (e *SubqueryColumnCountError) ErrorCode() vtrpcpb.Code { return vtrpcpb.Code_INVALID_ARGUMENT } @@ -253,7 +271,7 @@ func (e *SubqueryColumnCountError) Error() string { return fmt.Sprintf("Operand should contain %d column(s)", e.Expected) } -// MissingInVSchemaError +// ColumnsMissingInSchemaError func (e *ColumnsMissingInSchemaError) Error() string { return "VT09015: schema tracking required" } @@ -261,3 +279,60 @@ func (e *ColumnsMissingInSchemaError) Error() string { func (e *ColumnsMissingInSchemaError) ErrorCode() vtrpcpb.Code { return vtrpcpb.Code_INVALID_ARGUMENT } + +// CantUseMultipleVindexHints +func (c *CantUseMultipleVindexHints) Error() string { + return 
vterrors.VT09020(c.Table).Error() +} + +func (c *CantUseMultipleVindexHints) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_FAILED_PRECONDITION +} + +// NoSuchVindexFound +func (c *NoSuchVindexFound) Error() string { + return vterrors.VT09021(c.VindexName, c.Table).Error() +} + +func (c *NoSuchVindexFound) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_FAILED_PRECONDITION +} + +// InvalidUseOfGroupFunction +func (*InvalidUseOfGroupFunction) Error() string { + return "Invalid use of group function" +} + +func (*InvalidUseOfGroupFunction) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +func (*InvalidUseOfGroupFunction) ErrorState() vterrors.State { + return vterrors.InvalidGroupFuncUse +} + +// CantGroupOn +func (e *CantGroupOn) Error() string { + return vterrors.VT03005(e.Column).Error() +} + +func (*CantGroupOn) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +func (e *CantGroupOn) ErrorState() vterrors.State { + return vterrors.VT03005(e.Column).State +} + +// ColumnNotFoundInGroupByError +func (e *ColumnNotFoundClauseError) Error() string { + return fmt.Sprintf("Unknown column '%s' in '%s'", e.Column, e.Clause) +} + +func (*ColumnNotFoundClauseError) ErrorCode() vtrpcpb.Code { + return vtrpcpb.Code_INVALID_ARGUMENT +} + +func (e *ColumnNotFoundClauseError) ErrorState() vterrors.State { + return vterrors.BadFieldError +} diff --git a/go/vt/vtgate/semantics/foreign_keys.go b/go/vt/vtgate/semantics/foreign_keys.go new file mode 100644 index 00000000000..4da2f5a232f --- /dev/null +++ b/go/vt/vtgate/semantics/foreign_keys.go @@ -0,0 +1,207 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +type fkManager struct { + binder *binder + tables *tableCollector + si SchemaInformation + getError func() error +} + +// getInvolvedForeignKeys gets the foreign keys that might require taking care of when executing the given statement. +func (fk *fkManager) getInvolvedForeignKeys(statement sqlparser.Statement, fkChecksState *bool) (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, map[string]sqlparser.UpdateExprs, error) { + if fkChecksState != nil && !*fkChecksState { + return nil, nil, nil, nil + } + // Only DML statements require any foreign key handling. + switch stmt := statement.(type) { + case *sqlparser.Delete: + // For DELETE statements, none of the parent foreign keys require handling. + // So we collect all the child foreign keys. + allChildFks, _, err := fk.getAllManagedForeignKeys() + return allChildFks, nil, nil, err + case *sqlparser.Insert: + // For INSERT statements, we have 3 different cases: + // 1. REPLACE statement: REPLACE statements are essentially DELETEs and INSERTs rolled into one. + // So we need to check the parent foreign keys to ensure we are inserting the correct values, and the child foreign keys + // to ensure we don't change a row that breaks the constraint or cascade any operations on the child tables. + // 2. 
Normal INSERT statement: We don't need to check anything on the child foreign keys, so we just get all the parent foreign keys. + // 3. INSERT with ON DUPLICATE KEY UPDATE: This might trigger an update on the columns specified in the ON DUPLICATE KEY UPDATE clause. + allChildFks, allParentFKs, err := fk.getAllManagedForeignKeys() + if err != nil { + return nil, nil, nil, err + } + if stmt.Action == sqlparser.ReplaceAct { + return allChildFks, allParentFKs, nil, nil + } + if len(stmt.OnDup) == 0 { + return nil, allParentFKs, nil, nil + } + // If only a certain set of columns are being updated, then there might be some child foreign keys that don't need any consideration since their columns aren't being updated. + // So, we filter these child foreign keys out. We can't filter any parent foreign keys because the statement will INSERT a row too, which requires validating all the parent foreign keys. + updatedChildFks, _, childFkToUpdExprs, err := fk.filterForeignKeysUsingUpdateExpressions(allChildFks, nil, sqlparser.UpdateExprs(stmt.OnDup)) + return updatedChildFks, allParentFKs, childFkToUpdExprs, err + case *sqlparser.Update: + // For UPDATE queries we get all the parent and child foreign keys, but we can filter some of them out if the columns that they consist of aren't being updated or are set to NULL. + allChildFks, allParentFks, err := fk.getAllManagedForeignKeys() + if err != nil { + return nil, nil, nil, err + } + return fk.filterForeignKeysUsingUpdateExpressions(allChildFks, allParentFks, stmt.Exprs) + default: + return nil, nil, nil, nil + } +} + +// filterForeignKeysUsingUpdateExpressions filters the child and parent foreign key constraints that don't require any validations/cascades given the updated expressions. 
+func (fk *fkManager) filterForeignKeysUsingUpdateExpressions(allChildFks map[TableSet][]vindexes.ChildFKInfo, allParentFks map[TableSet][]vindexes.ParentFKInfo, updExprs sqlparser.UpdateExprs) (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, map[string]sqlparser.UpdateExprs, error) { + if len(allChildFks) == 0 && len(allParentFks) == 0 { + return nil, nil, nil, nil + } + + pFksRequired := make(map[TableSet][]bool, len(allParentFks)) + cFksRequired := make(map[TableSet][]bool, len(allChildFks)) + for ts, fks := range allParentFks { + pFksRequired[ts] = make([]bool, len(fks)) + } + for ts, fks := range allChildFks { + cFksRequired[ts] = make([]bool, len(fks)) + } + + // updExprToTableSet stores the tables that the updated expressions are from. + updExprToTableSet := make(map[*sqlparser.ColName]TableSet) + + // childFKToUpdExprs stores child foreign key to update expressions mapping. + childFKToUpdExprs := map[string]sqlparser.UpdateExprs{} + + // Go over all the update expressions + for _, updateExpr := range updExprs { + deps := fk.binder.direct.dependencies(updateExpr.Name) + if deps.NumberOfTables() != 1 { + // If we don't get exactly one table for the given update expression, we would have definitely run into an error + // during the binder phase that we would have stored. We should return that error, since we can't safely proceed with + // foreign key related changes without having all the information. + return nil, nil, nil, fk.getError() + } + updExprToTableSet[updateExpr.Name] = deps + // Get all the child and parent foreign keys for the given table that the update expression belongs to. + childFks := allChildFks[deps] + parentFKs := allParentFks[deps] + + // Any foreign key to a child table for a column that has been updated + // will require the cascade operations or restrict verification to happen, so we include all such foreign keys. 
+ for idx, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + cFksRequired[deps][idx] = true + tbl, _ := fk.tables.tableInfoFor(deps) + ue := childFKToUpdExprs[childFk.String(tbl.GetVindexTable())] + ue = append(ue, updateExpr) + childFKToUpdExprs[childFk.String(tbl.GetVindexTable())] = ue + } + } + // If we are setting a column to NULL, then we don't need to verify the existence of an + // equivalent row in the parent table, even if this column was part of a foreign key to a parent table. + if sqlparser.IsNull(updateExpr.Expr) { + continue + } + // We add all the possible parent foreign key constraints that need verification that an equivalent row + // exists, given that this column has changed. + for idx, parentFk := range parentFKs { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + pFksRequired[deps][idx] = true + } + } + } + // For the parent foreign keys, if any of the columns part of the fk is set to NULL, + // then, we don't care for the existence of an equivalent row in the parent table. + for _, updateExpr := range updExprs { + if !sqlparser.IsNull(updateExpr.Expr) { + continue + } + ts := updExprToTableSet[updateExpr.Name] + parentFKs := allParentFks[ts] + for idx, parentFk := range parentFKs { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + pFksRequired[ts][idx] = false + } + } + } + + // Create new maps with only the required foreign keys. 
+ pFksNeedsHandling := map[TableSet][]vindexes.ParentFKInfo{} + cFksNeedsHandling := map[TableSet][]vindexes.ChildFKInfo{} + for ts, parentFks := range allParentFks { + var pFKNeeded []vindexes.ParentFKInfo + for idx, fk := range parentFks { + if pFksRequired[ts][idx] { + pFKNeeded = append(pFKNeeded, fk) + } + } + pFksNeedsHandling[ts] = pFKNeeded + } + for ts, childFks := range allChildFks { + var cFKNeeded []vindexes.ChildFKInfo + for idx, fk := range childFks { + if cFksRequired[ts][idx] { + cFKNeeded = append(cFKNeeded, fk) + } + } + cFksNeedsHandling[ts] = cFKNeeded + } + return cFksNeedsHandling, pFksNeedsHandling, childFKToUpdExprs, nil +} + +// getAllManagedForeignKeys gets all the foreign keys for the query we are analyzing that Vitess is responsible for managing. +func (fk *fkManager) getAllManagedForeignKeys() (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, error) { + allChildFKs := make(map[TableSet][]vindexes.ChildFKInfo) + allParentFKs := make(map[TableSet][]vindexes.ParentFKInfo) + + // Go over all the tables and collect the foreign keys. + for idx, table := range fk.tables.Tables { + vi := table.GetVindexTable() + if vi == nil || vi.Keyspace == nil { + // It is not a real table, so it should be skipped. + continue + } + // Check whether Vitess needs to manage the foreign keys in this keyspace or not. + fkMode, err := fk.si.ForeignKeyMode(vi.Keyspace.Name) + if err != nil { + return nil, nil, err + } + if fkMode != vschemapb.Keyspace_managed { + continue + } + // Cyclic foreign key constraints error is stored in the keyspace. + ksErr := fk.si.KeyspaceError(vi.Keyspace.Name) + if ksErr != nil { + return nil, nil, ksErr + } + + // Add all the child and parent foreign keys to our map. 
+ ts := SingleTableSet(idx) + allChildFKs[ts] = vi.ChildForeignKeys + allParentFKs[ts] = vi.ParentForeignKeys + } + return allChildFKs, allParentFKs, nil +} diff --git a/go/vt/vtgate/semantics/foreign_keys_test.go b/go/vt/vtgate/semantics/foreign_keys_test.go new file mode 100644 index 00000000000..e1c26ecf569 --- /dev/null +++ b/go/vt/vtgate/semantics/foreign_keys_test.go @@ -0,0 +1,602 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package semantics + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" +) + +var parentTbl = &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("parentt"), + Keyspace: &vindexes.Keyspace{ + Name: "ks", + }, +} + +var tbl = map[string]TableInfo{ + "t0": &RealTable{ + Table: &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t0"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(parentTbl, []string{"col"}, []string{"col"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), + }, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(parentTbl, []string{"colb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), + }, + }, + }, + "t1": &RealTable{ + Table: &vindexes.Table{ + Name: 
sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: "ks_unmanaged", Sharded: true}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(parentTbl, []string{"cola"}, []string{"cola"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"cola1", "cola2"}, []string{"ccola1", "ccola2"}, sqlparser.SetNull), + }, + }, + }, + "t2": &RealTable{ + Table: &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + }, + }, + "t3": &RealTable{ + Table: &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t3"), + Keyspace: &vindexes.Keyspace{Name: "undefined_ks", Sharded: true}, + }, + }, + "t4": &RealTable{ + Table: &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t4"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(parentTbl, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), + ckInfo(parentTbl, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), + ckInfo(parentTbl, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), + }, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(parentTbl, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), + pkInfo(parentTbl, []string{"pcolc"}, []string{"colc"}), + pkInfo(parentTbl, []string{"pcolb", "pcola"}, []string{"colb", "cola"}), + pkInfo(parentTbl, []string{"pcolb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"pcola"}, []string{"cola"}), + pkInfo(parentTbl, []string{"pcolb", "pcolx"}, []string{"colb", "colx"}), + }, + }, + }, + "t5": &RealTable{ + Table: &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t5"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(parentTbl, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, 
sqlparser.SetNull), + ckInfo(parentTbl, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), + }, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(parentTbl, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), + pkInfo(parentTbl, []string{"pcola"}, []string{"cola"}), + pkInfo(parentTbl, []string{"pcold", "pcolc"}, []string{"cold", "colc"}), + pkInfo(parentTbl, []string{"pcold"}, []string{"cold"}), + pkInfo(parentTbl, []string{"pcold", "pcolx"}, []string{"cold", "colx"}), + }, + }, + }, + "t6": &RealTable{ + Table: &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t6"), + Keyspace: &vindexes.Keyspace{Name: "ks"}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(parentTbl, []string{"col"}, []string{"col"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), + ckInfo(parentTbl, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), + ckInfo(parentTbl, []string{"colx", "coly"}, []string{"child_colx", "child_coly"}, sqlparser.Cascade), + ckInfo(parentTbl, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), + }, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(parentTbl, []string{"colb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), + }, + }, + }, +} + +// TestGetAllManagedForeignKeys tests the functionality of getAllManagedForeignKeys. 
+func TestGetAllManagedForeignKeys(t *testing.T) { + tests := []struct { + name string + fkManager *fkManager + childFkWanted map[TableSet][]vindexes.ChildFKInfo + parentFkWanted map[TableSet][]vindexes.ParentFKInfo + expectedErr string + }{ + { + name: "Collect all foreign key constraints", + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t0"], + tbl["t1"], + &DerivedTable{}, + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + "ks_unmanaged": vschemapb.Keyspace_unmanaged, + }, + }, + }, + childFkWanted: map[TableSet][]vindexes.ChildFKInfo{ + SingleTableSet(0): { + ckInfo(parentTbl, []string{"col"}, []string{"col"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), + }, + }, + parentFkWanted: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): { + pkInfo(parentTbl, []string{"colb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), + }, + }, + }, + { + name: "keyspace not found in schema information", + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t2"], + tbl["t3"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + }, + }, + }, + expectedErr: "undefined_ks keyspace not found", + }, + { + name: "Cyclic fk constraints error", + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t0"], tbl["t1"], + &DerivedTable{}, + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + "ks_unmanaged": vschemapb.Keyspace_unmanaged, + }, + KsError: map[string]error{ + "ks": fmt.Errorf("VT09019: keyspace 'ks' has cyclic foreign keys"), + }, + }, + }, + expectedErr: "VT09019: keyspace 'ks' has cyclic foreign keys", + }, + } + for _, tt := range tests 
{ + t.Run(tt.name, func(t *testing.T) { + childFk, parentFk, err := tt.fkManager.getAllManagedForeignKeys() + if tt.expectedErr != "" { + require.EqualError(t, err, tt.expectedErr) + return + } + require.EqualValues(t, tt.childFkWanted, childFk) + require.EqualValues(t, tt.parentFkWanted, parentFk) + }) + } +} + +// TestFilterForeignKeysUsingUpdateExpressions tests the functionality of filterForeignKeysUsingUpdateExpressions. +func TestFilterForeignKeysUsingUpdateExpressions(t *testing.T) { + cola := sqlparser.NewColName("cola") + colb := sqlparser.NewColName("colb") + colc := sqlparser.NewColName("colc") + cold := sqlparser.NewColName("cold") + a := &fkManager{ + binder: &binder{ + direct: map[sqlparser.Expr]TableSet{ + cola: SingleTableSet(0), + colb: SingleTableSet(0), + colc: SingleTableSet(1), + cold: SingleTableSet(1), + }, + }, + getError: func() error { return fmt.Errorf("ambiguous test error") }, + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t4"], + tbl["t5"], + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + }, + }, + }, + } + updateExprs := sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: cola, Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: colb, Expr: &sqlparser.NullVal{}}, + &sqlparser.UpdateExpr{Name: colc, Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: cold, Expr: &sqlparser.NullVal{}}, + } + tests := []struct { + name string + fkManager *fkManager + allChildFks map[TableSet][]vindexes.ChildFKInfo + allParentFks map[TableSet][]vindexes.ParentFKInfo + updExprs sqlparser.UpdateExprs + childFksWanted map[TableSet][]vindexes.ChildFKInfo + parentFksWanted map[TableSet][]vindexes.ParentFKInfo + errWanted string + }{ + { + name: "Child Foreign Keys Filtering", + fkManager: a, + allParentFks: nil, + allChildFks: map[TableSet][]vindexes.ChildFKInfo{ + SingleTableSet(0): tbl["t4"].(*RealTable).Table.ChildForeignKeys, + 
SingleTableSet(1): tbl["t5"].(*RealTable).Table.ChildForeignKeys, + }, + updExprs: updateExprs, + childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ + SingleTableSet(0): { + ckInfo(parentTbl, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), + }, + SingleTableSet(1): { + ckInfo(parentTbl, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, sqlparser.SetNull), + }, + }, + parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{}, + }, { + name: "Parent Foreign Keys Filtering", + fkManager: a, + allParentFks: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): tbl["t4"].(*RealTable).Table.ParentForeignKeys, + SingleTableSet(1): tbl["t5"].(*RealTable).Table.ParentForeignKeys, + }, + allChildFks: nil, + updExprs: updateExprs, + childFksWanted: map[TableSet][]vindexes.ChildFKInfo{}, + parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): { + pkInfo(parentTbl, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), + pkInfo(parentTbl, []string{"pcola"}, []string{"cola"}), + }, + SingleTableSet(1): { + pkInfo(parentTbl, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), + }, + }, + }, { + name: "Unknown column", + fkManager: a, + allParentFks: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): tbl["t4"].(*RealTable).Table.ParentForeignKeys, + SingleTableSet(1): tbl["t5"].(*RealTable).Table.ParentForeignKeys, + }, + allChildFks: nil, + updExprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: sqlparser.NewColName("unknownCol"), Expr: sqlparser.NewIntLiteral("1")}, + }, + errWanted: "ambiguous test error", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + childFks, parentFks, _, err := tt.fkManager.filterForeignKeysUsingUpdateExpressions(tt.allChildFks, tt.allParentFks, tt.updExprs) + 
require.EqualValues(t, tt.childFksWanted, childFks) + require.EqualValues(t, tt.parentFksWanted, parentFks) + if tt.errWanted != "" { + require.EqualError(t, err, tt.errWanted) + } else { + require.NoError(t, err) + } + }) + } +} + +// TestGetInvolvedForeignKeys tests the functionality of getInvolvedForeignKeys. +func TestGetInvolvedForeignKeys(t *testing.T) { + cola := sqlparser.NewColName("cola") + colb := sqlparser.NewColName("colb") + colc := sqlparser.NewColName("colc") + cold := sqlparser.NewColName("cold") + tests := []struct { + name string + stmt sqlparser.Statement + fkManager *fkManager + childFksWanted map[TableSet][]vindexes.ChildFKInfo + parentFksWanted map[TableSet][]vindexes.ParentFKInfo + childFkUpdateExprsWanted map[string]sqlparser.UpdateExprs + expectedErr string + }{ + { + name: "Delete Query", + stmt: &sqlparser.Delete{}, + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t0"], + tbl["t1"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + "ks_unmanaged": vschemapb.Keyspace_unmanaged, + }, + }, + }, + childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ + SingleTableSet(0): { + ckInfo(parentTbl, []string{"col"}, []string{"col"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), + }, + }, + }, + { + name: "Update statement", + stmt: &sqlparser.Update{ + Exprs: sqlparser.UpdateExprs{ + &sqlparser.UpdateExpr{Name: cola, Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: colb, Expr: &sqlparser.NullVal{}}, + &sqlparser.UpdateExpr{Name: colc, Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: cold, Expr: &sqlparser.NullVal{}}, + }, + }, + fkManager: &fkManager{ + binder: &binder{ + direct: map[sqlparser.Expr]TableSet{ + cola: SingleTableSet(0), + colb: SingleTableSet(0), + colc: SingleTableSet(1), + cold: SingleTableSet(1), + }, + }, + tables: 
&tableCollector{ + Tables: []TableInfo{ + tbl["t4"], + tbl["t5"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + }, + }, + }, + childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ + SingleTableSet(0): { + ckInfo(parentTbl, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}, sqlparser.SetNull), + }, + SingleTableSet(1): { + ckInfo(parentTbl, []string{"cold"}, []string{"child_cold"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"colc", "colx"}, []string{"child_colc", "child_colx"}, sqlparser.SetNull), + }, + }, + parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): { + pkInfo(parentTbl, []string{"pcola", "pcolx"}, []string{"cola", "colx"}), + pkInfo(parentTbl, []string{"pcola"}, []string{"cola"}), + }, + SingleTableSet(1): { + pkInfo(parentTbl, []string{"pcolc", "pcolx"}, []string{"colc", "colx"}), + }, + }, + childFkUpdateExprsWanted: map[string]sqlparser.UpdateExprs{ + "ks.parentt|child_cola|child_colx||ks.t4|cola|colx": {&sqlparser.UpdateExpr{Name: cola, Expr: sqlparser.NewIntLiteral("1")}}, + "ks.parentt|child_colb||ks.t4|colb": {&sqlparser.UpdateExpr{Name: colb, Expr: &sqlparser.NullVal{}}}, + "ks.parentt|child_colc|child_colx||ks.t5|colc|colx": {&sqlparser.UpdateExpr{Name: colc, Expr: sqlparser.NewIntLiteral("1")}}, + "ks.parentt|child_cold||ks.t5|cold": {&sqlparser.UpdateExpr{Name: cold, Expr: &sqlparser.NullVal{}}}, + }, + }, + { + name: "Replace Query", + stmt: &sqlparser.Insert{ + Action: sqlparser.ReplaceAct, + }, + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t0"], + tbl["t1"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + "ks_unmanaged": vschemapb.Keyspace_unmanaged, + }, + }, + }, + childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ 
+ SingleTableSet(0): { + ckInfo(parentTbl, []string{"col"}, []string{"col"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"col1", "col2"}, []string{"ccol1", "ccol2"}, sqlparser.SetNull), + }, + }, + parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): { + pkInfo(parentTbl, []string{"colb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), + }, + }, + }, + { + name: "Insert Query", + stmt: &sqlparser.Insert{ + Action: sqlparser.InsertAct, + }, + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t0"], + tbl["t1"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + "ks_unmanaged": vschemapb.Keyspace_unmanaged, + }, + }, + }, + childFksWanted: nil, + parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): { + pkInfo(parentTbl, []string{"colb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), + }, + }, + }, + { + name: "Insert Query with On Duplicate", + stmt: &sqlparser.Insert{ + Action: sqlparser.InsertAct, + OnDup: sqlparser.OnDup{ + &sqlparser.UpdateExpr{Name: cola, Expr: sqlparser.NewIntLiteral("1")}, + &sqlparser.UpdateExpr{Name: colb, Expr: &sqlparser.NullVal{}}, + }, + }, + fkManager: &fkManager{ + binder: &binder{ + direct: map[sqlparser.Expr]TableSet{ + cola: SingleTableSet(0), + colb: SingleTableSet(0), + }, + }, + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t6"], + tbl["t1"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + "ks_unmanaged": vschemapb.Keyspace_unmanaged, + }, + }, + }, + childFksWanted: map[TableSet][]vindexes.ChildFKInfo{ + SingleTableSet(0): { + ckInfo(parentTbl, []string{"colb"}, []string{"child_colb"}, sqlparser.Restrict), + ckInfo(parentTbl, []string{"cola", "colx"}, []string{"child_cola", 
"child_colx"}, sqlparser.SetNull), + }, + }, + parentFksWanted: map[TableSet][]vindexes.ParentFKInfo{ + SingleTableSet(0): { + pkInfo(parentTbl, []string{"colb"}, []string{"colb"}), + pkInfo(parentTbl, []string{"colb1", "colb2"}, []string{"ccolb1", "ccolb2"}), + }, + }, + childFkUpdateExprsWanted: map[string]sqlparser.UpdateExprs{ + "ks.parentt|child_cola|child_colx||ks.t6|cola|colx": {&sqlparser.UpdateExpr{Name: cola, Expr: sqlparser.NewIntLiteral("1")}}, + "ks.parentt|child_colb||ks.t6|colb": {&sqlparser.UpdateExpr{Name: colb, Expr: &sqlparser.NullVal{}}}, + }, + }, + { + name: "Insert error", + stmt: &sqlparser.Insert{}, + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t2"], + tbl["t3"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + }, + }, + }, + expectedErr: "undefined_ks keyspace not found", + }, + { + name: "Update error", + stmt: &sqlparser.Update{}, + fkManager: &fkManager{ + tables: &tableCollector{ + Tables: []TableInfo{ + tbl["t2"], + tbl["t3"], + }, + }, + si: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + "ks": vschemapb.Keyspace_managed, + }, + }, + }, + expectedErr: "undefined_ks keyspace not found", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fkState := true + childFks, parentFks, childFkUpdateExprs, err := tt.fkManager.getInvolvedForeignKeys(tt.stmt, &fkState) + if tt.expectedErr != "" { + require.EqualError(t, err, tt.expectedErr) + return + } + require.EqualValues(t, tt.childFksWanted, childFks) + require.EqualValues(t, tt.childFkUpdateExprsWanted, childFkUpdateExprs) + require.EqualValues(t, tt.parentFksWanted, parentFks) + }) + } +} + +func ckInfo(cTable *vindexes.Table, pCols []string, cCols []string, refAction sqlparser.ReferenceAction) vindexes.ChildFKInfo { + return vindexes.ChildFKInfo{ + Table: cTable, + ParentColumns: sqlparser.MakeColumns(pCols...), + 
ChildColumns: sqlparser.MakeColumns(cCols...), + OnDelete: refAction, + } +} + +func pkInfo(parentTable *vindexes.Table, pCols []string, cCols []string) vindexes.ParentFKInfo { + return vindexes.ParentFKInfo{ + Table: parentTable, + ParentColumns: sqlparser.MakeColumns(pCols...), + ChildColumns: sqlparser.MakeColumns(cCols...), + } +} diff --git a/go/vt/vtgate/semantics/info_schema.go b/go/vt/vtgate/semantics/info_schema.go index 838f6276472..11e577f3fa7 100644 --- a/go/vt/vtgate/semantics/info_schema.go +++ b/go/vt/vtgate/semantics/info_schema.go @@ -17,6 +17,7 @@ limitations under the License. package semantics import ( + "fmt" "strings" "vitess.io/vitess/go/mysql/collations" @@ -24,1646 +25,1576 @@ import ( "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func createCol(name string, typ int) vindexes.Column { - return vindexes.Column{Name: sqlparser.NewIdentifierCI(name), Type: query.Type(typ)} +func createCol(parser *sqlparser.Parser, name string, typ int, collation string, def string, size, scale int32, notNullable bool, values string) vindexes.Column { + var expr sqlparser.Expr + if def != "" { + var err error + expr, err = parser.ParseExpr(def) + if err != nil { + panic(fmt.Sprintf("Failed to parse %q: %v", def, err)) + } + } + var vals []string + if values != "" { + quotedVals := strings.Split(values, ",") + vals = make([]string, 0, len(quotedVals)) + for _, v := range quotedVals { + u := strings.TrimFunc(v, func(r rune) bool { + return r == '\'' + }) + vals = append(vals, u) + } + } + + return vindexes.Column{ + Name: sqlparser.NewIdentifierCI(name), + Type: query.Type(typ), + CollationName: collation, + Default: expr, + Size: size, + Scale: scale, + Nullable: !notNullable, + Values: vals, + } } // getInfoSchema57 returns a 
map of all information_schema tables and their columns with types // To recreate this information from MySQL, you can run the test in info_schema_gen_test.go func getInfoSchema57() map[string][]vindexes.Column { + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "5.7.9"}) + if err != nil { + panic(err) + } infSchema := map[string][]vindexes.Column{} var cols []vindexes.Column - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165)) - cols = append(cols, createCol("DESCRIPTION", 6165)) - cols = append(cols, createCol("MAXLEN", 265)) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 60, 0, true, "")) + cols = append(cols, createCol(parser, "MAXLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) infSchema["CHARACTER_SETS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("ID", 265)) - cols = append(cols, createCol("IS_DEFAULT", 6165)) - cols = append(cols, createCol("IS_COMPILED", 6165)) - cols = append(cols, createCol("SORTLEN", 265)) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, 
createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 265, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SORTLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) infSchema["COLLATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["COLUMN_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, 
createCol("ORDINAL_POSITION", 265)) - cols = append(cols, createCol("COLUMN_DEFAULT", 6163)) - cols = append(cols, createCol("IS_NULLABLE", 6165)) - cols = append(cols, createCol("DATA_TYPE", 6165)) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265)) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265)) - cols = append(cols, createCol("NUMERIC_PRECISION", 265)) - cols = append(cols, createCol("NUMERIC_SCALE", 265)) - cols = append(cols, createCol("DATETIME_PRECISION", 265)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("COLUMN_TYPE", 6163)) - cols = append(cols, createCol("COLUMN_KEY", 6165)) - cols = append(cols, createCol("EXTRA", 6165)) - cols = append(cols, createCol("PRIVILEGES", 6165)) - cols = append(cols, createCol("COLUMN_COMMENT", 6165)) - cols = append(cols, createCol("GENERATION_EXPRESSION", 6163)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, 
"utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_KEY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGES", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) + cols = append(cols, createCol(parser, "GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("SUPPORT", 6165)) - cols = append(cols, createCol("COMMENT", 6165)) - cols = append(cols, createCol("TRANSACTIONS", 6165)) - cols = append(cols, createCol("XA", 6165)) - cols = append(cols, createCol("SAVEPOINTS", 6165)) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + 
cols = append(cols, createCol(parser, "XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["ENGINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("EVENT_CATALOG", 6165)) - cols = append(cols, createCol("EVENT_SCHEMA", 6165)) - cols = append(cols, createCol("EVENT_NAME", 6165)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("TIME_ZONE", 6165)) - cols = append(cols, createCol("EVENT_BODY", 6165)) - cols = append(cols, createCol("EVENT_DEFINITION", 6163)) - cols = append(cols, createCol("EVENT_TYPE", 6165)) - cols = append(cols, createCol("EXECUTE_AT", 2064)) - cols = append(cols, createCol("INTERVAL_VALUE", 6165)) - cols = append(cols, createCol("INTERVAL_FIELD", 6165)) - cols = append(cols, createCol("SQL_MODE", 6165)) - cols = append(cols, createCol("STARTS", 2064)) - cols = append(cols, createCol("ENDS", 2064)) - cols = append(cols, createCol("STATUS", 6165)) - cols = append(cols, createCol("ON_COMPLETION", 6165)) - cols = append(cols, createCol("CREATED", 2064)) - cols = append(cols, createCol("LAST_ALTERED", 2064)) - cols = append(cols, createCol("LAST_EXECUTED", 2064)) - cols = append(cols, createCol("EVENT_COMMENT", 6165)) - cols = append(cols, createCol("ORIGINATOR", 265)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) - cols = append(cols, createCol("DATABASE_COLLATION", 6165)) + cols = append(cols, createCol(parser, "EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, 
createCol(parser, "TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_FIELD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORIGINATOR", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = 
append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["EVENTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("FILE_ID", 265)) - cols = append(cols, createCol("FILE_NAME", 6165)) - cols = append(cols, createCol("FILE_TYPE", 6165)) - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165)) - cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265)) - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("FULLTEXT_KEYS", 6165)) - cols = append(cols, createCol("DELETED_ROWS", 265)) - cols = append(cols, createCol("UPDATE_COUNT", 265)) - cols = append(cols, createCol("FREE_EXTENTS", 265)) - cols = append(cols, createCol("TOTAL_EXTENTS", 265)) - cols = append(cols, createCol("EXTENT_SIZE", 265)) - cols = append(cols, createCol("INITIAL_SIZE", 265)) - cols = append(cols, createCol("MAXIMUM_SIZE", 265)) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265)) - cols = append(cols, createCol("CREATION_TIME", 2064)) - cols = append(cols, createCol("LAST_UPDATE_TIME", 2064)) - cols = append(cols, createCol("LAST_ACCESS_TIME", 2064)) - cols = append(cols, createCol("RECOVER_TIME", 265)) - cols = append(cols, createCol("TRANSACTION_COUNTER", 265)) - cols = append(cols, createCol("VERSION", 265)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("TABLE_ROWS", 265)) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265)) - cols = append(cols, createCol("DATA_LENGTH", 265)) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265)) - cols = append(cols, createCol("INDEX_LENGTH", 265)) - cols = append(cols, createCol("DATA_FREE", 265)) - cols = append(cols, createCol("CREATE_TIME", 2064)) - cols = append(cols, 
createCol("UPDATE_TIME", 2064)) - cols = append(cols, createCol("CHECK_TIME", 2064)) - cols = append(cols, createCol("CHECKSUM", 265)) - cols = append(cols, createCol("STATUS", 6165)) - cols = append(cols, createCol("EXTRA", 6165)) + cols = append(cols, createCol(parser, "FILE_ID", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "FILE_NAME", 6165, "utf8mb3_general_ci", "", 4000, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_TYPE", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "FULLTEXT_KEYS", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DELETED_ROWS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_COUNT", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "INITIAL_SIZE", 265, 
"utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATION_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_ACCESS_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "RECOVER_TIME", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "TRANSACTION_COUNTER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, 
"CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) infSchema["FILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165)) - cols = append(cols, createCol("VARIABLE_VALUE", 6165)) + cols = append(cols, createCol(parser, "VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) infSchema["GLOBAL_STATUS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165)) - cols = append(cols, createCol("VARIABLE_VALUE", 6165)) + cols = append(cols, createCol(parser, "VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) infSchema["GLOBAL_VARIABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265)) - cols = append(cols, createCol("BLOCK_ID", 265)) - cols = append(cols, createCol("SPACE", 265)) - cols = append(cols, createCol("PAGE_NUMBER", 265)) - cols = append(cols, createCol("PAGE_TYPE", 6165)) - cols = append(cols, createCol("FLUSH_TYPE", 265)) - cols = append(cols, createCol("FIX_COUNT", 265)) - cols = append(cols, createCol("IS_HASHED", 6165)) - cols = append(cols, createCol("NEWEST_MODIFICATION", 265)) - cols = append(cols, createCol("OLDEST_MODIFICATION", 265)) - cols = append(cols, createCol("ACCESS_TIME", 265)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("INDEX_NAME", 6165)) - cols = append(cols, createCol("NUMBER_RECORDS", 265)) - cols = append(cols, createCol("DATA_SIZE", 265)) - cols = append(cols, createCol("COMPRESSED_SIZE", 265)) - cols = append(cols, createCol("PAGE_STATE", 
6165)) - cols = append(cols, createCol("IO_FIX", 6165)) - cols = append(cols, createCol("IS_OLD", 6165)) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 265)) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "BLOCK_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, 
"IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_PAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265)) - cols = append(cols, createCol("LRU_POSITION", 265)) - cols = append(cols, createCol("SPACE", 265)) - cols = append(cols, createCol("PAGE_NUMBER", 265)) - cols = append(cols, createCol("PAGE_TYPE", 6165)) - cols = append(cols, createCol("FLUSH_TYPE", 265)) - cols = append(cols, createCol("FIX_COUNT", 265)) - cols = append(cols, createCol("IS_HASHED", 6165)) - cols = append(cols, createCol("NEWEST_MODIFICATION", 265)) - cols = append(cols, createCol("OLDEST_MODIFICATION", 265)) - cols = append(cols, createCol("ACCESS_TIME", 265)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("INDEX_NAME", 6165)) - cols = append(cols, createCol("NUMBER_RECORDS", 265)) - cols = append(cols, createCol("DATA_SIZE", 265)) - cols = append(cols, createCol("COMPRESSED_SIZE", 265)) - cols = append(cols, createCol("COMPRESSED", 6165)) - cols = append(cols, createCol("IO_FIX", 6165)) - cols = append(cols, createCol("IS_OLD", 6165)) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 265)) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 
21, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_PAGE_LRU"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265)) - cols = append(cols, createCol("POOL_SIZE", 265)) - cols = append(cols, createCol("FREE_BUFFERS", 265)) - cols = append(cols, createCol("DATABASE_PAGES", 265)) - cols = append(cols, createCol("OLD_DATABASE_PAGES", 265)) - cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 265)) - cols = append(cols, createCol("PENDING_DECOMPRESS", 265)) - cols = append(cols, 
createCol("PENDING_READS", 265)) - cols = append(cols, createCol("PENDING_FLUSH_LRU", 265)) - cols = append(cols, createCol("PENDING_FLUSH_LIST", 265)) - cols = append(cols, createCol("PAGES_MADE_YOUNG", 265)) - cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 265)) - cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1036)) - cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1036)) - cols = append(cols, createCol("NUMBER_PAGES_READ", 265)) - cols = append(cols, createCol("NUMBER_PAGES_CREATED", 265)) - cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 265)) - cols = append(cols, createCol("PAGES_READ_RATE", 1036)) - cols = append(cols, createCol("PAGES_CREATE_RATE", 1036)) - cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1036)) - cols = append(cols, createCol("NUMBER_PAGES_GET", 265)) - cols = append(cols, createCol("HIT_RATE", 265)) - cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 265)) - cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265)) - cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 265)) - cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 265)) - cols = append(cols, createCol("READ_AHEAD_RATE", 1036)) - cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1036)) - cols = append(cols, createCol("LRU_IO_TOTAL", 265)) - cols = append(cols, createCol("LRU_IO_CURRENT", 265)) - cols = append(cols, createCol("UNCOMPRESS_TOTAL", 265)) - cols = append(cols, createCol("UNCOMPRESS_CURRENT", 265)) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FREE_BUFFERS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLD_DATABASE_PAGES", 265, 
"utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_DECOMPRESS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_READS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LRU", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LIST", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_NOT_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_NOT_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_CREATED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_WRITTEN", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_READ_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_CREATE_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_WRITTEN_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_GET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "HIT_RATE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, 
"YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ_AHEAD", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_READ_AHEAD_EVICTED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_EVICTED_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_POOL_STATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, 
"utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165)) - cols = append(cols, createCol("table_name", 6165)) - cols = append(cols, createCol("index_name", 6165)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165)) - cols = append(cols, createCol("table_name", 6165)) - cols = append(cols, createCol("index_name", 6165)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, 
createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP_RESET"] = cols cols = 
[]vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("buffer_pool_instance", 263)) - cols = append(cols, createCol("pages_used", 263)) - cols = append(cols, createCol("pages_free", 263)) - cols = append(cols, createCol("relocation_ops", 265)) - cols = append(cols, createCol("relocation_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMPMEM"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("buffer_pool_instance", 263)) - cols = append(cols, createCol("pages_used", 263)) - cols = append(cols, createCol("pages_free", 263)) - cols = append(cols, createCol("relocation_ops", 265)) - cols = append(cols, createCol("relocation_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", 
"0", 11, 0, true, "")) infSchema["INNODB_CMPMEM_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 265)) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_BEING_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("KEY", 6165)) - cols = append(cols, createCol("VALUE", 6165)) + cols = append(cols, createCol(parser, "KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_FT_CONFIG"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("value", 6165)) + cols = append(cols, createCol(parser, "value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 265)) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165)) - cols = append(cols, createCol("FIRST_DOC_ID", 265)) - cols = append(cols, createCol("LAST_DOC_ID", 265)) - cols = append(cols, createCol("DOC_COUNT", 265)) - cols = append(cols, createCol("DOC_ID", 265)) - cols = append(cols, createCol("POSITION", 265)) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 265, "utf8mb3_general_ci", "0", 
21, 0, true, "")) infSchema["INNODB_FT_INDEX_CACHE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165)) - cols = append(cols, createCol("FIRST_DOC_ID", 265)) - cols = append(cols, createCol("LAST_DOC_ID", 265)) - cols = append(cols, createCol("DOC_COUNT", 265)) - cols = append(cols, createCol("DOC_ID", 265)) - cols = append(cols, createCol("POSITION", 265)) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_INDEX_TABLE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("requesting_trx_id", 6165)) - cols = append(cols, createCol("requested_lock_id", 6165)) - cols = append(cols, createCol("blocking_trx_id", 6165)) - cols = append(cols, createCol("blocking_lock_id", 6165)) - infSchema["INNODB_LOCK_WAITS"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("lock_id", 6165)) - cols = append(cols, createCol("lock_trx_id", 6165)) - cols = append(cols, createCol("lock_mode", 6165)) - cols = append(cols, createCol("lock_type", 6165)) - cols = append(cols, createCol("lock_table", 6165)) - cols = append(cols, createCol("lock_index", 6165)) - cols = append(cols, createCol("lock_space", 265)) - cols = append(cols, createCol("lock_page", 265)) - cols = append(cols, createCol("lock_rec", 265)) - cols = append(cols, createCol("lock_data", 6165)) - infSchema["INNODB_LOCKS"] = cols - cols = []vindexes.Column{} - cols = append(cols, 
createCol("NAME", 6165)) - cols = append(cols, createCol("SUBSYSTEM", 6165)) - cols = append(cols, createCol("COUNT", 265)) - cols = append(cols, createCol("MAX_COUNT", 265)) - cols = append(cols, createCol("MIN_COUNT", 265)) - cols = append(cols, createCol("AVG_COUNT", 1036)) - cols = append(cols, createCol("COUNT_RESET", 265)) - cols = append(cols, createCol("MAX_COUNT_RESET", 265)) - cols = append(cols, createCol("MIN_COUNT_RESET", 265)) - cols = append(cols, createCol("AVG_COUNT_RESET", 1036)) - cols = append(cols, createCol("TIME_ENABLED", 2064)) - cols = append(cols, createCol("TIME_DISABLED", 2064)) - cols = append(cols, createCol("TIME_ELAPSED", 265)) - cols = append(cols, createCol("TIME_RESET", 2064)) - cols = append(cols, createCol("STATUS", 6165)) - cols = append(cols, createCol("TYPE", 6165)) - cols = append(cols, createCol("COMMENT", 6165)) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "COUNT_RESET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT_RESET", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 
0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_METRICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("POS", 265)) - cols = append(cols, createCol("MTYPE", 263)) - cols = append(cols, createCol("PRTYPE", 263)) - cols = append(cols, createCol("LEN", 263)) - infSchema["INNODB_SYS_COLUMNS"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 263)) - cols = append(cols, createCol("PATH", 6165)) - infSchema["INNODB_SYS_DATAFILES"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 265)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("POS", 263)) - infSchema["INNODB_SYS_FIELDS"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165)) - cols = append(cols, createCol("FOR_NAME", 6165)) - cols = append(cols, createCol("REF_NAME", 6165)) - cols = append(cols, createCol("N_COLS", 263)) - cols = append(cols, createCol("TYPE", 263)) - infSchema["INNODB_SYS_FOREIGN"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165)) - cols = append(cols, createCol("FOR_COL_NAME", 6165)) - cols = append(cols, createCol("REF_COL_NAME", 6165)) - cols = append(cols, createCol("POS", 263)) - infSchema["INNODB_SYS_FOREIGN_COLS"] = cols - cols = 
[]vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 265)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("TABLE_ID", 265)) - cols = append(cols, createCol("TYPE", 263)) - cols = append(cols, createCol("N_FIELDS", 263)) - cols = append(cols, createCol("PAGE_NO", 263)) - cols = append(cols, createCol("SPACE", 263)) - cols = append(cols, createCol("MERGE_THRESHOLD", 263)) - infSchema["INNODB_SYS_INDEXES"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("FLAG", 263)) - cols = append(cols, createCol("N_COLS", 263)) - cols = append(cols, createCol("SPACE", 263)) - cols = append(cols, createCol("FILE_FORMAT", 6165)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 263)) - cols = append(cols, createCol("SPACE_TYPE", 6165)) - infSchema["INNODB_SYS_TABLES"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 263)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("FLAG", 263)) - cols = append(cols, createCol("FILE_FORMAT", 6165)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("PAGE_SIZE", 263)) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 263)) - cols = append(cols, createCol("SPACE_TYPE", 6165)) - cols = append(cols, createCol("FS_BLOCK_SIZE", 263)) - cols = append(cols, createCol("FILE_SIZE", 265)) - cols = append(cols, createCol("ALLOCATED_SIZE", 265)) - infSchema["INNODB_SYS_TABLESPACES"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("STATS_INITIALIZED", 6165)) - cols = append(cols, createCol("NUM_ROWS", 265)) - cols = append(cols, createCol("CLUST_INDEX_SIZE", 265)) - cols = append(cols, createCol("OTHER_INDEX_SIZE", 265)) - cols = append(cols, 
createCol("MODIFIED_COUNTER", 265)) - cols = append(cols, createCol("AUTOINC", 265)) - cols = append(cols, createCol("REF_COUNT", 263)) - infSchema["INNODB_SYS_TABLESTATS"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265)) - cols = append(cols, createCol("POS", 263)) - cols = append(cols, createCol("BASE_POS", 263)) - infSchema["INNODB_SYS_VIRTUAL"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("N_COLS", 263)) - cols = append(cols, createCol("SPACE", 263)) - cols = append(cols, createCol("PER_TABLE_TABLESPACE", 6165)) - cols = append(cols, createCol("IS_COMPRESSED", 6165)) + cols = append(cols, createCol(parser, "TABLE_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 202, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "PER_TABLE_TABLESPACE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_COMPRESSED", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["INNODB_TEMP_TABLE_INFO"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("trx_id", 6165)) - cols = append(cols, createCol("trx_state", 6165)) - cols = append(cols, createCol("trx_started", 2064)) - cols = append(cols, createCol("trx_requested_lock_id", 6165)) - cols = append(cols, createCol("trx_wait_started", 2064)) - cols = append(cols, createCol("trx_weight", 265)) - cols = append(cols, createCol("trx_mysql_thread_id", 265)) - cols = append(cols, createCol("trx_query", 6165)) - cols = append(cols, createCol("trx_operation_state", 6165)) - cols = append(cols, createCol("trx_tables_in_use", 265)) - cols 
= append(cols, createCol("trx_tables_locked", 265)) - cols = append(cols, createCol("trx_lock_structs", 265)) - cols = append(cols, createCol("trx_lock_memory_bytes", 265)) - cols = append(cols, createCol("trx_rows_locked", 265)) - cols = append(cols, createCol("trx_rows_modified", 265)) - cols = append(cols, createCol("trx_concurrency_tickets", 265)) - cols = append(cols, createCol("trx_isolation_level", 6165)) - cols = append(cols, createCol("trx_unique_checks", 263)) - cols = append(cols, createCol("trx_foreign_key_checks", 263)) - cols = append(cols, createCol("trx_last_foreign_key_error", 6165)) - cols = append(cols, createCol("trx_adaptive_hash_latched", 263)) - cols = append(cols, createCol("trx_adaptive_hash_timeout", 265)) - cols = append(cols, createCol("trx_is_read_only", 263)) - cols = append(cols, createCol("trx_autocommit_non_locking", 263)) + cols = append(cols, createCol(parser, "trx_id", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) + cols = append(cols, createCol(parser, "trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 81, 0, false, "")) + cols = append(cols, createCol(parser, "trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_weight", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_mysql_thread_id", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "trx_tables_in_use", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, 
"trx_tables_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_structs", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_memory_bytes", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_modified", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_concurrency_tickets", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "trx_unique_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_foreign_key_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_timeout", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_is_read_only", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) infSchema["INNODB_TRX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, 
createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("ORDINAL_POSITION", 265)) - cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 265)) - cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165)) - cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION_IN_UNIQUE_CONSTRAINT", 265, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["KEY_COLUMN_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY", 6163)) - cols = append(cols, createCol("TRACE", 6163)) - cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263)) - cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257)) + 
cols = append(cols, createCol(parser, "QUERY", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TRACE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "0", 1, 0, true, "")) infSchema["OPTIMIZER_TRACE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165)) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165)) - cols = append(cols, createCol("SPECIFIC_NAME", 6165)) - cols = append(cols, createCol("ORDINAL_POSITION", 263)) - cols = append(cols, createCol("PARAMETER_MODE", 6165)) - cols = append(cols, createCol("PARAMETER_NAME", 6165)) - cols = append(cols, createCol("DATA_TYPE", 6165)) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263)) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263)) - cols = append(cols, createCol("NUMERIC_PRECISION", 265)) - cols = append(cols, createCol("NUMERIC_SCALE", 263)) - cols = append(cols, createCol("DATETIME_PRECISION", 265)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163)) - cols = append(cols, createCol("ROUTINE_TYPE", 6165)) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 263, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) + cols = append(cols, 
createCol(parser, "PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) infSchema["PARAMETERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("PARTITION_NAME", 6165)) - cols = append(cols, createCol("SUBPARTITION_NAME", 6165)) - cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 265)) - cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 265)) - cols = append(cols, createCol("PARTITION_METHOD", 6165)) - cols = append(cols, createCol("SUBPARTITION_METHOD", 6165)) - cols = append(cols, createCol("PARTITION_EXPRESSION", 6163)) - cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6163)) - cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163)) - cols = 
append(cols, createCol("TABLE_ROWS", 265)) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265)) - cols = append(cols, createCol("DATA_LENGTH", 265)) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265)) - cols = append(cols, createCol("INDEX_LENGTH", 265)) - cols = append(cols, createCol("DATA_FREE", 265)) - cols = append(cols, createCol("CREATE_TIME", 2064)) - cols = append(cols, createCol("UPDATE_TIME", 2064)) - cols = append(cols, createCol("CHECK_TIME", 2064)) - cols = append(cols, createCol("CHECKSUM", 265)) - cols = append(cols, createCol("PARTITION_COMMENT", 6165)) - cols = append(cols, createCol("NODEGROUP", 6165)) - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_DESCRIPTION", 
6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "NODEGROUP", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["PARTITIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("PLUGIN_NAME", 6165)) - cols = append(cols, createCol("PLUGIN_VERSION", 6165)) - cols = append(cols, createCol("PLUGIN_STATUS", 6165)) - cols = append(cols, createCol("PLUGIN_TYPE", 6165)) - cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165)) - cols = append(cols, createCol("PLUGIN_LIBRARY", 6165)) - cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165)) - cols = append(cols, createCol("PLUGIN_AUTHOR", 6165)) - cols = append(cols, createCol("PLUGIN_DESCRIPTION", 
6163)) - cols = append(cols, createCol("PLUGIN_LICENSE", 6165)) - cols = append(cols, createCol("LOAD_OPTION", 6165)) + cols = append(cols, createCol(parser, "PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["PLUGINS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 265)) - cols = append(cols, createCol("USER", 6165)) - cols = append(cols, createCol("HOST", 6165)) - cols = append(cols, createCol("DB", 6165)) - cols = append(cols, createCol("COMMAND", 6165)) - cols = append(cols, createCol("TIME", 263)) - cols = append(cols, createCol("STATE", 6165)) - cols = append(cols, createCol("INFO", 6163)) + cols = append(cols, createCol(parser, "ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "TIME", 263, "utf8mb3_general_ci", "0", 7, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INFO", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["PROCESSLIST"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY_ID", 263)) - cols = append(cols, createCol("SEQ", 263)) - cols = append(cols, createCol("STATE", 6165)) - cols = append(cols, createCol("DURATION", 18)) - cols = append(cols, createCol("CPU_USER", 18)) - cols = append(cols, createCol("CPU_SYSTEM", 18)) - cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263)) - cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263)) - cols = append(cols, createCol("BLOCK_OPS_IN", 263)) - cols = append(cols, createCol("BLOCK_OPS_OUT", 263)) - cols = append(cols, createCol("MESSAGES_SENT", 263)) - cols = append(cols, createCol("MESSAGES_RECEIVED", 263)) - cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263)) - cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263)) - cols = append(cols, createCol("SWAPS", 263)) - cols = append(cols, createCol("SOURCE_FUNCTION", 6165)) - cols = append(cols, createCol("SOURCE_FILE", 6165)) - cols = append(cols, createCol("SOURCE_LINE", 263)) + cols = append(cols, createCol(parser, "QUERY_ID", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "DURATION", 18, "utf8mb3_general_ci", "0.000000", 9, 6, true, "")) + cols = append(cols, 
createCol(parser, "CPU_USER", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) + cols = append(cols, createCol(parser, "CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SWAPS", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_LINE", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) infSchema["PROFILING"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("MATCH_OPTION", 6165)) - cols = append(cols, 
createCol("UPDATE_RULE", 6165)) - cols = append(cols, createCol("DELETE_RULE", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "MATCH_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DELETE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["REFERENTIAL_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_NAME", 6165)) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165)) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6165)) - cols = append(cols, createCol("ROUTINE_NAME", 6165)) - cols = append(cols, createCol("ROUTINE_TYPE", 6165)) - cols = append(cols, createCol("DATA_TYPE", 6165)) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263)) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263)) - cols = append(cols, 
createCol("NUMERIC_PRECISION", 265)) - cols = append(cols, createCol("NUMERIC_SCALE", 263)) - cols = append(cols, createCol("DATETIME_PRECISION", 265)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163)) - cols = append(cols, createCol("ROUTINE_BODY", 6165)) - cols = append(cols, createCol("ROUTINE_DEFINITION", 6163)) - cols = append(cols, createCol("EXTERNAL_NAME", 6165)) - cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165)) - cols = append(cols, createCol("PARAMETER_STYLE", 6165)) - cols = append(cols, createCol("IS_DETERMINISTIC", 6165)) - cols = append(cols, createCol("SQL_DATA_ACCESS", 6165)) - cols = append(cols, createCol("SQL_PATH", 6165)) - cols = append(cols, createCol("SECURITY_TYPE", 6165)) - cols = append(cols, createCol("CREATED", 2064)) - cols = append(cols, createCol("LAST_ALTERED", 2064)) - cols = append(cols, createCol("SQL_MODE", 6165)) - cols = append(cols, createCol("ROUTINE_COMMENT", 6163)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) - cols = append(cols, createCol("DATABASE_COLLATION", 6165)) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 
263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_DATA_ACCESS", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, 
"")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["ROUTINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["SCHEMA_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165)) - cols = append(cols, createCol("SCHEMA_NAME", 6165)) - cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165)) - cols = append(cols, createCol("SQL_PATH", 6165)) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = 
append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 6165, "utf8mb3_general_ci", "", 512, 0, false, "")) infSchema["SCHEMATA"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165)) - cols = append(cols, createCol("VARIABLE_VALUE", 6165)) - infSchema["SESSION_STATUS"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165)) - cols = append(cols, createCol("VARIABLE_VALUE", 6165)) - infSchema["SESSION_VARIABLES"] = cols - cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("NON_UNIQUE", 265)) - cols = append(cols, createCol("INDEX_SCHEMA", 6165)) - cols = append(cols, createCol("INDEX_NAME", 6165)) - cols = append(cols, createCol("SEQ_IN_INDEX", 265)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("COLLATION", 6165)) - cols = append(cols, createCol("CARDINALITY", 265)) - cols = append(cols, createCol("SUB_PART", 265)) - cols = append(cols, createCol("PACKED", 6165)) - cols = append(cols, createCol("NULLABLE", 6165)) - cols = append(cols, createCol("INDEX_TYPE", 6165)) - cols = append(cols, createCol("COMMENT", 6165)) - cols = append(cols, createCol("INDEX_COMMENT", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 
64, 0, true, "")) + cols = append(cols, createCol(parser, "NON_UNIQUE", 265, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ_IN_INDEX", 265, "utf8mb3_general_ci", "0", 2, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "CARDINALITY", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "SUB_PART", 265, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "PACKED", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 16, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) infSchema["STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("CONSTRAINT_TYPE", 6165)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + 
cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["TABLE_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("TABLE_TYPE", 6165)) - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("VERSION", 265)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("TABLE_ROWS", 265)) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265)) - cols = 
append(cols, createCol("DATA_LENGTH", 265)) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265)) - cols = append(cols, createCol("INDEX_LENGTH", 265)) - cols = append(cols, createCol("DATA_FREE", 265)) - cols = append(cols, createCol("AUTO_INCREMENT", 265)) - cols = append(cols, createCol("CREATE_TIME", 2064)) - cols = append(cols, createCol("UPDATE_TIME", 2064)) - cols = append(cols, createCol("CHECK_TIME", 2064)) - cols = append(cols, createCol("TABLE_COLLATION", 6165)) - cols = append(cols, createCol("CHECKSUM", 265)) - cols = append(cols, createCol("CREATE_OPTIONS", 6165)) - cols = append(cols, createCol("TABLE_COMMENT", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, 
false, "")) + cols = append(cols, createCol(parser, "AUTO_INCREMENT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) infSchema["TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("TABLESPACE_TYPE", 6165)) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165)) - cols = append(cols, createCol("EXTENT_SIZE", 265)) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265)) - cols = append(cols, createCol("MAXIMUM_SIZE", 265)) - cols = append(cols, createCol("NODEGROUP_ID", 265)) - cols = append(cols, createCol("TABLESPACE_COMMENT", 6165)) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, 
createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NODEGROUP_ID", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TRIGGER_CATALOG", 6165)) - cols = append(cols, createCol("TRIGGER_SCHEMA", 6165)) - cols = append(cols, createCol("TRIGGER_NAME", 6165)) - cols = append(cols, createCol("EVENT_MANIPULATION", 6165)) - cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165)) - cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165)) - cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165)) - cols = append(cols, createCol("ACTION_ORDER", 265)) - cols = append(cols, createCol("ACTION_CONDITION", 6163)) - cols = append(cols, createCol("ACTION_STATEMENT", 6163)) - cols = append(cols, createCol("ACTION_ORIENTATION", 6165)) - cols = append(cols, createCol("ACTION_TIMING", 6165)) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 6165)) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 6165)) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165)) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165)) - cols = append(cols, createCol("CREATED", 2064)) - cols = append(cols, createCol("SQL_MODE", 6165)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) - cols = append(cols, createCol("DATABASE_COLLATION", 6165)) + cols = append(cols, createCol(parser, "TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, 
true, "")) + cols = append(cols, createCol(parser, "TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_MANIPULATION", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORDER", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_CONDITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_TIMING", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 2, 0, false, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, 
"")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["TRIGGERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["USER_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("VIEW_DEFINITION", 6163)) - cols = append(cols, createCol("CHECK_OPTION", 6165)) - cols = append(cols, createCol("IS_UPDATABLE", 6165)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("SECURITY_TYPE", 6165)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = 
append(cols, createCol(parser, "CHECK_OPTION", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "IS_UPDATABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["VIEWS"] = cols - return infSchema } // getInfoSchema80 returns a map of all information_schema tables and their columns with types // To recreate this information from MySQL, you can run the test in info_schema_gen_test.go func getInfoSchema80() map[string][]vindexes.Column { + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "8.0.30"}) + if err != nil { + panic(err) + } infSchema := map[string][]vindexes.Column{} var cols []vindexes.Column - cols = append(cols, createCol("USER", 6165)) - cols = append(cols, createCol("HOST", 6165)) - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("GRANTEE_HOST", 6165)) - cols = append(cols, createCol("ROLE_NAME", 6165)) - cols = append(cols, createCol("ROLE_HOST", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) - cols = append(cols, createCol("IS_DEFAULT", 6165)) - cols = append(cols, createCol("IS_MANDATORY", 6165)) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = 
append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ADMINISTRABLE_ROLE_AUTHORIZATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("USER", 6165)) - cols = append(cols, createCol("HOST", 6165)) - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("GRANTEE_HOST", 6165)) - cols = append(cols, createCol("ROLE_NAME", 6165)) - cols = append(cols, createCol("ROLE_HOST", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) - cols = append(cols, createCol("IS_DEFAULT", 6165)) - cols = append(cols, createCol("IS_MANDATORY", 6165)) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) 
infSchema["APPLICABLE_ROLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165)) - cols = append(cols, createCol("DESCRIPTION", 6165)) - cols = append(cols, createCol("MAXLEN", 776)) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "MAXLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["CHARACTER_SETS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("CHECK_CLAUSE", 6163)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHECK_CLAUSE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["CHECK_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 
6165)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("ID", 778)) - cols = append(cols, createCol("IS_DEFAULT", 6165)) - cols = append(cols, createCol("IS_COMPILED", 6165)) - cols = append(cols, createCol("SORTLEN", 776)) - cols = append(cols, createCol("PAD_ATTRIBUTE", 2074)) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SORTLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAD_ATTRIBUTE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'PAD SPACE','NO PAD'")) infSchema["COLLATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["COLUMN_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SCHEMA_NAME", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("HISTOGRAM", 2078)) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "HISTOGRAM", 2078, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["COLUMN_STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("ORDINAL_POSITION", 776)) - cols = append(cols, createCol("COLUMN_DEFAULT", 6163)) - cols = append(cols, createCol("IS_NULLABLE", 6165)) - cols = append(cols, createCol("DATA_TYPE", 6163)) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265)) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265)) - cols = append(cols, createCol("NUMERIC_PRECISION", 778)) - cols = append(cols, createCol("NUMERIC_SCALE", 778)) - cols = append(cols, createCol("DATETIME_PRECISION", 776)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("COLUMN_TYPE", 6163)) - cols = append(cols, createCol("COLUMN_KEY", 2074)) - cols = 
append(cols, createCol("EXTRA", 6165)) - cols = append(cols, createCol("PRIVILEGES", 6165)) - cols = append(cols, createCol("COLUMN_COMMENT", 6163)) - cols = append(cols, createCol("GENERATION_EXPRESSION", 6163)) - cols = append(cols, createCol("SRS_ID", 776)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + 
cols = append(cols, createCol(parser, "COLUMN_KEY", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'','PRI','UNI','MUL'")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "PRIVILEGES", 6165, "utf8mb3_general_ci", "", 154, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078)) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["COLUMNS_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ROLE_NAME", 6165)) - cols = append(cols, createCol("ROLE_HOST", 6165)) - cols = append(cols, createCol("IS_DEFAULT", 6165)) - cols = append(cols, createCol("IS_MANDATORY", 6165)) + cols = 
append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ENABLED_ROLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("SUPPORT", 6165)) - cols = append(cols, createCol("COMMENT", 6165)) - cols = append(cols, createCol("TRANSACTIONS", 6165)) - cols = append(cols, createCol("XA", 6165)) - cols = append(cols, createCol("SAVEPOINTS", 6165)) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["ENGINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("EVENT_CATALOG", 6165)) - cols = append(cols, createCol("EVENT_SCHEMA", 6165)) - cols = append(cols, createCol("EVENT_NAME", 6165)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("TIME_ZONE", 6165)) - cols = append(cols, createCol("EVENT_BODY", 6165)) - cols = append(cols, createCol("EVENT_DEFINITION", 6163)) - cols = append(cols, createCol("EVENT_TYPE", 6165)) - cols = append(cols, createCol("EXECUTE_AT", 2064)) - cols = append(cols, createCol("INTERVAL_VALUE", 6165)) - cols = append(cols, 
createCol("INTERVAL_FIELD", 2074)) - cols = append(cols, createCol("SQL_MODE", 2075)) - cols = append(cols, createCol("STARTS", 2064)) - cols = append(cols, createCol("ENDS", 2064)) - cols = append(cols, createCol("STATUS", 2074)) - cols = append(cols, createCol("ON_COMPLETION", 6165)) - cols = append(cols, createCol("CREATED", 2061)) - cols = append(cols, createCol("LAST_ALTERED", 2061)) - cols = append(cols, createCol("LAST_EXECUTED", 2064)) - cols = append(cols, createCol("EVENT_COMMENT", 6165)) - cols = append(cols, createCol("ORIGINATOR", 776)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) - cols = append(cols, createCol("DATABASE_COLLATION", 6165)) + cols = append(cols, createCol(parser, "EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_BODY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_FIELD", 2074, "utf8mb3_general_ci", "", 0, 0, false, 
"'YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND'")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'ENABLED','DISABLED','SLAVESIDE_DISABLED'")) + cols = append(cols, createCol(parser, "ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "ORIGINATOR", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["EVENTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("FILE_ID", 265)) - cols = append(cols, createCol("FILE_NAME", 6163)) - cols = append(cols, createCol("FILE_TYPE", 6165)) - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6167)) - cols = append(cols, createCol("TABLE_SCHEMA", 10264)) - cols = append(cols, createCol("TABLE_NAME", 10264)) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165)) - cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265)) - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("FULLTEXT_KEYS", 10264)) - cols = append(cols, createCol("DELETED_ROWS", 10264)) - cols = append(cols, createCol("UPDATE_COUNT", 10264)) - cols = append(cols, createCol("FREE_EXTENTS", 265)) - cols = append(cols, createCol("TOTAL_EXTENTS", 265)) - cols = append(cols, createCol("EXTENT_SIZE", 265)) - cols = append(cols, createCol("INITIAL_SIZE", 265)) - cols = append(cols, createCol("MAXIMUM_SIZE", 265)) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265)) - cols = append(cols, createCol("CREATION_TIME", 10264)) - cols = append(cols, createCol("LAST_UPDATE_TIME", 10264)) - cols = append(cols, createCol("LAST_ACCESS_TIME", 10264)) - cols = append(cols, createCol("RECOVER_TIME", 10264)) - cols = append(cols, createCol("TRANSACTION_COUNTER", 10264)) - cols = append(cols, createCol("VERSION", 265)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("TABLE_ROWS", 10264)) - cols = append(cols, createCol("AVG_ROW_LENGTH", 10264)) - cols = append(cols, createCol("DATA_LENGTH", 10264)) - cols = append(cols, createCol("MAX_DATA_LENGTH", 10264)) - cols 
= append(cols, createCol("INDEX_LENGTH", 10264)) - cols = append(cols, createCol("DATA_FREE", 265)) - cols = append(cols, createCol("CREATE_TIME", 10264)) - cols = append(cols, createCol("UPDATE_TIME", 10264)) - cols = append(cols, createCol("CHECK_TIME", 10264)) - cols = append(cols, createCol("CHECKSUM", 10264)) - cols = append(cols, createCol("STATUS", 6165)) - cols = append(cols, createCol("EXTRA", 6165)) + cols = append(cols, createCol(parser, "FILE_ID", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_TYPE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6167, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "FULLTEXT_KEYS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DELETED_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_COUNT", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, 
false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATION_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_ACCESS_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "RECOVER_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TRANSACTION_COUNTER", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 10264, 
"utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) infSchema["FILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778)) - cols = append(cols, createCol("BLOCK_ID", 778)) - cols = append(cols, createCol("SPACE", 778)) - cols = append(cols, createCol("PAGE_NUMBER", 778)) - cols = append(cols, createCol("PAGE_TYPE", 6165)) - cols = append(cols, createCol("FLUSH_TYPE", 778)) - cols = append(cols, createCol("FIX_COUNT", 778)) - cols = append(cols, createCol("IS_HASHED", 6165)) - cols = append(cols, createCol("NEWEST_MODIFICATION", 778)) - cols = append(cols, createCol("OLDEST_MODIFICATION", 778)) - cols = append(cols, createCol("ACCESS_TIME", 778)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("INDEX_NAME", 6165)) - cols = append(cols, createCol("NUMBER_RECORDS", 778)) - cols = append(cols, createCol("DATA_SIZE", 778)) - cols = append(cols, createCol("COMPRESSED_SIZE", 778)) - cols = append(cols, createCol("PAGE_STATE", 6165)) - cols = append(cols, createCol("IO_FIX", 6165)) - cols = append(cols, createCol("IS_OLD", 6165)) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 778)) - cols = append(cols, createCol("IS_STALE", 6165)) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "BLOCK_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 
0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_STALE", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["INNODB_BUFFER_PAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778)) - cols = append(cols, createCol("LRU_POSITION", 778)) - cols = append(cols, 
createCol("SPACE", 778)) - cols = append(cols, createCol("PAGE_NUMBER", 778)) - cols = append(cols, createCol("PAGE_TYPE", 6165)) - cols = append(cols, createCol("FLUSH_TYPE", 778)) - cols = append(cols, createCol("FIX_COUNT", 778)) - cols = append(cols, createCol("IS_HASHED", 6165)) - cols = append(cols, createCol("NEWEST_MODIFICATION", 778)) - cols = append(cols, createCol("OLDEST_MODIFICATION", 778)) - cols = append(cols, createCol("ACCESS_TIME", 778)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("INDEX_NAME", 6165)) - cols = append(cols, createCol("NUMBER_RECORDS", 778)) - cols = append(cols, createCol("DATA_SIZE", 778)) - cols = append(cols, createCol("COMPRESSED_SIZE", 778)) - cols = append(cols, createCol("COMPRESSED", 6165)) - cols = append(cols, createCol("IO_FIX", 6165)) - cols = append(cols, createCol("IS_OLD", 6165)) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 778)) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_BUFFER_PAGE_LRU"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778)) - cols = append(cols, createCol("POOL_SIZE", 778)) - cols = append(cols, createCol("FREE_BUFFERS", 778)) - cols = append(cols, createCol("DATABASE_PAGES", 778)) - cols = append(cols, createCol("OLD_DATABASE_PAGES", 778)) - cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 778)) - cols = append(cols, createCol("PENDING_DECOMPRESS", 778)) - cols = append(cols, createCol("PENDING_READS", 778)) - cols = append(cols, createCol("PENDING_FLUSH_LRU", 778)) - cols = append(cols, createCol("PENDING_FLUSH_LIST", 778)) - cols = append(cols, createCol("PAGES_MADE_YOUNG", 778)) - cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 778)) - cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1035)) - cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1035)) - cols = append(cols, createCol("NUMBER_PAGES_READ", 778)) - cols = append(cols, 
createCol("NUMBER_PAGES_CREATED", 778)) - cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 778)) - cols = append(cols, createCol("PAGES_READ_RATE", 1035)) - cols = append(cols, createCol("PAGES_CREATE_RATE", 1035)) - cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1035)) - cols = append(cols, createCol("NUMBER_PAGES_GET", 778)) - cols = append(cols, createCol("HIT_RATE", 778)) - cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 778)) - cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778)) - cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 778)) - cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 778)) - cols = append(cols, createCol("READ_AHEAD_RATE", 1035)) - cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1035)) - cols = append(cols, createCol("LRU_IO_TOTAL", 778)) - cols = append(cols, createCol("LRU_IO_CURRENT", 778)) - cols = append(cols, createCol("UNCOMPRESS_TOTAL", 778)) - cols = append(cols, createCol("UNCOMPRESS_CURRENT", 778)) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FREE_BUFFERS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLD_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_DECOMPRESS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_READS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LRU", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "PENDING_FLUSH_LIST", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_NOT_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_NOT_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_CREATED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_WRITTEN", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_READ_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_CREATE_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_WRITTEN_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_GET", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "HIT_RATE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ_AHEAD", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_READ_AHEAD_EVICTED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) 
+ cols = append(cols, createCol(parser, "READ_AHEAD_EVICTED_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_BUFFER_POOL_STATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE_ID", 776)) - cols = append(cols, createCol("INDEX_ID", 778)) - cols = append(cols, createCol("N_CACHED_PAGES", 778)) + cols = append(cols, createCol(parser, "SPACE_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_CACHED_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CACHED_INDEXES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, 
"")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165)) - cols = append(cols, createCol("table_name", 6165)) - cols = append(cols, createCol("index_name", 6165)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165)) - cols = append(cols, createCol("table_name", 6165)) - cols = append(cols, createCol("index_name", 6165)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, 
createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("compress_ops", 263)) - cols = append(cols, createCol("compress_ops_ok", 263)) - cols = append(cols, createCol("compress_time", 263)) - cols = append(cols, createCol("uncompress_ops", 263)) - cols = append(cols, createCol("uncompress_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, 
createCol("buffer_pool_instance", 263)) - cols = append(cols, createCol("pages_used", 263)) - cols = append(cols, createCol("pages_free", 263)) - cols = append(cols, createCol("relocation_ops", 265)) - cols = append(cols, createCol("relocation_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMPMEM"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263)) - cols = append(cols, createCol("buffer_pool_instance", 263)) - cols = append(cols, createCol("pages_used", 263)) - cols = append(cols, createCol("pages_free", 263)) - cols = append(cols, createCol("relocation_ops", 265)) - cols = append(cols, createCol("relocation_time", 263)) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMPMEM_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, 
createCol("TABLE_ID", 778)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("POS", 778)) - cols = append(cols, createCol("MTYPE", 263)) - cols = append(cols, createCol("PRTYPE", 263)) - cols = append(cols, createCol("LEN", 263)) - cols = append(cols, createCol("HAS_DEFAULT", 263)) - cols = append(cols, createCol("DEFAULT_VALUE", 6163)) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PRTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LEN", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "HAS_DEFAULT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_VALUE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["INNODB_COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 10262)) - cols = append(cols, createCol("PATH", 6165)) + cols = append(cols, createCol(parser, "SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) infSchema["INNODB_DATAFILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 10262)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("POS", 778)) + cols = append(cols, createCol(parser, "INDEX_ID", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 778, 
"utf8mb3_general_ci", "0", 0, 0, true, "")) infSchema["INNODB_FIELDS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165)) - cols = append(cols, createCol("FOR_NAME", 6165)) - cols = append(cols, createCol("REF_NAME", 6165)) - cols = append(cols, createCol("N_COLS", 265)) - cols = append(cols, createCol("TYPE", 778)) + cols = append(cols, createCol(parser, "ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "FOR_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "REF_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 265, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) infSchema["INNODB_FOREIGN"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165)) - cols = append(cols, createCol("FOR_COL_NAME", 6165)) - cols = append(cols, createCol("REF_COL_NAME", 6165)) - cols = append(cols, createCol("POS", 776)) + cols = append(cols, createCol(parser, "ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "FOR_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REF_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FOREIGN_COLS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 778)) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_BEING_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("KEY", 6165)) - cols = append(cols, createCol("VALUE", 6165)) + cols = append(cols, createCol(parser, "KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = 
append(cols, createCol(parser, "VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_FT_CONFIG"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("value", 6165)) + cols = append(cols, createCol(parser, "value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 778)) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165)) - cols = append(cols, createCol("FIRST_DOC_ID", 778)) - cols = append(cols, createCol("LAST_DOC_ID", 778)) - cols = append(cols, createCol("DOC_COUNT", 778)) - cols = append(cols, createCol("DOC_ID", 778)) - cols = append(cols, createCol("POSITION", 778)) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_INDEX_CACHE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165)) - cols = append(cols, createCol("FIRST_DOC_ID", 778)) - cols = append(cols, createCol("LAST_DOC_ID", 778)) - cols = append(cols, createCol("DOC_COUNT", 778)) - cols = append(cols, createCol("DOC_ID", 778)) - cols = append(cols, createCol("POSITION", 778)) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, 
createCol(parser, "FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_INDEX_TABLE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 778)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("TABLE_ID", 778)) - cols = append(cols, createCol("TYPE", 263)) - cols = append(cols, createCol("N_FIELDS", 263)) - cols = append(cols, createCol("PAGE_NO", 263)) - cols = append(cols, createCol("SPACE", 263)) - cols = append(cols, createCol("MERGE_THRESHOLD", 263)) + cols = append(cols, createCol(parser, "INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_FIELDS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NO", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MERGE_THRESHOLD", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_INDEXES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("SUBSYSTEM", 6165)) - cols = append(cols, createCol("COUNT", 265)) - cols = append(cols, createCol("MAX_COUNT", 
265)) - cols = append(cols, createCol("MIN_COUNT", 265)) - cols = append(cols, createCol("AVG_COUNT", 1035)) - cols = append(cols, createCol("COUNT_RESET", 265)) - cols = append(cols, createCol("MAX_COUNT_RESET", 265)) - cols = append(cols, createCol("MIN_COUNT_RESET", 265)) - cols = append(cols, createCol("AVG_COUNT_RESET", 1035)) - cols = append(cols, createCol("TIME_ENABLED", 2064)) - cols = append(cols, createCol("TIME_DISABLED", 2064)) - cols = append(cols, createCol("TIME_ELAPSED", 265)) - cols = append(cols, createCol("TIME_RESET", 2064)) - cols = append(cols, createCol("STATUS", 6165)) - cols = append(cols, createCol("TYPE", 6165)) - cols = append(cols, createCol("COMMENT", 6165)) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COUNT", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT_RESET", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, 
"TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_METRICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 776)) - cols = append(cols, createCol("SPACE", 776)) - cols = append(cols, createCol("PATH", 6165)) - cols = append(cols, createCol("SIZE", 778)) - cols = append(cols, createCol("STATE", 6165)) - cols = append(cols, createCol("PURPOSE", 6165)) + cols = append(cols, createCol(parser, "ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 4001, 0, true, "")) + cols = append(cols, createCol(parser, "SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "PURPOSE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) infSchema["INNODB_SESSION_TEMP_TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("FLAG", 263)) - cols = append(cols, createCol("N_COLS", 263)) - cols = append(cols, createCol("SPACE", 265)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 776)) - cols = append(cols, createCol("SPACE_TYPE", 6165)) - cols = append(cols, createCol("INSTANT_COLS", 263)) + cols = append(cols, createCol(parser, "TABLE_ID", 778, 
"utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "INSTANT_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TOTAL_ROW_VERSIONS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 776)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("FLAG", 776)) - cols = append(cols, createCol("ROW_FORMAT", 6165)) - cols = append(cols, createCol("PAGE_SIZE", 776)) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 776)) - cols = append(cols, createCol("SPACE_TYPE", 6165)) - cols = append(cols, createCol("FS_BLOCK_SIZE", 776)) - cols = append(cols, createCol("FILE_SIZE", 778)) - cols = append(cols, createCol("ALLOCATED_SIZE", 778)) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 778)) - cols = append(cols, createCol("SERVER_VERSION", 6165)) - cols = append(cols, createCol("SPACE_VERSION", 776)) - cols = append(cols, createCol("ENCRYPTION", 6165)) - cols = append(cols, createCol("STATE", 6165)) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, 
"")) + cols = append(cols, createCol(parser, "FLAG", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 22, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "FS_BLOCK_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FILE_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ALLOCATED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SERVER_VERSION", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE_VERSION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ENCRYPTION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) infSchema["INNODB_TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 10262)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("PATH", 6165)) - cols = append(cols, createCol("FLAG", 10262)) - cols = append(cols, createCol("SPACE_TYPE", 6165)) + cols = append(cols, createCol(parser, "SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, 
createCol(parser, "FLAG", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) infSchema["INNODB_TABLESPACES_BRIEF"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("STATS_INITIALIZED", 6165)) - cols = append(cols, createCol("NUM_ROWS", 778)) - cols = append(cols, createCol("CLUST_INDEX_SIZE", 778)) - cols = append(cols, createCol("OTHER_INDEX_SIZE", 778)) - cols = append(cols, createCol("MODIFIED_COUNTER", 778)) - cols = append(cols, createCol("AUTOINC", 778)) - cols = append(cols, createCol("REF_COUNT", 263)) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "STATS_INITIALIZED", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NUM_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CLUST_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OTHER_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_COUNTER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "AUTOINC", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "REF_COUNT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TABLESTATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778)) - cols = append(cols, createCol("NAME", 6165)) - cols = append(cols, createCol("N_COLS", 776)) - cols = append(cols, createCol("SPACE", 776)) + cols = append(cols, createCol(parser, "TABLE_ID", 778, 
"utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TEMP_TABLE_INFO"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("trx_id", 778)) - cols = append(cols, createCol("trx_state", 6165)) - cols = append(cols, createCol("trx_started", 2064)) - cols = append(cols, createCol("trx_requested_lock_id", 6165)) - cols = append(cols, createCol("trx_wait_started", 2064)) - cols = append(cols, createCol("trx_weight", 778)) - cols = append(cols, createCol("trx_mysql_thread_id", 778)) - cols = append(cols, createCol("trx_query", 6165)) - cols = append(cols, createCol("trx_operation_state", 6165)) - cols = append(cols, createCol("trx_tables_in_use", 778)) - cols = append(cols, createCol("trx_tables_locked", 778)) - cols = append(cols, createCol("trx_lock_structs", 778)) - cols = append(cols, createCol("trx_lock_memory_bytes", 778)) - cols = append(cols, createCol("trx_rows_locked", 778)) - cols = append(cols, createCol("trx_rows_modified", 778)) - cols = append(cols, createCol("trx_concurrency_tickets", 778)) - cols = append(cols, createCol("trx_isolation_level", 6165)) - cols = append(cols, createCol("trx_unique_checks", 263)) - cols = append(cols, createCol("trx_foreign_key_checks", 263)) - cols = append(cols, createCol("trx_last_foreign_key_error", 6165)) - cols = append(cols, createCol("trx_adaptive_hash_latched", 263)) - cols = append(cols, createCol("trx_adaptive_hash_timeout", 778)) - cols = append(cols, createCol("trx_is_read_only", 263)) - cols = append(cols, createCol("trx_autocommit_non_locking", 263)) - cols = append(cols, createCol("trx_schedule_weight", 778)) + cols = append(cols, createCol(parser, "trx_id", 778, "utf8mb3_general_ci", "", 0, 0, true, 
"")) + cols = append(cols, createCol(parser, "trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) + cols = append(cols, createCol(parser, "trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 105, 0, false, "")) + cols = append(cols, createCol(parser, "trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_weight", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_mysql_thread_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "trx_tables_in_use", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_tables_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_structs", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_memory_bytes", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_modified", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_concurrency_tickets", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "trx_unique_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_foreign_key_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_timeout", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_is_read_only", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_schedule_weight", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["INNODB_TRX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778)) - cols = append(cols, createCol("POS", 776)) - cols = append(cols, createCol("BASE_POS", 776)) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "BASE_POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_VIRTUAL"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("ORDINAL_POSITION", 776)) - cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 776)) - cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165)) - cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165)) + cols = append(cols, createCol(parser, 
"CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 776, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION_IN_UNIQUE_CONSTRAINT", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["KEY_COLUMN_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165)) - cols = append(cols, createCol("RESERVED", 263)) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 128, 0, false, "")) + cols = append(cols, createCol(parser, "RESERVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["KEYWORDS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY", 6165)) - cols = append(cols, createCol("TRACE", 6165)) - cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263)) - cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257)) + cols = append(cols, createCol(parser, 
"QUERY", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) + cols = append(cols, createCol(parser, "TRACE", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) + cols = append(cols, createCol(parser, "MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) infSchema["OPTIMIZER_TRACE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165)) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165)) - cols = append(cols, createCol("SPECIFIC_NAME", 6165)) - cols = append(cols, createCol("ORDINAL_POSITION", 778)) - cols = append(cols, createCol("PARAMETER_MODE", 6165)) - cols = append(cols, createCol("PARAMETER_NAME", 6165)) - cols = append(cols, createCol("DATA_TYPE", 6163)) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265)) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265)) - cols = append(cols, createCol("NUMERIC_PRECISION", 776)) - cols = append(cols, createCol("NUMERIC_SCALE", 265)) - cols = append(cols, createCol("DATETIME_PRECISION", 776)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163)) - cols = append(cols, createCol("ROUTINE_TYPE", 2074)) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) + cols = append(cols, createCol(parser, 
"PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) infSchema["PARAMETERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("PARTITION_NAME", 6165)) - cols = append(cols, createCol("SUBPARTITION_NAME", 6165)) - cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 776)) - cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 776)) - cols = append(cols, createCol("PARTITION_METHOD", 6165)) - cols = append(cols, createCol("SUBPARTITION_METHOD", 6165)) - cols = append(cols, createCol("PARTITION_EXPRESSION", 6165)) - cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6165)) - cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163)) - cols = 
append(cols, createCol("TABLE_ROWS", 778)) - cols = append(cols, createCol("AVG_ROW_LENGTH", 778)) - cols = append(cols, createCol("DATA_LENGTH", 778)) - cols = append(cols, createCol("MAX_DATA_LENGTH", 778)) - cols = append(cols, createCol("INDEX_LENGTH", 778)) - cols = append(cols, createCol("DATA_FREE", 778)) - cols = append(cols, createCol("CREATE_TIME", 2061)) - cols = append(cols, createCol("UPDATE_TIME", 2064)) - cols = append(cols, createCol("CHECK_TIME", 2064)) - cols = append(cols, createCol("CHECKSUM", 265)) - cols = append(cols, createCol("PARTITION_COMMENT", 6163)) - cols = append(cols, createCol("NODEGROUP", 6165)) - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, 
"PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NODEGROUP", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, false, "")) infSchema["PARTITIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("PLUGIN_NAME", 6165)) - cols = append(cols, createCol("PLUGIN_VERSION", 6165)) - cols = append(cols, createCol("PLUGIN_STATUS", 6165)) - cols = append(cols, createCol("PLUGIN_TYPE", 6165)) - cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165)) - cols = append(cols, createCol("PLUGIN_LIBRARY", 6165)) - cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165)) - cols = append(cols, createCol("PLUGIN_AUTHOR", 6165)) - cols = append(cols, 
createCol("PLUGIN_DESCRIPTION", 6165)) - cols = append(cols, createCol("PLUGIN_LICENSE", 6165)) - cols = append(cols, createCol("LOAD_OPTION", 6165)) + cols = append(cols, createCol(parser, "PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_DESCRIPTION", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["PLUGINS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 778)) - cols = append(cols, createCol("USER", 6165)) - cols = append(cols, createCol("HOST", 6165)) - cols = append(cols, createCol("DB", 6165)) - cols = append(cols, createCol("COMMAND", 6165)) - cols = append(cols, createCol("TIME", 263)) - cols = append(cols, createCol("STATE", 6165)) - cols = append(cols, createCol("INFO", 6165)) + cols = append(cols, createCol(parser, "ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, 
createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 261, 0, true, "")) + cols = append(cols, createCol(parser, "DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "TIME", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INFO", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) infSchema["PROCESSLIST"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY_ID", 263)) - cols = append(cols, createCol("SEQ", 263)) - cols = append(cols, createCol("STATE", 6165)) - cols = append(cols, createCol("DURATION", 18)) - cols = append(cols, createCol("CPU_USER", 18)) - cols = append(cols, createCol("CPU_SYSTEM", 18)) - cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263)) - cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263)) - cols = append(cols, createCol("BLOCK_OPS_IN", 263)) - cols = append(cols, createCol("BLOCK_OPS_OUT", 263)) - cols = append(cols, createCol("MESSAGES_SENT", 263)) - cols = append(cols, createCol("MESSAGES_RECEIVED", 263)) - cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263)) - cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263)) - cols = append(cols, createCol("SWAPS", 263)) - cols = append(cols, createCol("SOURCE_FUNCTION", 6165)) - cols = append(cols, createCol("SOURCE_FILE", 6165)) - cols = append(cols, createCol("SOURCE_LINE", 263)) + cols = append(cols, createCol(parser, "QUERY_ID", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "DURATION", 18, "utf8mb3_general_ci", "", 905, 0, true, "")) + cols 
= append(cols, createCol(parser, "CPU_USER", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) + cols = append(cols, createCol(parser, "CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SWAPS", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_LINE", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["PROFILING"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("MATCH_OPTION", 2074)) - cols = append(cols, 
createCol("UPDATE_RULE", 2074)) - cols = append(cols, createCol("DELETE_RULE", 2074)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "MATCH_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NONE','PARTIAL','FULL'")) + cols = append(cols, createCol(parser, "UPDATE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) + cols = append(cols, createCol(parser, "DELETE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["REFERENTIAL_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("RESOURCE_GROUP_NAME", 6165)) - cols = append(cols, createCol("RESOURCE_GROUP_TYPE", 2074)) - cols = append(cols, createCol("RESOURCE_GROUP_ENABLED", 257)) - cols = append(cols, createCol("VCPU_IDS", 10260)) - cols = append(cols, createCol("THREAD_PRIORITY", 263)) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_NAME", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'SYSTEM','USER'")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_ENABLED", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) + cols = append(cols, createCol(parser, "VCPU_IDS", 10260, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "THREAD_PRIORITY", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["RESOURCE_GROUPS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165)) - cols = append(cols, createCol("GRANTOR_HOST", 6165)) - cols = append(cols, createCol("GRANTEE", 6167)) - cols = append(cols, createCol("GRANTEE_HOST", 6167)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6167)) - cols = append(cols, createCol("TABLE_NAME", 6167)) - cols = append(cols, createCol("COLUMN_NAME", 6167)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 
2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','References'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_COLUMN_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165)) - cols = append(cols, createCol("GRANTOR_HOST", 6165)) - cols = append(cols, createCol("GRANTEE", 6167)) - cols = append(cols, createCol("GRANTEE_HOST", 6167)) - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165)) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6167)) - cols = append(cols, createCol("SPECIFIC_NAME", 6167)) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165)) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6167)) - cols = append(cols, createCol("ROUTINE_NAME", 6167)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = 
append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Execute','Alter Routine','Grant'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_ROUTINE_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165)) - cols = append(cols, createCol("GRANTOR_HOST", 6165)) - cols = append(cols, createCol("GRANTEE", 6167)) - cols = append(cols, createCol("GRANTEE_HOST", 6167)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6167)) - cols = append(cols, createCol("TABLE_NAME", 6167)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_TABLE_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_NAME", 6165)) - cols = 
append(cols, createCol("ROUTINE_CATALOG", 6165)) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6165)) - cols = append(cols, createCol("ROUTINE_NAME", 6165)) - cols = append(cols, createCol("ROUTINE_TYPE", 2074)) - cols = append(cols, createCol("DATA_TYPE", 6163)) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265)) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265)) - cols = append(cols, createCol("NUMERIC_PRECISION", 776)) - cols = append(cols, createCol("NUMERIC_SCALE", 776)) - cols = append(cols, createCol("DATETIME_PRECISION", 776)) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("COLLATION_NAME", 6165)) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163)) - cols = append(cols, createCol("ROUTINE_BODY", 6165)) - cols = append(cols, createCol("ROUTINE_DEFINITION", 6163)) - cols = append(cols, createCol("EXTERNAL_NAME", 10264)) - cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165)) - cols = append(cols, createCol("PARAMETER_STYLE", 6165)) - cols = append(cols, createCol("IS_DETERMINISTIC", 6165)) - cols = append(cols, createCol("SQL_DATA_ACCESS", 2074)) - cols = append(cols, createCol("SQL_PATH", 10264)) - cols = append(cols, createCol("SECURITY_TYPE", 2074)) - cols = append(cols, createCol("CREATED", 2061)) - cols = append(cols, createCol("LAST_ALTERED", 2061)) - cols = append(cols, createCol("SQL_MODE", 2075)) - cols = append(cols, createCol("ROUTINE_COMMENT", 6163)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) - cols = append(cols, createCol("DATABASE_COLLATION", 6165)) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "SQL", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_DATA_ACCESS", 2074, 
"utf8mb3_general_ci", "", 0, 0, true, "'CONTAINS SQL','NO SQL','READS SQL DATA','MODIFIES SQL DATA'")) + cols = append(cols, createCol(parser, "SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'DEFAULT','INVOKER','DEFINER'")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["ROUTINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) 
- cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["SCHEMA_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165)) - cols = append(cols, createCol("SCHEMA_NAME", 6165)) - cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165)) - cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165)) - cols = append(cols, createCol("SQL_PATH", 10264)) - cols = append(cols, createCol("DEFAULT_ENCRYPTION", 2074)) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DEFAULT_ENCRYPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO','YES'")) infSchema["SCHEMATA"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165)) - cols = append(cols, createCol("SCHEMA_NAME", 6165)) - cols = append(cols, createCol("OPTIONS", 6165)) + cols = append(cols, createCol(parser, 
"CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) infSchema["SCHEMATA_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("SRS_NAME", 6165)) - cols = append(cols, createCol("SRS_ID", 776)) - cols = append(cols, createCol("GEOMETRY_TYPE_NAME", 6163)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "GEOMETRY_TYPE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["ST_GEOMETRY_COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SRS_NAME", 6165)) - cols = append(cols, createCol("SRS_ID", 776)) - cols = append(cols, createCol("ORGANIZATION", 6165)) - cols = append(cols, createCol("ORGANIZATION_COORDSYS_ID", 776)) - cols = append(cols, createCol("DEFINITION", 6165)) - cols = append(cols, createCol("DESCRIPTION", 6165)) + cols = append(cols, createCol(parser, "SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, 
"SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ORGANIZATION", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ORGANIZATION_COORDSYS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DEFINITION", 6165, "utf8mb3_general_ci", "", 4096, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["ST_SPATIAL_REFERENCE_SYSTEMS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("UNIT_NAME", 6165)) - cols = append(cols, createCol("UNIT_TYPE", 6165)) - cols = append(cols, createCol("CONVERSION_FACTOR", 1036)) - cols = append(cols, createCol("DESCRIPTION", 6165)) + cols = append(cols, createCol(parser, "UNIT_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "UNIT_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) + cols = append(cols, createCol(parser, "CONVERSION_FACTOR", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) infSchema["ST_UNITS_OF_MEASURE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("NON_UNIQUE", 263)) - cols = append(cols, createCol("INDEX_SCHEMA", 6165)) - cols = append(cols, createCol("INDEX_NAME", 6165)) - cols = append(cols, createCol("SEQ_IN_INDEX", 776)) - cols = append(cols, createCol("COLUMN_NAME", 6165)) - cols = append(cols, createCol("COLLATION", 6165)) - cols = append(cols, createCol("CARDINALITY", 265)) - cols = append(cols, createCol("SUB_PART", 265)) - cols = append(cols, createCol("PACKED", 10264)) - cols = append(cols, createCol("NULLABLE", 6165)) - cols = append(cols, 
createCol("INDEX_TYPE", 6165)) - cols = append(cols, createCol("COMMENT", 6165)) - cols = append(cols, createCol("INDEX_COMMENT", 6165)) - cols = append(cols, createCol("IS_VISIBLE", 6165)) - cols = append(cols, createCol("EXPRESSION", 6163)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "NON_UNIQUE", 263, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SEQ_IN_INDEX", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "CARDINALITY", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUB_PART", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PACKED", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "IS_VISIBLE", 6165, "utf8mb3_general_ci", "", 3, 
0, true, "")) + cols = append(cols, createCol(parser, "EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("CONSTRAINT_TYPE", 6165)) - cols = append(cols, createCol("ENFORCED", 6165)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) + cols = append(cols, createCol(parser, "ENFORCED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165)) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165)) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078)) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078)) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, 
createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLE_CONSTRAINTS_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("TABLE_TYPE", 2074)) - cols = append(cols, createCol("ENGINE", 6165)) - cols = append(cols, createCol("VERSION", 263)) - cols = append(cols, createCol("ROW_FORMAT", 2074)) - cols = append(cols, createCol("TABLE_ROWS", 778)) - cols = append(cols, createCol("AVG_ROW_LENGTH", 778)) - 
cols = append(cols, createCol("DATA_LENGTH", 778)) - cols = append(cols, createCol("MAX_DATA_LENGTH", 778)) - cols = append(cols, createCol("INDEX_LENGTH", 778)) - cols = append(cols, createCol("DATA_FREE", 778)) - cols = append(cols, createCol("AUTO_INCREMENT", 778)) - cols = append(cols, createCol("CREATE_TIME", 2061)) - cols = append(cols, createCol("UPDATE_TIME", 2064)) - cols = append(cols, createCol("CHECK_TIME", 2064)) - cols = append(cols, createCol("TABLE_COLLATION", 6165)) - cols = append(cols, createCol("CHECKSUM", 265)) - cols = append(cols, createCol("CREATE_OPTIONS", 6165)) - cols = append(cols, createCol("TABLE_COMMENT", 6163)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BASE TABLE','VIEW','SYSTEM VIEW'")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'Fixed','Dynamic','Compressed','Redundant','Compact','Paged'")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) 
+ cols = append(cols, createCol(parser, "DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTO_INCREMENT", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078)) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLES_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) - cols = append(cols, 
createCol("ENGINE", 6165)) - cols = append(cols, createCol("TABLESPACE_TYPE", 6165)) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165)) - cols = append(cols, createCol("EXTENT_SIZE", 778)) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 778)) - cols = append(cols, createCol("MAXIMUM_SIZE", 778)) - cols = append(cols, createCol("NODEGROUP_ID", 778)) - cols = append(cols, createCol("TABLESPACE_COMMENT", 6165)) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NODEGROUP_ID", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165)) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078)) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLESPACES_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TRIGGER_CATALOG", 6165)) - cols = append(cols, createCol("TRIGGER_SCHEMA", 6165)) - cols = append(cols, 
createCol("TRIGGER_NAME", 6165)) - cols = append(cols, createCol("EVENT_MANIPULATION", 2074)) - cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165)) - cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165)) - cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165)) - cols = append(cols, createCol("ACTION_ORDER", 776)) - cols = append(cols, createCol("ACTION_CONDITION", 10264)) - cols = append(cols, createCol("ACTION_STATEMENT", 6163)) - cols = append(cols, createCol("ACTION_ORIENTATION", 6165)) - cols = append(cols, createCol("ACTION_TIMING", 2074)) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 10264)) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 10264)) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165)) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165)) - cols = append(cols, createCol("CREATED", 2061)) - cols = append(cols, createCol("SQL_MODE", 2075)) - cols = append(cols, createCol("DEFINER", 6165)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) - cols = append(cols, createCol("DATABASE_COLLATION", 6165)) + cols = append(cols, createCol(parser, "TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_MANIPULATION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'INSERT','UPDATE','DELETE'")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = 
append(cols, createCol(parser, "ACTION_ORDER", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_CONDITION", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_TIMING", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BEFORE','AFTER'")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 2, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["TRIGGERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("USER", 6167)) - cols = append(cols, createCol("HOST", 6167)) - cols = append(cols, createCol("ATTRIBUTE", 6163)) + cols = append(cols, createCol(parser, "USER", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "ATTRIBUTE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["USER_ATTRIBUTES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165)) - cols = append(cols, createCol("IS_GRANTABLE", 6165)) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["USER_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165)) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165)) - cols = append(cols, createCol("SPECIFIC_NAME", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = 
append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["VIEW_ROUTINE_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VIEW_CATALOG", 6165)) - cols = append(cols, createCol("VIEW_SCHEMA", 6165)) - cols = append(cols, createCol("VIEW_NAME", 6165)) - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) + cols = append(cols, createCol(parser, "VIEW_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["VIEW_TABLE_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165)) - cols = append(cols, createCol("TABLE_SCHEMA", 6165)) - cols = append(cols, createCol("TABLE_NAME", 6165)) - cols = append(cols, createCol("VIEW_DEFINITION", 6163)) - cols = append(cols, createCol("CHECK_OPTION", 2074)) - cols = append(cols, createCol("IS_UPDATABLE", 2074)) - cols = append(cols, createCol("DEFINER", 
6165)) - cols = append(cols, createCol("SECURITY_TYPE", 6165)) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165)) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165)) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NONE','LOCAL','CASCADED'")) + cols = append(cols, createCol(parser, "IS_UPDATABLE", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NO','YES'")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["VIEWS"] = cols - return infSchema } @@ -1672,17 +1603,18 @@ type infoSchemaWithColumns struct { infoSchemaData map[string][]vindexes.Column } +// MySQLVersion implements SchemaInformation. 
+ // We cache this information, since these are maps that are not changed var infoSchema57 = getInfoSchema57() var infoSchema80 = getInfoSchema80() // newSchemaInfo returns a SchemaInformation that has the column information for all info_schema tables func newSchemaInfo(inner SchemaInformation) SchemaInformation { - return &infoSchemaWithColumns{inner: inner, infoSchemaData: loadSchemaInfo()} + return &infoSchemaWithColumns{inner: inner, infoSchemaData: loadSchemaInfo(inner.Environment().MySQLVersion())} } -func loadSchemaInfo() map[string][]vindexes.Column { - version := servenv.MySQLServerVersion() +func loadSchemaInfo(version string) map[string][]vindexes.Column { if strings.HasPrefix(version, "5.7") { return infoSchema57 } @@ -1714,10 +1646,22 @@ func (i *infoSchemaWithColumns) ConnCollation() collations.ID { return i.inner.ConnCollation() } +func (i *infoSchemaWithColumns) Environment() *vtenv.Environment { + return i.inner.Environment() +} + func (i *infoSchemaWithColumns) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { return i.inner.ForeignKeyMode(keyspace) } +func (i *infoSchemaWithColumns) GetForeignKeyChecksState() *bool { + return nil +} + func (i *infoSchemaWithColumns) KeyspaceError(keyspace string) error { return i.inner.KeyspaceError(keyspace) } + +func (i *infoSchemaWithColumns) GetAggregateUDFs() []string { + return i.inner.GetAggregateUDFs() +} diff --git a/go/vt/vtgate/semantics/info_schema_gen_test.go b/go/vt/vtgate/semantics/info_schema_gen_test.go index 61241d96653..efa7433dd05 100644 --- a/go/vt/vtgate/semantics/info_schema_gen_test.go +++ b/go/vt/vtgate/semantics/info_schema_gen_test.go @@ -20,12 +20,15 @@ import ( "database/sql" "fmt" "regexp" + "strconv" "strings" "testing" _ "github.com/go-sql-driver/mysql" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" ) @@ -37,11 +40,16 @@ func TestGenerateInfoSchemaMap(t 
*testing.T) { require.NoError(t, err) defer db.Close() + collationName := collations.MySQL8().LookupName(collations.SystemCollation.Collation) + for _, tbl := range informationSchemaTables80 { - b.WriteString("cols = []vindexes.Column{}\n") result, err := db.Query(fmt.Sprintf("show columns from information_schema.`%s`", tbl)) - require.NoError(t, err) + if err != nil { + t.Logf("error querying table %s: %v", tbl, err) + continue + } defer result.Close() + b.WriteString("cols = []vindexes.Column{}\n") for result.Next() { var r row result.Scan(&r.Field, &r.Type, &r.Null, &r.Key, &r.Default, &r.Extra) @@ -61,7 +69,24 @@ func TestGenerateInfoSchemaMap(t *testing.T) { if int(i2) == 0 { t.Fatalf("%s %s", tbl, r.Field) } - b.WriteString(fmt.Sprintf("cols = append(cols, createCol(\"%s\", %d))\n", r.Field, int(i2))) + var size, scale int64 + var values string + switch i2 { + case sqltypes.Enum, sqltypes.Set: + values = allString[2] + default: + if len(allString) > 1 && allString[2] != "" { + parts := strings.Split(allString[2], ",") + size, err = strconv.ParseInt(parts[0], 10, 32) + require.NoError(t, err) + if len(parts) > 1 { + scale, err = strconv.ParseInt(parts[1], 10, 32) + require.NoError(t, err) + } + } + } + // createCol(name string, typ int, collation string, def string, invisible bool, size, scale int32, notNullable bool) + b.WriteString(fmt.Sprintf("cols = append(cols, createCol(\"%s\", %d, \"%s\", \"%s\", %d, %d, %t, \"%s\"))\n", r.Field, int(i2), collationName, r.Default, size, scale, r.Null == "NO", values)) } b.WriteString(fmt.Sprintf("infSchema[\"%s\"] = cols\n", tbl)) } @@ -85,6 +110,8 @@ var ( "ENGINES", "EVENTS", "FILES", + "GLOBAL_STATUS", + "GLOBAL_VARIABLES", "INNODB_BUFFER_PAGE", "INNODB_BUFFER_PAGE_LRU", "INNODB_BUFFER_POOL_STATS", @@ -158,7 +185,7 @@ type row struct { Type string Null string Key any - Default any + Default string Extra any } diff --git a/go/vt/vtgate/semantics/real_table.go b/go/vt/vtgate/semantics/real_table.go index 
cf7811f4404..4f1639d0897 100644 --- a/go/vt/vtgate/semantics/real_table.go +++ b/go/vt/vtgate/semantics/real_table.go @@ -20,11 +20,9 @@ import ( "strings" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -33,7 +31,9 @@ type RealTable struct { dbName, tableName string ASTNode *sqlparser.AliasedTableExpr Table *vindexes.Table + VindexHint *sqlparser.IndexHint isInfSchema bool + collationEnv *collations.Environment } var _ TableInfo = (*RealTable)(nil) @@ -41,7 +41,7 @@ var _ TableInfo = (*RealTable)(nil) // dependencies implements the TableInfo interface func (r *RealTable) dependencies(colName string, org originable) (dependencies, error) { ts := org.tableSetFor(r.ASTNode) - for _, info := range r.getColumns() { + for _, info := range r.getColumns(false /* ignoreInvisbleCol */) { if strings.EqualFold(info.Name, colName) { return createCertain(ts, ts, info.Type), nil } @@ -69,12 +69,44 @@ func (r *RealTable) IsInfSchema() bool { } // GetColumns implements the TableInfo interface -func (r *RealTable) getColumns() []ColumnInfo { - return vindexTableToColumnInfo(r.Table) +func (r *RealTable) getColumns(ignoreInvisbleCol bool) []ColumnInfo { + if r.Table == nil { + return nil + } + nameMap := map[string]any{} + cols := make([]ColumnInfo, 0, len(r.Table.Columns)) + for _, col := range r.Table.Columns { + if col.Invisible && ignoreInvisbleCol { + continue + } + cols = append(cols, ColumnInfo{ + Name: col.Name.String(), + Type: col.ToEvalengineType(r.collationEnv), + Invisible: col.Invisible, + }) + nameMap[col.Name.String()] = nil + } + // If table is authoritative, we do not need ColumnVindexes to help in resolving the unqualified columns. 
+ if r.Table.ColumnListAuthoritative { + return cols + } + for _, vindex := range r.Table.ColumnVindexes { + for _, column := range vindex.Columns { + name := column.String() + if _, exists := nameMap[name]; exists { + continue + } + cols = append(cols, ColumnInfo{ + Name: name, + }) + nameMap[name] = nil + } + } + return cols } // GetExpr implements the TableInfo interface -func (r *RealTable) getAliasedTableExpr() *sqlparser.AliasedTableExpr { +func (r *RealTable) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { return r.ASTNode } @@ -103,6 +135,11 @@ func (r *RealTable) GetVindexTable() *vindexes.Table { return r.Table } +// GetVindexHint implements the TableInfo interface +func (r *RealTable) GetVindexHint() *sqlparser.IndexHint { + return r.VindexHint +} + // Name implements the TableInfo interface func (r *RealTable) Name() (sqlparser.TableName, error) { return r.ASTNode.TableName() @@ -117,47 +154,3 @@ func (r *RealTable) authoritative() bool { func (r *RealTable) matches(name sqlparser.TableName) bool { return (name.Qualifier.IsEmpty() || name.Qualifier.String() == r.dbName) && r.tableName == name.Name.String() } - -func vindexTableToColumnInfo(tbl *vindexes.Table) []ColumnInfo { - if tbl == nil { - return nil - } - nameMap := map[string]any{} - cols := make([]ColumnInfo, 0, len(tbl.Columns)) - for _, col := range tbl.Columns { - collation := collations.DefaultCollationForType(col.Type) - if sqltypes.IsText(col.Type) { - coll, found := collations.Local().LookupID(col.CollationName) - if found { - collation = coll - } - } - - cols = append(cols, ColumnInfo{ - Name: col.Name.String(), - Type: evalengine.Type{ - Type: col.Type, - Coll: collation, - }, - Invisible: col.Invisible, - }) - nameMap[col.Name.String()] = nil - } - // If table is authoritative, we do not need ColumnVindexes to help in resolving the unqualified columns. 
- if tbl.ColumnListAuthoritative { - return cols - } - for _, vindex := range tbl.ColumnVindexes { - for _, column := range vindex.Columns { - name := column.String() - if _, exists := nameMap[name]; exists { - continue - } - cols = append(cols, ColumnInfo{ - Name: name, - }) - nameMap[name] = nil - } - } - return cols -} diff --git a/go/vt/vtgate/semantics/scoper.go b/go/vt/vtgate/semantics/scoper.go index 458e08b1f15..ae3e5b7e88d 100644 --- a/go/vt/vtgate/semantics/scoper.go +++ b/go/vt/vtgate/semantics/scoper.go @@ -37,25 +37,30 @@ type ( // These scopes are only used for rewriting ORDER BY 1 and GROUP BY 1 specialExprScopes map[*sqlparser.Literal]*scope statementIDs map[sqlparser.Statement]TableSet + si SchemaInformation } scope struct { - parent *scope - stmt sqlparser.Statement - tables []TableInfo - isUnion bool - joinUsing map[string]TableSet - stmtScope bool - ctes map[string]*sqlparser.CommonTableExpr + parent *scope + stmt sqlparser.Statement + tables []TableInfo + isUnion bool + joinUsing map[string]TableSet + stmtScope bool + ctes map[string]*sqlparser.CommonTableExpr + inGroupBy bool + inHaving bool + inHavingAggr bool } ) -func newScoper() *scoper { +func newScoper(si SchemaInformation) *scoper { return &scoper{ rScope: map[*sqlparser.Select]*scope{}, wScope: map[*sqlparser.Select]*scope{}, specialExprScopes: map[*sqlparser.Literal]*scope{}, statementIDs: map[sqlparser.Statement]TableSet{}, + si: si, } } @@ -74,11 +79,31 @@ func (s *scoper) down(cursor *sqlparser.Cursor) error { s.copySelectExprs(cursor, node) case sqlparser.OrderBy: return s.addColumnInfoForOrderBy(cursor, node) - case sqlparser.GroupBy: + case *sqlparser.GroupBy: + if node == nil { + break + } return s.addColumnInfoForGroupBy(cursor, node) + case sqlparser.AggrFunc: + if !s.currentScope().inHaving { + break + } + s.currentScope().inHavingAggr = true + case *sqlparser.FuncExpr: + if !s.currentScope().inHaving { + break + } + if node.Name.EqualsAnyString(s.si.GetAggregateUDFs()) { + 
s.currentScope().inHavingAggr = true + } case *sqlparser.Where: if node.Type == sqlparser.HavingClause { - return s.createSpecialScopePostProjection(cursor.Parent()) + err := s.createSpecialScopePostProjection(cursor.Parent()) + if err != nil { + return err + } + s.currentScope().inHaving = true + return nil } } return nil @@ -92,15 +117,17 @@ func (s *scoper) pushUnionScope(union *sqlparser.Union) { s.push(currScope) } -func (s *scoper) addColumnInfoForGroupBy(cursor *sqlparser.Cursor, node sqlparser.GroupBy) error { +func (s *scoper) addColumnInfoForGroupBy(cursor *sqlparser.Cursor, node *sqlparser.GroupBy) error { err := s.createSpecialScopePostProjection(cursor.Parent()) if err != nil { return err } - for _, expr := range node { + currentScope := s.currentScope() + currentScope.inGroupBy = true + for _, expr := range node.Exprs { lit := keepIntLiteral(expr) if lit != nil { - s.specialExprScopes[lit] = s.currentScope() + s.specialExprScopes[lit] = currentScope } } return nil @@ -197,7 +224,7 @@ func (s *scoper) up(cursor *sqlparser.Cursor) error { if isParentSelectStatement(cursor) { s.popScope() } - case *sqlparser.Select, sqlparser.GroupBy, *sqlparser.Update, *sqlparser.Delete, *sqlparser.Insert, *sqlparser.Union: + case *sqlparser.Select, *sqlparser.GroupBy, *sqlparser.Update, *sqlparser.Insert, *sqlparser.Union, *sqlparser.Delete: id := EmptyTableSet() for _, tableInfo := range s.currentScope().tables { set := tableInfo.getTableSet(s.org) @@ -210,6 +237,8 @@ func (s *scoper) up(cursor *sqlparser.Cursor) error { break } s.popScope() + case sqlparser.AggrFunc: + s.currentScope().inHavingAggr = false case sqlparser.TableExpr: if isParentSelect(cursor) { curScope := s.currentScope() @@ -223,6 +252,12 @@ func (s *scoper) up(cursor *sqlparser.Cursor) error { } } } + if isParentDeleteOrUpdate(cursor) { + usingMap := s.currentScope().prepareUsingMap() + for ts, m := range usingMap { + s.binder.usingJoinInfo[ts] = m + } + } } return nil } @@ -256,6 +291,7 @@ func (s 
*scoper) createSpecialScopePostProjection(parent sqlparser.SQLNode) erro nScope.stmt = sel tableInfo = createVTableInfoForExpressions(sel.SelectExprs, nil /*needed for star expressions*/, s.org) nScope.tables = append(nScope.tables, tableInfo) + continue } thisTableInfo := createVTableInfoForExpressions(sel.SelectExprs, nil /*needed for star expressions*/, s.org) if len(tableInfo.cols) != len(thisTableInfo.cols) { @@ -265,7 +301,10 @@ func (s *scoper) createSpecialScopePostProjection(parent sqlparser.SQLNode) erro // at this stage, we don't store the actual dependencies, we only store the expressions. // only later will we walk the expression tree and figure out the deps. so, we need to create a // composite expression that contains all the expressions in the SELECTs that this UNION consists of - tableInfo.cols[i] = sqlparser.AndExpressions(col, thisTableInfo.cols[i]) + tableInfo.cols[i] = &sqlparser.AndExpr{ + Left: col, + Right: thisTableInfo.cols[i], + } } } @@ -320,7 +359,7 @@ func checkForInvalidAliasUse(cte *sqlparser.CommonTableExpr, name string) (err e // TODO I'm sure there is a better. 
way, but we need to do this to stop infinite loops from occurring down := func(node sqlparser.SQLNode, parent sqlparser.SQLNode) bool { tbl, ok := node.(sqlparser.TableName) - if !ok || !tbl.Qualifier.IsEmpty() { + if !ok || tbl.Qualifier.NotEmpty() { return err == nil } if tbl.Name.String() == name { diff --git a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go index 94b1302b357..f6f62a3eba5 100644 --- a/go/vt/vtgate/semantics/semantic_state.go +++ b/go/vt/vtgate/semantics/semantic_state.go @@ -26,6 +26,7 @@ import ( vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -50,14 +51,14 @@ type ( authoritative() bool // getAliasedTableExpr returns the AST struct behind this table - getAliasedTableExpr() *sqlparser.AliasedTableExpr + GetAliasedTableExpr() *sqlparser.AliasedTableExpr // canShortCut will return nil when the keyspace needs to be checked, // and a true/false if the decision has been made already canShortCut() shortCut // getColumns returns the known column information for this table - getColumns() []ColumnInfo + getColumns(ignoreInvisibleCol bool) []ColumnInfo dependencies(colName string, org originable) (dependencies, error) getExprFor(s string) (sqlparser.Expr, error) @@ -76,10 +77,12 @@ type ( // QuerySignature is used to identify shortcuts in the planning process QuerySignature struct { - Union bool Aggregation bool + DML bool Distinct bool + HashJoin bool SubQueries bool + Union bool } // SemTable contains semantic analysis information about the query. @@ -116,6 +119,9 @@ type ( // It doesn't recurse inside derived tables to find the original dependencies. Direct ExprDependencies + // Targets contains the TableSet of each table getting modified by the update/delete statement. 
+ Targets TableSet + // ColumnEqualities is used for transitive closures (e.g., if a == b and b == c, then a == c). ColumnEqualities map[columnName][]sqlparser.Expr @@ -137,6 +143,8 @@ type ( // The map is keyed by the tableset of the table that each of the foreign key belongs to. childForeignKeysInvolved map[TableSet][]vindexes.ChildFKInfo parentForeignKeysInvolved map[TableSet][]vindexes.ParentFKInfo + childFkToUpdExprs map[string]sqlparser.UpdateExprs + collEnv *collations.Environment } columnName struct { @@ -144,13 +152,16 @@ type ( ColumnName string } - // SchemaInformation is used tp provide table information from Vschema. + // SchemaInformation is used to provide table information from Vschema. SchemaInformation interface { FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) ConnCollation() collations.ID + Environment() *vtenv.Environment // ForeignKeyMode returns the foreign_key flag value ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) + GetForeignKeyChecksState() *bool KeyspaceError(keyspace string) error + GetAggregateUDFs() []string } shortCut = int @@ -173,11 +184,40 @@ func (st *SemTable) CopyDependencies(from, to sqlparser.Expr) { st.Recursive[to] = st.RecursiveDeps(from) st.Direct[to] = st.DirectDeps(from) if ValidAsMapKey(from) { - st.ExprTypes[to] = st.ExprTypes[from] + if typ, found := st.ExprTypes[from]; found { + st.ExprTypes[to] = typ + } } } } +// GetChildForeignKeysForTargets gets the child foreign keys as a list for all the target tables. +func (st *SemTable) GetChildForeignKeysForTargets() (fks []vindexes.ChildFKInfo) { + for _, ts := range st.Targets.Constituents() { + fks = append(fks, st.childForeignKeysInvolved[ts]...) + } + return fks +} + +// GetChildForeignKeysForTableSet gets the child foreign keys as a listfor the TableSet. 
+func (st *SemTable) GetChildForeignKeysForTableSet(target TableSet) (fks []vindexes.ChildFKInfo) { + for _, ts := range st.Targets.Constituents() { + if target.IsSolvedBy(ts) { + fks = append(fks, st.childForeignKeysInvolved[ts]...) + } + } + return fks +} + +// GetChildForeignKeysForTable gets the child foreign keys as a list for the specified TableName. +func (st *SemTable) GetChildForeignKeysForTable(tbl sqlparser.TableName) ([]vindexes.ChildFKInfo, error) { + ts, err := st.GetTargetTableSetForTableName(tbl) + if err != nil { + return nil, err + } + return st.childForeignKeysInvolved[ts], nil +} + // GetChildForeignKeysList gets the child foreign keys as a list. func (st *SemTable) GetChildForeignKeysList() []vindexes.ChildFKInfo { var childFkInfos []vindexes.ChildFKInfo @@ -187,6 +227,24 @@ func (st *SemTable) GetChildForeignKeysList() []vindexes.ChildFKInfo { return childFkInfos } +// GetParentForeignKeysForTargets gets the parent foreign keys as a list for all the target tables. +func (st *SemTable) GetParentForeignKeysForTargets() (fks []vindexes.ParentFKInfo) { + for _, ts := range st.Targets.Constituents() { + fks = append(fks, st.parentForeignKeysInvolved[ts]...) + } + return fks +} + +// GetParentForeignKeysForTableSet gets the parent foreign keys as a list for the TableSet. +func (st *SemTable) GetParentForeignKeysForTableSet(target TableSet) (fks []vindexes.ParentFKInfo) { + for _, ts := range st.Targets.Constituents() { + if target.IsSolvedBy(ts) { + fks = append(fks, st.parentForeignKeysInvolved[ts]...) + } + } + return fks +} + // GetParentForeignKeysList gets the parent foreign keys as a list. func (st *SemTable) GetParentForeignKeysList() []vindexes.ParentFKInfo { var parentFkInfos []vindexes.ParentFKInfo @@ -196,6 +254,11 @@ func (st *SemTable) GetParentForeignKeysList() []vindexes.ParentFKInfo { return parentFkInfos } +// GetUpdateExpressionsForFk gets the update expressions for the given serialized foreign key constraint. 
+func (st *SemTable) GetUpdateExpressionsForFk(foreignKey string) sqlparser.UpdateExprs { + return st.childFkToUpdExprs[foreignKey] +} + // RemoveParentForeignKey removes the given foreign key from the parent foreign keys that sem table stores. func (st *SemTable) RemoveParentForeignKey(fkToIgnore string) error { for ts, fkInfos := range st.parentForeignKeysInvolved { @@ -279,6 +342,89 @@ func (st *SemTable) RemoveNonRequiredForeignKeys(verifyAllFks bool, getAction fu return nil } +// ErrIfFkDependentColumnUpdated checks if a foreign key column that is being updated is dependent on another column which also being updated. +func (st *SemTable) ErrIfFkDependentColumnUpdated(updateExprs sqlparser.UpdateExprs) error { + // Go over all the update expressions + for _, updateExpr := range updateExprs { + deps := st.RecursiveDeps(updateExpr.Name) + if deps.NumberOfTables() != 1 { + return vterrors.VT13001("expected to have single table dependency") + } + // Get all the child and parent foreign keys for the given table that the update expression belongs to. + childFks := st.childForeignKeysInvolved[deps] + parentFKs := st.parentForeignKeysInvolved[deps] + + involvedInFk := false + // Check if this updated column is part of any child or parent foreign key. + for _, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + involvedInFk = true + break + } + } + for _, parentFk := range parentFKs { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + involvedInFk = true + break + } + } + + if !involvedInFk { + continue + } + + // We cannot support updating a foreign key column that is using a column which is also being updated for 2 reasons— + // 1. For the child foreign keys, we aren't sure what the final value of the updated foreign key column will be. So we don't know + // what to cascade to the child. 
The selection that we do isn't enough to know if the updated value, since one of the columns used in the update is also being updated. + // 2. For the parent foreign keys, we don't know if we need to reject this update. Because we don't know the final updated value, the update might need to be failed, + // but we can't say for certain. + var dependencyUpdatedErr error + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + col, ok := node.(*sqlparser.ColName) + if !ok { + return true, nil + } + // self reference column dependency is not considered a dependent column being updated. + if st.EqualsExpr(updateExpr.Name, col) { + return true, nil + } + for _, updExpr := range updateExprs { + if st.EqualsExpr(updExpr.Name, col) { + dependencyUpdatedErr = vterrors.VT12001(fmt.Sprintf("%v column referenced in foreign key column %v is itself updated", sqlparser.String(col), sqlparser.String(updateExpr.Name))) + return false, nil + } + } + return false, nil + }, updateExpr.Expr) + if dependencyUpdatedErr != nil { + return dependencyUpdatedErr + } + } + return nil +} + +// HasNonLiteralForeignKeyUpdate checks for non-literal updates in expressions linked to a foreign key. +func (st *SemTable) HasNonLiteralForeignKeyUpdate(updExprs sqlparser.UpdateExprs) bool { + for _, updateExpr := range updExprs { + if sqlparser.IsLiteral(updateExpr.Expr) { + continue + } + parentFks := st.parentForeignKeysInvolved[st.RecursiveDeps(updateExpr.Name)] + for _, parentFk := range parentFks { + if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + childFks := st.childForeignKeysInvolved[st.RecursiveDeps(updateExpr.Name)] + for _, childFk := range childFks { + if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 { + return true + } + } + } + return false +} + // isShardScoped checks if the foreign key constraint is shard-scoped or not. It uses the vindex information to make this call. 
func isShardScoped(pTable *vindexes.Table, cTable *vindexes.Table, pCols sqlparser.Columns, cCols sqlparser.Columns) bool { if !pTable.Keyspace.Sharded { @@ -416,13 +562,14 @@ func EmptySemTable() *SemTable { Direct: map[sqlparser.Expr]TableSet{}, ColumnEqualities: map[columnName][]sqlparser.Expr{}, columns: map[*sqlparser.Union]sqlparser.SelectExprs{}, + ExprTypes: make(map[sqlparser.Expr]evalengine.Type), } } // TableSetFor returns the bitmask for this particular table func (st *SemTable) TableSetFor(t *sqlparser.AliasedTableExpr) TableSet { for idx, t2 := range st.Tables { - if t == t2.getAliasedTableExpr() { + if t == t2.GetAliasedTableExpr() { return SingleTableSet(idx) } } @@ -517,16 +664,13 @@ func (st *SemTable) TypeForExpr(e sqlparser.Expr) (evalengine.Type, bool) { // We add a lot of WeightString() expressions to queries at late stages of the planning, // which means that they don't have any type information. We can safely assume that they // are VarBinary, since that's the only type that WeightString() can return. 
- _, isWS := e.(*sqlparser.WeightStringFuncExpr) + ws, isWS := e.(*sqlparser.WeightStringFuncExpr) if isWS { - return evalengine.Type{ - Type: sqltypes.VarBinary, - Coll: collations.CollationBinaryID, - Nullable: false, // TODO: we should check if the argument is nullable - }, true + wt, _ := st.TypeForExpr(ws.Expr) + return evalengine.NewTypeEx(sqltypes.VarBinary, collations.CollationBinaryID, wt.Nullable(), 0, 0, nil), true } - return evalengine.UnknownType(), false + return evalengine.Type{}, false } // NeedsWeightString returns true if the given expression needs weight_string to do safe comparisons @@ -539,7 +683,12 @@ func (st *SemTable) NeedsWeightString(e sqlparser.Expr) bool { if !found { return true } - return typ.Coll == collations.Unknown && !sqltypes.IsNumber(typ.Type) + + if !sqltypes.IsText(typ.Type()) { + return false + } + + return !st.collEnv.IsSupported(typ.Collation()) } } @@ -609,8 +758,7 @@ func RewriteDerivedTableExpression(expr sqlparser.Expr, vt TableInfo) sqlparser. // CopyExprInfo lookups src in the ExprTypes map and, if a key is found, assign // the corresponding Type value of src to dest. 
func (st *SemTable) CopyExprInfo(src, dest sqlparser.Expr) { - srcType, found := st.ExprTypes[src] - if found { + if srcType, found := st.ExprTypes[src]; found { st.ExprTypes[dest] = srcType } } @@ -624,6 +772,10 @@ func (st *SemTable) ColumnLookup(col *sqlparser.ColName) (int, error) { // SingleUnshardedKeyspace returns the single keyspace if all tables in the query are in the same, unsharded keyspace func (st *SemTable) SingleUnshardedKeyspace() (ks *vindexes.Keyspace, tables []*vindexes.Table) { + return singleUnshardedKeyspace(st.Tables) +} + +func singleUnshardedKeyspace(tableInfos []TableInfo) (ks *vindexes.Keyspace, tables []*vindexes.Table) { validKS := func(this *vindexes.Keyspace) bool { if this == nil || this.Sharded { return false @@ -638,7 +790,7 @@ func (st *SemTable) SingleUnshardedKeyspace() (ks *vindexes.Keyspace, tables []* return true } - for _, table := range st.Tables { + for _, table := range tableInfos { if _, isDT := table.(*DerivedTable); isDT { continue } @@ -669,6 +821,34 @@ func (st *SemTable) SingleUnshardedKeyspace() (ks *vindexes.Keyspace, tables []* return ks, tables } +// SingleUnshardedKeyspace returns the single keyspace if all tables in the query are in the same keyspace +func (st *SemTable) SingleKeyspace() (ks *vindexes.Keyspace) { + validKS := func(this *vindexes.Keyspace) bool { + if this == nil { + return true + } + if ks == nil { + // first keyspace we see + ks = this + } else if ks != this { + return false + } + return true + } + + for _, table := range st.Tables { + if _, isDT := table.(*DerivedTable); isDT { + continue + } + + vtbl := table.GetVindexTable() + if !validKS(vtbl.Keyspace) { + return nil + } + } + return +} + // EqualsExpr compares two expressions using the semantic analysis information. // This means that we use the binding info to recognize that two ColName's can point to the same // table column even though they are written differently. 
Example would be the `foobar` column in the following query: @@ -678,7 +858,7 @@ func (st *SemTable) SingleUnshardedKeyspace() (ks *vindexes.Keyspace, tables []* func (st *SemTable) EqualsExpr(a, b sqlparser.Expr) bool { // If there is no SemTable, then we cannot compare the expressions. if st == nil { - return false + return sqlparser.Equals.Expr(a, b) } return st.ASTEquals().Expr(a, b) } @@ -779,3 +959,45 @@ func (st *SemTable) ASTEquals() *sqlparser.Comparator { } return st.comparator } + +func (st *SemTable) Clone(n sqlparser.SQLNode) sqlparser.SQLNode { + return sqlparser.CopyOnRewrite(n, nil, func(cursor *sqlparser.CopyOnWriteCursor) { + expr, isExpr := cursor.Node().(sqlparser.Expr) + if !isExpr { + return + } + cursor.Replace(sqlparser.CloneExpr(expr)) + }, st.CopySemanticInfo) +} + +// UpdateChildFKExpr updates the child foreign key expression with the new expression. +func (st *SemTable) UpdateChildFKExpr(origUpdExpr *sqlparser.UpdateExpr, newExpr sqlparser.Expr) { + for _, exprs := range st.childFkToUpdExprs { + for idx, updateExpr := range exprs { + if updateExpr == origUpdExpr { + exprs[idx].Expr = newExpr + } + } + } +} + +// GetTargetTableSetForTableName returns the TableSet for the given table name from the target tables. +func (st *SemTable) GetTargetTableSetForTableName(name sqlparser.TableName) (TableSet, error) { + for _, target := range st.Targets.Constituents() { + tbl, err := st.Tables[target.TableOffset()].Name() + if err != nil { + return "", err + } + if tbl.Name == name.Name { + return target, nil + } + } + return "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "target table '%s' not found", sqlparser.String(name)) +} + +// NewTableId creates a new table id and returns it. 
+func (st *SemTable) NewTableId() TableSet { + tableID := SingleTableSet(len(st.Tables)) + st.Tables = append(st.Tables, nil) + return tableID +} diff --git a/go/vt/vtgate/semantics/semantic_state_test.go b/go/vt/vtgate/semantics/semantic_state_test.go index ab855322d76..84f8cec6cf9 100644 --- a/go/vt/vtgate/semantics/semantic_state_test.go +++ b/go/vt/vtgate/semantics/semantic_state_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" querypb "vitess.io/vitess/go/vt/proto/query" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -45,7 +46,7 @@ func TestBindingAndExprEquality(t *testing.T) { for _, test := range tests { t.Run(test.expressions, func(t *testing.T) { - parse, err := sqlparser.Parse(fmt.Sprintf("select %s from t1, t2", test.expressions)) + parse, err := sqlparser.NewTestParser().Parse(fmt.Sprintf("select %s from t1, t2", test.expressions)) require.NoError(t, err) st, err := Analyze(parse, "db", fakeSchemaInfoTest()) require.NoError(t, err) @@ -418,7 +419,7 @@ func TestRemoveParentForeignKey(t *testing.T) { }, }, }, - fkToIgnore: "ks.t2child_coldks.t3cold", + fkToIgnore: "ks.t2|child_cold||ks.t3|cold", parentFksWanted: []vindexes.ParentFKInfo{ pkInfo(t3Table, []string{"colb"}, []string{"child_colb"}), pkInfo(t3Table, []string{"cola", "colx"}, []string{"child_cola", "child_colx"}), @@ -748,3 +749,233 @@ func TestRemoveNonRequiredForeignKeys(t *testing.T) { }) } } + +func TestIsFkDependentColumnUpdated(t *testing.T) { + keyspaceName := "ks" + t3Table := &vindexes.Table{ + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + Name: sqlparser.NewIdentifierCS("t3"), + } + tests := []struct { + name string + query string + fakeSi *FakeSI + updatedErr string + }{ + { + name: "updated child foreign key column is dependent on another updated column", + query: "update t1 set col = id + 1, id = 6 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: 
map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(t3Table, []string{"col"}, []string{"col"}, sqlparser.Cascade), + }, + }, + }, + }, + updatedErr: "VT12001: unsupported: id column referenced in foreign key column col is itself updated", + }, { + name: "updated parent foreign key column is dependent on another updated column", + query: "update t1 set col = id + 1, id = 6 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(t3Table, []string{"col"}, []string{"col"}), + }, + }, + }, + }, + updatedErr: "VT12001: unsupported: id column referenced in foreign key column col is itself updated", + }, { + name: "no foreign key column is dependent on a updated value", + query: "update t1 set col = id + 1 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(t3Table, []string{"col"}, []string{"col"}), + }, + }, + }, + }, + updatedErr: "", + }, { + name: "self-referenced foreign key", + query: "update t1 set col = col + 1 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: 
&vindexes.Keyspace{Name: keyspaceName}, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(t3Table, []string{"col"}, []string{"col"}), + }, + }, + }, + }, + updatedErr: "", + }, { + name: "no foreign keys", + query: "update t1 set col = id + 1, id = 6 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName, Sharded: true}, + }, + }, + }, + updatedErr: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stmt, err := sqlparser.NewTestParser().Parse(tt.query) + require.NoError(t, err) + semTable, err := Analyze(stmt, keyspaceName, tt.fakeSi) + require.NoError(t, err) + got := semTable.ErrIfFkDependentColumnUpdated(stmt.(*sqlparser.Update).Exprs) + if tt.updatedErr == "" { + require.NoError(t, got) + } else { + require.EqualError(t, got, tt.updatedErr) + } + }) + } +} + +func TestHasNonLiteralForeignKeyUpdate(t *testing.T) { + keyspaceName := "ks" + t3Table := &vindexes.Table{ + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + Name: sqlparser.NewIdentifierCS("t3"), + } + tests := []struct { + name string + query string + fakeSi *FakeSI + hasNonLiteral bool + }{ + { + name: "non literal child foreign key update", + query: "update t1 set col = id + 1 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ChildForeignKeys: []vindexes.ChildFKInfo{ + ckInfo(t3Table, []string{"col"}, []string{"col"}, sqlparser.Cascade), + }, + }, + }, + }, + hasNonLiteral: true, + }, { + name: "non literal parent foreign key update", + query: "update t1 set col = id + 1 where foo = 3", + 
fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(t3Table, []string{"col"}, []string{"col"}), + }, + }, + }, + }, + hasNonLiteral: true, + }, { + name: "literal updates only", + query: "update t1 set col = 1 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(t3Table, []string{"col"}, []string{"col"}), + }, + }, + }, + }, + hasNonLiteral: false, + }, { + name: "self-referenced foreign key", + query: "update t1 set col = col + 1 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + ParentForeignKeys: []vindexes.ParentFKInfo{ + pkInfo(t3Table, []string{"col"}, []string{"col"}), + }, + }, + }, + }, + hasNonLiteral: true, + }, { + name: "no foreign keys", + query: "update t1 set col = id + 1 where foo = 3", + fakeSi: &FakeSI{ + KsForeignKeyMode: map[string]vschemapb.Keyspace_ForeignKeyMode{ + keyspaceName: vschemapb.Keyspace_managed, + }, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: &vindexes.Keyspace{Name: keyspaceName}, + }, + }, + }, + hasNonLiteral: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stmt, err := sqlparser.NewTestParser().Parse(tt.query) + require.NoError(t, err) + 
semTable, err := Analyze(stmt, keyspaceName, tt.fakeSi) + require.NoError(t, err) + got := semTable.HasNonLiteralForeignKeyUpdate(stmt.(*sqlparser.Update).Exprs) + require.EqualValues(t, tt.hasNonLiteral, got) + }) + } +} diff --git a/go/vt/vtgate/semantics/table_collector.go b/go/vt/vtgate/semantics/table_collector.go index 12fb691874f..ae107cc070c 100644 --- a/go/vt/vtgate/semantics/table_collector.go +++ b/go/vt/vtgate/semantics/table_collector.go @@ -17,6 +17,12 @@ limitations under the License. package semantics import ( + "fmt" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -33,28 +39,103 @@ type tableCollector struct { currentDb string org originable unionInfo map[*sqlparser.Union]unionInfo + done map[*sqlparser.AliasedTableExpr]TableInfo +} + +type earlyTableCollector struct { + si SchemaInformation + currentDb string + Tables []TableInfo + done map[*sqlparser.AliasedTableExpr]TableInfo + withTables map[sqlparser.IdentifierCS]any +} + +func newEarlyTableCollector(si SchemaInformation, currentDb string) *earlyTableCollector { + return &earlyTableCollector{ + si: si, + currentDb: currentDb, + done: map[*sqlparser.AliasedTableExpr]TableInfo{}, + withTables: map[sqlparser.IdentifierCS]any{}, + } +} + +func (etc *earlyTableCollector) up(cursor *sqlparser.Cursor) { + switch node := cursor.Node().(type) { + case *sqlparser.AliasedTableExpr: + etc.visitAliasedTableExpr(node) + case *sqlparser.With: + for _, cte := range node.CTEs { + etc.withTables[cte.ID] = nil + } + } + } -func newTableCollector(scoper *scoper, si SchemaInformation, currentDb string) *tableCollector { +func (etc *earlyTableCollector) visitAliasedTableExpr(aet *sqlparser.AliasedTableExpr) { + tbl, ok := aet.Expr.(sqlparser.TableName) + if !ok { + return + } + etc.handleTableName(tbl, aet) +} + +func (etc 
*earlyTableCollector) newTableCollector(scoper *scoper, org originable) *tableCollector { return &tableCollector{ + Tables: etc.Tables, scoper: scoper, - si: si, - currentDb: currentDb, + si: etc.si, + currentDb: etc.currentDb, unionInfo: map[*sqlparser.Union]unionInfo{}, + done: etc.done, + org: org, } } +func (etc *earlyTableCollector) handleTableName(tbl sqlparser.TableName, aet *sqlparser.AliasedTableExpr) { + if tbl.Qualifier.IsEmpty() { + _, isCTE := etc.withTables[tbl.Name] + if isCTE { + // no need to handle these tables here, we wait for the late phase instead + return + } + } + tableInfo, err := getTableInfo(aet, tbl, etc.si, etc.currentDb) + if err != nil { + // this could just be a CTE that we haven't processed, so we'll give it the benefit of the doubt for now + return + } + + etc.done[aet] = tableInfo + etc.Tables = append(etc.Tables, tableInfo) +} + func (tc *tableCollector) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.AliasedTableExpr: return tc.visitAliasedTableExpr(node) case *sqlparser.Union: return tc.visitUnion(node) + case *sqlparser.RowAlias: + ins, ok := cursor.Parent().(*sqlparser.Insert) + if !ok { + return vterrors.VT13001("RowAlias is expected to hang off an Insert statement") + } + return tc.visitRowAlias(ins, node) default: return nil } } +func (tc *tableCollector) visitAliasedTableExpr(node *sqlparser.AliasedTableExpr) error { + switch t := node.Expr.(type) { + case *sqlparser.DerivedTable: + return tc.handleDerivedTable(node, t) + case sqlparser.TableName: + return tc.handleTableName(node, t) + } + return nil +} + func (tc *tableCollector) visitUnion(union *sqlparser.Union) error { firstSelect := sqlparser.GetFirstSelect(union) expanded, selectExprs := getColumnNames(firstSelect.SelectExprs) @@ -69,9 +150,10 @@ func (tc *tableCollector) visitUnion(union *sqlparser.Union) error { size := len(firstSelect.SelectExprs) info.recursive = make([]TableSet, size) - info.types = 
make([]evalengine.Type, size) + typers := make([]evalengine.TypeAggregator, size) + collations := tc.org.collationEnv() - _ = sqlparser.VisitAllSelects(union, func(s *sqlparser.Select, idx int) error { + err := sqlparser.VisitAllSelects(union, func(s *sqlparser.Select, idx int) error { for i, expr := range s.SelectExprs { ae, ok := expr.(*sqlparser.AliasedExpr) if !ok { @@ -79,47 +161,181 @@ func (tc *tableCollector) visitUnion(union *sqlparser.Union) error { } _, recursiveDeps, qt := tc.org.depsForExpr(ae.Expr) info.recursive[i] = info.recursive[i].Merge(recursiveDeps) - if idx == 0 { - // TODO: we probably should coerce these types together somehow, but I'm not sure how - info.types[i] = qt + if err := typers[i].Add(qt, collations); err != nil { + return err } } return nil }) + if err != nil { + return err + } + + for _, ts := range typers { + info.types = append(info.types, ts.Type()) + } tc.unionInfo[union] = info return nil } -func (tc *tableCollector) visitAliasedTableExpr(node *sqlparser.AliasedTableExpr) error { - switch t := node.Expr.(type) { - case *sqlparser.DerivedTable: - return tc.handleDerivedTable(node, t) +func (tc *tableCollector) visitRowAlias(ins *sqlparser.Insert, rowAlias *sqlparser.RowAlias) error { + origTableInfo := tc.Tables[0] - case sqlparser.TableName: - return tc.handleTableName(node, t) + colNames, types, err := tc.getColumnNamesAndTypes(ins, rowAlias, origTableInfo) + if err != nil { + return err } - return nil + + derivedTable := buildDerivedTable(colNames, rowAlias, types) + tc.Tables = append(tc.Tables, derivedTable) + current := tc.scoper.currentScope() + return current.addTable(derivedTable) +} + +func (tc *tableCollector) getColumnNamesAndTypes(ins *sqlparser.Insert, rowAlias *sqlparser.RowAlias, origTableInfo TableInfo) (colNames []string, types []evalengine.Type, err error) { + switch { + case len(rowAlias.Columns) > 0 && len(ins.Columns) > 0: + return tc.handleExplicitColumns(ins, rowAlias, origTableInfo) + case 
len(rowAlias.Columns) > 0: + return tc.handleRowAliasColumns(origTableInfo, rowAlias) + case len(ins.Columns) > 0: + colNames, types = tc.handleInsertColumns(ins, origTableInfo) + return colNames, types, nil + default: + return tc.handleDefaultColumns(origTableInfo) + } +} + +// handleDefaultColumns have no explicit column list on the insert statement and no column list on the row alias +func (tc *tableCollector) handleDefaultColumns(origTableInfo TableInfo) ([]string, []evalengine.Type, error) { + if !origTableInfo.authoritative() { + return nil, nil, vterrors.VT09015() + } + var colNames []string + var types []evalengine.Type + for _, column := range origTableInfo.getColumns(true /* ignoreInvisibleCol */) { + colNames = append(colNames, column.Name) + types = append(types, column.Type) + } + return colNames, types, nil +} + +// handleInsertColumns have explicit column list on the insert statement and no column list on the row alias +func (tc *tableCollector) handleInsertColumns(ins *sqlparser.Insert, origTableInfo TableInfo) ([]string, []evalengine.Type) { + var colNames []string + var types []evalengine.Type + origCols := origTableInfo.getColumns(false /* ignoreInvisbleCol */) +for2: + for _, column := range ins.Columns { + colNames = append(colNames, column.String()) + for _, origCol := range origCols { + if column.EqualString(origCol.Name) { + types = append(types, origCol.Type) + continue for2 + } + } + types = append(types, evalengine.NewType(sqltypes.Unknown, collations.Unknown)) + } + return colNames, types +} + +// handleRowAliasColumns have explicit column list on the row alias and no column list on the insert statement +func (tc *tableCollector) handleRowAliasColumns(origTableInfo TableInfo, rowAlias *sqlparser.RowAlias) ([]string, []evalengine.Type, error) { + if !origTableInfo.authoritative() { + return nil, nil, vterrors.VT09015() + } + origCols := origTableInfo.getColumns(true /* ignoreInvisibleCol */) + if len(rowAlias.Columns) != len(origCols) { + 
return nil, nil, vterrors.VT03033() + } + var colNames []string + var types []evalengine.Type + for idx, column := range rowAlias.Columns { + colNames = append(colNames, column.String()) + types = append(types, origCols[idx].Type) + } + return colNames, types, nil +} + +// handleExplicitColumns have explicit column list on the row alias and the insert statement +func (tc *tableCollector) handleExplicitColumns(ins *sqlparser.Insert, rowAlias *sqlparser.RowAlias, origTableInfo TableInfo) ([]string, []evalengine.Type, error) { + if len(rowAlias.Columns) != len(ins.Columns) { + return nil, nil, vterrors.VT03033() + } + var colNames []string + var types []evalengine.Type + origCols := origTableInfo.getColumns(false /* ignoreInvisbleCol */) +for1: + for idx, column := range rowAlias.Columns { + colNames = append(colNames, column.String()) + col := ins.Columns[idx] + for _, origCol := range origCols { + if col.EqualString(origCol.Name) { + types = append(types, origCol.Type) + continue for1 + } + } + return nil, nil, vterrors.NewErrorf(vtrpcpb.Code_NOT_FOUND, vterrors.BadFieldError, "Unknown column '%s' in 'field list'", col) + } + return colNames, types, nil +} + +func buildDerivedTable(colNames []string, rowAlias *sqlparser.RowAlias, types []evalengine.Type) *DerivedTable { + deps := make([]TableSet, len(colNames)) + for i := range colNames { + deps[i] = SingleTableSet(0) + } + + derivedTable := &DerivedTable{ + tableName: rowAlias.TableName.String(), + ASTNode: &sqlparser.AliasedTableExpr{ + Expr: sqlparser.NewTableName(rowAlias.TableName.String()), + }, + columnNames: colNames, + tables: SingleTableSet(0), + recursive: deps, + isAuthoritative: true, + types: types, + } + return derivedTable +} + +func (tc *tableCollector) handleTableName(node *sqlparser.AliasedTableExpr, t sqlparser.TableName) (err error) { + var tableInfo TableInfo + var found bool + + tableInfo, found = tc.done[node] + if !found { + tableInfo, err = getTableInfo(node, t, tc.si, tc.currentDb) + if 
err != nil { + return err + } + tc.Tables = append(tc.Tables, tableInfo) + } + + scope := tc.scoper.currentScope() + return scope.addTable(tableInfo) } -func (tc *tableCollector) handleTableName(node *sqlparser.AliasedTableExpr, t sqlparser.TableName) error { +func getTableInfo(node *sqlparser.AliasedTableExpr, t sqlparser.TableName, si SchemaInformation, currentDb string) (TableInfo, error) { var tbl *vindexes.Table var vindex vindexes.Vindex isInfSchema := sqlparser.SystemSchema(t.Qualifier.String()) var err error - tbl, vindex, _, _, _, err = tc.si.FindTableOrVindex(t) + tbl, vindex, _, _, _, err = si.FindTableOrVindex(t) if err != nil && !isInfSchema { // if we are dealing with a system table, it might not be available in the vschema, but that is OK - return err + return nil, err } if tbl == nil && vindex != nil { tbl = newVindexTable(t.Name) } - scope := tc.scoper.currentScope() - tableInfo := tc.createTable(t, node, tbl, isInfSchema, vindex) - - tc.Tables = append(tc.Tables, tableInfo) - return scope.addTable(tableInfo) + tableInfo, err := createTable(t, node, tbl, isInfSchema, vindex, si, currentDb) + if err != nil { + return nil, err + } + return tableInfo, nil } func (tc *tableCollector) handleDerivedTable(node *sqlparser.AliasedTableExpr, t *sqlparser.DerivedTable) error { @@ -207,7 +423,7 @@ func newVindexTable(t sqlparser.IdentifierCS) *vindexes.Table { // The code lives in this file since it is only touching tableCollector data func (tc *tableCollector) tableSetFor(t *sqlparser.AliasedTableExpr) TableSet { for i, t2 := range tc.Tables { - if t == t2.getAliasedTableExpr() { + if t == t2.GetAliasedTableExpr() { return SingleTableSet(i) } } @@ -223,24 +439,34 @@ func (tc *tableCollector) tableInfoFor(id TableSet) (TableInfo, error) { return tc.Tables[offset], nil } -func (tc *tableCollector) createTable( +func createTable( t sqlparser.TableName, alias *sqlparser.AliasedTableExpr, tbl *vindexes.Table, isInfSchema bool, vindex vindexes.Vindex, -) TableInfo 
{ + si SchemaInformation, + currentDb string, +) (TableInfo, error) { + hint := getVindexHint(alias.Hints) + + if err := checkValidVindexHints(hint, tbl); err != nil { + return nil, err + } + table := &RealTable{ - tableName: alias.As.String(), - ASTNode: alias, - Table: tbl, - isInfSchema: isInfSchema, + tableName: alias.As.String(), + ASTNode: alias, + Table: tbl, + VindexHint: hint, + isInfSchema: isInfSchema, + collationEnv: si.Environment().CollationEnv(), } if alias.As.IsEmpty() { dbName := t.Qualifier.String() if dbName == "" { - dbName = tc.currentDb + dbName = currentDb } table.dbName = dbName @@ -251,7 +477,37 @@ func (tc *tableCollector) createTable( return &VindexTable{ Table: table, Vindex: vindex, + }, nil + } + return table, nil +} + +func checkValidVindexHints(hint *sqlparser.IndexHint, tbl *vindexes.Table) error { + if hint == nil { + return nil + } +outer: + for _, index := range hint.Indexes { + for _, columnVindex := range tbl.ColumnVindexes { + if index.EqualString(columnVindex.Name) { + continue outer + } + } + // we found a hint on a non-existing vindex + return &NoSuchVindexFound{ + Table: fmt.Sprintf("%s.%s", tbl.Keyspace.Name, tbl.Name.String()), + VindexName: index.String(), } } - return table + return nil +} + +// getVindexHint gets the vindex hint from the list of IndexHints. 
+func getVindexHint(hints sqlparser.IndexHints) *sqlparser.IndexHint { + for _, hint := range hints { + if hint.Type.IsVindexHint() { + return hint + } + } + return nil } diff --git a/go/vt/vtgate/semantics/table_set.go b/go/vt/vtgate/semantics/table_set.go index 0ddbc87a224..acc83306869 100644 --- a/go/vt/vtgate/semantics/table_set.go +++ b/go/vt/vtgate/semantics/table_set.go @@ -57,7 +57,7 @@ func (ts TableSet) NumberOfTables() int { } // NonEmpty returns true if there are tables in the tableset -func (ts TableSet) NonEmpty() bool { +func (ts TableSet) NotEmpty() bool { return !ts.IsEmpty() } diff --git a/go/vt/vtgate/semantics/table_set_test.go b/go/vt/vtgate/semantics/table_set_test.go index 03d0d91cc9e..3730f91c533 100644 --- a/go/vt/vtgate/semantics/table_set_test.go +++ b/go/vt/vtgate/semantics/table_set_test.go @@ -17,7 +17,7 @@ limitations under the License. package semantics import ( - "math/rand" + "math/rand/v2" "testing" "github.com/stretchr/testify/assert" @@ -69,7 +69,7 @@ func TestTableSet_LargeTablesConstituents(t *testing.T) { var table int for t := 0; t < 256; t++ { - table += rand.Intn(GapSize) + 1 + table += rand.IntN(GapSize) + 1 expected = append(expected, SingleTableSet(table)) ts = ts.WithTable(table) } diff --git a/go/vt/vtgate/semantics/typer.go b/go/vt/vtgate/semantics/typer.go index 625077f4da1..b56c836a740 100644 --- a/go/vt/vtgate/semantics/typer.go +++ b/go/vt/vtgate/semantics/typer.go @@ -18,7 +18,6 @@ package semantics import ( "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine/opcode" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -27,45 +26,41 @@ import ( // typer is responsible for setting the type for expressions // it does it's work after visiting the children (up), since the children types is often needed to type a node. 
type typer struct { - m map[sqlparser.Expr]evalengine.Type + m map[sqlparser.Expr]evalengine.Type + collationEnv *collations.Environment } -func newTyper() *typer { +func newTyper(collationEnv *collations.Environment) *typer { return &typer{ - m: map[sqlparser.Expr]evalengine.Type{}, + m: map[sqlparser.Expr]evalengine.Type{}, + collationEnv: collationEnv, } } func (t *typer) exprType(expr sqlparser.Expr) evalengine.Type { - res, ok := t.m[expr] - if ok { - return res - } - - return evalengine.UnknownType() + return t.m[expr] } func (t *typer) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Literal: - t.m[node] = evalengine.Type{Type: node.SQLType(), Coll: collations.DefaultCollationForType(node.SQLType())} + t.m[node] = evalengine.NewType(node.SQLType(), collations.CollationForType(node.SQLType(), t.collationEnv.DefaultConnectionCharset())) case *sqlparser.Argument: if node.Type >= 0 { - t.m[node] = evalengine.Type{Type: node.Type, Coll: collations.DefaultCollationForType(node.Type)} + t.m[node] = evalengine.NewTypeEx(node.Type, collations.CollationForType(node.Type, t.collationEnv.DefaultConnectionCharset()), true, node.Size, node.Scale, nil) } case sqlparser.AggrFunc: code, ok := opcode.SupportedAggregates[node.AggrName()] if !ok { return nil } - inputType := sqltypes.Unknown + var inputType evalengine.Type if arg := node.GetArg(); arg != nil { if tt, ok := t.m[arg]; ok { - inputType = tt.Type + inputType = tt } } - type_ := code.Type(inputType) - t.m[node] = evalengine.Type{Type: type_, Coll: collations.DefaultCollationForType(type_)} + t.m[node] = code.ResolveType(inputType, t.collationEnv) } return nil } diff --git a/go/vt/vtgate/semantics/typer_test.go b/go/vt/vtgate/semantics/typer_test.go index 4c77e6f5657..7de5ecf1340 100644 --- a/go/vt/vtgate/semantics/typer_test.go +++ b/go/vt/vtgate/semantics/typer_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/require" + 
"vitess.io/vitess/go/mysql/collations/colldata" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" ) @@ -40,7 +41,7 @@ func TestNormalizerAndSemanticAnalysisIntegration(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) err = sqlparser.Normalize(parse, sqlparser.NewReservedVars("bv", sqlparser.BindVars{}), map[string]*querypb.BindVariable{}) @@ -51,8 +52,43 @@ func TestNormalizerAndSemanticAnalysisIntegration(t *testing.T) { bv := parse.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr.(*sqlparser.Argument) typ, found := st.ExprTypes[bv] require.True(t, found, "bindvar was not typed") - require.Equal(t, test.typ, typ.Type.String()) + require.Equal(t, test.typ, typ.Type().String()) }) } +} + +// Tests that the types correctly picks up and sets the collation on columns +func TestColumnCollations(t *testing.T) { + tests := []struct { + query, collation string + }{ + {query: "select textcol from t2"}, + {query: "select name from t2", collation: "utf8mb3_bin"}, + } + + for _, test := range tests { + t.Run(test.query, func(t *testing.T) { + parse, err := sqlparser.NewTestParser().Parse(test.query) + require.NoError(t, err) + err = sqlparser.Normalize(parse, sqlparser.NewReservedVars("bv", sqlparser.BindVars{}), map[string]*querypb.BindVariable{}) + require.NoError(t, err) + + st, err := Analyze(parse, "d", fakeSchemaInfo()) + require.NoError(t, err) + col := extract(parse.(*sqlparser.Select), 0) + typ, found := st.TypeForExpr(col) + require.True(t, found, "column was not typed") + + require.Equal(t, "VARCHAR", typ.Type().String()) + collation := colldata.Lookup(typ.Collation()) + if test.collation != "" { + collation := colldata.Lookup(typ.Collation()) + require.NotNil(t, collation) + require.Equal(t, test.collation, collation.Name()) + } else { + require.Nil(t, 
collation) + } + }) + } } diff --git a/go/vt/vtgate/semantics/vindex_table.go b/go/vt/vtgate/semantics/vindex_table.go index f78e68cbd5b..b598c93f36a 100644 --- a/go/vt/vtgate/semantics/vindex_table.go +++ b/go/vt/vtgate/semantics/vindex_table.go @@ -67,8 +67,8 @@ func (v *VindexTable) Name() (sqlparser.TableName, error) { } // GetExpr implements the TableInfo interface -func (v *VindexTable) getAliasedTableExpr() *sqlparser.AliasedTableExpr { - return v.Table.getAliasedTableExpr() +func (v *VindexTable) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { + return v.Table.GetAliasedTableExpr() } func (v *VindexTable) canShortCut() shortCut { @@ -76,8 +76,8 @@ func (v *VindexTable) canShortCut() shortCut { } // GetColumns implements the TableInfo interface -func (v *VindexTable) getColumns() []ColumnInfo { - return v.Table.getColumns() +func (v *VindexTable) getColumns(ignoreInvisbleCol bool) []ColumnInfo { + return v.Table.getColumns(ignoreInvisbleCol) } // IsInfSchema implements the TableInfo interface diff --git a/go/vt/vtgate/semantics/vtable.go b/go/vt/vtgate/semantics/vtable.go index 48439694b47..14519a7e938 100644 --- a/go/vt/vtgate/semantics/vtable.go +++ b/go/vt/vtgate/semantics/vtable.go @@ -42,10 +42,25 @@ func (v *vTableInfo) dependencies(colName string, org originable) (dependencies, if name != colName { continue } - directDeps, recursiveDeps, qt := org.depsForExpr(v.cols[i]) + deps = deps.merge(v.createCertainForCol(org, i), false) + } + if deps.empty() && v.hasStar() { + return createUncertain(v.tables, v.tables), nil + } + return deps, nil +} - newDeps := createCertain(directDeps, recursiveDeps, qt) - deps = deps.merge(newDeps, false) +func (v *vTableInfo) dependenciesInGroupBy(colName string, org originable) (dependencies, error) { + // this method is consciously very similar to vTableInfo.dependencies and should remain so + var deps dependencies = ¬hing{} + for i, name := range v.columnNames { + if name != colName { + continue + } + if 
sqlparser.ContainsAggregation(v.cols[i]) { + return nil, &CantGroupOn{name} + } + deps = deps.merge(v.createCertainForCol(org, i), false) } if deps.empty() && v.hasStar() { return createUncertain(v.tables, v.tables), nil @@ -53,6 +68,12 @@ func (v *vTableInfo) dependencies(colName string, org originable) (dependencies, return deps, nil } +func (v *vTableInfo) createCertainForCol(org originable, i int) *certain { + directDeps, recursiveDeps, qt := org.depsForExpr(v.cols[i]) + newDeps := createCertain(directDeps, recursiveDeps, qt) + return newDeps +} + // IsInfSchema implements the TableInfo interface func (v *vTableInfo) IsInfSchema() bool { return false @@ -70,7 +91,7 @@ func (v *vTableInfo) Name() (sqlparser.TableName, error) { return sqlparser.TableName{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "oh noes") } -func (v *vTableInfo) getAliasedTableExpr() *sqlparser.AliasedTableExpr { +func (v *vTableInfo) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { return nil } @@ -83,7 +104,7 @@ func (v *vTableInfo) GetVindexTable() *vindexes.Table { return nil } -func (v *vTableInfo) getColumns() []ColumnInfo { +func (v *vTableInfo) getColumns(bool) []ColumnInfo { cols := make([]ColumnInfo, 0, len(v.columnNames)) for _, col := range v.columnNames { cols = append(cols, ColumnInfo{ @@ -94,7 +115,7 @@ func (v *vTableInfo) getColumns() []ColumnInfo { } func (v *vTableInfo) hasStar() bool { - return v.tables.NonEmpty() + return v.tables.NotEmpty() } // GetTables implements the TableInfo interface diff --git a/go/vt/vtgate/simplifier/expression_simplifier.go b/go/vt/vtgate/simplifier/expression_simplifier.go index 4537a137e76..86e3471baea 100644 --- a/go/vt/vtgate/simplifier/expression_simplifier.go +++ b/go/vt/vtgate/simplifier/expression_simplifier.go @@ -21,7 +21,6 @@ import ( "strconv" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sqlparser" ) @@ -44,10 +43,10 @@ func SimplifyExpr(in sqlparser.Expr, test CheckF) sqlparser.Expr { cursor.Replace(expr) valid := 
test(smallestKnown[0]) - log.Errorf("test: %t: simplified %s to %s, full expr: %s", valid, sqlparser.String(node), sqlparser.String(expr), sqlparser.String(smallestKnown)) if valid { break // we will still continue trying to simplify other expressions at this level } else { + log.Errorf("failed attempt: tried changing {%s} to {%s} in {%s}", sqlparser.String(node), sqlparser.String(expr), sqlparser.String(in)) // undo the change cursor.Replace(node) } @@ -105,6 +104,8 @@ func (s *shrinker) fillQueue() bool { s.queue = append(s.queue, e.Left, e.Right) case *sqlparser.BinaryExpr: s.queue = append(s.queue, e.Left, e.Right) + case *sqlparser.BetweenExpr: + s.queue = append(s.queue, e.Left, e.From, e.To) case *sqlparser.Literal: switch e.Type { case sqlparser.StrVal: @@ -176,12 +177,8 @@ func (s *shrinker) fillQueue() bool { s.queue = append(s.queue, append(e[:i], e[i+1:]...)) } case *sqlparser.FuncExpr: - for _, ae := range e.Exprs { - expr, ok := ae.(*sqlparser.AliasedExpr) - if !ok { - continue - } - s.queue = append(s.queue, expr.Expr) + for _, expr := range e.Exprs { + s.queue = append(s.queue, expr) } case sqlparser.AggrFunc: for _, ae := range e.GetArgs() { diff --git a/go/vt/vtgate/simplifier/simplifier.go b/go/vt/vtgate/simplifier/simplifier.go index 0e19935caba..e96660c99ec 100644 --- a/go/vt/vtgate/simplifier/simplifier.go +++ b/go/vt/vtgate/simplifier/simplifier.go @@ -201,7 +201,7 @@ func tryRemoveTable(tables []semantics.TableInfo, in sqlparser.SelectStatement, simplified := removeTable(clone, searchedTS, currentDB, si) name, _ := tbl.Name() if simplified && test(clone) { - log.Errorf("removed table %s: %s -> %s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) + log.Errorf("removed table `%s`: \n%s\n%s", sqlparser.String(name), sqlparser.String(in), sqlparser.String(clone)) return clone } } @@ -283,8 +283,8 @@ func removeTable(clone sqlparser.SelectStatement, searchedTS semantics.TableSet, simplified = removeTableinWhere(node, 
shouldKeepExpr, simplified) case sqlparser.SelectExprs: simplified = removeTableinSelectExprs(node, cursor, shouldKeepExpr, simplified) - case sqlparser.GroupBy: - simplified = removeTableinGroupBy(node, cursor, shouldKeepExpr, simplified) + case *sqlparser.GroupBy: + simplified = removeTableInGroupBy(node, cursor, shouldKeepExpr, simplified) case sqlparser.OrderBy: simplified = removeTableinOrderBy(node, cursor, shouldKeepExpr, simplified) } @@ -376,16 +376,20 @@ func removeTableinSelectExprs(node sqlparser.SelectExprs, cursor *sqlparser.Curs return simplified } -func removeTableinGroupBy(node sqlparser.GroupBy, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { - var newExprs sqlparser.GroupBy - for _, expr := range node { +func removeTableInGroupBy(node *sqlparser.GroupBy, cursor *sqlparser.Cursor, shouldKeepExpr func(sqlparser.Expr) bool, simplified bool) bool { + var newExprs []sqlparser.Expr + for _, expr := range node.Exprs { if shouldKeepExpr(expr) { newExprs = append(newExprs, expr) } else { simplified = true } } - cursor.Replace(newExprs) + if len(newExprs) == 0 { + cursor.Replace(nil) + } else { + cursor.Replace(&sqlparser.GroupBy{Exprs: newExprs}) + } return simplified } @@ -437,8 +441,10 @@ func visitAllExpressionsInAST(clone sqlparser.SelectStatement, visit func(expres return visitWhere(node, visit) case *sqlparser.JoinCondition: return visitJoinCondition(node, cursor, visit) - case sqlparser.GroupBy: - return visitGroupBy(node, cursor, visit) + case *sqlparser.Select: + if node.GroupBy != nil { + return visitGroupBy(node, visit) + } case sqlparser.OrderBy: return visitOrderBy(node, cursor, visit) case *sqlparser.Limit: @@ -540,12 +546,15 @@ func visitJoinCondition(node *sqlparser.JoinCondition, cursor *sqlparser.Cursor, return visitExpressions(exprs, set, visit, minExprs) } -func visitGroupBy(node sqlparser.GroupBy, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { +func visitGroupBy(node 
*sqlparser.Select, visit func(expressionCursor) bool) bool { set := func(input []sqlparser.Expr) { - node = input - cursor.Replace(node) + if len(input) == 0 { + node.GroupBy = nil + } else { + node.GroupBy = &sqlparser.GroupBy{Exprs: input} + } } - return visitExpressions(node, set, visit, 0) + return visitExpressions(node.GroupBy.Exprs, set, visit, 0) } func visitOrderBy(node sqlparser.OrderBy, cursor *sqlparser.Cursor, visit func(expressionCursor) bool) bool { diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index c9edbbab8d8..340497da8ef 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ b/go/vt/vtgate/simplifier/simplifier_test.go @@ -20,14 +20,13 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) func TestFindAllExpressions(t *testing.T) { @@ -51,7 +50,7 @@ order by unsharded.orderByExpr2 asc limit 123 offset 456 ` - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Printf(">> found expression: %s\n", sqlparser.String(cursor.expr)) @@ -69,7 +68,7 @@ limit 123 offset 456 func TestAbortExpressionCursor(t *testing.T) { query := "select user.id, count(*), unsharded.name from user join unsharded on 13 = 14 where unsharded.id = 42 and name = 'foo' and user.id = unsharded.id" - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Println(sqlparser.String(cursor.expr)) @@ 
-121,16 +120,21 @@ func TestSimplifyEvalEngineExpr(t *testing.T) { // L0 p0 := plus(p11, p12) + venv := vtenv.NewTestEnv() expr := SimplifyExpr(p0, func(expr sqlparser.Expr) bool { - local, err := evalengine.Translate(expr, nil) + collationEnv := collations.MySQL8() + local, err := evalengine.Translate(expr, &evalengine.Config{ + Environment: venv, + Collation: collationEnv.DefaultConnectionCharset(), + }) if err != nil { return false } - res, err := evalengine.EmptyExpressionEnv().Evaluate(local) + res, err := evalengine.EmptyExpressionEnv(venv).Evaluate(local) if err != nil { return false } - toInt64, err := res.Value(collations.Default()).ToInt64() + toInt64, err := res.Value(collationEnv.DefaultConnectionCharset()).ToInt64() if err != nil { return false } diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index de63da87907..63ae836d715 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -19,7 +19,7 @@ package vtgate import ( "context" "fmt" - "math/rand" + "math/rand/v2" "runtime/debug" "sort" "sync" @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" @@ -49,17 +50,16 @@ var ( // CellsToWatch is the list of cells the healthcheck operates over. 
If it is empty, only the local cell is watched CellsToWatch string - bufferImplementation = "keyspace_events" initialTabletTimeout = 30 * time.Second // retryCount is the number of times a query will be retried on error retryCount = 2 + + logCollations = logutil.NewThrottledLogger("CollationInconsistent", 1*time.Minute) ) func init() { servenv.OnParseFor("vtgate", func(fs *pflag.FlagSet) { fs.StringVar(&CellsToWatch, "cells_to_watch", "", "comma-separated list of cells for watching tablets") - fs.StringVar(&bufferImplementation, "buffer_implementation", "keyspace_events", "Allowed values: healthcheck (legacy implementation), keyspace_events (default)") - fs.MarkDeprecated("buffer_implementation", "The 'healthcheck' buffer implementation has been removed in v18 and this option will be removed in v19") fs.DurationVar(&initialTabletTimeout, "gateway_initial_tablet_timeout", 30*time.Second, "At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type") fs.IntVar(&retryCount, "retry-count", 2, "retry count") }) @@ -74,7 +74,7 @@ type TabletGateway struct { srvTopoServer srvtopo.Server localCell string retryCount int - defaultConnCollation uint32 + defaultConnCollation atomic.Uint32 // mu protects the fields of this group. 
mu sync.Mutex @@ -92,7 +92,7 @@ func createHealthCheck(ctx context.Context, retryDelay, timeout time.Duration, t // NewTabletGateway creates and returns a new TabletGateway func NewTabletGateway(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, localCell string) *TabletGateway { - // hack to accomodate various users of gateway + tests + // hack to accommodate various users of gateway + tests if hc == nil { var topoServer *topo.Server if serv != nil { @@ -146,8 +146,8 @@ func (gw *TabletGateway) setupBuffering(ctx context.Context) { } // QueryServiceByAlias satisfies the Gateway interface -func (gw *TabletGateway) QueryServiceByAlias(alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) { - qs, err := gw.hc.TabletConnection(alias, target) +func (gw *TabletGateway) QueryServiceByAlias(ctx context.Context, alias *topodatapb.TabletAlias, target *querypb.Target) (queryservice.QueryService, error) { + qs, err := gw.hc.TabletConnection(ctx, alias, target) return queryservice.Wrap(qs, gw.withShardError), NewShardError(err, target) } @@ -191,7 +191,7 @@ func (gw *TabletGateway) WaitForTablets(ctx context.Context, tabletTypesToWait [ } // Finds the targets to look for. 
- targets, err := srvtopo.FindAllTargets(ctx, gw.srvTopoServer, gw.localCell, tabletTypesToWait) + targets, err := srvtopo.FindAllTargets(ctx, gw.srvTopoServer, gw.localCell, discovery.KeyspacesToWatch, tabletTypesToWait) if err != nil { return err } @@ -377,50 +377,25 @@ func (gw *TabletGateway) getStatsAggregator(target *querypb.Target) *TabletStatu } func (gw *TabletGateway) shuffleTablets(cell string, tablets []*discovery.TabletHealth) { - sameCell, diffCell, sameCellMax := 0, 0, -1 - length := len(tablets) - - // move all same cell tablets to the front, this is O(n) - for { - sameCellMax = diffCell - 1 - sameCell = gw.nextTablet(cell, tablets, sameCell, length, true) - diffCell = gw.nextTablet(cell, tablets, diffCell, length, false) - // either no more diffs or no more same cells should stop the iteration - if sameCell < 0 || diffCell < 0 { - break - } - if sameCell < diffCell { - // fast forward the `sameCell` lookup to `diffCell + 1`, `diffCell` unchanged - sameCell = diffCell + 1 + // Randomly shuffle the list of tablets, putting the same-cell hosts at the front + // of the list and the other-cell hosts at the back + // + // Only need to do n-1 swaps since the last tablet is always in the right place. 
+ n := len(tablets) + head := 0 + tail := n - 1 + for i := 0; i < n-1; i++ { + j := head + rand.IntN(tail-head+1) + + if tablets[j].Tablet.Alias.Cell == cell { + tablets[head], tablets[j] = tablets[j], tablets[head] + head++ } else { - // sameCell > diffCell, swap needed - tablets[sameCell], tablets[diffCell] = tablets[diffCell], tablets[sameCell] - sameCell++ - diffCell++ + tablets[tail], tablets[j] = tablets[j], tablets[tail] + tail-- } } - - // shuffle in same cell tablets - for i := sameCellMax; i > 0; i-- { - swap := rand.Intn(i + 1) - tablets[i], tablets[swap] = tablets[swap], tablets[i] - } - - // shuffle in diff cell tablets - for i, diffCellMin := length-1, sameCellMax+1; i > diffCellMin; i-- { - swap := rand.Intn(i-sameCellMax) + diffCellMin - tablets[i], tablets[swap] = tablets[swap], tablets[i] - } -} - -func (gw *TabletGateway) nextTablet(cell string, tablets []*discovery.TabletHealth, offset, length int, sameCell bool) int { - for ; offset < length; offset++ { - if (tablets[offset].Tablet.Alias.Cell == cell) == sameCell { - return offset - } - } - return -1 } // TabletsCacheStatus returns a displayable version of the health check cache. @@ -428,18 +403,23 @@ func (gw *TabletGateway) TabletsCacheStatus() discovery.TabletsCacheStatusList { return gw.hc.CacheStatus() } +// TabletsHealthyStatus returns a displayable version of the health check healthy list. 
+func (gw *TabletGateway) TabletsHealthyStatus() discovery.TabletsCacheStatusList { + return gw.hc.HealthyStatus() +} + func (gw *TabletGateway) updateDefaultConnCollation(tablet *topodatapb.Tablet) { - if atomic.CompareAndSwapUint32(&gw.defaultConnCollation, 0, tablet.DefaultConnCollation) { + if gw.defaultConnCollation.CompareAndSwap(0, tablet.DefaultConnCollation) { return } - if atomic.LoadUint32(&gw.defaultConnCollation) != tablet.DefaultConnCollation { - log.Warning("this Vitess cluster has tablets with different default connection collations") + if gw.defaultConnCollation.Load() != tablet.DefaultConnCollation { + logCollations.Warningf("this Vitess cluster has tablets with different default connection collations") } } // DefaultConnCollation returns the default connection collation of this TabletGateway func (gw *TabletGateway) DefaultConnCollation() collations.ID { - return collations.ID(atomic.LoadUint32(&gw.defaultConnCollation)) + return collations.ID(gw.defaultConnCollation.Load()) } // NewShardError returns a new error with the shard info amended. 
diff --git a/go/vt/vtgate/tabletgateway_flaky_test.go b/go/vt/vtgate/tabletgateway_flaky_test.go index f625b5599cd..21107c8d30e 100644 --- a/go/vt/vtgate/tabletgateway_flaky_test.go +++ b/go/vt/vtgate/tabletgateway_flaky_test.go @@ -22,15 +22,14 @@ import ( "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/vtgate/buffer" + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/vtgate/buffer" ) // TestGatewayBufferingWhenPrimarySwitchesServingState is used to test that the buffering mechanism buffers the queries when a primary goes to a non serving state and @@ -61,15 +60,29 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { tg := NewTabletGateway(ctx, hc, ts, "cell") defer tg.Close(ctx) - // add a primary tabelt which is serving + // add a primary tablet which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) + bufferingWaitTimeout := 60 * time.Second + waitForBuffering := func(enabled bool) { + timer := time.NewTimer(bufferingWaitTimeout) + defer timer.Stop() + for _, buffering := tg.kev.PrimaryIsNotServing(ctx, target); buffering != enabled; _, buffering = tg.kev.PrimaryIsNotServing(ctx, target) { + select { + case <-timer.C: + require.Fail(t, "timed out waiting for buffering of enabled: %t", enabled) + default: + } + time.Sleep(10 * time.Millisecond) + } + } + // add a result to the sandbox connection sqlResult1 := &sqltypes.Result{ Fields: []*querypb.Field{{ Name: "col1", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ @@ -94,6 +107,8 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t 
*testing.T) { // add another result to the sandbox connection sbc.SetResults([]*sqltypes.Result{sqlResult1}) + waitForBuffering(true) + // execute the query in a go routine since it should be buffered, and check that it eventually succeed queryChan := make(chan struct{}) go func() { @@ -102,17 +117,17 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { }() // set the serving type for the primary tablet true and broadcast it so that the buffering code registers this change - // this should stop the buffering and the query executed in the go routine should work. This should be done with some delay so - // that we know that the query was buffered - time.Sleep(1 * time.Second) + // this should stop the buffering and the query executed in the go routine should work. hc.SetServing(primaryTablet, true) hc.Broadcast(primaryTablet) + waitForBuffering(false) + // wait for the query to execute before checking for results select { case <-queryChan: require.NoError(t, err) - require.Equal(t, res, sqlResult1) + require.Equal(t, sqlResult1, res) case <-time.After(15 * time.Second): t.Fatalf("timed out waiting for query to execute") } @@ -148,7 +163,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { tg := NewTabletGateway(ctx, hc, ts, "cell") defer tg.Close(ctx) - // add a primary tabelt which is serving + // add a primary tablet which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) // also add a replica which is serving sbcReplica := hc.AddTestTablet("cell", hostReplica, portReplica, keyspace, shard, topodatapb.TabletType_REPLICA, true, 0, nil) @@ -158,7 +173,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { Fields: []*querypb.Field{{ Name: "col1", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ @@ -279,7 +294,7 @@ func 
TestInconsistentStateDetectedBuffering(t *testing.T) { tg.retryCount = 0 - // add a primary tabelt which is serving + // add a primary tablet which is serving sbc := hc.AddTestTablet("cell", host, port, keyspace, shard, tabletType, true, 10, nil) // add a result to the sandbox connection @@ -287,7 +302,7 @@ func TestInconsistentStateDetectedBuffering(t *testing.T) { Fields: []*querypb.Field{{ Name: "col1", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ diff --git a/go/vt/vtgate/testdata/executorVSchema.json b/go/vt/vtgate/testdata/executorVSchema.json index da12a3b9946..ba917708df8 100644 --- a/go/vt/vtgate/testdata/executorVSchema.json +++ b/go/vt/vtgate/testdata/executorVSchema.json @@ -132,6 +132,13 @@ }, "cfc": { "type": "cfc" + }, + "multicol_vdx": { + "type": "multicol", + "params": { + "column_count": "2", + "column_vindex": "xxhash,binary" + } } }, "tables": { @@ -346,6 +353,14 @@ "zip_detail": { "type": "reference", "source": "TestUnsharded.zip_detail" - } - } + }, + "multicol_tbl": { + "column_vindexes": [ + { + "columns": ["cola", "colb"], + "name": "multicol_vdx" + } + ] + } + } } diff --git a/go/vt/vtgate/tx_conn.go b/go/vt/vtgate/tx_conn.go index 9170093c23e..2eccdc54992 100644 --- a/go/vt/vtgate/tx_conn.go +++ b/go/vt/vtgate/tx_conn.go @@ -19,8 +19,10 @@ package vtgate import ( "context" "fmt" + "strings" "sync" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/concurrency" "vitess.io/vitess/go/vt/dtids" "vitess.io/vitess/go/vt/log" @@ -34,6 +36,10 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +// nonAtomicCommitWarnMaxShards limits the number of shard names reported in +// non-atomic commit warnings. +const nonAtomicCommitWarnMaxShards = 16 + // TxConn is used for executing transactional requests. 
type TxConn struct { tabletGateway *TabletGateway @@ -98,11 +104,11 @@ func (txc *TxConn) Commit(ctx context.Context, session *SafeSession) error { return txc.commitNormal(ctx, session) } -func (txc *TxConn) queryService(alias *topodatapb.TabletAlias) (queryservice.QueryService, error) { +func (txc *TxConn) queryService(ctx context.Context, alias *topodatapb.TabletAlias) (queryservice.QueryService, error) { if alias == nil { return txc.tabletGateway, nil } - return txc.tabletGateway.QueryServiceByAlias(alias, nil) + return txc.tabletGateway.QueryServiceByAlias(ctx, alias, nil) } func (txc *TxConn) commitShard(ctx context.Context, s *vtgatepb.Session_ShardSession, logging *executeLogger) error { @@ -111,7 +117,7 @@ func (txc *TxConn) commitShard(ctx context.Context, s *vtgatepb.Session_ShardSes } var qs queryservice.QueryService var err error - qs, err = txc.queryService(s.TabletAlias) + qs, err = txc.queryService(ctx, s.TabletAlias) if err != nil { return err } @@ -132,8 +138,28 @@ func (txc *TxConn) commitNormal(ctx context.Context, session *SafeSession) error } // Retain backward compatibility on commit order for the normal session. 
- for _, shardSession := range session.ShardSessions { + for i, shardSession := range session.ShardSessions { if err := txc.commitShard(ctx, shardSession, session.logging); err != nil { + if i > 0 { + nShards := i + elipsis := false + if i > nonAtomicCommitWarnMaxShards { + nShards = nonAtomicCommitWarnMaxShards + elipsis = true + } + sNames := make([]string, nShards, nShards+1 /*...*/) + for j := 0; j < nShards; j++ { + sNames[j] = session.ShardSessions[j].Target.Shard + } + if elipsis { + sNames = append(sNames, "...") + } + session.RecordWarning(&querypb.QueryWarning{ + Code: uint32(sqlerror.ERNonAtomicCommit), + Message: fmt.Sprintf("multi-db commit failed after committing to %d shards: %s", i, strings.Join(sNames, ", ")), + }) + warnings.Add("NonAtomicCommit", 1) + } _ = txc.Release(ctx, session) return err } @@ -217,7 +243,7 @@ func (txc *TxConn) Rollback(ctx context.Context, session *SafeSession) error { if s.TransactionId == 0 { return nil } - qs, err := txc.queryService(s.TabletAlias) + qs, err := txc.queryService(ctx, s.TabletAlias) if err != nil { return err } @@ -253,7 +279,7 @@ func (txc *TxConn) Release(ctx context.Context, session *SafeSession) error { if s.ReservedId == 0 && s.TransactionId == 0 { return nil } - qs, err := txc.queryService(s.TabletAlias) + qs, err := txc.queryService(ctx, s.TabletAlias) if err != nil { return err } @@ -279,7 +305,7 @@ func (txc *TxConn) ReleaseLock(ctx context.Context, session *SafeSession) error if ls.ReservedId == 0 { return nil } - qs, err := txc.queryService(ls.TabletAlias) + qs, err := txc.queryService(ctx, ls.TabletAlias) if err != nil { return err } @@ -303,7 +329,7 @@ func (txc *TxConn) ReleaseAll(ctx context.Context, session *SafeSession) error { if s.ReservedId == 0 && s.TransactionId == 0 { return nil } - qs, err := txc.queryService(s.TabletAlias) + qs, err := txc.queryService(ctx, s.TabletAlias) if err != nil { return err } @@ -336,7 +362,7 @@ func (txc *TxConn) Resolve(ctx context.Context, dtid string) 
error { case querypb.TransactionState_PREPARE: // If state is PREPARE, make a decision to rollback and // fallthrough to the rollback workflow. - qs, err := txc.queryService(mmShard.TabletAlias) + qs, err := txc.queryService(ctx, mmShard.TabletAlias) if err != nil { return err } diff --git a/go/vt/vtgate/tx_conn_test.go b/go/vt/vtgate/tx_conn_test.go index 3fc141c64ac..4d77ea16c92 100644 --- a/go/vt/vtgate/tx_conn_test.go +++ b/go/vt/vtgate/tx_conn_test.go @@ -19,6 +19,7 @@ package vtgate import ( "context" "fmt" + "strconv" "testing" "github.com/stretchr/testify/assert" @@ -27,6 +28,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/srvtopo" @@ -41,6 +43,7 @@ import ( var queries = []*querypb.BoundQuery{{Sql: "query1"}} var twoQueries = []*querypb.BoundQuery{{Sql: "query1"}, {Sql: "query1"}} +var threeQueries = []*querypb.BoundQuery{{Sql: "query1"}, {Sql: "query1"}, {Sql: "query1"}} func TestTxConnBegin(t *testing.T) { ctx := utils.LeakCheckContext(t) @@ -67,12 +70,14 @@ func TestTxConnBegin(t *testing.T) { func TestTxConnCommitFailure(t *testing.T) { ctx := utils.LeakCheckContext(t) - sc, sbc0, sbc1, rss0, rss1, rss01 := newTestTxConnEnv(t, ctx, "TestTxConn") + sc, sbcs, rssm, rssa := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 3) sc.txConn.mode = vtgatepb.TransactionMode_MULTI + nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"] // Sequence the executes to ensure commit order + session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) - sc.ExecuteMultiShard(ctx, nil, rss0, queries, session, false, false) + sc.ExecuteMultiShard(ctx, nil, rssm[0], queries, session, false, false) wantSession := vtgatepb.Session{ InTransaction: true, ShardSessions: []*vtgatepb.Session_ShardSession{{ @@ -82,11 +87,12 @@ func TestTxConnCommitFailure(t *testing.T) { TabletType: topodatapb.TabletType_PRIMARY, }, TransactionId: 1, - 
TabletAlias: sbc0.Tablet().Alias, + TabletAlias: sbcs[0].Tablet().Alias, }}, } utils.MustMatch(t, &wantSession, session.Session, "Session") - sc.ExecuteMultiShard(ctx, nil, rss01, twoQueries, session, false, false) + + sc.ExecuteMultiShard(ctx, nil, rssm[1], queries, session, false, false) wantSession = vtgatepb.Session{ InTransaction: true, ShardSessions: []*vtgatepb.Session_ShardSession{{ @@ -96,7 +102,7 @@ func TestTxConnCommitFailure(t *testing.T) { TabletType: topodatapb.TabletType_PRIMARY, }, TransactionId: 1, - TabletAlias: sbc0.Tablet().Alias, + TabletAlias: sbcs[0].Tablet().Alias, }, { Target: &querypb.Target{ Keyspace: "TestTxConn", @@ -104,23 +110,170 @@ func TestTxConnCommitFailure(t *testing.T) { TabletType: topodatapb.TabletType_PRIMARY, }, TransactionId: 1, - TabletAlias: sbc1.Tablet().Alias, + TabletAlias: sbcs[1].Tablet().Alias, + }}, + } + utils.MustMatch(t, &wantSession, session.Session, "Session") + + sc.ExecuteMultiShard(ctx, nil, rssa, threeQueries, session, false, false) + wantSession = vtgatepb.Session{ + InTransaction: true, + ShardSessions: []*vtgatepb.Session_ShardSession{{ + Target: &querypb.Target{ + Keyspace: "TestTxConn", + Shard: "0", + TabletType: topodatapb.TabletType_PRIMARY, + }, + TransactionId: 1, + TabletAlias: sbcs[0].Tablet().Alias, + }, { + Target: &querypb.Target{ + Keyspace: "TestTxConn", + Shard: "1", + TabletType: topodatapb.TabletType_PRIMARY, + }, + TransactionId: 1, + TabletAlias: sbcs[1].Tablet().Alias, + }, { + Target: &querypb.Target{ + Keyspace: "TestTxConn", + Shard: "2", + TabletType: topodatapb.TabletType_PRIMARY, + }, + TransactionId: 1, + TabletAlias: sbcs[2].Tablet().Alias, }}, } utils.MustMatch(t, &wantSession, session.Session, "Session") - sbc1.MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 1 + sbcs[2].MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 1 expectErr := NewShardError(vterrors.New( vtrpcpb.Code_DEADLINE_EXCEEDED, fmt.Sprintf("%v error", vtrpcpb.Code_DEADLINE_EXCEEDED)), - rss1[0].Target) + 
rssm[2][0].Target) require.ErrorContains(t, sc.txConn.Commit(ctx, session), expectErr.Error()) - wantSession = vtgatepb.Session{} + wantSession = vtgatepb.Session{ + Warnings: []*querypb.QueryWarning{ + { + Code: uint32(sqlerror.ERNonAtomicCommit), + Message: "multi-db commit failed after committing to 2 shards: 0, 1", + }, + }, + } utils.MustMatch(t, &wantSession, session.Session, "Session") - assert.EqualValues(t, 1, sbc0.CommitCount.Load(), "sbc0.CommitCount") - assert.EqualValues(t, 1, sbc1.CommitCount.Load(), "sbc1.CommitCount") + assert.EqualValues(t, 1, sbcs[0].CommitCount.Load(), "sbc0.CommitCount") + + require.Equal(t, nonAtomicCommitCount+1, warnings.Counts()["NonAtomicCommit"]) +} + +func TestTxConnCommitFailureAfterNonAtomicCommitMaxShards(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + sc, sbcs, rssm, _ := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 18) + sc.txConn.mode = vtgatepb.TransactionMode_MULTI + nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"] + + // Sequence the executes to ensure commit order + + session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) + wantSession := vtgatepb.Session{ + InTransaction: true, + ShardSessions: []*vtgatepb.Session_ShardSession{}, + } + + for i := 0; i < 18; i++ { + sc.ExecuteMultiShard(ctx, nil, rssm[i], queries, session, false, false) + wantSession.ShardSessions = append(wantSession.ShardSessions, &vtgatepb.Session_ShardSession{ + Target: &querypb.Target{ + Keyspace: "TestTxConn", + Shard: rssm[i][0].Target.Shard, + TabletType: topodatapb.TabletType_PRIMARY, + }, + TransactionId: 1, + TabletAlias: sbcs[i].Tablet().Alias, + }) + utils.MustMatch(t, &wantSession, session.Session, "Session") + } + + sbcs[17].MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 1 + + expectErr := NewShardError(vterrors.New( + vtrpcpb.Code_DEADLINE_EXCEEDED, + fmt.Sprintf("%v error", vtrpcpb.Code_DEADLINE_EXCEEDED)), + rssm[17][0].Target) + + require.ErrorContains(t, sc.txConn.Commit(ctx, session), 
expectErr.Error()) + wantSession = vtgatepb.Session{ + Warnings: []*querypb.QueryWarning{ + { + Code: uint32(sqlerror.ERNonAtomicCommit), + Message: "multi-db commit failed after committing to 17 shards: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ...", + }, + }, + } + + utils.MustMatch(t, &wantSession, session.Session, "Session") + for i := 0; i < 17; i++ { + assert.EqualValues(t, 1, sbcs[i].CommitCount.Load(), fmt.Sprintf("sbc%d.CommitCount", i)) + } + + require.Equal(t, nonAtomicCommitCount+1, warnings.Counts()["NonAtomicCommit"]) +} + +func TestTxConnCommitFailureBeforeNonAtomicCommitMaxShards(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + sc, sbcs, rssm, _ := newTestTxConnEnvNShards(t, ctx, "TestTxConn", 17) + sc.txConn.mode = vtgatepb.TransactionMode_MULTI + nonAtomicCommitCount := warnings.Counts()["NonAtomicCommit"] + + // Sequence the executes to ensure commit order + + session := NewSafeSession(&vtgatepb.Session{InTransaction: true}) + wantSession := vtgatepb.Session{ + InTransaction: true, + ShardSessions: []*vtgatepb.Session_ShardSession{}, + } + + for i := 0; i < 17; i++ { + sc.ExecuteMultiShard(ctx, nil, rssm[i], queries, session, false, false) + wantSession.ShardSessions = append(wantSession.ShardSessions, &vtgatepb.Session_ShardSession{ + Target: &querypb.Target{ + Keyspace: "TestTxConn", + Shard: rssm[i][0].Target.Shard, + TabletType: topodatapb.TabletType_PRIMARY, + }, + TransactionId: 1, + TabletAlias: sbcs[i].Tablet().Alias, + }) + utils.MustMatch(t, &wantSession, session.Session, "Session") + } + + sbcs[16].MustFailCodes[vtrpcpb.Code_DEADLINE_EXCEEDED] = 1 + + expectErr := NewShardError(vterrors.New( + vtrpcpb.Code_DEADLINE_EXCEEDED, + fmt.Sprintf("%v error", vtrpcpb.Code_DEADLINE_EXCEEDED)), + rssm[16][0].Target) + + require.ErrorContains(t, sc.txConn.Commit(ctx, session), expectErr.Error()) + wantSession = vtgatepb.Session{ + Warnings: []*querypb.QueryWarning{ + { + Code: uint32(sqlerror.ERNonAtomicCommit), + Message: 
"multi-db commit failed after committing to 16 shards: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15", + }, + }, + } + + utils.MustMatch(t, &wantSession, session.Session, "Session") + for i := 0; i < 16; i++ { + assert.EqualValues(t, 1, sbcs[i].CommitCount.Load(), fmt.Sprintf("sbc%d.CommitCount", i)) + } + + require.Equal(t, nonAtomicCommitCount+1, warnings.Counts()["NonAtomicCommit"]) } func TestTxConnCommitSuccess(t *testing.T) { @@ -1359,17 +1512,40 @@ func TestTxConnAccessModeReset(t *testing.T) { func newTestTxConnEnv(t *testing.T, ctx context.Context, name string) (sc *ScatterConn, sbc0, sbc1 *sandboxconn.SandboxConn, rss0, rss1, rss01 []*srvtopo.ResolvedShard) { t.Helper() createSandbox(name) + sc, sbcs, rssl, rssa := newTestTxConnEnvNShards(t, ctx, name, 2) + return sc, sbcs[0], sbcs[1], rssl[0], rssl[1], rssa +} + +func newTestTxConnEnvNShards(t *testing.T, ctx context.Context, name string, n int) ( + sc *ScatterConn, sbcl []*sandboxconn.SandboxConn, rssl [][]*srvtopo.ResolvedShard, rssa []*srvtopo.ResolvedShard, +) { + t.Helper() + createSandbox(name) + hc := discovery.NewFakeHealthCheck(nil) sc = newTestScatterConn(ctx, hc, newSandboxForCells(ctx, []string{"aa"}), "aa") - sbc0 = hc.AddTestTablet("aa", "0", 1, name, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) - sbc1 = hc.AddTestTablet("aa", "1", 1, name, "1", topodatapb.TabletType_PRIMARY, true, 1, nil) + + sNames := make([]string, n) + for i := 0; i < n; i++ { + sNames[i] = strconv.FormatInt(int64(i), 10) + } + + sbcl = make([]*sandboxconn.SandboxConn, len(sNames)) + for i, sName := range sNames { + sbcl[i] = hc.AddTestTablet("aa", sName, int32(i)+1, name, sName, topodatapb.TabletType_PRIMARY, true, 1, nil) + } + res := srvtopo.NewResolver(newSandboxForCells(ctx, []string{"aa"}), sc.gateway, "aa") - var err error - rss0, err = res.ResolveDestination(ctx, name, topodatapb.TabletType_PRIMARY, key.DestinationShard("0")) - require.NoError(t, err) - rss1, err = res.ResolveDestination(ctx, name, 
topodatapb.TabletType_PRIMARY, key.DestinationShard("1")) - require.NoError(t, err) - rss01, err = res.ResolveDestination(ctx, name, topodatapb.TabletType_PRIMARY, key.DestinationShards([]string{"0", "1"})) + + rssl = make([][]*srvtopo.ResolvedShard, len(sNames)) + for i, sName := range sNames { + rss, err := res.ResolveDestination(ctx, name, topodatapb.TabletType_PRIMARY, key.DestinationShard(sName)) + require.NoError(t, err) + rssl[i] = rss + } + + rssa, err := res.ResolveDestination(ctx, name, topodatapb.TabletType_PRIMARY, key.DestinationShards(sNames)) require.NoError(t, err) - return sc, sbc0, sbc1, rss0, rss1, rss01 + + return sc, sbcl, rssl, rssa } diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index 0e89d6fbc95..9372012f77d 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -27,9 +27,9 @@ import ( "github.com/google/uuid" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/discovery" @@ -46,6 +46,7 @@ import ( "vitess.io/vitess/go/vt/topo" topoprotopb "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/buffer" "vitess.io/vitess/go/vt/vtgate/engine" @@ -83,6 +84,8 @@ type iExecute interface { ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) VSchema() *vindexes.VSchema planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) + + environment() *vtenv.Environment } // VSchemaOperator is an interface to Vschema Operations @@ -105,6 +108,11 @@ type vcursorImpl struct { logStats *logstats.LogStats collation collations.ID + // fkChecksState stores the state of foreign key checks variable. 
+ // This state is meant to be the final fk checks state after consulting the + // session state, and the given query's comments for `SET_VAR` optimizer hints. + // A nil value represents that no foreign_key_checks value was provided. + fkChecksState *bool ignoreMaxMemoryRows bool vschema *vindexes.VSchema vm VSchemaOperator @@ -157,7 +165,7 @@ func newVCursorImpl( } } if connCollation == collations.Unknown { - connCollation = collations.Default() + connCollation = executor.env.CollationEnv().DefaultConnectionCharset() } warmingReadsPct := 0 @@ -201,10 +209,21 @@ func (vc *vcursorImpl) ConnCollation() collations.ID { return vc.collation } +// Environment returns the vtenv associated with this session +func (vc *vcursorImpl) Environment() *vtenv.Environment { + return vc.executor.environment() +} + func (vc *vcursorImpl) TimeZone() *time.Location { return vc.safeSession.TimeZone() } +func (vc *vcursorImpl) SQLMode() string { + // TODO: Implement return the current sql_mode. + // This is currently hardcoded to the default in MySQL 8.0. + return config.DefaultSQLMode +} + // MaxMemoryRows returns the maxMemoryRows flag value. func (vc *vcursorImpl) MaxMemoryRows() int { return maxMemoryRows @@ -1021,6 +1040,7 @@ func (vc *vcursorImpl) WarnUnshardedOnly(format string, params ...any) { Code: uint32(sqlerror.ERNotSupportedYet), Message: fmt.Sprintf(format, params...), }) + warnings.Add("WarnUnshardedOnly", 1) } } @@ -1055,6 +1075,10 @@ func (vc *vcursorImpl) KeyspaceError(keyspace string) error { return ks.Error } +func (vc *vcursorImpl) GetAggregateUDFs() []string { + return vc.vschema.GetAggregateUDFs() +} + // ParseDestinationTarget parses destination target string and sets default keyspace if possible. 
func parseDestinationTarget(targetString string, vschema *vindexes.VSchema) (string, topodatapb.TabletType, key.Destination, error) { destKeyspace, destTabletType, dest, err := topoprotopb.ParseDestination(targetString, defaultTabletType) @@ -1071,7 +1095,7 @@ func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.Stri _, _ = buf.WriteString(vc.keyspace) _, _ = buf.WriteString(vindexes.TabletTypeSuffix[vc.tabletType]) _, _ = buf.WriteString("+Collate:") - _, _ = buf.WriteString(collations.Local().LookupName(vc.collation)) + _, _ = buf.WriteString(vc.Environment().CollationEnv().LookupName(vc.collation)) if vc.destination != nil { switch vc.destination.(type) { @@ -1229,7 +1253,7 @@ func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topoda } func (vc *vcursorImpl) CanUseSetVar() bool { - return sqlparser.IsMySQL80AndAbove() && setVarEnabled + return vc.Environment().Parser().IsMySQL80AndAbove() && setVarEnabled } func (vc *vcursorImpl) ReleaseLock(ctx context.Context) error { @@ -1258,7 +1282,7 @@ func (vc *vcursorImpl) cloneWithAutocommitSession() *vcursorImpl { } func (vc *vcursorImpl) VExplainLogging() { - vc.safeSession.EnableLogging() + vc.safeSession.EnableLogging(vc.Environment().Parser()) } func (vc *vcursorImpl) GetVExplainLogs() []engine.ExecuteEntry { @@ -1304,7 +1328,7 @@ func (vc *vcursorImpl) CloneForReplicaWarming(ctx context.Context) engine.VCurso callerId := callerid.EffectiveCallerIDFromContext(ctx) immediateCallerId := callerid.ImmediateCallerIDFromContext(ctx) - timedCtx, _ := context.WithTimeout(context.Background(), warmingReadsQueryTimeout) //nolint + timedCtx, _ := context.WithTimeout(context.Background(), warmingReadsQueryTimeout) // nolint clonedCtx := callerid.NewContext(timedCtx, callerId, immediateCallerId) v := &vcursorImpl{ @@ -1331,3 +1355,22 @@ func (vc *vcursorImpl) CloneForReplicaWarming(ctx context.Context) engine.VCurso return v } + +// UpdateForeignKeyChecksState updates the foreign 
key checks state of the vcursor. +func (vc *vcursorImpl) UpdateForeignKeyChecksState(fkStateFromQuery *bool) { + // Initialize the state to unspecified. + vc.fkChecksState = nil + // If the query has a SET_VAR optimizer hint that explicitly sets the foreign key checks state, + // we should use that. + if fkStateFromQuery != nil { + vc.fkChecksState = fkStateFromQuery + return + } + // If the query doesn't have anything, then we consult the session state. + vc.fkChecksState = vc.safeSession.ForeignKeyChecks() +} + +// GetForeignKeyChecksState gets the stored foreign key checks state in the vcursor. +func (vc *vcursorImpl) GetForeignKeyChecksState() *bool { + return vc.fkChecksState +} diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/vcursor_impl_test.go index 3160b8a9b1a..b8e4a0d3a0a 100644 --- a/go/vt/vtgate/vcursor_impl_test.go +++ b/go/vt/vtgate/vcursor_impl_test.go @@ -184,9 +184,10 @@ func TestDestinationKeyspace(t *testing.T) { expectedError: errNoKeyspace.Error(), }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(strconv.Itoa(i)+tc.targetString, func(t *testing.T) { - impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) + impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) impl.vschema = tc.vschema dest, keyspace, tabletType, err := impl.TargetDestination(tc.qualifier) if tc.expectedError == "" { @@ -242,9 +243,10 @@ func TestSetTarget(t *testing.T) { expectedError: "can't execute the given command because you have an active transaction", }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) 
{ - vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) + vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) vc.vschema = tc.vschema err := vc.SetTarget(tc.targetString) if tc.expectedError == "" { @@ -280,13 +282,22 @@ func TestKeyForPlan(t *testing.T) { vschema: vschemaWith1KS, targetString: "ks1[deadbeef]", expectedPlanPrefixKey: "ks1@primary+Collate:utf8mb4_0900_ai_ci+KsIDsResolved:80-+Query:SELECT 1", + }, { + vschema: vschemaWith1KS, + targetString: "", + expectedPlanPrefixKey: "ks1@primary+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1", + }, { + vschema: vschemaWith1KS, + targetString: "ks1@replica", + expectedPlanPrefixKey: "ks1@replica+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1", }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { ss := NewSafeSession(&vtgatepb.Session{InTransaction: false}) ss.SetTargetString(tc.targetString) - vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) + vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) vc.vschema = tc.vschema @@ -308,7 +319,8 @@ func TestFirstSortedKeyspace(t *testing.T) { ks3Schema.Keyspace.Name: ks3Schema, }} - vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, 
srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) + r, _, _, _, _ := createExecutorEnv(t) + vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) ks, err := vc.FirstSortedKeyspace() require.NoError(t, err) diff --git a/go/vt/vtgate/vindexes/cfc_test.go b/go/vt/vtgate/vindexes/cfc_test.go index 553d36de6c6..aaf639adec6 100644 --- a/go/vt/vtgate/vindexes/cfc_test.go +++ b/go/vt/vtgate/vindexes/cfc_test.go @@ -199,7 +199,7 @@ func TestCFCComputeKsid(t *testing.T) { testName: "misaligned prefix", id: [][]byte{{3, 4, 5}, {1}}, prefix: true, - // use the first component that's availabe + // use the first component that's available expected: expectedHash([][]byte{{3, 4, 5}}), err: nil, }, @@ -207,7 +207,7 @@ func TestCFCComputeKsid(t *testing.T) { testName: "misaligned prefix", id: [][]byte{{3, 4}}, prefix: true, - // use the first component that's availabe + // use the first component that's available expected: nil, err: nil, }, @@ -286,7 +286,7 @@ func TestCFCComputeKsidXxhash(t *testing.T) { testName: "misaligned prefix", id: [][]byte{{3, 4, 5}, {1}}, prefix: true, - // use the first component that's availabe + // use the first component that's available expected: expectedHashXX([][]byte{{3, 4, 5}}), err: nil, }, @@ -294,7 +294,7 @@ func TestCFCComputeKsidXxhash(t *testing.T) { testName: "misaligned prefix", id: [][]byte{{3, 4}}, prefix: true, - // use the first component that's availabe + // use the first component that's available expected: nil, err: nil, }, diff --git a/go/vt/vtgate/vindexes/consistent_lookup.go b/go/vt/vtgate/vindexes/consistent_lookup.go index d73631cc6ca..d231f358a37 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup.go +++ b/go/vt/vtgate/vindexes/consistent_lookup.go @@ -21,11 +21,13 @@ import ( 
"context" "encoding/json" "fmt" + "strings" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" querypb "vitess.io/vitess/go/vt/proto/query" @@ -170,7 +172,7 @@ func (lu *ConsistentLookup) UnknownParams() []string { return lu.unknownParams } -//==================================================================== +// ==================================================================== // ConsistentLookupUnique defines a vindex that uses a lookup table. // The table is expected to define the id column as unique. It's @@ -270,7 +272,7 @@ func (lu *ConsistentLookupUnique) AutoCommitEnabled() bool { return lu.lkp.Autocommit } -//==================================================================== +// ==================================================================== // clCommon defines a vindex that uses a lookup table. // The table is expected to define the id column as unique. It's @@ -308,7 +310,7 @@ func (lu *clCommon) SetOwnerInfo(keyspace, table string, cols []sqlparser.Identi lu.keyspace = keyspace lu.ownerTable = sqlparser.String(sqlparser.NewIdentifierCS(table)) if len(cols) != len(lu.lkp.FromColumns) { - return fmt.Errorf("owner table column count does not match vindex %s", lu.name) + return vterrors.VT03029(lu.name) } lu.ownerColumns = make([]string, len(cols)) for i, col := range cols { @@ -382,8 +384,7 @@ func (lu *clCommon) handleDup(ctx context.Context, vcursor VCursor, values []sql return err } // Lock the target row using normal transaction priority. - // TODO: context needs to be passed on. 
- qr, err = vcursor.ExecuteKeyspaceID(context.Background(), lu.keyspace, existingksid, lu.lockOwnerQuery, bindVars, false /* rollbackOnError */, false /* autocommit */) + qr, err = vcursor.ExecuteKeyspaceID(ctx, lu.keyspace, existingksid, lu.lockOwnerQuery, bindVars, false /* rollbackOnError */, false /* autocommit */) if err != nil { return err } @@ -411,7 +412,7 @@ func (lu *clCommon) Delete(ctx context.Context, vcursor VCursor, rowsColValues [ func (lu *clCommon) Update(ctx context.Context, vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { equal := true for i := range oldValues { - result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], vcursor.ConnCollation()) + result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], vcursor.Environment().CollationEnv(), vcursor.ConnCollation(), nil) // errors from NullsafeCompare can be ignored. if they are real problems, we'll see them in the Create/Update if err != nil || result != 0 { equal = false @@ -433,7 +434,7 @@ func (lu *clCommon) MarshalJSON() ([]byte, error) { } func (lu *clCommon) generateLockLookup() string { - var buf bytes.Buffer + var buf strings.Builder fmt.Fprintf(&buf, "select %s from %s", lu.lkp.To, lu.lkp.Table) lu.addWhere(&buf, lu.lkp.FromColumns) fmt.Fprintf(&buf, " for update") @@ -441,7 +442,7 @@ func (lu *clCommon) generateLockLookup() string { } func (lu *clCommon) generateLockOwner() string { - var buf bytes.Buffer + var buf strings.Builder fmt.Fprintf(&buf, "select %s from %s", lu.ownerColumns[0], lu.ownerTable) lu.addWhere(&buf, lu.ownerColumns) // We can lock in share mode because we only want to check @@ -452,7 +453,7 @@ func (lu *clCommon) generateLockOwner() string { } func (lu *clCommon) generateInsertLookup() string { - var buf bytes.Buffer + var buf strings.Builder fmt.Fprintf(&buf, "insert into %s(", lu.lkp.Table) for _, col := range lu.lkp.FromColumns { fmt.Fprintf(&buf, "%s, ", col) @@ -466,13 +467,13 @@ func (lu 
*clCommon) generateInsertLookup() string { } func (lu *clCommon) generateUpdateLookup() string { - var buf bytes.Buffer + var buf strings.Builder fmt.Fprintf(&buf, "update %s set %s=:%s", lu.lkp.Table, lu.lkp.To, lu.lkp.To) lu.addWhere(&buf, lu.lkp.FromColumns) return buf.String() } -func (lu *clCommon) addWhere(buf *bytes.Buffer, cols []string) { +func (lu *clCommon) addWhere(buf *strings.Builder, cols []string) { buf.WriteString(" where ") for colIdx, column := range cols { if colIdx != 0 { @@ -488,7 +489,7 @@ func (lu *clCommon) GetCommitOrder() vtgatepb.CommitOrder { } // IsBackfilling implements the LookupBackfill interface -func (lu *ConsistentLookupUnique) IsBackfilling() bool { +func (lu *clCommon) IsBackfilling() bool { return lu.writeOnly } diff --git a/go/vt/vtgate/vindexes/consistent_lookup_test.go b/go/vt/vtgate/vindexes/consistent_lookup_test.go index deecc23ebdd..0279ecaba78 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup_test.go +++ b/go/vt/vtgate/vindexes/consistent_lookup_test.go @@ -40,6 +40,7 @@ import ( vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -116,8 +117,9 @@ func TestConsistentLookupMap(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(makeTestResultLookup([]int{2, 2}), nil) + ctx := newTestContext() - got, err := lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationKeyspaceIDs([][]byte{ @@ -135,10 +137,11 @@ func TestConsistentLookupMap(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre select fromc1, toc from t where fromc1 in ::fromc1 [{fromc1 }] false", }) + vc.verifyContext(t, ctx) // Test query fail. 
vc.AddResult(nil, fmt.Errorf("execute failed")) - _, err = lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) + _, err = lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1)}) wantErr := "lookup.Map: execute failed" if err == nil || err.Error() != wantErr { t.Errorf("lookup(query fail) err: %v, want %s", err, wantErr) @@ -167,8 +170,9 @@ func TestConsistentLookupUniqueMap(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup_unique", false) vc := &loggingVCursor{} vc.AddResult(makeTestResultLookup([]int{0, 1}), nil) + ctx := newTestContext() - got, err := lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := []key.Destination{ key.DestinationNone{}, @@ -180,10 +184,11 @@ func TestConsistentLookupUniqueMap(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre select fromc1, toc from t where fromc1 in ::fromc1 [{fromc1 }] false", }) + vc.verifyContext(t, ctx) // More than one result is invalid vc.AddResult(makeTestResultLookup([]int{2}), nil) - _, err = lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}) + _, err = lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1)}) wanterr := "Lookup.Map: unexpected multiple results from vindex t: INT64(1)" if err == nil || err.Error() != wanterr { t.Errorf("lookup(query fail) err: %v, want %s", err, wanterr) @@ -212,8 +217,9 @@ func TestConsistentLookupMapAbsent(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(makeTestResultLookup([]int{0, 0}), nil) + ctx := newTestContext() - got, err := lookup.Map(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + got, err := lookup.Map(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) require.NoError(t, err) want := 
[]key.Destination{ key.DestinationNone{}, @@ -225,6 +231,7 @@ func TestConsistentLookupMapAbsent(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre select fromc1, toc from t where fromc1 in ::fromc1 [{fromc1 }] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupVerify(t *testing.T) { @@ -232,17 +239,19 @@ func TestConsistentLookupVerify(t *testing.T) { vc := &loggingVCursor{} vc.AddResult(makeTestResult(1), nil) vc.AddResult(makeTestResult(1), nil) + ctx := newTestContext() - _, err := lookup.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + _, err := lookup.Verify(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) require.NoError(t, err) vc.verifyLog(t, []string{ "ExecutePre select fromc1 from t where fromc1 = :fromc1 and toc = :toc [{fromc1 1} {toc test1}] false", "ExecutePre select fromc1 from t where fromc1 = :fromc1 and toc = :toc [{fromc1 2} {toc test2}] false", }) + vc.verifyContext(t, ctx) // Test query fail. vc.AddResult(nil, fmt.Errorf("execute failed")) - _, err = lookup.Verify(context.Background(), vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) + _, err = lookup.Verify(ctx, vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}) want := "lookup.Verify: execute failed" if err == nil || err.Error() != want { t.Errorf("lookup(query fail) err: %v, want %s", err, want) @@ -250,7 +259,7 @@ func TestConsistentLookupVerify(t *testing.T) { // Test write_only. 
lookup = createConsistentLookup(t, "consistent_lookup", true) - got, err := lookup.Verify(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) + got, err := lookup.Verify(ctx, nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) require.NoError(t, err) wantBools := []bool{true, true} if !reflect.DeepEqual(got, wantBools) { @@ -262,8 +271,9 @@ func TestConsistentLookupCreateSimple(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, { @@ -275,6 +285,7 @@ func TestConsistentLookupCreateSimple(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0), (:fromc1_1, :fromc2_1, :toc_1) [{fromc1_0 1} {fromc1_1 3} {fromc2_0 2} {fromc2_1 4} {toc_0 test1} {toc_1 test2}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenRecreate(t *testing.T) { @@ -283,8 +294,9 @@ func TestConsistentLookupCreateThenRecreate(t *testing.T) { vc.AddResult(nil, sqlerror.NewSQLError(sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry")) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false); err != nil { @@ -295,6 +307,7 @@ func TestConsistentLookupCreateThenRecreate(t *testing.T) { "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc test1}] 
false", "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1, :fromc2, :toc) [{fromc1 1} {fromc2 2} {toc test1}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenUpdate(t *testing.T) { @@ -304,8 +317,9 @@ func TestConsistentLookupCreateThenUpdate(t *testing.T) { vc.AddResult(makeTestResult(1), nil) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false); err != nil { @@ -317,6 +331,7 @@ func TestConsistentLookupCreateThenUpdate(t *testing.T) { "ExecuteKeyspaceID select fc1 from `dot.t1` where fc1 = :fromc1 and fc2 = :fromc2 lock in share mode [{fromc1 1} {fromc2 2} {toc test1}] false", "ExecutePre update t set toc=:toc where fromc1 = :fromc1 and fromc2 = :fromc2 [{fromc1 1} {fromc2 2} {toc test1}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenSkipUpdate(t *testing.T) { @@ -326,8 +341,9 @@ func TestConsistentLookupCreateThenSkipUpdate(t *testing.T) { vc.AddResult(makeTestResult(1), nil) vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("1")}, false); err != nil { @@ -338,6 +354,7 @@ func TestConsistentLookupCreateThenSkipUpdate(t *testing.T) { "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc 1}] false", "ExecuteKeyspaceID select fc1 from `dot.t1` where fc1 = :fromc1 and fc2 = :fromc2 lock in share mode [{fromc1 1} {fromc2 2} {toc 1}] false", }) + vc.verifyContext(t, ctx) } func 
TestConsistentLookupCreateThenDupkey(t *testing.T) { @@ -347,8 +364,9 @@ func TestConsistentLookupCreateThenDupkey(t *testing.T) { vc.AddResult(makeTestResult(1), nil) vc.AddResult(makeTestResult(1), nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false) @@ -359,14 +377,16 @@ func TestConsistentLookupCreateThenDupkey(t *testing.T) { "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc test1}] false", "ExecuteKeyspaceID select fc1 from `dot.t1` where fc1 = :fromc1 and fc2 = :fromc2 lock in share mode [{fromc1 1} {fromc2 2} {toc test1}] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateNonDupError(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(nil, errors.New("general error")) + ctx := newTestContext() - err := lookup.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{ + err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false) @@ -377,6 +397,7 @@ func TestConsistentLookupCreateNonDupError(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0) [{fromc1_0 1} {fromc2_0 2} {toc_0 test1}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupCreateThenBadRows(t *testing.T) { @@ -384,8 +405,9 @@ func TestConsistentLookupCreateThenBadRows(t *testing.T) { vc := &loggingVCursor{} vc.AddResult(nil, vterrors.New(vtrpcpb.Code_ALREADY_EXISTS, "(errno 1062) (sqlstate 23000) Duplicate entry")) vc.AddResult(makeTestResult(2), nil) + ctx := newTestContext() - err := lookup.(Lookup).Create(context.Background(), vc, 
[][]sqltypes.Value{{ + err := lookup.(Lookup).Create(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, [][]byte{[]byte("test1")}, false) @@ -397,14 +419,16 @@ func TestConsistentLookupCreateThenBadRows(t *testing.T) { "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0) [{fromc1_0 1} {fromc2_0 2} {toc_0 test1}] true", "ExecutePre select toc from t where fromc1 = :fromc1 and fromc2 = :fromc2 for update [{fromc1 1} {fromc2 2} {toc test1}] false", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupDelete(t *testing.T) { lookup := createConsistentLookup(t, "consistent_lookup", false) vc := &loggingVCursor{} vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{ + if err := lookup.(Lookup).Delete(ctx, vc, [][]sqltypes.Value{{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }}, []byte("test")); err != nil { @@ -413,6 +437,7 @@ func TestConsistentLookupDelete(t *testing.T) { vc.verifyLog(t, []string{ "ExecutePost delete from t where fromc1 = :fromc1 and fromc2 = :fromc2 and toc = :toc [{fromc1 1} {fromc2 2} {toc test}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupUpdate(t *testing.T) { @@ -420,8 +445,9 @@ func TestConsistentLookupUpdate(t *testing.T) { vc := &loggingVCursor{} vc.AddResult(&sqltypes.Result{}, nil) vc.AddResult(&sqltypes.Result{}, nil) + ctx := newTestContext() - if err := lookup.(Lookup).Update(context.Background(), vc, []sqltypes.Value{ + if err := lookup.(Lookup).Update(ctx, vc, []sqltypes.Value{ sqltypes.NewInt64(1), sqltypes.NewInt64(2), }, []byte("test"), []sqltypes.Value{ @@ -434,6 +460,7 @@ func TestConsistentLookupUpdate(t *testing.T) { "ExecutePost delete from t where fromc1 = :fromc1 and fromc2 = :fromc2 and toc = :toc [{fromc1 1} {fromc2 2} {toc test}] true", "ExecutePre insert into t(fromc1, fromc2, toc) values(:fromc1_0, :fromc2_0, :toc_0) [{fromc1_0 3} {fromc2_0 4} {toc_0 
test}] true", }) + vc.verifyContext(t, ctx) } func TestConsistentLookupNoUpdate(t *testing.T) { @@ -510,13 +537,19 @@ func createConsistentLookup(t *testing.T, name string, writeOnly bool) SingleCol return l.(SingleColumn) } +func newTestContext() context.Context { + type testContextKey string // keep static checks from complaining about built-in types as context keys + return context.WithValue(context.Background(), (testContextKey)("test"), "foo") +} + var _ VCursor = (*loggingVCursor)(nil) type loggingVCursor struct { - results []*sqltypes.Result - errors []error - index int - log []string + results []*sqltypes.Result + errors []error + index int + log []string + contexts []context.Context } func (vc *loggingVCursor) LookupRowLockShardSession() vtgatepb.CommitOrder { @@ -528,7 +561,11 @@ func (vc *loggingVCursor) InTransactionAndIsDML() bool { } func (vc *loggingVCursor) ConnCollation() collations.ID { - return collations.Default() + return vc.Environment().CollationEnv().DefaultConnectionCharset() +} + +func (vc *loggingVCursor) Environment() *vtenv.Environment { + return vtenv.NewTestEnv() } type bv struct { @@ -553,14 +590,14 @@ func (vc *loggingVCursor) Execute(ctx context.Context, method string, query stri case vtgatepb.CommitOrder_AUTOCOMMIT: name = "ExecuteAutocommit" } - return vc.execute(name, query, bindvars, rollbackOnError) + return vc.execute(ctx, name, query, bindvars, rollbackOnError) } func (vc *loggingVCursor) ExecuteKeyspaceID(ctx context.Context, keyspace string, ksid []byte, query string, bindVars map[string]*querypb.BindVariable, rollbackOnError, autocommit bool) (*sqltypes.Result, error) { - return vc.execute("ExecuteKeyspaceID", query, bindVars, rollbackOnError) + return vc.execute(ctx, "ExecuteKeyspaceID", query, bindVars, rollbackOnError) } -func (vc *loggingVCursor) execute(method string, query string, bindvars map[string]*querypb.BindVariable, rollbackOnError bool) (*sqltypes.Result, error) { +func (vc *loggingVCursor) execute(ctx 
context.Context, method string, query string, bindvars map[string]*querypb.BindVariable, rollbackOnError bool) (*sqltypes.Result, error) { if vc.index >= len(vc.results) { return nil, fmt.Errorf("ran out of results to return: %s", query) } @@ -570,6 +607,7 @@ func (vc *loggingVCursor) execute(method string, query string, bindvars map[stri } sort.Slice(bvl, func(i, j int) bool { return bvl[i].Name < bvl[j].Name }) vc.log = append(vc.log, fmt.Sprintf("%s %s %v %v", method, query, bvl, rollbackOnError)) + vc.contexts = append(vc.contexts, ctx) idx := vc.index vc.index++ if vc.errors[idx] != nil { @@ -593,6 +631,15 @@ func (vc *loggingVCursor) verifyLog(t *testing.T, want []string) { } } +func (vc *loggingVCursor) verifyContext(t *testing.T, want context.Context) { + t.Helper() + for i, got := range vc.contexts { + if got != want { + t.Errorf("context(%d):\ngot: %v\nwant: %v", i, got, want) + } + } +} + // create lookup result with one to one mapping func makeTestResult(numRows int) *sqltypes.Result { result := &sqltypes.Result{ diff --git a/go/vt/vtgate/vindexes/foreign_keys.go b/go/vt/vtgate/vindexes/foreign_keys.go index db984462b25..275a0674998 100644 --- a/go/vt/vtgate/vindexes/foreign_keys.go +++ b/go/vt/vtgate/vindexes/foreign_keys.go @@ -46,13 +46,13 @@ func (fk *ParentFKInfo) MarshalJSON() ([]byte, error) { func (fk *ParentFKInfo) String(childTable *Table) string { var str strings.Builder - str.WriteString(childTable.String()) + str.WriteString(sqlparser.String(childTable.GetTableName())) for _, column := range fk.ChildColumns { - str.WriteString(column.String()) + str.WriteString("|" + sqlparser.String(column)) } - str.WriteString(fk.Table.String()) + str.WriteString("||" + sqlparser.String(fk.Table.GetTableName())) for _, column := range fk.ParentColumns { - str.WriteString(column.String()) + str.WriteString("|" + sqlparser.String(column)) } return str.String() } @@ -91,13 +91,13 @@ func (fk *ChildFKInfo) MarshalJSON() ([]byte, error) { func (fk 
*ChildFKInfo) String(parentTable *Table) string { var str strings.Builder - str.WriteString(fk.Table.String()) + str.WriteString(sqlparser.String(fk.Table.GetTableName())) for _, column := range fk.ChildColumns { - str.WriteString(column.String()) + str.WriteString("|" + sqlparser.String(column)) } - str.WriteString(parentTable.String()) + str.WriteString("||" + sqlparser.String(parentTable.GetTableName())) for _, column := range fk.ParentColumns { - str.WriteString(column.String()) + str.WriteString("|" + sqlparser.String(column)) } return str.String() } @@ -144,3 +144,33 @@ func (vschema *VSchema) AddForeignKey(ksname, childTableName string, fkConstrain cTbl.ParentForeignKeys = append(cTbl.ParentForeignKeys, NewParentFkInfo(pTbl, fkConstraint)) return nil } + +// AddPrimaryKey is for testing only. +func (vschema *VSchema) AddPrimaryKey(ksname, tblName string, cols []string) error { + ks, ok := vschema.Keyspaces[ksname] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", ksname) + } + tbl, ok := ks.Tables[tblName] + if !ok { + return fmt.Errorf("table %s not found in keyspace %s", tblName, ksname) + } + for _, col := range cols { + tbl.PrimaryKey = append(tbl.PrimaryKey, sqlparser.NewIdentifierCI(col)) + } + return nil +} + +// AddUniqueKey is for testing only. 
+func (vschema *VSchema) AddUniqueKey(ksname, tblName string, exprs sqlparser.Exprs) error { + ks, ok := vschema.Keyspaces[ksname] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", ksname) + } + tbl, ok := ks.Tables[tblName] + if !ok { + return fmt.Errorf("table %s not found in keyspace %s", tblName, ksname) + } + tbl.UniqueKeys = append(tbl.UniqueKeys, exprs) + return nil +} diff --git a/go/vt/vtgate/vindexes/lookup.go b/go/vt/vtgate/vindexes/lookup.go index b3e14fa01f6..33462470010 100644 --- a/go/vt/vtgate/vindexes/lookup.go +++ b/go/vt/vtgate/vindexes/lookup.go @@ -181,6 +181,11 @@ func (ln *LookupNonUnique) MarshalJSON() ([]byte, error) { return json.Marshal(ln.lkp) } +// IsBackfilling implements the LookupBackfill interface +func (ln *LookupNonUnique) IsBackfilling() bool { + return ln.writeOnly +} + // Query implements the LookupPlanable interface func (ln *LookupNonUnique) Query() (selQuery string, arguments []string) { return ln.lkp.query() diff --git a/go/vt/vtgate/vindexes/lookup_hash.go b/go/vt/vtgate/vindexes/lookup_hash.go index de3d078f556..28f38942afa 100644 --- a/go/vt/vtgate/vindexes/lookup_hash.go +++ b/go/vt/vtgate/vindexes/lookup_hash.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/key" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/vterrors" ) const ( @@ -52,7 +53,7 @@ func init() { Register("lookup_hash_unique", newLookupHashUnique) } -//==================================================================== +// ==================================================================== // LookupHash defines a vindex that uses a lookup table. // The table is expected to define the id column as unique. 
It's @@ -205,7 +206,7 @@ func (lh *LookupHash) Verify(ctx context.Context, vcursor VCursor, ids []sqltype values, err := unhashList(ksids) if err != nil { - return nil, fmt.Errorf("lookup.Verify.vunhash: %v", err) + return nil, vterrors.Wrap(err, "lookup.Verify.vunhash") } return lh.lkp.Verify(ctx, vcursor, ids, values) } @@ -214,7 +215,7 @@ func (lh *LookupHash) Verify(ctx context.Context, vcursor VCursor, ids []sqltype func (lh *LookupHash) Create(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte, ignoreMode bool) error { values, err := unhashList(ksids) if err != nil { - return fmt.Errorf("lookup.Create.vunhash: %v", err) + return vterrors.Wrap(err, "lookup.Create.vunhash") } return lh.lkp.Create(ctx, vcursor, rowsColValues, values, ignoreMode) } @@ -223,7 +224,7 @@ func (lh *LookupHash) Create(ctx context.Context, vcursor VCursor, rowsColValues func (lh *LookupHash) Update(ctx context.Context, vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { v, err := vunhash(ksid) if err != nil { - return fmt.Errorf("lookup.Update.vunhash: %v", err) + return vterrors.Wrap(err, "lookup.Update.vunhash") } return lh.lkp.Update(ctx, vcursor, oldValues, ksid, sqltypes.NewUint64(v), newValues) } @@ -232,7 +233,7 @@ func (lh *LookupHash) Update(ctx context.Context, vcursor VCursor, oldValues []s func (lh *LookupHash) Delete(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value, ksid []byte) error { v, err := vunhash(ksid) if err != nil { - return fmt.Errorf("lookup.Delete.vunhash: %v", err) + return vterrors.Wrap(err, "lookup.Delete.vunhash") } return lh.lkp.Delete(ctx, vcursor, rowsColValues, sqltypes.NewUint64(v), vtgatepb.CommitOrder_NORMAL) } @@ -242,7 +243,7 @@ func (lh *LookupHash) MarshalJSON() ([]byte, error) { return json.Marshal(lh.lkp) } -// UnknownParams satisifes the ParamValidating interface. +// UnknownParams satisfies the ParamValidating interface. 
func (lh *LookupHash) UnknownParams() []string { return lh.unknownParams } @@ -260,12 +261,12 @@ func unhashList(ksids [][]byte) ([]sqltypes.Value, error) { return values, nil } -//==================================================================== +// ==================================================================== // LookupHashUnique defines a vindex that uses a lookup table. // The table is expected to define the id column as unique. It's // Unique and a Lookup. -// Warning: This Vindex is being depcreated in favor of LookupUnique +// Warning: This Vindex is being deprecated in favor of LookupUnique type LookupHashUnique struct { name string writeOnly bool @@ -383,7 +384,7 @@ func (lhu *LookupHashUnique) Verify(ctx context.Context, vcursor VCursor, ids [] values, err := unhashList(ksids) if err != nil { - return nil, fmt.Errorf("lookup.Verify.vunhash: %v", err) + return nil, vterrors.Wrap(err, "lookup.Verify.vunhash") } return lhu.lkp.Verify(ctx, vcursor, ids, values) } @@ -392,7 +393,7 @@ func (lhu *LookupHashUnique) Verify(ctx context.Context, vcursor VCursor, ids [] func (lhu *LookupHashUnique) Create(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte, ignoreMode bool) error { values, err := unhashList(ksids) if err != nil { - return fmt.Errorf("lookup.Create.vunhash: %v", err) + return vterrors.Wrap(err, "lookup.Create.vunhash") } return lhu.lkp.Create(ctx, vcursor, rowsColValues, values, ignoreMode) } @@ -401,7 +402,7 @@ func (lhu *LookupHashUnique) Create(ctx context.Context, vcursor VCursor, rowsCo func (lhu *LookupHashUnique) Delete(ctx context.Context, vcursor VCursor, rowsColValues [][]sqltypes.Value, ksid []byte) error { v, err := vunhash(ksid) if err != nil { - return fmt.Errorf("lookup.Delete.vunhash: %v", err) + return vterrors.Wrap(err, "lookup.Delete.vunhash") } return lhu.lkp.Delete(ctx, vcursor, rowsColValues, sqltypes.NewUint64(v), vtgatepb.CommitOrder_NORMAL) } @@ -410,7 +411,7 @@ func (lhu 
*LookupHashUnique) Delete(ctx context.Context, vcursor VCursor, rowsCo func (lhu *LookupHashUnique) Update(ctx context.Context, vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { v, err := vunhash(ksid) if err != nil { - return fmt.Errorf("lookup.Update.vunhash: %v", err) + return vterrors.Wrap(err, "lookup.Update.vunhash") } return lhu.lkp.Update(ctx, vcursor, oldValues, ksid, sqltypes.NewUint64(v), newValues) } diff --git a/go/vt/vtgate/vindexes/lookup_hash_test.go b/go/vt/vtgate/vindexes/lookup_hash_test.go index 69bff9f6f34..fd07f6ab7d8 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_test.go @@ -236,10 +236,7 @@ func TestLookupHashCreate(t *testing.T) { } err = lookuphash.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NULL}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) - want := "lookup.Create: input has null values: row: 0, col: 0" - if err == nil || err.Error() != want { - t.Errorf("lookuphash.Create(NULL) err: %v, want %s", err, want) - } + require.ErrorContains(t, err, "VT03028: Column 'fromc' cannot be null on row 0, col 0") vc.queries = nil lookuphash.(*LookupHash).lkp.IgnoreNulls = true @@ -250,10 +247,7 @@ func TestLookupHashCreate(t *testing.T) { } err = lookuphash.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("bogus")}, false /* ignoreMode */) - want = "lookup.Create.vunhash: invalid keyspace id: 626f677573" - if err == nil || err.Error() != want { - t.Errorf("lookuphash.Create(bogus) err: %v, want %s", err, want) - } + require.ErrorContains(t, err, "lookup.Create.vunhash: invalid keyspace id: 626f677573") } func TestLookupHashDelete(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup_internal.go b/go/vt/vtgate/vindexes/lookup_internal.go index 673b3fcb64b..5e224259d1d 100644 --- a/go/vt/vtgate/vindexes/lookup_internal.go +++ 
b/go/vt/vtgate/vindexes/lookup_internal.go @@ -24,13 +24,11 @@ import ( "strconv" "strings" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) const ( @@ -143,7 +141,7 @@ func (lkp *lookupInternal) Init(lookupQueryParams map[string]string, autocommit, // Lookup performs a lookup for the ids. func (lkp *lookupInternal) Lookup(ctx context.Context, vcursor VCursor, ids []sqltypes.Value, co vtgatepb.CommitOrder) ([]*sqltypes.Result, error) { if vcursor == nil { - return nil, fmt.Errorf("cannot perform lookup: no vcursor provided") + return nil, vterrors.VT13001("cannot perform lookup: no vcursor provided") } results := make([]*sqltypes.Result, 0, len(ids)) if lkp.Autocommit { @@ -159,14 +157,14 @@ func (lkp *lookupInternal) Lookup(ctx context.Context, vcursor VCursor, ids []sq // for integral types, batch query all ids and then map them back to the input order vars, err := sqltypes.BuildBindVariable(ids) if err != nil { - return nil, fmt.Errorf("lookup.Map: %v", err) + return nil, err } bindVars := map[string]*querypb.BindVariable{ lkp.FromColumns[0]: vars, } result, err := vcursor.Execute(ctx, "VindexLookup", sel, bindVars, false /* rollbackOnError */, co) if err != nil { - return nil, fmt.Errorf("lookup.Map: %v", err) + return nil, vterrors.Wrap(err, "lookup.Map") } resultMap := make(map[string][][]sqltypes.Value) for _, row := range result.Rows { @@ -183,7 +181,7 @@ func (lkp *lookupInternal) Lookup(ctx context.Context, vcursor VCursor, ids []sq for _, id := range ids { vars, err := sqltypes.BuildBindVariable([]any{id}) if err != nil { - return nil, fmt.Errorf("lookup.Map: %v", err) + return nil, err } bindVars := map[string]*querypb.BindVariable{ lkp.FromColumns[0]: vars, @@ -191,7 +189,7 @@ func (lkp *lookupInternal) Lookup(ctx context.Context, vcursor VCursor, ids []sq 
var result *sqltypes.Result result, err = vcursor.Execute(ctx, "VindexLookup", sel, bindVars, false /* rollbackOnError */, co) if err != nil { - return nil, fmt.Errorf("lookup.Map: %v", err) + return nil, vterrors.Wrap(err, "lookup.Map") } rows := make([][]sqltypes.Value, 0, len(result.Rows)) for _, row := range result.Rows { @@ -223,7 +221,7 @@ func (lkp *lookupInternal) VerifyCustom(ctx context.Context, vcursor VCursor, id } result, err := vcursor.Execute(ctx, "VindexVerify", lkp.ver, bindVars, false /* rollbackOnError */, co) if err != nil { - return nil, fmt.Errorf("lookup.Verify: %v", err) + return nil, vterrors.Wrap(err, "lookup.Verify") } out[i] = (len(result.Rows) != 0) } @@ -290,7 +288,8 @@ nextRow: for j, col := range row { if col.IsNull() { if !lkp.IgnoreNulls { - return fmt.Errorf("lookup.Create: input has null values: row: %d, col: %d", i, j) + cols := strings.Join(lkp.FromColumns, ",") + return vterrors.VT03028(cols, i, j) } continue nextRow } @@ -304,7 +303,7 @@ nextRow: // We only need to check the first row. Number of cols per row // is guaranteed by the engine to be uniform. 
if len(trimmedRowsCols[0]) != len(lkp.FromColumns) { - return fmt.Errorf("lookup.Create: column vindex count does not match the columns in the lookup: %d vs %v", len(trimmedRowsCols[0]), lkp.FromColumns) + return vterrors.VT03030(lkp.FromColumns, len(trimmedRowsCols[0])) } sort.Sort(&sorter{rowsColValues: trimmedRowsCols, toValues: trimmedToValues}) @@ -312,16 +311,16 @@ nextRow: if lkp.MultiShardAutocommit { insStmt = "insert /*vt+ MULTI_SHARD_AUTOCOMMIT=1 */" } - buf := new(bytes.Buffer) + var buf strings.Builder if ignoreMode { - fmt.Fprintf(buf, "%s ignore into %s(", insStmt, lkp.Table) + fmt.Fprintf(&buf, "%s ignore into %s(", insStmt, lkp.Table) } else { - fmt.Fprintf(buf, "%s into %s(", insStmt, lkp.Table) + fmt.Fprintf(&buf, "%s into %s(", insStmt, lkp.Table) } for _, col := range lkp.FromColumns { - fmt.Fprintf(buf, "%s, ", col) + fmt.Fprintf(&buf, "%s, ", col) } - fmt.Fprintf(buf, "%s) values(", lkp.To) + fmt.Fprintf(&buf, "%s) values(", lkp.To) bindVars := make(map[string]*querypb.BindVariable, 2*len(trimmedRowsCols)) for rowIdx := range trimmedToValues { @@ -340,15 +339,15 @@ nextRow: } if lkp.Upsert { - fmt.Fprintf(buf, " on duplicate key update ") + fmt.Fprintf(&buf, " on duplicate key update ") for _, col := range lkp.FromColumns { - fmt.Fprintf(buf, "%s=values(%s), ", col, col) + fmt.Fprintf(&buf, "%s=values(%s), ", col, col) } - fmt.Fprintf(buf, "%s=values(%s)", lkp.To, lkp.To) + fmt.Fprintf(&buf, "%s=values(%s)", lkp.To, lkp.To) } if _, err := vcursor.Execute(ctx, "VindexCreate", buf.String(), bindVars, true /* rollbackOnError */, co); err != nil { - return fmt.Errorf("lookup.Create: %v", err) + return vterrors.Wrap(err, "lookup.Create") } return nil } @@ -356,7 +355,7 @@ nextRow: // Delete deletes the association between ids and value. // rowsColValues contains all the rows that are being deleted. // For each row, we store the value of each column defined in the vindex. -// value cointains the keyspace_id of the vindex entry being deleted. 
+// value contains the keyspace_id of the vindex entry being deleted. // // Given the following information in a vindex table with two columns: // @@ -380,7 +379,7 @@ func (lkp *lookupInternal) Delete(ctx context.Context, vcursor VCursor, rowsColV // We only need to check the first row. Number of cols per row // is guaranteed by the engine to be uniform. if len(rowsColValues[0]) != len(lkp.FromColumns) { - return fmt.Errorf("lookup.Delete: column vindex count does not match the columns in the lookup: %d vs %v", len(rowsColValues[0]), lkp.FromColumns) + return vterrors.VT03030(lkp.FromColumns, len(rowsColValues[0])) } for _, column := range rowsColValues { bindVars := make(map[string]*querypb.BindVariable, len(rowsColValues)) @@ -390,7 +389,7 @@ func (lkp *lookupInternal) Delete(ctx context.Context, vcursor VCursor, rowsColV bindVars[lkp.To] = sqltypes.ValueBindVariable(value) _, err := vcursor.Execute(ctx, "VindexDelete", lkp.del, bindVars, true /* rollbackOnError */, co) if err != nil { - return fmt.Errorf("lookup.Delete: %v", err) + return vterrors.Wrap(err, "lookup.Delete") } } return nil @@ -405,7 +404,7 @@ func (lkp *lookupInternal) Update(ctx context.Context, vcursor VCursor, oldValue } func (lkp *lookupInternal) initDelStmt() string { - var delBuffer bytes.Buffer + var delBuffer strings.Builder fmt.Fprintf(&delBuffer, "delete from %s where ", lkp.Table) for colIdx, column := range lkp.FromColumns { if colIdx != 0 { diff --git a/go/vt/vtgate/vindexes/lookup_test.go b/go/vt/vtgate/vindexes/lookup_test.go index a59fcbf1da9..8041a395a8e 100644 --- a/go/vt/vtgate/vindexes/lookup_test.go +++ b/go/vt/vtgate/vindexes/lookup_test.go @@ -35,6 +35,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" ) @@ -115,7 +116,11 @@ func (vc *vcursor) execute(query string, bindvars map[string]*querypb.BindVariab 
} func (vc *vcursor) ConnCollation() collations.ID { - return collations.Default() + return vc.Environment().CollationEnv().DefaultConnectionCharset() +} + +func (vc *vcursor) Environment() *vtenv.Environment { + return vtenv.NewTestEnv() } func lookupCreateVindexTestCase( @@ -369,7 +374,7 @@ func TestLookupNonUniqueNew(t *testing.T) { func TestLookupNilVCursor(t *testing.T) { lnu := createLookup(t, "lookup", false /* writeOnly */) _, err := lnu.Map(context.Background(), nil, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) - require.EqualError(t, err, "cannot perform lookup: no vcursor provided") + require.EqualError(t, err, "VT13001: [BUG] cannot perform lookup: no vcursor provided") } func TestLookupNonUniqueMap(t *testing.T) { @@ -620,7 +625,7 @@ func TestLookupNonUniqueCreate(t *testing.T) { // With ignore_nulls off err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(2)}, {sqltypes.NULL}}, [][]byte{[]byte("test2"), []byte("test1")}, true /* ignoreMode */) - assert.EqualError(t, err, "lookup.Create: input has null values: row: 1, col: 0") + assert.EqualError(t, err, "VT03028: Column 'fromc' cannot be null on row 1, col 0") // With ignore_nulls on vc.queries = nil @@ -644,7 +649,7 @@ func TestLookupNonUniqueCreate(t *testing.T) { // Test column mismatch. err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) - assert.EqualError(t, err, "lookup.Create: column vindex count does not match the columns in the lookup: 2 vs [fromc]") + assert.EqualError(t, err, "VT03030: lookup column count does not match value count with the row (columns, count): ([fromc], 2)") } func TestLookupNonUniqueCreateAutocommit(t *testing.T) { @@ -710,7 +715,7 @@ func TestLookupNonUniqueDelete(t *testing.T) { // Test column count fail. 
err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) - assert.EqualError(t, err, "lookup.Delete: column vindex count does not match the columns in the lookup: 2 vs [fromc]") + assert.EqualError(t, err, "VT03030: lookup column count does not match value count with the row (columns, count): ([fromc], 2)") } func TestLookupNonUniqueDeleteAutocommit(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go index 070aee90305..f7af93187da 100644 --- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go +++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash.go @@ -56,7 +56,7 @@ func init() { // LookupUnicodeLooseMD5Hash defines a vindex that uses a lookup table. // The table is expected to define the id column as unique. It's // NonUnique and a Lookup and stores the from value in a hashed form. -// Warning: This Vindex is being depcreated in favor of Lookup +// Warning: This Vindex is being deprecated in favor of Lookup type LookupUnicodeLooseMD5Hash struct { name string writeOnly bool @@ -246,7 +246,7 @@ func (lh *LookupUnicodeLooseMD5Hash) UnknownParams() []string { // LookupUnicodeLooseMD5HashUnique defines a vindex that uses a lookup table. // The table is expected to define the id column as unique. It's // Unique and a Lookup and will store the from value in a hashed format. 
-// Warning: This Vindex is being depcreated in favor of LookupUnique +// Warning: This Vindex is being deprecated in favor of LookupUnique type LookupUnicodeLooseMD5HashUnique struct { name string writeOnly bool diff --git a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go index 989458ccc13..c0e4611d684 100644 --- a/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_unicodeloosemd5_hash_test.go @@ -320,10 +320,7 @@ func TestLookupUnicodeLooseMD5HashCreate(t *testing.T) { // Test column mismatch. err = lnu.(Lookup).Create(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(10), sqltypes.NewInt64(20)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false) - want = "lookup.Create: column vindex count does not match the columns in the lookup: 2 vs [fromc]" - if err == nil || err.Error() != want { - t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) - } + require.ErrorContains(t, err, "VT03030: lookup column count does not match value count with the row (columns, count): ([fromc], 2)") } func TestLookupUnicodeLooseMD5HashCreateAutocommit(t *testing.T) { @@ -443,10 +440,7 @@ func TestLookupUnicodeLooseMD5HashDelete(t *testing.T) { // Test column count fail. 
err = lnu.(Lookup).Delete(context.Background(), vc, [][]sqltypes.Value{{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}}, []byte("\x16k@\xb4J\xbaK\xd6")) - want = "lookup.Delete: column vindex count does not match the columns in the lookup: 2 vs [fromc]" - if err == nil || err.Error() != want { - t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) - } + require.ErrorContains(t, err, "VT03030: lookup column count does not match value count with the row (columns, count): ([fromc], 2)") } func TestLookupUnicodeLooseMD5HashDeleteAutocommit(t *testing.T) { diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go index a5295681248..e3d5a6d7e4d 100644 --- a/go/vt/vtgate/vindexes/vindex.go +++ b/go/vt/vtgate/vindexes/vindex.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" @@ -44,6 +45,7 @@ type ( InTransactionAndIsDML() bool LookupRowLockShardSession() vtgatepb.CommitOrder ConnCollation() collations.ID + Environment() *vtenv.Environment } // Vindex defines the interface required to register a vindex. 
diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index 4e9f527eb83..020b07f7073 100644 --- a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -25,17 +25,19 @@ import ( "strings" "time" - "vitess.io/vitess/go/sqlescape" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/ptr" "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/sqlparser" - querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) // TabletTypeSuffix maps the tablet type to its suffix string. @@ -53,6 +55,7 @@ var TabletTypeSuffix = map[topodatapb.TabletType]string{ // The following constants represent table types. const ( + TypeTable = "" TypeSequence = "sequence" TypeReference = "reference" ) @@ -66,10 +69,11 @@ type VSchema struct { // table is uniquely named, the value will be the qualified Table object // with the keyspace where this table exists. If multiple keyspaces have a // table with the same name, the value will be a `nil`. - globalTables map[string]*Table - uniqueVindexes map[string]Vindex - Keyspaces map[string]*KeyspaceSchema `json:"keyspaces"` - ShardRoutingRules map[string]string `json:"shard_routing_rules"` + globalTables map[string]*Table + uniqueVindexes map[string]Vindex + Keyspaces map[string]*KeyspaceSchema `json:"keyspaces"` + ShardRoutingRules map[string]string `json:"shard_routing_rules"` + KeyspaceRoutingRules map[string]string `json:"keyspace_routing_rules"` // created is the time when the VSchema object was created. Used to detect if a cached // copy of the vschema is stale. 
created time.Time @@ -118,6 +122,12 @@ type Table struct { ChildForeignKeys []ChildFKInfo `json:"child_foreign_keys,omitempty"` ParentForeignKeys []ParentFKInfo `json:"parent_foreign_keys,omitempty"` + + // index can be columns or expression. + // For Primary key, functional indexes are not allowed, therefore it will only be columns. + // MySQL error message: ERROR 3756 (HY000): The primary key cannot be a functional index + PrimaryKey sqlparser.Columns `json:"primary_key,omitempty"` + UniqueKeys []sqlparser.Exprs `json:"unique_keys,omitempty"` } // GetTableName gets the sqlparser.TableName for the vindex Table. @@ -148,6 +158,7 @@ type ColumnVindex struct { type TableInfo struct { Columns []Column ForeignKeys []*sqlparser.ForeignKeyDefinition + Indexes []*sqlparser.IndexDefinition } // IsUnique is used to tell whether the ColumnVindex @@ -178,39 +189,75 @@ type Column struct { Name sqlparser.IdentifierCI `json:"name"` Type querypb.Type `json:"type"` CollationName string `json:"collation_name"` + Default sqlparser.Expr `json:"default,omitempty"` // Invisible marks this as a column that will not be automatically included in `*` projections - Invisible bool `json:"invisible"` + Invisible bool `json:"invisible,omitempty"` + Size int32 `json:"size,omitempty"` + Scale int32 `json:"scale,omitempty"` + Nullable bool `json:"nullable,omitempty"` + // Values contains the list of values for enum and set types. + Values []string `json:"values,omitempty"` } // MarshalJSON returns a JSON representation of Column. 
func (col *Column) MarshalJSON() ([]byte, error) { - return json.Marshal(struct { - Name string `json:"name"` - Type string `json:"type,omitempty"` + cj := struct { + Name string `json:"name"` + Type string `json:"type,omitempty"` + Invisible bool `json:"invisible,omitempty"` + Default string `json:"default,omitempty"` + Size int32 `json:"size,omitempty"` + Scale int32 `json:"scale,omitempty"` + Nullable bool `json:"nullable,omitempty"` + Values []string `json:"values,omitempty"` }{ - Name: col.Name.String(), - Type: querypb.Type_name[int32(col.Type)], - }) + Name: col.Name.String(), + Type: querypb.Type_name[int32(col.Type)], + Invisible: col.Invisible, + Size: col.Size, + Scale: col.Scale, + Nullable: col.Nullable, + Values: col.Values, + } + if col.Default != nil { + cj.Default = sqlparser.String(col.Default) + } + return json.Marshal(cj) +} + +func (col *Column) ToEvalengineType(collationEnv *collations.Environment) evalengine.Type { + var collation collations.ID + if sqltypes.IsText(col.Type) { + collation, _ = collationEnv.LookupID(col.CollationName) + } else { + collation = collations.CollationForType(col.Type, collationEnv.DefaultConnectionCharset()) + } + return evalengine.NewTypeEx(col.Type, collation, col.Nullable, col.Size, col.Scale, ptr.Of(evalengine.EnumSetValues(col.Values))) } // KeyspaceSchema contains the schema(table) for a keyspace. 
type KeyspaceSchema struct { - Keyspace *Keyspace - ForeignKeyMode vschemapb.Keyspace_ForeignKeyMode - Tables map[string]*Table - Vindexes map[string]Vindex - Views map[string]sqlparser.SelectStatement - Error error + Keyspace *Keyspace + ForeignKeyMode vschemapb.Keyspace_ForeignKeyMode + Tables map[string]*Table + Vindexes map[string]Vindex + Views map[string]sqlparser.SelectStatement + Error error + MultiTenantSpec *vschemapb.MultiTenantSpec + + // These are the UDFs that exist in the schema and are aggregations + AggregateUDFs []string } type ksJSON struct { - Sharded bool `json:"sharded,omitempty"` - ForeignKeyMode string `json:"foreignKeyMode,omitempty"` - Tables map[string]*Table `json:"tables,omitempty"` - Vindexes map[string]Vindex `json:"vindexes,omitempty"` - Views map[string]string `json:"views,omitempty"` - Error string `json:"error,omitempty"` + Sharded bool `json:"sharded,omitempty"` + ForeignKeyMode string `json:"foreignKeyMode,omitempty"` + Tables map[string]*Table `json:"tables,omitempty"` + Vindexes map[string]Vindex `json:"vindexes,omitempty"` + Views map[string]string `json:"views,omitempty"` + Error string `json:"error,omitempty"` + MultiTenantSpec *vschemapb.MultiTenantSpec `json:"multi_tenant_spec,omitempty"` } // findTable looks for the table with the requested tablename in the keyspace. @@ -239,10 +286,11 @@ func (ks *KeyspaceSchema) findTable( // MarshalJSON returns a JSON representation of KeyspaceSchema. func (ks *KeyspaceSchema) MarshalJSON() ([]byte, error) { ksJ := ksJSON{ - Sharded: ks.Keyspace.Sharded, - Tables: ks.Tables, - ForeignKeyMode: ks.ForeignKeyMode.String(), - Vindexes: ks.Vindexes, + Sharded: ks.Keyspace.Sharded, + Tables: ks.Tables, + ForeignKeyMode: ks.ForeignKeyMode.String(), + Vindexes: ks.Vindexes, + MultiTenantSpec: ks.MultiTenantSpec, } if ks.Error != nil { ksJ.Error = ks.Error.Error() @@ -274,7 +322,7 @@ func (source *Source) String() string { } // BuildVSchema builds a VSchema from a SrvVSchema. 
-func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { +func BuildVSchema(source *vschemapb.SrvVSchema, parser *sqlparser.Parser) (vschema *VSchema) { vschema = &VSchema{ RoutingRules: make(map[string]*RoutingRule), globalTables: make(map[string]*Table), @@ -282,22 +330,23 @@ func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { Keyspaces: make(map[string]*KeyspaceSchema), created: time.Now(), } - buildKeyspaces(source, vschema) + buildKeyspaces(source, vschema, parser) // buildGlobalTables before buildReferences so that buildReferences can // resolve sources which reference global tables. buildGlobalTables(source, vschema) buildReferences(source, vschema) - buildRoutingRule(source, vschema) + buildRoutingRule(source, vschema, parser) buildShardRoutingRule(source, vschema) + buildKeyspaceRoutingRule(source, vschema) // Resolve auto-increments after routing rules are built since sequence tables also obey routing rules. - resolveAutoIncrement(source, vschema) + resolveAutoIncrement(source, vschema, parser) return vschema } // BuildKeyspaceSchema builds the vschema portion for one keyspace. // The build ignores sequence references because those dependencies can // go cross-keyspace. -func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceSchema, error) { +func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string, parser *sqlparser.Parser) (*KeyspaceSchema, error) { if input == nil { input = &vschemapb.Keyspace{} } @@ -311,30 +360,31 @@ func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceS uniqueVindexes: make(map[string]Vindex), Keyspaces: make(map[string]*KeyspaceSchema), } - buildKeyspaces(formal, vschema) + buildKeyspaces(formal, vschema, parser) err := vschema.Keyspaces[keyspace].Error return vschema.Keyspaces[keyspace], err } // BuildKeyspace ensures that the keyspace vschema is valid. // External references (like sequence) are not validated. 
-func BuildKeyspace(input *vschemapb.Keyspace) (*KeyspaceSchema, error) { - return BuildKeyspaceSchema(input, "") +func BuildKeyspace(input *vschemapb.Keyspace, parser *sqlparser.Parser) (*KeyspaceSchema, error) { + return BuildKeyspaceSchema(input, "", parser) } -func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { +func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { for ksname, ks := range source.Keyspaces { ksvschema := &KeyspaceSchema{ Keyspace: &Keyspace{ Name: ksname, Sharded: ks.Sharded, }, - ForeignKeyMode: replaceUnspecifiedForeignKeyMode(ks.ForeignKeyMode), - Tables: make(map[string]*Table), - Vindexes: make(map[string]Vindex), + ForeignKeyMode: replaceUnspecifiedForeignKeyMode(ks.ForeignKeyMode), + Tables: make(map[string]*Table), + Vindexes: make(map[string]Vindex), + MultiTenantSpec: ks.MultiTenantSpec, } vschema.Keyspaces[ksname] = ksvschema - ksvschema.Error = buildTables(ks, vschema, ksvschema) + ksvschema.Error = buildTables(ks, vschema, ksvschema, parser) } } @@ -346,12 +396,14 @@ func replaceUnspecifiedForeignKeyMode(fkMode vschemapb.Keyspace_ForeignKeyMode) return fkMode } -func (vschema *VSchema) AddView(ksname string, viewName, query string) error { +// AddView adds a view to an existing keyspace in the VSchema. +// It's only used from tests. +func (vschema *VSchema) AddView(ksname, viewName, query string, parser *sqlparser.Parser) error { ks, ok := vschema.Keyspaces[ksname] if !ok { return fmt.Errorf("keyspace %s not found in vschema", ksname) } - ast, err := sqlparser.Parse(query) + ast, err := parser.Parse(query) if err != nil { return err } @@ -373,6 +425,18 @@ func (vschema *VSchema) AddView(ksname string, viewName, query string) error { return nil } +// AddUDF adds a UDF to an existing keyspace in the VSchema. +// It's only used from tests. 
+func (vschema *VSchema) AddUDF(ksname, udfName string) error { + ks, ok := vschema.Keyspaces[ksname] + if !ok { + return fmt.Errorf("keyspace %s not found in vschema", ksname) + } + + ks.AggregateUDFs = append(ks.AggregateUDFs, udfName) + return nil +} + func buildGlobalTables(source *vschemapb.SrvVSchema, vschema *VSchema) { for ksname, ks := range source.Keyspaces { ksvschema := vschema.Keyspaces[ksname] @@ -520,7 +584,7 @@ func buildKeyspaceReferences(vschema *VSchema, ksvschema *KeyspaceSchema) error return nil } -func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSchema) error { +func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSchema, parser *sqlparser.Parser) error { keyspace := ksvschema.Keyspace for vname, vindexInfo := range ks.Vindexes { vindex, err := CreateVindex(vindexInfo.Type, vname, vindexInfo.Params) @@ -612,8 +676,31 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc tname, ) } + var colDefault sqlparser.Expr + if col.Default != "" { + var err error + colDefault, err = parser.ParseExpr(col.Default) + if err != nil { + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, + "could not parse the '%s' column's default expression '%s' for table '%s'", col.Name, col.Default, tname) + } + } + nullable := true + if col.Nullable != nil { + nullable = *col.Nullable + } colNames[name.Lowered()] = true - t.Columns = append(t.Columns, Column{Name: name, Type: col.Type, Invisible: col.Invisible}) + t.Columns = append(t.Columns, Column{ + Name: name, + Type: col.Type, + CollationName: col.CollationName, + Default: colDefault, + Invisible: col.Invisible, + Size: col.Size, + Scale: col.Scale, + Nullable: nullable, + Values: col.Values, + }) } // Initialize ColumnVindexes. 
@@ -751,7 +838,7 @@ func (vschema *VSchema) addTableName(t *Table) { } } -func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { +func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { for ksname, ks := range source.Keyspaces { ksvschema := vschema.Keyspaces[ksname] for tname, table := range ks.Tables { @@ -759,7 +846,7 @@ func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { if t == nil || table.AutoIncrement == nil { continue } - seqks, seqtab, err := sqlparser.ParseTable(table.AutoIncrement.Sequence) + seqks, seqtab, err := parser.ParseTable(table.AutoIncrement.Sequence) var seq *Table if err == nil { // Ensure that sequence tables also obey routing rules. @@ -795,10 +882,16 @@ func escapeQualifiedTable(qualifiedTableName string) (string, error) { if err != nil { return "", err } - return fmt.Sprintf("%s.%s", - // unescape() first in case an already escaped string was passed - sqlescape.EscapeID(sqlescape.UnescapeID(keyspace)), - sqlescape.EscapeID(sqlescape.UnescapeID(tableName))), nil + // unescape() first in case an already escaped string was passed + keyspace, err = sqlescape.EnsureEscaped(keyspace) + if err != nil { + return "", err + } + tableName, err = sqlescape.EnsureEscaped(tableName) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", keyspace, tableName), nil } func extractTableParts(tableName string, allowUnqualified bool) (string, string, error) { @@ -835,7 +928,7 @@ func parseTable(tableName string) (sqlparser.TableName, error) { }, nil } -func buildRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema) { +func buildRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { var err error if source.RoutingRules == nil { return @@ -878,7 +971,7 @@ outer: continue outer } - toKeyspace, toTableName, err := sqlparser.ParseTable(toTable) + toKeyspace, toTableName, err := parser.ParseTable(toTable) if err != nil { 
vschema.RoutingRules[rule.FromTable] = &RoutingRule{ @@ -919,6 +1012,19 @@ func buildShardRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema) { } } +func buildKeyspaceRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema) { + vschema.KeyspaceRoutingRules = nil + sourceRules := source.GetKeyspaceRoutingRules().GetRules() + if len(sourceRules) == 0 { + return + } + rulesMap := make(map[string]string, len(sourceRules)) + for _, rr := range sourceRules { + rulesMap[rr.FromKeyspace] = rr.ToKeyspace + } + vschema.KeyspaceRoutingRules = rulesMap +} + // FindTable returns a pointer to the Table. If a keyspace is specified, only tables // from that keyspace are searched. If the specified keyspace is unsharded // and no tables matched, it's considered valid: FindTable will construct a table @@ -1029,8 +1135,33 @@ func (vschema *VSchema) FirstKeyspace() *Keyspace { return ks.Keyspace } +// findRoutedKeyspace checks if there is a keyspace routing rule for the given keyspace and tablet type. +func (vschema *VSchema) findRoutedKeyspace(keyspace string, tabletType topodatapb.TabletType) string { + if len(vschema.KeyspaceRoutingRules) == 0 { + return keyspace + } + tabletTypeSuffix := TabletTypeSuffix[tabletType] + if tabletTypeSuffix == "@primary" { + tabletTypeSuffix = "" + } + routedKeyspace, ok := vschema.KeyspaceRoutingRules[keyspace+tabletTypeSuffix] + if ok { + return routedKeyspace + } else { + if tabletTypeSuffix != "" { + // if it was @replica or @rdonly and had no route, default to the route for @primary + routedKeyspace, ok = vschema.KeyspaceRoutingRules[keyspace] + if ok { + return routedKeyspace + } + } + } + return keyspace +} + // FindRoutedTable finds a table checking the routing rules. func (vschema *VSchema) FindRoutedTable(keyspace, tablename string, tabletType topodatapb.TabletType) (*Table, error) { + keyspace = vschema.findRoutedKeyspace(keyspace, tabletType) qualified := tablename if keyspace != "" { qualified = keyspace + "." 
+ tablename @@ -1180,6 +1311,20 @@ func (vschema *VSchema) ResetCreated() { vschema.created = time.Time{} } +func (vschema *VSchema) GetAggregateUDFs() (udfs []string) { + seen := make(map[string]bool) + for _, ks := range vschema.Keyspaces { + for _, udf := range ks.AggregateUDFs { + if seen[udf] { + continue + } + seen[udf] = true + udfs = append(udfs, udf) + } + } + return +} + // ByCost provides the interface needed for ColumnVindexes to // be sorted by cost order. type ByCost []*ColumnVindex diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index a59ec78139d..40cba720a0c 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -235,7 +235,7 @@ func init() { } func buildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { - vs := BuildVSchema(source) + vs := BuildVSchema(source, sqlparser.NewTestParser()) if vs != nil { vs.ResetCreated() } @@ -247,10 +247,58 @@ func TestUnshardedVSchemaValid(t *testing.T) { Sharded: false, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), - }) + }, sqlparser.NewTestParser()) require.NoError(t, err) } +// TestMultiTenantAttribute verifies that the MultiTenantSpec attribute is updated in KeyspaceSchema. 
+func TestMultiTenantAttribute(t *testing.T) { + tests := []struct { + name string + multiTenantSpec *vschemapb.MultiTenantSpec + wanted *vschemapb.MultiTenantSpec + }{ + { + name: "Not Set", + }, { + name: "Empty MultiTenantSpec", + multiTenantSpec: &vschemapb.MultiTenantSpec{ + TenantIdColumnName: "", + }, + wanted: &vschemapb.MultiTenantSpec{}, + }, { + name: "String", + multiTenantSpec: &vschemapb.MultiTenantSpec{ + TenantIdColumnName: "tenant_id", + TenantIdColumnType: querypb.Type_VARCHAR, + }, + wanted: &vschemapb.MultiTenantSpec{ + TenantIdColumnName: "tenant_id", + TenantIdColumnType: querypb.Type_VARCHAR, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ksSchema, err := BuildKeyspace(&vschemapb.Keyspace{ + Sharded: false, + MultiTenantSpec: test.multiTenantSpec, + Vindexes: make(map[string]*vschemapb.Vindex), + Tables: make(map[string]*vschemapb.Table), + }, sqlparser.NewTestParser()) + require.NoError(t, err) + if test.multiTenantSpec == nil { + require.Empty(t, test.wanted) + } else { + require.NotNil(t, test.wanted) + require.Equal(t, test.wanted.TenantIdColumnName, ksSchema.MultiTenantSpec.TenantIdColumnName) + require.Equal(t, test.wanted.TenantIdColumnType, ksSchema.MultiTenantSpec.TenantIdColumnType) + } + + }) + } +} + func TestForeignKeyMode(t *testing.T) { tests := []struct { name string @@ -282,7 +330,7 @@ func TestForeignKeyMode(t *testing.T) { ForeignKeyMode: test.fkMode, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), - }) + }, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, test.wantedFkMode, ksSchema.ForeignKeyMode) }) @@ -297,7 +345,7 @@ func TestUnshardedVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": {}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, got.Keyspaces["unsharded"].Error) table, err := got.FindTable("unsharded", "t1") @@ -315,18 +363,22 @@ func 
TestVSchemaColumns(t *testing.T) { "unsharded": { Tables: map[string]*vschemapb.Table{ "t1": { - Columns: []*vschemapb.Column{{ - Name: "c1"}, { - Name: "c2", - Type: sqltypes.VarChar}}}}}}} - - got := BuildVSchema(&good) + Columns: []*vschemapb.Column{ + {Name: "c1"}, + {Name: "c2", Type: sqltypes.VarChar}, + {Name: "c3", Type: sqltypes.VarChar, Default: "''"}, + {Name: "c4", Type: sqltypes.TypeJSON, Default: "json_array()"}, + }}}}}} + + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, got.Keyspaces["unsharded"].Error) t1, err := got.FindTable("unsharded", "t1") require.NoError(t, err) assertColumn(t, t1.Columns[0], "c1", sqltypes.Null) assertColumn(t, t1.Columns[1], "c2", sqltypes.VarChar) + assertColumnWithDefault(t, t1.Columns[2], "c3", sqltypes.VarChar, sqlparser.NewStrLiteral("")) + assertColumnWithDefault(t, t1.Columns[3], "c4", sqltypes.TypeJSON, &sqlparser.JSONArrayExpr{}) } func TestVSchemaViews(t *testing.T) { @@ -348,11 +400,11 @@ func TestVSchemaViews(t *testing.T) { }, { Name: "c2", Type: sqltypes.VarChar}}}}}}} - vschema := BuildVSchema(&good) + vschema := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["unsharded"].Error) // add view to unsharded keyspace. 
- vschema.AddView("unsharded", "v1", "SELECT c1+c2 AS added FROM t1") + vschema.AddView("unsharded", "v1", "SELECT c1+c2 AS added FROM t1", sqlparser.NewTestParser()) view := vschema.FindView("unsharded", "v1") assert.Equal(t, "select c1 + c2 as added from t1", sqlparser.String(view)) @@ -372,10 +424,12 @@ func TestVSchemaViews(t *testing.T) { "columns": [ { "name": "c1", + "nullable": true, "type": "NULL_TYPE" }, { "name": "c2", + "nullable": true, "type": "VARCHAR" } ] @@ -388,6 +442,51 @@ func TestVSchemaViews(t *testing.T) { require.JSONEq(t, want, got) } +func TestColumnMarshal(t *testing.T) { + tests := []struct { + name string + col Column + wanted string + }{ + { + name: "Decimal column", + col: Column{ + Name: sqlparser.NewIdentifierCI("col1"), + Type: sqltypes.Decimal, + Size: 15, + Scale: 2, + }, + wanted: `{"name":"col1", "scale":2, "size":15, "type":"DECIMAL"}`, + }, + { + name: "Decimal column with no scale", + col: Column{ + Name: sqlparser.NewIdentifierCI("col1"), + Type: sqltypes.Decimal, + Size: 15, + Scale: 0, + }, + wanted: `{"name":"col1", "size":15, "type":"DECIMAL"}`, + }, + { + name: "enum with values column", + col: Column{ + Name: sqlparser.NewIdentifierCI("col1"), + Type: sqltypes.Enum, + Values: []string{"{A", "B\"", "C"}, + }, + wanted: `{"name":"col1","type":"ENUM","values":["{A","B\"","C"]}`, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + res, err := test.col.MarshalJSON() + require.NoError(t, err) + require.JSONEq(t, test.wanted, string(res), string(res)) + }) + } +} + func TestVSchemaForeignKeys(t *testing.T) { good := vschemapb.SrvVSchema{ Keyspaces: map[string]*vschemapb.Keyspace{ @@ -407,10 +506,10 @@ func TestVSchemaForeignKeys(t *testing.T) { }, { Name: "c2", Type: sqltypes.VarChar}}}}}}} - vschema := BuildVSchema(&good) + vschema := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["main"].Error) - // add fk containst a keyspace. 
+ // add fk constraints to a keyspace. vschema.AddForeignKey("main", "t1", &sqlparser.ForeignKeyDefinition{ Source: sqlparser.Columns{sqlparser.NewIdentifierCI("c2")}, ReferenceDefinition: &sqlparser.ReferenceDefinition{ @@ -430,10 +529,12 @@ func TestVSchemaForeignKeys(t *testing.T) { "columns": [ { "name": "c1", + "nullable": true, "type": "NULL_TYPE" }, { "name": "c2", + "nullable": true, "type": "VARCHAR" } ], @@ -470,7 +571,7 @@ func TestVSchemaColumnListAuthoritative(t *testing.T) { Type: sqltypes.VarChar}}, ColumnListAuthoritative: true}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) t1, err := got.FindTable("unsharded", "t1") require.NoError(t, err) @@ -489,7 +590,7 @@ func TestVSchemaColumnsFail(t *testing.T) { Name: "c1"}, { Name: "c1"}}}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.EqualError(t, got.Keyspaces["unsharded"].Error, "duplicate column name 'c1' for table: t1") } @@ -502,7 +603,7 @@ func TestVSchemaPinned(t *testing.T) { "t1": { Pinned: "80"}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) @@ -534,7 +635,7 @@ func TestShardedVSchemaOwned(t *testing.T) { Column: "c2", Name: "stln1"}}}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) @@ -604,7 +705,7 @@ func TestShardedVSchemaOwnerInfo(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) results := []struct { @@ -706,7 +807,7 @@ func TestVSchemaRoutingRules(t *testing.T) { }, }, } - got := BuildVSchema(&input) + got := BuildVSchema(&input, sqlparser.NewTestParser()) ks1 := &Keyspace{ Name: "ks1", Sharded: true, @@ -954,7 +1055,7 @@ func TestFindBestColVindex(t *testing.T) { Tables: 
map[string]*vschemapb.Table{ "t2": {}}}}} - vs := BuildVSchema(testSrvVSchema) + vs := BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { tablename string @@ -1270,7 +1371,7 @@ func TestBuildVSchemaVindexNotFoundFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `vindexType "noexist" not found` if err == nil || err.Error() != want { @@ -1294,7 +1395,7 @@ func TestBuildVSchemaNoColumnVindexFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "missing primary col vindex for table: t1" if err == nil || err.Error() != want { @@ -1579,7 +1680,7 @@ func TestBuildVSchemaNoindexFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "vindex notexist not found for table t1" if err == nil || err.Error() != want { @@ -1611,7 +1712,7 @@ func TestBuildVSchemaColumnAndColumnsFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `can't use column and columns at the same time in vindex (stfu) and table (t1)` if err == nil || err.Error() != want { @@ -1641,7 +1742,7 @@ func TestBuildVSchemaNoColumnsFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `must specify at least one column for vindex (stfu) and table (t1)` if err == nil || err.Error() != want { @@ -1672,7 +1773,7 @@ func TestBuildVSchemaNotUniqueFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "primary vindex stln is not Unique for table t1" if err == nil || err.Error() != 
want { @@ -1704,7 +1805,7 @@ func TestBuildVSchemaPrimaryCannotBeOwned(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "primary vindex stlu cannot be owned for table t1" if err == nil || err.Error() != want { @@ -1732,7 +1833,7 @@ func TestBuildVSchemaReferenceTableSourceMayBeUnqualified(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["unsharded"].Error) require.NoError(t, vschema.Keyspaces["sharded"].Error) } @@ -1764,7 +1865,7 @@ func TestBuildVSchemaReferenceTableSourceMustBeInDifferentKeyspace(t *testing.T) }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"sharded.src\" may not reference a table in the same keyspace as table: ref") @@ -1784,7 +1885,7 @@ func TestBuildVSchemaReferenceTableSourceKeyspaceMustExist(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" references a non-existent keyspace \"unsharded\"") @@ -1810,7 +1911,7 @@ func TestBuildVSchemaReferenceTableSourceTableMustExist(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" references a table \"src\" that is not present in the VSchema of keyspace \"unsharded\"") @@ -1848,7 +1949,7 @@ func TestBuildVSchemaReferenceTableSourceMayUseShardedKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := 
BuildVSchema(&input, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["sharded1"].Error) require.NoError(t, vschema.Keyspaces["sharded2"].Error) } @@ -1915,7 +2016,7 @@ func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReferenceWithoutSourc }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded1"].Error) require.EqualError(t, vschema.Keyspaces["sharded1"].Error, "source \"unsharded1.src1\" may not reference a table of type \"sequence\": ref1") @@ -1949,7 +2050,7 @@ func TestBuildVSchemaSourceMayBeReferencedAtMostOncePerKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" may not be referenced more than once per keyspace: ref1, ref2") @@ -1987,7 +2088,7 @@ func TestBuildVSchemaMayNotChainReferences(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["unsharded1"].Error) require.EqualError(t, vschema.Keyspaces["unsharded1"].Error, "reference chaining is not allowed ref => unsharded2.ref => unsharded3.ref: ref") @@ -2189,7 +2290,7 @@ func TestBadSequence(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "cannot resolve sequence invalid_seq: table invalid_seq not found" if err == nil || err.Error() != want { @@ -2237,7 +2338,7 @@ func TestBadSequenceName(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "invalid table name: a.b.seq" if err == nil || !strings.Contains(err.Error(), want) { @@ -2261,7 +2362,7 @@ func TestBadShardedSequence(t 
*testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "sequence table has to be in an unsharded keyspace or must be pinned: t1" if err == nil || err.Error() != want { @@ -2312,7 +2413,7 @@ func TestFindTable(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vschema.FindTable("", "t1") require.EqualError(t, err, "ambiguous table reference: t1") @@ -2436,7 +2537,7 @@ func TestFindTableOrVindex(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) ta := vschema.Keyspaces["ksa"].Tables["ta"] t1 := vschema.Keyspaces["ksb"].Tables["t1"] @@ -2539,7 +2640,7 @@ func TestBuildKeyspaceSchema(t *testing.T) { "t2": {}, }, } - got, _ := BuildKeyspaceSchema(good, "ks") + got, _ := BuildKeyspaceSchema(good, "ks", sqlparser.NewTestParser()) err := got.Error require.NoError(t, err) ks := &Keyspace{ @@ -2581,7 +2682,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - _, err := BuildKeyspace(good) + _, err := BuildKeyspace(good, sqlparser.NewTestParser()) require.NoError(t, err) bad := &vschemapb.Keyspace{ Sharded: true, @@ -2594,7 +2695,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - _, err = BuildKeyspace(bad) + _, err = BuildKeyspace(bad, sqlparser.NewTestParser()) want := `vindexType "absent" not found` if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Validate: %v, must start with %s", err, want) @@ -2605,6 +2706,11 @@ func TestVSchemaPBJSON(t *testing.T) { in := ` { "sharded": true, + "foreignKeyMode": "unmanaged", + "multi_tenant_spec": { + "tenant_id_column_name": "tenant_id", + "tenant_id_column_type": 265 + }, "tables": { "t1": { "column_vindexes":[{ @@ -2633,7 +2739,12 @@ func TestVSchemaPBJSON(t *testing.T) { t.Error(err) } want := vschemapb.Keyspace{ - Sharded: true, + Sharded: true, + ForeignKeyMode: 
vschemapb.Keyspace_unmanaged, + MultiTenantSpec: &vschemapb.MultiTenantSpec{ + TenantIdColumnName: "tenant_id", + TenantIdColumnType: querypb.Type_INT64, + }, Tables: map[string]*vschemapb.Table{ "t1": { ColumnVindexes: []*vschemapb.ColumnVindex{ @@ -2678,6 +2789,10 @@ func TestVSchemaJSON(t *testing.T) { in := map[string]*KeyspaceSchema{ "unsharded": { ForeignKeyMode: vschemapb.Keyspace_managed, + MultiTenantSpec: &vschemapb.MultiTenantSpec{ + TenantIdColumnName: "tenant_id", + TenantIdColumnType: querypb.Type_INT64, + }, Keyspace: &Keyspace{ Name: "k1", }, @@ -2687,8 +2802,9 @@ func TestVSchemaJSON(t *testing.T) { Columns: []Column{{ Name: sqlparser.NewIdentifierCI("c1"), }, { - Name: sqlparser.NewIdentifierCI("c2"), - Type: sqltypes.VarChar, + Name: sqlparser.NewIdentifierCI("c2"), + Type: sqltypes.VarChar, + Invisible: true, }}, }, "t2": { @@ -2761,7 +2877,8 @@ func TestVSchemaJSON(t *testing.T) { }, { "name": "c2", - "type": "VARCHAR" + "type": "VARCHAR", + "invisible": true } ] }, @@ -2769,6 +2886,10 @@ func TestVSchemaJSON(t *testing.T) { "type": "sequence", "name": "n2" } + }, + "multi_tenant_spec": { + "tenant_id_column_name": "tenant_id", + "tenant_id_column_type": 265 } } }` @@ -2788,7 +2909,7 @@ func TestFindSingleKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) none := &Table{ Name: sqlparser.NewIdentifierCS("none"), Keyspace: &Keyspace{ @@ -2829,7 +2950,7 @@ func TestFindSingleKeyspace(t *testing.T) { }, }, } - vschema = BuildVSchema(&input) + vschema = BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vschema.FindTable("", "none") wantErr := "table none not found" if err == nil || err.Error() != wantErr { @@ -2863,7 +2984,7 @@ func TestMultiColVindexPartialAllowed(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) table, err := vschema.FindTable("ksa", "user_region") require.NoError(t, err) 
require.Len(t, table.ColumnVindexes, 2) @@ -2896,7 +3017,7 @@ func TestMultiColVindexPartialNotAllowed(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) table, err := vschema.FindTable("ksa", "multiColTbl") require.NoError(t, err) require.Len(t, table.ColumnVindexes, 1) @@ -2933,7 +3054,7 @@ func TestSourceTableHasReferencedBy(t *testing.T) { }, }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) ref1, err := vs.FindTable("sharded1", "ref") require.NoError(t, err) ref2, err := vs.FindTable("sharded2", "ref") @@ -2967,7 +3088,7 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) t1, err := vs.FindTable("unsharded", "t1") require.NoError(t, err) // If the source of a reference table does not require explicit routing, @@ -2977,11 +3098,11 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { require.Equal(t, t1, globalT1) input.Keyspaces["unsharded"].RequireExplicitRouting = true - vs = BuildVSchema(&input) + vs = BuildVSchema(&input, sqlparser.NewTestParser()) _, err = vs.FindTable("sharded", "t1") require.NoError(t, err) // If the source of a reference table requires explicit routing, then - // neither the reference table nor its souce can be globally routed. + // neither the reference table nor its source can be globally routed. 
_, err = vs.FindTable("", "t1") require.Error(t, err) require.EqualError(t, err, "table t1 not found") @@ -3013,7 +3134,7 @@ func TestOtherTablesMakeReferenceTableAndSourceAmbiguous(t *testing.T) { }, }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vs.FindTable("", "t1") require.Error(t, err) } @@ -3114,7 +3235,7 @@ func TestFindTableWithSequences(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) notFoundError := func(table string) string { return fmt.Sprintf("table %s not found", table) @@ -3165,5 +3286,11 @@ func assertVindexMatches(t *testing.T, cv *ColumnVindex, v Vindex, name string, func assertColumn(t *testing.T, col Column, expectedName string, expectedType querypb.Type) { assert.True(t, col.Name.EqualString(expectedName), "column name does not match") assert.Equal(t, expectedType, col.Type, "column type does not match") +} +func assertColumnWithDefault(t *testing.T, col Column, expectedName string, expectedType querypb.Type, expDefault sqlparser.Expr) { + assertColumn(t, col, expectedName, expectedType) + if expDefault != nil { + assert.Equal(t, expDefault, col.Default, "column default does not match") + } } diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index 7f2b7267dc0..dbac5589ce8 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -23,6 +23,7 @@ import ( "vitess.io/vitess/go/vt/graph" "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" @@ -44,12 +45,14 @@ type VSchemaManager struct { cell string subscriber func(vschema *vindexes.VSchema, stats *VSchemaStats) schema SchemaInfo + parser *sqlparser.Parser } // SchemaInfo is an interface to schema tracker. 
type SchemaInfo interface { Tables(ks string) map[string]*vindexes.TableInfo Views(ks string) map[string]sqlparser.SelectStatement + UDFs(ks string) []string } // GetCurrentSrvVschema returns a copy of the latest SrvVschema from the @@ -71,7 +74,7 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ksName string, vsch ks := vschema.Keyspaces[ksName] - _, err = vindexes.BuildKeyspace(ks) + _, err = vindexes.BuildKeyspace(ks, vm.parser) if err != nil { return err } @@ -132,7 +135,7 @@ func (vm *VSchemaManager) VSchemaUpdate(v *vschemapb.SrvVSchema, err error) bool if v == nil { // We encountered an error, build an empty vschema. if vm.currentVschema == nil { - vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{}) + vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{}, vm.parser) } } else { vschema = vm.buildAndEnhanceVSchema(v) @@ -187,7 +190,7 @@ func (vm *VSchemaManager) Rebuild() { // buildAndEnhanceVSchema builds a new VSchema and uses information from the schema tracker to update it func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vindexes.VSchema { - vschema := vindexes.BuildVSchema(v) + vschema := vindexes.BuildVSchema(v, vm.parser) if vm.schema != nil { vm.updateFromSchema(vschema) // We mark the keyspaces that have foreign key management in Vitess and have cyclic foreign keys @@ -199,54 +202,75 @@ func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vinde func (vm *VSchemaManager) updateFromSchema(vschema *vindexes.VSchema) { for ksName, ks := range vschema.Keyspaces { - m := vm.schema.Tables(ksName) - // Before we add the foreign key definitions in the tables, we need to make sure that all the tables - // are created in the Vschema, so that later when we try to find the routed tables, we don't end up - // getting dummy tables. 
- for tblName, tblInfo := range m { - setColumns(ks, tblName, tblInfo.Columns) + vm.updateTableInfo(vschema, ks, ksName) + vm.updateViewInfo(ks, ksName) + vm.updateUDFsInfo(ks, ksName) + } +} + +func (vm *VSchemaManager) updateViewInfo(ks *vindexes.KeyspaceSchema, ksName string) { + views := vm.schema.Views(ksName) + if views != nil { + ks.Views = make(map[string]sqlparser.SelectStatement, len(views)) + for name, def := range views { + ks.Views[name] = sqlparser.CloneSelectStatement(def) } + } +} +func (vm *VSchemaManager) updateTableInfo(vschema *vindexes.VSchema, ks *vindexes.KeyspaceSchema, ksName string) { + m := vm.schema.Tables(ksName) + // Before we add the foreign key definitions in the tables, we need to make sure that all the tables + // are created in the Vschema, so that later when we try to find the routed tables, we don't end up + // getting dummy tables. + for tblName, tblInfo := range m { + setColumns(ks, tblName, tblInfo.Columns) + } - // Now that we have ensured that all the tables are created, we can start populating the foreign keys - // in the tables. - for tblName, tblInfo := range m { - for _, fkDef := range tblInfo.ForeignKeys { - parentTbl, err := vschema.FindRoutedTable(ksName, fkDef.ReferenceDefinition.ReferencedTable.Name.String(), topodatapb.TabletType_PRIMARY) - if err != nil { - log.Errorf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err) - continue - } - childTbl, err := vschema.FindRoutedTable(ksName, tblName, topodatapb.TabletType_PRIMARY) - if err != nil { - log.Errorf("error finding child table %s: %v", tblName, err) - continue - } - childTbl.ParentForeignKeys = append(childTbl.ParentForeignKeys, vindexes.NewParentFkInfo(parentTbl, fkDef)) - parentTbl.ChildForeignKeys = append(parentTbl.ChildForeignKeys, vindexes.NewChildFkInfo(childTbl, fkDef)) + // Now that we have ensured that all the tables are created, we can start populating the foreign keys + // in the tables. 
+ for tblName, tblInfo := range m { + rTbl, err := vschema.FindRoutedTable(ksName, tblName, topodatapb.TabletType_PRIMARY) + if err != nil { + log.Errorf("error finding routed table %s: %v", tblName, err) + continue + } + for _, fkDef := range tblInfo.ForeignKeys { + // Ignore internal tables as part of foreign key references. + if schema.IsInternalOperationTableName(fkDef.ReferenceDefinition.ReferencedTable.Name.String()) { + continue + } + parentTbl, err := vschema.FindRoutedTable(ksName, fkDef.ReferenceDefinition.ReferencedTable.Name.String(), topodatapb.TabletType_PRIMARY) + if err != nil { + log.Errorf("error finding parent table %s: %v", fkDef.ReferenceDefinition.ReferencedTable.Name.String(), err) + continue } + rTbl.ParentForeignKeys = append(rTbl.ParentForeignKeys, vindexes.NewParentFkInfo(parentTbl, fkDef)) + parentTbl.ChildForeignKeys = append(parentTbl.ChildForeignKeys, vindexes.NewChildFkInfo(rTbl, fkDef)) } - - views := vm.schema.Views(ksName) - if views != nil { - ks.Views = make(map[string]sqlparser.SelectStatement, len(views)) - for name, def := range views { - ks.Views[name] = sqlparser.CloneSelectStatement(def) + for _, idxDef := range tblInfo.Indexes { + switch idxDef.Info.Type { + case sqlparser.IndexTypePrimary: + for _, idxCol := range idxDef.Columns { + rTbl.PrimaryKey = append(rTbl.PrimaryKey, idxCol.Column) + } + case sqlparser.IndexTypeUnique: + var uniqueKey sqlparser.Exprs + for _, idxCol := range idxDef.Columns { + if idxCol.Expression == nil { + uniqueKey = append(uniqueKey, sqlparser.NewColName(idxCol.Column.String())) + } else { + uniqueKey = append(uniqueKey, idxCol.Expression) + } + } + rTbl.UniqueKeys = append(rTbl.UniqueKeys, uniqueKey) } } } } -type tableCol struct { - tableName sqlparser.TableName - colNames sqlparser.Columns -} - -var tableColHash = func(tc tableCol) string { - res := sqlparser.String(tc.tableName) - for _, colName := range tc.colNames { - res += "|" + sqlparser.String(colName) - } - return res +// 
updateUDFsInfo updates the aggregate UDFs in the Vschema. +func (vm *VSchemaManager) updateUDFsInfo(ks *vindexes.KeyspaceSchema, ksName string) { + ks.AggregateUDFs = vm.schema.UDFs(ksName) } func markErrorIfCyclesInFk(vschema *vindexes.VSchema) { @@ -256,23 +280,53 @@ func markErrorIfCyclesInFk(vschema *vindexes.VSchema) { if ks.ForeignKeyMode != vschemapb.Keyspace_managed { continue } + /* + 3 cases for creating the graph for cycle detection: + 1. ON DELETE RESTRICT ON UPDATE RESTRICT: This is the simplest case where no update/delete is required on the child table, we only need to verify whether a value exists or not. So we don't need to add any edge for this case. + 2. ON DELETE SET NULL, ON UPDATE SET NULL, ON UPDATE CASCADE: In this case having any update/delete on any of the columns in the parent side of the foreign key will make a corresponding delete/update on all the column in the child side of the foreign key. So we will add an edge from all the columns in the parent side to all the columns in the child side. + 3. ON DELETE CASCADE: This is a special case wherein a deletion on the parent table will affect all the columns in the child table irrespective of the columns involved in the foreign key! So, we'll add an edge from all the columns in the parent side of the foreign key to all the columns of the child table. + */ g := graph.NewGraph[string]() for _, table := range ks.Tables { for _, cfk := range table.ChildForeignKeys { + // Check for case 1. 
+ if cfk.OnUpdate.IsRestrict() && cfk.OnDelete.IsRestrict() { + continue + } + childTable := cfk.Table - parentVertex := tableCol{ - tableName: table.GetTableName(), - colNames: cfk.ParentColumns, + var parentVertices []string + var childVertices []string + for _, column := range cfk.ParentColumns { + parentVertices = append(parentVertices, sqlparser.String(sqlparser.NewColNameWithQualifier(column.String(), table.GetTableName()))) } - childVertex := tableCol{ - tableName: childTable.GetTableName(), - colNames: cfk.ChildColumns, + + // Check for case 3. + if cfk.OnDelete.IsCascade() { + for _, column := range childTable.Columns { + childVertices = append(childVertices, sqlparser.String(sqlparser.NewColNameWithQualifier(column.Name.String(), childTable.GetTableName()))) + } + } else { + // Case 2. + for _, column := range cfk.ChildColumns { + childVertices = append(childVertices, sqlparser.String(sqlparser.NewColNameWithQualifier(column.String(), childTable.GetTableName()))) + } } - g.AddEdge(tableColHash(parentVertex), tableColHash(childVertex)) + addCrossEdges(g, parentVertices, childVertices) } } - if g.HasCycles() { - ks.Error = vterrors.VT09019(ksName) + hasCycle, cycle := g.HasCycles() + if hasCycle { + ks.Error = vterrors.VT09019(ksName, cycle) + } + } +} + +// addCrossEdges adds the edges from all the vertices in the first list to all the vertices in the second list. 
+func addCrossEdges(g *graph.Graph[string], from []string, to []string) { + for _, fromStr := range from { + for _, toStr := range to { + g.AddEdge(fromStr, toStr) } } } diff --git a/go/vt/vtgate/vschema_manager_test.go b/go/vt/vtgate/vschema_manager_test.go index 9c51266c26a..32f83f0021a 100644 --- a/go/vt/vtgate/vschema_manager_test.go +++ b/go/vt/vtgate/vschema_manager_test.go @@ -18,11 +18,13 @@ func TestVSchemaUpdate(t *testing.T) { Type: querypb.Type_INT64, }} cols2 := []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("uid"), - Type: querypb.Type_INT64, + Name: sqlparser.NewIdentifierCI("uid"), + Type: querypb.Type_INT64, + Nullable: true, }, { - Name: sqlparser.NewIdentifierCI("name"), - Type: querypb.Type_VARCHAR, + Name: sqlparser.NewIdentifierCI("name"), + Type: querypb.Type_VARCHAR, + Nullable: true, }} ks := &vindexes.Keyspace{Name: "ks"} tblNoCol := &vindexes.Table{Name: sqlparser.NewIdentifierCS("tbl"), Keyspace: ks, ColumnListAuthoritative: true} @@ -82,6 +84,27 @@ func TestVSchemaUpdate(t *testing.T) { ParentColumns: sqlparserCols1, }) + idxTbl1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("idxTbl1"), + Keyspace: ks, + ColumnListAuthoritative: true, + PrimaryKey: sqlparser.Columns{sqlparser.NewIdentifierCI("a")}, + UniqueKeys: []sqlparser.Exprs{ + {sqlparser.NewColName("b")}, + {sqlparser.NewColName("c"), sqlparser.NewColName("d")}, + }, + } + idxTbl2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("idxTbl2"), + Keyspace: ks, + ColumnListAuthoritative: true, + PrimaryKey: sqlparser.Columns{sqlparser.NewIdentifierCI("a")}, + UniqueKeys: []sqlparser.Exprs{ + {&sqlparser.BinaryExpr{Operator: sqlparser.DivOp, Left: sqlparser.NewColName("b"), Right: sqlparser.NewIntLiteral("2")}}, + {sqlparser.NewColName("c"), &sqlparser.BinaryExpr{Operator: sqlparser.PlusOp, Left: sqlparser.NewColName("d"), Right: sqlparser.NewColName("e")}}, + }, + } + tcases := []struct { name string srvVschema *vschemapb.SrvVSchema @@ -192,41 +215,18 @@ func 
TestVSchemaUpdate(t *testing.T) { Sharded: false, ForeignKeyMode: vschemapb.Keyspace_managed, Tables: map[string]*vschemapb.Table{ - "t1": { - Columns: []*vschemapb.Column{ - { - Name: "id", - Type: querypb.Type_INT64, - }, - }, - }, - "t2": { - Columns: []*vschemapb.Column{ - { - Name: "id", - Type: querypb.Type_INT64, - }, - }, - }, + "t1": {Columns: []*vschemapb.Column{{Name: "id", Type: querypb.Type_INT64}}}, + "t2": {Columns: []*vschemapb.Column{{Name: "id", Type: querypb.Type_INT64}}}, "multicol_t1": { Columns: []*vschemapb.Column{ - { - Name: "uid", - Type: querypb.Type_INT64, - }, { - Name: "name", - Type: querypb.Type_VARCHAR, - }, + {Name: "uid", Type: querypb.Type_INT64}, + {Name: "name", Type: querypb.Type_VARCHAR}, }, - }, "multicol_t2": { + }, + "multicol_t2": { Columns: []*vschemapb.Column{ - { - Name: "uid", - Type: querypb.Type_INT64, - }, { - Name: "name", - Type: querypb.Type_VARCHAR, - }, + {Name: "uid", Type: querypb.Type_INT64}, + {Name: "name", Type: querypb.Type_VARCHAR}, }, }, }, @@ -249,6 +249,69 @@ func TestVSchemaUpdate(t *testing.T) { }, }, }, + }, { + name: "indexes in schema using columns", + currentVSchema: &vindexes.VSchema{}, + schema: map[string]*vindexes.TableInfo{ + "idxTbl1": { + Indexes: []*sqlparser.IndexDefinition{{ + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypePrimary}, + Columns: []*sqlparser.IndexColumn{ + {Column: sqlparser.NewIdentifierCI("a")}, + }, + }, { + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypeUnique}, + Columns: []*sqlparser.IndexColumn{ + {Column: sqlparser.NewIdentifierCI("b")}, + }, + }, { + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypeDefault}, + Columns: []*sqlparser.IndexColumn{ + {Column: sqlparser.NewIdentifierCI("x")}, + {Column: sqlparser.NewIdentifierCI("y")}, + }, + }, { + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypeUnique}, + Columns: []*sqlparser.IndexColumn{ + {Column: sqlparser.NewIdentifierCI("c")}, + {Column: sqlparser.NewIdentifierCI("d")}, + }, + }}, + }, + }, 
+ srvVschema: makeTestSrvVSchema("ks", false, nil), + expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"idxTbl1": idxTbl1}), + }, { + name: "indexes in schema using expressions", + currentVSchema: &vindexes.VSchema{}, + schema: map[string]*vindexes.TableInfo{ + "idxTbl2": { + Indexes: []*sqlparser.IndexDefinition{{ + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypePrimary}, + Columns: []*sqlparser.IndexColumn{ + {Column: sqlparser.NewIdentifierCI("a")}, + }, + }, { + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypeUnique}, + Columns: []*sqlparser.IndexColumn{ + {Expression: &sqlparser.BinaryExpr{Operator: sqlparser.DivOp, Left: sqlparser.NewColName("b"), Right: sqlparser.NewIntLiteral("2")}}, + }, + }, { + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypeDefault}, + Columns: []*sqlparser.IndexColumn{ + {Expression: &sqlparser.BinaryExpr{Operator: sqlparser.PlusOp, Left: sqlparser.NewColName("x"), Right: sqlparser.NewColName("y")}}, + }, + }, { + Info: &sqlparser.IndexInfo{Type: sqlparser.IndexTypeUnique}, + Columns: []*sqlparser.IndexColumn{ + {Column: sqlparser.NewIdentifierCI("c")}, + {Expression: &sqlparser.BinaryExpr{Operator: sqlparser.PlusOp, Left: sqlparser.NewColName("d"), Right: sqlparser.NewColName("e")}}, + }, + }}, + }, + }, + srvVschema: makeTestSrvVSchema("ks", false, nil), + expected: makeTestVSchema("ks", false, map[string]*vindexes.Table{"idxTbl2": idxTbl2}), }} vm := &VSchemaManager{} @@ -279,11 +342,13 @@ func TestRebuildVSchema(t *testing.T) { Type: querypb.Type_INT64, }} cols2 := []vindexes.Column{{ - Name: sqlparser.NewIdentifierCI("uid"), - Type: querypb.Type_INT64, + Name: sqlparser.NewIdentifierCI("uid"), + Type: querypb.Type_INT64, + Nullable: true, }, { - Name: sqlparser.NewIdentifierCI("name"), - Type: querypb.Type_VARCHAR, + Name: sqlparser.NewIdentifierCI("name"), + Type: querypb.Type_VARCHAR, + Nullable: true, }} ks := &vindexes.Keyspace{Name: "ks"} tblNoCol := &vindexes.Table{Name: 
sqlparser.NewIdentifierCS("tbl"), Keyspace: ks, ColumnListAuthoritative: true} @@ -373,56 +438,38 @@ func TestRebuildVSchema(t *testing.T) { } } -func makeTestVSchema(ks string, sharded bool, tbls map[string]*vindexes.Table) *vindexes.VSchema { - keyspaceSchema := &vindexes.KeyspaceSchema{ - Keyspace: &vindexes.Keyspace{ - Name: ks, - Sharded: sharded, - }, - // Default foreign key mode - ForeignKeyMode: vschemapb.Keyspace_unmanaged, - Tables: tbls, - Vindexes: map[string]vindexes.Vindex{}, - } - vs := makeTestEmptyVSchema() - vs.Keyspaces[ks] = keyspaceSchema - vs.ResetCreated() - return vs -} +// TestVSchemaUDFsUpdate tests that the UDFs are updated in the VSchema. +func TestVSchemaUDFsUpdate(t *testing.T) { + ks := &vindexes.Keyspace{Name: "ks", Sharded: true} -func makeTestEmptyVSchema() *vindexes.VSchema { - return &vindexes.VSchema{ - RoutingRules: map[string]*vindexes.RoutingRule{}, - Keyspaces: map[string]*vindexes.KeyspaceSchema{}, - } -} - -func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Table) *vschemapb.SrvVSchema { - keyspaceSchema := &vschemapb.Keyspace{ - Sharded: sharded, - Tables: tbls, - // Default foreign key mode - ForeignKeyMode: vschemapb.Keyspace_unmanaged, - } - return &vschemapb.SrvVSchema{ - Keyspaces: map[string]*vschemapb.Keyspace{ks: keyspaceSchema}, + vm := &VSchemaManager{} + var vs *vindexes.VSchema + vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) { + vs = vschema + vs.ResetCreated() } -} - -type fakeSchema struct { - t map[string]*vindexes.TableInfo -} - -func (f *fakeSchema) Tables(string) map[string]*vindexes.TableInfo { - return f.t -} + vm.schema = &fakeSchema{udfs: []string{"udf1", "udf2"}} + vm.VSchemaUpdate(&vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "ks": {Sharded: true}, + }, + }, nil) -func (f *fakeSchema) Views(string) map[string]sqlparser.SelectStatement { - return nil + utils.MustMatchFn(".globalTables", ".uniqueVindexes")(t, &vindexes.VSchema{ + 
RoutingRules: map[string]*vindexes.RoutingRule{}, + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + "ks": { + Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Tables: map[string]*vindexes.Table{}, + Vindexes: map[string]vindexes.Vindex{}, + AggregateUDFs: []string{"udf1", "udf2"}, + }, + }, + }, vs) + utils.MustMatch(t, vs, vm.currentVschema, "currentVschema does not match Vschema") } -var _ SchemaInfo = (*fakeSchema)(nil) - func TestMarkErrorIfCyclesInFk(t *testing.T) { ksName := "ks" keyspace := &vindexes.Keyspace{ @@ -434,7 +481,7 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { errWanted string }{ { - name: "Has a cycle", + name: "Has a direct cycle", getVschema: func() *vindexes.VSchema { vschema := &vindexes.VSchema{ Keyspaces: map[string]*vindexes.KeyspaceSchema{ @@ -457,12 +504,43 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { }, }, } - _ = vschema.AddForeignKey("ks", "t2", createFkDefinition([]string{"col"}, "t1", []string{"col"}, sqlparser.Cascade, sqlparser.Cascade)) - _ = vschema.AddForeignKey("ks", "t3", createFkDefinition([]string{"col"}, "t2", []string{"col"}, sqlparser.Cascade, sqlparser.Cascade)) - _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"col"}, "t3", []string{"col"}, sqlparser.Cascade, sqlparser.Cascade)) + _ = vschema.AddForeignKey("ks", "t2", createFkDefinition([]string{"col"}, "t1", []string{"col"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("ks", "t3", createFkDefinition([]string{"col"}, "t2", []string{"col"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"col"}, "t3", []string{"col"}, sqlparser.SetNull, sqlparser.SetNull)) + return vschema + }, + errWanted: "VT09019: keyspace 'ks' has cyclic foreign keys", + }, + { + name: "Has a direct cycle but there is a restrict constraint in between", + getVschema: func() *vindexes.VSchema { + vschema := &vindexes.VSchema{ + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + 
ksName: { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: keyspace, + }, + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: keyspace, + }, + "t3": { + Name: sqlparser.NewIdentifierCS("t3"), + Keyspace: keyspace, + }, + }, + }, + }, + } + _ = vschema.AddForeignKey("ks", "t2", createFkDefinition([]string{"col"}, "t1", []string{"col"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("ks", "t3", createFkDefinition([]string{"col"}, "t2", []string{"col"}, sqlparser.Restrict, sqlparser.Restrict)) + _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"col"}, "t3", []string{"col"}, sqlparser.SetNull, sqlparser.SetNull)) return vschema }, - errWanted: "VT09019: ks has cyclic foreign keys", + errWanted: "", }, { name: "No cycle", @@ -493,6 +571,134 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { return vschema }, errWanted: "", + }, { + name: "Self-referencing foreign key with delete cascade", + getVschema: func() *vindexes.VSchema { + vschema := &vindexes.VSchema{ + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + ksName: { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: keyspace, + Columns: []vindexes.Column{ + { + Name: sqlparser.NewIdentifierCI("id"), + }, + { + Name: sqlparser.NewIdentifierCI("manager_id"), + }, + }, + }, + }, + }, + }, + } + _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"manager_id"}, "t1", []string{"id"}, sqlparser.SetNull, sqlparser.Cascade)) + return vschema + }, + errWanted: "VT09019: keyspace 'ks' has cyclic foreign keys. 
Cycle exists between [ks.t1.id ks.t1.id]", + }, { + name: "Self-referencing foreign key without delete cascade", + getVschema: func() *vindexes.VSchema { + vschema := &vindexes.VSchema{ + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + ksName: { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: keyspace, + Columns: []vindexes.Column{ + { + Name: sqlparser.NewIdentifierCI("id"), + }, + { + Name: sqlparser.NewIdentifierCI("manager_id"), + }, + }, + }, + }, + }, + }, + } + _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"manager_id"}, "t1", []string{"id"}, sqlparser.SetNull, sqlparser.SetNull)) + return vschema + }, + errWanted: "", + }, { + name: "Has an indirect cycle because of cascades", + getVschema: func() *vindexes.VSchema { + vschema := &vindexes.VSchema{ + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + ksName: { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: keyspace, + Columns: []vindexes.Column{ + { + Name: sqlparser.NewIdentifierCI("a"), + }, + { + Name: sqlparser.NewIdentifierCI("b"), + }, + { + Name: sqlparser.NewIdentifierCI("c"), + }, + }, + }, + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: keyspace, + Columns: []vindexes.Column{ + { + Name: sqlparser.NewIdentifierCI("d"), + }, + { + Name: sqlparser.NewIdentifierCI("e"), + }, + { + Name: sqlparser.NewIdentifierCI("f"), + }, + }, + }, + }, + }, + }, + } + _ = vschema.AddForeignKey("ks", "t2", createFkDefinition([]string{"f"}, "t1", []string{"a"}, sqlparser.SetNull, sqlparser.Cascade)) + _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"b"}, "t2", []string{"e"}, sqlparser.SetNull, sqlparser.Cascade)) + return vschema + }, + errWanted: "VT09019: keyspace 'ks' has cyclic foreign keys", + }, { + name: "Cycle part of a multi-column foreign key", + getVschema: 
func() *vindexes.VSchema { + vschema := &vindexes.VSchema{ + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + ksName: { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vindexes.Table{ + "t1": { + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: keyspace, + }, + "t2": { + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: keyspace, + }, + }, + }, + }, + } + _ = vschema.AddForeignKey("ks", "t2", createFkDefinition([]string{"e", "f"}, "t1", []string{"a", "b"}, sqlparser.SetNull, sqlparser.SetNull)) + _ = vschema.AddForeignKey("ks", "t1", createFkDefinition([]string{"b"}, "t2", []string{"e"}, sqlparser.SetNull, sqlparser.SetNull)) + return vschema + }, + errWanted: "VT09019: keyspace 'ks' has cyclic foreign keys", }, } for _, tt := range tests { @@ -500,7 +706,7 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { vschema := tt.getVschema() markErrorIfCyclesInFk(vschema) if tt.errWanted != "" { - require.EqualError(t, vschema.Keyspaces[ksName].Error, tt.errWanted) + require.ErrorContains(t, vschema.Keyspaces[ksName].Error, tt.errWanted) return } require.NoError(t, vschema.Keyspaces[ksName].Error) @@ -508,9 +714,89 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { } } +// TestVSchemaUpdateWithFKReferenceToInternalTables tests that any internal table as part of fk reference is ignored. 
+func TestVSchemaUpdateWithFKReferenceToInternalTables(t *testing.T) { + ks := &vindexes.Keyspace{Name: "ks"} + cols1 := []vindexes.Column{{ + Name: sqlparser.NewIdentifierCI("id"), + Type: querypb.Type_INT64, + }} + sqlparserCols1 := sqlparser.MakeColumns("id") + + vindexTable_t1 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t1"), + Keyspace: ks, + Columns: cols1, + ColumnListAuthoritative: true, + } + vindexTable_t2 := &vindexes.Table{ + Name: sqlparser.NewIdentifierCS("t2"), + Keyspace: ks, + Columns: cols1, + ColumnListAuthoritative: true, + } + + vindexTable_t1.ChildForeignKeys = append(vindexTable_t1.ChildForeignKeys, vindexes.ChildFKInfo{ + Table: vindexTable_t2, + ChildColumns: sqlparserCols1, + ParentColumns: sqlparserCols1, + OnDelete: sqlparser.SetNull, + OnUpdate: sqlparser.Cascade, + }) + vindexTable_t2.ParentForeignKeys = append(vindexTable_t2.ParentForeignKeys, vindexes.ParentFKInfo{ + Table: vindexTable_t1, + ChildColumns: sqlparserCols1, + ParentColumns: sqlparserCols1, + }) + + vm := &VSchemaManager{} + var vs *vindexes.VSchema + vm.subscriber = func(vschema *vindexes.VSchema, _ *VSchemaStats) { + vs = vschema + vs.ResetCreated() + } + vm.schema = &fakeSchema{t: map[string]*vindexes.TableInfo{ + "t1": {Columns: cols1}, + "t2": { + Columns: cols1, + ForeignKeys: []*sqlparser.ForeignKeyDefinition{ + createFkDefinition([]string{"id"}, "t1", []string{"id"}, sqlparser.Cascade, sqlparser.SetNull), + createFkDefinition([]string{"id"}, "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", []string{"id"}, sqlparser.Cascade, sqlparser.SetNull), + }, + }, + }} + vm.VSchemaUpdate(&vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ + "ks": { + ForeignKeyMode: vschemapb.Keyspace_managed, + Tables: map[string]*vschemapb.Table{ + "t1": {Columns: []*vschemapb.Column{{Name: "id", Type: querypb.Type_INT64}}}, + "t2": {Columns: []*vschemapb.Column{{Name: "id", Type: querypb.Type_INT64}}}, + }, + }, + }, + }, nil) + + 
utils.MustMatchFn(".globalTables", ".uniqueVindexes")(t, &vindexes.VSchema{ + RoutingRules: map[string]*vindexes.RoutingRule{}, + Keyspaces: map[string]*vindexes.KeyspaceSchema{ + "ks": { + Keyspace: ks, + ForeignKeyMode: vschemapb.Keyspace_managed, + Vindexes: map[string]vindexes.Vindex{}, + Tables: map[string]*vindexes.Table{ + "t1": vindexTable_t1, + "t2": vindexTable_t2, + }, + }, + }, + }, vs) + utils.MustMatch(t, vs, vm.currentVschema, "currentVschema should have same reference as Vschema") +} + // createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { - pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + pKs, pTbl, _ := sqlparser.NewTestParser().ParseTable(parentTableName) return &sqlparser.ForeignKeyDefinition{ Source: sqlparser.MakeColumns(childCols...), ReferenceDefinition: &sqlparser.ReferenceDefinition{ @@ -521,3 +807,55 @@ func createFkDefinition(childCols []string, parentTableName string, parentCols [ }, } } + +func makeTestVSchema(ks string, sharded bool, tbls map[string]*vindexes.Table) *vindexes.VSchema { + keyspaceSchema := &vindexes.KeyspaceSchema{ + Keyspace: &vindexes.Keyspace{ + Name: ks, + Sharded: sharded, + }, + // Default foreign key mode + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + Tables: tbls, + Vindexes: map[string]vindexes.Vindex{}, + } + vs := makeTestEmptyVSchema() + vs.Keyspaces[ks] = keyspaceSchema + vs.ResetCreated() + return vs +} + +func makeTestEmptyVSchema() *vindexes.VSchema { + return &vindexes.VSchema{ + RoutingRules: map[string]*vindexes.RoutingRule{}, + Keyspaces: map[string]*vindexes.KeyspaceSchema{}, + } +} + +func makeTestSrvVSchema(ks string, sharded bool, tbls map[string]*vschemapb.Table) *vschemapb.SrvVSchema { + keyspaceSchema := &vschemapb.Keyspace{ + 
Sharded: sharded, + Tables: tbls, + // Default foreign key mode + ForeignKeyMode: vschemapb.Keyspace_unmanaged, + } + return &vschemapb.SrvVSchema{ + Keyspaces: map[string]*vschemapb.Keyspace{ks: keyspaceSchema}, + } +} + +type fakeSchema struct { + t map[string]*vindexes.TableInfo + udfs []string +} + +func (f *fakeSchema) Tables(string) map[string]*vindexes.TableInfo { + return f.t +} + +func (f *fakeSchema) Views(string) map[string]sqlparser.SelectStatement { + return nil +} +func (f *fakeSchema) UDFs(string) []string { return f.udfs } + +var _ SchemaInfo = (*fakeSchema)(nil) diff --git a/go/vt/vtgate/vstream_manager.go b/go/vt/vtgate/vstream_manager.go index ffb8989ca5d..e0d195853cf 100644 --- a/go/vt/vtgate/vstream_manager.go +++ b/go/vt/vtgate/vstream_manager.go @@ -25,12 +25,16 @@ import ( "sync" "time" + "golang.org/x/exp/maps" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -497,31 +501,47 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha var err error cells := vs.getCells() - tp, err := discovery.NewTabletPicker(ctx, vs.ts, cells, vs.vsm.cell, sgtid.Keyspace, sgtid.Shard, vs.tabletType.String(), vs.tabletPickerOptions, ignoreTablets...) 
+ tpo := vs.tabletPickerOptions + resharded, err := vs.keyspaceHasBeenResharded(ctx, sgtid.Keyspace) if err != nil { - log.Errorf(err.Error()) - return err + return vterrors.Wrapf(err, "failed to determine if keyspace %s has been resharded", sgtid.Keyspace) + } + if resharded { + // The non-serving tablet in the old / non-serving shard will contain all of + // the GTIDs that we need before transitioning to the new shards along with + // the journal event that will then allow us to automatically transition to + // the new shards (provided the stop_on_reshard option is not set). + tpo.IncludeNonServingTablets = true } + tabletPickerErr := func(err error) error { + tperr := vterrors.Wrapf(err, "failed to find a %s tablet for VStream in %s/%s within the %s cell(s)", + vs.tabletType.String(), sgtid.GetKeyspace(), sgtid.GetShard(), strings.Join(cells, ",")) + log.Errorf("%v", tperr) + return tperr + } + tp, err := discovery.NewTabletPicker(ctx, vs.ts, cells, vs.vsm.cell, sgtid.GetKeyspace(), sgtid.GetShard(), vs.tabletType.String(), tpo, ignoreTablets...) + if err != nil { + return tabletPickerErr(err) + } // Create a child context with a stricter timeout when picking a tablet. // This will prevent hanging in the case no tablets are found. 
tpCtx, tpCancel := context.WithTimeout(ctx, tabletPickerContextTimeout) defer tpCancel() - tablet, err := tp.PickForStreaming(tpCtx) if err != nil { - log.Errorf(err.Error()) - return err + return tabletPickerErr(err) } - log.Infof("Picked tablet %s for for %s/%s/%s/%s", tablet.Alias.String(), strings.Join(cells, ","), - sgtid.Keyspace, sgtid.Shard, vs.tabletType.String()) + log.Infof("Picked a %s tablet for VStream in %s/%s within the %s cell(s)", + vs.tabletType.String(), sgtid.GetKeyspace(), sgtid.GetShard(), strings.Join(cells, ",")) + target := &querypb.Target{ Keyspace: sgtid.Keyspace, Shard: sgtid.Shard, TabletType: vs.tabletType, Cell: vs.vsm.cell, } - tabletConn, err := vs.vsm.resolver.GetGateway().QueryServiceByAlias(tablet.Alias, target) + tabletConn, err := vs.vsm.resolver.GetGateway().QueryServiceByAlias(ctx, tablet.Alias, target) if err != nil { log.Errorf(err.Error()) return err @@ -531,18 +551,23 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha go func() { _ = tabletConn.StreamHealth(ctx, func(shr *querypb.StreamHealthResponse) error { var err error - if ctx.Err() != nil { + switch { + case ctx.Err() != nil: err = fmt.Errorf("context has ended") - } else if shr == nil || shr.RealtimeStats == nil || shr.Target == nil { - err = fmt.Errorf("health check failed") - } else if vs.tabletType != shr.Target.TabletType { - err = fmt.Errorf("tablet type has changed from %s to %s, restarting vstream", - vs.tabletType, shr.Target.TabletType) - } else if shr.RealtimeStats.HealthError != "" { + case shr == nil || shr.RealtimeStats == nil || shr.Target == nil: + err = fmt.Errorf("health check failed on %s", topoproto.TabletAliasString(tablet.Alias)) + case vs.tabletType != shr.Target.TabletType: + err = fmt.Errorf("tablet %s type has changed from %s to %s, restarting vstream", + topoproto.TabletAliasString(tablet.Alias), vs.tabletType, shr.Target.TabletType) + case shr.RealtimeStats.HealthError != "": err = fmt.Errorf("tablet %s 
is no longer healthy: %s, restarting vstream", - tablet.Alias, shr.RealtimeStats.HealthError) + topoproto.TabletAliasString(tablet.Alias), shr.RealtimeStats.HealthError) + case shr.RealtimeStats.ReplicationLagSeconds > uint32(discovery.GetLowReplicationLag().Seconds()): + err = fmt.Errorf("tablet %s has a replication lag of %d seconds which is beyond the value provided in --discovery_low_replication_lag of %s so the tablet is no longer considered healthy, restarting vstream", + topoproto.TabletAliasString(tablet.Alias), shr.RealtimeStats.ReplicationLagSeconds, discovery.GetLowReplicationLag()) } if err != nil { + log.Warningf("Tablet state changed: %s, attempting to restart", err) errCh <- err return err } @@ -573,7 +598,6 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha case <-ctx.Done(): return ctx.Err() case streamErr := <-errCh: - log.Warningf("Tablet state changed: %s, attempting to restart", streamErr) return vterrors.New(vtrpcpb.Code_UNAVAILABLE, streamErr.Error()) case <-journalDone: // Unreachable. @@ -705,7 +729,7 @@ func (vs *vstream) streamFromTablet(ctx context.Context, sgtid *binlogdatapb.Sha // shouldRetry determines whether we should exit immediately or retry the vstream. // The first return value determines if the error can be retried, while the second -// indicates whether the tablet with which the error occurred should be ommitted +// indicates whether the tablet with which the error occurred should be omitted // from the candidate list of tablets to choose from on the retry. // // An error should be retried if it is expected to be transient. @@ -737,7 +761,7 @@ func (vs *vstream) sendAll(ctx context.Context, sgtid *binlogdatapb.ShardGtid, e if err := vs.getError(); err != nil { return err } - // convert all gtids to vgtids. This should be done here while holding the lock. + // Convert all gtids to vgtids. This should be done here while holding the lock. 
for j, event := range events { if event.Type == binlogdatapb.VEventType_GTID { // Update the VGtid and send that instead. @@ -921,3 +945,56 @@ func (vs *vstream) getJournalEvent(ctx context.Context, sgtid *binlogdatapb.Shar close(je.done) return je, nil } + +// keyspaceHasBeenResharded returns true if the keyspace's serving shard set has changed +// since the last VStream as indicated by the shard definitions provided in the VGTID. +func (vs *vstream) keyspaceHasBeenResharded(ctx context.Context, keyspace string) (bool, error) { + shards, err := vs.ts.FindAllShardsInKeyspace(ctx, keyspace, nil) + if err != nil || len(shards) == 0 { + return false, err + } + + // First check the typical case, where the VGTID shards match the serving shards. + // In that case it's NOT possible that an applicable reshard has happened because + // the VGTID contains shards that are all serving. + reshardPossible := false + ksShardGTIDs := make([]*binlogdatapb.ShardGtid, 0, len(vs.vgtid.ShardGtids)) + for _, s := range vs.vgtid.ShardGtids { + if s.GetKeyspace() == keyspace { + ksShardGTIDs = append(ksShardGTIDs, s) + } + } + for _, s := range ksShardGTIDs { + shard := shards[s.GetShard()] + if shard == nil { + return false, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "shard provided in VGTID, %s, not found in the %s keyspace", s.GetShard(), keyspace) + } + if !shard.GetIsPrimaryServing() { + reshardPossible = true + break + } + } + if !reshardPossible { + return false, nil + } + + // Now that we know there MAY have been an applicable reshard, let's make a + // definitive determination by looking at the shard keyranges. + // All we care about are the shard info records now. + sis := maps.Values(shards) + for i := range sis { + for j := range sis { + if sis[i].ShardName() == sis[j].ShardName() && key.KeyRangeEqual(sis[i].GetKeyRange(), sis[j].GetKeyRange()) { + // It's the same shard so skip it. 
+ continue + } + if key.KeyRangeIntersect(sis[i].GetKeyRange(), sis[j].GetKeyRange()) { + // We have different shards with overlapping keyranges so we know + // that a reshard has happened. + return true, nil + } + } + } + + return false, nil +} diff --git a/go/vt/vtgate/vstream_manager_test.go b/go/vt/vtgate/vstream_manager_test.go index 4c1e9ec6764..e51bd2785dd 100644 --- a/go/vt/vtgate/vstream_manager_test.go +++ b/go/vt/vtgate/vstream_manager_test.go @@ -25,24 +25,26 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/sandboxconn" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - - "vitess.io/vitess/go/test/utils" ) var mu sync.Mutex @@ -1279,6 +1281,409 @@ func TestVStreamIdleHeartbeat(t *testing.T) { } } +func TestKeyspaceHasBeenSharded(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + cell := "zone1" + ks := "testks" + + type testcase struct { + name string + oldshards []string + newshards []string + vgtid *binlogdatapb.VGtid + trafficSwitched bool + want bool + wantErr string + } + testcases := []testcase{ + { + name: "2 to 4, split both, traffic not switched", + oldshards: []string{ + "-80", + "80-", + }, + newshards: []string{ + "-40", + "40-80", + "80-c0", + "c0-", + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-80", + }, + { + Keyspace: ks, + 
Shard: "80-", + }, + }, + }, + trafficSwitched: false, + want: false, + }, + { + name: "2 to 4, split both, traffic not switched", + oldshards: []string{ + "-80", + "80-", + }, + newshards: []string{ + "-40", + "40-80", + "80-c0", + "c0-", + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-80", + }, + { + Keyspace: ks, + Shard: "80-", + }, + }, + }, + trafficSwitched: false, + want: false, + }, + { + name: "2 to 8, split both, traffic switched", + oldshards: []string{ + "-80", + "80-", + }, + newshards: []string{ + "-20", + "20-40", + "40-60", + "60-80", + "80-a0", + "a0-c0", + "c0-e0", + "e0-", + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-80", + }, + { + Keyspace: ks, + Shard: "80-", + }, + }, + }, + trafficSwitched: true, + want: true, + }, + { + name: "2 to 4, split only first shard, traffic switched", + oldshards: []string{ + "-80", + "80-", + }, + newshards: []string{ + "-20", + "20-40", + "40-60", + "60-80", + // 80- is not being resharded. + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-80", + }, + { + Keyspace: ks, + Shard: "80-", + }, + }, + }, + trafficSwitched: true, + want: true, + }, + { + name: "4 to 2, merge both shards, traffic switched", + oldshards: []string{ + "-40", + "40-80", + "80-c0", + "c0-", + }, + newshards: []string{ + "-80", + "80-", + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-40", + }, + { + Keyspace: ks, + Shard: "40-80", + }, + { + Keyspace: ks, + Shard: "80-c0", + }, + { + Keyspace: ks, + Shard: "c0-", + }, + }, + }, + trafficSwitched: true, + want: true, + }, + { + name: "4 to 3, merge second half, traffic not switched", + oldshards: []string{ + "-40", + "40-80", + "80-c0", + "c0-", + }, + newshards: []string{ + // -40 and 40-80 are not being resharded. 
+ "80-", // Merge of 80-c0 and c0- + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-40", + }, + { + Keyspace: ks, + Shard: "40-80", + }, + { + Keyspace: ks, + Shard: "80-c0", + }, + { + Keyspace: ks, + Shard: "c0-", + }, + }, + }, + trafficSwitched: false, + want: false, + }, + { + name: "4 to 3, merge second half, traffic switched", + oldshards: []string{ + "-40", + "40-80", + "80-c0", + "c0-", + }, + newshards: []string{ + // -40 and 40-80 are not being resharded. + "80-", // Merge of 80-c0 and c0- + }, + vgtid: &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{ + { + Keyspace: ks, + Shard: "-40", + }, + { + Keyspace: ks, + Shard: "40-80", + }, + { + Keyspace: ks, + Shard: "80-c0", + }, + { + Keyspace: ks, + Shard: "c0-", + }, + }, + }, + trafficSwitched: true, + want: true, + }, + } + + addTablet := func(t *testing.T, ctx context.Context, host string, port int32, cell, ks, shard string, ts *topo.Server, hc *discovery.FakeHealthCheck, serving bool) { + tabletconn := hc.AddTestTablet(cell, host, port, ks, shard, topodatapb.TabletType_PRIMARY, serving, 0, nil) + err := ts.CreateTablet(ctx, tabletconn.Tablet()) + require.NoError(t, err) + var alias *topodatapb.TabletAlias + if serving { + alias = tabletconn.Tablet().Alias + } + _, err = ts.UpdateShardFields(ctx, ks, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = alias + si.IsPrimaryServing = serving + return nil + }) + require.NoError(t, err) + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + hc := discovery.NewFakeHealthCheck(nil) + _ = createSandbox(ks) + st := getSandboxTopo(ctx, cell, ks, append(tc.oldshards, tc.newshards...)) + vsm := newTestVStreamManager(ctx, hc, st, cell) + vs := vstream{ + vgtid: tc.vgtid, + tabletType: topodatapb.TabletType_PRIMARY, + optCells: cell, + vsm: vsm, + ts: st.topoServer, + } + for i, shard := range tc.oldshards { + addTablet(t, ctx, fmt.Sprintf("1.1.0.%d", i), 
int32(1000+i), cell, ks, shard, st.topoServer, hc, !tc.trafficSwitched) + } + for i, shard := range tc.newshards { + addTablet(t, ctx, fmt.Sprintf("1.1.1.%d", i), int32(2000+i), cell, ks, shard, st.topoServer, hc, tc.trafficSwitched) + } + got, err := vs.keyspaceHasBeenResharded(ctx, ks) + if tc.wantErr != "" { + require.EqualError(t, err, tc.wantErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tc.want, got) + }) + } +} + +// TestVStreamManagerHealthCheckResponseHandling tests the handling of healthcheck responses by +// the vstream manager to confirm that we are correctly restarting the vstream when we should. +func TestVStreamManagerHealthCheckResponseHandling(t *testing.T) { + ctx := utils.LeakCheckContext(t) + + // Capture the vstream warning log. Otherwise we need to re-implement the vstream error + // handling in SandboxConn's implementation and then we're not actually testing the + // production code. + logger := logutil.NewMemoryLogger() + log.Warningf = logger.Warningf + + cell := "aa" + ks := "TestVStream" + shard := "0" + tabletType := topodatapb.TabletType_REPLICA + _ = createSandbox(ks) + hc := discovery.NewFakeHealthCheck(nil) + st := getSandboxTopo(ctx, cell, ks, []string{shard}) + vsm := newTestVStreamManager(ctx, hc, st, cell) + vgtid := &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: ks, + Shard: shard, + }}, + } + source := hc.AddTestTablet(cell, "1.1.1.1", 1001, ks, shard, tabletType, true, 0, nil) + tabletAlias := topoproto.TabletAliasString(source.Tablet().Alias) + addTabletToSandboxTopo(t, ctx, st, ks, shard, source.Tablet()) + target := &querypb.Target{ + Cell: cell, + Keyspace: ks, + Shard: shard, + TabletType: tabletType, + } + highLag := uint32(discovery.GetLowReplicationLag().Seconds()) + 1 + + type testcase struct { + name string + hcRes *querypb.StreamHealthResponse + wantErr string + } + testcases := []testcase{ + { + name: "all healthy", // Will hit the context timeout + }, + { + name: 
"failure", + hcRes: &querypb.StreamHealthResponse{ + TabletAlias: source.Tablet().Alias, + Target: nil, // This is seen as a healthcheck stream failure + }, + wantErr: fmt.Sprintf("health check failed on %s", tabletAlias), + }, + { + name: "tablet type changed", + hcRes: &querypb.StreamHealthResponse{ + TabletAlias: source.Tablet().Alias, + Target: &querypb.Target{ + Cell: cell, + Keyspace: ks, + Shard: shard, + TabletType: topodatapb.TabletType_PRIMARY, + }, + PrimaryTermStartTimestamp: time.Now().Unix(), + RealtimeStats: &querypb.RealtimeStats{}, + }, + wantErr: fmt.Sprintf("tablet %s type has changed from %s to %s", + tabletAlias, tabletType, topodatapb.TabletType_PRIMARY.String()), + }, + { + name: "unhealthy", + hcRes: &querypb.StreamHealthResponse{ + TabletAlias: source.Tablet().Alias, + Target: target, + RealtimeStats: &querypb.RealtimeStats{ + HealthError: "unhealthy", + }, + }, + wantErr: fmt.Sprintf("tablet %s is no longer healthy", tabletAlias), + }, + { + name: "replication lag too high", + hcRes: &querypb.StreamHealthResponse{ + TabletAlias: source.Tablet().Alias, + Target: target, + RealtimeStats: &querypb.RealtimeStats{ + ReplicationLagSeconds: highLag, + }, + }, + wantErr: fmt.Sprintf("%s has a replication lag of %d seconds which is beyond the value provided", + tabletAlias, highLag), + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + done := make(chan struct{}) + go func() { + sctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + defer close(done) + // SandboxConn's VStream implementation always waits for the context to timeout. 
+ err := vsm.VStream(sctx, tabletType, vgtid, nil, nil, func(events []*binlogdatapb.VEvent) error { + require.Fail(t, "unexpected event", "Received unexpected events: %v", events) + return nil + }) + if tc.wantErr != "" { // Otherwise we simply expect the context to timeout + if !strings.Contains(logger.String(), tc.wantErr) { + require.Fail(t, "unexpected vstream error", "vstream ended with error: %v, which did not contain: %s", err, tc.wantErr) + } + } + }() + if tc.wantErr != "" { + source.SetStreamHealthResponse(tc.hcRes) + } + <-done + logger.Clear() + }) + } +} + func newTestVStreamManager(ctx context.Context, hc discovery.HealthCheck, serv srvtopo.Server, cell string) *vstreamManager { gw := NewTabletGateway(ctx, hc, serv, cell) srvResolver := srvtopo.NewResolver(serv, gw, cell) diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index b66ea93226e..8f8aa8b0061 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -30,6 +30,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" @@ -94,13 +96,13 @@ var ( enableOnlineDDL = true enableDirectDDL = true - // vtgate schema tracking flags + // schema tracking flags enableSchemaChangeSignal = true - - queryTimeout int + enableViews bool + enableUdfs bool // vtgate views flags - enableViews bool + queryTimeout int // queryLogToFile controls whether query logs are sent to a file queryLogToFile string @@ -147,20 +149,13 @@ func registerFlags(fs *pflag.FlagSet) { fs.IntVar(&queryLogBufferSize, "querylog-buffer-size", queryLogBufferSize, "Maximum number of buffered query logs before throttling log output") fs.DurationVar(&messageStreamGracePeriod, "message_stream_grace_period", messageStreamGracePeriod, "the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent.") fs.BoolVar(&enableViews, "enable-views", enableViews, "Enable views support in 
vtgate.") + fs.BoolVar(&enableUdfs, "track-udfs", enableUdfs, "Track UDFs in vtgate.") fs.BoolVar(&allowKillStmt, "allow-kill-statement", allowKillStmt, "Allows the execution of kill statement") fs.IntVar(&warmingReadsPercent, "warming-reads-percent", 0, "Percentage of reads on the primary to forward to replicas. Useful for keeping buffer pools warm") fs.IntVar(&warmingReadsConcurrency, "warming-reads-concurrency", 500, "Number of concurrent warming reads allowed") fs.DurationVar(&warmingReadsQueryTimeout, "warming-reads-query-timeout", 5*time.Second, "Timeout of warming read queries") - - _ = fs.String("schema_change_signal_user", "", "User to be used to send down query to vttablet to retrieve schema changes") - _ = fs.MarkDeprecated("schema_change_signal_user", "schema tracking uses an internal api and does not require a user to be specified") - - fs.Int64("gate_query_cache_size", 0, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache.") - _ = fs.MarkDeprecated("gate_query_cache_size", "`--gate_query_cache_size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported.") - - fs.Bool("gate_query_cache_lfu", false, "gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") - _ = fs.MarkDeprecated("gate_query_cache_lfu", "`--gate_query_cache_lfu` is deprecated and will be removed in `v19.0`. 
The query cache always uses a LFU implementation now.") } + func init() { servenv.OnParseFor("vtgate", registerFlags) servenv.OnParseFor("vtcombo", registerFlags) @@ -193,12 +188,12 @@ var ( // Error counters should be global so they can be set from anywhere errorCounts = stats.NewCountersWithMultiLabels("VtgateApiErrorCounts", "Vtgate API error counts per error type", []string{"Operation", "Keyspace", "DbType", "Code"}) - warnings = stats.NewCountersWithSingleLabel("VtGateWarnings", "Vtgate warnings", "type", "IgnoredSet", "ResultsExceeded", "WarnPayloadSizeExceeded") + warnings = stats.NewCountersWithSingleLabel("VtGateWarnings", "Vtgate warnings", "type", "IgnoredSet", "NonAtomicCommit", "ResultsExceeded", "WarnPayloadSizeExceeded", "WarnUnshardedOnly") vstreamSkewDelayCount = stats.NewCounter("VStreamEventsDelayedBySkewAlignment", "Number of events that had to wait because the skew across shards was too high") - vindexUnknownParams = stats.NewGauge("VindexUnknownParameters", "Number of parameterss unrecognized by Vindexes") + vindexUnknownParams = stats.NewGauge("VindexUnknownParameters", "Number of parameters unrecognized by Vindexes") timings = stats.NewMultiTimings( "VtgateApi", @@ -249,6 +244,7 @@ var RegisterVTGates []RegisterVTGate // Init initializes VTGate server. 
func Init( ctx context.Context, + env *vtenv.Environment, hc discovery.HealthCheck, serv srvtopo.Server, cell string, @@ -307,7 +303,7 @@ func Init( var si SchemaInfo // default nil var st *vtschema.Tracker if enableSchemaChangeSignal { - st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews) + st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews, enableUdfs, env.Parser()) addKeyspacesToTracker(ctx, srvResolver, st, gw) si = st } @@ -316,6 +312,7 @@ func Init( executor := NewExecutor( ctx, + env, serv, cell, resolver, @@ -358,8 +355,10 @@ func Init( st.Start() } srv := initMySQLProtocol(vtgateInst) - servenv.OnTermSync(srv.shutdownMysqlProtocolAndDrain) - servenv.OnClose(srv.rollbackAtShutdown) + if srv != nil { + servenv.OnTermSync(srv.shutdownMysqlProtocolAndDrain) + servenv.OnClose(srv.rollbackAtShutdown) + } }) servenv.OnTerm(func() { if st != nil && enableSchemaChangeSignal { @@ -468,7 +467,7 @@ func (vtg *VTGate) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn "BindVariables": bindVariables, "Session": session, } - err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute, vtg.executor.vm.parser) return session, nil, err } @@ -534,7 +533,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MyS "BindVariables": bindVariables, "Session": session, } - return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute) + return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute, vtg.executor.vm.parser) } return safeSession.Session, nil } @@ -574,7 +573,7 @@ handleError: "BindVariables": bindVariables, "Session": session, } - err = recordAndAnnotateError(err, statsKey, query, vtg.logPrepare) + err = recordAndAnnotateError(err, statsKey, query, vtg.logPrepare, vtg.executor.vm.parser) return session, nil, err } @@ -593,7 +592,7 @@ func (vtg *VTGate) VSchemaStats() *VSchemaStats { 
return vtg.executor.VSchemaStats() } -func truncateErrorStrings(data map[string]any) map[string]any { +func truncateErrorStrings(data map[string]any, parser *sqlparser.Parser) map[string]any { ret := map[string]any{} if terseErrors { // request might have PII information. Return an empty map @@ -602,16 +601,16 @@ func truncateErrorStrings(data map[string]any) map[string]any { for key, val := range data { mapVal, ok := val.(map[string]any) if ok { - ret[key] = truncateErrorStrings(mapVal) + ret[key] = truncateErrorStrings(mapVal, parser) } else { strVal := fmt.Sprintf("%v", val) - ret[key] = sqlparser.TruncateForLog(strVal) + ret[key] = parser.TruncateForLog(strVal) } } return ret } -func recordAndAnnotateError(err error, statsKey []string, request map[string]any, logger *logutil.ThrottledLogger) error { +func recordAndAnnotateError(err error, statsKey []string, request map[string]any, logger *logutil.ThrottledLogger, parser *sqlparser.Parser) error { ec := vterrors.Code(err) fullKey := []string{ statsKey[0], @@ -627,7 +626,7 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any } // Traverse the request structure and truncate any long values - request = truncateErrorStrings(request) + request = truncateErrorStrings(request, parser) errorCounts.Add(fullKey, 1) @@ -642,7 +641,7 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any if !exists { return err } - piiSafeSQL, err2 := sqlparser.RedactSQLQuery(sql.(string)) + piiSafeSQL, err2 := parser.RedactSQLQuery(sql.(string)) if err2 != nil { return err } diff --git a/go/vt/vthash/highway/highwayhash_test.go b/go/vt/vthash/highway/highwayhash_test.go index 896b6d13763..360b0bfce08 100644 --- a/go/vt/vthash/highway/highwayhash_test.go +++ b/go/vt/vthash/highway/highwayhash_test.go @@ -21,8 +21,8 @@ package highway import ( "bytes" + "crypto/rand" "encoding/hex" - "math/rand" "runtime" "sync/atomic" "testing" @@ -203,11 +203,10 @@ func benchmarkParallel(b 
*testing.B, size int) { var key [32]byte - rng := rand.New(rand.NewSource(0xabadc0cac01a)) data := make([][]byte, c) for i := range data { data[i] = make([]byte, size) - rng.Read(data[i]) + _, _ = rand.Read(data[i]) } b.SetBytes(int64(size)) diff --git a/go/vt/vtorc/collection/collection.go b/go/vt/vtorc/collection/collection.go index 0ef9a71b9a3..753e24fd2c1 100644 --- a/go/vt/vtorc/collection/collection.go +++ b/go/vt/vtorc/collection/collection.go @@ -128,13 +128,6 @@ func (c *Collection) SetExpirePeriod(duration time.Duration) { c.expirePeriod = duration } -// ExpirePeriod returns the currently configured expiration period -func (c *Collection) ExpirePeriod() time.Duration { - c.Lock() - defer c.Unlock() - return c.expirePeriod -} - // StopAutoExpiration prepares to stop by terminating the auto-expiration process func (c *Collection) StopAutoExpiration() { if c == nil { @@ -181,20 +174,6 @@ func (c *Collection) StartAutoExpiration() { } } -// Metrics returns a slice containing all the metric values -func (c *Collection) Metrics() []Metric { - if c == nil { - return nil - } - c.Lock() - defer c.Unlock() - - if len(c.collection) == 0 { - return nil // nothing to return - } - return c.collection -} - // Since returns the Metrics on or after the given time. We assume // the metrics are stored in ascending time. // Iterate backwards until we reach the first value before the given time @@ -260,8 +239,8 @@ func (c *Collection) removeBefore(t time.Time) error { // get the interval we need. 
if first == len(c.collection) { c.collection = nil // remove all entries - } else if first != -1 { - c.collection = c.collection[first:] + } else { + c.collection = c.collection[first+1:] } return nil // no errors } diff --git a/go/vt/vtorc/collection/collection_test.go b/go/vt/vtorc/collection/collection_test.go index 23679245c26..9a336f970f8 100644 --- a/go/vt/vtorc/collection/collection_test.go +++ b/go/vt/vtorc/collection/collection_test.go @@ -19,6 +19,8 @@ package collection import ( "testing" "time" + + "github.com/stretchr/testify/assert" ) var randomString = []string{ @@ -28,6 +30,7 @@ var randomString = []string{ // some random base timestamp var ts = time.Date(2016, 12, 27, 13, 36, 40, 0, time.Local) +var ts2 = ts.AddDate(-1, 0, 0) // TestCreateOrReturn tests the creation of a named Collection func TestCreateOrReturnCollection(t *testing.T) { @@ -58,27 +61,6 @@ func TestCreateOrReturnCollection(t *testing.T) { } } -// TestExpirePeriod checks that the set expire period is returned -func TestExpirePeriod(t *testing.T) { - oneSecond := time.Second - twoSeconds := 2 * oneSecond - - // create a new collection - c := &Collection{} - - // check if we change it we get back the value we provided - c.SetExpirePeriod(oneSecond) - if c.ExpirePeriod() != oneSecond { - t.Errorf("TestExpirePeriod: did not get back oneSecond") - } - - // change the period and check again - c.SetExpirePeriod(twoSeconds) - if c.ExpirePeriod() != twoSeconds { - t.Errorf("TestExpirePeriod: did not get back twoSeconds") - } -} - // dummy structure for testing type testMetric struct { } @@ -87,18 +69,127 @@ func (tm *testMetric) When() time.Time { return ts } +type testMetric2 struct { +} + +func (tm *testMetric2) When() time.Time { + return ts2 +} + // check that Append() works as expected func TestAppend(t *testing.T) { c := &Collection{} + // Test for nil metric + err := c.Append(nil) + assert.Error(t, err) + assert.Equal(t, err.Error(), "Collection.Append: m == nil") +} - if 
len(c.Metrics()) != 0 { - t.Errorf("TestAppend: len(Metrics) = %d, expecting %d", len(c.Metrics()), 0) - } - for _, v := range []int{1, 2, 3} { - tm := &testMetric{} - _ = c.Append(tm) - if len(c.Metrics()) != v { - t.Errorf("TestExpirePeriod: len(Metrics) = %d, expecting %d", len(c.Metrics()), v) - } - } +func TestNilCollection(t *testing.T) { + var c *Collection + + err := c.Append(nil) + assert.Error(t, err) + assert.Equal(t, err.Error(), "Collection.Append: c == nil") + + err = c.removeBefore(ts) + assert.Error(t, err) + assert.Equal(t, err.Error(), "Collection.removeBefore: c == nil") + + // Should not throw any error for nil Collection + c.StartAutoExpiration() + c.StopAutoExpiration() +} + +func TestStopAutoExpiration(t *testing.T) { + oldNamedCollection := namedCollection + defer func() { + namedCollection = oldNamedCollection + }() + // Clear Collection map + namedCollection = make(map[string]*Collection) + + name := randomString[0] + c := CreateOrReturnCollection(name) + + c.StopAutoExpiration() + assert.False(t, c.monitoring) + + // Test when c.monitoring == true before calling StartAutoExpiration + c.monitoring = true + c.StartAutoExpiration() + assert.True(t, c.monitoring) +} + +func TestSince(t *testing.T) { + oldNamedCollection := namedCollection + defer func() { + namedCollection = oldNamedCollection + }() + // Clear Collection map + namedCollection = make(map[string]*Collection) + + name := randomString[0] + + var c *Collection + metrics, err := c.Since(ts) + + assert.Nil(t, metrics) + assert.Error(t, err) + assert.Equal(t, err.Error(), "Collection.Since: c == nil") + + c = CreateOrReturnCollection(name) + metrics, err = c.Since(ts) + assert.Nil(t, metrics) + assert.Nil(t, err) + + tm := &testMetric{} + tm2 := &testMetric2{} + _ = c.Append(tm2) + _ = c.Append(tm) + + metrics, err = c.Since(ts2) + assert.Equal(t, []Metric{tm2, tm}, metrics) + assert.Nil(t, err) + + metrics, err = c.Since(ts) + assert.Equal(t, []Metric{tm}, metrics) + assert.Nil(t, 
err) +} + +func TestRemoveBefore(t *testing.T) { + oldNamedCollection := namedCollection + defer func() { + namedCollection = oldNamedCollection + }() + // Clear Collection map + namedCollection = make(map[string]*Collection) + + name := randomString[0] + c := CreateOrReturnCollection(name) + + tm := &testMetric{} + tm2 := &testMetric2{} + + err := c.Append(tm2) + assert.Nil(t, err) + + err = c.Append(tm) + assert.Nil(t, err) + + err = c.removeBefore(ts) + assert.NoError(t, err) + assert.Equal(t, []Metric{tm}, c.collection) + + ts3 := ts.AddDate(1, 0, 0) + err = c.removeBefore(ts3) + assert.NoError(t, err) + assert.Nil(t, c.collection) + + name = randomString[1] + c = CreateOrReturnCollection(name) + + err = c.removeBefore(ts) + assert.NoError(t, err) + assert.Equal(t, []Metric(nil), c.collection) } diff --git a/go/vt/vtorc/config/config.go b/go/vt/vtorc/config/config.go index 83a39303acb..2d21e377cb6 100644 --- a/go/vt/vtorc/config/config.go +++ b/go/vt/vtorc/config/config.go @@ -27,15 +27,10 @@ import ( "vitess.io/vitess/go/vt/log" ) -const ( - LostInRecoveryDowntimeSeconds int = 60 * 60 * 24 * 365 -) - var configurationLoaded = make(chan bool) const ( HealthPollSeconds = 1 - ActiveNodeExpireSeconds = 5 AuditPageSize = 20 DebugMetricsIntervalSeconds = 10 StaleInstanceCoordinatesExpireSeconds = 60 @@ -44,7 +39,6 @@ const ( DiscoveryQueueMaxStatisticsSize = 120 DiscoveryCollectionRetentionSeconds = 120 UnseenInstanceForgetHours = 240 // Number of hours after which an unseen instance is forgotten - FailureDetectionPeriodBlockMinutes = 60 // The time for which an instance's failure discovery is kept "active", so as to avoid concurrent "discoveries" of the instance's failure; this preceeds any recovery process, if any. 
) var ( @@ -59,6 +53,7 @@ var ( recoveryPeriodBlockDuration = 30 * time.Second preventCrossCellFailover = false waitReplicasTimeout = 30 * time.Second + tolerableReplicationLag = 0 * time.Second topoInformationRefreshDuration = 15 * time.Second recoveryPollDuration = 1 * time.Second ersEnabled = true @@ -76,8 +71,10 @@ func RegisterFlags(fs *pflag.FlagSet) { fs.BoolVar(&auditToSyslog, "audit-to-syslog", auditToSyslog, "Whether to store the audit log in the syslog") fs.DurationVar(&auditPurgeDuration, "audit-purge-duration", auditPurgeDuration, "Duration for which audit logs are held before being purged. Should be in multiples of days") fs.DurationVar(&recoveryPeriodBlockDuration, "recovery-period-block-duration", recoveryPeriodBlockDuration, "Duration for which a new recovery is blocked on an instance after running a recovery") + fs.MarkDeprecated("recovery-period-block-duration", "As of v20 this is ignored and will be removed in a future release.") fs.BoolVar(&preventCrossCellFailover, "prevent-cross-cell-failover", preventCrossCellFailover, "Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover") fs.DurationVar(&waitReplicasTimeout, "wait-replicas-timeout", waitReplicasTimeout, "Duration for which to wait for replica's to respond when issuing RPCs") + fs.DurationVar(&tolerableReplicationLag, "tolerable-replication-lag", tolerableReplicationLag, "Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS") fs.DurationVar(&topoInformationRefreshDuration, "topo-information-refresh-duration", topoInformationRefreshDuration, "Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topology server") fs.DurationVar(&recoveryPollDuration, "recovery-poll-duration", recoveryPollDuration, "Timer duration on which VTOrc polls its database to run a recovery") fs.BoolVar(&ersEnabled, 
"allow-emergency-reparent", ersEnabled, "Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary") @@ -85,7 +82,7 @@ func RegisterFlags(fs *pflag.FlagSet) { } // Configuration makes for vtorc configuration input, which can be provided by user via JSON formatted file. -// Some of the parameteres have reasonable default values, and some (like database credentials) are +// Some of the parameters have reasonable default values, and some (like database credentials) are // strictly expected from user. // TODO(sougou): change this to yaml parsing, and possible merge with tabletenv. type Configuration struct { @@ -100,6 +97,7 @@ type Configuration struct { RecoveryPeriodBlockSeconds int // (overrides `RecoveryPeriodBlockMinutes`) The time for which an instance's recovery is kept "active", so as to avoid concurrent recoveries on smae instance as well as flapping PreventCrossDataCenterPrimaryFailover bool // When true (default: false), cross-DC primary failover are not allowed, vtorc will do all it can to only fail over within same DC, or else not fail over at all. WaitReplicasTimeoutSeconds int // Timeout on amount of time to wait for the replicas in case of ERS. Should be a small value because we should fail-fast. Should not be larger than LockTimeout since that is the total time we use for an ERS. + TolerableReplicationLagSeconds int // Amount of replication lag that is considered acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary in PRS. TopoInformationRefreshSeconds int // Timer duration on which VTOrc refreshes the keyspace and vttablet records from the topo-server. 
RecoveryPollSeconds int // Timer duration on which VTOrc recovery analysis runs } @@ -129,6 +127,7 @@ func UpdateConfigValuesFromFlags() { Config.RecoveryPeriodBlockSeconds = int(recoveryPeriodBlockDuration / time.Second) Config.PreventCrossDataCenterPrimaryFailover = preventCrossCellFailover Config.WaitReplicasTimeoutSeconds = int(waitReplicasTimeout / time.Second) + Config.TolerableReplicationLagSeconds = int(tolerableReplicationLag / time.Second) Config.TopoInformationRefreshSeconds = int(topoInformationRefreshDuration / time.Second) Config.RecoveryPollSeconds = int(recoveryPollDuration / time.Second) } diff --git a/go/vt/vtorc/db/db.go b/go/vt/vtorc/db/db.go index d565c9bbdc4..00f5b5b2550 100644 --- a/go/vt/vtorc/db/db.go +++ b/go/vt/vtorc/db/db.go @@ -42,17 +42,6 @@ func (m *vtorcDB) QueryVTOrc(query string, argsArray []any, onRow func(sqlutils. return QueryVTOrc(query, argsArray, onRow) } -type DummySQLResult struct { -} - -func (dummyRes DummySQLResult) LastInsertId() (int64, error) { - return 0, nil -} - -func (dummyRes DummySQLResult) RowsAffected() (int64, error) { - return 1, nil -} - // OpenTopology returns the DB instance for the vtorc backed database func OpenVTOrc() (db *sql.DB, err error) { var fromCache bool @@ -88,7 +77,7 @@ func registerVTOrcDeployment(db *sql.DB) error { } // deployStatements will issue given sql queries that are not already known to be deployed. -// This iterates both lists (to-run and already-deployed) and also verifies no contraditions. +// This iterates both lists (to-run and already-deployed) and also verifies no contradictions. 
func deployStatements(db *sql.DB, queries []string) error { tx, err := db.Begin() if err != nil { diff --git a/go/vt/vtorc/db/generate_base.go b/go/vt/vtorc/db/generate_base.go index 73238802920..f997dc6ac0a 100644 --- a/go/vt/vtorc/db/generate_base.go +++ b/go/vt/vtorc/db/generate_base.go @@ -16,6 +16,24 @@ package db +var TableNames = []string{ + "database_instance", + "audit", + "node_health", + "topology_recovery", + "database_instance_topology_history", + "recovery_detection", + "database_instance_last_analysis", + "database_instance_analysis_changelog", + "vtorc_db_deployments", + "global_recovery_disable", + "topology_recovery_steps", + "database_instance_stale_binlog_coordinates", + "vitess_tablet", + "vitess_keyspace", + "vitess_shard", +} + // vtorcBackend is a list of SQL statements required to build the vtorc backend var vtorcBackend = []string{ ` @@ -37,6 +55,8 @@ CREATE TABLE database_instance ( binary_log_pos bigint NOT NULL, source_host varchar(128) NOT NULL, source_port smallint NOT NULL, + replica_net_timeout int NOT NULL, + heartbeat_interval decimal(11,4) NOT NULL, replica_sql_running tinyint NOT NULL, replica_io_running tinyint NOT NULL, source_log_file varchar(128) NOT NULL, @@ -116,32 +136,11 @@ CREATE INDEX audit_timestamp_idx_audit ON audit (audit_timestamp) CREATE INDEX alias_idx_audit ON audit (alias, audit_timestamp) `, ` -DROP TABLE IF EXISTS active_node -`, - ` -CREATE TABLE active_node ( - anchor tinyint NOT NULL, - hostname varchar(128) NOT NULL, - token varchar(128) NOT NULL, - last_seen_active timestamp not null default (''), - first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - PRIMARY KEY (anchor) -)`, - ` DROP TABLE IF EXISTS node_health `, ` CREATE TABLE node_health ( - hostname varchar(128) NOT NULL, - token varchar(128) NOT NULL, - last_seen_active timestamp not null default (''), - extra_info varchar(128) not null default '', - command varchar(128) not null default '', - app_version varchar(64) NOT NULL 
DEFAULT "", - first_seen_active timestamp NOT NULL DEFAULT '1971-01-01 00:00:00', - db_backend varchar(255) NOT NULL DEFAULT "", - incrementing_indicator bigint not null default 0, - PRIMARY KEY (hostname, token) + last_seen_active timestamp not null default ('') )`, ` DROP TABLE IF EXISTS topology_recovery @@ -150,35 +149,19 @@ DROP TABLE IF EXISTS topology_recovery CREATE TABLE topology_recovery ( recovery_id integer, alias varchar(256) NOT NULL, - in_active_period tinyint NOT NULL DEFAULT 0, - start_active_period timestamp not null default (''), - end_active_period_unixtime int, + start_recovery timestamp NOT NULL DEFAULT (''), end_recovery timestamp NULL DEFAULT NULL, - processing_node_hostname varchar(128) NOT NULL, - processcing_node_token varchar(128) NOT NULL, successor_alias varchar(256) DEFAULT NULL, analysis varchar(128) not null default '', keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, - count_affected_replicas int not null default 0, is_successful TINYint NOT NULL DEFAULT 0, - acknowledged TINYint NOT NULL DEFAULT 0, - acknowledged_by varchar(128) not null default '', - acknowledge_comment text not null default '', all_errors text not null default '', - acknowledged_at TIMESTAMP NULL, - last_detection_id bigint not null default 0, - uid varchar(128) not null default '', + detection_id bigint not null default 0, PRIMARY KEY (recovery_id) )`, ` -CREATE INDEX in_active_start_period_idx_topology_recovery ON topology_recovery (in_active_period, start_active_period) - `, - ` -CREATE INDEX start_active_period_idx_topology_recovery ON topology_recovery (start_active_period) - `, - ` -CREATE UNIQUE INDEX alias_active_period_uidx_topology_recovery ON topology_recovery (alias, in_active_period, end_active_period_unixtime) +CREATE INDEX start_recovery_idx_topology_recovery ON topology_recovery (start_recovery) `, ` DROP TABLE IF EXISTS database_instance_topology_history @@ -200,58 +183,19 @@ CREATE TABLE database_instance_topology_history ( CREATE 
INDEX keyspace_shard_idx_database_instance_topology_history ON database_instance_topology_history (snapshot_unix_timestamp, keyspace, shard) `, ` -DROP TABLE IF EXISTS candidate_database_instance -`, - ` -CREATE TABLE candidate_database_instance ( - alias varchar(256) NOT NULL, - last_suggested timestamp not null default (''), - priority TINYINT SIGNED NOT NULL DEFAULT 1, - promotion_rule text check(promotion_rule in ('must', 'prefer', 'neutral', 'prefer_not', 'must_not')) NOT NULL DEFAULT 'neutral', - PRIMARY KEY (alias) -)`, - ` -CREATE INDEX last_suggested_idx_candidate_database_instance ON candidate_database_instance (last_suggested) - `, - ` -DROP TABLE IF EXISTS topology_failure_detection +DROP TABLE IF EXISTS recovery_detection `, ` -CREATE TABLE topology_failure_detection ( +CREATE TABLE recovery_detection ( detection_id integer, alias varchar(256) NOT NULL, - in_active_period tinyint NOT NULL DEFAULT '0', - start_active_period timestamp not null default (''), - end_active_period_unixtime int NOT NULL, - processing_node_hostname varchar(128) NOT NULL, - processcing_node_token varchar(128) NOT NULL, analysis varchar(128) NOT NULL, keyspace varchar(128) NOT NULL, shard varchar(128) NOT NULL, - count_affected_replicas int NOT NULL, - is_actionable tinyint not null default 0, + detection_timestamp timestamp NOT NULL default (''), PRIMARY KEY (detection_id) )`, ` -CREATE INDEX in_active_start_period_idx_topology_failure_detection ON topology_failure_detection (in_active_period, start_active_period) - `, - ` -DROP TABLE IF EXISTS blocked_topology_recovery -`, - ` -CREATE TABLE blocked_topology_recovery ( - alias varchar(256) NOT NULL, - keyspace varchar(128) NOT NULL, - shard varchar(128) NOT NULL, - analysis varchar(128) NOT NULL, - last_blocked_timestamp timestamp not null default (''), - blocking_recovery_id bigint, - PRIMARY KEY (alias) -)`, - ` -CREATE INDEX keyspace_shard_blocked_idx_blocked_topology_recovery ON blocked_topology_recovery (keyspace, shard, 
last_blocked_timestamp) - `, - ` DROP TABLE IF EXISTS database_instance_last_analysis `, ` @@ -279,26 +223,6 @@ CREATE TABLE database_instance_analysis_changelog ( CREATE INDEX analysis_timestamp_idx_database_instance_analysis_changelog ON database_instance_analysis_changelog (analysis_timestamp) `, ` -DROP TABLE IF EXISTS node_health_history -`, - ` -CREATE TABLE node_health_history ( - history_id integer, - hostname varchar(128) NOT NULL, - token varchar(128) NOT NULL, - first_seen_active timestamp NOT NULL, - extra_info varchar(128) NOT NULL, - command varchar(128) not null default '', - app_version varchar(64) NOT NULL DEFAULT "", - PRIMARY KEY (history_id) -)`, - ` -CREATE INDEX first_seen_active_idx_node_health_history ON node_health_history (first_seen_active) - `, - ` -CREATE UNIQUE INDEX hostname_token_idx_node_health_history ON node_health_history (hostname, token) - `, - ` DROP TABLE IF EXISTS vtorc_db_deployments `, ` @@ -321,7 +245,7 @@ DROP TABLE IF EXISTS topology_recovery_steps ` CREATE TABLE topology_recovery_steps ( recovery_step_id integer, - recovery_uid varchar(128) NOT NULL, + recovery_id integer NOT NULL, audit_at timestamp not null default (''), message text NOT NULL, PRIMARY KEY (recovery_step_id) @@ -387,33 +311,18 @@ CREATE TABLE vitess_shard ( CREATE INDEX source_host_port_idx_database_instance_database_instance on database_instance (source_host, source_port) `, ` -CREATE INDEX keyspace_shard_in_active_idx_topology_recovery on topology_recovery (keyspace, shard, in_active_period) +CREATE INDEX keyspace_shard_idx_topology_recovery on topology_recovery (keyspace, shard) `, ` CREATE INDEX end_recovery_idx_topology_recovery on topology_recovery (end_recovery) `, ` -CREATE INDEX acknowledged_idx_topology_recovery on topology_recovery (acknowledged, acknowledged_at) - `, - ` -CREATE INDEX last_blocked_idx_blocked_topology_recovery on blocked_topology_recovery (last_blocked_timestamp) - `, - ` CREATE INDEX 
instance_timestamp_idx_database_instance_analysis_changelog on database_instance_analysis_changelog (alias, analysis_timestamp) `, ` -CREATE INDEX last_detection_idx_topology_recovery on topology_recovery (last_detection_id) - `, - ` -CREATE INDEX last_seen_active_idx_node_health on node_health (last_seen_active) - `, - ` -CREATE INDEX uid_idx_topology_recovery ON topology_recovery(uid) - `, - ` -CREATE INDEX recovery_uid_idx_topology_recovery_steps ON topology_recovery_steps(recovery_uid) +CREATE INDEX detection_idx_topology_recovery on topology_recovery (detection_id) `, ` -CREATE UNIQUE INDEX alias_active_recoverable_uidx_topology_failure_detection ON topology_failure_detection (alias, in_active_period, end_active_period_unixtime, is_actionable) +CREATE INDEX recovery_id_idx_topology_recovery_steps ON topology_recovery_steps(recovery_id) `, } diff --git a/go/vt/vtorc/discovery/funcs.go b/go/vt/vtorc/discovery/funcs.go index e468d10a420..eeafe2e20a4 100644 --- a/go/vt/vtorc/discovery/funcs.go +++ b/go/vt/vtorc/discovery/funcs.go @@ -47,15 +47,6 @@ func max(values stats.Float64Data) float64 { return s } -// internal routine to return the minimum value or 9e9 -func min(values stats.Float64Data) float64 { - s, err := stats.Min(values) - if err != nil { - return 9e9 // a large number (should use something better than this but it's ok for now) - } - return s -} - // internal routine to return the median or 0 func median(values stats.Float64Data) float64 { s, err := stats.Median(values) diff --git a/go/vt/vtorc/discovery/queue_aggregated_stats.go b/go/vt/vtorc/discovery/queue_aggregated_stats.go deleted file mode 100644 index 79f2e310a58..00000000000 --- a/go/vt/vtorc/discovery/queue_aggregated_stats.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - Copyright 2017 Simon J Mudd - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package discovery - -import ( - "github.com/montanaflynn/stats" - - "vitess.io/vitess/go/vt/log" -) - -// AggregatedQueueMetrics contains aggregate information some part queue metrics -type AggregatedQueueMetrics struct { - ActiveMinEntries float64 - ActiveMeanEntries float64 - ActiveMedianEntries float64 - ActiveP95Entries float64 - ActiveMaxEntries float64 - QueuedMinEntries float64 - QueuedMeanEntries float64 - QueuedMedianEntries float64 - QueuedP95Entries float64 - QueuedMaxEntries float64 -} - -// we pull out values in ints so convert to float64 for metric calculations -func intSliceToFloat64Slice(someInts []int) stats.Float64Data { - var slice stats.Float64Data - - for _, v := range someInts { - slice = append(slice, float64(v)) - } - - return slice -} - -// DiscoveryQueueMetrics returns some raw queue metrics based on the -// period (last N entries) requested. -func (q *Queue) DiscoveryQueueMetrics(period int) []QueueMetric { - q.Lock() - defer q.Unlock() - - // adjust period in case we ask for something that's too long - if period > len(q.metrics) { - log.Infof("DiscoveryQueueMetrics: wanted: %d, adjusting period to %d", period, len(q.metrics)) - period = len(q.metrics) - } - - a := q.metrics[len(q.metrics)-period:] - log.Infof("DiscoveryQueueMetrics: returning values: %+v", a) - return a -} - -// AggregatedDiscoveryQueueMetrics Returns some aggregate statistics -// based on the period (last N entries) requested. 
We store up to -// config.Config.DiscoveryQueueMaxStatisticsSize values and collect once -// a second so we expect period to be a smaller value. -func (q *Queue) AggregatedDiscoveryQueueMetrics(period int) *AggregatedQueueMetrics { - wanted := q.DiscoveryQueueMetrics(period) - - var activeEntries, queuedEntries []int - // fill vars - for i := range wanted { - activeEntries = append(activeEntries, wanted[i].Active) - queuedEntries = append(queuedEntries, wanted[i].Queued) - } - - a := &AggregatedQueueMetrics{ - ActiveMinEntries: min(intSliceToFloat64Slice(activeEntries)), - ActiveMeanEntries: mean(intSliceToFloat64Slice(activeEntries)), - ActiveMedianEntries: median(intSliceToFloat64Slice(activeEntries)), - ActiveP95Entries: percentile(intSliceToFloat64Slice(activeEntries), 95), - ActiveMaxEntries: max(intSliceToFloat64Slice(activeEntries)), - QueuedMinEntries: min(intSliceToFloat64Slice(queuedEntries)), - QueuedMeanEntries: mean(intSliceToFloat64Slice(queuedEntries)), - QueuedMedianEntries: median(intSliceToFloat64Slice(queuedEntries)), - QueuedP95Entries: percentile(intSliceToFloat64Slice(queuedEntries), 95), - QueuedMaxEntries: max(intSliceToFloat64Slice(queuedEntries)), - } - log.Infof("AggregatedDiscoveryQueueMetrics: returning values: %+v", a) - return a -} diff --git a/go/vt/vtorc/inst/analysis.go b/go/vt/vtorc/inst/analysis.go index 54500621cb9..66d6c6dd9ce 100644 --- a/go/vt/vtorc/inst/analysis.go +++ b/go/vt/vtorc/inst/analysis.go @@ -46,6 +46,7 @@ const ( ReplicationStopped AnalysisCode = "ReplicationStopped" ReplicaSemiSyncMustBeSet AnalysisCode = "ReplicaSemiSyncMustBeSet" ReplicaSemiSyncMustNotBeSet AnalysisCode = "ReplicaSemiSyncMustNotBeSet" + ReplicaMisconfigured AnalysisCode = "ReplicaMisconfigured" UnreachablePrimaryWithLaggingReplicas AnalysisCode = "UnreachablePrimaryWithLaggingReplicas" UnreachablePrimary AnalysisCode = "UnreachablePrimary" PrimarySingleReplicaNotReplicating AnalysisCode = "PrimarySingleReplicaNotReplicating" @@ -54,9 +55,6 @@ 
const ( AllPrimaryReplicasNotReplicatingOrDead AnalysisCode = "AllPrimaryReplicasNotReplicatingOrDead" LockedSemiSyncPrimaryHypothesis AnalysisCode = "LockedSemiSyncPrimaryHypothesis" LockedSemiSyncPrimary AnalysisCode = "LockedSemiSyncPrimary" - PrimaryWithoutReplicas AnalysisCode = "PrimaryWithoutReplicas" - BinlogServerFailingToConnectToPrimary AnalysisCode = "BinlogServerFailingToConnectToPrimary" - GraceFulPrimaryTakeover AnalysisCode = "GracefulPrimaryTakeover" ErrantGTIDDetected AnalysisCode = "ErrantGTIDDetected" ) @@ -83,48 +81,32 @@ type ReplicationAnalysisHints struct { AuditAnalysis bool } -type AnalysisInstanceType string - -const ( - AnalysisInstanceTypePrimary AnalysisInstanceType = "primary" - AnalysisInstanceTypeCoPrimary AnalysisInstanceType = "co-primary" - AnalysisInstanceTypeIntermediatePrimary AnalysisInstanceType = "intermediate-primary" -) - // ReplicationAnalysis notes analysis on replication chain status, per instance type ReplicationAnalysis struct { - AnalyzedInstanceHostname string - AnalyzedInstancePort int AnalyzedInstanceAlias string AnalyzedInstancePrimaryAlias string TabletType topodatapb.TabletType PrimaryTimeStamp time.Time ClusterDetails ClusterInfo - AnalyzedInstanceDataCenter string - AnalyzedInstanceRegion string AnalyzedKeyspace string AnalyzedShard string // ShardPrimaryTermTimestamp is the primary term start time stored in the shard record. 
ShardPrimaryTermTimestamp string - AnalyzedInstancePhysicalEnvironment string AnalyzedInstanceBinlogCoordinates BinlogCoordinates IsPrimary bool IsClusterPrimary bool - IsCoPrimary bool LastCheckValid bool LastCheckPartialSuccess bool CountReplicas uint CountValidReplicas uint CountValidReplicatingReplicas uint - CountReplicasFailingToConnectToPrimary uint - ReplicationDepth uint - IsFailingToConnectToPrimary bool ReplicationStopped bool ErrantGTID string + ReplicaNetTimeout int32 + HeartbeatInterval float64 Analysis AnalysisCode Description string StructureAnalysis []StructureAnalysisCode - IsBinlogServer bool OracleGTIDImmediateTopology bool MariaDBGTIDImmediateTopology bool BinlogServerImmediateTopology bool @@ -142,9 +124,7 @@ type ReplicationAnalysis struct { CountDelayedReplicas uint CountLaggingReplicas uint IsActionableRecovery bool - ProcessingNodeHostname string - ProcessingNodeToken string - StartActivePeriod string + RecoveryId int64 GTIDMode string MinReplicaGTIDMode string MaxReplicaGTIDMode string @@ -161,18 +141,6 @@ func (replicationAnalysis *ReplicationAnalysis) MarshalJSON() ([]byte, error) { return json.Marshal(i) } -// Get a string description of the analyzed instance type (primary? co-primary? intermediate-primary?) -func (replicationAnalysis *ReplicationAnalysis) GetAnalysisInstanceType() AnalysisInstanceType { - if replicationAnalysis.IsCoPrimary { - return AnalysisInstanceTypeCoPrimary - } - - if replicationAnalysis.IsPrimary { - return AnalysisInstanceTypePrimary - } - return AnalysisInstanceTypeIntermediatePrimary -} - // ValidSecondsFromSeenToLastAttemptedCheck returns the maximum allowed elapsed time // between last_attempted_check to last_checked before we consider the instance as invalid. 
func ValidSecondsFromSeenToLastAttemptedCheck() uint { diff --git a/go/vt/vtorc/inst/analysis_dao.go b/go/vt/vtorc/inst/analysis_dao.go index 25082f133da..b9bf1fba236 100644 --- a/go/vt/vtorc/inst/analysis_dao.go +++ b/go/vt/vtorc/inst/analysis_dao.go @@ -18,33 +18,30 @@ package inst import ( "fmt" + "math" "time" - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/topo/topoproto" - + "github.com/patrickmn/go-cache" "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/external/golib/sqlutils" + "vitess.io/vitess/go/vt/log" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" - "vitess.io/vitess/go/vt/vtorc/process" "vitess.io/vitess/go/vt/vtorc/util" - - "github.com/patrickmn/go-cache" - "github.com/rcrowley/go-metrics" ) -var analysisChangeWriteCounter = metrics.NewCounter() +// The metric is registered with a deprecated name. The old metric name can be removed in v21. 
+var analysisChangeWriteCounter = stats.NewCounterWithDeprecatedName("AnalysisChangeWrite", "analysis.change.write", "Number of times analysis has changed") var recentInstantAnalysis *cache.Cache func init() { - _ = metrics.Register("analysis.change.write", analysisChangeWriteCounter) - go initializeAnalysisDaoPostConfiguration() } @@ -76,8 +73,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna query := ` SELECT vitess_tablet.info AS tablet_info, - vitess_tablet.hostname, - vitess_tablet.port, vitess_tablet.tablet_type, vitess_tablet.primary_timestamp, vitess_tablet.shard AS shard, @@ -88,11 +83,10 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna primary_instance.read_only AS read_only, MIN(primary_instance.gtid_errant) AS gtid_errant, MIN(primary_instance.alias) IS NULL AS is_invalid, - MIN(primary_instance.data_center) AS data_center, - MIN(primary_instance.region) AS region, - MIN(primary_instance.physical_environment) AS physical_environment, MIN(primary_instance.binary_log_file) AS binary_log_file, MIN(primary_instance.binary_log_pos) AS binary_log_pos, + MIN(primary_instance.replica_net_timeout) AS replica_net_timeout, + MIN(primary_instance.heartbeat_interval) AS heartbeat_interval, MIN(primary_tablet.info) AS primary_tablet_info, MIN( IFNULL( @@ -116,7 +110,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna OR substr(primary_instance.source_host, 1, 2) = '//' ) ) AS is_primary, - MIN(primary_instance.is_co_primary) AS is_co_primary, MIN(primary_instance.gtid_mode) AS gtid_mode, COUNT(replica_instance.server_id) AS count_replicas, IFNULL( @@ -142,19 +135,10 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna ), 0 ) AS count_replicas_failing_to_connect_to_primary, - MIN(primary_instance.replication_depth) AS replication_depth, - MIN( - primary_instance.replica_sql_running = 1 - AND primary_instance.replica_io_running = 0 - 
AND primary_instance.last_io_error like '%%error %%connecting to master%%' - ) AS is_failing_to_connect_to_primary, MIN( primary_instance.replica_sql_running = 0 OR primary_instance.replica_io_running = 0 ) AS replication_stopped, - MIN( - primary_instance.binlog_server - ) AS is_binlog_server, MIN( primary_instance.supports_oracle_gtid ) AS supports_oracle_gtid, @@ -173,7 +157,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna MIN( primary_instance.semi_sync_replica_enabled ) AS semi_sync_replica_enabled, - SUM(replica_instance.is_co_primary) AS count_co_primary_replicas, SUM(replica_instance.oracle_gtid) AS count_oracle_gtid_replicas, IFNULL( SUM( @@ -302,9 +285,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna clusters := make(map[string]*clusterAnalysis) err := db.Db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { a := &ReplicationAnalysis{ - Analysis: NoProblem, - ProcessingNodeHostname: process.ThisHostname, - ProcessingNodeToken: util.ProcessToken.Hash, + Analysis: NoProblem, } tablet := &topodatapb.Tablet{} @@ -334,15 +315,8 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.ShardPrimaryTermTimestamp = m.GetString("shard_primary_term_timestamp") a.IsPrimary = m.GetBool("is_primary") - countCoPrimaryReplicas := m.GetUint("count_co_primary_replicas") - a.IsCoPrimary = m.GetBool("is_co_primary") || (countCoPrimaryReplicas > 0) - a.AnalyzedInstanceHostname = m.GetString("hostname") - a.AnalyzedInstancePort = m.GetInt("port") a.AnalyzedInstanceAlias = topoproto.TabletAliasString(tablet.Alias) a.AnalyzedInstancePrimaryAlias = topoproto.TabletAliasString(primaryTablet.Alias) - a.AnalyzedInstanceDataCenter = m.GetString("data_center") - a.AnalyzedInstanceRegion = m.GetString("region") - a.AnalyzedInstancePhysicalEnvironment = m.GetString("physical_environment") a.AnalyzedInstanceBinlogCoordinates = BinlogCoordinates{ LogFile: 
m.GetString("binary_log_file"), LogPos: m.GetUint32("binary_log_pos"), @@ -357,12 +331,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.CountReplicas = m.GetUint("count_replicas") a.CountValidReplicas = m.GetUint("count_valid_replicas") a.CountValidReplicatingReplicas = m.GetUint("count_valid_replicating_replicas") - a.CountReplicasFailingToConnectToPrimary = m.GetUint("count_replicas_failing_to_connect_to_primary") - a.ReplicationDepth = m.GetUint("replication_depth") - a.IsFailingToConnectToPrimary = m.GetBool("is_failing_to_connect_to_primary") a.ReplicationStopped = m.GetBool("replication_stopped") - a.IsBinlogServer = m.GetBool("is_binlog_server") - a.ClusterDetails.ReadRecoveryInfo() a.ErrantGTID = m.GetString("gtid_errant") countValidOracleGTIDReplicas := m.GetUint("count_valid_oracle_gtid_replicas") @@ -391,12 +360,14 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.CountDelayedReplicas = m.GetUint("count_delayed_replicas") a.CountLaggingReplicas = m.GetUint("count_lagging_replicas") + a.ReplicaNetTimeout = m.GetInt32("replica_net_timeout") + a.HeartbeatInterval = m.GetFloat64("heartbeat_interval") a.IsReadOnly = m.GetUint("read_only") == 1 if !a.LastCheckValid { - analysisMessage := fmt.Sprintf("analysis: Alias: %+v, Keyspace: %+v, Shard: %+v, IsPrimary: %+v, LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v, CountReplicasFailingToConnectToPrimary: %+v", - a.AnalyzedInstanceAlias, a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, a.CountReplicasFailingToConnectToPrimary, + analysisMessage := fmt.Sprintf("analysis: Alias: %+v, Keyspace: %+v, Shard: %+v, IsPrimary: %+v, 
LastCheckValid: %+v, LastCheckPartialSuccess: %+v, CountReplicas: %+v, CountValidReplicas: %+v, CountValidReplicatingReplicas: %+v, CountLaggingReplicas: %+v, CountDelayedReplicas: %+v", + a.AnalyzedInstanceAlias, a.ClusterDetails.Keyspace, a.ClusterDetails.Shard, a.IsPrimary, a.LastCheckValid, a.LastCheckPartialSuccess, a.CountReplicas, a.CountValidReplicas, a.CountValidReplicatingReplicas, a.CountLaggingReplicas, a.CountDelayedReplicas, ) if util.ClearToLog("analysis_dao", analysisMessage) { log.Infof(analysisMessage) @@ -499,6 +470,10 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = NotConnectedToPrimary a.Description = "Not connected to the primary" // + } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && math.Round(a.HeartbeatInterval*2) != float64(a.ReplicaNetTimeout) { + a.Analysis = ReplicaMisconfigured + a.Description = "Replica has been misconfigured" + // } else if topo.IsReplicaType(a.TabletType) && !a.IsPrimary && ca.primaryAlias != "" && a.AnalyzedInstancePrimaryAlias != ca.primaryAlias { a.Analysis = ConnectedToWrongPrimary a.Description = "Connected to wrong primary" @@ -521,12 +496,7 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Description = "Primary cannot be reached by vtorc and all of its replicas are lagging" // } else if a.IsPrimary && !a.LastCheckValid && !a.LastCheckPartialSuccess && a.CountValidReplicas > 0 && a.CountValidReplicatingReplicas > 0 { - // partial success is here to redice noise - a.Analysis = UnreachablePrimary - a.Description = "Primary cannot be reached by vtorc but it has replicating replicas; possibly a network/host issue" - // - } else if a.IsPrimary && !a.LastCheckValid && a.LastCheckPartialSuccess && a.CountReplicasFailingToConnectToPrimary > 0 && a.CountValidReplicas > 0 && a.CountValidReplicatingReplicas > 0 { - // there's partial success, but also at least one replica is failing to connect to primary + // partial 
success is here to reduce noise a.Analysis = UnreachablePrimary a.Description = "Primary cannot be reached by vtorc but it has replicating replicas; possibly a network/host issue" // @@ -554,10 +524,6 @@ func GetReplicationAnalysis(keyspace string, shard string, hints *ReplicationAna a.Analysis = AllPrimaryReplicasNotReplicatingOrDead a.Description = "Primary is reachable but none of its replicas is replicating" // - } else if a.IsBinlogServer && a.IsFailingToConnectToPrimary { - a.Analysis = BinlogServerFailingToConnectToPrimary - a.Description = "Binlog server is unable to connect to its primary" - // } // else if a.IsPrimary && a.CountReplicas == 0 { // a.Analysis = PrimaryWithoutReplicas @@ -748,7 +714,7 @@ func auditInstanceAnalysisInChangelog(tabletAlias string, analysisCode AnalysisC tabletAlias, string(analysisCode), ) if err == nil { - analysisChangeWriteCounter.Inc(1) + analysisChangeWriteCounter.Add(1) } else { log.Error(err) } diff --git a/go/vt/vtorc/inst/analysis_dao_test.go b/go/vt/vtorc/inst/analysis_dao_test.go index c1926fca089..a83e975c747 100644 --- a/go/vt/vtorc/inst/analysis_dao_test.go +++ b/go/vt/vtorc/inst/analysis_dao_test.go @@ -21,7 +21,6 @@ import ( "time" "github.com/patrickmn/go-cache" - "github.com/rcrowley/go-metrics" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/external/golib/sqlutils" @@ -34,10 +33,10 @@ var ( // The initialSQL is a set of insert commands copied from a dump of an actual running VTOrc instances. The relevant insert commands are here. // This is a dump taken from a test running 4 tablets, zone1-101 is the primary, zone1-100 is a replica, zone1-112 is a rdonly and zone2-200 is a cross-cell replica. 
initialSQL = []string{ - `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, - `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, - `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, - `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 
07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000112','localhost',6747,'2022-12-28 07:26:04','2022-12-28 07:26:04',213696377,'8.0.31','ROW',1,1,'vt-0000000112-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000112-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-9240-92a06c3be3c2','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10816929,0,0,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-9240-92a06c3be3c2',1,1,'',1000000000000000000,1,0,0,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000100','localhost',6711,'2022-12-28 07:26:04','2022-12-28 07:26:04',1094500338,'8.0.31','ROW',1,1,'vt-0000000100-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000100-relay-bin.000002',15815,0,1,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a5138-8680-11ed-acf8-d6b0ef9f4eaa','2022-12-28 07:26:04','',1,0,0,'Homebrew','8.0','FULL',10103920,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a5138-8680-11ed-acf8-d6b0ef9f4eaa',1,1,'',1000000000000000000,1,0,1,0);`, + `INSERT INTO database_instance VALUES('zone1-0000000101','localhost',6714,'2022-12-28 
07:26:04','2022-12-28 07:26:04',390954723,'8.0.31','ROW',1,1,'vt-0000000101-bin.000001',15583,'',0,0,0,0,0,'',0,'',0,NULL,NULL,0,'','',0,0,'',0,0,0,0,'zone1','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a4cc4-8680-11ed-a104-47706090afbd','2022-12-28 07:26:04','',0,0,0,'Homebrew','8.0','FULL',11366095,1,1,'ON',1,'','','729a4cc4-8680-11ed-a104-47706090afbd',-1,-1,'',1000000000000000000,1,1,0,2);`, + `INSERT INTO database_instance VALUES('zone2-0000000200','localhost',6756,'2022-12-28 07:26:05','2022-12-28 07:26:05',444286571,'8.0.31','ROW',1,1,'vt-0000000200-bin.000001',15963,'localhost',6714,8,4.0,1,1,'vt-0000000101-bin.000001',15583,'vt-0000000101-bin.000001',15583,0,0,1,'','',1,0,'vt-0000000200-relay-bin.000002',15815,0,1,0,'zone2','',0,0,0,1,'729a4cc4-8680-11ed-a104-47706090afbd:1-54','729a497c-8680-11ed-8ad4-3f51d747db75','2022-12-28 07:26:05','',1,0,0,'Homebrew','8.0','FULL',10443112,0,1,'ON',1,'729a4cc4-8680-11ed-a104-47706090afbd','','729a4cc4-8680-11ed-a104-47706090afbd,729a497c-8680-11ed-8ad4-3f51d747db75',1,1,'',1000000000000000000,1,0,1,0);`, `INSERT INTO vitess_tablet VALUES('zone1-0000000100','localhost',6711,'ks','0','zone1',2,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130307d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731307d20706f72745f6d61703a7b6b65793a227674222076616c75653a363730397d206b657973706163653a226b73222073686172643a22302220747970653a5245504c494341206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363731312064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, `INSERT INTO vitess_tablet VALUES('zone1-0000000101','localhost',6714,'ks','0','zone1',1,'2022-12-28 
07:23:25.129898+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3130317d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363731337d20706f72745f6d61703a7b6b65793a227674222076616c75653a363731327d206b657973706163653a226b73222073686172643a22302220747970653a5052494d415259206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a36373134207072696d6172795f7465726d5f73746172745f74696d653a7b7365636f6e64733a31363732323132323035206e616e6f7365636f6e64733a3132393839383030307d2064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, `INSERT INTO vitess_tablet VALUES('zone1-0000000112','localhost',6747,'ks','0','zone1',3,'0001-01-01 00:00:00+00:00',X'616c6961733a7b63656c6c3a227a6f6e653122207569643a3131327d20686f73746e616d653a226c6f63616c686f73742220706f72745f6d61703a7b6b65793a2267727063222076616c75653a363734367d20706f72745f6d61703a7b6b65793a227674222076616c75653a363734357d206b657973706163653a226b73222073686172643a22302220747970653a52444f4e4c59206d7973716c5f686f73746e616d653a226c6f63616c686f737422206d7973716c5f706f72743a363734372064625f7365727665725f76657273696f6e3a22382e302e3331222064656661756c745f636f6e6e5f636f6c6c6174696f6e3a3435');`, @@ -425,6 +424,48 @@ func TestGetReplicationAnalysisDecision(t *testing.T) { keyspaceWanted: "ks", shardWanted: "0", codeWanted: ReplicationStopped, + }, { + name: "ReplicaMisconfigured", + info: []*test.InfoForRecoveryAnalysis{{ + TabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_PRIMARY, + MysqlHostname: "localhost", + MysqlPort: 6708, + }, + DurabilityPolicy: "none", + LastCheckValid: 1, + CountReplicas: 4, + CountValidReplicas: 4, + CountValidReplicatingReplicas: 3, + CountValidOracleGTIDReplicas: 4, + CountLoggingReplicas: 2, + IsPrimary: 1, + }, { + TabletInfo: &topodatapb.Tablet{ + 
Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 100}, + Hostname: "localhost", + Keyspace: "ks", + Shard: "0", + Type: topodatapb.TabletType_REPLICA, + MysqlHostname: "localhost", + MysqlPort: 6709, + }, + DurabilityPolicy: "none", + PrimaryTabletInfo: &topodatapb.Tablet{ + Alias: &topodatapb.TabletAlias{Cell: "zon1", Uid: 101}, + }, + LastCheckValid: 1, + ReadOnly: 1, + ReplicaNetTimeout: 30, + HeartbeatInterval: 30, + }}, + keyspaceWanted: "ks", + shardWanted: "0", + codeWanted: ReplicaMisconfigured, }, { name: "ReplicaSemiSyncMustBeSet", @@ -864,7 +905,7 @@ func TestAuditInstanceAnalysisInChangelog(t *testing.T) { oldAnalysisChangeWriteCounter := analysisChangeWriteCounter recentInstantAnalysis = cache.New(tt.cacheExpiration, 100*time.Millisecond) - analysisChangeWriteCounter = metrics.NewCounter() + before := analysisChangeWriteCounter.Get() defer func() { // Set the old values back. @@ -877,7 +918,7 @@ func TestAuditInstanceAnalysisInChangelog(t *testing.T) { updates := []struct { tabletAlias string analysisCode AnalysisCode - writeCounterExpectation int + writeCounterExpectation int64 wantErr string }{ { @@ -908,7 +949,7 @@ func TestAuditInstanceAnalysisInChangelog(t *testing.T) { continue } require.NoError(t, err) - require.EqualValues(t, upd.writeCounterExpectation, analysisChangeWriteCounter.Count()) + require.EqualValues(t, upd.writeCounterExpectation, analysisChangeWriteCounter.Get()-before) } }) } @@ -917,21 +958,19 @@ func TestAuditInstanceAnalysisInChangelog(t *testing.T) { // TestPostProcessAnalyses tests the functionality of the postProcessAnalyses function. 
func TestPostProcessAnalyses(t *testing.T) { ks0 := ClusterInfo{ - Keyspace: "ks", - Shard: "0", - CountInstances: 4, + Keyspace: "ks", + Shard: "0", } ks80 := ClusterInfo{ - Keyspace: "ks", - Shard: "80-", - CountInstances: 3, + Keyspace: "ks", + Shard: "80-", } clusters := map[string]*clusterAnalysis{ getKeyspaceShardName(ks0.Keyspace, ks0.Shard): { - totalTablets: int(ks0.CountInstances), + totalTablets: 4, }, getKeyspaceShardName(ks80.Keyspace, ks80.Shard): { - totalTablets: int(ks80.CountInstances), + totalTablets: 3, }, } diff --git a/go/vt/vtorc/inst/analysis_test.go b/go/vt/vtorc/inst/analysis_test.go deleted file mode 100644 index 70849379a5e..00000000000 --- a/go/vt/vtorc/inst/analysis_test.go +++ /dev/null @@ -1,48 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package inst - -import ( - "testing" - - "vitess.io/vitess/go/vt/vtorc/config" - - "github.com/stretchr/testify/require" -) - -func init() { - config.MarkConfigurationLoaded() -} - -func TestGetAnalysisInstanceType(t *testing.T) { - { - analysis := &ReplicationAnalysis{} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "intermediate-primary") - } - { - analysis := &ReplicationAnalysis{IsPrimary: true} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "primary") - } - { - analysis := &ReplicationAnalysis{IsCoPrimary: true} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "co-primary") - } - { - analysis := &ReplicationAnalysis{IsPrimary: true, IsCoPrimary: true} - require.Equal(t, string(analysis.GetAnalysisInstanceType()), "co-primary") - } -} diff --git a/go/vt/vtorc/inst/audit_dao.go b/go/vt/vtorc/inst/audit_dao.go index 96db7f32ccf..eb6eb226b70 100644 --- a/go/vt/vtorc/inst/audit_dao.go +++ b/go/vt/vtorc/inst/audit_dao.go @@ -18,35 +18,17 @@ package inst import ( "fmt" - "log/syslog" "os" "time" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" - - "github.com/rcrowley/go-metrics" - "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" ) -// syslogWriter is optional, and defaults to nil (disabled) -var syslogWriter *syslog.Writer - -var auditOperationCounter = metrics.NewCounter() - -func init() { - _ = metrics.Register("audit.write", auditOperationCounter) -} - -// EnableSyslogWriter enables, if possible, writes to syslog. These will execute _in addition_ to normal logging -func EnableAuditSyslog() (err error) { - syslogWriter, err = syslog.New(syslog.LOG_ERR, "vtorc") - if err != nil { - syslogWriter = nil - } - return err -} +// The metric is registered with a deprecated name. The old metric name can be removed in v21. 
+var auditOperationCounter = stats.NewCounterWithDeprecatedName("AuditWrite", "audit.write", "Number of audit operations performed") // AuditOperation creates and writes a new audit entry by given params func AuditOperation(auditType string, tabletAlias string, message string) error { @@ -94,16 +76,13 @@ func AuditOperation(auditType string, tabletAlias string, message string) error } } logMessage := fmt.Sprintf("auditType:%s alias:%s keyspace:%s shard:%s message:%s", auditType, tabletAlias, keyspace, shard, message) - if syslogWriter != nil { + if syslogMessage(logMessage) { auditWrittenToFile = true - go func() { - _ = syslogWriter.Info(logMessage) - }() } if !auditWrittenToFile { log.Infof(logMessage) } - auditOperationCounter.Inc(1) + auditOperationCounter.Add(1) return nil } diff --git a/go/vt/vtorc/process/host.go b/go/vt/vtorc/inst/audit_dao_nosyslog.go similarity index 62% rename from go/vt/vtorc/process/host.go rename to go/vt/vtorc/inst/audit_dao_nosyslog.go index 21e3909cbdd..a61b3eb8f42 100644 --- a/go/vt/vtorc/process/host.go +++ b/go/vt/vtorc/inst/audit_dao_nosyslog.go @@ -1,5 +1,7 @@ +//go:build windows + /* - Copyright 2015 Shlomi Noach, courtesy Booking.com + Copyright 2014 Outbrain Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,20 +16,17 @@ limitations under the License. */ -package process +package inst import ( - "os" - - "vitess.io/vitess/go/vt/log" + "errors" ) -var ThisHostname string +// EnableAuditSyslog enables, if possible, writes to syslog. These will execute _in addition_ to normal logging +func EnableAuditSyslog() (err error) { + return errors.New("syslog is not supported on windows") +} -func init() { - var err error - ThisHostname, err = os.Hostname() - if err != nil { - log.Fatalf("Cannot resolve self hostname; required. Aborting. 
%+v", err) - } +func syslogMessage(logMessage string) bool { + return false } diff --git a/go/vt/vtorc/inst/audit_dao_syslog.go b/go/vt/vtorc/inst/audit_dao_syslog.go new file mode 100644 index 00000000000..2567409f03e --- /dev/null +++ b/go/vt/vtorc/inst/audit_dao_syslog.go @@ -0,0 +1,43 @@ +//go:build !windows + +/* + Copyright 2014 Outbrain Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package inst + +import "log/syslog" + +// syslogWriter is optional, and defaults to nil (disabled) +var syslogWriter *syslog.Writer + +// EnableAuditSyslog enables, if possible, writes to syslog. These will execute _in addition_ to normal logging +func EnableAuditSyslog() (err error) { + syslogWriter, err = syslog.New(syslog.LOG_ERR, "vtorc") + if err != nil { + syslogWriter = nil + } + return err +} + +func syslogMessage(logMessage string) bool { + if syslogWriter == nil { + return false + } + go func() { + _ = syslogWriter.Info(logMessage) + }() + return true +} diff --git a/go/vt/vtorc/inst/binlog.go b/go/vt/vtorc/inst/binlog.go index 066c2f5c598..9c115e4e457 100644 --- a/go/vt/vtorc/inst/binlog.go +++ b/go/vt/vtorc/inst/binlog.go @@ -68,7 +68,7 @@ func (binlogCoordinates BinlogCoordinates) String() string { return binlogCoordinates.DisplayString() } -// Equals tests equality of this corrdinate and another one. +// Equals tests equality of this coordinate and another one. 
func (binlogCoordinates *BinlogCoordinates) Equals(other *BinlogCoordinates) bool { if other == nil { return false @@ -106,8 +106,8 @@ func (binlogCoordinates *BinlogCoordinates) FileSmallerThan(other *BinlogCoordin return binlogCoordinates.LogFile < other.LogFile } -// FileNumberDistance returns the numeric distance between this corrdinate's file number and the other's. -// Effectively it means "how many roatets/FLUSHes would make these coordinates's file reach the other's" +// FileNumberDistance returns the numeric distance between this coordinate's file number and the other's. +// Effectively it means "how many rotates/FLUSHes would make these coordinates's file reach the other's" func (binlogCoordinates *BinlogCoordinates) FileNumberDistance(other *BinlogCoordinates) int { thisNumber, _ := binlogCoordinates.FileNumber() otherNumber, _ := other.FileNumber() @@ -163,7 +163,7 @@ func (binlogCoordinates *BinlogCoordinates) NextFileCoordinates() (BinlogCoordin return result, nil } -// Detach returns a detahced form of coordinates +// Detach returns a detached form of coordinates func (binlogCoordinates *BinlogCoordinates) Detach() (detachedCoordinates BinlogCoordinates) { detachedCoordinates = BinlogCoordinates{LogFile: fmt.Sprintf("//%s:%d", binlogCoordinates.LogFile, binlogCoordinates.LogPos), LogPos: binlogCoordinates.LogPos} return detachedCoordinates diff --git a/go/vt/vtorc/inst/cluster.go b/go/vt/vtorc/inst/cluster.go index c3a77485e74..f163885a283 100644 --- a/go/vt/vtorc/inst/cluster.go +++ b/go/vt/vtorc/inst/cluster.go @@ -18,16 +18,6 @@ package inst // ClusterInfo makes for a cluster status/info summary type ClusterInfo struct { - Keyspace string - Shard string - CountInstances uint - HeuristicLag int64 - HasAutomatedPrimaryRecovery bool - HasAutomatedIntermediatePrimaryRecovery bool -} - -// ReadRecoveryInfo -func (clusterInfo *ClusterInfo) ReadRecoveryInfo() { - clusterInfo.HasAutomatedPrimaryRecovery = true - 
clusterInfo.HasAutomatedIntermediatePrimaryRecovery = true + Keyspace string + Shard string } diff --git a/go/vt/vtorc/inst/instance.go b/go/vt/vtorc/inst/instance.go index 1216d4c24ae..36f47b7ab0b 100644 --- a/go/vt/vtorc/inst/instance.go +++ b/go/vt/vtorc/inst/instance.go @@ -45,6 +45,8 @@ type Instance struct { SourceUUID string AncestryUUID string + ReplicaNetTimeout int32 + HeartbeatInterval float64 ReplicationSQLThreadRuning bool ReplicationIOThreadRuning bool ReplicationSQLThreadState ReplicationThreadState @@ -63,7 +65,7 @@ type Instance struct { LastSQLError string LastIOError string SecondsBehindPrimary sql.NullInt64 - SQLDelay uint + SQLDelay uint32 ExecutedGtidSet string GtidPurged string GtidErrant string diff --git a/go/vt/vtorc/inst/instance_dao.go b/go/vt/vtorc/inst/instance_dao.go index 211ddce69b1..dddfcf640fe 100644 --- a/go/vt/vtorc/inst/instance_dao.go +++ b/go/vt/vtorc/inst/instance_dao.go @@ -17,7 +17,7 @@ package inst import ( - "bytes" + "encoding/json" "errors" "fmt" "regexp" @@ -29,7 +29,6 @@ import ( "time" "github.com/patrickmn/go-cache" - "github.com/rcrowley/go-metrics" "github.com/sjmudd/stopwatch" "vitess.io/vitess/go/mysql/replication" @@ -52,26 +51,27 @@ const ( backendDBConcurrency = 20 ) -var instanceReadChan = make(chan bool, backendDBConcurrency) -var instanceWriteChan = make(chan bool, backendDBConcurrency) +var ( + instanceReadChan = make(chan bool, backendDBConcurrency) + instanceWriteChan = make(chan bool, backendDBConcurrency) +) var forgetAliases *cache.Cache -var accessDeniedCounter = metrics.NewCounter() -var readTopologyInstanceCounter = metrics.NewCounter() -var readInstanceCounter = metrics.NewCounter() -var writeInstanceCounter = metrics.NewCounter() -var backendWrites = collection.CreateOrReturnCollection("BACKEND_WRITES") -var writeBufferLatency = stopwatch.NewNamedStopwatch() +var ( + // The metrics are registered with deprecated names. The old metric names can be removed in v21. 
+ readTopologyInstanceCounter = stats.NewCounterWithDeprecatedName("InstanceReadTopology", "instance.read_topology", "Number of times an instance was read from the topology") + readInstanceCounter = stats.NewCounterWithDeprecatedName("InstanceRead", "instance.read", "Number of times an instance was read") + backendWrites = collection.CreateOrReturnCollection("BACKEND_WRITES") + writeBufferLatency = stopwatch.NewNamedStopwatch() +) -var emptyQuotesRegexp = regexp.MustCompile(`^""$`) -var cacheInitializationCompleted atomic.Bool +var ( + emptyQuotesRegexp = regexp.MustCompile(`^""$`) + cacheInitializationCompleted atomic.Bool +) func init() { - _ = metrics.Register("instance.access_denied", accessDeniedCounter) - _ = metrics.Register("instance.read_topology", readTopologyInstanceCounter) - _ = metrics.Register("instance.read", readInstanceCounter) - _ = metrics.Register("instance.write", writeInstanceCounter) _ = writeBufferLatency.AddMany([]string{"wait", "write"}) writeBufferLatency.Start("wait") @@ -84,7 +84,7 @@ func initializeInstanceDao() { cacheInitializationCompleted.Store(true) } -// ExecDBWriteFunc chooses how to execute a write onto the database: whether synchronuously or not +// ExecDBWriteFunc chooses how to execute a write onto the database: whether synchronously or not func ExecDBWriteFunc(f func() error) error { m := query.NewMetric() @@ -113,9 +113,11 @@ func ExecDBWriteFunc(f func() error) error { } func ExpireTableData(tableName string, timestampColumn string) error { - query := fmt.Sprintf("delete from %s where %s < NOW() - INTERVAL ? DAY", tableName, timestampColumn) writeFunc := func() error { - _, err := db.ExecVTOrc(query, config.Config.AuditPurgeDays) + _, err := db.ExecVTOrc( + fmt.Sprintf("delete from %s where %s < NOW() - INTERVAL ? 
DAY", tableName, timestampColumn), + config.Config.AuditPurgeDays, + ) return err } return ExecDBWriteFunc(writeFunc) @@ -152,13 +154,6 @@ func RegisterStats() { }) } -// ReadTopologyInstance collects information on the state of a MySQL -// server and writes the result synchronously to the vtorc -// backend. -func ReadTopologyInstance(tabletAlias string) (*Instance, error) { - return ReadTopologyInstanceBufferable(tabletAlias, nil) -} - // ReadTopologyInstanceBufferable connects to a topology MySQL instance // and collects information on the server and its replication state. // It writes the information retrieved into vtorc's backend. @@ -173,7 +168,7 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named var waitGroup sync.WaitGroup var tablet *topodatapb.Tablet - var fullStatus *replicationdatapb.FullStatus + var fs *replicationdatapb.FullStatus readingStartTime := time.Now() instance := NewInstance() instanceFound := false @@ -203,7 +198,7 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named goto Cleanup } - fullStatus, err = FullStatus(tabletAlias) + fs, err = fullStatus(tabletAlias) if err != nil { goto Cleanup } @@ -213,48 +208,48 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.Port = int(tablet.MysqlPort) { // We begin with a few operations we can run concurrently, and which do not depend on anything - instance.ServerID = uint(fullStatus.ServerId) - instance.Version = fullStatus.Version - instance.ReadOnly = fullStatus.ReadOnly - instance.LogBinEnabled = fullStatus.LogBinEnabled - instance.BinlogFormat = fullStatus.BinlogFormat - instance.LogReplicationUpdatesEnabled = fullStatus.LogReplicaUpdates - instance.VersionComment = fullStatus.VersionComment - - if instance.LogBinEnabled && fullStatus.PrimaryStatus != nil { - binlogPos, err := getBinlogCoordinatesFromPositionString(fullStatus.PrimaryStatus.FilePosition) + instance.ServerID = uint(fs.ServerId) + 
instance.Version = fs.Version + instance.ReadOnly = fs.ReadOnly + instance.LogBinEnabled = fs.LogBinEnabled + instance.BinlogFormat = fs.BinlogFormat + instance.LogReplicationUpdatesEnabled = fs.LogReplicaUpdates + instance.VersionComment = fs.VersionComment + + if instance.LogBinEnabled && fs.PrimaryStatus != nil { + binlogPos, err := getBinlogCoordinatesFromPositionString(fs.PrimaryStatus.FilePosition) instance.SelfBinlogCoordinates = binlogPos errorChan <- err } - instance.SemiSyncPrimaryEnabled = fullStatus.SemiSyncPrimaryEnabled - instance.SemiSyncReplicaEnabled = fullStatus.SemiSyncReplicaEnabled - instance.SemiSyncPrimaryWaitForReplicaCount = uint(fullStatus.SemiSyncWaitForReplicaCount) - instance.SemiSyncPrimaryTimeout = fullStatus.SemiSyncPrimaryTimeout + instance.SemiSyncPrimaryEnabled = fs.SemiSyncPrimaryEnabled + instance.SemiSyncReplicaEnabled = fs.SemiSyncReplicaEnabled + instance.SemiSyncPrimaryWaitForReplicaCount = uint(fs.SemiSyncWaitForReplicaCount) + instance.SemiSyncPrimaryTimeout = fs.SemiSyncPrimaryTimeout - instance.SemiSyncPrimaryClients = uint(fullStatus.SemiSyncPrimaryClients) - instance.SemiSyncPrimaryStatus = fullStatus.SemiSyncPrimaryStatus - instance.SemiSyncReplicaStatus = fullStatus.SemiSyncReplicaStatus + instance.SemiSyncPrimaryClients = uint(fs.SemiSyncPrimaryClients) + instance.SemiSyncPrimaryStatus = fs.SemiSyncPrimaryStatus + instance.SemiSyncReplicaStatus = fs.SemiSyncReplicaStatus if instance.IsOracleMySQL() || instance.IsPercona() { // Stuff only supported on Oracle / Percona MySQL // ... 
// @@gtid_mode only available in Oracle / Percona MySQL >= 5.6 - instance.GTIDMode = fullStatus.GtidMode - instance.ServerUUID = fullStatus.ServerUuid - if fullStatus.PrimaryStatus != nil { - GtidExecutedPos, err := replication.DecodePosition(fullStatus.PrimaryStatus.Position) + instance.GTIDMode = fs.GtidMode + instance.ServerUUID = fs.ServerUuid + if fs.PrimaryStatus != nil { + GtidExecutedPos, err := replication.DecodePosition(fs.PrimaryStatus.Position) errorChan <- err if err == nil && GtidExecutedPos.GTIDSet != nil { instance.ExecutedGtidSet = GtidExecutedPos.GTIDSet.String() } } - GtidPurgedPos, err := replication.DecodePosition(fullStatus.GtidPurged) + GtidPurgedPos, err := replication.DecodePosition(fs.GtidPurged) errorChan <- err if err == nil && GtidPurgedPos.GTIDSet != nil { instance.GtidPurged = GtidPurgedPos.GTIDSet.String() } - instance.BinlogRowImage = fullStatus.BinlogRowImage + instance.BinlogRowImage = fs.BinlogRowImage if instance.GTIDMode != "" && instance.GTIDMode != "OFF" { instance.SupportsOracleGTID = true @@ -264,45 +259,45 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named instance.ReplicationIOThreadState = ReplicationThreadStateNoThread instance.ReplicationSQLThreadState = ReplicationThreadStateNoThread - if fullStatus.ReplicationStatus != nil { - instance.HasReplicationCredentials = fullStatus.ReplicationStatus.SourceUser != "" + if fs.ReplicationStatus != nil { + instance.HasReplicationCredentials = fs.ReplicationStatus.SourceUser != "" - instance.ReplicationIOThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fullStatus.ReplicationStatus.IoState)) - instance.ReplicationSQLThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fullStatus.ReplicationStatus.SqlState)) + instance.ReplicationIOThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fs.ReplicationStatus.IoState)) + 
instance.ReplicationSQLThreadState = ReplicationThreadStateFromReplicationState(replication.ReplicationState(fs.ReplicationStatus.SqlState)) instance.ReplicationIOThreadRuning = instance.ReplicationIOThreadState.IsRunning() instance.ReplicationSQLThreadRuning = instance.ReplicationSQLThreadState.IsRunning() - binlogPos, err := getBinlogCoordinatesFromPositionString(fullStatus.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition) + binlogPos, err := getBinlogCoordinatesFromPositionString(fs.ReplicationStatus.RelayLogSourceBinlogEquivalentPosition) instance.ReadBinlogCoordinates = binlogPos errorChan <- err - binlogPos, err = getBinlogCoordinatesFromPositionString(fullStatus.ReplicationStatus.FilePosition) + binlogPos, err = getBinlogCoordinatesFromPositionString(fs.ReplicationStatus.FilePosition) instance.ExecBinlogCoordinates = binlogPos errorChan <- err instance.IsDetached, _ = instance.ExecBinlogCoordinates.ExtractDetachedCoordinates() - binlogPos, err = getBinlogCoordinatesFromPositionString(fullStatus.ReplicationStatus.RelayLogFilePosition) + binlogPos, err = getBinlogCoordinatesFromPositionString(fs.ReplicationStatus.RelayLogFilePosition) instance.RelaylogCoordinates = binlogPos instance.RelaylogCoordinates.Type = RelayLog errorChan <- err - instance.LastSQLError = emptyQuotesRegexp.ReplaceAllString(strconv.QuoteToASCII(fullStatus.ReplicationStatus.LastSqlError), "") - instance.LastIOError = emptyQuotesRegexp.ReplaceAllString(strconv.QuoteToASCII(fullStatus.ReplicationStatus.LastIoError), "") + instance.LastSQLError = emptyQuotesRegexp.ReplaceAllString(strconv.QuoteToASCII(fs.ReplicationStatus.LastSqlError), "") + instance.LastIOError = emptyQuotesRegexp.ReplaceAllString(strconv.QuoteToASCII(fs.ReplicationStatus.LastIoError), "") - instance.SQLDelay = uint(fullStatus.ReplicationStatus.SqlDelay) - instance.UsingOracleGTID = fullStatus.ReplicationStatus.AutoPosition - instance.UsingMariaDBGTID = fullStatus.ReplicationStatus.UsingGtid - instance.SourceUUID = 
fullStatus.ReplicationStatus.SourceUuid - instance.HasReplicationFilters = fullStatus.ReplicationStatus.HasReplicationFilters + instance.SQLDelay = fs.ReplicationStatus.SqlDelay + instance.UsingOracleGTID = fs.ReplicationStatus.AutoPosition + instance.UsingMariaDBGTID = fs.ReplicationStatus.UsingGtid + instance.SourceUUID = fs.ReplicationStatus.SourceUuid + instance.HasReplicationFilters = fs.ReplicationStatus.HasReplicationFilters - instance.SourceHost = fullStatus.ReplicationStatus.SourceHost - instance.SourcePort = int(fullStatus.ReplicationStatus.SourcePort) + instance.SourceHost = fs.ReplicationStatus.SourceHost + instance.SourcePort = int(fs.ReplicationStatus.SourcePort) - if fullStatus.ReplicationStatus.ReplicationLagUnknown { + if fs.ReplicationStatus.ReplicationLagUnknown { instance.SecondsBehindPrimary.Valid = false } else { instance.SecondsBehindPrimary.Valid = true - instance.SecondsBehindPrimary.Int64 = int64(fullStatus.ReplicationStatus.ReplicationLagSeconds) + instance.SecondsBehindPrimary.Int64 = int64(fs.ReplicationStatus.ReplicationLagSeconds) } if instance.SecondsBehindPrimary.Valid && instance.SecondsBehindPrimary.Int64 < 0 { log.Warningf("Alias: %+v, instance.SecondsBehindPrimary < 0 [%+v], correcting to 0", tabletAlias, instance.SecondsBehindPrimary.Int64) @@ -311,7 +306,12 @@ func ReadTopologyInstanceBufferable(tabletAlias string, latency *stopwatch.Named // And until told otherwise: instance.ReplicationLagSeconds = instance.SecondsBehindPrimary - instance.AllowTLS = fullStatus.ReplicationStatus.SslAllowed + instance.AllowTLS = fs.ReplicationStatus.SslAllowed + } + + if fs.ReplicationConfiguration != nil { + instance.ReplicaNetTimeout = fs.ReplicationConfiguration.ReplicaNetTimeout + instance.HeartbeatInterval = fs.ReplicationConfiguration.HeartbeatInterval } instanceFound = true @@ -384,7 +384,7 @@ Cleanup: } latency.Stop("instance") - readTopologyInstanceCounter.Inc(1) + readTopologyInstanceCounter.Add(1) if instanceFound { 
instance.LastDiscoveryLatency = time.Since(readingStartTime) @@ -494,6 +494,8 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.LogReplicationUpdatesEnabled = m.GetBool("log_replica_updates") instance.SourceHost = m.GetString("source_host") instance.SourcePort = m.GetInt("source_port") + instance.ReplicaNetTimeout = m.GetInt32("replica_net_timeout") + instance.HeartbeatInterval = m.GetFloat64("heartbeat_interval") instance.ReplicationSQLThreadRuning = m.GetBool("replica_sql_running") instance.ReplicationIOThreadRuning = m.GetBool("replica_io_running") instance.ReplicationSQLThreadState = ReplicationThreadState(m.GetInt("replication_sql_thread_state")) @@ -522,7 +524,7 @@ func readInstanceRow(m sqlutils.RowMap) *Instance { instance.LastIOError = m.GetString("last_io_error") instance.SecondsBehindPrimary = m.GetNullInt64("replication_lag_seconds") instance.ReplicationLagSeconds = m.GetNullInt64("replica_lag_seconds") - instance.SQLDelay = m.GetUint("sql_delay") + instance.SQLDelay = m.GetUint32("sql_delay") instance.DataCenter = m.GetString("data_center") instance.Region = m.GetString("region") instance.PhysicalEnvironment = m.GetString("physical_environment") @@ -613,7 +615,7 @@ func ReadInstance(tabletAlias string) (*Instance, bool, error) { instances, err := readInstancesByCondition(condition, sqlutils.Args(tabletAlias), "") // We know there will be at most one (alias is the PK). // And we expect to find one. - readInstanceCounter.Inc(1) + readInstanceCounter.Add(1) if len(instances) == 0 { return nil, false, err } @@ -623,35 +625,6 @@ func ReadInstance(tabletAlias string) (*Instance, bool, error) { return instances[0], true, nil } -// ReadReplicaInstances reads replicas of a given primary -func ReadReplicaInstances(primaryHost string, primaryPort int) ([](*Instance), error) { - condition := ` - source_host = ? - and source_port = ? 
- ` - return readInstancesByCondition(condition, sqlutils.Args(primaryHost, primaryPort), "") -} - -// ReadReplicaInstancesIncludingBinlogServerSubReplicas returns a list of direct slves including any replicas -// of a binlog server replica -func ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost string, primaryPort int) ([](*Instance), error) { - replicas, err := ReadReplicaInstances(primaryHost, primaryPort) - if err != nil { - return replicas, err - } - for _, replica := range replicas { - replica := replica - if replica.IsBinlogServer() { - binlogServerReplicas, err := ReadReplicaInstancesIncludingBinlogServerSubReplicas(replica.Hostname, replica.Port) - if err != nil { - return replicas, err - } - replicas = append(replicas, binlogServerReplicas...) - } - } - return replicas, err -} - // ReadProblemInstances reads all instances with problems func ReadProblemInstances(keyspace string, shard string) ([](*Instance), error) { condition := ` @@ -749,12 +722,10 @@ func ReadOutdatedInstanceKeys() ([]string, error) { // We don;t return an error because we want to keep filling the outdated instances list. 
return nil }) - if err != nil { log.Error(err) } return res, err - } func mkInsertOdku(table string, columns []string, values []string, nrRows int, insertIgnore bool) (string, error) { @@ -768,21 +739,21 @@ func mkInsertOdku(table string, columns []string, values []string, nrRows int, i return "", errors.New("number of values must be equal to number of columns") } - var q bytes.Buffer + var q strings.Builder var ignore string if insertIgnore { ignore = "ignore" } - var valRow = fmt.Sprintf("(%s)", strings.Join(values, ", ")) - var val bytes.Buffer + valRow := fmt.Sprintf("(%s)", strings.Join(values, ", ")) + var val strings.Builder val.WriteString(valRow) for i := 1; i < nrRows; i++ { val.WriteString(",\n ") // indent VALUES, see below val.WriteString(valRow) } - var col = strings.Join(columns, ", ") - var odku bytes.Buffer + col := strings.Join(columns, ", ") + var odku strings.Builder odku.WriteString(fmt.Sprintf("%s=VALUES(%s)", columns[0], columns[0])) for _, c := range columns[1:] { odku.WriteString(", ") @@ -810,7 +781,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo if !instanceWasActuallyFound { insertIgnore = true } - var columns = []string{ + columns := []string{ "alias", "hostname", "port", @@ -832,6 +803,8 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "binary_log_pos", "source_host", "source_port", + "replica_net_timeout", + "heartbeat_interval", "replica_sql_running", "replica_io_running", "replication_sql_thread_state", @@ -876,7 +849,7 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo "last_discovery_latency", } - var values = make([]string, len(columns)) + values := make([]string, len(columns)) for i := range columns { values[i] = "?" 
} @@ -911,6 +884,8 @@ func mkInsertOdkuForInstances(instances []*Instance, instanceWasActuallyFound bo args = append(args, instance.SelfBinlogCoordinates.LogPos) args = append(args, instance.SourceHost) args = append(args, instance.SourcePort) + args = append(args, instance.ReplicaNetTimeout) + args = append(args, instance.HeartbeatInterval) args = append(args, instance.ReplicationSQLThreadRuning) args = append(args, instance.ReplicationIOThreadRuning) args = append(args, instance.ReplicationSQLThreadState) @@ -1102,7 +1077,7 @@ func ForgetInstance(tabletAlias string) error { return nil } -// ForgetLongUnseenInstances will remove entries of all instacnes that have long since been last seen. +// ForgetLongUnseenInstances will remove entries of all instances that have long since been last seen. func ForgetLongUnseenInstances() error { sqlResult, err := db.ExecVTOrc(` delete @@ -1152,43 +1127,6 @@ func SnapshotTopologies() error { return ExecDBWriteFunc(writeFunc) } -// RecordStaleInstanceBinlogCoordinates snapshots the binlog coordinates of instances -func RecordStaleInstanceBinlogCoordinates(tabletAlias string, binlogCoordinates *BinlogCoordinates) error { - args := sqlutils.Args( - tabletAlias, - binlogCoordinates.LogFile, binlogCoordinates.LogPos, - ) - _, err := db.ExecVTOrc(` - delete from - database_instance_stale_binlog_coordinates - where - alias = ? - and ( - binary_log_file != ? - or binary_log_pos != ? - ) - `, - args..., - ) - if err != nil { - log.Error(err) - return err - } - _, err = db.ExecVTOrc(` - insert ignore into - database_instance_stale_binlog_coordinates ( - alias, binary_log_file, binary_log_pos, first_seen - ) - values ( - ?, ?, ?, NOW() - )`, - args...) 
- if err != nil { - log.Error(err) - } - return err -} - func ExpireStaleInstanceBinlogCoordinates() error { expireSeconds := config.Config.ReasonableReplicationLagSeconds * 2 if expireSeconds < config.StaleInstanceCoordinatesExpireSeconds { @@ -1207,3 +1145,32 @@ func ExpireStaleInstanceBinlogCoordinates() error { } return ExecDBWriteFunc(writeFunc) } + +// GetDatabaseState takes the snapshot of the database and returns it. +func GetDatabaseState() (string, error) { + type tableState struct { + TableName string + Rows []sqlutils.RowMap + } + + var dbState []tableState + for _, tableName := range db.TableNames { + ts := tableState{ + TableName: tableName, + } + err := db.QueryVTOrc("select * from "+tableName, nil, func(rowMap sqlutils.RowMap) error { + ts.Rows = append(ts.Rows, rowMap) + return nil + }) + if err != nil { + return "", err + } + dbState = append(dbState, ts) + } + jsonData, err := json.MarshalIndent(dbState, "", "\t") + if err != nil { + return "", err + } + + return string(jsonData), nil +} diff --git a/go/vt/vtorc/inst/instance_dao_test.go b/go/vt/vtorc/inst/instance_dao_test.go index 549389f91fe..741fc48bca9 100644 --- a/go/vt/vtorc/inst/instance_dao_test.go +++ b/go/vt/vtorc/inst/instance_dao_test.go @@ -61,18 +61,18 @@ func TestMkInsertOdkuSingle(t *testing.T) { s1 := `INSERT ignore INTO database_instance (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, - binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_net_timeout, heartbeat_interval, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, 
executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, last_discovery_latency, last_seen) VALUES - (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) ON DUPLICATE KEY UPDATE - alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), 
replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), + alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), 
source_host=VALUES(source_host), source_port=VALUES(source_port), replica_net_timeout=VALUES(replica_net_timeout), heartbeat_interval=VALUES(heartbeat_interval), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), 
semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen) ` a1 := `zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, - FULL, false, false, , 0, , 0, + FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0,` sql1, args1, err := mkInsertOdkuForInstances(instances[:1], false, true) @@ -88,23 +88,23 @@ func TestMkInsertOdkuThree(t *testing.T) { s3 := `INSERT INTO database_instance (alias, hostname, port, last_checked, last_attempted_check, last_check_partial_success, server_id, server_uuid, version, major_version, version_comment, binlog_server, read_only, binlog_format, - binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, + binlog_row_image, log_bin, log_replica_updates, binary_log_file, binary_log_pos, source_host, source_port, replica_net_timeout, heartbeat_interval, replica_sql_running, replica_io_running, replication_sql_thread_state, replication_io_thread_state, has_replication_filters, supports_oracle_gtid, oracle_gtid, source_uuid, ancestry_uuid, executed_gtid_set, gtid_mode, gtid_purged, gtid_errant, mariadb_gtid, pseudo_gtid, source_log_file, read_source_log_pos, relay_source_log_file, exec_source_log_pos, relay_log_file, relay_log_pos, last_sql_error, last_io_error, replication_lag_seconds, replica_lag_seconds, sql_delay, data_center, region, physical_environment, replication_depth, is_co_primary, has_replication_credentials, allow_tls, semi_sync_enforced, semi_sync_primary_enabled, semi_sync_primary_timeout, semi_sync_primary_wait_for_replica_count, semi_sync_replica_enabled, semi_sync_primary_status, semi_sync_primary_clients, semi_sync_replica_status, 
last_discovery_latency, last_seen) VALUES - (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), - (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), - (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()), + (?, ?, ?, NOW(), NOW(), 1, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NOW()) ON DUPLICATE KEY UPDATE - alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), 
binary_log_pos=VALUES(binary_log_pos), source_host=VALUES(source_host), source_port=VALUES(source_port), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), + alias=VALUES(alias), hostname=VALUES(hostname), port=VALUES(port), last_checked=VALUES(last_checked), last_attempted_check=VALUES(last_attempted_check), last_check_partial_success=VALUES(last_check_partial_success), server_id=VALUES(server_id), server_uuid=VALUES(server_uuid), version=VALUES(version), major_version=VALUES(major_version), version_comment=VALUES(version_comment), binlog_server=VALUES(binlog_server), read_only=VALUES(read_only), binlog_format=VALUES(binlog_format), binlog_row_image=VALUES(binlog_row_image), log_bin=VALUES(log_bin), log_replica_updates=VALUES(log_replica_updates), binary_log_file=VALUES(binary_log_file), binary_log_pos=VALUES(binary_log_pos), 
source_host=VALUES(source_host), source_port=VALUES(source_port), replica_net_timeout=VALUES(replica_net_timeout), heartbeat_interval=VALUES(heartbeat_interval), replica_sql_running=VALUES(replica_sql_running), replica_io_running=VALUES(replica_io_running), replication_sql_thread_state=VALUES(replication_sql_thread_state), replication_io_thread_state=VALUES(replication_io_thread_state), has_replication_filters=VALUES(has_replication_filters), supports_oracle_gtid=VALUES(supports_oracle_gtid), oracle_gtid=VALUES(oracle_gtid), source_uuid=VALUES(source_uuid), ancestry_uuid=VALUES(ancestry_uuid), executed_gtid_set=VALUES(executed_gtid_set), gtid_mode=VALUES(gtid_mode), gtid_purged=VALUES(gtid_purged), gtid_errant=VALUES(gtid_errant), mariadb_gtid=VALUES(mariadb_gtid), pseudo_gtid=VALUES(pseudo_gtid), source_log_file=VALUES(source_log_file), read_source_log_pos=VALUES(read_source_log_pos), relay_source_log_file=VALUES(relay_source_log_file), exec_source_log_pos=VALUES(exec_source_log_pos), relay_log_file=VALUES(relay_log_file), relay_log_pos=VALUES(relay_log_pos), last_sql_error=VALUES(last_sql_error), last_io_error=VALUES(last_io_error), replication_lag_seconds=VALUES(replication_lag_seconds), replica_lag_seconds=VALUES(replica_lag_seconds), sql_delay=VALUES(sql_delay), data_center=VALUES(data_center), region=VALUES(region), physical_environment=VALUES(physical_environment), replication_depth=VALUES(replication_depth), is_co_primary=VALUES(is_co_primary), has_replication_credentials=VALUES(has_replication_credentials), allow_tls=VALUES(allow_tls), semi_sync_enforced=VALUES(semi_sync_enforced), semi_sync_primary_enabled=VALUES(semi_sync_primary_enabled), semi_sync_primary_timeout=VALUES(semi_sync_primary_timeout), semi_sync_primary_wait_for_replica_count=VALUES(semi_sync_primary_wait_for_replica_count), semi_sync_replica_enabled=VALUES(semi_sync_replica_enabled), semi_sync_primary_status=VALUES(semi_sync_primary_status), 
semi_sync_primary_clients=VALUES(semi_sync_primary_clients), semi_sync_replica_status=VALUES(semi_sync_replica_status), last_discovery_latency=VALUES(last_discovery_latency), last_seen=VALUES(last_seen) ` a3 := ` - zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, - zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, - zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i710, i710, 3306, 710, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 10, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i720, i720, 3306, 720, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 20, , 0, , , {0 false}, {0 false}, 0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, + zone1-i730, i730, 3306, 730, , 5.6.7, 5.6, MySQL, false, false, STATEMENT, FULL, false, false, , 0, , 0, 0, 0, false, false, 0, 0, false, false, false, , , , , , , false, false, , 0, mysql.000007, 30, , 0, , , {0 false}, {0 false}, 
0, , , , 0, false, false, false, false, false, 0, 0, false, false, 0, false, 0, ` sql3, args3, err := mkInsertOdkuForInstances(instances[:3], true, true) @@ -196,53 +196,6 @@ func TestReadInstance(t *testing.T) { } } -// TestReadReplicaInstances is used to test the functionality of ReadReplicaInstances and verify its failure modes and successes. -func TestReadReplicaInstances(t *testing.T) { - tests := []struct { - name string - tabletPort int - replicasLen int - }{ - { - name: "Read success - Multiple replicas", - // This tabletPort corresponds to zone1-0000000101. That is the primary for the data inserted. - // Check initialSQL for more details. - tabletPort: 6714, - replicasLen: 3, - }, { - name: "Unknown tablet", - // This tabletPort corresponds to none of the tablets. - // Check initialSQL for more details. - tabletPort: 343, - replicasLen: 0, - }, { - name: "Read success - No replicas", - // This tabletPort corresponds to zone1-0000000100. That is a replica tablet, with no replicas of its own. - // Check initialSQL for more details. - tabletPort: 6711, - replicasLen: 0, - }, - } - - // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. - defer func() { - db.ClearVTOrcDatabase() - }() - for _, query := range initialSQL { - _, err := db.ExecVTOrc(query) - require.NoError(t, err) - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - - instances, err := ReadReplicaInstances("localhost", tt.tabletPort) - require.NoError(t, err) - require.EqualValues(t, tt.replicasLen, len(instances)) - }) - } -} - // TestReadProblemInstances is used to test the functionality of ReadProblemInstances and verify its failure modes and successes. func TestReadProblemInstances(t *testing.T) { // The test is intended to be used as follows. The initial data is stored into the database. Following this, some specific queries are run that each individual test specifies to get the desired state. 
@@ -439,6 +392,16 @@ func TestReadInstancesByCondition(t *testing.T) { }, { name: "No qualifying tablets", condition: "replication_depth=15", + }, { + name: "Replica net timeout being 8", + condition: "replica_net_timeout=8", + sort: "alias asc", + instancesRequired: []string{"zone1-0000000100", "zone1-0000000112", "zone2-0000000200"}, + }, { + name: "heartbeat interval being 4", + condition: "heartbeat_interval=4.0", + sort: "alias asc", + instancesRequired: []string{"zone1-0000000100", "zone1-0000000112", "zone2-0000000200"}, }, } @@ -746,3 +709,76 @@ func waitForCacheInitialization() { time.Sleep(100 * time.Millisecond) } } + +func TestGetDatabaseState(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + + for _, query := range initialSQL { + _, err := db.ExecVTOrc(query) + require.NoError(t, err) + } + + ds, err := GetDatabaseState() + require.NoError(t, err) + require.Contains(t, ds, `"alias": "zone1-0000000112"`) +} + +func TestExpireTableData(t *testing.T) { + oldVal := config.Config.AuditPurgeDays + config.Config.AuditPurgeDays = 10 + defer func() { + config.Config.AuditPurgeDays = oldVal + }() + + tests := []struct { + name string + tableName string + insertQuery string + timestampColumn string + expectedRowCount int + }{ + { + name: "ExpireAudit", + tableName: "audit", + timestampColumn: "audit_timestamp", + expectedRowCount: 1, + insertQuery: `insert into audit (audit_id, audit_timestamp, audit_type, alias, message, keyspace, shard) values +(1, NOW() - INTERVAL 50 DAY, 'a','a','a','a','a'), +(2, NOW() - INTERVAL 5 DAY, 'a','a','a','a','a')`, + }, + { + name: "ExpireRecoveryDetectionHistory", + tableName: "recovery_detection", + timestampColumn: "detection_timestamp", + expectedRowCount: 2, + insertQuery: `insert into recovery_detection (detection_id, detection_timestamp, alias, analysis, keyspace, shard) values +(1, NOW() - 
INTERVAL 3 DAY,'a','a','a','a'), +(2, NOW() - INTERVAL 5 DAY,'a','a','a','a'), +(3, NOW() - INTERVAL 15 DAY,'a','a','a','a')`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + _, err := db.ExecVTOrc(tt.insertQuery) + require.NoError(t, err) + + err = ExpireTableData(tt.tableName, tt.timestampColumn) + require.NoError(t, err) + + rowsCount := 0 + err = db.QueryVTOrc(`select * from `+tt.tableName, nil, func(rowMap sqlutils.RowMap) error { + rowsCount++ + return nil + }) + require.NoError(t, err) + require.EqualValues(t, tt.expectedRowCount, rowsCount) + }) + } +} diff --git a/go/vt/vtorc/inst/instance_topology_dao.go b/go/vt/vtorc/inst/instance_topology_dao.go deleted file mode 100644 index 67060b184b7..00000000000 --- a/go/vt/vtorc/inst/instance_topology_dao.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package inst - -// Max concurrency for bulk topology operations -const topologyConcurrency = 128 - -var topologyConcurrencyChan = make(chan bool, topologyConcurrency) - -// ExecuteOnTopology will execute given function while maintaining concurrency limit -// on topology servers. It is safe in the sense that we will not leak tokens. 
-func ExecuteOnTopology(f func()) { - topologyConcurrencyChan <- true - defer func() { _ = recover(); <-topologyConcurrencyChan }() - f() -} diff --git a/go/vt/vtorc/inst/instance_utils.go b/go/vt/vtorc/inst/instance_utils.go index f6bde729822..01302f00b4c 100644 --- a/go/vt/vtorc/inst/instance_utils.go +++ b/go/vt/vtorc/inst/instance_utils.go @@ -17,7 +17,6 @@ package inst import ( - "regexp" "strings" ) @@ -29,13 +28,3 @@ func MajorVersion(version string) []string { } return tokens[:2] } - -// RegexpMatchPatterns returns true if s matches any of the provided regexpPatterns -func RegexpMatchPatterns(s string, regexpPatterns []string) bool { - for _, filter := range regexpPatterns { - if matched, err := regexp.MatchString(filter, s); err == nil && matched { - return true - } - } - return false -} diff --git a/go/vt/vtorc/inst/instance_utils_test.go b/go/vt/vtorc/inst/instance_utils_test.go deleted file mode 100644 index f6247d5d6d0..00000000000 --- a/go/vt/vtorc/inst/instance_utils_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package inst - -import ( - "testing" -) - -type testPatterns struct { - s string - patterns []string - expected bool -} - -func TestRegexpMatchPatterns(t *testing.T) { - patterns := []testPatterns{ - {"hostname", []string{}, false}, - {"hostname", []string{"blah"}, false}, - {"hostname", []string{"blah", "blah"}, false}, - {"hostname", []string{"host", "blah"}, true}, - {"hostname", []string{"blah", "host"}, true}, - {"hostname", []string{"ho.tname"}, true}, - {"hostname", []string{"ho.tname2"}, false}, - {"hostname", []string{"ho.*me"}, true}, - } - - for _, p := range patterns { - if match := RegexpMatchPatterns(p.s, p.patterns); match != p.expected { - t.Errorf("RegexpMatchPatterns failed with: %q, %+v, got: %+v, expected: %+v", p.s, p.patterns, match, p.expected) - } - } -} diff --git a/go/vt/vtorc/inst/keyspace_dao_test.go b/go/vt/vtorc/inst/keyspace_dao_test.go index 015d3e75256..dda3ffaa9d2 100644 --- a/go/vt/vtorc/inst/keyspace_dao_test.go 
+++ b/go/vt/vtorc/inst/keyspace_dao_test.go @@ -20,7 +20,6 @@ import ( "testing" "github.com/stretchr/testify/require" - _ "modernc.org/sqlite" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" diff --git a/go/vt/vtorc/inst/oracle_gtid_set.go b/go/vt/vtorc/inst/oracle_gtid_set.go index 0ddab05ef55..711232692f8 100644 --- a/go/vt/vtorc/inst/oracle_gtid_set.go +++ b/go/vt/vtorc/inst/oracle_gtid_set.go @@ -69,53 +69,6 @@ func (oracleGTIDSet *OracleGtidSet) RemoveUUID(uuid string) (removed bool) { return removed } -// RetainUUID retains only entries that belong to given UUID. -func (oracleGTIDSet *OracleGtidSet) RetainUUID(uuid string) (anythingRemoved bool) { - return oracleGTIDSet.RetainUUIDs([]string{uuid}) -} - -// RetainUUIDs retains only entries that belong to given UUIDs. -func (oracleGTIDSet *OracleGtidSet) RetainUUIDs(uuids []string) (anythingRemoved bool) { - retainUUIDs := map[string]bool{} - for _, uuid := range uuids { - retainUUIDs[uuid] = true - } - var filteredEntries []*OracleGtidSetEntry - for _, entry := range oracleGTIDSet.GtidEntries { - if retainUUIDs[entry.UUID] { - filteredEntries = append(filteredEntries, entry) - } else { - anythingRemoved = true - } - } - if anythingRemoved { - oracleGTIDSet.GtidEntries = filteredEntries - } - return anythingRemoved -} - -// SharedUUIDs returns UUIDs (range-less) that are shared between the two sets -func (oracleGTIDSet *OracleGtidSet) SharedUUIDs(other *OracleGtidSet) (shared []string) { - thisUUIDs := map[string]bool{} - for _, entry := range oracleGTIDSet.GtidEntries { - thisUUIDs[entry.UUID] = true - } - for _, entry := range other.GtidEntries { - if thisUUIDs[entry.UUID] { - shared = append(shared, entry.UUID) - } - } - return shared -} - -// Explode returns a user-friendly string representation of this entry -func (oracleGTIDSet *OracleGtidSet) Explode() (result []*OracleGtidSetEntry) { - for _, entries := range oracleGTIDSet.GtidEntries { - result = append(result, 
entries.Explode()...) - } - return result -} - func (oracleGTIDSet *OracleGtidSet) String() string { var tokens []string for _, entry := range oracleGTIDSet.GtidEntries { diff --git a/go/vt/vtorc/inst/oracle_gtid_set_entry.go b/go/vt/vtorc/inst/oracle_gtid_set_entry.go index 3affd326735..704b38760ef 100644 --- a/go/vt/vtorc/inst/oracle_gtid_set_entry.go +++ b/go/vt/vtorc/inst/oracle_gtid_set_entry.go @@ -18,16 +18,9 @@ package inst import ( "fmt" - "regexp" - "strconv" "strings" ) -var ( - singleValueInterval = regexp.MustCompile("^([0-9]+)$") - multiValueInterval = regexp.MustCompile("^([0-9]+)[-]([0-9]+)$") -) - // OracleGtidSetEntry represents an entry in a set of GTID ranges, // for example, the entry: "316d193c-70e5-11e5-adb2-ecf4bb2262ff:1-8935:8984-6124596" (may include gaps) type OracleGtidSetEntry struct { @@ -56,20 +49,3 @@ func NewOracleGtidSetEntry(gtidRangeString string) (*OracleGtidSetEntry, error) func (oracleGTIDSetEntry *OracleGtidSetEntry) String() string { return fmt.Sprintf("%s:%s", oracleGTIDSetEntry.UUID, oracleGTIDSetEntry.Ranges) } - -// String returns a user-friendly string representation of this entry -func (oracleGTIDSetEntry *OracleGtidSetEntry) Explode() (result [](*OracleGtidSetEntry)) { - intervals := strings.Split(oracleGTIDSetEntry.Ranges, ":") - for _, interval := range intervals { - if submatch := multiValueInterval.FindStringSubmatch(interval); submatch != nil { - intervalStart, _ := strconv.Atoi(submatch[1]) - intervalEnd, _ := strconv.Atoi(submatch[2]) - for i := intervalStart; i <= intervalEnd; i++ { - result = append(result, &OracleGtidSetEntry{UUID: oracleGTIDSetEntry.UUID, Ranges: fmt.Sprintf("%d", i)}) - } - } else if submatch := singleValueInterval.FindStringSubmatch(interval); submatch != nil { - result = append(result, &OracleGtidSetEntry{UUID: oracleGTIDSetEntry.UUID, Ranges: interval}) - } - } - return result -} diff --git a/go/vt/vtorc/inst/oracle_gtid_set_test.go b/go/vt/vtorc/inst/oracle_gtid_set_test.go index 
b62f9475696..7e5b61fe448 100644 --- a/go/vt/vtorc/inst/oracle_gtid_set_test.go +++ b/go/vt/vtorc/inst/oracle_gtid_set_test.go @@ -28,54 +28,6 @@ func TestNewOracleGtidSetEntry(t *testing.T) { } } -func TestExplode(t *testing.T) { - { - uuidSet := "00020194-3333-3333-3333-333333333333:7" - entry, err := NewOracleGtidSetEntry(uuidSet) - require.NoError(t, err) - - exploded := entry.Explode() - require.Equal(t, len(exploded), 1) - require.Equal(t, exploded[0].String(), "00020194-3333-3333-3333-333333333333:7") - } - { - uuidSet := "00020194-3333-3333-3333-333333333333:1-3" - entry, err := NewOracleGtidSetEntry(uuidSet) - require.NoError(t, err) - - exploded := entry.Explode() - require.Equal(t, len(exploded), 3) - require.Equal(t, exploded[0].String(), "00020194-3333-3333-3333-333333333333:1") - require.Equal(t, exploded[1].String(), "00020194-3333-3333-3333-333333333333:2") - require.Equal(t, exploded[2].String(), "00020194-3333-3333-3333-333333333333:3") - } - { - uuidSet := "00020194-3333-3333-3333-333333333333:1-3:6-7" - entry, err := NewOracleGtidSetEntry(uuidSet) - require.NoError(t, err) - - exploded := entry.Explode() - require.Equal(t, len(exploded), 5) - require.Equal(t, exploded[0].String(), "00020194-3333-3333-3333-333333333333:1") - require.Equal(t, exploded[1].String(), "00020194-3333-3333-3333-333333333333:2") - require.Equal(t, exploded[2].String(), "00020194-3333-3333-3333-333333333333:3") - require.Equal(t, exploded[3].String(), "00020194-3333-3333-3333-333333333333:6") - require.Equal(t, exploded[4].String(), "00020194-3333-3333-3333-333333333333:7") - } - { - gtidSetVal := "00020192-1111-1111-1111-111111111111:29-30, 00020194-3333-3333-3333-333333333333:7-8" - gtidSet, err := NewOracleGtidSet(gtidSetVal) - require.NoError(t, err) - - exploded := gtidSet.Explode() - require.Equal(t, len(exploded), 4) - require.Equal(t, exploded[0].String(), "00020192-1111-1111-1111-111111111111:29") - require.Equal(t, exploded[1].String(), 
"00020192-1111-1111-1111-111111111111:30") - require.Equal(t, exploded[2].String(), "00020194-3333-3333-3333-333333333333:7") - require.Equal(t, exploded[3].String(), "00020194-3333-3333-3333-333333333333:8") - } -} - func TestNewOracleGtidSet(t *testing.T) { { gtidSetVal := "00020192-1111-1111-1111-111111111111:20-30, 00020194-3333-3333-3333-333333333333:7-8" @@ -135,93 +87,3 @@ func TestRemoveUUID(t *testing.T) { require.True(t, gtidSet.IsEmpty()) } } - -func TestRetainUUID(t *testing.T) { - gtidSetVal := "00020192-1111-1111-1111-111111111111:20-30, 00020194-3333-3333-3333-333333333333:7-8" - { - gtidSet, err := NewOracleGtidSet(gtidSetVal) - require.NoError(t, err) - - require.Equal(t, len(gtidSet.GtidEntries), 2) - removed := gtidSet.RetainUUID("00020194-3333-3333-3333-333333333333") - require.True(t, removed) - require.Equal(t, len(gtidSet.GtidEntries), 1) - require.Equal(t, gtidSet.GtidEntries[0].String(), "00020194-3333-3333-3333-333333333333:7-8") - - removed = gtidSet.RetainUUID("00020194-3333-3333-3333-333333333333") - require.False(t, removed) - require.Equal(t, len(gtidSet.GtidEntries), 1) - require.Equal(t, gtidSet.GtidEntries[0].String(), "00020194-3333-3333-3333-333333333333:7-8") - - removed = gtidSet.RetainUUID("230ea8ea-81e3-11e4-972a-e25ec4bd140a") - require.True(t, removed) - require.Equal(t, len(gtidSet.GtidEntries), 0) - } -} - -func TestRetainUUIDs(t *testing.T) { - gtidSetVal := "00020192-1111-1111-1111-111111111111:20-30, 00020194-3333-3333-3333-333333333333:7-8" - { - gtidSet, err := NewOracleGtidSet(gtidSetVal) - require.NoError(t, err) - - require.Equal(t, len(gtidSet.GtidEntries), 2) - removed := gtidSet.RetainUUIDs([]string{"00020194-3333-3333-3333-333333333333", "00020194-5555-5555-5555-333333333333"}) - require.True(t, removed) - require.Equal(t, len(gtidSet.GtidEntries), 1) - require.Equal(t, gtidSet.GtidEntries[0].String(), "00020194-3333-3333-3333-333333333333:7-8") - - removed = 
gtidSet.RetainUUIDs([]string{"00020194-3333-3333-3333-333333333333", "00020194-5555-5555-5555-333333333333"}) - require.False(t, removed) - require.Equal(t, len(gtidSet.GtidEntries), 1) - require.Equal(t, gtidSet.GtidEntries[0].String(), "00020194-3333-3333-3333-333333333333:7-8") - - removed = gtidSet.RetainUUIDs([]string{"230ea8ea-81e3-11e4-972a-e25ec4bd140a"}) - require.True(t, removed) - require.Equal(t, len(gtidSet.GtidEntries), 0) - } -} - -func TestSharedUUIDs(t *testing.T) { - gtidSetVal := "00020192-1111-1111-1111-111111111111:20-30, 00020194-3333-3333-3333-333333333333:7-8" - gtidSet, err := NewOracleGtidSet(gtidSetVal) - require.NoError(t, err) - { - otherSet, err := NewOracleGtidSet("00020194-3333-3333-3333-333333333333:7-8,230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-2") - require.NoError(t, err) - { - shared := gtidSet.SharedUUIDs(otherSet) - require.Equal(t, len(shared), 1) - require.Equal(t, shared[0], "00020194-3333-3333-3333-333333333333") - } - { - shared := otherSet.SharedUUIDs(gtidSet) - require.Equal(t, len(shared), 1) - require.Equal(t, shared[0], "00020194-3333-3333-3333-333333333333") - } - } - { - otherSet, err := NewOracleGtidSet("00020194-4444-4444-4444-333333333333:7-8,230ea8ea-81e3-11e4-972a-e25ec4bd140a:1-2") - require.NoError(t, err) - { - shared := gtidSet.SharedUUIDs(otherSet) - require.Equal(t, len(shared), 0) - } - { - shared := otherSet.SharedUUIDs(gtidSet) - require.Equal(t, len(shared), 0) - } - } - { - otherSet, err := NewOracleGtidSet("00020194-3333-3333-3333-333333333333:7-8,00020192-1111-1111-1111-111111111111:1-2") - require.NoError(t, err) - { - shared := gtidSet.SharedUUIDs(otherSet) - require.Equal(t, len(shared), 2) - } - { - shared := otherSet.SharedUUIDs(gtidSet) - require.Equal(t, len(shared), 2) - } - } -} diff --git a/go/vt/vtorc/inst/shard_dao_test.go b/go/vt/vtorc/inst/shard_dao_test.go index 3357bd2ee36..84f6aef7a4a 100644 --- a/go/vt/vtorc/inst/shard_dao_test.go +++ b/go/vt/vtorc/inst/shard_dao_test.go @@ -21,7 
+21,6 @@ import ( "time" "github.com/stretchr/testify/require" - _ "modernc.org/sqlite" "vitess.io/vitess/go/protoutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" diff --git a/go/vt/vtorc/inst/tablet_dao.go b/go/vt/vtorc/inst/tablet_dao.go index 3ee49a75781..af304292a70 100644 --- a/go/vt/vtorc/inst/tablet_dao.go +++ b/go/vt/vtorc/inst/tablet_dao.go @@ -35,29 +35,20 @@ import ( // ErrTabletAliasNil is a fixed error message. var ErrTabletAliasNil = errors.New("tablet alias is nil") +var tmc tmclient.TabletManagerClient -// ResetReplicationParameters resets the replication parameters on the given tablet. -func ResetReplicationParameters(tabletAlias string) error { - tablet, err := ReadTablet(tabletAlias) - if err != nil { - return err - } - tmc := tmclient.NewTabletManagerClient() - tmcCtx, tmcCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) - defer tmcCancel() - if err := tmc.ResetReplicationParameters(tmcCtx, tablet); err != nil { - return err - } - return nil +// InitializeTMC initializes the tablet manager client to use for all VTOrc RPC calls. +func InitializeTMC() tmclient.TabletManagerClient { + tmc = tmclient.NewTabletManagerClient() + return tmc } -// FullStatus gets the full status of the MySQL running in vttablet. -func FullStatus(tabletAlias string) (*replicationdatapb.FullStatus, error) { +// fullStatus gets the full status of the MySQL running in vttablet. 
+func fullStatus(tabletAlias string) (*replicationdatapb.FullStatus, error) { tablet, err := ReadTablet(tabletAlias) if err != nil { return nil, err } - tmc := tmclient.NewTabletManagerClient() tmcCtx, tmcCancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer tmcCancel() return tmc.FullStatus(tmcCtx, tablet) diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery.go b/go/vt/vtorc/logic/keyspace_shard_discovery.go index c79ace5bdc3..b1e93fe2a01 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery.go @@ -124,7 +124,12 @@ func refreshKeyspaceHelper(ctx context.Context, keyspaceName string) error { // refreshAllShards refreshes all the shard records in the given keyspace. func refreshAllShards(ctx context.Context, keyspaceName string) error { - shardInfos, err := ts.FindAllShardsInKeyspace(ctx, keyspaceName) + shardInfos, err := ts.FindAllShardsInKeyspace(ctx, keyspaceName, &topo.FindAllShardsInKeyspaceOptions{ + // Fetch shard records concurrently to speed up discovery. A typical + // Vitess cluster will have 1-3 vtorc instances deployed, so there is + // little risk of a thundering herd. 
+ Concurrency: 8, + }) if err != nil { log.Error(err) return err diff --git a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go index 2911b3d29c2..097865db84a 100644 --- a/go/vt/vtorc/logic/keyspace_shard_discovery_test.go +++ b/go/vt/vtorc/logic/keyspace_shard_discovery_test.go @@ -23,7 +23,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - _ "modernc.org/sqlite" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" diff --git a/go/vt/vtorc/logic/tablet_discovery.go b/go/vt/vtorc/logic/tablet_discovery.go index dd2e65237bf..593b846a72e 100644 --- a/go/vt/vtorc/logic/tablet_discovery.go +++ b/go/vt/vtorc/logic/tablet_discovery.go @@ -36,10 +36,10 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" - "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" + "vitess.io/vitess/go/vt/vtorc/process" "vitess.io/vitess/go/vt/vttablet/tmclient" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -64,16 +64,24 @@ func RegisterFlags(fs *pflag.FlagSet) { // OpenTabletDiscovery opens the vitess topo if enables and returns a ticker // channel for polling. func OpenTabletDiscovery() <-chan time.Time { - // TODO(sougou): If there's a shutdown signal, we have to close the topo. ts = topo.Open() - tmc = tmclient.NewTabletManagerClient() + tmc = inst.InitializeTMC() // Clear existing cache and perform a new refresh. if _, err := db.ExecVTOrc("delete from vitess_tablet"); err != nil { log.Error(err) } + // We refresh all information from the topo once before we start the ticks to do it on a timer. 
+ populateAllInformation() return time.Tick(time.Second * time.Duration(config.Config.TopoInformationRefreshSeconds)) //nolint SA1015: using time.Tick leaks the underlying ticker } +// populateAllInformation initializes all the information for VTOrc to function. +func populateAllInformation() { + refreshAllInformation() + // We have completed one full discovery cycle. We should update the process health. + process.FirstDiscoveryCycleComplete.Store(true) +} + // refreshAllTablets reloads the tablets from topo and discovers the ones which haven't been refreshed in a while func refreshAllTablets() { refreshTabletsUsing(func(tabletAlias string) { @@ -82,9 +90,6 @@ func refreshAllTablets() { } func refreshTabletsUsing(loader func(tabletAlias string), forceRefresh bool) { - if !IsLeaderOrActive() { - return - } if len(clustersToWatch) == 0 { // all known clusters ctx, cancel := context.WithTimeout(context.Background(), topo.RemoteOperationTimeout) defer cancel() @@ -280,22 +285,37 @@ func LockShard(ctx context.Context, tabletAlias string, lockAction string) (cont // tabletUndoDemotePrimary calls the said RPC for the given tablet. func tabletUndoDemotePrimary(ctx context.Context, tablet *topodatapb.Tablet, semiSync bool) error { - return tmc.UndoDemotePrimary(ctx, tablet, semiSync) + tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer tmcCancel() + return tmc.UndoDemotePrimary(tmcCtx, tablet, semiSync) } // setReadOnly calls the said RPC for the given tablet func setReadOnly(ctx context.Context, tablet *topodatapb.Tablet) error { - return tmc.SetReadOnly(ctx, tablet) + tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer tmcCancel() + return tmc.SetReadOnly(tmcCtx, tablet) } // changeTabletType calls the said RPC for the given tablet with the given parameters. 
func changeTabletType(ctx context.Context, tablet *topodatapb.Tablet, tabletType topodatapb.TabletType, semiSync bool) error { - return tmc.ChangeType(ctx, tablet, tabletType, semiSync) + tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer tmcCancel() + return tmc.ChangeType(tmcCtx, tablet, tabletType, semiSync) +} + +// resetReplicationParameters resets the replication parameters on the given tablet. +func resetReplicationParameters(ctx context.Context, tablet *topodatapb.Tablet) error { + tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer tmcCancel() + return tmc.ResetReplicationParameters(tmcCtx, tablet) } // setReplicationSource calls the said RPC with the parameters provided -func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, primary *topodatapb.Tablet, semiSync bool) error { - return tmc.SetReplicationSource(ctx, replica, primary.Alias, 0, "", true, semiSync) +func setReplicationSource(ctx context.Context, replica *topodatapb.Tablet, primary *topodatapb.Tablet, semiSync bool, heartbeatInterval float64) error { + tmcCtx, tmcCancel := context.WithTimeout(ctx, topo.RemoteOperationTimeout) + defer tmcCancel() + return tmc.SetReplicationSource(tmcCtx, replica, primary.Alias, 0, "", true, semiSync, heartbeatInterval) } // shardPrimary finds the primary of the given keyspace-shard by reading the vtorc backend @@ -324,38 +344,3 @@ func shardPrimary(keyspace string, shard string) (primary *topodatapb.Tablet, er } return primary, err } - -// restartsReplication restarts the replication on the provided replicaKey. 
It also sets the correct semi-sync settings when it starts replication -func restartReplication(replicaAlias string) error { - replicaTablet, err := inst.ReadTablet(replicaAlias) - if err != nil { - log.Info("Could not read tablet - %+v", replicaAlias) - return err - } - - primaryTablet, err := shardPrimary(replicaTablet.Keyspace, replicaTablet.Shard) - if err != nil { - log.Info("Could not compute primary for %v/%v", replicaTablet.Keyspace, replicaTablet.Shard) - return err - } - - durabilityPolicy, err := inst.GetDurabilityPolicy(replicaTablet.Keyspace) - if err != nil { - log.Info("Could not read the durability policy for %v/%v", replicaTablet.Keyspace, replicaTablet.Shard) - return err - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Duration(config.Config.WaitReplicasTimeoutSeconds)*time.Second) - defer cancel() - err = tmc.StopReplication(ctx, replicaTablet) - if err != nil { - log.Info("Could not stop replication on %v", replicaAlias) - return err - } - err = tmc.StartReplication(ctx, replicaTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, replicaTablet)) - if err != nil { - log.Info("Could not start replication on %v", replicaAlias) - return err - } - return nil -} diff --git a/go/vt/vtorc/logic/tablet_discovery_test.go b/go/vt/vtorc/logic/tablet_discovery_test.go index 0e8ac72fabf..7acb29dcc5b 100644 --- a/go/vt/vtorc/logic/tablet_discovery_test.go +++ b/go/vt/vtorc/logic/tablet_discovery_test.go @@ -21,6 +21,7 @@ import ( "fmt" "sync/atomic" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" @@ -30,10 +31,13 @@ import ( "vitess.io/vitess/go/vt/external/golib/sqlutils" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/proto/vttime" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver/testutil" "vitess.io/vitess/go/vt/vtorc/db" 
"vitess.io/vitess/go/vt/vtorc/inst" + "vitess.io/vitess/go/vt/vtorc/process" ) var ( @@ -342,3 +346,278 @@ func TestGetLockAction(t *testing.T) { }) } } + +// TestProcessHealth tests that the health of the process reflects that we have run the first discovery once correctly. +func TestProcessHealth(t *testing.T) { + require.False(t, process.FirstDiscoveryCycleComplete.Load()) + originalTs := ts + defer func() { + ts = originalTs + process.FirstDiscoveryCycleComplete.Store(false) + }() + // Verify in the beginning, we have the first DiscoveredOnce field false. + _, discoveredOnce := process.HealthTest() + require.False(t, discoveredOnce) + ts = memorytopo.NewServer(context.Background(), cell1) + populateAllInformation() + require.True(t, process.FirstDiscoveryCycleComplete.Load()) + // Verify after we populate all information, we have the first DiscoveredOnce field true. + _, discoveredOnce = process.HealthTest() + require.True(t, discoveredOnce) +} + +func TestSetReadOnly(t *testing.T) { + tests := []struct { + name string + tablet *topodatapb.Tablet + tmc *testutil.TabletManagerClient + remoteOpTimeout time.Duration + errShouldContain string + }{ + { + name: "Success", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + SetReadOnlyResults: map[string]error{ + "zone-1-0000000100": nil, + }, + }, + }, { + name: "Failure", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + SetReadOnlyResults: map[string]error{ + "zone-1-0000000100": fmt.Errorf("testing error"), + }, + }, + errShouldContain: "testing error", + }, { + name: "Timeout", + tablet: tab100, + remoteOpTimeout: 100 * time.Millisecond, + tmc: &testutil.TabletManagerClient{ + SetReadOnlyResults: map[string]error{ + "zone-1-0000000100": nil, + }, + SetReadOnlyDelays: map[string]time.Duration{ + "zone-1-0000000100": 200 * time.Millisecond, + }, + }, + errShouldContain: "context deadline exceeded", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + oldTmc := tmc + 
oldRemoteOpTimeout := topo.RemoteOperationTimeout + defer func() { + tmc = oldTmc + topo.RemoteOperationTimeout = oldRemoteOpTimeout + }() + + tmc = tt.tmc + if tt.remoteOpTimeout != 0 { + topo.RemoteOperationTimeout = tt.remoteOpTimeout + } + + err := setReadOnly(context.Background(), tt.tablet) + if tt.errShouldContain == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tt.errShouldContain) + }) + } +} + +func TestTabletUndoDemotePrimary(t *testing.T) { + tests := []struct { + name string + tablet *topodatapb.Tablet + tmc *testutil.TabletManagerClient + remoteOpTimeout time.Duration + errShouldContain string + }{ + { + name: "Success", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + UndoDemotePrimaryResults: map[string]error{ + "zone-1-0000000100": nil, + }, + }, + }, { + name: "Failure", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + UndoDemotePrimaryResults: map[string]error{ + "zone-1-0000000100": fmt.Errorf("testing error"), + }, + }, + errShouldContain: "testing error", + }, { + name: "Timeout", + tablet: tab100, + remoteOpTimeout: 100 * time.Millisecond, + tmc: &testutil.TabletManagerClient{ + UndoDemotePrimaryResults: map[string]error{ + "zone-1-0000000100": nil, + }, + UndoDemotePrimaryDelays: map[string]time.Duration{ + "zone-1-0000000100": 200 * time.Millisecond, + }, + }, + errShouldContain: "context deadline exceeded", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + oldTmc := tmc + oldRemoteOpTimeout := topo.RemoteOperationTimeout + defer func() { + tmc = oldTmc + topo.RemoteOperationTimeout = oldRemoteOpTimeout + }() + + tmc = tt.tmc + if tt.remoteOpTimeout != 0 { + topo.RemoteOperationTimeout = tt.remoteOpTimeout + } + + err := tabletUndoDemotePrimary(context.Background(), tt.tablet, false) + if tt.errShouldContain == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tt.errShouldContain) + }) + } +} + +func TestChangeTabletType(t *testing.T) { + 
tests := []struct { + name string + tablet *topodatapb.Tablet + tmc *testutil.TabletManagerClient + remoteOpTimeout time.Duration + errShouldContain string + }{ + { + name: "Success", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + ChangeTabletTypeResult: map[string]error{ + "zone-1-0000000100": nil, + }, + }, + }, { + name: "Failure", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + ChangeTabletTypeResult: map[string]error{ + "zone-1-0000000100": fmt.Errorf("testing error"), + }, + }, + errShouldContain: "testing error", + }, { + name: "Timeout", + tablet: tab100, + remoteOpTimeout: 100 * time.Millisecond, + tmc: &testutil.TabletManagerClient{ + ChangeTabletTypeResult: map[string]error{ + "zone-1-0000000100": nil, + }, + ChangeTabletTypeDelays: map[string]time.Duration{ + "zone-1-0000000100": 200 * time.Millisecond, + }, + }, + errShouldContain: "context deadline exceeded", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + oldTmc := tmc + oldRemoteOpTimeout := topo.RemoteOperationTimeout + defer func() { + tmc = oldTmc + topo.RemoteOperationTimeout = oldRemoteOpTimeout + }() + + tmc = tt.tmc + if tt.remoteOpTimeout != 0 { + topo.RemoteOperationTimeout = tt.remoteOpTimeout + } + + err := changeTabletType(context.Background(), tt.tablet, topodatapb.TabletType_REPLICA, false) + if tt.errShouldContain == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tt.errShouldContain) + }) + } +} + +func TestSetReplicationSource(t *testing.T) { + tests := []struct { + name string + tablet *topodatapb.Tablet + tmc *testutil.TabletManagerClient + remoteOpTimeout time.Duration + errShouldContain string + }{ + { + name: "Success", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + SetReplicationSourceResults: map[string]error{ + "zone-1-0000000100": nil, + }, + }, + }, { + name: "Failure", + tablet: tab100, + tmc: &testutil.TabletManagerClient{ + SetReplicationSourceResults: map[string]error{ + 
"zone-1-0000000100": fmt.Errorf("testing error"), + }, + }, + errShouldContain: "testing error", + }, { + name: "Timeout", + tablet: tab100, + remoteOpTimeout: 100 * time.Millisecond, + tmc: &testutil.TabletManagerClient{ + SetReplicationSourceResults: map[string]error{ + "zone-1-0000000100": nil, + }, + SetReplicationSourceDelays: map[string]time.Duration{ + "zone-1-0000000100": 200 * time.Millisecond, + }, + }, + errShouldContain: "context deadline exceeded", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + oldTmc := tmc + oldRemoteOpTimeout := topo.RemoteOperationTimeout + defer func() { + tmc = oldTmc + topo.RemoteOperationTimeout = oldRemoteOpTimeout + }() + + tmc = tt.tmc + if tt.remoteOpTimeout != 0 { + topo.RemoteOperationTimeout = tt.remoteOpTimeout + } + + err := setReplicationSource(context.Background(), tt.tablet, tab101, false, 0) + if tt.errShouldContain == "" { + require.NoError(t, err) + return + } + require.ErrorContains(t, err, tt.errShouldContain) + }) + } +} diff --git a/go/vt/vtorc/logic/topology_recovery.go b/go/vt/vtorc/logic/topology_recovery.go index d3e73c00886..aec137a45b4 100644 --- a/go/vt/vtorc/logic/topology_recovery.go +++ b/go/vt/vtorc/logic/topology_recovery.go @@ -20,11 +20,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + "math/rand/v2" "time" - "github.com/patrickmn/go-cache" - "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -35,11 +33,8 @@ import ( "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/inst" "vitess.io/vitess/go/vt/vtorc/util" - "vitess.io/vitess/go/vt/vttablet/tmclient" ) -type RecoveryType string - const ( CheckAndRecoverGenericProblemRecoveryName string = "CheckAndRecoverGenericProblem" RecoverDeadPrimaryRecoveryName string = "RecoverDeadPrimary" @@ -105,30 +100,17 @@ const ( // TopologyRecovery represents an entry in the topology_recovery table type TopologyRecovery struct { ID int64 - UID string AnalysisEntry 
inst.ReplicationAnalysis - SuccessorHostname string - SuccessorPort int SuccessorAlias string - IsActive bool IsSuccessful bool AllErrors []string RecoveryStartTimestamp string RecoveryEndTimestamp string - ProcessingNodeHostname string - ProcessingNodeToken string - Acknowledged bool - AcknowledgedAt string - AcknowledgedBy string - AcknowledgedComment string - LastDetectionID int64 - RelatedRecoveryID int64 - Type RecoveryType + DetectionID int64 } func NewTopologyRecovery(replicationAnalysis inst.ReplicationAnalysis) *TopologyRecovery { topologyRecovery := &TopologyRecovery{} - topologyRecovery.UID = util.PrettyUniqueToken() topologyRecovery.AnalysisEntry = replicationAnalysis topologyRecovery.AllErrors = []string{} return topologyRecovery @@ -148,33 +130,25 @@ func (topologyRecovery *TopologyRecovery) AddErrors(errs []error) { } type TopologyRecoveryStep struct { - ID int64 - RecoveryUID string - AuditAt string - Message string + ID int64 + RecoveryID int64 + AuditAt string + Message string } -func NewTopologyRecoveryStep(uid string, message string) *TopologyRecoveryStep { +func NewTopologyRecoveryStep(id int64, message string) *TopologyRecoveryStep { return &TopologyRecoveryStep{ - RecoveryUID: uid, - Message: message, + RecoveryID: id, + Message: message, } } -var emergencyReadTopologyInstanceMap *cache.Cache -var emergencyRestartReplicaTopologyInstanceMap *cache.Cache -var emergencyOperationGracefulPeriodMap *cache.Cache - func init() { go initializeTopologyRecoveryPostConfiguration() } func initializeTopologyRecoveryPostConfiguration() { config.WaitForConfigurationToBeLoaded() - - emergencyReadTopologyInstanceMap = cache.New(time.Second, time.Millisecond*250) - emergencyRestartReplicaTopologyInstanceMap = cache.New(time.Second*30, time.Second) - emergencyOperationGracefulPeriodMap = cache.New(time.Second*5, time.Millisecond*500) } // AuditTopologyRecovery audits a single step in a topology recovery process. 
@@ -184,7 +158,7 @@ func AuditTopologyRecovery(topologyRecovery *TopologyRecovery, message string) e return nil } - recoveryStep := NewTopologyRecoveryStep(topologyRecovery.UID, message) + recoveryStep := NewTopologyRecoveryStep(topologyRecovery.ID, message) return writeTopologyRecoveryStep(recoveryStep) } @@ -198,7 +172,7 @@ func resolveRecovery(topologyRecovery *TopologyRecovery, successorInstance *inst // recoverPrimaryHasPrimary resets the replication on the primary instance func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry) if topologyRecovery == nil { _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimaryHasPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err @@ -210,28 +184,27 @@ func recoverPrimaryHasPrimary(ctx context.Context, analysisEntry *inst.Replicati _ = resolveRecovery(topologyRecovery, nil) }() - // Reset replication on current primary. - err = inst.ResetReplicationParameters(analysisEntry.AnalyzedInstanceAlias) + // Read the tablet information from the database to find the shard and keyspace of the tablet + analyzedTablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { - return false, topologyRecovery, err + return false, nil, err } - return true, topologyRecovery, nil + + // Reset replication on current primary. + err = resetReplicationParameters(ctx, analyzedTablet) + return true, topologyRecovery, err } // runEmergencyReparentOp runs a recovery for which we have to run ERS. Here waitForAllTablets is a boolean telling ERS whether it should wait for all the tablets // or is it okay to skip 1. 
func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.ReplicationAnalysis, recoveryName string, waitForAllTablets bool) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - if !analysisEntry.ClusterDetails.HasAutomatedPrimaryRecovery { - return false, nil, nil - } - // Read the tablet information from the database to find the shard and keyspace of the tablet tablet, err := inst.ReadTablet(analysisEntry.AnalyzedInstanceAlias) if err != nil { return false, nil, err } - topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, true, true) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry) if topologyRecovery == nil { _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another %v.", analysisEntry.AnalyzedInstanceAlias, recoveryName)) return false, nil, err @@ -244,7 +217,7 @@ func runEmergencyReparentOp(ctx context.Context, analysisEntry *inst.Replication _ = resolveRecovery(topologyRecovery, promotedReplica) }() - ev, err := reparentutil.NewEmergencyReparenter(ts, tmclient.NewTabletManagerClient(), logutil.NewCallbackLogger(func(event *logutilpb.Event) { + ev, err := reparentutil.NewEmergencyReparenter(ts, tmc, logutil.NewCallbackLogger(func(event *logutilpb.Event) { level := event.GetLevel() value := event.GetValue() // we only log the warnings and errors explicitly, everything gets logged as an information message anyways in auditing topology recovery @@ -308,93 +281,6 @@ func checkAndRecoverGenericProblem(ctx context.Context, analysisEntry *inst.Repl return false, nil, nil } -// Force a re-read of a topology instance; this is done because we need to substantiate a suspicion -// that we may have a failover scenario. we want to speed up reading the complete picture. 
-func emergentlyReadTopologyInstance(tabletAlias string, analysisCode inst.AnalysisCode) (instance *inst.Instance) { - if existsInCacheError := emergencyReadTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { - // Just recently attempted - return nil - } - instance, _ = inst.ReadTopologyInstance(tabletAlias) - _ = inst.AuditOperation("emergently-read-topology-instance", tabletAlias, string(analysisCode)) - return instance -} - -// Force reading of replicas of given instance. This is because we suspect the instance is dead, and want to speed up -// detection of replication failure from its replicas. -func emergentlyReadTopologyInstanceReplicas(primaryHost string, primaryPort int, analysisCode inst.AnalysisCode) { - replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost, primaryPort) - if err != nil { - return - } - for _, replica := range replicas { - go emergentlyReadTopologyInstance(replica.InstanceAlias, analysisCode) - } -} - -// emergentlyRestartReplicationOnTopologyInstance forces a RestartReplication on a given instance. 
-func emergentlyRestartReplicationOnTopologyInstance(tabletAlias string, analysisCode inst.AnalysisCode) { - if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { - // Just recently attempted on this specific replica - return - } - go inst.ExecuteOnTopology(func() { - _ = restartReplication(tabletAlias) - _ = inst.AuditOperation("emergently-restart-replication-topology-instance", tabletAlias, string(analysisCode)) - }) -} - -func beginEmergencyOperationGracefulPeriod(tabletAlias string) { - emergencyOperationGracefulPeriodMap.Set(tabletAlias, true, cache.DefaultExpiration) -} - -func isInEmergencyOperationGracefulPeriod(tabletAlias string) bool { - _, found := emergencyOperationGracefulPeriodMap.Get(tabletAlias) - return found -} - -// emergentlyRestartReplicationOnTopologyInstanceReplicas forces a stop slave + start slave on -// replicas of a given instance, in an attempt to cause them to re-evaluate their replication state. -// This can be useful in scenarios where the primary has Too Many Connections, but long-time connected -// replicas are not seeing this; when they stop+start replication, they need to re-authenticate and -// that's where we hope they realize the primary is bad. -func emergentlyRestartReplicationOnTopologyInstanceReplicas(primaryHost string, primaryPort int, tabletAlias string, analysisCode inst.AnalysisCode) { - if existsInCacheError := emergencyRestartReplicaTopologyInstanceMap.Add(tabletAlias, true, cache.DefaultExpiration); existsInCacheError != nil { - // While each replica's RestartReplication() is throttled on its own, it's also wasteful to - // iterate all replicas all the time. This is the reason why we do grand-throttle check. 
- return - } - beginEmergencyOperationGracefulPeriod(tabletAlias) - - replicas, err := inst.ReadReplicaInstancesIncludingBinlogServerSubReplicas(primaryHost, primaryPort) - if err != nil { - return - } - for _, replica := range replicas { - go emergentlyRestartReplicationOnTopologyInstance(replica.InstanceAlias, analysisCode) - } -} - -func emergentlyRecordStaleBinlogCoordinates(tabletAlias string, binlogCoordinates *inst.BinlogCoordinates) { - err := inst.RecordStaleInstanceBinlogCoordinates(tabletAlias, binlogCoordinates) - if err != nil { - log.Error(err) - } -} - -// checkAndExecuteFailureDetectionProcesses tries to register for failure detection and potentially executes -// failure-detection processes. -func checkAndExecuteFailureDetectionProcesses(analysisEntry *inst.ReplicationAnalysis) (detectionRegistrationSuccess bool, processesExecutionAttempted bool, err error) { - if ok, _ := AttemptFailureDetectionRegistration(analysisEntry); !ok { - if util.ClearToLog("checkAndExecuteFailureDetectionProcesses", analysisEntry.AnalyzedInstanceAlias) { - log.Infof("checkAndExecuteFailureDetectionProcesses: could not register %+v detection on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) - } - return false, false, nil - } - log.Infof("topology_recovery: detected %+v failure on %+v", analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) - return true, false, nil -} - // getCheckAndRecoverFunctionCode gets the recovery function code to use for the given analysis. 
func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias string) recoveryFunction { switch analysisCode { @@ -405,9 +291,6 @@ func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisCode) return noRecoveryFunc } - if isInEmergencyOperationGracefulPeriod(tabletAlias) { - return recoverGenericProblemFunc - } return recoverDeadPrimaryFunc case inst.PrimaryTabletDeleted: // If ERS is disabled, we have no way of repairing the cluster. @@ -415,9 +298,6 @@ func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias log.Infof("VTOrc not configured to run ERS, skipping recovering %v", analysisCode) return noRecoveryFunc } - if isInEmergencyOperationGracefulPeriod(tabletAlias) { - return recoverGenericProblemFunc - } return recoverPrimaryTabletDeletedFunc case inst.ErrantGTIDDetected: if !config.ConvertTabletWithErrantGTIDs() { @@ -428,9 +308,6 @@ func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias case inst.PrimaryHasPrimary: return recoverPrimaryHasPrimaryFunc case inst.LockedSemiSyncPrimary: - if isInEmergencyOperationGracefulPeriod(tabletAlias) { - return recoverGenericProblemFunc - } return recoverLockedSemiSyncPrimaryFunc case inst.ClusterHasNoPrimary: return electNewPrimaryFunc @@ -438,7 +315,7 @@ func getCheckAndRecoverFunctionCode(analysisCode inst.AnalysisCode, tabletAlias return fixPrimaryFunc // replica case inst.NotConnectedToPrimary, inst.ConnectedToWrongPrimary, inst.ReplicationStopped, inst.ReplicaIsWritable, - inst.ReplicaSemiSyncMustBeSet, inst.ReplicaSemiSyncMustNotBeSet: + inst.ReplicaSemiSyncMustBeSet, inst.ReplicaSemiSyncMustNotBeSet, inst.ReplicaMisconfigured: return fixReplicaFunc // primary, non actionable case inst.DeadPrimaryAndReplicas: @@ -564,27 +441,8 @@ func analysisEntriesHaveSameRecovery(prevAnalysis, newAnalysis *inst.Replication return prevRecoveryFunctionCode == 
newRecoveryFunctionCode } -func runEmergentOperations(analysisEntry *inst.ReplicationAnalysis) { - switch analysisEntry.Analysis { - case inst.DeadPrimaryAndReplicas: - go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstancePrimaryAlias, analysisEntry.Analysis) - case inst.UnreachablePrimary: - go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) - go emergentlyReadTopologyInstanceReplicas(analysisEntry.AnalyzedInstanceHostname, analysisEntry.AnalyzedInstancePort, analysisEntry.Analysis) - case inst.UnreachablePrimaryWithLaggingReplicas: - go emergentlyRestartReplicationOnTopologyInstanceReplicas(analysisEntry.AnalyzedInstanceHostname, analysisEntry.AnalyzedInstancePort, analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) - case inst.LockedSemiSyncPrimaryHypothesis: - go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) - go emergentlyRecordStaleBinlogCoordinates(analysisEntry.AnalyzedInstanceAlias, &analysisEntry.AnalyzedInstanceBinlogCoordinates) - case inst.AllPrimaryReplicasNotReplicating: - go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) - case inst.AllPrimaryReplicasNotReplicatingOrDead: - go emergentlyReadTopologyInstance(analysisEntry.AnalyzedInstanceAlias, analysisEntry.Analysis) - } -} - // executeCheckAndRecoverFunction will choose the correct check & recovery function based on analysis. 
-// It executes the function synchronuously +// It executes the function synchronously func executeCheckAndRecoverFunction(analysisEntry *inst.ReplicationAnalysis) (err error) { countPendingRecoveries.Add(1) defer countPendingRecoveries.Add(-1) @@ -592,7 +450,6 @@ func executeCheckAndRecoverFunction(analysisEntry *inst.ReplicationAnalysis) (er checkAndRecoverFunctionCode := getCheckAndRecoverFunctionCode(analysisEntry.Analysis, analysisEntry.AnalyzedInstanceAlias) isActionableRecovery := hasActionableRecovery(checkAndRecoverFunctionCode) analysisEntry.IsActionableRecovery = isActionableRecovery - runEmergentOperations(analysisEntry) if checkAndRecoverFunctionCode == noRecoveryFunc { // Unhandled problem type @@ -611,17 +468,12 @@ func executeCheckAndRecoverFunction(analysisEntry *inst.ReplicationAnalysis) (er } // At this point we have validated there's a failure scenario for which we have a recovery path. - - // Initiate detection: - _, _, err = checkAndExecuteFailureDetectionProcesses(analysisEntry) + // Record the failure detected in the logs. + err = InsertRecoveryDetection(analysisEntry) if err != nil { - log.Errorf("executeCheckAndRecoverFunction: error on failure detection: %+v", err) + log.Errorf("executeCheckAndRecoverFunction: error on inserting recovery detection record: %+v", err) return err } - // We don't mind whether detection really executed the processes or not - // (it may have been silenced due to previous detection). We only care there's no error. - - // We're about to embark on recovery shortly... // Check for recovery being disabled globally if recoveryDisabledGlobally, err := IsRecoveryDisabled(); err != nil { @@ -816,7 +668,7 @@ func postPrsCompletion(topologyRecovery *TopologyRecovery, analysisEntry *inst.R // electNewPrimary elects a new primary while none were present before. 
func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false /*failIfFailedInstanceInActiveRecovery*/, true /*failIfClusterInActiveRecovery*/) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry) if topologyRecovery == nil || err != nil { _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another electNewPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err @@ -836,7 +688,7 @@ func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysi } _ = AuditTopologyRecovery(topologyRecovery, "starting PlannedReparentShard for electing new primary.") - ev, err := reparentutil.NewPlannedReparenter(ts, tmclient.NewTabletManagerClient(), logutil.NewCallbackLogger(func(event *logutilpb.Event) { + ev, err := reparentutil.NewPlannedReparenter(ts, tmc, logutil.NewCallbackLogger(func(event *logutilpb.Event) { level := event.GetLevel() value := event.GetValue() // we only log the warnings and errors explicitly, everything gets logged as an information message anyways in auditing topology recovery @@ -852,6 +704,7 @@ func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysi analyzedTablet.Shard, reparentutil.PlannedReparentOptions{ WaitReplicasTimeout: time.Duration(config.Config.WaitReplicasTimeoutSeconds) * time.Second, + TolerableReplLag: time.Duration(config.Config.TolerableReplicationLagSeconds) * time.Second, }, ) @@ -864,7 +717,7 @@ func electNewPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysi // fixPrimary sets the primary as read-write. 
func fixPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry) if topologyRecovery == nil { _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixPrimary.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err @@ -895,7 +748,7 @@ func fixPrimary(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (r // fixReplica sets the replica as read-only and points it at the current primary. func fixReplica(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry) if topologyRecovery == nil { _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another fixReplica.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err @@ -930,13 +783,13 @@ func fixReplica(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (r return true, topologyRecovery, err } - err = setReplicationSource(ctx, analyzedTablet, primaryTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet)) + err = setReplicationSource(ctx, analyzedTablet, primaryTablet, reparentutil.IsReplicaSemiSync(durabilityPolicy, primaryTablet, analyzedTablet), float64(analysisEntry.ReplicaNetTimeout)/2) return true, topologyRecovery, err } // recoverErrantGTIDDetected changes the tablet type of a replica tablet that has errant GTIDs. 
func recoverErrantGTIDDetected(ctx context.Context, analysisEntry *inst.ReplicationAnalysis) (recoveryAttempted bool, topologyRecovery *TopologyRecovery, err error) { - topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry, false, true) + topologyRecovery, err = AttemptRecoveryRegistration(analysisEntry) if topologyRecovery == nil { _ = AuditTopologyRecovery(topologyRecovery, fmt.Sprintf("found an active or recent recovery on %+v. Will not issue another recoverErrantGTIDDetected.", analysisEntry.AnalyzedInstanceAlias)) return false, nil, err diff --git a/go/vt/vtorc/logic/topology_recovery_dao.go b/go/vt/vtorc/logic/topology_recovery_dao.go index c835b9ecfe4..e8af34bdad4 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao.go +++ b/go/vt/vtorc/logic/topology_recovery_dao.go @@ -25,87 +25,41 @@ import ( "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" - "vitess.io/vitess/go/vt/vtorc/process" - "vitess.io/vitess/go/vt/vtorc/util" ) -// AttemptFailureDetectionRegistration tries to add a failure-detection entry; if this fails that means the problem has already been detected -func AttemptFailureDetectionRegistration(analysisEntry *inst.ReplicationAnalysis) (registrationSuccessful bool, err error) { - args := sqlutils.Args( - analysisEntry.AnalyzedInstanceAlias, - process.ThisHostname, - util.ProcessToken.Hash, - string(analysisEntry.Analysis), - analysisEntry.ClusterDetails.Keyspace, - analysisEntry.ClusterDetails.Shard, - analysisEntry.CountReplicas, - analysisEntry.IsActionableRecovery, - ) - startActivePeriodHint := "now()" - if analysisEntry.StartActivePeriod != "" { - startActivePeriodHint = "?" - args = append(args, analysisEntry.StartActivePeriod) - } - - query := fmt.Sprintf(` +// InsertRecoveryDetection inserts the recovery analysis that has been detected. 
+func InsertRecoveryDetection(analysisEntry *inst.ReplicationAnalysis) error { + sqlResult, err := db.ExecVTOrc(` insert ignore - into topology_failure_detection ( + into recovery_detection ( alias, - in_active_period, - end_active_period_unixtime, - processing_node_hostname, - processcing_node_token, analysis, keyspace, shard, - count_affected_replicas, - is_actionable, - start_active_period + detection_timestamp ) values ( - ?, - 1, - 0, - ?, - ?, ?, ?, ?, ?, - ?, - %s - ) - `, startActivePeriodHint) - - sqlResult, err := db.ExecVTOrc(query, args...) - if err != nil { - log.Error(err) - return false, err - } - rows, err := sqlResult.RowsAffected() + now() + )`, + analysisEntry.AnalyzedInstanceAlias, + string(analysisEntry.Analysis), + analysisEntry.ClusterDetails.Keyspace, + analysisEntry.ClusterDetails.Shard, + ) if err != nil { log.Error(err) - return false, err + return err } - return (rows > 0), nil -} - -// ClearActiveFailureDetections clears the "in_active_period" flag for old-enough detections, thereby allowing for -// further detections on cleared instances. -func ClearActiveFailureDetections() error { - _, err := db.ExecVTOrc(` - update topology_failure_detection set - in_active_period = 0, - end_active_period_unixtime = UNIX_TIMESTAMP() - where - in_active_period = 1 - AND start_active_period < NOW() - INTERVAL ? 
MINUTE - `, - config.FailureDetectionPeriodBlockMinutes, - ) + id, err := sqlResult.LastInsertId() if err != nil { log.Error(err) + return err } - return err + analysisEntry.RecoveryId = id + return nil } func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecovery, error) { @@ -114,43 +68,29 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover insert ignore into topology_recovery ( recovery_id, - uid, alias, - in_active_period, - start_active_period, - end_active_period_unixtime, - processing_node_hostname, - processcing_node_token, + start_recovery, analysis, keyspace, shard, - count_affected_replicas, - last_detection_id + detection_id ) values ( ?, ?, - ?, - 1, NOW(), - 0, - ?, - ?, ?, ?, ?, - ?, - (select ifnull(max(detection_id), 0) from topology_failure_detection where alias = ?) + ? ) `, sqlutils.NilIfZero(topologyRecovery.ID), - topologyRecovery.UID, analysisEntry.AnalyzedInstanceAlias, - process.ThisHostname, util.ProcessToken.Hash, string(analysisEntry.Analysis), analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, - analysisEntry.CountReplicas, analysisEntry.AnalyzedInstanceAlias, + analysisEntry.RecoveryId, ) if err != nil { return nil, err @@ -171,224 +111,27 @@ func writeTopologyRecovery(topologyRecovery *TopologyRecovery) (*TopologyRecover } // AttemptRecoveryRegistration tries to add a recovery entry; if this fails that means recovery is already in place. -func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis, failIfFailedInstanceInActiveRecovery bool, failIfClusterInActiveRecovery bool) (*TopologyRecovery, error) { - if failIfFailedInstanceInActiveRecovery { - // Let's check if this instance has just been promoted recently and is still in active period. - // If so, we reject recovery registration to avoid flapping. 
- recoveries, err := ReadInActivePeriodSuccessorInstanceRecovery(analysisEntry.AnalyzedInstanceAlias) - if err != nil { - log.Error(err) - return nil, err - } - if len(recoveries) > 0 { - _ = RegisterBlockedRecoveries(analysisEntry, recoveries) - errMsg := fmt.Sprintf("AttemptRecoveryRegistration: tablet %+v has recently been promoted (by failover of %+v) and is in active period. It will not be failed over. You may acknowledge the failure on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias) - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - } - if failIfClusterInActiveRecovery { - // Let's check if this cluster has just experienced a failover of the same analysis and is still in active period. - // If so, we reject recovery registration to avoid flapping. - recoveries, err := ReadInActivePeriodClusterRecovery(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, string(analysisEntry.Analysis)) - if err != nil { - log.Error(err) - return nil, err - } - if len(recoveries) > 0 { - _ = RegisterBlockedRecoveries(analysisEntry, recoveries) - errMsg := fmt.Sprintf("AttemptRecoveryRegistration: keyspace %+v shard %+v has recently experienced a failover (of %+v) and is in active period. It will not be failed over again. You may acknowledge the failure on this cluster (-c ack-cluster-recoveries) or on %+v (-c ack-instance-recoveries) to remove this blockage", analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias, recoveries[0].AnalysisEntry.AnalyzedInstanceAlias) - log.Errorf(errMsg) - return nil, fmt.Errorf(errMsg) - } - } - if !failIfFailedInstanceInActiveRecovery { - // Implicitly acknowledge this instance's possibly existing active recovery, provided they are completed. 
- _, _ = AcknowledgeInstanceCompletedRecoveries(analysisEntry.AnalyzedInstanceAlias, "vtorc", fmt.Sprintf("implicit acknowledge due to user invocation of recovery on same instance: %+v", analysisEntry.AnalyzedInstanceAlias)) - // The fact we only acknowledge a completed recovery solves the possible case of two DBAs simultaneously - // trying to recover the same instance at the same time - } - - topologyRecovery := NewTopologyRecovery(*analysisEntry) - - topologyRecovery, err := writeTopologyRecovery(topologyRecovery) +func AttemptRecoveryRegistration(analysisEntry *inst.ReplicationAnalysis) (*TopologyRecovery, error) { + // Check if there is an active recovery in progress for the cluster of the given instance. + recoveries, err := ReadActiveClusterRecoveries(analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard) if err != nil { log.Error(err) return nil, err } - return topologyRecovery, nil -} - -// ClearActiveRecoveries clears the "in_active_period" flag for old-enough recoveries, thereby allowing for -// further recoveries on cleared instances. -func ClearActiveRecoveries() error { - _, err := db.ExecVTOrc(` - update topology_recovery set - in_active_period = 0, - end_active_period_unixtime = UNIX_TIMESTAMP() - where - in_active_period = 1 - AND start_active_period < NOW() - INTERVAL ? SECOND - `, - config.Config.RecoveryPeriodBlockSeconds, - ) - if err != nil { - log.Error(err) + if len(recoveries) > 0 { + errMsg := fmt.Sprintf("AttemptRecoveryRegistration: Active recovery (id:%v) in the cluster %s:%s for %s", recoveries[0].ID, analysisEntry.ClusterDetails.Keyspace, analysisEntry.ClusterDetails.Shard, recoveries[0].AnalysisEntry.Analysis) + log.Errorf(errMsg) + return nil, fmt.Errorf(errMsg) } - return err -} -// RegisterBlockedRecoveries writes down currently blocked recoveries, and indicates what recovery they are blocked on. -// Recoveries are blocked thru the in_active_period flag, which comes to avoid flapping. 
-func RegisterBlockedRecoveries(analysisEntry *inst.ReplicationAnalysis, blockingRecoveries []*TopologyRecovery) error { - for _, recovery := range blockingRecoveries { - _, err := db.ExecVTOrc(` - insert - into blocked_topology_recovery ( - alias, - keyspace, - shard, - analysis, - last_blocked_timestamp, - blocking_recovery_id - ) values ( - ?, - ?, - ?, - ?, - NOW(), - ? - ) - on duplicate key update - keyspace=values(keyspace), - shard=values(shard), - analysis=values(analysis), - last_blocked_timestamp=values(last_blocked_timestamp), - blocking_recovery_id=values(blocking_recovery_id) - `, analysisEntry.AnalyzedInstanceAlias, - analysisEntry.ClusterDetails.Keyspace, - analysisEntry.ClusterDetails.Shard, - string(analysisEntry.Analysis), - recovery.ID, - ) - if err != nil { - log.Error(err) - } - } - return nil -} - -// ExpireBlockedRecoveries clears listing of blocked recoveries that are no longer actually blocked. -func ExpireBlockedRecoveries() error { - // Older recovery is acknowledged by now, hence blocked recovery should be released. - // Do NOTE that the data in blocked_topology_recovery is only used for auditing: it is NOT the data - // based on which we make automated decisions. - - query := ` - select - blocked_topology_recovery.alias - from - blocked_topology_recovery - left join topology_recovery on (blocking_recovery_id = topology_recovery.recovery_id and acknowledged = 0) - where - acknowledged is null - ` - var expiredAliases []string - err := db.QueryVTOrc(query, sqlutils.Args(), func(m sqlutils.RowMap) error { - expiredAliases = append(expiredAliases, m.GetString("alias")) - return nil - }) - - for _, expiredAlias := range expiredAliases { - _, err := db.ExecVTOrc(` - delete - from blocked_topology_recovery - where - alias = ? - `, - expiredAlias, - ) - if err != nil { - log.Error(err) - return err - } - } - - if err != nil { - log.Error(err) - return err - } - // Some oversampling, if a problem has not been noticed for some time (e.g. 
the server came up alive - // before action was taken), expire it. - // Recall that RegisterBlockedRecoveries continuously updates the last_blocked_timestamp column. - _, err = db.ExecVTOrc(` - delete - from blocked_topology_recovery - where - last_blocked_timestamp < NOW() - interval ? second - `, config.Config.RecoveryPollSeconds*2, - ) - if err != nil { - log.Error(err) - } - return err -} + topologyRecovery := NewTopologyRecovery(*analysisEntry) -// acknowledgeRecoveries sets acknowledged* details and clears the in_active_period flags from a set of entries -func acknowledgeRecoveries(owner string, comment string, markEndRecovery bool, whereClause string, args []any) (countAcknowledgedEntries int64, err error) { - additionalSet := `` - if markEndRecovery { - additionalSet = ` - end_recovery=IFNULL(end_recovery, NOW()), - ` - } - query := fmt.Sprintf(` - update topology_recovery set - in_active_period = 0, - end_active_period_unixtime = case when end_active_period_unixtime = 0 then UNIX_TIMESTAMP() else end_active_period_unixtime end, - %s - acknowledged = 1, - acknowledged_at = NOW(), - acknowledged_by = ?, - acknowledge_comment = ? - where - acknowledged = 0 - and - %s - `, additionalSet, whereClause) - args = append(sqlutils.Args(owner, comment), args...) - sqlResult, err := db.ExecVTOrc(query, args...) - if err != nil { - log.Error(err) - return 0, err - } - rows, err := sqlResult.RowsAffected() + topologyRecovery, err = writeTopologyRecovery(topologyRecovery) if err != nil { log.Error(err) + return nil, err } - return rows, err -} - -// AcknowledgeInstanceCompletedRecoveries marks active and COMPLETED recoveries for given instane as acknowledged. -// This also implied clearing their active period, which in turn enables further recoveries on those topologies -func AcknowledgeInstanceCompletedRecoveries(tabletAlias string, owner string, comment string) (countAcknowledgedEntries int64, err error) { - whereClause := ` - alias = ? 
- and end_recovery is not null - ` - return acknowledgeRecoveries(owner, comment, false, whereClause, sqlutils.Args(tabletAlias)) -} - -// AcknowledgeCrashedRecoveries marks recoveries whose processing nodes has crashed as acknowledged. -func AcknowledgeCrashedRecoveries() (countAcknowledgedEntries int64, err error) { - whereClause := ` - in_active_period = 1 - and end_recovery is null - and concat(processing_node_hostname, ':', processcing_node_token) not in ( - select concat(hostname, ':', token) from node_health - ) - ` - return acknowledgeRecoveries("vtorc", "detected crashed recovery", true, whereClause, sqlutils.Args()) + return topologyRecovery, nil } // ResolveRecovery is called on completion of a recovery process and updates the recovery status. @@ -401,11 +144,11 @@ func writeResolveRecovery(topologyRecovery *TopologyRecovery) error { all_errors = ?, end_recovery = NOW() where - uid = ? + recovery_id = ? `, topologyRecovery.IsSuccessful, topologyRecovery.SuccessorAlias, strings.Join(topologyRecovery.AllErrors, "\n"), - topologyRecovery.UID, + topologyRecovery.ID, ) if err != nil { log.Error(err) @@ -419,26 +162,16 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog query := fmt.Sprintf(` select recovery_id, - uid, alias, - (IFNULL(end_active_period_unixtime, 0) = 0) as is_active, - start_active_period, - IFNULL(end_active_period_unixtime, 0) as end_active_period_unixtime, + start_recovery, IFNULL(end_recovery, '') AS end_recovery, is_successful, - processing_node_hostname, - processcing_node_token, ifnull(successor_alias, '') as successor_alias, analysis, keyspace, shard, - count_affected_replicas, all_errors, - acknowledged, - acknowledged_at, - acknowledged_by, - acknowledge_comment, - last_detection_id + detection_id from topology_recovery %s @@ -449,33 +182,21 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog err := db.QueryVTOrc(query, args, func(m sqlutils.RowMap) error { 
topologyRecovery := *NewTopologyRecovery(inst.ReplicationAnalysis{}) topologyRecovery.ID = m.GetInt64("recovery_id") - topologyRecovery.UID = m.GetString("uid") - topologyRecovery.IsActive = m.GetBool("is_active") - topologyRecovery.RecoveryStartTimestamp = m.GetString("start_active_period") + topologyRecovery.RecoveryStartTimestamp = m.GetString("start_recovery") topologyRecovery.RecoveryEndTimestamp = m.GetString("end_recovery") topologyRecovery.IsSuccessful = m.GetBool("is_successful") - topologyRecovery.ProcessingNodeHostname = m.GetString("processing_node_hostname") - topologyRecovery.ProcessingNodeToken = m.GetString("processcing_node_token") topologyRecovery.AnalysisEntry.AnalyzedInstanceAlias = m.GetString("alias") topologyRecovery.AnalysisEntry.Analysis = inst.AnalysisCode(m.GetString("analysis")) topologyRecovery.AnalysisEntry.ClusterDetails.Keyspace = m.GetString("keyspace") topologyRecovery.AnalysisEntry.ClusterDetails.Shard = m.GetString("shard") - topologyRecovery.AnalysisEntry.CountReplicas = m.GetUint("count_affected_replicas") topologyRecovery.SuccessorAlias = m.GetString("successor_alias") - topologyRecovery.AnalysisEntry.ClusterDetails.ReadRecoveryInfo() - topologyRecovery.AllErrors = strings.Split(m.GetString("all_errors"), "\n") - topologyRecovery.Acknowledged = m.GetBool("acknowledged") - topologyRecovery.AcknowledgedAt = m.GetString("acknowledged_at") - topologyRecovery.AcknowledgedBy = m.GetString("acknowledged_by") - topologyRecovery.AcknowledgedComment = m.GetString("acknowledge_comment") - - topologyRecovery.LastDetectionID = m.GetInt64("last_detection_id") + topologyRecovery.DetectionID = m.GetInt64("detection_id") res = append(res, &topologyRecovery) return nil @@ -487,37 +208,21 @@ func readRecoveries(whereCondition string, limit string, args []any) ([]*Topolog return res, err } -// ReadInActivePeriodClusterRecovery reads recoveries (possibly complete!) that are in active period for the analysis. 
-// (may be used to block further recoveries of the same analysis on this cluster) -func ReadInActivePeriodClusterRecovery(keyspace string, shard, analysis string) ([]*TopologyRecovery, error) { +// ReadActiveClusterRecoveries reads recoveries that are ongoing for the given cluster. +func ReadActiveClusterRecoveries(keyspace string, shard string) ([]*TopologyRecovery, error) { whereClause := ` where - in_active_period=1 + end_recovery IS NULL and keyspace=? - and shard=? - and analysis=?` - return readRecoveries(whereClause, ``, sqlutils.Args(keyspace, shard, analysis)) -} - -// ReadInActivePeriodSuccessorInstanceRecovery reads completed recoveries for a given instance, where said instance -// was promoted as result, still in active period (may be used to block further recoveries should this instance die) -func ReadInActivePeriodSuccessorInstanceRecovery(tabletAlias string) ([]*TopologyRecovery, error) { - whereClause := ` - where - in_active_period=1 - and - successor_alias=?` - return readRecoveries(whereClause, ``, sqlutils.Args(tabletAlias)) + and shard=?` + return readRecoveries(whereClause, ``, sqlutils.Args(keyspace, shard)) } // ReadRecentRecoveries reads latest recovery entries from topology_recovery -func ReadRecentRecoveries(unacknowledgedOnly bool, page int) ([]*TopologyRecovery, error) { +func ReadRecentRecoveries(page int) ([]*TopologyRecovery, error) { whereConditions := []string{} whereClause := "" var args []any - if unacknowledgedOnly { - whereConditions = append(whereConditions, `acknowledged=0`) - } if len(whereConditions) > 0 { whereClause = fmt.Sprintf("where %s", strings.Join(whereConditions, " and ")) } @@ -533,9 +238,9 @@ func writeTopologyRecoveryStep(topologyRecoveryStep *TopologyRecoveryStep) error sqlResult, err := db.ExecVTOrc(` insert ignore into topology_recovery_steps ( - recovery_step_id, recovery_uid, audit_at, message + recovery_step_id, recovery_id, audit_at, message ) values (?, ?, now(), ?) 
- `, sqlutils.NilIfZero(topologyRecoveryStep.ID), topologyRecoveryStep.RecoveryUID, topologyRecoveryStep.Message, + `, sqlutils.NilIfZero(topologyRecoveryStep.ID), topologyRecoveryStep.RecoveryID, topologyRecoveryStep.Message, ) if err != nil { log.Error(err) @@ -548,17 +253,17 @@ func writeTopologyRecoveryStep(topologyRecoveryStep *TopologyRecoveryStep) error return err } -// ExpireFailureDetectionHistory removes old rows from the topology_failure_detection table -func ExpireFailureDetectionHistory() error { - return inst.ExpireTableData("topology_failure_detection", "start_active_period") +// ExpireRecoveryDetectionHistory removes old rows from the recovery_detection table +func ExpireRecoveryDetectionHistory() error { + return inst.ExpireTableData("recovery_detection", "detection_timestamp") } -// ExpireTopologyRecoveryHistory removes old rows from the topology_failure_detection table +// ExpireTopologyRecoveryHistory removes old rows from the topology_recovery table func ExpireTopologyRecoveryHistory() error { - return inst.ExpireTableData("topology_recovery", "start_active_period") + return inst.ExpireTableData("topology_recovery", "start_recovery") } -// ExpireTopologyRecoveryStepsHistory removes old rows from the topology_failure_detection table +// ExpireTopologyRecoveryStepsHistory removes old rows from the topology_recovery_steps table func ExpireTopologyRecoveryStepsHistory() error { return inst.ExpireTableData("topology_recovery_steps", "audit_at") } diff --git a/go/vt/vtorc/logic/topology_recovery_dao_test.go b/go/vt/vtorc/logic/topology_recovery_dao_test.go index f9a9026a4a1..354af82e2b3 100644 --- a/go/vt/vtorc/logic/topology_recovery_dao_test.go +++ b/go/vt/vtorc/logic/topology_recovery_dao_test.go @@ -17,11 +17,13 @@ limitations under the License. 
package logic import ( + "strconv" "testing" "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/external/golib/sqlutils" + "vitess.io/vitess/go/vt/vtorc/config" "vitess.io/vitess/go/vt/vtorc/db" "vitess.io/vitess/go/vt/vtorc/inst" ) @@ -59,7 +61,7 @@ func TestTopologyRecovery(t *testing.T) { }) t.Run("read recoveries", func(t *testing.T) { - recoveries, err := ReadRecentRecoveries(false, 0) + recoveries, err := ReadRecentRecoveries(0) require.NoError(t, err) require.Len(t, recoveries, 1) // Assert that the ID field matches the one that we just wrote @@ -67,35 +69,102 @@ func TestTopologyRecovery(t *testing.T) { }) } -// TestBlockedRecoveryInsertion tests that we are able to insert into the blocked_recovery table. -func TestBlockedRecoveryInsertion(t *testing.T) { - orcDb, err := db.OpenVTOrc() - require.NoError(t, err) +func TestExpireTableData(t *testing.T) { + oldVal := config.Config.AuditPurgeDays + config.Config.AuditPurgeDays = 10 defer func() { - _, err = orcDb.Exec("delete from blocked_topology_recovery") - require.NoError(t, err) + config.Config.AuditPurgeDays = oldVal }() - analysisEntry := &inst.ReplicationAnalysis{ - AnalyzedInstanceAlias: "zone1-0000000100", - ClusterDetails: inst.ClusterInfo{ - Keyspace: "ks", - Shard: "0", + tests := []struct { + name string + tableName string + insertQuery string + expectedRowCount int + expireFunc func() error + }{ + { + name: "ExpireRecoveryDetectionHistory", + tableName: "recovery_detection", + expectedRowCount: 2, + insertQuery: `insert into recovery_detection (detection_id, detection_timestamp, alias, analysis, keyspace, shard) values +(1, NOW() - INTERVAL 3 DAY,'a','a','a','a'), +(2, NOW() - INTERVAL 5 DAY,'a','a','a','a'), +(3, NOW() - INTERVAL 15 DAY,'a','a','a','a')`, + expireFunc: ExpireRecoveryDetectionHistory, + }, + { + name: "ExpireTopologyRecoveryHistory", + tableName: "topology_recovery", + expectedRowCount: 1, + insertQuery: `insert into topology_recovery (recovery_id, start_recovery, 
alias, analysis, keyspace, shard) values +(1, NOW() - INTERVAL 13 DAY,'a','a','a','a'), +(2, NOW() - INTERVAL 5 DAY,'a','a','a','a'), +(3, NOW() - INTERVAL 15 DAY,'a','a','a','a')`, + expireFunc: ExpireTopologyRecoveryHistory, }, - Analysis: inst.DeadPrimaryAndSomeReplicas, + { + name: "ExpireTopologyRecoveryStepsHistory", + tableName: "topology_recovery_steps", + expectedRowCount: 1, + insertQuery: `insert into topology_recovery_steps (recovery_step_id, audit_at, recovery_id, message) values +(1, NOW() - INTERVAL 13 DAY, 1, 'a'), +(2, NOW() - INTERVAL 5 DAY, 2, 'a'), +(3, NOW() - INTERVAL 15 DAY, 3, 'a')`, + expireFunc: ExpireTopologyRecoveryStepsHistory, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. + defer func() { + db.ClearVTOrcDatabase() + }() + _, err := db.ExecVTOrc(tt.insertQuery) + require.NoError(t, err) + + err = tt.expireFunc() + require.NoError(t, err) + + rowsCount := 0 + err = db.QueryVTOrc(`select * from `+tt.tableName, nil, func(rowMap sqlutils.RowMap) error { + rowsCount++ + return nil + }) + require.NoError(t, err) + require.EqualValues(t, tt.expectedRowCount, rowsCount) + }) } - blockedRecovery := &TopologyRecovery{ - ID: 1, +} + +func TestInsertRecoveryDetection(t *testing.T) { + // Clear the database after the test. The easiest way to do that is to run all the initialization commands again. 
+ defer func() { + db.ClearVTOrcDatabase() + }() + ra := &inst.ReplicationAnalysis{ + AnalyzedInstanceAlias: "alias-1", + Analysis: inst.ClusterHasNoPrimary, + ClusterDetails: inst.ClusterInfo{ + Keyspace: keyspace, + Shard: shard, + }, } - err = RegisterBlockedRecoveries(analysisEntry, []*TopologyRecovery{blockedRecovery}) + err := InsertRecoveryDetection(ra) require.NoError(t, err) + require.NotEqual(t, 0, ra.RecoveryId) - totalBlockedRecoveries := 0 - err = db.QueryVTOrc("select count(*) as blocked_recoveries from blocked_topology_recovery", nil, func(rowMap sqlutils.RowMap) error { - totalBlockedRecoveries = rowMap.GetInt("blocked_recoveries") + var rows []map[string]sqlutils.CellData + err = db.QueryVTOrc("select * from recovery_detection", nil, func(rowMap sqlutils.RowMap) error { + rows = append(rows, rowMap) return nil }) require.NoError(t, err) - // There should be 1 blocked recovery after insertion - require.Equal(t, 1, totalBlockedRecoveries) + require.Len(t, rows, 1) + require.EqualValues(t, ra.AnalyzedInstanceAlias, rows[0]["alias"].String) + require.EqualValues(t, ra.Analysis, rows[0]["analysis"].String) + require.EqualValues(t, keyspace, rows[0]["keyspace"].String) + require.EqualValues(t, shard, rows[0]["shard"].String) + require.EqualValues(t, strconv.Itoa(int(ra.RecoveryId)), rows[0]["detection_id"].String) + require.NotEqual(t, "", rows[0]["detection_timestamp"].String) } diff --git a/go/vt/vtorc/logic/topology_recovery_status.go b/go/vt/vtorc/logic/topology_recovery_status.go index d1195963ba1..d128a0637bc 100644 --- a/go/vt/vtorc/logic/topology_recovery_status.go +++ b/go/vt/vtorc/logic/topology_recovery_status.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS 
OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/vt/vtorc/logic/topology_recovery_test.go b/go/vt/vtorc/logic/topology_recovery_test.go index d517649fd13..f7658060b95 100644 --- a/go/vt/vtorc/logic/topology_recovery_test.go +++ b/go/vt/vtorc/logic/topology_recovery_test.go @@ -19,9 +19,7 @@ package logic import ( "context" "testing" - "time" - "github.com/patrickmn/go-cache" "github.com/stretchr/testify/require" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -88,7 +86,6 @@ func TestAnalysisEntriesHaveSameRecovery(t *testing.T) { shouldBeEqual: true, }, } - emergencyOperationGracefulPeriodMap = cache.New(time.Second*5, time.Millisecond*500) t.Parallel() for _, tt := range tests { t.Run(string(tt.prevAnalysisCode)+","+string(tt.newAnalysisCode), func(t *testing.T) { @@ -134,7 +131,7 @@ func TestElectNewPrimaryPanic(t *testing.T) { require.Error(t, err) } -func TestDifferentAnalysescHaveDifferentCooldowns(t *testing.T) { +func TestRecoveryRegistration(t *testing.T) { orcDb, err := db.OpenVTOrc() require.NoError(t, err) oldTs := ts @@ -184,13 +181,20 @@ func TestDifferentAnalysescHaveDifferentCooldowns(t *testing.T) { defer cancel() ts = memorytopo.NewServer(ctx, "zone1") - _, err = AttemptRecoveryRegistration(&replicaAnalysisEntry, false, true) - require.Nil(t, err) + tp, err := AttemptRecoveryRegistration(&replicaAnalysisEntry) + require.NoError(t, err) + + // because there is another recovery in progress for this shard, this will fail. + _, err = AttemptRecoveryRegistration(&primaryAnalysisEntry) + require.ErrorContains(t, err, "Active recovery") + + // Lets say the recovery finishes after some time. 
+ err = resolveRecovery(tp, nil) + require.NoError(t, err) - // even though this is another recovery on the same cluster, allow it to go through - // because the analysis is different (ReplicationStopped vs DeadPrimary) - _, err = AttemptRecoveryRegistration(&primaryAnalysisEntry, true, true) - require.Nil(t, err) + // now this recovery registration should be successful. + _, err = AttemptRecoveryRegistration(&primaryAnalysisEntry) + require.NoError(t, err) } func TestGetCheckAndRecoverFunctionCode(t *testing.T) { @@ -256,12 +260,6 @@ func TestGetCheckAndRecoverFunctionCode(t *testing.T) { }, } - // Needed for the test to work - oldMap := emergencyOperationGracefulPeriodMap - emergencyOperationGracefulPeriodMap = cache.New(time.Second*5, time.Millisecond*500) - defer func() { - emergencyOperationGracefulPeriodMap = oldMap - }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { prevVal := config.ERSEnabled() diff --git a/go/vt/vtorc/logic/vtorc.go b/go/vt/vtorc/logic/vtorc.go index 02fb41daa21..0e38f6e3aae 100644 --- a/go/vt/vtorc/logic/vtorc.go +++ b/go/vt/vtorc/logic/vtorc.go @@ -25,9 +25,9 @@ import ( "time" "github.com/patrickmn/go-cache" - "github.com/rcrowley/go-metrics" "github.com/sjmudd/stopwatch" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtorc/collection" @@ -35,7 +35,6 @@ import ( "vitess.io/vitess/go/vt/vtorc/discovery" "vitess.io/vitess/go/vt/vtorc/inst" ometrics "vitess.io/vitess/go/vt/vtorc/metrics" - "vitess.io/vitess/go/vt/vtorc/process" "vitess.io/vitess/go/vt/vtorc/util" ) @@ -51,55 +50,30 @@ var snapshotDiscoveryKeys chan string var snapshotDiscoveryKeysMutex sync.Mutex var hasReceivedSIGTERM int32 -var discoveriesCounter = metrics.NewCounter() -var failedDiscoveriesCounter = metrics.NewCounter() -var instancePollSecondsExceededCounter = metrics.NewCounter() -var discoveryQueueLengthGauge = metrics.NewGauge() -var discoveryRecentCountGauge = metrics.NewGauge() 
-var isElectedGauge = metrics.NewGauge() -var isHealthyGauge = metrics.NewGauge() +// The metrics are registered with deprecated names. The old metric names can be removed in v21. +var discoveriesCounter = stats.NewCounterWithDeprecatedName("DiscoveriesAttempt", "discoveries.attempt", "Number of discoveries attempted") +var failedDiscoveriesCounter = stats.NewCounterWithDeprecatedName("DiscoveriesFail", "discoveries.fail", "Number of failed discoveries") +var instancePollSecondsExceededCounter = stats.NewCounterWithDeprecatedName("DiscoveriesInstancePollSecondsExceeded", "discoveries.instance_poll_seconds_exceeded", "Number of instances that took longer than InstancePollSeconds to poll") +var discoveryQueueLengthGauge = stats.NewGaugeWithDeprecatedName("DiscoveriesQueueLength", "discoveries.queue_length", "Length of the discovery queue") +var discoveryRecentCountGauge = stats.NewGaugeWithDeprecatedName("DiscoveriesRecentCount", "discoveries.recent_count", "Number of recent discoveries") var discoveryMetrics = collection.CreateOrReturnCollection(DiscoveryMetricsName) -var isElectedNode int64 - var recentDiscoveryOperationKeys *cache.Cache func init() { snapshotDiscoveryKeys = make(chan string, 10) - _ = metrics.Register("discoveries.attempt", discoveriesCounter) - _ = metrics.Register("discoveries.fail", failedDiscoveriesCounter) - _ = metrics.Register("discoveries.instance_poll_seconds_exceeded", instancePollSecondsExceededCounter) - _ = metrics.Register("discoveries.queue_length", discoveryQueueLengthGauge) - _ = metrics.Register("discoveries.recent_count", discoveryRecentCountGauge) - _ = metrics.Register("elect.is_elected", isElectedGauge) - _ = metrics.Register("health.is_healthy", isHealthyGauge) - ometrics.OnMetricsTick(func() { - discoveryQueueLengthGauge.Update(int64(discoveryQueue.QueueLen())) + discoveryQueueLengthGauge.Set(int64(discoveryQueue.QueueLen())) }) ometrics.OnMetricsTick(func() { if recentDiscoveryOperationKeys == nil { return } - 
discoveryRecentCountGauge.Update(int64(recentDiscoveryOperationKeys.ItemCount())) - }) - ometrics.OnMetricsTick(func() { - isElectedGauge.Update(atomic.LoadInt64(&isElectedNode)) - }) - ometrics.OnMetricsTick(func() { - isHealthyGauge.Update(atomic.LoadInt64(&process.LastContinousCheckHealthy)) + discoveryRecentCountGauge.Set(int64(recentDiscoveryOperationKeys.ItemCount())) }) } -func IsLeader() bool { - return atomic.LoadInt64(&isElectedNode) == 1 -} - -func IsLeaderOrActive() bool { - return atomic.LoadInt64(&isElectedNode) == 1 -} - // used in several places func instancePollSecondsDuration() time.Duration { return time.Duration(config.Config.InstancePollSeconds) * time.Second @@ -129,6 +103,7 @@ func closeVTOrc() { _ = inst.AuditOperation("shutdown", "", "Triggered via SIGTERM") // wait for the locks to be released waitForLocksRelease() + ts.Close() log.Infof("VTOrc closed") } @@ -160,15 +135,6 @@ func handleDiscoveryRequests() { go func() { for { tabletAlias := discoveryQueue.Consume() - // Possibly this used to be the elected node, but has - // been demoted, while still the queue is full. - if !IsLeaderOrActive() { - log.Infof("Node apparently demoted. Skipping discovery of %+v. 
"+ - "Remaining queue size: %+v", tabletAlias, discoveryQueue.QueueLen()) - discoveryQueue.Release(tabletAlias) - continue - } - DiscoverInstance(tabletAlias, false /* forceDiscovery */) discoveryQueue.Release(tabletAlias) } @@ -197,7 +163,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { latency.Stop("total") discoveryTime := latency.Elapsed("total") if discoveryTime > instancePollSecondsDuration() { - instancePollSecondsExceededCounter.Inc(1) + instancePollSecondsExceededCounter.Add(1) log.Warningf("discoverInstance exceeded InstancePollSeconds for %+v, took %.4fs", tabletAlias, discoveryTime.Seconds()) if metric != nil { metric.InstancePollSecondsDurationCount = 1 @@ -225,7 +191,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { return } - discoveriesCounter.Inc(1) + discoveriesCounter.Add(1) // First we've ever heard of this instance. Continue investigation: instance, err := inst.ReadTopologyInstanceBufferable(tabletAlias, latency) @@ -240,7 +206,7 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { } if instance == nil { - failedDiscoveriesCounter.Inc(1) + failedDiscoveriesCounter.Add(1) metric = &discovery.Metric{ Timestamp: time.Now(), TabletAlias: tabletAlias, @@ -274,40 +240,11 @@ func DiscoverInstance(tabletAlias string, forceDiscovery bool) { // onHealthTick handles the actions to take to discover/poll instances func onHealthTick() { - wasAlreadyElected := IsLeader() - { - myIsElectedNode, err := process.AttemptElection() - if err != nil { - log.Error(err) - } - if myIsElectedNode { - atomic.StoreInt64(&isElectedNode, 1) - } else { - atomic.StoreInt64(&isElectedNode, 0) - } - if !myIsElectedNode { - if electedNode, _, err := process.ElectedNode(); err == nil { - log.Infof("Not elected as active node; active node: %v; polling", electedNode.Hostname) - } else { - log.Infof("Not elected as active node; active node: Unable to determine: %v; polling", err) - } - } - } - if !IsLeaderOrActive() { - return - 
} tabletAliases, err := inst.ReadOutdatedInstanceKeys() if err != nil { log.Error(err) } - if !wasAlreadyElected { - // Just turned to be leader! - go func() { - _, _ = process.RegisterNode(process.ThisNodeHealth) - }() - } - func() { // Normally onHealthTick() shouldn't run concurrently. It is kicked by a ticker. // However it _is_ invoked inside a goroutine. I like to be safe here. @@ -329,14 +266,12 @@ func onHealthTick() { } } -// ContinuousDiscovery starts an asynchronuous infinite discovery process where instances are +// ContinuousDiscovery starts an asynchronous infinite discovery process where instances are // periodically investigated and their status captured, and long since unseen instances are // purged and forgotten. // nolint SA1015: using time.Tick leaks the underlying ticker func ContinuousDiscovery() { log.Infof("continuous discovery: setting up") - continuousDiscoveryStartTime := time.Now() - checkAndRecoverWaitPeriod := 3 * instancePollSecondsDuration() recentDiscoveryOperationKeys = cache.New(instancePollSecondsDuration(), time.Second) go handleDiscoveryRequests() @@ -351,10 +286,6 @@ func ContinuousDiscovery() { snapshotTopologiesTick = time.Tick(time.Duration(config.Config.SnapshotTopologiesIntervalHours) * time.Hour) } - runCheckAndRecoverOperationsTimeRipe := func() bool { - return time.Since(continuousDiscoveryStartTime) >= checkAndRecoverWaitPeriod - } - go func() { _ = ometrics.InitMetrics() }() @@ -372,70 +303,56 @@ func ContinuousDiscovery() { case <-caretakingTick: // Various periodic internal maintenance tasks go func() { - if IsLeaderOrActive() { - - go inst.ForgetLongUnseenInstances() - go inst.ExpireAudit() - go inst.ExpireStaleInstanceBinlogCoordinates() - go process.ExpireNodesHistory() - go process.ExpireAvailableNodes() - go ExpireFailureDetectionHistory() - go ExpireTopologyRecoveryHistory() - go ExpireTopologyRecoveryStepsHistory() - } + go inst.ForgetLongUnseenInstances() + go inst.ExpireAudit() + go 
inst.ExpireStaleInstanceBinlogCoordinates() + go ExpireRecoveryDetectionHistory() + go ExpireTopologyRecoveryHistory() + go ExpireTopologyRecoveryStepsHistory() }() case <-recoveryTick: go func() { - if IsLeaderOrActive() { - go ClearActiveFailureDetections() - go ClearActiveRecoveries() - go ExpireBlockedRecoveries() - go AcknowledgeCrashedRecoveries() - go inst.ExpireInstanceAnalysisChangelog() - - go func() { - // This function is non re-entrant (it can only be running once at any point in time) - if atomic.CompareAndSwapInt64(&recoveryEntrance, 0, 1) { - defer atomic.StoreInt64(&recoveryEntrance, 0) - } else { - return - } - if runCheckAndRecoverOperationsTimeRipe() { - CheckAndRecover() - } else { - log.Infof("Waiting for %+v seconds to pass before running failure detection/recovery", checkAndRecoverWaitPeriod.Seconds()) - } - }() - } + go inst.ExpireInstanceAnalysisChangelog() + + go func() { + // This function is non re-entrant (it can only be running once at any point in time) + if atomic.CompareAndSwapInt64(&recoveryEntrance, 0, 1) { + defer atomic.StoreInt64(&recoveryEntrance, 0) + } else { + return + } + CheckAndRecover() + }() }() case <-snapshotTopologiesTick: go func() { - if IsLeaderOrActive() { - go inst.SnapshotTopologies() - } + go inst.SnapshotTopologies() }() case <-tabletTopoTick: - // Create a wait group - var wg sync.WaitGroup + refreshAllInformation() + } + } +} - // Refresh all keyspace information. - wg.Add(1) - go func() { - defer wg.Done() - RefreshAllKeyspacesAndShards() - }() +// refreshAllInformation refreshes both shard and tablet information. This is meant to be run on tablet topo ticks. +func refreshAllInformation() { + // Create a wait group + var wg sync.WaitGroup - // Refresh all tablets. - wg.Add(1) - go func() { - defer wg.Done() - refreshAllTablets() - }() + // Refresh all keyspace information. 
+ wg.Add(1) + go func() { + defer wg.Done() + RefreshAllKeyspacesAndShards() + }() - // Wait for both the refreshes to complete - wg.Wait() - // We have completed one discovery cycle in the entirety of it. We should update the process health. - process.FirstDiscoveryCycleComplete.Store(true) - } - } + // Refresh all tablets. + wg.Add(1) + go func() { + defer wg.Done() + refreshAllTablets() + }() + + // Wait for both the refreshes to complete + wg.Wait() } diff --git a/go/vt/vtorc/metrics/query/aggregated.go b/go/vt/vtorc/metrics/query/aggregated.go deleted file mode 100644 index beece44d53a..00000000000 --- a/go/vt/vtorc/metrics/query/aggregated.go +++ /dev/null @@ -1,76 +0,0 @@ -// Package query provdes query metrics with this file providing -// aggregared metrics based on the underlying values. -package query - -import ( - "time" - - "github.com/montanaflynn/stats" - - "vitess.io/vitess/go/vt/vtorc/collection" -) - -type AggregatedQueryMetrics struct { - // fill me in here - Count int - MaxLatencySeconds float64 - MeanLatencySeconds float64 - MedianLatencySeconds float64 - P95LatencySeconds float64 - MaxWaitSeconds float64 - MeanWaitSeconds float64 - MedianWaitSeconds float64 - P95WaitSeconds float64 -} - -// AggregatedSince returns the aggregated query metrics for the period -// given from the values provided. 
-func AggregatedSince(c *collection.Collection, t time.Time) AggregatedQueryMetrics { - - // Initialise timing metrics - var waitTimings []float64 - var queryTimings []float64 - - // Retrieve values since the time specified - values, err := c.Since(t) - a := AggregatedQueryMetrics{} - if err != nil { - return a // empty data - } - - // generate the metrics - for _, v := range values { - waitTimings = append(waitTimings, v.(*Metric).WaitLatency.Seconds()) - queryTimings = append(queryTimings, v.(*Metric).ExecuteLatency.Seconds()) - } - - a.Count = len(waitTimings) - - // generate aggregate values - if s, err := stats.Max(stats.Float64Data(waitTimings)); err == nil { - a.MaxWaitSeconds = s - } - if s, err := stats.Mean(stats.Float64Data(waitTimings)); err == nil { - a.MeanWaitSeconds = s - } - if s, err := stats.Median(stats.Float64Data(waitTimings)); err == nil { - a.MedianWaitSeconds = s - } - if s, err := stats.Percentile(stats.Float64Data(waitTimings), 95); err == nil { - a.P95WaitSeconds = s - } - if s, err := stats.Max(stats.Float64Data(queryTimings)); err == nil { - a.MaxLatencySeconds = s - } - if s, err := stats.Mean(stats.Float64Data(queryTimings)); err == nil { - a.MeanLatencySeconds = s - } - if s, err := stats.Median(stats.Float64Data(queryTimings)); err == nil { - a.MedianLatencySeconds = s - } - if s, err := stats.Percentile(stats.Float64Data(queryTimings), 95); err == nil { - a.P95LatencySeconds = s - } - - return a -} diff --git a/go/vt/vtorc/process/election_dao.go b/go/vt/vtorc/process/election_dao.go deleted file mode 100644 index f723bd48dde..00000000000 --- a/go/vt/vtorc/process/election_dao.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package process - -import ( - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" - "vitess.io/vitess/go/vt/vtorc/util" -) - -// AttemptElection tries to grab leadership (become active node) -func AttemptElection() (bool, error) { - { - sqlResult, err := db.ExecVTOrc(` - insert ignore into active_node ( - anchor, hostname, token, first_seen_active, last_seen_active - ) values ( - 1, ?, ?, now(), now() - ) - `, - ThisHostname, util.ProcessToken.Hash, - ) - if err != nil { - log.Error(err) - return false, err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return false, err - } - if rows > 0 { - // We managed to insert a row - return true, nil - } - } - { - // takeover from a node that has been inactive - sqlResult, err := db.ExecVTOrc(` - update active_node set - hostname = ?, - token = ?, - first_seen_active=now(), - last_seen_active=now() - where - anchor = 1 - and last_seen_active < (now() - interval ? 
second) - `, - ThisHostname, util.ProcessToken.Hash, config.ActiveNodeExpireSeconds, - ) - if err != nil { - log.Error(err) - return false, err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return false, err - } - if rows > 0 { - // We managed to update a row: overtaking a previous leader - return true, nil - } - } - { - // Update last_seen_active is this very node is already the active node - sqlResult, err := db.ExecVTOrc(` - update active_node set - last_seen_active=now() - where - anchor = 1 - and hostname = ? - and token = ? - `, - ThisHostname, util.ProcessToken.Hash, - ) - if err != nil { - log.Error(err) - return false, err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return false, err - } - if rows > 0 { - // Reaffirmed our own leadership - return true, nil - } - } - return false, nil -} - -// GrabElection forcibly grabs leadership. Use with care!! -func GrabElection() error { - _, err := db.ExecVTOrc(` - replace into active_node ( - anchor, hostname, token, first_seen_active, last_seen_active - ) values ( - 1, ?, ?, now(), now() - ) - `, - ThisHostname, util.ProcessToken.Hash, - ) - if err != nil { - log.Error(err) - } - return err -} - -// Reelect clears the way for re-elections. Active node is immediately demoted. -func Reelect() error { - _, err := db.ExecVTOrc(`delete from active_node where anchor = 1`) - if err != nil { - log.Error(err) - } - return err -} - -// ElectedNode returns the details of the elected node, as well as answering the question "is this process the elected one"? 
-func ElectedNode() (node *NodeHealth, isElected bool, err error) { - node = &NodeHealth{} - query := ` - select - hostname, - token, - first_seen_active, - last_seen_Active - from - active_node - where - anchor = 1 - ` - err = db.QueryVTOrcRowsMap(query, func(m sqlutils.RowMap) error { - node.Hostname = m.GetString("hostname") - node.Token = m.GetString("token") - node.FirstSeenActive = m.GetString("first_seen_active") - node.LastSeenActive = m.GetString("last_seen_active") - - return nil - }) - - isElected = (node.Hostname == ThisHostname && node.Token == util.ProcessToken.Hash) - if err != nil { - log.Error(err) - } - return node, isElected, err //nolint copylocks: return copies lock value -} diff --git a/go/vt/vtorc/process/health.go b/go/vt/vtorc/process/health.go index 22db89e1d56..86101d6c5c0 100644 --- a/go/vt/vtorc/process/health.go +++ b/go/vt/vtorc/process/health.go @@ -17,151 +17,47 @@ package process import ( - "sync" "sync/atomic" "time" "vitess.io/vitess/go/vt/log" - - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/util" - - "github.com/patrickmn/go-cache" + "vitess.io/vitess/go/vt/vtorc/db" ) -var lastHealthCheckUnixNano int64 -var lastGoodHealthCheckUnixNano int64 -var LastContinousCheckHealthy int64 var FirstDiscoveryCycleComplete atomic.Bool -var lastHealthCheckCache = cache.New(config.HealthPollSeconds*time.Second, time.Second) - type NodeHealth struct { - Hostname string - Token string - AppVersion string - FirstSeenActive string - LastSeenActive string - ExtraInfo string - Command string - DBBackend string - + Healthy bool LastReported time.Time - onceHistory sync.Once - onceUpdate sync.Once -} - -func NewNodeHealth() *NodeHealth { - return &NodeHealth{ - Hostname: ThisHostname, - Token: util.ProcessToken.Hash, - } } -func (nodeHealth *NodeHealth) Update() *NodeHealth { - nodeHealth.onceUpdate.Do(func() { - nodeHealth.Hostname = ThisHostname - nodeHealth.Token = util.ProcessToken.Hash - }) - nodeHealth.LastReported = 
time.Now() - return nodeHealth -} - -var ThisNodeHealth = NewNodeHealth() - -type HealthStatus struct { - Healthy bool - Hostname string - Token string - IsActiveNode bool - DiscoveredOnce bool - ActiveNode *NodeHealth - Error error - AvailableNodes [](*NodeHealth) - RaftLeader string - IsRaftLeader bool - RaftLeaderURI string - RaftAdvertise string - RaftHealthyMembers []string -} - -type VTOrcExecutionMode string - -const ( - VTOrcExecutionCliMode VTOrcExecutionMode = "CLIMode" - VTOrcExecutionHTTPMode VTOrcExecutionMode = "HttpMode" -) - -var continuousRegistrationOnce sync.Once - -func RegisterNode(nodeHealth *NodeHealth) (healthy bool, err error) { - nodeHealth.Update() - healthy, err = WriteRegisterNode(nodeHealth) - atomic.StoreInt64(&lastHealthCheckUnixNano, time.Now().UnixNano()) - if healthy { - atomic.StoreInt64(&lastGoodHealthCheckUnixNano, time.Now().UnixNano()) - } - return healthy, err -} +var ThisNodeHealth = &NodeHealth{} -// HealthTest attempts to write to the backend database and get a result -func HealthTest() (health *HealthStatus, err error) { - cacheKey := util.ProcessToken.Hash - if healthStatus, found := lastHealthCheckCache.Get(cacheKey); found { - return healthStatus.(*HealthStatus), nil +// writeHealthToDatabase writes to the database and returns if it was successful. 
+func writeHealthToDatabase() bool { + _, err := db.ExecVTOrc("delete from node_health") + if err != nil { + log.Error(err) + return false } - - health = &HealthStatus{Healthy: false, Hostname: ThisHostname, Token: util.ProcessToken.Hash} - defer lastHealthCheckCache.Set(cacheKey, health, cache.DefaultExpiration) - - healthy, err := RegisterNode(ThisNodeHealth) + sqlResult, err := db.ExecVTOrc(`insert into node_health (last_seen_active) values (now())`) if err != nil { - health.Error = err log.Error(err) - return health, err + return false } - health.Healthy = healthy - health.DiscoveredOnce = FirstDiscoveryCycleComplete.Load() - - if health.ActiveNode, health.IsActiveNode, err = ElectedNode(); err != nil { - health.Error = err + rows, err := sqlResult.RowsAffected() + if err != nil { log.Error(err) - return health, err + return false } - health.AvailableNodes, _ = ReadAvailableNodes(true) - - return health, nil + return rows > 0 } -// ContinuousRegistration will continuously update the node_health -// table showing that the current process is still running. -func ContinuousRegistration(extraInfo string, command string) { - ThisNodeHealth.ExtraInfo = extraInfo - ThisNodeHealth.Command = command - continuousRegistrationOnce.Do(func() { - tickOperation := func() { - healthy, err := RegisterNode(ThisNodeHealth) - if err != nil { - log.Errorf("ContinuousRegistration: RegisterNode failed: %+v", err) - } - if healthy { - atomic.StoreInt64(&LastContinousCheckHealthy, 1) - } else { - atomic.StoreInt64(&LastContinousCheckHealthy, 0) - } - } - // First one is synchronous - tickOperation() - go func() { - registrationTick := time.Tick(config.HealthPollSeconds * time.Second) - for range registrationTick { - // We already run inside a go-routine so - // do not do this asynchronously. If we - // get stuck then we don't want to fill up - // the backend pool with connections running - // this maintenance operation. 
- tickOperation() - } - }() - }) +// HealthTest attempts to write to the backend database and get a result +func HealthTest() (health *NodeHealth, discoveredOnce bool) { + ThisNodeHealth.LastReported = time.Now() + discoveredOnce = FirstDiscoveryCycleComplete.Load() + ThisNodeHealth.Healthy = writeHealthToDatabase() + + return ThisNodeHealth, discoveredOnce } diff --git a/go/vt/vtorc/process/health_dao.go b/go/vt/vtorc/process/health_dao.go deleted file mode 100644 index 59ea557223d..00000000000 --- a/go/vt/vtorc/process/health_dao.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package process - -import ( - "time" - - "vitess.io/vitess/go/vt/external/golib/sqlutils" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/vtorc/config" - "vitess.io/vitess/go/vt/vtorc/db" -) - -// WriteRegisterNode writes down this node in the node_health table -func WriteRegisterNode(nodeHealth *NodeHealth) (healthy bool, err error) { - timeNow := time.Now() - reportedAgo := timeNow.Sub(nodeHealth.LastReported) - reportedSecondsAgo := int64(reportedAgo.Seconds()) - if reportedSecondsAgo > config.HealthPollSeconds*2 { - // This entry is too old. No reason to persist it; already expired. 
- return false, nil - } - - nodeHealth.onceHistory.Do(func() { - _, _ = db.ExecVTOrc(` - insert ignore into node_health_history - (hostname, token, first_seen_active, extra_info, command, app_version) - values - (?, ?, NOW(), ?, ?, ?) - `, - nodeHealth.Hostname, nodeHealth.Token, nodeHealth.ExtraInfo, nodeHealth.Command, - nodeHealth.AppVersion, - ) - }) - { - sqlResult, err := db.ExecVTOrc(` - update node_health set - last_seen_active = now() - interval ? second, - extra_info = case when ? != '' then ? else extra_info end, - app_version = ?, - incrementing_indicator = incrementing_indicator + 1 - where - hostname = ? - and token = ? - `, - reportedSecondsAgo, - nodeHealth.ExtraInfo, nodeHealth.ExtraInfo, - nodeHealth.AppVersion, - nodeHealth.Hostname, nodeHealth.Token, - ) - if err != nil { - log.Error(err) - return false, err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return false, err - } - if rows > 0 { - return true, nil - } - } - // Got here? The UPDATE didn't work. Row isn't there. - { - dbBackend := config.Config.SQLite3DataFile - sqlResult, err := db.ExecVTOrc(` - insert ignore into node_health - (hostname, token, first_seen_active, last_seen_active, extra_info, command, app_version, db_backend) - values ( - ?, ?, - now() - interval ? second, now() - interval ? second, - ?, ?, ?, ?) - `, - nodeHealth.Hostname, nodeHealth.Token, - reportedSecondsAgo, reportedSecondsAgo, - nodeHealth.ExtraInfo, nodeHealth.Command, - nodeHealth.AppVersion, dbBackend, - ) - if err != nil { - log.Error(err) - return false, err - } - rows, err := sqlResult.RowsAffected() - if err != nil { - log.Error(err) - return false, err - } - if rows > 0 { - return true, nil - } - } - return false, nil -} - -// ExpireAvailableNodes is an aggressive purging method to remove -// node entries who have skipped their keepalive for two times. 
-func ExpireAvailableNodes() { - _, err := db.ExecVTOrc(` - delete - from node_health - where - last_seen_active < now() - interval ? second - `, - config.HealthPollSeconds*5, - ) - if err != nil { - log.Errorf("ExpireAvailableNodes: failed to remove old entries: %+v", err) - } -} - -// ExpireNodesHistory cleans up the nodes history and is run by -// the vtorc active node. -func ExpireNodesHistory() error { - _, err := db.ExecVTOrc(` - delete - from node_health_history - where - first_seen_active < now() - interval ? hour - `, - config.UnseenInstanceForgetHours, - ) - if err != nil { - log.Error(err) - } - return err -} - -func ReadAvailableNodes(onlyHTTPNodes bool) (nodes [](*NodeHealth), err error) { - extraInfo := "" - if onlyHTTPNodes { - extraInfo = string(VTOrcExecutionHTTPMode) - } - query := ` - select - hostname, token, app_version, first_seen_active, last_seen_active, db_backend - from - node_health - where - last_seen_active > now() - interval ? second - and ? in (extra_info, '') - order by - hostname - ` - - err = db.QueryVTOrc(query, sqlutils.Args(config.HealthPollSeconds*2, extraInfo), func(m sqlutils.RowMap) error { - nodeHealth := &NodeHealth{ - Hostname: m.GetString("hostname"), - Token: m.GetString("token"), - AppVersion: m.GetString("app_version"), - FirstSeenActive: m.GetString("first_seen_active"), - LastSeenActive: m.GetString("last_seen_active"), - DBBackend: m.GetString("db_backend"), - } - nodes = append(nodes, nodeHealth) - return nil - }) - if err != nil { - log.Error(err) - } - return nodes, err -} diff --git a/go/vt/vtorc/server/api.go b/go/vt/vtorc/server/api.go index f053336e64e..5e9a84c0a29 100644 --- a/go/vt/vtorc/server/api.go +++ b/go/vt/vtorc/server/api.go @@ -45,6 +45,7 @@ const ( disableGlobalRecoveriesAPI = "/api/disable-global-recoveries" enableGlobalRecoveriesAPI = "/api/enable-global-recoveries" replicationAnalysisAPI = "/api/replication-analysis" + databaseStateAPI = "/api/database-state" healthAPI = "/debug/health" 
AggregatedDiscoveryMetricsAPI = "/api/aggregated-discovery-metrics" @@ -60,6 +61,7 @@ var ( disableGlobalRecoveriesAPI, enableGlobalRecoveriesAPI, replicationAnalysisAPI, + databaseStateAPI, healthAPI, AggregatedDiscoveryMetricsAPI, } @@ -86,6 +88,8 @@ func (v *vtorcAPI) ServeHTTP(response http.ResponseWriter, request *http.Request errantGTIDsAPIHandler(response, request) case replicationAnalysisAPI: replicationAnalysisAPIHandler(response, request) + case databaseStateAPI: + databaseStateAPIHandler(response) case AggregatedDiscoveryMetricsAPI: AggregatedDiscoveryMetricsAPIHandler(response, request) default: @@ -104,7 +108,7 @@ func getACLPermissionLevelForAPI(apiEndpoint string) string { return acl.ADMIN case replicationAnalysisAPI: return acl.MONITORING - case healthAPI: + case healthAPI, databaseStateAPI: return acl.MONITORING } return acl.ADMIN @@ -117,7 +121,7 @@ func RegisterVTOrcAPIEndpoints() { } } -// returnAsJSON returns the argument received on the resposeWriter as a json object +// returnAsJSON returns the argument received on the responseWriter as a json object func returnAsJSON(response http.ResponseWriter, code int, stuff any) { response.Header().Set("Content-Type", "application/json; charset=utf-8") response.WriteHeader(code) @@ -166,6 +170,16 @@ func errantGTIDsAPIHandler(response http.ResponseWriter, request *http.Request) returnAsJSON(response, http.StatusOK, instances) } +// databaseStateAPIHandler is the handler for the databaseStateAPI endpoint +func databaseStateAPIHandler(response http.ResponseWriter) { + ds, err := inst.GetDatabaseState() + if err != nil { + http.Error(response, err.Error(), http.StatusInternalServerError) + return + } + writePlainTextResponse(response, ds, http.StatusOK) +} + // AggregatedDiscoveryMetricsAPIHandler is the handler for the discovery metrics endpoint func AggregatedDiscoveryMetricsAPIHandler(response http.ResponseWriter, request *http.Request) { // return metrics for last x seconds @@ -233,14 +247,10 @@ func 
replicationAnalysisAPIHandler(response http.ResponseWriter, request *http.R // healthAPIHandler is the handler for the healthAPI endpoint func healthAPIHandler(response http.ResponseWriter, request *http.Request) { - health, err := process.HealthTest() - if err != nil { - http.Error(response, err.Error(), http.StatusInternalServerError) - return - } + health, discoveredOnce := process.HealthTest() code := http.StatusOK // If the process isn't healthy, or if the first discovery cycle hasn't completed, we return an internal server error. - if !health.Healthy || !health.DiscoveredOnce { + if !health.Healthy || !discoveredOnce { code = http.StatusInternalServerError } returnAsJSON(response, code, health) diff --git a/go/vt/vtorc/server/discovery.go b/go/vt/vtorc/server/discovery.go index 0e5cf5923c8..26e5c9e108e 100644 --- a/go/vt/vtorc/server/discovery.go +++ b/go/vt/vtorc/server/discovery.go @@ -19,13 +19,10 @@ package server import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtorc/logic" - "vitess.io/vitess/go/vt/vtorc/process" ) // StartVTOrcDiscovery starts VTOrc discovery serving func StartVTOrcDiscovery() { - process.ContinuousRegistration(string(process.VTOrcExecutionHTTPMode), "") - log.Info("Starting Discovery") go logic.ContinuousDiscovery() } diff --git a/go/vt/vtorc/test/recovery_analysis.go b/go/vt/vtorc/test/recovery_analysis.go index b2ae4ce9520..2a95d3b2b0e 100644 --- a/go/vt/vtorc/test/recovery_analysis.go +++ b/go/vt/vtorc/test/recovery_analysis.go @@ -48,21 +48,19 @@ type InfoForRecoveryAnalysis struct { LogPos uint32 IsStaleBinlogCoordinates int GTIDMode string + ReplicaNetTimeout int32 + HeartbeatInterval float64 ErrantGTID string LastCheckValid int LastCheckPartialSuccess int CountReplicas uint CountValidReplicas uint CountValidReplicatingReplicas uint - CountReplicasFailingToConnectToPrimary uint CountDowntimedReplicas uint - ReplicationDepth uint - IsFailingToConnectToPrimary int ReplicationStopped int IsDowntimed int 
DowntimeEndTimestamp string DowntimeRemainingSeconds int - IsBinlogServer int CountValidOracleGTIDReplicas uint CountValidMariaDBGTIDReplicas uint CountValidBinlogServerReplicas uint @@ -100,7 +98,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["count_mixed_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountMixedBasedLoggingReplicas), Valid: true} rowMap["count_oracle_gtid_replicas"] = sqlutils.CellData{Valid: false} rowMap["count_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountReplicas), Valid: true} - rowMap["count_replicas_failing_to_connect_to_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountReplicasFailingToConnectToPrimary), Valid: true} rowMap["count_row_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountRowBasedLoggingReplicas), Valid: true} rowMap["count_semi_sync_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountSemiSyncReplicasEnabled), Valid: true} rowMap["count_statement_based_logging_replicas"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.CountStatementBasedLoggingReplicas), Valid: true} @@ -116,10 +113,8 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["gtid_errant"] = sqlutils.CellData{String: info.ErrantGTID, Valid: true} rowMap["gtid_mode"] = sqlutils.CellData{String: info.GTIDMode, Valid: true} rowMap["hostname"] = sqlutils.CellData{String: info.Hostname, Valid: true} - rowMap["is_binlog_server"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsBinlogServer), Valid: true} rowMap["is_co_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsCoPrimary), Valid: true} rowMap["is_downtimed"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsDowntimed), Valid: true} - rowMap["is_failing_to_connect_to_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsFailingToConnectToPrimary), Valid: true} rowMap["is_invalid"] = sqlutils.CellData{String: 
fmt.Sprintf("%v", info.IsInvalid), Valid: true} rowMap["is_last_check_valid"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LastCheckValid), Valid: true} rowMap["is_primary"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.IsPrimary), Valid: true} @@ -129,6 +124,8 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["shard"] = sqlutils.CellData{String: info.Shard, Valid: true} rowMap["shard_primary_term_timestamp"] = sqlutils.CellData{String: info.ShardPrimaryTermTimestamp, Valid: true} rowMap["last_check_partial_success"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.LastCheckPartialSuccess), Valid: true} + rowMap["replica_net_timeout"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReplicaNetTimeout), Valid: true} + rowMap["heartbeat_interval"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.HeartbeatInterval), Valid: true} rowMap["max_replica_gtid_errant"] = sqlutils.CellData{String: info.MaxReplicaGTIDErrant, Valid: true} rowMap["max_replica_gtid_mode"] = sqlutils.CellData{String: info.MaxReplicaGTIDMode, Valid: true} rowMap["min_replica_gtid_mode"] = sqlutils.CellData{String: info.MinReplicaGTIDMode, Valid: true} @@ -143,7 +140,6 @@ func (info *InfoForRecoveryAnalysis) ConvertToRowMap() sqlutils.RowMap { rowMap["primary_timestamp"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.PrimaryTimestamp), Valid: true} rowMap["read_only"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReadOnly), Valid: true} rowMap["region"] = sqlutils.CellData{String: info.Region, Valid: true} - rowMap["replication_depth"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReplicationDepth), Valid: true} rowMap["replication_stopped"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.ReplicationStopped), Valid: true} rowMap["semi_sync_primary_clients"] = sqlutils.CellData{String: fmt.Sprintf("%v", info.SemiSyncPrimaryClients), Valid: true} rowMap["semi_sync_primary_enabled"] = sqlutils.CellData{String: fmt.Sprintf("%v", 
info.SemiSyncPrimaryEnabled), Valid: true} diff --git a/go/vt/vtorc/util/token.go b/go/vt/vtorc/util/token.go deleted file mode 100644 index ff60e3e18ea..00000000000 --- a/go/vt/vtorc/util/token.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2014 Outbrain Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package util - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "fmt" - "time" -) - -const ( - shortTokenLength = 8 -) - -func toHash(input []byte) string { - hasher := sha256.New() - hasher.Write(input) - return hex.EncodeToString(hasher.Sum(nil)) -} - -func getRandomData() []byte { - size := 64 - rb := make([]byte, size) - _, _ = rand.Read(rb) - return rb -} - -func RandomHash() string { - return toHash(getRandomData()) -} - -// Token is used to identify and validate requests to this service -type Token struct { - Hash string -} - -func (token *Token) Short() string { - if len(token.Hash) <= shortTokenLength { - return token.Hash - } - return token.Hash[0:shortTokenLength] -} - -var ProcessToken = NewToken() - -func NewToken() *Token { - return &Token{ - Hash: RandomHash(), - } -} - -func PrettyUniqueToken() string { - return fmt.Sprintf("%d:%s", time.Now().UnixNano(), NewToken().Hash) -} diff --git a/go/vt/vtorc/util/token_test.go b/go/vt/vtorc/util/token_test.go deleted file mode 100644 index 5e634c05f31..00000000000 --- a/go/vt/vtorc/util/token_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package util - -import ( - "testing" - - 
"github.com/stretchr/testify/require" - - "vitess.io/vitess/go/vt/log" -) - -func init() { -} - -func TestNewToken(t *testing.T) { - token1 := NewToken() - - require.NotEqual(t, token1.Hash, "") - require.Equal(t, len(token1.Hash), 64) -} - -func TestNewTokenRandom(t *testing.T) { - log.Infof("test") - token1 := NewToken() - token2 := NewToken() - - // The following test can fail once in a quadrazillion eons - require.NotEqual(t, token1.Hash, token2.Hash) -} diff --git a/go/vt/vttablet/endtoend/acl_test.go b/go/vt/vttablet/endtoend/acl_test.go index 0894c2838d0..ed9cfa83817 100644 --- a/go/vt/vttablet/endtoend/acl_test.go +++ b/go/vt/vttablet/endtoend/acl_test.go @@ -21,7 +21,7 @@ import ( "encoding/json" "testing" - "gotest.tools/assert" + "github.com/stretchr/testify/assert" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" diff --git a/go/vt/vttablet/endtoend/call_test.go b/go/vt/vttablet/endtoend/call_test.go index 3a42eea3780..a1a2eae792a 100644 --- a/go/vt/vttablet/endtoend/call_test.go +++ b/go/vt/vttablet/endtoend/call_test.go @@ -75,12 +75,16 @@ func TestCallProcedure(t *testing.T) { wantErr bool } tcases := []testcases{{ + query: "call proc_dml()", + }, { query: "call proc_select1()", wantErr: true, }, { query: "call proc_select4()", wantErr: true, }, { + // Again, make sure the connection isn't dirty and does not contain leftover + // result sets from previous tests. query: "call proc_dml()", }} @@ -92,7 +96,6 @@ func TestCallProcedure(t *testing.T) { return } require.NoError(t, err) - }) } } @@ -149,7 +152,7 @@ func TestCallProcedureChangedTx(t *testing.T) { }) } - // This passes as this starts a new transaction by commiting the old transaction implicitly. + // This passes as this starts a new transaction by committing the old transaction implicitly. 
_, err = client.BeginExecute(`call proc_tx_begin()`, nil, nil) require.NoError(t, err) } diff --git a/go/vt/vttablet/endtoend/compatibility_test.go b/go/vt/vttablet/endtoend/compatibility_test.go index 9b89a602281..4dde4019a99 100644 --- a/go/vt/vttablet/endtoend/compatibility_test.go +++ b/go/vt/vttablet/endtoend/compatibility_test.go @@ -33,7 +33,7 @@ import ( var point12 = "\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@" -func TestCharaterSet(t *testing.T) { +func TestCharacterSet(t *testing.T) { qr, err := framework.NewClient().Execute("select * from vitess_test where intval=1", nil) if err != nil { t.Fatal(err) diff --git a/go/vt/vttablet/endtoend/config_test.go b/go/vt/vttablet/endtoend/config_test.go index 60303cf4bf5..b1dc7f5dcb9 100644 --- a/go/vt/vttablet/endtoend/config_test.go +++ b/go/vt/vttablet/endtoend/config_test.go @@ -108,70 +108,94 @@ func TestDisableConsolidator(t *testing.T) { } func TestConsolidatorReplicasOnly(t *testing.T) { - totalConsolidationsTag := "Waits/Histograms/Consolidations/Count" - initial := framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) - var wg sync.WaitGroup - wg.Add(2) - go func() { - framework.NewClient().Execute("select sleep(0.5) from dual", nil) - wg.Done() - }() - go func() { - framework.NewClient().Execute("select sleep(0.5) from dual", nil) - wg.Done() - }() - wg.Wait() - afterOne := framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) - assert.Equal(t, initial+1, afterOne, "expected one consolidation") - - revert := changeVar(t, "Consolidator", tabletenv.NotOnPrimary) - defer revert() - - // primary should not do query consolidation - var wg2 sync.WaitGroup - wg2.Add(2) - go func() { - framework.NewClient().Execute("select sleep(0.5) from dual", nil) - wg2.Done() - }() - go func() { - framework.NewClient().Execute("select sleep(0.5) from dual", nil) - wg2.Done() - }() - wg2.Wait() - noNewConsolidations := 
framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) - assert.Equal(t, afterOne, noNewConsolidations, "expected no new consolidations") - - // become a replica, where query consolidation should happen - client := framework.NewClientWithTabletType(topodatapb.TabletType_REPLICA) - - err := client.SetServingType(topodatapb.TabletType_REPLICA) - require.NoError(t, err) - defer func() { - err = client.SetServingType(topodatapb.TabletType_PRIMARY) - require.NoError(t, err) - }() + type executeFn func( + query string, bindvars map[string]*querypb.BindVariable, + ) (*sqltypes.Result, error) + + testCases := []struct { + name string + getExecuteFn func(qc *framework.QueryClient) executeFn + totalConsolidationsTag string + }{ + { + name: "Execute", + getExecuteFn: func(qc *framework.QueryClient) executeFn { return qc.Execute }, + totalConsolidationsTag: "Waits/Histograms/Consolidations/Count", + }, + { + name: "StreamExecute", + getExecuteFn: func(qc *framework.QueryClient) executeFn { return qc.StreamExecute }, + totalConsolidationsTag: "Waits/Histograms/StreamConsolidations/Count", + }, + } - initial = framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) - var wg3 sync.WaitGroup - wg3.Add(2) - go func() { - client.Execute("select sleep(0.5) from dual", nil) - wg3.Done() - }() - go func() { - client.Execute("select sleep(0.5) from dual", nil) - wg3.Done() - }() - wg3.Wait() - afterOne = framework.FetchInt(framework.DebugVars(), totalConsolidationsTag) - assert.Equal(t, initial+1, afterOne, "expected another consolidation") + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + initial := framework.FetchInt(framework.DebugVars(), testCase.totalConsolidationsTag) + var wg sync.WaitGroup + wg.Add(2) + go func() { + testCase.getExecuteFn(framework.NewClient())("select sleep(0.5) from dual", nil) + wg.Done() + }() + go func() { + testCase.getExecuteFn(framework.NewClient())("select sleep(0.5) from dual", nil) + wg.Done() + 
}() + wg.Wait() + afterOne := framework.FetchInt(framework.DebugVars(), testCase.totalConsolidationsTag) + assert.Equal(t, initial+1, afterOne, "expected one consolidation") + + revert := changeVar(t, "Consolidator", tabletenv.NotOnPrimary) + defer revert() + + // primary should not do query consolidation + var wg2 sync.WaitGroup + wg2.Add(2) + go func() { + testCase.getExecuteFn(framework.NewClient())("select sleep(0.5) from dual", nil) + wg2.Done() + }() + go func() { + testCase.getExecuteFn(framework.NewClient())("select sleep(0.5) from dual", nil) + wg2.Done() + }() + wg2.Wait() + noNewConsolidations := framework.FetchInt(framework.DebugVars(), testCase.totalConsolidationsTag) + assert.Equal(t, afterOne, noNewConsolidations, "expected no new consolidations") + + // become a replica, where query consolidation should happen + client := framework.NewClientWithTabletType(topodatapb.TabletType_REPLICA) + + err := client.SetServingType(topodatapb.TabletType_REPLICA) + require.NoError(t, err) + defer func() { + err = client.SetServingType(topodatapb.TabletType_PRIMARY) + require.NoError(t, err) + }() + + initial = framework.FetchInt(framework.DebugVars(), testCase.totalConsolidationsTag) + var wg3 sync.WaitGroup + wg3.Add(2) + go func() { + testCase.getExecuteFn(client)("select sleep(0.5) from dual", nil) + wg3.Done() + }() + go func() { + testCase.getExecuteFn(client)("select sleep(0.5) from dual", nil) + wg3.Done() + }() + wg3.Wait() + afterOne = framework.FetchInt(framework.DebugVars(), testCase.totalConsolidationsTag) + assert.Equal(t, initial+1, afterOne, "expected another consolidation") + }) + } } func TestQueryPlanCache(t *testing.T) { var cachedPlanSize = int((&tabletserver.TabletPlan{}).CachedSize(true)) - //sleep to avoid race between SchemaChanged event clearing out the plans cache which breaks this test + // sleep to avoid race between SchemaChanged event clearing out the plans cache which breaks this test framework.Server.WaitForSchemaReset(2 * 
time.Second) bindVars := map[string]*querypb.BindVariable{ @@ -252,7 +276,7 @@ func TestQueryTimeout(t *testing.T) { assert.Equal(t, vtrpcpb.Code_ABORTED, vterrors.Code(err)) vend := framework.DebugVars() verifyIntValue(t, vend, "QueryTimeout", int(100*time.Millisecond)) - compareIntDiff(t, vend, "Kills/Queries", vstart, 1) + compareIntDiff(t, vend, "Kills/Connections", vstart, 1) } func changeVar(t *testing.T, name, value string) (revert func()) { diff --git a/go/vt/vttablet/endtoend/connkilling/main_test.go b/go/vt/vttablet/endtoend/connkilling/main_test.go index e7486c397eb..3d0ec344715 100644 --- a/go/vt/vttablet/endtoend/connkilling/main_test.go +++ b/go/vt/vttablet/endtoend/connkilling/main_test.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "testing" + "time" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/tableacl" @@ -81,7 +82,7 @@ func TestMain(m *testing.M) { connParams = cluster.MySQLConnParams() connAppDebugParams = cluster.MySQLAppDebugConnParams() config := tabletenv.NewDefaultConfig() - _ = config.Oltp.TxTimeoutSeconds.Set("3s") + config.Oltp.TxTimeout = 3 * time.Second ctx, cancel := context.WithCancel(context.Background()) defer cancel() err := framework.StartCustomServer(ctx, connParams, connAppDebugParams, cluster.DbName(), config) diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index 3c06f9b465c..dc4b7f9f339 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -57,6 +57,19 @@ func NewClient() *QueryClient { } } +// NewClientWithServer creates a new client for a given server. +func NewClientWithServer(server *tabletserver.TabletServer) *QueryClient { + return &QueryClient{ + ctx: callerid.NewContext( + context.Background(), + &vtrpcpb.CallerID{}, + &querypb.VTGateCallerID{Username: "dev"}, + ), + target: Target, + server: server, + } +} + // NewClientWithTabletType creates a new client for Server with the provided tablet type. 
func NewClientWithTabletType(tabletType topodatapb.TabletType) *QueryClient { targetCopy := Target.CloneVT() @@ -134,7 +147,7 @@ func (client *QueryClient) CommitPrepared(dtid string) error { return client.server.CommitPrepared(client.ctx, client.target, dtid) } -// RollbackPrepared rollsback a prepared transaction. +// RollbackPrepared rolls back a prepared transaction. func (client *QueryClient) RollbackPrepared(dtid string, originalID int64) error { return client.server.RollbackPrepared(client.ctx, client.target, dtid, originalID) } @@ -418,16 +431,17 @@ func (client *QueryClient) UpdateContext(ctx context.Context) { client.ctx = ctx } -func (client *QueryClient) GetSchema(tableType querypb.SchemaTableType, tableNames ...string) (map[string]string, error) { - schemaDef := make(map[string]string) - err := client.server.GetSchema(client.ctx, client.target, tableType, tableNames, func(schemaRes *querypb.GetSchemaResponse) error { +func (client *QueryClient) GetSchema( + tableType querypb.SchemaTableType, + tableNames ...string, +) (schemaDef map[string]string, udfs []*querypb.UDFInfo, err error) { + schemaDef = make(map[string]string) + err = client.server.GetSchema(client.ctx, client.target, tableType, tableNames, func(schemaRes *querypb.GetSchemaResponse) error { for tableName, schemaDefinition := range schemaRes.TableDefinition { schemaDef[tableName] = schemaDefinition } + udfs = append(udfs, schemaRes.Udfs...) 
return nil }) - if err != nil { - return nil, err - } - return schemaDef, nil + return } diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index 4f8043fba5a..95c8114fd9f 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -23,9 +23,11 @@ import ( "net/http" "time" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/yaml2" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -58,7 +60,7 @@ var ( // StartCustomServer starts the server and initializes // all the global variables. This function should only be called // once at the beginning of the test. -func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string, config *tabletenv.TabletConfig) error { +func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnParams, dbName string, cfg *tabletenv.TabletConfig) error { // Setup a fake vtgate server. 
protocol := "resolveTest" vtgateconn.SetVTGateProtocol(protocol) @@ -77,7 +79,8 @@ func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql } TopoServer = memorytopo.NewServer(ctx, "") - Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + Server = tabletserver.NewTabletServer(ctx, vtenv.NewTestEnv(), "", cfg, TopoServer, &topodatapb.TabletAlias{}, srvTopoCounts) Server.Register() err := Server.StartService(Target, dbcfgs, nil /* mysqld */) if err != nil { @@ -118,13 +121,14 @@ func StartServer(ctx context.Context, connParams, connAppDebugParams mysql.ConnP config.TwoPCCoordinatorAddress = "fake" config.HotRowProtection.Mode = tabletenv.Enable config.TrackSchemaVersions = true - _ = config.GracePeriods.ShutdownSeconds.Set("2s") + config.GracePeriods.Shutdown = 2 * time.Second config.SignalWhenSchemaChange = true - _ = config.Healthcheck.IntervalSeconds.Set("100ms") - _ = config.Oltp.TxTimeoutSeconds.Set("5s") - _ = config.Olap.TxTimeoutSeconds.Set("5s") + config.Healthcheck.Interval = 100 * time.Millisecond + config.Oltp.TxTimeout = 5 * time.Second + config.Olap.TxTimeout = 5 * time.Second config.EnableViews = true config.QueryCacheDoorkeeper = false + config.SchemaReloadInterval = 5 * time.Second gotBytes, _ := yaml2.Marshal(config) log.Infof("Config:\n%s", gotBytes) return StartCustomServer(ctx, connParams, connAppDebugParams, dbName, config) @@ -135,7 +139,7 @@ func StopServer() { Server.StopService() } -// txReolver transmits dtids to be resolved through ResolveChan. +// txResolver transmits dtids to be resolved through ResolveChan. 
type txResolver struct { fakerpcvtgateconn.FakeVTGateConn } diff --git a/go/vt/vttablet/endtoend/main_test.go b/go/vt/vttablet/endtoend/main_test.go index b5256be0994..939147cb139 100644 --- a/go/vt/vttablet/endtoend/main_test.go +++ b/go/vt/vttablet/endtoend/main_test.go @@ -37,6 +37,7 @@ import ( var ( connParams mysql.ConnParams connAppDebugParams mysql.ConnParams + cluster vttest.LocalCluster ) func TestMain(m *testing.M) { @@ -69,7 +70,7 @@ func TestMain(m *testing.M) { return 1 } defer os.RemoveAll(cfg.SchemaDir) - cluster := vttest.LocalCluster{ + cluster = vttest.LocalCluster{ Config: cfg, } if err := cluster.Setup(); err != nil { @@ -307,7 +308,7 @@ var tableACLConfig = `{ }, { "name": "sys_table", - "table_names_or_prefixes": ["tables", "user", "processlist", "mutex_instances", "columns", "a"], + "table_names_or_prefixes": ["tables", "user", "processlist", "mutex_instances", "columns", "a", "func"], "readers": ["dev"], "writers": ["dev"], "admins": ["dev"] @@ -332,6 +333,13 @@ var tableACLConfig = `{ "readers": ["dev"], "writers": ["dev"], "admins": ["dev"] + }, + { + "name": "vitess_internal", + "table_names_or_prefixes": ["udfs"], + "readers": ["dev"], + "writers": ["dev"], + "admins": ["dev"] } ] }` diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 5c37a5d9bb0..768399572db 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "io" - "math" "net/http" "reflect" "strings" @@ -28,20 +27,17 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb 
"vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) @@ -265,8 +261,10 @@ func TestSidecarTables(t *testing.T) { } func TestConsolidation(t *testing.T) { - defer framework.Server.SetPoolSize(framework.Server.PoolSize()) - framework.Server.SetPoolSize(1) + defer framework.Server.SetPoolSize(context.Background(), framework.Server.PoolSize()) + + err := framework.Server.SetPoolSize(context.Background(), 1) + require.NoError(t, err) const tag = "Waits/Histograms/Consolidations/Count" @@ -628,66 +626,6 @@ func (tl *testLogger) getLog(i int) string { return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(tl.logs)) } -func TestLogTruncation(t *testing.T) { - client := framework.NewClient() - tl := newTestLogger() - defer tl.Close() - - // Test that a long error string is not truncated by default - _, err := client.Execute( - "insert into vitess_test values(123, null, :data, null)", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog := `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null)", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - wantErr := wantLog - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(0) != wantLog { - t.Errorf("log was unexpectedly truncated: got\n'%s', want\n'%s'", tl.getLog(0), wantLog) - } - - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } - - // Test that the data too long error is truncated once the option is set - sqlparser.SetTruncateErrLen(30) - _, err = client.Execute( - "insert into vitess_test values(123, null, :data, null)", - map[string]*querypb.BindVariable{"data": 
sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess [TRUNCATED]", BindVars: {data: " [TRUNCATED]` - wantErr = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null)", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(1) != wantLog { - t.Errorf("log was not truncated properly: got\n'%s', want\n'%s'", tl.getLog(1), wantLog) - } - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } - - // Test that trailing comments are preserved data too long error is truncated once the option is set - sqlparser.SetTruncateErrLen(30) - _, err = client.Execute( - "insert into vitess_test values(123, null, :data, null) /* KEEP ME */", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess [TRUNCATED] /* KEEP ME */", BindVars: {data: " [TRUNCATED]` - wantErr = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null) /* KEEP ME */", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(2) != wantLog { - t.Errorf("log was not truncated properly: got\n'%s', want\n'%s'", tl.getLog(2), wantLog) - } - if err.Error() != wantErr { - t.Errorf("error was 
unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } -} - func TestClientFoundRows(t *testing.T) { client := framework.NewClient() if _, err := client.Execute("insert into vitess_test(intval, charval) values(124, 'aa')", nil); err != nil { @@ -970,28 +908,91 @@ func TestShowTablesWithSizes(t *testing.T) { _, err := conn.ExecuteFetch(query, 1, false) require.NoError(t, err) } - expectTables := map[string]([]string){ // TABLE_TYPE, TABLE_COMMENT - "show_tables_with_sizes_t1": {"BASE TABLE", ""}, - "show_tables_with_sizes_v1": {"VIEW", "VIEW"}, - "show_tables_with_sizes_employees": {"BASE TABLE", ""}, + + expectedTables := []string{ + "show_tables_with_sizes_t1", + "show_tables_with_sizes_v1", + "show_tables_with_sizes_employees", } + actualTables := []string{} - rs, err := conn.ExecuteFetch(conn.BaseShowTablesWithSizes(), math.MaxInt, false) + rs, err := conn.ExecuteFetch(conn.BaseShowTablesWithSizes(), -1, false) require.NoError(t, err) require.NotEmpty(t, rs.Rows) - assert.GreaterOrEqual(t, len(rs.Rows), len(expectTables)) - matchedTables := map[string]bool{} + assert.GreaterOrEqual(t, len(rs.Rows), len(expectedTables)) + for _, row := range rs.Rows { + assert.Equal(t, 6, len(row)) + tableName := row[0].ToString() - vals, ok := expectTables[tableName] - if ok { - assert.Equal(t, vals[0], row[1].ToString()) // TABLE_TYPE - assert.Equal(t, vals[1], row[3].ToString()) // TABLE_COMMENT - matchedTables[tableName] = true + if tableName == "show_tables_with_sizes_t1" { + // TABLE_TYPE + assert.Equal(t, "BASE TABLE", row[1].ToString()) + + assert.True(t, row[2].IsIntegral()) + createTime, err := row[2].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, createTime, int64(0)) + + // TABLE_COMMENT + assert.Equal(t, "", row[3].ToString()) + + assert.True(t, row[4].IsDecimal()) + fileSize, err := row[4].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, fileSize, int64(0)) + + assert.True(t, row[4].IsDecimal()) + allocatedSize, err := 
row[5].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, allocatedSize, int64(0)) + + actualTables = append(actualTables, tableName) + } else if tableName == "show_tables_with_sizes_v1" { + // TABLE_TYPE + assert.Equal(t, "VIEW", row[1].ToString()) + + assert.True(t, row[2].IsIntegral()) + createTime, err := row[2].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, createTime, int64(0)) + + // TABLE_COMMENT + assert.Equal(t, "VIEW", row[3].ToString()) + + assert.True(t, row[4].IsNull()) + assert.True(t, row[5].IsNull()) + + actualTables = append(actualTables, tableName) + } else if tableName == "show_tables_with_sizes_employees" { + // TABLE_TYPE + assert.Equal(t, "BASE TABLE", row[1].ToString()) + + assert.True(t, row[2].IsIntegral()) + createTime, err := row[2].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, createTime, int64(0)) + + // TABLE_COMMENT + assert.Equal(t, "", row[3].ToString()) + + assert.True(t, row[4].IsDecimal()) + fileSize, err := row[4].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, fileSize, int64(0)) + + assert.True(t, row[5].IsDecimal()) + allocatedSize, err := row[5].ToCastInt64() + assert.NoError(t, err) + assert.Greater(t, allocatedSize, int64(0)) + + actualTables = append(actualTables, tableName) } } - assert.Equalf(t, len(expectTables), len(matchedTables), "%v", matchedTables) + + assert.Equal(t, len(expectedTables), len(actualTables)) + assert.ElementsMatch(t, expectedTables, actualTables) } // TestTuple tests that bind variables having tuple values work with vttablet. 
diff --git a/go/vt/vttablet/endtoend/queries_test.go b/go/vt/vttablet/endtoend/queries_test.go index 3dad415c1c1..5a57f681a10 100644 --- a/go/vt/vttablet/endtoend/queries_test.go +++ b/go/vt/vttablet/endtoend/queries_test.go @@ -18,7 +18,7 @@ package endtoend import ( "fmt" - "math/rand" + "math/rand/v2" "testing" "github.com/stretchr/testify/require" @@ -1775,7 +1775,7 @@ func BenchmarkTabletQueries(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - tcase := TestQueryCases[rand.Intn(len(TestQueryCases))] + tcase := TestQueryCases[rand.IntN(len(TestQueryCases))] if err := tcase.Benchmark(client); err != nil { b.Error(err) } diff --git a/go/vt/vttablet/endtoend/reserve_test.go b/go/vt/vttablet/endtoend/reserve_test.go index 591512d44c6..d3fb685dd49 100644 --- a/go/vt/vttablet/endtoend/reserve_test.go +++ b/go/vt/vttablet/endtoend/reserve_test.go @@ -28,8 +28,6 @@ import ( "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) -//TODO: Add Counter checks in all the tests. - func TestMultipleReserveHaveDifferentConnection(t *testing.T) { framework.Server.Config().EnableSettingsPool = false defer func() { @@ -777,9 +775,9 @@ func TestReserveBeginExecuteWithPreQueriesAndCheckConnectionState(t *testing.T) require.NoError(t, err) assert.NotEqual(t, qr1.Rows, qr2.Rows) - // As the transaction is read commited it is not able to see #5. + // As the transaction is read committed it is not able to see #5. assert.Equal(t, `[[INT32(1)] [INT32(2)] [INT32(3)] [INT32(4)]]`, fmt.Sprintf("%v", qr1.Rows)) - // As the transaction is read uncommited it is able to see #4. + // As the transaction is read uncommitted it is able to see #4. 
assert.Equal(t, `[[INT32(1)] [INT32(2)] [INT32(3)] [INT32(4)] [INT32(5)]]`, fmt.Sprintf("%v", qr2.Rows)) err = rucClient.Commit() @@ -804,7 +802,7 @@ func TestReserveBeginExecuteWithPreQueriesAndCheckConnectionState(t *testing.T) qr2, err = rucClient.Execute(selQuery, nil) require.NoError(t, err) - // As the transaction on read committed client got rollbacked back, table will forget #4. + // As the transaction on read committed client got rolled back, table will forget #4. assert.Equal(t, qr1.Rows, qr2.Rows) assert.Equal(t, `[[INT32(1)] [INT32(2)] [INT32(3)] [INT32(5)]]`, fmt.Sprintf("%v", qr2.Rows)) @@ -1190,3 +1188,23 @@ func TestReserveQueryTimeout(t *testing.T) { assert.NoError(t, client.Release()) } + +// TestReserveFlushTables checks that `flush table with read lock` works only with reserve api. +func TestReserveFlushTables(t *testing.T) { + client := framework.NewClient() + + _, err := client.Execute("flush tables with read lock", nil) + assert.ErrorContains(t, err, "Flush not allowed without reserved connection") + + _, err = client.Execute("unlock tables", nil) + assert.ErrorContains(t, err, "unlock tables should be executed with an existing connection") + + _, err = client.ReserveExecute("flush tables with read lock", nil, nil) + assert.NoError(t, err) + + _, err = client.Execute("unlock tables", nil) + assert.NoError(t, err) + + assert.NoError(t, + client.Release()) +} diff --git a/go/vt/vttablet/endtoend/rpc_test.go b/go/vt/vttablet/endtoend/rpc_test.go index a186d444f8d..e1ee7dff411 100644 --- a/go/vt/vttablet/endtoend/rpc_test.go +++ b/go/vt/vttablet/endtoend/rpc_test.go @@ -169,6 +169,25 @@ func TestGetSchemaRPC(t *testing.T) { }, getSchemaQueryType: querypb.SchemaTableType_ALL, getSchemaTables: []string{"vitess_temp1", "vitess_temp3", "unknown_table", "vitess_view3", "vitess_view1", "unknown_view"}, + }, { + name: "Create some internal tables", + queries: []string{ + "create table if not exists 
_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410(id bigint primary key);", + "create table vitess_temp1 (eid int);", + "create view vitess_view1 as select eid from vitess_a", + }, + deferQueries: []string{ + "drop table _vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + "drop table vitess_temp1", + "drop view vitess_view1", + }, + mapToExpect: map[string]string{ + "vitess_view1": "CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view1` AS select `vitess_a`.`eid` AS `eid` from `vitess_a`", + "vitess_temp1": "CREATE TABLE `vitess_temp1` (\n `eid` int DEFAULT NULL\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci", + // These shouldn't be part of the result, so we verify it is empty. + "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_20200915120410": "", + }, + getSchemaQueryType: querypb.SchemaTableType_ALL, }, } @@ -199,8 +218,9 @@ func TestGetSchemaRPC(t *testing.T) { t.Errorf("Schema tracking hasn't caught up") return case <-time.After(1 * time.Second): - schemaDefs, err := client.GetSchema(testcase.getSchemaQueryType, testcase.getSchemaTables...) + schemaDefs, udfs, err := client.GetSchema(testcase.getSchemaQueryType, testcase.getSchemaTables...) require.NoError(t, err) + require.Empty(t, udfs) success := true for tableName, expectedCreateStatement := range testcase.mapToExpect { if schemaDefs[tableName] != expectedCreateStatement { diff --git a/go/vt/vttablet/endtoend/stream_test.go b/go/vt/vttablet/endtoend/stream_test.go index 05045fd6f7d..a3c73dd8152 100644 --- a/go/vt/vttablet/endtoend/stream_test.go +++ b/go/vt/vttablet/endtoend/stream_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package endtoend import ( + "context" "errors" "fmt" "reflect" @@ -98,11 +99,13 @@ func TestStreamConsolidation(t *testing.T) { defaultPoolSize := framework.Server.StreamPoolSize() - framework.Server.SetStreamPoolSize(4) + err = framework.Server.SetStreamPoolSize(context.Background(), 4) + require.NoError(t, err) + framework.Server.SetStreamConsolidationBlocking(true) defer func() { - framework.Server.SetStreamPoolSize(defaultPoolSize) + _ = framework.Server.SetStreamPoolSize(context.Background(), defaultPoolSize) framework.Server.SetStreamConsolidationBlocking(false) }() diff --git a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go index d69ce193ef9..9890efd427d 100644 --- a/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go +++ b/go/vt/vttablet/endtoend/streamtimeout/healthstream_test.go @@ -74,7 +74,7 @@ func TestSchemaChangeTimedout(t *testing.T) { // This is because the query timeout triggers the *DBConn.Kill() method, which in turn holds the mutex lock on the health_streamer. // Although not indefinitely, this can result in longer wait times. // It's worth noting that the behavior of *DBConn.Kill() is outside the scope of this test. - reloadInterval := config.SignalSchemaChangeReloadIntervalSeconds.Get() + reloadInterval := config.SignalSchemaChangeReloadInterval time.Sleep(reloadInterval) // pause simulating the mysql stall to allow the health_streamer to resume. 
diff --git a/go/vt/vttablet/endtoend/streamtimeout/main_test.go b/go/vt/vttablet/endtoend/streamtimeout/main_test.go index 68851bf901b..0b2f37a987c 100644 --- a/go/vt/vttablet/endtoend/streamtimeout/main_test.go +++ b/go/vt/vttablet/endtoend/streamtimeout/main_test.go @@ -84,7 +84,7 @@ func TestMain(m *testing.M) { connParams := cluster.MySQLConnParams() connAppDebugParams := cluster.MySQLAppDebugConnParams() config = tabletenv.NewDefaultConfig() - _ = config.SchemaReloadIntervalSeconds.Set("2100ms") + config.SchemaReloadInterval = (2 * time.Second) + (100 * time.Millisecond) config.SchemaChangeReloadTimeout = 10 * time.Second config.SignalWhenSchemaChange = true diff --git a/go/vt/vttablet/endtoend/transaction_test.go b/go/vt/vttablet/endtoend/transaction_test.go index 8f6546df5f1..b15e73585ba 100644 --- a/go/vt/vttablet/endtoend/transaction_test.go +++ b/go/vt/vttablet/endtoend/transaction_test.go @@ -471,7 +471,7 @@ func TestShutdownGracePeriodWithReserveExecute(t *testing.T) { client.Rollback() } -func TestShortTxTimeout(t *testing.T) { +func TestShortTxTimeoutOltp(t *testing.T) { client := framework.NewClient() defer framework.Server.Config().SetTxTimeoutForWorkload( framework.Server.Config().TxTimeoutForWorkload(querypb.ExecuteOptions_OLTP), @@ -488,6 +488,23 @@ func TestShortTxTimeout(t *testing.T) { client.Rollback() } +func TestShortTxTimeoutOlap(t *testing.T) { + client := framework.NewClient() + defer framework.Server.Config().SetTxTimeoutForWorkload( + framework.Server.Config().TxTimeoutForWorkload(querypb.ExecuteOptions_OLAP), + querypb.ExecuteOptions_OLAP, + ) + framework.Server.Config().SetTxTimeoutForWorkload(10*time.Millisecond, querypb.ExecuteOptions_OLAP) + + err := client.Begin(false) + require.NoError(t, err) + start := time.Now() + _, err = client.StreamExecute("select sleep(10) from dual", nil) + assert.Error(t, err) + assert.True(t, time.Since(start) < 5*time.Second, time.Since(start)) + client.Rollback() +} + func TestMMCommitFlow(t 
*testing.T) { client := framework.NewClient() defer client.Execute("delete from vitess_test where intval=4", nil) diff --git a/go/vt/vttablet/endtoend/udf.so b/go/vt/vttablet/endtoend/udf.so new file mode 100755 index 00000000000..b0af697aaf2 Binary files /dev/null and b/go/vt/vttablet/endtoend/udf.so differ diff --git a/go/vt/vttablet/endtoend/udfs_test.go b/go/vt/vttablet/endtoend/udfs_test.go new file mode 100644 index 00000000000..a8808ce2c96 --- /dev/null +++ b/go/vt/vttablet/endtoend/udfs_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package endtoend + +import ( + "context" + "fmt" + "io" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/callerid" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vttablet/endtoend/framework" +) + +const ( + soFileName = "udf.so" + udfRows = "select * from _vt.udfs" +) + +// TestUDFs will validate that UDFs signal is sent through the stream health. 
+func TestUDFs(t *testing.T) { + client := framework.NewClient() + + client.UpdateContext(callerid.NewContext( + context.Background(), + &vtrpcpb.CallerID{}, + &querypb.VTGateCallerID{Username: "dev"})) + + copySOFile(t, client) + + ch := make(chan any) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + err := client.StreamHealthWithContext(ctx, func(shr *querypb.StreamHealthResponse) error { + if shr.RealtimeStats.UdfsChanged { + ch <- true + } + return nil + }) + require.NoError(t, err) + }() + + // create a user defined function directly on mysql as it is not supported by vitess parser. + err := cluster.Execute([]string{"CREATE AGGREGATE FUNCTION myudf RETURNS REAL SONAME 'udf.so';"}, "vttest") + require.NoError(t, err) + + validateHealthStreamSignal(t, client, ch, + `[[BINARY("myudf") INT8(1) BINARY("udf.so") ENUM("aggregate")]]`, + `[[VARBINARY("myudf") VARBINARY("double") VARBINARY("aggregate")]]`) + + // dropping the user defined function. + err = cluster.Execute([]string{"drop function myudf"}, "vttest") + require.NoError(t, err) + + validateHealthStreamSignal(t, client, ch, + `[]`, + `[]`) +} + +func validateHealthStreamSignal(t *testing.T, client *framework.QueryClient, ch chan any, expected ...string) { + t.Helper() + + // validate the row in mysql.func. + qr, err := client.Execute("select * from mysql.func", nil) + require.NoError(t, err) + require.Equal(t, expected[0], fmt.Sprintf("%v", qr.Rows)) + + // wait for udf update + select { + case <-ch: + case <-time.After(30 * time.Second): + t.Fatal("timed out waiting for udf create signal") + } + + // validate the row in _vt.udfs. + qr, err = client.Execute(udfRows, nil) + require.NoError(t, err) + require.Equal(t, expected[1], fmt.Sprintf("%v", qr.Rows)) +} + +// TestUDF_RPC will validate that UDFs are received through the rpc call. 
+func TestUDF_RPC(t *testing.T) { + client := framework.NewClient() + + client.UpdateContext(callerid.NewContext( + context.Background(), + &vtrpcpb.CallerID{}, + &querypb.VTGateCallerID{Username: "dev"})) + + copySOFile(t, client) + + // create a user defined function directly on mysql as it is not supported by vitess parser. + err := cluster.Execute([]string{"CREATE AGGREGATE FUNCTION myudf RETURNS REAL SONAME 'udf.so';"}, "vttest") + require.NoError(t, err) + + validateRPC(t, client, func(udfs []*querypb.UDFInfo) bool { + // keep checking till the udf is added. + return len(udfs) == 0 + }) + + // dropping the user defined function. + err = cluster.Execute([]string{"drop function myudf"}, "vttest") + require.NoError(t, err) + + validateRPC(t, client, func(udfs []*querypb.UDFInfo) bool { + // keep checking till the udf is removed. + return len(udfs) != 0 + }) +} + +func validateRPC(t *testing.T, client *framework.QueryClient, cond func(udfs []*querypb.UDFInfo) bool) (<-chan time.Time, bool) { + timeout := time.After(30 * time.Second) + conditionNotMet := true + for conditionNotMet { + time.Sleep(1 * time.Second) + select { + case <-timeout: + t.Fatal("timed out waiting for updated udf") + default: + schemaDef, udfs, err := client.GetSchema(querypb.SchemaTableType_UDFS, "") + require.NoError(t, err) + require.Empty(t, schemaDef) + conditionNotMet = cond(udfs) + } + } + return timeout, conditionNotMet +} + +func copySOFile(t *testing.T, client *framework.QueryClient) { + t.Helper() + qr, err := client.Execute("select @@plugin_dir", nil) + require.NoError(t, err) + pluginDir := qr.Rows[0][0].ToString() + + source, err := os.Open(soFileName) + require.NoError(t, err) + defer source.Close() + + destination, err := os.Create(pluginDir + soFileName) + if err != nil && strings.Contains(err.Error(), "permission denied") { + t.Skip("permission denied to copy so file") + } + require.NoError(t, err) + defer destination.Close() + + _, err = io.Copy(destination, source) + 
require.NoError(t, err) +} diff --git a/go/vt/vttablet/endtoend/views_test.go b/go/vt/vttablet/endtoend/views_test.go index 4ef70345180..99a28b0f215 100644 --- a/go/vt/vttablet/endtoend/views_test.go +++ b/go/vt/vttablet/endtoend/views_test.go @@ -70,7 +70,7 @@ func TestCreateViewDDL(t *testing.T) { qr, err := client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`eid` AS `eid`,`vitess_a`.`id` AS `id`,`vitess_a`.`name` AS `name`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", + "[[VARBINARY(\"vttest\") VARBINARY(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`eid` AS `eid`,`vitess_a`.`id` AS `id`,`vitess_a`.`name` AS `name`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) // view already exists. This should fail. 
@@ -86,7 +86,7 @@ func TestCreateViewDDL(t *testing.T) { qr, err = client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`id` AS `id`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", + "[[VARBINARY(\"vttest\") VARBINARY(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`id` AS `id`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) } @@ -132,7 +132,7 @@ func TestAlterViewDDL(t *testing.T) { qr, err := client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`eid` AS `eid`,`vitess_a`.`id` AS `id`,`vitess_a`.`name` AS `name`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", + "[[VARBINARY(\"vttest\") VARBINARY(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`eid` AS `eid`,`vitess_a`.`id` AS `id`,`vitess_a`.`name` AS `name`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) // view exists, should PASS @@ -144,7 +144,7 @@ func TestAlterViewDDL(t *testing.T) { qr, err = client.Execute(qSelAllRows, nil) require.NoError(t, err) require.Equal(t, - "[[VARCHAR(\"vttest\") VARCHAR(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`id` AS `id`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", + "[[VARBINARY(\"vttest\") VARBINARY(\"vitess_view\") TEXT(\"CREATE ALGORITHM=UNDEFINED DEFINER=`vt_dba`@`localhost` SQL SECURITY DEFINER VIEW `vitess_view` AS select `vitess_a`.`id` AS 
`id`,`vitess_a`.`foo` AS `foo` from `vitess_a`\")]]", fmt.Sprintf("%v", qr.Rows)) } diff --git a/go/vt/vttablet/endtoend/vstreamer_test.go b/go/vt/vttablet/endtoend/vstreamer_test.go index 312273e0c84..997ab222255 100644 --- a/go/vt/vttablet/endtoend/vstreamer_test.go +++ b/go/vt/vttablet/endtoend/vstreamer_test.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "strings" + "sync" "testing" "time" @@ -65,6 +66,7 @@ func TestSchemaVersioning(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + wg := sync.WaitGroup{} tsv.EnableHistorian(true) tsv.SetTracking(true) time.Sleep(100 * time.Millisecond) // wait for _vt tables to be created @@ -155,7 +157,9 @@ func TestSchemaVersioning(t *testing.T) { } return nil } + wg.Add(1) go func() { + defer wg.Done() defer close(eventCh) req := &binlogdatapb.VStreamRequest{Target: target, Position: "current", TableLastPKs: nil, Filter: filter} if err := tsv.VStream(ctx, req, send); err != nil { @@ -186,6 +190,7 @@ func TestSchemaVersioning(t *testing.T) { } runCases(ctx, t, cases, eventCh) cancel() + wg.Wait() log.Infof("\n\n\n=============================================== PAST EVENTS WITH TRACK VERSIONS START HERE ======================\n\n\n") ctx, cancel = context.WithCancel(context.Background()) @@ -214,7 +219,9 @@ func TestSchemaVersioning(t *testing.T) { } return nil } + wg.Add(1) go func() { + defer wg.Done() defer close(eventCh) req := &binlogdatapb.VStreamRequest{Target: target, Position: startPos, TableLastPKs: nil, Filter: filter} if err := tsv.VStream(ctx, req, send); err != nil { @@ -257,6 +264,7 @@ func TestSchemaVersioning(t *testing.T) { expectLogs(ctx, t, "Past stream", eventCh, output) cancel() + wg.Wait() log.Infof("\n\n\n=============================================== PAST EVENTS WITHOUT TRACK VERSIONS START HERE ======================\n\n\n") tsv.EnableHistorian(false) @@ -286,7 +294,9 @@ func TestSchemaVersioning(t *testing.T) { } return nil } + wg.Add(1) go func() { + defer 
wg.Done() defer close(eventCh) req := &binlogdatapb.VStreamRequest{Target: target, Position: startPos, TableLastPKs: nil, Filter: filter} if err := tsv.VStream(ctx, req, send); err != nil { @@ -331,6 +341,7 @@ func TestSchemaVersioning(t *testing.T) { expectLogs(ctx, t, "Past stream", eventCh, output) cancel() + wg.Wait() client := framework.NewClient() client.Execute("drop table vitess_version", nil) diff --git a/go/vt/vttablet/faketmclient/fake_client.go b/go/vt/vttablet/faketmclient/fake_client.go index e8747b98fcc..a91497e925a 100644 --- a/go/vt/vttablet/faketmclient/fake_client.go +++ b/go/vt/vttablet/faketmclient/fake_client.go @@ -70,6 +70,14 @@ func (client *FakeTabletManagerClient) DeleteVReplicationWorkflow(ctx context.Co return nil, nil } +func (client *FakeTabletManagerClient) HasVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + return nil, nil +} + +func (client *FakeTabletManagerClient) ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + return nil, nil +} + func (client *FakeTabletManagerClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { return nil, nil } @@ -82,6 +90,10 @@ func (client *FakeTabletManagerClient) UpdateVReplicationWorkflow(ctx context.Co return nil, nil } +func (client *FakeTabletManagerClient) UpdateVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) { + return nil, nil +} + func (client *FakeTabletManagerClient) VDiff(ctx context.Context, tablet 
*topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { return nil, nil } @@ -180,6 +192,11 @@ func (client *FakeTabletManagerClient) ExecuteFetchAsDba(ctx context.Context, ta return &querypb.QueryResult{}, nil } +// FakeTabletManagerClient is part of the tmclient.TabletManagerClient interface. +func (client *FakeTabletManagerClient) ExecuteMultiFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) { + return []*querypb.QueryResult{}, nil +} + // ExecuteFetchAsAllPrivs is part of the tmclient.TabletManagerClient interface. func (client *FakeTabletManagerClient) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { return &querypb.QueryResult{}, nil @@ -309,7 +326,7 @@ func (client *FakeTabletManagerClient) UndoDemotePrimary(ctx context.Context, ta } // SetReplicationSource is part of the tmclient.TabletManagerClient interface. 
-func (client *FakeTabletManagerClient) SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool) error { +func (client *FakeTabletManagerClient) SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error { return nil } diff --git a/go/vt/vttablet/filelogger/filelogger_test.go b/go/vt/vttablet/filelogger/filelogger_test.go index 1562c9626a8..f747ebba93b 100644 --- a/go/vt/vttablet/filelogger/filelogger_test.go +++ b/go/vt/vttablet/filelogger/filelogger_test.go @@ -60,7 +60,7 @@ func TestFileLog(t *testing.T) { for i := 0; i < 10; i++ { time.Sleep(10 * time.Millisecond) - want := "\t\t\t''\t''\t0001-01-01 00:00:00.000000\t0001-01-01 00:00:00.000000\t0.000000\t\t\"test 1\"\tmap[]\t1\t\"test 1 PII\"\tmysql\t0.000000\t0.000000\t0\t0\t0\t\"\"\t\n\t\t\t''\t''\t0001-01-01 00:00:00.000000\t0001-01-01 00:00:00.000000\t0.000000\t\t\"test 2\"\tmap[]\t1\t\"test 2 PII\"\tmysql\t0.000000\t0.000000\t0\t0\t0\t\"\"\t\n" + want := "\t\t\t''\t''\t0001-01-01 00:00:00.000000\t0001-01-01 00:00:00.000000\t0.000000\t\t\"test 1\"\t{}\t1\t\"test 1 PII\"\tmysql\t0.000000\t0.000000\t0\t0\t0\t\"\"\t\n\t\t\t''\t''\t0001-01-01 00:00:00.000000\t0001-01-01 00:00:00.000000\t0.000000\t\t\"test 2\"\t{}\t1\t\"test 2 PII\"\tmysql\t0.000000\t0.000000\t0\t0\t0\t\"\"\t\n" contents, _ := os.ReadFile(logPath) got := string(contents) if want == got { diff --git a/go/vt/vttablet/flags.go b/go/vt/vttablet/flags.go index 3ce2cd3b378..994080b95a5 100644 --- a/go/vt/vttablet/flags.go +++ b/go/vt/vttablet/flags.go @@ -25,11 +25,14 @@ import ( ) const ( + // VReplicationExperimentalFlags is a bitmask of experimental features in vreplication. 
VReplicationExperimentalFlagOptimizeInserts = int64(1) VReplicationExperimentalFlagAllowNoBlobBinlogRowImage = int64(2) + VReplicationExperimentalFlagVPlayerBatching = int64(4) ) var ( + // Default flags. VReplicationExperimentalFlags = VReplicationExperimentalFlagOptimizeInserts | VReplicationExperimentalFlagAllowNoBlobBinlogRowImage VReplicationNetReadTimeout = 300 VReplicationNetWriteTimeout = 600 diff --git a/go/vt/vttablet/grpctabletconn/conn.go b/go/vt/vttablet/grpctabletconn/conn.go index cb97abcbbae..fe446fbec27 100644 --- a/go/vt/vttablet/grpctabletconn/conn.go +++ b/go/vt/vttablet/grpctabletconn/conn.go @@ -83,7 +83,7 @@ type gRPCQueryClient struct { var _ queryservice.QueryService = (*gRPCQueryClient)(nil) // DialTablet creates and initializes gRPCQueryClient. -func DialTablet(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { +func DialTablet(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { // create the RPC client addr := "" if grpcPort, ok := tablet.PortMap["grpc"]; ok { @@ -95,7 +95,7 @@ func DialTablet(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (querys if err != nil { return nil, err } - cc, err := grpcclient.Dial(addr, failFast, opt) + cc, err := grpcclient.DialContext(ctx, addr, failFast, opt) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func (conn *gRPCQueryClient) StreamExecute(ctx context.Context, target *querypb. fields = ser.Result.Fields } if err := callback(sqltypes.CustomProto3ToResult(fields, ser.Result)); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return nil } return err @@ -417,7 +417,7 @@ func (conn *gRPCQueryClient) ConcludeTransaction(ctx context.Context, target *qu return nil } -// ReadTransaction returns the metadata for the sepcified dtid. +// ReadTransaction returns the metadata for the specified dtid. 
func (conn *gRPCQueryClient) ReadTransaction(ctx context.Context, target *querypb.Target, dtid string) (*querypb.TransactionMetadata, error) { conn.mu.RLock() defer conn.mu.RUnlock() @@ -473,6 +473,10 @@ func (conn *gRPCQueryClient) BeginExecute(ctx context.Context, target *querypb.T // BeginStreamExecute starts a transaction and runs an Execute. func (conn *gRPCQueryClient) BeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, query string, bindVars map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (state queryservice.TransactionState, err error) { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + conn.mu.RLock() defer conn.mu.RUnlock() if conn.cc == nil { @@ -537,7 +541,7 @@ func (conn *gRPCQueryClient) BeginStreamExecute(ctx context.Context, target *que fields = ser.Result.Fields } if err := callback(sqltypes.CustomProto3ToResult(fields, ser.Result)); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return state, nil } return state, err @@ -583,7 +587,7 @@ func (conn *gRPCQueryClient) MessageStream(ctx context.Context, target *querypb. fields = msr.Result.Fields } if err := callback(sqltypes.CustomProto3ToResult(fields, msr.Result)); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return nil } return err @@ -640,7 +644,7 @@ func (conn *gRPCQueryClient) StreamHealth(ctx context.Context, callback func(*qu return tabletconn.ErrorFromGRPC(err) } if err := callback(shr); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return nil } return err @@ -650,6 +654,9 @@ func (conn *gRPCQueryClient) StreamHealth(ctx context.Context, callback func(*qu // VStream starts a VReplication stream. 
func (conn *gRPCQueryClient) VStream(ctx context.Context, request *binlogdatapb.VStreamRequest, send func([]*binlogdatapb.VEvent) error) error { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() stream, err := func() (queryservicepb.Query_VStreamClient, error) { conn.mu.RLock() defer conn.mu.RUnlock() @@ -695,6 +702,9 @@ func (conn *gRPCQueryClient) VStream(ctx context.Context, request *binlogdatapb. // VStreamRows streams rows of a query from the specified starting point. func (conn *gRPCQueryClient) VStreamRows(ctx context.Context, request *binlogdatapb.VStreamRowsRequest, send func(*binlogdatapb.VStreamRowsResponse) error) error { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() stream, err := func() (queryservicepb.Query_VStreamRowsClient, error) { conn.mu.RLock() defer conn.mu.RUnlock() @@ -737,6 +747,9 @@ func (conn *gRPCQueryClient) VStreamRows(ctx context.Context, request *binlogdat // VStreamTables streams rows of a query from the specified starting point. func (conn *gRPCQueryClient) VStreamTables(ctx context.Context, request *binlogdatapb.VStreamTablesRequest, send func(*binlogdatapb.VStreamTablesResponse) error) error { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() stream, err := func() (queryservicepb.Query_VStreamTablesClient, error) { conn.mu.RLock() defer conn.mu.RUnlock() @@ -777,6 +790,9 @@ func (conn *gRPCQueryClient) VStreamTables(ctx context.Context, request *binlogd // VStreamResults streams rows of a query from the specified starting point. func (conn *gRPCQueryClient) VStreamResults(ctx context.Context, target *querypb.Target, query string, send func(*binlogdatapb.VStreamResultsResponse) error) error { + // Please see comments in StreamExecute to see how this works. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() stream, err := func() (queryservicepb.Query_VStreamResultsClient, error) { conn.mu.RLock() defer conn.mu.RUnlock() @@ -856,6 +872,9 @@ func (conn *gRPCQueryClient) ReserveBeginExecute(ctx context.Context, target *qu // ReserveBeginStreamExecute implements the queryservice interface func (conn *gRPCQueryClient) ReserveBeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (state queryservice.ReservedTransactionState, err error) { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() conn.mu.RLock() defer conn.mu.RUnlock() if conn.cc == nil { @@ -924,7 +943,7 @@ func (conn *gRPCQueryClient) ReserveBeginStreamExecute(ctx context.Context, targ fields = ser.Result.Fields } if err := callback(sqltypes.CustomProto3ToResult(fields, ser.Result)); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return state, nil } return state, err @@ -967,6 +986,9 @@ func (conn *gRPCQueryClient) ReserveExecute(ctx context.Context, target *querypb // ReserveStreamExecute implements the queryservice interface func (conn *gRPCQueryClient) ReserveStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (state queryservice.ReservedState, err error) { + // Please see comments in StreamExecute to see how this works. 
+ ctx, cancel := context.WithCancel(ctx) + defer cancel() conn.mu.RLock() defer conn.mu.RUnlock() if conn.cc == nil { @@ -1029,7 +1051,7 @@ func (conn *gRPCQueryClient) ReserveStreamExecute(ctx context.Context, target *q fields = ser.Result.Fields } if err := callback(sqltypes.CustomProto3ToResult(fields, ser.Result)); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return state, nil } return state, err @@ -1060,6 +1082,9 @@ func (conn *gRPCQueryClient) Release(ctx context.Context, target *querypb.Target // GetSchema implements the queryservice interface func (conn *gRPCQueryClient) GetSchema(ctx context.Context, target *querypb.Target, tableType querypb.SchemaTableType, tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { + // Please see comments in StreamExecute to see how this works. + ctx, cancel := context.WithCancel(ctx) + defer cancel() conn.mu.RLock() defer conn.mu.RUnlock() if conn.cc == nil { @@ -1092,7 +1117,7 @@ func (conn *gRPCQueryClient) GetSchema(ctx context.Context, target *querypb.Targ return tabletconn.ErrorFromGRPC(err) } if err := callback(shr); err != nil { - if err == nil || err == io.EOF { + if err == io.EOF { return nil } return err diff --git a/go/vt/vttablet/grpctabletconn/conn_test.go b/go/vt/vttablet/grpctabletconn/conn_test.go index fb182bfe2e4..74ed85a335f 100644 --- a/go/vt/vttablet/grpctabletconn/conn_test.go +++ b/go/vt/vttablet/grpctabletconn/conn_test.go @@ -17,13 +17,21 @@ limitations under the License. 
package grpctabletconn import ( + "context" + "fmt" "io" "net" "os" + "sync" "testing" + "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/sqltypes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + queryservicepb "vitess.io/vitess/go/vt/proto/queryservice" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vttablet/grpcqueryservice" "vitess.io/vitess/go/vt/vttablet/tabletconntest" @@ -48,9 +56,13 @@ func TestGRPCTabletConn(t *testing.T) { server := grpc.NewServer() grpcqueryservice.Register(server, service) go server.Serve(listener) + defer server.Stop() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // run the test suite - tabletconntest.TestSuite(t, protocolName, &topodatapb.Tablet{ + tabletconntest.TestSuite(ctx, t, protocolName, &topodatapb.Tablet{ Keyspace: tabletconntest.TestTarget.Keyspace, Shard: tabletconntest.TestTarget.Shard, Type: tabletconntest.TestTarget.TabletType, @@ -83,6 +95,7 @@ func TestGRPCTabletAuthConn(t *testing.T) { grpcqueryservice.Register(server, service) go server.Serve(listener) + defer server.Stop() authJSON := `{ "Username": "valid", @@ -101,8 +114,10 @@ func TestGRPCTabletAuthConn(t *testing.T) { t.Fatal(err) } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // run the test suite - tabletconntest.TestSuite(t, protocolName, &topodatapb.Tablet{ + tabletconntest.TestSuite(ctx, t, protocolName, &topodatapb.Tablet{ Keyspace: tabletconntest.TestTarget.Keyspace, Shard: tabletconntest.TestTarget.Shard, Type: tabletconntest.TestTarget.TabletType, @@ -113,3 +128,111 @@ func TestGRPCTabletAuthConn(t *testing.T) { }, }, service, f) } + +// mockQueryClient is a mock query client that returns an error from Streaming calls, +// but only after storing the context that was passed to the RPC. 
+type mockQueryClient struct { + lastCallCtx context.Context + queryservicepb.QueryClient +} + +func (m *mockQueryClient) StreamExecute(ctx context.Context, in *querypb.StreamExecuteRequest, opts ...grpc.CallOption) (queryservicepb.Query_StreamExecuteClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) BeginStreamExecute(ctx context.Context, in *querypb.BeginStreamExecuteRequest, opts ...grpc.CallOption) (queryservicepb.Query_BeginStreamExecuteClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) ReserveStreamExecute(ctx context.Context, in *querypb.ReserveStreamExecuteRequest, opts ...grpc.CallOption) (queryservicepb.Query_ReserveStreamExecuteClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) ReserveBeginStreamExecute(ctx context.Context, in *querypb.ReserveBeginStreamExecuteRequest, opts ...grpc.CallOption) (queryservicepb.Query_ReserveBeginStreamExecuteClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) VStream(ctx context.Context, in *binlogdatapb.VStreamRequest, opts ...grpc.CallOption) (queryservicepb.Query_VStreamClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) VStreamRows(ctx context.Context, in *binlogdatapb.VStreamRowsRequest, opts ...grpc.CallOption) (queryservicepb.Query_VStreamRowsClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) VStreamTables(ctx context.Context, in *binlogdatapb.VStreamTablesRequest, opts ...grpc.CallOption) (queryservicepb.Query_VStreamTablesClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) VStreamResults(ctx context.Context, in *binlogdatapb.VStreamResultsRequest, opts ...grpc.CallOption) 
(queryservicepb.Query_VStreamResultsClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +func (m *mockQueryClient) GetSchema(ctx context.Context, in *querypb.GetSchemaRequest, opts ...grpc.CallOption) (queryservicepb.Query_GetSchemaClient, error) { + m.lastCallCtx = ctx + return nil, fmt.Errorf("A general error") +} + +var _ queryservicepb.QueryClient = (*mockQueryClient)(nil) + +// TestGoRoutineLeakPrevention tests that after all the RPCs that stream queries, we end up closing the context that was passed to it, to prevent go routines from being leaked. +func TestGoRoutineLeakPrevention(t *testing.T) { + mqc := &mockQueryClient{} + qc := &gRPCQueryClient{ + mu: sync.RWMutex{}, + cc: &grpc.ClientConn{}, + c: mqc, + } + _ = qc.StreamExecute(context.Background(), nil, "", nil, 0, 0, nil, func(result *sqltypes.Result) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _, _ = qc.BeginStreamExecute(context.Background(), nil, nil, "", nil, 0, nil, func(result *sqltypes.Result) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _, _ = qc.ReserveBeginStreamExecute(context.Background(), nil, nil, nil, "", nil, nil, func(result *sqltypes.Result) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _, _ = qc.ReserveStreamExecute(context.Background(), nil, nil, "", nil, 0, nil, func(result *sqltypes.Result) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _ = qc.VStream(context.Background(), &binlogdatapb.VStreamRequest{}, func(events []*binlogdatapb.VEvent) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _ = qc.VStreamRows(context.Background(), &binlogdatapb.VStreamRowsRequest{}, func(response *binlogdatapb.VStreamRowsResponse) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _ = qc.VStreamResults(context.Background(), nil, "", func(response *binlogdatapb.VStreamResultsResponse) error { + return nil + }) + 
require.Error(t, mqc.lastCallCtx.Err()) + + _ = qc.VStreamTables(context.Background(), &binlogdatapb.VStreamTablesRequest{}, func(response *binlogdatapb.VStreamTablesResponse) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) + + _ = qc.GetSchema(context.Background(), nil, querypb.SchemaTableType_TABLES, nil, func(schemaRes *querypb.GetSchemaResponse) error { + return nil + }) + require.Error(t, mqc.lastCallCtx.Err()) +} diff --git a/go/vt/vttablet/grpctmclient/cached_client_flaky_test.go b/go/vt/vttablet/grpctmclient/cached_client_flaky_test.go index c5346a99aa4..cf002cb7d18 100644 --- a/go/vt/vttablet/grpctmclient/cached_client_flaky_test.go +++ b/go/vt/vttablet/grpctmclient/cached_client_flaky_test.go @@ -20,7 +20,7 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "net" "runtime" "sync" @@ -117,7 +117,7 @@ func BenchmarkCachedConnClientSteadyState(b *testing.B) { ctx, cancel := context.WithTimeout(ctx, time.Second*5) defer cancel() - x := rand.Intn(len(tablets)) + x := rand.IntN(len(tablets)) err := client.Ping(ctx, tablets[x]) assert.NoError(b, err) }() @@ -185,7 +185,7 @@ func BenchmarkCachedConnClientSteadyStateRedials(b *testing.B) { ctx, cancel := context.WithTimeout(ctx, time.Second*5) defer cancel() - x := rand.Intn(len(tablets)) + x := rand.IntN(len(tablets)) err := client.Ping(ctx, tablets[x]) assert.NoError(b, err) }() @@ -340,10 +340,10 @@ func TestCachedConnClient(t *testing.T) { longestDials <- longestDial return case <-time.After(jitter): - jitter = time.Millisecond * (time.Duration(rand.Intn(11) + 50)) + jitter = time.Millisecond * (time.Duration(rand.IntN(11) + 50)) attempts++ - tablet := tablets[rand.Intn(len(tablets))] + tablet := tablets[rand.IntN(len(tablets))] start := time.Now() _, closer, err := client.dialer.dial(context.Background(), tablet) if err != nil { diff --git a/go/vt/vttablet/grpctmclient/client.go b/go/vt/vttablet/grpctmclient/client.go index 0068ed74706..48e5de6b0ef 100644 --- 
a/go/vt/vttablet/grpctmclient/client.go +++ b/go/vt/vttablet/grpctmclient/client.go @@ -45,6 +45,15 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +type DialPoolGroup int + +const ( + dialPoolGroupThrottler DialPoolGroup = iota + dialPoolGroupVTOrc +) + +type invalidatorFunc func() + var ( concurrency = 8 cert string @@ -55,7 +64,7 @@ var ( ) func registerFlags(fs *pflag.FlagSet) { - fs.IntVar(&concurrency, "tablet_manager_grpc_concurrency", concurrency, "concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App})") + fs.IntVar(&concurrency, "tablet_manager_grpc_concurrency", concurrency, "concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,App}, CheckThrottler and FullStatus)") fs.StringVar(&cert, "tablet_manager_grpc_cert", cert, "the cert to use to connect") fs.StringVar(&key, "tablet_manager_grpc_key", key, "the key to use to connect") fs.StringVar(&ca, "tablet_manager_grpc_ca", ca, "the server ca to use to validate servers when connecting") @@ -92,15 +101,17 @@ type tmc struct { client tabletmanagerservicepb.TabletManagerClient } +type addrTmcMap map[string]*tmc + // grpcClient implements both dialer and poolDialer. type grpcClient struct { - // This cache of connections is to maximize QPS for ExecuteFetch. - // Note we'll keep the clients open and close them upon Close() only. - // But that's OK because usually the tasks that use them are - // one-purpose only. + // This cache of connections is to maximize QPS for ExecuteFetchAs{Dba,App}, + // CheckThrottler and FullStatus. Note we'll keep the clients open and close them upon Close() only. + // But that's OK because usually the tasks that use them are one-purpose only. // The map is protected by the mutex. 
- mu sync.Mutex - rpcClientMap map[string]chan *tmc + mu sync.Mutex + rpcClientMap map[string]chan *tmc + rpcDialPoolMap map[DialPoolGroup]addrTmcMap } type dialer interface { @@ -110,21 +121,23 @@ type dialer interface { type poolDialer interface { dialPool(ctx context.Context, tablet *topodatapb.Tablet) (tabletmanagerservicepb.TabletManagerClient, error) + dialDedicatedPool(ctx context.Context, dialPoolGroup DialPoolGroup, tablet *topodatapb.Tablet) (tabletmanagerservicepb.TabletManagerClient, invalidatorFunc, error) } // Client implements tmclient.TabletManagerClient. // // Connections are produced by the dialer implementation, which is either the -// grpcClient implementation, which reuses connections only for ExecuteFetch and -// otherwise makes single-purpose connections that are closed after use. +// grpcClient implementation, which reuses connections only for ExecuteFetchAs{Dba,App} +// CheckThrottler, and FullStatus, otherwise making single-purpose connections that are closed +// after use. // // In order to more efficiently use the underlying tcp connections, you can // instead use the cachedConnDialer implementation by specifying // -// -tablet_manager_protocol "grpc-cached" +// --tablet_manager_protocol "grpc-cached" // -// The cachedConnDialer keeps connections to up to -tablet_manager_grpc_connpool_size distinct -// tablets open at any given time, for faster per-RPC call time, and less +// The cachedConnDialer keeps connections to up to --tablet_manager_grpc_connpool_size +// distinct tablets open at any given time, for faster per-RPC call time, and less // connection churn. 
type Client struct { dialer dialer @@ -144,7 +157,7 @@ func (client *grpcClient) dial(ctx context.Context, tablet *topodatapb.Tablet) ( if err != nil { return nil, nil, err } - cc, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) + cc, err := grpcclient.DialContext(ctx, addr, grpcclient.FailFast(false), opt) if err != nil { return nil, nil, err } @@ -152,6 +165,17 @@ func (client *grpcClient) dial(ctx context.Context, tablet *topodatapb.Tablet) ( return tabletmanagerservicepb.NewTabletManagerClient(cc), cc, nil } +func (client *grpcClient) createTmc(ctx context.Context, addr string, opt grpc.DialOption) (*tmc, error) { + cc, err := grpcclient.DialContext(ctx, addr, grpcclient.FailFast(false), opt) + if err != nil { + return nil, err + } + return &tmc{ + cc: cc, + client: tabletmanagerservicepb.NewTabletManagerClient(cc), + }, nil +} + func (client *grpcClient) dialPool(ctx context.Context, tablet *topodatapb.Tablet) (tabletmanagerservicepb.TabletManagerClient, error) { addr := netutil.JoinHostPort(tablet.Hostname, int32(tablet.PortMap["grpc"])) opt, err := grpcclient.SecureDialOption(cert, key, ca, crl, name) @@ -170,14 +194,11 @@ func (client *grpcClient) dialPool(ctx context.Context, tablet *topodatapb.Table client.mu.Unlock() for i := 0; i < cap(c); i++ { - cc, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) + tm, err := client.createTmc(ctx, addr, opt) if err != nil { return nil, err } - c <- &tmc{ - cc: cc, - client: tabletmanagerservicepb.NewTabletManagerClient(cc), - } + c <- tm } } else { client.mu.Unlock() @@ -188,6 +209,40 @@ func (client *grpcClient) dialPool(ctx context.Context, tablet *topodatapb.Table return result.client, nil } +func (client *grpcClient) dialDedicatedPool(ctx context.Context, dialPoolGroup DialPoolGroup, tablet *topodatapb.Tablet) (tabletmanagerservicepb.TabletManagerClient, invalidatorFunc, error) { + addr := netutil.JoinHostPort(tablet.Hostname, int32(tablet.PortMap["grpc"])) + opt, err := 
grpcclient.SecureDialOption(cert, key, ca, crl, name) + if err != nil { + return nil, nil, err + } + + client.mu.Lock() + defer client.mu.Unlock() + if client.rpcDialPoolMap == nil { + client.rpcDialPoolMap = make(map[DialPoolGroup]addrTmcMap) + } + if _, ok := client.rpcDialPoolMap[dialPoolGroup]; !ok { + client.rpcDialPoolMap[dialPoolGroup] = make(addrTmcMap) + } + m := client.rpcDialPoolMap[dialPoolGroup] + if _, ok := m[addr]; !ok { + tm, err := client.createTmc(ctx, addr, opt) + if err != nil { + return nil, nil, err + } + m[addr] = tm + } + invalidator := func() { + client.mu.Lock() + defer client.mu.Unlock() + if tm := m[addr]; tm != nil && tm.cc != nil { + tm.cc.Close() + } + delete(m, addr) + } + return m[addr].client, invalidator, nil +} + // Close is part of the tmclient.TabletManagerClient interface. func (client *grpcClient) Close() { client.mu.Lock() @@ -400,12 +455,13 @@ func (client *Client) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet } defer closer.Close() response, err := c.ApplySchema(ctx, &tabletmanagerdatapb.ApplySchemaRequest{ - Sql: change.SQL, - Force: change.Force, - AllowReplication: change.AllowReplication, - BeforeSchema: change.BeforeSchema, - AfterSchema: change.AfterSchema, - SqlMode: change.SQLMode, + Sql: change.SQL, + Force: change.Force, + AllowReplication: change.AllowReplication, + BeforeSchema: change.BeforeSchema, + AfterSchema: change.AfterSchema, + SqlMode: change.SQLMode, + DisableForeignKeyChecks: change.DisableForeignKeyChecks, }) if err != nil { return nil, err @@ -488,11 +544,12 @@ func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb. 
 	}
 
 	response, err := c.ExecuteFetchAsDba(ctx, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{
-		Query:          req.Query,
-		DbName:         topoproto.TabletDbName(tablet),
-		MaxRows:        req.MaxRows,
-		DisableBinlogs: req.DisableBinlogs,
-		ReloadSchema:   req.DisableBinlogs,
+		Query:                   req.Query,
+		DbName:                  topoproto.TabletDbName(tablet),
+		MaxRows:                 req.MaxRows,
+		DisableBinlogs:          req.DisableBinlogs,
+		ReloadSchema:            req.DisableBinlogs,
+		DisableForeignKeyChecks: req.DisableForeignKeyChecks,
 	})
 	if err != nil {
 		return nil, err
@@ -500,6 +557,42 @@ func (client *Client) ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.
 	return response.Result, nil
 }
 
+// ExecuteMultiFetchAsDba is part of the tmclient.TabletManagerClient interface.
+func (client *Client) ExecuteMultiFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) {
+	var c tabletmanagerservicepb.TabletManagerClient
+	var err error
+	if usePool {
+		if poolDialer, ok := client.dialer.(poolDialer); ok {
+			c, err = poolDialer.dialPool(ctx, tablet)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if !usePool || c == nil {
+		var closer io.Closer
+		c, closer, err = client.dialer.dial(ctx, tablet)
+		if err != nil {
+			return nil, err
+		}
+		defer closer.Close()
+	}
+
+	response, err := c.ExecuteMultiFetchAsDba(ctx, &tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest{
+		Sql:                     req.Sql,
+		DbName:                  topoproto.TabletDbName(tablet),
+		MaxRows:                 req.MaxRows,
+		DisableBinlogs:          req.DisableBinlogs,
+		ReloadSchema:            req.DisableBinlogs,
+		DisableForeignKeyChecks: req.DisableForeignKeyChecks,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return response.Results, err
+}
+
 // ExecuteFetchAsAllPrivs is part of the tmclient.TabletManagerClient interface.
func (client *Client) ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { c, closer, err := client.dialer.dial(ctx, tablet) @@ -568,14 +661,34 @@ func (client *Client) ReplicationStatus(ctx context.Context, tablet *topodatapb. } // FullStatus is part of the tmclient.TabletManagerClient interface. +// It always tries to use a cached client via the dialer pool as this is +// called very frequently from VTOrc, and the overhead of creating a new gRPC connection/channel +// and dialing the other tablet every time is not practical. func (client *Client) FullStatus(ctx context.Context, tablet *topodatapb.Tablet) (*replicationdatapb.FullStatus, error) { - c, closer, err := client.dialer.dial(ctx, tablet) - if err != nil { - return nil, err + var c tabletmanagerservicepb.TabletManagerClient + var invalidator invalidatorFunc + var err error + if poolDialer, ok := client.dialer.(poolDialer); ok { + c, invalidator, err = poolDialer.dialDedicatedPool(ctx, dialPoolGroupVTOrc, tablet) + if err != nil { + return nil, err + } } - defer closer.Close() + + if c == nil { + var closer io.Closer + c, closer, err = client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + } + response, err := c.FullStatus(ctx, &tabletmanagerdatapb.FullStatusRequest{}) if err != nil { + if invalidator != nil { + invalidator() + } return nil, err } return response.Status, nil @@ -720,6 +833,32 @@ func (client *Client) DeleteVReplicationWorkflow(ctx context.Context, tablet *to return response, nil } +func (client *Client) HasVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := 
c.HasVReplicationWorkflows(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + +func (client *Client) ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.ReadVReplicationWorkflows(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + func (client *Client) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { c, closer, err := client.dialer.dial(ctx, tablet) if err != nil { @@ -773,6 +912,19 @@ func (client *Client) UpdateVReplicationWorkflow(ctx context.Context, tablet *to return response, nil } +func (client *Client) UpdateVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) { + c, closer, err := client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + response, err := c.UpdateVReplicationWorkflows(ctx, request) + if err != nil { + return nil, err + } + return response, nil +} + // VDiff is part of the tmclient.TabletManagerClient interface. func (client *Client) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { log.Infof("VDiff for tablet %s, request %+v", tablet.Alias.String(), req) @@ -902,7 +1054,7 @@ func (client *Client) ResetReplicationParameters(ctx context.Context, tablet *to } // SetReplicationSource is part of the tmclient.TabletManagerClient interface. 
-func (client *Client) SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool) error { +func (client *Client) SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error { c, closer, err := client.dialer.dial(ctx, tablet) if err != nil { return err @@ -915,6 +1067,7 @@ func (client *Client) SetReplicationSource(ctx context.Context, tablet *topodata WaitPosition: waitPosition, ForceStartReplication: forceStartReplication, SemiSync: semiSync, + HeartbeatInterval: heartbeatInterval, }) return err } @@ -1002,14 +1155,35 @@ func (client *Client) Backup(ctx context.Context, tablet *topodatapb.Tablet, req } // CheckThrottler is part of the tmclient.TabletManagerClient interface. +// It always tries to use a cached client via the dialer pool as this is +// called very frequently between tablets when the throttler is enabled in +// a keyspace and the overhead of creating a new gRPC connection/channel +// and dialing the other tablet every time is not practical. 
func (client *Client) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { - c, closer, err := client.dialer.dial(ctx, tablet) - if err != nil { - return nil, err + var c tabletmanagerservicepb.TabletManagerClient + var invalidator invalidatorFunc + var err error + if poolDialer, ok := client.dialer.(poolDialer); ok { + c, invalidator, err = poolDialer.dialDedicatedPool(ctx, dialPoolGroupThrottler, tablet) + if err != nil { + return nil, err + } } - defer closer.Close() + + if c == nil { + var closer io.Closer + c, closer, err = client.dialer.dial(ctx, tablet) + if err != nil { + return nil, err + } + defer closer.Close() + } + response, err := c.CheckThrottler(ctx, req) if err != nil { + if invalidator != nil { + invalidator() + } return nil, err } return response, nil diff --git a/go/vt/vttablet/grpctmclient/client_test.go b/go/vt/vttablet/grpctmclient/client_test.go new file mode 100644 index 00000000000..1487303163d --- /dev/null +++ b/go/vt/vttablet/grpctmclient/client_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package grpctmclient + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/connectivity" + + "vitess.io/vitess/go/netutil" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" +) + +func TestDialDedicatedPool(t *testing.T) { + ctx := context.Background() + client := NewClient() + tablet := &topodatapb.Tablet{ + Hostname: "localhost", + PortMap: map[string]int32{ + "grpc": 15991, + }, + } + addr := netutil.JoinHostPort(tablet.Hostname, int32(tablet.PortMap["grpc"])) + t.Run("dialPool", func(t *testing.T) { + poolDialer, ok := client.dialer.(poolDialer) + require.True(t, ok) + + cli, invalidator, err := poolDialer.dialDedicatedPool(ctx, dialPoolGroupThrottler, tablet) + assert.NoError(t, err) + assert.NotNil(t, invalidator) + assert.NotNil(t, cli) + _, invalidatorTwo, err := poolDialer.dialDedicatedPool(ctx, dialPoolGroupThrottler, tablet) + assert.NoError(t, err) + // Ensure that running both the invalidators doesn't cause any issues. 
+ invalidator() + invalidatorTwo() + _, _, err = poolDialer.dialDedicatedPool(ctx, dialPoolGroupThrottler, tablet) + assert.NoError(t, err) + }) + + var cachedTmc *tmc + t.Run("maps", func(t *testing.T) { + rpcClient, ok := client.dialer.(*grpcClient) + require.True(t, ok) + assert.NotEmpty(t, rpcClient.rpcDialPoolMap) + assert.NotEmpty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + + c := rpcClient.rpcDialPoolMap[dialPoolGroupThrottler][addr] + assert.NotNil(t, c) + assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, c.cc.GetState()) + + cachedTmc = c + }) + + t.Run("CheckThrottler", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + req := &tabletmanagerdatapb.CheckThrottlerRequest{} + _, err := client.CheckThrottler(ctx, tablet, req) + assert.Error(t, err) + }) + t.Run("empty map", func(t *testing.T) { + rpcClient, ok := client.dialer.(*grpcClient) + require.True(t, ok) + assert.NotEmpty(t, rpcClient.rpcDialPoolMap) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + + assert.Equal(t, connectivity.Shutdown, cachedTmc.cc.GetState()) + }) +} + +func TestDialPool(t *testing.T) { + ctx := context.Background() + client := NewClient() + tablet := &topodatapb.Tablet{ + Hostname: "localhost", + PortMap: map[string]int32{ + "grpc": 15991, + }, + } + addr := netutil.JoinHostPort(tablet.Hostname, int32(tablet.PortMap["grpc"])) + t.Run("dialPool", func(t *testing.T) { + poolDialer, ok := client.dialer.(poolDialer) + require.True(t, ok) + + cli, err := poolDialer.dialPool(ctx, tablet) + assert.NoError(t, err) + assert.NotNil(t, cli) + }) + + var cachedTmc *tmc + t.Run("maps", func(t *testing.T) { + rpcClient, ok := client.dialer.(*grpcClient) + require.True(t, ok) + assert.Empty(t, rpcClient.rpcDialPoolMap) + assert.Empty(t, 
rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + + assert.NotEmpty(t, rpcClient.rpcClientMap) + assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + + ch := rpcClient.rpcClientMap[addr] + cachedTmc = <-ch + ch <- cachedTmc + + assert.NotNil(t, cachedTmc) + assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, cachedTmc.cc.GetState()) + }) + + t.Run("CheckThrottler", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + req := &tabletmanagerdatapb.CheckThrottlerRequest{} + _, err := client.CheckThrottler(ctx, tablet, req) + assert.Error(t, err) + }) + t.Run("post throttler maps", func(t *testing.T) { + rpcClient, ok := client.dialer.(*grpcClient) + require.True(t, ok) + + rpcClient.mu.Lock() + defer rpcClient.mu.Unlock() + + assert.NotEmpty(t, rpcClient.rpcDialPoolMap) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + + assert.NotEmpty(t, rpcClient.rpcClientMap) + assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + + assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, cachedTmc.cc.GetState()) + }) + t.Run("ExecuteFetchAsDba", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + req := &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{} + _, err := client.ExecuteFetchAsDba(ctx, tablet, true, req) + assert.Error(t, err) + }) + + t.Run("post ExecuteFetchAsDba maps", func(t *testing.T) { + + rpcClient, ok := client.dialer.(*grpcClient) + require.True(t, ok) + + rpcClient.mu.Lock() + defer rpcClient.mu.Unlock() + + assert.NotEmpty(t, rpcClient.rpcDialPoolMap) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupThrottler]) + assert.Empty(t, rpcClient.rpcDialPoolMap[dialPoolGroupVTOrc]) + + // The default pools are unaffected. 
Invalidator does not run, connections are not closed. + assert.NotEmpty(t, rpcClient.rpcClientMap) + assert.NotEmpty(t, rpcClient.rpcClientMap[addr]) + + assert.NotNil(t, cachedTmc) + assert.Contains(t, []connectivity.State{connectivity.Connecting, connectivity.TransientFailure}, cachedTmc.cc.GetState()) + }) +} diff --git a/go/vt/vttablet/grpctmserver/server.go b/go/vt/vttablet/grpctmserver/server.go index d0fe5a2cbe1..d98ddb135a1 100644 --- a/go/vt/vttablet/grpctmserver/server.go +++ b/go/vt/vttablet/grpctmserver/server.go @@ -161,12 +161,13 @@ func (s *server) ApplySchema(ctx context.Context, request *tabletmanagerdatapb.A ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.ApplySchemaResponse{} scr, err := s.tm.ApplySchema(ctx, &tmutils.SchemaChange{ - SQL: request.Sql, - Force: request.Force, - AllowReplication: request.AllowReplication, - BeforeSchema: request.BeforeSchema, - AfterSchema: request.AfterSchema, - SQLMode: request.SqlMode, + SQL: request.Sql, + Force: request.Force, + AllowReplication: request.AllowReplication, + BeforeSchema: request.BeforeSchema, + AfterSchema: request.AfterSchema, + SQLMode: request.SqlMode, + DisableForeignKeyChecks: request.DisableForeignKeyChecks, }) if err == nil { response.BeforeSchema = scr.BeforeSchema @@ -227,6 +228,18 @@ func (s *server) ExecuteFetchAsDba(ctx context.Context, request *tabletmanagerda return response, nil } +func (s *server) ExecuteMultiFetchAsDba(ctx context.Context, request *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) (response *tabletmanagerdatapb.ExecuteMultiFetchAsDbaResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "ExecuteFetchAsDba", request, response, false /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.ExecuteMultiFetchAsDbaResponse{} + qrs, err := s.tm.ExecuteMultiFetchAsDba(ctx, request) + if err != nil { + return nil, vterrors.ToGRPC(err) + } + response.Results = qrs + return response, nil +} + func (s *server) 
ExecuteFetchAsAllPrivs(ctx context.Context, request *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (response *tabletmanagerdatapb.ExecuteFetchAsAllPrivsResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "ExecuteFetchAsAllPrivs", request, response, false /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) @@ -300,7 +313,7 @@ func (s *server) PrimaryPosition(ctx context.Context, request *tabletmanagerdata } func (s *server) WaitForPosition(ctx context.Context, request *tabletmanagerdatapb.WaitForPositionRequest) (response *tabletmanagerdatapb.WaitForPositionResponse, err error) { - defer s.tm.HandleRPCPanic(ctx, "WaitForPosition", request, response, false /*verbose*/, &err) + defer s.tm.HandleRPCPanic(ctx, "WaitForPosition", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.WaitForPositionResponse{} return response, s.tm.WaitForPosition(ctx, request.Position) @@ -367,6 +380,20 @@ func (s *server) DeleteVReplicationWorkflow(ctx context.Context, request *tablet return s.tm.DeleteVReplicationWorkflow(ctx, request) } +func (s *server) HasVReplicationWorkflows(ctx context.Context, request *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (response *tabletmanagerdatapb.HasVReplicationWorkflowsResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "HasVReplicationWorkflows", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.HasVReplicationWorkflowsResponse{} + return s.tm.HasVReplicationWorkflows(ctx, request) +} + +func (s *server) ReadVReplicationWorkflows(ctx context.Context, request *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (response *tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "ReadVReplicationWorkflows", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{} + return 
s.tm.ReadVReplicationWorkflows(ctx, request) +} + func (s *server) ReadVReplicationWorkflow(ctx context.Context, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (response *tabletmanagerdatapb.ReadVReplicationWorkflowResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "ReadVReplicationWorkflow", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) @@ -396,6 +423,13 @@ func (s *server) UpdateVReplicationWorkflow(ctx context.Context, request *tablet return s.tm.UpdateVReplicationWorkflow(ctx, request) } +func (s *server) UpdateVReplicationWorkflows(ctx context.Context, request *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (response *tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, err error) { + defer s.tm.HandleRPCPanic(ctx, "UpdateVReplicationWorkflows", request, response, true /*verbose*/, &err) + ctx = callinfo.GRPCCallInfo(ctx) + response = &tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse{} + return s.tm.UpdateVReplicationWorkflows(ctx, request) +} + func (s *server) VDiff(ctx context.Context, request *tabletmanagerdatapb.VDiffRequest) (response *tabletmanagerdatapb.VDiffResponse, err error) { defer s.tm.HandleRPCPanic(ctx, "VDiff", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) @@ -426,7 +460,7 @@ func (s *server) InitPrimary(ctx context.Context, request *tabletmanagerdatapb.I } func (s *server) PopulateReparentJournal(ctx context.Context, request *tabletmanagerdatapb.PopulateReparentJournalRequest) (response *tabletmanagerdatapb.PopulateReparentJournalResponse, err error) { - defer s.tm.HandleRPCPanic(ctx, "PopulateReparentJournal", request, response, false /*verbose*/, &err) + defer s.tm.HandleRPCPanic(ctx, "PopulateReparentJournal", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.PopulateReparentJournalResponse{} return response, s.tm.PopulateReparentJournal(ctx, request.TimeCreatedNs, request.ActionName, 
request.PrimaryAlias, request.ReplicationPosition) @@ -476,7 +510,7 @@ func (s *server) SetReplicationSource(ctx context.Context, request *tabletmanage defer s.tm.HandleRPCPanic(ctx, "SetReplicationSource", request, response, true /*verbose*/, &err) ctx = callinfo.GRPCCallInfo(ctx) response = &tabletmanagerdatapb.SetReplicationSourceResponse{} - return response, s.tm.SetReplicationSource(ctx, request.Parent, request.TimeCreatedNs, request.WaitPosition, request.ForceStartReplication, request.GetSemiSync()) + return response, s.tm.SetReplicationSource(ctx, request.Parent, request.TimeCreatedNs, request.WaitPosition, request.ForceStartReplication, request.GetSemiSync(), request.HeartbeatInterval) } func (s *server) ReplicaWasRestarted(ctx context.Context, request *tabletmanagerdatapb.ReplicaWasRestartedRequest) (response *tabletmanagerdatapb.ReplicaWasRestartedResponse, err error) { diff --git a/go/vt/vttablet/onlineddl/analysis.go b/go/vt/vttablet/onlineddl/analysis.go index 987f09124a1..970104877f2 100644 --- a/go/vt/vttablet/onlineddl/analysis.go +++ b/go/vt/vttablet/onlineddl/analysis.go @@ -19,11 +19,11 @@ package onlineddl import ( "context" "encoding/json" - "strings" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" ) @@ -31,9 +31,8 @@ import ( type specialAlterOperation string const ( - instantDDLSpecialOperation specialAlterOperation = "instant-ddl" - dropRangePartitionSpecialOperation specialAlterOperation = "drop-range-partition" - addRangePartitionSpecialOperation specialAlterOperation = "add-range-partition" + instantDDLSpecialOperation specialAlterOperation = "instant-ddl" + rangePartitionSpecialOperation specialAlterOperation = "range-partition" ) type SpecialAlterPlan struct { @@ -75,7 +74,7 @@ func (e *Executor) getCreateTableStatement(ctx 
context.Context, tableName string if err != nil { return nil, vterrors.Wrapf(err, "in Executor.getCreateTableStatement()") } - stmt, err := sqlparser.ParseStrictDDL(showCreateTable) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(showCreateTable) if err != nil { return nil, err } @@ -86,270 +85,24 @@ func (e *Executor) getCreateTableStatement(ctx context.Context, tableName string return createTable, nil } -// analyzeDropRangePartition sees if the online DDL drops a single partition in a range partitioned table -func analyzeDropRangePartition(alterTable *sqlparser.AlterTable, createTable *sqlparser.CreateTable) (*SpecialAlterPlan, error) { - // we are looking for a `ALTER TABLE
{{.ExecuteTime.Seconds}} {{.CommitTime.Seconds}} {{.StmtType}}{{.SQL | truncateQuery | unquote | cssWrappable}}{{.SQL | .Parser.TruncateForUI | unquote | cssWrappable}} {{.ShardQueries}} {{.RowsAffected}} {{.ErrorStr}}
DROP PARTITION ` statement with nothing else - if len(alterTable.AlterOptions) > 0 { - return nil, nil - } - if alterTable.PartitionOption != nil { - return nil, nil - } - spec := alterTable.PartitionSpec - if spec == nil { - return nil, nil - } - if spec.Action != sqlparser.DropAction { - return nil, nil - } - if len(spec.Names) != 1 { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "vitess only supports dropping a single partition per query: %v", sqlparser.CanonicalString(alterTable)) - } - partitionName := spec.Names[0].String() - // OK then! - - // Now, is this query dropping the first partition in a RANGE partitioned table? - part := createTable.TableSpec.PartitionOption - if part.Type != sqlparser.RangeType { - return nil, nil - } - if len(part.Definitions) == 0 { - return nil, nil - } - var partitionDefinition *sqlparser.PartitionDefinition - var nextPartitionName string - for i, p := range part.Definitions { - if p.Name.String() == partitionName { - partitionDefinition = p - if i+1 < len(part.Definitions) { - nextPartitionName = part.Definitions[i+1].Name.String() - } - break - } - } - if partitionDefinition == nil { - // dropping a nonexistent partition. We'll let the "standard" migration execution flow deal with that. - return nil, nil - } - op := NewSpecialAlterOperation(dropRangePartitionSpecialOperation, alterTable, createTable) - op.SetDetail("partition_name", partitionName) - op.SetDetail("partition_definition", sqlparser.CanonicalString(partitionDefinition)) - op.SetDetail("next_partition_name", nextPartitionName) - return op, nil -} - -// analyzeAddRangePartition sees if the online DDL adds a partition in a range partitioned table -func analyzeAddRangePartition(alterTable *sqlparser.AlterTable, createTable *sqlparser.CreateTable) *SpecialAlterPlan { - // we are looking for a `ALTER TABLE
ADD PARTITION (PARTITION ...)` statement with nothing else - if len(alterTable.AlterOptions) > 0 { - return nil - } - if alterTable.PartitionOption != nil { - return nil - } - spec := alterTable.PartitionSpec - if spec == nil { - return nil - } - if spec.Action != sqlparser.AddAction { - return nil - } - if len(spec.Definitions) != 1 { - return nil - } - partitionDefinition := spec.Definitions[0] - partitionName := partitionDefinition.Name.String() - // OK then! - - // Now, is this query adding a partition in a RANGE partitioned table? - part := createTable.TableSpec.PartitionOption - if part.Type != sqlparser.RangeType { - return nil - } - if len(part.Definitions) == 0 { - return nil - } - op := NewSpecialAlterOperation(addRangePartitionSpecialOperation, alterTable, createTable) - op.SetDetail("partition_name", partitionName) - op.SetDetail("partition_definition", sqlparser.CanonicalString(partitionDefinition)) - return op -} - -// alterOptionAvailableViaInstantDDL chcks if the specific alter option is eligible to run via ALGORITHM=INSTANT -// reference: https://dev.mysql.com/doc/refman/8.0/en/innodb-online-ddl-operations.html -func alterOptionAvailableViaInstantDDL(alterOption sqlparser.AlterOption, createTable *sqlparser.CreateTable, capableOf mysql.CapableOf) (bool, error) { - findColumn := func(colName string) *sqlparser.ColumnDefinition { - if createTable == nil { - return nil - } - for _, col := range createTable.TableSpec.Columns { - if strings.EqualFold(colName, col.Name.String()) { - return col - } - } - return nil - } - findTableOption := func(optName string) *sqlparser.TableOption { - if createTable == nil { - return nil - } - for _, opt := range createTable.TableSpec.Options { - if strings.EqualFold(optName, opt.Name) { - return opt - } - } - return nil - } - isVirtualColumn := func(colName string) bool { - col := findColumn(colName) - if col == nil { - return false - } - if col.Type.Options == nil { - return false - } - if col.Type.Options.As == nil { 
- return false - } - return col.Type.Options.Storage == sqlparser.VirtualStorage - } - colStringStrippedDown := func(col *sqlparser.ColumnDefinition, stripDefault bool, stripEnum bool) string { - strippedCol := sqlparser.CloneRefOfColumnDefinition(col) - if stripDefault { - strippedCol.Type.Options.Default = nil - strippedCol.Type.Options.DefaultLiteral = false - } - if stripEnum { - strippedCol.Type.EnumValues = nil - } - return sqlparser.CanonicalString(strippedCol) - } - hasPrefix := func(vals []string, prefix []string) bool { - if len(vals) < len(prefix) { - return false - } - for i := range prefix { - if vals[i] != prefix[i] { - return false - } - } - return true - } - // Up to 8.0.26 we could only ADD COLUMN as last column - switch opt := alterOption.(type) { - case *sqlparser.ChangeColumn: - // We do not support INSTANT for renaming a column (ALTER TABLE ...CHANGE) because: - // 1. We discourage column rename - // 2. We do not produce CHANGE statements in declarative diff - // 3. The success of the operation depends on whether the column is referenced by a foreign key - // in another table. Which is a bit too much to compute here. - return false, nil - case *sqlparser.AddColumns: - if opt.First || opt.After != nil { - // not a "last" column. 
Only supported as of 8.0.29 - return capableOf(mysql.InstantAddDropColumnFlavorCapability) - } - // Adding a *last* column is supported in 8.0 - return capableOf(mysql.InstantAddLastColumnFlavorCapability) - case *sqlparser.DropColumn: - // not supported in COMPRESSED tables - if opt := findTableOption("ROW_FORMAT"); opt != nil { - if strings.EqualFold(opt.String, "COMPRESSED") { - return false, nil - } - } - if isVirtualColumn(opt.Name.Name.String()) { - // supported by all 8.0 versions - return capableOf(mysql.InstantAddDropVirtualColumnFlavorCapability) - } - return capableOf(mysql.InstantAddDropColumnFlavorCapability) - case *sqlparser.ModifyColumn: - if col := findColumn(opt.NewColDefinition.Name.String()); col != nil { - // Check if only diff is change of default - // we temporarily remove the DEFAULT expression (if any) from both - // table and ALTER statement, and compare the columns: if they're otherwise equal, - // then the only change can be an addition/change/removal of DEFAULT, which - // is instant-table. - tableColDefinition := colStringStrippedDown(col, true, false) - newColDefinition := colStringStrippedDown(opt.NewColDefinition, true, false) - if tableColDefinition == newColDefinition { - return capableOf(mysql.InstantChangeColumnDefaultFlavorCapability) - } - // Check if: - // 1. this an ENUM/SET - // 2. and the change is to append values to the end of the list - // 3. and the number of added values does not increase the storage size for the enum/set - // 4. while still not caring about a change in the default value - if len(col.Type.EnumValues) > 0 && len(opt.NewColDefinition.Type.EnumValues) > 0 { - // both are enum or set - if !hasPrefix(opt.NewColDefinition.Type.EnumValues, col.Type.EnumValues) { - return false, nil - } - // we know the new column definition is identical to, or extends, the old definition. 
- // Now validate storage: - if strings.EqualFold(col.Type.Type, "enum") { - if len(col.Type.EnumValues) <= 255 && len(opt.NewColDefinition.Type.EnumValues) > 255 { - // this increases the SET storage size (1 byte for up to 8 values, 2 bytes beyond) - return false, nil - } - } - if strings.EqualFold(col.Type.Type, "set") { - if (len(col.Type.EnumValues)+7)/8 != (len(opt.NewColDefinition.Type.EnumValues)+7)/8 { - // this increases the SET storage size (1 byte for up to 8 values, 2 bytes for 8-15, etc.) - return false, nil - } - } - // Now don't care about change of default: - tableColDefinition := colStringStrippedDown(col, true, true) - newColDefinition := colStringStrippedDown(opt.NewColDefinition, true, true) - if tableColDefinition == newColDefinition { - return capableOf(mysql.InstantExpandEnumCapability) - } - } - } - return false, nil - default: - return false, nil - } -} - -// AnalyzeInstantDDL takes declarative CreateTable and AlterTable, as well as a server version, and checks whether it is possible to run the ALTER -// using ALGORITM=INSTANT for that version. -// This function is INTENTIONALLY public, even though we do not guarantee that it will remain so. -func AnalyzeInstantDDL(alterTable *sqlparser.AlterTable, createTable *sqlparser.CreateTable, capableOf mysql.CapableOf) (*SpecialAlterPlan, error) { - capable, err := capableOf(mysql.InstantDDLFlavorCapability) +// analyzeInstantDDL takes declarative CreateTable and AlterTable, as well as a server version, and checks whether it is possible to run the ALTER +// using ALGORITHM=INSTANT for that version. 
+func analyzeInstantDDL(alterTable *sqlparser.AlterTable, createTable *sqlparser.CreateTable, capableOf capabilities.CapableOf) (*SpecialAlterPlan, error) { + capable, err := schemadiff.AlterTableCapableOfInstantDDL(alterTable, createTable, capableOf) if err != nil { return nil, err } if !capable { return nil, nil } - if alterTable.PartitionOption != nil { - // no INSTANT for partitions - return nil, nil - } - if alterTable.PartitionSpec != nil { - // no INSTANT for partitions - return nil, nil - } - // For the ALTER statement to qualify for ALGORITHM=INSTANT, all alter options must each qualify. - for _, alterOption := range alterTable.AlterOptions { - instantOK, err := alterOptionAvailableViaInstantDDL(alterOption, createTable, capableOf) - if err != nil { - return nil, err - } - if !instantOK { - return nil, nil - } - } op := NewSpecialAlterOperation(instantDDLSpecialOperation, alterTable, createTable) return op, nil } // analyzeSpecialAlterPlan checks if the given ALTER onlineDDL, and for the current state of affected table, // can be executed in a special way. 
If so, it returns with a "special plan" -func (e *Executor) analyzeSpecialAlterPlan(ctx context.Context, onlineDDL *schema.OnlineDDL, capableOf mysql.CapableOf) (*SpecialAlterPlan, error) { - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) +func (e *Executor) analyzeSpecialAlterPlan(ctx context.Context, onlineDDL *schema.OnlineDDL, capableOf capabilities.CapableOf) (*SpecialAlterPlan, error) { + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return nil, err } @@ -365,21 +118,30 @@ func (e *Executor) analyzeSpecialAlterPlan(ctx context.Context, onlineDDL *schem } // special plans which support reverts are trivially desired: - // special plans which do not support reverts are flag protected: - if onlineDDL.StrategySetting().IsFastRangeRotationFlag() { - op, err := analyzeDropRangePartition(alterTable, createTable) + // + // - nothing here thus far + // + // special plans that do not support revert, but are always desired over Online DDL, + // hence not flag protected: + { + // Dropping a range partition has to run directly. It is incorrect to run with Online DDL + // because the table copy will make the second-oldest partition "adopt" the rows which + // we really want purged from the oldest partition. + // Adding a range partition _can_ technically run with Online DDL, but it is wasteful + // and pointless. The user fully expects the operation to run immediately and without + // any copy of data. 
+ isRangeRotation, err := schemadiff.AlterTableRotatesRangePartition(createTable, alterTable) if err != nil { return nil, err } - if op != nil { - return op, nil - } - if op := analyzeAddRangePartition(alterTable, createTable); op != nil { + if isRangeRotation { + op := NewSpecialAlterOperation(rangePartitionSpecialOperation, alterTable, createTable) return op, nil } } + // special plans which do not support reverts are flag protected: if onlineDDL.StrategySetting().IsPreferInstantDDL() { - op, err := AnalyzeInstantDDL(alterTable, createTable, capableOf) + op, err := analyzeInstantDDL(alterTable, createTable, capableOf) if err != nil { return nil, err } diff --git a/go/vt/vttablet/onlineddl/analysis_test.go b/go/vt/vttablet/onlineddl/analysis_test.go index afaa3e8aa1f..819df415927 100644 --- a/go/vt/vttablet/onlineddl/analysis_test.go +++ b/go/vt/vttablet/onlineddl/analysis_test.go @@ -81,7 +81,7 @@ func TestAnalyzeInstantDDL(t *testing.T) { version: "8.0.21", create: "create table t(id int, i1 int not null, i2 int generated always as (i1 + 1) stored, primary key(id))", alter: "alter table t drop column i2", - instant: false, + instant: true, }, { // add mid column @@ -208,21 +208,22 @@ func TestAnalyzeInstantDDL(t *testing.T) { instant: false, }, } + parser := sqlparser.NewTestParser() for _, tc := range tt { name := tc.version + " " + tc.create t.Run(name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.create) + stmt, err := parser.ParseStrictDDL(tc.create) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - stmt, err = sqlparser.ParseStrictDDL(tc.alter) + stmt, err = parser.ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) - _, capableOf, _ := mysql.GetFlavor(tc.version, nil) - plan, err := AnalyzeInstantDDL(alterTable, createTable, capableOf) + capableOf := mysql.ServerVersionCapableOf(tc.version) + plan, err := 
analyzeInstantDDL(alterTable, createTable, capableOf) if tc.expectError { assert.Error(t, err) } else { diff --git a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index 8a3cf61348b..42b2a4f827b 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -24,7 +24,6 @@ import ( "context" "errors" "fmt" - "math" "os" "path" "strconv" @@ -39,10 +38,12 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/syscallutil" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -76,7 +77,7 @@ var ( ) var ( - // fixCompletedTimestampDone fixes a nil `completed_tiemstamp` columns, see + // fixCompletedTimestampDone fixes a nil `completed_timestamp` columns, see // https://github.com/vitessio/vitess/issues/13927 // The fix is in release-18.0 // TODO: remove in release-19.0 @@ -88,12 +89,16 @@ var acceptableDropTableIfExistsErrorCodes = []sqlerror.ErrorCode{sqlerror.ERCant var copyAlgorithm = sqlparser.AlgorithmValue(sqlparser.CopyStr) var ( - ghostOverridePath string - ptOSCOverridePath string + ghostBinaryPath = "gh-ost" + ptOSCBinaryPath = "/usr/bin/pt-online-schema-change" migrationCheckInterval = 1 * time.Minute retainOnlineDDLTables = 24 * time.Hour defaultCutOverThreshold = 10 * time.Second maxConcurrentOnlineDDLs = 256 + + migrationNextCheckIntervals = []time.Duration{1 * time.Second, 5 * time.Second, 10 * time.Second, 20 * time.Second} + maxConstraintNameLength = 64 + cutoverIntervals = []time.Duration{0, 1 * time.Minute, 5 * time.Minute, 10 * time.Minute, 30 * time.Minute} ) func init() { @@ -102,16 +107,13 @@ func init() { } func registerOnlineDDLFlags(fs *pflag.FlagSet) { - fs.StringVar(&ghostOverridePath, "gh-ost-path", 
ghostOverridePath, "override default gh-ost binary full path") - fs.StringVar(&ptOSCOverridePath, "pt-osc-path", ptOSCOverridePath, "override default pt-online-schema-change binary full path") + fs.StringVar(&ghostBinaryPath, "gh-ost-path", ghostBinaryPath, "override default gh-ost binary full path") + fs.StringVar(&ptOSCBinaryPath, "pt-osc-path", ptOSCBinaryPath, "override default pt-online-schema-change binary full path") fs.DurationVar(&migrationCheckInterval, "migration_check_interval", migrationCheckInterval, "Interval between migration checks") fs.DurationVar(&retainOnlineDDLTables, "retain_online_ddl_tables", retainOnlineDDLTables, "How long should vttablet keep an old migrated table before purging it") fs.IntVar(&maxConcurrentOnlineDDLs, "max_concurrent_online_ddl", maxConcurrentOnlineDDLs, "Maximum number of online DDL changes that may run concurrently") } -var migrationNextCheckIntervals = []time.Duration{1 * time.Second, 5 * time.Second, 10 * time.Second, 20 * time.Second} -var maxConstraintNameLength = 64 - const ( maxPasswordLength = 32 // MySQL's *replication* password may not exceed 32 characters staleMigrationMinutes = 180 @@ -176,6 +178,7 @@ type Executor struct { ts *topo.Server lagThrottler *throttle.Throttler toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool) + requestGCChecksFunc func() tabletAlias *topodatapb.TabletAlias keyspace string @@ -215,30 +218,13 @@ func newCancellableMigration(uuid string, message string) *cancellableMigration return &cancellableMigration{uuid: uuid, message: message} } -// GhostBinaryFileName returns the full path+name of the gh-ost binary -func GhostBinaryFileName() (fileName string, isOverride bool) { - if ghostOverridePath != "" { - return ghostOverridePath, true - } - return path.Join(os.TempDir(), "vt-gh-ost"), false -} - -// PTOSCFileName returns the full path+name of the pt-online-schema-change binary -// Note that vttablet does not include 
pt-online-schema-change -func PTOSCFileName() (fileName string, isOverride bool) { - if ptOSCOverridePath != "" { - return ptOSCOverridePath, true - } - return "/usr/bin/pt-online-schema-change", false -} - // newGCTableRetainTime returns the time until which a new GC table is to be retained func newGCTableRetainTime() time.Time { return time.Now().UTC().Add(retainOnlineDDLTables) } // getMigrationCutOverThreshold returns the cut-over threshold for the given migration. The migration's -// DDL Strategy may excplicitly set the threshold; otherwise, we return the default cut-over threshold. +// DDL Strategy may explicitly set the threshold; otherwise, we return the default cut-over threshold. func getMigrationCutOverThreshold(onlineDDL *schema.OnlineDDL) time.Duration { if threshold, _ := onlineDDL.StrategySetting().CutOverThreshold(); threshold != 0 { return threshold @@ -251,6 +237,7 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top lagThrottler *throttle.Throttler, tabletTypeFunc func() topodatapb.TabletType, toggleBufferTableFunc func(cancelCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool), + requestGCChecksFunc func(), ) *Executor { // sanitize flags if maxConcurrentOnlineDDLs < 1 { @@ -261,13 +248,14 @@ func NewExecutor(env tabletenv.Env, tabletAlias *topodatapb.TabletAlias, ts *top tabletAlias: tabletAlias.CloneVT(), pool: connpool.NewPool(env, "OnlineDDLExecutorPool", tabletenv.ConnPoolConfig{ - Size: databasePoolSize, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: databasePoolSize, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), tabletTypeFunc: tabletTypeFunc, ts: ts, lagThrottler: lagThrottler, toggleBufferTableFunc: toggleBufferTableFunc, + requestGCChecksFunc: requestGCChecksFunc, ticks: timer.NewTimer(migrationCheckInterval), // Gracefully return an error if any caller tries to execute // a query before the executor has been fully opened. 
@@ -286,7 +274,7 @@ func (e *Executor) executeQuery(ctx context.Context, query string) (result *sqlt } defer conn.Recycle() - return conn.Conn.Exec(ctx, query, math.MaxInt32, true) + return conn.Conn.Exec(ctx, query, -1, true) } func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, query string) (result *sqltypes.Result, err error) { @@ -299,11 +287,11 @@ func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, que defer conn.Recycle() // Replace any provided sidecar DB qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := e.env.Environment().Parser().ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } - return conn.Conn.Exec(ctx, uq, math.MaxInt32, true) + return conn.Conn.Exec(ctx, uq, -1, true) } // TabletAliasString returns tablet alias as string (duh) @@ -387,7 +375,7 @@ func (e *Executor) matchesShards(commaDelimitedShards string) bool { } // countOwnedRunningMigrations returns an estimate of current count of running migrations; this is -// normally an accurate number, but can be inexact because the exdcutor peridocially reviews +// normally an accurate number, but can be inexact because the executor periodically reviews // e.ownedRunningMigrations and adds/removes migrations based on actual migration state. 
func (e *Executor) countOwnedRunningMigrations() (count int) { e.ownedRunningMigrations.Range(func(_, val any) bool { @@ -408,7 +396,7 @@ func (e *Executor) allowConcurrentMigration(onlineDDL *schema.OnlineDDL) (action } var err error - action, err = onlineDDL.GetAction() + action, err = onlineDDL.GetAction(e.env.Environment().Parser()) if err != nil { return action, false } @@ -546,7 +534,7 @@ func (e *Executor) readMySQLVariables(ctx context.Context) (variables *mysqlVari } // createOnlineDDLUser creates a gh-ost or pt-osc user account with all -// neccessary privileges and with a random password +// necessary privileges and with a random password func (e *Executor) createOnlineDDLUser(ctx context.Context) (password string, err error) { conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaConnector()) if err != nil { @@ -641,6 +629,21 @@ func (e *Executor) executeDirectly(ctx context.Context, onlineDDL *schema.Online } _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusRunning, false, progressPctStarted, etaSecondsUnknown, rowsCopiedUnknown, emptyHint) + if onlineDDL.StrategySetting().IsAllowForeignKeysFlag() { + // Foreign key support is curently "unsafe". We further put the burden on the user + // by disabling foreign key checks. With this, the user is able to create cyclic + // foreign key references (e.g. t1<->t2) without going through the trouble of + // CREATE TABLE t1->CREATE TABLE t2->ALTER TABLE t1 ADD FOREIGN KEY ... 
REFERENCES ts + // Grab current sql_mode value + if _, err := conn.ExecuteFetch(`set @vt_onlineddl_foreign_key_checks=@@foreign_key_checks`, 0, false); err != nil { + return false, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "could not read foreign_key_checks: %v", err) + } + _, err = conn.ExecuteFetch("SET foreign_key_checks=0", 0, false) + if err != nil { + return false, err + } + defer conn.ExecuteFetch("SET foreign_key_checks=@vt_onlineddl_foreign_key_checks", 0, false) + } _, err = conn.ExecuteFetch(onlineDDL.SQL, 0, false) if err != nil { @@ -765,8 +768,100 @@ func (e *Executor) terminateVReplMigration(ctx context.Context, uuid string) err return nil } +// killTableLockHoldersAndAccessors kills any active queries using the given table, and also kills +// connections with open transactions, holding locks on the table. +// This is done on a best-effort basis, by issuing `KILL` and `KILL QUERY` commands. As MySQL goes, +// it is not guaranteed that the queries/transactions will terminate in a timely manner. +func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, tableName string) error { + log.Infof("killTableLockHoldersAndAccessors: %v", tableName) + conn, err := dbconnpool.NewDBConnection(ctx, e.env.Config().DB.DbaWithDB()) + if err != nil { + return err + } + defer conn.Close() + + { + // First, let's look at PROCESSLIST for queries that _might_ be operating on our table. This may have + // plenty false positives as we're simply looking for the table name as a query substring. 
+ likeVariable := "%" + tableName + "%" + query, err := sqlparser.ParseAndBind(sqlFindProcessByInfo, sqltypes.StringBindVariable(likeVariable)) + if err != nil { + return err + } + rs, err := conn.Conn.ExecuteFetch(query, -1, true) + if err != nil { + return err + } + + log.Infof("killTableLockHoldersAndAccessors: found %v potential queries", len(rs.Rows)) + // Now that we have some list of queries, we actually parse them to find whether the query actually references our table: + for _, row := range rs.Named().Rows { + threadId := row.AsInt64("id", 0) + infoQuery := row.AsString("info", "") + stmt, err := e.env.Environment().Parser().Parse(infoQuery) + if err != nil { + log.Error(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unable to parse processlist Info query: %v", infoQuery)) + continue + } + queryUsesTable := false + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.TableName: + if node.Name.String() == tableName { + queryUsesTable = true + return false, nil + } + case *sqlparser.AliasedTableExpr: + if alasedTableName, ok := node.Expr.(sqlparser.TableName); ok { + if alasedTableName.Name.String() == tableName { + queryUsesTable = true + return false, nil + } + } + } + return true, nil + }, stmt) + + if queryUsesTable { + log.Infof("killTableLockHoldersAndAccessors: killing query %v: %.100s", threadId, infoQuery) + killQuery := fmt.Sprintf("KILL QUERY %d", threadId) + if _, err := conn.Conn.ExecuteFetch(killQuery, 1, false); err != nil { + log.Error(vterrors.Errorf(vtrpcpb.Code_ABORTED, "could not kill query %v. Ignoring", threadId)) + } + } + } + } + capableOf := mysql.ServerVersionCapableOf(conn.ServerVersion) + capable, err := capableOf(capabilities.PerformanceSchemaDataLocksTableCapability) + if err != nil { + return err + } + if capable { + { + // Kill connections that have open transactions locking the table. These potentially (probably?) 
are not + // actively running a query on our table. They're doing other things while holding locks on our table. + query, err := sqlparser.ParseAndBind(sqlProcessWithLocksOnTable, sqltypes.StringBindVariable(tableName)) + if err != nil { + return err + } + rs, err := conn.Conn.ExecuteFetch(query, -1, true) + if err != nil { + return err + } + log.Infof("killTableLockHoldersAndAccessors: found %v locking transactions", len(rs.Rows)) + for _, row := range rs.Named().Rows { + threadId := row.AsInt64("trx_mysql_thread_id", 0) + log.Infof("killTableLockHoldersAndAccessors: killing connection %v with transaction on table", threadId) + killConnection := fmt.Sprintf("KILL %d", threadId) + _, _ = conn.Conn.ExecuteFetch(killConnection, 1, false) + } + } + } + return nil +} + // cutOverVReplMigration stops vreplication, then removes the _vt.vreplication entry for the given migration -func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) error { +func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream, shouldForceCutOver bool) error { if err := e.incrementCutoverAttempts(ctx, s.workflow); err != nil { return err } @@ -775,7 +870,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er defer tmClient.Close() // sanity checks: - vreplTable, err := getVreplTable(ctx, s) + vreplTable, err := getVreplTable(s) if err != nil { return err } @@ -844,7 +939,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er } // This was a best effort optimization. Possibly the error is not nil. Which means we // still have a record of the sentry table, and gcArtifacts() will still be able to take - // care of it in the futre. + // care of it in the future. 
}() parsed := sqlparser.BuildParsedQuery(sqlCreateSentryTable, sentryTableName) if _, err := e.execQuery(ctx, parsed.Query); err != nil { @@ -879,7 +974,10 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er defer renameConn.Recycle() defer func() { if !renameWasSuccessful { - renameConn.Conn.Kill("premature exit while renaming tables", 0) + err := renameConn.Conn.Kill("premature exit while renaming tables", 0) + if err != nil { + log.Warningf("Failed to kill connection being used to rename tables in OnlineDDL migration %s: %v", onlineDDL.UUID, err) + } } }() // See if backend MySQL server supports 'rename_table_preserve_foreign_key' variable @@ -890,13 +988,11 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er if preserveFKSupported { // This code is only applicable when MySQL supports the 'rename_table_preserve_foreign_key' variable. This variable // does not exist in vanilla MySQL. - // See https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced - // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps1. - if _, err := renameConn.Conn.Exec(ctx, sqlEnablePreserveForeignKey, 1, false); err != nil { - return err - } - log.Infof("@@rename_table_preserve_foreign_key enabled") - defer renameConn.Conn.Exec(ctx, sqlDisablePreserveForeignKey, 1, false) + // See + // - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced + // - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 + // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. 
+ log.Infof("@@rename_table_preserve_foreign_key supported") } renameQuery := sqlparser.BuildParsedQuery(sqlSwapTables, onlineDDL.Table, sentryTableName, vreplTable, onlineDDL.Table, sentryTableName, vreplTable) @@ -904,7 +1000,7 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er waitForRenameProcess := func() error { // This function waits until it finds the RENAME TABLE... query running in MySQL's PROCESSLIST, or until timeout // The function assumes that one of the renamed tables is locked, thus causing the RENAME to block. If nothing - // is locked, then the RENAME will be near-instantaneious and it's unlikely that the function will find it. + // is locked, then the RENAME will be near-instantaneous and it's unlikely that the function will find it. renameWaitCtx, cancel := context.WithTimeout(ctx, migrationCutOverThreshold) defer cancel() @@ -974,6 +1070,12 @@ func (e *Executor) cutOverVReplMigration(ctx context.Context, s *VReplStream) er e.updateMigrationStage(ctx, onlineDDL.UUID, "graceful wait for buffering") time.Sleep(100 * time.Millisecond) + if shouldForceCutOver { + if err := e.killTableLockHoldersAndAccessors(ctx, onlineDDL.Table); err != nil { + return err + } + } + if isVreplicationTestSuite { // The testing suite may inject queries internally from the server via a recurring EVENT. // Those queries are unaffected by query rules (ACLs) because they don't go through Vitess. @@ -1187,7 +1289,7 @@ func (e *Executor) newConstraintName(onlineDDL *schema.OnlineDDL, constraintType // validateAndEditCreateTableStatement inspects the CreateTable AST and does the following: // - extra validation (no FKs for now...) 
// - generate new and unique names for all constraints (CHECK and FK; yes, why not handle FK names; even as we don't support FKs today, we may in the future) -func (e *Executor) validateAndEditCreateTableStatement(ctx context.Context, onlineDDL *schema.OnlineDDL, createTable *sqlparser.CreateTable) (constraintMap map[string]string, err error) { +func (e *Executor) validateAndEditCreateTableStatement(onlineDDL *schema.OnlineDDL, createTable *sqlparser.CreateTable) (constraintMap map[string]string, err error) { constraintMap = map[string]string{} hashExists := map[string]bool{} @@ -1214,7 +1316,12 @@ func (e *Executor) validateAndEditCreateTableStatement(ctx context.Context, onli // validateAndEditAlterTableStatement inspects the AlterTable statement and: // - modifies any CONSTRAINT name according to given name mapping // - explode ADD FULLTEXT KEY into multiple statements -func (e *Executor) validateAndEditAlterTableStatement(ctx context.Context, onlineDDL *schema.OnlineDDL, alterTable *sqlparser.AlterTable, constraintMap map[string]string) (alters []*sqlparser.AlterTable, err error) { +func (e *Executor) validateAndEditAlterTableStatement(capableOf capabilities.CapableOf, onlineDDL *schema.OnlineDDL, alterTable *sqlparser.AlterTable, constraintMap map[string]string) (alters []*sqlparser.AlterTable, err error) { + capableOfInstantDDLXtrabackup, err := capableOf(capabilities.InstantDDLXtrabackupCapability) + if err != nil { + return nil, err + } + hashExists := map[string]bool{} validateWalk := func(node sqlparser.SQLNode) (kontinue bool, err error) { switch node := node.(type) { @@ -1246,8 +1353,10 @@ func (e *Executor) validateAndEditAlterTableStatement(ctx context.Context, onlin opt := alterTable.AlterOptions[i] switch opt := opt.(type) { case sqlparser.AlgorithmValue: - // we do not pass ALGORITHM. We choose our own ALGORITHM. - continue + if !capableOfInstantDDLXtrabackup { + // we do not pass ALGORITHM. We choose our own ALGORITHM. 
+ continue + } case *sqlparser.AddIndexDefinition: if opt.IndexDefinition.Info.Type == sqlparser.IndexTypeFullText { countAddFullTextStatements++ @@ -1256,7 +1365,10 @@ func (e *Executor) validateAndEditAlterTableStatement(ctx context.Context, onlin // in the same statement extraAlterTable := &sqlparser.AlterTable{ Table: alterTable.Table, - AlterOptions: []sqlparser.AlterOption{opt, copyAlgorithm}, + AlterOptions: []sqlparser.AlterOption{opt}, + } + if !capableOfInstantDDLXtrabackup { + extraAlterTable.AlterOptions = append(extraAlterTable.AlterOptions, copyAlgorithm) } alters = append(alters, extraAlterTable) continue @@ -1266,7 +1378,9 @@ func (e *Executor) validateAndEditAlterTableStatement(ctx context.Context, onlin redactedOptions = append(redactedOptions, opt) } alterTable.AlterOptions = redactedOptions - alterTable.AlterOptions = append(alterTable.AlterOptions, copyAlgorithm) + if !capableOfInstantDDLXtrabackup { + alterTable.AlterOptions = append(alterTable.AlterOptions, copyAlgorithm) + } return alters, nil } @@ -1280,7 +1394,7 @@ func (e *Executor) duplicateCreateTable(ctx context.Context, onlineDDL *schema.O constraintMap map[string]string, err error, ) { - stmt, err := sqlparser.ParseStrictDDL(originalShowCreateTable) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(originalShowCreateTable) if err != nil { return nil, nil, nil, err } @@ -1292,7 +1406,7 @@ func (e *Executor) duplicateCreateTable(ctx context.Context, onlineDDL *schema.O newCreateTable.SetTable(newCreateTable.GetTable().Qualifier.CompliantName(), newTableName) // manipulate CreateTable statement: take care of constraints names which have to be // unique across the schema - constraintMap, err = e.validateAndEditCreateTableStatement(ctx, onlineDDL, newCreateTable) + constraintMap, err = e.validateAndEditCreateTableStatement(onlineDDL, newCreateTable) if err != nil { return nil, nil, nil, err } @@ -1301,7 +1415,7 @@ func (e *Executor) duplicateCreateTable(ctx context.Context, 
onlineDDL *schema.O // createDuplicateTableLike creates the table named by `newTableName` in the likeness of onlineDDL.Table // This function emulates MySQL's `CREATE TABLE LIKE ...` statement. The difference is that this function takes control over the generated CONSTRAINT names, -// if any, such that they are detrministic across shards, as well as preserve original names where possible. +// if any, such that they are deterministic across shards, as well as preserve original names where possible. func (e *Executor) createDuplicateTableLike(ctx context.Context, newTableName string, onlineDDL *schema.OnlineDDL, conn *dbconnpool.DBConnection) ( originalShowCreateTable string, constraintMap map[string]string, @@ -1337,7 +1451,10 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online return v, err } - vreplTableName := fmt.Sprintf("_%s_%s_vrepl", onlineDDL.UUID, ReadableTimestamp()) + vreplTableName, err := schema.GenerateInternalTableName(schema.InternalTableVreplicationHint.String(), onlineDDL.UUID, time.Now()) + if err != nil { + return v, err + } if err := e.updateArtifacts(ctx, onlineDDL.UUID, vreplTableName); err != nil { return v, err } @@ -1346,7 +1463,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online return nil, err } - stmt, err := sqlparser.ParseStrictDDL(onlineDDL.SQL) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(onlineDDL.SQL) if err != nil { return nil, err } @@ -1357,7 +1474,9 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online // ALTER TABLE should apply to the vrepl table alterTable.SetTable(alterTable.GetTable().Qualifier.CompliantName(), vreplTableName) // Also, change any constraint names: - alters, err := e.validateAndEditAlterTableStatement(ctx, onlineDDL, alterTable, constraintMap) + + capableOf := mysql.ServerVersionCapableOf(conn.ServerVersion) + alters, err := e.validateAndEditAlterTableStatement(capableOf, onlineDDL, alterTable, 
constraintMap) if err != nil { return v, err } @@ -1373,7 +1492,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, originalShowCreateTable, vreplShowCreateTable, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag()) + v = NewVRepl(e.env.Environment(), onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, originalShowCreateTable, vreplShowCreateTable, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag()) return v, nil } @@ -1419,7 +1538,7 @@ func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDD return nil, err } - vreplTableName, err := getVreplTable(ctx, revertStream) + vreplTableName, err := getVreplTable(revertStream) if err != nil { return nil, err } @@ -1427,7 +1546,7 @@ func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDD if err := e.updateArtifacts(ctx, onlineDDL.UUID, vreplTableName); err != nil { return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", "", "", false) + v = NewVRepl(e.env.Environment(), onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", "", "", false) v.pos = revertStream.pos return v, nil } @@ -1565,7 +1684,6 @@ func (e *Executor) ExecuteWithGhost(ctx context.Context, onlineDDL *schema.Onlin log.Errorf("Error creating temporary directory: %+v", err) return err } - binaryFileName, _ := GhostBinaryFileName() credentialsConfigFileContent := fmt.Sprintf(`[client] user=%s password=${ONLINE_DDL_PASSWORD} @@ -1587,7 +1705,7 @@ export ONLINE_DDL_PASSWORD exit_code=$? 
grep -o '\bFATAL\b.*' "$ghost_log_path/$ghost_log_file" | tail -1 > "$ghost_log_path/$ghost_log_failure_file" exit $exit_code - `, tempDir, migrationLogFileName, migrationFailureFileName, binaryFileName, + `, tempDir, migrationLogFileName, migrationFailureFileName, ghostBinaryPath, ) wrapperScriptFileName, err := createTempScript(tempDir, "gh-ost-wrapper.sh", wrapperScriptContent) if err != nil { @@ -1656,7 +1774,7 @@ exit $exit_code runGhost := func(execute bool) error { alterOptions := e.parseAlterOptions(ctx, onlineDDL) - forceTableNames := fmt.Sprintf("%s_%s", onlineDDL.UUID, ReadableTimestamp()) + forceTableNames := fmt.Sprintf("%s_%s", onlineDDL.UUID, schema.ReadableTimestamp()) if err := e.updateArtifacts(ctx, onlineDDL.UUID, fmt.Sprintf("_%s_gho", forceTableNames), @@ -1780,7 +1898,6 @@ func (e *Executor) ExecuteWithPTOSC(ctx context.Context, onlineDDL *schema.Onlin return err } - binaryFileName, _ := PTOSCFileName() wrapperScriptContent := fmt.Sprintf(`#!/bin/bash pt_log_path="%s" pt_log_file="%s" @@ -1789,7 +1906,7 @@ mkdir -p "$pt_log_path" export MYSQL_PWD %s "$@" > "$pt_log_path/$pt_log_file" 2>&1 - `, tempDir, migrationLogFileName, binaryFileName, + `, tempDir, migrationLogFileName, ptOSCBinaryPath, ) wrapperScriptFileName, err := createTempScript(tempDir, "pt-online-schema-change-wrapper.sh", wrapperScriptContent) if err != nil { @@ -1880,14 +1997,14 @@ export MYSQL_PWD // The following sleep() is temporary and artificial. Because we create a new user for this // migration, and because we throttle by replicas, we need to wait for the replicas to be // caught up with the new user creation. Otherwise, the OSC tools will fail connecting to the replicas... - // Once we have a built in throttling service , we will no longe rneed to have the OSC tools probe the + // Once we have a built in throttling service , we will no longer need to have the OSC tools probe the // replicas. Instead, they will consult with our throttling service. 
// TODO(shlomi): replace/remove this when we have a proper throttling solution time.Sleep(time.Second) runPTOSC := func(execute bool) error { os.Setenv("MYSQL_PWD", onlineDDLPassword) - newTableName := fmt.Sprintf("_%s_%s_new", onlineDDL.UUID, ReadableTimestamp()) + newTableName := fmt.Sprintf("_%s_%s_new", onlineDDL.UUID, schema.ReadableTimestamp()) if err := e.updateArtifacts(ctx, onlineDDL.UUID, fmt.Sprintf("_%s_old", onlineDDL.Table), @@ -2041,10 +2158,10 @@ func (e *Executor) terminateMigration(ctx context.Context, onlineDDL *schema.Onl foundRunning = true // Because pt-osc doesn't offer much control, we take a brute force approach to killing it, // revoking its privileges, and cleaning up its triggers. - if err := syscall.Kill(pid, syscall.SIGTERM); err != nil { + if err := syscallutil.Kill(pid, syscall.SIGTERM); err != nil { return foundRunning, nil } - if err := syscall.Kill(pid, syscall.SIGKILL); err != nil { + if err := syscallutil.Kill(pid, syscall.SIGKILL); err != nil { return foundRunning, nil } if err := e.dropOnlineDDLUser(ctx); err != nil { @@ -2229,7 +2346,7 @@ func (e *Executor) UnthrottleAllMigrations(ctx context.Context) (result *sqltype return emptyResult, nil } -// scheduleNextMigration attemps to schedule a single migration to run next. +// scheduleNextMigration attempts to schedule a single migration to run next. // possibly there are migrations to run. // The effect of this function is to move a migration from 'queued' state to 'ready' state, is all. 
func (e *Executor) scheduleNextMigration(ctx context.Context) error { @@ -2257,7 +2374,7 @@ func (e *Executor) scheduleNextMigration(ctx context.Context) error { if !readyToComplete { // see if we need to update ready_to_complete if isImmediateOperation { - // Whether postponsed or not, CREATE and DROP operations, as well as VIEW operations, + // Whether postponed or not, CREATE and DROP operations, as well as VIEW operations, // are inherently "ready to complete" because their operation is immediate. if err := e.updateMigrationReadyToComplete(ctx, uuid, true); err != nil { return err @@ -2297,7 +2414,7 @@ func (e *Executor) reviewEmptyTableRevertMigrations(ctx context.Context, onlineD // Try to update table name and ddl_action // Failure to do so fails the migration - revertUUID, err := onlineDDL.GetRevertUUID() + revertUUID, err := onlineDDL.GetRevertUUID(e.env.Environment().Parser()) if err != nil { return false, e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot analyze revert UUID for revert migration %s: %v", onlineDDL.UUID, err)) } @@ -2340,7 +2457,14 @@ func (e *Executor) reviewEmptyTableRevertMigrations(ctx context.Context, onlineD // Non immediate operations are: // - A gh-ost migration // - A vitess (vreplication) migration -func (e *Executor) reviewImmediateOperations(ctx context.Context, capableOf mysql.CapableOf, onlineDDL *schema.OnlineDDL, ddlAction string, isRevert bool, isView bool) (bool, error) { +func (e *Executor) reviewImmediateOperations( + ctx context.Context, + capableOf capabilities.CapableOf, + onlineDDL *schema.OnlineDDL, + ddlAction string, + isRevert bool, + isView bool, +) (bool, error) { switch ddlAction { case sqlparser.CreateStr, sqlparser.DropStr: return true, nil @@ -2364,9 +2488,9 @@ func (e *Executor) reviewImmediateOperations(ctx context.Context, capableOf mysq // reviewQueuedMigration investigates a single migration found in `queued` state. // It analyzes whether the migration can & should be fulfilled immediately (e.g. 
via INSTANT DDL or just because it's a CREATE or DROP), -// or backfils necessary information if it's a REVERT. +// or backfills necessary information if it's a REVERT. // If all goes well, it sets `reviewed_timestamp` which then allows the state machine to schedule the migration. -func (e *Executor) reviewQueuedMigration(ctx context.Context, uuid string, capableOf mysql.CapableOf) error { +func (e *Executor) reviewQueuedMigration(ctx context.Context, uuid string, capableOf capabilities.CapableOf) error { onlineDDL, row, err := e.readMigration(ctx, uuid) if err != nil { return err @@ -2430,7 +2554,7 @@ func (e *Executor) reviewQueuedMigrations(ctx context.Context) error { return err } defer conn.Close() - _, capableOf, _ := mysql.GetFlavor(conn.ServerVersion, nil) + capableOf := mysql.ServerVersionCapableOf(conn.ServerVersion) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -2451,7 +2575,7 @@ func (e *Executor) reviewQueuedMigrations(ctx context.Context) error { func (e *Executor) validateMigrationRevertible(ctx context.Context, revertMigration *schema.OnlineDDL, revertingMigrationUUID string) (err error) { // Validation: migration to revert exists and is in complete state - action, actionStr, err := revertMigration.GetActionStr() + action, actionStr, err := revertMigration.GetActionStr(e.env.Environment().Parser()) if err != nil { return err } @@ -2520,7 +2644,7 @@ func (e *Executor) validateMigrationRevertible(ctx context.Context, revertMigrat // - what type of migration we're reverting? 
(CREATE/DROP/ALTER) // - revert appropriately to the type of migration func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) { - revertUUID, err := onlineDDL.GetRevertUUID() + revertUUID, err := onlineDDL.GetRevertUUID(e.env.Environment().Parser()) if err != nil { return fmt.Errorf("cannot run a revert migration %v: %+v", onlineDDL.UUID, err) } @@ -2633,7 +2757,7 @@ func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDD func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schema.OnlineDDL) (diff schemadiff.EntityDiff, err error) { // Modify the CREATE TABLE statement to indicate a different, made up table name, known as the "comparison table" - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return nil, err } @@ -2687,12 +2811,15 @@ func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schem if newShowCreateTable == "" { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected: cannot find table or view even as it was just created: %v", onlineDDL.Table) } - hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementApplyHigher} + senv := schemadiff.NewEnv(e.env.Environment(), e.env.Environment().CollationEnv().DefaultConnectionCharset()) + hints := &schemadiff.DiffHints{ + AutoIncrementStrategy: schemadiff.AutoIncrementApplyHigher, + } switch ddlStmt.(type) { case *sqlparser.CreateTable: - diff, err = schemadiff.DiffCreateTablesQueries(existingShowCreateTable, newShowCreateTable, hints) + diff, err = schemadiff.DiffCreateTablesQueries(senv, existingShowCreateTable, newShowCreateTable, hints) case *sqlparser.CreateView: - diff, err = schemadiff.DiffCreateViewsQueries(existingShowCreateTable, newShowCreateTable, hints) + diff, err = schemadiff.DiffCreateViewsQueries(senv, existingShowCreateTable, 
newShowCreateTable, hints) default: return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expected CREATE TABLE or CREATE VIEW in online DDL statement: %v", onlineDDL.SQL) } @@ -2702,7 +2829,7 @@ func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schem return diff, nil } -// getCompletedMigrationByContextAndSQL chceks if there exists a completed migration with exact same +// getCompletedMigrationByContextAndSQL checks if there exists a completed migration with exact same // context and SQL as given migration. If so, it returns its UUID. func (e *Executor) getCompletedMigrationByContextAndSQL(ctx context.Context, onlineDDL *schema.OnlineDDL) (completedUUID string, err error) { if onlineDDL.MigrationContext == "" { @@ -2753,7 +2880,7 @@ func (e *Executor) analyzeDropDDLActionMigration(ctx context.Context, onlineDDL } } } - stmt, err := sqlparser.ParseStrictDDL(originalShowCreateTable) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(originalShowCreateTable) if err != nil { return err } @@ -2799,7 +2926,7 @@ func (e *Executor) executeDropDDLActionMigration(ctx context.Context, onlineDDL // We transform a DROP TABLE into a RENAME TABLE statement, so as to remove the table safely and asynchronously. 
- ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return failMigration(err) } @@ -2842,7 +2969,7 @@ func (e *Executor) executeCreateDDLActionMigration(ctx context.Context, onlineDD e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return failMigration(err) } @@ -2863,6 +2990,17 @@ func (e *Executor) executeCreateDDLActionMigration(ctx context.Context, onlineDD } } } + if originalCreateTable, ok := ddlStmt.(*sqlparser.CreateTable); ok { + newCreateTable := sqlparser.CloneRefOfCreateTable(originalCreateTable) + // Rewrite this CREATE TABLE statement such that CONSTRAINT names are edited, + // specifically removing any prefix. + if _, err := e.validateAndEditCreateTableStatement(onlineDDL, newCreateTable); err != nil { + return failMigration(err) + } + ddlStmt = newCreateTable + onlineDDL.SQL = sqlparser.String(newCreateTable) + } + // from now on, whether a VIEW or a TABLE, they get the same treatment sentryArtifactTableName, err := schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime()) @@ -2918,7 +3056,7 @@ func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema if err != nil { return err } - stmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + stmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return err } @@ -3006,7 +3144,7 @@ func (e *Executor) executeSpecialAlterDDLActionMigrationIfApplicable(ctx context return false, err } defer conn.Close() - _, capableOf, _ := mysql.GetFlavor(conn.ServerVersion, nil) + capableOf := mysql.ServerVersionCapableOf(conn.ServerVersion) specialPlan, err := e.analyzeSpecialAlterPlan(ctx, onlineDDL, capableOf) if 
err != nil { @@ -3023,42 +3161,7 @@ func (e *Executor) executeSpecialAlterDDLActionMigrationIfApplicable(ctx context if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { return false, err } - case dropRangePartitionSpecialOperation: - dropPartition := func() error { - artifactTableName, err := schema.GenerateGCTableName(schema.HoldTableGCState, newGCTableRetainTime()) - if err != nil { - return err - } - if err := e.updateArtifacts(ctx, onlineDDL.UUID, artifactTableName); err != nil { - return err - } - - // Apply CREATE TABLE for artifact table - if _, _, err := e.createDuplicateTableLike(ctx, artifactTableName, onlineDDL, conn); err != nil { - return err - } - // Remove partitioning - parsed := sqlparser.BuildParsedQuery(sqlAlterTableRemovePartitioning, artifactTableName) - if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil { - return err - } - // Exchange with partition - partitionName := specialPlan.Detail("partition_name") - parsed = sqlparser.BuildParsedQuery(sqlAlterTableExchangePartition, onlineDDL.Table, partitionName, artifactTableName) - if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil { - return err - } - // Drop table's partition - parsed = sqlparser.BuildParsedQuery(sqlAlterTableDropPartition, onlineDDL.Table, partitionName) - if _, err := conn.ExecuteFetch(parsed.Query, 0, false); err != nil { - return err - } - return nil - } - if err := dropPartition(); err != nil { - return false, err - } - case addRangePartitionSpecialOperation: + case rangePartitionSpecialOperation: if _, err := e.executeDirectly(ctx, onlineDDL); err != nil { return false, err } @@ -3077,7 +3180,7 @@ func (e *Executor) executeAlterDDLActionMigration(ctx context.Context, onlineDDL failMigration := func(err error) error { return e.failMigration(ctx, onlineDDL, err) } - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != 
nil { return failMigration(err) } @@ -3150,7 +3253,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin return e.failMigration(ctx, onlineDDL, err) } - ddlAction, err := onlineDDL.GetAction() + ddlAction, err := onlineDDL.GetAction(e.env.Environment().Parser()) if err != nil { return failMigration(err) } @@ -3184,7 +3287,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin // - Implicitly do nothing, if the table does not exist { // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return failMigration(err) } @@ -3199,7 +3302,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin if exists { // table does exist, so this declarative DROP turns out to really be an actual DROP. No further action is needed here } else { - // table does not exist. We mark this DROP as implicitly sucessful + // table does not exist. 
We mark this DROP as implicitly successful _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow, rowsCopiedUnknown, emptyHint) _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, "no change") return nil @@ -3211,7 +3314,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin // - Implicitly do nothing, if the table exists and is identical to CREATE statement // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err != nil { return failMigration(err) } @@ -3232,7 +3335,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin return failMigration(err) } if diff == nil || diff.IsEmpty() { - // No diff! We mark this CREATE as implicitly sucessful + // No diff! 
We mark this CREATE as implicitly successful _ = e.onSchemaMigrationStatus(ctx, onlineDDL.UUID, schema.OnlineDDLStatusComplete, false, progressPctFull, etaSecondsNow, rowsCopiedUnknown, emptyHint) _ = e.updateMigrationMessage(ctx, onlineDDL.UUID, "no change") return nil @@ -3352,7 +3455,7 @@ func (e *Executor) runNextMigration(ctx context.Context) error { } { // We strip out any VT query comments because our simplified parser doesn't work well with comments - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.Environment().Parser()) if err == nil { ddlStmt.SetComments(sqlparser.Comments{}) onlineDDL.SQL = sqlparser.String(ddlStmt) @@ -3471,8 +3574,10 @@ func (e *Executor) readVReplStream(ctx context.Context, uuid string, okIfMissing // isPreserveForeignKeySupported checks if the underlying MySQL server supports 'rename_table_preserve_foreign_key' // Online DDL is not possible on vanilla MySQL 8.0 for reasons described in https://vitess.io/blog/2021-06-15-online-ddl-why-no-fk/. -// However, Online DDL is made possible in via these changes: https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced -// as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps1. +// However, Online DDL is made possible in via these changes: +// - https://github.com/planetscale/mysql-server/commit/bb777e3e86387571c044fb4a2beb4f8c60462ced +// - https://github.com/planetscale/mysql-server/commit/c2f1344a6863518d749f2eb01a4c74ca08a5b889 +// as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps3. // Said changes introduce a new global/session boolean variable named 'rename_table_preserve_foreign_key'. It defaults 'false'/0 for backwards compatibility. // When enabled, a `RENAME TABLE` to a FK parent "pins" the children's foreign keys to the table name rather than the table pointer. 
Which means after the RENAME, // the children will point to the newly instated table rather than the original, renamed table. @@ -3496,7 +3601,7 @@ func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, onlineDDL } } { - // Both time_updated and transaction_timestamp must be in close priximity to each + // Both time_updated and transaction_timestamp must be in close proximity to each // other and to the time now, otherwise that means we're lagging and it's not a good time // to cut-over durationDiff := func(t1, t2 time.Time) time.Duration { @@ -3543,6 +3648,46 @@ func (e *Executor) isVReplMigrationReadyToCutOver(ctx context.Context, onlineDDL return true, nil } +// shouldCutOverAccordingToBackoff is called when a vitess migration (ALTER TABLE) is generally ready to cut-over. +// This function further determines whether the migration should cut-over or not, by considering: +// - backoff: we cut-over by increasing intervals, see `cutoverIntervals` +// - forced cut-over: either via `--force-cut-over-after` DDL strategy, or via user command, we override +// any backoff (and will also potentially KILL queries and connections holding locks on the migrated table) +func shouldCutOverAccordingToBackoff( + shouldForceCutOverIndicator bool, + forceCutOverAfter time.Duration, + sinceReadyToComplete time.Duration, + sinceLastCutoverAttempt time.Duration, + cutoverAttempts int64, +) ( + shouldCutOver bool, shouldForceCutOver bool, +) { + if shouldForceCutOverIndicator { + // That's very simple: the user indicated they want to force cut over. + return true, true + } + // shouldForceCutOver means the time since migration was ready to complete + // is beyond the --force-cut-over-after setting, or the column `force_cutover` is "1", and this means: + // - we do not want to backoff, we want to cutover asap + // - we agree to brute-force KILL any pending queries on the migrated table so as to ensure it's unlocked. 
+ if forceCutOverAfter > 0 && sinceReadyToComplete > forceCutOverAfter { + // time since migration was ready to complete is beyond the --force-cut-over-after setting + return true, true + } + + // Backoff mechanism. Do not attempt to cut-over every single minute. Check how much time passed since last cut-over attempt + desiredTimeSinceLastCutover := cutoverIntervals[len(cutoverIntervals)-1] + if int(cutoverAttempts) < len(cutoverIntervals) { + desiredTimeSinceLastCutover = cutoverIntervals[cutoverAttempts] + } + if sinceLastCutoverAttempt >= desiredTimeSinceLastCutover { + // Yes! Time since last cut-over complies with our expected cut-over interval + return true, false + } + // Don't cut-over yet + return false, false +} + // reviewRunningMigrations iterates migrations in 'running' state. Normally there's only one running, which was // spawned by this tablet; but vreplication migrations could also resume from failure. func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning int, cancellable []*cancellableMigration, err error) { @@ -3574,30 +3719,42 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i uuidsFoundRunning := map[string]bool{} for _, row := range r.Named().Rows { uuid := row["migration_uuid"].ToString() + cutoverAttempts := row.AsInt64("cutover_attempts", 0) + sinceLastCutoverAttempt := time.Second * time.Duration(row.AsInt64("seconds_since_last_cutover_attempt", 0)) + sinceReadyToComplete := time.Second * time.Duration(row.AsInt64("seconds_since_ready_to_complete", 0)) onlineDDL, migrationRow, err := e.readMigration(ctx, uuid) if err != nil { return countRunnning, cancellable, err } postponeCompletion := row.AsBool("postpone_completion", false) + shouldForceCutOver := row.AsBool("force_cutover", false) elapsedSeconds := row.AsInt64("elapsed_seconds", 0) + strategySetting := onlineDDL.StrategySetting() + // --force-cut-over-after flag is validated when DDL strategy is first parsed. 
+ // There should never be an error here. But if there is, we choose to skip it, + // otherwise migrations will never complete. + forceCutOverAfter, errForceCutOverAfter := strategySetting.ForceCutOverAfter() + if errForceCutOverAfter != nil { + forceCutOverAfter = 0 + } uuidsFoundRunning[uuid] = true _ = e.updateMigrationUserThrottleRatio(ctx, uuid, currentUserThrottleRatio) - switch onlineDDL.StrategySetting().Strategy { + switch strategySetting.Strategy { case schema.DDLStrategyOnline, schema.DDLStrategyVitess: - { + reviewVReplRunningMigration := func() error { // We check the _vt.vreplication table s, err := e.readVReplStream(ctx, uuid, true) if err != nil { - return countRunnning, cancellable, err + return err } - isVreplicationTestSuite := onlineDDL.StrategySetting().IsVreplicationTestSuite() + isVreplicationTestSuite := strategySetting.IsVreplicationTestSuite() if isVreplicationTestSuite { e.triggerNextCheckInterval() } if s == nil { - continue + return nil } // Let's see if vreplication indicates an error. Many errors are recoverable, and // we do not wish to fail on first sight. We will use LastError to repeatedly @@ -3614,65 +3771,77 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i if isTerminal || !lastError.ShouldRetry() { cancellable = append(cancellable, newCancellableMigration(uuid, s.message)) } - if s.isRunning() { - // This VRepl migration may have started from outside this tablet, so - // this executor may not own the migration _yet_. We make sure to own it. - // VReplication migrations are unique in this respect: we are able to complete - // a vreplicaiton migration started by another tablet. 
- e.ownedRunningMigrations.Store(uuid, onlineDDL) - if lastVitessLivenessIndicator := migrationRow.AsInt64("vitess_liveness_indicator", 0); lastVitessLivenessIndicator < s.livenessTimeIndicator() { - _ = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuid) - _ = e.updateVitessLivenessIndicator(ctx, uuid, s.livenessTimeIndicator()) - } - if onlineDDL.TabletAlias != e.TabletAliasString() { - _ = e.updateMigrationTablet(ctx, uuid) - log.Infof("migration %s adopted by tablet %s", uuid, e.TabletAliasString()) - } - _ = e.updateRowsCopied(ctx, uuid, s.rowsCopied) - _ = e.updateMigrationProgressByRowsCopied(ctx, uuid, s.rowsCopied) - _ = e.updateMigrationETASecondsByProgress(ctx, uuid) - _ = e.updateMigrationLastThrottled(ctx, uuid, time.Unix(s.timeThrottled, 0), s.componentThrottled) + if !s.isRunning() { + return nil + } + // This VRepl migration may have started from outside this tablet, so + // this executor may not own the migration _yet_. We make sure to own it. + // VReplication migrations are unique in this respect: we are able to complete + // a vreplication migration started by another tablet. 
+ e.ownedRunningMigrations.Store(uuid, onlineDDL) + if lastVitessLivenessIndicator := migrationRow.AsInt64("vitess_liveness_indicator", 0); lastVitessLivenessIndicator < s.livenessTimeIndicator() { + _ = e.updateMigrationTimestamp(ctx, "liveness_timestamp", uuid) + _ = e.updateVitessLivenessIndicator(ctx, uuid, s.livenessTimeIndicator()) + } + if onlineDDL.TabletAlias != e.TabletAliasString() { + _ = e.updateMigrationTablet(ctx, uuid) + log.Infof("migration %s adopted by tablet %s", uuid, e.TabletAliasString()) + } + _ = e.updateRowsCopied(ctx, uuid, s.rowsCopied) + _ = e.updateMigrationProgressByRowsCopied(ctx, uuid, s.rowsCopied) + _ = e.updateMigrationETASecondsByProgress(ctx, uuid) + _ = e.updateMigrationLastThrottled(ctx, uuid, time.Unix(s.timeThrottled, 0), s.componentThrottled) - isReady, err := e.isVReplMigrationReadyToCutOver(ctx, onlineDDL, s) - if err != nil { - _ = e.updateMigrationMessage(ctx, uuid, err.Error()) - return countRunnning, cancellable, err - } - if isReady && isVreplicationTestSuite { - // This is a endtoend test suite execution. We intentionally delay it by at least - // vreplicationTestSuiteWaitSeconds - if elapsedSeconds < vreplicationTestSuiteWaitSeconds { - isReady = false - } - } - // Indicate to outside observers whether the migration is generally ready to complete. - // In the case of a postponed migration, we will not complete it, but the user will - // understand whether "now is a good time" or "not there yet" - _ = e.updateMigrationReadyToComplete(ctx, uuid, isReady) - if postponeCompletion { - // override. Even if migration is ready, we do not complete it. + isReady, err := e.isVReplMigrationReadyToCutOver(ctx, onlineDDL, s) + if err != nil { + _ = e.updateMigrationMessage(ctx, uuid, err.Error()) + return err + } + if isReady && isVreplicationTestSuite { + // This is a endtoend test suite execution. 
We intentionally delay it by at least + // vreplicationTestSuiteWaitSeconds + if elapsedSeconds < vreplicationTestSuiteWaitSeconds { isReady = false } - if isReady && onlineDDL.StrategySetting().IsInOrderCompletion() { - if len(pendingMigrationsUUIDs) > 0 && pendingMigrationsUUIDs[0] != onlineDDL.UUID { - // wait for earlier pending migrations to complete - isReady = false - } + } + // Indicate to outside observers whether the migration is generally ready to complete. + // In the case of a postponed migration, we will not complete it, but the user will + // understand whether "now is a good time" or "not there yet" + _ = e.updateMigrationReadyToComplete(ctx, uuid, isReady) + if !isReady { + return nil + } + if postponeCompletion { + // override. Even if migration is ready, we do not complete it. + return nil + } + if strategySetting.IsInOrderCompletion() { + if len(pendingMigrationsUUIDs) > 0 && pendingMigrationsUUIDs[0] != onlineDDL.UUID { + // wait for earlier pending migrations to complete + return nil } - if isReady { - if err := e.cutOverVReplMigration(ctx, s); err != nil { - _ = e.updateMigrationMessage(ctx, uuid, err.Error()) - log.Errorf("cutOverVReplMigration failed: err=%v", err) - if merr, ok := err.(*sqlerror.SQLError); ok { - switch merr.Num { - case sqlerror.ERTooLongIdent: - go e.CancelMigration(ctx, uuid, err.Error(), false) - } - } - return countRunnning, cancellable, err + } + shouldCutOver, shouldForceCutOver := shouldCutOverAccordingToBackoff( + shouldForceCutOver, forceCutOverAfter, sinceReadyToComplete, sinceLastCutoverAttempt, cutoverAttempts, + ) + if !shouldCutOver { + return nil + } + if err := e.cutOverVReplMigration(ctx, s, shouldForceCutOver); err != nil { + _ = e.updateMigrationMessage(ctx, uuid, err.Error()) + log.Errorf("cutOverVReplMigration failed: err=%v", err) + if merr, ok := err.(*sqlerror.SQLError); ok { + switch merr.Num { + case sqlerror.ERTooLongIdent: + go e.CancelMigration(ctx, uuid, err.Error(), false) } } + return err } 
+ return nil + } + if err := reviewVReplRunningMigration(); err != nil { + return countRunnning, cancellable, err } case schema.DDLStrategyPTOSC: { @@ -3709,7 +3878,7 @@ func (e *Executor) reviewRunningMigrations(ctx context.Context) (countRunnning i countRunnning++ } { - // now, let's look at UUIDs we own and _think_ should be running, and see which of tham _isn't_ actually running or pending... + // now, let's look at UUIDs we own and _think_ should be running, and see which of them _isn't_ actually running or pending... uuidsFoundPending := map[string]bool{} for _, uuid := range pendingMigrationsUUIDs { uuidsFoundPending[uuid] = true @@ -3862,7 +4031,7 @@ func (e *Executor) gcArtifactTable(ctx context.Context, artifactTable, uuid stri // The fact we're here means the table is not needed anymore. We can throw it away. // We do so by renaming it into a GC table. We use the HOLD state and with a timestamp that is // in the past. So as we rename the table: - // - The Online DDL executor compeltely loses it and has no more access to its data + // - The Online DDL executor completely loses it and has no more access to its data // - TableGC will find it on next iteration, see that it's been on HOLD "long enough", and will // take it from there to transition it into PURGE or EVAC, or DROP, and eventually drop it. renameStatement, toTableName, err := schema.GenerateRenameStatementWithUUID(artifactTable, schema.HoldTableGCState, schema.OnlineDDLToGCUUID(uuid), t) @@ -3921,6 +4090,7 @@ func (e *Executor) gcArtifacts(ctx context.Context) error { if err == nil { // artifact was renamed away and is gone. There' no need to list it in `artifacts` column. 
e.clearSingleArtifact(ctx, uuid, artifactTable) + e.requestGCChecksFunc() } else { return vterrors.Wrapf(err, "in gcArtifacts() for %s", artifactTable) } @@ -4491,9 +4661,69 @@ func (e *Executor) CleanupMigration(ctx context.Context, uuid string) (result *s return nil, err } log.Infof("CleanupMigration: migration %s marked as ready to clean up", uuid) + defer e.triggerNextCheckInterval() return rs, nil } +// ForceCutOverMigration marks the given migration for forced cut-over. This has two implications: +// - No backoff for the given migration's cut-over (cut-over will be attempted at the next scheduler cycle, +// irrespective of how many cut-over attempts have been made and when these attempts have been made). +// - During the cut-over, Online DDL will try and terminate all existing queries on the migrated table, and +// transactions (killing their connections) holding a lock on the migrated table. This is likely to cause the +// cut-over to succeed. Of course, it's not guaranteed, and it's possible that next cut-over will fail. +// The force_cutover flag, once set, remains set, and so all future cut-over attempts will again KILL interfering +// queries and connections. 
+func (e *Executor) ForceCutOverMigration(ctx context.Context, uuid string) (result *sqltypes.Result, err error) { + if atomic.LoadInt64(&e.isOpen) == 0 { + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) + } + if !schema.IsOnlineDDLUUID(uuid) { + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Not a valid migration ID in FORCE_CUTOVER: %s", uuid) + } + log.Infof("ForceCutOverMigration: request to force cut-over migration %s", uuid) + e.migrationMutex.Lock() + defer e.migrationMutex.Unlock() + + query, err := sqlparser.ParseAndBind(sqlUpdateForceCutOver, + sqltypes.StringBindVariable(uuid), + ) + if err != nil { + return nil, err + } + rs, err := e.execQuery(ctx, query) + if err != nil { + return nil, err + } + e.triggerNextCheckInterval() + log.Infof("ForceCutOverMigration: migration %s marked for forced cut-over", uuid) + return rs, nil +} + +// ForceCutOverPendingMigrations sets force_cutover flag for all pending migrations +func (e *Executor) ForceCutOverPendingMigrations(ctx context.Context) (result *sqltypes.Result, err error) { + if atomic.LoadInt64(&e.isOpen) == 0 { + return nil, vterrors.New(vtrpcpb.Code_FAILED_PRECONDITION, schema.ErrOnlineDDLDisabled.Error()) + } + + uuids, err := e.readPendingMigrationsUUIDs(ctx) + if err != nil { + return result, err + } + log.Infof("ForceCutOverPendingMigrations: iterating %v migrations %s", len(uuids)) + + result = &sqltypes.Result{} + for _, uuid := range uuids { + log.Infof("ForceCutOverPendingMigrations: applying to %s", uuid) + res, err := e.ForceCutOverMigration(ctx, uuid) + if err != nil { + return result, err + } + result.AppendResult(res) + } + log.Infof("ForceCutOverPendingMigrations: done iterating %v migrations %s", len(uuids)) + return result, nil +} + // CompleteMigration clears the postpone_completion flag for a given migration, assuming it was set in the first place func (e *Executor) CompleteMigration(ctx context.Context, uuid string) (result 
*sqltypes.Result, err error) { if atomic.LoadInt64(&e.isOpen) == 0 { @@ -4507,7 +4737,7 @@ func (e *Executor) CompleteMigration(ctx context.Context, uuid string) (result * e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - query, err := sqlparser.ParseAndBind(sqlUpdateCompleteMigration, + query, err := sqlparser.ParseAndBind(sqlClearPostponeCompletion, sqltypes.StringBindVariable(uuid), ) if err != nil { @@ -4565,7 +4795,7 @@ func (e *Executor) LaunchMigration(ctx context.Context, uuid string, shardsArg s // Does not apply to this shard! return &sqltypes.Result{}, nil } - log.Infof("LaunchMigration: request to execute migration %s", uuid) + log.Infof("LaunchMigration: request to launch migration %s", uuid) e.migrationMutex.Lock() defer e.migrationMutex.Unlock() @@ -4623,7 +4853,7 @@ func (e *Executor) submittedMigrationConflictsWithPendingMigrationInSingletonCon return false } // Let's see if the pending migration is a revert: - if _, err := pendingOnlineDDL.GetRevertUUID(); err != nil { + if _, err := pendingOnlineDDL.GetRevertUUID(e.env.Environment().Parser()); err != nil { // Not a revert. So the pending migration definitely conflicts with our migration. return true } @@ -4637,7 +4867,7 @@ func (e *Executor) submittedMigrationConflictsWithPendingMigrationInSingletonCon return true } -// submitCallbackIfNonConflicting is called internally by SubmitMigration, and is given a callack to execute +// submitCallbackIfNonConflicting is called internally by SubmitMigration, and is given a callback to execute // if the given migration does not conflict any terms. Specifically, this function looks for singleton or // singleton-context conflicts. // The call back can be an insertion of a new migration, or a retry of an existing migration, or whatnot. @@ -4743,10 +4973,10 @@ func (e *Executor) SubmitMigration( // So we will _mostly_ ignore the request: we will not submit a new migration. However, we will do // these things: - // 1. 
Check that the requested submmited migration macthes the existing one's migration-context, otherwise + // 1. Check that the requested submitted migration matches the existing one's migration-context, otherwise // this doesn't seem right, not the idempotency we were looking for if storedMigration.MigrationContext != onlineDDL.MigrationContext { - return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "migration rejected: found migration %s with different context: %s than submmitted migration's context: %s", onlineDDL.UUID, storedMigration.MigrationContext, onlineDDL.MigrationContext) + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "migration rejected: found migration %s with different context: %s than submitted migration's context: %s", onlineDDL.UUID, storedMigration.MigrationContext, onlineDDL.MigrationContext) } // 2. Possibly, the existing migration is in 'failed' or 'cancelled' state, in which case this // resubmission should retry the migration. @@ -4758,13 +4988,13 @@ func (e *Executor) SubmitMigration( // OK, this is a new UUID - _, actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr(e.env.Environment().Parser()) if err != nil { return nil, err } log.Infof("SubmitMigration: request to submit migration %s; action=%s, table=%s", onlineDDL.UUID, actionStr, onlineDDL.Table) - revertedUUID, _ := onlineDDL.GetRevertUUID() // Empty value if the migration is not actually a REVERT. Safe to ignore error. + revertedUUID, _ := onlineDDL.GetRevertUUID(e.env.Environment().Parser()) // Empty value if the migration is not actually a REVERT. Safe to ignore error. retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds()) if retainArtifacts, _ := onlineDDL.StrategySetting().RetainArtifactsDuration(); retainArtifacts != 0 { // Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override! 
@@ -4790,7 +5020,7 @@ func (e *Executor) SubmitMigration( sqltypes.BoolBindVariable(onlineDDL.StrategySetting().IsPostponeCompletion()), sqltypes.BoolBindVariable(allowConcurrentMigration), sqltypes.StringBindVariable(revertedUUID), - sqltypes.BoolBindVariable(onlineDDL.IsView()), + sqltypes.BoolBindVariable(onlineDDL.IsView(e.env.Environment().Parser())), ) if err != nil { return nil, err diff --git a/go/vt/vttablet/onlineddl/executor_test.go b/go/vt/vttablet/onlineddl/executor_test.go index 4eb0d54a418..92740548250 100644 --- a/go/vt/vttablet/onlineddl/executor_test.go +++ b/go/vt/vttablet/onlineddl/executor_test.go @@ -23,14 +23,23 @@ package onlineddl import ( "context" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" ) +var ( + testMySQLVersion = "8.0.34" +) + func TestGetConstraintType(t *testing.T) { { typ := GetConstraintType(&sqlparser.CheckConstraintDefinition{}) @@ -43,7 +52,9 @@ func TestGetConstraintType(t *testing.T) { } func TestValidateAndEditCreateTableStatement(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "ValidateAndEditCreateTableStatementTest"), + } tt := []struct { name string query string @@ -155,13 +166,13 @@ func TestValidateAndEditCreateTableStatement(t *testing.T) { } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.query) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(tc.query) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "onlineddl_test", Options: tc.strategyOptions} - constraintMap, err := e.validateAndEditCreateTableStatement(context.Background(), 
onlineDDL, createTable) + constraintMap, err := e.validateAndEditCreateTableStatement(onlineDDL, createTable) if tc.expectError != "" { assert.ErrorContains(t, err, tc.expectError) return @@ -185,77 +196,87 @@ func TestValidateAndEditCreateTableStatement(t *testing.T) { } func TestValidateAndEditAlterTableStatement(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestValidateAndEditAlterTableStatementTest"), + } tt := []struct { - alter string - m map[string]string - expect []string + alter string + mySQLVersion string + m map[string]string + expect []string }{ { - alter: "alter table t add column i int", - expect: []string{"alter table t add column i int, algorithm = copy"}, + alter: "alter table t add column i int", + mySQLVersion: "8.0.29", + expect: []string{"alter table t add column i int, algorithm = copy"}, + }, + { + alter: "alter table t add column i int", + mySQLVersion: "8.0.32", + expect: []string{"alter table t add column i int"}, }, { alter: "alter table t add column i int, add fulltext key name1_ft (name1)", - expect: []string{"alter table t add column i int, add fulltext key name1_ft (name1), algorithm = copy"}, + expect: []string{"alter table t add column i int, add fulltext key name1_ft (name1)"}, }, { alter: "alter table t add column i int, add fulltext key name1_ft (name1), add fulltext key name2_ft (name2)", - expect: []string{"alter table t add column i int, add fulltext key name1_ft (name1), algorithm = copy", "alter table t add fulltext key name2_ft (name2), algorithm = copy"}, + expect: []string{"alter table t add column i int, add fulltext key name1_ft (name1)", "alter table t add fulltext key name2_ft (name2)"}, }, { alter: "alter table t add fulltext key name0_ft (name0), add column i int, add fulltext key name1_ft (name1), add fulltext key name2_ft (name2)", - expect: []string{"alter table t add fulltext key name0_ft (name0), add column i int, algorithm = copy", "alter table t add 
fulltext key name1_ft (name1), algorithm = copy", "alter table t add fulltext key name2_ft (name2), algorithm = copy"}, + expect: []string{"alter table t add fulltext key name0_ft (name0), add column i int", "alter table t add fulltext key name1_ft (name1)", "alter table t add fulltext key name2_ft (name2)"}, }, { alter: "alter table t add constraint check (id != 1)", - expect: []string{"alter table t add constraint chk_aulpn7bjeortljhguy86phdn9 check (id != 1), algorithm = copy"}, + expect: []string{"alter table t add constraint chk_aulpn7bjeortljhguy86phdn9 check (id != 1)"}, }, { alter: "alter table t add constraint t_chk_1 check (id != 1)", - expect: []string{"alter table t add constraint chk_1_aulpn7bjeortljhguy86phdn9 check (id != 1), algorithm = copy"}, + expect: []string{"alter table t add constraint chk_1_aulpn7bjeortljhguy86phdn9 check (id != 1)"}, }, { alter: "alter table t add constraint some_check check (id != 1)", - expect: []string{"alter table t add constraint some_check_aulpn7bjeortljhguy86phdn9 check (id != 1), algorithm = copy"}, + expect: []string{"alter table t add constraint some_check_aulpn7bjeortljhguy86phdn9 check (id != 1)"}, }, { alter: "alter table t add constraint some_check check (id != 1), add constraint another_check check (id != 2)", - expect: []string{"alter table t add constraint some_check_aulpn7bjeortljhguy86phdn9 check (id != 1), add constraint another_check_4fa197273p3w96267pzm3gfi3 check (id != 2), algorithm = copy"}, + expect: []string{"alter table t add constraint some_check_aulpn7bjeortljhguy86phdn9 check (id != 1), add constraint another_check_4fa197273p3w96267pzm3gfi3 check (id != 2)"}, }, { alter: "alter table t add foreign key (parent_id) references onlineddl_test_parent (id) on delete no action", - expect: []string{"alter table t add constraint fk_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, algorithm = copy"}, + expect: []string{"alter table t add 
constraint fk_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action"}, }, { alter: "alter table t add constraint myfk foreign key (parent_id) references onlineddl_test_parent (id) on delete no action", - expect: []string{"alter table t add constraint myfk_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, algorithm = copy"}, + expect: []string{"alter table t add constraint myfk_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action"}, }, { // strip out table name alter: "alter table t add constraint t_ibfk_1 foreign key (parent_id) references onlineddl_test_parent (id) on delete no action", - expect: []string{"alter table t add constraint ibfk_1_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, algorithm = copy"}, + expect: []string{"alter table t add constraint ibfk_1_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action"}, }, { // stript out table name alter: "alter table t add constraint t_ibfk_1 foreign key (parent_id) references onlineddl_test_parent (id) on delete no action", - expect: []string{"alter table t add constraint ibfk_1_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, algorithm = copy"}, + expect: []string{"alter table t add constraint ibfk_1_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action"}, }, { alter: "alter table t add constraint t_ibfk_1 foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, add constraint some_check check (id != 1)", - expect: []string{"alter table t add constraint ibfk_1_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, add constraint 
some_check_aulpn7bjeortljhguy86phdn9 check (id != 1), algorithm = copy"}, + expect: []string{"alter table t add constraint ibfk_1_6fmhzdlya89128u5j3xapq34i foreign key (parent_id) references onlineddl_test_parent (id) on delete no action, add constraint some_check_aulpn7bjeortljhguy86phdn9 check (id != 1)"}, }, { alter: "alter table t drop foreign key t_ibfk_1", m: map[string]string{ "t_ibfk_1": "ibfk_1_aaaaaaaaaaaaaa", }, - expect: []string{"alter table t drop foreign key ibfk_1_aaaaaaaaaaaaaa, algorithm = copy"}, + expect: []string{"alter table t drop foreign key ibfk_1_aaaaaaaaaaaaaa"}, }, } + for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.alter) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -264,10 +285,14 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { for k, v := range tc.m { m[k] = v } + if tc.mySQLVersion == "" { + tc.mySQLVersion = testMySQLVersion + } + capableOf := mysql.ServerVersionCapableOf(tc.mySQLVersion) onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "t", Options: "--unsafe-allow-foreign-keys"} - alters, err := e.validateAndEditAlterTableStatement(context.Background(), onlineDDL, alterTable, m) + alters, err := e.validateAndEditAlterTableStatement(capableOf, onlineDDL, alterTable, m) assert.NoError(t, err) - altersStrings := []string{} + var altersStrings []string for _, alter := range alters { altersStrings = append(altersStrings, sqlparser.String(alter)) } @@ -277,7 +302,9 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { } func TestAddInstantAlgorithm(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "AddInstantAlgorithmTest"), + } tt := []struct { alter string expect string @@ -301,7 +328,7 @@ func TestAddInstantAlgorithm(t *testing.T) { } for _, tc := range tt 
{ t.Run(tc.alter, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.alter) + stmt, err := e.env.Environment().Parser().ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -311,7 +338,7 @@ func TestAddInstantAlgorithm(t *testing.T) { assert.Equal(t, tc.expect, alterInstant) - stmt, err = sqlparser.ParseStrictDDL(alterInstant) + stmt, err = e.env.Environment().Parser().ParseStrictDDL(alterInstant) require.NoError(t, err) _, ok = stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -320,7 +347,9 @@ func TestAddInstantAlgorithm(t *testing.T) { } func TestDuplicateCreateTable(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "DuplicateCreateTableTest"), + } ctx := context.Background() onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "something", Strategy: "vitess", Options: "--unsafe-allow-foreign-keys"} @@ -356,3 +385,109 @@ func TestDuplicateCreateTable(t *testing.T) { }) } } + +func TestShouldCutOverAccordingToBackoff(t *testing.T) { + tcases := []struct { + name string + + shouldForceCutOverIndicator bool + forceCutOverAfter time.Duration + sinceReadyToComplete time.Duration + sinceLastCutoverAttempt time.Duration + cutoverAttempts int64 + + expectShouldCutOver bool + expectShouldForceCutOver bool + }{ + { + name: "no reason why not, normal cutover", + expectShouldCutOver: true, + }, + { + name: "backoff", + cutoverAttempts: 1, + expectShouldCutOver: false, + }, + { + name: "more backoff", + cutoverAttempts: 3, + expectShouldCutOver: false, + }, + { + name: "more backoff, since last cutover", + cutoverAttempts: 3, + sinceLastCutoverAttempt: time.Second, + expectShouldCutOver: false, + }, + { + name: "no backoff, long since last cutover", + cutoverAttempts: 3, + sinceLastCutoverAttempt: time.Hour, + expectShouldCutOver: true, + }, + { + name: "many attempts, long since last cutover", + cutoverAttempts: 
3000, + sinceLastCutoverAttempt: time.Hour, + expectShouldCutOver: true, + }, + { + name: "force cutover", + shouldForceCutOverIndicator: true, + expectShouldCutOver: true, + expectShouldForceCutOver: true, + }, + { + name: "force cutover overrides backoff", + cutoverAttempts: 3, + shouldForceCutOverIndicator: true, + expectShouldCutOver: true, + expectShouldForceCutOver: true, + }, + { + name: "backoff; cutover-after not in effect yet", + cutoverAttempts: 3, + forceCutOverAfter: time.Second, + expectShouldCutOver: false, + expectShouldForceCutOver: false, + }, + { + name: "backoff; cutover-after still not in effect yet", + cutoverAttempts: 3, + forceCutOverAfter: time.Second, + sinceReadyToComplete: time.Millisecond, + expectShouldCutOver: false, + expectShouldForceCutOver: false, + }, + { + name: "cutover-after overrides backoff", + cutoverAttempts: 3, + forceCutOverAfter: time.Second, + sinceReadyToComplete: time.Second * 2, + expectShouldCutOver: true, + expectShouldForceCutOver: true, + }, + { + name: "cutover-after overrides backoff, realistic value", + cutoverAttempts: 300, + sinceLastCutoverAttempt: time.Minute, + forceCutOverAfter: time.Hour, + sinceReadyToComplete: time.Hour * 2, + expectShouldCutOver: true, + expectShouldForceCutOver: true, + }, + } + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + shouldCutOver, shouldForceCutOver := shouldCutOverAccordingToBackoff( + tcase.shouldForceCutOverIndicator, + tcase.forceCutOverAfter, + tcase.sinceReadyToComplete, + tcase.sinceLastCutoverAttempt, + tcase.cutoverAttempts, + ) + assert.Equal(t, tcase.expectShouldCutOver, shouldCutOver) + assert.Equal(t, tcase.expectShouldForceCutOver, shouldForceCutOver) + }) + } +} diff --git a/go/vt/vttablet/onlineddl/schema.go b/go/vt/vttablet/onlineddl/schema.go index 1cef44e08d3..2ba566703e5 100644 --- a/go/vt/vttablet/onlineddl/schema.go +++ b/go/vt/vttablet/onlineddl/schema.go @@ -98,7 +98,7 @@ const ( ` sqlSetMigrationReadyToComplete = `UPDATE 
_vt.schema_migrations SET ready_to_complete=1, - ready_to_complete_timestamp=NOW(6) + ready_to_complete_timestamp=IFNULL(ready_to_complete_timestamp, NOW(6)) WHERE migration_uuid=%a ` @@ -159,7 +159,8 @@ const ( migration_uuid=%a ` sqlIncrementCutoverAttempts = `UPDATE _vt.schema_migrations - SET cutover_attempts=cutover_attempts+1 + SET cutover_attempts=cutover_attempts+1, + last_cutover_attempt_timestamp=NOW() WHERE migration_uuid=%a ` @@ -168,13 +169,18 @@ const ( WHERE migration_uuid=%a ` + sqlUpdateForceCutOver = `UPDATE _vt.schema_migrations + SET force_cutover=1 + WHERE + migration_uuid=%a + ` sqlUpdateLaunchMigration = `UPDATE _vt.schema_migrations SET postpone_launch=0 WHERE migration_uuid=%a AND postpone_launch != 0 ` - sqlUpdateCompleteMigration = `UPDATE _vt.schema_migrations + sqlClearPostponeCompletion = `UPDATE _vt.schema_migrations SET postpone_completion=0 WHERE migration_uuid=%a @@ -254,6 +260,7 @@ const ( liveness_timestamp=NULL, cancelled_timestamp=NULL, completed_timestamp=NULL, + last_cutover_attempt_timestamp=NULL, cleanup_timestamp=NULL WHERE migration_status IN ('failed', 'cancelled') @@ -274,6 +281,7 @@ const ( liveness_timestamp=NULL, cancelled_timestamp=NULL, completed_timestamp=NULL, + last_cutover_attempt_timestamp=NULL, cleanup_timestamp=NULL WHERE migration_status IN ('failed', 'cancelled') @@ -287,6 +295,10 @@ const ( sqlSelectRunningMigrations = `SELECT migration_uuid, postpone_completion, + force_cutover, + cutover_attempts, + ifnull(timestampdiff(second, ready_to_complete_timestamp, now()), 0) as seconds_since_ready_to_complete, + ifnull(timestampdiff(second, last_cutover_attempt_timestamp, now()), 0) as seconds_since_last_cutover_attempt, timestampdiff(second, started_timestamp, now()) as elapsed_seconds FROM _vt.schema_migrations WHERE @@ -526,8 +538,9 @@ const ( sqlAnalyzeTable = "ANALYZE NO_WRITE_TO_BINLOG TABLE `%a`" sqlShowCreateTable = "SHOW CREATE TABLE `%a`" sqlShowVariablesLikePreserveForeignKey = "show global variables 
like 'rename_table_preserve_foreign_key'" - sqlEnablePreserveForeignKey = "set @@rename_table_preserve_foreign_key = 1" - sqlDisablePreserveForeignKey = "set @@rename_table_preserve_foreign_key = 0" + sqlShowVariablesLikeFastAnalyzeTable = "show global variables like 'fast_analyze_table'" + sqlEnableFastAnalyzeTable = "set @@fast_analyze_table = 1" + sqlDisableFastAnalyzeTable = "set @@fast_analyze_table = 0" sqlGetAutoIncrement = ` SELECT AUTO_INCREMENT @@ -567,12 +580,22 @@ const ( _vt.copy_state WHERE vrepl_id=%a ` - sqlSwapTables = "RENAME TABLE `%a` TO `%a`, `%a` TO `%a`, `%a` TO `%a`" - sqlRenameTable = "RENAME TABLE `%a` TO `%a`" - sqlLockTwoTablesWrite = "LOCK TABLES `%a` WRITE, `%a` WRITE" - sqlUnlockTables = "UNLOCK TABLES" - sqlCreateSentryTable = "CREATE TABLE IF NOT EXISTS `%a` (id INT PRIMARY KEY)" - sqlFindProcess = "SELECT id, Info as info FROM information_schema.processlist WHERE id=%a AND Info LIKE %a" + sqlSwapTables = "RENAME TABLE `%a` TO `%a`, `%a` TO `%a`, `%a` TO `%a`" + sqlRenameTable = "RENAME TABLE `%a` TO `%a`" + sqlLockTwoTablesWrite = "LOCK TABLES `%a` WRITE, `%a` WRITE" + sqlUnlockTables = "UNLOCK TABLES" + sqlCreateSentryTable = "CREATE TABLE IF NOT EXISTS `%a` (id INT PRIMARY KEY)" + sqlFindProcess = "SELECT id, Info as info FROM information_schema.processlist WHERE id=%a AND Info LIKE %a" + sqlFindProcessByInfo = "SELECT id, Info as info FROM information_schema.processlist WHERE Info LIKE %a and id != connection_id()" + sqlProcessWithLocksOnTable = ` + SELECT + DISTINCT innodb_trx.trx_mysql_thread_id + from + performance_schema.data_locks + join information_schema.innodb_trx on (data_locks.ENGINE_TRANSACTION_ID=innodb_trx.trx_id) + where + data_locks.OBJECT_SCHEMA=database() AND data_locks.OBJECT_NAME=%a + ` ) var ( diff --git a/go/vt/vttablet/onlineddl/util.go b/go/vt/vttablet/onlineddl/util.go index 305b01c057f..3d06e6df60e 100644 --- a/go/vt/vttablet/onlineddl/util.go +++ b/go/vt/vttablet/onlineddl/util.go @@ -26,15 +26,10 @@ 
import ( "os/exec" "path/filepath" "strings" - "time" "vitess.io/vitess/go/vt/log" ) -const ( - readableTimeFormat = "20060102150405" -) - // execCmd searches the PATH for a command and runs it, logging the output. // If input is not nil, pipe it to the command's stdin. func execCmd(name string, args, env []string, dir string, input io.Reader, output io.Writer) (cmd *exec.Cmd, err error) { @@ -89,17 +84,3 @@ func RandomHash() string { hasher.Write(rb) return hex.EncodeToString(hasher.Sum(nil)) } - -// ToReadableTimestamp returns a timestamp, in seconds resolution, that is human readable -// (as opposed to unix timestamp which is just a number) -// Example: for Aug 25 2020, 16:04:25 we return "20200825160425" -func ToReadableTimestamp(t time.Time) string { - return t.Format(readableTimeFormat) -} - -// ReadableTimestamp returns a timestamp, in seconds resolution, that is human readable -// (as opposed to unix timestamp which is just a number), and which corresponds to the time now. -// Example: for Aug 25 2020, 16:04:25 we return "20200825160425" -func ReadableTimestamp() string { - return ToReadableTimestamp(time.Now()) -} diff --git a/go/vt/vttablet/onlineddl/util_test.go b/go/vt/vttablet/onlineddl/util_test.go index 707e321c6f5..4beb154c0ae 100644 --- a/go/vt/vttablet/onlineddl/util_test.go +++ b/go/vt/vttablet/onlineddl/util_test.go @@ -18,7 +18,6 @@ package onlineddl import ( "testing" - "time" "github.com/stretchr/testify/assert" ) @@ -31,11 +30,3 @@ func TestRandomHash(t *testing.T) { assert.Equal(t, len(h2), 64) assert.NotEqual(t, h1, h2) } - -func TestToReadableTimestamp(t *testing.T) { - ti, err := time.Parse(time.UnixDate, "Wed Feb 25 11:06:39 PST 2015") - assert.NoError(t, err) - - readableTimestamp := ToReadableTimestamp(ti) - assert.Equal(t, readableTimestamp, "20150225110639") -} diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go index 8432f79b506..847e40e3fbc 100644 --- a/go/vt/vttablet/onlineddl/vrepl.go +++ 
b/go/vt/vttablet/onlineddl/vrepl.go @@ -27,7 +27,7 @@ import ( "context" "errors" "fmt" - "math" + "net/url" "strconv" "strings" @@ -37,8 +37,10 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/dbconnpool" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/onlineddl/vrepl" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -135,10 +137,14 @@ type VRepl struct { parser *vrepl.AlterTableParser convertCharset map[string](*binlogdatapb.CharsetConversion) + + env *vtenv.Environment } // NewVRepl creates a VReplication handler for Online DDL -func NewVRepl(workflow string, +func NewVRepl( + env *vtenv.Environment, + workflow string, keyspace string, shard string, dbName string, @@ -150,6 +156,7 @@ func NewVRepl(workflow string, analyzeTable bool, ) *VRepl { return &VRepl{ + env: env, workflow: workflow, keyspace: keyspace, shard: shard, @@ -167,7 +174,7 @@ func NewVRepl(workflow string, } } -// readAutoIncrement reads the AUTO_INCREMENT vlaue, if any, for a give ntable +// readAutoIncrement reads the AUTO_INCREMENT value, if any, for a give ntable func (v *VRepl) readAutoIncrement(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) (autoIncrement uint64, err error) { query, err := sqlparser.ParseAndBind(sqlGetAutoIncrement, sqltypes.StringBindVariable(v.dbName), @@ -177,7 +184,7 @@ func (v *VRepl) readAutoIncrement(ctx context.Context, conn *dbconnpool.DBConnec return 0, err } - rs, err := conn.ExecuteFetch(query, math.MaxInt64, true) + rs, err := conn.ExecuteFetch(query, -1, true) if err != nil { return 0, err } @@ -191,7 +198,7 @@ func (v *VRepl) readAutoIncrement(ctx context.Context, conn *dbconnpool.DBConnec // readTableColumns reads column list from given table func (v *VRepl) readTableColumns(ctx context.Context, conn 
*dbconnpool.DBConnection, tableName string) (columns *vrepl.ColumnList, virtualColumns *vrepl.ColumnList, pkColumns *vrepl.ColumnList, err error) { parsed := sqlparser.BuildParsedQuery(sqlShowColumnsFrom, tableName) - rs, err := conn.ExecuteFetch(parsed.Query, math.MaxInt64, true) + rs, err := conn.ExecuteFetch(parsed.Query, -1, true) if err != nil { return nil, nil, nil, err } @@ -229,7 +236,7 @@ func (v *VRepl) readTableUniqueKeys(ctx context.Context, conn *dbconnpool.DBConn if err != nil { return nil, err } - rs, err := conn.ExecuteFetch(query, math.MaxInt64, true) + rs, err := conn.ExecuteFetch(query, -1, true) if err != nil { return nil, err } @@ -247,17 +254,47 @@ func (v *VRepl) readTableUniqueKeys(ctx context.Context, conn *dbconnpool.DBConn return uniqueKeys, nil } +// isFastAnalyzeTableSupported checks if the underlying MySQL server supports 'fast_analyze_table', +// introduced by a fork of MySQL: https://github.com/planetscale/mysql-server/commit/c8a9d93686358dabfba8f3dc5cc0621e3149fe78 +// When `fast_analyze_table=1`, an `ANALYZE TABLE` command only analyzes the clustering index (normally the `PRIMARY KEY`). +// This is useful when you want to get a better estimate of the number of table rows, as fast as possible. +func (v *VRepl) isFastAnalyzeTableSupported(ctx context.Context, conn *dbconnpool.DBConnection) (isSupported bool, err error) { + rs, err := conn.ExecuteFetch(sqlShowVariablesLikeFastAnalyzeTable, -1, true) + if err != nil { + return false, err + } + return len(rs.Rows) > 0, nil +} + // executeAnalyzeTable runs an ANALYZE TABLE command func (v *VRepl) executeAnalyzeTable(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) error { + fastAnalyzeTableSupported, err := v.isFastAnalyzeTableSupported(ctx, conn) + if err != nil { + return err + } + if fastAnalyzeTableSupported { + // This code is only applicable when MySQL supports the 'fast_analyze_table' variable. This variable + // does not exist in vanilla MySQL. 
+ // See https://github.com/planetscale/mysql-server/commit/c8a9d93686358dabfba8f3dc5cc0621e3149fe78 + // as part of https://github.com/planetscale/mysql-server/releases/tag/8.0.34-ps1. + if _, err := conn.ExecuteFetch(sqlEnableFastAnalyzeTable, 1, false); err != nil { + return err + } + log.Infof("@@fast_analyze_table enabled") + defer conn.ExecuteFetch(sqlDisableFastAnalyzeTable, 1, false) + } + parsed := sqlparser.BuildParsedQuery(sqlAnalyzeTable, tableName) - _, err := conn.ExecuteFetch(parsed.Query, 1, false) - return err + if _, err := conn.ExecuteFetch(parsed.Query, 1, false); err != nil { + return err + } + return nil } // readTableStatus reads table status information func (v *VRepl) readTableStatus(ctx context.Context, conn *dbconnpool.DBConnection, tableName string) (tableRows int64, err error) { parsed := sqlparser.BuildParsedQuery(sqlShowTableStatus, tableName) - rs, err := conn.ExecuteFetch(parsed.Query, math.MaxInt64, true) + rs, err := conn.ExecuteFetch(parsed.Query, -1, true) if err != nil { return 0, err } @@ -278,7 +315,7 @@ func (v *VRepl) applyColumnTypes(ctx context.Context, conn *dbconnpool.DBConnect if err != nil { return err } - rs, err := conn.ExecuteFetch(query, math.MaxInt64, true) + rs, err := conn.ExecuteFetch(query, -1, true) if err != nil { return err } @@ -353,7 +390,7 @@ func (v *VRepl) analyzeAlter(ctx context.Context) error { // Happens for REVERT return nil } - if err := v.parser.ParseAlterStatement(v.alterQuery); err != nil { + if err := v.parser.ParseAlterStatement(v.alterQuery, v.env.Parser()); err != nil { return err } if v.parser.IsRenameTable() { @@ -424,7 +461,7 @@ func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection } v.addedUniqueKeys = vrepl.AddedUniqueKeys(sourceUniqueKeys, targetUniqueKeys, v.parser.ColumnRenameMap()) v.removedUniqueKeys = vrepl.RemovedUniqueKeys(sourceUniqueKeys, targetUniqueKeys, v.parser.ColumnRenameMap()) - v.removedForeignKeyNames, err = 
vrepl.RemovedForeignKeyNames(v.originalShowCreateTable, v.vreplShowCreateTable) + v.removedForeignKeyNames, err = vrepl.RemovedForeignKeyNames(v.env, v.originalShowCreateTable, v.vreplShowCreateTable) if err != nil { return err } @@ -454,11 +491,26 @@ func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection for i := range v.sourceSharedColumns.Columns() { sourceColumn := v.sourceSharedColumns.Columns()[i] mappedColumn := v.targetSharedColumns.Columns()[i] - if sourceColumn.Type == vrepl.EnumColumnType && mappedColumn.Type != vrepl.EnumColumnType && mappedColumn.Charset != "" { - // A column is converted from ENUM type to textual type - v.targetSharedColumns.SetEnumToTextConversion(mappedColumn.Name, sourceColumn.EnumValues) - v.enumToTextMap[sourceColumn.Name] = sourceColumn.EnumValues + if sourceColumn.Type == vrepl.EnumColumnType { + switch { + // Either this is an ENUM column that stays an ENUM, or it is converted to a textual type. + // We take note of the enum values, and make it available in vreplication's Filter.Rule.ConvertEnumToText. + // This, in turn, will be used by vplayer (in TablePlan) like so: + // - In the binary log, enum values are integers. + // - Upon seeing this map, PlanBuilder will convert said int to the enum's logical string value. + // - And will apply the value as a string (`StringBindVariable`) in the query. 
+ // What this allows is for enum values to have different ordering in the before/after table schema, + // so that for example you could modify an enum column: + // - from `('red', 'green', 'blue')` to `('red', 'blue')` + // - from `('red', 'green', 'blue')` to `('blue', 'red', 'green')` + case mappedColumn.Type == vrepl.EnumColumnType: + v.enumToTextMap[sourceColumn.Name] = sourceColumn.EnumValues + case mappedColumn.Charset != "": + v.enumToTextMap[sourceColumn.Name] = sourceColumn.EnumValues + v.targetSharedColumns.SetEnumToTextConversion(mappedColumn.Name, sourceColumn.EnumValues) + } } + if sourceColumn.IsIntegralType() && mappedColumn.Type == vrepl.EnumColumnType { v.intToEnumMap[sourceColumn.Name] = true } @@ -522,11 +574,11 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error { case sourceCol.Type == vrepl.StringColumnType: // Check source and target charset/encoding. If needed, create // a binlogdatapb.CharsetConversion entry (later written to vreplication) - fromCollation := collations.Local().DefaultCollationForCharset(sourceCol.Charset) + fromCollation := v.env.CollationEnv().DefaultCollationForCharset(sourceCol.Charset) if fromCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", sourceCol.Charset, sourceCol.Name) } - toCollation := collations.Local().DefaultCollationForCharset(targetCol.Charset) + toCollation := v.env.CollationEnv().DefaultCollationForCharset(targetCol.Charset) // Let's see if target col is at all textual if targetCol.Type == vrepl.StringColumnType && toCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", targetCol.Charset, targetCol.Name) @@ -582,6 +634,7 @@ func (v *VRepl) analyzeBinlogSource(ctx context.Context) { SourceUniqueKeyColumns: encodeColumns(&v.chosenSourceUniqueKey.Columns), TargetUniqueKeyColumns: encodeColumns(&v.chosenTargetUniqueKey.Columns), 
SourceUniqueKeyTargetColumns: encodeColumns(v.chosenSourceUniqueKey.Columns.MappedNamesColumnList(v.sharedColumnsMap)), + ForceUniqueKey: url.QueryEscape(v.chosenSourceUniqueKey.Name), } if len(v.convertCharset) > 0 { rule.ConvertCharset = v.convertCharset @@ -611,7 +664,7 @@ func (v *VRepl) analyze(ctx context.Context, conn *dbconnpool.DBConnection) erro return nil } -// generateInsertStatement generates the INSERT INTO _vt.replication stataement that creates the vreplication workflow +// generateInsertStatement generates the INSERT INTO _vt.replication statement that creates the vreplication workflow func (v *VRepl) generateInsertStatement(ctx context.Context) (string, error) { ig := vreplication.NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, v.dbName) ig.AddRow(v.workflow, v.bls, v.pos, "", "in_order:REPLICA,PRIMARY", @@ -628,16 +681,16 @@ func (v *VRepl) generateStartStatement(ctx context.Context) (string, error) { ) } -func getVreplTable(ctx context.Context, s *VReplStream) (string, error) { +func getVreplTable(s *VReplStream) (string, error) { // sanity checks: if s == nil { - return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No vreplication stream migration %s", s.workflow) + return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No vreplication stream migration") } if s.bls.Filter == nil { return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No binlog source filter for migration %s", s.workflow) } if len(s.bls.Filter.Rules) != 1 { - return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Cannot detect filter rules for migration/vreplication %+v", s.workflow) + return "", vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "Cannot detect filter rules for migration/vreplication %s", s.workflow) } vreplTable := s.bls.Filter.Rules[0].Match return vreplTable, nil diff --git a/go/vt/vttablet/onlineddl/vrepl/columns.go b/go/vt/vttablet/onlineddl/vrepl/columns.go index 2937b1b2b2c..f2bb8f6d3f2 100644 --- a/go/vt/vttablet/onlineddl/vrepl/columns.go +++ 
b/go/vt/vttablet/onlineddl/vrepl/columns.go @@ -129,7 +129,7 @@ func isExpandedColumn(sourceColumn *Column, targetColumn *Column) (bool, string) return true, "source is unsigned, target is signed" } if sourceColumn.NumericPrecision <= targetColumn.NumericPrecision && !sourceColumn.IsUnsigned && targetColumn.IsUnsigned { - // e.g. INT SIGNED => INT UNSIGNED, INT SIGNED = BIGINT UNSIGNED + // e.g. INT SIGNED => INT UNSIGNED, INT SIGNED => BIGINT UNSIGNED return true, "target unsigned value exceeds source unsigned value" } if targetColumn.IsFloatingPoint() && !sourceColumn.IsFloatingPoint() { diff --git a/go/vt/vttablet/onlineddl/vrepl/columns_test.go b/go/vt/vttablet/onlineddl/vrepl/columns_test.go index b4d3ac9af58..32efd104cc1 100644 --- a/go/vt/vttablet/onlineddl/vrepl/columns_test.go +++ b/go/vt/vttablet/onlineddl/vrepl/columns_test.go @@ -133,3 +133,248 @@ func TestGetSharedColumns(t *testing.T) { }) } } + +func TestGetExpandedColumnNames(t *testing.T) { + var ( + columnsA = &ColumnList{ + columns: []Column{ + { + Name: "c1", + IsNullable: true, + }, + { + Name: "c2", + IsNullable: true, + }, + { + Name: "c3", + IsNullable: false, + }, + }, + Ordinals: ColumnsMap{}, + } + columnsB = &ColumnList{ + columns: []Column{ + { + Name: "c1", + IsNullable: true, + }, + { + Name: "c2", + IsNullable: false, + }, + { + Name: "c3", + IsNullable: true, + }, + }, + Ordinals: ColumnsMap{}, + } + ) + tcases := []struct { + name string + sourceCol Column + targetCol Column + expanded bool + }{ + { + "both nullable", + Column{ + IsNullable: true, + }, + Column{ + IsNullable: true, + }, + false, + }, + { + "nullable to non nullable", + Column{ + IsNullable: true, + }, + Column{ + IsNullable: false, + }, + false, + }, + { + "non nullable to nullable", + Column{ + IsNullable: false, + }, + Column{ + IsNullable: true, + }, + true, + }, + { + "signed to unsigned", + Column{ + Type: IntegerColumnType, + NumericPrecision: 4, + IsUnsigned: false, + }, + Column{ + Type: IntegerColumnType, 
+ NumericPrecision: 4, + IsUnsigned: true, + }, + true, + }, + { + "unsigned to signed", + Column{ + Type: IntegerColumnType, + NumericPrecision: 4, + IsUnsigned: true, + }, + Column{ + Type: IntegerColumnType, + NumericPrecision: 4, + IsUnsigned: false, + }, + true, + }, + { + "signed to smaller unsigned", + Column{ + Type: IntegerColumnType, + NumericPrecision: 8, + IsUnsigned: false, + }, + Column{ + Type: IntegerColumnType, + NumericPrecision: 4, + IsUnsigned: true, + }, + false, + }, + { + "same char length", + Column{ + CharacterMaximumLength: 20, + }, + Column{ + CharacterMaximumLength: 20, + }, + false, + }, + { + "reduced char length", + Column{ + CharacterMaximumLength: 20, + }, + Column{ + CharacterMaximumLength: 19, + }, + false, + }, + { + "increased char length", + Column{ + CharacterMaximumLength: 20, + }, + Column{ + CharacterMaximumLength: 21, + }, + true, + }, + { + "expand temporal", + Column{ + DataType: "time", + }, + Column{ + DataType: "timestamp", + }, + true, + }, + { + "expand temporal", + Column{ + DataType: "date", + }, + Column{ + DataType: "timestamp", + }, + true, + }, + { + "expand temporal", + Column{ + DataType: "date", + }, + Column{ + DataType: "datetime", + }, + true, + }, + { + "non expand temporal", + Column{ + DataType: "datetime", + }, + Column{ + DataType: "timestamp", + }, + false, + }, + { + "expand temporal", + Column{ + DataType: "timestamp", + }, + Column{ + DataType: "datetime", + }, + true, + }, + { + "expand enum", + Column{ + Type: EnumColumnType, + EnumValues: "'a','b'", + }, + Column{ + Type: EnumColumnType, + EnumValues: "'a','x'", + }, + true, + }, + { + "expand enum", + Column{ + Type: EnumColumnType, + EnumValues: "'a','b'", + }, + Column{ + Type: EnumColumnType, + EnumValues: "'a','b','c'", + }, + true, + }, + { + "reduce enum", + Column{ + Type: EnumColumnType, + EnumValues: "'a','b','c'", + }, + Column{ + Type: EnumColumnType, + EnumValues: "'a','b'", + }, + false, + }, + } + + expectedExpandedColumnNames 
:= []string{"c3"} + expandedColumnNames, _ := GetExpandedColumnNames(columnsA, columnsB) + assert.Equal(t, expectedExpandedColumnNames, expandedColumnNames) + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + expanded, _ := isExpandedColumn(&tcase.sourceCol, &tcase.targetCol) + assert.Equal(t, tcase.expanded, expanded) + }) + } +} diff --git a/go/vt/vttablet/onlineddl/vrepl/foreign_key.go b/go/vt/vttablet/onlineddl/vrepl/foreign_key.go index f0925594ec0..79e2df614f4 100644 --- a/go/vt/vttablet/onlineddl/vrepl/foreign_key.go +++ b/go/vt/vttablet/onlineddl/vrepl/foreign_key.go @@ -23,18 +23,23 @@ package vrepl import ( "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" ) // RemovedForeignKeyNames returns the names of removed foreign keys, ignoring mere name changes func RemovedForeignKeyNames( + venv *vtenv.Environment, originalCreateTable string, vreplCreateTable string, ) (names []string, err error) { if originalCreateTable == "" || vreplCreateTable == "" { return nil, nil } - diffHints := schemadiff.DiffHints{ConstraintNamesStrategy: schemadiff.ConstraintNamesIgnoreAll} - diff, err := schemadiff.DiffCreateTablesQueries(originalCreateTable, vreplCreateTable, &diffHints) + env := schemadiff.NewEnv(venv, venv.CollationEnv().DefaultConnectionCharset()) + diffHints := schemadiff.DiffHints{ + ConstraintNamesStrategy: schemadiff.ConstraintNamesIgnoreAll, + } + diff, err := schemadiff.DiffCreateTablesQueries(env, originalCreateTable, vreplCreateTable, &diffHints) if err != nil { return nil, err } diff --git a/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go b/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go index 619ba4847d9..95b2c84e66e 100644 --- a/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go +++ b/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go @@ -24,6 +24,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/vtenv" ) func 
TestRemovedForeignKeyNames(t *testing.T) { @@ -66,7 +68,7 @@ func TestRemovedForeignKeyNames(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.before, func(t *testing.T) { - names, err := RemovedForeignKeyNames(tcase.before, tcase.after) + names, err := RemovedForeignKeyNames(vtenv.NewTestEnv(), tcase.before, tcase.after) assert.NoError(t, err) assert.Equal(t, tcase.names, names) }) diff --git a/go/vt/vttablet/onlineddl/vrepl/parser.go b/go/vt/vttablet/onlineddl/vrepl/parser.go index 87f82cb8096..b5648adeabe 100644 --- a/go/vt/vttablet/onlineddl/vrepl/parser.go +++ b/go/vt/vttablet/onlineddl/vrepl/parser.go @@ -78,8 +78,8 @@ func (p *AlterTableParser) analyzeAlter(alterTable *sqlparser.AlterTable) { } // ParseAlterStatement is the main function of th eparser, and parses an ALTER TABLE statement -func (p *AlterTableParser) ParseAlterStatement(alterQuery string) (err error) { - stmt, err := sqlparser.ParseStrictDDL(alterQuery) +func (p *AlterTableParser) ParseAlterStatement(alterQuery string, parser *sqlparser.Parser) (err error) { + stmt, err := parser.ParseStrictDDL(alterQuery) if err != nil { return err } @@ -112,7 +112,7 @@ func (p *AlterTableParser) DroppedColumnsMap() map[string]bool { return p.droppedColumns } -// IsRenameTable returns true when the ALTER TABLE statement inclusdes renaming the table +// IsRenameTable returns true when the ALTER TABLE statement includes renaming the table func (p *AlterTableParser) IsRenameTable() bool { return p.isRenameTable } diff --git a/go/vt/vttablet/onlineddl/vrepl/parser_test.go b/go/vt/vttablet/onlineddl/vrepl/parser_test.go index f849b1d741d..2a7031f3a98 100644 --- a/go/vt/vttablet/onlineddl/vrepl/parser_test.go +++ b/go/vt/vttablet/onlineddl/vrepl/parser_test.go @@ -24,12 +24,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestParseAlterStatement(t *testing.T) { statement := "alter table t add column t int, engine=innodb" parser := 
NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -38,7 +40,7 @@ func TestParseAlterStatement(t *testing.T) { func TestParseAlterStatementTrivialRename(t *testing.T) { statement := "alter table t add column t int, change ts ts timestamp, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -66,7 +68,7 @@ func TestParseAlterStatementWithAutoIncrement(t *testing.T) { for _, statement := range statements { parser := NewAlterTableParser() statement := "alter table t " + statement - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.True(t, parser.IsAutoIncrementDefined()) } @@ -75,7 +77,7 @@ func TestParseAlterStatementWithAutoIncrement(t *testing.T) { func TestParseAlterStatementTrivialRenames(t *testing.T) { statement := "alter table t add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -98,7 +100,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) { for _, statement := range statements { statement := "alter table t " + statement parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, 
parser.IsAutoIncrementDefined()) renames := parser.GetNonTrivialRenames() @@ -113,7 +115,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 1) assert.True(t, parser.droppedColumns["b"]) @@ -121,7 +123,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop key c_idx, drop column `d`" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 2) assert.True(t, parser.droppedColumns["b"]) @@ -130,7 +132,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 3) assert.True(t, parser.droppedColumns["b"]) @@ -140,7 +142,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop bad statement, add column i int" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.Error(t, err) } } @@ -177,7 +179,7 @@ func TestParseAlterStatementRenameTable(t *testing.T) { for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { parser := NewAlterTableParser() - err := parser.ParseAlterStatement(tc.alter) + err := parser.ParseAlterStatement(tc.alter, sqlparser.NewTestParser()) 
assert.NoError(t, err) assert.Equal(t, tc.isRename, parser.isRenameTable) }) diff --git a/go/vt/vttablet/onlineddl/vrepl/types.go b/go/vt/vttablet/onlineddl/vrepl/types.go index e4ddff6d58e..0ca834ffdf0 100644 --- a/go/vt/vttablet/onlineddl/vrepl/types.go +++ b/go/vt/vttablet/onlineddl/vrepl/types.go @@ -207,7 +207,7 @@ func (l *ColumnList) Equals(other *ColumnList) bool { return reflect.DeepEqual(l.Columns, other.Columns) } -// EqualsByNames chcks if the names in this list equals the names of another list, in order. Type is ignored. +// EqualsByNames checks if the names in this list equals the names of another list, in order. Type is ignored. func (l *ColumnList) EqualsByNames(other *ColumnList) bool { return reflect.DeepEqual(l.Names(), other.Names()) } @@ -252,7 +252,7 @@ func (l *ColumnList) MappedNamesColumnList(columnNamesMap map[string]string) *Co return NewColumnList(names) } -// SetEnumToTextConversion tells this column list that an enum is conveted to text +// SetEnumToTextConversion tells this column list that an enum is converted to text func (l *ColumnList) SetEnumToTextConversion(columnName string, enumValues string) { l.GetColumn(columnName).EnumToTextConversion = true l.GetColumn(columnName).EnumValues = enumValues diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 0c8485f97e5..618a87b1d81 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -125,9 +125,21 @@ type SandboxConn struct { // this error will only happen once EphemeralShardErr error + // if this is not nil, any calls will panic the tablet + panicThis interface{} + NotServing bool - getSchemaResult []map[string]string + getSchemaResult []SchemaResult + + parser *sqlparser.Parser + + streamHealthResponse *querypb.StreamHealthResponse +} + +type SchemaResult struct { + TablesAndViews map[string]string + UDFs []*querypb.UDFInfo } var _ queryservice.QueryService = (*SandboxConn)(nil) // 
compile-time interface check @@ -139,6 +151,7 @@ func NewSandboxConn(t *topodatapb.Tablet) *SandboxConn { MustFailCodes: make(map[vtrpcpb.Code]int), MustFailExecute: make(map[sqlparser.StatementType]int), txIDToRID: make(map[int64]int64), + parser: sqlparser.NewTestParser(), } } @@ -197,12 +210,13 @@ func (sbc *SandboxConn) SetResults(r []*sqltypes.Result) { } // SetSchemaResult sets what GetSchema should return on each call. -func (sbc *SandboxConn) SetSchemaResult(r []map[string]string) { +func (sbc *SandboxConn) SetSchemaResult(r []SchemaResult) { sbc.getSchemaResult = r } // Execute is part of the QueryService interface. func (sbc *SandboxConn) Execute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, transactionID, reservedID int64, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { + sbc.panicIfNeeded() sbc.execMu.Lock() defer sbc.execMu.Unlock() sbc.ExecCount.Add(1) @@ -225,7 +239,7 @@ func (sbc *SandboxConn) Execute(ctx context.Context, target *querypb.Target, que return nil, err } - stmt, _ := sqlparser.Parse(query) // knowingly ignoring the error + stmt, _ := sbc.parser.Parse(query) // knowingly ignoring the error if sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] > 0 { sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] = sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] - 1 return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "failed query: %v", query) @@ -235,6 +249,7 @@ func (sbc *SandboxConn) Execute(ctx context.Context, target *querypb.Target, que // StreamExecute is part of the QueryService interface. 
func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Target, query string, bindVars map[string]*querypb.BindVariable, transactionID int64, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { + sbc.panicIfNeeded() sbc.sExecMu.Lock() sbc.ExecCount.Add(1) bv := make(map[string]*querypb.BindVariable) @@ -251,7 +266,7 @@ func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Targe sbc.sExecMu.Unlock() return err } - parse, _ := sqlparser.Parse(query) + parse, _ := sbc.parser.Parse(query) if sbc.results == nil { nextRs := sbc.getNextResult(parse) @@ -275,6 +290,7 @@ func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Targe // Begin is part of the QueryService interface. func (sbc *SandboxConn) Begin(ctx context.Context, target *querypb.Target, options *querypb.ExecuteOptions) (queryservice.TransactionState, error) { + sbc.panicIfNeeded() return sbc.begin(ctx, target, nil, 0, options) } @@ -300,6 +316,7 @@ func (sbc *SandboxConn) begin(ctx context.Context, target *querypb.Target, preQu // Commit is part of the QueryService interface. func (sbc *SandboxConn) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (int64, error) { + sbc.panicIfNeeded() sbc.CommitCount.Add(1) reservedID := sbc.getTxReservedID(transactionID) if reservedID != 0 { @@ -320,6 +337,7 @@ func (sbc *SandboxConn) Rollback(ctx context.Context, target *querypb.Target, tr // Prepare prepares the specified transaction. func (sbc *SandboxConn) Prepare(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + sbc.panicIfNeeded() sbc.PrepareCount.Add(1) if sbc.MustFailPrepare > 0 { sbc.MustFailPrepare-- @@ -330,6 +348,7 @@ func (sbc *SandboxConn) Prepare(ctx context.Context, target *querypb.Target, tra // CommitPrepared commits the prepared transaction. 
func (sbc *SandboxConn) CommitPrepared(ctx context.Context, target *querypb.Target, dtid string) (err error) { + sbc.panicIfNeeded() sbc.CommitPreparedCount.Add(1) if sbc.MustFailCommitPrepared > 0 { sbc.MustFailCommitPrepared-- @@ -340,6 +359,7 @@ func (sbc *SandboxConn) CommitPrepared(ctx context.Context, target *querypb.Targ // RollbackPrepared rolls back the prepared transaction. func (sbc *SandboxConn) RollbackPrepared(ctx context.Context, target *querypb.Target, dtid string, originalID int64) (err error) { + sbc.panicIfNeeded() sbc.RollbackPreparedCount.Add(1) if sbc.MustFailRollbackPrepared > 0 { sbc.MustFailRollbackPrepared-- @@ -361,6 +381,7 @@ func (sbc *SandboxConn) CreateTransaction(ctx context.Context, target *querypb.T // StartCommit atomically commits the transaction along with the // decision to commit the associated 2pc transaction. func (sbc *SandboxConn) StartCommit(ctx context.Context, target *querypb.Target, transactionID int64, dtid string) (err error) { + sbc.panicIfNeeded() sbc.StartCommitCount.Add(1) if sbc.MustFailStartCommit > 0 { sbc.MustFailStartCommit-- @@ -372,6 +393,7 @@ func (sbc *SandboxConn) StartCommit(ctx context.Context, target *querypb.Target, // SetRollback transitions the 2pc transaction to the Rollback state. // If a transaction id is provided, that transaction is also rolled back. func (sbc *SandboxConn) SetRollback(ctx context.Context, target *querypb.Target, dtid string, transactionID int64) (err error) { + sbc.panicIfNeeded() sbc.SetRollbackCount.Add(1) if sbc.MustFailSetRollback > 0 { sbc.MustFailSetRollback-- @@ -391,7 +413,7 @@ func (sbc *SandboxConn) ConcludeTransaction(ctx context.Context, target *querypb return sbc.getError() } -// ReadTransaction returns the metadata for the sepcified dtid. +// ReadTransaction returns the metadata for the specified dtid. 
func (sbc *SandboxConn) ReadTransaction(ctx context.Context, target *querypb.Target, dtid string) (metadata *querypb.TransactionMetadata, err error) { sbc.ReadTransactionCount.Add(1) if err := sbc.getError(); err != nil { @@ -407,6 +429,7 @@ func (sbc *SandboxConn) ReadTransaction(ctx context.Context, target *querypb.Tar // BeginExecute is part of the QueryService interface. func (sbc *SandboxConn) BeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, query string, bindVars map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions) (queryservice.TransactionState, *sqltypes.Result, error) { + sbc.panicIfNeeded() state, err := sbc.begin(ctx, target, preQueries, reservedID, options) if state.TransactionID != 0 { sbc.setTxReservedID(state.TransactionID, reservedID) @@ -420,6 +443,7 @@ func (sbc *SandboxConn) BeginExecute(ctx context.Context, target *querypb.Target // BeginStreamExecute is part of the QueryService interface. func (sbc *SandboxConn) BeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, reservedID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.TransactionState, error) { + sbc.panicIfNeeded() state, err := sbc.begin(ctx, target, preQueries, reservedID, options) if state.TransactionID != 0 { sbc.setTxReservedID(state.TransactionID, reservedID) @@ -453,8 +477,21 @@ func (sbc *SandboxConn) MessageAck(ctx context.Context, target *querypb.Target, // SandboxSQRowCount is the default number of fake splits returned. var SandboxSQRowCount = int64(10) -// StreamHealth always mocks a "healthy" result. +// SetStreamHealthResponse sets the StreamHealthResponse to be returned in StreamHealth. 
+func (sbc *SandboxConn) SetStreamHealthResponse(res *querypb.StreamHealthResponse) { + sbc.mapMu.Lock() + defer sbc.mapMu.Unlock() + sbc.streamHealthResponse = res +} + +// StreamHealth always mocks a "healthy" result by default. If you want to override this behavior you +// can call SetStreamHealthResponse. func (sbc *SandboxConn) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { + sbc.mapMu.Lock() + defer sbc.mapMu.Unlock() + if sbc.streamHealthResponse != nil { + return callback(sbc.streamHealthResponse) + } return nil } @@ -549,7 +586,7 @@ func (sbc *SandboxConn) VStreamResults(ctx context.Context, target *querypb.Targ } // QueryServiceByAlias is part of the Gateway interface. -func (sbc *SandboxConn) QueryServiceByAlias(_ *topodatapb.TabletAlias, _ *querypb.Target) (queryservice.QueryService, error) { +func (sbc *SandboxConn) QueryServiceByAlias(_ context.Context, _ *topodatapb.TabletAlias, _ *querypb.Target) (queryservice.QueryService, error) { return sbc, nil } @@ -564,6 +601,7 @@ func (sbc *SandboxConn) HandlePanic(err *error) { // ReserveBeginExecute implements the QueryService interface func (sbc *SandboxConn) ReserveBeginExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions) (queryservice.ReservedTransactionState, *sqltypes.Result, error) { + sbc.panicIfNeeded() reservedID := sbc.reserve(ctx, target, preQueries, bindVariables, 0, options) state, result, err := sbc.BeginExecute(ctx, target, postBeginQueries, sql, bindVariables, reservedID, options) if state.TransactionID != 0 { @@ -578,6 +616,7 @@ func (sbc *SandboxConn) ReserveBeginExecute(ctx context.Context, target *querypb // ReserveBeginStreamExecute is part of the QueryService interface. 
func (sbc *SandboxConn) ReserveBeginStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, postBeginQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedTransactionState, error) { + sbc.panicIfNeeded() reservedID := sbc.reserve(ctx, target, preQueries, bindVariables, 0, options) state, err := sbc.BeginStreamExecute(ctx, target, postBeginQueries, sql, bindVariables, reservedID, options, callback) if state.TransactionID != 0 { @@ -592,6 +631,7 @@ func (sbc *SandboxConn) ReserveBeginStreamExecute(ctx context.Context, target *q // ReserveExecute implements the QueryService interface func (sbc *SandboxConn) ReserveExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions) (queryservice.ReservedState, *sqltypes.Result, error) { + sbc.panicIfNeeded() reservedID := sbc.reserve(ctx, target, preQueries, bindVariables, transactionID, options) result, err := sbc.Execute(ctx, target, sql, bindVariables, transactionID, reservedID, options) if transactionID != 0 { @@ -605,6 +645,7 @@ func (sbc *SandboxConn) ReserveExecute(ctx context.Context, target *querypb.Targ // ReserveStreamExecute is part of the QueryService interface. 
func (sbc *SandboxConn) ReserveStreamExecute(ctx context.Context, target *querypb.Target, preQueries []string, sql string, bindVariables map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) (queryservice.ReservedState, error) { + sbc.panicIfNeeded() reservedID := sbc.reserve(ctx, target, preQueries, bindVariables, transactionID, options) err := sbc.StreamExecute(ctx, target, sql, bindVariables, transactionID, reservedID, options, callback) if transactionID != 0 { @@ -641,7 +682,12 @@ func (sbc *SandboxConn) GetSchema(ctx context.Context, target *querypb.Target, t } resp := sbc.getSchemaResult[0] sbc.getSchemaResult = sbc.getSchemaResult[1:] - return callback(&querypb.GetSchemaResponse{TableDefinition: resp}) + + response := &querypb.GetSchemaResponse{ + TableDefinition: resp.TablesAndViews, + Udfs: resp.UDFs, + } + return callback(response) } // Close does not change ExecCount @@ -766,3 +812,13 @@ var StreamRowResult = &sqltypes.Result{ sqltypes.NewVarChar("foo"), }}, } + +func (sbc *SandboxConn) SetPanic(i interface{}) { + sbc.panicThis = i +} + +func (sbc *SandboxConn) panicIfNeeded() { + if sbc.panicThis != nil { + panic(sbc.panicThis) + } +} diff --git a/go/vt/vttablet/sysloglogger/sysloglogger.go b/go/vt/vttablet/sysloglogger/sysloglogger.go index e56d47bd902..37672911e23 100644 --- a/go/vt/vttablet/sysloglogger/sysloglogger.go +++ b/go/vt/vttablet/sysloglogger/sysloglogger.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. @@ -18,8 +20,8 @@ limitations under the License. 
package sysloglogger import ( - "bytes" "log/syslog" + "strings" "github.com/spf13/pflag" @@ -76,8 +78,10 @@ func run() { } formatParams := map[string][]string{"full": {}} + + var b strings.Builder for stats := range ch { - var b bytes.Buffer + b.Reset() if err := stats.Logf(&b, formatParams); err != nil { log.Errorf("Error formatting logStats: %v", err) continue diff --git a/go/vt/vttablet/sysloglogger/sysloglogger_test.go b/go/vt/vttablet/sysloglogger/sysloglogger_test.go index c62a4396ac6..3a06b98ed1c 100644 --- a/go/vt/vttablet/sysloglogger/sysloglogger_test.go +++ b/go/vt/vttablet/sysloglogger/sysloglogger_test.go @@ -1,3 +1,5 @@ +//go:build !windows + /* Copyright 2019 The Vitess Authors. @@ -85,7 +87,7 @@ func (fw *failingFakeWriter) Close() error { return nil } // expectedLogStatsText returns the results expected from the plugin processing a dummy message generated by mockLogStats(...). func expectedLogStatsText(originalSQL string) string { return fmt.Sprintf("Execute\t\t\t''\t''\t0001-01-01 00:00:00.000000\t0001-01-01 00:00:00.000000\t0.000000\tPASS_SELECT\t"+ - "\"%s\"\t%s\t1\t\"%s\"\tmysql\t0.000000\t0.000000\t0\t0\t0\t\"\"", originalSQL, "map[]", originalSQL) + "\"%s\"\t%s\t1\t\"%s\"\tmysql\t0.000000\t0.000000\t0\t0\t0\t\"\"", originalSQL, "{}", originalSQL) } // expectedRedactedLogStatsText returns the results expected from the plugin processing a dummy message generated by mockLogStats(...) diff --git a/go/vt/vttablet/tabletconn/tablet_conn.go b/go/vt/vttablet/tabletconn/tablet_conn.go index 0c91fdd55bc..1ed806bcc53 100644 --- a/go/vt/vttablet/tabletconn/tablet_conn.go +++ b/go/vt/vttablet/tabletconn/tablet_conn.go @@ -17,6 +17,7 @@ limitations under the License. package tabletconn import ( + "context" "sync" "github.com/spf13/pflag" @@ -65,7 +66,7 @@ func init() { // timeout represents the connection timeout. If set to 0, this // connection should be established in the background and the // TabletDialer should return right away. 
-type TabletDialer func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) +type TabletDialer func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) var dialers = make(map[string]TabletDialer) diff --git a/go/vt/vttablet/tabletconntest/fakequeryservice.go b/go/vt/vttablet/tabletconntest/fakequeryservice.go index cfe540ead42..2efd7d330ed 100644 --- a/go/vt/vttablet/tabletconntest/fakequeryservice.go +++ b/go/vt/vttablet/tabletconntest/fakequeryservice.go @@ -173,7 +173,7 @@ func (f *FakeQueryService) Commit(ctx context.Context, target *querypb.Target, t return 0, nil } -// rollbackTransactionID is a test transactin id for Rollback. +// rollbackTransactionID is a test transaction id for Rollback. const rollbackTransactionID int64 = 999044 // Rollback is part of the queryservice.QueryService interface @@ -709,7 +709,7 @@ func (f *FakeQueryService) VStreamResults(ctx context.Context, target *querypb.T } // QueryServiceByAlias satisfies the Gateway interface -func (f *FakeQueryService) QueryServiceByAlias(_ *topodatapb.TabletAlias, _ *querypb.Target) (queryservice.QueryService, error) { +func (f *FakeQueryService) QueryServiceByAlias(_ context.Context, _ *topodatapb.TabletAlias, _ *querypb.Target) (queryservice.QueryService, error) { panic("not implemented") } diff --git a/go/vt/vttablet/tabletconntest/tabletconntest.go b/go/vt/vttablet/tabletconntest/tabletconntest.go index b279ac53726..f8dafb0636e 100644 --- a/go/vt/vttablet/tabletconntest/tabletconntest.go +++ b/go/vt/vttablet/tabletconntest/tabletconntest.go @@ -922,7 +922,7 @@ func testStreamHealthPanics(t *testing.T, conn queryservice.QueryService, f *Fak // TestSuite runs all the tests. // If fake.TestingGateway is set, we only test the calls that can go through // a gateway. 
-func TestSuite(t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *FakeQueryService, clientCreds *os.File) { +func TestSuite(ctx context.Context, t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *FakeQueryService, clientCreds *os.File) { tests := []func(*testing.T, queryservice.QueryService, *FakeQueryService){ // positive test cases testBegin, @@ -1015,7 +1015,7 @@ func TestSuite(t *testing.T, protocol string, tablet *topodatapb.Tablet, fake *F require.NoError(t, err, "failed to set `--grpc_auth_static_client_creds=%s`", clientCreds.Name()) } - conn, err := tabletconn.GetDialer()(tablet, grpcclient.FailFast(false)) + conn, err := tabletconn.GetDialer()(ctx, tablet, grpcclient.FailFast(false)) if err != nil { t.Fatalf("dial failed: %v", err) } diff --git a/go/vt/vttablet/tabletmanager/framework_test.go b/go/vt/vttablet/tabletmanager/framework_test.go index 4734ab9ee96..27a3a562cd3 100644 --- a/go/vt/vttablet/tabletmanager/framework_test.go +++ b/go/vt/vttablet/tabletmanager/framework_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "regexp" + "strconv" "strings" "sync" "testing" @@ -33,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -54,7 +56,7 @@ const ( ) func init() { - tabletconn.RegisterDialer("grpc", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tabletconn.RegisterDialer("grpc", func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { return &tabletconntest.FakeQueryService{ StreamHealthResponse: &querypb.StreamHealthResponse{ Serving: true, @@ -96,7 +98,7 @@ func newTestEnv(t *testing.T, ctx context.Context, sourceKeyspace string, source tenv.tmc.sourceShards = sourceShards 
tenv.tmc.schema = defaultSchema - tabletconn.RegisterDialer(t.Name(), func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tabletconn.RegisterDialer(t.Name(), func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { tenv.mu.Lock() defer tenv.mu.Unlock() if qs, ok := tenv.tmc.tablets[int(tablet.Alias.Uid)]; ok { @@ -376,18 +378,70 @@ type fakeTMClient struct { sourceShards []string tablets map[int]*fakeTabletConn schema *tabletmanagerdatapb.SchemaDefinition + tabletSchemas map[int]*tabletmanagerdatapb.SchemaDefinition vreQueries map[int]map[string]*querypb.QueryResult + + mu sync.Mutex + // Keep track of how many times GetSchema is called per tablet. + getSchemaCounts map[string]int + // Used to confirm the number of times WorkflowDelete was called. + workflowDeleteCalls int } func newFakeTMClient() *fakeTMClient { return &fakeTMClient{ - tablets: make(map[int]*fakeTabletConn), - vreQueries: make(map[int]map[string]*querypb.QueryResult), - schema: &tabletmanagerdatapb.SchemaDefinition{}, + tablets: make(map[int]*fakeTabletConn), + vreQueries: make(map[int]map[string]*querypb.QueryResult), + schema: &tabletmanagerdatapb.SchemaDefinition{}, + tabletSchemas: make(map[int]*tabletmanagerdatapb.SchemaDefinition), // If we need to override the global schema for a tablet + getSchemaCounts: make(map[string]int), + } +} + +// Note: ONLY breaks up change.SQL into individual statements and executes it. Does NOT fully implement ApplySchema. 
+func (tmc *fakeTMClient) ApplySchema(ctx context.Context, tablet *topodatapb.Tablet, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) { + stmts := strings.Split(change.SQL, ";") + + for _, stmt := range stmts { + _, err := tmc.ExecuteFetchAsDba(ctx, tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(stmt), + MaxRows: 0, + ReloadSchema: true, + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + +func (tmc *fakeTMClient) schemaRequested(uid int) { + tmc.mu.Lock() + defer tmc.mu.Unlock() + key := strconv.Itoa(int(uid)) + n, ok := tmc.getSchemaCounts[key] + if !ok { + tmc.getSchemaCounts[key] = 1 + } else { + tmc.getSchemaCounts[key] = n + 1 } } +func (tmc *fakeTMClient) getSchemaRequestCount(uid int) int { + tmc.mu.Lock() + defer tmc.mu.Unlock() + key := strconv.Itoa(int(uid)) + return tmc.getSchemaCounts[key] +} + func (tmc *fakeTMClient) GetSchema(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) { + tmc.schemaRequested(int(tablet.Alias.Uid)) + // Return the schema for the tablet if it exists. + if schema, ok := tmc.tabletSchemas[int(tablet.Alias.Uid)]; ok { + return schema, nil + } + // Otherwise use the global one. 
return tmc.schema, nil } @@ -432,41 +486,6 @@ func (tmc *fakeTMClient) VReplicationExec(ctx context.Context, tablet *topodatap return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid) } -func (tmc *fakeTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { - return tmc.tablets[int(tablet.Alias.Uid)].tm.CreateVReplicationWorkflow(ctx, req) -} - -func (tmc *fakeTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { - resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{ - Workflow: req.Workflow, - WorkflowSubType: binlogdatapb.VReplicationWorkflowSubType_None, - WorkflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, - TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_PRIMARY}, - Streams: make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, len(tmc.sourceShards)), - } - rules := make([]*binlogdatapb.Rule, len(defaultSchema.TableDefinitions)) - for i, table := range defaultSchema.TableDefinitions { - rules[i] = &binlogdatapb.Rule{ - Match: table.Name, - Filter: tablet.Shard, - } - } - for i, shard := range tmc.sourceShards { - resp.Streams[i] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{ - Id: int32(i + 1), - Bls: &binlogdatapb.BinlogSource{ - Keyspace: tmc.sourceKeyspace, - Shard: shard, - Filter: &binlogdatapb.Filter{ - Rules: rules, - }, - }, - } - } - - return resp, nil -} - func (tmc *fakeTMClient) PrimaryPosition(ctx context.Context, tablet *topodatapb.Tablet) (string, error) { return fmt.Sprintf("%s/%s", gtidFlavor, gtidPosition), nil } @@ -490,3 +509,34 @@ func (tmc *fakeTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, r }, }, nil } + +func (tmc 
*fakeTMClient) CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.CreateVReplicationWorkflow(ctx, req) +} + +func (tmc *fakeTMClient) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (response *tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, err error) { + tmc.mu.Lock() + defer tmc.mu.Unlock() + tmc.workflowDeleteCalls++ + return &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{ + Result: &querypb.QueryResult{ + RowsAffected: 1, + }, + }, nil +} + +func (tmc *fakeTMClient) HasVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.HasVReplicationWorkflows(ctx, req) +} + +func (tmc *fakeTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.ReadVReplicationWorkflow(ctx, req) +} + +func (tmc *fakeTMClient) ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.ReadVReplicationWorkflows(ctx, req) +} + +func (tmc *fakeTMClient) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) { + return tmc.tablets[int(tablet.Alias.Uid)].tm.UpdateVReplicationWorkflow(ctx, req) 
+} diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index 335302902be..236d048340b 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -131,7 +131,8 @@ func (tm *TabletManager) RestoreData( deleteBeforeRestore bool, backupTime time.Time, restoreToTimetamp time.Time, - restoreToPos string) error { + restoreToPos string, + mysqlShutdownTimeout time.Duration) error { if err := tm.lock(ctx); err != nil { return err } @@ -180,14 +181,14 @@ func (tm *TabletManager) RestoreData( RestoreToPos: restoreToPos, RestoreToTimestamp: protoutil.TimeToProto(restoreToTimetamp), } - err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, req) + err = tm.restoreDataLocked(ctx, logger, waitForBackupInterval, deleteBeforeRestore, req, mysqlShutdownTimeout) if err != nil { return err } return nil } -func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.Logger, waitForBackupInterval time.Duration, deleteBeforeRestore bool, request *tabletmanagerdatapb.RestoreFromBackupRequest) error { +func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.Logger, waitForBackupInterval time.Duration, deleteBeforeRestore bool, request *tabletmanagerdatapb.RestoreFromBackupRequest, mysqlShutdownTimeout time.Duration) error { tablet := tm.Tablet() originalType := tablet.Type @@ -217,25 +218,26 @@ func (tm *TabletManager) restoreDataLocked(ctx context.Context, logger logutil.L } params := mysqlctl.RestoreParams{ - Cnf: tm.Cnf, - Mysqld: tm.MysqlDaemon, - Logger: logger, - Concurrency: restoreConcurrency, - HookExtraEnv: tm.hookExtraEnv(), - DeleteBeforeRestore: deleteBeforeRestore, - DbName: topoproto.TabletDbName(tablet), - Keyspace: keyspace, - Shard: tablet.Shard, - StartTime: startTime, - DryRun: request.DryRun, - Stats: backupstats.RestoreStats(), + Cnf: tm.Cnf, + Mysqld: tm.MysqlDaemon, + Logger: logger, + Concurrency: 
restoreConcurrency, + HookExtraEnv: tm.hookExtraEnv(), + DeleteBeforeRestore: deleteBeforeRestore, + DbName: topoproto.TabletDbName(tablet), + Keyspace: keyspace, + Shard: tablet.Shard, + StartTime: startTime, + DryRun: request.DryRun, + Stats: backupstats.RestoreStats(), + MysqlShutdownTimeout: mysqlShutdownTimeout, } restoreToTimestamp := protoutil.TimeFromProto(request.RestoreToTimestamp).UTC() if request.RestoreToPos != "" && !restoreToTimestamp.IsZero() { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "--restore-to-pos and --restore-to-timestamp are mutually exclusive") } if request.RestoreToPos != "" { - pos, err := replication.DecodePosition(request.RestoreToPos) + pos, _, err := replication.DecodePositionMySQL56(request.RestoreToPos) if err != nil { return vterrors.Wrapf(err, "restore failed: unable to decode --restore-to-pos: %s", request.RestoreToPos) } @@ -424,7 +426,7 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replicati Port: connParams.Port, } dbCfgs.SetDbParams(*connParams, *connParams, *connParams) - vsClient := vreplication.NewReplicaConnector(connParams) + vsClient := vreplication.NewReplicaConnector(tm.Env, connParams) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -475,7 +477,7 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replicati gtidsChan <- []string{"", ""} } }() - defer vsClient.Close(ctx) + defer vsClient.Close() select { case val := <-gtidsChan: return val[0], val[1], nil @@ -491,13 +493,13 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replicati // waits till all events to GTID replicated // once done, it will reset the replication func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, beforeGTIDPos string) error { - var afterGTIDStr string + var afterGTID replication.Position if afterGTIDPos != "" { - afterGTIDParsed, err := replication.DecodePosition(afterGTIDPos) + var err error + afterGTID, err = 
replication.DecodePosition(afterGTIDPos) if err != nil { return err } - afterGTIDStr = afterGTIDParsed.GTIDSet.Last() } beforeGTIDPosParsed, err := replication.DecodePosition(beforeGTIDPos) @@ -505,48 +507,18 @@ func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, return err } - // it uses mysql specific queries here - cmds := []string{ - "STOP SLAVE FOR CHANNEL '' ", - "STOP SLAVE IO_THREAD FOR CHANNEL ''", - } - - if binlogSslCa != "" || binlogSslCert != "" { - // We need to use TLS - cmd := fmt.Sprintf("CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='%s', MASTER_PASSWORD='%s', MASTER_AUTO_POSITION=1, MASTER_SSL=1", binlogHost, binlogPort, binlogUser, binlogPwd) - if binlogSslCa != "" { - cmd += fmt.Sprintf(", MASTER_SSL_CA='%s'", binlogSslCa) - } - if binlogSslCert != "" { - cmd += fmt.Sprintf(", MASTER_SSL_CERT='%s'", binlogSslCert) - } - if binlogSslKey != "" { - cmd += fmt.Sprintf(", MASTER_SSL_KEY='%s'", binlogSslKey) - } - cmds = append(cmds, cmd+";") - } else { - // No TLS - cmds = append(cmds, fmt.Sprintf("CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER='%s', MASTER_PASSWORD='%s', MASTER_AUTO_POSITION=1;", binlogHost, binlogPort, binlogUser, binlogPwd)) - } - - if afterGTIDPos == "" { // when the there is no afterPos, that means need to replicate completely - cmds = append(cmds, "START SLAVE") - } else { - cmds = append(cmds, fmt.Sprintf("START SLAVE UNTIL SQL_BEFORE_GTIDS = '%s'", afterGTIDStr)) - } - - if err := tm.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil { - return vterrors.Wrap(err, fmt.Sprintf("failed to restart the replication until %s GTID", afterGTIDStr)) + if err := tm.MysqlDaemon.CatchupToGTID(ctx, afterGTID); err != nil { + return vterrors.Wrap(err, fmt.Sprintf("failed to restart the replication until %s GTID", afterGTID.GTIDSet.Last())) } log.Infof("Waiting for position to reach", beforeGTIDPosParsed.GTIDSet.Last()) - // Could not use `agent.MysqlDaemon.WaitSourcePos` 
as replication is stopped with `START SLAVE UNTIL SQL_BEFORE_GTIDS` - // this is as per https://dev.mysql.com/doc/refman/5.6/en/start-slave.html + // Could not use `agent.MysqlDaemon.WaitSourcePos` as replication is stopped with `START REPLICA UNTIL SQL_BEFORE_GTIDS` + // this is as per https://dev.mysql.com/doc/refman/8.0/en/start-replica.html // We need to wait until replication catches upto the specified afterGTIDPos chGTIDCaughtup := make(chan bool) go func() { timeToWait := time.Now().Add(timeoutForGTIDLookup) for time.Now().Before(timeToWait) { - pos, err := tm.MysqlDaemon.PrimaryPosition() + pos, err := tm.MysqlDaemon.PrimaryPosition(ctx) if err != nil { chGTIDCaughtup <- false } @@ -565,13 +537,13 @@ func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, select { case resp := <-chGTIDCaughtup: if resp { - cmds := []string{ - "STOP SLAVE", - "RESET SLAVE ALL", - } - if err := tm.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil { + if err := tm.MysqlDaemon.StopReplication(ctx, nil); err != nil { return vterrors.Wrap(err, "failed to stop replication") } + if err := tm.MysqlDaemon.ResetReplicationParameters(ctx); err != nil { + return vterrors.Wrap(err, "failed to reset replication") + } + return nil } return vterrors.Wrap(err, "error while fetching the current GTID position") @@ -581,18 +553,18 @@ func (tm *TabletManager) catchupToGTID(ctx context.Context, afterGTIDPos string, } } -// disableReplication stopes and resets replication on the mysql server. It moreover sets impossible replication +// disableReplication stops and resets replication on the mysql server. It moreover sets impossible replication // source params, so that the replica can't possibly reconnect. 
It would take a `CHANGE [MASTER|REPLICATION SOURCE] TO ...` to // make the mysql server replicate again (available via tm.MysqlDaemon.SetReplicationPosition) func (tm *TabletManager) disableReplication(ctx context.Context) error { - cmds := []string{ - "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. + if err := tm.MysqlDaemon.StopReplication(ctx, nil); err != nil { + return vterrors.Wrap(err, "failed to stop replication") } - if err := tm.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil { + if err := tm.MysqlDaemon.ResetReplicationParameters(ctx); err != nil { return vterrors.Wrap(err, "failed to reset replication") } - if err := tm.MysqlDaemon.SetReplicationSource(ctx, "//", 0, false /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + + if err := tm.MysqlDaemon.SetReplicationSource(ctx, "//", 0, 0, false, true); err != nil { return vterrors.Wrap(err, "failed to disable replication") } @@ -600,11 +572,10 @@ func (tm *TabletManager) disableReplication(ctx context.Context) error { } func (tm *TabletManager) startReplication(ctx context.Context, pos replication.Position, tabletType topodatapb.TabletType) error { - cmds := []string{ - "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget primary host:port. 
+ if err := tm.MysqlDaemon.StopReplication(ctx, nil); err != nil { + return vterrors.Wrap(err, "failed to stop replication") } - if err := tm.MysqlDaemon.ExecuteSuperQueryList(ctx, cmds); err != nil { + if err := tm.MysqlDaemon.ResetReplicationParameters(ctx); err != nil { return vterrors.Wrap(err, "failed to reset replication") } @@ -649,7 +620,7 @@ func (tm *TabletManager) startReplication(ctx context.Context, pos replication.P if err := ctx.Err(); err != nil { return err } - status, err := tm.MysqlDaemon.ReplicationStatusWithContext(ctx) + status, err := tm.MysqlDaemon.ReplicationStatus(ctx) if err != nil { return vterrors.Wrap(err, "can't get replication status") } diff --git a/go/vt/vttablet/tabletmanager/rpc_actions.go b/go/vt/vttablet/tabletmanager/rpc_actions.go index 16d3513355c..8abb3fe702d 100644 --- a/go/vt/vttablet/tabletmanager/rpc_actions.go +++ b/go/vt/vttablet/tabletmanager/rpc_actions.go @@ -21,6 +21,7 @@ import ( "fmt" "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/hook" @@ -72,7 +73,7 @@ func (tm *TabletManager) SetReadOnly(ctx context.Context, rdonly bool) error { } defer tm.unlock() - return tm.MysqlDaemon.SetReadOnly(rdonly) + return tm.MysqlDaemon.SetReadOnly(ctx, rdonly) } // ChangeType changes the tablet type @@ -82,7 +83,7 @@ func (tm *TabletManager) ChangeType(ctx context.Context, tabletType topodatapb.T } defer tm.unlock() - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return err } @@ -102,7 +103,7 @@ func (tm *TabletManager) changeTypeLocked(ctx context.Context, tabletType topoda } // Let's see if we need to fix semi-sync acking. 
- if err := tm.fixSemiSyncAndReplication(tm.Tablet().Type, semiSync); err != nil { + if err := tm.fixSemiSyncAndReplication(ctx, tm.Tablet().Type, semiSync); err != nil { return vterrors.Wrap(err, "fixSemiSyncAndReplication failed, may not ack correctly") } return nil @@ -147,19 +148,20 @@ func (tm *TabletManager) RunHealthCheck(ctx context.Context) { tm.QueryServiceControl.BroadcastHealth() } -func (tm *TabletManager) convertBoolToSemiSyncAction(semiSync bool) (SemiSyncAction, error) { - semiSyncExtensionLoaded, err := tm.MysqlDaemon.SemiSyncExtensionLoaded() +func (tm *TabletManager) convertBoolToSemiSyncAction(ctx context.Context, semiSync bool) (SemiSyncAction, error) { + semiSyncExtensionLoaded, err := tm.MysqlDaemon.SemiSyncExtensionLoaded(ctx) if err != nil { return SemiSyncActionNone, err } - if semiSyncExtensionLoaded { + switch semiSyncExtensionLoaded { + case mysql.SemiSyncTypeSource, mysql.SemiSyncTypeMaster: if semiSync { return SemiSyncActionSet, nil } else { return SemiSyncActionUnset, nil } - } else { + default: if semiSync { return SemiSyncActionNone, vterrors.VT09013() } else { diff --git a/go/vt/vttablet/tabletmanager/rpc_agent.go b/go/vt/vttablet/tabletmanager/rpc_agent.go index 06c0e5cda94..85ba5a2ef0f 100644 --- a/go/vt/vttablet/tabletmanager/rpc_agent.go +++ b/go/vt/vttablet/tabletmanager/rpc_agent.go @@ -73,6 +73,8 @@ type RPCTM interface { ExecuteFetchAsDba(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) + ExecuteMultiFetchAsDba(ctx context.Context, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) + ExecuteFetchAsAllPrivs(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) ExecuteFetchAsApp(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsAppRequest) (*querypb.QueryResult, error) @@ -101,10 +103,13 @@ type RPCTM interface { // VReplication API CreateVReplicationWorkflow(ctx 
context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) DeleteVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) + HasVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) + ReadVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) VReplicationWaitForPos(ctx context.Context, id int32, pos string) error UpdateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) + UpdateVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) // VDiff API VDiff(ctx context.Context, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) @@ -127,7 +132,7 @@ type RPCTM interface { ResetReplicationParameters(ctx context.Context) error - SetReplicationSource(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool) error + SetReplicationSource(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error StopReplicationAndGetStatus(ctx context.Context, stopReplicationMode 
replicationdatapb.StopReplicationMode) (StopReplicationAndGetStatusResponse, error) diff --git a/go/vt/vttablet/tabletmanager/rpc_backup.go b/go/vt/vttablet/tabletmanager/rpc_backup.go index b3d2e2794f6..906e34ca9d7 100644 --- a/go/vt/vttablet/tabletmanager/rpc_backup.go +++ b/go/vt/vttablet/tabletmanager/rpc_backup.go @@ -136,12 +136,12 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req } isSemiSync := reparentutil.IsReplicaSemiSync(durability, shardPrimary.Tablet, tabletInfo.Tablet) - semiSyncAction, err := tm.convertBoolToSemiSyncAction(isSemiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(bgCtx, isSemiSync) if err != nil { l.Errorf("Failed to convert bool to semisync action, error: %v", err) return } - if err := tm.setReplicationSourceLocked(bgCtx, shardPrimary.Alias, 0, "", false, semiSyncAction); err != nil { + if err := tm.setReplicationSourceLocked(bgCtx, shardPrimary.Alias, 0, "", false, semiSyncAction, 0); err != nil { l.Errorf("Failed to set replication source, error: %v", err) } }() @@ -149,19 +149,20 @@ func (tm *TabletManager) Backup(ctx context.Context, logger logutil.Logger, req // Now we can run the backup. 
backupParams := mysqlctl.BackupParams{ - Cnf: tm.Cnf, - Mysqld: tm.MysqlDaemon, - Logger: l, - Concurrency: int(req.Concurrency), - IncrementalFromPos: req.IncrementalFromPos, - HookExtraEnv: tm.hookExtraEnv(), - TopoServer: tm.TopoServer, - Keyspace: tablet.Keyspace, - Shard: tablet.Shard, - TabletAlias: topoproto.TabletAliasString(tablet.Alias), - BackupTime: time.Now(), - Stats: backupstats.BackupStats(), - UpgradeSafe: req.UpgradeSafe, + Cnf: tm.Cnf, + Mysqld: tm.MysqlDaemon, + Logger: l, + Concurrency: int(req.Concurrency), + IncrementalFromPos: req.IncrementalFromPos, + HookExtraEnv: tm.hookExtraEnv(), + TopoServer: tm.TopoServer, + Keyspace: tablet.Keyspace, + Shard: tablet.Shard, + TabletAlias: topoproto.TabletAliasString(tablet.Alias), + BackupTime: time.Now(), + Stats: backupstats.BackupStats(), + UpgradeSafe: req.UpgradeSafe, + MysqlShutdownTimeout: mysqlShutdownTimeout, } returnErr := mysqlctl.Backup(ctx, backupParams) @@ -189,7 +190,7 @@ func (tm *TabletManager) RestoreFromBackup(ctx context.Context, logger logutil.L l := logutil.NewTeeLogger(logutil.NewConsoleLogger(), logger) // Now we can run restore. - err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, request) + err = tm.restoreDataLocked(ctx, l, 0 /* waitForBackupInterval */, true /* deleteBeforeRestore */, request, mysqlShutdownTimeout) // Re-run health check to be sure to capture any replication delay. 
tm.QueryServiceControl.BroadcastHealth() diff --git a/go/vt/vttablet/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go index 8b8ac605893..303bcd4614d 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query.go +++ b/go/vt/vttablet/tabletmanager/rpc_query.go @@ -24,56 +24,139 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/vtrpc" ) -// ExecuteFetchAsDba will execute the given query, possibly disabling binlogs and reload schema. -func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) { - // get a connection +// analyzeExecuteFetchAsDbaMultiQuery reutrns 'true' when at least one of the queries +// in the given SQL has a `/*vt+ allowZeroInDate=true */` directive. +func analyzeExecuteFetchAsDbaMultiQuery(sql string, parser *sqlparser.Parser) (queries []string, parseable bool, countCreate int, allowZeroInDate bool, err error) { + queries, err = parser.SplitStatementToPieces(sql) + if err != nil { + return nil, false, 0, false, err + } + if len(queries) == 0 { + return nil, false, 0, false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "no statements found in query: %s", sql) + } + parseable = true + for _, query := range queries { + // Some of the queries we receive here are legitimately non-parseable by our + // current parser, such as `CHANGE REPLICATION SOURCE TO...`. We must allow + // them and so we skip parsing errors. 
+ stmt, err := parser.Parse(query) + if err != nil { + parseable = false + continue + } + switch stmt.(type) { + case *sqlparser.CreateTable, *sqlparser.CreateView: + countCreate++ + default: + } + + if cmnt, ok := stmt.(sqlparser.Commented); ok { + directives := cmnt.GetParsedComments().Directives() + if directives.IsSet("allowZeroInDate") { + allowZeroInDate = true + } + } + + } + return queries, parseable, countCreate, allowZeroInDate, nil +} + +// ExecuteMultiFetchAsDba will execute the given queries, possibly disabling binlogs and reload schema. +func (tm *TabletManager) executeMultiFetchAsDba( + ctx context.Context, + dbName string, + sql string, + maxRows int, + reloadSchema bool, + disableBinlogs bool, + disableForeignKeyChecks bool, + validateQueries func(queries []string, countCreate int) error, +) ([]*querypb.QueryResult, error) { + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } + // Get a connection. conn, err := tm.MysqlDaemon.GetDbaConnection(ctx) if err != nil { return nil, err } defer conn.Close() - // disable binlogs if necessary - if req.DisableBinlogs { + // Disable binlogs if necessary. + if disableBinlogs { _, err := conn.ExecuteFetch("SET sql_log_bin = OFF", 0, false) if err != nil { return nil, err } } - if req.DbName != "" { + // Disable FK checks if requested. + if disableForeignKeyChecks { + _, err := conn.ExecuteFetch("SET SESSION foreign_key_checks = OFF", 0, false) + if err != nil { + return nil, err + } + } + + if dbName != "" { // This execute might fail if db does not exist. // Error is ignored because given query might create this database. 
- _, _ = conn.ExecuteFetch("USE "+sqlescape.EscapeID(req.DbName), 1, false) + _, _ = conn.ExecuteFetch("USE "+sqlescape.EscapeID(dbName), 1, false) } - // Handle special possible directives - var directives *sqlparser.CommentDirectives - if stmt, err := sqlparser.Parse(string(req.Query)); err == nil { - if cmnt, ok := stmt.(sqlparser.Commented); ok { - directives = cmnt.GetParsedComments().Directives() + queries, _, countCreate, allowZeroInDate, err := analyzeExecuteFetchAsDbaMultiQuery(sql, tm.Env.Parser()) + if err != nil { + return nil, err + } + if validateQueries != nil { + if err := validateQueries(queries, countCreate); err != nil { + return nil, err } } - if directives.IsSet("allowZeroInDate") { + if allowZeroInDate { if _, err := conn.ExecuteFetch("set @@session.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')", 1, false); err != nil { return nil, err } } - - // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.Env.Parser().ReplaceTableQualifiersMultiQuery(sql, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } - result, err := conn.ExecuteFetch(uq, int(req.MaxRows), true /*wantFields*/) + // TODO(shlomi): we use ExecuteFetchMulti for backwards compatibility. In v20 we will not accept + // multi statement queries in ExecuteFetchAsDBA. 
This will be rewritten as: + // (in v20): result, err := ExecuteFetch(uq, int(req.MaxRows), true /*wantFields*/) + results := make([]*querypb.QueryResult, 0, len(queries)) + result, more, err := conn.ExecuteFetchMulti(uq, maxRows, true /*wantFields*/) + if err == nil { + results = append(results, sqltypes.ResultToProto3(result)) + } + for more { + result, more, _, err = conn.ReadQueryResult(maxRows, true /*wantFields*/) + if err != nil { + return nil, err + } + results = append(results, sqltypes.ResultToProto3(result)) + } + + // Re-enable FK checks if necessary. + if disableForeignKeyChecks && !conn.IsClosed() { + _, err := conn.ExecuteFetch("SET SESSION foreign_key_checks = ON", 0, false) + if err != nil { + // If we can't reset the FK checks flag, + // let's just close the connection. + conn.Close() + } + } - // re-enable binlogs if necessary - if req.DisableBinlogs && !conn.IsClosed() { + // Re-enable binlogs if necessary. + if disableBinlogs && !conn.IsClosed() { _, err := conn.ExecuteFetch("SET sql_log_bin = ON", 0, false) if err != nil { // if we can't reset the sql_log_bin flag, @@ -82,17 +165,67 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag } } - if err == nil && req.ReloadSchema { + if err == nil && reloadSchema { reloadErr := tm.QueryServiceControl.ReloadSchema(ctx) if reloadErr != nil { log.Errorf("failed to reload the schema %v", reloadErr) } } - return sqltypes.ResultToProto3(result), err + return results, err +} + +// ExecuteFetchAsDba will execute the given query, possibly disabling binlogs and reload schema. 
+func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) { + results, err := tm.executeMultiFetchAsDba( + ctx, + req.DbName, + string(req.Query), + int(req.MaxRows), + req.ReloadSchema, + req.DisableBinlogs, + req.DisableForeignKeyChecks, + func(queries []string, countCreate int) error { + // Up to v19, we allow multi-statement SQL in ExecuteFetchAsDba, but only for the specific case + // where all statements are CREATE TABLE or CREATE VIEW. This is to support `ApplySchema --batch-size`. + // In v20, we still support multi-statement SQL, but again only if all statements are CREATE TABLE or CREATE VIEW. + // We then also add ExecuteMultiFetchAsDba for future use of multiple statements. + // In v21 we will not tolerate multi-statement SQL in ExecuteFetchAsDba at all, and + // ExecuteMultiFetchAsDba will be the only way to execute multiple statements. + if len(queries) > 1 && len(queries) != countCreate { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "multi statement queries are not supported in ExecuteFetchAsDba unless all are CREATE TABLE or CREATE VIEW") + } + return nil + }, + ) + if err != nil { + return nil, err + } + if len(results) == 0 { + return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "received no query results in ExecuteFetchAsDba. Expcted at least 1") + } + return results[0], nil +} + +// ExecuteMultiFetchAsDba will execute the given queries, possibly disabling binlogs and reload schema. 
+func (tm *TabletManager) ExecuteMultiFetchAsDba(ctx context.Context, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) { + results, err := tm.executeMultiFetchAsDba( + ctx, + req.DbName, + string(req.Sql), + int(req.MaxRows), + req.ReloadSchema, + req.DisableBinlogs, + req.DisableForeignKeyChecks, + nil, // Validation query is not needed for ExecuteMultiFetchAsDba + ) + return results, err } // ExecuteFetchAsAllPrivs will execute the given query, possibly reloading schema. func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } // get a connection conn, err := tm.MysqlDaemon.GetAllPrivsConnection(ctx) if err != nil { @@ -107,7 +240,7 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.Env.Parser().ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -124,6 +257,9 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet // ExecuteFetchAsApp will execute the given query. func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsAppRequest) (*querypb.QueryResult, error) { + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } // get a connection conn, err := tm.MysqlDaemon.GetAppConnection(ctx) if err != nil { @@ -131,7 +267,7 @@ func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanag } defer conn.Recycle() // Replace any provided sidecar database qualifiers with the correct one. 
- uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.Env.Parser().ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -141,11 +277,14 @@ func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanag // ExecuteQuery submits a new online DDL request func (tm *TabletManager) ExecuteQuery(ctx context.Context, req *tabletmanagerdatapb.ExecuteQueryRequest) (*querypb.QueryResult, error) { + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } // get the db name from the tablet tablet := tm.Tablet() target := &querypb.Target{Keyspace: tablet.Keyspace, Shard: tablet.Shard, TabletType: tablet.Type} // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.Env.Parser().ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_query_test.go b/go/vt/vttablet/tabletmanager/rpc_query_test.go index 87a64b2d8b7..e30f63b362b 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_query_test.go @@ -21,6 +21,7 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" @@ -28,11 +29,91 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletservermock" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" ) +func TestAnalyzeExecuteFetchAsDbaMultiQuery(t *testing.T) { + tcases := []struct { + query string + count int + parseable bool + allowZeroInDate bool + allCreate bool + 
expectErr bool + }{ + { + query: "", + expectErr: true, + }, + { + query: "select * from t1 ; select * from t2", + count: 2, + parseable: true, + }, + { + query: "create table t(id int)", + count: 1, + allCreate: true, + parseable: true, + }, + { + query: "create table t(id int); create view v as select 1 from dual", + count: 2, + allCreate: true, + parseable: true, + }, + { + query: "create table t(id int); create view v as select 1 from dual; drop table t3", + count: 3, + allCreate: false, + parseable: true, + }, + { + query: "create /*vt+ allowZeroInDate=true */ table t (id int)", + count: 1, + allCreate: true, + allowZeroInDate: true, + parseable: true, + }, + { + query: "create table a (id int) ; create /*vt+ allowZeroInDate=true */ table b (id int)", + count: 2, + allCreate: true, + allowZeroInDate: true, + parseable: true, + }, + { + query: "stop replica; start replica", + count: 2, + parseable: false, + }, + { + query: "create table a (id int) ; --comment ; what", + count: 3, + parseable: false, + }, + } + for _, tcase := range tcases { + t.Run(tcase.query, func(t *testing.T) { + parser := sqlparser.NewTestParser() + queries, parseable, countCreate, allowZeroInDate, err := analyzeExecuteFetchAsDbaMultiQuery(tcase.query, parser) + if tcase.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tcase.count, len(queries)) + assert.Equal(t, tcase.parseable, parseable) + assert.Equal(t, tcase.allCreate, (countCreate == len(queries))) + assert.Equal(t, tcase.allowZeroInDate, allowZeroInDate) + } + }) + } +} + func TestTabletManager_ExecuteFetchAsDba(t *testing.T) { ctx := context.Background() cp := mysql.ConnParams{} @@ -42,10 +123,13 @@ func TestTabletManager_ExecuteFetchAsDba(t *testing.T) { dbName := " escap`e me " tm := &TabletManager{ - MysqlDaemon: daemon, - DBConfigs: dbconfigs.NewTestDBConfigs(cp, cp, dbName), - QueryServiceControl: tabletservermock.NewController(), + MysqlDaemon: daemon, + DBConfigs: 
dbconfigs.NewTestDBConfigs(cp, cp, dbName), + QueryServiceControl: tabletservermock.NewController(), + _waitForGrantsComplete: make(chan struct{}), + Env: vtenv.NewTestEnv(), } + close(tm._waitForGrantsComplete) _, err := tm.ExecuteFetchAsDba(ctx, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ Query: []byte("select 42"), diff --git a/go/vt/vttablet/tabletmanager/rpc_replication.go b/go/vt/vttablet/tabletmanager/rpc_replication.go index b11156f8a6c..3e745222092 100644 --- a/go/vt/vttablet/tabletmanager/rpc_replication.go +++ b/go/vt/vttablet/tabletmanager/rpc_replication.go @@ -39,7 +39,10 @@ import ( // ReplicationStatus returns the replication status func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdatapb.Status, error) { - status, err := tm.MysqlDaemon.ReplicationStatusWithContext(ctx) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } + status, err := tm.MysqlDaemon.ReplicationStatus(ctx) if err != nil { return nil, err } @@ -48,6 +51,9 @@ func (tm *TabletManager) ReplicationStatus(ctx context.Context) (*replicationdat // FullStatus returns the full status of MySQL including the replication information, semi-sync information, GTID information among others func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.FullStatus, error) { + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } // Server ID - "select @@global.server_id" serverID, err := tm.MysqlDaemon.GetServerID(ctx) if err != nil { @@ -61,7 +67,7 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful } // Replication status - "SHOW REPLICA STATUS" - replicationStatus, err := tm.MysqlDaemon.ReplicationStatusWithContext(ctx) + replicationStatus, err := tm.MysqlDaemon.ReplicationStatus(ctx) var replicationStatusProto *replicationdatapb.Status if err != nil && err != mysql.ErrNotReplica { return nil, err @@ -70,7 +76,7 @@ func (tm *TabletManager) FullStatus(ctx 
context.Context) (*replicationdatapb.Ful replicationStatusProto = replication.ReplicationStatusToProto(replicationStatus) } - // Primary status - "SHOW MASTER STATUS" + // Primary status - "SHOW BINARY LOG STATUS" primaryStatus, err := tm.MysqlDaemon.PrimaryStatus(ctx) var primaryStatusProto *replicationdatapb.PrimaryStatus if err != nil && err != mysql.ErrNoPrimaryStatus { @@ -104,18 +110,18 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful } // Read only - "SHOW VARIABLES LIKE 'read_only'" - readOnly, err := tm.MysqlDaemon.IsReadOnly() + readOnly, err := tm.MysqlDaemon.IsReadOnly(ctx) if err != nil { return nil, err } // superReadOnly - "SELECT @@global.super_read_only" - superReadOnly, err := tm.MysqlDaemon.IsSuperReadOnly() + superReadOnly, err := tm.MysqlDaemon.IsSuperReadOnly(ctx) if err != nil { return nil, err } - // Binlog Information - "select @@global.binlog_format, @@global.log_bin, @@global.log_slave_updates, @@global.binlog_row_image" + // Binlog Information - "select @@global.binlog_format, @@global.log_bin, @@global.log_replica_updates, @@global.binlog_row_image" binlogFormat, logBin, logReplicaUpdates, binlogRowImage, err := tm.MysqlDaemon.GetBinlogInformation(ctx) if err != nil { return nil, err @@ -128,16 +134,21 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful } // Semi sync settings - "show global variables like 'rpl_semi_sync_%_enabled'" - primarySemiSync, replicaSemiSync := tm.MysqlDaemon.SemiSyncEnabled() + primarySemiSync, replicaSemiSync := tm.MysqlDaemon.SemiSyncEnabled(ctx) // Semi sync status - "show status like 'Rpl_semi_sync_%_status'" - primarySemiSyncStatus, replicaSemiSyncStatus := tm.MysqlDaemon.SemiSyncStatus() + primarySemiSyncStatus, replicaSemiSyncStatus := tm.MysqlDaemon.SemiSyncStatus(ctx) - // Semi sync clients count - "show status like 'semi_sync_primary_clients'" - semiSyncClients := tm.MysqlDaemon.SemiSyncClients() + // Semi sync clients count - "show 
status like 'semi_sync_source_clients'" + semiSyncClients := tm.MysqlDaemon.SemiSyncClients(ctx) // Semi sync settings - "show status like 'rpl_semi_sync_%' - semiSyncTimeout, semiSyncNumReplicas := tm.MysqlDaemon.SemiSyncSettings() + semiSyncTimeout, semiSyncNumReplicas := tm.MysqlDaemon.SemiSyncSettings(ctx) + + replConfiguration, err := tm.MysqlDaemon.ReplicationConfiguration(ctx) + if err != nil { + return nil, err + } return &replicationdatapb.FullStatus{ ServerId: serverID, @@ -161,11 +172,15 @@ func (tm *TabletManager) FullStatus(ctx context.Context) (*replicationdatapb.Ful SemiSyncPrimaryTimeout: semiSyncTimeout, SemiSyncWaitForReplicaCount: semiSyncNumReplicas, SuperReadOnly: superReadOnly, + ReplicationConfiguration: replConfiguration, }, nil } // PrimaryStatus returns the replication status for a primary tablet. func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) { + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } status, err := tm.MysqlDaemon.PrimaryStatus(ctx) if err != nil { return nil, err @@ -175,7 +190,10 @@ func (tm *TabletManager) PrimaryStatus(ctx context.Context) (*replicationdatapb. 
// PrimaryPosition returns the position of a primary database func (tm *TabletManager) PrimaryPosition(ctx context.Context) (string, error) { - pos, err := tm.MysqlDaemon.PrimaryPosition() + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return "", err + } + pos, err := tm.MysqlDaemon.PrimaryPosition(ctx) if err != nil { return "", err } @@ -185,6 +203,9 @@ func (tm *TabletManager) PrimaryPosition(ctx context.Context) (string, error) { // WaitForPosition waits until replication reaches the desired position func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error { log.Infof("WaitForPosition: %v", pos) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } mpos, err := replication.DecodePosition(pos) if err != nil { return err @@ -196,6 +217,9 @@ func (tm *TabletManager) WaitForPosition(ctx context.Context, pos string) error // replication or not (using hook if not). func (tm *TabletManager) StopReplication(ctx context.Context) error { log.Infof("StopReplication") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } @@ -205,7 +229,7 @@ func (tm *TabletManager) StopReplication(ctx context.Context) error { } func (tm *TabletManager) stopReplicationLocked(ctx context.Context) error { - return tm.MysqlDaemon.StopReplication(tm.hookExtraEnv()) + return tm.MysqlDaemon.StopReplication(ctx, tm.hookExtraEnv()) } func (tm *TabletManager) stopIOThreadLocked(ctx context.Context) error { @@ -217,6 +241,9 @@ func (tm *TabletManager) stopIOThreadLocked(ctx context.Context) error { // replication or not (using hook if not). 
func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position string, waitTime time.Duration) (string, error) { log.Infof("StopReplicationMinimum: position: %v waitTime: %v", position, waitTime) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return "", err + } if err := tm.lock(ctx); err != nil { return "", err } @@ -234,7 +261,7 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st if err := tm.stopReplicationLocked(ctx); err != nil { return "", err } - pos, err = tm.MysqlDaemon.PrimaryPosition() + pos, err = tm.MysqlDaemon.PrimaryPosition(ctx) if err != nil { return "", err } @@ -245,26 +272,32 @@ func (tm *TabletManager) StopReplicationMinimum(ctx context.Context, position st // replication or not (using hook if not). func (tm *TabletManager) StartReplication(ctx context.Context, semiSync bool) error { log.Infof("StartReplication") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return err } - if err := tm.fixSemiSync(tm.Tablet().Type, semiSyncAction); err != nil { + if err := tm.fixSemiSync(ctx, tm.Tablet().Type, semiSyncAction); err != nil { return err } - return tm.MysqlDaemon.StartReplication(tm.hookExtraEnv()) + return tm.MysqlDaemon.StartReplication(ctx, tm.hookExtraEnv()) } // StartReplicationUntilAfter will start the replication and let it catch up // until and including the transactions in `position` func (tm *TabletManager) StartReplicationUntilAfter(ctx context.Context, position string, waitTime time.Duration) error { log.Infof("StartReplicationUntilAfter: position: %v waitTime: %v", position, waitTime) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } @@ -283,13 
+316,19 @@ func (tm *TabletManager) StartReplicationUntilAfter(ctx context.Context, positio // GetReplicas returns the address of all the replicas func (tm *TabletManager) GetReplicas(ctx context.Context) ([]string, error) { - return mysqlctl.FindReplicas(tm.MysqlDaemon) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } + return mysqlctl.FindReplicas(ctx, tm.MysqlDaemon) } // ResetReplication completely resets the replication on the host. // All binary and relay logs are flushed. All replication positions are reset. func (tm *TabletManager) ResetReplication(ctx context.Context) error { log.Infof("ResetReplication") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } @@ -301,13 +340,16 @@ func (tm *TabletManager) ResetReplication(ctx context.Context) error { // InitPrimary enables writes and returns the replication position. func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string, error) { log.Infof("InitPrimary with semiSync as %t", semiSync) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return "", err + } if err := tm.lock(ctx); err != nil { return "", err } defer tm.unlock() // Setting super_read_only `OFF` so that we can run the DDL commands - if _, err := tm.MysqlDaemon.SetSuperReadOnly(false); err != nil { + if _, err := tm.MysqlDaemon.SetSuperReadOnly(ctx, false); err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { log.Warningf("server does not know about super_read_only, continuing anyway...") } else { @@ -322,12 +364,12 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string } // get the current replication position - pos, err := tm.MysqlDaemon.PrimaryPosition() + pos, err := tm.MysqlDaemon.PrimaryPosition(ctx) if err != nil { return "", err } - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + 
semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return "", err } @@ -341,7 +383,7 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string // Enforce semi-sync after changing the tablet type to PRIMARY. Otherwise, the // primary will hang while trying to create the database. - if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { return "", err } @@ -352,6 +394,9 @@ func (tm *TabletManager) InitPrimary(ctx context.Context, semiSync bool) (string func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreatedNS int64, actionName string, primaryAlias *topodatapb.TabletAlias, position string) error { log.Infof("PopulateReparentJournal: action: %v parent: %v position: %v timeCreatedNS: %d actionName: %s primaryAlias: %s", actionName, primaryAlias, position, timeCreatedNS, actionName, primaryAlias) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } pos, err := replication.DecodePosition(position) if err != nil { return err @@ -366,12 +411,15 @@ func (tm *TabletManager) PopulateReparentJournal(ctx context.Context, timeCreate // reparent_journal table entry up to context timeout func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.TabletAlias, position string, timeCreatedNS int64, semiSync bool) error { log.Infof("InitReplica: parent: %v position: %v timeCreatedNS: %d semisync: %t", parent, position, timeCreatedNS, semiSync) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return err } @@ -401,14 +449,14 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent 
*topodatapb.Tab if tt == topodatapb.TabletType_PRIMARY { tt = topodatapb.TabletType_REPLICA } - if err := tm.fixSemiSync(tt, semiSyncAction); err != nil { + if err := tm.fixSemiSync(ctx, tt, semiSyncAction); err != nil { return err } if err := tm.MysqlDaemon.SetReplicationPosition(ctx, pos); err != nil { return err } - if err := tm.MysqlDaemon.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, false /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + if err := tm.MysqlDaemon.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, 0, false, true); err != nil { return err } @@ -418,7 +466,7 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // DemotePrimary prepares a PRIMARY tablet to give up leadership to another tablet. // -// It attemps to idempotently ensure the following guarantees upon returning +// It attempts to idempotently ensure the following guarantees upon returning // successfully: // - No future writes will be accepted. // - No writes are in-flight. @@ -433,6 +481,9 @@ func (tm *TabletManager) InitReplica(ctx context.Context, parent *topodatapb.Tab // If a step fails in the middle, it will try to undo any changes it made. func (tm *TabletManager) DemotePrimary(ctx context.Context) (*replicationdatapb.PrimaryStatus, error) { log.Infof("DemotePrimary") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return nil, err + } // The public version always reverts on partial failure. 
return tm.demotePrimary(ctx, true /* revertPartialFailure */) } @@ -450,7 +501,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure tablet := tm.Tablet() wasPrimary := tablet.Type == topodatapb.TabletType_PRIMARY wasServing := tm.QueryServiceControl.IsServing() - wasReadOnly, err := tm.MysqlDaemon.IsReadOnly() + wasReadOnly, err := tm.MysqlDaemon.IsReadOnly(ctx) if err != nil { return nil, err } @@ -483,7 +534,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // set MySQL to super_read_only mode. If we are already super_read_only because of a // previous demotion, or because we are not primary anyway, this should be // idempotent. - if _, err := tm.MysqlDaemon.SetSuperReadOnly(true); err != nil { + if _, err := tm.MysqlDaemon.SetSuperReadOnly(ctx, true); err != nil { if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERUnknownSystemVariable { log.Warningf("server does not know about super_read_only, continuing anyway...") } else { @@ -494,7 +545,7 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure defer func() { if finalErr != nil && revertPartialFailure && !wasReadOnly { // setting read_only OFF will also set super_read_only OFF if it was set - if err := tm.MysqlDaemon.SetReadOnly(false); err != nil { + if err := tm.MysqlDaemon.SetReadOnly(ctx, false); err != nil { log.Warningf("SetReadOnly(false) failed during revert: %v", err) } } @@ -502,15 +553,15 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // Here, we check if the primary side semi sync is enabled or not. If it isn't enabled then we do not need to take any action. // If it is enabled then we should turn it off and revert in case of failure. - if tm.isPrimarySideSemiSyncEnabled() { + if tm.isPrimarySideSemiSyncEnabled(ctx) { // If using semi-sync, we need to disable primary-side. 
- if err := tm.fixSemiSync(topodatapb.TabletType_REPLICA, SemiSyncActionSet); err != nil { + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_REPLICA, SemiSyncActionSet); err != nil { return nil, err } defer func() { if finalErr != nil && revertPartialFailure && wasPrimary { // enable primary-side semi-sync again - if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, SemiSyncActionSet); err != nil { + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, SemiSyncActionSet); err != nil { log.Warningf("fixSemiSync(PRIMARY) failed during revert: %v", err) } } @@ -530,23 +581,26 @@ func (tm *TabletManager) demotePrimary(ctx context.Context, revertPartialFailure // and returns its primary position. func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) error { log.Infof("UndoDemotePrimary") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return err } // If using semi-sync, we need to enable source-side. - if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { return err } // Now, set the server read-only false. - if err := tm.MysqlDaemon.SetReadOnly(false); err != nil { + if err := tm.MysqlDaemon.SetReadOnly(ctx, false); err != nil { return err } @@ -562,6 +616,9 @@ func (tm *TabletManager) UndoDemotePrimary(ctx context.Context, semiSync bool) e // ReplicaWasPromoted promotes a replica to primary, no questions asked. 
func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { log.Infof("ReplicaWasPromoted") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } @@ -572,12 +629,15 @@ func (tm *TabletManager) ReplicaWasPromoted(ctx context.Context) error { // ResetReplicationParameters resets the replica replication parameters func (tm *TabletManager) ResetReplicationParameters(ctx context.Context) error { log.Infof("ResetReplicationParameters") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - err := tm.MysqlDaemon.StopReplication(tm.hookExtraEnv()) + err := tm.MysqlDaemon.StopReplication(ctx, tm.hookExtraEnv()) if err != nil { return err } @@ -591,21 +651,24 @@ func (tm *TabletManager) ResetReplicationParameters(ctx context.Context) error { // SetReplicationSource sets replication primary, and waits for the // reparent_journal table entry up to context timeout -func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool) error { +func (tm *TabletManager) SetReplicationSource(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error { log.Infof("SetReplicationSource: parent: %v position: %s force: %v semiSync: %v timeCreatedNS: %d", parentAlias, waitPosition, forceStartReplication, semiSync, timeCreatedNS) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } defer tm.unlock() - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return err } // setReplicationSourceLocked also fixes the 
semi-sync. In case the tablet type is primary it assumes that it will become a replica if SetReplicationSource // is called, so we always call fixSemiSync with a non-primary tablet type. This will always set the source side replication to false. - return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, semiSyncAction) + return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, semiSyncAction, heartbeatInterval) } func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool) error { @@ -615,10 +678,10 @@ func (tm *TabletManager) setReplicationSourceSemiSyncNoAction(ctx context.Contex } defer tm.unlock() - return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, SemiSyncActionNone) + return tm.setReplicationSourceLocked(ctx, parentAlias, timeCreatedNS, waitPosition, forceStartReplication, SemiSyncActionNone, 0) } -func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync SemiSyncAction) (err error) { +func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentAlias *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync SemiSyncAction, heartbeatInterval float64) (err error) { // Change our type to REPLICA if we used to be PRIMARY. // Being sent SetReplicationSource means another PRIMARY has been successfully promoted, // so we convert to REPLICA first, since we want to do it even if other @@ -635,7 +698,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA // See if we were replicating at all, and should be replicating. 
wasReplicating := false shouldbeReplicating := false - status, err := tm.MysqlDaemon.ReplicationStatusWithContext(ctx) + status, err := tm.MysqlDaemon.ReplicationStatus(ctx) if err == mysql.ErrNotReplica { // This is a special error that means we actually succeeded in reading // the status, but the status is empty because replication is not @@ -663,7 +726,7 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA if tabletType == topodatapb.TabletType_PRIMARY { tabletType = topodatapb.TabletType_REPLICA } - if err := tm.fixSemiSync(tabletType, semiSync); err != nil { + if err := tm.fixSemiSync(ctx, tabletType, semiSync); err != nil { return err } // Update the primary/source address only if needed. @@ -682,23 +745,23 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA if host == "" { return vterrors.New(vtrpc.Code_FAILED_PRECONDITION, "Shard primary has empty mysql hostname") } - if status.SourceHost != host || status.SourcePort != port { + if status.SourceHost != host || status.SourcePort != port || heartbeatInterval != 0 { // This handles both changing the address and starting replication. - if err := tm.MysqlDaemon.SetReplicationSource(ctx, host, port, wasReplicating, shouldbeReplicating); err != nil { - if err := tm.handleRelayLogError(err); err != nil { + if err := tm.MysqlDaemon.SetReplicationSource(ctx, host, port, heartbeatInterval, wasReplicating, shouldbeReplicating); err != nil { + if err := tm.handleRelayLogError(ctx, err); err != nil { return err } } } else if shouldbeReplicating { // The address is correct. 
We need to restart replication so that any semi-sync changes if any // are taken into account - if err := tm.MysqlDaemon.StopReplication(tm.hookExtraEnv()); err != nil { - if err := tm.handleRelayLogError(err); err != nil { + if err := tm.MysqlDaemon.StopReplication(ctx, tm.hookExtraEnv()); err != nil { + if err := tm.handleRelayLogError(ctx, err); err != nil { return err } } - if err := tm.MysqlDaemon.StartReplication(tm.hookExtraEnv()); err != nil { - if err := tm.handleRelayLogError(err); err != nil { + if err := tm.MysqlDaemon.StartReplication(ctx, tm.hookExtraEnv()); err != nil { + if err := tm.handleRelayLogError(ctx, err); err != nil { return err } } @@ -732,6 +795,9 @@ func (tm *TabletManager) setReplicationSourceLocked(ctx context.Context, parentA // ReplicaWasRestarted updates the parent record for a tablet. func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topodatapb.TabletAlias) error { log.Infof("ReplicaWasRestarted: parent: %v", parent) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return err + } if err := tm.lock(ctx); err != nil { return err } @@ -750,6 +816,9 @@ func (tm *TabletManager) ReplicaWasRestarted(ctx context.Context, parent *topoda // current status. func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopReplicationMode replicationdatapb.StopReplicationMode) (StopReplicationAndGetStatusResponse, error) { log.Infof("StopReplicationAndGetStatus: mode: %v", stopReplicationMode) + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return StopReplicationAndGetStatusResponse{}, err + } if err := tm.lock(ctx); err != nil { return StopReplicationAndGetStatusResponse{}, err } @@ -758,7 +827,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe // Get the status before we stop replication. 
// Doing this first allows us to return the status in the case that stopping replication // returns an error, so a user can optionally inspect the status before a stop was called. - rs, err := tm.MysqlDaemon.ReplicationStatusWithContext(ctx) + rs, err := tm.MysqlDaemon.ReplicationStatus(ctx) if err != nil { return StopReplicationAndGetStatusResponse{}, vterrors.Wrap(err, "before status failed") } @@ -800,7 +869,7 @@ func (tm *TabletManager) StopReplicationAndGetStatus(ctx context.Context, stopRe } // Get the status after we stop replication so we have up to date position and relay log positions. - rsAfter, err := tm.MysqlDaemon.ReplicationStatusWithContext(ctx) + rsAfter, err := tm.MysqlDaemon.ReplicationStatus(ctx) if err != nil { return StopReplicationAndGetStatusResponse{ Status: &replicationdatapb.StopReplicationStatus{ @@ -833,23 +902,26 @@ type StopReplicationAndGetStatusResponse struct { // PromoteReplica makes the current tablet the primary func (tm *TabletManager) PromoteReplica(ctx context.Context, semiSync bool) (string, error) { log.Infof("PromoteReplica") + if err := tm.waitForGrantsToHaveApplied(ctx); err != nil { + return "", err + } if err := tm.lock(ctx); err != nil { return "", err } defer tm.unlock() - pos, err := tm.MysqlDaemon.Promote(tm.hookExtraEnv()) + pos, err := tm.MysqlDaemon.Promote(ctx, tm.hookExtraEnv()) if err != nil { return "", err } - semiSyncAction, err := tm.convertBoolToSemiSyncAction(semiSync) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, semiSync) if err != nil { return "", err } // If using semi-sync, we need to enable it before going read-write. 
- if err := tm.fixSemiSync(topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { + if err := tm.fixSemiSync(ctx, topodatapb.TabletType_PRIMARY, semiSyncAction); err != nil { return "", err } @@ -868,27 +940,27 @@ func isPrimaryEligible(tabletType topodatapb.TabletType) bool { return false } -func (tm *TabletManager) fixSemiSync(tabletType topodatapb.TabletType, semiSync SemiSyncAction) error { +func (tm *TabletManager) fixSemiSync(ctx context.Context, tabletType topodatapb.TabletType, semiSync SemiSyncAction) error { switch semiSync { case SemiSyncActionNone: return nil case SemiSyncActionSet: // Always enable replica-side since it doesn't hurt to keep it on for a primary. // The primary-side needs to be off for a replica, or else it will get stuck. - return tm.MysqlDaemon.SetSemiSyncEnabled(tabletType == topodatapb.TabletType_PRIMARY, true) + return tm.MysqlDaemon.SetSemiSyncEnabled(ctx, tabletType == topodatapb.TabletType_PRIMARY, true) case SemiSyncActionUnset: - return tm.MysqlDaemon.SetSemiSyncEnabled(false, false) + return tm.MysqlDaemon.SetSemiSyncEnabled(ctx, false, false) default: return vterrors.Errorf(vtrpc.Code_INTERNAL, "Unknown SemiSyncAction - %v", semiSync) } } -func (tm *TabletManager) isPrimarySideSemiSyncEnabled() bool { - semiSyncEnabled, _ := tm.MysqlDaemon.SemiSyncEnabled() +func (tm *TabletManager) isPrimarySideSemiSyncEnabled(ctx context.Context) bool { + semiSyncEnabled, _ := tm.MysqlDaemon.SemiSyncEnabled(ctx) return semiSyncEnabled } -func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletType, semiSync SemiSyncAction) error { +func (tm *TabletManager) fixSemiSyncAndReplication(ctx context.Context, tabletType topodatapb.TabletType, semiSync SemiSyncAction) error { if semiSync == SemiSyncActionNone { // Semi-sync handling is not required. 
return nil @@ -901,14 +973,14 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT return nil } - if err := tm.fixSemiSync(tabletType, semiSync); err != nil { + if err := tm.fixSemiSync(ctx, tabletType, semiSync); err != nil { return vterrors.Wrapf(err, "failed to fixSemiSync(%v)", tabletType) } // If replication is running, but the status is wrong, // we should restart replication. First, let's make sure // replication is running. - status, err := tm.MysqlDaemon.ReplicationStatus() + status, err := tm.MysqlDaemon.ReplicationStatus(ctx) if err != nil { // Replication is not configured, nothing to do. return nil @@ -920,7 +992,7 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT // shouldAck := semiSync == SemiSyncActionSet shouldAck := isPrimaryEligible(tabletType) - acking, err := tm.MysqlDaemon.SemiSyncReplicationStatus() + acking, err := tm.MysqlDaemon.SemiSyncReplicationStatus(ctx) if err != nil { return vterrors.Wrap(err, "failed to get SemiSyncReplicationStatus") } @@ -930,10 +1002,10 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT // We need to restart replication log.Infof("Restarting replication for semi-sync flag change to take effect from %v to %v", acking, shouldAck) - if err := tm.MysqlDaemon.StopReplication(tm.hookExtraEnv()); err != nil { + if err := tm.MysqlDaemon.StopReplication(ctx, tm.hookExtraEnv()); err != nil { return vterrors.Wrap(err, "failed to StopReplication") } - if err := tm.MysqlDaemon.StartReplication(tm.hookExtraEnv()); err != nil { + if err := tm.MysqlDaemon.StartReplication(ctx, tm.hookExtraEnv()); err != nil { return vterrors.Wrap(err, "failed to StartReplication") } return nil @@ -943,18 +1015,29 @@ func (tm *TabletManager) fixSemiSyncAndReplication(tabletType topodatapb.TabletT // This is required because sometimes MySQL gets stuck due to improper initialization of // master info structure or related failures and throws errors 
like // ERROR 1201 (HY000): Could not initialize master info structure; more error messages can be found in the MySQL error log -// These errors can only be resolved by resetting the replication, otherwise START SLAVE fails. -func (tm *TabletManager) handleRelayLogError(err error) error { +// These errors can only be resolved by resetting the replication, otherwise START REPLICA fails. +func (tm *TabletManager) handleRelayLogError(ctx context.Context, err error) error { // attempt to fix this error: - // Slave failed to initialize relay log info structure from the repository (errno 1872) (sqlstate HY000) during query: START SLAVE + // Replica failed to initialize relay log info structure from the repository (errno 1872) (sqlstate HY000) during query: START REPLICA // see https://bugs.mysql.com/bug.php?id=83713 or https://github.com/vitessio/vitess/issues/5067 // The same fix also works for https://github.com/vitessio/vitess/issues/10955. - if strings.Contains(err.Error(), "Slave failed to initialize relay log info structure from the repository") || strings.Contains(err.Error(), "Could not initialize master info structure") { + if strings.Contains(err.Error(), "Replica failed to initialize relay log info structure from the repository") || + strings.Contains(err.Error(), "Could not initialize master info structure") { // Stop, reset and start replication again to resolve this error - if err := tm.MysqlDaemon.RestartReplication(tm.hookExtraEnv()); err != nil { + if err := tm.MysqlDaemon.RestartReplication(ctx, tm.hookExtraEnv()); err != nil { return err } return nil } return err } + +// waitForGrantsToHaveApplied wait for the grants to have applied for. 
+func (tm *TabletManager) waitForGrantsToHaveApplied(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-tm._waitForGrantsComplete: + } + return nil +} diff --git a/go/vt/vttablet/tabletmanager/rpc_replication_test.go b/go/vt/vttablet/tabletmanager/rpc_replication_test.go new file mode 100644 index 00000000000..c587f1e24b8 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/rpc_replication_test.go @@ -0,0 +1,44 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletmanager + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestWaitForGrantsToHaveApplied tests that waitForGrantsToHaveApplied only succeeds after waitForDBAGrants has been called. 
+func TestWaitForGrantsToHaveApplied(t *testing.T) { + tm := &TabletManager{ + _waitForGrantsComplete: make(chan struct{}), + } + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + err := tm.waitForGrantsToHaveApplied(ctx) + require.ErrorContains(t, err, "deadline exceeded") + + err = tm.waitForDBAGrants(nil, 0) + require.NoError(t, err) + + secondContext, secondCancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer secondCancel() + err = tm.waitForGrantsToHaveApplied(secondContext) + require.NoError(t, err) +} diff --git a/go/vt/vttablet/tabletmanager/rpc_server.go b/go/vt/vttablet/tabletmanager/rpc_server.go index da4d4e0b042..78beed43dad 100644 --- a/go/vt/vttablet/tabletmanager/rpc_server.go +++ b/go/vt/vttablet/tabletmanager/rpc_server.go @@ -70,7 +70,7 @@ func (tm *TabletManager) HandleRPCPanic(ctx context.Context, name string, args, if *err != nil { // error case log.Warningf("TabletManager.%v(%v)(on %v from %v) error: %v", name, args, topoproto.TabletAliasString(tm.tabletAlias), from, (*err).Error()) - *err = vterrors.Wrapf(*err, "TabletManager.%v on %v error: %v", name, topoproto.TabletAliasString(tm.tabletAlias), (*err).Error()) + *err = vterrors.Wrapf(*err, "TabletManager.%v on %v", name, topoproto.TabletAliasString(tm.tabletAlias)) } else { // success case log.Infof("TabletManager.%v(%v)(on %v from %v): %#v", name, args, topoproto.TabletAliasString(tm.tabletAlias), from, reply) diff --git a/go/vt/vttablet/tabletmanager/rpc_throttler.go b/go/vt/vttablet/tabletmanager/rpc_throttler.go index dfdc0d230fb..c961761c5f2 100644 --- a/go/vt/vttablet/tabletmanager/rpc_throttler.go +++ b/go/vt/vttablet/tabletmanager/rpc_throttler.go @@ -19,6 +19,7 @@ package tabletmanager import ( "context" + "vitess.io/vitess/go/stats" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -28,6 +29,7 @@ import ( // 
CheckThrottler executes a throttler check func (tm *TabletManager) CheckThrottler(ctx context.Context, req *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + go stats.GetOrNewCounter("ThrottlerCheckRequest", "CheckThrottler requests").Add(1) if req.AppName == "" { req.AppName = throttlerapp.VitessName.String() } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index b18caa1063f..a274da98fdf 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -18,11 +18,14 @@ package tabletmanager import ( "context" + "fmt" "strings" + "golang.org/x/exp/maps" "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/discovery" @@ -40,15 +43,27 @@ import ( const ( // Create a new VReplication workflow record. - sqlCreateVReplicationWorkflow = "insert into %s.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values (%a, %a, '', 0, 0, %a, %a, now(), 0, %a, %a, %a, %a, %a)" + sqlCreateVReplicationWorkflow = "insert into %s.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options) values (%a, %a, '', 0, 0, %a, %a, now(), 0, %a, %a, %a, %a, %a, %a)" + sqlHasVReplicationWorkflows = "select if(count(*) > 0, 1, 0) as has_workflows from %s.vreplication where db_name = %a" + // Read all VReplication workflows. The final format specifier is used to + // optionally add any additional predicates to the query. 
+ sqlReadVReplicationWorkflows = "select workflow, id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from %s.vreplication where db_name = %a%s group by workflow, id order by workflow, id" // Read a VReplication workflow. - sqlReadVReplicationWorkflow = "select id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys from %s.vreplication where workflow = %a and db_name = %a" + sqlReadVReplicationWorkflow = "select id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from %s.vreplication where workflow = %a and db_name = %a" // Delete VReplication records for the given workflow. sqlDeleteVReplicationWorkflow = "delete from %s.vreplication where workflow = %a and db_name = %a" // Retrieve the current configuration values for a workflow's vreplication stream(s). sqlSelectVReplicationWorkflowConfig = "select id, source, cell, tablet_types, state, message from %s.vreplication where workflow = %a" // Update the configuration values for a workflow's vreplication stream. sqlUpdateVReplicationWorkflowStreamConfig = "update %s.vreplication set state = %a, source = %a, cell = %a, tablet_types = %a where id = %a" + // Update field values for multiple workflows. The final format specifier is + // used to optionally add any additional predicates to the query. 
+ sqlUpdateVReplicationWorkflows = "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ %s.vreplication set%s where db_name = '%s'%s" +) + +var ( + errNoFieldsToUpdate = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "no field values provided to update") + errAllWithIncludeExcludeWorkflows = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "cannot specify all workflows along with either of include or exclude workflows") ) func (tm *TabletManager) CreateVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) { @@ -57,13 +72,14 @@ func (tm *TabletManager) CreateVReplicationWorkflow(ctx context.Context, req *ta } res := &sqltypes.Result{} for _, bls := range req.BinlogSource { + protoutil.SortBinlogSourceTables(bls) source, err := prototext.Marshal(bls) if err != nil { return nil, err } // Use the local cell if none are specified. if len(req.Cells) == 0 || strings.TrimSpace(req.Cells[0]) == "" { - req.Cells = append(req.Cells, tm.Tablet().Alias.Cell) + req.Cells = []string{tm.Tablet().Alias.Cell} } wfState := binlogdatapb.VReplicationWorkflowState_Stopped.String() tabletTypesStr := topoproto.MakeStringTypeCSV(req.TabletTypes) @@ -80,9 +96,11 @@ func (tm *TabletManager) CreateVReplicationWorkflow(ctx context.Context, req *ta "workflowType": sqltypes.Int64BindVariable(int64(req.WorkflowType)), "workflowSubType": sqltypes.Int64BindVariable(int64(req.WorkflowSubType)), "deferSecondaryKeys": sqltypes.BoolBindVariable(req.DeferSecondaryKeys), + "options": sqltypes.StringBindVariable(req.Options), } parsed := sqlparser.BuildParsedQuery(sqlCreateVReplicationWorkflow, sidecar.GetIdentifier(), - ":workflow", ":source", ":cells", ":tabletTypes", ":state", ":dbname", ":workflowType", ":workflowSubType", ":deferSecondaryKeys", + ":workflow", ":source", ":cells", ":tabletTypes", ":state", ":dbname", ":workflowType", ":workflowSubType", + ":deferSecondaryKeys", ":options", ) 
stmt, err := parsed.GenerateQuery(bindVars, nil) if err != nil { @@ -122,6 +140,140 @@ func (tm *TabletManager) DeleteVReplicationWorkflow(ctx context.Context, req *ta return &tabletmanagerdatapb.DeleteVReplicationWorkflowResponse{Result: sqltypes.ResultToProto3(res)}, nil } +func (tm *TabletManager) HasVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + bindVars := map[string]*querypb.BindVariable{ + "db": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + } + parsed := sqlparser.BuildParsedQuery(sqlHasVReplicationWorkflows, sidecar.GetIdentifier(), ":db") + stmt, err := parsed.GenerateQuery(bindVars, nil) + if err != nil { + return nil, err + } + res, err := tm.VREngine.Exec(stmt) + if err != nil { + return nil, err + } + // This should never occur. Let the caller decide how to treat it. + if res == nil || len(res.Rows) == 0 { + return nil, nil + } + if len(res.Rows) != 1 || len(res.Rows[0]) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected response to query %q: expected 1 row with 1 column but got %d row(s) with %d column(s)", + parsed.Query, len(res.Rows), len(res.Rows[0])) + } + has, err := res.Rows[0][0].ToBool() + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected response to query %q: could not convert %q to boolean", + parsed.Query, res.Rows[0][0].ToString()) + } + + return &tabletmanagerdatapb.HasVReplicationWorkflowsResponse{Has: has}, nil +} + +func (tm *TabletManager) ReadVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + query, err := tm.buildReadVReplicationWorkflowsQuery(req) + if err != nil { + return nil, err + } + res, err := tm.VREngine.Exec(query) + if err != nil { + return nil, err + } + resp := &tabletmanagerdatapb.ReadVReplicationWorkflowsResponse{} + if res 
== nil || len(res.Rows) == 0 { + return resp, nil + } + rows := res.Named().Rows + workflows := make(map[string]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, len(rows)) + + for _, row := range rows { + workflow := row["workflow"].ToString() + if workflows[workflow] == nil { + workflows[workflow] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{Workflow: workflow} + } + workflows[workflow].Cells = row["cell"].ToString() + tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(row["tablet_types"].ToString()) + if err != nil { + return nil, vterrors.Wrap(err, "error parsing the tablet_types field from vreplication table record") + } + workflows[workflow].TabletTypes = tabletTypes + workflows[workflow].TabletSelectionPreference = tabletmanagerdatapb.TabletSelectionPreference_ANY + if inorder { + workflows[workflow].TabletSelectionPreference = tabletmanagerdatapb.TabletSelectionPreference_INORDER + } + workflows[workflow].DbName = row["db_name"].ToString() + workflows[workflow].Tags = row["tags"].ToString() + wft, err := row["workflow_type"].ToInt32() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing workflow_type field from vreplication table record") + } + workflows[workflow].WorkflowType = binlogdatapb.VReplicationWorkflowType(wft) + wfst, err := row["workflow_sub_type"].ToInt32() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing workflow_sub_type field from vreplication table record") + } + workflows[workflow].WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType(wfst) + workflows[workflow].DeferSecondaryKeys = row["defer_secondary_keys"].ToString() == "1" + workflows[workflow].Options = row["options"].ToString() + // Now the individual streams (there can be more than 1 with shard merges). 
+ if workflows[workflow].Streams == nil { + workflows[workflow].Streams = make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, 0, 1) + } + stream := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{} + if stream.Id, err = row["id"].ToInt32(); err != nil { + return nil, vterrors.Wrap(err, "error parsing id field from vreplication table record") + } + srcBytes, err := row["source"].ToBytes() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing binlog_source field from vreplication table record") + } + bls := &binlogdatapb.BinlogSource{} + err = prototext.Unmarshal(srcBytes, bls) + if err != nil { + return nil, vterrors.Wrap(err, "error unmarshaling binlog_source field from vreplication table record") + } + stream.Bls = bls + stream.Pos = row["pos"].ToString() + stream.StopPos = row["stop_pos"].ToString() + if stream.MaxTps, err = row["max_tps"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing max_tps field from vreplication table record") + } + if stream.MaxReplicationLag, err = row["max_replication_lag"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error parsing max_replication_lag field from vreplication table record") + } + timeUpdated, err := row["time_updated"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_updated field from vreplication table record") + } + stream.TimeUpdated = &vttime.Time{Seconds: timeUpdated} + txTimestamp, err := row["transaction_timestamp"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing transaction_timestamp field from vreplication table record") + } + stream.TransactionTimestamp = &vttime.Time{Seconds: txTimestamp} + stream.State = binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[row["state"].ToString()]) + stream.Message = row["message"].ToString() + if stream.RowsCopied, err = row["rows_copied"].ToInt64(); err != nil { + return nil, vterrors.Wrap(err, "error 
parsing rows_copied field from vreplication table record") + } + timeHeartbeat, err := row["time_heartbeat"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_heartbeat field from vreplication table record") + } + stream.TimeHeartbeat = &vttime.Time{Seconds: timeHeartbeat} + timeThrottled, err := row["time_throttled"].ToInt64() + if err != nil { + return nil, vterrors.Wrap(err, "error parsing time_throttled field from vreplication table record") + } + stream.TimeThrottled = &vttime.Time{Seconds: timeThrottled} + stream.ComponentThrottled = row["component_throttled"].ToString() + workflows[workflow].Streams = append(workflows[workflow].Streams, stream) + } + resp.Workflows = maps.Values(workflows) + + return resp, nil +} + func (tm *TabletManager) ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { if req == nil || req.Workflow == "" { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid request, no workflow provided") @@ -170,7 +322,7 @@ func (tm *TabletManager) ReadVReplicationWorkflow(ctx context.Context, req *tabl } resp.WorkflowSubType = binlogdatapb.VReplicationWorkflowSubType(wfst) resp.DeferSecondaryKeys = rows[0]["defer_secondary_keys"].ToString() == "1" - + resp.Options = rows[0]["options"].ToString() // Now the individual streams (there can be more than 1 with shard merges). 
for i, row := range rows { streams[i] = &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{} @@ -257,9 +409,13 @@ func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *ta return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{Result: nil}, nil } + rowsAffected := uint64(0) for _, row := range res.Named().Rows { id := row.AsInt64("id", 0) cells := strings.Split(row.AsString("cell", ""), ",") + for i := range cells { + cells[i] = strings.TrimSpace(cells[i]) + } tabletTypes, inorder, err := discovery.ParseTabletTypesAndOrder(row.AsString("tablet_types", "")) if err != nil { return nil, err @@ -282,8 +438,8 @@ func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *ta tabletTypes = req.TabletTypes } tabletTypesStr := topoproto.MakeStringTypeCSV(tabletTypes) - if inorder && req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN || - req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER { + if (inorder && req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN) || + (req.TabletSelectionPreference == tabletmanagerdatapb.TabletSelectionPreference_INORDER) { tabletTypesStr = discovery.InOrderHint + tabletTypesStr } if err = prototext.Unmarshal(source, bls); err != nil { @@ -317,19 +473,43 @@ func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *ta if err != nil { return nil, err } + rowsAffected += res.RowsAffected } return &tabletmanagerdatapb.UpdateVReplicationWorkflowResponse{ Result: &querypb.QueryResult{ - RowsAffected: uint64(len(res.Rows)), + RowsAffected: rowsAffected, + }, + }, nil +} + +// UpdateVReplicationWorkflows operates in much the same way that +// UpdateVReplicationWorkflow does, but it allows you to update the +// metadata/flow control fields -- state, message, and stop_pos -- for +// multiple workflows. 
+// Note: today this is only used during Reshard as all of the vreplication +// streams need to be migrated from the old shards to the new ones. +func (tm *TabletManager) UpdateVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) { + query, err := tm.buildUpdateVReplicationWorkflowsQuery(req) + if err != nil { + return nil, err + } + res, err := tm.VREngine.Exec(query) + if err != nil { + return nil, err + } + + return &tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse{ + Result: &querypb.QueryResult{ + RowsAffected: res.RowsAffected, }, }, nil } // VReplicationExec executes a vreplication command. func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) { - // Replace any provided sidecar databsae qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + // Replace any provided sidecar database qualifiers with the correct one. + uq, err := tm.Env.Parser().ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -344,3 +524,131 @@ func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*q func (tm *TabletManager) VReplicationWaitForPos(ctx context.Context, id int32, pos string) error { return tm.VREngine.WaitForPos(ctx, id, pos) } + +// buildReadVReplicationWorkflowsQuery builds the SQL query used to read N +// vreplication workflows based on the request. 
+func (tm *TabletManager) buildReadVReplicationWorkflowsQuery(req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (string, error) { + bindVars := map[string]*querypb.BindVariable{ + "db": sqltypes.StringBindVariable(tm.DBConfigs.DBName), + } + + additionalPredicates := strings.Builder{} + if req.GetExcludeFrozen() { + additionalPredicates.WriteString(fmt.Sprintf(" and message != '%s'", workflow.Frozen)) + } + if len(req.GetIncludeIds()) > 0 { + additionalPredicates.WriteString(" and id in (") + for i, id := range req.GetIncludeIds() { + if i > 0 { + additionalPredicates.WriteByte(',') + } + additionalPredicates.WriteString(fmt.Sprintf("%d", id)) + } + additionalPredicates.WriteByte(')') + } + if len(req.GetIncludeWorkflows()) > 0 { + additionalPredicates.WriteString(" and workflow in (") + for i, wf := range req.GetIncludeWorkflows() { + if i > 0 { + additionalPredicates.WriteByte(',') + } + additionalPredicates.WriteString(sqltypes.EncodeStringSQL(wf)) + } + additionalPredicates.WriteByte(')') + } + if len(req.GetExcludeWorkflows()) > 0 { + additionalPredicates.WriteString(" and workflow not in (") + for i, wf := range req.GetExcludeWorkflows() { + if i > 0 { + additionalPredicates.WriteByte(',') + } + additionalPredicates.WriteString(sqltypes.EncodeStringSQL(wf)) + } + additionalPredicates.WriteByte(')') + } + if len(req.GetIncludeStates()) > 0 { + additionalPredicates.WriteString(" and state in (") + for i, state := range req.GetIncludeStates() { + if i > 0 { + additionalPredicates.WriteByte(',') + } + additionalPredicates.WriteString(sqltypes.EncodeStringSQL(state.String())) + } + additionalPredicates.WriteByte(')') + } + if len(req.GetExcludeStates()) > 0 { + additionalPredicates.WriteString(" and state not in (") + for i, state := range req.GetExcludeStates() { + if i > 0 { + additionalPredicates.WriteByte(',') + } + additionalPredicates.WriteString(sqltypes.EncodeStringSQL(state.String())) + } + additionalPredicates.WriteByte(')') + } + + parsed := 
sqlparser.BuildParsedQuery(sqlReadVReplicationWorkflows, sidecar.GetIdentifier(), ":db", additionalPredicates.String()) + return parsed.GenerateQuery(bindVars, nil) +} + +// buildUpdateVReplicationWorkflowsQuery builds the SQL query used to update +// the metadata/flow control fields for N vreplication workflows based on the +// request. +func (tm *TabletManager) buildUpdateVReplicationWorkflowsQuery(req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (string, error) { + if req.GetAllWorkflows() && (len(req.GetIncludeWorkflows()) > 0 || len(req.GetExcludeWorkflows()) > 0) { + return "", errAllWithIncludeExcludeWorkflows + } + if textutil.ValueIsSimulatedNull(req.GetState()) && textutil.ValueIsSimulatedNull(req.GetMessage()) && textutil.ValueIsSimulatedNull(req.GetStopPosition()) { + return "", errNoFieldsToUpdate + } + sets := strings.Builder{} + predicates := strings.Builder{} + + // First add the SET clauses. + if !textutil.ValueIsSimulatedNull(req.GetState()) { + state, ok := binlogdatapb.VReplicationWorkflowState_name[int32(req.GetState())] + if !ok { + return "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid state value: %v", req.GetState()) + } + sets.WriteString(" state = ") + sets.WriteString(sqltypes.EncodeStringSQL(state)) + } + if !textutil.ValueIsSimulatedNull(req.GetMessage()) { + if sets.Len() > 0 { + sets.WriteByte(',') + } + sets.WriteString(" message = ") + sets.WriteString(sqltypes.EncodeStringSQL(req.GetMessage())) + } + if !textutil.ValueIsSimulatedNull(req.GetStopPosition()) { + if sets.Len() > 0 { + sets.WriteByte(',') + } + sets.WriteString(" stop_pos = ") + sets.WriteString(sqltypes.EncodeStringSQL(req.GetStopPosition())) + } + + // Now add any WHERE predicate clauses. 
+ if len(req.GetIncludeWorkflows()) > 0 { + predicates.WriteString(" and workflow in (") + for i, wf := range req.GetIncludeWorkflows() { + if i > 0 { + predicates.WriteByte(',') + } + predicates.WriteString(sqltypes.EncodeStringSQL(wf)) + } + predicates.WriteByte(')') + } + if len(req.GetExcludeWorkflows()) > 0 { + predicates.WriteString(" and workflow not in (") + for i, wf := range req.GetExcludeWorkflows() { + if i > 0 { + predicates.WriteByte(',') + } + predicates.WriteString(sqltypes.EncodeStringSQL(wf)) + } + predicates.WriteByte(')') + } + + return sqlparser.BuildParsedQuery(sqlUpdateVReplicationWorkflows, sidecar.GetIdentifier(), sets.String(), tm.DBConfigs.DBName, predicates.String()).Query, nil +} diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index a471750da19..42e5129b40e 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -21,11 +21,11 @@ import ( "errors" "fmt" "math" + "reflect" "runtime/debug" "strings" "testing" - - "vitess.io/vitess/go/vt/vttablet" + "time" "github.com/stretchr/testify/require" @@ -33,10 +33,16 @@ import ( "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/workflow" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" + "vitess.io/vitess/go/vt/vttablet" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" @@ -44,34 +50,35 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" - 
"vitess.io/vitess/go/vt/proto/vttime" + vttimepb "vitess.io/vitess/go/vt/proto/vttime" ) const ( - insertVReplicationPrefix = "insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys)" - getWorkflow = "select id from _vt.vreplication where db_name='vt_%s' and workflow='%s'" - checkForWorkflow = "select 1 from _vt.vreplication where db_name='vt_%s' and workflow='%s'" - checkForFrozenWorkflow = "select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1" - freezeWorkflow = "update _vt.vreplication set message = 'FROZEN' where db_name='vt_%s' and workflow='%s'" + insertVReplicationPrefix = "insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options)" checkForJournal = "/select val from _vt.resharding_journal where id=" - getWorkflowStatus = "select id, workflow, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, message, tags, workflow_type, workflow_sub_type, time_heartbeat, defer_secondary_keys, component_throttled, time_throttled, rows_copied from _vt.vreplication where workflow = '%s' and db_name = 'vt_%s'" - getWorkflowState = "select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=1" + getWorkflowState = "select pos, stop_pos, max_tps, max_replication_lag, state, workflow_type, workflow, workflow_sub_type, defer_secondary_keys from _vt.vreplication where id=%d" getCopyState = "select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1" - getNumCopyStateTable = "select count(distinct table_name) from _vt.copy_state 
where vrepl_id=1" - getLatestCopyState = "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)" + getNumCopyStateTable = "select count(distinct table_name) from _vt.copy_state where vrepl_id=%d" + getLatestCopyState = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%d) and id in (select max(id) from _vt.copy_state where vrepl_id in (%d) group by vrepl_id, table_name)" getAutoIncrementStep = "select @@session.auto_increment_increment" setSessionTZ = "set @@session.time_zone = '+00:00'" setNames = "set names 'binary'" getBinlogRowImage = "select @@binlog_row_image" insertStreamsCreatedLog = "insert into _vt.vreplication_log(vrepl_id, type, state, message) values(1, 'Stream Created', '', '%s'" - getVReplicationRecord = "select * from _vt.vreplication where id = 1" + getVReplicationRecord = "select * from _vt.vreplication where id = %d" startWorkflow = "update _vt.vreplication set state='Running' where db_name='vt_%s' and workflow='%s'" stopForCutover = "update _vt.vreplication set state='Stopped', message='stopped for cutover' where id=1" getMaxValForSequence = "select max(`id`) as maxval from `vt_%s`.`%s`" initSequenceTable = "insert into %a.%a (id, next_id, cache) values (0, %d, 1000) on duplicate key update next_id = if(next_id < %d, %d, next_id)" deleteWorkflow = "delete from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'" - updatePickedSourceTablet = `update _vt.vreplication set message='Picked source tablet: cell:\"%s\" uid:%d' where id=1` - getRowsCopied = "SELECT rows_copied FROM _vt.vreplication WHERE id=1" + updatePickedSourceTablet = `update _vt.vreplication set message='Picked source tablet: cell:\"%s\" uid:%d' where id=%d` + getRowsCopied = "SELECT rows_copied FROM _vt.vreplication WHERE id=%d" + hasWorkflows = "select if(count(*) > 0, 1, 0) as has_workflows from _vt.vreplication where db_name = '%s'" + 
readAllWorkflows = "select workflow, id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from _vt.vreplication where db_name = '%s'%s group by workflow, id order by workflow, id" + readWorkflowsLimited = "select workflow, id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from _vt.vreplication where db_name = '%s' and workflow in ('%s') group by workflow, id order by workflow, id" + readWorkflow = "select id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from _vt.vreplication where workflow = '%s' and db_name = '%s'" + readWorkflowConfig = "select id, source, cell, tablet_types, state, message from _vt.vreplication where workflow = '%s'" + updateWorkflow = "update _vt.vreplication set state = '%s', source = '%s', cell = '%s', tablet_types = '%s' where id in (%d)" ) var ( @@ -111,7 +118,7 @@ func TestCreateVReplicationWorkflow(t *testing.T) { targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) defer tenv.deleteTablet(targetTablet.tablet) - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(vtenv.NewTestEnv(), tenv.ts, tenv.tmc) tests := []struct { name string @@ -128,7 +135,7 @@ func TestCreateVReplicationWorkflow(t *testing.T) { Cells: tenv.cells, AllTables: true, }, - query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" 
filter:\"select * from t1\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0, '{}')`, insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), }, { @@ -163,15 +170,91 @@ func TestCreateVReplicationWorkflow(t *testing.T) { DeferSecondaryKeys: true, AutoStart: true, }, - query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}} on_ddl:EXEC stop_after_copy:true source_time_zone:\"EDT\" target_time_zone:\"UTC\"', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 1)`, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"}} on_ddl:EXEC stop_after_copy:true source_time_zone:\"EDT\" target_time_zone:\"UTC\"', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 1, '{}')`, + insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), + }, + { + name: "binlog source order with include", + schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "zt", + Columns: []string{"id"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id", "int64"), + }, + { + Name: "t1", + Columns: []string{"id", "c2"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id|c2", "int64|int64"), + }, + { + Name: "wut", + Columns: []string{"id"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id", "int64"), + }, + }, + }, + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + IncludeTables: []string{"zt", "wut", "t1"}, + SourceTimeZone: "EDT", + OnDdl: binlogdatapb.OnDDLAction_EXEC.String(), + StopAfterCopy: true, + 
DropForeignKeys: true, + DeferSecondaryKeys: true, + AutoStart: true, + }, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"wut\" filter:\"select * from wut\"} rules:{match:\"zt\" filter:\"select * from zt\"}} on_ddl:EXEC stop_after_copy:true source_time_zone:\"EDT\" target_time_zone:\"UTC\"', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 1, '{}')`, + insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), + }, + { + name: "binlog source order with all-tables", + schema: &tabletmanagerdatapb.SchemaDefinition{ + TableDefinitions: []*tabletmanagerdatapb.TableDefinition{ + { + Name: "zt", + Columns: []string{"id"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id", "int64"), + }, + { + Name: "t1", + Columns: []string{"id", "c2"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id|c2", "int64|int64"), + }, + { + Name: "wut", + Columns: []string{"id"}, + PrimaryKeyColumns: []string{"id"}, + Fields: sqltypes.MakeTestFields("id", "int64"), + }, + }, + }, + req: &vtctldatapb.MoveTablesCreateRequest{ + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + Workflow: wf, + Cells: tenv.cells, + AllTables: true, + SourceTimeZone: "EDT", + OnDdl: binlogdatapb.OnDDLAction_EXEC.String(), + StopAfterCopy: true, + DropForeignKeys: true, + DeferSecondaryKeys: true, + AutoStart: true, + }, + query: fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"wut\" filter:\"select * from wut\"} rules:{match:\"zt\" filter:\"select * from zt\"}} on_ddl:EXEC stop_after_copy:true source_time_zone:\"EDT\" target_time_zone:\"UTC\"', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 1, '{}')`, insertVReplicationPrefix, wf, sourceKs, shard, tenv.cells[0], tenv.dbName), }, } - tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf("select 1 
from _vt.vreplication where db_name='vt_%s' and workflow='%s'", - targetKs, wf), &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1", - targetKs), &sqltypes.Result{}) tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, "select val from _vt.resharding_journal where id=7224776740563431192", &sqltypes.Result{}) for _, tt := range tests { @@ -192,11 +275,13 @@ func TestCreateVReplicationWorkflow(t *testing.T) { } tenv.tmc.SetSchema(tt.schema) - tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) // This is our expected query, which will also short circuit // the test with an error as at this point we've tested what // we wanted to test. - tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest(tt.query, nil, errShortCircuit) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(tt.query, &sqltypes.Result{}, errShortCircuit) _, err := ws.MoveTablesCreate(ctx, tt.req) tenv.tmc.tablets[targetTabletUID].vrdbClient.Wait() require.ErrorIs(t, err, errShortCircuit) @@ -204,10 +289,11 @@ func TestCreateVReplicationWorkflow(t *testing.T) { } } -// TestMoveTables tests the query generated from a VtctldServer -// MoveTablesCreate request to ensure that the VReplication -// stream(s) are created correctly. Followed by ensuring that -// SwitchTraffic and ReverseTraffic work as expected. 
+// TestMoveTables tests the query sequence originating from a +// VtctldServer MoveTablesCreate request to ensure that the +// VReplication stream(s) are created correctly and expected +// results returned. Followed by ensuring that SwitchTraffic +// and ReverseTraffic also work as expected. func TestMoveTables(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -219,6 +305,7 @@ func TestMoveTables(t *testing.T) { globalKs := "global" globalShard := "0" wf := "testwf" + vreplID := 1 tabletTypes := []topodatapb.TabletType{ topodatapb.TabletType_PRIMARY, topodatapb.TabletType_REPLICA, @@ -239,7 +326,7 @@ func TestMoveTables(t *testing.T) { globalTablet := tenv.addTablet(t, 500, globalKs, globalShard) defer tenv.deleteTablet(globalTablet.tablet) - tenv.ts.SaveVSchema(ctx, globalKs, &vschemapb.Keyspace{ + err := tenv.ts.SaveVSchema(ctx, globalKs, &vschemapb.Keyspace{ Sharded: false, Tables: map[string]*vschemapb.Table{ "t1_seq": { @@ -247,7 +334,8 @@ func TestMoveTables(t *testing.T) { }, }, }) - tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + require.NoError(t, err) + err = tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "hash": { @@ -267,8 +355,20 @@ func TestMoveTables(t *testing.T) { }, }, }) + require.NoError(t, err) - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(vtenv.NewTestEnv(), tenv.ts, tenv.tmc) + + idQuery, err := sqlparser.ParseAndBind("select id from _vt.vreplication where id = %a", + sqltypes.Int64BindVariable(int64(vreplID))) + require.NoError(t, err) + idRes := sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + fmt.Sprintf("%d", vreplID), + ) tenv.mysqld.Schema = defaultSchema tenv.mysqld.Schema.DatabaseSchema = tenv.dbName @@ -289,118 +389,68 @@ func TestMoveTables(t *testing.T) { tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, checkForJournal, &sqltypes.Result{}) for _, ftc := 
range targetShards { - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(checkForWorkflow, targetKs, wf), &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(checkForFrozenWorkflow, targetKs), &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "id", - "int64", - ), - "1", - ), - ) - tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflowStatus, wf, targetKs), - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied", - "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64", - ), - fmt.Sprintf("1|%s|%s|%s|NULL|0|running|vt_%s|1686577659|0|||1|0|0|0||0|10", wf, bls, position, targetKs), - ), - ) - tenv.tmc.setVReplicationExecResults(ftc.tablet, getLatestCopyState, &sqltypes.Result{}) + addInvariants(ftc.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) - ftc.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - insert := fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', 1, 0, 0)`, - insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, ftc.tablet.Shard, tenv.cells[0], tenv.dbName) + tenv.tmc.setVReplicationExecResults(ftc.tablet, getCopyState, &sqltypes.Result{}) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + insert := fmt.Sprintf(`%s values 
('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,replica,rdonly', now(), 0, 'Stopped', '%s', %d, 0, 0, '{}')`, + insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, ftc.tablet.Shard, tenv.cells[0], tenv.dbName, vreplID) ftc.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: 1}, nil) ftc.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) - ftc.vrdbClient.ExpectRequest(getVReplicationRecord, + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), sqltypes.MakeTestResult( sqltypes.MakeTestFields( "id|source", "int64|varchar", ), - fmt.Sprintf("1|%s", bls), + fmt.Sprintf("%d|%s", vreplID, bls), ), nil) - ftc.vrdbClient.ExpectRequest(fmt.Sprintf(updatePickedSourceTablet, tenv.cells[0], sourceTabletUID), &sqltypes.Result{}, nil) - ftc.vrdbClient.ExpectRequest(setSessionTZ, &sqltypes.Result{}, nil) - ftc.vrdbClient.ExpectRequest(setNames, &sqltypes.Result{}, nil) - ftc.vrdbClient.ExpectRequest(setNetReadTimeout, &sqltypes.Result{}, nil) - ftc.vrdbClient.ExpectRequest(setNetWriteTimeout, &sqltypes.Result{}, nil) - ftc.vrdbClient.ExpectRequest(getRowsCopied, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "rows_copied", - "int64", - ), - "0", - ), - nil, - ) - ftc.vrdbClient.ExpectRequest(getWorkflowState, sqltypes.MakeTestResult( + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", - "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + "id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + 
"int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Stopped||%s|1||0|0|0||0|1", vreplID, bls, position, targetKs), ), nil) - ftc.vrdbClient.ExpectRequest(getNumCopyStateTable, sqltypes.MakeTestResult( + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflowConfig, wf), sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "count(distinct table_name)", - "int64", + "id|source|cell|tablet_types|state|message", + "int64|blob|varchar|varchar|varchar|varchar", ), - "1", + fmt.Sprintf("%d|%s|||Stopped|", vreplID, bls), ), nil) - ftc.vrdbClient.ExpectRequest(getWorkflowState, sqltypes.MakeTestResult( + ftc.vrdbClient.ExpectRequest(idQuery, idRes, nil) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(updateWorkflow, binlogdatapb.VReplicationWorkflowState_Running.String(), strings.Replace(bls, `"`, `\"`, -1), "", "", vreplID), &sqltypes.Result{}, nil) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", - "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + "id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - 
fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", vreplID, bls, position, targetKs), ), nil) - ftc.vrdbClient.ExpectRequest(getNumCopyStateTable, sqltypes.MakeTestResult( + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflowsLimited, tenv.dbName, wf), sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "count(distinct table_name)", - "int64", + "workflow|id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "workflow|int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - "1", + fmt.Sprintf("%s|%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", wf, vreplID, bls, position, targetKs), ), nil) - ftc.vrdbClient.ExpectRequest(getBinlogRowImage, sqltypes.MakeTestResult( + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "@@binlog_row_image", - "varchar", + "id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - "FULL", + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", vreplID, bls, position, targetKs), ), nil) - - ftc.vrdbClient.ExpectRequest(fmt.Sprintf(insertStreamsCreatedLog, bls), &sqltypes.Result{}, nil) - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "id", - 
"int64", - ), - "1", - ), - ) - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(startWorkflow, targetKs, wf), &sqltypes.Result{}) - ftc.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) - - tenv.tmc.setVReplicationExecResults(ftc.tablet, stopForCutover, &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(freezeWorkflow, targetKs, wf), &sqltypes.Result{}) - - tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getMaxValForSequence, targetKs, "t1"), - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "maxval", - "int64", - ), - fmt.Sprintf("%d", ftc.tablet.Alias.Uid), // Use the tablet's UID as the max value - ), - ) + tenv.tmc.setVReplicationExecResults(ftc.tablet, fmt.Sprintf(getLatestCopyState, vreplID, vreplID), &sqltypes.Result{}) } // We use the tablet's UID in the mocked results for the max value used on each target shard. @@ -410,7 +460,7 @@ func TestMoveTables(t *testing.T) { &sqltypes.Result{RowsAffected: 0}, ) - _, err := ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ + _, err = ws.MoveTablesCreate(ctx, &vtctldatapb.MoveTablesCreateRequest{ SourceKeyspace: sourceKs, TargetKeyspace: targetKs, Workflow: wf, @@ -421,32 +471,65 @@ func TestMoveTables(t *testing.T) { }) require.NoError(t, err) + for _, ftc := range targetShards { + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflowsLimited, tenv.dbName, wf), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "workflow|id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "workflow|int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", + ), + 
fmt.Sprintf("%s|%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", wf, vreplID, bls, position, targetKs), + ), nil) + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", + ), + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", vreplID, bls, position, targetKs), + ), nil) + } + _, err = ws.WorkflowSwitchTraffic(ctx, &vtctldatapb.WorkflowSwitchTrafficRequest{ Keyspace: targetKs, Workflow: wf, Cells: tenv.cells, - MaxReplicationLagAllowed: &vttime.Duration{Seconds: 922337203}, + MaxReplicationLagAllowed: &vttimepb.Duration{Seconds: 922337203}, EnableReverseReplication: true, InitializeTargetSequences: true, Direction: int32(workflow.DirectionForward), }) require.NoError(t, err) - tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, fmt.Sprintf(getWorkflowStatus, workflow.ReverseWorkflowName(wf), sourceKs), - sqltypes.MakeTestResult( + for _, ftc := range targetShards { + ftc.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "id|workflow|source|pos|stop_pos|max_replication_log|state|db_name|time_updated|transaction_timestamp|message|tags|workflow_type|workflow_sub_type|time_heartbeat|defer_secondary_keys|component_throttled|time_throttled|rows_copied", - "int64|varchar|blob|varchar|varchar|int64|varchar|varchar|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|int64|int64", + 
"id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - fmt.Sprintf("1|%s|%s|%s|NULL|0|running|vt_%s|1686577659|0|||1|0|0|0||0|10", workflow.ReverseWorkflowName(wf), bls, position, sourceKs), + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", vreplID, bls, position, targetKs), + ), nil) + } + addInvariants(sourceTablet.vrdbClient, vreplID, sourceTabletUID, position, workflow.ReverseWorkflowName(wf), tenv.cells[0]) + sourceTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, workflow.ReverseWorkflowName(wf), tenv.dbName), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - ) + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", vreplID, bls, position, sourceKs), + ), nil) + sourceTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflowsLimited, tenv.dbName, workflow.ReverseWorkflowName(wf)), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "workflow|id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + 
"workflow|int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", + ), + fmt.Sprintf("%s|%d|%s|%s|NULL|0|0|||1686577659|0|Running||%s|1||0|0|0||0|1", workflow.ReverseWorkflowName(wf), vreplID, bls, position, sourceKs), + ), nil) + sourceTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), &sqltypes.Result{}, nil) _, err = ws.WorkflowSwitchTraffic(ctx, &vtctldatapb.WorkflowSwitchTrafficRequest{ Keyspace: targetKs, Workflow: wf, Cells: tenv.cells, - MaxReplicationLagAllowed: &vttime.Duration{Seconds: 922337203}, + MaxReplicationLagAllowed: &vttimepb.Duration{Seconds: 922337203}, EnableReverseReplication: true, Direction: int32(workflow.DirectionBackward), }) @@ -469,13 +552,13 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { tablet := tenv.addTablet(t, tabletUID, keyspace, shard) defer tenv.deleteTablet(tablet.tablet) - parsed := sqlparser.BuildParsedQuery(sqlSelectVReplicationWorkflowConfig, sidecar.DefaultName, ":wf") + parsed := sqlparser.BuildParsedQuery(sqlSelectVReplicationWorkflowConfig, sidecar.GetIdentifier(), ":wf") bindVars := map[string]*querypb.BindVariable{ "wf": sqltypes.StringBindVariable(workflow), } selectQuery, err := parsed.GenerateQuery(bindVars, nil) require.NoError(t, err) - blsStr := fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"customer" filter:"select * from customer"} rules:{match:"corder" filter:"select * from corder"}}`, + blsStr := fmt.Sprintf(`keyspace:"%s" shard:"%s" filter:{rules:{match:"corder" filter:"select * from corder"} rules:{match:"customer" filter:"select * from customer"}}`, keyspace, shard) selectRes := sqltypes.MakeTestResult( sqltypes.MakeTestFields( @@ -508,7 +591,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { Cells: []string{"zone2"}, // TabletTypes is an empty value, so the current value should be cleared }, - query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', 
source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}}', cell = '%s', tablet_types = '' where id in (%d)`, keyspace, shard, "zone2", vreplID), }, { @@ -519,7 +602,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { Cells: []string{"zone3"}, TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, // So keep the current value of replica }, - query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, keyspace, shard, "zone3", tabletTypes[0], vreplID), }, { @@ -530,7 +613,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { TabletSelectionPreference: tabletmanagerdatapb.TabletSelectionPreference_INORDER, TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA}, }, - query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication 
set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}}', cell = '', tablet_types = '%s' where id in (%d)`, keyspace, shard, "in_order:rdonly,replica", vreplID), }, { @@ -541,7 +624,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { Cells: textutil.SimulatedNullStringSlice, // So keep the current value of zone1 TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY}, }, - query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, keyspace, shard, cells[0], "rdonly", vreplID), }, { @@ -551,7 +634,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), OnDdl: binlogdatapb.OnDDLAction_EXEC, }, - query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '', tablet_types = '' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}} on_ddl:%s', cell = '', tablet_types = '' where id in (%d)`, keyspace, shard, binlogdatapb.OnDDLAction_EXEC.String(), vreplID), }, 
{ @@ -563,7 +646,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { TabletTypes: []topodatapb.TabletType{topodatapb.TabletType_RDONLY, topodatapb.TabletType_REPLICA, topodatapb.TabletType_PRIMARY}, OnDdl: binlogdatapb.OnDDLAction_EXEC_IGNORE, }, - query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}} on_ddl:%s', cell = '%s', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = 'Running', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}} on_ddl:%s', cell = '%s', tablet_types = '%s' where id in (%d)`, keyspace, shard, binlogdatapb.OnDDLAction_EXEC_IGNORE.String(), "zone1,zone2,zone3", "rdonly,replica,primary", vreplID), }, { @@ -575,7 +658,7 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}, OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt), }, - query: fmt.Sprintf(`update _vt.vreplication set state = '%s', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"customer\" filter:\"select * from customer\"} rules:{match:\"corder\" filter:\"select * from corder\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, + query: fmt.Sprintf(`update _vt.vreplication set state = '%s', source = 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"corder\" filter:\"select * from corder\"} rules:{match:\"customer\" filter:\"select * from customer\"}}', cell = '%s', tablet_types = '%s' where id in (%d)`, binlogdatapb.VReplicationWorkflowState_Stopped.String(), keyspace, shard, cells[0], tabletTypes[0], vreplID), }, } @@ -594,9 +677,9 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { require.NotEqual(t, "", tt.query, "No 
expected query provided") // These are the same for each RPC call. - tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(selectQuery, selectRes, nil) - tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.DefaultName), &sqltypes.Result{}, nil) + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(idQuery, idRes, nil) // This is our expected query, which will also short circuit @@ -610,6 +693,109 @@ func TestUpdateVReplicationWorkflow(t *testing.T) { } } +func TestUpdateVReplicationWorkflows(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + keyspace := "testks" + tabletUID := 100 + // VREngine.Exec queries the records in the table and explicitly adds a where id in (...) clause. 
+ vreplIDs := []string{"1", "2", "3"} + + tenv := newTestEnv(t, ctx, keyspace, []string{shard}) + defer tenv.close() + + tablet := tenv.addTablet(t, tabletUID, keyspace, shard) + defer tenv.deleteTablet(tablet.tablet) + + tests := []struct { + name string + request *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest + query string + }{ + { + name: "update only state=running for all workflows", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + AllWorkflows: true, + State: binlogdatapb.VReplicationWorkflowState_Running, + Message: textutil.SimulatedNullString, + StopPosition: textutil.SimulatedNullString, + }, + query: fmt.Sprintf(`update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running' where id in (%s)`, strings.Join(vreplIDs, ", ")), + }, + { + name: "update only state=running for all but reverse workflows", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + ExcludeWorkflows: []string{workflow.ReverseWorkflowName("testwf")}, + State: binlogdatapb.VReplicationWorkflowState_Running, + Message: textutil.SimulatedNullString, + StopPosition: textutil.SimulatedNullString, + }, + query: fmt.Sprintf(`update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running' where id in (%s)`, strings.Join(vreplIDs, ", ")), + }, + { + name: "update all vals for all workflows", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + AllWorkflows: true, + State: binlogdatapb.VReplicationWorkflowState_Running, + Message: "hi", + StopPosition: position, + }, + query: fmt.Sprintf(`update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running', message = 'hi', stop_pos = '%s' where id in (%s)`, position, strings.Join(vreplIDs, ", ")), + }, + { + name: "update state=stopped, messege=for vdiff for two workflows", + request: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + IncludeWorkflows: []string{"testwf", "testwf2"}, + State: 
binlogdatapb.VReplicationWorkflowState_Running, + Message: textutil.SimulatedNullString, + StopPosition: textutil.SimulatedNullString, + }, + query: fmt.Sprintf(`update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running' where id in (%s)`, strings.Join(vreplIDs, ", ")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. + defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v", err) + } + }() + + require.NotNil(t, tt.request, "No request provided") + require.NotEqual(t, "", tt.query, "No expected query provided") + + // These are the same for each RPC call. + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + addlPredicates := "" + if len(tt.request.GetIncludeWorkflows()) > 0 { + addlPredicates = fmt.Sprintf(" and workflow in ('%s')", strings.Join(tt.request.GetIncludeWorkflows(), "', '")) + } + if len(tt.request.GetExcludeWorkflows()) > 0 { + addlPredicates = fmt.Sprintf(" and workflow not in ('%s')", strings.Join(tt.request.GetExcludeWorkflows(), "', '")) + } + tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(fmt.Sprintf("select id from _vt.vreplication where db_name = '%s'%s", tenv.dbName, addlPredicates), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id", + "int64", + ), + vreplIDs...), + nil) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ tenv.tmc.tablets[tabletUID].vrdbClient.ExpectRequest(tt.query, &sqltypes.Result{}, errShortCircuit) + _, err := tenv.tmc.tablets[tabletUID].tm.UpdateVReplicationWorkflows(ctx, tt.request) + tenv.tmc.tablets[tabletUID].vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + }) + } +} + // TestSourceShardSelection tests the RPC calls made by VtctldServer to tablet // managers include the correct set of BLS settings. // @@ -656,9 +842,9 @@ func TestSourceShardSelection(t *testing.T) { defer tenv.deleteTablet(tt.tablet) } - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(vtenv.NewTestEnv(), tenv.ts, tenv.tmc) - tenv.ts.SaveVSchema(ctx, sourceKs, &vschemapb.Keyspace{ + err := tenv.ts.SaveVSchema(ctx, sourceKs, &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "hash": { @@ -674,7 +860,8 @@ func TestSourceShardSelection(t *testing.T) { }, }, }) - tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + require.NoError(t, err) + err = tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ "hash": { @@ -690,6 +877,7 @@ func TestSourceShardSelection(t *testing.T) { }, }, }) + require.NoError(t, err) tests := []struct { name string @@ -765,6 +953,7 @@ func TestSourceShardSelection(t *testing.T) { targetKs, wf), &sqltypes.Result{}) tenv.tmc.setVReplicationExecResults(tt.tablet, fmt.Sprintf("select 1 from _vt.vreplication where db_name='vt_%s' and message='FROZEN' and workflow_sub_type != 1", targetKs), &sqltypes.Result{}) + tenv.tmc.setVReplicationExecResults(tt.tablet, getCopyState, &sqltypes.Result{}) } for _, tt := range tests { @@ -786,13 +975,15 @@ func TestSourceShardSelection(t *testing.T) { tenv.tmc.SetSchema(tt.schema) if tt.vschema != nil { - tenv.ts.SaveVSchema(ctx, targetKs, tt.vschema) + err = tenv.ts.SaveVSchema(ctx, targetKs, tt.vschema) + require.NoError(t, err) } for uid, streams := range tt.streams { tt := targetTablets[uid] + 
tt.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + tt.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) for i, sourceShard := range streams { - tt.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) var err error if i == len(streams)-1 { // errShortCircuit is intentionally injected into the MoveTables @@ -800,8 +991,9 @@ func TestSourceShardSelection(t *testing.T) { // everything we wanted to in the test. err = errShortCircuit } + tt.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) tt.vrdbClient.ExpectRequest( - fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + fmt.Sprintf(`%s values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(id, \'%s.hash\', \'%s\')\"}}', '', 0, 0, '%s', '', now(), 0, 'Stopped', '%s', 1, 0, 0, '{}')`, insertVReplicationPrefix, wf, sourceKs, sourceShard, targetKs, tt.tablet.Shard, tenv.cells[0], tenv.dbName), &sqltypes.Result{InsertID: uint64(i + 1)}, err, @@ -849,13 +1041,14 @@ func TestFailedMoveTablesCreateCleanup(t *testing.T) { targetTabletUID := 300 targetKs := "targetks" wf := "testwf" + vreplID := 1 table := defaultSchema.TableDefinitions[0].Name invalidTimeZone := "NOPE" bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}}", sourceKs, shard, table, table) tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) defer tenv.close() - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(vtenv.NewTestEnv(), tenv.ts, tenv.tmc) sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) defer tenv.deleteTablet(sourceTablet.tablet) @@ -878,22 +1071,13 @@ func 
TestFailedMoveTablesCreateCleanup(t *testing.T) { err := topotools.SaveRoutingRules(ctx, tenv.ts, nil) require.NoError(t, err, "failed to save routing rules") - tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(checkForWorkflow, targetKs, wf), &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(checkForFrozenWorkflow, targetKs), &sqltypes.Result{}) - tenv.tmc.setVReplicationExecResults(targetTablet.tablet, fmt.Sprintf(getWorkflow, targetKs, wf), - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "id", - "int64", - ), - "1", - ), - ) - targetTablet.vrdbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + + tenv.tmc.tablets[targetTabletUID].vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) targetTablet.vrdbClient.ExpectRequest( fmt.Sprintf("%s %s", insertVReplicationPrefix, - fmt.Sprintf(`values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}} source_time_zone:\"%s\" target_time_zone:\"UTC\"', '', 0, 0, '%s', 'primary', now(), 0, 'Stopped', '%s', 1, 0, 0)`, + fmt.Sprintf(`values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"%s\" filter:\"select * from %s\"}} source_time_zone:\"%s\" target_time_zone:\"UTC\"', '', 0, 0, '%s', 'primary', now(), 0, 'Stopped', '%s', 1, 0, 0, '{}')`, wf, sourceKs, shard, table, table, invalidTimeZone, strings.Join(tenv.cells, ","), tenv.dbName), ), &sqltypes.Result{ @@ -903,82 +1087,24 @@ func TestFailedMoveTablesCreateCleanup(t *testing.T) { nil, ) targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) - targetTablet.vrdbClient.ExpectRequest(getVReplicationRecord, + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), sqltypes.MakeTestResult( sqltypes.MakeTestFields( "id|source", "int64|varchar", ), - 
fmt.Sprintf("1|%s", bls), - ), - nil, - ) - targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(updatePickedSourceTablet, tenv.cells[0], sourceTabletUID), - &sqltypes.Result{}, nil) - targetTablet.vrdbClient.ExpectRequest(setSessionTZ, &sqltypes.Result{}, nil) - targetTablet.vrdbClient.ExpectRequest(setNames, &sqltypes.Result{}, nil) - targetTablet.vrdbClient.ExpectRequest(setNetReadTimeout, &sqltypes.Result{}, nil) - targetTablet.vrdbClient.ExpectRequest(setNetWriteTimeout, &sqltypes.Result{}, nil) - targetTablet.vrdbClient.ExpectRequest(getRowsCopied, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "rows_copied", - "int64", - ), - "0", - ), - nil, - ) - targetTablet.vrdbClient.ExpectRequest(getWorkflowState, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", - "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", - ), - fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), - ), - nil, - ) - targetTablet.vrdbClient.ExpectRequest(getNumCopyStateTable, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "count(distinct table_name)", - "int64", - ), - "1", - ), - nil, - ) - targetTablet.vrdbClient.ExpectRequest(getWorkflowState, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", - "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", - ), - fmt.Sprintf("||0|0|Stopped|1|%s|0|0", wf), - ), - nil, - ) - targetTablet.vrdbClient.ExpectRequest(getNumCopyStateTable, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "count(distinct table_name)", - "int64", - ), - "1", + fmt.Sprintf("%d|%s", vreplID, bls), ), nil, ) - targetTablet.vrdbClient.ExpectRequest(getBinlogRowImage, - sqltypes.MakeTestResult( - sqltypes.MakeTestFields( - "@@binlog_row_image", - "varchar", - ), - "FULL", + + 
targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, wf, tenv.dbName), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_tps|max_replication_lag|cell|tablet_types|time_updated|transaction_timestamp|state|message|db_name|rows_copied|tags|time_heartbeat|workflow_type|time_throttled|component_throttled|workflow_sub_type|defer_secondary_keys", + "int64|varchar|blob|varchar|int64|int64|varchar|varchar|int64|int64|varchar|varchar|varchar|int64|varchar|int64|int64|int64|varchar|int64|int64", ), - nil, - ) + fmt.Sprintf("%d|%s|%s|NULL|0|0|||1686577659|0|Stopped||%s|1||0|0|0||0|1", vreplID, bls, position, targetKs), + ), nil) targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(insertStreamsCreatedLog, bls), &sqltypes.Result{}, nil) tenv.tmc.setVReplicationExecResults(targetTablet.tablet, @@ -993,7 +1119,7 @@ func TestFailedMoveTablesCreateCleanup(t *testing.T) { ) // We expect the workflow creation to fail due to the invalid time - // zone and thus the workflow iteslf to be cleaned up. + // zone and thus the workflow itself to be cleaned up. tenv.tmc.setVReplicationExecResults(sourceTablet.tablet, fmt.Sprintf(deleteWorkflow, sourceKs, workflow.ReverseWorkflowName(wf)), &sqltypes.Result{RowsAffected: 1}, @@ -1028,3 +1154,2122 @@ func TestFailedMoveTablesCreateCleanup(t *testing.T) { require.NoError(t, err, "failed to get target vschema") require.Equal(t, vs, vs2, "expected vschema to be unchanged") } + +// TestHasVReplicationWorkflows tests the simple RPC to be sure +// that it generates the expected query and results for each +// request. 
+func TestHasVReplicationWorkflows(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetTabletUID := 300 + shard := "0" + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + tests := []struct { + name string + tablet *fakeTabletConn + queryRes *sqltypes.Result + want *tabletmanagerdatapb.HasVReplicationWorkflowsResponse + wantErr bool + }{ + { + name: "source tablet", + tablet: sourceTablet, + queryRes: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "has_workflows", + "int64", + ), + "0", + ), + want: &tabletmanagerdatapb.HasVReplicationWorkflowsResponse{ + Has: false, + }, + }, + { + name: "target tablet", + tablet: targetTablet, + queryRes: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "has_workflows", + "int64", + ), + "1", + ), + want: &tabletmanagerdatapb.HasVReplicationWorkflowsResponse{ + Has: true, + }, + }, + { + name: "target tablet with error", + tablet: targetTablet, + queryRes: sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "wut|yu", + "varchar|varchar", + ), + "byeee|felicia", + "no|more", + ), + want: nil, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. 
+ defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v; Stack: %s", err, string(debug.Stack())) + } + }() + + require.NotNil(t, tt.tablet, "No tablet provided") + + req := &tabletmanagerdatapb.HasVReplicationWorkflowsRequest{} + + tt.tablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + tt.tablet.vrdbClient.ExpectRequest(fmt.Sprintf(hasWorkflows, tenv.dbName), tt.queryRes, nil) + + got, err := tenv.tmc.HasVReplicationWorkflows(ctx, tt.tablet.tablet, req) + if (err != nil) != tt.wantErr { + t.Errorf("TabletManager.HasVReplicationWorkflows() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("TabletManager.HasVReplicationWorkflows() = %v, want %v", got, tt.want) + } + }) + } +} + +// TestReadVReplicationWorkflows tests the RPC requests are turned +// into the expected proper SQL query. +func TestReadVReplicationWorkflows(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tabletUID := 300 + ks := "targetks" + shard := "0" + tenv := newTestEnv(t, ctx, ks, []string{shard}) + defer tenv.close() + + tablet := tenv.addTablet(t, tabletUID, ks, shard) + defer tenv.deleteTablet(tablet.tablet) + + tests := []struct { + name string + req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest + wantPreds string // Additional query predicates + wantErr bool + }{ + { + name: "nothing", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{}, + // No additional query predicates. 
+ }, + { + name: "all except frozen", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + ExcludeFrozen: true, + }, + wantPreds: " and message != 'FROZEN'", + }, + { + name: "1-3 unless frozen", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + IncludeIds: []int32{1, 2, 3}, + ExcludeFrozen: true, + }, + wantPreds: " and message != 'FROZEN' and id in (1,2,3)", + }, + { + name: "all but wf1 and wf2", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + ExcludeWorkflows: []string{"wf1", "wf2"}, + }, + wantPreds: " and workflow not in ('wf1','wf2')", + }, + { + name: "all but wf1 and wf2", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + ExcludeWorkflows: []string{"wf1", "wf2"}, + }, + wantPreds: " and workflow not in ('wf1','wf2')", + }, + { + name: "only wf1 and wf2", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + IncludeWorkflows: []string{"wf1", "wf2"}, + ExcludeStates: []binlogdatapb.VReplicationWorkflowState{ + binlogdatapb.VReplicationWorkflowState_Stopped, + }, + }, + wantPreds: " and workflow in ('wf1','wf2') and state not in ('Stopped')", + }, + { + name: "only copying or running", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + IncludeStates: []binlogdatapb.VReplicationWorkflowState{ + binlogdatapb.VReplicationWorkflowState_Copying, + binlogdatapb.VReplicationWorkflowState_Running, + }, + }, + wantPreds: " and state in ('Copying','Running')", + }, + { + name: "mess of predicates", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + IncludeIds: []int32{1, 3}, + IncludeWorkflows: []string{"wf1"}, + ExcludeWorkflows: []string{"wf2"}, + ExcludeStates: []binlogdatapb.VReplicationWorkflowState{ + binlogdatapb.VReplicationWorkflowState_Copying, + }, + ExcludeFrozen: true, + }, + wantPreds: " and message != 'FROZEN' and id in (1,3) and workflow in ('wf1') and workflow not in ('wf2') and state not in ('Copying')", + }, + } + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + // This is needed because MockDBClient uses t.Fatal() + // which doesn't play well with subtests. + defer func() { + if err := recover(); err != nil { + t.Errorf("Recovered from panic: %v; Stack: %s", err, string(debug.Stack())) + } + }() + + require.NotNil(t, tt.req, "No request provided") + + if !tt.wantErr { // Errors we're testing for occur before executing any queries. + tablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + tablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, tt.wantPreds), &sqltypes.Result{}, nil) + } + + _, err := tenv.tmc.ReadVReplicationWorkflows(ctx, tablet.tablet, tt.req) + if (err != nil) != tt.wantErr { + t.Errorf("TabletManager.ReadVReplicationWorkflows() error = %v, wantErr %v", err, tt.wantErr) + return + } + }) + } +} + +// addInvariants adds handling for queries that can be injected into the +// sequence of queries, N times, in a non-deterministic order. +func addInvariants(dbClient *binlogplayer.MockDBClient, vreplID, sourceTabletUID int, position, workflow, cell string) { + // This reduces a lot of noise, but is also needed as it's executed when any of the + // other queries here are executed via engine.exec(). + dbClient.AddInvariant(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}) + + // The binlogplayer queries result from the controller starting up and the sequence + // within everything else is non-deterministic. 
+ dbClient.AddInvariant(fmt.Sprintf(getWorkflowState, vreplID), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "pos|stop_pos|max_tps|max_replication_lag|state|workflow_type|workflow|workflow_sub_type|defer_secondary_keys", + "varchar|varchar|int64|int64|varchar|int64|varchar|int64|int64", + ), + fmt.Sprintf("%s||0|0|Stopped|1|%s|0|0", position, workflow), + )) + dbClient.AddInvariant(setSessionTZ, &sqltypes.Result{}) + dbClient.AddInvariant(setNames, &sqltypes.Result{}) + dbClient.AddInvariant(setNetReadTimeout, &sqltypes.Result{}) + dbClient.AddInvariant(setNetWriteTimeout, &sqltypes.Result{}) + + // Same for the vreplicator queries. + dbClient.AddInvariant(fmt.Sprintf(getNumCopyStateTable, vreplID), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "count(distinct table_name)", + "int64", + ), + "0", + )) + dbClient.AddInvariant(getBinlogRowImage, sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "@@binlog_row_image", + "varchar", + ), + "FULL", + )) + dbClient.AddInvariant(fmt.Sprintf(getRowsCopied, vreplID), sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "rows_copied", + "int64", + ), + "0", + )) + dbClient.AddInvariant(fmt.Sprintf(updatePickedSourceTablet, cell, sourceTabletUID, vreplID), &sqltypes.Result{}) +} + +func addMaterializeSettingsTablesToSchema(ms *vtctldatapb.MaterializeSettings, tenv *testEnv, venv *vtenv.Environment) { + schema := defaultSchema.CloneVT() + for _, ts := range ms.TableSettings { + tableName := ts.TargetTable + table, err := venv.Parser().TableFromStatement(ts.SourceExpression) + if err == nil { + tableName = table.Name.String() + } + schema.TableDefinitions = append(schema.TableDefinitions, &tabletmanagerdatapb.TableDefinition{ + Name: tableName, + Schema: fmt.Sprintf("%s_schema", tableName), + }) + schema.TableDefinitions = append(schema.TableDefinitions, &tabletmanagerdatapb.TableDefinition{ + Name: ts.TargetTable, + Schema: fmt.Sprintf("%s_schema", ts.TargetTable), + }) + } + tenv.tmc.SetSchema(schema) +} + 
+func TestExternalizeLookupVindex(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + addInvariants(targetShards["-80"].vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + addInvariants(targetShards["80-"].vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + // Keyspace where the vindex is created. + SourceKeyspace: sourceKs, + // Keyspace where the lookup table and VReplication workflow is created. 
+ TargetKeyspace: targetKs, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + sourceVschema := &vschemapb.Keyspace{ + Sharded: false, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + "owned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.owned_lookup", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + Owner: "t1", + }, + "unowned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.unowned_lookup", + "from": "c1", + "to": "c2", + "write_only": "true", + }, + }, + "unqualified_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "unqualified", + "from": "c1", + "to": "c2", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Name: "xxhash", + Column: "col1", + }, { + Name: "owned_lookup", + Column: "col2", + }}, + }, + }, + } + + trxTS := fmt.Sprintf("%d", time.Now().Unix()) + fields := sqltypes.MakeTestFields( + "id|state|message|source|workflow_type|workflow_sub_type|max_tps|max_replication_lag|time_updated|time_heartbeat|time_throttled|transaction_timestamp|rows_copied", + "int64|varbinary|varbinary|blob|int64|int64|int64|int64|int64|int64|int64|int64|int64", + ) + wftype := fmt.Sprintf("%d", binlogdatapb.VReplicationWorkflowType_CreateLookupIndex) + ownedSourceStopAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"owned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}} stop_after_copy:true`, + ms.SourceKeyspace, ms.SourceKeyspace) + ownedSourceKeepRunningAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"owned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}}`, + ms.SourceKeyspace, ms.SourceKeyspace) + ownedRunning := sqltypes.MakeTestResult(fields, 
"1|Running|msg|"+ownedSourceKeepRunningAfterCopy+"|"+wftype+"|0|0|0|0|0|0|"+trxTS+"|5") + ownedStopped := sqltypes.MakeTestResult(fields, "1|Stopped|Stopped after copy|"+ownedSourceStopAfterCopy+"|"+wftype+"|0|0|0|0|0|0|"+trxTS+"|5") + unownedSourceStopAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"unowned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}} stop_after_copy:true`, + ms.SourceKeyspace, ms.SourceKeyspace) + unownedSourceKeepRunningAfterCopy := fmt.Sprintf(`keyspace:"%s",shard:"0",filter:{rules:{match:"unowned_lookup" filter:"select * from t1 where in_keyrange(col1, '%s.xxhash', '-80')"}}`, + ms.SourceKeyspace, ms.SourceKeyspace) + unownedRunning := sqltypes.MakeTestResult(fields, "2|Running|msg|"+unownedSourceKeepRunningAfterCopy+"|"+wftype+"|0|0|0|0|0|0|"+trxTS+"|5") + unownedStopped := sqltypes.MakeTestResult(fields, "2|Stopped|Stopped after copy|"+unownedSourceStopAfterCopy+"|"+wftype+"|0|0|0|0|0|0|"+trxTS+"|5") + + testcases := []struct { + request *vtctldatapb.LookupVindexExternalizeRequest + vrResponse *sqltypes.Result + err string + expectedVschema *vschemapb.Keyspace + expectDelete bool + }{ + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "owned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: ownedStopped, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "owned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.owned_lookup", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + }, + expectDelete: true, + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "unowned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: unownedStopped, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "unowned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + 
"table": "targetks.unowned_lookup", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + err: "is not in Running state", + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "owned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: ownedRunning, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "owned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.owned_lookup", + "from": "c1", + "to": "c2", + }, + Owner: "t1", + }, + }, + }, + expectDelete: true, + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "unowned_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + vrResponse: unownedRunning, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "unowned_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.unowned_lookup", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + }, + { + request: &vtctldatapb.LookupVindexExternalizeRequest{ + Name: "absent_lookup", + Keyspace: ms.SourceKeyspace, + TableKeyspace: ms.TargetKeyspace, + }, + expectedVschema: &vschemapb.Keyspace{ + Vindexes: map[string]*vschemapb.Vindex{ + "absent_lookup": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "targetks.absent_lookup", + "from": "c1", + "to": "c2", + }, + }, + }, + }, + err: "vindex absent_lookup not found in the sourceks keyspace", + }, + } + for _, tcase := range testcases { + t.Run(tcase.request.Name, func(t *testing.T) { + // Resave the source schema for every iteration. 
+ err := tenv.ts.SaveVSchema(ctx, tcase.request.Keyspace, sourceVschema) + require.NoError(t, err) + err = tenv.ts.RebuildSrvVSchema(ctx, []string{tenv.cells[0]}) + require.NoError(t, err) + + require.NotNil(t, tcase.request, "No request provided") + + for _, targetTablet := range targetShards { + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, tcase.request.Name, tenv.dbName), tcase.vrResponse, nil) + if tcase.err == "" { + // We query the workflow again to build the status output when + // it's successfully created. + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readWorkflow, tcase.request.Name, tenv.dbName), tcase.vrResponse, nil) + } + } + + preWorkflowDeleteCalls := tenv.tmc.workflowDeleteCalls + _, err = ws.LookupVindexExternalize(ctx, tcase.request) + if tcase.err != "" { + if err == nil || !strings.Contains(err.Error(), tcase.err) { + require.FailNow(t, "LookupVindexExternalize error", "ExternalizeVindex(%v) err: %v, must contain %v", tcase.request, err, tcase.err) + } + return + } + require.NoError(t, err) + expectedWorkflowDeleteCalls := preWorkflowDeleteCalls + if tcase.expectDelete { + // We expect the RPC to be called on each target shard. + expectedWorkflowDeleteCalls = preWorkflowDeleteCalls + (len(targetShards)) + } + require.Equal(t, expectedWorkflowDeleteCalls, tenv.tmc.workflowDeleteCalls) + + aftervschema, err := tenv.ts.GetVSchema(ctx, ms.SourceKeyspace) + require.NoError(t, err) + vindex := aftervschema.Vindexes[tcase.request.Name] + expectedVindex := tcase.expectedVschema.Vindexes[tcase.request.Name] + require.NotNil(t, vindex, "vindex %s not found in vschema", tcase.request.Name) + require.NotContains(t, vindex.Params, "write_only", tcase.request) + require.Equal(t, expectedVindex, vindex, "vindex mismatch. 
expected: %+v, got: %+v", expectedVindex, vindex) + }) + } +} + +func TestMaterializerOneToOne(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetTabletUID := 300 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{ + { + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, + { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }, + { + TargetTable: "t4", + SourceExpression: "", // empty + CreateDdl: "t4ddl", + }, + }, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"t2\" filter:\"select * from t3\"} rules:{match:\"t4\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, shard, tenv.cells[0], tenv.dbName) + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{}, errShortCircuit) + + err := ws.Materialize(ctx, ms) + targetTablet.vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerManyToOne(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + sourceShards := make(map[string]*fakeTabletConn) + targetKs := "targetks" + targetTabletUID := 300 + targetShard := "0" + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceShards["-80"] = tenv.addTablet(t, sourceTabletUID, sourceKs, "-80") + defer tenv.deleteTablet(sourceShards["-80"].tablet) + sourceShards["80-"] = tenv.addTablet(t, sourceTabletUID+10, sourceKs, "80-") + defer tenv.deleteTablet(sourceShards["80-"].tablet) + + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, targetShard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + 
addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + targetTablet.vrdbClient.AddInvariant("update _vt.vreplication set message='no schema defined' where id=1", &sqltypes.Result{}) // If the first workflow controller progresses ... + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. + for _, sourceShard := range []string{"-80", "80-"} { // One insert per [binlog]source/stream + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"t2\" filter:\"select * from t3\"}}", sourceKs, sourceShard) + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"t2\" filter:\"select * from t3\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, sourceShard, tenv.cells[0], tenv.dbName) + if vreplID == 1 { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, nil) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + vreplID++ + } else { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, errShortCircuit) + } + } + + err := ws.Materialize(ctx, ms) + targetTablet.vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerOneToMany(t *testing.T) { + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.AddInvariant("update _vt.vreplication set message='no schema defined' where id=1", &sqltypes.Result{}) // If the first workflow controller progresses ... + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(c1, '%s.xxhash', '%s')\"}}", + sourceKs, sourceShard, targetKs, targetShard) + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(c1, \'%s.xxhash\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, sourceShard, targetKs, targetShard, tenv.cells[0], tenv.dbName) + if targetShard == "-80" { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, nil) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + } else { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, errShortCircuit) + } + } + + err = ws.Materialize(ctx, ms) + for _, targetTablet := range targetShards { + targetTablet.vrdbClient.Wait() + } + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerManyToMany(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShards := make(map[string]*fakeTabletConn) + 
sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceShards["-40"] = tenv.addTablet(t, sourceTabletUID, sourceKs, "-40") + defer tenv.deleteTablet(sourceShards["-40"].tablet) + sourceShards["40-"] = tenv.addTablet(t, sourceTabletUID+10, sourceKs, "40-") + defer tenv.deleteTablet(sourceShards["40-"].tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + targetTablet.vrdbClient.AddInvariant("update _vt.vreplication set message='no schema defined' where id=1", &sqltypes.Result{}) // If the first workflow controller progresses ... + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + for i, sourceShard := range []string{"-40", "40-"} { // One insert per [binlog]source/stream + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID+(i*10), position, wf, tenv.cells[0]) + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(c1, '%s.xxhash', '%s')\"}}", + sourceKs, sourceShard, targetKs, targetShard) + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(c1, \'%s.xxhash\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, sourceShard, targetKs, targetShard, tenv.cells[0], tenv.dbName) + if targetShard == "80-" && sourceShard == "40-" { // Last insert + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, errShortCircuit) + } else { // Can't short circuit as we will do more inserts + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, nil) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + } + } + } + + err = ws.Materialize(ctx, ms) + for _, targetTablet := range targetShards { + targetTablet.vrdbClient.Wait() + } + require.ErrorIs(t, err, errShortCircuit) +} + +func 
TestMaterializerMulticolumnVindex(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "region": { + Type: "region_experimental", + Params: map[string]string{ + "region_bytes": "1", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Columns: []string{"c1", "c2"}, + Name: "region", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.AddInvariant("update _vt.vreplication set message='no schema defined' where id=1", &sqltypes.Result{}) // If the first workflow controller progresses ... + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(c1, c2, '%s.region', '%s')\"}}", + sourceKs, sourceShard, targetKs, targetShard) + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1 where in_keyrange(c1, c2, \'%s.region\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, sourceShard, targetKs, targetShard, tenv.cells[0], tenv.dbName) + if targetShard == "-80" { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, nil) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + } else { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, errShortCircuit) + } + } + + err = ws.Materialize(ctx, ms) + for _, targetTablet := range targetShards { + targetTablet.vrdbClient.Wait() + } + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerDeploySchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + 
targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Remove t2 from the target tablet's schema so that it must + // be deployed. + schema := tenv.tmc.schema.CloneVT() + for i, sd := range schema.TableDefinitions { + if sd.Name == "t2" { + schema.TableDefinitions = append(schema.TableDefinitions[:i], schema.TableDefinitions[i+1:]...) + } + } + tenv.tmc.tabletSchemas[targetTabletUID] = schema + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, `t2ddl`, &sqltypes.Result{}) // Execute the fake CreateDdl + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"t2\" filter:\"select * from t3\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, shard, tenv.cells[0], tenv.dbName) + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{}, errShortCircuit) + + err := ws.Materialize(ctx, ms) + targetTablet.vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + require.Equal(t, 1, tenv.tmc.getSchemaRequestCount(sourceTabletUID)) + require.Equal(t, 1, tenv.tmc.getSchemaRequestCount(targetTabletUID)) +} + +func TestMaterializerCopySchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "copy", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Remove t1 from the target tablet's schema so that it must + // be copied. 
The workflow should still succeed w/o it existing + // when we start. + schema := tenv.tmc.schema.CloneVT() + for i, sd := range schema.TableDefinitions { + if sd.Name == "t1" { + schema.TableDefinitions = append(schema.TableDefinitions[:i], schema.TableDefinitions[i+1:]...) + } + } + tenv.tmc.tabletSchemas[targetTabletUID] = schema + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"t2\" filter:\"select * from t3\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, shard, tenv.cells[0], tenv.dbName) + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{}, errShortCircuit) + + err := ws.Materialize(ctx, ms) + targetTablet.vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) + require.Equal(t, 0, tenv.tmc.getSchemaRequestCount(sourceTabletUID)) + require.Equal(t, 1, tenv.tmc.getSchemaRequestCount(targetTabletUID)) +} + +func TestMaterializerExplicitColumns(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, 
sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select c1, c1+c2, c2 from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "region": { + Type: "region_experimental", + Params: map[string]string{ + "region_bytes": "1", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Columns: []string{"c1", "c2"}, + Name: "region", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. + for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.AddInvariant("update _vt.vreplication set message='no schema defined' where id=1", &sqltypes.Result{}) // If the first workflow controller progresses ... 
+ targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select c1, c1 + c2, c2 from t1 where in_keyrange(c1, c2, '%s.region', '%s')\"}}", + sourceKs, sourceShard, targetKs, targetShard) + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select c1, c1 + c2, c2 from t1 where in_keyrange(c1, c2, \'%s.region\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, sourceShard, targetKs, targetShard, tenv.cells[0], tenv.dbName) + if targetShard == "-80" { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, nil) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + } else { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, errShortCircuit) + } + } + + err = ws.Materialize(ctx, ms) + for _, targetTablet := range targetShards { + targetTablet.vrdbClient.Wait() + } + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerRenamedColumns(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer 
tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select c3 as c1, c1+c2, c4 as c2 from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "region": { + Type: "region_experimental", + Params: map[string]string{ + "region_bytes": "1", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Columns: []string{"c1", "c2"}, + Name: "region", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. + for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.AddInvariant("update _vt.vreplication set message='no schema defined' where id=1", &sqltypes.Result{}) // If the first workflow controller progresses ... 
+ targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + bls := fmt.Sprintf("keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select c3 as c1, c1 + c2, c4 as c2 from t1 where in_keyrange(c3, c4, '%s.region', '%s')\"}}", + sourceKs, sourceShard, targetKs, targetShard) + insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select c3 as c1, c1 + c2, c4 as c2 from t1 where in_keyrange(c3, c4, \'%s.region\', \'%s\')\"}}', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, sourceShard, targetKs, targetShard, tenv.cells[0], tenv.dbName) + if targetShard == "-80" { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, nil) + targetTablet.vrdbClient.ExpectRequest(getAutoIncrementStep, &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(getVReplicationRecord, vreplID), + sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "id|source", + "int64|varchar", + ), + fmt.Sprintf("%d|%s", vreplID, bls), + ), nil) + } else { + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{InsertID: uint64(vreplID)}, errShortCircuit) + } + } + + err = ws.Materialize(ctx, ms) + for _, targetTablet := range targetShards { + targetTablet.vrdbClient.Wait() + } + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerStopAfterCopy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 200 + targetKs := "targetks" + targetTabletUID := 300 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, 
targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + StopAfterCopy: true, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }, { + TargetTable: "t2", + SourceExpression: "select * from t3", + CreateDdl: "t2ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + + // This is our expected query, which will also short circuit + // the test with an error as at this point we've tested what + // we wanted to test. 
+ insert := insertVReplicationPrefix + + fmt.Sprintf(` values ('%s', 'keyspace:\"%s\" shard:\"%s\" filter:{rules:{match:\"t1\" filter:\"select * from t1\"} rules:{match:\"t2\" filter:\"select * from t3\"}} stop_after_copy:true', '', 0, 0, '%s', 'primary,rdonly', now(), 0, 'Stopped', '%s', 0, 0, 0, '{}')`, + wf, sourceKs, shard, tenv.cells[0], tenv.dbName) + targetTablet.vrdbClient.ExpectRequest(insert, &sqltypes.Result{}, errShortCircuit) + + err := ws.Materialize(ctx, ms) + targetTablet.vrdbClient.Wait() + require.ErrorIs(t, err, errShortCircuit) +} + +func TestMaterializerNoTargetVSchema(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + 
targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err = ws.Materialize(ctx, ms) + targetTablet.vrdbClient.Wait() + require.EqualError(t, err, fmt.Sprintf("table t1 not found in vschema for keyspace %s", targetKs)) +} + +func TestMaterializerNoDDL(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + // Clear out the schema on the target tablet. 
+ tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") + require.Equal(t, tenv.tmc.getSchemaRequestCount(100), 0) + require.Equal(t, tenv.tmc.getSchemaRequestCount(200), 1) + +} + +func TestMaterializerNoSourcePrimary(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "copy", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + targetTablet.tablet.Type = topodatapb.TabletType_REPLICA + _, _ = tenv.ts.UpdateShardFields(tenv.ctx, targetKs, shard, func(si *topo.ShardInfo) error { + si.PrimaryAlias = nil + return nil + }) + + 
err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "shard has no primary: 0") +} + +func TestMaterializerTableMismatchNonCopy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t2", + CreateDdl: "", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Clear out the schema on the target tablet. 
+ tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "target table t1 does not exist and there is no create ddl defined") +} + +func TestMaterializerTableMismatchCopy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t2", + CreateDdl: "copy", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Clear out the schema on the target tablet. 
+ tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "source and target table names must match for copying schema: t2 vs t1") +} + +func TestMaterializerNoSourceTable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "copy", + }}, + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Clear out the schema on the source and target tablet. 
+ tenv.tmc.tabletSchemas[sourceTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "source table t1 does not exist") +} + +func TestMaterializerSyntaxError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "bad query", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Clear out the schema on the source and target tablet. 
+ tenv.tmc.tabletSchemas[sourceTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, ms.TableSettings[0].CreateDdl, &sqltypes.Result{}) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "syntax error at position 4 near 'bad'") +} + +func TestMaterializerNotASelect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceTabletUID := 100 + targetKs := "targetks" + targetTabletUID := 200 + shard := "0" + wf := "testwf" + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) + defer tenv.deleteTablet(sourceTablet.tablet) + targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) + defer tenv.deleteTablet(targetTablet.tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "update t1 set val=1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // Clear out the schema on the source and target tablet. 
+ tenv.tmc.tabletSchemas[sourceTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + tenv.tmc.tabletSchemas[targetTabletUID] = &tabletmanagerdatapb.SchemaDefinition{} + tenv.tmc.setVReplicationExecResults(targetTablet.tablet, ms.TableSettings[0].CreateDdl, &sqltypes.Result{}) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf("use %s", sidecar.GetIdentifier()), &sqltypes.Result{}, nil) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + + err := ws.Materialize(ctx, ms) + require.EqualError(t, err, "unrecognized statement: update t1 set val=1") +} + +func TestMaterializerNoGoodVindex(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select * from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + 
Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "lookup_unique": { + Type: "lookup_unique", + Params: map[string]string{ + "table": "t1", + "from": "c1", + "to": "c2", + }, + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "lookup_unique", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is aggregated from the two target shards. + errNoVindex := "could not find a vindex to compute keyspace id for table t1" + errs := make([]string, 0, len(targetShards)) + + for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + errs = append(errs, errNoVindex) + } + + err = ws.Materialize(ctx, ms) + require.EqualError(t, err, strings.Join(errs, "\n")) +} + +func TestMaterializerComplexVindexExpression(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + 
Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select a+b as c1 from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is aggregated from the two target shards. + errNoVindex := "vindex column cannot be a complex expression: a + b as c1" + errs := make([]string, 0, len(targetShards)) + + for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + errs = append(errs, errNoVindex) + } + + err = ws.Materialize(ctx, ms) + require.EqualError(t, err, strings.Join(errs, "\n")) +} + +func TestMaterializerNoVindexInExpression(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + sourceKs := "sourceks" + sourceShard := "0" + sourceTabletUID := 200 + targetKs := "targetks" + targetShards := make(map[string]*fakeTabletConn) + targetTabletUID := 300 + wf := "testwf" + vreplID := 1 + vtenv := vtenv.NewTestEnv() + tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) + defer tenv.close() + + sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, sourceShard) + defer tenv.deleteTablet(sourceTablet.tablet) + + 
targetShards["-80"] = tenv.addTablet(t, targetTabletUID, targetKs, "-80") + defer tenv.deleteTablet(targetShards["-80"].tablet) + targetShards["80-"] = tenv.addTablet(t, targetTabletUID+10, targetKs, "80-") + defer tenv.deleteTablet(targetShards["80-"].tablet) + + ws := workflow.NewServer(vtenv, tenv.ts, tenv.tmc) + ms := &vtctldatapb.MaterializeSettings{ + Workflow: wf, + SourceKeyspace: sourceKs, + TargetKeyspace: targetKs, + TableSettings: []*vtctldatapb.TableMaterializeSettings{{ + TargetTable: "t1", + SourceExpression: "select c2 from t1", + CreateDdl: "t1ddl", + }}, + Cell: tenv.cells[0], + TabletTypes: topoproto.MakeStringTypeCSV([]topodatapb.TabletType{ + topodatapb.TabletType_PRIMARY, + topodatapb.TabletType_RDONLY, + }), + } + + err := tenv.ts.SaveVSchema(ctx, targetKs, &vschemapb.Keyspace{ + Sharded: true, + Vindexes: map[string]*vschemapb.Vindex{ + "xxhash": { + Type: "xxhash", + }, + }, + Tables: map[string]*vschemapb.Table{ + "t1": { + ColumnVindexes: []*vschemapb.ColumnVindex{{ + Column: "c1", + Name: "xxhash", + }}, + }, + }, + }) + require.NoError(t, err) + + addMaterializeSettingsTablesToSchema(ms, tenv, vtenv) + + // This is aggregated from the two target shards. 
+ errNoVindex := "could not find vindex column c1" + errs := make([]string, 0, len(targetShards)) + + for _, targetShard := range []string{"-80", "80-"} { + targetTablet := targetShards[targetShard] + addInvariants(targetTablet.vrdbClient, vreplID, sourceTabletUID, position, wf, tenv.cells[0]) + targetTablet.vrdbClient.ExpectRequest(fmt.Sprintf(readAllWorkflows, tenv.dbName, ""), &sqltypes.Result{}, nil) + errs = append(errs, errNoVindex) + } + + err = ws.Materialize(ctx, ms) + require.EqualError(t, err, strings.Join(errs, "\n")) +} + +func TestBuildReadVReplicationWorkflowsQuery(t *testing.T) { + tm := &TabletManager{ + DBConfigs: &dbconfigs.DBConfigs{ + DBName: "vt_testks", + }, + } + tests := []struct { + name string + req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest + want string + wantErr string + }{ + { + name: "all options", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + IncludeIds: []int32{1, 2, 3}, + IncludeWorkflows: []string{"wf1", "wf2"}, + ExcludeWorkflows: []string{"1wf"}, + IncludeStates: []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Stopped, binlogdatapb.VReplicationWorkflowState_Error}, + ExcludeFrozen: true, + }, + want: "select workflow, id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from _vt.vreplication where db_name = 'vt_testks' and message != 'FROZEN' and id in (1,2,3) and workflow in ('wf1','wf2') and workflow not in ('1wf') and state in ('Stopped','Error') group by workflow, id order by workflow, id", + }, + { + name: "2 workflows if running", + req: &tabletmanagerdatapb.ReadVReplicationWorkflowsRequest{ + IncludeWorkflows: []string{"wf1", "wf2"}, + IncludeStates: []binlogdatapb.VReplicationWorkflowState{binlogdatapb.VReplicationWorkflowState_Running}, + }, 
+ want: "select workflow, id, source, pos, stop_pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, message, db_name, rows_copied, tags, time_heartbeat, workflow_type, time_throttled, component_throttled, workflow_sub_type, defer_secondary_keys, options from _vt.vreplication where db_name = 'vt_testks' and workflow in ('wf1','wf2') and state in ('Running') group by workflow, id order by workflow, id", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tm.buildReadVReplicationWorkflowsQuery(tt.req) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.want, got, "buildReadVReplicationWorkflowsQuery() = %v, want %v", got, tt.want) + }) + } +} + +func TestBuildUpdateVReplicationWorkflowsQuery(t *testing.T) { + tm := &TabletManager{ + DBConfigs: &dbconfigs.DBConfigs{ + DBName: "vt_testks", + }, + } + tests := []struct { + name string + req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest + want string + wantErr string + }{ + { + name: "nothing to update", + req: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + State: binlogdatapb.VReplicationWorkflowState(textutil.SimulatedNullInt), + Message: textutil.SimulatedNullString, + StopPosition: textutil.SimulatedNullString, + }, + wantErr: errNoFieldsToUpdate.Error(), + }, + { + name: "mutually exclusive options", + req: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + State: binlogdatapb.VReplicationWorkflowState_Running, + AllWorkflows: true, + ExcludeWorkflows: []string{"wf1"}, + }, + wantErr: errAllWithIncludeExcludeWorkflows.Error(), + }, + { + name: "all values and options", + req: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + State: binlogdatapb.VReplicationWorkflowState_Running, + Message: "test message", + StopPosition: "MySQL56/17b1039f-21b6-13ed-b365-1a43f95f28a3:1-20", + IncludeWorkflows: []string{"wf2", "wf3"}, + 
ExcludeWorkflows: []string{"1wf"}, + }, + want: "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running', message = 'test message', stop_pos = 'MySQL56/17b1039f-21b6-13ed-b365-1a43f95f28a3:1-20' where db_name = 'vt_testks' and workflow in ('wf2','wf3') and workflow not in ('1wf')", + }, + { + name: "state for all", + req: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + State: binlogdatapb.VReplicationWorkflowState_Running, + Message: textutil.SimulatedNullString, + StopPosition: textutil.SimulatedNullString, + AllWorkflows: true, + }, + want: "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running' where db_name = 'vt_testks'", + }, + { + name: "stop all for vdiff", + req: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + State: binlogdatapb.VReplicationWorkflowState_Stopped, + Message: "for vdiff", + StopPosition: textutil.SimulatedNullString, + AllWorkflows: true, + }, + want: "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Stopped', message = 'for vdiff' where db_name = 'vt_testks'", + }, + { + name: "start one until position", + req: &tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest{ + State: binlogdatapb.VReplicationWorkflowState_Running, + Message: "for until position", + StopPosition: "MySQL56/17b1039f-21b6-13ed-b365-1a43f95f28a3:1-9999", + IncludeWorkflows: []string{"wf1"}, + }, + want: "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running', message = 'for until position', stop_pos = 'MySQL56/17b1039f-21b6-13ed-b365-1a43f95f28a3:1-9999' where db_name = 'vt_testks' and workflow in ('wf1')", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tm.buildUpdateVReplicationWorkflowsQuery(tt.req) + if tt.wantErr != "" { + require.EqualError(t, err, tt.wantErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.want, got, "buildUpdateVReplicationWorkflowsQuery() = %v, 
want %v", got, tt.want) + }) + } +} diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index 2cd21c09a21..6046ed99727 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ b/go/vt/vttablet/tabletmanager/tm_init.go @@ -36,8 +36,9 @@ package tabletmanager import ( "context" "encoding/hex" + "errors" "fmt" - "math/rand" + "math/rand/v2" "regexp" "strings" "sync" @@ -67,14 +68,18 @@ import ( "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tabletserver" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) -// Query rules from denylist -const denyListQueryList string = "DenyListQueryRules" +const ( + // Query rules from denylist + denyListQueryList string = "DenyListQueryRules" +) var ( // The following flags initialize the tablet record. @@ -86,8 +91,8 @@ var ( skipBuildInfoTags = "/.*/" initTags flagutil.StringMapValue - initPopulateMetadata bool initTimeout = 1 * time.Minute + mysqlShutdownTimeout = mysqlctl.DefaultShutdownTimeout ) func registerInitFlags(fs *pflag.FlagSet) { @@ -99,6 +104,7 @@ func registerInitFlags(fs *pflag.FlagSet) { fs.StringVar(&skipBuildInfoTags, "vttablet_skip_buildinfo_tags", skipBuildInfoTags, "comma-separated list of buildinfo tags to skip from merging with --init_tags. 
each tag is either an exact match or a regular expression of the form '/regexp/'.") fs.Var(&initTags, "init_tags", "(init parameter) comma separated list of key:value pairs used to tag the tablet") fs.DurationVar(&initTimeout, "init_timeout", initTimeout, "(init parameter) timeout to use for the init phase.") + fs.DurationVar(&mysqlShutdownTimeout, "mysql-shutdown-timeout", mysqlShutdownTimeout, "timeout to use when MySQL is being shut down.") } var ( @@ -149,6 +155,7 @@ type TabletManager struct { UpdateStream binlog.UpdateStreamControl VREngine *vreplication.Engine VDiffEngine *vdiff.Engine + Env *vtenv.Environment // tmState manages the TabletManager state. tmState *tmState @@ -170,6 +177,10 @@ type TabletManager struct { // only hold the mutex to update the fields, nothing else. mutex sync.Mutex + // _waitForGrantsComplete is a channel for waiting until the grants for all the mysql + // users have been verified. + _waitForGrantsComplete chan struct{} + // _shardSyncChan is a channel for informing the shard sync goroutine that // it should wake up and recheck the tablet state, to make sure it and the // shard record are in sync. @@ -200,7 +211,7 @@ type TabletManager struct { } // BuildTabletFromInput builds a tablet record from input parameters. 
-func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, db *dbconfigs.DBConfigs) (*topodatapb.Tablet, error) { +func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, db *dbconfigs.DBConfigs, collationEnv *collations.Environment) (*topodatapb.Tablet, error) { hostname := tabletHostname if hostname == "" { var err error @@ -238,14 +249,14 @@ func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, d return nil, err } - var charset uint8 + var charset collations.ID if db != nil && db.Charset != "" { - charset, err = collations.Local().ParseConnectionCharset(db.Charset) + charset, err = collationEnv.ParseConnectionCharset(db.Charset) if err != nil { return nil, err } } else { - charset = collations.Local().DefaultConnectionCharset() + charset = collationEnv.DefaultConnectionCharset() } return &topodatapb.Tablet{ @@ -333,7 +344,7 @@ func mergeTags(a, b map[string]string) map[string]string { } // Start starts the TabletManager. 
-func (tm *TabletManager) Start(tablet *topodatapb.Tablet, healthCheckInterval time.Duration) error { +func (tm *TabletManager) Start(tablet *topodatapb.Tablet, config *tabletenv.TabletConfig) error { defer func() { log.Infof("TabletManager Start took ~%d ms", time.Since(servenv.GetInitStartTime()).Milliseconds()) }() @@ -342,6 +353,7 @@ func (tm *TabletManager) Start(tablet *topodatapb.Tablet, healthCheckInterval ti tm.tabletAlias = tablet.Alias tm.tmState = newTMState(tm, tablet) tm.actionSema = semaphore.NewWeighted(1) + tm._waitForGrantsComplete = make(chan struct{}) tm.baseTabletType = tablet.Type @@ -393,7 +405,7 @@ func (tm *TabletManager) Start(tablet *topodatapb.Tablet, healthCheckInterval ti tm.exportStats() servenv.OnRun(tm.registerTabletManager) - restoring, err := tm.handleRestore(tm.BatchCtx) + restoring, err := tm.handleRestore(tm.BatchCtx, config) if err != nil { return err } @@ -406,8 +418,17 @@ func (tm *TabletManager) Start(tablet *topodatapb.Tablet, healthCheckInterval ti // We shouldn't use the base tablet type directly, since the type could have changed to PRIMARY // earlier in tm.checkPrimaryShip code. _, err = tm.initializeReplication(ctx, tm.Tablet().Type) + if err != nil { + return err + } + + // Make sure we have the correct privileges for the DBA user before we start the state manager. + err = tm.waitForDBAGrants(config, mysqlctl.DbaGrantWaitTime) + if err != nil { + return err + } tm.tmState.Open() - return err + return nil } // Close prepares a tablet for shutdown. First we check our tablet ownership and @@ -444,7 +465,7 @@ func (tm *TabletManager) Close() { // Stop shuts down the tm. Normally this is not necessary, since we use // servenv OnTerm and OnClose hooks to coordinate shutdown automatically, // while taking lameduck into account. However, this may be useful for tests, -// when you want to clean up an tm immediately. +// when you want to clean up a tm immediately. 
func (tm *TabletManager) Stop() { // Stop the shard sync loop and wait for it to exit. This needs to be done // here in addition to in Close() because tests do not call Close(). @@ -536,7 +557,7 @@ func (tm *TabletManager) createKeyspaceShard(ctx context.Context) (*topo.ShardIn tm._rebuildKeyspaceDone = make(chan struct{}) go tm.rebuildKeyspace(rebuildKsCtx, tm._rebuildKeyspaceDone, tablet.Keyspace, rebuildKeyspaceRetryInterval) default: - return nil, vterrors.Wrap(err, "initeKeyspaceShardTopo: failed to read SrvKeyspace") + return nil, vterrors.Wrap(err, "initKeyspaceShardTopo: failed to read SrvKeyspace") } // Rebuild vschema graph if this is the first tablet in this keyspace/cell. @@ -546,16 +567,16 @@ func (tm *TabletManager) createKeyspaceShard(ctx context.Context) (*topo.ShardIn // Check if vschema was rebuilt after the initial creation of the keyspace. if _, keyspaceExists := srvVSchema.GetKeyspaces()[tablet.Keyspace]; !keyspaceExists { if err := tm.TopoServer.RebuildSrvVSchema(ctx, []string{tm.tabletAlias.Cell}); err != nil { - return nil, vterrors.Wrap(err, "initeKeyspaceShardTopo: failed to RebuildSrvVSchema") + return nil, vterrors.Wrap(err, "initKeyspaceShardTopo: failed to RebuildSrvVSchema") } } case topo.IsErrType(err, topo.NoNode): // There is no SrvSchema in this cell at all, so we definitely need to rebuild. 
if err := tm.TopoServer.RebuildSrvVSchema(ctx, []string{tm.tabletAlias.Cell}); err != nil { - return nil, vterrors.Wrap(err, "initeKeyspaceShardTopo: failed to RebuildSrvVSchema") + return nil, vterrors.Wrap(err, "initKeyspaceShardTopo: failed to RebuildSrvVSchema") } default: - return nil, vterrors.Wrap(err, "initeKeyspaceShardTopo: failed to read SrvVSchema") + return nil, vterrors.Wrap(err, "initKeyspaceShardTopo: failed to read SrvVSchema") } return shardInfo, nil } @@ -696,7 +717,7 @@ func (tm *TabletManager) checkMysql(ctx context.Context) error { tm.tmState.UpdateTablet(func(tablet *topodatapb.Tablet) { tablet.MysqlHostname = tablet.Hostname }) - mysqlPort, err := tm.MysqlDaemon.GetMysqlPort() + mysqlPort, err := tm.MysqlDaemon.GetMysqlPort(ctx) if err != nil { log.Warningf("Cannot get current mysql port, will keep retrying every %v: %v", mysqlPortRetryInterval, err) go tm.findMysqlPort(mysqlPortRetryInterval) @@ -709,10 +730,18 @@ func (tm *TabletManager) checkMysql(ctx context.Context) error { return nil } +const portCheckTimeout = 5 * time.Second + +func (tm *TabletManager) getMysqlPort() (int32, error) { + ctx, cancel := context.WithTimeout(context.Background(), portCheckTimeout) + defer cancel() + return tm.MysqlDaemon.GetMysqlPort(ctx) +} + func (tm *TabletManager) findMysqlPort(retryInterval time.Duration) { for { time.Sleep(retryInterval) - mport, err := tm.MysqlDaemon.GetMysqlPort() + mport, err := tm.getMysqlPort() if err != nil || mport == 0 { continue } @@ -762,7 +791,7 @@ func (tm *TabletManager) initTablet(ctx context.Context) error { return nil } -func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) { +func (tm *TabletManager) handleRestore(ctx context.Context, config *tabletenv.TabletConfig) (bool, error) { // Sanity check for inconsistent flags if tm.Cnf == nil && restoreFromBackup { return false, fmt.Errorf("you cannot enable --restore_from_backup without a my.cnf file") @@ -774,9 +803,6 @@ func (tm *TabletManager) 
handleRestore(ctx context.Context) (bool, error) { // Restore in the background if restoreFromBackup { go func() { - // Open the state manager after restore is done. - defer tm.tmState.Open() - // Zero date will cause us to use the latest, which is the default backupTime := time.Time{} // Or if a backup timestamp was specified then we use the last backup taken at or before that time @@ -798,9 +824,18 @@ func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) { } // restoreFromBackup will just be a regular action // (same as if it was triggered remotely) - if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreToTimestamp, restoreToPos); err != nil { + if err := tm.RestoreData(ctx, logutil.NewConsoleLogger(), waitForBackupInterval, false /* deleteBeforeRestore */, backupTime, restoreToTimestamp, restoreToPos, mysqlShutdownTimeout); err != nil { log.Exitf("RestoreFromBackup failed: %v", err) } + + // Make sure we have the correct privileges for the DBA user before we start the state manager. + err := tm.waitForDBAGrants(config, mysqlctl.DbaGrantWaitTime) + if err != nil { + log.Exitf("Failed waiting for DBA grants: %v", err) + } + + // Open the state manager after restore is done. + tm.tmState.Open() }() return true, nil } @@ -808,6 +843,22 @@ func (tm *TabletManager) handleRestore(ctx context.Context) (bool, error) { return false, nil } +// waitForDBAGrants waits for DBA user to have the required privileges to function properly. +func (tm *TabletManager) waitForDBAGrants(config *tabletenv.TabletConfig, waitTime time.Duration) (err error) { + // We should close the _waitForGrantsComplete channel in the end to signify that the wait for dba grants has completed. + defer func() { + if err == nil { + close(tm._waitForGrantsComplete) + } + }() + // We don't wait for grants if the tablet is externally managed. Permissions + // are then the responsibility of the DBA. 
+ if config == nil || config.DB.HasGlobalSettings() || waitTime == 0 { + return nil + } + return tm.MysqlDaemon.WaitForDBAGrants(context.Background(), waitTime) +} + func (tm *TabletManager) exportStats() { tablet := tm.Tablet() statsKeyspace.Set(tablet.Keyspace) @@ -830,7 +881,7 @@ func (tm *TabletManager) withRetry(ctx context.Context, description string, work backoff := 1 * time.Second for { err := work() - if err == nil || err == context.Canceled || err == context.DeadlineExceeded { + if err == nil || errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { return err } @@ -842,8 +893,7 @@ func (tm *TabletManager) withRetry(ctx context.Context, description string, work // Exponential backoff with 1.3 as a factor, // and randomized down by at most 20 // percent. The generated time series looks - // good. Also note rand.Seed is called at - // init() time in binlog_players.go. + // good. f := float64(backoff) * 1.3 f -= f * 0.2 * rand.Float64() backoff = time.Duration(f) @@ -924,12 +974,12 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t tablet.Type = tabletType - semiSyncAction, err := tm.convertBoolToSemiSyncAction(reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet)) + semiSyncAction, err := tm.convertBoolToSemiSyncAction(ctx, reparentutil.IsReplicaSemiSync(durability, currentPrimary.Tablet, tablet)) if err != nil { return nil, err } - if err := tm.fixSemiSync(tabletType, semiSyncAction); err != nil { + if err := tm.fixSemiSync(ctx, tabletType, semiSyncAction); err != nil { return nil, err } @@ -938,7 +988,7 @@ func (tm *TabletManager) initializeReplication(ctx context.Context, tabletType t log.Warningf("primary tablet in the shard record does not have mysql hostname specified, possibly because that tablet has been shut down.") return nil, nil } - if err := tm.MysqlDaemon.SetReplicationSource(ctx, currentPrimary.Tablet.MysqlHostname, currentPrimary.Tablet.MysqlPort, true /* 
stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + if err := tm.MysqlDaemon.SetReplicationSource(ctx, currentPrimary.Tablet.MysqlHostname, currentPrimary.Tablet.MysqlPort, 0, true, true); err != nil { return nil, vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") } diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go index 148042bd6b1..d0c0075eda3 100644 --- a/go/vt/vttablet/tabletmanager/tm_init_test.go +++ b/go/vt/vttablet/tabletmanager/tm_init_test.go @@ -34,11 +34,14 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" + vttestpb "vitess.io/vitess/go/vt/proto/vttest" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletservermock" + "vitess.io/vitess/go/vt/vttest" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" @@ -71,16 +74,16 @@ func TestStartBuildTabletFromInput(t *testing.T) { Type: topodatapb.TabletType_REPLICA, Tags: map[string]string{}, DbNameOverride: "aa", - DefaultConnCollation: uint32(collations.Default()), + DefaultConnCollation: uint32(collations.MySQL8().DefaultConnectionCharset()), } - gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) // Hostname should be resolved. 
assert.Equal(t, wantTablet, gotTablet) tabletHostname = "" - gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) assert.NotEqual(t, "", gotTablet.Hostname) @@ -92,7 +95,7 @@ func TestStartBuildTabletFromInput(t *testing.T) { Start: []byte(""), End: []byte("\xc0"), } - gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) // KeyRange check is explicit because the next comparison doesn't // show the diff well enough. @@ -102,25 +105,25 @@ func TestStartBuildTabletFromInput(t *testing.T) { // Invalid inputs. initKeyspace = "" initShard = "0" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "init_keyspace and init_shard must be specified") initKeyspace = "test_keyspace" initShard = "" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "init_keyspace and init_shard must be specified") initShard = "x-y" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "cannot validate shard name") initShard = "0" initTabletType = "bad" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "unknown TabletType bad") initTabletType = "primary" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "invalid init_tablet_type PRIMARY") } @@ -153,10 
+156,10 @@ func TestBuildTabletFromInputWithBuildTags(t *testing.T) { Type: topodatapb.TabletType_REPLICA, Tags: servenv.AppVersion.ToStringMap(), DbNameOverride: "aa", - DefaultConnCollation: uint32(collations.Default()), + DefaultConnCollation: uint32(collations.MySQL8().DefaultConnectionCharset()), } - gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) assert.Equal(t, wantTablet, gotTablet) } @@ -282,7 +285,7 @@ func TestCheckPrimaryShip(t *testing.T) { return nil }) require.NoError(t, err) - err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) require.NoError(t, err) ti, err = ts.GetTablet(ctx, alias) require.NoError(t, err) @@ -297,7 +300,7 @@ func TestCheckPrimaryShip(t *testing.T) { // correct and start as PRIMARY. err = ts.DeleteTablet(ctx, alias) require.NoError(t, err) - err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) require.NoError(t, err) ti, err = ts.GetTablet(ctx, alias) require.NoError(t, err) @@ -311,7 +314,7 @@ func TestCheckPrimaryShip(t *testing.T) { ti.Type = topodatapb.TabletType_PRIMARY err = ts.UpdateTablet(ctx, ti) require.NoError(t, err) - err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) require.NoError(t, err) ti, err = ts.GetTablet(ctx, alias) require.NoError(t, err) @@ -321,7 +324,7 @@ func TestCheckPrimaryShip(t *testing.T) { tm.Stop() // 5. Subsequent inits will still start the vttablet as PRIMARY. 
- err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) require.NoError(t, err) ti, err = ts.GetTablet(ctx, alias) require.NoError(t, err) @@ -353,7 +356,7 @@ func TestCheckPrimaryShip(t *testing.T) { return nil }) require.NoError(t, err) - err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) require.NoError(t, err) ti, err = ts.GetTablet(ctx, alias) require.NoError(t, err) @@ -376,11 +379,11 @@ func TestCheckPrimaryShip(t *testing.T) { fakeMysql := tm.MysqlDaemon.(*mysqlctl.FakeMysqlDaemon) fakeMysql.SetReplicationSourceInputs = append(fakeMysql.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", otherTablet.MysqlHostname, otherTablet.MysqlPort)) fakeMysql.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } - err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) require.NoError(t, err) ti, err = ts.GetTablet(ctx, alias) require.NoError(t, err) @@ -407,7 +410,7 @@ func TestStartCheckMysql(t *testing.T) { DBConfigs: dbconfigs.NewTestDBConfigs(cp, cp, ""), QueryServiceControl: tabletservermock.NewController(), } - err := tm.Start(tablet, 0) + err := tm.Start(tablet, nil) require.NoError(t, err) defer tm.Stop() @@ -435,7 +438,7 @@ func TestStartFindMysqlPort(t *testing.T) { DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), } - err := tm.Start(tablet, 0) + err := tm.Start(tablet, nil) require.NoError(t, err) defer tm.Stop() @@ -511,7 +514,7 @@ func TestStartDoesNotUpdateReplicationDataForTabletInWrongShard(t *testing.T) { tablet := newTestTablet(t, 1, "ks", "-d0") require.NoError(t, err) - err = tm.Start(tablet, 0) + err = tm.Start(tablet, nil) assert.Contains(t, err.Error(), "existing tablet keyspace and shard ks/0 differ") tablets, err := ts.FindAllTabletAliasesInShard(ctx, "ks", "-d0") @@ -548,7 +551,7 @@ func TestCheckTabletTypeResets(t *testing.T) { return nil }) require.NoError(t, err) - err = tm.Start(tablet, 0) + 
err = tm.Start(tablet, nil) require.NoError(t, err) assert.Equal(t, tm.tmState.tablet.Type, tm.tmState.displayState.tablet.Type) ti, err = ts.GetTablet(ctx, alias) @@ -630,7 +633,6 @@ func TestGetBuildTags(t *testing.T) { } for _, tt := range tests { - tt := tt t.Run(tt.skipCSV, func(t *testing.T) { t.Parallel() @@ -671,7 +673,7 @@ func newTestTM(t *testing.T, ts *topo.Server, uid int, keyspace, shard string) * DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), } - err := tm.Start(tablet, 0) + err := tm.Start(tablet, nil) require.NoError(t, err) // Wait for SrvKeyspace to be rebuilt. We know that it has been built @@ -733,3 +735,196 @@ func ensureSrvKeyspace(t *testing.T, ctx context.Context, ts *topo.Server, cell, } assert.True(t, found) } + +func TestWaitForDBAGrants(t *testing.T) { + tests := []struct { + name string + waitTime time.Duration + errWanted string + setupFunc func(t *testing.T) (*tabletenv.TabletConfig, func()) + }{ + { + name: "Success without any wait", + waitTime: 1 * time.Second, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql instance, and the dba user with required grants. + // Since all the grants already exist, this should pass without any waiting to be needed. + testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + grantAllPrivilegesToUser(t, cluster.MySQLConnParams(), testUser) + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, + { + name: "Success with wait", + waitTime: 1 * time.Second, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql instance, but delay granting the privileges to the dba user. 
+ // This makes the waitForDBAGrants function retry the grant check. + testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + + go func() { + time.Sleep(500 * time.Millisecond) + grantAllPrivilegesToUser(t, cluster.MySQLConnParams(), testUser) + }() + + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, { + name: "Failure due to timeout", + waitTime: 300 * time.Millisecond, + errWanted: "timed out after 300ms waiting for the dba user to have the required permissions", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql but don't give the grants to the vt_dba user at all. + // This should cause a timeout after waiting, since the privileges are never granted. + testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, { + name: "Success for externally managed tablet", + waitTime: 300 * time.Millisecond, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + // Create a new mysql but don't give the grants to the vt_dba user at all. + // Since the tablet is externally managed (an external DB host is configured), the grants check is skipped and no error is expected.
+ testUser := "vt_test_dba" + cluster, err := startMySQLAndCreateUser(t, testUser) + require.NoError(t, err) + + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{ + Host: "some.unknown.host", + }, + } + connParams := cluster.MySQLConnParams() + connParams.Uname = testUser + tc.DB.SetDbParams(connParams, mysql.ConnParams{}, mysql.ConnParams{}) + return tc, func() { + cluster.TearDown() + } + }, + }, { + name: "Empty timeout", + waitTime: 0, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + tc := &tabletenv.TabletConfig{ + DB: &dbconfigs.DBConfigs{}, + } + return tc, func() {} + }, + }, { + name: "Empty config", + waitTime: 300 * time.Millisecond, + errWanted: "", + setupFunc: func(t *testing.T) (*tabletenv.TabletConfig, func()) { + return nil, func() {} + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config, cleanup := tt.setupFunc(t) + defer cleanup() + var dm mysqlctl.MysqlDaemon + if config != nil { + dm = mysqlctl.NewMysqld(config.DB) + } + tm := TabletManager{ + _waitForGrantsComplete: make(chan struct{}), + MysqlDaemon: dm, + } + err := tm.waitForDBAGrants(config, tt.waitTime) + if tt.errWanted == "" { + require.NoError(t, err) + // Verify the channel has been closed. + _, isOpen := <-tm._waitForGrantsComplete + require.False(t, isOpen) + } else { + require.EqualError(t, err, tt.errWanted) + } + }) + } +} + +// startMySQLAndCreateUser starts a MySQL instance and creates the given user +func startMySQLAndCreateUser(t *testing.T, testUser string) (vttest.LocalCluster, error) { + // Launch MySQL. + // We need a Keyspace in the topology, so the DbName is set. + // We need a Shard too, so the database 'vttest' is created. 
+ cfg := vttest.Config{ + Topology: &vttestpb.VTTestTopology{ + Keyspaces: []*vttestpb.Keyspace{ + { + Name: "vttest", + Shards: []*vttestpb.Shard{ + { + Name: "0", + DbNameOverride: "vttest", + }, + }, + }, + }, + }, + OnlyMySQL: true, + Charset: "utf8mb4", + } + cluster := vttest.LocalCluster{ + Config: cfg, + } + err := cluster.Setup() + if err != nil { + return cluster, err + } + + connParams := cluster.MySQLConnParams() + conn, err := mysql.Connect(context.Background(), &connParams) + require.NoError(t, err) + _, err = conn.ExecuteFetch(fmt.Sprintf(`CREATE USER '%v'@'localhost'`, testUser), 1000, false) + conn.Close() + + return cluster, err + } + + // grantAllPrivilegesToUser grants all the privileges to the user specified. + func grantAllPrivilegesToUser(t *testing.T, connParams mysql.ConnParams, testUser string) { + conn, err := mysql.Connect(context.Background(), &connParams) + require.NoError(t, err) + _, err = conn.ExecuteFetch(fmt.Sprintf(`GRANT ALL ON *.* TO '%v'@'localhost'`, testUser), 1000, false) + require.NoError(t, err) + _, err = conn.ExecuteFetch(fmt.Sprintf(`GRANT GRANT OPTION ON *.* TO '%v'@'localhost'`, testUser), 1000, false) + require.NoError(t, err) + conn.Close() + } diff --git a/go/vt/vttablet/tabletmanager/tm_state.go b/go/vt/vttablet/tabletmanager/tm_state.go index df814ba5bee..312c675fce7 100644 --- a/go/vt/vttablet/tabletmanager/tm_state.go +++ b/go/vt/vttablet/tabletmanager/tm_state.go @@ -216,7 +216,7 @@ func (ts *tmState) ChangeTabletType(ctx context.Context, tabletType topodatapb.T if action == DBActionSetReadWrite { // We call SetReadOnly only after the topo has been updated to avoid // situations where two tablets are primary at the DB level but not at the vitess level - if err := ts.tm.MysqlDaemon.SetReadOnly(false); err != nil { + if err := ts.tm.MysqlDaemon.SetReadOnly(ctx, false); err != nil { return err } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/action.go b/go/vt/vttablet/tabletmanager/vdiff/action.go index
59ee79077f7..0b9dd6f45ed 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/action.go +++ b/go/vt/vttablet/tabletmanager/vdiff/action.go @@ -63,7 +63,15 @@ var ( } ) -func (vde *Engine) PerformVDiffAction(ctx context.Context, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) { +func (vde *Engine) PerformVDiffAction(ctx context.Context, req *tabletmanagerdatapb.VDiffRequest) (resp *tabletmanagerdatapb.VDiffResponse, err error) { + defer func() { + if err != nil { + globalStats.ErrorCount.Add(1) + } + }() + if req == nil { + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "nil vdiff request") + } if !vde.isOpen { return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "vdiff engine is closed") } @@ -71,7 +79,7 @@ func (vde *Engine) PerformVDiffAction(ctx context.Context, req *tabletmanagerdat return nil, vterrors.New(vtrpcpb.Code_UNAVAILABLE, "vdiff engine is still trying to open") } - resp := &tabletmanagerdatapb.VDiffResponse{ + resp = &tabletmanagerdatapb.VDiffResponse{ Id: 0, Output: nil, } @@ -232,9 +240,6 @@ func (vde *Engine) handleCreateResumeAction(ctx context.Context, dbClient binlog if qr.RowsAffected == 0 { msg := fmt.Sprintf("no completed or stopped vdiff found for UUID %s on tablet %v", req.VdiffUuid, vde.thisTablet.Alias) - if err != nil { - msg = fmt.Sprintf("%s (%v)", msg, err) - } return fmt.Errorf(msg) } } @@ -371,6 +376,9 @@ func (vde *Engine) handleDeleteAction(ctx context.Context, dbClient binlogplayer } controller.Stop() delete(vde.controllers, controller.id) + globalStats.mu.Lock() + defer globalStats.mu.Unlock() + delete(globalStats.controllers, controller.id) } switch req.ActionArg { diff --git a/go/vt/vttablet/tabletmanager/vdiff/action_test.go b/go/vt/vttablet/tabletmanager/vdiff/action_test.go index 1049bc8607d..4676238cf69 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/action_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/action_test.go @@ -56,8 +56,13 @@ func TestPerformVDiffAction(t *testing.T) { 
expectQueries []queryAndResult wantErr error }{ + { + name: "nil request", + wantErr: vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "nil vdiff request"), + }, { name: "engine not open", + req: &tabletmanagerdatapb.VDiffRequest{}, vde: &Engine{isOpen: false}, wantErr: vterrors.New(vtrpcpb.Code_UNAVAILABLE, "vdiff engine is closed"), }, @@ -208,6 +213,7 @@ func TestPerformVDiffAction(t *testing.T) { }, }, } + errCount := int64(0) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if tt.preFunc != nil { @@ -224,6 +230,9 @@ func TestPerformVDiffAction(t *testing.T) { vdiffenv.dbClient.ExpectRequest(queryResult.query, queryResult.result, nil) } got, err := tt.vde.PerformVDiffAction(ctx, tt.req) + if err != nil { + errCount++ + } vdiffenv.dbClient.Wait() if tt.wantErr != nil && !vterrors.Equals(err, tt.wantErr) { t.Errorf("Engine.PerformVDiffAction() error = %v, wantErr %v", err, tt.wantErr) @@ -239,6 +248,8 @@ func TestPerformVDiffAction(t *testing.T) { // No VDiffs should be running anymore. 
require.Equal(t, 0, len(vdiffenv.vde.controllers), "expected no controllers to be running, but found %d", len(vdiffenv.vde.controllers)) + require.Equal(t, int64(0), globalStats.numControllers(), "expected no controllers, but found %d", globalStats.numControllers()) }) + require.Equal(t, errCount, globalStats.ErrorCount.Get(), "expected error count %d, got %d", errCount, globalStats.ErrorCount.Get()) } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/controller.go b/go/vt/vttablet/tabletmanager/vdiff/controller.go index 22b1d3f5374..20c1501989e 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/controller.go +++ b/go/vt/vttablet/tabletmanager/vdiff/controller.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -39,10 +40,8 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -/* -vdiff operation states: pending/started/stopped/completed/error/unknown -vdiff table states: pending/started/stopped/completed/error/unknown -*/ +// VDiff operation and table states: +// pending/started/stopped/completed/error/unknown type VDiffState string //nolint const ( PendingState VDiffState = "pending" @@ -55,28 +54,33 @@ const ( ) type controller struct { - id int64 // id from row in _vt.vdiff + id int64 // id from the row in _vt.vdiff uuid string workflow string workflowType binlogdatapb.VReplicationWorkflowType cancel context.CancelFunc dbClientFactory func() binlogplayer.DBClient ts *topo.Server - vde *Engine // the singleton vdiff engine + vde *Engine // The singleton vdiff engine done chan struct{} - sources map[string]*migrationSource // currently picked source tablets for this shard's data + sources map[string]*migrationSource // Currently picked source tablets for this shard's data workflowFilter string sourceKeyspace string tmc tmclient.TabletManagerClient targetShardStreamer *shardStreamer -
filter *binlogdatapb.Filter // vreplication row filter - options *tabletmanagerdata.VDiffOptions // options initially from vtctld command and later from _vt.vdiff + filter *binlogdatapb.Filter // VReplication row filter + options *tabletmanagerdata.VDiffOptions // Options initially from vtctld command and later from _vt.vdiff + + sourceTimeZone, targetTimeZone string // Named time zones if conversions are necessary for datetime values - sourceTimeZone, targetTimeZone string // named time zones if conversions are necessary for datetime values + externalCluster string // For Mount+Migrate - externalCluster string // for Mount+Migrate + // Information used in vdiff stats/metrics. + Errors *stats.CountersWithSingleLabel + TableDiffRowCounts *stats.CountersWithSingleLabel + TableDiffPhaseTimings *stats.Timings } func newController(ctx context.Context, row sqltypes.RowNamedValues, dbClientFactory func() binlogplayer.DBClient, @@ -86,16 +90,19 @@ func newController(ctx context.Context, row sqltypes.RowNamedValues, dbClientFac id, _ := row["id"].ToInt64() ct := &controller{ - id: id, - uuid: row["vdiff_uuid"].ToString(), - workflow: row["workflow"].ToString(), - dbClientFactory: dbClientFactory, - ts: ts, - vde: vde, - done: make(chan struct{}), - tmc: vde.tmClientFactory(), - sources: make(map[string]*migrationSource), - options: options, + id: id, + uuid: row["vdiff_uuid"].ToString(), + workflow: row["workflow"].ToString(), + dbClientFactory: dbClientFactory, + ts: ts, + vde: vde, + done: make(chan struct{}), + tmc: vde.tmClientFactory(), + sources: make(map[string]*migrationSource), + options: options, + Errors: stats.NewCountersWithSingleLabel("", "", "Error"), + TableDiffRowCounts: stats.NewCountersWithSingleLabel("", "", "Rows"), + TableDiffPhaseTimings: stats.NewTimings("", "", "", "TablePhase"), } ctx, ct.cancel = context.WithCancel(ctx) go ct.run(ctx) @@ -185,7 +192,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) case 
<-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") case <-ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return ErrVDiffStoppedByUser default: } ct.workflowFilter = fmt.Sprintf("where workflow = %s and db_name = %s", encodeString(ct.workflow), @@ -201,7 +208,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") case <-ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return ErrVDiffStoppedByUser default: } source := newMigrationSource() @@ -240,7 +247,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) return err } - wd, err := newWorkflowDiffer(ct, ct.options) + wd, err := newWorkflowDiffer(ct, ct.options, ct.vde.collationEnv) if err != nil { return err } @@ -328,7 +335,7 @@ func (ct *controller) saveErrorState(ctx context.Context, saveErr error) error { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "engine is shutting down") case <-ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return ErrVDiffStoppedByUser case <-time.After(retryDelay): if retryDelay < maxRetryDelay { retryDelay = time.Duration(float64(retryDelay) * 1.5) diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go index 72098eb52be..b2285a070fa 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go @@ -24,19 +24,18 @@ import ( "sync" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" - "vitess.io/vitess/go/vt/vttablet/tmclient" - "vitess.io/vitess/go/sqltypes" 
"vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/vt/vttablet/tmclient" ) type Engine struct { @@ -69,14 +68,19 @@ type Engine struct { // modified behavior for that env, e.g. not starting the retry goroutine. This should // NOT be set in production. fortests bool + + collationEnv *collations.Environment + parser *sqlparser.Parser } -func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, tablet *topodata.Tablet) *Engine { +func NewEngine(ts *topo.Server, tablet *topodata.Tablet, collationEnv *collations.Environment, parser *sqlparser.Parser) *Engine { vde := &Engine{ controllers: make(map[int64]*controller), ts: ts, thisTablet: tablet, tmClientFactory: func() tmclient.TabletManagerClient { return tmclient.NewTabletManagerClient() }, + collationEnv: collationEnv, + parser: parser, } return vde } @@ -94,20 +98,22 @@ func NewTestEngine(ts *topo.Server, tablet *topodata.Tablet, dbn string, dbcf fu dbClientFactoryDba: dbcf, tmClientFactory: tmcf, fortests: true, + collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vde } func (vde *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { - // If it's a test engine and we're already initilized then do nothing. + // If it's a test engine and we're already initialized then do nothing. 
if vde.fortests && vde.dbClientFactoryFiltered != nil && vde.dbClientFactoryDba != nil { return } vde.dbClientFactoryFiltered = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB()) + return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB(), vde.parser) } vde.dbClientFactoryDba = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.DbaWithDB()) + return binlogplayer.NewDBClient(dbcfgs.DbaWithDB(), vde.parser) } vde.dbName = dbcfgs.DBName } @@ -152,8 +158,9 @@ func (vde *Engine) openLocked(ctx context.Context) error { if err := vde.initControllers(rows); err != nil { return err } + vde.updateStats() - // At this point we've fully and succesfully opened so begin + // At this point we've fully and successfully opened so begin // retrying error'd VDiffs until the engine is closed. vde.wg.Add(1) go func() { @@ -193,7 +200,7 @@ func (vde *Engine) retry(ctx context.Context, err error) { if err := vde.openLocked(ctx); err == nil { log.Infof("VDiff engine: opened successfully") // Don't invoke cancelRetry because openLocked - // will hold on to this context for later cancelation. + // will hold on to this context for later cancellation. vde.cancelRetry = nil vde.mu.Unlock() return @@ -211,6 +218,9 @@ func (vde *Engine) addController(row sqltypes.RowNamedValues, options *tabletman row, vde.thisTablet.Alias) } vde.controllers[ct.id] = ct + globalStats.mu.Lock() + defer globalStats.mu.Unlock() + globalStats.controllers[ct.id] = ct return nil } @@ -385,4 +395,16 @@ func (vde *Engine) resetControllers() { ct.Stop() } vde.controllers = make(map[int64]*controller) + vde.updateStats() +} + +// updateStats must only be called while holding the engine lock. 
+func (vre *Engine) updateStats() { + globalStats.mu.Lock() + defer globalStats.mu.Unlock() + + globalStats.controllers = make(map[int64]*controller, len(vre.controllers)) + for id, ct := range vre.controllers { + globalStats.controllers[id] = ct + } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go index 75b0e37d630..e6c9a84d9e2 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -148,7 +147,7 @@ func TestVDiff(t *testing.T) { ), "NULL", ), nil) - vdenv.dbClient.ExpectRequest(fmt.Sprintf("select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = '%s' and table_name in ('t1')", vdiffDBName), sqltypes.MakeTestResult(sqltypes.MakeTestFields( + vdenv.dbClient.ExpectRequest(fmt.Sprintf("select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = '%s' and table_name in ('t1') order by table_name", vdiffDBName), sqltypes.MakeTestResult(sqltypes.MakeTestFields( "table_name|table_rows", "varchar|int64", ), @@ -193,7 +192,7 @@ func TestVDiff(t *testing.T) { vdenv.dbClient.ExpectRequest(`insert into _vt.vdiff_log(vdiff_id, message) values (1, 'completed: table \'t1\'')`, singleRowAffected, nil) vdenv.dbClient.ExpectRequest("update _vt.vdiff_table set state = 'completed' where vdiff_id = 1 and table_name = 't1'", singleRowAffected, nil) vdenv.dbClient.ExpectRequest(`insert into _vt.vdiff_log(vdiff_id, message) values (1, 'completed: table \'t1\'')`, singleRowAffected, nil) - vdenv.dbClient.ExpectRequest("select table_name as table_name from _vt.vdiff_table where vdiff_id = 1 and state != 
'completed'", singleRowAffected, nil) + vdenv.dbClient.ExpectRequest("select table_name as table_name from _vt.vdiff_table where vdiff_id = 1 and state != 'completed' order by table_name", singleRowAffected, nil) vdenv.dbClient.ExpectRequest("update _vt.vdiff set state = 'completed', last_error = left('', 1024) , completed_at = utc_timestamp() where id = 1", singleRowAffected, nil) vdenv.dbClient.ExpectRequest("insert into _vt.vdiff_log(vdiff_id, message) values (1, 'State changed to: completed')", singleRowAffected, nil) @@ -270,7 +269,7 @@ func TestEngineRetryErroredVDiffs(t *testing.T) { fmt.Sprintf("%s|%s|%s|%s||9223372036854775807|9223372036854775807||PRIMARY,REPLICA|1669511347|0|Running||%s|200||1669511347|1|0||1", id, vdiffenv.workflow, vreplSource, vdiffSourceGtid, vdiffDBName), ), nil) - // At this point we know that we kicked off the expected retry so we can short circit the vdiff. + // At this point we know that we kicked off the expected retry so we can short circuit the vdiff. 
shortCircuitTestAfterQuery(fmt.Sprintf("update _vt.vdiff set state = 'started', last_error = left('', 1024) , started_at = utc_timestamp() where id = %s", id), vdiffenv.dbClient) expectedControllerCnt++ diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go index d5e8c134814..43aa76894d4 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go @@ -100,16 +100,26 @@ var ( Columns: []string{"id", "dt"}, PrimaryKeyColumns: []string{"id"}, Fields: sqltypes.MakeTestFields("id|dt", "int64|datetime"), + }, { + Name: "nopk", + Columns: []string{"c1", "c2", "c3"}, + Fields: sqltypes.MakeTestFields("c1|c2|c3", "int64|int64|int64"), + }, { + Name: "nopkwithpke", + Columns: []string{"c1", "c2", "c3"}, + Fields: sqltypes.MakeTestFields("c1|c2|c3", "int64|int64|int64"), }, }, } tableDefMap = map[string]int{ - "t1": 0, - "nonpktext": 1, - "pktext": 2, - "multipk": 3, - "aggr": 4, - "datze": 5, + "t1": 0, + "nonpktext": 1, + "pktext": 2, + "multipk": 3, + "aggr": 4, + "datze": 5, + "nopk": 6, + "nopkwithpke": 7, } ) @@ -145,7 +155,7 @@ type LogExpectation struct { } func init() { - tabletconn.RegisterDialer("test", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tabletconn.RegisterDialer("test", func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { vdiffenv.mu.Lock() defer vdiffenv.mu.Unlock() if qs, ok := vdiffenv.tablets[int(tablet.Alias.Uid)]; ok { @@ -154,7 +164,7 @@ func init() { return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid) }) // TableDiffer does a default grpc dial just to be sure it can talk to the tablet. 
- tabletconn.RegisterDialer("grpc", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tabletconn.RegisterDialer("grpc", func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { vdiffenv.mu.Lock() defer vdiffenv.mu.Unlock() if qs, ok := vdiffenv.tablets[int(tablet.Alias.Uid)]; ok { @@ -285,7 +295,7 @@ type fakeBinlogClient struct { lastCharset *binlogdatapb.Charset } -func (fbc *fakeBinlogClient) Dial(tablet *topodatapb.Tablet) error { +func (fbc *fakeBinlogClient) Dial(ctx context.Context, tablet *topodatapb.Tablet) error { fbc.lastTablet = tablet return nil } @@ -396,6 +406,22 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu return qr, err } +func (dbc *realDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) + if err != nil { + return nil, err + } + results := make([]*sqltypes.Result, 0, len(queries)) + for _, query := range queries { + qr, err := dbc.ExecuteFetch(query, maxrows) + if err != nil { + return nil, err + } + results = append(results, qr) + } + return results, nil +} + //---------------------------------------------- // fakeTMClient diff --git a/go/vt/vttablet/tabletmanager/vdiff/report.go b/go/vt/vttablet/tabletmanager/vdiff/report.go index 4f9b264cddd..62ce6d24585 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/report.go +++ b/go/vt/vttablet/tabletmanager/vdiff/report.go @@ -26,9 +26,7 @@ import ( ) const ( - // At most how many samples we should show for row differences in the final report - maxVDiffReportSampleRows = 10 - truncatedNotation = "...[TRUNCATED]" + truncatedNotation = "...[TRUNCATED]" ) // DiffReport is the summary of differences for one table. 
@@ -68,7 +66,7 @@ type RowDiff struct { func (td *tableDiffer) genRowDiff(queryStmt string, row []sqltypes.Value, debug, onlyPks bool) (*RowDiff, error) { drp := &RowDiff{} drp.Row = make(map[string]string) - statement, err := sqlparser.Parse(queryStmt) + statement, err := td.wd.ct.vde.parser.Parse(queryStmt) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/schema.go b/go/vt/vttablet/tabletmanager/vdiff/schema.go index a63e60d9434..afb79b4e4b3 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/schema.go +++ b/go/vt/vttablet/tabletmanager/vdiff/schema.go @@ -37,19 +37,19 @@ const ( vd.started_at as started_at, vdt.rows_compared as rows_compared, vd.completed_at as completed_at, IF(vdt.mismatch = 1, 1, 0) as has_mismatch, vdt.report as report from _vt.vdiff as vd left join _vt.vdiff_table as vdt on (vd.id = vdt.vdiff_id) - where vd.id = %a` + where vd.id = %a order by table_name` // sqlUpdateVDiffState has a penultimate placeholder for any additional columns you want to update, e.g. `, foo = 1`. // It also truncates the error if needed to ensure that we can save the state when the error text is very long. 
sqlUpdateVDiffState = "update _vt.vdiff set state = %s, last_error = left(%s, 1024) %s where id = %d" sqlUpdateVDiffStopped = `update _vt.vdiff as vd, _vt.vdiff_table as vdt set vd.state = 'stopped', vdt.state = 'stopped', vd.last_error = '' where vd.id = vdt.vdiff_id and vd.id = %a and vd.state != 'completed'` - sqlGetVReplicationEntry = "select * from _vt.vreplication %s" + sqlGetVReplicationEntry = "select * from _vt.vreplication %s" // A filter/where is added by the caller sqlGetVDiffsToRun = "select * from _vt.vdiff where state in ('started','pending')" // what VDiffs have not been stopped or completed sqlGetVDiffsToRetry = "select * from _vt.vdiff where state = 'error' and json_unquote(json_extract(options, '$.core_options.auto_retry')) = 'true'" sqlGetVDiffID = "select id as id from _vt.vdiff where vdiff_uuid = %a" sqlGetVDiffIDsByKeyspaceWorkflow = "select id as id from _vt.vdiff where keyspace = %a and workflow = %a" sqlGetTableRows = "select table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %a and table_name = %a" - sqlGetAllTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name in (%s)" + sqlGetAllTableRows = "select table_name as table_name, table_rows as table_rows from INFORMATION_SCHEMA.TABLES where table_schema = %s and table_name in (%s) order by table_name" sqlNewVDiffTable = "insert into _vt.vdiff_table(vdiff_id, table_name, state, table_rows) values(%a, %a, 'pending', %a)" sqlGetVDiffTable = `select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report @@ -62,5 +62,5 @@ const ( sqlUpdateTableStateAndReport = "update _vt.vdiff_table set state = %a, rows_compared = %a, report = %a where vdiff_id = %a and table_name = %a" sqlUpdateTableMismatch = "update _vt.vdiff_table set mismatch = true where vdiff_id = %a and table_name = %a" - sqlGetIncompleteTables = "select table_name as table_name from _vt.vdiff_table where 
vdiff_id = %a and state != 'completed'" + sqlGetIncompleteTables = "select table_name as table_name from _vt.vdiff_table where vdiff_id = %a and state != 'completed' order by table_name" ) diff --git a/go/vt/vttablet/tabletmanager/vdiff/stats.go b/go/vt/vttablet/tabletmanager/vdiff/stats.go new file mode 100644 index 00000000000..04cda6ac0c1 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vdiff/stats.go @@ -0,0 +1,149 @@ +/* +Copyright 2023 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vdiff + +import ( + "fmt" + "sync" + + "vitess.io/vitess/go/stats" +) + +var ( + globalStats = &vdiffStats{} +) + +func init() { + globalStats.register() +} + +// This is a singleton. +// vdiffStats exports the stats for Engine. It's a separate structure to +// prevent potential deadlocks with the mutex in Engine. 
+type vdiffStats struct { + mu sync.Mutex + controllers map[int64]*controller + + Count *stats.Gauge + ErrorCount *stats.Counter + RestartedTableDiffs *stats.CountersWithSingleLabel + RowsDiffedCount *stats.Counter +} + +func (vds *vdiffStats) register() { + globalStats.Count = stats.NewGauge("", "") + globalStats.ErrorCount = stats.NewCounter("", "") + globalStats.RestartedTableDiffs = stats.NewCountersWithSingleLabel("", "", "Table") + globalStats.RowsDiffedCount = stats.NewCounter("", "") + + stats.NewGaugeFunc("VDiffCount", "Number of current vdiffs", vds.numControllers) + + stats.NewCounterFunc( + "VDiffErrorCountTotal", + "Number of errors encountered across all vdiff actions", + func() int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + return globalStats.ErrorCount.Get() + }, + ) + + stats.NewGaugesFuncWithMultiLabels( + "VDiffRestartedTableDiffsCount", + "Table diffs restarted due to --max-diff-duration counts by table", + []string{"table_name"}, + func() map[string]int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + result := make(map[string]int64) + for label, count := range globalStats.RestartedTableDiffs.Counts() { + if label == "" { + continue + } + result[label] = count + } + return result + }, + ) + + stats.NewCounterFunc( + "VDiffRowsComparedTotal", + "Number of rows compared across all vdiffs", + func() int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + return globalStats.RowsDiffedCount.Get() + }, + ) + + stats.NewGaugesFuncWithMultiLabels( + "VDiffRowsCompared", + "Live number of rows compared per vdiff by table", + []string{"workflow", "uuid", "table"}, + func() map[string]int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + result := make(map[string]int64, len(vds.controllers)) + for _, ct := range vds.controllers { + for key, val := range ct.TableDiffRowCounts.Counts() { + result[fmt.Sprintf("%s.%s.%s", ct.workflow, ct.uuid, key)] = val + } + } + return result + }, + ) + + stats.NewCountersFuncWithMultiLabels( + "VDiffErrors", + "Count of specific 
errors seen during the lifetime of a vdiff", + []string{"workflow", "uuid", "error"}, + func() map[string]int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + result := make(map[string]int64, len(vds.controllers)) + for _, ct := range vds.controllers { + for key, val := range ct.Errors.Counts() { + result[fmt.Sprintf("%s.%s.%s", ct.workflow, ct.uuid, key)] = val + } + } + return result + }, + ) + + stats.NewGaugesFuncWithMultiLabels( + "VDiffPhaseTimings", + "VDiff phase timings", + []string{"workflow", "uuid", "table", "phase"}, + func() map[string]int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + result := make(map[string]int64, len(vds.controllers)) + for _, ct := range vds.controllers { + for tablePhase, h := range ct.TableDiffPhaseTimings.Histograms() { + result[fmt.Sprintf("%s.%s.%s", ct.workflow, ct.uuid, tablePhase)] = h.Total() + } + } + return result + }, + ) +} + +func (vds *vdiffStats) numControllers() int64 { + vds.mu.Lock() + defer vds.mu.Unlock() + return int64(len(vds.controllers)) +} diff --git a/go/vt/vttablet/tabletmanager/vdiff/stats_test.go b/go/vt/vttablet/tabletmanager/vdiff/stats_test.go new file mode 100644 index 00000000000..21b2caa9992 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vdiff/stats_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vdiff + +import ( + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/stats" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +func TestVDiffStats(t *testing.T) { + testStats := &vdiffStats{ + ErrorCount: stats.NewCounter("", ""), + RestartedTableDiffs: stats.NewCountersWithSingleLabel("", "", "Table"), + RowsDiffedCount: stats.NewCounter("", ""), + } + id := int64(1) + testStats.controllers = map[int64]*controller{ + id: { + id: id, + workflow: "testwf", + workflowType: binlogdatapb.VReplicationWorkflowType_MoveTables, + uuid: uuid.New().String(), + Errors: stats.NewCountersWithSingleLabel("", "", "Error"), + TableDiffRowCounts: stats.NewCountersWithSingleLabel("", "", "Rows"), + TableDiffPhaseTimings: stats.NewTimings("", "", "", "TablePhase"), + }, + } + + require.Equal(t, int64(1), testStats.numControllers()) + + sleepTime := 1 * time.Millisecond + record := func(phase string) { + defer testStats.controllers[id].TableDiffPhaseTimings.Record(phase, time.Now()) + time.Sleep(sleepTime) + } + want := 10 * sleepTime // Allow 10x overhead for recording timing on flaky test hosts + record(string(initializing)) + require.Greater(t, want, testStats.controllers[id].TableDiffPhaseTimings.Histograms()[string(initializing)].Total()) + record(string(pickingTablets)) + require.Greater(t, want, testStats.controllers[id].TableDiffPhaseTimings.Histograms()[string(pickingTablets)].Total()) + record(string(diffingTable)) + require.Greater(t, want, testStats.controllers[id].TableDiffPhaseTimings.Histograms()[string(diffingTable)].Total()) + + testStats.ErrorCount.Set(11) + require.Equal(t, int64(11), testStats.ErrorCount.Get()) + + testStats.controllers[id].Errors.Add("test error", int64(12)) + require.Equal(t, int64(12), testStats.controllers[id].Errors.Counts()["test error"]) + + testStats.RestartedTableDiffs.Add("t1", int64(5)) + require.Equal(t, int64(5), 
testStats.RestartedTableDiffs.Counts()["t1"]) + + testStats.RowsDiffedCount.Add(512) + require.Equal(t, int64(512), testStats.RowsDiffedCount.Get()) +} diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go index c0cba599bdd..a98a3ce90f9 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go @@ -49,9 +49,25 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +type tableDiffPhase string + +const ( + initializing = tableDiffPhase("initializing") + pickingTablets = tableDiffPhase("picking_streaming_tablets") + syncingSources = tableDiffPhase("syncing_source_streams") + syncingTargets = tableDiffPhase("syncing_target_streams") + startingSources = tableDiffPhase("starting_source_data_streams") + startingTargets = tableDiffPhase("starting_target_data_streams") + restartingVreplication = tableDiffPhase("restarting_vreplication_streams") + diffingTable = tableDiffPhase("diffing_table") +) + // how long to wait for background operations to complete var BackgroundOperationTimeout = topo.RemoteOperationTimeout * 4 +var ErrMaxDiffDurationExceeded = vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "table diff was stopped due to exceeding the max-diff-duration time") +var ErrVDiffStoppedByUser = vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped by user") + // compareColInfo contains the metadata for a column of the table being diffed type compareColInfo struct { colIndex int // index of the column in the filter's select @@ -87,6 +103,7 @@ func newTableDiffer(wd *workflowDiffer, table *tabletmanagerdatapb.TableDefiniti // initialize func (td *tableDiffer) initialize(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, initializing), time.Now()) vdiffEngine := td.wd.ct.vde vdiffEngine.snapshotMu.Lock() defer vdiffEngine.snapshotMu.Unlock() @@ -109,7 +126,7 @@ func (td *tableDiffer) 
initialize(ctx context.Context) error { defer func() { unlock(&err) if err != nil { - log.Errorf("UnlockKeyspace %s failed: %v", targetKeyspace, lockErr) + log.Errorf("UnlockKeyspace %s failed: %v", targetKeyspace, err) } }() @@ -209,6 +226,7 @@ func (td *tableDiffer) forEachSource(cb func(source *migrationSource) error) err } func (td *tableDiffer) selectTablets(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, pickingTablets), time.Now()) var ( wg sync.WaitGroup sourceErr, targetErr error @@ -284,6 +302,7 @@ func (td *tableDiffer) pickTablet(ctx context.Context, ts *topo.Server, cells [] } func (td *tableDiffer) syncSourceStreams(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, syncingSources), time.Now()) // source can be replica, wait for them to at least reach max gtid of all target streams ct := td.wd.ct waitCtx, cancel := context.WithTimeout(ctx, time.Duration(ct.options.CoreOptions.TimeoutSeconds*int64(time.Second))) @@ -302,6 +321,7 @@ func (td *tableDiffer) syncSourceStreams(ctx context.Context) error { } func (td *tableDiffer) syncTargetStreams(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, syncingTargets), time.Now()) ct := td.wd.ct waitCtx, cancel := context.WithTimeout(ctx, time.Duration(ct.options.CoreOptions.TimeoutSeconds*int64(time.Second))) defer cancel() @@ -324,6 +344,7 @@ func (td *tableDiffer) syncTargetStreams(ctx context.Context) error { } func (td *tableDiffer) startTargetDataStream(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, startingTargets), time.Now()) ct := td.wd.ct gtidch := make(chan string, 1) ct.targetShardStreamer.result = make(chan *sqltypes.Result, 1) @@ -338,6 +359,7 @@ func (td *tableDiffer) startTargetDataStream(ctx context.Context) error { } func (td *tableDiffer) 
startSourceDataStreams(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, startingSources), time.Now()) if err := td.forEachSource(func(source *migrationSource) error { gtidch := make(chan string, 1) source.result = make(chan *sqltypes.Result, 1) @@ -356,6 +378,7 @@ func (td *tableDiffer) startSourceDataStreams(ctx context.Context) error { } func (td *tableDiffer) restartTargetVReplicationStreams(ctx context.Context) error { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, restartingVreplication), time.Now()) ct := td.wd.ct query := fmt.Sprintf("update _vt.vreplication set state='Running', message='', stop_pos='' where db_name=%s and workflow=%s", encodeString(ct.vde.dbName), encodeString(ct.workflow)) @@ -383,7 +406,7 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr td.wgShardStreamers.Done() }() participant.err = func() error { - conn, err := tabletconn.GetDialer()(participant.tablet, false) + conn, err := tabletconn.GetDialer()(ctx, participant.tablet, false) if err != nil { return err } @@ -431,7 +454,7 @@ func (td *tableDiffer) streamOneShard(ctx context.Context, participant *shardStr case <-ctx.Done(): return vterrors.Wrap(ctx.Err(), "VStreamRows") case <-td.wd.ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return ErrVDiffStoppedByUser } return nil }) @@ -444,25 +467,26 @@ func (td *tableDiffer) setupRowSorters() { for shard, source := range td.wd.ct.sources { sources[shard] = source.shardStreamer } - td.sourcePrimitive = newMergeSorter(sources, td.tablePlan.comparePKs) + td.sourcePrimitive = newMergeSorter(sources, td.tablePlan.comparePKs, td.wd.collationEnv) // Create a merge sorter for the target. 
targets := make(map[string]*shardStreamer) targets[td.wd.ct.targetShardStreamer.shard] = td.wd.ct.targetShardStreamer - td.targetPrimitive = newMergeSorter(targets, td.tablePlan.comparePKs) + td.targetPrimitive = newMergeSorter(targets, td.tablePlan.comparePKs, td.wd.collationEnv) // If there were aggregate expressions, we have to re-aggregate // the results, which engine.OrderedAggregate can do. if len(td.tablePlan.aggregates) != 0 { td.sourcePrimitive = &engine.OrderedAggregate{ Aggregates: td.tablePlan.aggregates, - GroupByKeys: pkColsToGroupByParams(td.tablePlan.pkCols), + GroupByKeys: pkColsToGroupByParams(td.tablePlan.pkCols, td.wd.collationEnv), Input: td.sourcePrimitive, } } } -func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onlyPks bool, maxExtraRowsToCompare int64) (*DiffReport, error) { +func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onlyPks bool, maxExtraRowsToCompare int64, maxReportSampleRows int64, stop <-chan time.Time) (*DiffReport, error) { + defer td.wd.ct.TableDiffPhaseTimings.Record(fmt.Sprintf("%s.%s", td.table.Name, diffingTable), time.Now()) dbClient := td.wd.ct.dbClientFactory() if err := dbClient.Connect(); err != nil { return nil, err @@ -506,11 +530,12 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl advanceSource := true advanceTarget := true - // Save our progress when we finish the run + // Save our progress when we finish the run. 
defer func() { if err := td.updateTableProgress(dbClient, dr, lastProcessedRow); err != nil { log.Errorf("Failed to update vdiff progress on %s table: %v", td.table.Name, err) } + globalStats.RowsDiffedCount.Add(dr.ProcessedRows) }() for { @@ -520,7 +545,10 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl case <-ctx.Done(): return nil, vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") case <-td.wd.ct.done: - return nil, vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return nil, ErrVDiffStoppedByUser + case <-stop: + globalStats.RestartedTableDiffs.Add(td.table.Name, 1) + return nil, ErrMaxDiffDurationExceeded default: } @@ -533,7 +561,7 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl } rowsToCompare-- if rowsToCompare < 0 { - log.Infof("Stopping vdiff, specified limit reached") + log.Infof("Stopping vdiff, specified row limit reached") return dr, nil } if advanceSource { @@ -564,7 +592,7 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl } dr.ExtraRowsTargetDiffs = append(dr.ExtraRowsTargetDiffs, diffRow) - // drain target, update count + // Drain target, update count. count, err := targetExecutor.drain(ctx) if err != nil { return nil, err @@ -574,8 +602,8 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl return dr, nil } if targetRow == nil { - // no more rows from the target - // we know we have rows from source, drain, update count + // No more rows from the target but we know we have more rows from + // source, so drain them and update the counts. 
diffRow, err := td.genRowDiff(td.tablePlan.sourceQuery, sourceRow, debug, onlyPks) if err != nil { return nil, vterrors.Wrap(err, "unexpected error generating diff") @@ -628,8 +656,8 @@ func (td *tableDiffer) diff(ctx context.Context, rowsToCompare int64, debug, onl case err != nil: return nil, err case c != 0: - // We don't do a second pass to compare mismatched rows so we can cap the slice here - if dr.MismatchedRows < maxVDiffReportSampleRows { + // We don't do a second pass to compare mismatched rows so we can cap the slice here. + if maxReportSampleRows == 0 || dr.MismatchedRows < maxReportSampleRows { sourceDiffRow, err := td.genRowDiff(td.tablePlan.targetQuery, sourceRow, debug, onlyPks) if err != nil { return nil, vterrors.Wrap(err, "unexpected error generating diff") @@ -672,7 +700,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com if collationID == collations.Unknown { collationID = collations.CollationBinaryID } - c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) + c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], td.wd.collationEnv, collationID, nil) if err != nil { return 0, err } @@ -700,6 +728,16 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D return err } + if td.wd.opts.CoreOptions.MaxDiffSeconds > 0 { + // Update the in-memory lastPK as well so that we can restart the table + // diff if needed. 
+ lastpkpb := &querypb.QueryResult{} + if err := prototext.Unmarshal(lastPK, lastpkpb); err != nil { + return err + } + td.lastPK = lastpkpb + } + query, err = sqlparser.ParseAndBind(sqlUpdateTableProgress, sqltypes.Int64BindVariable(dr.ProcessedRows), sqltypes.StringBindVariable(string(lastPK)), @@ -724,6 +762,7 @@ func (td *tableDiffer) updateTableProgress(dbClient binlogplayer.DBClient, dr *D if _, err := dbClient.ExecuteFetch(query, 1); err != nil { return err } + td.wd.ct.TableDiffRowCounts.Add(td.table.Name, dr.ProcessedRows) return nil } @@ -824,10 +863,10 @@ func (td *tableDiffer) adjustForSourceTimeZone(targetSelectExprs sqlparser.Selec if fieldType == querypb.Type_DATETIME { convertTZFuncExpr = &sqlparser.FuncExpr{ Name: sqlparser.NewIdentifierCI("convert_tz"), - Exprs: sqlparser.SelectExprs{ - expr, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(td.wd.ct.targetTimeZone)}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(td.wd.ct.sourceTimeZone)}, + Exprs: sqlparser.Exprs{ + selExpr.Expr, + sqlparser.NewStrLiteral(td.wd.ct.targetTimeZone), + sqlparser.NewStrLiteral(td.wd.ct.sourceTimeZone), }, } log.Infof("converting datetime column %s using convert_tz()", colName) diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go index e669dbd9a33..548f902e9ac 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go @@ -17,21 +17,23 @@ limitations under the License. 
package vdiff import ( + "context" "fmt" "strings" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/engine/opcode" + + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const sqlSelectColumnCollations = "select column_name as column_name, collation_name as collation_name from information_schema.columns where table_schema=%a and table_name=%a and column_name in %a" @@ -59,12 +61,12 @@ type tablePlan struct { aggregates []*engine.AggregateParams } -func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName string) (*tablePlan, error) { +func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName string, collationEnv *collations.Environment) (*tablePlan, error) { tp := &tablePlan{ table: td.table, dbName: dbName, } - statement, err := sqlparser.Parse(td.sourceQuery) + statement, err := td.wd.ct.vde.parser.Parse(td.sourceQuery) if err != nil { return nil, err } @@ -75,7 +77,7 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str sourceSelect := &sqlparser.Select{} targetSelect := &sqlparser.Select{} - // aggregates is the list of Aggregate functions, if any. + // Aggregates is the list of Aggregate functions, if any. 
var aggregates []*engine.AggregateParams for _, selExpr := range sel.SelectExprs { switch selExpr := selExpr.(type) { @@ -88,14 +90,14 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str } case *sqlparser.AliasedExpr: var targetCol *sqlparser.ColName - if !selExpr.As.IsEmpty() { - targetCol = &sqlparser.ColName{Name: selExpr.As} - } else { + if selExpr.As.IsEmpty() { if colAs, ok := selExpr.Expr.(*sqlparser.ColName); ok { targetCol = colAs } else { return nil, fmt.Errorf("expression needs an alias: %v", sqlparser.String(selExpr)) } + } else { + targetCol = &sqlparser.ColName{Name: selExpr.As} } // If the input was "select a as b", then source will use "a" and target will use "b". sourceSelect.SelectExprs = append(sourceSelect.SelectExprs, selExpr) @@ -112,7 +114,8 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str aggregates = append(aggregates, engine.NewAggregateParam( /*opcode*/ opcode.AggregateSum, /*offset*/ len(sourceSelect.SelectExprs)-1, - /*alias*/ "")) + /*alias*/ "", collationEnv), + ) } } default: @@ -152,12 +155,27 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str }, } - err = tp.findPKs(dbClient, targetSelect) + if len(tp.table.PrimaryKeyColumns) == 0 { + // We use the columns from a PKE if there is one. + pkeCols, err := tp.getPKEquivalentColumns(dbClient) + if err != nil { + return nil, vterrors.Wrapf(err, "error getting PK equivalent columns for table %s", tp.table.Name) + } + if len(pkeCols) > 0 { + tp.table.PrimaryKeyColumns = append(tp.table.PrimaryKeyColumns, pkeCols...) + } else { + // We use every column together as a substitute PK. + tp.table.PrimaryKeyColumns = append(tp.table.PrimaryKeyColumns, tp.table.Columns...) + } + } + + err = tp.findPKs(dbClient, targetSelect, collationEnv) if err != nil { return nil, err } + // Remove in_keyrange. It's not understood by mysql. 
- sourceSelect.Where = sel.Where //removeKeyrange(sel.Where) + sourceSelect.Where = sel.Where // removeKeyrange(sel.Where) // The source should also perform the group by. sourceSelect.GroupBy = sel.GroupBy sourceSelect.OrderBy = tp.orderBy @@ -167,8 +185,8 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str tp.sourceQuery = sqlparser.String(sourceSelect) tp.targetQuery = sqlparser.String(targetSelect) - log.Info("VDiff query on source: %v", tp.sourceQuery) - log.Info("VDiff query on target: %v", tp.targetQuery) + log.Infof("VDiff query on source: %v", tp.sourceQuery) + log.Infof("VDiff query on target: %v", tp.targetQuery) tp.aggregates = aggregates td.tablePlan = tp @@ -176,7 +194,10 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str } // findPKs identifies PKs and removes them from the columns to do data comparison. -func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlparser.Select) error { +func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlparser.Select, collationEnv *collations.Environment) error { + if len(tp.table.PrimaryKeyColumns) == 0 { + return nil + } var orderby sqlparser.OrderBy for _, pk := range tp.table.PrimaryKeyColumns { found := false @@ -186,8 +207,8 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa switch ct := expr.(type) { case *sqlparser.ColName: colname = ct.Name.String() - case *sqlparser.FuncExpr: //eg. weight_string() - //no-op + case *sqlparser.FuncExpr: // eg. weight_string() + // no-op default: log.Warningf("Not considering column %v for PK, type %v not handled", selExpr, ct) } @@ -195,7 +216,7 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa tp.compareCols[i].isPK = true tp.comparePKs = append(tp.comparePKs, tp.compareCols[i]) tp.selectPks = append(tp.selectPks, i) - // We'll be comparing pks separately. So, remove them from compareCols. 
+ // We'll be comparing PKs separately. So, remove them from compareCols. tp.pkCols = append(tp.pkCols, i) found = true break @@ -210,7 +231,7 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa Direction: sqlparser.AscOrder, }) } - if err := tp.getPKColumnCollations(dbClient); err != nil { + if err := tp.getPKColumnCollations(dbClient, collationEnv); err != nil { return vterrors.Wrapf(err, "error getting PK column collations for table %s", tp.table.Name) } tp.orderBy = orderby @@ -222,7 +243,10 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa // sorting when we do the merge sort and for the comparisons. It then // saves the collations in the tablePlan's comparePKs column info // structs for those subsequent operations. -func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error { +func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient, collationEnv *collations.Environment) error { + if len(tp.comparePKs) == 0 { + return nil + } columnList := make([]string, len(tp.comparePKs)) for i := range tp.comparePKs { columnList[i] = tp.comparePKs[i].colName @@ -246,7 +270,6 @@ func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error if qr == nil || len(qr.Rows) != len(tp.comparePKs) { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected result for query %s: %+v", query, qr) } - collationEnv := collations.Local() for _, row := range qr.Named().Rows { columnName := row["column_name"].ToString() collateName := strings.ToLower(row["collation_name"].ToString()) @@ -259,3 +282,17 @@ func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error } return nil } + +func (tp *tablePlan) getPKEquivalentColumns(dbClient binlogplayer.DBClient) ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), BackgroundOperationTimeout/2) + defer cancel() + executeFetch := func(query string, maxrows int, wantfields 
bool) (*sqltypes.Result, error) { + // This sets wantfields to true. + return dbClient.ExecuteFetch(query, maxrows) + } + pkeCols, _, err := mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, executeFetch, tp.dbName, tp.table.Name) + if err != nil { + return nil, err + } + return pkeCols, nil +} diff --git a/go/vt/vttablet/tabletmanager/vdiff/utils.go b/go/vt/vttablet/tabletmanager/vdiff/utils.go index 5904fd41795..07e070976a9 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/utils.go +++ b/go/vt/vttablet/tabletmanager/vdiff/utils.go @@ -33,7 +33,7 @@ import ( ) // newMergeSorter creates an engine.MergeSort based on the shard streamers and pk columns -func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo) *engine.MergeSort { +func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo, collationEnv *collations.Environment) *engine.MergeSort { prims := make([]engine.StreamExecutor, 0, len(participants)) for _, participant := range participants { prims = append(prims, participant) @@ -42,14 +42,11 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare for i, cpk := range comparePKs { weightStringCol := -1 // if the collation is nil or unknown, use binary collation to compare as bytes - t := evalengine.Type{ - Type: sqltypes.Unknown, - Coll: collations.CollationBinaryID, - } + var collation collations.ID = collations.CollationBinaryID if cpk.collation != collations.Unknown { - t.Coll = cpk.collation + collation = cpk.collation } - ob[i] = evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: t} + ob[i] = evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: evalengine.NewType(sqltypes.Unknown, collation), CollationEnv: collationEnv} } return &engine.MergeSort{ Primitives: prims, @@ -66,10 +63,10 @@ func encodeString(in string) string { return buf.String() } -func pkColsToGroupByParams(pkCols []int) []*engine.GroupByParams { 
+func pkColsToGroupByParams(pkCols []int, collationEnv *collations.Environment) []*engine.GroupByParams { var res []*engine.GroupByParams for _, col := range pkCols { - res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, Type: evalengine.UnknownType()}) + res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, CollationEnv: collationEnv}) } return res } diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go index d7d2583a5d3..8c00b61b784 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go @@ -18,28 +18,31 @@ package vdiff import ( "context" + "errors" "fmt" "reflect" "strings" + "time" + + "vitess.io/vitess/go/vt/schema" "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // workflowDiffer has metadata and state for the vdiff of a single workflow on this tablet @@ -49,13 +52,16 @@ type workflowDiffer struct { tableDiffers map[string]*tableDiffer // key is table name opts 
*tabletmanagerdatapb.VDiffOptions + + collationEnv *collations.Environment } -func newWorkflowDiffer(ct *controller, opts *tabletmanagerdatapb.VDiffOptions) (*workflowDiffer, error) { +func newWorkflowDiffer(ct *controller, opts *tabletmanagerdatapb.VDiffOptions, collationEnv *collations.Environment) (*workflowDiffer, error) { wd := &workflowDiffer{ ct: ct, opts: opts, tableDiffers: make(map[string]*tableDiffer, 1), + collationEnv: collationEnv, } return wd, nil } @@ -64,7 +70,7 @@ func newWorkflowDiffer(ct *controller, opts *tabletmanagerdatapb.VDiffOptions) ( // by MySQL on each side then we'll have the same number of extras on // both sides. If that's the case, then let's see if the extra rows on // both sides are actually different. -func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompare int64) error { +func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompare int64, maxReportSampleRows int64) error { if dr.MismatchedRows == 0 { // Get the VSchema on the target and source keyspaces. 
We can then use this // for handling additional edge cases, such as adjusting results for reference @@ -122,69 +128,121 @@ func (wd *workflowDiffer) reconcileExtraRows(dr *DiffReport, maxExtraRowsToCompa } } } - // We can now trim the extra rows diffs on both sides to the maxVDiffReportSampleRows value - if len(dr.ExtraRowsSourceDiffs) > maxVDiffReportSampleRows { - dr.ExtraRowsSourceDiffs = dr.ExtraRowsSourceDiffs[:maxVDiffReportSampleRows-1] + // We can now trim the extra rows diffs on both sides to the maxReportSampleRows value + if int64(len(dr.ExtraRowsSourceDiffs)) > maxReportSampleRows && maxReportSampleRows > 0 { + dr.ExtraRowsSourceDiffs = dr.ExtraRowsSourceDiffs[:maxReportSampleRows-1] } - if len(dr.ExtraRowsTargetDiffs) > maxVDiffReportSampleRows { - dr.ExtraRowsTargetDiffs = dr.ExtraRowsTargetDiffs[:maxVDiffReportSampleRows-1] + if int64(len(dr.ExtraRowsTargetDiffs)) > maxReportSampleRows && maxReportSampleRows > 0 { + dr.ExtraRowsTargetDiffs = dr.ExtraRowsTargetDiffs[:maxReportSampleRows-1] } return nil } func (wd *workflowDiffer) diffTable(ctx context.Context, dbClient binlogplayer.DBClient, td *tableDiffer) error { - defer func() { + cancelShardStreams := func() { if td.shardStreamsCancel != nil { td.shardStreamsCancel() } // Wait for all the shard streams to finish before returning. 
td.wgShardStreamers.Wait() + } + defer func() { + cancelShardStreams() }() - select { - case <-ctx.Done(): - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") - case <-wd.ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") - default: + var ( + diffTimer *time.Timer + diffReport *DiffReport + diffErr error + ) + defer func() { + if diffTimer != nil { + if !diffTimer.Stop() { + select { + case <-diffTimer.C: + default: + } + } + } + }() + + maxDiffRuntime := time.Duration(24 * time.Hour * 365) // 1 year (effectively forever) + if wd.ct.options.CoreOptions.MaxDiffSeconds > 0 { + // Restart the diff if it takes longer than the specified max diff time. + maxDiffRuntime = time.Duration(wd.ct.options.CoreOptions.MaxDiffSeconds) * time.Second } log.Infof("Starting differ on table %s for vdiff %s", td.table.Name, wd.ct.uuid) if err := td.updateTableState(ctx, dbClient, StartedState); err != nil { return err } - if err := td.initialize(ctx); err != nil { - return err - } - log.Infof("Table initialization done on table %s for vdiff %s", td.table.Name, wd.ct.uuid) - dr, err := td.diff(ctx, wd.opts.CoreOptions.MaxRows, wd.opts.ReportOptions.DebugQuery, wd.opts.ReportOptions.OnlyPks, wd.opts.CoreOptions.MaxExtraRowsToCompare) - if err != nil { - log.Errorf("Encountered an error diffing table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, err) - return err + + for { + select { + case <-ctx.Done(): + return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") + case <-wd.ct.done: + return ErrVDiffStoppedByUser + default: + } + + if diffTimer != nil { // We're restarting the diff + if !diffTimer.Stop() { + select { + case <-diffTimer.C: + default: + } + } + diffTimer = nil + cancelShardStreams() + // Give the underlying resources (mainly MySQL) a moment to catch up + // before we pick up where we left off (but with new database snapshots). 
+ time.Sleep(30 * time.Second) + } + if err := td.initialize(ctx); err != nil { // Setup the consistent snapshots + return err + } + log.Infof("Table initialization done on table %s for vdiff %s", td.table.Name, wd.ct.uuid) + diffTimer = time.NewTimer(maxDiffRuntime) + diffReport, diffErr = td.diff(ctx, wd.opts.CoreOptions.MaxRows, wd.opts.ReportOptions.DebugQuery, wd.opts.ReportOptions.OnlyPks, wd.opts.CoreOptions.MaxExtraRowsToCompare, wd.opts.ReportOptions.MaxSampleRows, diffTimer.C) + if diffErr == nil { // We finished the diff successfully + break + } + log.Errorf("Encountered an error diffing table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, diffErr) + if !errors.Is(diffErr, ErrMaxDiffDurationExceeded) { // We only want to retry if we hit the max-diff-duration + return diffErr + } } - log.Infof("Table diff done on table %s for vdiff %s with report: %+v", td.table.Name, wd.ct.uuid, dr) - if dr.ExtraRowsSource > 0 || dr.ExtraRowsTarget > 0 { - if err := wd.reconcileExtraRows(dr, wd.opts.CoreOptions.MaxExtraRowsToCompare); err != nil { + log.Infof("Table diff done on table %s for vdiff %s with report: %+v", td.table.Name, wd.ct.uuid, diffReport) + + if diffReport.ExtraRowsSource > 0 || diffReport.ExtraRowsTarget > 0 { + if err := wd.reconcileExtraRows(diffReport, wd.opts.CoreOptions.MaxExtraRowsToCompare, wd.opts.ReportOptions.MaxSampleRows); err != nil { log.Errorf("Encountered an error reconciling extra rows found for table %s for vdiff %s: %v", td.table.Name, wd.ct.uuid, err) return vterrors.Wrap(err, "failed to reconcile extra rows") } } - if dr.MismatchedRows > 0 || dr.ExtraRowsTarget > 0 || dr.ExtraRowsSource > 0 { + if diffReport.MismatchedRows > 0 || diffReport.ExtraRowsTarget > 0 || diffReport.ExtraRowsSource > 0 { if err := updateTableMismatch(dbClient, wd.ct.id, td.table.Name); err != nil { return err } } - log.Infof("Completed reconciliation on table %s for vdiff %s with updated report: %+v", td.table.Name, wd.ct.uuid, dr) - if err := 
td.updateTableStateAndReport(ctx, dbClient, CompletedState, dr); err != nil { + log.Infof("Completed reconciliation on table %s for vdiff %s with updated report: %+v", td.table.Name, wd.ct.uuid, diffReport) + if err := td.updateTableStateAndReport(ctx, dbClient, CompletedState, diffReport); err != nil { return err } return nil } -func (wd *workflowDiffer) diff(ctx context.Context) error { +func (wd *workflowDiffer) diff(ctx context.Context) (err error) { + defer func() { + if err != nil { + globalStats.ErrorCount.Add(1) + wd.ct.Errors.Add(err.Error(), 1) + } + }() dbClient := wd.ct.dbClientFactory() if err := dbClient.Connect(); err != nil { return err @@ -195,7 +253,7 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") case <-wd.ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return ErrVDiffStoppedByUser default: } @@ -216,7 +274,7 @@ func (wd *workflowDiffer) diff(ctx context.Context) error { case <-ctx.Done(): return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") case <-wd.ct.done: - return vterrors.Errorf(vtrpcpb.Code_CANCELED, "vdiff was stopped") + return ErrVDiffStoppedByUser default: } query, err := sqlparser.ParseAndBind(sqlGetVDiffTable, @@ -286,7 +344,7 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl if len(specifiedTables) != 0 && !stringListContains(specifiedTables, table.Name) { continue } - if schema.IsInternalOperationTableName(table.Name) { + if schema.IsInternalOperationTableName(table.Name) && !schema.IsOnlineDDLTableName(table.Name) { continue } rule, err := vreplication.MatchTable(table.Name, filter) @@ -315,7 +373,7 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl } td.lastPK = lastpkpb wd.tableDiffers[table.Name] = td - if _, err := td.buildTablePlan(dbClient, wd.ct.vde.dbName); err != nil { + if _, err := 
td.buildTablePlan(dbClient, wd.ct.vde.dbName, wd.collationEnv); err != nil { return err } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go index 10c6406f046..a460b87a4f6 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go @@ -67,8 +67,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -87,8 +87,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, 
+ comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -107,8 +107,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -127,8 +127,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c2, c1 from t1 order by c1 asc", targetQuery: "select c2, c1 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -147,8 +147,8 @@ func 
TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c0 as c1, c2 from t2 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -157,7 +157,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // non-pk text column. + // Non-PK text column. input: &binlogdatapb.Rule{ Match: "nonpktext", Filter: "select c1, textcol from nonpktext", @@ -168,8 +168,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["nonpktext"]], sourceQuery: "select c1, textcol from nonpktext order by c1 asc", targetQuery: "select c1, textcol from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "textcol"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "textcol"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: 
sqlparser.OrderBy{&sqlparser.Order{ @@ -178,7 +178,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // non-pk text column, different order. + // Non-PK text column, different order. input: &binlogdatapb.Rule{ Match: "nonpktext", Filter: "select textcol, c1 from nonpktext", @@ -189,8 +189,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["nonpktext"]], sourceQuery: "select textcol, c1 from nonpktext order by c1 asc", targetQuery: "select textcol, c1 from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "textcol"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "textcol"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -199,7 +199,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // pk text column. + // PK text column. 
input: &binlogdatapb.Rule{ Match: "pktext", Filter: "select textcol, c2 from pktext", @@ -210,8 +210,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select textcol, c2 from pktext order by textcol asc", targetQuery: "select textcol, c2 from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -220,7 +220,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // pk text column, different order. + // PK text column, different order. 
input: &binlogdatapb.Rule{ Match: "pktext", Filter: "select c2, textcol from pktext", @@ -231,8 +231,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select c2, textcol from pktext order by textcol asc", targetQuery: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -241,7 +241,61 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // text column as expression. + // No PK. Use all columns as a substitute. 
+ input: &binlogdatapb.Rule{ + Match: "nopk", + Filter: "select * from nopk", + }, + table: "nopk", + tablePlan: &tablePlan{ + dbName: vdiffDBName, + table: testSchema.TableDefinitions[tableDefMap["nopk"]], + sourceQuery: "select c1, c2, c3 from nopk order by c1 asc, c2 asc, c3 asc", + targetQuery: "select c1, c2, c3 from nopk order by c1 asc, c2 asc, c3 asc", + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + pkCols: []int{0, 1, 2}, + selectPks: []int{0, 1, 2}, + orderBy: sqlparser.OrderBy{ + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c1")}, + Direction: sqlparser.AscOrder, + }, + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c2")}, + Direction: sqlparser.AscOrder, + }, + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c3")}, + Direction: sqlparser.AscOrder, + }, + }, + }, + }, { + // No PK, but a PKE on c3. 
+ input: &binlogdatapb.Rule{ + Match: "nopkwithpke", + Filter: "select * from nopkwithpke", + }, + table: "nopkwithpke", + tablePlan: &tablePlan{ + dbName: vdiffDBName, + table: testSchema.TableDefinitions[tableDefMap["nopkwithpke"]], + sourceQuery: "select c1, c2, c3 from nopkwithpke order by c3 asc", + targetQuery: "select c1, c2, c3 from nopkwithpke order by c3 asc", + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + comparePKs: []compareColInfo{{2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + pkCols: []int{2}, + selectPks: []int{2}, + orderBy: sqlparser.OrderBy{ + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c3")}, + Direction: sqlparser.AscOrder, + }, + }, + }, + }, { + // Text column as expression. input: &binlogdatapb.Rule{ Match: "pktext", Filter: "select c2, a+b as textcol from pktext", @@ -252,8 +306,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select c2, a + b as textcol from pktext order by textcol asc", targetQuery: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: 
sqlparser.OrderBy{&sqlparser.Order{ @@ -262,7 +316,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // multiple pk columns. + // Multiple PK columns. input: &binlogdatapb.Rule{ Match: "multipk", }, @@ -272,8 +326,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["multipk"]], sourceQuery: "select c1, c2 from multipk order by c1 asc, c2 asc", targetQuery: "select c1, c2 from multipk order by c1 asc, c2 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c2"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}}, pkCols: []int{0, 1}, selectPks: []int{0, 1}, orderBy: sqlparser.OrderBy{ @@ -299,8 +353,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + 
comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -321,8 +375,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -343,8 +397,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') and c2 = 2 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, 
orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -365,8 +419,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and c1 = 1 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -387,8 +441,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -397,7 +451,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // group by + // 
Group by. input: &binlogdatapb.Rule{ Match: "t1", Filter: "select * from t1 group by c1", @@ -408,8 +462,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 group by c1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -418,7 +472,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // aggregations + // Aggregations. 
input: &binlogdatapb.Rule{ Match: "aggr", Filter: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1", @@ -429,8 +483,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["aggr"]], sourceQuery: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1 order by c1 asc", targetQuery: "select c1, c2, c3, c4 from aggr order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c3"}, {3, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c4"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c3"}, {3, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c4"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -438,12 +492,12 @@ func TestBuildPlanSuccess(t *testing.T) { Direction: sqlparser.AscOrder, }}, aggregates: []*engine.AggregateParams{ - engine.NewAggregateParam(opcode.AggregateSum, 2, ""), - engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + engine.NewAggregateParam(opcode.AggregateSum, 2, "", collations.MySQL8()), + engine.NewAggregateParam(opcode.AggregateSum, 3, "", collations.MySQL8()), }, }, }, { - // date conversion on import. + // Date conversion on import. 
input: &binlogdatapb.Rule{ Match: "datze", }, @@ -454,8 +508,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["datze"]], sourceQuery: "select id, dt from datze order by id asc", targetQuery: "select id, convert_tz(dt, 'UTC', 'US/Pacific') as dt from datze order by id asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "id"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "dt"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "id"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "id"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "dt"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "id"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -478,34 +532,49 @@ func TestBuildPlanSuccess(t *testing.T) { dbc := binlogplayer.NewMockDBClient(t) filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} vdiffenv.opts.CoreOptions.Tables = tcase.table - wd, err := newWorkflowDiffer(ct, vdiffenv.opts) + wd, err := newWorkflowDiffer(ct, vdiffenv.opts, collations.MySQL8()) require.NoError(t, err) dbc.ExpectRequestRE("select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report", noResults, nil) - columnList := make([]string, len(tcase.tablePlan.comparePKs)) - collationList := make([]string, len(tcase.tablePlan.comparePKs)) - env := collations.Local() - for i := range tcase.tablePlan.comparePKs { - columnList[i] = tcase.tablePlan.comparePKs[i].colName - if tcase.tablePlan.comparePKs[i].collation != collations.Unknown { - collationList[i] = env.LookupName(tcase.tablePlan.comparePKs[i].collation) - } else { - collationList[i] = sqltypes.NULL.String() + if len(tcase.tablePlan.table.PrimaryKeyColumns) == 0 { + result := 
noResults + if tcase.table == "nopkwithpke" { // This has a PKE column: c3 + result = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "column_name|index_name", + "varchar|varchar", + ), + "c3|c3", + ) } + dbc.ExpectRequestRE("SELECT index_cols.COLUMN_NAME AS column_name, index_cols.INDEX_NAME as index_name FROM information_schema.STATISTICS", result, nil) + } + if len(tcase.tablePlan.comparePKs) > 0 { + columnList := make([]string, len(tcase.tablePlan.comparePKs)) + collationList := make([]string, len(tcase.tablePlan.comparePKs)) + env := collations.MySQL8() + for i := range tcase.tablePlan.comparePKs { + columnList[i] = tcase.tablePlan.comparePKs[i].colName + if tcase.tablePlan.comparePKs[i].collation != collations.Unknown { + collationList[i] = env.LookupName(tcase.tablePlan.comparePKs[i].collation) + } else { + collationList[i] = sqltypes.NULL.String() + } + } + columnBV, err := sqltypes.BuildBindVariable(columnList) + require.NoError(t, err) + query, err := sqlparser.ParseAndBind(sqlSelectColumnCollations, + sqltypes.StringBindVariable(vdiffDBName), + sqltypes.StringBindVariable(tcase.tablePlan.table.Name), + columnBV, + ) + require.NoError(t, err) + dbc.ExpectRequest(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "collation_name", + "varchar", + ), + collationList..., + ), nil) } - columnBV, err := sqltypes.BuildBindVariable(columnList) - require.NoError(t, err) - query, err := sqlparser.ParseAndBind(sqlSelectColumnCollations, - sqltypes.StringBindVariable(vdiffDBName), - sqltypes.StringBindVariable(tcase.tablePlan.table.Name), - columnBV, - ) - require.NoError(t, err) - dbc.ExpectRequest(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "collation_name", - "varchar", - ), - collationList..., - ), nil) err = wd.buildPlan(dbc, filter, testSchema) require.NoError(t, err, tcase.input) require.Equal(t, 1, len(wd.tableDiffers), tcase.input) @@ -577,7 +646,7 @@ func TestBuildPlanInclude(t *testing.T) { for _, tcase := range testcases { dbc 
:= binlogplayer.NewMockDBClient(t) vdiffenv.opts.CoreOptions.Tables = strings.Join(tcase.tables, ",") - wd, err := newWorkflowDiffer(ct, vdiffenv.opts) + wd, err := newWorkflowDiffer(ct, vdiffenv.opts, collations.MySQL8()) require.NoError(t, err) for _, table := range tcase.tables { query := fmt.Sprintf(`select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report @@ -650,7 +719,7 @@ func TestBuildPlanFailure(t *testing.T) { dbc := binlogplayer.NewMockDBClient(t) filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} vdiffenv.opts.CoreOptions.Tables = tcase.input.Match - wd, err := newWorkflowDiffer(ct, vdiffenv.opts) + wd, err := newWorkflowDiffer(ct, vdiffenv.opts, collations.MySQL8()) require.NoError(t, err) dbc.ExpectRequestRE("select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report", noResults, nil) err = wd.buildPlan(dbc, filter, testSchema) diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index b9aad39fe6c..581244eebb3 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -24,18 +24,16 @@ import ( "sync/atomic" "time" - "vitess.io/vitess/go/vt/vttablet" - "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -49,7 +47,7 @@ const ( ) // controller is created by Engine. Members are initialized upfront. 
-// There is no mutex within a controller becaust its members are +// There is no mutex within a controller because its members are // either read-only or self-synchronized. type controller struct { vre *Engine @@ -74,7 +72,7 @@ type controller struct { // newController creates a new controller. Unless a stream is explicitly 'Stopped', // this function launches a goroutine to perform continuous vreplication. -func newController(ctx context.Context, params map[string]string, dbClientFactory func() binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon, ts *topo.Server, cell, tabletTypesStr string, blpStats *binlogplayer.Stats, vre *Engine) (*controller, error) { +func newController(ctx context.Context, params map[string]string, dbClientFactory func() binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon, ts *topo.Server, cell, tabletTypesStr string, blpStats *binlogplayer.Stats, vre *Engine, tpo discovery.TabletPickerOptions) (*controller, error) { if blpStats == nil { blpStats = binlogplayer.NewStats() } @@ -131,7 +129,7 @@ func newController(ctx context.Context, params map[string]string, dbClientFactor return nil, err } } - tp, err := discovery.NewTabletPicker(ctx, sourceTopo, cells, ct.vre.cell, ct.source.Keyspace, ct.source.Shard, tabletTypesStr, discovery.TabletPickerOptions{}) + tp, err := discovery.NewTabletPicker(ctx, sourceTopo, cells, ct.vre.cell, ct.source.Keyspace, ct.source.Shard, tabletTypesStr, tpo) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index b168625d20a..42da92fe2cf 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -18,8 +18,10 @@ package vreplication import ( "fmt" + "strings" "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/sqlparser" ) @@ -28,6 +30,9 @@ type controllerPlan struct 
{ query string opcode int + // tabletPickerOptions is set for updateQuery. + tabletPickerOptions discovery.TabletPickerOptions + // numInserts is set for insertQuery. numInserts int @@ -50,9 +55,90 @@ const ( reshardingJournalQuery ) +// A comment directive that you can include in your VReplication write +// statements if you want to bypass the safety checks that ensure you are +// being selective. The full comment directive looks like this: +// delete /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ from _vt.vreplication +const AllowUnsafeWriteCommentDirective = "ALLOW_UNSAFE_VREPLICATION_WRITE" + +// A comment directive that you need to include in your VReplication +// statements if you want the controller to include non-serving tablets +// in the execution plan (via tablet picker options). The full comment +// directive looks like this: +// update /*vt+ INCLUDE_NON_SERVING_TABLETS_IN_PLAN=1 */ _vt.vreplication set ... +const IncludeNonServingTabletsCommentDirective = "INCLUDE_NON_SERVING_TABLETS_IN_PLAN" + +// Check that the given WHERE clause is using at least one of the specified +// columns with an equality or in operator to ensure that it is being +// properly selective and not unintentionally going to potentially affect +// multiple workflows. +// The engine's exec function -- used by the VReplicationExec RPC -- should +// provide guardrails for data changing statements and if the user wants get +// around them they can e.g. use the ExecuteFetchAsDba RPC. +// If you as a developer truly do want to affect multiple workflows, you can +// add a comment directive using the AllowUnsafeWriteCommentDirective constant. 
+var isSelective = func(where *sqlparser.Where, columns ...*sqlparser.ColName) bool { + if where == nil { + return false + } + if len(columns) == 0 { + return true + } + selective := false + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *sqlparser.ComparisonExpr: + column, ok := node.Left.(*sqlparser.ColName) + if !ok { + return true, nil + } + wantedColumn := false + for i := range columns { + if columns[i].Equal(column) { + wantedColumn = true + break + } + } + // If we found a desired column, check that it is being used with an + // equality operator OR an in clause, logically being equal to any + // of N things. + if wantedColumn && + (node.Operator == sqlparser.EqualOp || node.Operator == sqlparser.InOp) { + selective = true // This is a safe statement + return false, nil // We can stop walking + } + default: + } + return true, nil + }, where) + return selective +} + +// tableSelectiveColumns is a map that can be used to declare +// what selective columns should be used (one or more) in queries +// against a table. +var tableSelectiveColumns = map[string][]*sqlparser.ColName{ + vreplicationTableName: { + {Name: sqlparser.NewIdentifierCI("id")}, + {Name: sqlparser.NewIdentifierCI("workflow")}, + }, +} + +// columnsAsCSV returns a comma-separated list of column names. +func columnsAsCSV(columns []*sqlparser.ColName) string { + if len(columns) == 0 { + return "" + } + colsForError := make([]string, len(columns)) + for i := range columns { + colsForError[i] = columns[i].Name.String() + } + return strings.Join(colsForError, ", ") +} + // buildControllerPlan parses the input query and returns an appropriate plan. 
-func buildControllerPlan(query string) (*controllerPlan, error) { - stmt, err := sqlparser.Parse(query) +func buildControllerPlan(query string, parser *sqlparser.Parser) (*controllerPlan, error) { + stmt, err := parser.Parse(query) if err != nil { return nil, err } @@ -157,13 +243,27 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { if tableName.Qualifier.String() != sidecar.GetName() && tableName.Qualifier.String() != sidecar.DefaultName { return nil, fmt.Errorf("invalid database name: %s", tableName.Qualifier.String()) } + cp := &controllerPlan{ + opcode: updateQuery, + } switch tableName.Name.String() { case reshardingJournalTableName: return &controllerPlan{ opcode: reshardingJournalQuery, }, nil case vreplicationTableName: - // no-op + if upd.Comments == nil || upd.Comments.Directives() == nil || !upd.Comments.Directives().IsSet(AllowUnsafeWriteCommentDirective) { + if safe := isSelective(upd.Where, tableSelectiveColumns[vreplicationTableName]...); !safe { + return nil, fmt.Errorf("unsafe WHERE clause in update without the /*vt+ %s */ comment directive: %s; should be using = or in with at least one of the following columns: %s", + AllowUnsafeWriteCommentDirective, sqlparser.String(upd.Where), columnsAsCSV(tableSelectiveColumns[vreplicationTableName])) + } + } + + if upd.Comments != nil && upd.Comments.Directives().IsSet(IncludeNonServingTabletsCommentDirective) { + cp.tabletPickerOptions = discovery.TabletPickerOptions{ + IncludeNonServingTablets: true, + } + } default: return nil, fmt.Errorf("invalid table name: %s", tableName.Name.String()) } @@ -186,15 +286,13 @@ func buildUpdatePlan(upd *sqlparser.Update) (*controllerPlan, error) { Right: sqlparser.ListArg("ids"), }, } + cp.selector = buf1.String() buf2 := sqlparser.NewTrackedBuffer(nil) buf2.Myprintf("%v", upd) + cp.applier = buf2.ParsedQuery() - return &controllerPlan{ - opcode: updateQuery, - selector: buf1.String(), - applier: buf2.ParsedQuery(), - }, nil + return cp, nil } 
func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { @@ -220,7 +318,12 @@ func buildDeletePlan(del *sqlparser.Delete) (*controllerPlan, error) { opcode: reshardingJournalQuery, }, nil case vreplicationTableName: - // no-op + if del.Comments == nil || del.Comments.Directives() == nil || !del.Comments.Directives().IsSet(AllowUnsafeWriteCommentDirective) { + if safe := isSelective(del.Where, tableSelectiveColumns[vreplicationTableName]...); !safe { + return nil, fmt.Errorf("unsafe WHERE clause in delete without the /*vt+ %s */ comment directive: %s; should be using = or in with at least one of the following columns: %s", + AllowUnsafeWriteCommentDirective, sqlparser.String(del.Where), columnsAsCSV(tableSelectiveColumns[vreplicationTableName])) + } + } default: return nil, fmt.Errorf("invalid table name: %s", tableName.Name.String()) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index 391b8d9c67e..4d4383a79f1 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) type testControllerPlan struct { @@ -111,13 +113,25 @@ func TestControllerPlan(t *testing.T) { applier: "update _vt.vreplication set state = 'Running' where id in ::ids", }, }, { - in: "update _vt.vreplication set state='Running'", + in: "update _vt.vreplication set state='Running'", + err: "unsafe WHERE clause in update without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: ; should be using = or in with at least one of the following columns: id, workflow", + }, { + in: "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running'", plan: &testControllerPlan{ - query: "update _vt.vreplication set state='Running'", + query: "update /*vt+ 
ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running'", opcode: updateQuery, selector: "select id from _vt.vreplication", - applier: "update _vt.vreplication set state = 'Running' where id in ::ids", + applier: "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running' where id in ::ids", }, + }, { + in: "update _vt.vreplication set state='Running', message='' where id >= 1", + err: "unsafe WHERE clause in update without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: where id >= 1; should be using = or in with at least one of the following columns: id, workflow", + }, { + in: "update _vt.vreplication set state = 'Running' where state in ('Stopped', 'Error')", + err: "unsafe WHERE clause in update without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: where state in ('Stopped', 'Error'); should be using = or in with at least one of the following columns: id, workflow", + }, { + in: "update _vt.vreplication set state='Running', message='' where state='Stopped'", + err: "unsafe WHERE clause in update without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: where state = 'Stopped'; should be using = or in with at least one of the following columns: id, workflow", }, { in: "update _vt.vreplication set state='Running' where a = 1", plan: &testControllerPlan{ @@ -126,6 +140,7 @@ func TestControllerPlan(t *testing.T) { selector: "select id from _vt.vreplication where a = 1", applier: "update _vt.vreplication set state = 'Running' where id in ::ids", }, + err: "unsafe WHERE clause in update without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: where a = 1; should be using = or in with at least one of the following columns: id, workflow", }, { in: "update _vt.resharding_journal set col = 1", plan: &testControllerPlan{ @@ -157,15 +172,21 @@ func TestControllerPlan(t *testing.T) { delPostCopyAction: "delete from _vt.post_copy_action where vrepl_id in ::ids", }, }, 
{ - in: "delete from _vt.vreplication", + in: "delete from _vt.vreplication", + err: "unsafe WHERE clause in delete without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: ; should be using = or in with at least one of the following columns: id, workflow", + }, { + in: "delete /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ from _vt.vreplication", plan: &testControllerPlan{ - query: "delete from _vt.vreplication", + query: "delete /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ from _vt.vreplication", opcode: deleteQuery, selector: "select id from _vt.vreplication", - applier: "delete from _vt.vreplication where id in ::ids", + applier: "delete /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ from _vt.vreplication where id in ::ids", delCopyState: "delete from _vt.copy_state where vrepl_id in ::ids", delPostCopyAction: "delete from _vt.post_copy_action where vrepl_id in ::ids", }, + }, { + in: "delete from _vt.vreplication where state='Stopped'", + err: "unsafe WHERE clause in delete without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: where state = 'Stopped'; should be using = or in with at least one of the following columns: id, workflow", }, { in: "delete from _vt.vreplication where a = 1", plan: &testControllerPlan{ @@ -176,6 +197,7 @@ func TestControllerPlan(t *testing.T) { delCopyState: "delete from _vt.copy_state where vrepl_id in ::ids", delPostCopyAction: "delete from _vt.post_copy_action where vrepl_id in ::ids", }, + err: "unsafe WHERE clause in delete without the /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ comment directive: where a = 1; should be using = or in with at least one of the following columns: id, workflow", }, { in: "delete from _vt.resharding_journal where id = 1", plan: &testControllerPlan{ @@ -240,7 +262,7 @@ func TestControllerPlan(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.in, func(t *testing.T) { - pl, err := buildControllerPlan(tcase.in) + pl, err := buildControllerPlan(tcase.in, sqlparser.NewTestParser()) 
if tcase.err != "" { require.EqualError(t, err, tcase.err) return diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go index efab9693fa2..57cb60384c6 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/tmutils" @@ -62,10 +63,11 @@ var ( }, }, } - testSelectorResponse1 = &sqltypes.Result{Rows: [][]sqltypes.Value{{sqltypes.NewInt64(1)}}} - testSelectorResponse2 = &sqltypes.Result{Rows: [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}} - testDMLResponse = &sqltypes.Result{RowsAffected: 1} - testPos = "MariaDB/0-1-1083" + testSelectorResponse1 = &sqltypes.Result{Rows: [][]sqltypes.Value{{sqltypes.NewInt64(1)}}} + testSelectorResponse2 = &sqltypes.Result{Rows: [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}} + testDMLResponse = &sqltypes.Result{RowsAffected: 1} + testPos = "MariaDB/0-1-1083" + defaultTabletPickerOptions = discovery.TabletPickerOptions{} ) func TestControllerKeyRange(t *testing.T) { @@ -92,7 +94,7 @@ func TestControllerKeyRange(t *testing.T) { mysqld.MysqlPort.Store(3306) vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } @@ -154,7 +156,7 @@ func TestControllerTables(t *testing.T) { mysqld.MysqlPort.Store(3306) vre := NewTestEngine(nil, 
wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } @@ -171,7 +173,7 @@ func TestControllerBadID(t *testing.T) { params := map[string]string{ "id": "bad", } - _, err := newController(context.Background(), params, nil, nil, nil, "", "", nil, nil) + _, err := newController(context.Background(), params, nil, nil, nil, "", "", nil, nil, defaultTabletPickerOptions) want := `strconv.ParseInt: parsing "bad": invalid syntax` if err == nil || err.Error() != want { t.Errorf("newController err: %v, want %v", err, want) @@ -184,7 +186,7 @@ func TestControllerStopped(t *testing.T) { "state": binlogdatapb.VReplicationWorkflowState_Stopped.String(), } - ct, err := newController(context.Background(), params, nil, nil, nil, "", "", nil, nil) + ct, err := newController(context.Background(), params, nil, nil, nil, "", "", nil, nil, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } @@ -224,7 +226,7 @@ func TestControllerOverrides(t *testing.T) { mysqld.MysqlPort.Store(3306) vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } @@ -251,7 +253,7 @@ func TestControllerCanceledContext(t *testing.T) { cancel() vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, nil, nil, nil, "", nil) - ct, err := newController(ctx, params, nil, nil, 
env.TopoServ, env.Cells[0], "rdonly", nil, vre) + ct, err := newController(ctx, params, nil, nil, env.TopoServ, env.Cells[0], "rdonly", nil, vre, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } @@ -297,7 +299,7 @@ func TestControllerRetry(t *testing.T) { mysqld.MysqlPort.Store(3306) vre := NewTestEngine(nil, env.Cells[0], mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "rdonly", nil, vre, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } @@ -359,7 +361,7 @@ func TestControllerStopPosition(t *testing.T) { mysqld.MysqlPort.Store(3306) vre := NewTestEngine(nil, wantTablet.GetAlias().Cell, mysqld, dbClientFactory, dbClientFactory, dbClient.DBName(), nil) - ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre) + ct, err := newController(context.Background(), params, dbClientFactory, mysqld, env.TopoServ, env.Cells[0], "replica", nil, vre, defaultTabletPickerOptions) if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index 8b81dd722c6..54902928e02 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -28,22 +28,23 @@ import ( "time" "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb 
"vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( @@ -72,7 +73,7 @@ var waitRetryTime = 1 * time.Second // How frequently vcopier will update _vt.vreplication rows_copied var rowsCopiedUpdateInterval = 30 * time.Second -// How frequntly vcopier will garbage collect old copy_state rows. +// How frequently vcopier will garbage collect old copy_state rows. // By default, do it in between every 2nd and 3rd rows copied update. var copyStateGCInterval = (rowsCopiedUpdateInterval * 3) - (rowsCopiedUpdateInterval / 2) @@ -107,10 +108,12 @@ type Engine struct { throttlerClient *throttle.Client // This should only be set in Test Engines in order to short - // curcuit functions as needed in unit tests. It's automatically + // circuit functions as needed in unit tests. It's automatically // enabled in NewSimpleTestEngine. This should NOT be used in // production. shortcircuit bool + + env *vtenv.Environment } type journalEvent struct { @@ -127,14 +130,15 @@ type PostCopyAction struct { // NewEngine creates a new Engine. // A nil ts means that the Engine is disabled. 
-func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler) *Engine { +func NewEngine(env *vtenv.Environment, config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler) *Engine { vre := &Engine{ + env: env, controllers: make(map[int32]*controller), ts: ts, cell: cell, mysqld: mysqld, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(config.ExternalConnections), + ec: newExternalConnector(env, config.ExternalConnections), throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerapp.VReplicationName, throttle.ThrottleCheckPrimaryWrite), } @@ -143,22 +147,24 @@ func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mys // InitDBConfig should be invoked after the db name is computed. func (vre *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { - // If we're already initilized, it's a test engine. Ignore the call. + // If we're already initialized, it's a test engine. Ignore the call. if vre.dbClientFactoryFiltered != nil && vre.dbClientFactoryDba != nil { return } vre.dbClientFactoryFiltered = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB()) + return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB(), vre.env.Parser()) } vre.dbClientFactoryDba = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.DbaWithDB()) + return binlogplayer.NewDBClient(dbcfgs.DbaWithDB(), vre.env.Parser()) } vre.dbName = dbcfgs.DBName } // NewTestEngine creates a new Engine for testing. 
func NewTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, dbClientFactoryFiltered func() binlogplayer.DBClient, dbClientFactoryDba func() binlogplayer.DBClient, dbname string, externalConfig map[string]*dbconfigs.DBConfigs) *Engine { + env := vtenv.NewTestEnv() vre := &Engine{ + env: env, controllers: make(map[int32]*controller), ts: ts, cell: cell, @@ -167,15 +173,17 @@ func NewTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, db dbClientFactoryDba: dbClientFactoryDba, dbName: dbname, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(externalConfig), + ec: newExternalConnector(env, externalConfig), } return vre } // NewSimpleTestEngine creates a new Engine for testing that can -// also short curcuit functions as needed. +// also short circuit functions as needed. func NewSimpleTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, dbClientFactoryFiltered func() binlogplayer.DBClient, dbClientFactoryDba func() binlogplayer.DBClient, dbname string, externalConfig map[string]*dbconfigs.DBConfigs) *Engine { + env := vtenv.NewTestEnv() vre := &Engine{ + env: env, controllers: make(map[int32]*controller), ts: ts, cell: cell, @@ -184,7 +192,7 @@ func NewSimpleTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaem dbClientFactoryDba: dbClientFactoryDba, dbName: dbname, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(externalConfig), + ec: newExternalConnector(env, externalConfig), shortcircuit: true, } return vre @@ -262,7 +270,7 @@ func (vre *Engine) retry(ctx context.Context, err error) { } if err := vre.openLocked(ctx); err == nil { // Don't invoke cancelRetry because openLocked - // will hold on to this context for later cancelation. + // will hold on to this context for later cancellation. 
vre.cancelRetry = nil vre.mu.Unlock() return @@ -273,7 +281,7 @@ func (vre *Engine) retry(ctx context.Context, err error) { func (vre *Engine) initControllers(rows []map[string]string) { for _, row := range rows { - ct, err := newController(vre.ctx, row, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, nil, vre) + ct, err := newController(vre.ctx, row, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, nil, vre, discovery.TabletPickerOptions{}) if err != nil { log.Errorf("Controller could not be initialized for stream: %v", row) continue @@ -330,12 +338,12 @@ func (vre *Engine) getDBClient(isAdmin bool) binlogplayer.DBClient { return vre.dbClientFactoryFiltered() } -// ExecWithDBA runs the specified query as the DBA user +// ExecWithDBA runs the specified query as the DBA user. func (vre *Engine) ExecWithDBA(query string) (*sqltypes.Result, error) { return vre.exec(query, true /*runAsAdmin*/) } -// Exec runs the specified query as the Filtered user +// Exec runs the specified query as the Filtered user. 
func (vre *Engine) Exec(query string) (*sqltypes.Result, error) { return vre.exec(query, false /*runAsAdmin*/) } @@ -362,7 +370,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) } defer vre.updateStats() - plan, err := buildControllerPlan(query) + plan, err := buildControllerPlan(query, vre.env.Parser()) if err != nil { return nil, err } @@ -421,14 +429,12 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) if err != nil { return nil, err } - ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, nil, vre) + ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, nil, vre, plan.tabletPickerOptions) if err != nil { return nil, err } vre.controllers[id] = ct - if err := insertLogWithParams(vdbc, LogStreamCreate, id, params); err != nil { - return nil, err - } + insertLogWithParams(vdbc, LogStreamCreate, id, params) } return qr, nil case updateQuery: @@ -463,14 +469,12 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) } // Create a new controller in place of the old one. // For continuity, the new controller inherits the previous stats. 
- ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, blpStats[id], vre) + ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, blpStats[id], vre, plan.tabletPickerOptions) if err != nil { return nil, err } vre.controllers[id] = ct - if err := insertLog(vdbc, LogStateChange, id, params["state"], ""); err != nil { - return nil, err - } + insertLog(vdbc, LogStateChange, id, params["state"], "") } return qr, nil case deleteQuery: @@ -488,9 +492,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) ct.Stop() delete(vre.controllers, id) } - if err := insertLogWithParams(vdbc, LogStreamDelete, id, nil); err != nil { - return nil, err - } + insertLogWithParams(vdbc, LogStreamDelete, id, nil) } if err := dbClient.Begin(); err != nil { return nil, err @@ -524,7 +526,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) } return qr, nil case selectQuery, reshardingJournalQuery: - // select and resharding journal queries are passed through. + // Selects and resharding journal queries are passed through. 
return dbClient.ExecuteFetch(plan.query, maxRows) } panic("unreachable") @@ -727,7 +729,7 @@ func (vre *Engine) transitionJournal(je *journalEvent) { log.Errorf("transitionJournal: %v", err) return } - ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, nil, vre) + ct, err := newController(vre.ctx, params, vre.dbClientFactoryFiltered, vre.mysqld, vre.ts, vre.cell, tabletTypesStr, nil, vre, discovery.TabletPickerOptions{}) if err != nil { log.Errorf("transitionJournal: %v", err) return @@ -788,8 +790,10 @@ func (vre *Engine) WaitForPos(ctx context.Context, id int32, pos string) error { } case len(qr.Rows) == 0: return fmt.Errorf("vreplication stream %d not found", id) - case len(qr.Rows) > 1 || len(qr.Rows[0]) != 3: - return fmt.Errorf("unexpected result: %v", qr) + case len(qr.Rows) > 1: + return fmt.Errorf("vreplication stream received more rows than expected, got %d instead of 1", len(qr.Rows)) + case len(qr.Rows[0]) != 3: + return fmt.Errorf("vreplication stream received an unexpected number of columns, got %d instead of 3", len(qr.Rows[0])) } // When err is not nil then we got a retryable error and will loop again. 
@@ -847,7 +851,7 @@ func (vre *Engine) readAllRows(ctx context.Context) ([]map[string]string, error) return nil, err } defer dbClient.Close() - qr, err := dbClient.ExecuteFetch(fmt.Sprintf("select * from _vt.vreplication where db_name=%v", encodeString(vre.dbName)), maxRows) + qr, err := dbClient.ExecuteFetch(fmt.Sprintf("select * from _vt.vreplication where db_name=%s", encodeString(vre.dbName)), maxRows) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go index 32add04c8e0..ea46e126895 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine_test.go @@ -277,23 +277,7 @@ func TestEngineExec(t *testing.T) { t.Errorf("stats are mismatched: %v, want %v", globalStats.controllers, vre.controllers) } - // Test Delete of multiple rows - - dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) - dbClient.ExpectRequest("select id from _vt.vreplication where id > 1", testSelectorResponse2, nil) - dbClient.ExpectRequest("begin", nil, nil) - dbClient.ExpectRequest("delete from _vt.vreplication where id in (1, 2)", testDMLResponse, nil) - dbClient.ExpectRequest("delete from _vt.copy_state where vrepl_id in (1, 2)", nil, nil) - dbClient.ExpectRequest("delete from _vt.post_copy_action where vrepl_id in (1, 2)", nil, nil) - dbClient.ExpectRequest("commit", nil, nil) - - _, err = vre.Exec("delete from _vt.vreplication where id > 1") - if err != nil { - t.Fatal(err) - } - dbClient.Wait() - - // Test no delete + // Test simple delete. dbClient.ExpectRequest("use _vt", &sqltypes.Result{}, nil) dbClient.ExpectRequest("select id from _vt.vreplication where id = 3", &sqltypes.Result{}, nil) _, err = vre.Exec("delete from _vt.vreplication where id = 3") @@ -301,6 +285,21 @@ func TestEngineExec(t *testing.T) { t.Fatal(err) } dbClient.Wait() + + // Test unsafe writes of multiple rows, which we want to prevent. 
+ unsafeQueries := []string{ + "delete from _vt.vreplication", + "delete from _vt.vreplication where id > 1", + "delete from _vt.vreplication where message != 'FROZEN'", + "update _vt.vreplication set workflow = 'bad'", + "update _vt.vreplication set state = 'Stopped' where id > 1", + "update _vt.vreplication set message = '' where state == 'Running'", + } + for _, unsafeQuery := range unsafeQueries { + _, err = vre.Exec(unsafeQuery) + require.Error(t, err, "%s should fail", unsafeQuery) + dbClient.Wait() + } } func TestEngineBadInsert(t *testing.T) { @@ -427,7 +426,8 @@ func TestWaitForPosError(t *testing.T) { dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{}}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") - want = "unexpected result: &{[] 0 0 [[]] 0 }" + want = "vreplication stream received an unexpected number of columns, got 0 instead of 3" + assert.EqualError(t, err, want, "WaitForPos:") dbClient.ExpectRequest("select pos, state, message from _vt.vreplication where id=1", &sqltypes.Result{Rows: [][]sqltypes.Value{{ @@ -436,7 +436,7 @@ func TestWaitForPosError(t *testing.T) { sqltypes.NewVarBinary("MariaDB/0-1-1083"), }}}, nil) err = vre.WaitForPos(context.Background(), 1, "MariaDB/0-1-1084") - want = `unexpected result: &{[] 0 0 [[VARBINARY("MariaDB/0-1-1083")] [VARBINARY("MariaDB/0-1-1083")]] 0 }` + want = "vreplication stream received more rows than expected, got 2 instead of 1" assert.EqualError(t, err, want, "WaitForPos:") } diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go index 1c20e2054be..c53bfd2a584 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go @@ -17,9 +17,8 @@ limitations under the License. 
package vreplication import ( - "sync" - "context" + "sync" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" @@ -28,6 +27,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -58,13 +58,15 @@ type VStreamerClient interface { } type externalConnector struct { + env *vtenv.Environment mu sync.Mutex dbconfigs map[string]*dbconfigs.DBConfigs connectors map[string]*mysqlConnector } -func newExternalConnector(dbcfgs map[string]*dbconfigs.DBConfigs) *externalConnector { +func newExternalConnector(env *vtenv.Environment, dbcfgs map[string]*dbconfigs.DBConfigs) *externalConnector { return &externalConnector{ + env: env, dbconfigs: dbcfgs, connectors: make(map[string]*mysqlConnector), } @@ -91,7 +93,7 @@ func (ec *externalConnector) Get(name string) (*mysqlConnector, error) { return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "external mysqlConnector %v not found", name) } c := &mysqlConnector{} - c.env = tabletenv.NewEnv(config, name) + c.env = tabletenv.NewEnv(ec.env, config, name) c.se = schema.NewEngine(c.env) c.vstreamer = vstreamer.NewEngine(c.env, nil, c.se, nil, "") c.vstreamer.InitDBConfig("", "") @@ -170,7 +172,7 @@ func newTabletConnector(tablet *topodatapb.Tablet) *tabletConnector { func (tc *tabletConnector) Open(ctx context.Context) error { var err error - tc.qs, err = tabletconn.GetDialer()(tc.tablet, grpcclient.FailFast(true)) + tc.qs, err = tabletconn.GetDialer()(ctx, tc.tablet, grpcclient.FailFast(true)) return err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/flags.go b/go/vt/vttablet/tabletmanager/vreplication/flags.go index 44f07f87a0f..e45158ab99a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/flags.go +++ b/go/vt/vttablet/tabletmanager/vreplication/flags.go 
@@ -26,9 +26,9 @@ import ( var ( retryDelay = 5 * time.Second - maxTimeToRetryError time.Duration // default behavior is to keep retrying, for backward compatibility + maxTimeToRetryError time.Duration // Default behavior is to keep retrying, for backward compatibility - tabletTypesStr = "in_order:REPLICA,PRIMARY" + tabletTypesStr = "in_order:REPLICA,PRIMARY" // Default value relayLogMaxSize = 250000 relayLogMaxItems = 5000 @@ -45,10 +45,6 @@ func registerVReplicationFlags(fs *pflag.FlagSet) { fs.DurationVar(&retryDelay, "vreplication_retry_delay", retryDelay, "delay before retrying a failed workflow event in the replication phase") fs.DurationVar(&maxTimeToRetryError, "vreplication_max_time_to_retry_on_error", maxTimeToRetryError, "stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence") - // these are the default tablet_types that will be used by the tablet picker to find source tablets for a vreplication stream - // it can be overridden by passing a different list to the MoveTables or Reshard commands - fs.StringVar(&tabletTypesStr, "vreplication_tablet_type", tabletTypesStr, "comma separated list of tablet types used as a source") - fs.IntVar(&relayLogMaxSize, "relay_log_max_size", relayLogMaxSize, "Maximum buffer size (in bytes) for VReplication target buffering. 
If single rows are larger than this, a single row is buffered at a time.") fs.IntVar(&relayLogMaxItems, "relay_log_max_items", relayLogMaxItems, "Maximum number of rows for VReplication target buffering.") @@ -62,11 +58,6 @@ func registerVReplicationFlags(fs *pflag.FlagSet) { fs.IntVar(&vreplicationHeartbeatUpdateInterval, "vreplication_heartbeat_update_interval", vreplicationHeartbeatUpdateInterval, "Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream when idling") fs.BoolVar(&vreplicationStoreCompressedGTID, "vreplication_store_compressed_gtid", vreplicationStoreCompressedGTID, "Store compressed gtids in the pos column of the sidecar database's vreplication table") - // deprecated flags (7.0), however there are several e2e tests that still depend on them - fs.Duration("vreplication_healthcheck_topology_refresh", 30*time.Second, "refresh interval for re-reading the topology") - fs.Duration("vreplication_healthcheck_retry_delay", 5*time.Second, "healthcheck retry delay") - fs.Duration("vreplication_healthcheck_timeout", 1*time.Minute, "healthcheck retry delay") - fs.IntVar(&vreplicationParallelInsertWorkers, "vreplication-parallel-insert-workers", vreplicationParallelInsertWorkers, "Number of parallel insertion workers to use during copy phase. 
Set <= 1 to disable parallelism, or > 1 to enable concurrent insertion during copy phase.") } diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 576ce4c22a8..04c4c8f3e41 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -28,25 +28,26 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/vt/vttablet" - - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/dbconfigs" - "github.com/spf13/pflag" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -69,6 +70,7 @@ var ( globalFBC = &fakeBinlogClient{} vrepldb = "vrepl" globalDBQueries = make(chan string, 1000) + lastMultiExecQuery = "" testForeignKeyQueries = false testSetForeignKeyQueries = false doNotLogDBQueries = false @@ -107,7 +109,7 @@ func setFlag(flagName, flagValue string) { } func init() { - tabletconn.RegisterDialer("test", func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + tabletconn.RegisterDialer("test", func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { return 
&fakeTabletConn{ QueryService: fakes.ErrorQueryService, tablet: tablet, @@ -210,7 +212,7 @@ func resetBinlogClient() { func primaryPosition(t *testing.T) string { t.Helper() - pos, err := env.Mysqld.PrimaryPosition() + pos, err := env.Mysqld.PrimaryPosition(context.Background()) if err != nil { t.Fatal(err) } @@ -225,6 +227,15 @@ func execStatements(t *testing.T, queries []string) { } } +func execConnStatements(t *testing.T, conn *dbconnpool.DBConnection, queries []string) { + t.Helper() + for _, query := range queries { + if _, err := conn.ExecuteFetch(query, 10000, false); err != nil { + t.Fatalf("ExecuteFetch(%v) failed: %v", query, err) + } + } +} + //-------------------------------------- // Topos and tablets @@ -245,7 +256,6 @@ func addTablet(id int) *topodatapb.Tablet { if err := env.TopoServ.CreateTablet(context.Background(), tablet); err != nil { panic(err) } - env.SchemaEngine.Reload(context.Background()) return tablet } @@ -266,7 +276,6 @@ func addOtherTablet(id int, keyspace, shard string) *topodatapb.Tablet { if err := env.TopoServ.CreateTablet(context.Background(), tablet); err != nil { panic(err) } - env.SchemaEngine.Reload(context.Background()) return tablet } @@ -274,7 +283,6 @@ func deleteTablet(tablet *topodatapb.Tablet) { env.TopoServ.DeleteTablet(context.Background(), tablet.Alias) // This is not automatically removed from shard replication, which results in log spam. topo.DeleteTabletReplicationData(context.Background(), env.TopoServ, tablet) - env.SchemaEngine.Reload(context.Background()) } // fakeTabletConn implement TabletConn interface. 
We only care about the @@ -353,7 +361,7 @@ type fakeBinlogClient struct { lastCharset *binlogdatapb.Charset } -func (fbc *fakeBinlogClient) Dial(tablet *topodatapb.Tablet) error { +func (fbc *fakeBinlogClient) Dial(ctx context.Context, tablet *topodatapb.Tablet) error { fbc.lastTablet = tablet return nil } @@ -484,6 +492,23 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu return qr, err } +func (dc *realDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) + if err != nil { + return nil, err + } + results := make([]*sqltypes.Result, 0, len(queries)) + for _, query := range queries { + qr, err := dc.ExecuteFetch(query, maxrows) + if err != nil { + return nil, err + } + results = append(results, qr) + } + lastMultiExecQuery = query + return results, nil +} + func expectDeleteQueries(t *testing.T) { t.Helper() if doNotLogDBQueries { @@ -496,6 +521,19 @@ func expectDeleteQueries(t *testing.T) { )) } +func deleteAllVReplicationStreams(t *testing.T) { + t.Helper() + res, err := playerEngine.Exec("select id from _vt.vreplication") + require.NoError(t, err, "could not select ids from _vt.vreplication: %v", err) + ids := make([]string, len(res.Rows)) + for i, row := range res.Rows { + id := row[0].ToString() + ids[i] = id + } + _, err = playerEngine.Exec(fmt.Sprintf("delete from _vt.vreplication where id in (%s)", strings.Join(ids, ","))) + require.NoError(t, err, "failed to delete vreplication rows: %v", err) +} + func expectLogsAndUnsubscribe(t *testing.T, logs []LogExpectation, logCh chan *VrLogStats) { t.Helper() defer vrLogStatsLogger.Unsubscribe(logCh) @@ -540,6 +578,9 @@ func shouldIgnoreQuery(query string) bool { ", component_throttled=", // update of last throttle time, can happen out-of-band, so can't test for it "context cancel", "SELECT rows_copied FROM _vt.vreplication WHERE id=", + // This is only executed if the table has 
no defined Primary Key, which we don't know in the lower level + // code. + "SELECT index_cols.COLUMN_NAME AS column_name, index_cols.INDEX_NAME as index_name FROM information_schema.STATISTICS", } if sidecardb.MatchesInitQuery(query) { return true diff --git a/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go b/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go index da1753a8444..6a127b084b5 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/insert_generator.go @@ -21,8 +21,10 @@ import ( "strings" "time" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/throttler" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) // InsertGenerator generates a vreplication insert statement. @@ -38,7 +40,7 @@ type InsertGenerator struct { // NewInsertGenerator creates a new InsertGenerator. func NewInsertGenerator(state binlogdatapb.VReplicationWorkflowState, dbname string) *InsertGenerator { buf := &strings.Builder{} - buf.WriteString("insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values ") + buf.WriteString("insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options) values ") return &InsertGenerator{ buf: buf, state: state.String(), @@ -50,7 +52,8 @@ func NewInsertGenerator(state binlogdatapb.VReplicationWorkflowState, dbname str // AddRow adds a row to the insert statement. 
func (ig *InsertGenerator) AddRow(workflow string, bls *binlogdatapb.BinlogSource, pos, cell, tabletTypes string, workflowType binlogdatapb.VReplicationWorkflowType, workflowSubType binlogdatapb.VReplicationWorkflowSubType, deferSecondaryKeys bool) { - fmt.Fprintf(ig.buf, "%s(%v, %v, %v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %v)", + protoutil.SortBinlogSourceTables(bls) + fmt.Fprintf(ig.buf, "%s(%v, %v, %v, %v, %v, %v, %v, %v, 0, '%v', %v, %d, %d, %v, %v)", ig.prefix, encodeString(workflow), encodeString(bls.String()), @@ -65,6 +68,7 @@ func (ig *InsertGenerator) AddRow(workflow string, bls *binlogdatapb.BinlogSourc workflowType, workflowSubType, deferSecondaryKeys, + "'{}'", ) ig.prefix = ", " } diff --git a/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go b/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go index 5ccdfe3da10..2b07308c4c2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/insert_generator_test.go @@ -28,11 +28,11 @@ func TestInsertGenerator(t *testing.T) { ig := NewInsertGenerator(binlogdatapb.VReplicationWorkflowState_Stopped, "a") ig.now = 111 ig.AddRow("b", &binlogdatapb.BinlogSource{Keyspace: "c"}, "d", "e", "f", binlogdatapb.VReplicationWorkflowType_Materialize, binlogdatapb.VReplicationWorkflowSubType_None, false) - want := `insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys) values ` + - `('b', 'keyspace:\"c\"', 'd', 9223372036854775807, 9223372036854775807, 'e', 'f', 111, 0, 'Stopped', 'a', 0, 0, false)` + want := `insert into _vt.vreplication(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options) values ` + + `('b', 'keyspace:\"c\"', 
'd', 9223372036854775807, 9223372036854775807, 'e', 'f', 111, 0, 'Stopped', 'a', 0, 0, false, '{}')` assert.Equal(t, ig.String(), want) ig.AddRow("g", &binlogdatapb.BinlogSource{Keyspace: "h"}, "i", "j", "k", binlogdatapb.VReplicationWorkflowType_Reshard, binlogdatapb.VReplicationWorkflowSubType_Partial, true) - want += `, ('g', 'keyspace:\"h\"', 'i', 9223372036854775807, 9223372036854775807, 'j', 'k', 111, 0, 'Stopped', 'a', 4, 1, true)` + want += `, ('g', 'keyspace:\"h\"', 'i', 9223372036854775807, 9223372036854775807, 'j', 'k', 111, 0, 'Stopped', 'a', 4, 1, true, '{}')` assert.Equal(t, ig.String(), want) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/journal_test.go b/go/vt/vttablet/tabletmanager/vreplication/journal_test.go index 9dfdee766d1..18dbe1e7fd8 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/journal_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/journal_test.go @@ -20,8 +20,6 @@ import ( "fmt" "testing" - "context" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" qh "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication/queryhistory" ) @@ -38,7 +36,6 @@ func TestJournalOneToOne(t *testing.T) { "drop table t", fmt.Sprintf("drop table %s.t", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -81,9 +78,7 @@ func TestJournalOneToOne(t *testing.T) { )) // Delete all vreplication streams. There should be only one, but we don't know its id. - if _, err := playerEngine.Exec("delete from _vt.vreplication"); err != nil { - t.Fatal(err) - } + deleteAllVReplicationStreams(t) expectDeleteQueries(t) } @@ -100,7 +95,6 @@ func TestJournalOneToMany(t *testing.T) { "drop table t", fmt.Sprintf("drop table %s.t", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -150,9 +144,7 @@ func TestJournalOneToMany(t *testing.T) { )) // Delete all vreplication streams. 
There should be only one, but we don't know its id. - if _, err := playerEngine.Exec("delete from _vt.vreplication"); err != nil { - t.Fatal(err) - } + deleteAllVReplicationStreams(t) expectDeleteQueries(t) } @@ -168,7 +160,6 @@ func TestJournalTablePresent(t *testing.T) { "drop table t", fmt.Sprintf("drop table %s.t", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -211,9 +202,7 @@ func TestJournalTablePresent(t *testing.T) { )) // Delete all vreplication streams. There should be only one, but we don't know its id. - if _, err := playerEngine.Exec("delete from _vt.vreplication"); err != nil { - t.Fatal(err) - } + deleteAllVReplicationStreams(t) expectDeleteQueries(t) } @@ -229,7 +218,6 @@ func TestJournalTableNotPresent(t *testing.T) { "drop table t", fmt.Sprintf("drop table %s.t", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -264,9 +252,7 @@ func TestJournalTableNotPresent(t *testing.T) { defer execStatements(t, []string{"delete from _vt.resharding_journal"}) // Delete all vreplication streams. There should be only one, but we don't know its id. - if _, err := playerEngine.Exec("delete from _vt.vreplication"); err != nil { - t.Fatal(err) - } + deleteAllVReplicationStreams(t) expectDeleteQueries(t) } @@ -286,7 +272,6 @@ func TestJournalTableMixed(t *testing.T) { fmt.Sprintf("drop table %s.t", vrepldb), fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -326,8 +311,6 @@ func TestJournalTableMixed(t *testing.T) { )) // Delete all vreplication streams. There should be only one, but we don't know its id. 
- if _, err := playerEngine.Exec("delete from _vt.vreplication"); err != nil { - t.Fatal(err) - } + deleteAllVReplicationStreams(t) expectDeleteQueries(t) } diff --git a/go/vt/vttablet/tabletmanager/vreplication/queryhistory/sequenced_expectation_set.go b/go/vt/vttablet/tabletmanager/vreplication/queryhistory/sequenced_expectation_set.go index 9ab0bf99043..95b2c3e4f67 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/queryhistory/sequenced_expectation_set.go +++ b/go/vt/vttablet/tabletmanager/vreplication/queryhistory/sequenced_expectation_set.go @@ -12,7 +12,7 @@ type sequencedExpectationSet map[SequencedExpectation]any func (ses *sequencedExpectationSet) Add(expectation SequencedExpectation) { if ses == nil { - *ses = make(sequencedExpectationSet) + ses = new(sequencedExpectationSet) } (*ses)[expectation] = true } @@ -27,7 +27,7 @@ func (ses *sequencedExpectationSet) Contains(expectation SequencedExpectation) b func (ses *sequencedExpectationSet) Slice() []SequencedExpectation { s := make([]SequencedExpectation, 0) - if len(*ses) == 0 { + if ses == nil || len(*ses) == 0 { return s } for se := range *ses { diff --git a/go/vt/vttablet/tabletmanager/vreplication/queryhistory/verifier.go b/go/vt/vttablet/tabletmanager/vreplication/queryhistory/verifier.go index a7015b0daf5..ebe145461d7 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/queryhistory/verifier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/queryhistory/verifier.go @@ -41,7 +41,7 @@ func NewVerifier(sequence ExpectationSequence) *Verifier { } // AcceptQuery verifies that the provided query is valid according to the -// internal ExpectationSequence and the internal History of preceeding queries. +// internal ExpectationSequence and the internal History of preceding queries. // Returns a *Result indicating whether the query was accepted and, if not, // diagnostic details indicating why not. 
func (v *Verifier) AcceptQuery(query string) *Result { @@ -159,7 +159,7 @@ func (v *Verifier) checkQueryAgainstExpectation(query string, expectation Sequen // Query passed expectation. result.Accepted = true result.Matched = true - result.Message = "matched expectated query and expected order" + result.Message = "matched expected query and expected order" return true } diff --git a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go index 9c6f427b418..c3cd073f0bf 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go @@ -19,16 +19,13 @@ package vreplication import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "context" - "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" ) @@ -38,8 +35,7 @@ import ( // This is used by binlog server to make vstream connection // using the vstream connection, it will parse the events from binglog // to fetch the corresponding GTID for required recovery time -func NewReplicaConnector(connParams *mysql.ConnParams) *ReplicaConnector { - +func NewReplicaConnector(venv *vtenv.Environment, connParams *mysql.ConnParams) *ReplicaConnector { // Construct config := tabletenv.NewDefaultConfig() dbCfg := &dbconfigs.DBConfigs{ @@ -49,7 +45,7 @@ func NewReplicaConnector(connParams *mysql.ConnParams) *ReplicaConnector { dbCfg.SetDbParams(*connParams, *connParams, *connParams) config.DB = dbCfg c := &ReplicaConnector{conn: connParams} - env 
:= tabletenv.NewEnv(config, "source") + env := tabletenv.NewEnv(venv, config, "source") c.se = schema.NewEngine(env) c.se.SkipMetaCheck = true c.vstreamer = vstreamer.NewEngine(env, nil, c.se, nil, "") @@ -70,33 +66,12 @@ type ReplicaConnector struct { vstreamer *vstreamer.Engine } -func (c *ReplicaConnector) shutdown() { +func (c *ReplicaConnector) Close() error { c.vstreamer.Close() c.se.Close() -} - -func (c *ReplicaConnector) Open(ctx context.Context) error { - return nil -} - -func (c *ReplicaConnector) Close(ctx context.Context) error { - c.shutdown() return nil } func (c *ReplicaConnector) VStream(ctx context.Context, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { return c.vstreamer.Stream(ctx, startPos, nil, filter, throttlerapp.ReplicaConnectorName, send) } - -// VStreamRows streams rows from query result -func (c *ReplicaConnector) VStreamRows(ctx context.Context, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { - var row []sqltypes.Value - if lastpk != nil { - r := sqltypes.Proto3ToResult(lastpk) - if len(r.Rows) != 1 { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected lastpk input: %v", lastpk) - } - row = r.Rows[0] - } - return c.vstreamer.StreamRows(ctx, query, row, send) -} diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go index 39ffdef04ae..3bef997d0be 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go @@ -29,13 +29,14 @@ import ( vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" 
"vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) // ReplicatorPlan is the execution plan for the replicator. It contains @@ -58,6 +59,7 @@ type ReplicatorPlan struct { ColInfoMap map[string][]*ColumnInfo stats *binlogplayer.Stats Source *binlogdatapb.BinlogSource + collationEnv *collations.Environment } // buildExecution plan uses the field info as input and the partially built @@ -97,11 +99,12 @@ func (rp *ReplicatorPlan) buildExecutionPlan(fieldEvent *binlogdatapb.FieldEvent // requires us to wait for the field info sent by the source. func (rp *ReplicatorPlan) buildFromFields(tableName string, lastpk *sqltypes.Result, fields []*querypb.Field) (*TablePlan, error) { tpb := &tablePlanBuilder{ - name: sqlparser.NewIdentifierCS(tableName), - lastpk: lastpk, - colInfos: rp.ColInfoMap[tableName], - stats: rp.stats, - source: rp.Source, + name: sqlparser.NewIdentifierCS(tableName), + lastpk: lastpk, + colInfos: rp.ColInfoMap[tableName], + stats: rp.stats, + source: rp.Source, + collationEnv: rp.collationEnv, } for _, field := range fields { colName := sqlparser.NewIdentifierCI(field.Name) @@ -195,8 +198,8 @@ type TablePlan struct { Insert *sqlparser.ParsedQuery Update *sqlparser.ParsedQuery Delete *sqlparser.ParsedQuery + MultiDelete *sqlparser.ParsedQuery Fields []*querypb.Field - EnumValuesMap map[string](map[string]string) ConvertIntToEnum map[string]bool // PKReferences is used to check if an event changed // a primary key column (row move). @@ -215,6 +218,8 @@ type TablePlan struct { PartialInserts map[string]*sqlparser.ParsedQuery // PartialUpdates are same as PartialInserts, but for update statements PartialUpdates map[string]*sqlparser.ParsedQuery + + CollationEnv *collations.Environment } // MarshalJSON performs a custom JSON Marshalling. 
@@ -252,7 +257,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows []*querypb.R if i > 0 { sqlbuffer.WriteString(", ") } - if err := tp.BulkInsertValues.AppendFromRow(sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil { + if err := appendFromRow(tp.BulkInsertValues, sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil { return nil, err } } @@ -297,7 +302,7 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, rowVal, _ := sqltypes.BindVariableToValue(bindvar) // TODO(king-11) make collation aware - result, err := evalengine.NullsafeCompare(rowVal, tp.Lastpk.Rows[0][0], collations.Unknown) + result, err := evalengine.NullsafeCompare(rowVal, tp.Lastpk.Rows[0][0], tp.CollationEnv, collations.Unknown, nil) // If rowVal is > last pk, transaction will be a noop, so don't apply this statement if err == nil && result > 0 { tp.Stats.NoopQueryCount.Add(stmtType, 1) @@ -315,7 +320,7 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*querypb.BindVariable, error) { if conversion, ok := tp.ConvertCharset[field.Name]; ok && !val.IsNull() { // Non-null string value, for which we have a charset conversion instruction - fromCollation := collations.Local().DefaultCollationForCharset(conversion.FromCharset) + fromCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.FromCharset) if fromCollation == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", conversion.FromCharset, field.Name) } @@ -329,29 +334,6 @@ func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*q // An integer converted to an enum. We must write the textual value of the int. i.e. 
0 turns to '0' return sqltypes.StringBindVariable(val.ToString()), nil } - if enumValues, ok := tp.EnumValuesMap[field.Name]; ok && !val.IsNull() { - // The fact that this field has a EnumValuesMap entry, means we must - // use the enum's text value as opposed to the enum's numerical value. - // Once known use case is with Online DDL, when a column is converted from - // ENUM to a VARCHAR/TEXT. - enumValue, enumValueOK := enumValues[val.ToString()] - if !enumValueOK { - return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "Invalid enum value: %v for field %s", val, field.Name) - } - // get the enum text for this val - return sqltypes.StringBindVariable(enumValue), nil - } - if field.Type == querypb.Type_ENUM { - // This is an ENUM w/o a values map, which means that we are most likely using - // the index value -- what is stored and binlogged vs. the list of strings - // defined in the table schema -- and we must use an int bindvar or we'll have - // invalid/incorrect predicates like WHERE enumcol='2'. - // This will be the case when applying binlog events. - enumIndexVal := sqltypes.MakeTrusted(querypb.Type_UINT64, val.Raw()) - if enumIndex, err := enumIndexVal.ToUint64(); err == nil { - return sqltypes.Uint64BindVariable(enumIndex), nil - } - } return sqltypes.ValueBindVariable(*val), nil } @@ -444,6 +426,126 @@ func (tp *TablePlan) applyChange(rowChange *binlogdatapb.RowChange, executor fun return nil, nil } +// applyBulkDeleteChanges applies a bulk DELETE statement from the row changes +// to the target table -- which resulted from a DELETE statement executed on the +// source that deleted N rows -- using an IN clause with the primary key values +// of the rows to be deleted. This currently only supports tables with single +// column primary keys. This limitation is in place for now as we know that case +// will still be efficient. 
When using large multi-column IN or OR group clauses +// in DELETES we could end up doing large (table) scans that actually make things +// slower. +// TODO: Add support for multi-column primary keys. +func (tp *TablePlan) applyBulkDeleteChanges(rowDeletes []*binlogdatapb.RowChange, executor func(string) (*sqltypes.Result, error), maxQuerySize int64) (*sqltypes.Result, error) { + if len(rowDeletes) == 0 { + return &sqltypes.Result{}, nil + } + if (len(tp.TablePlanBuilder.pkCols) + len(tp.TablePlanBuilder.extraSourcePkCols)) != 1 { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "bulk delete is only supported for tables with a single primary key column") + } + if tp.MultiDelete == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "plan has no bulk delete query") + } + + baseQuerySize := int64(len(tp.MultiDelete.Query)) + querySize := baseQuerySize + + execQuery := func(pkVals *[]sqltypes.Value) (*sqltypes.Result, error) { + pksBV, err := sqltypes.BuildBindVariable(*pkVals) + if err != nil { + return nil, err + } + query, err := tp.MultiDelete.GenerateQuery(map[string]*querypb.BindVariable{"bulk_pks": pksBV}, nil) + if err != nil { + return nil, err + } + tp.TablePlanBuilder.stats.BulkQueryCount.Add("delete", 1) + return executor(query) + } + + pkIndex := -1 + pkVals := make([]sqltypes.Value, 0, len(rowDeletes)) + for _, rowDelete := range rowDeletes { + vals := sqltypes.MakeRowTrusted(tp.Fields, rowDelete.Before) + if pkIndex == -1 { + for i := range vals { + if tp.PKIndices[i] { + pkIndex = i + break + } + } + } + addedSize := int64(len(vals[pkIndex].Raw()) + 2) // Plus 2 for the comma and space + if querySize+addedSize > maxQuerySize { + if _, err := execQuery(&pkVals); err != nil { + return nil, err + } + pkVals = nil + querySize = baseQuerySize + } + pkVals = append(pkVals, vals[pkIndex]) + querySize += addedSize + } + + return execQuery(&pkVals) +} + +// applyBulkInsertChanges generates a multi-row INSERT statement from the row 
+// changes generated from a multi-row INSERT statement executed on the source. +func (tp *TablePlan) applyBulkInsertChanges(rowInserts []*binlogdatapb.RowChange, executor func(string) (*sqltypes.Result, error), maxQuerySize int64) (*sqltypes.Result, error) { + if len(rowInserts) == 0 { + return &sqltypes.Result{}, nil + } + if tp.BulkInsertFront == nil { + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "plan has no bulk insert query") + } + + prefix := &strings.Builder{} + prefix.WriteString(tp.BulkInsertFront.Query) + prefix.WriteString(" values ") + insertPrefix := prefix.String() + maxQuerySize -= int64(len(insertPrefix)) + values := &strings.Builder{} + + execQuery := func(vals *strings.Builder) (*sqltypes.Result, error) { + if tp.BulkInsertOnDup != nil { + vals.WriteString(tp.BulkInsertOnDup.Query) + } + tp.TablePlanBuilder.stats.BulkQueryCount.Add("insert", 1) + return executor(insertPrefix + vals.String()) + } + + newStmt := true + for _, rowInsert := range rowInserts { + rowValues := &strings.Builder{} + bindvars := make(map[string]*querypb.BindVariable, len(tp.Fields)) + vals := sqltypes.MakeRowTrusted(tp.Fields, rowInsert.After) + for n, field := range tp.Fields { + bindVar, err := tp.bindFieldVal(field, &vals[n]) + if err != nil { + return nil, err + } + bindvars["a_"+field.Name] = bindVar + } + if err := tp.BulkInsertValues.Append(rowValues, bindvars, nil); err != nil { + return nil, err + } + if int64(values.Len()+2+rowValues.Len()) > maxQuerySize { // Plus 2 for the comma and space + if _, err := execQuery(values); err != nil { + return nil, err + } + values.Reset() + newStmt = true + } + if !newStmt { + values.WriteString(", ") + } + values.WriteString(rowValues.String()) + newStmt = false + } + + return execQuery(values) +} + func getQuery(pq *sqlparser.ParsedQuery, bindvars map[string]*querypb.BindVariable) (string, error) { sql, err := pq.GenerateQuery(bindvars, nil) if err != nil { @@ -481,3 +583,74 @@ func valsEqual(v1, v2 
sqltypes.Value) bool { // Compare content only if none are null. return v1.ToString() == v2.ToString() } + +// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that +// the fields in the row are in the same order as the placeholders in this query. The fields might include generated +// columns which are dropped, by checking against skipFields, before binding the variables +// note: there can be more fields than bind locations since extra columns might be requested from the source if not all +// primary keys columns are present in the target table, for example. Also some values in the row may not correspond for +// values from the database on the source: sum/count for aggregation queries, for example +func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error { + bindLocations := pq.BindLocations() + if len(fields) < len(bindLocations) { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", + len(fields), len(bindLocations)) + } + + type colInfo struct { + typ querypb.Type + length int64 + offset int64 + } + rowInfo := make([]*colInfo, 0) + + offset := int64(0) + for i, field := range fields { // collect info required for fields to be bound + length := row.Lengths[i] + if !skipFields[strings.ToLower(field.Name)] { + rowInfo = append(rowInfo, &colInfo{ + typ: field.Type, + length: length, + offset: offset, + }) + } + if length > 0 { + offset += row.Lengths[i] + } + } + + // bind field values to locations + var offsetQuery int + for i, loc := range bindLocations { + col := rowInfo[i] + buf.WriteString(pq.Query[offsetQuery:loc.Offset]) + typ := col.typ + + switch typ { + case querypb.Type_TUPLE: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) + case querypb.Type_JSON: + if col.length < 0 { // An SQL NULL and not an actual JSON value + 
buf.WriteString(sqltypes.NullStr) + } else { // A JSON value (which may be a JSON null literal value) + buf2 := row.Values[col.offset : col.offset+col.length] + vv, err := vjson.MarshalSQLValue(buf2) + if err != nil { + return err + } + buf.WriteString(vv.RawStr()) + } + default: + if col.length < 0 { + // -1 means a null variable; serialize it directly + buf.WriteString(sqltypes.NullStr) + } else { + vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) + vv.EncodeSQLBytes2(buf) + } + } + offsetQuery = loc.Offset + loc.Length + } + buf.WriteString(pq.Query[offsetQuery:]) + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go index 780b1c0d064..6c9f92128ac 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go @@ -21,11 +21,14 @@ import ( "strings" "testing" - "vitess.io/vitess/go/vt/binlog/binlogplayer" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/sqlparser" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -239,7 +242,7 @@ func TestBuildPlayerPlan(t *testing.T) { PKReferences: []string{"c1"}, InsertFront: "insert into t1(c1,c2,c3)", InsertValues: "(:a_c1,:a_c2,:a_c3)", - InsertOnDup: "on duplicate key update c2=values(c2)", + InsertOnDup: " on duplicate key update c2=values(c2)", Insert: "insert into t1(c1,c2,c3) values (:a_c1,:a_c2,:a_c3) on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1", Delete: "update t1 set c2=null where c1=:b_c1", @@ -261,7 +264,7 @@ func TestBuildPlayerPlan(t *testing.T) { PKReferences: []string{"c1", "pk1", "pk2"}, InsertFront: "insert into t1(c1,c2,c3)", InsertValues: 
"(:a_c1,:a_c2,:a_c3)", - InsertOnDup: "on duplicate key update c2=values(c2)", + InsertOnDup: " on duplicate key update c2=values(c2)", Insert: "insert into t1(c1,c2,c3) select :a_c1, :a_c2, :a_c3 from dual where (:a_pk1,:a_pk2) <= (1,'aaa') on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", Delete: "update t1 set c2=null where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", @@ -571,16 +574,16 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "bad query", }}, }, - err: "syntax error at position 4 near 'bad'", + err: "syntax error at position 4 near 'bad' in query: bad query", }, { // not a select input: &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "t1", - Filter: "update t1 set val=1", + Filter: "update t1 set val = 1", }}, }, - err: "unexpected: update t1 set val = 1", + err: "unsupported non-select statement in query: update t1 set val = 1", }, { // no distinct input: &binlogdatapb.Filter{ @@ -589,7 +592,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select distinct c1 from t1", }}, }, - err: "unexpected: select distinct c1 from t1", + err: "unsupported distinct clause in query: select distinct c1 from t1", }, { // no ',' join input: &binlogdatapb.Filter{ @@ -598,7 +601,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select * from t1, t2", }}, }, - err: "unexpected: select * from t1, t2", + err: "unsupported multi-table usage in query: select * from t1, t2", }, { // no join input: &binlogdatapb.Filter{ @@ -607,7 +610,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select * from t1 join t2", }}, }, - err: "unexpected: select * from t1 join t2", + err: "unsupported from expression (*sqlparser.JoinTableExpr) in query: select * from t1 join t2", }, { // no subqueries input: &binlogdatapb.Filter{ @@ -616,7 +619,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select * from (select * from t2) as a", }}, }, - err: "unexpected: select * from (select * from t2) 
as a", + err: "unsupported from source (*sqlparser.DerivedTable) in query: select * from (select * from t2) as a", }, { // cannot combine '*' with other input: &binlogdatapb.Filter{ @@ -625,7 +628,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select *, c1 from t1", }}, }, - err: "unexpected: select *, c1 from t1", + err: "unsupported mix of '*' and columns in query: select *, c1 from t1", }, { // cannot combine '*' with other (different code path) input: &binlogdatapb.Filter{ @@ -634,7 +637,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select c1, * from t1", }}, }, - err: "unexpected: *", + err: "invalid expression: * in query: select c1, * from t1", }, { // no distinct in func input: &binlogdatapb.Filter{ @@ -643,7 +646,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select hour(distinct c1) as a from t1", }}, }, - err: "syntax error at position 21 near 'distinct'", + err: "syntax error at position 21 near 'distinct' in query: select hour(distinct c1) as a from t1", }, { // funcs need alias input: &binlogdatapb.Filter{ @@ -652,7 +655,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select hour(c1) from t1", }}, }, - err: "expression needs an alias: hour(c1)", + err: "expression needs an alias: hour(c1) in query: select hour(c1) from t1", }, { // only count(*) input: &binlogdatapb.Filter{ @@ -661,7 +664,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select count(c1) as c from t1", }}, }, - err: "only count(*) is supported: count(c1)", + err: "only count(*) is supported: count(c1) in query: select count(c1) as c from t1", }, { // no sum(*) input: &binlogdatapb.Filter{ @@ -670,7 +673,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select sum(*) as c from t1", }}, }, - err: "syntax error at position 13", + err: "syntax error at position 13 in query: select sum(*) as c from t1", }, { // sum should have only one argument input: &binlogdatapb.Filter{ @@ -679,7 +682,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select 
sum(a, b) as c from t1", }}, }, - err: "syntax error at position 14", + err: "syntax error at position 14 in query: select sum(a, b) as c from t1", }, { // no complex expr in sum input: &binlogdatapb.Filter{ @@ -688,7 +691,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select sum(a + b) as c from t1", }}, }, - err: "unexpected: sum(a + b)", + err: "unsupported non-column name in sum clause: sum(a + b) in query: select sum(a + b) as c from t1", }, { // no complex expr in group by input: &binlogdatapb.Filter{ @@ -697,7 +700,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select a from t1 group by a + 1", }}, }, - err: "unexpected: a + 1", + err: "unsupported non-column name or alias in group by clause: a + 1 in query: select a from t1 group by a + 1", }, { // group by does not reference alias input: &binlogdatapb.Filter{ @@ -706,7 +709,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select a as b from t1 group by a", }}, }, - err: "group by expression does not reference an alias in the select list: a", + err: "group by expression does not reference an alias in the select list: a in query: select a as b from t1 group by a", }, { // cannot group by aggr input: &binlogdatapb.Filter{ @@ -715,7 +718,7 @@ func TestBuildPlayerPlan(t *testing.T) { Filter: "select count(*) as a from t1 group by a", }}, }, - err: "group by expression is not allowed to reference an aggregate expression: a", + err: "group by expression is not allowed to reference an aggregate expression: a in query: select count(*) as a from t1 group by a", }} PrimaryKeyInfos := map[string][]*ColumnInfo{ @@ -733,29 +736,23 @@ func TestBuildPlayerPlan(t *testing.T) { } for _, tcase := range testcases { - plan, err := buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, nil, binlogplayer.NewStats()) - gotPlan, _ := json.Marshal(plan) - wantPlan, _ := json.Marshal(tcase.plan) - if string(gotPlan) != string(wantPlan) { - t.Errorf("Filter(%v):\n%s, want\n%s", tcase.input, gotPlan, 
wantPlan) - } + plan, err := buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) gotErr := "" if err != nil { gotErr = err.Error() } - if gotErr != tcase.err { - t.Errorf("Filter err(%v): %s, want %v", tcase.input, gotErr, tcase.err) - } + require.Equal(t, tcase.err, gotErr, "Filter err(%v): %s, want %v", tcase.input, gotErr, tcase.err) + gotPlan, _ := json.Marshal(plan) + wantPlan, _ := json.Marshal(tcase.plan) + require.Equal(t, string(wantPlan), string(gotPlan), "Filter(%v):\n%s, want\n%s", tcase.input, gotPlan, wantPlan) - plan, err = buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, copyState, binlogplayer.NewStats()) + plan, err = buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, copyState, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) if err != nil { continue } gotPlan, _ = json.Marshal(plan) wantPlan, _ = json.Marshal(tcase.planpk) - if string(gotPlan) != string(wantPlan) { - t.Errorf("Filter(%v,copyState):\n%s, want\n%s", tcase.input, gotPlan, wantPlan) - } + require.Equal(t, string(wantPlan), string(gotPlan), "Filter(%v,copyState):\n%s, want\n%s", tcase.input, gotPlan, wantPlan) } } @@ -777,7 +774,7 @@ func TestBuildPlayerPlanNoDup(t *testing.T) { Filter: "select * from t", }}, } - _, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats()) + _, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) want := "more than one target for source table t" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("buildReplicatorPlan err: %v, must contain: %v", err, want) @@ -798,7 +795,7 @@ func TestBuildPlayerPlanExclude(t *testing.T) { Filter: "", }}, } - plan, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats()) + plan, err := 
buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) assert.NoError(t, err) want := &TestReplicatorPlan{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats.go b/go/vt/vttablet/tabletmanager/vreplication/stats.go index 6379a9ba04f..11f458d9541 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats.go +++ b/go/vt/vttablet/tabletmanager/vreplication/stats.go @@ -59,10 +59,12 @@ type vrStats struct { mu sync.Mutex isOpen bool controllers map[int32]*controller + + ThrottledCount *stats.Counter } func (st *vrStats) register() { - + st.ThrottledCount = stats.NewCounter("", "") stats.NewGaugeFunc("VReplicationStreamCount", "Number of vreplication streams", st.numControllers) stats.NewGaugeFunc("VReplicationLagSecondsMax", "Max vreplication seconds behind primary", st.maxReplicationLagSeconds) stats.NewStringMapFuncWithMultiLabels( @@ -254,6 +256,39 @@ func (st *vrStats) register() { return result }) + stats.NewGaugesFuncWithMultiLabels( + "VReplicationBulkQueryCount", + "vreplication vplayer queries with consolidated row events counts per DML type per stream", + []string{"source_keyspace", "source_shard", "workflow", "counts", "dml_type"}, + func() map[string]int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]int64, len(st.controllers)) + for _, ct := range st.controllers { + for label, count := range ct.blpStats.BulkQueryCount.Counts() { + if label == "" { + continue + } + result[ct.source.Keyspace+"."+ct.source.Shard+"."+ct.workflow+"."+fmt.Sprintf("%v", ct.id)+"."+label] = count + } + } + return result + }) + stats.NewCounterFunc( + "VReplicationBulkQueryCountTotal", + "vreplication vplayer queries with consolidated row events counts aggregated across all streams", + func() int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := int64(0) + for _, ct := range st.controllers { + for _, count := range ct.blpStats.BulkQueryCount.Counts() { + result += 
count + } + } + return result + }) + stats.NewGaugesFuncWithMultiLabels( "VReplicationNoopQueryCount", "vreplication noop query counts per stream", @@ -287,6 +322,41 @@ func (st *vrStats) register() { } return result }) + + stats.NewGaugesFuncWithMultiLabels( + "VReplicationTrxQueryBatchCount", + "vreplication vplayer transaction query batch counts per type per stream", + []string{"source_keyspace", "source_shard", "workflow", "counts", "commit_or_not"}, + func() map[string]int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]int64, len(st.controllers)) + for _, ct := range st.controllers { + for label, count := range ct.blpStats.TrxQueryBatchCount.Counts() { + if label == "" { + continue + } + result[ct.source.Keyspace+"."+ct.source.Shard+"."+ct.workflow+"."+fmt.Sprintf("%v", ct.id)+"."+label] = count + } + } + return result + }) + + stats.NewCounterFunc( + "VReplicationTrxQueryBatchCountTotal", + "vreplication vplayer transaction query batch counts aggregated across all streams", + func() int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := int64(0) + for _, ct := range st.controllers { + for _, count := range ct.blpStats.TrxQueryBatchCount.Counts() { + result += count + } + } + return result + }) + stats.NewGaugesFuncWithMultiLabels( "VReplicationCopyRowCount", "vreplication rows copied in copy phase per stream", @@ -434,6 +504,45 @@ func (st *vrStats) register() { return result }) + stats.NewCounterFunc( + "VReplicationThrottledCountTotal", + "The total number of times that vreplication has been throttled", + func() int64 { + st.mu.Lock() + defer st.mu.Unlock() + return st.ThrottledCount.Get() + }) + stats.NewCountersFuncWithMultiLabels( + "VReplicationThrottledCounts", + "The number of times vreplication was throttled by workflow, id, throttler (trx or tablet), and the sub-component that was throttled", + []string{"workflow", "id", "throttler", "component"}, + func() map[string]int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := 
make(map[string]int64) + for _, ct := range st.controllers { + for key, val := range ct.blpStats.ThrottledCounts.Counts() { + result[fmt.Sprintf("%s.%d.%s", ct.workflow, ct.id, key)] = val + } + } + return result + }) + + stats.NewCountersFuncWithMultiLabels( + "VReplicationDDLActions", + "vreplication DDL processing actions per stream", + []string{"workflow", "action"}, + func() map[string]int64 { + st.mu.Lock() + defer st.mu.Unlock() + result := make(map[string]int64, len(st.controllers)) + for _, ct := range st.controllers { + for key, val := range ct.blpStats.DDLEventActions.Counts() { + result[fmt.Sprintf("%s.%d.%s", ct.workflow, ct.id, key)] = val + } + } + return result + }) } func (st *vrStats) numControllers() int64 { @@ -476,6 +585,8 @@ func (st *vrStats) status() *EngineStatus { SourceTablet: ct.sourceTablet.Load().(*topodatapb.TabletAlias), Messages: ct.blpStats.MessageHistory(), QueryCounts: ct.blpStats.QueryCount.Counts(), + BulkQueryCounts: ct.blpStats.BulkQueryCount.Counts(), + TrxQueryBatchCounts: ct.blpStats.TrxQueryBatchCount.Counts(), PhaseTimings: ct.blpStats.PhaseTimings.Counts(), CopyRowCount: ct.blpStats.CopyRowCount.Get(), CopyLoopCount: ct.blpStats.CopyLoopCount.Get(), @@ -514,6 +625,8 @@ type ControllerStatus struct { SourceTablet *topodatapb.TabletAlias Messages []string QueryCounts map[string]int64 + BulkQueryCounts map[string]int64 + TrxQueryBatchCounts map[string]int64 PhaseTimings map[string]int64 CopyRowCount int64 CopyLoopCount int64 diff --git a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go index d5b5eacbdf2..12b79008d0b 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/stats_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/stats_test.go @@ -26,10 +26,10 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" - + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/binlog/binlogplayer" - 
"vitess.io/vitess/go/vt/proto/binlogdata" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -92,7 +92,7 @@ func TestStatusHtml(t *testing.T) { testStats.controllers = map[int32]*controller{ 1: { id: 1, - source: &binlogdata.BinlogSource{ + source: &binlogdatapb.BinlogSource{ Keyspace: "ks", Shard: "0", }, @@ -102,7 +102,7 @@ func TestStatusHtml(t *testing.T) { }, 2: { id: 2, - source: &binlogdata.BinlogSource{ + source: &binlogdatapb.BinlogSource{ Keyspace: "ks", Shard: "1", }, @@ -132,12 +132,14 @@ func TestStatusHtml(t *testing.T) { func TestVReplicationStats(t *testing.T) { blpStats := binlogplayer.NewStats() defer blpStats.Stop() - testStats := &vrStats{} + testStats := &vrStats{ + ThrottledCount: stats.NewCounter("", ""), + } testStats.isOpen = true testStats.controllers = map[int32]*controller{ 1: { id: 1, - source: &binlogdata.BinlogSource{ + source: &binlogdatapb.BinlogSource{ Keyspace: "ks", Shard: "0", }, @@ -169,11 +171,38 @@ func TestVReplicationStats(t *testing.T) { require.Equal(t, int64(11), testStats.status().Controllers[0].QueryCounts["replicate"]) require.Equal(t, int64(23), testStats.status().Controllers[0].QueryCounts["fastforward"]) + blpStats.BulkQueryCount.Add("insert", 101) + blpStats.BulkQueryCount.Add("delete", 203) + require.Equal(t, int64(101), testStats.status().Controllers[0].BulkQueryCounts["insert"]) + require.Equal(t, int64(203), testStats.status().Controllers[0].BulkQueryCounts["delete"]) + + blpStats.TrxQueryBatchCount.Add("without_commit", 10) + blpStats.TrxQueryBatchCount.Add("with_commit", 2193) + require.Equal(t, int64(10), testStats.status().Controllers[0].TrxQueryBatchCounts["without_commit"]) + require.Equal(t, int64(2193), testStats.status().Controllers[0].TrxQueryBatchCounts["with_commit"]) + blpStats.CopyLoopCount.Add(100) blpStats.CopyRowCount.Add(200) require.Equal(t, int64(100), testStats.status().Controllers[0].CopyLoopCount) require.Equal(t, int64(200), 
testStats.status().Controllers[0].CopyRowCount) + testStats.ThrottledCount.Add(99) + require.Equal(t, int64(99), testStats.ThrottledCount.Get()) + + blpStats.ThrottledCounts.Add([]string{"tablet", "vcopier"}, 10) + blpStats.ThrottledCounts.Add([]string{"tablet", "vplayer"}, 80) + require.Equal(t, int64(10), testStats.controllers[1].blpStats.ThrottledCounts.Counts()["tablet.vcopier"]) + require.Equal(t, int64(80), testStats.controllers[1].blpStats.ThrottledCounts.Counts()["tablet.vplayer"]) + + blpStats.DDLEventActions.Add(binlogdatapb.OnDDLAction_IGNORE.String(), 4) + blpStats.DDLEventActions.Add(binlogdatapb.OnDDLAction_EXEC.String(), 3) + blpStats.DDLEventActions.Add(binlogdatapb.OnDDLAction_EXEC_IGNORE.String(), 2) + blpStats.DDLEventActions.Add(binlogdatapb.OnDDLAction_STOP.String(), 1) + require.Equal(t, int64(4), testStats.controllers[1].blpStats.DDLEventActions.Counts()[binlogdatapb.OnDDLAction_IGNORE.String()]) + require.Equal(t, int64(3), testStats.controllers[1].blpStats.DDLEventActions.Counts()[binlogdatapb.OnDDLAction_EXEC.String()]) + require.Equal(t, int64(2), testStats.controllers[1].blpStats.DDLEventActions.Counts()[binlogdatapb.OnDDLAction_EXEC_IGNORE.String()]) + require.Equal(t, int64(1), testStats.controllers[1].blpStats.DDLEventActions.Counts()[binlogdatapb.OnDDLAction_STOP.String()]) + var tm int64 = 1234567890 blpStats.RecordHeartbeat(tm) require.Equal(t, tm, blpStats.Heartbeat()) diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index d94d0640529..2e44fd49e9b 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -22,13 +22,14 @@ import ( "sort" "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/key" - 
"vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" @@ -59,6 +60,8 @@ type tablePlanBuilder struct { stats *binlogplayer.Stats source *binlogdatapb.BinlogSource pkIndices []bool + + collationEnv *collations.Environment } // colExpr describes the processing to be performed to @@ -128,7 +131,7 @@ const ( // The TablePlan built is a partial plan. The full plan for a table is built // when we receive field information from events or rows sent by the source. // buildExecutionPlan is the function that builds the full plan. -func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats) (*ReplicatorPlan, error) { +func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats, collationEnv *collations.Environment, parser *sqlparser.Parser) (*ReplicatorPlan, error) { filter := source.Filter plan := &ReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{FieldEventMode: filter.FieldEventMode}, @@ -137,6 +140,7 @@ func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[strin ColInfoMap: colInfoMap, stats: stats, Source: source, + collationEnv: collationEnv, } for tableName := range colInfoMap { lastpk, ok := copyState[tableName] @@ -155,7 +159,7 @@ func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[strin if !ok { return nil, fmt.Errorf("table %s not found in schema", tableName) } - tablePlan, err := buildTablePlan(tableName, rule, colInfos, lastpk, stats, source) + tablePlan, err := buildTablePlan(tableName, rule, colInfos, lastpk, stats, source, collationEnv, parser) if err != nil { return nil, err } @@ -195,7 +199,13 @@ func MatchTable(tableName string, 
filter *binlogdatapb.Filter) (*binlogdatapb.Ru } func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*ColumnInfo, lastpk *sqltypes.Result, - stats *binlogplayer.Stats, source *binlogdatapb.BinlogSource) (*TablePlan, error) { + stats *binlogplayer.Stats, source *binlogdatapb.BinlogSource, collationEnv *collations.Environment, parser *sqlparser.Parser) (*TablePlan, error) { + + planError := func(err error, query string) error { + // Use the error string here to ensure things are uniform across + // vterrors (from parse) and errors (all others). + return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s in query: %s", err.Error(), query) + } filter := rule.Filter query := filter @@ -212,28 +222,22 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum case filter == ExcludeStr: return nil, nil } - sel, fromTable, err := analyzeSelectFrom(query) + sel, fromTable, err := analyzeSelectFrom(query, parser) if err != nil { - return nil, err + return nil, planError(err, query) } sendRule := &binlogdatapb.Rule{ Match: fromTable, } - enumValuesMap := map[string](map[string]string){} - for k, v := range rule.ConvertEnumToText { - tokensMap := schema.ParseEnumOrSetTokensMap(v) - enumValuesMap[k] = tokensMap - } - if expr, ok := sel.SelectExprs[0].(*sqlparser.StarExpr); ok { // If it's a "select *", we return a partial plan, and complete // it when we get back field info from the stream. 
if len(sel.SelectExprs) != 1 { - return nil, fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + return nil, planError(fmt.Errorf("unsupported mix of '*' and columns"), sqlparser.String(sel)) } if !expr.TableName.IsEmpty() { - return nil, fmt.Errorf("unsupported qualifier for '*' expression: %v", sqlparser.String(expr)) + return nil, planError(fmt.Errorf("unsupported qualifier for '*' expression"), sqlparser.String(expr)) } sendRule.Filter = query tablePlan := &TablePlan{ @@ -241,9 +245,9 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum SendRule: sendRule, Lastpk: lastpk, Stats: stats, - EnumValuesMap: enumValuesMap, ConvertCharset: rule.ConvertCharset, ConvertIntToEnum: rule.ConvertIntToEnum, + CollationEnv: collationEnv, } return tablePlan, nil @@ -255,14 +259,15 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum From: sel.From, Where: sel.Where, }, - lastpk: lastpk, - colInfos: colInfos, - stats: stats, - source: source, + lastpk: lastpk, + colInfos: colInfos, + stats: stats, + source: source, + collationEnv: collationEnv, } if err := tpb.analyzeExprs(sel.SelectExprs); err != nil { - return nil, err + return nil, planError(err, sqlparser.String(sel)) } // It's possible that the target table does not materialize all // the primary keys of the source table. 
In such situations, @@ -277,7 +282,7 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum } } if err := tpb.analyzeGroupBy(sel.GroupBy); err != nil { - return nil, err + return nil, planError(err, sqlparser.String(sel)) } targetKeyColumnNames, err := textutil.SplitUnescape(rule.TargetUniqueKeyColumns, ",") if err != nil { @@ -309,6 +314,9 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum if rule.SourceUniqueKeyColumns != "" { commentsList = append(commentsList, fmt.Sprintf(`ukColumns="%s"`, rule.SourceUniqueKeyColumns)) } + if rule.ForceUniqueKey != "" { + commentsList = append(commentsList, fmt.Sprintf(`ukForce="%s"`, rule.ForceUniqueKey)) + } if len(commentsList) > 0 { comments := sqlparser.Comments{ fmt.Sprintf(`/*vt+ %s */`, strings.Join(commentsList, " ")), @@ -319,7 +327,6 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum tablePlan := tpb.generate() tablePlan.SendRule = sendRule - tablePlan.EnumValuesMap = enumValuesMap tablePlan.ConvertCharset = rule.ConvertCharset tablePlan.ConvertIntToEnum = rule.ConvertIntToEnum return tablePlan, nil @@ -361,6 +368,7 @@ func (tpb *tablePlanBuilder) generate() *TablePlan { Insert: tpb.generateInsertStatement(), Update: tpb.generateUpdateStatement(), Delete: tpb.generateDeleteStatement(), + MultiDelete: tpb.generateMultiDeleteStatement(), PKReferences: pkrefs, PKIndices: tpb.pkIndices, Stats: tpb.stats, @@ -369,31 +377,32 @@ func (tpb *tablePlanBuilder) generate() *TablePlan { TablePlanBuilder: tpb, PartialInserts: make(map[string]*sqlparser.ParsedQuery, 0), PartialUpdates: make(map[string]*sqlparser.ParsedQuery, 0), + CollationEnv: tpb.collationEnv, } } -func analyzeSelectFrom(query string) (sel *sqlparser.Select, from string, err error) { - statement, err := sqlparser.Parse(query) +func analyzeSelectFrom(query string, parser *sqlparser.Parser) (sel *sqlparser.Select, from string, err error) { + statement, err := 
parser.Parse(query) if err != nil { return nil, "", err } sel, ok := statement.(*sqlparser.Select) if !ok { - return nil, "", fmt.Errorf("unexpected: %v", sqlparser.String(statement)) + return nil, "", fmt.Errorf("unsupported non-select statement") } if sel.Distinct { - return nil, "", fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + return nil, "", fmt.Errorf("unsupported distinct clause") } if len(sel.From) > 1 { - return nil, "", fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + return nil, "", fmt.Errorf("unsupported multi-table usage") } node, ok := sel.From[0].(*sqlparser.AliasedTableExpr) if !ok { - return nil, "", fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + return nil, "", fmt.Errorf("unsupported from expression (%T)", sel.From[0]) } fromTable := sqlparser.GetTableName(node.Expr) if fromTable.IsEmpty() { - return nil, "", fmt.Errorf("unexpected: %v", sqlparser.String(sel)) + return nil, "", fmt.Errorf("unsupported from source (%T)", node.Expr) } return sel, fromTable.String(), nil } @@ -412,7 +421,7 @@ func (tpb *tablePlanBuilder) analyzeExprs(selExprs sqlparser.SelectExprs) error func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr, error) { aliased, ok := selExpr.(*sqlparser.AliasedExpr) if !ok { - return nil, fmt.Errorf("unexpected: %v", sqlparser.String(selExpr)) + return nil, fmt.Errorf("invalid expression: %v", sqlparser.String(selExpr)) } as := aliased.As if as.IsEmpty() { @@ -461,7 +470,7 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr switch fname := expr.Name.Lowered(); fname { case "keyspace_id": if len(expr.Exprs) != 0 { - return nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + return nil, fmt.Errorf("unsupported multiple keyspace_id expressions: %v", sqlparser.String(expr)) } tpb.sendSelect.SelectExprs = append(tpb.sendSelect.SelectExprs, &sqlparser.AliasedExpr{Expr: aliased.Expr}) // The vstreamer responds with "keyspace_id" as the field name for this 
request. @@ -471,7 +480,7 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr } if expr, ok := aliased.Expr.(sqlparser.AggrFunc); ok { if sqlparser.IsDistinct(expr) { - return nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + return nil, fmt.Errorf("unsupported distinct expression usage: %v", sqlparser.String(expr)) } switch fname := expr.AggrName(); fname { case "count": @@ -482,11 +491,11 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr return cexpr, nil case "sum": if len(expr.GetArgs()) != 1 { - return nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + return nil, fmt.Errorf("unsupported multiple columns in sum clause: %v", sqlparser.String(expr)) } innerCol, ok := expr.GetArg().(*sqlparser.ColName) if !ok { - return nil, fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + return nil, fmt.Errorf("unsupported non-column name in sum clause: %v", sqlparser.String(expr)) } if !innerCol.Qualifier.IsEmpty() { return nil, fmt.Errorf("unsupported qualifier for column: %v", sqlparser.String(innerCol)) @@ -509,7 +518,7 @@ func (tpb *tablePlanBuilder) analyzeExpr(selExpr sqlparser.SelectExpr) (*colExpr case *sqlparser.Subquery: return false, fmt.Errorf("unsupported subquery: %v", sqlparser.String(node)) case sqlparser.AggrFunc: - return false, fmt.Errorf("unexpected: %v", sqlparser.String(node)) + return false, fmt.Errorf("unsupported aggregation function: %v", sqlparser.String(node)) } return true, nil }, aliased.Expr) @@ -528,15 +537,15 @@ func (tpb *tablePlanBuilder) addCol(ident sqlparser.IdentifierCI) { }) } -func (tpb *tablePlanBuilder) analyzeGroupBy(groupBy sqlparser.GroupBy) error { +func (tpb *tablePlanBuilder) analyzeGroupBy(groupBy *sqlparser.GroupBy) error { if groupBy == nil { // If there's no grouping, the it's an insertNormal. 
return nil } - for _, expr := range groupBy { + for _, expr := range groupBy.Exprs { colname, ok := expr.(*sqlparser.ColName) if !ok { - return fmt.Errorf("unexpected: %v", sqlparser.String(expr)) + return fmt.Errorf("unsupported non-column name or alias in group by clause: %v", sqlparser.String(expr)) } cexpr := tpb.findCol(colname.Name) if cexpr == nil { @@ -870,6 +879,18 @@ func (tpb *tablePlanBuilder) generateDeleteStatement() *sqlparser.ParsedQuery { return buf.ParsedQuery() } +func (tpb *tablePlanBuilder) generateMultiDeleteStatement() *sqlparser.ParsedQuery { + if vttablet.VReplicationExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching == 0 || + (len(tpb.pkCols)+len(tpb.extraSourcePkCols)) != 1 { + return nil + } + return sqlparser.BuildParsedQuery("delete from %s where %s in %a", + sqlparser.String(tpb.name), + sqlparser.String(tpb.pkCols[0].colName), + "::bulk_pks", + ) +} + func (tpb *tablePlanBuilder) generateWhere(buf *sqlparser.TrackedBuffer, bvf *bindvarFormatter) { buf.WriteString(" where ") bvf.mode = bvBefore diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils.go b/go/vt/vttablet/tabletmanager/vreplication/utils.go index 21c3a61c9f1..2b80bfb62a2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/utils.go +++ b/go/vt/vttablet/tabletmanager/vreplication/utils.go @@ -24,6 +24,8 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ -33,6 +35,8 @@ import ( const ( vreplicationLogTableName = "vreplication_log" + // This comes from the fact that the message column in the vreplication_log table is of type TEXT. 
+ maxVReplicationLogMessageLen = 65535 ) const ( @@ -82,46 +86,50 @@ func getLastLog(dbClient *vdbClient, vreplID int32) (id int64, typ, state, messa return id, typ, state, message, nil } -func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message string) error { +func insertLog(dbClient *vdbClient, typ string, vreplID int32, state, message string) { // getLastLog returns the last log for a stream. During insertion, if the type/state/message match we do not insert // a new log but increment the count. This prevents spamming of the log table in case the same message is logged continuously. id, _, lastLogState, lastLogMessage, err := getLastLog(dbClient, vreplID) if err != nil { - return err + log.Errorf("Could not insert vreplication_log record because we failed to get the last log record: %v", err) + return } if typ == LogStateChange && state == lastLogState { // handles case where current state is Running, controller restarts after an error and initializes the state Running - return nil + return } var query string if id > 0 && message == lastLogMessage { query = fmt.Sprintf("update %s.vreplication_log set count = count + 1 where id = %d", sidecar.GetIdentifier(), id) } else { buf := sqlparser.NewTrackedBuffer(nil) + if len(message) > maxVReplicationLogMessageLen { + message, err = textutil.TruncateText(message, maxVReplicationLogMessageLen, binlogplayer.TruncationLocation, binlogplayer.TruncationIndicator) + if err != nil { + log.Errorf("Could not insert vreplication_log record because we failed to truncate the message: %v", err) + return + } + } buf.Myprintf("insert into %s.vreplication_log(vrepl_id, type, state, message) values(%s, %s, %s, %s)", sidecar.GetIdentifier(), strconv.Itoa(int(vreplID)), encodeString(typ), encodeString(state), encodeString(message)) query = buf.ParsedQuery().Query } if _, err = dbClient.ExecuteFetch(query, 10000); err != nil { - return fmt.Errorf("could not insert into log table: %v: %v", query, err) + 
log.Errorf("Could not insert into vreplication_log table: %v: %v", query, err) } - return nil } -// insertLogWithParams is called when a stream is created. The attributes of the stream are stored as a json string -func insertLogWithParams(dbClient *vdbClient, action string, vreplID int32, params map[string]string) error { +// insertLogWithParams is called when a stream is created. The attributes of the stream are stored as a json string. +func insertLogWithParams(dbClient *vdbClient, action string, vreplID int32, params map[string]string) { var message string if params != nil { obj, _ := json.Marshal(params) message = string(obj) } - if err := insertLog(dbClient, action, vreplID, params["state"], message); err != nil { - return err - } - return nil + insertLog(dbClient, action, vreplID, params["state"], message) } -// isUnrecoverableError returns true if vreplication cannot recover from the given error and should completely terminate +// isUnrecoverableError returns true if vreplication cannot recover from the given error and should completely terminate. func isUnrecoverableError(err error) bool { if err == nil { return false diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go new file mode 100644 index 00000000000..bfe79036f3c --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/textutil" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +func TestInsertLogTruncation(t *testing.T) { + dbClient := binlogplayer.NewMockDBClient(t) + defer dbClient.Close() + dbClient.RemoveInvariant("insert into _vt.vreplication_log") // Otherwise the insert will be ignored + stats := binlogplayer.NewStats() + defer stats.Stop() + vdbClient := newVDBClient(dbClient, stats) + defer vdbClient.Close() + vrID := int32(1) + typ := "Testing" + state := binlogdatapb.VReplicationWorkflowState_Error.String() + + insertStmtf := "insert into _vt.vreplication_log(vrepl_id, type, state, message) values(%d, '%s', '%s', %s)" + + tests := []struct { + message string + expectTruncation bool + }{ + { + message: "Simple message that's not truncated", + }, + { + message: "Simple message that needs to be truncated " + strings.Repeat("a", 80000) + " cuz it's long", + expectTruncation: true, + }, + { + message: "Simple message that doesn't need to be truncated " + strings.Repeat("b", 64000) + " cuz it's not quite too long", + }, + { + message: "Message that is just barely short enough " + strings.Repeat("c", maxVReplicationLogMessageLen-(len("Message that is just barely short enough ")+len(" so it doesn't get truncated"))) + " so it doesn't get truncated", + }, + { + message: "Message that is just barely too long " + strings.Repeat("d", maxVReplicationLogMessageLen-(len("Message that is just barely too long ")+len(" so it gets truncated"))+1) + " so it gets truncated", + expectTruncation: true, + }, + { + message: "Super long message brosef wut r ya doin " + strings.Repeat("e", 60000) + strings.Repeat("f", 60000) + " so maybe don't do that to yourself and your friends", + expectTruncation: true, + }, + { + message: "Super duper long message brosef wut r 
ya doin " + strings.Repeat("g", 120602) + strings.Repeat("h", 120001) + " so maybe really don't do that to yourself and your friends", + expectTruncation: true, + }, + } + for _, tc := range tests { + t.Run("insertLog", func(t *testing.T) { + var ( + messageOut string + err error + ) + if tc.expectTruncation { + messageOut, err = textutil.TruncateText(tc.message, maxVReplicationLogMessageLen, binlogplayer.TruncationLocation, binlogplayer.TruncationIndicator) + require.NoError(t, err) + require.True(t, strings.HasPrefix(messageOut, tc.message[:1024])) // Confirm we still have the same beginning + require.True(t, strings.HasSuffix(messageOut, tc.message[len(tc.message)-1024:])) // Confirm we still have the same end + require.True(t, strings.Contains(messageOut, binlogplayer.TruncationIndicator)) // Confirm we have the truncation text + t.Logf("Original message length: %d, truncated message length: %d", len(tc.message), len(messageOut)) + } else { + messageOut = tc.message + } + require.LessOrEqual(t, len(messageOut), maxVReplicationLogMessageLen) + dbClient.ExpectRequest(fmt.Sprintf(insertStmtf, vrID, typ, state, encodeString(messageOut)), &sqltypes.Result{}, nil) + insertLog(vdbClient, typ, vrID, state, tc.message) + dbClient.Wait() + }) + } +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index cbf524c54c3..9057a55707f 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -21,26 +21,28 @@ import ( "fmt" "io" "math" + "slices" "strconv" "strings" "time" + "golang.org/x/exp/maps" "google.golang.org/protobuf/encoding/prototext" - "vitess.io/vitess/go/vt/vttablet" - "vitess.io/vitess/go/bytes2" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/pools" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - 
querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) type vcopier struct { @@ -219,7 +221,7 @@ func newVCopierCopyWorker( func (vc *vcopier) initTablesForCopy(ctx context.Context) error { defer vc.vr.dbClient.Rollback() - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.env.CollationEnv(), vc.vr.vre.env.Parser()) if err != nil { return err } @@ -230,9 +232,12 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { if len(plan.TargetTables) != 0 { var buf strings.Builder buf.WriteString("insert into _vt.copy_state(vrepl_id, table_name) values ") + // Sort the tables by name to ensure a consistent order. 
+ tableNames := maps.Keys(plan.TargetTables) + slices.Sort(tableNames) prefix := "" - for name := range plan.TargetTables { - fmt.Fprintf(&buf, "%s(%d, %s)", prefix, vc.vr.id, encodeString(name)) + for _, tableName := range tableNames { + fmt.Fprintf(&buf, "%s(%d, %s)", prefix, vc.vr.id, encodeString(tableName)) prefix = ", " } if _, err := vc.vr.dbClient.Execute(buf.String()); err != nil { @@ -241,10 +246,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Copying, ""); err != nil { return err } - if err := vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase started for %d table(s)", - len(plan.TargetTables))); err != nil { - return err - } + vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase started for %d table(s)", len(plan.TargetTables))) if vc.vr.supportsDeferredSecondaryKeys() { settings, err := binlogplayer.ReadVRSettings(vc.vr.dbClient, vc.vr.id) @@ -252,20 +254,15 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { return err } if settings.DeferSecondaryKeys { - if err := vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase temporarily dropping secondary keys for %d table(s)", - len(plan.TargetTables))); err != nil { - return err - } - for name := range plan.TargetTables { - if err := vc.vr.stashSecondaryKeys(ctx, name); err != nil { + vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase temporarily dropping secondary keys for %d table(s)", len(plan.TargetTables))) + for _, tableName := range tableNames { + if err := vc.vr.stashSecondaryKeys(ctx, tableName); err != nil { return err } } - if err := vc.vr.insertLog(LogCopyStart, + vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase finished dropping secondary keys and saving post copy actions to restore them for %d table(s)", - len(plan.TargetTables))); err != nil { - return err - } + len(plan.TargetTables))) } } } else { @@ -294,7 +291,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) 
error { // primary key that was copied. A nil Result means that nothing has been copied. // A table that was fully copied is removed from copyState. func (vc *vcopier) copyNext(ctx context.Context, settings binlogplayer.VRSettings) error { - qr, err := vc.vr.dbClient.Execute(fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state group by vrepl_id, table_name)", vc.vr.id)) + qr, err := vc.vr.dbClient.Execute(fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state group by vrepl_id, table_name) order by table_name", vc.vr.id)) if err != nil { return err } @@ -385,7 +382,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma log.Infof("Copying table %s, lastpk: %v", tableName, copyState[tableName]) - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.env.CollationEnv(), vc.vr.vre.env.Parser()) if err != nil { return err } @@ -612,7 +609,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma case result := <-resultCh: switch result.state { case vcopierCopyTaskCancel: - // A task cancelation probably indicates an expired context due + // A task cancellation probably indicates an expired context due // to a PlannedReparentShard or elapsed copy phase duration, // neither of which are error conditions. case vcopierCopyTaskComplete: @@ -833,7 +830,7 @@ func (vtl *vcopierCopyTaskLifecycle) after(state vcopierCopyTaskState) *vcopierC } // before returns a vcopierCopyTaskHooks that can be used to register callbacks -// to be triggered before the the specified vcopierCopyTaskState. +// to be triggered before the specified vcopierCopyTaskState. 
func (vtl *vcopierCopyTaskLifecycle) before(state vcopierCopyTaskState) *vcopierCopyTaskHooks { key := "before:" + state.String() if _, ok := vtl.hooks[key]; !ok { @@ -1087,7 +1084,7 @@ func (vbc *vcopierCopyWorker) execute(ctx context.Context, task *vcopierCopyTask advanceFn = func(context.Context, *vcopierCopyTaskArgs) error { // Commit. if err := vbc.vdbClient.Commit(); err != nil { - return vterrors.Wrapf(err, "error commiting transaction") + return vterrors.Wrapf(err, "error committing transaction") } return nil } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go index 4da072e3955..02e1188cdb7 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go @@ -54,7 +54,7 @@ func newCopyAllState(vc *vcopier) (*copyAllState, error) { state := ©AllState{ vc: vc, } - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.env.CollationEnv(), vc.vr.vre.env.Parser()) if err != nil { return nil, err } @@ -303,7 +303,6 @@ func (vc *vcopier) copyAll(ctx context.Context, settings binlogplayer.VRSettings // deleteCopyState deletes the copy state entry for a table, signifying that the copy phase is complete for that table. 
func (vc *vcopier) deleteCopyState(tableName string) error { log.Infof("Deleting copy state for table %s", tableName) - //FIXME get sidecar db name delQuery := fmt.Sprintf("delete from _vt.copy_state where table_name=%s and vrepl_id = %d", encodeString(tableName), vc.vr.id) if _, err := vc.vr.dbClient.Execute(delQuery); err != nil { return err diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go index 82a6d211b4f..8f23f28c87d 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_test.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "os" + "regexp" "strings" "testing" "time" @@ -102,12 +103,12 @@ func testPlayerCopyCharPK(t *testing.T) { defer func() { vttablet.CopyPhaseDuration = savedCopyPhaseDuration }() savedWaitRetryTime := waitRetryTime - // waitRetry time should be very low to cause the wait loop to execute multipel times. + // waitRetry time should be very low to cause the wait loop to execute multiple times. 
waitRetryTime = 10 * time.Millisecond defer func() { waitRetryTime = savedWaitRetryTime }() execStatements(t, []string{ - "create table src(idc binary(2) , val int, primary key(idc))", + "create table src(idc binary(2), val int, primary key(idc))", "insert into src values('a', 1), ('c', 2)", fmt.Sprintf("create table %s.dst(idc binary(2), val int, primary key(idc))", vrepldb), }) @@ -115,7 +116,6 @@ func testPlayerCopyCharPK(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.dst", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) count := 0 vstreamRowsSendHook = func(ctx context.Context) { @@ -214,7 +214,7 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { defer func() { waitRetryTime = savedWaitRetryTime }() execStatements(t, []string{ - "create table src(idc varchar(20), val int, primary key(idc))", + "create table src(idc varchar(20), val int, primary key(idc)) character set utf8mb3", // Use utf8mb3 to get a consistent default collation across MySQL versions "insert into src values('a', 1), ('c', 2)", fmt.Sprintf("create table %s.dst(idc varchar(20), val int, primary key(idc))", vrepldb), }) @@ -222,7 +222,6 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.dst", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) count := 0 vstreamRowsSendHook = func(ctx context.Context) { @@ -284,7 +283,7 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { "/update _vt.vreplication set state='Copying'", // Copy mode. "insert into dst(idc,val) values ('a',1)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\\"a\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:33 flags:20483} rows:{lengths:1 values:\\"a\\"}'.*`, // Copy-catchup mode. 
`/insert into dst\(idc,val\) select 'B', 3 from dual where \( .* 'B' COLLATE .* \) <= \( .* 'a' COLLATE .* \)`, ).Then(func(expect qh.ExpectationSequencer) qh.ExpectationSequencer { @@ -294,11 +293,11 @@ func testPlayerCopyVarcharPKCaseInsensitive(t *testing.T) { //upd1 := expect. upd1 := expect.Then(qh.Eventually( "insert into dst(idc,val) values ('B',3)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\\"B\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:33 flags:20483} rows:{lengths:1 values:\\"B\\"}'.*`, )) upd2 := expect.Then(qh.Eventually( "insert into dst(idc,val) values ('c',2)", - `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\\"c\\"}'.*`, + `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"idc\\" type:VARCHAR charset:33 flags:20483} rows:{lengths:1 values:\\"c\\"}'.*`, )) upd1.Then(upd2.Eventually()) return upd2 @@ -332,7 +331,7 @@ func testPlayerCopyVarcharCompositePKCaseSensitiveCollation(t *testing.T) { defer func() { vttablet.CopyPhaseDuration = savedCopyPhaseDuration }() savedWaitRetryTime := waitRetryTime - // waitRetry time should be very low to cause the wait loop to execute multipel times. + // waitRetry time should be very low to cause the wait loop to execute multiple times. 
waitRetryTime = 10 * time.Millisecond defer func() { waitRetryTime = savedWaitRetryTime }() @@ -452,7 +451,6 @@ func testPlayerCopyTablesWithFK(t *testing.T) { "drop table src2", fmt.Sprintf("drop table %s.dst2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -562,26 +560,32 @@ func testPlayerCopyTables(t *testing.T) { defer deleteTablet(addTablet(100)) execStatements(t, []string{ + "create table ast1(id int, primary key(id))", "create table src1(id int, val varbinary(128), d decimal(8,0), j json, primary key(id))", "insert into src1 values(2, 'bbb', 1, '{\"foo\": \"bar\"}'), (1, 'aaa', 0, JSON_ARRAY(123456789012345678901234567890, \"abcd\")), (3, 'ccc', 2, 'null'), (4, 'ddd', 3, '{\"name\": \"matt\", \"size\": null}'), (5, 'eee', 4, null)", + fmt.Sprintf("create table %s.ast1(id int, primary key(id))", vrepldb), fmt.Sprintf("create table %s.dst1(id int, val varbinary(128), val2 varbinary(128), d decimal(8,0), j json, primary key(id))", vrepldb), "create table yes(id int, val varbinary(128), primary key(id))", fmt.Sprintf("create table %s.yes(id int, val varbinary(128), primary key(id))", vrepldb), "create table no(id int, val varbinary(128), primary key(id))", }) defer execStatements(t, []string{ + "drop table ast1", "drop table src1", + fmt.Sprintf("drop table %s.ast1", vrepldb), fmt.Sprintf("drop table %s.dst1", vrepldb), "drop table yes", fmt.Sprintf("drop table %s.yes", vrepldb), "drop table no", }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "dst1", Filter: "select id, val, val as val2, d, j from src1", + }, { + Match: "ast1", + Filter: "select * from ast1", }, { Match: "/yes", }}, @@ -595,9 +599,7 @@ func testPlayerCopyTables(t *testing.T) { } query := binlogplayer.CreateVReplicationState("test", bls, "", binlogdatapb.VReplicationWorkflowState_Init, playerEngine.dbName, 0, 0) qr, err := 
playerEngine.Exec(query) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) defer func() { query := fmt.Sprintf("delete from _vt.vreplication where id = %d", qr.InsertID) if _, err := playerEngine.Exec(query); err != nil { @@ -607,15 +609,24 @@ func testPlayerCopyTables(t *testing.T) { }() expectDBClientQueries(t, qh.Expect( - "/insert into _vt.vreplication", + // Filters should be lexicographically ordered by name. + regexp.QuoteMeta("/insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, options) values ('test', 'keyspace:\\\"vttest\\\" shard:\\\"0\\\" filter:{rules:{match:\\\"ast1\\\" filter:\\\"select * from ast1\\\"} rules:{match:\\\"dst1\\\" filter:\\\"select id, val, val as val2, d, j from src1\\\"} rules:{match:\\\"/yes\\\"}}'"), "/update _vt.vreplication set message='Picked source tablet.*", // Create the list of tables to copy and transition to Copying state. "begin", - "/insert into _vt.copy_state", + // The table names should be lexicographically ordered by name. + fmt.Sprintf("insert into _vt.copy_state(vrepl_id, table_name) values (%d, 'ast1'), (%d, 'dst1'), (%d, 'yes')", qr.InsertID, qr.InsertID, qr.InsertID), "/update _vt.vreplication set state='Copying'", "commit", // The first fast-forward has no starting point. So, it just saves the current position. "/update _vt.vreplication set pos=", + // Now the tables should be copied in lexicographical order: ast1, dst1, yes. + // Nothing to copy from ast1. Delete from copy_state. + "/delete cs, pca from _vt.copy_state as cs left join _vt.post_copy_action as pca on cs.vrepl_id=pca.vrepl_id and cs.table_name=pca.table_name.*ast1", + // The next FF executes and updates the position before copying. 
+ "begin", + "/update _vt.vreplication set pos=", + "commit", "begin", "insert into dst1(id,val,val2,d,j) values (1,'aaa','aaa',0,JSON_ARRAY(123456789012345678901234567890, _utf8mb4'abcd')), (2,'bbb','bbb',1,JSON_OBJECT(_utf8mb4'foo', _utf8mb4'bar')), (3,'ccc','ccc',2,CAST(_utf8mb4'null' as JSON)), (4,'ddd','ddd',3,JSON_OBJECT(_utf8mb4'name', _utf8mb4'matt', _utf8mb4'size', null)), (5,'eee','eee',4,null)", `/insert into _vt.copy_state \(lastpk, vrepl_id, table_name\) values \('fields:{name:\\"id\\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\\"5\\"}'.*`, @@ -693,7 +704,6 @@ func testPlayerCopyBigTable(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.dst", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) count := 0 vstreamRowsSendHook = func(ctx context.Context) { @@ -811,7 +821,7 @@ func testPlayerCopyWildcardRule(t *testing.T) { defer func() { vttablet.CopyPhaseDuration = savedCopyPhaseDuration }() savedWaitRetryTime := waitRetryTime - // waitRetry time should be very low to cause the wait loop to execute multipel times. + // waitRetry time should be very low to cause the wait loop to execute multiple times. waitRetryTime = 10 * time.Millisecond defer func() { waitRetryTime = savedWaitRetryTime }() @@ -824,7 +834,6 @@ func testPlayerCopyWildcardRule(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.src", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) count := 0 vstreamRowsSendHook = func(ctx context.Context) { @@ -953,7 +962,6 @@ func testPlayerCopyTableContinuation(t *testing.T) { "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -979,7 +987,7 @@ func testPlayerCopyTableContinuation(t *testing.T) { "update src1 set id2=10 where id1=5", // move row from within to outside range. "update src1 set id1=12 where id1=6", - // move row from outside to witihn range. 
+ // move row from outside to within range. "update src1 set id1=4 where id1=11", // modify the copied table. "update copied set val='bbb' where id=1", @@ -1120,7 +1128,6 @@ func testPlayerCopyWildcardTableContinuation(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.dst", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1217,7 +1224,6 @@ func TestPlayerCopyWildcardTableContinuationWithOptimizeInserts(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.dst", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1343,7 +1349,6 @@ func testPlayerCopyTablesStopAfterCopy(t *testing.T) { "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1429,7 +1434,6 @@ func testPlayerCopyTablesGIPK(t *testing.T) { "drop table src2", fmt.Sprintf("drop table %s.dst2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1520,7 +1524,6 @@ func testPlayerCopyTableCancel(t *testing.T) { "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) saveTimeout := vttablet.CopyPhaseDuration vttablet.CopyPhaseDuration = 1 * time.Millisecond @@ -1611,7 +1614,6 @@ func testPlayerCopyTablesWithGeneratedColumn(t *testing.T) { "drop table src2", fmt.Sprintf("drop table %s.dst2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1676,26 +1678,29 @@ func TestCopyTablesWithInvalidDates(t *testing.T) { func testCopyTablesWithInvalidDates(t *testing.T) { defer deleteTablet(addTablet(100)) - execStatements(t, []string{ - "create table src1(id int, dt date, primary key(id))", - fmt.Sprintf("create table 
%s.dst1(id int, dt date, primary key(id))", vrepldb), - "insert into src1 values(1, '2020-01-12'), (2, '0000-00-00');", - }) + conn, err := env.Mysqld.GetDbaConnection(context.Background()) + require.NoError(t, err) // default mysql flavor allows invalid dates: so disallow explicitly for this test - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "SET @@global.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')"); err != nil { + if _, err := conn.ExecuteFetch("SET @@session.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')", 0, false); err != nil { fmt.Fprintf(os.Stderr, "%v", err) } defer func() { - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "SET @@global.sql_mode=REPLACE(@@global.sql_mode, ',NO_ZERO_DATE,NO_ZERO_IN_DATE','')"); err != nil { + if _, err := conn.ExecuteFetch("SET @@session.sql_mode=REPLACE(@@session.sql_mode, ',NO_ZERO_DATE,NO_ZERO_IN_DATE','')", 0, false); err != nil { fmt.Fprintf(os.Stderr, "%v", err) } }() - defer execStatements(t, []string{ + + execConnStatements(t, conn, []string{ + "create table src1(id int, dt date, primary key(id))", + fmt.Sprintf("create table %s.dst1(id int, dt date, primary key(id))", vrepldb), + "insert into src1 values(1, '2020-01-12'), (2, '0000-00-00');", + }) + + defer execConnStatements(t, conn, []string{ "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go index c3941b0f1bb..39a8229efc6 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go @@ -19,12 +19,15 @@ package vreplication import ( "context" "io" + "strings" "time" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" 
"vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // vdbClient is a wrapper on binlogplayer.DBClient. @@ -35,6 +38,9 @@ type vdbClient struct { InTransaction bool startTime time.Time queries []string + queriesPos int64 + batchSize int64 + maxBatchSize int64 } func newVDBClient(dbclient binlogplayer.DBClient, stats *binlogplayer.Stats) *vdbClient { @@ -51,6 +57,13 @@ func (vc *vdbClient) Begin() error { if err := vc.DBClient.Begin(); err != nil { return err } + + // If we're batching, we only batch the contents of the + // transaction, which starts with the begin and ends with + // the commit. + vc.queriesPos = int64(len(vc.queries)) + vc.batchSize = 6 // begin and semicolon + vc.queries = append(vc.queries, "begin") vc.InTransaction = true vc.startTime = time.Now() @@ -63,10 +76,30 @@ func (vc *vdbClient) Commit() error { } vc.InTransaction = false vc.queries = nil + vc.batchSize = 0 vc.stats.Timings.Record(binlogplayer.BlplTransaction, vc.startTime) return nil } +// CommitTrxQueryBatch sends the current transaction's query batch -- which +// is often the full contents of the transaction, unless we've crossed +// the maxBatchSize one or more times -- down the wire to the database, +// including the final commit. 
+func (vc *vdbClient) CommitTrxQueryBatch() error { + vc.queries = append(vc.queries, "commit") + queries := strings.Join(vc.queries[vc.queriesPos:], ";") + for _, err := vc.DBClient.ExecuteFetchMulti(queries, -1); err != nil; { + return err + } + vc.InTransaction = false + vc.queries = nil + vc.queriesPos = 0 + vc.batchSize = 0 + vc.stats.TrxQueryBatchCount.Add("with_commit", 1) + vc.stats.Timings.Record(binlogplayer.BlplBatchTransaction, vc.startTime) + return nil +} + func (vc *vdbClient) Rollback() error { if !vc.InTransaction { return nil @@ -90,6 +123,43 @@ func (vc *vdbClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, return vc.DBClient.ExecuteFetch(query, maxrows) } +// AddQueryToTrxBatch adds the query to the current transaction's query +// batch. If this new query would cause the current batch to exceed +// the maxBatchSize, then the current unsent batch is sent down the +// wire and this query will be included in the next batch. +func (vc *vdbClient) AddQueryToTrxBatch(query string) error { + if !vc.InTransaction { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cannot batch query outside of a transaction: %s", query) + } + + addedSize := int64(len(query)) + 1 // Plus 1 for the semicolon + if vc.batchSize+addedSize > vc.maxBatchSize { + if _, err := vc.ExecuteTrxQueryBatch(); err != nil { + return err + } + } + vc.queries = append(vc.queries, query) + vc.batchSize += addedSize + + return nil +} + +// ExecuteQueryBatch sends the transaction's current batch of queries +// down the wire to the database. 
+func (vc *vdbClient) ExecuteTrxQueryBatch() ([]*sqltypes.Result, error) { + defer vc.stats.Timings.Record(binlogplayer.BlplMultiQuery, time.Now()) + + qrs, err := vc.DBClient.ExecuteFetchMulti(strings.Join(vc.queries[vc.queriesPos:], ";"), -1) + if err != nil { + return nil, err + } + vc.stats.TrxQueryBatchCount.Add("without_commit", 1) + vc.queriesPos += int64(len(vc.queries[vc.queriesPos:])) + vc.batchSize = 0 + + return qrs, nil +} + // Execute is ExecuteFetch without the maxrows. func (vc *vdbClient) Execute(query string) (*sqltypes.Result, error) { // Number of rows should never exceed relayLogMaxItems. diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 8eee211ff9e..d7b60a104c4 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -28,9 +28,9 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -47,6 +47,14 @@ type vplayer struct { replicatorPlan *ReplicatorPlan tablePlans map[string]*TablePlan + // These are set when creating the VPlayer based on whether the VPlayer + // is in batch (stmt and trx) execution mode or not. + query func(ctx context.Context, sql string) (*sqltypes.Result, error) + commit func() error + // If the VPlayer is in batch mode, we accumulate each transaction's statements + // that are then sent as a single multi-statement protocol request to the database. + batchMode bool + pos replication.Position // unsavedEvent is set any time we skip an event without // saving, which is on an empty commit. 
@@ -104,6 +112,47 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map settings.StopPos = pausePos saveStop = false } + + queryFunc := func(ctx context.Context, sql string) (*sqltypes.Result, error) { + return vr.dbClient.ExecuteWithRetry(ctx, sql) + } + commitFunc := func() error { + return vr.dbClient.Commit() + } + batchMode := false + if vttablet.VReplicationExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching != 0 { + batchMode = true + } + if batchMode { + // relayLogMaxSize is effectively the limit used when not batching. + maxAllowedPacket := int64(relayLogMaxSize) + // We explicitly do NOT want to batch this, we want to send it down the wire + // immediately so we use ExecuteFetch directly. + res, err := vr.dbClient.ExecuteFetch("select @@session.max_allowed_packet as max_allowed_packet", 1) + if err != nil { + log.Errorf("Error getting max_allowed_packet, will use the relay_log_max_size value of %d bytes: %v", relayLogMaxSize, err) + } else { + if maxAllowedPacket, err = res.Rows[0][0].ToInt64(); err != nil { + log.Errorf("Error getting max_allowed_packet, will use the relay_log_max_size value of %d bytes: %v", relayLogMaxSize, err) + } + } + // Leave 64 bytes of room for the commit to be sure that we have a more than + // ample buffer left. The default value of max_allowed_packet is 4MiB in 5.7 + // and 64MiB in 8.0 -- and the default for max_relay_log_size is 250000 + // bytes -- so we have plenty of room. 
+ maxAllowedPacket -= 64 + queryFunc = func(ctx context.Context, sql string) (*sqltypes.Result, error) { + if !vr.dbClient.InTransaction { // Should be sent down the wire immediately + return vr.dbClient.Execute(sql) + } + return nil, vr.dbClient.AddQueryToTrxBatch(sql) // Should become part of the trx batch + } + commitFunc = func() error { + return vr.dbClient.CommitTrxQueryBatch() // Commit the current trx batch + } + vr.dbClient.maxBatchSize = maxAllowedPacket + } + return &vplayer{ vr: vr, startPos: settings.StartPos, @@ -115,6 +164,9 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map tablePlans: make(map[string]*TablePlan), phase: phase, throttlerAppName: throttlerapp.VCopierName.ConcatenateString(vr.throttlerAppName()), + query: queryFunc, + commit: commitFunc, + batchMode: batchMode, } } @@ -128,7 +180,7 @@ func (vp *vplayer) play(ctx context.Context) error { return nil } - plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats) + plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats, vp.vr.vre.env.CollationEnv(), vp.vr.vre.env.Parser()) if err != nil { vp.vr.stats.ErrorCounts.Add([]string{"Plan"}, 1) return err @@ -152,8 +204,21 @@ func (vp *vplayer) play(ctx context.Context) error { // The foreign_key_checks value for a transaction is determined by the 2nd bit (least significant) of the flags: // - If set (1), foreign key checks are disabled. // - If unset (0), foreign key checks are enabled. -// updateFKCheck also updates the state for the first row event that this vplayer and hence the connection sees. +// updateFKCheck also updates the state for the first row event that this vplayer, and hence the db connection, sees. 
func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { + mustUpdate := false + if vp.vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { + // If this is an atomic copy, we must update the foreign_key_checks state even when the vplayer runs during + // the copy phase, i.e., for catchup and fastforward. + mustUpdate = true + } else if vp.vr.state == binlogdatapb.VReplicationWorkflowState_Running { + // If the vreplication workflow is in Running state, we must update the foreign_key_checks + // state for all workflow types. + mustUpdate = true + } + if !mustUpdate { + return nil + } dbForeignKeyChecksEnabled := true if flags2&NoForeignKeyCheckFlagBitmask == NoForeignKeyCheckFlagBitmask { dbForeignKeyChecksEnabled = false @@ -164,7 +229,7 @@ func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { return nil } log.Infof("Setting this session's foreign_key_checks to %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) - if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { + if _, err := vp.query(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { return fmt.Errorf("failed to set session foreign_key_checks: %w", err) } vp.foreignKeyChecksEnabled = dbForeignKeyChecksEnabled @@ -250,7 +315,7 @@ func (vp *vplayer) applyStmtEvent(ctx context.Context, event *binlogdatapb.VEven } if event.Type == binlogdatapb.VEventType_SAVEPOINT || vp.canAcceptStmtEvents { start := time.Now() - _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, sql) + _, err := vp.query(ctx, sql) vp.vr.stats.QueryTimings.Record(vp.phase, start) vp.vr.stats.QueryCount.Add(vp.phase, 1) return err @@ -266,27 +331,46 @@ func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.Row if tplan == nil { return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) } + applyFunc := 
func(sql string) (*sqltypes.Result, error) { + stats := NewVrLogStats("ROWCHANGE") + start := time.Now() + qr, err := vp.query(ctx, sql) + vp.vr.stats.QueryCount.Add(vp.phase, 1) + vp.vr.stats.QueryTimings.Record(vp.phase, start) + stats.Send(sql) + return qr, err + } + + if vp.batchMode && len(rowEvent.RowChanges) > 1 { + // If we have multiple delete row events for a table with a single PK column + // then we can perform a simple bulk DELETE using an IN clause. + if (rowEvent.RowChanges[0].Before != nil && rowEvent.RowChanges[0].After == nil) && + tplan.MultiDelete != nil { + _, err := tplan.applyBulkDeleteChanges(rowEvent.RowChanges, applyFunc, vp.vr.dbClient.maxBatchSize) + return err + } + // If we're done with the copy phase then we will be replicating all INSERTS + // regardless of the PK value and can use a single INSERT statment with + // multiple VALUES clauses. + if len(vp.copyState) == 0 && (rowEvent.RowChanges[0].Before == nil && rowEvent.RowChanges[0].After != nil) { + _, err := tplan.applyBulkInsertChanges(rowEvent.RowChanges, applyFunc, vp.vr.dbClient.maxBatchSize) + return err + } + } + for _, change := range rowEvent.RowChanges { - _, err := tplan.applyChange(change, func(sql string) (*sqltypes.Result, error) { - stats := NewVrLogStats("ROWCHANGE") - start := time.Now() - qr, err := vp.vr.dbClient.ExecuteWithRetry(ctx, sql) - vp.vr.stats.QueryCount.Add(vp.phase, 1) - vp.vr.stats.QueryTimings.Record(vp.phase, start) - stats.Send(sql) - return qr, err - }) - if err != nil { + if _, err := tplan.applyChange(change, applyFunc); err != nil { return err } } + return nil } -func (vp *vplayer) updatePos(ts int64) (posReached bool, err error) { +func (vp *vplayer) updatePos(ctx context.Context, ts int64) (posReached bool, err error) { vp.numAccumulatedHeartbeats = 0 update := binlogplayer.GenerateUpdatePos(vp.vr.id, vp.pos, time.Now().Unix(), ts, vp.vr.stats.CopyRowCount.Get(), vreplicationStoreCompressedGTID) - if _, err := vp.vr.dbClient.Execute(update); 
err != nil { + if _, err := vp.query(ctx, update); err != nil { return false, fmt.Errorf("error %v updating position", err) } vp.unsavedEvent = nil @@ -346,8 +430,8 @@ func (vp *vplayer) recordHeartbeat() error { // of transactions come in, with the last one being partial. In this case, all transactions // up to the last one have to be committed, and the final one must be partially applied. // -// Of the above events, the saveable ones are COMMIT, DDL, and OTHER. Eventhough -// A GTID comes as a separate event, it's not saveable until a subsequent saveable +// Of the above events, the saveable ones are COMMIT, DDL, and OTHER. Even though +// a GTID comes as a separate event, it's not saveable until a subsequent saveable // event occurs. VStreamer currently sequences the GTID to be sent just before // a saveable event, but we do not rely on this. To handle this, we only remember // the position when a GTID is encountered. The next saveable event causes the @@ -380,7 +464,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { if ctx.Err() != nil { return ctx.Err() } - // check throttler. + // Check throttler. if !vp.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, throttlerapp.Name(vp.throttlerAppName)) { _ = vp.vr.updateTimeThrottled(throttlerapp.VPlayerName) continue @@ -404,7 +488,7 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { // In both cases, now > timeLastSaved. If so, the GTID of the last unsavedEvent // must be saved. 
if time.Since(vp.timeLastSaved) >= idleTimeout && vp.unsavedEvent != nil { - posReached, err := vp.updatePos(vp.unsavedEvent.Timestamp) + posReached, err := vp.updatePos(ctx, vp.unsavedEvent.Timestamp) if err != nil { return err } @@ -503,11 +587,11 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m vp.unsavedEvent = event return nil } - posReached, err := vp.updatePos(event.Timestamp) + posReached, err := vp.updatePos(ctx, event.Timestamp) if err != nil { return err } - if err := vp.vr.dbClient.Commit(); err != nil { + if err := vp.commit(); err != nil { return err } if posReached { @@ -560,7 +644,7 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) } // Just update the position. - posReached, err := vp.updatePos(event.Timestamp) + posReached, err := vp.updatePos(ctx, event.Timestamp) if err != nil { return err } @@ -573,10 +657,11 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m log.Errorf("internal error: vplayer is in a transaction on event: %v", event) return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) } + vp.vr.stats.DDLEventActions.Add(vp.vr.source.OnDdl.String(), 1) // Record the DDL handling switch vp.vr.source.OnDdl { case binlogdatapb.OnDDLAction_IGNORE: // We still have to update the position. 
- posReached, err := vp.updatePos(event.Timestamp) + posReached, err := vp.updatePos(ctx, event.Timestamp) if err != nil { return err } @@ -587,13 +672,13 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m if err := vp.vr.dbClient.Begin(); err != nil { return err } - if _, err := vp.updatePos(event.Timestamp); err != nil { + if _, err := vp.updatePos(ctx, event.Timestamp); err != nil { return err } if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil { return err } - if err := vp.vr.dbClient.Commit(); err != nil { + if err := vp.commit(); err != nil { return err } return io.EOF @@ -602,11 +687,11 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m // So, we apply the DDL first, and then save the position. // Manual intervention may be needed if there is a partial // failure here. - if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, event.Statement); err != nil { + if _, err := vp.query(ctx, event.Statement); err != nil { return err } stats.Send(fmt.Sprintf("%v", event.Statement)) - posReached, err := vp.updatePos(event.Timestamp) + posReached, err := vp.updatePos(ctx, event.Timestamp) if err != nil { return err } @@ -614,11 +699,11 @@ func (vp *vplayer) applyEvent(ctx context.Context, event *binlogdatapb.VEvent, m return io.EOF } case binlogdatapb.OnDDLAction_EXEC_IGNORE: - if _, err := vp.vr.dbClient.ExecuteWithRetry(ctx, event.Statement); err != nil { + if _, err := vp.query(ctx, event.Statement); err != nil { log.Infof("Ignoring error: %v for DDL: %s", err, event.Statement) } stats.Send(fmt.Sprintf("%v", event.Statement)) - posReached, err := vp.updatePos(event.Timestamp) + posReached, err := vp.updatePos(ctx, event.Timestamp) if err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go index 
3b215d03791..f79f7a42744 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_flaky_test.go @@ -21,6 +21,7 @@ import ( "fmt" "math" "os" + "regexp" "strconv" "strings" "sync" @@ -65,7 +66,6 @@ func TestPlayerGeneratedInvisiblePrimaryKey(t *testing.T) { "drop table t2", fmt.Sprintf("drop table %s.t2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -143,7 +143,6 @@ func TestPlayerInvisibleColumns(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -185,7 +184,6 @@ func TestPlayerInvisibleColumns(t *testing.T) { output := qh.Expect(tcases.output) expectNontxQueries(t, output) time.Sleep(1 * time.Second) - log.Flush() if tcases.table != "" { expectData(t, tcases.table, tcases.data) } @@ -241,7 +239,6 @@ func TestVReplicationTimeUpdated(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -308,7 +305,6 @@ func TestCharPK(t *testing.T) { "drop table t4", fmt.Sprintf("drop table %s.t4", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -423,7 +419,6 @@ func TestRollup(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -480,7 +475,6 @@ func TestPlayerSavepoint(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -548,8 +542,6 @@ func TestPlayerForeignKeyCheck(t *testing.T) { fmt.Sprintf("drop table 
%s.parent", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) - filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "/.*", @@ -593,7 +585,6 @@ func TestPlayerStatementModeWithFilter(t *testing.T) { defer execStatements(t, []string{ "drop table src1", }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -638,7 +629,6 @@ func TestPlayerStatementMode(t *testing.T) { "drop table src1", fmt.Sprintf("drop table %s.src1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -691,8 +681,8 @@ func TestPlayerFilters(t *testing.T) { fmt.Sprintf("create table %s.dst4(id1 int, val varbinary(128), primary key(id1))", vrepldb), "create table src5(id1 int, id2 int, val varbinary(128), primary key(id1))", fmt.Sprintf("create table %s.dst5(id1 int, val varbinary(128), primary key(id1))", vrepldb), - "create table srcCharset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", - fmt.Sprintf("create table %s.dstCharset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, val2 varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", vrepldb), + "create table src_charset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", + fmt.Sprintf("create table %s.dst_charset(id1 int, val varchar(128) character set utf8mb4 collate utf8mb4_bin, val2 varchar(128) character set utf8mb4 collate utf8mb4_bin, primary key(id1))", vrepldb), }) defer execStatements(t, []string{ "drop table src1", @@ -710,10 +700,9 @@ func TestPlayerFilters(t *testing.T) { fmt.Sprintf("drop table %s.dst4", vrepldb), "drop table src5", fmt.Sprintf("drop table %s.dst5", vrepldb), - "drop table srcCharset", - fmt.Sprintf("drop table %s.dstCharset", vrepldb), + "drop table src_charset", + fmt.Sprintf("drop table %s.dst_charset", vrepldb), }) - 
env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -736,8 +725,8 @@ func TestPlayerFilters(t *testing.T) { Match: "dst5", Filter: "select id1, val from src5 where val = 'abc'", }, { - Match: "dstCharset", - Filter: "select id1, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val2 from srcCharset", + Match: "dst_charset", + Filter: "select id1, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val, concat(substr(_utf8mb4 val collate utf8mb4_bin,1,1),'abcxyz') val2 from src_charset", }}, } bls := &binlogdatapb.BinlogSource{ @@ -985,14 +974,14 @@ func TestPlayerFilters(t *testing.T) { data: [][]string{{"1", "abc"}, {"4", "abc"}}, }, { // test collation + filter - input: "insert into srcCharset values (1,'木元')", + input: "insert into src_charset values (1,'木元')", output: qh.Expect( "begin", - "insert into dstCharset(id1,val,val2) values (1,concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'),concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'))", + "insert into dst_charset(id1,val,val2) values (1,concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'),concat(substr(_utf8mb4 '木元' collate utf8mb4_bin, 1, 1), 'abcxyz'))", "/update _vt.vreplication set pos=", "commit", ), - table: "dstCharset", + table: "dst_charset", data: [][]string{{"1", "木abcxyz", "木abcxyz"}}, }} @@ -1030,7 +1019,6 @@ func TestPlayerKeywordNames(t *testing.T) { "drop table `commit`", fmt.Sprintf("drop table %s.`commit`", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1216,7 +1204,6 @@ func TestPlayerKeyspaceID(t *testing.T) { "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) if err := env.SetVSchema(shardedVSchema); err != nil { t.Fatal(err) @@ -1278,7 +1265,6 @@ func 
TestUnicode(t *testing.T) { "drop table src1", fmt.Sprintf("drop table %s.dst1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1351,7 +1337,6 @@ func TestPlayerUpdates(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1467,7 +1452,6 @@ func TestPlayerRowMove(t *testing.T) { "drop table src", fmt.Sprintf("drop table %s.dst", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1525,8 +1509,8 @@ func TestPlayerTypes(t *testing.T) { fmt.Sprintf("create table %s.vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", vrepldb), "create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))", fmt.Sprintf("create table %s.vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))", vrepldb), - "create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(5), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))", - fmt.Sprintf("create table %s.vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(5), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))", vrepldb), + "create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(5), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'), primary key(vb))", + fmt.Sprintf("create table %s.vitess_strings(vb 
varbinary(16), c char(16), vc varchar(16), b binary(5), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'), primary key(vb))", vrepldb), "create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))", fmt.Sprintf("create table %s.vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))", vrepldb), "create table vitess_null(id int, val varbinary(128), primary key(id))", @@ -1561,8 +1545,6 @@ func TestPlayerTypes(t *testing.T) { fmt.Sprintf("drop table %s.vitess_json", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) - filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "/.*", @@ -1597,11 +1579,11 @@ func TestPlayerTypes(t *testing.T) { {"1", "1.99", "2.99", "3.99", "4.99"}, }, }, { - input: "insert into vitess_strings values('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b')", - output: "insert into vitess_strings(vb,c,vc,b,tb,bl,ttx,tx,en,s) values ('a','b','c','d\\0\\0\\0\\0','e','f','g','h',1,'3')", + input: "insert into vitess_strings values('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a', 'a,b,f,l,q,s,v,z')", + output: "insert into vitess_strings(vb,c,vc,b,tb,bl,ttx,tx,en,s) values ('a','b','c','d\\0\\0\\0\\0','e','f','g','h','a','a,b,f,l,q,s,v,z')", table: "vitess_strings", data: [][]string{ - {"a", "b", "c", "d\000\000\000\000", "e", "f", "g", "h", "a", "a,b"}, + {"a", "b", "c", "d\000\000\000\000", "e", "f", "g", "h", "a", "a,b,f,l,q,s,v,z"}, }, }, { input: "insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", @@ -1689,7 +1671,6 @@ func TestPlayerDDL(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1817,7 +1798,7 @@ func TestPlayerDDL(t *testing.T) { 
func TestGTIDCompress(t *testing.T) { ctx := context.Background() defer deleteTablet(addTablet(100)) - err := env.Mysqld.ExecuteSuperQuery(ctx, "insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state,db_name) values (1, '', '', '', 0,0,0,0,'Stopped','')") + err := env.Mysqld.ExecuteSuperQuery(ctx, "insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state,db_name, options) values (1, '', '', '', 0,0,0,0,'Stopped','', '{}')") require.NoError(t, err) type testCase struct { @@ -1880,7 +1861,6 @@ func TestPlayerStopPos(t *testing.T) { fmt.Sprintf("drop table %s.yes", vrepldb), "drop table no", }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -1980,7 +1960,6 @@ func TestPlayerStopAtOther(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) // Insert a source row. 
execStatements(t, []string{ @@ -2090,7 +2069,6 @@ func TestPlayerIdleUpdate(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2144,7 +2122,6 @@ func TestPlayerSplitTransaction(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2188,7 +2165,6 @@ func TestPlayerLockErrors(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2268,7 +2244,6 @@ func TestPlayerCancelOnLock(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2335,7 +2310,7 @@ func TestPlayerCancelOnLock(t *testing.T) { } } -func TestPlayerBatching(t *testing.T) { +func TestPlayerTransactions(t *testing.T) { defer deleteTablet(addTablet(100)) execStatements(t, []string{ @@ -2346,7 +2321,6 @@ func TestPlayerBatching(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2452,7 +2426,6 @@ func TestPlayerRelayLogMaxSize(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2547,7 +2520,6 @@ func TestRestartOnVStreamEnd(t *testing.T) { "drop table t1", fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2602,7 +2574,6 @@ func TestTimestamp(t *testing.T) { "drop table t1", 
fmt.Sprintf("drop table %s.t1", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2655,8 +2626,6 @@ func TestPlayerJSONDocs(t *testing.T) { fmt.Sprintf("drop table %s.vitess_json", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) - filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "/.*", @@ -2730,8 +2699,6 @@ func TestPlayerJSONTwoColumns(t *testing.T) { fmt.Sprintf("drop table %s.vitess_json2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) - filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ Match: "/.*", @@ -2799,8 +2766,7 @@ func TestVReplicationLogs(t *testing.T) { for _, want := range expected { t.Run("", func(t *testing.T) { - err = insertLog(vdbc, LogMessage, 1, binlogdatapb.VReplicationWorkflowState_Running.String(), "message1") - require.NoError(t, err) + insertLog(vdbc, LogMessage, 1, binlogdatapb.VReplicationWorkflowState_Running.String(), "message1") qr, err := env.Mysqld.FetchSuperQuery(context.Background(), query) require.NoError(t, err) require.Equal(t, want, fmt.Sprintf("%v", qr.Rows)) @@ -2824,7 +2790,6 @@ func TestGeneratedColumns(t *testing.T) { "drop table t2", fmt.Sprintf("drop table %s.t2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -2900,8 +2865,7 @@ func TestPlayerInvalidDates(t *testing.T) { fmt.Sprintf("drop table %s.dst1", vrepldb), }) pos := primaryPosition(t) - execStatements(t, []string{"set sql_mode='';insert into src1 values(1, '0000-00-00');set sql_mode='STRICT_TRANS_TABLES';"}) - env.SchemaEngine.Reload(context.Background()) + execStatements(t, []string{"set sql_mode=''", "insert into src1 values(1, '0000-00-00')", "set sql_mode='STRICT_TRANS_TABLES'"}) // default mysql flavor allows invalid dates: so disallow explicitly for this test if err := env.Mysqld.ExecuteSuperQuery(context.Background(), "SET 
@@global.sql_mode=REPLACE(REPLACE(@@session.sql_mode, 'NO_ZERO_DATE', ''), 'NO_ZERO_IN_DATE', '')"); err != nil { @@ -2987,7 +2951,6 @@ func TestPlayerNoBlob(t *testing.T) { "drop table t2", fmt.Sprintf("drop table %s.t2", vrepldb), }) - env.SchemaEngine.Reload(context.Background()) filter := &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{ @@ -3093,7 +3056,6 @@ func TestPlayerNoBlob(t *testing.T) { output := qh.Expect(tcases.output) expectNontxQueries(t, output) time.Sleep(1 * time.Second) - log.Flush() if tcases.table != "" { expectData(t, tcases.table, tcases.data) } @@ -3107,6 +3069,254 @@ func TestPlayerNoBlob(t *testing.T) { require.Equal(t, int64(4), stats.PartialQueryCount.Counts()["update"]) } +func TestPlayerBatchMode(t *testing.T) { + // To test trx batch splitting at 1024-64 bytes. + maxAllowedPacket := 1024 + oldVreplicationExperimentalFlags := vttablet.VReplicationExperimentalFlags + vttablet.VReplicationExperimentalFlags = vttablet.VReplicationExperimentalFlagVPlayerBatching + defer func() { + vttablet.VReplicationExperimentalFlags = oldVreplicationExperimentalFlags + }() + + defer deleteTablet(addTablet(100)) + execStatements(t, []string{ + fmt.Sprintf("set @@global.max_allowed_packet=%d", maxAllowedPacket), + "create table t1(id bigint, val1 varchar(1000), primary key(id))", + fmt.Sprintf("create table %s.t1(id bigint, val1 varchar(1000), primary key(id))", vrepldb), + }) + defer execStatements(t, []string{ + "drop table t1", + fmt.Sprintf("drop table %s.t1", vrepldb), + }) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + OnDdl: binlogdatapb.OnDDLAction_IGNORE, + } + cancel, vrID := startVReplication(t, bls, "") + defer cancel() + + maxBatchSize := maxAllowedPacket - 64 // VPlayer leaves 64 bytes of room + // When the trx will be in a single batch. 
+ trxFullBatchExpectRE := `^begin;(set @@session\.foreign_key_checks=.*;)?%s;update _vt\.vreplication set pos=.*;commit$` + // If the trx batch is split, then we only expect the end part. + trxLastBatchExpectRE := `%s;update _vt\.vreplication set pos=.*;commit$` + // The vreplication position update statement will look like this: + // update _vt.vreplication set pos='MySQL56/b213e4de-937a-11ee-b184-668979c675f4:1-38', time_updated=1701786574, transaction_timestamp=1701786574, rows_copied=0, message='' where id=1; + // So it will use 182 bytes in the batch. + // This long value can be used to test the handling of bulk statements + // which bump up against the max batch size, as well as testing the trx + // batch splitting into multiple wire messages when hitting the max size. + longStr := strings.Repeat("a", maxBatchSize-70) + + testcases := []struct { + input string + output []string + expectedNonCommitBatches int64 + expectedInLastBatch string // Should only be set if we expect 1+ non-commit batches + expectedBulkInserts int64 + expectedBulkDeletes int64 + table string + data [][]string + }{ + { + input: "insert into t1(id, val1) values (1, 'aaa'), (2, 'bbb'), (3, 'ccc'), (4, 'ddd'), (5, 'eee')", + output: []string{"insert into t1(id,val1) values (1,'aaa'), (2,'bbb'), (3,'ccc'), (4,'ddd'), (5,'eee')"}, + expectedBulkInserts: 1, + table: "t1", + data: [][]string{ + {"1", "aaa"}, + {"2", "bbb"}, + {"3", "ccc"}, + {"4", "ddd"}, + {"5", "eee"}, + }, + }, + { + input: "delete from t1 where id = 1", + output: []string{"delete from t1 where id=1"}, + table: "t1", + data: [][]string{ + {"2", "bbb"}, + {"3", "ccc"}, + {"4", "ddd"}, + {"5", "eee"}, + }, + }, + { + input: "delete from t1 where id > 3", + output: []string{"delete from t1 where id in (4, 5)"}, + expectedBulkDeletes: 1, + table: "t1", + data: [][]string{ + {"2", "bbb"}, + {"3", "ccc"}, + }, + }, + { + input: fmt.Sprintf("insert into t1(id, val1) values (1, '%s'), (2, 'bbb'), (3, 'ccc') on duplicate key update 
id = id+100", longStr), + output: []string{ + fmt.Sprintf("insert into t1(id,val1) values (1,'%s')", longStr), + "delete from t1 where id=2", + "insert into t1(id,val1) values (102,'bbb')", + "delete from t1 where id=3", + // This will be in the second/last batch, along with the vrepl pos update. + "insert into t1(id,val1) values (103,'ccc')", + }, + expectedInLastBatch: "insert into t1(id,val1) values (103,'ccc')", + expectedNonCommitBatches: 1, + table: "t1", + data: [][]string{ + {"1", longStr}, + {"102", "bbb"}, + {"103", "ccc"}, + }, + }, + { + input: "insert into t1(id, val1) values (1, 'aaa'), (2, 'bbb'), (3, 'ccc') on duplicate key update id = id+500, val1 = values(val1)", + output: []string{ + "delete from t1 where id=1", + "insert into t1(id,val1) values (501,'aaa')", + "insert into t1(id,val1) values (2,'bbb'), (3,'ccc')", + }, + expectedBulkInserts: 1, + table: "t1", + data: [][]string{ + {"2", "bbb"}, + {"3", "ccc"}, + {"102", "bbb"}, + {"103", "ccc"}, + {"501", "aaa"}, + }, + }, + { + input: "delete from t1", + output: []string{"delete from t1 where id in (2, 3, 102, 103, 501)"}, + expectedBulkDeletes: 1, + table: "t1", + }, + { + input: fmt.Sprintf("insert into t1(id, val1) values (1, '%s'), (2, 'bbb'), (3, 'ccc'), (4, 'ddd'), (5, 'eee')", longStr), + output: []string{ + // This bulk insert is long enough that the BEGIN gets sent down by itself. + // The bulk query then gets split into two queries. It also causes the trx + // to get split into three batches (BEGIN, INSERT, INSERT). + fmt.Sprintf("insert into t1(id,val1) values (1,'%s'), (2,'bbb'), (3,'ccc'), (4,'ddd')", longStr), + // This will be in the second/last batch, along with the vrepl pos update. + "insert into t1(id,val1) values (5,'eee')", + }, + expectedBulkInserts: 2, + // The BEGIN, then the INSERT. 
+ expectedNonCommitBatches: 2, // The last one includes the commit + expectedInLastBatch: "insert into t1(id,val1) values (5,'eee')", + table: "t1", + data: [][]string{ + {"1", longStr}, + {"2", "bbb"}, + {"3", "ccc"}, + {"4", "ddd"}, + {"5", "eee"}, + }, + }, + { + input: "insert into t1(id, val1) values (1000000000000, 'x'), (1000000000001, 'x'), (1000000000002, 'x'), (1000000000003, 'x'), (1000000000004, 'x'), (1000000000005, 'x'), (1000000000006, 'x'), (1000000000007, 'x'), (1000000000008, 'x'), (1000000000009, 'x'), (1000000000010, 'x'), (1000000000011, 'x'), (1000000000012, 'x'), (1000000000013, 'x'), (1000000000014, 'x'), (1000000000015, 'x'), (1000000000016, 'x'), (1000000000017, 'x'), (1000000000018, 'x'), (1000000000019, 'x'), (1000000000020, 'x'), (1000000000021, 'x'), (1000000000022, 'x'), (1000000000023, 'x'), (1000000000024, 'x'), (1000000000025, 'x'), (1000000000026, 'x'), (1000000000027, 'x'), (1000000000028, 'x'), (1000000000029, 'x'), (1000000000030, 'x'), (1000000000031, 'x'), (1000000000032, 'x'), (1000000000033, 'x'), (1000000000034, 'x'), (1000000000035, 'x'), (1000000000036, 'x'), (1000000000037, 'x'), (1000000000038, 'x'), (1000000000039, 'x'), (1000000000040, 'x'), (1000000000041, 'x'), (1000000000042, 'x'), (1000000000043, 'x'), (1000000000044, 'x'), (1000000000045, 'x'), (1000000000046, 'x'), (1000000000047, 'x'), (1000000000048, 'x'), (1000000000049, 'x'), (1000000000050, 'x'), (1000000000051, 'x'), (1000000000052, 'x'), (1000000000053, 'x'), (1000000000054, 'x'), (1000000000055, 'x'), (1000000000056, 'x'), (1000000000057, 'x'), (1000000000058, 'x'), (1000000000059, 'x'), (1000000000060, 'x'), (1000000000061, 'x'), (1000000000062, 'x'), (1000000000063, 'x'), (1000000000064, 'x'), (1000000000065, 'x'), (1000000000066, 'x'), (1000000000067, 'x'), (1000000000068, 'x'), (1000000000069, 'x'), (1000000000070, 'x'), (1000000000071, 'x'), (1000000000072, 'x'), (1000000000073, 'x'), (1000000000074, 'x'), (1000000000075, 'x'), (1000000000076, 
'x'), (1000000000077, 'x'), (1000000000078, 'x'), (1000000000079, 'x'), (1000000000080, 'x'), (1000000000081, 'x'), (1000000000082, 'x'), (1000000000083, 'x'), (1000000000084, 'x'), (1000000000085, 'x'), (1000000000086, 'x'), (1000000000087, 'x'), (1000000000088, 'x'), (1000000000089, 'x'), (1000000000090, 'x'), (1000000000091, 'x'), (1000000000092, 'x'), (1000000000093, 'x'), (1000000000094, 'x'), (1000000000095, 'x'), (1000000000096, 'x'), (1000000000097, 'x'), (1000000000098, 'x'), (1000000000099, 'x'), (1000000000100, 'x'), (1000000000101, 'x'), (1000000000102, 'x'), (1000000000103, 'x'), (1000000000104, 'x'), (1000000000105, 'x'), (1000000000106, 'x'), (1000000000107, 'x'), (1000000000108, 'x'), (1000000000109, 'x'), (1000000000110, 'x'), (1000000000111, 'x'), (1000000000112, 'x'), (1000000000113, 'x'), (1000000000114, 'x'), (1000000000115, 'x'), (1000000000116, 'x'), (1000000000117, 'x'), (1000000000118, 'x'), (1000000000119, 'x'), (1000000000120, 'x'), (1000000000121, 'x'), (1000000000122, 'x'), (1000000000123, 'x'), (1000000000124, 'x'), (1000000000125, 'x'), (1000000000126, 'x'), (1000000000127, 'x'), (1000000000128, 'x'), (1000000000129, 'x'), (1000000000130, 'x'), (1000000000131, 'x'), (1000000000132, 'x'), (1000000000133, 'x'), (1000000000134, 'x'), (1000000000135, 'x'), (1000000000136, 'x'), (1000000000137, 'x'), (1000000000138, 'x'), (1000000000139, 'x'), (1000000000140, 'x'), (1000000000141, 'x'), (1000000000142, 'x'), (1000000000143, 'x'), (1000000000144, 'x'), (1000000000145, 'x'), (1000000000146, 'x'), (1000000000147, 'x'), (1000000000148, 'x'), (1000000000149, 'x'), (1000000000150, 'x')", + output: []string{ + "insert into t1(id,val1) values (1000000000000,'x'), (1000000000001,'x'), (1000000000002,'x'), (1000000000003,'x'), (1000000000004,'x'), (1000000000005,'x'), (1000000000006,'x'), (1000000000007,'x'), (1000000000008,'x'), (1000000000009,'x'), (1000000000010,'x'), (1000000000011,'x'), (1000000000012,'x'), (1000000000013,'x'), 
(1000000000014,'x'), (1000000000015,'x'), (1000000000016,'x'), (1000000000017,'x'), (1000000000018,'x'), (1000000000019,'x'), (1000000000020,'x'), (1000000000021,'x'), (1000000000022,'x'), (1000000000023,'x'), (1000000000024,'x'), (1000000000025,'x'), (1000000000026,'x'), (1000000000027,'x'), (1000000000028,'x'), (1000000000029,'x'), (1000000000030,'x'), (1000000000031,'x'), (1000000000032,'x'), (1000000000033,'x'), (1000000000034,'x'), (1000000000035,'x'), (1000000000036,'x'), (1000000000037,'x'), (1000000000038,'x'), (1000000000039,'x'), (1000000000040,'x'), (1000000000041,'x'), (1000000000042,'x'), (1000000000043,'x')", + "insert into t1(id,val1) values (1000000000044,'x'), (1000000000045,'x'), (1000000000046,'x'), (1000000000047,'x'), (1000000000048,'x'), (1000000000049,'x'), (1000000000050,'x'), (1000000000051,'x'), (1000000000052,'x'), (1000000000053,'x'), (1000000000054,'x'), (1000000000055,'x'), (1000000000056,'x'), (1000000000057,'x'), (1000000000058,'x'), (1000000000059,'x'), (1000000000060,'x'), (1000000000061,'x'), (1000000000062,'x'), (1000000000063,'x'), (1000000000064,'x'), (1000000000065,'x'), (1000000000066,'x'), (1000000000067,'x'), (1000000000068,'x'), (1000000000069,'x'), (1000000000070,'x'), (1000000000071,'x'), (1000000000072,'x'), (1000000000073,'x'), (1000000000074,'x'), (1000000000075,'x'), (1000000000076,'x'), (1000000000077,'x'), (1000000000078,'x'), (1000000000079,'x'), (1000000000080,'x'), (1000000000081,'x'), (1000000000082,'x'), (1000000000083,'x'), (1000000000084,'x'), (1000000000085,'x'), (1000000000086,'x'), (1000000000087,'x')", + "insert into t1(id,val1) values (1000000000088,'x'), (1000000000089,'x'), (1000000000090,'x'), (1000000000091,'x'), (1000000000092,'x'), (1000000000093,'x'), (1000000000094,'x'), (1000000000095,'x'), (1000000000096,'x'), (1000000000097,'x'), (1000000000098,'x'), (1000000000099,'x'), (1000000000100,'x'), (1000000000101,'x'), (1000000000102,'x'), (1000000000103,'x'), (1000000000104,'x'), 
(1000000000105,'x'), (1000000000106,'x'), (1000000000107,'x'), (1000000000108,'x'), (1000000000109,'x'), (1000000000110,'x'), (1000000000111,'x'), (1000000000112,'x'), (1000000000113,'x'), (1000000000114,'x'), (1000000000115,'x'), (1000000000116,'x'), (1000000000117,'x'), (1000000000118,'x'), (1000000000119,'x'), (1000000000120,'x'), (1000000000121,'x'), (1000000000122,'x'), (1000000000123,'x'), (1000000000124,'x'), (1000000000125,'x'), (1000000000126,'x'), (1000000000127,'x'), (1000000000128,'x'), (1000000000129,'x'), (1000000000130,'x'), (1000000000131,'x')", + // This will be in the last batch, along with the vrepl pos update. + "insert into t1(id,val1) values (1000000000132,'x'), (1000000000133,'x'), (1000000000134,'x'), (1000000000135,'x'), (1000000000136,'x'), (1000000000137,'x'), (1000000000138,'x'), (1000000000139,'x'), (1000000000140,'x'), (1000000000141,'x'), (1000000000142,'x'), (1000000000143,'x'), (1000000000144,'x'), (1000000000145,'x'), (1000000000146,'x'), (1000000000147,'x'), (1000000000148,'x'), (1000000000149,'x'), (1000000000150,'x')", + }, + expectedBulkInserts: 4, + expectedNonCommitBatches: 3, // The last one includes the commit + expectedInLastBatch: "insert into t1(id,val1) values (1000000000132,'x'), (1000000000133,'x'), (1000000000134,'x'), (1000000000135,'x'), (1000000000136,'x'), (1000000000137,'x'), (1000000000138,'x'), (1000000000139,'x'), (1000000000140,'x'), (1000000000141,'x'), (1000000000142,'x'), (1000000000143,'x'), (1000000000144,'x'), (1000000000145,'x'), (1000000000146,'x'), (1000000000147,'x'), (1000000000148,'x'), (1000000000149,'x'), (1000000000150,'x')", + table: "t1", + data: [][]string{ + {"1", longStr}, + {"2", "bbb"}, + {"3", "ccc"}, + {"4", "ddd"}, + {"5", "eee"}, + {"1000000000000", "x"}, {"1000000000001", "x"}, {"1000000000002", "x"}, {"1000000000003", "x"}, {"1000000000004", "x"}, {"1000000000005", "x"}, {"1000000000006", "x"}, {"1000000000007", "x"}, {"1000000000008", "x"}, {"1000000000009", "x"}, 
{"1000000000010", "x"}, {"1000000000011", "x"}, {"1000000000012", "x"}, {"1000000000013", "x"}, {"1000000000014", "x"}, {"1000000000015", "x"}, {"1000000000016", "x"}, {"1000000000017", "x"}, {"1000000000018", "x"}, {"1000000000019", "x"}, {"1000000000020", "x"}, {"1000000000021", "x"}, {"1000000000022", "x"}, {"1000000000023", "x"}, {"1000000000024", "x"}, {"1000000000025", "x"}, {"1000000000026", "x"}, {"1000000000027", "x"}, {"1000000000028", "x"}, {"1000000000029", "x"}, {"1000000000030", "x"}, {"1000000000031", "x"}, {"1000000000032", "x"}, {"1000000000033", "x"}, {"1000000000034", "x"}, {"1000000000035", "x"}, {"1000000000036", "x"}, {"1000000000037", "x"}, {"1000000000038", "x"}, {"1000000000039", "x"}, {"1000000000040", "x"}, {"1000000000041", "x"}, {"1000000000042", "x"}, {"1000000000043", "x"}, {"1000000000044", "x"}, {"1000000000045", "x"}, {"1000000000046", "x"}, {"1000000000047", "x"}, {"1000000000048", "x"}, {"1000000000049", "x"}, {"1000000000050", "x"}, {"1000000000051", "x"}, {"1000000000052", "x"}, {"1000000000053", "x"}, {"1000000000054", "x"}, {"1000000000055", "x"}, {"1000000000056", "x"}, {"1000000000057", "x"}, {"1000000000058", "x"}, {"1000000000059", "x"}, {"1000000000060", "x"}, {"1000000000061", "x"}, {"1000000000062", "x"}, {"1000000000063", "x"}, {"1000000000064", "x"}, {"1000000000065", "x"}, {"1000000000066", "x"}, {"1000000000067", "x"}, {"1000000000068", "x"}, {"1000000000069", "x"}, {"1000000000070", "x"}, {"1000000000071", "x"}, {"1000000000072", "x"}, {"1000000000073", "x"}, {"1000000000074", "x"}, {"1000000000075", "x"}, {"1000000000076", "x"}, {"1000000000077", "x"}, {"1000000000078", "x"}, {"1000000000079", "x"}, {"1000000000080", "x"}, {"1000000000081", "x"}, {"1000000000082", "x"}, {"1000000000083", "x"}, {"1000000000084", "x"}, {"1000000000085", "x"}, {"1000000000086", "x"}, {"1000000000087", "x"}, {"1000000000088", "x"}, {"1000000000089", "x"}, {"1000000000090", "x"}, {"1000000000091", "x"}, {"1000000000092", "x"}, 
{"1000000000093", "x"}, {"1000000000094", "x"}, {"1000000000095", "x"}, {"1000000000096", "x"}, {"1000000000097", "x"}, {"1000000000098", "x"}, {"1000000000099", "x"}, {"1000000000100", "x"}, {"1000000000101", "x"}, {"1000000000102", "x"}, {"1000000000103", "x"}, {"1000000000104", "x"}, {"1000000000105", "x"}, {"1000000000106", "x"}, {"1000000000107", "x"}, {"1000000000108", "x"}, {"1000000000109", "x"}, {"1000000000110", "x"}, {"1000000000111", "x"}, {"1000000000112", "x"}, {"1000000000113", "x"}, {"1000000000114", "x"}, {"1000000000115", "x"}, {"1000000000116", "x"}, {"1000000000117", "x"}, {"1000000000118", "x"}, {"1000000000119", "x"}, {"1000000000120", "x"}, {"1000000000121", "x"}, {"1000000000122", "x"}, {"1000000000123", "x"}, {"1000000000124", "x"}, {"1000000000125", "x"}, {"1000000000126", "x"}, {"1000000000127", "x"}, {"1000000000128", "x"}, {"1000000000129", "x"}, {"1000000000130", "x"}, {"1000000000131", "x"}, {"1000000000132", "x"}, {"1000000000133", "x"}, {"1000000000134", "x"}, {"1000000000135", "x"}, {"1000000000136", "x"}, {"1000000000137", "x"}, {"1000000000138", "x"}, {"1000000000139", "x"}, {"1000000000140", "x"}, {"1000000000141", "x"}, {"1000000000142", "x"}, {"1000000000143", "x"}, {"1000000000144", "x"}, {"1000000000145", "x"}, {"1000000000146", "x"}, {"1000000000147", "x"}, {"1000000000148", "x"}, {"1000000000149", "x"}, {"1000000000150", "x"}, + }, + }, + { // Now we have enough long IDs to cause the bulk delete to also be split along with the trx batch. 
+ input: "delete from t1 where id > 1 and id <= 1000000000149", + output: []string{ + "delete from t1 where id in (2, 3, 4, 5, 1000000000000, 1000000000001, 1000000000002, 1000000000003, 1000000000004, 1000000000005, 1000000000006, 1000000000007, 1000000000008, 1000000000009, 1000000000010, 1000000000011, 1000000000012, 1000000000013, 1000000000014, 1000000000015, 1000000000016, 1000000000017, 1000000000018, 1000000000019, 1000000000020, 1000000000021, 1000000000022, 1000000000023, 1000000000024, 1000000000025, 1000000000026, 1000000000027, 1000000000028, 1000000000029, 1000000000030, 1000000000031, 1000000000032, 1000000000033, 1000000000034, 1000000000035, 1000000000036, 1000000000037, 1000000000038, 1000000000039, 1000000000040, 1000000000041, 1000000000042, 1000000000043, 1000000000044, 1000000000045, 1000000000046, 1000000000047, 1000000000048, 1000000000049, 1000000000050, 1000000000051, 1000000000052, 1000000000053, 1000000000054, 1000000000055, 1000000000056, 1000000000057, 1000000000058, 1000000000059)", + "delete from t1 where id in (1000000000060, 1000000000061, 1000000000062, 1000000000063, 1000000000064, 1000000000065, 1000000000066, 1000000000067, 1000000000068, 1000000000069, 1000000000070, 1000000000071, 1000000000072, 1000000000073, 1000000000074, 1000000000075, 1000000000076, 1000000000077, 1000000000078, 1000000000079, 1000000000080, 1000000000081, 1000000000082, 1000000000083, 1000000000084, 1000000000085, 1000000000086, 1000000000087, 1000000000088, 1000000000089, 1000000000090, 1000000000091, 1000000000092, 1000000000093, 1000000000094, 1000000000095, 1000000000096, 1000000000097, 1000000000098, 1000000000099, 1000000000100, 1000000000101, 1000000000102, 1000000000103, 1000000000104, 1000000000105, 1000000000106, 1000000000107, 1000000000108, 1000000000109, 1000000000110, 1000000000111, 1000000000112, 1000000000113, 1000000000114, 1000000000115, 1000000000116, 1000000000117, 1000000000118, 1000000000119, 1000000000120)", + // This will be in 
the last batch, along with the vrepl pos update. + "delete from t1 where id in (1000000000121, 1000000000122, 1000000000123, 1000000000124, 1000000000125, 1000000000126, 1000000000127, 1000000000128, 1000000000129, 1000000000130, 1000000000131, 1000000000132, 1000000000133, 1000000000134, 1000000000135, 1000000000136, 1000000000137, 1000000000138, 1000000000139, 1000000000140, 1000000000141, 1000000000142, 1000000000143, 1000000000144, 1000000000145, 1000000000146, 1000000000147, 1000000000148, 1000000000149)", + }, + expectedBulkDeletes: 3, + expectedNonCommitBatches: 2, // The last one includes the commit + expectedInLastBatch: "delete from t1 where id in (1000000000121, 1000000000122, 1000000000123, 1000000000124, 1000000000125, 1000000000126, 1000000000127, 1000000000128, 1000000000129, 1000000000130, 1000000000131, 1000000000132, 1000000000133, 1000000000134, 1000000000135, 1000000000136, 1000000000137, 1000000000138, 1000000000139, 1000000000140, 1000000000141, 1000000000142, 1000000000143, 1000000000144, 1000000000145, 1000000000146, 1000000000147, 1000000000148, 1000000000149)", + table: "t1", + data: [][]string{ + {"1", longStr}, + {"1000000000150", "x"}, + }, + }, + { + input: "delete from t1 where id = 1 or id > 1000000000149", + output: []string{"delete from t1 where id in (1, 1000000000150)"}, + expectedBulkDeletes: 1, + table: "t1", + }, + } + + expectedBulkInserts, expectedBulkDeletes, expectedTrxBatchExecs, expectedTrxBatchCommits := int64(0), int64(0), int64(0), int64(0) + stats := globalStats.controllers[int32(vrID)].blpStats + + for _, tcase := range testcases { + t.Run(fmt.Sprintf("%.50s", tcase.input), func(t *testing.T) { + execStatements(t, []string{tcase.input}) + var output qh.ExpectationSequencer + switch len(tcase.output) { + case 0: + require.FailNow(t, "no expected output provided for test case") + case 1: + output = qh.Expect(tcase.output[0]) + default: + output = qh.Expect(tcase.output[0], tcase.output[1:]...) 
+ } + for _, stmt := range tcase.output { + require.LessOrEqual(t, len(stmt), maxBatchSize, "expected output statement is longer than the max batch size (%d): %s", maxBatchSize, stmt) + } + expectNontxQueries(t, output) + time.Sleep(1 * time.Second) + if tcase.table != "" { + expectData(t, tcase.table, tcase.data) + } + + // Confirm that the row events generated the expected multi-row + // statements and the statements were sent in multi-statement + // protocol message(s) as expected. + expectedBulkDeletes += tcase.expectedBulkDeletes + expectedBulkInserts += tcase.expectedBulkInserts + expectedTrxBatchCommits++ // Should only ever be 1 per test case + expectedTrxBatchExecs += tcase.expectedNonCommitBatches + if tcase.expectedInLastBatch != "" { // We expect the trx to be split + require.Regexpf(t, regexp.MustCompile(fmt.Sprintf(trxLastBatchExpectRE, regexp.QuoteMeta(tcase.expectedInLastBatch))), lastMultiExecQuery, "Unexpected batch statement: %s", lastMultiExecQuery) + } else { + require.Regexpf(t, regexp.MustCompile(fmt.Sprintf(trxFullBatchExpectRE, regexp.QuoteMeta(strings.Join(tcase.output, ";")))), lastMultiExecQuery, "Unexpected batch statement: %s", lastMultiExecQuery) + } + require.Equal(t, expectedBulkInserts, stats.BulkQueryCount.Counts()["insert"], "expected %d bulk inserts but got %d", expectedBulkInserts, stats.BulkQueryCount.Counts()["insert"]) + require.Equal(t, expectedBulkDeletes, stats.BulkQueryCount.Counts()["delete"], "expected %d bulk deletes but got %d", expectedBulkDeletes, stats.BulkQueryCount.Counts()["delete"]) + require.Equal(t, expectedTrxBatchExecs, stats.TrxQueryBatchCount.Counts()["without_commit"], "expected %d trx batch execs but got %d", expectedTrxBatchExecs, stats.TrxQueryBatchCount.Counts()["without_commit"]) + require.Equal(t, expectedTrxBatchCommits, stats.TrxQueryBatchCount.Counts()["with_commit"], "expected %d trx batch commits but got %d", expectedTrxBatchCommits, stats.TrxQueryBatchCount.Counts()["with_commit"]) + }) + } 
+} + func expectJSON(t *testing.T, table string, values [][]string, id int, exec func(ctx context.Context, query string) (*sqltypes.Result, error)) { t.Helper() diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index e148151934e..8a01cf7c8ed 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -28,22 +28,20 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/binlog/binlogplayer" - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/mysqlctl" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) var ( @@ -290,9 +288,7 @@ func (vr *vreplicator) replicate(ctx context.Context) error { return err } if numTablesToCopy == 0 { - if err := vr.insertLog(LogCopyEnd, fmt.Sprintf("Copy phase completed at gtid %s", settings.StartPos)); err != nil { - return err - } + vr.insertLog(LogCopyEnd, fmt.Sprintf("Copy phase completed at gtid %s", settings.StartPos)) } } case settings.StartPos.IsZero(): @@ -329,7 +325,13 @@ type ColumnInfo struct { } func (vr *vreplicator) buildColInfoMap(ctx context.Context) (map[string][]*ColumnInfo, error) { - req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{"/.*/"}, ExcludeTables: 
[]string{"/" + schema.GCTableNameExpression + "/"}} + req := &tabletmanagerdatapb.GetSchemaRequest{ + Tables: []string{"/.*/"}, + ExcludeTables: []string{ + "/" + schema.OldGCTableNameExpression + "/", + "/" + schema.GCTableNameExpression + "/", + }, + } schema, err := vr.mysqld.GetSchema(ctx, vr.dbClient.DBName(), req) if err != nil { return nil, err @@ -352,7 +354,11 @@ func (vr *vreplicator) buildColInfoMap(ctx context.Context) (map[string][]*Colum pks = td.PrimaryKeyColumns } else { // Use a PK equivalent if one exists. - if pks, _, err = vr.mysqld.GetPrimaryKeyEquivalentColumns(ctx, vr.dbClient.DBName(), td.Name); err != nil { + executeFetch := func(query string, maxrows int, wantfields bool) (*sqltypes.Result, error) { + // This sets wantfields to true. + return vr.dbClient.ExecuteFetch(query, maxrows) + } + if pks, _, err = mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, executeFetch, vr.dbClient.DBName(), td.Name); err != nil { return nil, err } // Fall back to using every column in the table if there's no PK or PKE. 
@@ -444,7 +450,7 @@ func (vr *vreplicator) readSettings(ctx context.Context, dbClient *vdbClient) (s return settings, numTablesToCopy, nil } -func (vr *vreplicator) setMessage(message string) error { +func (vr *vreplicator) setMessage(message string) (err error) { message = binlogplayer.MessageTruncate(message) vr.stats.History.Add(&binlogplayer.StatsHistoryRecord{ Time: time.Now(), @@ -456,14 +462,12 @@ func (vr *vreplicator) setMessage(message string) error { if _, err := vr.dbClient.Execute(query); err != nil { return fmt.Errorf("could not set message: %v: %v", query, err) } - if err := insertLog(vr.dbClient, LogMessage, vr.id, vr.state.String(), message); err != nil { - return err - } + insertLog(vr.dbClient, LogMessage, vr.id, vr.state.String(), message) return nil } -func (vr *vreplicator) insertLog(typ, message string) error { - return insertLog(vr.dbClient, typ, vr.id, vr.state.String(), message) +func (vr *vreplicator) insertLog(typ, message string) { + insertLog(vr.dbClient, typ, vr.id, vr.state.String(), message) } func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, message string) error { @@ -481,9 +485,7 @@ func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, me if state == vr.state { return nil } - if err := insertLog(vr.dbClient, LogStateChange, vr.id, state.String(), message); err != nil { - return err - } + insertLog(vr.dbClient, LogStateChange, vr.id, state.String(), message) vr.state = state return nil @@ -561,7 +563,7 @@ func (vr *vreplicator) setSQLMode(ctx context.Context, dbClient *vdbClient) (fun // - "vreplication" for most flows // - "vreplication:online-ddl" for online ddl flows. 
// Note that with such name, it's possible to throttle -// the worflow by either /throttler/throttle-app?app=vreplication and/or /throttler/throttle-app?app=online-ddl +// the workflow by either /throttler/throttle-app?app=vreplication and/or /throttler/throttle-app?app=online-ddl // This is useful when we want to throttle all migrations. We throttle "online-ddl" and that applies to both vreplication // migrations as well as gh-ost migrations. func (vr *vreplicator) throttlerAppName() string { @@ -572,10 +574,21 @@ func (vr *vreplicator) throttlerAppName() string { return throttlerapp.Concatenate(names...) } +// updateTimeThrottled updates the time_throttled field in the _vt.vreplication record +// with a rate limit so that it's only saved in the database at most once per +// throttleUpdatesRateLimiter.tickerTime. +// It also increments the throttled count in the stats to keep track of how many +// times a VReplication workflow, and the specific sub-component, is throttled by the +// tablet throttler over time. It also increments the global throttled count to keep +// track of how many times in total vreplication has been throttled across all workflows +// (both ones that currently exist and ones that no longer do). 
func (vr *vreplicator) updateTimeThrottled(appThrottled throttlerapp.Name) error { + appName := appThrottled.String() + vr.stats.ThrottledCounts.Add([]string{"tablet", appName}, 1) + globalStats.ThrottledCount.Add(1) err := vr.throttleUpdatesRateLimiter.Do(func() error { tm := time.Now().Unix() - update, err := binlogplayer.GenerateUpdateTimeThrottled(vr.id, tm, appThrottled.String()) + update, err := binlogplayer.GenerateUpdateTimeThrottled(vr.id, tm, appName) if err != nil { return err } @@ -728,7 +741,7 @@ func (vr *vreplicator) getTableSecondaryKeys(ctx context.Context, tableName stri } tableSchema := schema.TableDefinitions[0].Schema var secondaryKeys []*sqlparser.IndexDefinition - parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema) + parsedDDL, err := vr.vre.env.Parser().ParseStrictDDL(tableSchema) if err != nil { return secondaryKeys, err } @@ -739,8 +752,27 @@ func (vr *vreplicator) getTableSecondaryKeys(ctx context.Context, tableName stri return nil, fmt.Errorf("could not determine CREATE TABLE statement from table schema %q", tableSchema) } - for _, index := range createTable.GetTableSpec().Indexes { + tableSpec := createTable.GetTableSpec() + fkIndexCols := make(map[string]bool) + for _, constraint := range tableSpec.Constraints { + if fkDef, ok := constraint.Details.(*sqlparser.ForeignKeyDefinition); ok { + fkCols := make([]string, len(fkDef.Source)) + for i, fkCol := range fkDef.Source { + fkCols[i] = fkCol.Lowered() + } + fkIndexCols[strings.Join(fkCols, ",")] = true + } + } + for _, index := range tableSpec.Indexes { if index.Info.Type != sqlparser.IndexTypePrimary { + cols := make([]string, len(index.Columns)) + for i, col := range index.Columns { + cols[i] = col.Column.Lowered() + } + if fkIndexCols[strings.Join(cols, ",")] { + // This index is needed for a FK constraint so we cannot drop it. 
+ continue + } secondaryKeys = append(secondaryKeys, index) } } @@ -777,10 +809,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string return nil } - if err := vr.insertLog(LogCopyStart, fmt.Sprintf("Executing %d post copy action(s) for %s table", - len(qr.Rows), tableName)); err != nil { - return err - } + vr.insertLog(LogCopyStart, fmt.Sprintf("Executing %d post copy action(s) for %s table", len(qr.Rows), tableName)) // Save our connection ID so we can use it to easily KILL any // running SQL action we may perform later if needed. @@ -956,7 +985,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string // the table schema and if so move forward and delete the // post_copy_action record. if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERDupKeyName { - stmt, err := sqlparser.ParseStrictDDL(action.Task) + stmt, err := vr.vre.env.Parser().ParseStrictDDL(action.Task) if err != nil { return failedAlterErr } @@ -1001,10 +1030,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string } } - if err := vr.insertLog(LogCopyStart, fmt.Sprintf("Completed all post copy actions for %s table", - tableName)); err != nil { - return err - } + vr.insertLog(LogCopyStart, fmt.Sprintf("Completed all post copy actions for %s table", tableName)) return nil } @@ -1044,7 +1070,7 @@ func (vr *vreplicator) setExistingRowsCopied() { if vr.stats.CopyRowCount.Get() == 0 { rowsCopiedExisting, err := vr.readExistingRowsCopied(vr.id) if err != nil { - log.Warningf("Failed to read existing rows copied value for %s worfklow: %v", vr.WorkflowName, err) + log.Warningf("Failed to read existing rows copied value for %s workflow: %v", vr.WorkflowName, err) } else if rowsCopiedExisting != 0 { log.Infof("Resuming the %s vreplication workflow started on another tablet, setting rows copied counter to %v", vr.WorkflowName, rowsCopiedExisting) vr.stats.CopyRowCount.Set(rowsCopiedExisting) diff 
--git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index 128d41d4bc2..e4f55cc2384 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -34,9 +34,10 @@ import ( "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/schemadiff" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - "vitess.io/vitess/go/vt/schemadiff" ) func TestRecalculatePKColsInfoByColumnNames(t *testing.T) { @@ -182,11 +183,11 @@ func TestPrimaryKeyEquivalentColumns(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require.NoError(t, env.Mysqld.ExecuteSuperQuery(ctx, tt.ddl)) - cols, indexName, err := env.Mysqld.GetPrimaryKeyEquivalentColumns(ctx, env.Dbcfgs.DBName, tt.table) - if (err != nil) != tt.wantErr { - t.Errorf("Mysqld.GetPrimaryKeyEquivalentColumns() error = %v, wantErr %v", err, tt.wantErr) - return - } + conn, err := env.Mysqld.GetDbaConnection(ctx) + require.NoError(t, err, "could not connect to mysqld: %v", err) + defer conn.Close() + cols, indexName, err := mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, conn.ExecuteFetch, env.Dbcfgs.DBName, tt.table) + assert.NoError(t, err) require.Equalf(t, cols, tt.wantCols, "Mysqld.GetPrimaryKeyEquivalentColumns() columns = %v, want %v", cols, tt.wantCols) require.Equalf(t, indexName, tt.wantIndex, "Mysqld.GetPrimaryKeyEquivalentColumns() index = %v, want %v", indexName, tt.wantIndex) }) @@ -223,7 +224,7 @@ func TestDeferSecondaryKeys(t *testing.T) { defer dbClient.Close() dbName := dbClient.DBName() // Ensure there's a dummy vreplication workflow record - _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, 
time_updated, transaction_timestamp, state, db_name) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'", + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, options) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', '{}') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'", id, dbName, dbName), 1) require.NoError(t, err) defer func() { @@ -256,6 +257,7 @@ func TestDeferSecondaryKeys(t *testing.T) { wantStashErr string wantExecErr string expectFinalSchemaDiff bool + preStashHook func() error postStashHook func() error }{ { @@ -297,6 +299,54 @@ func TestDeferSecondaryKeys(t *testing.T) { actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2)", WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), }, + { + name: "2SK:1FK", + tableName: "t1", + initialDDL: "create table t1 (id int not null, c1 int default null, c2 int default null, t2_id int not null, primary key (id), key c1 (c1), key c2 (c2), foreign key (t2_id) references t2 (id))", + // Secondary key t2_id is needed to enforce the FK constraint so we do not drop it. 
+ strippedDDL: "create table t1 (id int not null, c1 int default null, c2 int default null, t2_id int not null, primary key (id), key t2_id (t2_id), constraint t1_ibfk_1 foreign key (t2_id) references t2 (id))", + actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2)", + WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), + preStashHook: func() error { + if _, err := dbClient.ExecuteFetch("drop table if exists t2", 1); err != nil { + return err + } + _, err = dbClient.ExecuteFetch("create table t2 (id int not null, c1 int not null, primary key (id))", 1) + return err + }, + }, + { + name: "3SK:2FK", + tableName: "t1", + initialDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key c1 (c1), key c2 (c2), foreign key (t2_id) references t2 (id), key c3 (c3), foreign key (t2_id2) references t2 (id2))", + // Secondary keys t2_id and t2_id2 are needed to enforce the FK constraint so we do not drop them. 
+ strippedDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key t2_id (t2_id), key t2_id2 (t2_id2), constraint t1_ibfk_1 foreign key (t2_id) references t2 (id), constraint t1_ibfk_2 foreign key (t2_id2) references t2 (id2))", + actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2), add key c3 (c3)", + WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), + preStashHook: func() error { + if _, err := dbClient.ExecuteFetch("drop table if exists t2", 1); err != nil { + return err + } + _, err = dbClient.ExecuteFetch("create table t2 (id int not null, id2 int default null, c1 int not null, primary key (id), key (id2))", 1) + return err + }, + }, + { + name: "5SK:2FK_multi-column", + tableName: "t1", + initialDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key c1 (c1), key c2 (c2), key t2_cs (c1,c2), key t2_ids (t2_id,t2_id2), foreign key (t2_id,t2_id2) references t2 (id, id2), key c3 (c3), foreign key (c1, c2) references t2 (c1, c2))", + // Secondary keys t2_ids and t2_cs are needed to enforce the FK constraint so we do not drop them. 
+ strippedDDL: "create table t1 (id int not null, id2 int default null, c1 int default null, c2 int default null, c3 int default null, t2_id int not null, t2_id2 int not null, primary key (id), key t2_cs (c1,c2), key t2_ids (t2_id,t2_id2), constraint t1_ibfk_1 foreign key (t2_id, t2_id2) references t2 (id, id2), constraint t1_ibfk_2 foreign key (c1, c2) references t2 (c1, c2))", + actionDDL: "alter table %s.t1 add key c1 (c1), add key c2 (c2), add key c3 (c3)", + WorkflowType: int32(binlogdatapb.VReplicationWorkflowType_MoveTables), + preStashHook: func() error { + if _, err := dbClient.ExecuteFetch("drop table if exists t2", 1); err != nil { + return err + } + _, err = dbClient.ExecuteFetch("create table t2 (id int not null, id2 int not null, c1 int not null, c2 int not null, primary key (id,id2), key (c1,c2))", 1) + return err + }, + }, { name: "2tSK", tableName: "t1", @@ -330,7 +380,7 @@ func TestDeferSecondaryKeys(t *testing.T) { postStashHook: func() error { myid := id + 1000 // Insert second vreplication record to simulate a second controller/vreplicator - _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s')", + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, options) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', '{}')", myid, dbName), 1) if err != nil { return err @@ -339,7 +389,7 @@ func TestDeferSecondaryKeys(t *testing.T) { myvr.WorkflowType = int32(binlogdatapb.VReplicationWorkflowType_Reshard) // Insert second post copy action record to simulate a shard merge where you // have N controllers/replicators running for the same table on the tablet. 
- // This forces a second row, which would otherwise not get created beacause + // This forces a second row, which would otherwise not get created because // when this is called there's no secondary keys to stash anymore. addlAction, err := json.Marshal(PostCopyAction{ Type: PostCopyActionSQL, @@ -425,6 +475,11 @@ func TestDeferSecondaryKeys(t *testing.T) { // MoveTables and Reshard workflows. vr.WorkflowType = tcase.WorkflowType + if tcase.preStashHook != nil { + err = tcase.preStashHook() + require.NoError(t, err, "error executing pre stash hook: %v", err) + } + // Create the table. _, err := dbClient.ExecuteFetch(tcase.initialDDL, 1) require.NoError(t, err) @@ -456,7 +511,7 @@ func TestDeferSecondaryKeys(t *testing.T) { if tcase.postStashHook != nil { err = tcase.postStashHook() - require.NoError(t, err) + require.NoError(t, err, "error executing post stash hook: %v", err) // We should still NOT have any secondary keys because there's still // a running controller/vreplicator in the copy phase. @@ -494,7 +549,7 @@ func TestDeferSecondaryKeys(t *testing.T) { // order in the table schema. 
if !tcase.expectFinalSchemaDiff { currentDDL := getCurrentDDL(tcase.tableName) - sdiff, err := schemadiff.DiffCreateTablesQueries(currentDDL, tcase.initialDDL, diffHints) + sdiff, err := schemadiff.DiffCreateTablesQueries(schemadiff.NewTestEnv(), currentDDL, tcase.initialDDL, diffHints) require.NoError(t, err) require.Nil(t, sdiff, "Expected no schema difference but got: %s", sdiff.CanonicalStatementString()) } @@ -559,7 +614,7 @@ func TestCancelledDeferSecondaryKeys(t *testing.T) { defer dbClient.Close() dbName := dbClient.DBName() // Ensure there's a dummy vreplication workflow record - _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'", + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, options) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', '{}') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'", id, dbName, dbName), 1) require.NoError(t, err) defer func() { @@ -678,7 +733,7 @@ func TestResumingFromPreviousWorkflowKeepingRowsCopied(t *testing.T) { dbName := dbClient.DBName() rowsCopied := int64(500000) // Ensure there's an existing vreplication workflow - _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, rows_copied) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', %v) on 
duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s', rows_copied=%v", + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, rows_copied, options) values (%d, 'test', '', '', 99999, 99999, 0, 0, 'Running', '%s', %v, '{}') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s', rows_copied=%v", id, dbName, rowsCopied, dbName, rowsCopied), 1) require.NoError(t, err) defer func() { diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index 63f4c73520e..af8c5fbc78e 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -18,6 +18,7 @@ package connpool import ( "context" + "errors" "fmt" "strings" "sync" @@ -40,6 +41,8 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) +const defaultKillTimeout = 5 * time.Second + // Conn is a db connection for tabletserver. // It performs automatic reconnects as needed. // Its Execute function has a timeout that can kill @@ -52,11 +55,13 @@ type Conn struct { env tabletenv.Env dbaPool *dbconnpool.ConnectionPool stats *tabletenv.Stats - current atomic.Value + current atomic.Pointer[string] // err will be set if a query is killed through a Kill. errmu sync.Mutex err error + + killTimeout time.Duration } // NewConnection creates a new DBConn. It triggers a CheckMySQL if creation fails. 
@@ -71,27 +76,28 @@ func newPooledConn(ctx context.Context, pool *Pool, appParams dbconfigs.Connecto return nil, err } db := &Conn{ - conn: c, - env: pool.env, - stats: pool.env.Stats(), - dbaPool: pool.dbaPool, + conn: c, + env: pool.env, + stats: pool.env.Stats(), + dbaPool: pool.dbaPool, + killTimeout: defaultKillTimeout, } - db.current.Store("") return db, nil } // NewConn creates a new Conn without a pool. -func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpool.ConnectionPool, setting *smartconnpool.Setting) (*Conn, error) { +func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpool.ConnectionPool, setting *smartconnpool.Setting, env tabletenv.Env) (*Conn, error) { c, err := dbconnpool.NewDBConnection(ctx, params) if err != nil { return nil, err } dbconn := &Conn{ - conn: c, - dbaPool: dbaPool, - stats: tabletenv.NewStats(servenv.NewExporter("Temp", "Tablet")), + conn: c, + dbaPool: dbaPool, + stats: tabletenv.NewStats(servenv.NewExporter("Temp", "Tablet")), + env: env, + killTimeout: defaultKillTimeout, } - dbconn.current.Store("") if setting == nil { return dbconn, nil } @@ -103,11 +109,13 @@ func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpoo } // Err returns an error if there was a client initiated error -// like a query kill. +// like a query kill and resets the error message on the connection. func (dbc *Conn) Err() error { dbc.errmu.Lock() defer dbc.errmu.Unlock() - return dbc.err + err := dbc.err + dbc.err = nil + return err } // Exec executes the specified query. If there is a connection error, it will reconnect @@ -117,7 +125,7 @@ func (dbc *Conn) Exec(ctx context.Context, query string, maxrows int, wantfields defer span.Finish() for attempt := 1; attempt <= 2; attempt++ { - r, err := dbc.execOnce(ctx, query, maxrows, wantfields) + r, err := dbc.execOnce(ctx, query, maxrows, wantfields, false) switch { case err == nil: // Success. 
@@ -151,9 +159,9 @@ func (dbc *Conn) Exec(ctx context.Context, query string, maxrows int, wantfields panic("unreachable") } -func (dbc *Conn) execOnce(ctx context.Context, query string, maxrows int, wantfields bool) (*sqltypes.Result, error) { - dbc.current.Store(query) - defer dbc.current.Store("") +func (dbc *Conn) execOnce(ctx context.Context, query string, maxrows int, wantfields bool, insideTxn bool) (*sqltypes.Result, error) { + dbc.current.Store(&query) + defer dbc.current.Store(nil) // Check if the context is already past its deadline before // trying to execute the query. @@ -161,24 +169,59 @@ func (dbc *Conn) execOnce(ctx context.Context, query string, maxrows int, wantfi return nil, fmt.Errorf("%v before execution started", err) } - defer dbc.stats.MySQLTimings.Record("Exec", time.Now()) + now := time.Now() + defer dbc.stats.MySQLTimings.Record("Exec", now) + + type execResult struct { + result *sqltypes.Result + err error + } + + ch := make(chan execResult) + go func() { + result, err := dbc.conn.ExecuteFetch(query, maxrows, wantfields) + ch <- execResult{result, err} + close(ch) + }() - done, wg := dbc.setDeadline(ctx) - qr, err := dbc.conn.ExecuteFetch(query, maxrows, wantfields) + select { + case <-ctx.Done(): + dbc.terminate(ctx, insideTxn, now) + if !insideTxn { + // wait for the execute method to finish to make connection reusable. 
+ <-ch + } + return nil, dbc.Err() + case r := <-ch: + if dbcErr := dbc.Err(); dbcErr != nil { + return nil, dbcErr + } + return r.result, r.err + } +} - if done != nil { - close(done) - wg.Wait() +// terminate kills the query or connection based on the transaction status +func (dbc *Conn) terminate(ctx context.Context, insideTxn bool, now time.Time) { + var errMsg string + switch { + case errors.Is(ctx.Err(), context.DeadlineExceeded): + errMsg = "(errno 3024) (sqlstate HY000): Query execution was interrupted, maximum statement execution time exceeded" + case errors.Is(ctx.Err(), context.Canceled): + errMsg = "(errno 1317) (sqlstate 70100): Query execution was interrupted" + default: + errMsg = ctx.Err().Error() } - if dbcerr := dbc.Err(); dbcerr != nil { - return nil, dbcerr + if insideTxn { + // we can't safely kill a query in a transaction, we need to kill the connection + _ = dbc.Kill(errMsg, time.Since(now)) + } else { + _ = dbc.KillQuery(errMsg, time.Since(now)) } - return qr, err } // ExecOnce executes the specified query, but does not retry on connection errors. func (dbc *Conn) ExecOnce(ctx context.Context, query string, maxrows int, wantfields bool) (*sqltypes.Result, error) { - return dbc.execOnce(ctx, query, maxrows, wantfields) + return dbc.execOnce(ctx, query, maxrows, wantfields, true /* Once means we are in a txn*/) } // FetchNext returns the next result set. 
@@ -216,6 +259,7 @@ func (dbc *Conn) Stream(ctx context.Context, query string, callback func(*sqltyp }, alloc, streamBufferSize, + false, ) switch { case err == nil: @@ -248,27 +292,51 @@ func (dbc *Conn) Stream(ctx context.Context, query string, callback func(*sqltyp panic("unreachable") } -func (dbc *Conn) streamOnce(ctx context.Context, query string, callback func(*sqltypes.Result) error, alloc func() *sqltypes.Result, streamBufferSize int) error { - defer dbc.stats.MySQLTimings.Record("ExecStream", time.Now()) - - dbc.current.Store(query) - defer dbc.current.Store("") - - done, wg := dbc.setDeadline(ctx) - err := dbc.conn.ExecuteStreamFetch(query, callback, alloc, streamBufferSize) +func (dbc *Conn) streamOnce( + ctx context.Context, + query string, + callback func(*sqltypes.Result) error, + alloc func() *sqltypes.Result, + streamBufferSize int, + insideTxn bool, +) error { + dbc.current.Store(&query) + defer dbc.current.Store(nil) + + now := time.Now() + defer dbc.stats.MySQLTimings.Record("ExecStream", now) + + ch := make(chan error) + go func() { + ch <- dbc.conn.ExecuteStreamFetch(query, callback, alloc, streamBufferSize) + close(ch) + }() - if done != nil { - close(done) - wg.Wait() - } - if dbcerr := dbc.Err(); dbcerr != nil { - return dbcerr + select { + case <-ctx.Done(): + dbc.terminate(ctx, insideTxn, now) + if !insideTxn { + // wait for the execute method to finish to make connection reusable. + <-ch + } + return dbc.Err() + case err := <-ch: + if dbcErr := dbc.Err(); dbcErr != nil { + return dbcErr + } + return err } - return err } // StreamOnce executes the query and streams the results. But, does not retry on connection errors. 
-func (dbc *Conn) StreamOnce(ctx context.Context, query string, callback func(*sqltypes.Result) error, alloc func() *sqltypes.Result, streamBufferSize int, includedFields querypb.ExecuteOptions_IncludedFields) error { +func (dbc *Conn) StreamOnce( + ctx context.Context, + query string, + callback func(*sqltypes.Result) error, + alloc func() *sqltypes.Result, + streamBufferSize int, + includedFields querypb.ExecuteOptions_IncludedFields, +) error { resultSent := false return dbc.streamOnce( ctx, @@ -282,6 +350,7 @@ func (dbc *Conn) StreamOnce(ctx context.Context, query string, callback func(*sq }, alloc, streamBufferSize, + true, // Once means we are in a txn ) } @@ -337,7 +406,7 @@ func (dbc *Conn) Close() { // ApplySetting implements the pools.Resource interface. func (dbc *Conn) ApplySetting(ctx context.Context, setting *smartconnpool.Setting) error { - if _, err := dbc.execOnce(ctx, setting.ApplyQuery(), 1, false); err != nil { + if _, err := dbc.execOnce(ctx, setting.ApplyQuery(), 1, false, false); err != nil { return err } dbc.setting = setting @@ -346,7 +415,7 @@ func (dbc *Conn) ApplySetting(ctx context.Context, setting *smartconnpool.Settin // ResetSetting implements the pools.Resource interface. func (dbc *Conn) ResetSetting(ctx context.Context) error { - if _, err := dbc.execOnce(ctx, dbc.setting.ResetQuery(), 1, false); err != nil { + if _, err := dbc.execOnce(ctx, dbc.setting.ResetQuery(), 1, false, false); err != nil { return err } dbc.setting = nil @@ -362,39 +431,115 @@ func (dbc *Conn) IsClosed() bool { return dbc.conn.IsClosed() } -// Kill kills the currently executing query both on MySQL side -// and on the connection side. If no query is executing, it's a no-op. -// Kill will also not kill a query more than once. +// Kill executes a kill statement to terminate the connection. 
func (dbc *Conn) Kill(reason string, elapsed time.Duration) error { - dbc.stats.KillCounters.Add("Queries", 1) - log.Infof("Due to %s, elapsed time: %v, killing query ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging()) + ctx, cancel := context.WithTimeout(context.Background(), dbc.killTimeout) + defer cancel() + + return dbc.kill(ctx, reason, elapsed) +} + +// KillQuery executes a kill query statement to terminate the running query on the connection. +func (dbc *Conn) KillQuery(reason string, elapsed time.Duration) error { + ctx, cancel := context.WithTimeout(context.Background(), dbc.killTimeout) + defer cancel() + + return dbc.killQuery(ctx, reason, elapsed) +} + +// kill closes the connection and stops any executing query both on MySQL and +// vttablet. +func (dbc *Conn) kill(ctx context.Context, reason string, elapsed time.Duration) error { + dbc.stats.KillCounters.Add("Connections", 1) + log.Infof("Due to %s, elapsed time: %v, killing connection ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging()) // Client side action. Set error and close connection. dbc.errmu.Lock() - dbc.err = vterrors.Errorf(vtrpcpb.Code_CANCELED, "(errno 2013) due to %s, elapsed time: %v, killing query ID %v", reason, elapsed, dbc.conn.ID()) + dbc.err = vterrors.Errorf(vtrpcpb.Code_CANCELED, "%s, elapsed time: %v, killing connection ID %v", reason, elapsed, dbc.conn.ID()) dbc.errmu.Unlock() dbc.conn.Close() // Server side action. Kill the session. 
- killConn, err := dbc.dbaPool.Get(context.TODO()) + killConn, err := dbc.dbaPool.Get(ctx) if err != nil { log.Warningf("Failed to get conn from dba pool: %v", err) return err } defer killConn.Recycle() + + ch := make(chan error) sql := fmt.Sprintf("kill %d", dbc.conn.ID()) - _, err = killConn.Conn.ExecuteFetch(sql, 10000, false) + go func() { + _, err := killConn.Conn.ExecuteFetch(sql, -1, false) + ch <- err + close(ch) + }() + + select { + case <-ctx.Done(): + killConn.Close() + + dbc.stats.InternalErrors.Add("HungConnection", 1) + log.Warningf("Failed to kill MySQL connection ID %d which was executing the following query, it may be hung: %s", dbc.conn.ID(), dbc.CurrentForLogging()) + return context.Cause(ctx) + case err := <-ch: + if err != nil { + log.Errorf("Could not kill connection ID %v %s: %v", dbc.conn.ID(), dbc.CurrentForLogging(), err) + return err + } + return nil + } +} + +// killQuery kills the currently executing query both on MySQL side +// and on the connection side. +func (dbc *Conn) killQuery(ctx context.Context, reason string, elapsed time.Duration) error { + dbc.stats.KillCounters.Add("Queries", 1) + log.Infof("Due to %s, elapsed time: %v, killing query ID %v %s", reason, elapsed, dbc.conn.ID(), dbc.CurrentForLogging()) + + // Client side action. Set error for killing the query on timeout. + dbc.errmu.Lock() + dbc.err = vterrors.Errorf(vtrpcpb.Code_CANCELED, "%s, elapsed time: %v, killing query ID %v", reason, elapsed, dbc.conn.ID()) + dbc.errmu.Unlock() + + // Server side action. Kill the executing query. 
+ killConn, err := dbc.dbaPool.Get(ctx) if err != nil { - log.Errorf("Could not kill query ID %v %s: %v", dbc.conn.ID(), - dbc.CurrentForLogging(), err) + log.Warningf("Failed to get conn from dba pool: %v", err) return err } - return nil + defer killConn.Recycle() + + ch := make(chan error) + sql := fmt.Sprintf("kill query %d", dbc.conn.ID()) + go func() { + _, err := killConn.Conn.ExecuteFetch(sql, -1, false) + ch <- err + close(ch) + }() + + select { + case <-ctx.Done(): + killConn.Close() + + dbc.stats.InternalErrors.Add("HungQuery", 1) + log.Warningf("Failed to kill MySQL query ID %d which was executing the following query, it may be hung: %s", dbc.conn.ID(), dbc.CurrentForLogging()) + return context.Cause(ctx) + case err := <-ch: + if err != nil { + log.Errorf("Could not kill query ID %v %s: %v", dbc.conn.ID(), dbc.CurrentForLogging(), err) + return err + } + return nil + } } // Current returns the currently executing query. func (dbc *Conn) Current() string { - return dbc.current.Load().(string) + if q := dbc.current.Load(); q != nil { + return *q + } + return "" } // ID returns the connection id. @@ -436,45 +581,6 @@ func (dbc *Conn) Reconnect(ctx context.Context) error { return nil } -// setDeadline starts a goroutine that will kill the currently executing query -// if the deadline is exceeded. It returns a channel and a waitgroup. After the -// query is done executing, the caller is required to close the done channel -// and wait for the waitgroup to make sure that the necessary cleanup is done. 
-func (dbc *Conn) setDeadline(ctx context.Context) (chan bool, *sync.WaitGroup) { - if ctx.Done() == nil { - return nil, nil - } - done := make(chan bool) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - startTime := time.Now() - select { - case <-ctx.Done(): - dbc.Kill(ctx.Err().Error(), time.Since(startTime)) - case <-done: - return - } - elapsed := time.Since(startTime) - - // Give 2x the elapsed time and some buffer as grace period - // for the query to get killed. - tmr2 := time.NewTimer(2*elapsed + 5*time.Second) - defer tmr2.Stop() - select { - case <-tmr2.C: - dbc.stats.InternalErrors.Add("HungQuery", 1) - log.Warningf("Query may be hung: %s", dbc.CurrentForLogging()) - case <-done: - return - } - <-done - log.Warningf("Hung query returned") - }() - return done, &wg -} - // CurrentForLogging applies transformations to the query making it suitable to log. // It applies sanitization rules based on tablet settings and limits the max length of // queries. @@ -483,12 +589,12 @@ func (dbc *Conn) CurrentForLogging() string { if dbc.env != nil && dbc.env.Config() != nil && !dbc.env.Config().SanitizeLogMessages { queryToLog = dbc.Current() } else { - queryToLog, _ = sqlparser.RedactSQLQuery(dbc.Current()) + queryToLog, _ = dbc.env.Environment().Parser().RedactSQLQuery(dbc.Current()) } - return sqlparser.TruncateForLog(queryToLog) + return dbc.env.Environment().Parser().TruncateForLog(queryToLog) } -func (dbc *Conn) applySameSetting(ctx context.Context) (err error) { - _, err = dbc.execOnce(ctx, dbc.setting.ApplyQuery(), 1, false) - return +func (dbc *Conn) applySameSetting(ctx context.Context) error { + _, err := dbc.execOnce(ctx, dbc.setting.ApplyQuery(), 1, false, false) + return err } diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index 9717c95d9f7..6f3c77de528 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ 
b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -21,18 +21,23 @@ import ( "errors" "fmt" "strings" + "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" - - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) func compareTimingCounts(t *testing.T, op string, delta int64, before, after map[string]int64) { @@ -62,11 +67,12 @@ func TestDBConnExec(t *testing.T) { connPool := newPool() mysqlTimings := connPool.env.Stats().MySQLTimings startCounts := mysqlTimings.Counts() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -135,11 +141,12 @@ func TestDBConnExecLost(t *testing.T) { connPool := newPool() mysqlTimings := connPool.env.Stats().MySQLTimings startCounts := mysqlTimings.Counts() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) 
if dbConn != nil { defer dbConn.Close() } @@ -193,14 +200,15 @@ func TestDBConnDeadline(t *testing.T) { connPool := newPool() mysqlTimings := connPool.env.Stats().MySQLTimings startCounts := mysqlTimings.Counts() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() db.SetConnDelay(100 * time.Millisecond) ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -251,9 +259,10 @@ func TestDBConnKill(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -291,40 +300,182 @@ func TestDBConnKill(t *testing.T) { } } -// TestDBConnClose tests that an Exec returns immediately if a connection -// is asynchronously killed (and closed) in the middle of an execution. 
-func TestDBConnClose(t *testing.T) { +func TestDBKillWithContext(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) + if dbConn != nil { + defer dbConn.Close() + } require.NoError(t, err) - defer dbConn.Close() + + query := fmt.Sprintf("kill %d", dbConn.ID()) + db.AddQuery(query, &sqltypes.Result{}) + db.SetBeforeFunc(query, func() { + // should take longer than our context deadline below. + time.Sleep(200 * time.Millisecond) + }) + + // set a lower timeout value + dbConn.killTimeout = 100 * time.Millisecond + + // Kill should return context.DeadlineExceeded + err = dbConn.Kill("test kill", 0) + require.ErrorIs(t, err, context.DeadlineExceeded) +} + +// TestDBConnCtxError tests that an Exec returns with appropriate error code. +// Also, verifies that does it wait for the query to finish before returning. 
+func TestDBConnCtxError(t *testing.T) { + exec := func(ctx context.Context, query string, dbconn *Conn) error { + _, err := dbconn.Exec(ctx, query, 1, false) + return err + } + + execOnce := func(ctx context.Context, query string, dbconn *Conn) error { + _, err := dbconn.ExecOnce(ctx, query, 1, false) + return err + } + + t.Run("context cancel - non-tx exec", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(10 * time.Millisecond) + cancel() + }() + testContextError(t, ctx, exec, + "(errno 1317) (sqlstate 70100): Query execution was interrupted", + 150*time.Millisecond) + }) + + t.Run("context deadline - non-tx exec", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + testContextError(t, ctx, exec, + "(errno 3024) (sqlstate HY000): Query execution was interrupted, maximum statement execution time exceeded", + 150*time.Millisecond) + }) + + t.Run("context cancel - tx exec", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(10 * time.Millisecond) + cancel() + }() + testContextError(t, ctx, execOnce, + "(errno 1317) (sqlstate 70100): Query execution was interrupted", + 50*time.Millisecond) + }) + + t.Run("context deadline - tx exec", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + testContextError(t, ctx, execOnce, + "(errno 3024) (sqlstate HY000): Query execution was interrupted, maximum statement execution time exceeded", + 50*time.Millisecond) + }) +} + +var alloc = func() *sqltypes.Result { + return &sqltypes.Result{} +} + +// TestDBConnStreamCtxError tests that an StreamExec returns with appropriate error code. +// Also, verifies that does it wait for the query to finish before returning. 
+func TestDBConnStreamCtxError(t *testing.T) { + exec := func(ctx context.Context, query string, dbconn *Conn) error { + return dbconn.Stream(ctx, query, func(result *sqltypes.Result) error { + return nil + }, alloc, 1, querypb.ExecuteOptions_ALL) + } + + execOnce := func(ctx context.Context, query string, dbconn *Conn) error { + return dbconn.StreamOnce(ctx, query, func(result *sqltypes.Result) error { + return nil + }, alloc, 1, querypb.ExecuteOptions_ALL) + } + + t.Run("context cancel - non-tx exec", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(10 * time.Millisecond) + cancel() + }() + testContextError(t, ctx, exec, + "(errno 1317) (sqlstate 70100): Query execution was interrupted", + 150*time.Millisecond) + }) + + t.Run("context deadline - non-tx exec", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + testContextError(t, ctx, exec, + "(errno 3024) (sqlstate HY000): Query execution was interrupted, maximum statement execution time exceeded", + 150*time.Millisecond) + }) + + t.Run("context cancel - tx exec", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(10 * time.Millisecond) + cancel() + }() + testContextError(t, ctx, execOnce, + "(errno 1317) (sqlstate 70100): Query execution was interrupted", + 50*time.Millisecond) + }) + + t.Run("context deadline - tx exec", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + testContextError(t, ctx, execOnce, + "(errno 3024) (sqlstate HY000): Query execution was interrupted, maximum statement execution time exceeded", + 50*time.Millisecond) + }) +} + +func testContextError(t *testing.T, + ctx context.Context, + exec func(context.Context, string, *Conn) error, + expErrMsg string, + expDuration time.Duration) { + db := fakesqldb.New(t) + defer db.Close() + connPool := 
newPool() + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) + defer connPool.Close() query := "sleep" db.AddQuery(query, &sqltypes.Result{}) db.SetBeforeFunc(query, func() { time.Sleep(100 * time.Millisecond) }) + db.AddQueryPattern(`kill query \d+`, &sqltypes.Result{}) + db.AddQueryPattern(`kill \d+`, &sqltypes.Result{}) + + dbConn, err := newPooledConn(context.Background(), connPool, params) + require.NoError(t, err) + defer dbConn.Close() start := time.Now() - go func() { - time.Sleep(10 * time.Millisecond) - dbConn.Kill("test kill", 0) - }() - _, err = dbConn.Exec(context.Background(), query, 1, false) - assert.Contains(t, err.Error(), "(errno 2013) due to") - assert.True(t, time.Since(start) < 100*time.Millisecond, "%v %v", time.Since(start), 100*time.Millisecond) + err = exec(ctx, query, dbConn) + end := time.Now() + assert.ErrorContains(t, err, expErrMsg) + assert.WithinDuration(t, end, start, expDuration) } func TestDBNoPoolConnKill(t *testing.T) { db := fakesqldb.New(t) connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := NewConn(context.Background(), db.ConnParams(), connPool.dbaPool, nil) + dbConn, err := NewConn(context.Background(), params, connPool.dbaPool, nil, tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestDBNoPoolConnKill")) if dbConn != nil { defer dbConn.Close() } @@ -376,11 +527,12 @@ func TestDBConnStream(t *testing.T) { } db.AddQuery(sql, expectedResult) connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := 
newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -398,9 +550,7 @@ func TestDBConnStream(t *testing.T) { result.Rows = append(result.Rows, r.Rows...) } return nil - }, func() *sqltypes.Result { - return &sqltypes.Result{} - }, + }, alloc, 10, querypb.ExecuteOptions_ALL) if err != nil { t.Fatalf("should not get an error, err: %v", err) @@ -425,7 +575,25 @@ func TestDBConnStream(t *testing.T) { } } -func TestDBConnStreamKill(t *testing.T) { +// TestDBConnKillCall tests that direct Kill method calls work as expected. +func TestDBConnKillCall(t *testing.T) { + t.Run("stream exec", func(t *testing.T) { + testKill(t, func(ctx context.Context, query string, dbconn *Conn) error { + return dbconn.Stream(context.Background(), query, + func(r *sqltypes.Result) error { return nil }, + alloc, 10, querypb.ExecuteOptions_ALL) + }) + }) + + t.Run("exec", func(t *testing.T) { + testKill(t, func(ctx context.Context, query string, dbconn *Conn) error { + _, err := dbconn.Exec(context.Background(), query, 1, false) + return err + }) + }) +} + +func testKill(t *testing.T, exec func(context.Context, string, *Conn) error) { db := fakesqldb.New(t) defer db.Close() sql := "select * from test_table limit 1000" @@ -435,29 +603,28 @@ func TestDBConnStreamKill(t *testing.T) { }, } db.AddQuery(sql, expectedResult) + db.SetBeforeFunc(sql, func() { + time.Sleep(100 * time.Millisecond) + }) + + db.AddQueryPattern(`kill query \d+`, &sqltypes.Result{}) + db.AddQueryPattern(`kill \d+`, &sqltypes.Result{}) + connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) require.NoError(t, err) defer dbConn.Close() go func() { time.Sleep(10 * time.Millisecond) - 
dbConn.Kill("test kill", 0) + dbConn.Kill("kill connection called", 0) }() - err = dbConn.Stream(context.Background(), sql, - func(r *sqltypes.Result) error { - time.Sleep(100 * time.Millisecond) - return nil - }, - func() *sqltypes.Result { - return &sqltypes.Result{} - }, - 10, querypb.ExecuteOptions_ALL) - - assert.Contains(t, err.Error(), "(errno 2013) due to") + err = exec(context.Background(), sql, dbConn) + assert.ErrorContains(t, err, "kill connection called") } func TestDBConnReconnect(t *testing.T) { @@ -465,10 +632,11 @@ func TestDBConnReconnect(t *testing.T) { defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) require.NoError(t, err) defer dbConn.Close() @@ -490,11 +658,12 @@ func TestDBConnReApplySetting(t *testing.T) { db.OrderMatters() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx := context.Background() - dbConn, err := newPooledConn(ctx, connPool, db.ConnParams()) + dbConn, err := newPooledConn(ctx, connPool, params) require.NoError(t, err) defer dbConn.Close() @@ -519,3 +688,73 @@ func TestDBConnReApplySetting(t *testing.T) { db.VerifyAllExecutedOrFail() } + +func TestDBExecOnceKillTimeout(t *testing.T) { + executeWithTimeout(t, `kill \d+`, 150*time.Millisecond, func(ctx context.Context, dbConn *Conn) (*sqltypes.Result, error) { + return dbConn.ExecOnce(ctx, "select 1", 1, false) + }) +} + +func TestDBExecKillTimeout(t *testing.T) { + executeWithTimeout(t, `kill query \d+`, 1000*time.Millisecond, func(ctx context.Context, dbConn *Conn) (*sqltypes.Result, error) { + return dbConn.Exec(ctx, 
"select 1", 1, false) + }) +} + +func executeWithTimeout( + t *testing.T, + expectedKillQuery string, + responseTime time.Duration, + execute func(context.Context, *Conn) (*sqltypes.Result, error), +) { + db := fakesqldb.New(t) + defer db.Close() + connPool := newPool() + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) + defer connPool.Close() + dbConn, err := newPooledConn(context.Background(), connPool, params) + if dbConn != nil { + defer dbConn.Close() + } + require.NoError(t, err) + + // A very long running query that will be killed. + expectedQuery := "select 1" + var timestampQuery atomic.Int64 + db.AddQuery(expectedQuery, &sqltypes.Result{}) + db.SetBeforeFunc(expectedQuery, func() { + timestampQuery.Store(time.Now().UnixMicro()) + // should take longer than our context deadline below. + time.Sleep(1000 * time.Millisecond) + }) + + // We expect a kill-query to be fired, too. + // It should also run into a timeout. + var timestampKill atomic.Int64 + dbConn.killTimeout = 100 * time.Millisecond + + db.AddQueryPatternWithCallback(expectedKillQuery, &sqltypes.Result{}, func(string) { + timestampKill.Store(time.Now().UnixMicro()) + // should take longer than the configured kill timeout above. + time.Sleep(200 * time.Millisecond) + }) + + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + result, err := execute(ctx, dbConn) + timeDone := time.Now() + + require.Error(t, err) + require.Equal(t, vtrpcpb.Code_CANCELED, vterrors.Code(err)) + require.Nil(t, result) + timeQuery := time.UnixMicro(timestampQuery.Load()) + timeKill := time.UnixMicro(timestampKill.Load()) + // In this unit test, the execution of `select 1` is blocked for 1000ms. + // The kill query gets executed after 100ms but waits for the query to return which will happen after 1000ms due to the test framework. + // In real scenario mysql will kill the query immediately and return the error. 
+ // Here, kill call happens after 100ms but took 1000ms to complete. + require.WithinDuration(t, timeQuery, timeKill, 150*time.Millisecond) + require.WithinDuration(t, timeKill, timeDone, responseTime) +} diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index 6f8b72870e0..14fcc6d0f2e 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -31,15 +31,9 @@ import ( "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -// ErrConnPoolClosed is returned when the connection pool is closed. -var ErrConnPoolClosed = vterrors.New(vtrpcpb.Code_INTERNAL, "internal error: unexpected: conn pool is closed") - const ( getWithoutS = "GetWithoutSettings" getWithS = "GetWithSettings" @@ -68,14 +62,14 @@ type Pool struct { // to publish stats only. 
func NewPool(env tabletenv.Env, name string, cfg tabletenv.ConnPoolConfig) *Pool { cp := &Pool{ - timeout: cfg.TimeoutSeconds.Get(), + timeout: cfg.Timeout, env: env, } config := smartconnpool.Config[*Conn]{ Capacity: int64(cfg.Size), - IdleTimeout: cfg.IdleTimeoutSeconds.Get(), - MaxLifetime: cfg.MaxLifetimeSeconds.Get(), + IdleTimeout: cfg.IdleTimeout, + MaxLifetime: cfg.MaxLifetime, RefreshInterval: mysqlctl.PoolDynamicHostnameResolution, } @@ -126,7 +120,7 @@ func (cp *Pool) Get(ctx context.Context, setting *smartconnpool.Setting) (*Poole defer span.Finish() if cp.isCallerIDAppDebug(ctx) { - conn, err := NewConn(ctx, cp.appDebugParams, cp.dbaPool, setting) + conn, err := NewConn(ctx, cp.appDebugParams, cp.dbaPool, setting, cp.env) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go index ecdd2df4465..8cf27cbb327 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool_test.go +++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go @@ -28,6 +28,8 @@ import ( "vitess.io/vitess/go/pools/smartconnpool" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -35,7 +37,8 @@ func TestConnPoolGet(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() dbConn, err := connPool.Get(context.Background(), nil) if err != nil { @@ -54,24 +57,26 @@ func TestConnPoolTimeout(t *testing.T) { cfg := tabletenv.ConnPoolConfig{ Size: 1, } - _ = cfg.TimeoutSeconds.Set("1s") - _ = cfg.IdleTimeoutSeconds.Set("10s") - connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + 
cfg.Timeout = time.Second + cfg.IdleTimeout = 10 * time.Second + connPool := NewPool(tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "PoolTest"), "TestPool", cfg) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() dbConn, err := connPool.Get(context.Background(), nil) require.NoError(t, err) defer dbConn.Recycle() _, err = connPool.Get(context.Background(), nil) - assert.EqualError(t, err, "resource pool timed out") + assert.EqualError(t, err, "connection pool timed out") } func TestConnPoolGetEmptyDebugConfig(t *testing.T) { db := fakesqldb.New(t) - debugConn := db.ConnParamsWithUname("") + debugConn := dbconfigs.New(db.ConnParamsWithUname("")) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), debugConn) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, debugConn) im := callerid.NewImmediateCallerID("") ecid := callerid.NewEffectiveCallerID("p", "c", "sc") ctx := context.Background() @@ -89,14 +94,15 @@ func TestConnPoolGetEmptyDebugConfig(t *testing.T) { func TestConnPoolGetAppDebug(t *testing.T) { db := fakesqldb.New(t) - debugConn := db.ConnParamsWithUname("debugUsername") + debugConn := dbconfigs.New(db.ConnParamsWithUname("debugUsername")) ctx := context.Background() im := callerid.NewImmediateCallerID("debugUsername") ecid := callerid.NewEffectiveCallerID("p", "c", "sc") ctx = callerid.NewContext(ctx, ecid, im) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), debugConn) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, debugConn) defer connPool.Close() dbConn, err := connPool.Get(ctx, nil) if err != nil { @@ -115,13 +121,15 @@ func TestConnPoolSetCapacity(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) 
defer connPool.Close() assert.Panics(t, func() { - connPool.SetCapacity(-10) + _ = connPool.SetCapacity(context.Background(), -10) }) - connPool.SetCapacity(10) + err := connPool.SetCapacity(context.Background(), 10) + assert.NoError(t, err) if connPool.Capacity() != 10 { t.Fatalf("capacity should be 10") } @@ -134,7 +142,8 @@ func TestConnPoolStatJSON(t *testing.T) { if connPool.StatsJSON() != "{}" { t.Fatalf("pool is closed, stats json should be empty; was: %q", connPool.StatsJSON()) } - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() statsJSON := connPool.StatsJSON() if statsJSON == "" || statsJSON == "{}" { @@ -153,7 +162,8 @@ func TestConnPoolStateWhilePoolIsOpen(t *testing.T) { defer db.Close() idleTimeout := 10 * time.Second connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() assert.EqualValues(t, 100, connPool.Capacity(), "pool capacity should be 100") assert.EqualValues(t, 0, connPool.Metrics.WaitTime(), "pool wait time should be 0") @@ -179,7 +189,8 @@ func TestConnPoolStateWithSettings(t *testing.T) { defer db.Close() capacity := 5 connPool := newPoolWithCapacity(capacity) - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() assert.EqualValues(t, 5, connPool.Available(), "pool available connections should be 5") assert.EqualValues(t, 0, connPool.Active(), "pool active connections should be 0") @@ -294,7 +305,8 @@ func TestPoolGetConnTime(t *testing.T) { defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() 
connPool.getConnTime.Reset() @@ -325,9 +337,8 @@ func newPool() *Pool { } func newPoolWithCapacity(capacity int) *Pool { - cfg := tabletenv.ConnPoolConfig{ - Size: capacity, - } - _ = cfg.IdleTimeoutSeconds.Set("10s") - return NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) + return NewPool(tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "PoolTest"), "TestPool", tabletenv.ConnPoolConfig{ + Size: capacity, + IdleTimeout: 10 * time.Second, + }) } diff --git a/go/vt/vttablet/tabletserver/controller.go b/go/vt/vttablet/tabletserver/controller.go index ca4eeb8747b..4d7e35862de 100644 --- a/go/vt/vttablet/tabletserver/controller.go +++ b/go/vt/vttablet/tabletserver/controller.go @@ -66,7 +66,7 @@ type Controller interface { // ClearQueryPlanCache clears internal query plan cache ClearQueryPlanCache() - // ReloadSchema makes the quey service reload its schema cache + // ReloadSchema makes the query service reload its schema cache ReloadSchema(ctx context.Context) error // RegisterQueryRuleSource adds a query rule source diff --git a/go/vt/vttablet/tabletserver/debugenv.go b/go/vt/vttablet/tabletserver/debugenv.go index e229c46cadd..924d5acbebb 100644 --- a/go/vt/vttablet/tabletserver/debugenv.go +++ b/go/vt/vttablet/tabletserver/debugenv.go @@ -17,6 +17,7 @@ limitations under the License. 
package tabletserver import ( + "context" "encoding/json" "fmt" "html" @@ -82,6 +83,17 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) f(ival) msg = fmt.Sprintf("Setting %v to: %v", varname, value) } + setIntValCtx := func(f func(context.Context, int) error) { + ival, err := strconv.Atoi(value) + if err == nil { + err = f(r.Context(), ival) + if err == nil { + msg = fmt.Sprintf("Setting %v to: %v", varname, value) + return + } + } + msg = fmt.Sprintf("Failed setting value for %v: %v", varname, err) + } setInt64Val := func(f func(int64)) { ival, err := strconv.ParseInt(value, 10, 64) if err != nil { @@ -111,11 +123,11 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) } switch varname { case "PoolSize": - setIntVal(tsv.SetPoolSize) + setIntValCtx(tsv.SetPoolSize) case "StreamPoolSize": - setIntVal(tsv.SetStreamPoolSize) + setIntValCtx(tsv.SetStreamPoolSize) case "TxPoolSize": - setIntVal(tsv.SetTxPoolSize) + setIntValCtx(tsv.SetTxPoolSize) case "MaxResultSize": setIntVal(tsv.SetMaxResultSize) case "WarnResultSize": @@ -125,7 +137,7 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) case "RowStreamerMaxMySQLReplLagSecs": setInt64Val(func(val int64) { tsv.Config().RowStreamer.MaxMySQLReplLagSecs = val }) case "UnhealthyThreshold": - setDurationVal(func(d time.Duration) { _ = tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Set(d.String()) }) + setDurationVal(func(d time.Duration) { tsv.Config().Healthcheck.UnhealthyThreshold = d }) setDurationVal(tsv.hs.SetUnhealthyThreshold) setDurationVal(tsv.sm.SetUnhealthyThreshold) case "ThrottleMetricThreshold": @@ -145,7 +157,7 @@ func debugEnvHandler(tsv *TabletServer, w http.ResponseWriter, r *http.Request) vars = addVar(vars, "WarnResultSize", tsv.WarnResultSize) vars = addVar(vars, "RowStreamerMaxInnoDBTrxHistLen", func() int64 { return tsv.Config().RowStreamer.MaxInnoDBTrxHistLen }) vars = addVar(vars, 
"RowStreamerMaxMySQLReplLagSecs", func() int64 { return tsv.Config().RowStreamer.MaxMySQLReplLagSecs }) - vars = addVar(vars, "UnhealthyThreshold", tsv.Config().Healthcheck.UnhealthyThresholdSeconds.Get) + vars = addVar(vars, "UnhealthyThreshold", func() time.Duration { return tsv.Config().Healthcheck.UnhealthyThreshold }) vars = addVar(vars, "ThrottleMetricThreshold", tsv.ThrottleMetricThreshold) vars = append(vars, envValue{ Name: "Consolidator", diff --git a/go/vt/vttablet/tabletserver/exclude_race_test.go b/go/vt/vttablet/tabletserver/exclude_race_test.go deleted file mode 100644 index 6e55671ac96..00000000000 --- a/go/vt/vttablet/tabletserver/exclude_race_test.go +++ /dev/null @@ -1,62 +0,0 @@ -//go:build !race - -package tabletserver - -import ( - "context" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/sqltypes" - querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" -) - -// TestHandlePanicAndSendLogStatsMessageTruncation tests that when an error truncation -// length is set and a panic occurs, the code in handlePanicAndSendLogStats will -// truncate the error text in logs, but will not truncate the error text in the -// error value. 
-func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - tl := newTestLogger() - defer tl.Close() - logStats := tabletenv.NewLogStats(ctx, "TestHandlePanicAndSendLogStatsMessageTruncation") - db, tsv := setupTabletServerTest(t, ctx, "") - defer tsv.StopService() - defer db.Close() - - longSql := "select * from test_table_loooooooooooooooooooooooooooooooooooong" - longBv := map[string]*querypb.BindVariable{ - "bv1": sqltypes.Int64BindVariable(1111111111), - "bv2": sqltypes.Int64BindVariable(2222222222), - "bv3": sqltypes.Int64BindVariable(3333333333), - "bv4": sqltypes.Int64BindVariable(4444444444), - } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) - - defer func() { - err := logStats.Error - want := "Uncaught panic for Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" - require.Error(t, err) - assert.Contains(t, err.Error(), want) - want = "Uncaught panic for Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" - gotWhatWeWant := false - for _, log := range tl.getLogs() { - if strings.HasPrefix(log, want) { - gotWhatWeWant = true - break - } - } - assert.True(t, gotWhatWeWant) - }() - - defer tsv.handlePanicAndSendLogStats(longSql, longBv, logStats) - panic("panic from TestHandlePanicAndSendLogStatsMessageTruncation") -} diff --git a/go/vt/vttablet/tabletserver/fuzz.go b/go/vt/vttablet/tabletserver/fuzz.go index fb14455d3f4..c7f3dabde97 100644 --- a/go/vt/vttablet/tabletserver/fuzz.go +++ b/go/vt/vttablet/tabletserver/fuzz.go @@ -23,8 +23,10 @@ import ( fuzz "github.com/AdaLogics/go-fuzz-headers" + "vitess.io/vitess/go/mysql/collations" 
"vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -57,7 +59,7 @@ func FuzzGetPlan(data []byte) int { // Set up the environment config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) defer qe.Close() diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 4947fd9c97a..fced176b027 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -19,7 +19,6 @@ package gc import ( "context" "fmt" - "math" "sort" "sync" "sync/atomic" @@ -27,9 +26,9 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/log" @@ -49,9 +48,12 @@ const ( ) var ( - checkInterval = 1 * time.Hour - purgeReentranceInterval = 1 * time.Minute - gcLifecycle = "hold,purge,evac,drop" + checkInterval = 1 * time.Hour + purgeReentranceInterval = 1 * time.Minute + nextPurgeReentry = 1 * time.Second + checkTablesReentryMinInterval = 10 * time.Second + NextChecksIntervals = []time.Duration{time.Second, checkTablesReentryMinInterval + 5*time.Second} + gcLifecycle = "hold,purge,evac,drop" ) func init() { @@ -65,15 +67,14 @@ func registerGCFlags(fs *pflag.FlagSet) { // purgeReentranceInterval marks the interval between searching tables to purge fs.DurationVar(&purgeReentranceInterval, "gc_purge_check_interval", purgeReentranceInterval, "Interval between purge discovery checks") // gcLifecycle is the sequence of steps the table goes through in the 
process of getting dropped - fs.StringVar(&gcLifecycle, "table_gc_lifecycle", gcLifecycle, "States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implcitly always included)") + fs.StringVar(&gcLifecycle, "table_gc_lifecycle", gcLifecycle, "States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included)") } var ( - sqlPurgeTable = `delete from %a limit 50` - sqlShowVtTables = `show full tables like '\_vt\_%'` - sqlDropTable = "drop table if exists `%a`" - sqlDropView = "drop view if exists `%a`" - purgeReentranceFlag int64 + sqlPurgeTable = `delete from %a limit 50` + sqlShowVtTables = `show full tables like '\_vt\_%'` + sqlDropTable = "drop table if exists `%a`" + sqlDropView = "drop view if exists `%a`" ) type gcTable struct { @@ -105,6 +106,10 @@ type TableGC struct { isOpen int64 cancelOperation context.CancelFunc + purgeReentranceFlag atomic.Int64 + readReentranceFlag atomic.Int64 + checkRequestChan chan bool + throttlerClient *throttle.Client env tabletenv.Env @@ -120,7 +125,7 @@ type TableGC struct { lifecycleStates map[schema.TableGCState]bool } -// Status published some status valus from the collector +// Status published some status values from the collector type Status struct { Keyspace string Shard string @@ -139,11 +144,12 @@ func NewTableGC(env tabletenv.Env, ts *topo.Server, lagThrottler *throttle.Throt env: env, ts: ts, pool: connpool.NewPool(env, "TableGCPool", tabletenv.ConnPoolConfig{ - Size: 2, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 2, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), - purgingTables: map[string]bool{}, + purgingTables: map[string]bool{}, + checkRequestChan: make(chan bool), } return collector @@ -183,7 +189,7 @@ func (collector *TableGC) Open() (err error) { return err } defer conn.Close() - serverSupportsFastDrops, err := 
conn.SupportsCapability(mysql.FastDropTableFlavorCapability) + serverSupportsFastDrops, err := conn.SupportsCapability(capabilities.FastDropTableFlavorCapability) if err != nil { return err } @@ -226,6 +232,16 @@ func (collector *TableGC) Close() { log.Infof("TableGC - finished execution of Close") } +// RequestChecks requests that the GC will do a table check right away, as well as in a few seconds. +// Calling this function is useful to modules that are performing operations that affect GC tables. Those modules +// _know_ that changes have been made, and now have a way to tell TableGC: "please take a look asap rather +// than in the next hour". +func (collector *TableGC) RequestChecks() { + for _, d := range NextChecksIntervals { + time.AfterFunc(d, func() { collector.checkRequestChan <- true }) + } +} + // operate is the main entry point for the table garbage collector operation and logic. func (collector *TableGC) operate(ctx context.Context) { @@ -254,55 +270,46 @@ func (collector *TableGC) operate(ctx context.Context) { case <-ctx.Done(): log.Info("TableGC: done operating") return + case <-collector.checkRequestChan: + // Got a request to check tables. Probably some event took place and we will + // find something new to do. 
+ go tableCheckTicker.TickNow() case <-tableCheckTicker.C: - { - log.Info("TableGC: tableCheckTicker") - if gcTables, err := collector.readTables(ctx); err != nil { - log.Errorf("TableGC: error while reading tables: %+v", err) - } else { - _ = collector.checkTables(ctx, gcTables, dropTablesChan, transitionRequestsChan) - } + if err := collector.readAndCheckTables(ctx, dropTablesChan, transitionRequestsChan); err != nil { + log.Error(err) } case <-purgeReentranceTicker.C: - { - // relay the request - go func() { purgeRequestsChan <- true }() - } + // relay the request + go func() { purgeRequestsChan <- true }() case <-purgeRequestsChan: - { - go func() { - tableName, err := collector.purge(ctx) - if err != nil { - log.Errorf("TableGC: error purging table %s: %+v", tableName, err) - return - } - if tableName == "" { - // No table purged (or at least not to completion) - // Either because there _is_ nothing to purge, or because PURGE isn't a handled state - return - } - // The table has been purged! Let's move the table into the next phase: - _, _, uuid, _, _ := schema.AnalyzeGCTableName(tableName) - collector.submitTransitionRequest(ctx, transitionRequestsChan, schema.PurgeTableGCState, tableName, true, uuid) - collector.removePurgingTable(tableName) - // Chances are, there's more tables waiting to be purged. 
Let's speed things by - // requesting another purge, instead of waiting a full purgeReentranceInterval cycle - time.AfterFunc(time.Second, func() { purgeRequestsChan <- true }) - }() - } - case dropTable := <-dropTablesChan: - { - log.Infof("TableGC: found %v in dropTablesChan", dropTable.tableName) - if err := collector.dropTable(ctx, dropTable.tableName, dropTable.isBaseTable); err != nil { - log.Errorf("TableGC: error dropping table %s: %+v", dropTable.tableName, err) + go func() { + tableName, err := collector.purge(ctx) + if err != nil { + log.Errorf("TableGC: error purging table %s: %+v", tableName, err) + return } + if tableName == "" { + // No table purged (or at least not to completion) + // Either because there _is_ nothing to purge, or because PURGE isn't a handled state + return + } + // The table has been purged! Let's move the table into the next phase: + _, _, uuid, _, _ := schema.AnalyzeGCTableName(tableName) + collector.submitTransitionRequest(ctx, transitionRequestsChan, schema.PurgeTableGCState, tableName, true, uuid) + collector.removePurgingTable(tableName) + // Chances are, there's more tables waiting to be purged. 
Let's speed things by + // requesting another purge, instead of waiting a full purgeReentranceInterval cycle + purgeReentranceTicker.TickAfter(nextPurgeReentry) + }() + case dropTable := <-dropTablesChan: + log.Infof("TableGC: found %v in dropTablesChan", dropTable.tableName) + if err := collector.dropTable(ctx, dropTable.tableName, dropTable.isBaseTable); err != nil { + log.Errorf("TableGC: error dropping table %s: %+v", dropTable.tableName, err) } case transition := <-transitionRequestsChan: - { - log.Info("TableGC: transitionRequestsChan, transition=%v", transition) - if err := collector.transitionTable(ctx, transition); err != nil { - log.Errorf("TableGC: error transitioning table %s to %+v: %+v", transition.fromTableName, transition.toGCState, err) - } + log.Info("TableGC: transitionRequestsChan, transition=%v", transition) + if err := collector.transitionTable(ctx, transition); err != nil { + log.Errorf("TableGC: error transitioning table %s to %+v: %+v", transition.fromTableName, transition.toGCState, err) } } } @@ -378,17 +385,41 @@ func (collector *TableGC) shouldTransitionTable(tableName string) (shouldTransit return true, state, uuid, nil } +// readAndCheckTables is the routine check for which GC tables exist, and which of those need to transition +// into the next state. The function is non-reentrant, and poses a minimal duration between any two executions. 
+func (collector *TableGC) readAndCheckTables( + ctx context.Context, + dropTablesChan chan<- *gcTable, + transitionRequestsChan chan<- *transitionRequest, +) (err error) { + if !collector.readReentranceFlag.CompareAndSwap(0, 1) { + // An instance of this function is already running + return nil + } + defer time.AfterFunc(checkTablesReentryMinInterval, func() { + collector.readReentranceFlag.Store(0) + }) + + log.Info("TableGC: readAndCheckTables") + gcTables, err := collector.readTables(ctx) + if err != nil { + return fmt.Errorf("TableGC: error while reading tables: %+v", err) + } + if err := collector.checkTables(ctx, gcTables, dropTablesChan, transitionRequestsChan); err != nil { + return err + } + return nil +} + // readTables reads the list of _vt_% tables from the database func (collector *TableGC) readTables(ctx context.Context) (gcTables []*gcTable, err error) { - log.Infof("TableGC: read tables") - conn, err := collector.pool.Get(ctx, nil) if err != nil { return nil, err } defer conn.Recycle() - res, err := conn.Conn.Exec(ctx, sqlShowVtTables, math.MaxInt32, true) + res, err := conn.Conn.Exec(ctx, sqlShowVtTables, -1, true) if err != nil { return nil, err } @@ -406,8 +437,6 @@ func (collector *TableGC) readTables(ctx context.Context) (gcTables []*gcTable, // It lists _vt_% tables, then filters through those which are due-date. // It then applies the necessary operation per table. 
func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, dropTablesChan chan<- *gcTable, transitionRequestsChan chan<- *transitionRequest) error { - log.Infof("TableGC: check tables") - for i := range gcTables { table := gcTables[i] // we capture as local variable as we will later use this in a goroutine shouldTransition, state, uuid, err := collector.shouldTransitionTable(table.tableName) @@ -457,12 +486,11 @@ func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, // This function is non-reentrant: there's only one instance of this function running at any given time. // A timer keeps calling this function, so if it bails out (e.g. on error) it will later resume work func (collector *TableGC) purge(ctx context.Context) (tableName string, err error) { - if atomic.CompareAndSwapInt64(&purgeReentranceFlag, 0, 1) { - defer atomic.StoreInt64(&purgeReentranceFlag, 0) - } else { + if !collector.purgeReentranceFlag.CompareAndSwap(0, 1) { // An instance of this function is already running return "", nil } + defer collector.purgeReentranceFlag.Store(0) tableName, found := collector.nextTableToPurge() if !found { @@ -575,7 +603,7 @@ func (collector *TableGC) transitionTable(ctx context.Context, transition *trans // when we transition into PURGE, that means we want to begin purging immediately // when we transition into DROP, that means we want to drop immediately - // Thereforce the default timestamp is Now + // Therefore the default timestamp is Now t := time.Now().UTC() switch transition.toGCState { case schema.EvacTableGCState: @@ -598,16 +626,29 @@ func (collector *TableGC) transitionTable(ctx context.Context, transition *trans return err } log.Infof("TableGC: renamed table: %s", transition.fromTableName) + // Since the table has transitioned, there is a potential for more work on this table or on other tables, + // let's kick a check request. 
+ collector.RequestChecks() return nil } -// addPurgingTable adds a table to the list of droppingpurging (or pending purging) tables +// addPurgingTable adds a table to the list of dropping purging (or pending purging) tables func (collector *TableGC) addPurgingTable(tableName string) (added bool) { if _, ok := collector.lifecycleStates[schema.PurgeTableGCState]; !ok { // PURGE is not a handled state. We don't want to purge this table or any other table, // so we don't populate the purgingTables map. return false } + isGCTable, state, _, _, err := schema.AnalyzeGCTableName(tableName) + if err != nil { + return false + } + if !isGCTable { + return false + } + if state != schema.PurgeTableGCState { + return false + } collector.purgeMutex.Lock() defer collector.purgeMutex.Unlock() diff --git a/go/vt/vttablet/tabletserver/gc/tablegc_test.go b/go/vt/vttablet/tabletserver/gc/tablegc_test.go index 446f6e6ff85..6e26a77f291 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc_test.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc_test.go @@ -29,15 +29,18 @@ import ( func TestNextTableToPurge(t *testing.T) { tt := []struct { + name string tables []string next string ok bool }{ { + name: "empty", tables: []string{}, ok: false, }, { + name: "first", tables: []string{ "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", "_vt_PURGE_2ace8bcef73211ea87e9f875a4d24e90_20200915120411", @@ -48,6 +51,7 @@ func TestNextTableToPurge(t *testing.T) { ok: true, }, { + name: "mid", tables: []string{ "_vt_PURGE_2ace8bcef73211ea87e9f875a4d24e90_20200915120411", "_vt_PURGE_3ace8bcef73211ea87e9f875a4d24e90_20200915120412", @@ -57,19 +61,71 @@ func TestNextTableToPurge(t *testing.T) { next: "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", ok: true, }, + { + name: "none", + tables: []string{ + "_vt_HOLD_2ace8bcef73211ea87e9f875a4d24e90_20200915120411", + "_vt_EVAC_3ace8bcef73211ea87e9f875a4d24e90_20200915120412", + "_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_20200915120410", + 
"_vt_DROP_4ace8bcef73211ea87e9f875a4d24e90_20200915120413", + }, + next: "", + ok: false, + }, + { + name: "first, new format", + tables: []string{ + "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_prg_2ace8bcef73211ea87e9f875a4d24e90_20200915120411_", + "_vt_prg_3ace8bcef73211ea87e9f875a4d24e90_20200915120412_", + "_vt_prg_4ace8bcef73211ea87e9f875a4d24e90_20200915120413_", + }, + next: "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + ok: true, + }, + { + name: "mid, new format", + tables: []string{ + "_vt_prg_2ace8bcef73211ea87e9f875a4d24e90_20200915120411_", + "_vt_prg_3ace8bcef73211ea87e9f875a4d24e90_20200915120412_", + "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_prg_4ace8bcef73211ea87e9f875a4d24e90_20200915120413_", + }, + next: "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + ok: true, + }, + { + name: "none, new format", + tables: []string{ + "_vt_hld_2ace8bcef73211ea87e9f875a4d24e90_20200915120411_", + "_vt_evc_3ace8bcef73211ea87e9f875a4d24e90_20200915120412_", + "_vt_evc_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + "_vt_drp_4ace8bcef73211ea87e9f875a4d24e90_20200915120413_", + "_vt_prg_4ace8bcef73211ea87e9f875a4d24e90_20200915999999_", + }, + next: "", + ok: false, + }, } for _, ts := range tt { - collector := &TableGC{ - purgingTables: make(map[string]bool), - } - for _, table := range ts.tables { - collector.purgingTables[table] = true - } - next, ok := collector.nextTableToPurge() - assert.Equal(t, ts.ok, ok) - if ok { - assert.Equal(t, ts.next, next) - } + t.Run(ts.name, func(t *testing.T) { + collector := &TableGC{ + purgingTables: make(map[string]bool), + checkRequestChan: make(chan bool), + } + var err error + collector.lifecycleStates, err = schema.ParseGCLifecycle("hold,purge,evac,drop") + assert.NoError(t, err) + for _, table := range ts.tables { + collector.addPurgingTable(table) + } + + next, ok := collector.nextTableToPurge() + assert.Equal(t, ts.ok, ok) + if ok { + 
assert.Equal(t, ts.next, next) + } + }) } } @@ -171,6 +227,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "6ace8bcef73211ea87e9f875a4d24e90", shouldTransition: true, }, + { + name: "purge, old timestamp, new format", + table: "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_20200915120410_", + state: schema.PurgeTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + shouldTransition: true, + }, { name: "no purge, future timestamp", table: "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -178,6 +241,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "6ace8bcef73211ea87e9f875a4d24e90", shouldTransition: false, }, + { + name: "no purge, future timestamp, new format", + table: "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_29990915120410_", + state: schema.PurgeTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + shouldTransition: false, + }, { name: "no purge, PURGE not handled state", table: "_vt_PURGE_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -186,6 +256,14 @@ func TestShouldTransitionTable(t *testing.T) { handledStates: "hold,evac", // no PURGE shouldTransition: true, }, + { + name: "no purge, PURGE not handled state, new format", + table: "_vt_prg_6ace8bcef73211ea87e9f875a4d24e90_29990915120410_", + state: schema.PurgeTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + handledStates: "hold,evac", // no PURGE + shouldTransition: true, + }, { name: "no drop, future timestamp", table: "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -193,6 +271,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "6ace8bcef73211ea87e9f875a4d24e90", shouldTransition: false, }, + { + name: "no drop, future timestamp, new format", + table: "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_29990915120410_", + state: schema.DropTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + shouldTransition: false, + }, { name: "drop, old timestamp", table: "_vt_DROP_6ace8bcef73211ea87e9f875a4d24e90_20090915120410", @@ -200,6 
+285,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "6ace8bcef73211ea87e9f875a4d24e90", shouldTransition: true, }, + { + name: "drop, old timestamp, new format", + table: "_vt_drp_6ace8bcef73211ea87e9f875a4d24e90_20090915120410_", + state: schema.DropTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + shouldTransition: true, + }, { name: "no evac, future timestamp", table: "_vt_EVAC_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -207,6 +299,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "6ace8bcef73211ea87e9f875a4d24e90", shouldTransition: false, }, + { + name: "no evac, future timestamp, new format", + table: "_vt_evc_6ace8bcef73211ea87e9f875a4d24e90_29990915120410_", + state: schema.EvacTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + shouldTransition: false, + }, { name: "no hold, HOLD not handled state", table: "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -214,6 +313,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "6ace8bcef73211ea87e9f875a4d24e90", shouldTransition: true, }, + { + name: "no hold, HOLD not handled state, new format", + table: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_29990915120410_", + state: schema.HoldTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + shouldTransition: true, + }, { name: "hold, future timestamp", table: "_vt_HOLD_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -222,6 +328,14 @@ func TestShouldTransitionTable(t *testing.T) { handledStates: "hold,purge,evac,drop", shouldTransition: false, }, + { + name: "hold, future timestamp, new format", + table: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_29990915120410_", + state: schema.HoldTableGCState, + uuid: "6ace8bcef73211ea87e9f875a4d24e90", + handledStates: "hold,purge,evac,drop", + shouldTransition: false, + }, { name: "not a GC table", table: "_vt_SOMETHING_6ace8bcef73211ea87e9f875a4d24e90_29990915120410", @@ -229,6 +343,13 @@ func TestShouldTransitionTable(t *testing.T) { uuid: "", 
shouldTransition: false, }, + { + name: "invalid new format", + table: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_29990915999999_", + state: "", + uuid: "", + shouldTransition: false, + }, } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { @@ -256,8 +377,9 @@ func TestShouldTransitionTable(t *testing.T) { func TestCheckTables(t *testing.T) { collector := &TableGC{ - isOpen: 0, - purgingTables: map[string]bool{}, + isOpen: 0, + purgingTables: map[string]bool{}, + checkRequestChan: make(chan bool), } var err error collector.lifecycleStates, err = schema.ParseGCLifecycle("hold,purge,evac,drop") @@ -268,35 +390,70 @@ func TestCheckTables(t *testing.T) { tableName: "_vt_something_that_isnt_a_gc_table", isBaseTable: true, }, + { + tableName: "_vt_hld_6ace8bcef73211ea87e9f875a4d24e90_29990915999999_", + isBaseTable: true, + }, { tableName: "_vt_HOLD_11111111111111111111111111111111_20990920093324", // 2099 is in the far future isBaseTable: true, }, + { + tableName: "_vt_hld_11111111111111111111111111111111_20990920093324_", // 2099 is in the far future + isBaseTable: true, + }, { tableName: "_vt_HOLD_22222222222222222222222222222222_20200920093324", isBaseTable: true, }, + { + tableName: "_vt_hld_22222222222222222222222222222222_20200920093324_", + isBaseTable: true, + }, { tableName: "_vt_DROP_33333333333333333333333333333333_20200919083451", isBaseTable: true, }, + { + tableName: "_vt_drp_33333333333333333333333333333333_20200919083451_", + isBaseTable: true, + }, { tableName: "_vt_DROP_44444444444444444444444444444444_20200919083451", isBaseTable: false, }, + { + tableName: "_vt_drp_44444444444444444444444444444444_20200919083451_", + isBaseTable: false, + }, } - // one gcTable above is irrelevant, does not have a GC table name + expectResponses := len(gcTables) + // one gcTable above is irrelevant: it does not have a GC table name + expectResponses = expectResponses - 1 // one will not transition: its date is 2099 - expectResponses := len(gcTables) - 2 + 
expectResponses = expectResponses - 1 + // one gcTable above is irrelevant: it has an invalid new format timestamp + expectResponses = expectResponses - 1 + // one will not transition: its date is 2099 in new format + expectResponses = expectResponses - 1 + expectDropTables := []*gcTable{ { tableName: "_vt_DROP_33333333333333333333333333333333_20200919083451", isBaseTable: true, }, + { + tableName: "_vt_drp_33333333333333333333333333333333_20200919083451_", + isBaseTable: true, + }, { tableName: "_vt_DROP_44444444444444444444444444444444_20200919083451", isBaseTable: false, }, + { + tableName: "_vt_drp_44444444444444444444444444444444_20200919083451_", + isBaseTable: false, + }, } expectTransitionRequests := []*transitionRequest{ { @@ -305,6 +462,12 @@ func TestCheckTables(t *testing.T) { toGCState: schema.PurgeTableGCState, uuid: "22222222222222222222222222222222", }, + { + fromTableName: "_vt_hld_22222222222222222222222222222222_20200920093324_", + isBaseTable: true, + toGCState: schema.PurgeTableGCState, + uuid: "22222222222222222222222222222222", + }, } ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) diff --git a/go/vt/vttablet/tabletserver/health_streamer.go b/go/vt/vttablet/tabletserver/health_streamer.go index 87c70a7133d..c13d11df69e 100644 --- a/go/vt/vttablet/tabletserver/health_streamer.go +++ b/go/vt/vttablet/tabletserver/health_streamer.go @@ -20,25 +20,19 @@ import ( "context" "fmt" "io" - "strings" "sync" "sync/atomic" "time" "github.com/spf13/pflag" - "vitess.io/vitess/go/constants/sidecar" - vtschema "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/sqlparser" - "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/history" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -50,8 +44,8 @@ 
import ( ) var ( - // blpFunc is a legaacy feature. - // TODO(sougou): remove after legacy resharding worflows are removed. + // blpFunc is a legacy feature. + // TODO(sougou): remove after legacy resharding workflows are removed. blpFunc = vreplication.StatusSummary errUnintialized = "tabletserver uninitialized" @@ -98,13 +92,13 @@ func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias, engine if env.Config().SignalWhenSchemaChange { // We need one connection for the reloader. pool = connpool.NewPool(env, "", tabletenv.ConnPoolConfig{ - Size: 1, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 1, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }) } hs := &healthStreamer{ stats: env.Stats(), - degradedThreshold: env.Config().Healthcheck.DegradedThresholdSeconds.Get(), + degradedThreshold: env.Config().Healthcheck.DegradedThreshold, clients: make(map[chan *querypb.StreamHealthResponse]struct{}), state: &querypb.StreamHealthResponse{ @@ -122,7 +116,7 @@ func newHealthStreamer(env tabletenv.Env, alias *topodatapb.TabletAlias, engine viewsEnabled: env.Config().EnableViews, se: engine, } - hs.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) + hs.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThreshold.Nanoseconds()) return hs } @@ -318,8 +312,8 @@ func (hs *healthStreamer) MakePrimary(serving bool) { // We register for notifications from the schema Engine only when schema tracking is enabled, // and we are going to a serving primary state. 
if serving && hs.signalWhenSchemaChange { - hs.se.RegisterNotifier("healthStreamer", func(full map[string]*schema.Table, created, altered, dropped []*schema.Table) { - if err := hs.reload(full, created, altered, dropped); err != nil { + hs.se.RegisterNotifier("healthStreamer", func(full map[string]*schema.Table, created, altered, dropped []*schema.Table, udfsChanged bool) { + if err := hs.reload(created, altered, dropped, udfsChanged); err != nil { log.Errorf("periodic schema reload failed in health stream: %v", err) } }, false) @@ -334,7 +328,7 @@ func (hs *healthStreamer) MakeNonPrimary() { } // reload reloads the schema from the underlying mysql for the tables that we get the alert on. -func (hs *healthStreamer) reload(full map[string]*schema.Table, created, altered, dropped []*schema.Table) error { +func (hs *healthStreamer) reload(created, altered, dropped []*schema.Table, udfsChanged bool) error { hs.mu.Lock() defer hs.mu.Unlock() // Schema Reload to happen only on primary when it is serving. @@ -371,63 +365,18 @@ func (hs *healthStreamer) reload(full map[string]*schema.Table, created, altered } } - // Reload the tables and views. - // This stores the data that is used by VTGates upto v17. So, we can remove this reload of - // tables and views in v19. 
- err = hs.reloadTables(ctx, conn.Conn, tables) - if err != nil { - return err - } - // no change detected - if len(tables) == 0 && len(views) == 0 { + if len(tables) == 0 && len(views) == 0 && !udfsChanged { return nil } hs.state.RealtimeStats.TableSchemaChanged = tables hs.state.RealtimeStats.ViewSchemaChanged = views + hs.state.RealtimeStats.UdfsChanged = udfsChanged shr := hs.state.CloneVT() hs.broadCastToClients(shr) hs.state.RealtimeStats.TableSchemaChanged = nil hs.state.RealtimeStats.ViewSchemaChanged = nil - - return nil -} - -func (hs *healthStreamer) reloadTables(ctx context.Context, conn *connpool.Conn, tableNames []string) error { - if len(tableNames) == 0 { - return nil - } - var escapedTableNames []string - for _, tableName := range tableNames { - escapedTblName := sqlparser.String(sqlparser.NewStrLiteral(tableName)) - escapedTableNames = append(escapedTableNames, escapedTblName) - } - - tableNamePredicate := fmt.Sprintf("table_name IN (%s)", strings.Join(escapedTableNames, ", ")) - del := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query, tableNamePredicate) - upd := fmt.Sprintf("%s AND %s", sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query, tableNamePredicate) - - // Reload the schema in a transaction. 
- _, err := conn.Exec(ctx, "begin", 1, false) - if err != nil { - return err - } - defer conn.Exec(ctx, "rollback", 1, false) - - _, err = conn.Exec(ctx, del, 1, false) - if err != nil { - return err - } - - _, err = conn.Exec(ctx, upd, 1, false) - if err != nil { - return err - } - - _, err = conn.Exec(ctx, "commit", 1, false) - if err != nil { - return err - } + hs.state.RealtimeStats.UdfsChanged = false return nil } diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index b2fbb2db1ea..ff61787dd1d 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -29,14 +29,13 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" - "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -44,8 +43,8 @@ import ( func TestHealthStreamerClosed(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - config := newConfig(db) - env := tabletenv.NewEnv(config, "ReplTrackerTest") + cfg := newConfig(db) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "ReplTrackerTest") alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -69,10 +68,10 @@ func newConfig(db *fakesqldb.DB) *tabletenv.TabletConfig { func TestNotServingPrimaryNoWrite(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - config := newConfig(db) - config.SignalWhenSchemaChange = true + cfg := newConfig(db) + cfg.SignalWhenSchemaChange = true - env := tabletenv.NewEnv(config, "TestNotServingPrimary") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, 
"TestNotServingPrimary") alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -80,11 +79,11 @@ func TestNotServingPrimaryNoWrite(t *testing.T) { // Create a new health streamer and set it to a serving primary state hs := newHealthStreamer(env, alias, &schema.Engine{}) hs.isServingPrimary = true - hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, config.DB.DbaWithDB()) + hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, cfg.DB.DbaWithDB()) hs.Open() defer hs.Close() target := &querypb.Target{} - hs.InitDBConfig(target, db.ConnParams()) + hs.InitDBConfig(target, dbconfigs.New(db.ConnParams())) // Let's say the tablet goes to a non-serving primary state. hs.MakePrimary(false) @@ -92,7 +91,7 @@ func TestNotServingPrimaryNoWrite(t *testing.T) { // A reload now should not write anything to the database. If any write happens it will error out since we have not // added any query to the database to expect. t1 := schema.NewTable("t1", schema.NoType) - err := hs.reload(map[string]*schema.Table{"t1": t1}, []*schema.Table{t1}, nil, nil) + err := hs.reload([]*schema.Table{t1}, nil, nil, false) require.NoError(t, err) require.NoError(t, db.LastError()) } @@ -100,21 +99,21 @@ func TestNotServingPrimaryNoWrite(t *testing.T) { func TestHealthStreamerBroadcast(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - config := newConfig(db) - config.SignalWhenSchemaChange = false + cfg := newConfig(db) + cfg.SignalWhenSchemaChange = false - env := tabletenv.NewEnv(config, "ReplTrackerTest") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "ReplTrackerTest") alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, } blpFunc = testBlpFunc hs := newHealthStreamer(env, alias, &schema.Engine{}) - hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, config.DB.DbaWithDB()) + hs.InitDBConfig(&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY}, cfg.DB.DbaWithDB()) hs.Open() defer hs.Close() target := 
&querypb.Target{} - hs.InitDBConfig(target, db.ConnParams()) + hs.InitDBConfig(target, dbconfigs.New(db.ConnParams())) ch, cancel := testStream(hs) defer cancel() @@ -214,11 +213,11 @@ func TestReloadSchema(t *testing.T) { defer cancel() db := fakesqldb.New(t) defer db.Close() - config := newConfig(db) - config.SignalWhenSchemaChange = testcase.enableSchemaChange - _ = config.SchemaReloadIntervalSeconds.Set("100ms") + cfg := newConfig(db) + cfg.SignalWhenSchemaChange = testcase.enableSchemaChange + cfg.SchemaReloadInterval = 100 * time.Millisecond - env := tabletenv.NewEnv(config, "ReplTrackerTest") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "ReplTrackerTest") alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -228,10 +227,8 @@ func TestReloadSchema(t *testing.T) { hs := newHealthStreamer(env, alias, se) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - configs := config.DB + configs := cfg.DB - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) db.AddQueryPattern("SELECT UNIX_TIMESTAMP()"+".*", sqltypes.MakeTestResult( sqltypes.MakeTestFields( "UNIX_TIMESTAMP(now())", @@ -331,21 +328,19 @@ func TestReloadView(t *testing.T) { defer cancel() db := fakesqldb.New(t) defer db.Close() - config := newConfig(db) - config.SignalWhenSchemaChange = true - _ = config.SchemaReloadIntervalSeconds.Set("100ms") - config.EnableViews = true + cfg := newConfig(db) + cfg.SignalWhenSchemaChange = true + cfg.SchemaReloadInterval = 100 * time.Millisecond + cfg.EnableViews = true - env := tabletenv.NewEnv(config, "TestReloadView") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TestReloadView") alias := &topodatapb.TabletAlias{Cell: "cell", Uid: 1} se := schema.NewEngine(env) hs := newHealthStreamer(env, alias, se) target := 
&querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} - configs := config.DB + configs := cfg.DB - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) - db.AddQueryPattern(sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query+".*", &sqltypes.Result{}) db.AddQueryPattern("SELECT UNIX_TIMESTAMP()"+".*", sqltypes.MakeTestResult( sqltypes.MakeTestFields( "UNIX_TIMESTAMP(now())", @@ -385,6 +380,8 @@ func TestReloadView(t *testing.T) { )) db.AddQueryPattern(".*SELECT table_name, view_definition.*views.*", &sqltypes.Result{}) db.AddQuery("SELECT TABLE_NAME, CREATE_TIME FROM _vt.`tables`", &sqltypes.Result{}) + // adding query pattern for udfs + db.AddQueryPattern("SELECT name.*", &sqltypes.Result{}) hs.InitDBConfig(target, configs.DbaWithDB()) se.InitDBConfig(configs.DbaWithDB()) diff --git a/go/vt/vttablet/tabletserver/livequeryz_test.go b/go/vt/vttablet/tabletserver/livequeryz_test.go index 18e62047226..8dad3cd1631 100644 --- a/go/vt/vttablet/tabletserver/livequeryz_test.go +++ b/go/vt/vttablet/tabletserver/livequeryz_test.go @@ -17,20 +17,25 @@ limitations under the License. 
package tabletserver import ( + "context" "net/http" "net/http/httptest" "testing" - "context" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestLiveQueryzHandlerJSON(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz/?format=json", nil) - queryList := NewQueryList("test") - queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) - queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) + queryList := NewQueryList("test", sqlparser.NewTestParser()) + err := queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) + require.NoError(t, err) + err = queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) + require.NoError(t, err) livequeryzHandler([]*QueryList{queryList}, resp, req) } @@ -39,9 +44,11 @@ func TestLiveQueryzHandlerHTTP(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz/", nil) - queryList := NewQueryList("test") - queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) - queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) + queryList := NewQueryList("test", sqlparser.NewTestParser()) + err := queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) + require.NoError(t, err) + err = queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) + require.NoError(t, err) livequeryzHandler([]*QueryList{queryList}, resp, req) } @@ -50,7 +57,7 @@ func TestLiveQueryzHandlerHTTPFailedInvalidForm(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("POST", "/livequeryz/", nil) - livequeryzHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) @@ -61,9 +68,10 @@ func 
TestLiveQueryzHandlerTerminateConn(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz//terminate?connID=1", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) testConn := &testConn{id: 1} - queryList.Add(NewQueryDetail(context.Background(), testConn)) + err := queryList.Add(NewQueryDetail(context.Background(), testConn)) + require.NoError(t, err) if testConn.IsKilled() { t.Fatalf("conn should still be alive") } @@ -77,7 +85,7 @@ func TestLiveQueryzHandlerTerminateFailedInvalidConnID(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz//terminate?connID=invalid", nil) - livequeryzTerminateHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzTerminateHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) @@ -88,7 +96,7 @@ func TestLiveQueryzHandlerTerminateFailedInvalidForm(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("POST", "/livequeryz//terminate?inva+lid=2", nil) - livequeryzTerminateHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzTerminateHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) diff --git a/go/vt/vttablet/tabletserver/messager/engine.go b/go/vt/vttablet/tabletserver/messager/engine.go index 4204c5c0b7e..612619f7ccc 100644 --- a/go/vt/vttablet/tabletserver/messager/engine.go +++ b/go/vt/vttablet/tabletserver/messager/engine.go @@ -138,7 +138,7 @@ func (me *Engine) Subscribe(ctx context.Context, name string, send func(*sqltype return mm.Subscribe(ctx, send), nil } -func (me *Engine) 
schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table) { +func (me *Engine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table, _ bool) { me.mu.Lock() defer me.mu.Unlock() for _, table := range append(dropped, altered...) { diff --git a/go/vt/vttablet/tabletserver/messager/engine_test.go b/go/vt/vttablet/tabletserver/messager/engine_test.go index e134a6fbe21..30e849ac73b 100644 --- a/go/vt/vttablet/tabletserver/messager/engine_test.go +++ b/go/vt/vttablet/tabletserver/messager/engine_test.go @@ -21,10 +21,10 @@ import ( "reflect" "testing" - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -67,33 +67,31 @@ var ( ) func TestEngineSchemaChanged(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - engine := newTestEngine(db) + engine := newTestEngine() defer engine.Close() - engine.schemaChanged(nil, []*schema.Table{meTableT1, tableT2}, nil, nil) + engine.schemaChanged(nil, []*schema.Table{meTableT1, tableT2}, nil, nil, true) got := extractManagerNames(engine.managers) want := map[string]bool{"t1": true} if !reflect.DeepEqual(got, want) { t.Errorf("got: %+v, want %+v", got, want) } - engine.schemaChanged(nil, []*schema.Table{meTableT3}, nil, nil) + engine.schemaChanged(nil, []*schema.Table{meTableT3}, nil, nil, true) got = extractManagerNames(engine.managers) want = map[string]bool{"t1": true, "t3": true} if !reflect.DeepEqual(got, want) { t.Errorf("got: %+v, want %+v", got, want) } - engine.schemaChanged(nil, []*schema.Table{meTableT4}, nil, []*schema.Table{meTableT3, tableT5}) + engine.schemaChanged(nil, []*schema.Table{meTableT4}, nil, []*schema.Table{meTableT3, tableT5}, true) got = 
extractManagerNames(engine.managers) want = map[string]bool{"t1": true, "t4": true} if !reflect.DeepEqual(got, want) { t.Errorf("got: %+v, want %+v", got, want) } // Test update - engine.schemaChanged(nil, nil, []*schema.Table{meTableT2, tableT4}, nil) + engine.schemaChanged(nil, nil, []*schema.Table{meTableT2, tableT4}, nil, true) got = extractManagerNames(engine.managers) want = map[string]bool{"t1": true, "t2": true} if !reflect.DeepEqual(got, want) { @@ -110,10 +108,8 @@ func extractManagerNames(in map[string]*messageManager) map[string]bool { } func TestSubscribe(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - engine := newTestEngine(db) - engine.schemaChanged(nil, []*schema.Table{meTableT1, meTableT2}, nil, nil) + engine := newTestEngine() + engine.schemaChanged(nil, []*schema.Table{meTableT1, meTableT2}, nil, nil, true) f1, ch1 := newEngineReceiver() f2, ch2 := newEngineReceiver() // Each receiver is subscribed to different managers. @@ -142,11 +138,9 @@ func TestSubscribe(t *testing.T) { } func TestEngineGenerate(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - engine := newTestEngine(db) + engine := newTestEngine() defer engine.Close() - engine.schemaChanged(nil, []*schema.Table{meTableT1}, nil, nil) + engine.schemaChanged(nil, []*schema.Table{meTableT1}, nil, nil, true) if _, err := engine.GetGenerator("t1"); err != nil { t.Error(err) @@ -157,10 +151,10 @@ func TestEngineGenerate(t *testing.T) { } } -func newTestEngine(db *fakesqldb.DB) *Engine { - config := tabletenv.NewDefaultConfig() +func newTestEngine() *Engine { + cfg := tabletenv.NewDefaultConfig() tsv := &fakeTabletServer{ - Env: tabletenv.NewEnv(config, "MessagerTest"), + Env: tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "MessagerTest"), } se := schema.NewEngine(tsv) te := NewEngine(tsv, se, newFakeVStreamer()) diff --git a/go/vt/vttablet/tabletserver/messager/message_manager.go b/go/vt/vttablet/tabletserver/messager/message_manager.go index 0629b31629f..2f4f8605870 100644 
--- a/go/vt/vttablet/tabletserver/messager/message_manager.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager.go @@ -21,24 +21,24 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "sync" "time" "golang.org/x/sync/semaphore" "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" ) var ( @@ -227,7 +227,7 @@ type messageManager struct { // wg is for ensuring all running goroutines have returned // before we can close the manager. You need to Add before - // launching any gorooutine while holding a lock on mu. + // launching any goroutine while holding a lock on mu. // The goroutine must in turn defer on Done. wg sync.WaitGroup @@ -272,7 +272,7 @@ func newMessageManager(tsv TabletService, vs VStreamer, table *schema.Table, pos } mm.readByPriorityAndTimeNext = sqlparser.BuildParsedQuery( // There should be a poller_idx defined on (time_acked, priority, time_next desc) - // for this to be as effecient as possible + // for this to be as efficient as possible "select priority, time_next, epoch, time_acked, %s from %v where time_acked is null and time_next < %a order by priority, time_next desc limit %a", columnList, mm.name, ":time_next", ":max") mm.ackQuery = sqlparser.BuildParsedQuery( @@ -366,37 +366,37 @@ func (mm *messageManager) Open() { // Close stops the messageManager service. 
func (mm *messageManager) Close() { - log.Infof("messageManager - started execution of Close") + log.Infof("messageManager (%v) - started execution of Close", mm.name) mm.pollerTicks.Stop() mm.purgeTicks.Stop() - log.Infof("messageManager - stopped the ticks. Acquiring mu Lock") + log.Infof("messageManager (%v) - stopped the ticks. Acquiring mu Lock", mm.name) mm.mu.Lock() - log.Infof("messageManager - acquired mu Lock") + log.Infof("messageManager (%v) - acquired mu Lock", mm.name) if !mm.isOpen { - log.Infof("messageManager - manager is not open") + log.Infof("messageManager (%v) - manager is not open", mm.name) mm.mu.Unlock() return } mm.isOpen = false - log.Infof("messageManager - cancelling all receivers") + log.Infof("messageManager (%v) - cancelling all receivers", mm.name) for _, rcvr := range mm.receivers { rcvr.receiver.cancel() } mm.receivers = nil MessageStats.Set([]string{mm.name.String(), "ClientCount"}, 0) - log.Infof("messageManager - clearing cache") + log.Infof("messageManager (%v) - clearing cache", mm.name) mm.cache.Clear() - log.Infof("messageManager - sending a broadcast") + log.Infof("messageManager (%v) - sending a broadcast", mm.name) // This broadcast will cause runSend to exit. 
mm.cond.Broadcast() - log.Infof("messageManager - stopping VStream") + log.Infof("messageManager (%v) - stopping VStream", mm.name) mm.stopVStream() mm.mu.Unlock() - log.Infof("messageManager - Waiting for the wait group") + log.Infof("messageManager (%v) - Waiting for the wait group", mm.name) mm.wg.Wait() - log.Infof("messageManager - closed") + log.Infof("messageManager (%v) - closed", mm.name) } // Subscribe registers the send function as a receiver of messages @@ -414,7 +414,7 @@ func (mm *messageManager) Subscribe(ctx context.Context, send func(*sqltypes.Res } if err := receiver.Send(mm.fieldResult); err != nil { - log.Errorf("Terminating connection due to error sending field info: %v", err) + log.Errorf("messageManager (%v) - Terminating connection due to error sending field info: %v", mm.name, err) receiver.cancel() return done } @@ -578,7 +578,7 @@ func (mm *messageManager) runSend() { go func() { err := mm.send(context.Background(), receiver, &sqltypes.Result{Rows: rows}) // calls the offsetting mm.wg.Done() if err != nil { - log.Errorf("messageManager - send failed: %v", err) + log.Errorf("messageManager (%v) - send failed: %v", mm.name, err) } }() } @@ -621,7 +621,7 @@ func (mm *messageManager) send(ctx context.Context, receiver *receiverWithStatus // Log the error, but we still want to postpone the message. // Otherwise, if this is a chronic failure like "message too // big", we'll end up spamming non-stop. 
- log.Errorf("Error sending messages: %v: %v", qr, err) + log.Errorf("messageManager (%v) - Error sending messages: %v: %v", mm.name, qr, err) } return mm.postpone(ctx, mm.tsv, mm.ackWaitTime, ids) } @@ -652,7 +652,7 @@ func (mm *messageManager) startVStream() { } func (mm *messageManager) stopVStream() { - log.Infof("messageManager - calling stream cancel") + log.Infof("messageManager (%v) - calling stream cancel", mm.name) if mm.streamCancel != nil { mm.streamCancel() mm.streamCancel = nil @@ -664,12 +664,12 @@ func (mm *messageManager) runVStream(ctx context.Context) { err := mm.runOneVStream(ctx) select { case <-ctx.Done(): - log.Info("Context canceled, exiting vstream") + log.Info("messageManager (%v) - Context canceled, exiting vstream", mm.name) return default: } MessageStats.Add([]string{mm.name.String(), "VStreamFailed"}, 1) - log.Infof("VStream ended: %v, retrying in 5 seconds", err) + log.Infof("messageManager (%v) - VStream ended: %v, retrying in 5 seconds", mm.name, err) time.Sleep(5 * time.Second) } } @@ -815,7 +815,7 @@ func (mm *messageManager) runPoller() { mr, err := BuildMessageRow(row) if err != nil { mm.tsv.Stats().InternalErrors.Add("Messages", 1) - log.Errorf("Error reading message row: %v", err) + log.Errorf("messageManager (%v) - Error reading message row: %v", mm.name, err) continue } if !mm.cache.Add(mr) { @@ -836,7 +836,7 @@ func (mm *messageManager) runPurge() { count, err := mm.tsv.PurgeMessages(ctx, nil, mm, time.Now().Add(-mm.purgeAfter).UnixNano()) if err != nil { MessageStats.Add([]string{mm.name.String(), "PurgeFailed"}, 1) - log.Errorf("Unable to delete messages: %v", err) + log.Errorf("messageManager (%v) - Unable to delete messages: %v", mm.name, err) } else { MessageStats.Add([]string{mm.name.String(), "Purged"}, count) } @@ -939,7 +939,7 @@ func (mm *messageManager) readPending(ctx context.Context, bindVars map[string]* query, err := mm.readByPriorityAndTimeNext.GenerateQuery(bindVars, nil) if err != nil { 
mm.tsv.Stats().InternalErrors.Add("Messages", 1) - log.Errorf("Error reading rows from message table: %v", err) + log.Errorf("messageManager (%v) - Error reading rows from message table: %v", mm.name, err) return nil, err } qr := &sqltypes.Result{} diff --git a/go/vt/vttablet/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go index b8ca47ae46d..fdf39556e5c 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager_test.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -34,6 +34,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" @@ -317,7 +318,7 @@ func TestMessageManagerPostponeThrottle(t *testing.T) { // Postpone will wait on the unbuffered ch. <-r1.ch - // Set up a second subsriber, add a message. + // Set up a second subscriber, add a message. 
r2 := newTestReceiver(1) mm.Subscribe(context.Background(), r2.rcv) <-r2.ch @@ -831,9 +832,9 @@ type fakeTabletServer struct { } func newFakeTabletServer() *fakeTabletServer { - config := tabletenv.NewDefaultConfig() + cfg := tabletenv.NewDefaultConfig() return &fakeTabletServer{ - Env: tabletenv.NewEnv(config, "MessagerTest"), + Env: tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "MessagerTest"), } } diff --git a/go/vt/vttablet/tabletserver/planbuilder/builder.go b/go/vt/vttablet/tabletserver/planbuilder/builder.go index 3cae292b593..94f5fc1caa2 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/builder.go +++ b/go/vt/vttablet/tabletserver/planbuilder/builder.go @@ -20,6 +20,7 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -27,7 +28,7 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan *Plan, err error) { +func analyzeSelect(env *vtenv.Environment, sel *sqlparser.Select, tables map[string]*schema.Table) (plan *Plan, err error) { plan = &Plan{ PlanID: PlanSelect, FullQuery: GenerateLimitQuery(sel), @@ -48,7 +49,10 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a sequence", sqlparser.ToString(sel.From)) } plan.PlanID = PlanNextval - v, err := evalengine.Translate(nextVal.Expr, nil) + v, err := evalengine.Translate(nextVal.Expr, &evalengine.Config{ + Environment: env, + Collation: env.CollationEnv().DefaultConnectionCharset(), + }) if err != nil { return nil, err } @@ -219,3 +223,21 @@ func analyzeDDL(stmt sqlparser.DDLStatement) (*Plan, error) { } return &Plan{PlanID: PlanDDL, FullQuery: fullQuery, FullStmt: stmt, NeedsReservedConn: stmt.IsTemporary()}, nil } + +func analyzeFlush(stmt *sqlparser.Flush, 
tables map[string]*schema.Table) (*Plan, error) { + plan := &Plan{PlanID: PlanFlush, FullQuery: GenerateFullQuery(stmt)} + + for _, tbl := range stmt.TableNames { + if schemaTbl, ok := tables[tbl.Name.String()]; ok { + plan.AllTables = append(plan.AllTables, schemaTbl) + } + } + if len(plan.AllTables) == 1 { + plan.Table = plan.AllTables[0] + } + + if stmt.WithLock { + plan.NeedsReservedConn = true + } + return plan, nil +} diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission.go b/go/vt/vttablet/tabletserver/planbuilder/permission.go index a9d772f2931..79b2f9eb430 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission.go @@ -65,7 +65,8 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission { case *sqlparser.Analyze: permissions = buildTableNamePermissions(node.Table, tableacl.WRITER, permissions) case *sqlparser.OtherAdmin, *sqlparser.CallProc, *sqlparser.Begin, *sqlparser.Commit, *sqlparser.Rollback, - *sqlparser.Load, *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback, *sqlparser.Set, *sqlparser.Show, sqlparser.Explain: + *sqlparser.Load, *sqlparser.Savepoint, *sqlparser.Release, *sqlparser.SRollback, *sqlparser.Set, *sqlparser.Show, sqlparser.Explain, + *sqlparser.UnlockTables: // no op default: panic(fmt.Errorf("BUG: unexpected statement type: %T", node)) @@ -75,18 +76,15 @@ func BuildPermissions(stmt sqlparser.Statement) []Permission { func buildSubqueryPermissions(stmt sqlparser.Statement, role tableacl.Role, permissions []Permission) []Permission { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { - switch node := node.(type) { - case *sqlparser.Select: - permissions = buildTableExprsPermissions(node.From, role, permissions) - case sqlparser.TableExprs: - return false, nil + if sel, ok := node.(*sqlparser.Select); ok { + permissions = buildTableExprsPermissions(sel.From, role, permissions) } return true, nil }, stmt) return permissions } -func 
buildTableExprsPermissions(node sqlparser.TableExprs, role tableacl.Role, permissions []Permission) []Permission { +func buildTableExprsPermissions(node []sqlparser.TableExpr, role tableacl.Role, permissions []Permission) []Permission { for _, node := range node { permissions = buildTableExprPermissions(node, role, permissions) } @@ -96,14 +94,11 @@ func buildTableExprsPermissions(node sqlparser.TableExprs, role tableacl.Role, p func buildTableExprPermissions(node sqlparser.TableExpr, role tableacl.Role, permissions []Permission) []Permission { switch node := node.(type) { case *sqlparser.AliasedTableExpr: - // An AliasedTableExpr can also be a subquery, but we should skip them here + // An AliasedTableExpr can also be a derived table, but we should skip them here // because the buildSubQueryPermissions walker will catch them and extract // the corresponding table names. - switch node := node.Expr.(type) { - case sqlparser.TableName: - permissions = buildTableNamePermissions(node, role, permissions) - case *sqlparser.DerivedTable: - permissions = buildSubqueryPermissions(node.Select, role, permissions) + if tblName, ok := node.Expr.(sqlparser.TableName); ok { + permissions = buildTableNamePermissions(tblName, role, permissions) } case *sqlparser.ParenTableExpr: permissions = buildTableExprsPermissions(node.Exprs, role, permissions) diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go index 17baa72595e..6d42118cb0b 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go @@ -17,9 +17,9 @@ limitations under the License. 
package planbuilder import ( - "reflect" "testing" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" ) @@ -169,22 +169,21 @@ func TestBuildPermissions(t *testing.T) { }, { input: "update (select * from t1) as a join t2 on a=b set c=d", output: []Permission{{ - TableName: "t1", - Role: tableacl.WRITER, - }, { TableName: "t2", Role: tableacl.WRITER, + }, { + TableName: "t1", // derived table in update or delete needs reader permission as they cannot be modified. }}, }} for _, tcase := range tcases { - stmt, err := sqlparser.Parse(tcase.input) - if err != nil { - t.Fatal(err) - } - got := BuildPermissions(stmt) - if !reflect.DeepEqual(got, tcase.output) { - t.Errorf("BuildPermissions(%s): %v, want %v", tcase.input, got, tcase.output) - } + t.Run(tcase.input, func(t *testing.T) { + stmt, err := sqlparser.NewTestParser().Parse(tcase.input) + if err != nil { + t.Fatal(err) + } + got := BuildPermissions(stmt) + utils.MustMatch(t, tcase.output, got) + }) } } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index c4a8f905607..7b1e57c2f90 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -20,11 +20,11 @@ import ( "encoding/json" "strings" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -202,7 +202,7 @@ func (plan *Plan) TableNames() (names []string) { } // Build builds a plan based on the schema. 
-func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbName string, viewsEnabled bool) (plan *Plan, err error) { +func Build(env *vtenv.Environment, statement sqlparser.Statement, tables map[string]*schema.Table, dbName string, viewsEnabled bool) (plan *Plan, err error) { switch stmt := statement.(type) { case *sqlparser.Union: plan, err = &Plan{ @@ -210,7 +210,7 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbNam FullQuery: GenerateLimitQuery(stmt), }, nil case *sqlparser.Select: - plan, err = analyzeSelect(stmt, tables) + plan, err = analyzeSelect(env, stmt, tables) case *sqlparser.Insert: plan, err = analyzeInsert(stmt, tables) case *sqlparser.Update: @@ -246,7 +246,9 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbNam case *sqlparser.Load: plan, err = &Plan{PlanID: PlanLoad}, nil case *sqlparser.Flush: - plan, err = &Plan{PlanID: PlanFlush, FullQuery: GenerateFullQuery(stmt)}, nil + plan, err = analyzeFlush(stmt, tables) + case *sqlparser.UnlockTables: + plan, err = &Plan{PlanID: PlanUnlockTables}, nil case *sqlparser.CallProc: plan, err = &Plan{PlanID: PlanCallProc, FullQuery: GenerateFullQuery(stmt)}, nil default: @@ -321,7 +323,7 @@ func hasLockFunc(sel *sqlparser.Select) bool { } // BuildSettingQuery builds a query for system settings. 
-func BuildSettingQuery(settings []string) (query string, resetQuery string, err error) { +func BuildSettingQuery(settings []string, parser *sqlparser.Parser) (query string, resetQuery string, err error) { if len(settings) == 0 { return "", "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: plan called for empty system settings") } @@ -329,7 +331,7 @@ func BuildSettingQuery(settings []string) (query string, resetQuery string, err var resetSetExprs sqlparser.SetExprs lDefault := sqlparser.NewStrLiteral("default") for _, setting := range settings { - stmt, err := sqlparser.Parse(setting) + stmt, err := parser.Parse(setting) if err != nil { return "", "", vterrors.Wrapf(err, "[BUG]: failed to parse system setting: %s", setting) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go index 7c1f364cac8..9569121cb8f 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -32,6 +32,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" ) @@ -73,6 +74,7 @@ func TestDDLPlan(t *testing.T) { func testPlan(t *testing.T, fileName string) { t.Helper() + parser := sqlparser.NewTestParser() testSchema := loadSchema("schema_test.json") for tcase := range iterateExecFile(fileName) { t.Run(tcase.input, func(t *testing.T) { @@ -81,9 +83,9 @@ func testPlan(t *testing.T, fileName string) { } var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { - plan, err = Build(statement, testSchema, "dbName", false) + plan, err = Build(vtenv.NewTestEnv(), statement, testSchema, "dbName", false) } PassthroughDMLs = false @@ -111,6 +113,7 @@ func testPlan(t *testing.T, fileName string) { func TestPlanInReservedConn(t *testing.T) { testSchema := 
loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("exec_cases.txt") { t.Run(tcase.input, func(t *testing.T) { if strings.Contains(tcase.options, "PassthroughDMLs") { @@ -118,9 +121,9 @@ func TestPlanInReservedConn(t *testing.T) { } var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { - plan, err = Build(statement, testSchema, "dbName", false) + plan, err = Build(vtenv.NewTestEnv(), statement, testSchema, "dbName", false) } PassthroughDMLs = false @@ -154,6 +157,7 @@ func TestCustom(t *testing.T) { t.Log("No schemas to test") return } + parser := sqlparser.NewTestParser() for _, schemFile := range testSchemas { schem := loadSchema(schemFile) t.Logf("Testing schema %s", schemFile) @@ -167,11 +171,11 @@ func TestCustom(t *testing.T) { for _, file := range files { t.Logf("Testing file %s", file) for tcase := range iterateExecFile(file) { - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err != nil { t.Fatalf("Got error: %v, parsing sql: %v", err.Error(), tcase.input) } - plan, err := Build(statement, schem, "dbName", false) + plan, err := Build(vtenv.NewTestEnv(), statement, schem, "dbName", false) var out string if err != nil { out = err.Error() @@ -192,10 +196,11 @@ func TestCustom(t *testing.T) { func TestStreamPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("stream_cases.txt") { var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { plan, err = BuildStreaming(statement, testSchema) } @@ -252,13 +257,14 @@ func TestMessageStreamingPlan(t *testing.T) { func TestLockPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range 
iterateExecFile("lock_cases.txt") { t.Run(tcase.input, func(t *testing.T) { var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { - plan, err = Build(statement, testSchema, "dbName", false) + plan, err = Build(vtenv.NewTestEnv(), statement, testSchema, "dbName", false) } var out string diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index 5565f405bc7..977b3822050 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -175,7 +175,7 @@ "NextCount": ":a" } -# squence with bad value +# sequence with bad value "select next 12345667852342342342323423423 values from seq" { "PlanID": "Nextval", @@ -339,7 +339,7 @@ } ], "FullQuery": "update d set foo = 'foo' where `name` in ('a', 'b') limit :#maxLimit", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # normal update @@ -355,7 +355,7 @@ options:PassthroughDMLs } ], "FullQuery": "update d set foo = 'foo' where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # cross-db update @@ -370,7 +370,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a.b set foo = 'foo' where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # update unknown table @@ -385,7 +385,7 @@ options:PassthroughDMLs } ], "FullQuery": "update bogus set `name` = 'foo' where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } # update unknown table @@ -401,7 +401,7 @@ options:PassthroughDMLs } ], "FullQuery": "update bogus set `name` = 'foo' where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } # multi-table update @@ -420,7 +420,7 @@ options:PassthroughDMLs 
} ], "FullQuery": "update a, b set a.`name` = 'foo' where a.id = b.id and b.var = 'test'", - "WhereClause": "where a.id = b.id and b.var = 'test'" + "WhereClause": " where a.id = b.id and b.var = 'test'" } # multi-table update @@ -440,7 +440,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a join b on a.id = b.id set a.`name` = 'foo' where b.var = 'test'", - "WhereClause": "where b.var = 'test'" + "WhereClause": " where b.var = 'test'" } @@ -499,7 +499,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete from d where `name` in ('a', 'b') limit :#maxLimit", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # normal delete @@ -515,7 +515,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete from d where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # delete unknown table @@ -563,7 +563,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete a, b from a, b where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } @@ -939,6 +939,25 @@ options:PassthroughDMLs "FullQuery": "flush tables a, b" } +# flush statement with read lock +"flush tables a,b with read lock" +{ + "PlanID": "Flush", + "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "b", + "Role": 2 + } + ], + "FullQuery": "flush tables a, b with read lock", + "NeedsReservedConn": true +} + # call proc "call getAllTheThings()" { diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index 7f83a29fc51..dc4128a7c69 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -109,7 +109,7 @@ func (ep *TabletPlan) IsValid(hasReservedCon, hasSysSettings bool) error { func isValid(planType planbuilder.PlanType, hasReservedCon bool, hasSysSettings bool) error { switch planType { - case planbuilder.PlanSelectLockFunc, 
planbuilder.PlanDDL: + case planbuilder.PlanSelectLockFunc, planbuilder.PlanDDL, planbuilder.PlanFlush: if hasReservedCon { return nil } @@ -188,6 +188,7 @@ type QueryEngine struct { // stats // Note: queryErrorCountsWithCode is similar to queryErrorCounts except it contains error code as an additional dimension queryCounts, queryCountsWithTabletType, queryTimes, queryErrorCounts, queryErrorCountsWithCode, queryRowsAffected, queryRowsReturned *stats.CountersWithMultiLabels + queryCacheHits, queryCacheMisses *stats.CounterFunc // stats flags enablePerWorkloadTableMetrics bool @@ -280,6 +281,12 @@ func NewQueryEngine(env tabletenv.Env, se *schema.Engine) *QueryEngine { env.Exporter().NewCounterFunc("QueryCacheEvictions", "Query engine query cache evictions", func() int64 { return qe.plans.Metrics.Evicted() }) + qe.queryCacheHits = env.Exporter().NewCounterFunc("QueryCacheHits", "Query engine query cache hits", func() int64 { + return qe.plans.Metrics.Hits() + }) + qe.queryCacheMisses = env.Exporter().NewCounterFunc("QueryCacheMisses", "Query engine query cache misses", func() int64 { + return qe.plans.Metrics.Misses() + }) labels := []string{"Table", "Plan"} if config.EnablePerWorkloadTableMetrics { @@ -359,11 +366,11 @@ func (qe *QueryEngine) Close() { var errNoCache = errors.New("plan should not be cached") func (qe *QueryEngine) getPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { - statement, err := sqlparser.Parse(sql) + statement, err := qe.env.Environment().Parser().Parse(sql) if err != nil { return nil, err } - splan, err := planbuilder.Build(statement, curSchema.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) + splan, err := planbuilder.Build(qe.env.Environment(), statement, curSchema.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) if err != nil { return nil, err } @@ -377,7 +384,7 @@ func (qe *QueryEngine) getPlan(curSchema *currentSchema, sql string) (*TabletPla return plan, errNoCache } -// GetPlan returns 
the TabletPlan that for the query. Plans are cached in a theine LRU cache. +// GetPlan returns the TabletPlan that for the query. Plans are cached in an LRU cache. func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) { span, _ := trace.NewSpan(ctx, "QueryEngine.GetPlan") defer span.Finish() @@ -402,7 +409,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } func (qe *QueryEngine) getStreamPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { - statement, err := sqlparser.Parse(sql) + statement, err := qe.env.Environment().Parser().Parse(sql) if err != nil { return nil, err } @@ -424,7 +431,7 @@ func (qe *QueryEngine) getStreamPlan(curSchema *currentSchema, sql string) (*Tab return plan, errNoCache } -// GetStreamPlan returns the TabletPlan that for the query. Plans are cached in a theine LRU cache. +// GetStreamPlan returns the TabletPlan that for the query. Plans are cached in an LRU cache. 
func (qe *QueryEngine) GetStreamPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string, skipQueryPlanCache bool) (*TabletPlan, error) { span, _ := trace.NewSpan(ctx, "QueryEngine.GetStreamPlan") defer span.Finish() @@ -479,7 +486,7 @@ func (qe *QueryEngine) GetConnSetting(ctx context.Context, settings []string) (* cacheKey := SettingsCacheKey(buf.String()) connSetting, _, err := qe.settings.GetOrLoad(cacheKey, 0, func() (*smartconnpool.Setting, error) { // build the setting queries - query, resetQuery, err := planbuilder.BuildSettingQuery(settings) + query, resetQuery, err := planbuilder.BuildSettingQuery(settings, qe.env.Environment().Parser()) if err != nil { return nil, err } @@ -523,7 +530,7 @@ func (qe *QueryEngine) IsMySQLReachable() error { return nil } -func (qe *QueryEngine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table) { +func (qe *QueryEngine) schemaChanged(tables map[string]*schema.Table, created, altered, dropped []*schema.Table, _ bool) { qe.schemaMu.Lock() defer qe.schemaMu.Unlock() @@ -609,7 +616,7 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques response.Header().Set("Content-Type", "text/plain") qe.ForEachPlan(func(plan *TabletPlan) bool { - response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(plan.Original)))) + response.Write([]byte(fmt.Sprintf("%#v\n", qe.env.Environment().Parser().TruncateForUI(plan.Original)))) if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { response.Write([]byte(err.Error())) } else { @@ -629,7 +636,7 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques var qstats []perQueryStats qe.ForEachPlan(func(plan *TabletPlan) bool { var pqstats perQueryStats - pqstats.Query = unicoded(sqlparser.TruncateForUI(plan.Original)) + pqstats.Query = unicoded(qe.env.Environment().Parser().TruncateForUI(plan.Original)) pqstats.Table = plan.TableName().String() pqstats.Plan = plan.PlanID 
pqstats.QueryCount, pqstats.Time, pqstats.MysqlTime, pqstats.RowsAffected, pqstats.RowsReturned, pqstats.ErrorCount = plan.Stats() @@ -697,7 +704,7 @@ func (qe *QueryEngine) handleHTTPConsolidations(response http.ResponseWriter, re for _, v := range items { var query string if streamlog.GetRedactDebugUIQueries() { - query, _ = sqlparser.RedactSQLQuery(v.Query) + query, _ = qe.env.Environment().Parser().RedactSQLQuery(v.Query) } else { query = v.Query } diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 7bfac4988f2..146414e819b 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -19,7 +19,7 @@ package tabletserver import ( "context" "fmt" - "math/rand" + "math/rand/v2" "net/http" "net/http/httptest" "os" @@ -33,6 +33,7 @@ import ( "vitess.io/vitess/go/cache/theine" "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/sqlparser" @@ -60,9 +61,9 @@ func TestStrictMode(t *testing.T) { schematest.AddDefaultQueries(db) // Test default behavior. - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(db) - env := tabletenv.NewEnv(config, "TabletServerTest") + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) qe.se.InitDBConfig(newDBConfigs(db).DbaWithDB()) @@ -89,7 +90,7 @@ func TestStrictMode(t *testing.T) { qe.Close() // Test that we succeed if the enforcement flag is off. 
- config.EnforceStrictTransTables = false + cfg.EnforceStrictTransTables = false qe = NewQueryEngine(env, se) if err := qe.Open(); err != nil { t.Fatal(err) @@ -185,11 +186,27 @@ func TestQueryPlanCache(t *testing.T) { ctx := context.Background() logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") + initialHits := qe.queryCacheHits.Get() + initialMisses := qe.queryCacheMisses.Get() + firstPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false) require.NoError(t, err) require.NotNil(t, firstPlan, "plan should not be nil") assertPlanCacheSize(t, qe, 1) + + require.Equal(t, int64(0), qe.queryCacheHits.Get()-initialHits) + require.Equal(t, int64(1), qe.queryCacheMisses.Get()-initialMisses) + + secondPlan, err := qe.GetPlan(ctx, logStats, firstQuery, false) + require.NoError(t, err) + require.NotNil(t, secondPlan, "plan should not be nil") + + assertPlanCacheSize(t, qe, 1) + + require.Equal(t, int64(1), qe.queryCacheHits.Get()-initialHits) + require.Equal(t, int64(1), qe.queryCacheMisses.Get()-initialMisses) + qe.ClearQueryPlanCache() } @@ -350,12 +367,12 @@ func TestStatsURL(t *testing.T) { } func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfigs.DBConfigs) *QueryEngine { - config := tabletenv.NewDefaultConfig() - config.DB = dbcfgs - _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.TxPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - env := tabletenv.NewEnv(config, "TabletServerTest") + cfg := tabletenv.NewDefaultConfig() + cfg.DB = dbcfgs + cfg.OltpReadPool.IdleTimeout = idleTimeout + cfg.OlapReadPool.IdleTimeout = idleTimeout + cfg.TxPool.IdleTimeout = idleTimeout + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) // the integration tests that check cache behavior do not expect a doorkeeper; disable it @@ -440,7 +457,7 @@ func 
BenchmarkPlanCacheThroughput(b *testing.B) { logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") for i := 0; i < b.N; i++ { - query := fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.Intn(500)) + query := fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.IntN(500)) _, err := qe.GetPlan(ctx, logStats, query, false) if err != nil { b.Fatal(err) @@ -452,10 +469,10 @@ func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, par int) { b.Helper() dbcfgs := newDBConfigs(db) - config := tabletenv.NewDefaultConfig() - config.DB = dbcfgs + cfg := tabletenv.NewDefaultConfig() + cfg.DB = dbcfgs - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) @@ -470,7 +487,7 @@ func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, par int) { logStats := tabletenv.NewLogStats(ctx, "GetPlanStats") for pb.Next() { - query := fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.Intn(500)) + query := fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.IntN(500)) _, err := qe.GetPlan(ctx, logStats, query, false) require.NoErrorf(b, err, "bad query: %s", query) } @@ -509,11 +526,11 @@ func TestPlanCachePollution(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) dbcfgs := newDBConfigs(db) - config := tabletenv.NewDefaultConfig() - config.DB = dbcfgs + cfg := tabletenv.NewDefaultConfig() + cfg.DB = dbcfgs // config.LFUQueryCacheSizeBytes = 3 * 1024 * 1024 - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest") se := schema.NewEngine(env) qe := NewQueryEngine(env, se) @@ -535,7 +552,7 @@ func TestPlanCachePollution(t *testing.T) { go func() { cacheMode := "lfu" - out, err := os.Create(path.Join(plotPath, fmt.Sprintf("cache_plot_%d_%s.dat", config.QueryCacheMemory, cacheMode))) + out, err := os.Create(path.Join(plotPath, fmt.Sprintf("cache_plot_%d_%s.dat", 
cfg.QueryCacheMemory, cacheMode))) require.NoError(t, err) defer out.Close() @@ -602,7 +619,7 @@ func TestPlanCachePollution(t *testing.T) { go func() { defer wg.Done() runner(NormalQueries, &stats1, func() string { - return fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.Intn(5000)) + return fmt.Sprintf("SELECT (a, b, c) FROM test_table_%d", rand.IntN(5000)) }) }() @@ -826,10 +843,10 @@ func TestAddQueryStats(t *testing.T) { t.Parallel() for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(fakesqldb.New(t)) - config.EnablePerWorkloadTableMetrics = testcase.enablePerWorkloadTableMetrics - env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(fakesqldb.New(t)) + cfg.EnablePerWorkloadTableMetrics = testcase.enablePerWorkloadTableMetrics + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TestAddQueryStats_"+testcase.name) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) qe.AddStats(testcase.planType, testcase.tableName, testcase.workload, testcase.tabletType, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) @@ -868,9 +885,9 @@ func TestPlanPoolUnsafe(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - statement, err := sqlparser.Parse(tcase.query) + statement, err := sqlparser.NewTestParser().Parse(tcase.query) require.NoError(t, err) - plan, err := planbuilder.Build(statement, map[string]*schema.Table{}, "dbName", false) + plan, err := planbuilder.Build(vtenv.NewTestEnv(), statement, map[string]*schema.Table{}, "dbName", false) // Plan building will not fail, but it will mark that reserved connection is needed. // checking plan is valid will fail. 
require.NoError(t, err) diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 63dcd42d0a8..d5099b1a0cc 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -18,18 +18,18 @@ package tabletserver import ( "context" + "errors" "fmt" "io" "strings" "sync" "time" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" @@ -62,8 +62,11 @@ type QueryExecutor struct { ctx context.Context logStats *tabletenv.LogStats tsv *TabletServer - tabletType topodatapb.TabletType - setting *smartconnpool.Setting + // targetTabletType stores the target tablet type that we got as part of the request. + // We have the tablet server object too, which stores the current tablet type, but this is different. + // The target type we requested might be different from tsv's tablet type, if we had a change to the tablet type recently. 
+ targetTabletType topodatapb.TabletType + setting *smartconnpool.Setting } const ( @@ -108,10 +111,10 @@ func (qre *QueryExecutor) shouldConsolidate() bool { case querypb.ExecuteOptions_CONSOLIDATOR_ENABLED: return true case querypb.ExecuteOptions_CONSOLIDATOR_ENABLED_REPLICAS: - return qre.tabletType != topodatapb.TabletType_PRIMARY + return qre.targetTabletType != topodatapb.TabletType_PRIMARY default: cm := qre.tsv.qe.consolidatorMode.Load().(string) - return cm == tabletenv.Enable || (cm == tabletenv.NotOnPrimary && qre.tabletType != topodatapb.TabletType_PRIMARY) + return cm == tabletenv.Enable || (cm == tabletenv.NotOnPrimary && qre.targetTabletType != topodatapb.TabletType_PRIMARY) } } @@ -122,7 +125,7 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { defer func(start time.Time) { duration := time.Since(start) qre.tsv.stats.QueryTimings.Add(planName, duration) - qre.tsv.stats.QueryTimingsByTabletType.Add(qre.tabletType.String(), duration) + qre.tsv.stats.QueryTimingsByTabletType.Add(qre.targetTabletType.String(), duration) qre.recordUserQuery("Execute", int64(duration)) mysqlTime := qre.logStats.MysqlResponseTime @@ -136,12 +139,12 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { errCode = vtErrorCode.String() if reply == nil { - qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.tabletType, 1, duration, mysqlTime, 0, 0, 1, errCode) + qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.targetTabletType, 1, duration, mysqlTime, 0, 0, 1, errCode) qre.plan.AddStats(1, duration, mysqlTime, 0, 0, 1) return } - qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.tabletType, 1, duration, mysqlTime, int64(reply.RowsAffected), int64(len(reply.Rows)), 0, errCode) + qre.tsv.qe.AddStats(qre.plan.PlanID, tableName, qre.options.GetWorkloadName(), qre.targetTabletType, 1, duration, mysqlTime, int64(reply.RowsAffected), 
int64(len(reply.Rows)), 0, errCode) qre.plan.AddStats(1, duration, mysqlTime, reply.RowsAffected, uint64(len(reply.Rows)), 0) qre.logStats.RowsAffected = int(reply.RowsAffected) qre.logStats.Rows = reply.Rows @@ -207,6 +210,8 @@ func (qre *QueryExecutor) Execute() (reply *sqltypes.Result, err error) { return qre.execShowThrottledApps() case p.PlanShowThrottlerStatus: return qre.execShowThrottlerStatus() + case p.PlanUnlockTables: + return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "unlock tables should be executed with an existing connection") case p.PlanSet: if qre.setting == nil { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "[BUG] %s not allowed without setting connection", qre.query) @@ -279,7 +284,7 @@ func (qre *QueryExecutor) txConnExec(conn *StatefulConnection) (*sqltypes.Result return qre.txFetch(conn, true) case p.PlanUpdateLimit, p.PlanDeleteLimit: return qre.execDMLLimit(conn) - case p.PlanOtherRead, p.PlanOtherAdmin, p.PlanFlush: + case p.PlanOtherRead, p.PlanOtherAdmin, p.PlanFlush, p.PlanUnlockTables: return qre.execStatefulConn(conn, qre.query, true) case p.PlanSavepoint, p.PlanRelease, p.PlanSRollback: return qre.execStatefulConn(conn, qre.query, true) @@ -313,7 +318,7 @@ func (qre *QueryExecutor) Stream(callback StreamCallback) error { defer func(start time.Time) { qre.tsv.stats.QueryTimings.Record(qre.plan.PlanID.String(), start) - qre.tsv.stats.QueryTimingsByTabletType.Record(qre.tabletType.String(), start) + qre.tsv.stats.QueryTimingsByTabletType.Record(qre.targetTabletType.String(), start) qre.recordUserQuery("Stream", int64(time.Since(start))) }(time.Now()) @@ -340,7 +345,7 @@ func (qre *QueryExecutor) Stream(callback StreamCallback) error { if consolidator := qre.tsv.qe.streamConsolidator; consolidator != nil { if qre.connID == 0 && qre.plan.PlanID == p.PlanSelectStream && qre.shouldConsolidate() { - return consolidator.Consolidate(qre.logStats, sqlWithoutComments, callback, + return 
consolidator.Consolidate(qre.tsv.stats.WaitTimings, qre.logStats, sqlWithoutComments, callback, func(callback StreamCallback) error { dbConn, err := qre.getStreamConn() if err != nil { @@ -403,7 +408,7 @@ func (qre *QueryExecutor) MessageStream(callback StreamCallback) error { defer func(start time.Time) { qre.tsv.stats.QueryTimings.Record(qre.plan.PlanID.String(), start) - qre.tsv.stats.QueryTimingsByTabletType.Record(qre.tabletType.String(), start) + qre.tsv.stats.QueryTimingsByTabletType.Record(qre.targetTabletType.String(), start) qre.recordUserQuery("MessageStream", int64(time.Since(start))) }(time.Now()) @@ -611,13 +616,13 @@ func (*QueryExecutor) BeginAgain(ctx context.Context, dc *StatefulConnection) er } func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { - env := evalengine.NewExpressionEnv(qre.ctx, qre.bindVars, nil) + env := evalengine.NewExpressionEnv(qre.ctx, qre.bindVars, evalengine.NewEmptyVCursor(qre.tsv.Environment(), time.Local)) result, err := env.Evaluate(qre.plan.NextCount) if err != nil { return nil, err } tableName := qre.plan.TableName() - v := result.Value(collations.Default()) + v := result.Value(qre.tsv.env.CollationEnv().DefaultConnectionCharset()) inc, err := v.ToInt64() if err != nil || inc < 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid increment for sequence %s: %s", tableName, v.String()) @@ -754,7 +759,7 @@ func (qre *QueryExecutor) verifyRowCount(count, maxrows int64) error { if warnThreshold > 0 && count > warnThreshold { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) qre.tsv.Stats().Warnings.Add("ResultsExceeded", 1) - log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true)) + log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, 
queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true, qre.tsv.env.Parser())) } return nil } @@ -772,33 +777,20 @@ func (qre *QueryExecutor) getConn() (*connpool.PooledConn, error) { span, ctx := trace.NewSpan(qre.ctx, "QueryExecutor.getConn") defer span.Finish() - start := time.Now() - conn, err := qre.tsv.qe.conns.Get(ctx, qre.setting) - - switch err { - case nil: + defer func(start time.Time) { qre.logStats.WaitingForConnection += time.Since(start) - return conn, nil - case connpool.ErrConnPoolClosed: - return nil, err - } - return nil, err + }(time.Now()) + return qre.tsv.qe.conns.Get(ctx, qre.setting) } func (qre *QueryExecutor) getStreamConn() (*connpool.PooledConn, error) { span, ctx := trace.NewSpan(qre.ctx, "QueryExecutor.getStreamConn") defer span.Finish() - start := time.Now() - conn, err := qre.tsv.qe.streamConns.Get(ctx, qre.setting) - switch err { - case nil: + defer func(start time.Time) { qre.logStats.WaitingForConnection += time.Since(start) - return conn, nil - case connpool.ErrConnPoolClosed: - return nil, err - } - return nil, err + }(time.Now()) + return qre.tsv.qe.streamConns.Get(ctx, qre.setting) } // txFetch fetches from a TxConnection. 
@@ -875,6 +867,9 @@ func (qre *QueryExecutor) execCallProc() (*sqltypes.Result, error) { } qr, err := qre.execDBConn(conn.Conn, sql, true) + if errors.Is(err, mysql.ErrExecuteFetchMultipleResults) { + return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "Multi-Resultset not supported in stored procedure") + } if err != nil { return nil, rewriteOUTParamError(err) } @@ -948,6 +943,10 @@ func (qre *QueryExecutor) execAlterMigration() (*sqltypes.Result, error) { return qre.tsv.onlineDDLExecutor.UnthrottleMigration(qre.ctx, alterMigration.UUID) case sqlparser.UnthrottleAllMigrationType: return qre.tsv.onlineDDLExecutor.UnthrottleAllMigrations(qre.ctx) + case sqlparser.ForceCutOverMigrationType: + return qre.tsv.onlineDDLExecutor.ForceCutOverMigration(qre.ctx, alterMigration.UUID) + case sqlparser.ForceCutOverAllMigrationType: + return qre.tsv.onlineDDLExecutor.ForceCutOverPendingMigrations(qre.ctx) } return nil, vterrors.New(vtrpcpb.Code_UNIMPLEMENTED, "ALTER VITESS_MIGRATION not implemented") } @@ -1070,7 +1069,10 @@ func (qre *QueryExecutor) execDBConn(conn *connpool.Conn, sql string, wantfields defer qre.logStats.AddRewrittenSQL(sql, time.Now()) qd := NewQueryDetail(qre.logStats.Ctx, conn) - qre.tsv.statelessql.Add(qd) + err := qre.tsv.statelessql.Add(qd) + if err != nil { + return nil, err + } defer qre.tsv.statelessql.Remove(qd) return conn.Exec(ctx, sql, int(qre.tsv.qe.maxResultSize.Load()), wantfields) @@ -1083,7 +1085,10 @@ func (qre *QueryExecutor) execStatefulConn(conn *StatefulConnection, sql string, defer qre.logStats.AddRewrittenSQL(sql, time.Now()) qd := NewQueryDetail(qre.logStats.Ctx, conn) - qre.tsv.statefulql.Add(qd) + err := qre.tsv.statefulql.Add(qd) + if err != nil { + return nil, err + } defer qre.tsv.statefulql.Remove(qd) return conn.Exec(ctx, sql, int(qre.tsv.qe.maxResultSize.Load()), wantfields) @@ -1102,16 +1107,22 @@ func (qre *QueryExecutor) execStreamSQL(conn *connpool.PooledConn, isTransaction // Add query detail object into QueryExecutor 
TableServer list w.r.t if it is a transactional or not. Previously we were adding it // to olapql list regardless but that resulted in problems, where long-running stream queries which can be stateful (or transactional) - // weren't getting cleaned up during unserveCommon>handleShutdownGracePeriod in state_manager.go. + // weren't getting cleaned up during unserveCommon>terminateAllQueries in state_manager.go. // This change will ensure that long-running streaming stateful queries get gracefully shutdown during ServingTypeChange // once their grace period is over. qd := NewQueryDetail(qre.logStats.Ctx, conn.Conn) if isTransaction { - qre.tsv.statefulql.Add(qd) + err := qre.tsv.statefulql.Add(qd) + if err != nil { + return err + } defer qre.tsv.statefulql.Remove(qd) return conn.Conn.StreamOnce(ctx, sql, callBackClosingSpan, allocStreamResult, int(qre.tsv.qe.streamBufferSize.Load()), sqltypes.IncludeFieldsOrDefault(qre.options)) } - qre.tsv.olapql.Add(qd) + err := qre.tsv.olapql.Add(qd) + if err != nil { + return err + } defer qre.tsv.olapql.Remove(qd) return conn.Conn.Stream(ctx, sql, callBackClosingSpan, allocStreamResult, int(qre.tsv.qe.streamBufferSize.Load()), sqltypes.IncludeFieldsOrDefault(qre.options)) } @@ -1134,12 +1145,14 @@ func (qre *QueryExecutor) GetSchemaDefinitions(tableType querypb.SchemaTableType return qre.getTableDefinitions(tableNames, callback) case querypb.SchemaTableType_ALL: return qre.getAllDefinitions(tableNames, callback) + case querypb.SchemaTableType_UDFS: + return qre.getUDFs(callback) } return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid table type %v", tableType) } func (qre *QueryExecutor) getViewDefinitions(viewNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchViewQuery(viewNames) + query, err := eschema.GetFetchViewQuery(viewNames, qre.tsv.env.Parser()) if err != nil { return err } @@ -1147,7 +1160,7 @@ func (qre *QueryExecutor) 
getViewDefinitions(viewNames []string, callback func(s } func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchTableQuery(tableNames) + query, err := eschema.GetFetchTableQuery(tableNames, qre.tsv.env.Parser()) if err != nil { return err } @@ -1155,7 +1168,7 @@ func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func } func (qre *QueryExecutor) getAllDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchTableAndViewsQuery(tableNames) + query, err := eschema.GetFetchTableAndViewsQuery(tableNames, qre.tsv.env.Parser()) if err != nil { return err } @@ -1172,8 +1185,42 @@ func (qre *QueryExecutor) executeGetSchemaQuery(query string, callback func(sche return qre.execStreamSQL(conn, false /* isTransaction */, query, func(result *sqltypes.Result) error { schemaDef := make(map[string]string) for _, row := range result.Rows { - schemaDef[row[0].ToString()] = row[1].ToString() + tableName := row[0].ToString() + // Schema RPC should ignore the internal table in the response. 
+ if schema.IsInternalOperationTableName(tableName) { + continue + } + schemaDef[tableName] = row[1].ToString() } return callback(&querypb.GetSchemaResponse{TableDefinition: schemaDef}) }) } + +func (qre *QueryExecutor) getUDFs(callback func(schemaRes *querypb.GetSchemaResponse) error) error { + query, err := eschema.GetFetchUDFsQuery(qre.tsv.env.Parser()) + if err != nil { + return err + } + + conn, err := qre.getStreamConn() + if err != nil { + return err + } + defer conn.Recycle() + + return qre.execStreamSQL(conn, false /* isTransaction */, query, func(result *sqltypes.Result) error { + var udfs []*querypb.UDFInfo + for _, row := range result.Rows { + aggr := strings.EqualFold(row[2].ToString(), "aggregate") + udf := &querypb.UDFInfo{ + Name: row[0].ToString(), + Aggregating: aggr, + ReturnType: sqlparser.SQLTypeToQueryType(row[1].ToString(), false), + } + udfs = append(udfs, udf) + } + return callback(&querypb.GetSchemaResponse{ + Udfs: udfs, + }) + }) +} diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index d4058df8ad2..0c845d7c2ae 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -20,7 +20,7 @@ import ( "context" "fmt" "io" - "math/rand" + "math/rand/v2" "strings" "testing" "time" @@ -28,6 +28,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" @@ -71,7 +75,7 @@ func TestQueryExecutorPlans(t *testing.T) { input string // passThrough specifies if planbuilder.PassthroughDML must be set. passThrough bool - // dbResponses specifes the list of queries and responses to add to the fake db. + // dbResponses specifies the list of queries and responses to add to the fake db. 
dbResponses []dbResponse // resultWant is the result we want. resultWant *sqltypes.Result @@ -791,7 +795,7 @@ func TestQueryExecutorPlanNextval(t *testing.T) { func TestQueryExecutorMessageStreamACL(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int64()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) config := &tableaclpb.Config{ @@ -854,7 +858,7 @@ func TestQueryExecutorMessageStreamACL(t *testing.T) { } func TestQueryExecutorTableAcl(t *testing.T) { - aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int64()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) db := setUpQueryExecutorTest(t) @@ -898,7 +902,7 @@ func TestQueryExecutorTableAcl(t *testing.T) { } func TestQueryExecutorTableAclNoPermission(t *testing.T) { - aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int64()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) db := setUpQueryExecutorTest(t) @@ -957,7 +961,7 @@ func TestQueryExecutorTableAclNoPermission(t *testing.T) { } func TestQueryExecutorTableAclDualTableExempt(t *testing.T) { - aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int64()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) db := setUpQueryExecutorTest(t) @@ -1009,7 +1013,7 @@ func TestQueryExecutorTableAclDualTableExempt(t *testing.T) { } func TestQueryExecutorTableAclExemptACL(t *testing.T) { - aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int64()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) db := setUpQueryExecutorTest(t) @@ -1074,7 
+1078,7 @@ func TestQueryExecutorTableAclExemptACL(t *testing.T) { } func TestQueryExecutorTableAclDryRun(t *testing.T) { - aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int63()) + aclName := fmt.Sprintf("simpleacl-test-%d", rand.Int64()) tableacl.Register(aclName, &simpleacl.Factory{}) tableacl.SetDefaultACL(aclName) db := setUpQueryExecutorTest(t) @@ -1434,6 +1438,44 @@ func TestQueryExecutorShouldConsolidate(t *testing.T) { } } +func TestGetConnectionLogStats(t *testing.T) { + db := setUpQueryExecutorTest(t) + defer db.Close() + + ctx := context.Background() + tsv := newTestTabletServer(ctx, noFlags, db) + input := "select * from test_table limit 1" + + // getConn() happy path + qre := newTestQueryExecutor(ctx, tsv, input, 0) + conn, err := qre.getConn() + assert.NoError(t, err) + assert.NotNil(t, conn) + assert.True(t, qre.logStats.WaitingForConnection > 0) + + // getStreamConn() happy path + qre = newTestQueryExecutor(ctx, tsv, input, 0) + conn, err = qre.getStreamConn() + assert.NoError(t, err) + assert.NotNil(t, conn) + assert.True(t, qre.logStats.WaitingForConnection > 0) + + // Close the db connection to induce connection errors + db.Close() + + // getConn() error path + qre = newTestQueryExecutor(ctx, tsv, input, 0) + _, err = qre.getConn() + assert.Error(t, err) + assert.True(t, qre.logStats.WaitingForConnection > 0) + + // getStreamConn() error path + qre = newTestQueryExecutor(ctx, tsv, input, 0) + _, err = qre.getStreamConn() + assert.Error(t, err) + assert.True(t, qre.logStats.WaitingForConnection > 0) +} + type executorFlags int64 const ( @@ -1449,48 +1491,49 @@ const ( // newTestQueryExecutor uses a package level variable testTabletServer defined in tabletserver_test.go func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb.DB) *TabletServer { - config := tabletenv.NewDefaultConfig() - config.OltpReadPool.Size = 100 + cfg := tabletenv.NewDefaultConfig() + cfg.OltpReadPool.Size = 100 if flags&smallTxPool > 0 { - 
config.TxPool.Size = 3 + cfg.TxPool.Size = 3 } else { - config.TxPool.Size = 100 + cfg.TxPool.Size = 100 } if flags&enableStrictTableACL > 0 { - config.StrictTableACL = true + cfg.StrictTableACL = true } else { - config.StrictTableACL = false + cfg.StrictTableACL = false } if flags&noTwopc > 0 { - config.TwoPCEnable = false + cfg.TwoPCEnable = false } else { - config.TwoPCEnable = true + cfg.TwoPCEnable = true } if flags&disableOnlineDDL > 0 { - config.EnableOnlineDDL = false + cfg.EnableOnlineDDL = false } else { - config.EnableOnlineDDL = true + cfg.EnableOnlineDDL = true } - config.TwoPCCoordinatorAddress = "fake" + cfg.TwoPCCoordinatorAddress = "fake" if flags&shortTwopcAge > 0 { - config.TwoPCAbandonAge = 0.5 + cfg.TwoPCAbandonAge = 0.5 } else { - config.TwoPCAbandonAge = 10 + cfg.TwoPCAbandonAge = 10 } if flags&smallResultSize > 0 { - config.Oltp.MaxRows = 2 + cfg.Oltp.MaxRows = 2 } if flags&enableConsolidator > 0 { - config.Consolidator = tabletenv.Enable + cfg.Consolidator = tabletenv.Enable } else { - config.Consolidator = tabletenv.Disable + cfg.Consolidator = tabletenv.Disable } dbconfigs := newDBConfigs(db) - config.DB = dbconfigs - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg.DB = dbconfigs + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} err := tsv.StartService(target, dbconfigs, nil /* mysqld */) - if config.TwoPCEnable { + if cfg.TwoPCEnable { tsv.TwoPCEngineWait() } if err != nil { @@ -1565,7 +1608,7 @@ func initQueryExecutorTestDB(db *fakesqldb.DB) { "varchar|int64"), "Innodb_rows_read|0", )) - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, sqlparser.NewTestParser()) } 
func getTestTableFields() []*querypb.Field { @@ -1658,7 +1701,7 @@ func addQueryExecutorSupportedQueries(db *fakesqldb.DB) { fmt.Sprintf(sqlReadAllRedo, "_vt", "_vt"): {}, } - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, sqlparser.NewTestParser()) for query, result := range queryResultMap { db.AddQuery(query, result) } @@ -1729,7 +1772,7 @@ func TestQueryExecSchemaReloadCount(t *testing.T) { testcases := []struct { // input is the input query. input string - // dbResponses specifes the list of queries and responses to add to the fake db. + // dbResponses specifies the list of queries and responses to add to the fake db. dbResponses []dbResponse schemaReloadCount int }{{ diff --git a/go/vt/vttablet/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go index efe63ab0a8e..60fac1ea3af 100644 --- a/go/vt/vttablet/tabletserver/query_list.go +++ b/go/vt/vttablet/tabletserver/query_list.go @@ -26,7 +26,10 @@ import ( "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/callinfo" + "vitess.io/vitess/go/vt/log" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" ) // QueryDetail is a simple wrapper for Query, Context and a killable conn. @@ -57,26 +60,54 @@ type QueryList struct { // so have to maintain a list to compare with the actual connection. // and remove appropriately. 
queryDetails map[int64][]*QueryDetail + + parser *sqlparser.Parser + ca ClusterActionState } +type ClusterActionState int + +const ( + ClusterActionNotInProgress ClusterActionState = iota + ClusterActionInProgress ClusterActionState = iota + ClusterActionNoQueries ClusterActionState = iota +) + // NewQueryList creates a new QueryList -func NewQueryList(name string) *QueryList { +func NewQueryList(name string, parser *sqlparser.Parser) *QueryList { return &QueryList{ name: name, queryDetails: make(map[int64][]*QueryDetail), + parser: parser, + ca: ClusterActionNotInProgress, } } +// SetClusterAction sets the clusterActionInProgress field. +func (ql *QueryList) SetClusterAction(ca ClusterActionState) { + ql.mu.Lock() + defer ql.mu.Unlock() + // If the current state is ClusterActionNotInProgress, then we want to ignore setting ClusterActionNoQueries. + if ca == ClusterActionNoQueries && ql.ca == ClusterActionNotInProgress { + return + } + ql.ca = ca +} + // Add adds a QueryDetail to QueryList -func (ql *QueryList) Add(qd *QueryDetail) { +func (ql *QueryList) Add(qd *QueryDetail) error { ql.mu.Lock() defer ql.mu.Unlock() + if ql.ca == ClusterActionNoQueries { + return vterrors.New(vtrpcpb.Code_CLUSTER_EVENT, vterrors.ShuttingDown) + } qds, exists := ql.queryDetails[qd.connID] if exists { ql.queryDetails[qd.connID] = append(qds, qd) } else { ql.queryDetails[qd.connID] = []*QueryDetail{qd} } + return nil } // Remove removes a QueryDetail from QueryList @@ -109,7 +140,10 @@ func (ql *QueryList) Terminate(connID int64) bool { return false } for _, qd := range qds { - _ = qd.conn.Kill("QueryList.Terminate()", time.Since(qd.start)) + err := qd.conn.Kill("QueryList.Terminate()", time.Since(qd.start)) + if err != nil { + log.Warningf("Error terminating query on connection id: %d, error: %v", qd.conn.ID(), err) + } } return true } @@ -120,7 +154,10 @@ func (ql *QueryList) TerminateAll() { defer ql.mu.Unlock() for _, qds := range ql.queryDetails { for _, qd := range qds { - _ = 
qd.conn.Kill("QueryList.TerminateAll()", time.Since(qd.start)) + err := qd.conn.Kill("QueryList.TerminateAll()", time.Since(qd.start)) + if err != nil { + log.Warningf("Error terminating query on connection id: %d, error: %v", qd.conn.ID(), err) + } } } } @@ -150,7 +187,7 @@ func (ql *QueryList) AppendQueryzRows(rows []QueryDetailzRow) []QueryDetailzRow for _, qd := range qds { query := qd.conn.Current() if streamlog.GetRedactDebugUIQueries() { - query, _ = sqlparser.RedactSQLQuery(query) + query, _ = ql.parser.RedactSQLQuery(query) } row := QueryDetailzRow{ Type: ql.name, diff --git a/go/vt/vttablet/tabletserver/query_list_test.go b/go/vt/vttablet/tabletserver/query_list_test.go index 02b24d86cda..1e9dc2bf42c 100644 --- a/go/vt/vttablet/tabletserver/query_list_test.go +++ b/go/vt/vttablet/tabletserver/query_list_test.go @@ -22,6 +22,8 @@ import ( "time" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) type testConn struct { @@ -44,10 +46,11 @@ func (tc *testConn) IsKilled() bool { } func TestQueryList(t *testing.T) { - ql := NewQueryList("test") + ql := NewQueryList("test", sqlparser.NewTestParser()) connID := int64(1) qd := NewQueryDetail(context.Background(), &testConn{id: connID}) - ql.Add(qd) + err := ql.Add(qd) + require.NoError(t, err) if qd1, ok := ql.queryDetails[connID]; !ok || qd1[0].connID != connID { t.Errorf("failed to add to QueryList") @@ -55,7 +58,8 @@ func TestQueryList(t *testing.T) { conn2ID := int64(2) qd2 := NewQueryDetail(context.Background(), &testConn{id: conn2ID}) - ql.Add(qd2) + err = ql.Add(qd2) + require.NoError(t, err) rows := ql.AppendQueryzRows(nil) if len(rows) != 2 || rows[0].ConnID != 1 || rows[1].ConnID != 2 { @@ -69,14 +73,16 @@ func TestQueryList(t *testing.T) { } func TestQueryListChangeConnIDInMiddle(t *testing.T) { - ql := NewQueryList("test") + ql := NewQueryList("test", sqlparser.NewTestParser()) connID := int64(1) qd1 := NewQueryDetail(context.Background(), &testConn{id: connID}) - 
ql.Add(qd1) + err := ql.Add(qd1) + require.NoError(t, err) conn := &testConn{id: connID} qd2 := NewQueryDetail(context.Background(), conn) - ql.Add(qd2) + err = ql.Add(qd2) + require.NoError(t, err) require.Len(t, ql.queryDetails[1], 2) @@ -90,3 +96,22 @@ func TestQueryListChangeConnIDInMiddle(t *testing.T) { require.Equal(t, qd1, ql.queryDetails[1][0]) require.NotEqual(t, qd2, ql.queryDetails[1][0]) } + +func TestClusterAction(t *testing.T) { + ql := NewQueryList("test", sqlparser.NewTestParser()) + connID := int64(1) + qd1 := NewQueryDetail(context.Background(), &testConn{id: connID}) + + ql.SetClusterAction(ClusterActionInProgress) + ql.SetClusterAction(ClusterActionNoQueries) + err := ql.Add(qd1) + require.ErrorContains(t, err, "operation not allowed in state SHUTTING_DOWN") + + ql.SetClusterAction(ClusterActionNotInProgress) + err = ql.Add(qd1) + require.NoError(t, err) + // If the current state is not in progress, then setting no queries, shouldn't change anything. + ql.SetClusterAction(ClusterActionNoQueries) + err = ql.Add(qd1) + require.NoError(t, err) +} diff --git a/go/vt/vttablet/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go index 41a40a0720c..33341d1641b 100644 --- a/go/vt/vttablet/tabletserver/querylogz.go +++ b/go/vt/vttablet/tabletserver/querylogz.go @@ -26,7 +26,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -57,10 +56,9 @@ var ( `) querylogzFuncMap = template.FuncMap{ - "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, - "cssWrappable": logz.Wrappable, - "truncateQuery": sqlparser.TruncateForUI, - "unquote": func(s string) string { return strings.Trim(s, "\"") }, + "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, + "cssWrappable": logz.Wrappable, + "unquote": func(s string) string { return 
strings.Trim(s, "\"") }, } querylogzTmpl = template.Must(template.New("example").Funcs(querylogzFuncMap).Parse(` @@ -74,7 +72,7 @@ var ( - + @@ -86,17 +84,9 @@ var ( `)) ) -func init() { - servenv.HTTPHandleFunc("/querylogz", func(w http.ResponseWriter, r *http.Request) { - ch := tabletenv.StatsLogger.Subscribe("querylogz") - defer tabletenv.StatsLogger.Unsubscribe(ch) - querylogzHandler(ch, w, r) - }) -} - // querylogzHandler serves a human readable snapshot of the // current query log. -func querylogzHandler(ch chan *tabletenv.LogStats, w http.ResponseWriter, r *http.Request) { +func querylogzHandler(ch chan *tabletenv.LogStats, w http.ResponseWriter, r *http.Request, parser *sqlparser.Parser) { if err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil { acl.SendError(w, err) return @@ -127,7 +117,8 @@ func querylogzHandler(ch chan *tabletenv.LogStats, w http.ResponseWriter, r *htt tmplData := struct { *tabletenv.LogStats ColorLevel string - }{stats, level} + Parser *sqlparser.Parser + }{stats, level, parser} if err := querylogzTmpl.Execute(w, tmplData); err != nil { log.Errorf("querylogz: couldn't execute template: %v", err) } diff --git a/go/vt/vttablet/tabletserver/querylogz_test.go b/go/vt/vttablet/tabletserver/querylogz_test.go index 2e5caa3891b..25f03c762c7 100644 --- a/go/vt/vttablet/tabletserver/querylogz_test.go +++ b/go/vt/vttablet/tabletserver/querylogz_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -76,7 +77,7 @@ func TestQuerylogzHandler(t *testing.T) { response := httptest.NewRecorder() ch := make(chan *tabletenv.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ := io.ReadAll(response.Body) checkQuerylogzHasStats(t, fastQueryPattern, 
logStats, body) @@ -107,7 +108,7 @@ func TestQuerylogzHandler(t *testing.T) { response = httptest.NewRecorder() ch = make(chan *tabletenv.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ = io.ReadAll(response.Body) checkQuerylogzHasStats(t, mediumQueryPattern, logStats, body) @@ -137,7 +138,7 @@ func TestQuerylogzHandler(t *testing.T) { logStats.EndTime = logStats.StartTime.Add(500 * time.Millisecond) ch = make(chan *tabletenv.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ = io.ReadAll(response.Body) checkQuerylogzHasStats(t, slowQueryPattern, logStats, body) @@ -147,7 +148,7 @@ func TestQuerylogzHandler(t *testing.T) { defer func() { streamlog.SetQueryLogFilterTag("") }() ch = make(chan *tabletenv.LogStats, 1) ch <- logStats - querylogzHandler(ch, response, req) + querylogzHandler(ch, response, req, sqlparser.NewTestParser()) close(ch) body, _ = io.ReadAll(response.Body) checkQuerylogzHasStats(t, slowQueryPattern, logStats, body) diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index 151f028ca09..5d674b260cf 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" ) @@ -157,7 +156,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { return true } Value := &queryzRow{ - Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), + Query: logz.Wrappable(qe.env.Environment().Parser().TruncateForUI(plan.Original)), Table: plan.TableName().String(), Plan: plan.PlanID, } diff --git a/go/vt/vttablet/tabletserver/repltracker/poller.go 
b/go/vt/vttablet/tabletserver/repltracker/poller.go index 21a2e9baf1c..7023562f0d6 100644 --- a/go/vt/vttablet/tabletserver/repltracker/poller.go +++ b/go/vt/vttablet/tabletserver/repltracker/poller.go @@ -22,10 +22,10 @@ import ( "time" "vitess.io/vitess/go/stats" - "vitess.io/vitess/go/vt/mysqlctl" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" + + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) var replicationLagSeconds = stats.NewGauge("replicationLagSec", "replication lag in seconds") @@ -46,17 +46,16 @@ func (p *poller) Status() (time.Duration, error) { p.mu.Lock() defer p.mu.Unlock() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - - status, err := p.mysqld.ReplicationStatusWithContext(ctx) + status, err := p.mysqld.ReplicationStatus(ctx) if err != nil { return 0, err } // If replication is not currently running or we don't know what the lag is -- most commonly // because the replica mysqld is in the process of trying to start replicating from its source - // but it hasn't yet reached the point where it can calculate the seconds_behind_master + // but it hasn't yet reached the point where it can calculate the seconds_behind_source // value and it's thus NULL -- then we will estimate the lag ourselves using the last seen // value + the time elapsed since. 
if !status.Healthy() || status.ReplicationLagUnknown { diff --git a/go/vt/vttablet/tabletserver/repltracker/reader.go b/go/vt/vttablet/tabletserver/repltracker/reader.go index 985bb136f1f..6b798d99ca1 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader.go @@ -23,13 +23,12 @@ import ( "time" "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -71,7 +70,7 @@ func newHeartbeatReader(env tabletenv.Env) *heartbeatReader { return &heartbeatReader{} } - heartbeatInterval := config.ReplicationTracker.HeartbeatIntervalSeconds.Get() + heartbeatInterval := config.ReplicationTracker.HeartbeatInterval return &heartbeatReader{ env: env, enabled: true, @@ -80,8 +79,8 @@ func newHeartbeatReader(env tabletenv.Env) *heartbeatReader { ticks: timer.NewTimer(heartbeatInterval), errorLog: logutil.NewThrottledLogger("HeartbeatReporter", 60*time.Second), pool: connpool.NewPool(env, "HeartbeatReadPool", tabletenv.ConnPoolConfig{ - Size: 1, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 1, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), } } diff --git a/go/vt/vttablet/tabletserver/repltracker/reader_test.go b/go/vt/vttablet/tabletserver/repltracker/reader_test.go index 54ece70fc1a..e065b05da7a 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader_test.go @@ -21,14 +21,14 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + 
"vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -137,15 +137,15 @@ func TestReaderReadHeartbeatError(t *testing.T) { } func newReader(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatReader { - config := tabletenv.NewDefaultConfig() - config.ReplicationTracker.Mode = tabletenv.Heartbeat - _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") - params, _ := db.ConnParams().MysqlParams() + cfg := tabletenv.NewDefaultConfig() + cfg.ReplicationTracker.Mode = tabletenv.Heartbeat + cfg.ReplicationTracker.HeartbeatInterval = time.Second + params := db.ConnParams() cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") - config.DB = dbc + cfg.DB = dbc - tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest")) + tr := newHeartbeatReader(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "ReaderTest")) tr.keyspaceShard = "test:0" if frozenTime != nil { diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker.go b/go/vt/vttablet/tabletserver/repltracker/repltracker.go index 5ab44eb774e..c98005851d1 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker.go @@ -23,10 +23,11 @@ import ( "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/vttablet/tabletserver/heartbeat" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) var ( @@ -66,7 +67,7 @@ type ReplTracker struct { func NewReplTracker(env tabletenv.Env, alias *topodatapb.TabletAlias) *ReplTracker { return &ReplTracker{ mode: env.Config().ReplicationTracker.Mode, - forceHeartbeat: 
env.Config().ReplicationTracker.HeartbeatOnDemandSeconds.Get() > 0, + forceHeartbeat: env.Config().ReplicationTracker.HeartbeatOnDemand > 0, hw: newHeartbeatWriter(env, alias), hr: newHeartbeatReader(env), poller: &poller{}, diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go index 01912c3f689..5e6150ddeb3 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go @@ -28,6 +28,7 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -35,13 +36,13 @@ func TestReplTracker(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - config := tabletenv.NewDefaultConfig() - config.ReplicationTracker.Mode = tabletenv.Heartbeat - _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") - params, _ := db.ConnParams().MysqlParams() + cfg := tabletenv.NewDefaultConfig() + cfg.ReplicationTracker.Mode = tabletenv.Heartbeat + cfg.ReplicationTracker.HeartbeatInterval = time.Second + params := db.ConnParams() cp := *params - config.DB = dbconfigs.NewTestDBConfigs(cp, cp, "") - env := tabletenv.NewEnv(config, "ReplTrackerTest") + cfg.DB = dbconfigs.NewTestDBConfigs(cp, cp, "") + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "ReplTrackerTest") alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -78,7 +79,7 @@ func TestReplTracker(t *testing.T) { assert.False(t, rt.hw.isOpen) assert.False(t, rt.hr.isOpen) - config.ReplicationTracker.Mode = tabletenv.Polling + cfg.ReplicationTracker.Mode = tabletenv.Polling rt = NewReplTracker(env, alias) rt.InitDBConfig(target, mysqld) assert.Equal(t, tabletenv.Polling, rt.mode) diff --git a/go/vt/vttablet/tabletserver/repltracker/writer.go b/go/vt/vttablet/tabletserver/repltracker/writer.go 
index b13b78b59b7..a72b44d1845 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer.go @@ -59,6 +59,7 @@ type heartbeatWriter struct { appPool *dbconnpool.ConnectionPool allPrivsPool *dbconnpool.ConnectionPool ticks *timer.Timer + writeConnID atomic.Int64 onDemandDuration time.Duration onDemandMu sync.Mutex @@ -72,17 +73,17 @@ func newHeartbeatWriter(env tabletenv.Env, alias *topodatapb.TabletAlias) *heart config := env.Config() // config.EnableLagThrottler is a feature flag for the throttler; if throttler runs, then heartbeat must also run - if config.ReplicationTracker.Mode != tabletenv.Heartbeat && config.ReplicationTracker.HeartbeatOnDemandSeconds.Get() == 0 { + if config.ReplicationTracker.Mode != tabletenv.Heartbeat && config.ReplicationTracker.HeartbeatOnDemand == 0 { return &heartbeatWriter{} } - heartbeatInterval := config.ReplicationTracker.HeartbeatIntervalSeconds.Get() + heartbeatInterval := config.ReplicationTracker.HeartbeatInterval w := &heartbeatWriter{ env: env, enabled: true, tabletAlias: alias.CloneVT(), now: time.Now, interval: heartbeatInterval, - onDemandDuration: config.ReplicationTracker.HeartbeatOnDemandSeconds.Get(), + onDemandDuration: config.ReplicationTracker.HeartbeatOnDemand, ticks: timer.NewTimer(heartbeatInterval), errorLog: logutil.NewThrottledLogger("HeartbeatWriter", 60*time.Second), // We make this pool size 2; to prevent pool exhausted @@ -90,9 +91,10 @@ func newHeartbeatWriter(env tabletenv.Env, alias *topodatapb.TabletAlias) *heart appPool: dbconnpool.NewConnectionPool("HeartbeatWriteAppPool", env.Exporter(), 2, mysqlctl.DbaIdleTimeout, 0, mysqlctl.PoolDynamicHostnameResolution), allPrivsPool: dbconnpool.NewConnectionPool("HeartbeatWriteAllPrivsPool", env.Exporter(), 2, mysqlctl.DbaIdleTimeout, 0, mysqlctl.PoolDynamicHostnameResolution), } + w.writeConnID.Store(-1) if w.onDemandDuration > 0 { // see RequestHeartbeats() for use of onDemandRequestTicks - // it's 
basically a mechnism to rate limit operation RequestHeartbeats(). + // it's basically a mechanism to rate limit operation RequestHeartbeats(). // and selectively drop excessive requests. w.allowNextHeartbeatRequest() go func() { @@ -123,7 +125,7 @@ func (w *heartbeatWriter) Open() { if w.isOpen { return } - log.Info("Hearbeat Writer: opening") + log.Info("Heartbeat Writer: opening") // We cannot create the database and tables in this Open function // since, this is run when a tablet changes to Primary type. The other replicas @@ -159,7 +161,7 @@ func (w *heartbeatWriter) Close() { w.appPool.Close() w.allPrivsPool.Close() w.isOpen = false - log.Info("Hearbeat Writer: closed") + log.Info("Heartbeat Writer: closed") } // bindHeartbeatVars takes a heartbeat write (insert or update) and @@ -192,11 +194,6 @@ func (w *heartbeatWriter) write() error { defer w.env.LogError() ctx, cancel := context.WithDeadline(context.Background(), w.now().Add(w.interval)) defer cancel() - allPrivsConn, err := w.allPrivsPool.Get(ctx) - if err != nil { - return err - } - defer allPrivsConn.Recycle() upsert, err := w.bindHeartbeatVars(sqlUpsertHeartbeat) if err != nil { @@ -207,6 +204,8 @@ func (w *heartbeatWriter) write() error { return err } defer appConn.Recycle() + w.writeConnID.Store(appConn.Conn.ID()) + defer w.writeConnID.Store(-1) _, err = appConn.Conn.ExecuteFetch(upsert, 1, false) if err != nil { return err @@ -215,11 +214,14 @@ func (w *heartbeatWriter) write() error { } func (w *heartbeatWriter) recordError(err error) { + if err == nil { + return + } w.errorLog.Errorf("%v", err) writeErrors.Add(1) } -// enableWrites actives or deactives heartbeat writes +// enableWrites activates or deactivates heartbeat writes func (w *heartbeatWriter) enableWrites(enable bool) { if w.ticks == nil { return @@ -238,7 +240,17 @@ func (w *heartbeatWriter) enableWrites(enable bool) { w.ticks.Start(w.writeHeartbeat) }() case false: - w.ticks.Stop() + // We stop the ticks in a separate go routine 
because it can block if the write is stuck on semi-sync ACKs. + // At the same time we try and kill the write that is in progress. We use the context and its cancellation + // for coordination between the two go-routines. In the end we will have guaranteed that the ticks have stopped + // and no write is in progress. + ctx, cancel := context.WithCancel(context.Background()) + go func() { + w.ticks.Stop() + cancel() + }() + w.killWritesUntilStopped(ctx) + if w.onDemandDuration > 0 { // Let the next RequestHeartbeats() go through w.allowNextHeartbeatRequest() @@ -246,6 +258,45 @@ func (w *heartbeatWriter) enableWrites(enable bool) { } } +// killWritesUntilStopped tries to kill the write in progress until the ticks have stopped. +func (w *heartbeatWriter) killWritesUntilStopped(ctx context.Context) { + ticker := time.NewTicker(100 * time.Millisecond) + defer ticker.Stop() + for { + // Actually try to kill the query. + err := w.killWrite() + w.recordError(err) + select { + case <-ctx.Done(): + // If the context has been cancelled, then we know that the ticks have stopped. + // This guarantees that there are no writes in progress, so there is nothing to kill. + return + case <-ticker.C: + } + } +} + +// killWrite kills the write in progress (if any). +func (w *heartbeatWriter) killWrite() error { + defer w.env.LogError() + writeId := w.writeConnID.Load() + if writeId == -1 { + return nil + } + + ctx, cancel := context.WithDeadline(context.Background(), w.now().Add(w.interval)) + defer cancel() + killConn, err := w.allPrivsPool.Get(ctx) + if err != nil { + log.Errorf("Kill conn didn't get connection :(") + return err + } + defer killConn.Recycle() + + _, err = killConn.Conn.ExecuteFetch(fmt.Sprintf("kill %d", writeId), 1, false) + return err +} + // allowNextHeartbeatRequest ensures that the next call to RequestHeartbeats() passes through and // is not dropped. 
func (w *heartbeatWriter) allowNextHeartbeatRequest() { diff --git a/go/vt/vttablet/tabletserver/repltracker/writer_test.go b/go/vt/vttablet/tabletserver/repltracker/writer_test.go index 5044586c0d2..0add32a1de0 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer_test.go @@ -17,7 +17,9 @@ limitations under the License. package repltracker import ( + "context" "fmt" + "sync" "testing" "time" @@ -27,6 +29,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -63,16 +66,58 @@ func TestWriteHeartbeatError(t *testing.T) { assert.Equal(t, int64(1), writeErrors.Get()) } +// TestCloseWhileStuckWriting tests that Close shouldn't get stuck even if the heartbeat writer is stuck waiting for a semi-sync ACK. +func TestCloseWhileStuckWriting(t *testing.T) { + db := fakesqldb.New(t) + tw := newTestWriter(db, nil) + tw.isOpen = true + + killWg := sync.WaitGroup{} + killWg.Add(1) + startedWaitWg := sync.WaitGroup{} + startedWaitWg.Add(1) + + // Insert a query pattern that causes the upsert to block indefinitely until it has been killed. + // This simulates a stuck primary write due to a semi-sync ACK requirement. + db.AddQueryPatternWithCallback(`INSERT INTO .*heartbeat \(ts, tabletUid, keyspaceShard\).*`, &sqltypes.Result{}, func(s string) { + startedWaitWg.Done() + killWg.Wait() + }) + + // When we receive a kill query, we want to finish running the wait group to unblock the upsert query. + db.AddQueryPatternWithCallback("kill.*", &sqltypes.Result{}, func(s string) { + killWg.Done() + }) + + // Now we enable writes, but the first write will get blocked. + tw.enableWrites(true) + // We wait until the write has blocked to ensure we only call Close after we are stuck writing. 
+ startedWaitWg.Wait() + // Even if the write is blocked, we should be able to disable writes without waiting indefinitely. + // This is what we call, when we try to Close the heartbeat writer. + ctx, cancel := context.WithCancel(context.Background()) + go func() { + tw.enableWrites(false) + cancel() + }() + select { + case <-ctx.Done(): + db.Close() + case <-time.After(1000 * time.Second): + t.Fatalf("Timed out waiting for heartbeat writer to close") + } +} + func newTestWriter(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatWriter { - config := tabletenv.NewDefaultConfig() - config.ReplicationTracker.Mode = tabletenv.Heartbeat - _ = config.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + cfg := tabletenv.NewDefaultConfig() + cfg.ReplicationTracker.Mode = tabletenv.Heartbeat + cfg.ReplicationTracker.HeartbeatInterval = time.Second - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") - tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest"), &topodatapb.TabletAlias{Cell: "test", Uid: 1111}) + tw := newHeartbeatWriter(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "WriterTest"), &topodatapb.TabletAlias{Cell: "test", Uid: 1111}) tw.keyspaceShard = "test:0" if frozenTime != nil { diff --git a/go/vt/vttablet/tabletserver/requests_waiter.go b/go/vt/vttablet/tabletserver/requests_waiter.go new file mode 100644 index 00000000000..39e08f924cc --- /dev/null +++ b/go/vt/vttablet/tabletserver/requests_waiter.go @@ -0,0 +1,78 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletserver + +import "sync" + +// requestsWaiter is used to wait for requests. It stores the count of the requests pending, +// and also the number of waiters currently waiting. It has a mutex as well to protects its fields. +type requestsWaiter struct { + mu sync.Mutex + wg sync.WaitGroup + // waitCounter is the number of goroutines that are waiting for wg to be empty. + // If this value is greater than zero, then we have to ensure that we don't Add to the requests + // to avoid any panics in the wait. + waitCounter int + // counter is the count of the number of outstanding requests. + counter int +} + +// newRequestsWaiter creates a new requestsWaiter. +func newRequestsWaiter() *requestsWaiter { + return &requestsWaiter{} +} + +// Add adds to the requestsWaiter. +func (r *requestsWaiter) Add(val int) { + r.mu.Lock() + defer r.mu.Unlock() + r.counter += val + r.wg.Add(val) +} + +// Done subtracts 1 from the requestsWaiter. +func (r *requestsWaiter) Done() { + r.Add(-1) +} + +// addToWaitCounter adds to the waitCounter while being protected by a mutex. +func (r *requestsWaiter) addToWaitCounter(val int) { + r.mu.Lock() + defer r.mu.Unlock() + r.waitCounter += val +} + +// WaitToBeEmpty waits for requests to be empty. It also increments and decrements the waitCounter as required. +func (r *requestsWaiter) WaitToBeEmpty() { + r.addToWaitCounter(1) + r.wg.Wait() + r.addToWaitCounter(-1) +} + +// GetWaiterCount gets the number of go routines currently waiting on the wait group. 
+func (r *requestsWaiter) GetWaiterCount() int { + r.mu.Lock() + defer r.mu.Unlock() + return r.waitCounter +} + +// GetOutstandingRequestsCount gets the number of requests outstanding. +func (r *requestsWaiter) GetOutstandingRequestsCount() int { + r.mu.Lock() + defer r.mu.Unlock() + return r.counter +} diff --git a/go/vt/vttablet/tabletserver/requests_waiter_test.go b/go/vt/vttablet/tabletserver/requests_waiter_test.go new file mode 100644 index 00000000000..078e32e92ca --- /dev/null +++ b/go/vt/vttablet/tabletserver/requests_waiter_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tabletserver + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestRequestWaiter tests the functionality of request waiter. 
+func TestRequestWaiter(t *testing.T) { + rw := newRequestsWaiter() + require.EqualValues(t, 0, rw.GetWaiterCount()) + require.EqualValues(t, 0, rw.GetOutstandingRequestsCount()) + + rw.Add(3) + require.EqualValues(t, 0, rw.GetWaiterCount()) + require.EqualValues(t, 3, rw.GetOutstandingRequestsCount()) + + rw.Done() + require.EqualValues(t, 0, rw.GetWaiterCount()) + require.EqualValues(t, 2, rw.GetOutstandingRequestsCount()) + + go func() { + rw.WaitToBeEmpty() + }() + go func() { + rw.WaitToBeEmpty() + }() + + time.Sleep(100 * time.Millisecond) + require.EqualValues(t, 2, rw.GetWaiterCount()) + require.EqualValues(t, 2, rw.GetOutstandingRequestsCount()) + + rw.Done() + rw.Done() + + time.Sleep(100 * time.Millisecond) + require.EqualValues(t, 0, rw.GetWaiterCount()) + require.EqualValues(t, 0, rw.GetOutstandingRequestsCount()) +} diff --git a/go/vt/vttablet/tabletserver/rules/cached_size.go b/go/vt/vttablet/tabletserver/rules/cached_size.go index acfd199f1f2..1375ef2cb7b 100644 --- a/go/vt/vttablet/tabletserver/rules/cached_size.go +++ b/go/vt/vttablet/tabletserver/rules/cached_size.go @@ -108,7 +108,7 @@ func (cached *bvcre) CachedSize(alloc bool) int64 { } // field re *regexp.Regexp if cached.re != nil { - size += hack.RuntimeAllocSize(int64(153)) + size += hack.RuntimeAllocSize(int64(160)) } return size } @@ -124,7 +124,7 @@ func (cached *namedRegexp) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.name))) // field Regexp *regexp.Regexp if cached.Regexp != nil { - size += hack.RuntimeAllocSize(int64(153)) + size += hack.RuntimeAllocSize(int64(160)) } return size } diff --git a/go/vt/vttablet/tabletserver/rules/map_test.go b/go/vt/vttablet/tabletserver/rules/map_test.go index 1e86b938a48..bd1030f119c 100644 --- a/go/vt/vttablet/tabletserver/rules/map_test.go +++ b/go/vt/vttablet/tabletserver/rules/map_test.go @@ -136,7 +136,7 @@ func TestMapGetSetQueryRules(t *testing.T) { t.Errorf("Failed to set custom Rules: %s", err) } - // Test 
if we can successfully retrieve rules that've been set + // Test if we can successfully retrieve rules which been set qrs, err = qri.Get(denyListQueryRules) if err != nil { t.Errorf("GetRules failed to retrieve denyListQueryRules that has been set: %s", err) diff --git a/go/vt/vttablet/tabletserver/rules/rules.go b/go/vt/vttablet/tabletserver/rules/rules.go index efbfcdf87e4..4a7d128b950 100644 --- a/go/vt/vttablet/tabletserver/rules/rules.go +++ b/go/vt/vttablet/tabletserver/rules/rules.go @@ -27,15 +27,14 @@ import ( "time" "vitess.io/vitess/go/sqltypes" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" - - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -//----------------------------------------------- +// ----------------------------------------------- const ( bufferedTableRuleName = "buffered_table" @@ -189,7 +188,7 @@ func (qrs *Rules) GetAction( return QRContinue, nil, 0, "" } -//----------------------------------------------- +// ----------------------------------------------- // Rule represents one rule (conditions-action). // Name is meant to uniquely identify a rule. @@ -561,10 +560,10 @@ func bvMatch(bvcond BindVarCond, bindVars map[string]*querypb.BindVariable) bool return bvcond.value.eval(bv, bvcond.op, bvcond.onMismatch) } -//----------------------------------------------- +// ----------------------------------------------- // Support types for Rule -// Action speficies the list of actions to perform +// Action specifies the list of actions to perform // when a Rule is triggered. type Action int @@ -656,7 +655,7 @@ func init() { } } -// These are return statii. +// These are return states. 
const ( QROK = iota QRMismatch @@ -852,13 +851,13 @@ func getint64(val *querypb.BindVariable) (iv int64, status int) { // TODO(sougou): this is inefficient. Optimize to use []byte. func getstring(val *querypb.BindVariable) (s string, status int) { - if sqltypes.IsIntegral(val.Type) || sqltypes.IsFloat(val.Type) || sqltypes.IsText(val.Type) || sqltypes.IsBinary(val.Type) { + if sqltypes.IsIntegral(val.Type) || sqltypes.IsFloat(val.Type) || sqltypes.IsTextOrBinary(val.Type) { return string(val.Value), QROK } return "", QRMismatch } -//----------------------------------------------- +// ----------------------------------------------- // Support functions for JSON // MapStrOperator maps a string representation to an Operator. diff --git a/go/vt/vttablet/tabletserver/schema/db.go b/go/vt/vttablet/tabletserver/schema/db.go index 5699ffc1bde..7b328c518d2 100644 --- a/go/vt/vttablet/tabletserver/schema/db.go +++ b/go/vt/vttablet/tabletserver/schema/db.go @@ -19,6 +19,8 @@ package schema import ( "context" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -86,10 +88,36 @@ where table_schema = database() and table_name in ::viewNames` // fetchTablesAndViews queries fetches all information about tables and views fetchTablesAndViews = `select table_name, create_statement from %s.tables where table_schema = database() union select table_name, create_statement from %s.views where table_schema = database()` + + // detectUdfChange query detects if there is any udf change from previous copy. + detectUdfChange = `SELECT name +FROM ( + SELECT name FROM + mysql.func + + UNION ALL + + SELECT function_name + FROM %s.udfs +) _inner +GROUP BY name +HAVING COUNT(*) = 1 +LIMIT 1 +` + + // deleteAllUdfs clears out the udfs table. + deleteAllUdfs = `delete from %s.udfs` + + // copyUdfs copies user defined function to the udfs table. 
+ copyUdfs = `INSERT INTO %s.udfs(FUNCTION_NAME, FUNCTION_RETURN_TYPE, FUNCTION_TYPE) +SELECT f.name, i.UDF_RETURN_TYPE, f.type FROM mysql.func f left join performance_schema.user_defined_functions i on f.name = i.udf_name +` + // fetchAggregateUdfs queries fetches all the aggregate user defined functions. + fetchAggregateUdfs = `select function_name, function_return_type, function_type from %s.udfs` ) // reloadTablesDataInDB reloads teh tables information we have stored in our database we use for schema-tracking. -func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Table, droppedTables []string) error { +func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Table, droppedTables []string, parser *sqlparser.Parser) error { // No need to do anything if we have no tables to refresh or drop. if len(tables) == 0 && len(droppedTables) == 0 { return nil @@ -117,7 +145,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta } // Generate the queries to delete and insert table data. - clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable) + clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable, parser) if err != nil { return err } @@ -126,7 +154,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta return err } - insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables) + insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables, parser) if err != nil { return err } @@ -162,8 +190,8 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta } // generateFullQuery generates the full query from the query as a string. 
-func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { - stmt, err := sqlparser.Parse( +func generateFullQuery(query string, parser *sqlparser.Parser) (*sqlparser.ParsedQuery, error) { + stmt, err := parser.Parse( sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), sidecar.GetIdentifier()).Query) if err != nil { return nil, err @@ -174,7 +202,7 @@ func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { } // reloadViewsDataInDB reloads teh views information we have stored in our database we use for schema-tracking. -func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Table, droppedViews []string) error { +func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Table, droppedViews []string, parser *sqlparser.Parser) error { // No need to do anything if we have no views to refresh or drop. if len(views) == 0 && len(droppedViews) == 0 { return nil @@ -213,7 +241,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl return nil }, func() *sqltypes.Result { return &sqltypes.Result{} }, - 1000, + 1000, parser, ) if err != nil { return err @@ -221,7 +249,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl } // Generate the queries to delete and insert view data. 
- clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable) + clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable, parser) if err != nil { return err } @@ -230,7 +258,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl return err } - insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews) + insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews, parser) if err != nil { return err } @@ -266,8 +294,8 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl } // getViewDefinition gets the viewDefinition for the given views. -func getViewDefinition(ctx context.Context, conn *connpool.Conn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int) error { - viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions) +func getViewDefinition(ctx context.Context, conn *connpool.Conn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int, parser *sqlparser.Parser) error { + viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions, parser) if err != nil { return err } @@ -313,6 +341,31 @@ func getChangedViewNames(ctx context.Context, conn *connpool.Conn, isServingPrim return views, nil } +func getChangedUserDefinedFunctions(ctx context.Context, conn *connpool.Conn, isServingPrimary bool) (bool, error) { + if !isServingPrimary { + return false, nil + } + + udfsChanged := false + callback := func(qr *sqltypes.Result) error { + // If we receive any row as output which means udf was modified. 
+ udfsChanged = len(qr.Rows) > 0 + return nil + } + alloc := func() *sqltypes.Result { return &sqltypes.Result{} } + bufferSize := 1000 + + udfChangeQuery := sqlparser.BuildParsedQuery(detectUdfChange, sidecar.GetIdentifier()).Query + err := conn.Stream(ctx, udfChangeQuery, callback, alloc, bufferSize, 0) + if err != nil { + return false, err + } + if udfsChanged { + log.Info("Underlying User Defined Functions have changed") + } + return udfsChanged, nil +} + // getMismatchedTableNames gets the tables that do not align with the tables information we have in the cache. func (se *Engine) getMismatchedTableNames(ctx context.Context, conn *connpool.Conn, isServingPrimary bool) (map[string]any, error) { tablesMismatched := make(map[string]any) @@ -358,7 +411,7 @@ func (se *Engine) getMismatchedTableNames(ctx context.Context, conn *connpool.Co } // reloadDataInDB reloads the schema tracking data in the database -func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, created []*Table, dropped []*Table) error { +func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered, created, dropped []*Table, udfsChanged bool, parser *sqlparser.Parser) error { // tablesToReload and viewsToReload stores the tables and views that need reloading and storing in our MySQL database. var tablesToReload, viewsToReload []*Table // droppedTables, droppedViews stores the list of tables and views we need to delete, respectively. 
@@ -382,19 +435,55 @@ func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, } } - if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables); err != nil { + if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables, parser); err != nil { + return err + } + if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews, parser); err != nil { + return err + } + if err := reloadUdfsInDB(ctx, conn, udfsChanged, parser); err != nil { + return err + } + return nil +} + +func reloadUdfsInDB(ctx context.Context, conn *connpool.Conn, udfsChanged bool, parser *sqlparser.Parser) error { + if !udfsChanged { + return nil + } + + clearUdfQuery := sqlparser.BuildParsedQuery(deleteAllUdfs, sidecar.GetIdentifier()).Query + copyUdfQuery := sqlparser.BuildParsedQuery(copyUdfs, sidecar.GetIdentifier()).Query + + // Reload the udfs in a transaction. + _, err := conn.Exec(ctx, "begin", 1, false) + if err != nil { return err } - if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews); err != nil { + defer conn.Exec(ctx, "rollback", 1, false) + + _, err = conn.Exec(ctx, clearUdfQuery, 1, false) + if err != nil { return err } + + _, err = conn.Exec(ctx, copyUdfQuery, 1, false) + if err != nil { + return err + } + + _, err = conn.Exec(ctx, "commit", 1, false) + if err != nil { + return err + } + return nil } // GetFetchViewQuery gets the fetch query to run for getting the listed views. If no views are provided, then all the views are fetched. 
-func GetFetchViewQuery(viewNames []string) (string, error) { +func GetFetchViewQuery(viewNames []string, parser *sqlparser.Parser) (string, error) { if len(viewNames) == 0 { - parsedQuery, err := generateFullQuery(fetchViews) + parsedQuery, err := generateFullQuery(fetchViews, parser) if err != nil { return "", err } @@ -407,7 +496,7 @@ func GetFetchViewQuery(viewNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"viewNames": viewsBV} - parsedQuery, err := generateFullQuery(fetchUpdatedViews) + parsedQuery, err := generateFullQuery(fetchUpdatedViews, parser) if err != nil { return "", err } @@ -415,9 +504,9 @@ func GetFetchViewQuery(viewNames []string) (string, error) { } // GetFetchTableQuery gets the fetch query to run for getting the listed tables. If no tables are provided, then all the tables are fetched. -func GetFetchTableQuery(tableNames []string) (string, error) { +func GetFetchTableQuery(tableNames []string, parser *sqlparser.Parser) (string, error) { if len(tableNames) == 0 { - parsedQuery, err := generateFullQuery(fetchTables) + parsedQuery, err := generateFullQuery(fetchTables, parser) if err != nil { return "", err } @@ -430,7 +519,7 @@ func GetFetchTableQuery(tableNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} - parsedQuery, err := generateFullQuery(fetchUpdatedTables) + parsedQuery, err := generateFullQuery(fetchUpdatedTables, parser) if err != nil { return "", err } @@ -438,9 +527,9 @@ func GetFetchTableQuery(tableNames []string) (string, error) { } // GetFetchTableAndViewsQuery gets the fetch query to run for getting the listed tables and views. If no table names are provided, then all the tables and views are fetched. 
-func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { +func GetFetchTableAndViewsQuery(tableNames []string, parser *sqlparser.Parser) (string, error) { if len(tableNames) == 0 { - parsedQuery, err := generateFullQuery(fetchTablesAndViews) + parsedQuery, err := generateFullQuery(fetchTablesAndViews, parser) if err != nil { return "", err } @@ -453,9 +542,18 @@ func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} - parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews) + parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews, parser) if err != nil { return "", err } return parsedQuery.GenerateQuery(bv, nil) } + +// GetFetchUDFsQuery gets the fetch query to retrieve all the UDFs. +func GetFetchUDFsQuery(parser *sqlparser.Parser) (string, error) { + parsedQuery, err := generateFullQuery(fetchAggregateUdfs, parser) + if err != nil { + return "", err + } + return parsedQuery.Query, nil +} diff --git a/go/vt/vttablet/tabletserver/schema/db_test.go b/go/vt/vttablet/tabletserver/schema/db_test.go index ac6999d309a..d0ff91b63d5 100644 --- a/go/vt/vttablet/tabletserver/schema/db_test.go +++ b/go/vt/vttablet/tabletserver/schema/db_test.go @@ -23,15 +23,17 @@ import ( "testing" "github.com/stretchr/testify/require" + "golang.org/x/exp/maps" "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/maps2" - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) var ( @@ -81,7 +83,7 @@ func TestGenerateFullQuery(t *testing.T) { tt.wantQuery = tt.query } - got, err := generateFullQuery(tt.query) + got, err := generateFullQuery(tt.query, sqlparser.NewTestParser()) if tt.wantErr != 
"" { require.EqualError(t, err, tt.wantErr) return @@ -96,7 +98,8 @@ func TestGenerateFullQuery(t *testing.T) { func TestGetCreateStatement(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestGetCreateStatement") + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Success view @@ -131,7 +134,8 @@ func TestGetCreateStatement(t *testing.T) { func TestGetChangedViewNames(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestGetChangedViewNames") + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Success @@ -145,7 +149,7 @@ func TestGetChangedViewNames(t *testing.T) { got, err := getChangedViewNames(context.Background(), conn, true) require.NoError(t, err) require.Len(t, got, 3) - require.ElementsMatch(t, maps2.Keys(got), []string{"v1", "v2", "lead"}) + require.ElementsMatch(t, maps.Keys(got), []string{"v1", "v2", "lead"}) require.NoError(t, db.LastError()) // Not serving primary @@ -164,7 +168,8 @@ func TestGetChangedViewNames(t *testing.T) { func TestGetViewDefinition(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestGetViewDefinition") + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) viewsBV, err := sqltypes.BuildBindVariable([]string{"v1", "lead"}) @@ -181,7 +186,7 @@ func TestGetViewDefinition(t *testing.T) { got, err := collectGetViewDefinitions(conn, bv) require.NoError(t, err) require.Len(t, got, 2) - require.ElementsMatch(t, maps2.Keys(got), []string{"v1", "lead"}) + 
require.ElementsMatch(t, maps.Keys(got), []string{"v1", "lead"}) require.Equal(t, "create_view_v1", got["v1"]) require.Equal(t, "create_view_lead", got["lead"]) require.NoError(t, db.LastError()) @@ -209,7 +214,7 @@ func collectGetViewDefinitions(conn *connpool.Conn, bv map[string]*querypb.BindV return nil }, func() *sqltypes.Result { return &sqltypes.Result{} - }, 1000) + }, 1000, sqlparser.NewTestParser()) return viewDefinitions, err } @@ -226,7 +231,7 @@ func TestGetMismatchedTableNames(t *testing.T) { expectedError string }{ { - name: "Table create time differs", + name: "TableCreateTimeDiffers", tables: map[string]*Table{ "t1": { Name: sqlparser.NewIdentifierCS("t1"), @@ -239,7 +244,7 @@ func TestGetMismatchedTableNames(t *testing.T) { isServingPrimary: true, expectedTableNames: []string{"t1"}, }, { - name: "Table got deleted", + name: "TableGotDeleted", tables: map[string]*Table{ "t1": { Name: sqlparser.NewIdentifierCS("t1"), @@ -253,7 +258,7 @@ func TestGetMismatchedTableNames(t *testing.T) { isServingPrimary: true, expectedTableNames: []string{"t2"}, }, { - name: "Table got created", + name: "TableGotCreated", tables: map[string]*Table{ "t1": { Name: sqlparser.NewIdentifierCS("t1"), @@ -270,7 +275,7 @@ func TestGetMismatchedTableNames(t *testing.T) { isServingPrimary: true, expectedTableNames: []string{"t2"}, }, { - name: "Dual gets ignored", + name: "DualGetsIgnored", tables: map[string]*Table{ "dual": NewTable("dual", NoType), "t2": { @@ -284,7 +289,7 @@ func TestGetMismatchedTableNames(t *testing.T) { isServingPrimary: true, expectedTableNames: []string{}, }, { - name: "All problems", + name: "AllProblems", tables: map[string]*Table{ "dual": NewTable("dual", NoType), "t2": { @@ -304,7 +309,7 @@ func TestGetMismatchedTableNames(t *testing.T) { isServingPrimary: true, expectedTableNames: []string{"t1", "t2", "t3"}, }, { - name: "Not serving primary", + name: "NotServingPrimary", tables: map[string]*Table{ "t1": { Name: sqlparser.NewIdentifierCS("t1"), @@ 
-317,7 +322,7 @@ func TestGetMismatchedTableNames(t *testing.T) { isServingPrimary: false, expectedTableNames: []string{}, }, { - name: "Error in query", + name: "ErrorInQuery", tables: map[string]*Table{ "t1": { Name: sqlparser.NewIdentifierCS("t1"), @@ -336,7 +341,8 @@ func TestGetMismatchedTableNames(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tc.name) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) if tc.dbError != "" { @@ -351,7 +357,7 @@ func TestGetMismatchedTableNames(t *testing.T) { if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) } else { - require.ElementsMatch(t, maps2.Keys(mismatchedTableNames), tc.expectedTableNames) + require.ElementsMatch(t, maps.Keys(mismatchedTableNames), tc.expectedTableNames) require.NoError(t, db.LastError()) } }) @@ -370,7 +376,7 @@ func TestReloadTablesInDB(t *testing.T) { expectedError string }{ { - name: "Only tables to delete", + name: "OnlyTablesToDelete", tablesToDelete: []string{"t1", "lead"}, expectedQueries: map[string]*sqltypes.Result{ "begin": {}, @@ -379,7 +385,7 @@ func TestReloadTablesInDB(t *testing.T) { "delete from _vt.`tables` where table_schema = database() and table_name in ('t1', 'lead')": {}, }, }, { - name: "Only tables to reload", + name: "OnlyTablesToReload", tablesToReload: []*Table{ { Name: sqlparser.NewIdentifierCS("t1"), @@ -404,7 +410,7 @@ func TestReloadTablesInDB(t *testing.T) { "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, }, }, { - name: "Reload and Delete", + name: "ReloadAndDelete", tablesToReload: []*Table{ { Name: sqlparser.NewIdentifierCS("t1"), @@ -430,7 +436,7 @@ func TestReloadTablesInDB(t 
*testing.T) { "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, }, }, { - name: "Error In Insert", + name: "ErrorInInsert", tablesToReload: []*Table{ { Name: sqlparser.NewIdentifierCS("t1"), @@ -456,7 +462,8 @@ func TestReloadTablesInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tc.name) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -467,7 +474,7 @@ func TestReloadTablesInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete) + err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -491,7 +498,7 @@ func TestReloadViewsInDB(t *testing.T) { expectedError string }{ { - name: "Only views to delete", + name: "OnlyViewsToDelete", viewsToDelete: []string{"v1", "lead"}, expectedQueries: map[string]*sqltypes.Result{ "begin": {}, @@ -500,7 +507,7 @@ func TestReloadViewsInDB(t *testing.T) { "delete from _vt.views where table_schema = database() and table_name in ('v1', 'lead')": {}, }, }, { - name: "Only views to reload", + name: "OnlyViewsToReload", viewsToReload: []*Table{ { Name: sqlparser.NewIdentifierCS("v1"), @@ -529,7 +536,7 @@ func TestReloadViewsInDB(t *testing.T) { "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, }, }, { - name: "Reload and delete", + name: "ReloadAndDelete", viewsToReload: []*Table{ 
{ Name: sqlparser.NewIdentifierCS("v1"), @@ -559,7 +566,7 @@ func TestReloadViewsInDB(t *testing.T) { "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, }, }, { - name: "Error In Insert", + name: "ErrorInInsert", viewsToReload: []*Table{ { Name: sqlparser.NewIdentifierCS("v1"), @@ -588,7 +595,8 @@ func TestReloadViewsInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tc.name) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -599,7 +607,7 @@ func TestReloadViewsInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete) + err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -625,7 +633,7 @@ func TestReloadDataInDB(t *testing.T) { expectedError string }{ { - name: "Only views to delete", + name: "OnlyViewsToDelete", dropped: []*Table{ NewTable("v1", View), NewTable("lead", View), @@ -637,7 +645,7 @@ func TestReloadDataInDB(t *testing.T) { "delete from _vt.views where table_schema = database() and table_name in ('v1', 'lead')": {}, }, }, { - name: "Only views to reload", + name: "OnlyViewsToReload", created: []*Table{ { Name: sqlparser.NewIdentifierCS("v1"), @@ -669,7 +677,7 @@ func TestReloadDataInDB(t *testing.T) { "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, }, }, { - name: "Reload and delete views", 
+ name: "ReloadAndDeleteViews", created: []*Table{ { Name: sqlparser.NewIdentifierCS("v1"), @@ -705,7 +713,7 @@ func TestReloadDataInDB(t *testing.T) { "insert into _vt.views(table_schema, table_name, create_statement, view_definition) values (database(), 'lead', 'create_view_lead', 'select_lead')": {}, }, }, { - name: "Error In Inserting View Data", + name: "ErrorInInsertingViewData", created: []*Table{ { Name: sqlparser.NewIdentifierCS("v1"), @@ -729,7 +737,7 @@ func TestReloadDataInDB(t *testing.T) { }, expectedError: errMessage, }, { - name: "Only tables to delete", + name: "OnlyTablesToDelete", dropped: []*Table{ NewTable("t1", NoType), NewTable("lead", NoType), @@ -741,7 +749,7 @@ func TestReloadDataInDB(t *testing.T) { "delete from _vt.`tables` where table_schema = database() and table_name in ('t1', 'lead')": {}, }, }, { - name: "Only tables to reload", + name: "OnlyTablesToReload", created: []*Table{ { Name: sqlparser.NewIdentifierCS("t1"), @@ -769,7 +777,7 @@ func TestReloadDataInDB(t *testing.T) { "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, }, }, { - name: "Reload and delete tables", + name: "ReloadAndDeleteTables", created: []*Table{ { Name: sqlparser.NewIdentifierCS("t1"), @@ -801,7 +809,7 @@ func TestReloadDataInDB(t *testing.T) { "insert into _vt.`tables`(table_schema, table_name, create_statement, create_time) values (database(), 'lead', 'create_table_lead', 1234)": {}, }, }, { - name: "Error In Inserting Table Data", + name: "ErrorInInsertingTableData", altered: []*Table{ { Name: sqlparser.NewIdentifierCS("t1"), @@ -822,7 +830,7 @@ func TestReloadDataInDB(t *testing.T) { }, expectedError: errMessage, }, { - name: "Reload and delete all", + name: "ReloadAndDeleteAll", created: []*Table{ { Name: sqlparser.NewIdentifierCS("v1"), @@ -878,7 +886,8 @@ func TestReloadDataInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { 
db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tc.name) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -889,7 +898,7 @@ func TestReloadDataInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped) + err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped, false, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -920,7 +929,7 @@ func TestGetFetchViewQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchViewQuery(testcase.viewNames) + query, err := GetFetchViewQuery(testcase.viewNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) @@ -947,7 +956,7 @@ func TestGetFetchTableQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchTableQuery(testcase.tableNames) + query, err := GetFetchTableQuery(testcase.tableNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) @@ -974,7 +983,7 @@ func TestGetFetchTableAndViewsQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchTableAndViewsQuery(testcase.tableNames) + query, err := GetFetchTableAndViewsQuery(testcase.tableNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index ae50b460a96..ddc1b376628 100644 --- 
a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -26,13 +26,13 @@ import ( "sync" "time" - "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/maps2" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql/sqlerror" + "golang.org/x/exp/maps" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/timer" @@ -45,6 +45,7 @@ import ( "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -56,7 +57,7 @@ import ( const maxTableCount = 10000 -type notifier func(full map[string]*Table, created, altered, dropped []*Table) +type notifier func(full map[string]*Table, created, altered, dropped []*Table, udfsChanged bool) // Engine stores the schema info and performs operations that // keep itself up-to-date. @@ -99,14 +100,14 @@ type Engine struct { // NewEngine creates a new Engine. func NewEngine(env tabletenv.Env) *Engine { - reloadTime := env.Config().SchemaReloadIntervalSeconds.Get() + reloadTime := env.Config().SchemaReloadInterval se := &Engine{ env: env, // We need three connections: one for the reloader, one for // the historian, and one for the tracker. 
conns: connpool.NewPool(env, "", tabletenv.ConnPoolConfig{ - Size: 3, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 3, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), ticks: timer.NewTimer(reloadTime), } @@ -160,7 +161,7 @@ func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnecti } return conn.ExecuteFetch(query, maxRows, true) } - if err := sidecardb.Init(ctx, exec); err != nil { + if err := sidecardb.Init(ctx, se.env.Environment(), exec); err != nil { log.Errorf("Error in sidecardb.Init: %+v", err) if se.env.Config().DB.HasGlobalSettings() { log.Warning("Ignoring sidecardb.Init error for unmanaged tablets") @@ -445,6 +446,11 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { return err } + udfsChanged, err := getChangedUserDefinedFunctions(ctx, conn.Conn, shouldUseDatabase) + if err != nil { + return err + } + rec := concurrency.AllErrorRecorder{} // curTables keeps track of tables in the new snapshot so we can detect what was dropped. curTables := map[string]bool{"dual": true} @@ -497,7 +503,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { log.V(2).Infof("Reading schema for table: %s", tableName) tableType := row[1].String() - table, err := LoadTable(conn, se.cp.DBName(), tableName, tableType, row[3].ToString()) + table, err := LoadTable(conn, se.cp.DBName(), tableName, tableType, row[3].ToString(), se.env.Environment().CollationEnv()) if err != nil { if isView := strings.Contains(tableType, tmutils.TableView); isView { log.Warningf("Failed reading schema for the view: %s, error: %v", tableName, err) @@ -525,7 +531,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { dropped := se.getDroppedTables(curTables, changedViews, mismatchTables) - // Populate PKColumns for changed tables. + // Populate PK Columns for changed tables. 
if err := se.populatePrimaryKeys(ctx, conn.Conn, changedTables); err != nil { return err } @@ -534,7 +540,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { if shouldUseDatabase { // If reloadDataInDB succeeds, then we don't want to prevent sending the broadcast notification. // So, we do this step in the end when we can receive no more errors that fail the reload operation. - err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped) + err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped, udfsChanged, se.env.Environment().Parser()) if err != nil { log.Errorf("error in updating schema information in Engine.reload() - %v", err) } @@ -548,7 +554,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { if len(created) > 0 || len(altered) > 0 || len(dropped) > 0 { log.Infof("schema engine created %v, altered %v, dropped %v", extractNamesFromTablesList(created), extractNamesFromTablesList(altered), extractNamesFromTablesList(dropped)) } - se.broadcast(created, altered, dropped) + se.broadcast(created, altered, dropped, udfsChanged) return nil } @@ -586,7 +592,7 @@ func (se *Engine) getDroppedTables(curTables map[string]bool, changedViews map[s } } - return maps2.Values(dropped) + return maps.Values(dropped) } func getTableData(ctx context.Context, conn *connpool.Conn, includeStats bool) (*sqltypes.Result, error) { @@ -663,8 +669,14 @@ func (se *Engine) RegisterVersionEvent() error { return se.historian.RegisterVersionEvent() } -// GetTableForPos returns a best-effort schema for a specific gtid -func (se *Engine) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string) (*binlogdatapb.MinimalTable, error) { +// GetTableForPos makes a best-effort attempt to return a table's schema at a specific +// GTID/position. 
If it cannot get the table schema for the given GTID/position then it +// returns the latest table schema that is available in the database -- the table schema +// for the "current" GTID/position (updating the cache entry). If the table is not found +// in the cache, it will reload the cache from the database in case the table was created +// after the last schema reload or the cache has not yet been initialized. This function +// makes the schema cache a read-through cache for VReplication purposes. +func (se *Engine) GetTableForPos(ctx context.Context, tableName sqlparser.IdentifierCS, gtid string) (*binlogdatapb.MinimalTable, error) { mt, err := se.historian.GetTableForPos(tableName, gtid) if err != nil { log.Infof("GetTableForPos returned error: %s", err.Error()) @@ -673,19 +685,66 @@ func (se *Engine) GetTableForPos(tableName sqlparser.IdentifierCS, gtid string) if mt != nil { return mt, nil } + // We got nothing from the historian, which typically means that it's not enabled. se.mu.Lock() defer se.mu.Unlock() tableNameStr := tableName.String() - st, ok := se.tables[tableNameStr] - if !ok { - if schema.IsInternalOperationTableName(tableNameStr) { - log.Infof("internal table %v found in vttablet schema: skipping for GTID search", tableNameStr) - } else { - log.Infof("table %v not found in vttablet schema, current tables: %v", tableNameStr, se.tables) - return nil, fmt.Errorf("table %v not found in vttablet schema", tableNameStr) + if st, ok := se.tables[tableNameStr]; ok && tableNameStr != "dual" { // No need to refresh dual + // Test Engines (NewEngineForTests()) don't have a conns pool and are not + // supposed to talk to the database, so don't update the cache entry in that + // case. + if se.conns == nil { + return newMinimalTable(st), nil + } + // We have the table in our cache. Let's be sure that our table definition is + // up-to-date for the "current" position. 
+ conn, err := se.conns.Get(ctx, nil) + if err != nil { + return nil, err + } + defer conn.Recycle() + cst := *st // Make a copy + cst.Fields = nil // We're going to refresh the columns/fields + if err := fetchColumns(&cst, conn, se.cp.DBName(), tableNameStr); err != nil { + return nil, err + } + // Update the PK columns for the table as well as they may have changed. + cst.PKColumns = nil // We're going to repopulate the PK columns + if err := se.populatePrimaryKeys(ctx, conn.Conn, map[string]*Table{tableNameStr: &cst}); err != nil { + return nil, err + } + se.tables[tableNameStr] = &cst + return newMinimalTable(&cst), nil + } + // It's expected that internal tables are not found within VReplication workflows. + // No need to refresh the cache for internal tables. + if schema.IsInternalOperationTableName(tableNameStr) { + log.Infof("internal table %v found in vttablet schema: skipping for GTID search", tableNameStr) + return nil, nil + } + // We don't currently have the non-internal table in the cache. This can happen when + // a table was created after the last schema reload (which happens at least every + // --queryserver-config-schema-reload-time). + // Whatever the reason, we should ensure that our cache is able to get the latest + // table schema for the "current" position IF the table exists in the database. + // In order to ensure this, we need to reload the latest schema so that our cache + // is up to date. This effectively turns our in-memory cache into a read-through + // cache for VReplication related needs (this function is only used by vstreamers). + // This adds an additional cost, but for VReplication it should be rare that we are + // trying to replicate a table that doesn't actually exist. + // This also allows us to perform a just-in-time initialization of the cache if + // a vstreamer is the first one to access it. 
+ if se.conns != nil { // Test Engines (NewEngineForTests()) don't have a conns pool + if err := se.reload(ctx, true); err != nil { + return nil, err + } + if st, ok := se.tables[tableNameStr]; ok { + return newMinimalTable(st), nil } } - return newMinimalTable(st), nil + + log.Infof("table %v not found in vttablet schema, current tables: %v", tableNameStr, se.tables) + return nil, fmt.Errorf("table %v not found in vttablet schema", tableNameStr) } // RegisterNotifier registers the function for schema change notification. @@ -706,7 +765,8 @@ func (se *Engine) RegisterNotifier(name string, f notifier, runNotifier bool) { created = append(created, table) } if runNotifier { - f(se.tables, created, nil, nil) + s := maps.Clone(se.tables) + f(s, created, nil, nil, true) } } @@ -727,19 +787,16 @@ func (se *Engine) UnregisterNotifier(name string) { } // broadcast must be called while holding a lock on se.mu. -func (se *Engine) broadcast(created, altered, dropped []*Table) { +func (se *Engine) broadcast(created, altered, dropped []*Table, udfsChanged bool) { if !se.isOpen { return } se.notifierMu.Lock() defer se.notifierMu.Unlock() - s := make(map[string]*Table, len(se.tables)) - for k, v := range se.tables { - s[k] = v - } + s := maps.Clone(se.tables) for _, f := range se.notifiers { - f(s, created, altered, dropped) + f(s, created, altered, dropped, udfsChanged) } } @@ -755,10 +812,7 @@ func (se *Engine) GetTable(tableName sqlparser.IdentifierCS) *Table { func (se *Engine) GetSchema() map[string]*Table { se.mu.Lock() defer se.mu.Unlock() - tables := make(map[string]*Table, len(se.tables)) - for k, v := range se.tables { - tables[k] = v - } + tables := maps.Clone(se.tables) return tables } @@ -831,6 +885,7 @@ func NewEngineForTests() *Engine { isOpen: true, tables: make(map[string]*Table), historian: newHistorian(false, 0, nil), + env: tabletenv.NewEnv(vtenv.NewTestEnv(), tabletenv.NewDefaultConfig(), "SchemaEngineForTests"), } return se } @@ -846,6 +901,10 @@ func (se 
*Engine) GetDBConnector() dbconfigs.Connector { return se.cp } +func (se *Engine) Environment() *vtenv.Environment { + return se.env.Environment() +} + func extractNamesFromTablesList(tables []*Table) []string { var tableNames []string for _, table := range tables { diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 0a98a6ee676..b3a8b1e2971 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -21,6 +21,7 @@ import ( "errors" "expvar" "fmt" + "math/rand/v2" "net/http" "net/http/httptest" "sort" @@ -32,22 +33,23 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/constants/sidecar" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/event/syslogger" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema/schematest" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" ) const baseShowTablesPattern = `SELECT t\.table_name.*` @@ -155,7 +157,7 @@ func TestOpenAndReload(t *testing.T) { AddFakeInnoDBReadRowsResult(db, secondReadRowsValue) firstTime := true - notifier := func(full map[string]*Table, created, altered, dropped []*Table) { + notifier := func(full map[string]*Table, created, altered, dropped []*Table, _ bool) { if firstTime { firstTime = false createTables := extractNamesFromTablesList(created) @@ -565,7 +567,7 
@@ func TestSchemaEngineCloseTickRace(t *testing.T) { } finished <- true }() - // Wait until the ticks are stopped or 2 seonds have expired. + // Wait until the ticks are stopped or 2 seconds have expired. select { case <-finished: return @@ -575,19 +577,19 @@ func TestSchemaEngineCloseTickRace(t *testing.T) { } func newEngine(reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAgeSeconds int64, db *fakesqldb.DB) *Engine { - config := tabletenv.NewDefaultConfig() - _ = config.SchemaReloadIntervalSeconds.Set(reloadTime.String()) - _ = config.OltpReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.OlapReadPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - _ = config.TxPool.IdleTimeoutSeconds.Set(idleTimeout.String()) - config.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds - se := NewEngine(tabletenv.NewEnv(config, "SchemaTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.SchemaReloadInterval = reloadTime + cfg.OltpReadPool.IdleTimeout = idleTimeout + cfg.OlapReadPool.IdleTimeout = idleTimeout + cfg.TxPool.IdleTimeout = idleTimeout + cfg.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds + se := NewEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "SchemaTest")) se.InitDBConfig(newDBConfigs(db).DbaWithDB()) return se } func newDBConfigs(db *fakesqldb.DB) *dbconfigs.DBConfigs { - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params return dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") } @@ -705,6 +707,29 @@ func AddFakeInnoDBReadRowsResult(db *fakesqldb.DB, value int) *fakesqldb.Expecte )) } +// TestRegisterNotifier tests the functionality of RegisterNotifier +// It also makes sure that writing to the tables map in the schema engine doesn't change the tables received by the notifiers. 
+func TestRegisterNotifier(t *testing.T) { + // Create a new engine for testing + se := NewEngineForTests() + se.notifiers = map[string]notifier{} + se.tables = map[string]*Table{ + "t1": nil, + "t2": nil, + "t3": nil, + } + + var tablesReceived map[string]*Table + // Register a notifier and make it run immediately. + se.RegisterNotifier("TestRegisterNotifier", func(full map[string]*Table, created, altered, dropped []*Table, _ bool) { + tablesReceived = full + }, true) + + // Change the se.tables and make sure it doesn't affect the tables received by the notifier. + se.tables["t4"] = nil + require.Len(t, tablesReceived, 3) +} + // TestEngineMysqlTime tests the functionality of Engine.mysqlTime function func TestEngineMysqlTime(t *testing.T) { tests := []struct { @@ -742,7 +767,8 @@ func TestEngineMysqlTime(t *testing.T) { t.Run(tt.name, func(t *testing.T) { se := &Engine{} db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tt.name) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) if tt.timeStampErr != nil { @@ -848,7 +874,8 @@ func TestEnginePopulatePrimaryKeys(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tt.name) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := &Engine{} @@ -909,7 +936,8 @@ func TestEngineUpdateInnoDBRowsRead(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, tt.name) + conn, err := connpool.NewConn(context.Background(), 
dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := &Engine{} se.innoDbReadRowsCounter = stats.NewCounter("TestEngineUpdateInnoDBRowsRead-"+tt.name, "") @@ -936,7 +964,8 @@ func TestEngineUpdateInnoDBRowsRead(t *testing.T) { // TestEngineGetTableData tests the functionality of getTableData function func TestEngineGetTableData(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestEngineGetTableData") + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) tests := []struct { @@ -1110,7 +1139,8 @@ func TestEngineReload(t *testing.T) { cfg := tabletenv.NewDefaultConfig() cfg.DB = newDBConfigs(db) cfg.SignalWhenSchemaChange = true - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TestEngineReload") + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := newEngine(10*time.Second, 10*time.Second, 0, db) @@ -1162,23 +1192,23 @@ func TestEngineReload(t *testing.T) { } // MySQL unix timestamp query. db.AddQuery("SELECT UNIX_TIMESTAMP()", sqltypes.MakeTestResult(sqltypes.MakeTestFields("UNIX_TIMESTAMP", "int64"), "987654326")) - // Table t2 is updated, t3 is created and t4 is deleted. - // View v2 is updated, v3 is created and v4 is deleted. + // Table t2 is updated, T2 is created and t4 is deleted. + // View v2 is updated, V2 is created and v4 is deleted. 
db.AddQuery(conn.BaseShowTables(), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|table_type|unix_timestamp(create_time)|table_comment", "varchar|varchar|int64|varchar"), "t1|BASE_TABLE|123456789|", "t2|BASE_TABLE|123456790|", - "t3|BASE_TABLE|123456789|", + "T2|BASE_TABLE|123456789|", "v1|VIEW|123456789|", "v2|VIEW|123456789|", - "v3|VIEW|123456789|", + "V2|VIEW|123456789|", )) // Detecting view changes. - // According to the database, v2, v3, v4, and v5 require updating. + // According to the database, v2, V2, v4, and v5 require updating. db.AddQuery(fmt.Sprintf(detectViewChange, sidecar.GetIdentifier()), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"), "v2", - "v3", + "V2", "v4", "v5", )) @@ -1197,7 +1227,7 @@ func TestEngineReload(t *testing.T) { "Innodb_rows_read|35")) // Queries to load the tables' information. - for _, tableName := range []string{"t2", "t3", "v2", "v3"} { + for _, tableName := range []string{"t2", "T2", "v2", "V2"} { db.AddQuery(fmt.Sprintf(`SELECT COLUMN_NAME as column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'fakesqldb' AND TABLE_NAME = '%s' @@ -1211,12 +1241,12 @@ func TestEngineReload(t *testing.T) { db.AddQuery(mysql.BaseShowPrimary, sqltypes.MakeTestResult(mysql.ShowPrimaryFields, "t1|col1", "t2|col1", - "t3|col1", + "T2|col1", )) // Queries for reloading the tables' information. 
{ - for _, tableName := range []string{"t2", "t3"} { + for _, tableName := range []string{"t2", "T2"} { db.AddQuery(fmt.Sprintf(`show create table %s`, tableName), sqltypes.MakeTestResult(sqltypes.MakeTestFields("Table | Create Table", "varchar|varchar"), fmt.Sprintf("%v|create_table_%v", tableName, tableName))) @@ -1225,41 +1255,44 @@ func TestEngineReload(t *testing.T) { db.AddQuery("commit", &sqltypes.Result{}) db.AddQuery("rollback", &sqltypes.Result{}) // We are adding both the variants of the delete statements that we can see in the test, since the deleted tables are initially stored as a map, the order is not defined. - db.AddQuery("delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t5', 't4', 't3', 't2')", &sqltypes.Result{}) - db.AddQuery("delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t4', 't5', 't3', 't2')", &sqltypes.Result{}) + db.AddQuery("delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t5', 't4', 'T2', 't2')", &sqltypes.Result{}) + db.AddQuery("delete from _vt.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('t4', 't5', 'T2', 't2')", &sqltypes.Result{}) db.AddQuery("insert into _vt.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), 't2', 'create_table_t2', 123456790)", &sqltypes.Result{}) - db.AddQuery("insert into _vt.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), 't3', 'create_table_t3', 123456789)", &sqltypes.Result{}) + db.AddQuery("insert into _vt.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), 'T2', 'create_table_T2', 123456789)", &sqltypes.Result{}) } // Queries for reloading the views' information. 
{ - for _, tableName := range []string{"v2", "v3"} { + for _, tableName := range []string{"v2", "V2"} { db.AddQuery(fmt.Sprintf(`show create table %s`, tableName), sqltypes.MakeTestResult(sqltypes.MakeTestFields(" View | Create View | character_set_client | collation_connection", "varchar|varchar|varchar|varchar"), fmt.Sprintf("%v|create_table_%v|utf8mb4|utf8mb4_0900_ai_ci", tableName, tableName))) } // We are adding both the variants of the select statements that we can see in the test, since the deleted views are initially stored as a map, the order is not defined. - db.AddQuery("select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v4', 'v5', 'v3', 'v2')", + db.AddQuery("select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v4', 'v5', 'V2', 'v2')", sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar"), "v2|select_v2", - "v3|select_v3", + "V2|select_V2", )) - db.AddQuery("select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v5', 'v4', 'v3', 'v2')", + db.AddQuery("select table_name, view_definition from information_schema.views where table_schema = database() and table_name in ('v5', 'v4', 'V2', 'v2')", sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|view_definition", "varchar|varchar"), "v2|select_v2", - "v3|select_v3", + "V2|select_V2", )) // We are adding both the variants of the delete statements that we can see in the test, since the deleted views are initially stored as a map, the order is not defined. 
- db.AddQuery("delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('v4', 'v5', 'v3', 'v2')", &sqltypes.Result{}) - db.AddQuery("delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('v5', 'v4', 'v3', 'v2')", &sqltypes.Result{}) + db.AddQuery("delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('v4', 'v5', 'V2', 'v2')", &sqltypes.Result{}) + db.AddQuery("delete from _vt.views where TABLE_SCHEMA = database() and TABLE_NAME in ('v5', 'v4', 'V2', 'v2')", &sqltypes.Result{}) db.AddQuery("insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'v2', 'create_table_v2', 'select_v2')", &sqltypes.Result{}) - db.AddQuery("insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'v3', 'create_table_v3', 'select_v3')", &sqltypes.Result{}) + db.AddQuery("insert into _vt.views(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, VIEW_DEFINITION) values (database(), 'V2', 'create_table_V2', 'select_V2')", &sqltypes.Result{}) } + // adding query pattern for udfs + db.AddQueryPattern("SELECT name.*", &sqltypes.Result{}) + // Verify the list of created, altered and dropped tables seen. 
- se.RegisterNotifier("test", func(full map[string]*Table, created, altered, dropped []*Table) { - require.ElementsMatch(t, extractNamesFromTablesList(created), []string{"t3", "v3"}) + se.RegisterNotifier("test", func(full map[string]*Table, created, altered, dropped []*Table, _ bool) { + require.ElementsMatch(t, extractNamesFromTablesList(created), []string{"T2", "V2"}) require.ElementsMatch(t, extractNamesFromTablesList(altered), []string{"t2", "v2"}) require.ElementsMatch(t, extractNamesFromTablesList(dropped), []string{"t4", "v4", "t5", "v5"}) }, false) @@ -1269,3 +1302,192 @@ func TestEngineReload(t *testing.T) { require.NoError(t, err) require.NoError(t, db.LastError()) } + +// TestEngineReload tests the vreplication specific GetTableForPos function to ensure +// that it conforms to the intended/expected behavior in various scenarios. +// This more specifically tests the behavior of the function when the historian is +// disabled or otherwise unable to get a table schema for the given position. When it +// CAN, that is tested indepenently in the historian tests. +func TestGetTableForPos(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fakedb := fakesqldb.New(t) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(fakedb) + table := sqlparser.NewIdentifierCS("t1") + column := "col1" + tableSchema := fmt.Sprintf("create table %s (%s varchar(50), primary key(col1))", table.String(), column) + tableMt := &binlogdatapb.MinimalTable{ + Name: table.String(), + Fields: []*querypb.Field{ + { + Name: column, + Type: sqltypes.VarChar, + }, + }, + PKColumns: []int64{0}, // First column: col1 + } + + // Don't do any automatic / TTL based cache refreshes. 
+ se := newEngine(1*time.Hour, 1*time.Hour, 0, fakedb) + se.conns.Open(se.cp, se.cp, se.cp) + se.isOpen = true + se.notifiers = make(map[string]notifier) + se.MakePrimary(true) + se.historian.enabled = false + + addExpectedReloadQueries := func(db *fakesqldb.DB) { + db.AddQuery("SELECT UNIX_TIMESTAMP()", sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "UNIX_TIMESTAMP()", + "int64"), + fmt.Sprintf("%d", time.Now().Unix()), + )) + db.AddQuery(fmt.Sprintf(detectViewChange, sidecar.GetIdentifier()), sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name", "varchar"))) + db.AddQuery(fmt.Sprintf(readTableCreateTimes, sidecar.GetIdentifier()), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("table_name|create_time", "varchar|int64"))) + db.AddQuery(fmt.Sprintf(detectUdfChange, sidecar.GetIdentifier()), &sqltypes.Result{}) + db.AddQueryPattern(baseShowTablesPattern, + &sqltypes.Result{ + Fields: mysql.BaseShowTablesFields, + RowsAffected: 0, + InsertID: 0, + Rows: [][]sqltypes.Value{ + { + sqltypes.MakeTrusted(sqltypes.VarChar, []byte(table.String())), // table_name + sqltypes.MakeTrusted(sqltypes.VarChar, []byte("BASE TABLE")), // table_type + sqltypes.MakeTrusted(sqltypes.Int64, []byte(fmt.Sprintf("%d", time.Now().Unix()-1000))), // unix_timestamp(t.create_time) + sqltypes.MakeTrusted(sqltypes.VarChar, []byte("")), // table_comment + sqltypes.MakeTrusted(sqltypes.Int64, []byte("128")), // file_size + sqltypes.MakeTrusted(sqltypes.Int64, []byte("256")), // allocated_size + }, + }, + SessionStateChanges: "", + StatusFlags: 0, + }, + ) + db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{ + Fields: mysql.ShowPrimaryFields, + Rows: [][]sqltypes.Value{ + mysql.ShowPrimaryRow(table.String(), column), + }, + }) + db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, table.String()), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), column)) + db.AddQuery(fmt.Sprintf("SELECT `%s` FROM `fakesqldb`.`%v` WHERE 1 != 1", 
column, table.String()), + sqltypes.MakeTestResult(sqltypes.MakeTestFields(column, "varchar"))) + db.AddQuery(fmt.Sprintf(`show create table %s`, table.String()), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("Table|Create Table", "varchar|varchar"), table.String(), tableSchema)) + db.AddQuery("begin", &sqltypes.Result{}) + db.AddQuery(fmt.Sprintf("delete from %s.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('%s')", + sidecar.GetIdentifier(), table.String()), &sqltypes.Result{}) + db.AddQuery(fmt.Sprintf("insert into %s.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), '%s', '%s', %d)", + sidecar.GetIdentifier(), table.String(), tableSchema, time.Now().Unix()), &sqltypes.Result{RowsAffected: 1}) + db.AddQuery("rollback", &sqltypes.Result{}) + } + + type testcase struct { + name string + initialCacheState map[string]*Table + expectedQueriesFunc func(db *fakesqldb.DB) + expectFunc func() + } + tests := []testcase{ + { + name: "GetTableForPos with cache uninitialized", + initialCacheState: make(map[string]*Table), // empty + expectedQueriesFunc: func(db *fakesqldb.DB) { + // We do a reload to initialize the cache. + addExpectedReloadQueries(db) + }, + expectFunc: func() { + tbl, err := se.GetTableForPos(ctx, table, "") + require.NoError(t, err) + require.Equal(t, tableMt, tbl) + }, + }, + { + name: "GetTableForPos with cache uninitialized, table not found", + initialCacheState: make(map[string]*Table), // empty + expectedQueriesFunc: func(db *fakesqldb.DB) { + // We do a reload to initialize the cache and in doing so get the missing table. 
+ addExpectedReloadQueries(db) + }, + expectFunc: func() { + tbl, err := se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("nobueno"), "") + require.EqualError(t, err, "table nobueno not found in vttablet schema") + require.Nil(t, tbl) + }, + }, + { + name: "GetTableForPos with cache initialized, table not found", + initialCacheState: map[string]*Table{"t2": {Name: sqlparser.NewIdentifierCS("t2")}}, + expectedQueriesFunc: func(db *fakesqldb.DB) { + // We do a reload to try and get this missing table and any other recently created ones. + addExpectedReloadQueries(db) + }, + expectFunc: func() { + tbl, err := se.GetTableForPos(ctx, table, "") + require.NoError(t, err) + require.Equal(t, tableMt, tbl) + }, + }, + { + name: "GetTableForPos with cache initialized, table found", + initialCacheState: map[string]*Table{table.String(): {Name: table}}, + expectedQueriesFunc: func(db *fakesqldb.DB) { + // We only reload the column and PK info for the table in our cache. A new column + // called col2 has been added to the table schema and it is the new PK. 
+ newTableSchema := fmt.Sprintf("create table %s (%s varchar(50), col2 varchar(50), primary key(col2))", table.String(), column) + db.AddQuery(mysql.BaseShowPrimary, &sqltypes.Result{ + Fields: mysql.ShowPrimaryFields, + Rows: [][]sqltypes.Value{ + mysql.ShowPrimaryRow(table.String(), "col2"), + }, + }) + db.AddQueryPattern(fmt.Sprintf(mysql.GetColumnNamesQueryPatternForTable, table.String()), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("column_name", "varchar"), column, "col2")) + db.AddQuery(fmt.Sprintf("SELECT `%s`, `%s` FROM `fakesqldb`.`%v` WHERE 1 != 1", + column, "col2", table.String()), sqltypes.MakeTestResult(sqltypes.MakeTestFields(fmt.Sprintf("%s|%s", column, "col2"), "varchar|varchar"))) + db.AddQuery(fmt.Sprintf(`show create table %s`, table.String()), + sqltypes.MakeTestResult(sqltypes.MakeTestFields("Table|Create Table", "varchar|varchar"), table.String(), newTableSchema)) + db.AddQuery("begin", &sqltypes.Result{}) + db.AddQuery(fmt.Sprintf("delete from %s.`tables` where TABLE_SCHEMA = database() and TABLE_NAME in ('%s')", + sidecar.GetIdentifier(), table.String()), &sqltypes.Result{}) + db.AddQuery(fmt.Sprintf("insert into %s.`tables`(TABLE_SCHEMA, TABLE_NAME, CREATE_STATEMENT, CREATE_TIME) values (database(), '%s', '%s', %d)", + sidecar.GetIdentifier(), table.String(), newTableSchema, time.Now().Unix()), &sqltypes.Result{}) + db.AddQuery("rollback", &sqltypes.Result{}) + }, + expectFunc: func() { + tbl, err := se.GetTableForPos(ctx, table, "MySQL56/1497ddb0-7cb9-11ed-a1eb-0242ac120002:1-891") + require.NoError(t, err) + require.NotNil(t, tbl) + require.Equal(t, &binlogdatapb.MinimalTable{ + Name: table.String(), + Fields: []*querypb.Field{ + { + Name: column, + Type: sqltypes.VarChar, + }, + { + Name: "col2", + Type: sqltypes.VarChar, + }, + }, + PKColumns: []int64{1}, // Second column: col2 + }, tbl) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + fakedb.DeleteAllQueries() + 
AddFakeInnoDBReadRowsResult(fakedb, int(rand.Int32N(1000000))) + tc.expectedQueriesFunc(fakedb) + se.tables = tc.initialCacheState + tc.expectFunc() + fakedb.VerifyAllExecutedOrFail() + require.NoError(t, fakedb.LastError()) + }) + } +} diff --git a/go/vt/vttablet/tabletserver/schema/historian.go b/go/vt/vttablet/tabletserver/schema/historian.go index b65ab514585..ca57f6d43e0 100644 --- a/go/vt/vttablet/tabletserver/schema/historian.go +++ b/go/vt/vttablet/tabletserver/schema/historian.go @@ -26,11 +26,11 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/vt/sqlparser" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) const getInitialSchemaVersions = "select id, pos, ddl, time_updated, schemax from %s.schema_version where time_updated > %d order by id asc" diff --git a/go/vt/vttablet/tabletserver/schema/historian_test.go b/go/vt/vttablet/tabletserver/schema/historian_test.go index f66306966de..2b7482866f1 100644 --- a/go/vt/vttablet/tabletserver/schema/historian_test.go +++ b/go/vt/vttablet/tabletserver/schema/historian_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package schema import ( + "context" "fmt" "testing" "time" @@ -25,11 +26,11 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" ) func getTable(name string, fieldNames []string, fieldTypes []querypb.Type, pks []int64) *binlogdatapb.MinimalTable { @@ -39,7 +40,7 @@ func getTable(name string, fieldNames []string, fieldTypes []querypb.Type, pks [ fields := []*querypb.Field{} for i := range fieldNames { typ := fieldTypes[i] - cs := collations.DefaultCollationForType(typ) + cs := collations.CollationForType(typ, collations.MySQL8().DefaultConnectionCharset()) fields = append(fields, &querypb.Field{ Name: fieldNames[i], Type: typ, @@ -78,6 +79,7 @@ func getDbSchemaBlob(t *testing.T, tables map[string]*binlogdatapb.MinimalTable) } func TestHistorian(t *testing.T) { + ctx := context.Background() se, db, cancel := getTestSchemaEngine(t, 0) defer cancel() @@ -88,13 +90,13 @@ func TestHistorian(t *testing.T) { ddl1 := "create table tracker_test (id int)" ts1 := int64(1427325876) _, _, _ = ddl1, ts1, db - _, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + _, err := se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) - tab, err := se.GetTableForPos(sqlparser.NewIdentifierCS("dual"), gtid1) + tab, err := se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("dual"), gtid1) require.NoError(t, err) require.Equal(t, `name:"dual"`, fmt.Sprintf("%v", tab)) se.EnableHistorian(true) - _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) var blob1 string @@ -127,11 +129,11 @@ func TestHistorian(t 
*testing.T) { }) require.Nil(t, se.RegisterVersionEvent()) exp1 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:INT32 table:"t1" charset:63 flags:32768} p_k_columns:0` - tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.NoError(t, err) require.Equal(t, exp1, fmt.Sprintf("%v", tab)) gtid2 := gtidPrefix + "1-20" - _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) table = getTable("t1", []string{"id1", "id2"}, []querypb.Type{querypb.Type_INT32, querypb.Type_VARBINARY}, []int64{0}) @@ -147,11 +149,11 @@ func TestHistorian(t *testing.T) { }) require.Nil(t, se.RegisterVersionEvent()) exp2 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:VARBINARY table:"t1" charset:63 flags:128} p_k_columns:0` - tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) require.Equal(t, exp2, fmt.Sprintf("%v", tab)) gtid3 := gtidPrefix + "1-30" - _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid3) + _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid3) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) table = getTable("t1", []string{"id1", "id2", "id3"}, []querypb.Type{querypb.Type_INT32, querypb.Type_VARBINARY, querypb.Type_INT32}, []int64{0}) @@ -167,22 +169,23 @@ func TestHistorian(t *testing.T) { }) require.Nil(t, se.RegisterVersionEvent()) exp3 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:VARBINARY table:"t1" charset:63 flags:128} fields:{name:"id3" type:INT32 table:"t1" charset:63 flags:32768} 
p_k_columns:0` - tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid3) + tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid3) require.NoError(t, err) require.Equal(t, exp3, fmt.Sprintf("%v", tab)) - tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.NoError(t, err) require.Equal(t, exp1, fmt.Sprintf("%v", tab)) - tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) require.Equal(t, exp2, fmt.Sprintf("%v", tab)) - tab, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid3) + tab, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid3) require.NoError(t, err) require.Equal(t, exp3, fmt.Sprintf("%v", tab)) } func TestHistorianPurgeOldSchemas(t *testing.T) { + ctx := context.Background() schemaVersionMaxAgeSeconds := 3600 // 1 hour se, db, cancel := getTestSchemaEngine(t, int64(schemaVersionMaxAgeSeconds)) defer cancel() @@ -194,7 +197,7 @@ func TestHistorianPurgeOldSchemas(t *testing.T) { ts1 := time.Now().Add(time.Duration(-24) * time.Hour) _, _, _ = ddl1, ts1, db se.EnableHistorian(true) - _, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + _, err := se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) var blob1 string @@ -226,14 +229,14 @@ func TestHistorianPurgeOldSchemas(t *testing.T) { }, }) require.Nil(t, se.RegisterVersionEvent()) - _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid1) + _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid1) // validate the old schema has been purged require.Equal(t, "table t1 not found in vttablet schema", err.Error()) require.Equal(t, 0, len(se.historian.schemas)) // add a second schema record row with a time_updated that won't 
be purged gtid2 := gtidPrefix + "1-20" - _, err = se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + _, err = se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.Equal(t, "table t1 not found in vttablet schema", err.Error()) table = getTable("t1", []string{"id1", "id2"}, []querypb.Type{querypb.Type_INT32, querypb.Type_VARBINARY}, []int64{0}) @@ -250,7 +253,7 @@ func TestHistorianPurgeOldSchemas(t *testing.T) { }) require.Nil(t, se.RegisterVersionEvent()) exp2 := `name:"t1" fields:{name:"id1" type:INT32 table:"t1" charset:63 flags:32768} fields:{name:"id2" type:VARBINARY table:"t1" charset:63 flags:128} p_k_columns:0` - tab, err := se.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid2) + tab, err := se.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid2) require.NoError(t, err) require.Equal(t, exp2, fmt.Sprintf("%v", tab)) require.Equal(t, 1, len(se.historian.schemas)) diff --git a/go/vt/vttablet/tabletserver/schema/load_table.go b/go/vt/vttablet/tabletserver/schema/load_table.go index 687672a4a02..6022f8724eb 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table.go +++ b/go/vt/vttablet/tabletserver/schema/load_table.go @@ -34,7 +34,7 @@ import ( ) // LoadTable creates a Table from the schema info in the database. 
-func LoadTable(conn *connpool.PooledConn, databaseName, tableName, tableType string, comment string) (*Table, error) { +func LoadTable(conn *connpool.PooledConn, databaseName, tableName, tableType string, comment string, collationEnv *collations.Environment) (*Table, error) { ta := NewTable(tableName, NoType) sqlTableName := sqlparser.String(ta.Name) if err := fetchColumns(ta, conn, databaseName, sqlTableName); err != nil { @@ -45,7 +45,7 @@ func LoadTable(conn *connpool.PooledConn, databaseName, tableName, tableType str ta.Type = Sequence ta.SequenceInfo = &SequenceInfo{} case strings.Contains(comment, "vitess_message"): - if err := loadMessageInfo(ta, comment); err != nil { + if err := loadMessageInfo(ta, comment, collationEnv); err != nil { return nil, err } ta.Type = Message @@ -68,7 +68,7 @@ func fetchColumns(ta *Table, conn *connpool.PooledConn, databaseName, sqlTableNa return nil } -func loadMessageInfo(ta *Table, comment string) error { +func loadMessageInfo(ta *Table, comment string, collationEnv *collations.Environment) error { ta.MessageInfo = &MessageInfo{} // Extract keyvalues. keyvals := make(map[string]string) @@ -152,7 +152,7 @@ func loadMessageInfo(ta *Table, comment string) error { if specifiedCols[0] != "id" { return fmt.Errorf("vt_message_cols must begin with id: %s", ta.Name.String()) } - ta.MessageInfo.Fields = getSpecifiedMessageFields(ta.Fields, specifiedCols) + ta.MessageInfo.Fields = getSpecifiedMessageFields(ta.Fields, specifiedCols, collationEnv) } else { ta.MessageInfo.Fields = getDefaultMessageFields(ta.Fields, hiddenCols) } @@ -211,11 +211,11 @@ func getDefaultMessageFields(tableFields []*querypb.Field, hiddenCols map[string // we have already validated that all the specified columns exist in the table schema, so we don't need to // check again and possibly return an error here. 
-func getSpecifiedMessageFields(tableFields []*querypb.Field, specifiedCols []string) []*querypb.Field { +func getSpecifiedMessageFields(tableFields []*querypb.Field, specifiedCols []string, collationEnv *collations.Environment) []*querypb.Field { fields := make([]*querypb.Field, 0, len(specifiedCols)) for _, col := range specifiedCols { for _, field := range tableFields { - if res, _ := evalengine.NullsafeCompare(sqltypes.NewVarChar(field.Name), sqltypes.NewVarChar(strings.TrimSpace(col)), collations.Default()); res == 0 { + if res, _ := evalengine.NullsafeCompare(sqltypes.NewVarChar(field.Name), sqltypes.NewVarChar(strings.TrimSpace(col)), collationEnv, collationEnv.DefaultConnectionCharset(), nil); res == 0 { fields = append(fields, field) break } diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index eeefb688e61..6416e2e306e 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -23,7 +23,10 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -227,13 +230,13 @@ func TestLoadTableMessage(t *testing.T) { func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Table, error) { ctx := context.Background() - appParams := db.ConnParams() - dbaParams := db.ConnParams() + appParams := dbconfigs.New(db.ConnParams()) + dbaParams := dbconfigs.New(db.ConnParams()) cfg := tabletenv.ConnPoolConfig{ - Size: 2, + Size: 2, + IdleTimeout: 10 * time.Second, } - _ = cfg.IdleTimeoutSeconds.Set("10s") - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) + connPool := connpool.NewPool(tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "SchemaTest"), "", cfg) connPool.Open(appParams, dbaParams, appParams) 
conn, err := connPool.Get(ctx, nil) if err != nil { @@ -241,7 +244,7 @@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl } defer conn.Recycle() - return LoadTable(conn, "fakesqldb", "test_table", tableType, comment) + return LoadTable(conn, "fakesqldb", "test_table", tableType, comment, collations.MySQL8()) } func mockLoadTableQueries(db *fakesqldb.DB) { diff --git a/go/vt/vttablet/tabletserver/schema/schema.go b/go/vt/vttablet/tabletserver/schema/schema.go index 95c191392cd..4b3d9c88fb5 100644 --- a/go/vt/vttablet/tabletserver/schema/schema.go +++ b/go/vt/vttablet/tabletserver/schema/schema.go @@ -62,7 +62,7 @@ type Table struct { AllocatedSize uint64 } -// SequenceInfo contains info specific to sequence tabels. +// SequenceInfo contains info specific to sequence tables. // It must be locked before accessing the values inside. // If CurVal==LastVal, we have to cache new values. // When the schema is first loaded, the values are all 0, diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index 9b4deaff6c4..8db202efa13 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -25,18 +25,18 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) -// VStreamer defines the functions of VStreamer +// VStreamer defines the functions of VStreamer // 
that the replicationWatcher needs. type VStreamer interface { Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error @@ -134,12 +134,12 @@ func (tr *Tracker) process(ctx context.Context) { gtid = event.Gtid } if event.Type == binlogdatapb.VEventType_DDL && - MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName()) { + MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName(), tr.env.Environment().Parser()) { if err := tr.schemaUpdated(gtid, event.Statement, event.Timestamp); err != nil { tr.env.Stats().ErrorCounters.Add(vtrpcpb.Code_INTERNAL.String(), 1) log.Errorf("Error updating schema: %s for ddl %s, gtid %s", - sqlparser.TruncateForLog(err.Error()), event.Statement, gtid) + tr.env.Environment().Parser().TruncateForLog(err.Error()), event.Statement, gtid) } } } @@ -248,8 +248,8 @@ func encodeString(in string) string { } // MustReloadSchemaOnDDL returns true if the ddl is for the db which is part of the workflow and is not an online ddl artifact -func MustReloadSchemaOnDDL(sql string, dbname string) bool { - ast, err := sqlparser.Parse(sql) +func MustReloadSchemaOnDDL(sql string, dbname string, parser *sqlparser.Parser) bool { + ast, err := parser.Parse(sql) if err != nil { return false } @@ -263,7 +263,7 @@ func MustReloadSchemaOnDDL(sql string, dbname string) bool { if table.IsEmpty() { continue } - if !table.Qualifier.IsEmpty() && table.Qualifier.String() != dbname { + if table.Qualifier.NotEmpty() && table.Qualifier.String() != dbname { continue } tableName := table.Name.String() diff --git a/go/vt/vttablet/tabletserver/schema/tracker_test.go b/go/vt/vttablet/tabletserver/schema/tracker_test.go index 2029235b2e3..32f68597779 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker_test.go +++ b/go/vt/vttablet/tabletserver/schema/tracker_test.go @@ -17,14 +17,15 @@ limitations under the License. 
package schema import ( + "context" "testing" "github.com/stretchr/testify/require" - "context" - "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) @@ -76,9 +77,9 @@ func TestTracker(t *testing.T) { }, }}, } - config := se.env.Config() - config.TrackSchemaVersions = true - env := tabletenv.NewEnv(config, "TrackerTest") + cfg := se.env.Config() + cfg.TrackSchemaVersions = true + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TrackerTest") initial := env.Stats().ErrorCounters.Counts()["INTERNAL"] tracker := NewTracker(env, vs, se) tracker.Open() @@ -120,9 +121,9 @@ func TestTrackerShouldNotInsertInitialSchema(t *testing.T) { }, }}, } - config := se.env.Config() - config.TrackSchemaVersions = true - env := tabletenv.NewEnv(config, "TrackerTest") + cfg := se.env.Config() + cfg.TrackSchemaVersions = true + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TrackerTest") tracker := NewTracker(env, vs, se) tracker.Open() <-vs.done @@ -170,7 +171,7 @@ func TestMustReloadSchemaOnDDL(t *testing.T) { } for _, tc := range testcases { t.Run("", func(t *testing.T) { - require.Equal(t, tc.want, MustReloadSchemaOnDDL(tc.query, tc.dbname)) + require.Equal(t, tc.want, MustReloadSchemaOnDDL(tc.query, tc.dbname, sqlparser.NewTestParser())) }) } } diff --git a/go/vt/vttablet/tabletserver/state_manager.go b/go/vt/vttablet/tabletserver/state_manager.go index 2115871c6bb..308f9165ba6 100644 --- a/go/vt/vttablet/tabletserver/state_manager.go +++ b/go/vt/vttablet/tabletserver/state_manager.go @@ -64,6 +64,9 @@ func (state servingState) String() string { // transitionRetryInterval is for tests. 
var transitionRetryInterval = 1 * time.Second +var logInitTime sync.Once + +var ErrNoTarget = vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target") // stateManager manages state transition for all the TabletServer // subcomponents. @@ -97,7 +100,7 @@ type stateManager struct { reason string transitionErr error - requests sync.WaitGroup + rw *requestsWaiter // QueryList does not have an Open or Close. statelessql *QueryList @@ -121,7 +124,7 @@ type stateManager struct { throttler lagThrottler tableGC tableGarbageCollector - // hcticks starts on initialiazation and runs forever. + // hcticks starts on initialization and runs forever. hcticks *timer.Timer // checkMySQLThrottler ensures that CheckMysql @@ -194,11 +197,11 @@ func (sm *stateManager) Init(env tabletenv.Env, target *querypb.Target) { sm.target = target.CloneVT() sm.transitioning = semaphore.NewWeighted(1) sm.checkMySQLThrottler = semaphore.NewWeighted(1) - sm.timebombDuration = env.Config().OltpReadPool.TimeoutSeconds.Get() * 10 - sm.hcticks = timer.NewTimer(env.Config().Healthcheck.IntervalSeconds.Get()) - sm.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) - sm.shutdownGracePeriod = env.Config().GracePeriods.ShutdownSeconds.Get() - sm.transitionGracePeriod = env.Config().GracePeriods.TransitionSeconds.Get() + sm.timebombDuration = env.Config().OltpReadPool.Timeout * 10 + sm.hcticks = timer.NewTimer(env.Config().Healthcheck.Interval) + sm.unhealthyThreshold.Store(env.Config().Healthcheck.UnhealthyThreshold.Nanoseconds()) + sm.shutdownGracePeriod = env.Config().GracePeriods.Shutdown + sm.transitionGracePeriod = env.Config().GracePeriods.Transition } // SetServingType changes the state to the specified settings. 
@@ -389,7 +392,9 @@ func (sm *stateManager) StartRequest(ctx context.Context, target *querypb.Target } shuttingDown := sm.wantState != StateServing - if shuttingDown && !allowOnShutdown { + // If wait counter for the requests is not zero, then there are go-routines blocked on waiting for requests to be empty. + // We cannot allow adding to the requests to prevent any panics from happening. + if (shuttingDown && !allowOnShutdown) || sm.rw.GetWaiterCount() > 0 { // This specific error string needs to be returned for vtgate buffering to work. return vterrors.New(vtrpcpb.Code_CLUSTER_EVENT, vterrors.ShuttingDown) } @@ -398,13 +403,13 @@ func (sm *stateManager) StartRequest(ctx context.Context, target *querypb.Target if err != nil { return err } - sm.requests.Add(1) + sm.rw.Add(1) return nil } // EndRequest unregisters the current request (a waitgroup) as done. func (sm *stateManager) EndRequest() { - sm.requests.Done() + sm.rw.Done() } // VerifyTarget allows requests to be executed even in non-serving state. @@ -432,7 +437,7 @@ func (sm *stateManager) verifyTargetLocked(ctx context.Context, target *querypb. } } else { if !tabletenv.IsLocalContext(ctx) { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "No target") + return ErrNoTarget } } return nil @@ -484,7 +489,7 @@ func (sm *stateManager) unservePrimary() error { func (sm *stateManager) serveNonPrimary(wantTabletType topodatapb.TabletType) error { // We are likely transitioning from primary. We have to honor // the shutdown grace period. - cancel := sm.handleShutdownGracePeriod() + cancel := sm.terminateAllQueries(nil) defer cancel() sm.ddle.Close() @@ -537,9 +542,14 @@ func (sm *stateManager) connect(tabletType topodatapb.TabletType) error { } func (sm *stateManager) unserveCommon() { + sm.markClusterAction(ClusterActionInProgress) + defer sm.markClusterAction(ClusterActionNotInProgress) + // We create a wait group that tracks whether all the queries have been terminated or not. 
+ wg := sync.WaitGroup{} + wg.Add(1) log.Infof("Started execution of unserveCommon") - cancel := sm.handleShutdownGracePeriod() - log.Infof("Finished execution of handleShutdownGracePeriod") + cancel := sm.terminateAllQueries(&wg) + log.Infof("Finished execution of terminateAllQueries") defer cancel() log.Infof("Started online ddl executor close") @@ -557,22 +567,47 @@ func (sm *stateManager) unserveCommon() { log.Info("Finished Killing all OLAP queries. Started tracker close") sm.tracker.Close() log.Infof("Finished tracker close. Started wait for requests") - sm.requests.Wait() - log.Infof("Finished wait for requests. Finished execution of unserveCommon") + sm.handleShutdownGracePeriod(&wg) + log.Infof("Finished handling grace period. Finished execution of unserveCommon") } -func (sm *stateManager) handleShutdownGracePeriod() (cancel func()) { +// handleShutdownGracePeriod checks if we have shutdwonGracePeriod specified. +// If its not, then we have to wait for all the requests to be empty. +// Otherwise, we only wait for all the queries against MySQL to be terminated. +func (sm *stateManager) handleShutdownGracePeriod(wg *sync.WaitGroup) { + // If there is no shutdown grace period specified, then we should wait for all the requests to be empty. + if sm.shutdownGracePeriod == 0 { + sm.rw.WaitToBeEmpty() + } else { + // We quickly check if the requests are empty or not. + // If they are, then we don't need to wait for the shutdown to complete. + count := sm.rw.GetOutstandingRequestsCount() + if count == 0 { + return + } + // Otherwise, we should wait for all olap queries to be killed. + // We don't need to wait for requests to be empty since we have ensured all the queries against MySQL have been killed. 
+ wg.Wait() + } +} + +func (sm *stateManager) terminateAllQueries(wg *sync.WaitGroup) (cancel func()) { if sm.shutdownGracePeriod == 0 { return func() {} } ctx, cancel := context.WithCancel(context.TODO()) go func() { + if wg != nil { + defer wg.Done() + } if err := timer.SleepContext(ctx, sm.shutdownGracePeriod); err != nil { return } + // Prevent any new queries from being added before we kill all the queries in the list. + sm.markClusterAction(ClusterActionNoQueries) log.Infof("Grace Period %v exceeded. Killing all OLTP queries.", sm.shutdownGracePeriod) sm.statelessql.TerminateAll() - log.Infof("Killed all stateful OLTP queries.") + log.Infof("Killed all stateless OLTP queries.") sm.statefulql.TerminateAll() log.Infof("Killed all OLTP queries.") }() @@ -611,9 +646,9 @@ func (sm *stateManager) setTimeBomb() chan struct{} { // setState changes the state and logs the event. func (sm *stateManager) setState(tabletType topodatapb.TabletType, state servingState) { - defer func() { + defer logInitTime.Do(func() { log.Infof("Tablet Init took %d ms", time.Since(servenv.GetInitStartTime()).Milliseconds()) - }() + }) sm.mu.Lock() defer sm.mu.Unlock() if tabletType == topodatapb.TabletType_UNKNOWN { @@ -622,7 +657,7 @@ func (sm *stateManager) setState(tabletType topodatapb.TabletType, state serving log.Infof("TabletServer transition: %v -> %v for tablet %s:%s/%s", sm.stateStringLocked(sm.target.TabletType, sm.state), sm.stateStringLocked(tabletType, state), sm.target.Cell, sm.target.Keyspace, sm.target.Shard) - sm.handleGracePeriod(tabletType) + sm.handleTransitionGracePeriod(tabletType) sm.target.TabletType = tabletType if sm.state == StateNotConnected { // If we're transitioning out of StateNotConnected, we have @@ -641,7 +676,7 @@ func (sm *stateManager) stateStringLocked(tabletType topodatapb.TabletType, stat return fmt.Sprintf("%v: %v, %v", tabletType, state, sm.ptsTimestamp.Local().Format("Jan 2, 2006 at 15:04:05 (MST)")) } -func (sm *stateManager) 
handleGracePeriod(tabletType topodatapb.TabletType) { +func (sm *stateManager) handleTransitionGracePeriod(tabletType topodatapb.TabletType) { if tabletType != topodatapb.TabletType_PRIMARY { // We allow serving of previous type only for a primary transition. sm.alsoAllow = nil @@ -819,3 +854,10 @@ func (sm *stateManager) IsServingString() string { func (sm *stateManager) SetUnhealthyThreshold(v time.Duration) { sm.unhealthyThreshold.Store(v.Nanoseconds()) } + +// markClusterAction marks whether a cluster action is in progress or not for all the query details. +func (sm *stateManager) markClusterAction(ca ClusterActionState) { + sm.statefulql.SetClusterAction(ca) + sm.statelessql.SetClusterAction(ca) + sm.olapql.SetClusterAction(ca) +} diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index 23e70a66760..f6345b9b29c 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -24,17 +24,19 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -397,24 +399,30 @@ func (k *killableConn) Kill(message string, elapsed time.Duration) error { return nil } +func (k *killableConn) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() +} + func TestStateManagerShutdownGracePeriod(t *testing.T) { sm := newTestStateManager(t) defer 
sm.StopService() sm.te = &delayedTxEngine{} kconn1 := &killableConn{id: 1} - sm.statelessql.Add(&QueryDetail{ + err := sm.statelessql.Add(&QueryDetail{ conn: kconn1, connID: kconn1.id, }) + require.NoError(t, err) kconn2 := &killableConn{id: 2} - sm.statefulql.Add(&QueryDetail{ + err = sm.statefulql.Add(&QueryDetail{ conn: kconn2, connID: kconn2.id, }) + require.NoError(t, err) // Transition to replica with no shutdown grace period should kill kconn2 but not kconn1. - err := sm.SetServingType(topodatapb.TabletType_PRIMARY, testNow, StateServing, "") + err = sm.SetServingType(topodatapb.TabletType_PRIMARY, testNow, StateServing, "") require.NoError(t, err) assert.False(t, kconn1.killed.Load()) assert.True(t, kconn2.killed.Load()) @@ -695,6 +703,29 @@ func TestRefreshReplHealthLocked(t *testing.T) { assert.False(t, sm.replHealthy) } +// TestPanicInWait tests that we don't panic when we wait for requests if more StartRequest calls come up after we start waiting. +func TestPanicInWait(t *testing.T) { + sm := newTestStateManager(t) + sm.wantState = StateServing + sm.state = StateServing + sm.replHealthy = true + ctx := context.Background() + // Simulate an Execute RPC running + err := sm.StartRequest(ctx, sm.target, false) + require.NoError(t, err) + go func() { + time.Sleep(100 * time.Millisecond) + // Simulate the previous RPC finishing after some delay + sm.EndRequest() + // Simulate a COMMIT call arriving right afterwards + _ = sm.StartRequest(ctx, sm.target, true) + }() + + // Simulate going to a not serving state and calling unserveCommon that waits on requests. 
+ sm.wantState = StateNotServing + sm.rw.WaitToBeEmpty() +} + func verifySubcomponent(t *testing.T, order int64, component any, state testState) { tos := component.(orderState) assert.Equal(t, order, tos.Order()) @@ -703,12 +734,13 @@ func verifySubcomponent(t *testing.T, order int64, component any, state testStat func newTestStateManager(t *testing.T) *stateManager { order.Store(0) - config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, "StateManagerTest") + cfg := tabletenv.NewDefaultConfig() + parser := sqlparser.NewTestParser() + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "StateManagerTest") sm := &stateManager{ - statelessql: NewQueryList("stateless"), - statefulql: NewQueryList("stateful"), - olapql: NewQueryList("olap"), + statelessql: NewQueryList("stateless", parser), + statefulql: NewQueryList("stateful", parser), + olapql: NewQueryList("olap", parser), hs: newHealthStreamer(env, &topodatapb.TabletAlias{}, schema.NewEngine(env)), se: &testSchemaEngine{}, rt: &testReplTracker{lag: 1 * time.Second}, @@ -722,9 +754,10 @@ func newTestStateManager(t *testing.T) *stateManager { ddle: &testOnlineDDLExecutor{}, throttler: &testLagThrottler{}, tableGC: &testTableGC{}, + rw: newRequestsWaiter(), } sm.Init(env, &querypb.Target{}) - sm.hs.InitDBConfig(&querypb.Target{}, fakesqldb.New(t).ConnParams()) + sm.hs.InitDBConfig(&querypb.Target{}, dbconfigs.New(fakesqldb.New(t).ConnParams())) log.Infof("returning sm: %p", sm) return sm } diff --git a/go/vt/vttablet/tabletserver/stateful_connection.go b/go/vt/vttablet/tabletserver/stateful_connection.go index 739ed5c4295..067f2194655 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection.go +++ b/go/vt/vttablet/tabletserver/stateful_connection.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" 
"vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -184,11 +185,11 @@ func (sc *StatefulConnection) Renew() error { } // String returns a printable version of the connection info. -func (sc *StatefulConnection) String(sanitize bool) string { +func (sc *StatefulConnection) String(sanitize bool, parser *sqlparser.Parser) string { return fmt.Sprintf( "%v\t%s", sc.ConnID, - sc.txProps.String(sanitize), + sc.txProps.String(sanitize, parser), ) } diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool.go b/go/vt/vttablet/tabletserver/stateful_connection_pool.go index ce6f917610e..64268825b70 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool.go @@ -93,7 +93,7 @@ func (sf *StatefulConnectionPool) Close() { if conn.IsInTransaction() { thing = "transaction" } - log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages)) + log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages, sf.env.Environment().Parser())) sf.env.Stats().InternalErrors.Add("StrayTransactions", 1) conn.Close() conn.Releasef("pool closed") diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go index b9ea4dfc185..b93c822cfdc 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" ) @@ -37,7 +38,8 @@ func TestActivePoolClientRowsFound(t *testing.T) { db.AddQuery("begin", &sqltypes.Result{}) pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, 
params, params) startNormalSize := pool.conns.Available() startFoundRowsSize := pool.foundRowsPool.Available() @@ -63,7 +65,8 @@ func TestActivePoolForAllTxProps(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn1, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) conn1.txProps = &tx.Properties{} @@ -91,7 +94,8 @@ func TestStatefulPoolShutdownNonTx(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) // conn1 non-tx, not in use. conn1, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) @@ -131,7 +135,8 @@ func TestStatefulPoolShutdownAll(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) // conn1 not in use conn1, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) @@ -157,9 +162,10 @@ func TestActivePoolGetConnNonExistentTransaction(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) _, err := pool.GetAndLock(12345, "for query") - require.EqualError(t, err, "not found") + require.EqualError(t, err, "not found (potential transaction timeout)") } func TestExecWithAbortedCtx(t *testing.T) { @@ -167,7 +173,8 @@ func TestExecWithAbortedCtx(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, 
params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) cancel() @@ -181,7 +188,8 @@ func TestExecWithDbconnClosed(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) conn.Close() @@ -196,7 +204,8 @@ func TestExecWithDbconnClosedHavingTx(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) conn.txProps = &tx.Properties{Conclusion: "foobar"} @@ -212,7 +221,8 @@ func TestFailOnConnectionRegistering(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) defer conn.Close() diff --git a/go/vt/vttablet/tabletserver/status.go b/go/vt/vttablet/tabletserver/status.go index f91cc4ad566..b1ebb24bc57 100644 --- a/go/vt/vttablet/tabletserver/status.go +++ b/go/vt/vttablet/tabletserver/status.go @@ -229,8 +229,8 @@ func (tsv *TabletServer) AddStatusHeader() { // AddStatusPart registers the status part for the status page. func (tsv *TabletServer) AddStatusPart() { // Save the threshold values for reporting. 
- degradedThreshold.Store(tsv.config.Healthcheck.DegradedThresholdSeconds.Get().Nanoseconds()) - unhealthyThreshold.Store(tsv.config.Healthcheck.UnhealthyThresholdSeconds.Get().Nanoseconds()) + degradedThreshold.Store(tsv.config.Healthcheck.DegradedThreshold.Nanoseconds()) + unhealthyThreshold.Store(tsv.config.Healthcheck.UnhealthyThreshold.Nanoseconds()) tsv.exporter.AddStatusPart("Health", queryserviceStatusTemplate, func() any { status := queryserviceStatus{ diff --git a/go/vt/vttablet/tabletserver/stream_consolidator.go b/go/vt/vttablet/tabletserver/stream_consolidator.go index 497c9011040..cbf99eaffd4 100644 --- a/go/vt/vttablet/tabletserver/stream_consolidator.go +++ b/go/vt/vttablet/tabletserver/stream_consolidator.go @@ -19,9 +19,11 @@ package tabletserver import ( "sync" "sync/atomic" + "time" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -70,7 +72,7 @@ func (sc *StreamConsolidator) SetBlocking(block bool) { // `callback`. A `leaderCallback` must also be supplied: this function must perform the actual // query in the upstream MySQL server, yielding results into the modified callback that it receives // as an argument. 
-func (sc *StreamConsolidator) Consolidate(logStats *tabletenv.LogStats, sql string, callback StreamCallback, leaderCallback func(StreamCallback) error) error { +func (sc *StreamConsolidator) Consolidate(waitTimings *servenv.TimingsWrapper, logStats *tabletenv.LogStats, sql string, callback StreamCallback, leaderCallback func(StreamCallback) error) error { var ( inflight *streamInFlight catchup []*sqltypes.Result @@ -100,9 +102,11 @@ func (sc *StreamConsolidator) Consolidate(logStats *tabletenv.LogStats, sql stri // if we have a followChan, we're following up on a query that is already being served if followChan != nil { + startTime := time.Now() defer func() { memchange := inflight.unfollow(followChan, sc.cleanup) atomic.AddInt64(&sc.memory, memchange) + waitTimings.Record("StreamConsolidations", startTime) }() logStats.QuerySources |= tabletenv.QuerySourceConsolidator @@ -252,7 +256,7 @@ func (s *streamInFlight) update(result *sqltypes.Result, block bool, maxMemoryQu s.mu.Lock() defer s.mu.Unlock() - // if this stream can still be catched up with, we need to store the result in + // if this stream can still be caught up with, we need to store the result in // a catch up buffer; otherwise, we can skip this altogether and just fan out the result // to all the followers that are already caught up if s.catchupAllowed { diff --git a/go/vt/vttablet/tabletserver/stream_consolidator_flaky_test.go b/go/vt/vttablet/tabletserver/stream_consolidator_flaky_test.go index 0c903933412..caa519cc477 100644 --- a/go/vt/vttablet/tabletserver/stream_consolidator_flaky_test.go +++ b/go/vt/vttablet/tabletserver/stream_consolidator_flaky_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/sqltypes" @@ -123,10 +124,12 @@ func (ct *consolidationTest) run(workers int, generateCallback func(int) (string go func(worker int) { defer wg.Done() + exporter := 
servenv.NewExporter("ConsolidatorTest", "") + timings := exporter.NewTimings("ConsolidatorWaits", "", "StreamConsolidations") logStats := tabletenv.NewLogStats(context.Background(), "StreamConsolidation") query, callback := generateCallback(worker) start := time.Now() - err := ct.cc.Consolidate(logStats, query, func(result *sqltypes.Result) error { + err := ct.cc.Consolidate(timings, logStats, query, func(result *sqltypes.Result) error { cr := ct.results[worker] cr.items = append(cr.items, result) atomic.AddInt64(&cr.count, 1) diff --git a/go/vt/vttablet/tabletserver/tabletenv/config.go b/go/vt/vttablet/tabletserver/tabletenv/config.go index d490c97326a..1b89829825b 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config.go @@ -17,6 +17,7 @@ limitations under the License. package tabletenv import ( + "context" "encoding/json" "errors" "fmt" @@ -27,9 +28,11 @@ import ( "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/flagutil" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/throttler" @@ -122,45 +125,27 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.IntVar(¤tConfig.OlapReadPool.Size, "queryserver-config-stream-pool-size", defaultConfig.OlapReadPool.Size, "query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion") fs.IntVar(¤tConfig.TxPool.Size, "queryserver-config-transaction-cap", defaultConfig.TxPool.Size, "query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. 
by setting transaction cap to 100, there are at most 100 transactions will be processed by a vttablet and the 101th transaction will be blocked (and fail if it cannot get connection within specified timeout)") fs.IntVar(¤tConfig.MessagePostponeParallelism, "queryserver-config-message-postpone-cap", defaultConfig.MessagePostponeParallelism, "query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem.") - currentConfig.Oltp.TxTimeoutSeconds = defaultConfig.Oltp.TxTimeoutSeconds.Clone() - fs.Var(¤tConfig.Oltp.TxTimeoutSeconds, currentConfig.Oltp.TxTimeoutSeconds.Name(), "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value") - currentConfig.GracePeriods.ShutdownSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.GracePeriods.ShutdownSeconds.Name(), defaultConfig.GracePeriods.TransitionSeconds.Get()) - fs.Var(¤tConfig.GracePeriods.ShutdownSeconds, currentConfig.GracePeriods.ShutdownSeconds.Name(), "how long to wait (in seconds) for queries and transactions to complete during graceful shutdown.") + fs.DurationVar(¤tConfig.Oltp.TxTimeout, "queryserver-config-transaction-timeout", defaultConfig.Oltp.TxTimeout, "query server transaction timeout, a transaction will be killed if it takes longer than this value") + fs.DurationVar(¤tConfig.GracePeriods.Shutdown, "shutdown_grace_period", defaultConfig.GracePeriods.Shutdown, "how long to wait for queries and transactions to complete during graceful shutdown.") fs.IntVar(¤tConfig.Oltp.MaxRows, "queryserver-config-max-result-size", defaultConfig.Oltp.MaxRows, "query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries.") fs.IntVar(¤tConfig.Oltp.WarnRows, "queryserver-config-warn-result-size", defaultConfig.Oltp.WarnRows, "query server 
result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this") fs.BoolVar(¤tConfig.PassthroughDML, "queryserver-config-passthrough-dmls", defaultConfig.PassthroughDML, "query server pass through all dml statements without rewriting") fs.IntVar(¤tConfig.StreamBufferSize, "queryserver-config-stream-buffer-size", defaultConfig.StreamBufferSize, "query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size.") - fs.Int("queryserver-config-query-cache-size", 0, "query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - _ = fs.MarkDeprecated("queryserver-config-query-cache-size", "`--queryserver-config-query-cache-size` is deprecated and will be removed in `v19.0`. This option only applied to LRU caches, which are now unsupported.") - fs.Int64Var(¤tConfig.QueryCacheMemory, "queryserver-config-query-cache-memory", defaultConfig.QueryCacheMemory, "query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - fs.Bool("queryserver-config-query-cache-lfu", false, "query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries") - _ = fs.MarkDeprecated("queryserver-config-query-cache-lfu", "`--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19.0`. 
The query cache always uses a LFU implementation now.") - - currentConfig.SchemaReloadIntervalSeconds = defaultConfig.SchemaReloadIntervalSeconds.Clone() - fs.Var(¤tConfig.SchemaReloadIntervalSeconds, currentConfig.SchemaReloadIntervalSeconds.Name(), "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.") + fs.DurationVar(¤tConfig.SchemaReloadInterval, "queryserver-config-schema-reload-time", defaultConfig.SchemaReloadInterval, "query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time.") fs.DurationVar(¤tConfig.SchemaChangeReloadTimeout, "schema-change-reload-timeout", defaultConfig.SchemaChangeReloadTimeout, "query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up") fs.BoolVar(¤tConfig.SignalWhenSchemaChange, "queryserver-config-schema-change-signal", defaultConfig.SignalWhenSchemaChange, "query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work") - currentConfig.Olap.TxTimeoutSeconds = defaultConfig.Olap.TxTimeoutSeconds.Clone() - fs.Var(¤tConfig.Olap.TxTimeoutSeconds, defaultConfig.Olap.TxTimeoutSeconds.Name(), "query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed") - currentConfig.Oltp.QueryTimeoutSeconds = defaultConfig.Oltp.QueryTimeoutSeconds.Clone() - fs.Var(¤tConfig.Oltp.QueryTimeoutSeconds, currentConfig.Oltp.QueryTimeoutSeconds.Name(), "query server query timeout (in seconds), this is the query timeout in vttablet side. 
If a query takes more than this timeout, it will be killed.") - currentConfig.OltpReadPool.TimeoutSeconds = defaultConfig.OltpReadPool.TimeoutSeconds.Clone() - fs.Var(¤tConfig.OltpReadPool.TimeoutSeconds, currentConfig.OltpReadPool.TimeoutSeconds.Name(), "query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.") - currentConfig.OlapReadPool.TimeoutSeconds = defaultConfig.OlapReadPool.TimeoutSeconds.Clone() - fs.Var(¤tConfig.OlapReadPool.TimeoutSeconds, currentConfig.OlapReadPool.TimeoutSeconds.Name(), "query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout.") - currentConfig.TxPool.TimeoutSeconds = defaultConfig.TxPool.TimeoutSeconds.Clone() - fs.Var(¤tConfig.TxPool.TimeoutSeconds, currentConfig.TxPool.TimeoutSeconds.Name(), "query server transaction pool timeout, it is how long vttablet waits if tx pool is full") - currentConfig.OltpReadPool.IdleTimeoutSeconds = defaultConfig.OltpReadPool.IdleTimeoutSeconds.Clone() - fs.Var(¤tConfig.OltpReadPool.IdleTimeoutSeconds, currentConfig.OltpReadPool.IdleTimeoutSeconds.Name(), "query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.") - currentConfig.OltpReadPool.MaxLifetimeSeconds = defaultConfig.OltpReadPool.MaxLifetimeSeconds.Clone() - fs.Var(¤tConfig.OltpReadPool.MaxLifetimeSeconds, currentConfig.OltpReadPool.MaxLifetimeSeconds.Name(), "query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. 
This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool.") - fs.IntVar(¤tConfig.OltpReadPool.MaxWaiters, "queryserver-config-query-pool-waiter-cap", defaultConfig.OltpReadPool.MaxWaiters, "query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection") - fs.IntVar(¤tConfig.OlapReadPool.MaxWaiters, "queryserver-config-stream-pool-waiter-cap", defaultConfig.OlapReadPool.MaxWaiters, "query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection") - fs.IntVar(¤tConfig.TxPool.MaxWaiters, "queryserver-config-txpool-waiter-cap", defaultConfig.TxPool.MaxWaiters, "query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection") + fs.DurationVar(¤tConfig.Olap.TxTimeout, "queryserver-config-olap-transaction-timeout", defaultConfig.Olap.TxTimeout, "query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed") + fs.DurationVar(¤tConfig.Oltp.QueryTimeout, "queryserver-config-query-timeout", defaultConfig.Oltp.QueryTimeout, "query server query timeout, this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed.") + fs.DurationVar(¤tConfig.OltpReadPool.Timeout, "queryserver-config-query-pool-timeout", defaultConfig.OltpReadPool.Timeout, "query server query pool timeout, it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead.") + fs.DurationVar(¤tConfig.OlapReadPool.Timeout, "queryserver-config-stream-pool-timeout", defaultConfig.OlapReadPool.Timeout, "query server stream pool timeout, it is how long vttablet waits for a connection from the stream pool. 
If set to 0 (default) then there is no timeout.") + fs.DurationVar(¤tConfig.TxPool.Timeout, "queryserver-config-txpool-timeout", defaultConfig.TxPool.Timeout, "query server transaction pool timeout, it is how long vttablet waits if tx pool is full") + fs.DurationVar(¤tConfig.OltpReadPool.IdleTimeout, "queryserver-config-idle-timeout", defaultConfig.OltpReadPool.IdleTimeout, "query server idle timeout, vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance.") + fs.DurationVar(¤tConfig.OltpReadPool.MaxLifetime, "queryserver-config-pool-conn-max-lifetime", defaultConfig.OltpReadPool.MaxLifetime, "query server connection max lifetime, vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool.") + // tableacl related configurations. fs.BoolVar(¤tConfig.StrictTableACL, "queryserver-config-strict-table-acl", defaultConfig.StrictTableACL, "only allow queries that pass table acl checks") fs.BoolVar(¤tConfig.EnableTableACLDryRun, "queryserver-config-enable-table-acl-dry-run", defaultConfig.EnableTableACLDryRun, "If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results") @@ -207,13 +192,9 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.Int64Var(¤tConfig.ConsolidatorStreamQuerySize, "consolidator-stream-query-size", defaultConfig.ConsolidatorStreamQuerySize, "Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator.") fs.Int64Var(¤tConfig.ConsolidatorStreamTotalSize, "consolidator-stream-total-size", defaultConfig.ConsolidatorStreamTotalSize, "Configure the stream consolidator total size in bytes. 
Setting to 0 disables the stream consolidator.") - currentConfig.Healthcheck.IntervalSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.Healthcheck.IntervalSeconds.Name(), defaultConfig.Healthcheck.IntervalSeconds.Get()) - currentConfig.Healthcheck.DegradedThresholdSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.Healthcheck.DegradedThresholdSeconds.Name(), defaultConfig.Healthcheck.DegradedThresholdSeconds.Get()) - currentConfig.Healthcheck.UnhealthyThresholdSeconds = flagutil.NewDeprecatedFloat64Seconds(defaultConfig.Healthcheck.UnhealthyThresholdSeconds.Name(), defaultConfig.Healthcheck.UnhealthyThresholdSeconds.Get()) - - fs.DurationVar(&healthCheckInterval, currentConfig.Healthcheck.IntervalSeconds.Name(), currentConfig.Healthcheck.IntervalSeconds.Get(), "Interval between health checks") - fs.DurationVar(°radedThreshold, currentConfig.Healthcheck.DegradedThresholdSeconds.Name(), currentConfig.Healthcheck.DegradedThresholdSeconds.Get(), "replication lag after which a replica is considered degraded") - fs.DurationVar(&unhealthyThreshold, currentConfig.Healthcheck.UnhealthyThresholdSeconds.Name(), currentConfig.Healthcheck.UnhealthyThresholdSeconds.Get(), "replication lag after which a replica is considered unhealthy") + fs.DurationVar(&healthCheckInterval, "health_check_interval", defaultConfig.Healthcheck.Interval, "Interval between health checks") + fs.DurationVar(°radedThreshold, "degraded_threshold", defaultConfig.Healthcheck.DegradedThreshold, "replication lag after which a replica is considered degraded") + fs.DurationVar(&unhealthyThreshold, "unhealthy_threshold", defaultConfig.Healthcheck.UnhealthyThreshold, "replication lag after which a replica is considered unhealthy") fs.DurationVar(&transitionGracePeriod, "serving_state_grace_period", 0, "how long to pause after broadcasting health to vtgate, before enforcing a new serving state") fs.BoolVar(&enableReplicationReporter, "enable_replication_reporter", false, "Use polling to track 
replication lag.") @@ -227,6 +208,8 @@ func registerTabletEnvFlags(fs *pflag.FlagSet) { fs.BoolVar(¤tConfig.EnableViews, "queryserver-enable-views", false, "Enable views support in vttablet.") fs.BoolVar(¤tConfig.EnablePerWorkloadTableMetrics, "enable-per-workload-table-metrics", defaultConfig.EnablePerWorkloadTableMetrics, "If true, query counts and query error metrics include a label that identifies the workload") + + fs.BoolVar(¤tConfig.Unmanaged, "unmanaged", false, "Indicates an unmanaged tablet, i.e. using an external mysql-compatible database") } var ( @@ -238,10 +221,10 @@ var ( func Init() { // IdleTimeout is only initialized for OltpReadPool , but the other pools need to inherit the value. // TODO(sougou): Make a decision on whether this should be global or per-pool. - _ = currentConfig.OlapReadPool.IdleTimeoutSeconds.Set(currentConfig.OltpReadPool.IdleTimeoutSeconds.Get().String()) - _ = currentConfig.TxPool.IdleTimeoutSeconds.Set(currentConfig.OltpReadPool.IdleTimeoutSeconds.Get().String()) - _ = currentConfig.OlapReadPool.MaxLifetimeSeconds.Set(currentConfig.OltpReadPool.MaxLifetimeSeconds.Get().String()) - _ = currentConfig.TxPool.MaxLifetimeSeconds.Set(currentConfig.OltpReadPool.MaxLifetimeSeconds.Get().String()) + currentConfig.OlapReadPool.IdleTimeout = currentConfig.OltpReadPool.IdleTimeout + currentConfig.TxPool.IdleTimeout = currentConfig.OltpReadPool.IdleTimeout + currentConfig.OlapReadPool.MaxLifetime = currentConfig.OltpReadPool.MaxLifetime + currentConfig.TxPool.MaxLifetime = currentConfig.OltpReadPool.MaxLifetime if enableHotRowProtection { if enableHotRowProtectionDryRun { @@ -263,7 +246,7 @@ func Init() { } if heartbeatInterval == 0 { - heartbeatInterval = defaultConfig.ReplicationTracker.HeartbeatIntervalSeconds.Get() + heartbeatInterval = defaultConfig.ReplicationTracker.HeartbeatInterval } if heartbeatInterval > time.Second { heartbeatInterval = time.Second @@ -271,8 +254,8 @@ func Init() { if heartbeatOnDemandDuration < 0 { 
heartbeatOnDemandDuration = 0 } - _ = currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set(heartbeatInterval.String()) - _ = currentConfig.ReplicationTracker.HeartbeatOnDemandSeconds.Set(heartbeatOnDemandDuration.String()) + currentConfig.ReplicationTracker.HeartbeatInterval = heartbeatInterval + currentConfig.ReplicationTracker.HeartbeatOnDemand = heartbeatOnDemandDuration switch { case enableHeartbeat: @@ -283,10 +266,10 @@ func Init() { currentConfig.ReplicationTracker.Mode = Disable } - _ = currentConfig.Healthcheck.IntervalSeconds.Set(healthCheckInterval.String()) - _ = currentConfig.Healthcheck.DegradedThresholdSeconds.Set(degradedThreshold.String()) - _ = currentConfig.Healthcheck.UnhealthyThresholdSeconds.Set(unhealthyThreshold.String()) - _ = currentConfig.GracePeriods.TransitionSeconds.Set(transitionGracePeriod.String()) + currentConfig.Healthcheck.Interval = healthCheckInterval + currentConfig.Healthcheck.DegradedThreshold = degradedThreshold + currentConfig.Healthcheck.UnhealthyThreshold = unhealthyThreshold + currentConfig.GracePeriods.Transition = transitionGracePeriod switch streamlog.GetQueryLogFormat() { case streamlog.QueryLogFormatText: @@ -312,6 +295,8 @@ func Init() { type TabletConfig struct { DB *dbconfigs.DBConfigs `json:"db,omitempty"` + Unmanaged bool `json:"unmanaged,omitempty"` + OltpReadPool ConnPoolConfig `json:"oltpReadPool,omitempty"` OlapReadPool ConnPoolConfig `json:"olapReadPool,omitempty"` TxPool ConnPoolConfig `json:"txPool,omitempty"` @@ -326,24 +311,24 @@ type TabletConfig struct { ReplicationTracker ReplicationTrackerConfig `json:"replicationTracker,omitempty"` // Consolidator can be enable, disable, or notOnPrimary. Default is enable. 
- Consolidator string `json:"consolidator,omitempty"` - PassthroughDML bool `json:"passthroughDML,omitempty"` - StreamBufferSize int `json:"streamBufferSize,omitempty"` - ConsolidatorStreamTotalSize int64 `json:"consolidatorStreamTotalSize,omitempty"` - ConsolidatorStreamQuerySize int64 `json:"consolidatorStreamQuerySize,omitempty"` - QueryCacheMemory int64 `json:"queryCacheMemory,omitempty"` - QueryCacheDoorkeeper bool `json:"queryCacheDoorkeeper,omitempty"` - SchemaReloadIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"schemaReloadIntervalSeconds,omitempty"` - SignalSchemaChangeReloadIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` - SchemaChangeReloadTimeout time.Duration `json:"schemaChangeReloadTimeout,omitempty"` - WatchReplication bool `json:"watchReplication,omitempty"` - TrackSchemaVersions bool `json:"trackSchemaVersions,omitempty"` - SchemaVersionMaxAgeSeconds int64 `json:"schemaVersionMaxAgeSeconds,omitempty"` - TerseErrors bool `json:"terseErrors,omitempty"` - TruncateErrorLen int `json:"truncateErrorLen,omitempty"` - AnnotateQueries bool `json:"annotateQueries,omitempty"` - MessagePostponeParallelism int `json:"messagePostponeParallelism,omitempty"` - SignalWhenSchemaChange bool `json:"signalWhenSchemaChange,omitempty"` + Consolidator string `json:"consolidator,omitempty"` + PassthroughDML bool `json:"passthroughDML,omitempty"` + StreamBufferSize int `json:"streamBufferSize,omitempty"` + ConsolidatorStreamTotalSize int64 `json:"consolidatorStreamTotalSize,omitempty"` + ConsolidatorStreamQuerySize int64 `json:"consolidatorStreamQuerySize,omitempty"` + QueryCacheMemory int64 `json:"queryCacheMemory,omitempty"` + QueryCacheDoorkeeper bool `json:"queryCacheDoorkeeper,omitempty"` + SchemaReloadInterval time.Duration `json:"schemaReloadIntervalSeconds,omitempty"` + SignalSchemaChangeReloadInterval time.Duration `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` + 
SchemaChangeReloadTimeout time.Duration `json:"schemaChangeReloadTimeout,omitempty"` + WatchReplication bool `json:"watchReplication,omitempty"` + TrackSchemaVersions bool `json:"trackSchemaVersions,omitempty"` + SchemaVersionMaxAgeSeconds int64 `json:"schemaVersionMaxAgeSeconds,omitempty"` + TerseErrors bool `json:"terseErrors,omitempty"` + TruncateErrorLen int `json:"truncateErrorLen,omitempty"` + AnnotateQueries bool `json:"annotateQueries,omitempty"` + MessagePostponeParallelism int `json:"messagePostponeParallelism,omitempty"` + SignalWhenSchemaChange bool `json:"signalWhenSchemaChange,omitempty"` ExternalConnections map[string]*dbconfigs.DBConfigs `json:"externalConnections,omitempty"` @@ -383,15 +368,19 @@ func (cfg *TabletConfig) MarshalJSON() ([]byte, error) { tmp := struct { TCProxy - SchemaReloadIntervalSeconds string `json:"schemaReloadIntervalSeconds,omitempty"` - SignalSchemaChangeReloadIntervalSeconds string `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` - SchemaChangeReloadTimeout string `json:"schemaChangeReloadTimeout,omitempty"` + SchemaReloadInterval string `json:"schemaReloadIntervalSeconds,omitempty"` + SignalSchemaChangeReloadInterval string `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` + SchemaChangeReloadTimeout string `json:"schemaChangeReloadTimeout,omitempty"` }{ TCProxy: TCProxy(*cfg), } - if d := cfg.SchemaReloadIntervalSeconds.Get(); d != 0 { - tmp.SchemaReloadIntervalSeconds = d.String() + if d := cfg.SchemaReloadInterval; d != 0 { + tmp.SchemaReloadInterval = d.String() + } + + if d := cfg.SignalSchemaChangeReloadInterval; d != 0 { + tmp.SignalSchemaChangeReloadInterval = d.String() } if d := cfg.SchemaChangeReloadTimeout; d != 0 { @@ -401,14 +390,61 @@ func (cfg *TabletConfig) MarshalJSON() ([]byte, error) { return json.Marshal(&tmp) } +func (cfg *TabletConfig) UnmarshalJSON(data []byte) (err error) { + type TCProxy TabletConfig + + var tmp struct { + TCProxy + SchemaReloadInterval string 
`json:"schemaReloadIntervalSeconds,omitempty"` + SignalSchemaChangeReloadInterval string `json:"signalSchemaChangeReloadIntervalSeconds,omitempty"` + SchemaChangeReloadTimeout string `json:"schemaChangeReloadTimeout,omitempty"` + } + + tmp.TCProxy = TCProxy(*cfg) + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + *cfg = TabletConfig(tmp.TCProxy) + + if tmp.SchemaReloadInterval != "" { + cfg.SchemaReloadInterval, err = time.ParseDuration(tmp.SchemaReloadInterval) + if err != nil { + return err + } + } else { + cfg.SchemaReloadInterval = 0 + } + + if tmp.SignalSchemaChangeReloadInterval != "" { + cfg.SignalSchemaChangeReloadInterval, err = time.ParseDuration(tmp.SignalSchemaChangeReloadInterval) + if err != nil { + return err + } + } else { + cfg.SignalSchemaChangeReloadInterval = 0 + } + + if tmp.SchemaChangeReloadTimeout != "" { + cfg.SchemaChangeReloadTimeout, err = time.ParseDuration(tmp.SchemaChangeReloadTimeout) + if err != nil { + return err + } + } else { + cfg.SchemaChangeReloadTimeout = 0 + } + + return nil +} + // ConnPoolConfig contains the config for a conn pool. 
type ConnPoolConfig struct { - Size int `json:"size,omitempty"` - TimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"timeoutSeconds,omitempty"` - IdleTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"idleTimeoutSeconds,omitempty"` - MaxLifetimeSeconds flagutil.DeprecatedFloat64Seconds `json:"maxLifetimeSeconds,omitempty"` - PrefillParallelism int `json:"prefillParallelism,omitempty"` - MaxWaiters int `json:"maxWaiters,omitempty"` + Size int `json:"size,omitempty"` + Timeout time.Duration `json:"timeoutSeconds,omitempty"` + IdleTimeout time.Duration `json:"idleTimeoutSeconds,omitempty"` + MaxLifetime time.Duration `json:"maxLifetimeSeconds,omitempty"` + PrefillParallelism int `json:"prefillParallelism,omitempty"` } func (cfg *ConnPoolConfig) MarshalJSON() ([]byte, error) { @@ -416,31 +452,71 @@ func (cfg *ConnPoolConfig) MarshalJSON() ([]byte, error) { tmp := struct { Proxy - TimeoutSeconds string `json:"timeoutSeconds,omitempty"` - IdleTimeoutSeconds string `json:"idleTimeoutSeconds,omitempty"` - MaxLifetimeSeconds string `json:"maxLifetimeSeconds,omitempty"` + Timeout string `json:"timeoutSeconds,omitempty"` + IdleTimeout string `json:"idleTimeoutSeconds,omitempty"` + MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` }{ Proxy: Proxy(*cfg), } - if d := cfg.TimeoutSeconds.Get(); d != 0 { - tmp.TimeoutSeconds = d.String() + if d := cfg.Timeout; d != 0 { + tmp.Timeout = d.String() } - if d := cfg.IdleTimeoutSeconds.Get(); d != 0 { - tmp.IdleTimeoutSeconds = d.String() + if d := cfg.IdleTimeout; d != 0 { + tmp.IdleTimeout = d.String() } - if d := cfg.MaxLifetimeSeconds.Get(); d != 0 { - tmp.MaxLifetimeSeconds = d.String() + if d := cfg.MaxLifetime; d != 0 { + tmp.MaxLifetime = d.String() } return json.Marshal(&tmp) } +func (cfg *ConnPoolConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Size int `json:"size,omitempty"` + Timeout string `json:"timeoutSeconds,omitempty"` + IdleTimeout string `json:"idleTimeoutSeconds,omitempty"` + 
MaxLifetime string `json:"maxLifetimeSeconds,omitempty"` + PrefillParallelism int `json:"prefillParallelism,omitempty"` + } + + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Timeout != "" { + cfg.Timeout, err = time.ParseDuration(tmp.Timeout) + if err != nil { + return err + } + } + + if tmp.IdleTimeout != "" { + cfg.IdleTimeout, err = time.ParseDuration(tmp.IdleTimeout) + if err != nil { + return err + } + } + + if tmp.MaxLifetime != "" { + cfg.MaxLifetime, err = time.ParseDuration(tmp.MaxLifetime) + if err != nil { + return err + } + } + + cfg.Size = tmp.Size + cfg.PrefillParallelism = tmp.PrefillParallelism + + return nil +} + // OlapConfig contains the config for olap settings. type OlapConfig struct { - TxTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"txTimeoutSeconds,omitempty"` + TxTimeout time.Duration `json:"txTimeoutSeconds,omitempty"` } func (cfg *OlapConfig) MarshalJSON() ([]byte, error) { @@ -453,19 +529,38 @@ func (cfg *OlapConfig) MarshalJSON() ([]byte, error) { Proxy: Proxy(*cfg), } - if d := cfg.TxTimeoutSeconds.Get(); d != 0 { + if d := cfg.TxTimeout; d != 0 { tmp.TxTimeoutSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *OlapConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + TxTimeout string `json:"txTimeoutSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.TxTimeout != "" { + cfg.TxTimeout, err = time.ParseDuration(tmp.TxTimeout) + if err != nil { + return err + } + } + + return nil +} + // OltpConfig contains the config for oltp settings. 
type OltpConfig struct { - QueryTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"queryTimeoutSeconds,omitempty"` - TxTimeoutSeconds flagutil.DeprecatedFloat64Seconds `json:"txTimeoutSeconds,omitempty"` - MaxRows int `json:"maxRows,omitempty"` - WarnRows int `json:"warnRows,omitempty"` + QueryTimeout time.Duration `json:"queryTimeoutSeconds,omitempty"` + TxTimeout time.Duration `json:"txTimeoutSeconds,omitempty"` + MaxRows int `json:"maxRows,omitempty"` + WarnRows int `json:"warnRows,omitempty"` } func (cfg *OltpConfig) MarshalJSON() ([]byte, error) { @@ -473,23 +568,51 @@ func (cfg *OltpConfig) MarshalJSON() ([]byte, error) { tmp := struct { Proxy - QueryTimeoutSeconds string `json:"queryTimeoutSeconds,omitempty"` - TxTimeoutSeconds string `json:"txTimeoutSeconds,omitempty"` + QueryTimeout string `json:"queryTimeoutSeconds,omitempty"` + TxTimeout string `json:"txTimeoutSeconds,omitempty"` }{ Proxy: Proxy(*cfg), } - if d := cfg.QueryTimeoutSeconds.Get(); d != 0 { - tmp.QueryTimeoutSeconds = d.String() + if d := cfg.QueryTimeout; d != 0 { + tmp.QueryTimeout = d.String() } - if d := cfg.TxTimeoutSeconds.Get(); d != 0 { - tmp.TxTimeoutSeconds = d.String() + if d := cfg.TxTimeout; d != 0 { + tmp.TxTimeout = d.String() } return json.Marshal(&tmp) } +func (cfg *OltpConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + OltpConfig + QueryTimeout string `json:"queryTimeoutSeconds,omitempty"` + TxTimeout string `json:"txTimeoutSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.QueryTimeout != "" { + cfg.QueryTimeout, err = time.ParseDuration(tmp.QueryTimeout) + if err != nil { + return err + } + } + + if tmp.TxTimeout != "" { + cfg.TxTimeout, err = time.ParseDuration(tmp.TxTimeout) + if err != nil { + return err + } + } + + return nil +} + // HotRowProtectionConfig contains the config for hot row protection. type HotRowProtectionConfig struct { // Mode can be disable, dryRun or enable. 
Default is disable. @@ -501,97 +624,177 @@ type HotRowProtectionConfig struct { // HealthcheckConfig contains the config for healthcheck. type HealthcheckConfig struct { - IntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"intervalSeconds,omitempty"` - DegradedThresholdSeconds flagutil.DeprecatedFloat64Seconds `json:"degradedThresholdSeconds,omitempty"` - UnhealthyThresholdSeconds flagutil.DeprecatedFloat64Seconds `json:"unhealthyThresholdSeconds,omitempty"` + Interval time.Duration + DegradedThreshold time.Duration + UnhealthyThreshold time.Duration } func (cfg *HealthcheckConfig) MarshalJSON() ([]byte, error) { - type Proxy HealthcheckConfig - - tmp := struct { - Proxy + var tmp struct { IntervalSeconds string `json:"intervalSeconds,omitempty"` DegradedThresholdSeconds string `json:"degradedThresholdSeconds,omitempty"` UnhealthyThresholdSeconds string `json:"unhealthyThresholdSeconds,omitempty"` - }{ - Proxy: Proxy(*cfg), } - if d := cfg.IntervalSeconds.Get(); d != 0 { + if d := cfg.Interval; d != 0 { tmp.IntervalSeconds = d.String() } - if d := cfg.DegradedThresholdSeconds.Get(); d != 0 { + if d := cfg.DegradedThreshold; d != 0 { tmp.DegradedThresholdSeconds = d.String() } - if d := cfg.UnhealthyThresholdSeconds.Get(); d != 0 { + if d := cfg.UnhealthyThreshold; d != 0 { tmp.UnhealthyThresholdSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *HealthcheckConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Interval string `json:"intervalSeconds,omitempty"` + DegradedThreshold string `json:"degradedThresholdSeconds,omitempty"` + UnhealthyThreshold string `json:"unhealthyThresholdSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Interval != "" { + cfg.Interval, err = time.ParseDuration(tmp.Interval) + if err != nil { + return err + } + } + + if tmp.DegradedThreshold != "" { + cfg.DegradedThreshold, err = time.ParseDuration(tmp.DegradedThreshold) + if err != nil { + return err 
+ } + } + + if tmp.UnhealthyThreshold != "" { + cfg.UnhealthyThreshold, err = time.ParseDuration(tmp.UnhealthyThreshold) + if err != nil { + return err + } + } + + return nil +} + // GracePeriodsConfig contains various grace periods. // TODO(sougou): move lameduck here? type GracePeriodsConfig struct { - ShutdownSeconds flagutil.DeprecatedFloat64Seconds `json:"shutdownSeconds,omitempty"` - TransitionSeconds flagutil.DeprecatedFloat64Seconds `json:"transitionSeconds,omitempty"` + Shutdown time.Duration + Transition time.Duration } func (cfg *GracePeriodsConfig) MarshalJSON() ([]byte, error) { - type Proxy GracePeriodsConfig - - tmp := struct { - Proxy + var tmp struct { ShutdownSeconds string `json:"shutdownSeconds,omitempty"` TransitionSeconds string `json:"transitionSeconds,omitempty"` - }{ - Proxy: Proxy(*cfg), } - if d := cfg.ShutdownSeconds.Get(); d != 0 { + if d := cfg.Shutdown; d != 0 { tmp.ShutdownSeconds = d.String() } - if d := cfg.TransitionSeconds.Get(); d != 0 { + if d := cfg.Transition; d != 0 { tmp.TransitionSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *GracePeriodsConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Shutdown string `json:"shutdownSeconds,omitempty"` + Transition string `json:"transitionSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.Shutdown != "" { + cfg.Shutdown, err = time.ParseDuration(tmp.Shutdown) + if err != nil { + return err + } + } + + if tmp.Transition != "" { + cfg.Transition, err = time.ParseDuration(tmp.Transition) + if err != nil { + return err + } + } + + return nil +} + // ReplicationTrackerConfig contains the config for the replication tracker. type ReplicationTrackerConfig struct { // Mode can be disable, polling or heartbeat. Default is disable. 
- Mode string `json:"mode,omitempty"` - HeartbeatIntervalSeconds flagutil.DeprecatedFloat64Seconds `json:"heartbeatIntervalSeconds,omitempty"` - HeartbeatOnDemandSeconds flagutil.DeprecatedFloat64Seconds `json:"heartbeatOnDemandSeconds,omitempty"` + Mode string `json:"mode,omitempty"` + HeartbeatInterval time.Duration + HeartbeatOnDemand time.Duration } func (cfg *ReplicationTrackerConfig) MarshalJSON() ([]byte, error) { - type Proxy ReplicationTrackerConfig - tmp := struct { - Proxy + Mode string `json:"mode,omitempty"` HeartbeatIntervalSeconds string `json:"heartbeatIntervalSeconds,omitempty"` HeartbeatOnDemandSeconds string `json:"heartbeatOnDemandSeconds,omitempty"` }{ - Proxy: Proxy(*cfg), + Mode: cfg.Mode, } - if d := cfg.HeartbeatIntervalSeconds.Get(); d != 0 { + if d := cfg.HeartbeatInterval; d != 0 { tmp.HeartbeatIntervalSeconds = d.String() } - if d := cfg.HeartbeatOnDemandSeconds.Get(); d != 0 { + if d := cfg.HeartbeatOnDemand; d != 0 { tmp.HeartbeatOnDemandSeconds = d.String() } return json.Marshal(&tmp) } +func (cfg *ReplicationTrackerConfig) UnmarshalJSON(data []byte) (err error) { + var tmp struct { + Mode string `json:"mode,omitempty"` + HeartbeatInterval string `json:"heartbeatIntervalSeconds,omitempty"` + HeartbeatOnDemand string `json:"heartbeatOnDemandSeconds,omitempty"` + } + + if err = json.Unmarshal(data, &tmp); err != nil { + return err + } + + if tmp.HeartbeatInterval != "" { + cfg.HeartbeatInterval, err = time.ParseDuration(tmp.HeartbeatInterval) + if err != nil { + return err + } + } + + if tmp.HeartbeatOnDemand != "" { + cfg.HeartbeatOnDemand, err = time.ParseDuration(tmp.HeartbeatOnDemand) + if err != nil { + return err + } + } + + cfg.Mode = tmp.Mode + + return nil +} + // TransactionLimitConfig captures configuration of transaction pool slots // limiter configuration. 
type TransactionLimitConfig struct { @@ -634,9 +837,9 @@ func (c *TabletConfig) Clone() *TabletConfig { func (c *TabletConfig) SetTxTimeoutForWorkload(val time.Duration, workload querypb.ExecuteOptions_Workload) { switch workload { case querypb.ExecuteOptions_OLAP: - _ = c.Olap.TxTimeoutSeconds.Set(val.String()) + c.Olap.TxTimeout = val case querypb.ExecuteOptions_OLTP: - _ = c.Oltp.TxTimeoutSeconds.Set(val.String()) + c.Oltp.TxTimeout = val default: panic(fmt.Sprintf("unsupported workload type: %v", workload)) } @@ -649,14 +852,17 @@ func (c *TabletConfig) TxTimeoutForWorkload(workload querypb.ExecuteOptions_Work case querypb.ExecuteOptions_DBA: return 0 case querypb.ExecuteOptions_OLAP: - return c.Olap.TxTimeoutSeconds.Get() + return c.Olap.TxTimeout default: - return c.Oltp.TxTimeoutSeconds.Get() + return c.Oltp.TxTimeout } } // Verify checks for contradicting flags. func (c *TabletConfig) Verify() error { + if err := c.verifyUnmanagedTabletConfig(); err != nil { + return err + } if err := c.verifyTransactionLimitConfig(); err != nil { return err } @@ -678,6 +884,50 @@ func (c *TabletConfig) Verify() error { return nil } +// verifyUnmanagedTabletConfig checks unmanaged tablet related config for sanity +func (c *TabletConfig) verifyUnmanagedTabletConfig() error { + // Skip checks if tablet is not unmanaged + if !c.Unmanaged { + return nil + } + + // Throw error if both host and socket are null + if !c.DB.HasGlobalSettings() { + return errors.New("no connection parameters specified but unmanaged mode specified") + } + if c.DB.App.User == "" { + return errors.New("database app user not specified") + } + if c.DB.App.Password == "" { + return errors.New("database app user password not specified") + } + // Replication fixes should be disabled for Unmanaged tablets. 
+ mysqlctl.DisableActiveReparents = true + + return c.checkConnectionForExternalMysql() +} + +// Test connectivity of external mysql +func (c *TabletConfig) checkConnectionForExternalMysql() error { + params := mysql.ConnParams{ + Host: c.DB.Host, + Port: c.DB.Port, + DbName: c.DB.DBName, + Uname: c.DB.App.User, + Pass: c.DB.App.Password, + UnixSocket: c.DB.Socket, + } + + conn, err := mysql.Connect(context.Background(), ¶ms) + if err != nil { + return err + } + + defer conn.Close() + + return conn.Ping() +} + // verifyTransactionLimitConfig checks TransactionLimitConfig for sanity func (c *TabletConfig) verifyTransactionLimitConfig() error { actual, dryRun := c.EnableTransactionLimit, c.EnableTransactionLimitDryRun @@ -742,54 +992,37 @@ func (c *TabletConfig) verifyTxThrottlerConfig() error { // They actually get overwritten during Init. var defaultConfig = TabletConfig{ OltpReadPool: ConnPoolConfig{ - Size: 16, - // TODO (ajm188): remove the zero-value ones after these are durations. - // See the comment below in GracePeriodsConfig as to why they are needed - // for now. - TimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-query-pool-timeout", 0), - IdleTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-idle-timeout", 30*time.Minute), - MaxLifetimeSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-pool-conn-max-lifetime", 0), - MaxWaiters: 5000, + Size: 16, + IdleTimeout: 30 * time.Minute, }, OlapReadPool: ConnPoolConfig{ - Size: 200, - // TODO (ajm188): remove the zero-value ones after these are durations. - // See the comment below in GracePeriodsConfig as to why they are needed - // for now. 
- TimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-stream-pool-timeout", 0), - IdleTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-stream-pool-idle-timeout", 30*time.Minute), + Size: 200, + IdleTimeout: 30 * time.Minute, }, TxPool: ConnPoolConfig{ - Size: 20, - TimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-txpool-timeout", time.Second), - // No actual flag for this one, but has non-zero value - IdleTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-txpool-idle-timeout", 30*time.Minute), - MaxWaiters: 5000, + Size: 20, + Timeout: time.Second, + IdleTimeout: 30 * time.Minute, }, Olap: OlapConfig{ - TxTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-olap-transaction-timeout", 30*time.Second), + TxTimeout: 30 * time.Second, }, Oltp: OltpConfig{ - QueryTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-query-timeout", 30*time.Second), - TxTimeoutSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-transaction-timeout", 30*time.Second), - MaxRows: 10000, + QueryTimeout: 30 * time.Second, + TxTimeout: 30 * time.Second, + MaxRows: 10000, }, Healthcheck: HealthcheckConfig{ - IntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("health_check_interval", 20*time.Second), - DegradedThresholdSeconds: flagutil.NewDeprecatedFloat64Seconds("degraded_threshold", 30*time.Second), - UnhealthyThresholdSeconds: flagutil.NewDeprecatedFloat64Seconds("unhealthy_threshold", 2*time.Hour), - }, - GracePeriods: GracePeriodsConfig{ - // TODO (ajm188) remove after these are durations. it's not necessary - // for production code because it's the zero value, but it's required - // for tests to pass (which require the name field to be present for - // deep equality). 
- ShutdownSeconds: flagutil.NewDeprecatedFloat64Seconds("shutdown_grace_period", 0), + Interval: 20 * time.Second, + DegradedThreshold: 30 * time.Second, + UnhealthyThreshold: 2 * time.Hour, }, ReplicationTracker: ReplicationTrackerConfig{ - Mode: Disable, - HeartbeatIntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("heartbeat_interval", 250*time.Millisecond), - HeartbeatOnDemandSeconds: flagutil.NewDeprecatedFloat64Seconds("heartbeat_on_demand_duration", 0), + Mode: Disable, + HeartbeatInterval: 250 * time.Millisecond, + }, + GracePeriods: GracePeriodsConfig{ + Shutdown: 3 * time.Second, }, HotRowProtection: HotRowProtectionConfig{ Mode: Disable, @@ -813,8 +1046,8 @@ var defaultConfig = TabletConfig{ QueryCacheMemory: 32 * 1024 * 1024, // 32 mb for our query cache // The doorkeeper for the plan cache is disabled by default in endtoend tests to ensure // results are consistent between runs. - QueryCacheDoorkeeper: !servenv.TestingEndtoend, - SchemaReloadIntervalSeconds: flagutil.NewDeprecatedFloat64Seconds("queryserver-config-schema-reload-time", 30*time.Minute), + QueryCacheDoorkeeper: !servenv.TestingEndtoend, + SchemaReloadInterval: 30 * time.Minute, // SchemaChangeReloadTimeout is used for the signal reload operation where we have to query mysqld. // The queries during the signal reload operation are typically expected to have low load, // but in busy systems with many tables, some queries may take longer than anticipated. 
diff --git a/go/vt/vttablet/tabletserver/tabletenv/config_test.go b/go/vt/vttablet/tabletserver/tabletenv/config_test.go index e472cbb4789..a51a3c599e8 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/config_test.go +++ b/go/vt/vttablet/tabletserver/tabletenv/config_test.go @@ -24,8 +24,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" @@ -47,8 +49,10 @@ func TestConfigParse(t *testing.T) { }, }, OltpReadPool: ConnPoolConfig{ - Size: 16, - MaxWaiters: 40, + Size: 16, + Timeout: 10 * time.Second, + IdleTimeout: 20 * time.Second, + MaxLifetime: 50 * time.Second, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000, @@ -56,10 +60,6 @@ func TestConfigParse(t *testing.T) { }, } - _ = cfg.OltpReadPool.TimeoutSeconds.Set("10s") - _ = cfg.OltpReadPool.IdleTimeoutSeconds.Set("20s") - _ = cfg.OltpReadPool.MaxLifetimeSeconds.Set("50s") - gotBytes, err := yaml2.Marshal(&cfg) require.NoError(t, err) wantBytes := `db: @@ -87,7 +87,6 @@ oltp: {} oltpReadPool: idleTimeoutSeconds: 20s maxLifetimeSeconds: 50s - maxWaiters: 40 size: 16 timeoutSeconds: 10s replicationTracker: {} @@ -109,9 +108,8 @@ txPool: {} user: c oltpReadPool: size: 16 - idleTimeoutSeconds: 20 - maxWaiters: 40 - maxLifetimeSeconds: 50 + idleTimeoutSeconds: 20s + maxLifetimeSeconds: 50s `) gotCfg := cfg gotCfg.DB = cfg.DB.Clone() @@ -127,7 +125,8 @@ func TestDefaultConfig(t *testing.T) { want := `consolidator: enable consolidatorStreamQuerySize: 2097152 consolidatorStreamTotalSize: 134217728 -gracePeriods: {} +gracePeriods: + shutdownSeconds: 3s healthcheck: degradedThresholdSeconds: 30s intervalSeconds: 20s @@ -149,7 +148,6 @@ oltp: txTimeoutSeconds: 30s oltpReadPool: idleTimeoutSeconds: 30m0s - maxWaiters: 5000 
size: 16 queryCacheDoorkeeper: true queryCacheMemory: 33554432 @@ -165,7 +163,6 @@ signalWhenSchemaChange: true streamBufferSize: 32768 txPool: idleTimeoutSeconds: 30m0s - maxWaiters: 5000 size: 20 timeoutSeconds: 1s ` @@ -178,17 +175,16 @@ func TestClone(t *testing.T) { cfg1 := &TabletConfig{ OltpReadPool: ConnPoolConfig{ - Size: 16, - MaxWaiters: 40, + Size: 16, + Timeout: 10 * time.Second, + IdleTimeout: 20 * time.Second, + MaxLifetime: 50 * time.Second, }, RowStreamer: RowStreamerConfig{ MaxInnoDBTrxHistLen: 1000000, MaxMySQLReplLagSecs: 43200, }, } - _ = cfg1.OltpReadPool.TimeoutSeconds.Set("10s") - _ = cfg1.OltpReadPool.IdleTimeoutSeconds.Set("20s") - _ = cfg1.OltpReadPool.MaxLifetimeSeconds.Set("50s") cfg2 := cfg1.Clone() assert.Equal(t, cfg1, cfg2) @@ -206,14 +202,14 @@ func TestFlags(t *testing.T) { // Simple Init. Init() - _ = want.OlapReadPool.IdleTimeoutSeconds.Set("30m") - _ = want.TxPool.IdleTimeoutSeconds.Set("30m") + want.OlapReadPool.IdleTimeout = 30 * time.Minute + want.TxPool.IdleTimeout = 30 * time.Minute want.HotRowProtection.Mode = Disable want.Consolidator = Enable - _ = want.Healthcheck.IntervalSeconds.Set("20s") - _ = want.Healthcheck.DegradedThresholdSeconds.Set("30s") - _ = want.Healthcheck.UnhealthyThresholdSeconds.Set("2h") - _ = want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.Healthcheck.Interval = 20 * time.Second + want.Healthcheck.DegradedThreshold = 30 * time.Second + want.Healthcheck.UnhealthyThreshold = 2 * time.Hour + want.ReplicationTracker.HeartbeatInterval = time.Second want.ReplicationTracker.Mode = Disable assert.Equal(t, want.DB, currentConfig.DB) assert.Equal(t, want, currentConfig) @@ -269,52 +265,52 @@ func TestFlags(t *testing.T) { enableHeartbeat = true heartbeatInterval = 1 * time.Second currentConfig.ReplicationTracker.Mode = "" - currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set("0s") + currentConfig.ReplicationTracker.HeartbeatInterval = 0 Init() want.ReplicationTracker.Mode = 
Heartbeat - want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.ReplicationTracker.HeartbeatInterval = time.Second assert.Equal(t, want, currentConfig) enableHeartbeat = false heartbeatInterval = 1 * time.Second currentConfig.ReplicationTracker.Mode = "" - currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set("0s") + currentConfig.ReplicationTracker.HeartbeatInterval = 0 Init() want.ReplicationTracker.Mode = Disable - want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.ReplicationTracker.HeartbeatInterval = time.Second assert.Equal(t, want, currentConfig) enableReplicationReporter = true heartbeatInterval = 1 * time.Second currentConfig.ReplicationTracker.Mode = "" - currentConfig.ReplicationTracker.HeartbeatIntervalSeconds.Set("0s") + currentConfig.ReplicationTracker.HeartbeatInterval = 0 Init() want.ReplicationTracker.Mode = Polling - want.ReplicationTracker.HeartbeatIntervalSeconds.Set("1s") + want.ReplicationTracker.HeartbeatInterval = time.Second assert.Equal(t, want, currentConfig) - healthCheckInterval = 1 * time.Second - currentConfig.Healthcheck.IntervalSeconds.Set("0s") + healthCheckInterval = time.Second + currentConfig.Healthcheck.Interval = 0 Init() - want.Healthcheck.IntervalSeconds.Set("1s") + want.Healthcheck.Interval = time.Second assert.Equal(t, want, currentConfig) degradedThreshold = 2 * time.Second - currentConfig.Healthcheck.DegradedThresholdSeconds.Set("0s") + currentConfig.Healthcheck.DegradedThreshold = 0 Init() - want.Healthcheck.DegradedThresholdSeconds.Set("2s") + want.Healthcheck.DegradedThreshold = 2 * time.Second assert.Equal(t, want, currentConfig) unhealthyThreshold = 3 * time.Second - currentConfig.Healthcheck.UnhealthyThresholdSeconds.Set("0s") + currentConfig.Healthcheck.UnhealthyThreshold = 0 Init() - want.Healthcheck.UnhealthyThresholdSeconds.Set("3s") + want.Healthcheck.UnhealthyThreshold = 3 * time.Second assert.Equal(t, want, currentConfig) transitionGracePeriod = 4 * time.Second - 
currentConfig.GracePeriods.TransitionSeconds.Set("0s") + currentConfig.GracePeriods.Transition = 0 Init() - want.GracePeriods.TransitionSeconds.Set("4s") + want.GracePeriods.Transition = 4 * time.Second assert.Equal(t, want, currentConfig) currentConfig.SanitizeLogMessages = false @@ -425,7 +421,6 @@ func TestVerifyTxThrottlerConfig(t *testing.T) { } for _, test := range tests { - test := test t.Run(test.Name, func(t *testing.T) { t.Parallel() @@ -451,3 +446,38 @@ func TestVerifyTxThrottlerConfig(t *testing.T) { }) } } + +func TestVerifyUnmanagedTabletConfig(t *testing.T) { + oldDisableActiveReparents := mysqlctl.DisableActiveReparents + defer func() { + mysqlctl.DisableActiveReparents = oldDisableActiveReparents + }() + + config := defaultConfig + + db := fakesqldb.New(t) + defer db.Close() + + params := db.ConnParams() + config.DB = dbconfigs.NewTestDBConfigs(*params, *params, "") + + // By default, unmanaged mode should be false + err := config.verifyUnmanagedTabletConfig() + assert.Nil(t, err) + + config.Unmanaged = true + err = config.verifyUnmanagedTabletConfig() + assert.EqualError(t, err, "no connection parameters specified but unmanaged mode specified") + + config.DB.Socket = db.ConnParams().UnixSocket + err = config.verifyUnmanagedTabletConfig() + assert.EqualError(t, err, "database app user not specified") + + config.DB.App.User = "testUser" + err = config.verifyUnmanagedTabletConfig() + assert.EqualError(t, err, "database app user password not specified") + + config.DB.App.Password = "testPassword" + err = config.verifyUnmanagedTabletConfig() + assert.Nil(t, err) +} diff --git a/go/vt/vttablet/tabletserver/tabletenv/env.go b/go/vt/vttablet/tabletserver/tabletenv/env.go index c7202080c4d..27b4330c735 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/env.go +++ b/go/vt/vttablet/tabletserver/tabletenv/env.go @@ -22,39 +22,44 @@ import ( "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtenv" 
) // Env defines the functions supported by TabletServer -// that the sub-componennts need to access. +// that the sub-components need to access. type Env interface { CheckMySQL() Config() *TabletConfig Exporter() *servenv.Exporter Stats() *Stats LogError() + Environment() *vtenv.Environment } type testEnv struct { config *TabletConfig exporter *servenv.Exporter stats *Stats + env *vtenv.Environment } // NewEnv creates an Env that can be used for tabletserver subcomponents // without an actual TabletServer. -func NewEnv(config *TabletConfig, exporterName string) Env { +func NewEnv(env *vtenv.Environment, config *TabletConfig, exporterName string) Env { exporter := servenv.NewExporter(exporterName, "Tablet") return &testEnv{ config: config, exporter: exporter, stats: NewStats(exporter), + env: env, } } -func (*testEnv) CheckMySQL() {} -func (te *testEnv) Config() *TabletConfig { return te.config } -func (te *testEnv) Exporter() *servenv.Exporter { return te.exporter } -func (te *testEnv) Stats() *Stats { return te.stats } +func (*testEnv) CheckMySQL() {} +func (te *testEnv) Config() *TabletConfig { return te.config } +func (te *testEnv) Exporter() *servenv.Exporter { return te.exporter } +func (te *testEnv) Stats() *Stats { return te.stats } +func (te *testEnv) Environment() *vtenv.Environment { return te.env } func (te *testEnv) LogError() { if x := recover(); x != nil { diff --git a/go/vt/vttablet/tabletserver/tabletenv/logstats.go b/go/vt/vttablet/tabletserver/tabletenv/logstats.go index 962b5f0b122..ad7e09de169 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/logstats.go +++ b/go/vt/vttablet/tabletserver/tabletenv/logstats.go @@ -18,7 +18,6 @@ package tabletenv import ( "context" - "fmt" "io" "net/url" "strings" @@ -26,6 +25,7 @@ import ( "github.com/google/safehtml" + "vitess.io/vitess/go/logstats" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/vt/callerid" @@ -181,55 +181,65 @@ func (stats *LogStats) Logf(w io.Writer, 
params url.Values) error { return nil } - rewrittenSQL := "[REDACTED]" - formattedBindVars := "\"[REDACTED]\"" - - if !streamlog.GetRedactDebugUIQueries() { - rewrittenSQL = stats.RewrittenSQL() - - _, fullBindParams := params["full"] - formattedBindVars = sqltypes.FormatBindVariables( - stats.BindVariables, - fullBindParams, - streamlog.GetQueryLogFormat() == streamlog.QueryLogFormatJSON, - ) - } - + redacted := streamlog.GetRedactDebugUIQueries() + _, fullBindParams := params["full"] // TODO: remove username here we fully enforce immediate caller id callInfo, username := stats.CallInfo() - // Valid options for the QueryLogFormat are text or json - var fmtString string - switch streamlog.GetQueryLogFormat() { - case streamlog.QueryLogFormatText: - fmtString = "%v\t%v\t%v\t'%v'\t'%v'\t%v\t%v\t%.6f\t%v\t%q\t%v\t%v\t%q\t%v\t%.6f\t%.6f\t%v\t%v\t%v\t%q\t\n" - case streamlog.QueryLogFormatJSON: - fmtString = "{\"Method\": %q, \"CallInfo\": %q, \"Username\": %q, \"ImmediateCaller\": %q, \"Effective Caller\": %q, \"Start\": \"%v\", \"End\": \"%v\", \"TotalTime\": %.6f, \"PlanType\": %q, \"OriginalSQL\": %q, \"BindVars\": %v, \"Queries\": %v, \"RewrittenSQL\": %q, \"QuerySources\": %q, \"MysqlTime\": %.6f, \"ConnWaitTime\": %.6f, \"RowsAffected\": %v,\"TransactionID\": %v,\"ResponseSize\": %v, \"Error\": %q}\n" + log := logstats.NewLogger() + log.Init(streamlog.GetQueryLogFormat() == streamlog.QueryLogFormatJSON) + log.Key("Method") + log.StringUnquoted(stats.Method) + log.Key("CallInfo") + log.StringUnquoted(callInfo) + log.Key("Username") + log.StringUnquoted(username) + log.Key("ImmediateCaller") + log.StringSingleQuoted(stats.ImmediateCaller()) + log.Key("Effective Caller") + log.StringSingleQuoted(stats.EffectiveCaller()) + log.Key("Start") + log.Time(stats.StartTime) + log.Key("End") + log.Time(stats.EndTime) + log.Key("TotalTime") + log.Duration(stats.TotalTime()) + log.Key("PlanType") + log.StringUnquoted(stats.PlanType) + log.Key("OriginalSQL") + 
log.String(stats.OriginalSQL) + log.Key("BindVars") + if redacted { + log.Redacted() + } else { + log.BindVariables(stats.BindVariables, fullBindParams) } - - _, err := fmt.Fprintf( - w, - fmtString, - stats.Method, - callInfo, - username, - stats.ImmediateCaller(), - stats.EffectiveCaller(), - stats.StartTime.Format("2006-01-02 15:04:05.000000"), - stats.EndTime.Format("2006-01-02 15:04:05.000000"), - stats.TotalTime().Seconds(), - stats.PlanType, - stats.OriginalSQL, - formattedBindVars, - stats.NumberOfQueries, - rewrittenSQL, - stats.FmtQuerySources(), - stats.MysqlResponseTime.Seconds(), - stats.WaitingForConnection.Seconds(), - stats.RowsAffected, - stats.TransactionID, - stats.SizeOfResponse(), - stats.ErrorStr(), - ) - return err + log.Key("Queries") + log.Int(int64(stats.NumberOfQueries)) + log.Key("RewrittenSQL") + if redacted { + log.Redacted() + } else { + log.String(stats.RewrittenSQL()) + } + log.Key("QuerySources") + log.StringUnquoted(stats.FmtQuerySources()) + log.Key("MysqlTime") + log.Duration(stats.MysqlResponseTime) + log.Key("ConnWaitTime") + log.Duration(stats.WaitingForConnection) + log.Key("RowsAffected") + log.Uint(uint64(stats.RowsAffected)) + log.Key("TransactionID") + log.Int(stats.TransactionID) + log.Key("ResponseSize") + log.Int(int64(stats.SizeOfResponse())) + log.Key("Error") + log.String(stats.ErrorStr()) + + // logstats from the vttablet are always tab-terminated; keep this for backwards + // compatibility for existing parsers + log.TabTerminated() + + return log.Flush(w) } diff --git a/go/vt/vttablet/tabletserver/tabletenv/logstats_test.go b/go/vt/vttablet/tabletserver/tabletenv/logstats_test.go index 84de50aae74..7412a0a436c 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/logstats_test.go +++ b/go/vt/vttablet/tabletserver/tabletenv/logstats_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package tabletenv import ( - "bytes" "context" "encoding/json" "errors" @@ -54,7 +53,7 @@ func TestLogStats(t *testing.T) { } func testFormat(stats *LogStats, params url.Values) string { - var b bytes.Buffer + var b strings.Builder stats.Logf(&b, params) return b.String() } @@ -74,7 +73,7 @@ func TestLogStatsFormat(t *testing.T) { streamlog.SetRedactDebugUIQueries(false) streamlog.SetQueryLogFormat("text") got := testFormat(logStats, url.Values(params)) - want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql\"\tmap[intVal:type:INT64 value:\"1\"]\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t12345\t1\t\"\"\t\n" + want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t12345\t1\t\"\"\t\n" if got != want { t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) } @@ -128,7 +127,7 @@ func TestLogStatsFormat(t *testing.T) { streamlog.SetQueryLogFormat("text") got = testFormat(logStats, url.Values(params)) - want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql\"\tmap[strVal:type:VARCHAR value:\"abc\"]\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t12345\t1\t\"\"\t\n" + want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql\"\t{\"strVal\": {\"type\": \"VARCHAR\", \"value\": \"abc\"}}\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t12345\t1\t\"\"\t\n" if got != want { t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) } @@ -165,14 +164,14 @@ func TestLogStatsFilter(t *testing.T) { params := map[string][]string{"full": {}} got := testFormat(logStats, url.Values(params)) - want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t1\t\"sql with 
pii\"\tmysql\t0.000000\t0.000000\t0\t0\t1\t\"\"\t\n" + want := "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql /* LOG_THIS_QUERY */\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t0\t1\t\"\"\t\n" if got != want { t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) } streamlog.SetQueryLogFilterTag("LOG_THIS_QUERY") got = testFormat(logStats, url.Values(params)) - want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql /* LOG_THIS_QUERY */\"\tmap[intVal:type:INT64 value:\"1\"]\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t0\t1\t\"\"\t\n" + want = "test\t\t\t''\t''\t2017-01-01 01:02:03.000000\t2017-01-01 01:02:04.000001\t1.000001\t\t\"sql /* LOG_THIS_QUERY */\"\t{\"intVal\": {\"type\": \"INT64\", \"value\": 1}}\t1\t\"sql with pii\"\tmysql\t0.000000\t0.000000\t0\t0\t1\t\"\"\t\n" if got != want { t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) } @@ -183,7 +182,6 @@ func TestLogStatsFilter(t *testing.T) { if got != want { t.Errorf("logstats format: got:\n%q\nwant:\n%q\n", got, want) } - } func TestLogStatsFormatQuerySources(t *testing.T) { diff --git a/go/vt/vttablet/tabletserver/tabletenv/seconds.go b/go/vt/vttablet/tabletserver/tabletenv/seconds.go index 205b571c9b1..ae11121f2de 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/seconds.go +++ b/go/vt/vttablet/tabletserver/tabletenv/seconds.go @@ -23,7 +23,7 @@ import ( ) // Seconds provides convenience functions for extracting -// duration from flaot64 seconds values. +// duration from float64 seconds values. type Seconds float64 // SecondsVar is like a flag.Float64Var, but it works for Seconds. 
diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 25eb4da7168..d74bcb09952 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -28,15 +28,13 @@ import ( "sort" "strconv" "strings" - "sync" "sync/atomic" "syscall" "time" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" - - "vitess.io/vitess/go/acl" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/tb" @@ -52,6 +50,7 @@ import ( "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/onlineddl" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -128,6 +127,8 @@ type TabletServer struct { // This field is only stored for testing checkMysqlGaugeFunc *stats.GaugeFunc + + env *vtenv.Environment } var _ queryservice.QueryService = (*TabletServer)(nil) @@ -138,18 +139,13 @@ var _ queryservice.QueryService = (*TabletServer)(nil) var RegisterFunctions []func(Controller) // NewServer creates a new TabletServer based on the command line flags. -func NewServer(ctx context.Context, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { - return NewTabletServer(ctx, name, tabletenv.NewCurrentConfig(), topoServer, alias) +func NewServer(ctx context.Context, env *vtenv.Environment, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias, srvTopoCounts *stats.CountersWithSingleLabel) *TabletServer { + return NewTabletServer(ctx, env, name, tabletenv.NewCurrentConfig(), topoServer, alias, srvTopoCounts) } -var ( - tsOnce sync.Once - srvTopoServer srvtopo.Server -) - // NewTabletServer creates an instance of TabletServer. Only the first // instance of TabletServer will expose its state variables. 
-func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { +func NewTabletServer(ctx context.Context, env *vtenv.Environment, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias, srvTopoCounts *stats.CountersWithSingleLabel) *TabletServer { exporter := servenv.NewExporter(name, "Tablet") tsv := &TabletServer{ exporter: exporter, @@ -160,21 +156,22 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC enableHotRowProtection: config.HotRowProtection.Mode != tabletenv.Disable, topoServer: topoServer, alias: alias.CloneVT(), + env: env, } - tsv.QueryTimeout.Store(config.Oltp.QueryTimeoutSeconds.Get().Nanoseconds()) + tsv.QueryTimeout.Store(config.Oltp.QueryTimeout.Nanoseconds()) - tsOnce.Do(func() { srvTopoServer = srvtopo.NewResilientServer(ctx, topoServer, "TabletSrvTopo") }) + srvTopoServer := srvtopo.NewResilientServer(ctx, topoServer, srvTopoCounts) tabletTypeFunc := func() topodatapb.TabletType { - if tsv.sm == nil { + if tsv.sm == nil || tsv.sm.Target() == nil { return topodatapb.TabletType_UNKNOWN } return tsv.sm.Target().TabletType } - tsv.statelessql = NewQueryList("oltp-stateless") - tsv.statefulql = NewQueryList("oltp-stateful") - tsv.olapql = NewQueryList("olap") + tsv.statelessql = NewQueryList("oltp-stateless", env.Parser()) + tsv.statefulql = NewQueryList("oltp-stateful", env.Parser()) + tsv.olapql = NewQueryList("olap", env.Parser()) tsv.se = schema.NewEngine(tsv) tsv.hs = newHealthStreamer(tsv, alias, tsv.se) tsv.rt = repltracker.NewReplTracker(tsv, alias) @@ -187,8 +184,8 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC tsv.te = NewTxEngine(tsv) tsv.messager = messager.NewEngine(tsv, tsv.se, tsv.vstreamer) - tsv.onlineDDLExecutor = onlineddl.NewExecutor(tsv, alias, topoServer, tsv.lagThrottler, tabletTypeFunc, 
tsv.onlineDDLExecutorToggleTableBuffer) tsv.tableGC = gc.NewTableGC(tsv, topoServer, tsv.lagThrottler) + tsv.onlineDDLExecutor = onlineddl.NewExecutor(tsv, alias, topoServer, tsv.lagThrottler, tabletTypeFunc, tsv.onlineDDLExecutorToggleTableBuffer, tsv.tableGC.RequestChecks) tsv.sm = &stateManager{ statelessql: tsv.statelessql, @@ -207,6 +204,7 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC ddle: tsv.onlineDDLExecutor, throttler: tsv.lagThrottler, tableGC: tsv.tableGC, + rw: newRequestsWaiter(), } tsv.exporter.NewGaugeFunc("TabletState", "Tablet server state", func() int64 { return int64(tsv.sm.State()) }) @@ -223,6 +221,8 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC tsv.registerHealthzHealthHandler() tsv.registerDebugHealthHandler() tsv.registerQueryzHandler() + tsv.registerQuerylogzHandler() + tsv.registerTxlogzHandler() tsv.registerQueryListHandlers([]*QueryList{tsv.statelessql, tsv.statefulql, tsv.olapql}) tsv.registerTwopczHandler() tsv.registerMigrationStatusHandler() @@ -238,11 +238,11 @@ func (tsv *TabletServer) loadQueryTimeout() time.Duration { // onlineDDLExecutorToggleTableBuffer is called by onlineDDLExecutor as a callback function. onlineDDLExecutor // uses it to start/stop query buffering for a given table. -// It is onlineDDLExecutor's responsibility to make sure beffering is stopped after some definite amount of time. +// It is onlineDDLExecutor's responsibility to make sure buffering is stopped after some definite amount of time. // There are two layers to buffering/unbuffering: // 1. the creation and destruction of a QueryRuleSource. The existence of such source affects query plan rules // for all new queries (see Execute() function and call to GetPlan()) -// 2. affecting already existing rules: a Rule has a concext.WithCancel, that is cancelled by onlineDDLExecutor +// 2. 
affecting already existing rules: a Rule has a context.WithCancel, that is cancelled by onlineDDLExecutor func (tsv *TabletServer) onlineDDLExecutorToggleTableBuffer(bufferingCtx context.Context, tableName string, timeout time.Duration, bufferQueries bool) { queryRuleSource := fmt.Sprintf("onlineddl/%s", tableName) @@ -301,6 +301,11 @@ func (tsv *TabletServer) Stats() *tabletenv.Stats { return tsv.stats } +// Environment satisfies tabletenv.Env. +func (tsv *TabletServer) Environment() *vtenv.Environment { + return tsv.env +} + // LogError satisfies tabletenv.Env. func (tsv *TabletServer) LogError() { if x := recover(); x != nil { @@ -426,7 +431,7 @@ func (tsv *TabletServer) ReloadSchema(ctx context.Context) error { // changes to finish being applied. func (tsv *TabletServer) WaitForSchemaReset(timeout time.Duration) { onSchemaChange := make(chan struct{}, 1) - tsv.se.RegisterNotifier("_tsv_wait", func(_ map[string]*schema.Table, _, _, _ []*schema.Table) { + tsv.se.RegisterNotifier("_tsv_wait", func(_ map[string]*schema.Table, _, _, _ []*schema.Table, _ bool) { onSchemaChange <- struct{}{} }, true) defer tsv.se.UnregisterNotifier("_tsv_wait") @@ -516,7 +521,11 @@ func (tsv *TabletServer) begin(ctx context.Context, target *querypb.Target, save logStats.OriginalSQL = beginSQL if beginSQL != "" { tsv.stats.QueryTimings.Record("BEGIN", startTime) - tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), startTime) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), startTime) } else { logStats.Method = "" } @@ -550,6 +559,24 @@ func (tsv *TabletServer) getPriorityFromOptions(options *querypb.ExecuteOptions) return optionsPriority } +// resolveTargetType returns the appropriate target tablet type for a +// TabletServer request. If the caller has a local context then it's +// an internal request and the target is the local tablet's current +// target. 
If it's not a local context then there should always be a +// non-nil target specified. +func (tsv *TabletServer) resolveTargetType(ctx context.Context, target *querypb.Target) (topodatapb.TabletType, error) { + if target != nil { + return target.TabletType, nil + } + if !tabletenv.IsLocalContext(ctx) { + return topodatapb.TabletType_UNKNOWN, ErrNoTarget + } + if tsv.sm.Target() == nil { + return topodatapb.TabletType_UNKNOWN, nil // This is true, and does not block the request + } + return tsv.sm.Target().TabletType, nil +} + // Commit commits the specified transaction. func (tsv *TabletServer) Commit(ctx context.Context, target *querypb.Target, transactionID int64) (newReservedID int64, err error) { err = tsv.execRequest( @@ -572,7 +599,11 @@ func (tsv *TabletServer) Commit(ctx context.Context, target *querypb.Target, tra // handlePanicAndSendLogStats doesn't log the no-op. if commitSQL != "" { tsv.stats.QueryTimings.Record("COMMIT", startTime) - tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), startTime) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), startTime) } else { logStats.Method = "" } @@ -590,7 +621,11 @@ func (tsv *TabletServer) Rollback(ctx context.Context, target *querypb.Target, t target, nil, true, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("ROLLBACK", time.Now()) - defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + defer tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), time.Now()) logStats.TransactionID = transactionID newReservedID, err = tsv.te.Rollback(ctx, transactionID) if newReservedID > 0 { @@ -801,18 +836,22 @@ func (tsv *TabletServer) execute(ctx context.Context, target *querypb.Target, sq 
return err } } + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } qre := &QueryExecutor{ - query: query, - marginComments: comments, - bindVars: bindVariables, - connID: connID, - options: options, - plan: plan, - ctx: ctx, - logStats: logStats, - tsv: tsv, - tabletType: target.GetTabletType(), - setting: connSetting, + query: query, + marginComments: comments, + bindVars: bindVariables, + connID: connID, + options: options, + plan: plan, + ctx: ctx, + logStats: logStats, + tsv: tsv, + targetTabletType: targetType, + setting: connSetting, } result, err = qre.Execute() if err != nil { @@ -904,16 +943,17 @@ func (tsv *TabletServer) streamExecute(ctx context.Context, target *querypb.Targ } } qre := &QueryExecutor{ - query: query, - marginComments: comments, - bindVars: bindVariables, - connID: connID, - options: options, - plan: plan, - ctx: ctx, - logStats: logStats, - tsv: tsv, - setting: connSetting, + query: query, + marginComments: comments, + bindVars: bindVariables, + connID: connID, + options: options, + plan: plan, + ctx: ctx, + logStats: logStats, + tsv: tsv, + targetTabletType: target.GetTabletType(), + setting: connSetting, } return qre.Stream(callback) }, @@ -1204,7 +1244,11 @@ func (tsv *TabletServer) ReserveBeginExecute(ctx context.Context, target *queryp target, options, false, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) - defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + defer tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), time.Now()) connID, sessionStateChanges, err = tsv.te.ReserveBegin(ctx, options, preQueries, postBeginQueries) if err != nil { return err @@ -1250,7 +1294,11 @@ func (tsv *TabletServer) ReserveBeginStreamExecute( target, options, false, /* 
allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) - defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + defer tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), time.Now()) connID, sessionStateChanges, err = tsv.te.ReserveBegin(ctx, options, preQueries, postBeginQueries) if err != nil { return err @@ -1304,7 +1352,11 @@ func (tsv *TabletServer) ReserveExecute(ctx context.Context, target *querypb.Tar target, options, allowOnShutdown, func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) - defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + defer tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), time.Now()) state.ReservedID, err = tsv.te.Reserve(ctx, options, transactionID, preQueries) if err != nil { return err @@ -1355,7 +1407,11 @@ func (tsv *TabletServer) ReserveStreamExecute( target, options, allowOnShutdown, func(ctx context.Context, logStats *tabletenv.LogStats) error { defer tsv.stats.QueryTimings.Record("RESERVE", time.Now()) - defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + defer tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), time.Now()) state.ReservedID, err = tsv.te.Reserve(ctx, options, transactionID, preQueries) if err != nil { return err @@ -1385,7 +1441,11 @@ func (tsv *TabletServer) Release(ctx context.Context, target *querypb.Target, tr target, nil, true, /* allowOnShutdown */ func(ctx context.Context, logStats *tabletenv.LogStats) error { defer 
tsv.stats.QueryTimings.Record("RELEASE", time.Now()) - defer tsv.stats.QueryTimingsByTabletType.Record(target.TabletType.String(), time.Now()) + targetType, err := tsv.resolveTargetType(ctx, target) + if err != nil { + return err + } + defer tsv.stats.QueryTimingsByTabletType.Record(targetType.String(), time.Now()) logStats.TransactionID = transactionID logStats.ReservedID = reservedID if reservedID != 0 { @@ -1393,7 +1453,7 @@ func (tsv *TabletServer) Release(ctx context.Context, target *querypb.Target, tr return tsv.te.Release(reservedID) } // Rollback to cleanup the transaction before returning to the pool. - _, err := tsv.te.Rollback(ctx, transactionID) + _, err = tsv.te.Rollback(ctx, transactionID) return err }, ) @@ -1469,6 +1529,7 @@ func (tsv *TabletServer) execRequest( span.Annotate("workload_name", options.WorkloadName) } trace.AnnotateSQL(span, sqlparser.Preview(sql)) + // With a tabletenv.LocalContext() the target will be nil. if target != nil { span.Annotate("cell", target.Cell) span.Annotate("shard", target.Shard) @@ -1512,13 +1573,13 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( // not a concern. var messagef, logMessage, query, truncatedQuery string messagef = fmt.Sprintf("Uncaught panic for %%v:\n%v\n%s", x, tb.Stack(4) /* Skip the last 4 boiler-plate frames. 
*/) - query = queryAsString(sql, bindVariables, tsv.TerseErrors, false) + query = queryAsString(sql, bindVariables, tsv.TerseErrors, false, tsv.env.Parser()) terr := vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "%s", fmt.Sprintf(messagef, query)) if tsv.TerseErrors == tsv.Config().SanitizeLogMessages { - truncatedQuery = queryAsString(sql, bindVariables, tsv.TerseErrors, true) + truncatedQuery = queryAsString(sql, bindVariables, tsv.TerseErrors, true, tsv.env.Parser()) logMessage = fmt.Sprintf(messagef, truncatedQuery) } else { - truncatedQuery = queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true) + truncatedQuery = queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.env.Parser()) logMessage = fmt.Sprintf(messagef, truncatedQuery) } log.Error(logMessage) @@ -1578,20 +1639,20 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin sqlState := sqlErr.SQLState() errnum := sqlErr.Number() if tsv.TerseErrors && errCode != vtrpcpb.Code_FAILED_PRECONDITION { - err = vterrors.Errorf(errCode, "(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.TerseErrors, false)) + err = vterrors.Errorf(errCode, "(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.TerseErrors, false, tsv.env.Parser())) if logMethod != nil { - message = fmt.Sprintf("(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.env.Parser())) } } else { - err = vterrors.Errorf(errCode, "%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, false, false)) + err = vterrors.Errorf(errCode, "%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, 
sqlState, callerID, queryAsString(sql, bindVariables, false, false, tsv.env.Parser())) if logMethod != nil { - message = fmt.Sprintf("%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.env.Parser())) } } } else { err = vterrors.Errorf(errCode, "%v%s", err.Error(), callerID) if logMethod != nil { - message = fmt.Sprintf("%v: %v", err, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("%v: %v", err, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.env.Parser())) } } @@ -1775,6 +1836,18 @@ func (tsv *TabletServer) registerQueryzHandler() { }) } +func (tsv *TabletServer) registerQuerylogzHandler() { + tsv.exporter.HandleFunc("/querylogz", func(w http.ResponseWriter, r *http.Request) { + ch := tabletenv.StatsLogger.Subscribe("querylogz") + defer tabletenv.StatsLogger.Unsubscribe(ch) + querylogzHandler(ch, w, r, tsv.env.Parser()) + }) +} + +func (tsv *TabletServer) registerTxlogzHandler() { + tsv.exporter.HandleFunc("/txlogz", txlogzHandler) +} + func (tsv *TabletServer) registerQueryListHandlers(queryLists []*QueryList) { tsv.exporter.HandleFunc("/livequeryz/", func(w http.ResponseWriter, r *http.Request) { livequeryzHandler(queryLists, w, r) @@ -1923,11 +1996,11 @@ func (tsv *TabletServer) EnableHistorian(enabled bool) { } // SetPoolSize changes the pool size to the specified value. -func (tsv *TabletServer) SetPoolSize(val int) { +func (tsv *TabletServer) SetPoolSize(ctx context.Context, val int) error { if val <= 0 { - return + return nil } - tsv.qe.conns.SetCapacity(int64(val)) + return tsv.qe.conns.SetCapacity(ctx, int64(val)) } // PoolSize returns the pool size. 
@@ -1936,8 +2009,8 @@ func (tsv *TabletServer) PoolSize() int { } // SetStreamPoolSize changes the pool size to the specified value. -func (tsv *TabletServer) SetStreamPoolSize(val int) { - tsv.qe.streamConns.SetCapacity(int64(val)) +func (tsv *TabletServer) SetStreamPoolSize(ctx context.Context, val int) error { + return tsv.qe.streamConns.SetCapacity(ctx, int64(val)) } // SetStreamConsolidationBlocking sets whether the stream consolidator should wait for slow clients @@ -1951,8 +2024,8 @@ func (tsv *TabletServer) StreamPoolSize() int { } // SetTxPoolSize changes the tx pool size to the specified value. -func (tsv *TabletServer) SetTxPoolSize(val int) { - tsv.te.txPool.scp.conns.SetCapacity(int64(val)) +func (tsv *TabletServer) SetTxPoolSize(ctx context.Context, val int) error { + return tsv.te.txPool.scp.conns.SetCapacity(ctx, int64(val)) } // TxPoolSize returns the tx pool size. @@ -2023,7 +2096,7 @@ func (tsv *TabletServer) ConsolidatorMode() string { // If sanitize is false it also includes the bind variables. // If truncateForLog is true, it truncates the sql query and the // bind variables. -func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, sanitize bool, truncateForLog bool) string { +func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, sanitize bool, truncateForLog bool, parser *sqlparser.Parser) string { // Add the bind vars unless this needs to be sanitized, e.g. 
for log messages bvBuf := &bytes.Buffer{} fmt.Fprintf(bvBuf, "BindVars: {") @@ -2047,7 +2120,7 @@ func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, s // Truncate the bind vars if necessary bv := bvBuf.String() - maxLen := sqlparser.GetTruncateErrLen() + maxLen := parser.GetTruncateErrLen() if truncateForLog && maxLen > 0 && len(bv) > maxLen { if maxLen <= 12 { bv = sqlparser.TruncationText @@ -2058,7 +2131,7 @@ func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, s // Truncate the sql query if necessary if truncateForLog { - sql = sqlparser.TruncateForLog(sql) + sql = parser.TruncateForLog(sql) } // sql is the normalized query without the bind vars diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index d2fb10e5a77..92bfa25650a 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -30,10 +30,12 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sidecardb" - + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/test/utils" @@ -151,6 +153,10 @@ func TestTabletServerPrimaryToReplica(t *testing.T) { defer cancel() // Reuse code from tx_executor_test. _, tsv, db := newTestTxExecutor(t, ctx) + // This is required because the test is verifying that we rollback transactions on changing serving type, + // but that only happens immediately if the shut down grace period is not specified. 
+ tsv.te.shutdownGracePeriod = 0 + tsv.sm.shutdownGracePeriod = 0 defer tsv.StopService() defer db.Close() target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} @@ -178,7 +184,7 @@ func TestTabletServerPrimaryToReplica(t *testing.T) { select { case <-ch: t.Fatal("ch should not fire") - case <-time.After(10 * time.Millisecond): + case <-time.After(100 * time.Millisecond): } require.EqualValues(t, 1, tsv.te.txPool.scp.active.Size(), "tsv.te.txPool.scp.active.Size()") @@ -441,9 +447,9 @@ func TestTabletServerConcludeTransaction(t *testing.T) { func TestTabletServerBeginFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TxPool.Size = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + cfg := tabletenv.NewDefaultConfig() + cfg.TxPool.Size = 1 + db, tsv := setupTabletServerTestCustom(t, ctx, cfg, "", vtenv.NewTestEnv()) defer tsv.StopService() defer db.Close() @@ -491,7 +497,7 @@ func TestTabletServerCommiRollbacktFail(t *testing.T) { target := querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} _, err := tsv.Commit(ctx, &target, -1) - want := "transaction -1: not found" + want := "transaction -1: not found (potential transaction timeout)" require.Equal(t, want, err.Error()) _, err = tsv.Rollback(ctx, &target, -1) require.Equal(t, want, err.Error()) @@ -563,6 +569,78 @@ func TestTabletServerCommitPrepared(t *testing.T) { require.NoError(t, err) } +// TestTabletServerWithNilTarget confirms that a nil target is +// handled correctly. This means that when a local context is +// used, the target type is inferred from the local tablet's +// latest target type. +// And if it's not a local context then we return an error. +func TestTabletServerWithNilTarget(t *testing.T) { + // A non-nil target is required when not using a local context. 
+ ctx := tabletenv.LocalContext() + db, tsv := setupTabletServerTest(t, ctx, "") + defer tsv.StopService() + defer db.Close() + + // With a nil target, the local tablet's latest target type is + // what should be used as the inferred target type for our local + // calls. + target := (*querypb.Target)(nil) + localTargetType := topodatapb.TabletType_RDONLY // Use a non-default type + err := tsv.SetServingType(localTargetType, time.Now(), true, "test") + require.NoError(t, err) + + baseKey := "TabletServerTest" // Our TabletServer's name + fullKey := fmt.Sprintf("%s.%s", baseKey, localTargetType.String()) + + executeSQL := "select * from test_table limit 1000" + executeSQLResult := &sqltypes.Result{ + Fields: []*querypb.Field{ + {Type: sqltypes.VarBinary}, + }, + Rows: [][]sqltypes.Value{ + {sqltypes.NewVarBinary("row01")}, + }, + } + // BEGIN gets transmuted to this since it's a RDONLY tablet. + db.AddQuery("start transaction read only", &sqltypes.Result{}) + db.AddQuery(executeSQL, executeSQLResult) + + expectedCount := tsv.stats.QueryTimingsByTabletType.Counts()[fullKey] + + state, err := tsv.Begin(ctx, target, nil) + require.NoError(t, err) + expectedCount++ + require.Equal(t, expectedCount, tsv.stats.QueryTimingsByTabletType.Counts()[fullKey]) + + _, err = tsv.Execute(ctx, target, executeSQL, nil, state.TransactionID, 0, nil) + require.NoError(t, err) + expectedCount++ + require.Equal(t, expectedCount, tsv.stats.QueryTimingsByTabletType.Counts()[fullKey]) + + _, err = tsv.Rollback(ctx, target, state.TransactionID) + require.NoError(t, err) + expectedCount++ + require.Equal(t, expectedCount, tsv.stats.QueryTimingsByTabletType.Counts()[fullKey]) + + state, err = tsv.Begin(ctx, target, nil) + require.NoError(t, err) + expectedCount++ + require.Equal(t, expectedCount, tsv.stats.QueryTimingsByTabletType.Counts()[fullKey]) + + _, err = tsv.Commit(ctx, target, state.TransactionID) + require.NoError(t, err) + expectedCount++ + require.Equal(t, expectedCount, 
tsv.stats.QueryTimingsByTabletType.Counts()[fullKey]) + + // Finally be sure that we return an error now as expected when NOT + // using a local context but passing a nil target. + nonLocalCtx := context.Background() + _, err = tsv.Begin(nonLocalCtx, target, nil) + require.True(t, errors.Is(err, ErrNoTarget)) + _, err = tsv.resolveTargetType(nonLocalCtx, target) + require.True(t, errors.Is(err, ErrNoTarget)) +} + func TestSmallerTimeout(t *testing.T) { testcases := []struct { t1, t2, want time.Duration @@ -874,12 +952,12 @@ func TestSerializeTransactionsSameRow(t *testing.T) { // The actual execution looks like this: // tx1 | tx3 // tx2 - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.Mode = tabletenv.Enable - config.HotRowProtection.MaxConcurrency = 1 + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.Mode = tabletenv.Enable + cfg.HotRowProtection.MaxConcurrency = 1 // Reduce the txpool to 2 because we should never consume more than two slots. - config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + cfg.TxPool.Size = 2 + db, tsv := setupTabletServerTestCustom(t, ctx, cfg, "", vtenv.NewTestEnv()) defer tsv.StopService() defer db.Close() @@ -982,11 +1060,11 @@ func TestSerializeTransactionsSameRow(t *testing.T) { func TestDMLQueryWithoutWhereClause(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.Mode = tabletenv.Enable - config.HotRowProtection.MaxConcurrency = 1 - config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.Mode = tabletenv.Enable + cfg.HotRowProtection.MaxConcurrency = 1 + cfg.TxPool.Size = 2 + db, tsv := setupTabletServerTestCustom(t, ctx, cfg, "", vtenv.NewTestEnv()) defer tsv.StopService() defer db.Close() @@ -1009,12 +1087,12 @@ func TestSerializeTransactionsSameRow_ConcurrentTransactions(t 
*testing.T) { // Out of these three, two can run in parallel because we increased the // ConcurrentTransactions limit to 2. // One out of the three transaction will always get serialized though. - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.Mode = tabletenv.Enable - config.HotRowProtection.MaxConcurrency = 2 + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.Mode = tabletenv.Enable + cfg.HotRowProtection.MaxConcurrency = 2 // Reduce the txpool to 2 because we should never consume more than two slots. - config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + cfg.TxPool.Size = 2 + db, tsv := setupTabletServerTestCustom(t, ctx, cfg, "", vtenv.NewTestEnv()) defer tsv.StopService() defer db.Close() @@ -1044,6 +1122,20 @@ func TestSerializeTransactionsSameRow_ConcurrentTransactions(t *testing.T) { db.SetBeforeFunc("update test_table set name_string = 'tx1' where pk = 1 and `name` = 1 limit 10001", func() { close(tx1Started) + + // Wait for other queries to be pending. + <-allQueriesPending + }) + + db.SetBeforeFunc("update test_table set name_string = 'tx2' where pk = 1 and `name` = 1 limit 10001", + func() { + // Wait for other queries to be pending. + <-allQueriesPending + }) + + db.SetBeforeFunc("update test_table set name_string = 'tx3' where pk = 1 and `name` = 1 limit 10001", + func() { + // Wait for other queries to be pending. <-allQueriesPending }) @@ -1112,6 +1204,8 @@ func TestSerializeTransactionsSameRow_ConcurrentTransactions(t *testing.T) { // to allow more than connection attempt at a time. err := waitForTxSerializationPendingQueries(tsv, "test_table where pk = 1 and `name` = 1", 3) require.NoError(t, err) + + // Signal that all queries are pending now. close(allQueriesPending) wg.Wait() @@ -1146,11 +1240,11 @@ func TestSerializeTransactionsSameRow_TooManyPendingRequests(t *testing.T) { // serialized. 
// Since we start to queue before the transaction pool would queue, we need // to enforce an upper limit as well to protect vttablet. - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.Mode = tabletenv.Enable - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.Mode = tabletenv.Enable + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxConcurrency = 1 + db, tsv := setupTabletServerTestCustom(t, ctx, cfg, "", vtenv.NewTestEnv()) defer tsv.StopService() defer db.Close() @@ -1230,10 +1324,10 @@ func TestSerializeTransactionsSameRow_RequestCanceled(t *testing.T) { // tx1 and tx2 run against the same row. // tx2 is blocked on tx1. Eventually, tx2 is canceled and its request fails. // Only after that tx1 commits and finishes. - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.Mode = tabletenv.Enable - config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.Mode = tabletenv.Enable + cfg.HotRowProtection.MaxConcurrency = 1 + db, tsv := setupTabletServerTestCustom(t, ctx, cfg, "", vtenv.NewTestEnv()) defer tsv.StopService() defer db.Close() @@ -1486,12 +1580,61 @@ func TestHandleExecUnknownError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() logStats := tabletenv.NewLogStats(ctx, "TestHandleExecError") - config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, 
srvTopoCounts) defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, logStats) panic("unknown exec error") } +// TestHandlePanicAndSendLogStatsMessageTruncation tests that when an error truncation +// length is set and a panic occurs, the code in handlePanicAndSendLogStats will +// truncate the error text in logs, but will not truncate the error text in the +// error value. +func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tl := newTestLogger() + defer tl.Close() + logStats := tabletenv.NewLogStats(ctx, "TestHandlePanicAndSendLogStatsMessageTruncation") + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 32, + }) + require.NoError(t, err) + + db, tsv := setupTabletServerTestCustom(t, ctx, tabletenv.NewDefaultConfig(), "", env) + defer tsv.StopService() + defer db.Close() + + longSql := "select * from test_table_loooooooooooooooooooooooooooooooooooong" + longBv := map[string]*querypb.BindVariable{ + "bv1": sqltypes.Int64BindVariable(1111111111), + "bv2": sqltypes.Int64BindVariable(2222222222), + "bv3": sqltypes.Int64BindVariable(3333333333), + "bv4": sqltypes.Int64BindVariable(4444444444), + } + + defer func() { + err := logStats.Error + want := "Uncaught panic for Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" + require.Error(t, err) + assert.Contains(t, err.Error(), want) + want = "Uncaught panic for Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" + gotWhatWeWant := false + for _, log := range tl.getLogs() { + if strings.HasPrefix(log, want) { + gotWhatWeWant = true + break + } + } + assert.True(t, gotWhatWeWant) + }() + + defer 
tsv.handlePanicAndSendLogStats(longSql, longBv, logStats) + panic("panic from TestHandlePanicAndSendLogStatsMessageTruncation") +} + func TestQueryAsString(t *testing.T) { longSql := "select * from test_table_loooooooooooooooooooooooooooooooooooong" longBv := map[string]*querypb.BindVariable{ @@ -1500,23 +1643,25 @@ func TestQueryAsString(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3333333333), "bv4": sqltypes.Int64BindVariable(4444444444), } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 32, + }) + require.NoError(t, err) - query := queryAsString(longSql, longBv, true, true) + query := queryAsString(longSql, longBv, true, true, parser) want := "Sql: \"select * from test_t [TRUNCATED]\", BindVars: {[REDACTED]}" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, true, false) + query = queryAsString(longSql, longBv, true, false, parser) want = "Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {[REDACTED]}" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, false, true) + query = queryAsString(longSql, longBv, false, true, parser) want = "Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, false, false) + query = queryAsString(longSql, longBv, false, false, parser) want = "Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" assert.Equal(t, want, query) } @@ -1606,8 +1751,9 @@ func (tl *testLogger) getLogs() []string { func TestHandleExecTabletError(t *testing.T) { ctx, cancel 
:= context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1629,10 +1775,11 @@ func TestHandleExecTabletError(t *testing.T) { func TestTerseErrors(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = true - config.SanitizeLogMessages = false - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = true + cfg.SanitizeLogMessages = false + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() @@ -1663,10 +1810,11 @@ func TestTerseErrors(t *testing.T) { func TestSanitizeLogMessages(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = false - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = false + cfg.SanitizeLogMessages = true + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, 
vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() @@ -1697,9 +1845,10 @@ func TestSanitizeLogMessages(t *testing.T) { func TestTerseErrorsNonSQLError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = true + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1721,10 +1870,11 @@ func TestTerseErrorsNonSQLError(t *testing.T) { func TestSanitizeLogMessagesNonSQLError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = false - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = false + cfg.SanitizeLogMessages = true + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1746,10 +1896,11 @@ func TestSanitizeLogMessagesNonSQLError(t *testing.T) { func TestSanitizeMessagesBindVars(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - 
config.TerseErrors = true - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = true + cfg.SanitizeLogMessages = true + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() @@ -1777,10 +1928,11 @@ func TestSanitizeMessagesBindVars(t *testing.T) { func TestSanitizeMessagesNoBindVars(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = true - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = true + cfg.SanitizeLogMessages = true + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "", nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "sensitive message"), nil) @@ -1796,9 +1948,10 @@ func TestSanitizeMessagesNoBindVars(t *testing.T) { func TestTruncateErrorLen(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TruncateErrorLen = 32 - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TruncateErrorLen = 32 + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient 
srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1820,19 +1973,24 @@ func TestTruncateErrorLen(t *testing.T) { func TestTruncateMessages(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = false + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = false // Sanitize the log messages, which means that the bind vars are omitted - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg.SanitizeLogMessages = true + env, err := vtenv.New(vtenv.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 52, + }) + require.NoError(t, err) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, env, "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() - sqlparser.SetTruncateErrLen(52) sql := "select * from test_table where xyz = :vtg1 order by abc desc" sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where xyz = 'this is kinda long eh'" - err := tsv.convertAndLogError( + err = tsv.convertAndLogError( ctx, sql, map[string]*querypb.BindVariable{"vtg1": sqltypes.StringBindVariable("this is kinda long eh")}, @@ -1852,7 +2010,7 @@ func TestTruncateMessages(t *testing.T) { t.Errorf("log got '%s', want '%s'", tl.getLog(0), wantLog) } - sqlparser.SetTruncateErrLen(140) + env.Parser().SetTruncateErrLen(140) err = tsv.convertAndLogError( ctx, sql, @@ -1872,15 +2030,15 @@ func TestTruncateMessages(t *testing.T) { if wantLog != tl.getLog(1) { 
t.Errorf("log got '%s', want '%s'", tl.getLog(1), wantLog) } - sqlparser.SetTruncateErrLen(0) } func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = true + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "select * from test_table where id = :a", @@ -1921,8 +2079,9 @@ func TestACLHUP(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() tableacl.Register("simpleacl", &simpleacl.Factory{}) - config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg := tabletenv.NewDefaultConfig() + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, vtenv.NewTestEnv(), "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) f, err := os.CreateTemp("", "tableacl") require.NoError(t, err) @@ -1974,7 +2133,9 @@ func TestConfigChanges(t *testing.T) { newSize := 10 newDuration := time.Duration(10 * time.Millisecond) - tsv.SetPoolSize(newSize) + err := tsv.SetPoolSize(context.Background(), newSize) + require.NoError(t, err) + if val := tsv.PoolSize(); val != newSize { t.Errorf("PoolSize: %d, want %d", val, newSize) } @@ -1982,7 +2143,9 @@ func TestConfigChanges(t *testing.T) { t.Errorf("tsv.qe.connPool.Capacity: %d, want %d", val, newSize) } - 
tsv.SetStreamPoolSize(newSize) + err = tsv.SetStreamPoolSize(context.Background(), newSize) + require.NoError(t, err) + if val := tsv.StreamPoolSize(); val != newSize { t.Errorf("StreamPoolSize: %d, want %d", val, newSize) } @@ -1990,7 +2153,9 @@ func TestConfigChanges(t *testing.T) { t.Errorf("tsv.qe.streamConnPool.Capacity: %d, want %d", val, newSize) } - tsv.SetTxPoolSize(newSize) + err = tsv.SetTxPoolSize(context.Background(), newSize) + require.NoError(t, err) + if val := tsv.TxPoolSize(); val != newSize { t.Errorf("TxPoolSize: %d, want %d", val, newSize) } @@ -2431,14 +2596,15 @@ func TestDatabaseNameReplaceByKeyspaceNameReserveBeginExecuteMethod(t *testing.T } func setupTabletServerTest(t testing.TB, ctx context.Context, keyspaceName string) (*fakesqldb.DB, *TabletServer) { - config := tabletenv.NewDefaultConfig() - return setupTabletServerTestCustom(t, ctx, config, keyspaceName) + cfg := tabletenv.NewDefaultConfig() + return setupTabletServerTestCustom(t, ctx, cfg, keyspaceName, vtenv.NewTestEnv()) } -func setupTabletServerTestCustom(t testing.TB, ctx context.Context, config *tabletenv.TabletConfig, keyspaceName string) (*fakesqldb.DB, *TabletServer) { +func setupTabletServerTestCustom(t testing.TB, ctx context.Context, cfg *tabletenv.TabletConfig, keyspaceName string, env *vtenv.Environment) (*fakesqldb.DB, *TabletServer) { db := setupFakeDB(t) - sidecardb.AddSchemaInitQueries(db, true) - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + sidecardb.AddSchemaInitQueries(db, true, env.Parser()) + srvTopoCounts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + tsv := NewTabletServer(ctx, env, "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, srvTopoCounts) require.Equal(t, StateNotConnected, tsv.sm.State()) dbcfgs := newDBConfigs(db) target := &querypb.Target{ @@ -2585,7 +2751,8 @@ func addTabletServerSupportedQueries(db 
*fakesqldb.DB) { "rollback": {}, fmt.Sprintf(sqlReadAllRedo, "_vt", "_vt"): {}, } - sidecardb.AddSchemaInitQueries(db, true) + parser := sqlparser.NewTestParser() + sidecardb.AddSchemaInitQueries(db, true, parser) for query, result := range queryResultMap { db.AddQuery(query, result) } diff --git a/go/vt/vttablet/tabletserver/testutils_test.go b/go/vt/vttablet/tabletserver/testutils_test.go index 4760558f6ec..464e84ab47f 100644 --- a/go/vt/vttablet/tabletserver/testutils_test.go +++ b/go/vt/vttablet/tabletserver/testutils_test.go @@ -30,7 +30,7 @@ import ( var errRejected = errors.New("rejected") func newDBConfigs(db *fakesqldb.DB) *dbconfigs.DBConfigs { - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params return dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") } diff --git a/go/vt/vttablet/tabletserver/throttle/base/http.go b/go/vt/vttablet/tabletserver/throttle/base/http.go index 6f657766ad1..bbf4662d6cf 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/http.go +++ b/go/vt/vttablet/tabletserver/throttle/base/http.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package base diff --git a/go/vt/vttablet/tabletserver/throttle/base/metric_health.go b/go/vt/vttablet/tabletserver/throttle/base/metric_health.go index e970888bf13..458e8e28264 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/metric_health.go +++ b/go/vt/vttablet/tabletserver/throttle/base/metric_health.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package base diff --git a/go/vt/vttablet/tabletserver/throttle/base/metric_health_test.go b/go/vt/vttablet/tabletserver/throttle/base/metric_health_test.go index d11ecd7b8e5..a1a1ad4e0c0 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/metric_health_test.go +++ b/go/vt/vttablet/tabletserver/throttle/base/metric_health_test.go @@ -1,9 +1,43 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ package base import ( diff --git a/go/vt/vttablet/tabletserver/throttle/base/recent_app.go b/go/vt/vttablet/tabletserver/throttle/base/recent_app.go index 2c629fbff25..64527c4cc1c 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/recent_app.go +++ b/go/vt/vttablet/tabletserver/throttle/base/recent_app.go @@ -1,9 +1,43 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ package base import ( diff --git a/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go b/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go index ff6e1b146d9..3d4c4f95a2e 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go +++ b/go/vt/vttablet/tabletserver/throttle/base/throttle_metric.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package base @@ -30,7 +65,7 @@ var ErrNoSuchMetric = errors.New("No such metric") // ErrInvalidCheckType is an internal error indicating an unknown check type var ErrInvalidCheckType = errors.New("Unknown throttler check type") -// IsDialTCPError sees if th egiven error indicates a TCP issue +// IsDialTCPError sees if the given error indicates a TCP issue func IsDialTCPError(e error) bool { if e == nil { return false diff --git a/go/vt/vttablet/tabletserver/throttle/base/throttle_metric_app.go b/go/vt/vttablet/tabletserver/throttle/base/throttle_metric_app.go index ce77f7068b6..482f319365f 100644 --- a/go/vt/vttablet/tabletserver/throttle/base/throttle_metric_app.go +++ b/go/vt/vttablet/tabletserver/throttle/base/throttle_metric_app.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package base diff --git a/go/vt/vttablet/tabletserver/throttle/check.go b/go/vt/vttablet/tabletserver/throttle/check.go index dd209a0c423..98d887e8342 100644 --- a/go/vt/vttablet/tabletserver/throttle/check.go +++ b/go/vt/vttablet/tabletserver/throttle/check.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package throttle @@ -11,7 +46,6 @@ import ( "fmt" "net/http" "strings" - "sync/atomic" "time" "vitess.io/vitess/go/stats" @@ -24,6 +58,11 @@ const ( selfCheckInterval = 250 * time.Millisecond ) +var ( + statsThrottlerCheckAnyTotal = stats.NewCounter("ThrottlerCheckAnyTotal", "total number of checks") + statsThrottlerCheckAnyError = stats.GetOrNewCounter("ThrottlerCheckAnyError", "total number of failed checks") +) + // CheckFlags provide hints for a check type CheckFlags struct { ReadCheck bool @@ -114,20 +153,18 @@ func (check *ThrottlerCheck) Check(ctx context.Context, appName string, storeTyp } checkResult = check.checkAppMetricResult(ctx, appName, storeType, storeName, metricResultFunc, flags) - atomic.StoreInt64(&check.throttler.lastCheckTimeNano, time.Now().UnixNano()) - - go func(statusCode int) { - stats.GetOrNewCounter("ThrottlerCheckAnyTotal", "total number of checks").Add(1) - stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheckAny%s%sTotal", textutil.SingleWordCamel(storeType), textutil.SingleWordCamel(storeName)), "").Add(1) - - if statusCode != http.StatusOK { - stats.GetOrNewCounter("ThrottlerCheckAnyError", "total number of failed checks").Add(1) - stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheckAny%s%sError", textutil.SingleWordCamel(storeType), textutil.SingleWordCamel(storeName)), "").Add(1) - } - - check.throttler.markRecentApp(appName, remoteAddr) - }(checkResult.StatusCode) - + check.throttler.markRecentApp(appName, remoteAddr) + if !throttlerapp.VitessName.Equals(appName) { + go func(statusCode int) { + statsThrottlerCheckAnyTotal.Add(1) + stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheckAny%s%sTotal", textutil.SingleWordCamel(storeType), textutil.SingleWordCamel(storeName)), "").Add(1) + + if statusCode != http.StatusOK { + statsThrottlerCheckAnyError.Add(1) + stats.GetOrNewCounter(fmt.Sprintf("ThrottlerCheckAny%s%sError", textutil.SingleWordCamel(storeType), textutil.SingleWordCamel(storeName)), "").Add(1) + } + }(checkResult.StatusCode) + 
} return checkResult } @@ -193,6 +230,7 @@ func (check *ThrottlerCheck) SelfChecks(ctx context.Context) { for metricName, metricResult := range check.AggregatedMetrics(ctx) { metricName := metricName metricResult := metricResult + go check.localCheck(ctx, metricName) go check.reportAggregated(metricName, metricResult) } diff --git a/go/vt/vttablet/tabletserver/throttle/check_result.go b/go/vt/vttablet/tabletserver/throttle/check_result.go index 3bc162b623a..41a1b240934 100644 --- a/go/vt/vttablet/tabletserver/throttle/check_result.go +++ b/go/vt/vttablet/tabletserver/throttle/check_result.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package throttle diff --git a/go/vt/vttablet/tabletserver/throttle/client.go b/go/vt/vttablet/tabletserver/throttle/client.go index 41888340b5a..546d75c040d 100644 --- a/go/vt/vttablet/tabletserver/throttle/client.go +++ b/go/vt/vttablet/tabletserver/throttle/client.go @@ -86,7 +86,7 @@ func NewBackgroundClient(throttler *Throttler, appName throttlerapp.Name, checkT // ThrottleCheckOK checks the throttler, and returns 'true' when the throttler is satisfied. // It does not sleep. // The function caches results for a brief amount of time, hence it's safe and efficient to -// be called very frequenty. +// be called very frequently. // The function is not thread safe. 
func (c *Client) ThrottleCheckOK(ctx context.Context, overrideAppName throttlerapp.Name) (throttleCheckOK bool) { if c == nil { @@ -117,7 +117,7 @@ func (c *Client) ThrottleCheckOK(ctx context.Context, overrideAppName throttlera } -// ThrottleCheckOKOrWait checks the throttler; if throttler is satisfied, the function returns 'true' mmediately, +// ThrottleCheckOKOrWait checks the throttler; if throttler is satisfied, the function returns 'true' immediately, // otherwise it briefly sleeps and returns 'false'. // Non-empty appName overrides the default appName. // The function is not thread safe. @@ -129,7 +129,7 @@ func (c *Client) ThrottleCheckOKOrWaitAppName(ctx context.Context, appName throt return ok } -// ThrottleCheckOKOrWait checks the throttler; if throttler is satisfied, the function returns 'true' mmediately, +// ThrottleCheckOKOrWait checks the throttler; if throttler is satisfied, the function returns 'true' immediately, // otherwise it briefly sleeps and returns 'false'. // The function is not thread safe. func (c *Client) ThrottleCheckOKOrWait(ctx context.Context) bool { diff --git a/go/vt/vttablet/tabletserver/throttle/config/config.go b/go/vt/vttablet/tabletserver/throttle/config/config.go index b1f3ad61f80..f6234955cc4 100644 --- a/go/vt/vttablet/tabletserver/throttle/config/config.go +++ b/go/vt/vttablet/tabletserver/throttle/config/config.go @@ -1,17 +1,49 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ -package config +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub -// Instance is the one configuration for the throttler -var Instance = &ConfigurationSettings{} + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ + +package config -// Settings returns the settings of the global instance of Configuration -func Settings() *ConfigurationSettings { - return Instance +// NewConfigurationSettings creates new throttler configuration settings. 
+func NewConfigurationSettings() *ConfigurationSettings { + return &ConfigurationSettings{} } // ConfigurationSettings models a set of configurable values, that can be diff --git a/go/vt/vttablet/tabletserver/throttle/config/mysql_config.go b/go/vt/vttablet/tabletserver/throttle/config/mysql_config.go index 3e3e82adff4..3aa0607fb28 100644 --- a/go/vt/vttablet/tabletserver/throttle/config/mysql_config.go +++ b/go/vt/vttablet/tabletserver/throttle/config/mysql_config.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package config diff --git a/go/vt/vttablet/tabletserver/throttle/config/store_config.go b/go/vt/vttablet/tabletserver/throttle/config/store_config.go index 9a19025df05..7e5594050d9 100644 --- a/go/vt/vttablet/tabletserver/throttle/config/store_config.go +++ b/go/vt/vttablet/tabletserver/throttle/config/store_config.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package config diff --git a/go/vt/vttablet/tabletserver/throttle/mysql.go b/go/vt/vttablet/tabletserver/throttle/mysql.go index 350ad465b73..81a967ddacb 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package throttle @@ -16,9 +51,9 @@ import ( func aggregateMySQLProbes( ctx context.Context, - probes *mysql.Probes, + probes mysql.Probes, clusterName string, - instanceResultsMap mysql.InstanceMetricResultMap, + tabletResultsMap mysql.TabletResultMap, ignoreHostsCount int, IgnoreDialTCPErrors bool, ignoreHostsThreshold float64, @@ -26,13 +61,13 @@ func aggregateMySQLProbes( // probes is known not to change. It can be *replaced*, but not changed. // so it's safe to iterate it probeValues := []float64{} - for _, probe := range *probes { - instanceMetricResult, ok := instanceResultsMap[mysql.GetClusterInstanceKey(clusterName, &probe.Key)] + for _, probe := range probes { + tabletMetricResult, ok := tabletResultsMap[mysql.GetClusterTablet(clusterName, probe.Alias)] if !ok { return base.NoMetricResultYet } - value, err := instanceMetricResult.Get() + value, err := tabletMetricResult.Get() if err != nil { if IgnoreDialTCPErrors && base.IsDialTCPError(err) { continue @@ -42,7 +77,7 @@ func aggregateMySQLProbes( ignoreHostsCount = ignoreHostsCount - 1 continue } - return instanceMetricResult + return tabletMetricResult } // No error diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go b/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go deleted file mode 100644 index adcd6f422fb..00000000000 --- a/go/vt/vttablet/tabletserver/throttle/mysql/instance_key.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - Copyright 2015 Shlomi Noach, courtesy Booking.com - See https://github.com/github/freno/blob/master/LICENSE -*/ - -package mysql - -import ( - "fmt" - "strconv" - "strings" -) - -// InstanceKey is an instance indicator, identified by hostname and port -type InstanceKey struct { - Hostname string - Port int -} - -// SelfInstanceKey is a special indicator for "this instance", e.g. 
denoting the MySQL server associated with local tablet -// The values of this key are immaterial and are intentionally descriptive -var SelfInstanceKey = &InstanceKey{Hostname: "(self)", Port: 1} - -// newRawInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306 -// It expects such format and returns with error if input differs in format -func newRawInstanceKey(hostPort string) (*InstanceKey, error) { - tokens := strings.SplitN(hostPort, ":", 2) - if len(tokens) != 2 { - return nil, fmt.Errorf("Cannot parse InstanceKey from %s. Expected format is host:port", hostPort) - } - instanceKey := &InstanceKey{Hostname: tokens[0]} - var err error - if instanceKey.Port, err = strconv.Atoi(tokens[1]); err != nil { - return instanceKey, fmt.Errorf("Invalid port: %s", tokens[1]) - } - - return instanceKey, nil -} - -// ParseInstanceKey will parse an InstanceKey from a string representation such as 127.0.0.1:3306 or some.hostname -// `defaultPort` is used if `hostPort` does not include a port. -func ParseInstanceKey(hostPort string, defaultPort int) (*InstanceKey, error) { - if !strings.Contains(hostPort, ":") { - return &InstanceKey{Hostname: hostPort, Port: defaultPort}, nil - } - return newRawInstanceKey(hostPort) -} - -// Equals tests equality between this key and another key -func (i *InstanceKey) Equals(other *InstanceKey) bool { - if other == nil { - return false - } - return i.Hostname == other.Hostname && i.Port == other.Port -} - -// SmallerThan returns true if this key is dictionary-smaller than another. -// This is used for consistent sorting/ordering; there's nothing magical about it. 
-func (i *InstanceKey) SmallerThan(other *InstanceKey) bool { - if i.Hostname < other.Hostname { - return true - } - if i.Hostname == other.Hostname && i.Port < other.Port { - return true - } - return false -} - -// IsValid uses simple heuristics to see whether this key represents an actual instance -func (i *InstanceKey) IsValid() bool { - if i.Hostname == "_" { - return false - } - return len(i.Hostname) > 0 && i.Port > 0 -} - -// IsSelf checks if this is the special "self" instance key -func (i *InstanceKey) IsSelf() bool { - if SelfInstanceKey == i { - return true - } - return SelfInstanceKey.Equals(i) -} - -// StringCode returns an official string representation of this key -func (i *InstanceKey) StringCode() string { - return fmt.Sprintf("%s:%d", i.Hostname, i.Port) -} - -// DisplayString returns a user-friendly string representation of this key -func (i *InstanceKey) DisplayString() string { - return i.StringCode() -} - -// String returns a user-friendly string representation of this key -func (i InstanceKey) String() string { - return i.StringCode() -} diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/instance_key_test.go b/go/vt/vttablet/tabletserver/throttle/mysql/instance_key_test.go deleted file mode 100644 index a8d3424c36a..00000000000 --- a/go/vt/vttablet/tabletserver/throttle/mysql/instance_key_test.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - Copyright 2017 GitHub Inc. - - Licensed under MIT License. 
See https://github.com/github/freno/blob/master/LICENSE -*/ - -package mysql - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewRawInstanceKey(t *testing.T) { - { - key, err := newRawInstanceKey("127.0.0.1:3307") - assert.NoError(t, err) - assert.Equal(t, key.Hostname, "127.0.0.1") - assert.Equal(t, key.Port, 3307) - } - { - _, err := newRawInstanceKey("127.0.0.1:abcd") - assert.Error(t, err) - } - { - _, err := newRawInstanceKey("127.0.0.1:") - assert.Error(t, err) - } - { - _, err := newRawInstanceKey("127.0.0.1") - assert.Error(t, err) - } -} - -func TestParseInstanceKey(t *testing.T) { - { - key, err := ParseInstanceKey("127.0.0.1:3307", 3306) - assert.NoError(t, err) - assert.Equal(t, "127.0.0.1", key.Hostname) - assert.Equal(t, 3307, key.Port) - } - { - key, err := ParseInstanceKey("127.0.0.1", 3306) - assert.NoError(t, err) - assert.Equal(t, "127.0.0.1", key.Hostname) - assert.Equal(t, 3306, key.Port) - } -} - -func TestEquals(t *testing.T) { - { - expect := &InstanceKey{Hostname: "127.0.0.1", Port: 3306} - key, err := ParseInstanceKey("127.0.0.1", 3306) - assert.NoError(t, err) - assert.True(t, key.Equals(expect)) - } -} - -func TestStringCode(t *testing.T) { - { - key := &InstanceKey{Hostname: "127.0.0.1", Port: 3306} - stringCode := key.StringCode() - assert.Equal(t, "127.0.0.1:3306", stringCode) - } -} diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/mysql_inventory.go b/go/vt/vttablet/tabletserver/throttle/mysql/mysql_inventory.go index ace9a2853a7..744bcc99a44 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql/mysql_inventory.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/mysql_inventory.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package mysql @@ -10,35 +45,35 @@ import ( "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" ) -// ClusterInstanceKey combines a cluster name with an instance key -type ClusterInstanceKey struct { +// ClusterTablet combines a cluster name with a tablet alias +type ClusterTablet struct { ClusterName string - Key InstanceKey + Alias string } -// GetClusterInstanceKey creates a ClusterInstanceKey object -func GetClusterInstanceKey(clusterName string, key *InstanceKey) ClusterInstanceKey { - return ClusterInstanceKey{ClusterName: clusterName, Key: *key} +// GetClusterTablet creates a GetClusterTablet object +func GetClusterTablet(clusterName string, alias string) ClusterTablet { + return ClusterTablet{ClusterName: clusterName, Alias: alias} } -// InstanceMetricResultMap maps a cluster-instance to a result -type InstanceMetricResultMap map[ClusterInstanceKey]base.MetricResult +// TabletResultMap maps a cluster-tablet to a result +type TabletResultMap map[ClusterTablet]base.MetricResult // Inventory has the operational data about probes, their metrics, and relevant configuration type Inventory struct { - ClustersProbes map[string](*Probes) + ClustersProbes map[string](Probes) IgnoreHostsCount map[string]int IgnoreHostsThreshold map[string]float64 - InstanceKeyMetrics InstanceMetricResultMap + TabletMetrics TabletResultMap } // NewInventory creates a Inventory func NewInventory() *Inventory { inventory := &Inventory{ - ClustersProbes: make(map[string](*Probes)), + ClustersProbes: make(map[string](Probes)), IgnoreHostsCount: make(map[string]int), IgnoreHostsThreshold: make(map[string]float64), - InstanceKeyMetrics: make(map[ClusterInstanceKey]base.MetricResult), + TabletMetrics: make(map[ClusterTablet]base.MetricResult), } return inventory } diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go b/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go index 8c8a5cc4b32..966c7a93d7f 100644 --- 
a/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/mysql_throttle_metric.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package mysql @@ -20,9 +55,9 @@ import ( type MetricsQueryType int const ( - // MetricsQueryTypeDefault indictes the default, internal implementation. Specifically, our throttler runs a replication lag query + // MetricsQueryTypeDefault indicates the default, internal implementation. Specifically, our throttler runs a replication lag query MetricsQueryTypeDefault MetricsQueryType = iota - // MetricsQueryTypeShowGlobal indicatesa SHOW GLOBAL (STATUS|VARIABLES) query + // MetricsQueryTypeShowGlobal indicates SHOW GLOBAL (STATUS|VARIABLES) query MetricsQueryTypeShowGlobal // MetricsQueryTypeSelect indicates a custom SELECT query MetricsQueryTypeSelect @@ -33,7 +68,7 @@ const ( var mysqlMetricCache = cache.New(cache.NoExpiration, 10*time.Second) func getMySQLMetricCacheKey(probe *Probe) string { - return fmt.Sprintf("%s:%s", probe.Key, probe.MetricQuery) + return fmt.Sprintf("%s:%s", probe.Alias, probe.MetricQuery) } func cacheMySQLThrottleMetric(probe *Probe, mySQLThrottleMetric *MySQLThrottleMetric) *MySQLThrottleMetric { @@ -71,10 +106,10 @@ func GetMetricsQueryType(query string) MetricsQueryType { return MetricsQueryTypeUnknown } -// MySQLThrottleMetric has the probed metric for a mysql instance +// MySQLThrottleMetric has the probed metric for a tablet type MySQLThrottleMetric struct { // nolint:revive ClusterName string - Key InstanceKey + Alias string Value float64 Err error } @@ -84,9 +119,9 @@ func NewMySQLThrottleMetric() *MySQLThrottleMetric { return &MySQLThrottleMetric{Value: 0} } -// GetClusterInstanceKey returns the ClusterInstanceKey part of the metric -func (metric *MySQLThrottleMetric) GetClusterInstanceKey() ClusterInstanceKey { - return GetClusterInstanceKey(metric.ClusterName, 
&metric.Key) +// GetClusterTablet returns the ClusterTablet part of the metric +func (metric *MySQLThrottleMetric) GetClusterTablet() ClusterTablet { + return GetClusterTablet(metric.ClusterName, metric.Alias) } // Get implements MetricResult @@ -105,7 +140,7 @@ func ReadThrottleMetric(probe *Probe, clusterName string, overrideGetMetricFunc started := time.Now() mySQLThrottleMetric = NewMySQLThrottleMetric() mySQLThrottleMetric.ClusterName = clusterName - mySQLThrottleMetric.Key = probe.Key + mySQLThrottleMetric.Alias = probe.Alias defer func(metric *MySQLThrottleMetric, started time.Time) { go func() { diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/probe.go b/go/vt/vttablet/tabletserver/throttle/mysql/probe.go index 53b835497b4..8c3e069c0d1 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql/probe.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/probe.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package mysql @@ -14,45 +49,35 @@ import ( // Probe is the minimal configuration required to connect to a MySQL server type Probe struct { - Key InstanceKey + Alias string MetricQuery string Tablet *topodatapb.Tablet - TabletHost string - TabletPort int CacheMillis int QueryInProgress int64 } -// Probes maps instances to probe(s) -type Probes map[InstanceKey](*Probe) +// Probes maps tablet aliases to probe(s) +type Probes map[string](*Probe) // ClusterProbes has the probes for a specific cluster type ClusterProbes struct { ClusterName string IgnoreHostsCount int IgnoreHostsThreshold float64 - InstanceProbes *Probes + TabletProbes Probes } // NewProbes creates Probes -func NewProbes() *Probes { - return &Probes{} +func NewProbes() Probes { + return Probes{} } // NewProbe creates Probe func NewProbe() *Probe { - config := &Probe{ - Key: InstanceKey{}, - } - return config + return &Probe{} } // String returns a human readable string of this struct func (p *Probe) String() string { - return fmt.Sprintf("%s, tablet=%s:%d", p.Key.DisplayString(), p.TabletHost, p.TabletPort) -} - -// Equals checks if this probe has same instance key as another -func (p *Probe) Equals(other *Probe) bool { - return p.Key.Equals(&other.Key) + return fmt.Sprintf("probe alias=%s", p.Alias) } diff --git a/go/vt/vttablet/tabletserver/throttle/mysql/probe_test.go b/go/vt/vttablet/tabletserver/throttle/mysql/probe_test.go index cb63441d419..8f489f39258 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql/probe_test.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql/probe_test.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package mysql @@ -14,6 +49,5 @@ import ( func TestNewProbe(t *testing.T) { c := NewProbe() - assert.Equal(t, "", c.Key.Hostname) - assert.Equal(t, 0, c.Key.Port) + assert.Equal(t, "", c.Alias) } diff --git a/go/vt/vttablet/tabletserver/throttle/mysql_test.go b/go/vt/vttablet/tabletserver/throttle/mysql_test.go index e90f9a69614..15d6feab03f 100644 --- a/go/vt/vttablet/tabletserver/throttle/mysql_test.go +++ b/go/vt/vttablet/tabletserver/throttle/mysql_test.go @@ -1,7 +1,42 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. */ package throttle @@ -17,64 +52,64 @@ import ( ) var ( - key1 = mysql.InstanceKey{Hostname: "10.0.0.1", Port: 3306} - key2 = mysql.InstanceKey{Hostname: "10.0.0.2", Port: 3306} - key3 = mysql.InstanceKey{Hostname: "10.0.0.3", Port: 3306} - key4 = mysql.InstanceKey{Hostname: "10.0.0.4", Port: 3306} - key5 = mysql.InstanceKey{Hostname: "10.0.0.5", Port: 3306} + alias1 = "zone1-0001" + alias2 = "zone1-0002" + alias3 = "zone1-0003" + alias4 = "zone1-0004" + alias5 = "zone1-0005" ) func TestAggregateMySQLProbesNoErrors(t *testing.T) { ctx := context.Background() clusterName := "c0" - key1cluster := mysql.GetClusterInstanceKey(clusterName, &key1) - key2cluster := mysql.GetClusterInstanceKey(clusterName, &key2) - key3cluster := mysql.GetClusterInstanceKey(clusterName, &key3) - key4cluster := mysql.GetClusterInstanceKey(clusterName, &key4) - key5cluster := mysql.GetClusterInstanceKey(clusterName, &key5) - instanceResultsMap := mysql.InstanceMetricResultMap{ + key1cluster := mysql.GetClusterTablet(clusterName, alias1) + key2cluster := mysql.GetClusterTablet(clusterName, alias2) + key3cluster := mysql.GetClusterTablet(clusterName, alias3) + key4cluster := mysql.GetClusterTablet(clusterName, alias4) + key5cluster := mysql.GetClusterTablet(clusterName, alias5) + tabletResultsMap := mysql.TabletResultMap{ key1cluster: base.NewSimpleMetricResult(1.2), key2cluster: base.NewSimpleMetricResult(1.7), key3cluster: base.NewSimpleMetricResult(0.3), key4cluster: base.NewSimpleMetricResult(0.6), key5cluster: 
base.NewSimpleMetricResult(1.1), } - var probes mysql.Probes = map[mysql.InstanceKey](*mysql.Probe){} - for clusterKey := range instanceResultsMap { - probes[clusterKey.Key] = &mysql.Probe{Key: clusterKey.Key} + var probes mysql.Probes = map[string](*mysql.Probe){} + for clusterKey := range tabletResultsMap { + probes[clusterKey.Alias] = &mysql.Probe{Alias: clusterKey.Alias} } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 0, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 0, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.7) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 1, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 1, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.2) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 2, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 2, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.1) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 3, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 3, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 0.6) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 4, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 4, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 0.3) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 5, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 5, false, 0) value, err := 
worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 0.3) @@ -84,54 +119,54 @@ func TestAggregateMySQLProbesNoErrors(t *testing.T) { func TestAggregateMySQLProbesNoErrorsIgnoreHostsThreshold(t *testing.T) { ctx := context.Background() clusterName := "c0" - key1cluster := mysql.GetClusterInstanceKey(clusterName, &key1) - key2cluster := mysql.GetClusterInstanceKey(clusterName, &key2) - key3cluster := mysql.GetClusterInstanceKey(clusterName, &key3) - key4cluster := mysql.GetClusterInstanceKey(clusterName, &key4) - key5cluster := mysql.GetClusterInstanceKey(clusterName, &key5) - instanceResultsMap := mysql.InstanceMetricResultMap{ + key1cluster := mysql.GetClusterTablet(clusterName, alias1) + key2cluster := mysql.GetClusterTablet(clusterName, alias2) + key3cluster := mysql.GetClusterTablet(clusterName, alias3) + key4cluster := mysql.GetClusterTablet(clusterName, alias4) + key5cluster := mysql.GetClusterTablet(clusterName, alias5) + tableteResultsMap := mysql.TabletResultMap{ key1cluster: base.NewSimpleMetricResult(1.2), key2cluster: base.NewSimpleMetricResult(1.7), key3cluster: base.NewSimpleMetricResult(0.3), key4cluster: base.NewSimpleMetricResult(0.6), key5cluster: base.NewSimpleMetricResult(1.1), } - var probes mysql.Probes = map[mysql.InstanceKey](*mysql.Probe){} - for clusterKey := range instanceResultsMap { - probes[clusterKey.Key] = &mysql.Probe{Key: clusterKey.Key} + var probes mysql.Probes = map[string](*mysql.Probe){} + for clusterKey := range tableteResultsMap { + probes[clusterKey.Alias] = &mysql.Probe{Alias: clusterKey.Alias} } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 0, false, 1.0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tableteResultsMap, 0, false, 1.0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.7) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 1, false, 1.0) + worstMetric := 
aggregateMySQLProbes(ctx, probes, clusterName, tableteResultsMap, 1, false, 1.0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.2) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 2, false, 1.0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tableteResultsMap, 2, false, 1.0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.1) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 3, false, 1.0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tableteResultsMap, 3, false, 1.0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 0.6) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 4, false, 1.0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tableteResultsMap, 4, false, 1.0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 0.6) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 5, false, 1.0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tableteResultsMap, 5, false, 1.0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 0.6) @@ -141,56 +176,56 @@ func TestAggregateMySQLProbesNoErrorsIgnoreHostsThreshold(t *testing.T) { func TestAggregateMySQLProbesWithErrors(t *testing.T) { ctx := context.Background() clusterName := "c0" - key1cluster := mysql.GetClusterInstanceKey(clusterName, &key1) - key2cluster := mysql.GetClusterInstanceKey(clusterName, &key2) - key3cluster := mysql.GetClusterInstanceKey(clusterName, &key3) - key4cluster := mysql.GetClusterInstanceKey(clusterName, &key4) - key5cluster := mysql.GetClusterInstanceKey(clusterName, &key5) - instanceResultsMap := mysql.InstanceMetricResultMap{ + key1cluster := mysql.GetClusterTablet(clusterName, alias1) + key2cluster := mysql.GetClusterTablet(clusterName, 
alias2) + key3cluster := mysql.GetClusterTablet(clusterName, alias3) + key4cluster := mysql.GetClusterTablet(clusterName, alias4) + key5cluster := mysql.GetClusterTablet(clusterName, alias5) + tabletResultsMap := mysql.TabletResultMap{ key1cluster: base.NewSimpleMetricResult(1.2), key2cluster: base.NewSimpleMetricResult(1.7), key3cluster: base.NewSimpleMetricResult(0.3), key4cluster: base.NoSuchMetric, key5cluster: base.NewSimpleMetricResult(1.1), } - var probes mysql.Probes = map[mysql.InstanceKey](*mysql.Probe){} - for clusterKey := range instanceResultsMap { - probes[clusterKey.Key] = &mysql.Probe{Key: clusterKey.Key} + var probes mysql.Probes = map[string](*mysql.Probe){} + for clusterKey := range tabletResultsMap { + probes[clusterKey.Alias] = &mysql.Probe{Alias: clusterKey.Alias} } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 0, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 0, false, 0) _, err := worstMetric.Get() assert.Error(t, err) assert.Equal(t, err, base.ErrNoSuchMetric) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 1, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 1, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.7) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 2, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 2, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.2) } - instanceResultsMap[key1cluster] = base.NoSuchMetric + tabletResultsMap[key1cluster] = base.NoSuchMetric { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 0, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 0, false, 0) _, err := worstMetric.Get() assert.Error(t, err) 
assert.Equal(t, err, base.ErrNoSuchMetric) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 1, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 1, false, 0) _, err := worstMetric.Get() assert.Error(t, err) assert.Equal(t, err, base.ErrNoSuchMetric) } { - worstMetric := aggregateMySQLProbes(ctx, &probes, clusterName, instanceResultsMap, 2, false, 0) + worstMetric := aggregateMySQLProbes(ctx, probes, clusterName, tabletResultsMap, 2, false, 0) value, err := worstMetric.Get() assert.NoError(t, err) assert.Equal(t, value, 1.7) diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index b8d84b1ed5e..d59a7c92e7c 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -1,19 +1,52 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This codebase originates from https://github.com/github/freno, See https://github.com/github/freno/blob/master/LICENSE +/* + MIT License + + Copyright (c) 2017 GitHub + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
*/ package throttle import ( "context" - "encoding/json" "errors" "fmt" - "io" "math" - "math/rand" + "math/rand/v2" "net/http" "strconv" "strings" @@ -26,6 +59,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/protoutil" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/timer" @@ -36,6 +70,7 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/heartbeat" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -47,19 +82,20 @@ import ( ) const ( - leaderCheckInterval = 5 * time.Second - mysqlCollectInterval = 250 * time.Millisecond - mysqlDormantCollectInterval = 5 * time.Second - mysqlRefreshInterval = 10 * time.Second - mysqlAggregateInterval = 125 * time.Millisecond + leaderCheckInterval = 5 * time.Second + mysqlCollectInterval = 250 * time.Millisecond // PRIMARY polls replicas + mysqlDormantCollectInterval = 5 * time.Second // PRIMARY polls replicas when dormant (no recent checks) + mysqlRefreshInterval = 10 * time.Second // Refreshing tablet inventory + mysqlAggregateInterval = 125 * time.Millisecond + throttledAppsSnapshotInterval = 5 * time.Second + recentCheckRateLimiterInterval = 1 * time.Second // Ticker assisting in determining when the throttler was last checked - aggregatedMetricsExpiration = 5 * time.Second - throttledAppsSnapshotInterval = 5 * time.Second - recentAppsExpiration = time.Hour * 24 + aggregatedMetricsExpiration = 5 * time.Second + recentAppsExpiration = time.Hour * 24 nonDeprioritizedAppMapExpiration = time.Second - dormantPeriod = time.Minute + dormantPeriod = time.Minute // How long since last check to be considered dormant DefaultAppThrottleDuration = time.Hour DefaultThrottleRatio = 1.0 @@ -75,25 +111,19 @@ var ( throttleTabletTypes = "replica" ) +var ( + 
statsThrottlerHeartbeatRequests = stats.NewCounter("ThrottlerHeartbeatRequests", "heartbeat requests") + statsThrottlerRecentlyChecked = stats.NewCounter("ThrottlerRecentlyChecked", "recently checked") + statsThrottlerProbeRecentlyChecked = stats.NewCounter("ThrottlerProbeRecentlyChecked", "probe recently checked") +) + func init() { servenv.OnParseFor("vtcombo", registerThrottlerFlags) servenv.OnParseFor("vttablet", registerThrottlerFlags) } func registerThrottlerFlags(fs *pflag.FlagSet) { - fs.StringVar(&throttleTabletTypes, "throttle_tablet_types", throttleTabletTypes, "Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included") - - fs.Duration("throttle_threshold", 0, "Replication lag threshold for default lag throttling") - fs.String("throttle_metrics_query", "", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.") - fs.Float64("throttle_metrics_threshold", 0, "Override default throttle threshold, respective to --throttle_metrics_query") - fs.Bool("throttle_check_as_check_self", false, "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") - fs.Bool("throttler-config-via-topo", false, "Deprecated, will be removed in v19. Assumed to be 'true'") - - fs.MarkDeprecated("throttle_threshold", "Replication lag threshold for default lag throttling") - fs.MarkDeprecated("throttle_metrics_query", "Override default heartbeat/lag metric. Use either `SELECT` (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. 
Set -throttle_metrics_threshold respectively.") - fs.MarkDeprecated("throttle_metrics_threshold", "Override default throttle threshold, respective to --throttle_metrics_query") - fs.MarkDeprecated("throttle_check_as_check_self", "Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)") - fs.MarkDeprecated("throttler-config-via-topo", "Assumed to be 'true'") + fs.StringVar(&throttleTabletTypes, "throttle_tablet_types", throttleTabletTypes, "Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included") } var ( @@ -130,18 +160,25 @@ type Throttler struct { isLeader atomic.Bool isOpen atomic.Bool - env tabletenv.Env - pool *connpool.Pool - tabletTypeFunc func() topodatapb.TabletType - ts throttlerTopoService - srvTopoServer srvtopo.Server - heartbeatWriter heartbeat.HeartbeatWriter - - // recentCheckTickerValue is an ever increasing number, incrementing once per second. - recentCheckTickerValue int64 - // recentCheckValue is set to match or exceed recentCheckTickerValue whenever a "check" was made (other than by the throttler itself). - // when recentCheckValue < recentCheckTickerValue that means there hasn't been a recent check. 
- recentCheckValue int64 + leaderCheckInterval time.Duration + mysqlCollectInterval time.Duration + mysqlDormantCollectInterval time.Duration + mysqlRefreshInterval time.Duration + mysqlAggregateInterval time.Duration + throttledAppsSnapshotInterval time.Duration + dormantPeriod time.Duration + + configSettings *config.ConfigurationSettings + env tabletenv.Env + pool *connpool.Pool + tabletTypeFunc func() topodatapb.TabletType + ts throttlerTopoService + srvTopoServer srvtopo.Server + heartbeatWriter heartbeat.HeartbeatWriter + overrideTmClient tmclient.TabletManagerClient + + recentCheckRateLimiter *timer.RateLimiter + recentCheckDormantDiff int64 throttleTabletTypesMap map[topodatapb.TabletType]bool @@ -162,14 +199,13 @@ type Throttler struct { recentApps *cache.Cache metricsHealth *cache.Cache - lastCheckTimeNano int64 + initMutex sync.Mutex + enableMutex sync.Mutex + cancelOpenContext context.CancelFunc + cancelEnableContext context.CancelFunc + throttledAppsMutex sync.Mutex - initMutex sync.Mutex - enableMutex sync.Mutex - cancelOpenContext context.CancelFunc - cancelEnableContext context.CancelFunc - throttledAppsMutex sync.Mutex - watchSrvKeyspaceOnce sync.Once + readSelfThrottleMetric func(context.Context, *mysql.Probe) *mysql.MySQLThrottleMetric // overwritten by unit test nonLowPriorityAppRequestsThrottled *cache.Cache httpClient *http.Client @@ -202,8 +238,8 @@ func NewThrottler(env tabletenv.Env, srvTopoServer srvtopo.Server, ts *topo.Serv ts: ts, heartbeatWriter: heartbeatWriter, pool: connpool.NewPool(env, "ThrottlerPool", tabletenv.ConnPoolConfig{ - Size: 2, - IdleTimeoutSeconds: env.Config().OltpReadPool.IdleTimeoutSeconds, + Size: 2, + IdleTimeout: env.Config().OltpReadPool.IdleTimeout, }), } @@ -224,7 +260,19 @@ func NewThrottler(env tabletenv.Env, srvTopoServer srvtopo.Server, ts *topo.Serv throttler.initThrottleTabletTypes() throttler.check = NewThrottlerCheck(throttler) + throttler.leaderCheckInterval = leaderCheckInterval + 
throttler.mysqlCollectInterval = mysqlCollectInterval + throttler.mysqlDormantCollectInterval = mysqlDormantCollectInterval + throttler.mysqlRefreshInterval = mysqlRefreshInterval + throttler.mysqlAggregateInterval = mysqlAggregateInterval + throttler.throttledAppsSnapshotInterval = throttledAppsSnapshotInterval + throttler.dormantPeriod = dormantPeriod + throttler.recentCheckDormantDiff = int64(throttler.dormantPeriod / recentCheckRateLimiterInterval) + throttler.StoreMetricsThreshold(defaultThrottleLagThreshold.Seconds()) //default + throttler.readSelfThrottleMetric = func(ctx context.Context, p *mysql.Probe) *mysql.MySQLThrottleMetric { + return throttler.readSelfMySQLThrottleMetric(ctx, p) + } return throttler } @@ -267,7 +315,7 @@ func (throttler *Throttler) GetMetricsThreshold() float64 { func (throttler *Throttler) initConfig() { log.Infof("Throttler: initializing config") - config.Instance = &config.ConfigurationSettings{ + throttler.configSettings = &config.ConfigurationSettings{ Stores: config.StoresSettings{ MySQL: config.MySQLConfigurationSettings{ IgnoreDialTCPErrors: true, @@ -275,12 +323,12 @@ func (throttler *Throttler) initConfig() { }, }, } - config.Instance.Stores.MySQL.Clusters[selfStoreName] = &config.MySQLClusterConfigurationSettings{ + throttler.configSettings.Stores.MySQL.Clusters[selfStoreName] = &config.MySQLClusterConfigurationSettings{ MetricQuery: throttler.GetMetricsQuery(), ThrottleThreshold: &throttler.MetricsThreshold, IgnoreHostsCount: 0, } - config.Instance.Stores.MySQL.Clusters[shardStoreName] = &config.MySQLClusterConfigurationSettings{ + throttler.configSettings.Stores.MySQL.Clusters[shardStoreName] = &config.MySQLClusterConfigurationSettings{ MetricQuery: throttler.GetMetricsQuery(), ThrottleThreshold: &throttler.MetricsThreshold, IgnoreHostsCount: 0, @@ -296,7 +344,7 @@ func (throttler *Throttler) readThrottlerConfig(ctx context.Context) (*topodatap return throttler.normalizeThrottlerConfig(srvks.ThrottlerConfig), nil } -// 
normalizeThrottlerConfig noramlizes missing throttler config information, as needed. +// normalizeThrottlerConfig normalizes missing throttler config information, as needed. func (throttler *Throttler) normalizeThrottlerConfig(throttlerConfig *topodatapb.ThrottlerConfig) *topodatapb.ThrottlerConfig { if throttlerConfig == nil { throttlerConfig = &topodatapb.ThrottlerConfig{} @@ -314,9 +362,10 @@ func (throttler *Throttler) normalizeThrottlerConfig(throttlerConfig *topodatapb } func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspace, err error) bool { - log.Infof("Throttler: WatchSrvKeyspaceCallback called with: %+v", srvks) if err != nil { - log.Errorf("WatchSrvKeyspaceCallback error: %v", err) + if !topo.IsErrType(err, topo.Interrupted) && !errors.Is(err, context.Canceled) { + log.Errorf("WatchSrvKeyspaceCallback error: %v", err) + } return false } throttlerConfig := throttler.normalizeThrottlerConfig(srvks.ThrottlerConfig) @@ -325,7 +374,6 @@ func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspa // Throttler is enabled and we should apply the config change // through Operate() or else we get into race conditions. go func() { - log.Infof("Throttler: submitting a throttler config apply message with: %+v", throttlerConfig) throttler.throttlerConfigChan <- throttlerConfig }() } else { @@ -354,9 +402,9 @@ func (throttler *Throttler) applyThrottlerConfig(ctx context.Context, throttlerC throttler.ThrottleApp(appRule.Name, protoutil.TimeFromProto(appRule.ExpiresAt).UTC(), appRule.Ratio, appRule.Exempt) } if throttlerConfig.Enabled { - go throttler.Enable(ctx) + go throttler.Enable() } else { - go throttler.Disable(ctx) + go throttler.Disable() } } @@ -384,49 +432,95 @@ func (throttler *Throttler) IsRunning() bool { // Enable activates the throttler probes; when enabled, the throttler responds to check queries based on // the collected metrics. 
-func (throttler *Throttler) Enable(ctx context.Context) bool { +// The function returns a WaitGroup that can be used to wait for the throttler to be fully disabled, ie when +// the Operate() goroutine function terminates and caches are invalidated. +func (throttler *Throttler) Enable() *sync.WaitGroup { throttler.enableMutex.Lock() defer throttler.enableMutex.Unlock() - isEnabled := throttler.isEnabled.Swap(true) - if isEnabled { + if wasEnabled := throttler.isEnabled.Swap(true); wasEnabled { log.Infof("Throttler: already enabled") - return false + return nil } log.Infof("Throttler: enabling") - ctx, throttler.cancelEnableContext = context.WithCancel(ctx) + wg := &sync.WaitGroup{} + var ctx context.Context + ctx, throttler.cancelEnableContext = context.WithCancel(context.Background()) throttler.check.SelfChecks(ctx) - throttler.Operate(ctx) + throttler.Operate(ctx, wg) // Make a one-time request for a lease of heartbeats - go throttler.heartbeatWriter.RequestHeartbeats() + throttler.requestHeartbeats() - return true + return wg } -// Disable deactivates the probes and associated operations. When disabled, the throttler reponds to check +// Disable deactivates the probes and associated operations. When disabled, the throttler responds to check // queries with "200 OK" irrespective of lag or any other metrics. 
-func (throttler *Throttler) Disable(ctx context.Context) bool { +func (throttler *Throttler) Disable() bool { throttler.enableMutex.Lock() defer throttler.enableMutex.Unlock() - isEnabled := throttler.isEnabled.Swap(false) - if !isEnabled { + if wasEnabled := throttler.isEnabled.Swap(false); !wasEnabled { log.Infof("Throttler: already disabled") return false } log.Infof("Throttler: disabling") // _ = throttler.updateConfig(ctx, false, throttler.MetricsThreshold.Get()) // TODO(shlomi) - throttler.aggregatedMetrics.Flush() - throttler.recentApps.Flush() - throttler.nonLowPriorityAppRequestsThrottled.Flush() - // we do not flush throttler.throttledApps because this is data submitted by the user; the user expects the data to survive a disable+enable throttler.cancelEnableContext() return true } +// retryReadAndApplyThrottlerConfig() is called by Open(), read throttler config from topo, applies it, and starts watching +// for topo changes. +// But also, we're in an Open() function, which blocks state manager's operation, and affects +// opening of all other components. We thus read the throttler config in the background. +// However, we want to handle a situation where the read errors out. +// So we kick a loop that keeps retrying reading the config, for as long as this throttler is open. +func (throttler *Throttler) retryReadAndApplyThrottlerConfig(ctx context.Context) { + var watchSrvKeyspaceOnce sync.Once + retryInterval := 10 * time.Second + retryTicker := time.NewTicker(retryInterval) + defer retryTicker.Stop() + for { + if !throttler.IsOpen() { + // Throttler is not open so no need to keep retrying. 
+ log.Warningf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") + return + } + + requestCtx, requestCancel := context.WithTimeout(ctx, 5*time.Second) + defer requestCancel() + throttlerConfig, err := throttler.readThrottlerConfig(requestCtx) + if err == nil { + log.Infof("Throttler.retryReadAndApplyThrottlerConfig(): success reading throttler config: %+v", throttlerConfig) + // It's possible that during a retry-sleep, the throttler is closed and opened again, leading + // to two (or more) instances of this goroutine. That's not a big problem; it's fine if all + // attempt to read the throttler config; but we just want to ensure they don't step on each other + // while applying the changes. + throttler.initMutex.Lock() + defer throttler.initMutex.Unlock() + throttler.applyThrottlerConfig(ctx, throttlerConfig) // may issue an Enable + go watchSrvKeyspaceOnce.Do(func() { + // We start watching SrvKeyspace only after we know it's been created. Now is that time! + // We watch using the given ctx, which is cancelled when the throttler is Close()d. + throttler.srvTopoServer.WatchSrvKeyspace(ctx, throttler.cell, throttler.keyspace, throttler.WatchSrvKeyspaceCallback) + }) + return + } + log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): error reading throttler config. Will retry in %v. Err=%+v", retryInterval, err) + select { + case <-ctx.Done(): + // Throttler is not open so no need to keep retrying. + log.Infof("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") + return + case <-retryTicker.C: + } + } +} + // Open opens database pool and initializes the schema func (throttler *Throttler) Open() error { log.Infof("Throttler: started execution of Open. 
Acquiring initMutex lock") @@ -451,52 +545,7 @@ func (throttler *Throttler) Open() error { throttler.ThrottleApp("always-throttled-app", time.Now().Add(time.Hour*24*365*10), DefaultThrottleRatio, false) - log.Infof("Throttler: throttler-config-via-topo detected") - // We want to read throttler config from topo and apply it. - // But also, we're in an Open() function, which blocks state manager's operation, and affects - // opening of all other components. We thus read the throttler config in the background. - // However, we want to handle a situation where the read errors out. - // So we kick a loop that keeps retrying reading the config, for as long as this throttler is open. - retryReadAndApplyThrottlerConfig := func(ctx context.Context) { - retryInterval := 10 * time.Second - retryTicker := time.NewTicker(retryInterval) - defer retryTicker.Stop() - for { - if !throttler.IsOpen() { - // Throttler is not open so no need to keep retrying. - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") - return - } - - requestCtx, requestCancel := context.WithTimeout(ctx, 5*time.Second) - defer requestCancel() - throttlerConfig, err := throttler.readThrottlerConfig(requestCtx) - if err == nil { - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): success reading throttler config: %+v", throttlerConfig) - // It's possible that during a retry-sleep, the throttler is closed and opened again, leading - // to two (or more) instances of this goroutine. That's not a big problem; it's fine if all - // attempt to read the throttler config; but we just want to ensure they don't step on each other - // while applying the changes. - throttler.initMutex.Lock() - defer throttler.initMutex.Unlock() - throttler.applyThrottlerConfig(ctx, throttlerConfig) // may issue an Enable - go throttler.watchSrvKeyspaceOnce.Do(func() { - // We start watching SrvKeyspace only after we know it's been created. Now is that time! 
- throttler.srvTopoServer.WatchSrvKeyspace(context.Background(), throttler.cell, throttler.keyspace, throttler.WatchSrvKeyspaceCallback) - }) - return - } - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): error reading throttler config. Will retry in %v. Err=%+v", retryInterval, err) - select { - case <-ctx.Done(): - // Throttler is not open so no need to keep retrying. - log.Errorf("Throttler.retryReadAndApplyThrottlerConfig(): throttler no longer seems to be open, exiting") - return - case <-retryTicker.C: - } - } - } - go retryReadAndApplyThrottlerConfig(ctx) + go throttler.retryReadAndApplyThrottlerConfig(ctx) return nil } @@ -512,19 +561,63 @@ func (throttler *Throttler) Close() { log.Infof("Throttler: throttler is not open") return } - ctx := context.Background() - throttler.Disable(ctx) + throttler.Disable() throttler.isLeader.Store(false) - log.Infof("Throttler: closing pool") - throttler.pool.Close() - throttler.cancelOpenContext() + // The below " != nil " checks are relevant to unit tests, where perhaps not all + // fields are supplied. + if throttler.pool != nil { + log.Infof("Throttler: closing pool") + throttler.pool.Close() + } + if throttler.cancelOpenContext != nil { + throttler.cancelOpenContext() + } log.Infof("Throttler: finished execution of Close") } +// requestHeartbeats sends a heartbeat lease request to the heartbeat writer. +// This action is recorded in stats. +func (throttler *Throttler) requestHeartbeats() { + if !throttler.isLeader.Load() { + return + } + go throttler.heartbeatWriter.RequestHeartbeats() + statsThrottlerHeartbeatRequests.Add(1) +} + +// stimulatePrimaryThrottler sends a check request to the primary tablet in the shard, to stimulate +// it to request for heartbeats. 
+func (throttler *Throttler) stimulatePrimaryThrottler(ctx context.Context, tmClient tmclient.TabletManagerClient) error { + // Some reasonable timeout, to ensure we release connections even if they're hanging (otherwise grpc-go keeps polling those connections forever) + ctx, cancel := context.WithTimeout(ctx, throttler.dormantPeriod) + defer cancel() + + tabletAliases, err := throttler.ts.FindAllTabletAliasesInShard(ctx, throttler.keyspace, throttler.shard) + if err != nil { + return err + } + for _, tabletAlias := range tabletAliases { + tablet, err := throttler.ts.GetTablet(ctx, tabletAlias) + if err != nil { + return err + } + if tablet.Type != topodatapb.TabletType_PRIMARY { + continue + } + req := &tabletmanagerdatapb.CheckThrottlerRequest{AppName: throttlerapp.ThrottlerStimulatorName.String()} + _, err = tmClient.CheckThrottler(ctx, tablet.Tablet, req) + if err != nil { + log.Errorf("stimulatePrimaryThrottler: %+v", err) + } + return err + } + return nil +} + func (throttler *Throttler) generateSelfMySQLThrottleMetricFunc(ctx context.Context, probe *mysql.Probe) func() *mysql.MySQLThrottleMetric { f := func() *mysql.MySQLThrottleMetric { - return throttler.readSelfMySQLThrottleMetric(ctx, probe) + return throttler.readSelfThrottleMetric(ctx, probe) } return f } @@ -533,7 +626,7 @@ func (throttler *Throttler) generateSelfMySQLThrottleMetricFunc(ctx context.Cont func (throttler *Throttler) readSelfMySQLThrottleMetric(ctx context.Context, probe *mysql.Probe) *mysql.MySQLThrottleMetric { metric := &mysql.MySQLThrottleMetric{ ClusterName: selfStoreName, - Key: *mysql.SelfInstanceKey, + Alias: "", Value: 0, Err: nil, } @@ -559,7 +652,7 @@ func (throttler *Throttler) readSelfMySQLThrottleMetric(ctx context.Context, pro switch metricsQueryType { case mysql.MetricsQueryTypeSelect: // We expect a single row, single column result. 
- // The "for" iteration below is just a way to get first result without knowning column name + // The "for" iteration below is just a way to get first result without knowing column name for k := range row { metric.Value, metric.Err = row.ToFloat64(k) } @@ -586,133 +679,157 @@ func (throttler *Throttler) ThrottledApps() (result []base.AppThrottle) { return result } -// isDormant returns true when the last check was more than dormantPeriod ago +// isDormant returns true when the last check was more than dormantPeriod ago. +// Instead of measuring actual time, we use the fact recentCheckRateLimiter ticks every second, and take +// a logical diff, counting the number of ticks since the last check. This is a good enough approximation. func (throttler *Throttler) isDormant() bool { - lastCheckTime := time.Unix(0, atomic.LoadInt64(&throttler.lastCheckTimeNano)) - return time.Since(lastCheckTime) > dormantPeriod + return throttler.recentCheckRateLimiter.Diff() > throttler.recentCheckDormantDiff } // Operate is the main entry point for the throttler operation and logic. It will // run the probes, collect metrics, refresh inventory, etc. 
-func (throttler *Throttler) Operate(ctx context.Context) { +func (throttler *Throttler) Operate(ctx context.Context, wg *sync.WaitGroup) { tickers := [](*timer.SuspendableTicker){} addTicker := func(d time.Duration) *timer.SuspendableTicker { t := timer.NewSuspendableTicker(d, false) tickers = append(tickers, t) return t } - leaderCheckTicker := addTicker(leaderCheckInterval) - mysqlCollectTicker := addTicker(mysqlCollectInterval) - mysqlDormantCollectTicker := addTicker(mysqlDormantCollectInterval) - mysqlRefreshTicker := addTicker(mysqlRefreshInterval) - mysqlAggregateTicker := addTicker(mysqlAggregateInterval) - throttledAppsTicker := addTicker(throttledAppsSnapshotInterval) - recentCheckTicker := addTicker(time.Second) - - tmClient := tmclient.NewTabletManagerClient() - + leaderCheckTicker := addTicker(throttler.leaderCheckInterval) + mysqlCollectTicker := addTicker(throttler.mysqlCollectInterval) + mysqlDormantCollectTicker := addTicker(throttler.mysqlDormantCollectInterval) + mysqlRefreshTicker := addTicker(throttler.mysqlRefreshInterval) + mysqlAggregateTicker := addTicker(throttler.mysqlAggregateInterval) + throttledAppsTicker := addTicker(throttler.throttledAppsSnapshotInterval) + primaryStimulatorRateLimiter := timer.NewRateLimiter(throttler.dormantPeriod) + throttler.recentCheckRateLimiter = timer.NewRateLimiter(recentCheckRateLimiterInterval) + + wg.Add(1) go func() { + defer func() { + throttler.recentCheckRateLimiter.Stop() + primaryStimulatorRateLimiter.Stop() + throttler.aggregatedMetrics.Flush() + throttler.recentApps.Flush() + throttler.nonLowPriorityAppRequestsThrottled.Flush() + wg.Done() + }() + // we do not flush throttler.throttledApps because this is data submitted by the user; the user expects the data to survive a disable+enable + defer log.Infof("Throttler: Operate terminated, tickers stopped") - defer tmClient.Close() for _, t := range tickers { defer t.Stop() // since we just started the tickers now, speed up the ticks by forcing an 
immediate tick go t.TickNow() } + tmClient := throttler.overrideTmClient + if tmClient == nil { + // This is the normal production behavior. + // throttler.overrideTmClient != nil only in unit testing + tmClient = tmclient.NewTabletManagerClient() + defer tmClient.Close() + } + for { select { case <-ctx.Done(): return case <-leaderCheckTicker.C: - { - func() { - throttler.initMutex.Lock() - defer throttler.initMutex.Unlock() - - // sparse - shouldBeLeader := false - if throttler.IsOpen() { - if throttler.tabletTypeFunc() == topodatapb.TabletType_PRIMARY { - shouldBeLeader = true - } - } + func() { + throttler.initMutex.Lock() + defer throttler.initMutex.Unlock() - isLeader := throttler.isLeader.Swap(shouldBeLeader) - transitionedIntoLeader := false - if shouldBeLeader && !isLeader { - log.Infof("Throttler: transition into leadership") - transitionedIntoLeader = true - } - if !shouldBeLeader && isLeader { - log.Infof("Throttler: transition out of leadership") - } + // sparse + shouldBeLeader := false + if throttler.IsOpen() && throttler.tabletTypeFunc() == topodatapb.TabletType_PRIMARY { + shouldBeLeader = true + } - if transitionedIntoLeader { - // transitioned into leadership, let's speed up the next 'refresh' and 'collect' ticks - go mysqlRefreshTicker.TickNow() - go throttler.heartbeatWriter.RequestHeartbeats() - } - }() - } + isLeader := throttler.isLeader.Swap(shouldBeLeader) + transitionedIntoLeader := false + if shouldBeLeader && !isLeader { + log.Infof("Throttler: transition into leadership") + transitionedIntoLeader = true + } + if !shouldBeLeader && isLeader { + log.Infof("Throttler: transition out of leadership") + } + + if transitionedIntoLeader { + // transitioned into leadership, let's speed up the next 'refresh' and 'collect' ticks + go mysqlRefreshTicker.TickNow() + throttler.requestHeartbeats() + } + }() case <-mysqlCollectTicker.C: - { - if throttler.IsOpen() { - // frequent - if !throttler.isDormant() { - throttler.collectMySQLMetrics(ctx, 
tmClient) + if throttler.IsOpen() { + // frequent + // Always collect self metrics: + throttler.collectMySQLMetrics(ctx, tmClient, func(clusterName string) bool { + return clusterName == selfStoreName + }) + if !throttler.isDormant() { + throttler.collectMySQLMetrics(ctx, tmClient, func(clusterName string) bool { + return clusterName != selfStoreName + }) + } + // + if throttler.recentCheckRateLimiter.Diff() <= 1 { // recently checked + if !throttler.isLeader.Load() { + // This is a replica, and has just recently been checked. + // We want to proactively "stimulate" the primary throttler to renew the heartbeat lease. + // The intent is to "wake up" an on-demand heartbeat lease. We don't need to poke the + // primary for every single time this replica was checked, so we rate limit. The idea is that + // once heartbeats update, more checks will be successful, this replica will be "recently checked" + // more than not, and the primary throttler will pick that up, extending the on-demand lease + // even further. + // Another outcome is that the primary will go out of "dormant" mode, and start collecting + // replica metrics more frequently. 
+ primaryStimulatorRateLimiter.Do( + func() error { + return throttler.stimulatePrimaryThrottler(ctx, tmClient) + }) } } + } case <-mysqlDormantCollectTicker.C: - { - if throttler.IsOpen() { - // infrequent - if throttler.isDormant() { - throttler.collectMySQLMetrics(ctx, tmClient) - } + if throttler.IsOpen() { + // infrequent + if throttler.isDormant() { + throttler.collectMySQLMetrics(ctx, tmClient, func(clusterName string) bool { + return clusterName != selfStoreName + }) } } case metric := <-throttler.mysqlThrottleMetricChan: - { - // incoming MySQL metric, frequent, as result of collectMySQLMetrics() - throttler.mysqlInventory.InstanceKeyMetrics[metric.GetClusterInstanceKey()] = metric - } + // incoming MySQL metric, frequent, as result of collectMySQLMetrics() + throttler.mysqlInventory.TabletMetrics[metric.GetClusterTablet()] = metric case <-mysqlRefreshTicker.C: - { - // sparse - if throttler.IsOpen() { - throttler.refreshMySQLInventory(ctx) - } + // sparse + if throttler.IsOpen() { + throttler.refreshMySQLInventory(ctx) } case probes := <-throttler.mysqlClusterProbesChan: - { - // incoming structural update, sparse, as result of refreshMySQLInventory() - throttler.updateMySQLClusterProbes(ctx, probes) - } + // incoming structural update, sparse, as result of refreshMySQLInventory() + throttler.updateMySQLClusterProbes(ctx, probes) case <-mysqlAggregateTicker.C: - { - if throttler.IsOpen() { - throttler.aggregateMySQLMetrics(ctx) - } + if throttler.IsOpen() { + throttler.aggregateMySQLMetrics(ctx) } case <-throttledAppsTicker.C: - { - if throttler.IsOpen() { - go throttler.expireThrottledApps() - } + if throttler.IsOpen() { + go throttler.expireThrottledApps() } case throttlerConfig := <-throttler.throttlerConfigChan: throttler.applyThrottlerConfig(ctx, throttlerConfig) - case <-recentCheckTicker.C: - // Increment recentCheckTickerValue by one. 
- atomic.AddInt64(&throttler.recentCheckTickerValue, 1) } } }() } -func (throttler *Throttler) generateTabletHTTPProbeFunction(ctx context.Context, tmClient tmclient.TabletManagerClient, clusterName string, probe *mysql.Probe) (probeFunc func() *mysql.MySQLThrottleMetric) { +func (throttler *Throttler) generateTabletProbeFunction(ctx context.Context, clusterName string, tmClient tmclient.TabletManagerClient, probe *mysql.Probe) (probeFunc func() *mysql.MySQLThrottleMetric) { return func() *mysql.MySQLThrottleMetric { // Some reasonable timeout, to ensure we release connections even if they're hanging (otherwise grpc-go keeps polling those connections forever) ctx, cancel := context.WithTimeout(ctx, 4*mysqlCollectInterval) @@ -721,94 +838,66 @@ func (throttler *Throttler) generateTabletHTTPProbeFunction(ctx context.Context, // Hit a tablet's `check-self` via HTTP, and convert its CheckResult JSON output into a MySQLThrottleMetric mySQLThrottleMetric := mysql.NewMySQLThrottleMetric() mySQLThrottleMetric.ClusterName = clusterName - mySQLThrottleMetric.Key = probe.Key - - { - req := &tabletmanagerdatapb.CheckThrottlerRequest{} // We leave AppName empty; it will default to VitessName anyway, and we can save some proto space - if resp, gRPCErr := tmClient.CheckThrottler(ctx, probe.Tablet, req); gRPCErr == nil { - mySQLThrottleMetric.Value = resp.Value - if resp.StatusCode == http.StatusInternalServerError { - mySQLThrottleMetric.Err = fmt.Errorf("Status code: %d", resp.StatusCode) - } - if resp.RecentlyChecked { - // We have just probed a tablet, and it reported back that someone just recently "check"ed it. - // We therefore renew the heartbeats lease. - go throttler.heartbeatWriter.RequestHeartbeats() - } - return mySQLThrottleMetric - - // } else { - // In v18 we need to be backwards compatible. If we have a gRPC error it might be because the replica is v17 and - // does not support CheckThrottler() RPC. This is why: - // 1. We fall back to HTTP - // 2. 
We don't log an error (it would just spam the logs) - // In v19 we will remove all HTTP code, and will *potentially* log an error. - // log.Errorf("error in GRPC call to tablet %v: %v", probe.Tablet.GetAlias(), gRPCErr) - } - } - // Backwards compatibility to v17: if the underlying tablets do not support CheckThrottler gRPC, attempt a HTTP cehck: - tabletCheckSelfURL := fmt.Sprintf("http://%s:%d/throttler/check-self?app=%s", probe.TabletHost, probe.TabletPort, throttlerapp.VitessName) - resp, err := throttler.httpClient.Get(tabletCheckSelfURL) - if err != nil { - mySQLThrottleMetric.Err = err - return mySQLThrottleMetric - } - defer resp.Body.Close() - b, err := io.ReadAll(resp.Body) - if err != nil { - mySQLThrottleMetric.Err = err + mySQLThrottleMetric.Alias = probe.Alias + + if probe.Tablet == nil { + mySQLThrottleMetric.Err = fmt.Errorf("found nil tablet reference for alias %v, hostname %v", probe.Alias, probe.Tablet.Hostname) return mySQLThrottleMetric } - checkResult := &CheckResult{} - if err := json.Unmarshal(b, checkResult); err != nil { - mySQLThrottleMetric.Err = err + req := &tabletmanagerdatapb.CheckThrottlerRequest{} // We leave AppName empty; it will default to VitessName anyway, and we can save some proto space + resp, gRPCErr := tmClient.CheckThrottler(ctx, probe.Tablet, req) + if gRPCErr != nil { + mySQLThrottleMetric.Err = fmt.Errorf("gRPC error accessing tablet %v. Err=%v", probe.Alias, gRPCErr) return mySQLThrottleMetric } - mySQLThrottleMetric.Value = checkResult.Value - - if checkResult.StatusCode == http.StatusInternalServerError { - mySQLThrottleMetric.Err = fmt.Errorf("Status code: %d", checkResult.StatusCode) + mySQLThrottleMetric.Value = resp.Value + if resp.StatusCode == http.StatusInternalServerError { + mySQLThrottleMetric.Err = fmt.Errorf("Status code: %d", resp.StatusCode) } - if checkResult.RecentlyChecked { + if resp.RecentlyChecked { // We have just probed a tablet, and it reported back that someone just recently "check"ed it. 
// We therefore renew the heartbeats lease. - go throttler.heartbeatWriter.RequestHeartbeats() + throttler.requestHeartbeats() + statsThrottlerProbeRecentlyChecked.Add(1) } return mySQLThrottleMetric } } -func (throttler *Throttler) collectMySQLMetrics(ctx context.Context, tmClient tmclient.TabletManagerClient) error { +func (throttler *Throttler) collectMySQLMetrics(ctx context.Context, tmClient tmclient.TabletManagerClient, includeCluster func(clusterName string) bool) error { // synchronously, get lists of probes for clusterName, probes := range throttler.mysqlInventory.ClustersProbes { + if !includeCluster(clusterName) { + continue + } clusterName := clusterName - probes := probes - go func() { - // probes is known not to change. It can be *replaced*, but not changed. - // so it's safe to iterate it - for _, probe := range *probes { - probe := probe - go func() { - // Avoid querying the same server twice at the same time. If previous read is still there, - // we avoid re-reading it. - if !atomic.CompareAndSwapInt64(&probe.QueryInProgress, 0, 1) { - return - } - defer atomic.StoreInt64(&probe.QueryInProgress, 0) - - var throttleMetricFunc func() *mysql.MySQLThrottleMetric - if clusterName == selfStoreName { - // Throttler is probing its own tablet's metrics: - throttleMetricFunc = throttler.generateSelfMySQLThrottleMetricFunc(ctx, probe) - } else { - // Throttler probing other tablets: - throttleMetricFunc = throttler.generateTabletHTTPProbeFunction(ctx, tmClient, clusterName, probe) - } - throttleMetrics := mysql.ReadThrottleMetric(probe, clusterName, throttleMetricFunc) - throttler.mysqlThrottleMetricChan <- throttleMetrics - }() - } - }() + // probes is known not to change. It can be *replaced*, but not changed. + // so it's safe to iterate it + for _, probe := range probes { + go func(probe *mysql.Probe) { + // Avoid querying the same server twice at the same time. If previous read is still there, + // we avoid re-reading it. 
+ if !atomic.CompareAndSwapInt64(&probe.QueryInProgress, 0, 1) { + return + } + defer atomic.StoreInt64(&probe.QueryInProgress, 0) + + var throttleMetricFunc func() *mysql.MySQLThrottleMetric + if clusterName == selfStoreName { + // Throttler is probing its own tablet's metrics: + throttleMetricFunc = throttler.generateSelfMySQLThrottleMetricFunc(ctx, probe) + } else { + // Throttler probing other tablets: + throttleMetricFunc = throttler.generateTabletProbeFunction(ctx, clusterName, tmClient, probe) + } + throttleMetrics := mysql.ReadThrottleMetric(probe, clusterName, throttleMetricFunc) + select { + case <-ctx.Done(): + return + case throttler.mysqlThrottleMetricChan <- throttleMetrics: + } + }(probe) + } } return nil } @@ -818,85 +907,96 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { // distribute the query/threshold from the throttler down to the cluster settings and from there to the probes metricsQuery := throttler.GetMetricsQuery() metricsThreshold := throttler.MetricsThreshold.Load() - addInstanceKey := func(tablet *topodatapb.Tablet, tabletHost string, tabletPort int, key *mysql.InstanceKey, clusterName string, clusterSettings *config.MySQLClusterConfigurationSettings, probes *mysql.Probes) { + addProbe := func(alias string, tablet *topodatapb.Tablet, clusterName string, clusterSettings *config.MySQLClusterConfigurationSettings, probes mysql.Probes) bool { for _, ignore := range clusterSettings.IgnoreHosts { - if strings.Contains(key.StringCode(), ignore) { - log.Infof("Throttler: instance key ignored: %+v", key) - return + if strings.Contains(alias, ignore) { + log.Infof("Throttler: tablet ignored: %+v", alias) + return false } } - if !key.IsValid() && !key.IsSelf() { - log.Infof("Throttler: read invalid instance key: [%+v] for cluster %+v", key, clusterName) - return + if clusterName != selfStoreName { + if alias == "" { + log.Errorf("Throttler: got empty alias for cluster: %+v", clusterName) + return false + } + if 
tablet == nil { + log.Errorf("Throttler: got nil tablet for alias: %v in cluster: %+v", alias, clusterName) + return false + } } probe := &mysql.Probe{ - Key: *key, + Alias: alias, Tablet: tablet, - TabletHost: tabletHost, - TabletPort: tabletPort, MetricQuery: clusterSettings.MetricQuery, CacheMillis: clusterSettings.CacheMillis, } - (*probes)[*key] = probe + probes[alias] = probe + return true } - for clusterName, clusterSettings := range config.Settings().Stores.MySQL.Clusters { + attemptWriteProbes := func(clusterProbes *mysql.ClusterProbes) error { + select { + case <-ctx.Done(): + return ctx.Err() + case throttler.mysqlClusterProbesChan <- clusterProbes: + return nil + } + } + + for clusterName, clusterSettings := range throttler.configSettings.Stores.MySQL.Clusters { clusterName := clusterName - clusterSettings := clusterSettings clusterSettings.MetricQuery = metricsQuery clusterSettings.ThrottleThreshold.Store(metricsThreshold) + + clusterSettingsCopy := *clusterSettings // config may dynamically change, but internal structure (config.Settings().Stores.MySQL.Clusters in our case) // is immutable and can only be _replaced_. Hence, it's safe to read in a goroutine: - go func() { - throttler.mysqlClusterThresholds.Set(clusterName, math.Float64frombits(clusterSettings.ThrottleThreshold.Load()), cache.DefaultExpiration) + collect := func() error { + throttler.mysqlClusterThresholds.Set(clusterName, math.Float64frombits(clusterSettingsCopy.ThrottleThreshold.Load()), cache.DefaultExpiration) clusterProbes := &mysql.ClusterProbes{ ClusterName: clusterName, - IgnoreHostsCount: clusterSettings.IgnoreHostsCount, - InstanceProbes: mysql.NewProbes(), + IgnoreHostsCount: clusterSettingsCopy.IgnoreHostsCount, + TabletProbes: mysql.NewProbes(), } if clusterName == selfStoreName { // special case: just looking at this tablet's MySQL server. // We will probe this "cluster" (of one server) is a special way. 
- addInstanceKey(nil, "", 0, mysql.SelfInstanceKey, clusterName, clusterSettings, clusterProbes.InstanceProbes) - throttler.mysqlClusterProbesChan <- clusterProbes - return + addProbe("", nil, clusterName, &clusterSettingsCopy, clusterProbes.TabletProbes) + return attemptWriteProbes(clusterProbes) } if !throttler.isLeader.Load() { // This tablet may have used to be the primary, but it isn't now. It may have a recollection // of previous clusters it used to probe. It may have recollection of specific probes for such clusters. - // This now ensures any existing cluster probes are overrridden with an empty list of probes. - // `clusterProbes` was created above as empty, and identificable via `clusterName`. This will in turn + // This now ensures any existing cluster probes are overridden with an empty list of probes. + // `clusterProbes` was created above as empty, and identifiable via `clusterName`. This will in turn // be used to overwrite throttler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] in // updateMySQLClusterProbes(). - throttler.mysqlClusterProbesChan <- clusterProbes + return attemptWriteProbes(clusterProbes) // not the leader (primary tablet)? Then no more work for us. 
- return } // The primary tablet is also in charge of collecting the shard's metrics - err := func() error { - ctx, cancel := context.WithTimeout(ctx, mysqlRefreshInterval) - defer cancel() + ctx, cancel := context.WithTimeout(ctx, mysqlRefreshInterval) + defer cancel() - tabletAliases, err := throttler.ts.FindAllTabletAliasesInShard(ctx, throttler.keyspace, throttler.shard) + tabletAliases, err := throttler.ts.FindAllTabletAliasesInShard(ctx, throttler.keyspace, throttler.shard) + if err != nil { + return err + } + for _, tabletAlias := range tabletAliases { + tablet, err := throttler.ts.GetTablet(ctx, tabletAlias) if err != nil { return err } - for _, tabletAlias := range tabletAliases { - tablet, err := throttler.ts.GetTablet(ctx, tabletAlias) - if err != nil { - return err - } - if throttler.throttleTabletTypesMap[tablet.Type] { - key := mysql.InstanceKey{Hostname: tablet.MysqlHostname, Port: int(tablet.MysqlPort)} - addInstanceKey(tablet.Tablet, tablet.Hostname, int(tablet.PortMap["vt"]), &key, clusterName, clusterSettings, clusterProbes.InstanceProbes) - } + if throttler.throttleTabletTypesMap[tablet.Type] { + addProbe(topoproto.TabletAliasString(tabletAlias), tablet.Tablet, clusterName, &clusterSettingsCopy, clusterProbes.TabletProbes) } - throttler.mysqlClusterProbesChan <- clusterProbes - return nil - }() - if err != nil { + } + return attemptWriteProbes(clusterProbes) + } + go func() { + if err := collect(); err != nil { log.Errorf("refreshMySQLInventory: %+v", err) } }() @@ -906,7 +1006,7 @@ func (throttler *Throttler) refreshMySQLInventory(ctx context.Context) error { // synchronous update of inventory func (throttler *Throttler) updateMySQLClusterProbes(ctx context.Context, clusterProbes *mysql.ClusterProbes) error { - throttler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] = clusterProbes.InstanceProbes + throttler.mysqlInventory.ClustersProbes[clusterProbes.ClusterName] = clusterProbes.TabletProbes 
throttler.mysqlInventory.IgnoreHostsCount[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsCount throttler.mysqlInventory.IgnoreHostsThreshold[clusterProbes.ClusterName] = clusterProbes.IgnoreHostsThreshold return nil @@ -918,7 +1018,7 @@ func (throttler *Throttler) aggregateMySQLMetrics(ctx context.Context) error { metricName := fmt.Sprintf("mysql/%s", clusterName) ignoreHostsCount := throttler.mysqlInventory.IgnoreHostsCount[clusterName] ignoreHostsThreshold := throttler.mysqlInventory.IgnoreHostsThreshold[clusterName] - aggregatedMetric := aggregateMySQLProbes(ctx, probes, clusterName, throttler.mysqlInventory.InstanceKeyMetrics, ignoreHostsCount, config.Settings().Stores.MySQL.IgnoreDialTCPErrors, ignoreHostsThreshold) + aggregatedMetric := aggregateMySQLProbes(ctx, probes, clusterName, throttler.mysqlInventory.TabletMetrics, ignoreHostsCount, throttler.configSettings.Stores.MySQL.IgnoreDialTCPErrors, ignoreHostsThreshold) throttler.aggregatedMetrics.Set(metricName, aggregatedMetric, cache.DefaultExpiration) } return nil @@ -960,7 +1060,7 @@ func (throttler *Throttler) expireThrottledApps() { } } -// ThrottleApp instructs the throttler to begin throttling an app, to som eperiod and with some ratio. +// ThrottleApp instructs the throttler to begin throttling an app, to some period and with some ratio. 
func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, ratio float64, exempt bool) (appThrottle *base.AppThrottle) { throttler.throttledAppsMutex.Lock() defer throttler.throttledAppsMutex.Unlock() @@ -996,7 +1096,7 @@ func (throttler *Throttler) ThrottleApp(appName string, expireAt time.Time, rati func (throttler *Throttler) UnthrottleApp(appName string) (appThrottle *base.AppThrottle) { throttler.throttledApps.Delete(appName) // the app is likely to check - go throttler.heartbeatWriter.RequestHeartbeats() + throttler.requestHeartbeats() return base.NewAppThrottle(appName, time.Now(), 0, false) } @@ -1142,21 +1242,27 @@ func (throttler *Throttler) checkStore(ctx context.Context, appName string, stor // continuous and do not generate a substantial load. return okMetricCheckResult } - if !flags.SkipRequestHeartbeats && !throttlerapp.VitessName.Equals(appName) { - go throttler.heartbeatWriter.RequestHeartbeats() + + checkResult = throttler.check.Check(ctx, appName, "mysql", storeName, remoteAddr, flags) + + shouldRequestHeartbeats := !flags.SkipRequestHeartbeats + if throttlerapp.VitessName.Equals(appName) { + // Override: "vitess" app never requests heartbeats. + shouldRequestHeartbeats = false + } + if throttlerapp.ThrottlerStimulatorName.Equals(appName) { + // Override: "throttler-stimulator" app always requests heartbeats. + shouldRequestHeartbeats = true + } + + if shouldRequestHeartbeats { + throttler.requestHeartbeats() + throttler.recentCheckRateLimiter.DoEmpty() // This check was made by someone other than the throttler itself, i.e. this came from online-ddl or vreplication or other. // We mark the fact that someone just made a check. If this is a REPLICA or RDONLY tables, this will be reported back // to the PRIMARY so that it knows it must renew the heartbeat lease.
- atomic.StoreInt64(&throttler.recentCheckValue, 1+atomic.LoadInt64(&throttler.recentCheckTickerValue)) - } - checkResult = throttler.check.Check(ctx, appName, "mysql", storeName, remoteAddr, flags) - - if atomic.LoadInt64(&throttler.recentCheckValue) >= atomic.LoadInt64(&throttler.recentCheckTickerValue) { - // This indicates someone, who is not "vitess" ie not internal to the throttling logic, did a _recent_ `check`. - // This could be online-ddl, or vreplication or whoever else. - // If this tablet is a REPLICA or RDONLY, we want to advertise to the PRIMARY that someone did a recent check, - // so that the PRIMARY knows it must renew the heartbeat lease. checkResult.RecentlyChecked = true + statsThrottlerRecentlyChecked.Add(1) } return checkResult diff --git a/go/vt/vttablet/tabletserver/throttle/throttler_test.go b/go/vt/vttablet/tabletserver/throttle/throttler_test.go index c47466df522..98f94439a3d 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler_test.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler_test.go @@ -1,7 +1,17 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ package throttle @@ -9,6 +19,8 @@ package throttle import ( "context" "fmt" + "net/http" + "sync" "sync/atomic" "testing" "time" @@ -18,9 +30,15 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/config" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/mysql" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + "vitess.io/vitess/go/vt/vttablet/tmclient" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -28,10 +46,43 @@ const ( waitForProbesTimeout = 30 * time.Second ) +type fakeTMClient struct { + tmclient.TabletManagerClient + appNames []string + + mu sync.Mutex +} + +func (c *fakeTMClient) Close() { +} + +func (c *fakeTMClient) CheckThrottler(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CheckThrottlerRequest) (*tabletmanagerdatapb.CheckThrottlerResponse, error) { + resp := &tabletmanagerdatapb.CheckThrottlerResponse{ + StatusCode: http.StatusOK, + Value: 0, + Threshold: 1, + RecentlyChecked: false, + } + c.mu.Lock() + defer c.mu.Unlock() + c.appNames = append(c.appNames, request.AppName) + return resp, nil +} + +func (c *fakeTMClient) AppNames() []string { + c.mu.Lock() + defer c.mu.Unlock() + return c.appNames +} + type FakeTopoServer struct { } func (ts *FakeTopoServer) GetTablet(ctx context.Context, alias *topodatapb.TabletAlias) (*topo.TabletInfo, error) { + tabletType := topodatapb.TabletType_PRIMARY + if alias.Uid != 100 { + tabletType = topodatapb.TabletType_REPLICA + } tablet := &topo.TabletInfo{ Tablet: &topodatapb.Tablet{ Alias: alias, @@ -39,7 +90,7 @@ func (ts *FakeTopoServer) GetTablet(ctx context.Context, alias *topodatapb.Table MysqlHostname: "127.0.0.1", MysqlPort: 3306, PortMap: 
map[string]int32{"vt": 5000}, - Type: topodatapb.TabletType_REPLICA, + Type: tabletType, }, } return tablet, nil @@ -47,8 +98,9 @@ func (ts *FakeTopoServer) GetTablet(ctx context.Context, alias *topodatapb.Table func (ts *FakeTopoServer) FindAllTabletAliasesInShard(ctx context.Context, keyspace, shard string) ([]*topodatapb.TabletAlias, error) { aliases := []*topodatapb.TabletAlias{ - {Cell: "zone1", Uid: 100}, - {Cell: "zone2", Uid: 101}, + {Cell: "fakezone1", Uid: 100}, + {Cell: "fakezone2", Uid: 101}, + {Cell: "fakezone3", Uid: 103}, } return aliases, nil } @@ -59,15 +111,83 @@ func (ts *FakeTopoServer) GetSrvKeyspace(ctx context.Context, cell, keyspace str } type FakeHeartbeatWriter struct { + requests atomic.Int64 +} + +func (w *FakeHeartbeatWriter) RequestHeartbeats() { + w.requests.Add(1) } -func (w FakeHeartbeatWriter) RequestHeartbeats() { +func (w *FakeHeartbeatWriter) Requests() int64 { + return w.requests.Load() +} + +func newTestThrottler() *Throttler { + metricsQuery := "select 1" + configSettings := config.NewConfigurationSettings() + configSettings.Stores.MySQL.Clusters = map[string]*config.MySQLClusterConfigurationSettings{ + selfStoreName: {}, + shardStoreName: {}, + } + for _, s := range configSettings.Stores.MySQL.Clusters { + s.MetricQuery = metricsQuery + s.ThrottleThreshold = &atomic.Uint64{} + s.ThrottleThreshold.Store(1) + } + env := tabletenv.NewEnv(vtenv.NewTestEnv(), nil, "TabletServerTest") + throttler := &Throttler{ + mysqlClusterProbesChan: make(chan *mysql.ClusterProbes), + mysqlClusterThresholds: cache.New(cache.NoExpiration, 0), + heartbeatWriter: &FakeHeartbeatWriter{}, + ts: &FakeTopoServer{}, + mysqlInventory: mysql.NewInventory(), + pool: connpool.NewPool(env, "ThrottlerPool", tabletenv.ConnPoolConfig{}), + tabletTypeFunc: func() topodatapb.TabletType { return topodatapb.TabletType_PRIMARY }, + overrideTmClient: &fakeTMClient{}, + } + throttler.configSettings = configSettings + throttler.mysqlThrottleMetricChan = make(chan 
*mysql.MySQLThrottleMetric) + throttler.mysqlInventoryChan = make(chan *mysql.Inventory, 1) + throttler.mysqlClusterProbesChan = make(chan *mysql.ClusterProbes) + throttler.throttlerConfigChan = make(chan *topodatapb.ThrottlerConfig) + throttler.mysqlInventory = mysql.NewInventory() + + throttler.throttledApps = cache.New(cache.NoExpiration, 0) + throttler.mysqlClusterThresholds = cache.New(cache.NoExpiration, 0) + throttler.aggregatedMetrics = cache.New(10*aggregatedMetricsExpiration, 0) + throttler.recentApps = cache.New(recentAppsExpiration, 0) + throttler.metricsHealth = cache.New(cache.NoExpiration, 0) + throttler.nonLowPriorityAppRequestsThrottled = cache.New(nonDeprioritizedAppMapExpiration, 0) + throttler.metricsQuery.Store(metricsQuery) + throttler.initThrottleTabletTypes() + throttler.check = NewThrottlerCheck(throttler) + + // High contention & racy intervals: + throttler.leaderCheckInterval = 10 * time.Millisecond + throttler.mysqlCollectInterval = 10 * time.Millisecond + throttler.mysqlDormantCollectInterval = 10 * time.Millisecond + throttler.mysqlRefreshInterval = 10 * time.Millisecond + throttler.mysqlAggregateInterval = 10 * time.Millisecond + throttler.throttledAppsSnapshotInterval = 10 * time.Millisecond + throttler.dormantPeriod = 5 * time.Second + throttler.recentCheckDormantDiff = int64(throttler.dormantPeriod / recentCheckRateLimiterInterval) + + throttler.readSelfThrottleMetric = func(ctx context.Context, p *mysql.Probe) *mysql.MySQLThrottleMetric { + return &mysql.MySQLThrottleMetric{ + ClusterName: selfStoreName, + Alias: "", + Value: 1, + Err: nil, + } + } + + return throttler } func TestIsAppThrottled(t *testing.T) { throttler := Throttler{ throttledApps: cache.New(cache.NoExpiration, 0), - heartbeatWriter: FakeHeartbeatWriter{}, + heartbeatWriter: &FakeHeartbeatWriter{}, } assert.False(t, throttler.IsAppThrottled("app1")) assert.False(t, throttler.IsAppThrottled("app2")) @@ -97,7 +217,7 @@ func TestIsAppExempted(t *testing.T) { 
throttler := Throttler{ throttledApps: cache.New(cache.NoExpiration, 0), - heartbeatWriter: FakeHeartbeatWriter{}, + heartbeatWriter: &FakeHeartbeatWriter{}, } assert.False(t, throttler.IsAppExempted("app1")) assert.False(t, throttler.IsAppExempted("app2")) @@ -129,17 +249,18 @@ func TestIsAppExempted(t *testing.T) { // `PRIMARY` tablet, probes other tablets). On the leader, the list is expected to be non-empty. func TestRefreshMySQLInventory(t *testing.T) { metricsQuery := "select 1" - config.Settings().Stores.MySQL.Clusters = map[string]*config.MySQLClusterConfigurationSettings{ + configSettings := config.NewConfigurationSettings() + clusters := map[string]*config.MySQLClusterConfigurationSettings{ selfStoreName: {}, "ks1": {}, "ks2": {}, } - clusters := config.Settings().Stores.MySQL.Clusters for _, s := range clusters { s.MetricQuery = metricsQuery s.ThrottleThreshold = &atomic.Uint64{} s.ThrottleThreshold.Store(1) } + configSettings.Stores.MySQL.Clusters = clusters throttler := &Throttler{ mysqlClusterProbesChan: make(chan *mysql.ClusterProbes), @@ -147,20 +268,21 @@ func TestRefreshMySQLInventory(t *testing.T) { ts: &FakeTopoServer{}, mysqlInventory: mysql.NewInventory(), } + throttler.configSettings = configSettings throttler.metricsQuery.Store(metricsQuery) throttler.initThrottleTabletTypes() validateClusterProbes := func(t *testing.T, ctx context.Context) { testName := fmt.Sprintf("leader=%t", throttler.isLeader.Load()) t.Run(testName, func(t *testing.T) { - // validateProbesCount expectes number of probes according to cluster name and throttler's leadership status - validateProbesCount := func(t *testing.T, clusterName string, probes *mysql.Probes) { + // validateProbesCount expects number of probes according to cluster name and throttler's leadership status + validateProbesCount := func(t *testing.T, clusterName string, probes mysql.Probes) { if clusterName == selfStoreName { - assert.Equal(t, 1, len(*probes)) + assert.Equal(t, 1, len(probes)) } else if 
throttler.isLeader.Load() { - assert.NotZero(t, len(*probes)) + assert.NotZero(t, len(probes)) } else { - assert.Empty(t, *probes) + assert.Empty(t, probes) } } t.Run("waiting for probes", func(t *testing.T) { @@ -170,7 +292,7 @@ func TestRefreshMySQLInventory(t *testing.T) { for { select { case probes := <-throttler.mysqlClusterProbesChan: - // Worth noting that in this unit test, the throttler is _closed_. Its own Operate() function does + // Worth noting that in this unit test, the throttler is _closed_ and _disabled_. Its own Operate() function does // not run, and therefore there is none but us to both populate `mysqlClusterProbesChan` as well as // read from it. We do not compete here with any other goroutine. assert.NotNil(t, probes) @@ -178,7 +300,7 @@ func TestRefreshMySQLInventory(t *testing.T) { throttler.updateMySQLClusterProbes(ctx, probes) numClusterProbesResults++ - validateProbesCount(t, probes.ClusterName, probes.InstanceProbes) + validateProbesCount(t, probes.ClusterName, probes.TabletProbes) if numClusterProbesResults == len(clusters) { // Achieved our goal @@ -219,3 +341,248 @@ func TestRefreshMySQLInventory(t *testing.T) { validateClusterProbes(t, ctx) }) } + +// runThrottler opens and enables the throttler, thereby making it run the Operate() function, for a given amount of time. +// Optionally, running a given function halfway while the throttler is still open and running. 
+func runThrottler(t *testing.T, ctx context.Context, throttler *Throttler, timeout time.Duration, f func(*testing.T, context.Context)) { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + assert.False(t, throttler.IsOpen()) + assert.False(t, throttler.IsEnabled()) + + throttler.isOpen.Swap(true) + defer throttler.isOpen.Swap(false) + assert.True(t, throttler.IsOpen()) + assert.False(t, throttler.IsEnabled()) + + wg := throttler.Enable() + require.NotNil(t, wg) + defer wg.Wait() + defer throttler.Disable() + assert.True(t, throttler.IsEnabled()) + + // Enabling again does nothing: + wg2 := throttler.Enable() + assert.Nil(t, wg2) + + sleepTime := 3 * time.Second + if timeout/2 < sleepTime { + sleepTime = timeout / 2 + } + if f != nil { + select { + case <-ctx.Done(): + return + case <-time.After(sleepTime): + f(t, ctx) + } + } + + <-ctx.Done() + assert.Error(t, ctx.Err()) + + throttler.Disable() + assert.False(t, throttler.IsEnabled()) +} + +// TestRace merely lets the throttler run with aggressive intervals for a few seconds, so as to detect race conditions. +// This is relevant to `go test -race` +func TestRace(t *testing.T) { + throttler := newTestThrottler() + runThrottler(t, context.Background(), throttler, 5*time.Second, nil) +} + +// TestProbesWhileOperating enables a throttler for a few seconds, and afterwards expects to find probes and metrics. 
+func TestProbesWhileOperating(t *testing.T) { + throttler := newTestThrottler() + + tmClient, ok := throttler.overrideTmClient.(*fakeTMClient) + require.True(t, ok) + assert.Empty(t, tmClient.AppNames()) + + t.Run("aggregated", func(t *testing.T) { + assert.Equal(t, 0, throttler.aggregatedMetrics.ItemCount()) + }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + runThrottler(t, ctx, throttler, time.Minute, func(t *testing.T, ctx context.Context) { + t.Run("aggregated", func(t *testing.T) { + assert.Equal(t, 2, throttler.aggregatedMetrics.ItemCount()) // flushed upon Disable() + aggr := throttler.aggregatedMetricsSnapshot() + assert.Equal(t, 2, len(aggr)) // "self" and "shard" clusters + for clusterName, metricResult := range aggr { + val, err := metricResult.Get() + assert.NoError(t, err) + switch clusterName { + case "mysql/self": + assert.Equal(t, float64(1), val) + case "mysql/shard": + assert.Equal(t, float64(0), val) + default: + assert.Failf(t, "unknown clusterName", "%v", clusterName) + } + } + assert.NotEmpty(t, tmClient.AppNames()) + // The throttler here emulates a PRIMARY tablet, and therefore should probe the replicas using + // the "vitess" app name. + uniqueNames := map[string]int{} + for _, appName := range tmClient.AppNames() { + uniqueNames[appName]++ + } + // PRIMARY throttler probes replicas with empty app name, which is then + // interpreted as "vitess" name. + _, ok := uniqueNames[""] + assert.Truef(t, ok, "%+v", uniqueNames) + // And that's the only app we expect to see. + assert.Equalf(t, 1, len(uniqueNames), "%+v", uniqueNames) + + cancel() // end test early + }) + }) +} + +// TestProbesPostDisable runs the throttler for some time, and then investigates the internal throttler maps and values. 
+func TestProbesPostDisable(t *testing.T) { + throttler := newTestThrottler() + runThrottler(t, context.Background(), throttler, 2*time.Second, nil) + + probes := throttler.mysqlInventory.ClustersProbes + assert.NotEmpty(t, probes) + + selfProbes := probes[selfStoreName] + t.Run("self", func(t *testing.T) { + assert.NotEmpty(t, selfProbes) + require.Equal(t, 1, len(selfProbes)) // should always be true once refreshMySQLInventory() runs + probe, ok := selfProbes[""] + assert.True(t, ok) + assert.NotNil(t, probe) + + assert.Equal(t, "", probe.Alias) + assert.Nil(t, probe.Tablet) + assert.Equal(t, "select 1", probe.MetricQuery) + assert.Zero(t, atomic.LoadInt64(&probe.QueryInProgress)) + }) + + shardProbes := probes[shardStoreName] + t.Run("shard", func(t *testing.T) { + assert.NotEmpty(t, shardProbes) + assert.Equal(t, 2, len(shardProbes)) // see fake FindAllTabletAliasesInShard above + for _, probe := range shardProbes { + require.NotNil(t, probe) + assert.NotEmpty(t, probe.Alias) + assert.NotNil(t, probe.Tablet) + assert.Equal(t, "select 1", probe.MetricQuery) + assert.Zero(t, atomic.LoadInt64(&probe.QueryInProgress)) + } + }) + + t.Run("metrics", func(t *testing.T) { + assert.Equal(t, 3, len(throttler.mysqlInventory.TabletMetrics)) // 1 self tablet + 2 shard tablets + }) + + t.Run("aggregated", func(t *testing.T) { + assert.Zero(t, throttler.aggregatedMetrics.ItemCount()) // flushed upon Disable() + aggr := throttler.aggregatedMetricsSnapshot() + assert.Empty(t, aggr) + }) +} + +func TestDormant(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + throttler := newTestThrottler() + + heartbeatWriter, ok := throttler.heartbeatWriter.(*FakeHeartbeatWriter) + assert.True(t, ok) + assert.Zero(t, heartbeatWriter.Requests()) // once upon Enable() + + runThrottler(t, ctx, throttler, time.Minute, func(t *testing.T, ctx context.Context) { + assert.True(t, throttler.isDormant()) + assert.EqualValues(t, 1, heartbeatWriter.Requests()) 
// once upon Enable() + flags := &CheckFlags{} + throttler.CheckByType(ctx, throttlerapp.VitessName.String(), "", flags, ThrottleCheckSelf) + go func() { + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(time.Second): + assert.True(t, throttler.isDormant()) + assert.EqualValues(t, 1, heartbeatWriter.Requests()) // "vitess" name does not cause heartbeat requests + } + throttler.CheckByType(ctx, throttlerapp.ThrottlerStimulatorName.String(), "", flags, ThrottleCheckSelf) + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(time.Second): + assert.False(t, throttler.isDormant()) + assert.Greater(t, heartbeatWriter.Requests(), int64(1)) + } + throttler.CheckByType(ctx, throttlerapp.OnlineDDLName.String(), "", flags, ThrottleCheckSelf) + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(time.Second): + assert.False(t, throttler.isDormant()) + assert.Greater(t, heartbeatWriter.Requests(), int64(2)) + } + + // Dormant period + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(throttler.dormantPeriod): + assert.True(t, throttler.isDormant()) + } + cancel() // end test early + }() + }) +} + +func TestReplica(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + throttler := newTestThrottler() + throttler.dormantPeriod = time.Minute + throttler.tabletTypeFunc = func() topodatapb.TabletType { return topodatapb.TabletType_REPLICA } + + tmClient, ok := throttler.overrideTmClient.(*fakeTMClient) + require.True(t, ok) + assert.Empty(t, tmClient.AppNames()) + + runThrottler(t, ctx, throttler, time.Minute, func(t *testing.T, ctx context.Context) { + assert.Empty(t, tmClient.AppNames()) + flags := &CheckFlags{} + throttler.CheckByType(ctx, throttlerapp.VitessName.String(), "", flags, 
ThrottleCheckSelf) + go func() { + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(time.Second): + assert.Empty(t, tmClient.AppNames()) + } + throttler.CheckByType(ctx, throttlerapp.OnlineDDLName.String(), "", flags, ThrottleCheckSelf) + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(time.Second): + appNames := tmClient.AppNames() + assert.NotEmpty(t, appNames) + assert.Containsf(t, appNames, throttlerapp.ThrottlerStimulatorName.String(), "%+v", appNames) + assert.Equalf(t, 1, len(appNames), "%+v", appNames) + } + throttler.CheckByType(ctx, throttlerapp.OnlineDDLName.String(), "", flags, ThrottleCheckSelf) + select { + case <-ctx.Done(): + require.FailNow(t, "context expired before testing completed") + case <-time.After(time.Second): + // Due to stimulation rate limiting, we shouldn't see a 2nd CheckThrottler request. + appNames := tmClient.AppNames() + assert.Equalf(t, 1, len(appNames), "%+v", appNames) + } + cancel() // end test early + }() + }) +} diff --git a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go index cc86ad0620b..7594df6c1b2 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go +++ b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app.go @@ -42,8 +42,9 @@ func (n Name) Concatenate(other Name) Name { const ( // DefaultName is the app name used by vitess when app doesn't indicate its name - DefaultName Name = "default" - VitessName Name = "vitess" + DefaultName Name = "default" + VitessName Name = "vitess" + ThrottlerStimulatorName Name = "throttler-stimulator" TableGCName Name = "tablegc" OnlineDDLName Name = "online-ddl" @@ -73,7 +74,7 @@ var ( ) // ExemptFromChecks returns 'true' for apps that should skip the throttler checks. The throttler should -// always repsond with automated "OK" to those apps, without delay. 
These apps also do not cause a heartbeat renewal. +// always respond with automated "OK" to those apps, without delay. These apps also do not cause a heartbeat renewal. func ExemptFromChecks(appName string) bool { return exemptFromChecks[appName] } diff --git a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go index bd14624f49b..c468009c793 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go +++ b/go/vt/vttablet/tabletserver/throttle/throttlerapp/app_test.go @@ -1,7 +1,17 @@ /* - Copyright 2017 GitHub Inc. +Copyright 2023 The Vitess Authors. - Licensed under MIT License. See https://github.com/github/freno/blob/master/LICENSE +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ package throttlerapp diff --git a/go/vt/vttablet/tabletserver/tx/api.go b/go/vt/vttablet/tabletserver/tx/api.go index a06923776c0..a392e530ffa 100644 --- a/go/vt/vttablet/tabletserver/tx/api.go +++ b/go/vt/vttablet/tabletserver/tx/api.go @@ -126,7 +126,7 @@ func (p *Properties) RecordQuery(query string) { func (p *Properties) InTransaction() bool { return p != nil } // String returns a printable version of the transaction -func (p *Properties) String(sanitize bool) string { +func (p *Properties) String(sanitize bool, parser *sqlparser.Parser) string { if p == nil { return "" } @@ -135,7 +135,7 @@ func (p *Properties) String(sanitize bool) string { sb := strings.Builder{} for _, query := range p.Queries { if sanitize { - query, _ = sqlparser.RedactSQLQuery(query) + query, _ = parser.RedactSQLQuery(query) } sb.WriteString(query) sb.WriteString(";") diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index fe8f1aa0b6e..7e8ecc06a75 100644 --- a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -97,7 +97,7 @@ func NewTxEngine(env tabletenv.Env) *TxEngine { config := env.Config() te := &TxEngine{ env: env, - shutdownGracePeriod: config.GracePeriods.ShutdownSeconds.Get(), + shutdownGracePeriod: config.GracePeriods.Shutdown, reservedConnStats: env.Exporter().NewTimings("ReservedConnections", "Reserved connections stats", "operation"), } limiter := txlimiter.New(env) @@ -124,8 +124,8 @@ func NewTxEngine(env tabletenv.Env) *TxEngine { // the TxPreparedPool. 
te.preparedPool = NewTxPreparedPool(config.TxPool.Size - 2) readPool := connpool.NewPool(env, "TxReadPool", tabletenv.ConnPoolConfig{ - Size: 3, - IdleTimeoutSeconds: env.Config().TxPool.IdleTimeoutSeconds, + Size: 3, + IdleTimeout: env.Config().TxPool.IdleTimeout, }) te.twoPC = NewTwoPC(readPool) te.state = NotServing diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index 6ddf2f5a9d3..3c3a8a4eb4f 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" "github.com/stretchr/testify/assert" @@ -43,12 +44,12 @@ func TestTxEngineClose(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() ctx := context.Background() - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(db) - config.TxPool.Size = 10 - _ = config.Oltp.TxTimeoutSeconds.Set("100ms") - _ = config.GracePeriods.ShutdownSeconds.Set("0s") - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + cfg.TxPool.Size = 10 + cfg.Oltp.TxTimeout = 100 * time.Millisecond + cfg.GracePeriods.Shutdown = 0 + te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) // Normal close. 
te.AcceptReadWrite() @@ -149,9 +150,9 @@ func TestTxEngineBegin(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) for _, exec := range []func() (int64, string, error){ func() (int64, string, error) { @@ -195,9 +196,9 @@ func TestTxEngineRenewFails(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) te.AcceptReadOnly() options := &querypb.ExecuteOptions{} connID, _, err := te.ReserveBegin(ctx, options, nil, nil) @@ -530,12 +531,12 @@ func TestWithInnerTests(outerT *testing.T) { } func setupTxEngine(db *fakesqldb.DB) *TxEngine { - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(db) - config.TxPool.Size = 10 - config.Oltp.TxTimeoutSeconds.Set("100ms") - _ = config.GracePeriods.ShutdownSeconds.Set("0s") - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + cfg.TxPool.Size = 10 + cfg.Oltp.TxTimeout = 100 * time.Millisecond + cfg.GracePeriods.Shutdown = 0 + te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) return te } @@ -565,9 +566,9 @@ func TestTxEngineFailReserve(t *testing.T) { db := setUpQueryExecutorTest(t) defer db.Close() db.AddQueryPattern(".*", &sqltypes.Result{}) - config := tabletenv.NewDefaultConfig() - config.DB = newDBConfigs(db) - te := 
NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.DB = newDBConfigs(db) + te := NewTxEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) options := &querypb.ExecuteOptions{} _, err := te.Reserve(ctx, options, 0, nil) @@ -587,7 +588,7 @@ func TestTxEngineFailReserve(t *testing.T) { nonExistingID := int64(42) _, err = te.Reserve(ctx, options, nonExistingID, nil) - assert.EqualError(t, err, "transaction 42: not found") + assert.EqualError(t, err, "transaction 42: not found (potential transaction timeout)") txID, _, _, err := te.Begin(ctx, nil, 0, nil, options) require.NoError(t, err) diff --git a/go/vt/vttablet/tabletserver/tx_executor.go b/go/vt/vttablet/tabletserver/tx_executor.go index 9dc92506e84..93d18a200f9 100644 --- a/go/vt/vttablet/tabletserver/tx_executor.go +++ b/go/vt/vttablet/tabletserver/tx_executor.go @@ -235,7 +235,7 @@ func (txe *TxExecutor) ConcludeTransaction(dtid string) error { }) } -// ReadTransaction returns the metadata for the sepcified dtid. +// ReadTransaction returns the metadata for the specified dtid. 
func (txe *TxExecutor) ReadTransaction(dtid string) (*querypb.TransactionMetadata, error) { if !txe.te.twopcEnabled { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") diff --git a/go/vt/vttablet/tabletserver/tx_executor_test.go b/go/vt/vttablet/tabletserver/tx_executor_test.go index 2651eb2a6cc..c3949240147 100644 --- a/go/vt/vttablet/tabletserver/tx_executor_test.go +++ b/go/vt/vttablet/tabletserver/tx_executor_test.go @@ -78,7 +78,7 @@ func TestTxExecutorPrepareNotInTx(t *testing.T) { defer db.Close() defer tsv.StopService() err := txe.Prepare(0, "aa") - require.EqualError(t, err, "transaction 0: not found") + require.EqualError(t, err, "transaction 0: not found (potential transaction timeout)") } func TestTxExecutorPreparePoolFail(t *testing.T) { diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go index f42e3c95408..52f356e0cca 100644 --- a/go/vt/vttablet/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -130,7 +130,7 @@ func (tp *TxPool) Shutdown(ctx context.Context) { func (tp *TxPool) transactionKiller() { defer tp.env.LogError() for _, conn := range tp.scp.GetElapsedTimeout(vterrors.TxKillerRollback) { - log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages)) + log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages, tp.env.Environment().Parser())) switch { case conn.IsTainted(): conn.Close() diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go index 3515310c481..aa2d5b69e89 100644 --- a/go/vt/vttablet/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -24,7 +24,9 @@ import ( "time" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/dbconfigs" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vtenv" 
"vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" @@ -214,8 +216,10 @@ func primeTxPoolWithConnection(t *testing.T, ctx context.Context) (*fakesqldb.DB db := fakesqldb.New(t) txPool, _ := newTxPool() // Set the capacity to 1 to ensure that the db connection is reused. - txPool.scp.conns.SetCapacity(1) - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + err := txPool.scp.conns.SetCapacity(context.Background(), 1) + require.NoError(t, err) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) // Run a query to trigger a database connection. That connection will be // reused by subsequent transactions. @@ -302,8 +306,7 @@ func TestTxPoolWaitTimeoutError(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().TxPool.TimeoutSeconds.Set("1s") + env.Config().TxPool.Timeout = time.Second // given db, txPool, _, closer := setupWithEnv(t, env) defer closer() @@ -374,7 +377,8 @@ func TestTxPoolGetConnRecentlyRemovedTransaction(t *testing.T) { assertErrorMatch(id, "pool closed") txPool, _ = newTxPool() - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) conn1, _, _, _ = txPool.Begin(ctx, &querypb.ExecuteOptions{}, false, 0, nil, nil) id = conn1.ReservedID() @@ -389,7 +393,7 @@ func TestTxPoolGetConnRecentlyRemovedTransaction(t *testing.T) { env.Config().SetTxTimeoutForWorkload(1*time.Millisecond, querypb.ExecuteOptions_OLTP) env.Config().SetTxTimeoutForWorkload(1*time.Millisecond, querypb.ExecuteOptions_OLAP) txPool, _ = newTxPoolWithEnv(env) - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + txPool.Open(params, params, params) defer txPool.Close() conn1, _, _, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}, false, 0, nil, nil) @@ -424,8 +428,7 @@ func TestTxTimeoutKillsTransactions(t *testing.T) { env := 
newEnv("TabletServerTest") env.Config().TxPool.Size = 1 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") + env.Config().Oltp.TxTimeout = time.Second _, txPool, limiter, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -473,8 +476,7 @@ func TestTxTimeoutDoesNotKillShortLivedTransactions(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") + env.Config().Oltp.TxTimeout = time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -506,9 +508,8 @@ func TestTxTimeoutKillsOlapTransactions(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") - _ = env.Config().Olap.TxTimeoutSeconds.Set("2s") + env.Config().Oltp.TxTimeout = time.Second + env.Config().Olap.TxTimeout = 2 * time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -544,9 +545,8 @@ func TestTxTimeoutNotEnforcedForZeroLengthTimeouts(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 2 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("0s") - _ = env.Config().Olap.TxTimeoutSeconds.Set("0s") + env.Config().Oltp.TxTimeout = 0 + env.Config().Olap.TxTimeout = 0 _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingKills := txPool.env.Stats().KillCounters.Counts()["Transactions"] @@ -587,9 +587,8 @@ func TestTxTimeoutReservedConn(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") - _ = 
env.Config().Olap.TxTimeoutSeconds.Set("2s") + env.Config().Oltp.TxTimeout = time.Second + env.Config().Olap.TxTimeout = 2 * time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingRcKills := txPool.env.Stats().KillCounters.Counts()["ReservedConnection"] @@ -630,9 +629,8 @@ func TestTxTimeoutReusedReservedConn(t *testing.T) { env := newEnv("TabletServerTest") env.Config().TxPool.Size = 1 - env.Config().TxPool.MaxWaiters = 0 - _ = env.Config().Oltp.TxTimeoutSeconds.Set("1s") - _ = env.Config().Olap.TxTimeoutSeconds.Set("2s") + env.Config().Oltp.TxTimeout = time.Second + env.Config().Olap.TxTimeout = 2 * time.Second _, txPool, _, closer := setupWithEnv(t, env) defer closer() startingRcKills := txPool.env.Stats().KillCounters.Counts()["ReservedConnection"] @@ -812,15 +810,14 @@ func newTxPoolWithEnv(env tabletenv.Env) (*TxPool, *fakeLimiter) { } func newEnv(exporterName string) tabletenv.Env { - config := tabletenv.NewDefaultConfig() - config.TxPool.Size = 300 - _ = config.Oltp.TxTimeoutSeconds.Set("30s") - _ = config.TxPool.TimeoutSeconds.Set("40s") - config.TxPool.MaxWaiters = 500000 - _ = config.OltpReadPool.IdleTimeoutSeconds.Set("30s") - _ = config.OlapReadPool.IdleTimeoutSeconds.Set("30s") - _ = config.TxPool.IdleTimeoutSeconds.Set("30s") - env := tabletenv.NewEnv(config, exporterName) + cfg := tabletenv.NewDefaultConfig() + cfg.TxPool.Size = 300 + cfg.Oltp.TxTimeout = 30 * time.Second + cfg.TxPool.Timeout = 40 * time.Second + cfg.OltpReadPool.IdleTimeout = 30 * time.Second + cfg.OlapReadPool.IdleTimeout = 30 * time.Second + cfg.TxPool.IdleTimeout = 30 * time.Second + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, exporterName) return env } @@ -869,7 +866,8 @@ func setup(t *testing.T) (*fakesqldb.DB, *TxPool, *fakeLimiter, func()) { db.AddQueryPattern(".*", &sqltypes.Result{}) txPool, limiter := newTxPool() - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, 
params, params) return db, txPool, limiter, func() { txPool.Close() @@ -882,7 +880,8 @@ func setupWithEnv(t *testing.T, env tabletenv.Env) (*fakesqldb.DB, *TxPool, *fak db.AddQueryPattern(".*", &sqltypes.Result{}) txPool, limiter := newTxPoolWithEnv(env) - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) return db, txPool, limiter, func() { txPool.Close() diff --git a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go index 3a4133b54d6..46c95193f6f 100644 --- a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go +++ b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go @@ -20,6 +20,7 @@ import ( "testing" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -38,16 +39,16 @@ func createCallers(username, principal, component, subcomponent string) (*queryp } func TestTxLimiter_DisabledAllowsAll(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.TxPool.Size = 10 - config.TransactionLimitPerUser = 0.1 - config.EnableTransactionLimit = false - config.EnableTransactionLimitDryRun = false - config.TransactionLimitByUsername = false - config.TransactionLimitByPrincipal = false - config.TransactionLimitByComponent = false - config.TransactionLimitBySubcomponent = false - limiter := New(tabletenv.NewEnv(config, "TabletServerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.TxPool.Size = 10 + cfg.TransactionLimitPerUser = 0.1 + cfg.EnableTransactionLimit = false + cfg.EnableTransactionLimitDryRun = false + cfg.TransactionLimitByUsername = false + cfg.TransactionLimitByPrincipal = false + cfg.TransactionLimitByComponent = false + cfg.TransactionLimitBySubcomponent = false + limiter := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) im, ef := createCallers("", 
"", "", "") for i := 0; i < 5; i++ { if got, want := limiter.Get(im, ef), true; got != want { @@ -58,18 +59,18 @@ func TestTxLimiter_DisabledAllowsAll(t *testing.T) { } func TestTxLimiter_LimitsOnlyOffendingUser(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.TxPool.Size = 10 - config.TransactionLimitPerUser = 0.3 - config.EnableTransactionLimit = true - config.EnableTransactionLimitDryRun = false - config.TransactionLimitByUsername = true - config.TransactionLimitByPrincipal = false - config.TransactionLimitByComponent = false - config.TransactionLimitBySubcomponent = false + cfg := tabletenv.NewDefaultConfig() + cfg.TxPool.Size = 10 + cfg.TransactionLimitPerUser = 0.3 + cfg.EnableTransactionLimit = true + cfg.EnableTransactionLimitDryRun = false + cfg.TransactionLimitByUsername = true + cfg.TransactionLimitByPrincipal = false + cfg.TransactionLimitByComponent = false + cfg.TransactionLimitBySubcomponent = false // This should allow 3 slots to all users - newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest")) + newlimiter := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) limiter, ok := newlimiter.(*Impl) if !ok { t.Fatalf("New returned limiter of unexpected type: got %T, want %T", newlimiter, limiter) @@ -117,25 +118,25 @@ func TestTxLimiter_LimitsOnlyOffendingUser(t *testing.T) { t.Errorf("Get(im1, ef1) after releasing: got %v, want %v", got, want) } - // Rejection coutner for user 1 should still be 1. + // Rejection count for user 1 should still be 1. 
if got, want := limiter.rejections.Counts()[key1], int64(1); got != want { t.Errorf("Rejections count for %s: got %d, want %d", key1, got, want) } } func TestTxLimiterDryRun(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.TxPool.Size = 10 - config.TransactionLimitPerUser = 0.3 - config.EnableTransactionLimit = true - config.EnableTransactionLimitDryRun = true - config.TransactionLimitByUsername = true - config.TransactionLimitByPrincipal = false - config.TransactionLimitByComponent = false - config.TransactionLimitBySubcomponent = false + cfg := tabletenv.NewDefaultConfig() + cfg.TxPool.Size = 10 + cfg.TransactionLimitPerUser = 0.3 + cfg.EnableTransactionLimit = true + cfg.EnableTransactionLimitDryRun = true + cfg.TransactionLimitByUsername = true + cfg.TransactionLimitByPrincipal = false + cfg.TransactionLimitByComponent = false + cfg.TransactionLimitBySubcomponent = false // This should allow 3 slots to all users - newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest")) + newlimiter := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TabletServerTest")) limiter, ok := newlimiter.(*Impl) if !ok { t.Fatalf("New returned limiter of unexpected type: got %T, want %T", newlimiter, limiter) diff --git a/go/vt/vttablet/tabletserver/txlogz.go b/go/vt/vttablet/tabletserver/txlogz.go index 04a2147a7e0..8d1b88c8c85 100644 --- a/go/vt/vttablet/tabletserver/txlogz.go +++ b/go/vt/vttablet/tabletserver/txlogz.go @@ -31,7 +31,6 @@ import ( "vitess.io/vitess/go/vt/logz" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -72,10 +71,6 @@ var ( `)) ) -func init() { - servenv.HTTPHandleFunc("/txlogz", txlogzHandler) -} - // txlogzHandler serves a human readable snapshot of the // current transaction log. 
// Endpoint: /txlogz?timeout=%d&limit=%d diff --git a/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go b/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go index ec1ab47758c..10428ed67c7 100644 --- a/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go +++ b/go/vt/vttablet/tabletserver/txserializer/tx_serializer.go @@ -51,7 +51,7 @@ import ( // - Waiting transactions are unblocked if their context is done. // - Both the local queue (per row range) and global queue (whole process) are // limited to avoid that queued transactions can consume the full capacity -// of vttablet. This is important if the capaciy is finite. For example, the +// of vttablet. This is important if the capacity is finite. For example, the // number of RPCs in flight could be limited by the RPC subsystem. type TxSerializer struct { env tabletenv.Env @@ -151,7 +151,7 @@ func (txs *TxSerializer) Wait(ctx context.Context, key, table string) (done Done if err != nil { if waited { // Waiting failed early e.g. due a canceled context and we did NOT get the - // slot. Call "done" now because we don'txs return it to the caller. + // slot. Call "done" now because we do not return it to the caller. 
txs.unlockLocked(key, false /* returnSlot */) } return nil, waited, err @@ -273,15 +273,18 @@ func (txs *TxSerializer) unlockLocked(key string, returnSlot bool) { delete(txs.queues, key) if q.max > 1 { + var formattedKey = key var logMsg string + if txs.env.Config().SanitizeLogMessages { - logMsg = fmt.Sprintf("%v simultaneous transactions (%v in total) for the same row range (%v) would have been queued.", q.max, q.count, txs.sanitizeKey(key)) - } else { - logMsg = fmt.Sprintf("%v simultaneous transactions (%v in total) for the same row range (%v) would have been queued.", q.max, q.count, key) + formattedKey = txs.sanitizeKey(key) } + if txs.dryRun { + logMsg = fmt.Sprintf("%v simultaneous transactions (%v in total) for the same row range (%v) would have been queued.", q.max, q.count, formattedKey) txs.logDryRun.Infof(logMsg) } else { + logMsg = fmt.Sprintf("%v simultaneous transactions (%v in total) for the same row range (%v) have been queued.", q.max, q.count, formattedKey) txs.log.Infof(logMsg) } } diff --git a/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go b/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go index d495800e141..e1b4b5a7612 100644 --- a/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go +++ b/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package txserializer import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -25,9 +26,8 @@ import ( "testing" "time" - "context" - "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -44,11 +44,11 @@ func resetVariables(txs *TxSerializer) { } func TestTxSerializer_NoHotRow(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxGlobalQueueSize = 1 - config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxGlobalQueueSize = 1 + cfg.HotRowProtection.MaxConcurrency = 5 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) resetVariables(txs) done, waited, err := txs.Wait(context.Background(), "t1 where1", "t1") @@ -76,11 +76,11 @@ func TestTxSerializerRedactDebugUI(t *testing.T) { streamlog.SetRedactDebugUIQueries(false) }() - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxGlobalQueueSize = 1 - config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxGlobalQueueSize = 1 + cfg.HotRowProtection.MaxConcurrency = 5 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) resetVariables(txs) done, waited, err := txs.Wait(context.Background(), "t1 where1", "t1") @@ -103,8 +103,8 @@ func TestTxSerializerRedactDebugUI(t *testing.T) { } func TestKeySanitization(t *testing.T) { - config := tabletenv.NewDefaultConfig() - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) // with a 
where clause key := "t1 where c1='foo'" want := "t1 ... [REDACTED]" @@ -122,11 +122,11 @@ func TestKeySanitization(t *testing.T) { } func TestTxSerializer(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 2 - config.HotRowProtection.MaxGlobalQueueSize = 3 - config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 2 + cfg.HotRowProtection.MaxGlobalQueueSize = 3 + cfg.HotRowProtection.MaxConcurrency = 1 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) resetVariables(txs) // tx1. @@ -195,11 +195,11 @@ func TestTxSerializer(t *testing.T) { func TestTxSerializer_ConcurrentTransactions(t *testing.T) { // Allow up to 2 concurrent transactions per hot row. - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 3 - config.HotRowProtection.MaxGlobalQueueSize = 3 - config.HotRowProtection.MaxConcurrency = 2 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 3 + cfg.HotRowProtection.MaxGlobalQueueSize = 3 + cfg.HotRowProtection.MaxConcurrency = 2 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) resetVariables(txs) // tx1. @@ -318,11 +318,11 @@ func testHTTPHandler(txs *TxSerializer, count int, redacted bool) error { // tx1 and tx2 are allowed to run concurrently while tx3 and tx4 are queued. // tx3 will get canceled and tx4 will be unblocked once tx1 is done. 
func TestTxSerializerCancel(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 4 - config.HotRowProtection.MaxGlobalQueueSize = 4 - config.HotRowProtection.MaxConcurrency = 2 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 4 + cfg.HotRowProtection.MaxGlobalQueueSize = 4 + cfg.HotRowProtection.MaxConcurrency = 2 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) resetVariables(txs) // tx3 and tx4 will record their number once they're done waiting. @@ -418,12 +418,12 @@ func TestTxSerializerCancel(t *testing.T) { // TestTxSerializerDryRun verifies that the dry-run mode does not serialize // the two concurrent transactions for the same key. func TestTxSerializerDryRun(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.Mode = tabletenv.Dryrun - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxGlobalQueueSize = 2 - config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.Mode = tabletenv.Dryrun + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxGlobalQueueSize = 2 + cfg.HotRowProtection.MaxConcurrency = 1 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) resetVariables(txs) // tx1. @@ -489,11 +489,11 @@ func TestTxSerializerDryRun(t *testing.T) { // reject transactions although they may succeed within the txpool constraints // and RPC deadline. 
func TestTxSerializerGlobalQueueOverflow(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxGlobalQueueSize = 1 - config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxGlobalQueueSize = 1 + cfg.HotRowProtection.MaxConcurrency = 1 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) // tx1. done1, waited1, err1 := txs.Wait(context.Background(), "t1 where1", "t1") @@ -530,22 +530,22 @@ func TestTxSerializerGlobalQueueOverflow(t *testing.T) { } func TestTxSerializerPending(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxGlobalQueueSize = 1 - config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxGlobalQueueSize = 1 + cfg.HotRowProtection.MaxConcurrency = 1 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) if got, want := txs.Pending("t1 where1"), 0; got != want { t.Errorf("there should be no pending transaction: got = %v, want = %v", got, want) } } func BenchmarkTxSerializer_NoHotRow(b *testing.B) { - config := tabletenv.NewDefaultConfig() - config.HotRowProtection.MaxQueueSize = 1 - config.HotRowProtection.MaxGlobalQueueSize = 1 - config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + cfg := tabletenv.NewDefaultConfig() + cfg.HotRowProtection.MaxQueueSize = 1 + cfg.HotRowProtection.MaxGlobalQueueSize = 1 + cfg.HotRowProtection.MaxConcurrency = 5 + txs := New(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "TxSerializerTest")) b.ResetTimer() diff --git a/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go 
b/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go index 1e503dc7020..ecc6688fb9d 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/mock_healthcheck_test.go @@ -59,6 +59,14 @@ func (m *MockHealthCheck) CacheStatus() discovery.TabletsCacheStatusList { return ret0 } +// HealthyStatus mocks base method. +func (m *MockHealthCheck) HealthyStatus() discovery.TabletsCacheStatusList { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HealthyStatus") + ret0, _ := ret[0].(discovery.TabletsCacheStatusList) + return ret0 +} + // CacheStatus indicates an expected call of CacheStatus. func (mr *MockHealthCheckMockRecorder) CacheStatus() *gomock.Call { mr.mock.ctrl.T.Helper() @@ -202,9 +210,9 @@ func (mr *MockHealthCheckMockRecorder) Subscribe() *gomock.Call { } // TabletConnection mocks base method. -func (m *MockHealthCheck) TabletConnection(arg0 *topodata.TabletAlias, arg1 *query.Target) (queryservice.QueryService, error) { +func (m *MockHealthCheck) TabletConnection(arg0 context.Context, arg1 *topodata.TabletAlias, arg2 *query.Target) (queryservice.QueryService, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TabletConnection", arg0, arg1) + ret := m.ctrl.Call(m, "TabletConnection", arg0, arg1, arg2) ret0, _ := ret[0].(queryservice.QueryService) ret1, _ := ret[1].(error) return ret0, ret1 diff --git a/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go index 3ffb3a78a1a..aeb75d258a3 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/mock_throttler_test.go @@ -12,6 +12,7 @@ import ( discovery "vitess.io/vitess/go/vt/discovery" throttlerdata "vitess.io/vitess/go/vt/proto/throttlerdata" + topodata "vitess.io/vitess/go/vt/proto/topodata" ) // MockThrottlerInterface is a mock of ThrottlerInterface interface. 
@@ -63,6 +64,20 @@ func (mr *MockThrottlerInterfaceMockRecorder) GetConfiguration() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfiguration", reflect.TypeOf((*MockThrottlerInterface)(nil).GetConfiguration)) } +// MaxLag mocks base method. +func (m *MockThrottlerInterface) MaxLag(tabletType topodata.TabletType) uint32 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MaxLag", tabletType) + ret0, _ := ret[0].(uint32) + return ret0 +} + +// MaxLag indicates an expected call of MaxLag. +func (mr *MockThrottlerInterfaceMockRecorder) MaxLag(tabletType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxLag", reflect.TypeOf((*MockThrottlerInterface)(nil).MaxLag), tabletType) +} + // MaxRate mocks base method. func (m *MockThrottlerInterface) MaxRate() int64 { m.ctrl.T.Helper() diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go index 92976bbedf2..7cb774663a4 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and @@ -18,10 +18,11 @@ package txthrottler import ( "context" - "math/rand" + "math/rand/v2" "reflect" "strings" "sync" + "sync/atomic" "time" "vitess.io/vitess/go/stats" @@ -81,14 +82,7 @@ type ThrottlerInterface interface { GetConfiguration() *throttlerdatapb.Configuration UpdateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error ResetConfiguration() -} - -// TopologyWatcherInterface defines the public interface that is implemented by -// discovery.LegacyTopologyWatcher. It is only used here to allow mocking out -// go/vt/discovery.LegacyTopologyWatcher. -type TopologyWatcherInterface interface { - Start() - Stop() + MaxLag(tabletType topodatapb.TabletType) uint32 } // TxThrottlerName is the name the wrapped go/vt/throttler object will be registered with @@ -175,6 +169,10 @@ type txThrottlerStateImpl struct { // tabletTypes stores the tablet types for throttling tabletTypes map[topodatapb.TabletType]bool + + maxLag int64 + done chan bool + waitForTermination sync.WaitGroup } // NewTxThrottler tries to construct a txThrottler from the relevant @@ -253,7 +251,7 @@ func (t *txThrottler) Throttle(priority int, workload string) (result bool) { // Throttle according to both what the throttler state says and the priority. Workloads with lower priority value // are less likely to be throttled. 
- result = t.state.throttle() && rand.Intn(sqlparser.MaxPriorityValue) < priority + result = rand.IntN(sqlparser.MaxPriorityValue) < priority && t.state.throttle() t.requestsTotal.Add(workload, 1) if result { @@ -292,6 +290,7 @@ func newTxThrottlerState(txThrottler *txThrottler, config *tabletenv.TabletConfi tabletTypes: tabletTypes, throttler: t, txThrottler: txThrottler, + done: make(chan bool, 1), } // get cells from topo if none defined in tabletenv config @@ -306,6 +305,8 @@ func newTxThrottlerState(txThrottler *txThrottler, config *tabletenv.TabletConfi state.stopHealthCheck = cancel state.initHealthCheckStream(txThrottler.topoServer, target) go state.healthChecksProcessor(ctx, txThrottler.topoServer, target) + state.waitForTermination.Add(1) + go state.updateMaxLag() return state, nil } @@ -364,7 +365,35 @@ func (ts *txThrottlerStateImpl) throttle() bool { // Serialize calls to ts.throttle.Throttle() ts.throttleMu.Lock() defer ts.throttleMu.Unlock() - return ts.throttler.Throttle(0 /* threadId */) > 0 + + maxLag := atomic.LoadInt64(&ts.maxLag) + + return maxLag > ts.config.TxThrottlerConfig.TargetReplicationLagSec && + ts.throttler.Throttle(0 /* threadId */) > 0 +} + +func (ts *txThrottlerStateImpl) updateMaxLag() { + defer ts.waitForTermination.Done() + // We use half of the target lag to ensure we have enough resolution to see changes in lag below that value + ticker := time.NewTicker(time.Duration(ts.config.TxThrottlerConfig.TargetReplicationLagSec/2) * time.Second) + defer ticker.Stop() +outerloop: + for { + select { + case <-ticker.C: + var maxLag uint32 + + for tabletType := range ts.tabletTypes { + maxLagPerTabletType := ts.throttler.MaxLag(tabletType) + if maxLagPerTabletType > maxLag { + maxLag = maxLagPerTabletType + } + } + atomic.StoreInt64(&ts.maxLag, int64(maxLag)) + case <-ts.done: + break outerloop + } + } } func (ts *txThrottlerStateImpl) deallocateResources() { @@ -372,6 +401,8 @@ func (ts *txThrottlerStateImpl) deallocateResources() { 
ts.closeHealthCheckStream() ts.healthCheck = nil + ts.done <- true + ts.waitForTermination.Wait() // After ts.healthCheck is closed txThrottlerStateImpl.StatsUpdate() is guaranteed not // to be executing, so we can safely close the throttler. ts.throttler.Close() diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go index 268a37437d9..fe352cf96f4 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go +++ b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and @@ -22,6 +22,7 @@ package txthrottler import ( "context" + "sync/atomic" "testing" "time" @@ -33,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -40,16 +42,16 @@ import ( ) func TestDisabledThrottler(t *testing.T) { - config := tabletenv.NewDefaultConfig() - config.EnableTxThrottler = false - env := tabletenv.NewEnv(config, t.Name()) + cfg := tabletenv.NewDefaultConfig() + cfg.EnableTxThrottler = false + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, t.Name()) throttler := NewTxThrottler(env, nil) throttler.InitDBConfig(&querypb.Target{ Keyspace: "keyspace", Shard: "shard", }) assert.Nil(t, throttler.Open()) - assert.False(t, throttler.Throttle(0, "some_workload")) + assert.False(t, throttler.Throttle(0, "some-workload")) throttlerImpl, _ := 
throttler.(*txThrottler) assert.Zero(t, throttlerImpl.throttlerRunning.Get()) throttler.Close() @@ -79,34 +81,51 @@ func TestEnabledThrottler(t *testing.T) { return mockThrottler, nil } - call0 := mockThrottler.EXPECT().UpdateConfiguration(gomock.Any(), true /* copyZeroValues */) - call1 := mockThrottler.EXPECT().Throttle(0) - call1.Return(0 * time.Second) + var calls []*gomock.Call + + call := mockThrottler.EXPECT().UpdateConfiguration(gomock.Any(), true /* copyZeroValues */) + calls = append(calls, call) + + // 1 + call = mockThrottler.EXPECT().Throttle(0) + call.Return(0 * time.Second) + calls = append(calls, call) + tabletStats := &discovery.TabletHealth{ Target: &querypb.Target{ Cell: "cell1", TabletType: topodatapb.TabletType_REPLICA, }, } - call2 := mockThrottler.EXPECT().RecordReplicationLag(gomock.Any(), tabletStats) - call3 := mockThrottler.EXPECT().Throttle(0) - call3.Return(1 * time.Second) - call4 := mockThrottler.EXPECT().Throttle(0) - call4.Return(1 * time.Second) - calllast := mockThrottler.EXPECT().Close() + call = mockThrottler.EXPECT().RecordReplicationLag(gomock.Any(), tabletStats) + calls = append(calls, call) + + // 2 + call = mockThrottler.EXPECT().Throttle(0) + call.Return(1 * time.Second) + calls = append(calls, call) + + // 3 + // Nothing gets mocked here because the order of evaluation in txThrottler.Throttle() evaluates first + // whether the priority allows for throttling or not, so no need to mock calls in mockThrottler.Throttle() + + // 4 + // Nothing gets mocked here because the order of evaluation in txThrottlerStateImpl.Throttle() evaluates first + // whether there is lag or not, so no call to the underlying mockThrottler is issued. 
- call1.After(call0) - call2.After(call1) - call3.After(call2) - call4.After(call3) - calllast.After(call4) + call = mockThrottler.EXPECT().Close() + calls = append(calls, call) - config := tabletenv.NewDefaultConfig() - config.EnableTxThrottler = true - config.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA} + for i := 1; i < len(calls); i++ { + calls[i].After(calls[i-1]) + } + + cfg := tabletenv.NewDefaultConfig() + cfg.EnableTxThrottler = true + cfg.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA} - env := tabletenv.NewEnv(config, t.Name()) + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, t.Name()) throttler := NewTxThrottler(env, ts) throttlerImpl, _ := throttler.(*txThrottler) assert.NotNil(t, throttlerImpl) @@ -117,13 +136,20 @@ func TestEnabledThrottler(t *testing.T) { }) assert.Nil(t, throttlerImpl.Open()) - throttlerStateImpl := throttlerImpl.state.(*txThrottlerStateImpl) + throttlerStateImpl, ok := throttlerImpl.state.(*txThrottlerStateImpl) + assert.True(t, ok) assert.Equal(t, map[topodatapb.TabletType]bool{topodatapb.TabletType_REPLICA: true}, throttlerStateImpl.tabletTypes) assert.Equal(t, int64(1), throttlerImpl.throttlerRunning.Get()) - assert.False(t, throttlerImpl.Throttle(100, "some_workload")) - assert.Equal(t, int64(1), throttlerImpl.requestsTotal.Counts()["some_workload"]) - assert.Zero(t, throttlerImpl.requestsThrottled.Counts()["some_workload"]) + // Stop the go routine that keeps updating the cached shard's max lag to prevent it from changing the value in a + // way that will interfere with how we manipulate that value in our tests to evaluate different cases: + throttlerStateImpl.done <- true + + // 1 should not throttle due to return value of underlying Throttle(), despite high lag + atomic.StoreInt64(&throttlerStateImpl.maxLag, 20) + assert.False(t, throttlerImpl.Throttle(100, "some-workload")) + assert.Equal(t, int64(1), 
throttlerImpl.requestsTotal.Counts()["some-workload"]) + assert.Zero(t, throttlerImpl.requestsThrottled.Counts()["some-workload"]) throttlerImpl.state.StatsUpdate(tabletStats) // This calls replication lag thing assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksReadTotal.Counts()) @@ -139,16 +165,23 @@ func TestEnabledThrottler(t *testing.T) { assert.Equal(t, map[string]int64{"cell1.REPLICA": 1, "cell2.RDONLY": 1}, throttlerImpl.healthChecksReadTotal.Counts()) assert.Equal(t, map[string]int64{"cell1.REPLICA": 1}, throttlerImpl.healthChecksRecordedTotal.Counts()) - // The second throttle call should reject. - assert.True(t, throttlerImpl.Throttle(100, "some_workload")) - assert.Equal(t, int64(2), throttlerImpl.requestsTotal.Counts()["some_workload"]) - assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some_workload"]) + // 2 should throttle due to return value of underlying Throttle(), high lag & priority = 100 + assert.True(t, throttlerImpl.Throttle(100, "some-workload")) + assert.Equal(t, int64(2), throttlerImpl.requestsTotal.Counts()["some-workload"]) + assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some-workload"]) - // This call should not throttle due to priority. Check that's the case and counters agree. 
- assert.False(t, throttlerImpl.Throttle(0, "some_workload")) - assert.Equal(t, int64(3), throttlerImpl.requestsTotal.Counts()["some_workload"]) - assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some_workload"]) - throttlerImpl.Close() + // 3 should not throttle despite return value of underlying Throttle() and high lag, due to priority = 0 + assert.False(t, throttlerImpl.Throttle(0, "some-workload")) + assert.Equal(t, int64(3), throttlerImpl.requestsTotal.Counts()["some-workload"]) + assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some-workload"]) + + // 4 should not throttle despite return value of underlying Throttle() and priority = 100, due to low lag + atomic.StoreInt64(&throttlerStateImpl.maxLag, 1) + assert.False(t, throttler.Throttle(100, "some-workload")) + assert.Equal(t, int64(4), throttlerImpl.requestsTotal.Counts()["some-workload"]) + assert.Equal(t, int64(1), throttlerImpl.requestsThrottled.Counts()["some-workload"]) + + throttler.Close() assert.Zero(t, throttlerImpl.throttlerRunning.Get()) } @@ -168,8 +201,8 @@ func TestFetchKnownCells(t *testing.T) { } func TestDryRunThrottler(t *testing.T) { - config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, t.Name()) + cfg := tabletenv.NewDefaultConfig() + env := tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, t.Name()) testCases := []struct { Name string diff --git a/go/vt/vttablet/tabletserver/vstreamer/copy.go b/go/vt/vttablet/tabletserver/vstreamer/copy.go index 585be09dec3..2f54ea235fe 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/copy.go +++ b/go/vt/vttablet/tabletserver/vstreamer/copy.go @@ -258,6 +258,13 @@ func (uvs *uvstreamer) copyTable(ctx context.Context, tableName string) error { Fields: uvs.fields, Keyspace: uvs.vse.keyspace, Shard: uvs.vse.shard, + // In the copy phase the values for ENUM and SET fields are always strings. 
+ // We are including this extra uint8 in the message even though there may + // not be an ENUM or SET column in the table because we only have one field + // event for each batch of ROWs being copied so it's negligible overhead + // and less costly and intrusive than iterating over the fields to see if + // we do indeed have any ENUM or SET columns in the table. + EnumSetStringValues: true, } if err := uvs.sendFieldEvent(ctx, rows.Gtid, fieldEvent); err != nil { log.Infof("sendFieldEvent returned error %v", err) diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index 2862601bf1b..501b3708eed 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -28,17 +28,16 @@ import ( "sync/atomic" "time" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/mysqlctl" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/acl" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -105,6 +104,8 @@ type Engine struct { throttlerClient *throttle.Client } +const throttledLoggerInterval = 5 * time.Minute + // NewEngine creates a new Engine. // Initialization sequence is: NewEngine->InitDBConfig->Open. // Open and Close can be called multiple times and are idempotent. 
@@ -149,6 +150,10 @@ func NewEngine(env tabletenv.Env, ts srvtopo.Server, se *schema.Engine, lagThrot return vse } +func (vse *Engine) GetTabletInfo() string { + return fmt.Sprintf("%s/%s/%s", vse.cell, vse.keyspace, vse.shard) +} + // InitDBConfig initializes the target parameters for the Engine. func (vse *Engine) InitDBConfig(keyspace, shard string) { vse.keyspace = keyspace @@ -228,7 +233,6 @@ func (vse *Engine) validateBinlogRowImage(ctx context.Context, db dbconfigs.Conn // Stream starts a new stream. // This streams events from the binary logs func (vse *Engine) Stream(ctx context.Context, startPos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, throttlerApp throttlerapp.Name, send func([]*binlogdatapb.VEvent) error) error { - if err := vse.validateBinlogRowImage(ctx, vse.se.GetDBConnector()); err != nil { return err } @@ -432,7 +436,7 @@ func (vse *Engine) setWatch() { } var vschema *vindexes.VSchema if v != nil { - vschema = vindexes.BuildVSchema(v) + vschema = vindexes.BuildVSchema(v, vse.env.Environment().Parser()) if err != nil { log.Errorf("Error building vschema: %v", err) vse.vschemaErrors.Add(1) @@ -552,7 +556,7 @@ func (vse *Engine) getInnoDBTrxHistoryLen(ctx context.Context, db dbconfigs.Conn return histLen } -// getMySQLReplicationLag attempts to get the seconds_behind_master value. +// getMySQLReplicationLag attempts to get the seconds_behind_source value. // If the value cannot be determined for any reason then -1 is returned, which // means "unknown" or "irrelevant" (meaning it's not actively replicating). 
func (vse *Engine) getMySQLReplicationLag(ctx context.Context, db dbconfigs.Connector) int64 { @@ -563,12 +567,11 @@ func (vse *Engine) getMySQLReplicationLag(ctx context.Context, db dbconfigs.Conn } defer conn.Close() - res, err := conn.ExecuteFetch(replicaLagQuery, 1, true) - if err != nil || len(res.Rows) != 1 || res.Rows[0] == nil { + status, err := conn.ShowReplicationStatus() + if err != nil { return lagSecs } - row := res.Named().Row() - return row.AsInt64("Seconds_Behind_Master", -1) + return int64(status.ReplicationLagSeconds) } // getMySQLEndpoint returns the host:port value for the vstreamer (MySQL) instance @@ -590,9 +593,13 @@ func (vse *Engine) getMySQLEndpoint(ctx context.Context, db dbconfigs.Connector) // mapPKEquivalentCols gets a PK equivalent from mysqld for the table // and maps the column names to field indexes in the MinimalTable struct. -func (vse *Engine) mapPKEquivalentCols(ctx context.Context, table *binlogdatapb.MinimalTable) ([]int, error) { - mysqld := mysqlctl.NewMysqld(vse.env.Config().DB) - pkeColNames, indexName, err := mysqld.GetPrimaryKeyEquivalentColumns(ctx, vse.env.Config().DB.DBName, table.Name) +func (vse *Engine) mapPKEquivalentCols(ctx context.Context, db dbconfigs.Connector, table *binlogdatapb.MinimalTable) ([]int, error) { + conn, err := db.Connect(ctx) + if err != nil { + return nil, err + } + defer conn.Close() + pkeColNames, indexName, err := mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, conn.ExecuteFetch, vse.env.Config().DB.DBName, table.Name) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go index 36bcc8f181a..b0b31e256cc 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go @@ -141,7 +141,8 @@ func TestUpdateVSchema(t *testing.T) { } } }, - "shard_routing_rules": null + "shard_routing_rules": null, + "keyspace_routing_rules": null }` b, err 
:= json.MarshalIndent(engine.vschema(), "", " ") if err != nil { @@ -186,6 +187,11 @@ func TestVStreamerWaitForMySQL(t *testing.T) { "1000", ) sbmres := sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "Seconds_Behind_Source", + "int64"), + "10", + ) + sbmlegacyres := sqltypes.MakeTestResult(sqltypes.MakeTestFields( "Seconds_Behind_Master", "int64"), "10", @@ -241,9 +247,10 @@ func TestVStreamerWaitForMySQL(t *testing.T) { testDB.AddQuery(hostQuery, hostres) testDB.AddQuery(trxHistoryLenQuery, thlres) testDB.AddQuery(replicaLagQuery, sbmres) + testDB.AddQuery(legacyLagQuery, sbmlegacyres) for _, tt := range tests { - tt.fields.cp = testDB.ConnParams() + tt.fields.cp = dbconfigs.New(testDB.ConnParams()) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() t.Run(tt.name, func(t *testing.T) { diff --git a/go/vt/vttablet/tabletserver/vstreamer/fuzz.go b/go/vt/vttablet/tabletserver/vstreamer/fuzz.go index 90387e97f2c..83369f27d5e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/fuzz.go +++ b/go/vt/vttablet/tabletserver/vstreamer/fuzz.go @@ -24,6 +24,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -65,7 +66,7 @@ func Fuzz(data []byte) int { if err != nil { return -1 } - _, _ = buildPlan(t1, testLocalVSchema, &binlogdatapb.Filter{ + _, _ = buildPlan(t1, testLocalVSchema, sqlparser.NewTestParser(), &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{ {Match: str1, Filter: str2}, }, diff --git a/go/vt/vttablet/tabletserver/vstreamer/helper_event_test.go b/go/vt/vttablet/tabletserver/vstreamer/helper_event_test.go new file mode 100644 index 00000000000..49dabae3973 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/helper_event_test.go @@ -0,0 +1,786 @@ +/* +Copyright 2024 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vstreamer + +// This file contains the test framework for testing the event generation logic in vstreamer. +// The test framework is designed to be used in the following way: +// 1. Define a TestSpec with the following fields: +// - ddls: a list of create table statements for the tables to be used in the test +// - tests: a list of test cases, each test case is a list of TestQuery +// - options: test-specific options, if any +// 2. Call ts.Init() to initialize the test. +// 3. Call ts.Run() to run the test. This will run the queries and validate the events. +// 4. Call ts.Close() to clean up the tables created in the test. +// The test framework will take care of creating the tables, running the queries, and validating the events for +// simpler cases. For more complex cases, the test framework provides hooks to customize the event generation. + +// Note: To simplify the initial implementation, the test framework is designed to be used in the vstreamer package only. +// It makes several assumptions about how the test cases are written. For example, queries are expected to +// use single quotes for string literals, for example: +// `"insert into t1 values (1, 'blob1', 'aaa')"`. +// The test framework will not work if the queries use double quotes for string literals at the moment. 
+ +import ( + "fmt" + "slices" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/collations/colldata" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/schemadiff" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +const ( + lengthInt = 11 + lengthBlob = 65535 + lengthText = 262140 + + // We have to hardcode the set lengths as we don't yet have an encoded way + // to calculate the length for the TableMap event, + // This is the expected length of the only SET column in the test schema. + lengthSet = 204 + // This is the expected length of the only SET column using a binary collation + // in the test schema. + lengthSetBinary = 428 + lengthJSON = 4294967295 +) + +var ( + // noEvents is used to indicate that a query is expected to generate no events. + noEvents = []TestRowEvent{} +) + +// TestColumn has all the attributes of a column required for the test cases. +type TestColumn struct { + name, dataType, colType string + len int64 + collationID collations.ID + dataTypeLowered string + skip bool + collationName string +} + +// TestFieldEvent has all the attributes of a table required for creating a field event. 
+type TestFieldEvent struct { + table, db string + cols []*TestColumn + enumSetStrings bool +} + +func (tfe *TestFieldEvent) String() string { + var fe binlogdatapb.FieldEvent + var field *query.Field + fe.TableName = tfe.table + fe.EnumSetStringValues = tfe.enumSetStrings + for _, col := range tfe.cols { + if col.skip { + continue + } + if col.name == "keyspace_id" { + field = &query.Field{ + Name: col.name, + Type: getQueryType(col.dataType), + Charset: uint32(col.collationID), + } + } else { + field = &query.Field{ + Name: col.name, + Type: getQueryType(col.dataType), + Table: tfe.table, + OrgTable: tfe.table, + Database: tfe.db, + OrgName: col.name, + ColumnLength: uint32(col.len), + Charset: uint32(col.collationID), + ColumnType: col.colType, + } + } + fe.Fields = append(fe.Fields, field) + + } + if !ignoreKeyspaceShardInFieldAndRowEvents { + fe.Keyspace = testenv.DBName + fe.Shard = testenv.DefaultShard + } + ev := &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_FIELD, + FieldEvent: &fe, + } + return ev.String() +} + +// TestQuery represents a database query and the expected events it generates. +type TestQuery struct { + query string + events []TestRowEvent +} + +// TestRowChange represents the before and after state of a row due to a dml +type TestRowChange struct { + before []string + after []string +} + +// TestRowEventSpec is used for defining a custom row event. +type TestRowEventSpec struct { + table string + changes []TestRowChange + keyspace string + shard string +} + +// Generates a string representation for a custom row event. 
+func (s *TestRowEventSpec) String() string { + ev := &binlogdatapb.RowEvent{ + TableName: s.table, + } + var rowChanges []*binlogdatapb.RowChange + if s.changes != nil && len(s.changes) > 0 { + for _, c := range s.changes { + rowChange := binlogdatapb.RowChange{} + if c.before != nil && len(c.before) > 0 { + rowChange.Before = &query.Row{} + for _, val := range c.before { + if val == sqltypes.NullStr { + val = "" + } + rowChange.Before.Lengths = append(rowChange.Before.Lengths, int64(len(val))) + rowChange.Before.Values = append(rowChange.Before.Values, []byte(val)...) + } + } + if c.after != nil && len(c.after) > 0 { + rowChange.After = &query.Row{} + for i, val := range c.after { + if val == sqltypes.NullStr { + val = "" + } + l := int64(len(val)) + if strings.HasPrefix(val, "\x00") { + // The null byte hex representation is used when printing NULL ENUM/SET values. + // The length is 0, however, rather than the string representation of those + // null bytes. + l = 0 + // The previous column's length increases by 1 for some reason. No idea why MySQL + // does this, but it does. It may be including the backslash, for example: + // row_changes:{after:{lengths:1 lengths:4 lengths:0 lengths:0 values:\"5mmm\\x00\"}}}" + if i > 0 { + rowChange.After.Lengths[i-1]++ + } + } + rowChange.After.Lengths = append(rowChange.After.Lengths, l) + rowChange.After.Values = append(rowChange.After.Values, []byte(val)...) 
+ } + } + rowChanges = append(rowChanges, &rowChange) + } + ev.RowChanges = rowChanges + } + if !ignoreKeyspaceShardInFieldAndRowEvents { + ev.Keyspace = testenv.DBName + ev.Shard = "0" // this is the default shard + if s.keyspace != "" { + ev.Keyspace = s.keyspace + } + if s.shard != "" { + ev.Shard = s.shard + } + } + vEvent := &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_ROW, + RowEvent: ev, + } + return vEvent.String() +} + +// TestRowEvent is used to define either the actual row event string (the `event` field) or a custom row event +// (the `spec` field). Only one should be specified. If a test validates `flags` of a RowEvent then it is set. +type TestRowEvent struct { + event string + spec *TestRowEventSpec + flags int + restart bool // if set to true, it will start a new group of output events +} + +// TestSpecOptions has any non-standard test-specific options which can modify the event generation behaviour. +type TestSpecOptions struct { + noblob bool // if set to true, it will skip blob and text columns in the row event + // by default the filter will be a "select * from table", set this to specify a custom one + // if filter is set, customFieldEvents need to be specified as well + filter *binlogdatapb.Filter + customFieldEvents bool + position string +} + +// TestSpec is defined one per unit test. 
+type TestSpec struct {
+ // test-specific parameters
+ t *testing.T
+ ddls []string // create table statements
+ tests [][]*TestQuery // list of input queries and expected events for each query
+ options *TestSpecOptions // test-specific options
+
+ // internal state
+ inited bool // whether the test has been initialized
+ tables []string // list of tables in the schema (created in `ddls`)
+ pkColumns map[string][]string // map of table name to primary key columns
+ schema *schemadiff.Schema // parsed schema from `ddls` using `schemadiff`
+ fieldEvents map[string]*TestFieldEvent // map of table name to field event for the table
+ fieldEventsSent map[string]bool // whether the field event has been sent for the table in the test
+ state map[string]*query.Row // last row inserted for each table. Useful to generate events only for inserts
+ metadata map[string][]string // list of enum/set values for enum/set columns
+}
+
+func (ts *TestSpec) getCurrentState(table string) *query.Row {
+ return ts.state[table]
+}
+
+func (ts *TestSpec) setCurrentState(table string, row *query.Row) {
+ ts.state[table] = row
+}
+
+// Init() initializes the test. It creates the tables and sets up the internal state.
+func (ts *TestSpec) Init() {
+ var err error
+ if ts.inited {
+ return
+ }
+ // setup SrvVschema watcher, if not already done
+ engine.watcherOnce.Do(engine.setWatch)
+ defer func() { ts.inited = true }()
+ if ts.options == nil {
+ ts.options = &TestSpecOptions{}
+ }
+ // Add the unicode character set to each table definition.
+ // The collation used will then be the default for that character set + // in the given MySQL version used in the test: + // - 5.7: utf8mb4_general_ci + // - 8.0: utf8mb4_0900_ai_ci + tableOptions := "ENGINE=InnoDB CHARSET=utf8mb4" + for i := range ts.ddls { + ts.ddls[i] = fmt.Sprintf("%s %s", ts.ddls[i], tableOptions) + } + ts.schema, err = schemadiff.NewSchemaFromQueries(schemadiff.NewTestEnv(), ts.ddls) + require.NoError(ts.t, err) + ts.fieldEvents = make(map[string]*TestFieldEvent) + ts.fieldEventsSent = make(map[string]bool) + ts.state = make(map[string]*query.Row) + ts.metadata = make(map[string][]string) + ts.pkColumns = make(map[string][]string) + // create tables + require.Equal(ts.t, len(ts.ddls), len(ts.schema.Tables()), "number of tables in ddls and schema do not match") + for i, t := range ts.schema.Tables() { + execStatement(ts.t, ts.ddls[i]) + fe := ts.getFieldEvent(t) + ts.fieldEvents[t.Name()] = fe + var pkColumns []string + var hasPK bool + for _, index := range t.TableSpec.Indexes { + require.NotNil(ts.t, index.Info, "index.Info is nil") + if index.Info.Type == sqlparser.IndexTypePrimary { + for _, col := range index.Columns { + pkColumns = append(pkColumns, col.Column.String()) + } + hasPK = true + } + } + if !hasPK { + // add all columns as pk columns + for _, col := range t.TableSpec.Columns { + pkColumns = append(pkColumns, col.Name.String()) + } + } + ts.pkColumns[t.Name()] = pkColumns + } +} + +// Close() should be called (via defer) at the end of the test to clean up the tables created in the test. 
+func (ts *TestSpec) Close() { + dropStatement := fmt.Sprintf("drop tables %s", strings.Join(ts.schema.TableNames(), ", ")) + execStatement(ts.t, dropStatement) +} + +func (ts *TestSpec) getBindVarsForInsert(stmt sqlparser.Statement) (string, map[string]string) { + bv := make(map[string]string) + ins := stmt.(*sqlparser.Insert) + tn, err := ins.Table.TableName() + require.NoError(ts.t, err) + table := tn.Name.String() + fe := ts.fieldEvents[table] + vals, ok := ins.Rows.(sqlparser.Values) + require.True(ts.t, ok, "insert statement does not have values") + for _, val := range vals { + for i, v := range val { + bufV := sqlparser.NewTrackedBuffer(nil) + v.Format(bufV) + s := bufV.String() + switch fe.cols[i].dataTypeLowered { + case "varchar", "char", "binary", "varbinary", "blob", "text", "enum", "set": + s = strings.Trim(s, "'") + } + bv[fe.cols[i].name] = s + } + } + return table, bv +} + +func (ts *TestSpec) getBindVarsForUpdate(stmt sqlparser.Statement) (string, map[string]string) { + bv := make(map[string]string) + upd := stmt.(*sqlparser.Update) + table := sqlparser.String(upd.TableExprs[0].(*sqlparser.AliasedTableExpr).Expr) + fe, ok := ts.fieldEvents[table] + require.True(ts.t, ok, "field event for table %s not found", table) + index := int64(0) + state := ts.getCurrentState(table) + for i, col := range fe.cols { + bv[col.name] = string(state.Values[index : index+state.Lengths[i]]) + index += state.Lengths[i] + } + for _, expr := range upd.Exprs { + bufV := sqlparser.NewTrackedBuffer(nil) + bufN := sqlparser.NewTrackedBuffer(nil) + expr.Expr.Format(bufV) + expr.Name.Format(bufN) + bv[bufN.String()] = strings.Trim(bufV.String(), "'") + } + return table, bv +} + +// Run() runs the test. It first initializes the test, then runs the queries and validates the events. 
+func (ts *TestSpec) Run() { + if !ts.inited { + ts.Init() + } + var testcases []testcase + for _, t := range ts.tests { + var tc testcase + var input []string + var output []string + for _, tq := range t { + var table string + input = append(input, tq.query) + switch { + case tq.events != nil && len(tq.events) == 0: // when an input query is expected to generate no events + continue + case tq.events != nil && // when we define the actual events either as a serialized string or as a TestRowEvent + (len(tq.events) > 0 && + !(len(tq.events) == 1 && tq.events[0].event == "" && tq.events[0].spec == nil)): + for _, e := range tq.events { + if e.restart { + tc.output = append(tc.output, output) + output = []string{} + } + if e.event != "" { + output = append(output, e.event) + } else if e.spec != nil { + output = append(output, e.spec.String()) + } else { + panic("invalid event") + } + } + continue + default: + // when we don't define the actual events, we generate them based on the input query + flags := 0 + if len(tq.events) == 1 { + flags = tq.events[0].flags + } + stmt, err := sqlparser.NewTestParser().Parse(tq.query) + require.NoError(ts.t, err) + bv := make(map[string]string) + isRowEvent := false + switch stmt.(type) { + case *sqlparser.Begin: + output = append(output, "begin") + case *sqlparser.Commit: + output = append(output, "gtid", "commit") + case *sqlparser.Insert: + isRowEvent = true + table, bv = ts.getBindVarsForInsert(stmt) + case *sqlparser.Update: + isRowEvent = true + table, bv = ts.getBindVarsForUpdate(stmt) + case *sqlparser.Delete: + isRowEvent = true + del := stmt.(*sqlparser.Delete) + table = del.TableExprs[0].(*sqlparser.AliasedTableExpr).As.String() + case *sqlparser.Set: + default: + _, ok := stmt.(sqlparser.DDLStatement) + if !ok { + require.FailNowf(ts.t, "unsupported statement type", "stmt: %s", stmt) + } + output = append(output, "gtid") + output = append(output, ts.getDDLEvent(tq.query)) + } + if isRowEvent { + fe := 
ts.fieldEvents[table] + if fe == nil { + require.FailNowf(ts.t, "field event for table %s not found", table) + } + // for the first row event, we send the field event as well, if a custom field event is not specified + if !ts.options.customFieldEvents && !ts.fieldEventsSent[table] { + output = append(output, fe.String()) + ts.fieldEventsSent[table] = true + } + output = append(output, ts.getRowEvent(table, bv, fe, stmt, uint32(flags))) + } + } + } + tc.input = input + tc.output = append(tc.output, output) + testcases = append(testcases, tc) + } + startPos := "current" + if ts.options.position != "" { + startPos = ts.options.position + } + runCases(ts.t, ts.options.filter, testcases, startPos, nil) +} + +func (ts *TestSpec) getDDLEvent(query string) string { + ddlEvent := &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_DDL, + Statement: query, + } + return ddlEvent.String() +} + +func (ts *TestSpec) getFieldEvent(table *schemadiff.CreateTableEntity) *TestFieldEvent { + var tfe TestFieldEvent + tfe.table = table.Name() + tfe.db = testenv.DBName + for _, col := range table.TableSpec.Columns { + tc := TestColumn{} + tc.name = col.Name.String() + sqlType := col.Type.SQLType() + tc.dataType = sqlType.String() + tc.dataTypeLowered = strings.ToLower(tc.dataType) + collationName := col.Type.Options.Collate + if collationName == "" { + // Use the default, which is derived from the mysqld server default set + // in the testenv. 
+ tc.collationID = testenv.DefaultCollationID + } else { + tc.collationID = testenv.CollationEnv.LookupByName(collationName) + } + collation := colldata.Lookup(tc.collationID) + switch tc.dataTypeLowered { + case "int32": + tc.len = lengthInt + tc.collationID = collations.CollationBinaryID + tc.colType = "int(11)" + case "varchar", "varbinary", "char", "binary": + l := *col.Type.Length + switch tc.dataTypeLowered { + case "binary", "varbinary": + tc.len = int64(l) + tc.collationID = collations.CollationBinaryID + default: + tc.len = int64(collation.Charset().MaxWidth()) * int64(l) + if tc.dataTypeLowered == "char" && collation.IsBinary() { + tc.dataType = "BINARY" + } + } + tc.colType = fmt.Sprintf("%s(%d)", tc.dataTypeLowered, l) + case "blob": + tc.len = lengthBlob + tc.collationID = collations.CollationBinaryID + tc.colType = "blob" + case "text": + tc.len = lengthText + tc.colType = "text" + case "set": + if collation.IsBinary() { + tc.len = lengthSetBinary + tc.dataType = "BINARY" + } else { + tc.len = lengthSet + } + tc.colType = fmt.Sprintf("%s(%s)", tc.dataTypeLowered, strings.Join(col.Type.EnumValues, ",")) + ts.metadata[getMetadataKey(table.Name(), tc.name)] = col.Type.EnumValues + tfe.enumSetStrings = true + case "enum": + tc.len = int64(len(col.Type.EnumValues) + 1) + if collation.IsBinary() { + tc.dataType = "BINARY" + } + tc.colType = fmt.Sprintf("%s(%s)", tc.dataTypeLowered, strings.Join(col.Type.EnumValues, ",")) + ts.metadata[getMetadataKey(table.Name(), tc.name)] = col.Type.EnumValues + tfe.enumSetStrings = true + case "json": + tc.colType = "json" + tc.len = lengthJSON + tc.collationID = collations.CollationBinaryID + default: + require.FailNowf(ts.t, "unknown sqlTypeString %s", tc.dataTypeLowered) + } + tfe.cols = append(tfe.cols, &tc) + } + return &tfe +} + +func getMetadataKey(table, col string) string { + return fmt.Sprintf("%s:%s", table, col) +} + +func (ts *TestSpec) setMetadataMap(table, col, value string) { + values := 
strings.Split(value, ",") + valuesReversed := slices.Clone(values) + slices.Reverse(valuesReversed) + ts.metadata[getMetadataKey(table, col)] = valuesReversed +} + +func (ts *TestSpec) getRowEvent(table string, bv map[string]string, fe *TestFieldEvent, stmt sqlparser.Statement, flags uint32) string { + ev := &binlogdatapb.RowEvent{ + TableName: table, + RowChanges: []*binlogdatapb.RowChange{ + { + Before: nil, + After: nil, + }, + }, + Flags: flags, + } + if !ignoreKeyspaceShardInFieldAndRowEvents { + ev.Keyspace = testenv.DBName + ev.Shard = "0" // this is the default shard + } + var row query.Row + for i, col := range fe.cols { + if fe.cols[i].skip { + continue + } + if col.dataTypeLowered == "binary" { + bv[col.name] = strings.TrimSuffix(bv[col.name], "\\0") + } + val := []byte(bv[col.name]) + l := int64(len(val)) + switch col.dataTypeLowered { + case "binary": + for l < col.len { + val = append(val, "\x00"...) + l++ + } + case "json": + sval := strings.Trim(string(val), "'") + sval = strings.ReplaceAll(sval, "\\", "") + val = []byte(sval) + l = int64(len(val)) + } + if slices.Equal(val, sqltypes.NullBytes) { + l = -1 + val = []byte{} + } + row.Lengths = append(row.Lengths, l) + row.Values = append(row.Values, val...) 
+ } + ev.RowChanges = ts.getRowChanges(table, stmt, &row) + vEvent := &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_ROW, + RowEvent: ev, + } + return vEvent.String() +} + +func (ts *TestSpec) getRowChanges(table string, stmt sqlparser.Statement, row *query.Row) []*binlogdatapb.RowChange { + var rowChanges []*binlogdatapb.RowChange + var rowChange binlogdatapb.RowChange + switch stmt.(type) { + case *sqlparser.Insert: + rowChange.After = row + ts.setCurrentState(table, row) + case *sqlparser.Update: + rowChange = *ts.getRowChangeForUpdate(table, row) + ts.setCurrentState(table, row) + case *sqlparser.Delete: + rowChange.Before = row + ts.setCurrentState(table, nil) + } + rowChanges = append(rowChanges, &rowChange) + return rowChanges +} + +func (ts *TestSpec) getRowChangeForUpdate(table string, newState *query.Row) *binlogdatapb.RowChange { + var rowChange binlogdatapb.RowChange + var bitmap byte + var before, after query.Row + + currentState := ts.getCurrentState(table) + if currentState == nil { + return nil + } + var currentValueIndex int64 + var hasSkip bool + for i, l := range currentState.Lengths { + skip := false + isPKColumn := false + for _, pkColumn := range ts.pkColumns[table] { + if pkColumn == ts.fieldEvents[table].cols[i].name { + isPKColumn = true + break + } + } + if ts.options.noblob { + switch ts.fieldEvents[table].cols[i].dataTypeLowered { + case "blob", "text": + currentValue := currentState.Values[currentValueIndex : currentValueIndex+l] + newValue := newState.Values[currentValueIndex : currentValueIndex+l] + if string(currentValue) == string(newValue) { + skip = true + hasSkip = true + } + } + } + if skip && !isPKColumn { + before.Lengths = append(before.Lengths, -1) + } else { + before.Values = append(before.Values, currentState.Values[currentValueIndex:currentValueIndex+l]...) 
+ before.Lengths = append(before.Lengths, l) + } + if skip { + after.Lengths = append(after.Lengths, -1) + } else { + after.Values = append(after.Values, newState.Values[currentValueIndex:currentValueIndex+l]...) + after.Lengths = append(after.Lengths, l) + bitmap |= 1 << uint(i) + } + currentValueIndex += l + } + rowChange.Before = &before + rowChange.After = &after + if hasSkip { + rowChange.DataColumns = &binlogdatapb.RowChange_Bitmap{ + Count: int64(len(currentState.Lengths)), + Cols: []byte{bitmap}, + } + } + return &rowChange +} + +func (ts *TestSpec) getBefore(table string) *query.Row { + currentState := ts.getCurrentState(table) + if currentState == nil { + return nil + } + var row query.Row + var currentValueIndex int64 + for i, l := range currentState.Lengths { + dataTypeIsRedacted := false + switch ts.fieldEvents[table].cols[i].dataTypeLowered { + case "blob", "text": + dataTypeIsRedacted = true + } + if ts.options.noblob && dataTypeIsRedacted { + row.Lengths = append(row.Lengths, -1) + } else { + row.Values = append(row.Values, currentState.Values[currentValueIndex:currentValueIndex+l]...) 
+ row.Lengths = append(row.Lengths, l) + } + currentValueIndex += l + } + return &row +} + +func (ts *TestSpec) Reset() { + for table := range ts.fieldEvents { + ts.fieldEventsSent[table] = false + } +} + +func (ts *TestSpec) SetStartPosition(pos string) { + ts.options.position = pos +} + +func getRowEvent(ts *TestSpec, fe *TestFieldEvent, query string) string { + stmt, err := sqlparser.NewTestParser().Parse(query) + var bv map[string]string + var table string + switch stmt.(type) { + case *sqlparser.Insert: + table, bv = ts.getBindVarsForInsert(stmt) + default: + panic("unhandled statement type for query " + query) + } + require.NoError(ts.t, err) + return ts.getRowEvent(table, bv, fe, stmt, 0) +} + +func getLastPKEvent(table, colName string, colType query.Type, colValue []sqltypes.Value, collationId, flags uint32) string { + lastPK := getQRFromLastPK([]*query.Field{{Name: colName, + Type: colType, Charset: collationId, + Flags: flags}}, colValue) + ev := &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_LASTPK, + LastPKEvent: &binlogdatapb.LastPKEvent{ + TableLastPK: &binlogdatapb.TableLastPK{TableName: table, Lastpk: lastPK}, + }, + } + return ev.String() +} + +func getCopyCompletedEvent(table string) string { + ev := &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_LASTPK, + LastPKEvent: &binlogdatapb.LastPKEvent{ + Completed: true, + TableLastPK: &binlogdatapb.TableLastPK{TableName: table}, + }, + } + return ev.String() +} + +func getQueryType(strType string) query.Type { + switch strType { + case "INT32": + return query.Type_INT32 + case "INT64": + return query.Type_INT64 + case "UINT64": + return query.Type_UINT64 + case "UINT32": + return query.Type_UINT32 + case "VARBINARY": + return query.Type_VARBINARY + case "BINARY": + return query.Type_BINARY + case "VARCHAR": + return query.Type_VARCHAR + case "CHAR": + return query.Type_CHAR + case "TEXT": + return query.Type_TEXT + case "BLOB": + return query.Type_BLOB + case "ENUM": + return 
query.Type_ENUM + case "SET": + return query.Type_SET + case "JSON": + return query.Type_JSON + default: + panic("unknown type " + strType) + } +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go b/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go index f514298e844..5d57effbadf 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -86,7 +87,7 @@ func TestFindColVindex(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { keyspace string @@ -149,7 +150,7 @@ func TestFindOrCreateVindex(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) lvs := &localVSchema{ keyspace: "ks1", @@ -204,7 +205,7 @@ func TestFindTable(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { keyspace string diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go deleted file mode 100644 index f3743c6de46..00000000000 --- a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vstreamer - -import ( - "context" - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/require" - - _flag "vitess.io/vitess/go/internal/flag" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/vt/dbconfigs" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" -) - -var ( - engine *Engine - env *testenv.Env - - ignoreKeyspaceShardInFieldAndRowEvents bool - testRowEventFlags bool -) - -func TestMain(m *testing.M) { - _flag.ParseFlagsForTest() - ignoreKeyspaceShardInFieldAndRowEvents = true - - exitCode := func() int { - var err error - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env, err = testenv.Init(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "%v", err) - return 1 - } - defer env.Close() - - // engine cannot be initialized in testenv because it introduces - // circular dependencies - engine = NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) - engine.InitDBConfig(env.KeyspaceName, env.ShardName) - engine.Open() - defer engine.Close() - - return m.Run() - }() - os.Exit(exitCode) -} - -func newEngine(t *testing.T, ctx context.Context, binlogRowImage string) { - if engine != nil { - engine.Close() - } - if env != nil { - env.Close() - } - var err error - env, err = testenv.Init(ctx) - require.NoError(t, err) - - setBinlogRowImage(t, binlogRowImage) - - // engine cannot be initialized in testenv because it introduces - // circular dependencies - engine = NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, 
env.Cells[0]) - engine.InitDBConfig(env.KeyspaceName, env.ShardName) - engine.Open() -} - -func customEngine(t *testing.T, modifier func(mysql.ConnParams) mysql.ConnParams) *Engine { - original, err := env.Dbcfgs.AppWithDB().MysqlParams() - require.NoError(t, err) - modified := modifier(*original) - config := env.TabletEnv.Config().Clone() - config.DB = dbconfigs.NewTestDBConfigs(modified, modified, modified.DbName) - - engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest"), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) - engine.InitDBConfig(env.KeyspaceName, env.ShardName) - engine.Open() - return engine -} - -func setBinlogRowImage(t *testing.T, mode string) { - execStatements(t, []string{ - fmt.Sprintf("set @@binlog_row_image='%s'", mode), - fmt.Sprintf("set @@session.binlog_row_image='%s'", mode), - fmt.Sprintf("set @@global.binlog_row_image='%s'", mode), - }) - -} diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_test.go new file mode 100644 index 00000000000..aade1511060 --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/main_test.go @@ -0,0 +1,396 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vstreamer + +import ( + "context" + "fmt" + "io" + "os" + "slices" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" +) + +var ( + engine *Engine + env *testenv.Env + + ignoreKeyspaceShardInFieldAndRowEvents bool + testRowEventFlags bool +) + +func TestMain(m *testing.M) { + _flag.ParseFlagsForTest() + ignoreKeyspaceShardInFieldAndRowEvents = true + + exitCode := func() int { + var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + env, err = testenv.Init(ctx) + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + defer env.Close() + + // engine cannot be initialized in testenv because it introduces + // circular dependencies + engine = NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + engine.InitDBConfig(env.KeyspaceName, env.ShardName) + engine.Open() + defer engine.Close() + + return m.Run() + }() + os.Exit(exitCode) +} + +func newEngine(t *testing.T, ctx context.Context, binlogRowImage string) { + if engine != nil { + engine.Close() + } + if env != nil { + env.Close() + } + var err error + env, err = testenv.Init(ctx) + require.NoError(t, err) + + setBinlogRowImage(t, binlogRowImage) + + // engine cannot be initialized in testenv because it introduces + // circular dependencies + engine = NewEngine(env.TabletEnv, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + engine.InitDBConfig(env.KeyspaceName, env.ShardName) + 
engine.Open() +} + +func customEngine(t *testing.T, modifier func(mysql.ConnParams) mysql.ConnParams) *Engine { + original, err := env.Dbcfgs.AppWithDB().MysqlParams() + require.NoError(t, err) + modified := modifier(*original) + cfg := env.TabletEnv.Config().Clone() + cfg.DB = dbconfigs.NewTestDBConfigs(modified, modified, modified.DbName) + + engine := NewEngine(tabletenv.NewEnv(vtenv.NewTestEnv(), cfg, "VStreamerTest"), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + engine.InitDBConfig(env.KeyspaceName, env.ShardName) + engine.Open() + return engine +} + +func setBinlogRowImage(t *testing.T, mode string) { + execStatements(t, []string{ + fmt.Sprintf("set @@binlog_row_image='%s'", mode), + fmt.Sprintf("set @@session.binlog_row_image='%s'", mode), + fmt.Sprintf("set @@global.binlog_row_image='%s'", mode), + }) +} + +func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, position string, tablePK []*binlogdatapb.TableLastPK) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wg, ch := startStream(ctx, t, filter, position, tablePK) + defer wg.Wait() + // If position is 'current', we wait for a heartbeat to be + // sure the vstreamer has started. 
+ if position == "current" { + log.Infof("Starting stream with current position") + expectLog(ctx, t, "current pos", ch, [][]string{{`gtid`, `type:OTHER`}}) + } + log.Infof("Starting to run test cases") + for _, tcase := range testcases { + switch input := tcase.input.(type) { + case []string: + execStatements(t, input) + case string: + execStatement(t, input) + default: + t.Fatalf("unexpected input: %#v", input) + } + engine.se.Reload(ctx) + expectLog(ctx, t, tcase.input, ch, tcase.output) + } + cancel() + if evs, ok := <-ch; ok { + t.Fatalf("unexpected evs: %v", evs) + } + log.Infof("Last line of runCases") +} + +func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlogdatapb.VEvent, output [][]string) { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() + for _, wantset := range output { + var evs []*binlogdatapb.VEvent + inCopyPhase := false + haveEnumOrSetField := func(fields []*querypb.Field) bool { + return slices.ContainsFunc(fields, func(f *querypb.Field) bool { + // We can't simply use querypb.Type_ENUM or querypb.Type_SET here + // because if a binary collation is used then the field Type will + // be BINARY. And we don't have the binlog event metadata from the + // original event any longer that we could use to get the MySQL type + // (which would still be ENUM or SET). So we instead look at the column + // type string value which will be e.g enum('s','m','l'). + colTypeStr := strings.ToLower(f.GetColumnType()) + if strings.HasPrefix(colTypeStr, "enum(") || strings.HasPrefix(colTypeStr, "set(") { + return true + } + return false + }) + } + for { + select { + case allevs, ok := <-ch: + if !ok { + require.FailNow(t, "expectLog: not ok, stream ended early") + } + for _, ev := range allevs { + // Ignore spurious heartbeats that can happen on slow machines. 
+ if ev.Throttled || ev.Type == binlogdatapb.VEventType_HEARTBEAT { + continue + } + switch ev.Type { + case binlogdatapb.VEventType_OTHER: + if strings.Contains(ev.Gtid, copyPhaseStart) { + inCopyPhase = true + } + case binlogdatapb.VEventType_COPY_COMPLETED: + inCopyPhase = false + case binlogdatapb.VEventType_FIELD: + // This is always set in the copy phase. It's also set in the + // running phase when the table has an ENUM or SET field. + ev.FieldEvent.EnumSetStringValues = inCopyPhase || haveEnumOrSetField(ev.FieldEvent.Fields) + } + evs = append(evs, ev) + } + case <-ctx.Done(): + require.Fail(t, "expectLog: Done(), stream ended early") + case <-timer.C: + require.Fail(t, "expectLog: timed out waiting for events: %v", wantset) + } + if len(evs) != 0 { + break + } + } + + numEventsToMatch := len(evs) + if len(wantset) != len(evs) { + log.Warningf("%v: evs\n%v, want\n%v, >> got length %d, wanted length %d", input, evs, wantset, len(evs), len(wantset)) + if len(wantset) < len(evs) { + numEventsToMatch = len(wantset) + } + } + for i := 0; i < numEventsToMatch; i++ { + want := wantset[i] + // CurrentTime is not testable. 
+ evs[i].CurrentTime = 0 + evs[i].Keyspace = "" + evs[i].Shard = "" + switch want { + case "begin": + if evs[i].Type != binlogdatapb.VEventType_BEGIN { + t.Fatalf("%v (%d): event: %v, want begin", input, i, evs[i]) + } + case "gtid": + if evs[i].Type != binlogdatapb.VEventType_GTID { + t.Fatalf("%v (%d): event: %v, want gtid", input, i, evs[i]) + } + case "lastpk": + if evs[i].Type != binlogdatapb.VEventType_LASTPK { + t.Fatalf("%v (%d): event: %v, want lastpk", input, i, evs[i]) + } + case "commit": + if evs[i].Type != binlogdatapb.VEventType_COMMIT { + t.Fatalf("%v (%d): event: %v, want commit", input, i, evs[i]) + } + case "other": + if evs[i].Type != binlogdatapb.VEventType_OTHER { + t.Fatalf("%v (%d): event: %v, want other", input, i, evs[i]) + } + case "ddl": + if evs[i].Type != binlogdatapb.VEventType_DDL { + t.Fatalf("%v (%d): event: %v, want ddl", input, i, evs[i]) + } + case "copy_completed": + if evs[i].Type != binlogdatapb.VEventType_COPY_COMPLETED { + t.Fatalf("%v (%d): event: %v, want copy_completed", input, i, evs[i]) + } + default: + evs[i].Timestamp = 0 + if evs[i].Type == binlogdatapb.VEventType_FIELD { + for j := range evs[i].FieldEvent.Fields { + evs[i].FieldEvent.Fields[j].Flags = 0 + if ignoreKeyspaceShardInFieldAndRowEvents { + evs[i].FieldEvent.Keyspace = "" + evs[i].FieldEvent.Shard = "" + } + } + } + if ignoreKeyspaceShardInFieldAndRowEvents && evs[i].Type == binlogdatapb.VEventType_ROW { + evs[i].RowEvent.Keyspace = "" + evs[i].RowEvent.Shard = "" + } + if !testRowEventFlags && evs[i].Type == binlogdatapb.VEventType_ROW { + evs[i].RowEvent.Flags = 0 + } + want = env.RemoveAnyDeprecatedDisplayWidths(want) + if got := fmt.Sprintf("%v", evs[i]); got != want { + log.Errorf("%v (%d): event:\n%q, want\n%q", input, i, got, want) + t.Fatalf("%v (%d): event:\n%q, want\n%q", input, i, got, want) + } + } + } + if len(wantset) != len(evs) { + t.Fatalf("%v: evs\n%v, want\n%v, got length %d, wanted length %d", input, evs, wantset, len(evs), 
len(wantset)) + } + } +} + +func startStream(ctx context.Context, t *testing.T, filter *binlogdatapb.Filter, position string, tablePKs []*binlogdatapb.TableLastPK) (*sync.WaitGroup, <-chan []*binlogdatapb.VEvent) { + switch position { + case "": + position = primaryPosition(t) + case "vscopy": + position = "" + } + + wg := sync.WaitGroup{} + wg.Add(1) + ch := make(chan []*binlogdatapb.VEvent) + + go func() { + defer close(ch) + defer wg.Done() + if vstream(ctx, t, position, tablePKs, filter, ch) != nil { + t.Log("vstream returned error") + } + }() + return &wg, ch +} + +func vstream(ctx context.Context, t *testing.T, pos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, ch chan []*binlogdatapb.VEvent) error { + if filter == nil { + filter = &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + }}, + } + } + return engine.Stream(ctx, pos, tablePKs, filter, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { + timer := time.NewTimer(2 * time.Second) + defer timer.Stop() + + log.Infof("Received events: %v", evs) + select { + case ch <- evs: + case <-ctx.Done(): + return fmt.Errorf("engine.Stream Done() stream ended early") + case <-timer.C: + t.Log("VStream timed out waiting for events") + return io.EOF + } + return nil + }) +} + +func execStatement(t *testing.T, query string) { + t.Helper() + if err := env.Mysqld.ExecuteSuperQuery(context.Background(), query); err != nil { + t.Fatal(err) + } +} + +func execStatements(t *testing.T, queries []string) { + if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), queries); err != nil { + t.Fatal(err) + } +} + +func primaryPosition(t *testing.T) string { + t.Helper() + // We use the engine's cp because there is one test that overrides + // the flavor to FilePos. If so, we have to obtain the position + // in that flavor format. 
+ connParam, err := engine.env.Config().DB.DbaWithDB().MysqlParams() + if err != nil { + t.Fatal(err) + } + conn, err := mysql.Connect(context.Background(), connParam) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + pos, err := conn.PrimaryPosition() + if err != nil { + t.Fatal(err) + } + return replication.EncodePosition(pos) +} + +func setVSchema(t *testing.T, vschema string) { + t.Helper() + + curCount := engine.vschemaUpdates.Get() + if err := env.SetVSchema(vschema); err != nil { + t.Fatal(err) + } + // Wait for curCount to go up. + updated := false + for i := 0; i < 10; i++ { + if engine.vschemaUpdates.Get() != curCount { + updated = true + break + } + time.Sleep(10 * time.Millisecond) + } + if !updated { + log.Infof("vschema did not get updated") + t.Error("vschema did not get updated") + } +} + +func insertSomeRows(t *testing.T, numRows int) { + var queries []string + for idx, query := range []string{ + "insert into t1 (id11, id12) values", + "insert into t2 (id21, id22) values", + } { + for i := 1; i <= numRows; i++ { + queries = append(queries, fmt.Sprintf("%s (%d, %d)", query, i, i*(idx+1)*10)) + } + } + execStatements(t, queries) +} diff --git a/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go b/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go index 35542e0a879..fc430543a36 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/packet_size_test.go @@ -18,7 +18,7 @@ package vstreamer import ( "math" - "math/rand" + "math/rand/v2" "testing" "time" @@ -35,7 +35,7 @@ func (p polynomial) fit(x float64) float64 { return y } -func simulate(t *testing.T, rand *rand.Rand, ps PacketSizer, base, mustSend int, interpolate func(float64) float64) (time.Duration, int) { +func simulate(t *testing.T, ps PacketSizer, base, mustSend int, interpolate func(float64) float64) (time.Duration, int) { t.Helper() var elapsed time.Duration @@ -45,7 +45,7 @@ func simulate(t *testing.T, rand 
*rand.Rand, ps PacketSizer, base, mustSend int, packetSize := 0 for sent < mustSend { - packetSize += rand.Intn(base / 100) + packetSize += rand.IntN(base / 100) if ps.ShouldSend(packetSize) { x := float64(packetSize) / packetRange @@ -91,16 +91,13 @@ func TestPacketSizeSimulation(t *testing.T) { for _, tc := range cases { t.Run(tc.name, func(t *testing.T) { - seed := time.Now().UnixNano() - rand := rand.New(rand.NewSource(seed)) - // Simulate a replication using the given polynomial and the dynamic packet sizer ps1 := newDynamicPacketSizer(tc.baseSize) - elapsed1, sent1 := simulate(t, rand, ps1, tc.baseSize, tc.baseSize*1000, tc.p.fit) + elapsed1, sent1 := simulate(t, ps1, tc.baseSize, tc.baseSize*1000, tc.p.fit) // Simulate the same polynomial using a fixed packet size ps2 := newFixedPacketSize(tc.baseSize) - elapsed2, sent2 := simulate(t, rand, ps2, tc.baseSize, tc.baseSize*1000, tc.p.fit) + elapsed2, sent2 := simulate(t, ps2, tc.baseSize, tc.baseSize*1000, tc.p.fit) // the simulation for dynamic packet sizing should always be faster then the fixed packet, // and should also send fewer packets in total diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index 30fbfdb7a01..2c768d7d3c6 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -56,6 +57,13 @@ type Plan struct { // Filters is the list of filters to be applied to the columns // of the table. Filters []Filter + + // Convert any integer values seen in the binlog events for ENUM or SET + // columns to the string values. 
The map is keyed on the column number, with + // the value being the map of ordinal values to string values. + EnumSetValuesMap map[int](map[int]string) + + env *vtenv.Environment } // Opcode enumerates the operators supported in a where clause @@ -162,14 +170,14 @@ func getOpcode(comparison *sqlparser.ComparisonExpr) (Opcode, error) { } // compare returns true after applying the comparison specified in the Filter to the actual data in the column -func compare(comparison Opcode, columnValue, filterValue sqltypes.Value, charset collations.ID) (bool, error) { +func compare(comparison Opcode, columnValue, filterValue sqltypes.Value, collationEnv *collations.Environment, charset collations.ID) (bool, error) { // use null semantics: return false if either value is null if columnValue.IsNull() || filterValue.IsNull() { return false, nil } // at this point neither values can be null // NullsafeCompare returns 0 if values match, -1 if columnValue < filterValue, 1 if columnValue > filterValue - result, err := evalengine.NullsafeCompare(columnValue, filterValue, charset) + result, err := evalengine.NullsafeCompare(columnValue, filterValue, collationEnv, charset, nil) if err != nil { return false, err } @@ -228,7 +236,7 @@ func (plan *Plan) filter(values, result []sqltypes.Value, charsets []collations. 
return false, nil } default: - match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, charsets[filter.ColNum]) + match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, plan.env.CollationEnv(), charsets[filter.ColNum]) if err != nil { return false, err } @@ -284,11 +292,11 @@ func mustSendStmt(query mysql.Query, dbname string) bool { return true } -func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter) bool { +func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter, parser *sqlparser.Parser) bool { if query.Database != "" && query.Database != dbname { return false } - ast, err := sqlparser.Parse(query.SQL) + ast, err := parser.Parse(query.SQL) // If there was a parsing error, we send it through. Hopefully, // recipient can handle it. if err != nil { @@ -338,13 +346,13 @@ func ruleMatches(tableName string, filter *binlogdatapb.Filter) bool { // tableMatches is similar to buildPlan below and MatchTable in vreplication/table_plan_builder.go. 
func tableMatches(table sqlparser.TableName, dbname string, filter *binlogdatapb.Filter) bool { - if !table.Qualifier.IsEmpty() && table.Qualifier.String() != dbname { + if table.Qualifier.NotEmpty() && table.Qualifier.String() != dbname { return false } return ruleMatches(table.Name.String(), filter) } -func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (*Plan, error) { +func buildPlan(env *vtenv.Environment, ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (*Plan, error) { for _, rule := range filter.Rules { switch { case strings.HasPrefix(rule.Match, "/"): @@ -356,9 +364,9 @@ func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (* if !result { continue } - return buildREPlan(ti, vschema, rule.Filter) + return buildREPlan(env, ti, vschema, rule.Filter) case rule.Match == ti.Name: - return buildTablePlan(ti, vschema, rule.Filter) + return buildTablePlan(env, ti, vschema, rule.Filter) } } return nil, nil @@ -366,8 +374,9 @@ func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (* // buildREPlan handles cases where Match has a regular expression. // If so, the Filter can be an empty string or a keyrange, like "-80". -func buildREPlan(ti *Table, vschema *localVSchema, filter string) (*Plan, error) { +func buildREPlan(env *vtenv.Environment, ti *Table, vschema *localVSchema, filter string) (*Plan, error) { plan := &Plan{ + env: env, Table: ti, } plan.ColExprs = make([]ColExpr, len(ti.Fields)) @@ -409,8 +418,8 @@ func buildREPlan(ti *Table, vschema *localVSchema, filter string) (*Plan, error) // BuildTablePlan handles cases where a specific table name is specified. // The filter must be a select statement. 
-func buildTablePlan(ti *Table, vschema *localVSchema, query string) (*Plan, error) { - sel, fromTable, err := analyzeSelect(query) +func buildTablePlan(env *vtenv.Environment, ti *Table, vschema *localVSchema, query string) (*Plan, error) { + sel, fromTable, err := analyzeSelect(query, env.Parser()) if err != nil { log.Errorf("%s", err.Error()) return nil, err @@ -422,6 +431,7 @@ func buildTablePlan(ti *Table, vschema *localVSchema, query string) (*Plan, erro plan := &Plan{ Table: ti, + env: env, } if err := plan.analyzeWhere(vschema, sel.Where); err != nil { log.Errorf("%s", err.Error()) @@ -439,8 +449,8 @@ func buildTablePlan(ti *Table, vschema *localVSchema, query string) (*Plan, erro return plan, nil } -func analyzeSelect(query string) (sel *sqlparser.Select, fromTable sqlparser.IdentifierCS, err error) { - statement, err := sqlparser.Parse(query) +func analyzeSelect(query string, parser *sqlparser.Parser) (sel *sqlparser.Select, fromTable sqlparser.IdentifierCS, err error) { + statement, err := parser.Parse(query) if err != nil { return nil, fromTable, err } @@ -528,15 +538,18 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er if !ok { return fmt.Errorf("unexpected: %v", sqlparser.String(expr)) } - //StrVal is varbinary, we do not support varchar since we would have to implement all collation types + // StrVal is varbinary, we do not support varchar since we would have to implement all collation types if val.Type != sqlparser.IntVal && val.Type != sqlparser.StrVal { return fmt.Errorf("unexpected: %v", sqlparser.String(expr)) } - pv, err := evalengine.Translate(val, nil) + pv, err := evalengine.Translate(val, &evalengine.Config{ + Collation: plan.env.CollationEnv().DefaultConnectionCharset(), + Environment: plan.env, + }) if err != nil { return err } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(plan.env) resolved, err := env.Evaluate(pv) if err != nil { return err @@ -544,7 +557,7 @@ func 
(plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er plan.Filters = append(plan.Filters, Filter{ Opcode: opcode, ColNum: colnum, - Value: resolved.Value(collations.Default()), + Value: resolved.Value(plan.env.CollationEnv().DefaultConnectionCharset()), }) case *sqlparser.FuncExpr: if !expr.Name.EqualString("in_keyrange") { @@ -702,7 +715,7 @@ func (plan *Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp return ColExpr{}, fmt.Errorf("unsupported function: %v", sqlparser.String(inner)) } case *sqlparser.Literal: - //allow only intval 1 + // allow only intval 1 if inner.Type != sqlparser.IntVal { return ColExpr{}, fmt.Errorf("only integer literals are supported") } @@ -762,9 +775,9 @@ func (plan *Plan) analyzeExpr(vschema *localVSchema, selExpr sqlparser.SelectExp // analyzeInKeyRange allows the following constructs: "in_keyrange('-80')", // "in_keyrange(col, 'hash', '-80')", "in_keyrange(col, 'local_vindex', '-80')", or // "in_keyrange(col, 'ks.external_vindex', '-80')". 
-func (plan *Plan) analyzeInKeyRange(vschema *localVSchema, exprs sqlparser.SelectExprs) error { +func (plan *Plan) analyzeInKeyRange(vschema *localVSchema, exprs sqlparser.Exprs) error { var colnames []sqlparser.IdentifierCI - var krExpr sqlparser.SelectExpr + var krExpr sqlparser.Expr whereFilter := Filter{ Opcode: VindexMatch, } @@ -779,14 +792,10 @@ func (plan *Plan) analyzeInKeyRange(vschema *localVSchema, exprs sqlparser.Selec krExpr = exprs[0] case len(exprs) >= 3: for _, expr := range exprs[:len(exprs)-2] { - aexpr, ok := expr.(*sqlparser.AliasedExpr) + qualifiedName, ok := expr.(*sqlparser.ColName) if !ok { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected: %T %s", expr, sqlparser.String(expr)) } - qualifiedName, ok := aexpr.Expr.(*sqlparser.ColName) - if !ok { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected: %T %s", aexpr.Expr, sqlparser.String(aexpr.Expr)) - } if !qualifiedName.Qualifier.IsEmpty() { return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported qualifier for column: %v", sqlparser.String(qualifiedName)) } @@ -830,16 +839,12 @@ func (plan *Plan) analyzeInKeyRange(vschema *localVSchema, exprs sqlparser.Selec return nil } -func selString(expr sqlparser.SelectExpr) (string, error) { - aexpr, ok := expr.(*sqlparser.AliasedExpr) - if !ok { - return "", fmt.Errorf("unsupported: %v", sqlparser.String(expr)) - } - val, ok := aexpr.Expr.(*sqlparser.Literal) +func selString(expr sqlparser.Expr) (string, error) { + val, ok := expr.(*sqlparser.Literal) if !ok { return "", fmt.Errorf("unsupported: %v", sqlparser.String(expr)) } - return string(val.Val), nil + return val.Val, nil } // buildVindexColumns builds the list of column numbers of the table diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go index 03001362073..e9721daa693 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go +++ 
b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go @@ -20,18 +20,17 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/proto/topodata" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -86,7 +85,7 @@ func init() { "ks": &kspb, }, } - vschema := vindexes.BuildVSchema(srvVSchema) + vschema := vindexes.BuildVSchema(srvVSchema, sqlparser.NewTestParser()) testLocalVSchema = &localVSchema{ keyspace: "ks", vschema: vschema, @@ -167,7 +166,7 @@ func TestMustSendDDL(t *testing.T) { }} for _, tcase := range testcases { q := mysql.Query{SQL: tcase.sql, Database: tcase.db} - got := mustSendDDL(q, "mydb", filter) + got := mustSendDDL(q, "mydb", filter, sqlparser.NewTestParser()) if got != tcase.output { t.Errorf("%v: %v, want %v", q, got, tcase.output) } @@ -259,6 +258,7 @@ func TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -289,6 +289,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0}, KeyRange: nil, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -311,6 +312,7 @@ func TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -333,6 +335,7 @@ func TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -355,6 +358,7 @@ func TestPlanBuilder(t *testing.T) { Flags: 
uint32(querypb.MySqlFlag_NUM_FLAG), }, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -385,6 +389,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0}, KeyRange: nil, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -415,6 +420,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0}, KeyRange: nil, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -445,6 +451,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: nil, KeyRange: nil, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t2, @@ -478,6 +485,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0, 1}, KeyRange: nil, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -501,6 +509,7 @@ func TestPlanBuilder(t *testing.T) { }, }}, convertUsingUTF8Columns: map[string]bool{"val": true}, + env: vtenv.NewTestEnv(), }, }, { inTable: regional, @@ -524,6 +533,7 @@ func TestPlanBuilder(t *testing.T) { Vindex: testLocalVSchema.vschema.Keyspaces["ks"].Vindexes["region_vdx"], VindexColumns: []int{0, 1}, }}, + env: vtenv.NewTestEnv(), }, }, { inTable: t1, @@ -581,10 +591,6 @@ func TestPlanBuilder(t *testing.T) { inTable: t1, inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id)"}, outErr: `unsupported: id`, - }, { - inTable: t1, - inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(*, 'hash', '-80')"}, - outErr: `[BUG] unexpected: *sqlparser.StarExpr *`, }, { inTable: t1, inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(1, 'hash', '-80')"}, @@ -622,11 +628,6 @@ func TestPlanBuilder(t *testing.T) { inTable: t1, inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select t1.id, val from t1"}, outErr: `unsupported qualifier for column: t1.id`, - }, { - // selString - inTable: t1, - inRule: &binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, *, '-80')"}, - outErr: `unsupported: *`, }, { inTable: t1, inRule: 
&binlogdatapb.Rule{Match: "t1", Filter: "select id, val from t1 where in_keyrange(id, 1+1, '-80')"}, @@ -634,7 +635,7 @@ func TestPlanBuilder(t *testing.T) { }} for _, tcase := range testcases { t.Run(tcase.inRule.String(), func(t *testing.T) { - plan, err := buildPlan(tcase.inTable, testLocalVSchema, &binlogdatapb.Filter{ + plan, err := buildPlan(vtenv.NewTestEnv(), tcase.inTable, testLocalVSchema, &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{tcase.inRule}, }) @@ -731,7 +732,7 @@ func TestPlanBuilderFilterComparison(t *testing.T) { for _, tcase := range testcases { t.Run(tcase.name, func(t *testing.T) { - plan, err := buildPlan(t1, testLocalVSchema, &binlogdatapb.Filter{ + plan, err := buildPlan(vtenv.NewTestEnv(), t1, testLocalVSchema, &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{Match: "t1", Filter: tcase.inFilter}}, }) @@ -775,7 +776,7 @@ func TestCompare(t *testing.T) { } for _, tc := range testcases { t.Run("", func(t *testing.T) { - got, err := compare(tc.opcode, tc.columnValue, tc.filterValue, collations.CollationUtf8mb4ID) + got, err := compare(tc.opcode, tc.columnValue, tc.filterValue, collations.MySQL8(), collations.CollationUtf8mb4ID) require.NoError(t, err) require.Equal(t, tc.want, got) }) diff --git a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go index 91f319fa2c5..4632bea672b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go @@ -23,9 +23,11 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) // resultStreamer streams the results of the requested query @@ -62,7 +64,7 @@ func (rs *resultStreamer) Cancel() { } func 
(rs *resultStreamer) Stream() error { - _, fromTable, err := analyzeSelect(rs.query) + _, fromTable, err := analyzeSelect(rs.query, rs.vse.env.Environment().Parser()) if err != nil { return err } @@ -97,6 +99,8 @@ func (rs *resultStreamer) Stream() error { response := &binlogdatapb.VStreamResultsResponse{} byteCount := 0 + loggerName := fmt.Sprintf("%s (%v)", rs.vse.GetTabletInfo(), rs.tableName) + logger := logutil.NewThrottledLogger(loggerName, throttledLoggerInterval) for { select { case <-rs.ctx.Done(): @@ -106,6 +110,7 @@ func (rs *resultStreamer) Stream() error { // check throttler. if !rs.vse.throttlerClient.ThrottleCheckOKOrWaitAppName(rs.ctx, throttlerapp.ResultStreamerName) { + logger.Infof("throttled.") continue } diff --git a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer_test.go index a349c89f0a3..964e06362cd 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer_test.go @@ -43,7 +43,6 @@ func TestStreamResults(t *testing.T) { defer execStatements(t, []string{ "drop table t1", }) - engine.se.Reload(context.Background()) query := "select id, val from t1 order by id" wantStream := []string{ diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index bd259864981..bb8ff7af85f 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -19,43 +19,33 @@ package vstreamer import ( "context" "fmt" + "net/url" "sync" "time" "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/timer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb 
"vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) var ( rowStreamertHeartbeatInterval = 10 * time.Second ) -// RowStreamer exposes an externally usable interface to rowStreamer. -type RowStreamer interface { - Stream() error - Cancel() -} - -// NewRowStreamer returns a RowStreamer -func NewRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, query string, lastpk []sqltypes.Value, send func(*binlogdatapb.VStreamRowsResponse) error, vse *Engine, mode RowStreamerMode) RowStreamer { - return newRowStreamer(ctx, cp, se, query, lastpk, &localVSchema{vschema: &vindexes.VSchema{}}, send, vse, mode, nil) -} - type RowStreamerMode int32 const ( @@ -151,27 +141,12 @@ func (rs *rowStreamer) Stream() error { func (rs *rowStreamer) buildPlan() error { // This pre-parsing is required to extract the table name // and create its metadata. - sel, fromTable, err := analyzeSelect(rs.query) + sel, fromTable, err := analyzeSelect(rs.query, rs.se.Environment().Parser()) if err != nil { return err } - st, err := rs.se.GetTableForPos(fromTable, "") - if err != nil { - // There is a scenario where vstreamer's table state can be out-of-date, and this happens - // with vitess migrations, based on vreplication. - // Vitess migrations use an elaborate cut-over flow where tables are swapped away while traffic is - // being blocked. The RENAME flow is such that at some point the table is renamed away, leaving a - // "puncture"; this is an event that is captured by vstreamer. 
The completion of the flow fixes the - // puncture, and places a new table under the original table's name, but the way it is done does not - // cause vstreamer to refresh schema state. - // There is therefore a reproducable valid sequence of events where vstreamer thinks a table does not - // exist, where it in fact does exist. - // For this reason we give vstreamer a "second chance" to review the up-to-date state of the schema. - // In the future, we will reduce this operation to reading a single table rather than the entire schema. - rs.se.ReloadAt(context.Background(), replication.Position{}) - st, err = rs.se.GetTableForPos(fromTable, "") - } + st, err := rs.se.GetTableForPos(rs.ctx, fromTable, "") if err != nil { return err } @@ -179,7 +154,7 @@ func (rs *rowStreamer) buildPlan() error { Name: st.Name, } - ti.Fields, err = getFields(rs.ctx, rs.cp, st.Name, rs.cp.DBName(), st.Fields) + ti.Fields, err = getFields(rs.ctx, rs.cp, rs.vse.se, st.Name, rs.cp.DBName(), st.Fields) if err != nil { return err } @@ -188,7 +163,7 @@ func (rs *rowStreamer) buildPlan() error { // This is because the row format of a read is identical // to the row format of a binlog event. So, the same // filtering will work. - rs.plan, err = buildTablePlan(ti, rs.vschema, rs.query) + rs.plan, err = buildTablePlan(rs.se.Environment(), ti, rs.vschema, rs.query) if err != nil { log.Errorf("%s", err.Error()) return err @@ -201,7 +176,12 @@ func (rs *rowStreamer) buildPlan() error { return err } } - + if s, found := directives.GetString("ukForce", ""); found { + st.PKIndexName, err = url.QueryUnescape(s) + if err != nil { + return err + } + } rs.pkColumns, err = rs.buildPKColumns(st) if err != nil { return err @@ -235,7 +215,7 @@ func (rs *rowStreamer) buildPKColumns(st *binlogdatapb.MinimalTable) ([]int, err var pkColumns = make([]int, 0) if len(st.PKColumns) == 0 { // Use a PK equivalent if one exists. 
- pkColumns, err := rs.vse.mapPKEquivalentCols(rs.ctx, st) + pkColumns, err := rs.vse.mapPKEquivalentCols(rs.ctx, rs.cp, st) if err == nil && len(pkColumns) != 0 { return pkColumns, nil } @@ -279,8 +259,11 @@ func (rs *rowStreamer) buildSelect(st *binlogdatapb.MinimalTable) (string, error // of the PK columns which are used in the ORDER BY clause below. var indexHint string if st.PKIndexName != "" { - indexHint = fmt.Sprintf(" force index (%s)", - sqlescape.EscapeID(sqlescape.UnescapeID(st.PKIndexName))) + escapedPKIndexName, err := sqlescape.EnsureEscaped(st.PKIndexName) + if err != nil { + return "", err + } + indexHint = fmt.Sprintf(" force index (%s)", escapedPKIndexName) } buf.Myprintf(" from %v%s", sqlparser.NewIdentifierCS(rs.plan.Table.Name), indexHint) if len(rs.lastpk) != 0 { @@ -397,6 +380,7 @@ func (rs *rowStreamer) streamQuery(send func(*binlogdatapb.VStreamRowsResponse) filtered := make([]sqltypes.Value, len(rs.plan.ColExprs)) lastpk := make([]sqltypes.Value, len(rs.pkColumns)) byteCount := 0 + logger := logutil.NewThrottledLogger(rs.vse.GetTabletInfo(), throttledLoggerInterval) for { if rs.ctx.Err() != nil { log.Infof("Stream ended because of ctx.Done") @@ -408,6 +392,7 @@ func (rs *rowStreamer) streamQuery(send func(*binlogdatapb.VStreamRowsResponse) throttleResponseRateLimiter.Do(func() error { return safeSend(&binlogdatapb.VStreamRowsResponse{Throttled: true}) }) + logger.Infof("throttled.") continue } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go index 9828481397b..48d11d9e856 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go @@ -21,18 +21,68 @@ import ( "fmt" "regexp" "testing" - "time" - - "vitess.io/vitess/go/vt/log" "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + 
"vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) +// TestRowStreamerQuery validates that the correct force index hint and order by is added to the rowstreamer query. +func TestRowStreamerQuery(t *testing.T) { + execStatements(t, []string{ + "create table t1(id int, uk1 int, val varbinary(128), primary key(id), unique key uk2 (uk1))", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + // We need to StreamRows, to get an initialized RowStreamer. + // Note that the query passed into StreamRows is overwritten while running the test. + err := engine.StreamRows(context.Background(), "select * from t1", nil, func(rows *binlogdatapb.VStreamRowsResponse) error { + type testCase struct { + directives string + sendQuerySuffix string + } + queryTemplate := "select %s id, uk1, val from t1" + getQuery := func(directives string) string { + return fmt.Sprintf(queryTemplate, directives) + } + sendQueryPrefix := "select /*+ MAX_EXECUTION_TIME(3600000) */ id, uk1, val from t1" + testCases := []testCase{ + {"", "force index (`PRIMARY`) order by id"}, + {"/*vt+ ukColumns=\"uk1\" ukForce=\"uk2\" */", "force index (`uk2`) order by uk1"}, + {"/*vt+ ukForce=\"uk2\" */", "force index (`uk2`) order by uk1"}, + {"/*vt+ ukColumns=\"uk1\" */", "order by uk1"}, + } + + for _, tc := range testCases { + t.Run(tc.directives, func(t *testing.T) { + var err error + var rs *rowStreamer + // Depending on the order of the test cases, the index of the engine.rowStreamers slice may change. 
+ for _, rs2 := range engine.rowStreamers { + if rs2 != nil { + rs = rs2 + break + } + } + require.NotNil(t, rs) + rs.query = getQuery(tc.directives) + err = rs.buildPlan() + require.NoError(t, err) + want := fmt.Sprintf("%s %s", sendQueryPrefix, tc.sendQuerySuffix) + require.Equal(t, want, rs.sendQuery) + }) + } + return nil + }) + require.NoError(t, err) +} + func TestStreamRowsScan(t *testing.T) { if testing.Short() { t.Skip() @@ -64,8 +114,6 @@ func TestStreamRowsScan(t *testing.T) { "drop table t5", }) - engine.se.Reload(context.Background()) - // t1: simulates rollup wantStream := []string{ `fields:{name:"1" type:INT64 charset:63} pkfields:{name:"id" type:INT32 charset:63}`, @@ -178,13 +226,13 @@ func TestStreamRowsScan(t *testing.T) { wantQuery = "select /*+ MAX_EXECUTION_TIME(3600000) */ id1, id2, id3, val from t5 force index (`id1_id2_id3`) where (id1 = 1 and id2 = 2 and id3 > 3) or (id1 = 1 and id2 > 2) or (id1 > 1) order by id1, id2, id3" checkStream(t, "select * from t5", []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2), sqltypes.NewInt64(3)}, wantQuery, wantStream) - // t1: test for unsupported integer literal + // t5: test for unsupported integer literal wantError := "only the integer literal 1 is supported" - expectStreamError(t, "select 2 from t1", wantError) + expectStreamError(t, "select 2 from t5", wantError) - // t1: test for unsupported literal type + // t5: test for unsupported literal type wantError = "only integer literals are supported" - expectStreamError(t, "select 'a' from t1", wantError) + expectStreamError(t, "select 'a' from t5", wantError) } func TestStreamRowsUnicode(t *testing.T) { @@ -206,11 +254,10 @@ func TestStreamRowsUnicode(t *testing.T) { engine = savedEngine }() engine = customEngine(t, func(in mysql.ConnParams) mysql.ConnParams { - in.Charset = "latin1" + in.Charset = collations.CollationLatin1Swedish return in }) defer engine.Close() - engine.se.Reload(context.Background()) // We need a latin1 connection. 
conn, err := env.Mysqld.GetDbaConnection(context.Background()) if err != nil { @@ -246,7 +293,6 @@ func TestStreamRowsKeyRange(t *testing.T) { if testing.Short() { t.Skip() } - engine.se.Reload(context.Background()) if err := env.SetVSchema(shardedVSchema); err != nil { t.Fatal(err) @@ -261,9 +307,6 @@ func TestStreamRowsKeyRange(t *testing.T) { defer execStatements(t, []string{ "drop table t1", }) - engine.se.Reload(context.Background()) - - time.Sleep(1 * time.Second) // Only the first row should be returned, but lastpk should be 6. wantStream := []string{ @@ -294,9 +337,6 @@ func TestStreamRowsFilterInt(t *testing.T) { defer execStatements(t, []string{ "drop table t1", }) - engine.se.Reload(context.Background()) - - time.Sleep(1 * time.Second) wantStream := []string{ `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63}`, @@ -327,9 +367,6 @@ func TestStreamRowsFilterVarBinary(t *testing.T) { defer execStatements(t, []string{ "drop table t1", }) - engine.se.Reload(context.Background()) - - time.Sleep(1 * time.Second) wantStream := []string{ `fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id1" type:INT32 charset:63}`, @@ -355,7 +392,6 @@ func TestStreamRowsMultiPacket(t *testing.T) { defer execStatements(t, []string{ "drop table t1", }) - engine.se.Reload(context.Background()) wantStream := []string{ `fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 
column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} pkfields:{name:"id" type:INT32 charset:63}`, @@ -383,7 +419,6 @@ func TestStreamRowsCancel(t *testing.T) { defer execStatements(t, []string{ "drop table t1", }) - engine.se.Reload(context.Background()) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go index b9a3a70ea98..bf49ca46618 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go +++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn.go @@ -19,18 +19,21 @@ package vstreamer import ( "context" "fmt" + "strings" "sync/atomic" "time" "github.com/spf13/pflag" "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" ) // If the current binary log is greater than this byte size, we @@ -77,7 +80,7 @@ func (conn *snapshotConn) streamWithSnapshot(ctx context.Context, table, query s // Rotating the log when it's above a certain size ensures that we are processing // a relatively small binary log that will be minimal in size and GTID events. // We only attempt to rotate it if the current log is of any significant size to - // avoid too many unecessary rotations. + // avoid too many unnecessary rotations. if rotatedLog, err = conn.limitOpenBinlogSize(); err != nil { // This is a best effort operation meant to lower overhead and improve performance. // Thus it should not be required, nor cause the operation to fail. 
@@ -112,18 +115,15 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid defer func() { _, err := lockConn.ExecuteFetch("unlock tables", 0, false) if err != nil { - log.Warning("Unlock tables failed: %v", err) - } else { - log.Infof("Tables unlocked: %v", table) + log.Warning("Unlock tables (%s) failed: %v", table, err) } lockConn.Close() }() tableName := sqlparser.String(sqlparser.NewIdentifierCS(table)) - log.Infof("Locking table %s for copying", table) if _, err := lockConn.ExecuteFetch(fmt.Sprintf("lock tables %s read", tableName), 1, false); err != nil { - log.Infof("Error locking table %s to read", tableName) + log.Warningf("Error locking table %s to read: %v", tableName, err) return "", err } mpos, err := lockConn.PrimaryPosition() @@ -136,7 +136,7 @@ func (conn *snapshotConn) startSnapshot(ctx context.Context, table string) (gtid if _, err := conn.ExecuteFetch("set transaction isolation level repeatable read", 1, false); err != nil { return "", err } - if _, err := conn.ExecuteFetch("start transaction with consistent snapshot", 1, false); err != nil { + if _, err := conn.ExecuteFetch("start transaction with consistent snapshot, read only", 1, false); err != nil { return "", err } if _, err := conn.ExecuteFetch("set @@session.time_zone = '+00:00'", 1, false); err != nil { @@ -152,7 +152,7 @@ func (conn *snapshotConn) startSnapshotWithConsistentGTID(ctx context.Context) ( if _, err := conn.ExecuteFetch("set transaction isolation level repeatable read", 1, false); err != nil { return "", err } - result, err := conn.ExecuteFetch("start transaction with consistent snapshot", 1, false) + result, err := conn.ExecuteFetch("start transaction with consistent snapshot, read only", 1, false) if err != nil { return "", err } @@ -168,7 +168,7 @@ func (conn *snapshotConn) startSnapshotWithConsistentGTID(ctx context.Context) ( return replication.EncodePosition(mpos), nil } -// Close rollsback any open transactions and closes the connection. 
+// Close rolls back any open transactions and closes the connection. func (conn *snapshotConn) Close() { _, _ = conn.ExecuteFetch("rollback", 1, false) conn.Conn.Close() @@ -241,8 +241,44 @@ func (conn *snapshotConn) startSnapshotAllTables(ctx context.Context) (gtid stri log.Infof("Locking all tables") if _, err := lockConn.ExecuteFetch("FLUSH TABLES WITH READ LOCK", 1, false); err != nil { + attemptExplicitTablesLocks := false + if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERAccessDeniedError { + // Access denied. On some systems this is either because the user doesn't have SUPER or RELOAD privileges. + // On some other systems, namely RDS, the command is just unsupported. + // There is an alternative way: run a `LOCK TABLES tbl1 READ, tbl2 READ, ...` for all tables. It not as + // efficient, and make a huge query, but still better than nothing. + attemptExplicitTablesLocks = true + } log.Infof("Error locking all tables") - return "", err + if !attemptExplicitTablesLocks { + return "", err + } + // get list of all tables + rs, err := conn.ExecuteFetch("show full tables", -1, true) + if err != nil { + return "", err + } + + var lockClauses []string + for _, row := range rs.Rows { + tableName := row[0].ToString() + tableType := row[1].ToString() + if tableType != "BASE TABLE" { + continue + } + tableName = sqlparser.String(sqlparser.NewIdentifierCS(tableName)) + lockClause := fmt.Sprintf("%s read", tableName) + lockClauses = append(lockClauses, lockClause) + } + if len(lockClauses) > 0 { + query := fmt.Sprintf("lock tables %s", strings.Join(lockClauses, ",")) + if _, err := lockConn.ExecuteFetch(query, 1, false); err != nil { + log.Error(vterrors.Wrapf(err, "explicitly locking all %v tables", len(lockClauses))) + return "", err + } + } else { + log.Infof("explicit lock tables: no tables found") + } } mpos, err := lockConn.PrimaryPosition() if err != nil { @@ -254,7 +290,7 @@ func (conn *snapshotConn) startSnapshotAllTables(ctx 
context.Context) (gtid stri if _, err := conn.ExecuteFetch("set transaction isolation level repeatable read", 1, false); err != nil { return "", err } - if _, err := conn.ExecuteFetch("start transaction with consistent snapshot", 1, false); err != nil { + if _, err := conn.ExecuteFetch("start transaction with consistent snapshot, read only", 1, false); err != nil { return "", err } if _, err := conn.ExecuteFetch("set @@session.time_zone = '+00:00'", 1, false); err != nil { diff --git a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go index eca56797db5..cc0899bc14b 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/snapshot_conn_test.go @@ -53,7 +53,7 @@ func TestStartSnapshot(t *testing.T) { Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1), sqltypes.NewVarBinary("aaa")}, }, - StatusFlags: sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit | sqltypes.ServerStatusInTrans, + StatusFlags: sqltypes.ServerStatusInTransReadonly | sqltypes.ServerStatusNoIndexUsed | sqltypes.ServerStatusAutocommit | sqltypes.ServerStatusInTrans, } qr, err := conn.ExecuteFetch("select * from t1", 10, false) require.NoError(t, err) diff --git a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go index 0bbd265435b..80f850dae2e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer.go @@ -23,12 +23,12 @@ import ( "strings" "sync/atomic" - "vitess.io/vitess/go/vt/vttablet" - "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -117,12 +117,16 @@ func (ts 
*tableStreamer) Stream() error { return err } - rs, err := conn.ExecuteFetch("show tables", -1, true) + rs, err := conn.ExecuteFetch("show full tables", -1, true) if err != nil { return err } for _, row := range rs.Rows { tableName := row[0].ToString() + tableType := row[1].ToString() + if tableType != tmutils.TableBaseTable { + continue + } if schema2.IsInternalOperationTableName(tableName) { log.Infof("Skipping internal table %s", tableName) continue diff --git a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go index bc6ba98d636..9be3940c01d 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/tablestreamer_test.go @@ -51,8 +51,6 @@ func TestTableStreamer(t *testing.T) { "drop table t4", }) - engine.se.Reload(context.Background()) - wantStream := []string{ "table_name:\"t1\" fields:{name:\"id\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id\" column_length:11 charset:63 flags:53251} fields:{name:\"val\" type:VARBINARY table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"val\" column_length:128 charset:63 flags:128} pkfields:{name:\"id\" type:INT32 charset:63 flags:53251}", "table_name:\"t1\" rows:{lengths:1 lengths:3 values:\"1aaa\"} rows:{lengths:1 lengths:3 values:\"2bbb\"} lastpk:{lengths:1 values:\"2\"}", diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go index c40e180110f..1d49db8c503 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -25,11 +25,15 @@ import ( "strings" "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" 
"vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttest" @@ -39,6 +43,38 @@ import ( vttestpb "vitess.io/vitess/go/vt/proto/vttest" ) +const ( + DBName = "vttest" + DefaultCollationName = "utf8mb4_0900_ai_ci" + DefaultShard = "0" +) + +var ( + // These are exported to coordinate on version specific + // behavior between the testenv and its users. + CollationEnv *collations.Environment + DefaultCollationID collations.ID + MySQLVersion string +) + +func init() { + vs, err := mysqlctl.GetVersionString() + if err != nil { + panic("could not get MySQL version: " + err.Error()) + } + _, mv, err := mysqlctl.ParseVersionString(vs) + if err != nil { + panic("could not parse MySQL version: " + err.Error()) + } + MySQLVersion = fmt.Sprintf("%d.%d.%d", mv.Major, mv.Minor, mv.Patch) + log.Infof("MySQL version: %s", MySQLVersion) + CollationEnv = collations.NewEnvironment(MySQLVersion) + // utf8mb4_general_ci is the default for MySQL 5.7 and + // utf8mb4_0900_ai_ci is the default for MySQL 8.0. + DefaultCollationID = CollationEnv.DefaultConnectionCharset() + log.Infof("Default collation ID: %d", DefaultCollationID) +} + // Env contains all the env vars for a test against a mysql instance. type Env struct { cluster *vttest.LocalCluster @@ -63,7 +99,7 @@ type Env struct { // Init initializes an Env. 
func Init(ctx context.Context) (*Env, error) { te := &Env{ - KeyspaceName: "vttest", + KeyspaceName: DBName, ShardName: "0", Cells: []string{"cell1"}, } @@ -75,7 +111,8 @@ func Init(ctx context.Context) (*Env, error) { if err := te.TopoServ.CreateShard(ctx, te.KeyspaceName, te.ShardName); err != nil { panic(err) } - te.SrvTopo = srvtopo.NewResilientServer(ctx, te.TopoServ, "TestTopo") + counts := stats.NewCountersWithSingleLabel("", "Resilient srvtopo server operations", "type") + te.SrvTopo = srvtopo.NewResilientServer(ctx, te.TopoServ, counts) cfg := vttest.Config{ Topology: &vttestpb.VTTestTopology{ @@ -85,14 +122,14 @@ func Init(ctx context.Context) (*Env, error) { Shards: []*vttestpb.Shard{ { Name: "0", - DbNameOverride: "vttest", + DbNameOverride: DBName, }, }, }, }, }, OnlyMySQL: true, - Charset: "utf8mb4_general_ci", + Charset: CollationEnv.LookupName(DefaultCollationID), ExtraMyCnf: strings.Split(os.Getenv("EXTRA_MY_CNF"), ":"), } te.cluster = &vttest.LocalCluster{ @@ -103,11 +140,18 @@ func Init(ctx context.Context) (*Env, error) { return nil, fmt.Errorf("could not launch mysql: %v", err) } te.Dbcfgs = dbconfigs.NewTestDBConfigs(te.cluster.MySQLConnParams(), te.cluster.MySQLAppDebugConnParams(), te.cluster.DbName()) - config := tabletenv.NewDefaultConfig() - config.DB = te.Dbcfgs - te.TabletEnv = tabletenv.NewEnv(config, "VStreamerTest") + conf := tabletenv.NewDefaultConfig() + conf.DB = te.Dbcfgs + vtenvCfg := vtenv.Options{ + MySQLServerVersion: MySQLVersion, + } + vtenv, err := vtenv.New(vtenvCfg) + if err != nil { + return nil, fmt.Errorf("could not initialize new vtenv: %v", err) + } + te.TabletEnv = tabletenv.NewEnv(vtenv, conf, "VStreamerTest") te.Mysqld = mysqlctl.NewMysqld(te.Dbcfgs) - pos, _ := te.Mysqld.PrimaryPosition() + pos, _ := te.Mysqld.PrimaryPosition(ctx) if strings.HasPrefix(strings.ToLower(pos.GTIDSet.Flavor()), string(mysqlctl.FlavorMariaDB)) { te.DBType = string(mysqlctl.FlavorMariaDB) } else { @@ -118,6 +162,9 @@ func Init(ctx 
context.Context) (*Env, error) { if err != nil { return nil, fmt.Errorf("could not get server version: %w", err) } + if !strings.Contains(dbVersionStr, MySQLVersion) { + return nil, fmt.Errorf("MySQL version mismatch between mysqlctl %s and mysqld %s", MySQLVersion, dbVersionStr) + } _, version, err := mysqlctl.ParseVersionString(dbVersionStr) if err != nil { return nil, fmt.Errorf("could not parse server version %q: %w", dbVersionStr, err) diff --git a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go index 203052e981e..389c06a671e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/uvstreamer_flaky_test.go @@ -69,13 +69,14 @@ const ( bulkInsertQuery = "insert into %s (id%d1, id%d2) values " insertQuery = "insert into %s (id%d1, id%d2) values (%d, %d)" numInitialRows = 10 + copyPhaseStart = "Copy Start" ) -type state struct { +type TestState struct { tables []string } -var testState = &state{} +var testState = &TestState{} var positions map[string]string var allEvents []*binlogdatapb.VEvent @@ -173,7 +174,6 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { } ctx, cancel := context.WithCancel(context.Background()) defer cancel() - engine.se.Reload(context.Background()) defer execStatements(t, []string{ "drop table t1", @@ -206,7 +206,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { } // Test event called after t1 copy is complete - callbacks["OTHER.*Copy Start t2"] = func() { + callbacks[fmt.Sprintf("OTHER.*%s t2", copyPhaseStart)] = func() { conn, err := env.Mysqld.GetDbaConnection(ctx) require.NoError(t, err) defer conn.Close() @@ -220,7 +220,7 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { } - callbacks["OTHER.*Copy Start t3"] = func() { + callbacks[fmt.Sprintf("OTHER.*%s t3", copyPhaseStart)] = func() { conn, err := env.Mysqld.GetDbaConnection(ctx) require.NoError(t, err) defer conn.Close() @@ -240,17 
+240,17 @@ func TestVStreamCopyCompleteFlow(t *testing.T) { insertRow(t, "t1", 1, numInitialRows+4) insertRow(t, "t2", 2, numInitialRows+3) // savepoints should not be sent in the event stream - execStatement(t, ` -begin; -insert into t3 (id31, id32) values (12, 360); -savepoint a; -insert into t3 (id31, id32) values (13, 390); -rollback work to savepoint a; -savepoint b; -insert into t3 (id31, id32) values (13, 390); -release savepoint b; -commit;" -`) + execStatements(t, []string{ + "begin", + "insert into t3 (id31, id32) values (12, 360)", + "savepoint a", + "insert into t3 (id31, id32) values (13, 390)", + "rollback work to savepoint a", + "savepoint b", + "insert into t3 (id31, id32) values (13, 390)", + "release savepoint b", + "commit", + }) } numCopyEvents := 3 /*t1,t2,t3*/ * (numInitialRows + 1 /*FieldEvent*/ + 1 /*LastPKEvent*/ + 1 /*TestEvent: Copy Start*/ + 2 /*begin,commit*/ + 3 /* LastPK Completed*/) @@ -298,18 +298,29 @@ commit;" } func validateReceivedEvents(t *testing.T) { + inCopyPhase := false for i, ev := range allEvents { ev.Timestamp = 0 - if ev.Type == binlogdatapb.VEventType_FIELD { + switch ev.Type { + case binlogdatapb.VEventType_OTHER: + if strings.Contains(ev.Gtid, copyPhaseStart) { + inCopyPhase = true + } + case binlogdatapb.VEventType_FIELD: for j := range ev.FieldEvent.Fields { ev.FieldEvent.Fields[j].Flags = 0 ev.FieldEvent.Keyspace = "" ev.FieldEvent.Shard = "" + // We always set this in the copy phase. In the + // running phase we only set it IF the table has + // an ENUM or SET column. 
+ ev.FieldEvent.EnumSetStringValues = inCopyPhase } - } - if ev.Type == binlogdatapb.VEventType_ROW { + case binlogdatapb.VEventType_ROW: ev.RowEvent.Keyspace = "" ev.RowEvent.Shard = "" + case binlogdatapb.VEventType_COPY_COMPLETED: + inCopyPhase = false } got := ev.String() want := env.RemoveAnyDeprecatedDisplayWidths(expectedEvents[i]) @@ -469,9 +480,9 @@ func startVStreamCopy(ctx context.Context, t *testing.T, filter *binlogdatapb.Fi } var expectedEvents = []string{ - "type:OTHER gtid:\"Copy Start t1\"", + fmt.Sprintf("type:OTHER gtid:\"%s t1\"", copyPhaseStart), "type:BEGIN", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:GTID", "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:2 values:\"110\"}}}", "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:2 values:\"220\"}}}", @@ -489,18 +500,18 @@ var expectedEvents = []string{ "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\"} completed:true}", "type:COMMIT", "type:BEGIN", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" 
org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:2 lengths:3 values:\"11110\"}}}", "type:GTID", "type:COMMIT", //insert for t2 done along with t1 does not generate an event since t2 is not yet copied - "type:OTHER gtid:\"Copy Start t2\"", + fmt.Sprintf("type:OTHER gtid:\"%s t2\"", copyPhaseStart), "type:BEGIN", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:2 lengths:3 values:\"12120\"}}}", "type:GTID", "type:COMMIT", "type:BEGIN", - "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 
column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:1 lengths:2 values:\"120\"}}}", "type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:1 lengths:2 values:\"240\"}}}", "type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:1 lengths:2 values:\"360\"}}}", @@ -517,19 +528,19 @@ var expectedEvents = []string{ "type:BEGIN", "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2\"} completed:true}", "type:COMMIT", - "type:OTHER gtid:\"Copy Start t3\"", + fmt.Sprintf("type:OTHER gtid:\"%s t3\"", copyPhaseStart), "type:BEGIN", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:2 lengths:3 values:\"13130\"}}}", "type:GTID", "type:COMMIT", "type:BEGIN", - "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" 
org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:2 lengths:3 values:\"12240\"}}}", "type:GTID", "type:COMMIT", "type:BEGIN", - "type:FIELD field_event:{table_name:\"t3\" fields:{name:\"id31\" type:INT32 table:\"t3\" org_table:\"t3\" database:\"vttest\" org_name:\"id31\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id32\" type:INT32 table:\"t3\" org_table:\"t3\" database:\"vttest\" org_name:\"id32\" column_length:11 charset:63 column_type:\"int(11)\"}}", + "type:FIELD field_event:{table_name:\"t3\" fields:{name:\"id31\" type:INT32 table:\"t3\" org_table:\"t3\" database:\"vttest\" org_name:\"id31\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id32\" type:INT32 table:\"t3\" org_table:\"t3\" database:\"vttest\" org_name:\"id32\" column_length:11 charset:63 column_type:\"int(11)\"} enum_set_string_values:true}", "type:ROW row_event:{table_name:\"t3\" row_changes:{after:{lengths:1 lengths:2 values:\"130\"}}}", "type:ROW row_event:{table_name:\"t3\" row_changes:{after:{lengths:1 lengths:2 values:\"260\"}}}", "type:ROW row_event:{table_name:\"t3\" row_changes:{after:{lengths:1 lengths:2 values:\"390\"}}}", diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index f210e756da1..bf41111bbc8 100644 --- 
a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -21,13 +21,13 @@ import ( "context" "fmt" "io" + "strings" "time" "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" - mysqlbinlog "vitess.io/vitess/go/mysql/binlog" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" @@ -35,20 +35,24 @@ import ( "vitess.io/vitess/go/vt/binlog" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/logutil" vtschema "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + + mysqlbinlog "vitess.io/vitess/go/mysql/binlog" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const ( trxHistoryLenQuery = `select count as history_len from information_schema.INNODB_METRICS where name = 'trx_rseg_history_len'` - replicaLagQuery = `show slave status` + replicaLagQuery = `show replica status` + legacyLagQuery = `show slave status` hostQuery = `select @@hostname as hostname, @@port as port` ) @@ -211,7 +215,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog // GTID->DDL // GTID->OTHER // HEARTBEAT is issued if there's inactivity, which is likely - // to heppend between one group of events and another. + // to happen between one group of events and another. // // Buffering only takes row or statement lengths into consideration. // Length of other events is considered negligible. 
@@ -299,6 +303,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog } } + logger := logutil.NewThrottledLogger(vs.vse.GetTabletInfo(), throttledLoggerInterval) throttleEvents := func(throttledEvents chan mysql.BinlogEvent) { throttledHeartbeatsRateLimiter := timer.NewRateLimiter(HeartbeatTime) defer throttledHeartbeatsRateLimiter.Stop() @@ -316,6 +321,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog return injectHeartbeat(true) }) // we won't process events, until we're no longer throttling + logger.Infof("throttled.") continue } select { @@ -503,7 +509,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_COMMIT, }) case sqlparser.StmtDDL: - if mustSendDDL(q, vs.cp.DBName(), vs.filter) { + if mustSendDDL(q, vs.cp.DBName(), vs.filter, vs.vse.env.Environment().Parser()) { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, Gtid: replication.EncodePosition(vs.pos), @@ -520,7 +526,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_OTHER, }) } - if schema.MustReloadSchemaOnDDL(q.SQL, vs.cp.DBName()) { + if schema.MustReloadSchemaOnDDL(q.SQL, vs.cp.DBName(), vs.vse.env.Environment().Parser()) { vs.se.ReloadAt(context.Background(), vs.pos) } case sqlparser.StmtSavepoint: @@ -682,7 +688,7 @@ func (vs *vstreamer) buildJournalPlan(id uint64, tm *mysql.TableMap) error { // Build a normal table plan, which means, return all rows // and columns as is. Special handling is done when we actually // receive the row event. We'll build a JOURNAL event instead. 
- plan, err := buildREPlan(table, nil, "") + plan, err := buildREPlan(vs.se.Environment(), table, nil, "") if err != nil { return err } @@ -716,7 +722,7 @@ func (vs *vstreamer) buildVersionPlan(id uint64, tm *mysql.TableMap) error { // Build a normal table plan, which means, return all rows // and columns as is. Special handling is done when we actually // receive the row event. We'll build a JOURNAL event instead. - plan, err := buildREPlan(table, nil, "") + plan, err := buildREPlan(vs.se.Environment(), table, nil, "") if err != nil { return err } @@ -733,12 +739,11 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap if err != nil { return nil, err } - table := &Table{ Name: tm.Name, Fields: cols, } - plan, err := buildPlan(table, vs.vschema, vs.filter) + plan, err := buildPlan(vs.se.Environment(), table, vs.vschema, vs.filter) if err != nil { return nil, err } @@ -746,6 +751,9 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap vs.plans[id] = nil return nil, nil } + if err := addEnumAndSetMappingstoPlan(plan, cols, tm.Metadata); err != nil { + return nil, vterrors.Wrapf(err, "failed to build ENUM and SET column integer to string mappings") + } vs.plans[id] = &streamerPlan{ Plan: plan, TableMap: tm, @@ -757,25 +765,47 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap Fields: plan.fields(), Keyspace: vs.vse.keyspace, Shard: vs.vse.shard, + // This mapping will be done, if needed, in the vstreamer when we process + // and build ROW events. 
+ EnumSetStringValues: len(plan.EnumSetValuesMap) > 0, }, }, nil } func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, error) { var fields []*querypb.Field + var txtFieldIdx int for i, typ := range tm.Types { - t, err := sqltypes.MySQLToType(int64(typ), 0) + t, err := sqltypes.MySQLToType(typ, 0) if err != nil { return nil, fmt.Errorf("unsupported type: %d, position: %d", typ, i) } + // Use the collation inherited or the one specified explicitly for the + // column if one was provided in the event's optional metadata (MySQL only + // provides this for text based columns). + var coll collations.ID + switch { + case sqltypes.IsText(t) && len(tm.ColumnCollationIDs) > txtFieldIdx: + coll = tm.ColumnCollationIDs[txtFieldIdx] + txtFieldIdx++ + case t == sqltypes.TypeJSON: + // JSON is a blob at this (storage) layer -- vs the connection/query serving + // layer which CollationForType seems primarily concerned about and JSON at + // the response layer should be using utf-8 as that's the standard -- so we + // should NOT use utf8mb4 as the collation in MySQL for a JSON column is + // NULL, meaning there is not one (same as for int) and we should use binary. 
+ coll = collations.CollationBinaryID + default: // Use the server defined default for the column's type + coll = collations.CollationForType(t, vs.se.Environment().CollationEnv().DefaultConnectionCharset()) + } fields = append(fields, &querypb.Field{ Name: fmt.Sprintf("@%d", i+1), Type: t, - Charset: uint32(collations.DefaultCollationForType(t)), - Flags: mysql.FlagsForColumn(t, collations.DefaultCollationForType(t)), + Charset: uint32(coll), + Flags: mysql.FlagsForColumn(t, coll), }) } - st, err := vs.se.GetTableForPos(sqlparser.NewIdentifierCS(tm.Name), replication.EncodePosition(vs.pos)) + st, err := vs.se.GetTableForPos(vs.ctx, sqlparser.NewIdentifierCS(tm.Name), replication.EncodePosition(vs.pos)) if err != nil { if vs.filter.FieldEventMode == binlogdatapb.Filter_ERR_ON_MISMATCH { log.Infof("No schema found for table %s", tm.Name) @@ -792,7 +822,7 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er return fields, nil } - // check if the schema returned by schema.Engine matches with row. + // Check if the schema returned by schema.Engine matches with row. for i := range tm.Types { if !sqltypes.AreTypesEquivalent(fields[i].Type, st.Fields[i].Type) { return fields, nil @@ -800,21 +830,30 @@ func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, er } // Columns should be truncated to match those in tm. - fieldsCopy, err := getFields(vs.ctx, vs.cp, tm.Name, tm.Database, st.Fields[:len(tm.Types)]) + // This uses the historian which queries the columns in the table and uses the + // generated fields metadata. This means that the fields for text types are + // initially using collations for the column types based on the *connection + // collation* and not the actual *column collation*. 
+ // But because we now get the correct collation for the actual column from + // mysqld in getExtColInfos we know this is the correct one for the vstream + // target and we use that rather than any that were in the binlog events, + // which were for the source and which can be using a different collation + // than the target. + fieldsCopy, err := getFields(vs.ctx, vs.cp, vs.se, tm.Name, tm.Database, st.Fields[:len(tm.Types)]) if err != nil { return nil, err } return fieldsCopy, nil } -func getExtColInfos(ctx context.Context, cp dbconfigs.Connector, table, database string) (map[string]*extColInfo, error) { +func getExtColInfos(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, table, database string) (map[string]*extColInfo, error) { extColInfos := make(map[string]*extColInfo) conn, err := cp.Connect(ctx) if err != nil { return nil, err } defer conn.Close() - queryTemplate := "select column_name, column_type from information_schema.columns where table_schema=%s and table_name=%s;" + queryTemplate := "select column_name, column_type, collation_name from information_schema.columns where table_schema=%s and table_name=%s;" query := fmt.Sprintf(queryTemplate, encodeString(database), encodeString(table)) qr, err := conn.ExecuteFetch(query, 10000, false) if err != nil { @@ -824,34 +863,43 @@ func getExtColInfos(ctx context.Context, cp dbconfigs.Connector, table, database extColInfo := &extColInfo{ columnType: row[1].ToString(), } + collationName := row[2].ToString() + var coll collations.ID + if row[2].IsNull() || collationName == "" { + coll = collations.CollationBinaryID + } else { + coll = se.Environment().CollationEnv().LookupByName(collationName) + } + extColInfo.collationID = coll extColInfos[row[0].ToString()] = extColInfo } return extColInfos, nil } -func getFields(ctx context.Context, cp dbconfigs.Connector, table, database string, fields []*querypb.Field) ([]*querypb.Field, error) { +func getFields(ctx context.Context, cp dbconfigs.Connector, se 
*schema.Engine, table, database string, fields []*querypb.Field) ([]*querypb.Field, error) { // Make a deep copy of the schema.Engine fields as they are pointers and // will be modified by adding ColumnType below fieldsCopy := make([]*querypb.Field, len(fields)) for i, field := range fields { fieldsCopy[i] = field.CloneVT() } - extColInfos, err := getExtColInfos(ctx, cp, table, database) + extColInfos, err := getExtColInfos(ctx, cp, se, table, database) if err != nil { return nil, err } for _, field := range fieldsCopy { if colInfo, ok := extColInfos[field.Name]; ok { field.ColumnType = colInfo.columnType + field.Charset = uint32(colInfo.collationID) } } return fieldsCopy, nil } -// additional column attributes from information_schema.columns. Currently only column_type is used, but -// we expect to add more in the future +// Additional column attributes to get from information_schema.columns. type extColInfo struct { - columnType string + columnType string + collationID collations.ID } func encodeString(in string) string { @@ -956,7 +1004,7 @@ func (vs *vstreamer) rebuildPlans() error { // cause that to change. continue } - newPlan, err := buildPlan(plan.Table, vs.vschema, vs.filter) + newPlan, err := buildPlan(vs.se.Environment(), plan.Table, vs.vschema, vs.filter) if err != nil { return err } @@ -1005,6 +1053,30 @@ func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataCo } pos += l + if !value.IsNull() { // ENUMs and SETs require no special handling if they are NULL + // If the column is a CHAR based type with a binary collation (e.g. utf8mb4_bin) then the + // actual column type is included in the second byte of the event metadata while the + // event's type for the field is BINARY. This is true for ENUM and SET types. 
+ var mysqlType uint16 + if sqltypes.IsQuoted(plan.Table.Fields[colNum].Type) { + mysqlType = plan.TableMap.Metadata[colNum] >> 8 + } + // Convert the integer values in the binlog event for any SET and ENUM fields into their + // string representations. + if plan.Table.Fields[colNum].Type == querypb.Type_ENUM || mysqlType == mysqlbinlog.TypeEnum { + value, err = buildEnumStringValue(plan, colNum, value) + if err != nil { + return false, nil, false, vterrors.Wrapf(err, "failed to perform ENUM column integer to string value mapping") + } + } + if plan.Table.Fields[colNum].Type == querypb.Type_SET || mysqlType == mysqlbinlog.TypeSet { + value, err = buildSetStringValue(plan, colNum, value) + if err != nil { + return false, nil, false, vterrors.Wrapf(err, "failed to perform SET column integer to string value mapping") + } + } + } + charsets[colNum] = collations.ID(plan.Table.Fields[colNum].Charset) values[colNum] = value valueIndex++ @@ -1014,6 +1086,109 @@ func (vs *vstreamer) extractRowAndFilter(plan *streamerPlan, data []byte, dataCo return ok, filtered, partial, err } +// addEnumAndSetMappingstoPlan sets up any necessary ENUM and SET integer to string mappings. +func addEnumAndSetMappingstoPlan(plan *Plan, cols []*querypb.Field, metadata []uint16) error { + plan.EnumSetValuesMap = make(map[int]map[int]string) + for i, col := range cols { + // If the column is a CHAR based type with a binary collation (e.g. utf8mb4_bin) then + // the actual column type is included in the second byte of the event metadata while + // the event's type for the field is BINARY. This is true for ENUM and SET types. + var mysqlType uint16 + if sqltypes.IsQuoted(col.Type) { + mysqlType = metadata[i] >> 8 + } + if col.Type == querypb.Type_ENUM || mysqlType == mysqlbinlog.TypeEnum || + col.Type == querypb.Type_SET || mysqlType == mysqlbinlog.TypeSet { + // Strip the enum() / set() parts out. 
+ begin := strings.Index(col.ColumnType, "(") + end := strings.LastIndex(col.ColumnType, ")") + if begin == -1 || end == -1 { + return fmt.Errorf("enum or set column %s does not have valid string values: %s", + col.Name, col.ColumnType) + } + plan.EnumSetValuesMap[i] = vtschema.ParseEnumOrSetTokensMap(col.ColumnType[begin+1 : end]) + } + } + return nil +} + +// buildEnumStringValue takes the integer value of an ENUM column and returns the string value. +func buildEnumStringValue(plan *streamerPlan, colNum int, value sqltypes.Value) (sqltypes.Value, error) { + if value.IsNull() { // No work is needed + return value, nil + } + // Add the mappings just-in-time in case we haven't properly received and processed a + // table map event to initialize it. + if plan.EnumSetValuesMap == nil { + if err := addEnumAndSetMappingstoPlan(plan.Plan, plan.Table.Fields, plan.TableMap.Metadata); err != nil { + return sqltypes.Value{}, err + } + } + // ENUM columns are stored as an unsigned 16-bit integer as they can contain a maximum + // of 65,535 elements (https://dev.mysql.com/doc/refman/en/enum.html) with the 0 element + // reserved for any integer value that has no string mapping. + iv, err := value.ToUint16() + if err != nil { + return sqltypes.Value{}, fmt.Errorf("no valid integer value found for column %s in table %s, bytes: %b", + plan.Table.Fields[colNum].Name, plan.Table.Name, iv) + } + var strVal string + // Match the MySQL behavior of returning an empty string for invalid ENUM values. + // This is what the 0 position in an ENUM is reserved for. + if iv != 0 { + var ok bool + strVal, ok = plan.EnumSetValuesMap[colNum][int(iv)] + if !ok { + // The integer value was NOT 0 yet we found no mapping. This should never happen. 
+ return sqltypes.Value{}, fmt.Errorf("no string value found for ENUM column %s in table %s -- with available values being: %v -- using the found integer value: %d", + plan.Table.Fields[colNum].Name, plan.Table.Name, plan.EnumSetValuesMap[colNum], iv) + } + } + return sqltypes.MakeTrusted(plan.Table.Fields[colNum].Type, []byte(strVal)), nil +} + +// buildSetStringValue takes the integer value of a SET column and returns the string value. +func buildSetStringValue(plan *streamerPlan, colNum int, value sqltypes.Value) (sqltypes.Value, error) { + if value.IsNull() { // No work is needed + return value, nil + } + // Add the mappings just-in-time in case we haven't properly received and processed a + // table map event to initialize it. + if plan.EnumSetValuesMap == nil { + if err := addEnumAndSetMappingstoPlan(plan.Plan, plan.Table.Fields, plan.TableMap.Metadata); err != nil { + return sqltypes.Value{}, err + } + } + // A SET column can have 64 unique values: https://dev.mysql.com/doc/refman/en/set.html + // For this reason the binlog event contains the values encoded as an unsigned 64-bit + // integer which is really a bitmap. + val := bytes.Buffer{} + iv, err := value.ToUint64() + if err != nil { + return value, fmt.Errorf("no valid integer value found for column %s in table %s, bytes: %b", + plan.Table.Fields[colNum].Name, plan.Table.Name, iv) + } + idx := 1 + // See what bits are set in the bitmap using bitmasks. + for b := uint64(1); b < 1<<63; b <<= 1 { + if iv&b > 0 { // This bit is set and the SET's string value needs to be provided. + strVal, ok := plan.EnumSetValuesMap[colNum][idx] + // When you insert values not found in the SET (which requires disabling STRICT mode) then + // they are effectively pruned and ignored (not actually saved). So this should never happen. 
+ if !ok { + return sqltypes.Value{}, fmt.Errorf("no valid integer value found for SET column %s in table %s, bytes: %b", + plan.Table.Fields[colNum].Name, plan.Table.Name, iv) + } + if val.Len() > 0 { + val.WriteByte(',') + } + val.WriteString(strVal) + } + idx++ + } + return sqltypes.MakeTrusted(plan.Table.Fields[colNum].Type, val.Bytes()), nil +} + func wrapError(err error, stopPos replication.Position, vse *Engine) error { if err != nil { vse.vstreamersEndedWithErrors.Add(1) diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go deleted file mode 100644 index 0eda0d6c52e..00000000000 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_flaky_test.go +++ /dev/null @@ -1,2455 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package vstreamer - -import ( - "context" - "fmt" - "io" - "strconv" - "strings" - "sync" - "testing" - "time" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" - "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sqlparser" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/mysql" - binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" -) - -type testcase struct { - input any - output [][]string -} - -func checkIfOptionIsSupported(t *testing.T, variable string) bool { - qr, err := env.Mysqld.FetchSuperQuery(context.Background(), fmt.Sprintf("show variables like '%s'", variable)) - require.NoError(t, err) - require.NotNil(t, qr) - if qr.Rows != nil && len(qr.Rows) == 1 { - return true - } - return false -} - -type TestColumn struct { - name, dataType, colType string - len, charset int64 -} - -type TestFieldEvent struct { - table, db string - cols []*TestColumn -} - -func (tfe *TestFieldEvent) String() string { - s := fmt.Sprintf("type:FIELD field_event:{table_name:\"%s\"", tfe.table) - fld := "" - for _, col := range tfe.cols { - fld += fmt.Sprintf(" fields:{name:\"%s\" type:%s table:\"%s\" org_table:\"%s\" database:\"%s\" org_name:\"%s\" column_length:%d charset:%d", - col.name, col.dataType, tfe.table, tfe.table, tfe.db, col.name, col.len, col.charset) - if col.colType != "" { - fld += fmt.Sprintf(" column_type:\"%s\"", col.colType) - } - fld += "}" - } - s += fld - s += "}" - return s -} - -// TestPlayerNoBlob sets up a new environment with mysql running with binlog_row_image as noblob. 
It confirms that -// the VEvents created are correct: that they don't contain the missing columns and that the DataColumns bitmap is sent -func TestNoBlob(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - oldEngine := engine - engine = nil - oldEnv := env - env = nil - newEngine(t, ctx, "noblob") - defer func() { - engine = oldEngine - env = oldEnv - }() - execStatements(t, []string{ - "create table t1(id int, blb blob, val varchar(4), primary key(id))", - "create table t2(id int, txt text, val varchar(4), unique key(id, val))", - }) - defer execStatements(t, []string{ - "drop table t1", - "drop table t2", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values (1, 'blob1', 'aaa')", - "update t1 set val = 'bbb'", - "commit", - "begin", - "insert into t2 values (1, 'text1', 'aaa')", - "update t2 set val = 'bbb'", - "commit", - } - - fe1 := &TestFieldEvent{ - table: "t1", - db: "vttest", - cols: []*TestColumn{ - {name: "id", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, - {name: "blb", dataType: "BLOB", colType: "blob", len: 65535, charset: 63}, - {name: "val", dataType: "VARCHAR", colType: "varchar(4)", len: 16, charset: 45}, - }, - } - fe2 := &TestFieldEvent{ - table: "t2", - db: "vttest", - cols: []*TestColumn{ - {name: "id", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, - {name: "txt", dataType: "TEXT", colType: "text", len: 262140, charset: 45}, - {name: "val", dataType: "VARCHAR", colType: "varchar(4)", len: 16, charset: 45}, - }, - } - - testcases := []testcase{{ - input: queries, - output: [][]string{{ - "begin", - fe1.String(), - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:5 lengths:3 values:"1blob1aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:-1 lengths:3 values:"1aaa"} after:{lengths:1 lengths:-1 lengths:3 values:"1bbb"} data_columns:{count:3 cols:"\x05"}}}`, - 
"gtid", - "commit", - }, { - "begin", - fe2.String(), - `type:ROW row_event:{table_name:"t2" row_changes:{after:{lengths:1 lengths:5 lengths:3 values:"1text1aaa"}}}`, - `type:ROW row_event:{table_name:"t2" row_changes:{before:{lengths:1 lengths:5 lengths:3 values:"1text1aaa"} after:{lengths:1 lengths:-1 lengths:3 values:"1bbb"} data_columns:{count:3 cols:"\x05"}}}`, - "gtid", - "commit", - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func TestSetAndEnum(t *testing.T) { - execStatements(t, []string{ - "create table t1(id int, val binary(4), color set('red','green','blue'), size enum('S','M','L'), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values (1, 'aaa', 'red,blue', 'S')", - "insert into t1 values (2, 'bbb', 'green', 'M')", - "insert into t1 values (3, 'ccc', 'red,blue,green', 'L')", - "commit", - } - - fe := &TestFieldEvent{ - table: "t1", - db: "vttest", - cols: []*TestColumn{ - {name: "id", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, - {name: "val", dataType: "BINARY", colType: "binary(4)", len: 4, charset: 63}, - {name: "color", dataType: "SET", colType: "set('red','green','blue')", len: 56, charset: 45}, - {name: "size", dataType: "ENUM", colType: "enum('S','M','L')", len: 4, charset: 45}, - }, - } - - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - fe.String(), - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 lengths:1 lengths:1 values:"1aaa\x0051"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 lengths:1 lengths:1 values:"2bbb\x0022"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 lengths:1 lengths:1 values:"3ccc\x0073"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func TestCellValuePadding(t *testing.T) { - - 
execStatements(t, []string{ - "create table t1(id int, val binary(4), primary key(val))", - "create table t2(id int, val char(4), primary key(val))", - "create table t3(id int, val char(4) collate utf8mb4_bin, primary key(val))", - }) - defer execStatements(t, []string{ - "drop table t1", - "drop table t2", - "drop table t3", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values (1, 'aaa\000')", - "insert into t1 values (2, 'bbb\000')", - "update t1 set id = 11 where val = 'aaa\000'", - "insert into t2 values (1, 'aaa')", - "insert into t2 values (2, 'bbb')", - "update t2 set id = 11 where val = 'aaa'", - "insert into t3 values (1, 'aaa')", - "insert into t3 values (2, 'bb')", - "update t3 set id = 11 where val = 'aaa'", - "commit", - } - - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:BINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:4 charset:63 column_type:"binary(4)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"1aaa\x00"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"2bbb\x00"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:4 values:"1aaa\x00"} after:{lengths:2 lengths:4 values:"11aaa\x00"}}}`, - `type:FIELD field_event:{table_name:"t2" fields:{name:"id" type:INT32 table:"t2" org_table:"t2" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:CHAR table:"t2" org_table:"t2" database:"vttest" org_name:"val" column_length:16 charset:45 column_type:"char(4)"}}`, - `type:ROW row_event:{table_name:"t2" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW 
row_event:{table_name:"t2" row_changes:{after:{lengths:1 lengths:3 values:"2bbb"}}}`, - `type:ROW row_event:{table_name:"t2" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"} after:{lengths:2 lengths:3 values:"11aaa"}}}`, - `type:FIELD field_event:{table_name:"t3" fields:{name:"id" type:INT32 table:"t3" org_table:"t3" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:BINARY table:"t3" org_table:"t3" database:"vttest" org_name:"val" column_length:16 charset:45 column_type:"char(4)"}}`, - `type:ROW row_event:{table_name:"t3" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"t3" row_changes:{after:{lengths:1 lengths:2 values:"2bb"}}}`, - `type:ROW row_event:{table_name:"t3" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"} after:{lengths:2 lengths:3 values:"11aaa"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func TestSetStatement(t *testing.T) { - - if testing.Short() { - t.Skip() - } - if !checkIfOptionIsSupported(t, "log_builtin_as_identified_by_password") { - // the combination of setting this option and support for "set password" only works on a few flavors - log.Info("Cannot test SetStatement on this flavor") - return - } - engine.se.Reload(context.Background()) - - execStatements(t, []string{ - "create table t1(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values (1, 'aaa')", - "commit", - "set global log_builtin_as_identified_by_password=1", - "SET PASSWORD FOR 'vt_appdebug'@'localhost'='*AA17DA66C7C714557F5485E84BCAFF2C209F2F53'", //select password('vtappdebug_password'); - } - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id" type:INT32 table:"t1" 
org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `gtid`, - `commit`, - }, { - `gtid`, - `other`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -// TestSetForeignKeyCheck confirms that the binlog RowEvent flags are set correctly when foreign_key_checks are on and off. -func TestSetForeignKeyCheck(t *testing.T) { - testRowEventFlags = true - defer func() { testRowEventFlags = false }() - - execStatements(t, []string{ - "create table t1(id int, val binary(4), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values (1, 'aaa')", - "set @@session.foreign_key_checks=1", - "insert into t1 values (2, 'bbb')", - "set @@session.foreign_key_checks=0", - "insert into t1 values (3, 'ccc')", - "commit", - } - - fe := &TestFieldEvent{ - table: "t1", - db: "vttest", - cols: []*TestColumn{ - {name: "id", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, - {name: "val", dataType: "BINARY", colType: "binary(4)", len: 4, charset: 63}, - }, - } - - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - fe.String(), - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"1aaa\x00"}} flags:1}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"2bbb\x00"}} flags:1}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:4 values:"3ccc\x00"}} flags:3}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func TestStmtComment(t *testing.T) { - - if testing.Short() { - t.Skip() - } - - 
execStatements(t, []string{ - "create table t1(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values (1, 'aaa')", - "commit", - "/*!40000 ALTER TABLE `t1` DISABLE KEYS */", - } - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `gtid`, - `commit`, - }, { - `gtid`, - `other`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func TestVersion(t *testing.T) { - if testing.Short() { - t.Skip() - } - - oldEngine := engine - defer func() { - engine = oldEngine - }() - - err := env.SchemaEngine.EnableHistorian(true) - require.NoError(t, err) - defer env.SchemaEngine.EnableHistorian(false) - - engine = NewEngine(engine.env, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) - engine.InitDBConfig(env.KeyspaceName, env.ShardName) - engine.Open() - defer engine.Close() - - execStatements(t, []string{ - "create database if not exists _vt", - "create table if not exists _vt.schema_version(id int, pos varbinary(10000), time_updated bigint(20), ddl varchar(10000), schemax blob, primary key(id))", - }) - defer execStatements(t, []string{ - "drop table _vt.schema_version", - }) - dbSchema := &binlogdatapb.MinimalSchema{ - Tables: []*binlogdatapb.MinimalTable{{ - Name: "t1", - }}, - } - blob, _ := dbSchema.MarshalVT() - engine.se.Reload(context.Background()) - gtid := "MariaDB/0-41983-20" - testcases := []testcase{{ - input: []string{ - fmt.Sprintf("insert 
into _vt.schema_version values(1, '%s', 123, 'create table t1', %v)", gtid, encodeString(string(blob))), - }, - // External table events don't get sent. - output: [][]string{{ - `begin`, - `type:VERSION`}, { - `gtid`, - `commit`}}, - }} - runCases(t, nil, testcases, "", nil) - mt, err := env.SchemaEngine.GetTableForPos(sqlparser.NewIdentifierCS("t1"), gtid) - require.NoError(t, err) - assert.True(t, proto.Equal(mt, dbSchema.Tables[0])) -} - -func insertLotsOfData(t *testing.T, numRows int) { - query1 := "insert into t1 (id11, id12) values" - s := "" - for i := 1; i <= numRows; i++ { - if s != "" { - s += "," - } - s += fmt.Sprintf("(%d,%d)", i, i*10) - } - query1 += s - query2 := "insert into t2 (id21, id22) values" - s = "" - for i := 1; i <= numRows; i++ { - if s != "" { - s += "," - } - s += fmt.Sprintf("(%d,%d)", i, i*20) - } - query2 += s - execStatements(t, []string{ - query1, - query2, - }) -} - -func TestMissingTables(t *testing.T) { - if testing.Short() { - t.Skip() - } - engine.se.Reload(context.Background()) - execStatements(t, []string{ - "create table t1(id11 int, id12 int, primary key(id11))", - "create table shortlived(id31 int, id32 int, primary key(id31))", - }) - defer execStatements(t, []string{ - "drop table t1", - "drop table _shortlived", - }) - startPos := primaryPosition(t) - execStatements(t, []string{ - "insert into shortlived values (1,1), (2,2)", - "alter table shortlived rename to _shortlived", - }) - engine.se.Reload(context.Background()) - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1", - }}, - } - testcases := []testcase{ - { - input: []string{}, - output: [][]string{}, - }, - - { - input: []string{ - "insert into t1 values (101, 1010)", - }, - output: [][]string{ - { - "begin", - "gtid", - "commit", - }, - { - "gtid", - "type:OTHER", - }, - { - "begin", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" 
database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}", - "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:3 lengths:4 values:\"1011010\"}}}", - "gtid", - "commit", - }, - }, - }, - } - runCases(t, filter, testcases, startPos, nil) -} - -func TestVStreamCopySimpleFlow(t *testing.T) { - if testing.Short() { - t.Skip() - } - execStatements(t, []string{ - "create table t1(id11 int, id12 int, primary key(id11))", - "create table t2(id21 int, id22 int, primary key(id21))", - }) - log.Infof("Pos before bulk insert: %s", primaryPosition(t)) - insertLotsOfData(t, 10) - log.Infof("Pos after bulk insert: %s", primaryPosition(t)) - defer execStatements(t, []string{ - "drop table t1", - "drop table t2", - }) - engine.se.Reload(context.Background()) - ctx := context.Background() - qr, err := env.Mysqld.FetchSuperQuery(ctx, "SELECT count(*) as cnt from t1, t2 where t1.id11 = t2.id21") - if err != nil { - t.Fatal("Query failed") - } - require.Equal(t, "[[INT64(10)]]", fmt.Sprintf("%v", qr.Rows)) - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select * from t1", - }, { - Match: "t2", - Filter: "select * from t2", - }}, - } - - var tablePKs []*binlogdatapb.TableLastPK - tablePKs = append(tablePKs, getTablePK("t1", 1)) - tablePKs = append(tablePKs, getTablePK("t2", 2)) - - t1FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}"} - t2FieldEvent := []string{"begin", "type:FIELD field_event:{table_name:\"t2\" 
fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"}}"} - t1Events := []string{} - t2Events := []string{} - for i := 1; i <= 10; i++ { - t1Events = append(t1Events, - fmt.Sprintf("type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:%d lengths:%d values:\"%d%d\"}}}", len(strconv.Itoa(i)), len(strconv.Itoa(i*10)), i, i*10)) - t2Events = append(t2Events, - fmt.Sprintf("type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:%d lengths:%d values:\"%d%d\"}}}", len(strconv.Itoa(i)), len(strconv.Itoa(i*20)), i, i*20)) - } - t1Events = append(t1Events, "lastpk", "commit") - t2Events = append(t2Events, "lastpk", "commit") - - insertEvents1 := []string{ - "begin", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id11\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id11\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id12\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id12\" column_length:11 charset:63 column_type:\"int(11)\"}}", - "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:3 lengths:4 values:\"1011010\"}}}", - "gtid", - "commit"} - insertEvents2 := []string{ - "begin", - "type:FIELD field_event:{table_name:\"t2\" fields:{name:\"id21\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id21\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id22\" type:INT32 table:\"t2\" org_table:\"t2\" database:\"vttest\" org_name:\"id22\" column_length:11 charset:63 column_type:\"int(11)\"}}", - "type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:3 lengths:4 values:\"2022020\"}}}", - "gtid", - "commit"} - - testcases := []testcase{ - { - input: 
[]string{}, - output: [][]string{t1FieldEvent, {"gtid"}, t1Events, {"begin", "lastpk", "commit"}, t2FieldEvent, t2Events, {"begin", "lastpk", "commit"}, {"copy_completed"}}, - }, - - { - input: []string{ - "insert into t1 values (101, 1010)", - }, - output: [][]string{insertEvents1}, - }, - { - input: []string{ - "insert into t2 values (202, 2020)", - }, - output: [][]string{insertEvents2}, - }, - } - - runCases(t, filter, testcases, "vscopy", tablePKs) - log.Infof("Pos at end of test: %s", primaryPosition(t)) -} - -func TestVStreamCopyWithDifferentFilters(t *testing.T) { - if testing.Short() { - t.Skip() - } - execStatements(t, []string{ - "create table t1(id1 int, id2 int, id3 int, primary key(id1))", - "create table t2a(id1 int, id2 int, primary key(id1))", - "create table t2b(id1 varchar(20), id2 int, primary key(id1))", - }) - defer execStatements(t, []string{ - "drop table t1", - "drop table t2a", - "drop table t2b", - }) - engine.se.Reload(context.Background()) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/t2.*", - }, { - Match: "t1", - Filter: "select id1, id2 from t1", - }}, - } - - execStatements(t, []string{ - "insert into t1(id1, id2, id3) values (1, 2, 3)", - "insert into t2a(id1, id2) values (1, 4)", - "insert into t2b(id1, id2) values ('b', 6)", - "insert into t2b(id1, id2) values ('a', 5)", - }) - - var expectedEvents = []string{ - "type:BEGIN", - "type:FIELD field_event:{table_name:\"t1\" fields:{name:\"id1\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id2\" type:INT32 table:\"t1\" org_table:\"t1\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}", - "type:GTID", - "type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:1 lengths:1 values:\"12\"}}}", - "type:LASTPK 
last_p_k_event:{table_last_p_k:{table_name:\"t1\" lastpk:{fields:{name:\"id1\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\"1\"}}}}", - "type:COMMIT", - "type:BEGIN", - "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t1\"} completed:true}", - "type:COMMIT", - "type:BEGIN", - "type:FIELD field_event:{table_name:\"t2a\" fields:{name:\"id1\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id1\" column_length:11 charset:63 column_type:\"int(11)\"} fields:{name:\"id2\" type:INT32 table:\"t2a\" org_table:\"t2a\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}", - "type:ROW row_event:{table_name:\"t2a\" row_changes:{after:{lengths:1 lengths:1 values:\"14\"}}}", - "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2a\" lastpk:{fields:{name:\"id1\" type:INT32 charset:63 flags:53251} rows:{lengths:1 values:\"1\"}}}}", - "type:COMMIT", - "type:BEGIN", - "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2a\"} completed:true}", - "type:COMMIT", - "type:BEGIN", - "type:FIELD field_event:{table_name:\"t2b\" fields:{name:\"id1\" type:VARCHAR table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id1\" column_length:80 charset:45 column_type:\"varchar(20)\"} fields:{name:\"id2\" type:INT32 table:\"t2b\" org_table:\"t2b\" database:\"vttest\" org_name:\"id2\" column_length:11 charset:63 column_type:\"int(11)\"}}", - "type:ROW row_event:{table_name:\"t2b\" row_changes:{after:{lengths:1 lengths:1 values:\"a5\"}}}", - "type:ROW row_event:{table_name:\"t2b\" row_changes:{after:{lengths:1 lengths:1 values:\"b6\"}}}", - "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2b\" lastpk:{fields:{name:\"id1\" type:VARCHAR charset:45 flags:20483} rows:{lengths:1 values:\"b\"}}}}", - "type:COMMIT", - "type:BEGIN", - "type:LASTPK last_p_k_event:{table_last_p_k:{table_name:\"t2b\"} completed:true}", - "type:COMMIT", - } - - var allEvents []*binlogdatapb.VEvent - var wg 
sync.WaitGroup - wg.Add(1) - ctx2, cancel2 := context.WithDeadline(ctx, time.Now().Add(10*time.Second)) - defer cancel2() - - var errGoroutine error - go func() { - defer wg.Done() - engine.Stream(ctx2, "", nil, filter, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { - for _, ev := range evs { - if ev.Type == binlogdatapb.VEventType_HEARTBEAT { - continue - } - if ev.Throttled { - continue - } - allEvents = append(allEvents, ev) - } - if len(allEvents) == len(expectedEvents) { - log.Infof("Got %d events as expected", len(allEvents)) - for i, ev := range allEvents { - ev.Timestamp = 0 - if ev.Type == binlogdatapb.VEventType_FIELD { - for j := range ev.FieldEvent.Fields { - ev.FieldEvent.Fields[j].Flags = 0 - } - ev.FieldEvent.Keyspace = "" - ev.FieldEvent.Shard = "" - } - if ev.Type == binlogdatapb.VEventType_ROW { - ev.RowEvent.Keyspace = "" - ev.RowEvent.Shard = "" - } - got := ev.String() - want := expectedEvents[i] - - want = env.RemoveAnyDeprecatedDisplayWidths(want) - - if !strings.HasPrefix(got, want) { - errGoroutine = fmt.Errorf("event %d did not match, want %s, got %s", i, want, got) - return errGoroutine - } - } - - return io.EOF - } - return nil - }) - }() - wg.Wait() - if errGoroutine != nil { - t.Fatalf(errGoroutine.Error()) - } -} - -func TestFilteredVarBinary(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table t1(id1 int, val varbinary(128), primary key(id1))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select id1, val from t1 where val = 'newton'", - }}, - } - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into t1 values (1, 'kepler')", - "insert into t1 values (2, 'newton')", - "insert into t1 values (3, 'newton')", - "insert into t1 values (4, 'kepler')", - "insert into t1 values (5, 'newton')", - 
"update t1 set val = 'newton' where id1 = 1", - "update t1 set val = 'kepler' where id1 = 2", - "update t1 set val = 'newton' where id1 = 2", - "update t1 set val = 'kepler' where id1 = 1", - "delete from t1 where id1 in (2,3)", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:6 values:"2newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:6 values:"3newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:6 values:"5newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:6 values:"1newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:6 values:"2newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:6 values:"2newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:6 values:"1newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:6 values:"2newton"}} row_changes:{before:{lengths:1 lengths:6 values:"3newton"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, "", nil) -} - -func TestFilteredInt(t *testing.T) { - if testing.Short() { - t.Skip() - } - engine.se.Reload(context.Background()) - - execStatements(t, []string{ - "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: 
"select id1, val from t1 where id2 = 200", - }}, - } - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into t1 values (1, 100, 'aaa')", - "insert into t1 values (2, 200, 'bbb')", - "insert into t1 values (3, 100, 'ccc')", - "insert into t1 values (4, 200, 'ddd')", - "insert into t1 values (5, 200, 'eee')", - "update t1 set val = 'newddd' where id1 = 4", - "update t1 set id2 = 200 where id1 = 1", - "update t1 set id2 = 100 where id1 = 2", - "update t1 set id2 = 100 where id1 = 1", - "update t1 set id2 = 200 where id1 = 2", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"2bbb"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"4ddd"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"5eee"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:3 values:"4ddd"} after:{lengths:1 lengths:6 values:"4newddd"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:3 values:"2bbb"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"2bbb"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, "", nil) -} - -func TestSavepoint(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table stream1(id int, val 
varbinary(128), primary key(id))", - "create table stream2(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table stream1", - "drop table stream2", - }) - engine.se.Reload(context.Background()) - testcases := []testcase{{ - input: []string{ - "begin", - "insert into stream1 values (1, 'aaa')", - "savepoint a", - "insert into stream1 values (2, 'aaa')", - "rollback work to savepoint a", - "savepoint b", - "update stream1 set val='bbb' where id = 1", - "release savepoint b", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"stream1" fields:{name:"id" type:INT32 table:"stream1" org_table:"stream1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"stream1" org_table:"stream1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"stream1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"stream1" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"} after:{lengths:1 lengths:3 values:"1bbb"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func TestSavepointWithFilter(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table stream1(id int, val varbinary(128), primary key(id))", - "create table stream2(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table stream1", - "drop table stream2", - }) - engine.se.Reload(context.Background()) - testcases := []testcase{{ - input: []string{ - "begin", - "insert into stream1 values (1, 'aaa')", - "savepoint a", - "insert into stream1 values (2, 'aaa')", - "savepoint b", - "insert into stream1 values (3, 'aaa')", - "savepoint c", - "insert into stream1 values (4, 'aaa')", - "savepoint d", - "commit", - - "begin", - "insert 
into stream1 values (5, 'aaa')", - "savepoint d", - "insert into stream1 values (6, 'aaa')", - "savepoint c", - "insert into stream1 values (7, 'aaa')", - "savepoint b", - "insert into stream1 values (8, 'aaa')", - "savepoint a", - "commit", - - "begin", - "insert into stream1 values (9, 'aaa')", - "savepoint a", - "insert into stream2 values (1, 'aaa')", - "savepoint b", - "insert into stream1 values (10, 'aaa')", - "savepoint c", - "insert into stream2 values (2, 'aaa')", - "savepoint d", - "commit", - }, - output: [][]string{{ - `begin`, - `gtid`, - `commit`, - }, { - `begin`, - `gtid`, - `commit`, - }, { - `begin`, - `type:FIELD field_event:{table_name:"stream2" fields:{name:"id" type:INT32 table:"stream2" org_table:"stream2" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"stream2" org_table:"stream2" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"stream2" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"stream2" row_changes:{after:{lengths:1 lengths:3 values:"2aaa"}}}`, - `gtid`, - `commit`, - }}, - }} - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "stream2", - Filter: "select * from stream2", - }}, - } - runCases(t, filter, testcases, "current", nil) -} - -func TestStatements(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table stream1(id int, val varbinary(128), primary key(id))", - "create table stream2(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table stream1", - "drop table stream2", - }) - engine.se.Reload(context.Background()) - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into stream1 values (1, 'aaa')", - "update stream1 set val='bbb' where id = 1", - "commit", - }, - output: [][]string{{ - `begin`, 
- `type:FIELD field_event:{table_name:"stream1" fields:{name:"id" type:INT32 table:"stream1" org_table:"stream1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"stream1" org_table:"stream1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"stream1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"stream1" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"} after:{lengths:1 lengths:3 values:"1bbb"}}}`, - `gtid`, - `commit`, - }}, - }, { - // Normal DDL. - input: "alter table stream1 change column val val varbinary(128)", - output: [][]string{{ - `gtid`, - `type:DDL statement:"alter table stream1 change column val val varbinary(128)"`, - }}, - }, { - // DDL padded with comments. - input: " /* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */ ", - output: [][]string{{ - `gtid`, - `type:DDL statement:"/* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */"`, - }}, - }, { - // Multiple tables, and multiple rows changed per statement. 
- input: []string{ - "begin", - "insert into stream1 values (2, 'bbb')", - "insert into stream2 values (1, 'aaa')", - "update stream1 set val='ccc'", - "delete from stream1", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"stream1" fields:{name:"id" type:INT32 table:"stream1" org_table:"stream1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"stream1" org_table:"stream1" database:"vttest" org_name:"val" column_length:256 charset:63 column_type:"varbinary(256)"}}`, - `type:ROW row_event:{table_name:"stream1" row_changes:{after:{lengths:1 lengths:3 values:"2bbb"}}}`, - `type:FIELD field_event:{table_name:"stream2" fields:{name:"id" type:INT32 table:"stream2" org_table:"stream2" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"stream2" org_table:"stream2" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"stream2" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"stream1" ` + - `row_changes:{before:{lengths:1 lengths:3 values:"1bbb"} after:{lengths:1 lengths:3 values:"1ccc"}} ` + - `row_changes:{before:{lengths:1 lengths:3 values:"2bbb"} after:{lengths:1 lengths:3 values:"2ccc"}}}`, - `type:ROW row_event:{table_name:"stream1" ` + - `row_changes:{before:{lengths:1 lengths:3 values:"1ccc"}} ` + - `row_changes:{before:{lengths:1 lengths:3 values:"2ccc"}}}`, - `gtid`, - `commit`, - }}, - }, { - // truncate is a DDL - input: "truncate table stream2", - output: [][]string{{ - `gtid`, - `type:DDL statement:"truncate table stream2"`, - }}, - }, { - // Reverse alter table, else FilePos tests fail - input: " /* prefix */ alter table stream1 change column val val varbinary(128) /* suffix */ ", - output: [][]string{{ - `gtid`, - `type:DDL statement:"/* prefix */ alter 
table stream1 change column val val varbinary(128) /* suffix */"`, - }}, - }} - runCases(t, nil, testcases, "current", nil) - // Test FilePos flavor - savedEngine := engine - defer func() { engine = savedEngine }() - engine = customEngine(t, func(in mysql.ConnParams) mysql.ConnParams { - in.Flavor = "FilePos" - return in - }) - - defer engine.Close() - runCases(t, nil, testcases, "current", nil) -} - -// TestOther tests "other" and "priv" statements. These statements can -// produce very different events depending on the version of mysql or -// mariadb. So, we just show that vreplication transmits "OTHER" events -// if the binlog is affected by the statement. -func TestOther(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table stream1(id int, val varbinary(128), primary key(id))", - "create table stream2(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table stream1", - "drop table stream2", - }) - engine.se.Reload(context.Background()) - - testcases := []string{ - "repair table stream2", - "optimize table stream2", - "analyze table stream2", - "select * from stream1", - "set @val=1", - "show tables", - "describe stream1", - "grant select on stream1 to current_user()", - "revoke select on stream1 from current_user()", - } - - // customRun is a modified version of runCases. 
- customRun := func(mode string) { - t.Logf("Run mode: %v", mode) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - wg, ch := startStream(ctx, t, nil, "", nil) - defer wg.Wait() - want := [][]string{{ - `gtid`, - `type:OTHER`, - }} - - for _, stmt := range testcases { - startPosition := primaryPosition(t) - execStatement(t, stmt) - endPosition := primaryPosition(t) - if startPosition == endPosition { - t.Logf("statement %s did not affect binlog", stmt) - continue - } - expectLog(ctx, t, stmt, ch, want) - } - cancel() - if evs, ok := <-ch; ok { - t.Fatalf("unexpected evs: %v", evs) - } - } - customRun("gtid") - - // Test FilePos flavor - savedEngine := engine - defer func() { engine = savedEngine }() - engine = customEngine(t, func(in mysql.ConnParams) mysql.ConnParams { - in.Flavor = "FilePos" - return in - }) - defer engine.Close() - customRun("filePos") -} - -func TestRegexp(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table yes_stream(id int, val varbinary(128), primary key(id))", - "create table no_stream(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table yes_stream", - "drop table no_stream", - }) - engine.se.Reload(context.Background()) - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/yes.*/", - }}, - } - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into yes_stream values (1, 'aaa')", - "insert into no_stream values (2, 'bbb')", - "update yes_stream set val='bbb' where id = 1", - "update no_stream set val='bbb' where id = 2", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"yes_stream" fields:{name:"id" type:INT32 table:"yes_stream" org_table:"yes_stream" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"yes_stream" org_table:"yes_stream" database:"vttest" 
org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"yes_stream" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"yes_stream" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"} after:{lengths:1 lengths:3 values:"1bbb"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, "", nil) -} - -func TestREKeyRange(t *testing.T) { - if testing.Short() { - t.Skip() - } - ignoreKeyspaceShardInFieldAndRowEvents = false - defer func() { - ignoreKeyspaceShardInFieldAndRowEvents = true - }() - // Needed for this test to run if run standalone - engine.watcherOnce.Do(engine.setWatch) - - execStatements(t, []string{ - "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - setVSchema(t, shardedVSchema) - defer env.SetVSchema("{}") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*/", - Filter: "-80", - }}, - } - wg, ch := startStream(ctx, t, filter, "", nil) - defer wg.Wait() - // 1, 2, 3 and 5 are in shard -80. - // 4 and 6 are in shard 80-. - input := []string{ - "begin", - "insert into t1 values (1, 4, 'aaa')", - "insert into t1 values (4, 1, 'bbb')", - // Stay in shard. - "update t1 set id1 = 2 where id1 = 1", - // Move from -80 to 80-. - "update t1 set id1 = 6 where id1 = 2", - // Move from 80- to -80. 
- "update t1 set id1 = 3 where id1 = 4", - "commit", - } - execStatements(t, input) - expectLog(ctx, t, input, ch, [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id2" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} keyspace:"vttest" shard:"0"}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 values:"14aaa"}} keyspace:"vttest" shard:"0"}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:1 lengths:3 values:"14aaa"} after:{lengths:1 lengths:1 lengths:3 values:"24aaa"}} keyspace:"vttest" shard:"0"}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:1 lengths:3 values:"24aaa"}} keyspace:"vttest" shard:"0"}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 values:"31bbb"}} keyspace:"vttest" shard:"0"}`, - `gtid`, - `commit`, - }}) - - // Switch the vschema to make id2 the primary vindex. - altVSchema := `{ - "sharded": true, - "vindexes": { - "hash": { - "type": "hash" - } - }, - "tables": { - "t1": { - "column_vindexes": [ - { - "column": "id2", - "name": "hash" - } - ] - } - } -}` - setVSchema(t, altVSchema) - - // Only the first insert should be sent. 
- input = []string{ - "begin", - "insert into t1 values (4, 1, 'aaa')", - "insert into t1 values (1, 4, 'aaa')", - "commit", - } - execStatements(t, input) - expectLog(ctx, t, input, ch, [][]string{{ - `begin`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 values:"41aaa"}} keyspace:"vttest" shard:"0"}`, - `gtid`, - `commit`, - }}) - cancel() -} - -func TestInKeyRangeMultiColumn(t *testing.T) { - if testing.Short() { - t.Skip() - } - engine.watcherOnce.Do(engine.setWatch) - engine.se.Reload(context.Background()) - - execStatements(t, []string{ - "create table t1(region int, id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - setVSchema(t, multicolumnVSchema) - defer env.SetVSchema("{}") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select id, region, val, keyspace_id() from t1 where in_keyrange('-80')", - }}, - } - wg, ch := startStream(ctx, t, filter, "", nil) - defer wg.Wait() - - // 1, 2, 3 and 5 are in shard -80. - // 4 and 6 are in shard 80-. - input := []string{ - "begin", - "insert into t1 values (1, 1, 'aaa')", - "insert into t1 values (128, 2, 'bbb')", - // Stay in shard. - "update t1 set region = 2 where id = 1", - // Move from -80 to 80-. - "update t1 set region = 128 where id = 1", - // Move from 80- to -80. 
- "update t1 set region = 1 where id = 2", - "commit", - } - execStatements(t, input) - expectLog(ctx, t, input, ch, [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"region" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"region" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"} fields:{name:"keyspace_id" type:VARBINARY charset:63}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 lengths:9 values:"11aaa\x01\x16k@\xb4J\xbaK\xd6"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:1 lengths:3 lengths:9 values:"11aaa\x01\x16k@\xb4J\xbaK\xd6"} ` + - `after:{lengths:1 lengths:1 lengths:3 lengths:9 values:"12aaa\x02\x16k@\xb4J\xbaK\xd6"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:1 lengths:3 lengths:9 values:"12aaa\x02\x16k@\xb4J\xbaK\xd6"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 lengths:9 values:"21bbb\x01\x06\xe7\xea\"Βp\x8f"}}}`, - `gtid`, - `commit`, - }}) - cancel() -} - -func TestREMultiColumnVindex(t *testing.T) { - if testing.Short() { - t.Skip() - } - engine.watcherOnce.Do(engine.setWatch) - - execStatements(t, []string{ - "create table t1(region int, id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - setVSchema(t, multicolumnVSchema) - defer env.SetVSchema("{}") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*/", - Filter: "-80", - }}, - } - wg, ch := 
startStream(ctx, t, filter, "", nil) - defer wg.Wait() - - // 1, 2, 3 and 5 are in shard -80. - // 4 and 6 are in shard 80-. - input := []string{ - "begin", - "insert into t1 values (1, 1, 'aaa')", - "insert into t1 values (128, 2, 'bbb')", - // Stay in shard. - "update t1 set region = 2 where id = 1", - // Move from -80 to 80-. - "update t1 set region = 128 where id = 1", - // Move from 80- to -80. - "update t1 set region = 1 where id = 2", - "commit", - } - execStatements(t, input) - expectLog(ctx, t, input, ch, [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"region" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"region" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 values:"11aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:1 lengths:3 values:"11aaa"} after:{lengths:1 lengths:1 lengths:3 values:"21aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:1 lengths:3 values:"21aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:1 lengths:3 values:"12bbb"}}}`, - `gtid`, - `commit`, - }}) - cancel() -} - -func TestSelectFilter(t *testing.T) { - if testing.Short() { - t.Skip() - } - engine.se.Reload(context.Background()) - - execStatements(t, []string{ - "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select id2, 
val from t1 where in_keyrange(id2, 'hash', '-80')", - }}, - } - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into t1 values (4, 1, 'aaa')", - "insert into t1 values (2, 4, 'aaa')", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" fields:{name:"id2" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id2" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, "", nil) -} - -func TestDDLAddColumn(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table ddl_test1(id int, val1 varbinary(128), primary key(id))", - "create table ddl_test2(id int, val1 varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table ddl_test1", - "drop table ddl_test2", - }) - - // Record position before the next few statements. - pos := primaryPosition(t) - execStatements(t, []string{ - "begin", - "insert into ddl_test1 values(1, 'aaa')", - "insert into ddl_test2 values(1, 'aaa')", - "commit", - // Adding columns is allowed. - "alter table ddl_test1 add column val2 varbinary(128)", - "alter table ddl_test2 add column val2 varbinary(128)", - "begin", - "insert into ddl_test1 values(2, 'bbb', 'ccc')", - "insert into ddl_test2 values(2, 'bbb', 'ccc')", - "commit", - }) - engine.se.Reload(context.Background()) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Test RE as well as select-based filters. 
- filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "ddl_test2", - Filter: "select * from ddl_test2", - }, { - Match: "/.*/", - }}, - } - - ch := make(chan []*binlogdatapb.VEvent) - go func() { - defer close(ch) - if err := vstream(ctx, t, pos, nil, filter, ch); err != nil { - t.Error(err) - } - }() - expectLog(ctx, t, "ddls", ch, [][]string{{ - // Current schema has 3 columns, but they'll be truncated to match the two columns in the event. - `begin`, - `type:FIELD field_event:{table_name:"ddl_test1" fields:{name:"id" type:INT32 table:"ddl_test1" org_table:"ddl_test1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val1" type:VARBINARY table:"ddl_test1" org_table:"ddl_test1" database:"vttest" org_name:"val1" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"ddl_test1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:FIELD field_event:{table_name:"ddl_test2" fields:{name:"id" type:INT32 table:"ddl_test2" org_table:"ddl_test2" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val1" type:VARBINARY table:"ddl_test2" org_table:"ddl_test2" database:"vttest" org_name:"val1" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"ddl_test2" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `gtid`, - `commit`, - }, { - `gtid`, - `type:DDL statement:"alter table ddl_test1 add column val2 varbinary(128)"`, - }, { - `gtid`, - `type:DDL statement:"alter table ddl_test2 add column val2 varbinary(128)"`, - }, { - // The plan will be updated to now include the third column - // because the new table map will have three columns. 
- `begin`, - `type:FIELD field_event:{table_name:"ddl_test1" fields:{name:"id" type:INT32 table:"ddl_test1" org_table:"ddl_test1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val1" type:VARBINARY table:"ddl_test1" org_table:"ddl_test1" database:"vttest" org_name:"val1" column_length:128 charset:63 column_type:"varbinary(128)"} fields:{name:"val2" type:VARBINARY table:"ddl_test1" org_table:"ddl_test1" database:"vttest" org_name:"val2" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"ddl_test1" row_changes:{after:{lengths:1 lengths:3 lengths:3 values:"2bbbccc"}}}`, - `type:FIELD field_event:{table_name:"ddl_test2" fields:{name:"id" type:INT32 table:"ddl_test2" org_table:"ddl_test2" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val1" type:VARBINARY table:"ddl_test2" org_table:"ddl_test2" database:"vttest" org_name:"val1" column_length:128 charset:63 column_type:"varbinary(128)"} fields:{name:"val2" type:VARBINARY table:"ddl_test2" org_table:"ddl_test2" database:"vttest" org_name:"val2" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"ddl_test2" row_changes:{after:{lengths:1 lengths:3 lengths:3 values:"2bbbccc"}}}`, - `gtid`, - `commit`, - }}) -} - -func TestDDLDropColumn(t *testing.T) { - if testing.Short() { - t.Skip() - } - env.SchemaEngine.Reload(context.Background()) - execStatement(t, "create table ddl_test2(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))") - defer execStatement(t, "drop table ddl_test2") - - // Record position before the next few statements. - pos := primaryPosition(t) - execStatements(t, []string{ - "insert into ddl_test2 values(1, 'aaa', 'ccc')", - // Adding columns is allowed. 
- "alter table ddl_test2 drop column val2", - "insert into ddl_test2 values(2, 'bbb')", - }) - engine.se.Reload(context.Background()) - env.SchemaEngine.Reload(context.Background()) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ch := make(chan []*binlogdatapb.VEvent) - go func() { - for range ch { - } - }() - defer close(ch) - err := vstream(ctx, t, pos, nil, nil, ch) - want := "cannot determine table columns" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must contain %s", err, want) - } -} - -func TestUnsentDDL(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatement(t, "create table unsent(id int, val varbinary(128), primary key(id))") - - testcases := []testcase{{ - input: []string{ - "drop table unsent", - }, - // An unsent DDL is sent as an empty transaction. - output: [][]string{{ - `gtid`, - `type:OTHER`, - }}, - }} - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/none/", - }}, - } - runCases(t, filter, testcases, "", nil) -} - -func TestBuffering(t *testing.T) { - if testing.Short() { - t.Skip() - } - - reset := AdjustPacketSize(10) - defer reset() - - execStatement(t, "create table packet_test(id int, val varbinary(128), primary key(id))") - defer execStatement(t, "drop table packet_test") - engine.se.Reload(context.Background()) - - testcases := []testcase{{ - // All rows in one packet. 
- input: []string{ - "begin", - "insert into packet_test values (1, '123')", - "insert into packet_test values (2, '456')", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"packet_test" fields:{name:"id" type:INT32 table:"packet_test" org_table:"packet_test" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"packet_test" org_table:"packet_test" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:3 values:"1123"}}}`, - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:3 values:"2456"}}}`, - `gtid`, - `commit`, - }}, - }, { - // A new row causes packet size to be exceeded. - // Also test deletes - input: []string{ - "begin", - "insert into packet_test values (3, '123456')", - "insert into packet_test values (4, '789012')", - "delete from packet_test where id=3", - "delete from packet_test where id=4", - "commit", - }, - output: [][]string{{ - `begin`, - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:6 values:"3123456"}}}`, - }, { - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:6 values:"4789012"}}}`, - }, { - `type:ROW row_event:{table_name:"packet_test" row_changes:{before:{lengths:1 lengths:6 values:"3123456"}}}`, - }, { - `type:ROW row_event:{table_name:"packet_test" row_changes:{before:{lengths:1 lengths:6 values:"4789012"}}}`, - `gtid`, - `commit`, - }}, - }, { - // A single row is itself bigger than the packet size. 
- input: []string{ - "begin", - "insert into packet_test values (5, '123456')", - "insert into packet_test values (6, '12345678901')", - "insert into packet_test values (7, '23456')", - "commit", - }, - output: [][]string{{ - `begin`, - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:6 values:"5123456"}}}`, - }, { - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:11 values:"612345678901"}}}`, - }, { - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:5 values:"723456"}}}`, - `gtid`, - `commit`, - }}, - }, { - // An update packet is bigger because it has a before and after image. - input: []string{ - "begin", - "insert into packet_test values (8, '123')", - "update packet_test set val='456' where id=8", - "commit", - }, - output: [][]string{{ - `begin`, - `type:ROW row_event:{table_name:"packet_test" row_changes:{after:{lengths:1 lengths:3 values:"8123"}}}`, - }, { - `type:ROW row_event:{table_name:"packet_test" row_changes:{before:{lengths:1 lengths:3 values:"8123"} after:{lengths:1 lengths:3 values:"8456"}}}`, - `gtid`, - `commit`, - }}, - }, { - // DDL is in its own packet - input: []string{ - "alter table packet_test change val val varchar(128)", - }, - output: [][]string{{ - `gtid`, - `type:DDL statement:"alter table packet_test change val val varchar(128)"`, - }}, - }} - runCases(t, nil, testcases, "", nil) -} - -func TestBestEffortNameInFieldEvent(t *testing.T) { - if testing.Short() { - t.Skip() - } - filter := &binlogdatapb.Filter{ - FieldEventMode: binlogdatapb.Filter_BEST_EFFORT, - Rules: []*binlogdatapb.Rule{{ - Match: "/.*/", - }}, - } - // Modeled after vttablet endtoend compatibility tests. 
- execStatements(t, []string{ - "create table vitess_test(id int, val varbinary(128), primary key(id))", - }) - position := primaryPosition(t) - execStatements(t, []string{ - "insert into vitess_test values(1, 'abc')", - "rename table vitess_test to vitess_test_new", - }) - - defer execStatements(t, []string{ - "drop table vitess_test_new", - }) - engine.se.Reload(context.Background()) - testcases := []testcase{{ - input: []string{ - "insert into vitess_test_new values(2, 'abc')", - }, - // In this case, we don't have information about vitess_test since it was renamed to vitess_test_test. - // information returned by binlog for val column == varchar (rather than varbinary). - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_test" fields:{name:"@1" type:INT32 charset:63} fields:{name:"@2" type:VARCHAR charset:255}}`, - `type:ROW row_event:{table_name:"vitess_test" row_changes:{after:{lengths:1 lengths:3 values:"1abc"}}}`, - `gtid`, - `commit`, - }, { - `gtid`, - `type:DDL statement:"rename table vitess_test to vitess_test_new"`, - }, { - `begin`, - `type:FIELD field_event:{table_name:"vitess_test_new" fields:{name:"id" type:INT32 table:"vitess_test_new" org_table:"vitess_test_new" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"vitess_test_new" org_table:"vitess_test_new" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"vitess_test_new" row_changes:{after:{lengths:1 lengths:3 values:"2abc"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, position, nil) -} - -// test that vstreamer ignores tables created by OnlineDDL -func TestInternalTables(t *testing.T) { - if testing.Short() { - t.Skip() - } - filter := &binlogdatapb.Filter{ - FieldEventMode: binlogdatapb.Filter_BEST_EFFORT, - Rules: []*binlogdatapb.Rule{{ - Match: "/.*/", - }}, - } - // Modeled after vttablet 
endtoend compatibility tests. - execStatements(t, []string{ - "create table vitess_test(id int, val varbinary(128), primary key(id))", - "create table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho(id int, val varbinary(128), primary key(id))", - "create table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431(id int, val varbinary(128), primary key(id))", - "create table _product_old(id int, val varbinary(128), primary key(id))", - }) - position := primaryPosition(t) - execStatements(t, []string{ - "insert into vitess_test values(1, 'abc')", - "insert into _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho values(1, 'abc')", - "insert into _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431 values(1, 'abc')", - "insert into _product_old values(1, 'abc')", - }) - - defer execStatements(t, []string{ - "drop table vitess_test", - "drop table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho", - "drop table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431", - "drop table _product_old", - }) - engine.se.Reload(context.Background()) - testcases := []testcase{{ - input: []string{ - "insert into vitess_test values(2, 'abc')", - }, - // In this case, we don't have information about vitess_test since it was renamed to vitess_test_test. - // information returned by binlog for val column == varchar (rather than varbinary). 
- output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_test" fields:{name:"id" type:INT32 table:"vitess_test" org_table:"vitess_test" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"vitess_test" org_table:"vitess_test" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"vitess_test" row_changes:{after:{lengths:1 lengths:3 values:"1abc"}}}`, - `gtid`, - `commit`, - }, {`begin`, `gtid`, `commit`}, {`begin`, `gtid`, `commit`}, {`begin`, `gtid`, `commit`}, // => inserts into the three internal comments - { - `begin`, - `type:ROW row_event:{table_name:"vitess_test" row_changes:{after:{lengths:1 lengths:3 values:"2abc"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, position, nil) -} - -func TestTypes(t *testing.T) { - if testing.Short() { - t.Skip() - } - - // Modeled after vttablet endtoend compatibility tests. 
- execStatements(t, []string{ - "create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny))", - "create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id))", - "create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb))", - "create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id))", - "create table vitess_null(id int, val varbinary(128), primary key(id))", - "create table vitess_decimal(id int, dec1 decimal(12,4), dec2 decimal(13,4), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table vitess_ints", - "drop table vitess_fracts", - "drop table vitess_strings", - "drop table vitess_misc", - "drop table vitess_null", - "drop table vitess_decimal", - }) - engine.se.Reload(context.Background()) - - testcases := []testcase{{ - input: []string{ - "insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_ints" fields:{name:"tiny" type:INT8 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"tiny" column_length:4 charset:63 column_type:"tinyint(4)"} fields:{name:"tinyu" type:UINT8 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"tinyu" column_length:3 charset:63 column_type:"tinyint(3) unsigned"} fields:{name:"small" type:INT16 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"small" column_length:6 charset:63 column_type:"smallint(6)"} fields:{name:"smallu" type:UINT16 table:"vitess_ints" org_table:"vitess_ints" 
database:"vttest" org_name:"smallu" column_length:5 charset:63 column_type:"smallint(5) unsigned"} fields:{name:"medium" type:INT24 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"medium" column_length:9 charset:63 column_type:"mediumint(9)"} fields:{name:"mediumu" type:UINT24 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"mediumu" column_length:8 charset:63 column_type:"mediumint(8) unsigned"} fields:{name:"normal" type:INT32 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"normal" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"normalu" type:UINT32 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"normalu" column_length:10 charset:63 column_type:"int(10) unsigned"} fields:{name:"big" type:INT64 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"big" column_length:20 charset:63 column_type:"bigint(20)"} fields:{name:"bigu" type:UINT64 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"bigu" column_length:20 charset:63 column_type:"bigint(20) unsigned"} fields:{name:"y" type:YEAR table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"y" column_length:4 charset:63 column_type:"year(4)"}}`, - `type:ROW row_event:{table_name:"vitess_ints" row_changes:{after:{lengths:4 lengths:3 lengths:6 lengths:5 lengths:8 lengths:8 lengths:11 lengths:10 lengths:20 lengths:20 lengths:4 values:"` + - `-128` + - `255` + - `-32768` + - `65535` + - `-8388608` + - `16777215` + - `-2147483648` + - `4294967295` + - `-9223372036854775808` + - `18446744073709551615` + - `2012` + - `"}}}`, - `gtid`, - `commit`, - }}, - }, { - input: []string{ - "insert into vitess_fracts values(1, 1.99, 2.99, 3.99, 4.99)", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_fracts" fields:{name:"id" type:INT32 table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"id" column_length:11 
charset:63 column_type:"int(11)"} fields:{name:"deci" type:DECIMAL table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"deci" column_length:7 charset:63 decimals:2 column_type:"decimal(5,2)"} fields:{name:"num" type:DECIMAL table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"num" column_length:7 charset:63 decimals:2 column_type:"decimal(5,2)"} fields:{name:"f" type:FLOAT32 table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"f" column_length:12 charset:63 decimals:31 column_type:"float"} fields:{name:"d" type:FLOAT64 table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"d" column_length:22 charset:63 decimals:31 column_type:"double"}}`, - `type:ROW row_event:{table_name:"vitess_fracts" row_changes:{after:{lengths:1 lengths:4 lengths:4 lengths:8 lengths:8 values:"11.992.993.99E+004.99E+00"}}}`, - `gtid`, - `commit`, - }}, - }, { - // TODO(sougou): validate that binary and char data generate correct DMLs on the other end. 
- input: []string{ - "insert into vitess_strings values('a', 'b', 'c', 'd\000\000\000', 'e', 'f', 'g', 'h', 'a', 'a,b')", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_strings" fields:{name:"vb" type:VARBINARY table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"vb" column_length:16 charset:63 column_type:"varbinary(16)"} fields:{name:"c" type:CHAR table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"c" column_length:64 charset:45 column_type:"char(16)"} fields:{name:"vc" type:VARCHAR table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"vc" column_length:64 charset:45 column_type:"varchar(16)"} fields:{name:"b" type:BINARY table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"b" column_length:4 charset:63 column_type:"binary(4)"} fields:{name:"tb" type:BLOB table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"tb" column_length:255 charset:63 column_type:"tinyblob"} fields:{name:"bl" type:BLOB table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"bl" column_length:65535 charset:63 column_type:"blob"} fields:{name:"ttx" type:TEXT table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"ttx" column_length:1020 charset:45 column_type:"tinytext"} fields:{name:"tx" type:TEXT table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"tx" column_length:262140 charset:45 column_type:"text"} fields:{name:"en" type:ENUM table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"en" column_length:4 charset:45 column_type:"enum('a','b')"} fields:{name:"s" type:SET table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"s" column_length:12 charset:45 column_type:"set('a','b')"}}`, - `type:ROW row_event:{table_name:"vitess_strings" row_changes:{after:{lengths:1 lengths:1 lengths:1 lengths:4 lengths:1 lengths:1 
lengths:1 lengths:1 lengths:1 lengths:1 ` + - `values:"abcd\x00\x00\x00efgh13"}}}`, - `gtid`, - `commit`, - }}, - }, { - // TODO(sougou): validate that the geometry value generates the correct DMLs on the other end. - input: []string{ - "insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_misc" fields:{name:"id" type:INT32 table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"b" type:BIT table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"b" column_length:8 charset:63 column_type:"bit(8)"} fields:{name:"d" type:DATE table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"d" column_length:10 charset:63 column_type:"date"} fields:{name:"dt" type:DATETIME table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"dt" column_length:19 charset:63 column_type:"datetime"} fields:{name:"t" type:TIME table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"t" column_length:10 charset:63 column_type:"time"} fields:{name:"g" type:GEOMETRY table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"g" column_length:4294967295 charset:63 column_type:"geometry"}}`, - `type:ROW row_event:{table_name:"vitess_misc" row_changes:{after:{lengths:1 lengths:1 lengths:10 lengths:19 lengths:8 lengths:25 values:"1\x012012-01-012012-01-01 15:45:4515:45:45\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@"}}}`, - `gtid`, - `commit`, - }}, - }, { - input: []string{ - "insert into vitess_null values(1, null)", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_null" fields:{name:"id" type:INT32 table:"vitess_null" org_table:"vitess_null" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} 
fields:{name:"val" type:VARBINARY table:"vitess_null" org_table:"vitess_null" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"vitess_null" row_changes:{after:{lengths:1 lengths:-1 values:"1"}}}`, - `gtid`, - `commit`, - }}, - }, { - input: []string{ - "insert into vitess_decimal values(1, 1.23, 1.23)", - "insert into vitess_decimal values(2, -1.23, -1.23)", - "insert into vitess_decimal values(3, 0000000001.23, 0000000001.23)", - "insert into vitess_decimal values(4, -0000000001.23, -0000000001.23)", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"vitess_decimal" fields:{name:"id" type:INT32 table:"vitess_decimal" org_table:"vitess_decimal" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"dec1" type:DECIMAL table:"vitess_decimal" org_table:"vitess_decimal" database:"vttest" org_name:"dec1" column_length:14 charset:63 decimals:4 column_type:"decimal(12,4)"} fields:{name:"dec2" type:DECIMAL table:"vitess_decimal" org_table:"vitess_decimal" database:"vttest" org_name:"dec2" column_length:15 charset:63 decimals:4 column_type:"decimal(13,4)"}}`, - `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:6 lengths:6 values:"11.23001.2300"}}}`, - `gtid`, - `commit`, - }, { - `begin`, - `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:7 lengths:7 values:"2-1.2300-1.2300"}}}`, - `gtid`, - `commit`, - }, { - `begin`, - `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:6 lengths:6 values:"31.23001.2300"}}}`, - `gtid`, - `commit`, - }, { - `begin`, - `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:7 lengths:7 values:"4-1.2300-1.2300"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "", nil) -} - -func TestJSON(t *testing.T) { - if err := 
env.Mysqld.ExecuteSuperQuery(context.Background(), "create table vitess_json(id int default 1, val json, primary key(id))"); err != nil { - // If it's a syntax error, MySQL is an older version. Skip this test. - if strings.Contains(err.Error(), "syntax") { - return - } - t.Fatal(err) - } - defer execStatement(t, "drop table vitess_json") - engine.se.Reload(context.Background()) - jsonValues := []string{"{}", "123456", `"vtTablet"`, `{"foo": "bar"}`, `["abc", 3.14, true]`} - - var inputs, outputs []string - var outputsArray [][]string - fieldAdded := false - var expect = func(in string) string { - return strings.ReplaceAll(in, "\"", "\\\"") - } - for i, val := range jsonValues { - inputs = append(inputs, fmt.Sprintf("insert into vitess_json values(%d, %s)", i+1, encodeString(val))) - - outputs = []string{} - outputs = append(outputs, `begin`) - if !fieldAdded { - outputs = append(outputs, `type:FIELD field_event:{table_name:"vitess_json" fields:{name:"id" type:INT32 table:"vitess_json" org_table:"vitess_json" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:JSON table:"vitess_json" org_table:"vitess_json" database:"vttest" org_name:"val" column_length:4294967295 charset:63 column_type:"json"}}`) - fieldAdded = true - } - out := expect(val) - - outputs = append(outputs, fmt.Sprintf(`type:ROW row_event:{table_name:"vitess_json" row_changes:{after:{lengths:1 lengths:%d values:"%d%s"}}}`, - len(val), i+1 /*id increments*/, out)) - outputs = append(outputs, `gtid`) - outputs = append(outputs, `commit`) - outputsArray = append(outputsArray, outputs) - } - testcases := []testcase{{ - input: inputs, - output: outputsArray, - }} - runCases(t, nil, testcases, "", nil) -} - -func TestExternalTable(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create database external", - "create table external.ext(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, 
[]string{ - "drop database external", - }) - engine.se.Reload(context.Background()) - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into external.ext values (1, 'aaa')", - "commit", - }, - // External table events don't get sent. - output: [][]string{{ - `begin`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "", nil) -} - -func TestJournal(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table if not exists _vt.resharding_journal(id int, db_name varchar(128), val blob, primary key(id))", - }) - defer execStatements(t, []string{ - "drop table _vt.resharding_journal", - }) - engine.se.Reload(context.Background()) - - journal1 := &binlogdatapb.Journal{ - Id: 1, - MigrationType: binlogdatapb.MigrationType_SHARDS, - } - journal2 := &binlogdatapb.Journal{ - Id: 2, - MigrationType: binlogdatapb.MigrationType_SHARDS, - } - testcases := []testcase{{ - input: []string{ - "begin", - fmt.Sprintf("insert into _vt.resharding_journal values(1, 'vttest', '%v')", journal1.String()), - fmt.Sprintf("insert into _vt.resharding_journal values(2, 'nosend', '%v')", journal2.String()), - "commit", - }, - // External table events don't get sent. - output: [][]string{{ - `begin`, - `type:JOURNAL journal:{id:1 migration_type:SHARDS}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "", nil) -} - -// TestMinimalMode confirms that we don't support minimal binlog_row_image mode. 
-func TestMinimalMode(t *testing.T) { - if testing.Short() { - t.Skip() - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - oldEngine := engine - engine = nil - oldEnv := env - env = nil - newEngine(t, ctx, "minimal") - defer func() { - engine = oldEngine - env = oldEnv - }() - err := engine.Stream(context.Background(), "current", nil, nil, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { return nil }) - require.Error(t, err, "minimal binlog_row_image is not supported by Vitess VReplication") -} - -func TestStatementMode(t *testing.T) { - if testing.Short() { - t.Skip() - } - execStatements(t, []string{ - "create table stream1(id int, val varbinary(128), primary key(id))", - "create table stream2(id int, val varbinary(128), primary key(id))", - }) - - engine.se.Reload(context.Background()) - - defer execStatements(t, []string{ - "drop table stream1", - "drop table stream2", - }) - - testcases := []testcase{{ - input: []string{ - "set @@session.binlog_format='STATEMENT'", - "begin", - "insert into stream1 values (1, 'aaa')", - "update stream1 set val='bbb' where id = 1", - "delete from stream1 where id = 1", - "commit", - "set @@session.binlog_format='ROW'", - }, - output: [][]string{{ - `begin`, - `type:INSERT dml:"insert into stream1 values (1, 'aaa')"`, - `type:UPDATE dml:"update stream1 set val='bbb' where id = 1"`, - `type:DELETE dml:"delete from stream1 where id = 1"`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "", nil) -} - -func TestHeartbeat(t *testing.T) { - if testing.Short() { - t.Skip() - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - wg, ch := startStream(ctx, t, nil, "", nil) - defer wg.Wait() - evs := <-ch - require.Equal(t, 1, len(evs)) - assert.Equal(t, binlogdatapb.VEventType_HEARTBEAT, evs[0].Type) - cancel() -} - -func TestNoFutureGTID(t *testing.T) { - if testing.Short() { - t.Skip() - } - - // Execute something to make sure we have ranges 
in GTIDs. - execStatements(t, []string{ - "create table stream1(id int, val varbinary(128), primary key(id))", - }) - defer execStatements(t, []string{ - "drop table stream1", - }) - engine.se.Reload(context.Background()) - - pos := primaryPosition(t) - t.Logf("current position: %v", pos) - // Both mysql and mariadb have '-' in their gtids. - // Invent a GTID in the future. - index := strings.LastIndexByte(pos, '-') - num, err := strconv.Atoi(pos[index+1:]) - require.NoError(t, err) - future := pos[:index+1] + fmt.Sprintf("%d", num+1) - t.Logf("future position: %v", future) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ch := make(chan []*binlogdatapb.VEvent) - go func() { - for range ch { - } - }() - defer close(ch) - err = vstream(ctx, t, future, nil, nil, ch) - want := "GTIDSet Mismatch" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("err: %v, must contain %s", err, want) - } -} - -func TestFilteredMultipleWhere(t *testing.T) { - if testing.Short() { - t.Skip() - } - - execStatements(t, []string{ - "create table t1(id1 int, id2 int, id3 int, val varbinary(128), primary key(id1))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - - setVSchema(t, shardedVSchema) - defer env.SetVSchema("{}") - - filter := &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: "select id1, val from t1 where in_keyrange('-80') and id2 = 200 and id3 = 1000 and val = 'newton'", - }}, - } - - testcases := []testcase{{ - input: []string{ - "begin", - "insert into t1 values (1, 100, 1000, 'kepler')", - "insert into t1 values (2, 200, 1000, 'newton')", - "insert into t1 values (3, 100, 2000, 'kepler')", - "insert into t1 values (128, 200, 1000, 'newton')", - "insert into t1 values (5, 200, 2000, 'kepler')", - "insert into t1 values (129, 200, 1000, 'kepler')", - "commit", - }, - output: [][]string{{ - `begin`, - `type:FIELD field_event:{table_name:"t1" 
fields:{name:"id1" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id1" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:6 values:"2newton"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:3 lengths:6 values:"128newton"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, filter, testcases, "", nil) -} - -// TestGeneratedColumns just confirms that generated columns are sent in a vstream as expected -func TestGeneratedColumns(t *testing.T) { - execStatements(t, []string{ - "create table t1(id int, val varbinary(6), val2 varbinary(6) as (concat(id, val)), val3 varbinary(6) as (concat(val, id)), id2 int, primary key(id))", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1(id, val, id2) values (1, 'aaa', 10)", - "insert into t1(id, val, id2) values (2, 'bbb', 20)", - "commit", - } - - fe := &TestFieldEvent{ - table: "t1", - db: "vttest", - cols: []*TestColumn{ - {name: "id", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, - {name: "val", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, charset: 63}, - {name: "val2", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, charset: 63}, - {name: "val3", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, charset: 63}, - {name: "id2", dataType: "INT32", colType: "int(11)", len: 11, charset: 63}, - }, - } - - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - fe.String(), - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 lengths:4 lengths:4 lengths:2 values:"1aaa1aaaaaa110"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 lengths:4 
lengths:4 lengths:2 values:"2bbb2bbbbbb220"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -// TestGeneratedInvisiblePrimaryKey validates that generated invisible primary keys are sent in row events. -func TestGeneratedInvisiblePrimaryKey(t *testing.T) { - if !env.HasCapability(testenv.ServerCapabilityGeneratedInvisiblePrimaryKey) { - t.Skip("skipping test as server does not support generated invisible primary keys") - } - execStatements(t, []string{ - "SET @@session.sql_generate_invisible_primary_key=ON;", - "create table t1(val varbinary(6))", - "SET @@session.sql_generate_invisible_primary_key=OFF;", - }) - defer execStatements(t, []string{ - "drop table t1", - }) - engine.se.Reload(context.Background()) - queries := []string{ - "begin", - "insert into t1 values ('aaa')", - "update t1 set val = 'bbb' where my_row_id = 1", - "commit", - } - - fe := &TestFieldEvent{ - table: "t1", - db: "vttest", - cols: []*TestColumn{ - {name: "my_row_id", dataType: "UINT64", colType: "bigint unsigned", len: 20, charset: 63}, - {name: "val", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, charset: 63}, - }, - } - - testcases := []testcase{{ - input: queries, - output: [][]string{{ - `begin`, - fe.String(), - `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, - `type:ROW row_event:{table_name:"t1" row_changes:{before:{lengths:1 lengths:3 values:"1aaa"} after:{lengths:1 lengths:3 values:"1bbb"}}}`, - `gtid`, - `commit`, - }}, - }} - runCases(t, nil, testcases, "current", nil) -} - -func runCases(t *testing.T, filter *binlogdatapb.Filter, testcases []testcase, position string, tablePK []*binlogdatapb.TableLastPK) { - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - wg, ch := startStream(ctx, t, filter, position, tablePK) - defer wg.Wait() - // If position is 'current', we wait for a heartbeat to be - // sure the vstreamer has started. 
- if position == "current" { - log.Infof("Starting stream with current position") - expectLog(ctx, t, "current pos", ch, [][]string{{`gtid`, `type:OTHER`}}) - } - - log.Infof("Starting to run test cases") - for _, tcase := range testcases { - switch input := tcase.input.(type) { - case []string: - execStatements(t, input) - case string: - execStatement(t, input) - default: - t.Fatalf("unexpected input: %#v", input) - } - engine.se.Reload(ctx) - expectLog(ctx, t, tcase.input, ch, tcase.output) - } - - cancel() - if evs, ok := <-ch; ok { - t.Fatalf("unexpected evs: %v", evs) - } - log.Infof("Last line of runCases") -} - -func expectLog(ctx context.Context, t *testing.T, input any, ch <-chan []*binlogdatapb.VEvent, output [][]string) { - timer := time.NewTimer(1 * time.Minute) - defer timer.Stop() - for _, wantset := range output { - var evs []*binlogdatapb.VEvent - for { - select { - case allevs, ok := <-ch: - if !ok { - t.Fatal("expectLog: not ok, stream ended early") - } - for _, ev := range allevs { - // Ignore spurious heartbeats that can happen on slow machines. - if ev.Type == binlogdatapb.VEventType_HEARTBEAT { - continue - } - if ev.Throttled { - continue - } - evs = append(evs, ev) - } - case <-ctx.Done(): - t.Fatalf("expectLog: Done(), stream ended early") - case <-timer.C: - t.Fatalf("expectLog: timed out waiting for events: %v", wantset) - } - if len(evs) != 0 { - break - } - } - if len(wantset) != len(evs) { - t.Fatalf("%v: evs\n%v, want\n%v, >> got length %d, wanted length %d", input, evs, wantset, len(evs), len(wantset)) - } - for i, want := range wantset { - // CurrentTime is not testable. 
- evs[i].CurrentTime = 0 - evs[i].Keyspace = "" - evs[i].Shard = "" - switch want { - case "begin": - if evs[i].Type != binlogdatapb.VEventType_BEGIN { - t.Fatalf("%v (%d): event: %v, want gtid or begin", input, i, evs[i]) - } - case "gtid": - if evs[i].Type != binlogdatapb.VEventType_GTID { - t.Fatalf("%v (%d): event: %v, want gtid", input, i, evs[i]) - } - case "lastpk": - if evs[i].Type != binlogdatapb.VEventType_LASTPK { - t.Fatalf("%v (%d): event: %v, want lastpk", input, i, evs[i]) - } - case "commit": - if evs[i].Type != binlogdatapb.VEventType_COMMIT { - t.Fatalf("%v (%d): event: %v, want commit", input, i, evs[i]) - } - case "other": - if evs[i].Type != binlogdatapb.VEventType_OTHER { - t.Fatalf("%v (%d): event: %v, want other", input, i, evs[i]) - } - case "ddl": - if evs[i].Type != binlogdatapb.VEventType_DDL { - t.Fatalf("%v (%d): event: %v, want ddl", input, i, evs[i]) - } - case "copy_completed": - if evs[i].Type != binlogdatapb.VEventType_COPY_COMPLETED { - t.Fatalf("%v (%d): event: %v, want copy_completed", input, i, evs[i]) - } - default: - evs[i].Timestamp = 0 - if evs[i].Type == binlogdatapb.VEventType_FIELD { - for j := range evs[i].FieldEvent.Fields { - evs[i].FieldEvent.Fields[j].Flags = 0 - if ignoreKeyspaceShardInFieldAndRowEvents { - evs[i].FieldEvent.Keyspace = "" - evs[i].FieldEvent.Shard = "" - } - } - } - if ignoreKeyspaceShardInFieldAndRowEvents && evs[i].Type == binlogdatapb.VEventType_ROW { - evs[i].RowEvent.Keyspace = "" - evs[i].RowEvent.Shard = "" - } - if !testRowEventFlags && evs[i].Type == binlogdatapb.VEventType_ROW { - evs[i].RowEvent.Flags = 0 - } - want = env.RemoveAnyDeprecatedDisplayWidths(want) - if got := fmt.Sprintf("%v", evs[i]); got != want { - log.Errorf("%v (%d): event:\n%q, want\n%q", input, i, got, want) - t.Fatalf("%v (%d): event:\n%q, want\n%q", input, i, got, want) - } - } - } - } -} - -func startStream(ctx context.Context, t *testing.T, filter *binlogdatapb.Filter, position string, tablePKs 
[]*binlogdatapb.TableLastPK) (*sync.WaitGroup, <-chan []*binlogdatapb.VEvent) { - switch position { - case "": - position = primaryPosition(t) - case "vscopy": - position = "" - } - - wg := sync.WaitGroup{} - wg.Add(1) - ch := make(chan []*binlogdatapb.VEvent) - - go func() { - defer close(ch) - defer wg.Done() - vstream(ctx, t, position, tablePKs, filter, ch) - }() - return &wg, ch -} - -func vstream(ctx context.Context, t *testing.T, pos string, tablePKs []*binlogdatapb.TableLastPK, filter *binlogdatapb.Filter, ch chan []*binlogdatapb.VEvent) error { - if filter == nil { - filter = &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "/.*/", - }}, - } - } - return engine.Stream(ctx, pos, tablePKs, filter, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { - timer := time.NewTimer(2 * time.Second) - defer timer.Stop() - - t.Logf("Received events: %v", evs) - select { - case ch <- evs: - case <-ctx.Done(): - return fmt.Errorf("engine.Stream Done() stream ended early") - case <-timer.C: - t.Log("VStream timed out waiting for events") - return io.EOF - } - return nil - }) -} - -func execStatement(t *testing.T, query string) { - t.Helper() - if err := env.Mysqld.ExecuteSuperQuery(context.Background(), query); err != nil { - t.Fatal(err) - } -} - -func execStatements(t *testing.T, queries []string) { - if err := env.Mysqld.ExecuteSuperQueryList(context.Background(), queries); err != nil { - t.Fatal(err) - } -} - -func primaryPosition(t *testing.T) string { - t.Helper() - // We use the engine's cp because there is one test that overrides - // the flavor to FilePos. If so, we have to obtain the position - // in that flavor format. 
- connParam, err := engine.env.Config().DB.DbaWithDB().MysqlParams() - if err != nil { - t.Fatal(err) - } - conn, err := mysql.Connect(context.Background(), connParam) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - pos, err := conn.PrimaryPosition() - if err != nil { - t.Fatal(err) - } - return replication.EncodePosition(pos) -} - -func setVSchema(t *testing.T, vschema string) { - t.Helper() - - curCount := engine.vschemaUpdates.Get() - if err := env.SetVSchema(vschema); err != nil { - t.Fatal(err) - } - // Wait for curCount to go up. - updated := false - for i := 0; i < 10; i++ { - if engine.vschemaUpdates.Get() != curCount { - updated = true - break - } - time.Sleep(10 * time.Millisecond) - } - if !updated { - log.Infof("vschema did not get updated") - t.Error("vschema did not get updated") - } -} diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go new file mode 100644 index 00000000000..8d0d182790e --- /dev/null +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer_test.go @@ -0,0 +1,1954 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vstreamer + +import ( + "context" + "fmt" + "io" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/prometheus/common/version" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" + "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" +) + +type testcase struct { + input any + output [][]string +} + +func checkIfOptionIsSupported(t *testing.T, variable string) bool { + qr, err := env.Mysqld.FetchSuperQuery(context.Background(), fmt.Sprintf("show variables like '%s'", variable)) + require.NoError(t, err) + require.NotNil(t, qr) + if qr.Rows != nil && len(qr.Rows) == 1 { + return true + } + return false +} + +// TestPlayerNoBlob sets up a new environment with mysql running with +// binlog_row_image as noblob. It confirms that the VEvents created are +// correct: that they don't contain the missing columns and that the +// DataColumns bitmap is sent. +func TestNoBlob(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + oldEngine := engine + engine = nil + oldEnv := env + env = nil + newEngine(t, ctx, "noblob") + defer func() { + if engine != nil { + engine.Close() + } + if env != nil { + env.Close() + } + engine = oldEngine + env = oldEnv + }() + + ts := &TestSpec{ + t: t, + ddls: []string{ + // t1 has a blob column and a primary key. The blob column will not be in update row events. + "create table t1(id int, blb blob, val varchar(4), primary key(id))", + // t2 has a text column and no primary key. The text column will be in update row events. 
+ "create table t2(id int, txt text, val varchar(4), unique key(id, val))", + // t3 has a text column and a primary key. The text column will not be in update row events. + "create table t3(id int, txt text, val varchar(4), primary key(id))", + }, + options: &TestSpecOptions{ + noblob: true, + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 'blob1', 'aaa')", nil}, + {"update t1 set val = 'bbb'", nil}, + {"commit", nil}, + }, {{"begin", nil}, + {"insert into t2 values (1, 'text1', 'aaa')", nil}, + {"update t2 set val = 'bbb'", nil}, + {"commit", nil}, + }, {{"begin", nil}, + {"insert into t3 values (1, 'text1', 'aaa')", nil}, + {"update t3 set val = 'bbb'", nil}, + {"commit", nil}, + }} + ts.Run() +} + +// TestSetAndEnum confirms that the events for set and enum columns are correct. +func TestSetAndEnum(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id int, val binary(4), color set('red','green','blue','black','white','pink','purple','yellow','brown'), size enum('S','M','L'), primary key(id))", + "create table t2(id int, val binary(4), color set('red','green','blue','black','white','pink','purple','yellow','brown','eggshell','mint','tan','fuschia','teal','babyblue','grey','bulletgrey') collate utf8mb4_bin, size enum('S','M','L') collate utf8mb4_bin, primary key(id)) charset=utf8mb4", + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{ + { + {"begin", nil}, + {"insert into t1 values (1, 'aaa', 'red,blue', 'S')", nil}, + {"insert into t1 values (2, 'bbb', 'green,pink,purple,yellow,brown', 'M')", nil}, + {"insert into t1 values (3, 'ccc', 'red,green,blue', 'L')", nil}, + {"commit", nil}, + }, + { + {"begin", nil}, + {"insert into t2 values (1, 'xxx', 'red,blue,black,grey', 'S')", nil}, + {"insert into t2 values (2, 'yyy', 'green,black,pink,purple,yellow,brown,mint,tan,bulletgrey', 'M')", nil}, + {"insert into t2 values (3, 'zzz', 'red,green,blue', 
'L')", nil}, + {"commit", nil}, + }, + { + {"begin", nil}, + // This query fails with the following error when SQL mode includes STRICT: + // failed: Data truncated for column 'size' at row 1 (errno 1265) (sqlstate 01000) during query: insert into t2 values (4, 'lll', '', '') + {"set @@session.sql_mode = ''", nil}, + {"insert into t2 values (4, 'lll', '', '')", nil}, + {"insert into t2 values (5, 'mmm', 'invalid', 'invalid,invalid,mint,invalid')", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t2", changes: []TestRowChange{{after: []string{"5", "mmm", "\x00", ""}}}}}, + }}, + {"insert into t2 values (6, 'nnn', NULL, NULL)", nil}, + {"commit", nil}, + }, + } + ts.Run() +} + +// TestCellValuePadding tests that the events are correctly padded for binary columns. +func TestCellValuePadding(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id int, val binary(4), primary key(val))", + "create table t2(id int, val char(4), primary key(val))", + "create table t3(id int, val char(4) collate utf8mb4_bin, primary key(val))"}, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 'aaa\000')", nil}, + {"insert into t1 values (2, 'bbb\000')", nil}, + {"update t1 set id = 11 where val = 'aaa\000'", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "aaa\x00"}, after: []string{"11", "aaa\x00"}}}}}, + }}, + {"insert into t2 values (1, 'aaa')", nil}, + {"insert into t2 values (2, 'bbb')", nil}, + {"update t2 set id = 11 where val = 'aaa'", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t2", changes: []TestRowChange{{before: []string{"1", "aaa"}, after: []string{"11", "aaa"}}}}}, + }}, + {"insert into t3 values (1, 'aaa')", nil}, + {"insert into t3 values (2, 'bb')", nil}, + {"update t3 set id = 11 where val = 'aaa'", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t3", changes: []TestRowChange{{before: []string{"1", "aaa"}, after: 
[]string{"11", "aaa"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +// TestColumnCollationHandling confirms that we handle column collations +// properly in vstreams now that we parse any optional collation ID values +// in binlog_row_metadata AND we query mysqld for the collation when possible. +func TestColumnCollationHandling(t *testing.T) { + extraCollation := "utf8mb4_ja_0900_as_cs" // Test 2 byte collation ID handling + if strings.HasPrefix(testenv.MySQLVersion, "5.7") { // 5.7 does not support 2 byte collation IDs + extraCollation = "utf8mb4_croatian_ci" + } + ts := &TestSpec{ + t: t, + ddls: []string{ + fmt.Sprintf("create table t1(id int, txt text, val char(4) collate utf8mb4_bin, id2 int, val2 varchar(64) collate utf8mb4_general_ci, valvb varbinary(128), val3 varchar(255) collate %s, primary key(val))", extraCollation), + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 'aaa', 'aaa', 1, 'aaa', 'aaa', 'aaa')", nil}, + {"insert into t1 values (2, 'bb', 'bb', 1, 'bb', 'bb', 'bb')", nil}, + {"update t1 set id = 11 where val = 'aaa'", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "aaa", "aaa", "1", "aaa", "aaa", "aaa"}, after: []string{"11", "aaa", "aaa", "1", "aaa", "aaa", "aaa"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +// This test is not ported to the new test framework because it only runs on old deprecated versions of MySQL. +// We leave the test for older flavors until we EOL them. 
+func TestSetStatement(t *testing.T) { + if !checkIfOptionIsSupported(t, "log_builtin_as_identified_by_password") { + // the combination of setting this option and support for "set password" only works on a few flavors + log.Info("Cannot test SetStatement on this flavor") + return + } + + execStatements(t, []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table t1", + }) + queries := []string{ + "begin", + "insert into t1 values (1, 'aaa')", + "commit", + "set global log_builtin_as_identified_by_password=1", + "SET PASSWORD FOR 'vt_appdebug'@'localhost'='*AA17DA66C7C714557F5485E84BCAFF2C209F2F53'", // select password('vtappdebug_password'); + } + testcases := []testcase{{ + input: queries, + output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"t1" fields:{name:"id" type:INT32 table:"t1" org_table:"t1" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"t1" org_table:"t1" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, + `type:ROW row_event:{table_name:"t1" row_changes:{after:{lengths:1 lengths:3 values:"1aaa"}}}`, + `gtid`, + `commit`, + }, { + `gtid`, + `other`, + }}, + }} + runCases(t, nil, testcases, "current", nil) +} + +// TestSetForeignKeyCheck confirms that the binlog RowEvent flags are set correctly when foreign_key_checks are on and off. 
+func TestSetForeignKeyCheck(t *testing.T) { + testRowEventFlags = true + defer func() { testRowEventFlags = false }() + + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id int, val binary(4), primary key(id))", + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 'aaa')", []TestRowEvent{{flags: 1}}}, + {"set @@session.foreign_key_checks=1", noEvents}, + {"insert into t1 values (2, 'bbb')", []TestRowEvent{{flags: 1}}}, + {"set @@session.foreign_key_checks=0", noEvents}, + {"insert into t1 values (3, 'ccc')", []TestRowEvent{{flags: 3}}}, + {"commit", nil}, + }} + ts.Run() + +} + +func TestStmtComment(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id int, val varbinary(128), primary key(id))", + }, + options: nil, + } + defer ts.Close() + + ts.Init() + + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 'aaa')", nil}, + {"commit", nil}, + {"/*!40000 ALTER TABLE `t1` DISABLE KEYS */", []TestRowEvent{ + {restart: true, event: "gtid"}, + {event: "other"}}, + }, + }} + ts.Run() +} + +func TestVersion(t *testing.T) { + oldEngine := engine + defer func() { + engine = oldEngine + }() + + ctx := context.Background() + err := env.SchemaEngine.EnableHistorian(true) + require.NoError(t, err) + defer env.SchemaEngine.EnableHistorian(false) + + engine = NewEngine(engine.env, env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + engine.InitDBConfig(env.KeyspaceName, env.ShardName) + engine.Open() + defer engine.Close() + + execStatements(t, []string{ + "create database if not exists _vt", + "create table if not exists _vt.schema_version(id int, pos varbinary(10000), time_updated bigint(20), ddl varchar(10000), schemax blob, primary key(id))", + }) + defer execStatements(t, []string{ + "drop table _vt.schema_version", + }) + dbSchema := &binlogdatapb.MinimalSchema{ + Tables: []*binlogdatapb.MinimalTable{{ + Name: "t1", + }}, + } + blob, _ := 
dbSchema.MarshalVT() + gtid := "MariaDB/0-41983-20" + testcases := []testcase{{ + input: []string{ + fmt.Sprintf("insert into _vt.schema_version values(1, '%s', 123, 'create table t1', %v)", gtid, encodeString(string(blob))), + }, + // External table events don't get sent. + output: [][]string{{ + `begin`, + `type:VERSION`}, { + `gtid`, + `commit`}}, + }} + runCases(t, nil, testcases, "", nil) + mt, err := env.SchemaEngine.GetTableForPos(ctx, sqlparser.NewIdentifierCS("t1"), gtid) + require.NoError(t, err) + assert.True(t, proto.Equal(mt, dbSchema.Tables[0])) +} + +func TestMissingTables(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id11 int, id12 int, primary key(id11))", + }, + } + ts.Init() + defer ts.Close() + execStatements(t, []string{ + "create table shortlived(id31 int, id32 int, primary key(id31))", + }) + defer execStatements(t, []string{ + "drop table _shortlived", + }) + startPos := primaryPosition(t) + execStatements(t, []string{ + "insert into shortlived values (1,1), (2,2)", + "alter table shortlived rename to _shortlived", + }) + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }}, + } + fe := ts.fieldEvents["t1"] + insert := "insert into t1 values (101, 1010)" + rowEvent := getRowEvent(ts, fe, insert) + testcases := []testcase{ + { + input: []string{}, + output: [][]string{}, + }, + + { + input: []string{insert}, + output: [][]string{ + {"begin", "gtid", "commit"}, + {"gtid", "type:OTHER"}, + {"begin", fe.String(), rowEvent, "gtid", "commit"}, + }, + }, + } + runCases(t, filter, testcases, startPos, nil) +} + +func TestVStreamCopySimpleFlow(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id11 int, id12 int, primary key(id11))", + "create table t2(id21 int, id22 int, primary key(id21))", + }, + } + ts.Init() + defer ts.Close() + + log.Infof("Pos before bulk insert: %s", primaryPosition(t)) + insertSomeRows(t, 10) + 
log.Infof("Pos after bulk insert: %s", primaryPosition(t)) + + ctx := context.Background() + qr, err := env.Mysqld.FetchSuperQuery(ctx, "SELECT count(*) as cnt from t1, t2 where t1.id11 = t2.id21") + if err != nil { + t.Fatal("Query failed") + } + require.Equal(t, "[[INT64(10)]]", fmt.Sprintf("%v", qr.Rows)) + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select * from t1", + }, { + Match: "t2", + Filter: "select * from t2", + }}, + } + + var tablePKs []*binlogdatapb.TableLastPK + tablePKs = append(tablePKs, getTablePK("t1", 1)) + tablePKs = append(tablePKs, getTablePK("t2", 2)) + t1FieldEvent := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id11", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "id12", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + }, + enumSetStrings: true, + } + t2FieldEvent := &TestFieldEvent{ + table: "t2", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id21", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "id22", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + }, + enumSetStrings: true, + } + + t1Events := []string{} + t2Events := []string{} + for i := 1; i <= 10; i++ { + t1Events = append(t1Events, + fmt.Sprintf("type:ROW row_event:{table_name:\"t1\" row_changes:{after:{lengths:%d lengths:%d values:\"%d%d\"}}}", len(strconv.Itoa(i)), len(strconv.Itoa(i*10)), i, i*10)) + t2Events = append(t2Events, + fmt.Sprintf("type:ROW row_event:{table_name:\"t2\" row_changes:{after:{lengths:%d lengths:%d values:\"%d%d\"}}}", len(strconv.Itoa(i)), len(strconv.Itoa(i*20)), i, i*20)) + } + t1Events = append(t1Events, "lastpk", "commit") + t2Events = append(t2Events, "lastpk", "commit") + + // Now we're past the copy phase and have no ENUM or SET columns. 
+ t1FieldEvent.enumSetStrings = false + t2FieldEvent.enumSetStrings = false + insertEvents1 := []string{ + "begin", + t1FieldEvent.String(), + getRowEvent(ts, t1FieldEvent, "insert into t1 values (101, 1010)"), + "gtid", + "commit", + } + insertEvents2 := []string{ + "begin", + t2FieldEvent.String(), + getRowEvent(ts, t2FieldEvent, "insert into t2 values (202, 2020)"), + "gtid", + "commit", + } + + testcases := []testcase{ + { + input: []string{}, + output: [][]string{{"begin", t1FieldEvent.String()}, {"gtid"}, t1Events, {"begin", "lastpk", "commit"}, {"begin", t2FieldEvent.String()}, t2Events, {"begin", "lastpk", "commit"}, {"copy_completed"}}, + }, + + { + input: []string{ + "insert into t1 values (101, 1010)", + }, + output: [][]string{insertEvents1}, + }, + { + input: []string{ + "insert into t2 values (202, 2020)", + }, + output: [][]string{insertEvents2}, + }, + } + + runCases(t, filter, testcases, "vscopy", tablePKs) + log.Infof("Pos at end of test: %s", primaryPosition(t)) +} + +func TestVStreamCopyWithDifferentFilters(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id1 int, id2 int, id3 int, primary key(id1)) charset=utf8mb4", + "create table t2a(id1 int, id2 int, primary key(id1)) charset=utf8mb4", + "create table t2b(id1 varchar(20), id2 int, primary key(id1)) charset=utf8mb4", + }, + } + ts.Init() + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/t2.*", + }, { + Match: "t1", + Filter: "select id1, id2 from t1", + }}, + } + + t1FieldEvent := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id1", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "id2", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + }, + enumSetStrings: true, + } + + execStatements(t, []string{ + "insert into t1(id1, id2, id3) values (1, 2, 3)", + "insert 
into t2a(id1, id2) values (1, 4)", + "insert into t2b(id1, id2) values ('b', 6)", + "insert into t2b(id1, id2) values ('a', 5)", + }) + + // All field events in this test are in the copy phase so they should all + // have the enum_set_string_values field set. + for _, fe := range ts.fieldEvents { + fe.enumSetStrings = true + } + + var expectedEvents = []string{ + "begin", + t1FieldEvent.String(), + "gtid", + getRowEvent(ts, t1FieldEvent, "insert into t1 values (1, 2)"), + getLastPKEvent("t1", "id1", sqltypes.Int32, []sqltypes.Value{sqltypes.NewInt32(1)}, collations.CollationBinaryID, uint32(53251)), + "commit", + "begin", + getCopyCompletedEvent("t1"), + "commit", + "begin", + ts.fieldEvents["t2a"].String(), + getRowEvent(ts, ts.fieldEvents["t2a"], "insert into t2a values (1, 4)"), + getLastPKEvent("t2a", "id1", sqltypes.Int32, []sqltypes.Value{sqltypes.NewInt32(1)}, collations.CollationBinaryID, uint32(53251)), + "commit", + "begin", + getCopyCompletedEvent("t2a"), + "commit", + "begin", + ts.fieldEvents["t2b"].String(), + getRowEvent(ts, ts.fieldEvents["t2b"], "insert into t2b values ('a', 5)"), + getRowEvent(ts, ts.fieldEvents["t2b"], "insert into t2b values ('b', 6)"), + getLastPKEvent("t2b", "id1", sqltypes.VarChar, []sqltypes.Value{sqltypes.NewVarChar("b")}, uint32(testenv.DefaultCollationID), uint32(20483)), + "commit", + "begin", + getCopyCompletedEvent("t2b"), + "commit", + } + + var allEvents []*binlogdatapb.VEvent + var wg sync.WaitGroup + wg.Add(1) + ctx2, cancel2 := context.WithDeadline(ctx, time.Now().Add(10*time.Second)) + defer cancel2() + + var errGoroutine error + go func() { + defer wg.Done() + engine.Stream(ctx2, "", nil, filter, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { + for _, ev := range evs { + if ev.Type == binlogdatapb.VEventType_HEARTBEAT { + continue + } + if ev.Throttled { + continue + } + allEvents = append(allEvents, ev) + } + if len(allEvents) == len(expectedEvents) { + log.Infof("Got %d events as 
expected", len(allEvents)) + for i, ev := range allEvents { + ev.Timestamp = 0 + switch ev.Type { + case binlogdatapb.VEventType_FIELD: + for j := range ev.FieldEvent.Fields { + ev.FieldEvent.Fields[j].Flags = 0 + } + ev.FieldEvent.Keyspace = "" + ev.FieldEvent.Shard = "" + // All events in this test are in the copy phase so they should + // all have the enum_set_string_values field set. + ev.FieldEvent.EnumSetStringValues = true + case binlogdatapb.VEventType_ROW: + ev.RowEvent.Keyspace = "" + ev.RowEvent.Shard = "" + } + ev.Keyspace = "" + ev.Shard = "" + got := ev.String() + want := expectedEvents[i] + switch want { + case "begin", "commit", "gtid": + want = fmt.Sprintf("type:%s", strings.ToUpper(want)) + default: + want = env.RemoveAnyDeprecatedDisplayWidths(want) + } + if !strings.HasPrefix(got, want) { + errGoroutine = fmt.Errorf("event %d did not match, want %s, got %s", i, want, got) + return errGoroutine + } + } + + return io.EOF + } + return nil + }) + }() + wg.Wait() + if errGoroutine != nil { + t.Fatalf(errGoroutine.Error()) + } +} + +// TestFilteredVarBinary confirms that adding a filter using a varbinary column results in the correct set of events. 
+func TestFilteredVarBinary(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id1 int, val varbinary(128), primary key(id1))", + }, + options: &TestSpecOptions{ + filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id1, val from t1 where val = 'newton'", + }}, + }, + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 'kepler')", noEvents}, + {"insert into t1 values (2, 'newton')", nil}, + {"insert into t1 values (3, 'newton')", nil}, + {"insert into t1 values (4, 'kepler')", noEvents}, + {"insert into t1 values (5, 'newton')", nil}, + {"update t1 set val = 'newton' where id1 = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "newton"}}}}}, + }}, + {"update t1 set val = 'kepler' where id1 = 2", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"2", "newton"}}}}}, + }}, + {"update t1 set val = 'newton' where id1 = 2", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"2", "newton"}}}}}, + }}, + {"update t1 set val = 'kepler' where id1 = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "newton"}}}}}, + }}, + {"delete from t1 where id1 in (2,3)", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"2", "newton"}}, {before: []string{"3", "newton"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +// TestFilteredInt confirms that adding a filter using an int column results in the correct set of events. 
+func TestFilteredInt(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", + }, + options: &TestSpecOptions{ + filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id1, val from t1 where id2 = 200", + }}, + }, + }, + } + defer ts.Close() + ts.Init() + ts.fieldEvents["t1"].cols[1].skip = true + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 100, 'aaa')", noEvents}, + {"insert into t1 values (2, 200, 'bbb')", nil}, + {"insert into t1 values (3, 100, 'ccc')", noEvents}, + {"insert into t1 values (4, 200, 'ddd')", nil}, + {"insert into t1 values (5, 200, 'eee')", nil}, + {"update t1 set val = 'newddd' where id1 = 4", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"4", "ddd"}, after: []string{"4", "newddd"}}}}}, + }}, + {"update t1 set id2 = 200 where id1 = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "aaa"}}}}}, + }}, + {"update t1 set id2 = 100 where id1 = 2", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"2", "bbb"}}}}}, + }}, + {"update t1 set id2 = 100 where id1 = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "aaa"}}}}}, + }}, + {"update t1 set id2 = 200 where id1 = 2", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"2", "bbb"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +// TestSavepoint confirms that rolling back to a savepoint drops the dmls that were executed during the savepoint. 
+func TestSavepoint(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into stream1 values (1, 'aaa')", nil}, + {"savepoint a", noEvents}, + {"insert into stream1 values (2, 'aaa')", noEvents}, + {"rollback work to savepoint a", noEvents}, + {"savepoint b", noEvents}, + {"update stream1 set val='bbb' where id = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "stream1", changes: []TestRowChange{{before: []string{"1", "aaa"}, after: []string{"1", "bbb"}}}}}, + }}, + {"release savepoint b", noEvents}, + {"commit", nil}, + }} + ts.Run() +} + +// TestSavepointWithFilter tests that using savepoints with both filtered and unfiltered tables works as expected. +func TestSavepointWithFilter(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + "create table stream2(id int, val varbinary(128), primary key(id))", + }, + options: &TestSpecOptions{ + filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "stream2", + Filter: "select * from stream2", + }}, + }, + }, + } + defer ts.Close() + ts.Init() + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into stream1 values (1, 'aaa')", noEvents}, + {"savepoint a", noEvents}, + {"insert into stream1 values (2, 'aaa')", noEvents}, + {"savepoint b", noEvents}, + {"insert into stream1 values (3, 'aaa')", noEvents}, + {"savepoint c", noEvents}, + {"insert into stream1 values (4, 'aaa')", noEvents}, + {"savepoint d", noEvents}, + {"commit", nil}, + }, { + {"begin", nil}, + {"insert into stream1 values (5, 'aaa')", noEvents}, + {"savepoint d", noEvents}, + {"insert into stream1 values (6, 'aaa')", noEvents}, + {"savepoint c", noEvents}, + {"insert into stream1 values (7, 'aaa')", noEvents}, + {"savepoint b", noEvents}, + {"insert into stream1 values (8, 
'aaa')", noEvents}, + {"savepoint a", noEvents}, + {"commit", nil}, + }, { + {"begin", nil}, + {"insert into stream1 values (9, 'aaa')", noEvents}, + {"savepoint a", noEvents}, + {"insert into stream2 values (1, 'aaa')", nil}, + {"savepoint b", noEvents}, + {"insert into stream1 values (10, 'aaa')", noEvents}, + {"savepoint c", noEvents}, + {"insert into stream2 values (2, 'aaa')", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "stream2", changes: []TestRowChange{{after: []string{"2", "aaa"}}}}}, + }}, + {"savepoint d", noEvents}, + {"commit", nil}, + }} + ts.Run() +} + +func TestStatements(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + "create table stream2(id int, val varbinary(128), primary key(id))", + }, + } + defer ts.Close() + ts.Init() + fe := &TestFieldEvent{ + table: "stream1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val", dataType: "VARBINARY", colType: "varbinary(256)", len: 256, collationID: 63}, + }, + } + ddlAlterWithPrefixAndSuffix := "/* prefix */ alter table stream1 change column val val varbinary(256) /* suffix */" + ddlTruncate := "truncate table stream2" + ddlReverseAlter := "/* prefix */ alter table stream1 change column val val varbinary(128) /* suffix */" + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into stream1 values (1, 'aaa')", nil}, + {"update stream1 set val='bbb' where id = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "stream1", changes: []TestRowChange{{before: []string{"1", "aaa"}, after: []string{"1", "bbb"}}}}}, + }}, + {"commit", nil}, + }, { // Normal DDL. + {"alter table stream1 change column val val varbinary(128)", nil}, + }, { // DDL padded with comments. 
+ {ddlAlterWithPrefixAndSuffix, []TestRowEvent{ + {event: "gtid"}, + {event: ts.getDDLEvent(ddlAlterWithPrefixAndSuffix)}, + }}, + }, { // Multiple tables, and multiple rows changed per statement. + {"begin", nil}, + {"insert into stream1 values (2, 'bbb')", []TestRowEvent{ + {event: fe.String()}, + {spec: &TestRowEventSpec{table: "stream1", changes: []TestRowChange{{after: []string{"2", "bbb"}}}}}, + }}, + {"insert into stream2 values (1, 'aaa')", nil}, + {"update stream1 set val='ccc'", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "stream1", changes: []TestRowChange{{before: []string{"1", "bbb"}, after: []string{"1", "ccc"}}, {before: []string{"2", "bbb"}, after: []string{"2", "ccc"}}}}}, + }}, + {"delete from stream1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "stream1", changes: []TestRowChange{{before: []string{"1", "ccc"}}, {before: []string{"2", "ccc"}}}}}, + }}, + {"commit", nil}, + }, { + {ddlTruncate, []TestRowEvent{ + {event: "gtid"}, + {event: ts.getDDLEvent(ddlTruncate)}, + }}, + }, { + {ddlReverseAlter, []TestRowEvent{ + {event: "gtid"}, + {event: ts.getDDLEvent(ddlReverseAlter)}, + }}, + }} + ts.Run() + + ts.Reset() + // Test FilePos flavor + savedEngine := engine + defer func() { engine = savedEngine }() + engine = customEngine(t, func(in mysql.ConnParams) mysql.ConnParams { + in.Flavor = "FilePos" + return in + }) + defer engine.Close() + ts.Run() +} + +// TestOther tests "other" and "priv" statements. These statements can +// produce very different events depending on the version of mysql or +// mariadb. So, we just show that vreplication transmits "OTHER" events +// if the binlog is affected by the statement. 
+func TestOther(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + "create table stream2(id int, val varbinary(128), primary key(id))", + }, + } + ts.Init() + defer ts.Close() + + testcases := []string{ + "repair table stream2", + "optimize table stream2", + "analyze table stream2", + "select * from stream1", + "set @val=1", + "show tables", + "describe stream1", + "grant select on stream1 to current_user()", + "revoke select on stream1 from current_user()", + } + + // customRun is a modified version of runCases. + customRun := func(mode string) { + t.Logf("Run mode: %v", mode) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wg, ch := startStream(ctx, t, nil, "", nil) + defer wg.Wait() + want := [][]string{{ + `gtid`, + `type:OTHER`, + }} + + for _, stmt := range testcases { + startPosition := primaryPosition(t) + execStatement(t, stmt) + endPosition := primaryPosition(t) + if startPosition == endPosition { + t.Logf("statement %s did not affect binlog", stmt) + continue + } + expectLog(ctx, t, stmt, ch, want) + } + cancel() + if evs, ok := <-ch; ok { + t.Fatalf("unexpected evs: %v", evs) + } + } + customRun("gtid") + + // Test FilePos flavor + savedEngine := engine + defer func() { engine = savedEngine }() + engine = customEngine(t, func(in mysql.ConnParams) mysql.ConnParams { + in.Flavor = "FilePos" + return in + }) + defer engine.Close() + customRun("filePos") +} + +// TestRegexp tests a filter which has a regexp suffix. 
+func TestRegexp(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table yes_stream(id int, val varbinary(128), primary key(id))", + "create table no_stream(id int, val varbinary(128), primary key(id))", + }, + options: &TestSpecOptions{ + filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/yes.*/", + }}, + }, + }, + } + defer ts.Close() + + ts.Init() + + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into yes_stream values (1, 'aaa')", nil}, + {"insert into no_stream values (2, 'bbb')", noEvents}, + {"update yes_stream set val='bbb' where id = 1", nil}, + {"update no_stream set val='bbb' where id = 2", noEvents}, + {"commit", nil}, + }} + ts.Run() +} + +func TestREKeyRange(t *testing.T) { + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + Filter: "-80", + }}, + } + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", + }, + options: &TestSpecOptions{ + filter: filter, + }, + } + ignoreKeyspaceShardInFieldAndRowEvents = false + defer func() { + ignoreKeyspaceShardInFieldAndRowEvents = true + }() + ts.Init() + defer ts.Close() + + setVSchema(t, shardedVSchema) + defer env.SetVSchema("{}") + + // 1, 2, 3 and 5 are in shard -80. + // 4 and 6 are in shard 80-. + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 1, 'aaa')", nil}, + {"insert into t1 values (4, 1, 'bbb')", noEvents}, + {"update t1 set id1 = 2 where id1 = 1", []TestRowEvent{ // Stays in shard. + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "1", "aaa"}, after: []string{"2", "1", "aaa"}}}}}, + }}, + {"update t1 set id1 = 6 where id1 = 2", []TestRowEvent{ // Moves from -80 to 80-. + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"2", "1", "aaa"}}}}}, + }}, + {"update t1 set id1 = 3 where id1 = 4", []TestRowEvent{ // Moves from 80- back to -80. 
+ {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"3", "1", "bbb"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() + + // Switch the vschema to make id2 the primary vindex. + altVSchema := `{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id2", + "name": "hash" + } + ] + } + } +}` + setVSchema(t, altVSchema) + ts.Reset() + // Only the first insert should be sent. + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (4, 1, 'aaa')", nil}, + {"insert into t1 values (1, 4, 'aaa')", noEvents}, + {"commit", nil}, + }} + ts.Init() + ts.Run() +} + +func TestInKeyRangeMultiColumn(t *testing.T) { + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id, region, val, keyspace_id() from t1 where in_keyrange('-80')", + }}, + } + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(region int, id int, val varbinary(128), primary key(id))", + }, + options: &TestSpecOptions{ + filter: filter, + }, + } + ts.Init() + defer ts.Close() + + setVSchema(t, multicolumnVSchema) + defer env.SetVSchema("{}") + + fe := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "region", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + {name: "keyspace_id", dataType: "VARBINARY", colType: "varbinary(256)", len: 256, collationID: 63}, + }, + } + + // 1 and 2 are in shard -80. + // 128 is in shard 80-. 
+ keyspaceId1 := "\x01\x16k@\xb4J\xbaK\xd6" + keyspaceId2 := "\x02\x16k@\xb4J\xbaK\xd6" + keyspaceId3 := "\x01\x06\xe7\xea\"Βp\x8f" + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 1, 'aaa')", []TestRowEvent{ + {event: fe.String()}, + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "1", "aaa", keyspaceId1}}}}}, + }}, + {"insert into t1 values (128, 2, 'bbb')", noEvents}, + {"update t1 set region = 2 where id = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "1", "aaa", keyspaceId1}, after: []string{"1", "2", "aaa", keyspaceId2}}}}}, + }}, + {"update t1 set region = 128 where id = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "2", "aaa", keyspaceId2}}}}}, + }}, + {"update t1 set region = 1 where id = 2", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"2", "1", "bbb", keyspaceId3}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +func TestREMultiColumnVindex(t *testing.T) { + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + Filter: "-80", + }}, + } + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(region int, id int, val varbinary(128), primary key(id))", + }, + options: &TestSpecOptions{ + filter: filter, + }, + } + ts.Init() + defer ts.Close() + + setVSchema(t, multicolumnVSchema) + defer env.SetVSchema("{}") + // (region, id) is the primary vindex. + // (1,1), (1, 2) are in shard -80. + // (128, 1) (128, 2) are in shard 80-. 
+ ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 1, 'aaa')", nil}, + {"insert into t1 values (128, 2, 'bbb')", noEvents}, + {"update t1 set region = 2 where id = 1", nil}, + {"update t1 set region = 128 where id = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"2", "1", "aaa"}}}}}, + }}, + {"update t1 set region = 1 where id = 2", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "2", "bbb"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +// TestSelectFilter tests a filter with an in_keyrange function, used in a sharded keyspace. +func TestSelectFilter(t *testing.T) { + fe := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id2", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + }, + } + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id1 int, id2 int, val varbinary(128), primary key(id1))", + }, + options: &TestSpecOptions{ + filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id2, val from t1 where in_keyrange(id2, 'hash', '-80')", + }}, + }, + }, + } + defer ts.Close() + + ts.Init() + + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (4, 1, 'aaa')", []TestRowEvent{ + {event: fe.String()}, + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "aaa"}}}}}, + }}, + {"insert into t1 values (2, 4, 'aaa')", noEvents}, // not in keyrange + {"commit", nil}, + }} + ts.Run() +} + +func TestDDLAddColumn(t *testing.T) { + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "ddl_test2", + Filter: "select * from ddl_test2", + }, { + Match: "/.*/", + }}, + } + + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table ddl_test1(id int, val1 varbinary(128), 
primary key(id))", + "create table ddl_test2(id int, val1 varbinary(128), primary key(id))", + }, + options: &TestSpecOptions{ + // Test RE as well as select-based filters. + filter: filter, + }, + } + defer ts.Close() + // Record position before the next few statements. + ts.Init() + pos := primaryPosition(t) + ts.SetStartPosition(pos) + alterTest1 := "alter table ddl_test1 add column val2 varbinary(128)" + alterTest2 := "alter table ddl_test2 add column val2 varbinary(128)" + fe1 := &TestFieldEvent{ + table: "ddl_test1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val1", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + {name: "val2", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + }, + } + fe2 := &TestFieldEvent{ + table: "ddl_test2", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val1", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + {name: "val2", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + }, + } + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into ddl_test1 values(1, 'aaa')", nil}, + {"insert into ddl_test2 values(1, 'aaa')", nil}, + {"commit", nil}, + }, { + // Adding columns is allowed. 
+ {alterTest1, []TestRowEvent{ + {event: "gtid"}, + {event: ts.getDDLEvent(alterTest1)}, + }}, + }, { + {alterTest2, []TestRowEvent{ + {event: "gtid"}, + {event: ts.getDDLEvent(alterTest2)}, + }}, + }, { + {"begin", nil}, + {"insert into ddl_test1 values(2, 'bbb', 'ccc')", []TestRowEvent{ + {event: fe1.String()}, + {spec: &TestRowEventSpec{table: "ddl_test1", changes: []TestRowChange{{after: []string{"2", "bbb", "ccc"}}}}}, + }}, + {"insert into ddl_test2 values(2, 'bbb', 'ccc')", []TestRowEvent{ + {event: fe2.String()}, + {spec: &TestRowEventSpec{table: "ddl_test2", changes: []TestRowChange{{after: []string{"2", "bbb", "ccc"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +func TestDDLDropColumn(t *testing.T) { + execStatement(t, "create table ddl_test2(id int, val1 varbinary(128), val2 varbinary(128), primary key(id))") + defer execStatement(t, "drop table ddl_test2") + + // Record position before the next few statements. + pos := primaryPosition(t) + execStatements(t, []string{ + "insert into ddl_test2 values(1, 'aaa', 'ccc')", + // Adding columns is allowed. + "alter table ddl_test2 drop column val2", + "insert into ddl_test2 values(2, 'bbb')", + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + for range ch { + } + }() + defer close(ch) + err := vstream(ctx, t, pos, nil, nil, ch) + want := "cannot determine table columns" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("err: %v, must contain %s", err, want) + } +} + +func TestUnsentDDL(t *testing.T) { + execStatement(t, "create table unsent(id int, val varbinary(128), primary key(id))") + + testcases := []testcase{{ + input: []string{ + "drop table unsent", + }, + // An unsent DDL is sent as an empty transaction. 
+ output: [][]string{{ + `gtid`, + `type:OTHER`, + }}, + }} + + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "/none/", + }}, + } + runCases(t, filter, testcases, "", nil) +} + +func TestBuffering(t *testing.T) { + reset := AdjustPacketSize(10) + defer reset() + + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table packet_test(id int, val varbinary(128), primary key(id))", + }, + } + defer ts.Close() + ts.Init() + ddl := "alter table packet_test change val val varchar(128)" + ts.tests = [][]*TestQuery{{ + // All rows in one packet. + {"begin", nil}, + {"insert into packet_test values (1, '123')", nil}, + {"insert into packet_test values (2, '456')", nil}, + {"commit", nil}, + }, { + // A new row causes packet size to be exceeded. + // Also test deletes + {"begin", nil}, + {"insert into packet_test values (3, '123456')", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{after: []string{"3", "123456"}}}}}, + }}, + {"insert into packet_test values (4, '789012')", []TestRowEvent{ + {restart: true, spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{after: []string{"4", "789012"}}}}}, + }}, + {"delete from packet_test where id=3", []TestRowEvent{ + {restart: true, spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{before: []string{"3", "123456"}}}}}, + }}, + {"delete from packet_test where id=4", []TestRowEvent{ + {restart: true, spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{before: []string{"4", "789012"}}}}}, + }}, + {"commit", nil}, + }, { + // A single row is itself bigger than the packet size. 
+ {"begin", nil}, + {"insert into packet_test values (5, '123456')", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{after: []string{"5", "123456"}}}}}, + }}, + {"insert into packet_test values (6, '12345678901')", []TestRowEvent{ + {restart: true, spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{after: []string{"6", "12345678901"}}}}}, + }}, + {"insert into packet_test values (7, '23456')", []TestRowEvent{ + {restart: true, spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{after: []string{"7", "23456"}}}}}, + }}, + {"commit", nil}, + }, { + // An update packet is bigger because it has a before and after image. + {"begin", nil}, + {"insert into packet_test values (8, '123')", nil}, + {"update packet_test set val='456' where id=8", []TestRowEvent{ + {restart: true, spec: &TestRowEventSpec{table: "packet_test", changes: []TestRowChange{{before: []string{"8", "123"}, after: []string{"8", "456"}}}}}, + }}, + {"commit", nil}, + }, { + // DDL is in its own packet. + {ddl, []TestRowEvent{ + {event: "gtid"}, + {event: ts.getDDLEvent(ddl)}, + }}, + }} + ts.Run() +} + +// TestBestEffortNameInFieldEvent tests that we make a valid best effort +// attempt to deduce the type and collation in the event of table renames. +// In both cases the varbinary becomes a varchar. We get the correct +// collation information, however, in the binlog_row_metadata in 8.0 but +// not in 5.7. So in 5.7 our best effort uses varchar with its default +// collation for text fields. +func TestBestEffortNameInFieldEvent(t *testing.T) { + bestEffortCollation := collations.ID(collations.CollationBinaryID) + if strings.HasPrefix(testenv.MySQLVersion, "5.7") { + bestEffortCollation = testenv.DefaultCollationID + } + filter := &binlogdatapb.Filter{ + FieldEventMode: binlogdatapb.Filter_BEST_EFFORT, + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + }}, + } + // Modeled after vttablet endtoend compatibility tests. 
+ execStatements(t, []string{ + "create table vitess_test(id int, val varbinary(128), primary key(id)) ENGINE=InnoDB CHARSET=utf8mb4", + }) + position := primaryPosition(t) + execStatements(t, []string{ + "insert into vitess_test values(1, 'abc')", + "rename table vitess_test to vitess_test_new", + }) + + defer execStatements(t, []string{ + "drop table vitess_test_new", + }) + testcases := []testcase{{ + input: []string{ + "insert into vitess_test_new values(2, 'abc')", + }, + // In this case, we don't have information about vitess_test since it was renamed to vitess_test_new. + // information returned by binlog for val column == varchar (rather than varbinary). + output: [][]string{{ + `begin`, + fmt.Sprintf(`type:FIELD field_event:{table_name:"vitess_test" fields:{name:"@1" type:INT32 charset:63} fields:{name:"@2" type:VARCHAR charset:%d}}`, bestEffortCollation), + `type:ROW row_event:{table_name:"vitess_test" row_changes:{after:{lengths:1 lengths:3 values:"1abc"}}}`, + `gtid`, + `commit`, + }, { + `gtid`, + `type:DDL statement:"rename table vitess_test to vitess_test_new"`, + }, { + `begin`, + `type:FIELD field_event:{table_name:"vitess_test_new" fields:{name:"id" type:INT32 table:"vitess_test_new" org_table:"vitess_test_new" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"vitess_test_new" org_table:"vitess_test_new" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, + `type:ROW row_event:{table_name:"vitess_test_new" row_changes:{after:{lengths:1 lengths:3 values:"2abc"}}}`, + `gtid`, + `commit`, + }}, + }} + runCases(t, filter, testcases, position, nil) +} + +// todo: migrate to new framework +// test that vstreamer ignores tables created by OnlineDDL +func TestInternalTables(t *testing.T) { + if version.GoOS == "darwin" { + t.Skip("internal online ddl table matching doesn't work on Mac because it is case insensitive") + } + filter = 
&binlogdatapb.Filter{ + FieldEventMode: binlogdatapb.Filter_BEST_EFFORT, + Rules: []*binlogdatapb.Rule{{ + Match: "/.*/", + }}, + } + // Modeled after vttablet endtoend compatibility tests. + execStatements(t, []string{ + "create table vitess_test(id int, val varbinary(128), primary key(id))", + "create table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho(id int, val varbinary(128), primary key(id))", + "create table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431(id int, val varbinary(128), primary key(id))", + "create table _product_old(id int, val varbinary(128), primary key(id))", + }) + position := primaryPosition(t) + execStatements(t, []string{ + "insert into vitess_test values(1, 'abc')", + "insert into _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho values(1, 'abc')", + "insert into _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431 values(1, 'abc')", + "insert into _product_old values(1, 'abc')", + }) + + defer execStatements(t, []string{ + "drop table vitess_test", + "drop table _1e275eef_3b20_11eb_a38f_04ed332e05c2_20201210204529_gho", + "drop table _vt_PURGE_1f9194b43b2011eb8a0104ed332e05c2_20201210194431", + "drop table _product_old", + }) + testcases := []testcase{{ + input: []string{ + "insert into vitess_test values(2, 'abc')", + }, + // Inserts into the OnlineDDL/internal tables must not be streamed as row events; + // only the vitess_test inserts should appear in the output below. 
+ output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"vitess_test" fields:{name:"id" type:INT32 table:"vitess_test" org_table:"vitess_test" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"vitess_test" org_table:"vitess_test" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, + `type:ROW row_event:{table_name:"vitess_test" row_changes:{after:{lengths:1 lengths:3 values:"1abc"}}}`, + `gtid`, + `commit`, + }, {`begin`, `gtid`, `commit`}, {`begin`, `gtid`, `commit`}, {`begin`, `gtid`, `commit`}, // => inserts into the three internal comments + { + `begin`, + `type:ROW row_event:{table_name:"vitess_test" row_changes:{after:{lengths:1 lengths:3 values:"2abc"}}}`, + `gtid`, + `commit`, + }}, + }} + runCases(t, filter, testcases, position, nil) +} + +func TestTypes(t *testing.T) { + // Modeled after vttablet endtoend compatibility tests. + execStatements(t, []string{ + "create table vitess_ints(tiny tinyint, tinyu tinyint unsigned, small smallint, smallu smallint unsigned, medium mediumint, mediumu mediumint unsigned, normal int, normalu int unsigned, big bigint, bigu bigint unsigned, y year, primary key(tiny)) ENGINE=InnoDB CHARSET=utf8mb4", + "create table vitess_fracts(id int, deci decimal(5,2), num numeric(5,2), f float, d double, primary key(id)) ENGINE=InnoDB CHARSET=utf8mb4", + "create table vitess_strings(vb varbinary(16), c char(16), vc varchar(16), b binary(4), tb tinyblob, bl blob, ttx tinytext, tx text, en enum('a','b'), s set('a','b'), primary key(vb)) ENGINE=InnoDB CHARSET=utf8mb4", + "create table vitess_misc(id int, b bit(8), d date, dt datetime, t time, g geometry, primary key(id)) ENGINE=InnoDB CHARSET=utf8mb4", + "create table vitess_null(id int, val varbinary(128), primary key(id)) ENGINE=InnoDB CHARSET=utf8mb4", + "create table vitess_decimal(id int, dec1 decimal(12,4), dec2 decimal(13,4), primary key(id)) 
ENGINE=InnoDB CHARSET=utf8mb4", + }) + defer execStatements(t, []string{ + "drop table vitess_ints", + "drop table vitess_fracts", + "drop table vitess_strings", + "drop table vitess_misc", + "drop table vitess_null", + "drop table vitess_decimal", + }) + + testcases := []testcase{{ + input: []string{ + "insert into vitess_ints values(-128, 255, -32768, 65535, -8388608, 16777215, -2147483648, 4294967295, -9223372036854775808, 18446744073709551615, 2012)", + }, + output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"vitess_ints" fields:{name:"tiny" type:INT8 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"tiny" column_length:4 charset:63 column_type:"tinyint(4)"} fields:{name:"tinyu" type:UINT8 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"tinyu" column_length:3 charset:63 column_type:"tinyint(3) unsigned"} fields:{name:"small" type:INT16 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"small" column_length:6 charset:63 column_type:"smallint(6)"} fields:{name:"smallu" type:UINT16 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"smallu" column_length:5 charset:63 column_type:"smallint(5) unsigned"} fields:{name:"medium" type:INT24 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"medium" column_length:9 charset:63 column_type:"mediumint(9)"} fields:{name:"mediumu" type:UINT24 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"mediumu" column_length:8 charset:63 column_type:"mediumint(8) unsigned"} fields:{name:"normal" type:INT32 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"normal" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"normalu" type:UINT32 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"normalu" column_length:10 charset:63 column_type:"int(10) unsigned"} fields:{name:"big" type:INT64 table:"vitess_ints" org_table:"vitess_ints" 
database:"vttest" org_name:"big" column_length:20 charset:63 column_type:"bigint(20)"} fields:{name:"bigu" type:UINT64 table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"bigu" column_length:20 charset:63 column_type:"bigint(20) unsigned"} fields:{name:"y" type:YEAR table:"vitess_ints" org_table:"vitess_ints" database:"vttest" org_name:"y" column_length:4 charset:63 column_type:"year(4)"}}`, + `type:ROW row_event:{table_name:"vitess_ints" row_changes:{after:{lengths:4 lengths:3 lengths:6 lengths:5 lengths:8 lengths:8 lengths:11 lengths:10 lengths:20 lengths:20 lengths:4 values:"` + + `-128` + + `255` + + `-32768` + + `65535` + + `-8388608` + + `16777215` + + `-2147483648` + + `4294967295` + + `-9223372036854775808` + + `18446744073709551615` + + `2012` + + `"}}}`, + `gtid`, + `commit`, + }}, + }, { + input: []string{ + "insert into vitess_fracts values(1, 1.99, 2.99, 3.99, 4.99)", + }, + output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"vitess_fracts" fields:{name:"id" type:INT32 table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"deci" type:DECIMAL table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"deci" column_length:7 charset:63 decimals:2 column_type:"decimal(5,2)"} fields:{name:"num" type:DECIMAL table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"num" column_length:7 charset:63 decimals:2 column_type:"decimal(5,2)"} fields:{name:"f" type:FLOAT32 table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"f" column_length:12 charset:63 decimals:31 column_type:"float"} fields:{name:"d" type:FLOAT64 table:"vitess_fracts" org_table:"vitess_fracts" database:"vttest" org_name:"d" column_length:22 charset:63 decimals:31 column_type:"double"}}`, + `type:ROW row_event:{table_name:"vitess_fracts" row_changes:{after:{lengths:1 lengths:4 lengths:4 lengths:8 lengths:8 
values:"11.992.993.99E+004.99E+00"}}}`, + `gtid`, + `commit`, + }}, + }, { + // TODO(sougou): validate that binary and char data generate correct DMLs on the other end. + input: []string{ + "insert into vitess_strings values('a', 'b', 'c', 'd\000\000\000', 'e', 'f', 'g', 'h', 'a', 'a,b')", + }, + output: [][]string{{ + `begin`, + fmt.Sprintf(`type:FIELD field_event:{table_name:"vitess_strings" fields:{name:"vb" type:VARBINARY table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"vb" column_length:16 charset:63 column_type:"varbinary(16)"} fields:{name:"c" type:CHAR table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"c" column_length:64 charset:%d column_type:"char(16)"} fields:{name:"vc" type:VARCHAR table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"vc" column_length:64 charset:%d column_type:"varchar(16)"} fields:{name:"b" type:BINARY table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"b" column_length:4 charset:63 column_type:"binary(4)"} fields:{name:"tb" type:BLOB table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"tb" column_length:255 charset:63 column_type:"tinyblob"} fields:{name:"bl" type:BLOB table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"bl" column_length:65535 charset:63 column_type:"blob"} fields:{name:"ttx" type:TEXT table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"ttx" column_length:1020 charset:%d column_type:"tinytext"} fields:{name:"tx" type:TEXT table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"tx" column_length:262140 charset:%d column_type:"text"} fields:{name:"en" type:ENUM table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"en" column_length:4 charset:%d column_type:"enum('a','b')"} fields:{name:"s" type:SET table:"vitess_strings" org_table:"vitess_strings" database:"vttest" org_name:"s" 
column_length:12 charset:%d column_type:"set('a','b')"} enum_set_string_values:true}`, testenv.DefaultCollationID, testenv.DefaultCollationID, testenv.DefaultCollationID, testenv.DefaultCollationID, testenv.DefaultCollationID, testenv.DefaultCollationID), + `type:ROW row_event:{table_name:"vitess_strings" row_changes:{after:{lengths:1 lengths:1 lengths:1 lengths:4 lengths:1 lengths:1 lengths:1 lengths:1 lengths:1 lengths:3 ` + + `values:"abcd\x00\x00\x00efghaa,b"}}}`, + `gtid`, + `commit`, + }}, + }, { + // TODO(sougou): validate that the geometry value generates the correct DMLs on the other end. + input: []string{ + "insert into vitess_misc values(1, '\x01', '2012-01-01', '2012-01-01 15:45:45', '15:45:45', point(1, 2))", + }, + output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"vitess_misc" fields:{name:"id" type:INT32 table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"b" type:BIT table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"b" column_length:8 charset:63 column_type:"bit(8)"} fields:{name:"d" type:DATE table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"d" column_length:10 charset:63 column_type:"date"} fields:{name:"dt" type:DATETIME table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"dt" column_length:19 charset:63 column_type:"datetime"} fields:{name:"t" type:TIME table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"t" column_length:10 charset:63 column_type:"time"} fields:{name:"g" type:GEOMETRY table:"vitess_misc" org_table:"vitess_misc" database:"vttest" org_name:"g" column_length:4294967295 charset:63 column_type:"geometry"}}`, + `type:ROW row_event:{table_name:"vitess_misc" row_changes:{after:{lengths:1 lengths:1 lengths:10 lengths:19 lengths:8 lengths:25 values:"1\x012012-01-012012-01-01 
15:45:4515:45:45\x00\x00\x00\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00\x00\x00\x00\x00\x00@"}}}`, + `gtid`, + `commit`, + }}, + }, { + input: []string{ + "insert into vitess_null values(1, null)", + }, + output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"vitess_null" fields:{name:"id" type:INT32 table:"vitess_null" org_table:"vitess_null" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"val" type:VARBINARY table:"vitess_null" org_table:"vitess_null" database:"vttest" org_name:"val" column_length:128 charset:63 column_type:"varbinary(128)"}}`, + `type:ROW row_event:{table_name:"vitess_null" row_changes:{after:{lengths:1 lengths:-1 values:"1"}}}`, + `gtid`, + `commit`, + }}, + }, { + input: []string{ + "insert into vitess_decimal values(1, 1.23, 1.23)", + "insert into vitess_decimal values(2, -1.23, -1.23)", + "insert into vitess_decimal values(3, 0000000001.23, 0000000001.23)", + "insert into vitess_decimal values(4, -0000000001.23, -0000000001.23)", + }, + output: [][]string{{ + `begin`, + `type:FIELD field_event:{table_name:"vitess_decimal" fields:{name:"id" type:INT32 table:"vitess_decimal" org_table:"vitess_decimal" database:"vttest" org_name:"id" column_length:11 charset:63 column_type:"int(11)"} fields:{name:"dec1" type:DECIMAL table:"vitess_decimal" org_table:"vitess_decimal" database:"vttest" org_name:"dec1" column_length:14 charset:63 decimals:4 column_type:"decimal(12,4)"} fields:{name:"dec2" type:DECIMAL table:"vitess_decimal" org_table:"vitess_decimal" database:"vttest" org_name:"dec2" column_length:15 charset:63 decimals:4 column_type:"decimal(13,4)"}}`, + `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:6 lengths:6 values:"11.23001.2300"}}}`, + `gtid`, + `commit`, + }, { + `begin`, + `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:7 lengths:7 values:"2-1.2300-1.2300"}}}`, + `gtid`, + 
`commit`, + }, { + `begin`, + `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:6 lengths:6 values:"31.23001.2300"}}}`, + `gtid`, + `commit`, + }, { + `begin`, + `type:ROW row_event:{table_name:"vitess_decimal" row_changes:{after:{lengths:1 lengths:7 lengths:7 values:"4-1.2300-1.2300"}}}`, + `gtid`, + `commit`, + }}, + }} + runCases(t, nil, testcases, "", nil) +} + +func TestJSON(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table vitess_json(id int default 1, val json, primary key(id))", + }, + } + ts.Init() + defer ts.Close() + ts.tests = [][]*TestQuery{} + queries := []*TestQuery{} + jsonValues := []string{"{}", "123456", `"vtTablet"`, `{"foo": "bar"}`, `["abc", 3.14, true]`} + queries = append(queries, &TestQuery{"begin", nil}) + for i, val := range jsonValues { + queries = append(queries, &TestQuery{fmt.Sprintf("insert into vitess_json values(%d, %s)", i+1, encodeString(val)), nil}) + } + queries = append(queries, &TestQuery{"commit", nil}) + + ts.tests = append(ts.tests, queries) + ts.Run() +} + +func TestExternalTable(t *testing.T) { + execStatements(t, []string{ + "create database external", + "create table external.ext(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop database external", + }) + + testcases := []testcase{{ + input: []string{ + "begin", + "insert into external.ext values (1, 'aaa')", + "commit", + }, + // External table events don't get sent. 
+ output: [][]string{{ + `begin`, + `gtid`, + `commit`, + }}, + }} + runCases(t, nil, testcases, "", nil) +} + +func TestJournal(t *testing.T) { + execStatements(t, []string{ + "create table if not exists _vt.resharding_journal(id int, db_name varchar(128), val blob, primary key(id))", + }) + defer execStatements(t, []string{ + "drop table _vt.resharding_journal", + }) + + journal1 := &binlogdatapb.Journal{ + Id: 1, + MigrationType: binlogdatapb.MigrationType_SHARDS, + } + journal2 := &binlogdatapb.Journal{ + Id: 2, + MigrationType: binlogdatapb.MigrationType_SHARDS, + } + testcases := []testcase{{ + input: []string{ + "begin", + fmt.Sprintf("insert into _vt.resharding_journal values(1, 'vttest', '%v')", journal1.String()), + fmt.Sprintf("insert into _vt.resharding_journal values(2, 'nosend', '%v')", journal2.String()), + "commit", + }, + // Only the journal row whose db_name matches this tablet ('vttest') is sent; the 'nosend' row is skipped. + output: [][]string{{ + `begin`, + `type:JOURNAL journal:{id:1 migration_type:SHARDS}`, + `gtid`, + `commit`, + }}, + }} + runCases(t, nil, testcases, "", nil) +} + +// TestMinimalMode confirms that we don't support minimal binlog_row_image mode. 
+func TestMinimalMode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + oldEngine := engine + engine = nil + oldEnv := env + env = nil + newEngine(t, ctx, "minimal") + defer func() { + if engine != nil { + engine.Close() + } + if env != nil { + env.Close() + } + engine = oldEngine + env = oldEnv + }() + err := engine.Stream(context.Background(), "current", nil, nil, throttlerapp.VStreamerName, func(evs []*binlogdatapb.VEvent) error { return nil }) + require.Error(t, err, "minimal binlog_row_image is not supported by Vitess VReplication") +} + +func TestStatementMode(t *testing.T) { + execStatements(t, []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + "create table stream2(id int, val varbinary(128), primary key(id))", + }) + + defer execStatements(t, []string{ + "drop table stream1", + "drop table stream2", + }) + + testcases := []testcase{{ + input: []string{ + "set @@session.binlog_format='STATEMENT'", + "begin", + "insert into stream1 values (1, 'aaa')", + "update stream1 set val='bbb' where id = 1", + "delete from stream1 where id = 1", + "commit", + "set @@session.binlog_format='ROW'", + }, + output: [][]string{{ + `begin`, + `type:INSERT dml:"insert into stream1 values (1, 'aaa')"`, + `type:UPDATE dml:"update stream1 set val='bbb' where id = 1"`, + `type:DELETE dml:"delete from stream1 where id = 1"`, + `gtid`, + `commit`, + }}, + }} + runCases(t, nil, testcases, "", nil) +} + +func TestHeartbeat(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wg, ch := startStream(ctx, t, nil, "", nil) + defer wg.Wait() + evs := <-ch + require.Equal(t, 1, len(evs)) + assert.Equal(t, binlogdatapb.VEventType_HEARTBEAT, evs[0].Type) + cancel() +} + +func TestNoFutureGTID(t *testing.T) { + // Execute something to make sure we have ranges in GTIDs. 
+ execStatements(t, []string{ + "create table stream1(id int, val varbinary(128), primary key(id))", + }) + defer execStatements(t, []string{ + "drop table stream1", + }) + + pos := primaryPosition(t) + t.Logf("current position: %v", pos) + // Both mysql and mariadb have '-' in their gtids. + // Invent a GTID in the future. + index := strings.LastIndexByte(pos, '-') + num, err := strconv.Atoi(pos[index+1:]) + require.NoError(t, err) + future := pos[:index+1] + fmt.Sprintf("%d", num+1) + t.Logf("future position: %v", future) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ch := make(chan []*binlogdatapb.VEvent) + go func() { + for range ch { + } + }() + defer close(ch) + err = vstream(ctx, t, future, nil, nil, ch) + want := "GTIDSet Mismatch" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("err: %v, must contain %s", err, want) + } +} + +func TestFilteredMultipleWhere(t *testing.T) { + fe := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id1", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val", dataType: "VARBINARY", colType: "varbinary(128)", len: 128, collationID: 63}, + }, + } + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id1 int, id2 int, id3 int, val varbinary(128), primary key(id1))", + }, + options: &TestSpecOptions{ + filter: &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + Filter: "select id1, val from t1 where in_keyrange('-80') and id2 = 200 and id3 = 1000 and val = 'newton'", + }}, + }, + customFieldEvents: true, + }, + } + _ = fe + defer ts.Close() // Ensure clean-up + + ts.Init() + + setVSchema(t, shardedVSchema) + defer env.SetVSchema("{}") + + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values (1, 100, 1000, 'kepler')", noEvents}, + {"insert into t1 values (2, 200, 1000, 'newton')", []TestRowEvent{ + {event: fe.String()}, + {spec: &TestRowEventSpec{table: "t1", 
changes: []TestRowChange{{after: []string{"2", "newton"}}}}}, + }}, + {"insert into t1 values (3, 100, 2000, 'kepler')", noEvents}, + {"insert into t1 values (128, 200, 1000, 'newton')", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"128", "newton"}}}}}, + }}, + {"insert into t1 values (5, 200, 2000, 'kepler')", noEvents}, + {"insert into t1 values (129, 200, 1000, 'kepler')", noEvents}, + {"commit", nil}, + }} + ts.Run() +} + +// TestGeneratedColumns just confirms that generated columns are sent in a vstream as expected +func TestGeneratedColumns(t *testing.T) { + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(id int, val varbinary(6), val2 varbinary(6) as (concat(id, val)), val3 varbinary(6) as (concat(val, id)), id2 int, primary key(id))", + }, + options: &TestSpecOptions{ + customFieldEvents: true, + }, + } + defer ts.Close() + + ts.Init() + + fe := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "id", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + {name: "val", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, collationID: 63}, + {name: "val2", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, collationID: 63}, + {name: "val3", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, collationID: 63}, + {name: "id2", dataType: "INT32", colType: "int(11)", len: 11, collationID: 63}, + }, + } + + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1(id, val, id2) values (1, 'aaa', 10)", []TestRowEvent{ + {event: fe.String()}, + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "aaa", "1aaa", "aaa1", "10"}}}}}, + }}, + {"insert into t1(id, val, id2) values (2, 'bbb', 20)", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"2", "bbb", "2bbb", "bbb2", "20"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} + +// 
TestGeneratedInvisiblePrimaryKey validates that generated invisible primary keys are sent in row events. +func TestGeneratedInvisiblePrimaryKey(t *testing.T) { + if !env.HasCapability(testenv.ServerCapabilityGeneratedInvisiblePrimaryKey) { + t.Skip("skipping test as server does not support generated invisible primary keys") + } + + execStatement(t, "SET @@session.sql_generate_invisible_primary_key=ON") + defer execStatement(t, "SET @@session.sql_generate_invisible_primary_key=OFF") + ts := &TestSpec{ + t: t, + ddls: []string{ + "create table t1(val varbinary(6))", + }, + options: nil, + } + defer ts.Close() + + ts.Init() + + fe := &TestFieldEvent{ + table: "t1", + db: testenv.DBName, + cols: []*TestColumn{ + {name: "my_row_id", dataType: "UINT64", colType: "bigint unsigned", len: 20, collationID: 63}, + {name: "val", dataType: "VARBINARY", colType: "varbinary(6)", len: 6, collationID: 63}, + }, + } + + ts.tests = [][]*TestQuery{{ + {"begin", nil}, + {"insert into t1 values ('aaa')", []TestRowEvent{ + {event: fe.String()}, + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{after: []string{"1", "aaa"}}}}}, + }}, + {"update t1 set val = 'bbb' where my_row_id = 1", []TestRowEvent{ + {spec: &TestRowEventSpec{table: "t1", changes: []TestRowChange{{before: []string{"1", "aaa"}, after: []string{"1", "bbb"}}}}}, + }}, + {"commit", nil}, + }} + ts.Run() +} diff --git a/go/vt/vttablet/tmclient/rpc_client_api.go b/go/vt/vttablet/tmclient/rpc_client_api.go index 2e75dbd45fc..936d98fa3ce 100644 --- a/go/vt/vttablet/tmclient/rpc_client_api.go +++ b/go/vt/vttablet/tmclient/rpc_client_api.go @@ -127,6 +127,12 @@ type TabletManagerClient interface { // query faster. Close() should close the pool in that case. ExecuteFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteFetchAsDbaRequest) (*querypb.QueryResult, error) + // ExecuteFetchAsDba executes multiple queries remotely using the DBA pool. 
+ // req.DbName is ignored in favor of using the tablet's DbName field. + // If usePool is set, a connection pool may be used to make the + // query faster. Close() should close the pool in that case. + ExecuteMultiFetchAsDba(ctx context.Context, tablet *topodatapb.Tablet, usePool bool, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) + // ExecuteFetchAsAllPrivs executes a query remotely using the allprivs user. // req.DbName is ignored in favor of using the tablet's DbName field. ExecuteFetchAsAllPrivs(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) @@ -177,8 +183,11 @@ type TabletManagerClient interface { CreateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.CreateVReplicationWorkflowRequest) (*tabletmanagerdatapb.CreateVReplicationWorkflowResponse, error) DeleteVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.DeleteVReplicationWorkflowRequest) (*tabletmanagerdatapb.DeleteVReplicationWorkflowResponse, error) + HasVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) + ReadVReplicationWorkflows(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) UpdateVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVReplicationWorkflowRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowResponse, error) + UpdateVReplicationWorkflows(ctx 
context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) // VReplicationExec executes a VReplication command VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) VReplicationWaitForPos(ctx context.Context, tablet *topodatapb.Tablet, id int32, pos string) error @@ -224,7 +233,7 @@ type TabletManagerClient interface { // SetReplicationSource tells a tablet to start replicating from the // passed in tablet alias, and wait for the row in the // reparent_journal table (if timeCreatedNS is non-zero). - SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool) error + SetReplicationSource(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplication bool, semiSync bool, heartbeatInterval float64) error // ReplicaWasRestarted tells the replica tablet its primary has changed ReplicaWasRestarted(ctx context.Context, tablet *topodatapb.Tablet, parent *topodatapb.TabletAlias) error diff --git a/go/vt/vttablet/tmrpctest/test_tm_rpc.go b/go/vt/vttablet/tmrpctest/test_tm_rpc.go index 2393a3fb2f0..fdd10f8f4d3 100644 --- a/go/vt/vttablet/tmrpctest/test_tm_rpc.go +++ b/go/vt/vttablet/tmrpctest/test_tm_rpc.go @@ -65,6 +65,16 @@ func (fra *fakeRPCTM) DeleteVReplicationWorkflow(ctx context.Context, req *table panic("implement me") } +func (fra *fakeRPCTM) HasVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.HasVReplicationWorkflowsRequest) (*tabletmanagerdatapb.HasVReplicationWorkflowsResponse, error) { + //TODO implement me + panic("implement me") +} + +func (fra *fakeRPCTM) ReadVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowsRequest) 
(*tabletmanagerdatapb.ReadVReplicationWorkflowsResponse, error) { + //TODO implement me + panic("implement me") +} + func (fra *fakeRPCTM) ReadVReplicationWorkflow(ctx context.Context, req *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) { //TODO implement me panic("implement me") @@ -75,6 +85,11 @@ func (fra *fakeRPCTM) UpdateVReplicationWorkflow(ctx context.Context, req *table panic("implement me") } +func (fra *fakeRPCTM) UpdateVReplicationWorkflows(ctx context.Context, req *tabletmanagerdatapb.UpdateVReplicationWorkflowsRequest) (*tabletmanagerdatapb.UpdateVReplicationWorkflowsResponse, error) { + //TODO implement me + panic("implement me") +} + func (fra *fakeRPCTM) ResetSequences(ctx context.Context, tables []string) error { //TODO implement me panic("implement me") @@ -657,6 +672,18 @@ func (fra *fakeRPCTM) ExecuteFetchAsDba(ctx context.Context, req *tabletmanagerd return testExecuteFetchResult, nil } +func (fra *fakeRPCTM) ExecuteMultiFetchAsDba(ctx context.Context, req *tabletmanagerdatapb.ExecuteMultiFetchAsDbaRequest) ([]*querypb.QueryResult, error) { + if fra.panics { + panic(fmt.Errorf("test-triggered panic")) + } + compare(fra.t, "ExecuteMultiFetchAsDba query", req.Sql, testExecuteFetchQuery) + compare(fra.t, "ExecuteMultiFetchAsDba maxrows", req.MaxRows, testExecuteFetchMaxRows) + compareBool(fra.t, "ExecuteMultiFetchAsDba disableBinlogs", req.DisableBinlogs) + compareBool(fra.t, "ExecuteMultiFetchAsDba reloadSchema", req.ReloadSchema) + + return []*querypb.QueryResult{testExecuteFetchResult}, nil +} + func (fra *fakeRPCTM) ExecuteFetchAsAllPrivs(ctx context.Context, req *tabletmanagerdatapb.ExecuteFetchAsAllPrivsRequest) (*querypb.QueryResult, error) { if fra.panics { panic(fmt.Errorf("test-triggered panic")) @@ -1039,7 +1066,12 @@ func tmRPCTestPopulateReparentJournal(ctx context.Context, t *testing.T, client func tmRPCTestPopulateReparentJournalPanic(ctx context.Context, t 
*testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { err := client.PopulateReparentJournal(ctx, tablet, testTimeCreatedNS, testActionName, testPrimaryAlias, testReplicationPosition) - expectHandleRPCPanic(t, "PopulateReparentJournal", false /*verbose*/, err) + expectHandleRPCPanic(t, "PopulateReparentJournal", true /*verbose*/, err) +} + +func tmRPCTestWaitForPositionPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { + err := client.WaitForPosition(ctx, tablet, testReplicationPosition) + expectHandleRPCPanic(t, "WaitForPosition", true /*verbose*/, err) } var testInitReplicaCalled = false @@ -1146,8 +1178,9 @@ func tmRPCTestResetReplicationParametersPanic(ctx context.Context, t *testing.T, var testSetReplicationSourceCalled = false var testForceStartReplica = true +var testHeartbeatInterval float64 = 4.2 -func (fra *fakeRPCTM) SetReplicationSource(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplica bool, semiSync bool) error { +func (fra *fakeRPCTM) SetReplicationSource(ctx context.Context, parent *topodatapb.TabletAlias, timeCreatedNS int64, waitPosition string, forceStartReplica bool, semiSync bool, heartbeatInterval float64) error { if fra.panics { panic(fmt.Errorf("test-triggered panic")) } @@ -1155,17 +1188,18 @@ func (fra *fakeRPCTM) SetReplicationSource(ctx context.Context, parent *topodata compare(fra.t, "SetReplicationSource timeCreatedNS", timeCreatedNS, testTimeCreatedNS) compare(fra.t, "SetReplicationSource waitPosition", waitPosition, testWaitPosition) compare(fra.t, "SetReplicationSource forceStartReplica", forceStartReplica, testForceStartReplica) + compare(fra.t, "SetReplicationSource heartbeatInterval", heartbeatInterval, testHeartbeatInterval) testSetReplicationSourceCalled = true return nil } func tmRPCTestSetReplicationSource(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet 
*topodatapb.Tablet) { - err := client.SetReplicationSource(ctx, tablet, testPrimaryAlias, testTimeCreatedNS, testWaitPosition, testForceStartReplica, false) + err := client.SetReplicationSource(ctx, tablet, testPrimaryAlias, testTimeCreatedNS, testWaitPosition, testForceStartReplica, false, testHeartbeatInterval) compareError(t, "SetReplicationSource", err, true, testSetReplicationSourceCalled) } func tmRPCTestSetReplicationSourcePanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { - err := client.SetReplicationSource(ctx, tablet, testPrimaryAlias, testTimeCreatedNS, testWaitPosition, testForceStartReplica, false) + err := client.SetReplicationSource(ctx, tablet, testPrimaryAlias, testTimeCreatedNS, testWaitPosition, testForceStartReplica, false, 0) expectHandleRPCPanic(t, "SetReplicationSource", true /*verbose*/, err) } @@ -1239,7 +1273,7 @@ func tmRPCTestPromoteReplicaPanic(ctx context.Context, t *testing.T, client tmcl // Backup / restore related methods // -var testBackupConcurrency = int64(24) +var testBackupConcurrency = int32(24) var testBackupAllowPrimary = false var testBackupCalled = false var testRestoreFromBackupCalled = false @@ -1256,7 +1290,7 @@ func (fra *fakeRPCTM) Backup(ctx context.Context, logger logutil.Logger, request } func tmRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.Tablet) { - req := &tabletmanagerdatapb.BackupRequest{Concurrency: int64(testBackupConcurrency), AllowPrimary: testBackupAllowPrimary} + req := &tabletmanagerdatapb.BackupRequest{Concurrency: testBackupConcurrency, AllowPrimary: testBackupAllowPrimary} stream, err := client.Backup(ctx, tablet, req) if err != nil { t.Fatalf("Backup failed: %v", err) @@ -1266,7 +1300,7 @@ func tmRPCTestBackup(ctx context.Context, t *testing.T, client tmclient.TabletMa } func tmRPCTestBackupPanic(ctx context.Context, t *testing.T, client tmclient.TabletManagerClient, tablet 
*topodatapb.Tablet) { - req := &tabletmanagerdatapb.BackupRequest{Concurrency: int64(testBackupConcurrency), AllowPrimary: testBackupAllowPrimary} + req := &tabletmanagerdatapb.BackupRequest{Concurrency: testBackupConcurrency, AllowPrimary: testBackupAllowPrimary} stream, err := client.Backup(ctx, tablet, req) if err != nil { t.Fatalf("Backup failed: %v", err) @@ -1447,6 +1481,7 @@ func Run(t *testing.T, client tmclient.TabletManagerClient, tablet *topodatapb.T tmRPCTestResetReplicationPanic(ctx, t, client, tablet) tmRPCTestInitPrimaryPanic(ctx, t, client, tablet) tmRPCTestPopulateReparentJournalPanic(ctx, t, client, tablet) + tmRPCTestWaitForPositionPanic(ctx, t, client, tablet) tmRPCTestDemotePrimaryPanic(ctx, t, client, tablet) tmRPCTestUndoDemotePrimaryPanic(ctx, t, client, tablet) tmRPCTestSetReplicationSourcePanic(ctx, t, client, tablet) diff --git a/go/vt/vttest/environment.go b/go/vt/vttest/environment.go index 7f3ea88437a..3487a9fc1c7 100644 --- a/go/vt/vttest/environment.go +++ b/go/vt/vttest/environment.go @@ -18,7 +18,7 @@ package vttest import ( "fmt" - "math/rand" + "math/rand/v2" "os" "path" "strings" @@ -99,6 +99,7 @@ type LocalTestEnv struct { BasePort int TmpPath string DefaultMyCnf []string + InitDBFile string Env []string EnableToxiproxy bool } @@ -133,7 +134,7 @@ func (env *LocalTestEnv) BinaryPath(binary string) string { func (env *LocalTestEnv) MySQLManager(mycnf []string, snapshot string) (MySQLManager, error) { mysqlctl := &Mysqlctl{ Binary: env.BinaryPath("mysqlctl"), - InitFile: path.Join(os.Getenv("VTROOT"), "config/init_db.sql"), + InitFile: env.InitDBFile, Directory: env.TmpPath, Port: env.PortForProtocol("mysql", ""), MyCnf: append(env.DefaultMyCnf, mycnf...), @@ -231,7 +232,7 @@ func tmpdir(dataroot string) (dir string, err error) { } func randomPort() int { - v := rand.Int31n(20000) + v := rand.Int32N(20000) return int(v + 10000) } @@ -281,6 +282,7 @@ func NewLocalTestEnvWithDirectory(basePort int, directory string) (*LocalTestEnv 
BasePort: basePort, TmpPath: directory, DefaultMyCnf: mycnf, + InitDBFile: path.Join(os.Getenv("VTROOT"), "config/init_db.sql"), Env: []string{ fmt.Sprintf("VTDATAROOT=%s", directory), "VTTEST=endtoend", diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 9d84cb7fceb..3c65f7de1eb 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -18,7 +18,6 @@ package vttest import ( "bufio" - "bytes" "context" "encoding/json" "fmt" @@ -36,14 +35,15 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/constants/sidecar" - - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/vtctl/vtctlclient" vschemapb "vitess.io/vitess/go/vt/proto/vschema" @@ -110,6 +110,10 @@ type Config struct { // cluster startup if the data directory does not already exist. PersistentMode bool + // VtCombo bind address. + // vtcombo will bind to this address when running the servenv. + VtComboBindAddress string + // MySQL protocol bind address. // vtcombo will bind to this address when exposing the mysql protocol socket MySQLBindHost string @@ -152,6 +156,9 @@ type Config struct { ExternalTopoGlobalRoot string VtgateTabletRefreshInterval time.Duration + + // Set the planner to fail on scatter queries + NoScatter bool } // InitSchemas is a shortcut for tests that just want to setup a single @@ -173,12 +180,12 @@ func (cfg *Config) InitSchemas(keyspace, schema string, vschema *vschemapb.Keysp // Write the schema if set. 
if schema != "" { ksDir := path.Join(schemaDir, keyspace) - err := os.Mkdir(ksDir, os.ModeDir|0775) + err := os.Mkdir(ksDir, os.ModeDir|0o775) if err != nil { return err } fileName := path.Join(ksDir, "schema.sql") - err = os.WriteFile(fileName, []byte(schema), 0666) + err = os.WriteFile(fileName, []byte(schema), 0o666) if err != nil { return err } @@ -191,7 +198,7 @@ func (cfg *Config) InitSchemas(keyspace, schema string, vschema *vschemapb.Keysp if err != nil { return err } - if err := os.WriteFile(vschemaFilePath, vschemaJSON, 0644); err != nil { + if err := os.WriteFile(vschemaFilePath, vschemaJSON, 0o644); err != nil { return err } } @@ -277,7 +284,11 @@ type LocalCluster struct { // cluster access should be performed through the vtgate port. func (db *LocalCluster) MySQLConnParams() mysql.ConnParams { connParams := db.mysql.Params(db.DbName()) - connParams.Charset = db.Config.Charset + ch, err := collations.MySQL8().ParseConnectionCharset(db.Config.Charset) + if err != nil { + panic(err) + } + connParams.Charset = ch return connParams } @@ -298,7 +309,11 @@ func (db *LocalCluster) MySQLCleanConnParams() mysql.ConnParams { mysqlctl = toxiproxy.mysqlctl } connParams := mysqlctl.Params(db.DbName()) - connParams.Charset = db.Config.Charset + ch, err := collations.MySQL8().ParseConnectionCharset(db.Config.Charset) + if err != nil { + panic(err) + } + connParams.Charset = ch return connParams } @@ -489,11 +504,6 @@ func (db *LocalCluster) loadSchema(shouldRunDatabaseMigrations bool) error { } for _, kpb := range db.Topology.Keyspaces { - if kpb.ServedFrom != "" { - // redirected keyspaces have no underlying database - continue - } - keyspace := kpb.Name keyspaceDir := path.Join(db.SchemaDir, keyspace) @@ -549,11 +559,12 @@ func (db *LocalCluster) createVTSchema() error { return db.ExecuteFetch(query, "") } - if err := sidecardb.Init(context.Background(), sidecardbExec); err != nil { + if err := sidecardb.Init(context.Background(), vtenv.NewTestEnv(), 
sidecardbExec); err != nil { return err } return nil } + func (db *LocalCluster) createDatabases() error { log.Info("Creating databases in cluster...") @@ -565,9 +576,6 @@ func (db *LocalCluster) createDatabases() error { var sql []string for _, kpb := range db.Topology.Keyspaces { - if kpb.ServedFrom != "" { - continue - } for _, dbname := range db.shardNames(kpb) { sql = append(sql, fmt.Sprintf("create database `%s`", dbname)) } @@ -641,6 +649,7 @@ func (db *LocalCluster) JSONConfig() any { } config := map[string]any{ + "bind_address": db.vt.BindAddress, "port": db.vt.Port, "socket": db.mysql.UnixSocket(), "vtcombo_mysql_port": db.Env.PortForProtocol("vtcombo_mysql_port", ""), @@ -697,7 +706,7 @@ func dirExist(dir string) bool { // statements in the SQL file. func LoadSQLFile(filename, sourceroot string) ([]string, error) { var ( - cmd bytes.Buffer + cmd strings.Builder sql []string inSQ bool inDQ bool @@ -783,7 +792,7 @@ func (db *LocalCluster) VTProcess() *VtProcess { // a pointer to the interface. To read this vschema, the caller must convert it to a map func (vt *VtProcess) ReadVSchema() (*interface{}, error) { httpClient := &http.Client{Timeout: 5 * time.Second} - resp, err := httpClient.Get(fmt.Sprintf("http://%s:%d/debug/vschema", "127.0.0.1", vt.Port)) + resp, err := httpClient.Get(fmt.Sprintf("http://%s:%d/debug/vschema", vt.BindAddress, vt.Port)) if err != nil { return nil, err } diff --git a/go/vt/vttest/plugin_consultopo.go b/go/vt/vttest/plugin_consultopo.go index cb10acc2cd2..3d47ee51681 100644 --- a/go/vt/vttest/plugin_consultopo.go +++ b/go/vt/vttest/plugin_consultopo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and diff --git a/go/vt/vttest/plugin_zk2topo.go b/go/vt/vttest/plugin_zk2topo.go index 3859454f7bd..7f1f81a5701 100644 --- a/go/vt/vttest/plugin_zk2topo.go +++ b/go/vt/vttest/plugin_zk2topo.go @@ -7,7 +7,7 @@ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreedto in writing, software +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and diff --git a/go/vt/vttest/randomdata.go b/go/vt/vttest/randomdata.go index 19eaeb98fb0..6efcecfce04 100644 --- a/go/vt/vttest/randomdata.go +++ b/go/vt/vttest/randomdata.go @@ -18,7 +18,7 @@ package vttest import ( "fmt" - "math/rand" + "math/rand/v2" "strings" ) @@ -28,7 +28,7 @@ import ( // being used, as to ensure reproducible generation between runs. // A FieldGenerator must return the raw SQL data for the field, ready to be // placed into a SQL statement. The returned value will _NOT_ be escaped. -type FieldGenerator func(name, t string, rng *rand.Rand) (string, error) +type FieldGenerator func(name, t string) (string, error) // SeedConfig are the settings to enable the initialization of the // local cluster with random data. 
This struct must be set in Config @@ -54,16 +54,6 @@ type SeedConfig struct { RandomField FieldGenerator } -// SeedConfigDefaults returns the default values for SeedConfig -func SeedConfigDefaults() *SeedConfig { - return &SeedConfig{ - RngSeed: rand.Int(), - MinSize: 1000, - MaxSize: 10000, - NullProbability: 0.1, - } -} - const batchInsertSize = 1000 func (db *LocalCluster) batchInsert(dbname, table string, fields []string, rows [][]string) error { @@ -84,14 +74,14 @@ func (db *LocalCluster) batchInsert(dbname, table string, fields []string, rows return db.Execute([]string{sql}, dbname) } -func (db *LocalCluster) randomField(name, t string, allowNull bool, rng *rand.Rand) (string, error) { - if allowNull && rng.Float64() < db.Seed.NullProbability { +func (db *LocalCluster) randomField(name, t string, allowNull bool) (string, error) { + if allowNull && rand.Float64() < db.Seed.NullProbability { return "NULL", nil } - return db.Seed.RandomField(name, t, rng) + return db.Seed.RandomField(name, t) } -func (db *LocalCluster) populateTable(dbname, table string, rng *rand.Rand) error { +func (db *LocalCluster) populateTable(dbname, table string) error { fieldInfo, err := db.Query(fmt.Sprintf("DESCRIBE %s", table), dbname, 1024) if err != nil { return err @@ -100,7 +90,7 @@ func (db *LocalCluster) populateTable(dbname, table string, rng *rand.Rand) erro var ( minRows = db.Seed.MinSize maxRows = db.Seed.MaxSize - numRows = rng.Intn(maxRows-minRows) + minRows + numRows = rand.IntN(maxRows-minRows) + minRows rows [][]string fieldNames []string ) @@ -112,7 +102,7 @@ func (db *LocalCluster) populateTable(dbname, table string, rng *rand.Rand) erro fieldType := row[1].ToString() allowNull := row[2].ToString() == "YES" - f, err := db.randomField(fieldName, fieldType, allowNull, rng) + f, err := db.randomField(fieldName, fieldType, allowNull) if err != nil { return err } @@ -134,14 +124,14 @@ func (db *LocalCluster) populateTable(dbname, table string, rng *rand.Rand) erro return 
nil } -func (db *LocalCluster) populateShard(dbname string, rng *rand.Rand) error { +func (db *LocalCluster) populateShard(dbname string) error { q, err := db.Query("SHOW TABLES", dbname, 1024) if err != nil { return err } for _, row := range q.Rows { - if err := db.populateTable(dbname, row[0].ToString(), rng); err != nil { + if err := db.populateTable(dbname, row[0].ToString()); err != nil { return err } } @@ -149,13 +139,9 @@ func (db *LocalCluster) populateShard(dbname string, rng *rand.Rand) error { } func (db *LocalCluster) populateWithRandomData() error { - rng := rand.New(rand.NewSource(int64(db.Seed.RngSeed))) for _, kpb := range db.Topology.Keyspaces { - if kpb.ServedFrom != "" { - continue - } for _, dbname := range db.shardNames(kpb) { - if err := db.populateShard(dbname, rng); err != nil { + if err := db.populateShard(dbname); err != nil { return err } } diff --git a/go/vt/vttest/toxiproxyctl.go b/go/vt/vttest/toxiproxyctl.go index 436739fcf4c..6ffc9548c07 100644 --- a/go/vt/vttest/toxiproxyctl.go +++ b/go/vt/vttest/toxiproxyctl.go @@ -63,21 +63,16 @@ func NewToxiproxyctl(binary string, apiPort, mysqlPort int, mysqlctl *Mysqlctl, // The original initFile does not have any users who can access through TCP/IP connection. // Here we update the init file to create the user. - initDb, _ := os.ReadFile(mysqlctl.InitFile) createUserCmd := fmt.Sprintf(` # Admin user for TCP/IP connection with all privileges. 
CREATE USER '%s'@'127.0.0.1'; GRANT ALL ON *.* TO '%s'@'127.0.0.1'; GRANT GRANT OPTION ON *.* TO '%s'@'127.0.0.1'; `, dbaUser, dbaUser, dbaUser) - sql, err := getInitDBSQL(string(initDb), createUserCmd) - if err != nil { - return nil, vterrors.Wrap(err, "failed to get a modified init db sql") - } newInitFile := path.Join(mysqlctl.Directory, "init_db_toxiproxyctl.sql") - err = os.WriteFile(newInitFile, []byte(sql), 0600) + err := WriteInitDBFile(mysqlctl.InitFile, createUserCmd, newInitFile) if err != nil { - return nil, vterrors.Wrap(err, "failed to write a modified init db file") + return nil, vterrors.Wrap(err, "failed to get a modified init db sql") } mysqlctl.InitFile = newInitFile @@ -235,6 +230,20 @@ func (ctl *Toxiproxyctl) RemoveTimeoutToxic() error { return ctl.proxy.RemoveToxic("my-timeout") } +// WriteInitDBFile is a helper function that writes a modified init_db.sql file with custom SQL statements. +func WriteInitDBFile(initFile, customSQL, newInitFile string) error { + initDb, _ := os.ReadFile(initFile) + sql, err := getInitDBSQL(string(initDb), customSQL) + if err != nil { + return vterrors.Wrap(err, "failed to get a modified init db sql") + } + err = os.WriteFile(newInitFile, []byte(sql), 0600) + if err != nil { + return vterrors.Wrap(err, "failed to write a modified init db file") + } + return nil +} + // getInitDBSQL is a helper function that retrieves the modified contents of the init_db.sql file with custom SQL statements. // We avoid using vitess.io/vitess/go/test/endtoend/utils.GetInitDBSQL as importing this package adds unnecessary flags to vttestserver. 
func getInitDBSQL(initDBSQL string, customSQL string) (string, error) { diff --git a/go/vt/vttest/vtprocess.go b/go/vt/vttest/vtprocess.go index 2053973b766..2d2c9116c6d 100644 --- a/go/vt/vttest/vtprocess.go +++ b/go/vt/vttest/vtprocess.go @@ -50,6 +50,7 @@ type VtProcess struct { Binary string ExtraArgs []string Env []string + BindAddress string Port int PortGrpc int HealthCheck HealthChecker @@ -91,7 +92,7 @@ func (vtp *VtProcess) IsHealthy() bool { // Address returns the main address for this Vitess process. // This is usually the main HTTP endpoint for the service. func (vtp *VtProcess) Address() string { - return fmt.Sprintf("localhost:%d", vtp.Port) + return fmt.Sprintf("%s:%d", vtp.BindAddress, vtp.Port) } // WaitTerminate attempts to gracefully shutdown the Vitess process by sending @@ -128,7 +129,7 @@ func (vtp *VtProcess) WaitStart() (err error) { vtp.proc = exec.Command( vtp.Binary, "--port", fmt.Sprintf("%d", vtp.Port), - "--bind-address", "127.0.0.1", + "--bind-address", vtp.BindAddress, "--log_dir", vtp.LogDirectory, "--alsologtostderr", ) @@ -141,8 +142,7 @@ func (vtp *VtProcess) WaitStart() (err error) { vtp.proc.Args = append(vtp.proc.Args, vtp.ExtraArgs...) vtp.proc.Env = append(vtp.proc.Env, os.Environ()...) vtp.proc.Env = append(vtp.proc.Env, vtp.Env...) 
- - if testing.Verbose() { + if !testing.Testing() || testing.Verbose() { vtp.proc.Stderr = os.Stderr vtp.proc.Stdout = os.Stdout } @@ -184,23 +184,28 @@ const ( // QueryServerArgs are the default arguments passed to all Vitess query servers var QueryServerArgs = []string{ "--queryserver-config-pool-size", "4", - "--queryserver-config-query-timeout", "300", - "--queryserver-config-schema-reload-time", "60", + "--queryserver-config-query-timeout", "300s", + "--queryserver-config-schema-reload-time", "60s", "--queryserver-config-stream-pool-size", "4", "--queryserver-config-transaction-cap", "4", - "--queryserver-config-transaction-timeout", "300", - "--queryserver-config-txpool-timeout", "300", + "--queryserver-config-transaction-timeout", "300s", + "--queryserver-config-txpool-timeout", "300s", } // VtcomboProcess returns a VtProcess handle for a local `vtcombo` service, // configured with the given Config. // The process must be manually started by calling WaitStart() func VtcomboProcess(environment Environment, args *Config, mysql MySQLManager) (*VtProcess, error) { + vtcomboBindAddress := "127.0.0.1" + if args.VtComboBindAddress != "" { + vtcomboBindAddress = args.VtComboBindAddress + } vt := &VtProcess{ Name: "vtcombo", Directory: environment.Directory(), LogDirectory: environment.LogDirectory(), Binary: environment.BinaryPath("vtcombo"), + BindAddress: vtcomboBindAddress, Port: environment.PortForProtocol("vtcombo", ""), PortGrpc: environment.PortForProtocol("vtcombo", "grpc"), HealthCheck: environment.ProcessHealthCheck("vtcombo"), @@ -230,6 +235,7 @@ func VtcomboProcess(environment Environment, args *Config, mysql MySQLManager) ( fmt.Sprintf("--enable_online_ddl=%t", args.EnableOnlineDDL), fmt.Sprintf("--enable_direct_ddl=%t", args.EnableDirectDDL), fmt.Sprintf("--enable_system_settings=%t", args.EnableSystemSettings), + fmt.Sprintf("--no_scatter=%t", args.NoScatter), }...) // If topo tablet refresh interval is not defined then we will give it value of 10s. 
Please note diff --git a/go/vt/vttls/vttls.go b/go/vt/vttls/vttls.go index 098ed67eec4..adaf2cca672 100644 --- a/go/vt/vttls/vttls.go +++ b/go/vt/vttls/vttls.go @@ -283,7 +283,7 @@ func loadTLSCertificate(cert, key string) (*[]tls.Certificate, error) { result, ok := tlsCertificates.Load(tlsIdentifier) if !ok { - return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "Cannot find loaded tls certificate with cert: %s, key%s", cert, key) + return nil, vterrors.Errorf(vtrpc.Code_NOT_FOUND, "Cannot find loaded tls certificate with cert: %s, key: %s", cert, key) } return result.(*[]tls.Certificate), nil diff --git a/go/vt/wrangler/external_cluster_test.go b/go/vt/wrangler/external_cluster_test.go index 3c878411b6b..9876e2bf999 100644 --- a/go/vt/wrangler/external_cluster_test.go +++ b/go/vt/wrangler/external_cluster_test.go @@ -4,13 +4,13 @@ import ( "context" "testing" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" ) func TestVitessCluster(t *testing.T) { @@ -18,7 +18,7 @@ func TestVitessCluster(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") tmc := newTestWranglerTMClient() - wr := New(logutil.NewConsoleLogger(), ts, tmc) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmc) name, topoType, topoServer, topoRoot := "c1", "x", "y", "z" t.Run("Zero clusters to start", func(t *testing.T) { diff --git a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go index 7bcc5f5bcf2..7fce5ce9afc 100644 --- a/go/vt/wrangler/fake_dbclient_test.go +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/assert" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/sqltypes" ) @@ -160,6 +161,22 @@ func (dc *fakeDBClient) ExecuteFetch(query string, 
maxrows int) (*sqltypes.Resul return qr, err } +func (dc *fakeDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) + if err != nil { + return nil, err + } + results := make([]*sqltypes.Result, 0, len(queries)) + for _, query := range queries { + qr, err := dc.executeFetch(query, maxrows) + if err != nil { + return nil, err + } + results = append(results, qr) + } + return results, nil +} + // ExecuteFetch is part of the DBClient interface func (dc *fakeDBClient) executeFetch(query string, maxrows int) (*sqltypes.Result, error) { if dbrs := dc.queries[query]; dbrs != nil { diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go index 66d5cf474d6..b70a64d644e 100644 --- a/go/vt/wrangler/fake_tablet_test.go +++ b/go/vt/wrangler/fake_tablet_test.go @@ -23,29 +23,29 @@ import ( "testing" "time" - vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/grpctmserver" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager" + vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" "vitess.io/vitess/go/vt/vttablet/tabletservermock" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" - querypb "vitess.io/vitess/go/vt/proto/query" - 
topodatapb "vitess.io/vitess/go/vt/proto/topodata" - // import the gRPC client implementation for tablet manager _ "vitess.io/vitess/go/vt/vttablet/grpctmclient" @@ -190,7 +190,6 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { ft.Tablet.PortMap["vt"] = vtPort ft.Tablet.PortMap["grpc"] = gRPCPort ft.Tablet.Hostname = "127.0.0.1" - config := &tabletenv.TabletConfig{} // Create a test tm on that port, and re-read the record // (it has new ports and IP). ft.TM = &tabletmanager.TabletManager{ @@ -199,9 +198,10 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { MysqlDaemon: ft.FakeMysqlDaemon, DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), - VDiffEngine: vdiff2.NewEngine(config, wr.TopoServer(), ft.Tablet), + VDiffEngine: vdiff2.NewEngine(wr.TopoServer(), ft.Tablet, collations.MySQL8(), sqlparser.NewTestParser()), + Env: vtenv.NewTestEnv(), } - if err := ft.TM.Start(ft.Tablet, 0); err != nil { + if err := ft.TM.Start(ft.Tablet, nil); err != nil { t.Fatal(err) } ft.Tablet = ft.TM.Tablet() diff --git a/go/vt/wrangler/keyspace.go b/go/vt/wrangler/keyspace.go index 7f3f00da4f8..a5f7d6ae0bf 100644 --- a/go/vt/wrangler/keyspace.go +++ b/go/vt/wrangler/keyspace.go @@ -44,7 +44,7 @@ const ( // validateNewWorkflow ensures that the specified workflow doesn't already exist // in the keyspace. 
func (wr *Wrangler) validateNewWorkflow(ctx context.Context, keyspace, workflow string) error { - allshards, err := wr.ts.FindAllShardsInKeyspace(ctx, keyspace) + allshards, err := wr.ts.FindAllShardsInKeyspace(ctx, keyspace, nil) if err != nil { return err } diff --git a/go/vt/wrangler/log_recorder_test.go b/go/vt/wrangler/log_recorder_test.go index 5eaecdac702..852b80876a4 100644 --- a/go/vt/wrangler/log_recorder_test.go +++ b/go/vt/wrangler/log_recorder_test.go @@ -19,7 +19,7 @@ package wrangler import ( "testing" - "github.com/magiconair/properties/assert" + "github.com/stretchr/testify/assert" ) func TestLogRecorder(t *testing.T) { diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 990492bd191..bd3f7b232ef 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -39,6 +39,7 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/schemadiff" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -133,7 +134,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta cell, tabletTypesStr string, allTables bool, excludeTables string, autoStart, stopAfterCopy bool, externalCluster string, dropForeignKeys, deferSecondaryKeys bool, sourceTimeZone, onDDL string, sourceShards []string, noRoutingRules bool, atomicCopy bool) (err error) { - //FIXME validate tableSpecs, allTables, excludeTables + // FIXME validate tableSpecs, allTables, excludeTables var tables []string var externalTopo *topo.Server @@ -177,7 +178,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return err } if len(tables) > 0 { - err = wr.validateSourceTablesExist(ctx, sourceKeyspace, ksTables, tables) + err = wr.validateSourceTablesExist(sourceKeyspace, ksTables, tables) if err != nil { return err } @@ -192,7 +193,7 @@ func (wr *Wrangler) 
MoveTables(ctx context.Context, workflow, sourceKeyspace, ta excludeTables = strings.TrimSpace(excludeTables) if excludeTables != "" { excludeTablesList = strings.Split(excludeTables, ",") - err = wr.validateSourceTablesExist(ctx, sourceKeyspace, ksTables, excludeTablesList) + err = wr.validateSourceTablesExist(sourceKeyspace, ksTables, excludeTablesList) if err != nil { return err } @@ -317,13 +318,11 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return err } } - if vschema != nil { - // We added to the vschema. - if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { - return err - } - } + // We added to the vschema. + if err := wr.ts.SaveVSchema(ctx, targetKeyspace, vschema); err != nil { + return err + } } if err := wr.ts.RebuildSrvVSchema(ctx, nil); err != nil { return err @@ -367,7 +366,7 @@ func (wr *Wrangler) MoveTables(ctx context.Context, workflow, sourceKeyspace, ta return nil } -func (wr *Wrangler) validateSourceTablesExist(ctx context.Context, sourceKeyspace string, ksTables, tables []string) error { +func (wr *Wrangler) validateSourceTablesExist(sourceKeyspace string, ksTables, tables []string) error { // validate that tables provided are present in the source keyspace var missingTables []string for _, table := range tables { @@ -446,7 +445,7 @@ func (wr *Wrangler) checkIfPreviousJournalExists(ctx context.Context, mz *materi mu sync.Mutex exists bool tablets []string - ws = workflow.NewServer(wr.ts, wr.tmc) + ws = workflow.NewServer(wr.env, wr.ts, wr.tmc) ) err := forAllSources(func(si *topo.ShardInfo) error { @@ -541,7 +540,7 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp return nil, nil, nil, fmt.Errorf("vindex %s is not a lookup type", vindex.Type) } - targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + targetKeyspace, targetTableName, err = wr.env.Parser().ParseTable(vindex.Params["table"]) if err != nil || targetKeyspace 
== "" { return nil, nil, nil, fmt.Errorf("vindex table name must be in the form .
{{.MysqlResponseTime.Seconds}} {{.WaitingForConnection.Seconds}} {{.PlanType}}{{.OriginalSQL | truncateQuery | unquote | cssWrappable}}{{.OriginalSQL | .Parser.TruncateForUI | unquote | cssWrappable}} {{.NumberOfQueries}} {{.FmtQuerySources}} {{.RowsAffected}}
. Got: %v", vindex.Params["table"]) } @@ -838,7 +837,7 @@ func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName s return fmt.Errorf("vindex %s not found in vschema", qualifiedVindexName) } - targetKeyspace, targetTableName, err := sqlparser.ParseTable(sourceVindex.Params["table"]) + targetKeyspace, targetTableName, err := wr.env.Parser().ParseTable(sourceVindex.Params["table"]) if err != nil || targetKeyspace == "" { return fmt.Errorf("vindex table name must be in the form .
. Got: %v", sourceVindex.Params["table"]) } @@ -1065,7 +1064,7 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater if err != nil { return nil, err } - targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace, wr.env.Parser()) if err != nil { return nil, err } @@ -1206,9 +1205,9 @@ func (mz *materializer) deploySchema(ctx context.Context) error { var err error mu.Lock() if len(sourceDDLs) == 0 { - //only get ddls for tables, once and lazily: if we need to copy the schema from source to target - //we copy schemas from primaries on the source keyspace - //and we have found use cases where user just has a replica (no primary) in the source keyspace + // only get ddls for tables, once and lazily: if we need to copy the schema from source to target + // we copy schemas from primaries on the source keyspace + // and we have found use cases where user just has a replica (no primary) in the source keyspace sourceDDLs, err = mz.getSourceTableDDLs(ctx) } mu.Unlock() @@ -1221,7 +1220,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { if ts.SourceExpression != "" { // Check for table if non-empty SourceExpression. 
- sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + sourceTableName, err := mz.wr.env.Parser().TableFromStatement(ts.SourceExpression) if err != nil { return err } @@ -1237,7 +1236,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if createDDL == createDDLAsCopyDropConstraint { - strippedDDL, err := stripTableConstraints(ddl) + strippedDDL, err := stripTableConstraints(ddl, mz.wr.env.Parser()) if err != nil { return err } @@ -1246,7 +1245,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if createDDL == createDDLAsCopyDropForeignKeys { - strippedDDL, err := stripTableForeignKeys(ddl) + strippedDDL, err := stripTableForeignKeys(ddl, mz.wr.env.Parser()) if err != nil { return err } @@ -1260,13 +1259,31 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if len(applyDDLs) > 0 { - sql := strings.Join(applyDDLs, ";\n") + if mz.ms.AtomicCopy { + // AtomicCopy suggests we may be interested in Foreign Key support. As such, we want to + // normalize the source schema: ensure the order of table definitions is compatible with + // the constraints graph. We want to first create the parents, then the children. + // We use schemadiff to normalize the schema. + // For now, and because this is could have wider implications, we ignore any errors in + // reading the source schema. + env := schemadiff.NewEnv(mz.wr.env, mz.wr.env.CollationEnv().DefaultConnectionCharset()) + schema, err := schemadiff.NewSchemaFromQueries(env, applyDDLs) + log.Infof("AtomicCopy schema:\n %v", applyDDLs) + if err != nil { + log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) + } else { + applyDDLs = schema.ToQueries() + log.Infof("AtomicCopy used, and schema was normalized via schemadiff. 
%v queries normalized", len(applyDDLs)) + } + } + sql := strings.Join(applyDDLs, ";\n") _, err = mz.wr.tmc.ApplySchema(ctx, targetTablet.Tablet, &tmutils.SchemaChange{ - SQL: sql, - Force: false, - AllowReplication: true, - SQLMode: vreplication.SQLMode, + SQL: sql, + Force: false, + AllowReplication: true, + SQLMode: vreplication.SQLMode, + DisableForeignKeyChecks: true, }) if err != nil { return err @@ -1277,9 +1294,8 @@ func (mz *materializer) deploySchema(ctx context.Context) error { }) } -func stripTableForeignKeys(ddl string) (string, error) { - - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableForeignKeys(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -1307,8 +1323,8 @@ func stripTableForeignKeys(ddl string) (string, error) { return newDDL, nil } -func stripTableConstraints(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableConstraints(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -1354,7 +1370,7 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top } // Validate non-empty query. 
- stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.wr.env.Parser().Parse(ts.SourceExpression) if err != nil { return "", err } @@ -1377,13 +1393,13 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top } mappedCols = append(mappedCols, colName) } - subExprs := make(sqlparser.SelectExprs, 0, len(mappedCols)+2) + subExprs := make(sqlparser.Exprs, 0, len(mappedCols)+2) for _, mappedCol := range mappedCols { - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: mappedCol}) + subExprs = append(subExprs, mappedCol) } vindexName := fmt.Sprintf("%s.%s", mz.ms.TargetKeyspace, cv.Name) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(vindexName)}) - subExprs = append(subExprs, &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral("{{.keyrange}}")}) + subExprs = append(subExprs, sqlparser.NewStrLiteral(vindexName)) + subExprs = append(subExprs, sqlparser.NewStrLiteral("{{.keyrange}}")) inKeyRange := &sqlparser.FuncExpr{ Name: sqlparser.NewIdentifierCI("in_keyrange"), Exprs: subExprs, diff --git a/go/vt/wrangler/materializer_env_test.go b/go/vt/wrangler/materializer_env_test.go index b98621ffa1b..4de02b5abcc 100644 --- a/go/vt/wrangler/materializer_env_test.go +++ b/go/vt/wrangler/materializer_env_test.go @@ -21,25 +21,23 @@ import ( "fmt" "os" "regexp" - "runtime" "strconv" "strings" "sync" "testing" - "time" - "go.uber.org/goleak" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" - _flag "vitess.io/vitess/go/internal/flag" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb 
"vitess.io/vitess/go/vt/proto/topodata" @@ -60,66 +58,9 @@ type testMaterializerEnv struct { //---------------------------------------------- // testMaterializerEnv -// EnsureNoLeaks is a helper function to fail tests if there are goroutine leaks. -// At this moment we still have a lot of goroutine leaks in the unit tests in this package. -// So we only use this while debugging and fixing the leaks. Once fixed we will use this -// in TestMain instead of just logging the number of leaked goroutines. -func EnsureNoLeaks(t testing.TB) { - if t.Failed() { - return - } - err := ensureNoGoroutines() - if err != nil { - t.Fatal(err) - } -} - -func ensureNoGoroutines() error { - // These goroutines have been found to stay around. - // Need to investigate and fix the Vitess ones at some point, if we indeed find out that they are unintended leaks. - var leaksToIgnore = []goleak.Option{ - goleak.IgnoreTopFunction("github.com/golang/glog.(*fileSink).flushDaemon"), - goleak.IgnoreTopFunction("github.com/golang/glog.(*loggingT).flushDaemon"), - goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/dbconfigs.init.0.func1"), - goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.resetAggregators"), - goleak.IgnoreTopFunction("vitess.io/vitess/go/vt/vtgate.processQueryInfo"), - goleak.IgnoreTopFunction("github.com/patrickmn/go-cache.(*janitor).Run"), - } - - const ( - // give ample time for the goroutines to exit in CI. - waitTime = 100 * time.Millisecond - numIterations = 50 // 5 seconds - ) - var err error - for i := 0; i < numIterations; i++ { - err = goleak.Find(leaksToIgnore...) - if err == nil { - return nil - } - time.Sleep(waitTime) - } - return err -} - -func testMainWrapper(m *testing.M) int { - startingNumGoRoutines := runtime.NumGoroutine() - defer func() { - numGoroutines := runtime.NumGoroutine() - if numGoroutines > startingNumGoRoutines { - log.Infof("!!!!!!!!!!!! 
Wrangler unit tests Leaked %d goroutines", numGoroutines-startingNumGoRoutines) - } - }() - _flag.ParseFlagsForTest() - return m.Run() -} - -func TestMain(m *testing.M) { - os.Exit(testMainWrapper(m)) -} - -func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.MaterializeSettings, sources, targets []string) *testMaterializerEnv { +func newTestMaterializerEnv(t *testing.T, ms *vtctldatapb.MaterializeSettings, sources, targets []string) (*testMaterializerEnv, context.Context) { t.Helper() + ctx, cancel := context.WithCancel(context.Background()) env := &testMaterializerEnv{ ms: ms, sources: sources, @@ -129,7 +70,8 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M cell: "cell", tmc: newTestMaterializerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + parser := sqlparser.NewTestParser() + env.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), env.topoServ, env.tmc) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) @@ -145,7 +87,7 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M for _, ts := range ms.TableSettings { tableName := ts.TargetTable - table, err := sqlparser.TableFromStatement(ts.SourceExpression) + table, err := parser.TableFromStatement(ts.SourceExpression) if err == nil { tableName = table.Name.String() } @@ -165,7 +107,12 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M if ms.Workflow != "" { env.expectValidation() } - return env + t.Cleanup(func() { + defer utils.EnsureNoLeaks(t) + env.close() + cancel() + }) + return env, ctx } func (env *testMaterializerEnv) expectValidation() { @@ -355,3 +302,8 @@ func (tmc *testMaterializerTMClient) ApplySchema(ctx context.Context, tablet *to return nil, nil } + +func TestMain(m *testing.M) { + _flag.ParseFlagsForTest() + os.Exit(m.Run()) +} diff --git 
a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 242bca31e49..23cae954b83 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -33,8 +33,10 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -63,10 +65,7 @@ func TestMoveTablesNoRoutingRules(t *testing.T) { SourceExpression: "select * from t1", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) @@ -91,11 +90,7 @@ func TestMigrateTables(t *testing.T) { SourceExpression: "select * from t1", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) @@ -133,12 +128,8 @@ func TestMissingTables(t *testing.T) { SourceExpression: "select * from t3", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(100, mzCheckJournal, 
&sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) @@ -197,11 +188,7 @@ func TestMoveTablesAllAndExclude(t *testing.T) { } for _, tcase := range testCases { t.Run("", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) @@ -229,11 +216,7 @@ func TestMoveTablesStopFlags(t *testing.T) { var err error t.Run("StopStartedAndStopAfterCopyFlags", func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) // insert expects flag stop_after_copy to be true @@ -259,12 +242,7 @@ func TestMigrateVSchema(t *testing.T) { SourceExpression: "select * from t1", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix, &sqltypes.Result{}) @@ -292,11 +270,8 @@ func TestCreateLookupVindexFull(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - ctx, cancel := 
context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ @@ -418,11 +393,9 @@ func TestCreateLookupVindexCreateDDL(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + vs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ @@ -638,11 +611,8 @@ func TestCreateLookupVindexSourceVSchema(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ @@ -877,11 +847,9 @@ func TestCreateLookupVindexTargetVSchema(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + sourcevs := &vschemapb.Keyspace{ Sharded: true, Vindexes: map[string]*vschemapb.Vindex{ @@ -1116,11 +1084,8 @@ func TestCreateLookupVindexSameKeyspace(t *testing.T) { SourceKeyspace: "ks", TargetKeyspace: "ks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, 
ms, []string{"0"}, []string{"0"}) specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ @@ -1228,11 +1193,8 @@ func TestCreateCustomizedVindex(t *testing.T) { SourceKeyspace: "ks", TargetKeyspace: "ks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ @@ -1341,11 +1303,8 @@ func TestCreateLookupVindexIgnoreNulls(t *testing.T) { SourceKeyspace: "ks", TargetKeyspace: "ks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ @@ -1462,11 +1421,9 @@ func TestStopAfterCopyFlag(t *testing.T) { SourceKeyspace: "ks", TargetKeyspace: "ks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + specs := &vschemapb.Keyspace{ Vindexes: map[string]*vschemapb.Vindex{ "v": { @@ -1541,7 +1498,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { defer cancel() topoServ := memorytopo.NewServer(ctx, "cell") - wr := New(logutil.NewConsoleLogger(), topoServ, nil) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), topoServ, nil) unique := map[string]*vschemapb.Vindex{ "v": { @@ -1815,11 +1772,8 @@ func TestExternalizeVindex(t *testing.T) { SourceKeyspace: "sourceks", TargetKeyspace: "targetks", } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - 
defer env.close() + env, _ := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) sourceVSchema := &vschemapb.Keyspace{ Sharded: true, @@ -1966,11 +1920,8 @@ func TestMaterializerOneToOne(t *testing.T) { topodatapb.TabletType_RDONLY, }), } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery( @@ -1984,7 +1935,7 @@ func TestMaterializerOneToOne(t *testing.T) { `rules:{match:\\"t2\\" filter:\\"select.*t3\\"} `+ `rules:{match:\\"t4\\"}`+ `}', `)+ - `'', [0-9]*, [0-9]*, 'zone1', 'primary,rdonly', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false`+ + `'', [0-9]*, [0-9]*, 'zone1', 'primary,rdonly', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false, '{}'`+ `\)`+eol, &sqltypes.Result{}, ) @@ -2010,19 +1961,16 @@ func TestMaterializerManyToOne(t *testing.T) { CreateDdl: "t2ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"-80", "80-"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"-80", "80-"}, []string{"0"}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery( 200, insertPrefix+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"-80\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"-80\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false, '{}'\)`+ `, `+ - `\('workflow', 'keyspace:\\"sourceks\\" 
shard:\\"80-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"80-\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) @@ -2044,11 +1992,8 @@ func TestMaterializerOneToMany(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2104,11 +2049,8 @@ func TestMaterializerManyToMany(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"-40", "40-"}, []string{"-80", "80-"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"-40", "40-"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2165,11 +2107,8 @@ func TestMaterializerMulticolumnVindex(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2232,11 +2171,8 @@ func TestMaterializerDeploySchema(t *testing.T) { CreateDdl: "t2ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := 
newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) delete(env.tmc.schema, "targetks.t2") @@ -2245,7 +2181,7 @@ func TestMaterializerDeploySchema(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) @@ -2273,11 +2209,8 @@ func TestMaterializerCopySchema(t *testing.T) { CreateDdl: "t2ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) delete(env.tmc.schema, "targetks.t1") @@ -2286,7 +2219,7 @@ func TestMaterializerCopySchema(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false\)`+ + `\('workflow', 'keyspace:\\"sourceks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"select.*t1\\"} rules:{match:\\"t2\\" filter:\\"select.*t3\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_targetks', 0, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) @@ -2311,11 +2244,8 @@ func TestMaterializerExplicitColumns(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - 
defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2374,11 +2304,8 @@ func TestMaterializerRenamedColumns(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2442,11 +2369,8 @@ func TestMaterializerStopAfterCopy(t *testing.T) { CreateDdl: "t2ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) env.tmc.expectVRQuery(200, insertPrefix+`.*stop_after_copy:true`, &sqltypes.Result{}) @@ -2468,11 +2392,8 @@ func TestMaterializerNoTargetVSchema(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2498,11 +2419,8 @@ func TestMaterializerNoDDL(t *testing.T) { CreateDdl: "", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) delete(env.tmc.schema, "targetks.t1") @@ -2541,8 +2459,7 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { cell: "cell", tmc: newTestMaterializerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), 
env.topoServ, env.tmc) - defer env.close() + env.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), env.topoServ, env.tmc) tabletID := 100 for _, shard := range sources { @@ -2575,11 +2492,8 @@ func TestMaterializerTableMismatchNonCopy(t *testing.T) { CreateDdl: "", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) delete(env.tmc.schema, "targetks.t1") @@ -2599,11 +2513,8 @@ func TestMaterializerTableMismatchCopy(t *testing.T) { CreateDdl: "copy", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) delete(env.tmc.schema, "targetks.t1") @@ -2623,11 +2534,8 @@ func TestMaterializerNoSourceTable(t *testing.T) { CreateDdl: "copy", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) delete(env.tmc.schema, "targetks.t1") delete(env.tmc.schema, "sourceks.t1") @@ -2648,11 +2556,8 @@ func TestMaterializerSyntaxError(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) err := env.wr.Materialize(ctx, ms) @@ -2670,11 +2575,8 @@ func TestMaterializerNotASelect(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := 
newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) err := env.wr.Materialize(ctx, ms) @@ -2692,11 +2594,8 @@ func TestMaterializerNoGoodVindex(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2741,10 +2640,8 @@ func TestMaterializerComplexVindexExpression(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2784,10 +2681,8 @@ func TestMaterializerNoVindexInExpression(t *testing.T) { CreateDdl: "t1ddl", }}, } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"-80", "80-"}) - defer env.close() + + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"-80", "80-"}) vs := &vschemapb.Keyspace{ Sharded: true, @@ -2870,7 +2765,7 @@ func TestStripForeignKeys(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableForeignKeys(tc.ddl) + newDDL, err := stripTableForeignKeys(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -2944,7 +2839,7 @@ func TestStripConstraints(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableConstraints(tc.ddl) + newDDL, err := stripTableConstraints(tc.ddl, 
sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -3242,9 +3137,7 @@ func TestMaterializerSourceShardSelection(t *testing.T) { for _, tcase := range testcases { t.Run(tcase.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, tcase.sourceShards, tcase.targetShards) + env, ctx := newTestMaterializerEnv(t, ms, tcase.sourceShards, tcase.targetShards) if err := env.topoServ.SaveVSchema(ctx, "targetks", tcase.targetVSchema); err != nil { t.Fatal(err) } @@ -3253,7 +3146,7 @@ func TestMaterializerSourceShardSelection(t *testing.T) { t.Fatal(err) } } - defer env.close() + for i, targetShard := range tcase.targetShards { tabletID := 200 + i*10 env.tmc.expectVRQuery(tabletID, mzSelectFrozenQuery, &sqltypes.Result{}) @@ -3291,16 +3184,12 @@ func TestMoveTablesDDLFlag(t *testing.T) { SourceExpression: "select * from t1", }}, } - ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) - defer cancel() for onDDLAction := range binlogdatapb.OnDDLAction_value { t.Run(fmt.Sprintf("OnDDL Flag:%v", onDDLAction), func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) + env, ctx := newTestMaterializerEnv(t, ms, []string{"0"}, []string{"0"}) + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() - env := newTestMaterializerEnv(t, ctx, ms, []string{"0"}, []string{"0"}) - defer env.close() - env.tmc.expectVRQuery(100, mzCheckJournal, &sqltypes.Result{}) env.tmc.expectVRQuery(200, mzSelectFrozenQuery, &sqltypes.Result{}) if onDDLAction == binlogdatapb.OnDDLAction_IGNORE.String() { @@ -3520,8 +3409,6 @@ func TestAddTablesToVSchema(t *testing.T) { // means that even if the target keyspace is sharded, the source // does not need to perform the in_keyrange filtering. 
func TestKeyRangesEqualOptimization(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() workflow := "testwf" sourceKs := "sourceks" targetKs := "targetks" @@ -3696,9 +3583,9 @@ func TestKeyRangesEqualOptimization(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - env := newTestMaterializerEnv(t, ctx, tc.ms, tc.sourceShards, tc.targetShards) - defer env.close() - + env, ctx := newTestMaterializerEnv(t, tc.ms, tc.sourceShards, tc.targetShards) + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() // Target is always sharded. err := env.wr.ts.SaveVSchema(ctx, targetKs, targetVSchema) require.NoError(t, err, "SaveVSchema failed: %v", err) diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index fbeec55cbbc..1a3a45cf99b 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -26,7 +26,6 @@ import ( "time" "vitess.io/vitess/go/event" - "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools/events" @@ -60,7 +59,7 @@ func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string ev := &events.Reparent{} // do the work - err = grpcvtctldserver.NewVtctldServer(wr.ts).InitShardPrimaryLocked(ctx, ev, &vtctldatapb.InitShardPrimaryRequest{ + err = grpcvtctldserver.NewVtctldServer(wr.env, wr.ts).InitShardPrimaryLocked(ctx, ev, &vtctldatapb.InitShardPrimaryRequest{ Keyspace: keyspace, Shard: shard, PrimaryElectTabletAlias: primaryElectTabletAlias, @@ -76,16 +75,16 @@ func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string // PlannedReparentShard will make the provided tablet the primary for the shard, // when both the current and new primary are reachable and in good shape. 
-func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias, avoidTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration) (err error) { +func (wr *Wrangler) PlannedReparentShard( + ctx context.Context, + keyspace, shard string, + opts reparentutil.PlannedReparentOptions, +) (err error) { _, err = reparentutil.NewPlannedReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, keyspace, shard, - reparentutil.PlannedReparentOptions{ - AvoidPrimaryAlias: avoidTabletAlias, - NewPrimaryAlias: primaryElectTabletAlias, - WaitReplicasTimeout: waitReplicasTimeout, - }, + opts, ) return err @@ -93,18 +92,12 @@ func (wr *Wrangler) PlannedReparentShard(ctx context.Context, keyspace, shard st // EmergencyReparentShard will make the provided tablet the primary for // the shard, when the old primary is completely unreachable. -func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, primaryElectTabletAlias *topodatapb.TabletAlias, waitReplicasTimeout time.Duration, ignoredTablets sets.Set[string], preventCrossCellPromotion bool, waitForAllTablets bool) (err error) { +func (wr *Wrangler) EmergencyReparentShard(ctx context.Context, keyspace, shard string, opts reparentutil.EmergencyReparentOptions) (err error) { _, err = reparentutil.NewEmergencyReparenter(wr.ts, wr.tmc, wr.logger).ReparentShard( ctx, keyspace, shard, - reparentutil.EmergencyReparentOptions{ - NewPrimaryAlias: primaryElectTabletAlias, - WaitReplicasTimeout: waitReplicasTimeout, - IgnoreReplicas: ignoredTablets, - PreventCrossCellPromotion: preventCrossCellPromotion, - WaitAllTablets: waitForAllTablets, - }, + opts, ) return err diff --git a/go/vt/wrangler/resharder.go b/go/vt/wrangler/resharder.go index a81c3e8d598..536f4c643cc 100644 --- a/go/vt/wrangler/resharder.go +++ b/go/vt/wrangler/resharder.go @@ -164,6 +164,9 @@ func (wr *Wrangler) buildResharder(ctx context.Context, keyspace, workflow strin return rs, nil } 
+// validateTargets ensures that the target shards have no existing +// VReplication workflow streams as that is an invalid starting +// state for the non-serving shards involved in a Reshard. func (rs *resharder) validateTargets(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetPrimary := rs.targetPrimaries[target.ShardName()] @@ -298,6 +301,8 @@ func (rs *resharder) copySchema(ctx context.Context) error { return err } +// createStreams creates all of the VReplication streams that +// need to now exist on the new shards. func (rs *resharder) createStreams(ctx context.Context) error { var excludeRules []*binlogdatapb.Rule for tableName, table := range rs.vschema.Tables { @@ -359,7 +364,14 @@ func (rs *resharder) createStreams(ctx context.Context) error { func (rs *resharder) startStreams(ctx context.Context) error { err := rs.forAll(rs.targetShards, func(target *topo.ShardInfo) error { targetPrimary := rs.targetPrimaries[target.ShardName()] - query := fmt.Sprintf("update _vt.vreplication set state='Running' where db_name=%s", encodeString(targetPrimary.DbName())) + // This is the rare case where we truly want to update every stream/record + // because we've already confirmed that there were no existing workflows + // on the shards when we started, and we want to start all of the ones + // that we've created on the new shards as we're migrating them. + // We use the comment directive to indicate that this is intentional + // and OK. 
+ query := fmt.Sprintf("update /*vt+ %s */ _vt.vreplication set state='Running' where db_name=%s", + vreplication.AllowUnsafeWriteCommentDirective, encodeString(targetPrimary.DbName())) if _, err := rs.wr.tmc.VReplicationExec(ctx, targetPrimary.Tablet, query); err != nil { return vterrors.Wrapf(err, "VReplicationExec(%v, %s)", targetPrimary.Tablet, query) } diff --git a/go/vt/wrangler/resharder_env_test.go b/go/vt/wrangler/resharder_env_test.go index ee39c7e5eaa..3fbdde6f52e 100644 --- a/go/vt/wrangler/resharder_env_test.go +++ b/go/vt/wrangler/resharder_env_test.go @@ -31,6 +31,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" querypb "vitess.io/vitess/go/vt/proto/query" @@ -93,7 +94,7 @@ func newTestResharderEnv(t *testing.T, ctx context.Context, sources, targets []s cell: "cell", tmc: newTestResharderTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), env.topoServ, env.tmc) initTopo(t, env.topoServ, "ks", sources, targets, []string{"cell"}) tabletID := 100 for _, shard := range sources { diff --git a/go/vt/wrangler/resharder_test.go b/go/vt/wrangler/resharder_test.go index 40d31d36e1f..b4a939775ca 100644 --- a/go/vt/wrangler/resharder_test.go +++ b/go/vt/wrangler/resharder_test.go @@ -22,19 +22,19 @@ import ( "strings" "testing" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/vtgate/vindexes" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" - "vitess.io/vitess/go/vt/vtgate/vindexes" ) const rsSelectFrozenQuery = "select 1 from _vt.vreplication where db_name='vt_ks' and 
message='FROZEN' and workflow_sub_type != 1" -const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys\) values ` +const insertPrefix = `/insert into _vt.vreplication\(workflow, source, pos, max_tps, max_replication_lag, cell, tablet_types, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, defer_secondary_keys, options\) values ` const eol = "$" func TestResharderOneToMany(t *testing.T) { @@ -94,18 +94,18 @@ func TestResharderOneToMany(t *testing.T) { 200, insertPrefix+ `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '`+ - tc.cells+`', '`+tc.tabletTypes+`', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+eol, + tc.cells+`', '`+tc.tabletTypes+`', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+eol, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '`+ - tc.cells+`', '`+tc.tabletTypes+`', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+eol, + tc.cells+`', '`+tc.tabletTypes+`', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(210, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), 
env.keyspace, env.workflow, env.sources, env.targets, true, tc.cells, tc.tabletTypes, defaultOnDDL, true, false, false) require.NoError(t, err) @@ -137,13 +137,13 @@ func TestResharderManyToOne(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"-80\\" filter:{rules:{match:\\"/.*\\" filter:\\"-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\).*`+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"80-\\" filter:{rules:{match:\\"/.*\\" filter:\\"-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"-80\\" filter:{rules:{match:\\"/.*\\" filter:\\"-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\).*`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"80-\\" filter:{rules:{match:\\"/.*\\" filter:\\"-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", defaultOnDDL, true, false, false) assert.NoError(t, err) @@ -172,21 +172,21 @@ func TestResharderManyToMany(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"-40\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\).*`+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"40-\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" 
shard:\\"-40\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\).*`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"40-\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"40-\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"40-\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(210, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", defaultOnDDL, true, false, false) assert.NoError(t, err) @@ -228,20 +228,20 @@ func TestResharderOneRefTable(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" 
filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(210, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", defaultOnDDL, true, false, false) assert.NoError(t, err) @@ -283,14 +283,14 @@ func TestReshardStopFlags(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"-80\\"}} stop_after_copy:true', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"-80\\"}} stop_after_copy:true', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, 
&sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"80-\\"}} stop_after_copy:true', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"80-\\"}} stop_after_copy:true', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) @@ -347,24 +347,24 @@ func TestResharderOneRefStream(t *testing.T) { ) env.tmc.expectVRQuery(100, fmt.Sprintf("select workflow, source, cell, tablet_types from _vt.vreplication where db_name='vt_%s' and message != 'FROZEN'", env.keyspace), result) - refRow := `\('t1', 'keyspace:\\"ks1\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\"}}', '', [0-9]*, [0-9]*, 'cell1', 'primary,replica', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)` + refRow := `\('t1', 'keyspace:\\"ks1\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\"}}', '', [0-9]*, [0-9]*, 'cell1', 'primary,replica', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)` env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\).*`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\).*`+ refRow+eol, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 
'vt_ks', 4, 0, false\).*`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"t1\\" filter:\\"exclude\\"} rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\).*`+ refRow+eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(210, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", defaultOnDDL, true, false, false) assert.NoError(t, err) @@ -430,20 +430,20 @@ func TestResharderNoRefStream(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, 
"update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(210, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, true, "", "", defaultOnDDL, true, false, false) assert.NoError(t, err) @@ -472,20 +472,20 @@ func TestResharderCopySchema(t *testing.T) { env.tmc.expectVRQuery( 200, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"-80\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) env.tmc.expectVRQuery( 210, insertPrefix+ - `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false\)`+ + `\('resharderTest', 'keyspace:\\"ks\\" shard:\\"0\\" filter:{rules:{match:\\"/.*\\" filter:\\"80-\\"}}', '', [0-9]*, [0-9]*, '', '', [0-9]*, 0, 'Stopped', 'vt_ks', 4, 0, false, '{}'\)`+ eol, &sqltypes.Result{}, ) - env.tmc.expectVRQuery(200, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) - env.tmc.expectVRQuery(210, "update _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(200, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where 
db_name='vt_ks'", &sqltypes.Result{}) + env.tmc.expectVRQuery(210, "update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state='Running' where db_name='vt_ks'", &sqltypes.Result{}) err := env.wr.Reshard(context.Background(), env.keyspace, env.workflow, env.sources, env.targets, false, "", "", defaultOnDDL, true, false, false) assert.NoError(t, err) diff --git a/go/vt/wrangler/schema.go b/go/vt/wrangler/schema.go index 84bc078f240..ae24106c97f 100644 --- a/go/vt/wrangler/schema.go +++ b/go/vt/wrangler/schema.go @@ -17,9 +17,9 @@ limitations under the License. package wrangler import ( - "bytes" "context" "fmt" + "strings" "sync" "text/template" "time" @@ -262,7 +262,7 @@ func (wr *Wrangler) CopySchemaShard(ctx context.Context, sourceTabletAlias *topo } } - // Notify Replicass to reload schema. This is best-effort. + // Notify Replicas to reload schema. This is best-effort. reloadCtx, cancel := context.WithTimeout(ctx, waitReplicasTimeout) defer cancel() resp, err := wr.VtctldServer().ReloadSchemaShard(reloadCtx, &vtctldatapb.ReloadSchemaShardRequest{ @@ -307,8 +307,8 @@ func (wr *Wrangler) applySQLShard(ctx context.Context, tabletInfo *topo.TabletIn // fillStringTemplate returns the string template filled func fillStringTemplate(tmpl string, vars any) (string, error) { myTemplate := template.Must(template.New("").Parse(tmpl)) - data := new(bytes.Buffer) - if err := myTemplate.Execute(data, vars); err != nil { + var data strings.Builder + if err := myTemplate.Execute(&data, vars); err != nil { return "", err } return data.String(), nil diff --git a/go/vt/wrangler/shard.go b/go/vt/wrangler/shard.go index c1c65b0407b..6b74f32031e 100644 --- a/go/vt/wrangler/shard.go +++ b/go/vt/wrangler/shard.go @@ -113,7 +113,7 @@ func (wr *Wrangler) DeleteShard(ctx context.Context, keyspace, shard string, rec // GetTabletMap ignores ErrNoNode, and it's good for // our purpose, it means a tablet was deleted but is // still referenced. 
- tabletMap, err := wr.ts.GetTabletMap(ctx, aliases) + tabletMap, err := wr.ts.GetTabletMap(ctx, aliases, nil) if err != nil { return fmt.Errorf("GetTabletMap() failed: %v", err) } diff --git a/go/vt/wrangler/split.go b/go/vt/wrangler/split.go index ba67fd8efef..197bfe4cc66 100644 --- a/go/vt/wrangler/split.go +++ b/go/vt/wrangler/split.go @@ -40,7 +40,7 @@ const ( // on a Shard. func (wr *Wrangler) SetSourceShards(ctx context.Context, keyspace, shard string, sources []*topodatapb.TabletAlias, tables []string) error { // Read the source tablets. - sourceTablets, err := wr.ts.GetTabletMap(ctx, sources) + sourceTablets, err := wr.ts.GetTabletMap(ctx, sources, nil) if err != nil { return err } @@ -101,7 +101,7 @@ func (wr *Wrangler) WaitForFilteredReplication(ctx context.Context, keyspace, sh return fmt.Errorf("failed to run explicit healthcheck on tablet: %v err: %v", tabletInfo, err) } - conn, err := tabletconn.GetDialer()(tabletInfo.Tablet, grpcclient.FailFast(false)) + conn, err := tabletconn.GetDialer()(ctx, tabletInfo.Tablet, grpcclient.FailFast(false)) if err != nil { return fmt.Errorf("cannot connect to tablet %v: %v", alias, err) } diff --git a/go/vt/wrangler/stream_migrater_test.go b/go/vt/wrangler/stream_migrater_test.go index 98828261b27..6432a188f36 100644 --- a/go/vt/wrangler/stream_migrater_test.go +++ b/go/vt/wrangler/stream_migrater_test.go @@ -939,7 +939,7 @@ func TestStreamMigrateSyncFail(t *testing.T) { tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow in ('t1')", &sqltypes.Result{}, nil) tme.dbTargetClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow in ('t1')", &sqltypes.Result{}, nil) - tme.expectCancelMigration() + tme.expectCancelStreamMigrations() _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, false, false, true, false, false) want := "does not match" @@ -1022,8 +1022,8 @@ func TestStreamMigrateCancel(t *testing.T) { // 
sm.migrateStreams->->restart source streams tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow != 'test_reverse'", resultid12, nil) tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow != 'test_reverse'", resultid12, nil) - tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', stop_pos = null, message = '' where id in (1, 2)", &sqltypes.Result{}, nil) - tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', stop_pos = null, message = '' where id in (1, 2)", &sqltypes.Result{}, nil) + tme.dbSourceClients[0].addQuery("update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running', stop_pos = null, message = '' where id in (1, 2)", &sqltypes.Result{}, nil) + tme.dbSourceClients[1].addQuery("update /*vt+ ALLOW_UNSAFE_VREPLICATION_WRITE */ _vt.vreplication set state = 'Running', stop_pos = null, message = '' where id in (1, 2)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 1", runningResult(1), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 1", runningResult(1), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 2", runningResult(2), nil) @@ -1173,7 +1173,7 @@ func TestStreamMigrateCancelWithStoppedStreams(t *testing.T) { tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow in ('t1t2')", &sqltypes.Result{}, nil) tme.dbTargetClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow in ('t1t2')", &sqltypes.Result{}, nil) - tme.expectCancelMigration() + tme.expectCancelStreamMigrations() _, _, err = tme.wr.SwitchWrites(ctx, tme.targetKeyspace, "test", 1*time.Second, true, false, false, false, false) if err != nil { diff --git a/go/vt/wrangler/switcher.go b/go/vt/wrangler/switcher.go index 
0e1f33b90ea..c9e22f4669e 100644 --- a/go/vt/wrangler/switcher.go +++ b/go/vt/wrangler/switcher.go @@ -109,7 +109,7 @@ func (r *switcher) stopSourceWrites(ctx context.Context) error { } func (r *switcher) stopStreams(ctx context.Context, sm *workflow.StreamMigrator) ([]string, error) { - return sm.StopStreams(ctx) + return sm.LegacyStopStreams(ctx) } func (r *switcher) cancelMigration(ctx context.Context, sm *workflow.StreamMigrator) { diff --git a/go/vt/wrangler/switcher_dry_run.go b/go/vt/wrangler/switcher_dry_run.go index 7b21ac65fe0..6c1b48bb5c6 100644 --- a/go/vt/wrangler/switcher_dry_run.go +++ b/go/vt/wrangler/switcher_dry_run.go @@ -24,7 +24,8 @@ import ( "strings" "time" - "vitess.io/vitess/go/maps2" + "golang.org/x/exp/maps" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/vtctl/workflow" @@ -224,7 +225,7 @@ func (dr *switcherDryRun) stopStreams(ctx context.Context, sm *workflow.StreamMi } func (dr *switcherDryRun) cancelMigration(ctx context.Context, sm *workflow.StreamMigrator) { - dr.drLog.Log("Cancel stream migrations as requested") + dr.drLog.Log("Cancel migration as requested") } func (dr *switcherDryRun) lockKeyspace(ctx context.Context, keyspace, _ string) (context.Context, func(*error), error) { @@ -399,7 +400,7 @@ func (dr *switcherDryRun) resetSequences(ctx context.Context) error { } func (dr *switcherDryRun) initializeTargetSequences(ctx context.Context, sequencesByBackingTable map[string]*sequenceMetadata) error { - sortedBackingTableNames := maps2.Keys(sequencesByBackingTable) + sortedBackingTableNames := maps.Keys(sequencesByBackingTable) slices.Sort(sortedBackingTableNames) dr.drLog.Log(fmt.Sprintf("The following sequence backing tables used by tables being moved will be initialized: %s", strings.Join(sortedBackingTableNames, ","))) diff --git a/go/vt/wrangler/tablet.go b/go/vt/wrangler/tablet.go index 17c547ade58..fdc6f9a92ac 100644 --- a/go/vt/wrangler/tablet.go +++ b/go/vt/wrangler/tablet.go @@ -196,6 +196,21 @@ func (wr 
*Wrangler) ExecuteFetchAsDba(ctx context.Context, tabletAlias *topodata return resp.Result, nil } +// ExecuteMultiFetchAsDba executes one or more queries remotely using the DBA pool +func (wr *Wrangler) ExecuteMultiFetchAsDba(ctx context.Context, tabletAlias *topodatapb.TabletAlias, sql string, maxRows int, disableBinlogs bool, reloadSchema bool) ([]*querypb.QueryResult, error) { + resp, err := wr.VtctldServer().ExecuteMultiFetchAsDBA(ctx, &vtctldatapb.ExecuteMultiFetchAsDBARequest{ + TabletAlias: tabletAlias, + Sql: sql, + MaxRows: int64(maxRows), + DisableBinlogs: disableBinlogs, + ReloadSchema: reloadSchema, + }) + if err != nil { + return nil, err + } + return resp.Results, nil +} + // VReplicationExec executes a query remotely using the DBA pool func (wr *Wrangler) VReplicationExec(ctx context.Context, tabletAlias *topodatapb.TabletAlias, query string) (*querypb.QueryResult, error) { ti, err := wr.ts.GetTablet(ctx, tabletAlias) diff --git a/go/vt/wrangler/tablet_test.go b/go/vt/wrangler/tablet_test.go index 1350b6b574c..c5ae032fe07 100644 --- a/go/vt/wrangler/tablet_test.go +++ b/go/vt/wrangler/tablet_test.go @@ -25,6 +25,7 @@ import ( topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" ) // TestInitTabletShardConversion makes sure InitTablet converts the @@ -36,7 +37,7 @@ func TestInitTabletShardConversion(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -70,7 +71,7 @@ func TestDeleteTabletBasic(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -102,7 +103,7 
@@ func TestDeleteTabletTruePrimary(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -149,7 +150,7 @@ func TestDeleteTabletFalsePrimary(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, nil) tablet1 := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -201,7 +202,7 @@ func TestDeleteTabletShardNonExisting(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, nil) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/wrangler/testdata/show-80dash.json b/go/vt/wrangler/testdata/show-80dash.json new file mode 100644 index 00000000000..f1b14bfccf0 --- /dev/null +++ b/go/vt/wrangler/testdata/show-80dash.json @@ -0,0 +1,71 @@ +{ + "Workflow": "wrWorkflow", + "SourceLocation": { + "Keyspace": "source", + "Shards": [ + "0" + ] + }, + "TargetLocation": { + "Keyspace": "target", + "Shards": [ + "80-" + ] + }, + "MaxVReplicationLag": 0, + "MaxVReplicationTransactionLag": 0, + "Frozen": false, + "ShardStatuses": { + "80-/zone1-0000000210": { + "PrimaryReplicationStatuses": [ + { + "Shard": "80-", + "Tablet": "zone1-0000000210", + "ID": 1, + "Bls": { + "keyspace": "source", + "shard": "0", + "filter": { + "rules": [ + { + "match": "t1" + }, + { + "match": "t2" + } + ] + } + }, + "Pos": "14b68925-696a-11ea-aee7-fec597a91f5e:1-3", + "StopPos": "", + "State": "Copying", + "DBName": "vt_target", + "TransactionTimestamp": 0, + "TimeUpdated": 1234, + "TimeHeartbeat": 1234, + "TimeThrottled": 0, + "ComponentThrottled": "", + "Message": "", + "Tags": "", + "WorkflowType": "Materialize", + 
"WorkflowSubType": "None", + "CopyState": [ + { + "Table": "t1", + "LastPK": "pk1" + }, + { + "Table": "t2", + "LastPK": "pk2" + } + ], + "RowsCopied": 1000 + } + ], + "TabletControls": null, + "PrimaryIsServing": true + } + }, + "SourceTimeZone": "", + "TargetTimeZone": "" +} \ No newline at end of file diff --git a/go/vt/wrangler/testdata/show-all-shards.json b/go/vt/wrangler/testdata/show-all-shards.json new file mode 100644 index 00000000000..4868c164a72 --- /dev/null +++ b/go/vt/wrangler/testdata/show-all-shards.json @@ -0,0 +1,121 @@ +{ + "Workflow": "wrWorkflow", + "SourceLocation": { + "Keyspace": "source", + "Shards": [ + "0" + ] + }, + "TargetLocation": { + "Keyspace": "target", + "Shards": [ + "-80", + "80-" + ] + }, + "MaxVReplicationLag": 0, + "MaxVReplicationTransactionLag": 0, + "Frozen": false, + "ShardStatuses": { + "-80/zone1-0000000200": { + "PrimaryReplicationStatuses": [ + { + "Shard": "-80", + "Tablet": "zone1-0000000200", + "ID": 1, + "Bls": { + "keyspace": "source", + "shard": "0", + "filter": { + "rules": [ + { + "match": "t1" + }, + { + "match": "t2" + } + ] + } + }, + "Pos": "14b68925-696a-11ea-aee7-fec597a91f5e:1-3", + "StopPos": "", + "State": "Copying", + "DBName": "vt_target", + "TransactionTimestamp": 0, + "TimeUpdated": 1234, + "TimeHeartbeat": 1234, + "TimeThrottled": 0, + "ComponentThrottled": "", + "Message": "", + "Tags": "", + "WorkflowType": "Materialize", + "WorkflowSubType": "None", + "CopyState": [ + { + "Table": "t1", + "LastPK": "pk1" + }, + { + "Table": "t2", + "LastPK": "pk2" + } + ], + "RowsCopied": 1000 + } + ], + "TabletControls": null, + "PrimaryIsServing": true + }, + "80-/zone1-0000000210": { + "PrimaryReplicationStatuses": [ + { + "Shard": "80-", + "Tablet": "zone1-0000000210", + "ID": 1, + "Bls": { + "keyspace": "source", + "shard": "0", + "filter": { + "rules": [ + { + "match": "t1" + }, + { + "match": "t2" + } + ] + } + }, + "Pos": "14b68925-696a-11ea-aee7-fec597a91f5e:1-3", + "StopPos": "", + "State": 
"Copying", + "DBName": "vt_target", + "TransactionTimestamp": 0, + "TimeUpdated": 1234, + "TimeHeartbeat": 1234, + "TimeThrottled": 0, + "ComponentThrottled": "", + "Message": "", + "Tags": "", + "WorkflowType": "Materialize", + "WorkflowSubType": "None", + "CopyState": [ + { + "Table": "t1", + "LastPK": "pk1" + }, + { + "Table": "t2", + "LastPK": "pk2" + } + ], + "RowsCopied": 1000 + } + ], + "TabletControls": null, + "PrimaryIsServing": true + } + }, + "SourceTimeZone": "", + "TargetTimeZone": "" +} \ No newline at end of file diff --git a/go/vt/wrangler/testdata/show-dash80.json b/go/vt/wrangler/testdata/show-dash80.json new file mode 100644 index 00000000000..7e810278e36 --- /dev/null +++ b/go/vt/wrangler/testdata/show-dash80.json @@ -0,0 +1,71 @@ +{ + "Workflow": "wrWorkflow", + "SourceLocation": { + "Keyspace": "source", + "Shards": [ + "0" + ] + }, + "TargetLocation": { + "Keyspace": "target", + "Shards": [ + "-80" + ] + }, + "MaxVReplicationLag": 0, + "MaxVReplicationTransactionLag": 0, + "Frozen": false, + "ShardStatuses": { + "-80/zone1-0000000200": { + "PrimaryReplicationStatuses": [ + { + "Shard": "-80", + "Tablet": "zone1-0000000200", + "ID": 1, + "Bls": { + "keyspace": "source", + "shard": "0", + "filter": { + "rules": [ + { + "match": "t1" + }, + { + "match": "t2" + } + ] + } + }, + "Pos": "14b68925-696a-11ea-aee7-fec597a91f5e:1-3", + "StopPos": "", + "State": "Copying", + "DBName": "vt_target", + "TransactionTimestamp": 0, + "TimeUpdated": 1234, + "TimeHeartbeat": 1234, + "TimeThrottled": 0, + "ComponentThrottled": "", + "Message": "", + "Tags": "", + "WorkflowType": "Materialize", + "WorkflowSubType": "None", + "CopyState": [ + { + "Table": "t1", + "LastPK": "pk1" + }, + { + "Table": "t2", + "LastPK": "pk2" + } + ], + "RowsCopied": 1000 + } + ], + "TabletControls": null, + "PrimaryIsServing": true + } + }, + "SourceTimeZone": "", + "TargetTimeZone": "" +} \ No newline at end of file diff --git a/go/vt/wrangler/testlib/backup_test.go 
b/go/vt/wrangler/testlib/backup_test.go index 787e4ce1946..5e73d266705 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -27,13 +27,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/replication" - - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" @@ -41,12 +40,15 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) +const mysqlShutdownTimeout = 1 * time.Minute + type compressionDetails struct { CompressionEngineName string ExternalCompressorCmd string @@ -90,8 +92,8 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Set up mock query results. 
@@ -179,24 +181,22 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { } sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // This first set of STOP and START commands come from // the builtinBackupEngine implementation which stops the replication // while taking the backup - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", // These commands come from SetReplicationSource RPC called // to set the correct primary and semi-sync after Backup has concluded. // Since the primary hasn't changed, we only restart replication after fixing semi-sync. - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", } sourceTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ - "SHOW DATABASES": {}, - "RESET MASTER": {}, - "SET GLOBAL gtid_purged": {}, + "SHOW DATABASES": {}, } sourceTablet.StartActionLoop(t, wr) defer sourceTablet.StopActionLoop(t) @@ -232,20 +232,21 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { } destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "RESET SLAVE ALL", - "FAKE SET SLAVE POSITION", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE RESET REPLICA ALL", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ - "SHOW DATABASES": {}, - "RESET MASTER": {}, - "SET GLOBAL gtid_purged": {}, + "SHOW DATABASES": {}, } 
destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) @@ -263,7 +264,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { RelayLogInfoPath: path.Join(root, "relay-log.info"), } - err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */, time.Time{} /* restoreToTimestamp */, "") + err = destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* backupTime */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout) if err != nil { return err } @@ -287,22 +288,23 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { } primary.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ - "SHOW DATABASES": {}, - "RESET MASTER": {}, - "SET GLOBAL gtid_purged": {}, + "SHOW DATABASES": {}, } primary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "RESET SLAVE ALL", - "FAKE SET SLAVE POSITION", - "FAKE SET MASTER", - "START SLAVE", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE RESET REPLICA ALL", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "FAKE SET SOURCE", + "START REPLICA", } primary.FakeMysqlDaemon.SetReplicationPositionPos = primary.FakeMysqlDaemon.CurrentPrimaryPosition // restore primary from latest backup - require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, ""), + require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* 
waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout), "RestoreData failed") // tablet was created as PRIMARY, so it's baseTabletType is PRIMARY assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type) @@ -318,7 +320,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { } // Test restore with the backup timestamp - require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime, time.Time{} /* restoreToTimestamp */, ""), + require.NoError(t, primary.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, backupTime, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout), "RestoreData with backup timestamp failed") assert.Equal(t, topodatapb.TabletType_PRIMARY, primary.Tablet.Type) assert.False(t, primary.FakeMysqlDaemon.Replicating) @@ -342,8 +344,8 @@ func TestBackupRestoreLagged(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Set up mock query results. 
@@ -417,19 +419,19 @@ func TestBackupRestoreLagged(t *testing.T) { sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)} sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // This first set of STOP and START commands come from // the builtinBackupEngine implementation which stops the replication // while taking the backup - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", // These commands come from SetReplicationSource RPC called // to set the correct primary and semi-sync after Backup has concluded. // Since the primary hasn't changed, we only restart replication after fixing semi-sync. - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", } sourceTablet.StartActionLoop(t, wr) defer sourceTablet.StopActionLoop(t) @@ -486,20 +488,21 @@ func TestBackupRestoreLagged(t *testing.T) { } destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "RESET SLAVE ALL", - "FAKE SET SLAVE POSITION", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE RESET REPLICA ALL", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ - "SHOW DATABASES": {}, - "RESET MASTER": {}, - "SET GLOBAL gtid_purged": {}, + "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = destTablet.FakeMysqlDaemon.CurrentPrimaryPosition 
destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) @@ -519,7 +522,7 @@ func TestBackupRestoreLagged(t *testing.T) { errCh = make(chan error, 1) go func(ctx context.Context, tablet *FakeTablet) { - errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "") + errCh <- tablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout) }(ctx, destTablet) timer = time.NewTicker(1 * time.Second) @@ -561,8 +564,8 @@ func TestRestoreUnreachablePrimary(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Set up mock query results. 
@@ -635,19 +638,19 @@ func TestRestoreUnreachablePrimary(t *testing.T) { sourceTablet.FakeMysqlDaemon.SetReplicationSourceInputs = []string{fmt.Sprintf("%s:%d", primary.Tablet.MysqlHostname, primary.Tablet.MysqlPort)} sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // This first set of STOP and START commands come from // the builtinBackupEngine implementation which stops the replication // while taking the backup - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", // These commands come from SetReplicationSource RPC called // to set the correct primary and semi-sync after Backup has concluded. // Since the primary hasn't changed, we only restart replication after fixing semi-sync. - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", } sourceTablet.StartActionLoop(t, wr) defer sourceTablet.StopActionLoop(t) @@ -676,20 +679,21 @@ func TestRestoreUnreachablePrimary(t *testing.T) { } destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "RESET SLAVE ALL", - "FAKE SET SLAVE POSITION", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE RESET REPLICA ALL", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ - "SHOW DATABASES": {}, - "RESET MASTER": {}, - "SET GLOBAL gtid_purged": {}, + "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition 
destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) @@ -713,7 +717,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { // set a short timeout so that we don't have to wait 30 seconds topo.RemoteOperationTimeout = 2 * time.Second // Restore should still succeed - require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "")) + require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout)) // verify the full status require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.True(t, destTablet.FakeMysqlDaemon.Replicating) @@ -736,8 +740,8 @@ func TestDisableActiveReparents(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Set up mock query results. 
@@ -809,7 +813,7 @@ func TestDisableActiveReparents(t *testing.T) { }, } sourceTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", + "STOP REPLICA", } sourceTablet.StartActionLoop(t, wr) defer sourceTablet.StopActionLoop(t) @@ -842,14 +846,15 @@ func TestDisableActiveReparents(t *testing.T) { }, } destTablet.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "RESET SLAVE ALL", - "FAKE SET SLAVE POSITION", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", + "STOP REPLICA", + "FAKE RESET REPLICA ALL", + "FAKE RESET BINARY LOGS AND GTIDS", + "FAKE SET GLOBAL gtid_purged", } destTablet.FakeMysqlDaemon.FetchSuperQueryMap = map[string]*sqltypes.Result{ - "SHOW DATABASES": {}, - "RESET MASTER": {}, - "SET GLOBAL gtid_purged": {}, + "SHOW DATABASES": {}, } destTablet.FakeMysqlDaemon.SetReplicationPositionPos = sourceTablet.FakeMysqlDaemon.CurrentPrimaryPosition destTablet.FakeMysqlDaemon.SetReplicationSourceInputs = append(destTablet.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) @@ -867,7 +872,7 @@ func TestDisableActiveReparents(t *testing.T) { RelayLogInfoPath: path.Join(root, "relay-log.info"), } - require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "")) + require.NoError(t, destTablet.TM.RestoreData(ctx, logutil.NewConsoleLogger(), 0 /* waitForBackupInterval */, false /* deleteBeforeRestore */, time.Time{} /* restoreFromBackupTs */, time.Time{} /* restoreToTimestamp */, "", mysqlShutdownTimeout)) // verify the full status require.NoError(t, destTablet.FakeMysqlDaemon.CheckSuperQueryList(), "destTablet.FakeMysqlDaemon.CheckSuperQueryList failed") assert.False(t, destTablet.FakeMysqlDaemon.Replicating) @@ -889,9 +894,9 @@ func needInnoDBRedoLogSubdir() (needIt bool, err error) { return needIt, err } 
versionStr := fmt.Sprintf("%d.%d.%d", sv.Major, sv.Minor, sv.Patch) - _, capableOf, _ := mysql.GetFlavor(versionStr, nil) + capableOf := mysql.ServerVersionCapableOf(versionStr) if capableOf == nil { return needIt, fmt.Errorf("cannot determine database flavor details for version %s", versionStr) } - return capableOf(mysql.DynamicRedoLogCapacityFlavorCapability) + return capableOf(capabilities.DynamicRedoLogCapacityFlavorCapability) } diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index 866ec2fe931..d91fdad76eb 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -22,14 +22,14 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -56,8 +56,8 @@ func copySchema(t *testing.T, useShardAsSource bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { @@ -75,9 +75,9 @@ func copySchema(t *testing.T, useShardAsSource bool) { topodatapb.TabletType_RDONLY, sourceRdonlyDb, TabletKeyspaceShard(t, "ks", "-80")) sourceRdonly.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = 
[]string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } sourceRdonly.FakeMysqlDaemon.SetReplicationSourceInputs = append(sourceRdonly.FakeMysqlDaemon.SetReplicationSourceInputs, fmt.Sprintf("%v:%v", sourcePrimary.Tablet.MysqlHostname, sourcePrimary.Tablet.MysqlPort)) diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 99cc1839186..96f9df74405 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -25,15 +25,16 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -50,8 +51,8 @@ func TestEmergencyReparentShard(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -94,7 +95,7 @@ func TestEmergencyReparentShard(t *testing.T) { } newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, 
newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition) newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE IO_THREAD", + "STOP REPLICA IO_THREAD", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.FakeMysqlDaemon.PromoteResult = replication.Position{ @@ -114,7 +115,7 @@ func TestEmergencyReparentShard(t *testing.T) { oldPrimary.FakeMysqlDaemon.ReplicationStatusError = mysql.ErrNotReplica oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", + "STOP REPLICA", } oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) @@ -139,13 +140,13 @@ func TestEmergencyReparentShard(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE IO_THREAD", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA IO_THREAD", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -170,10 +171,10 @@ func TestEmergencyReparentShard(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", 
- "START SLAVE", - "FAKE SET MASTER", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE SET SOURCE", } goodReplica2.StartActionLoop(t, wr) defer goodReplica2.StopActionLoop(t) @@ -204,7 +205,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create a primary, a couple good replicas oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -232,10 +233,10 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, newPrimary.FakeMysqlDaemon.CurrentSourceFilePosition) newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(newPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(moreAdvancedReplica.Tablet)) newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE IO_THREAD", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA IO_THREAD", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) @@ -267,19 +268,25 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { newPrimary.FakeMysqlDaemon.WaitPrimaryPositions = append(newPrimary.FakeMysqlDaemon.WaitPrimaryPositions, moreAdvancedReplica.FakeMysqlDaemon.CurrentPrimaryPosition) moreAdvancedReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE IO_THREAD", - "STOP SLAVE", - "FAKE SET 
MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA IO_THREAD", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } moreAdvancedReplica.StartActionLoop(t, wr) defer moreAdvancedReplica.StopActionLoop(t) // run EmergencyReparentShard - err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, newPrimary.Tablet.Alias, 10*time.Second, sets.New[string](), false, false) + err := wr.EmergencyReparentShard(ctx, newPrimary.Tablet.Keyspace, newPrimary.Tablet.Shard, reparentutil.EmergencyReparentOptions{ + NewPrimaryAlias: newPrimary.Tablet.Alias, + WaitAllTablets: false, + WaitReplicasTimeout: 10 * time.Second, + IgnoreReplicas: sets.New[string](), + PreventCrossCellPromotion: false, + }) cancel() assert.NoError(t, err) diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go index c0152de3cf3..556debae64a 100644 --- a/go/vt/wrangler/testlib/external_reparent_test.go +++ b/go/vt/wrangler/testlib/external_reparent_test.go @@ -22,14 +22,14 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/discovery" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -50,8 +50,8 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create an old primary, a new 
primary, two good replicas, one bad replica @@ -91,7 +91,7 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", + "FAKE SET SOURCE", "START Replica", } @@ -143,7 +143,7 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -171,7 +171,7 @@ func TestTabletExternallyReparentedToReplica(t *testing.T) { // primary is still good to go. 
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", + "FAKE SET SOURCE", "START Replica", } @@ -226,7 +226,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -250,7 +250,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", + "FAKE SET SOURCE", "START Replica", } // On the old primary, we will only respond to @@ -263,9 +263,9 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica.StartActionLoop(t, wr) defer goodReplica.StopActionLoop(t) @@ -319,7 +319,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -339,7 +339,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", + "FAKE SET SOURCE", "START Replica", } // On the old primary, we will only respond to @@ -352,9 +352,9 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica.StartActionLoop(t, wr) defer goodReplica.StopActionLoop(t) @@ -405,7 +405,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, a new primary, and a good replica. 
oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -424,7 +424,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", + "FAKE SET SOURCE", "START Replica", } // On the old primary, we will only respond to @@ -437,9 +437,9 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { // TabletActionReplicaWasRestarted. goodReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica.StartActionLoop(t, wr) defer goodReplica.StopActionLoop(t) @@ -509,7 +509,7 @@ func TestRPCTabletExternallyReparentedDemotesPrimaryToConfiguredTabletType(t *te ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary and a new primary oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_SPARE, nil) diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index a1b30813f53..97d74edf3f7 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/grpctmserver" "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager" @@ -209,8 
+210,9 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), VREngine: vreplication.NewTestEngine(wr.TopoServer(), ft.Tablet.Alias.Cell, ft.FakeMysqlDaemon, binlogplayer.NewFakeDBClient, binlogplayer.NewFakeDBClient, topoproto.TabletDbName(ft.Tablet), nil), + Env: vtenv.NewTestEnv(), } - if err := ft.TM.Start(ft.Tablet, 0); err != nil { + if err := ft.TM.Start(ft.Tablet, nil); err != nil { t.Fatalf("Error in tablet - %v, err - %v", topoproto.TabletAliasString(ft.Tablet.Alias), err.Error()) } ft.Tablet = ft.TM.Tablet() diff --git a/go/vt/wrangler/testlib/find_tablet_test.go b/go/vt/wrangler/testlib/find_tablet_test.go index 5b6f26f7056..069eb913ddf 100644 --- a/go/vt/wrangler/testlib/find_tablet_test.go +++ b/go/vt/wrangler/testlib/find_tablet_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -36,7 +37,7 @@ func TestFindTablet(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // Create an old primary, two good replicas oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) diff --git a/go/vt/wrangler/testlib/permissions_test.go b/go/vt/wrangler/testlib/permissions_test.go index 4a0e71512f3..a1b14350b7f 100644 --- a/go/vt/wrangler/testlib/permissions_test.go +++ b/go/vt/wrangler/testlib/permissions_test.go @@ -24,12 +24,13 @@ import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/topo/topoproto" + 
"vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" querypb "vitess.io/vitess/go/vt/proto/query" @@ -47,8 +48,8 @@ func TestPermissions(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() primary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -565,9 +566,9 @@ func TestPermissions(t *testing.T) { replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } replica.StartActionLoop(t, wr) defer replica.StopActionLoop(t) diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 0125e69cac0..28ffd34b756 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -24,6 +24,7 @@ import ( "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/vtenv" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -51,8 +52,8 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -83,9 +84,9 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { }, } newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) @@ -98,13 +99,13 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", // We might end up calling SetReplicationSource twice on the old primary // one coming from `PlannedReparentShard` and one coming from `endPrimaryTerm`. // This is a race though between SetReplicationSource on this tablet and `PromoteReplica` on the new primary. 
- "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", } oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) @@ -119,12 +120,12 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -167,8 +168,8 @@ func TestPlannedReparentShardNoError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -200,9 +201,9 @@ func TestPlannedReparentShardNoError(t *testing.T) { }, } newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) @@ -215,13 +216,13 @@ func TestPlannedReparentShardNoError(t *testing.T) { oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] 
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", // We might end up calling SetReplicationSource twice on the old primary // one coming from `PlannedReparentShard` and one coming from `endPrimaryTerm`. // This is a race though between SetReplicationSource on this tablet and `PromoteReplica` on the new primary. - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", } oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) @@ -236,12 +237,12 @@ func TestPlannedReparentShardNoError(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -251,10 +252,10 @@ func TestPlannedReparentShardNoError(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "FAKE SET MASTER", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE SET SOURCE", } goodReplica2.StartActionLoop(t, wr) 
goodReplica2.FakeMysqlDaemon.Replicating = false @@ -303,8 +304,8 @@ func TestPlannedReparentInitialization(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a few replicas. @@ -337,9 +338,9 @@ func TestPlannedReparentInitialization(t *testing.T) { goodReplica1.FakeMysqlDaemon.Replicating = true goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -350,7 +351,7 @@ func TestPlannedReparentInitialization(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", + "FAKE SET SOURCE", } defer goodReplica2.StopActionLoop(t) @@ -389,8 +390,8 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // 
Create a primary, a couple good replicas @@ -421,9 +422,9 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { }, } newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) @@ -436,8 +437,8 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.PromoteResult oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", } oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) @@ -451,12 +452,12 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -466,10 +467,10 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), 
topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "FAKE SET MASTER", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE SET SOURCE", } goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.Replicating = false @@ -497,8 +498,8 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -530,9 +531,9 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { }, } newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) @@ -544,8 +545,8 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", } oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) @@ -559,12 +560,12 @@ func TestPlannedReparentShardWaitForPositionTimeout(t 
*testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -574,10 +575,10 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "FAKE SET MASTER", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE SET SOURCE", } goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.Replicating = false @@ -603,8 +604,8 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -637,17 +638,17 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, 
topoproto.MysqlAddr(primary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // simulate error that will trigger a call to RestartReplication - "STOP SLAVE", - "RESET SLAVE", - "START SLAVE", - "START SLAVE", + "STOP REPLICA", + "RESET REPLICA", + "START REPLICA", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) - goodReplica1.FakeMysqlDaemon.StopReplicationError = errors.New("Slave failed to initialize relay log info structure from the repository") + goodReplica1.FakeMysqlDaemon.StopReplicationError = errors.New("Replica failed to initialize relay log info structure from the repository") defer goodReplica1.StopActionLoop(t) // run PlannedReparentShard @@ -683,8 +684,8 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -722,19 +723,19 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // simulate error that will trigger a call to RestartReplication // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // In SetReplicationSource, we find that the source host and port was already set correctly, - // So we try to stop and start replication. 
The first STOP SLAVE comes from there - "STOP SLAVE", - // During the START SLAVE call, we find a relay log error, so we try to restart replication. - "STOP SLAVE", - "RESET SLAVE", - "START SLAVE", + // So we try to stop and start replication. The first STOP REPLICA comes from there + "STOP REPLICA", + // During the START REPLICA call, we find a relay log error, so we try to restart replication. + "STOP REPLICA", + "RESET REPLICA", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) - goodReplica1.FakeMysqlDaemon.StartReplicationError = errors.New("Slave failed to initialize relay log info structure from the repository") + goodReplica1.FakeMysqlDaemon.StartReplicationError = errors.New("Replica failed to initialize relay log info structure from the repository") defer goodReplica1.StopActionLoop(t) // run PlannedReparentShard @@ -768,8 +769,8 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -802,9 +803,9 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { }, } newPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", "SUBINSERT INTO _vt.reparent_journal (time_created_ns, action_name, primary_alias, replication_position) VALUES", } newPrimary.StartActionLoop(t, wr) @@ -817,14 +818,14 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { oldPrimary.FakeMysqlDaemon.CurrentPrimaryPosition = newPrimary.FakeMysqlDaemon.WaitPrimaryPositions[0] 
oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs = append(oldPrimary.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet)) oldPrimary.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", // We call a SetReplicationSource explicitly - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", // extra SetReplicationSource call due to retry - "FAKE SET MASTER", - "START SLAVE", + "FAKE SET SOURCE", + "START REPLICA", } oldPrimary.StartActionLoop(t, wr) defer oldPrimary.StopActionLoop(t) @@ -838,15 +839,15 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // extra SetReplicationSource call due to retry - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "START REPLICA", } goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -856,10 +857,10 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(newPrimary.Tablet), topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "FAKE SET MASTER", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE SET 
SOURCE", } goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.Replicating = false @@ -878,7 +879,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { // After the first call to PRS has failed, we don't know whether `SetReplicationSource` RPC has succeeded on the oldPrimary or not. // This causes the test to become non-deterministic. To prevent this, we call `SetReplicationSource` on the oldPrimary again, and make sure it has succeeded. // We also wait until the oldPrimary has demoted itself to a replica type. - err = wr.TabletManagerClient().SetReplicationSource(context.Background(), oldPrimary.Tablet, newPrimary.Tablet.Alias, 0, "", false, false) + err = wr.TabletManagerClient().SetReplicationSource(context.Background(), oldPrimary.Tablet, newPrimary.Tablet.Alias, 0, "", false, false, 0) require.NoError(t, err) waitForTabletType(t, wr, oldPrimary.Tablet.Alias, topodatapb.TabletType_REPLICA) @@ -908,8 +909,8 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas @@ -943,11 +944,11 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica1.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica1.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "START REPLICA", } 
goodReplica1.StartActionLoop(t, wr) defer goodReplica1.StopActionLoop(t) @@ -957,10 +958,10 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs = append(goodReplica2.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(oldPrimary.Tablet)) goodReplica2.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "FAKE SET MASTER", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "FAKE SET SOURCE", } goodReplica2.StartActionLoop(t, wr) goodReplica2.FakeMysqlDaemon.Replicating = false diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 0d1d84e89f5..e0a2077c778 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -25,16 +25,14 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/replication" - - "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -51,7 +49,7 @@ func TestShardReplicationStatuses(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // create shard and tablets if _, err := ts.GetOrCreateShard(ctx, 
"test_keyspace", "0"); err != nil { @@ -96,9 +94,9 @@ func TestShardReplicationStatuses(t *testing.T) { replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } replica.StartActionLoop(t, wr) defer replica.StopActionLoop(t) @@ -135,7 +133,7 @@ func TestReparentTablet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // create shard and tablets if _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0"); err != nil { @@ -166,11 +164,11 @@ func TestReparentTablet(t *testing.T) { replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", - "STOP SLAVE", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", + "STOP REPLICA", + "START REPLICA", } replica.StartActionLoop(t, wr) defer replica.StopActionLoop(t) @@ -192,7 +190,7 @@ func TestSetReplicationSource(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) // create shard and 
tablets _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0") @@ -224,14 +222,14 @@ func TestSetReplicationSource(t *testing.T) { replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // We stop and reset the replication parameters because of relay log issues. - "STOP SLAVE", - "STOP SLAVE", - "RESET SLAVE", - "START SLAVE", + "STOP REPLICA", + "STOP REPLICA", + "RESET REPLICA", + "START REPLICA", } replica.StartActionLoop(t, wr) defer replica.StopActionLoop(t) @@ -257,9 +255,9 @@ func TestSetReplicationSource(t *testing.T) { replica.FakeMysqlDaemon.SetReplicationSourceInputs = append(replica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(primary.Tablet)) replica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", // For the SetReplicationSource call, we shouldn't get any queries at all! 
} replica.StartActionLoop(t, wr) diff --git a/go/vt/wrangler/testlib/shard_test.go b/go/vt/wrangler/testlib/shard_test.go index a0b1b0a3562..7528a220d1f 100644 --- a/go/vt/wrangler/testlib/shard_test.go +++ b/go/vt/wrangler/testlib/shard_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -35,8 +36,8 @@ func TestDeleteShardCleanup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() // Create a primary, a couple good replicas diff --git a/go/vt/wrangler/testlib/version_test.go b/go/vt/wrangler/testlib/version_test.go index 102bcdfe6e5..cf5f3fd1487 100644 --- a/go/vt/wrangler/testlib/version_test.go +++ b/go/vt/wrangler/testlib/version_test.go @@ -27,10 +27,11 @@ import ( "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtenv" + "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/memorytopo" - "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -70,8 +71,8 @@ func TestVersion(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) - vp := NewVtctlPipe(t, ts) + wr := wrangler.New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + vp 
:= NewVtctlPipe(ctx, t, ts) defer vp.Close() // couple tablets is enough @@ -93,9 +94,9 @@ func TestVersion(t *testing.T) { sourceReplica.FakeMysqlDaemon.SetReplicationSourceInputs = append(sourceReplica.FakeMysqlDaemon.SetReplicationSourceInputs, topoproto.MysqlAddr(sourcePrimary.Tablet)) sourceReplica.FakeMysqlDaemon.ExpectedExecuteSuperQueryList = []string{ // These 3 statements come from tablet startup - "STOP SLAVE", - "FAKE SET MASTER", - "START SLAVE", + "STOP REPLICA", + "FAKE SET SOURCE", + "START REPLICA", } sourceReplica.StartActionLoop(t, wr) sourceReplica.HTTPServer.Handler.(*http.ServeMux).HandleFunc("/debug/vars", expvarHandler(&sourceReplicaGitRev)) diff --git a/go/vt/wrangler/testlib/vtctl_pipe.go b/go/vt/wrangler/testlib/vtctl_pipe.go index 38c535f005f..594290e4023 100644 --- a/go/vt/wrangler/testlib/vtctl_pipe.go +++ b/go/vt/wrangler/testlib/vtctl_pipe.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctlserver" "vitess.io/vitess/go/vt/vtctl/vtctlclient" + "vitess.io/vitess/go/vt/vtenv" // we need to import the grpcvtctlclient library so the gRPC // vtctl client is registered and can be used. @@ -52,7 +53,7 @@ type VtctlPipe struct { } // NewVtctlPipe creates a new VtctlPipe based on the given topo server. 
-func NewVtctlPipe(t *testing.T, ts *topo.Server) *VtctlPipe { +func NewVtctlPipe(ctx context.Context, t *testing.T, ts *topo.Server) *VtctlPipe { // Register all vtctl commands servenvInitialized.Do(func() { // make sure we use the right protocol @@ -76,11 +77,11 @@ func NewVtctlPipe(t *testing.T, ts *topo.Server) *VtctlPipe { // Create a gRPC server and listen on the port server := grpc.NewServer() - grpcvtctlserver.StartServer(server, ts) + grpcvtctlserver.StartServer(server, vtenv.NewTestEnv(), ts) go server.Serve(listener) // Create a VtctlClient gRPC client to talk to the fake server - client, err := vtctlclient.New(listener.Addr().String()) + client, err := vtctlclient.New(ctx, listener.Addr().String()) if err != nil { t.Fatalf("Cannot create client: %v", err) } @@ -138,7 +139,7 @@ func (vp *VtctlPipe) run(args []string, outputFunc func(string)) error { } // RunAndStreamOutput returns the output of the vtctl command as a channel. -// When the channcel is closed, the command did finish. +// When the channel is closed, the command did finish. 
func (vp *VtctlPipe) RunAndStreamOutput(args []string) (logutil.EventStream, error) { actionTimeout := 30 * time.Second ctx := context.Background() diff --git a/go/vt/wrangler/testlib/vtctl_topo_test.go b/go/vt/wrangler/testlib/vtctl_topo_test.go index a13535f4111..325d629c1ff 100644 --- a/go/vt/wrangler/testlib/vtctl_topo_test.go +++ b/go/vt/wrangler/testlib/vtctl_topo_test.go @@ -62,7 +62,7 @@ func TestVtctlTopoCommands(t *testing.T) { if err := ts.CreateKeyspace(context.Background(), "ks2", &topodatapb.Keyspace{KeyspaceType: topodatapb.KeyspaceType_SNAPSHOT}); err != nil { t.Fatalf("CreateKeyspace() failed: %v", err) } - vp := NewVtctlPipe(t, ts) + vp := NewVtctlPipe(ctx, t, ts) defer vp.Close() tmp := t.TempDir() diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index 654a5bd1588..fb76b8e8f21 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -26,10 +26,10 @@ import ( "sync" "time" + "golang.org/x/exp/maps" "golang.org/x/sync/errgroup" "vitess.io/vitess/go/json2" - "vitess.io/vitess/go/maps2" "vitess.io/vitess/go/sqlescape" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -40,6 +40,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" "vitess.io/vitess/go/vt/vtctl/workflow" "vitess.io/vitess/go/vt/vterrors" @@ -49,6 +50,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -221,7 +223,7 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl return nil, nil, err } - ws := workflow.NewServer(wr.ts, wr.tmc) + ws := 
workflow.NewServer(wr.env, wr.ts, wr.tmc) state := &workflow.State{ Workflow: workflowName, SourceKeyspace: ts.SourceKeyspaceName(), @@ -437,11 +439,8 @@ func (wr *Wrangler) areTabletsAvailableToStreamFrom(ctx context.Context, ts *tra if ts.optCells != "" { cells = strings.Split(ts.optCells, ",") } - // FIXME: currently there is a default setting in the tablet that is used if user does not specify a tablet type, - // we use the value specified in the tablet flag `-vreplication_tablet_type` - // but ideally we should populate the vreplication table with a default value when we setup the workflow if tabletTypes == "" { - tabletTypes = "PRIMARY,REPLICA" + tabletTypes = "in_order:REPLICA,PRIMARY" // default } var wg sync.WaitGroup @@ -487,11 +486,11 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts, ws, err := wr.getWorkflowState(ctx, targetKeyspace, workflowName) _ = ws if err != nil { - handleError("failed to get the current workflow state", err) + return handleError("failed to get the current workflow state", err) } if ts == nil { errorMsg := fmt.Sprintf("workflow %s not found in keyspace %s", workflowName, targetKeyspace) - handleError("failed to get the current workflow state", fmt.Errorf(errorMsg)) + return handleError("failed to get the current workflow state", fmt.Errorf(errorMsg)) } var sw iswitcher @@ -508,7 +507,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts.Logger().Infof("Built switching metadata: %+v", ts) if err := ts.validate(ctx); err != nil { - handleError("workflow validation failed", err) + return handleError("workflow validation failed", err) } if reverseReplication { @@ -556,7 +555,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa } if !journalsExist { ts.Logger().Infof("No previous journals were found. 
Proceeding normally.") - sm, err := workflow.BuildStreamMigrator(ctx, ts, cancel) + sm, err := workflow.BuildLegacyStreamMigrator(ctx, ts, cancel, wr.env.Parser()) if err != nil { return handleError("failed to migrate the workflow streams", err) } @@ -621,6 +620,20 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa sw.cancelMigration(ctx, sm) return handleError("failed to create the reverse vreplication streams", err) } + + // Initialize any target sequences, if there are any, before allowing new writes. + if initializeTargetSequences && len(sequenceMetadata) > 0 { + ts.Logger().Infof("Initializing target sequences") + // Writes are blocked so we can safely initialize the sequence tables but + // we also want to use a shorter timeout than the parent context. + // We use at most half of the overall timeout. + initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) + defer cancel() + if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { + sw.cancelMigration(ctx, sm) + return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) + } + } } else { if cancel { return handleError("invalid cancel", fmt.Errorf("traffic switching has reached the point of no return, cannot cancel")) @@ -637,17 +650,6 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa if err := sw.createJournals(ctx, sourceWorkflows); err != nil { return handleError("failed to create the journal", err) } - // Initialize any target sequences, if there are any, before allowing new writes. - if initializeTargetSequences && len(sequenceMetadata) > 0 { - // Writes are blocked so we can safely initialize the sequence tables but - // we also want to use a shorter timeout than the parent context. - // We use up at most half of the overall timeout. 
- initSeqCtx, cancel := context.WithTimeout(ctx, timeout/2) - defer cancel() - if err := sw.initializeTargetSequences(initSeqCtx, sequenceMetadata); err != nil { - return handleError(fmt.Sprintf("failed to initialize the sequences used in the %s keyspace", ts.TargetKeyspaceName()), err) - } - } if err := sw.allowTargetWrites(ctx); err != nil { return handleError(fmt.Sprintf("failed to allow writes in the %s keyspace", ts.TargetKeyspaceName()), err) } @@ -655,7 +657,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa return handleError("failed to update the routing rules", err) } if err := sw.streamMigraterfinalize(ctx, ts, sourceWorkflows); err != nil { - handleError("failed to finalize the traffic switch", err) + return handleError("failed to finalize the traffic switch", err) } if reverseReplication { if err := sw.startReverseVReplication(ctx); err != nil { @@ -859,8 +861,45 @@ func (wr *Wrangler) DropSources(ctx context.Context, targetKeyspace, workflowNam return sw.logs(), nil } +func (wr *Wrangler) getShardSubset(ctx context.Context, keyspace string, shardSubset []string) ([]string, error) { + if wr.WorkflowParams != nil && len(wr.WorkflowParams.ShardSubset) > 0 { + shardSubset = wr.WorkflowParams.ShardSubset + } + allShards, err := wr.ts.GetShardNames(ctx, keyspace) + if err != nil { + return nil, err + } + if len(allShards) == 0 { + return nil, fmt.Errorf("no shards found in keyspace %s", keyspace) + } + + if len(shardSubset) == 0 { + return allShards, nil + } + + existingShards := make(map[string]bool, len(allShards)) + for _, shard := range allShards { + existingShards[shard] = true + } + // Validate that the provided shards are part of the keyspace. 
+ for _, shard := range shardSubset { + _, found := existingShards[shard] + if !found { + return nil, fmt.Errorf("shard %s not found in keyspace %s", shard, keyspace) + } + } + log.Infof("Selecting subset of shards in keyspace %s: %d from %d :: %+v", + keyspace, len(shardSubset), len(allShards), shardSubset) + return shardSubset, nil + +} + func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workflowName string) (*trafficSwitcher, error) { - tgtInfo, err := workflow.LegacyBuildTargets(ctx, wr.ts, wr.tmc, targetKeyspace, workflowName) + shardSubset, err := wr.getShardSubset(ctx, targetKeyspace, nil) + if err != nil { + return nil, err + } + tgtInfo, err := workflow.LegacyBuildTargets(ctx, wr.ts, wr.tmc, targetKeyspace, workflowName, shardSubset) if err != nil { log.Infof("Error building targets: %s", err) return nil, err @@ -956,7 +995,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo if err != nil { return nil, err } - ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace, wr.env.Parser()) if err != nil { return nil, err } @@ -1150,7 +1189,7 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, // If so, it also returns the list of sourceWorkflows that need to be switched. 
func (ts *trafficSwitcher) checkJournals(ctx context.Context) (journalsExist bool, sourceWorkflows []string, err error) { var ( - ws = workflow.NewServer(ts.TopoServer(), ts.TabletManagerClient()) + ws = workflow.NewServer(ts.wr.env, ts.TopoServer(), ts.TabletManagerClient()) mu sync.Mutex ) @@ -1300,7 +1339,7 @@ func (ts *trafficSwitcher) cancelMigration(ctx context.Context, sm *workflow.Str ts.Logger().Errorf("Cancel migration failed:", err) } - sm.CancelMigration(ctx) + sm.CancelStreamMigrations(ctx) err = ts.ForAllTargets(func(target *workflow.MigrationTarget) error { query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", encodeString(target.GetPrimary().DbName()), encodeString(ts.WorkflowName())) @@ -1394,8 +1433,8 @@ func (ts *trafficSwitcher) createReverseVReplication(ctx context.Context) error Filter: filter, }) } - log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s", - source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position) + log.Infof("Creating reverse workflow vreplication stream on tablet %s: workflow %s, startPos %s for target %s:%s, uid %d", + source.GetPrimary().Alias, ts.ReverseWorkflowName(), target.Position, ts.TargetKeyspaceName(), target.GetShard().ShardName(), uid) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, binlogplayer.CreateVReplicationState(ts.ReverseWorkflowName(), reverseBls, target.Position, binlogdatapb.VReplicationWorkflowState_Stopped, source.GetPrimary().DbName(), ts.workflowType, ts.workflowSubType)) @@ -1615,7 +1654,8 @@ func (ts *trafficSwitcher) deleteShardRoutingRules(ctx context.Context) error { func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error { return ts.ForAllSources(func(source *workflow.MigrationSource) error { - query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName())) + query := 
fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", + encodeString(source.GetPrimary().DbName()), encodeString(ts.ReverseWorkflowName())) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) return err }) @@ -1734,23 +1774,33 @@ func getRenameFileName(tableName string) string { func (ts *trafficSwitcher) removeSourceTables(ctx context.Context, removalType workflow.TableRemovalType) error { err := ts.ForAllSources(func(source *workflow.MigrationSource) error { for _, tableName := range ts.Tables() { - query := fmt.Sprintf("drop table %s.%s", - sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + primaryDbName, err := sqlescape.EnsureEscaped(source.GetPrimary().DbName()) + if err != nil { + return err + } + tableNameEscaped, err := sqlescape.EnsureEscaped(tableName) + if err != nil { + return err + } + query := fmt.Sprintf("drop table %s.%s", primaryDbName, tableNameEscaped) if removalType == workflow.DropTable { ts.Logger().Infof("%s: Dropping table %s.%s\n", source.GetPrimary().String(), source.GetPrimary().DbName(), tableName) } else { - renameName := getRenameFileName(tableName) + renameName, err := sqlescape.EnsureEscaped(getRenameFileName(tableName)) + if err != nil { + return err + } ts.Logger().Infof("%s: Renaming table %s.%s to %s.%s\n", source.GetPrimary().String(), source.GetPrimary().DbName(), tableName, source.GetPrimary().DbName(), renameName) - query = fmt.Sprintf("rename table %s.%s TO %s.%s", - sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(tableName)), - sqlescape.EscapeID(sqlescape.UnescapeID(source.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(renameName))) + query = fmt.Sprintf("rename table %s.%s TO %s.%s", primaryDbName, tableNameEscaped, primaryDbName, renameName) } - _, err := ts.wr.ExecuteFetchAsDba(ctx, 
source.GetPrimary().Alias, query, 1, false, true) + _, err = ts.wr.tmc.ExecuteFetchAsDba(ctx, source.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + DisableForeignKeyChecks: true, + }) if err != nil { ts.Logger().Errorf("%s: Error removing table %s: %v", source.GetPrimary().String(), tableName, err) return err @@ -1845,12 +1895,23 @@ func (ts *trafficSwitcher) removeTargetTables(ctx context.Context) error { log.Infof("removeTargetTables") err := ts.ForAllTargets(func(target *workflow.MigrationTarget) error { for _, tableName := range ts.Tables() { - query := fmt.Sprintf("drop table %s.%s", - sqlescape.EscapeID(sqlescape.UnescapeID(target.GetPrimary().DbName())), - sqlescape.EscapeID(sqlescape.UnescapeID(tableName))) + primaryDbName, err := sqlescape.EnsureEscaped(target.GetPrimary().DbName()) + if err != nil { + return err + } + tableName, err := sqlescape.EnsureEscaped(tableName) + if err != nil { + return err + } + query := fmt.Sprintf("drop table %s.%s", primaryDbName, tableName) ts.Logger().Infof("%s: Dropping table %s.%s\n", target.GetPrimary().String(), target.GetPrimary().DbName(), tableName) - _, err := ts.wr.ExecuteFetchAsDba(ctx, target.GetPrimary().Alias, query, 1, false, true) + _, err = ts.wr.tmc.ExecuteFetchAsDba(ctx, target.GetPrimary().Tablet, false, &tabletmanagerdatapb.ExecuteFetchAsDbaRequest{ + Query: []byte(query), + MaxRows: 1, + ReloadSchema: true, + DisableForeignKeyChecks: true, + }) if err != nil { ts.Logger().Errorf("%s: Error removing table %s: %v", target.GetPrimary().String(), tableName, err) @@ -1923,9 +1984,6 @@ func (ts *trafficSwitcher) addParticipatingTablesToKeyspace(ctx context.Context, if err := json2.Unmarshal([]byte(wrap), ks); err != nil { return err } - if err != nil { - return err - } for table, vtab := range ks.Tables { vschema.Tables[table] = vtab } @@ -2073,7 +2131,7 @@ func (ts *trafficSwitcher) getTargetSequenceMetadata(ctx 
context.Context) (map[s // error if any is seen. func (ts *trafficSwitcher) findSequenceUsageInKeyspace(vschema *vschemapb.Keyspace) (map[string]*sequenceMetadata, bool, error) { allFullyQualified := true - targets := maps2.Values(ts.Targets()) + targets := maps.Values(ts.Targets()) if len(targets) == 0 || targets[0].GetPrimary() == nil { // This should never happen return nil, false, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "no primary tablet found for target keyspace %s", ts.targetKeyspace) } @@ -2143,13 +2201,17 @@ func (ts *trafficSwitcher) initializeTargetSequences(ctx context.Context, sequen ) qr, terr := ts.wr.ExecuteFetchAsApp(ictx, primary.GetAlias(), true, query.Query, 1) if terr != nil || len(qr.Rows) != 1 { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", - ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s on tablet %s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, sequenceMetadata.usingTableName, topoproto.TabletAliasString(primary.Alias), terr) } - maxID, terr := sqltypes.Proto3ToResult(qr).Rows[0][0].ToInt64() - if terr != nil { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s in order to initialize the backing sequence table: %v", - ts.targetKeyspace, sequenceMetadata.usingTableName, terr) + rawVal := sqltypes.Proto3ToResult(qr).Rows[0][0] + maxID := int64(0) + if !rawVal.IsNull() { // If it's NULL then there are no rows and 0 remains the max + maxID, terr = rawVal.ToInt64() + if terr != nil { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to get the max used sequence value for target table %s.%s on tablet %s in order to initialize the backing sequence table: %v", + ts.targetKeyspace, 
sequenceMetadata.usingTableName, topoproto.TabletAliasString(primary.Alias), terr) + } } srMu.Lock() defer srMu.Unlock() diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index c8ec71dba96..4e58024785d 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -19,7 +19,9 @@ package wrangler import ( "context" "fmt" - "math/rand" + "math/rand/v2" + "strconv" + "strings" "sync" "testing" "time" @@ -40,6 +42,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -56,7 +59,7 @@ import ( const ( streamInfoQuery = "select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='%s' and db_name='vt_%s'" streamExtInfoQuery = "select id, source, pos, stop_pos, max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags, workflow_type, workflow_sub_type, defer_secondary_keys, rows_copied from _vt.vreplication where db_name = 'vt_%s' and workflow = '%s'" - copyStateQuery = "select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)" + copyStateQuery = "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (%s) and id in (select max(id) from _vt.copy_state where vrepl_id in (%s) group by vrepl_id, table_name)" maxValForSequence = "select max(`id`) as maxval from `vt_%s`.`%s`" ) @@ -118,7 +121,7 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, targetShards 
[]string, fmtQuery string) *testMigraterEnv { tme := &testMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = sourceShards tme.targetShards = targetShards @@ -155,8 +158,8 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange) } - dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { tme.mu.Lock() defer tme.mu.Unlock() allPrimaries := append(tme.sourcePrimaries, tme.targetPrimaries...) @@ -255,7 +258,7 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, "maxval", "int64", ), - "5", + "NULL", ), ) tme.tmeDB.AddQuery(fmt.Sprintf(maxValForSequence, "ks2", "t2"), @@ -271,7 +274,7 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, // Now tell the fakesqldb used by the global keyspace tablets to expect // the sequence management related queries against the target keyspace. 
gfdb.AddQuery( - sqlparser.BuildParsedQuery(sqlInitSequenceTable, sqlescape.EscapeID("vt_global"), sqlescape.EscapeID("t1_seq"), 6, 6, 6).Query, + sqlparser.BuildParsedQuery(sqlInitSequenceTable, sqlescape.EscapeID("vt_global"), sqlescape.EscapeID("t1_seq"), 1, 1, 1).Query, &sqltypes.Result{RowsAffected: 0}, ) gfdb.AddQuery( @@ -298,6 +301,7 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, for i, targetShard := range targetShards { var streamInfoRows []string var streamExtInfoRows []string + var vreplIDs []string for j, sourceShard := range sourceShards { bls := &binlogdatapb.BinlogSource{ Keyspace: "ks1", @@ -314,8 +318,10 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, } streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls)) streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0||1||0", j+1, now, now)) - tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) + vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10)) } + vreplIDsJoined := strings.Join(vreplIDs, ", ") + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), @@ -332,6 +338,7 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, for i, sourceShard := range sourceShards { var streamInfoRows []string + var vreplIDs []string for j, targetShard := range targetShards { bls := &binlogdatapb.BinlogSource{ Keyspace: "ks2", @@ -347,8 +354,10 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, }, } streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls)) - 
tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) + vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10)) } + vreplIDsJoined := strings.Join(vreplIDs, ", ") + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), @@ -373,7 +382,7 @@ func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, } // newTestTablePartialMigrater creates a test tablet migrater -// specifially for partial or shard by shard migrations. +// specifically for partial or shard by shard migrations. // The shards must be the same on the source and target, and we // must be moving a subset of them. // fmtQuery should be of the form: 'select a, b %s group by a'. 
@@ -382,7 +391,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar require.Greater(t, len(shards), 1, "shard by shard migrations can only be done on sharded keyspaces") tme := &testMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = shards tme.targetShards = shards @@ -415,8 +424,8 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange) } - dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { tme.mu.Lock() defer tme.mu.Unlock() for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) 
{ @@ -467,9 +476,10 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar now := time.Now().Unix() for i, shard := range shards { + var streamInfoRows []string + var streamExtInfoRows []string + var vreplIDs []string for _, shardToMove := range shardsToMove { - var streamInfoRows []string - var streamExtInfoRows []string if shardToMove == shard { bls := &binlogdatapb.BinlogSource{ Keyspace: "ks1", @@ -486,26 +496,31 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar } streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", i+1, bls)) streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0|||1||0", i+1, now, now)) + vreplIDs = append(vreplIDs, strconv.FormatInt(int64(i+1), 10)) } - tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1, i+1), noResult) - tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", - "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), - streamInfoRows...)) - tme.dbTargetClients[i].addInvariant(streamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", - "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"), - streamExtInfoRows...)) - tme.dbTargetClients[i].addInvariant(reverseStreamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", - 
"int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"), - streamExtInfoRows...)) } + vreplIDsJoined := strings.Join(vreplIDs, ", ") + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) + log.Infof("Adding streamInfoKs2 invariant for shard %s, client %s,rows %q", + shard, tme.dbTargetClients[i].name, streamExtInfoRows) + tme.dbTargetClients[i].addInvariant(streamInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), + streamInfoRows...)) + tme.dbTargetClients[i].addInvariant(streamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"), + streamExtInfoRows...)) + tme.dbTargetClients[i].addInvariant(reverseStreamExtInfoKs2, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|int64|varchar|varchar|int64|int64|int64"), + streamExtInfoRows...)) } for i, shard := range shards { + var streamInfoRows []string + var vreplIDs []string for _, shardToMove := range shardsToMove { - var streamInfoRows []string if shardToMove == shard { bls := &binlogdatapb.BinlogSource{ Keyspace: "ks2", @@ -521,16 +536,17 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar }, } 
streamInfoRows = append(streamInfoRows, fmt.Sprintf("%d|%v||||1|0|0", i+1, bls)) - tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, i+1, i+1), noResult) + vreplIDs = append(vreplIDs, strconv.FormatInt(int64(i+1), 10)) } - tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", - "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), - streamInfoRows...), - ) } + vreplIDsJoined := strings.Join(vreplIDs, ", ") + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) + tme.dbSourceClients[i].addInvariant(reverseStreamInfoKs1, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", + "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), + streamInfoRows...), + ) } - tme.targetKeyspace = "ks2" return tme } @@ -538,7 +554,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targetShards []string) *testShardMigraterEnv { tme := &testShardMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) tme.sourceShards = sourceShards tme.targetShards = targetShards tme.tmeDB = fakesqldb.New(t) @@ -573,8 +589,8 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe tme.targetKeyRanges = append(tme.targetKeyRanges, targetKeyRange) } - dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) 
(queryservice.QueryService, error) { + dialerName := fmt.Sprintf("TrafficSwitcherTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { tme.mu.Lock() defer tme.mu.Unlock() for _, ft := range append(tme.sourcePrimaries, tme.targetPrimaries...) { @@ -632,6 +648,7 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe for i, targetShard := range targetShards { var rows, rowsRdOnly []string var streamExtInfoRows []string + var vreplIDs []string for j, sourceShard := range sourceShards { if !key.KeyRangeIntersect(tme.targetKeyRanges[i], tme.sourceKeyRanges[j]) { continue @@ -649,8 +666,10 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe rows = append(rows, fmt.Sprintf("%d|%v||||1|0|0", j+1, bls)) rowsRdOnly = append(rows, fmt.Sprintf("%d|%v|||RDONLY|1|0|0", j+1, bls)) streamExtInfoRows = append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks1|%d|%d|0|0|||", j+1, now, now)) - tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) + vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10)) } + vreplIDsJoined := strings.Join(vreplIDs, ", ") + tme.dbTargetClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) tme.dbTargetClients[i].addInvariant(streamInfoKs, sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), @@ -670,11 +689,14 @@ func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targe tme.targetKeyspace = "ks" for i, dbclient := range tme.dbSourceClients { var streamExtInfoRows []string + var vreplIDs []string dbclient.addInvariant(streamInfoKs, &sqltypes.Result{}) for j := range targetShards { streamExtInfoRows 
= append(streamExtInfoRows, fmt.Sprintf("%d|||||Running|vt_ks|%d|%d|0|0|||", j+1, now, now)) - tme.dbSourceClients[i].addInvariant(fmt.Sprintf(copyStateQuery, j+1, j+1), noResult) + vreplIDs = append(vreplIDs, strconv.FormatInt(int64(j+1), 10)) } + vreplIDsJoined := strings.Join(vreplIDs, ", ") + tme.dbSourceClients[i].addInvariant(fmt.Sprintf(copyStateQuery, vreplIDsJoined, vreplIDsJoined), noResult) tme.dbSourceClients[i].addInvariant(streamExtInfoKs, sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags", "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|varchar|varchar"), @@ -862,7 +884,7 @@ func (tme *testShardMigraterEnv) expectStartReverseVReplication() { // NOTE: this is not a faithful reproduction of what should happen. // The ids returned are not accurate. for _, dbclient := range tme.dbSourceClients { - dbclient.addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + dbclient.addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) dbclient.addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) dbclient.addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) dbclient.addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -891,7 +913,7 @@ func (tme *testShardMigraterEnv) expectDeleteTargetVReplication() { } } -func (tme *testShardMigraterEnv) expectCancelMigration() { +func (tme *testShardMigraterEnv) expectCancelStreamMigrations() { for _, dbclient := range tme.dbTargetClients { dbclient.addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test'", &sqltypes.Result{}, nil) } diff --git a/go/vt/wrangler/traffic_switcher_test.go 
b/go/vt/wrangler/traffic_switcher_test.go index 6c97758ad48..e1ae1ce908f 100644 --- a/go/vt/wrangler/traffic_switcher_test.go +++ b/go/vt/wrangler/traffic_switcher_test.go @@ -286,7 +286,7 @@ func TestTableMigrateMainflow(t *testing.T) { verifyQueries(t, tme.allDBClients) //------------------------------------------------------------------------------------------------------------------- - // Test SwitchWrites cancelation on failure. + // Test SwitchWrites cancellation on failure. tme.expectNoPreviousJournals() // Switch all the reads first. @@ -434,11 +434,11 @@ func TestTableMigrateMainflow(t *testing.T) { createJournals() startReverseVReplication := func() { - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -607,7 +607,7 @@ func TestShardMigrateMainflow(t *testing.T) { verifyQueries(t, tme.allDBClients) 
//------------------------------------------------------------------------------------------------------------------- - // Test SwitchWrites cancelation on failure. + // Test SwitchWrites cancellation on failure. tme.expectNoPreviousJournals() // Switch all the reads first. @@ -731,11 +731,11 @@ func TestShardMigrateMainflow(t *testing.T) { createJournals() startReverseVReplication := func() { - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -949,8 +949,10 @@ func testTableMigrateOneToMany(t *testing.T, keepData, keepRoutingRules bool) { tme.dbTargetClients[0].addQuery("select 1 from _vt.vreplication where db_name='vt_ks2' and workflow='test' and message!='FROZEN'", &sqltypes.Result{}, nil) tme.dbTargetClients[1].addQuery("select 1 from _vt.vreplication where db_name='vt_ks2' and workflow='test' and message!='FROZEN'", &sqltypes.Result{}, nil) 
tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", &sqltypes.Result{}, nil) + tme.tmeDB.AddQuery("SET SESSION foreign_key_checks = OFF", &sqltypes.Result{}) tme.tmeDB.AddQuery(fmt.Sprintf("rename table `vt_ks1`.`t1` TO `vt_ks1`.`%s`", getRenameFileName("t1")), &sqltypes.Result{}) tme.tmeDB.AddQuery(fmt.Sprintf("rename table `vt_ks1`.`t2` TO `vt_ks1`.`%s`", getRenameFileName("t2")), &sqltypes.Result{}) + tme.tmeDB.AddQuery("SET SESSION foreign_key_checks = ON", &sqltypes.Result{}) tme.dbTargetClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks2' and workflow = 'test'", &sqltypes.Result{}, nil) // tme.dbTargetClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks2' and workflow = 'test'", &sqltypes.Result{}, nil) } @@ -1009,8 +1011,8 @@ func TestTableMigrateOneToManyDryRun(t *testing.T) { "\tKeyspace ks1, Shard 0 at Position MariaDB/5-456-892", "Wait for VReplication on stopped streams to catchup for up to 1s", "Create reverse replication workflow test_reverse", - "Create journal entries on source databases", "The following sequence backing tables used by tables being moved will be initialized: t1_seq,t2_seq", + "Create journal entries on source databases", "Enable writes on keyspace ks2 tables [t1,t2]", "Switch routing from keyspace ks1 to keyspace ks2", "Routing rules for tables [t1,t2] will be updated", @@ -1233,11 +1235,11 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbSourceClients[1].addQueryRE(journal2, &sqltypes.Result{}, nil) // mi.startReverseVReplication - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", 
&sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -1312,11 +1314,11 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbSourceClients[1].addQueryRE(journal2, &sqltypes.Result{}, nil) // mi.startReverseVReplication - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) 
tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -1411,7 +1413,7 @@ func TestTableMigrateCancelDryRun(t *testing.T) { want := []string{ "Lock keyspace ks1", "Lock keyspace ks2", - "Cancel stream migrations as requested", + "Cancel migration as requested", "Unlock keyspace ks2", "Unlock keyspace ks1", } @@ -2043,11 +2045,11 @@ func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) { createJournals() startReverseVReplication := func() { - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 2d6e49b73d7..4caad42ce1f 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -31,6 +31,8 @@ import ( 
"vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" + "vitess.io/vitess/go/ptr" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -92,6 +94,7 @@ type RowDiff struct { // vdiff contains the metadata for performing vdiff for one workflow. type vdiff struct { + env *vtenv.Environment ts *trafficSwitcher sourceCell string targetCell string @@ -114,9 +117,10 @@ type vdiff struct { // compareColInfo contains the metadata for a column of the table being diffed type compareColInfo struct { - colIndex int // index of the column in the filter's select - collation collations.ID // is the collation of the column, if any - isPK bool // is this column part of the primary key + colIndex int // index of the column in the filter's select + collation collations.ID // is the collation of the column, if any + values *evalengine.EnumSetValues // is the list of enum or set values for the column, if any + isPK bool // is this column part of the primary key } // tableDiffer performs a diff for one table in the workflow. @@ -142,6 +146,9 @@ type tableDiffer struct { // source Primitive and targetPrimitive are used for streaming sourcePrimitive engine.Primitive targetPrimitive engine.Primitive + + collationEnv *collations.Environment + parser *sqlparser.Parser } // shardStreamer streams rows from one shard. 
This works for @@ -207,6 +214,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou } // Initialize vdiff df := &vdiff{ + env: wr.env, ts: ts, sourceCell: sourceCell, targetCell: targetCell, @@ -241,7 +249,7 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou if err != nil { return nil, vterrors.Wrap(err, "GetSchema") } - if err = df.buildVDiffPlan(ctx, oneFilter, schm, df.tables); err != nil { + if err = df.buildVDiffPlan(oneFilter, schm, df.tables); err != nil { return nil, vterrors.Wrap(err, "buildVDiffPlan") } @@ -369,11 +377,11 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou diffReports[table] = dr } if format == "json" { - json, err := json.MarshalIndent(diffReports, "", "") + j, err := json.MarshalIndent(diffReports, "", "") if err != nil { wr.Logger().Printf("Error converting report to json: %v", err.Error()) } - jsonOutput += string(json) + jsonOutput += string(j) wr.logger.Printf("%s", jsonOutput) } else { for table, dr := range diffReports { @@ -443,7 +451,7 @@ func (df *vdiff) diffTable(ctx context.Context, wr *Wrangler, table string, td * } // buildVDiffPlan builds all the differs. -func (df *vdiff) buildVDiffPlan(ctx context.Context, filter *binlogdatapb.Filter, schm *tabletmanagerdatapb.SchemaDefinition, tablesToInclude []string) error { +func (df *vdiff) buildVDiffPlan(filter *binlogdatapb.Filter, schm *tabletmanagerdatapb.SchemaDefinition, tablesToInclude []string) error { df.differs = make(map[string]*tableDiffer) for _, table := range schm.TableDefinitions { rule, err := vreplication.MatchTable(table.Name, filter) @@ -485,8 +493,8 @@ func (df *vdiff) buildVDiffPlan(ctx context.Context, filter *binlogdatapb.Filter // findPKs identifies PKs, determines any collations to be used for // them, and removes them from the columns used for data comparison. 
-func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser.Select, td *tableDiffer) (sqlparser.OrderBy, error) { - columnCollations, err := getColumnCollations(table) +func findPKs(env *vtenv.Environment, table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser.Select, td *tableDiffer) (sqlparser.OrderBy, error) { + columnCollations, columnValues, err := getColumnCollations(env, table) if err != nil { return nil, err } @@ -507,6 +515,7 @@ func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser if strings.EqualFold(pk, colname) { td.compareCols[i].isPK = true td.compareCols[i].collation = columnCollations[strings.ToLower(colname)] + td.compareCols[i].values = columnValues[strings.ToLower(colname)] td.comparePKs = append(td.comparePKs, td.compareCols[i]) td.selectPks = append(td.selectPks, i) // We'll be comparing pks separately. So, remove them from compareCols. @@ -528,51 +537,52 @@ func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser } // getColumnCollations determines the proper collation to use for each -// column in the table definition leveraging MySQL's collation inheritence +// column in the table definition leveraging MySQL's collation inheritance // rules. 
-func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string]collations.ID, error) { - collationEnv := collations.Local() - createstmt, err := sqlparser.Parse(table.Schema) +func getColumnCollations(venv *vtenv.Environment, table *tabletmanagerdatapb.TableDefinition) (map[string]collations.ID, map[string]*evalengine.EnumSetValues, error) { + createstmt, err := venv.Parser().Parse(table.Schema) if err != nil { - return nil, err + return nil, nil, err } createtable, ok := createstmt.(*sqlparser.CreateTable) if !ok { - return nil, vterrors.Wrapf(err, "invalid table schema %s for table %s", table.Schema, table.Name) + return nil, nil, vterrors.Wrapf(err, "invalid table schema %s for table %s", table.Schema, table.Name) } - tableschema, err := schemadiff.NewCreateTableEntity(createtable) + env := schemadiff.NewEnv(venv, venv.CollationEnv().DefaultConnectionCharset()) + tableschema, err := schemadiff.NewCreateTableEntity(env, createtable) if err != nil { - return nil, vterrors.Wrapf(err, "invalid table schema %s for table %s", table.Schema, table.Name) + return nil, nil, vterrors.Wrapf(err, "invalid table schema %s for table %s", table.Schema, table.Name) } tableCharset := tableschema.GetCharset() tableCollation := tableschema.GetCollation() // If no explicit collation is specified for the column then we need - // to walk the inheritence tree. + // to walk the inheritance tree. getColumnCollation := func(column *sqlparser.ColumnDefinition) collations.ID { // If there's an explicit collation listed then use that. if column.Type.Options.Collate != "" { - return collationEnv.LookupByName(strings.ToLower(column.Type.Options.Collate)) + return env.CollationEnv().LookupByName(strings.ToLower(column.Type.Options.Collate)) } // If the column has a charset listed then the default collation // for that charset is used. 
if column.Type.Charset.Name != "" { - return collationEnv.DefaultCollationForCharset(strings.ToLower(column.Type.Charset.Name)) + return env.CollationEnv().DefaultCollationForCharset(strings.ToLower(column.Type.Charset.Name)) } // If the table has an explicit collation listed then use that. if tableCollation != "" { - return collationEnv.LookupByName(strings.ToLower(tableCollation)) + return env.CollationEnv().LookupByName(strings.ToLower(tableCollation)) } // If the table has a charset listed then use the default collation // for that charset. if tableCharset != "" { - return collationEnv.DefaultCollationForCharset(strings.ToLower(tableCharset)) + return env.CollationEnv().DefaultCollationForCharset(strings.ToLower(tableCharset)) } // The table is using the global default charset and collation and - // we inherite that. - return collations.Default() + // we inherit that. + return env.CollationEnv().DefaultConnectionCharset() } columnCollations := make(map[string]collations.ID) + columnValues := make(map[string]*evalengine.EnumSetValues) for _, column := range tableschema.TableSpec.Columns { // If it's not a character based type then no collation is used. 
if !sqltypes.IsQuoted(column.Type.SQLType()) { @@ -580,8 +590,12 @@ func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string continue } columnCollations[column.Name.Lowered()] = getColumnCollation(column) + if len(column.Type.EnumValues) == 0 { + continue + } + columnValues[column.Name.Lowered()] = ptr.Of(evalengine.EnumSetValues(column.Type.EnumValues)) } - return columnCollations, nil + return columnCollations, columnValues, nil } // If SourceTimeZone is defined in the BinlogSource, the VReplication workflow would have converted the datetime @@ -605,10 +619,10 @@ func (df *vdiff) adjustForSourceTimeZone(targetSelectExprs sqlparser.SelectExprs if fieldType == querypb.Type_DATETIME { convertTZFuncExpr = &sqlparser.FuncExpr{ Name: sqlparser.NewIdentifierCI("convert_tz"), - Exprs: sqlparser.SelectExprs{ - expr, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(df.targetTimeZone)}, - &sqlparser.AliasedExpr{Expr: sqlparser.NewStrLiteral(df.sourceTimeZone)}, + Exprs: sqlparser.Exprs{ + colAs, + sqlparser.NewStrLiteral(df.targetTimeZone), + sqlparser.NewStrLiteral(df.sourceTimeZone), }, } log.Infof("converting datetime column %s using convert_tz()", colName) @@ -646,7 +660,7 @@ func getColumnNameForSelectExpr(selectExpression sqlparser.SelectExpr) (string, // buildTablePlan builds one tableDiffer. 
func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, query string) (*tableDiffer, error) { - statement, err := sqlparser.Parse(query) + statement, err := df.env.Parser().Parse(query) if err != nil { return nil, err } @@ -655,7 +669,9 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer return nil, fmt.Errorf("unexpected: %v", sqlparser.String(statement)) } td := &tableDiffer{ - targetTable: table.Name, + targetTable: table.Name, + collationEnv: df.env.CollationEnv(), + parser: df.env.Parser(), } sourceSelect := &sqlparser.Select{} targetSelect := &sqlparser.Select{} @@ -672,14 +688,14 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer } case *sqlparser.AliasedExpr: var targetCol *sqlparser.ColName - if !selExpr.As.IsEmpty() { - targetCol = &sqlparser.ColName{Name: selExpr.As} - } else { + if selExpr.As.IsEmpty() { if colAs, ok := selExpr.Expr.(*sqlparser.ColName); ok { targetCol = colAs } else { return nil, fmt.Errorf("expression needs an alias: %v", sqlparser.String(selExpr)) } + } else { + targetCol = &sqlparser.ColName{Name: selExpr.As} } // If the input was "select a as b", then source will use "a" and target will use "b". 
sourceSelect.SelectExprs = append(sourceSelect.SelectExprs, selExpr) @@ -696,7 +712,7 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer aggregates = append(aggregates, engine.NewAggregateParam( /*opcode*/ opcode.AggregateSum, /*offset*/ len(sourceSelect.SelectExprs)-1, - /*alias*/ "")) + /*alias*/ "", df.env.CollationEnv())) } } default: @@ -735,7 +751,7 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer }, } - orderby, err := findPKs(table, targetSelect, td) + orderby, err := findPKs(df.env, table, targetSelect, td) if err != nil { return nil, err } @@ -751,14 +767,14 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer td.sourceExpression = sqlparser.String(sourceSelect) td.targetExpression = sqlparser.String(targetSelect) - td.sourcePrimitive = newMergeSorter(df.sources, td.comparePKs) - td.targetPrimitive = newMergeSorter(df.targets, td.comparePKs) + td.sourcePrimitive = newMergeSorter(df.sources, td.comparePKs, df.env.CollationEnv()) + td.targetPrimitive = newMergeSorter(df.targets, td.comparePKs, df.env.CollationEnv()) // If there were aggregate expressions, we have to re-aggregate // the results, which engine.OrderedAggregate can do. 
if len(aggregates) != 0 { td.sourcePrimitive = &engine.OrderedAggregate{ Aggregates: aggregates, - GroupByKeys: pkColsToGroupByParams(td.pkCols), + GroupByKeys: pkColsToGroupByParams(td.pkCols, td.collationEnv), Input: td.sourcePrimitive, } } @@ -766,16 +782,16 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer return td, nil } -func pkColsToGroupByParams(pkCols []int) []*engine.GroupByParams { +func pkColsToGroupByParams(pkCols []int, collationEnv *collations.Environment) []*engine.GroupByParams { var res []*engine.GroupByParams for _, col := range pkCols { - res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, Type: evalengine.UnknownType()}) + res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, Type: evalengine.Type{}, CollationEnv: collationEnv}) } return res } // newMergeSorter creates an engine.MergeSort based on the shard streamers and pk columns. -func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo) *engine.MergeSort { +func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo, collationEnv *collations.Environment) *engine.MergeSort { prims := make([]engine.StreamExecutor, 0, len(participants)) for _, participant := range participants { prims = append(prims, participant) @@ -784,11 +800,11 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare for _, cpk := range comparePKs { weightStringCol := -1 // if the collation is nil or unknown, use binary collation to compare as bytes - t := evalengine.Type{Type: sqltypes.Unknown, Coll: collations.CollationBinaryID} + var collation collations.ID = collations.CollationBinaryID if cpk.collation != collations.Unknown { - t.Coll = cpk.collation + collation = cpk.collation } - ob = append(ob, evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: t}) + ob = append(ob, evalengine.OrderByParams{Col: cpk.colIndex, 
WeightStringCol: weightStringCol, Type: evalengine.NewType(sqltypes.Unknown, collation), CollationEnv: collationEnv}) } return &engine.MergeSort{ Primitives: prims, @@ -960,7 +976,7 @@ func (df *vdiff) streamOne(ctx context.Context, keyspace, shard string, particip // Wrap the streaming in a separate function so we can capture the error. // This shows that the error will be set before the channels are closed. participant.err = func() error { - conn, err := tabletconn.GetDialer()(participant.tablet, grpcclient.FailFast(false)) + conn, err := tabletconn.GetDialer()(ctx, participant.tablet, grpcclient.FailFast(false)) if err != nil { return err } @@ -996,7 +1012,7 @@ func (df *vdiff) streamOne(ctx context.Context, keyspace, shard string, particip }() } -// syncTargets fast-forwards the vreplication to the source snapshot positons +// syncTargets fast-forwards the vreplication to the source snapshot positions // and waits for the selected tablets to catch up to that point. func (df *vdiff) syncTargets(ctx context.Context, filteredReplicationWaitTime time.Duration) error { waitCtx, cancel := context.WithTimeout(ctx, filteredReplicationWaitTime) @@ -1309,7 +1325,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com if col.collation == collations.Unknown { collationID = collations.CollationBinaryID } - c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) + c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], td.collationEnv, collationID, col.values) if err != nil { return 0, err } @@ -1323,7 +1339,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com func (td *tableDiffer) genRowDiff(queryStmt string, row []sqltypes.Value, debug, onlyPks bool) (*RowDiff, error) { drp := &RowDiff{} drp.Row = make(map[string]sqltypes.Value) - statement, err := sqlparser.Parse(queryStmt) + statement, err := td.parser.Parse(queryStmt) if err != nil { 
return nil, err } diff --git a/go/vt/wrangler/vdiff_env_test.go b/go/vt/wrangler/vdiff_env_test.go index 01f3a3a0f9e..5d1967770ce 100644 --- a/go/vt/wrangler/vdiff_env_test.go +++ b/go/vt/wrangler/vdiff_env_test.go @@ -19,17 +19,17 @@ package wrangler import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "testing" "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -78,11 +78,11 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), env.topoServ, env.tmc) // Generate a unique dialer name. 
- dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { env.mu.Lock() defer env.mu.Unlock() if qs, ok := env.tablets[int(tablet.Alias.Uid)]; ok { diff --git a/go/vt/wrangler/vdiff_test.go b/go/vt/wrangler/vdiff_test.go index 28422b6cd4d..3ac6edb373c 100644 --- a/go/vt/wrangler/vdiff_test.go +++ b/go/vt/wrangler/vdiff_test.go @@ -18,13 +18,10 @@ package wrangler import ( "context" - "reflect" "strings" "testing" "time" - "vitess.io/vitess/go/vt/vtgate/evalengine" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -34,11 +31,15 @@ import ( tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/engine/opcode" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) func TestVDiffPlanSuccess(t *testing.T) { + collationEnv := collations.MySQL8() + parser := sqlparser.NewTestParser() schm := &tabletmanagerdatapb.SchemaDefinition{ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ Name: "t1", @@ -93,12 +94,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: 
[]compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { input: &binlogdatapb.Rule{ @@ -110,12 +113,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { input: &binlogdatapb.Rule{ @@ -127,12 +132,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: 
[]compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { input: &binlogdatapb.Rule{ @@ -144,12 +151,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c2, c1 from t1 order by c1 asc", targetExpression: "select c2, c1 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, true}}, - comparePKs: []compareColInfo{{1, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, false}, {1, collations.Unknown, nil, true}}, + comparePKs: []compareColInfo{{1, collations.Unknown, nil, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { input: &binlogdatapb.Rule{ @@ -161,12 +170,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c0 as c1, c2 from t2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, 
collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // non-pk text column. @@ -179,12 +190,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "nonpktext", sourceExpression: "select c1, textcol from nonpktext order by c1 asc", targetExpression: "select c1, textcol from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // non-pk text column, different order. 
@@ -197,12 +210,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "nonpktext", sourceExpression: "select textcol, c1 from nonpktext order by c1 asc", targetExpression: "select textcol, c1 from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, true}}, - comparePKs: []compareColInfo{{1, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, false}, {1, collations.Unknown, nil, true}}, + comparePKs: []compareColInfo{{1, collations.Unknown, nil, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // pk text column. 
@@ -215,12 +230,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select textcol, c2 from pktext order by textcol asc", targetExpression: "select textcol, c2 from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Default(), true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Default(), true}}, + compareCols: []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), nil, false}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), nil, false}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // pk text column, different order. 
@@ -233,12 +250,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select c2, textcol from pktext order by textcol asc", targetExpression: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Default(), true}}, - comparePKs: []compareColInfo{{1, collations.Default(), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, false}, {1, collationEnv.DefaultConnectionCharset(), nil, true}}, + comparePKs: []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), nil, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), nil, false}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), nil, false}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // text column as expression. 
@@ -251,12 +270,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select c2, a + b as textcol from pktext order by textcol asc", targetExpression: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Default(), true}}, - comparePKs: []compareColInfo{{1, collations.Default(), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, false}, {1, collationEnv.DefaultConnectionCharset(), nil, true}}, + comparePKs: []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), nil, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), nil, false}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), nil, false}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { input: &binlogdatapb.Rule{ @@ -267,12 +288,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "multipk", sourceExpression: "select c1, c2 from multipk order by c1 asc, c2 asc", targetExpression: "select c1, c2 from multipk order by c1 asc, c2 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, true}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, true}}, pkCols: []int{0, 1}, selectPks: []int{0, 1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}), - 
targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // in_keyrange @@ -285,12 +308,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // in_keyrange on RHS of AND. 
@@ -304,12 +329,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // in_keyrange on LHS of AND. 
@@ -323,12 +350,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // in_keyrange on cascaded AND expression @@ -342,12 +371,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 and c1 = 1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, 
[]compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // in_keyrange parenthesized @@ -361,12 +392,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 where c2 = 2 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // group by @@ -379,12 +412,14 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "t1", sourceExpression: "select c1, c2 from t1 group by c1 order by c1 asc", targetExpression: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: 
newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }, { // aggregations @@ -397,19 +432,21 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "aggr", sourceExpression: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1 order by c1 asc", targetExpression: "select c1, c2, c3, c4 from aggr order by c1 asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}, {2, collations.Unknown, false}, {3, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}, {2, collations.Unknown, nil, false}, {3, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, sourcePrimitive: &engine.OrderedAggregate{ Aggregates: []*engine.AggregateParams{ - engine.NewAggregateParam(opcode.AggregateSum, 2, ""), - engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + engine.NewAggregateParam(opcode.AggregateSum, 2, "", collationEnv), + engine.NewAggregateParam(opcode.AggregateSum, 3, "", collationEnv), }, - GroupByKeys: []*engine.GroupByParams{{KeyCol: 0, WeightStringCol: -1, Type: evalengine.UnknownType()}}, - Input: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + GroupByKeys: []*engine.GroupByParams{{KeyCol: 0, WeightStringCol: -1, CollationEnv: collations.MySQL8()}}, + Input: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), }, - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + 
parser: parser, }, }, { input: &binlogdatapb.Rule{ @@ -421,20 +458,22 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "datze", sourceExpression: "select id, dt from datze order by id asc", targetExpression: "select id, convert_tz(dt, 'UTC', 'US/Pacific') as dt from datze order by id asc", - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, nil, true}}, collationEnv), + collationEnv: collationEnv, + parser: parser, }, }} for _, tcase := range testcases { t.Run(tcase.input.Filter, func(t *testing.T) { filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} - df := &vdiff{sourceTimeZone: tcase.sourceTimeZone, targetTimeZone: "UTC"} - err := df.buildVDiffPlan(context.Background(), filter, schm, nil) + df := &vdiff{env: vtenv.NewTestEnv(), sourceTimeZone: tcase.sourceTimeZone, targetTimeZone: "UTC"} + err := df.buildVDiffPlan(filter, schm, nil) require.NoError(t, err, tcase.input) require.Equal(t, 1, len(df.differs), tcase.input) assert.Equal(t, tcase.td, df.differs[tcase.table], tcase.input) @@ -488,8 +527,8 @@ func TestVDiffPlanFailure(t *testing.T) { }} for _, tcase := range testcases { filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} - df := &vdiff{} - err := df.buildVDiffPlan(context.Background(), filter, schm, nil) + df := &vdiff{env: 
vtenv.NewTestEnv()} + err := df.buildVDiffPlan(filter, schm, nil) assert.EqualError(t, err, tcase.err, tcase.input) } } @@ -1038,13 +1077,13 @@ func TestVDiffFindPKs(t *testing.T) { }, }, tdIn: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, false}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, false}, {1, collations.Unknown, nil, false}}, comparePKs: []compareColInfo{}, pkCols: []int{}, }, tdOut: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}}, pkCols: []int{0}, selectPks: []int{0}, }, @@ -1066,22 +1105,23 @@ func TestVDiffFindPKs(t *testing.T) { }, }, tdIn: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Unknown, false}, {2, collations.Unknown, false}, {3, collations.Unknown, false}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, false}, {1, collations.Unknown, nil, false}, {2, collations.Unknown, nil, false}, {3, collations.Unknown, nil, false}}, comparePKs: []compareColInfo{}, pkCols: []int{}, }, tdOut: &tableDiffer{ - compareCols: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, false}, {2, collations.Unknown, false}, {3, collations.Unknown, true}}, - comparePKs: []compareColInfo{{0, collations.Unknown, true}, {3, collations.Unknown, true}}, + compareCols: []compareColInfo{{0, collations.Unknown, nil, true}, {1, collations.Unknown, nil, false}, {2, collations.Unknown, nil, false}, {3, collations.Unknown, nil, true}}, + comparePKs: []compareColInfo{{0, collations.Unknown, nil, true}, {3, collations.Unknown, nil, true}}, pkCols: []int{0, 3}, selectPks: []int{0, 3}, }, }, } + env := vtenv.NewTestEnv() for _, tc := range 
testcases { t.Run(tc.name, func(t *testing.T) { - _, err := findPKs(tc.table, tc.targetSelect, tc.tdIn) + _, err := findPKs(env, tc.table, tc.targetSelect, tc.tdIn) require.NoError(t, err) require.EqualValues(t, tc.tdOut, tc.tdIn) }) @@ -1118,35 +1158,36 @@ func TestVDiffPlanInclude(t *testing.T) { }}, } - df := &vdiff{} + df := &vdiff{env: vtenv.NewTestEnv()} rule := &binlogdatapb.Rule{ Match: "/.*", } filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{rule}} var err error - err = df.buildVDiffPlan(context.Background(), filter, schm, []string{"t2"}) + err = df.buildVDiffPlan(filter, schm, []string{"t2"}) require.NoError(t, err) require.Equal(t, 1, len(df.differs)) - err = df.buildVDiffPlan(context.Background(), filter, schm, []string{"t2", "t3"}) + err = df.buildVDiffPlan(filter, schm, []string{"t2", "t3"}) require.NoError(t, err) require.Equal(t, 2, len(df.differs)) - err = df.buildVDiffPlan(context.Background(), filter, schm, []string{"t1", "t2", "t3"}) + err = df.buildVDiffPlan(filter, schm, []string{"t1", "t2", "t3"}) require.NoError(t, err) require.Equal(t, 3, len(df.differs)) - err = df.buildVDiffPlan(context.Background(), filter, schm, []string{"t1", "t2", "t3", "t4"}) + err = df.buildVDiffPlan(filter, schm, []string{"t1", "t2", "t3", "t4"}) require.NoError(t, err) require.Equal(t, 4, len(df.differs)) - err = df.buildVDiffPlan(context.Background(), filter, schm, []string{"t1", "t2", "t3", "t5"}) + err = df.buildVDiffPlan(filter, schm, []string{"t1", "t2", "t3", "t5"}) require.Error(t, err) } func TestGetColumnCollations(t *testing.T) { - collationEnv := collations.Local() + collationEnv := collations.MySQL8() tests := []struct { - name string - table *tabletmanagerdatapb.TableDefinition - want map[string]collations.ID - wantErr bool + name string + table *tabletmanagerdatapb.TableDefinition + wantCols map[string]collations.ID + wantValues map[string]*evalengine.EnumSetValues + wantErr bool }{ { name: "invalid schema", @@ -1160,93 +1201,128 @@ func 
TestGetColumnCollations(t *testing.T) { table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 int, name varchar(10), primary key(c1))", }, - want: map[string]collations.ID{ + wantCols: map[string]collations.ID{ "c1": collations.Unknown, - "name": collations.Default(), + "name": collationEnv.DefaultConnectionCharset(), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "char pk with global default collation", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1))", }, - want: map[string]collations.ID{ - "c1": collations.Default(), - "name": collations.Default(), + wantCols: map[string]collations.ID{ + "c1": collationEnv.DefaultConnectionCharset(), + "name": collationEnv.DefaultConnectionCharset(), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "compound char int pk with global default collation", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 int, name varchar(10), primary key(c1, name))", }, - want: map[string]collations.ID{ + wantCols: map[string]collations.ID{ "c1": collations.Unknown, - "name": collations.Default(), + "name": collationEnv.DefaultConnectionCharset(), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "char pk with table default charset", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1)) default character set ucs2", }, - want: map[string]collations.ID{ + wantCols: map[string]collations.ID{ "c1": collationEnv.DefaultCollationForCharset("ucs2"), "name": collationEnv.DefaultCollationForCharset("ucs2"), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "char pk with table default collation", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1)) charset=utf32 collate=utf32_icelandic_ci", }, - want: map[string]collations.ID{ + 
wantCols: map[string]collations.ID{ "c1": collationEnv.LookupByName("utf32_icelandic_ci"), "name": collationEnv.LookupByName("utf32_icelandic_ci"), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "char pk with column charset override", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10) charset sjis, name varchar(10), primary key(c1)) character set=utf8", }, - want: map[string]collations.ID{ + wantCols: map[string]collations.ID{ "c1": collationEnv.DefaultCollationForCharset("sjis"), "name": collationEnv.DefaultCollationForCharset("utf8mb3"), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "char pk with column collation override", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10) collate hebrew_bin, name varchar(10), primary key(c1)) charset=hebrew", }, - want: map[string]collations.ID{ + wantCols: map[string]collations.ID{ "c1": collationEnv.LookupByName("hebrew_bin"), "name": collationEnv.DefaultCollationForCharset("hebrew"), }, + wantValues: map[string]*evalengine.EnumSetValues{}, }, { name: "compound char int pk with column collation override", table: &tabletmanagerdatapb.TableDefinition{ Schema: "create table t1 (c1 varchar(10) collate utf16_turkish_ci, c2 int, name varchar(10), primary key(c1, c2)) charset=utf16 collate=utf16_icelandic_ci", }, - want: map[string]collations.ID{ + wantCols: map[string]collations.ID{ "c1": collationEnv.LookupByName("utf16_turkish_ci"), "c2": collations.Unknown, "name": collationEnv.LookupByName("utf16_icelandic_ci"), }, + wantValues: map[string]*evalengine.EnumSetValues{}, + }, + { + name: "col with enum values", + table: &tabletmanagerdatapb.TableDefinition{ + Schema: "create table t1 (c1 varchar(10), size enum('small', 'medium', 'large'), primary key(c1))", + }, + wantCols: map[string]collations.ID{ + "c1": collationEnv.DefaultConnectionCharset(), + "size": collationEnv.DefaultConnectionCharset(), + }, + wantValues: 
map[string]*evalengine.EnumSetValues{ + "size": {"'small'", "'medium'", "'large'"}, + }, + }, + { + name: "col with set values", + table: &tabletmanagerdatapb.TableDefinition{ + Schema: "create table t1 (c1 varchar(10), size set('small', 'medium', 'large'), primary key(c1))", + }, + wantCols: map[string]collations.ID{ + "c1": collationEnv.DefaultConnectionCharset(), + "size": collationEnv.DefaultConnectionCharset(), + }, + wantValues: map[string]*evalengine.EnumSetValues{ + "size": {"'small'", "'medium'", "'large'"}, + }, }, } + env := vtenv.NewTestEnv() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := getColumnCollations(tt.table) - if (err != nil) != tt.wantErr { - t.Errorf("getColumnCollations() error = %v, wantErr = %t", err, tt.wantErr) + gotCols, gotValues, err := getColumnCollations(env, tt.table) + if tt.wantErr { + require.Error(t, err) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("getColumnCollations() = %+v, want %+v", got, tt.want) - } + require.NoError(t, err) + require.Equal(t, tt.wantCols, gotCols) + require.Equal(t, tt.wantValues, gotValues) }) } } diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index 0734fa7b593..2c279c5c6cf 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -44,6 +44,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" workflow2 "vitess.io/vitess/go/vt/vtctl/workflow" vtctldvexec "vitess.io/vitess/go/vt/vtctl/workflow/vexec" // renamed to avoid a collision with the vexec struct in this package ) @@ -158,7 +159,7 @@ func (wr *Wrangler) VExec(ctx context.Context, workflow, keyspace, query string, if wr.VExecFunc != nil { return wr.VExecFunc(ctx, workflow, keyspace, query, dryRun) } - results, err := wr.runVexec(ctx, workflow, keyspace, query, nil, dryRun) + results, err := 
wr.runVexec(ctx, workflow, keyspace, query, nil, dryRun, nil) retResults := make(map[*topo.TabletInfo]*sqltypes.Result) for tablet, result := range results { retResults[tablet] = sqltypes.Proto3ToResult(result) @@ -167,10 +168,13 @@ func (wr *Wrangler) VExec(ctx context.Context, workflow, keyspace, query string, } // runVexec is the main function that runs a dry or wet execution of 'query' on backend shards. -func (wr *Wrangler) runVexec(ctx context.Context, workflow, keyspace, query string, callback func(context.Context, *topo.TabletInfo) (*querypb.QueryResult, error), dryRun bool) (map[*topo.TabletInfo]*querypb.QueryResult, error) { +func (wr *Wrangler) runVexec(ctx context.Context, workflow, keyspace, query string, + callback func(context.Context, *topo.TabletInfo) (*querypb.QueryResult, error), + dryRun bool, shards []string) (map[*topo.TabletInfo]*querypb.QueryResult, error) { + vx := newVExec(ctx, workflow, keyspace, query, wr) - if err := vx.getPrimaries(); err != nil { + if err := vx.getPrimaries(shards); err != nil { return nil, err } if callback == nil { // Using legacy SQL query path @@ -275,7 +279,7 @@ func (vx *vexec) execCallback(callback func(context.Context, *topo.TabletInfo) ( // parseQuery parses the input query func (vx *vexec) parseQuery() (err error) { - if vx.stmt, err = sqlparser.Parse(vx.query); err != nil { + if vx.stmt, err = vx.wr.SQLParser().Parse(vx.query); err != nil { return err } if vx.tableName, err = extractTableName(vx.stmt); err != nil { @@ -285,15 +289,13 @@ func (vx *vexec) parseQuery() (err error) { } // getPrimaries identifies primary tablet for all shards relevant to our keyspace -func (vx *vexec) getPrimaries() error { +func (vx *vexec) getPrimaries(shards []string) error { var err error - shards, err := vx.wr.ts.GetShardNames(vx.ctx, vx.keyspace) + shards, err = vx.wr.getShardSubset(vx.ctx, vx.keyspace, shards) if err != nil { return err } - if len(shards) == 0 { - return fmt.Errorf("no shards found in keyspace %s", 
vx.keyspace) - } + var allPrimaries []*topo.TabletInfo var primary *topo.TabletInfo for _, shard := range shards { @@ -340,10 +342,11 @@ func (wr *Wrangler) convertQueryResultToSQLTypesResult(results map[*topo.TabletI // rpcReq is an optional argument for any actions that use the new RPC path. Today // that is only the update action. When using the SQL interface this is ignored and // you can pass nil. -func (wr *Wrangler) WorkflowAction(ctx context.Context, workflow, keyspace, action string, dryRun bool, rpcReq any) (map[*topo.TabletInfo]*sqltypes.Result, error) { +func (wr *Wrangler) WorkflowAction(ctx context.Context, workflow, keyspace, action string, dryRun bool, rpcReq any, + shards []string) (map[*topo.TabletInfo]*sqltypes.Result, error) { switch action { case "show": - replStatus, err := wr.ShowWorkflow(ctx, workflow, keyspace) + replStatus, err := wr.ShowWorkflow(ctx, workflow, keyspace, shards) if err != nil { return nil, err } @@ -358,7 +361,7 @@ func (wr *Wrangler) WorkflowAction(ctx context.Context, workflow, keyspace, acti return nil, err default: } - results, err := wr.execWorkflowAction(ctx, workflow, keyspace, action, dryRun, rpcReq) + results, err := wr.execWorkflowAction(ctx, workflow, keyspace, action, dryRun, rpcReq, shards) if err != nil { return nil, err } @@ -390,7 +393,7 @@ func (wr *Wrangler) getWorkflowActionQuery(action string) (string, error) { // canRestartWorkflow validates that, for an atomic copy workflow, none of the streams are still in the copy phase. // Since we copy all tables in a single snapshot, we cannot restart a workflow which broke before all tables were copied. 
func (wr *Wrangler) canRestartWorkflow(ctx context.Context, workflow, keyspace string) error { - res, err := wr.ShowWorkflow(ctx, workflow, keyspace) + res, err := wr.ShowWorkflow(ctx, workflow, keyspace, nil) if err != nil { return err } @@ -409,7 +412,8 @@ func (wr *Wrangler) canRestartWorkflow(ctx context.Context, workflow, keyspace s return nil } -func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, action string, dryRun bool, rpcReq any) (map[*topo.TabletInfo]*querypb.QueryResult, error) { +func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, action string, dryRun bool, rpcReq any, + shards []string) (map[*topo.TabletInfo]*querypb.QueryResult, error) { var callback func(context.Context, *topo.TabletInfo) (*querypb.QueryResult, error) = nil query, err := wr.getWorkflowActionQuery(action) if err != nil { @@ -452,7 +456,7 @@ func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, wr.Logger().Printf("On the following tablets in the %s keyspace for workflow %s:\n", keyspace, workflow) vx := newVExec(ctx, workflow, keyspace, "", wr) - if err := vx.getPrimaries(); err != nil { + if err := vx.getPrimaries(shards); err != nil { return nil, err } tablets := vx.primaries @@ -475,13 +479,15 @@ func (wr *Wrangler) execWorkflowAction(ctx context.Context, workflow, keyspace, } } - return wr.runVexec(ctx, workflow, keyspace, query, callback, dryRun) + return wr.runVexec(ctx, workflow, keyspace, query, callback, dryRun, shards) } -// WorkflowTagAction sets or clears the tags for a workflow in a keyspace +// WorkflowTagAction sets or clears the tags for a workflow in a keyspace. func (wr *Wrangler) WorkflowTagAction(ctx context.Context, keyspace string, workflow string, tags string) (map[*topo.TabletInfo]*sqltypes.Result, error) { + // A WHERE clause with the correct workflow name is automatically added + // to the query later on in vexec.addDefaultWheres(). 
query := fmt.Sprintf("update _vt.vreplication set tags = %s", encodeString(tags)) - results, err := wr.runVexec(ctx, workflow, keyspace, query, nil, false) + results, err := wr.runVexec(ctx, workflow, keyspace, query, nil, false, nil) return wr.convertQueryResultToSQLTypesResult(results), err } @@ -582,7 +588,7 @@ type ReplicationStatus struct { deferSecondaryKeys bool } -func (wr *Wrangler) getReplicationStatusFromRow(ctx context.Context, row sqltypes.RowNamedValues, primary *topo.TabletInfo) (*ReplicationStatus, string, error) { +func (wr *Wrangler) getReplicationStatusFromRow(ctx context.Context, row sqltypes.RowNamedValues, copyStates []copyState, primary *topo.TabletInfo) (*ReplicationStatus, string, error) { var err error var id int32 var timeUpdated, transactionTimestamp, timeHeartbeat, timeThrottled int64 @@ -661,9 +667,6 @@ func (wr *Wrangler) getReplicationStatusFromRow(ctx context.Context, row sqltype workflowSubType, _ = row.ToInt32("workflow_sub_type") deferSecondaryKeys, _ = row.ToBool("defer_secondary_keys") rowsCopied = row.AsInt64("rows_copied", 0) - if err != nil { - return nil, "", err - } status := &ReplicationStatus{ Shard: primary.Shard, @@ -688,21 +691,18 @@ func (wr *Wrangler) getReplicationStatusFromRow(ctx context.Context, row sqltype deferSecondaryKeys: deferSecondaryKeys, RowsCopied: rowsCopied, } - status.CopyState, err = wr.getCopyState(ctx, primary, id) - if err != nil { - return nil, "", err - } + status.CopyState = copyStates status.State = updateState(message, binlogdatapb.VReplicationWorkflowState(binlogdatapb.VReplicationWorkflowState_value[state]), status.CopyState, timeUpdated) return status, bls.Keyspace, nil } -func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string) (*ReplicationStatusResult, error) { +func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string, shards []string) (*ReplicationStatusResult, error) { var rsr ReplicationStatusResult rsr.ShardStatuses = 
make(map[string]*ShardReplicationStatus) rsr.Workflow = workflow - var results map[*topo.TabletInfo]*querypb.QueryResult - query := `select + + const query = `select id, source, pos, @@ -722,7 +722,7 @@ func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string) ( defer_secondary_keys, rows_copied from _vt.vreplication` - results, err := wr.runVexec(ctx, workflow, keyspace, query, nil, false) + results, err := wr.runVexec(ctx, workflow, keyspace, query, nil, false, shards) if err != nil { return nil, err } @@ -739,8 +739,27 @@ func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string) ( if len(nqr.Rows) == 0 { continue } + // Get all copy states for the shard. + vreplIDs := make([]int64, len(nqr.Rows)) + for i, row := range nqr.Rows { + vreplID, err := row.ToInt64("id") + if err != nil { + return nil, err + } + vreplIDs[i] = vreplID + } + copyStatesByVReplID, err := wr.getCopyStates(ctx, primary, vreplIDs) + if err != nil { + return nil, err + } for _, row := range nqr.Rows { - status, sk, err := wr.getReplicationStatusFromRow(ctx, row, primary) + vreplID, err := row.ToInt64("id") + if err != nil { + return nil, err + } + + copyStates := copyStatesByVReplID[vreplID] + status, sk, err := wr.getReplicationStatusFromRow(ctx, row, copyStates, primary) if err != nil { return nil, err } @@ -761,7 +780,7 @@ func (wr *Wrangler) getStreams(ctx context.Context, workflow, keyspace string) ( // Note: this is done here only because golang does // not currently support setting json tags in proto // declarations so that I could request it always be - // ommitted from marshalled JSON output: + // omitted from marshalled JSON output: // https://github.com/golang/protobuf/issues/52 status.Bls.OnDdl = 0 } @@ -838,7 +857,7 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ where = " where state <> 'Stopped'" } query := "select distinct workflow from _vt.vreplication" + where - vx := vtctldvexec.NewVExec(keyspace, 
"", wr.ts, wr.tmc) + vx := vtctldvexec.NewVExec(keyspace, "", wr.ts, wr.tmc, wr.SQLParser()) results, err := vx.QueryContext(ctx, query) if err != nil { return nil, err @@ -861,8 +880,9 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ } // ShowWorkflow will return all of the relevant replication related information for the given workflow. -func (wr *Wrangler) ShowWorkflow(ctx context.Context, workflow, keyspace string) (*ReplicationStatusResult, error) { - replStatus, err := wr.getStreams(ctx, workflow, keyspace) +// If shardSubset is nil, then all shards will be queried. +func (wr *Wrangler) ShowWorkflow(ctx context.Context, workflow, keyspace string, shardSubset []string) (*ReplicationStatusResult, error) { + replStatus, err := wr.getStreams(ctx, workflow, keyspace, shardSubset) if err != nil { return nil, err } @@ -902,27 +922,41 @@ func (wr *Wrangler) printWorkflowList(keyspace string, workflows []string) { wr.Logger().Printf("Following workflow(s) found in keyspace %s: %v\n", keyspace, list) } -func (wr *Wrangler) getCopyState(ctx context.Context, tablet *topo.TabletInfo, id int32) ([]copyState, error) { - var cs []copyState - query := fmt.Sprintf("select table_name, lastpk from _vt.copy_state where vrepl_id = %d and id in (select max(id) from _vt.copy_state where vrepl_id = %d group by vrepl_id, table_name)", - id, id) - qr, err := wr.VReplicationExec(ctx, tablet.Alias, query) +func (wr *Wrangler) getCopyStates(ctx context.Context, tablet *topo.TabletInfo, ids []int64) (map[int64][]copyState, error) { + idsBV, err := sqltypes.BuildBindVariable(ids) + if err != nil { + return nil, err + } + query, err := sqlparser.ParseAndBind("select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in %a and id in (select max(id) from _vt.copy_state where vrepl_id in %a group by vrepl_id, table_name)", + idsBV, idsBV) + if err != nil { + return nil, err + } + qr, err := wr.tmc.VReplicationExec(ctx, tablet.Tablet, query) if err 
!= nil { return nil, err } result := sqltypes.Proto3ToResult(qr) - if result != nil { - for _, row := range result.Rows { - // These fields are varbinary, but close enough - table := row[0].ToString() - lastPK := row[1].ToString() - copyState := copyState{ - Table: table, - LastPK: lastPK, - } - cs = append(cs, copyState) + if result == nil { + cs := make(map[int64][]copyState) + return cs, nil + } + + cs := make(map[int64][]copyState, len(result.Rows)) + for _, row := range result.Rows { + vreplID, err := row[0].ToInt64() + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "failed to cast vrepl_id to int64: %v", err) + } + // These fields are varbinary, but close enough + table := row[1].ToString() + lastPK := row[2].ToString() + copyState := copyState{ + Table: table, + LastPK: lastPK, } + cs[vreplID] = append(cs[vreplID], copyState) } return cs, nil diff --git a/go/vt/wrangler/vexec_plan.go b/go/vt/wrangler/vexec_plan.go index 5b68d9ada5f..76b2d0fe732 100644 --- a/go/vt/wrangler/vexec_plan.go +++ b/go/vt/wrangler/vexec_plan.go @@ -21,12 +21,13 @@ import ( "fmt" "strings" + "github.com/olekukonko/tablewriter" + "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" - "github.com/olekukonko/tablewriter" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // vexecPlan contains the final query to be sent to the tablets @@ -83,7 +84,7 @@ func (p vreplicationPlanner) exec( return qr, nil } func (p vreplicationPlanner) dryRun(ctx context.Context) error { - rsr, err := p.vx.wr.getStreams(p.vx.ctx, p.vx.workflow, p.vx.keyspace) + rsr, err := p.vx.wr.getStreams(p.vx.ctx, p.vx.workflow, p.vx.keyspace, nil) if err != nil { return err } @@ -119,9 +120,9 @@ const ( func extractTableName(stmt sqlparser.Statement) (string, error) { switch stmt := stmt.(type) { case *sqlparser.Update: - return 
sqlparser.String(stmt.TableExprs), nil + return sqlparser.ToString(stmt.TableExprs), nil case *sqlparser.Delete: - return sqlparser.String(stmt.TableExprs), nil + return sqlparser.ToString(stmt.TableExprs), nil case *sqlparser.Insert: return sqlparser.String(stmt.Table), nil case *sqlparser.Select: @@ -247,19 +248,13 @@ func (vx *vexec) buildUpdatePlan(ctx context.Context, planner vexecPlanner, upd if updatableColumnNames := plannerParams.updatableColumnNames; len(updatableColumnNames) > 0 { // if updatableColumnNames is non empty, then we must only accept changes to columns listed there for _, expr := range upd.Exprs { - isUpdatable := false - for _, updatableColName := range updatableColumnNames { - if expr.Name.Name.EqualString(updatableColName) { - isUpdatable = true - } - } - if !isUpdatable { + if !expr.Name.Name.EqualsAnyString(updatableColumnNames) { return nil, fmt.Errorf("%+v cannot be changed: %v", expr.Name.Name, sqlparser.String(expr)) } } } if templates := plannerParams.updateTemplates; len(templates) > 0 { - match, err := sqlparser.QueryMatchesTemplates(vx.query, templates) + match, err := vx.wr.env.Parser().QueryMatchesTemplates(vx.query, templates) if err != nil { return nil, err } @@ -278,7 +273,7 @@ func (vx *vexec) buildUpdatePlan(ctx context.Context, planner vexecPlanner, upd }, nil } -// buildUpdatePlan builds a plan for a DELETE query +// buildDeletePlan builds a plan for a DELETE query func (vx *vexec) buildDeletePlan(ctx context.Context, planner vexecPlanner, del *sqlparser.Delete) (*vexecPlan, error) { if del.Targets != nil { return nil, fmt.Errorf("unsupported construct: %v", sqlparser.String(del)) @@ -311,7 +306,7 @@ func (vx *vexec) buildInsertPlan(ctx context.Context, planner vexecPlanner, ins return nil, fmt.Errorf("query not supported by vexec: %s", sqlparser.String(ins)) } if len(templates) > 0 { - match, err := sqlparser.QueryMatchesTemplates(vx.query, templates) + match, err := vx.wr.env.Parser().QueryMatchesTemplates(vx.query, 
templates) if err != nil { return nil, err } @@ -329,7 +324,7 @@ func (vx *vexec) buildInsertPlan(ctx context.Context, planner vexecPlanner, ins }, nil } -// buildUpdatePlan builds a plan for a SELECT query +// buildSelectPlan builds a plan for a SELECT query func (vx *vexec) buildSelectPlan(ctx context.Context, planner vexecPlanner, sel *sqlparser.Select) (*vexecPlan, error) { sel.Where = vx.addDefaultWheres(planner, sel.Where) buf := sqlparser.NewTrackedBuffer(nil) diff --git a/go/vt/wrangler/vexec_test.go b/go/vt/wrangler/vexec_test.go index ead2be6a56f..27efbe61a9f 100644 --- a/go/vt/wrangler/vexec_test.go +++ b/go/vt/wrangler/vexec_test.go @@ -18,6 +18,7 @@ package wrangler import ( "context" + _ "embed" "fmt" "regexp" "sort" @@ -33,6 +34,16 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vtenv" +) + +var ( + //go:embed testdata/show-all-shards.json + want_show_all_shards string + //go:embed testdata/show-dash80.json + want_show_dash_80 string + //go:embed testdata/show-80dash.json + want_show_80_dash string ) func TestVExec(t *testing.T) { @@ -41,13 +52,13 @@ func TestVExec(t *testing.T) { workflow := "wrWorkflow" keyspace := "target" query := "update _vt.vreplication set state = 'Running'" - env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, time.Now().Unix()) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, nil, time.Now().Unix()) defer env.close() var logger = logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(vtenv.NewTestEnv(), logger, env.topoServ, env.tmc) vx := newVExec(ctx, workflow, keyspace, query, wr) - err := vx.getPrimaries() + err := vx.getPrimaries(nil) require.Nil(t, err) primaries := vx.primaries require.NotNil(t, primaries) @@ -78,7 +89,7 @@ func TestVExec(t *testing.T) { vx.plannedQuery = 
plan.parsedQuery.Query vx.exec() - res, err := wr.getStreams(ctx, workflow, keyspace) + res, err := wr.getStreams(ctx, workflow, keyspace, nil) require.NoError(t, err) require.Less(t, res.MaxVReplicationLag, int64(3 /*seconds*/)) // lag should be very small @@ -94,7 +105,7 @@ func TestVExec(t *testing.T) { result = sqltypes.MakeTestResult(sqltypes.MakeTestFields( "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys", "int64|varchar|varchar|varchar|varchar|int64|int64|int64"), - "1|keyspace:\"source\" shard:\"0\" filter:{rules:{match:\"t1\"}}||||0|0|0", + "1|keyspace:\"source\" shard:\"0\" filter:{rules:{match:\"t1\"} rules:{match:\"t2\"}}||||0|0|0", ) testCases = append(testCases, &TestCase{ name: "select", @@ -163,10 +174,12 @@ func TestVExec(t *testing.T) { | TABLET | ID | BINLOGSOURCE | STATE | DBNAME | CURRENT GTID | +----------------------+----+--------------------------------+---------+-----------+------------------------------------------+ | -80/zone1-0000000200 | 1 | keyspace:"source" shard:"0" | Copying | vt_target | 14b68925-696a-11ea-aee7-fec597a91f5e:1-3 | -| | | filter:{rules:{match:"t1"}} | | | | +| | | filter:{rules:{match:"t1"} | | | | +| | | rules:{match:"t2"}} | | | | +----------------------+----+--------------------------------+---------+-----------+------------------------------------------+ | 80-/zone1-0000000210 | 1 | keyspace:"source" shard:"0" | Copying | vt_target | 14b68925-696a-11ea-aee7-fec597a91f5e:1-3 | -| | | filter:{rules:{match:"t1"}} | | | | +| | | filter:{rules:{match:"t1"} | | | | +| | | rules:{match:"t2"}} | | | | +----------------------+----+--------------------------------+---------+-----------+------------------------------------------+`, } require.Equal(t, strings.Join(dryRunResults, "\n")+"\n\n\n\n\n", logger.String()) @@ -186,140 +199,52 @@ func TestWorkflowListStreams(t *testing.T) { defer cancel() workflow := "wrWorkflow" keyspace := "target" - env := newWranglerTestEnv(t, ctx, 
[]string{"0"}, []string{"-80", "80-"}, "", nil, 1234) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(vtenv.NewTestEnv(), logger, env.topoServ, env.tmc) - _, err := wr.WorkflowAction(ctx, workflow, keyspace, "listall", false, nil) + _, err := wr.WorkflowAction(ctx, workflow, keyspace, "listall", false, nil, nil) require.NoError(t, err) - _, err = wr.WorkflowAction(ctx, workflow, "badks", "show", false, nil) + _, err = wr.WorkflowAction(ctx, workflow, "badks", "show", false, nil, nil) require.Errorf(t, err, "node doesn't exist: keyspaces/badks/shards") - _, err = wr.WorkflowAction(ctx, "badwf", keyspace, "show", false, nil) + _, err = wr.WorkflowAction(ctx, "badwf", keyspace, "show", false, nil, nil) require.Errorf(t, err, "no streams found for workflow badwf in keyspace target") logger.Clear() - _, err = wr.WorkflowAction(ctx, workflow, keyspace, "show", false, nil) - require.NoError(t, err) - want := `{ - "Workflow": "wrWorkflow", - "SourceLocation": { - "Keyspace": "source", - "Shards": [ - "0" - ] - }, - "TargetLocation": { - "Keyspace": "target", - "Shards": [ - "-80", - "80-" - ] - }, - "MaxVReplicationLag": 0, - "MaxVReplicationTransactionLag": 0, - "Frozen": false, - "ShardStatuses": { - "-80/zone1-0000000200": { - "PrimaryReplicationStatuses": [ - { - "Shard": "-80", - "Tablet": "zone1-0000000200", - "ID": 1, - "Bls": { - "keyspace": "source", - "shard": "0", - "filter": { - "rules": [ - { - "match": "t1" - } - ] - } - }, - "Pos": "14b68925-696a-11ea-aee7-fec597a91f5e:1-3", - "StopPos": "", - "State": "Copying", - "DBName": "vt_target", - "TransactionTimestamp": 0, - "TimeUpdated": 1234, - "TimeHeartbeat": 1234, - "TimeThrottled": 0, - "ComponentThrottled": "", - "Message": "", - "Tags": "", - "WorkflowType": "Materialize", - "WorkflowSubType": "None", - "CopyState": [ - { - "Table": "t1", - "LastPK": "pk1" - } - 
], - "RowsCopied": 1000 - } - ], - "TabletControls": null, - "PrimaryIsServing": true - }, - "80-/zone1-0000000210": { - "PrimaryReplicationStatuses": [ - { - "Shard": "80-", - "Tablet": "zone1-0000000210", - "ID": 1, - "Bls": { - "keyspace": "source", - "shard": "0", - "filter": { - "rules": [ - { - "match": "t1" - } - ] - } - }, - "Pos": "14b68925-696a-11ea-aee7-fec597a91f5e:1-3", - "StopPos": "", - "State": "Copying", - "DBName": "vt_target", - "TransactionTimestamp": 0, - "TimeUpdated": 1234, - "TimeHeartbeat": 1234, - "TimeThrottled": 0, - "ComponentThrottled": "", - "Message": "", - "Tags": "", - "WorkflowType": "Materialize", - "WorkflowSubType": "None", - "CopyState": [ - { - "Table": "t1", - "LastPK": "pk1" - } - ], - "RowsCopied": 1000 - } - ], - "TabletControls": null, - "PrimaryIsServing": true - } - }, - "SourceTimeZone": "", - "TargetTimeZone": "" -} + var testCases = []struct { + shards []string + want string + }{ + {[]string{"-80", "80-"}, want_show_all_shards}, + {[]string{"-80"}, want_show_dash_80}, + {[]string{"80-"}, want_show_80_dash}, + } + scrub := func(s string) string { + s = strings.ReplaceAll(s, "\t", "") + s = strings.ReplaceAll(s, "\n", "") + s = strings.ReplaceAll(s, " ", "") + return s + } + for _, testCase := range testCases { + t.Run(fmt.Sprintf("%v", testCase.shards), func(t *testing.T) { + want := scrub(testCase.want) + _, err = wr.WorkflowAction(ctx, workflow, keyspace, "show", false, nil, testCase.shards) + require.NoError(t, err) + got := scrub(logger.String()) + // MaxVReplicationLag needs to be reset. This can't be determinable in this kind of a test because + // time.Now() is constantly shifting. 
+ re := regexp.MustCompile(`"MaxVReplicationLag":\d+`) + got = re.ReplaceAllLiteralString(got, `"MaxVReplicationLag":0`) + re = regexp.MustCompile(`"MaxVReplicationTransactionLag":\d+`) + got = re.ReplaceAllLiteralString(got, `"MaxVReplicationTransactionLag":0`) + require.Equal(t, want, got) + logger.Clear() + }) + } -` - got := logger.String() - // MaxVReplicationLag needs to be reset. This can't be determinable in this kind of a test because time.Now() is constantly shifting. - re := regexp.MustCompile(`"MaxVReplicationLag": \d+`) - got = re.ReplaceAllLiteralString(got, `"MaxVReplicationLag": 0`) - re = regexp.MustCompile(`"MaxVReplicationTransactionLag": \d+`) - got = re.ReplaceAllLiteralString(got, `"MaxVReplicationTransactionLag": 0`) - require.Equal(t, want, got) - - results, err := wr.execWorkflowAction(ctx, workflow, keyspace, "stop", false, nil) + results, err := wr.execWorkflowAction(ctx, workflow, keyspace, "stop", false, nil, nil) require.Nil(t, err) // convert map to list and sort it for comparison @@ -333,7 +258,7 @@ func TestWorkflowListStreams(t *testing.T) { require.ElementsMatch(t, wantResults, gotResults) logger.Clear() - results, err = wr.execWorkflowAction(ctx, workflow, keyspace, "stop", true, nil) + results, err = wr.execWorkflowAction(ctx, workflow, keyspace, "stop", true, nil, nil) require.Nil(t, err) require.Equal(t, "map[]", fmt.Sprintf("%v", results)) dryRunResult := `Query: update _vt.vreplication set state = 'Stopped' where db_name = 'vt_target' and workflow = 'wrWorkflow' @@ -344,10 +269,12 @@ will be run on the following streams in keyspace target for workflow wrWorkflow: | TABLET | ID | BINLOGSOURCE | STATE | DBNAME | CURRENT GTID | +----------------------+----+--------------------------------+---------+-----------+------------------------------------------+ | -80/zone1-0000000200 | 1 | keyspace:"source" shard:"0" | Copying | vt_target | 14b68925-696a-11ea-aee7-fec597a91f5e:1-3 | -| | | filter:{rules:{match:"t1"}} | | | | +| | | 
filter:{rules:{match:"t1"} | | | | +| | | rules:{match:"t2"}} | | | | +----------------------+----+--------------------------------+---------+-----------+------------------------------------------+ | 80-/zone1-0000000210 | 1 | keyspace:"source" shard:"0" | Copying | vt_target | 14b68925-696a-11ea-aee7-fec597a91f5e:1-3 | -| | | filter:{rules:{match:"t1"}} | | | | +| | | filter:{rules:{match:"t1"} | | | | +| | | rules:{match:"t2"}} | | | | +----------------------+----+--------------------------------+---------+-----------+------------------------------------------+ @@ -362,10 +289,10 @@ func TestWorkflowListAll(t *testing.T) { defer cancel() keyspace := "target" workflow := "wrWorkflow" - env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, nil, 0) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(vtenv.NewTestEnv(), logger, env.topoServ, env.tmc) workflows, err := wr.ListAllWorkflows(ctx, keyspace, true) require.Nil(t, err) @@ -383,10 +310,10 @@ func TestVExecValidations(t *testing.T) { workflow := "wf" keyspace := "ks" query := "" - env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, nil, 0) defer env.close() - wr := New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + wr := New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), env.topoServ, env.tmc) vx := newVExec(ctx, workflow, keyspace, query, wr) @@ -469,10 +396,10 @@ func TestWorkflowUpdate(t *testing.T) { defer cancel() workflow := "wrWorkflow" keyspace := "target" - env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) + env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr 
:= New(vtenv.NewTestEnv(), logger, env.topoServ, env.tmc) nullSlice := textutil.SimulatedNullStringSlice // Used to represent a non-provided value nullOnDDL := binlogdatapb.OnDDLAction(textutil.SimulatedNullInt) // Used to represent a non-provided value tests := []struct { @@ -528,7 +455,7 @@ func TestWorkflowUpdate(t *testing.T) { OnDdl: tcase.onDDL, } - _, err := wr.WorkflowAction(ctx, workflow, keyspace, "update", true, rpcReq) + _, err := wr.WorkflowAction(ctx, workflow, keyspace, "update", true, rpcReq, nil) if tcase.wantErr != "" { require.Error(t, err) require.Equal(t, err.Error(), tcase.wantErr) diff --git a/go/vt/wrangler/workflow.go b/go/vt/wrangler/workflow.go index d9dbcee7291..6862f5f4d3f 100644 --- a/go/vt/wrangler/workflow.go +++ b/go/vt/wrangler/workflow.go @@ -77,6 +77,11 @@ type VReplicationWorkflowParams struct { // MoveTables only NoRoutingRules bool + + // Only these shards will be expected to participate in the workflow. Expects user to know what they are doing + // and provide the correct set of shards associated with the workflow. This is for reducing latency for workflows + // that only use a small set of shards in a keyspace with a large number of shards. + ShardSubset []string } // VReplicationWorkflow stores various internal objects for a workflow @@ -101,6 +106,7 @@ func (vrw *VReplicationWorkflow) String() string { func (wr *Wrangler) NewVReplicationWorkflow(ctx context.Context, workflowType VReplicationWorkflowType, params *VReplicationWorkflowParams) (*VReplicationWorkflow, error) { + wr.WorkflowParams = params log.Infof("NewVReplicationWorkflow with params %+v", params) vrw := &VReplicationWorkflow{wr: wr, ctx: ctx, params: params, workflowType: workflowType} ts, ws, err := wr.getWorkflowState(ctx, params.TargetKeyspace, params.Workflow) @@ -187,6 +193,7 @@ func (vrw *VReplicationWorkflow) stateAsString(ws *workflow.State) string { // at the shard level, so reads are effectively switched on the // shard when writes are switched. 
if len(ws.ShardsAlreadySwitched) > 0 && len(ws.ShardsNotYetSwitched) > 0 { + sort.Strings(ws.ShardsAlreadySwitched) stateInfo = append(stateInfo, fmt.Sprintf("Reads partially switched, for shards: %s", strings.Join(ws.ShardsAlreadySwitched, ","))) stateInfo = append(stateInfo, fmt.Sprintf("Writes partially switched, for shards: %s", strings.Join(ws.ShardsAlreadySwitched, ","))) } else { @@ -253,12 +260,19 @@ func NewWorkflowError(tablet string, id int32, description string) *WorkflowErro return wfErr } +func (vrw *VReplicationWorkflow) IsPartialMigration() bool { + if vrw.ws == nil { + return false + } + return vrw.ws.IsPartialMigration +} + // GetStreamCount returns a count of total streams and of streams that have started processing -func (vrw *VReplicationWorkflow) GetStreamCount() (int64, int64, []*WorkflowError, error) { +func (vrw *VReplicationWorkflow) GetStreamCount(shards []string) (int64, int64, []*WorkflowError, error) { var err error var workflowErrors []*WorkflowError var total, started int64 - res, err := vrw.wr.ShowWorkflow(vrw.ctx, vrw.params.Workflow, vrw.params.TargetKeyspace) + res, err := vrw.wr.ShowWorkflow(vrw.ctx, vrw.params.Workflow, vrw.params.TargetKeyspace, shards) if err != nil { return 0, 0, nil, err } @@ -525,7 +539,7 @@ func (vrw *VReplicationWorkflow) canSwitch(keyspace, workflowName string) (reaso return "", nil } log.Infof("state:%s, direction %d, switched %t", vrw.CachedState(), vrw.params.Direction, ws.WritesSwitched) - result, err := vrw.wr.getStreams(vrw.ctx, workflowName, keyspace) + result, err := vrw.wr.getStreams(vrw.ctx, workflowName, keyspace, vrw.params.ShardSubset) if err != nil { return "", err } diff --git a/go/vt/wrangler/workflow_test.go b/go/vt/wrangler/workflow_test.go index be3589a3f58..4f508766330 100644 --- a/go/vt/wrangler/workflow_test.go +++ b/go/vt/wrangler/workflow_test.go @@ -108,19 +108,19 @@ func expectCanSwitchQueries(t *testing.T, tme *testMigraterEnv, keyspace, state 
"id|source|pos|stop_pos|max_replication_lag|state|db_name|time_updated|transaction_timestamp|time_heartbeat|time_throttled|component_throttled|message|tags", "int64|varchar|int64|int64|int64|varchar|varchar|int64|int64|int64|int64|varchar|varchar|varchar"), row) - copyStateResult := sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "table|lastpk", - "varchar|varchar"), - "t1|pk1", + copyStateResult := sqltypes.MakeTestResult( + sqltypes.MakeTestFields("vrepl_id|table|lastpk", "int64|varchar|varchar"), + "1|t1|pk1", + "1|t2|pk2", ) for _, db := range tme.dbTargetClients { db.addInvariant(streamExtInfoKs2, replicationResult) if state == "Copying" { - db.addInvariant(fmt.Sprintf(copyStateQuery, 1, 1), copyStateResult) + db.addInvariant(fmt.Sprintf(copyStateQuery, "1", "1"), copyStateResult) } else { - db.addInvariant(fmt.Sprintf(copyStateQuery, 1, 1), noResult) + db.addInvariant(fmt.Sprintf(copyStateQuery, "1", "1"), noResult) } } } @@ -378,6 +378,85 @@ func TestPartialMoveTables(t *testing.T) { require.Equal(t, WorkflowStateNotSwitched, wf.CurrentState()) } +// TestPartialMoveTablesShardSubset is a version of TestPartialMoveTables which uses the --shards option. 
+func TestPartialMoveTablesShardSubset(t *testing.T) { + ctx := context.Background() + shards := []string{"-40", "40-80", "80-c0", "c0-"} + shardsToMove := shards[0:2] + otherShards := shards[2:] + p := &VReplicationWorkflowParams{ + Workflow: "test", + WorkflowType: MoveTablesWorkflow, + SourceKeyspace: "ks1", + SourceShards: shardsToMove, // shard by shard + TargetShards: shardsToMove, // shard by shard + TargetKeyspace: "ks2", + Tables: "t1,t2", + Cells: "cell1,cell2", + TabletTypes: "REPLICA,RDONLY,PRIMARY", + Timeout: DefaultActionTimeout, + MaxAllowedTransactionLagSeconds: defaultMaxAllowedTransactionLagSeconds, + OnDDL: binlogdatapb.OnDDLAction_STOP.String(), + } + tme := newTestTablePartialMigrater(ctx, t, shards, shardsToMove, "select * %s") + defer tme.stopTablets(t) + + // Save some unrelated shard routing rules to be sure that + // they don't interfere in any way. + srr, err := tme.ts.GetShardRoutingRules(ctx) + require.NoError(t, err) + srr.Rules = append(srr.Rules, []*vschema.ShardRoutingRule{ + { + FromKeyspace: "wut", + Shard: "40-80", + ToKeyspace: "bloop", + }, + { + FromKeyspace: "haylo", + Shard: "-80", + ToKeyspace: "blarg", + }, + }...) + err = tme.ts.SaveShardRoutingRules(ctx, srr) + require.NoError(t, err) + + // Providing an incorrect shard should result in the workflow not being found. 
+ p.ShardSubset = otherShards + wf, err := tme.wr.NewVReplicationWorkflow(ctx, MoveTablesWorkflow, p) + require.NoError(t, err) + require.Nil(t, wf.ts) + + p.ShardSubset = shardsToMove + wf, err = tme.wr.NewVReplicationWorkflow(ctx, MoveTablesWorkflow, p) + require.NoError(t, err) + require.NotNil(t, wf) + require.Equal(t, WorkflowStateNotSwitched, wf.CurrentState()) + require.True(t, wf.ts.isPartialMigration, "expected partial shard migration") + + srr, err = tme.ts.GetShardRoutingRules(ctx) + require.NoError(t, err) + srr.Rules = append(srr.Rules, &vschema.ShardRoutingRule{ + FromKeyspace: "ks2", + Shard: "80-", + ToKeyspace: "ks1", + }) + err = tme.ts.SaveShardRoutingRules(ctx, srr) + require.NoError(t, err) + + tme.expectNoPreviousJournals() + expectMoveTablesQueries(t, tme, p) + tme.expectNoPreviousJournals() + wf.params.ShardSubset = shardsToMove + require.NoError(t, testSwitchForward(t, wf)) + require.Equal(t, "Reads partially switched, for shards: -40,40-80. Writes partially switched, for shards: -40,40-80", wf.CurrentState()) + require.NoError(t, err) + + tme.expectNoPreviousJournals() + tme.expectNoPreviousReverseJournals() + require.NoError(t, testReverse(t, wf)) + require.Equal(t, WorkflowStateNotSwitched, wf.CurrentState()) +} + func validateRoutingRuleCount(ctx context.Context, t *testing.T, ts *topo.Server, cnt int) { rr, err := ts.GetRoutingRules(ctx) require.NoError(t, err) @@ -671,7 +750,7 @@ func expectReshardQueries(t *testing.T, tme *testShardMigraterEnv, params *VRepl dbclient.addInvariant("delete from _vt.vreplication where id in (1)", noResult) dbclient.addInvariant("delete from _vt.copy_state where vrepl_id in (1)", noResult) dbclient.addInvariant("delete from _vt.post_copy_action where vrepl_id in (1)", noResult) - dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type)", &sqltypes.Result{InsertID: 
uint64(1)}) + dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, options)", &sqltypes.Result{InsertID: uint64(1)}) dbclient.addInvariant("select id from _vt.vreplication where id = 1", resultid1) dbclient.addInvariant("select id from _vt.vreplication where id = 2", resultid2) dbclient.addInvariant("select * from _vt.vreplication where id = 1", runningResult(1)) @@ -739,7 +818,7 @@ func expectMoveTablesQueries(t *testing.T, tme *testMigraterEnv, params *VReplic dbclient.addInvariant("select id from _vt.vreplication where id = 2", resultid2) dbclient.addInvariant("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (1)", noResult) dbclient.addInvariant("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (2)", noResult) - dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type)", &sqltypes.Result{InsertID: uint64(1)}) + dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, options)", &sqltypes.Result{InsertID: uint64(1)}) dbclient.addInvariant("update _vt.vreplication set message = 'FROZEN'", noResult) dbclient.addInvariant("select 1 from _vt.vreplication where db_name='vt_ks2' and workflow='test' and message!='FROZEN'", noResult) dbclient.addInvariant("delete from _vt.vreplication where id in (1)", noResult) @@ -758,7 +837,7 @@ func expectMoveTablesQueries(t *testing.T, tme *testMigraterEnv, params *VReplic for _, dbclient := range tme.dbSourceClients { dbclient.addInvariant("select val from _vt.resharding_journal", noResult) dbclient.addInvariant("update _vt.vreplication set 
message = 'FROZEN'", noResult) - dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type)", &sqltypes.Result{InsertID: uint64(1)}) + dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, options)", &sqltypes.Result{InsertID: uint64(1)}) dbclient.addInvariant("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (1)", noResult) dbclient.addInvariant("update _vt.vreplication set state = 'Stopped', message = 'stopped for cutover' where id in (2)", noResult) dbclient.addInvariant("select id from _vt.vreplication where id = 1", resultid1) @@ -767,7 +846,7 @@ func expectMoveTablesQueries(t *testing.T, tme *testMigraterEnv, params *VReplic dbclient.addInvariant("delete from _vt.vreplication where id in (1)", noResult) dbclient.addInvariant("delete from _vt.copy_state where vrepl_id in (1)", noResult) dbclient.addInvariant("delete from _vt.post_copy_action where vrepl_id in (1)", noResult) - dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type)", &sqltypes.Result{InsertID: uint64(1)}) + dbclient.addInvariant("insert into _vt.vreplication (workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, workflow_type, workflow_sub_type, options)", &sqltypes.Result{InsertID: uint64(1)}) dbclient.addInvariant("select * from _vt.vreplication where id = 1", runningResult(1)) dbclient.addInvariant("select * from _vt.vreplication where id = 2", runningResult(2)) dbclient.addInvariant("insert into _vt.resharding_journal", noResult) @@ -792,12 +871,14 @@ func expectMoveTablesQueries(t *testing.T, 
tme *testMigraterEnv, params *VReplic tme.dbSourceClients[0].addInvariant("select pos, state, message from _vt.vreplication where id=2", state) tme.dbSourceClients[1].addInvariant("select pos, state, message from _vt.vreplication where id=1", state) tme.dbSourceClients[1].addInvariant("select pos, state, message from _vt.vreplication where id=2", state) + tme.tmeDB.AddQuery("SET SESSION foreign_key_checks = OFF", &sqltypes.Result{}) tme.tmeDB.AddQuery("USE `vt_ks1`", noResult) tme.tmeDB.AddQuery("USE `vt_ks2`", noResult) tme.tmeDB.AddQuery("drop table `vt_ks1`.`t1`", noResult) tme.tmeDB.AddQuery("drop table `vt_ks1`.`t2`", noResult) tme.tmeDB.AddQuery("drop table `vt_ks2`.`t1`", noResult) tme.tmeDB.AddQuery("drop table `vt_ks2`.`t2`", noResult) + tme.tmeDB.AddQuery("SET SESSION foreign_key_checks = ON", &sqltypes.Result{}) tme.tmeDB.AddQuery("update _vt.vreplication set message='Picked source tablet: cell:\"cell1\" uid:10 ' where id=1", noResult) tme.tmeDB.AddQuery("lock tables `t1` read,`t2` read", &sqltypes.Result{}) tme.tmeDB.AddQuery("select distinct table_name from _vt.copy_state cs, _vt.vreplication vr where vr.id = cs.vrepl_id and vr.id = 1", noResult) diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go index dbb046a36b3..ee18643cc78 100644 --- a/go/vt/wrangler/wrangler.go +++ b/go/vt/wrangler/wrangler.go @@ -25,8 +25,10 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/tmclient" vtctlservicepb "vitess.io/vitess/go/vt/proto/vtctlservice" @@ -47,6 +49,7 @@ var ( // Multiple go routines can use the same Wrangler at the same time, // provided they want to share the same logger / topo server / lock timeout. 
type Wrangler struct { + env *vtenv.Environment logger logutil.Logger ts *topo.Server tmc tmclient.TabletManagerClient @@ -56,16 +59,18 @@ type Wrangler struct { // DO NOT USE in production code. VExecFunc func(ctx context.Context, workflow, keyspace, query string, dryRun bool) (map[*topo.TabletInfo]*sqltypes.Result, error) // Limt the number of concurrent background goroutines if needed. - sem *semaphore.Weighted + sem *semaphore.Weighted + WorkflowParams *VReplicationWorkflowParams } // New creates a new Wrangler object. -func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler { +func New(env *vtenv.Environment, logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler { return &Wrangler{ + env: env, logger: logger, ts: ts, tmc: tmc, - vtctld: grpcvtctldserver.NewVtctldServer(ts), + vtctld: grpcvtctldserver.NewVtctldServer(env, ts), sourceTs: ts, } } @@ -74,6 +79,7 @@ func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClien // in production. func NewTestWrangler(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler { return &Wrangler{ + env: vtenv.NewTestEnv(), logger: logger, ts: ts, tmc: tmc, @@ -109,3 +115,8 @@ func (wr *Wrangler) SetLogger(logger logutil.Logger) { func (wr *Wrangler) Logger() logutil.Logger { return wr.logger } + +// SQLParser returns the parser this wrangler is using. 
+func (wr *Wrangler) SQLParser() *sqlparser.Parser { + return wr.env.Parser() +} diff --git a/go/vt/wrangler/wrangler_env_test.go b/go/vt/wrangler/wrangler_env_test.go index 4dd5e342c35..2b174bee176 100644 --- a/go/vt/wrangler/wrangler_env_test.go +++ b/go/vt/wrangler/wrangler_env_test.go @@ -19,7 +19,7 @@ package wrangler import ( "context" "fmt" - "math/rand" + "math/rand/v2" "sync" "testing" @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" + "vitess.io/vitess/go/vt/vtenv" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -60,7 +61,7 @@ type testWranglerEnv struct { //---------------------------------------------- // testWranglerEnv -func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string, timeUpdated int64) *testWranglerEnv { +func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, positions map[string]string, timeUpdated int64) *testWranglerEnv { env := &testWranglerEnv{ workflow: "wrWorkflow", topoServ: memorytopo.NewServer(ctx, "zone1"), @@ -68,12 +69,12 @@ func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetS tabletType: topodatapb.TabletType_REPLICA, tmc: newTestWranglerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(vtenv.NewTestEnv(), logutil.NewConsoleLogger(), env.topoServ, env.tmc) env.tmc.tablets = make(map[int]*testWranglerTablet) // Generate a unique dialer name. 
- dialerName := fmt.Sprintf("WranglerTest-%s-%d", t.Name(), rand.Intn(1000000000)) - tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { + dialerName := fmt.Sprintf("WranglerTest-%s-%d", t.Name(), rand.IntN(1000000000)) + tabletconn.RegisterDialer(dialerName, func(ctx context.Context, tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) { env.mu.Lock() defer env.mu.Unlock() if qs, ok := env.tmc.tablets[int(tablet.Alias.Uid)]; ok { @@ -106,10 +107,16 @@ func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetS Keyspace: "source", Shard: sourceShard, Filter: &binlogdatapb.Filter{ - Rules: []*binlogdatapb.Rule{{ - Match: "t1", - Filter: query, - }}, + Rules: []*binlogdatapb.Rule{ + { + Match: "t1", + Filter: "", + }, + { + Match: "t2", + Filter: "", + }, + }, }, } rows = append(rows, fmt.Sprintf("%d|%v||||0|0|0", j+1, bls)) @@ -163,13 +170,13 @@ func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetS ) env.tmc.setVRResults(primary.tablet, "select distinct workflow from _vt.vreplication where state != 'Stopped' and db_name = 'vt_target'", result) - result = sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "table|lastpk", - "varchar|varchar"), - "t1|pk1", + result = sqltypes.MakeTestResult( + sqltypes.MakeTestFields("vrepl_id|table|lastpk", "int64|varchar|varchar"), + "1|t1|pk1", + "1|t2|pk2", ) - env.tmc.setVRResults(primary.tablet, "select table_name, lastpk from _vt.copy_state where vrepl_id = 1 and id in (select max(id) from _vt.copy_state where vrepl_id = 1 group by vrepl_id, table_name)", result) + env.tmc.setVRResults(primary.tablet, "select vrepl_id, table_name, lastpk from _vt.copy_state where vrepl_id in (1) and id in (select max(id) from _vt.copy_state where vrepl_id in (1) group by vrepl_id, table_name)", result) env.tmc.setVRResults(primary.tablet, "select id, source, pos, stop_pos, 
max_replication_lag, state, db_name, time_updated, transaction_timestamp, time_heartbeat, time_throttled, component_throttled, message, tags from _vt.vreplication where db_name = 'vt_target' and workflow = 'bad'", &sqltypes.Result{}) diff --git a/go/vt/zkctl/zkconf.go b/go/vt/zkctl/zkconf.go index 15a912231ff..92be0eb492e 100644 --- a/go/vt/zkctl/zkconf.go +++ b/go/vt/zkctl/zkconf.go @@ -24,7 +24,6 @@ limitations under the License. package zkctl import ( - "bytes" "fmt" "os" "path" @@ -94,29 +93,27 @@ func (cnf *ZkConfig) MyidFile() string { } func (cnf *ZkConfig) WriteMyid() error { - return os.WriteFile(cnf.MyidFile(), []byte(fmt.Sprintf("%v", cnf.ServerId)), 0664) + return os.WriteFile(cnf.MyidFile(), []byte(fmt.Sprintf("%v", cnf.ServerId)), 0o664) } /* Search for first existing file in cnfFiles and subsitute in the right values. */ func MakeZooCfg(cnfFiles []string, cnf *ZkConfig, header string) (string, error) { - myTemplateSource := new(bytes.Buffer) + var myTemplateSource strings.Builder for _, line := range strings.Split(header, "\n") { - fmt.Fprintf(myTemplateSource, "## %v\n", strings.TrimSpace(line)) + fmt.Fprintf(&myTemplateSource, "## %v\n", strings.TrimSpace(line)) } - var dataErr error + for _, path := range cnfFiles { - data, dataErr := os.ReadFile(path) - if dataErr != nil { + data, err := os.ReadFile(path) + if err != nil { continue } + myTemplateSource.WriteString("## " + path + "\n") myTemplateSource.Write(data) } - if dataErr != nil { - return "", dataErr - } myTemplateSource.WriteString("\n") // in case `data` did not end with a newline for _, extra := range cnf.Extra { @@ -127,9 +124,9 @@ func MakeZooCfg(cnfFiles []string, cnf *ZkConfig, header string) (string, error) if err != nil { return "", err } - cnfData := new(bytes.Buffer) - err = myTemplate.Execute(cnfData, cnf) - if err != nil { + + var cnfData strings.Builder + if err := myTemplate.Execute(&cnfData, cnf); err != nil { return "", err } return cnfData.String(), nil @@ -161,8 +158,10 
@@ func MakeZkConfigFromString(cmdLine string, myID uint32) *ZkConfig { } myID = myID % 1000 - zkServer := zkServerAddr{ServerId: uint32(serverID), ClientPort: 2181, - LeaderPort: 2888, ElectionPort: 3888} + zkServer := zkServerAddr{ + ServerId: uint32(serverID), ClientPort: 2181, + LeaderPort: 2888, ElectionPort: 3888, + } switch len(zkAddrParts) { case 4: zkServer.ClientPort, _ = strconv.Atoi(zkAddrParts[3]) diff --git a/go/vt/zkctl/zkctl.go b/go/vt/zkctl/zkctl.go index acb8dba6356..60102d1bbf9 100644 --- a/go/vt/zkctl/zkctl.go +++ b/go/vt/zkctl/zkctl.go @@ -33,6 +33,7 @@ import ( zookeeper "github.com/z-division/go-zookeeper/zk" + "vitess.io/vitess/go/syscallutil" "vitess.io/vitess/go/vt/env" "vitess.io/vitess/go/vt/log" ) @@ -137,13 +138,13 @@ func (zkd *Zkd) Shutdown() error { if err != nil { return err } - err = syscall.Kill(pid, syscall.SIGKILL) + err = syscallutil.Kill(pid, syscall.SIGKILL) if err != nil && err != syscall.ESRCH { return err } timeout := time.Now().Add(shutdownWaitTime) for time.Now().Before(timeout) { - if syscall.Kill(pid, syscall.SIGKILL) == syscall.ESRCH { + if syscallutil.Kill(pid, syscall.SIGKILL) == syscall.ESRCH { return nil } time.Sleep(time.Second) diff --git a/go/vt/zkctl/zkctl_test.go b/go/vt/zkctl/zkctl_test.go index e237c572eae..5e4c856b5a7 100644 --- a/go/vt/zkctl/zkctl_test.go +++ b/go/vt/zkctl/zkctl_test.go @@ -35,13 +35,16 @@ func TestLifeCycle(t *testing.T) { myID := 255 zkConf := MakeZkConfigFromString(config, uint32(myID)) - zkExtraConfLine := "tcpKeepAlive=true" - zkConf.Extra = []string{zkExtraConfLine} + tpcKeepAliveCfg := "tcpKeepAlive=true" + adminServerCfg := "admin.serverPort=8081" + zkConf.Extra = []string{tpcKeepAliveCfg, adminServerCfg} if zkObservedConf, err := MakeZooCfg([]string{zkConf.ConfigFile()}, zkConf, "header"); err != nil { t.Fatalf("MakeZooCfg err: %v", err) - } else if !strings.Contains(string(zkObservedConf), fmt.Sprintf("\n%s\n", zkExtraConfLine)) { - t.Fatalf("Expected zkExtraConfLine in 
zkObservedConf") + } else if !strings.Contains(zkObservedConf, fmt.Sprintf("\n%s\n", tpcKeepAliveCfg)) { + t.Fatalf("Expected tpcKeepAliveCfg in zkObservedConf") + } else if !strings.Contains(zkObservedConf, fmt.Sprintf("\n%s\n", adminServerCfg)) { + t.Fatalf("Expected adminServerCfg in zkObservedConf") } zkd := NewZkd(zkConf) diff --git a/go/vtbench/client.go b/go/vtbench/client.go index 1a6751a62db..3e3ef3c495d 100644 --- a/go/vtbench/client.go +++ b/go/vtbench/client.go @@ -137,7 +137,7 @@ func (c *grpcVttabletConn) connect(ctx context.Context, cp ConnParams) error { Keyspace: keyspace, } var err error - qs, err = tabletconn.GetDialer()(&tablet, true) + qs, err = tabletconn.GetDialer()(ctx, &tablet, true) if err != nil { return err } diff --git a/go/yaml2/yaml_test.go b/go/yaml2/yaml_test.go new file mode 100644 index 00000000000..6d6503711ca --- /dev/null +++ b/go/yaml2/yaml_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2024 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package yaml2 + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestYamlVars(t *testing.T) { + type TestStruct struct { + StringField string `yaml:"stringfield"` + IntField int `yaml:"intfield"` + BoolField bool `yaml:"boolfield"` + Float64Field float64 `yaml:"float64field"` + } + + inputData := TestStruct{ + "tricky text to test text", + 32, + true, + 3.141, + } + + //testing Marshal + var marshalData []byte + var err error + t.Run("Marshal", func(t *testing.T) { + marshalData, err = Marshal(inputData) + assert.NoError(t, err) + require.EqualValues(t, `BoolField: true +Float64Field: 3.141 +IntField: 32 +StringField: tricky text to test text +`, string(marshalData)) + }) + + //testing Unmarshal + t.Run("Unmarshal", func(t *testing.T) { + var unmarshalData TestStruct + err = Unmarshal(marshalData, &unmarshalData) + assert.NoError(t, err) + assert.Equal(t, inputData, unmarshalData) + + unmarshalData.StringField = "changed text" + assert.NotEqual(t, inputData, unmarshalData) + }) +} diff --git a/java/client/pom.xml b/java/client/pom.xml index 60af9f72fde..9e0fa8fd2d1 100644 --- a/java/client/pom.xml +++ b/java/client/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 19.0.0-SNAPSHOT + 20.0.0-SNAPSHOT vitess-client diff --git a/java/example/pom.xml b/java/example/pom.xml index 7be8aa0cada..b0d013ac927 100644 --- a/java/example/pom.xml +++ b/java/example/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 19.0.0-SNAPSHOT + 20.0.0-SNAPSHOT vitess-example diff --git a/java/grpc-client/pom.xml b/java/grpc-client/pom.xml index cdd75855e74..20681619ca8 100644 --- a/java/grpc-client/pom.xml +++ b/java/grpc-client/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 19.0.0-SNAPSHOT + 20.0.0-SNAPSHOT vitess-grpc-client diff --git a/java/jdbc/pom.xml b/java/jdbc/pom.xml index 98a3f14ea0a..d8587f8e7b1 100644 --- a/java/jdbc/pom.xml +++ b/java/jdbc/pom.xml @@ -5,7 +5,7 @@ io.vitess vitess-parent - 19.0.0-SNAPSHOT + 
20.0.0-SNAPSHOT vitess-jdbc diff --git a/java/pom.xml b/java/pom.xml index e424b304229..4737ea80a9c 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -11,7 +11,7 @@ io.vitessvitess-parent - 19.0.0-SNAPSHOT + 20.0.0-SNAPSHOTpomVitess Java Client libraries [Parent] @@ -69,7 +69,7 @@ 1.57.1 - 4.1.93.Final + 4.1.94.Final2.0.61.Final3.24.3 @@ -94,7 +94,7 @@ com.google.guava guava - 30.1.1-jre + 32.0.0-jre com.google.protobuf diff --git a/misc/git/hooks/golangci-lint b/misc/git/hooks/golangci-lint index 3ad4775271a..d152cb965a3 100755 --- a/misc/git/hooks/golangci-lint +++ b/misc/git/hooks/golangci-lint @@ -16,7 +16,7 @@ GOLANGCI_LINT=$(command -v golangci-lint >/dev/null 2>&1) if [ $? -eq 1 ]; then echo "Downloading golangci-lint..." - go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 + go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.56.2 fi gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '^go/.*\.go$') diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index b1f36261522..1e70275e8b5 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -176,6 +176,9 @@ message Rule { // such columns need to have special transofrmation of the data, from an integral format into a // string format. e.g. the value 0 needs to be converted to '0'. map convert_int_to_enum = 8; + + // ForceUniqueKey gives vtreamer a hint for `FORCE INDEX (...)` usage. + string force_unique_key = 9; } // Filter represents a list of ordered rules. The first @@ -349,6 +352,14 @@ message FieldEvent { repeated query.Field fields = 2; string keyspace = 3; string shard = 4; + // Are ENUM and SET field values already mapped to strings in the ROW + // events? This allows us to transition VTGate VStream consumers from + // the pre v20 behavior of having to do this mapping themselves to the + // v20+ behavior of not having to do this anymore and to expect string + // values directly. 
+ // NOTE: because this is the use case, this is ONLY ever set today in + // vstreams managed by the vstreamManager. + bool enum_set_string_values = 25; } // ShardGtid contains the GTID position for one shard. diff --git a/proto/mysqlctl.proto b/proto/mysqlctl.proto index bc67cef07c1..7e5fe13b991 100644 --- a/proto/mysqlctl.proto +++ b/proto/mysqlctl.proto @@ -33,6 +33,7 @@ message StartResponse{} message ShutdownRequest{ bool wait_for_mysqld = 1; + vttime.Duration mysql_shutdown_timeout = 2; } message ShutdownResponse{} diff --git a/proto/query.proto b/proto/query.proto index 4d94fcb2c83..d4e99af7c7d 100644 --- a/proto/query.proto +++ b/proto/query.proto @@ -71,7 +71,7 @@ message EventToken { // Flags sent from the MySQL C API enum MySqlFlag { option allow_alias = true; - + EMPTY = 0; NOT_NULL_FLAG = 1; PRI_KEY_FLAG = 2; @@ -277,7 +277,7 @@ message ExecuteOptions { // query timeouts are shorter. // OLAP: DMLS not allowed, no limit on row count, timeouts // can be as high as desired. - // DBA: no limit on rowcount or timeout, all queries allowed + // DBA: no limit on rowcount or timeout, all queries allowed // but intended for long DMLs and DDLs. Workload workload = 6; @@ -317,8 +317,8 @@ message ExecuteOptions { Gen4CompareV3 = 6; V3Insert = 7; } - - // PlannerVersion specifies which planner to use. + + // PlannerVersion specifies which planner to use. // If DEFAULT is chosen, whatever vtgate was started with will be used PlannerVersion planner_version = 11; @@ -883,6 +883,9 @@ message RealtimeStats { // view_schema_changed is to provide list of views that have schema changes detected by the tablet. repeated string view_schema_changed = 8; + + // udfs_changed is used to signal that the UDFs have changed on the tablet. 
+ bool udfs_changed = 9; } // AggregateStats contains information about the health of a group of @@ -989,6 +992,7 @@ enum SchemaTableType { VIEWS = 0; TABLES = 1; ALL = 2; + UDFS = 3; } // GetSchemaRequest is the payload to GetSchema @@ -998,8 +1002,16 @@ message GetSchemaRequest { repeated string table_names = 3; } +// UDFInfo represents the information about a UDF. +message UDFInfo { + string name = 1; + bool aggregating = 2; + Type return_type = 3; +} + // GetSchemaResponse is the returned value from GetSchema message GetSchemaResponse { - // this is for the schema definition for the requested tables. + repeated UDFInfo udfs = 1; + // this is for the schema definition for the requested tables and views. map table_definition = 2; -} \ No newline at end of file +} diff --git a/proto/replicationdata.proto b/proto/replicationdata.proto index 2f98e30576f..1a8b608f984 100644 --- a/proto/replicationdata.proto +++ b/proto/replicationdata.proto @@ -52,6 +52,14 @@ message Status { bool replication_lag_unknown = 24; } +// Configuration holds replication configuration information gathered from performance_schema and global variables. +message Configuration { + // HeartbeatInterval controls the heartbeat interval that the primary sends to the replica + double heartbeat_interval = 1; + // ReplicaNetTimeout specifies the number of seconds to wait for more data or a heartbeat signal from the source before the replica considers the connection broken + int32 replica_net_timeout = 2; +} + // StopReplicationStatus represents the replication status before calling StopReplication, and the replication status collected immediately after // calling StopReplication. message StopReplicationStatus { @@ -65,7 +73,7 @@ enum StopReplicationMode { IOTHREADONLY = 1; } -// PrimaryStatus is the replication status for a MySQL primary (returned by 'show master status'). +// PrimaryStatus is the replication status for a MySQL primary (returned by 'show binary log status'). 
message PrimaryStatus { string position = 1; string file_position = 2; @@ -94,4 +102,5 @@ message FullStatus { uint64 semi_sync_primary_timeout = 19; uint32 semi_sync_wait_for_replica_count = 20; bool super_read_only = 21; + replicationdata.Configuration replication_configuration = 22; } diff --git a/proto/tabletmanagerdata.proto b/proto/tabletmanagerdata.proto index fc9f6fa97b9..f853e2e4ea8 100644 --- a/proto/tabletmanagerdata.proto +++ b/proto/tabletmanagerdata.proto @@ -219,8 +219,10 @@ message ApplySchemaRequest { SchemaDefinition before_schema = 4; SchemaDefinition after_schema = 5; string sql_mode = 6; - // BatchSize indicates how many queries to apply together + // BatchSize indicates how many queries to apply together. int64 batch_size = 7; + // DisableForeignKeyChecks will result in setting foreign_key_checks to off before applying the schema. + bool disable_foreign_key_checks = 8; } message ApplySchemaResponse { @@ -259,12 +261,26 @@ message ExecuteFetchAsDbaRequest { uint64 max_rows = 3; bool disable_binlogs = 4; bool reload_schema = 5; + bool disable_foreign_key_checks = 6; } message ExecuteFetchAsDbaResponse { query.QueryResult result = 1; } +message ExecuteMultiFetchAsDbaRequest { + bytes sql = 1; + string db_name = 2; + uint64 max_rows = 3; + bool disable_binlogs = 4; + bool reload_schema = 5; + bool disable_foreign_key_checks = 6; +} + +message ExecuteMultiFetchAsDbaResponse { + repeated query.QueryResult results = 1; +} + message ExecuteFetchAsAllPrivsRequest { bytes query = 1; string db_name = 2; @@ -408,7 +424,7 @@ message DemotePrimaryResponse { //string deprecated_position = 1 [deprecated = true]; reserved 1; - // PrimaryStatus represents the response from calling `SHOW MASTER STATUS` on a primary that has been demoted. + // PrimaryStatus represents the response from calling `SHOW BINARY LOG STATUS` on a primary that has been demoted. 
replicationdata.PrimaryStatus primary_status = 2; } @@ -444,6 +460,7 @@ message SetReplicationSourceRequest { bool force_start_replication = 3; string wait_position = 4; bool semiSync = 5; + double heartbeat_interval = 6; } message SetReplicationSourceResponse { @@ -483,7 +500,7 @@ message PromoteReplicaResponse { // Backup / Restore related messages message BackupRequest { - int64 concurrency = 1; + int32 concurrency = 1; bool allow_primary = 2; // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. @@ -535,6 +552,7 @@ message CreateVReplicationWorkflowRequest { bool auto_start = 9; // Should the workflow stop after the copy phase. bool stop_after_copy = 10; + string options = 11; } message CreateVReplicationWorkflowResponse { @@ -549,6 +567,26 @@ message DeleteVReplicationWorkflowResponse { query.QueryResult result = 1; } +message HasVReplicationWorkflowsRequest { +} + +message HasVReplicationWorkflowsResponse { + bool has = 1; +} + +message ReadVReplicationWorkflowsRequest { + repeated int32 include_ids = 1; + repeated string include_workflows = 2; + repeated binlogdata.VReplicationWorkflowState include_states = 3; + repeated string exclude_workflows = 4; + repeated binlogdata.VReplicationWorkflowState exclude_states = 5; + bool exclude_frozen = 6; +} + +message ReadVReplicationWorkflowsResponse { + repeated ReadVReplicationWorkflowResponse workflows = 1; +} + message ReadVReplicationWorkflowRequest { string workflow = 1; } @@ -580,6 +618,7 @@ message ReadVReplicationWorkflowResponse { string component_throttled = 14; } repeated Stream streams = 11; + string options = 12; } message VDiffRequest { @@ -609,17 +648,19 @@ message VDiffReportOptions { bool only_pks = 1; bool debug_query = 2; string format = 3; + int64 max_sample_rows = 4; } message VDiffCoreOptions { string tables = 1; bool auto_retry = 2; - int64 max_rows = 3; + int64 max_rows = 3; bool 
checksum = 4; int64 sample_pct = 5; int64 timeout_seconds = 6; int64 max_extra_rows_to_compare = 7; bool update_table_stats = 8; + int64 max_diff_seconds = 9; } message VDiffOptions { @@ -628,6 +669,12 @@ message VDiffOptions { VDiffReportOptions report_options = 3; } +// UpdateVReplicationWorkflowRequest is used to update an existing VReplication +// workflow. Note that the following fields MUST have an explicit value provided +// if you do NOT wish to update the existing value to the given type's ZeroValue: +// cells, tablet_types, on_ddl, and state. +// TODO: leverage the optional modifier for these fields rather than using SimulatedNull +// values: https://github.com/vitessio/vitess/issues/15627 message UpdateVReplicationWorkflowRequest { string workflow = 1; repeated string cells = 2; @@ -635,12 +682,32 @@ message UpdateVReplicationWorkflowRequest { TabletSelectionPreference tablet_selection_preference = 4; binlogdata.OnDDLAction on_ddl = 5; binlogdata.VReplicationWorkflowState state = 6; + reserved 7; // unused, was: repeated string shards } message UpdateVReplicationWorkflowResponse { query.QueryResult result = 1; } +// UpdateVReplicationWorkflowsRequest is used to update multiple existing VReplication +// workflows. Note that the following fields MUST have an explicit value provided +// if you do NOT wish to update the existing values to the given type's ZeroValue: +// state, message, and stop_position. 
+// TODO: leverage the optional modifier for these fields rather than using SimulatedNull +// values: https://github.com/vitessio/vitess/issues/15627 +message UpdateVReplicationWorkflowsRequest { + bool all_workflows = 1; + repeated string include_workflows = 2; + repeated string exclude_workflows = 3; + binlogdata.VReplicationWorkflowState state = 4; + string message = 5; + string stop_position = 6; +} + +message UpdateVReplicationWorkflowsResponse { + query.QueryResult result = 1; +} + message ResetSequencesRequest { repeated string tables = 1; } diff --git a/proto/tabletmanagerservice.proto b/proto/tabletmanagerservice.proto index 7492bdd7cca..862b4819563 100644 --- a/proto/tabletmanagerservice.proto +++ b/proto/tabletmanagerservice.proto @@ -76,6 +76,8 @@ service TabletManager { rpc ExecuteFetchAsDba(tabletmanagerdata.ExecuteFetchAsDbaRequest) returns (tabletmanagerdata.ExecuteFetchAsDbaResponse) {}; + rpc ExecuteMultiFetchAsDba(tabletmanagerdata.ExecuteMultiFetchAsDbaRequest) returns (tabletmanagerdata.ExecuteMultiFetchAsDbaResponse) {}; + rpc ExecuteFetchAsAllPrivs(tabletmanagerdata.ExecuteFetchAsAllPrivsRequest) returns (tabletmanagerdata.ExecuteFetchAsAllPrivsResponse) {}; rpc ExecuteFetchAsApp(tabletmanagerdata.ExecuteFetchAsAppRequest) returns (tabletmanagerdata.ExecuteFetchAsAppResponse) {}; @@ -116,10 +118,13 @@ service TabletManager { // VReplication API rpc CreateVReplicationWorkflow(tabletmanagerdata.CreateVReplicationWorkflowRequest) returns (tabletmanagerdata.CreateVReplicationWorkflowResponse) {}; rpc DeleteVReplicationWorkflow(tabletmanagerdata.DeleteVReplicationWorkflowRequest) returns(tabletmanagerdata.DeleteVReplicationWorkflowResponse) {}; + rpc HasVReplicationWorkflows(tabletmanagerdata.HasVReplicationWorkflowsRequest) returns(tabletmanagerdata.HasVReplicationWorkflowsResponse) {}; rpc ReadVReplicationWorkflow(tabletmanagerdata.ReadVReplicationWorkflowRequest) returns(tabletmanagerdata.ReadVReplicationWorkflowResponse) {}; + rpc 
ReadVReplicationWorkflows(tabletmanagerdata.ReadVReplicationWorkflowsRequest) returns(tabletmanagerdata.ReadVReplicationWorkflowsResponse) {}; rpc VReplicationExec(tabletmanagerdata.VReplicationExecRequest) returns(tabletmanagerdata.VReplicationExecResponse) {}; rpc VReplicationWaitForPos(tabletmanagerdata.VReplicationWaitForPosRequest) returns(tabletmanagerdata.VReplicationWaitForPosResponse) {}; rpc UpdateVReplicationWorkflow(tabletmanagerdata.UpdateVReplicationWorkflowRequest) returns(tabletmanagerdata.UpdateVReplicationWorkflowResponse) {}; + rpc UpdateVReplicationWorkflows(tabletmanagerdata.UpdateVReplicationWorkflowsRequest) returns(tabletmanagerdata.UpdateVReplicationWorkflowsResponse) {}; // VDiff API rpc VDiff(tabletmanagerdata.VDiffRequest) returns(tabletmanagerdata.VDiffResponse) {}; diff --git a/proto/topodata.proto b/proto/topodata.proto index c921f72dfa4..364095be0ee 100644 --- a/proto/topodata.proto +++ b/proto/topodata.proto @@ -267,22 +267,8 @@ message Keyspace { // OBSOLETE int32 split_shard_count = 3; reserved 3; - // ServedFrom indicates a relationship between a TabletType and the - // keyspace name that's serving it. - message ServedFrom { - // the tablet type (key for the map) - TabletType tablet_type = 1; - - // the cells to limit this to - repeated string cells = 2; - - // the keyspace name that's serving it - string keyspace = 3; - } - - // ServedFrom will redirect the appropriate traffic to - // another keyspace. - repeated ServedFrom served_froms = 4; + // OBSOLETE ServedFrom served_froms = 4; + reserved 4; // keyspace_type will determine how this keyspace is treated by // vtgate / vschema. Normal keyspaces are routable by @@ -417,16 +403,6 @@ message SrvKeyspace { // The partitions this keyspace is serving, per tablet type. repeated KeyspacePartition partitions = 1; - // ServedFrom indicates a relationship between a TabletType and the - // keyspace name that's serving it. 
- message ServedFrom { - // the tablet type - TabletType tablet_type = 1; - - // the keyspace name that's serving it - string keyspace = 2; - } - // copied from Keyspace // OBSOLETE string sharding_column_name = 2; reserved 2; @@ -434,7 +410,8 @@ message SrvKeyspace { // OBSOLETE KeyspaceIdType sharding_column_type = 3; reserved 3; - repeated ServedFrom served_from = 4; + // OBSOLETE repeated ServedFrom served_from = 4; + reserved 4; // OBSOLETE int32 split_shard_count = 5; reserved 5; diff --git a/proto/vschema.proto b/proto/vschema.proto index 067be686db5..dd327f863dc 100644 --- a/proto/vschema.proto +++ b/proto/vschema.proto @@ -54,6 +54,16 @@ message Keyspace { unmanaged = 2; managed = 3; } + + // multi_tenant_mode specifies that the keyspace is multi-tenant. Currently used during migrations with MoveTables. + MultiTenantSpec multi_tenant_spec = 6; +} + +message MultiTenantSpec { + // tenant_column is the name of the column that specifies the tenant id. + string tenant_id_column_name = 1; + // tenant_column_type is the type of the column that specifies the tenant id. + query.Type tenant_id_column_type = 2; } // Vindex is the vindex info for a Keyspace. @@ -127,6 +137,13 @@ message Column { string name = 1; query.Type type = 2; bool invisible = 3; + string default = 4; + string collation_name = 5; + int32 size = 6; + int32 scale = 7; + optional bool nullable = 8; + // values contains the list of values for an enum or set column. + repeated string values = 9; } // SrvVSchema is the roll-up of all the Keyspace schema for a cell. @@ -135,6 +152,7 @@ message SrvVSchema { map keyspaces = 1; RoutingRules routing_rules = 2; // table routing rules ShardRoutingRules shard_routing_rules = 3; + KeyspaceRoutingRules keyspace_routing_rules = 4; } // ShardRoutingRules specify the shard routing rules for the VSchema. @@ -142,9 +160,19 @@ message ShardRoutingRules { repeated ShardRoutingRule rules = 1; } -// RoutingRule specifies a routing rule. 
+// ShardRoutingRule specifies a routing rule. message ShardRoutingRule { string from_keyspace = 1; string to_keyspace = 2; string shard = 3; } + +message KeyspaceRoutingRules { + repeated KeyspaceRoutingRule rules = 1; +} + +message KeyspaceRoutingRule { + string from_keyspace = 1; + string to_keyspace = 2; +} + diff --git a/proto/vtadmin.proto b/proto/vtadmin.proto index 6a387466879..d6f1047fc1e 100644 --- a/proto/vtadmin.proto +++ b/proto/vtadmin.proto @@ -33,6 +33,17 @@ import "vtctldata.proto"; // VTAdmin is the Vitess Admin API service. It provides RPCs that operate on // across a range of Vitess clusters. service VTAdmin { + // ApplySchema applies a schema to a keyspace in the given cluster. + rpc ApplySchema(ApplySchemaRequest) returns (vtctldata.ApplySchemaResponse) {}; + // CancelSchemaMigration cancels one or all schema migrations in the given + // cluster, terminating any running ones as needed. + rpc CancelSchemaMigration(CancelSchemaMigrationRequest) returns (vtctldata.CancelSchemaMigrationResponse) {}; + // CleanupSchemaMigration marks a schema migration in the given cluster as + // ready for artifact cleanup. + rpc CleanupSchemaMigration(CleanupSchemaMigrationRequest) returns (vtctldata.CleanupSchemaMigrationResponse) {}; + // CompleteSchemaMigration completes one or all migrations in the given + // cluster executed with --postpone-completion. + rpc CompleteSchemaMigration(CompleteSchemaMigrationRequest) returns (vtctldata.CompleteSchemaMigrationResponse) {}; // CreateKeyspace creates a new keyspace in the given cluster. rpc CreateKeyspace(CreateKeyspaceRequest) returns (CreateKeyspaceResponse) {}; // CreateShard creates a new shard in the given cluster and keyspace. @@ -77,6 +88,13 @@ service VTAdmin { rpc GetSchema(GetSchemaRequest) returns (Schema) {}; // GetSchemas returns all schemas across the specified clusters. 
rpc GetSchemas(GetSchemasRequest) returns (GetSchemasResponse) {}; + // GetSchemaMigrations returns one or more online schema migrations for the + // set of keyspaces (or all keyspaces) in the given clusters, analagous to + // repeated executions of `SHOW VITESS_MIGRATIONS`. + // + // Different fields in the request message result in different behaviors. + // See the documentation on vtctldata.GetSchemaMigrationsRequest for details. + rpc GetSchemaMigrations(GetSchemaMigrationsRequest) returns (GetSchemaMigrationsResponse) {}; // GetShardReplicationPositions returns shard replication positions grouped // by cluster. rpc GetShardReplicationPositions(GetShardReplicationPositionsRequest) returns (GetShardReplicationPositionsResponse) {}; @@ -108,6 +126,9 @@ service VTAdmin { rpc GetWorkflow(GetWorkflowRequest) returns (Workflow) {}; // GetWorkflows returns the Workflows for all specified clusters. rpc GetWorkflows(GetWorkflowsRequest) returns (GetWorkflowsResponse) {}; + // LaunchSchemaMigration launches one or all migrations in the given + // cluster executed with --postpone-launch. + rpc LaunchSchemaMigration(LaunchSchemaMigrationRequest) returns (vtctldata.LaunchSchemaMigrationResponse) {}; // PingTablet checks that the specified tablet is awake and responding to // RPCs. This command can be blocked by other in-flight operations. rpc PingTablet(PingTabletRequest) returns (PingTabletResponse) {}; @@ -134,6 +155,9 @@ service VTAdmin { rpc ReloadSchemaShard(ReloadSchemaShardRequest) returns (ReloadSchemaShardResponse) {}; // RemoveKeyspaceCell removes the cell from the Cells list for all shards in the keyspace, and the SrvKeyspace for that keyspace in that cell. rpc RemoveKeyspaceCell(RemoveKeyspaceCellRequest) returns (RemoveKeyspaceCellResponse) {}; + // RetrySchemaMigration marks a given schema migration in the given cluster + // for retry. 
+ rpc RetrySchemaMigration(RetrySchemaMigrationRequest) returns (vtctldata.RetrySchemaMigrationResponse) {}; // RunHealthCheck runs a healthcheck on the tablet. rpc RunHealthCheck(RunHealthCheckRequest) returns (RunHealthCheckResponse) {}; // SetReadOnly sets the tablet to read-only mode. @@ -249,6 +273,11 @@ message Schema { } } +message SchemaMigration { + Cluster cluster = 1; + vtctldata.SchemaMigration schema_migration = 2; +} + // Shard groups the vtctldata information about a shard record together with // the Vitess cluster it belongs to. message Shard { @@ -318,6 +347,26 @@ message Workflow { /* Request/Response types */ +message ApplySchemaRequest { + string cluster_id = 1; + vtctldata.ApplySchemaRequest request = 2; +} + +message CancelSchemaMigrationRequest { + string cluster_id = 1; + vtctldata.CancelSchemaMigrationRequest request = 2; +} + +message CleanupSchemaMigrationRequest { + string cluster_id = 1; + vtctldata.CleanupSchemaMigrationRequest request = 2; +} + +message CompleteSchemaMigrationRequest { + string cluster_id = 1; + vtctldata.CompleteSchemaMigrationRequest request = 2; +} + message CreateKeyspaceRequest { string cluster_id = 1; vtctldata.CreateKeyspaceRequest options = 2; @@ -472,6 +521,19 @@ message GetSchemasResponse { repeated Schema schemas = 1; } +message GetSchemaMigrationsRequest { + repeated ClusterRequest cluster_requests = 1; + + message ClusterRequest { + string cluster_id = 1; + vtctldata.GetSchemaMigrationsRequest request = 2; + } +} + +message GetSchemaMigrationsResponse { + repeated SchemaMigration schema_migrations = 1; +} + message GetShardReplicationPositionsRequest { repeated string cluster_ids = 1; // Keyspaces, if set, limits replication positions to just the specified @@ -605,6 +667,11 @@ message GetWorkflowsResponse { map workflows_by_cluster = 1; } +message LaunchSchemaMigrationRequest { + string cluster_id = 1; + vtctldata.LaunchSchemaMigrationRequest request = 2; +} + message PingTabletRequest { // Unique (per 
cluster) tablet alias of the standard form: "$cell-$uid" topodata.TabletAlias alias = 1; @@ -689,7 +756,7 @@ message ReloadSchemasRequest { // // In Tablets mode, Concurrency is the number of tablets to reload at once // *per cluster*. - uint32 concurrency = 5; + int32 concurrency = 5; // WaitPosition is the replication position that replicating tablets should // reach prior to reloading their schemas. // @@ -757,7 +824,7 @@ message ReloadSchemaShardRequest { string wait_position = 4; bool include_primary = 5; - uint32 concurrency = 6; + int32 concurrency = 6; } message ReloadSchemaShardResponse { @@ -788,6 +855,11 @@ message RemoveKeyspaceCellResponse { string status = 1; } +message RetrySchemaMigrationRequest { + string cluster_id = 1; + vtctldata.RetrySchemaMigrationRequest request = 2; +} + message RunHealthCheckRequest { topodata.TabletAlias alias = 1; repeated string cluster_ids = 2; diff --git a/proto/vtctldata.proto b/proto/vtctldata.proto index 3d59ea1bd5e..4e59384f2e2 100644 --- a/proto/vtctldata.proto +++ b/proto/vtctldata.proto @@ -97,6 +97,7 @@ message MaterializeSettings { bool defer_secondary_keys = 14; tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 15; bool atomic_copy = 16; + WorkflowOptions workflow_options = 17; } /* Data types for VtctldServer */ @@ -205,6 +206,16 @@ message Shard { topodata.Shard shard = 3; } +message WorkflowOptions { + string tenant_id = 1; + // Remove auto_increment clauses on tables when moving them to a sharded + // keyspace. + bool strip_sharded_auto_increment = 2; + // Shards on which vreplication streams in the target keyspace are created for this workflow and to which the data + // from the source will be vreplicated. + repeated string shards = 3; +} + // TODO: comment the hell out of this. message Workflow { string name = 1; @@ -222,6 +233,9 @@ message Workflow { int64 max_v_replication_transaction_lag = 8; // This specifies whether to defer the creation of secondary keys. 
bool defer_secondary_keys = 9; + // These are additional (optional) settings for vreplication workflows. Previously we used to add it to the + // binlogdata.BinlogSource proto object. More details in go/vt/sidecardb/schema/vreplication.sql. + WorkflowOptions options = 10; message ReplicationLocation { string keyspace = 1; @@ -260,10 +274,14 @@ message Workflow { repeated string tags = 15; int64 rows_copied = 16; ThrottlerStatus throttler_status = 17; + repeated topodata.TabletType tablet_types = 18; + tabletmanagerdata.TabletSelectionPreference tablet_selection_preference = 19; + repeated string cells = 20; message CopyState { string table = 1; string last_pk = 2; + int64 stream_id = 3; } message Log { @@ -303,6 +321,24 @@ message AddCellsAliasRequest { message AddCellsAliasResponse { } + +message ApplyKeyspaceRoutingRulesRequest { + vschema.KeyspaceRoutingRules keyspace_routing_rules = 1; + // SkipRebuild, if set, will cause ApplyKeyspaceRoutingRules to skip rebuilding the + // SrvVSchema objects in each cell in RebuildCells. + bool skip_rebuild = 2; + // RebuildCells limits the SrvVSchema rebuild to the specified cells. If not + // provided the SrvVSchema will be rebuilt in every cell in the topology. + // + // Ignored if SkipRebuild is set. + repeated string rebuild_cells = 3; +} + +message ApplyKeyspaceRoutingRulesResponse { + // KeyspaceRoutingRules returns the current set of rules. + vschema.KeyspaceRoutingRules keyspace_routing_rules = 1; +} + message ApplyRoutingRulesRequest { vschema.RoutingRules routing_rules = 1; // SkipRebuild, if set, will cause ApplyRoutingRules to skip rebuilding the @@ -333,6 +369,8 @@ message ApplyShardRoutingRulesRequest { message ApplyShardRoutingRulesResponse { } + + message ApplySchemaRequest { string keyspace = 1; reserved 2; @@ -370,10 +408,25 @@ message ApplyVSchemaRequest { repeated string cells = 4; vschema.Keyspace v_schema = 5; string sql = 6; + // Strict returns an error if there are unknown vindex params. 
+ bool strict = 7; } message ApplyVSchemaResponse { vschema.Keyspace v_schema = 1; + // UnknownVindexParams is a map of vindex name to params that were not recognized by the vindex + // type. E.g.: + // + // { + // "lookup_vdx": { + // "params": ["raed_lock", "not_verify"] + // } + // } + map unknown_vindex_params = 2; + + message ParamList { + repeated string params = 1; + } } message BackupRequest { @@ -385,7 +438,7 @@ message BackupRequest { bool allow_primary = 2; // Concurrency specifies the number of compression/checksum jobs to run // simultaneously. - uint64 concurrency = 3; + int32 concurrency = 3; // IncrementalFromPos indicates a position of a previous backup. When this value is non-empty // then the backup becomes incremental and applies as of given position. string incremental_from_pos = 4; @@ -410,7 +463,7 @@ message BackupShardRequest { bool allow_primary = 3; // Concurrency specifies the number of compression/checksum jobs to run // simultaneously. - uint64 concurrency = 4; + int32 concurrency = 4; // UpgradeSafe indicates if the backup should be taken with innodb_fast_shutdown=0 // so that it's a backup that can be used for an upgrade. bool upgrade_safe = 5; @@ -449,7 +502,6 @@ message CleanupSchemaMigrationResponse { map rows_affected_by_shard = 1; } - message CompleteSchemaMigrationRequest { string keyspace = 1; string uuid = 2; @@ -473,9 +525,8 @@ message CreateKeyspaceRequest { // OBSOLETE topodata.KeyspaceIdType sharding_column_type = 5; reserved 5; - // ServedFroms specifies a set of db_type:keyspace pairs used to serve - // traffic for the keyspace. - repeated topodata.Keyspace.ServedFrom served_froms = 6; + // OBSOLETE: repeated topodata.Keyspace.ServedFrom served_froms = 6; + reserved 6; // Type is the type of the keyspace to create. 
topodata.KeyspaceType type = 7; @@ -675,6 +726,29 @@ message ExecuteHookResponse { tabletmanagerdata.ExecuteHookResponse hook_result = 1; } +message ExecuteMultiFetchAsDBARequest { + topodata.TabletAlias tablet_alias = 1; + // SQL could have potentially multiple queries separated by semicolons. + string sql = 2; + // MaxRows is an optional parameter to limit the number of rows read into the + // QueryResult. Note that this does not apply a LIMIT to a query, just how + // many rows are read from the MySQL server on the tablet side. + // + // This field is optional. Specifying a non-positive value will use whatever + // default is configured in the VtctldService. + int64 max_rows = 3; + // DisableBinlogs instructs the tablet not to use binary logging when + // executing the query. + bool disable_binlogs = 4; + // ReloadSchema instructs the tablet to reload its schema after executing the + // query. + bool reload_schema = 5; +} + +message ExecuteMultiFetchAsDBAResponse { + repeated query.QueryResult results = 1; +} + message FindAllShardsInKeyspaceRequest { string keyspace = 1; } @@ -683,6 +757,15 @@ message FindAllShardsInKeyspaceResponse { map shards = 1; } +message ForceCutOverSchemaMigrationRequest { + string keyspace = 1; + string uuid = 2; +} + +message ForceCutOverSchemaMigrationResponse { + map rows_affected_by_shard = 1; +} + message GetBackupsRequest { string keyspace = 1; string shard = 2; @@ -759,6 +842,13 @@ message GetPermissionsResponse { tabletmanagerdata.Permissions permissions = 1; } +message GetKeyspaceRoutingRulesRequest { +} + +message GetKeyspaceRoutingRulesResponse { + vschema.KeyspaceRoutingRules keyspace_routing_rules = 1; +} + message GetRoutingRulesRequest { } @@ -827,6 +917,18 @@ message GetSchemaMigrationsResponse { repeated SchemaMigration migrations = 1; } +message GetShardReplicationRequest { + string keyspace = 1; + string shard = 2; + // Cells is the list of cells to fetch data for. Omit to fetch data from all + // cells. 
+ repeated string cells = 3; +} + +message GetShardReplicationResponse { + map shard_replication_by_cell = 1; +} + message GetShardRequest { string keyspace = 1; string shard_name = 2; @@ -985,6 +1087,7 @@ message GetWorkflowsRequest { // If you only want a specific workflow then set this field. string workflow = 4; bool include_logs = 5; + repeated string shards = 6; } message GetWorkflowsResponse { @@ -1155,6 +1258,7 @@ message MoveTablesCreateRequest { bool no_routing_rules = 18; // Run a single copy phase for the entire database. bool atomic_copy = 19; + WorkflowOptions workflow_options = 20; } message MoveTablesCreateResponse { @@ -1174,6 +1278,7 @@ message MoveTablesCompleteRequest { bool keep_routing_rules = 5; bool rename_tables = 6; bool dry_run = 7; + repeated string shards = 8; } message MoveTablesCompleteResponse { @@ -1211,6 +1316,10 @@ message PlannedReparentShardRequest { // WaitReplicasTimeout time to catch up before the reparent, and an additional // WaitReplicasTimeout time to catch up after the reparent. vttime.Duration wait_replicas_timeout = 5; + // TolerableReplicationLag is the amount of replication lag that is considered + // acceptable for a tablet to be eligible for promotion when Vitess makes the choice of a new primary. + // A value of 0 indicates that Vitess shouldn't consider the replication lag at all. + vttime.Duration tolerable_replication_lag = 6; } message PlannedReparentShardResponse { @@ -1279,7 +1388,7 @@ message ReloadSchemaKeyspaceRequest { // Concurrency is the global concurrency across all shards in the keyspace // (so, at most this many tablets will be reloaded across the keyspace at any // given point). - uint32 concurrency = 4; + int32 concurrency = 4; } message ReloadSchemaKeyspaceResponse { @@ -1292,7 +1401,7 @@ message ReloadSchemaShardRequest { string wait_position = 3; bool include_primary = 4; // Concurrency is the maximum number of tablets to reload at one time. 
- uint32 concurrency = 5; + int32 concurrency = 5; } message ReloadSchemaShardResponse { @@ -1429,19 +1538,6 @@ message SetKeyspaceDurabilityPolicyResponse { topodata.Keyspace keyspace = 1; } -message SetKeyspaceServedFromRequest { - string keyspace = 1; - topodata.TabletType tablet_type = 2; - repeated string cells = 3; - bool remove = 4; - string source_keyspace = 5; -} - -message SetKeyspaceServedFromResponse { - // Keyspace is the updated keyspace record. - topodata.Keyspace keyspace = 1; -} - message SetKeyspaceShardingInfoRequest { string keyspace = 1; // OBSOLETE string column_name = 2; @@ -1725,6 +1821,8 @@ message VDiffCreateRequest { vttime.Duration wait_update_interval = 16; bool auto_retry = 17; bool verbose = 18; + int64 max_report_sample_rows = 19; + vttime.Duration max_diff_duration = 20; } message VDiffCreateResponse { @@ -1778,6 +1876,7 @@ message WorkflowDeleteRequest { string workflow = 2; bool keep_data = 3; bool keep_routing_rules = 4; + repeated string shards = 5; } message WorkflowDeleteResponse { @@ -1793,6 +1892,7 @@ message WorkflowDeleteResponse { message WorkflowStatusRequest { string keyspace = 1; string workflow = 2; + repeated string shards = 3; } message WorkflowStatusResponse { @@ -1832,6 +1932,7 @@ message WorkflowSwitchTrafficRequest { vttime.Duration timeout = 8; bool dry_run = 9; bool initialize_target_sequences = 10; + repeated string shards = 11; } message WorkflowSwitchTrafficResponse { diff --git a/proto/vtctlservice.proto b/proto/vtctlservice.proto index 59c24dc8445..8abc37dab80 100644 --- a/proto/vtctlservice.proto +++ b/proto/vtctlservice.proto @@ -45,6 +45,8 @@ service Vtctld { rpc ApplyRoutingRules(vtctldata.ApplyRoutingRulesRequest) returns (vtctldata.ApplyRoutingRulesResponse) {}; // ApplySchema applies a schema to a keyspace. rpc ApplySchema(vtctldata.ApplySchemaRequest) returns (vtctldata.ApplySchemaResponse) {}; + // ApplyKeyspaceRoutingRules applies the VSchema keyspace routing rules. 
+ rpc ApplyKeyspaceRoutingRules(vtctldata.ApplyKeyspaceRoutingRulesRequest) returns (vtctldata.ApplyKeyspaceRoutingRulesResponse) {}; // ApplyShardRoutingRules applies the VSchema shard routing rules. rpc ApplyShardRoutingRules(vtctldata.ApplyShardRoutingRulesRequest) returns (vtctldata.ApplyShardRoutingRulesResponse) {}; // ApplyVSchema applies a vschema to a keyspace. @@ -54,7 +56,7 @@ service Vtctld { rpc Backup(vtctldata.BackupRequest) returns (stream vtctldata.BackupResponse) {}; // BackupShard chooses a tablet in the shard and uses it to create a backup. rpc BackupShard(vtctldata.BackupShardRequest) returns (stream vtctldata.BackupResponse) {}; - // CancelSchemaMigration cancels one or all migrations, terminating any runnign ones as needed. + // CancelSchemaMigration cancels one or all migrations, terminating any running ones as needed. rpc CancelSchemaMigration(vtctldata.CancelSchemaMigrationRequest) returns (vtctldata.CancelSchemaMigrationResponse) {}; // ChangeTabletType changes the db type for the specified tablet, if possible. // This is used primarily to arrange replicas, and it will not convert a @@ -100,9 +102,13 @@ service Vtctld { rpc ExecuteFetchAsDBA(vtctldata.ExecuteFetchAsDBARequest) returns (vtctldata.ExecuteFetchAsDBAResponse) {}; // ExecuteHook runs the hook on the tablet. rpc ExecuteHook(vtctldata.ExecuteHookRequest) returns (vtctldata.ExecuteHookResponse); + // ExecuteMultiFetchAsDBA executes one or more SQL queries on the remote tablet as the DBA user. + rpc ExecuteMultiFetchAsDBA(vtctldata.ExecuteMultiFetchAsDBARequest) returns (vtctldata.ExecuteMultiFetchAsDBAResponse) {}; // FindAllShardsInKeyspace returns a map of shard names to shard references // for a given keyspace. rpc FindAllShardsInKeyspace(vtctldata.FindAllShardsInKeyspaceRequest) returns (vtctldata.FindAllShardsInKeyspaceResponse) {}; + // ForceCutOverSchemaMigration marks a schema migration for forced cut-over. 
+ rpc ForceCutOverSchemaMigration(vtctldata.ForceCutOverSchemaMigrationRequest) returns (vtctldata.ForceCutOverSchemaMigrationResponse) {}; // GetBackups returns all the backups for a shard. rpc GetBackups(vtctldata.GetBackupsRequest) returns (vtctldata.GetBackupsResponse) {}; // GetCellInfo returns the information for a cell. @@ -119,6 +125,8 @@ service Vtctld { rpc GetKeyspace(vtctldata.GetKeyspaceRequest) returns (vtctldata.GetKeyspaceResponse) {}; // GetKeyspaces returns the keyspace struct of all keyspaces in the topo. rpc GetKeyspaces(vtctldata.GetKeyspacesRequest) returns (vtctldata.GetKeyspacesResponse) {}; + // GetKeyspaceRoutingRules returns the VSchema keyspace routing rules. + rpc GetKeyspaceRoutingRules(vtctldata.GetKeyspaceRoutingRulesRequest) returns (vtctldata.GetKeyspaceRoutingRulesResponse) {}; // GetPermissions returns the permissions set on the remote tablet. rpc GetPermissions(vtctldata.GetPermissionsRequest) returns (vtctldata.GetPermissionsResponse) {}; // GetRoutingRules returns the VSchema routing rules. @@ -132,6 +140,8 @@ service Vtctld { // Different fields in the request message result in different filtering // behaviors. See the documentation on GetSchemaMigrationsRequest for details. rpc GetSchemaMigrations(vtctldata.GetSchemaMigrationsRequest) returns (vtctldata.GetSchemaMigrationsResponse) {}; + // GetShardReplication returns the replication graph for a shard in a cell. + rpc GetShardReplication(vtctldata.GetShardReplicationRequest) returns (vtctldata.GetShardReplicationResponse) {}; // GetShard returns information about a shard in the topology. rpc GetShard(vtctldata.GetShardRequest) returns (vtctldata.GetShardResponse) {}; // GetShardRoutingRules returns the VSchema shard routing rules. 
diff --git a/proto/vttest.proto b/proto/vttest.proto index 3b3413979d4..b48107044d7 100644 --- a/proto/vttest.proto +++ b/proto/vttest.proto @@ -74,9 +74,9 @@ message Keyspace { // OBSOLETE string sharding_column_type = 4; reserved 4; - // redirects all traffic to another keyspace. If set, shards is ignored. - string served_from = 5; - + // OBSOLETE string served_from = 5; + reserved 5; + // number of replica tablets to instantiate. This includes the primary tablet. int32 replica_count = 6; diff --git a/resources/bin/gh-ost b/resources/bin/gh-ost deleted file mode 100644 index 24d63c753fc..00000000000 Binary files a/resources/bin/gh-ost and /dev/null differ diff --git a/resources/embed.go b/resources/embed.go deleted file mode 100644 index 5b9ed344664..00000000000 --- a/resources/embed.go +++ /dev/null @@ -1,6 +0,0 @@ -package resources - -import _ "embed" - -//go:embed bin/gh-ost -var GhostBinary []byte diff --git a/test.go b/test.go index c7594557161..101f2dc01bf 100755 --- a/test.go +++ b/test.go @@ -76,8 +76,8 @@ For example: // Flags var ( - flavor = flag.String("flavor", "mysql57", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). Available flavors: all,"+flavors) - bootstrapVersion = flag.String("bootstrap-version", "24", "the version identifier to use for the docker images") + flavor = flag.String("flavor", "mysql80", "comma-separated bootstrap flavor(s) to run against (when using Docker mode). 
Available flavors: all,"+flavors) + bootstrapVersion = flag.String("bootstrap-version", "32", "the version identifier to use for the docker images") runCount = flag.Int("runs", 1, "run each test this many times") retryMax = flag.Int("retry", 3, "max number of retries, to detect flaky tests") logPass = flag.Bool("log-pass", false, "log test output even if it passes") diff --git a/test/README.md b/test/README.md index 5fd5fadbedb..6579245ef45 100644 --- a/test/README.md +++ b/test/README.md @@ -1,4 +1,4 @@ -##Github CI Workflows +## Github CI Workflows This document has a short outline of how tests are run in CI, how to add new tests and where these are configured. diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 5a3031d7307..f1457f1be66 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -55,11 +55,7 @@ const ( // to be used. clusterTestTemplate = "templates/cluster_endtoend_test%s.tpl" - unitTestSelfHostedTemplate = "templates/unit_test_self_hosted.tpl" - unitTestSelfHostedDatabases = "" - dockerFileTemplate = "templates/dockerfile.tpl" - clusterTestSelfHostedTemplate = "templates/cluster_endtoend_test_self_hosted.tpl" - clusterTestDockerTemplate = "templates/cluster_endtoend_test_docker.tpl" + clusterTestDockerTemplate = "templates/cluster_endtoend_test_docker.tpl" ) var ( @@ -80,16 +76,11 @@ var ( "21", "22", "mysql_server_vault", - "vstream_failover", - "vstream_stoponreshard_true", - "vstream_stoponreshard_false", - "vstream_with_keyspaces_to_watch", - "onlineddl_ghost", + "vstream", "onlineddl_vrepl", "onlineddl_vrepl_stress", "onlineddl_vrepl_stress_suite", "onlineddl_vrepl_suite", - "vreplication_migrate_vdiff2_convert_tz", "onlineddl_revert", "onlineddl_scheduler", "tabletmanager_throttler_topo", @@ -116,19 +107,21 @@ var ( "xb_recovery", "mysql80", "vreplication_across_db_versions", - "vreplication_multicell", - "vreplication_cellalias", + "vreplication_mariadb_to_mysql", "vreplication_basic", + 
"vreplication_cellalias", + "vreplication_copy_parallel", "vreplication_v2", - "vreplication_partial_movetables_basic", - "vreplication_partial_movetables_sequences", + "vreplication_partial_movetables_and_materialize", + "vreplication_foreign_key_stress", + "vreplication_migrate_vdiff2_convert_tz", + "vreplication_multi_tenant", "schemadiff_vrepl", "topo_connection_cache", "vtgate_partial_keyspace", "vttablet_prscomplex", } - clusterSelfHostedList = []string{} clusterDockerList = []string{} clustersRequiringXtraBackup = []string{ "xb_backup", @@ -141,6 +134,9 @@ var ( "vtgate_topo_consul", "tabletmanager_consul", } + clustersRequiringMemoryCheck = []string{ + "vtorc", + } clusterRequiring16CoresMachines = []string{ "onlineddl_vrepl", "onlineddl_vrepl_stress", @@ -152,12 +148,13 @@ var ( ) type unitTest struct { - Name, Platform, FileName string + Name, Platform, FileName, Evalengine string } type clusterTest struct { Name, Shard, Platform string FileName string + MemoryCheck bool MakeTools, InstallXtraBackup bool Docker bool LimitResourceUsage bool @@ -166,31 +163,15 @@ type clusterTest struct { Cores16 bool } -type selfHostedTest struct { - Name, Platform, Dockerfile, Shard, ImageName, directoryName string - FileName string - MakeTools, InstallXtraBackup, Docker bool -} - // clusterMySQLVersions return list of mysql versions (one or more) that this cluster needs to test against func clusterMySQLVersions(clusterName string) mysqlVersions { switch { - case strings.HasPrefix(clusterName, "onlineddl_"): - return allMySQLVersions - case clusterName == "schemadiff_vrepl": - return allMySQLVersions - case clusterName == "backup_pitr": - return allMySQLVersions - case clusterName == "backup_pitr_xtrabackup": - return allMySQLVersions - case clusterName == "tabletmanager_tablegc": - return allMySQLVersions - case clusterName == "vtorc": - return allMySQLVersions - case clusterName == "xb_backup": - return allMySQLVersions - case clusterName == "xb_recovery": - return 
allMySQLVersions + // Add any specific clusters, or groups of clusters, here, + // that require allMySQLVersions to be tested against. + // At this time this list is clean because Vitess stopped + // supporting MySQL 5.7. At some point, we will need to + // support post 8.0 versions of MySQL, and this list will + // inevitably grow. default: return defaultMySQLVersions } @@ -219,16 +200,6 @@ func main() { generateUnitTestWorkflows() generateClusterWorkflows(clusterList, clusterTestTemplate) generateClusterWorkflows(clusterDockerList, clusterTestDockerTemplate) - - // tests that will use self-hosted runners - err := generateSelfHostedUnitTestWorkflows() - if err != nil { - log.Fatal(err) - } - err = generateSelfHostedClusterWorkflows() - if err != nil { - log.Fatal(err) - } } func canonnizeList(list []string) []string { @@ -241,98 +212,6 @@ func canonnizeList(list []string) []string { return output } -func parseList(csvList string) []string { - var list []string - for _, item := range strings.Split(csvList, ",") { - if item != "" { - list = append(list, strings.TrimSpace(item)) - } - } - return list -} - -func generateSelfHostedUnitTestWorkflows() error { - platforms := parseList(unitTestSelfHostedDatabases) - for _, platform := range platforms { - directoryName := fmt.Sprintf("unit_test_%s", platform) - test := &selfHostedTest{ - Name: fmt.Sprintf("Unit Test (%s)", platform), - ImageName: fmt.Sprintf("unit_test_%s", platform), - Platform: platform, - directoryName: directoryName, - Dockerfile: fmt.Sprintf("./.github/docker/%s/Dockerfile", directoryName), - MakeTools: true, - InstallXtraBackup: false, - } - err := setupTestDockerFile(test) - if err != nil { - return err - } - test.FileName = fmt.Sprintf("unit_test_%s.yml", platform) - filePath := fmt.Sprintf("%s/%s", workflowConfigDir, test.FileName) - err = writeFileFromTemplate(unitTestSelfHostedTemplate, filePath, test) - if err != nil { - log.Print(err) - } - } - return nil -} - -func 
generateSelfHostedClusterWorkflows() error { - clusters := canonnizeList(clusterSelfHostedList) - for _, cluster := range clusters { - for _, mysqlVersion := range clusterMySQLVersions(cluster) { - // check mysqlversion - mysqlVersionIndicator := "" - if mysqlVersion != defaultMySQLVersion && len(clusterMySQLVersions(cluster)) > 1 { - mysqlVersionIndicator = "_" + string(mysqlVersion) - } - - directoryName := fmt.Sprintf("cluster_test_%s%s", cluster, mysqlVersionIndicator) - test := &selfHostedTest{ - Name: fmt.Sprintf("Cluster (%s)(%s)", cluster, mysqlVersion), - ImageName: fmt.Sprintf("cluster_test_%s%s", cluster, mysqlVersionIndicator), - Platform: "mysql80", - directoryName: directoryName, - Dockerfile: fmt.Sprintf("./.github/docker/%s/Dockerfile", directoryName), - Shard: cluster, - MakeTools: false, - InstallXtraBackup: false, - } - makeToolClusters := canonnizeList(clustersRequiringMakeTools) - for _, makeToolCluster := range makeToolClusters { - if makeToolCluster == cluster { - test.MakeTools = true - break - } - } - xtraBackupClusters := canonnizeList(clustersRequiringXtraBackup) - for _, xtraBackupCluster := range xtraBackupClusters { - if xtraBackupCluster == cluster { - test.InstallXtraBackup = true - break - } - } - if mysqlVersion == mysql57 { - test.Platform = string(mysql57) - } - - err := setupTestDockerFile(test) - if err != nil { - return err - } - - test.FileName = fmt.Sprintf("cluster_endtoend_%s%s.yml", cluster, mysqlVersionIndicator) - filePath := fmt.Sprintf("%s/%s", workflowConfigDir, test.FileName) - err = writeFileFromTemplate(clusterTestSelfHostedTemplate, filePath, test) - if err != nil { - log.Print(err) - } - } - } - return nil -} - func generateClusterWorkflows(list []string, tpl string) { clusters := canonnizeList(list) for _, cluster := range clusters { @@ -355,6 +234,13 @@ func generateClusterWorkflows(list []string, tpl string) { break } } + memoryCheckClusters := canonnizeList(clustersRequiringMemoryCheck) + for _, 
memCheckCluster := range memoryCheckClusters { + if memCheckCluster == cluster { + test.MemoryCheck = true + break + } + } xtraBackupClusters := canonnizeList(clustersRequiringXtraBackup) for _, xtraBackupCluster := range xtraBackupClusters { if xtraBackupCluster == cluster { @@ -398,40 +284,27 @@ func generateClusterWorkflows(list []string, tpl string) { func generateUnitTestWorkflows() { for _, platform := range unitTestDatabases { - test := &unitTest{ - Name: fmt.Sprintf("Unit Test (%s)", platform), - Platform: string(platform), - } - test.FileName = fmt.Sprintf("unit_test_%s.yml", platform) - path := fmt.Sprintf("%s/%s", workflowConfigDir, test.FileName) - err := writeFileFromTemplate(unitTestTemplate, path, test) - if err != nil { - log.Print(err) + for _, evalengine := range []string{"1", "0"} { + test := &unitTest{ + Name: fmt.Sprintf("Unit Test (%s%s)", evalengineToString(evalengine), platform), + Platform: string(platform), + Evalengine: evalengine, + } + test.FileName = fmt.Sprintf("unit_test_%s%s.yml", evalengineToString(evalengine), platform) + path := fmt.Sprintf("%s/%s", workflowConfigDir, test.FileName) + err := writeFileFromTemplate(unitTestTemplate, path, test) + if err != nil { + log.Print(err) + } } } } -func setupTestDockerFile(test *selfHostedTest) error { - // remove the directory - relDirectoryName := fmt.Sprintf("../.github/docker/%s", test.directoryName) - err := os.RemoveAll(relDirectoryName) - if err != nil { - return err - } - // create the directory - err = os.MkdirAll(relDirectoryName, 0755) - if err != nil { - return err - } - - // generate the docker file - dockerFilePath := path.Join(relDirectoryName, "Dockerfile") - err = writeFileFromTemplate(dockerFileTemplate, dockerFilePath, test) - if err != nil { - return err +func evalengineToString(evalengine string) string { + if evalengine == "1" { + return "evalengine_" } - - return nil + return "" } func writeFileFromTemplate(templateFile, filePath string, test any) error { diff --git 
a/test/client/client.go b/test/client/client.go index d1a174d8d04..bd6c6ad2af4 100644 --- a/test/client/client.go +++ b/test/client/client.go @@ -27,7 +27,7 @@ package main import ( "fmt" - "math/rand" + "math/rand/v2" "os" "time" @@ -59,7 +59,7 @@ func main() { fmt.Printf("begin failed: %v\n", err) os.Exit(1) } - page := rand.Intn(100) + 1 + page := rand.IntN(100) + 1 timeCreated := time.Now().UnixNano() if _, err := tx.Exec("INSERT INTO messages (page,time_created_ns,message) VALUES (?,?,?)", page, timeCreated, "V is for speed"); err != nil { diff --git a/test/config.json b/test/config.json index 66657b4f37e..2e612e57ca5 100644 --- a/test/config.json +++ b/test/config.json @@ -259,15 +259,6 @@ "site_test" ] }, - "onlineddl_ghost": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/ghost", "-timeout", "30m"], - "Command": [], - "Manual": false, - "Shard": "onlineddl_ghost", - "RetryMax": 2, - "Tags": [] - }, "onlineddl_vrepl": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl", "-timeout", "30m"], @@ -507,7 +498,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_aggregation": { "File": "unused.go", @@ -516,7 +507,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_foundrows": { "File": "unused.go", @@ -525,7 +516,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_informationschema": { "File": "unused.go", @@ -534,7 +525,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_misc": { "File": "unused.go", @@ -543,7 +534,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": 
["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_timeout": { "File": "unused.go", @@ -552,7 +543,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_normalize": { "File": "unused.go", @@ -561,7 +552,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_no_scatter": { "File": "unused.go", @@ -570,7 +561,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_orderby": { "File": "unused.go", @@ -581,6 +572,15 @@ "RetryMax": 2, "Tags": ["upgrade_downgrade_query_serving_queries"] }, + "vtgate_queries_tpch": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/tpch", "-timeout", "20m"], + "Command": [], + "Manual": false, + "Shard": "vtgate_queries", + "RetryMax": 2, + "Tags": ["upgrade_downgrade_query_serving_queries"] + }, "vtgate_queries_subquery": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vtgate/queries/subquery", "-timeout", "20m"], @@ -606,7 +606,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_vexplain": { "File": "unused.go", @@ -615,7 +615,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_reference": { "File": "unused.go", @@ -624,7 +624,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_random": { "File": "unused.go", @@ -633,7 +633,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_kill": { "File": "unused.go", @@ -642,7 
+642,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_concurrentdml": { "File": "unused.go", @@ -1004,21 +1004,12 @@ "RetryMax": 1, "Tags": [] }, - "vreplication_multicell": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "MultiCell"], - "Command": [], - "Manual": false, - "Shard": "vreplication_multicell", - "RetryMax": 2, - "Tags": [] - }, "vreplication_materialize": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterialize"], "Command": [], "Manual": false, - "Shard": "vreplication_multicell", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1027,7 +1018,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMaterializeVtctldClient"], "Command": [], "Manual": false, - "Shard": "vreplication_multicell", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1040,12 +1031,21 @@ "RetryMax": 0, "Tags": [] }, + "vreplication_multi_tenant": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication","-run", "MultiTenant"], + "Command": [], + "Manual": false, + "Shard": "vreplication_multi_tenant", + "RetryMax": 0, + "Tags": [] + }, "vreplication_partial_movetables_basic": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "PartialMoveTablesBasic"], "Command": [], "Manual": false, - "Shard": "vreplication_partial_movetables_basic", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ -1054,7 +1054,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMultipleConcurrentVDiffs"], "Command": [], "Manual": false, - "Shard": "vreplication_partial_movetables_basic", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 0, "Tags": [] }, @@ 
-1067,6 +1067,15 @@ "RetryMax": 0, "Tags": [] }, + "vreplication_onlineddl_vdiff": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestOnlineDDLVDiff"], + "Command": [], + "Manual": false, + "Shard": "vreplication_cellalias", + "RetryMax": 2, + "Tags": [] + }, "vreplication_vschema_load": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVSchemaChangesUnderLoad"], @@ -1099,7 +1108,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVreplicationCopyParallel", "-timeout", "20m"], "Command": [], "Manual": false, - "Shard": "vreplication_basic", + "Shard": "vreplication_copy_parallel", "RetryMax": 1, "Tags": [] }, @@ -1108,7 +1117,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestPartialMoveTablesWithSequences"], "Command": [], "Manual": false, - "Shard": "vreplication_partial_movetables_sequences", + "Shard": "vreplication_partial_movetables_and_materialize", "RetryMax": 1, "Tags": [] }, @@ -1121,12 +1130,21 @@ "RetryMax": 1, "Tags": [] }, + "multi_vstreams_keyspace_reshard": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMultiVStreamsKeyspaceReshard", "-timeout", "15m"], + "Command": [], + "Manual": false, + "Shard": "vstream", + "RetryMax": 1, + "Tags": [] + }, "vstream_failover": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamFailover"], "Command": [], "Manual": false, - "Shard": "vstream_failover", + "Shard": "vstream", "RetryMax": 3, "Tags": [] }, @@ -1135,7 +1153,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamStopOnReshardTrue"], "Command": [], "Manual": false, - "Shard": "vstream_stoponreshard_true", + "Shard": "vstream", "RetryMax": 1, "Tags": [] }, @@ -1144,7 +1162,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamStopOnReshardFalse"], "Command": [], 
"Manual": false, - "Shard": "vstream_stoponreshard_false", + "Shard": "vstream", "RetryMax": 1, "Tags": [] }, @@ -1153,7 +1171,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "VStreamWithKeyspacesToWatch"], "Command": [], "Manual": false, - "Shard": "vstream_with_keyspaces_to_watch", + "Shard": "vstream", "RetryMax": 1, "Tags": [] }, @@ -1204,7 +1222,7 @@ }, "vreplication_v2": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestBasicV2Workflows", "-timeout", "20m"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestBasicV2Workflows", "-timeout", "30m"], "Command": [], "Manual": false, "Shard": "vreplication_v2", @@ -1220,6 +1238,15 @@ "RetryMax": 1, "Tags": [] }, + "vreplication_foreign_key_stress": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestFKExt"], + "Command": [], + "Manual": false, + "Shard": "vreplication_foreign_key_stress", + "RetryMax": 1, + "Tags": [] + }, "vreplication_across_db_versions": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestV2WorkflowsAcrossDBVersions", "-timeout", "20m"], @@ -1229,21 +1256,30 @@ "RetryMax": 1, "Tags": [] }, - "vreplication_mariadb_to_mysql": { - "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMoveTablesMariaDBToMySQL", "-timeout", "20m"], - "Command": [], - "Manual": false, - "Shard": "vreplication_across_db_versions", - "RetryMax": 1, - "Tags": [] - }, + "vreplication_mariadb_to_mysql": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMoveTablesMariaDBToMySQL", "-timeout", "20m"], + "Command": [], + "Manual": false, + "Shard": "vreplication_mariadb_to_mysql", + "RetryMax": 1, + "Tags": [] + }, + "vreplication_vtctldclient_cli": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", 
"TestVtctldclientCLI", "-timeout", "20m"], + "Command": [], + "Manual": false, +"Shard": "vreplication_cli_migrate_vdiff2_convert_tz", + "RetryMax": 1, + "Tags": [] + }, "vreplication_vtctl_migrate": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVtctlMigrate", "-timeout", "30m"], "Command": [], "Manual": false, - "Shard": "vreplication_migrate_vdiff2_convert_tz", + "Shard": "vreplication_cli_migrate_vdiff2_convert_tz", "RetryMax": 1, "Tags": [] }, @@ -1252,16 +1288,16 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVtctldMigrate", "-timeout", "30m"], "Command": [], "Manual": false, - "Shard": "vreplication_migrate_vdiff2_convert_tz", + "Shard": "vreplication_cli_migrate_vdiff2_convert_tz", "RetryMax": 1, "Tags": [] }, "vdiff2": { "File": "unused.go", - "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVDiff2", "-timeout", "20m"], + "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestVDiff2", "-timeout", "30m"], "Command": [], "Manual": false, - "Shard": "vreplication_migrate_vdiff2_convert_tz", + "Shard": "vreplication_cli_migrate_vdiff2_convert_tz", "RetryMax": 1, "Tags": [] }, @@ -1270,7 +1306,7 @@ "Args": ["vitess.io/vitess/go/test/endtoend/vreplication", "-run", "TestMoveTablesTZ"], "Command": [], "Manual": false, - "Shard": "vreplication_migrate_vdiff2_convert_tz", + "Shard": "vreplication_cli_migrate_vdiff2_convert_tz", "RetryMax": 1, "Tags": [] }, diff --git a/test/templates/cluster_endtoend_test.tpl b/test/templates/cluster_endtoend_test.tpl index b5c409d8f51..8dadd7b73be 100644 --- a/test/templates/cluster_endtoend_test.tpl +++ b/test/templates/cluster_endtoend_test.tpl @@ -34,26 +34,41 @@ jobs: echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - PR_DATA=$(curl \ + PR_DATA=$(curl -s\ -H "{{"Authorization: token ${{ secrets.GITHUB_TOKEN }}"}}" \ -H "Accept: application/vnd.github.v3+json" \ "{{"https://api.github.com/repos/${{ 
github.repository }}/pulls/${{ github.event.pull_request.number }}"}}") draft=$(echo "$PR_DATA" | jq .draft -r) echo "is_draft=${draft}" >> $GITHUB_OUTPUT + {{if .MemoryCheck}} + + - name: Check Memory + run: | + totalMem=$(free -g | awk 'NR==2 {print $2}') + echo "total memory $totalMem GB" + if [[ "$totalMem" -lt 15 ]]; then + echo "Less memory than required" + exit 1 + fi + + {{end}} + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -70,13 +85,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -94,27 +109,27 @@ jobs: {{if .InstallXtraBackup}} # Setup Percona Server for MySQL 8.0 - sudo apt-get update - sudo apt-get install -y lsb-release gnupg2 curl + sudo apt-get -qq update + sudo apt-get -qq install -y lsb-release gnupg2 curl wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb sudo percona-release setup ps80 - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - 
sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5 {{else}} # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # Setup MySQL 8.0 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update + sudo apt-get -qq update # Install everything else we need, and configure - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + sudo apt-get -qq install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 {{end}} @@ -129,7 +144,7 @@ jobs: {{if .InstallXtraBackup}} - sudo apt-get install -y percona-xtrabackup-80 lz4 + sudo apt-get -qq install -y percona-xtrabackup-80 lz4 {{end}} @@ -168,7 +183,7 @@ jobs: {{if .LimitResourceUsage}} # Increase our open file descriptor limit as we could hit this ulimit -n 65536 - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf innodb_buffer_pool_dump_at_shutdown=OFF innodb_buffer_pool_in_core_file=OFF innodb_buffer_pool_load_at_startup=OFF @@ -186,7 +201,7 @@ jobs: {{end}} {{if .EnableBinlogTransactionCompression}} - cat <<-EOF>>./config/mycnf/mysql80.cnf + cat <<-EOF>>./config/mycnf/mysql8026.cnf binlog-transaction-compression=ON EOF {{end}} diff --git a/test/templates/cluster_endtoend_test_docker.tpl b/test/templates/cluster_endtoend_test_docker.tpl index f53c705e2c1..650fc81a57a 100644 --- 
a/test/templates/cluster_endtoend_test_docker.tpl +++ b/test/templates/cluster_endtoend_test_docker.tpl @@ -28,17 +28,19 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -52,9 +54,9 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/test/templates/cluster_endtoend_test_mysql57.tpl b/test/templates/cluster_endtoend_test_mysql57.tpl index f5bc482cda9..fba14ffc0d8 100644 --- a/test/templates/cluster_endtoend_test_mysql57.tpl +++ b/test/templates/cluster_endtoend_test_mysql57.tpl @@ -46,19 +46,34 @@ jobs: draft=$(echo "$PR_DATA" | jq .draft -r) echo "is_draft=${draft}" >> $GITHUB_OUTPUT + {{if .MemoryCheck}} + + - name: Check Memory + run: | + totalMem=$(free -g | awk 'NR==2 {print $2}') + echo "total memory $totalMem GB" + if [[ "$totalMem" -lt 15 ]]; then + echo "Less memory than required" + exit 1 + fi + + {{end}} + - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' filters: | end_to_end: - 'go/**/*.go' + - 
'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' - 'test.go' - 'Makefile' - 'build.env' @@ -75,13 +90,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' @@ -109,9 +124,9 @@ jobs: sudo rm -rf /etc/mysql # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 # packages for Jammy. 
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections diff --git a/test/templates/cluster_endtoend_test_self_hosted.tpl b/test/templates/cluster_endtoend_test_self_hosted.tpl deleted file mode 100644 index d9b48f6aecf..00000000000 --- a/test/templates/cluster_endtoend_test_self_hosted.tpl +++ /dev/null @@ -1,89 +0,0 @@ -name: {{.Name}} -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{"{{"}} github.ref {{"}}"}}, '{{.Name}}') - cancel-in-progress: true - -permissions: read-all - -jobs: - build: - name: Run endtoend tests on {{.Name}} - runs-on: self-hosted - - steps: - - name: Skip CI - run: | - if [[ "{{"${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}"}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - end_to_end: - - 'go/**/*.go' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - '.github/docker/**' - - 'bootstrap.sh' - - '.github/workflows/{{.FileName}}' - - - name: Build Docker Image - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: docker build -f {{.Dockerfile}} -t {{.ImageName}}:$GITHUB_SHA . 
- - - name: Run test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - timeout-minutes: 30 - run: | - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. - export VTDATAROOT="/tmp/" - - docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'source build.env && go run test.go -keep-data=true -docker=false -print-log -follow -shard {{.Shard}} -- -- --keep-data=true' - - - name: Print Volume Used - if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - docker inspect -f '{{"{{ (index .Mounts 0).Name }}"}}' {{.ImageName}}_$GITHUB_SHA - - - name: Cleanup Docker Volume - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - docker rm -v {{.ImageName}}_$GITHUB_SHA - - - name: Cleanup Docker Container - if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - docker rm -f {{.ImageName}}_$GITHUB_SHA - - - name: Cleanup Docker Image - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' - run: | - docker image rm {{.ImageName}}:$GITHUB_SHA diff --git a/test/templates/dockerfile.tpl b/test/templates/dockerfile.tpl index 16ea54e724b..38cd2b93d9f 100644 --- a/test/templates/dockerfile.tpl +++ b/test/templates/dockerfile.tpl @@ -1,4 +1,4 @@ -ARG bootstrap_version=24 +ARG bootstrap_version=32 ARG image="vitess/bootstrap:${bootstrap_version}-{{.Platform}}" FROM "${image}" diff --git a/test/templates/unit_test.tpl b/test/templates/unit_test.tpl index 7dffd83267b..94350008767 100644 --- a/test/templates/unit_test.tpl +++ b/test/templates/unit_test.tpl @@ -43,11 +43,11 @@ jobs: - name: Check out code if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: 
actions/checkout@v3 + uses: actions/checkout@v4 - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main + uses: dorny/paths-filter@v3.0.1 id: changes with: token: '' @@ -67,13 +67,13 @@ jobs: - name: Set up Go if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.21.3 + go-version: 1.22.3 - name: Set up python if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 - name: Tune the OS if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' @@ -87,54 +87,54 @@ jobs: if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' run: | export DEBIAN_FRONTEND="noninteractive" - sudo apt-get update + sudo apt-get -qq update # Uninstall any previously installed MySQL first sudo systemctl stop apparmor - sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common - sudo apt-get -y autoremove - sudo apt-get -y autoclean + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -qq -y autoremove + sudo apt-get -qq -y autoclean sudo deluser mysql sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql {{if (eq .Platform "mysql57")}} # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # mysql57 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 # packages for Jammy. 
echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + sudo apt-get -qq update + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 {{end}} {{if (eq .Platform "mysql80")}} # Get key to latest MySQL repo - sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C # mysql80 - wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.29-1_all.deb echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* - sudo apt-get update - sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + sudo apt-get -qq update + sudo DEBIAN_FRONTEND="noninteractive" apt-get -qq install -y mysql-server mysql-client {{end}} - sudo apt-get install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata + sudo apt-get -qq install -y make unzip g++ curl git wget ant openjdk-11-jdk eatmydata sudo service mysql stop sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" mkdir -p dist bin - curl -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist + 
curl -s -L https://github.com/coreos/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz | tar -zxC dist mv dist/etcd-v3.3.10-linux-amd64/{etcd,etcdctl} bin/ go mod download @@ -170,6 +170,8 @@ jobs: export VTDATAROOT="/tmp/" export NOVTADMINBUILD=1 + export VTEVALENGINETEST="{{.Evalengine}}" + eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml - name: Print test output and Record test result in launchable if PR is not a draft diff --git a/test/templates/unit_test_self_hosted.tpl b/test/templates/unit_test_self_hosted.tpl deleted file mode 100644 index 45d88392b9b..00000000000 --- a/test/templates/unit_test_self_hosted.tpl +++ /dev/null @@ -1,90 +0,0 @@ -name: {{.Name}} -on: [push, pull_request] -concurrency: - group: format('{0}-{1}', ${{"{{"}} github.ref {{"}}"}}, '{{.Name}}') - cancel-in-progress: true - -permissions: read-all - -jobs: - test: - runs-on: self-hosted - - steps: - - name: Skip CI - run: | - if [[ "{{"${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}"}}" == "true" ]]; then - echo "skipping CI due to the 'Skip CI' label" - exit 1 - fi - - - name: Check if workflow needs to be skipped - id: skip-workflow - run: | - skip='false' - if [[ "{{"${{github.event.pull_request}}"}}" == "" ]] && [[ "{{"${{github.ref}}"}}" != "refs/heads/main" ]] && [[ ! "{{"${{github.ref}}"}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"{{"${{github.ref}}"}}" =~ "refs/tags/.*" ]]; then - skip='true' - fi - echo Skip ${skip} - echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - - name: Check out code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - - name: Check for changes in relevant files - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: frouioui/paths-filter@main - id: changes - with: - token: '' - filters: | - unit_tests: - - 'go/**' - - 'test.go' - - 'Makefile' - - 'build.env' - - 'go.sum' - - 'go.mod' - - 'proto/*.proto' - - 'tools/**' - - 'config/**' - - 'bootstrap.sh' - - '.github/workflows/{{.FileName}}' - - - name: Build Docker Image - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - run: docker build -f {{.Dockerfile}} -t {{.ImageName}}:$GITHUB_SHA . - - - name: Run test - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - timeout-minutes: 30 - run: | - set -exo pipefail - # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file - # which musn't be more than 107 characters long. 
- export VTDATAROOT="/tmp/" - - docker run --name "{{.ImageName}}_$GITHUB_SHA" {{.ImageName}}:$GITHUB_SHA /bin/bash -c 'NOVTADMINBUILD=1 make unit_test' - - - name: Print Volume Used - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - if: ${{"{{ always() }}"}} - run: | - docker inspect -f '{{"{{ (index .Mounts 0).Name }}"}}' {{.ImageName}}_$GITHUB_SHA - - - name: Cleanup Docker Volume - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - run: | - docker rm -v {{.ImageName}}_$GITHUB_SHA - - - name: Cleanup Docker Container - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - if: ${{"{{ always() }}"}} - run: | - docker rm -f {{.ImageName}}_$GITHUB_SHA - - - name: Cleanup Docker Image - if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' - run: | - docker image rm {{.ImageName}}:$GITHUB_SHA diff --git a/test/vtop_example.sh b/test/vtop_example.sh index 421e1c230df..5ff90a2be7e 100755 --- a/test/vtop_example.sh +++ b/test/vtop_example.sh @@ -36,7 +36,7 @@ unset VTROOT # ensure that the examples can run without VTROOT now. function checkSemiSyncSetup() { for vttablet in $(kubectl get pods --no-headers -o custom-columns=":metadata.name" | grep "vttablet") ; do echo "Checking semi-sync in $vttablet" - kubectl exec "$vttablet" -c mysqld -- mysql -S "/vt/socket/mysql.sock" -u root -e "show variables like 'rpl_semi_sync_slave_enabled'" | grep "OFF" + kubectl exec "$vttablet" -c mysqld -- mysql -S "/vt/socket/mysql.sock" -u root -e "show variables like 'rpl_semi_sync_replica_enabled'" | grep "OFF" if [ $? -ne 0 ]; then echo "Semi Sync setup on $vttablet" exit 1 @@ -486,9 +486,7 @@ EOF # Build the docker image for vitess/lite using the local code docker build -f docker/lite/Dockerfile -t vitess/lite:pr . 
# Build the docker image for vitess/vtadmin using the local code -docker build -f docker/base/Dockerfile -t vitess/base:pr . -docker build -f docker/k8s/Dockerfile --build-arg VT_BASE_VER=pr -t vitess/k8s:pr . -docker build -f docker/k8s/vtadmin/Dockerfile --build-arg VT_BASE_VER=pr -t vitess/vtadmin:pr . +docker build -f docker/binaries/vtadmin/Dockerfile --build-arg VT_BASE_VER=pr -t vitess/vtadmin:pr . # Print the docker images available docker image ls diff --git a/tools/check_make_parser.sh b/tools/check_make_parser.sh index 2f58eeb050a..c3bd129bd30 100755 --- a/tools/check_make_parser.sh +++ b/tools/check_make_parser.sh @@ -20,7 +20,7 @@ fi mv $CUR $TMP output=$(go run ./goyacc -fo $CUR sql.y) -expectedOutput=$'\nconflicts: 3 shift/reduce' +expectedOutput=$'\nconflicts: 4 shift/reduce' if [[ "$output" != "$expectedOutput" ]]; then echo -e "Expected output from goyacc:$expectedOutput\ngot:$output" diff --git a/tools/create_release.sh b/tools/create_release.sh index 68e051f884e..17c8139dce2 100755 --- a/tools/create_release.sh +++ b/tools/create_release.sh @@ -52,7 +52,7 @@ function createRelease () { rm -f ./.github/workflows/code_freeze.yml.bak # Wait for release notes to be injected in the code base - echo -n Pausing so relase notes can be added. Press enter to continue + echo -n Pausing so release notes can be added. 
Press enter to continue read line git add --all diff --git a/tools/rowlog/rowlog.go b/tools/rowlog/rowlog.go index 475006b2b59..34d16a1777b 100644 --- a/tools/rowlog/rowlog.go +++ b/tools/rowlog/rowlog.go @@ -71,7 +71,6 @@ func usage() { func main() { usage() - defer log.Flush() ctx := context.Background() config := parseCommandLine() if !config.Validate() { @@ -497,7 +496,7 @@ func getPosition(ctx context.Context, server, keyspace, shard string) (string, e } func execVtctl(ctx context.Context, server string, args []string) ([]string, error) { - client, err := vtctlclient.New(server) + client, err := vtctlclient.New(ctx, server) if err != nil { fmt.Println(err) return nil, err diff --git a/tools/unit_test_race.sh b/tools/unit_test_race.sh index 4cec1f365a9..3b6a137edf1 100755 --- a/tools/unit_test_race.sh +++ b/tools/unit_test_race.sh @@ -24,11 +24,13 @@ fi # Output per line: * packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/... | sort) +if [[ "$VTEVALENGINETEST" == "1" ]]; then + packages_with_tests=$(echo "$packages_with_tests" | grep "evalengine") +fi -# exclude end to end tests -packages_to_test=$(echo "$packages_with_tests" | cut -d" " -f1 | grep -v "endtoend") -all_except_flaky_tests=$(echo "$packages_to_test" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend") -flaky_tests=$(echo "$packages_to_test" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1) +if [[ "$VTEVALENGINETEST" == "0" ]]; then + packages_with_tests=$(echo "$packages_with_tests" | grep -v "evalengine") +fi # Flaky tests have the suffix "_flaky_test.go". # Exclude endtoend tests @@ -36,7 +38,7 @@ all_except_flaky_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_tes flaky_tests=$(echo "$packages_with_tests" | grep -E ".+ .+_flaky_test\.go" | cut -d" " -f1) # Run non-flaky tests. 
-echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL -race -count=1 +echo "$all_except_flaky_tests" | xargs go test $VT_GO_PARALLEL -v -race -count=1 if [ $? -ne 0 ]; then echo "ERROR: Go unit tests failed. See above for errors." echo @@ -52,7 +54,7 @@ for pkg in $flaky_tests; do max_attempts=3 attempt=1 # Set a timeout because some tests may deadlock when they flake. - until go test -timeout 2m $VT_GO_PARALLEL $pkg -race -count=1; do + until go test -timeout 2m $VT_GO_PARALLEL $pkg -v -race -count=1; do echo "FAILED (try $attempt/$max_attempts) in $pkg (return code $?). See above for errors." if [ $((++attempt)) -gt $max_attempts ]; then echo "ERROR: Flaky Go unit tests in package $pkg failed too often (after $max_attempts retries). Please reduce the flakiness." diff --git a/tools/unit_test_runner.sh b/tools/unit_test_runner.sh index 8fba8920ec6..d48f7162a4b 100755 --- a/tools/unit_test_runner.sh +++ b/tools/unit_test_runner.sh @@ -54,6 +54,14 @@ esac # Output per line: * packages_with_tests=$(go list -f '{{if len .TestGoFiles}}{{.ImportPath}} {{join .TestGoFiles " "}}{{end}}{{if len .XTestGoFiles}}{{.ImportPath}} {{join .XTestGoFiles " "}}{{end}}' ./go/... | sort) +if [[ "$VTEVALENGINETEST" == "1" ]]; then + packages_with_tests=$(echo "$packages_with_tests" | grep "evalengine") +fi + +if [[ "$VTEVALENGINETEST" == "0" ]]; then + packages_with_tests=$(echo "$packages_with_tests" | grep -v "evalengine") +fi + # Flaky tests have the suffix "_flaky_test.go". # Exclude endtoend tests all_except_flaky_tests=$(echo "$packages_with_tests" | grep -vE ".+ .+_flaky_test\.go" | cut -d" " -f1 | grep -v "endtoend") diff --git a/vitess-mixin/e2e/config/init_db.sql b/vitess-mixin/e2e/config/init_db.sql index 12e5601d8cc..6059bbf7ca6 100644 --- a/vitess-mixin/e2e/config/init_db.sql +++ b/vitess-mixin/e2e/config/init_db.sql @@ -12,10 +12,8 @@ SET GLOBAL super_read_only='OFF'; # Changes during the init db should not make it to the binlog. 
# They could potentially create errant transactions on replicas. SET sql_log_bin = 0; -# Remove anonymous users. -DELETE FROM mysql.user WHERE User = ''; -# Disable remote root access (only allow UNIX socket). -DELETE FROM mysql.user WHERE User = 'root' AND Host != 'localhost'; +# Remove anonymous users & disable remote root access (only allow UNIX socket). +DROP USER IF EXISTS ''@'%', ''@'localhost', 'root'@'%'; # Remove test database. DROP DATABASE IF EXISTS test; ############################################################################### @@ -71,7 +69,7 @@ GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, PROCESS, FILE, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER ON *.* TO 'vt_filtered'@'localhost'; -FLUSH PRIVILEGES; + RESET SLAVE ALL; RESET MASTER; # custom sql is used to add custom scripts like creating users/passwords. We use it in our tests diff --git a/vitess-mixin/e2e/external_db/mysql/grant.sh b/vitess-mixin/e2e/external_db/mysql/grant.sh index 897c1b5dcf2..9371377d074 100755 --- a/vitess-mixin/e2e/external_db/mysql/grant.sh +++ b/vitess-mixin/e2e/external_db/mysql/grant.sh @@ -3,5 +3,5 @@ echo '**********GRANTING PRIVILEGES START*******************' echo ${mysql[@]} # PURGE BINARY LOGS BEFORE DATE(NOW()); mysql --protocol=socket -uroot -hlocalhost --socket=/var/run/mysqld/mysqld.sock -p$MYSQL_ROOT_PASSWORD -e \ -"GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD'; FLUSH PRIVILEGES;" -echo '*************GRANTING PRIVILEGES END****************' \ No newline at end of file +"GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD'" +echo '*************GRANTING PRIVILEGES END****************' diff --git a/vitess-mixin/e2e/vtcompose/docker-compose.test.yml b/vitess-mixin/e2e/vtcompose/docker-compose.test.yml index a96ef3b1ca9..e6928d73acc 100644 --- 
a/vitess-mixin/e2e/vtcompose/docker-compose.test.yml +++ b/vitess-mixin/e2e/vtcompose/docker-compose.test.yml @@ -52,7 +52,7 @@ services: - SCHEMA_FILES=test_keyspace_schema_file.sql - POST_LOAD_FILE= - EXTERNAL_DB=0 - image: vitess/base + image: vitess/lite volumes: - .:/script schemaload_unsharded_keyspace: @@ -76,7 +76,7 @@ services: - SCHEMA_FILES=unsharded_keyspace_schema_file.sql - POST_LOAD_FILE= - EXTERNAL_DB=0 - image: vitess/base + image: vitess/lite volumes: - .:/script vtctld: @@ -91,7 +91,7 @@ services: - consul1 - consul2 - consul3 - image: vitess/base + image: vitess/lite ports: - 15000:8080 - "15999" @@ -108,7 +108,7 @@ services: --pid_file $$VTDATAROOT/tmp/vtgate.pid --normalize_queries=true ' depends_on: - vtctld - image: vitess/base + image: vitess/lite ports: - 15099:8080 - "15999" @@ -145,7 +145,7 @@ services: - CMD-SHELL - curl localhost:8080/debug/health timeout: 10s - image: vitess/base + image: vitess/lite ports: - 15101:8080 - "15999" @@ -182,7 +182,7 @@ services: - CMD-SHELL - curl localhost:8080/debug/health timeout: 10s - image: vitess/base + image: vitess/lite ports: - 15102:8080 - "15999" @@ -219,7 +219,7 @@ services: - CMD-SHELL - curl localhost:8080/debug/health timeout: 10s - image: vitess/base + image: vitess/lite ports: - 15201:8080 - "15999" @@ -256,7 +256,7 @@ services: - CMD-SHELL - curl localhost:8080/debug/health timeout: 10s - image: vitess/base + image: vitess/lite ports: - 15202:8080 - "15999" @@ -293,7 +293,7 @@ services: - CMD-SHELL - curl localhost:8080/debug/health timeout: 10s - image: vitess/base + image: vitess/lite ports: - 15301:8080 - "15999" diff --git a/vitess-mixin/e2e/vttablet-up.sh b/vitess-mixin/e2e/vttablet-up.sh index f41b31f025c..0bc9d7a629d 100755 --- a/vitess-mixin/e2e/vttablet-up.sh +++ b/vitess-mixin/e2e/vttablet-up.sh @@ -68,7 +68,7 @@ if [ "$external" = "1" ]; then # We need a common user for the unmanaged and managed tablets else tools like orchestrator will not function correctly echo "Creating 
matching user for managed tablets..." echo "CREATE USER IF NOT EXISTS '$DB_USER'@'%' IDENTIFIED BY '$DB_PASS';" >> $init_db_sql_file - echo "GRANT ALL ON *.* TO '$DB_USER'@'%';FLUSH PRIVILEGES;" >> $init_db_sql_file + echo "GRANT ALL ON *.* TO '$DB_USER'@'%';" >> $init_db_sql_file fi echo "##[CUSTOM_SQL_END]##" >> $init_db_sql_file @@ -133,7 +133,6 @@ if [ $tablet_role = "externalprimary" ]; then --enable_replication_reporter=false \ --enforce_strict_trans_tables=false \ --track_schema_versions=true \ - --vreplication_tablet_type=primary \ --watch_replication_stream=true" else external_db_args="--init_db_name_override $DB_NAME \ diff --git a/vitess-mixin/go.mod b/vitess-mixin/go.mod index d38b8bc4d80..6251472c550 100644 --- a/vitess-mixin/go.mod +++ b/vitess-mixin/go.mod @@ -1,20 +1,132 @@ module vitess-mixin -go 1.13 +go 1.22.3 require ( - github.com/Azure/go-autorest/autorest v0.11.1 // indirect - github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect - github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible + github.com/evanphx/json-patch v5.9.0+incompatible github.com/google/go-jsonnet v0.16.0 github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 github.com/krishicks/yaml-patch v0.0.10 // Believe it or not, this is actually version 2.13.1 // See https://github.com/prometheus/prometheus/issues/5590#issuecomment-546368944 github.com/prometheus/prometheus v1.8.2-0.20191017095924-6f92ce560538 - github.com/stretchr/testify v1.8.1 - vitess.io/vitess v0.16.2 + github.com/stretchr/testify v1.9.0 + vitess.io/vitess v0.19.4 +) + +require ( + cloud.google.com/go/compute v1.25.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + github.com/Azure/azure-sdk-for-go v23.2.0+incompatible // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.1 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect + 
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/aws/aws-sdk-go v1.50.32 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/edsrzf/mmap-go v1.0.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-kit/kit v0.9.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/glog v1.2.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gnostic v0.6.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy 
v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect + github.com/gophercloud/gophercloud v0.3.0 // indirect + github.com/hashicorp/consul/api v1.28.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/serf v0.10.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/miekg/dns v1.1.41 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/nxadm/tail v1.4.11 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.49.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 // indirect + github.com/spf13/pflag 
v1.0.5 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/api v0.168.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 // indirect + google.golang.org/grpc v1.62.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect + gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.26.1 // indirect + k8s.io/apimachinery v0.26.1 // indirect + k8s.io/client-go v0.26.1 // indirect + k8s.io/klog/v2 v2.90.0 // indirect + k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect + k8s.io/utils v0.0.0-20230115233650-391b47cb4029 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) replace k8s.io/client-go v2.0.0-alpha.0.0.20181121191925-a47917edff34+incompatible => k8s.io/client-go v2.0.0-alpha.1+incompatible diff --git a/vitess-mixin/go.sum b/vitess-mixin/go.sum index c419ab2ccae..73171b17f6a 100644 --- a/vitess-mixin/go.sum +++ b/vitess-mixin/go.sum @@ -1,424 +1,25 @@ cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go 
v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk= -cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod 
h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod 
h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod 
h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks 
v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU= +cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod 
h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= 
-cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= 
-cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= 
-cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod 
h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= 
-cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod 
h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.4.0/go.mod h1:LFrqilwgdw4X2cJS9ALgzYmMu+ULyrUN6IHV3CPK4TM= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= 
-cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= -cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager 
v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= 
-cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= -cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod 
h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod 
h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/99designs/gqlgen v0.16.0/go.mod h1:nbeSjFkqphIqpZsYe1ULVz0yfH8hjpJdJIQoX/e0G2I= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible 
h1:bch1RS060vGpHpY3zvQDV4rOiRw25J1zmR/B9a76aSA= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod 
h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= @@ -426,290 +27,132 @@ github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+X github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb 
v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.0.0-20211129110424-6491aa3bf583/go.mod h1:EP9f4GqaDJyP1F5jTNMtzdIpw3JpNs3rMSJOnYywCiw= -github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0/go.mod h1:7Bsrm5U8/B+B8dffT3t733tDvdCr7upqIPSVuDqJ0Mw= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0-rc.1/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= -github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v4.8.2+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go/v5 v5.0.2/go.mod h1:ZI9JFB4ewXbw1sBnF4sxsR2k1H3xjV+PUAOUsHvKpcU= -github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E= -github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q= -github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs= -github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM= -github.com/DataDog/sketches-go v1.2.1/go.mod h1:1xYmPLY1So10AwxV6MJV0J53XVH+WL9Ad1KetxVivVI= -github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk= -github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/HdrHistogram/hdrhistogram-go v0.9.0/go.mod h1:nxrse8/Tzg2tg3DZcZjm6qEclQKK70g0KxO61gFFZD4= -github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= -github.com/Masterminds/semver 
v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.22.0/go.mod h1:lm3THZ8reqBDBQKQyb5HB3sY1lKp3grEbQ81aWSgPp4= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= 
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= -github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api 
v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.23.12/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM= -github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.0.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM= -github.com/aws/aws-sdk-go-v2/config v1.0.0/go.mod h1:WysE/OpUgE37tjtmtJd8GXgT8s1euilE5XtUkRNUQ1w= -github.com/aws/aws-sdk-go-v2/credentials v1.0.0/go.mod h1:/SvsiqBf509hG4Bddigr3NB12MIpfHhZapyBurJe8aY= 
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.0/go.mod h1:wpMHDCXvOXZxGCRSidyepa8uJHY4vaBGfY2/+oKU/Bc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.0/go.mod h1:3jExOmpbjgPnz2FJaMOfbSk1heTkZ66aD3yNtVhnjvI= -github.com/aws/aws-sdk-go-v2/service/sqs v1.0.0/go.mod h1:w5BclCU8ptTbagzXS/fHBr+vAyXUjggg/72qDIURKMk= -github.com/aws/aws-sdk-go-v2/service/sts v1.0.0/go.mod h1:5f+cELGATgill5Pu3/vK3Ebuigstc+qYEHW5MvGWZO4= -github.com/aws/smithy-go v1.0.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= -github.com/aws/smithy-go v1.11.0/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY= +github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U= -github.com/bradfitz/gomemcache v0.0.0-20220106215444-fb4bf637b56d/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= 
github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/confluentinc/confluent-kafka-go v1.4.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc 
v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/dave/astrid v0.0.0-20170323122508-8c2895878b14/go.mod 
h1:Sth2QfxfATb/nW4EsrSi2KyJmbcniZ8TgTaji17D6ms= -github.com/dave/brenda v1.1.0/go.mod h1:4wCUr6gSlu5/1Tk7akE5X7UorwiQ8Rij0SKH3/BGMOM= -github.com/dave/courtney v0.3.0/go.mod h1:BAv3hA06AYfNUjfjQr+5gc6vxeBVOupLqrColj+QSD8= -github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= -github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk= -github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= -github.com/dave/patsy v0.0.0-20210517141501-957256f50cba/go.mod h1:qfR88CgEGLoiqDaE+xxDCi5QA5v4vUoW0UCX2Nd5Tlc= -github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= -github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denisenkom/go-mssqldb v0.11.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= -github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= 
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI= -github.com/elastic/go-elasticsearch/v7 v7.17.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= -github.com/fatih/structs 
v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/r/VLSOOIySU= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/garyburd/redigo v1.6.3/go.mod h1:rTb6epsqigu3kYKBnaF028A7Tf/Aw5s0cqA47doKKqw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-chi/chi v1.5.0/go.mod h1:REp24E+25iKvxgeTfHmdUoL5x15kBiDBlnIl5bCwe2k= -github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa/Cohzb8= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors 
v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= @@ -719,8 +162,6 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= @@ -728,9 +169,6 @@ github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/loads v0.17.0/go.mod 
h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -738,96 +176,58 @@ github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.2/go.mod 
h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-pg/pg/v10 v10.0.0/go.mod h1:XHU1AkQW534GFuUdSiQ46+Xw6Ah+9+b8DlT4YwhiXL8= -github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1/go.mod 
h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis/v7 v7.1.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= -github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gocql/gocql v0.0.0-20220224095938-0eacd3183625/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofiber/fiber/v2 v2.24.0/go.mod h1:MR1usVH3JHYRyQwMe2eZXRSZHRX38fkV+A7CPB+DlDQ= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 
-github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -837,134 +237,71 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0= github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz 
v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70 h1:XTnP8fJpa4Kvpw2qARB4KS9izqxPS0Sd92cDlY3uk+w= github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210423192551-a2663126120b/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= -github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
-github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod 
h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.0.0/go.mod h1:mbFwfRxOTDHZpT3iUsMAFcLNoVm6Xbe1xZ6KiSm8FY0= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g= -github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4= -github.com/hashicorp/consul/internal v0.1.0/go.mod h1:zi9bMZYbiPHyAjgBWo7kCUcy5l2NrTdrkVupCc7Oo6c= +github.com/hashicorp/consul/api v1.28.2 
h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU= -github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -972,18 +309,12 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.2 
h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -991,9 +322,7 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= @@ -1004,116 +333,36 @@ github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjG 
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= +github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.6/go.mod h1:5VDNHjqFMgEcclnwmkCnC99IPwxBmIsxwY8qn+Nl0H4= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/hashicorp/serf v0.8.6/go.mod h1:P/AVgr4UHsUYqVHG1y9eFhz8S35pqhGhLZaDpfGKIMo= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= -github.com/hashicorp/vault/api v1.1.0/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= -github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= -github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= -github.com/icrowley/fake 
v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= -github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod 
h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= -github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod 
h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= -github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= -github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= -github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.9.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= -github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= -github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0= -github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.14.0/go.mod h1:jT3ibf/A0ZVCp89rtCIN0zCJxcE74ypROmHEZYsG/j8= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle 
v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jinzhu/gorm v1.9.10/go.mod h1:Kh6hTsSGffh4ui079FHrR5Gg+5D0hgihqDcsDN2BBJY= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jinzhu/now v1.1.3/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= @@ -1123,142 
+372,74 @@ github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBv github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc= github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck 
v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= 
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= -github.com/labstack/echo/v4 v4.2.0/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
-github.com/mailru/easyjson v0.0.0-20180730094502-03f2033d19d5/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matryer/moq v0.2.3/go.mod h1:9RtPYjTnH1bSBIkpvtHkFN7nbWAnO7oRpdJkEIn6UtE= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod 
h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.9/go.mod h1:eF30/rfdQUO9EnzNIZQr0r9HiLMlZNCpJkHbmMuOAE0= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.12/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod 
h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1267,8 +448,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1276,443 +455,179 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= +github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= 
github.com/oklog/ulid v0.0.0-20170117200651-66bb6560562f/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= 
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= -github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= -github.com/planetscale/vtprotobuf v0.4.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= 
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model 
v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/common v0.49.0 h1:ToNTdK4zSnPVJmh698mGFkDor9wBI/iGaJy5dbH1EgI= +github.com/prometheus/common v0.49.0/go.mod h1:Kxm+EULxRbUkjGU6WFsQqo3ORzB4tyKvlWFOE9mB2sE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v1.8.2-0.20191017095924-6f92ce560538 h1:iyerK9/VU1F02ASqYyIXp60gKxo7ualRoEezXPqbQZE= github.com/prometheus/prometheus v1.8.2-0.20191017095924-6f92ce560538/go.mod h1:SgN99nHQ/tVJyAuyLKKz6i2j5cJx3eLy9MCRCPOXqUI= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= 
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sagikazarmark/crypt v0.9.0/go.mod h1:RnH7sEhxfdnPm1z+XMgSLjWTEIjyK4z2dw6+4vHTMuo= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVIQhIWvP2kNuSZ2CmnfBJFSRq+kO1pu2cc= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/satori/go.uuid v1.2.0/go.mod 
h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= -github.com/segmentio/kafka-go v0.4.29/go.mod h1:m1lXeqJtIFYZayv0shM/tjrAFljvWLTprxBHd+3PnaU= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U= -github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= -github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod 
h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= -github.com/spyzhov/ajson v0.8.0/go.mod h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= -github.com/tidwall/btree v0.3.0/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8= -github.com/tidwall/btree v1.1.0/go.mod h1:TzIRzen6yHbibdSfK6t8QimqbUnoxUSrZfeW7Uob0q4= -github.com/tidwall/buntdb v1.2.0/go.mod h1:XLza/dhlwzO6dc5o/KWor4kfZSt3BP8QV+77ZMKfI58= -github.com/tidwall/gjson v1.6.7/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= -github.com/tidwall/gjson v1.6.8/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= -github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= -github.com/tidwall/grect v0.1.0/go.mod h1:sa5O42oP6jWfTShL9ka6Sgmg3TgIK649veZe05B7+J8= -github.com/tidwall/grect v0.1.4/go.mod h1:9FBsaYRaR0Tcy4UwefBX/UDcDcDy9V5jUcxHzv2jd5Q= -github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/match 
v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tidwall/rtred v0.1.2/go.mod h1:hd69WNXQ5RP9vHd7dqekAz+RIdtfBogmglkZSRxCHFQ= -github.com/tidwall/tinyqueue v0.1.1/go.mod h1:O/QNHwrnjqr6IHItYrzoHAKYhBkLI67Q096fQP5zMYw= -github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/twitchtv/twirp v8.1.1+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= -github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= 
-github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.31.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= -github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= -github.com/vmihailenco/bufpool v0.1.11/go.mod h1:AFf/MOy3l2CFTKbxwt0mp2MwnqjNEs5H/UxrkA5jxTQ= -github.com/vmihailenco/msgpack/v4 v4.3.11/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/msgpack/v5 v5.0.0-beta.1/go.mod h1:xlngVLeyQ/Qi05oQxhQ+oTuqa03RjMwMfk/7/TCs+QI= -github.com/vmihailenco/msgpack/v5 v5.3.4/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= 
-github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -github.com/zenazn/goji v1.0.1/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= -go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= -go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0= -go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= -go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= 
-go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0/go.mod 
h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= -go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= -go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200901203048-c4f52b2c50aa/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= -golang.org/x/exp v0.0.0-20230131160201-f062dba9d201/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20230131160201-f062dba9d201/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod 
h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1722,7 +637,6 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1735,115 +649,37 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1852,17 +688,12 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1871,476 +702,125 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod 
h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api 
v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod 
h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= -google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.168.0 h1:MBRe+Ki4mMN93jhDDbpuRLjRddooArz4FeSObvUMmjY= +google.golang.org/api v0.168.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200528110217-3d3490e7e671/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto 
v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= 
-google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod 
h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto 
v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI= -google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20240304212257-790db918fca8 h1:Fe8QycXyEd9mJgnwB9kmw00WgB43eQ/xYO5C6gceybQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 
h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 h1:IR+hp6ypxjH24bkMfEJ0yHR21+gwPWdV+/IBrPQyn3k= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= 
-google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ= -google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= -google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2349,47 +829,28 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
-google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/DataDog/dd-trace-go.v1 v1.47.0/go.mod h1:aHb6c4hPRANXnB64LDAKyfWotKgfRjlHv23MnahM8AI= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= 
-gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/jinzhu/gorm.v1 v1.9.1/go.mod h1:56JJPUzbikvTVnoyP1nppSkbJ2L8sunqTBDY2fDrmFg= -gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/olivere/elastic.v3 v3.0.75/go.mod h1:yDEuSnrM51Pc8dM5ov7U8aI/ToR3PG0llA8aRv2qmw0= -gopkg.in/olivere/elastic.v5 v5.0.84/go.mod h1:LXF6q9XNBxpMqrcgax95C6xyARXWbbCXUrtTxrNrxJI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2397,133 +858,51 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw= -gorm.io/driver/postgres v1.0.0/go.mod h1:wtMFcOzmuA5QigNsgEIb7O5lhvH1tHAF1RbWmLWV4to= -gorm.io/driver/sqlserver v1.0.4/go.mod h1:ciEo5btfITTBCj9BkoUVDvgQbUdLWQNqdFY5OGuGnRg= -gorm.io/gorm v1.9.19/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.20.0/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gorm.io/gorm v1.20.6/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -inet.af/netaddr v0.0.0-20220617031823-097006376321/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= -inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k= k8s.io/api v0.0.0-20190620084959-7cf5895f2711/go.mod h1:TBhBqb1AWbBQbW3XRusr7n7E4v2+5ZY8r8sAMnyFC5A= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= -k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= -k8s.io/api v0.18.19/go.mod h1:lmViaHqL3es8JiaK3pCJMjBKm2CnzIcAXpHKifwbmAg= k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apiextensions-apiserver v0.18.19/go.mod h1:kiomVdryKCrn+R0E+iPx+bZ/00rgj5tPXEBduSEJwgI= k8s.io/apimachinery v0.0.0-20190612205821-1799e75a0719/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= -k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.18.19/go.mod h1:70HIRzSveORLKbatTlXzI2B2UUhbWzbq8Vqyf+HbdUQ= k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apiserver v0.18.19/go.mod h1:VY80gRUh89Cmnx2s9S5nZTF8vwzEKweAFy7nTFuFLRU= k8s.io/client-go v0.0.0-20190620085101-78d2af792bab/go.mod h1:E95RaSlHr79aHaX0aGSwcPNfygDiPKOVXdmivCIZT0k= -k8s.io/client-go v0.17.0/go.mod 
h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= -k8s.io/client-go v0.18.19/go.mod h1:lB+d4UqdzSjaU41VODLYm/oon3o05LAzsVpm6Me5XkY= k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/code-generator v0.18.19/go.mod h1:l5yJd8cLSvkIb0ZJMsQdWuDOx5rWfLNpgmHQyl3LmBE= -k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I= -k8s.io/component-base v0.18.19/go.mod h1:nQMCdH6RaS/GD0J1YZqc5NInfCdknth4BwlAT5Mf7tA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod 
h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 h1:vV3ZKAUX0nMjTflyfVea98dTfROpIxDaEsQws0FT2Ts= k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE= k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= -modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= -modernc.org/cc/v3 
v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= -modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= -modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI= -modernc.org/ccgo/v3 v3.0.0-20220910160915-348f15de615a/go.mod h1:8p47QxPkdugex9J4n9P2tLZ9bK01yngIVp00g4nomW0= -modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g= -modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA= -modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0= -modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= -modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= -modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI= -modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE= -modernc.org/token v1.0.1/go.mod 
h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.1/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -vitess.io/vitess v0.16.2 h1:vu6xCrM2GpQuX03Nwhb20P8xAh9jCIJy0FESkr3Ohjw= -vitess.io/vitess v0.16.2/go.mod 
h1:Ennjgg9bMpYbSKqh7TSQJFZdFKtUUUXO0QW8qcjxkBQ= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +vitess.io/vitess v0.19.4 h1:fJhZm5RknYbF7YUBXBmcvST/mvA4CeevlI9N0TLlYXA= +vitess.io/vitess v0.19.4/go.mod h1:15uIi9x/Gu+BdDhUR80yP0M1v7aMG2mZQXSd56CE2t0= diff --git a/web/vtadmin/README.md b/web/vtadmin/README.md index 843c0711de2..d6bcd67d277 100644 --- a/web/vtadmin/README.md +++ b/web/vtadmin/README.md @@ -2,8 +2,8 @@ ## Prerequisites -- [node](https://nodejs.org) >= 18.16.0 LTS -- npm >= 9.7.1 (comes with node) +- [node](https://nodejs.org) >= 20.12.0 LTS +- npm >= 10.5.0 (comes with node) ## Available scripts diff --git a/web/vtadmin/build.sh b/web/vtadmin/build.sh index 54d1a5b1926..b8481ccdc0e 100755 --- a/web/vtadmin/build.sh +++ b/web/vtadmin/build.sh @@ -19,18 +19,28 @@ function output() { } script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")" -source "${script_dir}/../../build.env" +pushd ${VTROOT} +source "./build.env" +popd web_dir="${script_dir}" vtadmin_api_port=14200 +if [ -z "${hostname}" ] +then + hostname=$(hostname -f) + output "\n\033[1;32mhostname was empty, set it to \"${hostname}\"\033[0m" +fi + +case_insensitive_hostname=$(echo "$hostname" | tr '[:upper:]' '[:lower:]') + # Download nvm and node if [[ -z ${NVM_DIR} ]]; then export NVM_DIR="$HOME/.nvm" fi if [[ -z ${NODE_VERSION} ]]; then - export NODE_VERSION="18.16.0" + export NODE_VERSION="20.12.2" fi output "\nInstalling nvm...\n" @@ -38,7 +48,7 @@ output "\nInstalling nvm...\n" if [ -d "$NVM_DIR" ]; then output "\033[1;32mnvm is already installed!\033[0m" else - curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash && output "\033[1;32mnvm is installed!\033[0m" || fail "\033[1;32mnvm failed to install!\033[0m" + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash && output "\033[1;32mnvm is installed!\033[0m" || fail "\033[1;32mnvm failed to 
install!\033[0m" fi source "$NVM_DIR/nvm.sh" @@ -50,6 +60,9 @@ npm --prefix "$web_dir" --silent install export PATH=$PATH:$web_dir/node_modules/.bin/ -VITE_VTADMIN_API_ADDRESS="http://${hostname}:${vtadmin_api_port}" \ +vite_vtadmin_api_address="http://${case_insensitive_hostname}:${vtadmin_api_port}" +output "\n\033[1;32mSetting VITE_VTADMIN_API_ADDRESS to \"${vite_vtadmin_api_address}\"\033[0m" + +VITE_VTADMIN_API_ADDRESS="http://${case_insensitive_hostname}:${vtadmin_api_port}" \ VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \ npm run --prefix "$web_dir" build diff --git a/web/vtadmin/package-lock.json b/web/vtadmin/package-lock.json index 94044b24a27..2257ad058f8 100644 --- a/web/vtadmin/package-lock.json +++ b/web/vtadmin/package-lock.json @@ -1,7 +1,7 @@ { "name": "vtadmin", "version": "0.1.0", - "lockfileVersion": 2, + "lockfileVersion": 3, "requires": true, "packages": { "": { @@ -61,133 +61,89 @@ "stylelint-config-standard-scss": "^3.0.0", "tailwindcss": "^3.0.18", "typescript": "^5.0.2", - "vite": "^4.2.3", + "vite": "^4.5.3", "vite-plugin-eslint": "^1.8.1", "vite-plugin-svgr": "^2.4.0", "vitest": "^0.29.8" }, "engines": { - "node": ">=18.16.0", - "npm": ">=9.5.1" + "node": ">=20.12.0", + "npm": ">=10.5.0" } }, "node_modules/@adobe/css-tools": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.1.tgz", - "integrity": "sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==", + "version": "4.3.3", + "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.3.tgz", + "integrity": "sha512-rE0Pygv0sEZ4vBWHlAgJLGDU7Pm8xoO6p3wsEceb7GYAjScrOHpEo8KK/eVkAcnSM+slAEtXjA2JpdjLp4fJQQ==", "dev": true }, - "node_modules/@ampproject/remapping": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.0.tgz", - "integrity": "sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==", 
+ "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", "dev": true, - "dependencies": { - "@jridgewell/gen-mapping": "^0.1.0", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, - "node_modules/@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", - "dependencies": { - "@babel/highlight": "^7.22.13", - "chalk": "^2.4.2" - }, "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/code-frame/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dependencies": { - "color-convert": "^1.9.0" + "node": ">=10" }, - "engines": { - "node": ">=4" + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@babel/code-frame/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { 
- "node": ">=4" - } - }, - "node_modules/@babel/code-frame/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/@babel/code-frame/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "node_modules/@babel/code-frame/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "engines": { - "node": ">=4" + "node": ">=6.0.0" } }, - "node_modules/@babel/code-frame/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "node_modules/@babel/code-frame": { + "version": "7.24.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.2.tgz", + "integrity": "sha512-y5+tLQyV8pg3fsiln67BVLD1P13Eg4lh5RW9mF0zUuvLrv9uIQ4MCL+CRT+FTsBlBjcIan6PGsLcBN0m3ClUyQ==", "dependencies": { - "has-flag": "^3.0.0" + "@babel/highlight": "^7.24.2", + "picocolors": "^1.0.0" }, "engines": { - "node": ">=4" + "node": ">=6.9.0" } }, "node_modules/@babel/compat-data": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.21.4.tgz", - "integrity": "sha512-/DYyDpeCfaVinT40FPGdkkb+lYSKvsVuMjDAG7jPOWWiM1ibOaB9CXJAlc4d1QpP/U2q2P9jbrSlClKSErd55g==", + "version": "7.24.4", + "resolved": 
"https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.4.tgz", + "integrity": "sha512-vg8Gih2MLK+kOkHJp4gBEIkyaIi00jgWot2D9QOmmfLC8jINSOzmCLta6Bvz/JSBCqnegV0L80jhxkol5GWNfQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.21.4.tgz", - "integrity": "sha512-qt/YV149Jman/6AfmlxJ04LMIu8bMoyl3RB91yTFrxQmgbrSvQMy7cI8Q62FHx1t8wJ8B5fu0UDoLwHAhUo1QA==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.5.tgz", + "integrity": "sha512-tVQRucExLQ02Boi4vdPp49svNGcfL2GhdTCT9aldhXgCJVAI21EtRfBettiuLUwce/7r6bFdgs6JFkcdTiFttA==", "dev": true, "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.21.4", - "@babel/generator": "^7.21.4", - "@babel/helper-compilation-targets": "^7.21.4", - "@babel/helper-module-transforms": "^7.21.2", - "@babel/helpers": "^7.21.0", - "@babel/parser": "^7.21.4", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.4", - "@babel/types": "^7.21.4", - "convert-source-map": "^1.7.0", + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-module-transforms": "^7.24.5", + "@babel/helpers": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5", + "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - "semver": "^6.3.0" + "json5": "^2.2.3", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -198,21 +154,21 @@ } }, "node_modules/@babel/eslint-parser": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.21.3.tgz", - "integrity": "sha512-kfhmPimwo6k4P8zxNs8+T7yR44q1LdpsZdE1NkCsVlfiuTPRfnGgjaF8Qgug9q9Pou17u6wneYF0lDCZJATMFg==", + "version": "7.24.5", + "resolved": 
"https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.24.5.tgz", + "integrity": "sha512-gsUcqS/fPlgAw1kOtpss7uhY6E9SFFANQ6EFX5GTvzUwaV0+sGaZWk6xq22MOdeT9wfxyokW3ceCUvOiRtZciQ==", "dev": true, "dependencies": { "@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", "eslint-visitor-keys": "^2.1.0", - "semver": "^6.3.0" + "semver": "^6.3.1" }, "engines": { "node": "^10.13.0 || ^12.13.0 || >=14.0.0" }, "peerDependencies": { - "@babel/core": ">=7.11.0", - "eslint": "^7.5.0 || ^8.0.0" + "@babel/core": "^7.11.0", + "eslint": "^7.5.0 || ^8.0.0 || ^9.0.0" } }, "node_modules/@babel/eslint-parser/node_modules/eslint-visitor-keys": { @@ -225,92 +181,75 @@ } }, "node_modules/@babel/generator": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", - "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.5.tgz", + "integrity": "sha512-x32i4hEXvr+iI0NEoEfDKzlemF8AmtOP8CcrRaEcpzysWuoEb1KknpcvMsHKPONoKZiDuItklgWhB18xEhr9PA==", "dev": true, "dependencies": { - "@babel/types": "^7.23.0", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", + "@babel/types": "^7.24.5", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^2.5.1" }, "engines": { "node": ">=6.9.0" } }, - "node_modules/@babel/generator/node_modules/@jridgewell/gen-mapping": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", - "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", - "dev": true, - "dependencies": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" - }, - "engines": { - "node": ">=6.0.0" - } - }, 
"node_modules/@babel/helper-annotate-as-pure": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz", - "integrity": "sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz", + "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz", - "integrity": "sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz", + "integrity": "sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw==", "dev": true, "dependencies": { - "@babel/helper-explode-assignable-expression": "^7.18.6", - "@babel/types": "^7.18.9" + "@babel/types": "^7.22.15" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.21.4.tgz", - "integrity": "sha512-Fa0tTuOXZ1iL8IeDFUWCzjZcn+sJGd9RZdH9esYVjEejGmzf+FFYQpMi/kZUk2kPy/q1H3/GPw7np8qar/stfg==", + "version": "7.23.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz", + 
"integrity": "sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.21.4", - "@babel/helper-validator-option": "^7.21.0", - "browserslist": "^4.21.3", + "@babel/compat-data": "^7.23.5", + "@babel/helper-validator-option": "^7.23.5", + "browserslist": "^4.22.2", "lru-cache": "^5.1.1", - "semver": "^6.3.0" + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-create-class-features-plugin": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.21.4.tgz", - "integrity": "sha512-46QrX2CQlaFRF4TkwfTt6nJD7IHq8539cCL7SDpqWSDeJKY1xylKKY5F/33mJhLZ3mFvKv2gGrVS6NkyF6qs+Q==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.5.tgz", + "integrity": "sha512-uRc4Cv8UQWnE4NXlYTIIdM7wfFkOqlFztcC/gVXDKohKoVB3OyonfelUBaJzSwpBntZ2KYGF/9S7asCHsXwW6g==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-member-expression-to-functions": "^7.21.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/helper-split-export-declaration": "^7.18.6" + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-member-expression-to-functions": "^7.24.5", + "@babel/helper-optimise-call-expression": "^7.22.5", + "@babel/helper-replace-supers": "^7.24.1", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/helper-split-export-declaration": 
"^7.24.5", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -320,13 +259,14 @@ } }, "node_modules/@babel/helper-create-regexp-features-plugin": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.21.4.tgz", - "integrity": "sha512-M00OuhU+0GyZ5iBBN9czjugzWrEq2vDpf/zCYHxxf93ul/Q5rv+a5h+/+0WnI1AebHNVtl5bFV0qsJoH23DbfA==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz", + "integrity": "sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "regexpu-core": "^5.3.1" + "@babel/helper-annotate-as-pure": "^7.22.5", + "regexpu-core": "^5.3.1", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -336,20 +276,19 @@ } }, "node_modules/@babel/helper-define-polyfill-provider": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz", - "integrity": "sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww==", + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz", + "integrity": "sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ==", "dev": true, "dependencies": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", "debug": "^4.1.1", "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" + "resolve": "^1.14.2" }, "peerDependencies": { - "@babel/core": "^7.4.0-0" + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, 
"node_modules/@babel/helper-environment-visitor": { @@ -361,18 +300,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/helper-explode-assignable-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz", - "integrity": "sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==", - "dev": true, - "dependencies": { - "@babel/types": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-function-name": { "version": "7.23.0", "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", @@ -399,79 +326,78 @@ } }, "node_modules/@babel/helper-member-expression-to-functions": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.21.0.tgz", - "integrity": "sha512-Muu8cdZwNN6mRRNG6lAYErJ5X3bRevgYR2O8wN0yn7jJSnGDu6eG59RfT29JHxGUovyfrh6Pj0XzmR7drNVL3Q==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.24.5.tgz", + "integrity": "sha512-4owRteeihKWKamtqg4JmWSsEZU445xpFRXPEwp44HbgbxdWlUV1b4Agg4lkA806Lil5XM/e+FJyS0vj5T6vmcA==", "dev": true, "dependencies": { - "@babel/types": "^7.21.0" + "@babel/types": "^7.24.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-imports": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz", - "integrity": "sha512-orajc5T2PsRYUN3ZryCEFeMDYwyw09c/pZeaQEZPH0MpKzSvn3e0uXsDBu3k03VI+9DBiRo+l22BfKTpKwa/Wg==", + "version": "7.24.3", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.3.tgz", + "integrity": 
"sha512-viKb0F9f2s0BCS22QSF308z/+1YWKV/76mwt61NBzS5izMzDPwdq1pTrzf+Li3npBWX9KdQbkeCt1jSAM7lZqg==", "dev": true, "dependencies": { - "@babel/types": "^7.21.4" + "@babel/types": "^7.24.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.21.2", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.21.2.tgz", - "integrity": "sha512-79yj2AR4U/Oqq/WOV7Lx6hUjau1Zfo4cI+JLAVYeMV5XIlbOhmjEk5ulbTc9fMpmlojzZHkUUxAiK+UKn+hNQQ==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.5.tgz", + "integrity": "sha512-9GxeY8c2d2mdQUP1Dye0ks3VDyIMS98kt/llQ2nUId8IsWqTF0l1LkSX0/uP7l7MCDrzXS009Hyhe2gzTiGW8A==", "dev": true, "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-simple-access": "^7.20.2", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/helper-validator-identifier": "^7.19.1", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.2", - "@babel/types": "^7.21.2" + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-module-imports": "^7.24.3", + "@babel/helper-simple-access": "^7.24.5", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/helper-validator-identifier": "^7.24.5" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-optimise-call-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz", - "integrity": "sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz", + "integrity": 
"sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==", "dev": true, "dependencies": { - "@babel/types": "^7.18.6" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz", - "integrity": "sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.5.tgz", + "integrity": "sha512-xjNLDopRzW2o6ba0gKbkZq5YWEBaK3PCyTOY1K2P/O07LGMhMqlMXPxwN4S5/RhWuCobT8z0jrlKGlYmeR1OhQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-remap-async-to-generator": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz", - "integrity": "sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==", + "version": "7.22.20", + "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz", + "integrity": "sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-wrap-function": "^7.18.9", - "@babel/types": "^7.18.9" + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-wrap-function": "^7.22.20" }, "engines": { "node": ">=6.9.0" @@ -481,121 +407,121 @@ } }, "node_modules/@babel/helper-replace-supers": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.20.7.tgz", - "integrity": 
"sha512-vujDMtB6LVfNW13jhlCrp48QNslK6JXi7lQG736HVbHz/mbf4Dc7tIRh1Xf5C0rF7BP8iiSxGMCmY6Ci1ven3A==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.24.1.tgz", + "integrity": "sha512-QCR1UqC9BzG5vZl8BMicmZ28RuUBnHhAMddD8yHFHDRH9lLTZ9uUPehX8ctVPT8l0TKblJidqcgUUKGVrePleQ==", "dev": true, "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-member-expression-to-functions": "^7.20.7", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.20.7", - "@babel/types": "^7.20.7" + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-member-expression-to-functions": "^7.23.0", + "@babel/helper-optimise-call-expression": "^7.22.5" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" } }, "node_modules/@babel/helper-simple-access": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz", - "integrity": "sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.5.tgz", + "integrity": "sha512-uH3Hmf5q5n7n8mz7arjUlDOCbttY/DW4DYhE6FUsjKJ/oYC1kQQUvwEQWxRwUpX9qQKRXeqLwWxrqilMrf32sQ==", "dev": true, "dependencies": { - "@babel/types": "^7.20.2" + "@babel/types": "^7.24.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.20.0.tgz", - "integrity": "sha512-5y1JYeNKfvnT8sZcK9DVRtpTbGiomYIHviSP3OQWmDPU3DeH4a1ZlT/N2lyQ5P8egjcRaT/Y9aNqUxK0WsnIIg==", + "version": "7.22.5", + "resolved": 
"https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz", + "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==", "dev": true, "dependencies": { - "@babel/types": "^7.20.0" + "@babel/types": "^7.22.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.5.tgz", + "integrity": "sha512-5CHncttXohrHk8GWOFCcCl4oRD9fKosWlIRgWm4ql9VYioKm52Mk2xsmoohvm7f3JoiLSM5ZgJuRaf5QZZYd3Q==", "dev": true, "dependencies": { - "@babel/types": "^7.22.5" + "@babel/types": "^7.24.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.1.tgz", + "integrity": "sha512-2ofRCjnnA9y+wk8b9IAREroeUP02KHp431N2mhKniy2yKIDKpbrHv9eXwm8cBeWQYcJmzv5qKCu65P47eCF7CQ==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==", + "version": 
"7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.5.tgz", + "integrity": "sha512-3q93SSKX2TWCG30M2G2kwaKeTYgEUp5Snjuj8qm729SObL6nbtUldAi37qbxkD5gg3xnBio+f9nqpSepGZMvxA==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz", - "integrity": "sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==", + "version": "7.23.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz", + "integrity": "sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw==", "dev": true, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-wrap-function": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.20.5.tgz", - "integrity": "sha512-bYMxIWK5mh+TgXGVqAtnu5Yn1un+v8DDZtqyzKRLUzrh70Eal2O3aZ7aPYiMADO4uKlkzOiRiZ6GX5q3qxvW9Q==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.24.5.tgz", + "integrity": "sha512-/xxzuNvgRl4/HLNKvnFwdhdgN3cpLxgLROeLDl83Yx0AJ1SGvq1ak0OszTOjDfiB8Vx03eJbeDWh9r+jCCWttw==", "dev": true, "dependencies": { - "@babel/helper-function-name": "^7.19.0", - "@babel/template": "^7.18.10", - "@babel/traverse": "^7.20.5", - "@babel/types": "^7.20.5" + "@babel/helper-function-name": "^7.23.0", + "@babel/template": "^7.24.0", + "@babel/types": "^7.24.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.21.0.tgz", - "integrity": "sha512-XXve0CBtOW0pd7MRzzmoyuSj0e3SEzj8pgyFxnTT1NJZL38BD1MK7yYrm8yefRPIDvNNe14xR4FdbHwpInD4rA==", + "version": "7.24.5", + 
"resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.5.tgz", + "integrity": "sha512-CiQmBMMpMQHwM5m01YnrM6imUG1ebgYJ+fAIW4FZe6m4qHTPaRHti+R8cggAwkdz4oXhtO4/K9JWlh+8hIfR2Q==", "dev": true, "dependencies": { - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.0", - "@babel/types": "^7.21.0" + "@babel/template": "^7.24.0", + "@babel/traverse": "^7.24.5", + "@babel/types": "^7.24.5" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", - "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.5.tgz", + "integrity": "sha512-8lLmua6AVh/8SLJRRVD6V8p73Hir9w5mJrhE+IPpILG31KKlI9iz5zmBYKcWPS59qSfgP9RaSBQSHHE81WKuEw==", "dependencies": { - "@babel/helper-validator-identifier": "^7.22.20", + "@babel/helper-validator-identifier": "^7.24.5", "chalk": "^2.4.2", - "js-tokens": "^4.0.0" + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" }, "engines": { "node": ">=6.9.0" @@ -638,6 +564,14 @@ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" }, + "node_modules/@babel/highlight/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "engines": { + "node": ">=0.8.0" + } + }, "node_modules/@babel/highlight/node_modules/has-flag": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", @@ -658,9 +592,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.23.0", - "resolved": 
"https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", - "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.5.tgz", + "integrity": "sha512-EOv5IK8arwh3LI47dz1b0tKUb/1uhHAnHJOrjgtQMIpu1uXd9mlFrJg9IUgGUgZ41Ch0K8REPTYpO7B76b4vJg==", "dev": true, "bin": { "parser": "bin/babel-parser.js" @@ -669,13 +603,14 @@ "node": ">=6.0.0" } }, - "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz", - "integrity": "sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==", + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.24.5.tgz", + "integrity": "sha512-LdXRi1wEMTrHVR4Zc9F8OewC3vdm5h4QB6L71zy6StmYeqGi1b3ttIO8UC+BfZKcH9jdr4aI249rBkm+3+YvHw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.24.5" }, "engines": { "node": ">=6.9.0" @@ -684,133 +619,63 @@ "@babel/core": "^7.0.0" } }, - "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.20.7.tgz", - "integrity": "sha512-sbr9+wNE5aXMBBFBICk01tt7sBf2Oc9ikRFEcem/ZORup9IMUdNhW7/wVLEbbtlWOsEubJet46mHAL2C8+2jKQ==", - "dev": true, - "dependencies": { - 
"@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/plugin-proposal-optional-chaining": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.13.0" - } - }, - "node_modules/@babel/plugin-proposal-async-generator-functions": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.7.tgz", - "integrity": "sha512-xMbiLsn/8RK7Wq7VeVytytS2L6qE69bXPB10YCmMdDZbKF4okCqY74pI/jJQ/8U0b/F6NrT2+14b8/P9/3AMGA==", - "dev": true, - "dependencies": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-remap-async-to-generator": "^7.18.9", - "@babel/plugin-syntax-async-generators": "^7.8.4" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", - "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", - "dev": true, - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-class-static-block": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.21.0.tgz", - "integrity": "sha512-XP5G9MWNUskFuP30IfFSEFB0Z6HzLIUcjYM4bYOPHXl7eiJ9HFv8tWj6TXTN5QODiEhDZAeI4hLok2iHFFV4hw==", - "dev": true, - "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": 
"^7.20.2", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.12.0" - } - }, - "node_modules/@babel/plugin-proposal-decorators": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.21.0.tgz", - "integrity": "sha512-MfgX49uRrFUTL/HvWtmx3zmpyzMMr4MTj3d527MLlr/4RTT9G/ytFFP7qet2uM2Ve03b+BkpWUpK+lRXnQ+v9w==", + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.24.1.tgz", + "integrity": "sha512-y4HqEnkelJIOQGd+3g1bTeKsA5c6qM7eOn7VggGVbBc0y8MLSKHacwcIE2PplNlQSj0PqS9rrXL/nkPVK+kUNg==", "dev": true, "dependencies": { - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/plugin-syntax-decorators": "^7.21.0" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/plugin-proposal-dynamic-import": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz", - "integrity": "sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==", + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.24.1.tgz", + "integrity": 
"sha512-Hj791Ii4ci8HqnaKHAlLNs+zaLXb0EzSDhiAWp5VNlyvCNymYfacs64pxTxbH1znW/NcArSmwpmG9IKE/TUVVQ==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-transform-optional-chaining": "^7.24.1" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.13.0" } }, - "node_modules/@babel/plugin-proposal-export-namespace-from": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz", - "integrity": "sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==", + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.24.1.tgz", + "integrity": "sha512-m9m/fXsXLiHfwdgydIFnpk+7jlVbnvlK5B2EKiPdLUb6WX654ZaaEWJUjk8TftRbZpK0XibovlLWX4KIZhV6jw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.0.0" } }, - "node_modules/@babel/plugin-proposal-json-strings": { + "node_modules/@babel/plugin-proposal-class-properties": { "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz", - "integrity": "sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", + "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-class-properties instead.", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-json-strings": "^7.8.3" + "@babel/helper-create-class-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" }, "engines": { "node": ">=6.9.0" @@ -819,14 +684,15 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.20.7.tgz", - "integrity": "sha512-y7C7cZgpMIjWlKE5T7eJwp+tnRYM89HmRvWM5EQuB5BoHEONjmQ8lSNmBUwOyy/GFRsohJED51YBF79hE1djug==", + "node_modules/@babel/plugin-proposal-decorators": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.24.1.tgz", + "integrity": "sha512-zPEvzFijn+hRvJuX2Vu3KbEBN39LN3f7tW3MQO2LsIs57B26KU+kUc82BdAktS1VCM6libzh45eKGI65lg0cpA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" + "@babel/helper-create-class-features-plugin": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-decorators": "^7.24.1" }, "engines": { "node": ">=6.9.0" @@ -839,6 +705,7 @@ "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", "integrity": 
"sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-nullish-coalescing-operator instead.", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", @@ -855,6 +722,7 @@ "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-numeric-separator instead.", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.18.6", @@ -867,45 +735,11 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-proposal-object-rest-spread": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz", - "integrity": "sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg==", - "dev": true, - "dependencies": { - "@babel/compat-data": "^7.20.5", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.20.7" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, - "node_modules/@babel/plugin-proposal-optional-catch-binding": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz", - "integrity": 
"sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==", - "dev": true, - "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - }, - "engines": { - "node": ">=6.9.0" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-proposal-optional-chaining": { "version": "7.21.0", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz", "integrity": "sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-optional-chaining instead.", "dev": true, "dependencies": { "@babel/helper-plugin-utils": "^7.20.2", @@ -923,6 +757,7 @@ "version": "7.18.6", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", + "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. 
Please use @babel/plugin-transform-private-methods instead.", "dev": true, "dependencies": { "@babel/helper-create-class-features-plugin": "^7.18.6", @@ -936,16 +771,10 @@ } }, "node_modules/@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0.tgz", - "integrity": "sha512-ha4zfehbJjc5MmXBlHec1igel5TJXXLDDRbuJ4+XT2TJcyD9/V1919BA8gMvsdHcNMBy4WBUBiRb3nw/EQUtBw==", + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", "dev": true, - "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - }, "engines": { "node": ">=6.9.0" }, @@ -953,22 +782,6 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", - "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", - "dev": true, - "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - }, - "engines": { - "node": ">=4" - }, - "peerDependencies": { - "@babel/core": "^7.0.0-0" - } - }, "node_modules/@babel/plugin-syntax-async-generators": { "version": "7.8.4", "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", @@ 
-1009,12 +822,12 @@ } }, "node_modules/@babel/plugin-syntax-decorators": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.21.0.tgz", - "integrity": "sha512-tIoPpGBR8UuM4++ccWN3gifhVvQu7ZizuR1fklhRJrd5ewgbkUS+0KVFeWWxELtn18NTLoW32XV7zyOgIAiz+w==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.24.1.tgz", + "integrity": "sha512-05RJdO/cCrtVWuAaSn1tS3bH8jbsJa/Y1uD186u6J4C/1mnHFxseeuWpsqr9anvo7TUulev7tm7GDwRV+VuhDw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1048,12 +861,12 @@ } }, "node_modules/@babel/plugin-syntax-flow": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.21.4.tgz", - "integrity": "sha512-l9xd3N+XG4fZRxEP3vXdK6RW7vN1Uf5dxzRC/09wV86wqZ/YYQooBIGNsiRdfNR3/q2/5pPzV4B54J/9ctX5jw==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.24.1.tgz", + "integrity": "sha512-sxi2kLTI5DeW5vDtMUsk4mTPwvlUDbjOnoWayhynCwrw4QXRld4QEYwqzY8JmQXaJUtgUuCIurtSRH5sn4c7mA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1063,12 +876,12 @@ } }, "node_modules/@babel/plugin-syntax-import-assertions": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.20.0.tgz", - "integrity": "sha512-IUh1vakzNoWalR8ch/areW7qFopR2AEw03JlG7BbrDqmQ4X3q9uuipQwSGrUn7oGiemKjtSLDhNtQHzMHr1JdQ==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.24.1.tgz", + "integrity": 
"sha512-IuwnI5XnuF189t91XbxmXeCDz3qs6iDRO7GJ++wcfgeXNs/8FmIlKcpDSXNVyuLQxlwvskmI3Ct73wUODkJBlQ==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.19.0" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1077,7 +890,34 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-syntax-json-strings": { + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.24.1.tgz", + "integrity": "sha512-zhQTMH0X2nVLnb04tz+s7AMuasX8U0FnpE+nHTOhSOINjWMnopoZTxtIKsd45n4GQ/HIZLyfIpoul8e2m0DnRA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-meta": { + "version": "7.10.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz", + "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.10.4" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-json-strings": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", @@ -1090,12 +930,12 @@ } }, "node_modules/@babel/plugin-syntax-jsx": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.21.4.tgz", - "integrity": "sha512-5hewiLct5OKyh6PLKEYaFclcqtIgCb6bmELouxjF6up5q3Sov7rOayW4RwhbaBL0dit8rA80GNfY+UuDp2mBbQ==", + "version": "7.24.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.24.1.tgz", + "integrity": "sha512-2eCtxZXf+kbkMIsXS4poTvT4Yu5rXiRa+9xGVT56raghjmBTKMpFNc9R4IDiB4emao9eO22Ox7CxuJG7BgExqA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1207,12 +1047,12 @@ } }, "node_modules/@babel/plugin-syntax-typescript": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.21.4.tgz", - "integrity": "sha512-xz0D39NvhQn4t4RNsHmDnnsaQizIlUkdtYvLs8La1BlfjQ6JEwxkJGeqJMW2tAXx+q6H+WFuUTXNdYVpEya0YA==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.24.1.tgz", + "integrity": "sha512-Yhnmvy5HZEnHUty6i++gcfH1/l68AHnItFHnaCv6hn9dNh0hQvvQJsxpi4BMBFN5DLeHBuucT/0DgzXif/OyRw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1221,13 +1061,47 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, "node_modules/@babel/plugin-transform-arrow-functions": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.20.7.tgz", - "integrity": 
"sha512-3poA5E7dzDomxj9WXWwuD6A5F3kc7VXwIJO+E+J8qtDtS+pXPAhrgEyh+9GBwBgPq1Z+bB+/JD60lp5jsN7JPQ==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.24.1.tgz", + "integrity": "sha512-ngT/3NkRhsaep9ck9uj2Xhv9+xB1zShY3tM3g6om4xxCELwCDN4g4Aq5dRn48+0hasAql7s2hdBOysCfNpr4fw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.24.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.24.3.tgz", + "integrity": "sha512-Qe26CMYVjpQxJ8zxM1340JFNjZaF+ISWpr1Kt/jGo+ZTUzKkfw/pphEWbRCb+lmSM6k/TOgfYLvmbHkUQ0asIg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-remap-async-to-generator": "^7.22.20", + "@babel/plugin-syntax-async-generators": "^7.8.4" }, "engines": { "node": ">=6.9.0" @@ -1237,14 +1111,14 @@ } }, "node_modules/@babel/plugin-transform-async-to-generator": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.20.7.tgz", - "integrity": "sha512-Uo5gwHPT9vgnSXQxqGtpdufUiWp96gk7yiP4Mp5bm1QMkEmLXBO7PAGYbKoJ6DhAwiNkcHFBol/x5zZZkL/t0Q==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.24.1.tgz", + "integrity": "sha512-AawPptitRXp1y0n4ilKcGbRYWfbbzFWz2NqNu7dacYDtFtz0CMjG64b3LQsb3KIgnf4/obcUL78hfaOS7iCUfw==", "dev": true, "dependencies": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - 
"@babel/helper-remap-async-to-generator": "^7.18.9" + "@babel/helper-module-imports": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-remap-async-to-generator": "^7.22.20" }, "engines": { "node": ">=6.9.0" @@ -1254,12 +1128,12 @@ } }, "node_modules/@babel/plugin-transform-block-scoped-functions": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz", - "integrity": "sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.24.1.tgz", + "integrity": "sha512-TWWC18OShZutrv9C6mye1xwtam+uNi2bnTOCBUd5sZxyHOiWbU6ztSROofIMrK84uweEZC219POICK/sTYwfgg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1269,12 +1143,28 @@ } }, "node_modules/@babel/plugin-transform-block-scoping": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.21.0.tgz", - "integrity": "sha512-Mdrbunoh9SxwFZapeHVrwFmri16+oYotcZysSzhNIVDwIAb1UV+kvnxULSYq9J3/q5MDG+4X6w8QVgD1zhBXNQ==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.24.5.tgz", + "integrity": "sha512-sMfBc3OxghjC95BkYrYocHL3NaOplrcaunblzwXhGmlPwpmfsxr4vK+mBBt49r+S240vahmv+kUxkeKgs+haCw==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.24.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.24.1.tgz", + "integrity": "sha512-OMLCXi0NqvJfORTaPQBwqLXHhb93wkBKZ4aNwMl6WtehO7ar+cmp+89iPEQPqxAnxsOKTaMcs3POz3rKayJ72g==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-create-class-features-plugin": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1283,20 +1173,36 @@ "@babel/core": "^7.0.0-0" } }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.24.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.24.4.tgz", + "integrity": "sha512-B8q7Pz870Hz/q9UgP8InNpY01CSLDSCyqX7zcRuv3FcPl87A2G17lASroHWaCtbdIcbYzOZ7kWmXFKbijMSmFg==", + "dev": true, + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.24.4", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-class-static-block": "^7.14.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, "node_modules/@babel/plugin-transform-classes": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.21.0.tgz", - "integrity": "sha512-RZhbYTCEUAe6ntPehC4hlslPWosNHDox+vAs4On/mCLRLfoDVHf6hVEd7kuxr1RnHwJmxFfUM3cZiZRmPxJPXQ==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.24.5.tgz", + "integrity": "sha512-gWkLP25DFj2dwe9Ck8uwMOpko4YsqyfZJrOmqqcegeDYEbp7rmn4U6UQZNj08UF6MaX39XenSpKRCvpDRBtZ7Q==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", 
- "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-split-export-declaration": "^7.18.6", + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-environment-visitor": "^7.22.20", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/helper-replace-supers": "^7.24.1", + "@babel/helper-split-export-declaration": "^7.24.5", "globals": "^11.1.0" }, "engines": { @@ -1307,13 +1213,13 @@ } }, "node_modules/@babel/plugin-transform-computed-properties": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.20.7.tgz", - "integrity": "sha512-Lz7MvBK6DTjElHAmfu6bfANzKcxpyNPeYBGEafyA6E5HtRpjpZwU+u7Qrgz/2OR0z+5TvKYbPdphfSaAcZBrYQ==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.24.1.tgz", + "integrity": "sha512-5pJGVIUfJpOS+pAqBQd+QMaTD2vCL/HcePooON6pDpHgRp4gNRmzyHTPIkXntwKsq3ayUFVfJaIKPw2pOkOcTw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/template": "^7.20.7" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/template": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1323,12 +1229,12 @@ } }, "node_modules/@babel/plugin-transform-destructuring": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.21.3.tgz", - "integrity": "sha512-bp6hwMFzuiE4HqYEyoGJ/V2LeIWn+hLVKc4pnj++E5XQptwhtcGmSayM029d/j2X1bPKGTlsyPwAubuU22KhMA==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.24.5.tgz", + "integrity": "sha512-SZuuLyfxvsm+Ah57I/i1HVjveBENYK9ue8MJ7qkc7ndoNjqquJiElzA7f5yaAXjyW2hKojosOTAQQRX50bPSVg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": 
"^7.20.2" + "@babel/helper-plugin-utils": "^7.24.5" }, "engines": { "node": ">=6.9.0" @@ -1338,13 +1244,13 @@ } }, "node_modules/@babel/plugin-transform-dotall-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz", - "integrity": "sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.24.1.tgz", + "integrity": "sha512-p7uUxgSoZwZ2lPNMzUkqCts3xlp8n+o05ikjy7gbtFJSt9gdU88jAmtfmOxHM14noQXBxfgzf2yRWECiNVhTCw==", "dev": true, "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1354,12 +1260,28 @@ } }, "node_modules/@babel/plugin-transform-duplicate-keys": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz", - "integrity": "sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.24.1.tgz", + "integrity": "sha512-msyzuUnvsjsaSaocV6L7ErfNsa5nDWL1XKNnDePLgmz+WdU4w/J8+AxBMrWfi9m4IxfL5sZQKUPQKDQeeAT6lA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.24.1.tgz", + "integrity": 
"sha512-av2gdSTyXcJVdI+8aFZsCAtR29xJt0S5tas+Ef8NvBNmD1a+N/3ecMLeMBgfcK+xzsjdLDT6oHt+DFPyeqUbDA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-dynamic-import": "^7.8.3" }, "engines": { "node": ">=6.9.0" @@ -1369,13 +1291,29 @@ } }, "node_modules/@babel/plugin-transform-exponentiation-operator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz", - "integrity": "sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.24.1.tgz", + "integrity": "sha512-U1yX13dVBSwS23DEAqU+Z/PkwE9/m7QQy8Y9/+Tdb8UWYaGNDYwTLi19wqIAiROr8sXVum9A/rtiH5H0boUcTw==", "dev": true, "dependencies": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.24.1.tgz", + "integrity": "sha512-Ft38m/KFOyzKw2UaJFkWG9QnHPG/Q/2SkOrRk4pNBPg5IPZ+dOxcmkK5IyuBcxiNPyyYowPGUReyBvrvZs7IlQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3" }, "engines": { "node": ">=6.9.0" @@ -1385,13 +1323,13 @@ } }, "node_modules/@babel/plugin-transform-flow-strip-types": { - "version": "7.21.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.21.0.tgz", - "integrity": "sha512-FlFA2Mj87a6sDkW4gfGrQQqwY/dLlBAyJa2dJEZ+FHXUVHBflO2wyKvg+OOEzXfrKYIa4HWl0mgmbCzt0cMb7w==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.24.1.tgz", + "integrity": "sha512-iIYPIWt3dUmUKKE10s3W+jsQ3icFkw0JyRVyY1B7G4yK/nngAOHLVx8xlhA6b/Jzl/Y0nis8gjqhqKtRDQqHWQ==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-flow": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-flow": "^7.24.1" }, "engines": { "node": ">=6.9.0" @@ -1401,12 +1339,13 @@ } }, "node_modules/@babel/plugin-transform-for-of": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.21.0.tgz", - "integrity": "sha512-LlUYlydgDkKpIY7mcBWvyPPmMcOphEyYA27Ef4xpbh1IiDNLr0kZsos2nf92vz3IccvJI25QUwp86Eo5s6HmBQ==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.24.1.tgz", + "integrity": "sha512-OxBdcnF04bpdQdR3i4giHZNZQn7cm8RQKcSwA17wAAqEELo1ZOwp5FFgeptWUQXFyT9kwHo10aqqauYkRZPCAg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -1416,14 +1355,30 @@ } }, "node_modules/@babel/plugin-transform-function-name": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz", - "integrity": "sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==", + "version": "7.24.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.24.1.tgz", + "integrity": "sha512-BXmDZpPlh7jwicKArQASrj8n22/w6iymRnvHYYd2zO30DbE277JO20/7yXJT3QxDPtiQiOxQBbZH4TpivNXIxA==", + "dev": true, + "dependencies": { + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-function-name": "^7.23.0", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.24.1.tgz", + "integrity": "sha512-U7RMFmRvoasscrIFy5xA4gIp8iWnWubnKkKuUGJjsuOH7GfbMkB+XZzeslx2kLdEGdOJDamEmCqOks6e8nv8DQ==", "dev": true, "dependencies": { - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-function-name": "^7.18.9", - "@babel/helper-plugin-utils": "^7.18.9" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-json-strings": "^7.8.3" }, "engines": { "node": ">=6.9.0" @@ -1433,12 +1388,28 @@ } }, "node_modules/@babel/plugin-transform-literals": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz", - "integrity": "sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.24.1.tgz", + "integrity": "sha512-zn9pwz8U7nCqOYIiBaOxoQOtYmMODXTJnkxG4AtX8fPmnCRYWBOHD0qcpwS9e2VDSp1zNJYpdnFMIKb8jmwu6g==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.24.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.24.1.tgz", + "integrity": "sha512-OhN6J4Bpz+hIBqItTeWJujDOfNP+unqv/NJgyhlpSqgBTPm37KkMmZV6SYcOj+pnDbdcl1qRGV/ZiIjX9Iy34w==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" }, "engines": { "node": ">=6.9.0" @@ -1448,12 +1419,12 @@ } }, "node_modules/@babel/plugin-transform-member-expression-literals": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz", - "integrity": "sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.24.1.tgz", + "integrity": "sha512-4ojai0KysTWXzHseJKa1XPNXKRbuUrhkOPY4rEGeR+7ChlJVKxFa3H3Bz+7tWaGKgJAXUWKOGmltN+u9B3+CVg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1463,13 +1434,13 @@ } }, "node_modules/@babel/plugin-transform-modules-amd": { - "version": "7.20.11", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.20.11.tgz", - "integrity": "sha512-NuzCt5IIYOW0O30UvqktzHYR2ud5bOWbY0yaxWZ6G+aFzOMJvrs5YHNikrbdaT15+KNO31nPOy5Fim3ku6Zb5g==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.24.1.tgz", + "integrity": "sha512-lAxNHi4HVtjnHd5Rxg3D5t99Xm6H7b04hUS7EHIXcUl2EV4yl1gWdqZrNzXnSrHveL9qMdbODlLF55mvgjAfaQ==", "dev": true, "dependencies": { - "@babel/helper-module-transforms": "^7.20.11", - 
"@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1479,14 +1450,14 @@ } }, "node_modules/@babel/plugin-transform-modules-commonjs": { - "version": "7.21.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.21.2.tgz", - "integrity": "sha512-Cln+Yy04Gxua7iPdj6nOV96smLGjpElir5YwzF0LBPKoPlLDNJePNlrGGaybAJkd0zKRnOVXOgizSqPYMNYkzA==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.24.1.tgz", + "integrity": "sha512-szog8fFTUxBfw0b98gEWPaEqF42ZUD/T3bkynW/wtgx2p/XCP55WEsb+VosKceRSd6njipdZvNogqdtI4Q0chw==", "dev": true, "dependencies": { - "@babel/helper-module-transforms": "^7.21.2", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-simple-access": "^7.20.2" + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-simple-access": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -1496,15 +1467,15 @@ } }, "node_modules/@babel/plugin-transform-modules-systemjs": { - "version": "7.20.11", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.20.11.tgz", - "integrity": "sha512-vVu5g9BPQKSFEmvt2TA4Da5N+QVS66EX21d8uoOihC+OCpUoGvzVsXeqFdtAEfVa5BILAeFt+U7yVmLbQnAJmw==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.1.tgz", + "integrity": "sha512-mqQ3Zh9vFO1Tpmlt8QPnbwGHzNz3lpNEMxQb1kAemn/erstyqw1r9KeOlOfo3y6xAnFEcOv2tSyrXfmMk+/YZA==", "dev": true, "dependencies": { - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-module-transforms": "^7.20.11", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-identifier": "^7.19.1" + 
"@babel/helper-hoist-variables": "^7.22.5", + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-identifier": "^7.22.20" }, "engines": { "node": ">=6.9.0" @@ -1514,13 +1485,13 @@ } }, "node_modules/@babel/plugin-transform-modules-umd": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", - "integrity": "sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.24.1.tgz", + "integrity": "sha512-tuA3lpPj+5ITfcCluy6nWonSL7RvaG0AOTeAuvXqEKS34lnLzXpDb0dcP6K8jD0zWZFNDVly90AGFJPnm4fOYg==", "dev": true, "dependencies": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-module-transforms": "^7.23.3", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1530,13 +1501,13 @@ } }, "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.20.5.tgz", - "integrity": "sha512-mOW4tTzi5iTLnw+78iEq3gr8Aoq4WNRGpmSlrogqaiCBoR1HFhpU4JkpQFOHfeYx3ReVIFWOQJS4aZBRvuZ6mA==", + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz", + "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==", "dev": true, "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.20.5", - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-create-regexp-features-plugin": "^7.22.5", + "@babel/helper-plugin-utils": "^7.22.5" }, 
"engines": { "node": ">=6.9.0" @@ -1546,12 +1517,12 @@ } }, "node_modules/@babel/plugin-transform-new-target": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz", - "integrity": "sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==", + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.24.1.tgz", + "integrity": "sha512-/rurytBM34hYy0HKZQyA0nHbQgQNFm4Q/BOc9Hflxi2X3twRof7NaE5W46j4kQitm7SvACVRXsa6N/tSZxvPug==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1560,14 +1531,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-object-super": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz", - "integrity": "sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==", + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.24.1.tgz", + "integrity": "sha512-iQ+caew8wRrhCikO5DrUYx0mrmdhkaELgFa+7baMcVuhxIkN7oxt06CZ51D65ugIb1UWRQ8oQe+HXAVM6qHFjw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-replace-supers": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" }, "engines": { "node": ">=6.9.0" @@ -1576,13 +1547,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-parameters": { - "version": "7.21.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.21.3.tgz", - "integrity": "sha512-Wxc+TvppQG9xWFYatvCGPvZ6+SIUxQ2ZdiBP+PHYMIjnPXD+uThCshaz4NZOnODAtBjjcVQQ/3OKs9LW28purQ==", + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.24.1.tgz", + "integrity": "sha512-7GAsGlK4cNL2OExJH1DzmDeKnRv/LXq0eLUSvudrehVA5Rgg4bIrqEUW29FbKMBRT0ztSqisv7kjP+XIC4ZMNw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-numeric-separator": "^7.10.4" }, "engines": { "node": ">=6.9.0" @@ -1591,13 +1563,16 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-property-literals": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz", - "integrity": "sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==", + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.5.tgz", + "integrity": "sha512-7EauQHszLGM3ay7a161tTQH7fj+3vVM/gThlz5HpFtnygTxjrlvoeq7MPVA1Vy9Q555OB8SnAOsMkLShNkkrHA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.24.5" }, "engines": { "node": ">=6.9.0" @@ -1606,13 +1581,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-display-name": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.18.6.tgz", - "integrity": "sha512-TV4sQ+T013n61uMoygyMRm+xf04Bd5oqFpv2jAEQwSZ8NwQA7zeRPg1LMVg2PWi3zWBz+CLKD+v5bcpZ/BS0aA==", + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.24.1.tgz", + "integrity": "sha512-oKJqR3TeI5hSLRxudMjFQ9re9fBVUU0GICqM3J1mi8MqlhVr6hC/ZN4ttAyMuQR6EZZIY6h/exe5swqGNNIkWQ==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-replace-supers": "^7.24.1" }, "engines": { "node": ">=6.9.0" @@ -1621,17 +1597,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-jsx": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.21.0.tgz", - "integrity": "sha512-6OAWljMvQrZjR2DaNhVfRz6dkCAVV+ymcLUmaf8bccGOHn2v5rHJK3tTpij0BuhdYWP4LLaqj5lwcdlpAAPuvg==", + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.24.1.tgz", + "integrity": "sha512-oBTH7oURV4Y+3EUrf6cWn1OHio3qG/PVwO5J03iSJmBg6m2EhKjkAu/xuaXaYwWW9miYtvbWv4LNf0AmR43LUA==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-jsx": "^7.18.6", - "@babel/types": "^7.21.0" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" }, "engines": { "node": ">=6.9.0" @@ -1640,13 +1613,15 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-jsx-development": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz", - "integrity": "sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA==", + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.24.5.tgz", + "integrity": "sha512-xWCkmwKT+ihmA6l7SSTpk8e4qQl/274iNbSKRRS8mpqFR32ksy36+a+LWY8OXCCEefF8WFlnOHVsaDI2231wBg==", "dev": true, "dependencies": { - "@babel/plugin-transform-react-jsx": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5", + "@babel/plugin-syntax-optional-chaining": "^7.8.3" }, "engines": { "node": ">=6.9.0" @@ -1655,13 +1630,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-jsx-self": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.21.0.tgz", - "integrity": "sha512-f/Eq+79JEu+KUANFks9UZCcvydOOGMgF7jBrcwjHa5jTZD8JivnhCJYvmlhR/WTXBWonDExPoW0eO/CR4QJirA==", + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.24.5.tgz", + "integrity": "sha512-9Co00MqZ2aoky+4j2jhofErthm6QVLKbpQrvz20c3CH9KQCLHyNB+t2ya4/UrRpQGR+Wrwjg9foopoeSdnHOkA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2" + "@babel/helper-plugin-utils": "^7.24.5" }, "engines": { "node": ">=6.9.0" @@ -1670,13 +1645,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-jsx-source": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.19.6.tgz", - "integrity": 
"sha512-RpAi004QyMNisst/pvSanoRdJ4q+jMCWyk9zdw/CyLB9j8RXEahodR6l2GyttDRyEVWZtbN+TpLiHJ3t34LbsQ==", + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.24.1.tgz", + "integrity": "sha512-tGvisebwBO5em4PaYNqt4fkw56K2VALsAbAakY0FjTYqJp7gfdrgr7YX76Or8/cpik0W6+tj3rZ0uHU9Oil4tw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.19.0" + "@babel/helper-create-class-features-plugin": "^7.24.1", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1685,14 +1661,16 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-react-pure-annotations": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.6.tgz", - "integrity": "sha512-I8VfEPg9r2TRDdvnHgPepTKvuRomzA8+u+nhY7qSI1fR2hRNebasZEETLyM5mAUr0Ku56OkXJ0I7NHJnO6cJiQ==", + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.24.5.tgz", + "integrity": "sha512-JM4MHZqnWR04jPMujQDTBVRnqxpLLpx2tkn7iPn+Hmsc0Gnb79yvRWOkvqFOx3Z7P7VxiRIR22c4eGSNj87OBQ==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.24.5", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5" }, "engines": { "node": ">=6.9.0" @@ -1701,14 +1679,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-regenerator": { - "version": "7.20.5", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.20.5.tgz", - "integrity": "sha512-kW/oO7HPBtntbsahzQ0qSE3tFvkFwnbozz3NWFhLGqH75vLEg+sCGngLlhVkePlCs3Jv0dBBHDzCHxNiFAQKCQ==", + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.24.1.tgz", + "integrity": "sha512-LetvD7CrHmEx0G442gOomRr66d7q8HzzGGr4PMHGr+5YIm6++Yke+jxj246rpvsbyhJwCLxcTn6zW1P1BSenqA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "regenerator-transform": "^0.15.1" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1717,13 +1694,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-reserved-words": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz", - "integrity": "sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==", + "node_modules/@babel/plugin-transform-react-display-name": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.24.1.tgz", + "integrity": "sha512-mvoQg2f9p2qlpDQRBC7M3c3XTr0k7cp/0+kFKKO/7Gtu0LSw16eKB+Fabe2bDT/UpsyasTBBkAnbdsLrkD5XMw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1732,18 +1709,17 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-runtime": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.21.4.tgz", - "integrity": "sha512-1J4dhrw1h1PqnNNpzwxQ2UBymJUF8KuPjAAnlLwZcGhHAIqUigFW7cdK6GHoB64ubY4qXQNYknoUeks4Wz7CUA==", + 
"node_modules/@babel/plugin-transform-react-jsx": { + "version": "7.23.4", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.23.4.tgz", + "integrity": "sha512-5xOpoPguCZCRbo/JeHlloSkTA8Bld1J/E1/kLfD1nsuiW1m8tduTA1ERCgIZokDflX/IBzKcqR3l7VlRgiIfHA==", "dev": true, "dependencies": { - "@babel/helper-module-imports": "^7.21.4", - "@babel/helper-plugin-utils": "^7.20.2", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "semver": "^6.3.0" + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-module-imports": "^7.22.15", + "@babel/helper-plugin-utils": "^7.22.5", + "@babel/plugin-syntax-jsx": "^7.23.3", + "@babel/types": "^7.23.4" }, "engines": { "node": ">=6.9.0" @@ -1752,13 +1728,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-shorthand-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz", - "integrity": "sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==", + "node_modules/@babel/plugin-transform-react-jsx-development": { + "version": "7.22.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz", + "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/plugin-transform-react-jsx": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -1767,14 +1743,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-spread": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.20.7.tgz", - "integrity": 
"sha512-ewBbHQ+1U/VnH1fxltbJqDeWBU1oNLG8Dj11uIv3xVf7nrQu0bPGe5Rf716r7K5Qz+SqtAOVswoVunoiBtGhxw==", + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.24.5.tgz", + "integrity": "sha512-RtCJoUO2oYrYwFPtR1/jkoBEcFuI1ae9a9IMxeyAVa3a1Ap4AnxmyIKG2b2FaJKqkidw/0cxRbWN+HOs6ZWd1w==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0" + "@babel/helper-plugin-utils": "^7.24.5" }, "engines": { "node": ">=6.9.0" @@ -1783,13 +1758,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-sticky-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz", - "integrity": "sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==", + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.24.1.tgz", + "integrity": "sha512-1v202n7aUq4uXAieRTKcwPzNyphlCuqHHDcdSNc+vdhoTEZcFMh+L5yZuCmGaIO7bs1nJUNfHB89TZyoL48xNA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1798,13 +1773,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-template-literals": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz", - "integrity": "sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==", + "node_modules/@babel/plugin-transform-react-pure-annotations": { + "version": "7.24.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.24.1.tgz", + "integrity": "sha512-+pWEAaDJvSm9aFvJNpLiM2+ktl2Sn2U5DdyiWdZBxmLc6+xGt88dvFqsHiAiDS+8WqUwbDfkKz9jRxK3M0k+kA==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1813,13 +1789,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-typeof-symbol": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz", - "integrity": "sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==", + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.24.1.tgz", + "integrity": "sha512-sJwZBCzIBE4t+5Q4IGLaaun5ExVMRY0lYwos/jNecjMrVCygCdph3IKv0tkP5Fc87e/1+bebAmEAGBfnRD+cnw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" + "@babel/helper-plugin-utils": "^7.24.0", + "regenerator-transform": "^0.15.2" }, "engines": { "node": ">=6.9.0" @@ -1828,16 +1805,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-typescript": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.21.3.tgz", - "integrity": "sha512-RQxPz6Iqt8T0uw/WsJNReuBpWpBqs/n7mNo18sKLoTbMp+UrEekhH+pKSVC7gWz+DNjo9gryfV8YzCiT45RgMw==", + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.24.1.tgz", + "integrity": 
"sha512-JAclqStUfIwKN15HrsQADFgeZt+wexNQ0uLhuqvqAUFoqPMjEcFCYZBhq0LUdz6dZK/mD+rErhW71fbx8RYElg==", "dev": true, "dependencies": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-typescript": "^7.20.0" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1846,13 +1820,18 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-unicode-escapes": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz", - "integrity": "sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==", + "node_modules/@babel/plugin-transform-runtime": { + "version": "7.24.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.3.tgz", + "integrity": "sha512-J0BuRPNlNqlMTRJ72eVptpt9VcInbxO6iP3jaxr+1NPhC0UkKL+6oeX6VXMEYdADnuqmMmsBspt4d5w8Y/TCbQ==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.9" + "@babel/helper-module-imports": "^7.24.3", + "@babel/helper-plugin-utils": "^7.24.0", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.1", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "semver": "^6.3.1" }, "engines": { "node": ">=6.9.0" @@ -1861,14 +1840,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/plugin-transform-unicode-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz", - "integrity": "sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==", + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.24.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.24.1.tgz", + "integrity": "sha512-LyjVB1nsJ6gTTUKRjRWx9C1s9hE7dLfP/knKdrfeH9UPtAGjYGgxIbFfx7xyLIEWs7Xe1Gnf8EWiUqfjLhInZA==", "dev": true, "dependencies": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -1877,87 +1855,14 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/preset-env": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.21.4.tgz", - "integrity": "sha512-2W57zHs2yDLm6GD5ZpvNn71lZ0B/iypSdIeq25OurDKji6AdzV07qp4s3n1/x5BqtiGaTrPN3nerlSCaC5qNTw==", + "node_modules/@babel/plugin-transform-spread": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.24.1.tgz", + "integrity": "sha512-KjmcIM+fxgY+KxPVbjelJC6hrH1CgtPmTvdXAfn3/a9CnWGSTY7nH4zm5+cjmWJybdcPSsD0++QssDsjcpe47g==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.21.4", - "@babel/helper-compilation-targets": "^7.21.4", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.21.0", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.20.7", - "@babel/plugin-proposal-async-generator-functions": "^7.20.7", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-class-static-block": "^7.21.0", - "@babel/plugin-proposal-dynamic-import": "^7.18.6", - "@babel/plugin-proposal-export-namespace-from": "^7.18.9", - "@babel/plugin-proposal-json-strings": "^7.18.6", - "@babel/plugin-proposal-logical-assignment-operators": "^7.20.7", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", - "@babel/plugin-proposal-numeric-separator": "^7.18.6", - 
"@babel/plugin-proposal-object-rest-spread": "^7.20.7", - "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", - "@babel/plugin-proposal-optional-chaining": "^7.21.0", - "@babel/plugin-proposal-private-methods": "^7.18.6", - "@babel/plugin-proposal-private-property-in-object": "^7.21.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.20.0", - "@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.20.7", - "@babel/plugin-transform-async-to-generator": "^7.20.7", - "@babel/plugin-transform-block-scoped-functions": "^7.18.6", - "@babel/plugin-transform-block-scoping": "^7.21.0", - "@babel/plugin-transform-classes": "^7.21.0", - "@babel/plugin-transform-computed-properties": "^7.20.7", - "@babel/plugin-transform-destructuring": "^7.21.3", - "@babel/plugin-transform-dotall-regex": "^7.18.6", - "@babel/plugin-transform-duplicate-keys": "^7.18.9", - "@babel/plugin-transform-exponentiation-operator": "^7.18.6", - "@babel/plugin-transform-for-of": "^7.21.0", - "@babel/plugin-transform-function-name": "^7.18.9", - "@babel/plugin-transform-literals": "^7.18.9", - "@babel/plugin-transform-member-expression-literals": "^7.18.6", - 
"@babel/plugin-transform-modules-amd": "^7.20.11", - "@babel/plugin-transform-modules-commonjs": "^7.21.2", - "@babel/plugin-transform-modules-systemjs": "^7.20.11", - "@babel/plugin-transform-modules-umd": "^7.18.6", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.20.5", - "@babel/plugin-transform-new-target": "^7.18.6", - "@babel/plugin-transform-object-super": "^7.18.6", - "@babel/plugin-transform-parameters": "^7.21.3", - "@babel/plugin-transform-property-literals": "^7.18.6", - "@babel/plugin-transform-regenerator": "^7.20.5", - "@babel/plugin-transform-reserved-words": "^7.18.6", - "@babel/plugin-transform-shorthand-properties": "^7.18.6", - "@babel/plugin-transform-spread": "^7.20.7", - "@babel/plugin-transform-sticky-regex": "^7.18.6", - "@babel/plugin-transform-template-literals": "^7.18.9", - "@babel/plugin-transform-typeof-symbol": "^7.18.9", - "@babel/plugin-transform-unicode-escapes": "^7.18.10", - "@babel/plugin-transform-unicode-regex": "^7.18.6", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.21.4", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "core-js-compat": "^3.25.1", - "semver": "^6.3.0" + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5" }, "engines": { "node": ">=6.9.0" @@ -1966,34 +1871,28 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/preset-modules": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.24.1.tgz", + "integrity": 
"sha512-9v0f1bRXgPVcPrngOQvLXeGNNVLc8UjMVfebo9ka0WF3/7+aVUHmaJVT3sa0XCzEFioPfPHZiOcYG9qOsH63cw==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/preset-react": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.18.6.tgz", - "integrity": "sha512-zXr6atUmyYdiWRVLOZahakYmOBHtWc2WGCkP8PYTgZi0iJXDY2CN180TdrIW4OGOAdLc7TifzDIvtx6izaRIzg==", + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.24.1.tgz", + "integrity": "sha512-WRkhROsNzriarqECASCNu/nojeXCDTE/F2HmRgOzi7NGvyfYGq1NEjKBK3ckLfRgGc6/lPAqP0vDOSw3YtG34g==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-react-display-name": "^7.18.6", - "@babel/plugin-transform-react-jsx": "^7.18.6", - "@babel/plugin-transform-react-jsx-development": "^7.18.6", - "@babel/plugin-transform-react-pure-annotations": "^7.18.6" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" @@ -2002,17 +1901,13 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/preset-typescript": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.21.4.tgz", - "integrity": "sha512-sMLNWY37TCdRH/bJ6ZeeOH1nPuanED7Ai9Y/vH31IPqalioJ6ZNFUWONsakhv4r4n+I6gm5lmoE0olkgib/j/A==", + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.24.5", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.24.5.tgz", + "integrity": "sha512-UTGnhYVZtTAjdwOTzT+sCyXmTn8AhaxOS/MjG9REclZ6ULHWF9KoCZur0HSGU7hk8PdBFKKbYe6+gqdXWz84Jg==", "dev": true, "dependencies": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.21.0", - "@babel/plugin-syntax-jsx": "^7.21.4", - "@babel/plugin-transform-modules-commonjs": "^7.21.2", - "@babel/plugin-transform-typescript": "^7.21.3" + "@babel/helper-plugin-utils": "^7.24.5" }, "engines": { "node": ">=6.9.0" @@ -2021,65 +1916,294 @@ "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", - "dev": true - }, - "node_modules/@babel/runtime": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.0.tgz", - "integrity": "sha512-xwII0//EObnq89Ji5AKYQaRYiW/nZ3llSv29d49IuxPhKbtJoLP+9QUUZ4nVragQVtaVGeZrpB+ZtG/Pdy/POw==", + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.24.5.tgz", + "integrity": "sha512-E0VWu/hk83BIFUWnsKZ4D81KXjN5L3MobvevOHErASk9IPwKHOkTgvqzvNo1yP/ePJWqqK2SpUR5z+KQbl6NVw==", + "dev": true, "dependencies": { - "regenerator-runtime": "^0.13.11" + "@babel/helper-annotate-as-pure": "^7.22.5", + "@babel/helper-create-class-features-plugin": "^7.24.5", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/plugin-syntax-typescript": "^7.24.1" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/runtime-corejs3": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.21.0.tgz", - 
"integrity": "sha512-TDD4UJzos3JJtM+tHX+w2Uc+KWj7GV+VKKFdMVd2Rx8sdA19hcc3P3AHFYd5LVOw+pYuSd5lICC3gm52B6Rwxw==", + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.24.1.tgz", + "integrity": "sha512-RlkVIcWT4TLI96zM660S877E7beKlQw7Ig+wqkKBiWfj0zH5Q4h50q6er4wzZKRNSYpfo6ILJ+hrJAGSX2qcNw==", "dev": true, "dependencies": { - "core-js-pure": "^3.25.1", - "regenerator-runtime": "^0.13.11" + "@babel/helper-plugin-utils": "^7.24.0" }, "engines": { "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" } }, - "node_modules/@babel/template": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", - "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.24.1.tgz", + "integrity": "sha512-Ss4VvlfYV5huWApFsF8/Sq0oXnGO+jB+rijFEFugTd3cwSObUSnUi88djgR5528Csl0uKlrI331kRqe56Ov2Ng==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.24.1.tgz", + "integrity": "sha512-2A/94wgZgxfTsiLaQ2E36XAOdcZmGAaEEgVmxQWwZXWkGhvoHbaqXcKnU8zny4ycpu3vNqg0L/PcCiYtHtA13g==", + "dev": true, + "dependencies": { + 
"@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.24.1.tgz", + "integrity": "sha512-fqj4WuzzS+ukpgerpAoOnMfQXwUHFxXUZUE84oL2Kao2N8uSlvcpnAidKASgsNgzZHBsHWvcm8s9FPWUhAb8fA==", + "dev": true, + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.22.15", + "@babel/helper-plugin-utils": "^7.24.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.24.5.tgz", + "integrity": "sha512-UGK2ifKtcC8i5AI4cH+sbLLuLc2ktYSFJgBAXorKAsHUZmrQ1q6aQ6i3BvU24wWs2AAKqQB6kq3N9V9Gw1HiMQ==", + "dev": true, + "dependencies": { + "@babel/compat-data": "^7.24.4", + "@babel/helper-compilation-targets": "^7.23.6", + "@babel/helper-plugin-utils": "^7.24.5", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.24.5", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.24.1", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.24.1", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.24.1", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-async-generators": "^7.8.4", + "@babel/plugin-syntax-class-properties": "^7.12.13", + "@babel/plugin-syntax-class-static-block": "^7.14.5", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-export-namespace-from": "^7.8.3", + "@babel/plugin-syntax-import-assertions": 
"^7.24.1", + "@babel/plugin-syntax-import-attributes": "^7.24.1", + "@babel/plugin-syntax-import-meta": "^7.10.4", + "@babel/plugin-syntax-json-strings": "^7.8.3", + "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-syntax-numeric-separator": "^7.10.4", + "@babel/plugin-syntax-object-rest-spread": "^7.8.3", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.3", + "@babel/plugin-syntax-private-property-in-object": "^7.14.5", + "@babel/plugin-syntax-top-level-await": "^7.14.5", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.24.1", + "@babel/plugin-transform-async-generator-functions": "^7.24.3", + "@babel/plugin-transform-async-to-generator": "^7.24.1", + "@babel/plugin-transform-block-scoped-functions": "^7.24.1", + "@babel/plugin-transform-block-scoping": "^7.24.5", + "@babel/plugin-transform-class-properties": "^7.24.1", + "@babel/plugin-transform-class-static-block": "^7.24.4", + "@babel/plugin-transform-classes": "^7.24.5", + "@babel/plugin-transform-computed-properties": "^7.24.1", + "@babel/plugin-transform-destructuring": "^7.24.5", + "@babel/plugin-transform-dotall-regex": "^7.24.1", + "@babel/plugin-transform-duplicate-keys": "^7.24.1", + "@babel/plugin-transform-dynamic-import": "^7.24.1", + "@babel/plugin-transform-exponentiation-operator": "^7.24.1", + "@babel/plugin-transform-export-namespace-from": "^7.24.1", + "@babel/plugin-transform-for-of": "^7.24.1", + "@babel/plugin-transform-function-name": "^7.24.1", + "@babel/plugin-transform-json-strings": "^7.24.1", + "@babel/plugin-transform-literals": "^7.24.1", + "@babel/plugin-transform-logical-assignment-operators": "^7.24.1", + "@babel/plugin-transform-member-expression-literals": "^7.24.1", + "@babel/plugin-transform-modules-amd": "^7.24.1", + "@babel/plugin-transform-modules-commonjs": 
"^7.24.1", + "@babel/plugin-transform-modules-systemjs": "^7.24.1", + "@babel/plugin-transform-modules-umd": "^7.24.1", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5", + "@babel/plugin-transform-new-target": "^7.24.1", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.24.1", + "@babel/plugin-transform-numeric-separator": "^7.24.1", + "@babel/plugin-transform-object-rest-spread": "^7.24.5", + "@babel/plugin-transform-object-super": "^7.24.1", + "@babel/plugin-transform-optional-catch-binding": "^7.24.1", + "@babel/plugin-transform-optional-chaining": "^7.24.5", + "@babel/plugin-transform-parameters": "^7.24.5", + "@babel/plugin-transform-private-methods": "^7.24.1", + "@babel/plugin-transform-private-property-in-object": "^7.24.5", + "@babel/plugin-transform-property-literals": "^7.24.1", + "@babel/plugin-transform-regenerator": "^7.24.1", + "@babel/plugin-transform-reserved-words": "^7.24.1", + "@babel/plugin-transform-shorthand-properties": "^7.24.1", + "@babel/plugin-transform-spread": "^7.24.1", + "@babel/plugin-transform-sticky-regex": "^7.24.1", + "@babel/plugin-transform-template-literals": "^7.24.1", + "@babel/plugin-transform-typeof-symbol": "^7.24.5", + "@babel/plugin-transform-unicode-escapes": "^7.24.1", + "@babel/plugin-transform-unicode-property-regex": "^7.24.1", + "@babel/plugin-transform-unicode-regex": "^7.24.1", + "@babel/plugin-transform-unicode-sets-regex": "^7.24.1", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.10.4", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.31.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + 
"integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/preset-react": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.24.1.tgz", + "integrity": "sha512-eFa8up2/8cZXLIpkafhaADTXSnl7IsUFCYenRWrARBz0/qZwcT0RBXpys0LJU4+WfPoF2ZG6ew6s2V6izMCwRA==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-transform-react-display-name": "^7.24.1", + "@babel/plugin-transform-react-jsx": "^7.23.4", + "@babel/plugin-transform-react-jsx-development": "^7.22.5", + "@babel/plugin-transform-react-pure-annotations": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.24.1", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.24.1.tgz", + "integrity": "sha512-1DBaMmRDpuYQBPWD8Pf/WEwCrtgRHxsZnP4mIy9G/X+hFfbI47Q2G4t1Paakld84+qsk2fSsUPMKg71jkoOOaQ==", + "dev": true, + "dependencies": { + "@babel/helper-plugin-utils": "^7.24.0", + "@babel/helper-validator-option": "^7.23.5", + "@babel/plugin-syntax-jsx": "^7.24.1", + "@babel/plugin-transform-modules-commonjs": "^7.24.1", + "@babel/plugin-transform-typescript": "^7.24.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/regjsgen": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", + "dev": true + }, + 
"node_modules/@babel/runtime": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.5.tgz", + "integrity": "sha512-Nms86NXrsaeU9vbBJKni6gXiEXZ4CVpYVzEjDH9Sb8vmZ3UljyA1GSOJl/6LGPO8EHLuSF9H+IxNXHPX8QHJ4g==", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.24.5.tgz", + "integrity": "sha512-GWO0mgzNMLWaSYM4z4NVIuY0Cd1fl8cPnuetuddu5w/qGuvt5Y7oUi/kvvQGK9xgOkFJDQX2heIvTRn/OQ1XTg==", + "dev": true, + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.0.tgz", + "integrity": "sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA==", + "dev": true, + "dependencies": { + "@babel/code-frame": "^7.23.5", + "@babel/parser": "^7.24.0", + "@babel/types": "^7.24.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", - "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.5.tgz", + "integrity": "sha512-7aaBLeDQ4zYcUFDUD41lJc1fG8+5IU9DaNSJAgal866FGvmD5EbWQgnEC6kO1gGLsX0esNkfnJSndbTXA3r7UA==", "dev": true, "dependencies": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", + "@babel/code-frame": "^7.24.2", + "@babel/generator": "^7.24.5", "@babel/helper-environment-visitor": "^7.22.20", "@babel/helper-function-name": "^7.23.0", "@babel/helper-hoist-variables": "^7.22.5", - 
"@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.0", - "@babel/types": "^7.23.0", - "debug": "^4.1.0", + "@babel/helper-split-export-declaration": "^7.24.5", + "@babel/parser": "^7.24.5", + "@babel/types": "^7.24.5", + "debug": "^4.3.1", "globals": "^11.1.0" }, "engines": { @@ -2087,13 +2211,13 @@ } }, "node_modules/@babel/types": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", - "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", + "version": "7.24.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.5.tgz", + "integrity": "sha512-6mQNsaLeXTw0nxYUYu+NSa4Hx4BlF1x1x8/PMFbiR+GBSr+2DkECc69b8hgy2frEodNcvPffeH8YfWd3LI6jhQ==", "dev": true, "dependencies": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.20", + "@babel/helper-string-parser": "^7.24.1", + "@babel/helper-validator-identifier": "^7.24.5", "to-fast-properties": "^2.0.0" }, "engines": { @@ -2101,17 +2225,17 @@ } }, "node_modules/@bugsnag/browser": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@bugsnag/browser/-/browser-7.20.0.tgz", - "integrity": "sha512-LzZWI6q5cWYQSXvfJDcSl287d2xXESVn0L20lK+K5nwo/jXcK9IVZr9L+CYZ40HVXaC9jOmQbqZ18hsbO2QNIw==", + "version": "7.22.7", + "resolved": "https://registry.npmjs.org/@bugsnag/browser/-/browser-7.22.7.tgz", + "integrity": "sha512-70jFkWKscK2osm7bnFbPLevrzHClrygM3UcKetKs/l81Xuzlxnu1SS3onN5OUl9kd9RN4XMFr46Pv5jSqWqImQ==", "dependencies": { - "@bugsnag/core": "^7.19.0" + "@bugsnag/core": "^7.22.7" } }, "node_modules/@bugsnag/core": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@bugsnag/core/-/core-7.19.0.tgz", - "integrity": "sha512-2KGwdaLD9PhR7Wk7xPi3jGuGsKTatc/28U4TOZIDU3CgC2QhGjubwiXSECel5gwxhZ3jACKcMKSV2ovHhv1NrA==", + "version": "7.22.7", + "resolved": "https://registry.npmjs.org/@bugsnag/core/-/core-7.22.7.tgz", + 
"integrity": "sha512-9DPWBkkBjhFJc5dCFy/wVC3HE0Aw3ZiLJKjyAxgywSKbILgtpD+qT1Xe8sacWyxU92znamlZ8H8ziQOe7jhhbA==", "dependencies": { "@bugsnag/cuid": "^3.0.0", "@bugsnag/safe-json-stringify": "^6.0.0", @@ -2121,25 +2245,25 @@ } }, "node_modules/@bugsnag/cuid": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@bugsnag/cuid/-/cuid-3.0.2.tgz", - "integrity": "sha512-cIwzC93r3PQ/INeuwtZwkZIG2K8WWN0rRLZQhu+mr48Ay+i6sEki4GYfTsflse7hZ1BeDWrNb/Q9vgY3B31xHQ==" + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@bugsnag/cuid/-/cuid-3.1.1.tgz", + "integrity": "sha512-d2z4b0rEo3chI07FNN1Xds8v25CNeekecU6FC/2Fs9MxY2EipkZTThVcV2YinMn8dvRUlViKOyC50evoUxg8tw==" }, "node_modules/@bugsnag/js": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@bugsnag/js/-/js-7.20.0.tgz", - "integrity": "sha512-lhUUSOveE8fP10RagAINqBmuH+eoOpyUOiTN1WRkjHUevWG0LZjRRUWEGN3AA+ZyTphmC6ljd2qE3/64qfOSGQ==", + "version": "7.22.7", + "resolved": "https://registry.npmjs.org/@bugsnag/js/-/js-7.22.7.tgz", + "integrity": "sha512-Qq8l06rSDTZtxgNIDpTeXHrin9C30INNbPfnR2CNcEsCmfqyVQb4USPEuRb0xg5wiaLKU9r4IAatMqiCgdzG6A==", "dependencies": { - "@bugsnag/browser": "^7.20.0", - "@bugsnag/node": "^7.19.0" + "@bugsnag/browser": "^7.22.7", + "@bugsnag/node": "^7.22.7" } }, "node_modules/@bugsnag/node": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@bugsnag/node/-/node-7.19.0.tgz", - "integrity": "sha512-c4snyxx5d/fsMogmgehFBGc//daH6+4XCplia4zrEQYltjaQ+l8ud0dPx623DgJl/2j1+2zlRc7y7IHSd7Gm5w==", + "version": "7.22.7", + "resolved": "https://registry.npmjs.org/@bugsnag/node/-/node-7.22.7.tgz", + "integrity": "sha512-Ud8vpX9UkGxoWAk7OigyR7w1eycbsE5uv5KZx0aWiqDPXylvICd42V5ZiWstpkdm9IVFo9AQ4+gmerHPe4Lwrg==", "dependencies": { - "@bugsnag/core": "^7.19.0", + "@bugsnag/core": "^7.22.7", "byline": "^5.0.0", "error-stack-parser": "^2.0.2", "iserror": "^0.0.2", @@ -2153,110 +2277,152 @@ "integrity": 
"sha512-htzFO1Zc57S8kgdRK9mLcPVTW1BY2ijfH7Dk2CeZmspTWKdKqSo1iwmqrq2WtRjFlo8aRZYgLX0wFrDXF/9DLA==" }, "node_modules/@csstools/cascade-layer-name-parser": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-1.0.1.tgz", - "integrity": "sha512-SAAi5DpgJJWkfTvWSaqkgyIsTawa83hMwKrktkj6ra2h+q6ZN57vOGZ6ySHq6RSo+CbP64fA3aPChPBRDDUgtw==", + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-1.0.9.tgz", + "integrity": "sha512-RRqNjxTZDUhx7pxYOBG/AkCVmPS3zYzfE47GEhIGkFuWFTQGJBgWOUUkKNo5MfxIfjDz5/1L3F3rF1oIsYaIpw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0" + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4" } }, "node_modules/@csstools/color-helpers": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-2.0.0.tgz", - "integrity": "sha512-VcPjEnp07RNgz/D+oI2uIALg+IPCSl6mj0XhA3pl3F2bM2B95vgzatExmmzSg/X0zkh+R2v+jFY/J2pV/bnwpw==", + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-4.2.0.tgz", + "integrity": "sha512-hJJrSBzbfGxUsaR6X4Bzd/FLx0F1ulKnR5ljY9AiXCtsR+H+zSWQDFWlKES1BRaVZTDHLpIIHS9K2o0h+JLlrg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" 
} }, "node_modules/@csstools/css-calc": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-1.0.1.tgz", - "integrity": "sha512-VBI8X0bmStfc85wWTa2bsbnlBQxgW4FmJ0Ts9ar9UqytE6kii3yg6GO+wpgzht2oK5Qlbpkm1Fy2kcqVmu6f3Q==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-1.2.0.tgz", + "integrity": "sha512-iQqIW5vDPqQdLx07/atCuNKDprhIWjB0b8XRhUyXZWBZYUG+9mNyFwyu30rypX84WLevVo25NYW2ipxR8WyseQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.0.1" + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4" } }, "node_modules/@csstools/css-color-parser": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-1.1.0.tgz", - "integrity": "sha512-jRpIhjThaH8jxuJ8Q1H+jai/dekP5952kzLHTuN+rPI48eF2esf/18TMb3N/HtEgmnybhfiwUO6Ph2OkHi3jpA==", + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-1.6.3.tgz", + "integrity": "sha512-pQPUPo32HW3/NuZxrwr3VJHE+vGqSTVI5gK4jGbuJ7eOFUrsTmZikXcVdInCVWOvuxK5xbCzwDWoTlZUCAKN+A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/color-helpers": "^2.0.0", - "@csstools/css-calc": "^1.0.1" + "@csstools/color-helpers": "^4.1.0", + "@csstools/css-calc": "^1.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - 
}, "peerDependencies": { - "@csstools/css-parser-algorithms": "^2.1.0", - "@csstools/css-tokenizer": "^2.1.0" + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4" } }, "node_modules/@csstools/css-parser-algorithms": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-2.1.0.tgz", - "integrity": "sha512-KP8TicdXpUyeB1NMlbHud/1l39xvLGvqNFWMpG4qC6H1zs9SadGUHe5SO92n/659sDW9aGDvm9AMru0DZkN1Bw==", + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-2.6.1.tgz", + "integrity": "sha512-ubEkAaTfVZa+WwGhs5jbo5Xfqpeaybr/RvWzvFxRs4jfq16wH8l8Ty/QEEpINxll4xhuGfdMbipRyz5QZh9+FA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { - "@csstools/css-tokenizer": "^2.0.0" + "@csstools/css-tokenizer": "^2.2.4" } }, "node_modules/@csstools/css-tokenizer": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-2.1.0.tgz", - "integrity": "sha512-dtqFyoJBHUxGi9zPZdpCKP1xk8tq6KPHJ/NY4qWXiYo6IcSGwzk3L8x2XzZbbyOyBs9xQARoGveU2AsgLj6D2A==", + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-2.2.4.tgz", + "integrity": "sha512-PuWRAewQLbDhGeTvFuq2oClaSCKPIBmHyIobCV39JHRYN0byDcUWJl5baPeNUcqrjtdMNqFooE0FGl31I3JOqw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" } }, 
"node_modules/@csstools/media-query-list-parser": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-2.0.2.tgz", - "integrity": "sha512-8V6JD8Av1HttuClYr1ZBu0LRVe5Nnz4qrv8RppO8mobsX/USBHZy5JQOXYIlpOVhl46nzkx3X5cfH6CqUghjrQ==", + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-2.1.9.tgz", + "integrity": "sha512-qqGuFfbn4rUmyOB0u8CVISIp5FfJ5GAR3mBrZ9/TKndHakdnm6pY0L/fbLcpPnrzwCyyTEZl1nUcXAYHEWneTA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0" + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4" } }, "node_modules/@csstools/postcss-cascade-layers": { @@ -2279,43 +2445,55 @@ } }, "node_modules/@csstools/postcss-color-function": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-color-function/-/postcss-color-function-2.2.0.tgz", - "integrity": "sha512-4z3k3p35Gmv4ZDX79OytvhwYx6Hz+y3hitikw2F+XG1yhSjalXoMCV04atgLjc/ThLg+Hwnp1pxhQ2G07UHknQ==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-function/-/postcss-color-function-2.2.3.tgz", + "integrity": "sha512-b1ptNkr1UWP96EEHqKBWWaV5m/0hgYGctgA/RVZhONeP1L3T/8hwoqDm9bB23yVCfOgE9U93KI9j06+pEkJTvw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - 
"@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/postcss-progressive-custom-properties": "^2.3.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-color-mix-function": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-function/-/postcss-color-mix-function-1.0.0.tgz", - "integrity": "sha512-JuI8SKpE/XIpfmvALcxvk6flaq36KCJwqQgZ958Jz189r1diQZADq+7xFmjcv+B0vHQ4nSa92gGExtzOZ1iiUg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-function/-/postcss-color-mix-function-1.0.3.tgz", + "integrity": "sha512-QGXjGugTluqFZWzVf+S3wCiRiI0ukXlYqCi7OnpDotP/zaVTyl/aqZujLFzTOXy24BoWnu89frGMc79ohY5eog==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/postcss-progressive-custom-properties": "^2.3.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -2339,69 +2517,97 @@ } }, "node_modules/@csstools/postcss-gradients-interpolation-method": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/@csstools/postcss-gradients-interpolation-method/-/postcss-gradients-interpolation-method-3.0.1.tgz", - "integrity": "sha512-sCfFSzL5HRb/GhrGuTEi8IRrxp2bUeKakyXvuXzuBBxL0L2X8kZAljQwkuRkd0W/wIWTsQG/E72REb5XMmRfrA==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@csstools/postcss-gradients-interpolation-method/-/postcss-gradients-interpolation-method-3.0.6.tgz", + "integrity": "sha512-rBOBTat/YMmB0G8VHwKqDEx+RZ4KCU9j42K8LwS0IpZnyThalZZF7BCSsZ6TFlZhcRZKlZy3LLFI2pLqjNVGGA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-color-parser": "^1.1.0", - "@csstools/css-parser-algorithms": "^2.1.0", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/postcss-progressive-custom-properties": "^2.3.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-hwb-function": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-hwb-function/-/postcss-hwb-function-2.2.0.tgz", - "integrity": "sha512-7gDPKacr3KhonzEyj4dzAEcetFJbN+JVPZXtANpf9SAVUHDUK+cCw7367uRlXnCeAoTdmRAyBk3agg2+snFxAw==", - "dependencies": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0" - }, - "engines": { - "node": "^14 || ^16 || >=18" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" + "version": "2.2.2", + "resolved": 
"https://registry.npmjs.org/@csstools/postcss-hwb-function/-/postcss-hwb-function-2.2.2.tgz", + "integrity": "sha512-W5Y5oaJ382HSlbdGfPf60d7dAK6Hqf10+Be1yZbd/TNNrQ/3dDdV1c07YwOXPQ3PZ6dvFMhxbIbn8EC3ki3nEg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "dependencies": { + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1" + }, + "engines": { + "node": "^14 || ^16 || >=18" }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-ic-unit": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-ic-unit/-/postcss-ic-unit-2.0.2.tgz", - "integrity": "sha512-N84qGTJkfLTPj2qOG5P4CIqGjpZBbjOEMKMn+UjO5wlb9lcBTfBsxCF0lQsFdWJUzBHYFOz19dL66v71WF3Pig==", + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-ic-unit/-/postcss-ic-unit-2.0.4.tgz", + "integrity": "sha512-9W2ZbV7whWnr1Gt4qYgxMWzbevZMOvclUczT5vk4yR6vS53W/njiiUhtm/jh/BKYwQ1W3PECZjgAd2dH4ebJig==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^2.0.0", + "@csstools/postcss-progressive-custom-properties": "^2.3.0", "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-is-pseudo-class": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-is-pseudo-class/-/postcss-is-pseudo-class-3.1.1.tgz", - "integrity": "sha512-hhiacuby4YdUnnxfCYCRMBIobyJImozf0u+gHSbQ/tNOdwvmrZtVROvgW7zmfYuRkHVDNZJWZslq2v5jOU+j/A==", + 
"version": "3.2.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-is-pseudo-class/-/postcss-is-pseudo-class-3.2.1.tgz", + "integrity": "sha512-AtANdV34kJl04Al62is3eQRk/BfOfyAvEmRJvbt+nx5REqImLC+2XhuE6skgkcPli1l8ONS67wS+l1sBzySc3Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { "@csstools/selector-specificity": "^2.0.0", "postcss-selector-parser": "^6.0.10" @@ -2409,10 +2615,6 @@ "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -2451,11 +2653,11 @@ } }, "node_modules/@csstools/postcss-logical-viewport-units": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-viewport-units/-/postcss-logical-viewport-units-1.0.2.tgz", - "integrity": "sha512-nnKFywBqRMYjv5jyjSplD/nbAnboUEGFfdxKw1o34Y1nvycgqjQavhKkmxbORxroBBIDwC5y6SfgENcPPUcOxQ==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-viewport-units/-/postcss-logical-viewport-units-1.0.3.tgz", + "integrity": "sha512-6zqcyRg9HSqIHIPMYdt6THWhRmE5/tyHKJQLysn2TeDf/ftq7Em9qwMTx98t2C/7UxIsYS8lOiHHxAVjWn2WUg==", "dependencies": { - "@csstools/css-tokenizer": "^2.0.0" + "@csstools/css-tokenizer": "^2.1.1" }, "engines": { "node": "^14 || ^16 || >=18" @@ -2469,42 +2671,54 @@ } }, "node_modules/@csstools/postcss-media-minmax": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-media-minmax/-/postcss-media-minmax-1.0.0.tgz", - "integrity": "sha512-qXHZ0QVDszKf4SsLazOEzFl+m+IkhHOigqMy/gHNIzAtqB3XeBQUa+dTi1ROmQBDH1HXktGwy+tafFBg9UoaxA==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-media-minmax/-/postcss-media-minmax-1.1.4.tgz", + "integrity": 
"sha512-xl/PIO3TUbXO1ZA4SA6HCw+Q9UGe2cgeRKx3lHCzoNig2D4bT5vfVCOrwhxjUb09oHihc9eI3I0iIfVPiXaN1A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-calc": "^1.0.1", - "@csstools/css-parser-algorithms": "^2.1.0", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/media-query-list-parser": "^2.0.2" + "@csstools/css-calc": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4", + "@csstools/media-query-list-parser": "^2.1.9" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-media-queries-aspect-ratio-number-values": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-media-queries-aspect-ratio-number-values/-/postcss-media-queries-aspect-ratio-number-values-1.0.1.tgz", - "integrity": "sha512-V9yQqXdje6OfqDf6EL5iGOpi6N0OEczwYK83rql9UapQwFEryXlAehR5AqH8QqLYb6+y31wUXK6vMxCp0920Zg==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-media-queries-aspect-ratio-number-values/-/postcss-media-queries-aspect-ratio-number-values-1.0.4.tgz", + "integrity": "sha512-IwyTbyR8E2y3kh6Fhrs251KjKBJeUPV5GlnUKnpU70PRFEN2DolWbf2V4+o/B9+Oj77P/DullLTulWEQ8uFtAA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "@csstools/media-query-list-parser": "^2.0.0" + "@csstools/css-parser-algorithms": "^2.2.0", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/media-query-list-parser": "^2.1.1" }, "engines": { "node": "^14 || ^16 
|| >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -2546,39 +2760,78 @@ } }, "node_modules/@csstools/postcss-oklab-function": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-oklab-function/-/postcss-oklab-function-2.2.0.tgz", - "integrity": "sha512-5QMtgn9IWpeTbbt8DwLvr41CQRJef2fKhznTFQI1Og/v3zr/uKYu+aSKZEEaoZnO9OophM4YJnkVJne3CqvJDQ==", + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/@csstools/postcss-oklab-function/-/postcss-oklab-function-2.2.3.tgz", + "integrity": "sha512-AgJ2rWMnLCDcbSMTHSqBYn66DNLBym6JpBpCaqmwZ9huGdljjDRuH3DzOYzkgQ7Pm2K92IYIq54IvFHloUOdvA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/postcss-progressive-custom-properties": "^2.3.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-progressive-custom-properties": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-2.1.1.tgz", - "integrity": "sha512-6p8eO5+j+9hn4h2Klr9dbmya0GIb9SRrnPaCxqR1muVlV1waAZq6YkmlApEwXrox9qxggSwGZD5TnLRIY9f7WA==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-2.3.0.tgz", 
+ "integrity": "sha512-Zd8ojyMlsL919TBExQ1I0CTpBDdyCpH/yOdqatZpuC3sd22K4SwC7+Yez3Q/vmXMWSAl+shjNeFZ7JMyxMjK+Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" + "peerDependencies": { + "postcss": "^8.4" + } + }, + "node_modules/@csstools/postcss-relative-color-syntax": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@csstools/postcss-relative-color-syntax/-/postcss-relative-color-syntax-1.0.2.tgz", + "integrity": "sha512-juCoVInkgH2TZPfOhyx6tIal7jW37L/0Tt+Vcl1LoxqQA9sxcg3JWYZ98pl1BonDnki6s/M7nXzFQHWsWMeHgw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "dependencies": { + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/postcss-progressive-custom-properties": "^2.3.0" + }, + "engines": { + "node": "^14 || ^16 || >=18" }, "peerDependencies": { "postcss": "^8.4" @@ -2603,13 +2856,13 @@ } }, "node_modules/@csstools/postcss-stepped-value-functions": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-stepped-value-functions/-/postcss-stepped-value-functions-2.1.0.tgz", - "integrity": "sha512-CkEo9BF8fQeMoXW3biXjlgTLY7PA4UFihn6leq7hPoRzIguLUI0WZIVgsITGXfX8LXmkhCSTjXO2DLYu/LUixQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-stepped-value-functions/-/postcss-stepped-value-functions-2.1.1.tgz", + "integrity": "sha512-YCvdF0GCZK35nhLgs7ippcxDlRVe5QsSht3+EghqTjnYnyl3BbWIN6fYQ1dKWYTJ+7Bgi41TgqQFfJDcp9Xy/w==", "dependencies": { - 
"@csstools/css-calc": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.0.1" + "@csstools/css-calc": "^1.1.1", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1" }, "engines": { "node": "^14 || ^16 || >=18" @@ -2623,44 +2876,56 @@ } }, "node_modules/@csstools/postcss-text-decoration-shorthand": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-text-decoration-shorthand/-/postcss-text-decoration-shorthand-2.2.2.tgz", - "integrity": "sha512-aR9l/V7p0SkdrIyBysqlQWIbGXeGC7U4ccBAIlWMpVpG/MsGhxs1JvdBpjim4UDF3U+1VmF+MbvZFb7dL+d7XA==", + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/@csstools/postcss-text-decoration-shorthand/-/postcss-text-decoration-shorthand-2.2.4.tgz", + "integrity": "sha512-zPN56sQkS/7YTCVZhOBVCWf7AiNge8fXDl7JVaHLz2RyT4pnyK2gFjckWRLpO0A2xkm1lCgZ0bepYZTwAVd/5A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/color-helpers": "^1.0.0", + "@csstools/color-helpers": "^2.1.0", "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/@csstools/postcss-text-decoration-shorthand/node_modules/@csstools/color-helpers": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-1.0.0.tgz", - "integrity": "sha512-tgqtiV8sU/VaWYjOB3O7PWs7HR/MmOLl2kTYRW2qSsTSEniJq7xmyAYFB1LPpXvvQcE5u2ih2dK9fyc8BnrAGQ==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-2.1.0.tgz", + "integrity": "sha512-OWkqBa7PDzZuJ3Ha7T5bxdSVfSCfTq6K1mbAhbO1MD+GSULGjrp45i5RudyJOedstSarN/3mdwu9upJE7gDXfw==", + "funding": [ + { + "type": 
"github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "engines": { "node": "^14 || ^16 || >=18" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" } }, "node_modules/@csstools/postcss-trigonometric-functions": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-trigonometric-functions/-/postcss-trigonometric-functions-2.1.0.tgz", - "integrity": "sha512-Ly7YczO+QdnByYeGqlppJoA2Tb2vsFfj5gSrszPTXJ+/4g3nnEZnG0VSeTK/WA8y7fzyL/qVNkkdEeOnruNWFQ==", + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/@csstools/postcss-trigonometric-functions/-/postcss-trigonometric-functions-2.1.1.tgz", + "integrity": "sha512-XcXmHEFfHXhvYz40FtDlA4Fp4NQln2bWTsCwthd2c+MCnYArUYU3YaMqzR5CrKP3pMoGYTBnp5fMqf1HxItNyw==", "dependencies": { - "@csstools/css-calc": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.0.1" + "@csstools/css-calc": "^1.1.1", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1" }, "engines": { "node": "^14 || ^16 || >=18" @@ -2703,10 +2968,31 @@ "postcss-selector-parser": "^6.0.10" } }, + "node_modules/@csstools/utilities": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@csstools/utilities/-/utilities-1.0.0.tgz", + "integrity": "sha512-tAgvZQe/t2mlvpNosA4+CkMiZ2azISW5WPAcdSalZlEjQvUfghHxfQcrCiK/7/CrfAWVxyM88kGFYO82heIGDg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "engines": { + "node": "^14 || ^16 || >=18" + }, + "peerDependencies": { + "postcss": "^8.4" + } + }, "node_modules/@esbuild/android-arm": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.17.15.tgz", - "integrity": 
"sha512-sRSOVlLawAktpMvDyJIkdLI/c/kdRTOqo8t6ImVxg8yT7LQDUYV5Rp2FKeEosLr6ZCja9UjYAzyRSxGteSJPYg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.18.20.tgz", + "integrity": "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==", "cpu": [ "arm" ], @@ -2720,9 +3006,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.17.15.tgz", - "integrity": "sha512-0kOB6Y7Br3KDVgHeg8PRcvfLkq+AccreK///B4Z6fNZGr/tNHX0z2VywCc7PTeWp+bPvjA5WMvNXltHw5QjAIA==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz", + "integrity": "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==", "cpu": [ "arm64" ], @@ -2736,9 +3022,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.17.15.tgz", - "integrity": "sha512-MzDqnNajQZ63YkaUWVl9uuhcWyEyh69HGpMIrf+acR4otMkfLJ4sUCxqwbCyPGicE9dVlrysI3lMcDBjGiBBcQ==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.18.20.tgz", + "integrity": "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==", "cpu": [ "x64" ], @@ -2752,9 +3038,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.15.tgz", - "integrity": "sha512-7siLjBc88Z4+6qkMDxPT2juf2e8SJxmsbNVKFY2ifWCDT72v5YJz9arlvBw5oB4W/e61H1+HDB/jnu8nNg0rLA==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", + "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", "cpu": [ "arm64" ], @@ 
-2768,9 +3054,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.17.15.tgz", - "integrity": "sha512-NbImBas2rXwYI52BOKTW342Tm3LTeVlaOQ4QPZ7XuWNKiO226DisFk/RyPk3T0CKZkKMuU69yOvlapJEmax7cg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz", + "integrity": "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==", "cpu": [ "x64" ], @@ -2784,9 +3070,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.15.tgz", - "integrity": "sha512-Xk9xMDjBVG6CfgoqlVczHAdJnCs0/oeFOspFap5NkYAmRCT2qTn1vJWA2f419iMtsHSLm+O8B6SLV/HlY5cYKg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz", + "integrity": "sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==", "cpu": [ "arm64" ], @@ -2800,9 +3086,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.17.15.tgz", - "integrity": "sha512-3TWAnnEOdclvb2pnfsTWtdwthPfOz7qAfcwDLcfZyGJwm1SRZIMOeB5FODVhnM93mFSPsHB9b/PmxNNbSnd0RQ==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz", + "integrity": "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==", "cpu": [ "x64" ], @@ -2816,9 +3102,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.17.15.tgz", - "integrity": "sha512-MLTgiXWEMAMr8nmS9Gigx43zPRmEfeBfGCwxFQEMgJ5MC53QKajaclW6XDPjwJvhbebv+RzK05TQjvH3/aM4Xw==", + "version": "0.18.20", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz", + "integrity": "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==", "cpu": [ "arm" ], @@ -2832,9 +3118,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.17.15.tgz", - "integrity": "sha512-T0MVnYw9KT6b83/SqyznTs/3Jg2ODWrZfNccg11XjDehIved2oQfrX/wVuev9N936BpMRaTR9I1J0tdGgUgpJA==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz", + "integrity": "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==", "cpu": [ "arm64" ], @@ -2848,9 +3134,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.17.15.tgz", - "integrity": "sha512-wp02sHs015T23zsQtU4Cj57WiteiuASHlD7rXjKUyAGYzlOKDAjqK6bk5dMi2QEl/KVOcsjwL36kD+WW7vJt8Q==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz", + "integrity": "sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==", "cpu": [ "ia32" ], @@ -2864,9 +3150,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.17.15.tgz", - "integrity": "sha512-k7FsUJjGGSxwnBmMh8d7IbObWu+sF/qbwc+xKZkBe/lTAF16RqxRCnNHA7QTd3oS2AfGBAnHlXL67shV5bBThQ==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz", + "integrity": "sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==", "cpu": [ "loong64" ], @@ -2880,9 +3166,9 @@ } }, "node_modules/@esbuild/linux-mips64el": { - "version": "0.17.15", - "resolved": 
"https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.17.15.tgz", - "integrity": "sha512-ZLWk6czDdog+Q9kE/Jfbilu24vEe/iW/Sj2d8EVsmiixQ1rM2RKH2n36qfxK4e8tVcaXkvuV3mU5zTZviE+NVQ==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz", + "integrity": "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==", "cpu": [ "mips64el" ], @@ -2896,9 +3182,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.17.15.tgz", - "integrity": "sha512-mY6dPkIRAiFHRsGfOYZC8Q9rmr8vOBZBme0/j15zFUKM99d4ILY4WpOC7i/LqoY+RE7KaMaSfvY8CqjJtuO4xg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz", + "integrity": "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==", "cpu": [ "ppc64" ], @@ -2912,9 +3198,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.17.15.tgz", - "integrity": "sha512-EcyUtxffdDtWjjwIH8sKzpDRLcVtqANooMNASO59y+xmqqRYBBM7xVLQhqF7nksIbm2yHABptoioS9RAbVMWVA==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz", + "integrity": "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==", "cpu": [ "riscv64" ], @@ -2928,9 +3214,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.17.15.tgz", - "integrity": "sha512-BuS6Jx/ezxFuHxgsfvz7T4g4YlVrmCmg7UAwboeyNNg0OzNzKsIZXpr3Sb/ZREDXWgt48RO4UQRDBxJN3B9Rbg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz", + "integrity": 
"sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==", "cpu": [ "s390x" ], @@ -2944,9 +3230,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.15.tgz", - "integrity": "sha512-JsdS0EgEViwuKsw5tiJQo9UdQdUJYuB+Mf6HxtJSPN35vez1hlrNb1KajvKWF5Sa35j17+rW1ECEO9iNrIXbNg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz", + "integrity": "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==", "cpu": [ "x64" ], @@ -2960,9 +3246,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.17.15.tgz", - "integrity": "sha512-R6fKjtUysYGym6uXf6qyNephVUQAGtf3n2RCsOST/neIwPqRWcnc3ogcielOd6pT+J0RDR1RGcy0ZY7d3uHVLA==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz", + "integrity": "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==", "cpu": [ "x64" ], @@ -2976,9 +3262,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.17.15.tgz", - "integrity": "sha512-mVD4PGc26b8PI60QaPUltYKeSX0wxuy0AltC+WCTFwvKCq2+OgLP4+fFd+hZXzO2xW1HPKcytZBdjqL6FQFa7w==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz", + "integrity": "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==", "cpu": [ "x64" ], @@ -2992,9 +3278,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.17.15.tgz", - "integrity": 
"sha512-U6tYPovOkw3459t2CBwGcFYfFRjivcJJc1WC8Q3funIwX8x4fP+R6xL/QuTPNGOblbq/EUDxj9GU+dWKX0oWlQ==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz", + "integrity": "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==", "cpu": [ "x64" ], @@ -3008,9 +3294,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.17.15.tgz", - "integrity": "sha512-W+Z5F++wgKAleDABemiyXVnzXgvRFs+GVKThSI+mGgleLWluv0D7Diz4oQpgdpNzh4i2nNDzQtWbjJiqutRp6Q==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz", + "integrity": "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==", "cpu": [ "arm64" ], @@ -3024,9 +3310,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.17.15.tgz", - "integrity": "sha512-Muz/+uGgheShKGqSVS1KsHtCyEzcdOn/W/Xbh6H91Etm+wiIfwZaBn1W58MeGtfI8WA961YMHFYTthBdQs4t+w==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz", + "integrity": "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==", "cpu": [ "ia32" ], @@ -3040,9 +3326,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.17.15.tgz", - "integrity": "sha512-DjDa9ywLUUmjhV2Y9wUTIF+1XsmuFGvZoCmOWkli1XcNAh5t25cc7fgsCx4Zi/Uurep3TTLyDiKATgGEg61pkA==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz", + "integrity": "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==", "cpu": [ "x64" ], @@ -3071,23 +3357,23 @@ } }, 
"node_modules/@eslint-community/regexpp": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.0.tgz", - "integrity": "sha512-vITaYzIcNmjn5tF5uxcZ/ft7/RXGrMUIS9HalWckEOF6ESiwXKoMzAQf2UW0aVd6rnOeExTJVd5hmWXucBKGXQ==", + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", + "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", "dev": true, "engines": { "node": "^12.0.0 || ^14.0.0 || >=16.0.0" } }, "node_modules/@eslint/eslintrc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.2.tgz", - "integrity": "sha512-3W4f5tDUra+pA+FzgugqL2pRimUTDJWKr7BINqOpkZrC0uYI0NIc0/JFgBROCU07HR6GieA5m3/rsPIhDmCXTQ==", + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", "dev": true, "dependencies": { "ajv": "^6.12.4", "debug": "^4.3.2", - "espree": "^9.5.1", + "espree": "^9.6.0", "globals": "^13.19.0", "ignore": "^5.2.0", "import-fresh": "^3.2.1", @@ -3102,36 +3388,10 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/@eslint/eslintrc/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - 
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/@eslint/eslintrc/node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -3143,36 +3403,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/@eslint/eslintrc/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/@eslint/eslintrc/node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, 
"node_modules/@eslint/eslintrc/node_modules/type-fest": { "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", @@ -3186,19 +3416,20 @@ } }, "node_modules/@eslint/js": { - "version": "8.37.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.37.0.tgz", - "integrity": "sha512-x5vzdtOOGgFVDCUs81QRB2+liax8rFg3+7hqM+QhBG0/G3F1ZsoYl97UrqgHgQ9KKT7G6c4V+aTUCgu/n22v1A==", + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", + "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, "node_modules/@headlessui/react": { - "version": "1.7.13", - "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.13.tgz", - "integrity": "sha512-9n+EQKRtD9266xIHXdY5MfiXPDfYwl7zBM7KOx2Ae3Gdgxy8QML1FkCMjq6AsOf0l6N9uvI4HcFtuFlenaldKg==", + "version": "1.7.19", + "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.19.tgz", + "integrity": "sha512-Ll+8q3OlMJfJbAKM/+/Y2q6PPYbryqNTXDbryx7SXLIDamkF6iQFbriYHga0dY44PvDhvvBWCx1Xj4U5+G4hOw==", "dependencies": { + "@tanstack/react-virtual": "^3.0.0-beta.60", "client-only": "^0.0.1" }, "engines": { @@ -3210,41 +3441,19 @@ } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.8", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", - "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", "dev": true, "dependencies": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", "minimatch": 
"^3.0.5" }, "engines": { "node": ">=10.10.0" } }, - "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/@humanwhocodes/module-importer": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", @@ -3259,39 +3468,129 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", "dev": true }, - "node_modules/@jest/expect-utils": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.5.0.tgz", - "integrity": "sha512-fmKzsidoXQT2KwnrwE0SQq3uj8Z763vzR8LnLBwC2qYWEFpjX8daRsk6rHUM1QvNlEW/UJXNXm59ztmJJWs2Mg==", + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": 
"sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, "dependencies": { - "jest-get-type": "^29.4.3" + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" }, "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + "node": ">=12" } }, - "node_modules/@jest/schemas": { - "version": "29.4.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz", - "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==", - "dependencies": { - "@sinclair/typebox": "^0.25.16" - }, - "engines": { - "node": "^14.15.0 || ^16.10.0 || >=18.0.0" - } + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", + "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "dependencies": { + "eastasianwidth": "^0.2.0", + 
"emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@jest/expect-utils": { + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.7.0.tgz", + "integrity": "sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==", + "dependencies": { + "jest-get-type": "^29.6.3" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } + }, + "node_modules/@jest/schemas": { + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.6.3.tgz", + "integrity": "sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==", + "dependencies": { + "@sinclair/typebox": "^0.27.8" + }, + "engines": { + "node": "^14.15.0 || ^16.10.0 || >=18.0.0" + } }, "node_modules/@jest/types": { - "version": "29.5.0", - "resolved": 
"https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz", - "integrity": "sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.6.3.tgz", + "integrity": "sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==", "dependencies": { - "@jest/schemas": "^29.4.3", + "@jest/schemas": "^29.6.3", "@types/istanbul-lib-coverage": "^2.0.0", "@types/istanbul-reports": "^3.0.0", "@types/node": "*", @@ -3303,56 +3602,57 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz", - "integrity": "sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==", + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz", + "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==", "dev": true, "dependencies": { - "@jridgewell/set-array": "^1.0.0", - "@jridgewell/sourcemap-codec": "^1.4.10" + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" }, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", "dev": true, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/set-array": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", "dev": true, "engines": { "node": ">=6.0.0" } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", + "version": "1.4.15", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", + "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==", "dev": true }, "node_modules/@jridgewell/trace-mapping": { - "version": "0.3.17", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz", - "integrity": "sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==", + "version": "0.3.25", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", "dev": true, "dependencies": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" } }, "node_modules/@jsdoc/salty": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/@jsdoc/salty/-/salty-0.2.5.tgz", - "integrity": "sha512-TfRP53RqunNe2HBobVBJ0VLhK1HbfvBYeTC1ahnN64PWvyYyGebmMiPkuwvD9fpw2ZbkoPb8Q7mwy0aR8Z9rvw==", + "version": "0.2.8", + "resolved": 
"https://registry.npmjs.org/@jsdoc/salty/-/salty-0.2.8.tgz", + "integrity": "sha512-5e+SFVavj1ORKlKaKr2BmTOekmXbelU7dC0cDkQLqag7xfuTPuGMUFx7KWJuv4bYZrTsoL2Z18VVCOKYxzoHcg==", "dev": true, "dependencies": { "lodash": "^4.17.21" @@ -3457,6 +3757,16 @@ "integrity": "sha512-Aq58f5HiWdyDlFffbbSjAlv596h/cOnt2DO1w3DOC7OJ5EHs0hd/nycJfiu9RJbT6Yk6F1knnRRXNSpxoIVZ9Q==", "dev": true }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", @@ -3532,9 +3842,9 @@ "peer": true }, "node_modules/@rollup/plugin-commonjs": { - "version": "24.0.1", - "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-24.0.1.tgz", - "integrity": "sha512-15LsiWRZk4eOGqvrJyu3z3DaBu5BhXIMeWnijSRvd8irrrg9SHpQ1pH+BUK4H6Z9wL9yOxZJMTLU+Au86XHxow==", + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-24.1.0.tgz", + "integrity": "sha512-eSL45hjhCWI0jCCXcNtLVqM5N1JlBGvlFfY0m6oOYnLCJ6N0qEXoZql4sY2MOUArzhH4SA/qBpTxvvZp2Sc+DQ==", "dev": true, "dependencies": { "@rollup/pluginutils": "^5.0.1", @@ -3557,9 +3867,9 @@ } }, "node_modules/@rollup/pluginutils": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.0.2.tgz", - "integrity": "sha512-pTd9rIsP92h+B6wWwFbW8RkZv4hiR/xKsqre4SIuAOaOEQRxi0lqLke9k2/7WegC85GgUs9pjmOjCUi3In4vwA==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.1.0.tgz", + "integrity": "sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g==", "dev": true, 
"dependencies": { "@types/estree": "^1.0.0", @@ -3570,7 +3880,7 @@ "node": ">=14.0.0" }, "peerDependencies": { - "rollup": "^1.20.0||^2.0.0||^3.0.0" + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "peerDependenciesMeta": { "rollup": { @@ -3579,15 +3889,15 @@ } }, "node_modules/@rushstack/eslint-patch": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.2.0.tgz", - "integrity": "sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg==", + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.2.tgz", + "integrity": "sha512-hw437iINopmQuxWPSUEvqE56NCPsiU8N4AYtfHmJFckclktzK9YQJieD3XkDCDH4OjL+C7zgPUh73R/nrcHrqw==", "dev": true }, "node_modules/@sinclair/typebox": { - "version": "0.25.24", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", - "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==" + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" }, "node_modules/@svgr/babel-plugin-add-jsx-attribute": { "version": "6.5.1", @@ -3606,9 +3916,9 @@ } }, "node_modules/@svgr/babel-plugin-remove-jsx-attribute": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-7.0.0.tgz", - "integrity": "sha512-iiZaIvb3H/c7d3TH2HBeK91uI2rMhZNwnsIrvd7ZwGLkFw6mmunOCoVnjdYua662MqGFxlN9xTq4fv9hgR4VXQ==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz", + "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==", "dev": true, "engines": { "node": ">=14" @@ 
-3622,9 +3932,9 @@ } }, "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-7.0.0.tgz", - "integrity": "sha512-sQQmyo+qegBx8DfFc04PFmIO1FP1MHI1/QEpzcIcclo5OAISsOJPW76ZIs0bDyO/DBSJEa/tDa1W26pVtt0FRw==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz", + "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==", "dev": true, "engines": { "node": ">=14" @@ -3814,30 +4124,55 @@ "@svgr/core": "^6.0.0" } }, + "node_modules/@tanstack/react-virtual": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.5.0.tgz", + "integrity": "sha512-rtvo7KwuIvqK9zb0VZ5IL7fiJAEnG+0EiFZz8FUOs+2mhGqdGmjKIaT1XU7Zq0eFqL0jonLlhbayJI/J2SA/Bw==", + "dependencies": { + "@tanstack/virtual-core": "3.5.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0", + "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/@tanstack/virtual-core": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.5.0.tgz", + "integrity": "sha512-KnPRCkQTyqhanNC0K63GBG3wA8I+D1fQuVnAvcBF8f13akOKeQp1gSbu6f77zCxhEk727iV5oQnbHLYzHrECLg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/tannerlinsley" + } + }, "node_modules/@testing-library/dom": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-9.2.0.tgz", - "integrity": "sha512-xTEnpUKiV/bMyEsE5bT4oYA0x0Z/colMtxzUY8bKyPXBNLn/e0V4ZjBZkEhms0xE4pv9QsPfSRu9AWS4y5wGvA==", + "version": "10.1.0", + "resolved": 
"https://registry.npmjs.org/@testing-library/dom/-/dom-10.1.0.tgz", + "integrity": "sha512-wdsYKy5zupPyLCW2Je5DLHSxSfbIp6h80WoHOQc+RPtmPGA52O9x5MJEkv92Sjonpq+poOAtUKhh1kBGAXBrNA==", "dev": true, "peer": true, "dependencies": { "@babel/code-frame": "^7.10.4", "@babel/runtime": "^7.12.5", "@types/aria-query": "^5.0.1", - "aria-query": "^5.0.0", + "aria-query": "5.3.0", "chalk": "^4.1.0", "dom-accessibility-api": "^0.5.9", "lz-string": "^1.5.0", "pretty-format": "^27.0.2" }, "engines": { - "node": ">=14" + "node": ">=18" } }, "node_modules/@testing-library/jest-dom": { - "version": "5.16.5", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.16.5.tgz", - "integrity": "sha512-N5ixQ2qKpi5OLYfwQmUb/5mSV9LneAcaUfp32pn4yCnpb8r/Yz0pXFPck21dIicKmi+ta5WRAknkZCfA8refMA==", + "version": "5.17.0", + "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.17.0.tgz", + "integrity": "sha512-ynmNeT7asXyH3aSVv4vvX4Rb+0qjOhdNHnO/3vuZNqPmhDpV/+rCSGwQ7bLcmU2cJ4dvoheIO85LQj0IbJHEtg==", "dev": true, "dependencies": { "@adobe/css-tools": "^4.0.1", @@ -3955,9 +4290,9 @@ "dev": true }, "node_modules/@testing-library/react/node_modules/@types/yargs": { - "version": "15.0.15", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-15.0.15.tgz", - "integrity": "sha512-IziEYMU9XoVj8hWg7k+UJrXALkGFjWJhn5QFEv9q4p+v40oZhSuC135M38st8XPjICL7Ey4TV64ferBGUoJhBg==", + "version": "15.0.19", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-15.0.19.tgz", + "integrity": "sha512-2XUaGVmyQjgyAZldf0D0c14vvo/yv0MhQBSTJcejMMaitsn3nxCB6TmH4G0ZQf+uxROOa9mpanoSm8h6SG/1ZA==", "dev": true, "dependencies": { "@types/yargs-parser": "*" @@ -3992,9 +4327,9 @@ } }, "node_modules/@testing-library/user-event": { - "version": "14.4.3", - "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.4.3.tgz", - "integrity": "sha512-kCUc5MEwaEMakkO5x7aoD+DLi02ehmEM2QCGWvNqAS1dV/fAvORWEjnjsEIvml59M7Y5kCkWN6fCCyPOe8OL6Q==", + 
"version": "14.5.2", + "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.5.2.tgz", + "integrity": "sha512-YAh82Wh4TIrxYLmfGcixwD18oIjyC1pFQC2Y01F2lzV2HTMiYrI0nze0FD0ocB//CKS/7jIUgae+adPqxK5yCQ==", "dev": true, "engines": { "node": ">=12", @@ -4022,22 +4357,22 @@ } }, "node_modules/@types/aria-query": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.1.tgz", - "integrity": "sha512-XTIieEY+gvJ39ChLcB4If5zHtPxt3Syj5rgZR+e1ctpmK8NjPf0zFqsz4JpLJT0xla9GFDKjy8Cpu331nrmE1Q==", + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", + "integrity": "sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==", "dev": true, "peer": true }, "node_modules/@types/chai": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.4.tgz", - "integrity": "sha512-KnRanxnpfpjUTqTCXslZSEdLfXExwgNxYPdiO2WGUj8+HDjFi8R3k5RVKPeSCzLjCcshCAtVO2QBbVuAV4kTnw==", + "version": "4.3.14", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.14.tgz", + "integrity": "sha512-Wj71sXE4Q4AkGdG9Tvq1u/fquNz9EdG4LIJMwVVII7ashjD/8cf8fyIfJAjRr6YcsXnSE8cOGQPq1gqeR8z+3w==", "dev": true }, "node_modules/@types/chai-subset": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/@types/chai-subset/-/chai-subset-1.3.3.tgz", - "integrity": "sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==", + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/@types/chai-subset/-/chai-subset-1.3.5.tgz", + "integrity": "sha512-c2mPnw+xHtXDoHmdtcCXGwyLMiauiAyxWMzhGpqHC4nqI/Y5G2XhTampslK2rb59kpcuHon03UH8W6iYUzw88A==", "dev": true, "dependencies": { "@types/chai": "*" @@ -4050,9 +4385,9 @@ "dev": true }, "node_modules/@types/d3": { - "version": "7.4.0", - "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.0.tgz", - "integrity": 
"sha512-jIfNVK0ZlxcuRDKtRS/SypEyOQ6UHaFQBKv032X45VvxSJ6Yi5G9behy9h6tNTHTDGh5Vq+KbmBjUWLgY4meCA==", + "version": "7.4.3", + "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.3.tgz", + "integrity": "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==", "dependencies": { "@types/d3-array": "*", "@types/d3-axis": "*", @@ -4087,194 +4422,194 @@ } }, "node_modules/@types/d3-array": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.0.4.tgz", - "integrity": "sha512-nwvEkG9vYOc0Ic7G7kwgviY4AQlTfYGIZ0fqB7CQHXGyYM6nO7kJh5EguSNA3jfh4rq7Sb7eMVq8isuvg2/miQ==" + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.2.1.tgz", + "integrity": "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==" }, "node_modules/@types/d3-axis": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.2.tgz", - "integrity": "sha512-uGC7DBh0TZrU/LY43Fd8Qr+2ja1FKmH07q2FoZFHo1eYl8aj87GhfVoY1saJVJiq24rp1+wpI6BvQJMKgQm8oA==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.6.tgz", + "integrity": "sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==", "dependencies": { "@types/d3-selection": "*" } }, "node_modules/@types/d3-brush": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.2.tgz", - "integrity": "sha512-2TEm8KzUG3N7z0TrSKPmbxByBx54M+S9lHoP2J55QuLU0VSQ9mE96EJSAOVNEqd1bbynMjeTS9VHmz8/bSw8rA==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.6.tgz", + "integrity": "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==", "dependencies": { "@types/d3-selection": "*" } }, "node_modules/@types/d3-chord": { - "version": "3.0.2", - "resolved": 
"https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.2.tgz", - "integrity": "sha512-abT/iLHD3sGZwqMTX1TYCMEulr+wBd0SzyOQnjYNLp7sngdOHYtNkMRI5v3w5thoN+BWtlHVDx2Osvq6fxhZWw==" + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.6.tgz", + "integrity": "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==" }, "node_modules/@types/d3-color": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.0.tgz", - "integrity": "sha512-HKuicPHJuvPgCD+np6Se9MQvS6OCbJmOjGvylzMJRlDwUXjKTTXs6Pwgk79O09Vj/ho3u1ofXnhFOaEWWPrlwA==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz", + "integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==" }, "node_modules/@types/d3-contour": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.2.tgz", - "integrity": "sha512-k6/bGDoAGJZnZWaKzeB+9glgXCYGvh6YlluxzBREiVo8f/X2vpTEdgPy9DN7Z2i42PZOZ4JDhVdlTSTSkLDPlQ==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.6.tgz", + "integrity": "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==", "dependencies": { "@types/d3-array": "*", "@types/geojson": "*" } }, "node_modules/@types/d3-delaunay": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.1.tgz", - "integrity": "sha512-tLxQ2sfT0p6sxdG75c6f/ekqxjyYR0+LwPrsO1mbC9YDBzPJhs2HbJJRrn8Ez1DBoHRo2yx7YEATI+8V1nGMnQ==" + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==" }, "node_modules/@types/d3-dispatch": { - "version": "3.0.2", - "resolved": 
"https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.2.tgz", - "integrity": "sha512-rxN6sHUXEZYCKV05MEh4z4WpPSqIw+aP7n9ZN6WYAAvZoEAghEK1WeVZMZcHRBwyaKflU43PCUAJNjFxCzPDjg==" + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.6.tgz", + "integrity": "sha512-4fvZhzMeeuBJYZXRXrRIQnvUYfyXwYmLsdiN7XXmVNQKKw1cM8a5WdID0g1hVFZDqT9ZqZEY5pD44p24VS7iZQ==" }, "node_modules/@types/d3-drag": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.2.tgz", - "integrity": "sha512-qmODKEDvyKWVHcWWCOVcuVcOwikLVsyc4q4EBJMREsoQnR2Qoc2cZQUyFUPgO9q4S3qdSqJKBsuefv+h0Qy+tw==", + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz", + "integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==", "dependencies": { "@types/d3-selection": "*" } }, "node_modules/@types/d3-dsv": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.1.tgz", - "integrity": "sha512-76pBHCMTvPLt44wFOieouXcGXWOF0AJCceUvaFkxSZEu4VDUdv93JfpMa6VGNFs01FHfuP4a5Ou68eRG1KBfTw==" + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.7.tgz", + "integrity": "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==" }, "node_modules/@types/d3-ease": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.0.tgz", - "integrity": "sha512-aMo4eaAOijJjA6uU+GIeW018dvy9+oH5Y2VPPzjjfxevvGQ/oRDs+tfYC9b50Q4BygRR8yE2QCLsrT0WtAVseA==" + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.2.tgz", + "integrity": "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==" }, "node_modules/@types/d3-fetch": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.2.tgz", - "integrity": 
"sha512-gllwYWozWfbep16N9fByNBDTkJW/SyhH6SGRlXloR7WdtAaBui4plTP+gbUgiEot7vGw/ZZop1yDZlgXXSuzjA==", + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.7.tgz", + "integrity": "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==", "dependencies": { "@types/d3-dsv": "*" } }, "node_modules/@types/d3-force": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.4.tgz", - "integrity": "sha512-q7xbVLrWcXvSBBEoadowIUJ7sRpS1yvgMWnzHJggFy5cUZBq2HZL5k/pBSm0GdYWS1vs5/EDwMjSKF55PDY4Aw==" + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.9.tgz", + "integrity": "sha512-IKtvyFdb4Q0LWna6ymywQsEYjK/94SGhPrMfEr1TIc5OBeziTi+1jcCvttts8e0UWZIxpasjnQk9MNk/3iS+kA==" }, "node_modules/@types/d3-format": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.1.tgz", - "integrity": "sha512-5KY70ifCCzorkLuIkDe0Z9YTf9RR2CjBX1iaJG+rgM/cPP+sO+q9YdQ9WdhQcgPj1EQiJ2/0+yUkkziTG6Lubg==" + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.4.tgz", + "integrity": "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==" }, "node_modules/@types/d3-geo": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.0.3.tgz", - "integrity": "sha512-bK9uZJS3vuDCNeeXQ4z3u0E7OeJZXjUgzFdSOtNtMCJCLvDtWDwfpRVWlyt3y8EvRzI0ccOu9xlMVirawolSCw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.1.0.tgz", + "integrity": "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==", "dependencies": { "@types/geojson": "*" } }, "node_modules/@types/d3-hierarchy": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", - "integrity": 
"sha512-9hjRTVoZjRFR6xo8igAJyNXQyPX6Aq++Nhb5ebrUF414dv4jr2MitM2fWiOY475wa3Za7TOS2Gh9fmqEhLTt0A==" + "version": "3.1.7", + "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.7.tgz", + "integrity": "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==" }, "node_modules/@types/d3-interpolate": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.1.tgz", - "integrity": "sha512-jx5leotSeac3jr0RePOH1KdR9rISG91QIE4Q2PYTu4OymLTZfA3SrnURSLzKH48HmXVUru50b8nje4E79oQSQw==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz", + "integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==", "dependencies": { "@types/d3-color": "*" } }, "node_modules/@types/d3-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.0.0.tgz", - "integrity": "sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg==" + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-P2dlU/q51fkOc/Gfl3Ul9kicV7l+ra934qBFXCFhrZMOL6du1TM0pm1ThYvENukyOn5h9v+yMJ9Fn5JK4QozrQ==" }, "node_modules/@types/d3-polygon": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.0.tgz", - "integrity": "sha512-D49z4DyzTKXM0sGKVqiTDTYr+DHg/uxsiWDAkNrwXYuiZVd9o9wXZIo+YsHkifOiyBkmSWlEngHCQme54/hnHw==" + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.2.tgz", + "integrity": "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==" }, "node_modules/@types/d3-quadtree": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.2.tgz", - "integrity": 
"sha512-QNcK8Jguvc8lU+4OfeNx+qnVy7c0VrDJ+CCVFS9srBo2GL9Y18CnIxBdTF3v38flrGy5s1YggcoAiu6s4fLQIw==" + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.6.tgz", + "integrity": "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==" }, "node_modules/@types/d3-random": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.1.tgz", - "integrity": "sha512-IIE6YTekGczpLYo/HehAy3JGF1ty7+usI97LqraNa8IiDur+L44d0VOjAvFQWJVdZOJHukUJw+ZdZBlgeUsHOQ==" + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.3.tgz", + "integrity": "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==" }, "node_modules/@types/d3-scale": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.3.tgz", - "integrity": "sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==", + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.8.tgz", + "integrity": "sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ==", "dependencies": { "@types/d3-time": "*" } }, "node_modules/@types/d3-scale-chromatic": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz", - "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw==" + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz", + "integrity": "sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw==" }, "node_modules/@types/d3-selection": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.5.tgz", - "integrity": 
"sha512-xCB0z3Hi8eFIqyja3vW8iV01+OHGYR2di/+e+AiOcXIOrY82lcvWW8Ke1DYE/EUVMsBl4Db9RppSBS3X1U6J0w==" + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.10.tgz", + "integrity": "sha512-cuHoUgS/V3hLdjJOLTT691+G2QoqAjCVLmr4kJXR4ha56w1Zdu8UUQ5TxLRqudgNjwXeQxKMq4j+lyf9sWuslg==" }, "node_modules/@types/d3-shape": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.1.tgz", - "integrity": "sha512-6Uh86YFF7LGg4PQkuO2oG6EMBRLuW9cbavUW46zkIO5kuS2PfTqo2o9SkgtQzguBHbLgNnU90UNsITpsX1My+A==", + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.6.tgz", + "integrity": "sha512-5KKk5aKGu2I+O6SONMYSNflgiP0WfZIQvVUMan50wHsLG1G94JlxEVnCpQARfTtzytuY0p/9PXXZb3I7giofIA==", "dependencies": { "@types/d3-path": "*" } }, "node_modules/@types/d3-time": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz", - "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg==" + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.3.tgz", + "integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw==" }, "node_modules/@types/d3-time-format": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.0.tgz", - "integrity": "sha512-yjfBUe6DJBsDin2BMIulhSHmr5qNR5Pxs17+oW4DoVPyVIXZ+m6bs7j1UVKP08Emv6jRmYrYqxYzO63mQxy1rw==" + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.3.tgz", + "integrity": "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==" }, "node_modules/@types/d3-timer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.0.tgz", - "integrity": 
"sha512-HNB/9GHqu7Fo8AQiugyJbv6ZxYz58wef0esl4Mv828w1ZKpAshw/uFWVDUcIB9KKFeFKoxS3cHY07FFgtTRZ1g==" + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.2.tgz", + "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==" }, "node_modules/@types/d3-transition": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.3.tgz", - "integrity": "sha512-/S90Od8Id1wgQNvIA8iFv9jRhCiZcGhPd2qX0bKF/PS+y0W5CrXKgIiELd2CvG1mlQrWK/qlYh3VxicqG1ZvgA==", + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.8.tgz", + "integrity": "sha512-ew63aJfQ/ms7QQ4X7pk5NxQ9fZH/z+i24ZfJ6tJSfqxJMrYLiK01EAs2/Rtw/JreGUsS3pLPNV644qXFGnoZNQ==", "dependencies": { "@types/d3-selection": "*" } }, "node_modules/@types/d3-zoom": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.2.tgz", - "integrity": "sha512-t09DDJVBI6AkM7N8kuPsnq/3d/ehtRKBN1xSiYjjMCgbiw6HM6Ged5VhvswmhprfKyGvzeTEL/4WBaK9llWvlA==", + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz", + "integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==", "dependencies": { "@types/d3-interpolate": "*", "@types/d3-selection": "*" } }, "node_modules/@types/eslint": { - "version": "8.37.0", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.37.0.tgz", - "integrity": "sha512-Piet7dG2JBuDIfohBngQ3rCt7MgO9xCO4xIMKxBThCq5PNRB91IjlJ10eJVwfoNtvTErmxLzwBZ7rHZtbOMmFQ==", + "version": "8.56.10", + "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.56.10.tgz", + "integrity": "sha512-Shavhk87gCtY2fhXDctcfS3e6FdxWkCx1iUZ9eEUbh7rTqlZT0/IzOkCOVt0fCjcFuZ9FPYfuezTBImfHCDBGQ==", "dev": true, "dependencies": { "@types/estree": "*", @@ -4282,15 +4617,15 @@ } }, "node_modules/@types/estree": { - "version": 
"1.0.0", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.0.tgz", - "integrity": "sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ==", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", "dev": true }, "node_modules/@types/geojson": { - "version": "7946.0.10", - "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.10.tgz", - "integrity": "sha512-Nmh0K3iWQJzniTuPRcJn5hxXkfB1T1pgB89SBig5PlJQU5yocazeu4jATJlaA0GYFKWMqDdvYemoSnF2pXgLVA==" + "version": "7946.0.14", + "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.14.tgz", + "integrity": "sha512-WCfD5Ht3ZesJUsONdhvm84dmzWOiOzOAqOncN0++w0lBw1o8OuDNJF2McvvCef/yBqb/HYRahp1BYtODFQ8bRg==" }, "node_modules/@types/history": { "version": "4.7.11", @@ -4298,9 +4633,9 @@ "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" }, "node_modules/@types/inquirer": { - "version": "8.2.6", - "resolved": "https://registry.npmjs.org/@types/inquirer/-/inquirer-8.2.6.tgz", - "integrity": "sha512-3uT88kxg8lNzY8ay2ZjP44DKcRaTGztqeIvN2zHvhzIBH/uAPaL75aBtdNRKbA7xXoMbBt5kX0M00VKAnfOYlA==", + "version": "8.2.10", + "resolved": "https://registry.npmjs.org/@types/inquirer/-/inquirer-8.2.10.tgz", + "integrity": "sha512-IdD5NmHyVjWM8SHWo/kPBgtzXatwPkfwzyP3fN1jF2g9BWt5WO+8hL2F4o2GKIYsU40PpqeevuUWvkS/roXJkA==", "dev": true, "dependencies": { "@types/through": "*", @@ -4308,30 +4643,30 @@ } }, "node_modules/@types/istanbul-lib-coverage": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", - "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==" }, "node_modules/@types/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz", + "integrity": "sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==", "dependencies": { "@types/istanbul-lib-coverage": "*" } }, "node_modules/@types/istanbul-reports": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", - "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz", + "integrity": "sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==", "dependencies": { "@types/istanbul-lib-report": "*" } }, "node_modules/@types/jest": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.0.tgz", - "integrity": "sha512-3Emr5VOl/aoBwnWcH/EFQvlSAmjV+XtV9GGu5mwdYew5vhQh0IUZx/60x0TzHDu09Bi7HMx10t/namdJw5QIcg==", + "version": "29.5.12", + "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.12.tgz", + "integrity": "sha512-eDC8bTvT/QhYdxJAulQikueigY5AsdBRH2yDKW3yveW7svY3+DzN84/2NUgkw10RTiJbWqZrTtoGVdYlvFJdLw==", "dependencies": { "expect": "^29.0.0", "pretty-format": "^29.0.0" @@ -4349,11 +4684,11 @@ } }, "node_modules/@types/jest/node_modules/pretty-format": { - "version": "29.5.0", - "resolved": 
"https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dependencies": { - "@jest/schemas": "^29.4.3", + "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", "react-is": "^18.0.0" }, @@ -4362,20 +4697,20 @@ } }, "node_modules/@types/jest/node_modules/react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" }, "node_modules/@types/js-levenshtein": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/js-levenshtein/-/js-levenshtein-1.1.1.tgz", - "integrity": "sha512-qC4bCqYGy1y/NP7dDVr7KJarn+PbX1nSpwA7JXdu0HxT3QYjO8MJ+cntENtHFVy2dRAyBV23OZ6MxsW1AM1L8g==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@types/js-levenshtein/-/js-levenshtein-1.1.3.tgz", + "integrity": "sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ==", "dev": true }, "node_modules/@types/json-schema": { - "version": "7.0.11", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", - "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": 
"sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", "dev": true }, "node_modules/@types/json5": { @@ -4385,21 +4720,21 @@ "dev": true }, "node_modules/@types/linkify-it": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.2.tgz", - "integrity": "sha512-HZQYqbiFVWufzCwexrvh694SOim8z2d+xJl5UNamcvQFejLY/2YUtzXHYi3cHdI7PMlS8ejH2slRAOJQ32aNbA==", + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.5.tgz", + "integrity": "sha512-yg6E+u0/+Zjva+buc3EIb+29XEg4wltq7cSmd4Uc2EE/1nUVmxyzpX6gUXD0V8jIrG0r7YeOGVIbYRkxeooCtw==", "dev": true }, "node_modules/@types/lodash": { - "version": "4.14.192", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.192.tgz", - "integrity": "sha512-km+Vyn3BYm5ytMO13k9KTp27O75rbQ0NFw+U//g+PX7VZyjCioXaRFisqSIJRECljcTv73G3i6BpglNGHgUQ5A==", + "version": "4.17.0", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.0.tgz", + "integrity": "sha512-t7dhREVv6dbNj0q17X12j7yDG4bD/DHYX7o5/DbDxobP0HnGPgpRz2Ej77aL7TZT3DSw13fqUTj8J4mMnqa7WA==", "dev": true }, "node_modules/@types/lodash-es": { - "version": "4.17.7", - "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.7.tgz", - "integrity": "sha512-z0ptr6UI10VlU6l5MYhGwS4mC8DZyYer2mCoyysZtSF7p26zOX8UpbrV0YpNYLGS8K4PUFIyEr62IMFFjveSiQ==", + "version": "4.17.12", + "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.12.tgz", + "integrity": "sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ==", "dev": true, "dependencies": { "@types/lodash": "*" @@ -4416,53 +4751,52 @@ } }, "node_modules/@types/mdurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@types/mdurl/-/mdurl-1.0.2.tgz", - "integrity": "sha512-eC4U9MlIcu2q0KQmXszyn5Akca/0jrQmwDRgpAMJai7qBWq4amIQhZyNau4VYGtCeALvW1/NtjzJJ567aZxfKA==", + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/@types/mdurl/-/mdurl-1.0.5.tgz", + "integrity": "sha512-6L6VymKTzYSrEf4Nev4Xa1LCHKrlTlYCBMTlQKFuddo1CvQcE52I0mwfOJayueUC7MJuXOeHTcIU683lzd0cUA==", "dev": true }, "node_modules/@types/minimist": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz", - "integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==", "dev": true }, "node_modules/@types/node": { - "version": "16.18.23", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.23.tgz", - "integrity": "sha512-XAMpaw1s1+6zM+jn2tmw8MyaRDIJfXxqmIQIS0HfoGYPuf7dUWeiUKopwq13KFX9lEp1+THGtlaaYx39Nxr58g==" + "version": "16.18.96", + "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.96.tgz", + "integrity": "sha512-84iSqGXoO+Ha16j8pRZ/L90vDMKX04QTYMTfYeE1WrjWaZXuchBehGUZEpNgx7JnmlrIHdnABmpjrQjhCnNldQ==" }, "node_modules/@types/normalize-package-data": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz", - "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==", + "version": "2.4.4", + "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz", + "integrity": "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==", "dev": true }, "node_modules/@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", "dev": true }, "node_modules/@types/prop-types": { - "version": "15.7.5", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + "version": "15.7.12", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz", + "integrity": "sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==" }, "node_modules/@types/react": { - "version": "18.0.33", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.0.33.tgz", - "integrity": "sha512-sHxzVxeanvQyQ1lr8NSHaj0kDzcNiGpILEVt69g9S31/7PfMvNCKLKcsHw4lYKjs3cGNJjXSP4mYzX43QlnjNA==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.1.tgz", + "integrity": "sha512-V0kuGBX3+prX+DQ/7r2qsv1NsdfnCLnTgnRJ1pYnxykBhGMz+qj+box5lq7XsO5mtZsBqpjwwTu/7wszPfMBcw==", "dependencies": { "@types/prop-types": "*", - "@types/scheduler": "*", "csstype": "^3.0.2" } }, "node_modules/@types/react-dom": { - "version": "18.0.11", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.11.tgz", - "integrity": "sha512-O38bPbI2CWtgw/OoQoY+BRelw7uysmXbWvw3nLWO21H1HSh+GOlqPuXshJfjmpNlKiiSDG9cc1JZAaMmVdcTlw==", + "version": "18.3.0", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.0.tgz", + "integrity": "sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==", "dev": true, "dependencies": { "@types/react": "*" @@ -4488,87 +4822,82 @@ } }, "node_modules/@types/react-test-renderer": { - "version": "18.0.0", - "resolved": "https://registry.npmjs.org/@types/react-test-renderer/-/react-test-renderer-18.0.0.tgz", - "integrity": 
"sha512-C7/5FBJ3g3sqUahguGi03O79b8afNeSD6T8/GU50oQrJCU0bVCCGQHaGKUbg2Ce8VQEEqTw8/HiS6lXHHdgkdQ==", + "version": "18.3.0", + "resolved": "https://registry.npmjs.org/@types/react-test-renderer/-/react-test-renderer-18.3.0.tgz", + "integrity": "sha512-HW4MuEYxfDbOHQsVlY/XtOvNHftCVEPhJF2pQXXwcUiUF+Oyb0usgp48HSgpK5rt8m9KZb22yqOeZm+rrVG8gw==", "dev": true, "dependencies": { "@types/react": "*" } }, "node_modules/@types/resize-observer-browser": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.7.tgz", - "integrity": "sha512-G9eN0Sn0ii9PWQ3Vl72jDPgeJwRWhv2Qk/nQkJuWmRmOB4HX3/BhD5SE1dZs/hzPZL/WKnvF0RHdTSG54QJFyg==" - }, - "node_modules/@types/scheduler": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", - "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==" + "version": "0.1.11", + "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.11.tgz", + "integrity": "sha512-cNw5iH8JkMkb3QkCoe7DaZiawbDQEUX8t7iuQaRTyLOyQCR2h+ibBD4GJt7p5yhUHrlOeL7ZtbxNHeipqNsBzQ==" }, "node_modules/@types/semver": { - "version": "7.3.13", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", - "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", + "version": "7.5.8", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", + "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", "dev": true }, "node_modules/@types/set-cookie-parser": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/@types/set-cookie-parser/-/set-cookie-parser-2.4.2.tgz", - "integrity": "sha512-fBZgytwhYAUkj/jC/FAV4RQ5EerRup1YQsXQCh8rZfiHkc4UahC192oH0smGwsXol3cL3A5oETuAHeQHmhXM4w==", + "version": "2.4.7", + "resolved": 
"https://registry.npmjs.org/@types/set-cookie-parser/-/set-cookie-parser-2.4.7.tgz", + "integrity": "sha512-+ge/loa0oTozxip6zmhRIk8Z/boU51wl9Q6QdLZcokIGMzY5lFXYy/x7Htj2HTC6/KZP1hUbZ1ekx8DYXICvWg==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/stack-utils": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", - "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==" + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.3.tgz", + "integrity": "sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==" }, "node_modules/@types/testing-library__jest-dom": { - "version": "5.14.5", - "resolved": "https://registry.npmjs.org/@types/testing-library__jest-dom/-/testing-library__jest-dom-5.14.5.tgz", - "integrity": "sha512-SBwbxYoyPIvxHbeHxTZX2Pe/74F/tX2/D3mMvzabdeJ25bBojfW0TyB8BHrbq/9zaaKICJZjLP+8r6AeZMFCuQ==", + "version": "5.14.9", + "resolved": "https://registry.npmjs.org/@types/testing-library__jest-dom/-/testing-library__jest-dom-5.14.9.tgz", + "integrity": "sha512-FSYhIjFlfOpGSRyVoMBMuS3ws5ehFQODymf3vlI7U1K8c7PHwWwFY7VREfmsuzHSOnoKs/9/Y983ayOs7eRzqw==", "dev": true, "dependencies": { "@types/jest": "*" } }, "node_modules/@types/through": { - "version": "0.0.30", - "resolved": "https://registry.npmjs.org/@types/through/-/through-0.0.30.tgz", - "integrity": "sha512-FvnCJljyxhPM3gkRgWmxmDZyAQSiBQQWLI0A0VFL0K7W1oRUrPJSqNO0NvTnLkBcotdlp3lKvaT0JrnyRDkzOg==", + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/@types/through/-/through-0.0.33.tgz", + "integrity": "sha512-HsJ+z3QuETzP3cswwtzt2vEIiHBk/dCcHGhbmG5X3ecnwFD/lPrMpliGXxSCg03L9AhrdwA4Oz/qfspkDW+xGQ==", "dev": true, "dependencies": { "@types/node": "*" } }, "node_modules/@types/yargs": { - "version": "17.0.24", - "resolved": 
"https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", - "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", + "version": "17.0.32", + "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.32.tgz", + "integrity": "sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog==", "dependencies": { "@types/yargs-parser": "*" } }, "node_modules/@types/yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==" + "version": "21.0.3", + "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.3.tgz", + "integrity": "sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.57.1.tgz", - "integrity": "sha512-1MeobQkQ9tztuleT3v72XmY0XuKXVXusAhryoLuU5YZ+mXoYKZP9SQ7Flulh1NX4DTjpGTc2b/eMu4u7M7dhnQ==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.62.0.tgz", + "integrity": "sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.4.0", - "@typescript-eslint/scope-manager": "5.57.1", - "@typescript-eslint/type-utils": "5.57.1", - "@typescript-eslint/utils": "5.57.1", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/type-utils": "5.62.0", + "@typescript-eslint/utils": "5.62.0", "debug": "^4.3.4", - "grapheme-splitter": "^1.0.4", + "graphemer": "^1.4.0", "ignore": "^5.2.0", "natural-compare-lite": "^1.4.0", "semver": "^7.3.7", @@ -4604,9 +4933,9 @@ } }, 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -4625,12 +4954,12 @@ "dev": true }, "node_modules/@typescript-eslint/experimental-utils": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.57.1.tgz", - "integrity": "sha512-5F5s8mpM1Y0RQ5iWzKQPQm5cmhARgcMfUwyHX1ZZFL8Tm0PyzyQ+9jgYSMaW74XXvpDg9/KdmMICLlwNwKtO7w==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.62.0.tgz", + "integrity": "sha512-RTXpeB3eMkpoclG3ZHft6vG/Z30azNHuqY6wKPBHlVMZFuEvrtlEDe8gMqDb+SO+9hjC/pLekeSCryf9vMZlCw==", "dev": true, "dependencies": { - "@typescript-eslint/utils": "5.57.1" + "@typescript-eslint/utils": "5.62.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4644,14 +4973,14 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.57.1.tgz", - "integrity": "sha512-hlA0BLeVSA/wBPKdPGxoVr9Pp6GutGoY380FEhbVi0Ph4WNe8kLvqIRx76RSQt1lynZKfrXKs0/XeEk4zZycuA==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.62.0.tgz", + "integrity": "sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "5.57.1", - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/typescript-estree": "5.57.1", 
+ "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", "debug": "^4.3.4" }, "engines": { @@ -4671,13 +5000,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.57.1.tgz", - "integrity": "sha512-N/RrBwEUKMIYxSKl0oDK5sFVHd6VI7p9K5MyUlVYAY6dyNb/wHUqndkTd3XhpGlXgnQsBkRZuu4f9kAHghvgPw==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz", + "integrity": "sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/visitor-keys": "5.57.1" + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4688,13 +5017,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.57.1.tgz", - "integrity": "sha512-/RIPQyx60Pt6ga86hKXesXkJ2WOS4UemFrmmq/7eOyiYjYv/MUSHPlkhU6k9T9W1ytnTJueqASW+wOmW4KrViw==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.62.0.tgz", + "integrity": "sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "5.57.1", - "@typescript-eslint/utils": "5.57.1", + "@typescript-eslint/typescript-estree": "5.62.0", + "@typescript-eslint/utils": "5.62.0", "debug": "^4.3.4", "tsutils": "^3.21.0" }, @@ -4715,9 +5044,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.57.1.tgz", - "integrity": 
"sha512-bSs4LOgyV3bJ08F5RDqO2KXqg3WAdwHCu06zOqcQ6vqbTJizyBhuh1o1ImC69X4bV2g1OJxbH71PJqiO7Y1RuA==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.62.0.tgz", + "integrity": "sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -4728,13 +5057,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.57.1.tgz", - "integrity": "sha512-A2MZqD8gNT0qHKbk2wRspg7cHbCDCk2tcqt6ScCFLr5Ru8cn+TCfM786DjPhqwseiS+PrYwcXht5ztpEQ6TFTw==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz", + "integrity": "sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/visitor-keys": "5.57.1", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/visitor-keys": "5.62.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -4767,9 +5096,9 @@ } }, "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -4788,17 +5117,17 @@ "dev": true }, "node_modules/@typescript-eslint/utils": { - "version": "5.57.1", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.57.1.tgz", - "integrity": "sha512-kN6vzzf9NkEtawECqze6v99LtmDiUJCVpvieTFA1uL7/jDghiJGubGZ5csicYHU1Xoqb3oH/R5cN5df6W41Nfg==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.62.0.tgz", + "integrity": "sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@types/json-schema": "^7.0.9", "@types/semver": "^7.3.12", - "@typescript-eslint/scope-manager": "5.57.1", - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/typescript-estree": "5.57.1", + "@typescript-eslint/scope-manager": "5.62.0", + "@typescript-eslint/types": "5.62.0", + "@typescript-eslint/typescript-estree": "5.62.0", "eslint-scope": "^5.1.1", "semver": "^7.3.7" }, @@ -4848,9 +5177,9 @@ } }, "node_modules/@typescript-eslint/utils/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -4869,12 +5198,12 @@ "dev": true }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.57.1.tgz", - "integrity": "sha512-RjQrAniDU0CEk5r7iphkm731zKlFiUjvcBS2yHAg8WWqFMCaCrD0rKEVOMUyMMcbGPZ0bPp56srkGWrgfZqLRA==", + "version": "5.62.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz", + "integrity": "sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==", 
"dev": true, "dependencies": { - "@typescript-eslint/types": "5.57.1", + "@typescript-eslint/types": "5.62.0", "eslint-visitor-keys": "^3.3.0" }, "engines": { @@ -4885,6 +5214,12 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true + }, "node_modules/@vitejs/plugin-react": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-3.1.0.tgz", @@ -4941,6 +5276,18 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@vitest/runner/node_modules/yocto-queue": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", + "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "dev": true, + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@vitest/spy": { "version": "0.29.8", "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-0.29.8.tgz", @@ -4963,24 +5310,25 @@ } }, "node_modules/@xmldom/xmldom": { - "version": "0.7.10", - "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.7.10.tgz", - "integrity": "sha512-hb9QhOg5MGmpVkFcoZ9XJMe1em5gd0e2eqqjK87O1dwULedXsnY/Zg/Ju6lcohA+t6jVkmKpe7I1etqhvdRdrQ==", + "version": "0.7.13", + "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.7.13.tgz", + "integrity": "sha512-lm2GW5PkosIzccsaZIz7tp8cPADSIlIHWDFTR1N0SzfinhhYgeIQjFMz4rYzanCScr3DqQLeomUDArp6MWKm+g==", "dev": true, "engines": { "node": ">=10.0.0" } }, "node_modules/@zeit/schemas": { - "version": "2.29.0", - "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.29.0.tgz", - "integrity": 
"sha512-g5QiLIfbg3pLuYUJPlisNKY+epQJTcMDsOnVNkscrDP1oi7vmJnzOANYJI/1pZcVJ6umUkBv3aFtlg1UvUHGzA==", + "version": "2.36.0", + "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.36.0.tgz", + "integrity": "sha512-7kjMwcChYEzMKjeex9ZFXkt1AyNov9R5HZtjBKVsmVpw7pa7ZtlCGvCBC2vnnXctaYN+aRI61HjIqeetZW5ROg==", "dev": true }, "node_modules/abab": { "version": "2.0.6", "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", + "deprecated": "Use your platform's native atob() and btoa() methods instead", "dev": true }, "node_modules/accepts": { @@ -4997,9 +5345,9 @@ } }, "node_modules/acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", + "version": "8.11.3", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", + "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -5028,9 +5376,9 @@ } }, "node_modules/acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz", + "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", "dev": true, "engines": { "node": ">=0.4.0" @@ -5049,14 +5397,14 @@ } }, "node_modules/ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", + "version": "6.12.6", + "resolved": 
"https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "dev": true, "dependencies": { "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" }, "funding": { @@ -5174,37 +5522,41 @@ "dev": true }, "node_modules/aria-query": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.1.3.tgz", - "integrity": "sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", "dev": true, "dependencies": { - "deep-equal": "^2.0.5" + "dequal": "^2.0.3" } }, "node_modules/array-buffer-byte-length": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz", - "integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==", + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz", + "integrity": "sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "is-array-buffer": "^3.0.1" + "call-bind": "^1.0.5", + "is-array-buffer": "^3.0.4" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": 
"sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.8.tgz", + "integrity": "sha512-itaWrbYbqpGXkGhZPGUulwnhVf5Hpy1xiCFsGqyIGglbBxmG5vSjxQen3/WGOjPpNEv1RtBLKxbmVXm8HpJStQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", "is-string": "^1.0.7" }, "engines": { @@ -5223,15 +5575,55 @@ "node": ">=8" } }, + "node_modules/array.prototype.findlast": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlast/-/array.prototype.findlast-1.2.5.tgz", + "integrity": "sha512-CVvd6FHg1Z3POpBLxO6E6zr+rSKEQ9L6rZHAaY7lLfhKsWYUBBOuMs0e9o24oopj6H+geRCX0YJ+TJLBK2eHyQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/array.prototype.findlastindex": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.5.tgz", + "integrity": "sha512-zfETvRFA8o7EiNn++N5f/kaCw221hrpGsDmcpndVupkPzEc1Wuf3VgC0qby1BbHs7f5DVYjgtEU2LLh5bqeGfQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-shim-unscopables": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/array.prototype.flat": { - "version": "1.3.1", - 
"resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz", + "integrity": "sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -5242,14 +5634,14 @@ } }, "node_modules/array.prototype.flatmap": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", - "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz", + "integrity": "sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", "es-shim-unscopables": "^1.0.0" }, "engines": { @@ -5259,17 +5651,51 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/array.prototype.tosorted": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz", - "integrity": "sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==", + "node_modules/array.prototype.toreversed": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/array.prototype.toreversed/-/array.prototype.toreversed-1.1.2.tgz", + "integrity": 
"sha512-wwDCoT4Ck4Cz7sLtgUmzR5UV3YF5mFHUlbChCzZBQZ+0m2cl/DH3tKgvphv1nKgFsJ48oCSg6p91q2Vm0I/ZMA==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0", - "get-intrinsic": "^1.1.3" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "es-shim-unscopables": "^1.0.0" + } + }, + "node_modules/array.prototype.tosorted": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.3.tgz", + "integrity": "sha512-/DdH4TiTmOKzyQbp/eadcCVexiCb36xJg7HshYOYJnNZFDj33GEv0P7GxsynpShhq4OLYJzbGcBDkLsDt7MnNg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.1.0", + "es-shim-unscopables": "^1.0.2" + } + }, + "node_modules/arraybuffer.prototype.slice": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz", + "integrity": "sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==", + "dev": true, + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", + "es-abstract": "^1.22.3", + "es-errors": "^1.2.1", + "get-intrinsic": "^1.2.3", + "is-array-buffer": "^3.0.4", + "is-shared-array-buffer": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, "node_modules/arrify": { @@ -5291,9 +5717,9 @@ } }, "node_modules/ast-types-flow": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz", - "integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==", + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.8.tgz", + "integrity": 
"sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==", "dev": true }, "node_modules/astral-regex": { @@ -5312,9 +5738,9 @@ "dev": true }, "node_modules/autoprefixer": { - "version": "10.4.14", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz", - "integrity": "sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==", + "version": "10.4.19", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.19.tgz", + "integrity": "sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==", "funding": [ { "type": "opencollective", @@ -5323,12 +5749,16 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "browserslist": "^4.21.5", - "caniuse-lite": "^1.0.30001464", - "fraction.js": "^4.2.0", + "browserslist": "^4.23.0", + "caniuse-lite": "^1.0.30001599", + "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", "picocolors": "^1.0.0", "postcss-value-parser": "^4.2.0" @@ -5344,10 +5774,13 @@ } }, "node_modules/available-typed-arrays": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", - "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": "sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", "dev": true, + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, "engines": { "node": ">= 0.4" }, @@ -5356,21 +5789,21 @@ } }, "node_modules/axe-core": { - "version": "4.6.3", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.6.3.tgz", - "integrity": 
"sha512-/BQzOX780JhsxDnPpH4ZiyrJAzcd8AfzFPkv+89veFSr1rcMjuq2JDCwypKaPeB6ljHp9KjXhPpjgCvQlWYuqg==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.7.0.tgz", + "integrity": "sha512-M0JtH+hlOL5pLQwHOLNYZaXuhqmvS8oExsqB1SBYgA4Dk7u/xx+YdGHXaK5pyUfed5mYXdlYiphWq3G8cRi5JQ==", "dev": true, "engines": { "node": ">=4" } }, "node_modules/axobject-query": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.1.1.tgz", - "integrity": "sha512-goKlv8DZrK9hUh975fnHzhNIO4jUnFCfv/dszV5VwUGDFjI6vQ2VwoyjYjYNEbBE8AH87TduWP5uyDR1D+Iteg==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.2.1.tgz", + "integrity": "sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==", "dev": true, "dependencies": { - "deep-equal": "^2.0.5" + "dequal": "^2.0.3" } }, "node_modules/babel-plugin-macros": { @@ -5389,42 +5822,42 @@ } }, "node_modules/babel-plugin-polyfill-corejs2": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz", - "integrity": "sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q==", + "version": "0.4.11", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz", + "integrity": "sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q==", "dev": true, "dependencies": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.3.3", - "semver": "^6.1.1" + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.2", + "semver": "^6.3.1" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, "node_modules/babel-plugin-polyfill-corejs3": { - "version": "0.6.0", - "resolved": 
"https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.6.0.tgz", - "integrity": "sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA==", + "version": "0.10.4", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.10.4.tgz", + "integrity": "sha512-25J6I8NGfa5YkCDogHRID3fVCadIR8/pGl1/spvCkzb6lVn6SR3ojpx9nOn9iEBcUsjY24AmdKm5khcfKdylcg==", "dev": true, "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.3", - "core-js-compat": "^3.25.1" + "@babel/helper-define-polyfill-provider": "^0.6.1", + "core-js-compat": "^3.36.1" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, "node_modules/babel-plugin-polyfill-regenerator": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.1.tgz", - "integrity": "sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw==", + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz", + "integrity": "sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg==", "dev": true, "dependencies": { - "@babel/helper-define-polyfill-provider": "^0.3.3" + "@babel/helper-define-polyfill-provider": "^0.6.2" }, "peerDependencies": { - "@babel/core": "^7.0.0-0" + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" } }, "node_modules/babel-plugin-transform-react-remove-prop-types": { @@ -5483,19 +5916,22 @@ ] }, "node_modules/big-integer": { - "version": "1.6.51", - "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz", - "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==", + "version": "1.6.52", + "resolved": 
"https://registry.npmjs.org/big-integer/-/big-integer-1.6.52.tgz", + "integrity": "sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==", "engines": { "node": ">=0.6" } }, "node_modules/binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", "engines": { "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/bl": { @@ -5567,9 +6003,9 @@ } }, "node_modules/boxen/node_modules/chalk": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.2.0.tgz", - "integrity": "sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==", + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.3.0.tgz", + "integrity": "sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==", "dev": true, "engines": { "node": "^12.17.0 || ^14.13 || >=16.0.0" @@ -5578,12 +6014,6 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/boxen/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, "node_modules/boxen/node_modules/string-width": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", @@ -5602,9 +6032,9 @@ } }, "node_modules/boxen/node_modules/strip-ansi": { - "version": "7.0.1", - 
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", "dev": true, "dependencies": { "ansi-regex": "^6.0.1" @@ -5646,12 +6076,12 @@ } }, "node_modules/brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "dependencies": { - "balanced-match": "^1.0.0" + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" } }, "node_modules/braces": { @@ -5681,9 +6111,9 @@ } }, "node_modules/browserslist": { - "version": "4.21.5", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.5.tgz", - "integrity": "sha512-tUkiguQGW7S3IhB7N+c2MV/HZPSCPAAiYBZXLsBhFB/PCy6ZKKsZrmBayHV9fdGV/ARIfJ14NkxKzRDjvp7L6w==", + "version": "4.23.0", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", + "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", "funding": [ { "type": "opencollective", @@ -5692,13 +6122,17 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { - "caniuse-lite": "^1.0.30001449", - "electron-to-chromium": "^1.4.284", - "node-releases": "^2.0.8", - "update-browserslist-db": "^1.0.10" + 
"caniuse-lite": "^1.0.30001587", + "electron-to-chromium": "^1.4.668", + "node-releases": "^2.0.14", + "update-browserslist-db": "^1.0.13" }, "bin": { "browserslist": "cli.js" @@ -5758,13 +6192,19 @@ } }, "node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", "dev": true, "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -5827,9 +6267,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001473", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001473.tgz", - "integrity": "sha512-ewDad7+D2vlyy+E4UJuVfiBsU69IL+8oVmTuZnH5Q6CIUbxNfI50uVpRHbUPDD6SUaN2o0Lh4DhTrvLG/Tn1yg==", + "version": "1.0.30001614", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001614.tgz", + "integrity": "sha512-jmZQ1VpmlRwHgdP1/uiKzgiAuGOfLEJsYFP4+GBou/QQ4U6IOJCB4NP1c+1p9RGLpwObcT94jA5/uO+F1vBbog==", "funding": [ { "type": "opencollective", @@ -5858,18 +6298,18 @@ } }, "node_modules/chai": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.7.tgz", - "integrity": "sha512-HLnAzZ2iupm25PlN0xFreAlBA5zaBSv3og0DdeGA4Ar6h6rJ3A0rolRUKJhSF2V10GZKDgWF/VmAEsNWjCRB+A==", + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", + "integrity": 
"sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", "dev": true, "dependencies": { "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^4.1.2", - "get-func-name": "^2.0.0", - "loupe": "^2.3.1", + "check-error": "^1.0.3", + "deep-eql": "^4.1.3", + "get-func-name": "^2.0.2", + "loupe": "^2.3.6", "pathval": "^1.1.1", - "type-detect": "^4.0.5" + "type-detect": "^4.0.8" }, "engines": { "node": ">=4" @@ -5912,24 +6352,21 @@ "dev": true }, "node_modules/check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", + "integrity": "sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==", "dev": true, + "dependencies": { + "get-func-name": "^2.0.2" + }, "engines": { "node": "*" } }, "node_modules/chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -5942,14 +6379,28 @@ "engines": { "node": ">= 8.10.0" }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, "optionalDependencies": { "fsevents": "~2.3.2" } }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + 
"integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==", + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", "funding": [ { "type": "github", @@ -5961,14 +6412,14 @@ } }, "node_modules/classcat": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz", - "integrity": "sha512-sbpkOw6z413p+HDGcBENe498WM9woqWHiJxCq7nvmxe9WmrUmqfAcxpIwAiMtM5Q3AhYkzXcNQHqsWq0mND51g==" + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz", + "integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==" }, "node_modules/classnames": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz", - "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==" + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.5.1.tgz", + "integrity": "sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==" }, "node_modules/cli-boxes": { "version": "3.0.0", @@ -5995,9 +6446,9 @@ } }, "node_modules/cli-spinners": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.7.0.tgz", - "integrity": "sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw==", + "version": "2.9.2", + "resolved": 
"https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", "dev": true, "engines": { "node": ">=6" @@ -6046,12 +6497,6 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/cli-truncate/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, "node_modules/cli-truncate/node_modules/is-fullwidth-code-point": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", @@ -6098,9 +6543,9 @@ } }, "node_modules/cli-truncate/node_modules/strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", "dev": true, "dependencies": { "ansi-regex": "^6.0.1" @@ -6157,6 +6602,23 @@ "node": ">=12" } }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/clone": { "version": "1.0.4", "resolved": 
"https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", @@ -6277,6 +6739,12 @@ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" }, + "node_modules/confbox": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.7.tgz", + "integrity": "sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==", + "dev": true + }, "node_modules/confusing-browser-globals": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz", @@ -6293,9 +6761,9 @@ } }, "node_modules/convert-source-map": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", - "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "dev": true }, "node_modules/cookie": { @@ -6308,12 +6776,12 @@ } }, "node_modules/core-js-compat": { - "version": "3.30.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.30.0.tgz", - "integrity": "sha512-P5A2h/9mRYZFIAP+5Ab8ns6083IyVpSclU74UNvbGVQ8VM7n3n3/g2yF3AkKQ9NXz2O+ioxLbEWKnDtgsFamhg==", + "version": "3.37.0", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.37.0.tgz", + "integrity": "sha512-vYq4L+T8aS5UuFg4UwDhc7YNRWVeVZwltad9C/jV3R2LgVOpS9BDr7l/WL6BN0dbV3k1XejPTHqqEzJgsa0frA==", "dev": true, "dependencies": { - "browserslist": "^4.21.5" + "browserslist": "^4.23.0" }, "funding": { "type": "opencollective", @@ -6321,9 +6789,9 @@ } }, "node_modules/core-js-pure": { - "version": "3.30.0", - "resolved": 
"https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.30.0.tgz", - "integrity": "sha512-+2KbMFGeBU0ln/csoPqTe0i/yfHbrd2EUhNMObsGtXMKS/RTtlkYyi+/3twLcevbgNR0yM/r0Psa3TEoQRpFMQ==", + "version": "3.37.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.37.0.tgz", + "integrity": "sha512-d3BrpyFr5eD4KcbRvQ3FTUx/KWmaDesr7+a3+1+P46IUnNoEt+oiLijPINZMEon7w9oGkIINWxrBAU9DEciwFQ==", "dev": true, "hasInstallScript": true, "funding": { @@ -6348,12 +6816,12 @@ } }, "node_modules/cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "version": "3.1.8", + "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.8.tgz", + "integrity": "sha512-cvA+JwZoU0Xq+h6WkMvAUqPEYy92Obet6UdKLfW60qn99ftItKjB5T+BkyWOFWe2pUyfQ+IJHmpOTznqk1M6Kg==", "dev": true, "dependencies": { - "node-fetch": "2.6.7" + "node-fetch": "^2.6.12" } }, "node_modules/cross-spawn": { @@ -6389,12 +6857,12 @@ } }, "node_modules/css-functions-list": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/css-functions-list/-/css-functions-list-3.1.0.tgz", - "integrity": "sha512-/9lCvYZaUbBGvYUgYGFJ4dcYiyqdhSjG7IPVluoV8A1ILjkF7ilmhp1OGUz8n+nmBcu0RNrQAzgD8B6FJbrt2w==", + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/css-functions-list/-/css-functions-list-3.2.2.tgz", + "integrity": "sha512-c+N0v6wbKVxTu5gOBBFkr9BEdBWaqqjQeiJ8QvSRIJOf+UxlJh930m8e6/WNeODIK0mYLFkoONrnj16i2EcvfQ==", "dev": true, "engines": { - "node": ">=12.22" + "node": ">=12 || >=16" } }, "node_modules/css-has-pseudo": { @@ -6477,13 +6945,19 @@ "dev": true }, "node_modules/cssdb": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-7.5.3.tgz", - "integrity": "sha512-NQNRhrEnS6cW+RU/foLphb6xI/MDA70bI3Cy6VxJU8ilxgyTYz1X9zUzFGVTG5nGPylcKAGIt/UNc4deT56lQQ==", - "funding": { - "type": 
"opencollective", - "url": "https://opencollective.com/csstools" - } + "version": "7.11.2", + "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-7.11.2.tgz", + "integrity": "sha512-lhQ32TFkc1X4eTefGfYPvgovRSzIMofHkigfH8nWtyRL4XJLsRhJFreRvEgKzept7x1rjBuy3J/MurXLaFxW/A==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + }, + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + } + ] }, "node_modules/cssesc": { "version": "3.0.0", @@ -6539,9 +7013,9 @@ } }, "node_modules/csstype": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, "node_modules/d3-color": { "version": "3.1.0", @@ -6659,10 +7133,61 @@ "node": ">=14" } }, + "node_modules/data-view-buffer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-buffer/-/data-view-buffer-1.0.1.tgz", + "integrity": "sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz", + "integrity": "sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": 
{ + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/data-view-byte-offset": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz", + "integrity": "sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", + "is-data-view": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/dayjs": { - "version": "1.11.7", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.7.tgz", - "integrity": "sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ==" + "version": "1.11.11", + "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.11.tgz", + "integrity": "sha512-okzr3f11N6WuqYtZSvm+F776mB41wRZMhKP+hc34YdW+KmtYYK9iqvHSwo2k9FEH3fhGXvOPV6yz2IcSrfRUDg==" }, "node_modules/debug": { "version": "4.3.4", @@ -6741,34 +7266,6 @@ "node": ">=6" } }, - "node_modules/deep-equal": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.0.tgz", - "integrity": "sha512-RdpzE0Hv4lhowpIUKKMJfeH6C1pXdtT1/it80ubgWqwI3qpuxUBpC1S4hnHg+zjnuOoDkzUtUCEEkG+XG5l3Mw==", - "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "es-get-iterator": "^1.1.2", - "get-intrinsic": "^1.1.3", - "is-arguments": "^1.1.1", - "is-array-buffer": "^3.0.1", - "is-date-object": "^1.0.5", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "isarray": "^2.0.5", - "object-is": "^1.1.5", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4", - "which-boxed-primitive": "^1.0.2", - "which-collection": "^1.0.1", - "which-typed-array": "^1.1.9" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/deep-extend": { "version": "0.6.0", 
"resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", @@ -6796,12 +7293,30 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dev": true, + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/define-properties": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz", - "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", "dev": true, "dependencies": { + "define-data-property": "^1.0.1", "has-property-descriptors": "^1.0.0", "object-keys": "^1.1.1" }, @@ -6821,6 +7336,15 @@ "node": ">=0.4.0" } }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "dev": true, + "engines": { + "node": ">=6" + } + }, "node_modules/detect-node": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", @@ -6833,18 +7357,18 @@ "dev": true }, "node_modules/diff": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz", - "integrity": 
"sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", "dev": true, "engines": { "node": ">=0.3.1" } }, "node_modules/diff-sequences": { - "version": "29.4.3", - "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz", - "integrity": "sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz", + "integrity": "sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } @@ -6913,6 +7437,7 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", + "deprecated": "Use your platform's native DOMException instead", "dev": true, "dependencies": { "webidl-conversions": "^7.0.0" @@ -6936,22 +7461,22 @@ } }, "node_modules/domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", "dependencies": { "dom-serializer": "^2.0.0", "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" + "domhandler": "^5.0.3" }, "funding": { "url": "https://github.com/fb55/domutils?sponsor=1" } }, "node_modules/downshift": { - "version": "7.6.0", - "resolved": 
"https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", - "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", + "version": "7.6.2", + "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.2.tgz", + "integrity": "sha512-iOv+E1Hyt3JDdL9yYcOgW7nZ7GQ2Uz6YbggwXvKUSleetYhU2nXD482Rz6CzvM4lvI1At34BYruKAL4swRGxaA==", "dependencies": { "@babel/runtime": "^7.14.8", "compute-scroll-into-view": "^2.0.4", @@ -6970,14 +7495,14 @@ "dev": true }, "node_modules/electron-to-chromium": { - "version": "1.4.349", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.349.tgz", - "integrity": "sha512-34LBfVDiL6byWorSmQOPwq4gD5wpN8Mhh5yPGQr67FbcxsfUS0BDJP9y6RykSgeWVUfSkN/2dChywnsrmKVyUg==" + "version": "1.4.752", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.752.tgz", + "integrity": "sha512-P3QJreYI/AUTcfBVrC4zy9KvnZWekViThgQMX/VpJ+IsOBbcX5JFpORM4qWapwWQ+agb2nYAOyn/4PMXOk0m2Q==" }, "node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", "dev": true }, "node_modules/end-of-stream": { @@ -6989,9 +7514,9 @@ } }, "node_modules/entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": 
"sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", "engines": { "node": ">=0.12" }, @@ -7017,45 +7542,57 @@ } }, "node_modules/es-abstract": { - "version": "1.21.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.2.tgz", - "integrity": "sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==", - "dev": true, - "dependencies": { - "array-buffer-byte-length": "^1.0.0", - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "es-set-tostringtag": "^2.0.1", + "version": "1.23.3", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz", + "integrity": "sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==", + "dev": true, + "dependencies": { + "array-buffer-byte-length": "^1.0.1", + "arraybuffer.prototype.slice": "^1.0.3", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "data-view-buffer": "^1.0.1", + "data-view-byte-length": "^1.0.1", + "data-view-byte-offset": "^1.0.0", + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "es-set-tostringtag": "^2.0.3", "es-to-primitive": "^1.2.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.2.0", - "get-symbol-description": "^1.0.0", + "function.prototype.name": "^1.1.6", + "get-intrinsic": "^1.2.4", + "get-symbol-description": "^1.0.2", "globalthis": "^1.0.3", "gopd": "^1.0.1", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - "has-proto": "^1.0.1", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", "has-symbols": "^1.0.3", - "internal-slot": "^1.0.5", - "is-array-buffer": "^3.0.2", + "hasown": "^2.0.2", + "internal-slot": "^1.0.7", + "is-array-buffer": "^3.0.4", "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", + "is-data-view": "^1.0.1", + "is-negative-zero": "^2.0.3", "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", + 
"is-shared-array-buffer": "^1.0.3", "is-string": "^1.0.7", - "is-typed-array": "^1.1.10", + "is-typed-array": "^1.1.13", "is-weakref": "^1.0.2", - "object-inspect": "^1.12.3", + "object-inspect": "^1.13.1", "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trim": "^1.2.7", - "string.prototype.trimend": "^1.0.6", - "string.prototype.trimstart": "^1.0.6", - "typed-array-length": "^1.0.4", + "object.assign": "^4.1.5", + "regexp.prototype.flags": "^1.5.2", + "safe-array-concat": "^1.1.2", + "safe-regex-test": "^1.0.3", + "string.prototype.trim": "^1.2.9", + "string.prototype.trimend": "^1.0.8", + "string.prototype.trimstart": "^1.0.8", + "typed-array-buffer": "^1.0.2", + "typed-array-byte-length": "^1.0.1", + "typed-array-byte-offset": "^1.0.2", + "typed-array-length": "^1.0.6", "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.9" + "which-typed-array": "^1.1.15" }, "engines": { "node": ">= 0.4" @@ -7064,49 +7601,87 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/es-get-iterator": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", - "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "has-symbols": "^1.0.3", - "is-arguments": "^1.1.1", - "is-map": "^2.0.2", - "is-set": "^2.0.2", - "is-string": "^1.0.7", - "isarray": "^2.0.5", - "stop-iteration-iterator": "^1.0.0" + "get-intrinsic": "^1.2.4" }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "engines": { + "node": ">= 
0.4" } }, - "node_modules/es-set-tostringtag": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", - "integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==", + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "dev": true, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-iterator-helpers": { + "version": "1.0.19", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.0.19.tgz", + "integrity": "sha512-zoMwbCcH5hwUkKJkT8kDIBZSz9I6mVG//+lDCinLCGov4+r7NIy0ld8o03M0cJxl2spVf6ESYVS6/gpIfq1FFw==", "dev": true, "dependencies": { - "get-intrinsic": "^1.1.3", - "has": "^1.0.3", - "has-tostringtag": "^1.0.0" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.3", + "es-errors": "^1.3.0", + "es-set-tostringtag": "^2.0.3", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "has-property-descriptors": "^1.0.2", + "has-proto": "^1.0.3", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "iterator.prototype": "^1.1.2", + "safe-array-concat": "^1.1.2" }, "engines": { "node": ">= 0.4" } }, - "node_modules/es-shim-unscopables": { + "node_modules/es-object-atoms": { "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.0.0.tgz", + "integrity": "sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==", "dev": true, "dependencies": { - "has": "^1.0.3" - } - }, + "es-errors": 
"^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz", + "integrity": "sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==", + "dev": true, + "dependencies": { + "get-intrinsic": "^1.2.4", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.1" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-shim-unscopables": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz", + "integrity": "sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw==", + "dev": true, + "dependencies": { + "hasown": "^2.0.0" + } + }, "node_modules/es-to-primitive": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", @@ -7125,9 +7700,9 @@ } }, "node_modules/esbuild": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.15.tgz", - "integrity": "sha512-LBUV2VsUIc/iD9ME75qhT4aJj0r75abCVS0jakhFzOtR7TQsqQA5w0tZ+KTKnwl3kXE0MhskNdHDh/I5aCR1Zw==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", + "integrity": "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", "dev": true, "hasInstallScript": true, "bin": { @@ -7137,56 +7712,59 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/android-arm": "0.17.15", - "@esbuild/android-arm64": "0.17.15", - "@esbuild/android-x64": "0.17.15", - "@esbuild/darwin-arm64": "0.17.15", - "@esbuild/darwin-x64": "0.17.15", - "@esbuild/freebsd-arm64": "0.17.15", - "@esbuild/freebsd-x64": "0.17.15", - "@esbuild/linux-arm": "0.17.15", - "@esbuild/linux-arm64": "0.17.15", - "@esbuild/linux-ia32": "0.17.15", - "@esbuild/linux-loong64": "0.17.15", - "@esbuild/linux-mips64el": 
"0.17.15", - "@esbuild/linux-ppc64": "0.17.15", - "@esbuild/linux-riscv64": "0.17.15", - "@esbuild/linux-s390x": "0.17.15", - "@esbuild/linux-x64": "0.17.15", - "@esbuild/netbsd-x64": "0.17.15", - "@esbuild/openbsd-x64": "0.17.15", - "@esbuild/sunos-x64": "0.17.15", - "@esbuild/win32-arm64": "0.17.15", - "@esbuild/win32-ia32": "0.17.15", - "@esbuild/win32-x64": "0.17.15" + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" } }, "node_modules/escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", + "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", "engines": { "node": ">=6" } }, "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, "engines": { - "node": ">=0.8.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/escodegen": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.0.0.tgz", - "integrity": "sha512-mmHKys/C8BFUGI+MAWNcSYoORYLMdPzjrknd2Vc+bUsjN5bXcr8EhrNB+UTqfL1y3I9c4fw2ihgtMPQLBRiQxw==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.1.0.tgz", + "integrity": "sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==", "dev": true, "dependencies": { "esprima": "^4.0.1", "estraverse": "^5.2.0", - "esutils": "^2.0.2", - "optionator": "^0.8.1" + "esutils": "^2.0.2" }, "bin": { "escodegen": "bin/escodegen.js", @@ -7200,27 +7778,28 @@ } }, "node_modules/eslint": { - "version": "8.37.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.37.0.tgz", - "integrity": "sha512-NU3Ps9nI05GUoVMxcZx1J8CNR6xOvUT4jAUMH5+z8lpp3aEdPVCImKw6PWG4PY+Vfkpr+jvMpxs/qoE7wq0sPw==", + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.0.2", - "@eslint/js": "8.37.0", - "@humanwhocodes/config-array": "^0.11.8", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", - "ajv": "^6.10.0", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", "chalk": "^4.0.0", 
"cross-spawn": "^7.0.2", "debug": "^4.3.2", "doctrine": "^3.0.0", "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.1.1", - "eslint-visitor-keys": "^3.4.0", - "espree": "^9.5.1", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", "esquery": "^1.4.2", "esutils": "^2.0.2", "fast-deep-equal": "^3.1.3", @@ -7228,22 +7807,19 @@ "find-up": "^5.0.0", "glob-parent": "^6.0.2", "globals": "^13.19.0", - "grapheme-splitter": "^1.0.4", + "graphemer": "^1.4.0", "ignore": "^5.2.0", - "import-fresh": "^3.0.0", "imurmurhash": "^0.1.4", "is-glob": "^4.0.0", "is-path-inside": "^3.0.3", - "js-sdsl": "^4.1.4", "js-yaml": "^4.1.0", "json-stable-stringify-without-jsonify": "^1.0.1", "levn": "^0.4.1", "lodash.merge": "^4.6.2", "minimatch": "^3.1.2", "natural-compare": "^1.4.0", - "optionator": "^0.9.1", + "optionator": "^0.9.3", "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", "text-table": "^0.2.0" }, "bin": { @@ -7285,14 +7861,14 @@ } }, "node_modules/eslint-import-resolver-node": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", - "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz", + "integrity": "sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==", "dev": true, "dependencies": { "debug": "^3.2.7", - "is-core-module": "^2.11.0", - "resolve": "^1.22.1" + "is-core-module": "^2.13.0", + "resolve": "^1.22.4" } }, "node_modules/eslint-import-resolver-node/node_modules/debug": { @@ -7305,9 +7881,9 @@ } }, "node_modules/eslint-module-utils": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz", - "integrity": 
"sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.8.1.tgz", + "integrity": "sha512-rXDXR3h7cs7dy9RNpUlQf80nX31XWJEyGq1tRMo+6GsO5VmTe4UTwtmonAD4ZkAsrfMVDA2wlGJ3790Ys+D49Q==", "dev": true, "dependencies": { "debug": "^3.2.7" @@ -7349,26 +7925,28 @@ } }, "node_modules/eslint-plugin-import": { - "version": "2.27.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", - "integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", + "version": "2.29.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.29.1.tgz", + "integrity": "sha512-BbPC0cuExzhiMo4Ff1BTVwHpjjv28C5R+btTOGaCRC7UEz801up0JadwkeSk5Ued6TG34uaczuVuH6qyy5YUxw==", "dev": true, "dependencies": { - "array-includes": "^3.1.6", - "array.prototype.flat": "^1.3.1", - "array.prototype.flatmap": "^1.3.1", + "array-includes": "^3.1.7", + "array.prototype.findlastindex": "^1.2.3", + "array.prototype.flat": "^1.3.2", + "array.prototype.flatmap": "^1.3.2", "debug": "^3.2.7", "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.7", - "eslint-module-utils": "^2.7.4", - "has": "^1.0.3", - "is-core-module": "^2.11.0", + "eslint-import-resolver-node": "^0.3.9", + "eslint-module-utils": "^2.8.0", + "hasown": "^2.0.0", + "is-core-module": "^2.13.1", "is-glob": "^4.0.3", "minimatch": "^3.1.2", - "object.values": "^1.1.6", - "resolve": "^1.22.1", - "semver": "^6.3.0", - "tsconfig-paths": "^3.14.1" + "object.fromentries": "^2.0.7", + "object.groupby": "^1.0.1", + "object.values": "^1.1.7", + "semver": "^6.3.1", + "tsconfig-paths": "^3.15.0" }, "engines": { "node": ">=4" @@ -7377,16 +7955,6 @@ "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8" } }, - "node_modules/eslint-plugin-import/node_modules/brace-expansion": { - "version": 
"1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/eslint-plugin-import/node_modules/debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", @@ -7408,18 +7976,6 @@ "node": ">=0.10.0" } }, - "node_modules/eslint-plugin-import/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/eslint-plugin-jest": { "version": "25.7.0", "resolved": "https://registry.npmjs.org/eslint-plugin-jest/-/eslint-plugin-jest-25.7.0.tgz", @@ -7445,27 +8001,27 @@ } }, "node_modules/eslint-plugin-jsx-a11y": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.7.1.tgz", - "integrity": "sha512-63Bog4iIethyo8smBklORknVjB0T2dwB8Mr/hIC+fBS0uyHdYYpzM/Ed+YC8VxTjlXHEWFOdmgwcDn1U2L9VCA==", - "dev": true, - "dependencies": { - "@babel/runtime": "^7.20.7", - "aria-query": "^5.1.3", - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "ast-types-flow": "^0.0.7", - "axe-core": "^4.6.2", - "axobject-query": "^3.1.1", + "version": "6.8.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.8.0.tgz", + "integrity": "sha512-Hdh937BS3KdwwbBaKd5+PLCOmYY6U4f2h9Z2ktwtNKvIdIEu137rjYbcb9ApSbVJfWxANNuiKTD/9tOKjK9qOA==", + "dev": true, + "dependencies": { + "@babel/runtime": "^7.23.2", + "aria-query": "^5.3.0", + "array-includes": "^3.1.7", + "array.prototype.flatmap": "^1.3.2", + 
"ast-types-flow": "^0.0.8", + "axe-core": "=4.7.0", + "axobject-query": "^3.2.1", "damerau-levenshtein": "^1.0.8", "emoji-regex": "^9.2.2", - "has": "^1.0.3", - "jsx-ast-utils": "^3.3.3", - "language-tags": "=1.0.5", + "es-iterator-helpers": "^1.0.15", + "hasown": "^2.0.0", + "jsx-ast-utils": "^3.3.5", + "language-tags": "^1.0.9", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "semver": "^6.3.0" + "object.entries": "^1.1.7", + "object.fromentries": "^2.0.7" }, "engines": { "node": ">=4.0" @@ -7474,55 +8030,30 @@ "eslint": "^3 || ^4 || ^5 || ^6 || ^7 || ^8" } }, - "node_modules/eslint-plugin-jsx-a11y/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/eslint-plugin-jsx-a11y/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "node_modules/eslint-plugin-jsx-a11y/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/eslint-plugin-react": { - "version": "7.32.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.32.2.tgz", - "integrity": "sha512-t2fBMa+XzonrrNkyVirzKlvn5RXzzPwRHtMvLAtVZrt8oxgnTQaYbU6SXTOO1mwQgp1y5+toMSKInnzGr0Knqg==", + "version": "7.34.1", + 
"resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.34.1.tgz", + "integrity": "sha512-N97CxlouPT1AHt8Jn0mhhN2RrADlUAsk1/atcT2KyA/l9Q/E6ll7OIGwNumFmWfZ9skV3XXccYS19h80rHtgkw==", "dev": true, "dependencies": { - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "array.prototype.tosorted": "^1.1.1", + "array-includes": "^3.1.7", + "array.prototype.findlast": "^1.2.4", + "array.prototype.flatmap": "^1.3.2", + "array.prototype.toreversed": "^1.1.2", + "array.prototype.tosorted": "^1.1.3", "doctrine": "^2.1.0", + "es-iterator-helpers": "^1.0.17", "estraverse": "^5.3.0", "jsx-ast-utils": "^2.4.1 || ^3.0.0", "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "object.hasown": "^1.1.2", - "object.values": "^1.1.6", + "object.entries": "^1.1.7", + "object.fromentries": "^2.0.7", + "object.hasown": "^1.1.3", + "object.values": "^1.1.7", "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.4", - "semver": "^6.3.0", - "string.prototype.matchall": "^4.0.8" + "resolve": "^2.0.0-next.5", + "semver": "^6.3.1", + "string.prototype.matchall": "^4.0.10" }, "engines": { "node": ">=4" @@ -7532,9 +8063,9 @@ } }, "node_modules/eslint-plugin-react-hooks": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", - "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.2.tgz", + "integrity": "sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==", "dev": true, "engines": { "node": ">=10" @@ -7543,16 +8074,6 @@ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0" } }, - "node_modules/eslint-plugin-react/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/eslint-plugin-react/node_modules/doctrine": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", @@ -7565,25 +8086,13 @@ "node": ">=0.10.0" } }, - "node_modules/eslint-plugin-react/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/eslint-plugin-react/node_modules/resolve": { - "version": "2.0.0-next.4", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz", - "integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==", + "version": "2.0.0-next.5", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", + "integrity": "sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA==", "dev": true, "dependencies": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -7595,12 +8104,12 @@ } }, "node_modules/eslint-plugin-testing-library": { - "version": "5.10.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-5.10.2.tgz", - "integrity": "sha512-f1DmDWcz5SDM+IpCkEX0lbFqrrTs8HRsEElzDEqN/EBI0hpRj8Cns5+IVANXswE8/LeybIJqPAOQIFu2j5Y5sw==", + "version": "5.11.1", + "resolved": 
"https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-5.11.1.tgz", + "integrity": "sha512-5eX9e1Kc2PqVRed3taaLnAAqPZGEX75C+M/rXzUAI3wIg/ZxzUm1OVAwfe/O+vE+6YXOLetSe9g5GKD2ecXipw==", "dev": true, "dependencies": { - "@typescript-eslint/utils": "^5.43.0" + "@typescript-eslint/utils": "^5.58.0" }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0", @@ -7611,9 +8120,9 @@ } }, "node_modules/eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", "dev": true, "dependencies": { "esrecurse": "^4.3.0", @@ -7621,12 +8130,15 @@ }, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" } }, "node_modules/eslint-visitor-keys": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.0.tgz", - "integrity": "sha512-HPpKPUBQcAsZOsHAFwTtIKcYlCje62XB7SEAcxjtmW6TD1WVpkS6i6/hOVtTZIl4zGj/mBqpFVGvaDneik+VoQ==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -7635,36 +8147,25 @@ "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + 
"node_modules/eslint/node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", "dev": true, "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/eslint/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "node_modules/eslint/node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", "dev": true, "engines": { "node": ">=10" @@ -7673,284 +8174,91 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint/node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "node_modules/espree": { + "version": "9.6.1", + 
"resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", "dev": true, "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" }, "engines": { - "node": ">=10" + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "url": "https://opencollective.com/eslint" } }, - "node_modules/eslint/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", "dev": true, - "dependencies": { - "is-glob": "^4.0.3" + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" }, "engines": { - "node": ">=10.13.0" + "node": ">=4" } }, - "node_modules/eslint/node_modules/globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", + "node_modules/esquery": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", + "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", "dev": true, "dependencies": { - "type-fest": "^0.20.2" + "estraverse": "^5.1.0" }, "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=0.10" } }, - 
"node_modules/eslint/node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/eslint/node_modules/levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", "dev": true, "dependencies": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" + "estraverse": "^5.2.0" }, "engines": { - "node": ">= 0.8.0" + "node": ">=4.0" } }, - "node_modules/eslint/node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", "dev": true, - "dependencies": { - "p-locate": "^5.0.0" - }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4.0" } }, - "node_modules/eslint/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "dev": true + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, "engines": { - "node": "*" + "node": ">=0.10.0" } }, - "node_modules/eslint/node_modules/optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": "sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", + "node_modules/events": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", "dev": true, - "dependencies": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - }, "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "dependencies": { - "yocto-queue": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, 
- "dependencies": { - "p-limit": "^3.0.2" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "dependencies": { - "prelude-ls": "^1.2.1" - }, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/eslint/node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/eslint/node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true, - "engines": { - "node": ">=10" - }, - 
"funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/espree": { - "version": "9.5.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.1.tgz", - "integrity": "sha512-5yxtHSZXRSW5pvv3hAlXM5+/Oswi1AUFqBmbibKb5s6bp3rGIDkyXU6xCoyuuLhijr4SFwPrXRoZjz0AZDN9tg==", - "dev": true, - "dependencies": { - "acorn": "^8.8.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.0" - }, - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true, - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/esquery": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", - "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", - "dev": true, - "dependencies": { - "estraverse": "^5.1.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "dependencies": { - "estraverse": "^5.2.0" - }, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true, - "engines": { - "node": ">=4.0" - } - }, - "node_modules/estree-walker": { - "version": "2.0.2", - "resolved": 
"https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "dev": true - }, - "node_modules/esutils": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "dev": true, - "engines": { - "node": ">=0.8.x" + "node": ">=0.8.x" } }, "node_modules/execa": { @@ -7977,15 +8285,15 @@ } }, "node_modules/expect": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/expect/-/expect-29.5.0.tgz", - "integrity": "sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/expect/-/expect-29.7.0.tgz", + "integrity": "sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==", "dependencies": { - "@jest/expect-utils": "^29.5.0", - "jest-get-type": "^29.4.3", - "jest-matcher-utils": "^29.5.0", - "jest-message-util": "^29.5.0", - "jest-util": "^29.5.0" + "@jest/expect-utils": "^29.7.0", + "jest-get-type": "^29.6.3", + "jest-matcher-utils": "^29.7.0", + "jest-message-util": "^29.7.0", + "jest-util": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -8012,9 +8320,9 @@ "dev": true }, "node_modules/fast-glob": { - "version": "3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", + "version": "3.3.2", + 
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dev": true, "dependencies": { "@nodelib/fs.stat": "^2.0.2", @@ -8027,6 +8335,18 @@ "node": ">=8.6.0" } }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, "node_modules/fast-json-stable-stringify": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", @@ -8058,9 +8378,9 @@ } }, "node_modules/fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", "dev": true, "dependencies": { "reusify": "^1.0.4" @@ -8081,6 +8401,15 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "engines": { + "node": ">=0.8.0" + } + }, "node_modules/file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", @@ -8122,25 +8451,29 @@ } }, "node_modules/find-up": { - "version": 
"4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", "dev": true, "dependencies": { - "locate-path": "^5.0.0", + "locate-path": "^6.0.0", "path-exists": "^4.0.0" }, "engines": { - "node": ">=8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", "dev": true, "dependencies": { - "flatted": "^3.1.0", + "flatted": "^3.2.9", + "keyv": "^4.5.3", "rimraf": "^3.0.2" }, "engines": { @@ -8148,9 +8481,9 @@ } }, "node_modules/flatted": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", "dev": true }, "node_modules/for-each": { @@ -8162,6 +8495,34 @@ "is-callable": "^1.1.3" } }, + "node_modules/foreground-child": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", + "integrity": 
"sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/form-data": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", @@ -8177,15 +8538,15 @@ } }, "node_modules/fraction.js": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", - "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==", + "version": "4.3.7", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", + "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==", "engines": { "node": "*" }, "funding": { "type": "patreon", - "url": "https://www.patreon.com/infusion" + "url": "https://github.com/sponsors/rawify" } }, "node_modules/fs.realpath": { @@ -8194,9 +8555,9 @@ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, "node_modules/fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", "hasInstallScript": true, "optional": true, "os": [ @@ -8207,21 +8568,24 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/function.prototype.name": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": "sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.6.tgz", + "integrity": "sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" + "define-properties": "^1.2.0", + "es-abstract": "^1.22.1", + "functions-have-names": "^1.2.3" }, "engines": { "node": ">= 0.4" @@ -8267,14 +8631,19 @@ } }, "node_modules/get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": 
"sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", "dev": true, "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -8293,13 +8662,14 @@ } }, "node_modules/get-symbol-description": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", + "integrity": "sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" + "call-bind": "^1.0.5", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4" }, "engines": { "node": ">= 0.4" @@ -8328,20 +8698,42 @@ } }, "node_modules/glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, "dependencies": { - "is-glob": "^4.0.1" + "is-glob": "^4.0.3" }, "engines": { - "node": ">= 6" + "node": ">=10.13.0" } }, - "node_modules/global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": 
"sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", + "node_modules/glob/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/global-modules": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", + "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", "dev": true, "dependencies": { "global-prefix": "^3.0.0" @@ -8386,12 +8778,13 @@ } }, "node_modules/globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", "dev": true, "dependencies": { - "define-properties": "^1.1.3" + "define-properties": "^1.2.1", + "gopd": "^1.0.1" }, "engines": { "node": ">= 0.4" @@ -8443,10 +8836,10 @@ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" }, - 
"node_modules/grapheme-splitter": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", - "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", "dev": true }, "node_modules/graphql": { @@ -8467,18 +8860,6 @@ "node": ">=6" } }, - "node_modules/has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "dependencies": { - "function-bind": "^1.1.1" - }, - "engines": { - "node": ">= 0.4.0" - } - }, "node_modules/has-bigints": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", @@ -8497,21 +8878,21 @@ } }, "node_modules/has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", "dev": true, "dependencies": { - "get-intrinsic": "^1.1.1" + "es-define-property": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": 
"sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", "dev": true, "engines": { "node": ">= 0.4" @@ -8533,12 +8914,12 @@ } }, "node_modules/has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "dev": true, "dependencies": { - "has-symbols": "^1.0.2" + "has-symbols": "^1.0.3" }, "engines": { "node": ">= 0.4" @@ -8547,6 +8928,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "dev": true, + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/headers-utils": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/headers-utils/-/headers-utils-3.0.2.tgz", @@ -8559,9 +8952,9 @@ "integrity": "sha512-r7wgUPQI9tr3jFDn3XT36qsNwEIZYcfgz4mkKEA6E4nn5p86y+u1EZjazIG4TRkl5/gmGRtkBUiZW81g029RIw==" }, "node_modules/highcharts-react-official": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/highcharts-react-official/-/highcharts-react-official-3.2.0.tgz", - "integrity": "sha512-71IJZsLmEboYFjONpwC3NRsg6JKvtKYtS5Si3e6s6MLRSOFNOY8KILTkzvO36kjpeR/A0X3/kvvewE+GMPpkjw==", + "version": "3.2.1", + "resolved": 
"https://registry.npmjs.org/highcharts-react-official/-/highcharts-react-official-3.2.1.tgz", + "integrity": "sha512-hyQTX7ezCxl7JqumaWiGsroGWalzh24GedQIgO3vJbkGOZ6ySRAltIYjfxhrq4HszJOySZegotEF7v+haQ75UA==", "peerDependencies": { "highcharts": ">=6.0.0", "react": ">=16.8.0" @@ -8631,9 +9024,9 @@ } }, "node_modules/html-tags": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.0.tgz", - "integrity": "sha512-mH3dWNbvfCKcAEysbpD7wvtIJ6ImPog8aFhfzqog9gCN8CJFhKjLDtjpohG3IxYRLqHMJ1PWpBvnSMkFJBQ6Jg==", + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz", + "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==", "dev": true, "engines": { "node": ">=8" @@ -8720,18 +9113,18 @@ ] }, "node_modules/ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", "dev": true, "engines": { "node": ">= 4" } }, "node_modules/immutable": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.0.tgz", - "integrity": "sha512-0AOCmOip+xgJwEVTQj1EfiDDOkPmuyllDuTuEX+DDXUgapLAsBIfkg3sxCYyCEA8mQqZrrxPUGjcOQ2JS3WLkg==" + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.5.tgz", + "integrity": "sha512-8eabxkth9gZatlwl5TBuJnCsoTADlL6ftEr7A4qgdaTsPyreilDSnUk57SO+jfKcNtxPa22U5KK6DSeAYhpBJw==" }, "node_modules/import-fresh": { "version": "3.3.0", @@ -8749,15 +9142,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/import-fresh/node_modules/resolve-from": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true, - "engines": { - "node": ">=4" - } - }, "node_modules/import-lazy": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", @@ -8806,9 +9190,9 @@ "dev": true }, "node_modules/inquirer": { - "version": "8.2.5", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.5.tgz", - "integrity": "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==", + "version": "8.2.6", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.6.tgz", + "integrity": "sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg==", "dev": true, "dependencies": { "ansi-escapes": "^4.2.1", @@ -8825,34 +9209,34 @@ "string-width": "^4.1.0", "strip-ansi": "^6.0.0", "through": "^2.3.6", - "wrap-ansi": "^7.0.0" + "wrap-ansi": "^6.0.1" }, "engines": { "node": ">=12.0.0" } }, "node_modules/internal-slot": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz", - "integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.7.tgz", + "integrity": "sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==", "dev": true, "dependencies": { - "get-intrinsic": "^1.2.0", - "has": "^1.0.3", + "es-errors": "^1.3.0", + "hasown": "^2.0.0", "side-channel": "^1.0.4" }, "engines": { "node": ">= 0.4" } }, - "node_modules/is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": 
"sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "node_modules/is-array-buffer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.4.tgz", + "integrity": "sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==", "dev": true, "dependencies": { "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" + "get-intrinsic": "^1.2.1" }, "engines": { "node": ">= 0.4" @@ -8861,26 +9245,27 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-array-buffer": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz", - "integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==", + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true + }, + "node_modules/is-async-function": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", + "integrity": "sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.2.0", - "is-typed-array": "^1.1.10" + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "dev": true - }, "node_modules/is-bigint": { "version": "1.0.4", "resolved": 
"https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", @@ -8933,12 +9318,27 @@ } }, "node_modules/is-core-module": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", - "integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", + "version": "2.13.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", + "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "dev": true, + "dependencies": { + "hasown": "^2.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-data-view": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-data-view/-/is-data-view-1.0.1.tgz", + "integrity": "sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==", "dev": true, "dependencies": { - "has": "^1.0.3" + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -8982,6 +9382,18 @@ "node": ">=0.10.0" } }, + "node_modules/is-finalizationregistry": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz", + "integrity": "sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -8991,6 +9403,21 @@ "node": ">=8" } }, + "node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": 
"sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dev": true, + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -9012,18 +9439,21 @@ } }, "node_modules/is-map": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", - "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.3.tgz", + "integrity": "sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-negative-zero": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.3.tgz", + "integrity": "sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==", "dev": true, "engines": { "node": ">= 0.4" @@ -9132,21 +9562,27 @@ } }, "node_modules/is-set": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", - "integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.3.tgz", + "integrity": "sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg==", "dev": true, + "engines": { + 
"node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/is-shared-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz", + "integrity": "sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2" + "call-bind": "^1.0.7" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -9195,16 +9631,12 @@ } }, "node_modules/is-typed-array": { - "version": "1.1.10", - "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.13.tgz", + "integrity": "sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==", "dev": true, "dependencies": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" + "which-typed-array": "^1.1.14" }, "engines": { "node": ">= 0.4" @@ -9226,10 +9658,13 @@ } }, "node_modules/is-weakmap": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", - "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.2.tgz", + "integrity": 
"sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -9247,13 +9682,16 @@ } }, "node_modules/is-weakset": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", - "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.3.tgz", + "integrity": "sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -9288,15 +9726,46 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, + "node_modules/iterator.prototype": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/iterator.prototype/-/iterator.prototype-1.1.2.tgz", + "integrity": "sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w==", + "dev": true, + "dependencies": { + "define-properties": "^1.2.1", + "get-intrinsic": "^1.2.1", + "has-symbols": "^1.0.3", + "reflect.getprototypeof": "^1.0.4", + "set-function-name": "^2.0.1" + } + }, + "node_modules/jackspeak": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", + "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "dev": true, + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + 
"@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/jest-diff": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.5.0.tgz", - "integrity": "sha512-LtxijLLZBduXnHSniy0WMdaHjmQnt3g5sa16W4p0HqukYTTsyTW3GD1q41TyGl5YFXj/5B2U6dlh5FM1LIMgxw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.7.0.tgz", + "integrity": "sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==", "dependencies": { "chalk": "^4.0.0", - "diff-sequences": "^29.4.3", - "jest-get-type": "^29.4.3", - "pretty-format": "^29.5.0" + "diff-sequences": "^29.6.3", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -9314,11 +9783,11 @@ } }, "node_modules/jest-diff/node_modules/pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dependencies": { - "@jest/schemas": "^29.4.3", + "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", "react-is": "^18.0.0" }, @@ -9327,27 +9796,27 @@ } }, "node_modules/jest-diff/node_modules/react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" }, "node_modules/jest-get-type": { - "version": "29.4.3", - "resolved": 
"https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.4.3.tgz", - "integrity": "sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==", + "version": "29.6.3", + "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.6.3.tgz", + "integrity": "sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==", "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, "node_modules/jest-matcher-utils": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.5.0.tgz", - "integrity": "sha512-lecRtgm/rjIK0CQ7LPQwzCs2VwW6WAahA55YBuI+xqmhm7LAaxokSB8C97yJeYyT+HvQkH741StzpU41wohhWw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz", + "integrity": "sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==", "dependencies": { "chalk": "^4.0.0", - "jest-diff": "^29.5.0", - "jest-get-type": "^29.4.3", - "pretty-format": "^29.5.0" + "jest-diff": "^29.7.0", + "jest-get-type": "^29.6.3", + "pretty-format": "^29.7.0" }, "engines": { "node": "^14.15.0 || ^16.10.0 || >=18.0.0" @@ -9365,11 +9834,11 @@ } }, "node_modules/jest-matcher-utils/node_modules/pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dependencies": { - "@jest/schemas": "^29.4.3", + "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", "react-is": "^18.0.0" }, @@ -9378,22 +9847,22 @@ } }, "node_modules/jest-matcher-utils/node_modules/react-is": { - 
"version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" }, "node_modules/jest-message-util": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.5.0.tgz", - "integrity": "sha512-Kijeg9Dag6CKtIDA7O21zNTACqD5MD/8HfIV8pdD94vFyFuer52SigdC3IQMhab3vACxXMiFk+yMHNdbqtyTGA==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.7.0.tgz", + "integrity": "sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==", "dependencies": { "@babel/code-frame": "^7.12.13", - "@jest/types": "^29.5.0", + "@jest/types": "^29.6.3", "@types/stack-utils": "^2.0.0", "chalk": "^4.0.0", "graceful-fs": "^4.2.9", "micromatch": "^4.0.4", - "pretty-format": "^29.5.0", + "pretty-format": "^29.7.0", "slash": "^3.0.0", "stack-utils": "^2.0.3" }, @@ -9413,11 +9882,11 @@ } }, "node_modules/jest-message-util/node_modules/pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.7.0.tgz", + "integrity": "sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==", "dependencies": { - "@jest/schemas": "^29.4.3", + "@jest/schemas": "^29.6.3", "ansi-styles": "^5.0.0", "react-is": "^18.0.0" }, @@ -9426,16 +9895,16 @@ } }, "node_modules/jest-message-util/node_modules/react-is": { - "version": 
"18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==" }, "node_modules/jest-util": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz", - "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==", + "version": "29.7.0", + "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.7.0.tgz", + "integrity": "sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==", "dependencies": { - "@jest/types": "^29.5.0", + "@jest/types": "^29.6.3", "@types/node": "*", "chalk": "^4.0.0", "ci-info": "^3.2.0", @@ -9447,9 +9916,9 @@ } }, "node_modules/jiti": { - "version": "1.18.2", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz", - "integrity": "sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg==", + "version": "1.21.0", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz", + "integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==", "dev": true, "bin": { "jiti": "bin/jiti.js" @@ -9464,16 +9933,6 @@ "node": ">=0.10.0" } }, - "node_modules/js-sdsl": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.0.tgz", - "integrity": "sha512-FfVSdx6pJ41Oa+CF7RDaFmTnCaFhua+SNYQX74riGOpl96x+2jQCqEfQ2bnXu/5DPCqlRuiqyvTJM0Qjz26IVg==", - "dev": true, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, "node_modules/js-sha3": { "version": "0.8.0", "resolved": 
"https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", @@ -9543,22 +10002,10 @@ "node": ">=8" } }, - "node_modules/jsdoc/node_modules/strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/jsdom": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-21.1.1.tgz", - "integrity": "sha512-Jjgdmw48RKcdAIQyUD1UdBh2ecH7VqwaXPN3ehoZN6MqgVbMn+lRm1aAT1AsdJRAJpwfa4IpwgzySn61h2qu3w==", + "version": "21.1.2", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-21.1.2.tgz", + "integrity": "sha512-sCpFmK2jv+1sjff4u7fzft+pUh2KSUbUrEHYHyfSIbGTIcmnjyp83qg6qLwdJ/I3LpTXx33ACxeRL7Lsyc6lGQ==", "dev": true, "dependencies": { "abab": "^2.0.6", @@ -9574,7 +10021,7 @@ "http-proxy-agent": "^5.0.0", "https-proxy-agent": "^5.0.1", "is-potential-custom-element-name": "^1.0.1", - "nwsapi": "^2.2.2", + "nwsapi": "^2.2.4", "parse5": "^7.1.2", "rrweb-cssom": "^0.6.0", "saxes": "^6.0.0", @@ -9612,6 +10059,12 @@ "node": ">=4" } }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true + }, "node_modules/json-parse-even-better-errors": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", @@ -9619,9 +10072,9 @@ "dev": true }, "node_modules/json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": 
"sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, "node_modules/json-stable-stringify-without-jsonify": { @@ -9642,25 +10095,30 @@ "node": ">=6" } }, - "node_modules/jsonc-parser": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", - "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==", - "dev": true - }, "node_modules/jsx-ast-utils": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz", - "integrity": "sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==", + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", + "integrity": "sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==", "dev": true, "dependencies": { - "array-includes": "^3.1.5", - "object.assign": "^4.1.3" + "array-includes": "^3.1.6", + "array.prototype.flat": "^1.3.1", + "object.assign": "^4.1.4", + "object.values": "^1.1.6" }, "engines": { "node": ">=4.0" } }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "dependencies": { + "json-buffer": "3.0.1" + } + }, "node_modules/kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", @@ -9692,22 +10150,25 @@ "dev": true }, "node_modules/language-tags": { - "version": "1.0.5", - "resolved": 
"https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz", - "integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==", + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.9.tgz", + "integrity": "sha512-MbjN408fEndfiQXbFQ1vnd+1NoLDsnQW41410oQBXiyXDMYH5z505juWa4KUE1LqxRC7DgOgZDbKLxHIwm27hA==", "dev": true, "dependencies": { - "language-subtag-registry": "~0.3.2" + "language-subtag-registry": "^0.3.20" + }, + "engines": { + "node": ">=0.10" } }, "node_modules/levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", "dev": true, "dependencies": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" }, "engines": { "node": ">= 0.8.0" @@ -9750,15 +10211,18 @@ } }, "node_modules/locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", "dev": true, "dependencies": { - "p-locate": "^4.1.0" + "p-locate": "^5.0.0" }, "engines": { - "node": ">=8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/lodash": { @@ -9807,9 +10271,9 @@ } }, "node_modules/long": { - "version": "5.2.1", - "resolved": 
"https://registry.npmjs.org/long/-/long-5.2.1.tgz", - "integrity": "sha512-GKSNGeNAtw8IryjjkhZxuKB3JzlcLTwjtiQCHKvqQet81I93kXslhDQruGI/QsddO83mcDToBVy7GqGS/zYf/A==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==", "dev": true, "peer": true }, @@ -9825,12 +10289,12 @@ } }, "node_modules/loupe": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.6.tgz", - "integrity": "sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==", + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.7.tgz", + "integrity": "sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==", "dev": true, "dependencies": { - "get-func-name": "^2.0.0" + "get-func-name": "^2.0.1" } }, "node_modules/lru-cache": { @@ -9923,12 +10387,12 @@ } }, "node_modules/match-sorter": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/match-sorter/-/match-sorter-6.3.1.tgz", - "integrity": "sha512-mxybbo3pPNuA+ZuCUhm5bwNkXrJTbsk5VWbR5wiwz/GC6LIiegBGn2w3O08UG/jdbYLinw51fSQ5xNU1U3MgBw==", + "version": "6.3.4", + "resolved": "https://registry.npmjs.org/match-sorter/-/match-sorter-6.3.4.tgz", + "integrity": "sha512-jfZW7cWS5y/1xswZo8VBOdudUiSd9nifYRWphc9M5D/ee4w4AoXLgBEdRbgVaxbMuagBPeUC5y2Hi8DO6o9aDg==", "dependencies": { - "@babel/runtime": "^7.12.5", - "remove-accents": "0.4.2" + "@babel/runtime": "^7.23.8", + "remove-accents": "0.5.0" } }, "node_modules/mathml-tag-names": { @@ -10062,15 +10526,14 @@ } }, "node_modules/minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, + "version": "3.1.2", + "resolved": 
"https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dependencies": { - "brace-expansion": "^2.0.1" + "brace-expansion": "^1.1.7" }, "engines": { - "node": ">=10" + "node": "*" } }, "node_modules/minimist": { @@ -10096,8 +10559,17 @@ "node": ">= 6" } }, - "node_modules/mkdirp": { - "version": "1.0.4", + "node_modules/minipass": { + "version": "7.0.4", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", + "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", + "dev": true, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "dev": true, @@ -10109,15 +10581,15 @@ } }, "node_modules/mlly": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.2.0.tgz", - "integrity": "sha512-+c7A3CV0KGdKcylsI6khWyts/CYrGTrRVo4R/I7u/cUsy0Conxa6LUhiEzVKIw14lc2L5aiO4+SeVe4TeGRKww==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", + "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", "dev": true, "dependencies": { - "acorn": "^8.8.2", - "pathe": "^1.1.0", - "pkg-types": "^1.0.2", - "ufo": "^1.1.1" + "acorn": "^8.11.3", + "pathe": "^1.1.2", + "pkg-types": "^1.0.3", + "ufo": "^1.3.2" } }, "node_modules/ms": { @@ -10204,9 +10676,9 @@ } }, "node_modules/nanoid": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", - "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "version": "3.3.7", + "resolved": 
"https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", "funding": [ { "type": "github", @@ -10242,9 +10714,9 @@ } }, "node_modules/node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", "dev": true, "dependencies": { "whatwg-url": "^5.0.0" @@ -10284,9 +10756,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.10.tgz", - "integrity": "sha512-5GFldHPXVG/YZmFzJvKK2zDSzPKhEp0+ZR5SVaoSag9fsL5YgHbUHDfnG5494ISANDcK4KwPXAx2xqVEydmd7w==" + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", + "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" }, "node_modules/normalize-package-data": { "version": "3.0.3", @@ -10316,9 +10788,9 @@ } }, "node_modules/normalize-package-data/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -10353,9 +10825,9 @@ } }, "node_modules/npm": { - "version": "9.9.0", - "resolved": 
"https://registry.npmjs.org/npm/-/npm-9.9.0.tgz", - "integrity": "sha512-wkd7sjz4KmdmddYQcd0aTP73P1cEuPlekeulz4jTDeMVx/Zo5XZ5KQ1z3eUzV3Q/WZpEO0NJXTrD5FNFe6fhCA==", + "version": "9.9.3", + "resolved": "https://registry.npmjs.org/npm/-/npm-9.9.3.tgz", + "integrity": "sha512-Z1l+rcQ5kYb17F3hHtO601arEpvdRYnCLtg8xo3AGtyj3IthwaraEOexI9903uANkifFbqHC8hT53KIrozWg8A==", "bundleDependencies": [ "@isaacs/string-locale-compare", "@npmcli/arborist", @@ -10429,6 +10901,13 @@ "write-file-atomic" ], "dev": true, + "workspaces": [ + "docs", + "smoke-tests", + "mock-globals", + "mock-registry", + "workspaces/*" + ], "dependencies": { "@isaacs/string-locale-compare": "^1.1.0", "@npmcli/arborist": "^6.5.0", @@ -10440,21 +10919,21 @@ "@npmcli/run-script": "^6.0.2", "abbrev": "^2.0.0", "archy": "~1.0.0", - "cacache": "^17.1.3", + "cacache": "^17.1.4", "chalk": "^5.3.0", - "ci-info": "^3.8.0", + "ci-info": "^4.0.0", "cli-columns": "^4.0.0", "cli-table3": "^0.6.3", "columnify": "^1.6.0", "fastest-levenshtein": "^1.0.16", - "fs-minipass": "^3.0.2", - "glob": "^10.2.7", + "fs-minipass": "^3.0.3", + "glob": "^10.3.10", "graceful-fs": "^4.2.11", "hosted-git-info": "^6.1.1", "ini": "^4.1.1", "init-package-json": "^5.0.0", "is-cidr": "^4.0.2", - "json-parse-even-better-errors": "^3.0.0", + "json-parse-even-better-errors": "^3.0.1", "libnpmaccess": "^7.0.2", "libnpmdiff": "^5.0.20", "libnpmexec": "^6.0.4", @@ -10462,20 +10941,20 @@ "libnpmhook": "^9.0.3", "libnpmorg": "^5.0.4", "libnpmpack": "^5.0.20", - "libnpmpublish": "^7.5.0", + "libnpmpublish": "^7.5.1", "libnpmsearch": "^6.0.2", "libnpmteam": "^5.0.3", "libnpmversion": "^4.0.2", "make-fetch-happen": "^11.1.1", "minimatch": "^9.0.3", - "minipass": "^5.0.0", + "minipass": "^7.0.4", "minipass-pipeline": "^1.2.4", "ms": "^2.1.2", - "node-gyp": "^9.4.0", + "node-gyp": "^9.4.1", "nopt": "^7.2.0", "normalize-package-data": "^5.0.0", "npm-audit-report": "^5.0.0", - "npm-install-checks": "^6.2.0", + "npm-install-checks": "^6.3.0", 
"npm-package-arg": "^10.1.0", "npm-pick-manifest": "^8.0.2", "npm-profile": "^7.0.1", @@ -10488,12 +10967,12 @@ "proc-log": "^3.0.0", "qrcode-terminal": "^0.12.0", "read": "^2.1.0", - "semver": "^7.5.4", + "semver": "^7.6.0", "sigstore": "^1.9.0", "spdx-expression-parse": "^3.0.1", - "ssri": "^10.0.4", + "ssri": "^10.0.5", "supports-color": "^9.4.0", - "tar": "^6.1.15", + "tar": "^6.2.0", "text-table": "~0.2.0", "tiny-relative-date": "^1.3.0", "treeverse": "^3.0.0", @@ -10531,6 +11010,12 @@ "node": ">=0.1.90" } }, + "node_modules/npm/node_modules/@gar/promisify": { + "version": "1.1.3", + "dev": true, + "inBundle": true, + "license": "MIT" + }, "node_modules/npm/node_modules/@isaacs/cliui": { "version": "8.0.2", "dev": true, @@ -10605,7 +11090,7 @@ "license": "ISC" }, "node_modules/npm/node_modules/@npmcli/arborist": { - "version": "6.5.0", + "version": "6.5.1", "dev": true, "inBundle": true, "license": "ISC", @@ -10618,7 +11103,7 @@ "@npmcli/name-from-folder": "^2.0.0", "@npmcli/node-gyp": "^3.0.0", "@npmcli/package-json": "^4.0.0", - "@npmcli/query": "^3.0.0", + "@npmcli/query": "^3.1.0", "@npmcli/run-script": "^6.0.0", "bin-links": "^4.0.1", "cacache": "^17.0.4", @@ -10652,13 +11137,13 @@ } }, "node_modules/npm/node_modules/@npmcli/config": { - "version": "6.4.0", + "version": "6.4.1", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "@npmcli/map-workspaces": "^3.0.2", - "ci-info": "^3.8.0", + "ci-info": "^4.0.0", "ini": "^4.1.0", "nopt": "^7.0.0", "proc-log": "^3.0.0", @@ -10759,6 +11244,19 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/npm/node_modules/@npmcli/move-file": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/npm/node_modules/@npmcli/name-from-folder": { "version": "2.0.0", "dev": true, @@ -10808,7 +11306,7 @@ } }, 
"node_modules/npm/node_modules/@npmcli/query": { - "version": "3.0.0", + "version": "3.1.0", "dev": true, "inBundle": true, "license": "ISC", @@ -10933,18 +11431,6 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, - "node_modules/npm/node_modules/abort-controller": { - "version": "3.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "dependencies": { - "event-target-shim": "^5.0.0" - }, - "engines": { - "node": ">=6.5" - } - }, "node_modules/npm/node_modules/agent-base": { "version": "6.0.2", "dev": true, @@ -10958,13 +11444,11 @@ } }, "node_modules/npm/node_modules/agentkeepalive": { - "version": "4.3.0", + "version": "4.5.0", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "debug": "^4.1.0", - "depd": "^2.0.0", "humanize-ms": "^1.2.1" }, "engines": { @@ -11021,14 +11505,10 @@ "license": "MIT" }, "node_modules/npm/node_modules/are-we-there-yet": { - "version": "4.0.0", + "version": "4.0.2", "dev": true, "inBundle": true, "license": "ISC", - "dependencies": { - "delegates": "^1.0.0", - "readable-stream": "^4.1.0" - }, "engines": { "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } @@ -11039,28 +11519,8 @@ "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/base64-js": { - "version": "1.5.1", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "inBundle": true, - "license": "MIT" - }, "node_modules/npm/node_modules/bin-links": { - "version": "4.0.2", + "version": "4.0.3", "dev": true, "inBundle": true, "license": "ISC", @@ -11092,30 +11552,6 @@ "balanced-match": "^1.0.0" } }, - "node_modules/npm/node_modules/buffer": { - "version": "6.0.3", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - 
"type": "consulting", - "url": "https://feross.org/support" - } - ], - "inBundle": true, - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, "node_modules/npm/node_modules/builtins": { "version": "5.0.1", "dev": true, @@ -11126,7 +11562,7 @@ } }, "node_modules/npm/node_modules/cacache": { - "version": "17.1.3", + "version": "17.1.4", "dev": true, "inBundle": true, "license": "ISC", @@ -11135,7 +11571,7 @@ "fs-minipass": "^3.0.0", "glob": "^10.2.2", "lru-cache": "^7.7.1", - "minipass": "^5.0.0", + "minipass": "^7.0.3", "minipass-collect": "^1.0.2", "minipass-flush": "^1.0.5", "minipass-pipeline": "^1.2.4", @@ -11170,7 +11606,7 @@ } }, "node_modules/npm/node_modules/ci-info": { - "version": "3.8.0", + "version": "4.0.0", "dev": true, "funding": [ { @@ -11243,7 +11679,7 @@ } }, "node_modules/npm/node_modules/cmd-shim": { - "version": "6.0.1", + "version": "6.0.2", "dev": true, "inBundle": true, "license": "ISC", @@ -11391,17 +11827,8 @@ "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/depd": { - "version": "2.0.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, "node_modules/npm/node_modules/diff": { - "version": "5.1.0", + "version": "5.2.0", "dev": true, "inBundle": true, "license": "BSD-3-Clause", @@ -11446,24 +11873,6 @@ "inBundle": true, "license": "MIT" }, - "node_modules/npm/node_modules/event-target-shim": { - "version": "5.0.1", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/npm/node_modules/events": { - "version": "3.3.0", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">=0.8.x" - } - }, "node_modules/npm/node_modules/exponential-backoff": { "version": "3.1.1", "dev": true, @@ -11496,12 +11905,12 @@ } }, "node_modules/npm/node_modules/fs-minipass": { - "version": "3.0.2", + "version": "3.0.3", "dev": true, "inBundle": true, "license": "ISC", 
"dependencies": { - "minipass": "^5.0.0" + "minipass": "^7.0.3" }, "engines": { "node": "^14.17.0 || ^16.13.0 || >=18.0.0" @@ -11514,10 +11923,13 @@ "license": "ISC" }, "node_modules/npm/node_modules/function-bind": { - "version": "1.1.1", + "version": "1.1.2", "dev": true, "inBundle": true, - "license": "MIT" + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/npm/node_modules/gauge": { "version": "5.0.1", @@ -11539,19 +11951,19 @@ } }, "node_modules/npm/node_modules/glob": { - "version": "10.2.7", + "version": "10.3.10", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "foreground-child": "^3.1.0", - "jackspeak": "^2.0.3", + "jackspeak": "^2.3.5", "minimatch": "^9.0.1", - "minipass": "^5.0.0 || ^6.0.2", - "path-scurry": "^1.7.0" + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0", + "path-scurry": "^1.10.1" }, "bin": { - "glob": "dist/cjs/src/bin.js" + "glob": "dist/esm/bin.mjs" }, "engines": { "node": ">=16 || 14 >=14.17" @@ -11566,24 +11978,24 @@ "inBundle": true, "license": "ISC" }, - "node_modules/npm/node_modules/has": { - "version": "1.0.3", + "node_modules/npm/node_modules/has-unicode": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "ISC" + }, + "node_modules/npm/node_modules/hasown": { + "version": "2.0.1", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" }, "engines": { - "node": ">= 0.4.0" + "node": ">= 0.4" } }, - "node_modules/npm/node_modules/has-unicode": { - "version": "2.0.1", - "dev": true, - "inBundle": true, - "license": "ISC" - }, "node_modules/npm/node_modules/hosted-git-info": { "version": "6.1.1", "dev": true, @@ -11651,28 +12063,8 @@ "node": ">=0.10.0" } }, - "node_modules/npm/node_modules/ieee754": { - "version": "1.2.1", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": 
"https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "inBundle": true, - "license": "BSD-3-Clause" - }, "node_modules/npm/node_modules/ignore-walk": { - "version": "6.0.3", + "version": "6.0.4", "dev": true, "inBundle": true, "license": "ISC", @@ -11701,6 +12093,12 @@ "node": ">=8" } }, + "node_modules/npm/node_modules/infer-owner": { + "version": "1.0.4", + "dev": true, + "inBundle": true, + "license": "ISC" + }, "node_modules/npm/node_modules/inflight": { "version": "1.0.6", "dev": true, @@ -11744,11 +12142,24 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, - "node_modules/npm/node_modules/ip": { - "version": "2.0.0", + "node_modules/npm/node_modules/ip-address": { + "version": "9.0.5", "dev": true, "inBundle": true, - "license": "MIT" + "license": "MIT", + "dependencies": { + "jsbn": "1.1.0", + "sprintf-js": "^1.1.3" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/npm/node_modules/ip-address/node_modules/sprintf-js": { + "version": "1.1.3", + "dev": true, + "inBundle": true, + "license": "BSD-3-Clause" }, "node_modules/npm/node_modules/ip-regex": { "version": "4.3.0", @@ -11772,12 +12183,12 @@ } }, "node_modules/npm/node_modules/is-core-module": { - "version": "2.12.1", + "version": "2.13.1", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "has": "^1.0.3" + "hasown": "^2.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -11805,7 +12216,7 @@ "license": "ISC" }, "node_modules/npm/node_modules/jackspeak": { - "version": "2.2.1", + "version": "2.3.6", "dev": true, "inBundle": true, "license": "BlueOak-1.0.0", @@ -11822,8 +12233,14 @@ "@pkgjs/parseargs": "^0.11.0" } }, + "node_modules/npm/node_modules/jsbn": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT" + }, "node_modules/npm/node_modules/json-parse-even-better-errors": { - "version": "3.0.0", + "version": "3.0.1", "dev": true, "inBundle": true, "license": "MIT", @@ 
-11862,7 +12279,7 @@ "license": "MIT" }, "node_modules/npm/node_modules/libnpmaccess": { - "version": "7.0.2", + "version": "7.0.3", "dev": true, "inBundle": true, "license": "ISC", @@ -11875,7 +12292,7 @@ } }, "node_modules/npm/node_modules/libnpmdiff": { - "version": "5.0.20", + "version": "5.0.21", "dev": true, "inBundle": true, "license": "ISC", @@ -11895,14 +12312,14 @@ } }, "node_modules/npm/node_modules/libnpmexec": { - "version": "6.0.4", + "version": "6.0.5", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { "@npmcli/arborist": "^6.5.0", "@npmcli/run-script": "^6.0.0", - "ci-info": "^3.7.1", + "ci-info": "^4.0.0", "npm-package-arg": "^10.1.0", "npmlog": "^7.0.1", "pacote": "^15.0.8", @@ -11917,7 +12334,7 @@ } }, "node_modules/npm/node_modules/libnpmfund": { - "version": "4.2.1", + "version": "4.2.2", "dev": true, "inBundle": true, "license": "ISC", @@ -11929,7 +12346,7 @@ } }, "node_modules/npm/node_modules/libnpmhook": { - "version": "9.0.3", + "version": "9.0.4", "dev": true, "inBundle": true, "license": "ISC", @@ -11942,7 +12359,7 @@ } }, "node_modules/npm/node_modules/libnpmorg": { - "version": "5.0.4", + "version": "5.0.5", "dev": true, "inBundle": true, "license": "ISC", @@ -11955,7 +12372,7 @@ } }, "node_modules/npm/node_modules/libnpmpack": { - "version": "5.0.20", + "version": "5.0.21", "dev": true, "inBundle": true, "license": "ISC", @@ -11970,12 +12387,12 @@ } }, "node_modules/npm/node_modules/libnpmpublish": { - "version": "7.5.0", + "version": "7.5.2", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "ci-info": "^3.6.1", + "ci-info": "^4.0.0", "normalize-package-data": "^5.0.0", "npm-package-arg": "^10.1.0", "npm-registry-fetch": "^14.0.3", @@ -11989,7 +12406,7 @@ } }, "node_modules/npm/node_modules/libnpmsearch": { - "version": "6.0.2", + "version": "6.0.3", "dev": true, "inBundle": true, "license": "ISC", @@ -12001,7 +12418,7 @@ } }, "node_modules/npm/node_modules/libnpmteam": { - "version": "5.0.3", + 
"version": "5.0.4", "dev": true, "inBundle": true, "license": "ISC", @@ -12014,7 +12431,7 @@ } }, "node_modules/npm/node_modules/libnpmversion": { - "version": "4.0.2", + "version": "4.0.3", "dev": true, "inBundle": true, "license": "ISC", @@ -12064,6 +12481,15 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/npm/node_modules/make-fetch-happen/node_modules/minipass": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, "node_modules/npm/node_modules/minimatch": { "version": "9.0.3", "dev": true, @@ -12080,12 +12506,12 @@ } }, "node_modules/npm/node_modules/minipass": { - "version": "5.0.0", + "version": "7.0.4", "dev": true, "inBundle": true, "license": "ISC", "engines": { - "node": ">=8" + "node": ">=16 || 14 >=14.17" } }, "node_modules/npm/node_modules/minipass-collect": { @@ -12113,12 +12539,12 @@ } }, "node_modules/npm/node_modules/minipass-fetch": { - "version": "3.0.3", + "version": "3.0.4", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "minipass": "^5.0.0", + "minipass": "^7.0.3", "minipass-sized": "^1.0.3", "minizlib": "^2.1.2" }, @@ -12285,7 +12711,7 @@ } }, "node_modules/npm/node_modules/node-gyp": { - "version": "9.4.0", + "version": "9.4.1", "dev": true, "inBundle": true, "license": "MIT", @@ -12294,7 +12720,7 @@ "exponential-backoff": "^3.1.1", "glob": "^7.1.4", "graceful-fs": "^4.2.6", - "make-fetch-happen": "^11.0.3", + "make-fetch-happen": "^10.0.3", "nopt": "^6.0.0", "npmlog": "^6.0.0", "rimraf": "^3.0.2", @@ -12309,6 +12735,19 @@ "node": "^12.13 || ^14.13 || >=16" } }, + "node_modules/npm/node_modules/node-gyp/node_modules/@npmcli/fs": { + "version": "2.1.2", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/npm/node_modules/node-gyp/node_modules/abbrev": { "version": "1.1.1", 
"dev": true, @@ -12338,23 +12777,104 @@ "concat-map": "0.0.1" } }, - "node_modules/npm/node_modules/node-gyp/node_modules/gauge": { - "version": "4.0.4", + "node_modules/npm/node_modules/node-gyp/node_modules/cacache": { + "version": "16.1.3", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^3.0.7", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - }, - "engines": { - "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm/node_modules/node-gyp/node_modules/cacache/node_modules/brace-expansion": { + "version": "2.0.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/npm/node_modules/node-gyp/node_modules/cacache/node_modules/glob": { + "version": "8.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm/node_modules/node-gyp/node_modules/cacache/node_modules/minimatch": { + "version": "5.1.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "brace-expansion": 
"^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm/node_modules/node-gyp/node_modules/fs-minipass": { + "version": "2.1.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/npm/node_modules/node-gyp/node_modules/gauge": { + "version": "4.0.4", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", + "has-unicode": "^2.0.1", + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, "node_modules/npm/node_modules/node-gyp/node_modules/glob": { @@ -12377,6 +12897,33 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/npm/node_modules/node-gyp/node_modules/make-fetch-happen": { + "version": "10.2.1", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, "node_modules/npm/node_modules/node-gyp/node_modules/minimatch": { "version": "3.1.2", "dev": true, @@ -12389,6 +12936,35 @@ "node": "*" } }, + "node_modules/npm/node_modules/node-gyp/node_modules/minipass": { + "version": "3.3.6", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + 
"node_modules/npm/node_modules/node-gyp/node_modules/minipass-fetch": { + "version": "2.1.2", + "dev": true, + "inBundle": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, "node_modules/npm/node_modules/node-gyp/node_modules/nopt": { "version": "6.0.0", "dev": true, @@ -12419,25 +12995,47 @@ "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/npm/node_modules/node-gyp/node_modules/readable-stream": { - "version": "3.6.2", + "node_modules/npm/node_modules/node-gyp/node_modules/signal-exit": { + "version": "3.0.7", "dev": true, "inBundle": true, - "license": "MIT", + "license": "ISC" + }, + "node_modules/npm/node_modules/node-gyp/node_modules/ssri": { + "version": "9.0.1", + "dev": true, + "inBundle": true, + "license": "ISC", "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" + "minipass": "^3.1.1" }, "engines": { - "node": ">= 6" + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" } }, - "node_modules/npm/node_modules/node-gyp/node_modules/signal-exit": { - "version": "3.0.7", + "node_modules/npm/node_modules/node-gyp/node_modules/unique-filename": { + "version": "2.0.1", "dev": true, "inBundle": true, - "license": "ISC" + "license": "ISC", + "dependencies": { + "unique-slug": "^3.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm/node_modules/node-gyp/node_modules/unique-slug": { + "version": "3.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } }, "node_modules/npm/node_modules/node-gyp/node_modules/which": { "version": "2.0.2", @@ -12506,7 +13104,7 @@ } }, "node_modules/npm/node_modules/npm-install-checks": { - "version": "6.2.0", + 
"version": "6.3.0", "dev": true, "inBundle": true, "license": "BSD-2-Clause", @@ -12599,6 +13197,15 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/npm/node_modules/npm-registry-fetch/node_modules/minipass": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, "node_modules/npm/node_modules/npm-user-validate": { "version": "2.0.0", "dev": true, @@ -12679,6 +13286,15 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, + "node_modules/npm/node_modules/pacote/node_modules/minipass": { + "version": "5.0.0", + "dev": true, + "inBundle": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, "node_modules/npm/node_modules/parse-conflict-json": { "version": "3.0.1", "dev": true, @@ -12712,13 +13328,13 @@ } }, "node_modules/npm/node_modules/path-scurry": { - "version": "1.9.2", + "version": "1.10.1", "dev": true, "inBundle": true, "license": "BlueOak-1.0.0", "dependencies": { - "lru-cache": "^9.1.1", - "minipass": "^5.0.0 || ^6.0.2" + "lru-cache": "^9.1.1 || ^10.0.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" }, "engines": { "node": ">=16 || 14 >=14.17" @@ -12728,7 +13344,7 @@ } }, "node_modules/npm/node_modules/path-scurry/node_modules/lru-cache": { - "version": "9.1.1", + "version": "10.2.0", "dev": true, "inBundle": true, "license": "ISC", @@ -12737,7 +13353,7 @@ } }, "node_modules/npm/node_modules/postcss-selector-parser": { - "version": "6.0.13", + "version": "6.0.15", "dev": true, "inBundle": true, "license": "MIT", @@ -12758,15 +13374,6 @@ "node": "^14.17.0 || ^16.13.0 || >=18.0.0" } }, - "node_modules/npm/node_modules/process": { - "version": "0.11.10", - "dev": true, - "inBundle": true, - "license": "MIT", - "engines": { - "node": ">= 0.6.0" - } - }, "node_modules/npm/node_modules/promise-all-reject-late": { "version": "1.0.1", "dev": true, @@ -12874,18 +13481,17 @@ } }, "node_modules/npm/node_modules/readable-stream": { - "version": "4.4.0", + "version": "3.6.2", 
"dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "abort-controller": "^3.0.0", - "buffer": "^6.0.3", - "events": "^3.3.0", - "process": "^0.11.10" + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" }, "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + "node": ">= 6" } }, "node_modules/npm/node_modules/retry": { @@ -12982,7 +13588,7 @@ "optional": true }, "node_modules/npm/node_modules/semver": { - "version": "7.5.4", + "version": "7.6.0", "dev": true, "inBundle": true, "license": "ISC", @@ -13036,7 +13642,7 @@ } }, "node_modules/npm/node_modules/signal-exit": { - "version": "4.0.2", + "version": "4.1.0", "dev": true, "inBundle": true, "license": "ISC", @@ -13077,16 +13683,16 @@ } }, "node_modules/npm/node_modules/socks": { - "version": "2.7.1", + "version": "2.8.1", "dev": true, "inBundle": true, "license": "MIT", "dependencies": { - "ip": "^2.0.0", + "ip-address": "^9.0.5", "smart-buffer": "^4.2.0" }, "engines": { - "node": ">= 10.13.0", + "node": ">= 10.0.0", "npm": ">= 3.0.0" } }, @@ -13115,7 +13721,7 @@ } }, "node_modules/npm/node_modules/spdx-exceptions": { - "version": "2.3.0", + "version": "2.5.0", "dev": true, "inBundle": true, "license": "CC-BY-3.0" @@ -13131,18 +13737,18 @@ } }, "node_modules/npm/node_modules/spdx-license-ids": { - "version": "3.0.13", + "version": "3.0.17", "dev": true, "inBundle": true, "license": "CC0-1.0" }, "node_modules/npm/node_modules/ssri": { - "version": "10.0.4", + "version": "10.0.5", "dev": true, "inBundle": true, "license": "ISC", "dependencies": { - "minipass": "^5.0.0" + "minipass": "^7.0.3" }, "engines": { "node": "^14.17.0 || ^16.13.0 || >=18.0.0" @@ -13224,7 +13830,7 @@ } }, "node_modules/npm/node_modules/tar": { - "version": "6.1.15", + "version": "6.2.0", "dev": true, "inBundle": true, "license": "ISC", @@ -13264,6 +13870,15 @@ "node": ">=8" } }, + "node_modules/npm/node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "dev": true, + "inBundle": 
true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, "node_modules/npm/node_modules/text-table": { "version": "0.2.0", "dev": true, @@ -13524,9 +14139,9 @@ } }, "node_modules/nwsapi": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.2.tgz", - "integrity": "sha512-90yv+6538zuvUMnN+zCr8LuV6bPFdq50304114vJYJ8RDyK8D5O9Phpbd6SZWgI7PwzmmfN1upeOJlvybDSgCw==", + "version": "2.2.9", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.9.tgz", + "integrity": "sha512-2f3F0SEEer8bBu0dsNCFF50N0cTThV1nWFYcEYFZttdW0lDAoybv9cQoK7X7/68Z89S7FoRrVjP1LPX4XRf9vg==", "dev": true }, "node_modules/object-assign": { @@ -13547,26 +14162,10 @@ } }, "node_modules/object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", - "dev": true, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", "dev": true, - "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - }, - "engines": { - "node": ">= 0.4" - }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -13581,13 +14180,13 @@ } }, "node_modules/object.assign": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", + "version": "4.1.5", + 
"resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.5.tgz", + "integrity": "sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", + "call-bind": "^1.0.5", + "define-properties": "^1.2.1", "has-symbols": "^1.0.3", "object-keys": "^1.1.1" }, @@ -13599,28 +14198,29 @@ } }, "node_modules/object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.8.tgz", + "integrity": "sha512-cmopxi8VwRIAw/fkijJohSfpef5PdN0pMQJN6VC/ZKvn0LIknWD8KtgY6KlQdEc4tIjcQ3HxSMmnvtzIscdaYQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" } }, "node_modules/object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.8.tgz", + "integrity": "sha512-k6E21FzySsSK5a21KRADBd/NGneRegFO5pLHfdQLpRDETUNJueLXs3WCzyQ3tFRDYgbq3KHGXfTbi2bs8WQ6rQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -13629,28 +14229,46 @@ "url": "https://github.com/sponsors/ljharb" } }, + 
"node_modules/object.groupby": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/object.groupby/-/object.groupby-1.0.3.tgz", + "integrity": "sha512-+Lhy3TQTuzXI5hevh8sBGqbmurHbbIjAi0Z4S63nthVLmLxfbj4T54a4CfZrXIrt9iP4mVAPYMo/v99taj3wjQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/object.hasown": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz", - "integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.4.tgz", + "integrity": "sha512-FZ9LZt9/RHzGySlBARE3VF+gE26TxR38SdmqOqliuTnl9wrKulaQs+4dee1V+Io8VfxqzAfHu6YuRgUy8OHoTg==", "dev": true, "dependencies": { - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "define-properties": "^1.2.1", + "es-abstract": "^1.23.2", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/object.values": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.2.0.tgz", + "integrity": "sha512-yBYjY9QX2hnRmZHAjG/f13MzmBzxzYgQhFrke06TTyKY5zSTEqkOeukBzIdVA3j3ulu8Qa3MbVFShV7T2RmGtQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -13697,17 +14315,17 @@ } }, "node_modules/optionator": { - "version": "0.8.3", - "resolved": 
"https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "dependencies": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" }, "engines": { "node": ">= 0.8.0" @@ -13746,36 +14364,39 @@ } }, "node_modules/outvariant": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.0.tgz", - "integrity": "sha512-AlWY719RF02ujitly7Kk/0QlV+pXGFDHrHf9O2OKqyqgBieaPOIeuSkL8sRK6j2WK+/ZAURq2kZsY0d8JapUiw==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.2.tgz", + "integrity": "sha512-Ou3dJ6bA/UJ5GVHxah4LnqDwZRwAmWxrG3wtrHrbGnP4RnLCtA64A4F+ae7Y8ww660JaddSoArUR5HjipWSHAQ==", "dev": true }, "node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", "dev": true, "dependencies": { - "p-try": "^2.0.0" + "yocto-queue": "^0.1.0" }, "engines": { - "node": ">=6" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/p-locate": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", "dev": true, "dependencies": { - "p-limit": "^2.2.0" + "p-limit": "^3.0.2" }, "engines": { - "node": ">=8" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/p-try": { @@ -13867,10 +14488,35 @@ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", "dev": true }, + "node_modules/path-scurry": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.2.tgz", + "integrity": "sha512-7xTavNy5RQXnsjANvVvMkEjvloOinkAjv/Z6Ildz9v2RinZ4SBKTWFOVRbaF8p0vpHnyjV/UwNDdKuUv6M5qcA==", + "dev": true, + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.2.tgz", + "integrity": "sha512-9hp3Vp2/hFQUiIwKo8XCeFVnrg8Pk3TYNPIR7tJADKi5YfcF7vEaK7avFHTlSy3kOKYaJQaalfEo6YuXdceBOQ==", + "dev": true, + "engines": { + "node": "14 || >=16.14" + } + }, "node_modules/path-to-regexp": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", - "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==", + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.2.tgz", + "integrity": 
"sha512-GQX3SSMokngb36+whdpRXE+3f9V8UzyAorlYvOGx87ufGHehNTn5lCxrKtLyZ4Yl/wEKnNnr98ZzOwwDZV5ogw==", "dev": true }, "node_modules/path-type": { @@ -13883,9 +14529,9 @@ } }, "node_modules/pathe": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.0.tgz", - "integrity": "sha512-ODbEPR0KKHqECXW1GoxdDb+AZvULmXjVPy4rt+pGo2+TnjJTIPJQSVS6N63n8T2Ip+syHhbn52OewKicV0373w==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", "dev": true }, "node_modules/pathval": { @@ -13923,29 +14569,38 @@ } }, "node_modules/pirates": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", - "integrity": "sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==", + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", "dev": true, "engines": { "node": ">= 6" } }, "node_modules/pkg-types": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.0.2.tgz", - "integrity": "sha512-hM58GKXOcj8WTqUXnsQyJYXdeAPbythQgEF3nTcEo+nkD49chjQ9IKm/QJy9xf6JakXptz86h7ecP2024rrLaQ==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.1.0.tgz", + "integrity": "sha512-/RpmvKdxKf8uILTtoOhAgf30wYbP2Qw+L9p3Rvshx1JZVX+XQNZQFjlbmGHEGIm4CkVPlSn+NXmIM8+9oWQaSA==", "dev": true, "dependencies": { - "jsonc-parser": "^3.2.0", - "mlly": "^1.1.1", - "pathe": "^1.1.0" + "confbox": "^0.1.7", + "mlly": "^1.6.1", + "pathe": "^1.1.2" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz", + "integrity": 
"sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==", + "dev": true, + "engines": { + "node": ">= 0.4" } }, "node_modules/postcss": { - "version": "8.4.31", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", - "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", + "version": "8.4.38", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.38.tgz", + "integrity": "sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A==", "funding": [ { "type": "opencollective", @@ -13961,28 +14616,34 @@ } ], "dependencies": { - "nanoid": "^3.3.6", + "nanoid": "^3.3.7", "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" + "source-map-js": "^1.2.0" }, "engines": { "node": "^10 || ^12 || >=14" } }, "node_modules/postcss-attribute-case-insensitive": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-6.0.2.tgz", - "integrity": "sha512-IRuCwwAAQbgaLhxQdQcIIK0dCVXg3XDUnzgKD8iwdiYdwU4rMWRWyl/W9/0nA4ihVpq5pyALiHB2veBJ0292pw==", + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-6.0.3.tgz", + "integrity": "sha512-KHkmCILThWBRtg+Jn1owTnHPnFit4OkqS+eKiGEOPIGke54DCeYGJ6r0Fx/HjfE9M9kznApCLcU0DvnPchazMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "postcss-selector-parser": "^6.0.10" + "postcss-selector-parser": "^6.0.13" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14002,37 +14663,51 @@ } }, "node_modules/postcss-color-functional-notation": { - "version": 
"5.0.2", - "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-5.0.2.tgz", - "integrity": "sha512-M6ygxWOyd6eWf3sd1Lv8xi4SeF4iBPfJvkfMU4ITh8ExJc1qhbvh/U8Cv/uOvBgUVOMDdScvCdlg8+hREQzs7w==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-5.1.0.tgz", + "integrity": "sha512-w2R4py6zrVE1U7FwNaAc76tNQlG9GLkrBbcFw+VhUjyDDiV28vfZG+l4LyPmpoQpeSJVtu8VgNjE8Jv5SpC7dQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { + "@csstools/postcss-progressive-custom-properties": "^2.3.0", "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/postcss-color-hex-alpha": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-9.0.2.tgz", - "integrity": "sha512-SfPjgr//VQ/DOCf80STIAsdAs7sbIbxATvVmd+Ec7JvR8onz9pjawhq3BJM3Pie40EE3TyB0P6hft16D33Nlyg==", + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-9.0.4.tgz", + "integrity": "sha512-XQZm4q4fNFqVCYMGPiBjcqDhuG7Ey2xrl99AnDJMyr5eDASsAGalndVgHZF8i97VFNy1GQeZc4q2ydagGmhelQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { + "@csstools/utilities": "^1.0.0", "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14056,64 +14731,83 @@ } }, 
"node_modules/postcss-custom-media": { - "version": "9.1.2", - "resolved": "https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-9.1.2.tgz", - "integrity": "sha512-osM9g4UKq4XKimAC7RAXroqi3BXpxfwTswAJQiZdrBjWGFGEyxQrY5H2eDWI8F+MEvEUfYDxA8scqi3QWROCSw==", + "version": "9.1.5", + "resolved": "https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-9.1.5.tgz", + "integrity": "sha512-GStyWMz7Qbo/Gtw1xVspzVSX8eipgNg4lpsO3CAeY4/A1mzok+RV6MCv3fg62trWijh/lYEj6vps4o8JcBBpDA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/cascade-layer-name-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "@csstools/media-query-list-parser": "^2.0.0" + "@csstools/cascade-layer-name-parser": "^1.0.2", + "@csstools/css-parser-algorithms": "^2.2.0", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/media-query-list-parser": "^2.1.1" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/postcss-custom-properties": { - "version": "13.1.4", - "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-13.1.4.tgz", - "integrity": "sha512-iSAdaZrM3KMec8cOSzeTUNXPYDlhqsMJHpt62yrjwG6nAnMtRHPk5JdMzGosBJtqEahDolvD5LNbcq+EZ78o5g==", + "version": "13.3.8", + "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-13.3.8.tgz", + "integrity": "sha512-OP9yj4yXxYOiW2n2TRpnE7C0yePvBiZb72S22mZVNzZEObdTYFjNaX6oZO4R4E8Ie9RmC/Jxw8EKYSbLrC1EFA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - 
"@csstools/cascade-layer-name-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", + "@csstools/cascade-layer-name-parser": "^1.0.9", + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4", + "@csstools/utilities": "^1.0.0", "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/postcss-custom-selectors": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-7.1.2.tgz", - "integrity": "sha512-jX7VlE3jrgfBIOfxiGNRFq81xUoHSZhvxhQurzE7ZFRv+bUmMwB7/XnA0nNlts2CwNtbXm4Ozy0ZAYKHlCRmBQ==", + "version": "7.1.8", + "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-7.1.8.tgz", + "integrity": "sha512-fqDkGSEsO7+oQaqdRdR8nwwqH+N2uk6LE/2g4myVJJYz/Ly418lHKEleKTdV/GzjBjFcG4n0dbfuH/Pd2BE8YA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/cascade-layer-name-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "postcss-selector-parser": "^6.0.4" + "@csstools/cascade-layer-name-parser": "^1.0.9", + "@csstools/css-parser-algorithms": "^2.6.1", + "@csstools/css-tokenizer": "^2.2.4", + "postcss-selector-parser": "^6.0.13" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14137,20 +14831,26 @@ } }, "node_modules/postcss-double-position-gradients": { - "version": "4.0.2", - "resolved": 
"https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-4.0.2.tgz", - "integrity": "sha512-GXL1RmFREDK4Q9aYvI2RhVrA6a6qqSMQQ5ke8gSH1xgV6exsqbcJpIumC7AOgooH6/WIG3/K/T8xxAiVHy/tJg==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-4.0.4.tgz", + "integrity": "sha512-nUAbUXURemLXIrl4Xoia2tiu5z/n8sY+BVDZApoeT9BlpByyrp02P/lFCRrRvZ/zrGRE+MOGLhk8o7VcMCtPtQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/postcss-progressive-custom-properties": "^2.0.0", + "@csstools/postcss-progressive-custom-properties": "^2.3.0", "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14241,9 +14941,9 @@ } }, "node_modules/postcss-import": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-14.1.0.tgz", - "integrity": "sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==", + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", "dev": true, "dependencies": { "postcss-value-parser": "^4.0.0", @@ -14251,7 +14951,7 @@ "resolve": "^1.1.7" }, "engines": { - "node": ">=10.0.0" + "node": ">=14.0.0" }, "peerDependencies": { "postcss": "^8.0.0" @@ -14285,41 +14985,53 @@ } }, "node_modules/postcss-lab-function": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-5.2.0.tgz", - "integrity": 
"sha512-ie/k0xFCib22LV56jZoygLuWfM4J4migb89QnEXOjORGh6UwsDVSPW/x+P2MYS+AKFfZ5Npcu5HYEzYcezAAag==", + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-5.2.3.tgz", + "integrity": "sha512-fi32AYKzji5/rvgxo5zXHFvAYBw0u0OzELbeCNjEZVLUir18Oj+9RmNphtM8QdLUaUnrfx8zy8vVYLmFLkdmrQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" + "@csstools/css-color-parser": "^1.2.0", + "@csstools/css-parser-algorithms": "^2.1.1", + "@csstools/css-tokenizer": "^2.1.1", + "@csstools/postcss-progressive-custom-properties": "^2.3.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/postcss-load-config": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-3.1.4.tgz", - "integrity": "sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", + "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "dependencies": { - "lilconfig": "^2.0.5", - "yaml": "^1.10.2" + "lilconfig": "^3.0.0", + "yaml": "^2.3.4" }, "engines": { - "node": ">= 10" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/postcss/" + "node": ">= 14" }, "peerDependencies": { "postcss": ">=8.0.9", @@ -14334,20 +15046,50 @@ } } }, + "node_modules/postcss-load-config/node_modules/lilconfig": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.1.tgz", + "integrity": "sha512-O18pf7nyvHTckunPWCV1XUNXU1piu01y2b7ATJ0ppkUkk8ocqVWBrYjJBCwHDjD/ZWcfyrA0P4gKhzWGi5EINQ==", + "dev": true, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/postcss-load-config/node_modules/yaml": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.4.2.tgz", + "integrity": "sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA==", + "dev": true, + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14" + } + }, "node_modules/postcss-logical": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-6.1.0.tgz", - "integrity": "sha512-qb1+LpClhYjxac8SfOcWotnY3unKZesDqIOm+jnGt8rTl7xaIWpE2bPGZHxflOip1E/4ETo79qlJyRL3yrHn1g==", + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-6.2.0.tgz", + "integrity": "sha512-aqlfKGaY0nnbgI9jwUikp4gJKBqcH5noU/EdnIVceghaaDPYhZuyJVxlvWNy55tlTG5tunRKCTAX9yljLiFgmw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { "postcss-value-parser": "^4.2.0" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14359,12 +15101,12 @@ "dev": true }, "node_modules/postcss-nested": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.0.tgz", - "integrity": 
"sha512-0DkamqrPcmkBDsLn+vQDIrtkSbNkv5AD/M322ySo9kqFkCIYklym2xEmWkwo+Y3/qZo34tzEPNUw4y7yMCdv5w==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", + "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", "dev": true, "dependencies": { - "postcss-selector-parser": "^6.0.10" + "postcss-selector-parser": "^6.0.11" }, "engines": { "node": ">=12.0" @@ -14378,9 +15120,19 @@ } }, "node_modules/postcss-nesting": { - "version": "11.2.2", - "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-11.2.2.tgz", - "integrity": "sha512-aOTiUniAB1bcPE6GGiynWRa6PZFPhOTAm5q3q5cem6QeSijIHHkWr6gs65ukCZMXeak8yXeZVbBJET3VM+HlhA==", + "version": "11.3.0", + "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-11.3.0.tgz", + "integrity": "sha512-JlS10AQm/RzyrUGgl5irVkAlZYTJ99mNueUl+Qab+TcHhVedLiylWVkKBhRale+rS9yWIJK48JVzQlq3LcSdeA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { "@csstools/selector-specificity": "^2.0.0", "postcss-selector-parser": "^6.0.10" @@ -14388,10 +15140,6 @@ "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14462,57 +15210,68 @@ } }, "node_modules/postcss-preset-env": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-8.3.0.tgz", - "integrity": "sha512-VFc/bhwRo37RoTVzCTCKDJLw0lwsqLRCTc7dkJkfs9S7XXfTbk7QkhbMWHd2L+iZsAsE5yqdSRBZ41/Q828TbA==", + "version": "8.5.1", + "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-8.5.1.tgz", + "integrity": "sha512-qhWnJJjP6ArLUINWJ38t6Aftxnv9NW6cXK0NuwcLCcRilbuw72dSFLkCVUJeCfHGgJiKzX+pnhkGiki0PEynWg==", + 
"funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { "@csstools/postcss-cascade-layers": "^3.0.1", - "@csstools/postcss-color-function": "^2.1.0", - "@csstools/postcss-color-mix-function": "^1.0.0", + "@csstools/postcss-color-function": "^2.2.3", + "@csstools/postcss-color-mix-function": "^1.0.3", "@csstools/postcss-font-format-keywords": "^2.0.2", - "@csstools/postcss-gradients-interpolation-method": "^3.0.1", - "@csstools/postcss-hwb-function": "^2.2.0", - "@csstools/postcss-ic-unit": "^2.0.2", - "@csstools/postcss-is-pseudo-class": "^3.1.1", + "@csstools/postcss-gradients-interpolation-method": "^3.0.6", + "@csstools/postcss-hwb-function": "^2.2.2", + "@csstools/postcss-ic-unit": "^2.0.4", + "@csstools/postcss-is-pseudo-class": "^3.2.1", "@csstools/postcss-logical-float-and-clear": "^1.0.1", "@csstools/postcss-logical-resize": "^1.0.1", - "@csstools/postcss-logical-viewport-units": "^1.0.2", - "@csstools/postcss-media-minmax": "^1.0.0", - "@csstools/postcss-media-queries-aspect-ratio-number-values": "^1.0.1", + "@csstools/postcss-logical-viewport-units": "^1.0.3", + "@csstools/postcss-media-minmax": "^1.0.4", + "@csstools/postcss-media-queries-aspect-ratio-number-values": "^1.0.4", "@csstools/postcss-nested-calc": "^2.0.2", "@csstools/postcss-normalize-display-values": "^2.0.1", - "@csstools/postcss-oklab-function": "^2.2.0", - "@csstools/postcss-progressive-custom-properties": "^2.1.0", + "@csstools/postcss-oklab-function": "^2.2.3", + "@csstools/postcss-progressive-custom-properties": "^2.3.0", + "@csstools/postcss-relative-color-syntax": "^1.0.2", "@csstools/postcss-scope-pseudo-class": "^2.0.2", - "@csstools/postcss-stepped-value-functions": "^2.1.0", - "@csstools/postcss-text-decoration-shorthand": "^2.2.1", - "@csstools/postcss-trigonometric-functions": "^2.1.0", + "@csstools/postcss-stepped-value-functions": 
"^2.1.1", + "@csstools/postcss-text-decoration-shorthand": "^2.2.4", + "@csstools/postcss-trigonometric-functions": "^2.1.1", "@csstools/postcss-unset-value": "^2.0.1", "autoprefixer": "^10.4.14", - "browserslist": "^4.21.5", + "browserslist": "^4.21.9", "css-blank-pseudo": "^5.0.2", "css-has-pseudo": "^5.0.2", "css-prefers-color-scheme": "^8.0.2", - "cssdb": "^7.5.3", + "cssdb": "^7.6.0", "postcss-attribute-case-insensitive": "^6.0.2", "postcss-clamp": "^4.1.0", - "postcss-color-functional-notation": "^5.0.2", + "postcss-color-functional-notation": "^5.1.0", "postcss-color-hex-alpha": "^9.0.2", "postcss-color-rebeccapurple": "^8.0.2", - "postcss-custom-media": "^9.1.2", - "postcss-custom-properties": "^13.1.4", - "postcss-custom-selectors": "^7.1.2", + "postcss-custom-media": "^9.1.5", + "postcss-custom-properties": "^13.2.0", + "postcss-custom-selectors": "^7.1.3", "postcss-dir-pseudo-class": "^7.0.2", - "postcss-double-position-gradients": "^4.0.2", + "postcss-double-position-gradients": "^4.0.4", "postcss-focus-visible": "^8.0.2", "postcss-focus-within": "^7.0.2", "postcss-font-variant": "^5.0.0", "postcss-gap-properties": "^4.0.1", "postcss-image-set-function": "^5.0.2", "postcss-initial": "^4.0.1", - "postcss-lab-function": "^5.2.0", - "postcss-logical": "^6.1.0", - "postcss-nesting": "^11.2.1", + "postcss-lab-function": "^5.2.3", + "postcss-logical": "^6.2.0", + "postcss-nesting": "^11.3.0", "postcss-opacity-percentage": "^2.0.0", "postcss-overflow-shorthand": "^4.0.1", "postcss-page-break": "^3.0.4", @@ -14525,10 +15284,6 @@ "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } @@ -14582,9 +15337,9 @@ } }, "node_modules/postcss-scss": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.6.tgz", - "integrity": 
"sha512-rLDPhJY4z/i4nVFZ27j9GqLxj1pwxE80eAzUNRMXtcpipFYIeowerzBgG3yJhMtObGEXidtIgbUpQ3eLDsf5OQ==", + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.9.tgz", + "integrity": "sha512-AjKOeiwAitL/MXxQW2DliT28EKukvvbEWx3LBmJIRN8KfBGZbRTxNYW0kSqi1COiTZ57nZ9NW06S6ux//N1c9A==", "dev": true, "funding": [ { @@ -14594,37 +15349,47 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/postcss-scss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "engines": { "node": ">=12.0" }, "peerDependencies": { - "postcss": "^8.4.19" + "postcss": "^8.4.29" } }, "node_modules/postcss-selector-not": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-7.0.1.tgz", - "integrity": "sha512-1zT5C27b/zeJhchN7fP0kBr16Cc61mu7Si9uWWLoA3Px/D9tIJPKchJCkUH3tPO5D0pCFmGeApAv8XpXBQJ8SQ==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-7.0.2.tgz", + "integrity": "sha512-/SSxf/90Obye49VZIfc0ls4H0P6i6V1iHv0pzZH8SdgvZOPFkF37ef1r5cyWcMflJSFJ5bfuoluTnFnBBFiuSA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], "dependencies": { - "postcss-selector-parser": "^6.0.10" + "postcss-selector-parser": "^6.0.13" }, "engines": { "node": "^14 || ^16 || >=18" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/csstools" - }, "peerDependencies": { "postcss": "^8.4" } }, "node_modules/postcss-selector-parser": { - "version": "6.0.11", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.11.tgz", - "integrity": "sha512-zbARubNdogI9j7WY4nQJBiNqQf3sLS3wCP4WfOidu+p28LofJqDH1tcXypGrcmMHhDk2t9wGhCsYe/+szLTy1g==", + "version": "6.0.16", + "resolved": 
"https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.16.tgz", + "integrity": "sha512-A0RVJrX+IUkVZbW3ClroRWurercFhieevHB38sr2+l9eUClMqome3LmEmnhlNy+5Mr2EYN6B2Kaw9wYdd+VHiw==", "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -14639,18 +15404,18 @@ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" }, "node_modules/prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", "dev": true, "engines": { "node": ">= 0.8.0" } }, "node_modules/prettier": { - "version": "2.8.7", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.7.tgz", - "integrity": "sha512-yPngTo3aXUUmyuTjeTUT75txrf+aMh9FiD7q9ZE/i6r0bPb22g4FsE6Y338PQX1bmfy08i9QQCB7/rcUAVntfw==", + "version": "2.8.8", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", + "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", "dev": true, "bin": { "prettier": "bin-prettier.js" @@ -14704,9 +15469,9 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" }, "node_modules/protobufjs": { - "version": "7.2.5", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz", - "integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==", + "version": "7.2.6", + "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.6.tgz", + "integrity": 
"sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw==", "dev": true, "hasInstallScript": true, "peer": true, @@ -14729,9 +15494,9 @@ } }, "node_modules/protobufjs-cli": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/protobufjs-cli/-/protobufjs-cli-1.1.1.tgz", - "integrity": "sha512-VPWMgIcRNyQwWUv8OLPyGQ/0lQY/QTQAVN5fh+XzfDwsVw1FZ2L3DM/bcBf8WPiRz2tNpaov9lPZfNcmNo6LXA==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/protobufjs-cli/-/protobufjs-cli-1.1.2.tgz", + "integrity": "sha512-8ivXWxT39gZN4mm4ArQyJrRgnIwZqffBWoLDsE21TmMcKI3XwJMV4lEF2WU02C4JAtgYYc2SfJIltelD8to35g==", "dev": true, "dependencies": { "chalk": "^4.0.0", @@ -14787,6 +15552,19 @@ "node": ">=4.0" } }, + "node_modules/protobufjs-cli/node_modules/levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", + "dev": true, + "dependencies": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/protobufjs-cli/node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", @@ -14799,10 +15577,36 @@ "node": ">=10" } }, + "node_modules/protobufjs-cli/node_modules/optionator": { + "version": "0.8.3", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", + "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", + "dev": true, + "dependencies": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.6", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "word-wrap": "~1.2.3" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/protobufjs-cli/node_modules/prelude-ls": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", + "dev": true, + "engines": { + "node": ">= 0.8.0" + } + }, "node_modules/protobufjs-cli/node_modules/semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", + "version": "7.6.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", + "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", "dev": true, "dependencies": { "lru-cache": "^6.0.0" @@ -14815,15 +15619,24 @@ } }, "node_modules/protobufjs-cli/node_modules/tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz", + "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==", + "dev": true, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/protobufjs-cli/node_modules/type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", "dev": true, "dependencies": { - "rimraf": "^3.0.0" + "prelude-ls": "~1.1.2" }, "engines": { - "node": ">=8.17.0" + "node": ">= 0.8.0" } }, "node_modules/protobufjs-cli/node_modules/yallist": { @@ -14929,6 +15742,15 @@ "rc": "cli.js" } }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": 
"sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/react": { "version": "17.0.2", "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", @@ -15024,9 +15846,9 @@ } }, "node_modules/react-refresh": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", - "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", + "version": "0.14.2", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.2.tgz", + "integrity": "sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==", "dev": true, "engines": { "node": ">=0.10.0" @@ -15174,6 +15996,58 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/read-pkg-up/node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up/node_modules/locate-path": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "dependencies": { + "p-try": "^2.0.0" + }, + 
"engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg-up/node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/read-pkg-up/node_modules/type-fest": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", @@ -15257,6 +16131,27 @@ "node": ">=8" } }, + "node_modules/reflect.getprototypeof": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.6.tgz", + "integrity": "sha512-fmfw4XgoDke3kdI6h4xcUz1dG8uaiv5q9gcEwLS4Pnth2kxT+GZ7YehS1JTMGBQmtV7Y4GFGbs2re2NqhdozUg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.1", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "globalthis": "^1.0.3", + "which-builtin-type": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/regenerate": { "version": "1.4.2", "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", @@ -15264,9 +16159,9 @@ "dev": true }, "node_modules/regenerate-unicode-properties": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", - "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz", + "integrity": 
"sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q==", "dev": true, "dependencies": { "regenerate": "^1.4.2" @@ -15276,28 +16171,29 @@ } }, "node_modules/regenerator-runtime": { - "version": "0.13.11", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", - "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + "version": "0.14.1", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==" }, "node_modules/regenerator-transform": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", - "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", + "version": "0.15.2", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", "dev": true, "dependencies": { "@babel/runtime": "^7.8.4" } }, "node_modules/regexp.prototype.flags": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", + "version": "1.5.2", + "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz", + "integrity": "sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" + "call-bind": "^1.0.6", + "define-properties": "^1.2.1", + "es-errors": "^1.3.0", + 
"set-function-name": "^2.0.1" }, "engines": { "node": ">= 0.4" @@ -15367,9 +16263,9 @@ } }, "node_modules/remove-accents": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.4.2.tgz", - "integrity": "sha512-7pXIJqJOq5tFgG1A2Zxti3Ht8jJF337m4sowbuHsW30ZnkQFnDzy9qBNhgzX8ZLW4+UBcXiiR7SwR6pokHsxiA==" + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.5.0.tgz", + "integrity": "sha512-8g3/Otx1eJaVD12e31UbJj1YzdtVvzH85HV7t+9MJYk/u3XmkOUJ5Ys9wQrf9PCPK8+xn4ymzqYCiZl6QWKn+A==" }, "node_modules/require-directory": { "version": "2.1.1", @@ -15405,12 +16301,12 @@ } }, "node_modules/resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", + "version": "1.22.8", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz", + "integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==", "dev": true, "dependencies": { - "is-core-module": "^2.9.0", + "is-core-module": "^2.13.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -15422,12 +16318,12 @@ } }, "node_modules/resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", "dev": true, "engines": { - "node": ">=8" + "node": ">=4" } }, "node_modules/resolve-pathname": { @@ -15472,15 +16368,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/rimraf/node_modules/brace-expansion": { - 
"version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/rimraf/node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -15500,21 +16387,10 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/rimraf/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/rollup": { - "version": "3.20.2", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.20.2.tgz", - "integrity": "sha512-3zwkBQl7Ai7MFYQE0y1MeQ15+9jsi7XxfrqwTb/9EK8D9C9+//EBR4M+CuA1KODRaNbFez/lWxA5vhEGZp4MUg==", + "version": "3.29.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", + "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", "dev": true, "bin": { "rollup": "dist/bin/rollup" @@ -15566,14 +16442,32 @@ } }, "node_modules/rxjs": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.0.tgz", - "integrity": "sha512-F2+gxDshqmIub1KdvZkaEfGDwLNpPvk9Fs6LD/MyQxNgMds/WH9OdDDXOmxUZpME+iSK3rQCctkL0DYyytUqMg==", + "version": "7.8.1", + "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz", + "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==", "dev": true, "dependencies": { "tslib": "^2.1.0" } }, + "node_modules/safe-array-concat": { + "version": "1.1.2", + "resolved": 
"https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz", + "integrity": "sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "get-intrinsic": "^1.2.4", + "has-symbols": "^1.0.3", + "isarray": "^2.0.5" + }, + "engines": { + "node": ">=0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/safe-buffer": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", @@ -15581,15 +16475,18 @@ "dev": true }, "node_modules/safe-regex-test": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.3.tgz", + "integrity": "sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", + "call-bind": "^1.0.6", + "es-errors": "^1.3.0", "is-regex": "^1.1.4" }, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -15601,9 +16498,9 @@ "dev": true }, "node_modules/sass": { - "version": "1.60.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.60.0.tgz", - "integrity": "sha512-updbwW6fNb5gGm8qMXzVO7V4sWf7LMXnMly/JEyfbfERbVH46Fn6q02BX7/eHTdKpE7d+oTkMMQpFWNUMfFbgQ==", + "version": "1.75.0", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.75.0.tgz", + "integrity": "sha512-ShMYi3WkrDWxExyxSZPst4/okE9ts46xZmJDSawJQrnte7M1V9fScVB+uNXOVKRBt0PggHOwoZcn8mYX4trnBw==", "dependencies": { "chokidar": ">=3.0.0 <4.0.0", "immutable": "^4.0.0", @@ -15613,7 +16510,7 @@ "sass": "sass.js" }, "engines": { - "node": ">=12.0.0" + "node": ">=14.0.0" } }, 
"node_modules/saxes": { @@ -15647,13 +16544,13 @@ } }, "node_modules/serve": { - "version": "14.2.0", - "resolved": "https://registry.npmjs.org/serve/-/serve-14.2.0.tgz", - "integrity": "sha512-+HOw/XK1bW8tw5iBilBz/mJLWRzM8XM6MPxL4J/dKzdxq1vfdEWSwhaR7/yS8EJp5wzvP92p1qirysJvnEtjXg==", + "version": "14.2.3", + "resolved": "https://registry.npmjs.org/serve/-/serve-14.2.3.tgz", + "integrity": "sha512-VqUFMC7K3LDGeGnJM9h56D3XGKb6KGgOw0cVNtA26yYXHCcpxf3xwCTUaQoWlVS7i8Jdh3GjQkOB23qsXyjoyQ==", "dev": true, "dependencies": { - "@zeit/schemas": "2.29.0", - "ajv": "8.11.0", + "@zeit/schemas": "2.36.0", + "ajv": "8.12.0", "arg": "5.0.2", "boxen": "7.0.0", "chalk": "5.0.1", @@ -15687,16 +16584,6 @@ "range-parser": "1.2.0" } }, - "node_modules/serve-handler/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, "node_modules/serve-handler/node_modules/mime-db": { "version": "1.33.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", @@ -15718,24 +16605,28 @@ "node": ">= 0.6" } }, - "node_modules/serve-handler/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, "node_modules/serve-handler/node_modules/path-to-regexp": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==", "dev": true }, + 
"node_modules/serve/node_modules/ajv": { + "version": "8.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz", + "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/serve/node_modules/chalk": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz", @@ -15748,12 +16639,50 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/serve/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, "node_modules/set-cookie-parser": { "version": "2.6.0", "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz", "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==", "dev": true }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-name": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/set-function-name/-/set-function-name-2.0.2.tgz", + "integrity": 
"sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==", + "dev": true, + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "functions-have-names": "^1.2.3", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -15776,14 +16705,18 @@ } }, "node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", "dev": true, "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -15836,9 +16769,9 @@ } }, "node_modules/source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": "sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", "engines": { "node": ">=0.10.0" } @@ -15854,9 +16787,9 @@ } }, "node_modules/spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - 
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", "dev": true }, "node_modules/spdx-expression-parse": { @@ -15870,9 +16803,9 @@ } }, "node_modules/spdx-license-ids": { - "version": "3.0.13", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.13.tgz", - "integrity": "sha512-XkD+zwiqXHikFZm4AX/7JSCXA98U5Db4AFd5XUg/+9UNtnH75+Z9KxtpYiJZx36mUDVOwH83pl7yvCer6ewM3w==", + "version": "3.0.17", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz", + "integrity": "sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg==", "dev": true }, "node_modules/split-on-first": { @@ -15931,23 +16864,11 @@ } }, "node_modules/std-env": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.2.tgz", - "integrity": "sha512-uUZI65yrV2Qva5gqE0+A7uVAvO40iPo6jGhs7s8keRfHCmtg+uB2X6EiLGCI9IgL1J17xGhvoOqSz79lzICPTA==", + "version": "3.7.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.7.0.tgz", + "integrity": "sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==", "dev": true }, - "node_modules/stop-iteration-iterator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", - "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", - "dev": true, - "dependencies": { - "internal-slot": "^1.0.4" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/strict-event-emitter": { "version": "0.2.8", "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz", @@ 
-16014,34 +16935,69 @@ "node": ">=8" } }, - "node_modules/string.prototype.matchall": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz", - "integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==", + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/string-width/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true + }, + "node_modules/string.prototype.matchall": { + "version": "4.0.11", + "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.11.tgz", + "integrity": "sha512-NUdh0aDavY2og7IbBPenWqR9exH+E26Sv8e0/eTe1tltDGZL+GtBkDAnnyBtmekfK6/Dq3MkcGtzXFEd1LQrtg==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + 
"es-abstract": "^1.23.2", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.0.0", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-symbols": "^1.0.3", + "internal-slot": "^1.0.7", + "regexp.prototype.flags": "^1.5.2", + "set-function-name": "^2.0.2", + "side-channel": "^1.0.6" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/string.prototype.trim": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz", - "integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==", + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz", + "integrity": "sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-abstract": "^1.23.0", + "es-object-atoms": "^1.0.0" }, "engines": { "node": ">= 0.4" @@ -16051,28 +17007,31 @@ } }, "node_modules/string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz", + "integrity": "sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, 
"node_modules/string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz", + "integrity": "sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" + "call-bind": "^1.0.7", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -16090,6 +17049,19 @@ "node": ">=8" } }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-bom": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", @@ -16121,21 +17093,24 @@ } }, "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", "dev": true, "engines": { - "node": ">=0.10.0" + "node": ">=8" + }, + "funding": { 
+ "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/strip-literal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-1.0.1.tgz", - "integrity": "sha512-QZTsipNpa2Ppr6v1AmJHESqJ3Uz247MUS0OjrnnZjFAvEoWqxuyFuXn2xLgMtRnijJShAa1HL0gtJyUs7u7n3Q==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-1.3.0.tgz", + "integrity": "sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==", "dev": true, "dependencies": { - "acorn": "^8.8.2" + "acorn": "^8.10.0" }, "funding": { "url": "https://github.com/sponsors/antfu" @@ -16268,12 +17243,11 @@ } }, "node_modules/stylelint-scss": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.6.0.tgz", - "integrity": "sha512-M+E0BQim6G4XEkaceEhfVjP/41C9Klg5/tTPTCQVlgw/jm2tvB+OXJGaU0TDP5rnTCB62aX6w+rT+gqJW/uwjA==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.7.0.tgz", + "integrity": "sha512-TSUgIeS0H3jqDZnby1UO1Qv3poi1N8wUYIJY6D1tuUq2MN3lwp/rITVo0wD+1SWTmRm0tNmGO0b7nKInnqF6Hg==", "dev": true, "dependencies": { - "dlv": "^1.1.3", "postcss-media-query-parser": "^0.2.3", "postcss-resolve-nested-selector": "^0.1.1", "postcss-selector-parser": "^6.0.11", @@ -16289,14 +17263,24 @@ "integrity": "sha512-1ugUSr8BHXRnK23KfuYS+gVMC3LB8QGH9W1iGtDPsNWoQbgtXSExkBu2aDR4epiGWZOjZsj6lDl/N/AqqTC3UA==", "dev": true }, + "node_modules/stylelint/node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "engines": { + "node": ">=8" + } + }, "node_modules/sucrase": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.31.0.tgz", - "integrity": 
"sha512-6QsHnkqyVEzYcaiHsOKkzOtOgdJcb8i54x6AV2hDwyZcY9ZyykGZVw6L/YN98xC0evwTP6utsWWrKRaa8QlfEQ==", + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", "dev": true, "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", "commander": "^4.0.0", - "glob": "7.1.6", + "glob": "^10.3.10", "lines-and-columns": "^1.1.6", "mz": "^2.7.0", "pirates": "^4.0.1", @@ -16307,17 +17291,16 @@ "sucrase-node": "bin/sucrase-node" }, "engines": { - "node": ">=8" + "node": ">=16 || 14 >=14.17" } }, "node_modules/sucrase/node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "dev": true, "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^1.0.0" } }, "node_modules/sucrase/node_modules/commander": { @@ -16330,35 +17313,40 @@ } }, "node_modules/sucrase/node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", + "version": "10.3.12", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.12.tgz", + "integrity": "sha512-TCNv8vJ+xz4QiqTpfOJA7HvYv+tNIRHKfUWw/q+v2jdgN4ebz+KY9tGx5J4rHP0o84mNP+ApH66HRX8us3Khqg==", "dev": true, "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" + "foreground-child": 
"^3.1.0", + "jackspeak": "^2.3.6", + "minimatch": "^9.0.1", + "minipass": "^7.0.4", + "path-scurry": "^1.10.2" + }, + "bin": { + "glob": "dist/esm/bin.mjs" }, "engines": { - "node": "*" + "node": ">=16 || 14 >=14.17" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/sucrase/node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "version": "9.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz", + "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==", "dev": true, "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/supports-color": { @@ -16410,14 +17398,15 @@ "dev": true }, "node_modules/svgo": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.0.2.tgz", - "integrity": "sha512-Z706C1U2pb1+JGP48fbazf3KxHrWOsLme6Rv7imFBn5EnuanDW1GPaA/P1/dvObE670JDePC3mnj0k0B7P0jjQ==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.2.0.tgz", + "integrity": "sha512-4PP6CMW/V7l/GmKRKzsLR8xxjdHTV4IMvhTnpuHwwBazSIlw5W/5SmPjN8Dwyt7lKbSJrRDgp4t9ph0HgChFBQ==", "dependencies": { "@trysound/sax": "0.2.0", "commander": "^7.2.0", "css-select": "^5.1.0", - "css-tree": "^2.2.1", + "css-tree": "^2.3.1", + "css-what": "^6.1.0", "csso": "^5.0.5", "picocolors": "^1.0.0" }, @@ -16439,9 +17428,9 @@ "dev": true }, "node_modules/table": { - "version": "6.8.1", - "resolved": "https://registry.npmjs.org/table/-/table-6.8.1.tgz", - "integrity": "sha512-Y4X9zqrCftUhMeH2EptSSERdVKt/nEdijTOacGD/97EKjhQ/Qs8RTlEGABSJNNN8lac9kheH+af7yAkEWlgneA==", + "version": "6.8.2", + "resolved": 
"https://registry.npmjs.org/table/-/table-6.8.2.tgz", + "integrity": "sha512-w2sfv80nrAh2VCbqR5AK27wswXhqcck2AhfnNW76beQXskGZ1V12GwS//yYVa3d3fcvAip2OUnbDAjW2k3v9fA==", "dev": true, "dependencies": { "ajv": "^8.0.1", @@ -16454,70 +17443,63 @@ "node": ">=10.0.0" } }, + "node_modules/table/node_modules/ajv": { + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.13.0.tgz", + "integrity": "sha512-PRA911Blj99jR5RMeTunVbNXMF6Lp4vZXnk5GQjcnUWUTsrXtekg/pnmFFI2u/I36Y/2bITGS30GZCXei6uNkA==", + "dev": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2", + "uri-js": "^4.4.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/table/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true + }, "node_modules/tailwindcss": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.1.tgz", - "integrity": "sha512-Vkiouc41d4CEq0ujXl6oiGFQ7bA3WEhUZdTgXAhtKxSy49OmKs8rEfQmupsfF0IGW8fv2iQkp1EVUuapCFrZ9g==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.3.tgz", + "integrity": "sha512-U7sxQk/n397Bmx4JHbJx/iSOOv5G+II3f1kpLpY2QeUv5DcPdcTsYLlusZfq1NthHS1c1cZoyFmmkex1rzke0A==", "dev": true, "dependencies": { + "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", "chokidar": "^3.5.3", - "color-name": "^1.1.4", "didyoumean": "^1.2.2", "dlv": "^1.1.3", - "fast-glob": "^3.2.12", + "fast-glob": "^3.3.0", "glob-parent": "^6.0.2", "is-glob": "^4.0.3", - "jiti": "^1.17.2", - "lilconfig": "^2.0.6", + "jiti": "^1.21.0", + "lilconfig": "^2.1.0", "micromatch": "^4.0.5", "normalize-path": "^3.0.0", "object-hash": "^3.0.0", "picocolors": 
"^1.0.0", - "postcss": "^8.0.9", - "postcss-import": "^14.1.0", - "postcss-js": "^4.0.0", - "postcss-load-config": "^3.1.4", - "postcss-nested": "6.0.0", + "postcss": "^8.4.23", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.1", + "postcss-nested": "^6.0.1", "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0", - "quick-lru": "^5.1.1", - "resolve": "^1.22.1", - "sucrase": "^3.29.0" + "resolve": "^1.22.2", + "sucrase": "^3.32.0" }, "bin": { "tailwind": "lib/cli.js", "tailwindcss": "lib/cli.js" }, "engines": { - "node": ">=12.13.0" - }, - "peerDependencies": { - "postcss": "^8.0.9" - } - }, - "node_modules/tailwindcss/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "dependencies": { - "is-glob": "^4.0.3" - }, - "engines": { - "node": ">=10.13.0" - } - }, - "node_modules/tailwindcss/node_modules/quick-lru": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=14.0.0" } }, "node_modules/text-table": { @@ -16554,9 +17536,9 @@ "dev": true }, "node_modules/tiny-invariant": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz", - "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==" + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==" }, 
"node_modules/tiny-warning": { "version": "1.0.3", @@ -16564,9 +17546,9 @@ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" }, "node_modules/tinybench": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.4.0.tgz", - "integrity": "sha512-iyziEiyFxX4kyxSp+MtY1oCH/lvjH3PxFN8PGCDeqcZWAJ/i+9y+nL85w99PxVzrIvew/GSkSbDYtiGVa85Afg==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.8.0.tgz", + "integrity": "sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==", "dev": true }, "node_modules/tinypool": { @@ -16620,9 +17602,9 @@ } }, "node_modules/tough-cookie": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", - "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", + "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", "dev": true, "dependencies": { "psl": "^1.1.33", @@ -16635,9 +17617,9 @@ } }, "node_modules/tough-cookie/node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, "engines": { "node": ">=6" @@ -16656,9 +17638,9 @@ } }, "node_modules/tr46/node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": 
"sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, "engines": { "node": ">=6" @@ -16680,9 +17662,9 @@ "dev": true }, "node_modules/tsconfig-paths": { - "version": "3.14.2", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz", - "integrity": "sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==", + "version": "3.15.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.15.0.tgz", + "integrity": "sha512-2Ac2RgzDe/cn48GvOe3M+o82pEFewD3UPbyoUHHdKasHwJKjds4fLXWf/Ux5kATBKN20oaFGu+jbElp1pos0mg==", "dev": true, "dependencies": { "@types/json5": "^0.0.29", @@ -16704,9 +17686,9 @@ } }, "node_modules/tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==" + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, "node_modules/tsutils": { "version": "3.21.0", @@ -16730,12 +17712,12 @@ "dev": true }, "node_modules/type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", "dev": true, "dependencies": { - "prelude-ls": "~1.1.2" + 
"prelude-ls": "^1.2.1" }, "engines": { "node": ">= 0.8.0" @@ -16762,31 +17744,90 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/typed-array-buffer": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", + "integrity": "sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/typed-array-byte-length": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz", + "integrity": "sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==", + "dev": true, + "dependencies": { + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/typed-array-byte-offset": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz", + "integrity": "sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==", + "dev": true, + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/typed-array-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", - "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==", + 
"version": "1.0.6", + "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.6.tgz", + "integrity": "sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==", "dev": true, "dependencies": { - "call-bind": "^1.0.2", + "call-bind": "^1.0.7", "for-each": "^0.3.3", - "is-typed-array": "^1.1.9" + "gopd": "^1.0.1", + "has-proto": "^1.0.3", + "is-typed-array": "^1.1.13", + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/typescript": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.3.tgz", - "integrity": "sha512-xv8mOEDnigb/tN9PSMTwSEqAnUvkoXMQlicOb0IUVDBSQCgBSaAAROUZYy2IcUy5qU6XajK5jjjO7TMWqBTKZA==", + "version": "5.4.5", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz", + "integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==", "dev": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" }, "engines": { - "node": ">=12.20" + "node": ">=14.17" } }, "node_modules/uc.micro": { @@ -16796,9 +17837,9 @@ "dev": true }, "node_modules/ufo": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.1.1.tgz", - "integrity": "sha512-MvlCc4GHrmZdAllBc0iUDowff36Q9Ndw/UzqmEKyrfSzokTd9ZCy1i+IIk5hrYKkjoYVQyNbrw7/F8XJ2rEwTg==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.3.tgz", + "integrity": "sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==", "dev": true }, "node_modules/uglify-js": { @@ -16893,9 +17934,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz", - "integrity": 
"sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==", + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", + "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", "funding": [ { "type": "opencollective", @@ -16904,6 +17945,10 @@ { "type": "tidelift", "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" } ], "dependencies": { @@ -16911,7 +17956,7 @@ "picocolors": "^1.0.0" }, "bin": { - "browserslist-lint": "cli.js" + "update-browserslist-db": "cli.js" }, "peerDependencies": { "browserslist": ">= 4.21.0" @@ -16937,9 +17982,9 @@ } }, "node_modules/uri-js/node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, "engines": { "node": ">=6" @@ -16961,9 +18006,9 @@ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, "node_modules/v8-compile-cache": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", - "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.4.0.tgz", + "integrity": "sha512-ocyWc3bAHBB/guyqJQVI5o4BZkPhznPYUG2ea80Gond/BgNWpap8TOmLSeeQG7bnh2KMISxskdADG59j7zruhw==", "dev": true }, "node_modules/validate-npm-package-license": { @@ -16991,15 
+18036,14 @@ } }, "node_modules/vite": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/vite/-/vite-4.2.3.tgz", - "integrity": "sha512-kLU+m2q0Y434Y1kCy3TchefAdtFso0ILi0dLyFV8Us3InXTU11H/B5ZTqCKIQHzSKNxVG/yEx813EA9f1imQ9A==", + "version": "4.5.3", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.3.tgz", + "integrity": "sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg==", "dev": true, "dependencies": { - "esbuild": "^0.17.5", - "postcss": "^8.4.21", - "resolve": "^1.22.1", - "rollup": "^3.18.0" + "esbuild": "^0.18.10", + "postcss": "^8.4.27", + "rollup": "^3.27.1" }, "bin": { "vite": "bin/vite.js" @@ -17007,12 +18051,16 @@ "engines": { "node": "^14.18.0 || >=16.0.0" }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, "optionalDependencies": { "fsevents": "~2.3.2" }, "peerDependencies": { "@types/node": ">= 14", "less": "*", + "lightningcss": "^1.21.0", "sass": "*", "stylus": "*", "sugarss": "*", @@ -17025,6 +18073,9 @@ "less": { "optional": true }, + "lightningcss": { + "optional": true + }, "sass": { "optional": true }, @@ -17217,9 +18268,9 @@ } }, "node_modules/web-vitals": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/web-vitals/-/web-vitals-3.3.0.tgz", - "integrity": "sha512-GZsEmJBNclIpViS/7QVOTr7Kbt4BgLeR7kQ5zCCtJVuiWsA+K6xTXaoEXssvl8yYFICEyNmA2Nr+vgBYTnS4bA==" + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/web-vitals/-/web-vitals-3.5.2.tgz", + "integrity": "sha512-c0rhqNcHXRkY/ogGDJQxZ9Im9D19hDihbzSQJrsioex+KnFgmMzBiy57Z1EjkhX/+OjyBpclDCzz2ITtjokFmg==" }, "node_modules/webidl-conversions": { "version": "7.0.0", @@ -17307,33 +18358,61 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/which-builtin-type": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz", + "integrity": 
"sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==", + "dev": true, + "dependencies": { + "function.prototype.name": "^1.1.5", + "has-tostringtag": "^1.0.0", + "is-async-function": "^2.0.0", + "is-date-object": "^1.0.5", + "is-finalizationregistry": "^1.0.2", + "is-generator-function": "^1.0.10", + "is-regex": "^1.1.4", + "is-weakref": "^1.0.2", + "isarray": "^2.0.5", + "which-boxed-primitive": "^1.0.2", + "which-collection": "^1.0.1", + "which-typed-array": "^1.1.9" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/which-collection": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", - "integrity": "sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.2.tgz", + "integrity": "sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw==", "dev": true, "dependencies": { - "is-map": "^2.0.1", - "is-set": "^2.0.1", - "is-weakmap": "^2.0.1", - "is-weakset": "^2.0.1" + "is-map": "^2.0.3", + "is-set": "^2.0.3", + "is-weakmap": "^2.0.2", + "is-weakset": "^2.0.3" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/which-typed-array": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.15.tgz", + "integrity": "sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==", "dev": true, "dependencies": { - "available-typed-arrays": 
"^1.0.5", - "call-bind": "^1.0.2", + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.7", "for-each": "^0.3.3", "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" + "has-tostringtag": "^1.0.2" }, "engines": { "node": ">= 0.4" @@ -17385,12 +18464,6 @@ "url": "https://github.com/chalk/ansi-regex?sponsor=1" } }, - "node_modules/widest-line/node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, "node_modules/widest-line/node_modules/string-width": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", @@ -17409,9 +18482,9 @@ } }, "node_modules/widest-line/node_modules/strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", "dev": true, "dependencies": { "ansi-regex": "^6.0.1" @@ -17424,15 +18497,30 @@ } }, "node_modules/word-wrap": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", - "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", "dev": true, "engines": { "node": ">=0.10.0" } }, "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": 
"https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", @@ -17468,9 +18556,9 @@ } }, "node_modules/ws": { - "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", + "version": "8.17.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.0.tgz", + "integrity": "sha512-uJq6108EgZMAl20KagGkzCKfMEjxmKvZHG7Tlq0Z6nOky7YF7aq4mOx6xK8TJ/i1LeK4Qus7INktacctDgY8Ow==", "dev": true, "engines": { "node": ">=10.0.0" @@ -17534,9 +18622,9 @@ } }, "node_modules/yargs": { - "version": "17.7.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz", - "integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==", + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "dev": true, "dependencies": { "cliui": "^8.0.1", @@ -17570,12 +18658,12 @@ } }, "node_modules/yocto-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", - "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + 
"integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "dev": true, "engines": { - "node": ">=12.20" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -17597,12286 +18685,5 @@ } } } - }, - "dependencies": { - "@adobe/css-tools": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.3.1.tgz", - "integrity": "sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==", - "dev": true - }, - "@ampproject/remapping": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.0.tgz", - "integrity": "sha512-qRmjj8nj9qmLTQXXmaR1cck3UXSRMPrbsLJAasZpF+t3riI71BXed5ebIOYwQntykeZuhjsdweEc9BxH5Jc26w==", - "dev": true, - "requires": { - "@jridgewell/gen-mapping": "^0.1.0", - "@jridgewell/trace-mapping": "^0.3.9" - } - }, - "@babel/code-frame": { - "version": "7.22.13", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.22.13.tgz", - "integrity": "sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==", - "requires": { - "@babel/highlight": "^7.22.13", - "chalk": "^2.4.2" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "@babel/compat-data": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.21.4.tgz", - "integrity": "sha512-/DYyDpeCfaVinT40FPGdkkb+lYSKvsVuMjDAG7jPOWWiM1ibOaB9CXJAlc4d1QpP/U2q2P9jbrSlClKSErd55g==", - "dev": true - }, - "@babel/core": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.21.4.tgz", - "integrity": "sha512-qt/YV149Jman/6AfmlxJ04LMIu8bMoyl3RB91yTFrxQmgbrSvQMy7cI8Q62FHx1t8wJ8B5fu0UDoLwHAhUo1QA==", - "dev": true, - "requires": { - "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.21.4", - "@babel/generator": "^7.21.4", - "@babel/helper-compilation-targets": "^7.21.4", - "@babel/helper-module-transforms": "^7.21.2", - "@babel/helpers": "^7.21.0", - "@babel/parser": "^7.21.4", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.4", - "@babel/types": "^7.21.4", - "convert-source-map": "^1.7.0", - "debug": "^4.1.0", - "gensync": "^1.0.0-beta.2", - "json5": "^2.2.2", - 
"semver": "^6.3.0" - } - }, - "@babel/eslint-parser": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/eslint-parser/-/eslint-parser-7.21.3.tgz", - "integrity": "sha512-kfhmPimwo6k4P8zxNs8+T7yR44q1LdpsZdE1NkCsVlfiuTPRfnGgjaF8Qgug9q9Pou17u6wneYF0lDCZJATMFg==", - "dev": true, - "requires": { - "@nicolo-ribaudo/eslint-scope-5-internals": "5.1.1-v1", - "eslint-visitor-keys": "^2.1.0", - "semver": "^6.3.0" - }, - "dependencies": { - "eslint-visitor-keys": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz", - "integrity": "sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==", - "dev": true - } - } - }, - "@babel/generator": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.23.0.tgz", - "integrity": "sha512-lN85QRR+5IbYrMWM6Y4pE/noaQtg4pNiqeNGX60eqOfo6gtEj6uw/JagelB8vVztSd7R6M5n1+PQkDbHbBRU4g==", - "dev": true, - "requires": { - "@babel/types": "^7.23.0", - "@jridgewell/gen-mapping": "^0.3.2", - "@jridgewell/trace-mapping": "^0.3.17", - "jsesc": "^2.5.1" - }, - "dependencies": { - "@jridgewell/gen-mapping": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.2.tgz", - "integrity": "sha512-mh65xKQAzI6iBcFzwv28KVWSmCkdRBWoOh+bYQGW3+6OZvbbN3TqMGo5hqYxQniRcH9F2VZIoJCm4pa3BPDK/A==", - "dev": true, - "requires": { - "@jridgewell/set-array": "^1.0.1", - "@jridgewell/sourcemap-codec": "^1.4.10", - "@jridgewell/trace-mapping": "^0.3.9" - } - } - } - }, - "@babel/helper-annotate-as-pure": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.18.6.tgz", - "integrity": "sha512-duORpUiYrEpzKIop6iNbjnwKLAKnJ47csTyRACyEmWj0QdUrm5aqNJGHSSEQSUAvNW0ojX0dOmK9dZduvkfeXA==", - "dev": true, - "requires": { - "@babel/types": "^7.18.6" - } - }, - 
"@babel/helper-builder-binary-assignment-operator-visitor": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.18.9.tgz", - "integrity": "sha512-yFQ0YCHoIqarl8BCRwBL8ulYUaZpz3bNsA7oFepAzee+8/+ImtADXNOmO5vJvsPff3qi+hvpkY/NYBTrBQgdNw==", - "dev": true, - "requires": { - "@babel/helper-explode-assignable-expression": "^7.18.6", - "@babel/types": "^7.18.9" - } - }, - "@babel/helper-compilation-targets": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.21.4.tgz", - "integrity": "sha512-Fa0tTuOXZ1iL8IeDFUWCzjZcn+sJGd9RZdH9esYVjEejGmzf+FFYQpMi/kZUk2kPy/q1H3/GPw7np8qar/stfg==", - "dev": true, - "requires": { - "@babel/compat-data": "^7.21.4", - "@babel/helper-validator-option": "^7.21.0", - "browserslist": "^4.21.3", - "lru-cache": "^5.1.1", - "semver": "^6.3.0" - } - }, - "@babel/helper-create-class-features-plugin": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.21.4.tgz", - "integrity": "sha512-46QrX2CQlaFRF4TkwfTt6nJD7IHq8539cCL7SDpqWSDeJKY1xylKKY5F/33mJhLZ3mFvKv2gGrVS6NkyF6qs+Q==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-member-expression-to-functions": "^7.21.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/helper-split-export-declaration": "^7.18.6" - } - }, - "@babel/helper-create-regexp-features-plugin": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.21.4.tgz", - "integrity": 
"sha512-M00OuhU+0GyZ5iBBN9czjugzWrEq2vDpf/zCYHxxf93ul/Q5rv+a5h+/+0WnI1AebHNVtl5bFV0qsJoH23DbfA==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "regexpu-core": "^5.3.1" - } - }, - "@babel/helper-define-polyfill-provider": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.3.tgz", - "integrity": "sha512-z5aQKU4IzbqCC1XH0nAqfsFLMVSo22SBKUc0BxGrLkolTdPTructy0ToNnlO2zA4j9Q/7pjMZf0DSY+DSTYzww==", - "dev": true, - "requires": { - "@babel/helper-compilation-targets": "^7.17.7", - "@babel/helper-plugin-utils": "^7.16.7", - "debug": "^4.1.1", - "lodash.debounce": "^4.0.8", - "resolve": "^1.14.2", - "semver": "^6.1.2" - } - }, - "@babel/helper-environment-visitor": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz", - "integrity": "sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA==", - "dev": true - }, - "@babel/helper-explode-assignable-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.18.6.tgz", - "integrity": "sha512-eyAYAsQmB80jNfg4baAtLeWAQHfHFiR483rzFK+BhETlGZaQC9bsfrugfXDCbRHLQbIA7U5NxhhOxN7p/dWIcg==", - "dev": true, - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-function-name": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz", - "integrity": "sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw==", - "dev": true, - "requires": { - "@babel/template": "^7.22.15", - "@babel/types": "^7.23.0" - } - }, - "@babel/helper-hoist-variables": { - "version": "7.22.5", - "resolved": 
"https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz", - "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==", - "dev": true, - "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-member-expression-to-functions": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.21.0.tgz", - "integrity": "sha512-Muu8cdZwNN6mRRNG6lAYErJ5X3bRevgYR2O8wN0yn7jJSnGDu6eG59RfT29JHxGUovyfrh6Pj0XzmR7drNVL3Q==", - "dev": true, - "requires": { - "@babel/types": "^7.21.0" - } - }, - "@babel/helper-module-imports": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.21.4.tgz", - "integrity": "sha512-orajc5T2PsRYUN3ZryCEFeMDYwyw09c/pZeaQEZPH0MpKzSvn3e0uXsDBu3k03VI+9DBiRo+l22BfKTpKwa/Wg==", - "dev": true, - "requires": { - "@babel/types": "^7.21.4" - } - }, - "@babel/helper-module-transforms": { - "version": "7.21.2", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.21.2.tgz", - "integrity": "sha512-79yj2AR4U/Oqq/WOV7Lx6hUjau1Zfo4cI+JLAVYeMV5XIlbOhmjEk5ulbTc9fMpmlojzZHkUUxAiK+UKn+hNQQ==", - "dev": true, - "requires": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-simple-access": "^7.20.2", - "@babel/helper-split-export-declaration": "^7.18.6", - "@babel/helper-validator-identifier": "^7.19.1", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.2", - "@babel/types": "^7.21.2" - } - }, - "@babel/helper-optimise-call-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.18.6.tgz", - "integrity": 
"sha512-HP59oD9/fEHQkdcbgFCnbmgH5vIQTJbxh2yf+CdM89/glUNnuzr87Q8GIjGEnOktTROemO0Pe0iPAYbqZuOUiA==", - "dev": true, - "requires": { - "@babel/types": "^7.18.6" - } - }, - "@babel/helper-plugin-utils": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.20.2.tgz", - "integrity": "sha512-8RvlJG2mj4huQ4pZ+rU9lqKi9ZKiRmuvGuM2HlWmkmgOhbs6zEAw6IEiJ5cQqGbDzGZOhwuOQNtZMi/ENLjZoQ==", - "dev": true - }, - "@babel/helper-remap-async-to-generator": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.18.9.tgz", - "integrity": "sha512-dI7q50YKd8BAv3VEfgg7PS7yD3Rtbi2J1XMXaalXO0W0164hYLnh8zpjRS0mte9MfVp/tltvr/cfdXPvJr1opA==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-wrap-function": "^7.18.9", - "@babel/types": "^7.18.9" - } - }, - "@babel/helper-replace-supers": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.20.7.tgz", - "integrity": "sha512-vujDMtB6LVfNW13jhlCrp48QNslK6JXi7lQG736HVbHz/mbf4Dc7tIRh1Xf5C0rF7BP8iiSxGMCmY6Ci1ven3A==", - "dev": true, - "requires": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-member-expression-to-functions": "^7.20.7", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.20.7", - "@babel/types": "^7.20.7" - } - }, - "@babel/helper-simple-access": { - "version": "7.20.2", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.20.2.tgz", - "integrity": "sha512-+0woI/WPq59IrqDYbVGfshjT5Dmk/nnbdpcF8SnMhhXObpTq2KNBdLFRFrkVdbDOyUmHBCxzm5FHV1rACIkIbA==", - "dev": true, - "requires": { - "@babel/types": "^7.20.2" - } - }, - "@babel/helper-skip-transparent-expression-wrappers": { - "version": "7.20.0", - 
"resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.20.0.tgz", - "integrity": "sha512-5y1JYeNKfvnT8sZcK9DVRtpTbGiomYIHviSP3OQWmDPU3DeH4a1ZlT/N2lyQ5P8egjcRaT/Y9aNqUxK0WsnIIg==", - "dev": true, - "requires": { - "@babel/types": "^7.20.0" - } - }, - "@babel/helper-split-export-declaration": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz", - "integrity": "sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g==", - "dev": true, - "requires": { - "@babel/types": "^7.22.5" - } - }, - "@babel/helper-string-parser": { - "version": "7.22.5", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.22.5.tgz", - "integrity": "sha512-mM4COjgZox8U+JcXQwPijIZLElkgEpO5rsERVDJTc2qfCDfERyob6k5WegS14SX18IIjv+XD+GrqNumY5JRCDw==", - "dev": true - }, - "@babel/helper-validator-identifier": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz", - "integrity": "sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==" - }, - "@babel/helper-validator-option": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.21.0.tgz", - "integrity": "sha512-rmL/B8/f0mKS2baE9ZpyTcTavvEuWhTTW8amjzXNvYG4AwBsqTLikfXsEofsJEfKHf+HQVQbFOHy6o+4cnC/fQ==", - "dev": true - }, - "@babel/helper-wrap-function": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.20.5.tgz", - "integrity": "sha512-bYMxIWK5mh+TgXGVqAtnu5Yn1un+v8DDZtqyzKRLUzrh70Eal2O3aZ7aPYiMADO4uKlkzOiRiZ6GX5q3qxvW9Q==", - "dev": true, - "requires": { - "@babel/helper-function-name": "^7.19.0", - "@babel/template": 
"^7.18.10", - "@babel/traverse": "^7.20.5", - "@babel/types": "^7.20.5" - } - }, - "@babel/helpers": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.21.0.tgz", - "integrity": "sha512-XXve0CBtOW0pd7MRzzmoyuSj0e3SEzj8pgyFxnTT1NJZL38BD1MK7yYrm8yefRPIDvNNe14xR4FdbHwpInD4rA==", - "dev": true, - "requires": { - "@babel/template": "^7.20.7", - "@babel/traverse": "^7.21.0", - "@babel/types": "^7.21.0" - } - }, - "@babel/highlight": { - "version": "7.22.20", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.22.20.tgz", - "integrity": "sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==", - "requires": { - "@babel/helper-validator-identifier": "^7.22.20", - "chalk": "^2.4.2", - "js-tokens": "^4.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, - "has-flag": 
{ - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==" - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "@babel/parser": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.23.0.tgz", - "integrity": "sha512-vvPKKdMemU85V9WE/l5wZEmImpCtLqbnTvqDS2U1fJ96KrxoW7KrXhNsNCblQlg8Ck4b85yxdTyelsMUgFUXiw==", - "dev": true - }, - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.18.6.tgz", - "integrity": "sha512-Dgxsyg54Fx1d4Nge8UnvTrED63vrwOdPmyvPzlNN/boaliRP54pm3pGzZD1SJUwrBA+Cs/xdG8kXX6Mn/RfISQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.20.7.tgz", - "integrity": "sha512-sbr9+wNE5aXMBBFBICk01tt7sBf2Oc9ikRFEcem/ZORup9IMUdNhW7/wVLEbbtlWOsEubJet46mHAL2C8+2jKQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/plugin-proposal-optional-chaining": "^7.20.7" - } - }, - "@babel/plugin-proposal-async-generator-functions": { - "version": "7.20.7", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.7.tgz", - "integrity": "sha512-xMbiLsn/8RK7Wq7VeVytytS2L6qE69bXPB10YCmMdDZbKF4okCqY74pI/jJQ/8U0b/F6NrT2+14b8/P9/3AMGA==", - "dev": true, - "requires": { - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-remap-async-to-generator": "^7.18.9", - "@babel/plugin-syntax-async-generators": "^7.8.4" - } - }, - "@babel/plugin-proposal-class-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz", - "integrity": "sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ==", - "dev": true, - "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-proposal-class-static-block": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.21.0.tgz", - "integrity": "sha512-XP5G9MWNUskFuP30IfFSEFB0Z6HzLIUcjYM4bYOPHXl7eiJ9HFv8tWj6TXTN5QODiEhDZAeI4hLok2iHFFV4hw==", - "dev": true, - "requires": { - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-class-static-block": "^7.14.5" - } - }, - "@babel/plugin-proposal-decorators": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.21.0.tgz", - "integrity": "sha512-MfgX49uRrFUTL/HvWtmx3zmpyzMMr4MTj3d527MLlr/4RTT9G/ytFFP7qet2uM2Ve03b+BkpWUpK+lRXnQ+v9w==", - "dev": true, - "requires": { - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-split-export-declaration": "^7.18.6", - 
"@babel/plugin-syntax-decorators": "^7.21.0" - } - }, - "@babel/plugin-proposal-dynamic-import": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.18.6.tgz", - "integrity": "sha512-1auuwmK+Rz13SJj36R+jqFPMJWyKEDd7lLSdOj4oJK0UTgGueSAtkrCvz9ewmgyU/P941Rv2fQwZJN8s6QruXw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-dynamic-import": "^7.8.3" - } - }, - "@babel/plugin-proposal-export-namespace-from": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.18.9.tgz", - "integrity": "sha512-k1NtHyOMvlDDFeb9G5PhUXuGj8m/wiwojgQVEhJ/fsVsMCpLyOP4h0uGEjYJKrRI+EVPlb5Jk+Gt9P97lOGwtA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.9", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3" - } - }, - "@babel/plugin-proposal-json-strings": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.18.6.tgz", - "integrity": "sha512-lr1peyn9kOdbYc0xr0OdHTZ5FMqS6Di+H0Fz2I/JwMzGmzJETNeOFq2pBySw6X/KFL5EWDjlJuMsUGRFb8fQgQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-json-strings": "^7.8.3" - } - }, - "@babel/plugin-proposal-logical-assignment-operators": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.20.7.tgz", - "integrity": "sha512-y7C7cZgpMIjWlKE5T7eJwp+tnRYM89HmRvWM5EQuB5BoHEONjmQ8lSNmBUwOyy/GFRsohJED51YBF79hE1djug==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4" - } - }, - "@babel/plugin-proposal-nullish-coalescing-operator": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz", - "integrity": "sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3" - } - }, - "@babel/plugin-proposal-numeric-separator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz", - "integrity": "sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-numeric-separator": "^7.10.4" - } - }, - "@babel/plugin-proposal-object-rest-spread": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz", - "integrity": "sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg==", - "dev": true, - "requires": { - "@babel/compat-data": "^7.20.5", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-transform-parameters": "^7.20.7" - } - }, - "@babel/plugin-proposal-optional-catch-binding": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz", - "integrity": "sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3" - } - }, - "@babel/plugin-proposal-optional-chaining": { - "version": "7.21.0", - 
"resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz", - "integrity": "sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0", - "@babel/plugin-syntax-optional-chaining": "^7.8.3" - } - }, - "@babel/plugin-proposal-private-methods": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.18.6.tgz", - "integrity": "sha512-nutsvktDItsNn4rpGItSNV2sz1XwS+nfU0Rg8aCx3W3NOKVzdMjJRu0O5OkgDp3ZGICSTbgRpxZoWsxoKRvbeA==", - "dev": true, - "requires": { - "@babel/helper-create-class-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-proposal-private-property-in-object": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0.tgz", - "integrity": "sha512-ha4zfehbJjc5MmXBlHec1igel5TJXXLDDRbuJ4+XT2TJcyD9/V1919BA8gMvsdHcNMBy4WBUBiRb3nw/EQUtBw==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5" - } - }, - "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz", - "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==", - "dev": true, - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - 
"@babel/plugin-syntax-async-generators": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz", - "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-class-properties": { - "version": "7.12.13", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz", - "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.12.13" - } - }, - "@babel/plugin-syntax-class-static-block": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz", - "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-decorators": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.21.0.tgz", - "integrity": "sha512-tIoPpGBR8UuM4++ccWN3gifhVvQu7ZizuR1fklhRJrd5ewgbkUS+0KVFeWWxELtn18NTLoW32XV7zyOgIAiz+w==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-syntax-dynamic-import": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz", - "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-export-namespace-from": { - "version": "7.8.3", 
- "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz", - "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.3" - } - }, - "@babel/plugin-syntax-flow": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.21.4.tgz", - "integrity": "sha512-l9xd3N+XG4fZRxEP3vXdK6RW7vN1Uf5dxzRC/09wV86wqZ/YYQooBIGNsiRdfNR3/q2/5pPzV4B54J/9ctX5jw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-syntax-import-assertions": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.20.0.tgz", - "integrity": "sha512-IUh1vakzNoWalR8ch/areW7qFopR2AEw03JlG7BbrDqmQ4X3q9uuipQwSGrUn7oGiemKjtSLDhNtQHzMHr1JdQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.19.0" - } - }, - "@babel/plugin-syntax-json-strings": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz", - "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-jsx": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.21.4.tgz", - "integrity": "sha512-5hewiLct5OKyh6PLKEYaFclcqtIgCb6bmELouxjF6up5q3Sov7rOayW4RwhbaBL0dit8rA80GNfY+UuDp2mBbQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-syntax-logical-assignment-operators": { - "version": "7.10.4", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz", - "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-nullish-coalescing-operator": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", - "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-numeric-separator": { - "version": "7.10.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz", - "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.10.4" - } - }, - "@babel/plugin-syntax-object-rest-spread": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-optional-catch-binding": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz", - "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-optional-chaining": { - 
"version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", - "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.8.0" - } - }, - "@babel/plugin-syntax-private-property-in-object": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz", - "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-top-level-await": { - "version": "7.14.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz", - "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.14.5" - } - }, - "@babel/plugin-syntax-typescript": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.21.4.tgz", - "integrity": "sha512-xz0D39NvhQn4t4RNsHmDnnsaQizIlUkdtYvLs8La1BlfjQ6JEwxkJGeqJMW2tAXx+q6H+WFuUTXNdYVpEya0YA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-arrow-functions": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.20.7.tgz", - "integrity": "sha512-3poA5E7dzDomxj9WXWwuD6A5F3kc7VXwIJO+E+J8qtDtS+pXPAhrgEyh+9GBwBgPq1Z+bB+/JD60lp5jsN7JPQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-async-to-generator": { - "version": "7.20.7", - 
"resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.20.7.tgz", - "integrity": "sha512-Uo5gwHPT9vgnSXQxqGtpdufUiWp96gk7yiP4Mp5bm1QMkEmLXBO7PAGYbKoJ6DhAwiNkcHFBol/x5zZZkL/t0Q==", - "dev": true, - "requires": { - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-remap-async-to-generator": "^7.18.9" - } - }, - "@babel/plugin-transform-block-scoped-functions": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.18.6.tgz", - "integrity": "sha512-ExUcOqpPWnliRcPqves5HJcJOvHvIIWfuS4sroBUenPuMdmW+SMHDakmtS7qOo13sVppmUijqeTv7qqGsvURpQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-block-scoping": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.21.0.tgz", - "integrity": "sha512-Mdrbunoh9SxwFZapeHVrwFmri16+oYotcZysSzhNIVDwIAb1UV+kvnxULSYq9J3/q5MDG+4X6w8QVgD1zhBXNQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-classes": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.21.0.tgz", - "integrity": "sha512-RZhbYTCEUAe6ntPehC4hlslPWosNHDox+vAs4On/mCLRLfoDVHf6hVEd7kuxr1RnHwJmxFfUM3cZiZRmPxJPXQ==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-compilation-targets": "^7.20.7", - "@babel/helper-environment-visitor": "^7.18.9", - "@babel/helper-function-name": "^7.21.0", - "@babel/helper-optimise-call-expression": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-replace-supers": "^7.20.7", - "@babel/helper-split-export-declaration": "^7.18.6", - "globals": "^11.1.0" - } - }, - 
"@babel/plugin-transform-computed-properties": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.20.7.tgz", - "integrity": "sha512-Lz7MvBK6DTjElHAmfu6bfANzKcxpyNPeYBGEafyA6E5HtRpjpZwU+u7Qrgz/2OR0z+5TvKYbPdphfSaAcZBrYQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/template": "^7.20.7" - } - }, - "@babel/plugin-transform-destructuring": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.21.3.tgz", - "integrity": "sha512-bp6hwMFzuiE4HqYEyoGJ/V2LeIWn+hLVKc4pnj++E5XQptwhtcGmSayM029d/j2X1bPKGTlsyPwAubuU22KhMA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-dotall-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.18.6.tgz", - "integrity": "sha512-6S3jpun1eEbAxq7TdjLotAsl4WpQI9DxfkycRcKrjhQYzU87qpXdknpBg/e+TdcMehqGnLFi7tnFUBR02Vq6wg==", - "dev": true, - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-duplicate-keys": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.18.9.tgz", - "integrity": "sha512-d2bmXCtZXYc59/0SanQKbiWINadaJXqtvIQIzd4+hNwkWBgyCd5F/2t1kXoUdvPMrxzPvhK6EMQRROxsue+mfw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-exponentiation-operator": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.18.6.tgz", - "integrity": 
"sha512-wzEtc0+2c88FVR34aQmiz56dxEkxr2g8DQb/KfaFa1JYXOFVsbhvAonFN6PwVWj++fKmku8NP80plJ5Et4wqHw==", - "dev": true, - "requires": { - "@babel/helper-builder-binary-assignment-operator-visitor": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-flow-strip-types": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.21.0.tgz", - "integrity": "sha512-FlFA2Mj87a6sDkW4gfGrQQqwY/dLlBAyJa2dJEZ+FHXUVHBflO2wyKvg+OOEzXfrKYIa4HWl0mgmbCzt0cMb7w==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-flow": "^7.18.6" - } - }, - "@babel/plugin-transform-for-of": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.21.0.tgz", - "integrity": "sha512-LlUYlydgDkKpIY7mcBWvyPPmMcOphEyYA27Ef4xpbh1IiDNLr0kZsos2nf92vz3IccvJI25QUwp86Eo5s6HmBQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-function-name": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.18.9.tgz", - "integrity": "sha512-WvIBoRPaJQ5yVHzcnJFor7oS5Ls0PYixlTYE63lCj2RtdQEl15M68FXQlxnG6wdraJIXRdR7KI+hQ7q/9QjrCQ==", - "dev": true, - "requires": { - "@babel/helper-compilation-targets": "^7.18.9", - "@babel/helper-function-name": "^7.18.9", - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-literals": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.18.9.tgz", - "integrity": "sha512-IFQDSRoTPnrAIrI5zoZv73IFeZu2dhu6irxQjY9rNjTT53VmKg9fenjvoiOWOkJ6mm4jKVPtdMzBY98Fp4Z4cg==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-member-expression-literals": { - "version": 
"7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.18.6.tgz", - "integrity": "sha512-qSF1ihLGO3q+/g48k85tUjD033C29TNTVB2paCwZPVmOsjn9pClvYYrM2VeJpBY2bcNkuny0YUyTNRyRxJ54KA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-modules-amd": { - "version": "7.20.11", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.20.11.tgz", - "integrity": "sha512-NuzCt5IIYOW0O30UvqktzHYR2ud5bOWbY0yaxWZ6G+aFzOMJvrs5YHNikrbdaT15+KNO31nPOy5Fim3ku6Zb5g==", - "dev": true, - "requires": { - "@babel/helper-module-transforms": "^7.20.11", - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-modules-commonjs": { - "version": "7.21.2", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.21.2.tgz", - "integrity": "sha512-Cln+Yy04Gxua7iPdj6nOV96smLGjpElir5YwzF0LBPKoPlLDNJePNlrGGaybAJkd0zKRnOVXOgizSqPYMNYkzA==", - "dev": true, - "requires": { - "@babel/helper-module-transforms": "^7.21.2", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-simple-access": "^7.20.2" - } - }, - "@babel/plugin-transform-modules-systemjs": { - "version": "7.20.11", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.20.11.tgz", - "integrity": "sha512-vVu5g9BPQKSFEmvt2TA4Da5N+QVS66EX21d8uoOihC+OCpUoGvzVsXeqFdtAEfVa5BILAeFt+U7yVmLbQnAJmw==", - "dev": true, - "requires": { - "@babel/helper-hoist-variables": "^7.18.6", - "@babel/helper-module-transforms": "^7.20.11", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-identifier": "^7.19.1" - } - }, - "@babel/plugin-transform-modules-umd": { - "version": "7.18.6", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.18.6.tgz", - "integrity": "sha512-dcegErExVeXcRqNtkRU/z8WlBLnvD4MRnHgNs3MytRO1Mn1sHRyhbcpYbVMGclAqOjdW+9cfkdZno9dFdfKLfQ==", - "dev": true, - "requires": { - "@babel/helper-module-transforms": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-named-capturing-groups-regex": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.20.5.tgz", - "integrity": "sha512-mOW4tTzi5iTLnw+78iEq3gr8Aoq4WNRGpmSlrogqaiCBoR1HFhpU4JkpQFOHfeYx3ReVIFWOQJS4aZBRvuZ6mA==", - "dev": true, - "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.20.5", - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-new-target": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.18.6.tgz", - "integrity": "sha512-DjwFA/9Iu3Z+vrAn+8pBUGcjhxKguSMlsFqeCKbhb9BAV756v0krzVK04CRDi/4aqmk8BsHb4a/gFcaA5joXRw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-object-super": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.18.6.tgz", - "integrity": "sha512-uvGz6zk+pZoS1aTZrOvrbj6Pp/kK2mp45t2B+bTDre2UgsZZ8EZLSJtUg7m/no0zOJUWgFONpB7Zv9W2tSaFlA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-replace-supers": "^7.18.6" - } - }, - "@babel/plugin-transform-parameters": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.21.3.tgz", - "integrity": "sha512-Wxc+TvppQG9xWFYatvCGPvZ6+SIUxQ2ZdiBP+PHYMIjnPXD+uThCshaz4NZOnODAtBjjcVQQ/3OKs9LW28purQ==", - "dev": true, - "requires": { - 
"@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-property-literals": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.18.6.tgz", - "integrity": "sha512-cYcs6qlgafTud3PAzrrRNbQtfpQ8+y/+M5tKmksS9+M1ckbH6kzY8MrexEM9mcA6JDsukE19iIRvAyYl463sMg==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-react-display-name": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.18.6.tgz", - "integrity": "sha512-TV4sQ+T013n61uMoygyMRm+xf04Bd5oqFpv2jAEQwSZ8NwQA7zeRPg1LMVg2PWi3zWBz+CLKD+v5bcpZ/BS0aA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-react-jsx": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.21.0.tgz", - "integrity": "sha512-6OAWljMvQrZjR2DaNhVfRz6dkCAVV+ymcLUmaf8bccGOHn2v5rHJK3tTpij0BuhdYWP4LLaqj5lwcdlpAAPuvg==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-module-imports": "^7.18.6", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-jsx": "^7.18.6", - "@babel/types": "^7.21.0" - } - }, - "@babel/plugin-transform-react-jsx-development": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.18.6.tgz", - "integrity": "sha512-SA6HEjwYFKF7WDjWcMcMGUimmw/nhNRDWxr+KaLSCrkD/LMDBvWRmHAYgE1HDeF8KUuI8OAu+RT6EOtKxSW2qA==", - "dev": true, - "requires": { - "@babel/plugin-transform-react-jsx": "^7.18.6" - } - }, - "@babel/plugin-transform-react-jsx-self": { - "version": "7.21.0", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.21.0.tgz", - "integrity": "sha512-f/Eq+79JEu+KUANFks9UZCcvydOOGMgF7jBrcwjHa5jTZD8JivnhCJYvmlhR/WTXBWonDExPoW0eO/CR4QJirA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2" - } - }, - "@babel/plugin-transform-react-jsx-source": { - "version": "7.19.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.19.6.tgz", - "integrity": "sha512-RpAi004QyMNisst/pvSanoRdJ4q+jMCWyk9zdw/CyLB9j8RXEahodR6l2GyttDRyEVWZtbN+TpLiHJ3t34LbsQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.19.0" - } - }, - "@babel/plugin-transform-react-pure-annotations": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.18.6.tgz", - "integrity": "sha512-I8VfEPg9r2TRDdvnHgPepTKvuRomzA8+u+nhY7qSI1fR2hRNebasZEETLyM5mAUr0Ku56OkXJ0I7NHJnO6cJiQ==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-regenerator": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.20.5.tgz", - "integrity": "sha512-kW/oO7HPBtntbsahzQ0qSE3tFvkFwnbozz3NWFhLGqH75vLEg+sCGngLlhVkePlCs3Jv0dBBHDzCHxNiFAQKCQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "regenerator-transform": "^0.15.1" - } - }, - "@babel/plugin-transform-reserved-words": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.18.6.tgz", - "integrity": "sha512-oX/4MyMoypzHjFrT1CdivfKZ+XvIPMFXwwxHp/r0Ddy2Vuomt4HDFGmft1TAY2yiTKiNSsh3kjBAzcM8kSdsjA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - 
"@babel/plugin-transform-runtime": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.21.4.tgz", - "integrity": "sha512-1J4dhrw1h1PqnNNpzwxQ2UBymJUF8KuPjAAnlLwZcGhHAIqUigFW7cdK6GHoB64ubY4qXQNYknoUeks4Wz7CUA==", - "dev": true, - "requires": { - "@babel/helper-module-imports": "^7.21.4", - "@babel/helper-plugin-utils": "^7.20.2", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "semver": "^6.3.0" - } - }, - "@babel/plugin-transform-shorthand-properties": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.18.6.tgz", - "integrity": "sha512-eCLXXJqv8okzg86ywZJbRn19YJHU4XUa55oz2wbHhaQVn/MM+XhukiT7SYqp/7o00dg52Rj51Ny+Ecw4oyoygw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-spread": { - "version": "7.20.7", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.20.7.tgz", - "integrity": "sha512-ewBbHQ+1U/VnH1fxltbJqDeWBU1oNLG8Dj11uIv3xVf7nrQu0bPGe5Rf716r7K5Qz+SqtAOVswoVunoiBtGhxw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-skip-transparent-expression-wrappers": "^7.20.0" - } - }, - "@babel/plugin-transform-sticky-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.18.6.tgz", - "integrity": "sha512-kfiDrDQ+PBsQDO85yj1icueWMfGfJFKN1KCkndygtu/C9+XUfydLC8Iv5UYJqRwy4zk8EcplRxEOeLyjq1gm6Q==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/plugin-transform-template-literals": { - "version": "7.18.9", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.18.9.tgz", - "integrity": "sha512-S8cOWfT82gTezpYOiVaGHrCbhlHgKhQt8XH5ES46P2XWmX92yisoZywf5km75wv5sYcXDUCLMmMxOLCtthDgMA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-typeof-symbol": { - "version": "7.18.9", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.18.9.tgz", - "integrity": "sha512-SRfwTtF11G2aemAZWivL7PD+C9z52v9EvMqH9BuYbabyPuKUvSWks3oCg6041pT925L4zVFqaVBeECwsmlguEw==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-typescript": { - "version": "7.21.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.21.3.tgz", - "integrity": "sha512-RQxPz6Iqt8T0uw/WsJNReuBpWpBqs/n7mNo18sKLoTbMp+UrEekhH+pKSVC7gWz+DNjo9gryfV8YzCiT45RgMw==", - "dev": true, - "requires": { - "@babel/helper-annotate-as-pure": "^7.18.6", - "@babel/helper-create-class-features-plugin": "^7.21.0", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/plugin-syntax-typescript": "^7.20.0" - } - }, - "@babel/plugin-transform-unicode-escapes": { - "version": "7.18.10", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.18.10.tgz", - "integrity": "sha512-kKAdAI+YzPgGY/ftStBFXTI1LZFju38rYThnfMykS+IXy8BVx+res7s2fxf1l8I35DV2T97ezo6+SGrXz6B3iQ==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.9" - } - }, - "@babel/plugin-transform-unicode-regex": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.18.6.tgz", - "integrity": "sha512-gE7A6Lt7YLnNOL3Pb9BNeZvi+d8l7tcRrG4+pwJjK9hD2xX4mEvjlQW60G9EEmfXVYRPv9VRQcyegIVHCql/AA==", - "dev": true, - "requires": { - 
"@babel/helper-create-regexp-features-plugin": "^7.18.6", - "@babel/helper-plugin-utils": "^7.18.6" - } - }, - "@babel/preset-env": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.21.4.tgz", - "integrity": "sha512-2W57zHs2yDLm6GD5ZpvNn71lZ0B/iypSdIeq25OurDKji6AdzV07qp4s3n1/x5BqtiGaTrPN3nerlSCaC5qNTw==", - "dev": true, - "requires": { - "@babel/compat-data": "^7.21.4", - "@babel/helper-compilation-targets": "^7.21.4", - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.21.0", - "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.18.6", - "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.20.7", - "@babel/plugin-proposal-async-generator-functions": "^7.20.7", - "@babel/plugin-proposal-class-properties": "^7.18.6", - "@babel/plugin-proposal-class-static-block": "^7.21.0", - "@babel/plugin-proposal-dynamic-import": "^7.18.6", - "@babel/plugin-proposal-export-namespace-from": "^7.18.9", - "@babel/plugin-proposal-json-strings": "^7.18.6", - "@babel/plugin-proposal-logical-assignment-operators": "^7.20.7", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.18.6", - "@babel/plugin-proposal-numeric-separator": "^7.18.6", - "@babel/plugin-proposal-object-rest-spread": "^7.20.7", - "@babel/plugin-proposal-optional-catch-binding": "^7.18.6", - "@babel/plugin-proposal-optional-chaining": "^7.21.0", - "@babel/plugin-proposal-private-methods": "^7.18.6", - "@babel/plugin-proposal-private-property-in-object": "^7.21.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.18.6", - "@babel/plugin-syntax-async-generators": "^7.8.4", - "@babel/plugin-syntax-class-properties": "^7.12.13", - "@babel/plugin-syntax-class-static-block": "^7.14.5", - "@babel/plugin-syntax-dynamic-import": "^7.8.3", - "@babel/plugin-syntax-export-namespace-from": "^7.8.3", - "@babel/plugin-syntax-import-assertions": "^7.20.0", - 
"@babel/plugin-syntax-json-strings": "^7.8.3", - "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4", - "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3", - "@babel/plugin-syntax-numeric-separator": "^7.10.4", - "@babel/plugin-syntax-object-rest-spread": "^7.8.3", - "@babel/plugin-syntax-optional-catch-binding": "^7.8.3", - "@babel/plugin-syntax-optional-chaining": "^7.8.3", - "@babel/plugin-syntax-private-property-in-object": "^7.14.5", - "@babel/plugin-syntax-top-level-await": "^7.14.5", - "@babel/plugin-transform-arrow-functions": "^7.20.7", - "@babel/plugin-transform-async-to-generator": "^7.20.7", - "@babel/plugin-transform-block-scoped-functions": "^7.18.6", - "@babel/plugin-transform-block-scoping": "^7.21.0", - "@babel/plugin-transform-classes": "^7.21.0", - "@babel/plugin-transform-computed-properties": "^7.20.7", - "@babel/plugin-transform-destructuring": "^7.21.3", - "@babel/plugin-transform-dotall-regex": "^7.18.6", - "@babel/plugin-transform-duplicate-keys": "^7.18.9", - "@babel/plugin-transform-exponentiation-operator": "^7.18.6", - "@babel/plugin-transform-for-of": "^7.21.0", - "@babel/plugin-transform-function-name": "^7.18.9", - "@babel/plugin-transform-literals": "^7.18.9", - "@babel/plugin-transform-member-expression-literals": "^7.18.6", - "@babel/plugin-transform-modules-amd": "^7.20.11", - "@babel/plugin-transform-modules-commonjs": "^7.21.2", - "@babel/plugin-transform-modules-systemjs": "^7.20.11", - "@babel/plugin-transform-modules-umd": "^7.18.6", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.20.5", - "@babel/plugin-transform-new-target": "^7.18.6", - "@babel/plugin-transform-object-super": "^7.18.6", - "@babel/plugin-transform-parameters": "^7.21.3", - "@babel/plugin-transform-property-literals": "^7.18.6", - "@babel/plugin-transform-regenerator": "^7.20.5", - "@babel/plugin-transform-reserved-words": "^7.18.6", - "@babel/plugin-transform-shorthand-properties": "^7.18.6", - 
"@babel/plugin-transform-spread": "^7.20.7", - "@babel/plugin-transform-sticky-regex": "^7.18.6", - "@babel/plugin-transform-template-literals": "^7.18.9", - "@babel/plugin-transform-typeof-symbol": "^7.18.9", - "@babel/plugin-transform-unicode-escapes": "^7.18.10", - "@babel/plugin-transform-unicode-regex": "^7.18.6", - "@babel/preset-modules": "^0.1.5", - "@babel/types": "^7.21.4", - "babel-plugin-polyfill-corejs2": "^0.3.3", - "babel-plugin-polyfill-corejs3": "^0.6.0", - "babel-plugin-polyfill-regenerator": "^0.4.1", - "core-js-compat": "^3.25.1", - "semver": "^6.3.0" - } - }, - "@babel/preset-modules": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz", - "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", - "@babel/plugin-transform-dotall-regex": "^7.4.4", - "@babel/types": "^7.4.4", - "esutils": "^2.0.2" - } - }, - "@babel/preset-react": { - "version": "7.18.6", - "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.18.6.tgz", - "integrity": "sha512-zXr6atUmyYdiWRVLOZahakYmOBHtWc2WGCkP8PYTgZi0iJXDY2CN180TdrIW4OGOAdLc7TifzDIvtx6izaRIzg==", - "dev": true, - "requires": { - "@babel/helper-plugin-utils": "^7.18.6", - "@babel/helper-validator-option": "^7.18.6", - "@babel/plugin-transform-react-display-name": "^7.18.6", - "@babel/plugin-transform-react-jsx": "^7.18.6", - "@babel/plugin-transform-react-jsx-development": "^7.18.6", - "@babel/plugin-transform-react-pure-annotations": "^7.18.6" - } - }, - "@babel/preset-typescript": { - "version": "7.21.4", - "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.21.4.tgz", - "integrity": "sha512-sMLNWY37TCdRH/bJ6ZeeOH1nPuanED7Ai9Y/vH31IPqalioJ6ZNFUWONsakhv4r4n+I6gm5lmoE0olkgib/j/A==", - "dev": true, - 
"requires": { - "@babel/helper-plugin-utils": "^7.20.2", - "@babel/helper-validator-option": "^7.21.0", - "@babel/plugin-syntax-jsx": "^7.21.4", - "@babel/plugin-transform-modules-commonjs": "^7.21.2", - "@babel/plugin-transform-typescript": "^7.21.3" - } - }, - "@babel/regjsgen": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz", - "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==", - "dev": true - }, - "@babel/runtime": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.0.tgz", - "integrity": "sha512-xwII0//EObnq89Ji5AKYQaRYiW/nZ3llSv29d49IuxPhKbtJoLP+9QUUZ4nVragQVtaVGeZrpB+ZtG/Pdy/POw==", - "requires": { - "regenerator-runtime": "^0.13.11" - } - }, - "@babel/runtime-corejs3": { - "version": "7.21.0", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.21.0.tgz", - "integrity": "sha512-TDD4UJzos3JJtM+tHX+w2Uc+KWj7GV+VKKFdMVd2Rx8sdA19hcc3P3AHFYd5LVOw+pYuSd5lICC3gm52B6Rwxw==", - "dev": true, - "requires": { - "core-js-pure": "^3.25.1", - "regenerator-runtime": "^0.13.11" - } - }, - "@babel/template": { - "version": "7.22.15", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.22.15.tgz", - "integrity": "sha512-QPErUVm4uyJa60rkI73qneDacvdvzxshT3kksGqlGWYdOTIUOwJ7RDUL8sGqslY1uXWSL6xMFKEXDS3ox2uF0w==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.22.13", - "@babel/parser": "^7.22.15", - "@babel/types": "^7.22.15" - } - }, - "@babel/traverse": { - "version": "7.23.2", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.23.2.tgz", - "integrity": "sha512-azpe59SQ48qG6nu2CzcMLbxUudtN+dOM9kDbUqGq3HXUJRlo7i8fvPoxQUzYgLZ4cMVmuZgm8vvBpNeRhd6XSw==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.22.13", - "@babel/generator": "^7.23.0", - "@babel/helper-environment-visitor": "^7.22.20", - "@babel/helper-function-name": 
"^7.23.0", - "@babel/helper-hoist-variables": "^7.22.5", - "@babel/helper-split-export-declaration": "^7.22.6", - "@babel/parser": "^7.23.0", - "@babel/types": "^7.23.0", - "debug": "^4.1.0", - "globals": "^11.1.0" - } - }, - "@babel/types": { - "version": "7.23.0", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.23.0.tgz", - "integrity": "sha512-0oIyUfKoI3mSqMvsxBdclDwxXKXAUA8v/apZbc+iSyARYou1o8ZGDxbUYyLFoW2arqS2jDGqJuZvv1d/io1axg==", - "dev": true, - "requires": { - "@babel/helper-string-parser": "^7.22.5", - "@babel/helper-validator-identifier": "^7.22.20", - "to-fast-properties": "^2.0.0" - } - }, - "@bugsnag/browser": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@bugsnag/browser/-/browser-7.20.0.tgz", - "integrity": "sha512-LzZWI6q5cWYQSXvfJDcSl287d2xXESVn0L20lK+K5nwo/jXcK9IVZr9L+CYZ40HVXaC9jOmQbqZ18hsbO2QNIw==", - "requires": { - "@bugsnag/core": "^7.19.0" - } - }, - "@bugsnag/core": { - "version": "7.19.0", - "resolved": "https://registry.npmjs.org/@bugsnag/core/-/core-7.19.0.tgz", - "integrity": "sha512-2KGwdaLD9PhR7Wk7xPi3jGuGsKTatc/28U4TOZIDU3CgC2QhGjubwiXSECel5gwxhZ3jACKcMKSV2ovHhv1NrA==", - "requires": { - "@bugsnag/cuid": "^3.0.0", - "@bugsnag/safe-json-stringify": "^6.0.0", - "error-stack-parser": "^2.0.3", - "iserror": "0.0.2", - "stack-generator": "^2.0.3" - } - }, - "@bugsnag/cuid": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@bugsnag/cuid/-/cuid-3.0.2.tgz", - "integrity": "sha512-cIwzC93r3PQ/INeuwtZwkZIG2K8WWN0rRLZQhu+mr48Ay+i6sEki4GYfTsflse7hZ1BeDWrNb/Q9vgY3B31xHQ==" - }, - "@bugsnag/js": { - "version": "7.20.0", - "resolved": "https://registry.npmjs.org/@bugsnag/js/-/js-7.20.0.tgz", - "integrity": "sha512-lhUUSOveE8fP10RagAINqBmuH+eoOpyUOiTN1WRkjHUevWG0LZjRRUWEGN3AA+ZyTphmC6ljd2qE3/64qfOSGQ==", - "requires": { - "@bugsnag/browser": "^7.20.0", - "@bugsnag/node": "^7.19.0" - } - }, - "@bugsnag/node": { - "version": "7.19.0", - "resolved": 
"https://registry.npmjs.org/@bugsnag/node/-/node-7.19.0.tgz", - "integrity": "sha512-c4snyxx5d/fsMogmgehFBGc//daH6+4XCplia4zrEQYltjaQ+l8ud0dPx623DgJl/2j1+2zlRc7y7IHSd7Gm5w==", - "requires": { - "@bugsnag/core": "^7.19.0", - "byline": "^5.0.0", - "error-stack-parser": "^2.0.2", - "iserror": "^0.0.2", - "pump": "^3.0.0", - "stack-generator": "^2.0.3" - } - }, - "@bugsnag/safe-json-stringify": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/@bugsnag/safe-json-stringify/-/safe-json-stringify-6.0.0.tgz", - "integrity": "sha512-htzFO1Zc57S8kgdRK9mLcPVTW1BY2ijfH7Dk2CeZmspTWKdKqSo1iwmqrq2WtRjFlo8aRZYgLX0wFrDXF/9DLA==" - }, - "@csstools/cascade-layer-name-parser": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/cascade-layer-name-parser/-/cascade-layer-name-parser-1.0.1.tgz", - "integrity": "sha512-SAAi5DpgJJWkfTvWSaqkgyIsTawa83hMwKrktkj6ra2h+q6ZN57vOGZ6ySHq6RSo+CbP64fA3aPChPBRDDUgtw==", - "requires": {} - }, - "@csstools/color-helpers": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-2.0.0.tgz", - "integrity": "sha512-VcPjEnp07RNgz/D+oI2uIALg+IPCSl6mj0XhA3pl3F2bM2B95vgzatExmmzSg/X0zkh+R2v+jFY/J2pV/bnwpw==" - }, - "@csstools/css-calc": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-1.0.1.tgz", - "integrity": "sha512-VBI8X0bmStfc85wWTa2bsbnlBQxgW4FmJ0Ts9ar9UqytE6kii3yg6GO+wpgzht2oK5Qlbpkm1Fy2kcqVmu6f3Q==", - "requires": {} - }, - "@csstools/css-color-parser": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-1.1.0.tgz", - "integrity": "sha512-jRpIhjThaH8jxuJ8Q1H+jai/dekP5952kzLHTuN+rPI48eF2esf/18TMb3N/HtEgmnybhfiwUO6Ph2OkHi3jpA==", - "requires": { - "@csstools/color-helpers": "^2.0.0", - "@csstools/css-calc": "^1.0.1" - } - }, - "@csstools/css-parser-algorithms": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-2.1.0.tgz", - "integrity": "sha512-KP8TicdXpUyeB1NMlbHud/1l39xvLGvqNFWMpG4qC6H1zs9SadGUHe5SO92n/659sDW9aGDvm9AMru0DZkN1Bw==", - "requires": {} - }, - "@csstools/css-tokenizer": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-2.1.0.tgz", - "integrity": "sha512-dtqFyoJBHUxGi9zPZdpCKP1xk8tq6KPHJ/NY4qWXiYo6IcSGwzk3L8x2XzZbbyOyBs9xQARoGveU2AsgLj6D2A==" - }, - "@csstools/media-query-list-parser": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/media-query-list-parser/-/media-query-list-parser-2.0.2.tgz", - "integrity": "sha512-8V6JD8Av1HttuClYr1ZBu0LRVe5Nnz4qrv8RppO8mobsX/USBHZy5JQOXYIlpOVhl46nzkx3X5cfH6CqUghjrQ==", - "requires": {} - }, - "@csstools/postcss-cascade-layers": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-cascade-layers/-/postcss-cascade-layers-3.0.1.tgz", - "integrity": "sha512-dD8W98dOYNOH/yX4V4HXOhfCOnvVAg8TtsL+qCGNoKXuq5z2C/d026wGWgySgC8cajXXo/wNezS31Glj5GcqrA==", - "requires": { - "@csstools/selector-specificity": "^2.0.2", - "postcss-selector-parser": "^6.0.10" - } - }, - "@csstools/postcss-color-function": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-color-function/-/postcss-color-function-2.2.0.tgz", - "integrity": "sha512-4z3k3p35Gmv4ZDX79OytvhwYx6Hz+y3hitikw2F+XG1yhSjalXoMCV04atgLjc/ThLg+Hwnp1pxhQ2G07UHknQ==", - "requires": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" - } - }, - "@csstools/postcss-color-mix-function": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-color-mix-function/-/postcss-color-mix-function-1.0.0.tgz", - "integrity": 
"sha512-JuI8SKpE/XIpfmvALcxvk6flaq36KCJwqQgZ958Jz189r1diQZADq+7xFmjcv+B0vHQ4nSa92gGExtzOZ1iiUg==", - "requires": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" - } - }, - "@csstools/postcss-font-format-keywords": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-font-format-keywords/-/postcss-font-format-keywords-2.0.2.tgz", - "integrity": "sha512-iKYZlIs6JsNT7NKyRjyIyezTCHLh4L4BBB3F5Nx7Dc4Z/QmBgX+YJFuUSar8IM6KclGiAUFGomXFdYxAwJydlA==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "@csstools/postcss-gradients-interpolation-method": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-gradients-interpolation-method/-/postcss-gradients-interpolation-method-3.0.1.tgz", - "integrity": "sha512-sCfFSzL5HRb/GhrGuTEi8IRrxp2bUeKakyXvuXzuBBxL0L2X8kZAljQwkuRkd0W/wIWTsQG/E72REb5XMmRfrA==", - "requires": { - "@csstools/css-color-parser": "^1.1.0", - "@csstools/css-parser-algorithms": "^2.1.0", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" - } - }, - "@csstools/postcss-hwb-function": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-hwb-function/-/postcss-hwb-function-2.2.0.tgz", - "integrity": "sha512-7gDPKacr3KhonzEyj4dzAEcetFJbN+JVPZXtANpf9SAVUHDUK+cCw7367uRlXnCeAoTdmRAyBk3agg2+snFxAw==", - "requires": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0" - } - }, - "@csstools/postcss-ic-unit": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-ic-unit/-/postcss-ic-unit-2.0.2.tgz", - "integrity": "sha512-N84qGTJkfLTPj2qOG5P4CIqGjpZBbjOEMKMn+UjO5wlb9lcBTfBsxCF0lQsFdWJUzBHYFOz19dL66v71WF3Pig==", - "requires": { - 
"@csstools/postcss-progressive-custom-properties": "^2.0.0", - "postcss-value-parser": "^4.2.0" - } - }, - "@csstools/postcss-is-pseudo-class": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-is-pseudo-class/-/postcss-is-pseudo-class-3.1.1.tgz", - "integrity": "sha512-hhiacuby4YdUnnxfCYCRMBIobyJImozf0u+gHSbQ/tNOdwvmrZtVROvgW7zmfYuRkHVDNZJWZslq2v5jOU+j/A==", - "requires": { - "@csstools/selector-specificity": "^2.0.0", - "postcss-selector-parser": "^6.0.10" - } - }, - "@csstools/postcss-logical-float-and-clear": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-float-and-clear/-/postcss-logical-float-and-clear-1.0.1.tgz", - "integrity": "sha512-eO9z2sMLddvlfFEW5Fxbjyd03zaO7cJafDurK4rCqyRt9P7aaWwha0LcSzoROlcZrw1NBV2JAp2vMKfPMQO1xw==", - "requires": {} - }, - "@csstools/postcss-logical-resize": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-resize/-/postcss-logical-resize-1.0.1.tgz", - "integrity": "sha512-x1ge74eCSvpBkDDWppl+7FuD2dL68WP+wwP2qvdUcKY17vJksz+XoE1ZRV38uJgS6FNUwC0AxrPW5gy3MxsDHQ==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "@csstools/postcss-logical-viewport-units": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-logical-viewport-units/-/postcss-logical-viewport-units-1.0.2.tgz", - "integrity": "sha512-nnKFywBqRMYjv5jyjSplD/nbAnboUEGFfdxKw1o34Y1nvycgqjQavhKkmxbORxroBBIDwC5y6SfgENcPPUcOxQ==", - "requires": { - "@csstools/css-tokenizer": "^2.0.0" - } - }, - "@csstools/postcss-media-minmax": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-media-minmax/-/postcss-media-minmax-1.0.0.tgz", - "integrity": "sha512-qXHZ0QVDszKf4SsLazOEzFl+m+IkhHOigqMy/gHNIzAtqB3XeBQUa+dTi1ROmQBDH1HXktGwy+tafFBg9UoaxA==", - "requires": { - "@csstools/css-calc": "^1.0.1", - "@csstools/css-parser-algorithms": "^2.1.0", - "@csstools/css-tokenizer": "^2.1.0", - 
"@csstools/media-query-list-parser": "^2.0.2" - } - }, - "@csstools/postcss-media-queries-aspect-ratio-number-values": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-media-queries-aspect-ratio-number-values/-/postcss-media-queries-aspect-ratio-number-values-1.0.1.tgz", - "integrity": "sha512-V9yQqXdje6OfqDf6EL5iGOpi6N0OEczwYK83rql9UapQwFEryXlAehR5AqH8QqLYb6+y31wUXK6vMxCp0920Zg==", - "requires": { - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "@csstools/media-query-list-parser": "^2.0.0" - } - }, - "@csstools/postcss-nested-calc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-nested-calc/-/postcss-nested-calc-2.0.2.tgz", - "integrity": "sha512-jbwrP8rN4e7LNaRcpx3xpMUjhtt34I9OV+zgbcsYAAk6k1+3kODXJBf95/JMYWhu9g1oif7r06QVUgfWsKxCFw==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "@csstools/postcss-normalize-display-values": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-normalize-display-values/-/postcss-normalize-display-values-2.0.1.tgz", - "integrity": "sha512-TQT5g3JQ5gPXC239YuRK8jFceXF9d25ZvBkyjzBGGoW5st5sPXFVQS8OjYb9IJ/K3CdfK4528y483cgS2DJR/w==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "@csstools/postcss-oklab-function": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-oklab-function/-/postcss-oklab-function-2.2.0.tgz", - "integrity": "sha512-5QMtgn9IWpeTbbt8DwLvr41CQRJef2fKhznTFQI1Og/v3zr/uKYu+aSKZEEaoZnO9OophM4YJnkVJne3CqvJDQ==", - "requires": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" - } - }, - "@csstools/postcss-progressive-custom-properties": { - "version": "2.1.1", - "resolved": 
"https://registry.npmjs.org/@csstools/postcss-progressive-custom-properties/-/postcss-progressive-custom-properties-2.1.1.tgz", - "integrity": "sha512-6p8eO5+j+9hn4h2Klr9dbmya0GIb9SRrnPaCxqR1muVlV1waAZq6YkmlApEwXrox9qxggSwGZD5TnLRIY9f7WA==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "@csstools/postcss-scope-pseudo-class": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-scope-pseudo-class/-/postcss-scope-pseudo-class-2.0.2.tgz", - "integrity": "sha512-6Pvo4uexUCXt+Hz5iUtemQAcIuCYnL+ePs1khFR6/xPgC92aQLJ0zGHonWoewiBE+I++4gXK3pr+R1rlOFHe5w==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "@csstools/postcss-stepped-value-functions": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/@csstools/postcss-stepped-value-functions/-/postcss-stepped-value-functions-2.1.0.tgz", - "integrity": "sha512-CkEo9BF8fQeMoXW3biXjlgTLY7PA4UFihn6leq7hPoRzIguLUI0WZIVgsITGXfX8LXmkhCSTjXO2DLYu/LUixQ==", - "requires": { - "@csstools/css-calc": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.0.1" - } - }, - "@csstools/postcss-text-decoration-shorthand": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/@csstools/postcss-text-decoration-shorthand/-/postcss-text-decoration-shorthand-2.2.2.tgz", - "integrity": "sha512-aR9l/V7p0SkdrIyBysqlQWIbGXeGC7U4ccBAIlWMpVpG/MsGhxs1JvdBpjim4UDF3U+1VmF+MbvZFb7dL+d7XA==", - "requires": { - "@csstools/color-helpers": "^1.0.0", - "postcss-value-parser": "^4.2.0" - }, - "dependencies": { - "@csstools/color-helpers": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-1.0.0.tgz", - "integrity": "sha512-tgqtiV8sU/VaWYjOB3O7PWs7HR/MmOLl2kTYRW2qSsTSEniJq7xmyAYFB1LPpXvvQcE5u2ih2dK9fyc8BnrAGQ==" - } - } - }, - "@csstools/postcss-trigonometric-functions": { - "version": "2.1.0", - "resolved": 
"https://registry.npmjs.org/@csstools/postcss-trigonometric-functions/-/postcss-trigonometric-functions-2.1.0.tgz", - "integrity": "sha512-Ly7YczO+QdnByYeGqlppJoA2Tb2vsFfj5gSrszPTXJ+/4g3nnEZnG0VSeTK/WA8y7fzyL/qVNkkdEeOnruNWFQ==", - "requires": { - "@csstools/css-calc": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.0.1" - } - }, - "@csstools/postcss-unset-value": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@csstools/postcss-unset-value/-/postcss-unset-value-2.0.1.tgz", - "integrity": "sha512-oJ9Xl29/yU8U7/pnMJRqAZd4YXNCfGEdcP4ywREuqm/xMqcgDNDppYRoCGDt40aaZQIEKBS79LytUDN/DHf0Ew==", - "requires": {} - }, - "@csstools/selector-specificity": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/@csstools/selector-specificity/-/selector-specificity-2.2.0.tgz", - "integrity": "sha512-+OJ9konv95ClSTOJCmMZqpd5+YGsB2S+x6w3E1oaM8UuR5j8nTNHYSz8c9BEPGDOCMQYIEEGlVPj/VY64iTbGw==", - "requires": {} - }, - "@esbuild/android-arm": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.17.15.tgz", - "integrity": "sha512-sRSOVlLawAktpMvDyJIkdLI/c/kdRTOqo8t6ImVxg8yT7LQDUYV5Rp2FKeEosLr6ZCja9UjYAzyRSxGteSJPYg==", - "dev": true, - "optional": true - }, - "@esbuild/android-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.17.15.tgz", - "integrity": "sha512-0kOB6Y7Br3KDVgHeg8PRcvfLkq+AccreK///B4Z6fNZGr/tNHX0z2VywCc7PTeWp+bPvjA5WMvNXltHw5QjAIA==", - "dev": true, - "optional": true - }, - "@esbuild/android-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.17.15.tgz", - "integrity": "sha512-MzDqnNajQZ63YkaUWVl9uuhcWyEyh69HGpMIrf+acR4otMkfLJ4sUCxqwbCyPGicE9dVlrysI3lMcDBjGiBBcQ==", - "dev": true, - "optional": true - }, - "@esbuild/darwin-arm64": { - "version": "0.17.15", - "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.17.15.tgz", - "integrity": "sha512-7siLjBc88Z4+6qkMDxPT2juf2e8SJxmsbNVKFY2ifWCDT72v5YJz9arlvBw5oB4W/e61H1+HDB/jnu8nNg0rLA==", - "dev": true, - "optional": true - }, - "@esbuild/darwin-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.17.15.tgz", - "integrity": "sha512-NbImBas2rXwYI52BOKTW342Tm3LTeVlaOQ4QPZ7XuWNKiO226DisFk/RyPk3T0CKZkKMuU69yOvlapJEmax7cg==", - "dev": true, - "optional": true - }, - "@esbuild/freebsd-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.17.15.tgz", - "integrity": "sha512-Xk9xMDjBVG6CfgoqlVczHAdJnCs0/oeFOspFap5NkYAmRCT2qTn1vJWA2f419iMtsHSLm+O8B6SLV/HlY5cYKg==", - "dev": true, - "optional": true - }, - "@esbuild/freebsd-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.17.15.tgz", - "integrity": "sha512-3TWAnnEOdclvb2pnfsTWtdwthPfOz7qAfcwDLcfZyGJwm1SRZIMOeB5FODVhnM93mFSPsHB9b/PmxNNbSnd0RQ==", - "dev": true, - "optional": true - }, - "@esbuild/linux-arm": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.17.15.tgz", - "integrity": "sha512-MLTgiXWEMAMr8nmS9Gigx43zPRmEfeBfGCwxFQEMgJ5MC53QKajaclW6XDPjwJvhbebv+RzK05TQjvH3/aM4Xw==", - "dev": true, - "optional": true - }, - "@esbuild/linux-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.17.15.tgz", - "integrity": "sha512-T0MVnYw9KT6b83/SqyznTs/3Jg2ODWrZfNccg11XjDehIved2oQfrX/wVuev9N936BpMRaTR9I1J0tdGgUgpJA==", - "dev": true, - "optional": true - }, - "@esbuild/linux-ia32": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.17.15.tgz", - "integrity": "sha512-wp02sHs015T23zsQtU4Cj57WiteiuASHlD7rXjKUyAGYzlOKDAjqK6bk5dMi2QEl/KVOcsjwL36kD+WW7vJt8Q==", - "dev": true, - "optional": true - 
}, - "@esbuild/linux-loong64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.17.15.tgz", - "integrity": "sha512-k7FsUJjGGSxwnBmMh8d7IbObWu+sF/qbwc+xKZkBe/lTAF16RqxRCnNHA7QTd3oS2AfGBAnHlXL67shV5bBThQ==", - "dev": true, - "optional": true - }, - "@esbuild/linux-mips64el": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.17.15.tgz", - "integrity": "sha512-ZLWk6czDdog+Q9kE/Jfbilu24vEe/iW/Sj2d8EVsmiixQ1rM2RKH2n36qfxK4e8tVcaXkvuV3mU5zTZviE+NVQ==", - "dev": true, - "optional": true - }, - "@esbuild/linux-ppc64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.17.15.tgz", - "integrity": "sha512-mY6dPkIRAiFHRsGfOYZC8Q9rmr8vOBZBme0/j15zFUKM99d4ILY4WpOC7i/LqoY+RE7KaMaSfvY8CqjJtuO4xg==", - "dev": true, - "optional": true - }, - "@esbuild/linux-riscv64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.17.15.tgz", - "integrity": "sha512-EcyUtxffdDtWjjwIH8sKzpDRLcVtqANooMNASO59y+xmqqRYBBM7xVLQhqF7nksIbm2yHABptoioS9RAbVMWVA==", - "dev": true, - "optional": true - }, - "@esbuild/linux-s390x": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.17.15.tgz", - "integrity": "sha512-BuS6Jx/ezxFuHxgsfvz7T4g4YlVrmCmg7UAwboeyNNg0OzNzKsIZXpr3Sb/ZREDXWgt48RO4UQRDBxJN3B9Rbg==", - "dev": true, - "optional": true - }, - "@esbuild/linux-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.17.15.tgz", - "integrity": "sha512-JsdS0EgEViwuKsw5tiJQo9UdQdUJYuB+Mf6HxtJSPN35vez1hlrNb1KajvKWF5Sa35j17+rW1ECEO9iNrIXbNg==", - "dev": true, - "optional": true - }, - "@esbuild/netbsd-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.17.15.tgz", - "integrity": 
"sha512-R6fKjtUysYGym6uXf6qyNephVUQAGtf3n2RCsOST/neIwPqRWcnc3ogcielOd6pT+J0RDR1RGcy0ZY7d3uHVLA==", - "dev": true, - "optional": true - }, - "@esbuild/openbsd-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.17.15.tgz", - "integrity": "sha512-mVD4PGc26b8PI60QaPUltYKeSX0wxuy0AltC+WCTFwvKCq2+OgLP4+fFd+hZXzO2xW1HPKcytZBdjqL6FQFa7w==", - "dev": true, - "optional": true - }, - "@esbuild/sunos-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.17.15.tgz", - "integrity": "sha512-U6tYPovOkw3459t2CBwGcFYfFRjivcJJc1WC8Q3funIwX8x4fP+R6xL/QuTPNGOblbq/EUDxj9GU+dWKX0oWlQ==", - "dev": true, - "optional": true - }, - "@esbuild/win32-arm64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.17.15.tgz", - "integrity": "sha512-W+Z5F++wgKAleDABemiyXVnzXgvRFs+GVKThSI+mGgleLWluv0D7Diz4oQpgdpNzh4i2nNDzQtWbjJiqutRp6Q==", - "dev": true, - "optional": true - }, - "@esbuild/win32-ia32": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.17.15.tgz", - "integrity": "sha512-Muz/+uGgheShKGqSVS1KsHtCyEzcdOn/W/Xbh6H91Etm+wiIfwZaBn1W58MeGtfI8WA961YMHFYTthBdQs4t+w==", - "dev": true, - "optional": true - }, - "@esbuild/win32-x64": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.17.15.tgz", - "integrity": "sha512-DjDa9ywLUUmjhV2Y9wUTIF+1XsmuFGvZoCmOWkli1XcNAh5t25cc7fgsCx4Zi/Uurep3TTLyDiKATgGEg61pkA==", - "dev": true, - "optional": true - }, - "@eslint-community/eslint-utils": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", - "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", - "dev": true, - "requires": { - "eslint-visitor-keys": "^3.3.0" - } - }, - "@eslint-community/regexpp": { - "version": 
"4.5.0", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.5.0.tgz", - "integrity": "sha512-vITaYzIcNmjn5tF5uxcZ/ft7/RXGrMUIS9HalWckEOF6ESiwXKoMzAQf2UW0aVd6rnOeExTJVd5hmWXucBKGXQ==", - "dev": true - }, - "@eslint/eslintrc": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.0.2.tgz", - "integrity": "sha512-3W4f5tDUra+pA+FzgugqL2pRimUTDJWKr7BINqOpkZrC0uYI0NIc0/JFgBROCU07HR6GieA5m3/rsPIhDmCXTQ==", - "dev": true, - "requires": { - "ajv": "^6.12.4", - "debug": "^4.3.2", - "espree": "^9.5.1", - "globals": "^13.19.0", - "ignore": "^5.2.0", - "import-fresh": "^3.2.1", - "js-yaml": "^4.1.0", - "minimatch": "^3.1.2", - "strip-json-comments": "^3.1.1" - }, - "dependencies": { - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", - "dev": true, - "requires": { - "type-fest": "^0.20.2" - } - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true - }, - "type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true - } - } - }, - "@eslint/js": { - "version": "8.37.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.37.0.tgz", - "integrity": "sha512-x5vzdtOOGgFVDCUs81QRB2+liax8rFg3+7hqM+QhBG0/G3F1ZsoYl97UrqgHgQ9KKT7G6c4V+aTUCgu/n22v1A==", - "dev": true - }, - "@headlessui/react": { - "version": "1.7.13", - "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.13.tgz", - "integrity": "sha512-9n+EQKRtD9266xIHXdY5MfiXPDfYwl7zBM7KOx2Ae3Gdgxy8QML1FkCMjq6AsOf0l6N9uvI4HcFtuFlenaldKg==", - "requires": { - "client-only": "^0.0.1" - } - }, - "@humanwhocodes/config-array": { - "version": "0.11.8", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.8.tgz", - "integrity": "sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==", - "dev": true, - "requires": { - "@humanwhocodes/object-schema": "^1.2.1", - "debug": "^4.1.1", - "minimatch": "^3.0.5" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - } - } - }, - "@humanwhocodes/module-importer": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", - "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", - "dev": true - }, - "@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", - "dev": true - }, - "@jest/expect-utils": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/@jest/expect-utils/-/expect-utils-29.5.0.tgz", - "integrity": "sha512-fmKzsidoXQT2KwnrwE0SQq3uj8Z763vzR8LnLBwC2qYWEFpjX8daRsk6rHUM1QvNlEW/UJXNXm59ztmJJWs2Mg==", - "requires": { - "jest-get-type": "^29.4.3" - } - }, - "@jest/schemas": { - "version": "29.4.3", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz", - "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==", - "requires": { - "@sinclair/typebox": "^0.25.16" - } - }, - "@jest/types": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz", - "integrity": 
"sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==", - "requires": { - "@jest/schemas": "^29.4.3", - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^17.0.8", - "chalk": "^4.0.0" - } - }, - "@jridgewell/gen-mapping": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.1.1.tgz", - "integrity": "sha512-sQXCasFk+U8lWYEe66WxRDOE9PjVz4vSM51fTu3Hw+ClTpUSQb718772vH3pyS5pShp6lvQM7SxgIDXXXmOX7w==", - "dev": true, - "requires": { - "@jridgewell/set-array": "^1.0.0", - "@jridgewell/sourcemap-codec": "^1.4.10" - } - }, - "@jridgewell/resolve-uri": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz", - "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==", - "dev": true - }, - "@jridgewell/set-array": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.1.2.tgz", - "integrity": "sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==", - "dev": true - }, - "@jridgewell/sourcemap-codec": { - "version": "1.4.14", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz", - "integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==", - "dev": true - }, - "@jridgewell/trace-mapping": { - "version": "0.3.17", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.17.tgz", - "integrity": "sha512-MCNzAp77qzKca9+W/+I0+sEpaUnZoeasnghNeVc41VZCEKaCH73Vq3BZZ/SzWIgrqE4H4ceI+p+b6C0mHf9T4g==", - "dev": true, - "requires": { - "@jridgewell/resolve-uri": "3.1.0", - "@jridgewell/sourcemap-codec": "1.4.14" - } - }, - "@jsdoc/salty": { - "version": "0.2.5", - "resolved": 
"https://registry.npmjs.org/@jsdoc/salty/-/salty-0.2.5.tgz", - "integrity": "sha512-TfRP53RqunNe2HBobVBJ0VLhK1HbfvBYeTC1ahnN64PWvyYyGebmMiPkuwvD9fpw2ZbkoPb8Q7mwy0aR8Z9rvw==", - "dev": true, - "requires": { - "lodash": "^4.17.21" - } - }, - "@mswjs/cookies": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/@mswjs/cookies/-/cookies-0.1.7.tgz", - "integrity": "sha512-bDg1ReMBx+PYDB4Pk7y1Q07Zz1iKIEUWQpkEXiA2lEWg9gvOZ8UBmGXilCEUvyYoRFlmr/9iXTRR69TrgSwX/Q==", - "dev": true, - "requires": { - "@types/set-cookie-parser": "^2.4.0", - "set-cookie-parser": "^2.4.6" - } - }, - "@mswjs/interceptors": { - "version": "0.12.7", - "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.12.7.tgz", - "integrity": "sha512-eGjZ3JRAt0Fzi5FgXiV/P3bJGj0NqsN7vBS0J0FO2AQRQ0jCKQS4lEFm4wvlSgKQNfeuc/Vz6d81VtU3Gkx/zg==", - "dev": true, - "requires": { - "@open-draft/until": "^1.0.3", - "@xmldom/xmldom": "^0.7.2", - "debug": "^4.3.2", - "headers-utils": "^3.0.2", - "outvariant": "^1.2.0", - "strict-event-emitter": "^0.2.0" - } - }, - "@nicolo-ribaudo/eslint-scope-5-internals": { - "version": "5.1.1-v1", - "resolved": "https://registry.npmjs.org/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz", - "integrity": "sha512-54/JRvkLIzzDWshCWfuhadfrfZVPiElY8Fcgmg1HroEly/EDSszzhBAsarCux+D/kOslTRquNzuyGSmUSTTHGg==", - "dev": true, - "requires": { - "eslint-scope": "5.1.1" - }, - "dependencies": { - "eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dev": true, - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - } - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": 
"sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - } - } - }, - "@nodelib/fs.scandir": { - "version": "2.1.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", - "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "2.0.5", - "run-parallel": "^1.1.9" - } - }, - "@nodelib/fs.stat": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", - "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true - }, - "@nodelib/fs.walk": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", - "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, - "requires": { - "@nodelib/fs.scandir": "2.1.5", - "fastq": "^1.6.0" - } - }, - "@open-draft/until": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-1.0.3.tgz", - "integrity": "sha512-Aq58f5HiWdyDlFffbbSjAlv596h/cOnt2DO1w3DOC7OJ5EHs0hd/nycJfiu9RJbT6Yk6F1knnRRXNSpxoIVZ9Q==", - "dev": true - }, - "@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", - "dev": true, - "peer": true - }, - "@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", - "dev": true, - "peer": true - }, - "@protobufjs/codegen": { - "version": "2.0.4", - "resolved": 
"https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", - "dev": true, - "peer": true - }, - "@protobufjs/eventemitter": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", - "dev": true, - "peer": true - }, - "@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", - "dev": true, - "peer": true, - "requires": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", - "dev": true, - "peer": true - }, - "@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", - "dev": true, - "peer": true - }, - "@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", - "dev": true, - "peer": true - }, - "@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", - "dev": true, - "peer": true - }, - 
"@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", - "dev": true, - "peer": true - }, - "@rollup/plugin-commonjs": { - "version": "24.0.1", - "resolved": "https://registry.npmjs.org/@rollup/plugin-commonjs/-/plugin-commonjs-24.0.1.tgz", - "integrity": "sha512-15LsiWRZk4eOGqvrJyu3z3DaBu5BhXIMeWnijSRvd8irrrg9SHpQ1pH+BUK4H6Z9wL9yOxZJMTLU+Au86XHxow==", - "dev": true, - "requires": { - "@rollup/pluginutils": "^5.0.1", - "commondir": "^1.0.1", - "estree-walker": "^2.0.2", - "glob": "^8.0.3", - "is-reference": "1.2.1", - "magic-string": "^0.27.0" - } - }, - "@rollup/pluginutils": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.0.2.tgz", - "integrity": "sha512-pTd9rIsP92h+B6wWwFbW8RkZv4hiR/xKsqre4SIuAOaOEQRxi0lqLke9k2/7WegC85GgUs9pjmOjCUi3In4vwA==", - "dev": true, - "requires": { - "@types/estree": "^1.0.0", - "estree-walker": "^2.0.2", - "picomatch": "^2.3.1" - } - }, - "@rushstack/eslint-patch": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.2.0.tgz", - "integrity": "sha512-sXo/qW2/pAcmT43VoRKOJbDOfV3cYpq3szSVfIThQXNt+E4DfKj361vaAt3c88U5tPUxzEswam7GW48PJqtKAg==", - "dev": true - }, - "@sinclair/typebox": { - "version": "0.25.24", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz", - "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ==" - }, - "@svgr/babel-plugin-add-jsx-attribute": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz", - "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==", - "dev": true, - "requires": {} - }, - 
"@svgr/babel-plugin-remove-jsx-attribute": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-7.0.0.tgz", - "integrity": "sha512-iiZaIvb3H/c7d3TH2HBeK91uI2rMhZNwnsIrvd7ZwGLkFw6mmunOCoVnjdYua662MqGFxlN9xTq4fv9hgR4VXQ==", - "dev": true, - "requires": {} - }, - "@svgr/babel-plugin-remove-jsx-empty-expression": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-7.0.0.tgz", - "integrity": "sha512-sQQmyo+qegBx8DfFc04PFmIO1FP1MHI1/QEpzcIcclo5OAISsOJPW76ZIs0bDyO/DBSJEa/tDa1W26pVtt0FRw==", - "dev": true, - "requires": {} - }, - "@svgr/babel-plugin-replace-jsx-attribute-value": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz", - "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==", - "dev": true, - "requires": {} - }, - "@svgr/babel-plugin-svg-dynamic-title": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz", - "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==", - "dev": true, - "requires": {} - }, - "@svgr/babel-plugin-svg-em-dimensions": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz", - "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==", - "dev": true, - "requires": {} - }, - "@svgr/babel-plugin-transform-react-native-svg": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz", - 
"integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==", - "dev": true, - "requires": {} - }, - "@svgr/babel-plugin-transform-svg-component": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz", - "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==", - "dev": true, - "requires": {} - }, - "@svgr/babel-preset": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz", - "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==", - "dev": true, - "requires": { - "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1", - "@svgr/babel-plugin-remove-jsx-attribute": "*", - "@svgr/babel-plugin-remove-jsx-empty-expression": "*", - "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1", - "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1", - "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1", - "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1", - "@svgr/babel-plugin-transform-svg-component": "^6.5.1" - } - }, - "@svgr/core": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz", - "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==", - "dev": true, - "requires": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/plugin-jsx": "^6.5.1", - "camelcase": "^6.2.0", - "cosmiconfig": "^7.0.1" - }, - "dependencies": { - "camelcase": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz", - "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==", - "dev": true - } - } - }, - "@svgr/hast-util-to-babel-ast": { - "version": "6.5.1", - "resolved": 
"https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz", - "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==", - "dev": true, - "requires": { - "@babel/types": "^7.20.0", - "entities": "^4.4.0" - } - }, - "@svgr/plugin-jsx": { - "version": "6.5.1", - "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz", - "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==", - "dev": true, - "requires": { - "@babel/core": "^7.19.6", - "@svgr/babel-preset": "^6.5.1", - "@svgr/hast-util-to-babel-ast": "^6.5.1", - "svg-parser": "^2.0.4" - } - }, - "@testing-library/dom": { - "version": "9.2.0", - "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-9.2.0.tgz", - "integrity": "sha512-xTEnpUKiV/bMyEsE5bT4oYA0x0Z/colMtxzUY8bKyPXBNLn/e0V4ZjBZkEhms0xE4pv9QsPfSRu9AWS4y5wGvA==", - "dev": true, - "peer": true, - "requires": { - "@babel/code-frame": "^7.10.4", - "@babel/runtime": "^7.12.5", - "@types/aria-query": "^5.0.1", - "aria-query": "^5.0.0", - "chalk": "^4.1.0", - "dom-accessibility-api": "^0.5.9", - "lz-string": "^1.5.0", - "pretty-format": "^27.0.2" - } - }, - "@testing-library/jest-dom": { - "version": "5.16.5", - "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-5.16.5.tgz", - "integrity": "sha512-N5ixQ2qKpi5OLYfwQmUb/5mSV9LneAcaUfp32pn4yCnpb8r/Yz0pXFPck21dIicKmi+ta5WRAknkZCfA8refMA==", - "dev": true, - "requires": { - "@adobe/css-tools": "^4.0.1", - "@babel/runtime": "^7.9.2", - "@types/testing-library__jest-dom": "^5.9.1", - "aria-query": "^5.0.0", - "chalk": "^3.0.0", - "css.escape": "^1.5.1", - "dom-accessibility-api": "^0.5.6", - "lodash": "^4.17.15", - "redent": "^3.0.0" - }, - "dependencies": { - "chalk": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz", - "integrity": 
"sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - } - } - }, - "@testing-library/react": { - "version": "11.2.7", - "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-11.2.7.tgz", - "integrity": "sha512-tzRNp7pzd5QmbtXNG/mhdcl7Awfu/Iz1RaVHY75zTdOkmHCuzMhRL83gWHSgOAcjS3CCbyfwUHMZgRJb4kAfpA==", - "dev": true, - "requires": { - "@babel/runtime": "^7.12.5", - "@testing-library/dom": "^7.28.1" - }, - "dependencies": { - "@jest/types": { - "version": "26.6.2", - "resolved": "https://registry.npmjs.org/@jest/types/-/types-26.6.2.tgz", - "integrity": "sha512-fC6QCp7Sc5sX6g8Tvbmj4XUTbyrik0akgRy03yjXbQaBWWNWGE7SGtJk98m0N8nzegD/7SggrUlivxo5ax4KWQ==", - "dev": true, - "requires": { - "@types/istanbul-lib-coverage": "^2.0.0", - "@types/istanbul-reports": "^3.0.0", - "@types/node": "*", - "@types/yargs": "^15.0.0", - "chalk": "^4.0.0" - } - }, - "@testing-library/dom": { - "version": "7.31.2", - "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-7.31.2.tgz", - "integrity": "sha512-3UqjCpey6HiTZT92vODYLPxTBWlM8ZOOjr3LX5F37/VRipW2M1kX6I/Cm4VXzteZqfGfagg8yXywpcOgQBlNsQ==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.10.4", - "@babel/runtime": "^7.12.5", - "@types/aria-query": "^4.2.0", - "aria-query": "^4.2.2", - "chalk": "^4.1.0", - "dom-accessibility-api": "^0.5.6", - "lz-string": "^1.4.4", - "pretty-format": "^26.6.2" - } - }, - "@types/aria-query": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-4.2.2.tgz", - "integrity": "sha512-HnYpAE1Y6kRyKM/XkEuiRQhTHvkzMBurTHnpFLYLBGPIylZNPs9jJcuOOYWxPLJCSEtmZT0Y8rHDokKN7rRTig==", - "dev": true - }, - "@types/yargs": { - "version": "15.0.15", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-15.0.15.tgz", - "integrity": 
"sha512-IziEYMU9XoVj8hWg7k+UJrXALkGFjWJhn5QFEv9q4p+v40oZhSuC135M38st8XPjICL7Ey4TV64ferBGUoJhBg==", - "dev": true, - "requires": { - "@types/yargs-parser": "*" - } - }, - "aria-query": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-4.2.2.tgz", - "integrity": "sha512-o/HelwhuKpTj/frsOsbNLNgnNGVIFsVP/SW2BSF14gVl7kAfMOJ6/8wUAUvG1R1NHKrfG+2sHZTu0yauT1qBrA==", - "dev": true, - "requires": { - "@babel/runtime": "^7.10.2", - "@babel/runtime-corejs3": "^7.10.2" - } - }, - "pretty-format": { - "version": "26.6.2", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-26.6.2.tgz", - "integrity": "sha512-7AeGuCYNGmycyQbCqd/3PWH4eOoX/OiCa0uphp57NVTeAGdJGaAliecxwBDHYQCIvrW7aDBZCYeNTP/WX69mkg==", - "dev": true, - "requires": { - "@jest/types": "^26.6.2", - "ansi-regex": "^5.0.0", - "ansi-styles": "^4.0.0", - "react-is": "^17.0.1" - } - } - } - }, - "@testing-library/react-hooks": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/@testing-library/react-hooks/-/react-hooks-5.1.3.tgz", - "integrity": "sha512-UdEUtlQapQ579NEcXDAUE275u+KUsPtxW7NmFrNt0bE6lW8lqNCyxDK0RSuECmNZ/S0/fgP00W9RWRhVKO/hRg==", - "dev": true, - "requires": { - "@babel/runtime": "^7.12.5", - "@types/react": ">=16.9.0", - "@types/react-dom": ">=16.9.0", - "@types/react-test-renderer": ">=16.9.0", - "filter-console": "^0.1.1", - "react-error-boundary": "^3.1.0" - } - }, - "@testing-library/user-event": { - "version": "14.4.3", - "resolved": "https://registry.npmjs.org/@testing-library/user-event/-/user-event-14.4.3.tgz", - "integrity": "sha512-kCUc5MEwaEMakkO5x7aoD+DLi02ehmEM2QCGWvNqAS1dV/fAvORWEjnjsEIvml59M7Y5kCkWN6fCCyPOe8OL6Q==", - "dev": true, - "requires": {} - }, - "@tootallnate/once": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz", - "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", - "dev": true - }, - 
"@trysound/sax": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz", - "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==" - }, - "@types/aria-query": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.1.tgz", - "integrity": "sha512-XTIieEY+gvJ39ChLcB4If5zHtPxt3Syj5rgZR+e1ctpmK8NjPf0zFqsz4JpLJT0xla9GFDKjy8Cpu331nrmE1Q==", - "dev": true, - "peer": true - }, - "@types/chai": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.4.tgz", - "integrity": "sha512-KnRanxnpfpjUTqTCXslZSEdLfXExwgNxYPdiO2WGUj8+HDjFi8R3k5RVKPeSCzLjCcshCAtVO2QBbVuAV4kTnw==", - "dev": true - }, - "@types/chai-subset": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/@types/chai-subset/-/chai-subset-1.3.3.tgz", - "integrity": "sha512-frBecisrNGz+F4T6bcc+NLeolfiojh5FxW2klu669+8BARtyQv2C/GkNW6FUodVe4BroGMP/wER/YDGc7rEllw==", - "dev": true, - "requires": { - "@types/chai": "*" - } - }, - "@types/cookie": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.4.1.tgz", - "integrity": "sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q==", - "dev": true - }, - "@types/d3": { - "version": "7.4.0", - "resolved": "https://registry.npmjs.org/@types/d3/-/d3-7.4.0.tgz", - "integrity": "sha512-jIfNVK0ZlxcuRDKtRS/SypEyOQ6UHaFQBKv032X45VvxSJ6Yi5G9behy9h6tNTHTDGh5Vq+KbmBjUWLgY4meCA==", - "requires": { - "@types/d3-array": "*", - "@types/d3-axis": "*", - "@types/d3-brush": "*", - "@types/d3-chord": "*", - "@types/d3-color": "*", - "@types/d3-contour": "*", - "@types/d3-delaunay": "*", - "@types/d3-dispatch": "*", - "@types/d3-drag": "*", - "@types/d3-dsv": "*", - "@types/d3-ease": "*", - "@types/d3-fetch": "*", - "@types/d3-force": "*", - "@types/d3-format": "*", - "@types/d3-geo": "*", - "@types/d3-hierarchy": "*", - 
"@types/d3-interpolate": "*", - "@types/d3-path": "*", - "@types/d3-polygon": "*", - "@types/d3-quadtree": "*", - "@types/d3-random": "*", - "@types/d3-scale": "*", - "@types/d3-scale-chromatic": "*", - "@types/d3-selection": "*", - "@types/d3-shape": "*", - "@types/d3-time": "*", - "@types/d3-time-format": "*", - "@types/d3-timer": "*", - "@types/d3-transition": "*", - "@types/d3-zoom": "*" - } - }, - "@types/d3-array": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-3.0.4.tgz", - "integrity": "sha512-nwvEkG9vYOc0Ic7G7kwgviY4AQlTfYGIZ0fqB7CQHXGyYM6nO7kJh5EguSNA3jfh4rq7Sb7eMVq8isuvg2/miQ==" - }, - "@types/d3-axis": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-3.0.2.tgz", - "integrity": "sha512-uGC7DBh0TZrU/LY43Fd8Qr+2ja1FKmH07q2FoZFHo1eYl8aj87GhfVoY1saJVJiq24rp1+wpI6BvQJMKgQm8oA==", - "requires": { - "@types/d3-selection": "*" - } - }, - "@types/d3-brush": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-3.0.2.tgz", - "integrity": "sha512-2TEm8KzUG3N7z0TrSKPmbxByBx54M+S9lHoP2J55QuLU0VSQ9mE96EJSAOVNEqd1bbynMjeTS9VHmz8/bSw8rA==", - "requires": { - "@types/d3-selection": "*" - } - }, - "@types/d3-chord": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-3.0.2.tgz", - "integrity": "sha512-abT/iLHD3sGZwqMTX1TYCMEulr+wBd0SzyOQnjYNLp7sngdOHYtNkMRI5v3w5thoN+BWtlHVDx2Osvq6fxhZWw==" - }, - "@types/d3-color": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.0.tgz", - "integrity": "sha512-HKuicPHJuvPgCD+np6Se9MQvS6OCbJmOjGvylzMJRlDwUXjKTTXs6Pwgk79O09Vj/ho3u1ofXnhFOaEWWPrlwA==" - }, - "@types/d3-contour": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-3.0.2.tgz", - "integrity": "sha512-k6/bGDoAGJZnZWaKzeB+9glgXCYGvh6YlluxzBREiVo8f/X2vpTEdgPy9DN7Z2i42PZOZ4JDhVdlTSTSkLDPlQ==", - "requires": { - 
"@types/d3-array": "*", - "@types/geojson": "*" - } - }, - "@types/d3-delaunay": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-delaunay/-/d3-delaunay-6.0.1.tgz", - "integrity": "sha512-tLxQ2sfT0p6sxdG75c6f/ekqxjyYR0+LwPrsO1mbC9YDBzPJhs2HbJJRrn8Ez1DBoHRo2yx7YEATI+8V1nGMnQ==" - }, - "@types/d3-dispatch": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-3.0.2.tgz", - "integrity": "sha512-rxN6sHUXEZYCKV05MEh4z4WpPSqIw+aP7n9ZN6WYAAvZoEAghEK1WeVZMZcHRBwyaKflU43PCUAJNjFxCzPDjg==" - }, - "@types/d3-drag": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.2.tgz", - "integrity": "sha512-qmODKEDvyKWVHcWWCOVcuVcOwikLVsyc4q4EBJMREsoQnR2Qoc2cZQUyFUPgO9q4S3qdSqJKBsuefv+h0Qy+tw==", - "requires": { - "@types/d3-selection": "*" - } - }, - "@types/d3-dsv": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-3.0.1.tgz", - "integrity": "sha512-76pBHCMTvPLt44wFOieouXcGXWOF0AJCceUvaFkxSZEu4VDUdv93JfpMa6VGNFs01FHfuP4a5Ou68eRG1KBfTw==" - }, - "@types/d3-ease": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-3.0.0.tgz", - "integrity": "sha512-aMo4eaAOijJjA6uU+GIeW018dvy9+oH5Y2VPPzjjfxevvGQ/oRDs+tfYC9b50Q4BygRR8yE2QCLsrT0WtAVseA==" - }, - "@types/d3-fetch": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-3.0.2.tgz", - "integrity": "sha512-gllwYWozWfbep16N9fByNBDTkJW/SyhH6SGRlXloR7WdtAaBui4plTP+gbUgiEot7vGw/ZZop1yDZlgXXSuzjA==", - "requires": { - "@types/d3-dsv": "*" - } - }, - "@types/d3-force": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-3.0.4.tgz", - "integrity": "sha512-q7xbVLrWcXvSBBEoadowIUJ7sRpS1yvgMWnzHJggFy5cUZBq2HZL5k/pBSm0GdYWS1vs5/EDwMjSKF55PDY4Aw==" - }, - "@types/d3-format": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-3.0.1.tgz", - 
"integrity": "sha512-5KY70ifCCzorkLuIkDe0Z9YTf9RR2CjBX1iaJG+rgM/cPP+sO+q9YdQ9WdhQcgPj1EQiJ2/0+yUkkziTG6Lubg==" - }, - "@types/d3-geo": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-3.0.3.tgz", - "integrity": "sha512-bK9uZJS3vuDCNeeXQ4z3u0E7OeJZXjUgzFdSOtNtMCJCLvDtWDwfpRVWlyt3y8EvRzI0ccOu9xlMVirawolSCw==", - "requires": { - "@types/geojson": "*" - } - }, - "@types/d3-hierarchy": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", - "integrity": "sha512-9hjRTVoZjRFR6xo8igAJyNXQyPX6Aq++Nhb5ebrUF414dv4jr2MitM2fWiOY475wa3Za7TOS2Gh9fmqEhLTt0A==" - }, - "@types/d3-interpolate": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.1.tgz", - "integrity": "sha512-jx5leotSeac3jr0RePOH1KdR9rISG91QIE4Q2PYTu4OymLTZfA3SrnURSLzKH48HmXVUru50b8nje4E79oQSQw==", - "requires": { - "@types/d3-color": "*" - } - }, - "@types/d3-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-3.0.0.tgz", - "integrity": "sha512-0g/A+mZXgFkQxN3HniRDbXMN79K3CdTpLsevj+PXiTcb2hVyvkZUBg37StmgCQkaD84cUJ4uaDAWq7UJOQy2Tg==" - }, - "@types/d3-polygon": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-3.0.0.tgz", - "integrity": "sha512-D49z4DyzTKXM0sGKVqiTDTYr+DHg/uxsiWDAkNrwXYuiZVd9o9wXZIo+YsHkifOiyBkmSWlEngHCQme54/hnHw==" - }, - "@types/d3-quadtree": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-3.0.2.tgz", - "integrity": "sha512-QNcK8Jguvc8lU+4OfeNx+qnVy7c0VrDJ+CCVFS9srBo2GL9Y18CnIxBdTF3v38flrGy5s1YggcoAiu6s4fLQIw==" - }, - "@types/d3-random": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-3.0.1.tgz", - "integrity": "sha512-IIE6YTekGczpLYo/HehAy3JGF1ty7+usI97LqraNa8IiDur+L44d0VOjAvFQWJVdZOJHukUJw+ZdZBlgeUsHOQ==" - }, - "@types/d3-scale": { - "version": "4.0.3", 
- "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.3.tgz", - "integrity": "sha512-PATBiMCpvHJSMtZAMEhc2WyL+hnzarKzI6wAHYjhsonjWJYGq5BXTzQjv4l8m2jO183/4wZ90rKvSeT7o72xNQ==", - "requires": { - "@types/d3-time": "*" - } - }, - "@types/d3-scale-chromatic": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz", - "integrity": "sha512-dsoJGEIShosKVRBZB0Vo3C8nqSDqVGujJU6tPznsBJxNJNwMF8utmS83nvCBKQYPpjCzaaHcrf66iTRpZosLPw==" - }, - "@types/d3-selection": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.5.tgz", - "integrity": "sha512-xCB0z3Hi8eFIqyja3vW8iV01+OHGYR2di/+e+AiOcXIOrY82lcvWW8Ke1DYE/EUVMsBl4Db9RppSBS3X1U6J0w==" - }, - "@types/d3-shape": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-3.1.1.tgz", - "integrity": "sha512-6Uh86YFF7LGg4PQkuO2oG6EMBRLuW9cbavUW46zkIO5kuS2PfTqo2o9SkgtQzguBHbLgNnU90UNsITpsX1My+A==", - "requires": { - "@types/d3-path": "*" - } - }, - "@types/d3-time": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.0.tgz", - "integrity": "sha512-sZLCdHvBUcNby1cB6Fd3ZBrABbjz3v1Vm90nysCQ6Vt7vd6e/h9Lt7SiJUoEX0l4Dzc7P5llKyhqSi1ycSf1Hg==" - }, - "@types/d3-time-format": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-4.0.0.tgz", - "integrity": "sha512-yjfBUe6DJBsDin2BMIulhSHmr5qNR5Pxs17+oW4DoVPyVIXZ+m6bs7j1UVKP08Emv6jRmYrYqxYzO63mQxy1rw==" - }, - "@types/d3-timer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-3.0.0.tgz", - "integrity": "sha512-HNB/9GHqu7Fo8AQiugyJbv6ZxYz58wef0esl4Mv828w1ZKpAshw/uFWVDUcIB9KKFeFKoxS3cHY07FFgtTRZ1g==" - }, - "@types/d3-transition": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.3.tgz", - "integrity": 
"sha512-/S90Od8Id1wgQNvIA8iFv9jRhCiZcGhPd2qX0bKF/PS+y0W5CrXKgIiELd2CvG1mlQrWK/qlYh3VxicqG1ZvgA==", - "requires": { - "@types/d3-selection": "*" - } - }, - "@types/d3-zoom": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.2.tgz", - "integrity": "sha512-t09DDJVBI6AkM7N8kuPsnq/3d/ehtRKBN1xSiYjjMCgbiw6HM6Ged5VhvswmhprfKyGvzeTEL/4WBaK9llWvlA==", - "requires": { - "@types/d3-interpolate": "*", - "@types/d3-selection": "*" - } - }, - "@types/eslint": { - "version": "8.37.0", - "resolved": "https://registry.npmjs.org/@types/eslint/-/eslint-8.37.0.tgz", - "integrity": "sha512-Piet7dG2JBuDIfohBngQ3rCt7MgO9xCO4xIMKxBThCq5PNRB91IjlJ10eJVwfoNtvTErmxLzwBZ7rHZtbOMmFQ==", - "dev": true, - "requires": { - "@types/estree": "*", - "@types/json-schema": "*" - } - }, - "@types/estree": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.0.tgz", - "integrity": "sha512-WulqXMDUTYAXCjZnk6JtIHPigp55cVtDgDrO2gHRwhyJto21+1zbVCtOYB2L1F9w4qCQ0rOGWBnBe0FNTiEJIQ==", - "dev": true - }, - "@types/geojson": { - "version": "7946.0.10", - "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.10.tgz", - "integrity": "sha512-Nmh0K3iWQJzniTuPRcJn5hxXkfB1T1pgB89SBig5PlJQU5yocazeu4jATJlaA0GYFKWMqDdvYemoSnF2pXgLVA==" - }, - "@types/history": { - "version": "4.7.11", - "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz", - "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA==" - }, - "@types/inquirer": { - "version": "8.2.6", - "resolved": "https://registry.npmjs.org/@types/inquirer/-/inquirer-8.2.6.tgz", - "integrity": "sha512-3uT88kxg8lNzY8ay2ZjP44DKcRaTGztqeIvN2zHvhzIBH/uAPaL75aBtdNRKbA7xXoMbBt5kX0M00VKAnfOYlA==", - "dev": true, - "requires": { - "@types/through": "*", - "rxjs": "^7.2.0" - } - }, - "@types/istanbul-lib-coverage": { - "version": "2.0.4", - "resolved": 
"https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz", - "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g==" - }, - "@types/istanbul-lib-report": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz", - "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==", - "requires": { - "@types/istanbul-lib-coverage": "*" - } - }, - "@types/istanbul-reports": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz", - "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==", - "requires": { - "@types/istanbul-lib-report": "*" - } - }, - "@types/jest": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/@types/jest/-/jest-29.5.0.tgz", - "integrity": "sha512-3Emr5VOl/aoBwnWcH/EFQvlSAmjV+XtV9GGu5mwdYew5vhQh0IUZx/60x0TzHDu09Bi7HMx10t/namdJw5QIcg==", - "requires": { - "expect": "^29.0.0", - "pretty-format": "^29.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==" - }, - "pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", - "requires": { - "@jest/schemas": "^29.4.3", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - } - }, - "react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": 
"sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" - } - } - }, - "@types/js-levenshtein": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@types/js-levenshtein/-/js-levenshtein-1.1.1.tgz", - "integrity": "sha512-qC4bCqYGy1y/NP7dDVr7KJarn+PbX1nSpwA7JXdu0HxT3QYjO8MJ+cntENtHFVy2dRAyBV23OZ6MxsW1AM1L8g==", - "dev": true - }, - "@types/json-schema": { - "version": "7.0.11", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz", - "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ==", - "dev": true - }, - "@types/json5": { - "version": "0.0.29", - "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz", - "integrity": "sha512-dRLjCWHYg4oaA77cxO64oO+7JwCwnIzkZPdrrC71jQmQtlhM556pwKo5bUzqvZndkVbeFLIIi+9TC40JNF5hNQ==", - "dev": true - }, - "@types/linkify-it": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.2.tgz", - "integrity": "sha512-HZQYqbiFVWufzCwexrvh694SOim8z2d+xJl5UNamcvQFejLY/2YUtzXHYi3cHdI7PMlS8ejH2slRAOJQ32aNbA==", - "dev": true - }, - "@types/lodash": { - "version": "4.14.192", - "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.192.tgz", - "integrity": "sha512-km+Vyn3BYm5ytMO13k9KTp27O75rbQ0NFw+U//g+PX7VZyjCioXaRFisqSIJRECljcTv73G3i6BpglNGHgUQ5A==", - "dev": true - }, - "@types/lodash-es": { - "version": "4.17.7", - "resolved": "https://registry.npmjs.org/@types/lodash-es/-/lodash-es-4.17.7.tgz", - "integrity": "sha512-z0ptr6UI10VlU6l5MYhGwS4mC8DZyYer2mCoyysZtSF7p26zOX8UpbrV0YpNYLGS8K4PUFIyEr62IMFFjveSiQ==", - "dev": true, - "requires": { - "@types/lodash": "*" - } - }, - "@types/markdown-it": { - "version": "12.2.3", - "resolved": "https://registry.npmjs.org/@types/markdown-it/-/markdown-it-12.2.3.tgz", - "integrity": "sha512-GKMHFfv3458yYy+v/N8gjufHO6MSZKCOXpZc5GXIWWy8uldwfmPn98vp81gZ5f9SVw8YYBctgfJ22a2d7AOMeQ==", - 
"dev": true, - "requires": { - "@types/linkify-it": "*", - "@types/mdurl": "*" - } - }, - "@types/mdurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@types/mdurl/-/mdurl-1.0.2.tgz", - "integrity": "sha512-eC4U9MlIcu2q0KQmXszyn5Akca/0jrQmwDRgpAMJai7qBWq4amIQhZyNau4VYGtCeALvW1/NtjzJJ567aZxfKA==", - "dev": true - }, - "@types/minimist": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/@types/minimist/-/minimist-1.2.2.tgz", - "integrity": "sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==", - "dev": true - }, - "@types/node": { - "version": "16.18.23", - "resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.23.tgz", - "integrity": "sha512-XAMpaw1s1+6zM+jn2tmw8MyaRDIJfXxqmIQIS0HfoGYPuf7dUWeiUKopwq13KFX9lEp1+THGtlaaYx39Nxr58g==" - }, - "@types/normalize-package-data": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@types/normalize-package-data/-/normalize-package-data-2.4.1.tgz", - "integrity": "sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==", - "dev": true - }, - "@types/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==", - "dev": true - }, - "@types/prop-types": { - "version": "15.7.5", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" - }, - "@types/react": { - "version": "18.0.33", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.0.33.tgz", - "integrity": "sha512-sHxzVxeanvQyQ1lr8NSHaj0kDzcNiGpILEVt69g9S31/7PfMvNCKLKcsHw4lYKjs3cGNJjXSP4mYzX43QlnjNA==", - "requires": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" - } - }, - 
"@types/react-dom": { - "version": "18.0.11", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.0.11.tgz", - "integrity": "sha512-O38bPbI2CWtgw/OoQoY+BRelw7uysmXbWvw3nLWO21H1HSh+GOlqPuXshJfjmpNlKiiSDG9cc1JZAaMmVdcTlw==", - "dev": true, - "requires": { - "@types/react": "*" - } - }, - "@types/react-router": { - "version": "5.1.20", - "resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz", - "integrity": "sha512-jGjmu/ZqS7FjSH6owMcD5qpq19+1RS9DeVRqfl1FeBMxTDQAGwlMWOcs52NDoXaNKyG3d1cYQFMs9rCrb88o9Q==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*" - } - }, - "@types/react-router-dom": { - "version": "5.3.3", - "resolved": "https://registry.npmjs.org/@types/react-router-dom/-/react-router-dom-5.3.3.tgz", - "integrity": "sha512-kpqnYK4wcdm5UaWI3fLcELopqLrHgLqNsdpHauzlQktfkHL3npOSwtj1Uz9oKBAzs7lFtVkV8j83voAz2D8fhw==", - "requires": { - "@types/history": "^4.7.11", - "@types/react": "*", - "@types/react-router": "*" - } - }, - "@types/react-test-renderer": { - "version": "18.0.0", - "resolved": "https://registry.npmjs.org/@types/react-test-renderer/-/react-test-renderer-18.0.0.tgz", - "integrity": "sha512-C7/5FBJ3g3sqUahguGi03O79b8afNeSD6T8/GU50oQrJCU0bVCCGQHaGKUbg2Ce8VQEEqTw8/HiS6lXHHdgkdQ==", - "dev": true, - "requires": { - "@types/react": "*" - } - }, - "@types/resize-observer-browser": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/@types/resize-observer-browser/-/resize-observer-browser-0.1.7.tgz", - "integrity": "sha512-G9eN0Sn0ii9PWQ3Vl72jDPgeJwRWhv2Qk/nQkJuWmRmOB4HX3/BhD5SE1dZs/hzPZL/WKnvF0RHdTSG54QJFyg==" - }, - "@types/scheduler": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", - "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==" - }, - "@types/semver": { - "version": "7.3.13", - "resolved": 
"https://registry.npmjs.org/@types/semver/-/semver-7.3.13.tgz", - "integrity": "sha512-21cFJr9z3g5dW8B0CVI9g2O9beqaThGQ6ZFBqHfwhzLDKUxaqTIy3vnfah/UPkfOiF2pLq+tGz+W8RyCskuslw==", - "dev": true - }, - "@types/set-cookie-parser": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/@types/set-cookie-parser/-/set-cookie-parser-2.4.2.tgz", - "integrity": "sha512-fBZgytwhYAUkj/jC/FAV4RQ5EerRup1YQsXQCh8rZfiHkc4UahC192oH0smGwsXol3cL3A5oETuAHeQHmhXM4w==", - "dev": true, - "requires": { - "@types/node": "*" - } - }, - "@types/stack-utils": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@types/stack-utils/-/stack-utils-2.0.1.tgz", - "integrity": "sha512-Hl219/BT5fLAaz6NDkSuhzasy49dwQS/DSdu4MdggFB8zcXv7vflBI3xp7FEmkmdDkBUI2bPUNeMttp2knYdxw==" - }, - "@types/testing-library__jest-dom": { - "version": "5.14.5", - "resolved": "https://registry.npmjs.org/@types/testing-library__jest-dom/-/testing-library__jest-dom-5.14.5.tgz", - "integrity": "sha512-SBwbxYoyPIvxHbeHxTZX2Pe/74F/tX2/D3mMvzabdeJ25bBojfW0TyB8BHrbq/9zaaKICJZjLP+8r6AeZMFCuQ==", - "dev": true, - "requires": { - "@types/jest": "*" - } - }, - "@types/through": { - "version": "0.0.30", - "resolved": "https://registry.npmjs.org/@types/through/-/through-0.0.30.tgz", - "integrity": "sha512-FvnCJljyxhPM3gkRgWmxmDZyAQSiBQQWLI0A0VFL0K7W1oRUrPJSqNO0NvTnLkBcotdlp3lKvaT0JrnyRDkzOg==", - "dev": true, - "requires": { - "@types/node": "*" - } - }, - "@types/yargs": { - "version": "17.0.24", - "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz", - "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==", - "requires": { - "@types/yargs-parser": "*" - } - }, - "@types/yargs-parser": { - "version": "21.0.0", - "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz", - "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA==" - }, - 
"@typescript-eslint/eslint-plugin": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-5.57.1.tgz", - "integrity": "sha512-1MeobQkQ9tztuleT3v72XmY0XuKXVXusAhryoLuU5YZ+mXoYKZP9SQ7Flulh1NX4DTjpGTc2b/eMu4u7M7dhnQ==", - "dev": true, - "requires": { - "@eslint-community/regexpp": "^4.4.0", - "@typescript-eslint/scope-manager": "5.57.1", - "@typescript-eslint/type-utils": "5.57.1", - "@typescript-eslint/utils": "5.57.1", - "debug": "^4.3.4", - "grapheme-splitter": "^1.0.4", - "ignore": "^5.2.0", - "natural-compare-lite": "^1.4.0", - "semver": "^7.3.7", - "tsutils": "^3.21.0" - }, - "dependencies": { - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - } - } - }, - "@typescript-eslint/experimental-utils": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/experimental-utils/-/experimental-utils-5.57.1.tgz", - "integrity": "sha512-5F5s8mpM1Y0RQ5iWzKQPQm5cmhARgcMfUwyHX1ZZFL8Tm0PyzyQ+9jgYSMaW74XXvpDg9/KdmMICLlwNwKtO7w==", - "dev": true, - "requires": { - "@typescript-eslint/utils": "5.57.1" - } - }, - "@typescript-eslint/parser": { - "version": "5.57.1", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.57.1.tgz", - "integrity": "sha512-hlA0BLeVSA/wBPKdPGxoVr9Pp6GutGoY380FEhbVi0Ph4WNe8kLvqIRx76RSQt1lynZKfrXKs0/XeEk4zZycuA==", - "dev": true, - "requires": { - "@typescript-eslint/scope-manager": "5.57.1", - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/typescript-estree": "5.57.1", - "debug": "^4.3.4" - } - }, - "@typescript-eslint/scope-manager": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-5.57.1.tgz", - "integrity": "sha512-N/RrBwEUKMIYxSKl0oDK5sFVHd6VI7p9K5MyUlVYAY6dyNb/wHUqndkTd3XhpGlXgnQsBkRZuu4f9kAHghvgPw==", - "dev": true, - "requires": { - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/visitor-keys": "5.57.1" - } - }, - "@typescript-eslint/type-utils": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-5.57.1.tgz", - "integrity": "sha512-/RIPQyx60Pt6ga86hKXesXkJ2WOS4UemFrmmq/7eOyiYjYv/MUSHPlkhU6k9T9W1ytnTJueqASW+wOmW4KrViw==", - "dev": true, - "requires": { - "@typescript-eslint/typescript-estree": "5.57.1", - "@typescript-eslint/utils": "5.57.1", - "debug": "^4.3.4", - "tsutils": "^3.21.0" - } - }, - "@typescript-eslint/types": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-5.57.1.tgz", - "integrity": "sha512-bSs4LOgyV3bJ08F5RDqO2KXqg3WAdwHCu06zOqcQ6vqbTJizyBhuh1o1ImC69X4bV2g1OJxbH71PJqiO7Y1RuA==", - "dev": true - }, - "@typescript-eslint/typescript-estree": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-5.57.1.tgz", - "integrity": "sha512-A2MZqD8gNT0qHKbk2wRspg7cHbCDCk2tcqt6ScCFLr5Ru8cn+TCfM786DjPhqwseiS+PrYwcXht5ztpEQ6TFTw==", - "dev": true, - "requires": { - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/visitor-keys": "5.57.1", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", 
- "semver": "^7.3.7", - "tsutils": "^3.21.0" - }, - "dependencies": { - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - } - } - }, - "@typescript-eslint/utils": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-5.57.1.tgz", - "integrity": "sha512-kN6vzzf9NkEtawECqze6v99LtmDiUJCVpvieTFA1uL7/jDghiJGubGZ5csicYHU1Xoqb3oH/R5cN5df6W41Nfg==", - "dev": true, - "requires": { - "@eslint-community/eslint-utils": "^4.2.0", - "@types/json-schema": "^7.0.9", - "@types/semver": "^7.3.12", - "@typescript-eslint/scope-manager": "5.57.1", - "@typescript-eslint/types": "5.57.1", - "@typescript-eslint/typescript-estree": "5.57.1", - "eslint-scope": "^5.1.1", - "semver": "^7.3.7" - }, - "dependencies": { - "eslint-scope": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz", - "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", - "dev": true, - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^4.1.1" - } - }, - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": 
"sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - } - } - }, - "@typescript-eslint/visitor-keys": { - "version": "5.57.1", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-5.57.1.tgz", - "integrity": "sha512-RjQrAniDU0CEk5r7iphkm731zKlFiUjvcBS2yHAg8WWqFMCaCrD0rKEVOMUyMMcbGPZ0bPp56srkGWrgfZqLRA==", - "dev": true, - "requires": { - "@typescript-eslint/types": "5.57.1", - "eslint-visitor-keys": "^3.3.0" - } - }, - "@vitejs/plugin-react": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-3.1.0.tgz", - "integrity": "sha512-AfgcRL8ZBhAlc3BFdigClmTUMISmmzHn7sB2h9U1odvc5U/MjWXsAaz18b/WoppUTDBzxOJwo2VdClfUcItu9g==", - "dev": true, - "requires": { - "@babel/core": "^7.20.12", - "@babel/plugin-transform-react-jsx-self": "^7.18.6", - "@babel/plugin-transform-react-jsx-source": "^7.19.6", - "magic-string": "^0.27.0", - "react-refresh": "^0.14.0" - } - }, - "@vitest/expect": { - "version": "0.29.8", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-0.29.8.tgz", - "integrity": 
"sha512-xlcVXn5I5oTq6NiZSY3ykyWixBxr5mG8HYtjvpgg6KaqHm0mvhX18xuwl5YGxIRNt/A5jidd7CWcNHrSvgaQqQ==", - "dev": true, - "requires": { - "@vitest/spy": "0.29.8", - "@vitest/utils": "0.29.8", - "chai": "^4.3.7" - } - }, - "@vitest/runner": { - "version": "0.29.8", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-0.29.8.tgz", - "integrity": "sha512-FzdhnRDwEr/A3Oo1jtIk/B952BBvP32n1ObMEb23oEJNO+qO5cBet6M2XWIDQmA7BDKGKvmhUf2naXyp/2JEwQ==", - "dev": true, - "requires": { - "@vitest/utils": "0.29.8", - "p-limit": "^4.0.0", - "pathe": "^1.1.0" - }, - "dependencies": { - "p-limit": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", - "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", - "dev": true, - "requires": { - "yocto-queue": "^1.0.0" - } - } - } - }, - "@vitest/spy": { - "version": "0.29.8", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-0.29.8.tgz", - "integrity": "sha512-VdjBe9w34vOMl5I5mYEzNX8inTxrZ+tYUVk9jxaZJmHFwmDFC/GV3KBFTA/JKswr3XHvZL+FE/yq5EVhb6pSAw==", - "dev": true, - "requires": { - "tinyspy": "^1.0.2" - } - }, - "@vitest/utils": { - "version": "0.29.8", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-0.29.8.tgz", - "integrity": "sha512-qGzuf3vrTbnoY+RjjVVIBYfuWMjn3UMUqyQtdGNZ6ZIIyte7B37exj6LaVkrZiUTvzSadVvO/tJm8AEgbGCBPg==", - "dev": true, - "requires": { - "cli-truncate": "^3.1.0", - "diff": "^5.1.0", - "loupe": "^2.3.6", - "pretty-format": "^27.5.1" - } - }, - "@xmldom/xmldom": { - "version": "0.7.10", - "resolved": "https://registry.npmjs.org/@xmldom/xmldom/-/xmldom-0.7.10.tgz", - "integrity": "sha512-hb9QhOg5MGmpVkFcoZ9XJMe1em5gd0e2eqqjK87O1dwULedXsnY/Zg/Ju6lcohA+t6jVkmKpe7I1etqhvdRdrQ==", - "dev": true - }, - "@zeit/schemas": { - "version": "2.29.0", - "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.29.0.tgz", - "integrity": 
"sha512-g5QiLIfbg3pLuYUJPlisNKY+epQJTcMDsOnVNkscrDP1oi7vmJnzOANYJI/1pZcVJ6umUkBv3aFtlg1UvUHGzA==", - "dev": true - }, - "abab": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/abab/-/abab-2.0.6.tgz", - "integrity": "sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==", - "dev": true - }, - "accepts": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz", - "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==", - "dev": true, - "requires": { - "mime-types": "~2.1.34", - "negotiator": "0.6.3" - } - }, - "acorn": { - "version": "8.8.2", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz", - "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==", - "dev": true - }, - "acorn-globals": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/acorn-globals/-/acorn-globals-7.0.1.tgz", - "integrity": "sha512-umOSDSDrfHbTNPuNpC2NSnnA3LUrqpevPb4T9jRx4MagXNS0rs+gwiTcAvqCRmsD6utzsrzNt+ebm00SNWiC3Q==", - "dev": true, - "requires": { - "acorn": "^8.1.0", - "acorn-walk": "^8.0.2" - } - }, - "acorn-jsx": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", - "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", - "dev": true, - "requires": {} - }, - "acorn-walk": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz", - "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==", - "dev": true - }, - "agent-base": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-6.0.2.tgz", - "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", - "dev": true, - 
"requires": { - "debug": "4" - } - }, - "ajv": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.11.0.tgz", - "integrity": "sha512-wGgprdCvMalC0BztXvitD2hC04YffAvtsUn93JbGXYLAtCUO4xd17mCCZQxUOItiBwZvJScWo8NIvQMQ71rdpg==", - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "json-schema-traverse": "^1.0.0", - "require-from-string": "^2.0.2", - "uri-js": "^4.2.2" - } - }, - "ansi-align": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", - "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", - "dev": true, - "requires": { - "string-width": "^4.1.0" - } - }, - "ansi-escapes": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", - "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", - "dev": true, - "requires": { - "type-fest": "^0.21.3" - }, - "dependencies": { - "type-fest": { - "version": "0.21.3", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", - "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", - "dev": true - } - } - }, - "ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "requires": { - "color-convert": "^2.0.1" - } - }, - "any-promise": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", - "integrity": 
"sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", - "dev": true - }, - "anymatch": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", - "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", - "requires": { - "normalize-path": "^3.0.0", - "picomatch": "^2.0.4" - } - }, - "arch": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", - "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", - "dev": true - }, - "arg": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", - "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", - "dev": true - }, - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "aria-query": { - "version": "5.1.3", - "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.1.3.tgz", - "integrity": "sha512-R5iJ5lkuHybztUfuOAznmboyjWq8O6sqNqtK7CLOqdydi54VNbORp49mb14KbWgG1QD3JFO9hJdZ+y4KutfdOQ==", - "dev": true, - "requires": { - "deep-equal": "^2.0.5" - } - }, - "array-buffer-byte-length": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.0.tgz", - "integrity": "sha512-LPuwb2P+NrQw3XhxGc36+XSvuBPopovXYTR9Ew++Du9Yb/bx5AzBfrIsBoj0EZUifjQU+sHL21sseZ3jerWO/A==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "is-array-buffer": "^3.0.1" - } - }, - "array-includes": { - "version": "3.1.6", - "resolved": "https://registry.npmjs.org/array-includes/-/array-includes-3.1.6.tgz", - "integrity": 
"sha512-sgTbLvL6cNnw24FnbaDyjmvddQ2ML8arZsgaJhoABMoplz/4QRhtrYS+alr1BUM1Bwp6dhx8vVCBSLG+StwOFw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", - "is-string": "^1.0.7" - } - }, - "array-union": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", - "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", - "dev": true - }, - "array.prototype.flat": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz", - "integrity": "sha512-roTU0KWIOmJ4DRLmwKd19Otg0/mT3qPNt0Qb3GWW8iObuZXxrjB/pzn0R3hqpRSWg4HCwqx+0vwOnWnvlOyeIA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0" - } - }, - "array.prototype.flatmap": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/array.prototype.flatmap/-/array.prototype.flatmap-1.3.1.tgz", - "integrity": "sha512-8UGn9O1FDVvMNB0UlLv4voxRMze7+FpHyF5mSMRjWHUMlpoDViniy05870VlxhfgTnLbpuwTzvD76MTtWxB/mQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0" - } - }, - "array.prototype.tosorted": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array.prototype.tosorted/-/array.prototype.tosorted-1.1.1.tgz", - "integrity": "sha512-pZYPXPRl2PqWcsUs6LOMn+1f1532nEoPTYowBtqLwAW+W8vSVhkIGnmOX1t/UQjD6YGI0vcD2B1U7ZFGQH9jnQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "es-shim-unscopables": "^1.0.0", - "get-intrinsic": "^1.1.3" - } - }, - "arrify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", - "integrity": 
"sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==", - "dev": true - }, - "assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", - "dev": true - }, - "ast-types-flow": { - "version": "0.0.7", - "resolved": "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz", - "integrity": "sha512-eBvWn1lvIApYMhzQMsu9ciLfkBY499mFZlNqG+/9WR7PVlroQw0vG30cOQQbaKz3sCEc44TAOu2ykzqXSNnwag==", - "dev": true - }, - "astral-regex": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", - "integrity": "sha512-Z7tMw1ytTXt5jqMcOP+OQteU1VuNK9Y02uuJtKQ1Sv69jXQKKg5cibLwGJow8yzZP+eAc18EmLGPal0bp36rvQ==", - "dev": true - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", - "dev": true - }, - "autoprefixer": { - "version": "10.4.14", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.14.tgz", - "integrity": "sha512-FQzyfOsTlwVzjHxKEqRIAdJx9niO6VCBCoEwax/VLSoQF29ggECcPuBqUMZ+u8jCZOPSy8b8/8KnuFbp0SaFZQ==", - "requires": { - "browserslist": "^4.21.5", - "caniuse-lite": "^1.0.30001464", - "fraction.js": "^4.2.0", - "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", - "postcss-value-parser": "^4.2.0" - } - }, - "available-typed-arrays": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", - "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==", - "dev": true - }, - "axe-core": { - "version": "4.6.3", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.6.3.tgz", - 
"integrity": "sha512-/BQzOX780JhsxDnPpH4ZiyrJAzcd8AfzFPkv+89veFSr1rcMjuq2JDCwypKaPeB6ljHp9KjXhPpjgCvQlWYuqg==", - "dev": true - }, - "axobject-query": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.1.1.tgz", - "integrity": "sha512-goKlv8DZrK9hUh975fnHzhNIO4jUnFCfv/dszV5VwUGDFjI6vQ2VwoyjYjYNEbBE8AH87TduWP5uyDR1D+Iteg==", - "dev": true, - "requires": { - "deep-equal": "^2.0.5" - } - }, - "babel-plugin-macros": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", - "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", - "dev": true, - "requires": { - "@babel/runtime": "^7.12.5", - "cosmiconfig": "^7.0.0", - "resolve": "^1.19.0" - } - }, - "babel-plugin-polyfill-corejs2": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.3.tgz", - "integrity": "sha512-8hOdmFYFSZhqg2C/JgLUQ+t52o5nirNwaWM2B9LWteozwIvM14VSwdsCAUET10qT+kmySAlseadmfeeSWFCy+Q==", - "dev": true, - "requires": { - "@babel/compat-data": "^7.17.7", - "@babel/helper-define-polyfill-provider": "^0.3.3", - "semver": "^6.1.1" - } - }, - "babel-plugin-polyfill-corejs3": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.6.0.tgz", - "integrity": "sha512-+eHqR6OPcBhJOGgsIar7xoAB1GcSwVUA3XjAd7HJNzOXT4wv6/H7KIdA/Nc60cvUlDbKApmqNvD1B1bzOt4nyA==", - "dev": true, - "requires": { - "@babel/helper-define-polyfill-provider": "^0.3.3", - "core-js-compat": "^3.25.1" - } - }, - "babel-plugin-polyfill-regenerator": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.4.1.tgz", - "integrity": "sha512-NtQGmyQDXjQqQ+IzRkBVwEOz9lQ4zxAQZgoAYEtU9dJjnl1Oc98qnN7jcp+bE7O7aYzVpavXE3/VKXNzUbh7aw==", - "dev": true, - 
"requires": { - "@babel/helper-define-polyfill-provider": "^0.3.3" - } - }, - "babel-plugin-transform-react-remove-prop-types": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/babel-plugin-transform-react-remove-prop-types/-/babel-plugin-transform-react-remove-prop-types-0.4.24.tgz", - "integrity": "sha512-eqj0hVcJUR57/Ug2zE1Yswsw4LhuqqHhD+8v120T1cl3kjg76QwtyBrdIk4WVwK+lAhBJVYCd/v+4nc4y+8JsA==", - "dev": true - }, - "babel-preset-react-app": { - "version": "10.0.1", - "resolved": "https://registry.npmjs.org/babel-preset-react-app/-/babel-preset-react-app-10.0.1.tgz", - "integrity": "sha512-b0D9IZ1WhhCWkrTXyFuIIgqGzSkRIH5D5AmB0bXbzYAB1OBAwHcUeyWW2LorutLWF5btNo/N7r/cIdmvvKJlYg==", - "dev": true, - "requires": { - "@babel/core": "^7.16.0", - "@babel/plugin-proposal-class-properties": "^7.16.0", - "@babel/plugin-proposal-decorators": "^7.16.4", - "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0", - "@babel/plugin-proposal-numeric-separator": "^7.16.0", - "@babel/plugin-proposal-optional-chaining": "^7.16.0", - "@babel/plugin-proposal-private-methods": "^7.16.0", - "@babel/plugin-transform-flow-strip-types": "^7.16.0", - "@babel/plugin-transform-react-display-name": "^7.16.0", - "@babel/plugin-transform-runtime": "^7.16.4", - "@babel/preset-env": "^7.16.4", - "@babel/preset-react": "^7.16.0", - "@babel/preset-typescript": "^7.16.0", - "@babel/runtime": "^7.16.3", - "babel-plugin-macros": "^3.1.0", - "babel-plugin-transform-react-remove-prop-types": "^0.4.24" - } - }, - "balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - 
"dev": true - }, - "big-integer": { - "version": "1.6.51", - "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz", - "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==" - }, - "binary-extensions": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", - "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==" - }, - "bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "dev": true, - "requires": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, - "bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" - }, - "boxen": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-7.0.0.tgz", - "integrity": "sha512-j//dBVuyacJbvW+tvZ9HuH03fZ46QcaKvvhZickZqtB271DxJ7SNRSNxrV/dZX0085m7hISRZWbzWlJvx/rHSg==", - "dev": true, - "requires": { - "ansi-align": "^3.0.1", - "camelcase": "^7.0.0", - "chalk": "^5.0.1", - "cli-boxes": "^3.0.0", - "string-width": "^5.1.2", - "type-fest": "^2.13.0", - "widest-line": "^4.0.1", - "wrap-ansi": "^8.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": 
"sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true - }, - "ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true - }, - "chalk": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.2.0.tgz", - "integrity": "sha512-ree3Gqw/nazQAPuJJEy+avdl7QfZMcUvmHIKgEZkGL+xOBzRvup5Hxo6LHuMceSxOabuJLJm5Yp/92R9eMmMvA==", - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - }, - "type-fest": { - "version": "2.19.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz", - "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==", - "dev": true - }, - "wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - 
"dev": true, - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - } - } - } - }, - "brace-expansion": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", - "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0" - } - }, - "braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "requires": { - "fill-range": "^7.0.1" - } - }, - "broadcast-channel": { - "version": "3.7.0", - "resolved": "https://registry.npmjs.org/broadcast-channel/-/broadcast-channel-3.7.0.tgz", - "integrity": "sha512-cIAKJXAxGJceNZGTZSBzMxzyOn72cVgPnKx4dc6LRjQgbaJUQqhy5rzL3zbMxkMWsGKkv2hSFkPRMEXfoMZ2Mg==", - "requires": { - "@babel/runtime": "^7.7.2", - "detect-node": "^2.1.0", - "js-sha3": "0.8.0", - "microseconds": "0.2.0", - "nano-time": "1.0.0", - "oblivious-set": "1.0.0", - "rimraf": "3.0.2", - "unload": "2.2.0" - } - }, - "browserslist": { - "version": "4.21.5", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.21.5.tgz", - "integrity": "sha512-tUkiguQGW7S3IhB7N+c2MV/HZPSCPAAiYBZXLsBhFB/PCy6ZKKsZrmBayHV9fdGV/ARIfJ14NkxKzRDjvp7L6w==", - "requires": { - "caniuse-lite": "^1.0.30001449", - "electron-to-chromium": "^1.4.284", - "node-releases": "^2.0.8", - "update-browserslist-db": "^1.0.10" - } - }, - "buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "byline": { - "version": "5.0.0", - "resolved": 
"https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", - "integrity": "sha512-s6webAy+R4SR8XVuJWt2V2rGvhnrhxN+9S15GNuTK3wKPOXFF6RNc+8ug2XhH+2s4f+uudG4kUVYmYOQWL2g0Q==" - }, - "bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==", - "dev": true - }, - "cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true - }, - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "callsites": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", - "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", - "dev": true - }, - "camelcase": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-7.0.1.tgz", - "integrity": "sha512-xlx1yCK2Oc1APsPXDL2LdlNP6+uu8OCDdhOBSVT279M/S+y75O30C2VuD8T2ogdePBBl7PfPF4504tnLgX3zfw==", - "dev": true - }, - "camelcase-css": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", - "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", - "dev": true - }, - "camelcase-keys": { - "version": "6.2.2", - "resolved": "https://registry.npmjs.org/camelcase-keys/-/camelcase-keys-6.2.2.tgz", - "integrity": "sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==", - "dev": true, - "requires": { 
- "camelcase": "^5.3.1", - "map-obj": "^4.0.0", - "quick-lru": "^4.0.1" - }, - "dependencies": { - "camelcase": { - "version": "5.3.1", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-5.3.1.tgz", - "integrity": "sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==", - "dev": true - } - } - }, - "caniuse-lite": { - "version": "1.0.30001473", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001473.tgz", - "integrity": "sha512-ewDad7+D2vlyy+E4UJuVfiBsU69IL+8oVmTuZnH5Q6CIUbxNfI50uVpRHbUPDD6SUaN2o0Lh4DhTrvLG/Tn1yg==" - }, - "catharsis": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/catharsis/-/catharsis-0.9.0.tgz", - "integrity": "sha512-prMTQVpcns/tzFgFVkVp6ak6RykZyWb3gu8ckUpd6YkTlacOd3DXGJjIpD4Q6zJirizvaiAjSSHlOsA+6sNh2A==", - "dev": true, - "requires": { - "lodash": "^4.17.15" - } - }, - "chai": { - "version": "4.3.7", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.7.tgz", - "integrity": "sha512-HLnAzZ2iupm25PlN0xFreAlBA5zaBSv3og0DdeGA4Ar6h6rJ3A0rolRUKJhSF2V10GZKDgWF/VmAEsNWjCRB+A==", - "dev": true, - "requires": { - "assertion-error": "^1.1.0", - "check-error": "^1.0.2", - "deep-eql": "^4.1.2", - "get-func-name": "^2.0.0", - "loupe": "^2.3.1", - "pathval": "^1.1.1", - "type-detect": "^4.0.5" - } - }, - "chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "chalk-template": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/chalk-template/-/chalk-template-0.4.0.tgz", - "integrity": "sha512-/ghrgmhfY8RaSdeo43hNXxpoHAtxdbskUHjPpfqUWGttFgycUhYPGx3YZBCnUCvOa7Doivn1IZec3DEGFoMgLg==", - "dev": true, - "requires": { - "chalk": "^4.1.2" - } - }, - "chardet": { - "version": "0.7.0", - "resolved": 
"https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz", - "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", - "dev": true - }, - "check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha512-BrgHpW9NURQgzoNyjfq0Wu6VFO6D7IZEmJNdtgNqpzGG8RuNFHt2jQxWlAs4HMe119chBnv+34syEZtc6IhLtA==", - "dev": true - }, - "chokidar": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", - "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", - "requires": { - "anymatch": "~3.1.2", - "braces": "~3.0.2", - "fsevents": "~2.3.2", - "glob-parent": "~5.1.2", - "is-binary-path": "~2.1.0", - "is-glob": "~4.0.1", - "normalize-path": "~3.0.0", - "readdirp": "~3.6.0" - } - }, - "ci-info": { - "version": "3.8.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz", - "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==" - }, - "classcat": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.4.tgz", - "integrity": "sha512-sbpkOw6z413p+HDGcBENe498WM9woqWHiJxCq7nvmxe9WmrUmqfAcxpIwAiMtM5Q3AhYkzXcNQHqsWq0mND51g==" - }, - "classnames": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz", - "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==" - }, - "cli-boxes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", - "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", - "dev": true - }, - "cli-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", - "integrity": 
"sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", - "dev": true, - "requires": { - "restore-cursor": "^3.1.0" - } - }, - "cli-spinners": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.7.0.tgz", - "integrity": "sha512-qu3pN8Y3qHNgE2AFweciB1IfMnmZ/fsNTEE+NOFjmGB2F/7rLhnhzppvpCnN4FovtP26k8lHyy9ptEbNwWFLzw==", - "dev": true - }, - "cli-truncate": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-3.1.0.tgz", - "integrity": "sha512-wfOBkjXteqSnI59oPcJkcPl/ZmwvMMOj340qUIY1SKZCv0B9Cf4D4fAucRkIKQmsIuYK3x1rrgU7MeGRruiuiA==", - "dev": true, - "requires": { - "slice-ansi": "^5.0.0", - "string-width": "^5.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true - }, - "ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", - "dev": true - }, - "slice-ansi": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", - "integrity": 
"sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==", - "dev": true, - "requires": { - "ansi-styles": "^6.0.0", - "is-fullwidth-code-point": "^4.0.0" - } - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - } - } - }, - "cli-width": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-3.0.0.tgz", - "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", - "dev": true - }, - "client-only": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", - "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" - }, - "clipboardy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/clipboardy/-/clipboardy-3.0.0.tgz", - "integrity": "sha512-Su+uU5sr1jkUy1sGRpLKjKrvEOVXgSgiSInwa/qeID6aJ07yh+5NWc3h2QfjHjBnfX4LhtFcuAWKUsJ3r+fjbg==", - "dev": true, - "requires": { - "arch": "^2.2.0", - "execa": "^5.1.1", - "is-wsl": "^2.2.0" - } - }, - "cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "dev": true, - "requires": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - 
"wrap-ansi": "^7.0.0" - } - }, - "clone": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", - "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", - "dev": true - }, - "clsx": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz", - "integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==" - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" - }, - "colord": { - "version": "2.9.3", - "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz", - "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==", - "dev": true - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dev": true, - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "commander": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz", - "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==" - }, - "commondir": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz", - "integrity": 
"sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", - "dev": true - }, - "compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dev": true, - "requires": { - "mime-db": ">= 1.43.0 < 2" - } - }, - "compression": { - "version": "1.7.4", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz", - "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==", - "dev": true, - "requires": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.16", - "debug": "2.6.9", - "on-headers": "~1.0.2", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", - "dev": true - } - } - }, - "compute-scroll-into-view": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/compute-scroll-into-view/-/compute-scroll-into-view-2.0.4.tgz", - "integrity": "sha512-y/ZA3BGnxoM/QHHQ2Uy49CLtnWPbt4tTPpEEZiEmmiWBFKjej7nEyH8Ryz54jH0MLXflUYA3Er2zUxPSJu5R+g==" - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" - }, - "confusing-browser-globals": { - "version": "1.0.11", - "resolved": 
"https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz", - "integrity": "sha512-JsPKdmh8ZkmnHxDk55FZ1TqVLvEQTvoByJZRN9jzI0UjxK/QgAmsphz7PGtqgPieQZ/CQcHWXCR7ATDNhGe+YA==", - "dev": true - }, - "content-disposition": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==", - "dev": true - }, - "convert-source-map": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", - "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", - "dev": true - }, - "cookie": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.2.tgz", - "integrity": "sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA==", - "dev": true - }, - "core-js-compat": { - "version": "3.30.0", - "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.30.0.tgz", - "integrity": "sha512-P5A2h/9mRYZFIAP+5Ab8ns6083IyVpSclU74UNvbGVQ8VM7n3n3/g2yF3AkKQ9NXz2O+ioxLbEWKnDtgsFamhg==", - "dev": true, - "requires": { - "browserslist": "^4.21.5" - } - }, - "core-js-pure": { - "version": "3.30.0", - "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.30.0.tgz", - "integrity": "sha512-+2KbMFGeBU0ln/csoPqTe0i/yfHbrd2EUhNMObsGtXMKS/RTtlkYyi+/3twLcevbgNR0yM/r0Psa3TEoQRpFMQ==", - "dev": true - }, - "cosmiconfig": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", - "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", - "dev": true, - "requires": { - "@types/parse-json": "^4.0.0", - "import-fresh": "^3.2.1", - "parse-json": "^5.0.0", - "path-type": "^4.0.0", - "yaml": "^1.10.0" - } - }, 
- "cross-fetch": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz", - "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", - "dev": true, - "requires": { - "node-fetch": "2.6.7" - } - }, - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "css-blank-pseudo": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/css-blank-pseudo/-/css-blank-pseudo-5.0.2.tgz", - "integrity": "sha512-aCU4AZ7uEcVSUzagTlA9pHciz7aWPKA/YzrEkpdSopJ2pvhIxiQ5sYeMz1/KByxlIo4XBdvMNJAVKMg/GRnhfw==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "css-functions-list": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/css-functions-list/-/css-functions-list-3.1.0.tgz", - "integrity": "sha512-/9lCvYZaUbBGvYUgYGFJ4dcYiyqdhSjG7IPVluoV8A1ILjkF7ilmhp1OGUz8n+nmBcu0RNrQAzgD8B6FJbrt2w==", - "dev": true - }, - "css-has-pseudo": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/css-has-pseudo/-/css-has-pseudo-5.0.2.tgz", - "integrity": "sha512-q+U+4QdwwB7T9VEW/LyO6CFrLAeLqOykC5mDqJXc7aKZAhDbq7BvGT13VGJe+IwBfdN2o3Xdw2kJ5IxwV1Sc9Q==", - "requires": { - "@csstools/selector-specificity": "^2.0.1", - "postcss-selector-parser": "^6.0.10", - "postcss-value-parser": "^4.2.0" - } - }, - "css-prefers-color-scheme": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/css-prefers-color-scheme/-/css-prefers-color-scheme-8.0.2.tgz", - "integrity": "sha512-OvFghizHJ45x7nsJJUSYLyQNTzsCU8yWjxAc/nhPQg1pbs18LMoET8N3kOweFDPy0JV0OSXN2iqRFhPBHYOeMA==", - "requires": {} - }, - "css-select": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", - "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", - "requires": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - } - }, - "css-tree": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.3.1.tgz", - "integrity": "sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==", - "requires": { - "mdn-data": "2.0.30", - "source-map-js": "^1.0.1" - } - }, - "css-what": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", - "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==" - }, - "css.escape": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", - "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==", - "dev": true - }, - "cssdb": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/cssdb/-/cssdb-7.5.3.tgz", - "integrity": "sha512-NQNRhrEnS6cW+RU/foLphb6xI/MDA70bI3Cy6VxJU8ilxgyTYz1X9zUzFGVTG5nGPylcKAGIt/UNc4deT56lQQ==" - }, - "cssesc": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", - "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==" - }, - "csso": { - "version": "5.0.5", - "resolved": "https://registry.npmjs.org/csso/-/csso-5.0.5.tgz", - "integrity": "sha512-0LrrStPOdJj+SPCCrGhzryycLjwcgUSHBtxNA8aIDxf0GLsRh1cKYhB00Gd1lDOS4yGH69+SNn13+TWbVHETFQ==", - "requires": { - "css-tree": "~2.2.0" - }, - "dependencies": { - "css-tree": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-2.2.1.tgz", - "integrity": 
"sha512-OA0mILzGc1kCOCSJerOeqDxDQ4HOh+G8NbOJFOTgOCzpw7fCBubk0fEyxp8AgOL/jvLgYA/uV0cMbe43ElF1JA==", - "requires": { - "mdn-data": "2.0.28", - "source-map-js": "^1.0.1" - } - }, - "mdn-data": { - "version": "2.0.28", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.28.tgz", - "integrity": "sha512-aylIc7Z9y4yzHYAJNuESG3hfhC+0Ibp/MAMiaOZgNv4pmEdFyfZhhhny4MNiAfWdBQ1RQ2mfDWmM1x8SvGyp8g==" - } - } - }, - "cssstyle": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-3.0.0.tgz", - "integrity": "sha512-N4u2ABATi3Qplzf0hWbVCdjenim8F3ojEXpBDF5hBpjzW182MjNGLqfmQ0SkSPeQ+V86ZXgeH8aXj6kayd4jgg==", - "dev": true, - "requires": { - "rrweb-cssom": "^0.6.0" - } - }, - "csstype": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" - }, - "d3-color": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-3.1.0.tgz", - "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==" - }, - "d3-dispatch": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-3.0.1.tgz", - "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==" - }, - "d3-drag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-3.0.0.tgz", - "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", - "requires": { - "d3-dispatch": "1 - 3", - "d3-selection": "3" - } - }, - "d3-ease": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-3.0.1.tgz", - "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==" - }, - "d3-interpolate": { - "version": "3.0.1", - "resolved": 
"https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-3.0.1.tgz", - "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", - "requires": { - "d3-color": "1 - 3" - } - }, - "d3-selection": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz", - "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==" - }, - "d3-timer": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-3.0.1.tgz", - "integrity": "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==" - }, - "d3-transition": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-3.0.1.tgz", - "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", - "requires": { - "d3-color": "1 - 3", - "d3-dispatch": "1 - 3", - "d3-ease": "1 - 3", - "d3-interpolate": "1 - 3", - "d3-timer": "1 - 3" - } - }, - "d3-zoom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-3.0.0.tgz", - "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", - "requires": { - "d3-dispatch": "1 - 3", - "d3-drag": "2 - 3", - "d3-interpolate": "1 - 3", - "d3-selection": "2 - 3", - "d3-transition": "2 - 3" - } - }, - "damerau-levenshtein": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz", - "integrity": "sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==", - "dev": true - }, - "data-urls": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-4.0.0.tgz", - "integrity": "sha512-/mMTei/JXPqvFqQtfyTowxmJVwr2PVAeCcDxyFf6LhoOu/09TX2OX3kb2wzi4DMXcfj4OItwDOnhl5oziPnT6g==", - "dev": true, - 
"requires": { - "abab": "^2.0.6", - "whatwg-mimetype": "^3.0.0", - "whatwg-url": "^12.0.0" - } - }, - "dayjs": { - "version": "1.11.7", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.11.7.tgz", - "integrity": "sha512-+Yw9U6YO5TQohxLcIkrXBeY73WP3ejHWVvx8XCk3gxvQDCTEmS48ZrSZCKciI7Bhl/uCMyxYtE9UqRILmFphkQ==" - }, - "debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, - "requires": { - "ms": "2.1.2" - } - }, - "decamelize": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz", - "integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==", - "dev": true - }, - "decamelize-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/decamelize-keys/-/decamelize-keys-1.1.1.tgz", - "integrity": "sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==", - "dev": true, - "requires": { - "decamelize": "^1.1.0", - "map-obj": "^1.0.0" - }, - "dependencies": { - "map-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-1.0.1.tgz", - "integrity": "sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==", - "dev": true - } - } - }, - "decimal.js": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz", - "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==", - "dev": true - }, - "decode-uri-component": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.2.tgz", - "integrity": "sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==" - }, - "deep-eql": { - "version": 
"4.1.3", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", - "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", - "dev": true, - "requires": { - "type-detect": "^4.0.0" - } - }, - "deep-equal": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-2.2.0.tgz", - "integrity": "sha512-RdpzE0Hv4lhowpIUKKMJfeH6C1pXdtT1/it80ubgWqwI3qpuxUBpC1S4hnHg+zjnuOoDkzUtUCEEkG+XG5l3Mw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "es-get-iterator": "^1.1.2", - "get-intrinsic": "^1.1.3", - "is-arguments": "^1.1.1", - "is-array-buffer": "^3.0.1", - "is-date-object": "^1.0.5", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "isarray": "^2.0.5", - "object-is": "^1.1.5", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4", - "which-boxed-primitive": "^1.0.2", - "which-collection": "^1.0.1", - "which-typed-array": "^1.1.9" - } - }, - "deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true - }, - "deep-is": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", - "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", - "dev": true - }, - "defaults": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", - "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", - "dev": true, - "requires": { - "clone": "^1.0.2" - } - }, - "define-properties": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz", - "integrity": 
"sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==", - "dev": true, - "requires": { - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", - "dev": true - }, - "detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" - }, - "didyoumean": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", - "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", - "dev": true - }, - "diff": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz", - "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==", - "dev": true - }, - "diff-sequences": { - "version": "29.4.3", - "resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.4.3.tgz", - "integrity": "sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==" - }, - "dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "requires": { - "path-type": "^4.0.0" - } - }, - "dlv": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", - "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", - "dev": true - }, - "doctrine": { - "version": "3.0.0", 
- "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", - "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "dom-accessibility-api": { - "version": "0.5.16", - "resolved": "https://registry.npmjs.org/dom-accessibility-api/-/dom-accessibility-api-0.5.16.tgz", - "integrity": "sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==", - "dev": true - }, - "dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "requires": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - } - }, - "domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==" - }, - "domexception": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/domexception/-/domexception-4.0.0.tgz", - "integrity": "sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw==", - "dev": true, - "requires": { - "webidl-conversions": "^7.0.0" - } - }, - "domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "requires": { - "domelementtype": "^2.3.0" - } - }, - "domutils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.0.1.tgz", - "integrity": "sha512-z08c1l761iKhDFtfXO04C7kTdPBLi41zwOZl00WS8b5eiaebNpY00HKbztwBq+e3vyqWNwWF3mP9YLUeqIrF+Q==", - "requires": { - "dom-serializer": 
"^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.1" - } - }, - "downshift": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/downshift/-/downshift-7.6.0.tgz", - "integrity": "sha512-VSoTVynTAsabou/hbZ6HJHUVhtBiVOjQoBsCPcQq5eAROIGP+9XKMp9asAKQ3cEcUP4oe0fFdD2pziUjhFY33Q==", - "requires": { - "@babel/runtime": "^7.14.8", - "compute-scroll-into-view": "^2.0.4", - "prop-types": "^15.7.2", - "react-is": "^17.0.2", - "tslib": "^2.3.0" - } - }, - "eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "dev": true - }, - "electron-to-chromium": { - "version": "1.4.349", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.349.tgz", - "integrity": "sha512-34LBfVDiL6byWorSmQOPwq4gD5wpN8Mhh5yPGQr67FbcxsfUS0BDJP9y6RykSgeWVUfSkN/2dChywnsrmKVyUg==" - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "requires": { - "once": "^1.4.0" - } - }, - "entities": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.4.0.tgz", - "integrity": "sha512-oYp7156SP8LkeGD0GF85ad1X9Ai79WtRsZ2gxJqtBuzH+98YUV6jkHEKlZkMbcrjJjIVJNIDP/3WL9wQkoPbWA==" - }, - "error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", 
- "dev": true, - "requires": { - "is-arrayish": "^0.2.1" - } - }, - "error-stack-parser": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz", - "integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==", - "requires": { - "stackframe": "^1.3.4" - } - }, - "es-abstract": { - "version": "1.21.2", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.21.2.tgz", - "integrity": "sha512-y/B5POM2iBnIxCiernH1G7rC9qQoM77lLIMQLuob0zhp8C56Po81+2Nj0WFKnd0pNReDTnkYryc+zhOzpEIROg==", - "dev": true, - "requires": { - "array-buffer-byte-length": "^1.0.0", - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "es-set-tostringtag": "^2.0.1", - "es-to-primitive": "^1.2.1", - "function.prototype.name": "^1.1.5", - "get-intrinsic": "^1.2.0", - "get-symbol-description": "^1.0.0", - "globalthis": "^1.0.3", - "gopd": "^1.0.1", - "has": "^1.0.3", - "has-property-descriptors": "^1.0.0", - "has-proto": "^1.0.1", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.5", - "is-array-buffer": "^3.0.2", - "is-callable": "^1.2.7", - "is-negative-zero": "^2.0.2", - "is-regex": "^1.1.4", - "is-shared-array-buffer": "^1.0.2", - "is-string": "^1.0.7", - "is-typed-array": "^1.1.10", - "is-weakref": "^1.0.2", - "object-inspect": "^1.12.3", - "object-keys": "^1.1.1", - "object.assign": "^4.1.4", - "regexp.prototype.flags": "^1.4.3", - "safe-regex-test": "^1.0.0", - "string.prototype.trim": "^1.2.7", - "string.prototype.trimend": "^1.0.6", - "string.prototype.trimstart": "^1.0.6", - "typed-array-length": "^1.0.4", - "unbox-primitive": "^1.0.2", - "which-typed-array": "^1.1.9" - } - }, - "es-get-iterator": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/es-get-iterator/-/es-get-iterator-1.1.3.tgz", - "integrity": "sha512-sPZmqHBe6JIiTfN5q2pEi//TwxmAFHwj/XEuYjTuse78i8KxaqMTTzxPoFKuzRpDpTJ+0NAbpfenkmH2rePtuw==", - "dev": true, - "requires": { - 
"call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "has-symbols": "^1.0.3", - "is-arguments": "^1.1.1", - "is-map": "^2.0.2", - "is-set": "^2.0.2", - "is-string": "^1.0.7", - "isarray": "^2.0.5", - "stop-iteration-iterator": "^1.0.0" - } - }, - "es-set-tostringtag": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz", - "integrity": "sha512-g3OMbtlwY3QewlqAiMLI47KywjWZoEytKr8pf6iTC8uJq5bIAH52Z9pnQ8pVL6whrCto53JZDuUIsifGeLorTg==", - "dev": true, - "requires": { - "get-intrinsic": "^1.1.3", - "has": "^1.0.3", - "has-tostringtag": "^1.0.0" - } - }, - "es-shim-unscopables": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-shim-unscopables/-/es-shim-unscopables-1.0.0.tgz", - "integrity": "sha512-Jm6GPcCdC30eMLbZ2x8z2WuRwAws3zTBBKuusffYVUrNj/GVSUAZ+xKMaUpfNDR5IbyNA5LJbaecoUVbmUcB1w==", - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, - "es-to-primitive": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.2.1.tgz", - "integrity": "sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==", - "dev": true, - "requires": { - "is-callable": "^1.1.4", - "is-date-object": "^1.0.1", - "is-symbol": "^1.0.2" - } - }, - "esbuild": { - "version": "0.17.15", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.17.15.tgz", - "integrity": "sha512-LBUV2VsUIc/iD9ME75qhT4aJj0r75abCVS0jakhFzOtR7TQsqQA5w0tZ+KTKnwl3kXE0MhskNdHDh/I5aCR1Zw==", - "dev": true, - "requires": { - "@esbuild/android-arm": "0.17.15", - "@esbuild/android-arm64": "0.17.15", - "@esbuild/android-x64": "0.17.15", - "@esbuild/darwin-arm64": "0.17.15", - "@esbuild/darwin-x64": "0.17.15", - "@esbuild/freebsd-arm64": "0.17.15", - "@esbuild/freebsd-x64": "0.17.15", - "@esbuild/linux-arm": "0.17.15", - "@esbuild/linux-arm64": "0.17.15", - "@esbuild/linux-ia32": "0.17.15", - "@esbuild/linux-loong64": "0.17.15", - 
"@esbuild/linux-mips64el": "0.17.15", - "@esbuild/linux-ppc64": "0.17.15", - "@esbuild/linux-riscv64": "0.17.15", - "@esbuild/linux-s390x": "0.17.15", - "@esbuild/linux-x64": "0.17.15", - "@esbuild/netbsd-x64": "0.17.15", - "@esbuild/openbsd-x64": "0.17.15", - "@esbuild/sunos-x64": "0.17.15", - "@esbuild/win32-arm64": "0.17.15", - "@esbuild/win32-ia32": "0.17.15", - "@esbuild/win32-x64": "0.17.15" - } - }, - "escalade": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", - "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==" - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==" - }, - "escodegen": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-2.0.0.tgz", - "integrity": "sha512-mmHKys/C8BFUGI+MAWNcSYoORYLMdPzjrknd2Vc+bUsjN5bXcr8EhrNB+UTqfL1y3I9c4fw2ihgtMPQLBRiQxw==", - "dev": true, - "requires": { - "esprima": "^4.0.1", - "estraverse": "^5.2.0", - "esutils": "^2.0.2", - "optionator": "^0.8.1", - "source-map": "~0.6.1" - } - }, - "eslint": { - "version": "8.37.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.37.0.tgz", - "integrity": "sha512-NU3Ps9nI05GUoVMxcZx1J8CNR6xOvUT4jAUMH5+z8lpp3aEdPVCImKw6PWG4PY+Vfkpr+jvMpxs/qoE7wq0sPw==", - "dev": true, - "requires": { - "@eslint-community/eslint-utils": "^4.2.0", - "@eslint-community/regexpp": "^4.4.0", - "@eslint/eslintrc": "^2.0.2", - "@eslint/js": "8.37.0", - "@humanwhocodes/config-array": "^0.11.8", - "@humanwhocodes/module-importer": "^1.0.1", - "@nodelib/fs.walk": "^1.2.8", - "ajv": "^6.10.0", - "chalk": "^4.0.0", - "cross-spawn": "^7.0.2", - "debug": "^4.3.2", - "doctrine": "^3.0.0", - "escape-string-regexp": "^4.0.0", - "eslint-scope": "^7.1.1", - 
"eslint-visitor-keys": "^3.4.0", - "espree": "^9.5.1", - "esquery": "^1.4.2", - "esutils": "^2.0.2", - "fast-deep-equal": "^3.1.3", - "file-entry-cache": "^6.0.1", - "find-up": "^5.0.0", - "glob-parent": "^6.0.2", - "globals": "^13.19.0", - "grapheme-splitter": "^1.0.4", - "ignore": "^5.2.0", - "import-fresh": "^3.0.0", - "imurmurhash": "^0.1.4", - "is-glob": "^4.0.0", - "is-path-inside": "^3.0.3", - "js-sdsl": "^4.1.4", - "js-yaml": "^4.1.0", - "json-stable-stringify-without-jsonify": "^1.0.1", - "levn": "^0.4.1", - "lodash.merge": "^4.6.2", - "minimatch": "^3.1.2", - "natural-compare": "^1.4.0", - "optionator": "^0.9.1", - "strip-ansi": "^6.0.1", - "strip-json-comments": "^3.1.0", - "text-table": "^0.2.0" - }, - "dependencies": { - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "escape-string-regexp": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", - "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", - "dev": true - }, - "find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", - "dev": true, - 
"requires": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" - } - }, - "glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "requires": { - "is-glob": "^4.0.3" - } - }, - "globals": { - "version": "13.20.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.20.0.tgz", - "integrity": "sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==", - "dev": true, - "requires": { - "type-fest": "^0.20.2" - } - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "levn": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", - "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1", - "type-check": "~0.4.0" - } - }, - "locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", - "dev": true, - "requires": { - "p-locate": "^5.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "optionator": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.1.tgz", - "integrity": 
"sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==", - "dev": true, - "requires": { - "deep-is": "^0.1.3", - "fast-levenshtein": "^2.0.6", - "levn": "^0.4.1", - "prelude-ls": "^1.2.1", - "type-check": "^0.4.0", - "word-wrap": "^1.2.3" - } - }, - "p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", - "dev": true, - "requires": { - "yocto-queue": "^0.1.0" - } - }, - "p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", - "dev": true, - "requires": { - "p-limit": "^3.0.2" - } - }, - "prelude-ls": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", - "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", - "dev": true - }, - "strip-json-comments": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true - }, - "type-check": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", - "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", - "dev": true, - "requires": { - "prelude-ls": "^1.2.1" - } - }, - "type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true - }, - "yocto-queue": { - "version": "0.1.0", - "resolved": 
"https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", - "dev": true - } - } - }, - "eslint-config-react-app": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/eslint-config-react-app/-/eslint-config-react-app-7.0.1.tgz", - "integrity": "sha512-K6rNzvkIeHaTd8m/QEh1Zko0KI7BACWkkneSs6s9cKZC/J27X3eZR6Upt1jkmZ/4FK+XUOPPxMEN7+lbUXfSlA==", - "dev": true, - "requires": { - "@babel/core": "^7.16.0", - "@babel/eslint-parser": "^7.16.3", - "@rushstack/eslint-patch": "^1.1.0", - "@typescript-eslint/eslint-plugin": "^5.5.0", - "@typescript-eslint/parser": "^5.5.0", - "babel-preset-react-app": "^10.0.1", - "confusing-browser-globals": "^1.0.11", - "eslint-plugin-flowtype": "^8.0.3", - "eslint-plugin-import": "^2.25.3", - "eslint-plugin-jest": "^25.3.0", - "eslint-plugin-jsx-a11y": "^6.5.1", - "eslint-plugin-react": "^7.27.1", - "eslint-plugin-react-hooks": "^4.3.0", - "eslint-plugin-testing-library": "^5.0.1" - } - }, - "eslint-import-resolver-node": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz", - "integrity": "sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==", - "dev": true, - "requires": { - "debug": "^3.2.7", - "is-core-module": "^2.11.0", - "resolve": "^1.22.1" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - } - } - }, - "eslint-module-utils": { - "version": "2.7.4", - "resolved": "https://registry.npmjs.org/eslint-module-utils/-/eslint-module-utils-2.7.4.tgz", - "integrity": 
"sha512-j4GT+rqzCoRKHwURX7pddtIPGySnX9Si/cgMI5ztrcqOPtk5dDEeZ34CQVPphnqkJytlc97Vuk05Um2mJ3gEQA==", - "dev": true, - "requires": { - "debug": "^3.2.7" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - } - } - }, - "eslint-plugin-flowtype": { - "version": "8.0.3", - "resolved": "https://registry.npmjs.org/eslint-plugin-flowtype/-/eslint-plugin-flowtype-8.0.3.tgz", - "integrity": "sha512-dX8l6qUL6O+fYPtpNRideCFSpmWOUVx5QcaGLVqe/vlDiBSe4vYljDWDETwnyFzpl7By/WVIu6rcrniCgH9BqQ==", - "dev": true, - "requires": { - "lodash": "^4.17.21", - "string-natural-compare": "^3.0.1" - } - }, - "eslint-plugin-import": { - "version": "2.27.5", - "resolved": "https://registry.npmjs.org/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz", - "integrity": "sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==", - "dev": true, - "requires": { - "array-includes": "^3.1.6", - "array.prototype.flat": "^1.3.1", - "array.prototype.flatmap": "^1.3.1", - "debug": "^3.2.7", - "doctrine": "^2.1.0", - "eslint-import-resolver-node": "^0.3.7", - "eslint-module-utils": "^2.7.4", - "has": "^1.0.3", - "is-core-module": "^2.11.0", - "is-glob": "^4.0.3", - "minimatch": "^3.1.2", - "object.values": "^1.1.6", - "resolve": "^1.22.1", - "semver": "^6.3.0", - "tsconfig-paths": "^3.14.1" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "debug": { - "version": "3.2.7", - "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - }, - "doctrine": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - } - } - }, - "eslint-plugin-jest": { - "version": "25.7.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-jest/-/eslint-plugin-jest-25.7.0.tgz", - "integrity": "sha512-PWLUEXeeF7C9QGKqvdSbzLOiLTx+bno7/HC9eefePfEb257QFHg7ye3dh80AZVkaa/RQsBB1Q/ORQvg2X7F0NQ==", - "dev": true, - "requires": { - "@typescript-eslint/experimental-utils": "^5.0.0" - } - }, - "eslint-plugin-jsx-a11y": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.7.1.tgz", - "integrity": "sha512-63Bog4iIethyo8smBklORknVjB0T2dwB8Mr/hIC+fBS0uyHdYYpzM/Ed+YC8VxTjlXHEWFOdmgwcDn1U2L9VCA==", - "dev": true, - "requires": { - "@babel/runtime": "^7.20.7", - "aria-query": "^5.1.3", - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "ast-types-flow": "^0.0.7", - "axe-core": "^4.6.2", - "axobject-query": "^3.1.1", - "damerau-levenshtein": "^1.0.8", - "emoji-regex": "^9.2.2", - "has": "^1.0.3", - "jsx-ast-utils": "^3.3.3", - "language-tags": "=1.0.5", - "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "semver": "^6.3.0" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", 
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - } - } - }, - "eslint-plugin-react": { - "version": "7.32.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-react/-/eslint-plugin-react-7.32.2.tgz", - "integrity": "sha512-t2fBMa+XzonrrNkyVirzKlvn5RXzzPwRHtMvLAtVZrt8oxgnTQaYbU6SXTOO1mwQgp1y5+toMSKInnzGr0Knqg==", - "dev": true, - "requires": { - "array-includes": "^3.1.6", - "array.prototype.flatmap": "^1.3.1", - "array.prototype.tosorted": "^1.1.1", - "doctrine": "^2.1.0", - "estraverse": "^5.3.0", - "jsx-ast-utils": "^2.4.1 || ^3.0.0", - "minimatch": "^3.1.2", - "object.entries": "^1.1.6", - "object.fromentries": "^2.0.6", - "object.hasown": "^1.1.2", - "object.values": "^1.1.6", - "prop-types": "^15.8.1", - "resolve": "^2.0.0-next.4", - "semver": "^6.3.0", - "string.prototype.matchall": "^4.0.8" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "doctrine": { - 
"version": "2.1.0", - "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-2.1.0.tgz", - "integrity": "sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==", - "dev": true, - "requires": { - "esutils": "^2.0.2" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "resolve": { - "version": "2.0.0-next.4", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.4.tgz", - "integrity": "sha512-iMDbmAWtfU+MHpxt/I5iWI7cY6YVEZUQ3MBgPQ++XD1PELuJHIl82xBmObyP2KyQmkNB2dsqF7seoQQiAn5yDQ==", - "dev": true, - "requires": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - } - } - } - }, - "eslint-plugin-react-hooks": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz", - "integrity": "sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g==", - "dev": true, - "requires": {} - }, - "eslint-plugin-testing-library": { - "version": "5.10.2", - "resolved": "https://registry.npmjs.org/eslint-plugin-testing-library/-/eslint-plugin-testing-library-5.10.2.tgz", - "integrity": "sha512-f1DmDWcz5SDM+IpCkEX0lbFqrrTs8HRsEElzDEqN/EBI0hpRj8Cns5+IVANXswE8/LeybIJqPAOQIFu2j5Y5sw==", - "dev": true, - "requires": { - "@typescript-eslint/utils": "^5.43.0" - } - }, - "eslint-scope": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.1.1.tgz", - "integrity": "sha512-QKQM/UXpIiHcLqJ5AOyIW7XZmzjkzQXYE54n1++wb0u9V/abW3l9uQnxX8Z5Xd18xyKIMTUAyQ0k1e8pz6LUrw==", - "dev": true, - "requires": { - "esrecurse": "^4.3.0", - "estraverse": "^5.2.0" - } - }, - "eslint-visitor-keys": { - 
"version": "3.4.0", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.0.tgz", - "integrity": "sha512-HPpKPUBQcAsZOsHAFwTtIKcYlCje62XB7SEAcxjtmW6TD1WVpkS6i6/hOVtTZIl4zGj/mBqpFVGvaDneik+VoQ==", - "dev": true - }, - "espree": { - "version": "9.5.1", - "resolved": "https://registry.npmjs.org/espree/-/espree-9.5.1.tgz", - "integrity": "sha512-5yxtHSZXRSW5pvv3hAlXM5+/Oswi1AUFqBmbibKb5s6bp3rGIDkyXU6xCoyuuLhijr4SFwPrXRoZjz0AZDN9tg==", - "dev": true, - "requires": { - "acorn": "^8.8.0", - "acorn-jsx": "^5.3.2", - "eslint-visitor-keys": "^3.4.0" - } - }, - "esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "dev": true - }, - "esquery": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", - "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", - "dev": true, - "requires": { - "estraverse": "^5.1.0" - } - }, - "esrecurse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", - "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", - "dev": true, - "requires": { - "estraverse": "^5.2.0" - } - }, - "estraverse": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", - "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", - "dev": true - }, - "estree-walker": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", - "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", - "dev": true - }, - "esutils": { - "version": "2.0.3", - "resolved": 
"https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", - "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", - "dev": true - }, - "events": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz", - "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", - "dev": true - }, - "execa": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", - "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", - "dev": true, - "requires": { - "cross-spawn": "^7.0.3", - "get-stream": "^6.0.0", - "human-signals": "^2.1.0", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.1", - "onetime": "^5.1.2", - "signal-exit": "^3.0.3", - "strip-final-newline": "^2.0.0" - } - }, - "expect": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/expect/-/expect-29.5.0.tgz", - "integrity": "sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==", - "requires": { - "@jest/expect-utils": "^29.5.0", - "jest-get-type": "^29.4.3", - "jest-matcher-utils": "^29.5.0", - "jest-message-util": "^29.5.0", - "jest-util": "^29.5.0" - } - }, - "external-editor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz", - "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", - "dev": true, - "requires": { - "chardet": "^0.7.0", - "iconv-lite": "^0.4.24", - "tmp": "^0.0.33" - } - }, - "fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - "fast-glob": { - "version": 
"3.2.12", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz", - "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==", - "dev": true, - "requires": { - "@nodelib/fs.stat": "^2.0.2", - "@nodelib/fs.walk": "^1.2.3", - "glob-parent": "^5.1.2", - "merge2": "^1.3.0", - "micromatch": "^4.0.4" - } - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "fast-levenshtein": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", - "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", - "dev": true - }, - "fast-url-parser": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==", - "dev": true, - "requires": { - "punycode": "^1.3.2" - } - }, - "fastest-levenshtein": { - "version": "1.0.16", - "resolved": "https://registry.npmjs.org/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", - "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", - "dev": true - }, - "fastq": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", - "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", - "dev": true, - "requires": { - "reusify": "^1.0.4" - } - }, - "figures": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", - "integrity": 
"sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "file-entry-cache": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", - "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", - "dev": true, - "requires": { - "flat-cache": "^3.0.4" - } - }, - "fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "requires": { - "to-regex-range": "^5.0.1" - } - }, - "filter-console": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/filter-console/-/filter-console-0.1.1.tgz", - "integrity": "sha512-zrXoV1Uaz52DqPs+qEwNJWJFAWZpYJ47UNmpN9q4j+/EYsz85uV0DC9k8tRND5kYmoVzL0W+Y75q4Rg8sRJCdg==", - "dev": true - }, - "filter-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-1.1.0.tgz", - "integrity": "sha512-8rXg1ZnX7xzy2NGDVkBVaAy+lSlPNwad13BtgSlLuxfIslyt5Vg64U7tFcCt4WS1R0hvtnQybT/IyCkGZ3DpXQ==" - }, - "find-up": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz", - "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", - "dev": true, - "requires": { - "locate-path": "^5.0.0", - "path-exists": "^4.0.0" - } - }, - "flat-cache": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", - "integrity": "sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==", - "dev": true, - "requires": { - "flatted": "^3.1.0", - "rimraf": "^3.0.2" - } - }, - "flatted": { - "version": "3.2.7", - "resolved": 
"https://registry.npmjs.org/flatted/-/flatted-3.2.7.tgz", - "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", - "dev": true - }, - "for-each": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", - "integrity": "sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==", - "dev": true, - "requires": { - "is-callable": "^1.1.3" - } - }, - "form-data": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", - "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", - "dev": true, - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.8", - "mime-types": "^2.1.12" - } - }, - "fraction.js": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.2.0.tgz", - "integrity": "sha512-MhLuK+2gUcnZe8ZHlaaINnQLl0xRIGRfcGk2yl8xoQAfHrSsL3rYu6FCmBdkdbhc9EPlwyGHewaRsvwRMJtAlA==" - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" - }, - "fsevents": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", - "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==", - "optional": true - }, - "function-bind": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", - "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", - "dev": true - }, - "function.prototype.name": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/function.prototype.name/-/function.prototype.name-1.1.5.tgz", - "integrity": 
"sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "es-abstract": "^1.19.0", - "functions-have-names": "^1.2.2" - } - }, - "functions-have-names": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/functions-have-names/-/functions-have-names-1.2.3.tgz", - "integrity": "sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==", - "dev": true - }, - "gensync": { - "version": "1.0.0-beta.2", - "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", - "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", - "dev": true - }, - "get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "dev": true - }, - "get-func-name": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.2.tgz", - "integrity": "sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==", - "dev": true - }, - "get-intrinsic": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz", - "integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==", - "dev": true, - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "get-stream": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", - "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", - "dev": true - }, - "get-symbol-description": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.0.tgz", - "integrity": "sha512-2EmdH1YvIQiZpltCNgkuiUnyukzxM/R6NDJX31Ke3BG1Nq5b0S2PhX59UKi9vZpPDQVdqn+1IcaAwnzTT5vCjw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" - } - }, - "glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^5.0.1", - "once": "^1.3.0" - } - }, - "glob-parent": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", - "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "requires": { - "is-glob": "^4.0.1" - } - }, - "global-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz", - "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==", - "dev": true, - "requires": { - "global-prefix": "^3.0.0" - } - }, - "global-prefix": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz", - "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==", - "dev": true, - "requires": { - "ini": "^1.3.5", - "kind-of": "^6.0.2", - "which": "^1.3.1" - }, - "dependencies": { - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "globals": { - "version": "11.12.0", - "resolved": 
"https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", - "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", - "dev": true - }, - "globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", - "dev": true, - "requires": { - "define-properties": "^1.1.3" - } - }, - "globby": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", - "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", - "dev": true, - "requires": { - "array-union": "^2.1.0", - "dir-glob": "^3.0.1", - "fast-glob": "^3.2.9", - "ignore": "^5.2.0", - "merge2": "^1.4.1", - "slash": "^3.0.0" - } - }, - "globjoin": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/globjoin/-/globjoin-0.1.4.tgz", - "integrity": "sha512-xYfnw62CKG8nLkZBfWbhWwDw02CHty86jfPcc2cr3ZfeuK9ysoVPPEUxf21bAD/rWAgk52SuBrLJlefNy8mvFg==", - "dev": true - }, - "gopd": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", - "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", - "dev": true, - "requires": { - "get-intrinsic": "^1.1.3" - } - }, - "graceful-fs": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", - "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" - }, - "grapheme-splitter": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/grapheme-splitter/-/grapheme-splitter-1.0.4.tgz", - "integrity": "sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==", - "dev": true - }, - "graphql": { - "version": "15.8.0", - "resolved": 
"https://registry.npmjs.org/graphql/-/graphql-15.8.0.tgz", - "integrity": "sha512-5gghUc24tP9HRznNpV2+FIoq3xKkj5dTQqf4v0CpdPbFVwFkWoxOM+o+2OC9ZSvjEMTjfmG9QT+gcvggTwW1zw==", - "dev": true - }, - "hard-rejection": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/hard-rejection/-/hard-rejection-2.1.0.tgz", - "integrity": "sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==", - "dev": true - }, - "has": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", - "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-bigints": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz", - "integrity": "sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==", - "dev": true - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==" - }, - "has-property-descriptors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.0.tgz", - "integrity": "sha512-62DVLZGoiEBDHQyqG4w9xCuZ7eJEwNmJRWw2VY84Oedb7WFcA27fiEVe8oUQx9hAUJ4ekurquucTGwsyO1XGdQ==", - "dev": true, - "requires": { - "get-intrinsic": "^1.1.1" - } - }, - "has-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", - "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", - "dev": true - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": 
"sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "dev": true - }, - "has-tostringtag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", - "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", - "dev": true, - "requires": { - "has-symbols": "^1.0.2" - } - }, - "headers-utils": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/headers-utils/-/headers-utils-3.0.2.tgz", - "integrity": "sha512-xAxZkM1dRyGV2Ou5bzMxBPNLoRCjcX+ya7KSWybQD2KwLphxsapUVK6x/02o7f4VU6GPSXch9vNY2+gkU8tYWQ==", - "dev": true - }, - "highcharts": { - "version": "10.3.3", - "resolved": "https://registry.npmjs.org/highcharts/-/highcharts-10.3.3.tgz", - "integrity": "sha512-r7wgUPQI9tr3jFDn3XT36qsNwEIZYcfgz4mkKEA6E4nn5p86y+u1EZjazIG4TRkl5/gmGRtkBUiZW81g029RIw==" - }, - "highcharts-react-official": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/highcharts-react-official/-/highcharts-react-official-3.2.0.tgz", - "integrity": "sha512-71IJZsLmEboYFjONpwC3NRsg6JKvtKYtS5Si3e6s6MLRSOFNOY8KILTkzvO36kjpeR/A0X3/kvvewE+GMPpkjw==", - "requires": {} - }, - "history": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/history/-/history-5.3.0.tgz", - "integrity": "sha512-ZqaKwjjrAYUYfLG+htGaIIZ4nioX2L70ZUMIFysS3xvBsSG4x/n1V6TXV3N8ZYNuFGlDirFg32T7B6WOUPDYcQ==", - "requires": { - "@babel/runtime": "^7.7.6" - } - }, - "hoist-non-react-statics": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", - "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", - "requires": { - "react-is": "^16.7.0" - }, - "dependencies": { - "react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": 
"sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" - } - } - }, - "hosted-git-info": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", - "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - }, - "dependencies": { - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - } - } - }, - "html-encoding-sniffer": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz", - "integrity": "sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA==", - "dev": true, - "requires": { - "whatwg-encoding": "^2.0.0" - } - }, - "html-tags": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.0.tgz", - "integrity": "sha512-mH3dWNbvfCKcAEysbpD7wvtIJ6ImPog8aFhfzqog9gCN8CJFhKjLDtjpohG3IxYRLqHMJ1PWpBvnSMkFJBQ6Jg==", - "dev": true - }, - "http-proxy-agent": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", - "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", - "dev": true, - "requires": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" - } - }, - "https-proxy-agent": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", - "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", - "dev": true, - "requires": { - "agent-base": "6", - "debug": "4" - } - }, - "human-signals": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", - "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", - "dev": true - }, - "i": { - "version": "0.3.7", - "resolved": "https://registry.npmjs.org/i/-/i-0.3.7.tgz", - "integrity": "sha512-FYz4wlXgkQwIPqhzC5TdNMLSE5+GS1IIDJZY/1ZiEPCT2S3COUVZeT5OW4BmW4r5LHLQuOosSwsvnroG9GR59Q==", - "dev": true - }, - "iconv-lite": { - "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", - "dev": true, - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true - }, - "ignore": { - "version": "5.2.4", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", - "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", - "dev": true - }, - "immutable": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.0.tgz", - "integrity": "sha512-0AOCmOip+xgJwEVTQj1EfiDDOkPmuyllDuTuEX+DDXUgapLAsBIfkg3sxCYyCEA8mQqZrrxPUGjcOQ2JS3WLkg==" - }, - "import-fresh": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", - "integrity": 
"sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", - "dev": true, - "requires": { - "parent-module": "^1.0.0", - "resolve-from": "^4.0.0" - }, - "dependencies": { - "resolve-from": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", - "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", - "dev": true - } - } - }, - "import-lazy": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-4.0.0.tgz", - "integrity": "sha512-rKtvo6a868b5Hu3heneU+L4yEQ4jYKLtjpnPeUdK7h0yzXGmyBTypknlkCvHFBqfX9YlorEiMM6Dnq/5atfHkw==", - "dev": true - }, - "imurmurhash": { - "version": "0.1.4", - "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", - "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", - "dev": true - }, - "indent-string": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", - "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", - "dev": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" - }, - "ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true - }, - 
"inquirer": { - "version": "8.2.5", - "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-8.2.5.tgz", - "integrity": "sha512-QAgPDQMEgrDssk1XiwwHoOGYF9BAbUcc1+j+FhEvaOt8/cKRqyLn0U5qA6F74fGhTMGxf92pOvPBeh29jQJDTQ==", - "dev": true, - "requires": { - "ansi-escapes": "^4.2.1", - "chalk": "^4.1.1", - "cli-cursor": "^3.1.0", - "cli-width": "^3.0.0", - "external-editor": "^3.0.3", - "figures": "^3.0.0", - "lodash": "^4.17.21", - "mute-stream": "0.0.8", - "ora": "^5.4.1", - "run-async": "^2.4.0", - "rxjs": "^7.5.5", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0", - "through": "^2.3.6", - "wrap-ansi": "^7.0.0" - } - }, - "internal-slot": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/internal-slot/-/internal-slot-1.0.5.tgz", - "integrity": "sha512-Y+R5hJrzs52QCG2laLn4udYVnxsfny9CpOhNhUvk/SSSVyF6T27FzRbF0sroPidSu3X8oEAkOn2K804mjpt6UQ==", - "dev": true, - "requires": { - "get-intrinsic": "^1.2.0", - "has": "^1.0.3", - "side-channel": "^1.0.4" - } - }, - "is-arguments": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", - "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-array-buffer": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/is-array-buffer/-/is-array-buffer-3.0.2.tgz", - "integrity": "sha512-y+FyyR/w8vfIRq4eQcM1EYgSTnmHXPqaF+IgzgraytCFq5Xh8lllDVmAZolPJiZttZLeFSINPYMaEJ7/vWUa1w==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.2.0", - "is-typed-array": "^1.1.10" - } - }, - "is-arrayish": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", - "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", - "dev": true - }, - "is-bigint": { - "version": "1.0.4", - "resolved": 
"https://registry.npmjs.org/is-bigint/-/is-bigint-1.0.4.tgz", - "integrity": "sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==", - "dev": true, - "requires": { - "has-bigints": "^1.0.1" - } - }, - "is-binary-path": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", - "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", - "requires": { - "binary-extensions": "^2.0.0" - } - }, - "is-boolean-object": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/is-boolean-object/-/is-boolean-object-1.1.2.tgz", - "integrity": "sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-callable": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", - "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", - "dev": true - }, - "is-core-module": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz", - "integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==", - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, - "is-date-object": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.5.tgz", - "integrity": "sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==", - "dev": true, - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - 
"dev": true - }, - "is-extglob": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", - "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==" - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "is-glob": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", - "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "requires": { - "is-extglob": "^2.1.1" - } - }, - "is-interactive": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", - "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", - "dev": true - }, - "is-map": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-map/-/is-map-2.0.2.tgz", - "integrity": "sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==", - "dev": true - }, - "is-negative-zero": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-negative-zero/-/is-negative-zero-2.0.2.tgz", - "integrity": "sha512-dqJvarLawXsFbNDeJW7zAz8ItJ9cd28YufuuFzh0G8pNHjJMnY08Dv7sYX2uF5UpQOwieAeOExEYAWWfu7ZZUA==", - "dev": true - }, - "is-node-process": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz", - "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==", - "dev": true - }, - "is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": 
"sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==" - }, - "is-number-object": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.0.7.tgz", - "integrity": "sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==", - "dev": true, - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true - }, - "is-plain-obj": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", - "integrity": "sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==", - "dev": true - }, - "is-plain-object": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", - "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", - "dev": true - }, - "is-port-reachable": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-port-reachable/-/is-port-reachable-4.0.0.tgz", - "integrity": "sha512-9UoipoxYmSk6Xy7QFgRv2HDyaysmgSG75TFQs6S+3pDM7ZhKTF/bskZV+0UlABHzKjNVhPjYCLfeZUEg1wXxig==", - "dev": true - }, - "is-potential-custom-element-name": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", - "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", - "dev": true - }, - "is-reference": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/is-reference/-/is-reference-1.2.1.tgz", - "integrity": 
"sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ==", - "dev": true, - "requires": { - "@types/estree": "*" - } - }, - "is-regex": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.1.4.tgz", - "integrity": "sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-tostringtag": "^1.0.0" - } - }, - "is-set": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-set/-/is-set-2.0.2.tgz", - "integrity": "sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==", - "dev": true - }, - "is-shared-array-buffer": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz", - "integrity": "sha512-sqN2UDu1/0y6uvXyStCOzyhAjCSlHceFoMKJW8W9EU9cvic/QdsZ0kEU93HEy3IUEFZIiH/3w+AH/UQbPHNdhA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2" - } - }, - "is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "dev": true - }, - "is-string": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", - "integrity": "sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==", - "dev": true, - "requires": { - "has-tostringtag": "^1.0.0" - } - }, - "is-symbol": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.4.tgz", - "integrity": "sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==", - "dev": true, - "requires": { - "has-symbols": "^1.0.2" - } - }, - "is-typed-array": { - "version": "1.1.10", - "resolved": 
"https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", - "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", - "dev": true, - "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0" - } - }, - "is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "dev": true - }, - "is-weakmap": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-weakmap/-/is-weakmap-2.0.1.tgz", - "integrity": "sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==", - "dev": true - }, - "is-weakref": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/is-weakref/-/is-weakref-1.0.2.tgz", - "integrity": "sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2" - } - }, - "is-weakset": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/is-weakset/-/is-weakset-2.0.2.tgz", - "integrity": "sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.1" - } - }, - "is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dev": true, - "requires": { - "is-docker": "^2.0.0" - } - }, - "isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": 
"sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", - "dev": true - }, - "iserror": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/iserror/-/iserror-0.0.2.tgz", - "integrity": "sha512-oKGGrFVaWwETimP3SiWwjDeY27ovZoyZPHtxblC4hCq9fXxed/jasx+ATWFFjCVSRZng8VTMsN1nDnGo6zMBSw==" - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true - }, - "jest-diff": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-diff/-/jest-diff-29.5.0.tgz", - "integrity": "sha512-LtxijLLZBduXnHSniy0WMdaHjmQnt3g5sa16W4p0HqukYTTsyTW3GD1q41TyGl5YFXj/5B2U6dlh5FM1LIMgxw==", - "requires": { - "chalk": "^4.0.0", - "diff-sequences": "^29.4.3", - "jest-get-type": "^29.4.3", - "pretty-format": "^29.5.0" - }, - "dependencies": { - "ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==" - }, - "pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", - "requires": { - "@jest/schemas": "^29.4.3", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - } - }, - "react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" - } - } - }, - "jest-get-type": { - "version": "29.4.3", - "resolved": "https://registry.npmjs.org/jest-get-type/-/jest-get-type-29.4.3.tgz", - "integrity": 
"sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==" - }, - "jest-matcher-utils": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-matcher-utils/-/jest-matcher-utils-29.5.0.tgz", - "integrity": "sha512-lecRtgm/rjIK0CQ7LPQwzCs2VwW6WAahA55YBuI+xqmhm7LAaxokSB8C97yJeYyT+HvQkH741StzpU41wohhWw==", - "requires": { - "chalk": "^4.0.0", - "jest-diff": "^29.5.0", - "jest-get-type": "^29.4.3", - "pretty-format": "^29.5.0" - }, - "dependencies": { - "ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==" - }, - "pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", - "requires": { - "@jest/schemas": "^29.4.3", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - } - }, - "react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" - } - } - }, - "jest-message-util": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-message-util/-/jest-message-util-29.5.0.tgz", - "integrity": "sha512-Kijeg9Dag6CKtIDA7O21zNTACqD5MD/8HfIV8pdD94vFyFuer52SigdC3IQMhab3vACxXMiFk+yMHNdbqtyTGA==", - "requires": { - "@babel/code-frame": "^7.12.13", - "@jest/types": "^29.5.0", - "@types/stack-utils": "^2.0.0", - "chalk": "^4.0.0", - "graceful-fs": "^4.2.9", - "micromatch": "^4.0.4", - "pretty-format": "^29.5.0", - "slash": "^3.0.0", - "stack-utils": "^2.0.3" - }, - "dependencies": { - "ansi-styles": { - "version": "5.2.0", - "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==" - }, - "pretty-format": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-29.5.0.tgz", - "integrity": "sha512-V2mGkI31qdttvTFX7Mt4efOqHXqJWMu4/r66Xh3Z3BwZaPfPJgp6/gbwoujRpPUtfEF6AUUWx3Jim3GCw5g/Qw==", - "requires": { - "@jest/schemas": "^29.4.3", - "ansi-styles": "^5.0.0", - "react-is": "^18.0.0" - } - }, - "react-is": { - "version": "18.2.0", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", - "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" - } - } - }, - "jest-util": { - "version": "29.5.0", - "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz", - "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==", - "requires": { - "@jest/types": "^29.5.0", - "@types/node": "*", - "chalk": "^4.0.0", - "ci-info": "^3.2.0", - "graceful-fs": "^4.2.9", - "picomatch": "^2.2.3" - } - }, - "jiti": { - "version": "1.18.2", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz", - "integrity": "sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg==", - "dev": true - }, - "js-levenshtein": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", - "integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==", - "dev": true - }, - "js-sdsl": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/js-sdsl/-/js-sdsl-4.4.0.tgz", - "integrity": "sha512-FfVSdx6pJ41Oa+CF7RDaFmTnCaFhua+SNYQX74riGOpl96x+2jQCqEfQ2bnXu/5DPCqlRuiqyvTJM0Qjz26IVg==", - "dev": true - }, - "js-sha3": { - "version": "0.8.0", - "resolved": 
"https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz", - "integrity": "sha512-gF1cRrHhIzNfToc802P800N8PpXS+evLLXfsVpowqmAFR9uwbi89WvXg2QspOmXL8QL86J4T1EpFu+yUkwJY3Q==" - }, - "js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" - }, - "js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, - "requires": { - "argparse": "^2.0.1" - } - }, - "js2xmlparser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/js2xmlparser/-/js2xmlparser-4.0.2.tgz", - "integrity": "sha512-6n4D8gLlLf1n5mNLQPRfViYzu9RATblzPEtm1SthMX1Pjao0r9YI9nw7ZIfRxQMERS87mcswrg+r/OYrPRX6jA==", - "dev": true, - "requires": { - "xmlcreate": "^2.0.4" - } - }, - "jsdoc": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/jsdoc/-/jsdoc-4.0.2.tgz", - "integrity": "sha512-e8cIg2z62InH7azBBi3EsSEqrKx+nUtAS5bBcYTSpZFA+vhNPyhv8PTFZ0WsjOPDj04/dOLlm08EDcQJDqaGQg==", - "dev": true, - "requires": { - "@babel/parser": "^7.20.15", - "@jsdoc/salty": "^0.2.1", - "@types/markdown-it": "^12.2.3", - "bluebird": "^3.7.2", - "catharsis": "^0.9.0", - "escape-string-regexp": "^2.0.0", - "js2xmlparser": "^4.0.2", - "klaw": "^3.0.0", - "markdown-it": "^12.3.2", - "markdown-it-anchor": "^8.4.1", - "marked": "^4.0.10", - "mkdirp": "^1.0.4", - "requizzle": "^0.2.3", - "strip-json-comments": "^3.1.0", - "underscore": "~1.13.2" - }, - "dependencies": { - "escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", - "dev": true - }, - "strip-json-comments": { - 
"version": "3.1.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", - "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", - "dev": true - } - } - }, - "jsdom": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-21.1.1.tgz", - "integrity": "sha512-Jjgdmw48RKcdAIQyUD1UdBh2ecH7VqwaXPN3ehoZN6MqgVbMn+lRm1aAT1AsdJRAJpwfa4IpwgzySn61h2qu3w==", - "dev": true, - "requires": { - "abab": "^2.0.6", - "acorn": "^8.8.2", - "acorn-globals": "^7.0.0", - "cssstyle": "^3.0.0", - "data-urls": "^4.0.0", - "decimal.js": "^10.4.3", - "domexception": "^4.0.0", - "escodegen": "^2.0.0", - "form-data": "^4.0.0", - "html-encoding-sniffer": "^3.0.0", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.1", - "is-potential-custom-element-name": "^1.0.1", - "nwsapi": "^2.2.2", - "parse5": "^7.1.2", - "rrweb-cssom": "^0.6.0", - "saxes": "^6.0.0", - "symbol-tree": "^3.2.4", - "tough-cookie": "^4.1.2", - "w3c-xmlserializer": "^4.0.0", - "webidl-conversions": "^7.0.0", - "whatwg-encoding": "^2.0.0", - "whatwg-mimetype": "^3.0.0", - "whatwg-url": "^12.0.1", - "ws": "^8.13.0", - "xml-name-validator": "^4.0.0" - } - }, - "jsesc": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", - "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==", - "dev": true - }, - "json-parse-even-better-errors": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", - "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", - "dev": true - }, - "json-schema-traverse": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", - "integrity": 
"sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", - "dev": true - }, - "json-stable-stringify-without-jsonify": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", - "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", - "dev": true - }, - "json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", - "dev": true - }, - "jsonc-parser": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", - "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==", - "dev": true - }, - "jsx-ast-utils": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.3.tgz", - "integrity": "sha512-fYQHZTZ8jSfmWZ0iyzfwiU4WDX4HpHbMCZ3gPlWYiCl3BoeOTsqKBqnTVfH2rYT7eP5c3sVbeSPHnnJOaTrWiw==", - "dev": true, - "requires": { - "array-includes": "^3.1.5", - "object.assign": "^4.1.3" - } - }, - "kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "dev": true - }, - "klaw": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/klaw/-/klaw-3.0.0.tgz", - "integrity": "sha512-0Fo5oir+O9jnXu5EefYbVK+mHMBeEVEy2cmctR1O1NECcCkPRreJKrS6Qt/j3KC2C148Dfo9i3pCmCMsdqGr0g==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.9" - } - }, - "known-css-properties": { - "version": "0.26.0", - "resolved": "https://registry.npmjs.org/known-css-properties/-/known-css-properties-0.26.0.tgz", - "integrity": 
"sha512-5FZRzrZzNTBruuurWpvZnvP9pum+fe0HcK8z/ooo+U+Hmp4vtbyp1/QDsqmufirXy4egGzbaH/y2uCZf+6W5Kg==", - "dev": true - }, - "language-subtag-registry": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz", - "integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w==", - "dev": true - }, - "language-tags": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/language-tags/-/language-tags-1.0.5.tgz", - "integrity": "sha512-qJhlO9cGXi6hBGKoxEG/sKZDAHD5Hnu9Hs4WbOY3pCWXDhw0N8x1NenNzm2EnNLkLkk7J2SdxAkDSbb6ftT+UQ==", - "dev": true, - "requires": { - "language-subtag-registry": "~0.3.2" - } - }, - "levn": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", - "integrity": "sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA==", - "dev": true, - "requires": { - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2" - } - }, - "lilconfig": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz", - "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==", - "dev": true - }, - "lines-and-columns": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", - "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", - "dev": true - }, - "linkify-it": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.3.tgz", - "integrity": "sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==", - "dev": true, - "requires": { - "uc.micro": "^1.0.1" - } - }, - "local-pkg": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.3.tgz", - "integrity": 
"sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==", - "dev": true - }, - "locate-path": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz", - "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", - "dev": true, - "requires": { - "p-locate": "^4.1.0" - } - }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "lodash-es": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz", - "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" - }, - "lodash.debounce": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", - "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", - "dev": true - }, - "lodash.merge": { - "version": "4.6.2", - "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "dev": true - }, - "lodash.truncate": { - "version": "4.4.2", - "resolved": "https://registry.npmjs.org/lodash.truncate/-/lodash.truncate-4.4.2.tgz", - "integrity": "sha512-jttmRe7bRse52OsWIMDLaXxWqRAmtIUccAQ3garviCqJjafXOfNMO0yMfNpdD6zbGaTU0P5Nz7e7gAT6cKmJRw==", - "dev": true - }, - "log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "dev": true, - "requires": { - "chalk": "^4.1.0", - "is-unicode-supported": 
"^0.1.0" - } - }, - "long": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/long/-/long-5.2.1.tgz", - "integrity": "sha512-GKSNGeNAtw8IryjjkhZxuKB3JzlcLTwjtiQCHKvqQet81I93kXslhDQruGI/QsddO83mcDToBVy7GqGS/zYf/A==", - "dev": true, - "peer": true - }, - "loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", - "requires": { - "js-tokens": "^3.0.0 || ^4.0.0" - } - }, - "loupe": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-2.3.6.tgz", - "integrity": "sha512-RaPMZKiMy8/JruncMU5Bt6na1eftNoo++R4Y+N2FrxkDVTrGvcyzFTsaGif4QTeKESheMGegbhw6iUAq+5A8zA==", - "dev": true, - "requires": { - "get-func-name": "^2.0.0" - } - }, - "lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", - "dev": true, - "requires": { - "yallist": "^3.0.2" - } - }, - "lz-string": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz", - "integrity": "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ==", - "dev": true - }, - "magic-string": { - "version": "0.27.0", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz", - "integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==", - "dev": true, - "requires": { - "@jridgewell/sourcemap-codec": "^1.4.13" - } - }, - "map-obj": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.3.0.tgz", - "integrity": "sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==", - "dev": true - }, - "markdown-it": { - "version": "12.3.2", - 
"resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.3.2.tgz", - "integrity": "sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==", - "dev": true, - "requires": { - "argparse": "^2.0.1", - "entities": "~2.1.0", - "linkify-it": "^3.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "dependencies": { - "entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==", - "dev": true - } - } - }, - "markdown-it-anchor": { - "version": "8.6.7", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-8.6.7.tgz", - "integrity": "sha512-FlCHFwNnutLgVTflOYHPW2pPcl2AACqVzExlkGQNsi4CJgqOHN7YTgDd4LuhgN1BFO3TS0vLAruV1Td6dwWPJA==", - "dev": true, - "requires": {} - }, - "marked": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/marked/-/marked-4.3.0.tgz", - "integrity": "sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==", - "dev": true - }, - "match-sorter": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/match-sorter/-/match-sorter-6.3.1.tgz", - "integrity": "sha512-mxybbo3pPNuA+ZuCUhm5bwNkXrJTbsk5VWbR5wiwz/GC6LIiegBGn2w3O08UG/jdbYLinw51fSQ5xNU1U3MgBw==", - "requires": { - "@babel/runtime": "^7.12.5", - "remove-accents": "0.4.2" - } - }, - "mathml-tag-names": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/mathml-tag-names/-/mathml-tag-names-2.1.3.tgz", - "integrity": "sha512-APMBEanjybaPzUrfqU0IMU5I0AswKMH7k8OTLs0vvV4KZpExkTkY87nR/zpbuTPj+gARop7aGUbl11pnDfW6xg==", - "dev": true - }, - "mdn-data": { - "version": "2.0.30", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.30.tgz", - "integrity": "sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==" - }, - "mdurl": { - 
"version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==", - "dev": true - }, - "meow": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/meow/-/meow-9.0.0.tgz", - "integrity": "sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==", - "dev": true, - "requires": { - "@types/minimist": "^1.2.0", - "camelcase-keys": "^6.2.2", - "decamelize": "^1.2.0", - "decamelize-keys": "^1.1.0", - "hard-rejection": "^2.1.0", - "minimist-options": "4.1.0", - "normalize-package-data": "^3.0.0", - "read-pkg-up": "^7.0.1", - "redent": "^3.0.0", - "trim-newlines": "^3.0.0", - "type-fest": "^0.18.0", - "yargs-parser": "^20.2.3" - }, - "dependencies": { - "type-fest": { - "version": "0.18.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.18.1.tgz", - "integrity": "sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==", - "dev": true - } - } - }, - "merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true - }, - "merge2": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true - }, - "micromatch": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", - "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", - "requires": { - "braces": "^3.0.2", - "picomatch": "^2.3.1" - } - }, - "microseconds": { - "version": "0.2.0", - "resolved": 
"https://registry.npmjs.org/microseconds/-/microseconds-0.2.0.tgz", - "integrity": "sha512-n7DHHMjR1avBbSpsTBj6fmMGh2AGrifVV4e+WYc3Q9lO+xnSZ3NyhcBND3vzzatt05LFhoKFRxrIyklmLlUtyA==" - }, - "mime-db": { - "version": "1.52.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", - "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", - "dev": true - }, - "mime-types": { - "version": "2.1.35", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", - "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", - "dev": true, - "requires": { - "mime-db": "1.52.0" - } - }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true - }, - "min-indent": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz", - "integrity": "sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==", - "dev": true - }, - "minimatch": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-5.1.6.tgz", - "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", - "dev": true, - "requires": { - "brace-expansion": "^2.0.1" - } - }, - "minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true - }, - "minimist-options": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/minimist-options/-/minimist-options-4.1.0.tgz", - "integrity": 
"sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==", - "dev": true, - "requires": { - "arrify": "^1.0.1", - "is-plain-obj": "^1.1.0", - "kind-of": "^6.0.3" - } - }, - "mkdirp": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", - "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", - "dev": true - }, - "mlly": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.2.0.tgz", - "integrity": "sha512-+c7A3CV0KGdKcylsI6khWyts/CYrGTrRVo4R/I7u/cUsy0Conxa6LUhiEzVKIw14lc2L5aiO4+SeVe4TeGRKww==", - "dev": true, - "requires": { - "acorn": "^8.8.2", - "pathe": "^1.1.0", - "pkg-types": "^1.0.2", - "ufo": "^1.1.1" - } - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "msw": { - "version": "0.36.8", - "resolved": "https://registry.npmjs.org/msw/-/msw-0.36.8.tgz", - "integrity": "sha512-K7lOQoYqhGhTSChsmHMQbf/SDCsxh/m0uhN6Ipt206lGoe81fpTmaGD0KLh4jUxCONMOUnwCSj0jtX2CM4pEdw==", - "dev": true, - "requires": { - "@mswjs/cookies": "^0.1.7", - "@mswjs/interceptors": "^0.12.7", - "@open-draft/until": "^1.0.3", - "@types/cookie": "^0.4.1", - "@types/inquirer": "^8.1.3", - "@types/js-levenshtein": "^1.1.0", - "chalk": "4.1.1", - "chokidar": "^3.4.2", - "cookie": "^0.4.1", - "graphql": "^15.5.1", - "headers-utils": "^3.0.2", - "inquirer": "^8.2.0", - "is-node-process": "^1.0.1", - "js-levenshtein": "^1.1.6", - "node-fetch": "^2.6.7", - "path-to-regexp": "^6.2.0", - "statuses": "^2.0.0", - "strict-event-emitter": "^0.2.0", - "type-fest": "^1.2.2", - "yargs": "^17.3.0" - }, - "dependencies": { - "chalk": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.1.tgz", - "integrity": 
"sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - } - } - }, - "mute-stream": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", - "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", - "dev": true - }, - "mz": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", - "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", - "dev": true, - "requires": { - "any-promise": "^1.0.0", - "object-assign": "^4.0.1", - "thenify-all": "^1.0.0" - } - }, - "nano-time": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/nano-time/-/nano-time-1.0.0.tgz", - "integrity": "sha512-flnngywOoQ0lLQOTRNexn2gGSNuM9bKj9RZAWSzhQ+UJYaAFG9bac4DW9VHjUAzrOaIcajHybCTHe/bkvozQqA==", - "requires": { - "big-integer": "^1.6.16" - } - }, - "nanoid": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", - "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==" - }, - "natural-compare": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", - "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", - "dev": true - }, - "natural-compare-lite": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/natural-compare-lite/-/natural-compare-lite-1.4.0.tgz", - "integrity": "sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==", - "dev": true - }, - "negotiator": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", - "integrity": 
"sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", - "dev": true - }, - "node-fetch": { - "version": "2.6.7", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz", - "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", - "dev": true, - "requires": { - "whatwg-url": "^5.0.0" - }, - "dependencies": { - "tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "dev": true - }, - "webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "dev": true - }, - "whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "dev": true, - "requires": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - } - } - }, - "node-releases": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.10.tgz", - "integrity": "sha512-5GFldHPXVG/YZmFzJvKK2zDSzPKhEp0+ZR5SVaoSag9fsL5YgHbUHDfnG5494ISANDcK4KwPXAx2xqVEydmd7w==" - }, - "normalize-package-data": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-3.0.3.tgz", - "integrity": "sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==", - "dev": true, - "requires": { - "hosted-git-info": "^4.0.1", - "is-core-module": "^2.5.0", - "semver": "^7.3.4", - "validate-npm-package-license": "^3.0.1" - }, - "dependencies": { - "lru-cache": { - "version": 
"6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - } - } - }, - "normalize-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", - "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==" - }, - "normalize-range": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", - "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==" - }, - "npm": { - "version": "9.9.0", - "resolved": "https://registry.npmjs.org/npm/-/npm-9.9.0.tgz", - "integrity": "sha512-wkd7sjz4KmdmddYQcd0aTP73P1cEuPlekeulz4jTDeMVx/Zo5XZ5KQ1z3eUzV3Q/WZpEO0NJXTrD5FNFe6fhCA==", - "dev": true, - "requires": { - "@isaacs/string-locale-compare": "^1.1.0", - "@npmcli/arborist": "^6.5.0", - "@npmcli/config": "^6.4.0", - "@npmcli/fs": "^3.1.0", - "@npmcli/map-workspaces": "^3.0.4", - "@npmcli/package-json": "^4.0.1", - "@npmcli/promise-spawn": "^6.0.2", - "@npmcli/run-script": "^6.0.2", - "abbrev": "^2.0.0", - "archy": "~1.0.0", - "cacache": "^17.1.3", - "chalk": "^5.3.0", - "ci-info": "^3.8.0", - "cli-columns": "^4.0.0", - "cli-table3": "^0.6.3", - "columnify": 
"^1.6.0", - "fastest-levenshtein": "^1.0.16", - "fs-minipass": "^3.0.2", - "glob": "^10.2.7", - "graceful-fs": "^4.2.11", - "hosted-git-info": "^6.1.1", - "ini": "^4.1.1", - "init-package-json": "^5.0.0", - "is-cidr": "^4.0.2", - "json-parse-even-better-errors": "^3.0.0", - "libnpmaccess": "^7.0.2", - "libnpmdiff": "^5.0.20", - "libnpmexec": "^6.0.4", - "libnpmfund": "^4.2.1", - "libnpmhook": "^9.0.3", - "libnpmorg": "^5.0.4", - "libnpmpack": "^5.0.20", - "libnpmpublish": "^7.5.0", - "libnpmsearch": "^6.0.2", - "libnpmteam": "^5.0.3", - "libnpmversion": "^4.0.2", - "make-fetch-happen": "^11.1.1", - "minimatch": "^9.0.3", - "minipass": "^5.0.0", - "minipass-pipeline": "^1.2.4", - "ms": "^2.1.2", - "node-gyp": "^9.4.0", - "nopt": "^7.2.0", - "normalize-package-data": "^5.0.0", - "npm-audit-report": "^5.0.0", - "npm-install-checks": "^6.2.0", - "npm-package-arg": "^10.1.0", - "npm-pick-manifest": "^8.0.2", - "npm-profile": "^7.0.1", - "npm-registry-fetch": "^14.0.5", - "npm-user-validate": "^2.0.0", - "npmlog": "^7.0.1", - "p-map": "^4.0.0", - "pacote": "^15.2.0", - "parse-conflict-json": "^3.0.1", - "proc-log": "^3.0.0", - "qrcode-terminal": "^0.12.0", - "read": "^2.1.0", - "semver": "^7.5.4", - "sigstore": "^1.9.0", - "spdx-expression-parse": "^3.0.1", - "ssri": "^10.0.4", - "supports-color": "^9.4.0", - "tar": "^6.1.15", - "text-table": "~0.2.0", - "tiny-relative-date": "^1.3.0", - "treeverse": "^3.0.0", - "validate-npm-package-name": "^5.0.0", - "which": "^3.0.1", - "write-file-atomic": "^5.0.1" - }, - "dependencies": { - "@colors/colors": { - "version": "1.5.0", - "bundled": true, - "dev": true, - "optional": true - }, - "@isaacs/cliui": { - "version": "8.0.2", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "dependencies": { - "ansi-regex": { 
- "version": "6.0.1", - "bundled": true, - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "5.1.2", - "bundled": true, - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.1.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - } - } - }, - "@isaacs/string-locale-compare": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "@npmcli/arborist": { - "version": "6.5.0", - "bundled": true, - "dev": true, - "requires": { - "@isaacs/string-locale-compare": "^1.1.0", - "@npmcli/fs": "^3.1.0", - "@npmcli/installed-package-contents": "^2.0.2", - "@npmcli/map-workspaces": "^3.0.2", - "@npmcli/metavuln-calculator": "^5.0.0", - "@npmcli/name-from-folder": "^2.0.0", - "@npmcli/node-gyp": "^3.0.0", - "@npmcli/package-json": "^4.0.0", - "@npmcli/query": "^3.0.0", - "@npmcli/run-script": "^6.0.0", - "bin-links": "^4.0.1", - "cacache": "^17.0.4", - "common-ancestor-path": "^1.0.1", - "hosted-git-info": "^6.1.1", - "json-parse-even-better-errors": "^3.0.0", - "json-stringify-nice": "^1.1.4", - "minimatch": "^9.0.0", - "nopt": "^7.0.0", - "npm-install-checks": "^6.2.0", - "npm-package-arg": "^10.1.0", - "npm-pick-manifest": "^8.0.1", - "npm-registry-fetch": "^14.0.3", - "npmlog": "^7.0.1", - "pacote": "^15.0.8", - "parse-conflict-json": "^3.0.0", - "proc-log": "^3.0.0", - "promise-all-reject-late": "^1.0.0", - "promise-call-limit": "^1.0.2", - "read-package-json-fast": "^3.0.2", - "semver": "^7.3.7", - "ssri": "^10.0.1", - "treeverse": "^3.0.0", - "walk-up-path": "^3.0.1" - } - }, - "@npmcli/config": { - "version": "6.4.0", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/map-workspaces": "^3.0.2", - "ci-info": "^3.8.0", - "ini": "^4.1.0", - "nopt": "^7.0.0", - "proc-log": "^3.0.0", - "read-package-json-fast": "^3.0.2", - "semver": "^7.3.5", - 
"walk-up-path": "^3.0.1" - } - }, - "@npmcli/disparity-colors": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-styles": "^4.3.0" - } - }, - "@npmcli/fs": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "semver": "^7.3.5" - } - }, - "@npmcli/git": { - "version": "4.1.0", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/promise-spawn": "^6.0.0", - "lru-cache": "^7.4.4", - "npm-pick-manifest": "^8.0.0", - "proc-log": "^3.0.0", - "promise-inflight": "^1.0.1", - "promise-retry": "^2.0.1", - "semver": "^7.3.5", - "which": "^3.0.0" - } - }, - "@npmcli/installed-package-contents": { - "version": "2.0.2", - "bundled": true, - "dev": true, - "requires": { - "npm-bundled": "^3.0.0", - "npm-normalize-package-bin": "^3.0.0" - } - }, - "@npmcli/map-workspaces": { - "version": "3.0.4", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/name-from-folder": "^2.0.0", - "glob": "^10.2.2", - "minimatch": "^9.0.0", - "read-package-json-fast": "^3.0.0" - } - }, - "@npmcli/metavuln-calculator": { - "version": "5.0.1", - "bundled": true, - "dev": true, - "requires": { - "cacache": "^17.0.0", - "json-parse-even-better-errors": "^3.0.0", - "pacote": "^15.0.0", - "semver": "^7.3.5" - } - }, - "@npmcli/name-from-folder": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "@npmcli/node-gyp": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "@npmcli/package-json": { - "version": "4.0.1", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/git": "^4.1.0", - "glob": "^10.2.2", - "hosted-git-info": "^6.1.1", - "json-parse-even-better-errors": "^3.0.0", - "normalize-package-data": "^5.0.0", - "proc-log": "^3.0.0", - "semver": "^7.5.3" - } - }, - "@npmcli/promise-spawn": { - "version": "6.0.2", - "bundled": true, - "dev": true, - "requires": { - "which": "^3.0.0" - } - }, - "@npmcli/query": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - 
"postcss-selector-parser": "^6.0.10" - } - }, - "@npmcli/run-script": { - "version": "6.0.2", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/node-gyp": "^3.0.0", - "@npmcli/promise-spawn": "^6.0.0", - "node-gyp": "^9.0.0", - "read-package-json-fast": "^3.0.0", - "which": "^3.0.0" - } - }, - "@pkgjs/parseargs": { - "version": "0.11.0", - "bundled": true, - "dev": true, - "optional": true - }, - "@sigstore/bundle": { - "version": "1.1.0", - "bundled": true, - "dev": true, - "requires": { - "@sigstore/protobuf-specs": "^0.2.0" - } - }, - "@sigstore/protobuf-specs": { - "version": "0.2.1", - "bundled": true, - "dev": true - }, - "@sigstore/sign": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "requires": { - "@sigstore/bundle": "^1.1.0", - "@sigstore/protobuf-specs": "^0.2.0", - "make-fetch-happen": "^11.0.1" - } - }, - "@sigstore/tuf": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "@sigstore/protobuf-specs": "^0.2.0", - "tuf-js": "^1.1.7" - } - }, - "@tootallnate/once": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "@tufjs/canonical-json": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "@tufjs/models": { - "version": "1.0.4", - "bundled": true, - "dev": true, - "requires": { - "@tufjs/canonical-json": "1.0.0", - "minimatch": "^9.0.0" - } - }, - "abbrev": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "abort-controller": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "event-target-shim": "^5.0.0" - } - }, - "agent-base": { - "version": "6.0.2", - "bundled": true, - "dev": true, - "requires": { - "debug": "4" - } - }, - "agentkeepalive": { - "version": "4.3.0", - "bundled": true, - "dev": true, - "requires": { - "debug": "^4.1.0", - "depd": "^2.0.0", - "humanize-ms": "^1.2.1" - } - }, - "aggregate-error": { - "version": "3.1.0", - "bundled": true, - "dev": true, - "requires": { - "clean-stack": "^2.0.0", - "indent-string": "^4.0.0" - 
} - }, - "ansi-regex": { - "version": "5.0.1", - "bundled": true, - "dev": true - }, - "ansi-styles": { - "version": "4.3.0", - "bundled": true, - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "aproba": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "archy": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "are-we-there-yet": { - "version": "4.0.0", - "bundled": true, - "dev": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^4.1.0" - } - }, - "balanced-match": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "base64-js": { - "version": "1.5.1", - "bundled": true, - "dev": true - }, - "bin-links": { - "version": "4.0.2", - "bundled": true, - "dev": true, - "requires": { - "cmd-shim": "^6.0.0", - "npm-normalize-package-bin": "^3.0.0", - "read-cmd-shim": "^4.0.0", - "write-file-atomic": "^5.0.0" - } - }, - "binary-extensions": { - "version": "2.2.0", - "bundled": true, - "dev": true - }, - "brace-expansion": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "requires": { - "balanced-match": "^1.0.0" - } - }, - "buffer": { - "version": "6.0.3", - "bundled": true, - "dev": true, - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.2.1" - } - }, - "builtins": { - "version": "5.0.1", - "bundled": true, - "dev": true, - "requires": { - "semver": "^7.0.0" - } - }, - "cacache": { - "version": "17.1.3", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/fs": "^3.1.0", - "fs-minipass": "^3.0.0", - "glob": "^10.2.2", - "lru-cache": "^7.7.1", - "minipass": "^5.0.0", - "minipass-collect": "^1.0.2", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "p-map": "^4.0.0", - "ssri": "^10.0.0", - "tar": "^6.1.11", - "unique-filename": "^3.0.0" - } - }, - "chalk": { - "version": "5.3.0", - "bundled": true, - "dev": true - }, - "chownr": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "ci-info": { - "version": "3.8.0", - "bundled": true, - "dev": 
true - }, - "cidr-regex": { - "version": "3.1.1", - "bundled": true, - "dev": true, - "requires": { - "ip-regex": "^4.1.0" - } - }, - "clean-stack": { - "version": "2.2.0", - "bundled": true, - "dev": true - }, - "cli-columns": { - "version": "4.0.0", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1" - } - }, - "cli-table3": { - "version": "0.6.3", - "bundled": true, - "dev": true, - "requires": { - "@colors/colors": "1.5.0", - "string-width": "^4.2.0" - } - }, - "clone": { - "version": "1.0.4", - "bundled": true, - "dev": true - }, - "cmd-shim": { - "version": "6.0.1", - "bundled": true, - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "bundled": true, - "dev": true - }, - "color-support": { - "version": "1.1.3", - "bundled": true, - "dev": true - }, - "columnify": { - "version": "1.6.0", - "bundled": true, - "dev": true, - "requires": { - "strip-ansi": "^6.0.1", - "wcwidth": "^1.0.0" - } - }, - "common-ancestor-path": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "bundled": true, - "dev": true - }, - "console-control-strings": { - "version": "1.1.0", - "bundled": true, - "dev": true - }, - "cross-spawn": { - "version": "7.0.3", - "bundled": true, - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "dependencies": { - "which": { - "version": "2.0.2", - "bundled": true, - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "cssesc": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "debug": { - "version": "4.3.4", - "bundled": true, - "dev": true, - "requires": { - "ms": "2.1.2" - }, - "dependencies": { - "ms": { - "version": "2.1.2", - "bundled": true, - "dev": true - } - } - }, - "defaults": { - "version": "1.0.4", - "bundled": 
true, - "dev": true, - "requires": { - "clone": "^1.0.2" - } - }, - "delegates": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "depd": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "diff": { - "version": "5.1.0", - "bundled": true, - "dev": true - }, - "eastasianwidth": { - "version": "0.2.0", - "bundled": true, - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "bundled": true, - "dev": true - }, - "encoding": { - "version": "0.1.13", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "iconv-lite": "^0.6.2" - } - }, - "env-paths": { - "version": "2.2.1", - "bundled": true, - "dev": true - }, - "err-code": { - "version": "2.0.3", - "bundled": true, - "dev": true - }, - "event-target-shim": { - "version": "5.0.1", - "bundled": true, - "dev": true - }, - "events": { - "version": "3.3.0", - "bundled": true, - "dev": true - }, - "exponential-backoff": { - "version": "3.1.1", - "bundled": true, - "dev": true - }, - "fastest-levenshtein": { - "version": "1.0.16", - "bundled": true, - "dev": true - }, - "foreground-child": { - "version": "3.1.1", - "bundled": true, - "dev": true, - "requires": { - "cross-spawn": "^7.0.0", - "signal-exit": "^4.0.1" - } - }, - "fs-minipass": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^5.0.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "function-bind": { - "version": "1.1.1", - "bundled": true, - "dev": true - }, - "gauge": { - "version": "5.0.1", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^4.0.1", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - } - }, - "glob": { - "version": "10.2.7", - "bundled": true, - "dev": true, - "requires": { - "foreground-child": "^3.1.0", - "jackspeak": "^2.0.3", - 
"minimatch": "^9.0.1", - "minipass": "^5.0.0 || ^6.0.2", - "path-scurry": "^1.7.0" - } - }, - "graceful-fs": { - "version": "4.2.11", - "bundled": true, - "dev": true - }, - "has": { - "version": "1.0.3", - "bundled": true, - "dev": true, - "requires": { - "function-bind": "^1.1.1" - } - }, - "has-unicode": { - "version": "2.0.1", - "bundled": true, - "dev": true - }, - "hosted-git-info": { - "version": "6.1.1", - "bundled": true, - "dev": true, - "requires": { - "lru-cache": "^7.5.1" - } - }, - "http-cache-semantics": { - "version": "4.1.1", - "bundled": true, - "dev": true - }, - "http-proxy-agent": { - "version": "5.0.0", - "bundled": true, - "dev": true, - "requires": { - "@tootallnate/once": "2", - "agent-base": "6", - "debug": "4" - } - }, - "https-proxy-agent": { - "version": "5.0.1", - "bundled": true, - "dev": true, - "requires": { - "agent-base": "6", - "debug": "4" - } - }, - "humanize-ms": { - "version": "1.2.1", - "bundled": true, - "dev": true, - "requires": { - "ms": "^2.0.0" - } - }, - "iconv-lite": { - "version": "0.6.3", - "bundled": true, - "dev": true, - "optional": true, - "requires": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - } - }, - "ieee754": { - "version": "1.2.1", - "bundled": true, - "dev": true - }, - "ignore-walk": { - "version": "6.0.3", - "bundled": true, - "dev": true, - "requires": { - "minimatch": "^9.0.0" - } - }, - "imurmurhash": { - "version": "0.1.4", - "bundled": true, - "dev": true - }, - "indent-string": { - "version": "4.0.0", - "bundled": true, - "dev": true - }, - "inflight": { - "version": "1.0.6", - "bundled": true, - "dev": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "bundled": true, - "dev": true - }, - "ini": { - "version": "4.1.1", - "bundled": true, - "dev": true - }, - "init-package-json": { - "version": "5.0.0", - "bundled": true, - "dev": true, - "requires": { - "npm-package-arg": "^10.0.0", - "promzard": "^1.0.0", - "read": "^2.0.0", - 
"read-package-json": "^6.0.0", - "semver": "^7.3.5", - "validate-npm-package-license": "^3.0.4", - "validate-npm-package-name": "^5.0.0" - } - }, - "ip": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "ip-regex": { - "version": "4.3.0", - "bundled": true, - "dev": true - }, - "is-cidr": { - "version": "4.0.2", - "bundled": true, - "dev": true, - "requires": { - "cidr-regex": "^3.1.1" - } - }, - "is-core-module": { - "version": "2.12.1", - "bundled": true, - "dev": true, - "requires": { - "has": "^1.0.3" - } - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "is-lambda": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "isexe": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "jackspeak": { - "version": "2.2.1", - "bundled": true, - "dev": true, - "requires": { - "@isaacs/cliui": "^8.0.2", - "@pkgjs/parseargs": "^0.11.0" - } - }, - "json-parse-even-better-errors": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "json-stringify-nice": { - "version": "1.1.4", - "bundled": true, - "dev": true - }, - "jsonparse": { - "version": "1.3.1", - "bundled": true, - "dev": true - }, - "just-diff": { - "version": "6.0.2", - "bundled": true, - "dev": true - }, - "just-diff-apply": { - "version": "5.5.0", - "bundled": true, - "dev": true - }, - "libnpmaccess": { - "version": "7.0.2", - "bundled": true, - "dev": true, - "requires": { - "npm-package-arg": "^10.1.0", - "npm-registry-fetch": "^14.0.3" - } - }, - "libnpmdiff": { - "version": "5.0.20", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/arborist": "^6.5.0", - "@npmcli/disparity-colors": "^3.0.0", - "@npmcli/installed-package-contents": "^2.0.2", - "binary-extensions": "^2.2.0", - "diff": "^5.1.0", - "minimatch": "^9.0.0", - "npm-package-arg": "^10.1.0", - "pacote": "^15.0.8", - "tar": "^6.1.13" - } - }, - "libnpmexec": { - "version": "6.0.4", - "bundled": true, - "dev": true, - "requires": { - 
"@npmcli/arborist": "^6.5.0", - "@npmcli/run-script": "^6.0.0", - "ci-info": "^3.7.1", - "npm-package-arg": "^10.1.0", - "npmlog": "^7.0.1", - "pacote": "^15.0.8", - "proc-log": "^3.0.0", - "read": "^2.0.0", - "read-package-json-fast": "^3.0.2", - "semver": "^7.3.7", - "walk-up-path": "^3.0.1" - } - }, - "libnpmfund": { - "version": "4.2.1", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/arborist": "^6.5.0" - } - }, - "libnpmhook": { - "version": "9.0.3", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "npm-registry-fetch": "^14.0.3" - } - }, - "libnpmorg": { - "version": "5.0.4", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "npm-registry-fetch": "^14.0.3" - } - }, - "libnpmpack": { - "version": "5.0.20", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/arborist": "^6.5.0", - "@npmcli/run-script": "^6.0.0", - "npm-package-arg": "^10.1.0", - "pacote": "^15.0.8" - } - }, - "libnpmpublish": { - "version": "7.5.0", - "bundled": true, - "dev": true, - "requires": { - "ci-info": "^3.6.1", - "normalize-package-data": "^5.0.0", - "npm-package-arg": "^10.1.0", - "npm-registry-fetch": "^14.0.3", - "proc-log": "^3.0.0", - "semver": "^7.3.7", - "sigstore": "^1.4.0", - "ssri": "^10.0.1" - } - }, - "libnpmsearch": { - "version": "6.0.2", - "bundled": true, - "dev": true, - "requires": { - "npm-registry-fetch": "^14.0.3" - } - }, - "libnpmteam": { - "version": "5.0.3", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^2.0.0", - "npm-registry-fetch": "^14.0.3" - } - }, - "libnpmversion": { - "version": "4.0.2", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/git": "^4.0.1", - "@npmcli/run-script": "^6.0.0", - "json-parse-even-better-errors": "^3.0.0", - "proc-log": "^3.0.0", - "semver": "^7.3.7" - } - }, - "lru-cache": { - "version": "7.18.3", - "bundled": true, - "dev": true - }, - "make-fetch-happen": { - "version": "11.1.1", - "bundled": true, - "dev": true, - "requires": 
{ - "agentkeepalive": "^4.2.1", - "cacache": "^17.0.0", - "http-cache-semantics": "^4.1.1", - "http-proxy-agent": "^5.0.0", - "https-proxy-agent": "^5.0.0", - "is-lambda": "^1.0.1", - "lru-cache": "^7.7.1", - "minipass": "^5.0.0", - "minipass-fetch": "^3.0.0", - "minipass-flush": "^1.0.5", - "minipass-pipeline": "^1.2.4", - "negotiator": "^0.6.3", - "promise-retry": "^2.0.1", - "socks-proxy-agent": "^7.0.0", - "ssri": "^10.0.0" - } - }, - "minimatch": { - "version": "9.0.3", - "bundled": true, - "dev": true, - "requires": { - "brace-expansion": "^2.0.1" - } - }, - "minipass": { - "version": "5.0.0", - "bundled": true, - "dev": true - }, - "minipass-collect": { - "version": "1.0.2", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-fetch": { - "version": "3.0.3", - "bundled": true, - "dev": true, - "requires": { - "encoding": "^0.1.13", - "minipass": "^5.0.0", - "minipass-sized": "^1.0.3", - "minizlib": "^2.1.2" - } - }, - "minipass-flush": { - "version": "1.0.5", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-json-stream": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "requires": { - "jsonparse": "^1.3.1", - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-pipeline": { - "version": "1.2.4", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minipass-sized": { - "version": 
"1.0.3", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "minizlib": { - "version": "2.1.2", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^3.0.0", - "yallist": "^4.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "mkdirp": { - "version": "1.0.4", - "bundled": true, - "dev": true - }, - "ms": { - "version": "2.1.3", - "bundled": true, - "dev": true - }, - "mute-stream": { - "version": "1.0.0", - "bundled": true, - "dev": true - }, - "negotiator": { - "version": "0.6.3", - "bundled": true, - "dev": true - }, - "node-gyp": { - "version": "9.4.0", - "bundled": true, - "dev": true, - "requires": { - "env-paths": "^2.2.0", - "exponential-backoff": "^3.1.1", - "glob": "^7.1.4", - "graceful-fs": "^4.2.6", - "make-fetch-happen": "^11.0.3", - "nopt": "^6.0.0", - "npmlog": "^6.0.0", - "rimraf": "^3.0.2", - "semver": "^7.3.5", - "tar": "^6.1.2", - "which": "^2.0.2" - }, - "dependencies": { - "abbrev": { - "version": "1.1.1", - "bundled": true, - "dev": true - }, - "are-we-there-yet": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "requires": { - "delegates": "^1.0.0", - "readable-stream": "^3.6.0" - } - }, - "brace-expansion": { - "version": "1.1.11", - "bundled": true, - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "gauge": { - "version": "4.0.4", - "bundled": true, - "dev": true, - "requires": { - "aproba": "^1.0.3 || ^2.0.0", - "color-support": "^1.1.3", - "console-control-strings": "^1.1.0", - "has-unicode": "^2.0.1", - "signal-exit": "^3.0.7", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "wide-align": "^1.1.5" - } - }, - "glob": { - "version": "7.2.3", - "bundled": true, - "dev": true, - 
"requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "bundled": true, - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "nopt": { - "version": "6.0.0", - "bundled": true, - "dev": true, - "requires": { - "abbrev": "^1.0.0" - } - }, - "npmlog": { - "version": "6.0.2", - "bundled": true, - "dev": true, - "requires": { - "are-we-there-yet": "^3.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^4.0.3", - "set-blocking": "^2.0.0" - } - }, - "readable-stream": { - "version": "3.6.2", - "bundled": true, - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, - "signal-exit": { - "version": "3.0.7", - "bundled": true, - "dev": true - }, - "which": { - "version": "2.0.2", - "bundled": true, - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - } - } - }, - "nopt": { - "version": "7.2.0", - "bundled": true, - "dev": true, - "requires": { - "abbrev": "^2.0.0" - } - }, - "normalize-package-data": { - "version": "5.0.0", - "bundled": true, - "dev": true, - "requires": { - "hosted-git-info": "^6.0.0", - "is-core-module": "^2.8.1", - "semver": "^7.3.5", - "validate-npm-package-license": "^3.0.4" - } - }, - "npm-audit-report": { - "version": "5.0.0", - "bundled": true, - "dev": true - }, - "npm-bundled": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "npm-normalize-package-bin": "^3.0.0" - } - }, - "npm-install-checks": { - "version": "6.2.0", - "bundled": true, - "dev": true, - "requires": { - "semver": "^7.1.1" - } - }, - "npm-normalize-package-bin": { - "version": "3.0.1", - "bundled": true, - "dev": true - }, - "npm-package-arg": { - "version": "10.1.0", - "bundled": true, - "dev": true, - "requires": { - "hosted-git-info": "^6.0.0", - "proc-log": "^3.0.0", - "semver": "^7.3.5", - 
"validate-npm-package-name": "^5.0.0" - } - }, - "npm-packlist": { - "version": "7.0.4", - "bundled": true, - "dev": true, - "requires": { - "ignore-walk": "^6.0.0" - } - }, - "npm-pick-manifest": { - "version": "8.0.2", - "bundled": true, - "dev": true, - "requires": { - "npm-install-checks": "^6.0.0", - "npm-normalize-package-bin": "^3.0.0", - "npm-package-arg": "^10.0.0", - "semver": "^7.3.5" - } - }, - "npm-profile": { - "version": "7.0.1", - "bundled": true, - "dev": true, - "requires": { - "npm-registry-fetch": "^14.0.0", - "proc-log": "^3.0.0" - } - }, - "npm-registry-fetch": { - "version": "14.0.5", - "bundled": true, - "dev": true, - "requires": { - "make-fetch-happen": "^11.0.0", - "minipass": "^5.0.0", - "minipass-fetch": "^3.0.0", - "minipass-json-stream": "^1.0.1", - "minizlib": "^2.1.2", - "npm-package-arg": "^10.0.0", - "proc-log": "^3.0.0" - } - }, - "npm-user-validate": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "npmlog": { - "version": "7.0.1", - "bundled": true, - "dev": true, - "requires": { - "are-we-there-yet": "^4.0.0", - "console-control-strings": "^1.1.0", - "gauge": "^5.0.0", - "set-blocking": "^2.0.0" - } - }, - "once": { - "version": "1.4.0", - "bundled": true, - "dev": true, - "requires": { - "wrappy": "1" - } - }, - "p-map": { - "version": "4.0.0", - "bundled": true, - "dev": true, - "requires": { - "aggregate-error": "^3.0.0" - } - }, - "pacote": { - "version": "15.2.0", - "bundled": true, - "dev": true, - "requires": { - "@npmcli/git": "^4.0.0", - "@npmcli/installed-package-contents": "^2.0.1", - "@npmcli/promise-spawn": "^6.0.1", - "@npmcli/run-script": "^6.0.0", - "cacache": "^17.0.0", - "fs-minipass": "^3.0.0", - "minipass": "^5.0.0", - "npm-package-arg": "^10.0.0", - "npm-packlist": "^7.0.0", - "npm-pick-manifest": "^8.0.0", - "npm-registry-fetch": "^14.0.0", - "proc-log": "^3.0.0", - "promise-retry": "^2.0.1", - "read-package-json": "^6.0.0", - "read-package-json-fast": "^3.0.0", - "sigstore": "^1.3.0", - 
"ssri": "^10.0.0", - "tar": "^6.1.11" - } - }, - "parse-conflict-json": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "requires": { - "json-parse-even-better-errors": "^3.0.0", - "just-diff": "^6.0.0", - "just-diff-apply": "^5.2.0" - } - }, - "path-is-absolute": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "path-key": { - "version": "3.1.1", - "bundled": true, - "dev": true - }, - "path-scurry": { - "version": "1.9.2", - "bundled": true, - "dev": true, - "requires": { - "lru-cache": "^9.1.1", - "minipass": "^5.0.0 || ^6.0.2" - }, - "dependencies": { - "lru-cache": { - "version": "9.1.1", - "bundled": true, - "dev": true - } - } - }, - "postcss-selector-parser": { - "version": "6.0.13", - "bundled": true, - "dev": true, - "requires": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - } - }, - "proc-log": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "process": { - "version": "0.11.10", - "bundled": true, - "dev": true - }, - "promise-all-reject-late": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "promise-call-limit": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "promise-inflight": { - "version": "1.0.1", - "bundled": true, - "dev": true - }, - "promise-retry": { - "version": "2.0.1", - "bundled": true, - "dev": true, - "requires": { - "err-code": "^2.0.2", - "retry": "^0.12.0" - } - }, - "promzard": { - "version": "1.0.0", - "bundled": true, - "dev": true, - "requires": { - "read": "^2.0.0" - } - }, - "qrcode-terminal": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, - "read": { - "version": "2.1.0", - "bundled": true, - "dev": true, - "requires": { - "mute-stream": "~1.0.0" - } - }, - "read-cmd-shim": { - "version": "4.0.0", - "bundled": true, - "dev": true - }, - "read-package-json": { - "version": "6.0.4", - "bundled": true, - "dev": true, - "requires": { - "glob": "^10.2.2", - "json-parse-even-better-errors": "^3.0.0", - "normalize-package-data": "^5.0.0", 
- "npm-normalize-package-bin": "^3.0.0" - } - }, - "read-package-json-fast": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "json-parse-even-better-errors": "^3.0.0", - "npm-normalize-package-bin": "^3.0.0" - } - }, - "readable-stream": { - "version": "4.4.0", - "bundled": true, - "dev": true, - "requires": { - "abort-controller": "^3.0.0", - "buffer": "^6.0.3", - "events": "^3.3.0", - "process": "^0.11.10" - } - }, - "retry": { - "version": "0.12.0", - "bundled": true, - "dev": true - }, - "rimraf": { - "version": "3.0.2", - "bundled": true, - "dev": true, - "requires": { - "glob": "^7.1.3" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "bundled": true, - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "glob": { - "version": "7.2.3", - "bundled": true, - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "bundled": true, - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - } - } - }, - "safe-buffer": { - "version": "5.2.1", - "bundled": true, - "dev": true - }, - "safer-buffer": { - "version": "2.1.2", - "bundled": true, - "dev": true, - "optional": true - }, - "semver": { - "version": "7.5.4", - "bundled": true, - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - }, - "dependencies": { - "lru-cache": { - "version": "6.0.0", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - }, - "set-blocking": { - "version": "2.0.0", - "bundled": true, - "dev": true - }, - "shebang-command": { - "version": "2.0.0", - "bundled": true, - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "signal-exit": { - "version": "4.0.2", - "bundled": true, - "dev": true - 
}, - "sigstore": { - "version": "1.9.0", - "bundled": true, - "dev": true, - "requires": { - "@sigstore/bundle": "^1.1.0", - "@sigstore/protobuf-specs": "^0.2.0", - "@sigstore/sign": "^1.0.0", - "@sigstore/tuf": "^1.0.3", - "make-fetch-happen": "^11.0.1" - } - }, - "smart-buffer": { - "version": "4.2.0", - "bundled": true, - "dev": true - }, - "socks": { - "version": "2.7.1", - "bundled": true, - "dev": true, - "requires": { - "ip": "^2.0.0", - "smart-buffer": "^4.2.0" - } - }, - "socks-proxy-agent": { - "version": "7.0.0", - "bundled": true, - "dev": true, - "requires": { - "agent-base": "^6.0.2", - "debug": "^4.3.3", - "socks": "^2.6.2" - } - }, - "spdx-correct": { - "version": "3.2.0", - "bundled": true, - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "bundled": true, - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.1", - "bundled": true, - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.13", - "bundled": true, - "dev": true - }, - "ssri": { - "version": "10.0.4", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^5.0.0" - } - }, - "string_decoder": { - "version": "1.3.0", - "bundled": true, - "dev": true, - "requires": { - "safe-buffer": "~5.2.0" - } - }, - "string-width": { - "version": "4.2.3", - "bundled": true, - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "string-width-cjs": { - "version": "npm:string-width@4.2.3", - "bundled": true, - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "strip-ansi": { - "version": "6.0.1", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "strip-ansi-cjs": { - "version": "npm:strip-ansi@6.0.1", 
- "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "supports-color": { - "version": "9.4.0", - "bundled": true, - "dev": true - }, - "tar": { - "version": "6.1.15", - "bundled": true, - "dev": true, - "requires": { - "chownr": "^2.0.0", - "fs-minipass": "^2.0.0", - "minipass": "^5.0.0", - "minizlib": "^2.1.1", - "mkdirp": "^1.0.3", - "yallist": "^4.0.0" - }, - "dependencies": { - "fs-minipass": { - "version": "2.1.0", - "bundled": true, - "dev": true, - "requires": { - "minipass": "^3.0.0" - }, - "dependencies": { - "minipass": { - "version": "3.3.6", - "bundled": true, - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - } - } - } - } - }, - "text-table": { - "version": "0.2.0", - "bundled": true, - "dev": true - }, - "tiny-relative-date": { - "version": "1.3.0", - "bundled": true, - "dev": true - }, - "treeverse": { - "version": "3.0.0", - "bundled": true, - "dev": true - }, - "tuf-js": { - "version": "1.1.7", - "bundled": true, - "dev": true, - "requires": { - "@tufjs/models": "1.0.4", - "debug": "^4.3.4", - "make-fetch-happen": "^11.1.1" - } - }, - "unique-filename": { - "version": "3.0.0", - "bundled": true, - "dev": true, - "requires": { - "unique-slug": "^4.0.0" - } - }, - "unique-slug": { - "version": "4.0.0", - "bundled": true, - "dev": true, - "requires": { - "imurmurhash": "^0.1.4" - } - }, - "util-deprecate": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "bundled": true, - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "validate-npm-package-name": { - "version": "5.0.0", - "bundled": true, - "dev": true, - "requires": { - "builtins": "^5.0.0" - } - }, - "walk-up-path": { - "version": "3.0.1", - "bundled": true, - "dev": true - }, - "wcwidth": { - "version": "1.0.1", - "bundled": true, - "dev": true, - "requires": { - "defaults": "^1.0.3" - } - }, - "which": { - "version": "3.0.1", - 
"bundled": true, - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "wide-align": { - "version": "1.1.5", - "bundled": true, - "dev": true, - "requires": { - "string-width": "^1.0.2 || 2 || 3 || 4" - } - }, - "wrap-ansi": { - "version": "8.1.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "bundled": true, - "dev": true - }, - "ansi-styles": { - "version": "6.2.1", - "bundled": true, - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "bundled": true, - "dev": true - }, - "string-width": { - "version": "5.1.2", - "bundled": true, - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.1.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - } - } - }, - "wrap-ansi-cjs": { - "version": "npm:wrap-ansi@7.0.0", - "bundled": true, - "dev": true, - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - } - }, - "wrappy": { - "version": "1.0.2", - "bundled": true, - "dev": true - }, - "write-file-atomic": { - "version": "5.0.1", - "bundled": true, - "dev": true, - "requires": { - "imurmurhash": "^0.1.4", - "signal-exit": "^4.0.1" - } - }, - "yallist": { - "version": "4.0.0", - "bundled": true, - "dev": true - } - } - }, - "npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dev": true, - "requires": { - "path-key": "^3.0.0" - } - }, - "nth-check": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", - "integrity": 
"sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", - "requires": { - "boolbase": "^1.0.0" - } - }, - "nwsapi": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.2.tgz", - "integrity": "sha512-90yv+6538zuvUMnN+zCr8LuV6bPFdq50304114vJYJ8RDyK8D5O9Phpbd6SZWgI7PwzmmfN1upeOJlvybDSgCw==", - "dev": true - }, - "object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==" - }, - "object-hash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", - "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", - "dev": true - }, - "object-inspect": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz", - "integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==", - "dev": true - }, - "object-is": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/object-is/-/object-is-1.1.5.tgz", - "integrity": "sha512-3cyDsyHgtmi7I7DfSSI2LDp6SK2lwvtbg0p0R1e0RvTqF5ceGx+K2dfSjm1bKDMVCFEDAQvy+o8c6a7VujOddw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3" - } - }, - "object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true - }, - "object.assign": { - "version": "4.1.4", - "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz", - "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==", - "dev": true, - "requires": 
{ - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "has-symbols": "^1.0.3", - "object-keys": "^1.1.1" - } - }, - "object.entries": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.entries/-/object.entries-1.1.6.tgz", - "integrity": "sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.fromentries": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/object.fromentries/-/object.fromentries-2.0.6.tgz", - "integrity": "sha512-VciD13dswC4j1Xt5394WR4MzmAQmlgN72phd/riNp9vtD7tp4QQWJ0R4wvclXcafgcYK8veHRed2W6XeGBvcfg==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.hasown": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/object.hasown/-/object.hasown-1.1.2.tgz", - "integrity": "sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==", - "dev": true, - "requires": { - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "object.values": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/object.values/-/object.values-1.1.6.tgz", - "integrity": "sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "oblivious-set": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/oblivious-set/-/oblivious-set-1.0.0.tgz", - "integrity": "sha512-z+pI07qxo4c2CulUHCDf9lcqDlMSo72N/4rLUpRXf6fu+q8vjt8y0xS+Tlf8NTJDdTXHbdeO1n3MlbctwEoXZw==" - }, - "on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": 
"sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "dev": true - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "requires": { - "wrappy": "1" - } - }, - "onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "requires": { - "mimic-fn": "^2.1.0" - } - }, - "optionator": { - "version": "0.8.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz", - "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==", - "dev": true, - "requires": { - "deep-is": "~0.1.3", - "fast-levenshtein": "~2.0.6", - "levn": "~0.3.0", - "prelude-ls": "~1.1.2", - "type-check": "~0.3.2", - "word-wrap": "~1.2.3" - } - }, - "ora": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", - "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", - "dev": true, - "requires": { - "bl": "^4.1.0", - "chalk": "^4.1.0", - "cli-cursor": "^3.1.0", - "cli-spinners": "^2.5.0", - "is-interactive": "^1.0.0", - "is-unicode-supported": "^0.1.0", - "log-symbols": "^4.1.0", - "strip-ansi": "^6.0.0", - "wcwidth": "^1.0.1" - } - }, - "os-tmpdir": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", - "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", - "dev": true - }, - "outvariant": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.0.tgz", - "integrity": 
"sha512-AlWY719RF02ujitly7Kk/0QlV+pXGFDHrHf9O2OKqyqgBieaPOIeuSkL8sRK6j2WK+/ZAURq2kZsY0d8JapUiw==", - "dev": true - }, - "p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dev": true, - "requires": { - "p-try": "^2.0.0" - } - }, - "p-locate": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz", - "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", - "dev": true, - "requires": { - "p-limit": "^2.2.0" - } - }, - "p-try": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", - "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", - "dev": true - }, - "parent-module": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", - "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", - "dev": true, - "requires": { - "callsites": "^3.0.0" - } - }, - "parse-json": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", - "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", - "dev": true, - "requires": { - "@babel/code-frame": "^7.0.0", - "error-ex": "^1.3.1", - "json-parse-even-better-errors": "^2.3.0", - "lines-and-columns": "^1.1.6" - } - }, - "parse5": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", - "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==", - "dev": true, - "requires": { - "entities": "^4.4.0" - } - }, - "path-exists": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==" - }, - "path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w==", - "dev": true - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true - }, - "path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "dev": true - }, - "path-to-regexp": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.1.tgz", - "integrity": "sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw==", - "dev": true - }, - "path-type": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", - "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", - "dev": true - }, - "pathe": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.0.tgz", - "integrity": "sha512-ODbEPR0KKHqECXW1GoxdDb+AZvULmXjVPy4rt+pGo2+TnjJTIPJQSVS6N63n8T2Ip+syHhbn52OewKicV0373w==", - "dev": true - }, - "pathval": { - "version": "1.1.1", - 
"resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.1.tgz", - "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", - "dev": true - }, - "picocolors": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.0.tgz", - "integrity": "sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==" - }, - "picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==" - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", - "dev": true - }, - "pirates": { - "version": "4.0.5", - "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.5.tgz", - "integrity": "sha512-8V9+HQPupnaXMA23c5hvl69zXvTwTzyAYasnkb0Tts4XvO4CliqONMOnvlq26rkhLC3nWDFBJf73LU1e1VZLaQ==", - "dev": true - }, - "pkg-types": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.0.2.tgz", - "integrity": "sha512-hM58GKXOcj8WTqUXnsQyJYXdeAPbythQgEF3nTcEo+nkD49chjQ9IKm/QJy9xf6JakXptz86h7ecP2024rrLaQ==", - "dev": true, - "requires": { - "jsonc-parser": "^3.2.0", - "mlly": "^1.1.1", - "pathe": "^1.1.0" - } - }, - "postcss": { - "version": "8.4.31", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", - "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", - "requires": { - "nanoid": "^3.3.6", - "picocolors": "^1.0.0", - "source-map-js": "^1.0.2" - } - }, - "postcss-attribute-case-insensitive": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/postcss-attribute-case-insensitive/-/postcss-attribute-case-insensitive-6.0.2.tgz", 
- "integrity": "sha512-IRuCwwAAQbgaLhxQdQcIIK0dCVXg3XDUnzgKD8iwdiYdwU4rMWRWyl/W9/0nA4ihVpq5pyALiHB2veBJ0292pw==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-clamp": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/postcss-clamp/-/postcss-clamp-4.1.0.tgz", - "integrity": "sha512-ry4b1Llo/9zz+PKC+030KUnPITTJAHeOwjfAyyB60eT0AorGLdzp52s31OsPRHRf8NchkgFoG2y6fCfn1IV1Ow==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-color-functional-notation": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/postcss-color-functional-notation/-/postcss-color-functional-notation-5.0.2.tgz", - "integrity": "sha512-M6ygxWOyd6eWf3sd1Lv8xi4SeF4iBPfJvkfMU4ITh8ExJc1qhbvh/U8Cv/uOvBgUVOMDdScvCdlg8+hREQzs7w==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-color-hex-alpha": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/postcss-color-hex-alpha/-/postcss-color-hex-alpha-9.0.2.tgz", - "integrity": "sha512-SfPjgr//VQ/DOCf80STIAsdAs7sbIbxATvVmd+Ec7JvR8onz9pjawhq3BJM3Pie40EE3TyB0P6hft16D33Nlyg==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-color-rebeccapurple": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/postcss-color-rebeccapurple/-/postcss-color-rebeccapurple-8.0.2.tgz", - "integrity": "sha512-xWf/JmAxVoB5bltHpXk+uGRoGFwu4WDAR7210el+iyvTdqiKpDhtcT8N3edXMoVJY0WHFMrKMUieql/wRNiXkw==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-custom-media": { - "version": "9.1.2", - "resolved": "https://registry.npmjs.org/postcss-custom-media/-/postcss-custom-media-9.1.2.tgz", - "integrity": "sha512-osM9g4UKq4XKimAC7RAXroqi3BXpxfwTswAJQiZdrBjWGFGEyxQrY5H2eDWI8F+MEvEUfYDxA8scqi3QWROCSw==", - "requires": { - "@csstools/cascade-layer-name-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "@csstools/media-query-list-parser": "^2.0.0" - } - }, - 
"postcss-custom-properties": { - "version": "13.1.4", - "resolved": "https://registry.npmjs.org/postcss-custom-properties/-/postcss-custom-properties-13.1.4.tgz", - "integrity": "sha512-iSAdaZrM3KMec8cOSzeTUNXPYDlhqsMJHpt62yrjwG6nAnMtRHPk5JdMzGosBJtqEahDolvD5LNbcq+EZ78o5g==", - "requires": { - "@csstools/cascade-layer-name-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-custom-selectors": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/postcss-custom-selectors/-/postcss-custom-selectors-7.1.2.tgz", - "integrity": "sha512-jX7VlE3jrgfBIOfxiGNRFq81xUoHSZhvxhQurzE7ZFRv+bUmMwB7/XnA0nNlts2CwNtbXm4Ozy0ZAYKHlCRmBQ==", - "requires": { - "@csstools/cascade-layer-name-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.0", - "@csstools/css-tokenizer": "^2.0.0", - "postcss-selector-parser": "^6.0.4" - } - }, - "postcss-dir-pseudo-class": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/postcss-dir-pseudo-class/-/postcss-dir-pseudo-class-7.0.2.tgz", - "integrity": "sha512-cMnslilYxBf9k3qejnovrUONZx1rXeUZJw06fgIUBzABJe3D2LiLL5WAER7Imt3nrkaIgG05XZBztueLEf5P8w==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-double-position-gradients": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-double-position-gradients/-/postcss-double-position-gradients-4.0.2.tgz", - "integrity": "sha512-GXL1RmFREDK4Q9aYvI2RhVrA6a6qqSMQQ5ke8gSH1xgV6exsqbcJpIumC7AOgooH6/WIG3/K/T8xxAiVHy/tJg==", - "requires": { - "@csstools/postcss-progressive-custom-properties": "^2.0.0", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-flexbugs-fixes": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/postcss-flexbugs-fixes/-/postcss-flexbugs-fixes-5.0.2.tgz", - "integrity": "sha512-18f9voByak7bTktR2QgDveglpn9DTbBWPUzSOe9g0N4WR/2eSt6Vrcbf0hmspvMI6YWGywz6B9f7jzpFNJJgnQ==", - "requires": {} - }, 
- "postcss-focus-visible": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/postcss-focus-visible/-/postcss-focus-visible-8.0.2.tgz", - "integrity": "sha512-f/Vd+EC/GaKElknU59esVcRYr/Y3t1ZAQyL4u2xSOgkDy4bMCmG7VP5cGvj3+BTLNE9ETfEuz2nnt4qkZwTTeA==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-focus-within": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/postcss-focus-within/-/postcss-focus-within-7.0.2.tgz", - "integrity": "sha512-AHAJ89UQBcqBvFgQJE9XasGuwMNkKsGj4D/f9Uk60jFmEBHpAL14DrnSk3Rj+SwZTr/WUG+mh+Rvf8fid/346w==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-font-variant": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-font-variant/-/postcss-font-variant-5.0.0.tgz", - "integrity": "sha512-1fmkBaCALD72CK2a9i468mA/+tr9/1cBxRRMXOUaZqO43oWPR5imcyPjXwuv7PXbCid4ndlP5zWhidQVVa3hmA==", - "requires": {} - }, - "postcss-gap-properties": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-gap-properties/-/postcss-gap-properties-4.0.1.tgz", - "integrity": "sha512-V5OuQGw4lBumPlwHWk/PRfMKjaq/LTGR4WDTemIMCaMevArVfCCA9wBJiL1VjDAd+rzuCIlkRoRvDsSiAaZ4Fg==", - "requires": {} - }, - "postcss-image-set-function": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/postcss-image-set-function/-/postcss-image-set-function-5.0.2.tgz", - "integrity": "sha512-Sszjwo0ubETX0Fi5MvpYzsONwrsjeabjMoc5YqHvURFItXgIu3HdCjcVuVKGMPGzKRhgaknmdM5uVWInWPJmeg==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-import": { - "version": "14.1.0", - "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-14.1.0.tgz", - "integrity": "sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw==", - "dev": true, - "requires": { - "postcss-value-parser": "^4.0.0", - "read-cache": "^1.0.0", - "resolve": "^1.1.7" - } - }, - "postcss-initial": { - "version": "4.0.1", - "resolved": 
"https://registry.npmjs.org/postcss-initial/-/postcss-initial-4.0.1.tgz", - "integrity": "sha512-0ueD7rPqX8Pn1xJIjay0AZeIuDoF+V+VvMt/uOnn+4ezUKhZM/NokDeP6DwMNyIoYByuN/94IQnt5FEkaN59xQ==", - "requires": {} - }, - "postcss-js": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", - "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", - "dev": true, - "requires": { - "camelcase-css": "^2.0.1" - } - }, - "postcss-lab-function": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/postcss-lab-function/-/postcss-lab-function-5.2.0.tgz", - "integrity": "sha512-ie/k0xFCib22LV56jZoygLuWfM4J4migb89QnEXOjORGh6UwsDVSPW/x+P2MYS+AKFfZ5Npcu5HYEzYcezAAag==", - "requires": { - "@csstools/css-color-parser": "^1.0.0", - "@csstools/css-parser-algorithms": "^2.0.1", - "@csstools/css-tokenizer": "^2.1.0", - "@csstools/postcss-progressive-custom-properties": "^2.0.0" - } - }, - "postcss-load-config": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-3.1.4.tgz", - "integrity": "sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==", - "dev": true, - "requires": { - "lilconfig": "^2.0.5", - "yaml": "^1.10.2" - } - }, - "postcss-logical": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/postcss-logical/-/postcss-logical-6.1.0.tgz", - "integrity": "sha512-qb1+LpClhYjxac8SfOcWotnY3unKZesDqIOm+jnGt8rTl7xaIWpE2bPGZHxflOip1E/4ETo79qlJyRL3yrHn1g==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-media-query-parser": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/postcss-media-query-parser/-/postcss-media-query-parser-0.2.3.tgz", - "integrity": "sha512-3sOlxmbKcSHMjlUXQZKQ06jOswE7oVkXPxmZdoB1r5l0q6gTFTQSHxNxOrCccElbW7dxNytifNEo8qidX2Vsig==", - "dev": true - }, - "postcss-nested": { - "version": "6.0.0", - "resolved": 
"https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.0.tgz", - "integrity": "sha512-0DkamqrPcmkBDsLn+vQDIrtkSbNkv5AD/M322ySo9kqFkCIYklym2xEmWkwo+Y3/qZo34tzEPNUw4y7yMCdv5w==", - "dev": true, - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-nesting": { - "version": "11.2.2", - "resolved": "https://registry.npmjs.org/postcss-nesting/-/postcss-nesting-11.2.2.tgz", - "integrity": "sha512-aOTiUniAB1bcPE6GGiynWRa6PZFPhOTAm5q3q5cem6QeSijIHHkWr6gs65ukCZMXeak8yXeZVbBJET3VM+HlhA==", - "requires": { - "@csstools/selector-specificity": "^2.0.0", - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-opacity-percentage": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/postcss-opacity-percentage/-/postcss-opacity-percentage-2.0.0.tgz", - "integrity": "sha512-lyDrCOtntq5Y1JZpBFzIWm2wG9kbEdujpNt4NLannF+J9c8CgFIzPa80YQfdza+Y+yFfzbYj/rfoOsYsooUWTQ==", - "requires": {} - }, - "postcss-overflow-shorthand": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/postcss-overflow-shorthand/-/postcss-overflow-shorthand-4.0.1.tgz", - "integrity": "sha512-HQZ0qi/9iSYHW4w3ogNqVNr2J49DHJAl7r8O2p0Meip38jsdnRPgiDW7r/LlLrrMBMe3KHkvNtAV2UmRVxzLIg==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-page-break": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/postcss-page-break/-/postcss-page-break-3.0.4.tgz", - "integrity": "sha512-1JGu8oCjVXLa9q9rFTo4MbeeA5FMe00/9C7lN4va606Rdb+HkxXtXsmEDrIraQ11fGz/WvKWa8gMuCKkrXpTsQ==", - "requires": {} - }, - "postcss-place": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/postcss-place/-/postcss-place-8.0.1.tgz", - "integrity": "sha512-Ow2LedN8sL4pq8ubukO77phSVt4QyCm35ZGCYXKvRFayAwcpgB0sjNJglDoTuRdUL32q/ZC1VkPBo0AOEr4Uiw==", - "requires": { - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-preset-env": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/postcss-preset-env/-/postcss-preset-env-8.3.0.tgz", - 
"integrity": "sha512-VFc/bhwRo37RoTVzCTCKDJLw0lwsqLRCTc7dkJkfs9S7XXfTbk7QkhbMWHd2L+iZsAsE5yqdSRBZ41/Q828TbA==", - "requires": { - "@csstools/postcss-cascade-layers": "^3.0.1", - "@csstools/postcss-color-function": "^2.1.0", - "@csstools/postcss-color-mix-function": "^1.0.0", - "@csstools/postcss-font-format-keywords": "^2.0.2", - "@csstools/postcss-gradients-interpolation-method": "^3.0.1", - "@csstools/postcss-hwb-function": "^2.2.0", - "@csstools/postcss-ic-unit": "^2.0.2", - "@csstools/postcss-is-pseudo-class": "^3.1.1", - "@csstools/postcss-logical-float-and-clear": "^1.0.1", - "@csstools/postcss-logical-resize": "^1.0.1", - "@csstools/postcss-logical-viewport-units": "^1.0.2", - "@csstools/postcss-media-minmax": "^1.0.0", - "@csstools/postcss-media-queries-aspect-ratio-number-values": "^1.0.1", - "@csstools/postcss-nested-calc": "^2.0.2", - "@csstools/postcss-normalize-display-values": "^2.0.1", - "@csstools/postcss-oklab-function": "^2.2.0", - "@csstools/postcss-progressive-custom-properties": "^2.1.0", - "@csstools/postcss-scope-pseudo-class": "^2.0.2", - "@csstools/postcss-stepped-value-functions": "^2.1.0", - "@csstools/postcss-text-decoration-shorthand": "^2.2.1", - "@csstools/postcss-trigonometric-functions": "^2.1.0", - "@csstools/postcss-unset-value": "^2.0.1", - "autoprefixer": "^10.4.14", - "browserslist": "^4.21.5", - "css-blank-pseudo": "^5.0.2", - "css-has-pseudo": "^5.0.2", - "css-prefers-color-scheme": "^8.0.2", - "cssdb": "^7.5.3", - "postcss-attribute-case-insensitive": "^6.0.2", - "postcss-clamp": "^4.1.0", - "postcss-color-functional-notation": "^5.0.2", - "postcss-color-hex-alpha": "^9.0.2", - "postcss-color-rebeccapurple": "^8.0.2", - "postcss-custom-media": "^9.1.2", - "postcss-custom-properties": "^13.1.4", - "postcss-custom-selectors": "^7.1.2", - "postcss-dir-pseudo-class": "^7.0.2", - "postcss-double-position-gradients": "^4.0.2", - "postcss-focus-visible": "^8.0.2", - "postcss-focus-within": "^7.0.2", - "postcss-font-variant": 
"^5.0.0", - "postcss-gap-properties": "^4.0.1", - "postcss-image-set-function": "^5.0.2", - "postcss-initial": "^4.0.1", - "postcss-lab-function": "^5.2.0", - "postcss-logical": "^6.1.0", - "postcss-nesting": "^11.2.1", - "postcss-opacity-percentage": "^2.0.0", - "postcss-overflow-shorthand": "^4.0.1", - "postcss-page-break": "^3.0.4", - "postcss-place": "^8.0.1", - "postcss-pseudo-class-any-link": "^8.0.2", - "postcss-replace-overflow-wrap": "^4.0.0", - "postcss-selector-not": "^7.0.1", - "postcss-value-parser": "^4.2.0" - } - }, - "postcss-pseudo-class-any-link": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/postcss-pseudo-class-any-link/-/postcss-pseudo-class-any-link-8.0.2.tgz", - "integrity": "sha512-FYTIuRE07jZ2CW8POvctRgArQJ43yxhr5vLmImdKUvjFCkR09kh8pIdlCwdx/jbFm7MiW4QP58L4oOUv3grQYA==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-replace-overflow-wrap": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/postcss-replace-overflow-wrap/-/postcss-replace-overflow-wrap-4.0.0.tgz", - "integrity": "sha512-KmF7SBPphT4gPPcKZc7aDkweHiKEEO8cla/GjcBK+ckKxiZslIu3C4GCRW3DNfL0o7yW7kMQu9xlZ1kXRXLXtw==", - "requires": {} - }, - "postcss-resolve-nested-selector": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/postcss-resolve-nested-selector/-/postcss-resolve-nested-selector-0.1.1.tgz", - "integrity": "sha512-HvExULSwLqHLgUy1rl3ANIqCsvMS0WHss2UOsXhXnQaZ9VCc2oBvIpXrl00IUFT5ZDITME0o6oiXeiHr2SAIfw==", - "dev": true - }, - "postcss-safe-parser": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/postcss-safe-parser/-/postcss-safe-parser-6.0.0.tgz", - "integrity": "sha512-FARHN8pwH+WiS2OPCxJI8FuRJpTVnn6ZNFiqAM2aeW2LwTHWWmWgIyKC6cUo0L8aeKiF/14MNvnpls6R2PBeMQ==", - "dev": true, - "requires": {} - }, - "postcss-scss": { - "version": "4.0.6", - "resolved": "https://registry.npmjs.org/postcss-scss/-/postcss-scss-4.0.6.tgz", - "integrity": 
"sha512-rLDPhJY4z/i4nVFZ27j9GqLxj1pwxE80eAzUNRMXtcpipFYIeowerzBgG3yJhMtObGEXidtIgbUpQ3eLDsf5OQ==", - "dev": true, - "requires": {} - }, - "postcss-selector-not": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-selector-not/-/postcss-selector-not-7.0.1.tgz", - "integrity": "sha512-1zT5C27b/zeJhchN7fP0kBr16Cc61mu7Si9uWWLoA3Px/D9tIJPKchJCkUH3tPO5D0pCFmGeApAv8XpXBQJ8SQ==", - "requires": { - "postcss-selector-parser": "^6.0.10" - } - }, - "postcss-selector-parser": { - "version": "6.0.11", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.11.tgz", - "integrity": "sha512-zbARubNdogI9j7WY4nQJBiNqQf3sLS3wCP4WfOidu+p28LofJqDH1tcXypGrcmMHhDk2t9wGhCsYe/+szLTy1g==", - "requires": { - "cssesc": "^3.0.0", - "util-deprecate": "^1.0.2" - } - }, - "postcss-value-parser": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", - "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==" - }, - "prelude-ls": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", - "integrity": "sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w==", - "dev": true - }, - "prettier": { - "version": "2.8.7", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.7.tgz", - "integrity": "sha512-yPngTo3aXUUmyuTjeTUT75txrf+aMh9FiD7q9ZE/i6r0bPb22g4FsE6Y338PQX1bmfy08i9QQCB7/rcUAVntfw==", - "dev": true - }, - "pretty-format": { - "version": "27.5.1", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", - "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.1", - "ansi-styles": "^5.0.0", - "react-is": "^17.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "5.2.0", - 
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "dev": true - } - } - }, - "prop-types": { - "version": "15.8.1", - "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", - "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", - "requires": { - "loose-envify": "^1.4.0", - "object-assign": "^4.1.1", - "react-is": "^16.13.1" - }, - "dependencies": { - "react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" - } - } - }, - "protobufjs": { - "version": "7.2.5", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz", - "integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==", - "dev": true, - "peer": true, - "requires": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/node": ">=13.7.0", - "long": "^5.0.0" - } - }, - "protobufjs-cli": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/protobufjs-cli/-/protobufjs-cli-1.1.1.tgz", - "integrity": "sha512-VPWMgIcRNyQwWUv8OLPyGQ/0lQY/QTQAVN5fh+XzfDwsVw1FZ2L3DM/bcBf8WPiRz2tNpaov9lPZfNcmNo6LXA==", - "dev": true, - "requires": { - "chalk": "^4.0.0", - "escodegen": "^1.13.0", - "espree": "^9.0.0", - "estraverse": "^5.1.0", - "glob": "^8.0.0", - "jsdoc": "^4.0.0", - "minimist": "^1.2.0", - "semver": "^7.1.2", - "tmp": "^0.2.1", - "uglify-js": "^3.7.7" - 
}, - "dependencies": { - "escodegen": { - "version": "1.14.3", - "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.14.3.tgz", - "integrity": "sha512-qFcX0XJkdg+PB3xjZZG/wKSuT1PnQWx57+TVSjIMmILd2yC/6ByYElPwJnslDsuWuSAp4AwJGumarAAmJch5Kw==", - "dev": true, - "requires": { - "esprima": "^4.0.1", - "estraverse": "^4.2.0", - "esutils": "^2.0.2", - "optionator": "^0.8.1", - "source-map": "~0.6.1" - }, - "dependencies": { - "estraverse": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz", - "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", - "dev": true - } - } - }, - "lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "requires": { - "yallist": "^4.0.0" - } - }, - "semver": { - "version": "7.5.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", - "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, - "requires": { - "lru-cache": "^6.0.0" - } - }, - "tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dev": true, - "requires": { - "rimraf": "^3.0.0" - } - }, - "yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - } - } - }, - "psl": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", - "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==", 
- "dev": true - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ==", - "dev": true - }, - "query-string": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/query-string/-/query-string-7.1.3.tgz", - "integrity": "sha512-hh2WYhq4fi8+b+/2Kg9CEge4fDPvHS534aOOvOZeQ3+Vf2mCFsaFBYj0i+iXcAq6I9Vzp5fjMFBlONvayDC1qg==", - "requires": { - "decode-uri-component": "^0.2.2", - "filter-obj": "^1.1.0", - "split-on-first": "^1.0.0", - "strict-uri-encode": "^2.0.0" - } - }, - "querystringify": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", - "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", - "dev": true - }, - "queue-microtask": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", - "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true - }, - "quick-lru": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-4.0.1.tgz", - "integrity": "sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==", - "dev": true - }, - "range-parser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==", - "dev": true - }, - "rc": { - "version": 
"1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - } - }, - "react": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz", - "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, - "react-dom": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz", - "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1", - "scheduler": "^0.20.2" - } - }, - "react-error-boundary": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/react-error-boundary/-/react-error-boundary-3.1.4.tgz", - "integrity": "sha512-uM9uPzZJTF6wRQORmSrvOIgt4lJ9MC1sNgEOj2XGsDTRE4kmpWxg7ENK9EWNKJRMAOY9z0MuF4yIfl6gp4sotA==", - "dev": true, - "requires": { - "@babel/runtime": "^7.12.5" - } - }, - "react-flow-renderer": { - "version": "10.3.17", - "resolved": "https://registry.npmjs.org/react-flow-renderer/-/react-flow-renderer-10.3.17.tgz", - "integrity": "sha512-bywiqVErlh5kCDqw3x0an5Ur3mT9j9CwJsDwmhmz4i1IgYM1a0SPqqEhClvjX+s5pU4nHjmVaGXWK96pwsiGcQ==", - "requires": { - "@babel/runtime": "^7.18.9", - "@types/d3": "^7.4.0", - "@types/resize-observer-browser": "^0.1.7", - "classcat": "^5.0.3", - "d3-drag": "^3.0.0", - "d3-selection": "^3.0.0", - "d3-zoom": "^3.0.0", - "zustand": "^3.7.2" - } - }, - "react-is": { - "version": "17.0.2", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", - "integrity": 
"sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==" - }, - "react-query": { - "version": "3.39.3", - "resolved": "https://registry.npmjs.org/react-query/-/react-query-3.39.3.tgz", - "integrity": "sha512-nLfLz7GiohKTJDuT4us4X3h/8unOh+00MLb2yJoGTPjxKs2bc1iDhkNx2bd5MKklXnOD3NrVZ+J2UXujA5In4g==", - "requires": { - "@babel/runtime": "^7.5.5", - "broadcast-channel": "^3.4.1", - "match-sorter": "^6.0.2" - } - }, - "react-refresh": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.14.0.tgz", - "integrity": "sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==", - "dev": true - }, - "react-router": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz", - "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==", - "requires": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "hoist-non-react-statics": "^3.1.0", - "loose-envify": "^1.3.1", - "path-to-regexp": "^1.7.0", - "prop-types": "^15.6.2", - "react-is": "^16.6.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "dependencies": { - "history": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", - "requires": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - }, - "isarray": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", - "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ==" - }, - "path-to-regexp": { - "version": "1.8.0", - "resolved": 
"https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.8.0.tgz", - "integrity": "sha512-n43JRhlUKUAlibEJhPeir1ncUID16QnEjNpwzNdO3Lm4ywrBpBZ5oLD0I6br9evr1Y9JTqwRtAh7JLoOzAQdVA==", - "requires": { - "isarray": "0.0.1" - } - }, - "react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" - } - } - }, - "react-router-dom": { - "version": "5.3.4", - "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz", - "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==", - "requires": { - "@babel/runtime": "^7.12.13", - "history": "^4.9.0", - "loose-envify": "^1.3.1", - "prop-types": "^15.6.2", - "react-router": "5.3.4", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0" - }, - "dependencies": { - "history": { - "version": "4.10.1", - "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz", - "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==", - "requires": { - "@babel/runtime": "^7.1.2", - "loose-envify": "^1.2.0", - "resolve-pathname": "^3.0.0", - "tiny-invariant": "^1.0.2", - "tiny-warning": "^1.0.0", - "value-equal": "^1.0.1" - } - } - } - }, - "react-tiny-popover": { - "version": "6.0.10", - "resolved": "https://registry.npmjs.org/react-tiny-popover/-/react-tiny-popover-6.0.10.tgz", - "integrity": "sha512-ECMucd701SxWHGa+2YuVvccCxxTjmhomcD0ZYTF+Qmi5qNAj8pdlExFN+k+p1G78QTYIGPGNLocxRb9f6cZ0Mw==", - "requires": {} - }, - "react-toastify": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/react-toastify/-/react-toastify-8.2.0.tgz", - "integrity": "sha512-Pg2Ju7NngAamarFvLwqrFomJ57u/Ay6i6zfLurt/qPynWkAkOthu6vxfqYpJCyNhHRhR4hu7+bySSeWWJu6PAg==", - "requires": { - "clsx": "^1.1.1" - } - }, - "read-cache": { - "version": "1.0.0", 
- "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", - "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", - "dev": true, - "requires": { - "pify": "^2.3.0" - } - }, - "read-pkg": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-5.2.0.tgz", - "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", - "dev": true, - "requires": { - "@types/normalize-package-data": "^2.4.0", - "normalize-package-data": "^2.5.0", - "parse-json": "^5.0.0", - "type-fest": "^0.6.0" - }, - "dependencies": { - "hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", - "dev": true - }, - "normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "requires": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" - } - }, - "semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "dev": true - }, - "type-fest": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.6.0.tgz", - "integrity": "sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", - "dev": true - } - } - }, - "read-pkg-up": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz", - 
"integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", - "dev": true, - "requires": { - "find-up": "^4.1.0", - "read-pkg": "^5.2.0", - "type-fest": "^0.8.1" - }, - "dependencies": { - "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", - "dev": true - } - } - }, - "readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dev": true, - "requires": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - } - }, - "readdirp": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", - "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", - "requires": { - "picomatch": "^2.2.1" - } - }, - "redent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/redent/-/redent-3.0.0.tgz", - "integrity": "sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==", - "dev": true, - "requires": { - "indent-string": "^4.0.0", - "strip-indent": "^3.0.0" - } - }, - "regenerate": { - "version": "1.4.2", - "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz", - "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", - "dev": true - }, - "regenerate-unicode-properties": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz", - "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==", - 
"dev": true, - "requires": { - "regenerate": "^1.4.2" - } - }, - "regenerator-runtime": { - "version": "0.13.11", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", - "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" - }, - "regenerator-transform": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz", - "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==", - "dev": true, - "requires": { - "@babel/runtime": "^7.8.4" - } - }, - "regexp.prototype.flags": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.4.3.tgz", - "integrity": "sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.3", - "functions-have-names": "^1.2.2" - } - }, - "regexpu-core": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz", - "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==", - "dev": true, - "requires": { - "@babel/regjsgen": "^0.8.0", - "regenerate": "^1.4.2", - "regenerate-unicode-properties": "^10.1.0", - "regjsparser": "^0.9.1", - "unicode-match-property-ecmascript": "^2.0.0", - "unicode-match-property-value-ecmascript": "^2.1.0" - } - }, - "registry-auth-token": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.3.2.tgz", - "integrity": "sha512-JL39c60XlzCVgNrO+qq68FoNb56w/m7JYvGR2jT5iR1xBrUA3Mfx5Twk5rqTThPmQKMWydGmq8oFtDlxfrmxnQ==", - "dev": true, - "requires": { - "rc": "^1.1.6", - "safe-buffer": "^5.0.1" - } - }, - "registry-url": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz", - "integrity": "sha512-ZbgR5aZEdf4UKZVBPYIgaglBmSF2Hi94s2PcIHhRGFjKYu+chjJdYfHn4rt3hB6eCKLJ8giVIIfgMa1ehDfZKA==", - "dev": true, - "requires": { - "rc": "^1.0.1" - } - }, - "regjsparser": { - "version": "0.9.1", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz", - "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==", - "dev": true, - "requires": { - "jsesc": "~0.5.0" - }, - "dependencies": { - "jsesc": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz", - "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==", - "dev": true - } - } - }, - "remove-accents": { - "version": "0.4.2", - "resolved": "https://registry.npmjs.org/remove-accents/-/remove-accents-0.4.2.tgz", - "integrity": "sha512-7pXIJqJOq5tFgG1A2Zxti3Ht8jJF337m4sowbuHsW30ZnkQFnDzy9qBNhgzX8ZLW4+UBcXiiR7SwR6pokHsxiA==" - }, - "require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "dev": true - }, - "require-from-string": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", - "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", - "dev": true - }, - "requires-port": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", - "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", - "dev": true - }, - "requizzle": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/requizzle/-/requizzle-0.2.4.tgz", - "integrity": 
"sha512-JRrFk1D4OQ4SqovXOgdav+K8EAhSB/LJZqCz8tbX0KObcdeM15Ss59ozWMBWmmINMagCwmqn4ZNryUGpBsl6Jw==", - "dev": true, - "requires": { - "lodash": "^4.17.21" - } - }, - "resolve": { - "version": "1.22.1", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz", - "integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==", - "dev": true, - "requires": { - "is-core-module": "^2.9.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - } - }, - "resolve-from": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-5.0.0.tgz", - "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", - "dev": true - }, - "resolve-pathname": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz", - "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng==" - }, - "restore-cursor": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", - "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", - "dev": true, - "requires": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" - } - }, - "reusify": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", - "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "requires": { - "glob": "^7.1.3" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "requires": { - "brace-expansion": "^1.1.7" - } - } - } - }, - "rollup": { - "version": "3.20.2", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.20.2.tgz", - "integrity": "sha512-3zwkBQl7Ai7MFYQE0y1MeQ15+9jsi7XxfrqwTb/9EK8D9C9+//EBR4M+CuA1KODRaNbFez/lWxA5vhEGZp4MUg==", - "dev": true, - "requires": { - "fsevents": "~2.3.2" - } - }, - "rrweb-cssom": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz", - "integrity": "sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==", - "dev": true - }, - "run-async": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.1.tgz", - "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", - "dev": true - }, - "run-parallel": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", - "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, - "requires": 
{ - "queue-microtask": "^1.2.2" - } - }, - "rxjs": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.0.tgz", - "integrity": "sha512-F2+gxDshqmIub1KdvZkaEfGDwLNpPvk9Fs6LD/MyQxNgMds/WH9OdDDXOmxUZpME+iSK3rQCctkL0DYyytUqMg==", - "dev": true, - "requires": { - "tslib": "^2.1.0" - } - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "safe-regex-test": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/safe-regex-test/-/safe-regex-test-1.0.0.tgz", - "integrity": "sha512-JBUUzyOgEwXQY1NuPtvcj/qcBDbDmEvWufhlnXZIm75DEHp+afM1r1ujJpJsV/gSM4t59tpDyPi1sd6ZaPFfsA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "get-intrinsic": "^1.1.3", - "is-regex": "^1.1.4" - } - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true - }, - "sass": { - "version": "1.60.0", - "resolved": "https://registry.npmjs.org/sass/-/sass-1.60.0.tgz", - "integrity": "sha512-updbwW6fNb5gGm8qMXzVO7V4sWf7LMXnMly/JEyfbfERbVH46Fn6q02BX7/eHTdKpE7d+oTkMMQpFWNUMfFbgQ==", - "requires": { - "chokidar": ">=3.0.0 <4.0.0", - "immutable": "^4.0.0", - "source-map-js": ">=0.6.2 <2.0.0" - } - }, - "saxes": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", - "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", - "dev": true, - "requires": { - "xmlchars": "^2.2.0" - } - }, - "scheduler": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz", - "integrity": 
"sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==", - "requires": { - "loose-envify": "^1.1.0", - "object-assign": "^4.1.1" - } - }, - "semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true - }, - "serve": { - "version": "14.2.0", - "resolved": "https://registry.npmjs.org/serve/-/serve-14.2.0.tgz", - "integrity": "sha512-+HOw/XK1bW8tw5iBilBz/mJLWRzM8XM6MPxL4J/dKzdxq1vfdEWSwhaR7/yS8EJp5wzvP92p1qirysJvnEtjXg==", - "dev": true, - "requires": { - "@zeit/schemas": "2.29.0", - "ajv": "8.11.0", - "arg": "5.0.2", - "boxen": "7.0.0", - "chalk": "5.0.1", - "chalk-template": "0.4.0", - "clipboardy": "3.0.0", - "compression": "1.7.4", - "is-port-reachable": "4.0.0", - "serve-handler": "6.1.5", - "update-check": "1.5.4" - }, - "dependencies": { - "chalk": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz", - "integrity": "sha512-Fo07WOYGqMfCWHOzSXOt2CxDbC6skS/jO9ynEcmpANMoPrD+W1r1K6Vx7iNm+AQmETU1Xr2t+n8nzkV9t6xh3w==", - "dev": true - } - } - }, - "serve-handler": { - "version": "6.1.5", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz", - "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==", - "dev": true, - "requires": { - "bytes": "3.0.0", - "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", - "mime-types": "2.1.18", - "minimatch": "3.1.2", - "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", - "range-parser": "1.2.0" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - 
"requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "mime-db": { - "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", - "dev": true - }, - "mime-types": { - "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", - "dev": true, - "requires": { - "mime-db": "~1.33.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "path-to-regexp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==", - "dev": true - } - } - }, - "set-cookie-parser": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz", - "integrity": "sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==", - "dev": true - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true - }, - "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dev": true, - "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - }, - "siginfo": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", - "dev": true - }, - "signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true - }, - "slash": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", - "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==" - }, - "slice-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-4.0.0.tgz", - "integrity": "sha512-qMCMfhY040cVHT43K9BFygqYbUPFZKHOg7K73mtTWJRb8pyP3fzf4Ixd5SzdEJQ6MRUg/WBnOLxghZtKKurENQ==", - "dev": true, - "requires": { - "ansi-styles": "^4.0.0", - "astral-regex": "^2.0.0", - "is-fullwidth-code-point": "^3.0.0" - } - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", - "dev": true - }, - "source-map-js": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz", - "integrity": 
"sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==" - }, - "spdx-correct": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz", - "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", - "dev": true, - "requires": { - "spdx-expression-parse": "^3.0.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-exceptions": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz", - "integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==", - "dev": true - }, - "spdx-expression-parse": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", - "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", - "dev": true, - "requires": { - "spdx-exceptions": "^2.1.0", - "spdx-license-ids": "^3.0.0" - } - }, - "spdx-license-ids": { - "version": "3.0.13", - "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.13.tgz", - "integrity": "sha512-XkD+zwiqXHikFZm4AX/7JSCXA98U5Db4AFd5XUg/+9UNtnH75+Z9KxtpYiJZx36mUDVOwH83pl7yvCer6ewM3w==", - "dev": true - }, - "split-on-first": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/split-on-first/-/split-on-first-1.1.0.tgz", - "integrity": "sha512-43ZssAJaMusuKWL8sKUBQXHWOpq8d6CfN/u1p4gUzfJkM05C8rxTmYrkIPTXapZpORA6LkkzcUulJ8FqA7Uudw==" - }, - "stack-generator": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/stack-generator/-/stack-generator-2.0.10.tgz", - "integrity": "sha512-mwnua/hkqM6pF4k8SnmZ2zfETsRUpWXREfA/goT8SLCV4iOFa4bzOX2nDipWAZFPTjLvQB82f5yaodMVhK0yJQ==", - "requires": { - "stackframe": "^1.3.4" - } - }, - "stack-utils": { - "version": "2.0.6", - "resolved": 
"https://registry.npmjs.org/stack-utils/-/stack-utils-2.0.6.tgz", - "integrity": "sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==", - "requires": { - "escape-string-regexp": "^2.0.0" - }, - "dependencies": { - "escape-string-regexp": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", - "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==" - } - } - }, - "stackback": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", - "dev": true - }, - "stackframe": { - "version": "1.3.4", - "resolved": "https://registry.npmjs.org/stackframe/-/stackframe-1.3.4.tgz", - "integrity": "sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw==" - }, - "statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", - "dev": true - }, - "std-env": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.2.tgz", - "integrity": "sha512-uUZI65yrV2Qva5gqE0+A7uVAvO40iPo6jGhs7s8keRfHCmtg+uB2X6EiLGCI9IgL1J17xGhvoOqSz79lzICPTA==", - "dev": true - }, - "stop-iteration-iterator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/stop-iteration-iterator/-/stop-iteration-iterator-1.0.0.tgz", - "integrity": "sha512-iCGQj+0l0HOdZ2AEeBADlsRC+vsnDsZsbdSiH1yNSjcfKM7fdpCMfqAL/dwF5BLiw/XhRft/Wax6zQbhq2BcjQ==", - "dev": true, - "requires": { - "internal-slot": "^1.0.4" - } - }, - "strict-event-emitter": { - "version": "0.2.8", - "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz", - 
"integrity": "sha512-KDf/ujU8Zud3YaLtMCcTI4xkZlZVIYxTLr+XIULexP+77EEVWixeXroLUXQXiVtH4XH2W7jr/3PT1v3zBuvc3A==", - "dev": true, - "requires": { - "events": "^3.3.0" - } - }, - "strict-uri-encode": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-2.0.0.tgz", - "integrity": "sha512-QwiXZgpRcKkhTj2Scnn++4PKtWsH0kpzZ62L2R6c/LUVYv7hVnZqcg2+sMuT6R7Jusu1vviK/MFsu6kNJfWlEQ==" - }, - "string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dev": true, - "requires": { - "safe-buffer": "~5.2.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true - } - } - }, - "string-natural-compare": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/string-natural-compare/-/string-natural-compare-3.0.1.tgz", - "integrity": "sha512-n3sPwynL1nwKi3WJ6AIsClwBMa0zTi54fn2oLU6ndfTSIO05xaznjSf15PcBZU6FNWbmN5Q6cxT4V5hGvB4taw==", - "dev": true - }, - "string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - } - }, - "string.prototype.matchall": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/string.prototype.matchall/-/string.prototype.matchall-4.0.8.tgz", - "integrity": "sha512-6zOCOcJ+RJAQshcTvXPHoxoQGONa3e/Lqx90wUA+wEzX78sg5Bo+1tQo4N0pohS0erG9qtCqJDjNCQBjeWVxyg==", - "dev": true, - "requires": { - "call-bind": 
"^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4", - "get-intrinsic": "^1.1.3", - "has-symbols": "^1.0.3", - "internal-slot": "^1.0.3", - "regexp.prototype.flags": "^1.4.3", - "side-channel": "^1.0.4" - } - }, - "string.prototype.trim": { - "version": "1.2.7", - "resolved": "https://registry.npmjs.org/string.prototype.trim/-/string.prototype.trim-1.2.7.tgz", - "integrity": "sha512-p6TmeT1T3411M8Cgg9wBTMRtY2q9+PNy9EV1i2lIXUN/btt763oIfxwN3RR8VU6wHX8j/1CFy0L+YuThm6bgOg==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "string.prototype.trimend": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.6.tgz", - "integrity": "sha512-JySq+4mrPf9EsDBEDYMOb/lM7XQLulwg5R/m1r0PXEFqrV0qHvl58sdTilSXtKOflCsK2E8jxf+GKC0T07RWwQ==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "string.prototype.trimstart": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.6.tgz", - "integrity": "sha512-omqjMDaY92pbn5HOX7f9IccLA+U1tA9GvtU4JrodiXFfYB7jPzzHpRzpglLAjtUV6bB557zwClJezTqnAiYnQA==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "define-properties": "^1.1.4", - "es-abstract": "^1.20.4" - } - }, - "strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.1" - } - }, - "strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", - "dev": true - }, - "strip-final-newline": { - 
"version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "dev": true - }, - "strip-indent": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", - "integrity": "sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==", - "dev": true, - "requires": { - "min-indent": "^1.0.0" - } - }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "dev": true - }, - "strip-literal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-1.0.1.tgz", - "integrity": "sha512-QZTsipNpa2Ppr6v1AmJHESqJ3Uz247MUS0OjrnnZjFAvEoWqxuyFuXn2xLgMtRnijJShAa1HL0gtJyUs7u7n3Q==", - "dev": true, - "requires": { - "acorn": "^8.8.2" - } - }, - "style-search": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/style-search/-/style-search-0.1.0.tgz", - "integrity": "sha512-Dj1Okke1C3uKKwQcetra4jSuk0DqbzbYtXipzFlFMZtowbF1x7BKJwB9AayVMyFARvU8EDrZdcax4At/452cAg==", - "dev": true - }, - "stylelint": { - "version": "14.16.1", - "resolved": "https://registry.npmjs.org/stylelint/-/stylelint-14.16.1.tgz", - "integrity": "sha512-ErlzR/T3hhbV+a925/gbfc3f3Fep9/bnspMiJPorfGEmcBbXdS+oo6LrVtoUZ/w9fqD6o6k7PtUlCOsCRdjX/A==", - "dev": true, - "requires": { - "@csstools/selector-specificity": "^2.0.2", - "balanced-match": "^2.0.0", - "colord": "^2.9.3", - "cosmiconfig": "^7.1.0", - "css-functions-list": "^3.1.0", - "debug": "^4.3.4", - "fast-glob": "^3.2.12", - "fastest-levenshtein": "^1.0.16", - "file-entry-cache": "^6.0.1", - "global-modules": "^2.0.0", - "globby": "^11.1.0", - "globjoin": 
"^0.1.4", - "html-tags": "^3.2.0", - "ignore": "^5.2.1", - "import-lazy": "^4.0.0", - "imurmurhash": "^0.1.4", - "is-plain-object": "^5.0.0", - "known-css-properties": "^0.26.0", - "mathml-tag-names": "^2.1.3", - "meow": "^9.0.0", - "micromatch": "^4.0.5", - "normalize-path": "^3.0.0", - "picocolors": "^1.0.0", - "postcss": "^8.4.19", - "postcss-media-query-parser": "^0.2.3", - "postcss-resolve-nested-selector": "^0.1.1", - "postcss-safe-parser": "^6.0.0", - "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0", - "resolve-from": "^5.0.0", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1", - "style-search": "^0.1.0", - "supports-hyperlinks": "^2.3.0", - "svg-tags": "^1.0.0", - "table": "^6.8.1", - "v8-compile-cache": "^2.3.0", - "write-file-atomic": "^4.0.2" - }, - "dependencies": { - "balanced-match": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-2.0.0.tgz", - "integrity": "sha512-1ugUSr8BHXRnK23KfuYS+gVMC3LB8QGH9W1iGtDPsNWoQbgtXSExkBu2aDR4epiGWZOjZsj6lDl/N/AqqTC3UA==", - "dev": true - } - } - }, - "stylelint-config-prettier": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/stylelint-config-prettier/-/stylelint-config-prettier-9.0.5.tgz", - "integrity": "sha512-U44lELgLZhbAD/xy/vncZ2Pq8sh2TnpiPvo38Ifg9+zeioR+LAkHu0i6YORIOxFafZoVg0xqQwex6e6F25S5XA==", - "dev": true, - "requires": {} - }, - "stylelint-config-recommended": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended/-/stylelint-config-recommended-6.0.0.tgz", - "integrity": "sha512-ZorSSdyMcxWpROYUvLEMm0vSZud2uB7tX1hzBZwvVY9SV/uly4AvvJPPhCcymZL3fcQhEQG5AELmrxWqtmzacw==", - "dev": true, - "requires": {} - }, - "stylelint-config-recommended-scss": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/stylelint-config-recommended-scss/-/stylelint-config-recommended-scss-5.0.2.tgz", - "integrity": 
"sha512-b14BSZjcwW0hqbzm9b0S/ScN2+3CO3O4vcMNOw2KGf8lfVSwJ4p5TbNEXKwKl1+0FMtgRXZj6DqVUe/7nGnuBg==", - "dev": true, - "requires": { - "postcss-scss": "^4.0.2", - "stylelint-config-recommended": "^6.0.0", - "stylelint-scss": "^4.0.0" - } - }, - "stylelint-config-standard": { - "version": "24.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard/-/stylelint-config-standard-24.0.0.tgz", - "integrity": "sha512-+RtU7fbNT+VlNbdXJvnjc3USNPZRiRVp/d2DxOF/vBDDTi0kH5RX2Ny6errdtZJH3boO+bmqIYEllEmok4jiuw==", - "dev": true, - "requires": { - "stylelint-config-recommended": "^6.0.0" - } - }, - "stylelint-config-standard-scss": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/stylelint-config-standard-scss/-/stylelint-config-standard-scss-3.0.0.tgz", - "integrity": "sha512-zt3ZbzIbllN1iCmc94e4pDxqpkzeR6CJo5DDXzltshuXr+82B8ylHyMMARNnUYrZH80B7wgY7UkKTYCFM0UUyw==", - "dev": true, - "requires": { - "stylelint-config-recommended-scss": "^5.0.2", - "stylelint-config-standard": "^24.0.0" - } - }, - "stylelint-scss": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/stylelint-scss/-/stylelint-scss-4.6.0.tgz", - "integrity": "sha512-M+E0BQim6G4XEkaceEhfVjP/41C9Klg5/tTPTCQVlgw/jm2tvB+OXJGaU0TDP5rnTCB62aX6w+rT+gqJW/uwjA==", - "dev": true, - "requires": { - "dlv": "^1.1.3", - "postcss-media-query-parser": "^0.2.3", - "postcss-resolve-nested-selector": "^0.1.1", - "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0" - } - }, - "sucrase": { - "version": "3.31.0", - "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.31.0.tgz", - "integrity": "sha512-6QsHnkqyVEzYcaiHsOKkzOtOgdJcb8i54x6AV2hDwyZcY9ZyykGZVw6L/YN98xC0evwTP6utsWWrKRaa8QlfEQ==", - "dev": true, - "requires": { - "commander": "^4.0.0", - "glob": "7.1.6", - "lines-and-columns": "^1.1.6", - "mz": "^2.7.0", - "pirates": "^4.0.1", - "ts-interface-checker": "^0.1.9" - }, - "dependencies": { - "brace-expansion": { - "version": "1.1.11", - "resolved": 
"https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "commander": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", - "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", - "dev": true - }, - "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - } - } - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "requires": { - "has-flag": "^4.0.0" - } - }, - "supports-hyperlinks": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/supports-hyperlinks/-/supports-hyperlinks-2.3.0.tgz", - "integrity": "sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==", - "dev": true, - "requires": { - "has-flag": "^4.0.0", - "supports-color": "^7.0.0" - } - }, - "supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "dev": true - }, - "svg-parser": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz", - "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ==", - "dev": true - }, - "svg-tags": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/svg-tags/-/svg-tags-1.0.0.tgz", - "integrity": "sha512-ovssysQTa+luh7A5Weu3Rta6FJlFBBbInjOh722LIt6klpU2/HtdUbszju/G4devcvk8PGt7FCLv5wftu3THUA==", - "dev": true - }, - "svgo": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/svgo/-/svgo-3.0.2.tgz", - "integrity": "sha512-Z706C1U2pb1+JGP48fbazf3KxHrWOsLme6Rv7imFBn5EnuanDW1GPaA/P1/dvObE670JDePC3mnj0k0B7P0jjQ==", - "requires": { - "@trysound/sax": "0.2.0", - "commander": "^7.2.0", - "css-select": "^5.1.0", - "css-tree": "^2.2.1", - "csso": "^5.0.5", - "picocolors": "^1.0.0" - } - }, - "symbol-tree": { - "version": "3.2.4", - "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", - "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", - "dev": true - }, - "table": { - "version": "6.8.1", - "resolved": "https://registry.npmjs.org/table/-/table-6.8.1.tgz", - "integrity": "sha512-Y4X9zqrCftUhMeH2EptSSERdVKt/nEdijTOacGD/97EKjhQ/Qs8RTlEGABSJNNN8lac9kheH+af7yAkEWlgneA==", - "dev": true, - "requires": { - "ajv": "^8.0.1", - "lodash.truncate": "^4.4.2", - "slice-ansi": "^4.0.0", - "string-width": "^4.2.3", - "strip-ansi": "^6.0.1" - } - }, - "tailwindcss": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.3.1.tgz", - "integrity": "sha512-Vkiouc41d4CEq0ujXl6oiGFQ7bA3WEhUZdTgXAhtKxSy49OmKs8rEfQmupsfF0IGW8fv2iQkp1EVUuapCFrZ9g==", - 
"dev": true, - "requires": { - "arg": "^5.0.2", - "chokidar": "^3.5.3", - "color-name": "^1.1.4", - "didyoumean": "^1.2.2", - "dlv": "^1.1.3", - "fast-glob": "^3.2.12", - "glob-parent": "^6.0.2", - "is-glob": "^4.0.3", - "jiti": "^1.17.2", - "lilconfig": "^2.0.6", - "micromatch": "^4.0.5", - "normalize-path": "^3.0.0", - "object-hash": "^3.0.0", - "picocolors": "^1.0.0", - "postcss": "^8.0.9", - "postcss-import": "^14.1.0", - "postcss-js": "^4.0.0", - "postcss-load-config": "^3.1.4", - "postcss-nested": "6.0.0", - "postcss-selector-parser": "^6.0.11", - "postcss-value-parser": "^4.2.0", - "quick-lru": "^5.1.1", - "resolve": "^1.22.1", - "sucrase": "^3.29.0" - }, - "dependencies": { - "glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dev": true, - "requires": { - "is-glob": "^4.0.3" - } - }, - "quick-lru": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", - "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==", - "dev": true - } - } - }, - "text-table": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", - "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", - "dev": true - }, - "thenify": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", - "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", - "dev": true, - "requires": { - "any-promise": "^1.0.0" - } - }, - "thenify-all": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", - "integrity": 
"sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", - "dev": true, - "requires": { - "thenify": ">= 3.1.0 < 4" - } - }, - "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", - "dev": true - }, - "tiny-invariant": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz", - "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==" - }, - "tiny-warning": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz", - "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==" - }, - "tinybench": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.4.0.tgz", - "integrity": "sha512-iyziEiyFxX4kyxSp+MtY1oCH/lvjH3PxFN8PGCDeqcZWAJ/i+9y+nL85w99PxVzrIvew/GSkSbDYtiGVa85Afg==", - "dev": true - }, - "tinypool": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.4.0.tgz", - "integrity": "sha512-2ksntHOKf893wSAH4z/+JbPpi92esw8Gn9N2deXX+B0EO92hexAVI9GIZZPx7P5aYo5KULfeOSt3kMOmSOy6uA==", - "dev": true - }, - "tinyspy": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-1.1.1.tgz", - "integrity": "sha512-UVq5AXt/gQlti7oxoIg5oi/9r0WpF7DGEVwXgqWSMmyN16+e3tl5lIvTaOpJ3TAtu5xFzWccFRM4R5NaWHF+4g==", - "dev": true - }, - "tmp": { - "version": "0.0.33", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", - "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", - "dev": true, - "requires": { - "os-tmpdir": "~1.0.2" - } - }, - "to-fast-properties": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz", - "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==", - "dev": true - }, - "to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "requires": { - "is-number": "^7.0.0" - } - }, - "tough-cookie": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz", - "integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==", - "dev": true, - "requires": { - "psl": "^1.1.33", - "punycode": "^2.1.1", - "universalify": "^0.2.0", - "url-parse": "^1.5.3" - }, - "dependencies": { - "punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", - "dev": true - } - } - }, - "tr46": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-4.1.1.tgz", - "integrity": "sha512-2lv/66T7e5yNyhAAC4NaKe5nVavzuGJQVVtRYLyQ2OI8tsJ61PMLlelehb0wi2Hx6+hT/OJUWZcw8MjlSRnxvw==", - "dev": true, - "requires": { - "punycode": "^2.3.0" - }, - "dependencies": { - "punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", - "dev": true - } - } - }, - "trim-newlines": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/trim-newlines/-/trim-newlines-3.0.1.tgz", - "integrity": "sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==", - "dev": true - }, - "ts-interface-checker": { - "version": 
"0.1.13", - "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", - "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", - "dev": true - }, - "tsconfig-paths": { - "version": "3.14.2", - "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz", - "integrity": "sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==", - "dev": true, - "requires": { - "@types/json5": "^0.0.29", - "json5": "^1.0.2", - "minimist": "^1.2.6", - "strip-bom": "^3.0.0" - }, - "dependencies": { - "json5": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.2.tgz", - "integrity": "sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==", - "dev": true, - "requires": { - "minimist": "^1.2.0" - } - } - } - }, - "tslib": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.0.tgz", - "integrity": "sha512-336iVw3rtn2BUK7ORdIAHTyxHGRIHVReokCR3XjbckJMK7ms8FysBfhLR8IXnAgy7T0PTPNBWKiH514FOW/WSg==" - }, - "tsutils": { - "version": "3.21.0", - "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz", - "integrity": "sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==", - "dev": true, - "requires": { - "tslib": "^1.8.1" - }, - "dependencies": { - "tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - } - } - }, - "type-check": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", - "integrity": "sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==", - "dev": true, - "requires": { - "prelude-ls": "~1.1.2" - } - }, - 
"type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "dev": true - }, - "type-fest": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-1.4.0.tgz", - "integrity": "sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==", - "dev": true - }, - "typed-array-length": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/typed-array-length/-/typed-array-length-1.0.4.tgz", - "integrity": "sha512-KjZypGq+I/H7HI5HlOoGHkWUUGq+Q0TPhQurLbyrVrvnKTBgzLhIJ7j6J/XTQOi0d1RjyZ0wdas8bKs2p0x3Ng==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "is-typed-array": "^1.1.9" - } - }, - "typescript": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.3.tgz", - "integrity": "sha512-xv8mOEDnigb/tN9PSMTwSEqAnUvkoXMQlicOb0IUVDBSQCgBSaAAROUZYy2IcUy5qU6XajK5jjjO7TMWqBTKZA==", - "dev": true - }, - "uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==", - "dev": true - }, - "ufo": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.1.1.tgz", - "integrity": "sha512-MvlCc4GHrmZdAllBc0iUDowff36Q9Ndw/UzqmEKyrfSzokTd9ZCy1i+IIk5hrYKkjoYVQyNbrw7/F8XJ2rEwTg==", - "dev": true - }, - "uglify-js": { - "version": "3.17.4", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz", - "integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==", - "dev": true - }, - "unbox-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/unbox-primitive/-/unbox-primitive-1.0.2.tgz", - "integrity": 
"sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==", - "dev": true, - "requires": { - "call-bind": "^1.0.2", - "has-bigints": "^1.0.2", - "has-symbols": "^1.0.3", - "which-boxed-primitive": "^1.0.2" - } - }, - "underscore": { - "version": "1.13.6", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.6.tgz", - "integrity": "sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==", - "dev": true - }, - "unicode-canonical-property-names-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz", - "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==", - "dev": true - }, - "unicode-match-property-ecmascript": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", - "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", - "dev": true, - "requires": { - "unicode-canonical-property-names-ecmascript": "^2.0.0", - "unicode-property-aliases-ecmascript": "^2.0.0" - } - }, - "unicode-match-property-value-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz", - "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==", - "dev": true - }, - "unicode-property-aliases-ecmascript": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", - "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", - "dev": true - }, - "universalify": { - 
"version": "0.2.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", - "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", - "dev": true - }, - "unload": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/unload/-/unload-2.2.0.tgz", - "integrity": "sha512-B60uB5TNBLtN6/LsgAf3udH9saB5p7gqJwcFfbOEZ8BcBHnGwCf6G/TGiEqkRAxX7zAFIUtzdrXQSdL3Q/wqNA==", - "requires": { - "@babel/runtime": "^7.6.2", - "detect-node": "^2.0.4" - } - }, - "update-browserslist-db": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.10.tgz", - "integrity": "sha512-OztqDenkfFkbSG+tRxBeAnCVPckDBcvibKd35yDONx6OU8N7sqgwc7rCbkJ/WcYtVRZ4ba68d6byhC21GFh7sQ==", - "requires": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" - } - }, - "update-check": { - "version": "1.5.4", - "resolved": "https://registry.npmjs.org/update-check/-/update-check-1.5.4.tgz", - "integrity": "sha512-5YHsflzHP4t1G+8WGPlvKbJEbAJGCgw+Em+dGR1KmBUbr1J36SJBqlHLjR7oob7sco5hWHGQVcr9B2poIVDDTQ==", - "dev": true, - "requires": { - "registry-auth-token": "3.3.2", - "registry-url": "3.1.0" - } - }, - "uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "requires": { - "punycode": "^2.1.0" - }, - "dependencies": { - "punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", - "dev": true - } - } - }, - "url-parse": { - "version": "1.5.10", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", - "integrity": 
"sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", - "dev": true, - "requires": { - "querystringify": "^2.1.1", - "requires-port": "^1.0.0" - } - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" - }, - "v8-compile-cache": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz", - "integrity": "sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA==", - "dev": true - }, - "validate-npm-package-license": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", - "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", - "dev": true, - "requires": { - "spdx-correct": "^3.0.0", - "spdx-expression-parse": "^3.0.0" - } - }, - "value-equal": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", - "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw==" - }, - "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", - "dev": true - }, - "vite": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/vite/-/vite-4.2.3.tgz", - "integrity": "sha512-kLU+m2q0Y434Y1kCy3TchefAdtFso0ILi0dLyFV8Us3InXTU11H/B5ZTqCKIQHzSKNxVG/yEx813EA9f1imQ9A==", - "dev": true, - "requires": { - "esbuild": "^0.17.5", - "fsevents": "~2.3.2", - "postcss": "^8.4.21", - "resolve": "^1.22.1", - "rollup": "^3.18.0" - } - }, - "vite-node": { - "version": "0.29.8", - 
"resolved": "https://registry.npmjs.org/vite-node/-/vite-node-0.29.8.tgz", - "integrity": "sha512-b6OtCXfk65L6SElVM20q5G546yu10/kNrhg08afEoWlFRJXFq9/6glsvSVY+aI6YeC1tu2TtAqI2jHEQmOmsFw==", - "dev": true, - "requires": { - "cac": "^6.7.14", - "debug": "^4.3.4", - "mlly": "^1.1.0", - "pathe": "^1.1.0", - "picocolors": "^1.0.0", - "vite": "^3.0.0 || ^4.0.0" - } - }, - "vite-plugin-eslint": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/vite-plugin-eslint/-/vite-plugin-eslint-1.8.1.tgz", - "integrity": "sha512-PqdMf3Y2fLO9FsNPmMX+//2BF5SF8nEWspZdgl4kSt7UvHDRHVVfHvxsD7ULYzZrJDGRxR81Nq7TOFgwMnUang==", - "dev": true, - "requires": { - "@rollup/pluginutils": "^4.2.1", - "@types/eslint": "^8.4.5", - "rollup": "^2.77.2" - }, - "dependencies": { - "@rollup/pluginutils": { - "version": "4.2.1", - "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-4.2.1.tgz", - "integrity": "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ==", - "dev": true, - "requires": { - "estree-walker": "^2.0.1", - "picomatch": "^2.2.2" - } - }, - "rollup": { - "version": "2.79.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-2.79.1.tgz", - "integrity": "sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==", - "dev": true, - "requires": { - "fsevents": "~2.3.2" - } - } - } - }, - "vite-plugin-svgr": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/vite-plugin-svgr/-/vite-plugin-svgr-2.4.0.tgz", - "integrity": "sha512-q+mJJol6ThvqkkJvvVFEndI4EaKIjSI0I3jNFgSoC9fXAz1M7kYTVUin8fhUsFojFDKZ9VHKtX6NXNaOLpbsHA==", - "dev": true, - "requires": { - "@rollup/pluginutils": "^5.0.2", - "@svgr/core": "^6.5.1" - } - }, - "vitest": { - "version": "0.29.8", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-0.29.8.tgz", - "integrity": "sha512-JIAVi2GK5cvA6awGpH0HvH/gEG9PZ0a/WoxdiV3PmqK+3CjQMf8c+J/Vhv4mdZ2nRyXFw66sAg6qz7VNkaHfDQ==", - "dev": true, - 
"requires": { - "@types/chai": "^4.3.4", - "@types/chai-subset": "^1.3.3", - "@types/node": "*", - "@vitest/expect": "0.29.8", - "@vitest/runner": "0.29.8", - "@vitest/spy": "0.29.8", - "@vitest/utils": "0.29.8", - "acorn": "^8.8.1", - "acorn-walk": "^8.2.0", - "cac": "^6.7.14", - "chai": "^4.3.7", - "debug": "^4.3.4", - "local-pkg": "^0.4.2", - "pathe": "^1.1.0", - "picocolors": "^1.0.0", - "source-map": "^0.6.1", - "std-env": "^3.3.1", - "strip-literal": "^1.0.0", - "tinybench": "^2.3.1", - "tinypool": "^0.4.0", - "tinyspy": "^1.0.2", - "vite": "^3.0.0 || ^4.0.0", - "vite-node": "0.29.8", - "why-is-node-running": "^2.2.2" - } - }, - "w3c-xmlserializer": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-4.0.0.tgz", - "integrity": "sha512-d+BFHzbiCx6zGfz0HyQ6Rg69w9k19nviJspaj4yNscGjrHu94sVP+aRm75yEbCh+r2/yR+7q6hux9LVtbuTGBw==", - "dev": true, - "requires": { - "xml-name-validator": "^4.0.0" - } - }, - "wcwidth": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", - "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", - "dev": true, - "requires": { - "defaults": "^1.0.3" - } - }, - "web-vitals": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/web-vitals/-/web-vitals-3.3.0.tgz", - "integrity": "sha512-GZsEmJBNclIpViS/7QVOTr7Kbt4BgLeR7kQ5zCCtJVuiWsA+K6xTXaoEXssvl8yYFICEyNmA2Nr+vgBYTnS4bA==" - }, - "webidl-conversions": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", - "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", - "dev": true - }, - "whatwg-encoding": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz", - "integrity": 
"sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg==", - "dev": true, - "requires": { - "iconv-lite": "0.6.3" - }, - "dependencies": { - "iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "requires": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - } - } - } - }, - "whatwg-mimetype": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz", - "integrity": "sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q==", - "dev": true - }, - "whatwg-url": { - "version": "12.0.1", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-12.0.1.tgz", - "integrity": "sha512-Ed/LrqB8EPlGxjS+TrsXcpUond1mhccS3pchLhzSgPCnTimUCKj3IZE75pAs5m6heB2U2TMerKFUXheyHY+VDQ==", - "dev": true, - "requires": { - "tr46": "^4.1.1", - "webidl-conversions": "^7.0.0" - } - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "which-boxed-primitive": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz", - "integrity": "sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==", - "dev": true, - "requires": { - "is-bigint": "^1.0.1", - "is-boolean-object": "^1.1.0", - "is-number-object": "^1.0.4", - "is-string": "^1.0.5", - "is-symbol": "^1.0.3" - } - }, - "which-collection": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/which-collection/-/which-collection-1.0.1.tgz", - "integrity": 
"sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==", - "dev": true, - "requires": { - "is-map": "^2.0.1", - "is-set": "^2.0.1", - "is-weakmap": "^2.0.1", - "is-weakset": "^2.0.1" - } - }, - "which-typed-array": { - "version": "1.1.9", - "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", - "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", - "dev": true, - "requires": { - "available-typed-arrays": "^1.0.5", - "call-bind": "^1.0.2", - "for-each": "^0.3.3", - "gopd": "^1.0.1", - "has-tostringtag": "^1.0.0", - "is-typed-array": "^1.1.10" - } - }, - "why-is-node-running": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", - "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", - "dev": true, - "requires": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - } - }, - "widest-line": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz", - "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==", - "dev": true, - "requires": { - "string-width": "^5.0.1" - }, - "dependencies": { - "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==", - "dev": true - }, - "emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "dev": true - }, - "string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - 
"integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - "dev": true, - "requires": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - } - }, - "strip-ansi": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.0.1.tgz", - "integrity": "sha512-cXNxvT8dFNRVfhVME3JAe98mkXDYN2O1l7jmcwMnOslDeESg1rF/OZMtK0nRAhiari1unG5cD4jG3rapUAkLbw==", - "dev": true, - "requires": { - "ansi-regex": "^6.0.1" - } - } - } - }, - "word-wrap": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.4.tgz", - "integrity": "sha512-2V81OA4ugVo5pRo46hAoD2ivUJx8jXmWXfUkY4KFNw0hEptvN0QfH3K4nHiwzGeKl5rFKedV48QVoqYavy4YpA==", - "dev": true - }, - "wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "dev": true, - "requires": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" - }, - "write-file-atomic": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-4.0.2.tgz", - "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", - "dev": true, - "requires": { - "imurmurhash": "^0.1.4", - "signal-exit": "^3.0.7" - } - }, - "ws": { - "version": "8.13.0", - "resolved": "https://registry.npmjs.org/ws/-/ws-8.13.0.tgz", - "integrity": "sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==", - "dev": true, - "requires": {} - }, - "xml-name-validator": { - "version": 
"4.0.0", - "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", - "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", - "dev": true - }, - "xmlchars": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", - "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", - "dev": true - }, - "xmlcreate": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/xmlcreate/-/xmlcreate-2.0.4.tgz", - "integrity": "sha512-nquOebG4sngPmGPICTS5EnxqhKbCmz5Ox5hsszI2T6U5qdrJizBc+0ilYSEjTSzU0yZcmvppztXe/5Al5fUwdg==", - "dev": true - }, - "y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "dev": true - }, - "yallist": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", - "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", - "dev": true - }, - "yaml": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", - "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", - "dev": true - }, - "yargs": { - "version": "17.7.1", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz", - "integrity": "sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==", - "dev": true, - "requires": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "dependencies": { - "yargs-parser": { - "version": "21.1.1", - "resolved": 
"https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "dev": true - } - } - }, - "yargs-parser": { - "version": "20.2.9", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-20.2.9.tgz", - "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", - "dev": true - }, - "yocto-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", - "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", - "dev": true - }, - "zustand": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/zustand/-/zustand-3.7.2.tgz", - "integrity": "sha512-PIJDIZKtokhof+9+60cpockVOq05sJzHCriyvaLBmEJixseQ1a5Kdov6fWZfWOu5SK9c+FhH1jU0tntLxRJYMA==", - "requires": {} - } } } diff --git a/web/vtadmin/package.json b/web/vtadmin/package.json index bbfab0eab26..93c363128f1 100644 --- a/web/vtadmin/package.json +++ b/web/vtadmin/package.json @@ -3,8 +3,8 @@ "version": "0.1.0", "private": true, "engines": { - "node": ">=18.16.0", - "npm": ">=9.5.1" + "node": ">=20.12.0", + "npm": ">=10.5.0" }, "dependencies": { "@bugsnag/js": "^7.20.0", @@ -98,7 +98,7 @@ "stylelint-config-standard-scss": "^3.0.0", "tailwindcss": "^3.0.18", "typescript": "^5.0.2", - "vite": "^4.2.3", + "vite": "^4.5.3", "vite-plugin-eslint": "^1.8.1", "vite-plugin-svgr": "^2.4.0", "vitest": "^0.29.8" diff --git a/web/vtadmin/src/api/http.test.ts b/web/vtadmin/src/api/http.test.ts index 09cf122d705..140b1a74c70 100644 --- a/web/vtadmin/src/api/http.test.ts +++ b/web/vtadmin/src/api/http.test.ts @@ -93,8 +93,8 @@ describe('api/http', () => { let e: MalformedHttpResponseError = error as MalformedHttpResponseError; /* eslint-disable jest/no-conditional-expect */ expect(e.name).toEqual(MALFORMED_HTTP_RESPONSE_ERROR); - 
expect(e.message).toEqual( - '[status 504] /api/tablets: invalid json response body at http://test-api.com/api/tablets reason: Unexpected token < in JSON at position 0' + expect(e.message).toContain( + '[status 504] /api/tablets: invalid json response body at http://test-api.com/api/tablets' ); expect(errorHandler.notify).toHaveBeenCalledTimes(1); diff --git a/web/vtadmin/src/components/routes/Vtctlds.tsx b/web/vtadmin/src/components/routes/Vtctlds.tsx index 26efd8498b4..33b66b11306 100644 --- a/web/vtadmin/src/components/routes/Vtctlds.tsx +++ b/web/vtadmin/src/components/routes/Vtctlds.tsx @@ -37,7 +37,6 @@ export const Vtctlds = () => { cluster: v.cluster?.name, clusterID: v.cluster?.id, hostname: v.hostname, - fqdn: v.FQDN, })); const filtered = filterNouns(filter, mapped); @@ -50,15 +49,7 @@ export const Vtctlds = () => { return ( -
- {row.fqdn ? ( - - {row.hostname} - - ) : ( - row.hostname - )} -
+
{row.hostname}
{row.cluster} diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 398d93080dc..d6ec9baec6d 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -23,6 +23,62 @@ export namespace vtadmin { */ public static create(rpcImpl: $protobuf.RPCImpl, requestDelimited?: boolean, responseDelimited?: boolean): VTAdmin; + /** + * Calls ApplySchema. + * @param request ApplySchemaRequest message or plain object + * @param callback Node-style callback called with the error, if any, and ApplySchemaResponse + */ + public applySchema(request: vtadmin.IApplySchemaRequest, callback: vtadmin.VTAdmin.ApplySchemaCallback): void; + + /** + * Calls ApplySchema. + * @param request ApplySchemaRequest message or plain object + * @returns Promise + */ + public applySchema(request: vtadmin.IApplySchemaRequest): Promise; + + /** + * Calls CancelSchemaMigration. + * @param request CancelSchemaMigrationRequest message or plain object + * @param callback Node-style callback called with the error, if any, and CancelSchemaMigrationResponse + */ + public cancelSchemaMigration(request: vtadmin.ICancelSchemaMigrationRequest, callback: vtadmin.VTAdmin.CancelSchemaMigrationCallback): void; + + /** + * Calls CancelSchemaMigration. + * @param request CancelSchemaMigrationRequest message or plain object + * @returns Promise + */ + public cancelSchemaMigration(request: vtadmin.ICancelSchemaMigrationRequest): Promise; + + /** + * Calls CleanupSchemaMigration. + * @param request CleanupSchemaMigrationRequest message or plain object + * @param callback Node-style callback called with the error, if any, and CleanupSchemaMigrationResponse + */ + public cleanupSchemaMigration(request: vtadmin.ICleanupSchemaMigrationRequest, callback: vtadmin.VTAdmin.CleanupSchemaMigrationCallback): void; + + /** + * Calls CleanupSchemaMigration. 
+ * @param request CleanupSchemaMigrationRequest message or plain object + * @returns Promise + */ + public cleanupSchemaMigration(request: vtadmin.ICleanupSchemaMigrationRequest): Promise; + + /** + * Calls CompleteSchemaMigration. + * @param request CompleteSchemaMigrationRequest message or plain object + * @param callback Node-style callback called with the error, if any, and CompleteSchemaMigrationResponse + */ + public completeSchemaMigration(request: vtadmin.ICompleteSchemaMigrationRequest, callback: vtadmin.VTAdmin.CompleteSchemaMigrationCallback): void; + + /** + * Calls CompleteSchemaMigration. + * @param request CompleteSchemaMigrationRequest message or plain object + * @returns Promise + */ + public completeSchemaMigration(request: vtadmin.ICompleteSchemaMigrationRequest): Promise; + /** * Calls CreateKeyspace. * @param request CreateKeyspaceRequest message or plain object @@ -261,6 +317,20 @@ export namespace vtadmin { */ public getSchemas(request: vtadmin.IGetSchemasRequest): Promise; + /** + * Calls GetSchemaMigrations. + * @param request GetSchemaMigrationsRequest message or plain object + * @param callback Node-style callback called with the error, if any, and GetSchemaMigrationsResponse + */ + public getSchemaMigrations(request: vtadmin.IGetSchemaMigrationsRequest, callback: vtadmin.VTAdmin.GetSchemaMigrationsCallback): void; + + /** + * Calls GetSchemaMigrations. + * @param request GetSchemaMigrationsRequest message or plain object + * @returns Promise + */ + public getSchemaMigrations(request: vtadmin.IGetSchemaMigrationsRequest): Promise; + /** * Calls GetShardReplicationPositions. * @param request GetShardReplicationPositionsRequest message or plain object @@ -443,6 +513,20 @@ export namespace vtadmin { */ public getWorkflows(request: vtadmin.IGetWorkflowsRequest): Promise; + /** + * Calls LaunchSchemaMigration. 
+ * @param request LaunchSchemaMigrationRequest message or plain object + * @param callback Node-style callback called with the error, if any, and LaunchSchemaMigrationResponse + */ + public launchSchemaMigration(request: vtadmin.ILaunchSchemaMigrationRequest, callback: vtadmin.VTAdmin.LaunchSchemaMigrationCallback): void; + + /** + * Calls LaunchSchemaMigration. + * @param request LaunchSchemaMigrationRequest message or plain object + * @returns Promise + */ + public launchSchemaMigration(request: vtadmin.ILaunchSchemaMigrationRequest): Promise; + /** * Calls PingTablet. * @param request PingTabletRequest message or plain object @@ -555,6 +639,20 @@ export namespace vtadmin { */ public removeKeyspaceCell(request: vtadmin.IRemoveKeyspaceCellRequest): Promise; + /** + * Calls RetrySchemaMigration. + * @param request RetrySchemaMigrationRequest message or plain object + * @param callback Node-style callback called with the error, if any, and RetrySchemaMigrationResponse + */ + public retrySchemaMigration(request: vtadmin.IRetrySchemaMigrationRequest, callback: vtadmin.VTAdmin.RetrySchemaMigrationCallback): void; + + /** + * Calls RetrySchemaMigration. + * @param request RetrySchemaMigrationRequest message or plain object + * @returns Promise + */ + public retrySchemaMigration(request: vtadmin.IRetrySchemaMigrationRequest): Promise; + /** * Calls RunHealthCheck. * @param request RunHealthCheckRequest message or plain object @@ -740,6 +838,34 @@ export namespace vtadmin { namespace VTAdmin { + /** + * Callback as used by {@link vtadmin.VTAdmin#applySchema}. + * @param error Error, if any + * @param [response] ApplySchemaResponse + */ + type ApplySchemaCallback = (error: (Error|null), response?: vtctldata.ApplySchemaResponse) => void; + + /** + * Callback as used by {@link vtadmin.VTAdmin#cancelSchemaMigration}. 
+ * @param error Error, if any + * @param [response] CancelSchemaMigrationResponse + */ + type CancelSchemaMigrationCallback = (error: (Error|null), response?: vtctldata.CancelSchemaMigrationResponse) => void; + + /** + * Callback as used by {@link vtadmin.VTAdmin#cleanupSchemaMigration}. + * @param error Error, if any + * @param [response] CleanupSchemaMigrationResponse + */ + type CleanupSchemaMigrationCallback = (error: (Error|null), response?: vtctldata.CleanupSchemaMigrationResponse) => void; + + /** + * Callback as used by {@link vtadmin.VTAdmin#completeSchemaMigration}. + * @param error Error, if any + * @param [response] CompleteSchemaMigrationResponse + */ + type CompleteSchemaMigrationCallback = (error: (Error|null), response?: vtctldata.CompleteSchemaMigrationResponse) => void; + /** * Callback as used by {@link vtadmin.VTAdmin#createKeyspace}. * @param error Error, if any @@ -859,6 +985,13 @@ export namespace vtadmin { */ type GetSchemasCallback = (error: (Error|null), response?: vtadmin.GetSchemasResponse) => void; + /** + * Callback as used by {@link vtadmin.VTAdmin#getSchemaMigrations}. + * @param error Error, if any + * @param [response] GetSchemaMigrationsResponse + */ + type GetSchemaMigrationsCallback = (error: (Error|null), response?: vtadmin.GetSchemaMigrationsResponse) => void; + /** * Callback as used by {@link vtadmin.VTAdmin#getShardReplicationPositions}. * @param error Error, if any @@ -950,6 +1083,13 @@ export namespace vtadmin { */ type GetWorkflowsCallback = (error: (Error|null), response?: vtadmin.GetWorkflowsResponse) => void; + /** + * Callback as used by {@link vtadmin.VTAdmin#launchSchemaMigration}. + * @param error Error, if any + * @param [response] LaunchSchemaMigrationResponse + */ + type LaunchSchemaMigrationCallback = (error: (Error|null), response?: vtctldata.LaunchSchemaMigrationResponse) => void; + /** * Callback as used by {@link vtadmin.VTAdmin#pingTablet}. 
* @param error Error, if any @@ -1006,6 +1146,13 @@ export namespace vtadmin { */ type RemoveKeyspaceCellCallback = (error: (Error|null), response?: vtadmin.RemoveKeyspaceCellResponse) => void; + /** + * Callback as used by {@link vtadmin.VTAdmin#retrySchemaMigration}. + * @param error Error, if any + * @param [response] RetrySchemaMigrationResponse + */ + type RetrySchemaMigrationCallback = (error: (Error|null), response?: vtctldata.RetrySchemaMigrationResponse) => void; + /** * Callback as used by {@link vtadmin.VTAdmin#runHealthCheck}. * @param error Error, if any @@ -2173,6 +2320,109 @@ export namespace vtadmin { } } + /** Properties of a SchemaMigration. */ + interface ISchemaMigration { + + /** SchemaMigration cluster */ + cluster?: (vtadmin.ICluster|null); + + /** SchemaMigration schema_migration */ + schema_migration?: (vtctldata.ISchemaMigration|null); + } + + /** Represents a SchemaMigration. */ + class SchemaMigration implements ISchemaMigration { + + /** + * Constructs a new SchemaMigration. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.ISchemaMigration); + + /** SchemaMigration cluster. */ + public cluster?: (vtadmin.ICluster|null); + + /** SchemaMigration schema_migration. */ + public schema_migration?: (vtctldata.ISchemaMigration|null); + + /** + * Creates a new SchemaMigration instance using the specified properties. + * @param [properties] Properties to set + * @returns SchemaMigration instance + */ + public static create(properties?: vtadmin.ISchemaMigration): vtadmin.SchemaMigration; + + /** + * Encodes the specified SchemaMigration message. Does not implicitly {@link vtadmin.SchemaMigration.verify|verify} messages. 
+ * @param message SchemaMigration message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.ISchemaMigration, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified SchemaMigration message, length delimited. Does not implicitly {@link vtadmin.SchemaMigration.verify|verify} messages. + * @param message SchemaMigration message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.ISchemaMigration, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.SchemaMigration; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.SchemaMigration; + + /** + * Verifies a SchemaMigration message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a SchemaMigration message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns SchemaMigration + */ + public static fromObject(object: { [k: string]: any }): vtadmin.SchemaMigration; + + /** + * Creates a plain object from a SchemaMigration message. Also converts values to other types if specified. + * @param message SchemaMigration + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.SchemaMigration, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this SchemaMigration to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for SchemaMigration + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a Shard. */ interface IShard { @@ -2964,6 +3214,418 @@ export namespace vtadmin { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of an ApplySchemaRequest. */ + interface IApplySchemaRequest { + + /** ApplySchemaRequest cluster_id */ + cluster_id?: (string|null); + + /** ApplySchemaRequest request */ + request?: (vtctldata.IApplySchemaRequest|null); + } + + /** Represents an ApplySchemaRequest. */ + class ApplySchemaRequest implements IApplySchemaRequest { + + /** + * Constructs a new ApplySchemaRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.IApplySchemaRequest); + + /** ApplySchemaRequest cluster_id. */ + public cluster_id: string; + + /** ApplySchemaRequest request. */ + public request?: (vtctldata.IApplySchemaRequest|null); + + /** + * Creates a new ApplySchemaRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ApplySchemaRequest instance + */ + public static create(properties?: vtadmin.IApplySchemaRequest): vtadmin.ApplySchemaRequest; + + /** + * Encodes the specified ApplySchemaRequest message. Does not implicitly {@link vtadmin.ApplySchemaRequest.verify|verify} messages. + * @param message ApplySchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.IApplySchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplySchemaRequest message, length delimited. Does not implicitly {@link vtadmin.ApplySchemaRequest.verify|verify} messages. + * @param message ApplySchemaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.IApplySchemaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.ApplySchemaRequest; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.ApplySchemaRequest; + + /** + * Verifies an ApplySchemaRequest message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplySchemaRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplySchemaRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.ApplySchemaRequest; + + /** + * Creates a plain object from an ApplySchemaRequest message. Also converts values to other types if specified. + * @param message ApplySchemaRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.ApplySchemaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplySchemaRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplySchemaRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CancelSchemaMigrationRequest. */ + interface ICancelSchemaMigrationRequest { + + /** CancelSchemaMigrationRequest cluster_id */ + cluster_id?: (string|null); + + /** CancelSchemaMigrationRequest request */ + request?: (vtctldata.ICancelSchemaMigrationRequest|null); + } + + /** Represents a CancelSchemaMigrationRequest. */ + class CancelSchemaMigrationRequest implements ICancelSchemaMigrationRequest { + + /** + * Constructs a new CancelSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.ICancelSchemaMigrationRequest); + + /** CancelSchemaMigrationRequest cluster_id. */ + public cluster_id: string; + + /** CancelSchemaMigrationRequest request. 
*/ + public request?: (vtctldata.ICancelSchemaMigrationRequest|null); + + /** + * Creates a new CancelSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CancelSchemaMigrationRequest instance + */ + public static create(properties?: vtadmin.ICancelSchemaMigrationRequest): vtadmin.CancelSchemaMigrationRequest; + + /** + * Encodes the specified CancelSchemaMigrationRequest message. Does not implicitly {@link vtadmin.CancelSchemaMigrationRequest.verify|verify} messages. + * @param message CancelSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.ICancelSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CancelSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.CancelSchemaMigrationRequest.verify|verify} messages. + * @param message CancelSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.ICancelSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.CancelSchemaMigrationRequest; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.CancelSchemaMigrationRequest; + + /** + * Verifies a CancelSchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CancelSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CancelSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.CancelSchemaMigrationRequest; + + /** + * Creates a plain object from a CancelSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message CancelSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.CancelSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CancelSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CancelSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CleanupSchemaMigrationRequest. 
*/ + interface ICleanupSchemaMigrationRequest { + + /** CleanupSchemaMigrationRequest cluster_id */ + cluster_id?: (string|null); + + /** CleanupSchemaMigrationRequest request */ + request?: (vtctldata.ICleanupSchemaMigrationRequest|null); + } + + /** Represents a CleanupSchemaMigrationRequest. */ + class CleanupSchemaMigrationRequest implements ICleanupSchemaMigrationRequest { + + /** + * Constructs a new CleanupSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.ICleanupSchemaMigrationRequest); + + /** CleanupSchemaMigrationRequest cluster_id. */ + public cluster_id: string; + + /** CleanupSchemaMigrationRequest request. */ + public request?: (vtctldata.ICleanupSchemaMigrationRequest|null); + + /** + * Creates a new CleanupSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CleanupSchemaMigrationRequest instance + */ + public static create(properties?: vtadmin.ICleanupSchemaMigrationRequest): vtadmin.CleanupSchemaMigrationRequest; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message. Does not implicitly {@link vtadmin.CleanupSchemaMigrationRequest.verify|verify} messages. + * @param message CleanupSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.ICleanupSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.CleanupSchemaMigrationRequest.verify|verify} messages. 
+ * @param message CleanupSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.ICleanupSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.CleanupSchemaMigrationRequest; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.CleanupSchemaMigrationRequest; + + /** + * Verifies a CleanupSchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CleanupSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CleanupSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.CleanupSchemaMigrationRequest; + + /** + * Creates a plain object from a CleanupSchemaMigrationRequest message. Also converts values to other types if specified. 
+ * @param message CleanupSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.CleanupSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CleanupSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CleanupSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a CompleteSchemaMigrationRequest. */ + interface ICompleteSchemaMigrationRequest { + + /** CompleteSchemaMigrationRequest cluster_id */ + cluster_id?: (string|null); + + /** CompleteSchemaMigrationRequest request */ + request?: (vtctldata.ICompleteSchemaMigrationRequest|null); + } + + /** Represents a CompleteSchemaMigrationRequest. */ + class CompleteSchemaMigrationRequest implements ICompleteSchemaMigrationRequest { + + /** + * Constructs a new CompleteSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.ICompleteSchemaMigrationRequest); + + /** CompleteSchemaMigrationRequest cluster_id. */ + public cluster_id: string; + + /** CompleteSchemaMigrationRequest request. */ + public request?: (vtctldata.ICompleteSchemaMigrationRequest|null); + + /** + * Creates a new CompleteSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns CompleteSchemaMigrationRequest instance + */ + public static create(properties?: vtadmin.ICompleteSchemaMigrationRequest): vtadmin.CompleteSchemaMigrationRequest; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message. Does not implicitly {@link vtadmin.CompleteSchemaMigrationRequest.verify|verify} messages. 
+ * @param message CompleteSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.ICompleteSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.CompleteSchemaMigrationRequest.verify|verify} messages. + * @param message CompleteSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.ICompleteSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.CompleteSchemaMigrationRequest; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.CompleteSchemaMigrationRequest; + + /** + * Verifies a CompleteSchemaMigrationRequest message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a CompleteSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns CompleteSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.CompleteSchemaMigrationRequest; + + /** + * Creates a plain object from a CompleteSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message CompleteSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.CompleteSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this CompleteSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for CompleteSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a CreateKeyspaceRequest. */ interface ICreateKeyspaceRequest { @@ -5727,6 +6389,306 @@ export namespace vtadmin { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a GetSchemaMigrationsRequest. */ + interface IGetSchemaMigrationsRequest { + + /** GetSchemaMigrationsRequest cluster_requests */ + cluster_requests?: (vtadmin.GetSchemaMigrationsRequest.IClusterRequest[]|null); + } + + /** Represents a GetSchemaMigrationsRequest. */ + class GetSchemaMigrationsRequest implements IGetSchemaMigrationsRequest { + + /** + * Constructs a new GetSchemaMigrationsRequest. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.IGetSchemaMigrationsRequest); + + /** GetSchemaMigrationsRequest cluster_requests. */ + public cluster_requests: vtadmin.GetSchemaMigrationsRequest.IClusterRequest[]; + + /** + * Creates a new GetSchemaMigrationsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns GetSchemaMigrationsRequest instance + */ + public static create(properties?: vtadmin.IGetSchemaMigrationsRequest): vtadmin.GetSchemaMigrationsRequest; + + /** + * Encodes the specified GetSchemaMigrationsRequest message. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.verify|verify} messages. + * @param message GetSchemaMigrationsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.IGetSchemaMigrationsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetSchemaMigrationsRequest message, length delimited. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.verify|verify} messages. + * @param message GetSchemaMigrationsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.IGetSchemaMigrationsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetSchemaMigrationsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.GetSchemaMigrationsRequest; + + /** + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GetSchemaMigrationsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.GetSchemaMigrationsRequest; + + /** + * Verifies a GetSchemaMigrationsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetSchemaMigrationsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GetSchemaMigrationsRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.GetSchemaMigrationsRequest; + + /** + * Creates a plain object from a GetSchemaMigrationsRequest message. Also converts values to other types if specified. + * @param message GetSchemaMigrationsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.GetSchemaMigrationsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetSchemaMigrationsRequest to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetSchemaMigrationsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + namespace GetSchemaMigrationsRequest { + + /** Properties of a ClusterRequest. */ + interface IClusterRequest { + + /** ClusterRequest cluster_id */ + cluster_id?: (string|null); + + /** ClusterRequest request */ + request?: (vtctldata.IGetSchemaMigrationsRequest|null); + } + + /** Represents a ClusterRequest. */ + class ClusterRequest implements IClusterRequest { + + /** + * Constructs a new ClusterRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.GetSchemaMigrationsRequest.IClusterRequest); + + /** ClusterRequest cluster_id. */ + public cluster_id: string; + + /** ClusterRequest request. */ + public request?: (vtctldata.IGetSchemaMigrationsRequest|null); + + /** + * Creates a new ClusterRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ClusterRequest instance + */ + public static create(properties?: vtadmin.GetSchemaMigrationsRequest.IClusterRequest): vtadmin.GetSchemaMigrationsRequest.ClusterRequest; + + /** + * Encodes the specified ClusterRequest message. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.ClusterRequest.verify|verify} messages. + * @param message ClusterRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.GetSchemaMigrationsRequest.IClusterRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ClusterRequest message, length delimited. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.ClusterRequest.verify|verify} messages. 
+ * @param message ClusterRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.GetSchemaMigrationsRequest.IClusterRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ClusterRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ClusterRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.GetSchemaMigrationsRequest.ClusterRequest; + + /** + * Decodes a ClusterRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ClusterRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.GetSchemaMigrationsRequest.ClusterRequest; + + /** + * Verifies a ClusterRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ClusterRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ClusterRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.GetSchemaMigrationsRequest.ClusterRequest; + + /** + * Creates a plain object from a ClusterRequest message. Also converts values to other types if specified. 
+ * @param message ClusterRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.GetSchemaMigrationsRequest.ClusterRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ClusterRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ClusterRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + + /** Properties of a GetSchemaMigrationsResponse. */ + interface IGetSchemaMigrationsResponse { + + /** GetSchemaMigrationsResponse schema_migrations */ + schema_migrations?: (vtadmin.ISchemaMigration[]|null); + } + + /** Represents a GetSchemaMigrationsResponse. */ + class GetSchemaMigrationsResponse implements IGetSchemaMigrationsResponse { + + /** + * Constructs a new GetSchemaMigrationsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.IGetSchemaMigrationsResponse); + + /** GetSchemaMigrationsResponse schema_migrations. */ + public schema_migrations: vtadmin.ISchemaMigration[]; + + /** + * Creates a new GetSchemaMigrationsResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns GetSchemaMigrationsResponse instance + */ + public static create(properties?: vtadmin.IGetSchemaMigrationsResponse): vtadmin.GetSchemaMigrationsResponse; + + /** + * Encodes the specified GetSchemaMigrationsResponse message. Does not implicitly {@link vtadmin.GetSchemaMigrationsResponse.verify|verify} messages. 
+ * @param message GetSchemaMigrationsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.IGetSchemaMigrationsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetSchemaMigrationsResponse message, length delimited. Does not implicitly {@link vtadmin.GetSchemaMigrationsResponse.verify|verify} messages. + * @param message GetSchemaMigrationsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.IGetSchemaMigrationsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetSchemaMigrationsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.GetSchemaMigrationsResponse; + + /** + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GetSchemaMigrationsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.GetSchemaMigrationsResponse; + + /** + * Verifies a GetSchemaMigrationsResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetSchemaMigrationsResponse message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns GetSchemaMigrationsResponse + */ + public static fromObject(object: { [k: string]: any }): vtadmin.GetSchemaMigrationsResponse; + + /** + * Creates a plain object from a GetSchemaMigrationsResponse message. Also converts values to other types if specified. + * @param message GetSchemaMigrationsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.GetSchemaMigrationsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetSchemaMigrationsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetSchemaMigrationsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a GetShardReplicationPositionsRequest. */ interface IGetShardReplicationPositionsRequest { @@ -7866,6 +8828,109 @@ export namespace vtadmin { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a LaunchSchemaMigrationRequest. */ + interface ILaunchSchemaMigrationRequest { + + /** LaunchSchemaMigrationRequest cluster_id */ + cluster_id?: (string|null); + + /** LaunchSchemaMigrationRequest request */ + request?: (vtctldata.ILaunchSchemaMigrationRequest|null); + } + + /** Represents a LaunchSchemaMigrationRequest. */ + class LaunchSchemaMigrationRequest implements ILaunchSchemaMigrationRequest { + + /** + * Constructs a new LaunchSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.ILaunchSchemaMigrationRequest); + + /** LaunchSchemaMigrationRequest cluster_id. */ + public cluster_id: string; + + /** LaunchSchemaMigrationRequest request. 
*/ + public request?: (vtctldata.ILaunchSchemaMigrationRequest|null); + + /** + * Creates a new LaunchSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns LaunchSchemaMigrationRequest instance + */ + public static create(properties?: vtadmin.ILaunchSchemaMigrationRequest): vtadmin.LaunchSchemaMigrationRequest; + + /** + * Encodes the specified LaunchSchemaMigrationRequest message. Does not implicitly {@link vtadmin.LaunchSchemaMigrationRequest.verify|verify} messages. + * @param message LaunchSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.ILaunchSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified LaunchSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.LaunchSchemaMigrationRequest.verify|verify} messages. + * @param message LaunchSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.ILaunchSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns LaunchSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.LaunchSchemaMigrationRequest; + + /** + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns LaunchSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.LaunchSchemaMigrationRequest; + + /** + * Verifies a LaunchSchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a LaunchSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns LaunchSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.LaunchSchemaMigrationRequest; + + /** + * Creates a plain object from a LaunchSchemaMigrationRequest message. Also converts values to other types if specified. + * @param message LaunchSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.LaunchSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this LaunchSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for LaunchSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a PingTabletRequest. */ interface IPingTabletRequest { @@ -9928,6 +10993,109 @@ export namespace vtadmin { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a RetrySchemaMigrationRequest. 
*/ + interface IRetrySchemaMigrationRequest { + + /** RetrySchemaMigrationRequest cluster_id */ + cluster_id?: (string|null); + + /** RetrySchemaMigrationRequest request */ + request?: (vtctldata.IRetrySchemaMigrationRequest|null); + } + + /** Represents a RetrySchemaMigrationRequest. */ + class RetrySchemaMigrationRequest implements IRetrySchemaMigrationRequest { + + /** + * Constructs a new RetrySchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtadmin.IRetrySchemaMigrationRequest); + + /** RetrySchemaMigrationRequest cluster_id. */ + public cluster_id: string; + + /** RetrySchemaMigrationRequest request. */ + public request?: (vtctldata.IRetrySchemaMigrationRequest|null); + + /** + * Creates a new RetrySchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns RetrySchemaMigrationRequest instance + */ + public static create(properties?: vtadmin.IRetrySchemaMigrationRequest): vtadmin.RetrySchemaMigrationRequest; + + /** + * Encodes the specified RetrySchemaMigrationRequest message. Does not implicitly {@link vtadmin.RetrySchemaMigrationRequest.verify|verify} messages. + * @param message RetrySchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtadmin.IRetrySchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified RetrySchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.RetrySchemaMigrationRequest.verify|verify} messages. + * @param message RetrySchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtadmin.IRetrySchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns RetrySchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtadmin.RetrySchemaMigrationRequest; + + /** + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns RetrySchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtadmin.RetrySchemaMigrationRequest; + + /** + * Verifies a RetrySchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a RetrySchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns RetrySchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtadmin.RetrySchemaMigrationRequest; + + /** + * Creates a plain object from a RetrySchemaMigrationRequest message. Also converts values to other types if specified. + * @param message RetrySchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtadmin.RetrySchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this RetrySchemaMigrationRequest to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for RetrySchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a RunHealthCheckRequest. */ interface IRunHealthCheckRequest { @@ -12649,6 +13817,9 @@ export namespace mysqlctl { /** ShutdownRequest wait_for_mysqld */ wait_for_mysqld?: (boolean|null); + + /** ShutdownRequest mysql_shutdown_timeout */ + mysql_shutdown_timeout?: (vttime.IDuration|null); } /** Represents a ShutdownRequest. */ @@ -12663,6 +13834,9 @@ export namespace mysqlctl { /** ShutdownRequest wait_for_mysqld. */ public wait_for_mysqld: boolean; + /** ShutdownRequest mysql_shutdown_timeout. */ + public mysql_shutdown_timeout?: (vttime.IDuration|null); + /** * Creates a new ShutdownRequest instance using the specified properties. * @param [properties] Properties to set @@ -15090,9 +16264,6 @@ export namespace topodata { /** Properties of a Keyspace. */ interface IKeyspace { - /** Keyspace served_froms */ - served_froms?: (topodata.Keyspace.IServedFrom[]|null); - /** Keyspace keyspace_type */ keyspace_type?: (topodata.KeyspaceType|null); @@ -15121,9 +16292,6 @@ export namespace topodata { */ constructor(properties?: topodata.IKeyspace); - /** Keyspace served_froms. */ - public served_froms: topodata.Keyspace.IServedFrom[]; - /** Keyspace keyspace_type. */ public keyspace_type: topodata.KeyspaceType; @@ -15220,118 +16388,6 @@ export namespace topodata { public static getTypeUrl(typeUrlPrefix?: string): string; } - namespace Keyspace { - - /** Properties of a ServedFrom. */ - interface IServedFrom { - - /** ServedFrom tablet_type */ - tablet_type?: (topodata.TabletType|null); - - /** ServedFrom cells */ - cells?: (string[]|null); - - /** ServedFrom keyspace */ - keyspace?: (string|null); - } - - /** Represents a ServedFrom. 
*/ - class ServedFrom implements IServedFrom { - - /** - * Constructs a new ServedFrom. - * @param [properties] Properties to set - */ - constructor(properties?: topodata.Keyspace.IServedFrom); - - /** ServedFrom tablet_type. */ - public tablet_type: topodata.TabletType; - - /** ServedFrom cells. */ - public cells: string[]; - - /** ServedFrom keyspace. */ - public keyspace: string; - - /** - * Creates a new ServedFrom instance using the specified properties. - * @param [properties] Properties to set - * @returns ServedFrom instance - */ - public static create(properties?: topodata.Keyspace.IServedFrom): topodata.Keyspace.ServedFrom; - - /** - * Encodes the specified ServedFrom message. Does not implicitly {@link topodata.Keyspace.ServedFrom.verify|verify} messages. - * @param message ServedFrom message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: topodata.Keyspace.IServedFrom, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified ServedFrom message, length delimited. Does not implicitly {@link topodata.Keyspace.ServedFrom.verify|verify} messages. - * @param message ServedFrom message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: topodata.Keyspace.IServedFrom, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a ServedFrom message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): topodata.Keyspace.ServedFrom; - - /** - * Decodes a ServedFrom message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): topodata.Keyspace.ServedFrom; - - /** - * Verifies a ServedFrom message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); - - /** - * Creates a ServedFrom message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns ServedFrom - */ - public static fromObject(object: { [k: string]: any }): topodata.Keyspace.ServedFrom; - - /** - * Creates a plain object from a ServedFrom message. Also converts values to other types if specified. - * @param message ServedFrom - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: topodata.Keyspace.ServedFrom, options?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this ServedFrom to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - - /** - * Gets the default type url for ServedFrom - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } - } - /** Properties of a ShardReplication. */ interface IShardReplication { @@ -16096,9 +17152,6 @@ export namespace topodata { /** SrvKeyspace partitions */ partitions?: (topodata.SrvKeyspace.IKeyspacePartition[]|null); - /** SrvKeyspace served_from */ - served_from?: (topodata.SrvKeyspace.IServedFrom[]|null); - /** SrvKeyspace throttler_config */ throttler_config?: (topodata.IThrottlerConfig|null); } @@ -16115,9 +17168,6 @@ export namespace topodata { /** SrvKeyspace partitions. 
*/ public partitions: topodata.SrvKeyspace.IKeyspacePartition[]; - /** SrvKeyspace served_from. */ - public served_from: topodata.SrvKeyspace.IServedFrom[]; - /** SrvKeyspace throttler_config. */ public throttler_config?: (topodata.IThrottlerConfig|null); @@ -16309,109 +17359,6 @@ export namespace topodata { */ public static getTypeUrl(typeUrlPrefix?: string): string; } - - /** Properties of a ServedFrom. */ - interface IServedFrom { - - /** ServedFrom tablet_type */ - tablet_type?: (topodata.TabletType|null); - - /** ServedFrom keyspace */ - keyspace?: (string|null); - } - - /** Represents a ServedFrom. */ - class ServedFrom implements IServedFrom { - - /** - * Constructs a new ServedFrom. - * @param [properties] Properties to set - */ - constructor(properties?: topodata.SrvKeyspace.IServedFrom); - - /** ServedFrom tablet_type. */ - public tablet_type: topodata.TabletType; - - /** ServedFrom keyspace. */ - public keyspace: string; - - /** - * Creates a new ServedFrom instance using the specified properties. - * @param [properties] Properties to set - * @returns ServedFrom instance - */ - public static create(properties?: topodata.SrvKeyspace.IServedFrom): topodata.SrvKeyspace.ServedFrom; - - /** - * Encodes the specified ServedFrom message. Does not implicitly {@link topodata.SrvKeyspace.ServedFrom.verify|verify} messages. - * @param message ServedFrom message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: topodata.SrvKeyspace.IServedFrom, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified ServedFrom message, length delimited. Does not implicitly {@link topodata.SrvKeyspace.ServedFrom.verify|verify} messages. 
- * @param message ServedFrom message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: topodata.SrvKeyspace.IServedFrom, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a ServedFrom message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): topodata.SrvKeyspace.ServedFrom; - - /** - * Decodes a ServedFrom message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): topodata.SrvKeyspace.ServedFrom; - - /** - * Verifies a ServedFrom message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); - - /** - * Creates a ServedFrom message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns ServedFrom - */ - public static fromObject(object: { [k: string]: any }): topodata.SrvKeyspace.ServedFrom; - - /** - * Creates a plain object from a ServedFrom message. Also converts values to other types if specified. 
- * @param message ServedFrom - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: topodata.SrvKeyspace.ServedFrom, options?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this ServedFrom to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - - /** - * Gets the default type url for ServedFrom - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } } /** Properties of a CellInfo. */ @@ -19933,6 +20880,9 @@ export namespace tabletmanagerdata { /** ApplySchemaRequest batch_size */ batch_size?: (number|Long|null); + + /** ApplySchemaRequest disable_foreign_key_checks */ + disable_foreign_key_checks?: (boolean|null); } /** Represents an ApplySchemaRequest. */ @@ -19965,6 +20915,9 @@ export namespace tabletmanagerdata { /** ApplySchemaRequest batch_size. */ public batch_size: (number|Long); + /** ApplySchemaRequest disable_foreign_key_checks. */ + public disable_foreign_key_checks: boolean; + /** * Creates a new ApplySchemaRequest instance using the specified properties. * @param [properties] Properties to set @@ -20739,6 +21692,9 @@ export namespace tabletmanagerdata { /** ExecuteFetchAsDbaRequest reload_schema */ reload_schema?: (boolean|null); + + /** ExecuteFetchAsDbaRequest disable_foreign_key_checks */ + disable_foreign_key_checks?: (boolean|null); } /** Represents an ExecuteFetchAsDbaRequest. */ @@ -20765,6 +21721,9 @@ export namespace tabletmanagerdata { /** ExecuteFetchAsDbaRequest reload_schema. */ public reload_schema: boolean; + /** ExecuteFetchAsDbaRequest disable_foreign_key_checks. */ + public disable_foreign_key_checks: boolean; + /** * Creates a new ExecuteFetchAsDbaRequest instance using the specified properties. 
* @param [properties] Properties to set @@ -20940,6 +21899,230 @@ export namespace tabletmanagerdata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of an ExecuteMultiFetchAsDbaRequest. */ + interface IExecuteMultiFetchAsDbaRequest { + + /** ExecuteMultiFetchAsDbaRequest sql */ + sql?: (Uint8Array|null); + + /** ExecuteMultiFetchAsDbaRequest db_name */ + db_name?: (string|null); + + /** ExecuteMultiFetchAsDbaRequest max_rows */ + max_rows?: (number|Long|null); + + /** ExecuteMultiFetchAsDbaRequest disable_binlogs */ + disable_binlogs?: (boolean|null); + + /** ExecuteMultiFetchAsDbaRequest reload_schema */ + reload_schema?: (boolean|null); + + /** ExecuteMultiFetchAsDbaRequest disable_foreign_key_checks */ + disable_foreign_key_checks?: (boolean|null); + } + + /** Represents an ExecuteMultiFetchAsDbaRequest. */ + class ExecuteMultiFetchAsDbaRequest implements IExecuteMultiFetchAsDbaRequest { + + /** + * Constructs a new ExecuteMultiFetchAsDbaRequest. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IExecuteMultiFetchAsDbaRequest); + + /** ExecuteMultiFetchAsDbaRequest sql. */ + public sql: Uint8Array; + + /** ExecuteMultiFetchAsDbaRequest db_name. */ + public db_name: string; + + /** ExecuteMultiFetchAsDbaRequest max_rows. */ + public max_rows: (number|Long); + + /** ExecuteMultiFetchAsDbaRequest disable_binlogs. */ + public disable_binlogs: boolean; + + /** ExecuteMultiFetchAsDbaRequest reload_schema. */ + public reload_schema: boolean; + + /** ExecuteMultiFetchAsDbaRequest disable_foreign_key_checks. */ + public disable_foreign_key_checks: boolean; + + /** + * Creates a new ExecuteMultiFetchAsDbaRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ExecuteMultiFetchAsDbaRequest instance + */ + public static create(properties?: tabletmanagerdata.IExecuteMultiFetchAsDbaRequest): tabletmanagerdata.ExecuteMultiFetchAsDbaRequest; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaRequest message. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaRequest.verify|verify} messages. + * @param message ExecuteMultiFetchAsDbaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IExecuteMultiFetchAsDbaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaRequest.verify|verify} messages. + * @param message ExecuteMultiFetchAsDbaRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IExecuteMultiFetchAsDbaRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteMultiFetchAsDbaRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteMultiFetchAsDbaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ExecuteMultiFetchAsDbaRequest; + + /** + * Decodes an ExecuteMultiFetchAsDbaRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ExecuteMultiFetchAsDbaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ExecuteMultiFetchAsDbaRequest; + + /** + * Verifies an ExecuteMultiFetchAsDbaRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteMultiFetchAsDbaRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteMultiFetchAsDbaRequest + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ExecuteMultiFetchAsDbaRequest; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDbaRequest message. Also converts values to other types if specified. + * @param message ExecuteMultiFetchAsDbaRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.ExecuteMultiFetchAsDbaRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteMultiFetchAsDbaRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDbaRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteMultiFetchAsDbaResponse. */ + interface IExecuteMultiFetchAsDbaResponse { + + /** ExecuteMultiFetchAsDbaResponse results */ + results?: (query.IQueryResult[]|null); + } + + /** Represents an ExecuteMultiFetchAsDbaResponse. 
*/ + class ExecuteMultiFetchAsDbaResponse implements IExecuteMultiFetchAsDbaResponse { + + /** + * Constructs a new ExecuteMultiFetchAsDbaResponse. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IExecuteMultiFetchAsDbaResponse); + + /** ExecuteMultiFetchAsDbaResponse results. */ + public results: query.IQueryResult[]; + + /** + * Creates a new ExecuteMultiFetchAsDbaResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteMultiFetchAsDbaResponse instance + */ + public static create(properties?: tabletmanagerdata.IExecuteMultiFetchAsDbaResponse): tabletmanagerdata.ExecuteMultiFetchAsDbaResponse; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaResponse message. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.verify|verify} messages. + * @param message ExecuteMultiFetchAsDbaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IExecuteMultiFetchAsDbaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.verify|verify} messages. + * @param message ExecuteMultiFetchAsDbaResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IExecuteMultiFetchAsDbaResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteMultiFetchAsDbaResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteMultiFetchAsDbaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ExecuteMultiFetchAsDbaResponse; + + /** + * Decodes an ExecuteMultiFetchAsDbaResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteMultiFetchAsDbaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ExecuteMultiFetchAsDbaResponse; + + /** + * Verifies an ExecuteMultiFetchAsDbaResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteMultiFetchAsDbaResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteMultiFetchAsDbaResponse + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ExecuteMultiFetchAsDbaResponse; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDbaResponse message. Also converts values to other types if specified. + * @param message ExecuteMultiFetchAsDbaResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.ExecuteMultiFetchAsDbaResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteMultiFetchAsDbaResponse to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDbaResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of an ExecuteFetchAsAllPrivsRequest. */ interface IExecuteFetchAsAllPrivsRequest { @@ -25177,6 +26360,9 @@ export namespace tabletmanagerdata { /** SetReplicationSourceRequest semiSync */ semiSync?: (boolean|null); + + /** SetReplicationSourceRequest heartbeat_interval */ + heartbeat_interval?: (number|null); } /** Represents a SetReplicationSourceRequest. */ @@ -25203,6 +26389,9 @@ export namespace tabletmanagerdata { /** SetReplicationSourceRequest semiSync. */ public semiSync: boolean; + /** SetReplicationSourceRequest heartbeat_interval. */ + public heartbeat_interval: number; + /** * Creates a new SetReplicationSourceRequest instance using the specified properties. * @param [properties] Properties to set @@ -25952,7 +27141,7 @@ export namespace tabletmanagerdata { interface IBackupRequest { /** BackupRequest concurrency */ - concurrency?: (number|Long|null); + concurrency?: (number|null); /** BackupRequest allow_primary */ allow_primary?: (boolean|null); @@ -25974,7 +27163,7 @@ export namespace tabletmanagerdata { constructor(properties?: tabletmanagerdata.IBackupRequest); /** BackupRequest concurrency. */ - public concurrency: (number|Long); + public concurrency: number; /** BackupRequest allow_primary. */ public allow_primary: boolean; @@ -26404,6 +27593,9 @@ export namespace tabletmanagerdata { /** CreateVReplicationWorkflowRequest stop_after_copy */ stop_after_copy?: (boolean|null); + + /** CreateVReplicationWorkflowRequest options */ + options?: (string|null); } /** Represents a CreateVReplicationWorkflowRequest. 
*/ @@ -26445,6 +27637,9 @@ export namespace tabletmanagerdata { /** CreateVReplicationWorkflowRequest stop_after_copy. */ public stop_after_copy: boolean; + /** CreateVReplicationWorkflowRequest options. */ + public options: string; + /** * Creates a new CreateVReplicationWorkflowRequest instance using the specified properties. * @param [properties] Properties to set @@ -26814,6 +28009,418 @@ export namespace tabletmanagerdata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a HasVReplicationWorkflowsRequest. */ + interface IHasVReplicationWorkflowsRequest { + } + + /** Represents a HasVReplicationWorkflowsRequest. */ + class HasVReplicationWorkflowsRequest implements IHasVReplicationWorkflowsRequest { + + /** + * Constructs a new HasVReplicationWorkflowsRequest. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IHasVReplicationWorkflowsRequest); + + /** + * Creates a new HasVReplicationWorkflowsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns HasVReplicationWorkflowsRequest instance + */ + public static create(properties?: tabletmanagerdata.IHasVReplicationWorkflowsRequest): tabletmanagerdata.HasVReplicationWorkflowsRequest; + + /** + * Encodes the specified HasVReplicationWorkflowsRequest message. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsRequest.verify|verify} messages. + * @param message HasVReplicationWorkflowsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IHasVReplicationWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified HasVReplicationWorkflowsRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsRequest.verify|verify} messages. 
+ * @param message HasVReplicationWorkflowsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IHasVReplicationWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a HasVReplicationWorkflowsRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns HasVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.HasVReplicationWorkflowsRequest; + + /** + * Decodes a HasVReplicationWorkflowsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns HasVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.HasVReplicationWorkflowsRequest; + + /** + * Verifies a HasVReplicationWorkflowsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a HasVReplicationWorkflowsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns HasVReplicationWorkflowsRequest + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.HasVReplicationWorkflowsRequest; + + /** + * Creates a plain object from a HasVReplicationWorkflowsRequest message. 
Also converts values to other types if specified. + * @param message HasVReplicationWorkflowsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.HasVReplicationWorkflowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this HasVReplicationWorkflowsRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for HasVReplicationWorkflowsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a HasVReplicationWorkflowsResponse. */ + interface IHasVReplicationWorkflowsResponse { + + /** HasVReplicationWorkflowsResponse has */ + has?: (boolean|null); + } + + /** Represents a HasVReplicationWorkflowsResponse. */ + class HasVReplicationWorkflowsResponse implements IHasVReplicationWorkflowsResponse { + + /** + * Constructs a new HasVReplicationWorkflowsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IHasVReplicationWorkflowsResponse); + + /** HasVReplicationWorkflowsResponse has. */ + public has: boolean; + + /** + * Creates a new HasVReplicationWorkflowsResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns HasVReplicationWorkflowsResponse instance + */ + public static create(properties?: tabletmanagerdata.IHasVReplicationWorkflowsResponse): tabletmanagerdata.HasVReplicationWorkflowsResponse; + + /** + * Encodes the specified HasVReplicationWorkflowsResponse message. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsResponse.verify|verify} messages. 
+ * @param message HasVReplicationWorkflowsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IHasVReplicationWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified HasVReplicationWorkflowsResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsResponse.verify|verify} messages. + * @param message HasVReplicationWorkflowsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IHasVReplicationWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a HasVReplicationWorkflowsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns HasVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.HasVReplicationWorkflowsResponse; + + /** + * Decodes a HasVReplicationWorkflowsResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns HasVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.HasVReplicationWorkflowsResponse; + + /** + * Verifies a HasVReplicationWorkflowsResponse message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a HasVReplicationWorkflowsResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns HasVReplicationWorkflowsResponse + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.HasVReplicationWorkflowsResponse; + + /** + * Creates a plain object from a HasVReplicationWorkflowsResponse message. Also converts values to other types if specified. + * @param message HasVReplicationWorkflowsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.HasVReplicationWorkflowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this HasVReplicationWorkflowsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for HasVReplicationWorkflowsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ReadVReplicationWorkflowsRequest. 
*/ + interface IReadVReplicationWorkflowsRequest { + + /** ReadVReplicationWorkflowsRequest include_ids */ + include_ids?: (number[]|null); + + /** ReadVReplicationWorkflowsRequest include_workflows */ + include_workflows?: (string[]|null); + + /** ReadVReplicationWorkflowsRequest include_states */ + include_states?: (binlogdata.VReplicationWorkflowState[]|null); + + /** ReadVReplicationWorkflowsRequest exclude_workflows */ + exclude_workflows?: (string[]|null); + + /** ReadVReplicationWorkflowsRequest exclude_states */ + exclude_states?: (binlogdata.VReplicationWorkflowState[]|null); + + /** ReadVReplicationWorkflowsRequest exclude_frozen */ + exclude_frozen?: (boolean|null); + } + + /** Represents a ReadVReplicationWorkflowsRequest. */ + class ReadVReplicationWorkflowsRequest implements IReadVReplicationWorkflowsRequest { + + /** + * Constructs a new ReadVReplicationWorkflowsRequest. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IReadVReplicationWorkflowsRequest); + + /** ReadVReplicationWorkflowsRequest include_ids. */ + public include_ids: number[]; + + /** ReadVReplicationWorkflowsRequest include_workflows. */ + public include_workflows: string[]; + + /** ReadVReplicationWorkflowsRequest include_states. */ + public include_states: binlogdata.VReplicationWorkflowState[]; + + /** ReadVReplicationWorkflowsRequest exclude_workflows. */ + public exclude_workflows: string[]; + + /** ReadVReplicationWorkflowsRequest exclude_states. */ + public exclude_states: binlogdata.VReplicationWorkflowState[]; + + /** ReadVReplicationWorkflowsRequest exclude_frozen. */ + public exclude_frozen: boolean; + + /** + * Creates a new ReadVReplicationWorkflowsRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ReadVReplicationWorkflowsRequest instance + */ + public static create(properties?: tabletmanagerdata.IReadVReplicationWorkflowsRequest): tabletmanagerdata.ReadVReplicationWorkflowsRequest; + + /** + * Encodes the specified ReadVReplicationWorkflowsRequest message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsRequest.verify|verify} messages. + * @param message ReadVReplicationWorkflowsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IReadVReplicationWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ReadVReplicationWorkflowsRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsRequest.verify|verify} messages. + * @param message ReadVReplicationWorkflowsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IReadVReplicationWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ReadVReplicationWorkflowsRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReadVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ReadVReplicationWorkflowsRequest; + + /** + * Decodes a ReadVReplicationWorkflowsRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ReadVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ReadVReplicationWorkflowsRequest; + + /** + * Verifies a ReadVReplicationWorkflowsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ReadVReplicationWorkflowsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReadVReplicationWorkflowsRequest + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ReadVReplicationWorkflowsRequest; + + /** + * Creates a plain object from a ReadVReplicationWorkflowsRequest message. Also converts values to other types if specified. + * @param message ReadVReplicationWorkflowsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.ReadVReplicationWorkflowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ReadVReplicationWorkflowsRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ReadVReplicationWorkflowsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ReadVReplicationWorkflowsResponse. 
*/ + interface IReadVReplicationWorkflowsResponse { + + /** ReadVReplicationWorkflowsResponse workflows */ + workflows?: (tabletmanagerdata.IReadVReplicationWorkflowResponse[]|null); + } + + /** Represents a ReadVReplicationWorkflowsResponse. */ + class ReadVReplicationWorkflowsResponse implements IReadVReplicationWorkflowsResponse { + + /** + * Constructs a new ReadVReplicationWorkflowsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IReadVReplicationWorkflowsResponse); + + /** ReadVReplicationWorkflowsResponse workflows. */ + public workflows: tabletmanagerdata.IReadVReplicationWorkflowResponse[]; + + /** + * Creates a new ReadVReplicationWorkflowsResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ReadVReplicationWorkflowsResponse instance + */ + public static create(properties?: tabletmanagerdata.IReadVReplicationWorkflowsResponse): tabletmanagerdata.ReadVReplicationWorkflowsResponse; + + /** + * Encodes the specified ReadVReplicationWorkflowsResponse message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsResponse.verify|verify} messages. + * @param message ReadVReplicationWorkflowsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IReadVReplicationWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ReadVReplicationWorkflowsResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsResponse.verify|verify} messages. 
+ * @param message ReadVReplicationWorkflowsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IReadVReplicationWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ReadVReplicationWorkflowsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ReadVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.ReadVReplicationWorkflowsResponse; + + /** + * Decodes a ReadVReplicationWorkflowsResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ReadVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.ReadVReplicationWorkflowsResponse; + + /** + * Verifies a ReadVReplicationWorkflowsResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ReadVReplicationWorkflowsResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ReadVReplicationWorkflowsResponse + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.ReadVReplicationWorkflowsResponse; + + /** + * Creates a plain object from a ReadVReplicationWorkflowsResponse message. 
Also converts values to other types if specified. + * @param message ReadVReplicationWorkflowsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.ReadVReplicationWorkflowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ReadVReplicationWorkflowsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ReadVReplicationWorkflowsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a ReadVReplicationWorkflowRequest. */ interface IReadVReplicationWorkflowRequest { @@ -26943,6 +28550,9 @@ export namespace tabletmanagerdata { /** ReadVReplicationWorkflowResponse streams */ streams?: (tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream[]|null); + + /** ReadVReplicationWorkflowResponse options */ + options?: (string|null); } /** Represents a ReadVReplicationWorkflowResponse. */ @@ -26984,6 +28594,9 @@ export namespace tabletmanagerdata { /** ReadVReplicationWorkflowResponse streams. */ public streams: tabletmanagerdata.ReadVReplicationWorkflowResponse.IStream[]; + /** ReadVReplicationWorkflowResponse options. */ + public options: string; + /** * Creates a new ReadVReplicationWorkflowResponse instance using the specified properties. * @param [properties] Properties to set @@ -27596,6 +29209,9 @@ export namespace tabletmanagerdata { /** VDiffReportOptions format */ format?: (string|null); + + /** VDiffReportOptions max_sample_rows */ + max_sample_rows?: (number|Long|null); } /** Represents a VDiffReportOptions. */ @@ -27616,6 +29232,9 @@ export namespace tabletmanagerdata { /** VDiffReportOptions format. */ public format: string; + /** VDiffReportOptions max_sample_rows. 
*/ + public max_sample_rows: (number|Long); + /** * Creates a new VDiffReportOptions instance using the specified properties. * @param [properties] Properties to set @@ -27720,6 +29339,9 @@ export namespace tabletmanagerdata { /** VDiffCoreOptions update_table_stats */ update_table_stats?: (boolean|null); + + /** VDiffCoreOptions max_diff_seconds */ + max_diff_seconds?: (number|Long|null); } /** Represents a VDiffCoreOptions. */ @@ -27755,6 +29377,9 @@ export namespace tabletmanagerdata { /** VDiffCoreOptions update_table_stats. */ public update_table_stats: boolean; + /** VDiffCoreOptions max_diff_seconds. */ + public max_diff_seconds: (number|Long); + /** * Creates a new VDiffCoreOptions instance using the specified properties. * @param [properties] Properties to set @@ -28166,6 +29791,230 @@ export namespace tabletmanagerdata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of an UpdateVReplicationWorkflowsRequest. */ + interface IUpdateVReplicationWorkflowsRequest { + + /** UpdateVReplicationWorkflowsRequest all_workflows */ + all_workflows?: (boolean|null); + + /** UpdateVReplicationWorkflowsRequest include_workflows */ + include_workflows?: (string[]|null); + + /** UpdateVReplicationWorkflowsRequest exclude_workflows */ + exclude_workflows?: (string[]|null); + + /** UpdateVReplicationWorkflowsRequest state */ + state?: (binlogdata.VReplicationWorkflowState|null); + + /** UpdateVReplicationWorkflowsRequest message */ + message?: (string|null); + + /** UpdateVReplicationWorkflowsRequest stop_position */ + stop_position?: (string|null); + } + + /** Represents an UpdateVReplicationWorkflowsRequest. */ + class UpdateVReplicationWorkflowsRequest implements IUpdateVReplicationWorkflowsRequest { + + /** + * Constructs a new UpdateVReplicationWorkflowsRequest. 
+ * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowsRequest); + + /** UpdateVReplicationWorkflowsRequest all_workflows. */ + public all_workflows: boolean; + + /** UpdateVReplicationWorkflowsRequest include_workflows. */ + public include_workflows: string[]; + + /** UpdateVReplicationWorkflowsRequest exclude_workflows. */ + public exclude_workflows: string[]; + + /** UpdateVReplicationWorkflowsRequest state. */ + public state: binlogdata.VReplicationWorkflowState; + + /** UpdateVReplicationWorkflowsRequest message. */ + public message: string; + + /** UpdateVReplicationWorkflowsRequest stop_position. */ + public stop_position: string; + + /** + * Creates a new UpdateVReplicationWorkflowsRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns UpdateVReplicationWorkflowsRequest instance + */ + public static create(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowsRequest): tabletmanagerdata.UpdateVReplicationWorkflowsRequest; + + /** + * Encodes the specified UpdateVReplicationWorkflowsRequest message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsRequest.verify|verify} messages. + * @param message UpdateVReplicationWorkflowsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IUpdateVReplicationWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified UpdateVReplicationWorkflowsRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsRequest.verify|verify} messages. 
+ * @param message UpdateVReplicationWorkflowsRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IUpdateVReplicationWorkflowsRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an UpdateVReplicationWorkflowsRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns UpdateVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.UpdateVReplicationWorkflowsRequest; + + /** + * Decodes an UpdateVReplicationWorkflowsRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns UpdateVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.UpdateVReplicationWorkflowsRequest; + + /** + * Verifies an UpdateVReplicationWorkflowsRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an UpdateVReplicationWorkflowsRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns UpdateVReplicationWorkflowsRequest + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.UpdateVReplicationWorkflowsRequest; + + /** + * Creates a plain object from an UpdateVReplicationWorkflowsRequest message. 
Also converts values to other types if specified. + * @param message UpdateVReplicationWorkflowsRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.UpdateVReplicationWorkflowsRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this UpdateVReplicationWorkflowsRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for UpdateVReplicationWorkflowsRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an UpdateVReplicationWorkflowsResponse. */ + interface IUpdateVReplicationWorkflowsResponse { + + /** UpdateVReplicationWorkflowsResponse result */ + result?: (query.IQueryResult|null); + } + + /** Represents an UpdateVReplicationWorkflowsResponse. */ + class UpdateVReplicationWorkflowsResponse implements IUpdateVReplicationWorkflowsResponse { + + /** + * Constructs a new UpdateVReplicationWorkflowsResponse. + * @param [properties] Properties to set + */ + constructor(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowsResponse); + + /** UpdateVReplicationWorkflowsResponse result. */ + public result?: (query.IQueryResult|null); + + /** + * Creates a new UpdateVReplicationWorkflowsResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns UpdateVReplicationWorkflowsResponse instance + */ + public static create(properties?: tabletmanagerdata.IUpdateVReplicationWorkflowsResponse): tabletmanagerdata.UpdateVReplicationWorkflowsResponse; + + /** + * Encodes the specified UpdateVReplicationWorkflowsResponse message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsResponse.verify|verify} messages. 
+ * @param message UpdateVReplicationWorkflowsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: tabletmanagerdata.IUpdateVReplicationWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified UpdateVReplicationWorkflowsResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsResponse.verify|verify} messages. + * @param message UpdateVReplicationWorkflowsResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: tabletmanagerdata.IUpdateVReplicationWorkflowsResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an UpdateVReplicationWorkflowsResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns UpdateVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): tabletmanagerdata.UpdateVReplicationWorkflowsResponse; + + /** + * Decodes an UpdateVReplicationWorkflowsResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns UpdateVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): tabletmanagerdata.UpdateVReplicationWorkflowsResponse; + + /** + * Verifies an UpdateVReplicationWorkflowsResponse message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an UpdateVReplicationWorkflowsResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns UpdateVReplicationWorkflowsResponse + */ + public static fromObject(object: { [k: string]: any }): tabletmanagerdata.UpdateVReplicationWorkflowsResponse; + + /** + * Creates a plain object from an UpdateVReplicationWorkflowsResponse message. Also converts values to other types if specified. + * @param message UpdateVReplicationWorkflowsResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: tabletmanagerdata.UpdateVReplicationWorkflowsResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this UpdateVReplicationWorkflowsResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for UpdateVReplicationWorkflowsResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a ResetSequencesRequest. */ interface IResetSequencesRequest { @@ -29464,6 +31313,9 @@ export namespace binlogdata { /** Rule convert_int_to_enum */ convert_int_to_enum?: ({ [k: string]: boolean }|null); + + /** Rule force_unique_key */ + force_unique_key?: (string|null); } /** Represents a Rule. */ @@ -29499,6 +31351,9 @@ export namespace binlogdata { /** Rule convert_int_to_enum. */ public convert_int_to_enum: { [k: string]: boolean }; + /** Rule force_unique_key. */ + public force_unique_key: string; + /** * Creates a new Rule instance using the specified properties. 
* @param [properties] Properties to set @@ -30275,6 +32130,9 @@ export namespace binlogdata { /** FieldEvent shard */ shard?: (string|null); + + /** FieldEvent enum_set_string_values */ + enum_set_string_values?: (boolean|null); } /** Represents a FieldEvent. */ @@ -30298,6 +32156,9 @@ export namespace binlogdata { /** FieldEvent shard. */ public shard: string; + /** FieldEvent enum_set_string_values. */ + public enum_set_string_values: boolean; + /** * Creates a new FieldEvent instance using the specified properties. * @param [properties] Properties to set @@ -39450,6 +41311,9 @@ export namespace query { /** RealtimeStats view_schema_changed */ view_schema_changed?: (string[]|null); + + /** RealtimeStats udfs_changed */ + udfs_changed?: (boolean|null); } /** Represents a RealtimeStats. */ @@ -39485,6 +41349,9 @@ export namespace query { /** RealtimeStats view_schema_changed. */ public view_schema_changed: string[]; + /** RealtimeStats udfs_changed. */ + public udfs_changed: boolean; + /** * Creates a new RealtimeStats instance using the specified properties. * @param [properties] Properties to set @@ -39926,7 +41793,8 @@ export namespace query { enum SchemaTableType { VIEWS = 0, TABLES = 1, - ALL = 2 + ALL = 2, + UDFS = 3 } /** Properties of a GetSchemaRequest. */ @@ -40038,9 +41906,121 @@ export namespace query { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a UDFInfo. */ + interface IUDFInfo { + + /** UDFInfo name */ + name?: (string|null); + + /** UDFInfo aggregating */ + aggregating?: (boolean|null); + + /** UDFInfo return_type */ + return_type?: (query.Type|null); + } + + /** Represents a UDFInfo. */ + class UDFInfo implements IUDFInfo { + + /** + * Constructs a new UDFInfo. + * @param [properties] Properties to set + */ + constructor(properties?: query.IUDFInfo); + + /** UDFInfo name. */ + public name: string; + + /** UDFInfo aggregating. */ + public aggregating: boolean; + + /** UDFInfo return_type. 
*/ + public return_type: query.Type; + + /** + * Creates a new UDFInfo instance using the specified properties. + * @param [properties] Properties to set + * @returns UDFInfo instance + */ + public static create(properties?: query.IUDFInfo): query.UDFInfo; + + /** + * Encodes the specified UDFInfo message. Does not implicitly {@link query.UDFInfo.verify|verify} messages. + * @param message UDFInfo message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: query.IUDFInfo, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified UDFInfo message, length delimited. Does not implicitly {@link query.UDFInfo.verify|verify} messages. + * @param message UDFInfo message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: query.IUDFInfo, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a UDFInfo message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns UDFInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): query.UDFInfo; + + /** + * Decodes a UDFInfo message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns UDFInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): query.UDFInfo; + + /** + * Verifies a UDFInfo message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a UDFInfo message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns UDFInfo + */ + public static fromObject(object: { [k: string]: any }): query.UDFInfo; + + /** + * Creates a plain object from a UDFInfo message. Also converts values to other types if specified. + * @param message UDFInfo + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: query.UDFInfo, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this UDFInfo to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for UDFInfo + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a GetSchemaResponse. */ interface IGetSchemaResponse { + /** GetSchemaResponse udfs */ + udfs?: (query.IUDFInfo[]|null); + /** GetSchemaResponse table_definition */ table_definition?: ({ [k: string]: string }|null); } @@ -40054,6 +42034,9 @@ export namespace query { */ constructor(properties?: query.IGetSchemaResponse); + /** GetSchemaResponse udfs. */ + public udfs: query.IUDFInfo[]; + /** GetSchemaResponse table_definition. */ public table_definition: { [k: string]: string }; @@ -40362,6 +42345,109 @@ export namespace replicationdata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a Configuration. */ + interface IConfiguration { + + /** Configuration heartbeat_interval */ + heartbeat_interval?: (number|null); + + /** Configuration replica_net_timeout */ + replica_net_timeout?: (number|null); + } + + /** Represents a Configuration. 
*/ + class Configuration implements IConfiguration { + + /** + * Constructs a new Configuration. + * @param [properties] Properties to set + */ + constructor(properties?: replicationdata.IConfiguration); + + /** Configuration heartbeat_interval. */ + public heartbeat_interval: number; + + /** Configuration replica_net_timeout. */ + public replica_net_timeout: number; + + /** + * Creates a new Configuration instance using the specified properties. + * @param [properties] Properties to set + * @returns Configuration instance + */ + public static create(properties?: replicationdata.IConfiguration): replicationdata.Configuration; + + /** + * Encodes the specified Configuration message. Does not implicitly {@link replicationdata.Configuration.verify|verify} messages. + * @param message Configuration message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: replicationdata.IConfiguration, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified Configuration message, length delimited. Does not implicitly {@link replicationdata.Configuration.verify|verify} messages. + * @param message Configuration message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: replicationdata.IConfiguration, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a Configuration message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns Configuration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): replicationdata.Configuration; + + /** + * Decodes a Configuration message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns Configuration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): replicationdata.Configuration; + + /** + * Verifies a Configuration message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a Configuration message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns Configuration + */ + public static fromObject(object: { [k: string]: any }): replicationdata.Configuration; + + /** + * Creates a plain object from a Configuration message. Also converts values to other types if specified. + * @param message Configuration + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: replicationdata.Configuration, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this Configuration to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for Configuration + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a StopReplicationStatus. */ interface IStopReplicationStatus { @@ -40639,6 +42725,9 @@ export namespace replicationdata { /** FullStatus super_read_only */ super_read_only?: (boolean|null); + + /** FullStatus replication_configuration */ + replication_configuration?: (replicationdata.IConfiguration|null); } /** Represents a FullStatus. */ @@ -40713,6 +42802,9 @@ export namespace replicationdata { /** FullStatus super_read_only. 
*/ public super_read_only: boolean; + /** FullStatus replication_configuration. */ + public replication_configuration?: (replicationdata.IConfiguration|null); + /** * Creates a new FullStatus instance using the specified properties. * @param [properties] Properties to set @@ -41012,6 +43104,9 @@ export namespace vschema { /** Keyspace foreign_key_mode */ foreign_key_mode?: (vschema.Keyspace.ForeignKeyMode|null); + + /** Keyspace multi_tenant_spec */ + multi_tenant_spec?: (vschema.IMultiTenantSpec|null); } /** Represents a Keyspace. */ @@ -41038,6 +43133,9 @@ export namespace vschema { /** Keyspace foreign_key_mode. */ public foreign_key_mode: vschema.Keyspace.ForeignKeyMode; + /** Keyspace multi_tenant_spec. */ + public multi_tenant_spec?: (vschema.IMultiTenantSpec|null); + /** * Creates a new Keyspace instance using the specified properties. * @param [properties] Properties to set @@ -41127,6 +43225,109 @@ export namespace vschema { } } + /** Properties of a MultiTenantSpec. */ + interface IMultiTenantSpec { + + /** MultiTenantSpec tenant_id_column_name */ + tenant_id_column_name?: (string|null); + + /** MultiTenantSpec tenant_id_column_type */ + tenant_id_column_type?: (query.Type|null); + } + + /** Represents a MultiTenantSpec. */ + class MultiTenantSpec implements IMultiTenantSpec { + + /** + * Constructs a new MultiTenantSpec. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.IMultiTenantSpec); + + /** MultiTenantSpec tenant_id_column_name. */ + public tenant_id_column_name: string; + + /** MultiTenantSpec tenant_id_column_type. */ + public tenant_id_column_type: query.Type; + + /** + * Creates a new MultiTenantSpec instance using the specified properties. + * @param [properties] Properties to set + * @returns MultiTenantSpec instance + */ + public static create(properties?: vschema.IMultiTenantSpec): vschema.MultiTenantSpec; + + /** + * Encodes the specified MultiTenantSpec message. 
Does not implicitly {@link vschema.MultiTenantSpec.verify|verify} messages. + * @param message MultiTenantSpec message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.IMultiTenantSpec, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified MultiTenantSpec message, length delimited. Does not implicitly {@link vschema.MultiTenantSpec.verify|verify} messages. + * @param message MultiTenantSpec message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.IMultiTenantSpec, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a MultiTenantSpec message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns MultiTenantSpec + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.MultiTenantSpec; + + /** + * Decodes a MultiTenantSpec message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns MultiTenantSpec + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.MultiTenantSpec; + + /** + * Verifies a MultiTenantSpec message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a MultiTenantSpec message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns MultiTenantSpec + */ + public static fromObject(object: { [k: string]: any }): vschema.MultiTenantSpec; + + /** + * Creates a plain object from a MultiTenantSpec message. Also converts values to other types if specified. + * @param message MultiTenantSpec + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.MultiTenantSpec, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this MultiTenantSpec to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for MultiTenantSpec + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a Vindex. */ interface IVindex { @@ -41592,6 +43793,24 @@ export namespace vschema { /** Column invisible */ invisible?: (boolean|null); + + /** Column default */ + "default"?: (string|null); + + /** Column collation_name */ + collation_name?: (string|null); + + /** Column size */ + size?: (number|null); + + /** Column scale */ + scale?: (number|null); + + /** Column nullable */ + nullable?: (boolean|null); + + /** Column values */ + values?: (string[]|null); } /** Represents a Column. */ @@ -41612,6 +43831,27 @@ export namespace vschema { /** Column invisible. */ public invisible: boolean; + /** Column default. */ + public default: string; + + /** Column collation_name. */ + public collation_name: string; + + /** Column size. */ + public size: number; + + /** Column scale. */ + public scale: number; + + /** Column nullable. */ + public nullable?: (boolean|null); + + /** Column values. */ + public values: string[]; + + /** Column _nullable. */ + public _nullable?: "nullable"; + /** * Creates a new Column instance using the specified properties. 
* @param [properties] Properties to set @@ -41701,6 +43941,9 @@ export namespace vschema { /** SrvVSchema shard_routing_rules */ shard_routing_rules?: (vschema.IShardRoutingRules|null); + + /** SrvVSchema keyspace_routing_rules */ + keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); } /** Represents a SrvVSchema. */ @@ -41721,6 +43964,9 @@ export namespace vschema { /** SrvVSchema shard_routing_rules. */ public shard_routing_rules?: (vschema.IShardRoutingRules|null); + /** SrvVSchema keyspace_routing_rules. */ + public keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + /** * Creates a new SrvVSchema instance using the specified properties. * @param [properties] Properties to set @@ -42004,6 +44250,206 @@ export namespace vschema { */ public static getTypeUrl(typeUrlPrefix?: string): string; } + + /** Properties of a KeyspaceRoutingRules. */ + interface IKeyspaceRoutingRules { + + /** KeyspaceRoutingRules rules */ + rules?: (vschema.IKeyspaceRoutingRule[]|null); + } + + /** Represents a KeyspaceRoutingRules. */ + class KeyspaceRoutingRules implements IKeyspaceRoutingRules { + + /** + * Constructs a new KeyspaceRoutingRules. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.IKeyspaceRoutingRules); + + /** KeyspaceRoutingRules rules. */ + public rules: vschema.IKeyspaceRoutingRule[]; + + /** + * Creates a new KeyspaceRoutingRules instance using the specified properties. + * @param [properties] Properties to set + * @returns KeyspaceRoutingRules instance + */ + public static create(properties?: vschema.IKeyspaceRoutingRules): vschema.KeyspaceRoutingRules; + + /** + * Encodes the specified KeyspaceRoutingRules message. Does not implicitly {@link vschema.KeyspaceRoutingRules.verify|verify} messages. 
+ * @param message KeyspaceRoutingRules message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.IKeyspaceRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified KeyspaceRoutingRules message, length delimited. Does not implicitly {@link vschema.KeyspaceRoutingRules.verify|verify} messages. + * @param message KeyspaceRoutingRules message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.IKeyspaceRoutingRules, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a KeyspaceRoutingRules message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns KeyspaceRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.KeyspaceRoutingRules; + + /** + * Decodes a KeyspaceRoutingRules message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns KeyspaceRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.KeyspaceRoutingRules; + + /** + * Verifies a KeyspaceRoutingRules message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a KeyspaceRoutingRules message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns KeyspaceRoutingRules + */ + public static fromObject(object: { [k: string]: any }): vschema.KeyspaceRoutingRules; + + /** + * Creates a plain object from a KeyspaceRoutingRules message. Also converts values to other types if specified. + * @param message KeyspaceRoutingRules + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.KeyspaceRoutingRules, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this KeyspaceRoutingRules to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for KeyspaceRoutingRules + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a KeyspaceRoutingRule. */ + interface IKeyspaceRoutingRule { + + /** KeyspaceRoutingRule from_keyspace */ + from_keyspace?: (string|null); + + /** KeyspaceRoutingRule to_keyspace */ + to_keyspace?: (string|null); + } + + /** Represents a KeyspaceRoutingRule. */ + class KeyspaceRoutingRule implements IKeyspaceRoutingRule { + + /** + * Constructs a new KeyspaceRoutingRule. + * @param [properties] Properties to set + */ + constructor(properties?: vschema.IKeyspaceRoutingRule); + + /** KeyspaceRoutingRule from_keyspace. */ + public from_keyspace: string; + + /** KeyspaceRoutingRule to_keyspace. */ + public to_keyspace: string; + + /** + * Creates a new KeyspaceRoutingRule instance using the specified properties. + * @param [properties] Properties to set + * @returns KeyspaceRoutingRule instance + */ + public static create(properties?: vschema.IKeyspaceRoutingRule): vschema.KeyspaceRoutingRule; + + /** + * Encodes the specified KeyspaceRoutingRule message. Does not implicitly {@link vschema.KeyspaceRoutingRule.verify|verify} messages. 
+ * @param message KeyspaceRoutingRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vschema.IKeyspaceRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified KeyspaceRoutingRule message, length delimited. Does not implicitly {@link vschema.KeyspaceRoutingRule.verify|verify} messages. + * @param message KeyspaceRoutingRule message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vschema.IKeyspaceRoutingRule, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a KeyspaceRoutingRule message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns KeyspaceRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vschema.KeyspaceRoutingRule; + + /** + * Decodes a KeyspaceRoutingRule message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns KeyspaceRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vschema.KeyspaceRoutingRule; + + /** + * Verifies a KeyspaceRoutingRule message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a KeyspaceRoutingRule message from a plain object. Also converts values to their respective internal types. 
+ * @param object Plain object + * @returns KeyspaceRoutingRule + */ + public static fromObject(object: { [k: string]: any }): vschema.KeyspaceRoutingRule; + + /** + * Creates a plain object from a KeyspaceRoutingRule message. Also converts values to other types if specified. + * @param message KeyspaceRoutingRule + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vschema.KeyspaceRoutingRule, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this KeyspaceRoutingRule to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for KeyspaceRoutingRule + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } } /** Namespace vtctldata. */ @@ -42375,6 +44821,9 @@ export namespace vtctldata { /** MaterializeSettings atomic_copy */ atomic_copy?: (boolean|null); + + /** MaterializeSettings workflow_options */ + workflow_options?: (vtctldata.IWorkflowOptions|null); } /** Represents a MaterializeSettings. */ @@ -42434,6 +44883,9 @@ export namespace vtctldata { /** MaterializeSettings atomic_copy. */ public atomic_copy: boolean; + /** MaterializeSettings workflow_options. */ + public workflow_options?: (vtctldata.IWorkflowOptions|null); + /** * Creates a new MaterializeSettings instance using the specified properties. * @param [properties] Properties to set @@ -43171,6 +45623,115 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a WorkflowOptions. 
*/ + interface IWorkflowOptions { + + /** WorkflowOptions tenant_id */ + tenant_id?: (string|null); + + /** WorkflowOptions strip_sharded_auto_increment */ + strip_sharded_auto_increment?: (boolean|null); + + /** WorkflowOptions shards */ + shards?: (string[]|null); + } + + /** Represents a WorkflowOptions. */ + class WorkflowOptions implements IWorkflowOptions { + + /** + * Constructs a new WorkflowOptions. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IWorkflowOptions); + + /** WorkflowOptions tenant_id. */ + public tenant_id: string; + + /** WorkflowOptions strip_sharded_auto_increment. */ + public strip_sharded_auto_increment: boolean; + + /** WorkflowOptions shards. */ + public shards: string[]; + + /** + * Creates a new WorkflowOptions instance using the specified properties. + * @param [properties] Properties to set + * @returns WorkflowOptions instance + */ + public static create(properties?: vtctldata.IWorkflowOptions): vtctldata.WorkflowOptions; + + /** + * Encodes the specified WorkflowOptions message. Does not implicitly {@link vtctldata.WorkflowOptions.verify|verify} messages. + * @param message WorkflowOptions message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IWorkflowOptions, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified WorkflowOptions message, length delimited. Does not implicitly {@link vtctldata.WorkflowOptions.verify|verify} messages. + * @param message WorkflowOptions message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IWorkflowOptions, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a WorkflowOptions message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns WorkflowOptions + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.WorkflowOptions; + + /** + * Decodes a WorkflowOptions message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns WorkflowOptions + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.WorkflowOptions; + + /** + * Verifies a WorkflowOptions message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a WorkflowOptions message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns WorkflowOptions + */ + public static fromObject(object: { [k: string]: any }): vtctldata.WorkflowOptions; + + /** + * Creates a plain object from a WorkflowOptions message. Also converts values to other types if specified. + * @param message WorkflowOptions + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.WorkflowOptions, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this WorkflowOptions to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for WorkflowOptions + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a Workflow. */ interface IWorkflow { @@ -43200,6 +45761,9 @@ export namespace vtctldata { /** Workflow defer_secondary_keys */ defer_secondary_keys?: (boolean|null); + + /** Workflow options */ + options?: (vtctldata.IWorkflowOptions|null); } /** Represents a Workflow. */ @@ -43238,6 +45802,9 @@ export namespace vtctldata { /** Workflow defer_secondary_keys. */ public defer_secondary_keys: boolean; + /** Workflow options. */ + public options?: (vtctldata.IWorkflowOptions|null); + /** * Creates a new Workflow instance using the specified properties. * @param [properties] Properties to set @@ -43583,6 +46150,15 @@ export namespace vtctldata { /** Stream throttler_status */ throttler_status?: (vtctldata.Workflow.Stream.IThrottlerStatus|null); + + /** Stream tablet_types */ + tablet_types?: (topodata.TabletType[]|null); + + /** Stream tablet_selection_preference */ + tablet_selection_preference?: (tabletmanagerdata.TabletSelectionPreference|null); + + /** Stream cells */ + cells?: (string[]|null); } /** Represents a Stream. */ @@ -43645,6 +46221,15 @@ export namespace vtctldata { /** Stream throttler_status. */ public throttler_status?: (vtctldata.Workflow.Stream.IThrottlerStatus|null); + /** Stream tablet_types. */ + public tablet_types: topodata.TabletType[]; + + /** Stream tablet_selection_preference. */ + public tablet_selection_preference: tabletmanagerdata.TabletSelectionPreference; + + /** Stream cells. */ + public cells: string[]; + /** * Creates a new Stream instance using the specified properties. 
* @param [properties] Properties to set @@ -43733,6 +46318,9 @@ export namespace vtctldata { /** CopyState last_pk */ last_pk?: (string|null); + + /** CopyState stream_id */ + stream_id?: (number|Long|null); } /** Represents a CopyState. */ @@ -43750,6 +46338,9 @@ export namespace vtctldata { /** CopyState last_pk. */ public last_pk: string; + /** CopyState stream_id. */ + public stream_id: (number|Long); + /** * Creates a new CopyState instance using the specified properties. * @param [properties] Properties to set @@ -44460,6 +47051,212 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of an ApplyKeyspaceRoutingRulesRequest. */ + interface IApplyKeyspaceRoutingRulesRequest { + + /** ApplyKeyspaceRoutingRulesRequest keyspace_routing_rules */ + keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + + /** ApplyKeyspaceRoutingRulesRequest skip_rebuild */ + skip_rebuild?: (boolean|null); + + /** ApplyKeyspaceRoutingRulesRequest rebuild_cells */ + rebuild_cells?: (string[]|null); + } + + /** Represents an ApplyKeyspaceRoutingRulesRequest. */ + class ApplyKeyspaceRoutingRulesRequest implements IApplyKeyspaceRoutingRulesRequest { + + /** + * Constructs a new ApplyKeyspaceRoutingRulesRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyKeyspaceRoutingRulesRequest); + + /** ApplyKeyspaceRoutingRulesRequest keyspace_routing_rules. */ + public keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + + /** ApplyKeyspaceRoutingRulesRequest skip_rebuild. */ + public skip_rebuild: boolean; + + /** ApplyKeyspaceRoutingRulesRequest rebuild_cells. */ + public rebuild_cells: string[]; + + /** + * Creates a new ApplyKeyspaceRoutingRulesRequest instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ApplyKeyspaceRoutingRulesRequest instance + */ + public static create(properties?: vtctldata.IApplyKeyspaceRoutingRulesRequest): vtctldata.ApplyKeyspaceRoutingRulesRequest; + + /** + * Encodes the specified ApplyKeyspaceRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesRequest.verify|verify} messages. + * @param message ApplyKeyspaceRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyKeyspaceRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyKeyspaceRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesRequest.verify|verify} messages. + * @param message ApplyKeyspaceRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyKeyspaceRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyKeyspaceRoutingRulesRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyKeyspaceRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyKeyspaceRoutingRulesRequest; + + /** + * Decodes an ApplyKeyspaceRoutingRulesRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ApplyKeyspaceRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyKeyspaceRoutingRulesRequest; + + /** + * Verifies an ApplyKeyspaceRoutingRulesRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyKeyspaceRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplyKeyspaceRoutingRulesRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyKeyspaceRoutingRulesRequest; + + /** + * Creates a plain object from an ApplyKeyspaceRoutingRulesRequest message. Also converts values to other types if specified. + * @param message ApplyKeyspaceRoutingRulesRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyKeyspaceRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyKeyspaceRoutingRulesRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyKeyspaceRoutingRulesRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ApplyKeyspaceRoutingRulesResponse. 
*/ + interface IApplyKeyspaceRoutingRulesResponse { + + /** ApplyKeyspaceRoutingRulesResponse keyspace_routing_rules */ + keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + } + + /** Represents an ApplyKeyspaceRoutingRulesResponse. */ + class ApplyKeyspaceRoutingRulesResponse implements IApplyKeyspaceRoutingRulesResponse { + + /** + * Constructs a new ApplyKeyspaceRoutingRulesResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IApplyKeyspaceRoutingRulesResponse); + + /** ApplyKeyspaceRoutingRulesResponse keyspace_routing_rules. */ + public keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + + /** + * Creates a new ApplyKeyspaceRoutingRulesResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ApplyKeyspaceRoutingRulesResponse instance + */ + public static create(properties?: vtctldata.IApplyKeyspaceRoutingRulesResponse): vtctldata.ApplyKeyspaceRoutingRulesResponse; + + /** + * Encodes the specified ApplyKeyspaceRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesResponse.verify|verify} messages. + * @param message ApplyKeyspaceRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IApplyKeyspaceRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ApplyKeyspaceRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesResponse.verify|verify} messages. 
+ * @param message ApplyKeyspaceRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IApplyKeyspaceRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ApplyKeyspaceRoutingRulesResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ApplyKeyspaceRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyKeyspaceRoutingRulesResponse; + + /** + * Decodes an ApplyKeyspaceRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ApplyKeyspaceRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyKeyspaceRoutingRulesResponse; + + /** + * Verifies an ApplyKeyspaceRoutingRulesResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ApplyKeyspaceRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ApplyKeyspaceRoutingRulesResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyKeyspaceRoutingRulesResponse; + + /** + * Creates a plain object from an ApplyKeyspaceRoutingRulesResponse message. 
Also converts values to other types if specified. + * @param message ApplyKeyspaceRoutingRulesResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyKeyspaceRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ApplyKeyspaceRoutingRulesResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ApplyKeyspaceRoutingRulesResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of an ApplyRoutingRulesRequest. */ interface IApplyRoutingRulesRequest { @@ -45122,6 +47919,9 @@ export namespace vtctldata { /** ApplyVSchemaRequest sql */ sql?: (string|null); + + /** ApplyVSchemaRequest strict */ + strict?: (boolean|null); } /** Represents an ApplyVSchemaRequest. */ @@ -45151,6 +47951,9 @@ export namespace vtctldata { /** ApplyVSchemaRequest sql. */ public sql: string; + /** ApplyVSchemaRequest strict. */ + public strict: boolean; + /** * Creates a new ApplyVSchemaRequest instance using the specified properties. * @param [properties] Properties to set @@ -45234,6 +48037,9 @@ export namespace vtctldata { /** ApplyVSchemaResponse v_schema */ v_schema?: (vschema.IKeyspace|null); + + /** ApplyVSchemaResponse unknown_vindex_params */ + unknown_vindex_params?: ({ [k: string]: vtctldata.ApplyVSchemaResponse.IParamList }|null); } /** Represents an ApplyVSchemaResponse. */ @@ -45248,6 +48054,9 @@ export namespace vtctldata { /** ApplyVSchemaResponse v_schema. */ public v_schema?: (vschema.IKeyspace|null); + /** ApplyVSchemaResponse unknown_vindex_params. */ + public unknown_vindex_params: { [k: string]: vtctldata.ApplyVSchemaResponse.IParamList }; + /** * Creates a new ApplyVSchemaResponse instance using the specified properties. 
* @param [properties] Properties to set @@ -45326,6 +48135,106 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + namespace ApplyVSchemaResponse { + + /** Properties of a ParamList. */ + interface IParamList { + + /** ParamList params */ + params?: (string[]|null); + } + + /** Represents a ParamList. */ + class ParamList implements IParamList { + + /** + * Constructs a new ParamList. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.ApplyVSchemaResponse.IParamList); + + /** ParamList params. */ + public params: string[]; + + /** + * Creates a new ParamList instance using the specified properties. + * @param [properties] Properties to set + * @returns ParamList instance + */ + public static create(properties?: vtctldata.ApplyVSchemaResponse.IParamList): vtctldata.ApplyVSchemaResponse.ParamList; + + /** + * Encodes the specified ParamList message. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.ParamList.verify|verify} messages. + * @param message ParamList message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.ApplyVSchemaResponse.IParamList, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ParamList message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.ParamList.verify|verify} messages. + * @param message ParamList message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.ApplyVSchemaResponse.IParamList, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ParamList message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ParamList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ApplyVSchemaResponse.ParamList; + + /** + * Decodes a ParamList message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ParamList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ApplyVSchemaResponse.ParamList; + + /** + * Verifies a ParamList message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ParamList message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ParamList + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ApplyVSchemaResponse.ParamList; + + /** + * Creates a plain object from a ParamList message. Also converts values to other types if specified. + * @param message ParamList + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ApplyVSchemaResponse.ParamList, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ParamList to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ParamList + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + } + /** Properties of a BackupRequest. */ interface IBackupRequest { @@ -45336,7 +48245,7 @@ export namespace vtctldata { allow_primary?: (boolean|null); /** BackupRequest concurrency */ - concurrency?: (number|Long|null); + concurrency?: (number|null); /** BackupRequest incremental_from_pos */ incremental_from_pos?: (string|null); @@ -45361,7 +48270,7 @@ export namespace vtctldata { public allow_primary: boolean; /** BackupRequest concurrency. */ - public concurrency: (number|Long); + public concurrency: number; /** BackupRequest incremental_from_pos. */ public incremental_from_pos: string; @@ -45575,7 +48484,7 @@ export namespace vtctldata { allow_primary?: (boolean|null); /** BackupShardRequest concurrency */ - concurrency?: (number|Long|null); + concurrency?: (number|null); /** BackupShardRequest upgrade_safe */ upgrade_safe?: (boolean|null); @@ -45603,7 +48512,7 @@ export namespace vtctldata { public allow_primary: boolean; /** BackupShardRequest concurrency. */ - public concurrency: (number|Long); + public concurrency: number; /** BackupShardRequest upgrade_safe. */ public upgrade_safe: boolean; @@ -46519,9 +49428,6 @@ export namespace vtctldata { /** CreateKeyspaceRequest allow_empty_v_schema */ allow_empty_v_schema?: (boolean|null); - /** CreateKeyspaceRequest served_froms */ - served_froms?: (topodata.Keyspace.IServedFrom[]|null); - /** CreateKeyspaceRequest type */ type?: (topodata.KeyspaceType|null); @@ -46556,9 +49462,6 @@ export namespace vtctldata { /** CreateKeyspaceRequest allow_empty_v_schema. */ public allow_empty_v_schema: boolean; - /** CreateKeyspaceRequest served_froms. 
*/ - public served_froms: topodata.Keyspace.IServedFrom[]; - /** CreateKeyspaceRequest type. */ public type: topodata.KeyspaceType; @@ -49021,6 +51924,224 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of an ExecuteMultiFetchAsDBARequest. */ + interface IExecuteMultiFetchAsDBARequest { + + /** ExecuteMultiFetchAsDBARequest tablet_alias */ + tablet_alias?: (topodata.ITabletAlias|null); + + /** ExecuteMultiFetchAsDBARequest sql */ + sql?: (string|null); + + /** ExecuteMultiFetchAsDBARequest max_rows */ + max_rows?: (number|Long|null); + + /** ExecuteMultiFetchAsDBARequest disable_binlogs */ + disable_binlogs?: (boolean|null); + + /** ExecuteMultiFetchAsDBARequest reload_schema */ + reload_schema?: (boolean|null); + } + + /** Represents an ExecuteMultiFetchAsDBARequest. */ + class ExecuteMultiFetchAsDBARequest implements IExecuteMultiFetchAsDBARequest { + + /** + * Constructs a new ExecuteMultiFetchAsDBARequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteMultiFetchAsDBARequest); + + /** ExecuteMultiFetchAsDBARequest tablet_alias. */ + public tablet_alias?: (topodata.ITabletAlias|null); + + /** ExecuteMultiFetchAsDBARequest sql. */ + public sql: string; + + /** ExecuteMultiFetchAsDBARequest max_rows. */ + public max_rows: (number|Long); + + /** ExecuteMultiFetchAsDBARequest disable_binlogs. */ + public disable_binlogs: boolean; + + /** ExecuteMultiFetchAsDBARequest reload_schema. */ + public reload_schema: boolean; + + /** + * Creates a new ExecuteMultiFetchAsDBARequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ExecuteMultiFetchAsDBARequest instance + */ + public static create(properties?: vtctldata.IExecuteMultiFetchAsDBARequest): vtctldata.ExecuteMultiFetchAsDBARequest; + + /** + * Encodes the specified ExecuteMultiFetchAsDBARequest message. 
Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBARequest.verify|verify} messages. + * @param message ExecuteMultiFetchAsDBARequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteMultiFetchAsDBARequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteMultiFetchAsDBARequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBARequest.verify|verify} messages. + * @param message ExecuteMultiFetchAsDBARequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteMultiFetchAsDBARequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteMultiFetchAsDBARequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteMultiFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteMultiFetchAsDBARequest; + + /** + * Decodes an ExecuteMultiFetchAsDBARequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ExecuteMultiFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteMultiFetchAsDBARequest; + + /** + * Verifies an ExecuteMultiFetchAsDBARequest message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteMultiFetchAsDBARequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteMultiFetchAsDBARequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteMultiFetchAsDBARequest; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDBARequest message. Also converts values to other types if specified. + * @param message ExecuteMultiFetchAsDBARequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteMultiFetchAsDBARequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteMultiFetchAsDBARequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDBARequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of an ExecuteMultiFetchAsDBAResponse. */ + interface IExecuteMultiFetchAsDBAResponse { + + /** ExecuteMultiFetchAsDBAResponse results */ + results?: (query.IQueryResult[]|null); + } + + /** Represents an ExecuteMultiFetchAsDBAResponse. */ + class ExecuteMultiFetchAsDBAResponse implements IExecuteMultiFetchAsDBAResponse { + + /** + * Constructs a new ExecuteMultiFetchAsDBAResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IExecuteMultiFetchAsDBAResponse); + + /** ExecuteMultiFetchAsDBAResponse results. */ + public results: query.IQueryResult[]; + + /** + * Creates a new ExecuteMultiFetchAsDBAResponse instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns ExecuteMultiFetchAsDBAResponse instance + */ + public static create(properties?: vtctldata.IExecuteMultiFetchAsDBAResponse): vtctldata.ExecuteMultiFetchAsDBAResponse; + + /** + * Encodes the specified ExecuteMultiFetchAsDBAResponse message. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBAResponse.verify|verify} messages. + * @param message ExecuteMultiFetchAsDBAResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IExecuteMultiFetchAsDBAResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ExecuteMultiFetchAsDBAResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBAResponse.verify|verify} messages. + * @param message ExecuteMultiFetchAsDBAResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IExecuteMultiFetchAsDBAResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes an ExecuteMultiFetchAsDBAResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ExecuteMultiFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ExecuteMultiFetchAsDBAResponse; + + /** + * Decodes an ExecuteMultiFetchAsDBAResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns ExecuteMultiFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ExecuteMultiFetchAsDBAResponse; + + /** + * Verifies an ExecuteMultiFetchAsDBAResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates an ExecuteMultiFetchAsDBAResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ExecuteMultiFetchAsDBAResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ExecuteMultiFetchAsDBAResponse; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDBAResponse message. Also converts values to other types if specified. + * @param message ExecuteMultiFetchAsDBAResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ExecuteMultiFetchAsDBAResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ExecuteMultiFetchAsDBAResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDBAResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a FindAllShardsInKeyspaceRequest. */ interface IFindAllShardsInKeyspaceRequest { @@ -49215,6 +52336,206 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a ForceCutOverSchemaMigrationRequest. 
*/ + interface IForceCutOverSchemaMigrationRequest { + + /** ForceCutOverSchemaMigrationRequest keyspace */ + keyspace?: (string|null); + + /** ForceCutOverSchemaMigrationRequest uuid */ + uuid?: (string|null); + } + + /** Represents a ForceCutOverSchemaMigrationRequest. */ + class ForceCutOverSchemaMigrationRequest implements IForceCutOverSchemaMigrationRequest { + + /** + * Constructs a new ForceCutOverSchemaMigrationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IForceCutOverSchemaMigrationRequest); + + /** ForceCutOverSchemaMigrationRequest keyspace. */ + public keyspace: string; + + /** ForceCutOverSchemaMigrationRequest uuid. */ + public uuid: string; + + /** + * Creates a new ForceCutOverSchemaMigrationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns ForceCutOverSchemaMigrationRequest instance + */ + public static create(properties?: vtctldata.IForceCutOverSchemaMigrationRequest): vtctldata.ForceCutOverSchemaMigrationRequest; + + /** + * Encodes the specified ForceCutOverSchemaMigrationRequest message. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationRequest.verify|verify} messages. + * @param message ForceCutOverSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IForceCutOverSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ForceCutOverSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationRequest.verify|verify} messages. 
+ * @param message ForceCutOverSchemaMigrationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IForceCutOverSchemaMigrationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ForceCutOverSchemaMigrationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ForceCutOverSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ForceCutOverSchemaMigrationRequest; + + /** + * Decodes a ForceCutOverSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ForceCutOverSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ForceCutOverSchemaMigrationRequest; + + /** + * Verifies a ForceCutOverSchemaMigrationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ForceCutOverSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ForceCutOverSchemaMigrationRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ForceCutOverSchemaMigrationRequest; + + /** + * Creates a plain object from a ForceCutOverSchemaMigrationRequest message. 
Also converts values to other types if specified. + * @param message ForceCutOverSchemaMigrationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ForceCutOverSchemaMigrationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ForceCutOverSchemaMigrationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ForceCutOverSchemaMigrationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a ForceCutOverSchemaMigrationResponse. */ + interface IForceCutOverSchemaMigrationResponse { + + /** ForceCutOverSchemaMigrationResponse rows_affected_by_shard */ + rows_affected_by_shard?: ({ [k: string]: (number|Long) }|null); + } + + /** Represents a ForceCutOverSchemaMigrationResponse. */ + class ForceCutOverSchemaMigrationResponse implements IForceCutOverSchemaMigrationResponse { + + /** + * Constructs a new ForceCutOverSchemaMigrationResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IForceCutOverSchemaMigrationResponse); + + /** ForceCutOverSchemaMigrationResponse rows_affected_by_shard. */ + public rows_affected_by_shard: { [k: string]: (number|Long) }; + + /** + * Creates a new ForceCutOverSchemaMigrationResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns ForceCutOverSchemaMigrationResponse instance + */ + public static create(properties?: vtctldata.IForceCutOverSchemaMigrationResponse): vtctldata.ForceCutOverSchemaMigrationResponse; + + /** + * Encodes the specified ForceCutOverSchemaMigrationResponse message. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationResponse.verify|verify} messages. 
+ * @param message ForceCutOverSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IForceCutOverSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified ForceCutOverSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationResponse.verify|verify} messages. + * @param message ForceCutOverSchemaMigrationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IForceCutOverSchemaMigrationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a ForceCutOverSchemaMigrationResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns ForceCutOverSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.ForceCutOverSchemaMigrationResponse; + + /** + * Decodes a ForceCutOverSchemaMigrationResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns ForceCutOverSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.ForceCutOverSchemaMigrationResponse; + + /** + * Verifies a ForceCutOverSchemaMigrationResponse message. 
+ * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a ForceCutOverSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns ForceCutOverSchemaMigrationResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.ForceCutOverSchemaMigrationResponse; + + /** + * Creates a plain object from a ForceCutOverSchemaMigrationResponse message. Also converts values to other types if specified. + * @param message ForceCutOverSchemaMigrationResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.ForceCutOverSchemaMigrationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this ForceCutOverSchemaMigrationResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for ForceCutOverSchemaMigrationResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a GetBackupsRequest. */ interface IGetBackupsRequest { @@ -50773,6 +54094,194 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a GetKeyspaceRoutingRulesRequest. */ + interface IGetKeyspaceRoutingRulesRequest { + } + + /** Represents a GetKeyspaceRoutingRulesRequest. */ + class GetKeyspaceRoutingRulesRequest implements IGetKeyspaceRoutingRulesRequest { + + /** + * Constructs a new GetKeyspaceRoutingRulesRequest. 
+ * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IGetKeyspaceRoutingRulesRequest); + + /** + * Creates a new GetKeyspaceRoutingRulesRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns GetKeyspaceRoutingRulesRequest instance + */ + public static create(properties?: vtctldata.IGetKeyspaceRoutingRulesRequest): vtctldata.GetKeyspaceRoutingRulesRequest; + + /** + * Encodes the specified GetKeyspaceRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesRequest.verify|verify} messages. + * @param message GetKeyspaceRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IGetKeyspaceRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetKeyspaceRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesRequest.verify|verify} messages. + * @param message GetKeyspaceRoutingRulesRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IGetKeyspaceRoutingRulesRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetKeyspaceRoutingRulesRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetKeyspaceRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspaceRoutingRulesRequest; + + /** + * Decodes a GetKeyspaceRoutingRulesRequest message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns GetKeyspaceRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspaceRoutingRulesRequest; + + /** + * Verifies a GetKeyspaceRoutingRulesRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetKeyspaceRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GetKeyspaceRoutingRulesRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspaceRoutingRulesRequest; + + /** + * Creates a plain object from a GetKeyspaceRoutingRulesRequest message. Also converts values to other types if specified. + * @param message GetKeyspaceRoutingRulesRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.GetKeyspaceRoutingRulesRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetKeyspaceRoutingRulesRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetKeyspaceRoutingRulesRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a GetKeyspaceRoutingRulesResponse. 
*/ + interface IGetKeyspaceRoutingRulesResponse { + + /** GetKeyspaceRoutingRulesResponse keyspace_routing_rules */ + keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + } + + /** Represents a GetKeyspaceRoutingRulesResponse. */ + class GetKeyspaceRoutingRulesResponse implements IGetKeyspaceRoutingRulesResponse { + + /** + * Constructs a new GetKeyspaceRoutingRulesResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IGetKeyspaceRoutingRulesResponse); + + /** GetKeyspaceRoutingRulesResponse keyspace_routing_rules. */ + public keyspace_routing_rules?: (vschema.IKeyspaceRoutingRules|null); + + /** + * Creates a new GetKeyspaceRoutingRulesResponse instance using the specified properties. + * @param [properties] Properties to set + * @returns GetKeyspaceRoutingRulesResponse instance + */ + public static create(properties?: vtctldata.IGetKeyspaceRoutingRulesResponse): vtctldata.GetKeyspaceRoutingRulesResponse; + + /** + * Encodes the specified GetKeyspaceRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesResponse.verify|verify} messages. + * @param message GetKeyspaceRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IGetKeyspaceRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetKeyspaceRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesResponse.verify|verify} messages. + * @param message GetKeyspaceRoutingRulesResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IGetKeyspaceRoutingRulesResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetKeyspaceRoutingRulesResponse message from the specified reader or buffer. 
+ * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetKeyspaceRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetKeyspaceRoutingRulesResponse; + + /** + * Decodes a GetKeyspaceRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GetKeyspaceRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetKeyspaceRoutingRulesResponse; + + /** + * Verifies a GetKeyspaceRoutingRulesResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetKeyspaceRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GetKeyspaceRoutingRulesResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.GetKeyspaceRoutingRulesResponse; + + /** + * Creates a plain object from a GetKeyspaceRoutingRulesResponse message. Also converts values to other types if specified. + * @param message GetKeyspaceRoutingRulesResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.GetKeyspaceRoutingRulesResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetKeyspaceRoutingRulesResponse to JSON. 
+ * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetKeyspaceRoutingRulesResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a GetRoutingRulesRequest. */ interface IGetRoutingRulesRequest { } @@ -51427,6 +54936,212 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } + /** Properties of a GetShardReplicationRequest. */ + interface IGetShardReplicationRequest { + + /** GetShardReplicationRequest keyspace */ + keyspace?: (string|null); + + /** GetShardReplicationRequest shard */ + shard?: (string|null); + + /** GetShardReplicationRequest cells */ + cells?: (string[]|null); + } + + /** Represents a GetShardReplicationRequest. */ + class GetShardReplicationRequest implements IGetShardReplicationRequest { + + /** + * Constructs a new GetShardReplicationRequest. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IGetShardReplicationRequest); + + /** GetShardReplicationRequest keyspace. */ + public keyspace: string; + + /** GetShardReplicationRequest shard. */ + public shard: string; + + /** GetShardReplicationRequest cells. */ + public cells: string[]; + + /** + * Creates a new GetShardReplicationRequest instance using the specified properties. + * @param [properties] Properties to set + * @returns GetShardReplicationRequest instance + */ + public static create(properties?: vtctldata.IGetShardReplicationRequest): vtctldata.GetShardReplicationRequest; + + /** + * Encodes the specified GetShardReplicationRequest message. Does not implicitly {@link vtctldata.GetShardReplicationRequest.verify|verify} messages. 
+ * @param message GetShardReplicationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IGetShardReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetShardReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardReplicationRequest.verify|verify} messages. + * @param message GetShardReplicationRequest message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IGetShardReplicationRequest, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetShardReplicationRequest message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetShardReplicationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardReplicationRequest; + + /** + * Decodes a GetShardReplicationRequest message from the specified reader or buffer, length delimited. + * @param reader Reader or buffer to decode from + * @returns GetShardReplicationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardReplicationRequest; + + /** + * Verifies a GetShardReplicationRequest message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetShardReplicationRequest message from a plain object. 
Also converts values to their respective internal types. + * @param object Plain object + * @returns GetShardReplicationRequest + */ + public static fromObject(object: { [k: string]: any }): vtctldata.GetShardReplicationRequest; + + /** + * Creates a plain object from a GetShardReplicationRequest message. Also converts values to other types if specified. + * @param message GetShardReplicationRequest + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.GetShardReplicationRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetShardReplicationRequest to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetShardReplicationRequest + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + + /** Properties of a GetShardReplicationResponse. */ + interface IGetShardReplicationResponse { + + /** GetShardReplicationResponse shard_replication_by_cell */ + shard_replication_by_cell?: ({ [k: string]: topodata.IShardReplication }|null); + } + + /** Represents a GetShardReplicationResponse. */ + class GetShardReplicationResponse implements IGetShardReplicationResponse { + + /** + * Constructs a new GetShardReplicationResponse. + * @param [properties] Properties to set + */ + constructor(properties?: vtctldata.IGetShardReplicationResponse); + + /** GetShardReplicationResponse shard_replication_by_cell. */ + public shard_replication_by_cell: { [k: string]: topodata.IShardReplication }; + + /** + * Creates a new GetShardReplicationResponse instance using the specified properties. 
+ * @param [properties] Properties to set + * @returns GetShardReplicationResponse instance + */ + public static create(properties?: vtctldata.IGetShardReplicationResponse): vtctldata.GetShardReplicationResponse; + + /** + * Encodes the specified GetShardReplicationResponse message. Does not implicitly {@link vtctldata.GetShardReplicationResponse.verify|verify} messages. + * @param message GetShardReplicationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encode(message: vtctldata.IGetShardReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Encodes the specified GetShardReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardReplicationResponse.verify|verify} messages. + * @param message GetShardReplicationResponse message or plain object to encode + * @param [writer] Writer to encode to + * @returns Writer + */ + public static encodeDelimited(message: vtctldata.IGetShardReplicationResponse, writer?: $protobuf.Writer): $protobuf.Writer; + + /** + * Decodes a GetShardReplicationResponse message from the specified reader or buffer. + * @param reader Reader or buffer to decode from + * @param [length] Message length if known beforehand + * @returns GetShardReplicationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.GetShardReplicationResponse; + + /** + * Decodes a GetShardReplicationResponse message from the specified reader or buffer, length delimited. 
+ * @param reader Reader or buffer to decode from + * @returns GetShardReplicationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.GetShardReplicationResponse; + + /** + * Verifies a GetShardReplicationResponse message. + * @param message Plain object to verify + * @returns `null` if valid, otherwise the reason why it is not + */ + public static verify(message: { [k: string]: any }): (string|null); + + /** + * Creates a GetShardReplicationResponse message from a plain object. Also converts values to their respective internal types. + * @param object Plain object + * @returns GetShardReplicationResponse + */ + public static fromObject(object: { [k: string]: any }): vtctldata.GetShardReplicationResponse; + + /** + * Creates a plain object from a GetShardReplicationResponse message. Also converts values to other types if specified. + * @param message GetShardReplicationResponse + * @param [options] Conversion options + * @returns Plain object + */ + public static toObject(message: vtctldata.GetShardReplicationResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; + + /** + * Converts this GetShardReplicationResponse to JSON. + * @returns JSON object + */ + public toJSON(): { [k: string]: any }; + + /** + * Gets the default type url for GetShardReplicationResponse + * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns The default type url + */ + public static getTypeUrl(typeUrlPrefix?: string): string; + } + /** Properties of a GetShardRequest. */ interface IGetShardRequest { @@ -54065,6 +57780,9 @@ export namespace vtctldata { /** GetWorkflowsRequest include_logs */ include_logs?: (boolean|null); + + /** GetWorkflowsRequest shards */ + shards?: (string[]|null); } /** Represents a GetWorkflowsRequest. 
*/ @@ -54091,6 +57809,9 @@ export namespace vtctldata { /** GetWorkflowsRequest include_logs. */ public include_logs: boolean; + /** GetWorkflowsRequest shards. */ + public shards: string[]; + /** * Creates a new GetWorkflowsRequest instance using the specified properties. * @param [properties] Properties to set @@ -56578,6 +60299,9 @@ export namespace vtctldata { /** MoveTablesCreateRequest atomic_copy */ atomic_copy?: (boolean|null); + + /** MoveTablesCreateRequest workflow_options */ + workflow_options?: (vtctldata.IWorkflowOptions|null); } /** Represents a MoveTablesCreateRequest. */ @@ -56646,6 +60370,9 @@ export namespace vtctldata { /** MoveTablesCreateRequest atomic_copy. */ public atomic_copy: boolean; + /** MoveTablesCreateRequest workflow_options. */ + public workflow_options?: (vtctldata.IWorkflowOptions|null); + /** * Creates a new MoveTablesCreateRequest instance using the specified properties. * @param [properties] Properties to set @@ -56953,6 +60680,9 @@ export namespace vtctldata { /** MoveTablesCompleteRequest dry_run */ dry_run?: (boolean|null); + + /** MoveTablesCompleteRequest shards */ + shards?: (string[]|null); } /** Represents a MoveTablesCompleteRequest. */ @@ -56982,6 +60712,9 @@ export namespace vtctldata { /** MoveTablesCompleteRequest dry_run. */ public dry_run: boolean; + /** MoveTablesCompleteRequest shards. */ + public shards: string[]; + /** * Creates a new MoveTablesCompleteRequest instance using the specified properties. * @param [properties] Properties to set @@ -57368,6 +61101,9 @@ export namespace vtctldata { /** PlannedReparentShardRequest wait_replicas_timeout */ wait_replicas_timeout?: (vttime.IDuration|null); + + /** PlannedReparentShardRequest tolerable_replication_lag */ + tolerable_replication_lag?: (vttime.IDuration|null); } /** Represents a PlannedReparentShardRequest. */ @@ -57394,6 +61130,9 @@ export namespace vtctldata { /** PlannedReparentShardRequest wait_replicas_timeout. 
*/ public wait_replicas_timeout?: (vttime.IDuration|null); + /** PlannedReparentShardRequest tolerable_replication_lag. */ + public tolerable_replication_lag?: (vttime.IDuration|null); + /** * Creates a new PlannedReparentShardRequest instance using the specified properties. * @param [properties] Properties to set @@ -60804,224 +64543,6 @@ export namespace vtctldata { public static getTypeUrl(typeUrlPrefix?: string): string; } - /** Properties of a SetKeyspaceServedFromRequest. */ - interface ISetKeyspaceServedFromRequest { - - /** SetKeyspaceServedFromRequest keyspace */ - keyspace?: (string|null); - - /** SetKeyspaceServedFromRequest tablet_type */ - tablet_type?: (topodata.TabletType|null); - - /** SetKeyspaceServedFromRequest cells */ - cells?: (string[]|null); - - /** SetKeyspaceServedFromRequest remove */ - remove?: (boolean|null); - - /** SetKeyspaceServedFromRequest source_keyspace */ - source_keyspace?: (string|null); - } - - /** Represents a SetKeyspaceServedFromRequest. */ - class SetKeyspaceServedFromRequest implements ISetKeyspaceServedFromRequest { - - /** - * Constructs a new SetKeyspaceServedFromRequest. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.ISetKeyspaceServedFromRequest); - - /** SetKeyspaceServedFromRequest keyspace. */ - public keyspace: string; - - /** SetKeyspaceServedFromRequest tablet_type. */ - public tablet_type: topodata.TabletType; - - /** SetKeyspaceServedFromRequest cells. */ - public cells: string[]; - - /** SetKeyspaceServedFromRequest remove. */ - public remove: boolean; - - /** SetKeyspaceServedFromRequest source_keyspace. */ - public source_keyspace: string; - - /** - * Creates a new SetKeyspaceServedFromRequest instance using the specified properties. 
- * @param [properties] Properties to set - * @returns SetKeyspaceServedFromRequest instance - */ - public static create(properties?: vtctldata.ISetKeyspaceServedFromRequest): vtctldata.SetKeyspaceServedFromRequest; - - /** - * Encodes the specified SetKeyspaceServedFromRequest message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. - * @param message SetKeyspaceServedFromRequest message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.ISetKeyspaceServedFromRequest, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified SetKeyspaceServedFromRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. - * @param message SetKeyspaceServedFromRequest message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceServedFromRequest, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer. - * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns SetKeyspaceServedFromRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceServedFromRequest; - - /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer, length delimited. 
- * @param reader Reader or buffer to decode from - * @returns SetKeyspaceServedFromRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceServedFromRequest; - - /** - * Verifies a SetKeyspaceServedFromRequest message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); - - /** - * Creates a SetKeyspaceServedFromRequest message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns SetKeyspaceServedFromRequest - */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceServedFromRequest; - - /** - * Creates a plain object from a SetKeyspaceServedFromRequest message. Also converts values to other types if specified. - * @param message SetKeyspaceServedFromRequest - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.SetKeyspaceServedFromRequest, options?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this SetKeyspaceServedFromRequest to JSON. - * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - - /** - * Gets the default type url for SetKeyspaceServedFromRequest - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } - - /** Properties of a SetKeyspaceServedFromResponse. */ - interface ISetKeyspaceServedFromResponse { - - /** SetKeyspaceServedFromResponse keyspace */ - keyspace?: (topodata.IKeyspace|null); - } - - /** Represents a SetKeyspaceServedFromResponse. 
*/ - class SetKeyspaceServedFromResponse implements ISetKeyspaceServedFromResponse { - - /** - * Constructs a new SetKeyspaceServedFromResponse. - * @param [properties] Properties to set - */ - constructor(properties?: vtctldata.ISetKeyspaceServedFromResponse); - - /** SetKeyspaceServedFromResponse keyspace. */ - public keyspace?: (topodata.IKeyspace|null); - - /** - * Creates a new SetKeyspaceServedFromResponse instance using the specified properties. - * @param [properties] Properties to set - * @returns SetKeyspaceServedFromResponse instance - */ - public static create(properties?: vtctldata.ISetKeyspaceServedFromResponse): vtctldata.SetKeyspaceServedFromResponse; - - /** - * Encodes the specified SetKeyspaceServedFromResponse message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. - * @param message SetKeyspaceServedFromResponse message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encode(message: vtctldata.ISetKeyspaceServedFromResponse, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Encodes the specified SetKeyspaceServedFromResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. - * @param message SetKeyspaceServedFromResponse message or plain object to encode - * @param [writer] Writer to encode to - * @returns Writer - */ - public static encodeDelimited(message: vtctldata.ISetKeyspaceServedFromResponse, writer?: $protobuf.Writer): $protobuf.Writer; - - /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer. 
- * @param reader Reader or buffer to decode from - * @param [length] Message length if known beforehand - * @returns SetKeyspaceServedFromResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decode(reader: ($protobuf.Reader|Uint8Array), length?: number): vtctldata.SetKeyspaceServedFromResponse; - - /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer, length delimited. - * @param reader Reader or buffer to decode from - * @returns SetKeyspaceServedFromResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - public static decodeDelimited(reader: ($protobuf.Reader|Uint8Array)): vtctldata.SetKeyspaceServedFromResponse; - - /** - * Verifies a SetKeyspaceServedFromResponse message. - * @param message Plain object to verify - * @returns `null` if valid, otherwise the reason why it is not - */ - public static verify(message: { [k: string]: any }): (string|null); - - /** - * Creates a SetKeyspaceServedFromResponse message from a plain object. Also converts values to their respective internal types. - * @param object Plain object - * @returns SetKeyspaceServedFromResponse - */ - public static fromObject(object: { [k: string]: any }): vtctldata.SetKeyspaceServedFromResponse; - - /** - * Creates a plain object from a SetKeyspaceServedFromResponse message. Also converts values to other types if specified. - * @param message SetKeyspaceServedFromResponse - * @param [options] Conversion options - * @returns Plain object - */ - public static toObject(message: vtctldata.SetKeyspaceServedFromResponse, options?: $protobuf.IConversionOptions): { [k: string]: any }; - - /** - * Converts this SetKeyspaceServedFromResponse to JSON. 
- * @returns JSON object - */ - public toJSON(): { [k: string]: any }; - - /** - * Gets the default type url for SetKeyspaceServedFromResponse - * @param [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns The default type url - */ - public static getTypeUrl(typeUrlPrefix?: string): string; - } - /** Properties of a SetKeyspaceShardingInfoRequest. */ interface ISetKeyspaceShardingInfoRequest { @@ -65804,6 +69325,12 @@ export namespace vtctldata { /** VDiffCreateRequest verbose */ verbose?: (boolean|null); + + /** VDiffCreateRequest max_report_sample_rows */ + max_report_sample_rows?: (number|Long|null); + + /** VDiffCreateRequest max_diff_duration */ + max_diff_duration?: (vttime.IDuration|null); } /** Represents a VDiffCreateRequest. */ @@ -65869,6 +69396,12 @@ export namespace vtctldata { /** VDiffCreateRequest verbose. */ public verbose: boolean; + /** VDiffCreateRequest max_report_sample_rows. */ + public max_report_sample_rows: (number|Long); + + /** VDiffCreateRequest max_diff_duration. */ + public max_diff_duration?: (vttime.IDuration|null); + /** * Creates a new VDiffCreateRequest instance using the specified properties. * @param [properties] Properties to set @@ -66864,6 +70397,9 @@ export namespace vtctldata { /** WorkflowDeleteRequest keep_routing_rules */ keep_routing_rules?: (boolean|null); + + /** WorkflowDeleteRequest shards */ + shards?: (string[]|null); } /** Represents a WorkflowDeleteRequest. */ @@ -66887,6 +70423,9 @@ export namespace vtctldata { /** WorkflowDeleteRequest keep_routing_rules. */ public keep_routing_rules: boolean; + /** WorkflowDeleteRequest shards. */ + public shards: string[]; + /** * Creates a new WorkflowDeleteRequest instance using the specified properties. 
* @param [properties] Properties to set @@ -67182,6 +70721,9 @@ export namespace vtctldata { /** WorkflowStatusRequest workflow */ workflow?: (string|null); + + /** WorkflowStatusRequest shards */ + shards?: (string[]|null); } /** Represents a WorkflowStatusRequest. */ @@ -67199,6 +70741,9 @@ export namespace vtctldata { /** WorkflowStatusRequest workflow. */ public workflow: string; + /** WorkflowStatusRequest shards. */ + public shards: string[]; + /** * Creates a new WorkflowStatusRequest instance using the specified properties. * @param [properties] Properties to set @@ -67772,6 +71317,9 @@ export namespace vtctldata { /** WorkflowSwitchTrafficRequest initialize_target_sequences */ initialize_target_sequences?: (boolean|null); + + /** WorkflowSwitchTrafficRequest shards */ + shards?: (string[]|null); } /** Represents a WorkflowSwitchTrafficRequest. */ @@ -67813,6 +71361,9 @@ export namespace vtctldata { /** WorkflowSwitchTrafficRequest initialize_target_sequences. */ public initialize_target_sequences: boolean; + /** WorkflowSwitchTrafficRequest shards. */ + public shards: string[]; + /** * Creates a new WorkflowSwitchTrafficRequest instance using the specified properties. * @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index 9ddce1d0059..f827b6ee594 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -48,6 +48,138 @@ export const vtadmin = $root.vtadmin = (() => { return new this(rpcImpl, requestDelimited, responseDelimited); }; + /** + * Callback as used by {@link vtadmin.VTAdmin#applySchema}. + * @memberof vtadmin.VTAdmin + * @typedef ApplySchemaCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtctldata.ApplySchemaResponse} [response] ApplySchemaResponse + */ + + /** + * Calls ApplySchema. 
+ * @function applySchema + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.IApplySchemaRequest} request ApplySchemaRequest message or plain object + * @param {vtadmin.VTAdmin.ApplySchemaCallback} callback Node-style callback called with the error, if any, and ApplySchemaResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.applySchema = function applySchema(request, callback) { + return this.rpcCall(applySchema, $root.vtadmin.ApplySchemaRequest, $root.vtctldata.ApplySchemaResponse, request, callback); + }, "name", { value: "ApplySchema" }); + + /** + * Calls ApplySchema. + * @function applySchema + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.IApplySchemaRequest} request ApplySchemaRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + /** + * Callback as used by {@link vtadmin.VTAdmin#cancelSchemaMigration}. + * @memberof vtadmin.VTAdmin + * @typedef CancelSchemaMigrationCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtctldata.CancelSchemaMigrationResponse} [response] CancelSchemaMigrationResponse + */ + + /** + * Calls CancelSchemaMigration. + * @function cancelSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ICancelSchemaMigrationRequest} request CancelSchemaMigrationRequest message or plain object + * @param {vtadmin.VTAdmin.CancelSchemaMigrationCallback} callback Node-style callback called with the error, if any, and CancelSchemaMigrationResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.cancelSchemaMigration = function cancelSchemaMigration(request, callback) { + return this.rpcCall(cancelSchemaMigration, $root.vtadmin.CancelSchemaMigrationRequest, $root.vtctldata.CancelSchemaMigrationResponse, request, callback); + }, "name", { value: "CancelSchemaMigration" }); + + /** + * Calls CancelSchemaMigration. 
+ * @function cancelSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ICancelSchemaMigrationRequest} request CancelSchemaMigrationRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + /** + * Callback as used by {@link vtadmin.VTAdmin#cleanupSchemaMigration}. + * @memberof vtadmin.VTAdmin + * @typedef CleanupSchemaMigrationCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtctldata.CleanupSchemaMigrationResponse} [response] CleanupSchemaMigrationResponse + */ + + /** + * Calls CleanupSchemaMigration. + * @function cleanupSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ICleanupSchemaMigrationRequest} request CleanupSchemaMigrationRequest message or plain object + * @param {vtadmin.VTAdmin.CleanupSchemaMigrationCallback} callback Node-style callback called with the error, if any, and CleanupSchemaMigrationResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.cleanupSchemaMigration = function cleanupSchemaMigration(request, callback) { + return this.rpcCall(cleanupSchemaMigration, $root.vtadmin.CleanupSchemaMigrationRequest, $root.vtctldata.CleanupSchemaMigrationResponse, request, callback); + }, "name", { value: "CleanupSchemaMigration" }); + + /** + * Calls CleanupSchemaMigration. + * @function cleanupSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ICleanupSchemaMigrationRequest} request CleanupSchemaMigrationRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + + /** + * Callback as used by {@link vtadmin.VTAdmin#completeSchemaMigration}. + * @memberof vtadmin.VTAdmin + * @typedef CompleteSchemaMigrationCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtctldata.CompleteSchemaMigrationResponse} [response] CompleteSchemaMigrationResponse + */ + + /** + * Calls CompleteSchemaMigration. 
+ * @function completeSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ICompleteSchemaMigrationRequest} request CompleteSchemaMigrationRequest message or plain object + * @param {vtadmin.VTAdmin.CompleteSchemaMigrationCallback} callback Node-style callback called with the error, if any, and CompleteSchemaMigrationResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.completeSchemaMigration = function completeSchemaMigration(request, callback) { + return this.rpcCall(completeSchemaMigration, $root.vtadmin.CompleteSchemaMigrationRequest, $root.vtctldata.CompleteSchemaMigrationResponse, request, callback); + }, "name", { value: "CompleteSchemaMigration" }); + + /** + * Calls CompleteSchemaMigration. + * @function completeSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ICompleteSchemaMigrationRequest} request CompleteSchemaMigrationRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + /** * Callback as used by {@link vtadmin.VTAdmin#createKeyspace}. * @memberof vtadmin.VTAdmin @@ -609,6 +741,39 @@ export const vtadmin = $root.vtadmin = (() => { * @variation 2 */ + /** + * Callback as used by {@link vtadmin.VTAdmin#getSchemaMigrations}. + * @memberof vtadmin.VTAdmin + * @typedef GetSchemaMigrationsCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtadmin.GetSchemaMigrationsResponse} [response] GetSchemaMigrationsResponse + */ + + /** + * Calls GetSchemaMigrations. 
+ * @function getSchemaMigrations + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.IGetSchemaMigrationsRequest} request GetSchemaMigrationsRequest message or plain object + * @param {vtadmin.VTAdmin.GetSchemaMigrationsCallback} callback Node-style callback called with the error, if any, and GetSchemaMigrationsResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.getSchemaMigrations = function getSchemaMigrations(request, callback) { + return this.rpcCall(getSchemaMigrations, $root.vtadmin.GetSchemaMigrationsRequest, $root.vtadmin.GetSchemaMigrationsResponse, request, callback); + }, "name", { value: "GetSchemaMigrations" }); + + /** + * Calls GetSchemaMigrations. + * @function getSchemaMigrations + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.IGetSchemaMigrationsRequest} request GetSchemaMigrationsRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + /** * Callback as used by {@link vtadmin.VTAdmin#getShardReplicationPositions}. * @memberof vtadmin.VTAdmin @@ -1038,6 +1203,39 @@ export const vtadmin = $root.vtadmin = (() => { * @variation 2 */ + /** + * Callback as used by {@link vtadmin.VTAdmin#launchSchemaMigration}. + * @memberof vtadmin.VTAdmin + * @typedef LaunchSchemaMigrationCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtctldata.LaunchSchemaMigrationResponse} [response] LaunchSchemaMigrationResponse + */ + + /** + * Calls LaunchSchemaMigration. 
+ * @function launchSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ILaunchSchemaMigrationRequest} request LaunchSchemaMigrationRequest message or plain object + * @param {vtadmin.VTAdmin.LaunchSchemaMigrationCallback} callback Node-style callback called with the error, if any, and LaunchSchemaMigrationResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.launchSchemaMigration = function launchSchemaMigration(request, callback) { + return this.rpcCall(launchSchemaMigration, $root.vtadmin.LaunchSchemaMigrationRequest, $root.vtctldata.LaunchSchemaMigrationResponse, request, callback); + }, "name", { value: "LaunchSchemaMigration" }); + + /** + * Calls LaunchSchemaMigration. + * @function launchSchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.ILaunchSchemaMigrationRequest} request LaunchSchemaMigrationRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + /** * Callback as used by {@link vtadmin.VTAdmin#pingTablet}. * @memberof vtadmin.VTAdmin @@ -1302,6 +1500,39 @@ export const vtadmin = $root.vtadmin = (() => { * @variation 2 */ + /** + * Callback as used by {@link vtadmin.VTAdmin#retrySchemaMigration}. + * @memberof vtadmin.VTAdmin + * @typedef RetrySchemaMigrationCallback + * @type {function} + * @param {Error|null} error Error, if any + * @param {vtctldata.RetrySchemaMigrationResponse} [response] RetrySchemaMigrationResponse + */ + + /** + * Calls RetrySchemaMigration. 
+ * @function retrySchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.IRetrySchemaMigrationRequest} request RetrySchemaMigrationRequest message or plain object + * @param {vtadmin.VTAdmin.RetrySchemaMigrationCallback} callback Node-style callback called with the error, if any, and RetrySchemaMigrationResponse + * @returns {undefined} + * @variation 1 + */ + Object.defineProperty(VTAdmin.prototype.retrySchemaMigration = function retrySchemaMigration(request, callback) { + return this.rpcCall(retrySchemaMigration, $root.vtadmin.RetrySchemaMigrationRequest, $root.vtctldata.RetrySchemaMigrationResponse, request, callback); + }, "name", { value: "RetrySchemaMigration" }); + + /** + * Calls RetrySchemaMigration. + * @function retrySchemaMigration + * @memberof vtadmin.VTAdmin + * @instance + * @param {vtadmin.IRetrySchemaMigrationRequest} request RetrySchemaMigrationRequest message or plain object + * @returns {Promise} Promise + * @variation 2 + */ + /** * Callback as used by {@link vtadmin.VTAdmin#runHealthCheck}. * @memberof vtadmin.VTAdmin @@ -4501,6 +4732,243 @@ export const vtadmin = $root.vtadmin = (() => { return Schema; })(); + vtadmin.SchemaMigration = (function() { + + /** + * Properties of a SchemaMigration. + * @memberof vtadmin + * @interface ISchemaMigration + * @property {vtadmin.ICluster|null} [cluster] SchemaMigration cluster + * @property {vtctldata.ISchemaMigration|null} [schema_migration] SchemaMigration schema_migration + */ + + /** + * Constructs a new SchemaMigration. + * @memberof vtadmin + * @classdesc Represents a SchemaMigration. + * @implements ISchemaMigration + * @constructor + * @param {vtadmin.ISchemaMigration=} [properties] Properties to set + */ + function SchemaMigration(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * SchemaMigration cluster. 
+ * @member {vtadmin.ICluster|null|undefined} cluster + * @memberof vtadmin.SchemaMigration + * @instance + */ + SchemaMigration.prototype.cluster = null; + + /** + * SchemaMigration schema_migration. + * @member {vtctldata.ISchemaMigration|null|undefined} schema_migration + * @memberof vtadmin.SchemaMigration + * @instance + */ + SchemaMigration.prototype.schema_migration = null; + + /** + * Creates a new SchemaMigration instance using the specified properties. + * @function create + * @memberof vtadmin.SchemaMigration + * @static + * @param {vtadmin.ISchemaMigration=} [properties] Properties to set + * @returns {vtadmin.SchemaMigration} SchemaMigration instance + */ + SchemaMigration.create = function create(properties) { + return new SchemaMigration(properties); + }; + + /** + * Encodes the specified SchemaMigration message. Does not implicitly {@link vtadmin.SchemaMigration.verify|verify} messages. + * @function encode + * @memberof vtadmin.SchemaMigration + * @static + * @param {vtadmin.ISchemaMigration} message SchemaMigration message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SchemaMigration.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster != null && Object.hasOwnProperty.call(message, "cluster")) + $root.vtadmin.Cluster.encode(message.cluster, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.schema_migration != null && Object.hasOwnProperty.call(message, "schema_migration")) + $root.vtctldata.SchemaMigration.encode(message.schema_migration, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified SchemaMigration message, length delimited. Does not implicitly {@link vtadmin.SchemaMigration.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtadmin.SchemaMigration + * @static + * @param {vtadmin.ISchemaMigration} message SchemaMigration message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + SchemaMigration.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer. + * @function decode + * @memberof vtadmin.SchemaMigration + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.SchemaMigration} SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SchemaMigration.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.SchemaMigration(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster = $root.vtadmin.Cluster.decode(reader, reader.uint32()); + break; + } + case 2: { + message.schema_migration = $root.vtctldata.SchemaMigration.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a SchemaMigration message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtadmin.SchemaMigration + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.SchemaMigration} SchemaMigration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + SchemaMigration.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a SchemaMigration message. + * @function verify + * @memberof vtadmin.SchemaMigration + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + SchemaMigration.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster != null && message.hasOwnProperty("cluster")) { + let error = $root.vtadmin.Cluster.verify(message.cluster); + if (error) + return "cluster." + error; + } + if (message.schema_migration != null && message.hasOwnProperty("schema_migration")) { + let error = $root.vtctldata.SchemaMigration.verify(message.schema_migration); + if (error) + return "schema_migration." + error; + } + return null; + }; + + /** + * Creates a SchemaMigration message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtadmin.SchemaMigration + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.SchemaMigration} SchemaMigration + */ + SchemaMigration.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.SchemaMigration) + return object; + let message = new $root.vtadmin.SchemaMigration(); + if (object.cluster != null) { + if (typeof object.cluster !== "object") + throw TypeError(".vtadmin.SchemaMigration.cluster: object expected"); + message.cluster = $root.vtadmin.Cluster.fromObject(object.cluster); + } + if (object.schema_migration != null) { + if (typeof object.schema_migration !== "object") + throw TypeError(".vtadmin.SchemaMigration.schema_migration: object expected"); + message.schema_migration = $root.vtctldata.SchemaMigration.fromObject(object.schema_migration); + } + return message; + }; + + /** + * Creates a plain object from a SchemaMigration message. Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.SchemaMigration + * @static + * @param {vtadmin.SchemaMigration} message SchemaMigration + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + SchemaMigration.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster = null; + object.schema_migration = null; + } + if (message.cluster != null && message.hasOwnProperty("cluster")) + object.cluster = $root.vtadmin.Cluster.toObject(message.cluster, options); + if (message.schema_migration != null && message.hasOwnProperty("schema_migration")) + object.schema_migration = $root.vtctldata.SchemaMigration.toObject(message.schema_migration, options); + return object; + }; + + /** + * Converts this SchemaMigration to JSON. 
+ * @function toJSON + * @memberof vtadmin.SchemaMigration + * @instance + * @returns {Object.} JSON object + */ + SchemaMigration.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for SchemaMigration + * @function getTypeUrl + * @memberof vtadmin.SchemaMigration + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + SchemaMigration.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.SchemaMigration"; + }; + + return SchemaMigration; + })(); + vtadmin.Shard = (function() { /** @@ -6437,6 +6905,934 @@ export const vtadmin = $root.vtadmin = (() => { return Workflow; })(); + vtadmin.ApplySchemaRequest = (function() { + + /** + * Properties of an ApplySchemaRequest. + * @memberof vtadmin + * @interface IApplySchemaRequest + * @property {string|null} [cluster_id] ApplySchemaRequest cluster_id + * @property {vtctldata.IApplySchemaRequest|null} [request] ApplySchemaRequest request + */ + + /** + * Constructs a new ApplySchemaRequest. + * @memberof vtadmin + * @classdesc Represents an ApplySchemaRequest. + * @implements IApplySchemaRequest + * @constructor + * @param {vtadmin.IApplySchemaRequest=} [properties] Properties to set + */ + function ApplySchemaRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplySchemaRequest cluster_id. + * @member {string} cluster_id + * @memberof vtadmin.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.cluster_id = ""; + + /** + * ApplySchemaRequest request. 
+ * @member {vtctldata.IApplySchemaRequest|null|undefined} request + * @memberof vtadmin.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.request = null; + + /** + * Creates a new ApplySchemaRequest instance using the specified properties. + * @function create + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {vtadmin.IApplySchemaRequest=} [properties] Properties to set + * @returns {vtadmin.ApplySchemaRequest} ApplySchemaRequest instance + */ + ApplySchemaRequest.create = function create(properties) { + return new ApplySchemaRequest(properties); + }; + + /** + * Encodes the specified ApplySchemaRequest message. Does not implicitly {@link vtadmin.ApplySchemaRequest.verify|verify} messages. + * @function encode + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {vtadmin.IApplySchemaRequest} message ApplySchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplySchemaRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.ApplySchemaRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ApplySchemaRequest message, length delimited. Does not implicitly {@link vtadmin.ApplySchemaRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {vtadmin.IApplySchemaRequest} message ApplySchemaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplySchemaRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer. + * @function decode + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.ApplySchemaRequest} ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplySchemaRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.ApplySchemaRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.ApplySchemaRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplySchemaRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.ApplySchemaRequest} ApplySchemaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplySchemaRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplySchemaRequest message. + * @function verify + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplySchemaRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.ApplySchemaRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates an ApplySchemaRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.ApplySchemaRequest} ApplySchemaRequest + */ + ApplySchemaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.ApplySchemaRequest) + return object; + let message = new $root.vtadmin.ApplySchemaRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.ApplySchemaRequest.request: object expected"); + message.request = $root.vtctldata.ApplySchemaRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from an ApplySchemaRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {vtadmin.ApplySchemaRequest} message ApplySchemaRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplySchemaRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.ApplySchemaRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this ApplySchemaRequest to JSON. 
+ * @function toJSON + * @memberof vtadmin.ApplySchemaRequest + * @instance + * @returns {Object.} JSON object + */ + ApplySchemaRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplySchemaRequest + * @function getTypeUrl + * @memberof vtadmin.ApplySchemaRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplySchemaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.ApplySchemaRequest"; + }; + + return ApplySchemaRequest; + })(); + + vtadmin.CancelSchemaMigrationRequest = (function() { + + /** + * Properties of a CancelSchemaMigrationRequest. + * @memberof vtadmin + * @interface ICancelSchemaMigrationRequest + * @property {string|null} [cluster_id] CancelSchemaMigrationRequest cluster_id + * @property {vtctldata.ICancelSchemaMigrationRequest|null} [request] CancelSchemaMigrationRequest request + */ + + /** + * Constructs a new CancelSchemaMigrationRequest. + * @memberof vtadmin + * @classdesc Represents a CancelSchemaMigrationRequest. + * @implements ICancelSchemaMigrationRequest + * @constructor + * @param {vtadmin.ICancelSchemaMigrationRequest=} [properties] Properties to set + */ + function CancelSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CancelSchemaMigrationRequest cluster_id. + * @member {string} cluster_id + * @memberof vtadmin.CancelSchemaMigrationRequest + * @instance + */ + CancelSchemaMigrationRequest.prototype.cluster_id = ""; + + /** + * CancelSchemaMigrationRequest request. 
+ * @member {vtctldata.ICancelSchemaMigrationRequest|null|undefined} request + * @memberof vtadmin.CancelSchemaMigrationRequest + * @instance + */ + CancelSchemaMigrationRequest.prototype.request = null; + + /** + * Creates a new CancelSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {vtadmin.ICancelSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtadmin.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest instance + */ + CancelSchemaMigrationRequest.create = function create(properties) { + return new CancelSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified CancelSchemaMigrationRequest message. Does not implicitly {@link vtadmin.CancelSchemaMigrationRequest.verify|verify} messages. + * @function encode + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {vtadmin.ICancelSchemaMigrationRequest} message CancelSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CancelSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.CancelSchemaMigrationRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified CancelSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.CancelSchemaMigrationRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {vtadmin.ICancelSchemaMigrationRequest} message CancelSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CancelSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer. + * @function decode + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CancelSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.CancelSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.CancelSchemaMigrationRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CancelSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CancelSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CancelSchemaMigrationRequest message. + * @function verify + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CancelSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.CancelSchemaMigrationRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates a CancelSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.CancelSchemaMigrationRequest} CancelSchemaMigrationRequest + */ + CancelSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.CancelSchemaMigrationRequest) + return object; + let message = new $root.vtadmin.CancelSchemaMigrationRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.CancelSchemaMigrationRequest.request: object expected"); + message.request = $root.vtctldata.CancelSchemaMigrationRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from a CancelSchemaMigrationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {vtadmin.CancelSchemaMigrationRequest} message CancelSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CancelSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.CancelSchemaMigrationRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this CancelSchemaMigrationRequest to JSON. 
+ * @function toJSON + * @memberof vtadmin.CancelSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + CancelSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CancelSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtadmin.CancelSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CancelSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.CancelSchemaMigrationRequest"; + }; + + return CancelSchemaMigrationRequest; + })(); + + vtadmin.CleanupSchemaMigrationRequest = (function() { + + /** + * Properties of a CleanupSchemaMigrationRequest. + * @memberof vtadmin + * @interface ICleanupSchemaMigrationRequest + * @property {string|null} [cluster_id] CleanupSchemaMigrationRequest cluster_id + * @property {vtctldata.ICleanupSchemaMigrationRequest|null} [request] CleanupSchemaMigrationRequest request + */ + + /** + * Constructs a new CleanupSchemaMigrationRequest. + * @memberof vtadmin + * @classdesc Represents a CleanupSchemaMigrationRequest. + * @implements ICleanupSchemaMigrationRequest + * @constructor + * @param {vtadmin.ICleanupSchemaMigrationRequest=} [properties] Properties to set + */ + function CleanupSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CleanupSchemaMigrationRequest cluster_id. 
+ * @member {string} cluster_id + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @instance + */ + CleanupSchemaMigrationRequest.prototype.cluster_id = ""; + + /** + * CleanupSchemaMigrationRequest request. + * @member {vtctldata.ICleanupSchemaMigrationRequest|null|undefined} request + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @instance + */ + CleanupSchemaMigrationRequest.prototype.request = null; + + /** + * Creates a new CleanupSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {vtadmin.ICleanupSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtadmin.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest instance + */ + CleanupSchemaMigrationRequest.create = function create(properties) { + return new CleanupSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message. Does not implicitly {@link vtadmin.CleanupSchemaMigrationRequest.verify|verify} messages. + * @function encode + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {vtadmin.ICleanupSchemaMigrationRequest} message CleanupSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CleanupSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.CleanupSchemaMigrationRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified CleanupSchemaMigrationRequest message, length delimited. 
Does not implicitly {@link vtadmin.CleanupSchemaMigrationRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {vtadmin.ICleanupSchemaMigrationRequest} message CleanupSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CleanupSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer. + * @function decode + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CleanupSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.CleanupSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.CleanupSchemaMigrationRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CleanupSchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CleanupSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CleanupSchemaMigrationRequest message. + * @function verify + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CleanupSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.CleanupSchemaMigrationRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates a CleanupSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.CleanupSchemaMigrationRequest} CleanupSchemaMigrationRequest + */ + CleanupSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.CleanupSchemaMigrationRequest) + return object; + let message = new $root.vtadmin.CleanupSchemaMigrationRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.CleanupSchemaMigrationRequest.request: object expected"); + message.request = $root.vtctldata.CleanupSchemaMigrationRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from a CleanupSchemaMigrationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {vtadmin.CleanupSchemaMigrationRequest} message CleanupSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CleanupSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.CleanupSchemaMigrationRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this CleanupSchemaMigrationRequest to JSON. 
+ * @function toJSON + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + CleanupSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CleanupSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtadmin.CleanupSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CleanupSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.CleanupSchemaMigrationRequest"; + }; + + return CleanupSchemaMigrationRequest; + })(); + + vtadmin.CompleteSchemaMigrationRequest = (function() { + + /** + * Properties of a CompleteSchemaMigrationRequest. + * @memberof vtadmin + * @interface ICompleteSchemaMigrationRequest + * @property {string|null} [cluster_id] CompleteSchemaMigrationRequest cluster_id + * @property {vtctldata.ICompleteSchemaMigrationRequest|null} [request] CompleteSchemaMigrationRequest request + */ + + /** + * Constructs a new CompleteSchemaMigrationRequest. + * @memberof vtadmin + * @classdesc Represents a CompleteSchemaMigrationRequest. + * @implements ICompleteSchemaMigrationRequest + * @constructor + * @param {vtadmin.ICompleteSchemaMigrationRequest=} [properties] Properties to set + */ + function CompleteSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * CompleteSchemaMigrationRequest cluster_id. 
+ * @member {string} cluster_id + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @instance + */ + CompleteSchemaMigrationRequest.prototype.cluster_id = ""; + + /** + * CompleteSchemaMigrationRequest request. + * @member {vtctldata.ICompleteSchemaMigrationRequest|null|undefined} request + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @instance + */ + CompleteSchemaMigrationRequest.prototype.request = null; + + /** + * Creates a new CompleteSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {vtadmin.ICompleteSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtadmin.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest instance + */ + CompleteSchemaMigrationRequest.create = function create(properties) { + return new CompleteSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message. Does not implicitly {@link vtadmin.CompleteSchemaMigrationRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {vtadmin.ICompleteSchemaMigrationRequest} message CompleteSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CompleteSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.CompleteSchemaMigrationRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified CompleteSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.CompleteSchemaMigrationRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {vtadmin.ICompleteSchemaMigrationRequest} message CompleteSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + CompleteSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CompleteSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.CompleteSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.CompleteSchemaMigrationRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a CompleteSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + CompleteSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a CompleteSchemaMigrationRequest message. 
+ * @function verify + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + CompleteSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.CompleteSchemaMigrationRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates a CompleteSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.CompleteSchemaMigrationRequest} CompleteSchemaMigrationRequest + */ + CompleteSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.CompleteSchemaMigrationRequest) + return object; + let message = new $root.vtadmin.CompleteSchemaMigrationRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.CompleteSchemaMigrationRequest.request: object expected"); + message.request = $root.vtctldata.CompleteSchemaMigrationRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from a CompleteSchemaMigrationRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {vtadmin.CompleteSchemaMigrationRequest} message CompleteSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + CompleteSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.CompleteSchemaMigrationRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this CompleteSchemaMigrationRequest to JSON. + * @function toJSON + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + CompleteSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for CompleteSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtadmin.CompleteSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + CompleteSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.CompleteSchemaMigrationRequest"; + }; + + return CompleteSchemaMigrationRequest; + })(); + vtadmin.CreateKeyspaceRequest = (function() { /** @@ -12904,6 +14300,686 @@ export const vtadmin = $root.vtadmin = (() => { return GetSchemasResponse; })(); + vtadmin.GetSchemaMigrationsRequest = (function() { + + /** + * Properties of a 
GetSchemaMigrationsRequest. + * @memberof vtadmin + * @interface IGetSchemaMigrationsRequest + * @property {Array.|null} [cluster_requests] GetSchemaMigrationsRequest cluster_requests + */ + + /** + * Constructs a new GetSchemaMigrationsRequest. + * @memberof vtadmin + * @classdesc Represents a GetSchemaMigrationsRequest. + * @implements IGetSchemaMigrationsRequest + * @constructor + * @param {vtadmin.IGetSchemaMigrationsRequest=} [properties] Properties to set + */ + function GetSchemaMigrationsRequest(properties) { + this.cluster_requests = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetSchemaMigrationsRequest cluster_requests. + * @member {Array.} cluster_requests + * @memberof vtadmin.GetSchemaMigrationsRequest + * @instance + */ + GetSchemaMigrationsRequest.prototype.cluster_requests = $util.emptyArray; + + /** + * Creates a new GetSchemaMigrationsRequest instance using the specified properties. + * @function create + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {vtadmin.IGetSchemaMigrationsRequest=} [properties] Properties to set + * @returns {vtadmin.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest instance + */ + GetSchemaMigrationsRequest.create = function create(properties) { + return new GetSchemaMigrationsRequest(properties); + }; + + /** + * Encodes the specified GetSchemaMigrationsRequest message. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {vtadmin.IGetSchemaMigrationsRequest} message GetSchemaMigrationsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetSchemaMigrationsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_requests != null && message.cluster_requests.length) + for (let i = 0; i < message.cluster_requests.length; ++i) + $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest.encode(message.cluster_requests[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified GetSchemaMigrationsRequest message, length delimited. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {vtadmin.IGetSchemaMigrationsRequest} message GetSchemaMigrationsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetSchemaMigrationsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetSchemaMigrationsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.GetSchemaMigrationsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.cluster_requests && message.cluster_requests.length)) + message.cluster_requests = []; + message.cluster_requests.push($root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetSchemaMigrationsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetSchemaMigrationsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetSchemaMigrationsRequest message. 
+ * @function verify + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetSchemaMigrationsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_requests != null && message.hasOwnProperty("cluster_requests")) { + if (!Array.isArray(message.cluster_requests)) + return "cluster_requests: array expected"; + for (let i = 0; i < message.cluster_requests.length; ++i) { + let error = $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest.verify(message.cluster_requests[i]); + if (error) + return "cluster_requests." + error; + } + } + return null; + }; + + /** + * Creates a GetSchemaMigrationsRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.GetSchemaMigrationsRequest} GetSchemaMigrationsRequest + */ + GetSchemaMigrationsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.GetSchemaMigrationsRequest) + return object; + let message = new $root.vtadmin.GetSchemaMigrationsRequest(); + if (object.cluster_requests) { + if (!Array.isArray(object.cluster_requests)) + throw TypeError(".vtadmin.GetSchemaMigrationsRequest.cluster_requests: array expected"); + message.cluster_requests = []; + for (let i = 0; i < object.cluster_requests.length; ++i) { + if (typeof object.cluster_requests[i] !== "object") + throw TypeError(".vtadmin.GetSchemaMigrationsRequest.cluster_requests: object expected"); + message.cluster_requests[i] = $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest.fromObject(object.cluster_requests[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a GetSchemaMigrationsRequest 
message. Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {vtadmin.GetSchemaMigrationsRequest} message GetSchemaMigrationsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetSchemaMigrationsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.cluster_requests = []; + if (message.cluster_requests && message.cluster_requests.length) { + object.cluster_requests = []; + for (let j = 0; j < message.cluster_requests.length; ++j) + object.cluster_requests[j] = $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest.toObject(message.cluster_requests[j], options); + } + return object; + }; + + /** + * Converts this GetSchemaMigrationsRequest to JSON. + * @function toJSON + * @memberof vtadmin.GetSchemaMigrationsRequest + * @instance + * @returns {Object.} JSON object + */ + GetSchemaMigrationsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetSchemaMigrationsRequest + * @function getTypeUrl + * @memberof vtadmin.GetSchemaMigrationsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetSchemaMigrationsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.GetSchemaMigrationsRequest"; + }; + + GetSchemaMigrationsRequest.ClusterRequest = (function() { + + /** + * Properties of a ClusterRequest. 
+ * @memberof vtadmin.GetSchemaMigrationsRequest + * @interface IClusterRequest + * @property {string|null} [cluster_id] ClusterRequest cluster_id + * @property {vtctldata.IGetSchemaMigrationsRequest|null} [request] ClusterRequest request + */ + + /** + * Constructs a new ClusterRequest. + * @memberof vtadmin.GetSchemaMigrationsRequest + * @classdesc Represents a ClusterRequest. + * @implements IClusterRequest + * @constructor + * @param {vtadmin.GetSchemaMigrationsRequest.IClusterRequest=} [properties] Properties to set + */ + function ClusterRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ClusterRequest cluster_id. + * @member {string} cluster_id + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @instance + */ + ClusterRequest.prototype.cluster_id = ""; + + /** + * ClusterRequest request. + * @member {vtctldata.IGetSchemaMigrationsRequest|null|undefined} request + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @instance + */ + ClusterRequest.prototype.request = null; + + /** + * Creates a new ClusterRequest instance using the specified properties. + * @function create + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {vtadmin.GetSchemaMigrationsRequest.IClusterRequest=} [properties] Properties to set + * @returns {vtadmin.GetSchemaMigrationsRequest.ClusterRequest} ClusterRequest instance + */ + ClusterRequest.create = function create(properties) { + return new ClusterRequest(properties); + }; + + /** + * Encodes the specified ClusterRequest message. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.ClusterRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {vtadmin.GetSchemaMigrationsRequest.IClusterRequest} message ClusterRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ClusterRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.GetSchemaMigrationsRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ClusterRequest message, length delimited. Does not implicitly {@link vtadmin.GetSchemaMigrationsRequest.ClusterRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {vtadmin.GetSchemaMigrationsRequest.IClusterRequest} message ClusterRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ClusterRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ClusterRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.GetSchemaMigrationsRequest.ClusterRequest} ClusterRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ClusterRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.GetSchemaMigrationsRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ClusterRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.GetSchemaMigrationsRequest.ClusterRequest} ClusterRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ClusterRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ClusterRequest message. 
+ * @function verify + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ClusterRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.GetSchemaMigrationsRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates a ClusterRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.GetSchemaMigrationsRequest.ClusterRequest} ClusterRequest + */ + ClusterRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest) + return object; + let message = new $root.vtadmin.GetSchemaMigrationsRequest.ClusterRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.GetSchemaMigrationsRequest.ClusterRequest.request: object expected"); + message.request = $root.vtctldata.GetSchemaMigrationsRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from a ClusterRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {vtadmin.GetSchemaMigrationsRequest.ClusterRequest} message ClusterRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ClusterRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.GetSchemaMigrationsRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this ClusterRequest to JSON. + * @function toJSON + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @instance + * @returns {Object.} JSON object + */ + ClusterRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ClusterRequest + * @function getTypeUrl + * @memberof vtadmin.GetSchemaMigrationsRequest.ClusterRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ClusterRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.GetSchemaMigrationsRequest.ClusterRequest"; + }; + + return ClusterRequest; + })(); + + return GetSchemaMigrationsRequest; + })(); + + vtadmin.GetSchemaMigrationsResponse = (function() { + + /** + * Properties of a GetSchemaMigrationsResponse. 
+ * @memberof vtadmin + * @interface IGetSchemaMigrationsResponse + * @property {Array.|null} [schema_migrations] GetSchemaMigrationsResponse schema_migrations + */ + + /** + * Constructs a new GetSchemaMigrationsResponse. + * @memberof vtadmin + * @classdesc Represents a GetSchemaMigrationsResponse. + * @implements IGetSchemaMigrationsResponse + * @constructor + * @param {vtadmin.IGetSchemaMigrationsResponse=} [properties] Properties to set + */ + function GetSchemaMigrationsResponse(properties) { + this.schema_migrations = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetSchemaMigrationsResponse schema_migrations. + * @member {Array.} schema_migrations + * @memberof vtadmin.GetSchemaMigrationsResponse + * @instance + */ + GetSchemaMigrationsResponse.prototype.schema_migrations = $util.emptyArray; + + /** + * Creates a new GetSchemaMigrationsResponse instance using the specified properties. + * @function create + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {vtadmin.IGetSchemaMigrationsResponse=} [properties] Properties to set + * @returns {vtadmin.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse instance + */ + GetSchemaMigrationsResponse.create = function create(properties) { + return new GetSchemaMigrationsResponse(properties); + }; + + /** + * Encodes the specified GetSchemaMigrationsResponse message. Does not implicitly {@link vtadmin.GetSchemaMigrationsResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {vtadmin.IGetSchemaMigrationsResponse} message GetSchemaMigrationsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetSchemaMigrationsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.schema_migrations != null && message.schema_migrations.length) + for (let i = 0; i < message.schema_migrations.length; ++i) + $root.vtadmin.SchemaMigration.encode(message.schema_migrations[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified GetSchemaMigrationsResponse message, length delimited. Does not implicitly {@link vtadmin.GetSchemaMigrationsResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {vtadmin.IGetSchemaMigrationsResponse} message GetSchemaMigrationsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetSchemaMigrationsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetSchemaMigrationsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.GetSchemaMigrationsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.schema_migrations && message.schema_migrations.length)) + message.schema_migrations = []; + message.schema_migrations.push($root.vtadmin.SchemaMigration.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetSchemaMigrationsResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetSchemaMigrationsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetSchemaMigrationsResponse message. 
+ * @function verify + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetSchemaMigrationsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.schema_migrations != null && message.hasOwnProperty("schema_migrations")) { + if (!Array.isArray(message.schema_migrations)) + return "schema_migrations: array expected"; + for (let i = 0; i < message.schema_migrations.length; ++i) { + let error = $root.vtadmin.SchemaMigration.verify(message.schema_migrations[i]); + if (error) + return "schema_migrations." + error; + } + } + return null; + }; + + /** + * Creates a GetSchemaMigrationsResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.GetSchemaMigrationsResponse} GetSchemaMigrationsResponse + */ + GetSchemaMigrationsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.GetSchemaMigrationsResponse) + return object; + let message = new $root.vtadmin.GetSchemaMigrationsResponse(); + if (object.schema_migrations) { + if (!Array.isArray(object.schema_migrations)) + throw TypeError(".vtadmin.GetSchemaMigrationsResponse.schema_migrations: array expected"); + message.schema_migrations = []; + for (let i = 0; i < object.schema_migrations.length; ++i) { + if (typeof object.schema_migrations[i] !== "object") + throw TypeError(".vtadmin.GetSchemaMigrationsResponse.schema_migrations: object expected"); + message.schema_migrations[i] = $root.vtadmin.SchemaMigration.fromObject(object.schema_migrations[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a GetSchemaMigrationsResponse message. 
Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {vtadmin.GetSchemaMigrationsResponse} message GetSchemaMigrationsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetSchemaMigrationsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.schema_migrations = []; + if (message.schema_migrations && message.schema_migrations.length) { + object.schema_migrations = []; + for (let j = 0; j < message.schema_migrations.length; ++j) + object.schema_migrations[j] = $root.vtadmin.SchemaMigration.toObject(message.schema_migrations[j], options); + } + return object; + }; + + /** + * Converts this GetSchemaMigrationsResponse to JSON. + * @function toJSON + * @memberof vtadmin.GetSchemaMigrationsResponse + * @instance + * @returns {Object.} JSON object + */ + GetSchemaMigrationsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetSchemaMigrationsResponse + * @function getTypeUrl + * @memberof vtadmin.GetSchemaMigrationsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetSchemaMigrationsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.GetSchemaMigrationsResponse"; + }; + + return GetSchemaMigrationsResponse; + })(); + vtadmin.GetShardReplicationPositionsRequest = (function() { /** @@ -18005,6 +20081,238 @@ export const vtadmin = $root.vtadmin = (() => { return GetWorkflowsResponse; })(); + vtadmin.LaunchSchemaMigrationRequest = (function() { + + /** + * 
Properties of a LaunchSchemaMigrationRequest. + * @memberof vtadmin + * @interface ILaunchSchemaMigrationRequest + * @property {string|null} [cluster_id] LaunchSchemaMigrationRequest cluster_id + * @property {vtctldata.ILaunchSchemaMigrationRequest|null} [request] LaunchSchemaMigrationRequest request + */ + + /** + * Constructs a new LaunchSchemaMigrationRequest. + * @memberof vtadmin + * @classdesc Represents a LaunchSchemaMigrationRequest. + * @implements ILaunchSchemaMigrationRequest + * @constructor + * @param {vtadmin.ILaunchSchemaMigrationRequest=} [properties] Properties to set + */ + function LaunchSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * LaunchSchemaMigrationRequest cluster_id. + * @member {string} cluster_id + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @instance + */ + LaunchSchemaMigrationRequest.prototype.cluster_id = ""; + + /** + * LaunchSchemaMigrationRequest request. + * @member {vtctldata.ILaunchSchemaMigrationRequest|null|undefined} request + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @instance + */ + LaunchSchemaMigrationRequest.prototype.request = null; + + /** + * Creates a new LaunchSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {vtadmin.ILaunchSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtadmin.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest instance + */ + LaunchSchemaMigrationRequest.create = function create(properties) { + return new LaunchSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified LaunchSchemaMigrationRequest message. Does not implicitly {@link vtadmin.LaunchSchemaMigrationRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {vtadmin.ILaunchSchemaMigrationRequest} message LaunchSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LaunchSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.LaunchSchemaMigrationRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified LaunchSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.LaunchSchemaMigrationRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {vtadmin.ILaunchSchemaMigrationRequest} message LaunchSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + LaunchSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + LaunchSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.LaunchSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.LaunchSchemaMigrationRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a LaunchSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + LaunchSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a LaunchSchemaMigrationRequest message. 
+ * @function verify + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + LaunchSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.LaunchSchemaMigrationRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates a LaunchSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.LaunchSchemaMigrationRequest} LaunchSchemaMigrationRequest + */ + LaunchSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.LaunchSchemaMigrationRequest) + return object; + let message = new $root.vtadmin.LaunchSchemaMigrationRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.LaunchSchemaMigrationRequest.request: object expected"); + message.request = $root.vtctldata.LaunchSchemaMigrationRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from a LaunchSchemaMigrationRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {vtadmin.LaunchSchemaMigrationRequest} message LaunchSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + LaunchSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.LaunchSchemaMigrationRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this LaunchSchemaMigrationRequest to JSON. + * @function toJSON + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + LaunchSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for LaunchSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtadmin.LaunchSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + LaunchSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.LaunchSchemaMigrationRequest"; + }; + + return LaunchSchemaMigrationRequest; + })(); + vtadmin.PingTabletRequest = (function() { /** @@ -20145,7 +22453,7 @@ export const vtadmin = $root.vtadmin = (() => { for (let i = 0; i < message.cluster_ids.length; ++i) writer.uint32(/* id 4, wireType 2 =*/34).string(message.cluster_ids[i]); if (message.concurrency != 
null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.concurrency); + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.concurrency); if (message.wait_position != null && Object.hasOwnProperty.call(message, "wait_position")) writer.uint32(/* id 6, wireType 2 =*/50).string(message.wait_position); if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) @@ -20209,7 +22517,7 @@ export const vtadmin = $root.vtadmin = (() => { break; } case 5: { - message.concurrency = reader.uint32(); + message.concurrency = reader.int32(); break; } case 6: { @@ -20341,7 +22649,7 @@ export const vtadmin = $root.vtadmin = (() => { message.cluster_ids[i] = String(object.cluster_ids[i]); } if (object.concurrency != null) - message.concurrency = object.concurrency >>> 0; + message.concurrency = object.concurrency | 0; if (object.wait_position != null) message.wait_position = String(object.wait_position); if (object.include_primary != null) @@ -21594,7 +23902,7 @@ export const vtadmin = $root.vtadmin = (() => { if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) writer.uint32(/* id 5, wireType 0 =*/40).bool(message.include_primary); if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 6, wireType 0 =*/48).uint32(message.concurrency); + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.concurrency); return writer; }; @@ -21650,7 +23958,7 @@ export const vtadmin = $root.vtadmin = (() => { break; } case 6: { - message.concurrency = reader.uint32(); + message.concurrency = reader.int32(); break; } default: @@ -21732,7 +24040,7 @@ export const vtadmin = $root.vtadmin = (() => { if (object.include_primary != null) message.include_primary = Boolean(object.include_primary); if (object.concurrency != null) - message.concurrency = object.concurrency >>> 0; + message.concurrency = 
object.concurrency | 0; return message; }; @@ -23055,6 +25363,238 @@ export const vtadmin = $root.vtadmin = (() => { return RemoveKeyspaceCellResponse; })(); + vtadmin.RetrySchemaMigrationRequest = (function() { + + /** + * Properties of a RetrySchemaMigrationRequest. + * @memberof vtadmin + * @interface IRetrySchemaMigrationRequest + * @property {string|null} [cluster_id] RetrySchemaMigrationRequest cluster_id + * @property {vtctldata.IRetrySchemaMigrationRequest|null} [request] RetrySchemaMigrationRequest request + */ + + /** + * Constructs a new RetrySchemaMigrationRequest. + * @memberof vtadmin + * @classdesc Represents a RetrySchemaMigrationRequest. + * @implements IRetrySchemaMigrationRequest + * @constructor + * @param {vtadmin.IRetrySchemaMigrationRequest=} [properties] Properties to set + */ + function RetrySchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * RetrySchemaMigrationRequest cluster_id. + * @member {string} cluster_id + * @memberof vtadmin.RetrySchemaMigrationRequest + * @instance + */ + RetrySchemaMigrationRequest.prototype.cluster_id = ""; + + /** + * RetrySchemaMigrationRequest request. + * @member {vtctldata.IRetrySchemaMigrationRequest|null|undefined} request + * @memberof vtadmin.RetrySchemaMigrationRequest + * @instance + */ + RetrySchemaMigrationRequest.prototype.request = null; + + /** + * Creates a new RetrySchemaMigrationRequest instance using the specified properties. 
+ * @function create + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {vtadmin.IRetrySchemaMigrationRequest=} [properties] Properties to set + * @returns {vtadmin.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest instance + */ + RetrySchemaMigrationRequest.create = function create(properties) { + return new RetrySchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified RetrySchemaMigrationRequest message. Does not implicitly {@link vtadmin.RetrySchemaMigrationRequest.verify|verify} messages. + * @function encode + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {vtadmin.IRetrySchemaMigrationRequest} message RetrySchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + RetrySchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.cluster_id != null && Object.hasOwnProperty.call(message, "cluster_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.cluster_id); + if (message.request != null && Object.hasOwnProperty.call(message, "request")) + $root.vtctldata.RetrySchemaMigrationRequest.encode(message.request, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified RetrySchemaMigrationRequest message, length delimited. Does not implicitly {@link vtadmin.RetrySchemaMigrationRequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {vtadmin.IRetrySchemaMigrationRequest} message RetrySchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + RetrySchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer. + * @function decode + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtadmin.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + RetrySchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtadmin.RetrySchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.cluster_id = reader.string(); + break; + } + case 2: { + message.request = $root.vtctldata.RetrySchemaMigrationRequest.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a RetrySchemaMigrationRequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtadmin.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + RetrySchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a RetrySchemaMigrationRequest message. + * @function verify + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + RetrySchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + if (!$util.isString(message.cluster_id)) + return "cluster_id: string expected"; + if (message.request != null && message.hasOwnProperty("request")) { + let error = $root.vtctldata.RetrySchemaMigrationRequest.verify(message.request); + if (error) + return "request." + error; + } + return null; + }; + + /** + * Creates a RetrySchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtadmin.RetrySchemaMigrationRequest} RetrySchemaMigrationRequest + */ + RetrySchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtadmin.RetrySchemaMigrationRequest) + return object; + let message = new $root.vtadmin.RetrySchemaMigrationRequest(); + if (object.cluster_id != null) + message.cluster_id = String(object.cluster_id); + if (object.request != null) { + if (typeof object.request !== "object") + throw TypeError(".vtadmin.RetrySchemaMigrationRequest.request: object expected"); + message.request = $root.vtctldata.RetrySchemaMigrationRequest.fromObject(object.request); + } + return message; + }; + + /** + * Creates a plain object from a RetrySchemaMigrationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {vtadmin.RetrySchemaMigrationRequest} message RetrySchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + RetrySchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.cluster_id = ""; + object.request = null; + } + if (message.cluster_id != null && message.hasOwnProperty("cluster_id")) + object.cluster_id = message.cluster_id; + if (message.request != null && message.hasOwnProperty("request")) + object.request = $root.vtctldata.RetrySchemaMigrationRequest.toObject(message.request, options); + return object; + }; + + /** + * Converts this RetrySchemaMigrationRequest to JSON. 
+ * @function toJSON + * @memberof vtadmin.RetrySchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + RetrySchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for RetrySchemaMigrationRequest + * @function getTypeUrl + * @memberof vtadmin.RetrySchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + RetrySchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtadmin.RetrySchemaMigrationRequest"; + }; + + return RetrySchemaMigrationRequest; + })(); + vtadmin.RunHealthCheckRequest = (function() { /** @@ -29336,6 +31876,7 @@ export const mysqlctl = $root.mysqlctl = (() => { * @memberof mysqlctl * @interface IShutdownRequest * @property {boolean|null} [wait_for_mysqld] ShutdownRequest wait_for_mysqld + * @property {vttime.IDuration|null} [mysql_shutdown_timeout] ShutdownRequest mysql_shutdown_timeout */ /** @@ -29361,6 +31902,14 @@ export const mysqlctl = $root.mysqlctl = (() => { */ ShutdownRequest.prototype.wait_for_mysqld = false; + /** + * ShutdownRequest mysql_shutdown_timeout. + * @member {vttime.IDuration|null|undefined} mysql_shutdown_timeout + * @memberof mysqlctl.ShutdownRequest + * @instance + */ + ShutdownRequest.prototype.mysql_shutdown_timeout = null; + /** * Creates a new ShutdownRequest instance using the specified properties. 
* @function create @@ -29387,6 +31936,8 @@ export const mysqlctl = $root.mysqlctl = (() => { writer = $Writer.create(); if (message.wait_for_mysqld != null && Object.hasOwnProperty.call(message, "wait_for_mysqld")) writer.uint32(/* id 1, wireType 0 =*/8).bool(message.wait_for_mysqld); + if (message.mysql_shutdown_timeout != null && Object.hasOwnProperty.call(message, "mysql_shutdown_timeout")) + $root.vttime.Duration.encode(message.mysql_shutdown_timeout, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); return writer; }; @@ -29425,6 +31976,10 @@ export const mysqlctl = $root.mysqlctl = (() => { message.wait_for_mysqld = reader.bool(); break; } + case 2: { + message.mysql_shutdown_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -29463,6 +32018,11 @@ export const mysqlctl = $root.mysqlctl = (() => { if (message.wait_for_mysqld != null && message.hasOwnProperty("wait_for_mysqld")) if (typeof message.wait_for_mysqld !== "boolean") return "wait_for_mysqld: boolean expected"; + if (message.mysql_shutdown_timeout != null && message.hasOwnProperty("mysql_shutdown_timeout")) { + let error = $root.vttime.Duration.verify(message.mysql_shutdown_timeout); + if (error) + return "mysql_shutdown_timeout." 
+ error; + } return null; }; @@ -29480,6 +32040,11 @@ export const mysqlctl = $root.mysqlctl = (() => { let message = new $root.mysqlctl.ShutdownRequest(); if (object.wait_for_mysqld != null) message.wait_for_mysqld = Boolean(object.wait_for_mysqld); + if (object.mysql_shutdown_timeout != null) { + if (typeof object.mysql_shutdown_timeout !== "object") + throw TypeError(".mysqlctl.ShutdownRequest.mysql_shutdown_timeout: object expected"); + message.mysql_shutdown_timeout = $root.vttime.Duration.fromObject(object.mysql_shutdown_timeout); + } return message; }; @@ -29496,10 +32061,14 @@ export const mysqlctl = $root.mysqlctl = (() => { if (!options) options = {}; let object = {}; - if (options.defaults) + if (options.defaults) { object.wait_for_mysqld = false; + object.mysql_shutdown_timeout = null; + } if (message.wait_for_mysqld != null && message.hasOwnProperty("wait_for_mysqld")) object.wait_for_mysqld = message.wait_for_mysqld; + if (message.mysql_shutdown_timeout != null && message.hasOwnProperty("mysql_shutdown_timeout")) + object.mysql_shutdown_timeout = $root.vttime.Duration.toObject(message.mysql_shutdown_timeout, options); return object; }; @@ -35028,7 +37597,6 @@ export const topodata = $root.topodata = (() => { * Properties of a Keyspace. * @memberof topodata * @interface IKeyspace - * @property {Array.|null} [served_froms] Keyspace served_froms * @property {topodata.KeyspaceType|null} [keyspace_type] Keyspace keyspace_type * @property {string|null} [base_keyspace] Keyspace base_keyspace * @property {vttime.ITime|null} [snapshot_time] Keyspace snapshot_time @@ -35046,21 +37614,12 @@ export const topodata = $root.topodata = (() => { * @param {topodata.IKeyspace=} [properties] Properties to set */ function Keyspace(properties) { - this.served_froms = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) this[keys[i]] = properties[keys[i]]; } - /** - * Keyspace served_froms. 
- * @member {Array.} served_froms - * @memberof topodata.Keyspace - * @instance - */ - Keyspace.prototype.served_froms = $util.emptyArray; - /** * Keyspace keyspace_type. * @member {topodata.KeyspaceType} keyspace_type @@ -35133,9 +37692,6 @@ export const topodata = $root.topodata = (() => { Keyspace.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.served_froms != null && message.served_froms.length) - for (let i = 0; i < message.served_froms.length; ++i) - $root.topodata.Keyspace.ServedFrom.encode(message.served_froms[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.keyspace_type != null && Object.hasOwnProperty.call(message, "keyspace_type")) writer.uint32(/* id 5, wireType 0 =*/40).int32(message.keyspace_type); if (message.base_keyspace != null && Object.hasOwnProperty.call(message, "base_keyspace")) @@ -35182,12 +37738,6 @@ export const topodata = $root.topodata = (() => { while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { - case 4: { - if (!(message.served_froms && message.served_froms.length)) - message.served_froms = []; - message.served_froms.push($root.topodata.Keyspace.ServedFrom.decode(reader, reader.uint32())); - break; - } case 5: { message.keyspace_type = reader.int32(); break; @@ -35247,15 +37797,6 @@ export const topodata = $root.topodata = (() => { Keyspace.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.served_froms != null && message.hasOwnProperty("served_froms")) { - if (!Array.isArray(message.served_froms)) - return "served_froms: array expected"; - for (let i = 0; i < message.served_froms.length; ++i) { - let error = $root.topodata.Keyspace.ServedFrom.verify(message.served_froms[i]); - if (error) - return "served_froms." 
+ error; - } - } if (message.keyspace_type != null && message.hasOwnProperty("keyspace_type")) switch (message.keyspace_type) { default: @@ -35298,16 +37839,6 @@ export const topodata = $root.topodata = (() => { if (object instanceof $root.topodata.Keyspace) return object; let message = new $root.topodata.Keyspace(); - if (object.served_froms) { - if (!Array.isArray(object.served_froms)) - throw TypeError(".topodata.Keyspace.served_froms: array expected"); - message.served_froms = []; - for (let i = 0; i < object.served_froms.length; ++i) { - if (typeof object.served_froms[i] !== "object") - throw TypeError(".topodata.Keyspace.served_froms: object expected"); - message.served_froms[i] = $root.topodata.Keyspace.ServedFrom.fromObject(object.served_froms[i]); - } - } switch (object.keyspace_type) { default: if (typeof object.keyspace_type === "number") { @@ -35356,8 +37887,6 @@ export const topodata = $root.topodata = (() => { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.served_froms = []; if (options.defaults) { object.keyspace_type = options.enums === String ? "NORMAL" : 0; object.base_keyspace = ""; @@ -35366,11 +37895,6 @@ export const topodata = $root.topodata = (() => { object.throttler_config = null; object.sidecar_db_name = ""; } - if (message.served_froms && message.served_froms.length) { - object.served_froms = []; - for (let j = 0; j < message.served_froms.length; ++j) - object.served_froms[j] = $root.topodata.Keyspace.ServedFrom.toObject(message.served_froms[j], options); - } if (message.keyspace_type != null && message.hasOwnProperty("keyspace_type")) object.keyspace_type = options.enums === String ? $root.topodata.KeyspaceType[message.keyspace_type] === undefined ? 
message.keyspace_type : $root.topodata.KeyspaceType[message.keyspace_type] : message.keyspace_type; if (message.base_keyspace != null && message.hasOwnProperty("base_keyspace")) @@ -35412,337 +37936,6 @@ export const topodata = $root.topodata = (() => { return typeUrlPrefix + "/topodata.Keyspace"; }; - Keyspace.ServedFrom = (function() { - - /** - * Properties of a ServedFrom. - * @memberof topodata.Keyspace - * @interface IServedFrom - * @property {topodata.TabletType|null} [tablet_type] ServedFrom tablet_type - * @property {Array.|null} [cells] ServedFrom cells - * @property {string|null} [keyspace] ServedFrom keyspace - */ - - /** - * Constructs a new ServedFrom. - * @memberof topodata.Keyspace - * @classdesc Represents a ServedFrom. - * @implements IServedFrom - * @constructor - * @param {topodata.Keyspace.IServedFrom=} [properties] Properties to set - */ - function ServedFrom(properties) { - this.cells = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * ServedFrom tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof topodata.Keyspace.ServedFrom - * @instance - */ - ServedFrom.prototype.tablet_type = 0; - - /** - * ServedFrom cells. - * @member {Array.} cells - * @memberof topodata.Keyspace.ServedFrom - * @instance - */ - ServedFrom.prototype.cells = $util.emptyArray; - - /** - * ServedFrom keyspace. - * @member {string} keyspace - * @memberof topodata.Keyspace.ServedFrom - * @instance - */ - ServedFrom.prototype.keyspace = ""; - - /** - * Creates a new ServedFrom instance using the specified properties. 
- * @function create - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {topodata.Keyspace.IServedFrom=} [properties] Properties to set - * @returns {topodata.Keyspace.ServedFrom} ServedFrom instance - */ - ServedFrom.create = function create(properties) { - return new ServedFrom(properties); - }; - - /** - * Encodes the specified ServedFrom message. Does not implicitly {@link topodata.Keyspace.ServedFrom.verify|verify} messages. - * @function encode - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {topodata.Keyspace.IServedFrom} message ServedFrom message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ServedFrom.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.tablet_type); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.cells[i]); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.keyspace); - return writer; - }; - - /** - * Encodes the specified ServedFrom message, length delimited. Does not implicitly {@link topodata.Keyspace.ServedFrom.verify|verify} messages. - * @function encodeDelimited - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {topodata.Keyspace.IServedFrom} message ServedFrom message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ServedFrom.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a ServedFrom message from the specified reader or buffer. 
- * @function decode - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {topodata.Keyspace.ServedFrom} ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ServedFrom.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.topodata.Keyspace.ServedFrom(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.tablet_type = reader.int32(); - break; - } - case 2: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 3: { - message.keyspace = reader.string(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a ServedFrom message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {topodata.Keyspace.ServedFrom} ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ServedFrom.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a ServedFrom message. 
- * @function verify - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ServedFrom.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - return null; - }; - - /** - * Creates a ServedFrom message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {Object.} object Plain object - * @returns {topodata.Keyspace.ServedFrom} ServedFrom - */ - ServedFrom.fromObject = function fromObject(object) { - if (object instanceof $root.topodata.Keyspace.ServedFrom) - return object; - let message = new $root.topodata.Keyspace.ServedFrom(); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; - } - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".topodata.Keyspace.ServedFrom.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); - } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - return message; - }; - - /** - * Creates a plain object from a ServedFrom message. Also converts values to other types if specified. 
- * @function toObject - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {topodata.Keyspace.ServedFrom} message ServedFrom - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ServedFrom.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.cells = []; - if (options.defaults) { - object.tablet_type = options.enums === String ? "UNKNOWN" : 0; - object.keyspace = ""; - } - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - return object; - }; - - /** - * Converts this ServedFrom to JSON. 
- * @function toJSON - * @memberof topodata.Keyspace.ServedFrom - * @instance - * @returns {Object.} JSON object - */ - ServedFrom.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for ServedFrom - * @function getTypeUrl - * @memberof topodata.Keyspace.ServedFrom - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ServedFrom.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/topodata.Keyspace.ServedFrom"; - }; - - return ServedFrom; - })(); - return Keyspace; })(); @@ -37561,7 +39754,6 @@ export const topodata = $root.topodata = (() => { * @memberof topodata * @interface ISrvKeyspace * @property {Array.|null} [partitions] SrvKeyspace partitions - * @property {Array.|null} [served_from] SrvKeyspace served_from * @property {topodata.IThrottlerConfig|null} [throttler_config] SrvKeyspace throttler_config */ @@ -37575,7 +39767,6 @@ export const topodata = $root.topodata = (() => { */ function SrvKeyspace(properties) { this.partitions = []; - this.served_from = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -37590,14 +39781,6 @@ export const topodata = $root.topodata = (() => { */ SrvKeyspace.prototype.partitions = $util.emptyArray; - /** - * SrvKeyspace served_from. - * @member {Array.} served_from - * @memberof topodata.SrvKeyspace - * @instance - */ - SrvKeyspace.prototype.served_from = $util.emptyArray; - /** * SrvKeyspace throttler_config. 
* @member {topodata.IThrottlerConfig|null|undefined} throttler_config @@ -37633,9 +39816,6 @@ export const topodata = $root.topodata = (() => { if (message.partitions != null && message.partitions.length) for (let i = 0; i < message.partitions.length; ++i) $root.topodata.SrvKeyspace.KeyspacePartition.encode(message.partitions[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - if (message.served_from != null && message.served_from.length) - for (let i = 0; i < message.served_from.length; ++i) - $root.topodata.SrvKeyspace.ServedFrom.encode(message.served_from[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.throttler_config != null && Object.hasOwnProperty.call(message, "throttler_config")) $root.topodata.ThrottlerConfig.encode(message.throttler_config, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; @@ -37678,12 +39858,6 @@ export const topodata = $root.topodata = (() => { message.partitions.push($root.topodata.SrvKeyspace.KeyspacePartition.decode(reader, reader.uint32())); break; } - case 4: { - if (!(message.served_from && message.served_from.length)) - message.served_from = []; - message.served_from.push($root.topodata.SrvKeyspace.ServedFrom.decode(reader, reader.uint32())); - break; - } case 6: { message.throttler_config = $root.topodata.ThrottlerConfig.decode(reader, reader.uint32()); break; @@ -37732,15 +39906,6 @@ export const topodata = $root.topodata = (() => { return "partitions." + error; } } - if (message.served_from != null && message.hasOwnProperty("served_from")) { - if (!Array.isArray(message.served_from)) - return "served_from: array expected"; - for (let i = 0; i < message.served_from.length; ++i) { - let error = $root.topodata.SrvKeyspace.ServedFrom.verify(message.served_from[i]); - if (error) - return "served_from." 
+ error; - } - } if (message.throttler_config != null && message.hasOwnProperty("throttler_config")) { let error = $root.topodata.ThrottlerConfig.verify(message.throttler_config); if (error) @@ -37771,16 +39936,6 @@ export const topodata = $root.topodata = (() => { message.partitions[i] = $root.topodata.SrvKeyspace.KeyspacePartition.fromObject(object.partitions[i]); } } - if (object.served_from) { - if (!Array.isArray(object.served_from)) - throw TypeError(".topodata.SrvKeyspace.served_from: array expected"); - message.served_from = []; - for (let i = 0; i < object.served_from.length; ++i) { - if (typeof object.served_from[i] !== "object") - throw TypeError(".topodata.SrvKeyspace.served_from: object expected"); - message.served_from[i] = $root.topodata.SrvKeyspace.ServedFrom.fromObject(object.served_from[i]); - } - } if (object.throttler_config != null) { if (typeof object.throttler_config !== "object") throw TypeError(".topodata.SrvKeyspace.throttler_config: object expected"); @@ -37802,10 +39957,8 @@ export const topodata = $root.topodata = (() => { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) { + if (options.arrays || options.defaults) object.partitions = []; - object.served_from = []; - } if (options.defaults) object.throttler_config = null; if (message.partitions && message.partitions.length) { @@ -37813,11 +39966,6 @@ export const topodata = $root.topodata = (() => { for (let j = 0; j < message.partitions.length; ++j) object.partitions[j] = $root.topodata.SrvKeyspace.KeyspacePartition.toObject(message.partitions[j], options); } - if (message.served_from && message.served_from.length) { - object.served_from = []; - for (let j = 0; j < message.served_from.length; ++j) - object.served_from[j] = $root.topodata.SrvKeyspace.ServedFrom.toObject(message.served_from[j], options); - } if (message.throttler_config != null && message.hasOwnProperty("throttler_config")) object.throttler_config = 
$root.topodata.ThrottlerConfig.toObject(message.throttler_config, options); return object; @@ -38206,297 +40354,6 @@ export const topodata = $root.topodata = (() => { return KeyspacePartition; })(); - SrvKeyspace.ServedFrom = (function() { - - /** - * Properties of a ServedFrom. - * @memberof topodata.SrvKeyspace - * @interface IServedFrom - * @property {topodata.TabletType|null} [tablet_type] ServedFrom tablet_type - * @property {string|null} [keyspace] ServedFrom keyspace - */ - - /** - * Constructs a new ServedFrom. - * @memberof topodata.SrvKeyspace - * @classdesc Represents a ServedFrom. - * @implements IServedFrom - * @constructor - * @param {topodata.SrvKeyspace.IServedFrom=} [properties] Properties to set - */ - function ServedFrom(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * ServedFrom tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof topodata.SrvKeyspace.ServedFrom - * @instance - */ - ServedFrom.prototype.tablet_type = 0; - - /** - * ServedFrom keyspace. - * @member {string} keyspace - * @memberof topodata.SrvKeyspace.ServedFrom - * @instance - */ - ServedFrom.prototype.keyspace = ""; - - /** - * Creates a new ServedFrom instance using the specified properties. - * @function create - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {topodata.SrvKeyspace.IServedFrom=} [properties] Properties to set - * @returns {topodata.SrvKeyspace.ServedFrom} ServedFrom instance - */ - ServedFrom.create = function create(properties) { - return new ServedFrom(properties); - }; - - /** - * Encodes the specified ServedFrom message. Does not implicitly {@link topodata.SrvKeyspace.ServedFrom.verify|verify} messages. 
- * @function encode - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {topodata.SrvKeyspace.IServedFrom} message ServedFrom message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ServedFrom.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 1, wireType 0 =*/8).int32(message.tablet_type); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 2, wireType 2 =*/18).string(message.keyspace); - return writer; - }; - - /** - * Encodes the specified ServedFrom message, length delimited. Does not implicitly {@link topodata.SrvKeyspace.ServedFrom.verify|verify} messages. - * @function encodeDelimited - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {topodata.SrvKeyspace.IServedFrom} message ServedFrom message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - ServedFrom.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a ServedFrom message from the specified reader or buffer. - * @function decode - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {topodata.SrvKeyspace.ServedFrom} ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ServedFrom.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.topodata.SrvKeyspace.ServedFrom(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.tablet_type = reader.int32(); - break; - } - case 2: { - message.keyspace = reader.string(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a ServedFrom message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {topodata.SrvKeyspace.ServedFrom} ServedFrom - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - ServedFrom.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a ServedFrom message. - * @function verify - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - ServedFrom.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - return null; - }; - - /** - * Creates a ServedFrom message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {Object.} object Plain object - * @returns {topodata.SrvKeyspace.ServedFrom} ServedFrom - */ - ServedFrom.fromObject = function fromObject(object) { - if (object instanceof $root.topodata.SrvKeyspace.ServedFrom) - return object; - let message = new $root.topodata.SrvKeyspace.ServedFrom(); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; - } - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - return message; - }; - - /** - * Creates a plain object from a ServedFrom message. Also converts values to other types if specified. - * @function toObject - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {topodata.SrvKeyspace.ServedFrom} message ServedFrom - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - ServedFrom.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) { - object.tablet_type = options.enums === String ? 
"UNKNOWN" : 0; - object.keyspace = ""; - } - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - return object; - }; - - /** - * Converts this ServedFrom to JSON. - * @function toJSON - * @memberof topodata.SrvKeyspace.ServedFrom - * @instance - * @returns {Object.} JSON object - */ - ServedFrom.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for ServedFrom - * @function getTypeUrl - * @memberof topodata.SrvKeyspace.ServedFrom - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - ServedFrom.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/topodata.SrvKeyspace.ServedFrom"; - }; - - return ServedFrom; - })(); - return SrvKeyspace; })(); @@ -46452,6 +48309,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {tabletmanagerdata.ISchemaDefinition|null} [after_schema] ApplySchemaRequest after_schema * @property {string|null} [sql_mode] ApplySchemaRequest sql_mode * @property {number|Long|null} [batch_size] ApplySchemaRequest batch_size + * @property {boolean|null} [disable_foreign_key_checks] ApplySchemaRequest disable_foreign_key_checks */ /** @@ -46525,6 +48383,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ ApplySchemaRequest.prototype.batch_size = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** + * ApplySchemaRequest disable_foreign_key_checks. 
+ * @member {boolean} disable_foreign_key_checks + * @memberof tabletmanagerdata.ApplySchemaRequest + * @instance + */ + ApplySchemaRequest.prototype.disable_foreign_key_checks = false; + /** * Creates a new ApplySchemaRequest instance using the specified properties. * @function create @@ -46563,6 +48429,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 6, wireType 2 =*/50).string(message.sql_mode); if (message.batch_size != null && Object.hasOwnProperty.call(message, "batch_size")) writer.uint32(/* id 7, wireType 0 =*/56).int64(message.batch_size); + if (message.disable_foreign_key_checks != null && Object.hasOwnProperty.call(message, "disable_foreign_key_checks")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.disable_foreign_key_checks); return writer; }; @@ -46625,6 +48493,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.batch_size = reader.int64(); break; } + case 8: { + message.disable_foreign_key_checks = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -46685,6 +48557,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.batch_size != null && message.hasOwnProperty("batch_size")) if (!$util.isInteger(message.batch_size) && !(message.batch_size && $util.isInteger(message.batch_size.low) && $util.isInteger(message.batch_size.high))) return "batch_size: integer|Long expected"; + if (message.disable_foreign_key_checks != null && message.hasOwnProperty("disable_foreign_key_checks")) + if (typeof message.disable_foreign_key_checks !== "boolean") + return "disable_foreign_key_checks: boolean expected"; return null; }; @@ -46727,6 +48602,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.batch_size = object.batch_size; else if (typeof object.batch_size === "object") message.batch_size = new $util.LongBits(object.batch_size.low >>> 0, object.batch_size.high >>> 0).toNumber(); + if 
(object.disable_foreign_key_checks != null) + message.disable_foreign_key_checks = Boolean(object.disable_foreign_key_checks); return message; }; @@ -46755,6 +48632,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.batch_size = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; } else object.batch_size = options.longs === String ? "0" : 0; + object.disable_foreign_key_checks = false; } if (message.sql != null && message.hasOwnProperty("sql")) object.sql = message.sql; @@ -46773,6 +48651,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.batch_size = options.longs === String ? String(message.batch_size) : message.batch_size; else object.batch_size = options.longs === String ? $util.Long.prototype.toString.call(message.batch_size) : options.longs === Number ? new $util.LongBits(message.batch_size.low >>> 0, message.batch_size.high >>> 0).toNumber() : message.batch_size; + if (message.disable_foreign_key_checks != null && message.hasOwnProperty("disable_foreign_key_checks")) + object.disable_foreign_key_checks = message.disable_foreign_key_checks; return object; }; @@ -48262,6 +50142,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {number|Long|null} [max_rows] ExecuteFetchAsDbaRequest max_rows * @property {boolean|null} [disable_binlogs] ExecuteFetchAsDbaRequest disable_binlogs * @property {boolean|null} [reload_schema] ExecuteFetchAsDbaRequest reload_schema + * @property {boolean|null} [disable_foreign_key_checks] ExecuteFetchAsDbaRequest disable_foreign_key_checks */ /** @@ -48319,6 +50200,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ ExecuteFetchAsDbaRequest.prototype.reload_schema = false; + /** + * ExecuteFetchAsDbaRequest disable_foreign_key_checks. 
+ * @member {boolean} disable_foreign_key_checks + * @memberof tabletmanagerdata.ExecuteFetchAsDbaRequest + * @instance + */ + ExecuteFetchAsDbaRequest.prototype.disable_foreign_key_checks = false; + /** * Creates a new ExecuteFetchAsDbaRequest instance using the specified properties. * @function create @@ -48353,6 +50242,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 4, wireType 0 =*/32).bool(message.disable_binlogs); if (message.reload_schema != null && Object.hasOwnProperty.call(message, "reload_schema")) writer.uint32(/* id 5, wireType 0 =*/40).bool(message.reload_schema); + if (message.disable_foreign_key_checks != null && Object.hasOwnProperty.call(message, "disable_foreign_key_checks")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.disable_foreign_key_checks); return writer; }; @@ -48407,6 +50298,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.reload_schema = reader.bool(); break; } + case 6: { + message.disable_foreign_key_checks = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -48457,6 +50352,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) if (typeof message.reload_schema !== "boolean") return "reload_schema: boolean expected"; + if (message.disable_foreign_key_checks != null && message.hasOwnProperty("disable_foreign_key_checks")) + if (typeof message.disable_foreign_key_checks !== "boolean") + return "disable_foreign_key_checks: boolean expected"; return null; }; @@ -48492,6 +50390,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.disable_binlogs = Boolean(object.disable_binlogs); if (object.reload_schema != null) message.reload_schema = Boolean(object.reload_schema); + if (object.disable_foreign_key_checks != null) + message.disable_foreign_key_checks = Boolean(object.disable_foreign_key_checks); return 
message; }; @@ -48524,6 +50424,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.max_rows = options.longs === String ? "0" : 0; object.disable_binlogs = false; object.reload_schema = false; + object.disable_foreign_key_checks = false; } if (message.query != null && message.hasOwnProperty("query")) object.query = options.bytes === String ? $util.base64.encode(message.query, 0, message.query.length) : options.bytes === Array ? Array.prototype.slice.call(message.query) : message.query; @@ -48538,6 +50439,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.disable_binlogs = message.disable_binlogs; if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) object.reload_schema = message.reload_schema; + if (message.disable_foreign_key_checks != null && message.hasOwnProperty("disable_foreign_key_checks")) + object.disable_foreign_key_checks = message.disable_foreign_key_checks; return object; }; @@ -48778,6 +50681,572 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { return ExecuteFetchAsDbaResponse; })(); + tabletmanagerdata.ExecuteMultiFetchAsDbaRequest = (function() { + + /** + * Properties of an ExecuteMultiFetchAsDbaRequest. + * @memberof tabletmanagerdata + * @interface IExecuteMultiFetchAsDbaRequest + * @property {Uint8Array|null} [sql] ExecuteMultiFetchAsDbaRequest sql + * @property {string|null} [db_name] ExecuteMultiFetchAsDbaRequest db_name + * @property {number|Long|null} [max_rows] ExecuteMultiFetchAsDbaRequest max_rows + * @property {boolean|null} [disable_binlogs] ExecuteMultiFetchAsDbaRequest disable_binlogs + * @property {boolean|null} [reload_schema] ExecuteMultiFetchAsDbaRequest reload_schema + * @property {boolean|null} [disable_foreign_key_checks] ExecuteMultiFetchAsDbaRequest disable_foreign_key_checks + */ + + /** + * Constructs a new ExecuteMultiFetchAsDbaRequest. 
+ * @memberof tabletmanagerdata + * @classdesc Represents an ExecuteMultiFetchAsDbaRequest. + * @implements IExecuteMultiFetchAsDbaRequest + * @constructor + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaRequest=} [properties] Properties to set + */ + function ExecuteMultiFetchAsDbaRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteMultiFetchAsDbaRequest sql. + * @member {Uint8Array} sql + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + */ + ExecuteMultiFetchAsDbaRequest.prototype.sql = $util.newBuffer([]); + + /** + * ExecuteMultiFetchAsDbaRequest db_name. + * @member {string} db_name + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + */ + ExecuteMultiFetchAsDbaRequest.prototype.db_name = ""; + + /** + * ExecuteMultiFetchAsDbaRequest max_rows. + * @member {number|Long} max_rows + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + */ + ExecuteMultiFetchAsDbaRequest.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + + /** + * ExecuteMultiFetchAsDbaRequest disable_binlogs. + * @member {boolean} disable_binlogs + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + */ + ExecuteMultiFetchAsDbaRequest.prototype.disable_binlogs = false; + + /** + * ExecuteMultiFetchAsDbaRequest reload_schema. + * @member {boolean} reload_schema + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + */ + ExecuteMultiFetchAsDbaRequest.prototype.reload_schema = false; + + /** + * ExecuteMultiFetchAsDbaRequest disable_foreign_key_checks. 
+ * @member {boolean} disable_foreign_key_checks + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + */ + ExecuteMultiFetchAsDbaRequest.prototype.disable_foreign_key_checks = false; + + /** + * Creates a new ExecuteMultiFetchAsDbaRequest instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaRequest} ExecuteMultiFetchAsDbaRequest instance + */ + ExecuteMultiFetchAsDbaRequest.create = function create(properties) { + return new ExecuteMultiFetchAsDbaRequest(properties); + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaRequest message. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaRequest.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaRequest} message ExecuteMultiFetchAsDbaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDbaRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) + writer.uint32(/* id 1, wireType 2 =*/10).bytes(message.sql); + if (message.db_name != null && Object.hasOwnProperty.call(message, "db_name")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.db_name); + if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) + writer.uint32(/* id 3, wireType 0 =*/24).uint64(message.max_rows); + if (message.disable_binlogs != null && Object.hasOwnProperty.call(message, "disable_binlogs")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.disable_binlogs); + if (message.reload_schema != 
null && Object.hasOwnProperty.call(message, "reload_schema")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.reload_schema); + if (message.disable_foreign_key_checks != null && Object.hasOwnProperty.call(message, "disable_foreign_key_checks")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.disable_foreign_key_checks); + return writer; + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaRequest} message ExecuteMultiFetchAsDbaRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDbaRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteMultiFetchAsDbaRequest message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaRequest} ExecuteMultiFetchAsDbaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDbaRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ExecuteMultiFetchAsDbaRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.sql = reader.bytes(); + break; + } + case 2: { + message.db_name = reader.string(); + break; + } + case 3: { + message.max_rows = reader.uint64(); + break; + } + case 4: { + message.disable_binlogs = reader.bool(); + break; + } + case 5: { + message.reload_schema = reader.bool(); + break; + } + case 6: { + message.disable_foreign_key_checks = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteMultiFetchAsDbaRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaRequest} ExecuteMultiFetchAsDbaRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDbaRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteMultiFetchAsDbaRequest message. 
+ * @function verify + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteMultiFetchAsDbaRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.sql != null && message.hasOwnProperty("sql")) + if (!(message.sql && typeof message.sql.length === "number" || $util.isString(message.sql))) + return "sql: buffer expected"; + if (message.db_name != null && message.hasOwnProperty("db_name")) + if (!$util.isString(message.db_name)) + return "db_name: string expected"; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) + return "max_rows: integer|Long expected"; + if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) + if (typeof message.disable_binlogs !== "boolean") + return "disable_binlogs: boolean expected"; + if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) + if (typeof message.reload_schema !== "boolean") + return "reload_schema: boolean expected"; + if (message.disable_foreign_key_checks != null && message.hasOwnProperty("disable_foreign_key_checks")) + if (typeof message.disable_foreign_key_checks !== "boolean") + return "disable_foreign_key_checks: boolean expected"; + return null; + }; + + /** + * Creates an ExecuteMultiFetchAsDbaRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaRequest} ExecuteMultiFetchAsDbaRequest + */ + ExecuteMultiFetchAsDbaRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ExecuteMultiFetchAsDbaRequest) + return object; + let message = new $root.tabletmanagerdata.ExecuteMultiFetchAsDbaRequest(); + if (object.sql != null) + if (typeof object.sql === "string") + $util.base64.decode(object.sql, message.sql = $util.newBuffer($util.base64.length(object.sql)), 0); + else if (object.sql.length >= 0) + message.sql = object.sql; + if (object.db_name != null) + message.db_name = String(object.db_name); + if (object.max_rows != null) + if ($util.Long) + (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = true; + else if (typeof object.max_rows === "string") + message.max_rows = parseInt(object.max_rows, 10); + else if (typeof object.max_rows === "number") + message.max_rows = object.max_rows; + else if (typeof object.max_rows === "object") + message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(true); + if (object.disable_binlogs != null) + message.disable_binlogs = Boolean(object.disable_binlogs); + if (object.reload_schema != null) + message.reload_schema = Boolean(object.reload_schema); + if (object.disable_foreign_key_checks != null) + message.disable_foreign_key_checks = Boolean(object.disable_foreign_key_checks); + return message; + }; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDbaRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {tabletmanagerdata.ExecuteMultiFetchAsDbaRequest} message ExecuteMultiFetchAsDbaRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteMultiFetchAsDbaRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + if (options.bytes === String) + object.sql = ""; + else { + object.sql = []; + if (options.bytes !== Array) + object.sql = $util.newBuffer(object.sql); + } + object.db_name = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, true); + object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_rows = options.longs === String ? "0" : 0; + object.disable_binlogs = false; + object.reload_schema = false; + object.disable_foreign_key_checks = false; + } + if (message.sql != null && message.hasOwnProperty("sql")) + object.sql = options.bytes === String ? $util.base64.encode(message.sql, 0, message.sql.length) : options.bytes === Array ? Array.prototype.slice.call(message.sql) : message.sql; + if (message.db_name != null && message.hasOwnProperty("db_name")) + object.db_name = message.db_name; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (typeof message.max_rows === "number") + object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; + else + object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? 
new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber(true) : message.max_rows; + if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) + object.disable_binlogs = message.disable_binlogs; + if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) + object.reload_schema = message.reload_schema; + if (message.disable_foreign_key_checks != null && message.hasOwnProperty("disable_foreign_key_checks")) + object.disable_foreign_key_checks = message.disable_foreign_key_checks; + return object; + }; + + /** + * Converts this ExecuteMultiFetchAsDbaRequest to JSON. + * @function toJSON + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @instance + * @returns {Object.} JSON object + */ + ExecuteMultiFetchAsDbaRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDbaRequest + * @function getTypeUrl + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteMultiFetchAsDbaRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.ExecuteMultiFetchAsDbaRequest"; + }; + + return ExecuteMultiFetchAsDbaRequest; + })(); + + tabletmanagerdata.ExecuteMultiFetchAsDbaResponse = (function() { + + /** + * Properties of an ExecuteMultiFetchAsDbaResponse. + * @memberof tabletmanagerdata + * @interface IExecuteMultiFetchAsDbaResponse + * @property {Array.|null} [results] ExecuteMultiFetchAsDbaResponse results + */ + + /** + * Constructs a new ExecuteMultiFetchAsDbaResponse. + * @memberof tabletmanagerdata + * @classdesc Represents an ExecuteMultiFetchAsDbaResponse. 
+ * @implements IExecuteMultiFetchAsDbaResponse + * @constructor + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaResponse=} [properties] Properties to set + */ + function ExecuteMultiFetchAsDbaResponse(properties) { + this.results = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteMultiFetchAsDbaResponse results. + * @member {Array.} results + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @instance + */ + ExecuteMultiFetchAsDbaResponse.prototype.results = $util.emptyArray; + + /** + * Creates a new ExecuteMultiFetchAsDbaResponse instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaResponse} ExecuteMultiFetchAsDbaResponse instance + */ + ExecuteMultiFetchAsDbaResponse.create = function create(properties) { + return new ExecuteMultiFetchAsDbaResponse(properties); + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaResponse message. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.verify|verify} messages. 
+ * @function encode + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaResponse} message ExecuteMultiFetchAsDbaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDbaResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + $root.query.QueryResult.encode(message.results[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDbaResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {tabletmanagerdata.IExecuteMultiFetchAsDbaResponse} message ExecuteMultiFetchAsDbaResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDbaResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteMultiFetchAsDbaResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaResponse} ExecuteMultiFetchAsDbaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDbaResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ExecuteMultiFetchAsDbaResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.results && message.results.length)) + message.results = []; + message.results.push($root.query.QueryResult.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteMultiFetchAsDbaResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaResponse} ExecuteMultiFetchAsDbaResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDbaResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteMultiFetchAsDbaResponse message. 
+ * @function verify + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteMultiFetchAsDbaResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) { + let error = $root.query.QueryResult.verify(message.results[i]); + if (error) + return "results." + error; + } + } + return null; + }; + + /** + * Creates an ExecuteMultiFetchAsDbaResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.ExecuteMultiFetchAsDbaResponse} ExecuteMultiFetchAsDbaResponse + */ + ExecuteMultiFetchAsDbaResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ExecuteMultiFetchAsDbaResponse) + return object; + let message = new $root.tabletmanagerdata.ExecuteMultiFetchAsDbaResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) { + if (typeof object.results[i] !== "object") + throw TypeError(".tabletmanagerdata.ExecuteMultiFetchAsDbaResponse.results: object expected"); + message.results[i] = $root.query.QueryResult.fromObject(object.results[i]); + } + } + return message; + }; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDbaResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {tabletmanagerdata.ExecuteMultiFetchAsDbaResponse} message ExecuteMultiFetchAsDbaResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteMultiFetchAsDbaResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.results = []; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = $root.query.QueryResult.toObject(message.results[j], options); + } + return object; + }; + + /** + * Converts this ExecuteMultiFetchAsDbaResponse to JSON. + * @function toJSON + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @instance + * @returns {Object.} JSON object + */ + ExecuteMultiFetchAsDbaResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDbaResponse + * @function getTypeUrl + * @memberof tabletmanagerdata.ExecuteMultiFetchAsDbaResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteMultiFetchAsDbaResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.ExecuteMultiFetchAsDbaResponse"; + }; + + return ExecuteMultiFetchAsDbaResponse; + })(); + tabletmanagerdata.ExecuteFetchAsAllPrivsRequest = (function() { /** @@ -57602,6 +60071,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {boolean|null} [force_start_replication] SetReplicationSourceRequest force_start_replication * @property 
{string|null} [wait_position] SetReplicationSourceRequest wait_position * @property {boolean|null} [semiSync] SetReplicationSourceRequest semiSync + * @property {number|null} [heartbeat_interval] SetReplicationSourceRequest heartbeat_interval */ /** @@ -57659,6 +60129,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ SetReplicationSourceRequest.prototype.semiSync = false; + /** + * SetReplicationSourceRequest heartbeat_interval. + * @member {number} heartbeat_interval + * @memberof tabletmanagerdata.SetReplicationSourceRequest + * @instance + */ + SetReplicationSourceRequest.prototype.heartbeat_interval = 0; + /** * Creates a new SetReplicationSourceRequest instance using the specified properties. * @function create @@ -57693,6 +60171,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 4, wireType 2 =*/34).string(message.wait_position); if (message.semiSync != null && Object.hasOwnProperty.call(message, "semiSync")) writer.uint32(/* id 5, wireType 0 =*/40).bool(message.semiSync); + if (message.heartbeat_interval != null && Object.hasOwnProperty.call(message, "heartbeat_interval")) + writer.uint32(/* id 6, wireType 1 =*/49).double(message.heartbeat_interval); return writer; }; @@ -57747,6 +60227,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.semiSync = reader.bool(); break; } + case 6: { + message.heartbeat_interval = reader.double(); + break; + } default: reader.skipType(tag & 7); break; @@ -57799,6 +60283,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.semiSync != null && message.hasOwnProperty("semiSync")) if (typeof message.semiSync !== "boolean") return "semiSync: boolean expected"; + if (message.heartbeat_interval != null && message.hasOwnProperty("heartbeat_interval")) + if (typeof message.heartbeat_interval !== "number") + return "heartbeat_interval: number expected"; return null; }; @@ -57834,6 +60321,8 @@ export const 
tabletmanagerdata = $root.tabletmanagerdata = (() => { message.wait_position = String(object.wait_position); if (object.semiSync != null) message.semiSync = Boolean(object.semiSync); + if (object.heartbeat_interval != null) + message.heartbeat_interval = Number(object.heartbeat_interval); return message; }; @@ -57860,6 +60349,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.force_start_replication = false; object.wait_position = ""; object.semiSync = false; + object.heartbeat_interval = 0; } if (message.parent != null && message.hasOwnProperty("parent")) object.parent = $root.topodata.TabletAlias.toObject(message.parent, options); @@ -57874,6 +60364,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.wait_position = message.wait_position; if (message.semiSync != null && message.hasOwnProperty("semiSync")) object.semiSync = message.semiSync; + if (message.heartbeat_interval != null && message.hasOwnProperty("heartbeat_interval")) + object.heartbeat_interval = options.json && !isFinite(message.heartbeat_interval) ? String(message.heartbeat_interval) : message.heartbeat_interval; return object; }; @@ -59306,7 +61798,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * Properties of a BackupRequest. * @memberof tabletmanagerdata * @interface IBackupRequest - * @property {number|Long|null} [concurrency] BackupRequest concurrency + * @property {number|null} [concurrency] BackupRequest concurrency * @property {boolean|null} [allow_primary] BackupRequest allow_primary * @property {string|null} [incremental_from_pos] BackupRequest incremental_from_pos * @property {boolean|null} [upgrade_safe] BackupRequest upgrade_safe @@ -59329,11 +61821,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { /** * BackupRequest concurrency. 
- * @member {number|Long} concurrency + * @member {number} concurrency * @memberof tabletmanagerdata.BackupRequest * @instance */ - BackupRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + BackupRequest.prototype.concurrency = 0; /** * BackupRequest allow_primary. @@ -59384,7 +61876,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (!writer) writer = $Writer.create(); if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 1, wireType 0 =*/8).int64(message.concurrency); + writer.uint32(/* id 1, wireType 0 =*/8).int32(message.concurrency); if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) @@ -59426,7 +61918,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.concurrency = reader.int64(); + message.concurrency = reader.int32(); break; } case 2: { @@ -59477,8 +61969,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (typeof message !== "object" || message === null) return "object expected"; if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) - return "concurrency: integer|Long expected"; + if (!$util.isInteger(message.concurrency)) + return "concurrency: integer expected"; if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) if (typeof message.allow_primary !== "boolean") return "allow_primary: boolean expected"; @@ -59504,14 +61996,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { return object; let message = 
new $root.tabletmanagerdata.BackupRequest(); if (object.concurrency != null) - if ($util.Long) - (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = false; - else if (typeof object.concurrency === "string") - message.concurrency = parseInt(object.concurrency, 10); - else if (typeof object.concurrency === "number") - message.concurrency = object.concurrency; - else if (typeof object.concurrency === "object") - message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(); + message.concurrency = object.concurrency | 0; if (object.allow_primary != null) message.allow_primary = Boolean(object.allow_primary); if (object.incremental_from_pos != null) @@ -59535,20 +62020,13 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { options = {}; let object = {}; if (options.defaults) { - if ($util.Long) { - let long = new $util.Long(0, 0, false); - object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.concurrency = options.longs === String ? "0" : 0; + object.concurrency = 0; object.allow_primary = false; object.incremental_from_pos = ""; object.upgrade_safe = false; } if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (typeof message.concurrency === "number") - object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency; - else - object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? 
new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber() : message.concurrency; + object.concurrency = message.concurrency; if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) object.allow_primary = message.allow_primary; if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) @@ -60302,6 +62780,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {boolean|null} [defer_secondary_keys] CreateVReplicationWorkflowRequest defer_secondary_keys * @property {boolean|null} [auto_start] CreateVReplicationWorkflowRequest auto_start * @property {boolean|null} [stop_after_copy] CreateVReplicationWorkflowRequest stop_after_copy + * @property {string|null} [options] CreateVReplicationWorkflowRequest options */ /** @@ -60402,6 +62881,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ CreateVReplicationWorkflowRequest.prototype.stop_after_copy = false; + /** + * CreateVReplicationWorkflowRequest options. + * @member {string} options + * @memberof tabletmanagerdata.CreateVReplicationWorkflowRequest + * @instance + */ + CreateVReplicationWorkflowRequest.prototype.options = ""; + /** * Creates a new CreateVReplicationWorkflowRequest instance using the specified properties. 
* @function create @@ -60452,6 +62939,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 9, wireType 0 =*/72).bool(message.auto_start); if (message.stop_after_copy != null && Object.hasOwnProperty.call(message, "stop_after_copy")) writer.uint32(/* id 10, wireType 0 =*/80).bool(message.stop_after_copy); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.options); return writer; }; @@ -60537,6 +63026,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.stop_after_copy = reader.bool(); break; } + case 11: { + message.options = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -60651,6 +63144,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.stop_after_copy != null && message.hasOwnProperty("stop_after_copy")) if (typeof message.stop_after_copy !== "boolean") return "stop_after_copy: boolean expected"; + if (message.options != null && message.hasOwnProperty("options")) + if (!$util.isString(message.options)) + return "options: string expected"; return null; }; @@ -60820,6 +63316,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.auto_start = Boolean(object.auto_start); if (object.stop_after_copy != null) message.stop_after_copy = Boolean(object.stop_after_copy); + if (object.options != null) + message.options = String(object.options); return message; }; @@ -60849,6 +63347,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.defer_secondary_keys = false; object.auto_start = false; object.stop_after_copy = false; + object.options = ""; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; @@ -60879,6 +63378,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.auto_start = message.auto_start; if (message.stop_after_copy != 
null && message.hasOwnProperty("stop_after_copy")) object.stop_after_copy = message.stop_after_copy; + if (message.options != null && message.hasOwnProperty("options")) + object.options = message.options; return object; }; @@ -61530,6 +64031,1120 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { return DeleteVReplicationWorkflowResponse; })(); + tabletmanagerdata.HasVReplicationWorkflowsRequest = (function() { + + /** + * Properties of a HasVReplicationWorkflowsRequest. + * @memberof tabletmanagerdata + * @interface IHasVReplicationWorkflowsRequest + */ + + /** + * Constructs a new HasVReplicationWorkflowsRequest. + * @memberof tabletmanagerdata + * @classdesc Represents a HasVReplicationWorkflowsRequest. + * @implements IHasVReplicationWorkflowsRequest + * @constructor + * @param {tabletmanagerdata.IHasVReplicationWorkflowsRequest=} [properties] Properties to set + */ + function HasVReplicationWorkflowsRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new HasVReplicationWorkflowsRequest instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IHasVReplicationWorkflowsRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.HasVReplicationWorkflowsRequest} HasVReplicationWorkflowsRequest instance + */ + HasVReplicationWorkflowsRequest.create = function create(properties) { + return new HasVReplicationWorkflowsRequest(properties); + }; + + /** + * Encodes the specified HasVReplicationWorkflowsRequest message. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsRequest.verify|verify} messages. 
+ * @function encode + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IHasVReplicationWorkflowsRequest} message HasVReplicationWorkflowsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + HasVReplicationWorkflowsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified HasVReplicationWorkflowsRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IHasVReplicationWorkflowsRequest} message HasVReplicationWorkflowsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + HasVReplicationWorkflowsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a HasVReplicationWorkflowsRequest message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.HasVReplicationWorkflowsRequest} HasVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + HasVReplicationWorkflowsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.HasVReplicationWorkflowsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a HasVReplicationWorkflowsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.HasVReplicationWorkflowsRequest} HasVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + HasVReplicationWorkflowsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a HasVReplicationWorkflowsRequest message. + * @function verify + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + HasVReplicationWorkflowsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a HasVReplicationWorkflowsRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.HasVReplicationWorkflowsRequest} HasVReplicationWorkflowsRequest + */ + HasVReplicationWorkflowsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.HasVReplicationWorkflowsRequest) + return object; + return new $root.tabletmanagerdata.HasVReplicationWorkflowsRequest(); + }; + + /** + * Creates a plain object from a HasVReplicationWorkflowsRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.HasVReplicationWorkflowsRequest} message HasVReplicationWorkflowsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + HasVReplicationWorkflowsRequest.toObject = function toObject() { + return {}; + }; + + /** + * Converts this HasVReplicationWorkflowsRequest to JSON. 
+ * @function toJSON + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @instance + * @returns {Object.} JSON object + */ + HasVReplicationWorkflowsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for HasVReplicationWorkflowsRequest + * @function getTypeUrl + * @memberof tabletmanagerdata.HasVReplicationWorkflowsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + HasVReplicationWorkflowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.HasVReplicationWorkflowsRequest"; + }; + + return HasVReplicationWorkflowsRequest; + })(); + + tabletmanagerdata.HasVReplicationWorkflowsResponse = (function() { + + /** + * Properties of a HasVReplicationWorkflowsResponse. + * @memberof tabletmanagerdata + * @interface IHasVReplicationWorkflowsResponse + * @property {boolean|null} [has] HasVReplicationWorkflowsResponse has + */ + + /** + * Constructs a new HasVReplicationWorkflowsResponse. + * @memberof tabletmanagerdata + * @classdesc Represents a HasVReplicationWorkflowsResponse. + * @implements IHasVReplicationWorkflowsResponse + * @constructor + * @param {tabletmanagerdata.IHasVReplicationWorkflowsResponse=} [properties] Properties to set + */ + function HasVReplicationWorkflowsResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * HasVReplicationWorkflowsResponse has. 
+ * @member {boolean} has + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @instance + */ + HasVReplicationWorkflowsResponse.prototype.has = false; + + /** + * Creates a new HasVReplicationWorkflowsResponse instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IHasVReplicationWorkflowsResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.HasVReplicationWorkflowsResponse} HasVReplicationWorkflowsResponse instance + */ + HasVReplicationWorkflowsResponse.create = function create(properties) { + return new HasVReplicationWorkflowsResponse(properties); + }; + + /** + * Encodes the specified HasVReplicationWorkflowsResponse message. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsResponse.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IHasVReplicationWorkflowsResponse} message HasVReplicationWorkflowsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + HasVReplicationWorkflowsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.has != null && Object.hasOwnProperty.call(message, "has")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.has); + return writer; + }; + + /** + * Encodes the specified HasVReplicationWorkflowsResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.HasVReplicationWorkflowsResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IHasVReplicationWorkflowsResponse} message HasVReplicationWorkflowsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + HasVReplicationWorkflowsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a HasVReplicationWorkflowsResponse message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.HasVReplicationWorkflowsResponse} HasVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + HasVReplicationWorkflowsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.HasVReplicationWorkflowsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.has = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a HasVReplicationWorkflowsResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.HasVReplicationWorkflowsResponse} HasVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + HasVReplicationWorkflowsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a HasVReplicationWorkflowsResponse message. + * @function verify + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + HasVReplicationWorkflowsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.has != null && message.hasOwnProperty("has")) + if (typeof message.has !== "boolean") + return "has: boolean expected"; + return null; + }; + + /** + * Creates a HasVReplicationWorkflowsResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.HasVReplicationWorkflowsResponse} HasVReplicationWorkflowsResponse + */ + HasVReplicationWorkflowsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.HasVReplicationWorkflowsResponse) + return object; + let message = new $root.tabletmanagerdata.HasVReplicationWorkflowsResponse(); + if (object.has != null) + message.has = Boolean(object.has); + return message; + }; + + /** + * Creates a plain object from a HasVReplicationWorkflowsResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.HasVReplicationWorkflowsResponse} message HasVReplicationWorkflowsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + HasVReplicationWorkflowsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.has = false; + if (message.has != null && message.hasOwnProperty("has")) + object.has = message.has; + return object; + }; + + /** + * Converts this HasVReplicationWorkflowsResponse to JSON. 
+ * @function toJSON + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @instance + * @returns {Object.} JSON object + */ + HasVReplicationWorkflowsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for HasVReplicationWorkflowsResponse + * @function getTypeUrl + * @memberof tabletmanagerdata.HasVReplicationWorkflowsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + HasVReplicationWorkflowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.HasVReplicationWorkflowsResponse"; + }; + + return HasVReplicationWorkflowsResponse; + })(); + + tabletmanagerdata.ReadVReplicationWorkflowsRequest = (function() { + + /** + * Properties of a ReadVReplicationWorkflowsRequest. + * @memberof tabletmanagerdata + * @interface IReadVReplicationWorkflowsRequest + * @property {Array.|null} [include_ids] ReadVReplicationWorkflowsRequest include_ids + * @property {Array.|null} [include_workflows] ReadVReplicationWorkflowsRequest include_workflows + * @property {Array.|null} [include_states] ReadVReplicationWorkflowsRequest include_states + * @property {Array.|null} [exclude_workflows] ReadVReplicationWorkflowsRequest exclude_workflows + * @property {Array.|null} [exclude_states] ReadVReplicationWorkflowsRequest exclude_states + * @property {boolean|null} [exclude_frozen] ReadVReplicationWorkflowsRequest exclude_frozen + */ + + /** + * Constructs a new ReadVReplicationWorkflowsRequest. + * @memberof tabletmanagerdata + * @classdesc Represents a ReadVReplicationWorkflowsRequest. 
+ * @implements IReadVReplicationWorkflowsRequest + * @constructor + * @param {tabletmanagerdata.IReadVReplicationWorkflowsRequest=} [properties] Properties to set + */ + function ReadVReplicationWorkflowsRequest(properties) { + this.include_ids = []; + this.include_workflows = []; + this.include_states = []; + this.exclude_workflows = []; + this.exclude_states = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReadVReplicationWorkflowsRequest include_ids. + * @member {Array.} include_ids + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + */ + ReadVReplicationWorkflowsRequest.prototype.include_ids = $util.emptyArray; + + /** + * ReadVReplicationWorkflowsRequest include_workflows. + * @member {Array.} include_workflows + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + */ + ReadVReplicationWorkflowsRequest.prototype.include_workflows = $util.emptyArray; + + /** + * ReadVReplicationWorkflowsRequest include_states. + * @member {Array.} include_states + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + */ + ReadVReplicationWorkflowsRequest.prototype.include_states = $util.emptyArray; + + /** + * ReadVReplicationWorkflowsRequest exclude_workflows. + * @member {Array.} exclude_workflows + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + */ + ReadVReplicationWorkflowsRequest.prototype.exclude_workflows = $util.emptyArray; + + /** + * ReadVReplicationWorkflowsRequest exclude_states. + * @member {Array.} exclude_states + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + */ + ReadVReplicationWorkflowsRequest.prototype.exclude_states = $util.emptyArray; + + /** + * ReadVReplicationWorkflowsRequest exclude_frozen. 
+ * @member {boolean} exclude_frozen + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + */ + ReadVReplicationWorkflowsRequest.prototype.exclude_frozen = false; + + /** + * Creates a new ReadVReplicationWorkflowsRequest instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IReadVReplicationWorkflowsRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsRequest} ReadVReplicationWorkflowsRequest instance + */ + ReadVReplicationWorkflowsRequest.create = function create(properties) { + return new ReadVReplicationWorkflowsRequest(properties); + }; + + /** + * Encodes the specified ReadVReplicationWorkflowsRequest message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsRequest.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IReadVReplicationWorkflowsRequest} message ReadVReplicationWorkflowsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadVReplicationWorkflowsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.include_ids != null && message.include_ids.length) { + writer.uint32(/* id 1, wireType 2 =*/10).fork(); + for (let i = 0; i < message.include_ids.length; ++i) + writer.int32(message.include_ids[i]); + writer.ldelim(); + } + if (message.include_workflows != null && message.include_workflows.length) + for (let i = 0; i < message.include_workflows.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.include_workflows[i]); + if (message.include_states != null && message.include_states.length) { + writer.uint32(/* id 3, wireType 2 =*/26).fork(); + for (let i = 0; i < 
message.include_states.length; ++i) + writer.int32(message.include_states[i]); + writer.ldelim(); + } + if (message.exclude_workflows != null && message.exclude_workflows.length) + for (let i = 0; i < message.exclude_workflows.length; ++i) + writer.uint32(/* id 4, wireType 2 =*/34).string(message.exclude_workflows[i]); + if (message.exclude_states != null && message.exclude_states.length) { + writer.uint32(/* id 5, wireType 2 =*/42).fork(); + for (let i = 0; i < message.exclude_states.length; ++i) + writer.int32(message.exclude_states[i]); + writer.ldelim(); + } + if (message.exclude_frozen != null && Object.hasOwnProperty.call(message, "exclude_frozen")) + writer.uint32(/* id 6, wireType 0 =*/48).bool(message.exclude_frozen); + return writer; + }; + + /** + * Encodes the specified ReadVReplicationWorkflowsRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IReadVReplicationWorkflowsRequest} message ReadVReplicationWorkflowsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadVReplicationWorkflowsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ReadVReplicationWorkflowsRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsRequest} ReadVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadVReplicationWorkflowsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ReadVReplicationWorkflowsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.include_ids && message.include_ids.length)) + message.include_ids = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.include_ids.push(reader.int32()); + } else + message.include_ids.push(reader.int32()); + break; + } + case 2: { + if (!(message.include_workflows && message.include_workflows.length)) + message.include_workflows = []; + message.include_workflows.push(reader.string()); + break; + } + case 3: { + if (!(message.include_states && message.include_states.length)) + message.include_states = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.include_states.push(reader.int32()); + } else + message.include_states.push(reader.int32()); + break; + } + case 4: { + if (!(message.exclude_workflows && message.exclude_workflows.length)) + message.exclude_workflows = []; + message.exclude_workflows.push(reader.string()); + break; + } + case 5: { + if (!(message.exclude_states && message.exclude_states.length)) + message.exclude_states = []; + if ((tag & 7) === 2) { + let end2 = 
reader.uint32() + reader.pos; + while (reader.pos < end2) + message.exclude_states.push(reader.int32()); + } else + message.exclude_states.push(reader.int32()); + break; + } + case 6: { + message.exclude_frozen = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ReadVReplicationWorkflowsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsRequest} ReadVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadVReplicationWorkflowsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ReadVReplicationWorkflowsRequest message. 
+ * @function verify + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ReadVReplicationWorkflowsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.include_ids != null && message.hasOwnProperty("include_ids")) { + if (!Array.isArray(message.include_ids)) + return "include_ids: array expected"; + for (let i = 0; i < message.include_ids.length; ++i) + if (!$util.isInteger(message.include_ids[i])) + return "include_ids: integer[] expected"; + } + if (message.include_workflows != null && message.hasOwnProperty("include_workflows")) { + if (!Array.isArray(message.include_workflows)) + return "include_workflows: array expected"; + for (let i = 0; i < message.include_workflows.length; ++i) + if (!$util.isString(message.include_workflows[i])) + return "include_workflows: string[] expected"; + } + if (message.include_states != null && message.hasOwnProperty("include_states")) { + if (!Array.isArray(message.include_states)) + return "include_states: array expected"; + for (let i = 0; i < message.include_states.length; ++i) + switch (message.include_states[i]) { + default: + return "include_states: enum value[] expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } + } + if (message.exclude_workflows != null && message.hasOwnProperty("exclude_workflows")) { + if (!Array.isArray(message.exclude_workflows)) + return "exclude_workflows: array expected"; + for (let i = 0; i < message.exclude_workflows.length; ++i) + if (!$util.isString(message.exclude_workflows[i])) + return "exclude_workflows: string[] expected"; + } + if (message.exclude_states != null && message.hasOwnProperty("exclude_states")) { + if (!Array.isArray(message.exclude_states)) + return "exclude_states: array expected"; + 
for (let i = 0; i < message.exclude_states.length; ++i) + switch (message.exclude_states[i]) { + default: + return "exclude_states: enum value[] expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } + } + if (message.exclude_frozen != null && message.hasOwnProperty("exclude_frozen")) + if (typeof message.exclude_frozen !== "boolean") + return "exclude_frozen: boolean expected"; + return null; + }; + + /** + * Creates a ReadVReplicationWorkflowsRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsRequest} ReadVReplicationWorkflowsRequest + */ + ReadVReplicationWorkflowsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ReadVReplicationWorkflowsRequest) + return object; + let message = new $root.tabletmanagerdata.ReadVReplicationWorkflowsRequest(); + if (object.include_ids) { + if (!Array.isArray(object.include_ids)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsRequest.include_ids: array expected"); + message.include_ids = []; + for (let i = 0; i < object.include_ids.length; ++i) + message.include_ids[i] = object.include_ids[i] | 0; + } + if (object.include_workflows) { + if (!Array.isArray(object.include_workflows)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsRequest.include_workflows: array expected"); + message.include_workflows = []; + for (let i = 0; i < object.include_workflows.length; ++i) + message.include_workflows[i] = String(object.include_workflows[i]); + } + if (object.include_states) { + if (!Array.isArray(object.include_states)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsRequest.include_states: array expected"); + message.include_states = []; + for (let i = 0; i < 
object.include_states.length; ++i) + switch (object.include_states[i]) { + default: + if (typeof object.include_states[i] === "number") { + message.include_states[i] = object.include_states[i]; + break; + } + case "Unknown": + case 0: + message.include_states[i] = 0; + break; + case "Init": + case 1: + message.include_states[i] = 1; + break; + case "Stopped": + case 2: + message.include_states[i] = 2; + break; + case "Copying": + case 3: + message.include_states[i] = 3; + break; + case "Running": + case 4: + message.include_states[i] = 4; + break; + case "Error": + case 5: + message.include_states[i] = 5; + break; + case "Lagging": + case 6: + message.include_states[i] = 6; + break; + } + } + if (object.exclude_workflows) { + if (!Array.isArray(object.exclude_workflows)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsRequest.exclude_workflows: array expected"); + message.exclude_workflows = []; + for (let i = 0; i < object.exclude_workflows.length; ++i) + message.exclude_workflows[i] = String(object.exclude_workflows[i]); + } + if (object.exclude_states) { + if (!Array.isArray(object.exclude_states)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsRequest.exclude_states: array expected"); + message.exclude_states = []; + for (let i = 0; i < object.exclude_states.length; ++i) + switch (object.exclude_states[i]) { + default: + if (typeof object.exclude_states[i] === "number") { + message.exclude_states[i] = object.exclude_states[i]; + break; + } + case "Unknown": + case 0: + message.exclude_states[i] = 0; + break; + case "Init": + case 1: + message.exclude_states[i] = 1; + break; + case "Stopped": + case 2: + message.exclude_states[i] = 2; + break; + case "Copying": + case 3: + message.exclude_states[i] = 3; + break; + case "Running": + case 4: + message.exclude_states[i] = 4; + break; + case "Error": + case 5: + message.exclude_states[i] = 5; + break; + case "Lagging": + case 6: + message.exclude_states[i] = 6; + break; + } + } + if 
(object.exclude_frozen != null) + message.exclude_frozen = Boolean(object.exclude_frozen); + return message; + }; + + /** + * Creates a plain object from a ReadVReplicationWorkflowsRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.ReadVReplicationWorkflowsRequest} message ReadVReplicationWorkflowsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ReadVReplicationWorkflowsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.include_ids = []; + object.include_workflows = []; + object.include_states = []; + object.exclude_workflows = []; + object.exclude_states = []; + } + if (options.defaults) + object.exclude_frozen = false; + if (message.include_ids && message.include_ids.length) { + object.include_ids = []; + for (let j = 0; j < message.include_ids.length; ++j) + object.include_ids[j] = message.include_ids[j]; + } + if (message.include_workflows && message.include_workflows.length) { + object.include_workflows = []; + for (let j = 0; j < message.include_workflows.length; ++j) + object.include_workflows[j] = message.include_workflows[j]; + } + if (message.include_states && message.include_states.length) { + object.include_states = []; + for (let j = 0; j < message.include_states.length; ++j) + object.include_states[j] = options.enums === String ? $root.binlogdata.VReplicationWorkflowState[message.include_states[j]] === undefined ? 
message.include_states[j] : $root.binlogdata.VReplicationWorkflowState[message.include_states[j]] : message.include_states[j]; + } + if (message.exclude_workflows && message.exclude_workflows.length) { + object.exclude_workflows = []; + for (let j = 0; j < message.exclude_workflows.length; ++j) + object.exclude_workflows[j] = message.exclude_workflows[j]; + } + if (message.exclude_states && message.exclude_states.length) { + object.exclude_states = []; + for (let j = 0; j < message.exclude_states.length; ++j) + object.exclude_states[j] = options.enums === String ? $root.binlogdata.VReplicationWorkflowState[message.exclude_states[j]] === undefined ? message.exclude_states[j] : $root.binlogdata.VReplicationWorkflowState[message.exclude_states[j]] : message.exclude_states[j]; + } + if (message.exclude_frozen != null && message.hasOwnProperty("exclude_frozen")) + object.exclude_frozen = message.exclude_frozen; + return object; + }; + + /** + * Converts this ReadVReplicationWorkflowsRequest to JSON. 
+ * @function toJSON + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @instance + * @returns {Object.} JSON object + */ + ReadVReplicationWorkflowsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ReadVReplicationWorkflowsRequest + * @function getTypeUrl + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReadVReplicationWorkflowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.ReadVReplicationWorkflowsRequest"; + }; + + return ReadVReplicationWorkflowsRequest; + })(); + + tabletmanagerdata.ReadVReplicationWorkflowsResponse = (function() { + + /** + * Properties of a ReadVReplicationWorkflowsResponse. + * @memberof tabletmanagerdata + * @interface IReadVReplicationWorkflowsResponse + * @property {Array.|null} [workflows] ReadVReplicationWorkflowsResponse workflows + */ + + /** + * Constructs a new ReadVReplicationWorkflowsResponse. + * @memberof tabletmanagerdata + * @classdesc Represents a ReadVReplicationWorkflowsResponse. + * @implements IReadVReplicationWorkflowsResponse + * @constructor + * @param {tabletmanagerdata.IReadVReplicationWorkflowsResponse=} [properties] Properties to set + */ + function ReadVReplicationWorkflowsResponse(properties) { + this.workflows = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ReadVReplicationWorkflowsResponse workflows. 
+ * @member {Array.} workflows + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @instance + */ + ReadVReplicationWorkflowsResponse.prototype.workflows = $util.emptyArray; + + /** + * Creates a new ReadVReplicationWorkflowsResponse instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IReadVReplicationWorkflowsResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsResponse} ReadVReplicationWorkflowsResponse instance + */ + ReadVReplicationWorkflowsResponse.create = function create(properties) { + return new ReadVReplicationWorkflowsResponse(properties); + }; + + /** + * Encodes the specified ReadVReplicationWorkflowsResponse message. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsResponse.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IReadVReplicationWorkflowsResponse} message ReadVReplicationWorkflowsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadVReplicationWorkflowsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.workflows != null && message.workflows.length) + for (let i = 0; i < message.workflows.length; ++i) + $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.encode(message.workflows[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ReadVReplicationWorkflowsResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.ReadVReplicationWorkflowsResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IReadVReplicationWorkflowsResponse} message ReadVReplicationWorkflowsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ReadVReplicationWorkflowsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ReadVReplicationWorkflowsResponse message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsResponse} ReadVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadVReplicationWorkflowsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.ReadVReplicationWorkflowsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.workflows && message.workflows.length)) + message.workflows = []; + message.workflows.push($root.tabletmanagerdata.ReadVReplicationWorkflowResponse.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ReadVReplicationWorkflowsResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsResponse} ReadVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ReadVReplicationWorkflowsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ReadVReplicationWorkflowsResponse message. + * @function verify + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ReadVReplicationWorkflowsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.workflows != null && message.hasOwnProperty("workflows")) { + if (!Array.isArray(message.workflows)) + return "workflows: array expected"; + for (let i = 0; i < message.workflows.length; ++i) { + let error = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.verify(message.workflows[i]); + if (error) + return "workflows." + error; + } + } + return null; + }; + + /** + * Creates a ReadVReplicationWorkflowsResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.ReadVReplicationWorkflowsResponse} ReadVReplicationWorkflowsResponse + */ + ReadVReplicationWorkflowsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.ReadVReplicationWorkflowsResponse) + return object; + let message = new $root.tabletmanagerdata.ReadVReplicationWorkflowsResponse(); + if (object.workflows) { + if (!Array.isArray(object.workflows)) + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsResponse.workflows: array expected"); + message.workflows = []; + for (let i = 0; i < object.workflows.length; ++i) { + if (typeof object.workflows[i] !== "object") + throw TypeError(".tabletmanagerdata.ReadVReplicationWorkflowsResponse.workflows: object expected"); + message.workflows[i] = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.fromObject(object.workflows[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a ReadVReplicationWorkflowsResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.ReadVReplicationWorkflowsResponse} message ReadVReplicationWorkflowsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ReadVReplicationWorkflowsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.workflows = []; + if (message.workflows && message.workflows.length) { + object.workflows = []; + for (let j = 0; j < message.workflows.length; ++j) + object.workflows[j] = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.toObject(message.workflows[j], options); + } + return object; + }; + + /** + * Converts this ReadVReplicationWorkflowsResponse to JSON. + * @function toJSON + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @instance + * @returns {Object.} JSON object + */ + ReadVReplicationWorkflowsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ReadVReplicationWorkflowsResponse + * @function getTypeUrl + * @memberof tabletmanagerdata.ReadVReplicationWorkflowsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ReadVReplicationWorkflowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.ReadVReplicationWorkflowsResponse"; + }; + + return ReadVReplicationWorkflowsResponse; + })(); + tabletmanagerdata.ReadVReplicationWorkflowRequest = (function() { /** @@ -61749,6 +65364,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property 
{binlogdata.VReplicationWorkflowSubType|null} [workflow_sub_type] ReadVReplicationWorkflowResponse workflow_sub_type * @property {boolean|null} [defer_secondary_keys] ReadVReplicationWorkflowResponse defer_secondary_keys * @property {Array.|null} [streams] ReadVReplicationWorkflowResponse streams + * @property {string|null} [options] ReadVReplicationWorkflowResponse options */ /** @@ -61848,6 +65464,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ ReadVReplicationWorkflowResponse.prototype.streams = $util.emptyArray; + /** + * ReadVReplicationWorkflowResponse options. + * @member {string} options + * @memberof tabletmanagerdata.ReadVReplicationWorkflowResponse + * @instance + */ + ReadVReplicationWorkflowResponse.prototype.options = ""; + /** * Creates a new ReadVReplicationWorkflowResponse instance using the specified properties. * @function create @@ -61897,6 +65521,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.streams != null && message.streams.length) for (let i = 0; i < message.streams.length; ++i) $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.encode(message.streams[i], writer.uint32(/* id 11, wireType 2 =*/90).fork()).ldelim(); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + writer.uint32(/* id 12, wireType 2 =*/98).string(message.options); return writer; }; @@ -61980,6 +65606,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.streams.push($root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.decode(reader, reader.uint32())); break; } + case 12: { + message.options = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -62090,6 +65720,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { return "streams." 
+ error; } } + if (message.options != null && message.hasOwnProperty("options")) + if (!$util.isString(message.options)) + return "options: string expected"; return null; }; @@ -62254,6 +65887,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.streams[i] = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.fromObject(object.streams[i]); } } + if (object.options != null) + message.options = String(object.options); return message; }; @@ -62283,6 +65918,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.workflow_type = options.enums === String ? "Materialize" : 0; object.workflow_sub_type = options.enums === String ? "None" : 0; object.defer_secondary_keys = false; + object.options = ""; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; @@ -62310,6 +65946,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { for (let j = 0; j < message.streams.length; ++j) object.streams[j] = $root.tabletmanagerdata.ReadVReplicationWorkflowResponse.Stream.toObject(message.streams[j], options); } + if (message.options != null && message.hasOwnProperty("options")) + object.options = message.options; return object; }; @@ -63808,6 +67446,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {boolean|null} [only_pks] VDiffReportOptions only_pks * @property {boolean|null} [debug_query] VDiffReportOptions debug_query * @property {string|null} [format] VDiffReportOptions format + * @property {number|Long|null} [max_sample_rows] VDiffReportOptions max_sample_rows */ /** @@ -63849,6 +67488,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ VDiffReportOptions.prototype.format = ""; + /** + * VDiffReportOptions max_sample_rows. 
+ * @member {number|Long} max_sample_rows + * @memberof tabletmanagerdata.VDiffReportOptions + * @instance + */ + VDiffReportOptions.prototype.max_sample_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** * Creates a new VDiffReportOptions instance using the specified properties. * @function create @@ -63879,6 +67526,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 2, wireType 0 =*/16).bool(message.debug_query); if (message.format != null && Object.hasOwnProperty.call(message, "format")) writer.uint32(/* id 3, wireType 2 =*/26).string(message.format); + if (message.max_sample_rows != null && Object.hasOwnProperty.call(message, "max_sample_rows")) + writer.uint32(/* id 4, wireType 0 =*/32).int64(message.max_sample_rows); return writer; }; @@ -63925,6 +67574,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.format = reader.string(); break; } + case 4: { + message.max_sample_rows = reader.int64(); + break; + } default: reader.skipType(tag & 7); break; @@ -63969,6 +67622,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.format != null && message.hasOwnProperty("format")) if (!$util.isString(message.format)) return "format: string expected"; + if (message.max_sample_rows != null && message.hasOwnProperty("max_sample_rows")) + if (!$util.isInteger(message.max_sample_rows) && !(message.max_sample_rows && $util.isInteger(message.max_sample_rows.low) && $util.isInteger(message.max_sample_rows.high))) + return "max_sample_rows: integer|Long expected"; return null; }; @@ -63990,6 +67646,15 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.debug_query = Boolean(object.debug_query); if (object.format != null) message.format = String(object.format); + if (object.max_sample_rows != null) + if ($util.Long) + (message.max_sample_rows = $util.Long.fromValue(object.max_sample_rows)).unsigned = false; + else if (typeof 
object.max_sample_rows === "string") + message.max_sample_rows = parseInt(object.max_sample_rows, 10); + else if (typeof object.max_sample_rows === "number") + message.max_sample_rows = object.max_sample_rows; + else if (typeof object.max_sample_rows === "object") + message.max_sample_rows = new $util.LongBits(object.max_sample_rows.low >>> 0, object.max_sample_rows.high >>> 0).toNumber(); return message; }; @@ -64010,6 +67675,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.only_pks = false; object.debug_query = false; object.format = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_sample_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_sample_rows = options.longs === String ? "0" : 0; } if (message.only_pks != null && message.hasOwnProperty("only_pks")) object.only_pks = message.only_pks; @@ -64017,6 +67687,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.debug_query = message.debug_query; if (message.format != null && message.hasOwnProperty("format")) object.format = message.format; + if (message.max_sample_rows != null && message.hasOwnProperty("max_sample_rows")) + if (typeof message.max_sample_rows === "number") + object.max_sample_rows = options.longs === String ? String(message.max_sample_rows) : message.max_sample_rows; + else + object.max_sample_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_sample_rows) : options.longs === Number ? 
new $util.LongBits(message.max_sample_rows.low >>> 0, message.max_sample_rows.high >>> 0).toNumber() : message.max_sample_rows; return object; }; @@ -64063,6 +67738,7 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { * @property {number|Long|null} [timeout_seconds] VDiffCoreOptions timeout_seconds * @property {number|Long|null} [max_extra_rows_to_compare] VDiffCoreOptions max_extra_rows_to_compare * @property {boolean|null} [update_table_stats] VDiffCoreOptions update_table_stats + * @property {number|Long|null} [max_diff_seconds] VDiffCoreOptions max_diff_seconds */ /** @@ -64144,6 +67820,14 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { */ VDiffCoreOptions.prototype.update_table_stats = false; + /** + * VDiffCoreOptions max_diff_seconds. + * @member {number|Long} max_diff_seconds + * @memberof tabletmanagerdata.VDiffCoreOptions + * @instance + */ + VDiffCoreOptions.prototype.max_diff_seconds = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** * Creates a new VDiffCoreOptions instance using the specified properties. 
* @function create @@ -64184,6 +67868,8 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { writer.uint32(/* id 7, wireType 0 =*/56).int64(message.max_extra_rows_to_compare); if (message.update_table_stats != null && Object.hasOwnProperty.call(message, "update_table_stats")) writer.uint32(/* id 8, wireType 0 =*/64).bool(message.update_table_stats); + if (message.max_diff_seconds != null && Object.hasOwnProperty.call(message, "max_diff_seconds")) + writer.uint32(/* id 9, wireType 0 =*/72).int64(message.max_diff_seconds); return writer; }; @@ -64250,6 +67936,10 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.update_table_stats = reader.bool(); break; } + case 9: { + message.max_diff_seconds = reader.int64(); + break; + } default: reader.skipType(tag & 7); break; @@ -64309,6 +67999,9 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) if (typeof message.update_table_stats !== "boolean") return "update_table_stats: boolean expected"; + if (message.max_diff_seconds != null && message.hasOwnProperty("max_diff_seconds")) + if (!$util.isInteger(message.max_diff_seconds) && !(message.max_diff_seconds && $util.isInteger(message.max_diff_seconds.low) && $util.isInteger(message.max_diff_seconds.high))) + return "max_diff_seconds: integer|Long expected"; return null; }; @@ -64368,6 +68061,15 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { message.max_extra_rows_to_compare = new $util.LongBits(object.max_extra_rows_to_compare.low >>> 0, object.max_extra_rows_to_compare.high >>> 0).toNumber(); if (object.update_table_stats != null) message.update_table_stats = Boolean(object.update_table_stats); + if (object.max_diff_seconds != null) + if ($util.Long) + (message.max_diff_seconds = $util.Long.fromValue(object.max_diff_seconds)).unsigned = false; + else if (typeof object.max_diff_seconds === "string") + 
message.max_diff_seconds = parseInt(object.max_diff_seconds, 10); + else if (typeof object.max_diff_seconds === "number") + message.max_diff_seconds = object.max_diff_seconds; + else if (typeof object.max_diff_seconds === "object") + message.max_diff_seconds = new $util.LongBits(object.max_diff_seconds.low >>> 0, object.max_diff_seconds.high >>> 0).toNumber(); return message; }; @@ -64409,6 +68111,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { } else object.max_extra_rows_to_compare = options.longs === String ? "0" : 0; object.update_table_stats = false; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_diff_seconds = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_diff_seconds = options.longs === String ? "0" : 0; } if (message.tables != null && message.hasOwnProperty("tables")) object.tables = message.tables; @@ -64438,6 +68145,11 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { object.max_extra_rows_to_compare = options.longs === String ? $util.Long.prototype.toString.call(message.max_extra_rows_to_compare) : options.longs === Number ? new $util.LongBits(message.max_extra_rows_to_compare.low >>> 0, message.max_extra_rows_to_compare.high >>> 0).toNumber() : message.max_extra_rows_to_compare; if (message.update_table_stats != null && message.hasOwnProperty("update_table_stats")) object.update_table_stats = message.update_table_stats; + if (message.max_diff_seconds != null && message.hasOwnProperty("max_diff_seconds")) + if (typeof message.max_diff_seconds === "number") + object.max_diff_seconds = options.longs === String ? String(message.max_diff_seconds) : message.max_diff_seconds; + else + object.max_diff_seconds = options.longs === String ? $util.Long.prototype.toString.call(message.max_diff_seconds) : options.longs === Number ? 
new $util.LongBits(message.max_diff_seconds.low >>> 0, message.max_diff_seconds.high >>> 0).toNumber() : message.max_diff_seconds; return object; }; @@ -65465,6 +69177,611 @@ export const tabletmanagerdata = $root.tabletmanagerdata = (() => { return UpdateVReplicationWorkflowResponse; })(); + tabletmanagerdata.UpdateVReplicationWorkflowsRequest = (function() { + + /** + * Properties of an UpdateVReplicationWorkflowsRequest. + * @memberof tabletmanagerdata + * @interface IUpdateVReplicationWorkflowsRequest + * @property {boolean|null} [all_workflows] UpdateVReplicationWorkflowsRequest all_workflows + * @property {Array.|null} [include_workflows] UpdateVReplicationWorkflowsRequest include_workflows + * @property {Array.|null} [exclude_workflows] UpdateVReplicationWorkflowsRequest exclude_workflows + * @property {binlogdata.VReplicationWorkflowState|null} [state] UpdateVReplicationWorkflowsRequest state + * @property {string|null} [message] UpdateVReplicationWorkflowsRequest message + * @property {string|null} [stop_position] UpdateVReplicationWorkflowsRequest stop_position + */ + + /** + * Constructs a new UpdateVReplicationWorkflowsRequest. + * @memberof tabletmanagerdata + * @classdesc Represents an UpdateVReplicationWorkflowsRequest. + * @implements IUpdateVReplicationWorkflowsRequest + * @constructor + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsRequest=} [properties] Properties to set + */ + function UpdateVReplicationWorkflowsRequest(properties) { + this.include_workflows = []; + this.exclude_workflows = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * UpdateVReplicationWorkflowsRequest all_workflows. 
+ * @member {boolean} all_workflows + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + */ + UpdateVReplicationWorkflowsRequest.prototype.all_workflows = false; + + /** + * UpdateVReplicationWorkflowsRequest include_workflows. + * @member {Array.} include_workflows + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + */ + UpdateVReplicationWorkflowsRequest.prototype.include_workflows = $util.emptyArray; + + /** + * UpdateVReplicationWorkflowsRequest exclude_workflows. + * @member {Array.} exclude_workflows + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + */ + UpdateVReplicationWorkflowsRequest.prototype.exclude_workflows = $util.emptyArray; + + /** + * UpdateVReplicationWorkflowsRequest state. + * @member {binlogdata.VReplicationWorkflowState} state + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + */ + UpdateVReplicationWorkflowsRequest.prototype.state = 0; + + /** + * UpdateVReplicationWorkflowsRequest message. + * @member {string} message + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + */ + UpdateVReplicationWorkflowsRequest.prototype.message = ""; + + /** + * UpdateVReplicationWorkflowsRequest stop_position. + * @member {string} stop_position + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + */ + UpdateVReplicationWorkflowsRequest.prototype.stop_position = ""; + + /** + * Creates a new UpdateVReplicationWorkflowsRequest instance using the specified properties. 
+ * @function create + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsRequest=} [properties] Properties to set + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsRequest} UpdateVReplicationWorkflowsRequest instance + */ + UpdateVReplicationWorkflowsRequest.create = function create(properties) { + return new UpdateVReplicationWorkflowsRequest(properties); + }; + + /** + * Encodes the specified UpdateVReplicationWorkflowsRequest message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsRequest.verify|verify} messages. + * @function encode + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsRequest} message UpdateVReplicationWorkflowsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + UpdateVReplicationWorkflowsRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.all_workflows != null && Object.hasOwnProperty.call(message, "all_workflows")) + writer.uint32(/* id 1, wireType 0 =*/8).bool(message.all_workflows); + if (message.include_workflows != null && message.include_workflows.length) + for (let i = 0; i < message.include_workflows.length; ++i) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.include_workflows[i]); + if (message.exclude_workflows != null && message.exclude_workflows.length) + for (let i = 0; i < message.exclude_workflows.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.exclude_workflows[i]); + if (message.state != null && Object.hasOwnProperty.call(message, "state")) + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.state); + if (message.message != null && Object.hasOwnProperty.call(message, "message")) + writer.uint32(/* id 5, wireType 2 
=*/42).string(message.message); + if (message.stop_position != null && Object.hasOwnProperty.call(message, "stop_position")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.stop_position); + return writer; + }; + + /** + * Encodes the specified UpdateVReplicationWorkflowsRequest message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsRequest} message UpdateVReplicationWorkflowsRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + UpdateVReplicationWorkflowsRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an UpdateVReplicationWorkflowsRequest message from the specified reader or buffer. + * @function decode + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsRequest} UpdateVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + UpdateVReplicationWorkflowsRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowsRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.all_workflows = reader.bool(); + break; + } + case 2: { + if (!(message.include_workflows && message.include_workflows.length)) + message.include_workflows = []; + message.include_workflows.push(reader.string()); + break; + } + case 3: { + if (!(message.exclude_workflows && message.exclude_workflows.length)) + message.exclude_workflows = []; + message.exclude_workflows.push(reader.string()); + break; + } + case 4: { + message.state = reader.int32(); + break; + } + case 5: { + message.message = reader.string(); + break; + } + case 6: { + message.stop_position = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an UpdateVReplicationWorkflowsRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsRequest} UpdateVReplicationWorkflowsRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + UpdateVReplicationWorkflowsRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an UpdateVReplicationWorkflowsRequest message. 
+ * @function verify + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + UpdateVReplicationWorkflowsRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.all_workflows != null && message.hasOwnProperty("all_workflows")) + if (typeof message.all_workflows !== "boolean") + return "all_workflows: boolean expected"; + if (message.include_workflows != null && message.hasOwnProperty("include_workflows")) { + if (!Array.isArray(message.include_workflows)) + return "include_workflows: array expected"; + for (let i = 0; i < message.include_workflows.length; ++i) + if (!$util.isString(message.include_workflows[i])) + return "include_workflows: string[] expected"; + } + if (message.exclude_workflows != null && message.hasOwnProperty("exclude_workflows")) { + if (!Array.isArray(message.exclude_workflows)) + return "exclude_workflows: array expected"; + for (let i = 0; i < message.exclude_workflows.length; ++i) + if (!$util.isString(message.exclude_workflows[i])) + return "exclude_workflows: string[] expected"; + } + if (message.state != null && message.hasOwnProperty("state")) + switch (message.state) { + default: + return "state: enum value expected"; + case 0: + case 1: + case 2: + case 3: + case 4: + case 5: + case 6: + break; + } + if (message.message != null && message.hasOwnProperty("message")) + if (!$util.isString(message.message)) + return "message: string expected"; + if (message.stop_position != null && message.hasOwnProperty("stop_position")) + if (!$util.isString(message.stop_position)) + return "stop_position: string expected"; + return null; + }; + + /** + * Creates an UpdateVReplicationWorkflowsRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsRequest} UpdateVReplicationWorkflowsRequest + */ + UpdateVReplicationWorkflowsRequest.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.UpdateVReplicationWorkflowsRequest) + return object; + let message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowsRequest(); + if (object.all_workflows != null) + message.all_workflows = Boolean(object.all_workflows); + if (object.include_workflows) { + if (!Array.isArray(object.include_workflows)) + throw TypeError(".tabletmanagerdata.UpdateVReplicationWorkflowsRequest.include_workflows: array expected"); + message.include_workflows = []; + for (let i = 0; i < object.include_workflows.length; ++i) + message.include_workflows[i] = String(object.include_workflows[i]); + } + if (object.exclude_workflows) { + if (!Array.isArray(object.exclude_workflows)) + throw TypeError(".tabletmanagerdata.UpdateVReplicationWorkflowsRequest.exclude_workflows: array expected"); + message.exclude_workflows = []; + for (let i = 0; i < object.exclude_workflows.length; ++i) + message.exclude_workflows[i] = String(object.exclude_workflows[i]); + } + switch (object.state) { + default: + if (typeof object.state === "number") { + message.state = object.state; + break; + } + break; + case "Unknown": + case 0: + message.state = 0; + break; + case "Init": + case 1: + message.state = 1; + break; + case "Stopped": + case 2: + message.state = 2; + break; + case "Copying": + case 3: + message.state = 3; + break; + case "Running": + case 4: + message.state = 4; + break; + case "Error": + case 5: + message.state = 5; + break; + case "Lagging": + case 6: + message.state = 6; + break; + } + if (object.message != null) + message.message = String(object.message); + if (object.stop_position != null) + 
message.stop_position = String(object.stop_position); + return message; + }; + + /** + * Creates a plain object from an UpdateVReplicationWorkflowsRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {tabletmanagerdata.UpdateVReplicationWorkflowsRequest} message UpdateVReplicationWorkflowsRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + UpdateVReplicationWorkflowsRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) { + object.include_workflows = []; + object.exclude_workflows = []; + } + if (options.defaults) { + object.all_workflows = false; + object.state = options.enums === String ? "Unknown" : 0; + object.message = ""; + object.stop_position = ""; + } + if (message.all_workflows != null && message.hasOwnProperty("all_workflows")) + object.all_workflows = message.all_workflows; + if (message.include_workflows && message.include_workflows.length) { + object.include_workflows = []; + for (let j = 0; j < message.include_workflows.length; ++j) + object.include_workflows[j] = message.include_workflows[j]; + } + if (message.exclude_workflows && message.exclude_workflows.length) { + object.exclude_workflows = []; + for (let j = 0; j < message.exclude_workflows.length; ++j) + object.exclude_workflows[j] = message.exclude_workflows[j]; + } + if (message.state != null && message.hasOwnProperty("state")) + object.state = options.enums === String ? $root.binlogdata.VReplicationWorkflowState[message.state] === undefined ? 
message.state : $root.binlogdata.VReplicationWorkflowState[message.state] : message.state; + if (message.message != null && message.hasOwnProperty("message")) + object.message = message.message; + if (message.stop_position != null && message.hasOwnProperty("stop_position")) + object.stop_position = message.stop_position; + return object; + }; + + /** + * Converts this UpdateVReplicationWorkflowsRequest to JSON. + * @function toJSON + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @instance + * @returns {Object.} JSON object + */ + UpdateVReplicationWorkflowsRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for UpdateVReplicationWorkflowsRequest + * @function getTypeUrl + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + UpdateVReplicationWorkflowsRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.UpdateVReplicationWorkflowsRequest"; + }; + + return UpdateVReplicationWorkflowsRequest; + })(); + + tabletmanagerdata.UpdateVReplicationWorkflowsResponse = (function() { + + /** + * Properties of an UpdateVReplicationWorkflowsResponse. + * @memberof tabletmanagerdata + * @interface IUpdateVReplicationWorkflowsResponse + * @property {query.IQueryResult|null} [result] UpdateVReplicationWorkflowsResponse result + */ + + /** + * Constructs a new UpdateVReplicationWorkflowsResponse. + * @memberof tabletmanagerdata + * @classdesc Represents an UpdateVReplicationWorkflowsResponse. 
+ * @implements IUpdateVReplicationWorkflowsResponse + * @constructor + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsResponse=} [properties] Properties to set + */ + function UpdateVReplicationWorkflowsResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * UpdateVReplicationWorkflowsResponse result. + * @member {query.IQueryResult|null|undefined} result + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @instance + */ + UpdateVReplicationWorkflowsResponse.prototype.result = null; + + /** + * Creates a new UpdateVReplicationWorkflowsResponse instance using the specified properties. + * @function create + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsResponse=} [properties] Properties to set + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsResponse} UpdateVReplicationWorkflowsResponse instance + */ + UpdateVReplicationWorkflowsResponse.create = function create(properties) { + return new UpdateVReplicationWorkflowsResponse(properties); + }; + + /** + * Encodes the specified UpdateVReplicationWorkflowsResponse message. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsResponse.verify|verify} messages. 
+ * @function encode + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsResponse} message UpdateVReplicationWorkflowsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + UpdateVReplicationWorkflowsResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.result != null && Object.hasOwnProperty.call(message, "result")) + $root.query.QueryResult.encode(message.result, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified UpdateVReplicationWorkflowsResponse message, length delimited. Does not implicitly {@link tabletmanagerdata.UpdateVReplicationWorkflowsResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.IUpdateVReplicationWorkflowsResponse} message UpdateVReplicationWorkflowsResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + UpdateVReplicationWorkflowsResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an UpdateVReplicationWorkflowsResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsResponse} UpdateVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + UpdateVReplicationWorkflowsResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowsResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.result = $root.query.QueryResult.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an UpdateVReplicationWorkflowsResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsResponse} UpdateVReplicationWorkflowsResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + UpdateVReplicationWorkflowsResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an UpdateVReplicationWorkflowsResponse message. 
+ * @function verify + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + UpdateVReplicationWorkflowsResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.result != null && message.hasOwnProperty("result")) { + let error = $root.query.QueryResult.verify(message.result); + if (error) + return "result." + error; + } + return null; + }; + + /** + * Creates an UpdateVReplicationWorkflowsResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {Object.} object Plain object + * @returns {tabletmanagerdata.UpdateVReplicationWorkflowsResponse} UpdateVReplicationWorkflowsResponse + */ + UpdateVReplicationWorkflowsResponse.fromObject = function fromObject(object) { + if (object instanceof $root.tabletmanagerdata.UpdateVReplicationWorkflowsResponse) + return object; + let message = new $root.tabletmanagerdata.UpdateVReplicationWorkflowsResponse(); + if (object.result != null) { + if (typeof object.result !== "object") + throw TypeError(".tabletmanagerdata.UpdateVReplicationWorkflowsResponse.result: object expected"); + message.result = $root.query.QueryResult.fromObject(object.result); + } + return message; + }; + + /** + * Creates a plain object from an UpdateVReplicationWorkflowsResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {tabletmanagerdata.UpdateVReplicationWorkflowsResponse} message UpdateVReplicationWorkflowsResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + UpdateVReplicationWorkflowsResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.result = null; + if (message.result != null && message.hasOwnProperty("result")) + object.result = $root.query.QueryResult.toObject(message.result, options); + return object; + }; + + /** + * Converts this UpdateVReplicationWorkflowsResponse to JSON. + * @function toJSON + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @instance + * @returns {Object.} JSON object + */ + UpdateVReplicationWorkflowsResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for UpdateVReplicationWorkflowsResponse + * @function getTypeUrl + * @memberof tabletmanagerdata.UpdateVReplicationWorkflowsResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + UpdateVReplicationWorkflowsResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/tabletmanagerdata.UpdateVReplicationWorkflowsResponse"; + }; + + return UpdateVReplicationWorkflowsResponse; + })(); + tabletmanagerdata.ResetSequencesRequest = (function() { /** @@ -68438,6 +72755,7 @@ export const binlogdata = $root.binlogdata = (() => { * @property {string|null} [target_unique_key_columns] Rule target_unique_key_columns * @property {string|null} [source_unique_key_target_columns] Rule 
source_unique_key_target_columns * @property {Object.|null} [convert_int_to_enum] Rule convert_int_to_enum + * @property {string|null} [force_unique_key] Rule force_unique_key */ /** @@ -68522,6 +72840,14 @@ export const binlogdata = $root.binlogdata = (() => { */ Rule.prototype.convert_int_to_enum = $util.emptyObject; + /** + * Rule force_unique_key. + * @member {string} force_unique_key + * @memberof binlogdata.Rule + * @instance + */ + Rule.prototype.force_unique_key = ""; + /** * Creates a new Rule instance using the specified properties. * @function create @@ -68567,6 +72893,8 @@ export const binlogdata = $root.binlogdata = (() => { if (message.convert_int_to_enum != null && Object.hasOwnProperty.call(message, "convert_int_to_enum")) for (let keys = Object.keys(message.convert_int_to_enum), i = 0; i < keys.length; ++i) writer.uint32(/* id 8, wireType 2 =*/66).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).bool(message.convert_int_to_enum[keys[i]]).ldelim(); + if (message.force_unique_key != null && Object.hasOwnProperty.call(message, "force_unique_key")) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.force_unique_key); return writer; }; @@ -68690,6 +73018,10 @@ export const binlogdata = $root.binlogdata = (() => { message.convert_int_to_enum[key] = value; break; } + case 9: { + message.force_unique_key = reader.string(); + break; + } default: reader.skipType(tag & 7); break; @@ -68766,6 +73098,9 @@ export const binlogdata = $root.binlogdata = (() => { if (typeof message.convert_int_to_enum[key[i]] !== "boolean") return "convert_int_to_enum: boolean{k:string} expected"; } + if (message.force_unique_key != null && message.hasOwnProperty("force_unique_key")) + if (!$util.isString(message.force_unique_key)) + return "force_unique_key: string expected"; return null; }; @@ -68815,6 +73150,8 @@ export const binlogdata = $root.binlogdata = (() => { for (let keys = Object.keys(object.convert_int_to_enum), i = 0; i 
< keys.length; ++i) message.convert_int_to_enum[keys[i]] = Boolean(object.convert_int_to_enum[keys[i]]); } + if (object.force_unique_key != null) + message.force_unique_key = String(object.force_unique_key); return message; }; @@ -68842,6 +73179,7 @@ export const binlogdata = $root.binlogdata = (() => { object.source_unique_key_columns = ""; object.target_unique_key_columns = ""; object.source_unique_key_target_columns = ""; + object.force_unique_key = ""; } if (message.match != null && message.hasOwnProperty("match")) object.match = message.match; @@ -68869,6 +73207,8 @@ export const binlogdata = $root.binlogdata = (() => { for (let j = 0; j < keys2.length; ++j) object.convert_int_to_enum[keys2[j]] = message.convert_int_to_enum[keys2[j]]; } + if (message.force_unique_key != null && message.hasOwnProperty("force_unique_key")) + object.force_unique_key = message.force_unique_key; return object; }; @@ -70795,6 +75135,7 @@ export const binlogdata = $root.binlogdata = (() => { * @property {Array.|null} [fields] FieldEvent fields * @property {string|null} [keyspace] FieldEvent keyspace * @property {string|null} [shard] FieldEvent shard + * @property {boolean|null} [enum_set_string_values] FieldEvent enum_set_string_values */ /** @@ -70845,6 +75186,14 @@ export const binlogdata = $root.binlogdata = (() => { */ FieldEvent.prototype.shard = ""; + /** + * FieldEvent enum_set_string_values. + * @member {boolean} enum_set_string_values + * @memberof binlogdata.FieldEvent + * @instance + */ + FieldEvent.prototype.enum_set_string_values = false; + /** * Creates a new FieldEvent instance using the specified properties. 
* @function create @@ -70878,6 +75227,8 @@ export const binlogdata = $root.binlogdata = (() => { writer.uint32(/* id 3, wireType 2 =*/26).string(message.keyspace); if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) writer.uint32(/* id 4, wireType 2 =*/34).string(message.shard); + if (message.enum_set_string_values != null && Object.hasOwnProperty.call(message, "enum_set_string_values")) + writer.uint32(/* id 25, wireType 0 =*/200).bool(message.enum_set_string_values); return writer; }; @@ -70930,6 +75281,10 @@ export const binlogdata = $root.binlogdata = (() => { message.shard = reader.string(); break; } + case 25: { + message.enum_set_string_values = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -70983,6 +75338,9 @@ export const binlogdata = $root.binlogdata = (() => { if (message.shard != null && message.hasOwnProperty("shard")) if (!$util.isString(message.shard)) return "shard: string expected"; + if (message.enum_set_string_values != null && message.hasOwnProperty("enum_set_string_values")) + if (typeof message.enum_set_string_values !== "boolean") + return "enum_set_string_values: boolean expected"; return null; }; @@ -71014,6 +75372,8 @@ export const binlogdata = $root.binlogdata = (() => { message.keyspace = String(object.keyspace); if (object.shard != null) message.shard = String(object.shard); + if (object.enum_set_string_values != null) + message.enum_set_string_values = Boolean(object.enum_set_string_values); return message; }; @@ -71036,6 +75396,7 @@ export const binlogdata = $root.binlogdata = (() => { object.table_name = ""; object.keyspace = ""; object.shard = ""; + object.enum_set_string_values = false; } if (message.table_name != null && message.hasOwnProperty("table_name")) object.table_name = message.table_name; @@ -71048,6 +75409,8 @@ export const binlogdata = $root.binlogdata = (() => { object.keyspace = message.keyspace; if (message.shard != null && message.hasOwnProperty("shard")) 
object.shard = message.shard; + if (message.enum_set_string_values != null && message.hasOwnProperty("enum_set_string_values")) + object.enum_set_string_values = message.enum_set_string_values; return object; }; @@ -95418,6 +99781,7 @@ export const query = $root.query = (() => { * @property {number|null} [qps] RealtimeStats qps * @property {Array.|null} [table_schema_changed] RealtimeStats table_schema_changed * @property {Array.|null} [view_schema_changed] RealtimeStats view_schema_changed + * @property {boolean|null} [udfs_changed] RealtimeStats udfs_changed */ /** @@ -95501,6 +99865,14 @@ export const query = $root.query = (() => { */ RealtimeStats.prototype.view_schema_changed = $util.emptyArray; + /** + * RealtimeStats udfs_changed. + * @member {boolean} udfs_changed + * @memberof query.RealtimeStats + * @instance + */ + RealtimeStats.prototype.udfs_changed = false; + /** * Creates a new RealtimeStats instance using the specified properties. * @function create @@ -95543,6 +99915,8 @@ export const query = $root.query = (() => { if (message.view_schema_changed != null && message.view_schema_changed.length) for (let i = 0; i < message.view_schema_changed.length; ++i) writer.uint32(/* id 8, wireType 2 =*/66).string(message.view_schema_changed[i]); + if (message.udfs_changed != null && Object.hasOwnProperty.call(message, "udfs_changed")) + writer.uint32(/* id 9, wireType 0 =*/72).bool(message.udfs_changed); return writer; }; @@ -95613,6 +99987,10 @@ export const query = $root.query = (() => { message.view_schema_changed.push(reader.string()); break; } + case 9: { + message.udfs_changed = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -95680,6 +100058,9 @@ export const query = $root.query = (() => { if (!$util.isString(message.view_schema_changed[i])) return "view_schema_changed: string[] expected"; } + if (message.udfs_changed != null && message.hasOwnProperty("udfs_changed")) + if (typeof message.udfs_changed !== "boolean") + return 
"udfs_changed: boolean expected"; return null; }; @@ -95728,6 +100109,8 @@ export const query = $root.query = (() => { for (let i = 0; i < object.view_schema_changed.length; ++i) message.view_schema_changed[i] = String(object.view_schema_changed[i]); } + if (object.udfs_changed != null) + message.udfs_changed = Boolean(object.udfs_changed); return message; }; @@ -95759,6 +100142,7 @@ export const query = $root.query = (() => { object.filtered_replication_lag_seconds = options.longs === String ? "0" : 0; object.cpu_usage = 0; object.qps = 0; + object.udfs_changed = false; } if (message.health_error != null && message.hasOwnProperty("health_error")) object.health_error = message.health_error; @@ -95785,6 +100169,8 @@ export const query = $root.query = (() => { for (let j = 0; j < message.view_schema_changed.length; ++j) object.view_schema_changed[j] = message.view_schema_changed[j]; } + if (message.udfs_changed != null && message.hasOwnProperty("udfs_changed")) + object.udfs_changed = message.udfs_changed; return object; }; @@ -96778,12 +101164,14 @@ export const query = $root.query = (() => { * @property {number} VIEWS=0 VIEWS value * @property {number} TABLES=1 TABLES value * @property {number} ALL=2 ALL value + * @property {number} UDFS=3 UDFS value */ query.SchemaTableType = (function() { const valuesById = {}, values = Object.create(valuesById); values[valuesById[0] = "VIEWS"] = 0; values[valuesById[1] = "TABLES"] = 1; values[valuesById[2] = "ALL"] = 2; + values[valuesById[3] = "UDFS"] = 3; return values; })(); @@ -96964,6 +101352,7 @@ export const query = $root.query = (() => { case 0: case 1: case 2: + case 3: break; } if (message.table_names != null && message.hasOwnProperty("table_names")) { @@ -97012,6 +101401,10 @@ export const query = $root.query = (() => { case 2: message.table_type = 2; break; + case "UDFS": + case 3: + message.table_type = 3; + break; } if (object.table_names) { if (!Array.isArray(object.table_names)) @@ -97083,12 +101476,447 @@ export 
const query = $root.query = (() => { return GetSchemaRequest; })(); + query.UDFInfo = (function() { + + /** + * Properties of a UDFInfo. + * @memberof query + * @interface IUDFInfo + * @property {string|null} [name] UDFInfo name + * @property {boolean|null} [aggregating] UDFInfo aggregating + * @property {query.Type|null} [return_type] UDFInfo return_type + */ + + /** + * Constructs a new UDFInfo. + * @memberof query + * @classdesc Represents a UDFInfo. + * @implements IUDFInfo + * @constructor + * @param {query.IUDFInfo=} [properties] Properties to set + */ + function UDFInfo(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * UDFInfo name. + * @member {string} name + * @memberof query.UDFInfo + * @instance + */ + UDFInfo.prototype.name = ""; + + /** + * UDFInfo aggregating. + * @member {boolean} aggregating + * @memberof query.UDFInfo + * @instance + */ + UDFInfo.prototype.aggregating = false; + + /** + * UDFInfo return_type. + * @member {query.Type} return_type + * @memberof query.UDFInfo + * @instance + */ + UDFInfo.prototype.return_type = 0; + + /** + * Creates a new UDFInfo instance using the specified properties. + * @function create + * @memberof query.UDFInfo + * @static + * @param {query.IUDFInfo=} [properties] Properties to set + * @returns {query.UDFInfo} UDFInfo instance + */ + UDFInfo.create = function create(properties) { + return new UDFInfo(properties); + }; + + /** + * Encodes the specified UDFInfo message. Does not implicitly {@link query.UDFInfo.verify|verify} messages. 
+ * @function encode + * @memberof query.UDFInfo + * @static + * @param {query.IUDFInfo} message UDFInfo message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + UDFInfo.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.name != null && Object.hasOwnProperty.call(message, "name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.name); + if (message.aggregating != null && Object.hasOwnProperty.call(message, "aggregating")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.aggregating); + if (message.return_type != null && Object.hasOwnProperty.call(message, "return_type")) + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.return_type); + return writer; + }; + + /** + * Encodes the specified UDFInfo message, length delimited. Does not implicitly {@link query.UDFInfo.verify|verify} messages. + * @function encodeDelimited + * @memberof query.UDFInfo + * @static + * @param {query.IUDFInfo} message UDFInfo message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + UDFInfo.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a UDFInfo message from the specified reader or buffer. + * @function decode + * @memberof query.UDFInfo + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {query.UDFInfo} UDFInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + UDFInfo.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.query.UDFInfo(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.name = reader.string(); + break; + } + case 2: { + message.aggregating = reader.bool(); + break; + } + case 3: { + message.return_type = reader.int32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a UDFInfo message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof query.UDFInfo + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {query.UDFInfo} UDFInfo + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + UDFInfo.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a UDFInfo message. 
+ * @function verify + * @memberof query.UDFInfo + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + UDFInfo.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.name != null && message.hasOwnProperty("name")) + if (!$util.isString(message.name)) + return "name: string expected"; + if (message.aggregating != null && message.hasOwnProperty("aggregating")) + if (typeof message.aggregating !== "boolean") + return "aggregating: boolean expected"; + if (message.return_type != null && message.hasOwnProperty("return_type")) + switch (message.return_type) { + default: + return "return_type: enum value expected"; + case 0: + case 257: + case 770: + case 259: + case 772: + case 261: + case 774: + case 263: + case 776: + case 265: + case 778: + case 1035: + case 1036: + case 2061: + case 2062: + case 2063: + case 2064: + case 785: + case 18: + case 6163: + case 10260: + case 6165: + case 10262: + case 6167: + case 10264: + case 2073: + case 2074: + case 2075: + case 28: + case 2077: + case 2078: + case 31: + case 4128: + case 4129: + case 4130: + break; + } + return null; + }; + + /** + * Creates a UDFInfo message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof query.UDFInfo + * @static + * @param {Object.} object Plain object + * @returns {query.UDFInfo} UDFInfo + */ + UDFInfo.fromObject = function fromObject(object) { + if (object instanceof $root.query.UDFInfo) + return object; + let message = new $root.query.UDFInfo(); + if (object.name != null) + message.name = String(object.name); + if (object.aggregating != null) + message.aggregating = Boolean(object.aggregating); + switch (object.return_type) { + default: + if (typeof object.return_type === "number") { + message.return_type = object.return_type; + break; + } + break; + case "NULL_TYPE": + case 0: + message.return_type = 0; + break; + case "INT8": + case 257: + message.return_type = 257; + break; + case "UINT8": + case 770: + message.return_type = 770; + break; + case "INT16": + case 259: + message.return_type = 259; + break; + case "UINT16": + case 772: + message.return_type = 772; + break; + case "INT24": + case 261: + message.return_type = 261; + break; + case "UINT24": + case 774: + message.return_type = 774; + break; + case "INT32": + case 263: + message.return_type = 263; + break; + case "UINT32": + case 776: + message.return_type = 776; + break; + case "INT64": + case 265: + message.return_type = 265; + break; + case "UINT64": + case 778: + message.return_type = 778; + break; + case "FLOAT32": + case 1035: + message.return_type = 1035; + break; + case "FLOAT64": + case 1036: + message.return_type = 1036; + break; + case "TIMESTAMP": + case 2061: + message.return_type = 2061; + break; + case "DATE": + case 2062: + message.return_type = 2062; + break; + case "TIME": + case 2063: + message.return_type = 2063; + break; + case "DATETIME": + case 2064: + message.return_type = 2064; + break; + case "YEAR": + case 785: + message.return_type = 785; + break; + case "DECIMAL": + case 18: + message.return_type = 18; + break; + case "TEXT": + case 6163: + message.return_type = 6163; + break; + case "BLOB": + case 10260: + 
message.return_type = 10260; + break; + case "VARCHAR": + case 6165: + message.return_type = 6165; + break; + case "VARBINARY": + case 10262: + message.return_type = 10262; + break; + case "CHAR": + case 6167: + message.return_type = 6167; + break; + case "BINARY": + case 10264: + message.return_type = 10264; + break; + case "BIT": + case 2073: + message.return_type = 2073; + break; + case "ENUM": + case 2074: + message.return_type = 2074; + break; + case "SET": + case 2075: + message.return_type = 2075; + break; + case "TUPLE": + case 28: + message.return_type = 28; + break; + case "GEOMETRY": + case 2077: + message.return_type = 2077; + break; + case "JSON": + case 2078: + message.return_type = 2078; + break; + case "EXPRESSION": + case 31: + message.return_type = 31; + break; + case "HEXNUM": + case 4128: + message.return_type = 4128; + break; + case "HEXVAL": + case 4129: + message.return_type = 4129; + break; + case "BITNUM": + case 4130: + message.return_type = 4130; + break; + } + return message; + }; + + /** + * Creates a plain object from a UDFInfo message. Also converts values to other types if specified. + * @function toObject + * @memberof query.UDFInfo + * @static + * @param {query.UDFInfo} message UDFInfo + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + UDFInfo.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.name = ""; + object.aggregating = false; + object.return_type = options.enums === String ? "NULL_TYPE" : 0; + } + if (message.name != null && message.hasOwnProperty("name")) + object.name = message.name; + if (message.aggregating != null && message.hasOwnProperty("aggregating")) + object.aggregating = message.aggregating; + if (message.return_type != null && message.hasOwnProperty("return_type")) + object.return_type = options.enums === String ? $root.query.Type[message.return_type] === undefined ? 
message.return_type : $root.query.Type[message.return_type] : message.return_type; + return object; + }; + + /** + * Converts this UDFInfo to JSON. + * @function toJSON + * @memberof query.UDFInfo + * @instance + * @returns {Object.} JSON object + */ + UDFInfo.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for UDFInfo + * @function getTypeUrl + * @memberof query.UDFInfo + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + UDFInfo.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/query.UDFInfo"; + }; + + return UDFInfo; + })(); + query.GetSchemaResponse = (function() { /** * Properties of a GetSchemaResponse. * @memberof query * @interface IGetSchemaResponse + * @property {Array.|null} [udfs] GetSchemaResponse udfs * @property {Object.|null} [table_definition] GetSchemaResponse table_definition */ @@ -97101,6 +101929,7 @@ export const query = $root.query = (() => { * @param {query.IGetSchemaResponse=} [properties] Properties to set */ function GetSchemaResponse(properties) { + this.udfs = []; this.table_definition = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) @@ -97108,6 +101937,14 @@ export const query = $root.query = (() => { this[keys[i]] = properties[keys[i]]; } + /** + * GetSchemaResponse udfs. + * @member {Array.} udfs + * @memberof query.GetSchemaResponse + * @instance + */ + GetSchemaResponse.prototype.udfs = $util.emptyArray; + /** * GetSchemaResponse table_definition. 
* @member {Object.} table_definition @@ -97140,6 +101977,9 @@ export const query = $root.query = (() => { GetSchemaResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.udfs != null && message.udfs.length) + for (let i = 0; i < message.udfs.length; ++i) + $root.query.UDFInfo.encode(message.udfs[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.table_definition != null && Object.hasOwnProperty.call(message, "table_definition")) for (let keys = Object.keys(message.table_definition), i = 0; i < keys.length; ++i) writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.table_definition[keys[i]]).ldelim(); @@ -97177,6 +102017,12 @@ export const query = $root.query = (() => { while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + if (!(message.udfs && message.udfs.length)) + message.udfs = []; + message.udfs.push($root.query.UDFInfo.decode(reader, reader.uint32())); + break; + } case 2: { if (message.table_definition === $util.emptyObject) message.table_definition = {}; @@ -97235,6 +102081,15 @@ export const query = $root.query = (() => { GetSchemaResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.udfs != null && message.hasOwnProperty("udfs")) { + if (!Array.isArray(message.udfs)) + return "udfs: array expected"; + for (let i = 0; i < message.udfs.length; ++i) { + let error = $root.query.UDFInfo.verify(message.udfs[i]); + if (error) + return "udfs." 
+ error; + } + } if (message.table_definition != null && message.hasOwnProperty("table_definition")) { if (!$util.isObject(message.table_definition)) return "table_definition: object expected"; @@ -97258,6 +102113,16 @@ export const query = $root.query = (() => { if (object instanceof $root.query.GetSchemaResponse) return object; let message = new $root.query.GetSchemaResponse(); + if (object.udfs) { + if (!Array.isArray(object.udfs)) + throw TypeError(".query.GetSchemaResponse.udfs: array expected"); + message.udfs = []; + for (let i = 0; i < object.udfs.length; ++i) { + if (typeof object.udfs[i] !== "object") + throw TypeError(".query.GetSchemaResponse.udfs: object expected"); + message.udfs[i] = $root.query.UDFInfo.fromObject(object.udfs[i]); + } + } if (object.table_definition) { if (typeof object.table_definition !== "object") throw TypeError(".query.GetSchemaResponse.table_definition: object expected"); @@ -97281,8 +102146,15 @@ export const query = $root.query = (() => { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.udfs = []; if (options.objects || options.defaults) object.table_definition = {}; + if (message.udfs && message.udfs.length) { + object.udfs = []; + for (let j = 0; j < message.udfs.length; ++j) + object.udfs[j] = $root.query.UDFInfo.toObject(message.udfs[j], options); + } let keys2; if (message.table_definition && (keys2 = Object.keys(message.table_definition)).length) { object.table_definition = {}; @@ -98020,6 +102892,233 @@ export const replicationdata = $root.replicationdata = (() => { return Status; })(); + replicationdata.Configuration = (function() { + + /** + * Properties of a Configuration. + * @memberof replicationdata + * @interface IConfiguration + * @property {number|null} [heartbeat_interval] Configuration heartbeat_interval + * @property {number|null} [replica_net_timeout] Configuration replica_net_timeout + */ + + /** + * Constructs a new Configuration. 
+ * @memberof replicationdata + * @classdesc Represents a Configuration. + * @implements IConfiguration + * @constructor + * @param {replicationdata.IConfiguration=} [properties] Properties to set + */ + function Configuration(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Configuration heartbeat_interval. + * @member {number} heartbeat_interval + * @memberof replicationdata.Configuration + * @instance + */ + Configuration.prototype.heartbeat_interval = 0; + + /** + * Configuration replica_net_timeout. + * @member {number} replica_net_timeout + * @memberof replicationdata.Configuration + * @instance + */ + Configuration.prototype.replica_net_timeout = 0; + + /** + * Creates a new Configuration instance using the specified properties. + * @function create + * @memberof replicationdata.Configuration + * @static + * @param {replicationdata.IConfiguration=} [properties] Properties to set + * @returns {replicationdata.Configuration} Configuration instance + */ + Configuration.create = function create(properties) { + return new Configuration(properties); + }; + + /** + * Encodes the specified Configuration message. Does not implicitly {@link replicationdata.Configuration.verify|verify} messages. 
+ * @function encode + * @memberof replicationdata.Configuration + * @static + * @param {replicationdata.IConfiguration} message Configuration message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Configuration.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.heartbeat_interval != null && Object.hasOwnProperty.call(message, "heartbeat_interval")) + writer.uint32(/* id 1, wireType 1 =*/9).double(message.heartbeat_interval); + if (message.replica_net_timeout != null && Object.hasOwnProperty.call(message, "replica_net_timeout")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.replica_net_timeout); + return writer; + }; + + /** + * Encodes the specified Configuration message, length delimited. Does not implicitly {@link replicationdata.Configuration.verify|verify} messages. + * @function encodeDelimited + * @memberof replicationdata.Configuration + * @static + * @param {replicationdata.IConfiguration} message Configuration message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + Configuration.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a Configuration message from the specified reader or buffer. 
+ * @function decode + * @memberof replicationdata.Configuration + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {replicationdata.Configuration} Configuration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Configuration.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.replicationdata.Configuration(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.heartbeat_interval = reader.double(); + break; + } + case 2: { + message.replica_net_timeout = reader.int32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a Configuration message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof replicationdata.Configuration + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {replicationdata.Configuration} Configuration + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + Configuration.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a Configuration message. 
+ * @function verify + * @memberof replicationdata.Configuration + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + Configuration.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.heartbeat_interval != null && message.hasOwnProperty("heartbeat_interval")) + if (typeof message.heartbeat_interval !== "number") + return "heartbeat_interval: number expected"; + if (message.replica_net_timeout != null && message.hasOwnProperty("replica_net_timeout")) + if (!$util.isInteger(message.replica_net_timeout)) + return "replica_net_timeout: integer expected"; + return null; + }; + + /** + * Creates a Configuration message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof replicationdata.Configuration + * @static + * @param {Object.} object Plain object + * @returns {replicationdata.Configuration} Configuration + */ + Configuration.fromObject = function fromObject(object) { + if (object instanceof $root.replicationdata.Configuration) + return object; + let message = new $root.replicationdata.Configuration(); + if (object.heartbeat_interval != null) + message.heartbeat_interval = Number(object.heartbeat_interval); + if (object.replica_net_timeout != null) + message.replica_net_timeout = object.replica_net_timeout | 0; + return message; + }; + + /** + * Creates a plain object from a Configuration message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof replicationdata.Configuration + * @static + * @param {replicationdata.Configuration} message Configuration + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + Configuration.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.heartbeat_interval = 0; + object.replica_net_timeout = 0; + } + if (message.heartbeat_interval != null && message.hasOwnProperty("heartbeat_interval")) + object.heartbeat_interval = options.json && !isFinite(message.heartbeat_interval) ? String(message.heartbeat_interval) : message.heartbeat_interval; + if (message.replica_net_timeout != null && message.hasOwnProperty("replica_net_timeout")) + object.replica_net_timeout = message.replica_net_timeout; + return object; + }; + + /** + * Converts this Configuration to JSON. + * @function toJSON + * @memberof replicationdata.Configuration + * @instance + * @returns {Object.} JSON object + */ + Configuration.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for Configuration + * @function getTypeUrl + * @memberof replicationdata.Configuration + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + Configuration.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/replicationdata.Configuration"; + }; + + return Configuration; + })(); + replicationdata.StopReplicationStatus = (function() { /** @@ -98525,6 +103624,7 @@ export const replicationdata = $root.replicationdata = (() => { * @property {number|Long|null} [semi_sync_primary_timeout] FullStatus semi_sync_primary_timeout * @property {number|null} 
[semi_sync_wait_for_replica_count] FullStatus semi_sync_wait_for_replica_count * @property {boolean|null} [super_read_only] FullStatus super_read_only + * @property {replicationdata.IConfiguration|null} [replication_configuration] FullStatus replication_configuration */ /** @@ -98710,6 +103810,14 @@ export const replicationdata = $root.replicationdata = (() => { */ FullStatus.prototype.super_read_only = false; + /** + * FullStatus replication_configuration. + * @member {replicationdata.IConfiguration|null|undefined} replication_configuration + * @memberof replicationdata.FullStatus + * @instance + */ + FullStatus.prototype.replication_configuration = null; + /** * Creates a new FullStatus instance using the specified properties. * @function create @@ -98776,6 +103884,8 @@ export const replicationdata = $root.replicationdata = (() => { writer.uint32(/* id 20, wireType 0 =*/160).uint32(message.semi_sync_wait_for_replica_count); if (message.super_read_only != null && Object.hasOwnProperty.call(message, "super_read_only")) writer.uint32(/* id 21, wireType 0 =*/168).bool(message.super_read_only); + if (message.replication_configuration != null && Object.hasOwnProperty.call(message, "replication_configuration")) + $root.replicationdata.Configuration.encode(message.replication_configuration, writer.uint32(/* id 22, wireType 2 =*/178).fork()).ldelim(); return writer; }; @@ -98894,6 +104004,10 @@ export const replicationdata = $root.replicationdata = (() => { message.super_read_only = reader.bool(); break; } + case 22: { + message.replication_configuration = $root.replicationdata.Configuration.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -98996,6 +104110,11 @@ export const replicationdata = $root.replicationdata = (() => { if (message.super_read_only != null && message.hasOwnProperty("super_read_only")) if (typeof message.super_read_only !== "boolean") return "super_read_only: boolean expected"; + if 
(message.replication_configuration != null && message.hasOwnProperty("replication_configuration")) { + let error = $root.replicationdata.Configuration.verify(message.replication_configuration); + if (error) + return "replication_configuration." + error; + } return null; }; @@ -99066,6 +104185,11 @@ export const replicationdata = $root.replicationdata = (() => { message.semi_sync_wait_for_replica_count = object.semi_sync_wait_for_replica_count >>> 0; if (object.super_read_only != null) message.super_read_only = Boolean(object.super_read_only); + if (object.replication_configuration != null) { + if (typeof object.replication_configuration !== "object") + throw TypeError(".replicationdata.FullStatus.replication_configuration: object expected"); + message.replication_configuration = $root.replicationdata.Configuration.fromObject(object.replication_configuration); + } return message; }; @@ -99108,6 +104232,7 @@ export const replicationdata = $root.replicationdata = (() => { object.semi_sync_primary_timeout = options.longs === String ? 
"0" : 0; object.semi_sync_wait_for_replica_count = 0; object.super_read_only = false; + object.replication_configuration = null; } if (message.server_id != null && message.hasOwnProperty("server_id")) object.server_id = message.server_id; @@ -99154,6 +104279,8 @@ export const replicationdata = $root.replicationdata = (() => { object.semi_sync_wait_for_replica_count = message.semi_sync_wait_for_replica_count; if (message.super_read_only != null && message.hasOwnProperty("super_read_only")) object.super_read_only = message.super_read_only; + if (message.replication_configuration != null && message.hasOwnProperty("replication_configuration")) + object.replication_configuration = $root.replicationdata.Configuration.toObject(message.replication_configuration, options); return object; }; @@ -99676,6 +104803,7 @@ export const vschema = $root.vschema = (() => { * @property {Object.|null} [tables] Keyspace tables * @property {boolean|null} [require_explicit_routing] Keyspace require_explicit_routing * @property {vschema.Keyspace.ForeignKeyMode|null} [foreign_key_mode] Keyspace foreign_key_mode + * @property {vschema.IMultiTenantSpec|null} [multi_tenant_spec] Keyspace multi_tenant_spec */ /** @@ -99735,6 +104863,14 @@ export const vschema = $root.vschema = (() => { */ Keyspace.prototype.foreign_key_mode = 0; + /** + * Keyspace multi_tenant_spec. + * @member {vschema.IMultiTenantSpec|null|undefined} multi_tenant_spec + * @memberof vschema.Keyspace + * @instance + */ + Keyspace.prototype.multi_tenant_spec = null; + /** * Creates a new Keyspace instance using the specified properties. 
* @function create @@ -99775,6 +104911,8 @@ export const vschema = $root.vschema = (() => { writer.uint32(/* id 4, wireType 0 =*/32).bool(message.require_explicit_routing); if (message.foreign_key_mode != null && Object.hasOwnProperty.call(message, "foreign_key_mode")) writer.uint32(/* id 5, wireType 0 =*/40).int32(message.foreign_key_mode); + if (message.multi_tenant_spec != null && Object.hasOwnProperty.call(message, "multi_tenant_spec")) + $root.vschema.MultiTenantSpec.encode(message.multi_tenant_spec, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; @@ -99867,6 +105005,10 @@ export const vschema = $root.vschema = (() => { message.foreign_key_mode = reader.int32(); break; } + case 6: { + message.multi_tenant_spec = $root.vschema.MultiTenantSpec.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -99938,6 +105080,11 @@ export const vschema = $root.vschema = (() => { case 3: break; } + if (message.multi_tenant_spec != null && message.hasOwnProperty("multi_tenant_spec")) { + let error = $root.vschema.MultiTenantSpec.verify(message.multi_tenant_spec); + if (error) + return "multi_tenant_spec." + error; + } return null; }; @@ -100001,6 +105148,11 @@ export const vschema = $root.vschema = (() => { message.foreign_key_mode = 3; break; } + if (object.multi_tenant_spec != null) { + if (typeof object.multi_tenant_spec !== "object") + throw TypeError(".vschema.Keyspace.multi_tenant_spec: object expected"); + message.multi_tenant_spec = $root.vschema.MultiTenantSpec.fromObject(object.multi_tenant_spec); + } return message; }; @@ -100025,6 +105177,7 @@ export const vschema = $root.vschema = (() => { object.sharded = false; object.require_explicit_routing = false; object.foreign_key_mode = options.enums === String ? 
"unspecified" : 0; + object.multi_tenant_spec = null; } if (message.sharded != null && message.hasOwnProperty("sharded")) object.sharded = message.sharded; @@ -100043,6 +105196,8 @@ export const vschema = $root.vschema = (() => { object.require_explicit_routing = message.require_explicit_routing; if (message.foreign_key_mode != null && message.hasOwnProperty("foreign_key_mode")) object.foreign_key_mode = options.enums === String ? $root.vschema.Keyspace.ForeignKeyMode[message.foreign_key_mode] === undefined ? message.foreign_key_mode : $root.vschema.Keyspace.ForeignKeyMode[message.foreign_key_mode] : message.foreign_key_mode; + if (message.multi_tenant_spec != null && message.hasOwnProperty("multi_tenant_spec")) + object.multi_tenant_spec = $root.vschema.MultiTenantSpec.toObject(message.multi_tenant_spec, options); return object; }; @@ -100093,6 +105248,417 @@ export const vschema = $root.vschema = (() => { return Keyspace; })(); + vschema.MultiTenantSpec = (function() { + + /** + * Properties of a MultiTenantSpec. + * @memberof vschema + * @interface IMultiTenantSpec + * @property {string|null} [tenant_id_column_name] MultiTenantSpec tenant_id_column_name + * @property {query.Type|null} [tenant_id_column_type] MultiTenantSpec tenant_id_column_type + */ + + /** + * Constructs a new MultiTenantSpec. + * @memberof vschema + * @classdesc Represents a MultiTenantSpec. + * @implements IMultiTenantSpec + * @constructor + * @param {vschema.IMultiTenantSpec=} [properties] Properties to set + */ + function MultiTenantSpec(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * MultiTenantSpec tenant_id_column_name. + * @member {string} tenant_id_column_name + * @memberof vschema.MultiTenantSpec + * @instance + */ + MultiTenantSpec.prototype.tenant_id_column_name = ""; + + /** + * MultiTenantSpec tenant_id_column_type. 
+ * @member {query.Type} tenant_id_column_type + * @memberof vschema.MultiTenantSpec + * @instance + */ + MultiTenantSpec.prototype.tenant_id_column_type = 0; + + /** + * Creates a new MultiTenantSpec instance using the specified properties. + * @function create + * @memberof vschema.MultiTenantSpec + * @static + * @param {vschema.IMultiTenantSpec=} [properties] Properties to set + * @returns {vschema.MultiTenantSpec} MultiTenantSpec instance + */ + MultiTenantSpec.create = function create(properties) { + return new MultiTenantSpec(properties); + }; + + /** + * Encodes the specified MultiTenantSpec message. Does not implicitly {@link vschema.MultiTenantSpec.verify|verify} messages. + * @function encode + * @memberof vschema.MultiTenantSpec + * @static + * @param {vschema.IMultiTenantSpec} message MultiTenantSpec message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MultiTenantSpec.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tenant_id_column_name != null && Object.hasOwnProperty.call(message, "tenant_id_column_name")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.tenant_id_column_name); + if (message.tenant_id_column_type != null && Object.hasOwnProperty.call(message, "tenant_id_column_type")) + writer.uint32(/* id 2, wireType 0 =*/16).int32(message.tenant_id_column_type); + return writer; + }; + + /** + * Encodes the specified MultiTenantSpec message, length delimited. Does not implicitly {@link vschema.MultiTenantSpec.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vschema.MultiTenantSpec + * @static + * @param {vschema.IMultiTenantSpec} message MultiTenantSpec message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + MultiTenantSpec.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a MultiTenantSpec message from the specified reader or buffer. + * @function decode + * @memberof vschema.MultiTenantSpec + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.MultiTenantSpec} MultiTenantSpec + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MultiTenantSpec.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.MultiTenantSpec(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tenant_id_column_name = reader.string(); + break; + } + case 2: { + message.tenant_id_column_type = reader.int32(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a MultiTenantSpec message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vschema.MultiTenantSpec + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.MultiTenantSpec} MultiTenantSpec + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + MultiTenantSpec.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a MultiTenantSpec message. + * @function verify + * @memberof vschema.MultiTenantSpec + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + MultiTenantSpec.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tenant_id_column_name != null && message.hasOwnProperty("tenant_id_column_name")) + if (!$util.isString(message.tenant_id_column_name)) + return "tenant_id_column_name: string expected"; + if (message.tenant_id_column_type != null && message.hasOwnProperty("tenant_id_column_type")) + switch (message.tenant_id_column_type) { + default: + return "tenant_id_column_type: enum value expected"; + case 0: + case 257: + case 770: + case 259: + case 772: + case 261: + case 774: + case 263: + case 776: + case 265: + case 778: + case 1035: + case 1036: + case 2061: + case 2062: + case 2063: + case 2064: + case 785: + case 18: + case 6163: + case 10260: + case 6165: + case 10262: + case 6167: + case 10264: + case 2073: + case 2074: + case 2075: + case 28: + case 2077: + case 2078: + case 31: + case 4128: + case 4129: + case 4130: + break; + } + return null; + }; + + /** + * Creates a MultiTenantSpec message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.MultiTenantSpec + * @static + * @param {Object.} object Plain object + * @returns {vschema.MultiTenantSpec} MultiTenantSpec + */ + MultiTenantSpec.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.MultiTenantSpec) + return object; + let message = new $root.vschema.MultiTenantSpec(); + if (object.tenant_id_column_name != null) + message.tenant_id_column_name = String(object.tenant_id_column_name); + switch (object.tenant_id_column_type) { + default: + if (typeof object.tenant_id_column_type === "number") { + message.tenant_id_column_type = object.tenant_id_column_type; + break; + } + break; + case "NULL_TYPE": + case 0: + message.tenant_id_column_type = 0; + break; + case "INT8": + case 257: + message.tenant_id_column_type = 257; + break; + case "UINT8": + case 770: + message.tenant_id_column_type = 770; + break; + case "INT16": + case 259: + message.tenant_id_column_type = 259; + break; + case "UINT16": + case 772: + message.tenant_id_column_type = 772; + break; + case "INT24": + case 261: + message.tenant_id_column_type = 261; + break; + case "UINT24": + case 774: + message.tenant_id_column_type = 774; + break; + case "INT32": + case 263: + message.tenant_id_column_type = 263; + break; + case "UINT32": + case 776: + message.tenant_id_column_type = 776; + break; + case "INT64": + case 265: + message.tenant_id_column_type = 265; + break; + case "UINT64": + case 778: + message.tenant_id_column_type = 778; + break; + case "FLOAT32": + case 1035: + message.tenant_id_column_type = 1035; + break; + case "FLOAT64": + case 1036: + message.tenant_id_column_type = 1036; + break; + case "TIMESTAMP": + case 2061: + message.tenant_id_column_type = 2061; + break; + case "DATE": + case 2062: + message.tenant_id_column_type = 2062; + break; + case "TIME": + case 2063: + message.tenant_id_column_type = 2063; + break; + case "DATETIME": + case 2064: + message.tenant_id_column_type = 2064; + break; + case 
"YEAR": + case 785: + message.tenant_id_column_type = 785; + break; + case "DECIMAL": + case 18: + message.tenant_id_column_type = 18; + break; + case "TEXT": + case 6163: + message.tenant_id_column_type = 6163; + break; + case "BLOB": + case 10260: + message.tenant_id_column_type = 10260; + break; + case "VARCHAR": + case 6165: + message.tenant_id_column_type = 6165; + break; + case "VARBINARY": + case 10262: + message.tenant_id_column_type = 10262; + break; + case "CHAR": + case 6167: + message.tenant_id_column_type = 6167; + break; + case "BINARY": + case 10264: + message.tenant_id_column_type = 10264; + break; + case "BIT": + case 2073: + message.tenant_id_column_type = 2073; + break; + case "ENUM": + case 2074: + message.tenant_id_column_type = 2074; + break; + case "SET": + case 2075: + message.tenant_id_column_type = 2075; + break; + case "TUPLE": + case 28: + message.tenant_id_column_type = 28; + break; + case "GEOMETRY": + case 2077: + message.tenant_id_column_type = 2077; + break; + case "JSON": + case 2078: + message.tenant_id_column_type = 2078; + break; + case "EXPRESSION": + case 31: + message.tenant_id_column_type = 31; + break; + case "HEXNUM": + case 4128: + message.tenant_id_column_type = 4128; + break; + case "HEXVAL": + case 4129: + message.tenant_id_column_type = 4129; + break; + case "BITNUM": + case 4130: + message.tenant_id_column_type = 4130; + break; + } + return message; + }; + + /** + * Creates a plain object from a MultiTenantSpec message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vschema.MultiTenantSpec + * @static + * @param {vschema.MultiTenantSpec} message MultiTenantSpec + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + MultiTenantSpec.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tenant_id_column_name = ""; + object.tenant_id_column_type = options.enums === String ? "NULL_TYPE" : 0; + } + if (message.tenant_id_column_name != null && message.hasOwnProperty("tenant_id_column_name")) + object.tenant_id_column_name = message.tenant_id_column_name; + if (message.tenant_id_column_type != null && message.hasOwnProperty("tenant_id_column_type")) + object.tenant_id_column_type = options.enums === String ? $root.query.Type[message.tenant_id_column_type] === undefined ? message.tenant_id_column_type : $root.query.Type[message.tenant_id_column_type] : message.tenant_id_column_type; + return object; + }; + + /** + * Converts this MultiTenantSpec to JSON. 
+ * @function toJSON + * @memberof vschema.MultiTenantSpec + * @instance + * @returns {Object.} JSON object + */ + MultiTenantSpec.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for MultiTenantSpec + * @function getTypeUrl + * @memberof vschema.MultiTenantSpec + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + MultiTenantSpec.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.MultiTenantSpec"; + }; + + return MultiTenantSpec; + })(); + vschema.Vindex = (function() { /** @@ -101273,6 +106839,12 @@ export const vschema = $root.vschema = (() => { * @property {string|null} [name] Column name * @property {query.Type|null} [type] Column type * @property {boolean|null} [invisible] Column invisible + * @property {string|null} ["default"] Column default + * @property {string|null} [collation_name] Column collation_name + * @property {number|null} [size] Column size + * @property {number|null} [scale] Column scale + * @property {boolean|null} [nullable] Column nullable + * @property {Array.|null} [values] Column values */ /** @@ -101284,6 +106856,7 @@ export const vschema = $root.vschema = (() => { * @param {vschema.IColumn=} [properties] Properties to set */ function Column(properties) { + this.values = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -101314,6 +106887,68 @@ export const vschema = $root.vschema = (() => { */ Column.prototype.invisible = false; + /** + * Column default. + * @member {string} default + * @memberof vschema.Column + * @instance + */ + Column.prototype["default"] = ""; + + /** + * Column collation_name. 
+ * @member {string} collation_name + * @memberof vschema.Column + * @instance + */ + Column.prototype.collation_name = ""; + + /** + * Column size. + * @member {number} size + * @memberof vschema.Column + * @instance + */ + Column.prototype.size = 0; + + /** + * Column scale. + * @member {number} scale + * @memberof vschema.Column + * @instance + */ + Column.prototype.scale = 0; + + /** + * Column nullable. + * @member {boolean|null|undefined} nullable + * @memberof vschema.Column + * @instance + */ + Column.prototype.nullable = null; + + /** + * Column values. + * @member {Array.} values + * @memberof vschema.Column + * @instance + */ + Column.prototype.values = $util.emptyArray; + + // OneOf field names bound to virtual getters and setters + let $oneOfFields; + + /** + * Column _nullable. + * @member {"nullable"|undefined} _nullable + * @memberof vschema.Column + * @instance + */ + Object.defineProperty(Column.prototype, "_nullable", { + get: $util.oneOfGetter($oneOfFields = ["nullable"]), + set: $util.oneOfSetter($oneOfFields) + }); + /** * Creates a new Column instance using the specified properties. 
* @function create @@ -101344,6 +106979,19 @@ export const vschema = $root.vschema = (() => { writer.uint32(/* id 2, wireType 0 =*/16).int32(message.type); if (message.invisible != null && Object.hasOwnProperty.call(message, "invisible")) writer.uint32(/* id 3, wireType 0 =*/24).bool(message.invisible); + if (message["default"] != null && Object.hasOwnProperty.call(message, "default")) + writer.uint32(/* id 4, wireType 2 =*/34).string(message["default"]); + if (message.collation_name != null && Object.hasOwnProperty.call(message, "collation_name")) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.collation_name); + if (message.size != null && Object.hasOwnProperty.call(message, "size")) + writer.uint32(/* id 6, wireType 0 =*/48).int32(message.size); + if (message.scale != null && Object.hasOwnProperty.call(message, "scale")) + writer.uint32(/* id 7, wireType 0 =*/56).int32(message.scale); + if (message.nullable != null && Object.hasOwnProperty.call(message, "nullable")) + writer.uint32(/* id 8, wireType 0 =*/64).bool(message.nullable); + if (message.values != null && message.values.length) + for (let i = 0; i < message.values.length; ++i) + writer.uint32(/* id 9, wireType 2 =*/74).string(message.values[i]); return writer; }; @@ -101390,6 +107038,32 @@ export const vschema = $root.vschema = (() => { message.invisible = reader.bool(); break; } + case 4: { + message["default"] = reader.string(); + break; + } + case 5: { + message.collation_name = reader.string(); + break; + } + case 6: { + message.size = reader.int32(); + break; + } + case 7: { + message.scale = reader.int32(); + break; + } + case 8: { + message.nullable = reader.bool(); + break; + } + case 9: { + if (!(message.values && message.values.length)) + message.values = []; + message.values.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -101425,6 +107099,7 @@ export const vschema = $root.vschema = (() => { Column.verify = function verify(message) { if (typeof 
message !== "object" || message === null) return "object expected"; + let properties = {}; if (message.name != null && message.hasOwnProperty("name")) if (!$util.isString(message.name)) return "name: string expected"; @@ -101472,6 +107147,30 @@ export const vschema = $root.vschema = (() => { if (message.invisible != null && message.hasOwnProperty("invisible")) if (typeof message.invisible !== "boolean") return "invisible: boolean expected"; + if (message["default"] != null && message.hasOwnProperty("default")) + if (!$util.isString(message["default"])) + return "default: string expected"; + if (message.collation_name != null && message.hasOwnProperty("collation_name")) + if (!$util.isString(message.collation_name)) + return "collation_name: string expected"; + if (message.size != null && message.hasOwnProperty("size")) + if (!$util.isInteger(message.size)) + return "size: integer expected"; + if (message.scale != null && message.hasOwnProperty("scale")) + if (!$util.isInteger(message.scale)) + return "scale: integer expected"; + if (message.nullable != null && message.hasOwnProperty("nullable")) { + properties._nullable = 1; + if (typeof message.nullable !== "boolean") + return "nullable: boolean expected"; + } + if (message.values != null && message.hasOwnProperty("values")) { + if (!Array.isArray(message.values)) + return "values: array expected"; + for (let i = 0; i < message.values.length; ++i) + if (!$util.isString(message.values[i])) + return "values: string[] expected"; + } return null; }; @@ -101639,6 +107338,23 @@ export const vschema = $root.vschema = (() => { } if (object.invisible != null) message.invisible = Boolean(object.invisible); + if (object["default"] != null) + message["default"] = String(object["default"]); + if (object.collation_name != null) + message.collation_name = String(object.collation_name); + if (object.size != null) + message.size = object.size | 0; + if (object.scale != null) + message.scale = object.scale | 0; + if 
(object.nullable != null) + message.nullable = Boolean(object.nullable); + if (object.values) { + if (!Array.isArray(object.values)) + throw TypeError(".vschema.Column.values: array expected"); + message.values = []; + for (let i = 0; i < object.values.length; ++i) + message.values[i] = String(object.values[i]); + } return message; }; @@ -101655,10 +107371,16 @@ export const vschema = $root.vschema = (() => { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.values = []; if (options.defaults) { object.name = ""; object.type = options.enums === String ? "NULL_TYPE" : 0; object.invisible = false; + object["default"] = ""; + object.collation_name = ""; + object.size = 0; + object.scale = 0; } if (message.name != null && message.hasOwnProperty("name")) object.name = message.name; @@ -101666,6 +107388,24 @@ export const vschema = $root.vschema = (() => { object.type = options.enums === String ? $root.query.Type[message.type] === undefined ? message.type : $root.query.Type[message.type] : message.type; if (message.invisible != null && message.hasOwnProperty("invisible")) object.invisible = message.invisible; + if (message["default"] != null && message.hasOwnProperty("default")) + object["default"] = message["default"]; + if (message.collation_name != null && message.hasOwnProperty("collation_name")) + object.collation_name = message.collation_name; + if (message.size != null && message.hasOwnProperty("size")) + object.size = message.size; + if (message.scale != null && message.hasOwnProperty("scale")) + object.scale = message.scale; + if (message.nullable != null && message.hasOwnProperty("nullable")) { + object.nullable = message.nullable; + if (options.oneofs) + object._nullable = "nullable"; + } + if (message.values && message.values.length) { + object.values = []; + for (let j = 0; j < message.values.length; ++j) + object.values[j] = message.values[j]; + } return object; }; @@ -101707,6 +107447,7 @@ export const vschema = 
$root.vschema = (() => { * @property {Object.|null} [keyspaces] SrvVSchema keyspaces * @property {vschema.IRoutingRules|null} [routing_rules] SrvVSchema routing_rules * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] SrvVSchema shard_routing_rules + * @property {vschema.IKeyspaceRoutingRules|null} [keyspace_routing_rules] SrvVSchema keyspace_routing_rules */ /** @@ -101749,6 +107490,14 @@ export const vschema = $root.vschema = (() => { */ SrvVSchema.prototype.shard_routing_rules = null; + /** + * SrvVSchema keyspace_routing_rules. + * @member {vschema.IKeyspaceRoutingRules|null|undefined} keyspace_routing_rules + * @memberof vschema.SrvVSchema + * @instance + */ + SrvVSchema.prototype.keyspace_routing_rules = null; + /** * Creates a new SrvVSchema instance using the specified properties. * @function create @@ -101782,6 +107531,8 @@ export const vschema = $root.vschema = (() => { $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim(); if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 3, wireType 2 =*/26).fork()).ldelim(); + if (message.keyspace_routing_rules != null && Object.hasOwnProperty.call(message, "keyspace_routing_rules")) + $root.vschema.KeyspaceRoutingRules.encode(message.keyspace_routing_rules, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); return writer; }; @@ -101847,6 +107598,10 @@ export const vschema = $root.vschema = (() => { message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); break; } + case 4: { + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -101902,6 +107657,11 @@ export const vschema = $root.vschema = (() => { if (error) return "shard_routing_rules." 
+ error; } + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) { + let error = $root.vschema.KeyspaceRoutingRules.verify(message.keyspace_routing_rules); + if (error) + return "keyspace_routing_rules." + error; + } return null; }; @@ -101937,6 +107697,11 @@ export const vschema = $root.vschema = (() => { throw TypeError(".vschema.SrvVSchema.shard_routing_rules: object expected"); message.shard_routing_rules = $root.vschema.ShardRoutingRules.fromObject(object.shard_routing_rules); } + if (object.keyspace_routing_rules != null) { + if (typeof object.keyspace_routing_rules !== "object") + throw TypeError(".vschema.SrvVSchema.keyspace_routing_rules: object expected"); + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.fromObject(object.keyspace_routing_rules); + } return message; }; @@ -101958,6 +107723,7 @@ export const vschema = $root.vschema = (() => { if (options.defaults) { object.routing_rules = null; object.shard_routing_rules = null; + object.keyspace_routing_rules = null; } let keys2; if (message.keyspaces && (keys2 = Object.keys(message.keyspaces)).length) { @@ -101969,6 +107735,8 @@ export const vschema = $root.vschema = (() => { object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); if (message.shard_routing_rules != null && message.hasOwnProperty("shard_routing_rules")) object.shard_routing_rules = $root.vschema.ShardRoutingRules.toObject(message.shard_routing_rules, options); + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) + object.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.toObject(message.keyspace_routing_rules, options); return object; }; @@ -102475,6 +108243,457 @@ export const vschema = $root.vschema = (() => { return ShardRoutingRule; })(); + vschema.KeyspaceRoutingRules = (function() { + + /** + * Properties of a KeyspaceRoutingRules. 
+ * @memberof vschema + * @interface IKeyspaceRoutingRules + * @property {Array.|null} [rules] KeyspaceRoutingRules rules + */ + + /** + * Constructs a new KeyspaceRoutingRules. + * @memberof vschema + * @classdesc Represents a KeyspaceRoutingRules. + * @implements IKeyspaceRoutingRules + * @constructor + * @param {vschema.IKeyspaceRoutingRules=} [properties] Properties to set + */ + function KeyspaceRoutingRules(properties) { + this.rules = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * KeyspaceRoutingRules rules. + * @member {Array.} rules + * @memberof vschema.KeyspaceRoutingRules + * @instance + */ + KeyspaceRoutingRules.prototype.rules = $util.emptyArray; + + /** + * Creates a new KeyspaceRoutingRules instance using the specified properties. + * @function create + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {vschema.IKeyspaceRoutingRules=} [properties] Properties to set + * @returns {vschema.KeyspaceRoutingRules} KeyspaceRoutingRules instance + */ + KeyspaceRoutingRules.create = function create(properties) { + return new KeyspaceRoutingRules(properties); + }; + + /** + * Encodes the specified KeyspaceRoutingRules message. Does not implicitly {@link vschema.KeyspaceRoutingRules.verify|verify} messages. 
+ * @function encode + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {vschema.IKeyspaceRoutingRules} message KeyspaceRoutingRules message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + KeyspaceRoutingRules.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rules != null && message.rules.length) + for (let i = 0; i < message.rules.length; ++i) + $root.vschema.KeyspaceRoutingRule.encode(message.rules[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified KeyspaceRoutingRules message, length delimited. Does not implicitly {@link vschema.KeyspaceRoutingRules.verify|verify} messages. + * @function encodeDelimited + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {vschema.IKeyspaceRoutingRules} message KeyspaceRoutingRules message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + KeyspaceRoutingRules.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a KeyspaceRoutingRules message from the specified reader or buffer. + * @function decode + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.KeyspaceRoutingRules} KeyspaceRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + KeyspaceRoutingRules.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vschema.KeyspaceRoutingRules(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.rules && message.rules.length)) + message.rules = []; + message.rules.push($root.vschema.KeyspaceRoutingRule.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a KeyspaceRoutingRules message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.KeyspaceRoutingRules} KeyspaceRoutingRules + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + KeyspaceRoutingRules.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a KeyspaceRoutingRules message. + * @function verify + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + KeyspaceRoutingRules.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rules != null && message.hasOwnProperty("rules")) { + if (!Array.isArray(message.rules)) + return "rules: array expected"; + for (let i = 0; i < message.rules.length; ++i) { + let error = $root.vschema.KeyspaceRoutingRule.verify(message.rules[i]); + if (error) + return "rules." + error; + } + } + return null; + }; + + /** + * Creates a KeyspaceRoutingRules message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {Object.} object Plain object + * @returns {vschema.KeyspaceRoutingRules} KeyspaceRoutingRules + */ + KeyspaceRoutingRules.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.KeyspaceRoutingRules) + return object; + let message = new $root.vschema.KeyspaceRoutingRules(); + if (object.rules) { + if (!Array.isArray(object.rules)) + throw TypeError(".vschema.KeyspaceRoutingRules.rules: array expected"); + message.rules = []; + for (let i = 0; i < object.rules.length; ++i) { + if (typeof object.rules[i] !== "object") + throw TypeError(".vschema.KeyspaceRoutingRules.rules: object expected"); + message.rules[i] = $root.vschema.KeyspaceRoutingRule.fromObject(object.rules[i]); + } + } + return message; + }; + + /** + * Creates a plain object from a KeyspaceRoutingRules message. Also converts values to other types if specified. + * @function toObject + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {vschema.KeyspaceRoutingRules} message KeyspaceRoutingRules + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + KeyspaceRoutingRules.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.rules = []; + if (message.rules && message.rules.length) { + object.rules = []; + for (let j = 0; j < message.rules.length; ++j) + object.rules[j] = $root.vschema.KeyspaceRoutingRule.toObject(message.rules[j], options); + } + return object; + }; + + /** + * Converts this KeyspaceRoutingRules to JSON. 
+ * @function toJSON + * @memberof vschema.KeyspaceRoutingRules + * @instance + * @returns {Object.} JSON object + */ + KeyspaceRoutingRules.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for KeyspaceRoutingRules + * @function getTypeUrl + * @memberof vschema.KeyspaceRoutingRules + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + KeyspaceRoutingRules.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.KeyspaceRoutingRules"; + }; + + return KeyspaceRoutingRules; + })(); + + vschema.KeyspaceRoutingRule = (function() { + + /** + * Properties of a KeyspaceRoutingRule. + * @memberof vschema + * @interface IKeyspaceRoutingRule + * @property {string|null} [from_keyspace] KeyspaceRoutingRule from_keyspace + * @property {string|null} [to_keyspace] KeyspaceRoutingRule to_keyspace + */ + + /** + * Constructs a new KeyspaceRoutingRule. + * @memberof vschema + * @classdesc Represents a KeyspaceRoutingRule. + * @implements IKeyspaceRoutingRule + * @constructor + * @param {vschema.IKeyspaceRoutingRule=} [properties] Properties to set + */ + function KeyspaceRoutingRule(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * KeyspaceRoutingRule from_keyspace. + * @member {string} from_keyspace + * @memberof vschema.KeyspaceRoutingRule + * @instance + */ + KeyspaceRoutingRule.prototype.from_keyspace = ""; + + /** + * KeyspaceRoutingRule to_keyspace. 
+ * @member {string} to_keyspace + * @memberof vschema.KeyspaceRoutingRule + * @instance + */ + KeyspaceRoutingRule.prototype.to_keyspace = ""; + + /** + * Creates a new KeyspaceRoutingRule instance using the specified properties. + * @function create + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {vschema.IKeyspaceRoutingRule=} [properties] Properties to set + * @returns {vschema.KeyspaceRoutingRule} KeyspaceRoutingRule instance + */ + KeyspaceRoutingRule.create = function create(properties) { + return new KeyspaceRoutingRule(properties); + }; + + /** + * Encodes the specified KeyspaceRoutingRule message. Does not implicitly {@link vschema.KeyspaceRoutingRule.verify|verify} messages. + * @function encode + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {vschema.IKeyspaceRoutingRule} message KeyspaceRoutingRule message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + KeyspaceRoutingRule.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.from_keyspace != null && Object.hasOwnProperty.call(message, "from_keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.from_keyspace); + if (message.to_keyspace != null && Object.hasOwnProperty.call(message, "to_keyspace")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.to_keyspace); + return writer; + }; + + /** + * Encodes the specified KeyspaceRoutingRule message, length delimited. Does not implicitly {@link vschema.KeyspaceRoutingRule.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {vschema.IKeyspaceRoutingRule} message KeyspaceRoutingRule message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + KeyspaceRoutingRule.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a KeyspaceRoutingRule message from the specified reader or buffer. + * @function decode + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vschema.KeyspaceRoutingRule} KeyspaceRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + KeyspaceRoutingRule.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vschema.KeyspaceRoutingRule(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.from_keyspace = reader.string(); + break; + } + case 2: { + message.to_keyspace = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a KeyspaceRoutingRule message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vschema.KeyspaceRoutingRule} KeyspaceRoutingRule + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + KeyspaceRoutingRule.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a KeyspaceRoutingRule message. + * @function verify + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + KeyspaceRoutingRule.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.from_keyspace != null && message.hasOwnProperty("from_keyspace")) + if (!$util.isString(message.from_keyspace)) + return "from_keyspace: string expected"; + if (message.to_keyspace != null && message.hasOwnProperty("to_keyspace")) + if (!$util.isString(message.to_keyspace)) + return "to_keyspace: string expected"; + return null; + }; + + /** + * Creates a KeyspaceRoutingRule message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {Object.} object Plain object + * @returns {vschema.KeyspaceRoutingRule} KeyspaceRoutingRule + */ + KeyspaceRoutingRule.fromObject = function fromObject(object) { + if (object instanceof $root.vschema.KeyspaceRoutingRule) + return object; + let message = new $root.vschema.KeyspaceRoutingRule(); + if (object.from_keyspace != null) + message.from_keyspace = String(object.from_keyspace); + if (object.to_keyspace != null) + message.to_keyspace = String(object.to_keyspace); + return message; + }; + + /** + * Creates a plain object from a KeyspaceRoutingRule message. Also converts values to other types if specified. + * @function toObject + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {vschema.KeyspaceRoutingRule} message KeyspaceRoutingRule + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + KeyspaceRoutingRule.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.from_keyspace = ""; + object.to_keyspace = ""; + } + if (message.from_keyspace != null && message.hasOwnProperty("from_keyspace")) + object.from_keyspace = message.from_keyspace; + if (message.to_keyspace != null && message.hasOwnProperty("to_keyspace")) + object.to_keyspace = message.to_keyspace; + return object; + }; + + /** + * Converts this KeyspaceRoutingRule to JSON. 
+ * @function toJSON + * @memberof vschema.KeyspaceRoutingRule + * @instance + * @returns {Object.} JSON object + */ + KeyspaceRoutingRule.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for KeyspaceRoutingRule + * @function getTypeUrl + * @memberof vschema.KeyspaceRoutingRule + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + KeyspaceRoutingRule.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vschema.KeyspaceRoutingRule"; + }; + + return KeyspaceRoutingRule; + })(); + return vschema; })(); @@ -103240,6 +109459,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {boolean|null} [defer_secondary_keys] MaterializeSettings defer_secondary_keys * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] MaterializeSettings tablet_selection_preference * @property {boolean|null} [atomic_copy] MaterializeSettings atomic_copy + * @property {vtctldata.IWorkflowOptions|null} [workflow_options] MaterializeSettings workflow_options */ /** @@ -103387,6 +109607,14 @@ export const vtctldata = $root.vtctldata = (() => { */ MaterializeSettings.prototype.atomic_copy = false; + /** + * MaterializeSettings workflow_options. + * @member {vtctldata.IWorkflowOptions|null|undefined} workflow_options + * @memberof vtctldata.MaterializeSettings + * @instance + */ + MaterializeSettings.prototype.workflow_options = null; + /** * Creates a new MaterializeSettings instance using the specified properties. 
* @function create @@ -103445,6 +109673,8 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 15, wireType 0 =*/120).int32(message.tablet_selection_preference); if (message.atomic_copy != null && Object.hasOwnProperty.call(message, "atomic_copy")) writer.uint32(/* id 16, wireType 0 =*/128).bool(message.atomic_copy); + if (message.workflow_options != null && Object.hasOwnProperty.call(message, "workflow_options")) + $root.vtctldata.WorkflowOptions.encode(message.workflow_options, writer.uint32(/* id 17, wireType 2 =*/138).fork()).ldelim(); return writer; }; @@ -103547,6 +109777,10 @@ export const vtctldata = $root.vtctldata = (() => { message.atomic_copy = reader.bool(); break; } + case 17: { + message.workflow_options = $root.vtctldata.WorkflowOptions.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -103652,6 +109886,11 @@ export const vtctldata = $root.vtctldata = (() => { if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) if (typeof message.atomic_copy !== "boolean") return "atomic_copy: boolean expected"; + if (message.workflow_options != null && message.hasOwnProperty("workflow_options")) { + let error = $root.vtctldata.WorkflowOptions.verify(message.workflow_options); + if (error) + return "workflow_options." + error; + } return null; }; @@ -103748,6 +109987,11 @@ export const vtctldata = $root.vtctldata = (() => { } if (object.atomic_copy != null) message.atomic_copy = Boolean(object.atomic_copy); + if (object.workflow_options != null) { + if (typeof object.workflow_options !== "object") + throw TypeError(".vtctldata.MaterializeSettings.workflow_options: object expected"); + message.workflow_options = $root.vtctldata.WorkflowOptions.fromObject(object.workflow_options); + } return message; }; @@ -103783,6 +110027,7 @@ export const vtctldata = $root.vtctldata = (() => { object.defer_secondary_keys = false; object.tablet_selection_preference = options.enums === String ? 
"ANY" : 0; object.atomic_copy = false; + object.workflow_options = null; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; @@ -103822,6 +110067,8 @@ export const vtctldata = $root.vtctldata = (() => { object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) object.atomic_copy = message.atomic_copy; + if (message.workflow_options != null && message.hasOwnProperty("workflow_options")) + object.workflow_options = $root.vtctldata.WorkflowOptions.toObject(message.workflow_options, options); return object; }; @@ -106051,6 +112298,273 @@ export const vtctldata = $root.vtctldata = (() => { return Shard; })(); + vtctldata.WorkflowOptions = (function() { + + /** + * Properties of a WorkflowOptions. + * @memberof vtctldata + * @interface IWorkflowOptions + * @property {string|null} [tenant_id] WorkflowOptions tenant_id + * @property {boolean|null} [strip_sharded_auto_increment] WorkflowOptions strip_sharded_auto_increment + * @property {Array.|null} [shards] WorkflowOptions shards + */ + + /** + * Constructs a new WorkflowOptions. + * @memberof vtctldata + * @classdesc Represents a WorkflowOptions. + * @implements IWorkflowOptions + * @constructor + * @param {vtctldata.IWorkflowOptions=} [properties] Properties to set + */ + function WorkflowOptions(properties) { + this.shards = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * WorkflowOptions tenant_id. 
+ * @member {string} tenant_id + * @memberof vtctldata.WorkflowOptions + * @instance + */ + WorkflowOptions.prototype.tenant_id = ""; + + /** + * WorkflowOptions strip_sharded_auto_increment. + * @member {boolean} strip_sharded_auto_increment + * @memberof vtctldata.WorkflowOptions + * @instance + */ + WorkflowOptions.prototype.strip_sharded_auto_increment = false; + + /** + * WorkflowOptions shards. + * @member {Array.} shards + * @memberof vtctldata.WorkflowOptions + * @instance + */ + WorkflowOptions.prototype.shards = $util.emptyArray; + + /** + * Creates a new WorkflowOptions instance using the specified properties. + * @function create + * @memberof vtctldata.WorkflowOptions + * @static + * @param {vtctldata.IWorkflowOptions=} [properties] Properties to set + * @returns {vtctldata.WorkflowOptions} WorkflowOptions instance + */ + WorkflowOptions.create = function create(properties) { + return new WorkflowOptions(properties); + }; + + /** + * Encodes the specified WorkflowOptions message. Does not implicitly {@link vtctldata.WorkflowOptions.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.WorkflowOptions + * @static + * @param {vtctldata.IWorkflowOptions} message WorkflowOptions message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + WorkflowOptions.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tenant_id != null && Object.hasOwnProperty.call(message, "tenant_id")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.tenant_id); + if (message.strip_sharded_auto_increment != null && Object.hasOwnProperty.call(message, "strip_sharded_auto_increment")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.strip_sharded_auto_increment); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.shards[i]); + return writer; + }; + + /** + * Encodes the specified WorkflowOptions message, length delimited. Does not implicitly {@link vtctldata.WorkflowOptions.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.WorkflowOptions + * @static + * @param {vtctldata.IWorkflowOptions} message WorkflowOptions message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + WorkflowOptions.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a WorkflowOptions message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.WorkflowOptions + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.WorkflowOptions} WorkflowOptions + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + WorkflowOptions.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.WorkflowOptions(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tenant_id = reader.string(); + break; + } + case 2: { + message.strip_sharded_auto_increment = reader.bool(); + break; + } + case 3: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a WorkflowOptions message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.WorkflowOptions + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.WorkflowOptions} WorkflowOptions + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + WorkflowOptions.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a WorkflowOptions message. 
+ * @function verify + * @memberof vtctldata.WorkflowOptions + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + WorkflowOptions.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tenant_id != null && message.hasOwnProperty("tenant_id")) + if (!$util.isString(message.tenant_id)) + return "tenant_id: string expected"; + if (message.strip_sharded_auto_increment != null && message.hasOwnProperty("strip_sharded_auto_increment")) + if (typeof message.strip_sharded_auto_increment !== "boolean") + return "strip_sharded_auto_increment: boolean expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } + return null; + }; + + /** + * Creates a WorkflowOptions message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.WorkflowOptions + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.WorkflowOptions} WorkflowOptions + */ + WorkflowOptions.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.WorkflowOptions) + return object; + let message = new $root.vtctldata.WorkflowOptions(); + if (object.tenant_id != null) + message.tenant_id = String(object.tenant_id); + if (object.strip_sharded_auto_increment != null) + message.strip_sharded_auto_increment = Boolean(object.strip_sharded_auto_increment); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.WorkflowOptions.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } + return message; + }; + + /** + * Creates a plain object from a WorkflowOptions message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.WorkflowOptions + * @static + * @param {vtctldata.WorkflowOptions} message WorkflowOptions + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + WorkflowOptions.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.shards = []; + if (options.defaults) { + object.tenant_id = ""; + object.strip_sharded_auto_increment = false; + } + if (message.tenant_id != null && message.hasOwnProperty("tenant_id")) + object.tenant_id = message.tenant_id; + if (message.strip_sharded_auto_increment != null && message.hasOwnProperty("strip_sharded_auto_increment")) + object.strip_sharded_auto_increment = message.strip_sharded_auto_increment; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } + return object; 
+ }; + + /** + * Converts this WorkflowOptions to JSON. + * @function toJSON + * @memberof vtctldata.WorkflowOptions + * @instance + * @returns {Object.} JSON object + */ + WorkflowOptions.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for WorkflowOptions + * @function getTypeUrl + * @memberof vtctldata.WorkflowOptions + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + WorkflowOptions.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.WorkflowOptions"; + }; + + return WorkflowOptions; + })(); + vtctldata.Workflow = (function() { /** @@ -106066,6 +112580,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {string|null} [workflow_sub_type] Workflow workflow_sub_type * @property {number|Long|null} [max_v_replication_transaction_lag] Workflow max_v_replication_transaction_lag * @property {boolean|null} [defer_secondary_keys] Workflow defer_secondary_keys + * @property {vtctldata.IWorkflowOptions|null} [options] Workflow options */ /** @@ -106156,6 +112671,14 @@ export const vtctldata = $root.vtctldata = (() => { */ Workflow.prototype.defer_secondary_keys = false; + /** + * Workflow options. + * @member {vtctldata.IWorkflowOptions|null|undefined} options + * @memberof vtctldata.Workflow + * @instance + */ + Workflow.prototype.options = null; + /** * Creates a new Workflow instance using the specified properties. 
* @function create @@ -106201,6 +112724,8 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 8, wireType 0 =*/64).int64(message.max_v_replication_transaction_lag); if (message.defer_secondary_keys != null && Object.hasOwnProperty.call(message, "defer_secondary_keys")) writer.uint32(/* id 9, wireType 0 =*/72).bool(message.defer_secondary_keys); + if (message.options != null && Object.hasOwnProperty.call(message, "options")) + $root.vtctldata.WorkflowOptions.encode(message.options, writer.uint32(/* id 10, wireType 2 =*/82).fork()).ldelim(); return writer; }; @@ -106290,6 +112815,10 @@ export const vtctldata = $root.vtctldata = (() => { message.defer_secondary_keys = reader.bool(); break; } + case 10: { + message.options = $root.vtctldata.WorkflowOptions.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -106363,6 +112892,11 @@ export const vtctldata = $root.vtctldata = (() => { if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) if (typeof message.defer_secondary_keys !== "boolean") return "defer_secondary_keys: boolean expected"; + if (message.options != null && message.hasOwnProperty("options")) { + let error = $root.vtctldata.WorkflowOptions.verify(message.options); + if (error) + return "options." 
+ error; + } return null; }; @@ -106424,6 +112958,11 @@ export const vtctldata = $root.vtctldata = (() => { message.max_v_replication_transaction_lag = new $util.LongBits(object.max_v_replication_transaction_lag.low >>> 0, object.max_v_replication_transaction_lag.high >>> 0).toNumber(); if (object.defer_secondary_keys != null) message.defer_secondary_keys = Boolean(object.defer_secondary_keys); + if (object.options != null) { + if (typeof object.options !== "object") + throw TypeError(".vtctldata.Workflow.options: object expected"); + message.options = $root.vtctldata.WorkflowOptions.fromObject(object.options); + } return message; }; @@ -106459,6 +112998,7 @@ export const vtctldata = $root.vtctldata = (() => { } else object.max_v_replication_transaction_lag = options.longs === String ? "0" : 0; object.defer_secondary_keys = false; + object.options = null; } if (message.name != null && message.hasOwnProperty("name")) object.name = message.name; @@ -106488,6 +113028,8 @@ export const vtctldata = $root.vtctldata = (() => { object.max_v_replication_transaction_lag = options.longs === String ? $util.Long.prototype.toString.call(message.max_v_replication_transaction_lag) : options.longs === Number ? 
new $util.LongBits(message.max_v_replication_transaction_lag.low >>> 0, message.max_v_replication_transaction_lag.high >>> 0).toNumber() : message.max_v_replication_transaction_lag; if (message.defer_secondary_keys != null && message.hasOwnProperty("defer_secondary_keys")) object.defer_secondary_keys = message.defer_secondary_keys; + if (message.options != null && message.hasOwnProperty("options")) + object.options = $root.vtctldata.WorkflowOptions.toObject(message.options, options); return object; }; @@ -107076,6 +113618,9 @@ export const vtctldata = $root.vtctldata = (() => { * @property {Array.|null} [tags] Stream tags * @property {number|Long|null} [rows_copied] Stream rows_copied * @property {vtctldata.Workflow.Stream.IThrottlerStatus|null} [throttler_status] Stream throttler_status + * @property {Array.|null} [tablet_types] Stream tablet_types + * @property {tabletmanagerdata.TabletSelectionPreference|null} [tablet_selection_preference] Stream tablet_selection_preference + * @property {Array.|null} [cells] Stream cells */ /** @@ -107090,6 +113635,8 @@ export const vtctldata = $root.vtctldata = (() => { this.copy_states = []; this.logs = []; this.tags = []; + this.tablet_types = []; + this.cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -107232,6 +113779,30 @@ export const vtctldata = $root.vtctldata = (() => { */ Stream.prototype.throttler_status = null; + /** + * Stream tablet_types. + * @member {Array.} tablet_types + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.tablet_types = $util.emptyArray; + + /** + * Stream tablet_selection_preference. + * @member {tabletmanagerdata.TabletSelectionPreference} tablet_selection_preference + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.tablet_selection_preference = 0; + + /** + * Stream cells. 
+ * @member {Array.} cells + * @memberof vtctldata.Workflow.Stream + * @instance + */ + Stream.prototype.cells = $util.emptyArray; + /** * Creates a new Stream instance using the specified properties. * @function create @@ -107293,6 +113864,17 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 16, wireType 0 =*/128).int64(message.rows_copied); if (message.throttler_status != null && Object.hasOwnProperty.call(message, "throttler_status")) $root.vtctldata.Workflow.Stream.ThrottlerStatus.encode(message.throttler_status, writer.uint32(/* id 17, wireType 2 =*/138).fork()).ldelim(); + if (message.tablet_types != null && message.tablet_types.length) { + writer.uint32(/* id 18, wireType 2 =*/146).fork(); + for (let i = 0; i < message.tablet_types.length; ++i) + writer.int32(message.tablet_types[i]); + writer.ldelim(); + } + if (message.tablet_selection_preference != null && Object.hasOwnProperty.call(message, "tablet_selection_preference")) + writer.uint32(/* id 19, wireType 0 =*/152).int32(message.tablet_selection_preference); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 20, wireType 2 =*/162).string(message.cells[i]); return writer; }; @@ -107401,6 +113983,27 @@ export const vtctldata = $root.vtctldata = (() => { message.throttler_status = $root.vtctldata.Workflow.Stream.ThrottlerStatus.decode(reader, reader.uint32()); break; } + case 18: { + if (!(message.tablet_types && message.tablet_types.length)) + message.tablet_types = []; + if ((tag & 7) === 2) { + let end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) + message.tablet_types.push(reader.int32()); + } else + message.tablet_types.push(reader.int32()); + break; + } + case 19: { + message.tablet_selection_preference = reader.int32(); + break; + } + case 20: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } default: 
reader.skipType(tag & 7); break; @@ -107513,6 +114116,43 @@ export const vtctldata = $root.vtctldata = (() => { if (error) return "throttler_status." + error; } + if (message.tablet_types != null && message.hasOwnProperty("tablet_types")) { + if (!Array.isArray(message.tablet_types)) + return "tablet_types: array expected"; + for (let i = 0; i < message.tablet_types.length; ++i) + switch (message.tablet_types[i]) { + default: + return "tablet_types: enum value[] expected"; + case 0: + case 1: + case 1: + case 2: + case 3: + case 3: + case 4: + case 5: + case 6: + case 7: + case 8: + break; + } + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + switch (message.tablet_selection_preference) { + default: + return "tablet_selection_preference: enum value expected"; + case 0: + case 1: + case 3: + break; + } + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } return null; }; @@ -107612,6 +114252,90 @@ export const vtctldata = $root.vtctldata = (() => { throw TypeError(".vtctldata.Workflow.Stream.throttler_status: object expected"); message.throttler_status = $root.vtctldata.Workflow.Stream.ThrottlerStatus.fromObject(object.throttler_status); } + if (object.tablet_types) { + if (!Array.isArray(object.tablet_types)) + throw TypeError(".vtctldata.Workflow.Stream.tablet_types: array expected"); + message.tablet_types = []; + for (let i = 0; i < object.tablet_types.length; ++i) + switch (object.tablet_types[i]) { + default: + if (typeof object.tablet_types[i] === "number") { + message.tablet_types[i] = object.tablet_types[i]; + break; + } + case "UNKNOWN": + case 0: + message.tablet_types[i] = 0; + break; + case "PRIMARY": + case 1: + message.tablet_types[i] = 1; + break; + case "MASTER": + case 1: + 
message.tablet_types[i] = 1; + break; + case "REPLICA": + case 2: + message.tablet_types[i] = 2; + break; + case "RDONLY": + case 3: + message.tablet_types[i] = 3; + break; + case "BATCH": + case 3: + message.tablet_types[i] = 3; + break; + case "SPARE": + case 4: + message.tablet_types[i] = 4; + break; + case "EXPERIMENTAL": + case 5: + message.tablet_types[i] = 5; + break; + case "BACKUP": + case 6: + message.tablet_types[i] = 6; + break; + case "RESTORE": + case 7: + message.tablet_types[i] = 7; + break; + case "DRAINED": + case 8: + message.tablet_types[i] = 8; + break; + } + } + switch (object.tablet_selection_preference) { + default: + if (typeof object.tablet_selection_preference === "number") { + message.tablet_selection_preference = object.tablet_selection_preference; + break; + } + break; + case "ANY": + case 0: + message.tablet_selection_preference = 0; + break; + case "INORDER": + case 1: + message.tablet_selection_preference = 1; + break; + case "UNKNOWN": + case 3: + message.tablet_selection_preference = 3; + break; + } + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.Workflow.Stream.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } return message; }; @@ -107632,6 +114356,8 @@ export const vtctldata = $root.vtctldata = (() => { object.copy_states = []; object.logs = []; object.tags = []; + object.tablet_types = []; + object.cells = []; } if (options.defaults) { if ($util.Long) { @@ -107656,6 +114382,7 @@ export const vtctldata = $root.vtctldata = (() => { } else object.rows_copied = options.longs === String ? "0" : 0; object.throttler_status = null; + object.tablet_selection_preference = options.enums === String ? 
"ANY" : 0; } if (message.id != null && message.hasOwnProperty("id")) if (typeof message.id === "number") @@ -107706,6 +114433,18 @@ export const vtctldata = $root.vtctldata = (() => { object.rows_copied = options.longs === String ? $util.Long.prototype.toString.call(message.rows_copied) : options.longs === Number ? new $util.LongBits(message.rows_copied.low >>> 0, message.rows_copied.high >>> 0).toNumber() : message.rows_copied; if (message.throttler_status != null && message.hasOwnProperty("throttler_status")) object.throttler_status = $root.vtctldata.Workflow.Stream.ThrottlerStatus.toObject(message.throttler_status, options); + if (message.tablet_types && message.tablet_types.length) { + object.tablet_types = []; + for (let j = 0; j < message.tablet_types.length; ++j) + object.tablet_types[j] = options.enums === String ? $root.topodata.TabletType[message.tablet_types[j]] === undefined ? message.tablet_types[j] : $root.topodata.TabletType[message.tablet_types[j]] : message.tablet_types[j]; + } + if (message.tablet_selection_preference != null && message.hasOwnProperty("tablet_selection_preference")) + object.tablet_selection_preference = options.enums === String ? $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] === undefined ? 
message.tablet_selection_preference : $root.tabletmanagerdata.TabletSelectionPreference[message.tablet_selection_preference] : message.tablet_selection_preference; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } return object; }; @@ -107743,6 +114482,7 @@ export const vtctldata = $root.vtctldata = (() => { * @interface ICopyState * @property {string|null} [table] CopyState table * @property {string|null} [last_pk] CopyState last_pk + * @property {number|Long|null} [stream_id] CopyState stream_id */ /** @@ -107776,6 +114516,14 @@ export const vtctldata = $root.vtctldata = (() => { */ CopyState.prototype.last_pk = ""; + /** + * CopyState stream_id. + * @member {number|Long} stream_id + * @memberof vtctldata.Workflow.Stream.CopyState + * @instance + */ + CopyState.prototype.stream_id = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + /** * Creates a new CopyState instance using the specified properties. 
* @function create @@ -107804,6 +114552,8 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 1, wireType 2 =*/10).string(message.table); if (message.last_pk != null && Object.hasOwnProperty.call(message, "last_pk")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.last_pk); + if (message.stream_id != null && Object.hasOwnProperty.call(message, "stream_id")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.stream_id); return writer; }; @@ -107846,6 +114596,10 @@ export const vtctldata = $root.vtctldata = (() => { message.last_pk = reader.string(); break; } + case 3: { + message.stream_id = reader.int64(); + break; + } default: reader.skipType(tag & 7); break; @@ -107887,6 +114641,9 @@ export const vtctldata = $root.vtctldata = (() => { if (message.last_pk != null && message.hasOwnProperty("last_pk")) if (!$util.isString(message.last_pk)) return "last_pk: string expected"; + if (message.stream_id != null && message.hasOwnProperty("stream_id")) + if (!$util.isInteger(message.stream_id) && !(message.stream_id && $util.isInteger(message.stream_id.low) && $util.isInteger(message.stream_id.high))) + return "stream_id: integer|Long expected"; return null; }; @@ -107906,6 +114663,15 @@ export const vtctldata = $root.vtctldata = (() => { message.table = String(object.table); if (object.last_pk != null) message.last_pk = String(object.last_pk); + if (object.stream_id != null) + if ($util.Long) + (message.stream_id = $util.Long.fromValue(object.stream_id)).unsigned = false; + else if (typeof object.stream_id === "string") + message.stream_id = parseInt(object.stream_id, 10); + else if (typeof object.stream_id === "number") + message.stream_id = object.stream_id; + else if (typeof object.stream_id === "object") + message.stream_id = new $util.LongBits(object.stream_id.low >>> 0, object.stream_id.high >>> 0).toNumber(); return message; }; @@ -107925,11 +114691,21 @@ export const vtctldata = $root.vtctldata = (() => { if (options.defaults) { 
object.table = ""; object.last_pk = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.stream_id = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.stream_id = options.longs === String ? "0" : 0; } if (message.table != null && message.hasOwnProperty("table")) object.table = message.table; if (message.last_pk != null && message.hasOwnProperty("last_pk")) object.last_pk = message.last_pk; + if (message.stream_id != null && message.hasOwnProperty("stream_id")) + if (typeof message.stream_id === "number") + object.stream_id = options.longs === String ? String(message.stream_id) : message.stream_id; + else + object.stream_id = options.longs === String ? $util.Long.prototype.toString.call(message.stream_id) : options.longs === Number ? new $util.LongBits(message.stream_id.low >>> 0, message.stream_id.high >>> 0).toNumber() : message.stream_id; return object; }; @@ -109442,26 +116218,26 @@ export const vtctldata = $root.vtctldata = (() => { return AddCellsAliasResponse; })(); - vtctldata.ApplyRoutingRulesRequest = (function() { + vtctldata.ApplyKeyspaceRoutingRulesRequest = (function() { /** - * Properties of an ApplyRoutingRulesRequest. + * Properties of an ApplyKeyspaceRoutingRulesRequest. 
* @memberof vtctldata - * @interface IApplyRoutingRulesRequest - * @property {vschema.IRoutingRules|null} [routing_rules] ApplyRoutingRulesRequest routing_rules - * @property {boolean|null} [skip_rebuild] ApplyRoutingRulesRequest skip_rebuild - * @property {Array.|null} [rebuild_cells] ApplyRoutingRulesRequest rebuild_cells + * @interface IApplyKeyspaceRoutingRulesRequest + * @property {vschema.IKeyspaceRoutingRules|null} [keyspace_routing_rules] ApplyKeyspaceRoutingRulesRequest keyspace_routing_rules + * @property {boolean|null} [skip_rebuild] ApplyKeyspaceRoutingRulesRequest skip_rebuild + * @property {Array.|null} [rebuild_cells] ApplyKeyspaceRoutingRulesRequest rebuild_cells */ /** - * Constructs a new ApplyRoutingRulesRequest. + * Constructs a new ApplyKeyspaceRoutingRulesRequest. * @memberof vtctldata - * @classdesc Represents an ApplyRoutingRulesRequest. - * @implements IApplyRoutingRulesRequest + * @classdesc Represents an ApplyKeyspaceRoutingRulesRequest. + * @implements IApplyKeyspaceRoutingRulesRequest * @constructor - * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set + * @param {vtctldata.IApplyKeyspaceRoutingRulesRequest=} [properties] Properties to set */ - function ApplyRoutingRulesRequest(properties) { + function ApplyKeyspaceRoutingRulesRequest(properties) { this.rebuild_cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) @@ -109470,55 +116246,55 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplyRoutingRulesRequest routing_rules. - * @member {vschema.IRoutingRules|null|undefined} routing_rules - * @memberof vtctldata.ApplyRoutingRulesRequest + * ApplyKeyspaceRoutingRulesRequest keyspace_routing_rules. 
+ * @member {vschema.IKeyspaceRoutingRules|null|undefined} keyspace_routing_rules + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @instance */ - ApplyRoutingRulesRequest.prototype.routing_rules = null; + ApplyKeyspaceRoutingRulesRequest.prototype.keyspace_routing_rules = null; /** - * ApplyRoutingRulesRequest skip_rebuild. + * ApplyKeyspaceRoutingRulesRequest skip_rebuild. * @member {boolean} skip_rebuild - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @instance */ - ApplyRoutingRulesRequest.prototype.skip_rebuild = false; + ApplyKeyspaceRoutingRulesRequest.prototype.skip_rebuild = false; /** - * ApplyRoutingRulesRequest rebuild_cells. + * ApplyKeyspaceRoutingRulesRequest rebuild_cells. * @member {Array.} rebuild_cells - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @instance */ - ApplyRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + ApplyKeyspaceRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; /** - * Creates a new ApplyRoutingRulesRequest instance using the specified properties. + * Creates a new ApplyKeyspaceRoutingRulesRequest instance using the specified properties. 
* @function create - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static - * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest instance + * @param {vtctldata.IApplyKeyspaceRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.ApplyKeyspaceRoutingRulesRequest} ApplyKeyspaceRoutingRulesRequest instance */ - ApplyRoutingRulesRequest.create = function create(properties) { - return new ApplyRoutingRulesRequest(properties); + ApplyKeyspaceRoutingRulesRequest.create = function create(properties) { + return new ApplyKeyspaceRoutingRulesRequest(properties); }; /** - * Encodes the specified ApplyRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. + * Encodes the specified ApplyKeyspaceRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesRequest.verify|verify} messages. 
* @function encode - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static - * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IApplyKeyspaceRoutingRulesRequest} message ApplyKeyspaceRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesRequest.encode = function encode(message, writer) { + ApplyKeyspaceRoutingRulesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) - $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.keyspace_routing_rules != null && Object.hasOwnProperty.call(message, "keyspace_routing_rules")) + $root.vschema.KeyspaceRoutingRules.encode(message.keyspace_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); if (message.rebuild_cells != null && message.rebuild_cells.length) @@ -109528,38 +116304,38 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Encodes the specified ApplyRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. + * Encodes the specified ApplyKeyspaceRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static - * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IApplyKeyspaceRoutingRulesRequest} message ApplyKeyspaceRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + ApplyKeyspaceRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer. + * Decodes an ApplyKeyspaceRoutingRulesRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @returns {vtctldata.ApplyKeyspaceRoutingRulesRequest} ApplyKeyspaceRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesRequest.decode = function decode(reader, length) { + ApplyKeyspaceRoutingRulesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplyKeyspaceRoutingRulesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.decode(reader, reader.uint32()); break; } case 2: { @@ -109581,36 +116357,36 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer, length delimited. + * Decodes an ApplyKeyspaceRoutingRulesRequest message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @returns {vtctldata.ApplyKeyspaceRoutingRulesRequest} ApplyKeyspaceRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + ApplyKeyspaceRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyRoutingRulesRequest message. + * Verifies an ApplyKeyspaceRoutingRulesRequest message. 
* @function verify - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyRoutingRulesRequest.verify = function verify(message) { + ApplyKeyspaceRoutingRulesRequest.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { - let error = $root.vschema.RoutingRules.verify(message.routing_rules); + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) { + let error = $root.vschema.KeyspaceRoutingRules.verify(message.keyspace_routing_rules); if (error) - return "routing_rules." + error; + return "keyspace_routing_rules." + error; } if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) if (typeof message.skip_rebuild !== "boolean") @@ -109626,27 +116402,27 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Creates an ApplyRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * Creates an ApplyKeyspaceRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @returns {vtctldata.ApplyKeyspaceRoutingRulesRequest} ApplyKeyspaceRoutingRulesRequest */ - ApplyRoutingRulesRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyRoutingRulesRequest) + ApplyKeyspaceRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyKeyspaceRoutingRulesRequest) return object; - let message = new $root.vtctldata.ApplyRoutingRulesRequest(); - if (object.routing_rules != null) { - if (typeof object.routing_rules !== "object") - throw TypeError(".vtctldata.ApplyRoutingRulesRequest.routing_rules: object expected"); - message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); + let message = new $root.vtctldata.ApplyKeyspaceRoutingRulesRequest(); + if (object.keyspace_routing_rules != null) { + if (typeof object.keyspace_routing_rules !== "object") + throw TypeError(".vtctldata.ApplyKeyspaceRoutingRulesRequest.keyspace_routing_rules: object expected"); + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.fromObject(object.keyspace_routing_rules); } if (object.skip_rebuild != null) message.skip_rebuild = Boolean(object.skip_rebuild); if (object.rebuild_cells) { if (!Array.isArray(object.rebuild_cells)) - throw TypeError(".vtctldata.ApplyRoutingRulesRequest.rebuild_cells: array expected"); + throw TypeError(".vtctldata.ApplyKeyspaceRoutingRulesRequest.rebuild_cells: array expected"); message.rebuild_cells = []; for (let i = 0; i < object.rebuild_cells.length; ++i) message.rebuild_cells[i] = String(object.rebuild_cells[i]); @@ -109655,26 +116431,26 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Creates a plain object from an 
ApplyRoutingRulesRequest message. Also converts values to other types if specified. + * Creates a plain object from an ApplyKeyspaceRoutingRulesRequest message. Also converts values to other types if specified. * @function toObject - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static - * @param {vtctldata.ApplyRoutingRulesRequest} message ApplyRoutingRulesRequest + * @param {vtctldata.ApplyKeyspaceRoutingRulesRequest} message ApplyKeyspaceRoutingRulesRequest * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyRoutingRulesRequest.toObject = function toObject(message, options) { + ApplyKeyspaceRoutingRulesRequest.toObject = function toObject(message, options) { if (!options) options = {}; let object = {}; if (options.arrays || options.defaults) object.rebuild_cells = []; if (options.defaults) { - object.routing_rules = null; + object.keyspace_routing_rules = null; object.skip_rebuild = false; } - if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) - object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) + object.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.toObject(message.keyspace_routing_rules, options); if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) object.skip_rebuild = message.skip_rebuild; if (message.rebuild_cells && message.rebuild_cells.length) { @@ -109686,51 +116462,52 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Converts this ApplyRoutingRulesRequest to JSON. + * Converts this ApplyKeyspaceRoutingRulesRequest to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @instance * @returns {Object.} JSON object */ - ApplyRoutingRulesRequest.prototype.toJSON = function toJSON() { + ApplyKeyspaceRoutingRulesRequest.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyRoutingRulesRequest + * Gets the default type url for ApplyKeyspaceRoutingRulesRequest * @function getTypeUrl - * @memberof vtctldata.ApplyRoutingRulesRequest + * @memberof vtctldata.ApplyKeyspaceRoutingRulesRequest * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ApplyKeyspaceRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesRequest"; + return typeUrlPrefix + "/vtctldata.ApplyKeyspaceRoutingRulesRequest"; }; - return ApplyRoutingRulesRequest; + return ApplyKeyspaceRoutingRulesRequest; })(); - vtctldata.ApplyRoutingRulesResponse = (function() { + vtctldata.ApplyKeyspaceRoutingRulesResponse = (function() { /** - * Properties of an ApplyRoutingRulesResponse. + * Properties of an ApplyKeyspaceRoutingRulesResponse. * @memberof vtctldata - * @interface IApplyRoutingRulesResponse + * @interface IApplyKeyspaceRoutingRulesResponse + * @property {vschema.IKeyspaceRoutingRules|null} [keyspace_routing_rules] ApplyKeyspaceRoutingRulesResponse keyspace_routing_rules */ /** - * Constructs a new ApplyRoutingRulesResponse. + * Constructs a new ApplyKeyspaceRoutingRulesResponse. * @memberof vtctldata - * @classdesc Represents an ApplyRoutingRulesResponse. 
- * @implements IApplyRoutingRulesResponse + * @classdesc Represents an ApplyKeyspaceRoutingRulesResponse. + * @implements IApplyKeyspaceRoutingRulesResponse * @constructor - * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set + * @param {vtctldata.IApplyKeyspaceRoutingRulesResponse=} [properties] Properties to set */ - function ApplyRoutingRulesResponse(properties) { + function ApplyKeyspaceRoutingRulesResponse(properties) { if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -109738,63 +116515,77 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * Creates a new ApplyRoutingRulesResponse instance using the specified properties. + * ApplyKeyspaceRoutingRulesResponse keyspace_routing_rules. + * @member {vschema.IKeyspaceRoutingRules|null|undefined} keyspace_routing_rules + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse + * @instance + */ + ApplyKeyspaceRoutingRulesResponse.prototype.keyspace_routing_rules = null; + + /** + * Creates a new ApplyKeyspaceRoutingRulesResponse instance using the specified properties. * @function create - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static - * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse instance + * @param {vtctldata.IApplyKeyspaceRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.ApplyKeyspaceRoutingRulesResponse} ApplyKeyspaceRoutingRulesResponse instance */ - ApplyRoutingRulesResponse.create = function create(properties) { - return new ApplyRoutingRulesResponse(properties); + ApplyKeyspaceRoutingRulesResponse.create = function create(properties) { + return new ApplyKeyspaceRoutingRulesResponse(properties); }; /** - * Encodes the specified ApplyRoutingRulesResponse message. 
Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * Encodes the specified ApplyKeyspaceRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesResponse.verify|verify} messages. * @function encode - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static - * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IApplyKeyspaceRoutingRulesResponse} message ApplyKeyspaceRoutingRulesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesResponse.encode = function encode(message, writer) { + ApplyKeyspaceRoutingRulesResponse.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); + if (message.keyspace_routing_rules != null && Object.hasOwnProperty.call(message, "keyspace_routing_rules")) + $root.vschema.KeyspaceRoutingRules.encode(message.keyspace_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); return writer; }; /** - * Encodes the specified ApplyRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * Encodes the specified ApplyKeyspaceRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyKeyspaceRoutingRulesResponse.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static - * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {vtctldata.IApplyKeyspaceRoutingRulesResponse} message ApplyKeyspaceRoutingRulesResponse message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + ApplyKeyspaceRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer. + * Decodes an ApplyKeyspaceRoutingRulesResponse message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @returns {vtctldata.ApplyKeyspaceRoutingRulesResponse} ApplyKeyspaceRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesResponse.decode = function decode(reader, length) { + ApplyKeyspaceRoutingRulesResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplyKeyspaceRoutingRulesResponse(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { + case 1: { + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -109804,111 +116595,129 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer, length delimited. + * Decodes an ApplyKeyspaceRoutingRulesResponse message from the specified reader or buffer, length delimited. * @function decodeDelimited - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @returns {vtctldata.ApplyKeyspaceRoutingRulesResponse} ApplyKeyspaceRoutingRulesResponse * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + ApplyKeyspaceRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { if (!(reader instanceof $Reader)) reader = new $Reader(reader); return this.decode(reader, reader.uint32()); }; /** - * Verifies an ApplyRoutingRulesResponse message. + * Verifies an ApplyKeyspaceRoutingRulesResponse message. 
* @function verify - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static * @param {Object.} message Plain object to verify * @returns {string|null} `null` if valid, otherwise the reason why it is not */ - ApplyRoutingRulesResponse.verify = function verify(message) { + ApplyKeyspaceRoutingRulesResponse.verify = function verify(message) { if (typeof message !== "object" || message === null) return "object expected"; + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) { + let error = $root.vschema.KeyspaceRoutingRules.verify(message.keyspace_routing_rules); + if (error) + return "keyspace_routing_rules." + error; + } return null; }; /** - * Creates an ApplyRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * Creates an ApplyKeyspaceRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. 
* @function fromObject - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static * @param {Object.} object Plain object - * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @returns {vtctldata.ApplyKeyspaceRoutingRulesResponse} ApplyKeyspaceRoutingRulesResponse */ - ApplyRoutingRulesResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.ApplyRoutingRulesResponse) + ApplyKeyspaceRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyKeyspaceRoutingRulesResponse) return object; - return new $root.vtctldata.ApplyRoutingRulesResponse(); + let message = new $root.vtctldata.ApplyKeyspaceRoutingRulesResponse(); + if (object.keyspace_routing_rules != null) { + if (typeof object.keyspace_routing_rules !== "object") + throw TypeError(".vtctldata.ApplyKeyspaceRoutingRulesResponse.keyspace_routing_rules: object expected"); + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.fromObject(object.keyspace_routing_rules); + } + return message; }; /** - * Creates a plain object from an ApplyRoutingRulesResponse message. Also converts values to other types if specified. + * Creates a plain object from an ApplyKeyspaceRoutingRulesResponse message. Also converts values to other types if specified. 
* @function toObject - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static - * @param {vtctldata.ApplyRoutingRulesResponse} message ApplyRoutingRulesResponse + * @param {vtctldata.ApplyKeyspaceRoutingRulesResponse} message ApplyKeyspaceRoutingRulesResponse * @param {$protobuf.IConversionOptions} [options] Conversion options * @returns {Object.} Plain object */ - ApplyRoutingRulesResponse.toObject = function toObject() { - return {}; + ApplyKeyspaceRoutingRulesResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.keyspace_routing_rules = null; + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) + object.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.toObject(message.keyspace_routing_rules, options); + return object; }; /** - * Converts this ApplyRoutingRulesResponse to JSON. + * Converts this ApplyKeyspaceRoutingRulesResponse to JSON. 
* @function toJSON - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @instance * @returns {Object.} JSON object */ - ApplyRoutingRulesResponse.prototype.toJSON = function toJSON() { + ApplyKeyspaceRoutingRulesResponse.prototype.toJSON = function toJSON() { return this.constructor.toObject(this, $protobuf.util.toJSONOptions); }; /** - * Gets the default type url for ApplyRoutingRulesResponse + * Gets the default type url for ApplyKeyspaceRoutingRulesResponse * @function getTypeUrl - * @memberof vtctldata.ApplyRoutingRulesResponse + * @memberof vtctldata.ApplyKeyspaceRoutingRulesResponse * @static * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") * @returns {string} The default type url */ - ApplyRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + ApplyKeyspaceRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { if (typeUrlPrefix === undefined) { typeUrlPrefix = "type.googleapis.com"; } - return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesResponse"; + return typeUrlPrefix + "/vtctldata.ApplyKeyspaceRoutingRulesResponse"; }; - return ApplyRoutingRulesResponse; + return ApplyKeyspaceRoutingRulesResponse; })(); - vtctldata.ApplyShardRoutingRulesRequest = (function() { + vtctldata.ApplyRoutingRulesRequest = (function() { /** - * Properties of an ApplyShardRoutingRulesRequest. + * Properties of an ApplyRoutingRulesRequest. 
* @memberof vtctldata - * @interface IApplyShardRoutingRulesRequest - * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] ApplyShardRoutingRulesRequest shard_routing_rules - * @property {boolean|null} [skip_rebuild] ApplyShardRoutingRulesRequest skip_rebuild - * @property {Array.|null} [rebuild_cells] ApplyShardRoutingRulesRequest rebuild_cells + * @interface IApplyRoutingRulesRequest + * @property {vschema.IRoutingRules|null} [routing_rules] ApplyRoutingRulesRequest routing_rules + * @property {boolean|null} [skip_rebuild] ApplyRoutingRulesRequest skip_rebuild + * @property {Array.|null} [rebuild_cells] ApplyRoutingRulesRequest rebuild_cells */ /** - * Constructs a new ApplyShardRoutingRulesRequest. + * Constructs a new ApplyRoutingRulesRequest. * @memberof vtctldata - * @classdesc Represents an ApplyShardRoutingRulesRequest. - * @implements IApplyShardRoutingRulesRequest + * @classdesc Represents an ApplyRoutingRulesRequest. + * @implements IApplyRoutingRulesRequest * @constructor - * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set + * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set */ - function ApplyShardRoutingRulesRequest(properties) { + function ApplyRoutingRulesRequest(properties) { this.rebuild_cells = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) @@ -109917,55 +116726,55 @@ export const vtctldata = $root.vtctldata = (() => { } /** - * ApplyShardRoutingRulesRequest shard_routing_rules. - * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * ApplyRoutingRulesRequest routing_rules. 
+ * @member {vschema.IRoutingRules|null|undefined} routing_rules + * @memberof vtctldata.ApplyRoutingRulesRequest * @instance */ - ApplyShardRoutingRulesRequest.prototype.shard_routing_rules = null; + ApplyRoutingRulesRequest.prototype.routing_rules = null; /** - * ApplyShardRoutingRulesRequest skip_rebuild. + * ApplyRoutingRulesRequest skip_rebuild. * @member {boolean} skip_rebuild - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.ApplyRoutingRulesRequest * @instance */ - ApplyShardRoutingRulesRequest.prototype.skip_rebuild = false; + ApplyRoutingRulesRequest.prototype.skip_rebuild = false; /** - * ApplyShardRoutingRulesRequest rebuild_cells. + * ApplyRoutingRulesRequest rebuild_cells. * @member {Array.} rebuild_cells - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.ApplyRoutingRulesRequest * @instance */ - ApplyShardRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + ApplyRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; /** - * Creates a new ApplyShardRoutingRulesRequest instance using the specified properties. + * Creates a new ApplyRoutingRulesRequest instance using the specified properties. * @function create - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.ApplyRoutingRulesRequest * @static - * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set - * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest instance + * @param {vtctldata.IApplyRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest instance */ - ApplyShardRoutingRulesRequest.create = function create(properties) { - return new ApplyShardRoutingRulesRequest(properties); + ApplyRoutingRulesRequest.create = function create(properties) { + return new ApplyRoutingRulesRequest(properties); }; /** - * Encodes the specified ApplyShardRoutingRulesRequest message. 
Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * Encodes the specified ApplyRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. * @function encode - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.ApplyRoutingRulesRequest * @static - * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyShardRoutingRulesRequest.encode = function encode(message, writer) { + ApplyRoutingRulesRequest.encode = function encode(message, writer) { if (!writer) writer = $Writer.create(); - if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) - $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.routing_rules != null && Object.hasOwnProperty.call(message, "routing_rules")) + $root.vschema.RoutingRules.encode(message.routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); if (message.rebuild_cells != null && message.rebuild_cells.length) @@ -109975,38 +116784,485 @@ export const vtctldata = $root.vtctldata = (() => { }; /** - * Encodes the specified ApplyShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * Encodes the specified ApplyRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesRequest.verify|verify} messages. 
* @function encodeDelimited - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.ApplyRoutingRulesRequest * @static - * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {vtctldata.IApplyRoutingRulesRequest} message ApplyRoutingRulesRequest message or plain object to encode * @param {$protobuf.Writer} [writer] Writer to encode to * @returns {$protobuf.Writer} Writer */ - ApplyShardRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + ApplyRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { return this.encode(message, writer).ldelim(); }; /** - * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer. + * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer. * @function decode - * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @memberof vtctldata.ApplyRoutingRulesRequest * @static * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest * @throws {Error} If the payload is not a reader or valid buffer * @throws {$protobuf.util.ProtocolError} If required fields are missing */ - ApplyShardRoutingRulesRequest.decode = function decode(reader, length) { + ApplyRoutingRulesRequest.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyShardRoutingRulesRequest(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesRequest(); while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { case 1: { - message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); + message.routing_rules = $root.vschema.RoutingRules.decode(reader, reader.uint32()); + break; + } + case 2: { + message.skip_rebuild = reader.bool(); + break; + } + case 3: { + if (!(message.rebuild_cells && message.rebuild_cells.length)) + message.rebuild_cells = []; + message.rebuild_cells.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyRoutingRulesRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyRoutingRulesRequest message. 
+ * @function verify + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyRoutingRulesRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) { + let error = $root.vschema.RoutingRules.verify(message.routing_rules); + if (error) + return "routing_rules." + error; + } + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + if (typeof message.skip_rebuild !== "boolean") + return "skip_rebuild: boolean expected"; + if (message.rebuild_cells != null && message.hasOwnProperty("rebuild_cells")) { + if (!Array.isArray(message.rebuild_cells)) + return "rebuild_cells: array expected"; + for (let i = 0; i < message.rebuild_cells.length; ++i) + if (!$util.isString(message.rebuild_cells[i])) + return "rebuild_cells: string[] expected"; + } + return null; + }; + + /** + * Creates an ApplyRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyRoutingRulesRequest} ApplyRoutingRulesRequest + */ + ApplyRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyRoutingRulesRequest) + return object; + let message = new $root.vtctldata.ApplyRoutingRulesRequest(); + if (object.routing_rules != null) { + if (typeof object.routing_rules !== "object") + throw TypeError(".vtctldata.ApplyRoutingRulesRequest.routing_rules: object expected"); + message.routing_rules = $root.vschema.RoutingRules.fromObject(object.routing_rules); + } + if (object.skip_rebuild != null) + message.skip_rebuild = Boolean(object.skip_rebuild); + if (object.rebuild_cells) { + if (!Array.isArray(object.rebuild_cells)) + throw TypeError(".vtctldata.ApplyRoutingRulesRequest.rebuild_cells: array expected"); + message.rebuild_cells = []; + for (let i = 0; i < object.rebuild_cells.length; ++i) + message.rebuild_cells[i] = String(object.rebuild_cells[i]); + } + return message; + }; + + /** + * Creates a plain object from an ApplyRoutingRulesRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {vtctldata.ApplyRoutingRulesRequest} message ApplyRoutingRulesRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyRoutingRulesRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.rebuild_cells = []; + if (options.defaults) { + object.routing_rules = null; + object.skip_rebuild = false; + } + if (message.routing_rules != null && message.hasOwnProperty("routing_rules")) + object.routing_rules = $root.vschema.RoutingRules.toObject(message.routing_rules, options); + if (message.skip_rebuild != null && message.hasOwnProperty("skip_rebuild")) + object.skip_rebuild = message.skip_rebuild; + if (message.rebuild_cells && message.rebuild_cells.length) { + object.rebuild_cells = []; + for (let j = 0; j < message.rebuild_cells.length; ++j) + object.rebuild_cells[j] = message.rebuild_cells[j]; + } + return object; + }; + + /** + * Converts this ApplyRoutingRulesRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyRoutingRulesRequest + * @instance + * @returns {Object.} JSON object + */ + ApplyRoutingRulesRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyRoutingRulesRequest + * @function getTypeUrl + * @memberof vtctldata.ApplyRoutingRulesRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesRequest"; + }; + + return ApplyRoutingRulesRequest; + })(); + + vtctldata.ApplyRoutingRulesResponse = (function() { + + /** + * Properties of an ApplyRoutingRulesResponse. + * @memberof vtctldata + * @interface IApplyRoutingRulesResponse + */ + + /** + * Constructs a new ApplyRoutingRulesResponse. + * @memberof vtctldata + * @classdesc Represents an ApplyRoutingRulesResponse. + * @implements IApplyRoutingRulesResponse + * @constructor + * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set + */ + function ApplyRoutingRulesResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new ApplyRoutingRulesResponse instance using the specified properties. 
+ * @function create + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.IApplyRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse instance + */ + ApplyRoutingRulesResponse.create = function create(properties) { + return new ApplyRoutingRulesResponse(properties); + }; + + /** + * Encodes the specified ApplyRoutingRulesResponse message. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyRoutingRulesResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified ApplyRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.ApplyRoutingRulesResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.IApplyRoutingRulesResponse} message ApplyRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyRoutingRulesResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ApplyRoutingRulesResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ApplyRoutingRulesResponse message. 
+ * @function verify + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ApplyRoutingRulesResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates an ApplyRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyRoutingRulesResponse} ApplyRoutingRulesResponse + */ + ApplyRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyRoutingRulesResponse) + return object; + return new $root.vtctldata.ApplyRoutingRulesResponse(); + }; + + /** + * Creates a plain object from an ApplyRoutingRulesResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {vtctldata.ApplyRoutingRulesResponse} message ApplyRoutingRulesResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ApplyRoutingRulesResponse.toObject = function toObject() { + return {}; + }; + + /** + * Converts this ApplyRoutingRulesResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyRoutingRulesResponse + * @instance + * @returns {Object.} JSON object + */ + ApplyRoutingRulesResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ApplyRoutingRulesResponse + * @function getTypeUrl + * @memberof vtctldata.ApplyRoutingRulesResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ApplyRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyRoutingRulesResponse"; + }; + + return ApplyRoutingRulesResponse; + })(); + + vtctldata.ApplyShardRoutingRulesRequest = (function() { + + /** + * Properties of an ApplyShardRoutingRulesRequest. + * @memberof vtctldata + * @interface IApplyShardRoutingRulesRequest + * @property {vschema.IShardRoutingRules|null} [shard_routing_rules] ApplyShardRoutingRulesRequest shard_routing_rules + * @property {boolean|null} [skip_rebuild] ApplyShardRoutingRulesRequest skip_rebuild + * @property {Array.|null} [rebuild_cells] ApplyShardRoutingRulesRequest rebuild_cells + */ + + /** + * Constructs a new ApplyShardRoutingRulesRequest. + * @memberof vtctldata + * @classdesc Represents an ApplyShardRoutingRulesRequest. + * @implements IApplyShardRoutingRulesRequest + * @constructor + * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set + */ + function ApplyShardRoutingRulesRequest(properties) { + this.rebuild_cells = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ApplyShardRoutingRulesRequest shard_routing_rules. 
+ * @member {vschema.IShardRoutingRules|null|undefined} shard_routing_rules + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + */ + ApplyShardRoutingRulesRequest.prototype.shard_routing_rules = null; + + /** + * ApplyShardRoutingRulesRequest skip_rebuild. + * @member {boolean} skip_rebuild + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + */ + ApplyShardRoutingRulesRequest.prototype.skip_rebuild = false; + + /** + * ApplyShardRoutingRulesRequest rebuild_cells. + * @member {Array.} rebuild_cells + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @instance + */ + ApplyShardRoutingRulesRequest.prototype.rebuild_cells = $util.emptyArray; + + /** + * Creates a new ApplyShardRoutingRulesRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.IApplyShardRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest instance + */ + ApplyShardRoutingRulesRequest.create = function create(properties) { + return new ApplyShardRoutingRulesRequest(properties); + }; + + /** + * Encodes the specified ApplyShardRoutingRulesRequest message. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyShardRoutingRulesRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.shard_routing_rules != null && Object.hasOwnProperty.call(message, "shard_routing_rules")) + $root.vschema.ShardRoutingRules.encode(message.shard_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.skip_rebuild != null && Object.hasOwnProperty.call(message, "skip_rebuild")) + writer.uint32(/* id 2, wireType 0 =*/16).bool(message.skip_rebuild); + if (message.rebuild_cells != null && message.rebuild_cells.length) + for (let i = 0; i < message.rebuild_cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.rebuild_cells[i]); + return writer; + }; + + /** + * Encodes the specified ApplyShardRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.ApplyShardRoutingRulesRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {vtctldata.IApplyShardRoutingRulesRequest} message ApplyShardRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ApplyShardRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ApplyShardRoutingRulesRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ApplyShardRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyShardRoutingRulesRequest} ApplyShardRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ApplyShardRoutingRulesRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyShardRoutingRulesRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.shard_routing_rules = $root.vschema.ShardRoutingRules.decode(reader, reader.uint32()); break; } case 2: { @@ -111059,6 +118315,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {Array.|null} [cells] ApplyVSchemaRequest cells * @property {vschema.IKeyspace|null} [v_schema] ApplyVSchemaRequest v_schema * @property {string|null} [sql] ApplyVSchemaRequest sql + * @property {boolean|null} [strict] ApplyVSchemaRequest strict */ /** @@ -111125,6 +118382,14 @@ export const vtctldata = $root.vtctldata = (() => { */ ApplyVSchemaRequest.prototype.sql = ""; + /** + * ApplyVSchemaRequest strict. + * @member {boolean} strict + * @memberof vtctldata.ApplyVSchemaRequest + * @instance + */ + ApplyVSchemaRequest.prototype.strict = false; + /** * Creates a new ApplyVSchemaRequest instance using the specified properties. 
* @function create @@ -111162,6 +118427,8 @@ export const vtctldata = $root.vtctldata = (() => { $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) writer.uint32(/* id 6, wireType 2 =*/50).string(message.sql); + if (message.strict != null && Object.hasOwnProperty.call(message, "strict")) + writer.uint32(/* id 7, wireType 0 =*/56).bool(message.strict); return writer; }; @@ -111222,6 +118489,10 @@ export const vtctldata = $root.vtctldata = (() => { message.sql = reader.string(); break; } + case 7: { + message.strict = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -111281,6 +118552,9 @@ export const vtctldata = $root.vtctldata = (() => { if (message.sql != null && message.hasOwnProperty("sql")) if (!$util.isString(message.sql)) return "sql: string expected"; + if (message.strict != null && message.hasOwnProperty("strict")) + if (typeof message.strict !== "boolean") + return "strict: boolean expected"; return null; }; @@ -111316,6 +118590,8 @@ export const vtctldata = $root.vtctldata = (() => { } if (object.sql != null) message.sql = String(object.sql); + if (object.strict != null) + message.strict = Boolean(object.strict); return message; }; @@ -111340,6 +118616,7 @@ export const vtctldata = $root.vtctldata = (() => { object.dry_run = false; object.v_schema = null; object.sql = ""; + object.strict = false; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; @@ -111356,6 +118633,8 @@ export const vtctldata = $root.vtctldata = (() => { object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); if (message.sql != null && message.hasOwnProperty("sql")) object.sql = message.sql; + if (message.strict != null && message.hasOwnProperty("strict")) + object.strict = message.strict; return object; }; @@ -111395,6 +118674,7 @@ export const vtctldata = 
$root.vtctldata = (() => { * @memberof vtctldata * @interface IApplyVSchemaResponse * @property {vschema.IKeyspace|null} [v_schema] ApplyVSchemaResponse v_schema + * @property {Object.|null} [unknown_vindex_params] ApplyVSchemaResponse unknown_vindex_params */ /** @@ -111406,6 +118686,7 @@ export const vtctldata = $root.vtctldata = (() => { * @param {vtctldata.IApplyVSchemaResponse=} [properties] Properties to set */ function ApplyVSchemaResponse(properties) { + this.unknown_vindex_params = {}; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -111420,6 +118701,14 @@ export const vtctldata = $root.vtctldata = (() => { */ ApplyVSchemaResponse.prototype.v_schema = null; + /** + * ApplyVSchemaResponse unknown_vindex_params. + * @member {Object.} unknown_vindex_params + * @memberof vtctldata.ApplyVSchemaResponse + * @instance + */ + ApplyVSchemaResponse.prototype.unknown_vindex_params = $util.emptyObject; + /** * Creates a new ApplyVSchemaResponse instance using the specified properties. 
* @function create @@ -111446,6 +118735,11 @@ export const vtctldata = $root.vtctldata = (() => { writer = $Writer.create(); if (message.v_schema != null && Object.hasOwnProperty.call(message, "v_schema")) $root.vschema.Keyspace.encode(message.v_schema, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.unknown_vindex_params != null && Object.hasOwnProperty.call(message, "unknown_vindex_params")) + for (let keys = Object.keys(message.unknown_vindex_params), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 2, wireType 2 =*/18).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.vtctldata.ApplyVSchemaResponse.ParamList.encode(message.unknown_vindex_params[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } return writer; }; @@ -111476,7 +118770,7 @@ export const vtctldata = $root.vtctldata = (() => { ApplyVSchemaResponse.decode = function decode(reader, length) { if (!(reader instanceof $Reader)) reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaResponse(); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaResponse(), key, value; while (reader.pos < end) { let tag = reader.uint32(); switch (tag >>> 3) { @@ -111484,6 +118778,29 @@ export const vtctldata = $root.vtctldata = (() => { message.v_schema = $root.vschema.Keyspace.decode(reader, reader.uint32()); break; } + case 2: { + if (message.unknown_vindex_params === $util.emptyObject) + message.unknown_vindex_params = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.vtctldata.ApplyVSchemaResponse.ParamList.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.unknown_vindex_params[key] = value; + break; + } default: reader.skipType(tag & 7); break; @@ -111524,6 +118841,16 @@ export const vtctldata = $root.vtctldata = (() => { if (error) return "v_schema." + error; } + if (message.unknown_vindex_params != null && message.hasOwnProperty("unknown_vindex_params")) { + if (!$util.isObject(message.unknown_vindex_params)) + return "unknown_vindex_params: object expected"; + let key = Object.keys(message.unknown_vindex_params); + for (let i = 0; i < key.length; ++i) { + let error = $root.vtctldata.ApplyVSchemaResponse.ParamList.verify(message.unknown_vindex_params[key[i]]); + if (error) + return "unknown_vindex_params." 
+ error; + } + } return null; }; @@ -111544,6 +118871,16 @@ export const vtctldata = $root.vtctldata = (() => { throw TypeError(".vtctldata.ApplyVSchemaResponse.v_schema: object expected"); message.v_schema = $root.vschema.Keyspace.fromObject(object.v_schema); } + if (object.unknown_vindex_params) { + if (typeof object.unknown_vindex_params !== "object") + throw TypeError(".vtctldata.ApplyVSchemaResponse.unknown_vindex_params: object expected"); + message.unknown_vindex_params = {}; + for (let keys = Object.keys(object.unknown_vindex_params), i = 0; i < keys.length; ++i) { + if (typeof object.unknown_vindex_params[keys[i]] !== "object") + throw TypeError(".vtctldata.ApplyVSchemaResponse.unknown_vindex_params: object expected"); + message.unknown_vindex_params[keys[i]] = $root.vtctldata.ApplyVSchemaResponse.ParamList.fromObject(object.unknown_vindex_params[keys[i]]); + } + } return message; }; @@ -111560,10 +118897,18 @@ export const vtctldata = $root.vtctldata = (() => { if (!options) options = {}; let object = {}; + if (options.objects || options.defaults) + object.unknown_vindex_params = {}; if (options.defaults) object.v_schema = null; if (message.v_schema != null && message.hasOwnProperty("v_schema")) object.v_schema = $root.vschema.Keyspace.toObject(message.v_schema, options); + let keys2; + if (message.unknown_vindex_params && (keys2 = Object.keys(message.unknown_vindex_params)).length) { + object.unknown_vindex_params = {}; + for (let j = 0; j < keys2.length; ++j) + object.unknown_vindex_params[keys2[j]] = $root.vtctldata.ApplyVSchemaResponse.ParamList.toObject(message.unknown_vindex_params[keys2[j]], options); + } return object; }; @@ -111593,6 +118938,225 @@ export const vtctldata = $root.vtctldata = (() => { return typeUrlPrefix + "/vtctldata.ApplyVSchemaResponse"; }; + ApplyVSchemaResponse.ParamList = (function() { + + /** + * Properties of a ParamList. 
+ * @memberof vtctldata.ApplyVSchemaResponse + * @interface IParamList + * @property {Array.|null} [params] ParamList params + */ + + /** + * Constructs a new ParamList. + * @memberof vtctldata.ApplyVSchemaResponse + * @classdesc Represents a ParamList. + * @implements IParamList + * @constructor + * @param {vtctldata.ApplyVSchemaResponse.IParamList=} [properties] Properties to set + */ + function ParamList(properties) { + this.params = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ParamList params. + * @member {Array.} params + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @instance + */ + ParamList.prototype.params = $util.emptyArray; + + /** + * Creates a new ParamList instance using the specified properties. + * @function create + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {vtctldata.ApplyVSchemaResponse.IParamList=} [properties] Properties to set + * @returns {vtctldata.ApplyVSchemaResponse.ParamList} ParamList instance + */ + ParamList.create = function create(properties) { + return new ParamList(properties); + }; + + /** + * Encodes the specified ParamList message. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.ParamList.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {vtctldata.ApplyVSchemaResponse.IParamList} message ParamList message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ParamList.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.params != null && message.params.length) + for (let i = 0; i < message.params.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.params[i]); + return writer; + }; + + /** + * Encodes the specified ParamList message, length delimited. Does not implicitly {@link vtctldata.ApplyVSchemaResponse.ParamList.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {vtctldata.ApplyVSchemaResponse.IParamList} message ParamList message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ParamList.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ParamList message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ApplyVSchemaResponse.ParamList} ParamList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ParamList.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.ApplyVSchemaResponse.ParamList(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.params && message.params.length)) + message.params = []; + message.params.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ParamList message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ApplyVSchemaResponse.ParamList} ParamList + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ParamList.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ParamList message. + * @function verify + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ParamList.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.params != null && message.hasOwnProperty("params")) { + if (!Array.isArray(message.params)) + return "params: array expected"; + for (let i = 0; i < message.params.length; ++i) + if (!$util.isString(message.params[i])) + return "params: string[] expected"; + } + return null; + }; + + /** + * Creates a ParamList message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ApplyVSchemaResponse.ParamList} ParamList + */ + ParamList.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ApplyVSchemaResponse.ParamList) + return object; + let message = new $root.vtctldata.ApplyVSchemaResponse.ParamList(); + if (object.params) { + if (!Array.isArray(object.params)) + throw TypeError(".vtctldata.ApplyVSchemaResponse.ParamList.params: array expected"); + message.params = []; + for (let i = 0; i < object.params.length; ++i) + message.params[i] = String(object.params[i]); + } + return message; + }; + + /** + * Creates a plain object from a ParamList message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {vtctldata.ApplyVSchemaResponse.ParamList} message ParamList + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ParamList.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.params = []; + if (message.params && message.params.length) { + object.params = []; + for (let j = 0; j < message.params.length; ++j) + object.params[j] = message.params[j]; + } + return object; + }; + + /** + * Converts this ParamList to JSON. 
+ * @function toJSON + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @instance + * @returns {Object.} JSON object + */ + ParamList.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ParamList + * @function getTypeUrl + * @memberof vtctldata.ApplyVSchemaResponse.ParamList + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ParamList.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ApplyVSchemaResponse.ParamList"; + }; + + return ParamList; + })(); + return ApplyVSchemaResponse; })(); @@ -111604,7 +119168,7 @@ export const vtctldata = $root.vtctldata = (() => { * @interface IBackupRequest * @property {topodata.ITabletAlias|null} [tablet_alias] BackupRequest tablet_alias * @property {boolean|null} [allow_primary] BackupRequest allow_primary - * @property {number|Long|null} [concurrency] BackupRequest concurrency + * @property {number|null} [concurrency] BackupRequest concurrency * @property {string|null} [incremental_from_pos] BackupRequest incremental_from_pos * @property {boolean|null} [upgrade_safe] BackupRequest upgrade_safe */ @@ -111642,11 +119206,11 @@ export const vtctldata = $root.vtctldata = (() => { /** * BackupRequest concurrency. - * @member {number|Long} concurrency + * @member {number} concurrency * @memberof vtctldata.BackupRequest * @instance */ - BackupRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + BackupRequest.prototype.concurrency = 0; /** * BackupRequest incremental_from_pos. 
@@ -111693,7 +119257,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) writer.uint32(/* id 2, wireType 0 =*/16).bool(message.allow_primary); if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 3, wireType 0 =*/24).uint64(message.concurrency); + writer.uint32(/* id 3, wireType 0 =*/24).int32(message.concurrency); if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) writer.uint32(/* id 4, wireType 2 =*/34).string(message.incremental_from_pos); if (message.upgrade_safe != null && Object.hasOwnProperty.call(message, "upgrade_safe")) @@ -111741,7 +119305,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 3: { - message.concurrency = reader.uint64(); + message.concurrency = reader.int32(); break; } case 4: { @@ -111796,8 +119360,8 @@ export const vtctldata = $root.vtctldata = (() => { if (typeof message.allow_primary !== "boolean") return "allow_primary: boolean expected"; if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) - return "concurrency: integer|Long expected"; + if (!$util.isInteger(message.concurrency)) + return "concurrency: integer expected"; if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) if (!$util.isString(message.incremental_from_pos)) return "incremental_from_pos: string expected"; @@ -111827,14 +119391,7 @@ export const vtctldata = $root.vtctldata = (() => { if (object.allow_primary != null) message.allow_primary = Boolean(object.allow_primary); if (object.concurrency != null) - if ($util.Long) - (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = true; - else if (typeof 
object.concurrency === "string") - message.concurrency = parseInt(object.concurrency, 10); - else if (typeof object.concurrency === "number") - message.concurrency = object.concurrency; - else if (typeof object.concurrency === "object") - message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true); + message.concurrency = object.concurrency | 0; if (object.incremental_from_pos != null) message.incremental_from_pos = String(object.incremental_from_pos); if (object.upgrade_safe != null) @@ -111858,11 +119415,7 @@ export const vtctldata = $root.vtctldata = (() => { if (options.defaults) { object.tablet_alias = null; object.allow_primary = false; - if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.concurrency = options.longs === String ? "0" : 0; + object.concurrency = 0; object.incremental_from_pos = ""; object.upgrade_safe = false; } @@ -111871,10 +119424,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) object.allow_primary = message.allow_primary; if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (typeof message.concurrency === "number") - object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency; - else - object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? 
new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency; + object.concurrency = message.concurrency; if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) object.incremental_from_pos = message.incremental_from_pos; if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) @@ -112203,7 +119753,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {string|null} [keyspace] BackupShardRequest keyspace * @property {string|null} [shard] BackupShardRequest shard * @property {boolean|null} [allow_primary] BackupShardRequest allow_primary - * @property {number|Long|null} [concurrency] BackupShardRequest concurrency + * @property {number|null} [concurrency] BackupShardRequest concurrency * @property {boolean|null} [upgrade_safe] BackupShardRequest upgrade_safe * @property {string|null} [incremental_from_pos] BackupShardRequest incremental_from_pos */ @@ -112249,11 +119799,11 @@ export const vtctldata = $root.vtctldata = (() => { /** * BackupShardRequest concurrency. - * @member {number|Long} concurrency + * @member {number} concurrency * @memberof vtctldata.BackupShardRequest * @instance */ - BackupShardRequest.prototype.concurrency = $util.Long ? $util.Long.fromBits(0,0,true) : 0; + BackupShardRequest.prototype.concurrency = 0; /** * BackupShardRequest upgrade_safe. 
@@ -112302,7 +119852,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.allow_primary != null && Object.hasOwnProperty.call(message, "allow_primary")) writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_primary); if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 4, wireType 0 =*/32).uint64(message.concurrency); + writer.uint32(/* id 4, wireType 0 =*/32).int32(message.concurrency); if (message.upgrade_safe != null && Object.hasOwnProperty.call(message, "upgrade_safe")) writer.uint32(/* id 5, wireType 0 =*/40).bool(message.upgrade_safe); if (message.incremental_from_pos != null && Object.hasOwnProperty.call(message, "incremental_from_pos")) @@ -112354,7 +119904,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 4: { - message.concurrency = reader.uint64(); + message.concurrency = reader.int32(); break; } case 5: { @@ -112410,8 +119960,8 @@ export const vtctldata = $root.vtctldata = (() => { if (typeof message.allow_primary !== "boolean") return "allow_primary: boolean expected"; if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (!$util.isInteger(message.concurrency) && !(message.concurrency && $util.isInteger(message.concurrency.low) && $util.isInteger(message.concurrency.high))) - return "concurrency: integer|Long expected"; + if (!$util.isInteger(message.concurrency)) + return "concurrency: integer expected"; if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) if (typeof message.upgrade_safe !== "boolean") return "upgrade_safe: boolean expected"; @@ -112440,14 +119990,7 @@ export const vtctldata = $root.vtctldata = (() => { if (object.allow_primary != null) message.allow_primary = Boolean(object.allow_primary); if (object.concurrency != null) - if ($util.Long) - (message.concurrency = $util.Long.fromValue(object.concurrency)).unsigned = true; - else if (typeof object.concurrency === "string") - 
message.concurrency = parseInt(object.concurrency, 10); - else if (typeof object.concurrency === "number") - message.concurrency = object.concurrency; - else if (typeof object.concurrency === "object") - message.concurrency = new $util.LongBits(object.concurrency.low >>> 0, object.concurrency.high >>> 0).toNumber(true); + message.concurrency = object.concurrency | 0; if (object.upgrade_safe != null) message.upgrade_safe = Boolean(object.upgrade_safe); if (object.incremental_from_pos != null) @@ -112472,11 +120015,7 @@ export const vtctldata = $root.vtctldata = (() => { object.keyspace = ""; object.shard = ""; object.allow_primary = false; - if ($util.Long) { - let long = new $util.Long(0, 0, true); - object.concurrency = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; - } else - object.concurrency = options.longs === String ? "0" : 0; + object.concurrency = 0; object.upgrade_safe = false; object.incremental_from_pos = ""; } @@ -112487,10 +120026,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.allow_primary != null && message.hasOwnProperty("allow_primary")) object.allow_primary = message.allow_primary; if (message.concurrency != null && message.hasOwnProperty("concurrency")) - if (typeof message.concurrency === "number") - object.concurrency = options.longs === String ? String(message.concurrency) : message.concurrency; - else - object.concurrency = options.longs === String ? $util.Long.prototype.toString.call(message.concurrency) : options.longs === Number ? 
new $util.LongBits(message.concurrency.low >>> 0, message.concurrency.high >>> 0).toNumber(true) : message.concurrency; + object.concurrency = message.concurrency; if (message.upgrade_safe != null && message.hasOwnProperty("upgrade_safe")) object.upgrade_safe = message.upgrade_safe; if (message.incremental_from_pos != null && message.hasOwnProperty("incremental_from_pos")) @@ -114540,7 +122076,6 @@ export const vtctldata = $root.vtctldata = (() => { * @property {string|null} [name] CreateKeyspaceRequest name * @property {boolean|null} [force] CreateKeyspaceRequest force * @property {boolean|null} [allow_empty_v_schema] CreateKeyspaceRequest allow_empty_v_schema - * @property {Array.|null} [served_froms] CreateKeyspaceRequest served_froms * @property {topodata.KeyspaceType|null} [type] CreateKeyspaceRequest type * @property {string|null} [base_keyspace] CreateKeyspaceRequest base_keyspace * @property {vttime.ITime|null} [snapshot_time] CreateKeyspaceRequest snapshot_time @@ -114557,7 +122092,6 @@ export const vtctldata = $root.vtctldata = (() => { * @param {vtctldata.ICreateKeyspaceRequest=} [properties] Properties to set */ function CreateKeyspaceRequest(properties) { - this.served_froms = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -114588,14 +122122,6 @@ export const vtctldata = $root.vtctldata = (() => { */ CreateKeyspaceRequest.prototype.allow_empty_v_schema = false; - /** - * CreateKeyspaceRequest served_froms. - * @member {Array.} served_froms - * @memberof vtctldata.CreateKeyspaceRequest - * @instance - */ - CreateKeyspaceRequest.prototype.served_froms = $util.emptyArray; - /** * CreateKeyspaceRequest type. 
* @member {topodata.KeyspaceType} type @@ -114666,9 +122192,6 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 2, wireType 0 =*/16).bool(message.force); if (message.allow_empty_v_schema != null && Object.hasOwnProperty.call(message, "allow_empty_v_schema")) writer.uint32(/* id 3, wireType 0 =*/24).bool(message.allow_empty_v_schema); - if (message.served_froms != null && message.served_froms.length) - for (let i = 0; i < message.served_froms.length; ++i) - $root.topodata.Keyspace.ServedFrom.encode(message.served_froms[i], writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); if (message.type != null && Object.hasOwnProperty.call(message, "type")) writer.uint32(/* id 7, wireType 0 =*/56).int32(message.type); if (message.base_keyspace != null && Object.hasOwnProperty.call(message, "base_keyspace")) @@ -114725,12 +122248,6 @@ export const vtctldata = $root.vtctldata = (() => { message.allow_empty_v_schema = reader.bool(); break; } - case 6: { - if (!(message.served_froms && message.served_froms.length)) - message.served_froms = []; - message.served_froms.push($root.topodata.Keyspace.ServedFrom.decode(reader, reader.uint32())); - break; - } case 7: { message.type = reader.int32(); break; @@ -114795,15 +122312,6 @@ export const vtctldata = $root.vtctldata = (() => { if (message.allow_empty_v_schema != null && message.hasOwnProperty("allow_empty_v_schema")) if (typeof message.allow_empty_v_schema !== "boolean") return "allow_empty_v_schema: boolean expected"; - if (message.served_froms != null && message.hasOwnProperty("served_froms")) { - if (!Array.isArray(message.served_froms)) - return "served_froms: array expected"; - for (let i = 0; i < message.served_froms.length; ++i) { - let error = $root.topodata.Keyspace.ServedFrom.verify(message.served_froms[i]); - if (error) - return "served_froms." 
+ error; - } - } if (message.type != null && message.hasOwnProperty("type")) switch (message.type) { default: @@ -114847,16 +122355,6 @@ export const vtctldata = $root.vtctldata = (() => { message.force = Boolean(object.force); if (object.allow_empty_v_schema != null) message.allow_empty_v_schema = Boolean(object.allow_empty_v_schema); - if (object.served_froms) { - if (!Array.isArray(object.served_froms)) - throw TypeError(".vtctldata.CreateKeyspaceRequest.served_froms: array expected"); - message.served_froms = []; - for (let i = 0; i < object.served_froms.length; ++i) { - if (typeof object.served_froms[i] !== "object") - throw TypeError(".vtctldata.CreateKeyspaceRequest.served_froms: object expected"); - message.served_froms[i] = $root.topodata.Keyspace.ServedFrom.fromObject(object.served_froms[i]); - } - } switch (object.type) { default: if (typeof object.type === "number") { @@ -114900,8 +122398,6 @@ export const vtctldata = $root.vtctldata = (() => { if (!options) options = {}; let object = {}; - if (options.arrays || options.defaults) - object.served_froms = []; if (options.defaults) { object.name = ""; object.force = false; @@ -114918,11 +122414,6 @@ export const vtctldata = $root.vtctldata = (() => { object.force = message.force; if (message.allow_empty_v_schema != null && message.hasOwnProperty("allow_empty_v_schema")) object.allow_empty_v_schema = message.allow_empty_v_schema; - if (message.served_froms && message.served_froms.length) { - object.served_froms = []; - for (let j = 0; j < message.served_froms.length; ++j) - object.served_froms[j] = $root.topodata.Keyspace.ServedFrom.toObject(message.served_froms[j], options); - } if (message.type != null && message.hasOwnProperty("type")) object.type = options.enums === String ? $root.topodata.KeyspaceType[message.type] === undefined ? 
message.type : $root.topodata.KeyspaceType[message.type] : message.type; if (message.base_keyspace != null && message.hasOwnProperty("base_keyspace")) @@ -120324,6 +127815,545 @@ export const vtctldata = $root.vtctldata = (() => { return ExecuteHookResponse; })(); + vtctldata.ExecuteMultiFetchAsDBARequest = (function() { + + /** + * Properties of an ExecuteMultiFetchAsDBARequest. + * @memberof vtctldata + * @interface IExecuteMultiFetchAsDBARequest + * @property {topodata.ITabletAlias|null} [tablet_alias] ExecuteMultiFetchAsDBARequest tablet_alias + * @property {string|null} [sql] ExecuteMultiFetchAsDBARequest sql + * @property {number|Long|null} [max_rows] ExecuteMultiFetchAsDBARequest max_rows + * @property {boolean|null} [disable_binlogs] ExecuteMultiFetchAsDBARequest disable_binlogs + * @property {boolean|null} [reload_schema] ExecuteMultiFetchAsDBARequest reload_schema + */ + + /** + * Constructs a new ExecuteMultiFetchAsDBARequest. + * @memberof vtctldata + * @classdesc Represents an ExecuteMultiFetchAsDBARequest. + * @implements IExecuteMultiFetchAsDBARequest + * @constructor + * @param {vtctldata.IExecuteMultiFetchAsDBARequest=} [properties] Properties to set + */ + function ExecuteMultiFetchAsDBARequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteMultiFetchAsDBARequest tablet_alias. + * @member {topodata.ITabletAlias|null|undefined} tablet_alias + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @instance + */ + ExecuteMultiFetchAsDBARequest.prototype.tablet_alias = null; + + /** + * ExecuteMultiFetchAsDBARequest sql. + * @member {string} sql + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @instance + */ + ExecuteMultiFetchAsDBARequest.prototype.sql = ""; + + /** + * ExecuteMultiFetchAsDBARequest max_rows. 
+ * @member {number|Long} max_rows + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @instance + */ + ExecuteMultiFetchAsDBARequest.prototype.max_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * ExecuteMultiFetchAsDBARequest disable_binlogs. + * @member {boolean} disable_binlogs + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @instance + */ + ExecuteMultiFetchAsDBARequest.prototype.disable_binlogs = false; + + /** + * ExecuteMultiFetchAsDBARequest reload_schema. + * @member {boolean} reload_schema + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @instance + */ + ExecuteMultiFetchAsDBARequest.prototype.reload_schema = false; + + /** + * Creates a new ExecuteMultiFetchAsDBARequest instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {vtctldata.IExecuteMultiFetchAsDBARequest=} [properties] Properties to set + * @returns {vtctldata.ExecuteMultiFetchAsDBARequest} ExecuteMultiFetchAsDBARequest instance + */ + ExecuteMultiFetchAsDBARequest.create = function create(properties) { + return new ExecuteMultiFetchAsDBARequest(properties); + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDBARequest message. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBARequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {vtctldata.IExecuteMultiFetchAsDBARequest} message ExecuteMultiFetchAsDBARequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDBARequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.tablet_alias != null && Object.hasOwnProperty.call(message, "tablet_alias")) + $root.topodata.TabletAlias.encode(message.tablet_alias, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + if (message.sql != null && Object.hasOwnProperty.call(message, "sql")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.sql); + if (message.max_rows != null && Object.hasOwnProperty.call(message, "max_rows")) + writer.uint32(/* id 3, wireType 0 =*/24).int64(message.max_rows); + if (message.disable_binlogs != null && Object.hasOwnProperty.call(message, "disable_binlogs")) + writer.uint32(/* id 4, wireType 0 =*/32).bool(message.disable_binlogs); + if (message.reload_schema != null && Object.hasOwnProperty.call(message, "reload_schema")) + writer.uint32(/* id 5, wireType 0 =*/40).bool(message.reload_schema); + return writer; + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDBARequest message, length delimited. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBARequest.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {vtctldata.IExecuteMultiFetchAsDBARequest} message ExecuteMultiFetchAsDBARequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDBARequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteMultiFetchAsDBARequest message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteMultiFetchAsDBARequest} ExecuteMultiFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDBARequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteMultiFetchAsDBARequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.tablet_alias = $root.topodata.TabletAlias.decode(reader, reader.uint32()); + break; + } + case 2: { + message.sql = reader.string(); + break; + } + case 3: { + message.max_rows = reader.int64(); + break; + } + case 4: { + message.disable_binlogs = reader.bool(); + break; + } + case 5: { + message.reload_schema = reader.bool(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteMultiFetchAsDBARequest message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteMultiFetchAsDBARequest} ExecuteMultiFetchAsDBARequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDBARequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteMultiFetchAsDBARequest message. + * @function verify + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteMultiFetchAsDBARequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) { + let error = $root.topodata.TabletAlias.verify(message.tablet_alias); + if (error) + return "tablet_alias." 
+ error; + } + if (message.sql != null && message.hasOwnProperty("sql")) + if (!$util.isString(message.sql)) + return "sql: string expected"; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (!$util.isInteger(message.max_rows) && !(message.max_rows && $util.isInteger(message.max_rows.low) && $util.isInteger(message.max_rows.high))) + return "max_rows: integer|Long expected"; + if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) + if (typeof message.disable_binlogs !== "boolean") + return "disable_binlogs: boolean expected"; + if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) + if (typeof message.reload_schema !== "boolean") + return "reload_schema: boolean expected"; + return null; + }; + + /** + * Creates an ExecuteMultiFetchAsDBARequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteMultiFetchAsDBARequest} ExecuteMultiFetchAsDBARequest + */ + ExecuteMultiFetchAsDBARequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteMultiFetchAsDBARequest) + return object; + let message = new $root.vtctldata.ExecuteMultiFetchAsDBARequest(); + if (object.tablet_alias != null) { + if (typeof object.tablet_alias !== "object") + throw TypeError(".vtctldata.ExecuteMultiFetchAsDBARequest.tablet_alias: object expected"); + message.tablet_alias = $root.topodata.TabletAlias.fromObject(object.tablet_alias); + } + if (object.sql != null) + message.sql = String(object.sql); + if (object.max_rows != null) + if ($util.Long) + (message.max_rows = $util.Long.fromValue(object.max_rows)).unsigned = false; + else if (typeof object.max_rows === "string") + message.max_rows = parseInt(object.max_rows, 10); + else if (typeof object.max_rows === "number") + message.max_rows = 
object.max_rows; + else if (typeof object.max_rows === "object") + message.max_rows = new $util.LongBits(object.max_rows.low >>> 0, object.max_rows.high >>> 0).toNumber(); + if (object.disable_binlogs != null) + message.disable_binlogs = Boolean(object.disable_binlogs); + if (object.reload_schema != null) + message.reload_schema = Boolean(object.reload_schema); + return message; + }; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDBARequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {vtctldata.ExecuteMultiFetchAsDBARequest} message ExecuteMultiFetchAsDBARequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteMultiFetchAsDBARequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.tablet_alias = null; + object.sql = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_rows = options.longs === String ? "0" : 0; + object.disable_binlogs = false; + object.reload_schema = false; + } + if (message.tablet_alias != null && message.hasOwnProperty("tablet_alias")) + object.tablet_alias = $root.topodata.TabletAlias.toObject(message.tablet_alias, options); + if (message.sql != null && message.hasOwnProperty("sql")) + object.sql = message.sql; + if (message.max_rows != null && message.hasOwnProperty("max_rows")) + if (typeof message.max_rows === "number") + object.max_rows = options.longs === String ? String(message.max_rows) : message.max_rows; + else + object.max_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_rows) : options.longs === Number ? 
new $util.LongBits(message.max_rows.low >>> 0, message.max_rows.high >>> 0).toNumber() : message.max_rows; + if (message.disable_binlogs != null && message.hasOwnProperty("disable_binlogs")) + object.disable_binlogs = message.disable_binlogs; + if (message.reload_schema != null && message.hasOwnProperty("reload_schema")) + object.reload_schema = message.reload_schema; + return object; + }; + + /** + * Converts this ExecuteMultiFetchAsDBARequest to JSON. + * @function toJSON + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @instance + * @returns {Object.} JSON object + */ + ExecuteMultiFetchAsDBARequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDBARequest + * @function getTypeUrl + * @memberof vtctldata.ExecuteMultiFetchAsDBARequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteMultiFetchAsDBARequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteMultiFetchAsDBARequest"; + }; + + return ExecuteMultiFetchAsDBARequest; + })(); + + vtctldata.ExecuteMultiFetchAsDBAResponse = (function() { + + /** + * Properties of an ExecuteMultiFetchAsDBAResponse. + * @memberof vtctldata + * @interface IExecuteMultiFetchAsDBAResponse + * @property {Array.|null} [results] ExecuteMultiFetchAsDBAResponse results + */ + + /** + * Constructs a new ExecuteMultiFetchAsDBAResponse. + * @memberof vtctldata + * @classdesc Represents an ExecuteMultiFetchAsDBAResponse. 
+ * @implements IExecuteMultiFetchAsDBAResponse + * @constructor + * @param {vtctldata.IExecuteMultiFetchAsDBAResponse=} [properties] Properties to set + */ + function ExecuteMultiFetchAsDBAResponse(properties) { + this.results = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ExecuteMultiFetchAsDBAResponse results. + * @member {Array.} results + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @instance + */ + ExecuteMultiFetchAsDBAResponse.prototype.results = $util.emptyArray; + + /** + * Creates a new ExecuteMultiFetchAsDBAResponse instance using the specified properties. + * @function create + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {vtctldata.IExecuteMultiFetchAsDBAResponse=} [properties] Properties to set + * @returns {vtctldata.ExecuteMultiFetchAsDBAResponse} ExecuteMultiFetchAsDBAResponse instance + */ + ExecuteMultiFetchAsDBAResponse.create = function create(properties) { + return new ExecuteMultiFetchAsDBAResponse(properties); + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDBAResponse message. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBAResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {vtctldata.IExecuteMultiFetchAsDBAResponse} message ExecuteMultiFetchAsDBAResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDBAResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.results != null && message.results.length) + for (let i = 0; i < message.results.length; ++i) + $root.query.QueryResult.encode(message.results[i], writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified ExecuteMultiFetchAsDBAResponse message, length delimited. Does not implicitly {@link vtctldata.ExecuteMultiFetchAsDBAResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {vtctldata.IExecuteMultiFetchAsDBAResponse} message ExecuteMultiFetchAsDBAResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ExecuteMultiFetchAsDBAResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes an ExecuteMultiFetchAsDBAResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ExecuteMultiFetchAsDBAResponse} ExecuteMultiFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDBAResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ExecuteMultiFetchAsDBAResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (!(message.results && message.results.length)) + message.results = []; + message.results.push($root.query.QueryResult.decode(reader, reader.uint32())); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes an ExecuteMultiFetchAsDBAResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ExecuteMultiFetchAsDBAResponse} ExecuteMultiFetchAsDBAResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ExecuteMultiFetchAsDBAResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies an ExecuteMultiFetchAsDBAResponse message. 
+ * @function verify + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ExecuteMultiFetchAsDBAResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.results != null && message.hasOwnProperty("results")) { + if (!Array.isArray(message.results)) + return "results: array expected"; + for (let i = 0; i < message.results.length; ++i) { + let error = $root.query.QueryResult.verify(message.results[i]); + if (error) + return "results." + error; + } + } + return null; + }; + + /** + * Creates an ExecuteMultiFetchAsDBAResponse message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ExecuteMultiFetchAsDBAResponse} ExecuteMultiFetchAsDBAResponse + */ + ExecuteMultiFetchAsDBAResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ExecuteMultiFetchAsDBAResponse) + return object; + let message = new $root.vtctldata.ExecuteMultiFetchAsDBAResponse(); + if (object.results) { + if (!Array.isArray(object.results)) + throw TypeError(".vtctldata.ExecuteMultiFetchAsDBAResponse.results: array expected"); + message.results = []; + for (let i = 0; i < object.results.length; ++i) { + if (typeof object.results[i] !== "object") + throw TypeError(".vtctldata.ExecuteMultiFetchAsDBAResponse.results: object expected"); + message.results[i] = $root.query.QueryResult.fromObject(object.results[i]); + } + } + return message; + }; + + /** + * Creates a plain object from an ExecuteMultiFetchAsDBAResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {vtctldata.ExecuteMultiFetchAsDBAResponse} message ExecuteMultiFetchAsDBAResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ExecuteMultiFetchAsDBAResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.results = []; + if (message.results && message.results.length) { + object.results = []; + for (let j = 0; j < message.results.length; ++j) + object.results[j] = $root.query.QueryResult.toObject(message.results[j], options); + } + return object; + }; + + /** + * Converts this ExecuteMultiFetchAsDBAResponse to JSON. + * @function toJSON + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @instance + * @returns {Object.} JSON object + */ + ExecuteMultiFetchAsDBAResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ExecuteMultiFetchAsDBAResponse + * @function getTypeUrl + * @memberof vtctldata.ExecuteMultiFetchAsDBAResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ExecuteMultiFetchAsDBAResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ExecuteMultiFetchAsDBAResponse"; + }; + + return ExecuteMultiFetchAsDBAResponse; + })(); + vtctldata.FindAllShardsInKeyspaceRequest = (function() { /** @@ -120772,6 +128802,481 @@ export const vtctldata = $root.vtctldata = (() => { return FindAllShardsInKeyspaceResponse; })(); + vtctldata.ForceCutOverSchemaMigrationRequest = (function() { + + /** + * Properties of a ForceCutOverSchemaMigrationRequest. 
+ * @memberof vtctldata + * @interface IForceCutOverSchemaMigrationRequest + * @property {string|null} [keyspace] ForceCutOverSchemaMigrationRequest keyspace + * @property {string|null} [uuid] ForceCutOverSchemaMigrationRequest uuid + */ + + /** + * Constructs a new ForceCutOverSchemaMigrationRequest. + * @memberof vtctldata + * @classdesc Represents a ForceCutOverSchemaMigrationRequest. + * @implements IForceCutOverSchemaMigrationRequest + * @constructor + * @param {vtctldata.IForceCutOverSchemaMigrationRequest=} [properties] Properties to set + */ + function ForceCutOverSchemaMigrationRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ForceCutOverSchemaMigrationRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @instance + */ + ForceCutOverSchemaMigrationRequest.prototype.keyspace = ""; + + /** + * ForceCutOverSchemaMigrationRequest uuid. + * @member {string} uuid + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @instance + */ + ForceCutOverSchemaMigrationRequest.prototype.uuid = ""; + + /** + * Creates a new ForceCutOverSchemaMigrationRequest instance using the specified properties. + * @function create + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {vtctldata.IForceCutOverSchemaMigrationRequest=} [properties] Properties to set + * @returns {vtctldata.ForceCutOverSchemaMigrationRequest} ForceCutOverSchemaMigrationRequest instance + */ + ForceCutOverSchemaMigrationRequest.create = function create(properties) { + return new ForceCutOverSchemaMigrationRequest(properties); + }; + + /** + * Encodes the specified ForceCutOverSchemaMigrationRequest message. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {vtctldata.IForceCutOverSchemaMigrationRequest} message ForceCutOverSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ForceCutOverSchemaMigrationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.uuid != null && Object.hasOwnProperty.call(message, "uuid")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.uuid); + return writer; + }; + + /** + * Encodes the specified ForceCutOverSchemaMigrationRequest message, length delimited. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {vtctldata.IForceCutOverSchemaMigrationRequest} message ForceCutOverSchemaMigrationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ForceCutOverSchemaMigrationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ForceCutOverSchemaMigrationRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ForceCutOverSchemaMigrationRequest} ForceCutOverSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ForceCutOverSchemaMigrationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ForceCutOverSchemaMigrationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.uuid = reader.string(); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ForceCutOverSchemaMigrationRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ForceCutOverSchemaMigrationRequest} ForceCutOverSchemaMigrationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ForceCutOverSchemaMigrationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ForceCutOverSchemaMigrationRequest message. 
+ * @function verify + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ForceCutOverSchemaMigrationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.uuid != null && message.hasOwnProperty("uuid")) + if (!$util.isString(message.uuid)) + return "uuid: string expected"; + return null; + }; + + /** + * Creates a ForceCutOverSchemaMigrationRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ForceCutOverSchemaMigrationRequest} ForceCutOverSchemaMigrationRequest + */ + ForceCutOverSchemaMigrationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ForceCutOverSchemaMigrationRequest) + return object; + let message = new $root.vtctldata.ForceCutOverSchemaMigrationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.uuid != null) + message.uuid = String(object.uuid); + return message; + }; + + /** + * Creates a plain object from a ForceCutOverSchemaMigrationRequest message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {vtctldata.ForceCutOverSchemaMigrationRequest} message ForceCutOverSchemaMigrationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ForceCutOverSchemaMigrationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) { + object.keyspace = ""; + object.uuid = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.uuid != null && message.hasOwnProperty("uuid")) + object.uuid = message.uuid; + return object; + }; + + /** + * Converts this ForceCutOverSchemaMigrationRequest to JSON. + * @function toJSON + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @instance + * @returns {Object.} JSON object + */ + ForceCutOverSchemaMigrationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ForceCutOverSchemaMigrationRequest + * @function getTypeUrl + * @memberof vtctldata.ForceCutOverSchemaMigrationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ForceCutOverSchemaMigrationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ForceCutOverSchemaMigrationRequest"; + }; + + return ForceCutOverSchemaMigrationRequest; + })(); + + vtctldata.ForceCutOverSchemaMigrationResponse = (function() { + + /** + * Properties of a ForceCutOverSchemaMigrationResponse. 
+ * @memberof vtctldata + * @interface IForceCutOverSchemaMigrationResponse + * @property {Object.|null} [rows_affected_by_shard] ForceCutOverSchemaMigrationResponse rows_affected_by_shard + */ + + /** + * Constructs a new ForceCutOverSchemaMigrationResponse. + * @memberof vtctldata + * @classdesc Represents a ForceCutOverSchemaMigrationResponse. + * @implements IForceCutOverSchemaMigrationResponse + * @constructor + * @param {vtctldata.IForceCutOverSchemaMigrationResponse=} [properties] Properties to set + */ + function ForceCutOverSchemaMigrationResponse(properties) { + this.rows_affected_by_shard = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * ForceCutOverSchemaMigrationResponse rows_affected_by_shard. + * @member {Object.} rows_affected_by_shard + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @instance + */ + ForceCutOverSchemaMigrationResponse.prototype.rows_affected_by_shard = $util.emptyObject; + + /** + * Creates a new ForceCutOverSchemaMigrationResponse instance using the specified properties. + * @function create + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {vtctldata.IForceCutOverSchemaMigrationResponse=} [properties] Properties to set + * @returns {vtctldata.ForceCutOverSchemaMigrationResponse} ForceCutOverSchemaMigrationResponse instance + */ + ForceCutOverSchemaMigrationResponse.create = function create(properties) { + return new ForceCutOverSchemaMigrationResponse(properties); + }; + + /** + * Encodes the specified ForceCutOverSchemaMigrationResponse message. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationResponse.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {vtctldata.IForceCutOverSchemaMigrationResponse} message ForceCutOverSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ForceCutOverSchemaMigrationResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.rows_affected_by_shard != null && Object.hasOwnProperty.call(message, "rows_affected_by_shard")) + for (let keys = Object.keys(message.rows_affected_by_shard), i = 0; i < keys.length; ++i) + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 0 =*/16).uint64(message.rows_affected_by_shard[keys[i]]).ldelim(); + return writer; + }; + + /** + * Encodes the specified ForceCutOverSchemaMigrationResponse message, length delimited. Does not implicitly {@link vtctldata.ForceCutOverSchemaMigrationResponse.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {vtctldata.IForceCutOverSchemaMigrationResponse} message ForceCutOverSchemaMigrationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + ForceCutOverSchemaMigrationResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a ForceCutOverSchemaMigrationResponse message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.ForceCutOverSchemaMigrationResponse} ForceCutOverSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ForceCutOverSchemaMigrationResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.ForceCutOverSchemaMigrationResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.rows_affected_by_shard === $util.emptyObject) + message.rows_affected_by_shard = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = 0; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = reader.uint64(); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.rows_affected_by_shard[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a ForceCutOverSchemaMigrationResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.ForceCutOverSchemaMigrationResponse} ForceCutOverSchemaMigrationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + ForceCutOverSchemaMigrationResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a ForceCutOverSchemaMigrationResponse message. + * @function verify + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + ForceCutOverSchemaMigrationResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.rows_affected_by_shard != null && message.hasOwnProperty("rows_affected_by_shard")) { + if (!$util.isObject(message.rows_affected_by_shard)) + return "rows_affected_by_shard: object expected"; + let key = Object.keys(message.rows_affected_by_shard); + for (let i = 0; i < key.length; ++i) + if (!$util.isInteger(message.rows_affected_by_shard[key[i]]) && !(message.rows_affected_by_shard[key[i]] && $util.isInteger(message.rows_affected_by_shard[key[i]].low) && $util.isInteger(message.rows_affected_by_shard[key[i]].high))) + return "rows_affected_by_shard: integer|Long{k:string} expected"; + } + return null; + }; + + /** + * Creates a ForceCutOverSchemaMigrationResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.ForceCutOverSchemaMigrationResponse} ForceCutOverSchemaMigrationResponse + */ + ForceCutOverSchemaMigrationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.ForceCutOverSchemaMigrationResponse) + return object; + let message = new $root.vtctldata.ForceCutOverSchemaMigrationResponse(); + if (object.rows_affected_by_shard) { + if (typeof object.rows_affected_by_shard !== "object") + throw TypeError(".vtctldata.ForceCutOverSchemaMigrationResponse.rows_affected_by_shard: object expected"); + message.rows_affected_by_shard = {}; + for (let keys = Object.keys(object.rows_affected_by_shard), i = 0; i < keys.length; ++i) + if ($util.Long) + (message.rows_affected_by_shard[keys[i]] = $util.Long.fromValue(object.rows_affected_by_shard[keys[i]])).unsigned = true; + else if (typeof object.rows_affected_by_shard[keys[i]] === "string") + message.rows_affected_by_shard[keys[i]] = parseInt(object.rows_affected_by_shard[keys[i]], 10); + else if (typeof object.rows_affected_by_shard[keys[i]] === "number") + message.rows_affected_by_shard[keys[i]] = object.rows_affected_by_shard[keys[i]]; + else if (typeof object.rows_affected_by_shard[keys[i]] === "object") + message.rows_affected_by_shard[keys[i]] = new $util.LongBits(object.rows_affected_by_shard[keys[i]].low >>> 0, object.rows_affected_by_shard[keys[i]].high >>> 0).toNumber(true); + } + return message; + }; + + /** + * Creates a plain object from a ForceCutOverSchemaMigrationResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {vtctldata.ForceCutOverSchemaMigrationResponse} message ForceCutOverSchemaMigrationResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + ForceCutOverSchemaMigrationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.rows_affected_by_shard = {}; + let keys2; + if (message.rows_affected_by_shard && (keys2 = Object.keys(message.rows_affected_by_shard)).length) { + object.rows_affected_by_shard = {}; + for (let j = 0; j < keys2.length; ++j) + if (typeof message.rows_affected_by_shard[keys2[j]] === "number") + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? String(message.rows_affected_by_shard[keys2[j]]) : message.rows_affected_by_shard[keys2[j]]; + else + object.rows_affected_by_shard[keys2[j]] = options.longs === String ? $util.Long.prototype.toString.call(message.rows_affected_by_shard[keys2[j]]) : options.longs === Number ? new $util.LongBits(message.rows_affected_by_shard[keys2[j]].low >>> 0, message.rows_affected_by_shard[keys2[j]].high >>> 0).toNumber(true) : message.rows_affected_by_shard[keys2[j]]; + } + return object; + }; + + /** + * Converts this ForceCutOverSchemaMigrationResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @instance + * @returns {Object.} JSON object + */ + ForceCutOverSchemaMigrationResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for ForceCutOverSchemaMigrationResponse + * @function getTypeUrl + * @memberof vtctldata.ForceCutOverSchemaMigrationResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + ForceCutOverSchemaMigrationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.ForceCutOverSchemaMigrationResponse"; + }; + + return ForceCutOverSchemaMigrationResponse; + })(); + vtctldata.GetBackupsRequest = (function() { /** @@ -124159,6 +132664,389 @@ export const vtctldata = $root.vtctldata = (() => { return GetPermissionsResponse; })(); + vtctldata.GetKeyspaceRoutingRulesRequest = (function() { + + /** + * Properties of a GetKeyspaceRoutingRulesRequest. + * @memberof vtctldata + * @interface IGetKeyspaceRoutingRulesRequest + */ + + /** + * Constructs a new GetKeyspaceRoutingRulesRequest. + * @memberof vtctldata + * @classdesc Represents a GetKeyspaceRoutingRulesRequest. + * @implements IGetKeyspaceRoutingRulesRequest + * @constructor + * @param {vtctldata.IGetKeyspaceRoutingRulesRequest=} [properties] Properties to set + */ + function GetKeyspaceRoutingRulesRequest(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * Creates a new GetKeyspaceRoutingRulesRequest instance using the specified properties. 
+ * @function create + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {vtctldata.IGetKeyspaceRoutingRulesRequest=} [properties] Properties to set + * @returns {vtctldata.GetKeyspaceRoutingRulesRequest} GetKeyspaceRoutingRulesRequest instance + */ + GetKeyspaceRoutingRulesRequest.create = function create(properties) { + return new GetKeyspaceRoutingRulesRequest(properties); + }; + + /** + * Encodes the specified GetKeyspaceRoutingRulesRequest message. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesRequest.verify|verify} messages. + * @function encode + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {vtctldata.IGetKeyspaceRoutingRulesRequest} message GetKeyspaceRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetKeyspaceRoutingRulesRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + return writer; + }; + + /** + * Encodes the specified GetKeyspaceRoutingRulesRequest message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {vtctldata.IGetKeyspaceRoutingRulesRequest} message GetKeyspaceRoutingRulesRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetKeyspaceRoutingRulesRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetKeyspaceRoutingRulesRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetKeyspaceRoutingRulesRequest} GetKeyspaceRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetKeyspaceRoutingRulesRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspaceRoutingRulesRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetKeyspaceRoutingRulesRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetKeyspaceRoutingRulesRequest} GetKeyspaceRoutingRulesRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetKeyspaceRoutingRulesRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetKeyspaceRoutingRulesRequest message. 
+ * @function verify + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetKeyspaceRoutingRulesRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + return null; + }; + + /** + * Creates a GetKeyspaceRoutingRulesRequest message from a plain object. Also converts values to their respective internal types. + * @function fromObject + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetKeyspaceRoutingRulesRequest} GetKeyspaceRoutingRulesRequest + */ + GetKeyspaceRoutingRulesRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetKeyspaceRoutingRulesRequest) + return object; + return new $root.vtctldata.GetKeyspaceRoutingRulesRequest(); + }; + + /** + * Creates a plain object from a GetKeyspaceRoutingRulesRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {vtctldata.GetKeyspaceRoutingRulesRequest} message GetKeyspaceRoutingRulesRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetKeyspaceRoutingRulesRequest.toObject = function toObject() { + return {}; + }; + + /** + * Converts this GetKeyspaceRoutingRulesRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @instance + * @returns {Object.} JSON object + */ + GetKeyspaceRoutingRulesRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetKeyspaceRoutingRulesRequest + * @function getTypeUrl + * @memberof vtctldata.GetKeyspaceRoutingRulesRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetKeyspaceRoutingRulesRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetKeyspaceRoutingRulesRequest"; + }; + + return GetKeyspaceRoutingRulesRequest; + })(); + + vtctldata.GetKeyspaceRoutingRulesResponse = (function() { + + /** + * Properties of a GetKeyspaceRoutingRulesResponse. + * @memberof vtctldata + * @interface IGetKeyspaceRoutingRulesResponse + * @property {vschema.IKeyspaceRoutingRules|null} [keyspace_routing_rules] GetKeyspaceRoutingRulesResponse keyspace_routing_rules + */ + + /** + * Constructs a new GetKeyspaceRoutingRulesResponse. + * @memberof vtctldata + * @classdesc Represents a GetKeyspaceRoutingRulesResponse. + * @implements IGetKeyspaceRoutingRulesResponse + * @constructor + * @param {vtctldata.IGetKeyspaceRoutingRulesResponse=} [properties] Properties to set + */ + function GetKeyspaceRoutingRulesResponse(properties) { + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetKeyspaceRoutingRulesResponse keyspace_routing_rules. 
+ * @member {vschema.IKeyspaceRoutingRules|null|undefined} keyspace_routing_rules + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @instance + */ + GetKeyspaceRoutingRulesResponse.prototype.keyspace_routing_rules = null; + + /** + * Creates a new GetKeyspaceRoutingRulesResponse instance using the specified properties. + * @function create + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {vtctldata.IGetKeyspaceRoutingRulesResponse=} [properties] Properties to set + * @returns {vtctldata.GetKeyspaceRoutingRulesResponse} GetKeyspaceRoutingRulesResponse instance + */ + GetKeyspaceRoutingRulesResponse.create = function create(properties) { + return new GetKeyspaceRoutingRulesResponse(properties); + }; + + /** + * Encodes the specified GetKeyspaceRoutingRulesResponse message. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {vtctldata.IGetKeyspaceRoutingRulesResponse} message GetKeyspaceRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetKeyspaceRoutingRulesResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace_routing_rules != null && Object.hasOwnProperty.call(message, "keyspace_routing_rules")) + $root.vschema.KeyspaceRoutingRules.encode(message.keyspace_routing_rules, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); + return writer; + }; + + /** + * Encodes the specified GetKeyspaceRoutingRulesResponse message, length delimited. Does not implicitly {@link vtctldata.GetKeyspaceRoutingRulesResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {vtctldata.IGetKeyspaceRoutingRulesResponse} message GetKeyspaceRoutingRulesResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetKeyspaceRoutingRulesResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetKeyspaceRoutingRulesResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetKeyspaceRoutingRulesResponse} GetKeyspaceRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetKeyspaceRoutingRulesResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetKeyspaceRoutingRulesResponse(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.decode(reader, reader.uint32()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetKeyspaceRoutingRulesResponse message from the specified reader or buffer, length delimited. 
+ * @function decodeDelimited + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetKeyspaceRoutingRulesResponse} GetKeyspaceRoutingRulesResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetKeyspaceRoutingRulesResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetKeyspaceRoutingRulesResponse message. + * @function verify + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetKeyspaceRoutingRulesResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) { + let error = $root.vschema.KeyspaceRoutingRules.verify(message.keyspace_routing_rules); + if (error) + return "keyspace_routing_rules." + error; + } + return null; + }; + + /** + * Creates a GetKeyspaceRoutingRulesResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetKeyspaceRoutingRulesResponse} GetKeyspaceRoutingRulesResponse + */ + GetKeyspaceRoutingRulesResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetKeyspaceRoutingRulesResponse) + return object; + let message = new $root.vtctldata.GetKeyspaceRoutingRulesResponse(); + if (object.keyspace_routing_rules != null) { + if (typeof object.keyspace_routing_rules !== "object") + throw TypeError(".vtctldata.GetKeyspaceRoutingRulesResponse.keyspace_routing_rules: object expected"); + message.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.fromObject(object.keyspace_routing_rules); + } + return message; + }; + + /** + * Creates a plain object from a GetKeyspaceRoutingRulesResponse message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {vtctldata.GetKeyspaceRoutingRulesResponse} message GetKeyspaceRoutingRulesResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetKeyspaceRoutingRulesResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.defaults) + object.keyspace_routing_rules = null; + if (message.keyspace_routing_rules != null && message.hasOwnProperty("keyspace_routing_rules")) + object.keyspace_routing_rules = $root.vschema.KeyspaceRoutingRules.toObject(message.keyspace_routing_rules, options); + return object; + }; + + /** + * Converts this GetKeyspaceRoutingRulesResponse to JSON. 
+ * @function toJSON + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @instance + * @returns {Object.} JSON object + */ + GetKeyspaceRoutingRulesResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetKeyspaceRoutingRulesResponse + * @function getTypeUrl + * @memberof vtctldata.GetKeyspaceRoutingRulesResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetKeyspaceRoutingRulesResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetKeyspaceRoutingRulesResponse"; + }; + + return GetKeyspaceRoutingRulesResponse; + })(); + vtctldata.GetRoutingRulesRequest = (function() { /** @@ -125826,6 +134714,518 @@ export const vtctldata = $root.vtctldata = (() => { return GetSchemaMigrationsResponse; })(); + vtctldata.GetShardReplicationRequest = (function() { + + /** + * Properties of a GetShardReplicationRequest. + * @memberof vtctldata + * @interface IGetShardReplicationRequest + * @property {string|null} [keyspace] GetShardReplicationRequest keyspace + * @property {string|null} [shard] GetShardReplicationRequest shard + * @property {Array.|null} [cells] GetShardReplicationRequest cells + */ + + /** + * Constructs a new GetShardReplicationRequest. + * @memberof vtctldata + * @classdesc Represents a GetShardReplicationRequest. 
+ * @implements IGetShardReplicationRequest + * @constructor + * @param {vtctldata.IGetShardReplicationRequest=} [properties] Properties to set + */ + function GetShardReplicationRequest(properties) { + this.cells = []; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetShardReplicationRequest keyspace. + * @member {string} keyspace + * @memberof vtctldata.GetShardReplicationRequest + * @instance + */ + GetShardReplicationRequest.prototype.keyspace = ""; + + /** + * GetShardReplicationRequest shard. + * @member {string} shard + * @memberof vtctldata.GetShardReplicationRequest + * @instance + */ + GetShardReplicationRequest.prototype.shard = ""; + + /** + * GetShardReplicationRequest cells. + * @member {Array.} cells + * @memberof vtctldata.GetShardReplicationRequest + * @instance + */ + GetShardReplicationRequest.prototype.cells = $util.emptyArray; + + /** + * Creates a new GetShardReplicationRequest instance using the specified properties. + * @function create + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {vtctldata.IGetShardReplicationRequest=} [properties] Properties to set + * @returns {vtctldata.GetShardReplicationRequest} GetShardReplicationRequest instance + */ + GetShardReplicationRequest.create = function create(properties) { + return new GetShardReplicationRequest(properties); + }; + + /** + * Encodes the specified GetShardReplicationRequest message. Does not implicitly {@link vtctldata.GetShardReplicationRequest.verify|verify} messages. 
+ * @function encode + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {vtctldata.IGetShardReplicationRequest} message GetShardReplicationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetShardReplicationRequest.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) + writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); + if (message.shard != null && Object.hasOwnProperty.call(message, "shard")) + writer.uint32(/* id 2, wireType 2 =*/18).string(message.shard); + if (message.cells != null && message.cells.length) + for (let i = 0; i < message.cells.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); + return writer; + }; + + /** + * Encodes the specified GetShardReplicationRequest message, length delimited. Does not implicitly {@link vtctldata.GetShardReplicationRequest.verify|verify} messages. + * @function encodeDelimited + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {vtctldata.IGetShardReplicationRequest} message GetShardReplicationRequest message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetShardReplicationRequest.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetShardReplicationRequest message from the specified reader or buffer. 
+ * @function decode + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetShardReplicationRequest} GetShardReplicationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetShardReplicationRequest.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.GetShardReplicationRequest(); + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + message.keyspace = reader.string(); + break; + } + case 2: { + message.shard = reader.string(); + break; + } + case 3: { + if (!(message.cells && message.cells.length)) + message.cells = []; + message.cells.push(reader.string()); + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetShardReplicationRequest message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetShardReplicationRequest} GetShardReplicationRequest + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetShardReplicationRequest.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetShardReplicationRequest message. 
+ * @function verify + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetShardReplicationRequest.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + if (!$util.isString(message.keyspace)) + return "keyspace: string expected"; + if (message.shard != null && message.hasOwnProperty("shard")) + if (!$util.isString(message.shard)) + return "shard: string expected"; + if (message.cells != null && message.hasOwnProperty("cells")) { + if (!Array.isArray(message.cells)) + return "cells: array expected"; + for (let i = 0; i < message.cells.length; ++i) + if (!$util.isString(message.cells[i])) + return "cells: string[] expected"; + } + return null; + }; + + /** + * Creates a GetShardReplicationRequest message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetShardReplicationRequest} GetShardReplicationRequest + */ + GetShardReplicationRequest.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetShardReplicationRequest) + return object; + let message = new $root.vtctldata.GetShardReplicationRequest(); + if (object.keyspace != null) + message.keyspace = String(object.keyspace); + if (object.shard != null) + message.shard = String(object.shard); + if (object.cells) { + if (!Array.isArray(object.cells)) + throw TypeError(".vtctldata.GetShardReplicationRequest.cells: array expected"); + message.cells = []; + for (let i = 0; i < object.cells.length; ++i) + message.cells[i] = String(object.cells[i]); + } + return message; + }; + + /** + * Creates a plain object from a GetShardReplicationRequest message. Also converts values to other types if specified. + * @function toObject + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {vtctldata.GetShardReplicationRequest} message GetShardReplicationRequest + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetShardReplicationRequest.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.arrays || options.defaults) + object.cells = []; + if (options.defaults) { + object.keyspace = ""; + object.shard = ""; + } + if (message.keyspace != null && message.hasOwnProperty("keyspace")) + object.keyspace = message.keyspace; + if (message.shard != null && message.hasOwnProperty("shard")) + object.shard = message.shard; + if (message.cells && message.cells.length) { + object.cells = []; + for (let j = 0; j < message.cells.length; ++j) + object.cells[j] = message.cells[j]; + } + return object; + }; + + /** + * Converts this GetShardReplicationRequest to JSON. 
+ * @function toJSON + * @memberof vtctldata.GetShardReplicationRequest + * @instance + * @returns {Object.} JSON object + */ + GetShardReplicationRequest.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetShardReplicationRequest + * @function getTypeUrl + * @memberof vtctldata.GetShardReplicationRequest + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetShardReplicationRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetShardReplicationRequest"; + }; + + return GetShardReplicationRequest; + })(); + + vtctldata.GetShardReplicationResponse = (function() { + + /** + * Properties of a GetShardReplicationResponse. + * @memberof vtctldata + * @interface IGetShardReplicationResponse + * @property {Object.|null} [shard_replication_by_cell] GetShardReplicationResponse shard_replication_by_cell + */ + + /** + * Constructs a new GetShardReplicationResponse. + * @memberof vtctldata + * @classdesc Represents a GetShardReplicationResponse. + * @implements IGetShardReplicationResponse + * @constructor + * @param {vtctldata.IGetShardReplicationResponse=} [properties] Properties to set + */ + function GetShardReplicationResponse(properties) { + this.shard_replication_by_cell = {}; + if (properties) + for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) + if (properties[keys[i]] != null) + this[keys[i]] = properties[keys[i]]; + } + + /** + * GetShardReplicationResponse shard_replication_by_cell. 
+ * @member {Object.} shard_replication_by_cell + * @memberof vtctldata.GetShardReplicationResponse + * @instance + */ + GetShardReplicationResponse.prototype.shard_replication_by_cell = $util.emptyObject; + + /** + * Creates a new GetShardReplicationResponse instance using the specified properties. + * @function create + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {vtctldata.IGetShardReplicationResponse=} [properties] Properties to set + * @returns {vtctldata.GetShardReplicationResponse} GetShardReplicationResponse instance + */ + GetShardReplicationResponse.create = function create(properties) { + return new GetShardReplicationResponse(properties); + }; + + /** + * Encodes the specified GetShardReplicationResponse message. Does not implicitly {@link vtctldata.GetShardReplicationResponse.verify|verify} messages. + * @function encode + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {vtctldata.IGetShardReplicationResponse} message GetShardReplicationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetShardReplicationResponse.encode = function encode(message, writer) { + if (!writer) + writer = $Writer.create(); + if (message.shard_replication_by_cell != null && Object.hasOwnProperty.call(message, "shard_replication_by_cell")) + for (let keys = Object.keys(message.shard_replication_by_cell), i = 0; i < keys.length; ++i) { + writer.uint32(/* id 1, wireType 2 =*/10).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]); + $root.topodata.ShardReplication.encode(message.shard_replication_by_cell[keys[i]], writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim().ldelim(); + } + return writer; + }; + + /** + * Encodes the specified GetShardReplicationResponse message, length delimited. Does not implicitly {@link vtctldata.GetShardReplicationResponse.verify|verify} messages. 
+ * @function encodeDelimited + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {vtctldata.IGetShardReplicationResponse} message GetShardReplicationResponse message or plain object to encode + * @param {$protobuf.Writer} [writer] Writer to encode to + * @returns {$protobuf.Writer} Writer + */ + GetShardReplicationResponse.encodeDelimited = function encodeDelimited(message, writer) { + return this.encode(message, writer).ldelim(); + }; + + /** + * Decodes a GetShardReplicationResponse message from the specified reader or buffer. + * @function decode + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @param {number} [length] Message length if known beforehand + * @returns {vtctldata.GetShardReplicationResponse} GetShardReplicationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetShardReplicationResponse.decode = function decode(reader, length) { + if (!(reader instanceof $Reader)) + reader = $Reader.create(reader); + let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.GetShardReplicationResponse(), key, value; + while (reader.pos < end) { + let tag = reader.uint32(); + switch (tag >>> 3) { + case 1: { + if (message.shard_replication_by_cell === $util.emptyObject) + message.shard_replication_by_cell = {}; + let end2 = reader.uint32() + reader.pos; + key = ""; + value = null; + while (reader.pos < end2) { + let tag2 = reader.uint32(); + switch (tag2 >>> 3) { + case 1: + key = reader.string(); + break; + case 2: + value = $root.topodata.ShardReplication.decode(reader, reader.uint32()); + break; + default: + reader.skipType(tag2 & 7); + break; + } + } + message.shard_replication_by_cell[key] = value; + break; + } + default: + reader.skipType(tag & 7); + break; + } + } + return message; + }; + + /** + * Decodes a GetShardReplicationResponse message from the specified reader or buffer, length delimited. + * @function decodeDelimited + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from + * @returns {vtctldata.GetShardReplicationResponse} GetShardReplicationResponse + * @throws {Error} If the payload is not a reader or valid buffer + * @throws {$protobuf.util.ProtocolError} If required fields are missing + */ + GetShardReplicationResponse.decodeDelimited = function decodeDelimited(reader) { + if (!(reader instanceof $Reader)) + reader = new $Reader(reader); + return this.decode(reader, reader.uint32()); + }; + + /** + * Verifies a GetShardReplicationResponse message. 
+ * @function verify + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {Object.} message Plain object to verify + * @returns {string|null} `null` if valid, otherwise the reason why it is not + */ + GetShardReplicationResponse.verify = function verify(message) { + if (typeof message !== "object" || message === null) + return "object expected"; + if (message.shard_replication_by_cell != null && message.hasOwnProperty("shard_replication_by_cell")) { + if (!$util.isObject(message.shard_replication_by_cell)) + return "shard_replication_by_cell: object expected"; + let key = Object.keys(message.shard_replication_by_cell); + for (let i = 0; i < key.length; ++i) { + let error = $root.topodata.ShardReplication.verify(message.shard_replication_by_cell[key[i]]); + if (error) + return "shard_replication_by_cell." + error; + } + } + return null; + }; + + /** + * Creates a GetShardReplicationResponse message from a plain object. Also converts values to their respective internal types. 
+ * @function fromObject + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {Object.} object Plain object + * @returns {vtctldata.GetShardReplicationResponse} GetShardReplicationResponse + */ + GetShardReplicationResponse.fromObject = function fromObject(object) { + if (object instanceof $root.vtctldata.GetShardReplicationResponse) + return object; + let message = new $root.vtctldata.GetShardReplicationResponse(); + if (object.shard_replication_by_cell) { + if (typeof object.shard_replication_by_cell !== "object") + throw TypeError(".vtctldata.GetShardReplicationResponse.shard_replication_by_cell: object expected"); + message.shard_replication_by_cell = {}; + for (let keys = Object.keys(object.shard_replication_by_cell), i = 0; i < keys.length; ++i) { + if (typeof object.shard_replication_by_cell[keys[i]] !== "object") + throw TypeError(".vtctldata.GetShardReplicationResponse.shard_replication_by_cell: object expected"); + message.shard_replication_by_cell[keys[i]] = $root.topodata.ShardReplication.fromObject(object.shard_replication_by_cell[keys[i]]); + } + } + return message; + }; + + /** + * Creates a plain object from a GetShardReplicationResponse message. Also converts values to other types if specified. 
+ * @function toObject + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {vtctldata.GetShardReplicationResponse} message GetShardReplicationResponse + * @param {$protobuf.IConversionOptions} [options] Conversion options + * @returns {Object.} Plain object + */ + GetShardReplicationResponse.toObject = function toObject(message, options) { + if (!options) + options = {}; + let object = {}; + if (options.objects || options.defaults) + object.shard_replication_by_cell = {}; + let keys2; + if (message.shard_replication_by_cell && (keys2 = Object.keys(message.shard_replication_by_cell)).length) { + object.shard_replication_by_cell = {}; + for (let j = 0; j < keys2.length; ++j) + object.shard_replication_by_cell[keys2[j]] = $root.topodata.ShardReplication.toObject(message.shard_replication_by_cell[keys2[j]], options); + } + return object; + }; + + /** + * Converts this GetShardReplicationResponse to JSON. + * @function toJSON + * @memberof vtctldata.GetShardReplicationResponse + * @instance + * @returns {Object.} JSON object + */ + GetShardReplicationResponse.prototype.toJSON = function toJSON() { + return this.constructor.toObject(this, $protobuf.util.toJSONOptions); + }; + + /** + * Gets the default type url for GetShardReplicationResponse + * @function getTypeUrl + * @memberof vtctldata.GetShardReplicationResponse + * @static + * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") + * @returns {string} The default type url + */ + GetShardReplicationResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { + if (typeUrlPrefix === undefined) { + typeUrlPrefix = "type.googleapis.com"; + } + return typeUrlPrefix + "/vtctldata.GetShardReplicationResponse"; + }; + + return GetShardReplicationResponse; + })(); + vtctldata.GetShardRequest = (function() { /** @@ -131854,6 +141254,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {boolean|null} [name_only] GetWorkflowsRequest name_only * @property 
{string|null} [workflow] GetWorkflowsRequest workflow * @property {boolean|null} [include_logs] GetWorkflowsRequest include_logs + * @property {Array.|null} [shards] GetWorkflowsRequest shards */ /** @@ -131865,6 +141266,7 @@ export const vtctldata = $root.vtctldata = (() => { * @param {vtctldata.IGetWorkflowsRequest=} [properties] Properties to set */ function GetWorkflowsRequest(properties) { + this.shards = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -131911,6 +141313,14 @@ export const vtctldata = $root.vtctldata = (() => { */ GetWorkflowsRequest.prototype.include_logs = false; + /** + * GetWorkflowsRequest shards. + * @member {Array.} shards + * @memberof vtctldata.GetWorkflowsRequest + * @instance + */ + GetWorkflowsRequest.prototype.shards = $util.emptyArray; + /** * Creates a new GetWorkflowsRequest instance using the specified properties. * @function create @@ -131945,6 +141355,9 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 4, wireType 2 =*/34).string(message.workflow); if (message.include_logs != null && Object.hasOwnProperty.call(message, "include_logs")) writer.uint32(/* id 5, wireType 0 =*/40).bool(message.include_logs); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.shards[i]); return writer; }; @@ -131999,6 +141412,12 @@ export const vtctldata = $root.vtctldata = (() => { message.include_logs = reader.bool(); break; } + case 6: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -132049,6 +141468,13 @@ export const vtctldata = $root.vtctldata = (() => { if (message.include_logs != null && message.hasOwnProperty("include_logs")) if (typeof message.include_logs !== "boolean") return "include_logs: boolean 
expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } return null; }; @@ -132074,6 +141500,13 @@ export const vtctldata = $root.vtctldata = (() => { message.workflow = String(object.workflow); if (object.include_logs != null) message.include_logs = Boolean(object.include_logs); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.GetWorkflowsRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } return message; }; @@ -132090,6 +141523,8 @@ export const vtctldata = $root.vtctldata = (() => { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.shards = []; if (options.defaults) { object.keyspace = ""; object.active_only = false; @@ -132107,6 +141542,11 @@ export const vtctldata = $root.vtctldata = (() => { object.workflow = message.workflow; if (message.include_logs != null && message.hasOwnProperty("include_logs")) object.include_logs = message.include_logs; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } return object; }; @@ -137873,6 +147313,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {boolean|null} [auto_start] MoveTablesCreateRequest auto_start * @property {boolean|null} [no_routing_rules] MoveTablesCreateRequest no_routing_rules * @property {boolean|null} [atomic_copy] MoveTablesCreateRequest atomic_copy + * @property {vtctldata.IWorkflowOptions|null} [workflow_options] MoveTablesCreateRequest workflow_options */ /** @@ -138047,6 +147488,14 @@ export const vtctldata = $root.vtctldata = (() => { */ 
MoveTablesCreateRequest.prototype.atomic_copy = false; + /** + * MoveTablesCreateRequest workflow_options. + * @member {vtctldata.IWorkflowOptions|null|undefined} workflow_options + * @memberof vtctldata.MoveTablesCreateRequest + * @instance + */ + MoveTablesCreateRequest.prototype.workflow_options = null; + /** * Creates a new MoveTablesCreateRequest instance using the specified properties. * @function create @@ -138117,6 +147566,8 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 18, wireType 0 =*/144).bool(message.no_routing_rules); if (message.atomic_copy != null && Object.hasOwnProperty.call(message, "atomic_copy")) writer.uint32(/* id 19, wireType 0 =*/152).bool(message.atomic_copy); + if (message.workflow_options != null && Object.hasOwnProperty.call(message, "workflow_options")) + $root.vtctldata.WorkflowOptions.encode(message.workflow_options, writer.uint32(/* id 20, wireType 2 =*/162).fork()).ldelim(); return writer; }; @@ -138242,6 +147693,10 @@ export const vtctldata = $root.vtctldata = (() => { message.atomic_copy = reader.bool(); break; } + case 20: { + message.workflow_options = $root.vtctldata.WorkflowOptions.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -138374,6 +147829,11 @@ export const vtctldata = $root.vtctldata = (() => { if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) if (typeof message.atomic_copy !== "boolean") return "atomic_copy: boolean expected"; + if (message.workflow_options != null && message.hasOwnProperty("workflow_options")) { + let error = $root.vtctldata.WorkflowOptions.verify(message.workflow_options); + if (error) + return "workflow_options." 
+ error; + } return null; }; @@ -138520,6 +147980,11 @@ export const vtctldata = $root.vtctldata = (() => { message.no_routing_rules = Boolean(object.no_routing_rules); if (object.atomic_copy != null) message.atomic_copy = Boolean(object.atomic_copy); + if (object.workflow_options != null) { + if (typeof object.workflow_options !== "object") + throw TypeError(".vtctldata.MoveTablesCreateRequest.workflow_options: object expected"); + message.workflow_options = $root.vtctldata.WorkflowOptions.fromObject(object.workflow_options); + } return message; }; @@ -138558,6 +148023,7 @@ export const vtctldata = $root.vtctldata = (() => { object.auto_start = false; object.no_routing_rules = false; object.atomic_copy = false; + object.workflow_options = null; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; @@ -138612,6 +148078,8 @@ export const vtctldata = $root.vtctldata = (() => { object.no_routing_rules = message.no_routing_rules; if (message.atomic_copy != null && message.hasOwnProperty("atomic_copy")) object.atomic_copy = message.atomic_copy; + if (message.workflow_options != null && message.hasOwnProperty("workflow_options")) + object.workflow_options = $root.vtctldata.WorkflowOptions.toObject(message.workflow_options, options); return object; }; @@ -139136,6 +148604,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {boolean|null} [keep_routing_rules] MoveTablesCompleteRequest keep_routing_rules * @property {boolean|null} [rename_tables] MoveTablesCompleteRequest rename_tables * @property {boolean|null} [dry_run] MoveTablesCompleteRequest dry_run + * @property {Array.|null} [shards] MoveTablesCompleteRequest shards */ /** @@ -139147,6 +148616,7 @@ export const vtctldata = $root.vtctldata = (() => { * @param {vtctldata.IMoveTablesCompleteRequest=} [properties] Properties to set */ function MoveTablesCompleteRequest(properties) { + this.shards = []; if (properties) for (let keys = 
Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -139201,6 +148671,14 @@ export const vtctldata = $root.vtctldata = (() => { */ MoveTablesCompleteRequest.prototype.dry_run = false; + /** + * MoveTablesCompleteRequest shards. + * @member {Array.} shards + * @memberof vtctldata.MoveTablesCompleteRequest + * @instance + */ + MoveTablesCompleteRequest.prototype.shards = $util.emptyArray; + /** * Creates a new MoveTablesCompleteRequest instance using the specified properties. * @function create @@ -139237,6 +148715,9 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 6, wireType 0 =*/48).bool(message.rename_tables); if (message.dry_run != null && Object.hasOwnProperty.call(message, "dry_run")) writer.uint32(/* id 7, wireType 0 =*/56).bool(message.dry_run); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 8, wireType 2 =*/66).string(message.shards[i]); return writer; }; @@ -139295,6 +148776,12 @@ export const vtctldata = $root.vtctldata = (() => { message.dry_run = reader.bool(); break; } + case 8: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -139348,6 +148835,13 @@ export const vtctldata = $root.vtctldata = (() => { if (message.dry_run != null && message.hasOwnProperty("dry_run")) if (typeof message.dry_run !== "boolean") return "dry_run: boolean expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } return null; }; @@ -139375,6 +148869,13 @@ export const vtctldata = $root.vtctldata = (() => { message.rename_tables = Boolean(object.rename_tables); if (object.dry_run != null) 
message.dry_run = Boolean(object.dry_run); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.MoveTablesCompleteRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } return message; }; @@ -139391,6 +148892,8 @@ export const vtctldata = $root.vtctldata = (() => { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.shards = []; if (options.defaults) { object.workflow = ""; object.target_keyspace = ""; @@ -139411,6 +148914,11 @@ export const vtctldata = $root.vtctldata = (() => { object.rename_tables = message.rename_tables; if (message.dry_run != null && message.hasOwnProperty("dry_run")) object.dry_run = message.dry_run; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } return object; }; @@ -140080,6 +149588,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {topodata.ITabletAlias|null} [new_primary] PlannedReparentShardRequest new_primary * @property {topodata.ITabletAlias|null} [avoid_primary] PlannedReparentShardRequest avoid_primary * @property {vttime.IDuration|null} [wait_replicas_timeout] PlannedReparentShardRequest wait_replicas_timeout + * @property {vttime.IDuration|null} [tolerable_replication_lag] PlannedReparentShardRequest tolerable_replication_lag */ /** @@ -140137,6 +149646,14 @@ export const vtctldata = $root.vtctldata = (() => { */ PlannedReparentShardRequest.prototype.wait_replicas_timeout = null; + /** + * PlannedReparentShardRequest tolerable_replication_lag. 
+ * @member {vttime.IDuration|null|undefined} tolerable_replication_lag + * @memberof vtctldata.PlannedReparentShardRequest + * @instance + */ + PlannedReparentShardRequest.prototype.tolerable_replication_lag = null; + /** * Creates a new PlannedReparentShardRequest instance using the specified properties. * @function create @@ -140171,6 +149688,8 @@ export const vtctldata = $root.vtctldata = (() => { $root.topodata.TabletAlias.encode(message.avoid_primary, writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim(); if (message.wait_replicas_timeout != null && Object.hasOwnProperty.call(message, "wait_replicas_timeout")) $root.vttime.Duration.encode(message.wait_replicas_timeout, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.tolerable_replication_lag != null && Object.hasOwnProperty.call(message, "tolerable_replication_lag")) + $root.vttime.Duration.encode(message.tolerable_replication_lag, writer.uint32(/* id 6, wireType 2 =*/50).fork()).ldelim(); return writer; }; @@ -140225,6 +149744,10 @@ export const vtctldata = $root.vtctldata = (() => { message.wait_replicas_timeout = $root.vttime.Duration.decode(reader, reader.uint32()); break; } + case 6: { + message.tolerable_replication_lag = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -140281,6 +149804,11 @@ export const vtctldata = $root.vtctldata = (() => { if (error) return "wait_replicas_timeout." + error; } + if (message.tolerable_replication_lag != null && message.hasOwnProperty("tolerable_replication_lag")) { + let error = $root.vttime.Duration.verify(message.tolerable_replication_lag); + if (error) + return "tolerable_replication_lag." 
+ error; + } return null; }; @@ -140315,6 +149843,11 @@ export const vtctldata = $root.vtctldata = (() => { throw TypeError(".vtctldata.PlannedReparentShardRequest.wait_replicas_timeout: object expected"); message.wait_replicas_timeout = $root.vttime.Duration.fromObject(object.wait_replicas_timeout); } + if (object.tolerable_replication_lag != null) { + if (typeof object.tolerable_replication_lag !== "object") + throw TypeError(".vtctldata.PlannedReparentShardRequest.tolerable_replication_lag: object expected"); + message.tolerable_replication_lag = $root.vttime.Duration.fromObject(object.tolerable_replication_lag); + } return message; }; @@ -140337,6 +149870,7 @@ export const vtctldata = $root.vtctldata = (() => { object.new_primary = null; object.avoid_primary = null; object.wait_replicas_timeout = null; + object.tolerable_replication_lag = null; } if (message.keyspace != null && message.hasOwnProperty("keyspace")) object.keyspace = message.keyspace; @@ -140348,6 +149882,8 @@ export const vtctldata = $root.vtctldata = (() => { object.avoid_primary = $root.topodata.TabletAlias.toObject(message.avoid_primary, options); if (message.wait_replicas_timeout != null && message.hasOwnProperty("wait_replicas_timeout")) object.wait_replicas_timeout = $root.vttime.Duration.toObject(message.wait_replicas_timeout, options); + if (message.tolerable_replication_lag != null && message.hasOwnProperty("tolerable_replication_lag")) + object.tolerable_replication_lag = $root.vttime.Duration.toObject(message.tolerable_replication_lag, options); return object; }; @@ -142866,7 +152402,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) writer.uint32(/* id 3, wireType 0 =*/24).bool(message.include_primary); if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 4, wireType 0 =*/32).uint32(message.concurrency); + writer.uint32(/* id 4, 
wireType 0 =*/32).int32(message.concurrency); return writer; }; @@ -142914,7 +152450,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 4: { - message.concurrency = reader.uint32(); + message.concurrency = reader.int32(); break; } default: @@ -142986,7 +152522,7 @@ export const vtctldata = $root.vtctldata = (() => { if (object.include_primary != null) message.include_primary = Boolean(object.include_primary); if (object.concurrency != null) - message.concurrency = object.concurrency >>> 0; + message.concurrency = object.concurrency | 0; return message; }; @@ -143374,7 +152910,7 @@ export const vtctldata = $root.vtctldata = (() => { if (message.include_primary != null && Object.hasOwnProperty.call(message, "include_primary")) writer.uint32(/* id 4, wireType 0 =*/32).bool(message.include_primary); if (message.concurrency != null && Object.hasOwnProperty.call(message, "concurrency")) - writer.uint32(/* id 5, wireType 0 =*/40).uint32(message.concurrency); + writer.uint32(/* id 5, wireType 0 =*/40).int32(message.concurrency); return writer; }; @@ -143426,7 +152962,7 @@ export const vtctldata = $root.vtctldata = (() => { break; } case 5: { - message.concurrency = reader.uint32(); + message.concurrency = reader.int32(); break; } default: @@ -143503,7 +153039,7 @@ export const vtctldata = $root.vtctldata = (() => { if (object.include_primary != null) message.include_primary = Boolean(object.include_primary); if (object.concurrency != null) - message.concurrency = object.concurrency >>> 0; + message.concurrency = object.concurrency | 0; return message; }; @@ -148106,591 +157642,6 @@ export const vtctldata = $root.vtctldata = (() => { return SetKeyspaceDurabilityPolicyResponse; })(); - vtctldata.SetKeyspaceServedFromRequest = (function() { - - /** - * Properties of a SetKeyspaceServedFromRequest. 
- * @memberof vtctldata - * @interface ISetKeyspaceServedFromRequest - * @property {string|null} [keyspace] SetKeyspaceServedFromRequest keyspace - * @property {topodata.TabletType|null} [tablet_type] SetKeyspaceServedFromRequest tablet_type - * @property {Array.|null} [cells] SetKeyspaceServedFromRequest cells - * @property {boolean|null} [remove] SetKeyspaceServedFromRequest remove - * @property {string|null} [source_keyspace] SetKeyspaceServedFromRequest source_keyspace - */ - - /** - * Constructs a new SetKeyspaceServedFromRequest. - * @memberof vtctldata - * @classdesc Represents a SetKeyspaceServedFromRequest. - * @implements ISetKeyspaceServedFromRequest - * @constructor - * @param {vtctldata.ISetKeyspaceServedFromRequest=} [properties] Properties to set - */ - function SetKeyspaceServedFromRequest(properties) { - this.cells = []; - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * SetKeyspaceServedFromRequest keyspace. - * @member {string} keyspace - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.keyspace = ""; - - /** - * SetKeyspaceServedFromRequest tablet_type. - * @member {topodata.TabletType} tablet_type - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.tablet_type = 0; - - /** - * SetKeyspaceServedFromRequest cells. - * @member {Array.} cells - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.cells = $util.emptyArray; - - /** - * SetKeyspaceServedFromRequest remove. - * @member {boolean} remove - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.remove = false; - - /** - * SetKeyspaceServedFromRequest source_keyspace. 
- * @member {string} source_keyspace - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - */ - SetKeyspaceServedFromRequest.prototype.source_keyspace = ""; - - /** - * Creates a new SetKeyspaceServedFromRequest instance using the specified properties. - * @function create - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {vtctldata.ISetKeyspaceServedFromRequest=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest instance - */ - SetKeyspaceServedFromRequest.create = function create(properties) { - return new SetKeyspaceServedFromRequest(properties); - }; - - /** - * Encodes the specified SetKeyspaceServedFromRequest message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. - * @function encode - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {vtctldata.ISetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - SetKeyspaceServedFromRequest.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); - if (message.tablet_type != null && Object.hasOwnProperty.call(message, "tablet_type")) - writer.uint32(/* id 2, wireType 0 =*/16).int32(message.tablet_type); - if (message.cells != null && message.cells.length) - for (let i = 0; i < message.cells.length; ++i) - writer.uint32(/* id 3, wireType 2 =*/26).string(message.cells[i]); - if (message.remove != null && Object.hasOwnProperty.call(message, "remove")) - writer.uint32(/* id 4, wireType 0 =*/32).bool(message.remove); - if (message.source_keyspace != null && Object.hasOwnProperty.call(message, "source_keyspace")) - 
writer.uint32(/* id 5, wireType 2 =*/42).string(message.source_keyspace); - return writer; - }; - - /** - * Encodes the specified SetKeyspaceServedFromRequest message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromRequest.verify|verify} messages. - * @function encodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {vtctldata.ISetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - SetKeyspaceServedFromRequest.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - SetKeyspaceServedFromRequest.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? 
reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceServedFromRequest(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = reader.string(); - break; - } - case 2: { - message.tablet_type = reader.int32(); - break; - } - case 3: { - if (!(message.cells && message.cells.length)) - message.cells = []; - message.cells.push(reader.string()); - break; - } - case 4: { - message.remove = reader.bool(); - break; - } - case 5: { - message.source_keyspace = reader.string(); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a SetKeyspaceServedFromRequest message from the specified reader or buffer, length delimited. - * @function decodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - SetKeyspaceServedFromRequest.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a SetKeyspaceServedFromRequest message. 
- * @function verify - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - SetKeyspaceServedFromRequest.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - if (!$util.isString(message.keyspace)) - return "keyspace: string expected"; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - switch (message.tablet_type) { - default: - return "tablet_type: enum value expected"; - case 0: - case 1: - case 1: - case 2: - case 3: - case 3: - case 4: - case 5: - case 6: - case 7: - case 8: - break; - } - if (message.cells != null && message.hasOwnProperty("cells")) { - if (!Array.isArray(message.cells)) - return "cells: array expected"; - for (let i = 0; i < message.cells.length; ++i) - if (!$util.isString(message.cells[i])) - return "cells: string[] expected"; - } - if (message.remove != null && message.hasOwnProperty("remove")) - if (typeof message.remove !== "boolean") - return "remove: boolean expected"; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - if (!$util.isString(message.source_keyspace)) - return "source_keyspace: string expected"; - return null; - }; - - /** - * Creates a SetKeyspaceServedFromRequest message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceServedFromRequest} SetKeyspaceServedFromRequest - */ - SetKeyspaceServedFromRequest.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceServedFromRequest) - return object; - let message = new $root.vtctldata.SetKeyspaceServedFromRequest(); - if (object.keyspace != null) - message.keyspace = String(object.keyspace); - switch (object.tablet_type) { - default: - if (typeof object.tablet_type === "number") { - message.tablet_type = object.tablet_type; - break; - } - break; - case "UNKNOWN": - case 0: - message.tablet_type = 0; - break; - case "PRIMARY": - case 1: - message.tablet_type = 1; - break; - case "MASTER": - case 1: - message.tablet_type = 1; - break; - case "REPLICA": - case 2: - message.tablet_type = 2; - break; - case "RDONLY": - case 3: - message.tablet_type = 3; - break; - case "BATCH": - case 3: - message.tablet_type = 3; - break; - case "SPARE": - case 4: - message.tablet_type = 4; - break; - case "EXPERIMENTAL": - case 5: - message.tablet_type = 5; - break; - case "BACKUP": - case 6: - message.tablet_type = 6; - break; - case "RESTORE": - case 7: - message.tablet_type = 7; - break; - case "DRAINED": - case 8: - message.tablet_type = 8; - break; - } - if (object.cells) { - if (!Array.isArray(object.cells)) - throw TypeError(".vtctldata.SetKeyspaceServedFromRequest.cells: array expected"); - message.cells = []; - for (let i = 0; i < object.cells.length; ++i) - message.cells[i] = String(object.cells[i]); - } - if (object.remove != null) - message.remove = Boolean(object.remove); - if (object.source_keyspace != null) - message.source_keyspace = String(object.source_keyspace); - return message; - }; - - /** - * Creates a plain object from a SetKeyspaceServedFromRequest message. Also converts values to other types if specified. 
- * @function toObject - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {vtctldata.SetKeyspaceServedFromRequest} message SetKeyspaceServedFromRequest - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - SetKeyspaceServedFromRequest.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.arrays || options.defaults) - object.cells = []; - if (options.defaults) { - object.keyspace = ""; - object.tablet_type = options.enums === String ? "UNKNOWN" : 0; - object.remove = false; - object.source_keyspace = ""; - } - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = message.keyspace; - if (message.tablet_type != null && message.hasOwnProperty("tablet_type")) - object.tablet_type = options.enums === String ? $root.topodata.TabletType[message.tablet_type] === undefined ? message.tablet_type : $root.topodata.TabletType[message.tablet_type] : message.tablet_type; - if (message.cells && message.cells.length) { - object.cells = []; - for (let j = 0; j < message.cells.length; ++j) - object.cells[j] = message.cells[j]; - } - if (message.remove != null && message.hasOwnProperty("remove")) - object.remove = message.remove; - if (message.source_keyspace != null && message.hasOwnProperty("source_keyspace")) - object.source_keyspace = message.source_keyspace; - return object; - }; - - /** - * Converts this SetKeyspaceServedFromRequest to JSON. 
- * @function toJSON - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @instance - * @returns {Object.} JSON object - */ - SetKeyspaceServedFromRequest.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for SetKeyspaceServedFromRequest - * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceServedFromRequest - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - SetKeyspaceServedFromRequest.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.SetKeyspaceServedFromRequest"; - }; - - return SetKeyspaceServedFromRequest; - })(); - - vtctldata.SetKeyspaceServedFromResponse = (function() { - - /** - * Properties of a SetKeyspaceServedFromResponse. - * @memberof vtctldata - * @interface ISetKeyspaceServedFromResponse - * @property {topodata.IKeyspace|null} [keyspace] SetKeyspaceServedFromResponse keyspace - */ - - /** - * Constructs a new SetKeyspaceServedFromResponse. - * @memberof vtctldata - * @classdesc Represents a SetKeyspaceServedFromResponse. - * @implements ISetKeyspaceServedFromResponse - * @constructor - * @param {vtctldata.ISetKeyspaceServedFromResponse=} [properties] Properties to set - */ - function SetKeyspaceServedFromResponse(properties) { - if (properties) - for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) - if (properties[keys[i]] != null) - this[keys[i]] = properties[keys[i]]; - } - - /** - * SetKeyspaceServedFromResponse keyspace. 
- * @member {topodata.IKeyspace|null|undefined} keyspace - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @instance - */ - SetKeyspaceServedFromResponse.prototype.keyspace = null; - - /** - * Creates a new SetKeyspaceServedFromResponse instance using the specified properties. - * @function create - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {vtctldata.ISetKeyspaceServedFromResponse=} [properties] Properties to set - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse instance - */ - SetKeyspaceServedFromResponse.create = function create(properties) { - return new SetKeyspaceServedFromResponse(properties); - }; - - /** - * Encodes the specified SetKeyspaceServedFromResponse message. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. - * @function encode - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {vtctldata.ISetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - SetKeyspaceServedFromResponse.encode = function encode(message, writer) { - if (!writer) - writer = $Writer.create(); - if (message.keyspace != null && Object.hasOwnProperty.call(message, "keyspace")) - $root.topodata.Keyspace.encode(message.keyspace, writer.uint32(/* id 1, wireType 2 =*/10).fork()).ldelim(); - return writer; - }; - - /** - * Encodes the specified SetKeyspaceServedFromResponse message, length delimited. Does not implicitly {@link vtctldata.SetKeyspaceServedFromResponse.verify|verify} messages. 
- * @function encodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {vtctldata.ISetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse message or plain object to encode - * @param {$protobuf.Writer} [writer] Writer to encode to - * @returns {$protobuf.Writer} Writer - */ - SetKeyspaceServedFromResponse.encodeDelimited = function encodeDelimited(message, writer) { - return this.encode(message, writer).ldelim(); - }; - - /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer. - * @function decode - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @param {number} [length] Message length if known beforehand - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - SetKeyspaceServedFromResponse.decode = function decode(reader, length) { - if (!(reader instanceof $Reader)) - reader = $Reader.create(reader); - let end = length === undefined ? reader.len : reader.pos + length, message = new $root.vtctldata.SetKeyspaceServedFromResponse(); - while (reader.pos < end) { - let tag = reader.uint32(); - switch (tag >>> 3) { - case 1: { - message.keyspace = $root.topodata.Keyspace.decode(reader, reader.uint32()); - break; - } - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }; - - /** - * Decodes a SetKeyspaceServedFromResponse message from the specified reader or buffer, length delimited. 
- * @function decodeDelimited - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {$protobuf.Reader|Uint8Array} reader Reader or buffer to decode from - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse - * @throws {Error} If the payload is not a reader or valid buffer - * @throws {$protobuf.util.ProtocolError} If required fields are missing - */ - SetKeyspaceServedFromResponse.decodeDelimited = function decodeDelimited(reader) { - if (!(reader instanceof $Reader)) - reader = new $Reader(reader); - return this.decode(reader, reader.uint32()); - }; - - /** - * Verifies a SetKeyspaceServedFromResponse message. - * @function verify - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {Object.} message Plain object to verify - * @returns {string|null} `null` if valid, otherwise the reason why it is not - */ - SetKeyspaceServedFromResponse.verify = function verify(message) { - if (typeof message !== "object" || message === null) - return "object expected"; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) { - let error = $root.topodata.Keyspace.verify(message.keyspace); - if (error) - return "keyspace." + error; - } - return null; - }; - - /** - * Creates a SetKeyspaceServedFromResponse message from a plain object. Also converts values to their respective internal types. 
- * @function fromObject - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {Object.} object Plain object - * @returns {vtctldata.SetKeyspaceServedFromResponse} SetKeyspaceServedFromResponse - */ - SetKeyspaceServedFromResponse.fromObject = function fromObject(object) { - if (object instanceof $root.vtctldata.SetKeyspaceServedFromResponse) - return object; - let message = new $root.vtctldata.SetKeyspaceServedFromResponse(); - if (object.keyspace != null) { - if (typeof object.keyspace !== "object") - throw TypeError(".vtctldata.SetKeyspaceServedFromResponse.keyspace: object expected"); - message.keyspace = $root.topodata.Keyspace.fromObject(object.keyspace); - } - return message; - }; - - /** - * Creates a plain object from a SetKeyspaceServedFromResponse message. Also converts values to other types if specified. - * @function toObject - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {vtctldata.SetKeyspaceServedFromResponse} message SetKeyspaceServedFromResponse - * @param {$protobuf.IConversionOptions} [options] Conversion options - * @returns {Object.} Plain object - */ - SetKeyspaceServedFromResponse.toObject = function toObject(message, options) { - if (!options) - options = {}; - let object = {}; - if (options.defaults) - object.keyspace = null; - if (message.keyspace != null && message.hasOwnProperty("keyspace")) - object.keyspace = $root.topodata.Keyspace.toObject(message.keyspace, options); - return object; - }; - - /** - * Converts this SetKeyspaceServedFromResponse to JSON. 
- * @function toJSON - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @instance - * @returns {Object.} JSON object - */ - SetKeyspaceServedFromResponse.prototype.toJSON = function toJSON() { - return this.constructor.toObject(this, $protobuf.util.toJSONOptions); - }; - - /** - * Gets the default type url for SetKeyspaceServedFromResponse - * @function getTypeUrl - * @memberof vtctldata.SetKeyspaceServedFromResponse - * @static - * @param {string} [typeUrlPrefix] your custom typeUrlPrefix(default "type.googleapis.com") - * @returns {string} The default type url - */ - SetKeyspaceServedFromResponse.getTypeUrl = function getTypeUrl(typeUrlPrefix) { - if (typeUrlPrefix === undefined) { - typeUrlPrefix = "type.googleapis.com"; - } - return typeUrlPrefix + "/vtctldata.SetKeyspaceServedFromResponse"; - }; - - return SetKeyspaceServedFromResponse; - })(); - vtctldata.SetKeyspaceShardingInfoRequest = (function() { /** @@ -159738,6 +168689,8 @@ export const vtctldata = $root.vtctldata = (() => { * @property {vttime.IDuration|null} [wait_update_interval] VDiffCreateRequest wait_update_interval * @property {boolean|null} [auto_retry] VDiffCreateRequest auto_retry * @property {boolean|null} [verbose] VDiffCreateRequest verbose + * @property {number|Long|null} [max_report_sample_rows] VDiffCreateRequest max_report_sample_rows + * @property {vttime.IDuration|null} [max_diff_duration] VDiffCreateRequest max_diff_duration */ /** @@ -159903,6 +168856,22 @@ export const vtctldata = $root.vtctldata = (() => { */ VDiffCreateRequest.prototype.verbose = false; + /** + * VDiffCreateRequest max_report_sample_rows. + * @member {number|Long} max_report_sample_rows + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.max_report_sample_rows = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VDiffCreateRequest max_diff_duration. 
+ * @member {vttime.IDuration|null|undefined} max_diff_duration + * @memberof vtctldata.VDiffCreateRequest + * @instance + */ + VDiffCreateRequest.prototype.max_diff_duration = null; + /** * Creates a new VDiffCreateRequest instance using the specified properties. * @function create @@ -159970,6 +168939,10 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 17, wireType 0 =*/136).bool(message.auto_retry); if (message.verbose != null && Object.hasOwnProperty.call(message, "verbose")) writer.uint32(/* id 18, wireType 0 =*/144).bool(message.verbose); + if (message.max_report_sample_rows != null && Object.hasOwnProperty.call(message, "max_report_sample_rows")) + writer.uint32(/* id 19, wireType 0 =*/152).int64(message.max_report_sample_rows); + if (message.max_diff_duration != null && Object.hasOwnProperty.call(message, "max_diff_duration")) + $root.vttime.Duration.encode(message.max_diff_duration, writer.uint32(/* id 20, wireType 2 =*/162).fork()).ldelim(); return writer; }; @@ -160089,6 +169062,14 @@ export const vtctldata = $root.vtctldata = (() => { message.verbose = reader.bool(); break; } + case 19: { + message.max_report_sample_rows = reader.int64(); + break; + } + case 20: { + message.max_diff_duration = $root.vttime.Duration.decode(reader, reader.uint32()); + break; + } default: reader.skipType(tag & 7); break; @@ -160218,6 +169199,14 @@ export const vtctldata = $root.vtctldata = (() => { if (message.verbose != null && message.hasOwnProperty("verbose")) if (typeof message.verbose !== "boolean") return "verbose: boolean expected"; + if (message.max_report_sample_rows != null && message.hasOwnProperty("max_report_sample_rows")) + if (!$util.isInteger(message.max_report_sample_rows) && !(message.max_report_sample_rows && $util.isInteger(message.max_report_sample_rows.low) && $util.isInteger(message.max_report_sample_rows.high))) + return "max_report_sample_rows: integer|Long expected"; + if (message.max_diff_duration != null && 
message.hasOwnProperty("max_diff_duration")) { + let error = $root.vttime.Duration.verify(message.max_diff_duration); + if (error) + return "max_diff_duration." + error; + } return null; }; @@ -160377,6 +169366,20 @@ export const vtctldata = $root.vtctldata = (() => { message.auto_retry = Boolean(object.auto_retry); if (object.verbose != null) message.verbose = Boolean(object.verbose); + if (object.max_report_sample_rows != null) + if ($util.Long) + (message.max_report_sample_rows = $util.Long.fromValue(object.max_report_sample_rows)).unsigned = false; + else if (typeof object.max_report_sample_rows === "string") + message.max_report_sample_rows = parseInt(object.max_report_sample_rows, 10); + else if (typeof object.max_report_sample_rows === "number") + message.max_report_sample_rows = object.max_report_sample_rows; + else if (typeof object.max_report_sample_rows === "object") + message.max_report_sample_rows = new $util.LongBits(object.max_report_sample_rows.low >>> 0, object.max_report_sample_rows.high >>> 0).toNumber(); + if (object.max_diff_duration != null) { + if (typeof object.max_diff_duration !== "object") + throw TypeError(".vtctldata.VDiffCreateRequest.max_diff_duration: object expected"); + message.max_diff_duration = $root.vttime.Duration.fromObject(object.max_diff_duration); + } return message; }; @@ -160422,6 +169425,12 @@ export const vtctldata = $root.vtctldata = (() => { object.wait_update_interval = null; object.auto_retry = false; object.verbose = false; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.max_report_sample_rows = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.max_report_sample_rows = options.longs === String ? 
"0" : 0; + object.max_diff_duration = null; } if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; @@ -160477,6 +169486,13 @@ export const vtctldata = $root.vtctldata = (() => { object.auto_retry = message.auto_retry; if (message.verbose != null && message.hasOwnProperty("verbose")) object.verbose = message.verbose; + if (message.max_report_sample_rows != null && message.hasOwnProperty("max_report_sample_rows")) + if (typeof message.max_report_sample_rows === "number") + object.max_report_sample_rows = options.longs === String ? String(message.max_report_sample_rows) : message.max_report_sample_rows; + else + object.max_report_sample_rows = options.longs === String ? $util.Long.prototype.toString.call(message.max_report_sample_rows) : options.longs === Number ? new $util.LongBits(message.max_report_sample_rows.low >>> 0, message.max_report_sample_rows.high >>> 0).toNumber() : message.max_report_sample_rows; + if (message.max_diff_duration != null && message.hasOwnProperty("max_diff_duration")) + object.max_diff_duration = $root.vttime.Duration.toObject(message.max_diff_duration, options); return object; }; @@ -162492,6 +171508,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {string|null} [workflow] WorkflowDeleteRequest workflow * @property {boolean|null} [keep_data] WorkflowDeleteRequest keep_data * @property {boolean|null} [keep_routing_rules] WorkflowDeleteRequest keep_routing_rules + * @property {Array.|null} [shards] WorkflowDeleteRequest shards */ /** @@ -162503,6 +171520,7 @@ export const vtctldata = $root.vtctldata = (() => { * @param {vtctldata.IWorkflowDeleteRequest=} [properties] Properties to set */ function WorkflowDeleteRequest(properties) { + this.shards = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -162541,6 +171559,14 @@ export const vtctldata = $root.vtctldata = (() => { */ 
WorkflowDeleteRequest.prototype.keep_routing_rules = false; + /** + * WorkflowDeleteRequest shards. + * @member {Array.} shards + * @memberof vtctldata.WorkflowDeleteRequest + * @instance + */ + WorkflowDeleteRequest.prototype.shards = $util.emptyArray; + /** * Creates a new WorkflowDeleteRequest instance using the specified properties. * @function create @@ -162573,6 +171599,9 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 3, wireType 0 =*/24).bool(message.keep_data); if (message.keep_routing_rules != null && Object.hasOwnProperty.call(message, "keep_routing_rules")) writer.uint32(/* id 4, wireType 0 =*/32).bool(message.keep_routing_rules); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).string(message.shards[i]); return writer; }; @@ -162623,6 +171652,12 @@ export const vtctldata = $root.vtctldata = (() => { message.keep_routing_rules = reader.bool(); break; } + case 5: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -162670,6 +171705,13 @@ export const vtctldata = $root.vtctldata = (() => { if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) if (typeof message.keep_routing_rules !== "boolean") return "keep_routing_rules: boolean expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } return null; }; @@ -162693,6 +171735,13 @@ export const vtctldata = $root.vtctldata = (() => { message.keep_data = Boolean(object.keep_data); if (object.keep_routing_rules != null) message.keep_routing_rules = Boolean(object.keep_routing_rules); + if (object.shards) { + if 
(!Array.isArray(object.shards)) + throw TypeError(".vtctldata.WorkflowDeleteRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } return message; }; @@ -162709,6 +171758,8 @@ export const vtctldata = $root.vtctldata = (() => { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.shards = []; if (options.defaults) { object.keyspace = ""; object.workflow = ""; @@ -162723,6 +171774,11 @@ export const vtctldata = $root.vtctldata = (() => { object.keep_data = message.keep_data; if (message.keep_routing_rules != null && message.hasOwnProperty("keep_routing_rules")) object.keep_routing_rules = message.keep_routing_rules; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } return object; }; @@ -163243,6 +172299,7 @@ export const vtctldata = $root.vtctldata = (() => { * @interface IWorkflowStatusRequest * @property {string|null} [keyspace] WorkflowStatusRequest keyspace * @property {string|null} [workflow] WorkflowStatusRequest workflow + * @property {Array.|null} [shards] WorkflowStatusRequest shards */ /** @@ -163254,6 +172311,7 @@ export const vtctldata = $root.vtctldata = (() => { * @param {vtctldata.IWorkflowStatusRequest=} [properties] Properties to set */ function WorkflowStatusRequest(properties) { + this.shards = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -163276,6 +172334,14 @@ export const vtctldata = $root.vtctldata = (() => { */ WorkflowStatusRequest.prototype.workflow = ""; + /** + * WorkflowStatusRequest shards. 
+ * @member {Array.} shards + * @memberof vtctldata.WorkflowStatusRequest + * @instance + */ + WorkflowStatusRequest.prototype.shards = $util.emptyArray; + /** * Creates a new WorkflowStatusRequest instance using the specified properties. * @function create @@ -163304,6 +172370,9 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 1, wireType 2 =*/10).string(message.keyspace); if (message.workflow != null && Object.hasOwnProperty.call(message, "workflow")) writer.uint32(/* id 2, wireType 2 =*/18).string(message.workflow); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 3, wireType 2 =*/26).string(message.shards[i]); return writer; }; @@ -163346,6 +172415,12 @@ export const vtctldata = $root.vtctldata = (() => { message.workflow = reader.string(); break; } + case 3: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -163387,6 +172462,13 @@ export const vtctldata = $root.vtctldata = (() => { if (message.workflow != null && message.hasOwnProperty("workflow")) if (!$util.isString(message.workflow)) return "workflow: string expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } return null; }; @@ -163406,6 +172488,13 @@ export const vtctldata = $root.vtctldata = (() => { message.keyspace = String(object.keyspace); if (object.workflow != null) message.workflow = String(object.workflow); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.WorkflowStatusRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = 
String(object.shards[i]); + } return message; }; @@ -163422,6 +172511,8 @@ export const vtctldata = $root.vtctldata = (() => { if (!options) options = {}; let object = {}; + if (options.arrays || options.defaults) + object.shards = []; if (options.defaults) { object.keyspace = ""; object.workflow = ""; @@ -163430,6 +172521,11 @@ export const vtctldata = $root.vtctldata = (() => { object.keyspace = message.keyspace; if (message.workflow != null && message.hasOwnProperty("workflow")) object.workflow = message.workflow; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } return object; }; @@ -164735,6 +173831,7 @@ export const vtctldata = $root.vtctldata = (() => { * @property {vttime.IDuration|null} [timeout] WorkflowSwitchTrafficRequest timeout * @property {boolean|null} [dry_run] WorkflowSwitchTrafficRequest dry_run * @property {boolean|null} [initialize_target_sequences] WorkflowSwitchTrafficRequest initialize_target_sequences + * @property {Array.|null} [shards] WorkflowSwitchTrafficRequest shards */ /** @@ -164748,6 +173845,7 @@ export const vtctldata = $root.vtctldata = (() => { function WorkflowSwitchTrafficRequest(properties) { this.cells = []; this.tablet_types = []; + this.shards = []; if (properties) for (let keys = Object.keys(properties), i = 0; i < keys.length; ++i) if (properties[keys[i]] != null) @@ -164834,6 +173932,14 @@ export const vtctldata = $root.vtctldata = (() => { */ WorkflowSwitchTrafficRequest.prototype.initialize_target_sequences = false; + /** + * WorkflowSwitchTrafficRequest shards. + * @member {Array.} shards + * @memberof vtctldata.WorkflowSwitchTrafficRequest + * @instance + */ + WorkflowSwitchTrafficRequest.prototype.shards = $util.emptyArray; + /** * Creates a new WorkflowSwitchTrafficRequest instance using the specified properties. 
* @function create @@ -164883,6 +173989,9 @@ export const vtctldata = $root.vtctldata = (() => { writer.uint32(/* id 9, wireType 0 =*/72).bool(message.dry_run); if (message.initialize_target_sequences != null && Object.hasOwnProperty.call(message, "initialize_target_sequences")) writer.uint32(/* id 10, wireType 0 =*/80).bool(message.initialize_target_sequences); + if (message.shards != null && message.shards.length) + for (let i = 0; i < message.shards.length; ++i) + writer.uint32(/* id 11, wireType 2 =*/90).string(message.shards[i]); return writer; }; @@ -164966,6 +174075,12 @@ export const vtctldata = $root.vtctldata = (() => { message.initialize_target_sequences = reader.bool(); break; } + case 11: { + if (!(message.shards && message.shards.length)) + message.shards = []; + message.shards.push(reader.string()); + break; + } default: reader.skipType(tag & 7); break; @@ -165057,6 +174172,13 @@ export const vtctldata = $root.vtctldata = (() => { if (message.initialize_target_sequences != null && message.hasOwnProperty("initialize_target_sequences")) if (typeof message.initialize_target_sequences !== "boolean") return "initialize_target_sequences: boolean expected"; + if (message.shards != null && message.hasOwnProperty("shards")) { + if (!Array.isArray(message.shards)) + return "shards: array expected"; + for (let i = 0; i < message.shards.length; ++i) + if (!$util.isString(message.shards[i])) + return "shards: string[] expected"; + } return null; }; @@ -165158,6 +174280,13 @@ export const vtctldata = $root.vtctldata = (() => { message.dry_run = Boolean(object.dry_run); if (object.initialize_target_sequences != null) message.initialize_target_sequences = Boolean(object.initialize_target_sequences); + if (object.shards) { + if (!Array.isArray(object.shards)) + throw TypeError(".vtctldata.WorkflowSwitchTrafficRequest.shards: array expected"); + message.shards = []; + for (let i = 0; i < object.shards.length; ++i) + message.shards[i] = String(object.shards[i]); + } 
return message; }; @@ -165177,6 +174306,7 @@ export const vtctldata = $root.vtctldata = (() => { if (options.arrays || options.defaults) { object.cells = []; object.tablet_types = []; + object.shards = []; } if (options.defaults) { object.keyspace = ""; @@ -165214,6 +174344,11 @@ export const vtctldata = $root.vtctldata = (() => { object.dry_run = message.dry_run; if (message.initialize_target_sequences != null && message.hasOwnProperty("initialize_target_sequences")) object.initialize_target_sequences = message.initialize_target_sequences; + if (message.shards && message.shards.length) { + object.shards = []; + for (let j = 0; j < message.shards.length; ++j) + object.shards[j] = message.shards[j]; + } return object; }; diff --git a/web/vtadmin/src/util/tabletDebugVars.ts b/web/vtadmin/src/util/tabletDebugVars.ts index 37ea08e49b1..1d01ec16bee 100644 --- a/web/vtadmin/src/util/tabletDebugVars.ts +++ b/web/vtadmin/src/util/tabletDebugVars.ts @@ -33,6 +33,7 @@ export type TabletDebugVars = Partial<{ BuildNumber: string; BuildTimestamp: string; BuildUser: string; + BuildVersion: string; QPS: { [k: string]: number[] };